-rw-r--r-- .clang-format | 27
-rw-r--r-- .mailmap | 4
-rw-r--r-- Documentation/ABI/obsolete/sysfs-class-dax | 22
-rw-r--r-- Documentation/ABI/stable/sysfs-driver-mlxreg-io | 14
-rw-r--r-- Documentation/ABI/testing/debugfs-wilco-ec | 23
-rw-r--r-- Documentation/ABI/testing/sysfs-class-watchdog | 23
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-system-cpu | 28
-rw-r--r-- Documentation/ABI/testing/sysfs-fs-ext4 | 7
-rw-r--r-- Documentation/ABI/testing/sysfs-fs-f2fs | 7
-rw-r--r-- Documentation/DMA-API-HOWTO.txt | 121
-rw-r--r-- Documentation/DMA-API.txt | 43
-rw-r--r-- Documentation/DMA-ISA-LPC.txt | 4
-rw-r--r-- Documentation/RCU/Design/Data-Structures/Data-Structures.html | 3
-rw-r--r-- Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html | 4
-rw-r--r-- Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html | 5
-rw-r--r-- Documentation/RCU/NMI-RCU.txt | 13
-rw-r--r-- Documentation/RCU/UP.txt | 6
-rw-r--r-- Documentation/RCU/checklist.txt | 91
-rw-r--r-- Documentation/RCU/lockdep-splat.txt | 12
-rw-r--r-- Documentation/RCU/rcu.txt | 8
-rw-r--r-- Documentation/RCU/rcu_dereference.txt | 103
-rw-r--r-- Documentation/RCU/rcubarrier.txt | 27
-rw-r--r-- Documentation/RCU/whatisRCU.txt | 10
-rw-r--r-- Documentation/accounting/psi.txt | 12
-rw-r--r-- Documentation/acpi/aml-debugger.txt | 66
-rw-r--r-- Documentation/acpi/apei/output_format.txt | 147
-rw-r--r-- Documentation/acpi/i2c-muxes.txt | 58
-rw-r--r-- Documentation/acpi/initrd_table_override.txt | 111
-rw-r--r-- Documentation/acpi/method-customizing.txt | 73
-rw-r--r-- Documentation/acpi/method-tracing.txt | 192
-rw-r--r-- Documentation/acpi/ssdt-overlays.txt | 172
-rw-r--r-- Documentation/admin-guide/README.rst | 2
-rw-r--r-- Documentation/admin-guide/acpi/cppc_sysfs.rst (renamed from Documentation/acpi/cppc_sysfs.txt) | 71
-rw-r--r-- Documentation/admin-guide/acpi/dsdt-override.rst (renamed from Documentation/acpi/dsdt-override.txt) | 8
-rw-r--r-- Documentation/admin-guide/acpi/index.rst | 14
-rw-r--r-- Documentation/admin-guide/acpi/initrd_table_override.rst | 115
-rw-r--r-- Documentation/admin-guide/acpi/ssdt-overlays.rst | 180
-rw-r--r-- Documentation/admin-guide/index.rst | 1
-rw-r--r-- Documentation/admin-guide/kernel-parameters.rst | 1
-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 85
-rw-r--r-- Documentation/admin-guide/md.rst | 3
-rw-r--r-- Documentation/admin-guide/perf-security.rst | 253
-rw-r--r-- Documentation/admin-guide/pm/cpufreq.rst | 18
-rw-r--r-- Documentation/admin-guide/pm/cpuidle.rst | 8
-rw-r--r-- Documentation/admin-guide/pm/index.rst | 2
-rw-r--r-- Documentation/admin-guide/pm/intel_epb.rst | 41
-rw-r--r-- Documentation/admin-guide/pm/intel_pstate.rst | 32
-rw-r--r-- Documentation/admin-guide/pm/sleep-states.rst | 8
-rw-r--r-- Documentation/admin-guide/pm/strategies.rst | 8
-rw-r--r-- Documentation/admin-guide/pm/system-wide.rst | 2
-rw-r--r-- Documentation/admin-guide/pm/working-state.rst | 3
-rw-r--r-- Documentation/admin-guide/tainted-kernels.rst | 159
-rw-r--r-- Documentation/arm/kernel_mode_neon.txt | 4
-rw-r--r-- Documentation/arm64/booting.txt | 5
-rw-r--r-- Documentation/arm64/cpu-feature-registers.txt | 16
-rw-r--r-- Documentation/arm64/elf_hwcaps.txt | 41
-rw-r--r-- Documentation/arm64/pointer-authentication.txt | 5
-rw-r--r-- Documentation/arm64/silicon-errata.txt | 3
-rw-r--r-- Documentation/arm64/sve.txt | 17
-rw-r--r-- Documentation/atomic_t.txt | 17
-rw-r--r-- Documentation/bpf/btf.rst | 8
-rw-r--r-- Documentation/cgroup-v1/memory.txt | 7
-rw-r--r-- Documentation/core-api/cachetlb.rst | 10
-rw-r--r-- Documentation/core-api/flexible-arrays.rst | 130
-rw-r--r-- Documentation/core-api/generic-radix-tree.rst | 12
-rw-r--r-- Documentation/core-api/index.rst | 1
-rw-r--r-- Documentation/core-api/kernel-api.rst | 4
-rw-r--r-- Documentation/core-api/memory-allocation.rst | 10
-rw-r--r-- Documentation/core-api/mm-api.rst | 2
-rw-r--r-- Documentation/core-api/xarray.rst | 15
-rw-r--r-- Documentation/cputopology.txt | 46
-rw-r--r-- Documentation/crypto/api-samples.rst | 1
-rw-r--r-- Documentation/dev-tools/kcov.rst | 2
-rw-r--r-- Documentation/dev-tools/kselftest.rst | 94
-rw-r--r-- Documentation/device-mapper/cache.txt | 3
-rw-r--r-- Documentation/device-mapper/dm-init.txt | 114
-rw-r--r-- Documentation/devicetree/bindings/Makefile | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/cpus.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/l2c2x0.txt | 114
-rw-r--r-- Documentation/devicetree/bindings/arm/l2c2x0.yaml | 248
-rw-r--r-- Documentation/devicetree/bindings/arm/pmu.txt | 70
-rw-r--r-- Documentation/devicetree/bindings/arm/pmu.yaml | 87
-rw-r--r-- Documentation/devicetree/bindings/clock/actions,owl-cmu.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/clock/exynos5433-clock.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/clock/fixed-clock.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/clock/fixed-clock.yaml | 44
-rw-r--r-- Documentation/devicetree/bindings/clock/fixed-factor-clock.txt | 28
-rw-r--r-- Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml | 56
-rw-r--r-- Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8mm-clock.txt | 29
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,rpmcc.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/display/sitronix,st7735r.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/display/ssd1307fb.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/dma/dma.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/dma/fsl-qdma.txt | 57
-rw-r--r-- Documentation/devicetree/bindings/dma/k3dma.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/dma/snps-dma.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/dma/sprd-dma.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt | 135
-rw-r--r-- Documentation/devicetree/bindings/hwmon/adc128d818.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/hwmon/g762.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/hwmon/lm75.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/hwmon/pwm-fan.txt | 21
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-xscale.txt) | 0
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-mtk.txt) | 0
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-stu300.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt) | 0
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt) | 0
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-wmt.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-vt8500.txt) | 0
-rw-r--r-- Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/input/msm-vibrator.txt | 36
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt | 13
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/goodix.txt | 14
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/sx8654.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt | 175
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml | 279
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt | 171
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml | 223
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt | 14
-rw-r--r-- Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt | 127
-rw-r--r-- Documentation/devicetree/bindings/media/i2c/adv748x.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/media/i2c/melexis,mlx90640.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/media/i2c/mt9m001.txt | 38
-rw-r--r-- Documentation/devicetree/bindings/media/i2c/ov5645.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/media/imx7-csi.txt | 45
-rw-r--r-- Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt | 90
-rw-r--r-- Documentation/devicetree/bindings/media/mediatek-vcodec.txt | 13
-rw-r--r-- Documentation/devicetree/bindings/media/rcar_vin.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,fcp.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,vsp1.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/media/si470x.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/media/sun6i-csi.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/net/davinci_emac.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/dsa.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/qca8k.txt | 73
-rw-r--r-- Documentation/devicetree/bindings/net/ethernet.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/net/macb.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/net/stm32-dwmac.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/pci/altera-pcie.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt | 18
-rw-r--r-- Documentation/devicetree/bindings/pci/layerscape-pci.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/pci/rcar-pci.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/pci/ti-pci.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/fsl,imx50-pinctrl.txt | 32
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt | 36
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/pwm/atmel-pwm.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/pwm/pwm-hibvt.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/regulator/gpio-regulator.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt | 43
-rw-r--r-- Documentation/devicetree/bindings/remoteproc/qcom,adsp-pil.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt | 79
-rw-r--r-- Documentation/devicetree/bindings/serial/mtk-uart.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/spi/fsl-spi.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/spi/sh-msiof.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-mt65xx.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-mt7621.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-zynq-qspi.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/ufs/ufs-hisi.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 13
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/watchdog/renesas-wdt.txt | 1
-rw-r--r-- Documentation/doc-guide/kernel-doc.rst | 17
-rw-r--r-- Documentation/doc-guide/sphinx.rst | 12
-rw-r--r-- Documentation/driver-api/acpi/index.rst | 9
-rw-r--r-- Documentation/driver-api/acpi/linuxized-acpica.rst (renamed from Documentation/acpi/linuxized-acpica.txt) | 109
-rw-r--r-- Documentation/driver-api/acpi/scan_handlers.rst (renamed from Documentation/acpi/scan_handlers.txt) | 24
-rw-r--r-- Documentation/driver-api/device-io.rst | 45
-rw-r--r-- Documentation/driver-api/dmaengine/client.rst | 7
-rw-r--r-- Documentation/driver-api/dmaengine/dmatest.rst | 1
-rw-r--r-- Documentation/driver-api/iio/buffers.rst | 2
-rw-r--r-- Documentation/driver-api/iio/core.rst | 6
-rw-r--r-- Documentation/driver-api/iio/hw-consumer.rst | 2
-rw-r--r-- Documentation/driver-api/iio/triggers.rst | 2
-rw-r--r-- Documentation/driver-api/index.rst | 1
-rw-r--r-- Documentation/driver-api/pci/p2pdma.rst | 4
-rw-r--r-- Documentation/driver-api/pinctl.rst | 9
-rw-r--r-- Documentation/driver-api/pm/cpuidle.rst | 7
-rw-r--r-- Documentation/driver-api/pm/devices.rst | 12
-rw-r--r-- Documentation/driver-api/pm/index.rst | 2
-rw-r--r-- Documentation/driver-api/pm/notifiers.rst | 8
-rw-r--r-- Documentation/driver-api/pm/types.rst | 2
-rw-r--r-- Documentation/driver-api/usb/power-management.rst | 14
-rw-r--r-- Documentation/driver-model/devres.txt | 2
-rw-r--r-- Documentation/fault-injection/fault-injection.txt | 2
-rw-r--r-- Documentation/features/time/modern-timekeeping/arch-support.txt | 2
-rw-r--r-- Documentation/filesystems/api-summary.rst | 150
-rw-r--r-- Documentation/filesystems/binderfs.rst | 68
-rw-r--r-- Documentation/filesystems/ceph.txt | 14
-rw-r--r-- Documentation/filesystems/cifs/TODO | 3
-rw-r--r-- Documentation/filesystems/cifs/cifs.txt | 34
-rw-r--r-- Documentation/filesystems/exofs.txt | 185
-rw-r--r-- Documentation/filesystems/f2fs.txt | 2
-rw-r--r-- Documentation/filesystems/fscrypt.rst | 16
-rw-r--r-- Documentation/filesystems/index.rst | 389
-rw-r--r-- Documentation/filesystems/journalling.rst | 184
-rw-r--r-- Documentation/filesystems/mount_api.txt | 732
-rw-r--r-- Documentation/filesystems/path-lookup.rst | 39
-rw-r--r-- Documentation/filesystems/splice.rst | 22
-rw-r--r-- Documentation/filesystems/sysfs.txt | 21
-rw-r--r-- Documentation/firmware-guide/acpi/DSD-properties-rules.rst (renamed from Documentation/acpi/DSD-properties-rules.txt) | 21
-rw-r--r-- Documentation/firmware-guide/acpi/acpi-lid.rst (renamed from Documentation/acpi/acpi-lid.txt) | 40
-rw-r--r-- Documentation/firmware-guide/acpi/aml-debugger.rst | 75
-rw-r--r-- Documentation/firmware-guide/acpi/apei/einj.rst (renamed from Documentation/acpi/apei/einj.txt) | 94
-rw-r--r-- Documentation/firmware-guide/acpi/apei/output_format.rst | 150
-rw-r--r-- Documentation/firmware-guide/acpi/debug.rst (renamed from Documentation/acpi/debug.txt) | 31
-rw-r--r-- Documentation/firmware-guide/acpi/dsd/data-node-references.rst (renamed from Documentation/acpi/dsd/data-node-references.txt) | 36
-rw-r--r-- Documentation/firmware-guide/acpi/dsd/graph.rst (renamed from Documentation/acpi/dsd/graph.txt) | 157
-rw-r--r-- Documentation/firmware-guide/acpi/enumeration.rst (renamed from Documentation/acpi/enumeration.txt) | 161
-rw-r--r-- Documentation/firmware-guide/acpi/gpio-properties.rst (renamed from Documentation/acpi/gpio-properties.txt) | 78
-rw-r--r-- Documentation/firmware-guide/acpi/i2c-muxes.rst | 61
-rw-r--r-- Documentation/firmware-guide/acpi/index.rst | 26
-rw-r--r-- Documentation/firmware-guide/acpi/lpit.rst (renamed from Documentation/acpi/lpit.txt) | 18
-rw-r--r-- Documentation/firmware-guide/acpi/method-customizing.rst | 89
-rw-r--r-- Documentation/firmware-guide/acpi/method-tracing.rst | 238
-rw-r--r-- Documentation/firmware-guide/acpi/namespace.rst (renamed from Documentation/acpi/namespace.txt) | 294
-rw-r--r-- Documentation/firmware-guide/acpi/osi.rst (renamed from Documentation/acpi/osi.txt) | 15
-rw-r--r-- Documentation/firmware-guide/acpi/video_extension.rst (renamed from Documentation/acpi/video_extension.txt) | 83
-rw-r--r-- Documentation/firmware-guide/index.rst | 13
-rw-r--r-- Documentation/flexible-arrays.txt | 123
-rw-r--r-- Documentation/hwmon/ab8500.rst (renamed from Documentation/hwmon/ab8500) | 10
-rw-r--r-- Documentation/hwmon/abituguru | 92
-rw-r--r-- Documentation/hwmon/abituguru-datasheet.rst (renamed from Documentation/hwmon/abituguru-datasheet) | 160
-rw-r--r-- Documentation/hwmon/abituguru.rst | 113
-rw-r--r-- Documentation/hwmon/abituguru3.rst (renamed from Documentation/hwmon/abituguru3) | 36
-rw-r--r-- Documentation/hwmon/abx500.rst (renamed from Documentation/hwmon/abx500) | 8
-rw-r--r-- Documentation/hwmon/acpi_power_meter.rst (renamed from Documentation/hwmon/acpi_power_meter) | 25
-rw-r--r-- Documentation/hwmon/ad7314.rst (renamed from Documentation/hwmon/ad7314) | 9
-rw-r--r-- Documentation/hwmon/adc128d818.rst (renamed from Documentation/hwmon/adc128d818) | 7
-rw-r--r-- Documentation/hwmon/adm1021.rst (renamed from Documentation/hwmon/adm1021) | 44
-rw-r--r-- Documentation/hwmon/adm1025.rst (renamed from Documentation/hwmon/adm1025) | 13
-rw-r--r-- Documentation/hwmon/adm1026.rst (renamed from Documentation/hwmon/adm1026) | 24
-rw-r--r-- Documentation/hwmon/adm1031.rst (renamed from Documentation/hwmon/adm1031) | 16
-rw-r--r-- Documentation/hwmon/adm1275.rst (renamed from Documentation/hwmon/adm1275) | 30
-rw-r--r-- Documentation/hwmon/adm9240.rst (renamed from Documentation/hwmon/adm9240) | 50
-rw-r--r-- Documentation/hwmon/ads1015.rst (renamed from Documentation/hwmon/ads1015) | 74
-rw-r--r-- Documentation/hwmon/ads7828.rst (renamed from Documentation/hwmon/ads7828) | 29
-rw-r--r-- Documentation/hwmon/adt7410.rst (renamed from Documentation/hwmon/adt7410) | 49
-rw-r--r-- Documentation/hwmon/adt7411.rst (renamed from Documentation/hwmon/adt7411) | 20
-rw-r--r-- Documentation/hwmon/adt7462.rst (renamed from Documentation/hwmon/adt7462) | 11
-rw-r--r-- Documentation/hwmon/adt7470.rst (renamed from Documentation/hwmon/adt7470) | 8
-rw-r--r-- Documentation/hwmon/adt7475.rst (renamed from Documentation/hwmon/adt7475) | 38
-rw-r--r-- Documentation/hwmon/amc6821.rst (renamed from Documentation/hwmon/amc6821) | 18
-rw-r--r-- Documentation/hwmon/asb100.rst (renamed from Documentation/hwmon/asb100) | 55
-rw-r--r-- Documentation/hwmon/asc7621.rst (renamed from Documentation/hwmon/asc7621) | 146
-rw-r--r-- Documentation/hwmon/aspeed-pwm-tacho.rst (renamed from Documentation/hwmon/aspeed-pwm-tacho) | 2
-rw-r--r-- Documentation/hwmon/coretemp.rst (renamed from Documentation/hwmon/coretemp) | 46
-rw-r--r-- Documentation/hwmon/da9052.rst (renamed from Documentation/hwmon/da9052) | 41
-rw-r--r-- Documentation/hwmon/da9055.rst (renamed from Documentation/hwmon/da9055) | 20
-rw-r--r-- Documentation/hwmon/dme1737.rst (renamed from Documentation/hwmon/dme1737) | 88
-rw-r--r-- Documentation/hwmon/ds1621.rst (renamed from Documentation/hwmon/ds1621) | 156
-rw-r--r-- Documentation/hwmon/ds620.rst (renamed from Documentation/hwmon/ds620) | 12
-rw-r--r-- Documentation/hwmon/emc1403.rst (renamed from Documentation/hwmon/emc1403) | 33
-rw-r--r-- Documentation/hwmon/emc2103.rst (renamed from Documentation/hwmon/emc2103) | 6
-rw-r--r-- Documentation/hwmon/emc6w201.rst (renamed from Documentation/hwmon/emc6w201) | 5
-rw-r--r-- Documentation/hwmon/f71805f.rst (renamed from Documentation/hwmon/f71805f) | 36
-rw-r--r-- Documentation/hwmon/f71882fg.rst (renamed from Documentation/hwmon/f71882fg) | 58
-rw-r--r-- Documentation/hwmon/fam15h_power.rst (renamed from Documentation/hwmon/fam15h_power) | 85
-rw-r--r-- Documentation/hwmon/ftsteutates.rst (renamed from Documentation/hwmon/ftsteutates) | 14
-rw-r--r-- Documentation/hwmon/g760a.rst (renamed from Documentation/hwmon/g760a) | 4
-rw-r--r-- Documentation/hwmon/g762.rst (renamed from Documentation/hwmon/g762) | 51
-rw-r--r-- Documentation/hwmon/gl518sm.rst (renamed from Documentation/hwmon/gl518sm) | 21
-rw-r--r-- Documentation/hwmon/hih6130.rst (renamed from Documentation/hwmon/hih6130) | 14
-rw-r--r-- Documentation/hwmon/hwmon-kernel-api.rst (renamed from Documentation/hwmon/hwmon-kernel-api.txt) | 337
-rw-r--r-- Documentation/hwmon/ibm-cffps.rst (renamed from Documentation/hwmon/ibm-cffps) | 3
-rw-r--r-- Documentation/hwmon/ibmaem.rst (renamed from Documentation/hwmon/ibmaem) | 10
-rw-r--r-- Documentation/hwmon/ibmpowernv.rst (renamed from Documentation/hwmon/ibmpowernv) | 31
-rw-r--r-- Documentation/hwmon/ina209.rst (renamed from Documentation/hwmon/ina209) | 18
-rw-r--r-- Documentation/hwmon/ina2xx.rst (renamed from Documentation/hwmon/ina2xx) | 41
-rw-r--r-- Documentation/hwmon/ina3221.rst (renamed from Documentation/hwmon/ina3221) | 35
-rw-r--r-- Documentation/hwmon/index.rst | 182
-rw-r--r-- Documentation/hwmon/ir35221.rst (renamed from Documentation/hwmon/ir35221) | 13
-rw-r--r-- Documentation/hwmon/ir38064.rst | 66
-rw-r--r-- Documentation/hwmon/isl68137.rst | 80
-rw-r--r-- Documentation/hwmon/it87.rst (renamed from Documentation/hwmon/it87) | 102
-rw-r--r-- Documentation/hwmon/jc42.rst (renamed from Documentation/hwmon/jc42) | 55
-rw-r--r-- Documentation/hwmon/k10temp.rst (renamed from Documentation/hwmon/k10temp) | 37
-rw-r--r-- Documentation/hwmon/k8temp.rst (renamed from Documentation/hwmon/k8temp) | 17
-rw-r--r-- Documentation/hwmon/lineage-pem.rst (renamed from Documentation/hwmon/lineage-pem) | 16
-rw-r--r-- Documentation/hwmon/lm25066.rst (renamed from Documentation/hwmon/lm25066) | 32
-rw-r--r-- Documentation/hwmon/lm63.rst (renamed from Documentation/hwmon/lm63) | 24
-rw-r--r-- Documentation/hwmon/lm70.rst (renamed from Documentation/hwmon/lm70) | 13
-rw-r--r-- Documentation/hwmon/lm73.rst (renamed from Documentation/hwmon/lm73) | 16
-rw-r--r-- Documentation/hwmon/lm75.rst (renamed from Documentation/hwmon/lm75) | 102
-rw-r--r-- Documentation/hwmon/lm77.rst (renamed from Documentation/hwmon/lm77) | 9
-rw-r--r-- Documentation/hwmon/lm78.rst (renamed from Documentation/hwmon/lm78) | 20
-rw-r--r-- Documentation/hwmon/lm80.rst (renamed from Documentation/hwmon/lm80) | 19
-rw-r--r-- Documentation/hwmon/lm83.rst (renamed from Documentation/hwmon/lm83) | 16
-rw-r--r-- Documentation/hwmon/lm85.rst (renamed from Documentation/hwmon/lm85) | 97
-rw-r--r-- Documentation/hwmon/lm87.rst (renamed from Documentation/hwmon/lm87) | 23
-rw-r--r-- Documentation/hwmon/lm90.rst (renamed from Documentation/hwmon/lm90) | 174
-rw-r--r-- Documentation/hwmon/lm92.rst (renamed from Documentation/hwmon/lm92) | 17
-rw-r--r-- Documentation/hwmon/lm93.rst (renamed from Documentation/hwmon/lm93) | 157
-rw-r--r-- Documentation/hwmon/lm95234.rst (renamed from Documentation/hwmon/lm95234) | 11
-rw-r--r-- Documentation/hwmon/lm95245.rst (renamed from Documentation/hwmon/lm95245) | 13
-rw-r--r-- Documentation/hwmon/lochnagar.rst | 83
-rw-r--r-- Documentation/hwmon/ltc2945.rst (renamed from Documentation/hwmon/ltc2945) | 16
-rw-r--r-- Documentation/hwmon/ltc2978.rst (renamed from Documentation/hwmon/ltc2978) | 267
-rw-r--r-- Documentation/hwmon/ltc2990.rst (renamed from Documentation/hwmon/ltc2990) | 23
-rw-r--r-- Documentation/hwmon/ltc3815.rst (renamed from Documentation/hwmon/ltc3815) | 12
-rw-r--r-- Documentation/hwmon/ltc4151.rst (renamed from Documentation/hwmon/ltc4151) | 16
-rw-r--r-- Documentation/hwmon/ltc4215.rst (renamed from Documentation/hwmon/ltc4215) | 16
-rw-r--r-- Documentation/hwmon/ltc4245.rst (renamed from Documentation/hwmon/ltc4245) | 17
-rw-r--r-- Documentation/hwmon/ltc4260.rst (renamed from Documentation/hwmon/ltc4260) | 16
-rw-r--r-- Documentation/hwmon/ltc4261.rst (renamed from Documentation/hwmon/ltc4261) | 16
-rw-r--r-- Documentation/hwmon/max16064.rst (renamed from Documentation/hwmon/max16064) | 17
-rw-r--r-- Documentation/hwmon/max16065.rst (renamed from Documentation/hwmon/max16065) | 24
-rw-r--r-- Documentation/hwmon/max1619.rst (renamed from Documentation/hwmon/max1619) | 12
-rw-r--r-- Documentation/hwmon/max1668.rst (renamed from Documentation/hwmon/max1668) | 14
-rw-r--r-- Documentation/hwmon/max197.rst (renamed from Documentation/hwmon/max197) | 36
-rw-r--r-- Documentation/hwmon/max20751.rst (renamed from Documentation/hwmon/max20751) | 9
-rw-r--r-- Documentation/hwmon/max31722.rst (renamed from Documentation/hwmon/max31722) | 12
-rw-r--r-- Documentation/hwmon/max31785.rst (renamed from Documentation/hwmon/max31785) | 6
-rw-r--r-- Documentation/hwmon/max31790.rst (renamed from Documentation/hwmon/max31790) | 6
-rw-r--r-- Documentation/hwmon/max34440.rst (renamed from Documentation/hwmon/max34440) | 90
-rw-r--r-- Documentation/hwmon/max6639.rst (renamed from Documentation/hwmon/max6639) | 16
-rw-r--r-- Documentation/hwmon/max6642.rst (renamed from Documentation/hwmon/max6642) | 10
-rw-r--r-- Documentation/hwmon/max6650.rst (renamed from Documentation/hwmon/max6650) | 17
-rw-r--r-- Documentation/hwmon/max6697.rst (renamed from Documentation/hwmon/max6697) | 33
-rw-r--r-- Documentation/hwmon/max8688.rst (renamed from Documentation/hwmon/max8688) | 20
-rw-r--r-- Documentation/hwmon/mc13783-adc.rst (renamed from Documentation/hwmon/mc13783-adc) | 27
-rw-r--r-- Documentation/hwmon/mcp3021.rst (renamed from Documentation/hwmon/mcp3021) | 15
-rw-r--r-- Documentation/hwmon/menf21bmc.rst (renamed from Documentation/hwmon/menf21bmc) | 5
-rw-r--r-- Documentation/hwmon/mlxreg-fan.rst (renamed from Documentation/hwmon/mlxreg-fan) | 60
-rw-r--r-- Documentation/hwmon/nct6683.rst (renamed from Documentation/hwmon/nct6683) | 11
-rw-r--r-- Documentation/hwmon/nct6775.rst (renamed from Documentation/hwmon/nct6775) | 114
-rw-r--r-- Documentation/hwmon/nct7802.rst (renamed from Documentation/hwmon/nct7802) | 11
-rw-r--r-- Documentation/hwmon/nct7904.rst (renamed from Documentation/hwmon/nct7904) | 9
-rw-r--r-- Documentation/hwmon/npcm750-pwm-fan.rst (renamed from Documentation/hwmon/npcm750-pwm-fan) | 4
-rw-r--r-- Documentation/hwmon/nsa320.rst (renamed from Documentation/hwmon/nsa320) | 15
-rw-r--r-- Documentation/hwmon/ntc_thermistor.rst (renamed from Documentation/hwmon/ntc_thermistor) | 123
-rw-r--r-- Documentation/hwmon/occ.rst (renamed from Documentation/hwmon/occ) | 93
-rw-r--r-- Documentation/hwmon/pc87360.rst (renamed from Documentation/hwmon/pc87360) | 38
-rw-r--r-- Documentation/hwmon/pc87427.rst (renamed from Documentation/hwmon/pc87427) | 4
-rw-r--r-- Documentation/hwmon/pcf8591.rst (renamed from Documentation/hwmon/pcf8591) | 52
-rw-r--r-- Documentation/hwmon/pmbus-core.rst (renamed from Documentation/hwmon/pmbus-core) | 173
-rw-r--r-- Documentation/hwmon/pmbus.rst (renamed from Documentation/hwmon/pmbus) | 90
-rw-r--r-- Documentation/hwmon/powr1220.rst (renamed from Documentation/hwmon/powr1220) | 12
-rw-r--r-- Documentation/hwmon/pwm-fan.rst (renamed from Documentation/hwmon/pwm-fan) | 3
-rw-r--r-- Documentation/hwmon/raspberrypi-hwmon.rst (renamed from Documentation/hwmon/raspberrypi-hwmon) | 3
-rw-r--r-- Documentation/hwmon/sch5627.rst (renamed from Documentation/hwmon/sch5627) | 4
-rw-r--r-- Documentation/hwmon/sch5636.rst (renamed from Documentation/hwmon/sch5636) | 3
-rw-r--r-- Documentation/hwmon/scpi-hwmon.rst (renamed from Documentation/hwmon/scpi-hwmon) | 7
-rw-r--r-- Documentation/hwmon/sht15.rst (renamed from Documentation/hwmon/sht15) | 28
-rw-r--r-- Documentation/hwmon/sht21.rst (renamed from Documentation/hwmon/sht21) | 24
-rw-r--r-- Documentation/hwmon/sht3x.rst (renamed from Documentation/hwmon/sht3x) | 42
-rw-r--r-- Documentation/hwmon/shtc1.rst (renamed from Documentation/hwmon/shtc1) | 19
-rw-r--r-- Documentation/hwmon/sis5595.rst (renamed from Documentation/hwmon/sis5595) | 41
-rw-r--r-- Documentation/hwmon/smm665.rst (renamed from Documentation/hwmon/smm665) | 42
-rw-r--r-- Documentation/hwmon/smsc47b397.rst (renamed from Documentation/hwmon/smsc47b397) | 162
-rw-r--r-- Documentation/hwmon/smsc47m1.rst (renamed from Documentation/hwmon/smsc47m1) | 43
-rw-r--r-- Documentation/hwmon/smsc47m192 | 103
-rw-r--r-- Documentation/hwmon/smsc47m192.rst | 116
-rw-r--r-- Documentation/hwmon/submitting-patches.rst (renamed from Documentation/hwmon/submitting-patches) | 21
-rw-r--r-- Documentation/hwmon/sysfs-interface.rst (renamed from Documentation/hwmon/sysfs-interface) | 721
-rw-r--r-- Documentation/hwmon/tc654.rst (renamed from Documentation/hwmon/tc654) | 9
-rw-r--r-- Documentation/hwmon/tc74.rst (renamed from Documentation/hwmon/tc74) | 3
-rw-r--r-- Documentation/hwmon/thmc50.rst (renamed from Documentation/hwmon/thmc50) | 37
-rw-r--r-- Documentation/hwmon/tmp102.rst (renamed from Documentation/hwmon/tmp102) | 7
-rw-r--r-- Documentation/hwmon/tmp103.rst (renamed from Documentation/hwmon/tmp103) | 7
-rw-r--r-- Documentation/hwmon/tmp108.rst (renamed from Documentation/hwmon/tmp108) | 7
-rw-r--r-- Documentation/hwmon/tmp401.rst (renamed from Documentation/hwmon/tmp401) | 32
-rw-r--r-- Documentation/hwmon/tmp421.rst (renamed from Documentation/hwmon/tmp421) | 26
-rw-r--r-- Documentation/hwmon/tps40422.rst (renamed from Documentation/hwmon/tps40422) | 25
-rw-r--r-- Documentation/hwmon/twl4030-madc-hwmon.rst (renamed from Documentation/hwmon/twl4030-madc-hwmon) | 8
-rw-r--r-- Documentation/hwmon/ucd9000.rst (renamed from Documentation/hwmon/ucd9000) | 35
-rw-r--r-- Documentation/hwmon/ucd9200.rst (renamed from Documentation/hwmon/ucd9200) | 46
-rw-r--r-- Documentation/hwmon/userspace-tools.rst (renamed from Documentation/hwmon/userspace-tools) | 3
-rw-r--r-- Documentation/hwmon/vexpress.rst (renamed from Documentation/hwmon/vexpress) | 13
-rw-r--r-- Documentation/hwmon/via686a.rst (renamed from Documentation/hwmon/via686a) | 30
-rw-r--r-- Documentation/hwmon/vt1211.rst (renamed from Documentation/hwmon/vt1211) | 84
-rw-r--r-- Documentation/hwmon/w83627ehf.rst (renamed from Documentation/hwmon/w83627ehf) | 162
-rw-r--r-- Documentation/hwmon/w83627hf.rst (renamed from Documentation/hwmon/w83627hf) | 65
-rw-r--r-- Documentation/hwmon/w83773g.rst (renamed from Documentation/hwmon/w83773g) | 12
-rw-r--r-- Documentation/hwmon/w83781d.rst (renamed from Documentation/hwmon/w83781d) | 330
-rw-r--r-- Documentation/hwmon/w83791d.rst (renamed from Documentation/hwmon/w83791d) | 123
-rw-r--r-- Documentation/hwmon/w83792d.rst (renamed from Documentation/hwmon/w83792d) | 112
-rw-r--r-- Documentation/hwmon/w83793 | 106
-rw-r--r-- Documentation/hwmon/w83793.rst | 113
-rw-r--r-- Documentation/hwmon/w83795 | 127
-rw-r--r-- Documentation/hwmon/w83795.rst | 142
-rw-r--r-- Documentation/hwmon/w83l785ts.rst (renamed from Documentation/hwmon/w83l785ts) | 9
-rw-r--r-- Documentation/hwmon/w83l786ng.rst (renamed from Documentation/hwmon/w83l786ng) | 42
-rw-r--r-- Documentation/hwmon/wm831x.rst (renamed from Documentation/hwmon/wm831x) | 9
-rw-r--r-- Documentation/hwmon/wm8350.rst (renamed from Documentation/hwmon/wm8350) | 10
-rw-r--r-- Documentation/hwmon/xgene-hwmon.rst (renamed from Documentation/hwmon/xgene-hwmon) | 24
-rw-r--r-- Documentation/hwmon/zl6100.rst (renamed from Documentation/hwmon/zl6100) | 71
-rw-r--r-- Documentation/i2c/busses/i2c-i801 | 1
-rw-r--r-- Documentation/index.rst | 12
-rw-r--r-- Documentation/infiniband/user_verbs.txt | 4
-rw-r--r-- Documentation/input/devices/xpad.rst | 2
-rw-r--r-- Documentation/kbuild/kbuild.txt | 15
-rw-r--r-- Documentation/kbuild/makefiles.txt | 28
-rw-r--r-- Documentation/kbuild/modules.txt | 2
-rw-r--r-- Documentation/kprobes.txt | 6
-rw-r--r-- Documentation/laptops/lg-laptop.rst | 4
-rw-r--r-- Documentation/locking/lockdep-design.txt | 4
-rw-r--r-- Documentation/lzo.txt | 8
-rw-r--r-- Documentation/media/dvb-drivers/dvb-usb.rst | 2
-rw-r--r-- Documentation/media/kapi/dtv-core.rst | 2
-rw-r--r-- Documentation/media/kapi/dtv-frontend.rst | 2
-rw-r--r-- Documentation/media/kapi/mc-core.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-device.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-intro.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-subdev.rst | 4
-rw-r--r-- Documentation/media/lirc.h.rst.exceptions | 3
-rw-r--r-- Documentation/media/uapi/dvb/audio-set-bypass-mode.rst | 2
-rw-r--r-- Documentation/media/uapi/dvb/ca-set-descr.rst | 2
-rw-r--r-- Documentation/media/uapi/dvb/dmx-qbuf.rst | 2
-rw-r--r-- Documentation/media/uapi/dvb/dvbproperty.rst | 2
-rw-r--r-- Documentation/media/uapi/dvb/video_types.rst | 2
-rw-r--r-- Documentation/media/uapi/fdl-appendix.rst | 2
-rw-r--r-- Documentation/media/uapi/mediactl/media-types.rst | 2
-rw-r--r-- Documentation/media/uapi/mediactl/request-api.rst | 4
-rw-r--r-- Documentation/media/uapi/rc/rc-tables.rst | 8
-rw-r--r-- Documentation/media/uapi/v4l/buffer.rst | 11
-rw-r--r-- Documentation/media/uapi/v4l/common.rst | 11
-rw-r--r-- Documentation/media/uapi/v4l/control.rst | 2
-rw-r--r-- Documentation/media/uapi/v4l/dev-effect.rst | 28
-rw-r--r-- Documentation/media/uapi/v4l/dev-mem2mem.rst (renamed from Documentation/media/uapi/v4l/dev-codec.rst) | 41
-rw-r--r-- Documentation/media/uapi/v4l/dev-teletext.rst | 41
-rw-r--r-- Documentation/media/uapi/v4l/devices.rst | 4
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-camera.rst | 508
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-codec.rst | 2451
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-detect.rst | 71
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-dv.rst | 166
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-flash.rst | 192
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-fm-rx.rst | 95
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-fm-tx.rst | 188
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-image-process.rst | 63
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-image-source.rst | 57
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst | 113
-rw-r--r-- Documentation/media/uapi/v4l/ext-ctrls-rf-tuner.rst | 96
-rw-r--r-- Documentation/media/uapi/v4l/extended-controls.rst | 3905
-rw-r--r-- Documentation/media/uapi/v4l/meta-formats.rst | 2
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-compressed.rst | 2
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst | 144
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst | 170
-rw-r--r-- Documentation/media/uapi/v4l/subdev-formats.rst | 6
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-parm.rst | 2
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-prepare-buf.rst | 5
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-qbuf.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/bttv.rst | 4
-rw-r--r-- Documentation/media/v4l-drivers/imx.rst | 107
-rw-r--r-- Documentation/media/v4l-drivers/imx7.rst | 162
-rw-r--r-- Documentation/media/v4l-drivers/index.rst | 1
-rw-r--r-- Documentation/media/v4l-drivers/ipu3.rst | 151
-rw-r--r-- Documentation/media/v4l-drivers/pxa_camera.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/qcom_camss.rst | 2
-rw-r--r-- Documentation/memory-barriers.txt | 249
-rw-r--r-- Documentation/misc-devices/ibmvmc.rst | 1
-rw-r--r-- Documentation/misc-devices/index.rst | 17
-rw-r--r-- Documentation/networking/bpf_flow_dissector.rst | 126
-rw-r--r-- Documentation/networking/checksum-offloads.rst | 143
-rw-r--r-- Documentation/networking/checksum-offloads.txt | 122
-rw-r--r-- Documentation/networking/decnet.txt | 2
-rw-r--r-- Documentation/networking/index.rst | 4
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 3
-rw-r--r-- Documentation/networking/msg_zerocopy.rst | 2
-rw-r--r-- Documentation/networking/netdev-FAQ.rst | 13
-rw-r--r-- Documentation/networking/nf_flowtable.txt | 8
-rw-r--r-- Documentation/networking/rxrpc.txt | 16
-rw-r--r-- Documentation/networking/scaling.rst (renamed from Documentation/networking/scaling.txt) | 131
-rw-r--r-- Documentation/networking/segmentation-offloads.rst (renamed from Documentation/networking/segmentation-offloads.txt) | 48
-rw-r--r-- Documentation/networking/snmp_counter.rst | 12
-rw-r--r-- Documentation/process/coding-style.rst | 97
-rw-r--r-- Documentation/process/howto.rst | 59
-rw-r--r-- Documentation/process/kernel-docs.rst | 2
-rw-r--r-- Documentation/process/license-rules.rst | 66
-rw-r--r-- Documentation/process/stable-api-nonsense.rst | 15
-rw-r--r-- Documentation/process/stable-kernel-rules.rst | 9
-rw-r--r-- Documentation/process/submitting-patches.rst | 6
-rw-r--r-- Documentation/robust-futexes.txt | 3
-rw-r--r-- Documentation/scsi/osd.txt | 197
-rw-r--r-- Documentation/scsi/ufs.txt | 11
-rw-r--r-- Documentation/security/LSM.rst | 5
-rw-r--r-- Documentation/security/SCTP.rst (renamed from Documentation/security/LSM-sctp.rst) | 180
-rw-r--r-- Documentation/security/SELinux-sctp.rst | 158
-rw-r--r-- Documentation/security/index.rst | 3
-rw-r--r-- Documentation/spi/spi-summary | 6
-rw-r--r-- Documentation/static-keys.txt | 2
-rw-r--r-- Documentation/sysctl/kernel.txt | 50
-rw-r--r-- Documentation/sysctl/vm.txt | 18
-rwxr-xr-x Documentation/target/tcm_mod_builder.py | 8
-rw-r--r-- Documentation/thermal/sysfs-api.txt | 2
-rw-r--r-- Documentation/timers/highres.txt | 2
-rw-r--r-- Documentation/trace/ftrace.rst | 89
-rw-r--r-- Documentation/trace/histogram.rst | 316
-rw-r--r-- Documentation/trace/uprobetracer.rst | 7
-rw-r--r-- Documentation/translations/it_IT/doc-guide/sphinx.rst | 2
-rw-r--r-- Documentation/translations/it_IT/process/applying-patches.rst | 12
-rw-r--r-- Documentation/translations/it_IT/process/changes.rst | 487
-rw-r--r-- Documentation/translations/it_IT/process/coding-style.rst | 103
-rw-r--r-- Documentation/translations/it_IT/process/howto.rst | 13
-rw-r--r-- Documentation/translations/it_IT/process/stable-api-nonsense.rst | 202
-rw-r--r-- Documentation/translations/it_IT/process/submit-checklist.rst | 127
-rw-r--r-- Documentation/translations/it_IT/process/submitting-drivers.rst | 8
-rw-r--r-- Documentation/translations/it_IT/process/submitting-patches.rst | 862
-rw-r--r-- Documentation/translations/ja_JP/howto.rst | 12
-rw-r--r-- Documentation/translations/ko_KR/howto.rst | 56
-rw-r--r-- Documentation/translations/ko_KR/memory-barriers.txt | 49
-rw-r--r-- Documentation/translations/zh_CN/HOWTO | 9
-rw-r--r-- Documentation/translations/zh_CN/coding-style.rst | 57
-rw-r--r-- Documentation/virtual/kvm/api.txt | 105
-rw-r--r-- Documentation/virtual/kvm/halt-polling.txt | 37
-rw-r--r-- Documentation/virtual/kvm/mmu.txt | 52
-rw-r--r-- Documentation/virtual/kvm/s390-diag.txt | 3
-rw-r--r-- Documentation/vm/index.rst | 2
-rw-r--r-- Documentation/vm/slub.rst | 4
-rw-r--r-- Documentation/watchdog/mlx-wdt.txt | 52
-rw-r--r-- Documentation/x86/kernel-stacks | 13
-rw-r--r-- Documentation/x86/topology.txt | 2
-rw-r--r-- Documentation/x86/x86_64/mm.txt | 6
-rw-r--r-- Kbuild | 29
-rw-r--r-- LICENSES/exceptions/GCC-exception-2.0 | 18
-rw-r--r-- MAINTAINERS | 320
-rw-r--r-- Makefile | 252
-rw-r--r-- arch/Kconfig | 21
-rw-r--r-- arch/alpha/Kconfig | 8
-rw-r--r-- arch/alpha/include/asm/Kbuild | 2
-rw-r--r-- arch/alpha/include/asm/io.h | 2
-rw-r--r-- arch/alpha/include/asm/rwsem.h | 211
-rw-r--r-- arch/alpha/include/asm/tlb.h | 6
-rw-r--r-- arch/alpha/include/uapi/asm/Kbuild | 2
-rw-r--r-- arch/alpha/include/uapi/asm/kvm_para.h | 2
-rw-r--r-- arch/alpha/include/uapi/asm/socket.h | 2
-rw-r--r-- arch/alpha/kernel/core_cia.c | 5
-rw-r--r-- arch/alpha/kernel/core_marvel.c | 6
-rw-r--r-- arch/alpha/kernel/pci-noop.c | 13
-rw-r--r-- arch/alpha/kernel/pci.c | 11
-rw-r--r-- arch/alpha/kernel/pci_iommu.c | 16
-rw-r--r-- arch/alpha/kernel/setup.c | 2
-rw-r--r-- arch/alpha/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/arc/Kconfig | 27
-rw-r--r-- arch/arc/Makefile | 6
-rw-r--r-- arch/arc/boot/dts/abilis_tb100.dtsi | 58
-rw-r--r-- arch/arc/boot/dts/abilis_tb100_dvk.dts | 14
-rw-r--r-- arch/arc/boot/dts/abilis_tb101.dtsi | 58
-rw-r--r-- arch/arc/boot/dts/abilis_tb101_dvk.dts | 14
-rw-r--r-- arch/arc/boot/dts/abilis_tb10x.dtsi | 60
-rw-r--r-- arch/arc/boot/dts/axc001.dtsi | 6
-rw-r--r-- arch/arc/boot/dts/axc003.dtsi | 16
-rw-r--r-- arch/arc/boot/dts/axc003_idu.dtsi | 16
-rw-r--r-- arch/arc/boot/dts/axs10x_mb.dtsi | 22
-rw-r--r-- arch/arc/boot/dts/hsdk.dts | 46
-rw-r--r-- arch/arc/boot/dts/vdk_axc003.dtsi | 4
-rw-r--r-- arch/arc/boot/dts/vdk_axc003_idu.dtsi | 4
-rw-r--r-- arch/arc/boot/dts/vdk_axs10x_mb.dtsi | 18
-rw-r--r-- arch/arc/configs/hsdk_defconfig | 1
-rw-r--r-- arch/arc/include/asm/Kbuild | 3
-rw-r--r-- arch/arc/include/asm/arcregs.h | 12
-rw-r--r-- arch/arc/include/asm/dma-mapping.h | 13
-rw-r--r-- arch/arc/include/asm/irqflags-arcv2.h | 8
-rw-r--r-- arch/arc/include/asm/perf_event.h | 2
-rw-r--r-- arch/arc/include/asm/spinlock.h | 49
-rw-r--r-- arch/arc/include/asm/syscall.h | 7
-rw-r--r-- arch/arc/include/asm/tlb.h | 32
-rw-r--r-- arch/arc/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/arc/kernel/head.S | 6
-rw-r--r-- arch/arc/kernel/intc-arcv2.c | 2
-rw-r--r-- arch/arc/kernel/setup.c | 211
-rw-r--r-- arch/arc/kernel/troubleshoot.c | 5
-rw-r--r-- arch/arc/kernel/unwind.c | 3
-rw-r--r-- arch/arc/lib/Makefile | 8
-rw-r--r-- arch/arc/lib/memcpy-archs-unaligned.S | 47
-rw-r--r-- arch/arc/lib/memset-archs.S | 4
-rw-r--r-- arch/arc/mm/cache.c | 31
-rw-r--r-- arch/arc/mm/highmem.c | 4
-rw-r--r-- arch/arc/plat-eznps/Kconfig | 12
-rw-r--r-- arch/arm/Kconfig | 15
-rw-r--r-- arch/arm/Kconfig-nommu | 2
-rw-r--r-- arch/arm/Kconfig.debug | 6
-rw-r--r-- arch/arm/Makefile | 2
-rw-r--r-- arch/arm/boot/bootp/Makefile | 2
-rw-r--r-- arch/arm/boot/bootp/init.S | 2
-rw-r--r-- arch/arm/boot/compressed/Makefile | 2
-rw-r--r-- arch/arm/boot/compressed/head.S | 16
-rw-r--r-- arch/arm/boot/compressed/ll_char_wr.S | 4
-rw-r--r-- arch/arm/boot/dts/am335x-evm.dts | 26
-rw-r--r-- arch/arm/boot/dts/am335x-evmsk.dts | 26
-rw-r--r-- arch/arm/boot/dts/am33xx-l4.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx28-cfa10036.dts | 3
-rw-r--r-- arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/imx6ull-pinfunc-snvs.h | 2
-rw-r--r-- arch/arm/boot/dts/imx7d.dtsi | 9
-rw-r--r-- arch/arm/boot/dts/rk3288-tinker.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/rk3288-veyron.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/rk3288.dtsi | 20
-rw-r--r-- arch/arm/boot/dts/sama5d2-pinfunc.h | 2
-rw-r--r-- arch/arm/boot/dts/stm32h743-pinctrl.dtsi | 15
-rw-r--r-- arch/arm/boot/dts/stm32h743.dtsi | 13
-rw-r--r-- arch/arm/boot/dts/stm32h743i-disco.dts | 17
-rw-r--r-- arch/arm/boot/dts/stm32h743i-eval.dts | 17
-rw-r--r-- arch/arm/boot/dts/tegra20.dtsi | 15
-rw-r--r-- arch/arm/common/mcpm_entry.c | 2
-rw-r--r-- arch/arm/configs/imx_v4_v5_defconfig | 3
-rw-r--r-- arch/arm/configs/imx_v6_v7_defconfig | 2
-rw-r--r-- arch/arm/crypto/aes-neonbs-glue.c | 2
-rw-r--r-- arch/arm/crypto/chacha-neon-glue.c | 5
-rw-r--r-- arch/arm/crypto/crc32-ce-glue.c | 5
-rw-r--r-- arch/arm/crypto/crct10dif-ce-glue.c | 3
-rw-r--r-- arch/arm/crypto/ghash-ce-glue.c | 10
-rw-r--r-- arch/arm/crypto/nhpoly1305-neon-glue.c | 3
-rw-r--r-- arch/arm/crypto/sha1-ce-glue.c | 5
-rw-r--r-- arch/arm/crypto/sha1_neon_glue.c | 5
-rw-r--r-- arch/arm/crypto/sha2-ce-glue.c | 5
-rw-r--r-- arch/arm/crypto/sha256_neon_glue.c | 5
-rw-r--r-- arch/arm/crypto/sha512-neon-glue.c | 5
-rw-r--r-- arch/arm/include/asm/Kbuild | 3
-rw-r--r-- arch/arm/include/asm/arch_gicv3.h | 37
-rw-r--r-- arch/arm/include/asm/arch_timer.h | 18
-rw-r--r-- arch/arm/include/asm/assembler.h | 12
-rw-r--r-- arch/arm/include/asm/barrier.h | 2
-rw-r--r-- arch/arm/include/asm/cp15.h | 2
-rw-r--r-- arch/arm/include/asm/dma-mapping.h | 9
-rw-r--r-- arch/arm/include/asm/hardware/entry-macro-iomd.S | 10
-rw-r--r-- arch/arm/include/asm/io.h | 2
-rw-r--r-- arch/arm/include/asm/kvm_emulate.h | 8
-rw-r--r-- arch/arm/include/asm/kvm_host.h | 53
-rw-r--r-- arch/arm/include/asm/kvm_hyp.h | 4
-rw-r--r-- arch/arm/include/asm/kvm_mmu.h | 20
-rw-r--r-- arch/arm/include/asm/pgtable.h | 3
-rw-r--r-- arch/arm/include/asm/processor.h | 6
-rw-r--r-- arch/arm/include/asm/smp.h | 1
-rw-r--r-- arch/arm/include/asm/smp_twd.h | 16
-rw-r--r-- arch/arm/include/asm/spinlock.h | 3
-rw-r--r-- arch/arm/include/asm/stage2_pgtable.h | 6
-rw-r--r-- arch/arm/include/asm/suspend.h | 1
-rw-r--r-- arch/arm/include/asm/syscall.h | 47
-rw-r--r-- arch/arm/include/asm/tlb.h | 255
-rw-r--r-- arch/arm/include/asm/uaccess.h | 3
-rw-r--r-- arch/arm/include/asm/v7m.h | 2
-rw-r--r-- arch/arm/include/asm/vfpmacros.h | 8
-rw-r--r-- arch/arm/include/debug/tegra.S | 2
-rw-r--r-- arch/arm/include/uapi/asm/Kbuild | 2
-rw-r--r-- arch/arm/include/uapi/asm/kvm_para.h | 2
-rw-r--r-- arch/arm/kernel/debug.S | 2
-rw-r--r-- arch/arm/kernel/entry-armv.S | 12
-rw-r--r-- arch/arm/kernel/entry-common.S | 2
-rw-r--r-- arch/arm/kernel/entry-header.S | 11
-rw-r--r-- arch/arm/kernel/entry-v7m.S | 4
-rw-r--r-- arch/arm/kernel/head-nommu.S | 6
-rw-r--r-- arch/arm/kernel/hyp-stub.S | 4
-rw-r--r-- arch/arm/kernel/machine_kexec.c | 5
-rw-r--r-- arch/arm/kernel/patch.c | 6
-rw-r--r-- arch/arm/kernel/setup.c | 6
-rw-r--r-- arch/arm/kernel/signal.c | 3
-rw-r--r-- arch/arm/kernel/sleep.S | 12
-rw-r--r-- arch/arm/kernel/smp.c | 10
-rw-r--r-- arch/arm/kernel/smp_twd.c | 66
-rw-r--r-- arch/arm/kernel/stacktrace.c | 6
-rw-r--r-- arch/arm/kernel/unwind.c | 14
-rw-r--r-- arch/arm/kvm/Makefile | 5
-rw-r--r-- arch/arm/kvm/coproc.c | 23
-rw-r--r-- arch/arm/kvm/hyp/cp15-sr.c | 1
-rw-r--r-- arch/arm/kvm/hyp/hyp-entry.S | 2
-rw-r--r-- arch/arm/kvm/hyp/switch.c | 2
-rw-r--r-- arch/arm/kvm/hyp/tlb.c | 4
-rw-r--r-- arch/arm/kvm/interrupts.S | 4
-rw-r--r-- arch/arm/lib/Makefile | 2
-rw-r--r-- arch/arm/lib/bitops.h | 8
-rw-r--r-- arch/arm/lib/clear_user.S | 2
-rw-r--r-- arch/arm/lib/copy_from_user.S | 2
-rw-r--r-- arch/arm/lib/copy_page.S | 4
-rw-r--r-- arch/arm/lib/copy_template.S | 6
-rw-r--r-- arch/arm/lib/copy_to_user.S | 2
-rw-r--r-- arch/arm/lib/csumpartial.S | 20
-rw-r--r-- arch/arm/lib/csumpartialcopygeneric.S | 4
-rw-r--r-- arch/arm/lib/csumpartialcopyuser.S | 2
-rw-r--r-- arch/arm/lib/div64.S | 4
-rw-r--r-- arch/arm/lib/floppydma.S | 10
-rw-r--r-- arch/arm/lib/io-readsb.S | 20
-rw-r--r-- arch/arm/lib/io-readsl.S | 2
-rw-r--r-- arch/arm/lib/io-readsw-armv3.S | 6
-rw-r--r-- arch/arm/lib/io-readsw-armv4.S | 12
-rw-r--r-- arch/arm/lib/io-writesb.S | 20
-rw-r--r-- arch/arm/lib/io-writesl.S | 2
-rw-r--r-- arch/arm/lib/io-writesw-armv3.S | 2
-rw-r--r-- arch/arm/lib/io-writesw-armv4.S | 6
-rw-r--r-- arch/arm/lib/lib1funcs.S | 4
-rw-r--r-- arch/arm/lib/memcpy.S | 4
-rw-r--r-- arch/arm/lib/memmove.S | 24
-rw-r--r-- arch/arm/lib/memset.S | 42
-rw-r--r-- arch/arm/lib/xor-neon.c | 2
-rw-r--r-- arch/arm/mach-actions/platsmp.c | 15
-rw-r--r-- arch/arm/mach-at91/pm.c | 6
-rw-r--r-- arch/arm/mach-cns3xxx/core.c | 2
-rw-r--r-- arch/arm/mach-ep93xx/edb93xx.c | 13
-rw-r--r-- arch/arm/mach-ep93xx/simone.c | 11
-rw-r--r-- arch/arm/mach-ep93xx/ts72xx.c | 25
-rw-r--r-- arch/arm/mach-ep93xx/vision_ep9307.c | 15
-rw-r--r-- arch/arm/mach-exynos/headsmp.S | 2
-rw-r--r-- arch/arm/mach-exynos/platsmp.c | 31
-rw-r--r-- arch/arm/mach-imx/cpuidle-imx6q.c | 27
-rw-r--r-- arch/arm/mach-imx/mach-imx27_visstrim_m10.c | 12
-rw-r--r-- arch/arm/mach-imx/mach-imx51.c | 1
-rw-r--r-- arch/arm/mach-imx/mach-mx31moboard.c | 3
-rw-r--r-- arch/arm/mach-iop13xx/setup.c | 8
-rw-r--r-- arch/arm/mach-iop13xx/tpmi.c | 10
-rw-r--r-- arch/arm/mach-ks8695/include/mach/entry-macro.S | 2
-rw-r--r-- arch/arm/mach-milbeaut/platsmp.c | 4
-rw-r--r-- arch/arm/mach-omap1/board-ams-delta.c | 2
-rw-r--r-- arch/arm/mach-omap2/display.c | 4
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 16
-rw-r--r-- arch/arm/mach-omap2/prm_common.c | 4
-rw-r--r-- arch/arm/mach-oxnas/Makefile | 1
-rw-r--r-- arch/arm/mach-oxnas/hotplug.c | 109
-rw-r--r-- arch/arm/mach-oxnas/platsmp.c | 4
-rw-r--r-- arch/arm/mach-prima2/common.h | 2
-rw-r--r-- arch/arm/mach-prima2/headsmp.S | 2
-rw-r--r-- arch/arm/mach-prima2/hotplug.c | 3
-rw-r--r-- arch/arm/mach-prima2/platsmp.c | 17
-rw-r--r-- arch/arm/mach-qcom/platsmp.c | 26
-rw-r--r-- arch/arm/mach-spear/generic.h | 2
-rw-r--r-- arch/arm/mach-spear/headsmp.S | 2
-rw-r--r-- arch/arm/mach-spear/hotplug.c | 4
-rw-r--r-- arch/arm/mach-spear/platsmp.c | 27
-rw-r--r-- arch/arm/mach-tegra/reset-handler.S | 2
-rw-r--r-- arch/arm/mm/cache-v6.S | 8
-rw-r--r-- arch/arm/mm/copypage-v4mc.c | 3
-rw-r--r-- arch/arm/mm/copypage-v4wb.c | 3
-rw-r--r-- arch/arm/mm/copypage-v4wt.c | 3
-rw-r--r-- arch/arm/mm/dma-mapping.c | 4
-rw-r--r-- arch/arm/mm/idmap.c | 4
-rw-r--r-- arch/arm/mm/init.c | 75
-rw-r--r-- arch/arm/mm/mmu.c | 14
-rw-r--r-- arch/arm/mm/pmsa-v8.c | 4
-rw-r--r-- arch/arm/mm/proc-v7m.S | 7
-rw-r--r-- arch/arm/plat-iop/adma.c | 6
-rw-r--r-- arch/arm/plat-orion/common.c | 4
-rw-r--r-- arch/arm/tools/syscall.tbl | 4
-rw-r--r-- arch/arm/vdso/vgettimeofday.c | 5
-rw-r--r-- arch/arm64/Kconfig | 171
-rw-r--r-- arch/arm64/Kconfig.platforms | 3
-rw-r--r-- arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | 28
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 34
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h | 629
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h | 2
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra186.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774c0.dtsi | 7
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77990.dtsi | 7
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | 3
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328.dtsi | 58
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts | 1
-rw-r--r-- arch/arm64/crypto/aes-ce-ccm-glue.c | 9
-rw-r--r-- arch/arm64/crypto/aes-ce-glue.c | 5
-rw-r--r-- arch/arm64/crypto/aes-glue.c | 6
-rw-r--r-- arch/arm64/crypto/aes-neonbs-glue.c | 6
-rw-r--r-- arch/arm64/crypto/chacha-neon-glue.c | 7
-rw-r--r-- arch/arm64/crypto/crct10dif-ce-glue.c | 9
-rw-r--r-- arch/arm64/crypto/ghash-ce-glue.c | 25
-rw-r--r-- arch/arm64/crypto/nhpoly1305-neon-glue.c | 5
-rw-r--r-- arch/arm64/crypto/sha1-ce-glue.c | 7
-rw-r--r-- arch/arm64/crypto/sha2-ce-glue.c | 7
-rw-r--r-- arch/arm64/crypto/sha256-glue.c | 9
-rw-r--r-- arch/arm64/crypto/sha3-ce-glue.c | 5
-rw-r--r-- arch/arm64/crypto/sha512-ce-glue.c | 7
-rw-r--r-- arch/arm64/crypto/sm3-ce-glue.c | 7
-rw-r--r-- arch/arm64/crypto/sm4-ce-glue.c | 5
-rw-r--r-- arch/arm64/include/asm/Kbuild | 2
-rw-r--r-- arch/arm64/include/asm/alternative.h | 4
-rw-r--r-- arch/arm64/include/asm/arch_gicv3.h | 32
-rw-r--r-- arch/arm64/include/asm/arch_timer.h | 119
-rw-r--r-- arch/arm64/include/asm/asm-uaccess.h | 2
-rw-r--r-- arch/arm64/include/asm/assembler.h | 44
-rw-r--r-- arch/arm64/include/asm/barrier.h | 24
-rw-r--r-- arch/arm64/include/asm/brk-imm.h | 5
-rw-r--r-- arch/arm64/include/asm/cpucaps.h | 4
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 35
-rw-r--r-- arch/arm64/include/asm/cputype.h | 15
-rw-r--r-- arch/arm64/include/asm/daifflags.h | 60
-rw-r--r-- arch/arm64/include/asm/debug-monitors.h | 25
-rw-r--r-- arch/arm64/include/asm/dma-mapping.h | 9
-rw-r--r-- arch/arm64/include/asm/efi.h | 11
-rw-r--r-- arch/arm64/include/asm/elf.h | 6
-rw-r--r-- arch/arm64/include/asm/esr.h | 7
-rw-r--r-- arch/arm64/include/asm/futex.h | 61
-rw-r--r-- arch/arm64/include/asm/hardirq.h | 31
-rw-r--r-- arch/arm64/include/asm/hwcap.h | 60
-rw-r--r-- arch/arm64/include/asm/io.h | 3
-rw-r--r-- arch/arm64/include/asm/irqflags.h | 100
-rw-r--r-- arch/arm64/include/asm/kprobes.h | 2
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 12
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 64
-rw-r--r-- arch/arm64/include/asm/kvm_hyp.h | 7
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 24
-rw-r--r-- arch/arm64/include/asm/memblock.h | 21
-rw-r--r-- arch/arm64/include/asm/memory.h | 7
-rw-r--r-- arch/arm64/include/asm/mmu.h | 1
-rw-r--r-- arch/arm64/include/asm/module.h | 5
-rw-r--r-- arch/arm64/include/asm/pgalloc.h | 12
-rw-r--r-- arch/arm64/include/asm/pgtable-hwdef.h | 1
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 5
-rw-r--r-- arch/arm64/include/asm/pointer_auth.h | 2
-rw-r--r-- arch/arm64/include/asm/processor.h | 11
-rw-r--r-- arch/arm64/include/asm/ptdump.h | 9
-rw-r--r-- arch/arm64/include/asm/ptrace.h | 48
-rw-r--r-- arch/arm64/include/asm/sdei.h | 2
-rw-r--r-- arch/arm64/include/asm/signal32.h | 2
-rw-r--r-- arch/arm64/include/asm/stage2_pgtable.h | 4
-rw-r--r-- arch/arm64/include/asm/syscall.h | 46
-rw-r--r-- arch/arm64/include/asm/sysreg.h | 66
-rw-r--r-- arch/arm64/include/asm/system_misc.h | 1
-rw-r--r-- arch/arm64/include/asm/thread_info.h | 1
-rw-r--r-- arch/arm64/include/asm/tlb.h | 6
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 36
-rw-r--r-- arch/arm64/include/asm/unistd.h | 2
-rw-r--r-- arch/arm64/include/asm/unistd32.h | 8
-rw-r--r-- arch/arm64/include/asm/vdso_datapage.h | 1
-rw-r--r-- arch/arm64/include/asm/vmap_stack.h | 2
-rw-r--r-- arch/arm64/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/arm64/include/uapi/asm/hwcap.h | 13
-rw-r--r-- arch/arm64/include/uapi/asm/ptrace.h | 13
-rw-r--r-- arch/arm64/kernel/Makefile | 11
-rw-r--r-- arch/arm64/kernel/alternative.c | 60
-rw-r--r-- arch/arm64/kernel/asm-offsets.c | 23
-rw-r--r-- arch/arm64/kernel/cpu_errata.c | 256
-rw-r--r-- arch/arm64/kernel/cpu_ops.c | 1
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 235
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 9
-rw-r--r-- arch/arm64/kernel/debug-monitors.c | 115
-rw-r--r-- arch/arm64/kernel/entry.S | 79
-rw-r--r-- arch/arm64/kernel/fpsimd.c | 4
-rw-r--r-- arch/arm64/kernel/ftrace.c | 12
-rw-r--r-- arch/arm64/kernel/head.S | 12
-rw-r--r-- arch/arm64/kernel/irq.c | 3
-rw-r--r-- arch/arm64/kernel/kgdb.c | 28
-rw-r--r-- arch/arm64/kernel/kuser32.S | 66
-rw-r--r-- arch/arm64/kernel/perf_event.c | 6
-rw-r--r-- arch/arm64/kernel/probes/kprobes.c | 72
-rw-r--r-- arch/arm64/kernel/probes/uprobes.c | 19
-rw-r--r-- arch/arm64/kernel/process.c | 51
-rw-r--r-- arch/arm64/kernel/ptrace.c | 147
-rw-r--r-- arch/arm64/kernel/sdei.c | 6
-rw-r--r-- arch/arm64/kernel/setup.c | 9
-rw-r--r-- arch/arm64/kernel/signal32.c | 3
-rw-r--r-- arch/arm64/kernel/sigreturn32.S | 46
-rw-r--r-- arch/arm64/kernel/smp.c | 33
-rw-r--r-- arch/arm64/kernel/stacktrace.c | 5
-rw-r--r-- arch/arm64/kernel/sys.c | 2
-rw-r--r-- arch/arm64/kernel/traps.c | 56
-rw-r--r-- arch/arm64/kernel/vdso.c | 139
-rw-r--r-- arch/arm64/kernel/vdso/Makefile | 19
-rw-r--r-- arch/arm64/kernel/vdso/gettimeofday.S | 22
-rw-r--r-- arch/arm64/kvm/Makefile | 4
-rw-r--r-- arch/arm64/kvm/debug.c | 2
-rw-r--r-- arch/arm64/kvm/hyp.S | 3
-rw-r--r-- arch/arm64/kvm/hyp/hyp-entry.S | 12
-rw-r--r-- arch/arm64/kvm/hyp/switch.c | 16
-rw-r--r-- arch/arm64/kvm/hyp/sysreg-sr.c | 1
-rw-r--r-- arch/arm64/kvm/reset.c | 6
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 168
-rw-r--r-- arch/arm64/lib/Makefile | 2
-rw-r--r-- arch/arm64/mm/dump.c | 4
-rw-r--r-- arch/arm64/mm/fault.c | 21
-rw-r--r-- arch/arm64/mm/init.c | 27
-rw-r--r-- arch/arm64/mm/kasan_init.c | 10
-rw-r--r-- arch/arm64/mm/mmu.c | 54
-rw-r--r-- arch/arm64/mm/numa.c | 29
-rw-r--r-- arch/arm64/mm/proc.S | 46
-rw-r--r-- arch/arm64/mm/ptdump_debugfs.c | 7
-rw-r--r-- arch/c6x/Kconfig | 4
-rw-r--r-- arch/c6x/include/asm/Kbuild | 2
-rw-r--r-- arch/c6x/include/asm/syscall.h | 79
-rw-r--r-- arch/c6x/include/asm/tlb.h | 2
-rw-r--r-- arch/c6x/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/c6x/mm/dma-coherent.c | 4
-rw-r--r-- arch/c6x/mm/init.c | 4
-rw-r--r-- arch/csky/Kconfig | 5
-rw-r--r-- arch/csky/include/asm/Kbuild | 1
-rw-r--r-- arch/csky/include/asm/syscall.h | 26
-rw-r--r-- arch/csky/include/uapi/asm/Kbuild | 2
-rw-r--r-- arch/csky/mm/highmem.c | 5
-rw-r--r-- arch/h8300/Kconfig | 3
-rw-r--r-- arch/h8300/Makefile | 2
-rw-r--r-- arch/h8300/include/asm/Kbuild | 3
-rw-r--r-- arch/h8300/include/asm/syscall.h | 34
-rw-r--r-- arch/h8300/include/asm/tlb.h | 2
-rw-r--r-- arch/h8300/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/h8300/mm/init.c | 4
-rw-r--r-- arch/hexagon/Kconfig | 6
-rw-r--r-- arch/hexagon/include/asm/Kbuild | 3
-rw-r--r-- arch/hexagon/include/asm/io.h | 2
-rw-r--r-- arch/hexagon/include/asm/syscall.h | 4
-rw-r--r-- arch/hexagon/include/asm/tlb.h | 12
-rw-r--r-- arch/hexagon/include/uapi/asm/Kbuild | 2
-rw-r--r-- arch/hexagon/include/uapi/asm/kvm_para.h | 2
-rw-r--r-- arch/ia64/Kconfig | 4
-rw-r--r-- arch/ia64/hp/sim/simscsi.c | 7
-rw-r--r-- arch/ia64/include/asm/Kbuild | 1
-rw-r--r-- arch/ia64/include/asm/io.h | 17
-rw-r--r-- arch/ia64/include/asm/machvec.h | 13
-rw-r--r-- arch/ia64/include/asm/machvec_sn2.h | 2
-rw-r--r-- arch/ia64/include/asm/mmiowb.h | 25
-rw-r--r-- arch/ia64/include/asm/rwsem.h | 172
-rw-r--r-- arch/ia64/include/asm/spinlock.h | 2
-rw-r--r-- arch/ia64/include/asm/syscall.h | 13
-rw-r--r-- arch/ia64/include/asm/tlb.h | 259
-rw-r--r-- arch/ia64/include/asm/tlbflush.h | 25
-rw-r--r-- arch/ia64/include/uapi/asm/Kbuild | 4
-rw-r--r-- arch/ia64/kernel/mca.c | 25
-rw-r--r-- arch/ia64/kernel/ptrace.c | 7
-rw-r--r-- arch/ia64/kernel/setup.c | 4
-rw-r--r-- arch/ia64/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/ia64/mm/contig.c | 8
-rw-r--r-- arch/ia64/mm/discontig.c | 4
-rw-r--r-- arch/ia64/mm/init.c | 38
-rw-r--r-- arch/ia64/mm/tlb.c | 29
-rw-r--r-- arch/ia64/sn/kernel/Makefile | 2
-rw-r--r-- arch/ia64/sn/kernel/io_common.c | 3
-rw-r--r-- arch/ia64/sn/kernel/setup.c | 12
-rw-r--r-- arch/ia64/sn/kernel/sn2/Makefile | 2
-rw-r--r-- arch/ia64/sn/kernel/sn2/sn2_smp.c | 7
-rw-r--r-- arch/ia64/sn/pci/Makefile | 2
-rw-r--r-- arch/ia64/sn/pci/pcibr/Makefile | 2
-rw-r--r-- arch/m68k/Kconfig | 9
-rw-r--r-- arch/m68k/amiga/cia.c | 9
-rw-r--r-- arch/m68k/amiga/config.c | 49
-rw-r--r-- arch/m68k/apollo/config.c | 7
-rw-r--r-- arch/m68k/atari/ataints.c | 4
-rw-r--r-- arch/m68k/atari/config.c | 2
-rw-r--r-- arch/m68k/atari/stram.c | 4
-rw-r--r-- arch/m68k/atari/time.c | 70
-rw-r--r-- arch/m68k/bvme6000/config.c | 77
-rw-r--r-- arch/m68k/coldfire/device.c | 81
-rw-r--r-- arch/m68k/coldfire/m5441x.c | 4
-rw-r--r-- arch/m68k/configs/amiga_defconfig | 14
-rw-r--r-- arch/m68k/configs/apollo_defconfig | 14
-rw-r--r-- arch/m68k/configs/atari_defconfig | 14
-rw-r--r-- arch/m68k/configs/bvme6000_defconfig | 14
-rw-r--r-- arch/m68k/configs/hp300_defconfig | 14
-rw-r--r-- arch/m68k/configs/mac_defconfig | 14
-rw-r--r-- arch/m68k/configs/multi_defconfig | 14
-rw-r--r-- arch/m68k/configs/mvme147_defconfig | 14
-rw-r--r-- arch/m68k/configs/mvme16x_defconfig | 14
-rw-r--r-- arch/m68k/configs/q40_defconfig | 14
-rw-r--r-- arch/m68k/configs/sun3_defconfig | 14
-rw-r--r-- arch/m68k/configs/sun3x_defconfig | 14
-rw-r--r-- arch/m68k/hp300/config.c | 1
-rw-r--r-- arch/m68k/hp300/time.c | 73
-rw-r--r-- arch/m68k/hp300/time.h | 1
-rw-r--r-- arch/m68k/include/asm/Kbuild | 2
-rw-r--r-- arch/m68k/include/asm/io_mm.h | 2
-rw-r--r-- arch/m68k/include/asm/m5441xsim.h | 15
-rw-r--r-- arch/m68k/include/asm/mvme147hw.h | 2
-rw-r--r-- arch/m68k/include/asm/tlb.h | 14
-rw-r--r-- arch/m68k/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/m68k/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/m68k/mac/config.c | 3
-rw-r--r-- arch/m68k/mac/via.c | 146
-rw-r--r-- arch/m68k/mm/init.c | 3
-rw-r--r-- arch/m68k/mm/mcfmmu.c | 7
-rw-r--r-- arch/m68k/mm/motorola.c | 9
-rw-r--r-- arch/m68k/mm/sun3mmu.c | 6
-rw-r--r-- arch/m68k/mvme147/config.c | 73
-rw-r--r-- arch/m68k/mvme16x/config.c | 97
-rw-r--r-- arch/m68k/q40/config.c | 9
-rw-r--r-- arch/m68k/q40/q40ints.c | 19
-rw-r--r-- arch/m68k/sun3/config.c | 2
-rw-r--r-- arch/m68k/sun3/intersil.c | 7
-rw-r--r-- arch/m68k/sun3/sun3dvma.c | 3
-rw-r--r-- arch/m68k/sun3/sun3ints.c | 3
-rw-r--r-- arch/m68k/sun3x/config.c | 1
-rw-r--r-- arch/m68k/sun3x/time.c | 21
-rw-r--r-- arch/m68k/sun3x/time.h | 1
-rw-r--r-- arch/microblaze/Kconfig | 7
-rw-r--r-- arch/microblaze/include/asm/Kbuild | 2
-rw-r--r-- arch/microblaze/include/asm/syscall.h | 8
-rw-r--r-- arch/microblaze/include/asm/tlb.h | 9
-rw-r--r-- arch/microblaze/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/microblaze/kernel/setup.c | 13
-rw-r--r-- arch/microblaze/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/microblaze/mm/init.c | 10
-rw-r--r-- arch/mips/Kconfig | 9
-rw-r--r-- arch/mips/ath79/setup.c | 6
-rw-r--r-- arch/mips/bcm47xx/workarounds.c | 1
-rw-r--r-- arch/mips/boot/Makefile | 2
-rw-r--r-- arch/mips/cavium-octeon/dma-octeon.c | 3
-rw-r--r-- arch/mips/configs/generic/board-ocelot.config | 8
-rw-r--r-- arch/mips/configs/generic_defconfig | 2
-rw-r--r-- arch/mips/include/asm/dma-mapping.h | 10
-rw-r--r-- arch/mips/include/asm/io.h | 3
-rw-r--r-- arch/mips/include/asm/jump_label.h | 8
-rw-r--r-- arch/mips/include/asm/kvm_host.h | 2
-rw-r--r-- arch/mips/include/asm/mmiowb.h | 11
-rw-r--r-- arch/mips/include/asm/spinlock.h | 15
-rw-r--r-- arch/mips/include/asm/syscall.h | 3
-rw-r--r-- arch/mips/include/asm/tlb.h | 17
-rw-r--r-- arch/mips/include/uapi/asm/Kbuild | 2
-rw-r--r-- arch/mips/include/uapi/asm/posix_types.h | 7
-rw-r--r-- arch/mips/include/uapi/asm/socket.h | 2
-rw-r--r-- arch/mips/kernel/kgdb.c | 3
-rw-r--r-- arch/mips/kernel/ptrace.c | 2
-rw-r--r-- arch/mips/kernel/scall64-o32.S | 2
-rw-r--r-- arch/mips/kernel/setup.c | 3
-rw-r--r-- arch/mips/kernel/syscalls/syscall_n32.tbl | 4
-rw-r--r-- arch/mips/kernel/syscalls/syscall_n64.tbl | 4
-rw-r--r-- arch/mips/kernel/syscalls/syscall_o32.tbl | 4
-rw-r--r-- arch/mips/kernel/traps.c | 5
-rw-r--r-- arch/mips/kernel/vmlinux.lds.S | 12
-rw-r--r-- arch/mips/loongson64/lemote-2f/irq.c | 2
-rw-r--r-- arch/mips/mm/dma-noncoherent.c | 8
-rw-r--r-- arch/mips/mm/init.c | 5
-rw-r--r-- arch/mips/net/ebpf_jit.c | 5
-rw-r--r-- arch/mips/sgi-ip27/ip27-irq.c | 3
-rw-r--r-- arch/nds32/Kconfig | 3
-rw-r--r-- arch/nds32/configs/defconfig | 2
-rw-r--r-- arch/nds32/include/asm/Kbuild | 1
-rw-r--r-- arch/nds32/include/asm/io.h | 2
-rw-r--r-- arch/nds32/include/asm/syscall.h | 62
-rw-r--r-- arch/nds32/include/asm/tlb.h | 16
-rw-r--r-- arch/nds32/include/asm/tlbflush.h | 1
-rw-r--r-- arch/nds32/include/uapi/asm/Kbuild | 2
-rw-r--r-- arch/nds32/mm/init.c | 12
-rw-r--r-- arch/nios2/Kconfig | 5
-rw-r--r-- arch/nios2/include/asm/Kbuild | 2
-rw-r--r-- arch/nios2/include/asm/pgtable.h | 1
-rw-r--r-- arch/nios2/include/asm/syscall.h | 84
-rw-r--r-- arch/nios2/include/asm/tlb.h | 14
-rw-r--r-- arch/nios2/include/asm/tlbflush.h | 19
-rw-r--r-- arch/nios2/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/nios2/kernel/nios2_ksyms.c | 12
-rw-r--r-- arch/nios2/mm/cacheflush.c | 7
-rw-r--r-- arch/nios2/mm/fault.c | 2
-rw-r--r-- arch/nios2/mm/tlb.c | 192
-rw-r--r-- arch/nios2/platform/Kconfig.platform | 9
-rw-r--r-- arch/openrisc/Kconfig | 7
-rw-r--r-- arch/openrisc/include/asm/Kbuild | 4
-rw-r--r-- arch/openrisc/include/asm/syscall.h | 12
-rw-r--r-- arch/openrisc/include/asm/tlb.h | 8
-rw-r--r-- arch/openrisc/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/openrisc/mm/init.c | 5
-rw-r--r-- arch/openrisc/mm/ioremap.c | 8
-rw-r--r-- arch/parisc/Kconfig | 6
-rw-r--r-- arch/parisc/include/asm/Kbuild | 4
-rw-r--r-- arch/parisc/include/asm/io.h | 2
-rw-r--r-- arch/parisc/include/asm/ptrace.h | 5
-rw-r--r-- arch/parisc/include/asm/syscall.h | 30
-rw-r--r-- arch/parisc/include/asm/tlb.h | 18
-rw-r--r-- arch/parisc/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/parisc/include/uapi/asm/socket.h | 2
-rw-r--r-- arch/parisc/kernel/process.c | 6
-rw-r--r-- arch/parisc/kernel/setup.c | 3
-rw-r--r-- arch/parisc/kernel/stacktrace.c | 5
-rw-r--r-- arch/parisc/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/powerpc/Kconfig | 15
-rw-r--r-- arch/powerpc/boot/Makefile | 2
-rw-r--r-- arch/powerpc/configs/skiroot_defconfig | 13
-rw-r--r-- arch/powerpc/crypto/crc32c-vpmsum_glue.c | 4
-rw-r--r-- arch/powerpc/crypto/crct10dif-vpmsum_glue.c | 4
-rw-r--r-- arch/powerpc/include/asm/Kbuild | 3
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hugetlb.h | 8
-rw-r--r-- arch/powerpc/include/asm/io.h | 33
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 5
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 14
-rw-r--r-- arch/powerpc/include/asm/mmiowb.h | 18
-rw-r--r-- arch/powerpc/include/asm/mmu.h | 2
-rw-r--r-- arch/powerpc/include/asm/paca.h | 6
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h | 2
-rw-r--r-- arch/powerpc/include/asm/spinlock.h | 17
-rw-r--r-- arch/powerpc/include/asm/syscall.h | 15
-rw-r--r-- arch/powerpc/include/asm/tlb.h | 18
-rw-r--r-- arch/powerpc/include/asm/vdso_datapage.h | 8
-rw-r--r-- arch/powerpc/include/uapi/asm/Kbuild | 2
-rw-r--r-- arch/powerpc/include/uapi/asm/kvm.h | 2
-rw-r--r-- arch/powerpc/kernel/cpu_setup_6xx.S | 3
-rw-r--r-- arch/powerpc/kernel/dt_cpu_ftrs.c | 8
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 12
-rw-r--r-- arch/powerpc/kernel/head_32.S | 14
-rw-r--r-- arch/powerpc/kernel/head_fsl_booke.S | 7
-rw-r--r-- arch/powerpc/kernel/kvm.c | 7
-rw-r--r-- arch/powerpc/kernel/paca.c | 6
-rw-r--r-- arch/powerpc/kernel/pci_32.c | 3
-rw-r--r-- arch/powerpc/kernel/prom.c | 5
-rw-r--r-- arch/powerpc/kernel/rtas.c | 6
-rw-r--r-- arch/powerpc/kernel/security.c | 29
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 3
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 6
-rw-r--r-- arch/powerpc/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/powerpc/kernel/traps.c | 1
-rw-r--r-- arch/powerpc/kernel/vdso32/gettimeofday.S | 2
-rw-r--r-- arch/powerpc/kernel/vdso64/gettimeofday.S | 4
-rw-r--r-- arch/powerpc/kvm/book3s.c | 13
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c14
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c18
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c15
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c14
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c18
-rw-r--r--arch/powerpc/kvm/book3s_hv.c37
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c14
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c7
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S10
-rw-r--r--arch/powerpc/kvm/book3s_rtas.c8
-rw-r--r--arch/powerpc/kvm/powerpc.c22
-rw-r--r--arch/powerpc/lib/alloc.c3
-rw-r--r--arch/powerpc/lib/memcmp_64.S17
-rw-r--r--arch/powerpc/mm/Makefile3
-rw-r--r--arch/powerpc/mm/hash_low_32.S8
-rw-r--r--arch/powerpc/mm/hash_utils_64.c11
-rw-r--r--arch/powerpc/mm/mmu_context_iommu.c97
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c9
-rw-r--r--arch/powerpc/mm/numa.c4
-rw-r--r--arch/powerpc/mm/pgtable-book3e.c12
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c3
-rw-r--r--arch/powerpc/mm/pgtable-radix.c9
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c21
-rw-r--r--arch/powerpc/net/bpf_jit.h17
-rw-r--r--arch/powerpc/net/bpf_jit32.h4
-rw-r--r--arch/powerpc/net/bpf_jit64.h20
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c12
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype2
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c3
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal-call.c1
-rw-r--r--arch/powerpc/platforms/powernv/opal.c3
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c8
-rw-r--r--arch/powerpc/platforms/ps3/setup.c3
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c1
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c27
-rw-r--r--arch/powerpc/platforms/pseries/ras.c1
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c3
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c3
-rw-r--r--arch/powerpc/xmon/xmon.c5
-rw-r--r--arch/riscv/Kconfig5
-rw-r--r--arch/riscv/configs/rv32_defconfig84
-rw-r--r--arch/riscv/include/asm/fixmap.h2
-rw-r--r--arch/riscv/include/asm/io.h51
-rw-r--r--arch/riscv/include/asm/mmiowb.h14
-rw-r--r--arch/riscv/include/asm/syscall.h24
-rw-r--r--arch/riscv/include/asm/tlb.h1
-rw-r--r--arch/riscv/include/asm/uaccess.h2
-rw-r--r--arch/riscv/include/uapi/asm/Kbuild1
-rw-r--r--arch/riscv/kernel/Makefile3
-rw-r--r--arch/riscv/kernel/module.c2
-rw-r--r--arch/riscv/kernel/setup.c8
-rw-r--r--arch/riscv/kernel/stacktrace.c2
-rw-r--r--arch/riscv/mm/Makefile6
-rw-r--r--arch/riscv/mm/init.c36
-rw-r--r--arch/s390/Kconfig64
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/boot/Makefile38
-rw-r--r--arch/s390/boot/als.c2
-rw-r--r--arch/s390/boot/boot.h5
-rw-r--r--arch/s390/boot/compressed/Makefile4
-rw-r--r--arch/s390/boot/compressed/decompressor.h5
-rw-r--r--arch/s390/boot/compressed/vmlinux.lds.S22
-rw-r--r--arch/s390/boot/head.S48
-rw-r--r--arch/s390/boot/ipl_parm.c54
-rw-r--r--arch/s390/boot/ipl_report.c165
-rw-r--r--arch/s390/boot/kaslr.c144
-rw-r--r--arch/s390/boot/machine_kexec_reloc.c2
-rw-r--r--arch/s390/boot/mem_detect.c2
-rw-r--r--arch/s390/boot/startup.c121
-rw-r--r--arch/s390/boot/text_dma.S184
-rw-r--r--arch/s390/boot/uv.c24
-rw-r--r--arch/s390/configs/debug_defconfig3
-rw-r--r--arch/s390/configs/performance_defconfig3
-rw-r--r--arch/s390/crypto/crc32be-vx.S1
-rw-r--r--arch/s390/crypto/crc32le-vx.S6
-rw-r--r--arch/s390/crypto/des_s390.c21
-rw-r--r--arch/s390/crypto/prng.c135
-rw-r--r--arch/s390/defconfig1
-rw-r--r--arch/s390/hypfs/hypfs_diag0c.c18
-rw-r--r--arch/s390/include/asm/Kbuild3
-rw-r--r--arch/s390/include/asm/airq.h12
-rw-r--r--arch/s390/include/asm/ap.h11
-rw-r--r--arch/s390/include/asm/bitops.h12
-rw-r--r--arch/s390/include/asm/boot_data.h11
-rw-r--r--arch/s390/include/asm/bug.h24
-rw-r--r--arch/s390/include/asm/cio.h1
-rw-r--r--arch/s390/include/asm/diag.h13
-rw-r--r--arch/s390/include/asm/ebcdic.h2
-rw-r--r--arch/s390/include/asm/elf.h15
-rw-r--r--arch/s390/include/asm/extable.h5
-rw-r--r--arch/s390/include/asm/ftrace.h7
-rw-r--r--arch/s390/include/asm/io.h17
-rw-r--r--arch/s390/include/asm/ipl.h132
-rw-r--r--arch/s390/include/asm/irq.h10
-rw-r--r--arch/s390/include/asm/isc.h1
-rw-r--r--arch/s390/include/asm/kexec.h26
-rw-r--r--arch/s390/include/asm/kvm_host.h39
-rw-r--r--arch/s390/include/asm/linkage.h7
-rw-r--r--arch/s390/include/asm/lowcore.h61
-rw-r--r--arch/s390/include/asm/nospec-insn.h10
-rw-r--r--arch/s390/include/asm/pci.h12
-rw-r--r--arch/s390/include/asm/pci_clp.h20
-rw-r--r--arch/s390/include/asm/pci_insn.h97
-rw-r--r--arch/s390/include/asm/pci_io.h49
-rw-r--r--arch/s390/include/asm/pgtable.h112
-rw-r--r--arch/s390/include/asm/processor.h82
-rw-r--r--arch/s390/include/asm/sclp.h3
-rw-r--r--arch/s390/include/asm/sections.h22
-rw-r--r--arch/s390/include/asm/setup.h21
-rw-r--r--arch/s390/include/asm/stacktrace.h114
-rw-r--r--arch/s390/include/asm/syscall.h37
-rw-r--r--arch/s390/include/asm/syscall_wrapper.h4
-rw-r--r--arch/s390/include/asm/tlb.h130
-rw-r--r--arch/s390/include/asm/uaccess.h2
-rw-r--r--arch/s390/include/asm/unwind.h101
-rw-r--r--arch/s390/include/asm/uv.h132
-rw-r--r--arch/s390/include/asm/vmlinux.lds.h13
-rw-r--r--arch/s390/include/uapi/asm/Kbuild2
-rw-r--r--arch/s390/include/uapi/asm/ipl.h154
-rw-r--r--arch/s390/kernel/Makefile7
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/base.S71
-rw-r--r--arch/s390/kernel/crash_dump.c3
-rw-r--r--arch/s390/kernel/diag.c67
-rw-r--r--arch/s390/kernel/dumpstack.c167
-rw-r--r--arch/s390/kernel/early.c9
-rw-r--r--arch/s390/kernel/early_nobss.c2
-rw-r--r--arch/s390/kernel/entry.S42
-rw-r--r--arch/s390/kernel/entry.h2
-rw-r--r--arch/s390/kernel/fpu.c2
-rw-r--r--arch/s390/kernel/ftrace.c9
-rw-r--r--arch/s390/kernel/head64.S26
-rw-r--r--arch/s390/kernel/ima_arch.c14
-rw-r--r--arch/s390/kernel/ipl.c370
-rw-r--r--arch/s390/kernel/ipl_vmparm.c8
-rw-r--r--arch/s390/kernel/irq.c48
-rw-r--r--arch/s390/kernel/kexec_elf.c63
-rw-r--r--arch/s390/kernel/kexec_image.c49
-rw-r--r--arch/s390/kernel/kprobes.c37
-rw-r--r--arch/s390/kernel/machine_kexec.c8
-rw-r--r--arch/s390/kernel/machine_kexec_file.c268
-rw-r--r--arch/s390/kernel/machine_kexec_reloc.c53
-rw-r--r--arch/s390/kernel/mcount.S12
-rw-r--r--arch/s390/kernel/nmi.c2
-rw-r--r--arch/s390/kernel/nospec-branch.c9
-rw-r--r--arch/s390/kernel/nospec-sysfs.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c15
-rw-r--r--arch/s390/kernel/perf_cpum_cf_diag.c28
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c107
-rw-r--r--arch/s390/kernel/perf_event.c16
-rw-r--r--arch/s390/kernel/pgm_check.S2
-rw-r--r--arch/s390/kernel/process.c1
-rw-r--r--arch/s390/kernel/processor.c3
-rw-r--r--arch/s390/kernel/reipl.S1
-rw-r--r--arch/s390/kernel/relocate_kernel.S4
-rw-r--r--arch/s390/kernel/setup.c87
-rw-r--r--arch/s390/kernel/smp.c15
-rw-r--r--arch/s390/kernel/stacktrace.c81
-rw-r--r--arch/s390/kernel/swsusp.S17
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/s390/kernel/topology.c6
-rw-r--r--arch/s390/kernel/traps.c3
-rw-r--r--arch/s390/kernel/unwind_bc.c155
-rw-r--r--arch/s390/kernel/vdso.c10
-rw-r--r--arch/s390/kernel/vdso32/Makefile2
-rw-r--r--arch/s390/kernel/vdso64/Makefile2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S19
-rw-r--r--arch/s390/kernel/vtime.c27
-rw-r--r--arch/s390/kvm/interrupt.c431
-rw-r--r--arch/s390/kvm/kvm-s390.c190
-rw-r--r--arch/s390/kvm/kvm-s390.h4
-rw-r--r--arch/s390/lib/mem.S1
-rw-r--r--arch/s390/mm/Makefile2
-rw-r--r--arch/s390/mm/fault.c14
-rw-r--r--arch/s390/mm/gup.c300
-rw-r--r--arch/s390/mm/init.c3
-rw-r--r--arch/s390/mm/maccess.c1
-rw-r--r--arch/s390/mm/pgalloc.c63
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/mm/vmem.c2
-rw-r--r--arch/s390/net/bpf_jit_comp.c6
-rw-r--r--arch/s390/numa/mode_emu.c3
-rw-r--r--arch/s390/numa/numa.c6
-rw-r--r--arch/s390/oprofile/init.c22
-rw-r--r--arch/s390/pci/Makefile2
-rw-r--r--arch/s390/pci/pci.c366
-rw-r--r--arch/s390/pci/pci_clp.c25
-rw-r--r--arch/s390/pci/pci_insn.c169
-rw-r--r--arch/s390/pci/pci_irq.c486
-rw-r--r--arch/s390/purgatory/Makefile20
-rw-r--r--arch/s390/purgatory/kexec-purgatory.S14
-rw-r--r--arch/s390/purgatory/purgatory.lds.S54
-rw-r--r--arch/s390/scripts/Makefile.chkbss28
-rw-r--r--arch/s390/tools/opcodes.txt11
-rw-r--r--arch/sh/Kconfig8
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c10
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c28
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c10
-rw-r--r--arch/sh/boards/mach-migor/setup.c11
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c16
-rw-r--r--arch/sh/boards/of-generic.c4
-rw-r--r--arch/sh/drivers/pci/fixups-dreamcast.c3
-rw-r--r--arch/sh/include/asm/Kbuild2
-rw-r--r--arch/sh/include/asm/io.h3
-rw-r--r--arch/sh/include/asm/mmiowb.h12
-rw-r--r--arch/sh/include/asm/pgalloc.h9
-rw-r--r--arch/sh/include/asm/spinlock-llsc.h2
-rw-r--r--arch/sh/include/asm/syscall_32.h47
-rw-r--r--arch/sh/include/asm/syscall_64.h8
-rw-r--r--arch/sh/include/asm/tlb.h132
-rw-r--r--arch/sh/include/uapi/asm/Kbuild2
-rw-r--r--arch/sh/kernel/machine_kexec.c3
-rw-r--r--arch/sh/kernel/stacktrace.c4
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/sh/mm/init.c8
-rw-r--r--arch/sh/mm/numa.c4
-rw-r--r--arch/sparc/Kconfig9
-rw-r--r--arch/sparc/crypto/des_glue.c11
-rw-r--r--arch/sparc/include/asm/Kbuild3
-rw-r--r--arch/sparc/include/asm/io_64.h2
-rw-r--r--arch/sparc/include/asm/syscall.h11
-rw-r--r--arch/sparc/include/asm/tlb_32.h18
-rw-r--r--arch/sparc/include/uapi/asm/Kbuild2
-rw-r--r--arch/sparc/include/uapi/asm/kvm_para.h2
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/kernel/iommu.c13
-rw-r--r--arch/sparc/kernel/kernel.h6
-rw-r--r--arch/sparc/kernel/pci.c46
-rw-r--r--arch/sparc/kernel/pci_sun4v.c34
-rw-r--r--arch/sparc/kernel/prom_32.c6
-rw-r--r--arch/sparc/kernel/setup_64.c6
-rw-r--r--arch/sparc/kernel/smp_64.c12
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/sparc/mm/init_32.c2
-rw-r--r--arch/sparc/mm/init_64.c11
-rw-r--r--arch/sparc/mm/srmmu.c18
-rw-r--r--arch/um/drivers/net_kern.c3
-rw-r--r--arch/um/drivers/ubd_kern.c6
-rw-r--r--arch/um/drivers/vector_kern.c3
-rw-r--r--arch/um/drivers/vector_user.c3
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/syscall-generic.h78
-rw-r--r--arch/um/include/asm/tlb.h158
-rw-r--r--arch/um/kernel/initrd.c2
-rw-r--r--arch/um/kernel/mem.c16
-rw-r--r--arch/um/kernel/stacktrace.c2
-rw-r--r--arch/unicore32/Kconfig8
-rw-r--r--arch/unicore32/boot/compressed/Makefile5
-rw-r--r--arch/unicore32/boot/compressed/vmlinux.lds.S (renamed from arch/unicore32/boot/compressed/vmlinux.lds.in)0
-rw-r--r--arch/unicore32/include/asm/Kbuild2
-rw-r--r--arch/unicore32/include/asm/tlb.h7
-rw-r--r--arch/unicore32/include/uapi/asm/Kbuild3
-rw-r--r--arch/unicore32/kernel/setup.c4
-rw-r--r--arch/unicore32/kernel/stacktrace.c2
-rw-r--r--arch/unicore32/mm/mmu.c15
-rw-r--r--arch/x86/Kconfig55
-rw-r--r--arch/x86/Makefile10
-rw-r--r--arch/x86/boot/compressed/acpi.c2
-rw-r--r--arch/x86/boot/compressed/kaslr.c2
-rw-r--r--arch/x86/boot/compressed/misc.c2
-rw-r--r--arch/x86/boot/compressed/misc.h4
-rw-r--r--arch/x86/boot/string.c3
-rw-r--r--arch/x86/configs/i386_defconfig12
-rw-r--r--arch/x86/configs/x86_64_defconfig12
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c157
-rw-r--r--arch/x86/crypto/aegis128l-aesni-glue.c157
-rw-r--r--arch/x86/crypto/aegis256-aesni-glue.c157
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c212
-rw-r--r--arch/x86/crypto/chacha_glue.c6
-rw-r--r--arch/x86/crypto/crc32-pclmul_glue.c5
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c7
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c20
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c11
-rw-r--r--arch/x86/crypto/morus1280-avx2-glue.c12
-rw-r--r--arch/x86/crypto/morus1280-sse2-glue.c12
-rw-r--r--arch/x86/crypto/morus1280_glue.c85
-rw-r--r--arch/x86/crypto/morus640-sse2-glue.c12
-rw-r--r--arch/x86/crypto/morus640_glue.c85
-rw-r--r--arch/x86/crypto/nhpoly1305-avx2-glue.c5
-rw-r--r--arch/x86/crypto/nhpoly1305-sse2-glue.c5
-rw-r--r--arch/x86/crypto/poly1305-avx2-x86_64.S14
-rw-r--r--arch/x86/crypto/poly1305-sse2-x86_64.S22
-rw-r--r--arch/x86/crypto/poly1305_glue.c4
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c7
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c7
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c10
-rw-r--r--arch/x86/entry/entry_32.S5
-rw-r--r--arch/x86/entry/entry_64.S19
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--arch/x86/entry/vdso/Makefile2
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c4
-rw-r--r--arch/x86/entry/vdso/vdso2c.h13
-rw-r--r--arch/x86/events/amd/core.c286
-rw-r--r--arch/x86/events/core.c121
-rw-r--r--arch/x86/events/intel/core.c460
-rw-r--r--arch/x86/events/intel/cstate.c12
-rw-r--r--arch/x86/events/intel/ds.c505
-rw-r--r--arch/x86/events/intel/lbr.c35
-rw-r--r--arch/x86/events/intel/pt.c3
-rw-r--r--arch/x86/events/intel/rapl.c2
-rw-r--r--arch/x86/events/intel/uncore.c7
-rw-r--r--arch/x86/events/intel/uncore.h13
-rw-r--r--arch/x86/events/intel/uncore_snb.c95
-rw-r--r--arch/x86/events/msr.c1
-rw-r--r--arch/x86/events/perf_event.h153
-rw-r--r--arch/x86/hyperv/hv_apic.c5
-rw-r--r--arch/x86/hyperv/hv_init.c14
-rw-r--r--arch/x86/hyperv/hv_spinlock.c2
-rw-r--r--arch/x86/ia32/ia32_signal.c29
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/x86/include/asm/alternative-asm.h11
-rw-r--r--arch/x86/include/asm/alternative.h10
-rw-r--r--arch/x86/include/asm/asm.h24
-rw-r--r--arch/x86/include/asm/bitops.h47
-rw-r--r--arch/x86/include/asm/cpu_device_id.h31
-rw-r--r--arch/x86/include/asm/cpu_entry_area.h69
-rw-r--r--arch/x86/include/asm/cpufeature.h16
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/debugreg.h2
-rw-r--r--arch/x86/include/asm/fixmap.h2
-rw-r--r--arch/x86/include/asm/fpu/internal.h7
-rw-r--r--arch/x86/include/asm/intel_ds.h2
-rw-r--r--arch/x86/include/asm/io.h2
-rw-r--r--arch/x86/include/asm/irq.h6
-rw-r--r--arch/x86/include/asm/irq_vectors.h4
-rw-r--r--arch/x86/include/asm/kvm_emulate.h4
-rw-r--r--arch/x86/include/asm/kvm_host.h68
-rw-r--r--arch/x86/include/asm/kvm_vcpu_regs.h25
-rw-r--r--arch/x86/include/asm/mce.h25
-rw-r--r--arch/x86/include/asm/mmu_context.h56
-rw-r--r--arch/x86/include/asm/msr-index.h7
-rw-r--r--arch/x86/include/asm/nospec-branch.h28
-rw-r--r--arch/x86/include/asm/page_32_types.h8
-rw-r--r--arch/x86/include/asm/page_64_types.h20
-rw-r--r--arch/x86/include/asm/perf_event.h57
-rw-r--r--arch/x86/include/asm/pgtable.h5
-rw-r--r--arch/x86/include/asm/processor-cyrix.h21
-rw-r--r--arch/x86/include/asm/processor.h43
-rw-r--r--arch/x86/include/asm/realmode.h6
-rw-r--r--arch/x86/include/asm/rwsem.h237
-rw-r--r--arch/x86/include/asm/set_memory.h3
-rw-r--r--arch/x86/include/asm/smap.h37
-rw-r--r--arch/x86/include/asm/smp.h2
-rw-r--r--arch/x86/include/asm/stackprotector.h6
-rw-r--r--arch/x86/include/asm/stacktrace.h15
-rw-r--r--arch/x86/include/asm/string_32.h104
-rw-r--r--arch/x86/include/asm/string_64.h15
-rw-r--r--arch/x86/include/asm/switch_to.h1
-rw-r--r--arch/x86/include/asm/sync_bitops.h31
-rw-r--r--arch/x86/include/asm/syscall.h142
-rw-r--r--arch/x86/include/asm/text-patching.h7
-rw-r--r--arch/x86/include/asm/tlb.h1
-rw-r--r--arch/x86/include/asm/tlbflush.h4
-rw-r--r--arch/x86/include/asm/uaccess.h15
-rw-r--r--arch/x86/include/asm/uaccess_64.h3
-rw-r--r--arch/x86/include/asm/unwind.h6
-rw-r--r--arch/x86/include/asm/xen/hypercall.h40
-rw-r--r--arch/x86/include/uapi/asm/Kbuild3
-rw-r--r--arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--arch/x86/include/uapi/asm/perf_regs.h23
-rw-r--r--arch/x86/include/uapi/asm/vmx.h1
-rw-r--r--arch/x86/kernel/acpi/boot.c3
-rw-r--r--arch/x86/kernel/acpi/cstate.c12
-rw-r--r--arch/x86/kernel/alternative.c201
-rw-r--r--arch/x86/kernel/aperture_64.c20
-rw-r--r--arch/x86/kernel/apic/apic.c57
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c5
-rw-r--r--arch/x86/kernel/asm-offsets_64.c4
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c5
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c6
-rw-r--r--arch/x86/kernel/cpu/bugs.c17
-rw-r--r--arch/x86/kernel/cpu/common.c79
-rw-r--r--arch/x86/kernel/cpu/cpu.h1
-rw-r--r--arch/x86/kernel/cpu/cyrix.c14
-rw-r--r--arch/x86/kernel/cpu/hygon.c5
-rw-r--r--arch/x86/kernel/cpu/intel.c34
-rw-r--r--arch/x86/kernel/cpu/intel_epb.c216
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c52
-rw-r--r--arch/x86/kernel/cpu/mce/core.c102
-rw-r--r--arch/x86/kernel/cpu/mce/genpool.c3
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c16
-rw-r--r--arch/x86/kernel/cpu/mce/internal.h9
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c5
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c71
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c12
-rw-r--r--arch/x86/kernel/cpu/proc.c10
-rw-r--r--arch/x86/kernel/cpu/resctrl/ctrlmondata.c4
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h16
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c3
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c361
-rw-r--r--arch/x86/kernel/crash.c3
-rw-r--r--arch/x86/kernel/dumpstack_32.c8
-rw-r--r--arch/x86/kernel/dumpstack_64.c99
-rw-r--r--arch/x86/kernel/e820.c10
-rw-r--r--arch/x86/kernel/ftrace.c64
-rw-r--r--arch/x86/kernel/head_64.S2
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/hw_breakpoint.c1
-rw-r--r--arch/x86/kernel/idt.c19
-rw-r--r--arch/x86/kernel/irq_32.c41
-rw-r--r--arch/x86/kernel/irq_64.c89
-rw-r--r--arch/x86/kernel/irqinit.c4
-rw-r--r--arch/x86/kernel/jump_label.c21
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c14
-rw-r--r--arch/x86/kernel/kgdb.c25
-rw-r--r--arch/x86/kernel/kprobes/core.c68
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kernel/kvmclock.c20
-rw-r--r--arch/x86/kernel/ldt.c14
-rw-r--r--arch/x86/kernel/module.c2
-rw-r--r--arch/x86/kernel/mpparse.c4
-rw-r--r--arch/x86/kernel/nmi.c20
-rw-r--r--arch/x86/kernel/paravirt.c2
-rw-r--r--arch/x86/kernel/perf_regs.c27
-rw-r--r--arch/x86/kernel/process.c12
-rw-r--r--arch/x86/kernel/process_32.c7
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/reboot.c23
-rw-r--r--arch/x86/kernel/setup.c38
-rw-r--r--arch/x86/kernel/setup_percpu.c15
-rw-r--r--arch/x86/kernel/signal.c34
-rw-r--r--arch/x86/kernel/smpboot.c21
-rw-r--r--arch/x86/kernel/stacktrace.c128
-rw-r--r--arch/x86/kernel/topology.c2
-rw-r--r--arch/x86/kernel/tsc.c34
-rw-r--r--arch/x86/kernel/unwind_frame.c25
-rw-r--r--arch/x86/kernel/unwind_orc.c17
-rw-r--r--arch/x86/kernel/vm86_32.c2
-rw-r--r--arch/x86/kernel/vmlinux.lds.S15
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/emulate.c191
-rw-r--r--arch/x86/kvm/hyperv.c22
-rw-r--r--arch/x86/kvm/i8254.c2
-rw-r--r--arch/x86/kvm/i8259.c2
-rw-r--r--arch/x86/kvm/ioapic.c2
-rw-r--r--arch/x86/kvm/lapic.c84
-rw-r--r--arch/x86/kvm/lapic.h4
-rw-r--r--arch/x86/kvm/mmu.c528
-rw-r--r--arch/x86/kvm/mmu.h3
-rw-r--r--arch/x86/kvm/mmutrace.h44
-rw-r--r--arch/x86/kvm/page_track.c2
-rw-r--r--arch/x86/kvm/pmu.c4
-rw-r--r--arch/x86/kvm/svm.c229
-rw-r--r--arch/x86/kvm/trace.h4
-rw-r--r--arch/x86/kvm/vmx/nested.c259
-rw-r--r--arch/x86/kvm/vmx/vmcs.h1
-rw-r--r--arch/x86/kvm/vmx/vmenter.S179
-rw-r--r--arch/x86/kvm/vmx/vmx.c249
-rw-r--r--arch/x86/kvm/vmx/vmx.h23
-rw-r--r--arch/x86/kvm/x86.c189
-rw-r--r--arch/x86/kvm/x86.h11
-rw-r--r--arch/x86/lib/Makefile13
-rw-r--r--arch/x86/lib/copy_user_64.S48
-rw-r--r--arch/x86/lib/csum-partial_64.c2
-rw-r--r--arch/x86/lib/delay.c2
-rw-r--r--arch/x86/lib/error-inject.c1
-rw-r--r--arch/x86/lib/memcpy_64.S3
-rw-r--r--arch/x86/lib/rwsem.S156
-rw-r--r--arch/x86/lib/usercopy_64.c20
-rw-r--r--arch/x86/mm/cpu_entry_area.c64
-rw-r--r--arch/x86/mm/dump_pagetables.c7
-rw-r--r--arch/x86/mm/fault.c58
-rw-r--r--arch/x86/mm/init.c43
-rw-r--r--arch/x86/mm/ioremap.c2
-rw-r--r--arch/x86/mm/kasan_init_64.c14
-rw-r--r--arch/x86/mm/kaslr.c96
-rw-r--r--arch/x86/mm/mmap.c2
-rw-r--r--arch/x86/mm/numa.c12
-rw-r--r--arch/x86/mm/pageattr.c20
-rw-r--r--arch/x86/mm/pgtable.c14
-rw-r--r--arch/x86/mm/pti.c10
-rw-r--r--arch/x86/mm/tlb.c116
-rw-r--r--arch/x86/pci/fixup.c16
-rw-r--r--arch/x86/platform/efi/quirks.c2
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c3
-rw-r--r--arch/x86/platform/uv/tlb_uv.c7
-rw-r--r--arch/x86/power/hibernate.c1
-rw-r--r--arch/x86/realmode/init.c11
-rw-r--r--arch/x86/realmode/rm/Makefile3
-rw-r--r--arch/x86/tools/relocs.c76
-rw-r--r--arch/x86/um/Kconfig6
-rw-r--r--arch/x86/um/Makefile4
-rw-r--r--arch/x86/um/vdso/Makefile2
-rw-r--r--arch/x86/xen/mmu_pv.c15
-rw-r--r--arch/x86/xen/p2m.c11
-rw-r--r--arch/x86/xen/setup.c13
-rw-r--r--arch/x86/xen/smp_pv.c4
-rw-r--r--arch/x86/xen/xen-head.S10
-rw-r--r--arch/xtensa/Kconfig4
-rw-r--r--arch/xtensa/include/asm/Kbuild4
-rw-r--r--arch/xtensa/include/asm/processor.h21
-rw-r--r--arch/xtensa/include/asm/syscall.h33
-rw-r--r--arch/xtensa/include/asm/tlb.h26
-rw-r--r--arch/xtensa/include/uapi/asm/Kbuild4
-rw-r--r--arch/xtensa/kernel/entry.S6
-rw-r--r--arch/xtensa/kernel/stacktrace.c6
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/xtensa/mm/kasan_init.c10
-rw-r--r--arch/xtensa/mm/mmu.c3
-rw-r--r--block/bfq-iosched.c25
-rw-r--r--block/bfq-iosched.h2
-rw-r--r--block/bfq-wf2q.c19
-rw-r--r--block/bio.c48
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-core.c7
-rw-r--r--block/blk-flush.c4
-rw-r--r--block/blk-iolatency.c1
-rw-r--r--block/blk-merge.c15
-rw-r--r--block/blk-mq-debugfs.c1
-rw-r--r--block/blk-mq-sched.c8
-rw-r--r--block/blk-mq.c166
-rw-r--r--block/blk-mq.h17
-rw-r--r--block/blk-sysfs.c12
-rw-r--r--block/bsg-lib.c44
-rw-r--r--block/bsg.c190
-rw-r--r--certs/system_keyring.c23
-rw-r--r--crypto/842.c2
-rw-r--r--crypto/Kconfig85
-rw-r--r--crypto/Makefile10
-rw-r--r--crypto/adiantum.c3
-rw-r--r--crypto/aegis128.c2
-rw-r--r--crypto/aegis128l.c2
-rw-r--r--crypto/aegis256.c2
-rw-r--r--crypto/aes_generic.c10
-rw-r--r--crypto/akcipher.c14
-rw-r--r--crypto/algboss.c8
-rw-r--r--crypto/ansi_cprng.c2
-rw-r--r--crypto/anubis.c2
-rw-r--r--crypto/arc4.c2
-rw-r--r--crypto/asymmetric_keys/asym_tpm.c43
-rw-r--r--crypto/asymmetric_keys/pkcs7_verify.c1
-rw-r--r--crypto/asymmetric_keys/public_key.c105
-rw-r--r--crypto/asymmetric_keys/verify_pefile.c1
-rw-r--r--crypto/asymmetric_keys/x509.asn12
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c57
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c1
-rw-r--r--crypto/authenc.c2
-rw-r--r--crypto/authencesn.c2
-rw-r--r--crypto/blowfish_generic.c2
-rw-r--r--crypto/camellia_generic.c2
-rw-r--r--crypto/cast5_generic.c2
-rw-r--r--crypto/cast6_generic.c2
-rw-r--r--crypto/cbc.c2
-rw-r--r--crypto/ccm.c46
-rw-r--r--crypto/cfb.c2
-rw-r--r--crypto/chacha20poly1305.c6
-rw-r--r--crypto/chacha_generic.c12
-rw-r--r--crypto/cmac.c2
-rw-r--r--crypto/crc32_generic.c2
-rw-r--r--crypto/crc32c_generic.c2
-rw-r--r--crypto/crct10dif_generic.c13
-rw-r--r--crypto/cryptd.c252
-rw-r--r--crypto/crypto_null.c2
-rw-r--r--crypto/ctr.c2
-rw-r--r--crypto/cts.c20
-rw-r--r--crypto/deflate.c2
-rw-r--r--crypto/des_generic.c13
-rw-r--r--crypto/dh.c2
-rw-r--r--crypto/drbg.c3
-rw-r--r--crypto/ecb.c2
-rw-r--r--crypto/ecc.c417
-rw-r--r--crypto/ecc.h153
-rw-r--r--crypto/ecc_curve_defs.h15
-rw-r--r--crypto/ecdh.c2
-rw-r--r--crypto/echainiv.c2
-rw-r--r--crypto/ecrdsa.c296
-rw-r--r--crypto/ecrdsa_defs.h225
-rw-r--r--crypto/ecrdsa_params.asn14
-rw-r--r--crypto/ecrdsa_pub_key.asn11
-rw-r--r--crypto/fcrypt.c2
-rw-r--r--crypto/fips.c2
-rw-r--r--crypto/gcm.c36
-rw-r--r--crypto/ghash-generic.c2
-rw-r--r--crypto/hmac.c13
-rw-r--r--crypto/jitterentropy-kcapi.c2
-rw-r--r--crypto/keywrap.c2
-rw-r--r--crypto/khazad.c2
-rw-r--r--crypto/lrw.c12
-rw-r--r--crypto/lz4.c2
-rw-r--r--crypto/lz4hc.c2
-rw-r--r--crypto/lzo-rle.c2
-rw-r--r--crypto/lzo.c2
-rw-r--r--crypto/md4.c2
-rw-r--r--crypto/md5.c2
-rw-r--r--crypto/michael_mic.c2
-rw-r--r--crypto/morus1280.c2
-rw-r--r--crypto/morus640.c2
-rw-r--r--crypto/nhpoly1305.c2
-rw-r--r--crypto/ofb.c2
-rw-r--r--crypto/pcbc.c2
-rw-r--r--crypto/pcrypt.c2
-rw-r--r--crypto/poly1305_generic.c2
-rw-r--r--crypto/rmd128.c2
-rw-r--r--crypto/rmd160.c2
-rw-r--r--crypto/rmd256.c2
-rw-r--r--crypto/rmd320.c2
-rw-r--r--crypto/rsa-pkcs1pad.c33
-rw-r--r--crypto/rsa.c111
-rw-r--r--crypto/salsa20_generic.c13
-rw-r--r--crypto/scompress.c129
-rw-r--r--crypto/seed.c2
-rw-r--r--crypto/seqiv.c2
-rw-r--r--crypto/serpent_generic.c2
-rw-r--r--crypto/sha1_generic.c2
-rw-r--r--crypto/sha256_generic.c2
-rw-r--r--crypto/sha3_generic.c2
-rw-r--r--crypto/sha512_generic.c2
-rw-r--r--crypto/shash.c7
-rw-r--r--crypto/simd.c273
-rw-r--r--crypto/skcipher.c9
-rw-r--r--crypto/sm3_generic.c2
-rw-r--r--crypto/sm4_generic.c2
-rw-r--r--crypto/streebog_generic.c27
-rw-r--r--crypto/tcrypt.c2
-rw-r--r--crypto/tea.c2
-rw-r--r--crypto/testmgr.c1242
-rw-r--r--crypto/testmgr.h225
-rw-r--r--crypto/tgr192.c2
-rw-r--r--crypto/twofish_generic.c2
-rw-r--r--crypto/vmac.c2
-rw-r--r--crypto/wp512.c2
-rw-r--r--crypto/xcbc.c2
-rw-r--r--crypto/xts.c8
-rw-r--r--crypto/zstd.c2
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/acpi_configfs.c29
-rw-r--r--drivers/acpi/acpi_dbg.c2
-rw-r--r--drivers/acpi/acpi_lpat.c2
-rw-r--r--drivers/acpi/acpi_lpss.c6
-rw-r--r--drivers/acpi/acpica/aclocal.h4
-rw-r--r--drivers/acpi/acpica/dbexec.c2
-rw-r--r--drivers/acpi/acpica/dbnames.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c4
-rw-r--r--drivers/acpi/acpica/exnames.c6
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c4
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c4
-rw-r--r--drivers/acpi/acpica/nsnames.c8
-rw-r--r--drivers/acpi/acpica/nsobject.c4
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c4
-rw-r--r--drivers/acpi/acpica/nsutils.c14
-rw-r--r--drivers/acpi/acpica/nsxfname.c4
-rw-r--r--drivers/acpi/acpica/psargs.c8
-rw-r--r--drivers/acpi/acpica/rsxface.c8
-rw-r--r--drivers/acpi/acpica/tbdata.c3
-rw-r--r--drivers/acpi/acpica/tbfind.c20
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c10
-rw-r--r--drivers/acpi/acpica/tbutils.c6
-rw-r--r--drivers/acpi/acpica/tbxface.c4
-rw-r--r--drivers/acpi/acpica/tbxfload.c15
-rw-r--r--drivers/acpi/acpica/utascii.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c4
-rw-r--r--drivers/acpi/acpica/utmisc.c8
-rw-r--r--drivers/acpi/acpica/utpredef.c4
-rw-r--r--drivers/acpi/acpica/utstring.c6
-rw-r--r--drivers/acpi/arm64/iort.c150
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/button.c5
-rw-r--r--drivers/acpi/cppc_acpi.c43
-rw-r--r--drivers/acpi/device_pm.c3
-rw-r--r--drivers/acpi/device_sysfs.c6
-rw-r--r--drivers/acpi/dptf/dptf_power.c3
-rw-r--r--drivers/acpi/event.c4
-rw-r--r--drivers/acpi/nfit/core.c205
-rw-r--r--drivers/acpi/nfit/intel.c10
-rw-r--r--drivers/acpi/nfit/nfit.h17
-rw-r--r--drivers/acpi/numa.c1
-rw-r--r--drivers/acpi/power.c4
-rw-r--r--drivers/acpi/pptt.c51
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/acpi/property.c9
-rw-r--r--drivers/acpi/scan.c22
-rw-r--r--drivers/acpi/spcr.c2
-rw-r--r--drivers/acpi/sysfs.c35
-rw-r--r--drivers/acpi/tables.c22
-rw-r--r--drivers/acpi/utils.c17
-rw-r--r--drivers/acpi/video_detect.c10
-rw-r--r--drivers/amba/bus.c45
-rw-r--r--drivers/android/binder.c3
-rw-r--r--drivers/android/binder_alloc.c18
-rw-r--r--drivers/ata/libata-scsi.c5
-rw-r--r--drivers/ata/libata-zpodd.c34
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/auxdisplay/Kconfig38
-rw-r--r--drivers/auxdisplay/Makefile2
-rw-r--r--drivers/auxdisplay/charlcd.c55
-rw-r--r--drivers/auxdisplay/hd44780.c4
-rw-r--r--drivers/auxdisplay/panel.c4
-rw-r--r--drivers/base/Kconfig77
-rw-r--r--drivers/base/memory.c3
-rw-r--r--drivers/base/power/domain.c149
-rw-r--r--drivers/base/power/domain_governor.c68
-rw-r--r--drivers/base/power/main.c91
-rw-r--r--drivers/base/power/power.h1
-rw-r--r--drivers/base/power/qos.c2
-rw-r--r--drivers/base/power/runtime.c16
-rw-r--r--drivers/base/power/sysfs.c12
-rw-r--r--drivers/base/power/trace.c2
-rw-r--r--drivers/base/power/wakeup.c34
-rw-r--r--drivers/base/property.c75
-rw-r--r--drivers/base/regmap/internal.h5
-rw-r--r--drivers/base/regmap/regcache-flat.c18
-rw-r--r--drivers/base/regmap/regcache-lzo.c18
-rw-r--r--drivers/base/regmap/regcache-rbtree.c18
-rw-r--r--drivers/base/regmap/regcache.c18
-rw-r--r--drivers/base/regmap/regmap-ac97.c22
-rw-r--r--drivers/base/regmap/regmap-debugfs.c48
-rw-r--r--drivers/base/regmap/regmap-i2c.c18
-rw-r--r--drivers/base/regmap/regmap-irq.c21
-rw-r--r--drivers/base/regmap/regmap-mmio.c22
-rw-r--r--drivers/base/regmap/regmap-spi.c18
-rw-r--r--drivers/base/regmap/regmap-spmi.c29
-rw-r--r--drivers/base/regmap/regmap-w1.c16
-rw-r--r--drivers/base/regmap/regmap.c27
-rw-r--r--drivers/base/swnode.c4
-rw-r--r--drivers/block/drbd/drbd_receiver.c1
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/null_blk_main.c5
-rw-r--r--drivers/block/paride/pcd.c20
-rw-r--r--drivers/block/paride/pf.c26
-rw-r--r--drivers/block/rbd.c426
-rw-r--r--drivers/block/virtio_blk.c12
-rw-r--r--drivers/block/xen-blkback/xenbus.c99
-rw-r--r--drivers/block/xsysace.c2
-rw-r--r--drivers/block/zram/zram_drv.c39
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/hw_random/omap-rng.c1
-rw-r--r--drivers/char/hw_random/stm32-rng.c9
-rw-r--r--drivers/char/ipmi/ipmi_dmi.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c19
-rw-r--r--drivers/char/ipmi/ipmi_si_hardcode.c2
-rw-r--r--drivers/char/tpm/eventlog/tpm1.c41
-rw-r--r--drivers/char/tpm/eventlog/tpm2.c14
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c2
-rw-r--r--drivers/char/tpm/st33zp24/spi.c2
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c2
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.h4
-rw-r--r--drivers/char/tpm/tpm-chip.c124
-rw-r--r--drivers/char/tpm/tpm-dev-common.c53
-rw-r--r--drivers/char/tpm/tpm-interface.c327
-rw-r--r--drivers/char/tpm/tpm-sysfs.c138
-rw-r--r--drivers/char/tpm/tpm.h180
-rw-r--r--drivers/char/tpm/tpm1-cmd.c43
-rw-r--r--drivers/char/tpm/tpm2-cmd.c208
-rw-r--r--drivers/char/tpm/tpm2-space.c90
-rw-r--r--drivers/char/tpm/tpm_atmel.c2
-rw-r--r--drivers/char/tpm/tpm_crb.c22
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c15
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c17
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c18
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c8
-rw-r--r--drivers/char/tpm/tpm_infineon.c2
-rw-r--r--drivers/char/tpm/tpm_nsc.c2
-rw-r--r--drivers/char/tpm/tpm_ppi.c78
-rw-r--r--drivers/char/tpm/tpm_tis_core.c21
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c15
-rw-r--r--drivers/char/tpm/xen-tpmfront.c4
-rw-r--r--drivers/clk/Kconfig6
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/actions/Kconfig5
-rw-r--r--drivers/clk/actions/Makefile1
-rw-r--r--drivers/clk/actions/owl-pll.c2
-rw-r--r--drivers/clk/actions/owl-pll.h30
-rw-r--r--drivers/clk/actions/owl-s500.c525
-rw-r--r--drivers/clk/at91/clk-audio-pll.c9
-rw-r--r--drivers/clk/at91/clk-programmable.c60
-rw-r--r--drivers/clk/at91/pmc.h2
-rw-r--r--drivers/clk/at91/sama5d2.c13
-rw-r--r--drivers/clk/clk-clps711x.c61
-rw-r--r--drivers/clk/clk-devres.c11
-rw-r--r--drivers/clk/clk-fixed-mmio.c101
-rw-r--r--drivers/clk/clk-fractional-divider.c2
-rw-r--r--drivers/clk/clk-gpio.c39
-rw-r--r--drivers/clk/clk-highbank.c1
-rw-r--r--drivers/clk/clk-max77686.c28
-rw-r--r--drivers/clk/clk-qoriq.c5
-rw-r--r--drivers/clk/clk-stm32mp1.c37
-rw-r--r--drivers/clk/clk-twl6040.c53
-rw-r--r--drivers/clk/clk.c262
-rw-r--r--drivers/clk/clk.h23
-rw-r--r--drivers/clk/clkdev.c236
-rw-r--r--drivers/clk/imx/Kconfig6
-rw-r--r--drivers/clk/imx/Makefile4
-rw-r--r--drivers/clk/imx/clk-composite-8m.c2
-rw-r--r--drivers/clk/imx/clk-imx51-imx53.c1
-rw-r--r--drivers/clk/imx/clk-imx6q.c1
-rw-r--r--drivers/clk/imx/clk-imx6sx.c1
-rw-r--r--drivers/clk/imx/clk-imx7d.c1
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c16
-rw-r--r--drivers/clk/imx/clk-imx8mm.c675
-rw-r--r--drivers/clk/imx/clk-imx8mq.c254
-rw-r--r--drivers/clk/imx/clk-imx8qxp.c1
-rw-r--r--drivers/clk/imx/clk-pll14xx.c392
-rw-r--r--drivers/clk/imx/clk-sccg-pll.c514
-rw-r--r--drivers/clk/imx/clk-scu.c123
-rw-r--r--drivers/clk/imx/clk-scu.h16
-rw-r--r--drivers/clk/imx/clk-vf610.c1
-rw-r--r--drivers/clk/imx/clk.h38
-rw-r--r--drivers/clk/ingenic/cgu.c13
-rw-r--r--drivers/clk/ingenic/cgu.h2
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c2
-rw-r--r--drivers/clk/mediatek/clk-gate.c5
-rw-r--r--drivers/clk/mediatek/clk-gate.h3
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c4
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c9
-rw-r--r--drivers/clk/mediatek/clk-mt6797.c68
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c4
-rw-r--r--drivers/clk/mediatek/clk-mtk.c4
-rw-r--r--drivers/clk/mediatek/clk-mtk.h29
-rw-r--r--drivers/clk/meson/Kconfig101
-rw-r--r--drivers/clk/meson/Makefile29
-rw-r--r--drivers/clk/meson/axg-aoclk.c193
-rw-r--r--drivers/clk/meson/axg-aoclk.h13
-rw-r--r--drivers/clk/meson/axg-audio.c5
-rw-r--r--drivers/clk/meson/axg.c69
-rw-r--r--drivers/clk/meson/clk-dualdiv.c138
-rw-r--r--drivers/clk/meson/clk-dualdiv.h33
-rw-r--r--drivers/clk/meson/clk-input.c7
-rw-r--r--drivers/clk/meson/clk-input.h19
-rw-r--r--drivers/clk/meson/clk-mpll.c12
-rw-r--r--drivers/clk/meson/clk-mpll.h30
-rw-r--r--drivers/clk/meson/clk-phase.c75
-rw-r--r--drivers/clk/meson/clk-phase.h26
-rw-r--r--drivers/clk/meson/clk-pll.c216
-rw-r--r--drivers/clk/meson/clk-pll.h49
-rw-r--r--drivers/clk/meson/clk-regmap.c5
-rw-r--r--drivers/clk/meson/clk-regmap.h20
-rw-r--r--drivers/clk/meson/clk-triphase.c68
-rw-r--r--drivers/clk/meson/clkc.h127
-rw-r--r--drivers/clk/meson/g12a-aoclk.c454
-rw-r--r--drivers/clk/meson/g12a-aoclk.h34
-rw-r--r--drivers/clk/meson/g12a.c2359
-rw-r--r--drivers/clk/meson/g12a.h175
-rw-r--r--drivers/clk/meson/gxbb-aoclk-32k.c193
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c268
-rw-r--r--drivers/clk/meson/gxbb-aoclk.h20
-rw-r--r--drivers/clk/meson/gxbb.c298
-rw-r--r--drivers/clk/meson/meson-aoclk.c54
-rw-r--r--drivers/clk/meson/meson-aoclk.h13
-rw-r--r--drivers/clk/meson/meson-eeclk.c63
-rw-r--r--drivers/clk/meson/meson-eeclk.h25
-rw-r--r--drivers/clk/meson/meson8b.c374
-rw-r--r--drivers/clk/meson/meson8b.h11
-rw-r--r--drivers/clk/meson/parm.h46
-rw-r--r--drivers/clk/meson/sclk-div.c10
-rw-r--r--drivers/clk/meson/sclk-div.h (renamed from drivers/clk/meson/clkc-audio.h)16
-rw-r--r--drivers/clk/meson/vid-pll-div.c14
-rw-r--r--drivers/clk/meson/vid-pll-div.h20
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c5
-rw-r--r--drivers/clk/mvebu/armada-370.c4
-rw-r--r--drivers/clk/mvebu/armada-xp.c4
-rw-r--r--drivers/clk/mvebu/dove.c8
-rw-r--r--drivers/clk/mvebu/kirkwood.c2
-rw-r--r--drivers/clk/mvebu/mv98dx3236.c4
-rw-r--r--drivers/clk/qcom/clk-rcg.h5
-rw-r--r--drivers/clk/qcom/clk-rcg2.c24
-rw-r--r--drivers/clk/qcom/clk-rpmh.c146
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c63
-rw-r--r--drivers/clk/qcom/common.c8
-rw-r--r--drivers/clk/qcom/common.h2
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c10
-rw-r--r--drivers/clk/qcom/gcc-mdm9615.c11
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c10
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c61
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c10
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c11
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c5
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c10
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c4
-rw-r--r--drivers/clk/renesas/r8a774c0-cpg-mssr.c15
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c147
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h4
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c4
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c12
-rw-r--r--drivers/clk/samsung/clk-exynos4.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c13
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c38
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c2
-rw-r--r--drivers/clk/samsung/clk.h2
-rw-r--r--drivers/clk/socfpga/clk-gate.c22
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c1
-rw-r--r--drivers/clk/socfpga/clk-pll.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c24
-rw-r--r--drivers/clk/tegra/clk-dfll.c18
-rw-r--r--drivers/clk/ti/adpll.c2
-rw-r--r--drivers/clk/ti/apll.c4
-rw-r--r--drivers/clk/ti/autoidle.c101
-rw-r--r--drivers/clk/ti/clk.c83
-rw-r--r--drivers/clk/ti/clkctrl.c4
-rw-r--r--drivers/clk/ti/clock.h5
-rw-r--r--drivers/clk/ti/clockdomain.c2
-rw-r--r--drivers/clk/ti/divider.c2
-rw-r--r--drivers/clk/ti/dpll.c11
-rw-r--r--drivers/clk/ti/dpll3xxx.c2
-rw-r--r--drivers/clk/ti/gate.c2
-rw-r--r--drivers/clk/ti/interface.c4
-rw-r--r--drivers/clk/ti/mux.c2
-rw-r--r--drivers/clk/uniphier/clk-uniphier-cpugear.c2
-rw-r--r--drivers/clk/x86/clk-lpt.c2
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c14
-rw-r--r--drivers/clk/x86/clk-st.c3
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/arm_arch_timer.c154
-rw-r--r--drivers/clocksource/clps711x-timer.c44
-rw-r--r--drivers/clocksource/mips-gic-timer.c2
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/clocksource/timer-oxnas-rps.c2
-rw-r--r--drivers/clocksource/timer-riscv.c5
-rw-r--r--drivers/clocksource/timer-ti-dm.c28
-rw-r--r--drivers/connector/cn_proc.c22
-rw-r--r--drivers/cpufreq/Kconfig4
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c19
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c2
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c22
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c1
-rw-r--r--drivers/cpufreq/cpufreq.c124
-rw-r--r--drivers/cpufreq/cpufreq_governor.c2
-rw-r--r--drivers/cpufreq/cpufreq_stats.c15
-rw-r--r--drivers/cpufreq/freq_table.c3
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c77
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c19
-rw-r--r--drivers/cpufreq/maple-cpufreq.c6
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c2
-rw-r--r--drivers/cpufreq/powernow-k8.c2
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c1
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c4
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c2
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/speedstep-centrino.c2
-rw-r--r--drivers/cpuidle/cpuidle-exynos.c2
-rw-r--r--drivers/cpuidle/cpuidle.c19
-rw-r--r--drivers/cpuidle/governor.c1
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/crypto/Kconfig9
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c24
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c48
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h3
-rw-r--r--drivers/crypto/atmel-tdes.c106
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c2
-rw-r--r--drivers/crypto/bcm/cipher.c22
-rw-r--r--drivers/crypto/bcm/spu.c3
-rw-r--r--drivers/crypto/bcm/util.c1
-rw-r--r--drivers/crypto/caam/caamalg.c87
-rw-r--r--drivers/crypto/caam/caamalg_qi.c77
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c243
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h2
-rw-r--r--drivers/crypto/caam/caamhash.c31
-rw-r--r--drivers/crypto/caam/caampkc.c16
-rw-r--r--drivers/crypto/caam/caamrng.c22
-rw-r--r--drivers/crypto/caam/ctrl.c20
-rw-r--r--drivers/crypto/caam/error.c2
-rw-r--r--drivers/crypto/caam/intern.h4
-rw-r--r--drivers/crypto/caam/jr.c33
-rw-r--r--drivers/crypto/caam/qi.c4
-rw-r--r--drivers/crypto/caam/regs.h11
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c30
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_main.c2
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_mbox.c17
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c6
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_aead.c337
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c65
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_req.h46
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_skcipher.c8
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.c8
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c21
-rw-r--r--drivers/crypto/ccp/ccp-crypto-rsa.c8
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c2
-rw-r--r--drivers/crypto/ccp/psp-dev.c69
-rw-r--r--drivers/crypto/ccree/Makefile1
-rw-r--r--drivers/crypto/ccree/cc_aead.c118
-rw-r--r--drivers/crypto/ccree/cc_aead.h3
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c341
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.h2
-rw-r--r--drivers/crypto/ccree/cc_cipher.c585
-rw-r--r--drivers/crypto/ccree/cc_cipher.h3
-rw-r--r--drivers/crypto/ccree/cc_crypto_ctx.h10
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c44
-rw-r--r--drivers/crypto/ccree/cc_debugfs.h2
-rw-r--r--drivers/crypto/ccree/cc_driver.c120
-rw-r--r--drivers/crypto/ccree/cc_driver.h36
-rw-r--r--drivers/crypto/ccree/cc_fips.c29
-rw-r--r--drivers/crypto/ccree/cc_fips.h4
-rw-r--r--drivers/crypto/ccree/cc_hash.c64
-rw-r--r--drivers/crypto/ccree/cc_hash.h2
-rw-r--r--drivers/crypto/ccree/cc_host_regs.h123
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h35
-rw-r--r--drivers/crypto/ccree/cc_ivgen.c11
-rw-r--r--drivers/crypto/ccree/cc_ivgen.h2
-rw-r--r--drivers/crypto/ccree/cc_kernel_regs.h2
-rw-r--r--drivers/crypto/ccree/cc_lli_defs.h4
-rw-r--r--drivers/crypto/ccree/cc_pm.c11
-rw-r--r--drivers/crypto/ccree/cc_pm.h2
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.c116
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.h2
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.c7
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.h2
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c2
-rw-r--r--drivers/crypto/hifn_795x.c31
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c12
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c11
-rw-r--r--drivers/crypto/ixp4xx_crypto.c64
-rw-r--r--drivers/crypto/marvell/cipher.c11
-rw-r--r--drivers/crypto/marvell/hash.c3
-rw-r--r--drivers/crypto/mediatek/mtk-sha.c3
-rw-r--r--drivers/crypto/mxc-scc.c767
-rw-r--r--drivers/crypto/mxs-dcp.c14
-rw-r--r--drivers/crypto/n2_core.c15
-rw-r--r--drivers/crypto/nx/nx-842-pseries.c6
-rw-r--r--drivers/crypto/nx/nx-842.c3
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c12
-rw-r--r--drivers/crypto/nx/nx-sha256.c6
-rw-r--r--drivers/crypto/nx/nx-sha512.c6
-rw-r--r--drivers/crypto/omap-des.c29
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/crypto/padlock-sha.c5
-rw-r--r--drivers/crypto/picoxcell_crypto.c35
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c2
-rw-r--r--drivers/crypto/qce/ablkcipher.c22
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c61
-rw-r--r--drivers/crypto/s5p-sss.c3
-rw-r--r--drivers/crypto/sahara.c6
-rw-r--r--drivers/crypto/stm32/Kconfig1
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c74
-rw-r--r--drivers/crypto/stm32/stm32-hash.c4
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c78
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c19
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-hash.c5
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss.h2
-rw-r--r--drivers/crypto/talitos.c108
-rw-r--r--drivers/crypto/ux500/cryp/Makefile6
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c86
-rw-r--r--drivers/crypto/vmx/aes.c14
-rw-r--r--drivers/crypto/vmx/aes_cbc.c14
-rw-r--r--drivers/crypto/vmx/aes_ctr.c10
-rw-r--r--drivers/crypto/vmx/aes_xts.c14
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.pl4
-rw-r--r--drivers/crypto/vmx/ghash.c10
-rw-r--r--drivers/crypto/vmx/vmx.c4
-rw-r--r--drivers/dax/Kconfig28
-rw-r--r--drivers/dax/Makefile6
-rw-r--r--drivers/dax/bus.c503
-rw-r--r--drivers/dax/bus.h61
-rw-r--r--drivers/dax/dax-private.h34
-rw-r--r--drivers/dax/dax.h18
-rw-r--r--drivers/dax/device-dax.h25
-rw-r--r--drivers/dax/device.c363
-rw-r--r--drivers/dax/kmem.c108
-rw-r--r--drivers/dax/pmem.c153
-rw-r--r--drivers/dax/pmem/Makefile7
-rw-r--r--drivers/dax/pmem/compat.c73
-rw-r--r--drivers/dax/pmem/core.c71
-rw-r--r--drivers/dax/pmem/pmem.c40
-rw-r--r--drivers/dax/super.c79
-rw-r--r--drivers/devfreq/devfreq-event.c2
-rw-r--r--drivers/devfreq/devfreq.c90
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c2
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c25
-rw-r--r--drivers/devfreq/exynos-bus.c8
-rw-r--r--drivers/devfreq/rk3399_dmc.c73
-rw-r--r--drivers/devfreq/tegra-devfreq.c7
-rw-r--r--drivers/dma/Kconfig14
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/at_hdmac.c5
-rw-r--r--drivers/dma/bcm2835-dma.c29
-rw-r--r--drivers/dma/dma-axi-dmac.c3
-rw-r--r--drivers/dma/dma-jz4780.c5
-rw-r--r--drivers/dma/dmatest.c269
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac.h2
-rw-r--r--drivers/dma/dw/Kconfig2
-rw-r--r--drivers/dma/dw/Makefile2
-rw-r--r--drivers/dma/dw/core.c245
-rw-r--r--drivers/dma/dw/dw.c138
-rw-r--r--drivers/dma/dw/idma32.c160
-rw-r--r--drivers/dma/dw/internal.h15
-rw-r--r--drivers/dma/dw/pci.c53
-rw-r--r--drivers/dma/dw/platform.c22
-rw-r--r--drivers/dma/dw/regs.h30
-rw-r--r--drivers/dma/fsl-edma-common.c70
-rw-r--r--drivers/dma/fsl-edma-common.h4
-rw-r--r--drivers/dma/fsl-edma.c1
-rw-r--r--drivers/dma/fsl-qdma.c1259
-rw-r--r--drivers/dma/fsldma.c16
-rw-r--r--drivers/dma/fsldma.h68
-rw-r--r--drivers/dma/imx-dma.c8
-rw-r--r--drivers/dma/imx-sdma.c49
-rw-r--r--drivers/dma/ioat/dma.c12
-rw-r--r--drivers/dma/ioat/dma.h2
-rw-r--r--drivers/dma/ioat/hw.h3
-rw-r--r--drivers/dma/ioat/init.c40
-rw-r--r--drivers/dma/ioat/registers.h24
-rw-r--r--drivers/dma/k3dma.c61
-rw-r--r--drivers/dma/mcf-edma.c1
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c2
-rw-r--r--drivers/dma/mv_xor.c7
-rw-r--r--drivers/dma/pch_dma.c1
-rw-r--r--drivers/dma/pl330.c1
-rw-r--r--drivers/dma/qcom/bam_dma.c4
-rw-r--r--drivers/dma/qcom/hidma.c19
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c3
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sh/rcar-dmac.c30
-rw-r--r--drivers/dma/sh/usb-dmac.c2
-rw-r--r--drivers/dma/sprd-dma.c19
-rw-r--r--drivers/dma/st_fdma.c6
-rw-r--r--drivers/dma/stm32-dma.c71
-rw-r--r--drivers/dma/stm32-dmamux.c58
-rw-r--r--drivers/dma/stm32-mdma.c52
-rw-r--r--drivers/dma/tegra20-apb-dma.c45
-rw-r--r--drivers/dma/tegra210-adma.c5
-rw-r--r--drivers/dma/timb_dma.c4
-rw-r--r--drivers/dma/txx9dmac.c3
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c170
-rw-r--r--drivers/edac/altera_edac.c262
-rw-r--r--drivers/edac/altera_edac.h69
-rw-r--r--drivers/edac/amd64_edac.c135
-rw-r--r--drivers/edac/amd64_edac.h11
-rw-r--r--drivers/edac/i10nm_base.c52
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/skx_base.c50
-rw-r--r--drivers/edac/skx_common.c57
-rw-r--r--drivers/edac/skx_common.h8
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/extcon/extcon-axp288.c9
-rw-r--r--drivers/firewire/ohci.c1
-rw-r--r--drivers/firmware/Kconfig15
-rw-r--r--drivers/firmware/Makefile3
-rw-r--r--drivers/firmware/arm_sdei.c3
-rw-r--r--drivers/firmware/dmi_scan.c28
-rw-r--r--drivers/firmware/efi/arm-runtime.c12
-rw-r--r--drivers/firmware/efi/libstub/Makefile20
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c17
-rw-r--r--drivers/firmware/iscsi_ibft.c2
-rw-r--r--drivers/firmware/memmap.c2
-rw-r--r--drivers/firmware/psci/Kconfig13
-rw-r--r--drivers/firmware/psci/Makefile4
-rw-r--r--drivers/firmware/psci/psci.c (renamed from drivers/firmware/psci.c)110
-rw-r--r--drivers/firmware/psci/psci_checker.c (renamed from drivers/firmware/psci_checker.c)0
-rw-r--r--drivers/gpio/gpio-adnp.c6
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-eic-sprd.c1
-rw-r--r--drivers/gpio/gpio-exar.c2
-rw-r--r--drivers/gpio/gpio-merrifield.c18
-rw-r--r--drivers/gpio/gpio-ml-ioh.c2
-rw-r--r--drivers/gpio/gpio-mockup.c10
-rw-r--r--drivers/gpio/gpio-pch.c1
-rw-r--r--drivers/gpio/gpiolib-of.c17
-rw-r--r--drivers/gpio/gpiolib.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c52
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c48
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c23
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c7
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c30
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c240
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c50
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c64
-rw-r--r--drivers/gpu/drm/drm_drv.c6
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_file.c6
-rw-r--r--drivers/gpu/drm/drm_ioc32.c6
-rw-r--r--drivers/gpu/drm/drm_mm.c25
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c110
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c13
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c33
-rw-r--r--drivers/gpu/drm/i915/i915_active.c36
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c35
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c27
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c11
-rw-r--r--drivers/gpu/drm/i915/icl_dsi.c48
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c1
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c18
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c43
-rw-r--r--drivers/gpu/drm/i915/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c75
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h10
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c12
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c10
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c69
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi.c24
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c46
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_phy.c35
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_phy.h5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c49
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c23
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c9
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c9
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c12
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c18
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c5
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c12
-rw-r--r--drivers/gpu/drm/tegra/hub.c4
-rw-r--r--drivers/gpu/drm/tegra/vic.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c14
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c13
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c72
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h1
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_main.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c12
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c8
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c26
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c126
-rw-r--r--drivers/gpu/ipu-v3/ipu-dp.c12
-rw-r--r--drivers/gpu/vga/vgaarb.c49
-rw-r--r--drivers/hid/Kconfig28
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-core.c59
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-input.c99
-rw-r--r--drivers/hid/hid-lg.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c1142
-rw-r--r--drivers/hid/hid-logitech-hidpp.c747
-rw-r--r--drivers/hid/hid-macally.c45
-rw-r--r--drivers/hid/hid-picolcd_core.c18
-rw-r--r--drivers/hid/hid-quirks.c17
-rw-r--r--drivers/hid/hid-sensor-custom.c12
-rw-r--r--drivers/hid/hid-steam.c26
-rw-r--r--drivers/hid/hid-u2fzero.c374
-rw-r--r--drivers/hid/hid-uclogic-params.c4
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig15
-rw-r--r--drivers/hid/intel-ish-hid/Makefile3
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c1085
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c168
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c49
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h14
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c96
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h37
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c60
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h24
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h31
-rw-r--r--drivers/hwmon/Kconfig20
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/ad7414.c2
-rw-r--r--drivers/hwmon/adc128d818.c2
-rw-r--r--drivers/hwmon/adm1025.c98
-rw-r--r--drivers/hwmon/adm1026.c416
-rw-r--r--drivers/hwmon/adm1029.c41
-rw-r--r--drivers/hwmon/adm1031.c201
-rw-r--r--drivers/hwmon/adm9240.c135
-rw-r--r--drivers/hwmon/ads1015.c2
-rw-r--r--drivers/hwmon/ads7828.c4
-rw-r--r--drivers/hwmon/adt7411.c48
-rw-r--r--drivers/hwmon/adt7475.c2
-rw-r--r--drivers/hwmon/f71805f.c15
-rw-r--r--drivers/hwmon/hih6130.c2
-rw-r--r--drivers/hwmon/hwmon.c5
-rw-r--r--drivers/hwmon/iio_hwmon.c27
-rw-r--r--drivers/hwmon/ina209.c2
-rw-r--r--drivers/hwmon/ina2xx.c2
-rw-r--r--drivers/hwmon/ina3221.c176
-rw-r--r--drivers/hwmon/jc42.c18
-rw-r--r--drivers/hwmon/jz4740-hwmon.c4
-rw-r--r--drivers/hwmon/lm63.c2
-rw-r--r--drivers/hwmon/lm75.c45
-rw-r--r--drivers/hwmon/lm78.c114
-rw-r--r--drivers/hwmon/lm85.c342
-rw-r--r--drivers/hwmon/lm87.c165
-rw-r--r--drivers/hwmon/lm90.c15
-rw-r--r--drivers/hwmon/lm95241.c34
-rw-r--r--drivers/hwmon/lm95245.c49
-rw-r--r--drivers/hwmon/lochnagar-hwmon.c412
-rw-r--r--drivers/hwmon/ltc4151.c2
-rw-r--r--drivers/hwmon/ltc4245.c73
-rw-r--r--drivers/hwmon/ltq-cputemp.c26
-rw-r--r--drivers/hwmon/max197.c2
-rw-r--r--drivers/hwmon/max31790.c58
-rw-r--r--drivers/hwmon/max6621.c44
-rw-r--r--drivers/hwmon/max6650.c90
-rw-r--r--drivers/hwmon/max6697.c2
-rw-r--r--drivers/hwmon/menf21bmc_hwmon.c43
-rw-r--r--drivers/hwmon/mlxreg-fan.c121
-rw-r--r--drivers/hwmon/nct7904.c128
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c70
-rw-r--r--drivers/hwmon/ntc_thermistor.c24
-rw-r--r--drivers/hwmon/occ/Kconfig17
-rw-r--r--drivers/hwmon/occ/Makefile6
-rw-r--r--drivers/hwmon/occ/common.c17
-rw-r--r--drivers/hwmon/occ/common.h3
-rw-r--r--drivers/hwmon/occ/sysfs.c29
-rw-r--r--drivers/hwmon/pc87427.c14
-rw-r--r--drivers/hwmon/pmbus/Kconfig18
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/ir38064.c65
-rw-r--r--drivers/hwmon/pmbus/isl68137.c169
-rw-r--r--drivers/hwmon/pmbus/lm25066.c17
-rw-r--r--drivers/hwmon/pmbus/pmbus.h18
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c129
-rw-r--r--drivers/hwmon/pmbus/tps53679.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c2
-rw-r--r--drivers/hwmon/pwm-fan.c116
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c13
-rw-r--r--drivers/hwmon/s3c-hwmon.c4
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/sis5595.c92
-rw-r--r--drivers/hwmon/smsc47b397.c13
-rw-r--r--drivers/hwmon/smsc47m1.c106
-rw-r--r--drivers/hwmon/smsc47m192.c146
-rw-r--r--drivers/hwmon/stts751.c2
-rw-r--r--drivers/hwmon/thmc50.c83
-rw-r--r--drivers/hwmon/tmp102.c28
-rw-r--r--drivers/hwmon/tmp103.c2
-rw-r--r--drivers/hwmon/tmp108.c29
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/via686a.c148
-rw-r--r--drivers/hwmon/vt1211.c15
-rw-r--r--drivers/hwmon/vt8231.c166
-rw-r--r--drivers/hwmon/w83627hf.c299
-rw-r--r--drivers/hwmon/w83773g.c32
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c44
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c21
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h40
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c14
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c30
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c7
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c8
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c8
-rw-r--r--drivers/i2c/busses/i2c-rcar.c15
-rw-r--r--drivers/i2c/busses/i2c-sis630.c4
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c2
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c2
-rw-r--r--drivers/i2c/i2c-core-base.c15
-rw-r--r--drivers/i3c/master.c10
-rw-r--r--drivers/i3c/master/dw-i3c-master.c12
-rw-r--r--drivers/ide/Kconfig26
-rw-r--r--drivers/ide/hpt366.c4
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/tx4939ide.c2
-rw-r--r--drivers/iio/accel/kxcjk-1013.c2
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c1
-rw-r--r--drivers/iio/adc/at91_adc.c28
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c3
-rw-r--r--drivers/iio/chemical/Kconfig14
-rw-r--r--drivers/iio/chemical/bme680.h6
-rw-r--r--drivers/iio/chemical/bme680_core.c54
-rw-r--r--drivers/iio/chemical/bme680_i2c.c21
-rw-r--r--drivers/iio/chemical/bme680_spi.c115
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c7
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/gyro/bmg160_core.c6
-rw-r--r--drivers/iio/gyro/mpu3050-core.c8
-rw-r--r--drivers/iio/industrialio-buffer.c5
-rw-r--r--drivers/iio/industrialio-core.c4
-rw-r--r--drivers/infiniband/Kconfig15
-rw-r--r--drivers/infiniband/core/Makefile4
-rw-r--r--drivers/infiniband/core/cache.c118
-rw-r--r--drivers/infiniband/core/cgroup.c5
-rw-r--r--drivers/infiniband/core/cm.c3
-rw-r--r--drivers/infiniband/core/cma.c139
-rw-r--r--drivers/infiniband/core/cma_priv.h4
-rw-r--r--drivers/infiniband/core/core_priv.h35
-rw-r--r--drivers/infiniband/core/device.c1313
-rw-r--r--drivers/infiniband/core/iwcm.c13
-rw-r--r--drivers/infiniband/core/iwpm_msg.c232
-rw-r--r--drivers/infiniband/core/iwpm_util.c86
-rw-r--r--drivers/infiniband/core/iwpm_util.h12
-rw-r--r--drivers/infiniband/core/mad.c4
-rw-r--r--drivers/infiniband/core/netlink.c4
-rw-r--r--drivers/infiniband/core/nldev.c492
-rw-r--r--drivers/infiniband/core/rdma_core.c42
-rw-r--r--drivers/infiniband/core/restrack.c193
-rw-r--r--drivers/infiniband/core/restrack.h28
-rw-r--r--drivers/infiniband/core/rw.c12
-rw-r--r--drivers/infiniband/core/sa_query.c4
-rw-r--r--drivers/infiniband/core/security.c96
-rw-r--r--drivers/infiniband/core/sysfs.c93
-rw-r--r--drivers/infiniband/core/ucma.c7
-rw-r--r--drivers/infiniband/core/umem.c60
-rw-r--r--drivers/infiniband/core/umem_odp.c21
-rw-r--r--drivers/infiniband/core/user_mad.c52
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c69
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c57
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c2
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c15
-rw-r--r--drivers/infiniband/core/verbs.c73
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Makefile2
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c268
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h16
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c134
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c193
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h47
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c40
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h45
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c22
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h30
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h160
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c95
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c199
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h16
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c36
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c85
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c33
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h1
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile1
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c42
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h4
-rw-r--r--drivers/infiniband/hw/hfi1/common.h4
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c58
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h12
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c58
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c53
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h24
-rw-r--r--drivers/infiniband/hw/hfi1/init.c35
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.c34
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h99
-rw-r--r--drivers/infiniband/hw/hfi1/opfn.c323
-rw-r--r--drivers/infiniband/hw/hfi1/opfn.h85
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c19
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c80
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h7
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c1145
-rw-r--r--drivers/infiniband/hw/hfi1/rc.h51
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c48
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c24
-rw-r--r--drivers/infiniband/hw/hfi1/sdma_txreq.h1
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c16
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c5403
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.h311
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c118
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h1
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h8
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rc.h48
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h107
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tid.h1610
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h18
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c3
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c24
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h1
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c12
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c9
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c210
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h104
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h1
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_sdma.c6
-rw-r--r--drivers/infiniband/hw/hns/Kconfig1
-rw-r--r--drivers/infiniband/hw/hns/Makefile2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c32
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h63
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c74
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c38
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c596
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h92
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c88
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c99
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c25
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c91
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c16
-rw-r--r--drivers/infiniband/hw/i40iw/Makefile2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_osdep.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c137
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c19
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c77
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c13
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c90
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c12
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c15
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c15
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c493
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c6
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c6
-rw-r--r--drivers/infiniband/hw/mlx5/main.c268
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c5
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h41
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c126
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c319
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c324
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c11
-rw-r--r--drivers/infiniband/hw/mlx5/srq.h2
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c16
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c139
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c30
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c27
-rw-r--r--drivers/infiniband/hw/nes/Kconfig2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c313
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h1
-rw-r--r--drivers/infiniband/hw/ocrdma/Makefile2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c67
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c189
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h11
-rw-r--r--drivers/infiniband/hw/qedr/main.c9
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c204
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h10
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c27
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c75
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c20
-rw-r--r--drivers/infiniband/hw/usnic/Makefile2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.c26
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c57
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c26
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c114
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h28
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c65
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h1
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c14
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c21
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c6
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c98
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h12
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c38
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.c29
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.h7
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c104
-rw-r--r--drivers/infiniband/sw/rdmavt/rc.c13
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_cq.h10
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c34
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c67
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c97
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c77
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c40
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c103
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_fs.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c14
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c19
-rw-r--r--drivers/infiniband/ulp/isert/Makefile1
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c26
-rw-r--r--drivers/infiniband/ulp/srpt/Makefile1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c80
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h4
-rw-r--r--drivers/input/joystick/db9.c2
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/gpio_keys.c10
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c5
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c13
-rw-r--r--drivers/input/keyboard/qt2160.c9
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c6
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c4
-rw-r--r--drivers/input/keyboard/tm2-touchkey.c136
-rw-r--r--drivers/input/misc/Kconfig10
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ims-pcu.c27
-rw-r--r--drivers/input/misc/msm-vibrator.c281
-rw-r--r--drivers/input/misc/soc_button_array.c6
-rw-r--r--drivers/input/mouse/elan_i2c_core.c26
-rw-r--r--drivers/input/mouse/synaptics_i2c.c22
-rw-r--r--drivers/input/rmi4/rmi_driver.c6
-rw-r--r--drivers/input/rmi4/rmi_f11.c2
-rw-r--r--drivers/input/serio/i8042-sparcio.h21
-rw-r--r--drivers/input/tablet/wacom_serial4.c2
-rw-r--r--drivers/input/touchscreen/Kconfig7
-rw-r--r--drivers/input/touchscreen/ad7879.c11
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c110
-rw-r--r--drivers/input/touchscreen/goodix.c6
-rw-r--r--drivers/input/touchscreen/ili210x.c321
-rw-r--r--drivers/input/touchscreen/st1232.c154
-rw-r--r--drivers/input/touchscreen/stmfts.c30
-rw-r--r--drivers/input/touchscreen/sx8654.c255
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c4
-rw-r--r--drivers/iommu/Kconfig17
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c52
-rw-r--r--drivers/iommu/amd_iommu_init.c29
-rw-r--r--drivers/iommu/amd_iommu_types.h2
-rw-r--r--drivers/iommu/amd_iommu_v2.c24
-rw-r--r--drivers/iommu/arm-smmu-v3.c3
-rw-r--r--drivers/iommu/arm-smmu.c2
-rw-r--r--drivers/iommu/dma-iommu.c3
-rw-r--r--drivers/iommu/hyperv-iommu.c196
-rw-r--r--drivers/iommu/intel-iommu.c163
-rw-r--r--drivers/iommu/intel-pasid.c2
-rw-r--r--drivers/iommu/intel-svm.c88
-rw-r--r--drivers/iommu/intel_irq_remapping.c32
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c25
-rw-r--r--drivers/iommu/io-pgtable-arm.c3
-rw-r--r--drivers/iommu/io-pgtable.c5
-rw-r--r--drivers/iommu/iommu-debugfs.c23
-rw-r--r--drivers/iommu/iommu.c24
-rw-r--r--drivers/iommu/iova.c5
-rw-r--r--drivers/iommu/ipmmu-vmsa.c3
-rw-r--r--drivers/iommu/irq_remapping.c3
-rw-r--r--drivers/iommu/irq_remapping.h1
-rw-r--r--drivers/iommu/msm_iommu.c10
-rw-r--r--drivers/iommu/mtk_iommu.h3
-rw-r--r--drivers/iommu/mtk_iommu_v1.c2
-rw-r--r--drivers/iommu/qcom_iommu.c2
-rw-r--r--drivers/iommu/tegra-gart.c473
-rw-r--r--drivers/iommu/tegra-smmu.c4
-rw-r--r--drivers/irqchip/irq-ath79-misc.c11
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c265
-rw-r--r--drivers/irqchip/irq-gic.c45
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c8
-rw-r--r--drivers/irqchip/irq-ls1x.c1
-rw-r--r--drivers/irqchip/irq-mbigen.c3
-rw-r--r--drivers/irqchip/irq-mmp.c2
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c2
-rw-r--r--drivers/irqchip/irq-stm32-exti.c10
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c3
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c9
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c5
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c2
-rw-r--r--drivers/isdn/mISDN/socket.c4
-rw-r--r--drivers/leds/leds-mlxreg.c19
-rw-r--r--drivers/leds/leds-pca9532.c8
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c16
-rw-r--r--drivers/lightnvm/pblk-read.c50
-rw-r--r--drivers/lightnvm/pblk-rl.c7
-rw-r--r--drivers/macintosh/smu.c5
-rw-r--r--drivers/mailbox/Kconfig11
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/imx-mailbox.c4
-rw-r--r--drivers/mailbox/mailbox-test.c26
-rw-r--r--drivers/mailbox/stm32-ipcc.c4
-rw-r--r--drivers/mailbox/tegra-hsp.c2
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c725
-rw-r--r--drivers/md/Kconfig12
-rw-r--r--drivers/md/Makefile4
-rw-r--r--drivers/md/dm-bufio.c15
-rw-r--r--drivers/md/dm-cache-target.c127
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-crypt.c3
-rw-r--r--drivers/md/dm-init.c303
-rw-r--r--drivers/md/dm-integrity.c28
-rw-r--r--drivers/md/dm-ioctl.c103
-rw-r--r--drivers/md/dm-raid.c14
-rw-r--r--drivers/md/dm-rq.c27
-rw-r--r--drivers/md/dm-rq.h16
-rw-r--r--drivers/md/dm-snap.c8
-rw-r--r--drivers/md/dm-switch.c3
-rw-r--r--drivers/md/dm-table.c39
-rw-r--r--drivers/md/dm-thin.c14
-rw-r--r--drivers/md/dm-verity-fec.c6
-rw-r--r--drivers/md/dm-writecache.c2
-rw-r--r--drivers/md/dm-zoned-target.c1
-rw-r--r--drivers/md/dm.c169
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c27
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/md/raid5-log.h1
-rw-r--r--drivers/md/raid5-ppl.c69
-rw-r--r--drivers/md/raid5.c90
-rw-r--r--drivers/md/raid5.h9
-rw-r--r--drivers/media/cec/cec-api.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_i2c.c5
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c2
-rw-r--r--drivers/media/common/siano/sms-cards.c2
-rw-r--r--drivers/media/common/siano/smscoreapi.h2
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c12
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c53
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c41
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-memops.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c30
-rw-r--r--drivers/media/dvb-core/dmxdev.c8
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c5
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c2
-rw-r--r--drivers/media/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c2
-rw-r--r--drivers/media/dvb-frontends/dib0090.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c4
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c8
-rw-r--r--drivers/media/dvb-frontends/dib8000.c12
-rw-r--r--drivers/media/dvb-frontends/dib9000.c4
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h8
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver.h8
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c48
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.h12
-rw-r--r--drivers/media/dvb-frontends/drxd_firm.c2
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c30
-rw-r--r--drivers/media/dvb-frontends/drxk.h2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c8
-rw-r--r--drivers/media/dvb-frontends/ds3000.c4
-rw-r--r--drivers/media/dvb-frontends/isl6421.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c5
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c2
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c2
-rw-r--r--drivers/media/dvb-frontends/mt312.c4
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c4
-rw-r--r--drivers/media/dvb-frontends/or51211.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c2
-rw-r--r--drivers/media/dvb-frontends/sp8870.c4
-rw-r--r--drivers/media/dvb-frontends/stb0899_algo.c6
-rw-r--r--drivers/media/dvb-frontends/stv0367_defs.h2
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c4
-rw-r--r--drivers/media/dvb-frontends/stv0910.c4
-rw-r--r--drivers/media/dvb-frontends/stv6110.c2
-rw-r--r--drivers/media/dvb-frontends/tda1004x.h2
-rw-r--r--drivers/media/dvb-frontends/tda10086.c2
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c6
-rw-r--r--drivers/media/i2c/Kconfig36
-rw-r--r--drivers/media/i2c/Makefile4
-rw-r--r--drivers/media/i2c/adv7175.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c335
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c64
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h28
-rw-r--r--drivers/media/i2c/adv7842.c10
-rw-r--r--drivers/media/i2c/bt819.c4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c3
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.h3
-rw-r--r--drivers/media/i2c/cx25840/cx25840-ir.c4
-rw-r--r--drivers/media/i2c/dw9714.c2
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_mode.c2
-rw-r--r--drivers/media/i2c/imx214.c2
-rw-r--r--drivers/media/i2c/imx274.c24
-rw-r--r--drivers/media/i2c/lm3560.c2
-rw-r--r--drivers/media/i2c/lm3646.c2
-rw-r--r--drivers/media/i2c/m5mols/m5mols.h2
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c2
-rw-r--r--drivers/media/i2c/msp3400-driver.c2
-rw-r--r--drivers/media/i2c/mt9m001.c (renamed from drivers/media/i2c/soc_camera/soc_mt9m001.c)457
-rw-r--r--drivers/media/i2c/mt9m111.c39
-rw-r--r--drivers/media/i2c/mt9t112.c2
-rw-r--r--drivers/media/i2c/ov2640.c45
-rw-r--r--drivers/media/i2c/ov5640.c159
-rw-r--r--drivers/media/i2c/ov6650.c4
-rw-r--r--drivers/media/i2c/ov7670.c201
-rw-r--r--drivers/media/i2c/ov7740.c9
-rw-r--r--drivers/media/i2c/ov8856.c1268
-rw-r--r--drivers/media/i2c/ov9640.c (renamed from drivers/media/i2c/soc_camera/soc_ov9640.c)123
-rw-r--r--drivers/media/i2c/ov9640.h (renamed from drivers/media/i2c/soc_camera/ov9640.h)7
-rw-r--r--drivers/media/i2c/ov9650.c4
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/i2c/s5k4ecgx.c2
-rw-r--r--drivers/media/i2c/s5k6aa.c2
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/soc_camera/Makefile10
-rw-r--r--drivers/media/i2c/soc_camera/soc_mt9t112.c1157
-rw-r--r--drivers/media/i2c/soc_camera/soc_ov772x.c1123
-rw-r--r--drivers/media/i2c/soc_camera/soc_rj54n1cb0c.c1415
-rw-r--r--drivers/media/i2c/soc_camera/soc_tw9910.c999
-rw-r--r--drivers/media/i2c/tda1997x.c4
-rw-r--r--drivers/media/i2c/tda1997x_regs.h2
-rw-r--r--drivers/media/i2c/tda9840.c2
-rw-r--r--drivers/media/i2c/tea6415c.c2
-rw-r--r--drivers/media/i2c/tea6420.c2
-rw-r--r--drivers/media/i2c/tvaudio.c4
-rw-r--r--drivers/media/i2c/tvp514x.c2
-rw-r--r--drivers/media/i2c/tw9910.c29
-rw-r--r--drivers/media/i2c/video-i2c.c110
-rw-r--r--drivers/media/media-request.c3
-rw-r--r--drivers/media/pci/bt8xx/bttv-audio-hook.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-audio-hook.h2
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c12
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c12
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv.h2
-rw-r--r--drivers/media/pci/bt8xx/dst.c22
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c2
-rw-r--r--drivers/media/pci/cx18/cx18-cards.h2
-rw-r--r--drivers/media/pci/cx18/cx18-dvb.c6
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/pci/cx18/cx18-io.h2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/pci/cx18/cx23418.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-sram.h2
-rw-r--r--drivers/media/pci/cx25821/cx25821.h2
-rw-r--r--drivers/media/pci/dm1105/dm1105.c2
-rw-r--r--drivers/media/pci/dt3155/dt3155.c8
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c11
-rw-r--r--drivers/media/pci/ivtv/Kconfig23
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c2
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c16
-rw-r--r--drivers/media/pci/meye/meye.c8
-rw-r--r--drivers/media/pci/meye/meye.h4
-rw-r--r--drivers/media/pci/ngene/ngene-core.c2
-rw-r--r--drivers/media/pci/pt1/pt1.c54
-rw-r--r--drivers/media/pci/pt3/pt3.h2
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/media/pci/saa7146/mxb.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-dvb.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c2
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c132
-rw-r--r--drivers/media/pci/smipcie/smipcie.h1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-disp.c4
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/pci/ttpci/av7110.c6
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/platform/Kconfig5
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/aspeed-video.c1
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c4
-rw-r--r--drivers/media/platform/coda/coda-bit.c24
-rw-r--r--drivers/media/platform/coda/coda-common.c13
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c2
-rw-r--r--drivers/media/platform/coda/coda.h2
-rw-r--r--drivers/media/platform/davinci/isif.c4
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-command.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-param.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c16
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c4
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.h2
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c4
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/imx-pxp.c16
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c4
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c40
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.h6
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c20
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c64
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c163
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h35
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c74
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c104
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h2
-rw-r--r--drivers/media/platform/mx2_emmaprp.c6
-rw-r--r--drivers/media/platform/omap/omap_vout.c16
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h4
-rw-r--r--drivers/media/platform/omap3isp/isp.c2
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c4
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c2
-rw-r--r--drivers/media/platform/pxa_camera.c10
-rw-r--r--drivers/media/platform/qcom/venus/core.c12
-rw-r--r--drivers/media/platform/qcom/venus/core.h3
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c53
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c3
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c26
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c66
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c4
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c6
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c6
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c6
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c63
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h6
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c8
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c4
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.h2
-rw-r--r--drivers/media/platform/sh_veu.c4
-rw-r--r--drivers/media/platform/soc_camera/Kconfig26
-rw-r--r--drivers/media/platform/soc_camera/Makefile9
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c1810
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c188
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c426
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.h47
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-debug.c34
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h2
-rw-r--r--drivers/media/platform/sti/delta/delta.h2
-rw-r--r--drivers/media/platform/sti/hva/hva-debugfs.c36
-rw-r--r--drivers/media/platform/sti/hva/hva-h264.c2
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c39
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h5
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c3
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c14
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2
-rw-r--r--drivers/media/platform/vicodec/codec-fwht.c148
-rw-r--r--drivers/media/platform/vicodec/codec-fwht.h30
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.c394
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.h15
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c658
-rw-r--r--drivers/media/platform/video-mux.c20
-rw-r--r--drivers/media/platform/vim2m.c675
-rw-r--r--drivers/media/platform/vimc/Makefile3
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c26
-rw-r--r--drivers/media/platform/vimc/vimc-common.c35
-rw-r--r--drivers/media/platform/vimc/vimc-common.h17
-rw-r--r--drivers/media/platform/vimc/vimc-core.c5
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c26
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c28
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c51
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c188
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.h38
-rw-r--r--drivers/media/platform/vivid/vivid-core.c26
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c10
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c30
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c57
-rw-r--r--drivers/media/platform/vsp1/vsp1_brx.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c6
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c2
-rw-r--r--drivers/media/radio/radio-si476x.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c52
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h1
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c4
-rw-r--r--drivers/media/rc/Kconfig17
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ati_remote.c2
-rw-r--r--drivers/media/rc/ene_ir.c2
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/fintek-cir.h2
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c2
-rw-r--r--drivers/media/rc/ir-rcmm-decoder.c254
-rw-r--r--drivers/media/rc/ir-xmp-decoder.c2
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/keymaps/rc-behold-columbus.c4
-rw-r--r--drivers/media/rc/keymaps/rc-behold.c2
-rw-r--r--drivers/media/rc/keymaps/rc-manli.c2
-rw-r--r--drivers/media/rc/keymaps/rc-powercolor-real-angel.c2
-rw-r--r--drivers/media/rc/mceusb.c2
-rw-r--r--drivers/media/rc/rc-core-priv.h5
-rw-r--r--drivers/media/rc/rc-ir-raw.c2
-rw-r--r--drivers/media/rc/rc-main.c34
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/spi/cxd2880-spi.c8
-rw-r--r--drivers/media/tuners/mxl5005s.c2
-rw-r--r--drivers/media/tuners/qm1d1b0004.h2
-rw-r--r--drivers/media/tuners/r820t.c4
-rw-r--r--drivers/media/tuners/tda18271-common.c10
-rw-r--r--drivers/media/tuners/tda18271-fe.c2
-rw-r--r--drivers/media/tuners/tda18271.h4
-rw-r--r--drivers/media/tuners/xc4000.c4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c2
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c2
-rw-r--r--drivers/media/usb/au0828/au0828.h2
-rw-r--r--drivers/media/usb/cpia2/cpia2.h2
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c11
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c4
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c2
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h2
-rw-r--r--drivers/media/usb/gspca/Kconfig2
-rw-r--r--drivers/media/usb/gspca/autogain_functions.c2
-rw-r--r--drivers/media/usb/gspca/benq.c4
-rw-r--r--drivers/media/usb/gspca/cpia1.c14
-rw-r--r--drivers/media/usb/gspca/gspca.c18
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_mt9m111.c8
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_po1030.c8
-rw-r--r--drivers/media/usb/gspca/mr97310a.c10
-rw-r--r--drivers/media/usb/gspca/ov519.c4
-rw-r--r--drivers/media/usb/gspca/ov534.c153
-rw-r--r--drivers/media/usb/gspca/pac_common.h2
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c2
-rw-r--r--drivers/media/usb/gspca/sonixb.c4
-rw-r--r--drivers/media/usb/gspca/sonixj.c2
-rw-r--r--drivers/media/usb/gspca/spca501.c2
-rw-r--r--drivers/media/usb/gspca/sq905.c2
-rw-r--r--drivers/media/usb/gspca/sunplus.c4
-rw-r--r--drivers/media/usb/gspca/t613.c2
-rw-r--r--drivers/media/usb/gspca/touptek.c4
-rw-r--r--drivers/media/usb/gspca/w996Xcf.c2
-rw-r--r--drivers/media/usb/gspca/zc3xx-reg.h2
-rw-r--r--drivers/media/usb/gspca/zc3xx.c8
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c14
-rw-r--r--drivers/media/usb/hdpvr/hdpvr.h2
-rw-r--r--drivers/media/usb/pwc/pwc-dec23.c4
-rw-r--r--drivers/media/usb/pwc/pwc-if.c71
-rw-r--r--drivers/media/usb/pwc/pwc-misc.c2
-rw-r--r--drivers/media/usb/siano/smsusb.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-reg.h4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-core.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-i2c.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-stds.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c4
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c2
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c10
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/usb/usbvision/usbvision.h10
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c2
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c16
-rw-r--r--drivers/media/usb/uvc/uvc_video.c10
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h6
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c20
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c52
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c12
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c22
-rw-r--r--drivers/memory/tegra/mc.c118
-rw-r--r--drivers/memory/tegra/mc.h10
-rw-r--r--drivers/memstick/host/jmb38x_ms.c4
-rw-r--r--drivers/mfd/Kconfig5
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c42
-rw-r--r--drivers/mfd/twl-core.c23
-rw-r--r--drivers/mfd/wm831x-core.c2
-rw-r--r--drivers/mfd/wm8400-core.c6
-rw-r--r--drivers/misc/fastrpc.c7
-rw-r--r--drivers/misc/habanalabs/command_submission.c6
-rw-r--r--drivers/misc/habanalabs/debugfs.c7
-rw-r--r--drivers/misc/habanalabs/device.c71
-rw-r--r--drivers/misc/habanalabs/goya/goya.c74
-rw-r--r--drivers/misc/habanalabs/habanalabs.h21
-rw-r--r--drivers/misc/habanalabs/hw_queue.c5
-rw-r--r--drivers/misc/habanalabs/memory.c38
-rw-r--r--drivers/misc/habanalabs/mmu.c6
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/mei/hw-me.c3
-rw-r--r--drivers/misc/mic/Kconfig4
-rw-r--r--drivers/misc/mic/scif/scif_rma.c38
-rw-r--r--drivers/misc/pch_phub.c1
-rw-r--r--drivers/misc/pci_endpoint_test.c1
-rw-r--r--drivers/misc/tifm_7xx1.c1
-rw-r--r--drivers/mmc/host/alcor.c60
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c16
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c8
-rw-r--r--drivers/mmc/host/sdhci-omap.c41
-rw-r--r--drivers/mmc/host/sdhci.c13
-rw-r--r--drivers/mmc/host/tifm_sd.c3
-rw-r--r--drivers/mmc/host/via-sdmmc.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c6
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c12
-rw-r--r--drivers/mtd/nand/raw/r852.c2
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c1
-rw-r--r--drivers/mtd/ubi/cdev.c30
-rw-r--r--drivers/mtd/ubi/ubi.h1
-rw-r--r--drivers/mtd/ubi/wl.c174
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/bonding/bond_main.c6
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c11
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h3
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c48
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h4
-rw-r--r--drivers/net/dsa/qca8k.c174
-rw-r--r--drivers/net/dsa/qca8k.h13
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/8390/mac8390.c19
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c10
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c1
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c5
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c5
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c3
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c72
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c14
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c48
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h12
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c9
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c15
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c30
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c53
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c18
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c37
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c5
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c62
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c16
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c161
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c42
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h93
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c317
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c55
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c24
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/cls.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c85
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c96
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c15
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c6
-rw-r--r--drivers/net/ethernet/realtek/8139too.c1
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c16
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c11
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c7
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c5
-rw-r--r--drivers/net/ethernet/sfc/falcon/io.h2
-rw-r--r--drivers/net/ethernet/sfc/io.h2
-rw-r--r--drivers/net/ethernet/silan/sc92031.c15
-rw-r--r--drivers/net/ethernet/sis/sis900.c10
-rw-r--r--drivers/net/ethernet/socionext/netsec.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c138
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/via/via-rhine.c3
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c6
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c15
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c6
-rw-r--r--drivers/net/hyperv/netvsc_drv.c32
-rw-r--r--drivers/net/ieee802154/adf7242.c4
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/ieee802154/mcr20a.c6
-rw-r--r--drivers/net/phy/Kconfig3
-rw-r--r--drivers/net/phy/broadcom.c13
-rw-r--r--drivers/net/phy/dp83822.c34
-rw-r--r--drivers/net/phy/marvell.c6
-rw-r--r--drivers/net/phy/meson-gxl.c6
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/spi_ks8995.c9
-rw-r--r--drivers/net/ppp/ppp_mppe.c1
-rw-r--r--drivers/net/ppp/pptp.c1
-rw-r--r--drivers/net/slip/slhc.c2
-rw-r--r--drivers/net/team/team.c33
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/aqc111.c15
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/qmi_wwan.c11
-rw-r--r--drivers/net/vrf.c11
-rw-r--r--drivers/net/vxlan.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c24
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c5
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c7
-rw-r--r--drivers/net/wireless/broadcom/b43/sysfs.c1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/ilt.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c20
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/phy.c1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/pio.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/radio.c4
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/sysfs.c1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c65
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c55
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c15
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c1
-rw-r--r--drivers/nfc/st95hf/core.c7
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c7
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c25
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.h5
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.c33
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c20
-rw-r--r--drivers/ntb/ntb_transport.c31
-rw-r--r--drivers/ntb/test/ntb_perf.c3
-rw-r--r--drivers/nvdimm/btt.c33
-rw-r--r--drivers/nvdimm/btt.h2
-rw-r--r--drivers/nvdimm/btt_devs.c26
-rw-r--r--drivers/nvdimm/dimm_devs.c7
-rw-r--r--drivers/nvdimm/e820.c1
-rw-r--r--drivers/nvdimm/label.c26
-rw-r--r--drivers/nvdimm/namespace_devs.c23
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/of_pmem.c2
-rw-r--r--drivers/nvdimm/pfn_devs.c24
-rw-r--r--drivers/nvdimm/pmem.c8
-rw-r--r--drivers/nvdimm/region_devs.c8
-rw-r--r--drivers/nvdimm/security.c118
-rw-r--r--drivers/nvme/host/core.c32
-rw-r--r--drivers/nvme/host/fc.c56
-rw-r--r--drivers/nvme/host/multipath.c5
-rw-r--r--drivers/nvme/host/nvme.h5
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/host/tcp.c32
-rw-r--r--drivers/nvme/host/trace.c14
-rw-r--r--drivers/nvme/host/trace.h2
-rw-r--r--drivers/nvme/target/admin-cmd.c5
-rw-r--r--drivers/nvme/target/core.c24
-rw-r--r--drivers/nvme/target/discovery.c68
-rw-r--r--drivers/nvme/target/fc.c42
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c8
-rw-r--r--drivers/nvme/target/io-cmd-file.c22
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/of/Kconfig6
-rw-r--r--drivers/of/fdt.c8
-rw-r--r--drivers/of/of_net.c1
-rw-r--r--drivers/of/of_reserved_mem.c24
-rw-r--r--drivers/of/unittest.c23
-rw-r--r--drivers/opp/core.c56
-rw-r--r--drivers/opp/of.c16
-rw-r--r--drivers/parisc/ccio-dma.c4
-rw-r--r--drivers/parisc/iosapic.c6
-rw-r--r--drivers/parport/daisy.c32
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c10
-rw-r--r--drivers/pci/ats.c57
-rw-r--r--drivers/pci/controller/Kconfig3
-rw-r--r--drivers/pci/controller/dwc/Kconfig4
-rw-r--r--drivers/pci/controller/dwc/Makefile2
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c94
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c224
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c156
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c16
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c115
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c19
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h60
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c2
-rw-r--r--drivers/pci/controller/pci-aardvark.c4
-rw-r--r--drivers/pci/controller/pci-hyperv.c61
-rw-r--r--drivers/pci/controller/pci-mvebu.c2
-rw-r--r--drivers/pci/controller/pcie-altera.c270
-rw-r--r--drivers/pci/controller/pcie-cadence-ep.c25
-rw-r--r--drivers/pci/controller/pcie-mediatek.c13
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c16
-rw-r--r--drivers/pci/controller/vmd.c22
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c97
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c53
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c4
-rw-r--r--drivers/pci/hotplug/ibmphp.h1
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c47
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c21
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/pci-bridge-emul.c86
-rw-r--r--drivers/pci/pci-bridge-emul.h13
-rw-r--r--drivers/pci/pci-driver.c4
-rw-r--r--drivers/pci/pci.c155
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/Kconfig15
-rw-r--r--drivers/pci/pcie/Makefile1
-rw-r--r--drivers/pci/pcie/aer.c9
-rw-r--r--drivers/pci/pcie/bw_notification.c121
-rw-r--r--drivers/pci/pcie/dpc.c27
-rw-r--r--drivers/pci/pcie/pme.c48
-rw-r--r--drivers/pci/pcie/portdrv.h10
-rw-r--r--drivers/pci/pcie/portdrv_core.c20
-rw-r--r--drivers/pci/pcie/portdrv_pci.c9
-rw-r--r--drivers/pci/probe.c122
-rw-r--r--drivers/pci/quirks.c6
-rw-r--r--drivers/pci/setup-bus.c63
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm-cci.c21
-rw-r--r--drivers/perf/arm-ccn.c25
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c865
-rw-r--r--drivers/perf/xgene_pmu.c2
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c5
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c4
-rw-r--r--drivers/pinctrl/berlin/pinctrl-as370.c58
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c1
-rw-r--r--drivers/pinctrl/freescale/Kconfig16
-rw-r--r--drivers/pinctrl/freescale/Makefile2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mm.c348
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8qm.c326
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c24
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c15
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c17
-rw-r--r--drivers/pinctrl/pinconf.c222
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91.c134
-rw-r--r--drivers/pinctrl/pinctrl-at91.h3
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c367
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c31
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c25
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c12
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-emev2.c20
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c17
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c70
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7792.c1
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c16
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c90
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c90
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c404
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77970.c128
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77980.c64
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c388
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77995.c11
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c3
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c2
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h3
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c4
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c70
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c41
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h12
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c7
-rw-r--r--drivers/platform/chrome/Kconfig2
-rw-r--r--drivers/platform/chrome/Makefile2
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c17
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c32
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c22
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c24
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c34
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.c78
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.h63
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_reg.c73
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_reg.h20
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c19
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c20
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c26
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c24
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c19
-rw-r--r--drivers/platform/chrome/wilco_ec/Kconfig20
-rw-r--r--drivers/platform/chrome/wilco_ec/Makefile6
-rw-r--r--drivers/platform/chrome/wilco_ec/core.c136
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c238
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c237
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c28
-rw-r--r--drivers/platform/x86/asus-wmi.c9
-rw-r--r--drivers/platform/x86/dell-smbios-wmi.c2
-rw-r--r--drivers/platform/x86/dell-wmi-descriptor.c2
-rw-r--r--drivers/platform/x86/dell-wmi.c7
-rw-r--r--drivers/platform/x86/dell_rbu.c50
-rw-r--r--drivers/platform/x86/huawei-wmi.c3
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c9
-rw-r--r--drivers/platform/x86/ideapad-laptop.c37
-rw-r--r--drivers/platform/x86/intel-hid.c7
-rw-r--r--drivers/platform/x86/intel-wmi-thunderbolt.c2
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c32
-rw-r--r--drivers/platform/x86/intel_pmc_core.c159
-rw-r--r--drivers/platform/x86/intel_pmc_core.h14
-rw-r--r--drivers/platform/x86/mlx-platform.c105
-rw-r--r--drivers/platform/x86/pmc_atom.c21
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c79
-rw-r--r--drivers/platform/x86/wmi-bmof.c2
-rw-r--r--drivers/platform/x86/wmi.c5
-rw-r--r--drivers/power/supply/cpcap-battery.c3
-rw-r--r--drivers/power/supply/goldfish_battery.c2
-rw-r--r--drivers/power/supply/power_supply_sysfs.c6
-rw-r--r--drivers/pwm/Kconfig17
-rw-r--r--drivers/pwm/Makefile3
-rw-r--r--drivers/pwm/core.c10
-rw-r--r--drivers/pwm/pwm-atmel.c111
-rw-r--r--drivers/pwm/pwm-bcm-kona.c16
-rw-r--r--drivers/pwm/pwm-hibvt.c44
-rw-r--r--drivers/pwm/pwm-imx1.c199
-rw-r--r--drivers/pwm/pwm-imx27.c (renamed from drivers/pwm/pwm-imx.c)224
-rw-r--r--drivers/pwm/pwm-mtk-disp.c11
-rw-r--r--drivers/pwm/pwm-rcar.c99
-rw-r--r--drivers/ras/cec.c4
-rw-r--r--drivers/regulator/88pm800.c18
-rw-r--r--drivers/regulator/88pm8607.c43
-rw-r--r--drivers/regulator/Kconfig11
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/ab3100.c45
-rw-r--r--drivers/regulator/ab8500-ext.c49
-rw-r--r--drivers/regulator/ab8500.c20
-rw-r--r--drivers/regulator/act8865-regulator.c147
-rw-r--r--drivers/regulator/anatop-regulator.c63
-rw-r--r--drivers/regulator/arizona-ldo1.c19
-rw-r--r--drivers/regulator/arizona-micsupp.c19
-rw-r--r--drivers/regulator/as3711-regulator.c37
-rw-r--r--drivers/regulator/as3722-regulator.c287
-rw-r--r--drivers/regulator/axp20x-regulator.c23
-rw-r--r--drivers/regulator/bcm590xx-regulator.c105
-rw-r--r--drivers/regulator/bd718x7-regulator.c4
-rw-r--r--drivers/regulator/core.c30
-rw-r--r--drivers/regulator/cpcap-regulator.c15
-rw-r--r--drivers/regulator/da903x.c16
-rw-r--r--drivers/regulator/da9052-regulator.c55
-rw-r--r--drivers/regulator/da9055-regulator.c89
-rw-r--r--drivers/regulator/da9062-regulator.c146
-rw-r--r--drivers/regulator/da9063-regulator.c134
-rw-r--r--drivers/regulator/da9210-regulator.c23
-rw-r--r--drivers/regulator/da9210-regulator.h17
-rw-r--r--drivers/regulator/da9211-regulator.c24
-rw-r--r--drivers/regulator/da9211-regulator.h11
-rw-r--r--drivers/regulator/db8500-prcmu.c143
-rw-r--r--drivers/regulator/dbx500-prcmu.h4
-rw-r--r--drivers/regulator/fan53555.c60
-rw-r--r--drivers/regulator/gpio-regulator.c22
-rw-r--r--drivers/regulator/hi6421-regulator.c232
-rw-r--r--drivers/regulator/hi6421v530-regulator.c26
-rw-r--r--drivers/regulator/hi655x-regulator.c37
-rw-r--r--drivers/regulator/lm363x-regulator.c8
-rw-r--r--drivers/regulator/lp8755.c15
-rw-r--r--drivers/regulator/lp87565-regulator.c49
-rw-r--r--drivers/regulator/ltc3589.c269
-rw-r--r--drivers/regulator/ltc3676.c10
-rw-r--r--drivers/regulator/max14577-regulator.c55
-rw-r--r--drivers/regulator/max77620-regulator.c2
-rw-r--r--drivers/regulator/max77650-regulator.c2
-rw-r--r--drivers/regulator/max8925-regulator.c76
-rw-r--r--drivers/regulator/max8998.c300
-rw-r--r--drivers/regulator/mcp16502.c67
-rw-r--r--drivers/regulator/mt6311-regulator.c17
-rw-r--r--drivers/regulator/mt6311-regulator.h10
-rw-r--r--drivers/regulator/mt6323-regulator.c32
-rw-r--r--drivers/regulator/mt6380-regulator.c25
-rw-r--r--drivers/regulator/mt6397-regulator.c33
-rw-r--r--drivers/regulator/of_regulator.c5
-rw-r--r--drivers/regulator/palmas-regulator.c12
-rw-r--r--drivers/regulator/pv88060-regulator.c22
-rw-r--r--drivers/regulator/pv88060-regulator.h11
-rw-r--r--drivers/regulator/pv88080-regulator.c22
-rw-r--r--drivers/regulator/pv88080-regulator.h11
-rw-r--r--drivers/regulator/pv88090-regulator.c22
-rw-r--r--drivers/regulator/pv88090-regulator.h11
-rw-r--r--drivers/regulator/rc5t583-regulator.c25
-rw-r--r--drivers/regulator/rn5t618-regulator.c8
-rw-r--r--drivers/regulator/s2mpa01.c41
-rw-r--r--drivers/regulator/sc2731-regulator.c2
-rw-r--r--drivers/regulator/sky81452-regulator.c26
-rw-r--r--drivers/regulator/stm32-pwr.c186
-rw-r--r--drivers/regulator/sy8106a-regulator.c40
-rw-r--r--drivers/regulator/tps6507x-regulator.c113
-rw-r--r--drivers/regulator/tps65086-regulator.c4
-rw-r--r--drivers/regulator/tps65132-regulator.c29
-rw-r--r--drivers/regulator/tps65217-regulator.c9
-rw-r--r--drivers/regulator/tps65218-regulator.c56
-rw-r--r--drivers/regulator/tps6524x-regulator.c11
-rw-r--r--drivers/regulator/tps80031-regulator.c48
-rw-r--r--drivers/regulator/twl-regulator.c6
-rw-r--r--drivers/regulator/vctrl-regulator.c4
-rw-r--r--drivers/regulator/vexpress-regulator.c72
-rw-r--r--drivers/regulator/wm831x-dcdc.c23
-rw-r--r--drivers/regulator/wm831x-isink.c66
-rw-r--r--drivers/regulator/wm831x-ldo.c21
-rw-r--r--drivers/regulator/wm8350-regulator.c102
-rw-r--r--drivers/regulator/wm8400-regulator.c39
-rw-r--r--drivers/regulator/wm8994-regulator.c19
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c6
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c209
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c13
-rw-r--r--drivers/remoteproc/qcom_sysmon.c82
-rw-r--r--drivers/remoteproc/qcom_wcnss.c6
-rw-r--r--drivers/remoteproc/remoteproc_core.c160
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c47
-rw-r--r--drivers/remoteproc/remoteproc_internal.h12
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c61
-rw-r--r--drivers/remoteproc/st_remoteproc.c91
-rw-r--r--drivers/reset/reset-meson-audio-arb.c1
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c24
-rw-r--r--drivers/rtc/Kconfig15
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-cros-ec.c4
-rw-r--r--drivers/rtc/rtc-da9063.c7
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-wilco-ec.c177
-rw-r--r--drivers/s390/block/dasd_eckd.c6
-rw-r--r--drivers/s390/char/con3270.c2
-rw-r--r--drivers/s390/char/fs3270.c3
-rw-r--r--drivers/s390/char/raw3270.c3
-rw-r--r--drivers/s390/char/raw3270.h4
-rw-r--r--drivers/s390/char/sclp.c14
-rw-r--r--drivers/s390/char/sclp.h10
-rw-r--r--drivers/s390/char/sclp_early.c5
-rw-r--r--drivers/s390/char/sclp_early_core.c20
-rw-r--r--drivers/s390/char/sclp_sdias.c74
-rw-r--r--drivers/s390/char/tty3270.c3
-rw-r--r--drivers/s390/char/zcore.c22
-rw-r--r--drivers/s390/cio/Makefile3
-rw-r--r--drivers/s390/cio/airq.c41
-rw-r--r--drivers/s390/cio/chsc.c50
-rw-r--r--drivers/s390/cio/chsc.h1
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/cio.h4
-rw-r--r--drivers/s390/cio/ioasm.c1
-rw-r--r--drivers/s390/cio/qdio.h6
-rw-r--r--drivers/s390/cio/qdio_debug.c9
-rw-r--r--drivers/s390/cio/qdio_main.c211
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c88
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c21
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h2
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c89
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c143
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c227
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h48
-rw-r--r--drivers/s390/crypto/ap_bus.c23
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/ap_queue.c26
-rw-r--r--drivers/s390/crypto/pkey_api.c3
-rw-r--r--drivers/s390/crypto/zcrypt_api.c34
-rw-r--r--drivers/s390/net/ctcm_main.c1
-rw-r--r--drivers/s390/net/ism.h29
-rw-r--r--drivers/s390/net/ism_drv.c20
-rw-r--r--drivers/s390/net/qeth_core_main.c5
-rw-r--r--drivers/s390/net/qeth_l2_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/s390/scsi/zfcp_erp.c17
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c21
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/s390/virtio/virtio_ccw.c14
-rw-r--r--drivers/scsi/Kconfig6
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/Makefile2
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aacraid/aacraid.h11
-rw-r--r--drivers/scsi/aacraid/commctrl.c2
-rw-r--r--drivers/scsi/aacraid/commsup.c34
-rw-r--r--drivers/scsi/aacraid/linit.c22
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/aic7xxx/Makefile1
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c14
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h1
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c10
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c1
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h13
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c348
-rw-r--r--drivers/scsi/bfa/bfa.h3
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c8
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c19
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c2
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c2
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c9
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c18
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/csiostor/csio_attr.c3
-rw-r--r--drivers/scsi/csiostor/csio_init.c6
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c13
-rw-r--r--drivers/scsi/cxgbi/Makefile2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c6
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c13
-rw-r--r--drivers/scsi/cxlflash/common.h3
-rw-r--r--drivers/scsi/cxlflash/main.c2
-rw-r--r--drivers/scsi/cxlflash/superpipe.c12
-rw-r--r--drivers/scsi/dpt_i2o.c73
-rw-r--r--drivers/scsi/esas2r/esas2r.h4
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c3
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c16
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c2
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c7
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c15
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c3
-rw-r--r--drivers/scsi/fnic/fnic.h6
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c88
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c10
-rw-r--r--drivers/scsi/fnic/fnic_io.h3
-rw-r--r--drivers/scsi/fnic/fnic_isr.c6
-rw-r--r--drivers/scsi/fnic/fnic_main.c37
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c40
-rw-r--r--drivers/scsi/fnic/fnic_stats.h6
-rw-r--r--drivers/scsi/fnic/fnic_trace.c28
-rw-r--r--drivers/scsi/fnic/fnic_trace.h4
-rw-r--r--drivers/scsi/fnic/vnic_dev.c270
-rw-r--r--drivers/scsi/fnic/vnic_dev.h2
-rw-r--r--drivers/scsi/fnic/vnic_devcmd.h160
-rw-r--r--drivers/scsi/fnic/vnic_resource.h7
-rw-r--r--drivers/scsi/fnic/vnic_rq.c5
-rw-r--r--drivers/scsi/fnic/vnic_wq.c69
-rw-r--r--drivers/scsi/fnic/vnic_wq.h8
-rw-r--r--drivers/scsi/gdth.c1286
-rw-r--r--drivers/scsi/gdth.h30
-rw-r--r--drivers/scsi/gdth_ioctl.h89
-rw-r--r--drivers/scsi/gdth_proc.c113
-rw-r--r--drivers/scsi/gdth_proc.h3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h110
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c901
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c26
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c468
-rw-r--r--drivers/scsi/hpsa.c19
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c39
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h7
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c23
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c6
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/iscsi_tcp.c9
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/libiscsi.c86
-rw-r--r--drivers/scsi/libiscsi_tcp.c19
-rw-r--r--drivers/scsi/libsas/sas_expander.c38
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h97
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c469
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h36
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c18
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c1207
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h73
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h16
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c2291
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c745
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h66
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c448
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c891
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h63
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c2293
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h89
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h304
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c27
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h54
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c409
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h1
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c11
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c28
-rw-r--r--drivers/scsi/mvumi.c5
-rw-r--r--drivers/scsi/nsp32.c1
-rw-r--r--drivers/scsi/osd/Kbuild20
-rw-r--r--drivers/scsi/osd/Kconfig49
-rw-r--r--drivers/scsi/osd/osd_debug.h30
-rw-r--r--drivers/scsi/osd/osd_initiator.c2076
-rw-r--r--drivers/scsi/osd/osd_uld.c571
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/pcmcia/Makefile2
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c3
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c18
-rw-r--r--drivers/scsi/qedf/qedf_io.c7
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c17
-rw-r--r--drivers/scsi/qedi/qedi_fw.c8
-rw-r--r--drivers/scsi/qedi/qedi_main.c7
-rw-r--r--drivers/scsi/qla1280.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c115
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h43
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c45
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c77
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c395
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c25
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c47
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c279
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c237
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h26
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c31
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c4
-rw-r--r--drivers/scsi/qlogicpti.c3
-rw-r--r--drivers/scsi/qlogicpti.h3
-rw-r--r--drivers/scsi/scsi.c16
-rw-r--r--drivers/scsi/scsi_debug.c186
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_dh.c1
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_lib.c88
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c1
-rw-r--r--drivers/scsi/sd.c548
-rw-r--r--drivers/scsi/sd.h64
-rw-r--r--drivers/scsi/smartpqi/Makefile1
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c9
-rw-r--r--drivers/scsi/snic/snic_debugfs.c133
-rw-r--r--drivers/scsi/snic/snic_main.c14
-rw-r--r--drivers/scsi/snic/snic_stats.h2
-rw-r--r--drivers/scsi/snic/snic_trc.c12
-rw-r--r--drivers/scsi/snic/snic_trc.h4
-rw-r--r--drivers/scsi/sr.c1
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/scsi/storvsc_drv.c15
-rw-r--r--drivers/scsi/ufs/Kconfig1
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c124
-rw-r--r--drivers/scsi/ufs/ufs-hisi.h4
-rw-r--r--drivers/scsi/ufs/ufs.h1
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c63
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h29
-rw-r--r--drivers/scsi/ufs/ufshcd-dwc.c4
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.h2
-rw-r--r--drivers/scsi/ufs/ufshcd.c90
-rw-r--r--drivers/scsi/ufs/ufshcd.h2
-rw-r--r--drivers/scsi/virtio_scsi.c17
-rw-r--r--drivers/soc/bcm/bcm2835-power.c49
-rw-r--r--drivers/spi/Kconfig16
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c8
-rw-r--r--drivers/spi/spi-at91-usart.c8
-rw-r--r--drivers/spi/spi-bcm2835.c39
-rw-r--r--drivers/spi/spi-bcm2835aux.c205
-rw-r--r--drivers/spi/spi-bitbang.c66
-rw-r--r--drivers/spi/spi-dw-mmio.c12
-rw-r--r--drivers/spi/spi-ep93xx.c33
-rw-r--r--drivers/spi/spi-fsl-lib.h2
-rw-r--r--drivers/spi/spi-fsl-lpspi.c573
-rw-r--r--drivers/spi/spi-fsl-qspi.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c81
-rw-r--r--drivers/spi/spi-gpio.c227
-rw-r--r--drivers/spi/spi-imx.c9
-rw-r--r--drivers/spi/spi-mem.c8
-rw-r--r--drivers/spi/spi-mt7621.c (renamed from drivers/staging/mt7621-spi/spi-mt7621.c)83
-rw-r--r--drivers/spi/spi-mxic.c6
-rw-r--r--drivers/spi/spi-orion.c4
-rw-r--r--drivers/spi/spi-pic32.c2
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c4
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c5
-rw-r--r--drivers/spi/spi-pxa2xx.c38
-rw-r--r--drivers/spi/spi-rspi.c119
-rw-r--r--drivers/spi/spi-sh-msiof.c224
-rw-r--r--drivers/spi/spi-stm32-qspi.c229
-rw-r--r--drivers/spi/spi-stm32.c5
-rw-r--r--drivers/spi/spi-tegra114.c310
-rw-r--r--drivers/spi/spi-tegra20-slink.c12
-rw-r--r--drivers/spi/spi-topcliff-pch.c16
-rw-r--r--drivers/spi/spi-zynq-qspi.c761
-rw-r--r--drivers/spi/spi.c76
-rw-r--r--drivers/spi/spidev.c4
-rw-r--r--drivers/ssb/pci.c1
-rw-r--r--drivers/ssb/pcmcia.c4
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/axis-fifo/Kconfig1
-rw-r--r--drivers/staging/comedi/comedidev.h2
-rw-r--r--drivers/staging/comedi/drivers.c33
-rw-r--r--drivers/staging/comedi/drivers/mite.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c10
-rw-r--r--drivers/staging/comedi/drivers/s626.c2
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c8
-rw-r--r--drivers/staging/erofs/data.c2
-rw-r--r--drivers/staging/erofs/dir.c45
-rw-r--r--drivers/staging/erofs/unzip_vle.c45
-rw-r--r--drivers/staging/erofs/unzip_vle_lz4.c7
-rw-r--r--drivers/staging/iio/adc/ad7192.c8
-rw-r--r--drivers/staging/iio/meter/ade7854.c2
-rw-r--r--drivers/staging/ks7010/ks_hostif.c1
-rw-r--r--drivers/staging/media/Kconfig6
-rw-r--r--drivers/staging/media/Makefile3
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c2
-rw-r--r--drivers/staging/media/imx/Kconfig9
-rw-r--r--drivers/staging/media/imx/Makefile4
-rw-r--r--drivers/staging/media/imx/TODO9
-rw-r--r--drivers/staging/media/imx/imx-ic-common.c6
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c25
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c91
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c119
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c230
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c90
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c122
-rw-r--r--drivers/staging/media/imx/imx-media-internal-sd.c20
-rw-r--r--drivers/staging/media/imx/imx-media-of.c6
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c47
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c21
-rw-r--r--drivers/staging/media/imx/imx-media.h45
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c1369
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c1160
-rw-r--r--drivers/staging/media/imx074/Kconfig5
-rw-r--r--drivers/staging/media/imx074/Makefile1
-rw-r--r--drivers/staging/media/imx074/TODO5
-rw-r--r--drivers/staging/media/ipu3/Makefile6
-rw-r--r--drivers/staging/media/ipu3/TODO7
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h10
-rw-r--r--drivers/staging/media/ipu3/ipu3-abi.h2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.c18
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.h8
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c271
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.h8
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-pool.c32
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-pool.h30
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c460
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.h92
-rw-r--r--drivers/staging/media/ipu3/ipu3-dmamap.c43
-rw-r--r--drivers/staging/media/ipu3/ipu3-dmamap.h14
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.c125
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.h18
-rw-r--r--drivers/staging/media/ipu3/ipu3-tables.c50
-rw-r--r--drivers/staging/media/ipu3/ipu3-tables.h54
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c299
-rw-r--r--drivers/staging/media/ipu3/ipu3.c97
-rw-r--r--drivers/staging/media/ipu3/ipu3.h20
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c2
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c6
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c6
-rw-r--r--drivers/staging/media/soc_camera/Kconfig (renamed from drivers/media/i2c/soc_camera/Kconfig)46
-rw-r--r--drivers/staging/media/soc_camera/Makefile7
-rw-r--r--drivers/staging/media/soc_camera/imx074.c (renamed from drivers/staging/media/imx074/imx074.c)0
-rw-r--r--drivers/staging/media/soc_camera/mt9t031.c (renamed from drivers/staging/media/mt9t031/mt9t031.c)0
-rw-r--r--drivers/staging/media/soc_camera/soc_camera.c (renamed from drivers/media/platform/soc_camera/soc_camera.c)4
-rw-r--r--drivers/staging/media/soc_camera/soc_mediabus.c (renamed from drivers/media/platform/soc_camera/soc_mediabus.c)0
-rw-r--r--drivers/staging/media/soc_camera/soc_mt9v022.c (renamed from drivers/media/i2c/soc_camera/soc_mt9v022.c)0
-rw-r--r--drivers/staging/media/soc_camera/soc_ov5642.c (renamed from drivers/media/i2c/soc_camera/soc_ov5642.c)0
-rw-r--r--drivers/staging/media/soc_camera/soc_ov9740.c (renamed from drivers/media/i2c/soc_camera/soc_ov9740.c)0
-rw-r--r--drivers/staging/media/sunxi/cedrus/TODO5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h9
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.h6
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c28
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c23
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c19
-rw-r--r--drivers/staging/media/zoran/zoran.h2
-rw-r--r--drivers/staging/media/zoran/zoran_card.c2
-rw-r--r--drivers/staging/media/zoran/zoran_device.c6
-rw-r--r--drivers/staging/media/zoran/zoran_driver.c4
-rw-r--r--drivers/staging/most/core.c2
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts29
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi73
-rw-r--r--drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt48
-rw-r--r--drivers/staging/mt7621-eth/Kconfig39
-rw-r--r--drivers/staging/mt7621-eth/Makefile14
-rw-r--r--drivers/staging/mt7621-eth/TODO13
-rw-r--r--drivers/staging/mt7621-eth/ethtool.c250
-rw-r--r--drivers/staging/mt7621-eth/ethtool.h15
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7620.h277
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7621.c297
-rw-r--r--drivers/staging/mt7621-eth/mdio.c275
-rw-r--r--drivers/staging/mt7621-eth/mdio.h27
-rw-r--r--drivers/staging/mt7621-eth/mdio_mt7620.c173
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c2176
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.h716
-rw-r--r--drivers/staging/mt7621-eth/soc_mt7621.c161
-rw-r--r--drivers/staging/mt7621-pci/Kconfig1
-rw-r--r--drivers/staging/mt7621-spi/Kconfig6
-rw-r--r--drivers/staging/mt7621-spi/Makefile1
-rw-r--r--drivers/staging/mt7621-spi/TODO5
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/octeon/ethernet.c40
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h4
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c9
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c10
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c14
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c2
-rw-r--r--drivers/staging/speakup/speakup_soft.c16
-rw-r--r--drivers/staging/speakup/spk_priv.h1
-rw-r--r--drivers/staging/speakup/synth.c6
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c8
-rw-r--r--drivers/staging/vt6655/device_main.c11
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit.h2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c47
-rw-r--r--drivers/target/iscsi/iscsi_target.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c59
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c23
-rw-r--r--drivers/target/loopback/tcm_loop.c21
-rw-r--r--drivers/target/sbp/sbp_target.c6
-rw-r--r--drivers/target/target_core_alua.c5
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_device.c6
-rw-r--r--drivers/target/target_core_pr.c15
-rw-r--r--drivers/target/target_core_tmr.c39
-rw-r--r--drivers/target/target_core_transport.c49
-rw-r--r--drivers/target/target_core_user.c19
-rw-r--r--drivers/target/target_core_xcopy.c6
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c7
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c1
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c9
-rw-r--r--drivers/thermal/cpu_cooling.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c21
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c4
-rw-r--r--drivers/thermal/mtk_thermal.c7
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/thunderbolt/domain.c1
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c1
-rw-r--r--drivers/tty/serial/ar933x_uart.c24
-rw-r--r--drivers/tty/serial/atmel_serial.c52
-rw-r--r--drivers/tty/serial/kgdboc.c4
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/serial/mvebu-uart.c3
-rw-r--r--drivers/tty/serial/mxs-auart.c4
-rw-r--r--drivers/tty/serial/pch_uart.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c14
-rw-r--r--drivers/tty/serial/serial_txx9.c1
-rw-r--r--drivers/tty/serial/sh-sci.c18
-rw-r--r--drivers/tty/tty_port.c10
-rw-r--r--drivers/tty/vt/vt.c3
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/common/common.c2
-rw-r--r--drivers/usb/core/driver.c13
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/early/xhci-dbc.c6
-rw-r--r--drivers/usb/gadget/function/f_hid.c6
-rw-r--r--drivers/usb/gadget/function/f_tcm.c9
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c19
-rw-r--r--drivers/usb/gadget/udc/net2272.c1
-rw-r--r--drivers/usb/gadget/udc/net2280.c8
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/host/ohci-sm501.c3
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/u132-hcd.c3
-rw-r--r--drivers/usb/host/xhci-dbgcap.c7
-rw-r--r--drivers/usb/host/xhci-hub.c19
-rw-r--r--drivers/usb/host/xhci-rcar.c1
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci.h8
-rw-r--r--drivers/usb/misc/usb251xb.c4
-rw-r--r--drivers/usb/misc/yurex.c1
-rw-r--r--drivers/usb/mtu3/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h4
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/storage/realtek_cr.c13
-rw-r--r--drivers/usb/storage/uas.c15
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c27
-rw-r--r--drivers/usb/typec/tcpm/wcove.c9
-rw-r--r--drivers/usb/usbip/stub_rx.c12
-rw-r--r--drivers/usb/usbip/usbip_common.h7
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c14
-rw-r--r--drivers/vhost/scsi.c6
-rw-r--r--drivers/vhost/vhost.c8
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c6
-rw-r--r--drivers/video/fbdev/cg14.c4
-rw-r--r--drivers/video/fbdev/cg3.c2
-rw-r--r--drivers/video/fbdev/chipsfb.c3
-rw-r--r--drivers/video/fbdev/core/fb_cmdline.c23
-rw-r--r--drivers/video/fbdev/core/fbcon.c14
-rw-r--r--drivers/video/fbdev/core/fbmem.c3
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/efifb.c3
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c13
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c13
-rw-r--r--drivers/video/fbdev/imsttfb.c4
-rw-r--r--drivers/video/fbdev/mbx/mbxdebugfs.c40
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c2
-rw-r--r--drivers/video/fbdev/offb.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c34
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/via/viafbdev.c2
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c106
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h15
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c26
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c32
-rw-r--r--drivers/virt/vboxguest/vboxguest_version.h9
-rw-r--r--drivers/virt/vboxguest/vmmdev.h8
-rw-r--r--drivers/virtio/virtio.c2
-rw-r--r--drivers/virtio/virtio_balloon.c6
-rw-r--r--drivers/virtio/virtio_pci_common.c8
-rw-r--r--drivers/virtio/virtio_ring.c13
-rw-r--r--drivers/w1/masters/ds2490.c6
-rw-r--r--drivers/watchdog/Kconfig25
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/dw_wdt.c2
-rw-r--r--drivers/watchdog/mlx_wdt.c290
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pika_wdt.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c23
-rw-r--r--drivers/watchdog/sbc60xxwdt.c2
-rw-r--r--drivers/watchdog/sbsa_gwdt.c2
-rw-r--r--drivers/watchdog/sc1200wdt.c2
-rw-r--r--drivers/watchdog/sc520_wdt.c2
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/watchdog/w83877f_wdt.c2
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/balloon.c5
-rw-r--r--drivers/xen/cpu_hotplug.c2
-rw-r--r--drivers/xen/events/events_base.c1
-rw-r--r--drivers/xen/fallback.c81
-rw-r--r--drivers/xen/gntdev-dmabuf.c21
-rw-r--r--drivers/xen/gntdev-dmabuf.h2
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/privcmd-buf.c3
-rw-r--r--drivers/xen/swiotlb-xen.c7
-rw-r--r--drivers/xen/xen-acpi-processor.c22
-rw-r--r--drivers/xen/xen-balloon.c11
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xen-scsiback.c8
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--fs/9p/v9fs_vfs.h23
-rw-r--r--fs/9p/vfs_file.c6
-rw-r--r--fs/9p/vfs_inode.c23
-rw-r--r--fs/9p/vfs_inode_dotl.c27
-rw-r--r--fs/9p/vfs_super.c6
-rw-r--r--fs/Kconfig10
-rw-r--r--fs/Makefile3
-rw-r--r--fs/afs/callback.c3
-rw-r--r--fs/afs/cmservice.c2
-rw-r--r--fs/afs/fsclient.c6
-rw-r--r--fs/afs/inode.c4
-rw-r--r--fs/afs/internal.h13
-rw-r--r--fs/afs/mntpt.c149
-rw-r--r--fs/afs/rxrpc.c30
-rw-r--r--fs/afs/server.c1
-rw-r--r--fs/afs/super.c432
-rw-r--r--fs/afs/volume.c4
-rw-r--r--fs/afs/write.c1
-rw-r--r--fs/afs/yfsclient.c2
-rw-r--r--fs/aio.c338
-rw-r--r--fs/block_dev.c23
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c76
-rw-r--r--fs/btrfs/extent_io.c4
-rw-r--r--fs/btrfs/file-item.c15
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/btrfs/ioctl.c31
-rw-r--r--fs/btrfs/ordered-data.c3
-rw-r--r--fs/btrfs/props.c8
-rw-r--r--fs/btrfs/qgroup.c17
-rw-r--r--fs/btrfs/raid56.c3
-rw-r--r--fs/btrfs/ref-verify.c15
-rw-r--r--fs/btrfs/root-tree.c8
-rw-r--r--fs/btrfs/transaction.c49
-rw-r--r--fs/btrfs/tree-log.c33
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/btrfs/zstd.c6
-rw-r--r--fs/ceph/caps.c72
-rw-r--r--fs/ceph/debugfs.c27
-rw-r--r--fs/ceph/dir.c461
-rw-r--r--fs/ceph/file.c13
-rw-r--r--fs/ceph/inode.c70
-rw-r--r--fs/ceph/mds_client.c768
-rw-r--r--fs/ceph/mds_client.h43
-rw-r--r--fs/ceph/snap.c166
-rw-r--r--fs/ceph/super.c21
-rw-r--r--fs/ceph/super.h43
-rw-r--r--fs/ceph/xattr.c20
-rw-r--r--fs/cifs/Kconfig120
-rw-r--r--fs/cifs/cifs_debug.c11
-rw-r--r--fs/cifs/cifs_dfs_ref.c4
-rw-r--r--fs/cifs/cifs_fs_sb.h1
-rw-r--r--fs/cifs/cifs_ioctl.h3
-rw-r--r--fs/cifs/cifsfs.c7
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h91
-rw-r--r--fs/cifs/cifsproto.h8
-rw-r--r--fs/cifs/cifssmb.c54
-rw-r--r--fs/cifs/connect.c100
-rw-r--r--fs/cifs/dir.c107
-rw-r--r--fs/cifs/file.c460
-rw-r--r--fs/cifs/inode.c6
-rw-r--r--fs/cifs/link.c14
-rw-r--r--fs/cifs/misc.c49
-rw-r--r--fs/cifs/smb1ops.c134
-rw-r--r--fs/cifs/smb2file.c6
-rw-r--r--fs/cifs/smb2inode.c87
-rw-r--r--fs/cifs/smb2maperror.c3
-rw-r--r--fs/cifs/smb2misc.c30
-rw-r--r--fs/cifs/smb2ops.c572
-rw-r--r--fs/cifs/smb2pdu.c388
-rw-r--r--fs/cifs/smb2pdu.h11
-rw-r--r--fs/cifs/smb2proto.h10
-rw-r--r--fs/cifs/smb2status.h6
-rw-r--r--fs/cifs/smb2transport.c25
-rw-r--r--fs/cifs/smbdirect.c6
-rw-r--r--fs/cifs/trace.h219
-rw-r--r--fs/cifs/transport.c302
-rw-r--r--fs/crypto/Kconfig6
-rw-r--r--fs/crypto/fscrypt_private.h1
-rw-r--r--fs/crypto/hooks.c6
-rw-r--r--fs/crypto/keyinfo.c1
-rw-r--r--fs/crypto/policy.c3
-rw-r--r--fs/dax.c40
-rw-r--r--fs/debugfs/inode.c13
-rw-r--r--fs/devpts/inode.c1
-rw-r--r--fs/ecryptfs/crypto.c1
-rw-r--r--fs/ecryptfs/keystore.c1
-rw-r--r--fs/exofs/BUGS3
-rw-r--r--fs/exofs/Kbuild20
-rw-r--r--fs/exofs/Kconfig13
-rw-r--r--fs/exofs/Kconfig.ore14
-rw-r--r--fs/exofs/common.h262
-rw-r--r--fs/exofs/dir.c661
-rw-r--r--fs/exofs/exofs.h240
-rw-r--r--fs/exofs/file.c83
-rw-r--r--fs/exofs/inode.c1514
-rw-r--r--fs/exofs/namei.c323
-rw-r--r--fs/exofs/ore.c1179
-rw-r--r--fs/exofs/ore_raid.c757
-rw-r--r--fs/exofs/ore_raid.h62
-rw-r--r--fs/exofs/super.c1071
-rw-r--r--fs/exofs/sys.c205
-rw-r--r--fs/ext4/Kconfig17
-rw-r--r--fs/ext4/dir.c10
-rw-r--r--fs/ext4/ext4.h22
-rw-r--r--fs/ext4/ext4_jbd2.h4
-rw-r--r--fs/ext4/extents.c33
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/hash.c2
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/indirect.c49
-rw-r--r--fs/ext4/inode.c75
-rw-r--r--fs/ext4/ioctl.c112
-rw-r--r--fs/ext4/mballoc.c7
-rw-r--r--fs/ext4/move_extent.c3
-rw-r--r--fs/ext4/namei.c18
-rw-r--r--fs/ext4/page-io.c13
-rw-r--r--fs/ext4/readpage.c5
-rw-r--r--fs/ext4/resize.c20
-rw-r--r--fs/ext4/super.c23
-rw-r--r--fs/ext4/sysfs.c17
-rw-r--r--fs/ext4/xattr.c3
-rw-r--r--fs/f2fs/Kconfig12
-rw-r--r--fs/f2fs/checkpoint.c20
-rw-r--r--fs/f2fs/data.c63
-rw-r--r--fs/f2fs/debug.c19
-rw-r--r--fs/f2fs/dir.c25
-rw-r--r--fs/f2fs/extent_cache.c2
-rw-r--r--fs/f2fs/f2fs.h92
-rw-r--r--fs/f2fs/file.c56
-rw-r--r--fs/f2fs/inline.c12
-rw-r--r--fs/f2fs/inode.c19
-rw-r--r--fs/f2fs/namei.c9
-rw-r--r--fs/f2fs/node.c6
-rw-r--r--fs/f2fs/segment.c80
-rw-r--r--fs/f2fs/segment.h2
-rw-r--r--fs/f2fs/super.c117
-rw-r--r--fs/f2fs/sysfs.c21
-rw-r--r--fs/f2fs/trace.c20
-rw-r--r--fs/f2fs/xattr.c25
-rw-r--r--fs/f2fs/xattr.h6
-rw-r--r--fs/filesystems.c4
-rw-r--r--fs/fs_context.c642
-rw-r--r--fs/fs_parser.c447
-rw-r--r--fs/fuse/control.c4
-rw-r--r--fs/fuse/cuse.c7
-rw-r--r--fs/fuse/dev.c127
-rw-r--r--fs/fuse/dir.c54
-rw-r--r--fs/fuse/file.c342
-rw-r--r--fs/fuse/fuse_i.h28
-rw-r--r--fs/fuse/inode.c28
-rw-r--r--fs/fuse/readdir.c4
-rw-r--r--fs/gfs2/glock.c72
-rw-r--r--fs/gfs2/glock.h4
-rw-r--r--fs/gfs2/incore.h3
-rw-r--r--fs/gfs2/inode.h4
-rw-r--r--fs/gfs2/main.c6
-rw-r--r--fs/hpfs/hpfs.h8
-rw-r--r--fs/hugetlbfs/inode.c378
-rw-r--r--fs/inode.c9
-rw-r--r--fs/internal.h13
-rw-r--r--fs/io_uring.c790
-rw-r--r--fs/iomap.c12
-rw-r--r--fs/jbd2/checkpoint.c17
-rw-r--r--fs/jbd2/commit.c6
-rw-r--r--fs/jbd2/journal.c90
-rw-r--r--fs/jbd2/transaction.c83
-rw-r--r--fs/jffs2/readinode.c5
-rw-r--r--fs/jffs2/super.c5
-rw-r--r--fs/kernfs/kernfs-internal.h1
-rw-r--r--fs/kernfs/mount.c119
-rw-r--r--fs/lockd/clnt4xdr.c14
-rw-r--r--fs/lockd/clntxdr.c14
-rw-r--r--fs/lockd/host.c3
-rw-r--r--fs/locks.c5
-rw-r--r--fs/mount.h5
-rw-r--r--fs/namei.c5
-rw-r--r--fs/namespace.c395
-rw-r--r--fs/nfs/callback_xdr.c64
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/delegation.c22
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c98
-rw-r--r--fs/nfs/direct.c7
-rw-r--r--fs/nfs/file.c44
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c230
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h75
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c161
-rw-r--r--fs/nfs/inode.c33
-rw-r--r--fs/nfs/internal.h5
-rw-r--r--fs/nfs/io.c12
-rw-r--r--fs/nfs/namespace.c8
-rw-r--r--fs/nfs/nfs2xdr.c124
-rw-r--r--fs/nfs/nfs3acl.c2
-rw-r--r--fs/nfs/nfs3xdr.c209
-rw-r--r--fs/nfs/nfs42.h3
-rw-r--r--fs/nfs/nfs42proc.c167
-rw-r--r--fs/nfs/nfs42xdr.c130
-rw-r--r--fs/nfs/nfs4client.c33
-rw-r--r--fs/nfs/nfs4file.c4
-rw-r--r--fs/nfs/nfs4namespace.c5
-rw-r--r--fs/nfs/nfs4proc.c143
-rw-r--r--fs/nfs/nfs4session.c7
-rw-r--r--fs/nfs/nfs4session.h7
-rw-r--r--fs/nfs/nfs4state.c1
-rw-r--r--fs/nfs/nfs4trace.h25
-rw-r--r--fs/nfs/nfs4xdr.c530
-rw-r--r--fs/nfs/nfstrace.c1
-rw-r--r--fs/nfs/nfstrace.h85
-rw-r--r--fs/nfs/pagelist.c47
-rw-r--r--fs/nfs/pnfs.c35
-rw-r--r--fs/nfs/pnfs.h2
-rw-r--r--fs/nfs/pnfs_dev.c13
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/super.c5
-rw-r--r--fs/nfs/unlink.c8
-rw-r--r--fs/nfs/write.c19
-rw-r--r--fs/nfsd/nfs3proc.c35
-rw-r--r--fs/nfsd/nfs3xdr.c14
-rw-r--r--fs/nfsd/nfs4callback.c25
-rw-r--r--fs/nfsd/nfs4recover.c1
-rw-r--r--fs/nfsd/nfs4state.c20
-rw-r--r--fs/nfsd/nfsctl.c2
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/nilfs2/btnode.c2
-rw-r--r--fs/notify/fanotify/fanotify.c14
-rw-r--r--fs/notify/fanotify/fanotify_user.c12
-rw-r--r--fs/notify/inotify/inotify_user.c7
-rw-r--r--fs/notify/mark.c12
-rw-r--r--fs/ocfs2/refcounttree.c42
-rw-r--r--fs/open.c24
-rw-r--r--fs/orangefs/inode.c7
-rw-r--r--fs/overlayfs/copy_up.c59
-rw-r--r--fs/overlayfs/overlayfs.h2
-rw-r--r--fs/overlayfs/util.c55
-rw-r--r--fs/pipe.c36
-rw-r--r--fs/pnode.c5
-rw-r--r--fs/pnode.h3
-rw-r--r--fs/proc/base.c105
-rw-r--r--fs/proc/inode.c52
-rw-r--r--fs/proc/internal.h5
-rw-r--r--fs/proc/kcore.c29
-rw-r--r--fs/proc/proc_sysctl.c7
-rw-r--r--fs/proc/root.c236
-rw-r--r--fs/proc/task_mmu.c20
-rw-r--r--fs/read_write.c15
-rw-r--r--fs/splice.c24
-rw-r--r--fs/stat.c12
-rw-r--r--fs/super.c341
-rw-r--r--fs/sysfs/mount.c75
-rw-r--r--fs/ubifs/Kconfig12
-rw-r--r--fs/ubifs/Makefile2
-rw-r--r--fs/ubifs/auth.c6
-rw-r--r--fs/ubifs/ioctl.c12
-rw-r--r--fs/ubifs/replay.c2
-rw-r--r--fs/ubifs/sb.c2
-rw-r--r--fs/ubifs/super.c6
-rw-r--r--fs/ubifs/ubifs.h5
-rw-r--r--fs/udf/inode.c4
-rw-r--r--fs/udf/truncate.c8
-rw-r--r--fs/udf/udfdecl.h2
-rw-r--r--fs/ufs/util.h2
-rw-r--r--fs/userfaultfd.c9
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c15
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c37
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c18
-rw-r--r--fs/xfs/scrub/btree.c11
-rw-r--r--fs/xfs/scrub/dabtree.c5
-rw-r--r--fs/xfs/xfs_discard.c8
-rw-r--r--fs/xfs/xfs_file.c27
-rw-r--r--include/acpi/acoutput.h3
-rw-r--r--include/acpi/acpi_bus.h8
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl.h4
-rw-r--r--include/acpi/actypes.h12
-rw-r--r--include/acpi/platform/aclinux.h5
-rw-r--r--include/asm-generic/Kbuild5
-rw-r--r--include/asm-generic/futex.h8
-rw-r--r--include/asm-generic/io.h27
-rw-r--r--include/asm-generic/mmiowb.h63
-rw-r--r--include/asm-generic/mmiowb_types.h12
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/asm-generic/rwsem.h140
-rw-r--r--include/asm-generic/sections.h14
-rw-r--r--include/asm-generic/syscall.h21
-rw-r--r--include/asm-generic/tlb.h297
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/clocksource/arm_arch_timer.h1
-rw-r--r--include/crypto/aes.h8
-rw-r--r--include/crypto/akcipher.h54
-rw-r--r--include/crypto/cryptd.h18
-rw-r--r--include/crypto/des.h43
-rw-r--r--include/crypto/hash.h10
-rw-r--r--include/crypto/internal/simd.h44
-rw-r--r--include/crypto/morus1280_glue.h79
-rw-r--r--include/crypto/morus640_glue.h79
-rw-r--r--include/crypto/public_key.h4
-rw-r--r--include/crypto/streebog.h5
-rw-r--r--include/drm/drm_fb_helper.h14
-rw-r--r--include/drm/drm_modeset_helper_vtables.h4
-rw-r--r--include/drm/ttm/ttm_bo_driver.h1
-rw-r--r--include/dt-bindings/clock/actions,s500-cmu.h78
-rw-r--r--include/dt-bindings/clock/axg-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/exynos5433.h8
-rw-r--r--include/dt-bindings/clock/g12a-aoclkc.h34
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h135
-rw-r--r--include/dt-bindings/clock/gxbb-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/imx5-clock.h3
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h244
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h215
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h1
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h1
-rw-r--r--include/dt-bindings/clock/mt2712-clk.h3
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h3
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h10
-rw-r--r--include/dt-bindings/clock/qcom,rpmh.h1
-rw-r--r--include/dt-bindings/clock/r8a774a1-cpg-mssr.h1
-rw-r--r--include/dt-bindings/clock/r8a774c0-cpg-mssr.h1
-rw-r--r--include/dt-bindings/clock/sifive-fu540-prci.h18
-rw-r--r--include/dt-bindings/clock/stm32mp1-clks.h3
-rw-r--r--include/dt-bindings/pinctrl/at91.h4
-rw-r--r--include/dt-bindings/reset/amlogic,meson-g12a-reset.h5
-rw-r--r--include/dt-bindings/reset/g12a-aoclkc.h18
-rw-r--r--include/keys/system_keyring.h8
-rw-r--r--include/keys/trusted.h2
-rw-r--r--include/kvm/arm_arch_timer.h68
-rw-r--r--include/linux/acpi.h11
-rw-r--r--include/linux/acpi_iort.h8
-rw-r--r--include/linux/amba/bus.h39
-rw-r--r--include/linux/atalk.h20
-rw-r--r--include/linux/bio.h20
-rw-r--r--include/linux/bitrev.h46
-rw-r--r--include/linux/blk-mq.h4
-rw-r--r--include/linux/blk_types.h1
-rw-r--r--include/linux/blkdev.h12
-rw-r--r--include/linux/bpf-cgroup.h2
-rw-r--r--include/linux/bpf.h3
-rw-r--r--include/linux/bpf_verifier.h40
-rw-r--r--include/linux/brcmphy.h16
-rw-r--r--include/linux/bsg-lib.h4
-rw-r--r--include/linux/bvec.h19
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/types.h1
-rw-r--r--include/linux/cgroup_rdma.h2
-rw-r--r--include/linux/clk-provider.h3
-rw-r--r--include/linux/clk.h52
-rw-r--r--include/linux/clk/ti.h1
-rw-r--r--include/linux/clkdev.h4
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/cpu.h44
-rw-r--r--include/linux/cpufreq.h14
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/cred.h2
-rw-r--r--include/linux/device-mapper.h17
-rw-r--r--include/linux/device.h4
-rw-r--r--include/linux/dma-mapping.h69
-rw-r--r--include/linux/dma/dw.h9
-rw-r--r--include/linux/dmi.h8
-rw-r--r--include/linux/efi.h12
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/errno.h1
-rw-r--r--include/linux/etherdevice.h12
-rw-r--r--include/linux/f2fs_fs.h20
-rw-r--r--include/linux/filter.h18
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h19
-rw-r--r--include/linux/flex_array.h149
-rw-r--r--include/linux/fs.h22
-rw-r--r--include/linux/fs_context.h188
-rw-r--r--include/linux/fs_parser.h151
-rw-r--r--include/linux/fscrypt.h420
-rw-r--r--include/linux/fscrypt_notsupp.h231
-rw-r--r--include/linux/fscrypt_supp.h204
-rw-r--r--include/linux/ftrace.h18
-rw-r--r--include/linux/generic-radix-tree.h231
-rw-r--r--include/linux/hardirq.h7
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/hmm.h4
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/hwmon.h18
-rw-r--r--include/linux/igmp.h2
-rw-r--r--include/linux/ima.h7
-rw-r--r--include/linux/input/ili210x.h11
-rw-r--r--include/linux/intel-iommu.h21
-rw-r--r--include/linux/intel-ish-client-if.h112
-rw-r--r--include/linux/intel-svm.h2
-rw-r--r--include/linux/interrupt.h25
-rw-r--r--include/linux/io-pgtable.h (renamed from drivers/iommu/io-pgtable.h)0
-rw-r--r--include/linux/iommu.h6
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/arm-gic.h3
-rw-r--r--include/linux/jbd2.h1
-rw-r--r--include/linux/jump_label_ratelimit.h64
-rw-r--r--include/linux/kcore.h13
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/kernfs.h39
-rw-r--r--include/linux/kprobes.h1
-rw-r--r--include/linux/kvm_host.h34
-rw-r--r--include/linux/libata.h5
-rw-r--r--include/linux/libnvdimm.h3
-rw-r--r--include/linux/list.h2
-rw-r--r--include/linux/livepatch.h3
-rw-r--r--include/linux/lockdep.h11
-rw-r--r--include/linux/lsm_hooks.h191
-rw-r--r--include/linux/mailbox/zynqmp-ipi-message.h20
-rw-r--r--include/linux/memblock.h71
-rw-r--r--include/linux/memcontrol.h5
-rw-r--r--include/linux/memory_hotplug.h2
-rw-r--r--include/linux/mfd/palmas.h1
-rw-r--r--include/linux/mfd/wm831x/regulator.h2
-rw-r--r--include/linux/mfd/wm8400-private.h8
-rw-r--r--include/linux/mii.h2
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mlx5/qp.h3
-rw-r--r--include/linux/mm.h37
-rw-r--r--include/linux/mm_types.h4
-rw-r--r--include/linux/mod_devicetable.h12
-rw-r--r--include/linux/module.h18
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/msi.h12
-rw-r--r--include/linux/net.h6
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/nfs4.h1
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_page.h10
-rw-r--r--include/linux/nfs_xdr.h37
-rw-r--r--include/linux/ntb.h10
-rw-r--r--include/linux/nvme.h9
-rw-r--r--include/linux/of_reserved_mem.h7
-rw-r--r--include/linux/oid_registry.h18
-rw-r--r--include/linux/page-isolation.h10
-rw-r--r--include/linux/pagemap.h1
-rw-r--r--include/linux/parport.h13
-rw-r--r--include/linux/pci-ats.h5
-rw-r--r--include/linux/pci-epc.h31
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/perf_event.h20
-rw-r--r--include/linux/pinctrl/pinconf.h6
-rw-r--r--include/linux/pipe_fs_i.h19
-rw-r--r--include/linux/platform_data/ads7828.h2
-rw-r--r--include/linux/platform_data/dma-dw.h12
-rw-r--r--include/linux/platform_data/dma-imx.h1
-rw-r--r--include/linux/platform_data/ds620.h2
-rw-r--r--include/linux/platform_data/gpio/gpio-amd-fch.h2
-rw-r--r--include/linux/platform_data/ina2xx.h2
-rw-r--r--include/linux/platform_data/max197.h2
-rw-r--r--include/linux/platform_data/media/si4713.h4
-rw-r--r--include/linux/platform_data/media/soc_camera_platform.h83
-rw-r--r--include/linux/platform_data/mlxreg.h25
-rw-r--r--include/linux/platform_data/ntc_thermistor.h2
-rw-r--r--include/linux/platform_data/spi-ep93xx.h4
-rw-r--r--include/linux/platform_data/wilco-ec.h144
-rw-r--r--include/linux/platform_data/x86/clk-lpss.h (renamed from include/linux/platform_data/clk-lpss.h)0
-rw-r--r--include/linux/platform_data/x86/clk-pmc-atom.h3
-rw-r--r--include/linux/pm.h1
-rw-r--r--include/linux/pm_domain.h22
-rw-r--r--include/linux/pm_opp.h8
-rw-r--r--include/linux/pm_wakeup.h9
-rw-r--r--include/linux/poison.h3
-rw-r--r--include/linux/proc_fs.h6
-rw-r--r--include/linux/property.h18
-rw-r--r--include/linux/psp-sev.h3
-rw-r--r--include/linux/ptrace.h11
-rw-r--r--include/linux/pwm.h37
-rw-r--r--include/linux/qed/qed_if.h2
-rw-r--r--include/linux/rcupdate.h6
-rw-r--r--include/linux/rcuwait.h2
-rw-r--r--include/linux/regulator/consumer.h5
-rw-r--r--include/linux/remoteproc.h8
-rw-r--r--include/linux/ring_buffer.h4
-rw-r--r--include/linux/rwsem-spinlock.h47
-rw-r--r--include/linux/rwsem.h37
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/scatterlist.h49
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/sched/mm.h21
-rw-r--r--include/linux/sched/signal.h18
-rw-r--r--include/linux/sched/task.h1
-rw-r--r--include/linux/sched/topology.h4
-rw-r--r--include/linux/sched/user.h7
-rw-r--r--include/linux/security.h18
-rw-r--r--include/linux/set_memory.h11
-rw-r--r--include/linux/shmem_fs.h1
-rw-r--r--include/linux/skbuff.h70
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/smpboot.h2
-rw-r--r--include/linux/socket.h12
-rw-r--r--include/linux/spi/pxa2xx_spi.h1
-rw-r--r--include/linux/spi/spi-mem.h12
-rw-r--r--include/linux/spi/spi.h24
-rw-r--r--include/linux/spi/spi_bitbang.h1
-rw-r--r--include/linux/spinlock.h11
-rw-r--r--include/linux/srcu.h36
-rw-r--r--include/linux/stackdepot.h8
-rw-r--r--include/linux/stacktrace.h81
-rw-r--r--include/linux/string.h7
-rw-r--r--include/linux/sunrpc/auth.h44
-rw-r--r--include/linux/sunrpc/clnt.h3
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h42
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/sunrpc/xdr.h23
-rw-r--r--include/linux/sunrpc/xprt.h7
-rw-r--r--include/linux/sunrpc/xprtsock.h1
-rw-r--r--include/linux/suspend.h3
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/swiotlb.h11
-rw-r--r--include/linux/switchtec.h10
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/tick.h13
-rw-r--r--include/linux/time64.h21
-rw-r--r--include/linux/tpm.h129
-rw-r--r--include/linux/tpm_eventlog.h19
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/uio.h34
-rw-r--r--include/linux/uprobes.h5
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/vbox_utils.h12
-rw-r--r--include/linux/verification.h1
-rw-r--r--include/linux/vgaarb.h2
-rw-r--r--include/linux/virtio.h2
-rw-r--r--include/linux/virtio_config.h13
-rw-r--r--include/linux/virtio_ring.h2
-rw-r--r--include/linux/vmalloc.h15
-rw-r--r--include/linux/wmi.h5
-rw-r--r--include/linux/xarray.h296
-rw-r--r--include/media/davinci/dm355_ccdc.h4
-rw-r--r--include/media/davinci/dm644x_ccdc.h2
-rw-r--r--include/media/drv-intf/exynos-fimc.h2
-rw-r--r--include/media/drv-intf/saa7146.h2
-rw-r--r--include/media/drv-intf/saa7146_vv.h4
-rw-r--r--include/media/drv-intf/sh_mobile_ceu.h29
-rw-r--r--include/media/dvb_frontend.h8
-rw-r--r--include/media/i2c/tw9910.h2
-rw-r--r--include/media/mpeg2-ctrls.h14
-rw-r--r--include/media/rc-map.h18
-rw-r--r--include/media/v4l2-common.h9
-rw-r--r--include/media/v4l2-ctrls.h2
-rw-r--r--include/media/v4l2-event.h2
-rw-r--r--include/media/v4l2-fwnode.h4
-rw-r--r--include/media/v4l2-mem2mem.h44
-rw-r--r--include/media/v4l2-subdev.h9
-rw-r--r--include/media/videobuf-core.h4
-rw-r--r--include/media/videobuf2-core.h15
-rw-r--r--include/media/videobuf2-dma-sg.h2
-rw-r--r--include/media/videobuf2-v4l2.h16
-rw-r--r--include/misc/charlcd.h1
-rw-r--r--include/net/act_api.h9
-rw-r--r--include/net/af_rxrpc.h4
-rw-r--r--include/net/cfg80211.h5
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/mac80211.h63
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h6
-rw-r--r--include/net/netfilter/nf_tables.h12
-rw-r--r--include/net/netns/hash.h10
-rw-r--r--include/net/netrom.h2
-rw-r--r--include/net/nfc/nci_core.h2
-rw-r--r--include/net/request_sock.h1
-rw-r--r--include/net/sch_generic.h45
-rw-r--r--include/net/sctp/checksum.h2
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/sctp/structs.h15
-rw-r--r--include/net/sock.h12
-rw-r--r--include/net/tc_act/tc_gact.h2
-rw-r--r--include/net/tls.h4
-rw-r--r--include/net/xdp_sock.h1
-rw-r--r--include/net/xfrm.h22
-rw-r--r--include/rdma/ib_hdrs.h14
-rw-r--r--include/rdma/ib_mad.h5
-rw-r--r--include/rdma/ib_umem.h8
-rw-r--r--include/rdma/ib_umem_odp.h34
-rw-r--r--include/rdma/ib_verbs.h274
-rw-r--r--include/rdma/iw_cm.h16
-rw-r--r--include/rdma/iw_portmap.h144
-rw-r--r--include/rdma/rdma_cm.h1
-rw-r--r--include/rdma/rdma_netlink.h11
-rw-r--r--include/rdma/rdma_vt.h30
-rw-r--r--include/rdma/rdmavt_qp.h20
-rw-r--r--include/rdma/restrack.h58
-rw-r--r--include/rdma/tid_rdma_defs.h108
-rw-r--r--include/rdma/uverbs_ioctl.h18
-rw-r--r--include/rdma/uverbs_std_types.h18
-rw-r--r--include/rdma/uverbs_types.h1
-rw-r--r--include/scsi/libfcoe.h4
-rw-r--r--include/scsi/libsas.h59
-rw-r--r--include/scsi/osd_initiator.h511
-rw-r--r--include/scsi/osd_ore.h201
-rw-r--r--include/scsi/scsi.h6
-rw-r--r--include/scsi/scsi_cmnd.h34
-rw-r--r--include/scsi/scsi_eh.h1
-rw-r--r--include/scsi/scsi_host.h20
-rw-r--r--include/soc/rockchip/rk3399_grf.h21
-rw-r--r--include/soc/rockchip/rockchip_sip.h1
-rw-r--r--include/soc/tegra/mc.h27
-rw-r--r--include/sound/pcm.h2
-rw-r--r--include/sound/soc.h11
-rw-r--r--include/target/iscsi/iscsi_transport.h4
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/target/target_core_fabric.h2
-rw-r--r--include/trace/events/devfreq.h40
-rw-r--r--include/trace/events/f2fs.h47
-rw-r--r--include/trace/events/pwc.h65
-rw-r--r--include/trace/events/rpcgss.h361
-rw-r--r--include/trace/events/rpcrdma.h12
-rw-r--r--include/trace/events/rxrpc.h2
-rw-r--r--include/trace/events/spi.h10
-rw-r--r--include/trace/events/sunrpc.h363
-rw-r--r--include/trace/events/syscalls.h2
-rw-r--r--include/trace/events/tegra_apb_dma.h61
-rw-r--r--include/trace/events/timer.h17
-rw-r--r--include/uapi/asm-generic/Kbuild (renamed from include/uapi/asm-generic/Kbuild.asm)4
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/asm-generic/unistd.h2
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/bpf.h188
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/ethtool.h2
-rw-r--r--include/uapi/linux/fuse.h7
-rw-r--r--include/uapi/linux/input-event-codes.h6
-rw-r--r--include/uapi/linux/lirc.h6
-rw-r--r--include/uapi/linux/ndctl.h1
-rw-r--r--include/uapi/linux/pci_regs.h2
-rw-r--r--include/uapi/linux/psci.h7
-rw-r--r--include/uapi/linux/psp-sev.h18
-rw-r--r--include/uapi/linux/spi/spidev.h6
-rw-r--r--include/uapi/linux/v4l2-controls.h2
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h60
-rw-r--r--include/uapi/linux/vfio.h4
-rw-r--r--include/uapi/linux/vfio_ccw.h12
-rw-r--r--include/uapi/linux/videodev2.h26
-rw-r--r--include/uapi/mtd/ubi-user.h5
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h11
-rw-r--r--include/uapi/rdma/ib_user_verbs.h2
-rw-r--r--include/uapi/rdma/mlx5-abi.h1
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h18
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h5
-rw-r--r--include/uapi/rdma/rdma_netlink.h74
-rw-r--r--include/uapi/rdma/rdma_user_cm.h4
-rw-r--r--include/uapi/rdma/rdma_user_rxe.h3
-rw-r--r--include/uapi/sound/asound.h1
-rw-r--r--include/video/imx-ipu-v3.h8
-rw-r--r--include/xen/xen.h4
-rw-r--r--init/Kconfig20
-rw-r--r--init/main.c36
-rw-r--r--ipc/mqueue.c94
-rw-r--r--ipc/namespace.c2
-rw-r--r--kernel/Kconfig.locks9
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/backtracetest.c11
-rw-r--r--kernel/bpf/core.c1
-rw-r--r--kernel/bpf/cpumap.c13
-rw-r--r--kernel/bpf/inode.c32
-rw-r--r--kernel/bpf/syscall.c22
-rw-r--r--kernel/bpf/verifier.c245
-rw-r--r--kernel/cgroup/cgroup-internal.h49
-rw-r--r--kernel/cgroup/cgroup-v1.c394
-rw-r--r--kernel/cgroup/cgroup.c223
-rw-r--r--kernel/cgroup/cpuset.c67
-rw-r--r--kernel/cgroup/rdma.c5
-rw-r--r--kernel/cpu.c111
-rw-r--r--kernel/dma/Kconfig121
-rw-r--r--kernel/dma/Makefile2
-rw-r--r--kernel/dma/coherent.c50
-rw-r--r--kernel/dma/debug.c122
-rw-r--r--kernel/dma/direct.c25
-rw-r--r--kernel/dma/mapping.c14
-rw-r--r--kernel/dma/swiotlb.c35
-rw-r--r--kernel/events/core.c115
-rw-r--r--kernel/events/ring_buffer.c68
-rw-r--r--kernel/events/uprobes.c16
-rw-r--r--kernel/fork.c27
-rw-r--r--kernel/futex.c192
-rw-r--r--kernel/iomem.c4
-rw-r--r--kernel/irq/chip.c4
-rw-r--r--kernel/irq/devres.c5
-rw-r--r--kernel/irq/irqdesc.c1
-rw-r--r--kernel/irq/manage.c5
-rw-r--r--kernel/irq/timings.c522
-rw-r--r--kernel/irq_work.c75
-rw-r--r--kernel/jump_label.c63
-rw-r--r--kernel/kexec_core.c4
-rw-r--r--kernel/kexec_file.c1
-rw-r--r--kernel/kprobes.c6
-rw-r--r--kernel/latencytop.c29
-rw-r--r--kernel/livepatch/core.c91
-rw-r--r--kernel/livepatch/transition.c22
-rw-r--r--kernel/locking/Makefile5
-rw-r--r--kernel/locking/lock_events.c179
-rw-r--r--kernel/locking/lock_events.h59
-rw-r--r--kernel/locking/lock_events_list.h67
-rw-r--r--kernel/locking/lockdep.c396
-rw-r--r--kernel/locking/lockdep_internals.h34
-rw-r--r--kernel/locking/locktorture.c2
-rw-r--r--kernel/locking/percpu-rwsem.c2
-rw-r--r--kernel/locking/qspinlock.c8
-rw-r--r--kernel/locking/qspinlock_paravirt.h19
-rw-r--r--kernel/locking/qspinlock_stat.h242
-rw-r--r--kernel/locking/rwsem-spinlock.c339
-rw-r--r--kernel/locking/rwsem-xadd.c204
-rw-r--r--kernel/locking/rwsem.c25
-rw-r--r--kernel/locking/rwsem.h174
-rw-r--r--kernel/locking/spinlock.c7
-rw-r--r--kernel/locking/spinlock_debug.c6
-rw-r--r--kernel/module.c82
-rw-r--r--kernel/panic.c7
-rw-r--r--kernel/power/Kconfig9
-rw-r--r--kernel/power/hibernate.c17
-rw-r--r--kernel/power/main.c14
-rw-r--r--kernel/power/snapshot.c8
-rw-r--r--kernel/power/suspend.c17
-rw-r--r--kernel/power/user.c5
-rw-r--r--kernel/printk/printk.c10
-rw-r--r--kernel/ptrace.c15
-rw-r--r--kernel/rcu/rcu.h1
-rw-r--r--kernel/rcu/rcuperf.c5
-rw-r--r--kernel/rcu/rcutorture.c21
-rw-r--r--kernel/rcu/srcutiny.c9
-rw-r--r--kernel/rcu/srcutree.c32
-rw-r--r--kernel/rcu/tiny.c2
-rw-r--r--kernel/rcu/tree.c508
-rw-r--r--kernel/rcu/tree.h14
-rw-r--r--kernel/rcu/tree_exp.h36
-rw-r--r--kernel/rcu/tree_plugin.h257
-rw-r--r--kernel/rcu/tree_stall.h709
-rw-r--r--kernel/rcu/update.c59
-rw-r--r--kernel/relay.c1
-rw-r--r--kernel/resource.c29
-rw-r--r--kernel/rseq.c9
-rw-r--r--kernel/sched/core.c130
-rw-r--r--kernel/sched/cpufreq.c2
-rw-r--r--kernel/sched/cpufreq_schedutil.c67
-rw-r--r--kernel/sched/deadline.c3
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/sched/fair.c144
-rw-r--r--kernel/sched/isolation.c18
-rw-r--r--kernel/sched/rt.c5
-rw-r--r--kernel/sched/sched.h18
-rw-r--r--kernel/sched/topology.c31
-rw-r--r--kernel/seccomp.c21
-rw-r--r--kernel/signal.c128
-rw-r--r--kernel/softirq.c51
-rw-r--r--kernel/stacktrace.c333
-rw-r--r--kernel/sys_ni.c1
-rw-r--r--kernel/sysctl.c55
-rw-r--r--kernel/time/alarmtimer.c2
-rw-r--r--kernel/time/clockevents.c18
-rw-r--r--kernel/time/jiffies.c4
-rw-r--r--kernel/time/sched_clock.c8
-rw-r--r--kernel/time/tick-broadcast.c48
-rw-r--r--kernel/time/tick-common.c54
-rw-r--r--kernel/time/tick-internal.h10
-rw-r--r--kernel/time/tick-sched.c49
-rw-r--r--kernel/time/tick-sched.h13
-rw-r--r--kernel/time/time.c2
-rw-r--r--kernel/time/timekeeping.c24
-rw-r--r--kernel/time/timekeeping.h7
-rw-r--r--kernel/time/timer.c30
-rw-r--r--kernel/torture.c2
-rw-r--r--kernel/trace/Kconfig1
-rw-r--r--kernel/trace/blktrace.c1
-rw-r--r--kernel/trace/bpf_trace.c8
-rw-r--r--kernel/trace/ftrace.c48
-rw-r--r--kernel/trace/ring_buffer.c21
-rw-r--r--kernel/trace/trace.c375
-rw-r--r--kernel/trace/trace.h74
-rw-r--r--kernel/trace/trace_branch.c4
-rw-r--r--kernel/trace/trace_dynevent.c2
-rw-r--r--kernel/trace/trace_entries.h41
-rw-r--r--kernel/trace/trace_event_perf.c16
-rw-r--r--kernel/trace/trace_events_filter.c7
-rw-r--r--kernel/trace/trace_events_hist.c1072
-rw-r--r--kernel/trace/trace_functions_graph.c30
-rw-r--r--kernel/trace/trace_irqsoff.c2
-rw-r--r--kernel/trace/trace_kdb.c6
-rw-r--r--kernel/trace/trace_kprobe.c23
-rw-r--r--kernel/trace/trace_probe.c21
-rw-r--r--kernel/trace/trace_probe.h1
-rw-r--r--kernel/trace/trace_sched_wakeup.c11
-rw-r--r--kernel/trace/trace_stack.c85
-rw-r--r--kernel/trace/trace_syscalls.c9
-rw-r--r--kernel/trace/trace_uprobe.c8
-rw-r--r--kernel/watchdog.c19
-rw-r--r--kernel/watchdog_hld.c3
-rw-r--r--kernel/workqueue.c61
-rw-r--r--kernel/workqueue_internal.h5
-rw-r--r--lib/Kconfig4
-rw-r--r--lib/Kconfig.debug48
-rw-r--r--lib/Makefile20
-rw-r--r--lib/cpumask.c3
-rw-r--r--lib/crc-t10dif.c1
-rw-r--r--lib/digsig.c1
-rw-r--r--lib/fault-inject.c12
-rw-r--r--lib/flex_array.c398
-rw-r--r--lib/generic-radix-tree.c217
-rw-r--r--lib/iov_iter.c21
-rw-r--r--lib/irq_poll.c2
-rw-r--r--lib/libcrc32c.c1
-rw-r--r--lib/lzo/lzo1x_compress.c9
-rw-r--r--lib/lzo/lzo1x_decompress_safe.c4
-rw-r--r--lib/raid6/Makefile2
-rw-r--r--lib/raid6/neon.uc5
-rw-r--r--lib/raid6/recov_neon_inner.c19
-rw-r--r--lib/rhashtable.c8
-rw-r--r--lib/sbitmap.c11
-rw-r--r--lib/scatterlist.c26
-rw-r--r--lib/stackdepot.c54
-rw-r--r--lib/string.c67
-rw-r--r--lib/strncpy_from_user.c5
-rw-r--r--lib/strnlen_user.c4
-rw-r--r--lib/syscall.c57
-rw-r--r--lib/test_bitmap.c20
-rw-r--r--lib/test_printf.c17
-rw-r--r--lib/test_strscpy.c150
-rw-r--r--lib/test_vmalloc.c6
-rw-r--r--lib/test_xarray.c288
-rw-r--r--lib/ubsan.c69
-rw-r--r--lib/ubsan.h5
-rw-r--r--lib/xarray.c163
-rw-r--r--mm/cma.c10
-rw-r--r--mm/compaction.c29
-rw-r--r--mm/debug.c7
-rw-r--r--mm/filemap.c220
-rw-r--r--mm/gup.c48
-rw-r--r--mm/hmm.c2
-rw-r--r--mm/huge_memory.c40
-rw-r--r--mm/hugetlb.c15
-rw-r--r--mm/kasan/Makefile9
-rw-r--r--mm/kasan/common.c43
-rw-r--r--mm/kasan/init.c10
-rw-r--r--mm/kasan/kasan.h5
-rw-r--r--mm/kasan/report.c10
-rw-r--r--mm/kmemleak.c42
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memblock.c371
-rw-r--r--mm/memcontrol.c20
-rw-r--r--mm/memory.c17
-rw-r--r--mm/memory_hotplug.c58
-rw-r--r--mm/mempolicy.c40
-rw-r--r--mm/migrate.c11
-rw-r--r--mm/mmap.c7
-rw-r--r--mm/mmu_gather.c129
-rw-r--r--mm/page_alloc.c76
-rw-r--r--mm/page_ext.c2
-rw-r--r--mm/page_isolation.c51
-rw-r--r--mm/page_owner.c82
-rw-r--r--mm/percpu.c92
-rw-r--r--mm/shmem.c58
-rw-r--r--mm/slab.c54
-rw-r--r--mm/slab.h3
-rw-r--r--mm/slab_common.c2
-rw-r--r--mm/slub.c26
-rw-r--r--mm/sparse.c29
-rw-r--r--mm/swapfile.c32
-rw-r--r--mm/util.c2
-rw-r--r--mm/vmalloc.c113
-rw-r--r--mm/vmscan.c29
-rw-r--r--mm/vmstat.c5
-rw-r--r--net/8021q/vlan_dev.c26
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_xen.c2
-rw-r--r--net/appletalk/aarp.c15
-rw-r--r--net/appletalk/ddp.c21
-rw-r--r--net/atm/lec.c6
-rw-r--r--net/atm/resources.c10
-rw-r--r--net/batman-adv/bat_v_elp.c6
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c16
-rw-r--r--net/batman-adv/sysfs.c7
-rw-r--r--net/batman-adv/translation-table.c32
-rw-r--r--net/bluetooth/amp.c1
-rw-r--r--net/bluetooth/sco.c4
-rw-r--r--net/bluetooth/smp.c1
-rw-r--r--net/bpf/test_run.c2
-rw-r--r--net/bridge/br_input.c23
-rw-r--r--net/bridge/br_multicast.c7
-rw-r--r--net/bridge/br_netfilter_hooks.c1
-rw-r--r--net/bridge/br_netfilter_ipv6.c2
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/netfilter/ebtables.c3
-rw-r--r--net/ceph/ceph_common.c18
-rw-r--r--net/ceph/messenger.c8
-rw-r--r--net/ceph/mon_client.c9
-rw-r--r--net/ceph/osdmap.c5
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c20
-rw-r--r--net/core/devlink.c5
-rw-r--r--net/core/ethtool.c62
-rw-r--r--net/core/failover.c6
-rw-r--r--net/core/filter.c53
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/gro_cells.c22
-rw-r--r--net/core/lwt_bpf.c2
-rw-r--r--net/core/net-sysfs.c6
-rw-r--r--net/core/net_namespace.c1
-rw-r--r--net/core/ptp_classifier.c7
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/skbuff.c12
-rw-r--r--net/core/skmsg.c1
-rw-r--r--net/core/sock.c4
-rw-r--r--net/dccp/feat.c7
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/dsa/tag_qca.c10
-rw-r--r--net/hsr/hsr_device.c18
-rw-r--r--net/hsr/hsr_framereg.c12
-rw-r--r--net/hsr/hsr_framereg.h1
-rw-r--r--net/ipv4/esp4.c20
-rw-r--r--net/ipv4/esp4_offload.c8
-rw-r--r--net/ipv4/fou.c8
-rw-r--r--net/ipv4/ip_gre.c15
-rw-r--r--net/ipv4/ip_input.c7
-rw-r--r--net/ipv4/ip_options.c4
-rw-r--r--net/ipv4/ip_output.c1
-rw-r--r--net/ipv4/ip_tunnel.c9
-rw-r--r--net/ipv4/ip_vti.c9
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c2
-rw-r--r--net/ipv4/route.c43
-rw-r--r--net/ipv4/syncookies.c7
-rw-r--r--net/ipv4/sysctl_net_ipv4.c5
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_dctcp.c81
-rw-r--r--net/ipv4/tcp_input.c18
-rw-r--r--net/ipv4/tcp_ipv4.c25
-rw-r--r--net/ipv4/udp_offload.c16
-rw-r--r--net/ipv4/xfrm4_policy.c24
-rw-r--r--net/ipv6/addrlabel.c2
-rw-r--r--net/ipv6/esp6_offload.c8
-rw-r--r--net/ipv6/fou6.c4
-rw-r--r--net/ipv6/ila/ila_xlat.c1
-rw-r--r--net/ipv6/ip6_fib.c4
-rw-r--r--net/ipv6/ip6_flowlabel.c22
-rw-r--r--net/ipv6/ip6_gre.c20
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/netfilter/ip6t_srh.c6
-rw-r--r--net/ipv6/route.c92
-rw-r--r--net/ipv6/sit.c9
-rw-r--r--net/ipv6/tcp_ipv6.c8
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c6
-rw-r--r--net/kcm/kcmsock.c16
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/l2tp/l2tp_core.c10
-rw-r--r--net/l2tp/l2tp_ip6.c4
-rw-r--r--net/llc/af_llc.c3
-rw-r--r--net/mac80211/debugfs_netdev.c2
-rw-r--r--net/mac80211/driver-ops.h3
-rw-r--r--net/mac80211/ht.c5
-rw-r--r--net/mac80211/iface.c3
-rw-r--r--net/mac80211/key.c9
-rw-r--r--net/mac80211/mesh_pathtbl.c2
-rw-r--r--net/mac80211/rx.c10
-rw-r--r--net/mac80211/trace_msg.h7
-rw-r--r--net/mac80211/tx.c53
-rw-r--r--net/mpls/mpls_iptunnel.c12
-rw-r--r--net/ncsi/ncsi-netlink.c4
-rw-r--r--net/ncsi/ncsi-rsp.c6
-rw-r--r--net/netfilter/Kconfig1
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c43
-rw-r--r--net/netfilter/nf_conntrack_netlink.c34
-rw-r--r--net/netfilter/nf_conntrack_proto.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c93
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c52
-rw-r--r--net/netfilter/nf_conntrack_sip.c37
-rw-r--r--net/netfilter/nf_nat_core.c11
-rw-r--r--net/netfilter/nf_nat_masquerade.c35
-rw-r--r--net/netfilter/nf_tables_api.c61
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/netfilter/nft_dynset.c13
-rw-r--r--net/netfilter/nft_lookup.c13
-rw-r--r--net/netfilter/nft_objref.c32
-rw-r--r--net/netfilter/nft_redir.c2
-rw-r--r--net/netfilter/nft_set_rbtree.c7
-rw-r--r--net/netfilter/xt_time.c23
-rw-r--r--net/netlink/af_netlink.c3
-rw-r--r--net/netlink/genetlink.c7
-rw-r--r--net/netrom/af_netrom.c76
-rw-r--r--net/netrom/nr_loopback.c2
-rw-r--r--net/netrom/nr_route.c2
-rw-r--r--net/netrom/sysctl_net_netrom.c5
-rw-r--r--net/nfc/llcp_sock.c9
-rw-r--r--net/nfc/nci/hci.c8
-rw-r--r--net/openvswitch/datapath.c12
-rw-r--r--net/openvswitch/flow.h1
-rw-r--r--net/openvswitch/flow_netlink.c4
-rw-r--r--net/openvswitch/flow_netlink.h1
-rw-r--r--net/openvswitch/flow_table.c51
-rw-r--r--net/openvswitch/flow_table.h3
-rw-r--r--net/packet/af_packet.c44
-rw-r--r--net/rds/af_rds.c3
-rw-r--r--net/rds/bind.c2
-rw-r--r--net/rds/ib.h12
-rw-r--r--net/rds/ib_fmr.c19
-rw-r--r--net/rds/ib_frmr.c4
-rw-r--r--net/rds/ib_rdma.c3
-rw-r--r--net/rds/ib_recv.c16
-rw-r--r--net/rds/ib_send.c15
-rw-r--r--net/rds/tcp.c2
-rw-r--r--net/rose/rose_loopback.c27
-rw-r--r--net/rose/rose_subr.c21
-rw-r--r--net/rxrpc/af_rxrpc.c17
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/call_object.c32
-rw-r--r--net/rxrpc/conn_client.c24
-rw-r--r--net/rxrpc/conn_event.c11
-rw-r--r--net/rxrpc/input.c30
-rw-r--r--net/rxrpc/local_object.c3
-rw-r--r--net/rxrpc/output.c11
-rw-r--r--net/rxrpc/peer_event.c5
-rw-r--r--net/rxrpc/sendmsg.c21
-rw-r--r--net/sched/Kconfig3
-rw-r--r--net/sched/act_api.c101
-rw-r--r--net/sched/act_bpf.c25
-rw-r--r--net/sched/act_connmark.c22
-rw-r--r--net/sched/act_csum.c22
-rw-r--r--net/sched/act_gact.c15
-rw-r--r--net/sched/act_ife.c35
-rw-r--r--net/sched/act_ipt.c11
-rw-r--r--net/sched/act_mirred.c25
-rw-r--r--net/sched/act_nat.c15
-rw-r--r--net/sched/act_pedit.c18
-rw-r--r--net/sched/act_police.c13
-rw-r--r--net/sched/act_sample.c31
-rw-r--r--net/sched/act_simple.c54
-rw-r--r--net/sched/act_skbedit.c20
-rw-r--r--net/sched/act_skbmod.c20
-rw-r--r--net/sched/act_tunnel_key.c39
-rw-r--r--net/sched/act_vlan.c22
-rw-r--r--net/sched/cls_api.c51
-rw-r--r--net/sched/cls_flower.c43
-rw-r--r--net/sched/cls_matchall.c5
-rw-r--r--net/sched/sch_api.c15
-rw-r--r--net/sched/sch_cake.c38
-rw-r--r--net/sched/sch_cbq.c10
-rw-r--r--net/sched/sch_drr.c16
-rw-r--r--net/sched/sch_hfsc.c19
-rw-r--r--net/sched/sch_htb.c22
-rw-r--r--net/sched/sch_mq.c2
-rw-r--r--net/sched/sch_mqprio.c3
-rw-r--r--net/sched/sch_multiq.c10
-rw-r--r--net/sched/sch_prio.c10
-rw-r--r--net/sched/sch_qfq.c14
-rw-r--r--net/sched/sch_red.c3
-rw-r--r--net/sched/sch_sfb.c3
-rw-r--r--net/sched/sch_taprio.c2
-rw-r--r--net/sched/sch_tbf.c3
-rw-r--r--net/sctp/auth.c7
-rw-r--r--net/sctp/endpointola.c18
-rw-r--r--net/sctp/protocol.c1
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/sctp/sm_sideeffect.c29
-rw-r--r--net/sctp/sm_statefuns.c35
-rw-r--r--net/sctp/socket.c101
-rw-r--r--net/sctp/stream.c127
-rw-r--r--net/sctp/stream_interleave.c2
-rw-r--r--net/smc/af_smc.c58
-rw-r--r--net/smc/smc_close.c25
-rw-r--r--net/smc/smc_close.h1
-rw-r--r--net/smc/smc_ism.c5
-rw-r--r--net/smc/smc_pnet.c3
-rw-r--r--net/smc/smc_rx.c1
-rw-r--r--net/socket.c277
-rw-r--r--net/strparser/strparser.c14
-rw-r--r--net/sunrpc/Kconfig16
-rw-r--r--net/sunrpc/auth.c136
-rw-r--r--net/sunrpc/auth_gss/Makefile2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c553
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c30
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c8
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c27
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.c15
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.h16
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c15
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.h17
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c3
-rw-r--r--net/sunrpc/auth_gss/trace.c11
-rw-r--r--net/sunrpc/auth_null.c56
-rw-r--r--net/sunrpc/auth_unix.c122
-rw-r--r--net/sunrpc/backchannel_rqst.c42
-rw-r--r--net/sunrpc/cache.c3
-rw-r--r--net/sunrpc/clnt.c646
-rw-r--r--net/sunrpc/sched.c17
-rw-r--r--net/sunrpc/svc.c19
-rw-r--r--net/sunrpc/svc_xprt.c24
-rw-r--r--net/sunrpc/svcsock.c20
-rw-r--r--net/sunrpc/xdr.c121
-rw-r--r--net/sunrpc/xprt.c25
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c3
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c4
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c22
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c12
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c17
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c4
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c9
-rw-r--r--net/sunrpc/xprtrdma/transport.c2
-rw-r--r--net/sunrpc/xprtrdma/verbs.c4
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h12
-rw-r--r--net/sunrpc/xprtsock.c323
-rw-r--r--net/tipc/group.c3
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/name_table.c3
-rw-r--r--net/tipc/net.c5
-rw-r--r--net/tipc/netlink_compat.c24
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tipc/socket.c24
-rw-r--r--net/tipc/sysctl.c8
-rw-r--r--net/tipc/topsrv.c1
-rw-r--r--net/tls/tls_device.c58
-rw-r--r--net/tls/tls_device_fallback.c16
-rw-r--r--net/tls/tls_main.c32
-rw-r--r--net/tls/tls_sw.c20
-rw-r--r--net/vmw_vsock/virtio_transport_common.c22
-rw-r--r--net/wireless/lib80211_crypt_tkip.c1
-rw-r--r--net/wireless/nl80211.c18
-rw-r--r--net/wireless/reg.c44
-rw-r--r--net/wireless/scan.c3
-rw-r--r--net/wireless/util.c6
-rw-r--r--net/x25/af_x25.c7
-rw-r--r--net/xdp/xdp_umem.c19
-rw-r--r--net/xdp/xsk.c5
-rw-r--r--net/xdp/xsk_diag.c4
-rw-r--r--net/xdp/xsk_queue.h4
-rw-r--r--net/xfrm/xfrm_interface.c17
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c32
-rw-r--r--net/xfrm/xfrm_user.c16
-rw-r--r--samples/Kconfig7
-rw-r--r--samples/Makefile2
-rw-r--r--samples/binderfs/Makefile1
-rw-r--r--samples/binderfs/binderfs_example.c83
-rw-r--r--samples/bpf/hbm.c4
-rw-r--r--samples/v4l/v4l2-pci-skeleton.c8
-rw-r--r--scripts/Kbuild.include21
-rw-r--r--scripts/Kconfig.include2
-rw-r--r--scripts/Makefile3
-rw-r--r--scripts/Makefile.asm-generic11
-rw-r--r--scripts/Makefile.build43
-rw-r--r--scripts/Makefile.host6
-rw-r--r--scripts/Makefile.lib48
-rw-r--r--scripts/Makefile.modinst2
-rw-r--r--scripts/Makefile.modpost3
-rw-r--r--scripts/Makefile.ubsan1
-rwxr-xr-xscripts/adjust_autoksyms.sh9
-rw-r--r--scripts/atomic/gen-atomics.sh2
-rwxr-xr-xscripts/checkpatch.pl15
-rwxr-xr-xscripts/clang-version.sh10
-rw-r--r--scripts/coccinelle/api/stream_open.cocci363
-rw-r--r--scripts/coccinelle/free/put_device.cocci57
-rw-r--r--scripts/coccinelle/misc/badty.cocci2
-rwxr-xr-xscripts/dtc/dtx_diff13
-rwxr-xr-xscripts/gcc-version.sh27
-rw-r--r--scripts/gdb/linux/Makefile25
-rw-r--r--scripts/kallsyms.c13
-rw-r--r--scripts/kconfig/Makefile9
-rw-r--r--scripts/kconfig/conf.c13
-rw-r--r--scripts/kconfig/expr.h2
-rw-r--r--scripts/kconfig/lexer.l (renamed from scripts/kconfig/zconf.l)2
-rw-r--r--scripts/kconfig/lkc.h2
-rw-r--r--scripts/kconfig/lxdialog/.gitignore4
-rw-r--r--scripts/kconfig/lxdialog/inputbox.c3
-rw-r--r--scripts/kconfig/nconf.c2
-rw-r--r--scripts/kconfig/nconf.gui.c3
-rw-r--r--scripts/kconfig/parser.y (renamed from scripts/kconfig/zconf.y)0
-rw-r--r--scripts/kconfig/qconf.cc42
-rw-r--r--scripts/kconfig/qconf.h1
-rwxr-xr-xscripts/kernel-doc2
-rwxr-xr-xscripts/leaking_addresses.pl9
-rwxr-xr-xscripts/link-vmlinux.sh55
-rwxr-xr-xscripts/mkcompile_h4
-rw-r--r--scripts/mod/devicetable-offsets.c3
-rw-r--r--scripts/mod/file2alias.c28
-rw-r--r--scripts/mod/modpost.c29
-rw-r--r--scripts/package/Makefile8
-rwxr-xr-xscripts/package/builddeb42
-rwxr-xr-xscripts/package/buildtar4
-rwxr-xr-xscripts/package/mkdebian45
-rw-r--r--scripts/selinux/genheaders/genheaders.c1
-rw-r--r--scripts/selinux/mdp/mdp.c1
-rwxr-xr-xscripts/spdxcheck.py8
-rw-r--r--security/Kconfig38
-rw-r--r--security/apparmor/apparmorfs.c14
-rw-r--r--security/apparmor/crypto.c2
-rw-r--r--security/apparmor/lsm.c49
-rw-r--r--security/apparmor/policy_unpack.c1
-rw-r--r--security/device_cgroup.c2
-rw-r--r--security/inode.c13
-rw-r--r--security/integrity/Kconfig11
-rw-r--r--security/integrity/Makefile8
-rw-r--r--security/integrity/digsig.c3
-rw-r--r--security/integrity/digsig_asymmetric.c11
-rw-r--r--security/integrity/evm/evm_crypto.c4
-rw-r--r--security/integrity/evm/evm_main.c6
-rw-r--r--security/integrity/ima/ima.h1
-rw-r--r--security/integrity/ima/ima_crypto.c14
-rw-r--r--security/integrity/ima/ima_init.c4
-rw-r--r--security/integrity/ima/ima_main.c35
-rw-r--r--security/integrity/ima/ima_queue.c27
-rw-r--r--security/integrity/platform_certs/load_ipl_s390.c36
-rw-r--r--security/keys/dh.c1
-rw-r--r--security/keys/encrypted-keys/encrypted.c5
-rw-r--r--security/keys/process_keys.c41
-rw-r--r--security/keys/request_key.c14
-rw-r--r--security/keys/trusted.c100
-rw-r--r--security/security.c10
-rw-r--r--security/selinux/hooks.c98
-rw-r--r--security/selinux/include/classmap.h1
-rw-r--r--security/selinux/include/security.h10
-rw-r--r--security/selinux/ss/avtab.c40
-rw-r--r--security/selinux/ss/avtab.h4
-rw-r--r--security/selinux/ss/conditional.c6
-rw-r--r--security/selinux/ss/policydb.c123
-rw-r--r--security/selinux/ss/policydb.h12
-rw-r--r--security/selinux/ss/services.c22
-rw-r--r--security/smack/smack.h19
-rw-r--r--security/smack/smack_lsm.c92
-rw-r--r--security/yama/yama_lsm.c8
-rw-r--r--sound/core/info.c12
-rw-r--r--sound/core/init.c18
-rw-r--r--sound/core/oss/pcm_oss.c43
-rw-r--r--sound/core/pcm_native.c9
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c7
-rw-r--r--sound/core/seq/seq_clientmgr.c6
-rw-r--r--sound/drivers/opl3/opl3_voice.h2
-rw-r--r--sound/firewire/motu/motu.c20
-rw-r--r--sound/hda/ext/hdac_ext_bus.c1
-rw-r--r--sound/hda/hdac_bus.c1
-rw-r--r--sound/hda/hdac_component.c6
-rw-r--r--sound/hda/hdac_stream.c5
-rw-r--r--sound/isa/sb/sb8.c4
-rw-r--r--sound/pci/echoaudio/echoaudio.c5
-rw-r--r--sound/pci/hda/hda_codec.c21
-rw-r--r--sound/pci/hda/hda_intel.c18
-rw-r--r--sound/pci/hda/hda_tegra.c12
-rw-r--r--sound/pci/hda/patch_ca0132.c20
-rw-r--r--sound/pci/hda/patch_conexant.c3
-rw-r--r--sound/pci/hda/patch_hdmi.c67
-rw-r--r--sound/pci/hda/patch_realtek.c178
-rw-r--r--sound/soc/codecs/Kconfig1
-rw-r--r--sound/soc/codecs/ab8500-codec.c5
-rw-r--r--sound/soc/codecs/adau1977-spi.c11
-rw-r--r--sound/soc/codecs/cs35l35.c11
-rw-r--r--sound/soc/codecs/cs4270.c1
-rw-r--r--sound/soc/codecs/hdac_hda.c53
-rw-r--r--sound/soc/codecs/hdac_hda.h1
-rw-r--r--sound/soc/codecs/hdmi-codec.c121
-rw-r--r--sound/soc/codecs/nau8810.c4
-rw-r--r--sound/soc/codecs/nau8824.c46
-rw-r--r--sound/soc/codecs/rt5682.c56
-rw-r--r--sound/soc/codecs/tlv320aic32x4-i2c.c4
-rw-r--r--sound/soc/codecs/tlv320aic32x4-spi.c4
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c2
-rw-r--r--sound/soc/codecs/tlv320aic3x.c4
-rw-r--r--sound/soc/codecs/wm_adsp.c40
-rw-r--r--sound/soc/codecs/wm_adsp.h1
-rw-r--r--sound/soc/fsl/fsl_asrc.c14
-rw-r--r--sound/soc/fsl/fsl_esai.c47
-rw-r--r--sound/soc/generic/audio-graph-card.c11
-rw-r--r--sound/soc/generic/simple-card.c12
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c8
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c9
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c9
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c10
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c14
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c47
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c9
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c9
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c2
-rw-r--r--sound/soc/intel/skylake/skl-messages.c1
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c28
-rw-r--r--sound/soc/mediatek/common/mtk-btcvsd.c69
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-clk.c4
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c4
-rw-r--r--sound/soc/samsung/i2s.c10
-rw-r--r--sound/soc/samsung/odroid.c4
-rw-r--r--sound/soc/sh/rcar/core.c2
-rw-r--r--sound/soc/sh/rcar/rsnd.h5
-rw-r--r--sound/soc/sh/rcar/src.c21
-rw-r--r--sound/soc/soc-core.c6
-rw-r--r--sound/soc/soc-dapm.c11
-rw-r--r--sound/soc/soc-pcm.c66
-rw-r--r--sound/soc/soc-topology.c7
-rw-r--r--sound/soc/stm/stm32_adfsdm.c38
-rw-r--r--sound/soc/stm/stm32_i2s.c3
-rw-r--r--sound/soc/stm/stm32_sai.c8
-rw-r--r--sound/soc/stm/stm32_sai_sub.c114
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c1
-rw-r--r--sound/usb/line6/driver.c60
-rw-r--r--sound/usb/line6/podhd.c21
-rw-r--r--sound/usb/line6/toneport.c24
-rw-r--r--sound/usb/usx2y/usb_stream.c5
-rw-r--r--sound/xen/xen_snd_front_alsa.c2
-rw-r--r--tools/Makefile14
-rw-r--r--tools/arch/alpha/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/arc/include/uapi/asm/unistd.h51
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h2
-rw-r--r--tools/arch/hexagon/include/uapi/asm/unistd.h40
-rw-r--r--tools/arch/mips/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/parisc/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h2
-rw-r--r--tools/arch/riscv/include/uapi/asm/unistd.h42
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h1
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h1
-rw-r--r--tools/arch/xtensa/include/uapi/asm/mman.h2
-rw-r--r--tools/bpf/bpftool/map.c3
-rw-r--r--tools/bpf/bpftool/prog.c266
-rw-r--r--tools/build/Makefile.feature8
-rw-r--r--tools/build/feature/Makefile6
-rw-r--r--tools/build/feature/test-all.c10
-rw-r--r--tools/build/feature/test-libopencsd.c4
-rw-r--r--tools/build/feature/test-libzstd.c12
-rw-r--r--tools/debugging/Makefile16
-rwxr-xr-xtools/debugging/kernel-chktaint202
-rw-r--r--tools/include/linux/poison.h3
-rw-r--r--tools/include/uapi/asm-generic/mman-common-tools.h23
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h4
-rw-r--r--tools/include/uapi/asm-generic/mman.h2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h158
-rw-r--r--tools/include/uapi/drm/i915_drm.h64
-rw-r--r--tools/include/uapi/linux/bpf.h188
-rw-r--r--tools/include/uapi/linux/fcntl.h1
-rw-r--r--tools/include/uapi/linux/in.h9
-rw-r--r--tools/include/uapi/linux/lirc.h12
-rw-r--r--tools/include/uapi/linux/mman.h4
-rw-r--r--tools/include/uapi/sound/asound.h1
-rw-r--r--tools/io_uring/io_uring-bench.c32
-rw-r--r--tools/lib/bpf/.gitignore1
-rw-r--r--tools/lib/bpf/Makefile50
-rw-r--r--tools/lib/bpf/README.rst1
-rw-r--r--tools/lib/bpf/btf.c50
-rw-r--r--tools/lib/bpf/libbpf.c264
-rw-r--r--tools/lib/bpf/libbpf.h64
-rw-r--r--tools/lib/bpf/libbpf.map3
-rw-r--r--tools/lib/bpf/xsk.c15
-rw-r--r--tools/lib/traceevent/event-parse-api.c278
-rw-r--r--tools/lib/traceevent/event-parse-local.h6
-rw-r--r--tools/lib/traceevent/event-parse.c913
-rw-r--r--tools/lib/traceevent/event-parse.h154
-rw-r--r--tools/lib/traceevent/event-plugin.c32
-rw-r--r--tools/lib/traceevent/kbuffer-parse.c49
-rw-r--r--tools/lib/traceevent/kbuffer.h13
-rw-r--r--tools/lib/traceevent/parse-filter.c216
-rw-r--r--tools/lib/traceevent/parse-utils.c2
-rw-r--r--tools/lib/traceevent/plugin_cfg80211.c8
-rw-r--r--tools/lib/traceevent/plugin_function.c14
-rw-r--r--tools/lib/traceevent/plugin_hrtimer.c12
-rw-r--r--tools/lib/traceevent/plugin_jbd2.c12
-rw-r--r--tools/lib/traceevent/plugin_kmem.c32
-rw-r--r--tools/lib/traceevent/plugin_kvm.c48
-rw-r--r--tools/lib/traceevent/plugin_mac80211.c8
-rw-r--r--tools/lib/traceevent/plugin_sched_switch.c18
-rw-r--r--tools/lib/traceevent/plugin_scsi.c8
-rw-r--r--tools/lib/traceevent/plugin_xen.c8
-rw-r--r--tools/memory-model/Documentation/explanation.txt289
-rw-r--r--tools/memory-model/README33
-rw-r--r--tools/memory-model/linux-kernel.bell35
-rw-r--r--tools/memory-model/linux-kernel.cat39
-rw-r--r--tools/memory-model/linux-kernel.def6
-rw-r--r--tools/memory-model/lock.cat3
-rw-r--r--tools/objtool/Makefile7
-rw-r--r--tools/objtool/arch.h8
-rw-r--r--tools/objtool/arch/x86/decode.c21
-rw-r--r--tools/objtool/builtin-check.c4
-rw-r--r--tools/objtool/builtin.h2
-rw-r--r--tools/objtool/check.c404
-rw-r--r--tools/objtool/check.h4
-rw-r--r--tools/objtool/elf.c15
-rw-r--r--tools/objtool/elf.h3
-rw-r--r--tools/objtool/special.c18
-rw-r--r--tools/objtool/special.h1
-rw-r--r--tools/objtool/warn.h8
-rw-r--r--tools/perf/Documentation/Build.txt24
-rw-r--r--tools/perf/Documentation/perf-config.txt16
-rw-r--r--tools/perf/Documentation/perf-diff.txt56
-rw-r--r--tools/perf/Documentation/perf-record.txt23
-rw-r--r--tools/perf/Documentation/perf-report.txt13
-rw-r--r--tools/perf/Documentation/perf-script.txt3
-rw-r--r--tools/perf/Documentation/perf-stat.txt5
-rw-r--r--tools/perf/Documentation/tips.txt7
-rw-r--r--tools/perf/Makefile.config35
-rw-r--r--tools/perf/Makefile.perf7
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c2
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c2
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl10
-rw-r--r--tools/perf/arch/x86/util/Build1
-rw-r--r--tools/perf/arch/x86/util/archinsn.c26
-rw-r--r--tools/perf/bench/epoll-ctl.c2
-rw-r--r--tools/perf/bench/epoll-wait.c2
-rw-r--r--tools/perf/bench/numa.c4
-rw-r--r--tools/perf/builtin-c2c.c8
-rw-r--r--tools/perf/builtin-diff.c168
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-list.c8
-rw-r--r--tools/perf/builtin-record.c119
-rw-r--r--tools/perf/builtin-report.c88
-rw-r--r--tools/perf/builtin-script.c168
-rw-r--r--tools/perf/builtin-stat.c32
-rw-r--r--tools/perf/builtin-top.c63
-rw-r--r--tools/perf/builtin-version.c2
-rw-r--r--tools/perf/builtin.h3
-rwxr-xr-xtools/perf/check-headers.sh2
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c196
-rw-r--r--tools/perf/include/bpf/bpf.h8
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/perf.h3
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json594
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/extended.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/branch.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/cache.json287
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/core.json134
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json168
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/memory.json162
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/other.json65
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json260
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json1630
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json51
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json1640
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json278
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json161
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json148
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json304
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json980
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json260
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/cache.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json175
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json33
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json234
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json172
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json33
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json252
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json172
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json250
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json256
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json666
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/memory.json268
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json680
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json126
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json268
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/memory.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json1328
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json144
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/pipeline.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/cache.json2157
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/memory.json1121
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json274
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json786
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json234
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/memory.json751
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json304
-rw-r--r--tools/perf/scripts/python/check-perf-trace.py76
-rw-r--r--tools/perf/scripts/python/compaction-times.py8
-rw-r--r--tools/perf/scripts/python/event_analyzing_sample.py48
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py77
-rw-r--r--tools/perf/scripts/python/export-to-sqlite.py38
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py473
-rw-r--r--tools/perf/scripts/python/failed-syscalls-by-pid.py38
-rw-r--r--tools/perf/scripts/python/futex-contention.py10
-rw-r--r--tools/perf/scripts/python/intel-pt-events.py60
-rw-r--r--tools/perf/scripts/python/mem-phys-addr.py7
-rwxr-xr-xtools/perf/scripts/python/net_dropmonitor.py2
-rw-r--r--tools/perf/scripts/python/netdev-times.py12
-rw-r--r--tools/perf/scripts/python/sched-migration.py6
-rw-r--r--tools/perf/scripts/python/sctop.py13
-rwxr-xr-xtools/perf/scripts/python/stackcollapse.py2
-rw-r--r--tools/perf/scripts/python/syscall-counts-by-pid.py47
-rw-r--r--tools/perf/scripts/python/syscall-counts.py31
-rw-r--r--tools/perf/tests/attr/test-record-C02
-rw-r--r--tools/perf/tests/attr/test-record-basic2
-rw-r--r--tools/perf/tests/attr/test-record-branch-any2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any_call2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any_ret2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-hv2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-ind_call2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-k2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-u2
-rw-r--r--tools/perf/tests/attr/test-record-count2
-rw-r--r--tools/perf/tests/attr/test-record-data2
-rw-r--r--tools/perf/tests/attr/test-record-freq2
-rw-r--r--tools/perf/tests/attr/test-record-graph-default2
-rw-r--r--tools/perf/tests/attr/test-record-graph-dwarf2
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp2
-rw-r--r--tools/perf/tests/attr/test-record-group2
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling2
-rw-r--r--tools/perf/tests/attr/test-record-group12
-rw-r--r--tools/perf/tests/attr/test-record-no-buffering2
-rw-r--r--tools/perf/tests/attr/test-record-no-inherit2
-rw-r--r--tools/perf/tests/attr/test-record-no-samples2
-rw-r--r--tools/perf/tests/attr/test-record-period2
-rw-r--r--tools/perf/tests/attr/test-record-raw2
-rw-r--r--tools/perf/tests/backward-ring-buffer.c2
-rw-r--r--tools/perf/tests/evsel-tp-sched.c1
-rw-r--r--tools/perf/tests/expr.c5
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c4
-rwxr-xr-xtools/perf/trace/beauty/mmap_flags.sh14
-rw-r--r--tools/perf/trace/beauty/msg_flags.c2
-rw-r--r--tools/perf/trace/beauty/renameat.c1
-rw-r--r--tools/perf/trace/strace/groups/string65
-rw-r--r--tools/perf/ui/browser.c10
-rw-r--r--tools/perf/ui/browsers/Build1
-rw-r--r--tools/perf/ui/browsers/annotate.c2
-rw-r--r--tools/perf/ui/browsers/hists.c141
-rw-r--r--tools/perf/ui/browsers/res_sample.c91
-rw-r--r--tools/perf/ui/browsers/scripts.c274
-rw-r--r--tools/perf/util/annotate.c237
-rw-r--r--tools/perf/util/annotate.h8
-rw-r--r--tools/perf/util/archinsn.h12
-rw-r--r--tools/perf/util/auxtrace.c3
-rw-r--r--tools/perf/util/bpf-event.c425
-rw-r--r--tools/perf/util/bpf-event.h42
-rw-r--r--tools/perf/util/build-id.c1
-rw-r--r--tools/perf/util/c++/clang.cpp2
-rw-r--r--tools/perf/util/cloexec.c1
-rw-r--r--tools/perf/util/config.c3
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c1
-rw-r--r--tools/perf/util/cs-etm.c14
-rw-r--r--tools/perf/util/data-convert-bt.c4
-rw-r--r--tools/perf/util/data.c111
-rw-r--r--tools/perf/util/data.h14
-rw-r--r--tools/perf/util/db-export.c15
-rw-r--r--tools/perf/util/db-export.h3
-rw-r--r--tools/perf/util/dso.c43
-rw-r--r--tools/perf/util/dso.h8
-rw-r--r--tools/perf/util/env.c159
-rw-r--r--tools/perf/util/env.h24
-rw-r--r--tools/perf/util/event.h1
-rw-r--r--tools/perf/util/evlist.c143
-rw-r--r--tools/perf/util/evlist.h17
-rw-r--r--tools/perf/util/evsel.c111
-rw-r--r--tools/perf/util/evsel.h12
-rw-r--r--tools/perf/util/header.c299
-rw-r--r--tools/perf/util/header.h7
-rw-r--r--tools/perf/util/hist.c105
-rw-r--r--tools/perf/util/hist.h31
-rw-r--r--tools/perf/util/intel-bts.c20
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c20
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/perf/util/machine.c32
-rw-r--r--tools/perf/util/map.c38
-rw-r--r--tools/perf/util/map.h4
-rw-r--r--tools/perf/util/mmap.c4
-rw-r--r--tools/perf/util/mmap.h3
-rw-r--r--tools/perf/util/ordered-events.c2
-rw-r--r--tools/perf/util/parse-events.c60
-rw-r--r--tools/perf/util/parse-events.h5
-rw-r--r--tools/perf/util/parse-events.l11
-rw-r--r--tools/perf/util/parse-events.y12
-rw-r--r--tools/perf/util/pmu.c24
-rw-r--r--tools/perf/util/pmu.h1
-rw-r--r--tools/perf/util/probe-event.c15
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c10
-rw-r--r--tools/perf/util/session.c40
-rw-r--r--tools/perf/util/sort.c91
-rw-r--r--tools/perf/util/sort.h12
-rw-r--r--tools/perf/util/stat-display.c18
-rw-r--r--tools/perf/util/stat.c12
-rw-r--r--tools/perf/util/symbol.c5
-rw-r--r--tools/perf/util/symbol_conf.h3
-rw-r--r--tools/perf/util/thread-stack.c16
-rw-r--r--tools/perf/util/thread-stack.h6
-rw-r--r--tools/perf/util/thread.c23
-rw-r--r--tools/perf/util/thread.h3
-rw-r--r--tools/perf/util/time-utils.c59
-rw-r--r--tools/perf/util/time-utils.h7
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rw-r--r--tools/perf/util/trace-event-read.c2
-rw-r--r--tools/perf/util/trace-event.c4
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c48
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c8
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c8
-rw-r--r--tools/power/cpupower/lib/cpufreq.c19
-rw-r--r--tools/power/cpupower/lib/cpufreq.h16
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c42
-rw-r--r--tools/power/x86/turbostat/turbostat.c280
-rw-r--r--tools/testing/nvdimm/Kbuild7
-rw-r--r--tools/testing/nvdimm/dax-dev.c16
-rw-r--r--tools/testing/nvdimm/test/nfit.c17
-rw-r--r--tools/testing/selftests/Makefile65
-rw-r--r--tools/testing/selftests/bpf/Makefile33
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c68
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lock.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/signal_pending.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spinlock.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields_kern.c88
-rw-r--r--tools/testing/selftests/bpf/test_btf.c91
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh54
-rw-r--r--tools/testing/selftests/bpf/test_sock_fields.c134
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c88
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c22
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_imm64.c15
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c168
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c4
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c38
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh20
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh28
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc30
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc28
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc43
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc42
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c1
-rw-r--r--tools/testing/selftests/ima/config4
-rwxr-xr-xtools/testing/selftests/ima/test_kexec_load.sh54
-rw-r--r--tools/testing/selftests/ipc/msgque.c11
-rw-r--r--tools/testing/selftests/ir/ir_loopback.c9
-rw-r--r--tools/testing/selftests/kexec/Makefile (renamed from tools/testing/selftests/ima/Makefile)5
-rw-r--r--tools/testing/selftests/kexec/config3
-rwxr-xr-xtools/testing/selftests/kexec/kexec_common_lib.sh220
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_file_load.sh208
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_load.sh47
-rw-r--r--tools/testing/selftests/kselftest_harness.h2
-rw-r--r--tools/testing/selftests/kselftest_module.h48
-rwxr-xr-xtools/testing/selftests/kselftest_module.sh84
-rw-r--r--tools/testing/selftests/kvm/.gitignore1
-rw-r--r--tools/testing/selftests/kvm/Makefile12
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c9
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h1
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h27
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c21
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c20
-rw-r--r--tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c35
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/smm_test.c157
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c95
-rw-r--r--tools/testing/selftests/lib.mk38
-rw-r--r--tools/testing/selftests/lib/Makefile2
-rwxr-xr-xtools/testing/selftests/lib/bitmap.sh18
-rw-r--r--tools/testing/selftests/lib/config1
-rwxr-xr-xtools/testing/selftests/lib/prime_numbers.sh17
-rwxr-xr-xtools/testing/selftests/lib/printf.sh19
-rwxr-xr-xtools/testing/selftests/lib/strscpy.sh3
-rw-r--r--tools/testing/selftests/livepatch/Makefile3
-rwxr-xr-xtools/testing/selftests/net/fib_rule_tests.sh10
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh94
-rwxr-xr-xtools/testing/selftests/net/run_afpackettests5
-rwxr-xr-xtools/testing/selftests/net/run_netsocktests2
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_icmp_related.sh283
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh36
-rw-r--r--tools/testing/selftests/pidfd/Makefile6
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c381
-rw-r--r--tools/testing/selftests/proc/proc-pid-vm.c53
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-002.c20
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configNR_CPUS.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config_override.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh19
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh17
-rw-r--r--tools/testing/selftests/rcutorture/bin/functions.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/jitter.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-find-errors.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh15
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh17
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh17
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh17
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh17
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h9
-rw-r--r--tools/testing/selftests/rseq/rseq.h1
-rwxr-xr-xtools/testing/selftests/rseq/run_param_test.sh7
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c77
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh55
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/csum.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gact.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/nat.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json51
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/sample.json49
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/simple.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json20
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c1
-rw-r--r--tools/testing/selftests/tpm2/Makefile4
-rwxr-xr-xtools/testing/selftests/tpm2/test_smoke.sh4
-rwxr-xr-xtools/testing/selftests/tpm2/test_space.sh4
-rw-r--r--tools/testing/selftests/tpm2/tpm2.py697
-rw-r--r--tools/testing/selftests/tpm2/tpm2_tests.py290
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/vm/test_vmalloc.sh0
-rw-r--r--tools/testing/selftests/x86/mpx-dig.c2
-rw-r--r--virt/kvm/arm/arch_timer.c611
-rw-r--r--virt/kvm/arm/arm.c75
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c6
-rw-r--r--virt/kvm/arm/mmu.c153
-rw-r--r--virt/kvm/arm/trace.h107
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c31
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c3
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c8
-rw-r--r--virt/kvm/arm/vgic/vgic.c35
-rw-r--r--virt/kvm/arm/vgic/vgic.h1
-rw-r--r--virt/kvm/coalesced_mmio.c3
-rw-r--r--virt/kvm/eventfd.c13
-rw-r--r--virt/kvm/irqchip.c9
-rw-r--r--virt/kvm/kvm_main.c119
-rw-r--r--virt/kvm/vfio.c4
6208 files changed, 178928 insertions, 111636 deletions
diff --git a/.clang-format b/.clang-format
index bc2ffb2a0b53..f3923a1f9858 100644
--- a/.clang-format
+++ b/.clang-format
@@ -78,6 +78,8 @@ ForEachMacros:
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
+ - '__bio_for_each_bvec'
+ - 'bio_for_each_bvec'
- 'bio_for_each_integrity_vec'
- '__bio_for_each_segment'
- 'bio_for_each_segment'
@@ -118,10 +120,12 @@ ForEachMacros:
- 'drm_for_each_legacy_plane'
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
+ - 'drm_for_each_privobj'
- 'drm_mm_for_each_hole'
- 'drm_mm_for_each_node'
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
+ - 'flow_action_for_each'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_available_child_of_node'
@@ -158,6 +162,9 @@ ForEachMacros:
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
+ - 'for_each_element'
+ - 'for_each_element_extid'
+ - 'for_each_element_id'
- 'for_each_endpoint_of_node'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
@@ -195,6 +202,7 @@ ForEachMacros:
- 'for_each_net_rcu'
- 'for_each_new_connector_in_state'
- 'for_each_new_crtc_in_state'
+ - 'for_each_new_mst_mgr_in_state'
- 'for_each_new_plane_in_state'
- 'for_each_new_private_obj_in_state'
- 'for_each_node'
@@ -210,8 +218,10 @@ ForEachMacros:
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
+ - 'for_each_old_mst_mgr_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
+ - 'for_each_oldnew_mst_mgr_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
@@ -240,8 +250,12 @@ ForEachMacros:
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_sg'
+ - 'for_each_sg_dma_page'
- 'for_each_sg_page'
- 'for_each_sibling_event'
+ - 'for_each_subelement'
+ - 'for_each_subelement_extid'
+ - 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_zone'
@@ -251,6 +265,8 @@ ForEachMacros:
- 'fwnode_for_each_child_node'
- 'fwnode_graph_for_each_endpoint'
- 'gadget_for_each_ep'
+ - 'genradix_for_each'
+ - 'genradix_for_each_from'
- 'hash_for_each'
- 'hash_for_each_possible'
- 'hash_for_each_possible_rcu'
@@ -289,11 +305,14 @@ ForEachMacros:
- 'idr_for_each_entry_ul'
- 'inet_bind_bucket_for_each'
- 'inet_lhash2_for_each_icsk_rcu'
- - 'iov_for_each'
- 'key_for_each'
- 'key_for_each_safe'
- 'klp_for_each_func'
+ - 'klp_for_each_func_safe'
+ - 'klp_for_each_func_static'
- 'klp_for_each_object'
+ - 'klp_for_each_object_safe'
+ - 'klp_for_each_object_static'
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
@@ -324,6 +343,8 @@ ForEachMacros:
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
+ - 'mp_bvec_for_each_page'
+ - 'mp_bvec_for_each_segment'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
@@ -360,6 +381,7 @@ ForEachMacros:
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rbtree_postorder_for_each_entry_safe'
+ - 'rdma_for_each_port'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
@@ -374,6 +396,7 @@ ForEachMacros:
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_continue'
- '__rq_for_each_bio'
+ - 'rq_for_each_bvec'
- 'rq_for_each_segment'
- 'scsi_for_each_prot_sg'
- 'scsi_for_each_sg'
@@ -409,6 +432,8 @@ ForEachMacros:
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'xa_for_each'
+ - 'xa_for_each_marked'
+ - 'xa_for_each_start'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
diff --git a/.mailmap b/.mailmap
index 37e1847c7988..ae2bcad06f4b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -156,6 +156,8 @@ Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
Mythri P K <mythripk@ti.com>
Nguyen Anh Quynh <aquynh@gmail.com>
+Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
+Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Patrick Mochel <mochel@digitalimplant.org>
Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
@@ -224,3 +226,5 @@ Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
+Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
diff --git a/Documentation/ABI/obsolete/sysfs-class-dax b/Documentation/ABI/obsolete/sysfs-class-dax
new file mode 100644
index 000000000000..2cb9fc5e8bd1
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-class-dax
@@ -0,0 +1,22 @@
+What: /sys/class/dax/
+Date: May, 2016
+KernelVersion: v4.7
+Contact: linux-nvdimm@lists.01.org
+Description: Device DAX is the device-centric analogue of Filesystem
+ DAX (CONFIG_FS_DAX). It allows memory ranges to be
+ allocated and mapped without need of an intervening file
+ system. Device DAX is strict, precise and predictable.
+ Specifically this interface:
+
+ 1/ Guarantees fault granularity with respect to a given
+ page size (pte, pmd, or pud) set at configuration time.
+
+ 2/ Enforces deterministic behavior by being strict about
+ what fault scenarios are supported.
+
+ The /sys/class/dax/ interface enumerates all the
+ device-dax instances in the system. The ABI is
+ deprecated and will be removed after 2020. It is
+ replaced with the DAX bus interface /sys/bus/dax/ where
+ device-dax instances can be found under
+ /sys/bus/dax/devices/
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index 169fe08a649b..156319fc5b80 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -21,7 +21,19 @@ Description: These files show with which CPLD versions have been burned
The files are read only.
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
- cpld3_version
+ fan_dir
+
+Date: December 2018
+KernelVersion: 5.0
+Contact: Vadim Pasternak <vadimp@mellanox.com>
+Description: This file shows the system fans direction:
+ forward direction - relevant bit is set 0;
+ reversed direction - relevant bit is set 1.
+
+ The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
+ jtag_enable
Date: November 2018
KernelVersion: 5.0
diff --git a/Documentation/ABI/testing/debugfs-wilco-ec b/Documentation/ABI/testing/debugfs-wilco-ec
new file mode 100644
index 000000000000..f814f112e213
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-wilco-ec
@@ -0,0 +1,23 @@
+What: /sys/kernel/debug/wilco_ec/raw
+Date: January 2019
+KernelVersion: 5.1
+Description:
+ Write and read raw mailbox commands to the EC.
+
+ For writing:
+ Bytes 0-1 indicate the message type:
+ 00 F0 = Execute Legacy Command
+ 00 F2 = Read/Write NVRAM Property
+ Byte 2 provides the command code
+ Bytes 3+ consist of the data passed in the request
+
+ At least three bytes are required: two for the message type and
+ one for the command code; any further bytes carry request data.
+
+ Example:
+ // Request EC info type 3 (EC firmware build date)
+ $ echo 00 f0 38 00 03 00 > raw
+ // View the result. The decoded ASCII result "12/21/18" is
+ // included after the raw hex.
+ $ cat raw
+ 00 31 32 2f 32 31 2f 31 38 00 38 00 01 00 2f 00 .12/21/18.8...
diff --git a/Documentation/ABI/testing/sysfs-class-watchdog b/Documentation/ABI/testing/sysfs-class-watchdog
index 736046b33040..6317ade5ad19 100644
--- a/Documentation/ABI/testing/sysfs-class-watchdog
+++ b/Documentation/ABI/testing/sysfs-class-watchdog
@@ -49,3 +49,26 @@ Contact: Wim Van Sebroeck <wim@iguana.be>
Description:
It is a read only file. It is read to know about current
value of timeout programmed.
+
+What: /sys/class/watchdog/watchdogn/pretimeout
+Date: December 2016
+Contact: Wim Van Sebroeck <wim@iguana.be>
+Description:
+ It is a read only file. It specifies the time in seconds before
+ timeout when the pretimeout interrupt is delivered. Pretimeout
+ is an optional feature.
+
+What: /sys/class/watchdog/watchdogn/pretimeout_available_governors
+Date: February 2017
+Contact: Wim Van Sebroeck <wim@iguana.be>
+Description:
+ It is a read only file. It shows the pretimeout governors
+ available for this watchdog.
+
+What: /sys/class/watchdog/watchdogn/pretimeout_governor
+Date: February 2017
+Contact: Wim Van Sebroeck <wim@iguana.be>
+Description:
+ It is a read/write file. When read, the currently assigned
+ pretimeout governor is returned. When written, it sets
+ the pretimeout governor.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 9605dbd4b5b5..4fb76c0e8d30 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -511,10 +511,30 @@ Description: Control Symetric Multi Threading (SMT)
control: Read/write interface to control SMT. Possible
values:
- "on" SMT is enabled
- "off" SMT is disabled
- "forceoff" SMT is force disabled. Cannot be changed.
- "notsupported" SMT is not supported by the CPU
+ "on" SMT is enabled
+ "off" SMT is disabled
+ "forceoff" SMT is force disabled. Cannot be changed.
+ "notsupported" SMT is not supported by the CPU
+ "notimplemented" SMT runtime toggling is not
+ implemented for the architecture
If control status is "forceoff" or "notsupported" writes
are rejected.
+
+What: /sys/devices/system/cpu/cpu#/power/energy_perf_bias
+Date: March 2019
+Contact: linux-pm@vger.kernel.org
+Description: Intel Energy and Performance Bias Hint (EPB)
+
+ EPB for the given CPU in a sliding scale 0 - 15, where a value
+ of 0 corresponds to a hint preference for highest performance
+ and a value of 15 corresponds to the maximum energy savings.
+
+ In order to change the EPB value for the CPU, write either
+ a number in the 0 - 15 sliding scale above, or one of the
+ strings "performance", "balance-performance", "normal",
+ "balance-power", or "power" (each of which maps to a value
+ in that scale) to this attribute.
+
+ This attribute is present for all online CPUs supporting the
+ Intel EPB feature.
diff --git a/Documentation/ABI/testing/sysfs-fs-ext4 b/Documentation/ABI/testing/sysfs-fs-ext4
index c631253cf85c..78604db56279 100644
--- a/Documentation/ABI/testing/sysfs-fs-ext4
+++ b/Documentation/ABI/testing/sysfs-fs-ext4
@@ -109,3 +109,10 @@ Description:
write operation (since a 4k random write might turn
into a much larger write due to the zeroout
operation).
+
+What: /sys/fs/ext4/<disk>/journal_task
+Date: February 2019
+Contact: "Theodore Ts'o" <tytso@mit.edu>
+Description:
+ This file is read-only and shows the pid of the journal thread
+ in the current pid namespace, or 0 if the task is unreachable.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index a7ce33199457..91822ce25831 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -86,6 +86,13 @@ Description:
The unit size is one block, now only support configuring in range
of [1, 512].
+What: /sys/fs/f2fs/<disk>/umount_discard_timeout
+Date: January 2019
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+ Set timeout to issue discard commands during umount.
+ Default: 5 secs
+
What: /sys/fs/f2fs/<disk>/max_victim_search
Date: January 2014
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index f0cc3f772265..1a721d0f35c8 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -146,114 +146,75 @@ What about block I/O and networking buffers? The block I/O and
networking subsystems make sure that the buffers they use are valid
for you to DMA from/to.
-DMA addressing limitations
+DMA addressing capabilities
==========================
-Does your device have any DMA addressing limitations? For example, is
-your device only capable of driving the low order 24-bits of address?
-If so, you need to inform the kernel of this fact.
+By default, the kernel assumes that your device can address 32 bits of DMA
+address space. For a 64-bit capable device, this needs to be increased, and
+for a device with addressing limitations, it needs to be decreased.
-By default, the kernel assumes that your device can address the full
-32-bits. For a 64-bit capable device, this needs to be increased.
-And for a device with limitations, as discussed in the previous
-paragraph, it needs to be decreased.
+Special note about PCI: The PCI-X specification requires PCI-X devices to support
+64-bit addressing (DAC) for all transactions. And at least one platform (SGI
+SN2) requires 64-bit consistent allocations to operate correctly when the IO
+bus is in PCI-X mode.
-Special note about PCI: PCI-X specification requires PCI-X devices to
-support 64-bit addressing (DAC) for all transactions. And at least
-one platform (SGI SN2) requires 64-bit consistent allocations to
-operate correctly when the IO bus is in PCI-X mode.
+For correct operation, you must set the DMA mask to inform the kernel about
+your device's DMA addressing capabilities.
-For correct operation, you must interrogate the kernel in your device
-probe routine to see if the DMA controller on the machine can properly
-support the DMA addressing limitation your device has. It is good
-style to do this even if your device holds the default setting,
-because this shows that you did think about these issues wrt. your
-device.
-
-The query is performed via a call to dma_set_mask_and_coherent()::
+This is performed via a call to dma_set_mask_and_coherent()::
int dma_set_mask_and_coherent(struct device *dev, u64 mask);
-which will query the mask for both streaming and coherent APIs together.
-If you have some special requirements, then the following two separate
-queries can be used instead:
+which will set the mask for both streaming and coherent APIs together. If you
+have some special requirements, then the following two separate calls can be
+used instead:
- The query for streaming mappings is performed via a call to
+ The setup for streaming mappings is performed via a call to
dma_set_mask()::
int dma_set_mask(struct device *dev, u64 mask);
- The query for consistent allocations is performed via a call
+ The setup for consistent allocations is performed via a call
to dma_set_coherent_mask()::
int dma_set_coherent_mask(struct device *dev, u64 mask);
-Here, dev is a pointer to the device struct of your device, and mask
-is a bit mask describing which bits of an address your device
-supports. It returns zero if your card can perform DMA properly on
-the machine given the address mask you provided. In general, the
-device struct of your device is embedded in the bus-specific device
-struct of your device. For example, &pdev->dev is a pointer to the
-device struct of a PCI device (pdev is a pointer to the PCI device
-struct of your device).
+Here, dev is a pointer to the device struct of your device, and mask is a bit
+mask describing which bits of an address your device supports. Often the
+device struct of your device is embedded in the bus-specific device struct of
+your device. For example, &pdev->dev is a pointer to the device struct of a
+PCI device (pdev is a pointer to the PCI device struct of your device).
-If it returns non-zero, your device cannot perform DMA properly on
-this platform, and attempting to do so will result in undefined
-behavior. You must either use a different mask, or not use DMA.
+These calls usually return zero to indicate that your device can perform DMA
+properly on the machine given the address mask you provided, but they might
+return an error if the mask is too small to be supportable on the given
+system. If they return non-zero, your device cannot perform DMA properly on
+this platform, and attempting to do so will result in undefined behavior.
+You must not use DMA on this device unless the dma_set_mask family of
+functions has returned success.
-This means that in the failure case, you have three options:
+This means that in the failure case, you have two options:
-1) Use another DMA mask, if possible (see below).
-2) Use some non-DMA mode for data transfer, if possible.
-3) Ignore this device and do not initialize it.
+1) Use some non-DMA mode for data transfer, if possible.
+2) Ignore this device and do not initialize it.
-It is recommended that your driver print a kernel KERN_WARNING message
-when you end up performing either #2 or #3. In this manner, if a user
-of your driver reports that performance is bad or that the device is not
-even detected, you can ask them for the kernel messages to find out
-exactly why.
+It is recommended that your driver print a kernel KERN_WARNING message when
+setting the DMA mask fails. In this manner, if a user of your driver reports
+that performance is bad or that the device is not even detected, you can ask
+them for the kernel messages to find out exactly why.
-The standard 32-bit addressing device would do something like this::
+The standard 64-bit addressing device would do something like this::
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
dev_warn(dev, "mydev: No suitable DMA available\n");
goto ignore_this_device;
}
-Another common scenario is a 64-bit capable device. The approach here
-is to try for 64-bit addressing, but back down to a 32-bit mask that
-should not fail. The kernel may fail the 64-bit mask not because the
-platform is not capable of 64-bit addressing. Rather, it may fail in
-this case simply because 32-bit addressing is done more efficiently
-than 64-bit addressing. For example, Sparc64 PCI SAC addressing is
-more efficient than DAC addressing.
-
-Here is how you would handle a 64-bit capable device which can drive
-all 64-bits when accessing streaming DMA::
-
- int using_dac;
+If the device only supports 32-bit addressing for descriptors in the
+coherent allocations, but supports full 64 bits for streaming mappings,
+it would look like this::
- if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
- using_dac = 1;
- } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
- using_dac = 0;
- } else {
- dev_warn(dev, "mydev: No suitable DMA available\n");
- goto ignore_this_device;
- }
-
-If a card is capable of using 64-bit consistent allocations as well,
-the case would look like this::
-
- int using_dac, consistent_using_dac;
-
- if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
- using_dac = 1;
- consistent_using_dac = 1;
- } else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
- using_dac = 0;
- consistent_using_dac = 0;
- } else {
+ if (dma_set_mask(dev, DMA_BIT_MASK(64))) {
dev_warn(dev, "mydev: No suitable DMA available\n");
goto ignore_this_device;
}
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index e133ccd60228..0076150fdccb 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -195,6 +195,14 @@ Requesting the required mask does not alter the current mask. If you
wish to take advantage of it, you should issue a dma_set_mask()
call to set the mask to the value returned.
+::
+
+ size_t
+ dma_direct_max_mapping_size(struct device *dev);
+
+Returns the maximum size of a mapping for the device. The size parameter
+of the mapping functions like dma_map_single(), dma_map_page() and
+others should not be larger than the returned value.
Part Id - Streaming DMA mappings
--------------------------------
@@ -530,8 +538,8 @@ that simply cannot make consistent memory.
dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, unsigned long attrs)
-Free memory allocated by the dma_alloc_attrs(). All parameters common
-parameters must identical to those otherwise passed to dma_fre_coherent,
+Free memory allocated by the dma_alloc_attrs(). All common
+parameters must be identical to those otherwise passed to dma_free_coherent,
and the attrs argument must be identical to the attrs passed to
dma_alloc_attrs().
@@ -566,8 +574,7 @@ boundaries when doing this.
int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int
- flags)
+ dma_addr_t device_addr, size_t size);
Declare region of memory to be handed out by dma_alloc_coherent() when
it's asked for coherent memory for this device.
@@ -581,12 +588,6 @@ dma_addr_t in dma_alloc_coherent()).
size is the size of the area (must be multiples of PAGE_SIZE).
-flags can be ORed together and are:
-
-- DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions.
- Do not allow dma_alloc_coherent() to fall back to system memory when
- it's out of memory in the declared region.
-
As a simplification for the platforms, only *one* such region of
memory may be declared per device.
@@ -605,23 +606,6 @@ unconditionally having removed all the required structures. It is the
driver's job to ensure that no parts of this memory region are
currently in use.
-::
-
- void *
- dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-
-This is used to occupy specific regions of the declared space
-(dma_alloc_coherent() will hand out the first free region it finds).
-
-device_addr is the *device* address of the region requested.
-
-size is the size (and should be a page-sized multiple).
-
-The return value will be either a pointer to the processor virtual
-address of the memory, or an error (via PTR_ERR()) if any part of the
-region is occupied.
-
Part III - Debug drivers use of the DMA-API
-------------------------------------------
@@ -696,6 +680,9 @@ dma-api/disabled This read-only file contains the character 'Y'
happen when it runs out of memory or if it was
disabled at boot time
+dma-api/dump This read-only file contains current DMA
+ mappings.
+
dma-api/error_count This file is read-only and shows the total
numbers of errors found.
@@ -717,7 +704,7 @@ dma-api/num_free_entries The current number of free dma_debug_entries
dma-api/nr_total_entries The total number of dma_debug_entries in the
allocator, both free and used.
-dma-api/driver-filter You can write a name of a driver into this file
+dma-api/driver_filter You can write a name of a driver into this file
to limit the debug output to requests from that
particular driver. Write an empty string to
that file to disable the filter and see
diff --git a/Documentation/DMA-ISA-LPC.txt b/Documentation/DMA-ISA-LPC.txt
index 8c2b8be6e45b..b1ec7b16c21f 100644
--- a/Documentation/DMA-ISA-LPC.txt
+++ b/Documentation/DMA-ISA-LPC.txt
@@ -52,8 +52,8 @@ Address translation
-------------------
To translate the virtual address to a bus address, use the normal DMA
-API. Do _not_ use isa_virt_to_phys() even though it does the same
-thing. The reason for this is that the function isa_virt_to_phys()
+API. Do _not_ use isa_virt_to_bus() even though it does the same
+thing. The reason for this is that the function isa_virt_to_bus()
will require a Kconfig dependency to ISA, not just ISA_DMA_API which
is really all you need. Remember that even though the DMA controller
has its origins in ISA it is used elsewhere.
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 18f179807563..c30c1957c7e6 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -155,8 +155,7 @@ keeping lock contention under control at all tree levels regardless
of the level of loading on the system.
</p><p>RCU updaters wait for normal grace periods by registering
-RCU callbacks, either directly via <tt>call_rcu()</tt> and
-friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>),
+RCU callbacks, either directly via <tt>call_rcu()</tt>
or indirectly via <tt>synchronize_rcu()</tt> and friends.
RCU callbacks are represented by <tt>rcu_head</tt> structures,
which are queued on <tt>rcu_data</tt> structures while they are
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
index 19e7a5fb6b73..57300db4b5ff 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
@@ -56,6 +56,7 @@ sections.
RCU-preempt Expedited Grace Periods</a></h2>
<p>
+<tt>CONFIG_PREEMPT=y</tt> kernels implement RCU-preempt.
The overall flow of the handling of a given CPU by an RCU-preempt
expedited grace period is shown in the following diagram:
@@ -139,6 +140,7 @@ or offline, among other things.
RCU-sched Expedited Grace Periods</a></h2>
<p>
+<tt>CONFIG_PREEMPT=n</tt> kernels implement RCU-sched.
The overall flow of the handling of a given CPU by an RCU-sched
expedited grace period is shown in the following diagram:
@@ -146,7 +148,7 @@ expedited grace period is shown in the following diagram:
<p>
As with RCU-preempt, RCU-sched's
-<tt>synchronize_sched_expedited()</tt> ignores offline and
+<tt>synchronize_rcu_expedited()</tt> ignores offline and
idle CPUs, again because they are in remotely detectable
quiescent states.
However, because the
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index 8d21af02b1f0..c64f8d26609f 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -34,12 +34,11 @@ Similarly, any code that happens before the beginning of a given RCU grace
period is guaranteed to see the effects of all accesses following the end
of that grace period that are within RCU read-side critical sections.
-<p>This guarantee is particularly pervasive for <tt>synchronize_sched()</tt>,
-for which RCU-sched read-side critical sections include any region
+<p>Note well that RCU-sched read-side critical sections include any region
of code for which preemption is disabled.
Given that each individual machine instruction can be thought of as
an extremely small region of preemption-disabled code, one can think of
-<tt>synchronize_sched()</tt> as <tt>smp_mb()</tt> on steroids.
+<tt>synchronize_rcu()</tt> as <tt>smp_mb()</tt> on steroids.
<p>RCU updaters use this guarantee by splitting their updates into
two phases, one of which is executed before the grace period and
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index 687777f83b23..881353fd5bff 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -81,18 +81,19 @@ currently executing on some other CPU. We therefore cannot free
up any data structures used by the old NMI handler until execution
of it completes on all other CPUs.
-One way to accomplish this is via synchronize_sched(), perhaps as
+One way to accomplish this is via synchronize_rcu(), perhaps as
follows:
unset_nmi_callback();
- synchronize_sched();
+ synchronize_rcu();
kfree(my_nmi_data);
-This works because synchronize_sched() blocks until all CPUs complete
-any preemption-disabled segments of code that they were executing.
-Since NMI handlers disable preemption, synchronize_sched() is guaranteed
+This works because (as of v4.20) synchronize_rcu() blocks until all
+CPUs complete any preemption-disabled segments of code that they were
+executing.
+Since NMI handlers disable preemption, synchronize_rcu() is guaranteed
not to return until all ongoing NMI handlers exit. It is therefore safe
-to free up the handler's data as soon as synchronize_sched() returns.
+to free up the handler's data as soon as synchronize_rcu() returns.
Important note: for this to work, the architecture in question must
invoke nmi_enter() and nmi_exit() on NMI entry and exit, respectively.
diff --git a/Documentation/RCU/UP.txt b/Documentation/RCU/UP.txt
index 90ec5341ee98..53bde717017b 100644
--- a/Documentation/RCU/UP.txt
+++ b/Documentation/RCU/UP.txt
@@ -86,10 +86,8 @@ even on a UP system. So do not do it! Even on a UP system, the RCU
infrastructure -must- respect grace periods, and -must- invoke callbacks
from a known environment in which no locks are held.
-It -is- safe for synchronize_sched() and synchronize_rcu_bh() to return
-immediately on an UP system. It is also safe for synchronize_rcu()
-to return immediately on UP systems, except when running preemptable
-RCU.
+Note that it -is- safe for synchronize_rcu() to return immediately on
+UP systems, including !PREEMPT SMP builds running on UP systems.
Quick Quiz #3: Why can't synchronize_rcu() return immediately on
UP systems running preemptable RCU?
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 6f469864d9f5..e98ff261a438 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -182,16 +182,13 @@ over a rather long period of time, but improvements are always welcome!
when publicizing a pointer to a structure that can
be traversed by an RCU read-side critical section.
-5. If call_rcu(), or a related primitive such as call_rcu_bh(),
- call_rcu_sched(), or call_srcu() is used, the callback function
- will be called from softirq context. In particular, it cannot
- block.
+5. If call_rcu() or call_srcu() is used, the callback function will
+ be called from softirq context. In particular, it cannot block.
-6. Since synchronize_rcu() can block, it cannot be called from
- any sort of irq context. The same rule applies for
- synchronize_rcu_bh(), synchronize_sched(), synchronize_srcu(),
- synchronize_rcu_expedited(), synchronize_rcu_bh_expedited(),
- synchronize_sched_expedite(), and synchronize_srcu_expedited().
+6. Since synchronize_rcu() can block, it cannot be called
+ from any sort of irq context. The same rule applies
+ for synchronize_srcu(), synchronize_rcu_expedited(), and
+ synchronize_srcu_expedited().
The expedited forms of these primitives have the same semantics
as the non-expedited forms, but expediting is both expensive and
@@ -212,20 +209,20 @@ over a rather long period of time, but improvements are always welcome!
of the system, especially to real-time workloads running on
the rest of the system.
-7. If the updater uses call_rcu() or synchronize_rcu(), then the
- corresponding readers must use rcu_read_lock() and
- rcu_read_unlock(). If the updater uses call_rcu_bh() or
- synchronize_rcu_bh(), then the corresponding readers must
- use rcu_read_lock_bh() and rcu_read_unlock_bh(). If the
- updater uses call_rcu_sched() or synchronize_sched(), then
- the corresponding readers must disable preemption, possibly
- by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
- If the updater uses synchronize_srcu() or call_srcu(), then
- the corresponding readers must use srcu_read_lock() and
+7. As of v4.20, a given kernel implements only one RCU flavor,
+ which is RCU-sched for PREEMPT=n and RCU-preempt for PREEMPT=y.
+ If the updater uses call_rcu() or synchronize_rcu(),
+ then the corresponding readers may use rcu_read_lock() and
+ rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ or any pair of primitives that disables and re-enables preemption,
+ for example, rcu_read_lock_sched() and rcu_read_unlock_sched().
+ If the updater uses synchronize_srcu() or call_srcu(),
+ then the corresponding readers must use srcu_read_lock() and
srcu_read_unlock(), and with the same srcu_struct. The rules for
the expedited primitives are the same as for their non-expedited
counterparts. Mixing things up will result in confusion and
- broken kernels.
+ broken kernels, and has even resulted in an exploitable security
+ issue.
One exception to this rule: rcu_read_lock() and rcu_read_unlock()
may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
@@ -288,8 +285,7 @@ over a rather long period of time, but improvements are always welcome!
d. Periodically invoke synchronize_rcu(), permitting a limited
number of updates per grace period.
- The same cautions apply to call_rcu_bh(), call_rcu_sched(),
- call_srcu(), and kfree_rcu().
+ The same cautions apply to call_srcu() and kfree_rcu().
Note that although these primitives do take action to avoid memory
exhaustion when any given CPU has too many callbacks, a determined
@@ -322,7 +318,7 @@ over a rather long period of time, but improvements are always welcome!
11. Any lock acquired by an RCU callback must be acquired elsewhere
with softirq disabled, e.g., via spin_lock_irqsave(),
- spin_lock_bh(), etc. Failing to disable irq on a given
+ spin_lock_bh(), etc. Failing to disable softirq on a given
acquisition of that lock will result in deadlock as soon as
the RCU softirq handler happens to run your RCU callback while
interrupting that acquisition's critical section.
@@ -335,13 +331,16 @@ over a rather long period of time, but improvements are always welcome!
must use whatever locking or other synchronization is required
to safely access and/or modify that data structure.
- RCU callbacks are -usually- executed on the same CPU that executed
- the corresponding call_rcu(), call_rcu_bh(), or call_rcu_sched(),
- but are by -no- means guaranteed to be. For example, if a given
- CPU goes offline while having an RCU callback pending, then that
- RCU callback will execute on some surviving CPU. (If this was
- not the case, a self-spawning RCU callback would prevent the
- victim CPU from ever going offline.)
+ Do not assume that RCU callbacks will be executed on the same
+ CPU that executed the corresponding call_rcu() or call_srcu().
+ For example, if a given CPU goes offline while having an RCU
+ callback pending, then that RCU callback will execute on some
+ surviving CPU. (If this was not the case, a self-spawning RCU
+ callback would prevent the victim CPU from ever going offline.)
+ Furthermore, CPUs designated by rcu_nocbs= might well -always-
+ have their RCU callbacks executed on some other CPUs. In fact,
+ for some real-time workloads, this is the whole point of using
+ the rcu_nocbs= kernel boot parameter.
13. Unlike other forms of RCU, it -is- permissible to block in an
SRCU read-side critical section (demarked by srcu_read_lock()
@@ -381,11 +380,11 @@ over a rather long period of time, but improvements are always welcome!
SRCU's expedited primitive (synchronize_srcu_expedited())
never sends IPIs to other CPUs, so it is easier on
- real-time workloads than is synchronize_rcu_expedited(),
- synchronize_rcu_bh_expedited() or synchronize_sched_expedited().
+ real-time workloads than is synchronize_rcu_expedited().
- Note that rcu_dereference() and rcu_assign_pointer() relate to
- SRCU just as they do to other forms of RCU.
+ Note that rcu_assign_pointer() relates to SRCU just as it does to
+ other forms of RCU, but instead of rcu_dereference() you should
+ use srcu_dereference() in order to avoid lockdep splats.
14. The whole point of call_rcu(), synchronize_rcu(), and friends
is to wait until all pre-existing readers have finished before
@@ -405,6 +404,9 @@ over a rather long period of time, but improvements are always welcome!
read-side critical sections. It is the responsibility of the
RCU update-side primitives to deal with this.
+ For SRCU readers, you can use smp_mb__after_srcu_read_unlock()
+ immediately after an srcu_read_unlock() to get a full barrier.
+
16. Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the
__rcu sparse checks to validate your RCU code. These can help
find problems as follows:
@@ -428,22 +430,19 @@ over a rather long period of time, but improvements are always welcome!
These debugging aids can help you find problems that are
otherwise extremely difficult to spot.
-17. If you register a callback using call_rcu(), call_rcu_bh(),
- call_rcu_sched(), or call_srcu(), and pass in a function defined
- within a loadable module, then it in necessary to wait for
- all pending callbacks to be invoked after the last invocation
- and before unloading that module. Note that it is absolutely
- -not- sufficient to wait for a grace period! The current (say)
- synchronize_rcu() implementation waits only for all previous
- callbacks registered on the CPU that synchronize_rcu() is running
- on, but it is -not- guaranteed to wait for callbacks registered
- on other CPUs.
+17. If you register a callback using call_rcu() or call_srcu(), and
+ pass in a function defined within a loadable module, then it is
+ necessary to wait for all pending callbacks to be invoked after
+ the last invocation and before unloading that module. Note that
+ it is absolutely -not- sufficient to wait for a grace period!
+ The current (say) synchronize_rcu() implementation is -not-
+ guaranteed to wait for callbacks registered on other CPUs.
+ Or even on the current CPU if that CPU recently went offline
+ and came back online.
You instead need to use one of the barrier functions:
o call_rcu() -> rcu_barrier()
- o call_rcu_bh() -> rcu_barrier()
- o call_rcu_sched() -> rcu_barrier()
o call_srcu() -> srcu_barrier()
However, these barrier functions are absolutely -not- guaranteed
diff --git a/Documentation/RCU/lockdep-splat.txt b/Documentation/RCU/lockdep-splat.txt
index 238e9f61352f..9c015976b174 100644
--- a/Documentation/RCU/lockdep-splat.txt
+++ b/Documentation/RCU/lockdep-splat.txt
@@ -14,9 +14,9 @@ being the real world and all that.
So let's look at an example RCU lockdep splat from 3.0-rc5, one that
has long since been fixed:
-===============================
-[ INFO: suspicious RCU usage. ]
--------------------------------
+=============================
+WARNING: suspicious RCU usage
+-----------------------------
block/cfq-iosched.c:2776 suspicious rcu_dereference_protected() usage!
other info that might help us debug this:
@@ -24,11 +24,11 @@ other info that might help us debug this:
rcu_scheduler_active = 1, debug_locks = 0
3 locks held by scsi_scan_6/1552:
- #0: (&shost->scan_mutex){+.+.+.}, at: [<ffffffff8145efca>]
+ #0: (&shost->scan_mutex){+.+.}, at: [<ffffffff8145efca>]
scsi_scan_host_selected+0x5a/0x150
- #1: (&eq->sysfs_lock){+.+...}, at: [<ffffffff812a5032>]
+ #1: (&eq->sysfs_lock){+.+.}, at: [<ffffffff812a5032>]
elevator_exit+0x22/0x60
- #2: (&(&q->__queue_lock)->rlock){-.-...}, at: [<ffffffff812b6233>]
+ #2: (&(&q->__queue_lock)->rlock){-.-.}, at: [<ffffffff812b6233>]
cfq_exit_queue+0x43/0x190
stack backtrace:
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 721b3e426515..c818cf65c5a9 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -52,10 +52,10 @@ o If I am running on a uniprocessor kernel, which can only do one
o How can I see where RCU is currently used in the Linux kernel?
Search for "rcu_read_lock", "rcu_read_unlock", "call_rcu",
- "rcu_read_lock_bh", "rcu_read_unlock_bh", "call_rcu_bh",
- "srcu_read_lock", "srcu_read_unlock", "synchronize_rcu",
- "synchronize_net", "synchronize_srcu", and the other RCU
- primitives. Or grab one of the cscope databases from:
+ "rcu_read_lock_bh", "rcu_read_unlock_bh", "srcu_read_lock",
+ "srcu_read_unlock", "synchronize_rcu", "synchronize_net",
+ "synchronize_srcu", and the other RCU primitives. Or grab one
+ of the cscope databases from:
http://www.rdrop.com/users/paulmck/RCU/linuxusage/rculocktab.html
diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt
index ab96227bad42..bf699e8cfc75 100644
--- a/Documentation/RCU/rcu_dereference.txt
+++ b/Documentation/RCU/rcu_dereference.txt
@@ -351,3 +351,106 @@ garbage values.
In short, rcu_dereference() is -not- optional when you are going to
dereference the resulting pointer.
+
+
+WHICH MEMBER OF THE rcu_dereference() FAMILY SHOULD YOU USE?
+
+First, please avoid using rcu_dereference_raw() and also please avoid
+using rcu_dereference_check() and rcu_dereference_protected() with a
+second argument with a constant value of 1 (or true, for that matter).
+With that caution out of the way, here is some guidance for which
+member of the rcu_dereference() family to use in various situations:
+
+1. If the access needs to be within an RCU read-side critical
+ section, use rcu_dereference(). With the new consolidated
+ RCU flavors, an RCU read-side critical section is entered
+ using rcu_read_lock(), anything that disables bottom halves,
+ anything that disables interrupts, or anything that disables
+ preemption.
+
+2. If the access might be within an RCU read-side critical section
+ on the one hand, or protected by (say) my_lock on the other,
+ use rcu_dereference_check(), for example:
+
+ p1 = rcu_dereference_check(p->rcu_protected_pointer,
+ lockdep_is_held(&my_lock));
+
+
+3. If the access might be within an RCU read-side critical section
+ on the one hand, or protected by either my_lock or your_lock on
+ the other, again use rcu_dereference_check(), for example:
+
+ p1 = rcu_dereference_check(p->rcu_protected_pointer,
+ lockdep_is_held(&my_lock) ||
+ lockdep_is_held(&your_lock));
+
+4. If the access is on the update side, so that it is always protected
+ by my_lock, use rcu_dereference_protected():
+
+ p1 = rcu_dereference_protected(p->rcu_protected_pointer,
+ lockdep_is_held(&my_lock));
+
+ This can be extended to handle multiple locks as in #3 above,
+ and both can be extended to check other conditions as well.
+
+5. If the protection is supplied by the caller, and is thus unknown
+ to this code, that is the rare case when rcu_dereference_raw()
+ is appropriate. In addition, rcu_dereference_raw() might be
+ appropriate when the lockdep expression would be excessively
+ complex, except that a better approach in that case might be to
+ take a long hard look at your synchronization design. Still,
+ there are data-locking cases where any one of a very large number
+ of locks or reference counters suffices to protect the pointer,
+ so rcu_dereference_raw() does have its place.
+
+ However, its place is probably quite a bit smaller than one
+ might expect given the number of uses in the current kernel.
+ Ditto for its synonym, rcu_dereference_check( ... , 1), and
+ its close relative, rcu_dereference_protected(... , 1).
+
+
+SPARSE CHECKING OF RCU-PROTECTED POINTERS
+
+The sparse static-analysis tool checks for direct access to RCU-protected
+pointers, which can result in "interesting" bugs due to compiler
+optimizations involving invented loads and perhaps also load tearing.
+For example, suppose someone mistakenly does something like this:
+
+ p = q->rcu_protected_pointer;
+ do_something_with(p->a);
+ do_something_else_with(p->b);
+
+If register pressure is high, the compiler might optimize "p" out
+of existence, transforming the code to something like this:
+
+ do_something_with(q->rcu_protected_pointer->a);
+ do_something_else_with(q->rcu_protected_pointer->b);
+
+This could fatally disappoint your code if q->rcu_protected_pointer
+changed in the meantime. Nor is this a theoretical problem: Exactly
+this sort of bug cost Paul E. McKenney (and several of his innocent
+colleagues) a three-day weekend back in the early 1990s.
+
+Load tearing could of course result in dereferencing a mashup of a pair
+of pointers, which also might fatally disappoint your code.
+
+These problems could have been avoided simply by making the code instead
+read as follows:
+
+ p = rcu_dereference(q->rcu_protected_pointer);
+ do_something_with(p->a);
+ do_something_else_with(p->b);
+
+Unfortunately, these sorts of bugs can be extremely hard to spot during
+review. This is where the sparse tool comes into play, along with the
+"__rcu" marker. If you mark a pointer declaration, whether in a structure
+or as a formal parameter, with "__rcu", which tells sparse to complain if
+this pointer is accessed directly. It will also cause sparse to complain
+if a pointer not marked with "__rcu" is accessed using rcu_dereference()
+and friends. For example, ->rcu_protected_pointer might be declared as
+follows:
+
+ struct foo __rcu *rcu_protected_pointer;
+
+Use of "__rcu" is opt-in. If you choose not to use it, then you should
+ignore the sparse warnings.
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
index 5d7759071a3e..a2782df69732 100644
--- a/Documentation/RCU/rcubarrier.txt
+++ b/Documentation/RCU/rcubarrier.txt
@@ -83,16 +83,15 @@ Pseudo-code using rcu_barrier() is as follows:
2. Execute rcu_barrier().
3. Allow the module to be unloaded.
-There are also rcu_barrier_bh(), rcu_barrier_sched(), and srcu_barrier()
-functions for the other flavors of RCU, and you of course must match
-the flavor of rcu_barrier() with that of call_rcu(). If your module
-uses multiple flavors of call_rcu(), then it must also use multiple
+There is also an srcu_barrier() function for SRCU, and you of course
+must match the flavor of rcu_barrier() with that of call_rcu(). If your
+module uses multiple flavors of call_rcu(), then it must also use multiple
flavors of rcu_barrier() when unloading that module. For example, if
-it uses call_rcu_bh(), call_srcu() on srcu_struct_1, and call_srcu() on
+it uses call_rcu(), call_srcu() on srcu_struct_1, and call_srcu() on
srcu_struct_2(), then the following three lines of code will be required
when unloading:
- 1 rcu_barrier_bh();
+ 1 rcu_barrier();
2 srcu_barrier(&srcu_struct_1);
3 srcu_barrier(&srcu_struct_2);
@@ -185,12 +184,12 @@ module invokes call_rcu() from timers, you will need to first cancel all
the timers, and only then invoke rcu_barrier() to wait for any remaining
RCU callbacks to complete.
-Of course, if you module uses call_rcu_bh(), you will need to invoke
-rcu_barrier_bh() before unloading. Similarly, if your module uses
-call_rcu_sched(), you will need to invoke rcu_barrier_sched() before
-unloading. If your module uses call_rcu(), call_rcu_bh(), -and-
-call_rcu_sched(), then you will need to invoke each of rcu_barrier(),
-rcu_barrier_bh(), and rcu_barrier_sched().
+Of course, if your module uses call_rcu(), you will need to invoke
+rcu_barrier() before unloading. Similarly, if your module uses
+call_srcu(), you will need to invoke srcu_barrier() before unloading,
+and on the same srcu_struct structure. If your module uses call_rcu()
+-and- call_srcu(), then you will need to invoke rcu_barrier() -and-
+srcu_barrier().
Implementing rcu_barrier()
@@ -223,8 +222,8 @@ shown below. Note that the final "1" in on_each_cpu()'s argument list
ensures that all the calls to rcu_barrier_func() will have completed
before on_each_cpu() returns. Line 9 then waits for the completion.
-This code was rewritten in 2008 to support rcu_barrier_bh() and
-rcu_barrier_sched() in addition to the original rcu_barrier().
+This code was rewritten in 2008 and several times thereafter, but this
+still gives the general idea.
The rcu_barrier_func() runs on each CPU, where it invokes call_rcu()
to post an RCU callback, as follows:
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 1ace20815bb1..981651a8b65d 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -310,7 +310,7 @@ reader, updater, and reclaimer.
rcu_assign_pointer()
- +--------+
+ +--------+
+---------------------->| reader |---------+
| +--------+ |
| | |
@@ -318,12 +318,12 @@ reader, updater, and reclaimer.
| | | rcu_read_lock()
| | | rcu_read_unlock()
| rcu_dereference() | |
- +---------+ | |
- | updater |<---------------------+ |
- +---------+ V
+ +---------+ | |
+ | updater |<----------------+ |
+ +---------+ V
| +-----------+
+----------------------------------->| reclaimer |
- +-----------+
+ +-----------+
Defer:
synchronize_rcu() & call_rcu()
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
index b8ca28b60215..7e71c9c1d8e9 100644
--- a/Documentation/accounting/psi.txt
+++ b/Documentation/accounting/psi.txt
@@ -56,12 +56,12 @@ situation from a state where some tasks are stalled but the CPU is
still doing productive work. As such, time spent in this subset of the
stall state is tracked separately and exported in the "full" averages.
-The ratios are tracked as recent trends over ten, sixty, and three
-hundred second windows, which gives insight into short term events as
-well as medium and long term trends. The total absolute stall time is
-tracked and exported as well, to allow detection of latency spikes
-which wouldn't necessarily make a dent in the time averages, or to
-average trends over custom time frames.
+The ratios (in %) are tracked as recent trends over ten, sixty, and
+three hundred second windows, which gives insight into short term events
+as well as medium and long term trends. The total absolute stall time
+(in us) is tracked and exported as well, to allow detection of latency
+spikes which wouldn't necessarily make a dent in the time averages,
+or to average trends over custom time frames.
Cgroup2 interface
=================
diff --git a/Documentation/acpi/aml-debugger.txt b/Documentation/acpi/aml-debugger.txt
deleted file mode 100644
index e851cc5de63f..000000000000
--- a/Documentation/acpi/aml-debugger.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-The AML Debugger
-
-Copyright (C) 2016, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
-
-
-This document describes the usage of the AML debugger embedded in the Linux
-kernel.
-
-1. Build the debugger
-
- The following kernel configuration items are required to enable the AML
- debugger interface from the Linux kernel:
-
- CONFIG_ACPI_DEBUGGER=y
- CONFIG_ACPI_DEBUGGER_USER=m
-
- The userspace utilities can be built from the kernel source tree using
- the following commands:
-
- $ cd tools
- $ make acpi
-
- The resultant userspace tool binary is then located at:
-
- tools/acpi/power/acpi/acpidbg/acpidbg
-
- It can be installed to system directories by running "make install" (as a
- sufficiently privileged user).
-
-2. Start the userspace debugger interface
-
- After booting the kernel with the debugger built-in, the debugger can be
- started by using the following commands:
-
- # mount -t debugfs none /sys/kernel/debug
- # modprobe acpi_dbg
- # tools/acpi/power/acpi/acpidbg/acpidbg
-
- That spawns the interactive AML debugger environment where you can execute
- debugger commands.
-
- The commands are documented in the "ACPICA Overview and Programmer Reference"
- that can be downloaded from
-
- https://acpica.org/documentation
-
- The detailed debugger commands reference is located in Chapter 12 "ACPICA
- Debugger Reference". The "help" command can be used for a quick reference.
-
-3. Stop the userspace debugger interface
-
- The interactive debugger interface can be closed by pressing Ctrl+C or using
- the "quit" or "exit" commands. When finished, unload the module with:
-
- # rmmod acpi_dbg
-
- The module unloading may fail if there is an acpidbg instance running.
-
-4. Run the debugger in a script
-
- It may be useful to run the AML debugger in a test script. "acpidbg" supports
- this in a special "batch" mode. For example, the following command outputs
- the entire ACPI namespace:
-
- # acpidbg -b "namespace"
diff --git a/Documentation/acpi/apei/output_format.txt b/Documentation/acpi/apei/output_format.txt
deleted file mode 100644
index 0c49c197c47a..000000000000
--- a/Documentation/acpi/apei/output_format.txt
+++ /dev/null
@@ -1,147 +0,0 @@
- APEI output format
- ~~~~~~~~~~~~~~~~~~
-
-APEI uses printk as hardware error reporting interface, the output
-format is as follow.
-
-<error record> :=
-APEI generic hardware error status
-severity: <integer>, <severity string>
-section: <integer>, severity: <integer>, <severity string>
-flags: <integer>
-<section flags strings>
-fru_id: <uuid string>
-fru_text: <string>
-section_type: <section type string>
-<section data>
-
-<severity string>* := recoverable | fatal | corrected | info
-
-<section flags strings># :=
-[primary][, containment warning][, reset][, threshold exceeded]\
-[, resource not accessible][, latent error]
-
-<section type string> := generic processor error | memory error | \
-PCIe error | unknown, <uuid string>
-
-<section data> :=
-<generic processor section data> | <memory section data> | \
-<pcie section data> | <null>
-
-<generic processor section data> :=
-[processor_type: <integer>, <proc type string>]
-[processor_isa: <integer>, <proc isa string>]
-[error_type: <integer>
-<proc error type strings>]
-[operation: <integer>, <proc operation string>]
-[flags: <integer>
-<proc flags strings>]
-[level: <integer>]
-[version_info: <integer>]
-[processor_id: <integer>]
-[target_address: <integer>]
-[requestor_id: <integer>]
-[responder_id: <integer>]
-[IP: <integer>]
-
-<proc type string>* := IA32/X64 | IA64
-
-<proc isa string>* := IA32 | IA64 | X64
-
-<processor error type strings># :=
-[cache error][, TLB error][, bus error][, micro-architectural error]
-
-<proc operation string>* := unknown or generic | data read | data write | \
-instruction execution
-
-<proc flags strings># :=
-[restartable][, precise IP][, overflow][, corrected]
-
-<memory section data> :=
-[error_status: <integer>]
-[physical_address: <integer>]
-[physical_address_mask: <integer>]
-[node: <integer>]
-[card: <integer>]
-[module: <integer>]
-[bank: <integer>]
-[device: <integer>]
-[row: <integer>]
-[column: <integer>]
-[bit_position: <integer>]
-[requestor_id: <integer>]
-[responder_id: <integer>]
-[target_id: <integer>]
-[error_type: <integer>, <mem error type string>]
-
-<mem error type string>* :=
-unknown | no error | single-bit ECC | multi-bit ECC | \
-single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \
-target abort | parity error | watchdog timeout | invalid address | \
-mirror Broken | memory sparing | scrub corrected error | \
-scrub uncorrected error
-
-<pcie section data> :=
-[port_type: <integer>, <pcie port type string>]
-[version: <integer>.<integer>]
-[command: <integer>, status: <integer>]
-[device_id: <integer>:<integer>:<integer>.<integer>
-slot: <integer>
-secondary_bus: <integer>
-vendor_id: <integer>, device_id: <integer>
-class_code: <integer>]
-[serial number: <integer>, <integer>]
-[bridge: secondary_status: <integer>, control: <integer>]
-[aer_status: <integer>, aer_mask: <integer>
-<aer status string>
-[aer_uncor_severity: <integer>]
-aer_layer=<aer layer string>, aer_agent=<aer agent string>
-aer_tlp_header: <integer> <integer> <integer> <integer>]
-
-<pcie port type string>* := PCIe end point | legacy PCI end point | \
-unknown | unknown | root port | upstream switch port | \
-downstream switch port | PCIe to PCI/PCI-X bridge | \
-PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
-root complex event collector
-
-if section severity is fatal or recoverable
-<aer status string># :=
-unknown | unknown | unknown | unknown | Data Link Protocol | \
-unknown | unknown | unknown | unknown | unknown | unknown | unknown | \
-Poisoned TLP | Flow Control Protocol | Completion Timeout | \
-Completer Abort | Unexpected Completion | Receiver Overflow | \
-Malformed TLP | ECRC | Unsupported Request
-else
-<aer status string># :=
-Receiver Error | unknown | unknown | unknown | unknown | unknown | \
-Bad TLP | Bad DLLP | RELAY_NUM Rollover | unknown | unknown | unknown | \
-Replay Timer Timeout | Advisory Non-Fatal
-fi
-
-<aer layer string> :=
-Physical Layer | Data Link Layer | Transaction Layer
-
-<aer agent string> :=
-Receiver ID | Requester ID | Completer ID | Transmitter ID
-
-Where, [] designate corresponding content is optional
-
-All <field string> description with * has the following format:
-
-field: <integer>, <field string>
-
-Where value of <integer> should be the position of "string" in <field
-string> description. Otherwise, <field string> will be "unknown".
-
-All <field strings> description with # has the following format:
-
-field: <integer>
-<field strings>
-
-Where each string in <fields strings> corresponding to one set bit of
-<integer>. The bit position is the position of "string" in <field
-strings> description.
-
-For more detailed explanation of every field, please refer to UEFI
-specification version 2.3 or later, section Appendix N: Common
-Platform Error Record.
diff --git a/Documentation/acpi/i2c-muxes.txt b/Documentation/acpi/i2c-muxes.txt
deleted file mode 100644
index 9fcc4f0b885e..000000000000
--- a/Documentation/acpi/i2c-muxes.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-ACPI I2C Muxes
---------------
-
-Describing an I2C device hierarchy that includes I2C muxes requires an ACPI
-Device () scope per mux channel.
-
-Consider this topology:
-
-+------+ +------+
-| SMB1 |-->| MUX0 |--CH00--> i2c client A (0x50)
-| | | 0x70 |--CH01--> i2c client B (0x50)
-+------+ +------+
-
-which corresponds to the following ASL:
-
-Device (SMB1)
-{
- Name (_HID, ...)
- Device (MUX0)
- {
- Name (_HID, ...)
- Name (_CRS, ResourceTemplate () {
- I2cSerialBus (0x70, ControllerInitiated, I2C_SPEED,
- AddressingMode7Bit, "^SMB1", 0x00,
- ResourceConsumer,,)
- }
-
- Device (CH00)
- {
- Name (_ADR, 0)
-
- Device (CLIA)
- {
- Name (_HID, ...)
- Name (_CRS, ResourceTemplate () {
- I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
- AddressingMode7Bit, "^CH00", 0x00,
- ResourceConsumer,,)
- }
- }
- }
-
- Device (CH01)
- {
- Name (_ADR, 1)
-
- Device (CLIB)
- {
- Name (_HID, ...)
- Name (_CRS, ResourceTemplate () {
- I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
- AddressingMode7Bit, "^CH01", 0x00,
- ResourceConsumer,,)
- }
- }
- }
- }
-}
diff --git a/Documentation/acpi/initrd_table_override.txt b/Documentation/acpi/initrd_table_override.txt
deleted file mode 100644
index 30437a6db373..000000000000
--- a/Documentation/acpi/initrd_table_override.txt
+++ /dev/null
@@ -1,111 +0,0 @@
-Upgrading ACPI tables via initrd
-================================
-
-1) Introduction (What is this about)
-2) What is this for
-3) How does it work
-4) References (Where to retrieve userspace tools)
-
-1) What is this about
----------------------
-
-If the ACPI_TABLE_UPGRADE compile option is true, it is possible to
-upgrade the ACPI execution environment that is defined by the ACPI tables
-via upgrading the ACPI tables provided by the BIOS with an instrumented,
-modified, more recent version one, or installing brand new ACPI tables.
-
-When building initrd with kernel in a single image, option
-ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD should also be true for this
-feature to work.
-
-For a full list of ACPI tables that can be upgraded/installed, take a look
-at the char *table_sigs[MAX_ACPI_SIGNATURE]; definition in
-drivers/acpi/tables.c.
-All ACPI tables iasl (Intel's ACPI compiler and disassembler) knows should
-be overridable, except:
- - ACPI_SIG_RSDP (has a signature of 6 bytes)
- - ACPI_SIG_FACS (does not have an ordinary ACPI table header)
-Both could get implemented as well.
-
-
-2) What is this for
--------------------
-
-Complain to your platform/BIOS vendor if you find a bug which is so severe
-that a workaround is not accepted in the Linux kernel. And this facility
-allows you to upgrade the buggy tables before your platform/BIOS vendor
-releases an upgraded BIOS binary.
-
-This facility can be used by platform/BIOS vendors to provide a Linux
-compatible environment without modifying the underlying platform firmware.
-
-This facility also provides a powerful feature to easily debug and test
-ACPI BIOS table compatibility with the Linux kernel by modifying old
-platform provided ACPI tables or inserting new ACPI tables.
-
-It can and should be enabled in any kernel because there is no functional
-change with not instrumented initrds.
-
-
-3) How does it work
--------------------
-
-# Extract the machine's ACPI tables:
-cd /tmp
-acpidump >acpidump
-acpixtract -a acpidump
-# Disassemble, modify and recompile them:
-iasl -d *.dat
-# For example add this statement into a _PRT (PCI Routing Table) function
-# of the DSDT:
-Store("HELLO WORLD", debug)
-# And increase the OEM Revision. For example, before modification:
-DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000000)
-# After modification:
-DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000001)
-iasl -sa dsdt.dsl
-# Add the raw ACPI tables to an uncompressed cpio archive.
-# They must be put into a /kernel/firmware/acpi directory inside the cpio
-# archive. Note that if the table put here matches a platform table
-# (similar Table Signature, and similar OEMID, and similar OEM Table ID)
-# with a more recent OEM Revision, the platform table will be upgraded by
-# this table. If the table put here doesn't match a platform table
-# (dissimilar Table Signature, or dissimilar OEMID, or dissimilar OEM Table
-# ID), this table will be appended.
-mkdir -p kernel/firmware/acpi
-cp dsdt.aml kernel/firmware/acpi
-# A maximum of "NR_ACPI_INITRD_TABLES (64)" tables are currently allowed
-# (see osl.c):
-iasl -sa facp.dsl
-iasl -sa ssdt1.dsl
-cp facp.aml kernel/firmware/acpi
-cp ssdt1.aml kernel/firmware/acpi
-# The uncompressed cpio archive must be the first. Other, typically
-# compressed cpio archives, must be concatenated on top of the uncompressed
-# one. Following command creates the uncompressed cpio archive and
-# concatenates the original initrd on top:
-find kernel | cpio -H newc --create > /boot/instrumented_initrd
-cat /boot/initrd >>/boot/instrumented_initrd
-# reboot with increased acpi debug level, e.g. boot params:
-acpi.debug_level=0x2 acpi.debug_layer=0xFFFFFFFF
-# and check your syslog:
-[ 1.268089] ACPI: PCI Interrupt Routing Table [\_SB_.PCI0._PRT]
-[ 1.272091] [ACPI Debug] String [0x0B] "HELLO WORLD"
-
-iasl is able to disassemble and recompile quite a lot different,
-also static ACPI tables.
-
-
-4) Where to retrieve userspace tools
-------------------------------------
-
-iasl and acpixtract are part of Intel's ACPICA project:
-http://acpica.org/
-and should be packaged by distributions (for example in the acpica package
-on SUSE).
-
-acpidump can be found in Len Browns pmtools:
-ftp://kernel.org/pub/linux/kernel/people/lenb/acpi/utils/pmtools/acpidump
-This tool is also part of the acpica package on SUSE.
-Alternatively, used ACPI tables can be retrieved via sysfs in latest kernels:
-/sys/firmware/acpi/tables
diff --git a/Documentation/acpi/method-customizing.txt b/Documentation/acpi/method-customizing.txt
deleted file mode 100644
index 7235da975f23..000000000000
--- a/Documentation/acpi/method-customizing.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-Linux ACPI Custom Control Method How To
-=======================================
-
-Written by Zhang Rui <rui.zhang@intel.com>
-
-
-Linux supports customizing ACPI control methods at runtime.
-
-Users can use this to
-1. override an existing method which may not work correctly,
- or just for debugging purposes.
-2. insert a completely new method in order to create a missing
- method such as _OFF, _ON, _STA, _INI, etc.
-For these cases, it is far simpler to dynamically install a single
-control method rather than override the entire DSDT, because kernel
-rebuild/reboot is not needed and test result can be got in minutes.
-
-Note: Only ACPI METHOD can be overridden, any other object types like
- "Device", "OperationRegion", are not recognized. Methods
- declared inside scope operators are also not supported.
-Note: The same ACPI control method can be overridden for many times,
- and it's always the latest one that used by Linux/kernel.
-Note: To get the ACPI debug object output (Store (AAAA, Debug)),
- please run "echo 1 > /sys/module/acpi/parameters/aml_debug_output".
-
-1. override an existing method
- a) get the ACPI table via ACPI sysfs I/F. e.g. to get the DSDT,
- just run "cat /sys/firmware/acpi/tables/DSDT > /tmp/dsdt.dat"
- b) disassemble the table by running "iasl -d dsdt.dat".
- c) rewrite the ASL code of the method and save it in a new file,
- d) package the new file (psr.asl) to an ACPI table format.
- Here is an example of a customized \_SB._AC._PSR method,
-
- DefinitionBlock ("", "SSDT", 1, "", "", 0x20080715)
- {
- Method (\_SB_.AC._PSR, 0, NotSerialized)
- {
- Store ("In AC _PSR", Debug)
- Return (ACON)
- }
- }
- Note that the full pathname of the method in ACPI namespace
- should be used.
- e) assemble the file to generate the AML code of the method.
- e.g. "iasl -vw 6084 psr.asl" (psr.aml is generated as a result)
- If parameter "-vw 6084" is not supported by your iASL compiler,
- please try a newer version.
- f) mount debugfs by "mount -t debugfs none /sys/kernel/debug"
- g) override the old method via the debugfs by running
- "cat /tmp/psr.aml > /sys/kernel/debug/acpi/custom_method"
-
-2. insert a new method
- This is easier than overriding an existing method.
- We just need to create the ASL code of the method we want to
- insert and then follow the step c) ~ g) in section 1.
-
-3. undo your changes
- The "undo" operation is not supported for a new inserted method
- right now, i.e. we can not remove a method currently.
- For an overridden method, in order to undo your changes, please
- save a copy of the method original ASL code in step c) section 1,
- and redo step c) ~ g) to override the method with the original one.
-
-
-Note: We can use a kernel with multiple custom ACPI method running,
- But each individual write to debugfs can implement a SINGLE
- method override. i.e. if we want to insert/override multiple
- ACPI methods, we need to redo step c) ~ g) for multiple times.
-
-Note: Be aware that root can mis-use this driver to modify arbitrary
- memory and gain additional rights, if root's privileges got
- restricted (for example if root is not allowed to load additional
- modules after boot).
diff --git a/Documentation/acpi/method-tracing.txt b/Documentation/acpi/method-tracing.txt
deleted file mode 100644
index 0aba14c8f459..000000000000
--- a/Documentation/acpi/method-tracing.txt
+++ /dev/null
@@ -1,192 +0,0 @@
-ACPICA Trace Facility
-
-Copyright (C) 2015, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
-
-
-Abstract:
-
-This document describes the functions and the interfaces of the method
-tracing facility.
-
-1. Functionalities and usage examples:
-
- ACPICA provides method tracing capability. And two functions are
- currently implemented using this capability.
-
- A. Log reducer
- ACPICA subsystem provides debugging outputs when CONFIG_ACPI_DEBUG is
- enabled. The debugging messages which are deployed via
- ACPI_DEBUG_PRINT() macro can be reduced at 2 levels - per-component
- level (known as debug layer, configured via
- /sys/module/acpi/parameters/debug_layer) and per-type level (known as
- debug level, configured via /sys/module/acpi/parameters/debug_level).
-
- But when the particular layer/level is applied to the control method
- evaluations, the quantity of the debugging outputs may still be too
- large to be put into the kernel log buffer. The idea thus is worked out
- to only enable the particular debug layer/level (normally more detailed)
- logs when the control method evaluation is started, and disable the
- detailed logging when the control method evaluation is stopped.
-
- The following command examples illustrate the usage of the "log reducer"
- functionality:
- a. Filter out the debug layer/level matched logs when control methods
- are being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0xXXXXXXXX" > trace_debug_layer
- # echo "0xYYYYYYYY" > trace_debug_level
- # echo "enable" > trace_state
- b. Filter out the debug layer/level matched logs when the specified
- control method is being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0xXXXXXXXX" > trace_debug_layer
- # echo "0xYYYYYYYY" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "method" > /sys/module/acpi/parameters/trace_state
- c. Filter out the debug layer/level matched logs when the specified
- control method is being evaluated for the first time:
- # cd /sys/module/acpi/parameters
- # echo "0xXXXXXXXX" > trace_debug_layer
- # echo "0xYYYYYYYY" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "method-once" > /sys/module/acpi/parameters/trace_state
- Where:
- 0xXXXXXXXX/0xYYYYYYYY: Refer to Documentation/acpi/debug.txt for
- possible debug layer/level masking values.
- \PPPP.AAAA.TTTT.HHHH: Full path of a control method that can be found
- in the ACPI namespace. It needn't be an entry
- of a control method evaluation.
-
- B. AML tracer
-
- There are special log entries added by the method tracing facility at
- the "trace points" the AML interpreter starts/stops to execute a control
- method, or an AML opcode. Note that the format of the log entries are
- subject to change:
- [ 0.186427] exdebug-0398 ex_trace_point : Method Begin [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
- [ 0.186630] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905c88:If] execution.
- [ 0.186820] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:LEqual] execution.
- [ 0.187010] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905a20:-NamePath-] execution.
- [ 0.187214] exdebug-0398 ex_trace_point : Opcode End [0xf5905a20:-NamePath-] execution.
- [ 0.187407] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
- [ 0.187594] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
- [ 0.187789] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:LEqual] execution.
- [ 0.187980] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:Return] execution.
- [ 0.188146] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
- [ 0.188334] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
- [ 0.188524] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:Return] execution.
- [ 0.188712] exdebug-0398 ex_trace_point : Opcode End [0xf5905c88:If] execution.
- [ 0.188903] exdebug-0398 ex_trace_point : Method End [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
-
- Developers can utilize these special log entries to track the AML
- interpretion, thus can aid issue debugging and performance tuning. Note
- that, as the "AML tracer" logs are implemented via ACPI_DEBUG_PRINT()
- macro, CONFIG_ACPI_DEBUG is also required to be enabled for enabling
- "AML tracer" logs.
-
- The following command examples illustrate the usage of the "AML tracer"
- functionality:
- a. Filter out the method start/stop "AML tracer" logs when control
- methods are being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "enable" > trace_state
- b. Filter out the method start/stop "AML tracer" when the specified
- control method is being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "method" > trace_state
- c. Filter out the method start/stop "AML tracer" logs when the specified
- control method is being evaluated for the first time:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "method-once" > trace_state
- d. Filter out the method/opcode start/stop "AML tracer" when the
- specified control method is being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "opcode" > trace_state
- e. Filter out the method/opcode start/stop "AML tracer" when the
- specified control method is being evaluated for the first time:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "opcode-opcode" > trace_state
-
- Note that all above method tracing facility related module parameters can
- be used as the boot parameters, for example:
- acpi.trace_debug_layer=0x80 acpi.trace_debug_level=0x10 \
- acpi.trace_method_name=\_SB.LID0._LID acpi.trace_state=opcode-once
-
-2. Interface descriptions:
-
- All method tracing functions can be configured via ACPI module
- parameters that are accessible at /sys/module/acpi/parameters/:
-
- trace_method_name
- The full path of the AML method that the user wants to trace.
- Note that the full path shouldn't contain the trailing "_"s in its
- name segments but may contain "\" to form an absolute path.
-
- trace_debug_layer
- The temporary debug_layer used when the tracing feature is enabled.
- Using ACPI_EXECUTER (0x80) by default, which is the debug_layer
- used to match all "AML tracer" logs.
-
- trace_debug_level
- The temporary debug_level used when the tracing feature is enabled.
- Using ACPI_LV_TRACE_POINT (0x10) by default, which is the
- debug_level used to match all "AML tracer" logs.
-
- trace_state
- The status of the tracing feature.
- Users can enable/disable this debug tracing feature by executing
- the following command:
- # echo string > /sys/module/acpi/parameters/trace_state
- Where "string" should be one of the following:
- "disable"
- Disable the method tracing feature.
- "enable"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during any method
- execution will be logged.
- "method"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during method execution
- of "trace_method_name" will be logged.
- "method-once"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during method execution
- of "trace_method_name" will be logged only once.
- "opcode"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during method/opcode
- execution of "trace_method_name" will be logged.
- "opcode-once"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during method/opcode
- execution of "trace_method_name" will be logged only once.
- Note that, the difference between the "enable" and other feature
- enabling options are:
- 1. When "enable" is specified, since
- "trace_debug_layer/trace_debug_level" shall apply to all control
- method evaluations, after configuring "trace_state" to "enable",
- "trace_method_name" will be reset to NULL.
- 2. When "method/opcode" is specified, if
- "trace_method_name" is NULL when "trace_state" is configured to
- these options, the "trace_debug_layer/trace_debug_level" will
- apply to all control method evaluations.
diff --git a/Documentation/acpi/ssdt-overlays.txt b/Documentation/acpi/ssdt-overlays.txt
deleted file mode 100644
index 5ae13f161ea2..000000000000
--- a/Documentation/acpi/ssdt-overlays.txt
+++ /dev/null
@@ -1,172 +0,0 @@
-
-In order to support ACPI open-ended hardware configurations (e.g. development
-boards) we need a way to augment the ACPI configuration provided by the firmware
-image. A common example is connecting sensors on I2C / SPI buses on development
-boards.
-
-Although this can be accomplished by creating a kernel platform driver or
-recompiling the firmware image with updated ACPI tables, neither is practical:
-the former proliferates board specific kernel code while the latter requires
-access to firmware tools which are often not publicly available.
-
-Because ACPI supports external references in AML code a more practical
-way to augment firmware ACPI configuration is by dynamically loading
-user defined SSDT tables that contain the board specific information.
-
-For example, to enumerate a Bosch BMA222E accelerometer on the I2C bus of the
-Minnowboard MAX development board exposed via the LSE connector [1], the
-following ASL code can be used:
-
-DefinitionBlock ("minnowmax.aml", "SSDT", 1, "Vendor", "Accel", 0x00000003)
-{
- External (\_SB.I2C6, DeviceObj)
-
- Scope (\_SB.I2C6)
- {
- Device (STAC)
- {
- Name (_ADR, Zero)
- Name (_HID, "BMA222E")
-
- Method (_CRS, 0, Serialized)
- {
- Name (RBUF, ResourceTemplate ()
- {
- I2cSerialBus (0x0018, ControllerInitiated, 0x00061A80,
- AddressingMode7Bit, "\\_SB.I2C6", 0x00,
- ResourceConsumer, ,)
- GpioInt (Edge, ActiveHigh, Exclusive, PullDown, 0x0000,
- "\\_SB.GPO2", 0x00, ResourceConsumer, , )
- { // Pin list
- 0
- }
- })
- Return (RBUF)
- }
- }
- }
-}
-
-which can then be compiled to AML binary format:
-
-$ iasl minnowmax.asl
-
-Intel ACPI Component Architecture
-ASL Optimizing Compiler version 20140214-64 [Mar 29 2014]
-Copyright (c) 2000 - 2014 Intel Corporation
-
-ASL Input: minnomax.asl - 30 lines, 614 bytes, 7 keywords
-AML Output: minnowmax.aml - 165 bytes, 6 named objects, 1 executable opcodes
-
-[1] http://wiki.minnowboard.org/MinnowBoard_MAX#Low_Speed_Expansion_Connector_.28Top.29
-
-The resulting AML code can then be loaded by the kernel using one of the methods
-below.
-
-== Loading ACPI SSDTs from initrd ==
-
-This option allows loading of user defined SSDTs from initrd and it is useful
-when the system does not support EFI or when there is not enough EFI storage.
-
-It works in a similar way with initrd based ACPI tables override/upgrade: SSDT
-aml code must be placed in the first, uncompressed, initrd under the
-"kernel/firmware/acpi" path. Multiple files can be used and this will translate
-in loading multiple tables. Only SSDT and OEM tables are allowed. See
-initrd_table_override.txt for more details.
-
-Here is an example:
-
-# Add the raw ACPI tables to an uncompressed cpio archive.
-# They must be put into a /kernel/firmware/acpi directory inside the
-# cpio archive.
-# The uncompressed cpio archive must be the first.
-# Other, typically compressed cpio archives, must be
-# concatenated on top of the uncompressed one.
-mkdir -p kernel/firmware/acpi
-cp ssdt.aml kernel/firmware/acpi
-
-# Create the uncompressed cpio archive and concatenate the original initrd
-# on top:
-find kernel | cpio -H newc --create > /boot/instrumented_initrd
-cat /boot/initrd >>/boot/instrumented_initrd
-
-== Loading ACPI SSDTs from EFI variables ==
-
-This is the preferred method, when EFI is supported on the platform, because it
-allows a persistent, OS independent way of storing the user defined SSDTs. There
-is also work underway to implement EFI support for loading user defined SSDTs
-and using this method will make it easier to convert to the EFI loading
-mechanism when that will arrive.
-
-In order to load SSDTs from an EFI variable the efivar_ssdt kernel command line
-parameter can be used. The argument for the option is the variable name to
-use. If there are multiple variables with the same name but with different
-vendor GUIDs, all of them will be loaded.
-
-In order to store the AML code in an EFI variable the efivarfs filesystem can be
-used. It is enabled and mounted by default in /sys/firmware/efi/efivars in all
-recent distribution.
-
-Creating a new file in /sys/firmware/efi/efivars will automatically create a new
-EFI variable. Updating a file in /sys/firmware/efi/efivars will update the EFI
-variable. Please note that the file name needs to be specially formatted as
-"Name-GUID" and that the first 4 bytes in the file (little-endian format)
-represent the attributes of the EFI variable (see EFI_VARIABLE_MASK in
-include/linux/efi.h). Writing to the file must also be done with one write
-operation.
-
-For example, you can use the following bash script to create/update an EFI
-variable with the content from a given file:
-
-#!/bin/sh -e
-
-while ! [ -z "$1" ]; do
- case "$1" in
- "-f") filename="$2"; shift;;
- "-g") guid="$2"; shift;;
- *) name="$1";;
- esac
- shift
-done
-
-usage()
-{
- echo "Syntax: ${0##*/} -f filename [ -g guid ] name"
- exit 1
-}
-
-[ -n "$name" -a -f "$filename" ] || usage
-
-EFIVARFS="/sys/firmware/efi/efivars"
-
-[ -d "$EFIVARFS" ] || exit 2
-
-if stat -tf $EFIVARFS | grep -q -v de5e81e4; then
- mount -t efivarfs none $EFIVARFS
-fi
-
-# try to pick up an existing GUID
-[ -n "$guid" ] || guid=$(find "$EFIVARFS" -name "$name-*" | head -n1 | cut -f2- -d-)
-
-# use a randomly generated GUID
-[ -n "$guid" ] || guid="$(cat /proc/sys/kernel/random/uuid)"
-
-# efivarfs expects all of the data in one write
-tmp=$(mktemp)
-/bin/echo -ne "\007\000\000\000" | cat - $filename > $tmp
-dd if=$tmp of="$EFIVARFS/$name-$guid" bs=$(stat -c %s $tmp)
-rm $tmp
-
-== Loading ACPI SSDTs from configfs ==
-
-This option allows loading of user defined SSDTs from userspace via the configfs
-interface. The CONFIG_ACPI_CONFIGFS option must be select and configfs must be
-mounted. In the following examples, we assume that configfs has been mounted in
-/config.
-
-New tables can be loading by creating new directories in /config/acpi/table/ and
-writing the SSDT aml code in the aml attribute:
-
-cd /config/acpi/table
-mkdir my_ssdt
-cat ~/ssdt.aml > my_ssdt/aml
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 47e577264198..a582c780c3bd 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -251,7 +251,7 @@ Configuring the kernel
Compiling the kernel
--------------------
- - Make sure you have at least gcc 3.2 available.
+ - Make sure you have at least gcc 4.6 available.
For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
Please note that you can still run a.out user programs with this kernel.
diff --git a/Documentation/acpi/cppc_sysfs.txt b/Documentation/admin-guide/acpi/cppc_sysfs.rst
index f20fb445135d..a4b99afbe331 100644
--- a/Documentation/acpi/cppc_sysfs.txt
+++ b/Documentation/admin-guide/acpi/cppc_sysfs.rst
@@ -1,5 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
- Collaborative Processor Performance Control (CPPC)
+==================================================
+Collaborative Processor Performance Control (CPPC)
+==================================================
+
+CPPC
+====
CPPC defined in the ACPI spec describes a mechanism for the OS to manage the
performance of a logical processor on a contiguous and abstract performance
@@ -10,31 +16,28 @@ For more details on CPPC please refer to the ACPI specification at:
http://uefi.org/specifications
-Some of the CPPC registers are exposed via sysfs under:
-
-/sys/devices/system/cpu/cpuX/acpi_cppc/
-
-for each cpu X
+Some of the CPPC registers are exposed via sysfs under::
---------------------------------------------------------------------------------
+ /sys/devices/system/cpu/cpuX/acpi_cppc/
-$ ls -lR /sys/devices/system/cpu/cpu0/acpi_cppc/
-/sys/devices/system/cpu/cpu0/acpi_cppc/:
-total 0
--r--r--r-- 1 root root 65536 Mar 5 19:38 feedback_ctrs
--r--r--r-- 1 root root 65536 Mar 5 19:38 highest_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_freq
--r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_nonlinear_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_freq
--r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 reference_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 wraparound_time
+for each cpu X::
---------------------------------------------------------------------------------
+ $ ls -lR /sys/devices/system/cpu/cpu0/acpi_cppc/
+ /sys/devices/system/cpu/cpu0/acpi_cppc/:
+ total 0
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 feedback_ctrs
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 highest_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_freq
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_nonlinear_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_freq
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 reference_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 wraparound_time
* highest_perf : Highest performance of this processor (abstract scale).
-* nominal_perf : Highest sustained performance of this processor (abstract scale).
+* nominal_perf : Highest sustained performance of this processor
+ (abstract scale).
* lowest_nonlinear_perf : Lowest performance of this processor with nonlinear
power savings (abstract scale).
* lowest_perf : Lowest performance of this processor (abstract scale).
@@ -48,22 +51,26 @@ total 0
* feedback_ctrs : Includes both Reference and delivered performance counter.
Reference counter ticks up proportional to processor's reference performance.
Delivered counter ticks up proportional to processor's delivered performance.
-* wraparound_time: Minimum time for the feedback counters to wraparound (seconds).
+* wraparound_time: Minimum time for the feedback counters to wraparound
+ (seconds).
* reference_perf : Performance level at which reference performance counter
accumulates (abstract scale).
---------------------------------------------------------------------------------
- Computing Average Delivered Performance
+Computing Average Delivered Performance
+=======================================
+
+Below describes the steps to compute the average performance delivered by
+taking two different snapshots of feedback counters at time T1 and T2.
+
+ T1: Read feedback_ctrs as fbc_t1
+ Wait or run some workload
-Below describes the steps to compute the average performance delivered by taking
-two different snapshots of feedback counters at time T1 and T2.
+ T2: Read feedback_ctrs as fbc_t2
-T1: Read feedback_ctrs as fbc_t1
- Wait or run some workload
-T2: Read feedback_ctrs as fbc_t2
+::
-delivered_counter_delta = fbc_t2[del] - fbc_t1[del]
-reference_counter_delta = fbc_t2[ref] - fbc_t1[ref]
+ delivered_counter_delta = fbc_t2[del] - fbc_t1[del]
+ reference_counter_delta = fbc_t2[ref] - fbc_t1[ref]
-delivered_perf = (refernce_perf x delivered_counter_delta) / reference_counter_delta
+ delivered_perf = (reference_perf x delivered_counter_delta) / reference_counter_delta
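+
+A minimal bash sketch of this computation, assuming feedback_ctrs reports
+the counters as "ref:<count> del:<count>" (the exact format may vary
+between kernel versions)::
+
+  cpu=/sys/devices/system/cpu/cpu0/acpi_cppc
+  # T1: first snapshot of the reference/delivered counters
+  read ref1 del1 <<< "$(sed 's/[a-z]*://g' $cpu/feedback_ctrs)"
+  sleep 1    # wait or run some workload
+  # T2: second snapshot
+  read ref2 del2 <<< "$(sed 's/[a-z]*://g' $cpu/feedback_ctrs)"
+  # average delivered performance over the T1..T2 interval
+  echo $(( $(cat $cpu/reference_perf) * (del2 - del1) / (ref2 - ref1) ))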
diff --git a/Documentation/acpi/dsdt-override.txt b/Documentation/admin-guide/acpi/dsdt-override.rst
index 784841caa6e6..50bd7f194bf4 100644
--- a/Documentation/acpi/dsdt-override.txt
+++ b/Documentation/admin-guide/acpi/dsdt-override.rst
@@ -1,6 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+Overriding DSDT
+===============
+
Linux supports a method of overriding the BIOS DSDT:
-CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.
+CONFIG_ACPI_CUSTOM_DSDT - builds the image into the kernel.
When to use this method is described in detail on the
Linux/ACPI home page:
diff --git a/Documentation/admin-guide/acpi/index.rst b/Documentation/admin-guide/acpi/index.rst
new file mode 100644
index 000000000000..4d13eeea1eca
--- /dev/null
+++ b/Documentation/admin-guide/acpi/index.rst
@@ -0,0 +1,14 @@
+============
+ACPI Support
+============
+
+Here we document in detail how to interact with various mechanisms in
+the Linux ACPI support.
+
+.. toctree::
+ :maxdepth: 1
+
+ initrd_table_override
+ dsdt-override
+ ssdt-overlays
+ cppc_sysfs
diff --git a/Documentation/admin-guide/acpi/initrd_table_override.rst b/Documentation/admin-guide/acpi/initrd_table_override.rst
new file mode 100644
index 000000000000..cbd768207631
--- /dev/null
+++ b/Documentation/admin-guide/acpi/initrd_table_override.rst
@@ -0,0 +1,115 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+Upgrading ACPI tables via initrd
+================================
+
+What is this about
+==================
+
+If the ACPI_TABLE_UPGRADE compile option is enabled, it is possible to
+upgrade the ACPI execution environment that is defined by the ACPI tables
+by upgrading the ACPI tables provided by the BIOS with an instrumented,
+modified, more recent version, or by installing brand new ACPI tables.
+
+When the initrd is built into the kernel in a single image, the
+ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD option should also be enabled
+for this feature to work.
+
+For a full list of ACPI tables that can be upgraded/installed, take a look
+at the ``char *table_sigs[MAX_ACPI_SIGNATURE];`` definition in
+drivers/acpi/tables.c.
+
+All ACPI tables known to iasl (Intel's ACPI compiler and disassembler)
+should be overridable, except:
+
+ - ACPI_SIG_RSDP (has a signature of 6 bytes)
+ - ACPI_SIG_FACS (does not have an ordinary ACPI table header)
+
+Both could be implemented as well.
+
+
+What is this for
+================
+
+Complain to your platform/BIOS vendor if you find a bug which is so severe
+that a workaround is not accepted in the Linux kernel. Meanwhile, this
+facility allows you to upgrade the buggy tables before your platform/BIOS
+vendor releases an upgraded BIOS binary.
+
+This facility can be used by platform/BIOS vendors to provide a Linux
+compatible environment without modifying the underlying platform firmware.
+
+This facility also provides a powerful feature to easily debug and test
+ACPI BIOS table compatibility with the Linux kernel by modifying old
+platform provided ACPI tables or inserting new ACPI tables.
+
+It can and should be enabled in any kernel because there is no functional
+change with uninstrumented initrds.
+
+
+How does it work
+================
+::
+
+ # Extract the machine's ACPI tables:
+ cd /tmp
+ acpidump >acpidump
+ acpixtract -a acpidump
+ # Disassemble, modify and recompile them:
+ iasl -d *.dat
+ # For example add this statement into a _PRT (PCI Routing Table) function
+ # of the DSDT:
+ Store("HELLO WORLD", debug)
+ # And increase the OEM Revision. For example, before modification:
+ DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000000)
+ # After modification:
+ DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000001)
+ iasl -sa dsdt.dsl
+ # Add the raw ACPI tables to an uncompressed cpio archive.
+ # They must be put into a /kernel/firmware/acpi directory inside the cpio
+ # archive. Note that if the table put here matches a platform table
+ # (similar Table Signature, and similar OEMID, and similar OEM Table ID)
+ # with a more recent OEM Revision, the platform table will be upgraded by
+ # this table. If the table put here doesn't match a platform table
+ # (dissimilar Table Signature, or dissimilar OEMID, or dissimilar OEM Table
+ # ID), this table will be appended.
+ mkdir -p kernel/firmware/acpi
+ cp dsdt.aml kernel/firmware/acpi
+ # A maximum of "NR_ACPI_INITRD_TABLES (64)" tables are currently allowed
+ # (see osl.c):
+ iasl -sa facp.dsl
+ iasl -sa ssdt1.dsl
+ cp facp.aml kernel/firmware/acpi
+ cp ssdt1.aml kernel/firmware/acpi
+ # The uncompressed cpio archive must be the first. Other, typically
+ # compressed cpio archives, must be concatenated on top of the uncompressed
+ # one. Following command creates the uncompressed cpio archive and
+ # concatenates the original initrd on top:
+ find kernel | cpio -H newc --create > /boot/instrumented_initrd
+ cat /boot/initrd >>/boot/instrumented_initrd
+ # reboot with increased acpi debug level, e.g. boot params:
+ acpi.debug_level=0x2 acpi.debug_layer=0xFFFFFFFF
+ # and check your syslog:
+ [ 1.268089] ACPI: PCI Interrupt Routing Table [\_SB_.PCI0._PRT]
+ [ 1.272091] [ACPI Debug] String [0x0B] "HELLO WORLD"
+
+iasl is able to disassemble and recompile quite a lot of different
+ACPI tables, including static ones.
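+
+Before rebooting, the archive content can be sanity-checked; GNU cpio
+normally stops listing at the end of the first, uncompressed archive,
+which is exactly the part that holds the tables::
+
+  cpio -it < /boot/instrumented_initrd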
+
+
+Where to retrieve userspace tools
+=================================
+
+iasl and acpixtract are part of Intel's ACPICA project:
+http://acpica.org/
+
+They should be packaged by distributions (for example in the acpica
+package on SUSE).
+
+acpidump can be found in Len Brown's pmtools:
+ftp://kernel.org/pub/linux/kernel/people/lenb/acpi/utils/pmtools/acpidump
+
+This tool is also part of the acpica package on SUSE.
+Alternatively, the ACPI tables currently in use can be retrieved via sysfs
+on recent kernels:
+/sys/firmware/acpi/tables
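+
+For example, the DSDT in use can be dumped and disassembled straight from
+sysfs (a minimal sketch using the tools mentioned above)::
+
+  cat /sys/firmware/acpi/tables/DSDT > dsdt.dat
+  iasl -d dsdt.dat
+  # the disassembled source is now in dsdt.dsl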
diff --git a/Documentation/admin-guide/acpi/ssdt-overlays.rst b/Documentation/admin-guide/acpi/ssdt-overlays.rst
new file mode 100644
index 000000000000..da37455f96c9
--- /dev/null
+++ b/Documentation/admin-guide/acpi/ssdt-overlays.rst
@@ -0,0 +1,180 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+SSDT Overlays
+=============
+
+In order to support ACPI open-ended hardware configurations (e.g. development
+boards) we need a way to augment the ACPI configuration provided by the firmware
+image. A common example is connecting sensors on I2C / SPI buses on development
+boards.
+
+Although this can be accomplished by creating a kernel platform driver or
+recompiling the firmware image with updated ACPI tables, neither is practical:
+the former proliferates board specific kernel code while the latter requires
+access to firmware tools which are often not publicly available.
+
+Because ACPI supports external references in AML code a more practical
+way to augment firmware ACPI configuration is by dynamically loading
+user defined SSDT tables that contain the board specific information.
+
+For example, to enumerate a Bosch BMA222E accelerometer on the I2C bus of the
+Minnowboard MAX development board exposed via the LSE connector [1], the
+following ASL code can be used::
+
+ DefinitionBlock ("minnowmax.aml", "SSDT", 1, "Vendor", "Accel", 0x00000003)
+ {
+ External (\_SB.I2C6, DeviceObj)
+
+ Scope (\_SB.I2C6)
+ {
+ Device (STAC)
+ {
+ Name (_ADR, Zero)
+ Name (_HID, "BMA222E")
+
+ Method (_CRS, 0, Serialized)
+ {
+ Name (RBUF, ResourceTemplate ()
+ {
+ I2cSerialBus (0x0018, ControllerInitiated, 0x00061A80,
+ AddressingMode7Bit, "\\_SB.I2C6", 0x00,
+ ResourceConsumer, ,)
+ GpioInt (Edge, ActiveHigh, Exclusive, PullDown, 0x0000,
+ "\\_SB.GPO2", 0x00, ResourceConsumer, , )
+ { // Pin list
+ 0
+ }
+ })
+ Return (RBUF)
+ }
+ }
+ }
+ }
+
+which can then be compiled to AML binary format::
+
+ $ iasl minnowmax.asl
+
+ Intel ACPI Component Architecture
+ ASL Optimizing Compiler version 20140214-64 [Mar 29 2014]
+ Copyright (c) 2000 - 2014 Intel Corporation
+
+ ASL Input: minnowmax.asl - 30 lines, 614 bytes, 7 keywords
+ AML Output: minnowmax.aml - 165 bytes, 6 named objects, 1 executable opcodes
+
+[1] http://wiki.minnowboard.org/MinnowBoard_MAX#Low_Speed_Expansion_Connector_.28Top.29
+
+The resulting AML code can then be loaded by the kernel using one of the methods
+below.
+
+Loading ACPI SSDTs from initrd
+==============================
+
+This option allows loading of user defined SSDTs from initrd and it is useful
+when the system does not support EFI or when there is not enough EFI storage.
+
+It works in a similar way to the initrd based ACPI table override/upgrade:
+the SSDT AML code must be placed in the first, uncompressed, initrd under
+the "kernel/firmware/acpi" path. Multiple files can be used and this will
+translate into loading multiple tables. Only SSDT and OEM tables are
+allowed. See initrd_table_override.rst for more details.
+
+Here is an example::
+
+ # Add the raw ACPI tables to an uncompressed cpio archive.
+ # They must be put into a /kernel/firmware/acpi directory inside the
+ # cpio archive.
+ # The uncompressed cpio archive must be the first.
+ # Other, typically compressed cpio archives, must be
+ # concatenated on top of the uncompressed one.
+ mkdir -p kernel/firmware/acpi
+ cp ssdt.aml kernel/firmware/acpi
+
+ # Create the uncompressed cpio archive and concatenate the original initrd
+ # on top:
+ find kernel | cpio -H newc --create > /boot/instrumented_initrd
+ cat /boot/initrd >>/boot/instrumented_initrd
+
+Loading ACPI SSDTs from EFI variables
+=====================================
+
+This is the preferred method, when EFI is supported on the platform, because it
+allows a persistent, OS independent way of storing the user defined SSDTs. There
+is also work underway to implement EFI support for loading user defined SSDTs
+and using this method will make it easier to convert to the EFI loading
+mechanism when it arrives.
+
+In order to load SSDTs from an EFI variable the efivar_ssdt kernel command line
+parameter can be used. The argument for the option is the variable name to
+use. If there are multiple variables with the same name but with different
+vendor GUIDs, all of them will be loaded.
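+
+For example, assuming the SSDT is stored in an EFI variable named CpuSsdt
+(the name here is arbitrary), the corresponding boot parameter would be::
+
+  efivar_ssdt=CpuSsdt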
+
+In order to store the AML code in an EFI variable the efivarfs filesystem can be
+used. It is enabled and mounted by default in /sys/firmware/efi/efivars in
+all recent distributions.
+
+Creating a new file in /sys/firmware/efi/efivars will automatically create a new
+EFI variable. Updating a file in /sys/firmware/efi/efivars will update the EFI
+variable. Please note that the file name needs to be specially formatted as
+"Name-GUID" and that the first 4 bytes in the file (little-endian format)
+represent the attributes of the EFI variable (see EFI_VARIABLE_MASK in
+include/linux/efi.h). Writing to the file must also be done with one write
+operation.
+
+For example, you can use the following shell script to create/update an EFI
+variable with the content from a given file::
+
+ #!/bin/sh -e
+
+ while ! [ -z "$1" ]; do
+ case "$1" in
+ "-f") filename="$2"; shift;;
+ "-g") guid="$2"; shift;;
+ *) name="$1";;
+ esac
+ shift
+ done
+
+ usage()
+ {
+ echo "Syntax: ${0##*/} -f filename [ -g guid ] name"
+ exit 1
+ }
+
+ [ -n "$name" -a -f "$filename" ] || usage
+
+ EFIVARFS="/sys/firmware/efi/efivars"
+
+ [ -d "$EFIVARFS" ] || exit 2
+
+ if stat -tf $EFIVARFS | grep -q -v de5e81e4; then
+ mount -t efivarfs none $EFIVARFS
+ fi
+
+ # try to pick up an existing GUID
+ [ -n "$guid" ] || guid=$(find "$EFIVARFS" -name "$name-*" | head -n1 | cut -f2- -d-)
+
+ # use a randomly generated GUID
+ [ -n "$guid" ] || guid="$(cat /proc/sys/kernel/random/uuid)"
+
+ # efivarfs expects all of the data in one write
+ tmp=$(mktemp)
+ /bin/echo -ne "\007\000\000\000" | cat - $filename > $tmp
+ dd if=$tmp of="$EFIVARFS/$name-$guid" bs=$(stat -c %s $tmp)
+ rm $tmp
+
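+Assuming the script above was saved as efivar-ssdt.sh (a name chosen here
+for illustration), storing ssdt.aml in a variable named CpuSsdt then
+becomes::
+
+  ./efivar-ssdt.sh -f ssdt.aml CpuSsdt
+
+after which the system can be rebooted with the efivar_ssdt=CpuSsdt
+parameter described above.
+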
+Loading ACPI SSDTs from configfs
+================================
+
+This option allows loading of user defined SSDTs from userspace via the configfs
+interface. The CONFIG_ACPI_CONFIGFS option must be selected and configfs must be
+mounted. In the following examples, we assume that configfs has been mounted in
+/config.
+
+New tables can be loaded by creating new directories in /config/acpi/table/ and
+writing the SSDT aml code in the aml attribute::
+
+ cd /config/acpi/table
+ mkdir my_ssdt
+ cat ~/ssdt.aml > my_ssdt/aml
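+
+Once loaded, recent kernels also expose read-only attributes in the table
+directory (e.g. signature, oem_id, oem_table_id) that can be used to
+verify the result; a quick sketch::
+
+  cat my_ssdt/signature
+  cat my_ssdt/oem_id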
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 0a491676685e..5b8286fdd91b 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -77,6 +77,7 @@ configure specific aspects of kernel behavior to your liking.
LSM/index
mm/index
perf-security
+ acpi/index
.. only:: subproject and html
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index b8d0bc07ed0a..0124980dca2d 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -88,6 +88,7 @@ parameter is applicable::
APIC APIC support is enabled.
APM Advanced Power Management support is enabled.
ARM ARM architecture is enabled.
+ ARM64 ARM64 architecture is enabled.
AX25 Appropriate AX.25 support is enabled.
CLK Common clock infrastructure is enabled.
CMA Contiguous Memory Area support is enabled.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 42379633801f..fd03e2b629bb 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -704,8 +704,11 @@
upon panic. This parameter reserves the physical
memory region [offset, offset + size] for that kernel
image. If '@offset' is omitted, then a suitable offset
- is selected automatically. Check
- Documentation/kdump/kdump.txt for further details.
+ is selected automatically.
+ [KNL, x86_64] Select a region under 4G first, and
+ fall back to reserving a region above 4G when
+ '@offset' has not been specified.
+ See Documentation/kdump/kdump.txt for further details.
crashkernel=range1:size1[,range2:size2,...][@offset]
[KNL] Same as above, but depends on the memory
@@ -1197,9 +1200,10 @@
arch/x86/kernel/cpu/cpufreq/elanfreq.c.
elevator= [IOSCHED]
- Format: {"cfq" | "deadline" | "noop"}
- See Documentation/block/cfq-iosched.txt and
- Documentation/block/deadline-iosched.txt for details.
+ Format: { "mq-deadline" | "kyber" | "bfq" }
+ See Documentation/block/deadline-iosched.txt,
+ Documentation/block/kyber-iosched.txt and
+ Documentation/block/bfq-iosched.txt for details.
elfcorehdr=[size[KMG]@]offset[KMG] [IA64,PPC,SH,X86,S390]
Specifies physical address of start of kernel core
@@ -1845,6 +1849,11 @@
to let secondary kernels in charge of setting up
LPIs.
+ irqchip.gicv3_pseudo_nmi= [ARM64]
+ Enables support for pseudo-NMIs in the kernel. This
+ requires the kernel to be built with
+ CONFIG_ARM64_PSEUDO_NMI.
+
irqfixup [HW]
When an interrupt is not handled search all handlers
for it. Intended to get systems with badly broken
@@ -1996,6 +2005,12 @@
Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
the default is off.
+ kpti= [ARM64] Control page table isolation of user
+ and kernel address spaces.
+ Default: enabled on cores which need mitigation.
+ 0: force disabled
+ 1: force enabled
+
kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)
@@ -2532,6 +2547,40 @@
in the "bleeding edge" mini2440 support kernel at
http://repo.or.cz/w/linux-2.6/mini2440.git
+ mitigations=
+ [X86,PPC,S390,ARM64] Control optional mitigations for
+ CPU vulnerabilities. This is a set of curated,
+ arch-independent options, each of which is an
+ aggregation of existing arch-specific options.
+
+ off
+ Disable all optional CPU mitigations. This
+ improves system performance, but it may also
+ expose users to several CPU vulnerabilities.
+ Equivalent to: nopti [X86,PPC]
+ kpti=0 [ARM64]
+ nospectre_v1 [PPC]
+ nobp=0 [S390]
+ nospectre_v2 [X86,PPC,S390,ARM64]
+ spectre_v2_user=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
+ ssbd=force-off [ARM64]
+ l1tf=off [X86]
+
+ auto (default)
+ Mitigate all CPU vulnerabilities, but leave SMT
+ enabled, even if it's vulnerable. This is for
+ users who don't want to be surprised by SMT
+ getting disabled across kernel upgrades, or who
+ have other ways of avoiding SMT-based attacks.
+ Equivalent to: (default behavior)
+
+ auto,nosmt
+ Mitigate all CPU vulnerabilities, disabling SMT
+ if needed. This is for users who always want to
+ be fully mitigated, even if it means losing SMT.
+ Equivalent to: l1tf=flush,nosmt [X86]
+
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
parameter allows control of the logging verbosity for
@@ -2861,10 +2910,10 @@
check bypass). With this option data leaks are possible
in the system.
- nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
- (indirect branch prediction) vulnerability. System may
- allow data leaks with this option, which is equivalent
- to spectre_v2=off.
+ nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
+ the Spectre variant 2 (indirect branch prediction)
+ vulnerability. System may allow data leaks with this
+ option.
nospec_store_bypass_disable
[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
@@ -3382,6 +3431,8 @@
bridges without forcing it upstream. Note:
this removes isolation between devices and
may put more devices in an IOMMU group.
+ force_floating [S390] Force usage of floating interrupts.
+ nomio [S390] Do not use MIO instructions.
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
@@ -3611,7 +3662,9 @@
see CONFIG_RAS_CEC help text.
rcu_nocbs= [KNL]
- The argument is a cpu list, as described above.
+ The argument is a cpu list, as described above,
+ except that the string "all" can be used to
+ specify every CPU on the system.
In kernels built with CONFIG_RCU_NOCB_CPU=y, set
the specified list of CPUs to be no-callback CPUs.
@@ -4691,6 +4744,10 @@
[x86] unstable: mark the TSC clocksource as unstable, this
marks the TSC unconditionally unstable at bootup and
avoids any further wobbles once the TSC watchdog notices.
+ [x86] nowatchdog: disable clocksource watchdog. Used
+ in situations with strict latency requirements (where
+ interruptions from clocksource watchdog are not
+ acceptable).
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
@@ -5066,6 +5123,14 @@
or other driver-specific files in the
Documentation/watchdog/ directory.
+ watchdog_thresh=
+ [KNL]
+ Set the hard lockup detector stall duration
+ threshold in seconds. The soft lockup detector
+ threshold is set to twice the value. A value of 0
+ disables both lockup detectors. Default is 10
+ seconds.
+
workqueue.watchdog_thresh=
If CONFIG_WQ_WATCHDOG is configured, workqueue can
warn stall conditions and dump internal state to
diff --git a/Documentation/admin-guide/md.rst b/Documentation/admin-guide/md.rst
index 84de718f24a4..3c51084ffd37 100644
--- a/Documentation/admin-guide/md.rst
+++ b/Documentation/admin-guide/md.rst
@@ -756,3 +756,6 @@ These currently include:
The cache mode for raid5. raid5 could include an extra disk for
caching. The mode can be "write-through" or "write-back". The
default is "write-through".
+
+ ppl_write_hint
+ NVMe stream ID to be set for each PPL write request.
diff --git a/Documentation/admin-guide/perf-security.rst b/Documentation/admin-guide/perf-security.rst
index f73ebfe9bfe2..72effa7c23b9 100644
--- a/Documentation/admin-guide/perf-security.rst
+++ b/Documentation/admin-guide/perf-security.rst
@@ -6,83 +6,211 @@ Perf Events and tool security
Overview
--------
-Usage of Performance Counters for Linux (perf_events) [1]_ , [2]_ , [3]_ can
-impose a considerable risk of leaking sensitive data accessed by monitored
-processes. The data leakage is possible both in scenarios of direct usage of
-perf_events system call API [2]_ and over data files generated by Perf tool user
-mode utility (Perf) [3]_ , [4]_ . The risk depends on the nature of data that
-perf_events performance monitoring units (PMU) [2]_ collect and expose for
-performance analysis. Having that said perf_events/Perf performance monitoring
-is the subject for security access control management [5]_ .
+Usage of Performance Counters for Linux (perf_events) [1]_ , [2]_ , [3]_
+can impose a considerable risk of leaking sensitive data accessed by
+monitored processes. The data leakage is possible both in scenarios of
+direct usage of perf_events system call API [2]_ and over data files
+generated by Perf tool user mode utility (Perf) [3]_ , [4]_ . The risk
+depends on the nature of data that perf_events performance monitoring
+units (PMU) [2]_ and Perf collect and expose for performance analysis.
+Collected system and performance data may be split into several
+categories:
+
+1. System hardware and software configuration data, for example: a CPU
+ model and its cache configuration, an amount of available memory and
+ its topology, used kernel and Perf versions, performance monitoring
+ setup including experiment time, events configuration, Perf command
+ line parameters, etc.
+
+2. User and kernel module paths and their load addresses with sizes,
+ process and thread names with their PIDs and TIDs, timestamps for
+ captured hardware and software events.
+
+3. Content of kernel software counters (e.g., for context switches, page
+ faults, CPU migrations), architectural hardware performance counters
+ (PMC) [8]_ and machine specific registers (MSR) [9]_ that provide
+ execution metrics for various monitored parts of the system (e.g.,
+ memory controller (IMC), interconnect (QPI/UPI) or peripheral (PCIe)
+ uncore counters) without direct attribution to any execution context
+ state.
+
+4. Content of architectural execution context registers (e.g., RIP, RSP,
+ RBP on x86_64), process user and kernel space memory addresses and
+ data, content of various architectural MSRs that capture data from
+ this category.
+
+Data that belong to the fourth category can potentially contain
+sensitive process data. If PMUs in some monitoring modes capture values
+of execution context registers or data from process memory, then access
+to such monitoring capabilities needs to be ordered and secured
+properly. So, perf_events/Perf performance monitoring is subject to
+security access control management [5]_ .
perf_events/Perf access control
-------------------------------
-To perform security checks, the Linux implementation splits processes into two
-categories [6]_ : a) privileged processes (whose effective user ID is 0, referred
-to as superuser or root), and b) unprivileged processes (whose effective UID is
-nonzero). Privileged processes bypass all kernel security permission checks so
-perf_events performance monitoring is fully available to privileged processes
-without access, scope and resource restrictions.
-
-Unprivileged processes are subject to a full security permission check based on
-the process's credentials [5]_ (usually: effective UID, effective GID, and
-supplementary group list).
-
-Linux divides the privileges traditionally associated with superuser into
-distinct units, known as capabilities [6]_ , which can be independently enabled
-and disabled on per-thread basis for processes and files of unprivileged users.
-
-Unprivileged processes with enabled CAP_SYS_ADMIN capability are treated as
-privileged processes with respect to perf_events performance monitoring and
-bypass *scope* permissions checks in the kernel.
-
-Unprivileged processes using perf_events system call API is also subject for
-PTRACE_MODE_READ_REALCREDS ptrace access mode check [7]_ , whose outcome
-determines whether monitoring is permitted. So unprivileged processes provided
-with CAP_SYS_PTRACE capability are effectively permitted to pass the check.
-
-Other capabilities being granted to unprivileged processes can effectively
-enable capturing of additional data required for later performance analysis of
-monitored processes or a system. For example, CAP_SYSLOG capability permits
-reading kernel space memory addresses from /proc/kallsyms file.
+To perform security checks, the Linux implementation splits processes
+into two categories [6]_ : a) privileged processes (whose effective user
+ID is 0, referred to as superuser or root), and b) unprivileged
+processes (whose effective UID is nonzero). Privileged processes bypass
+all kernel security permission checks so perf_events performance
+monitoring is fully available to privileged processes without access,
+scope and resource restrictions.
+
+Unprivileged processes are subject to a full security permission check
+based on the process's credentials [5]_ (usually: effective UID,
+effective GID, and supplementary group list).
+
+Linux divides the privileges traditionally associated with superuser
+into distinct units, known as capabilities [6]_ , which can be
+independently enabled and disabled on per-thread basis for processes and
+files of unprivileged users.
+
+Unprivileged processes with enabled CAP_SYS_ADMIN capability are treated
+as privileged processes with respect to perf_events performance
+monitoring and bypass *scope* permissions checks in the kernel.
+
+Unprivileged processes using the perf_events system call API are also
+subject to the PTRACE_MODE_READ_REALCREDS ptrace access mode check [7]_ ,
+whose outcome determines whether monitoring is permitted. So unprivileged
+processes provided with the CAP_SYS_PTRACE capability are effectively
+permitted to pass the check.
+
+Other capabilities being granted to unprivileged processes can
+effectively enable capturing of additional data required for later
+performance analysis of monitored processes or a system. For example,
+CAP_SYSLOG capability permits reading kernel space memory addresses from
+/proc/kallsyms file.
+
+perf_events/Perf privileged users
+---------------------------------
+
+Mechanisms of capabilities, privileged capability-dumb files [6]_ and
+file system ACLs [10]_ can be used to create a dedicated group of
+perf_events/Perf privileged users who are permitted to execute
+performance monitoring without scope limits. The following steps can be
+taken to create such a group of privileged Perf users.
+
+1. Create a perf_users group of privileged Perf users, assign the
+   perf_users group to the Perf tool executable and limit access to the
+   executable for other users in the system who are not in the perf_users
+   group:
+
+::
+
+ # groupadd perf_users
+ # ls -alhF
+ -rwxr-xr-x 2 root root 11M Oct 19 15:12 perf
+ # chgrp perf_users perf
+ # ls -alhF
+ -rwxr-xr-x 2 root perf_users 11M Oct 19 15:12 perf
+ # chmod o-rwx perf
+ # ls -alhF
+ -rwxr-x--- 2 root perf_users 11M Oct 19 15:12 perf
+
+2. Assign the required capabilities to the Perf tool executable file and
+ enable members of perf_users group with performance monitoring
+ privileges [6]_ :
+
+::
+
+ # setcap "cap_sys_admin,cap_sys_ptrace,cap_syslog=ep" perf
+ # setcap -v "cap_sys_admin,cap_sys_ptrace,cap_syslog=ep" perf
+ perf: OK
+ # getcap perf
+ perf = cap_sys_ptrace,cap_sys_admin,cap_syslog+ep
+
+As a result, members of the perf_users group are capable of conducting
+performance monitoring by using the functionality of the configured Perf
+tool executable which, when executed, passes the perf_events subsystem
+scope checks.
+
+This specific access control management is only available to superuser
+or root processes with the CAP_SETPCAP and CAP_SETFCAP [6]_
+capabilities.
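+
+As an alternative to changing the group ownership, the file system ACLs
+mentioned above can extend access to individual users (a sketch; alice is
+a placeholder user name)::
+
+    # setfacl -m u:alice:rx perf
+    # getfacl perf | grep alice
+    user:alice:r-x
+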
perf_events/Perf unprivileged users
-----------------------------------
-perf_events/Perf *scope* and *access* control for unprivileged processes is
-governed by perf_event_paranoid [2]_ setting:
+perf_events/Perf *scope* and *access* control for unprivileged processes
+is governed by the perf_event_paranoid [2]_ setting:
-1:
- Impose no *scope* and *access* restrictions on using perf_events performance
- monitoring. Per-user per-cpu perf_event_mlock_kb [2]_ locking limit is
- ignored when allocating memory buffers for storing performance data.
- This is the least secure mode since allowed monitored *scope* is
- maximized and no perf_events specific limits are imposed on *resources*
- allocated for performance monitoring.
+ Impose no *scope* and *access* restrictions on using perf_events
+ performance monitoring. Per-user per-cpu perf_event_mlock_kb [2]_
+ locking limit is ignored when allocating memory buffers for storing
+ performance data. This is the least secure mode since allowed
+ monitored *scope* is maximized and no perf_events specific limits
+ are imposed on *resources* allocated for performance monitoring.
>=0:
*scope* includes per-process and system wide performance monitoring
- but excludes raw tracepoints and ftrace function tracepoints monitoring.
- CPU and system events happened when executing either in user or
- in kernel space can be monitored and captured for later analysis.
- Per-user per-cpu perf_event_mlock_kb locking limit is imposed but
- ignored for unprivileged processes with CAP_IPC_LOCK [6]_ capability.
+ but excludes raw tracepoints and ftrace function tracepoints
+ monitoring. CPU and system events happened when executing either in
+ user or in kernel space can be monitored and captured for later
+ analysis. Per-user per-cpu perf_event_mlock_kb locking limit is
+ imposed but ignored for unprivileged processes with CAP_IPC_LOCK
+ [6]_ capability.
>=1:
- *scope* includes per-process performance monitoring only and excludes
- system wide performance monitoring. CPU and system events happened when
- executing either in user or in kernel space can be monitored and
- captured for later analysis. Per-user per-cpu perf_event_mlock_kb
- locking limit is imposed but ignored for unprivileged processes with
- CAP_IPC_LOCK capability.
+ *scope* includes per-process performance monitoring only and
+ excludes system wide performance monitoring. CPU and system events
+ happened when executing either in user or in kernel space can be
+ monitored and captured for later analysis. Per-user per-cpu
+ perf_event_mlock_kb locking limit is imposed but ignored for
+ unprivileged processes with CAP_IPC_LOCK capability.
>=2:
- *scope* includes per-process performance monitoring only. CPU and system
- events happened when executing in user space only can be monitored and
- captured for later analysis. Per-user per-cpu perf_event_mlock_kb
- locking limit is imposed but ignored for unprivileged processes with
- CAP_IPC_LOCK capability.
+ *scope* includes per-process performance monitoring only. Only CPU
+ and system events that happen when executing in user space can be
+ monitored and captured for later analysis. The per-user per-cpu
+ perf_event_mlock_kb locking limit is imposed but ignored for
+ unprivileged processes with the CAP_IPC_LOCK capability.
+
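+The current perf_event_paranoid value can be checked and adjusted via
+the sysctl interface, for example (the values are illustrative)::
+
+   $ cat /proc/sys/kernel/perf_event_paranoid
+   2
+   # sysctl -w kernel.perf_event_paranoid=1
+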
+perf_events/Perf resource control
+---------------------------------
+
+Open file descriptors
++++++++++++++++++++++
+
+The perf_events system call API [2]_ allocates file descriptors for
+every configured PMU event. Open file descriptors are a per-process
+accountable resource governed by the RLIMIT_NOFILE [11]_ limit
+(ulimit -n), which is usually derived from the login shell process. When
+configuring Perf collection for a long list of events on a large server
+system, this limit can easily be hit, preventing the required monitoring
+configuration. The RLIMIT_NOFILE limit can be increased on a per-user
+basis by modifying the limits.conf file [12]_ . Ordinarily, a Perf
+sampling session (perf record) requires no fewer open perf_event file
+descriptors than the number of monitored events multiplied by the
+number of monitored CPUs.
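+
+For example, the current limit can be checked with ``ulimit -n`` and
+raised for a hypothetical user by appending an entry to the
+limits.conf file (the user name and number are illustrative)::
+
+   $ ulimit -n
+   1024
+   # echo "jdoe hard nofile 65536" >> /etc/security/limits.conf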
+
+Memory allocation
++++++++++++++++++
+
+The amount of memory available to user processes for capturing
+performance monitoring data is governed by the perf_event_mlock_kb [2]_
+setting. This perf_event-specific resource setting defines the overall
+per-cpu limit on memory allowed to be mapped by user processes to
+execute performance monitoring. The setting essentially extends the
+RLIMIT_MEMLOCK [11]_ limit, but only for memory regions mapped
+specifically for capturing monitored performance events and related data.
+
+For example, if a machine has eight cores and perf_event_mlock_kb limit
+is set to 516 KiB, then a user process is provided with 516 KiB * 8 =
+4128 KiB of memory above the RLIMIT_MEMLOCK limit (ulimit -l) for
+perf_event mmap buffers. In particular, this means that, if the user
+wants to start two or more performance monitoring processes, the user is
+required to manually distribute the available 4128 KiB between the
+monitoring processes, for example, using the --mmap-pages Perf record
+mode option. Otherwise, the first started performance monitoring process
+allocates all available 4128 KiB and the other processes will fail to
+proceed due to the lack of memory.
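+
+For example, two concurrent sampling sessions could each be limited to
+roughly half of the per-cpu budget (the option values and workloads are
+illustrative)::
+
+   $ perf record --mmap-pages=64 -e cycles -- ./workload1 &
+   $ perf record --mmap-pages=64 -e cycles -- ./workload2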
+
+RLIMIT_MEMLOCK and perf_event_mlock_kb resource constraints are ignored
+for processes with the CAP_IPC_LOCK capability. Thus, perf_events/Perf
+privileged users can be provided with memory above the constraints for
+perf_events/Perf performance monitoring purposes by granting the Perf
+executable the CAP_IPC_LOCK capability.
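+
+For example (a hypothetical assignment, analogous to the capability
+setup shown earlier)::
+
+   # setcap "cap_ipc_lock=ep" perf
+   # getcap perf
+   perf = cap_ipc_lock+ep
+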
Bibliography
------------
@@ -94,4 +222,9 @@ Bibliography
.. [5] `<https://www.kernel.org/doc/html/latest/security/credentials.html>`_
.. [6] `<http://man7.org/linux/man-pages/man7/capabilities.7.html>`_
.. [7] `<http://man7.org/linux/man-pages/man2/ptrace.2.html>`_
+.. [8] `<https://en.wikipedia.org/wiki/Hardware_performance_counter>`_
+.. [9] `<https://en.wikipedia.org/wiki/Model-specific_register>`_
+.. [10] `<http://man7.org/linux/man-pages/man5/acl.5.html>`_
+.. [11] `<http://man7.org/linux/man-pages/man2/getrlimit.2.html>`_
+.. [12] `<http://man7.org/linux/man-pages/man5/limits.conf.5.html>`_
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 7eca9026a9ed..0c74a7784964 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
.. |struct cpufreq_policy| replace:: :c:type:`struct cpufreq_policy <cpufreq_policy>`
.. |intel_pstate| replace:: :doc:`intel_pstate <intel_pstate>`
@@ -5,9 +8,10 @@
CPU Performance Scaling
=======================
-::
+:Copyright: |copy| 2017 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
The Concept of CPU Performance Scaling
======================================
@@ -396,8 +400,8 @@ RT or deadline scheduling classes, the governor will increase the frequency to
the allowed maximum (that is, the ``scaling_max_freq`` policy limit). In turn,
if it is invoked by the CFS scheduling class, the governor will use the
Per-Entity Load Tracking (PELT) metric for the root control group of the
-given CPU as the CPU utilization estimate (see the `Per-entity load tracking`_
-LWN.net article for a description of the PELT mechanism). Then, the new
+given CPU as the CPU utilization estimate (see the *Per-entity load tracking*
+LWN.net article [1]_ for a description of the PELT mechanism). Then, the new
CPU frequency to apply is computed in accordance with the formula
f = 1.25 * ``f_0`` * ``util`` / ``max``
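+
+As an illustration (the numbers are hypothetical), with ``f_0`` = 2000 MHz,
+``util`` = 512 and ``max`` = 1024, this yields f = 1.25 * 2000 MHz * 512 /
+1024 = 1250 MHz as the frequency to request.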
@@ -698,4 +702,8 @@ hardware feature (e.g. all Intel ones), even if the
:c:macro:`CONFIG_X86_ACPI_CPUFREQ_CPB` configuration option is set.
-.. _Per-entity load tracking: https://lwn.net/Articles/531853/
+References
+==========
+
+.. [1] Jonathan Corbet, *Per-entity load tracking*,
+ https://lwn.net/Articles/531853/
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 9c58b35a81cb..e70b365dbc60 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
.. |struct cpuidle_state| replace:: :c:type:`struct cpuidle_state <cpuidle_state>`
.. |cpufreq| replace:: :doc:`CPU Performance Scaling <cpufreq>`
@@ -5,9 +8,10 @@
CPU Idle Time Management
========================
-::
+:Copyright: |copy| 2018 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2018 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Concepts
========
diff --git a/Documentation/admin-guide/pm/index.rst b/Documentation/admin-guide/pm/index.rst
index 49237ac73442..39f8f9f81e7a 100644
--- a/Documentation/admin-guide/pm/index.rst
+++ b/Documentation/admin-guide/pm/index.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
================
Power Management
================
diff --git a/Documentation/admin-guide/pm/intel_epb.rst b/Documentation/admin-guide/pm/intel_epb.rst
new file mode 100644
index 000000000000..005121167af7
--- /dev/null
+++ b/Documentation/admin-guide/pm/intel_epb.rst
@@ -0,0 +1,41 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+======================================
+Intel Performance and Energy Bias Hint
+======================================
+
+:Copyright: |copy| 2019 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+
+.. kernel-doc:: arch/x86/kernel/cpu/intel_epb.c
+ :doc: overview
+
+Intel Performance and Energy Bias Attribute in ``sysfs``
+========================================================
+
+The Intel Performance and Energy Bias Hint (EPB) value for a given (logical) CPU
+can be checked or updated through a ``sysfs`` attribute (file) under
+:file:`/sys/devices/system/cpu/cpu<N>/power/`, where the CPU number ``<N>``
+is allocated at system initialization time:
+
+``energy_perf_bias``
+ Shows the current EPB value for the CPU on a sliding scale 0 - 15, where
+ a value of 0 corresponds to a hint preference for highest performance
+ and a value of 15 corresponds to the maximum energy savings.
+
+ In order to update the EPB value for the CPU, this attribute can be
+ written to, either with a number on the 0 - 15 sliding scale above, or
+ with one of the strings "performance", "balance-performance", "normal",
+ "balance-power" or "power", which map to corresponding values on that
+ scale.
+
+ This attribute is present for all online CPUs supporting the EPB
+ feature.
+
+Note that while the EPB interface to the processor is defined at the logical CPU
+level, the physical register backing it may be shared by multiple CPUs (for
+example, SMT siblings or cores in one package). For this reason, updating the
+EPB value for one CPU may cause the EPB values for other CPUs to change.
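+
+For example, the EPB value for CPU0 could be read and updated as follows
+(the CPU number and the value shown are hypothetical)::
+
+   $ cat /sys/devices/system/cpu/cpu0/power/energy_perf_bias
+   6
+   # echo "balance-performance" > /sys/devices/system/cpu/cpu0/power/energy_perf_bias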
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index ec0f7c111f65..67e414e34f37 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -1,10 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
===============================================
``intel_pstate`` CPU Performance Scaling Driver
===============================================
-::
+:Copyright: |copy| 2017 Intel Corporation
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
General Information
@@ -20,11 +23,10 @@ you have not done that yet.]
For the processors supported by ``intel_pstate``, the P-state concept is broader
than just an operating frequency or an operating performance point (see the
-`LinuxCon Europe 2015 presentation by Kristen Accardi <LCEU2015_>`_ for more
+LinuxCon Europe 2015 presentation by Kristen Accardi [1]_ for more
information about that). For this reason, the representation of P-states used
by ``intel_pstate`` internally follows the hardware specification (for details
-refer to `Intel® 64 and IA-32 Architectures Software Developer’s Manual
-Volume 3: System Programming Guide <SDM_>`_). However, the ``CPUFreq`` core
+refer to Intel Software Developer’s Manual [2]_). However, the ``CPUFreq`` core
uses frequencies for identifying operating performance points of CPUs and
frequencies are involved in the user space interface exposed by it, so
``intel_pstate`` maps its internal representation of P-states to frequencies too
@@ -561,9 +563,9 @@ or to pin every task potentially sensitive to them to a specific CPU.]
On the majority of systems supported by ``intel_pstate``, the ACPI tables
provided by the platform firmware contain ``_PSS`` objects returning information
-that can be used for CPU performance scaling (refer to the `ACPI specification`_
-for details on the ``_PSS`` objects and the format of the information returned
-by them).
+that can be used for CPU performance scaling (refer to the ACPI specification
+[3]_ for details on the ``_PSS`` objects and the format of the information
+returned by them).
The information returned by the ACPI ``_PSS`` objects is used by the
``acpi-cpufreq`` scaling driver. On systems supported by ``intel_pstate``
@@ -728,6 +730,14 @@ P-state is called, the ``ftrace`` filter can be set to to
<idle>-0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func
-.. _LCEU2015: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
-.. _SDM: http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
-.. _ACPI specification: http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf
+References
+==========
+
+.. [1] Kristen Accardi, *Balancing Power and Performance in the Linux Kernel*,
+ http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
+
+.. [2] *Intel® 64 and IA-32 Architectures Software Developer’s Manual Volume 3: System Programming Guide*,
+ http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
+
+.. [3] *Advanced Configuration and Power Interface Specification*,
+ https://uefi.org/sites/default/files/resources/ACPI_6_3_final_Jan30.pdf
diff --git a/Documentation/admin-guide/pm/sleep-states.rst b/Documentation/admin-guide/pm/sleep-states.rst
index dbf5acd49f35..cd3a28cb81f4 100644
--- a/Documentation/admin-guide/pm/sleep-states.rst
+++ b/Documentation/admin-guide/pm/sleep-states.rst
@@ -1,10 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
===================
System Sleep States
===================
-::
+:Copyright: |copy| 2017 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Sleep states are global low-power states of the entire system in which user
space code cannot be executed and the overall system activity is significantly
diff --git a/Documentation/admin-guide/pm/strategies.rst b/Documentation/admin-guide/pm/strategies.rst
index afe4d3f831fe..dd0362e32fa5 100644
--- a/Documentation/admin-guide/pm/strategies.rst
+++ b/Documentation/admin-guide/pm/strategies.rst
@@ -1,10 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
===========================
Power Management Strategies
===========================
-::
+:Copyright: |copy| 2017 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
The Linux kernel supports two major high-level power management strategies.
diff --git a/Documentation/admin-guide/pm/system-wide.rst b/Documentation/admin-guide/pm/system-wide.rst
index 0c81e4c5de39..2b1f987b34f0 100644
--- a/Documentation/admin-guide/pm/system-wide.rst
+++ b/Documentation/admin-guide/pm/system-wide.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
============================
System-Wide Power Management
============================
diff --git a/Documentation/admin-guide/pm/working-state.rst b/Documentation/admin-guide/pm/working-state.rst
index b6cef9b5e961..fc298eb1234b 100644
--- a/Documentation/admin-guide/pm/working-state.rst
+++ b/Documentation/admin-guide/pm/working-state.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
==============================
Working-State Power Management
==============================
@@ -8,3 +10,4 @@ Working-State Power Management
cpuidle
cpufreq
intel_pstate
+ intel_epb
diff --git a/Documentation/admin-guide/tainted-kernels.rst b/Documentation/admin-guide/tainted-kernels.rst
index 28a869c509a0..71e9184a9079 100644
--- a/Documentation/admin-guide/tainted-kernels.rst
+++ b/Documentation/admin-guide/tainted-kernels.rst
@@ -1,59 +1,164 @@
Tainted kernels
---------------
-Some oops reports contain the string **'Tainted: '** after the program
-counter. This indicates that the kernel has been tainted by some
-mechanism. The string is followed by a series of position-sensitive
-characters, each representing a particular tainted value.
-
- 1) ``G`` if all modules loaded have a GPL or compatible license, ``P`` if
+The kernel will mark itself as 'tainted' when something occurs that might be
+relevant later when investigating problems. Don't worry too much about this;
+most of the time it's not a problem to run a tainted kernel. The information is
+mainly of interest once someone wants to investigate some problem, as its real
+cause might be the event that got the kernel tainted. That's why developers
+will often ignore bug reports from tainted kernels, so try to reproduce
+problems with an untainted kernel.
+
+Note the kernel will remain tainted even after you undo what caused the taint
+(e.g. unload a proprietary kernel module), to indicate the kernel remains
+untrustworthy. That's also why the kernel will print the tainted state when it
+notices an internal problem (a 'kernel bug'), a recoverable error
+('kernel oops') or a non-recoverable error ('kernel panic') and writes debug
+information about it to the log that ``dmesg`` outputs. It's also possible to
+check the tainted state at runtime through a file in ``/proc/``.
+
+
+Tainted flag in bugs, oops or panics messages
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You find the tainted state near the top in a line starting with 'CPU:';
+whether and why the kernel was tainted is shown after the Process ID ('PID:')
+and the shortened name of the command ('Comm:') that triggered the event::
+
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+ Oops: 0002 [#1] SMP PTI
+ CPU: 0 PID: 4424 Comm: insmod Tainted: P W O 4.20.0-0.rc6.fc30 #1
+ Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
+ RIP: 0010:my_oops_init+0x13/0x1000 [kpanic]
+ [...]
+
+You'll find a 'Not tainted: ' there if the kernel was not tainted at the
+time of the event; if it was, then it will print 'Tainted: ' followed by
+characters, either letters or blanks. In the above example it looks like this::
+
+ Tainted: P W O
+
+The meaning of those characters is explained in the table below. In this case
+the kernel got tainted earlier because a proprietary module (``P``) was loaded,
+a warning occurred (``W``), and an externally-built module was loaded (``O``).
+To decode other letters use the table below.
+
+
+Decoding tainted state at runtime
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+At runtime, you can query the tainted state with
+``cat /proc/sys/kernel/tainted``. If that returns ``0``, the kernel is not
+tainted; any other number indicates the reasons why it is. The easiest way to
+decode that number is the script ``tools/debugging/kernel-chktaint``, which your
+distribution might ship as part of a package called ``linux-tools`` or
+``kernel-tools``; if it doesn't, you can download the script from
+`git.kernel.org <https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/tools/debugging/kernel-chktaint>`_
+and execute it with ``sh kernel-chktaint``. On the machine whose log messages
+were quoted earlier, it would print something like this::
+
+ Kernel is Tainted for following reasons:
+ * Proprietary module was loaded (#0)
+ * Kernel issued warning (#9)
+ * Externally-built ('out-of-tree') module was loaded (#12)
+ See Documentation/admin-guide/tainted-kernels.rst in the the Linux kernel or
+ https://www.kernel.org/doc/html/latest/admin-guide/tainted-kernels.html for
+ a more details explanation of the various taint flags.
+ Raw taint value as int/string: 4609/'P W O '
+
+You can try to decode the number yourself. That's easy if there was only one
+reason that got your kernel tainted, as in this case you can find the number
+in the table below. If there were multiple reasons, you need to decode the
+number, as it is a bitfield where each bit indicates the presence or absence
+of a particular type of taint. It's best to leave that to the aforementioned
+script, but if you need something quick you can use this shell command to
+check which bits are set::
+
+ $ for i in $(seq 18); do echo $(($i-1)) $(($(cat /proc/sys/kernel/tainted)>>($i-1)&1));done
+
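+For instance, the raw value 4609 quoted above decodes as 4096 + 512 + 1,
+i.e. bits 12, 9 and 0 are set: an externally-built module was loaded, the
+kernel issued a warning, and a proprietary module was loaded.
+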
+Table for decoding tainted state
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+=== === ====== ========================================================
+Bit Log Number Reason that got the kernel tainted
+=== === ====== ========================================================
+ 0 G/P 1 proprietary module was loaded
+ 1 _/F 2 module was force loaded
+ 2 _/S 4 SMP kernel oops on an officially SMP incapable processor
+ 3 _/R 8 module was force unloaded
+ 4 _/M 16 processor reported a Machine Check Exception (MCE)
+ 5 _/B 32 bad page referenced or some unexpected page flags
+ 6 _/U 64 taint requested by userspace application
+ 7 _/D 128 kernel died recently, i.e. there was an OOPS or BUG
+ 8 _/A 256 ACPI table overridden by user
+ 9 _/W 512 kernel issued warning
+ 10 _/C 1024 staging driver was loaded
+ 11 _/I 2048 workaround for bug in platform firmware applied
+ 12 _/O 4096 externally-built ("out-of-tree") module was loaded
+ 13 _/E 8192 unsigned module was loaded
+ 14 _/L 16384 soft lockup occurred
+ 15 _/K 32768 kernel has been live patched
+ 16 _/X 65536 auxiliary taint, defined for and used by distros
+ 17 _/T 131072 kernel was built with the struct randomization plugin
+=== === ====== ========================================================
+
+Note: the character ``_`` represents a blank in this table to make reading
+easier.
+
+More detailed explanation for tainting
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ 0) ``G`` if all modules loaded have a GPL or compatible license, ``P`` if
any proprietary module has been loaded. Modules without a
MODULE_LICENSE or with a MODULE_LICENSE that is not recognised by
insmod as GPL compatible are assumed to be proprietary.
- 2) ``F`` if any module was force loaded by ``insmod -f``, ``' '`` if all
+ 1) ``F`` if any module was force loaded by ``insmod -f``, ``' '`` if all
modules were loaded normally.
- 3) ``S`` if the oops occurred on an SMP kernel running on hardware that
+ 2) ``S`` if the oops occurred on an SMP kernel running on hardware that
hasn't been certified as safe to run multiprocessor.
Currently this occurs only on various Athlons that are not
SMP capable.
- 4) ``R`` if a module was force unloaded by ``rmmod -f``, ``' '`` if all
+ 3) ``R`` if a module was force unloaded by ``rmmod -f``, ``' '`` if all
modules were unloaded normally.
- 5) ``M`` if any processor has reported a Machine Check Exception,
+ 4) ``M`` if any processor has reported a Machine Check Exception,
``' '`` if no Machine Check Exceptions have occurred.
- 6) ``B`` if a page-release function has found a bad page reference or
- some unexpected page flags.
+ 5) ``B`` if a page-release function has found a bad page reference or some
+ unexpected page flags. This indicates a hardware problem or a kernel bug;
+ there should be other information in the log indicating why this tainting
+ occurred.
- 7) ``U`` if a user or user application specifically requested that the
+ 6) ``U`` if a user or user application specifically requested that the
Tainted flag be set, ``' '`` otherwise.
- 8) ``D`` if the kernel has died recently, i.e. there was an OOPS or BUG.
+ 7) ``D`` if the kernel has died recently, i.e. there was an OOPS or BUG.
- 9) ``A`` if the ACPI table has been overridden.
+ 8) ``A`` if an ACPI table has been overridden.
- 10) ``W`` if a warning has previously been issued by the kernel.
+ 9) ``W`` if a warning has previously been issued by the kernel.
(Though some warnings may set more specific taint flags.)
- 11) ``C`` if a staging driver has been loaded.
+ 10) ``C`` if a staging driver has been loaded.
- 12) ``I`` if the kernel is working around a severe bug in the platform
+ 11) ``I`` if the kernel is working around a severe bug in the platform
firmware (BIOS or similar).
- 13) ``O`` if an externally-built ("out-of-tree") module has been loaded.
+ 12) ``O`` if an externally-built ("out-of-tree") module has been loaded.
- 14) ``E`` if an unsigned module has been loaded in a kernel supporting
+ 13) ``E`` if an unsigned module has been loaded in a kernel supporting
module signature.
- 15) ``L`` if a soft lockup has previously occurred on the system.
+ 14) ``L`` if a soft lockup has previously occurred on the system.
+
+ 15) ``K`` if the kernel has been live patched.
- 16) ``K`` if the kernel has been live patched.
+ 16) ``X`` Auxiliary taint, defined for and used by Linux distributors.
-The primary reason for the **'Tainted: '** string is to tell kernel
-debuggers if this is a clean kernel or if anything unusual has
-occurred. Tainting is permanent: even if an offending module is
-unloaded, the tainted value remains to indicate that the kernel is not
-trustworthy.
+ 17) ``T`` if the kernel was built with the randstruct plugin, which can
+ intentionally produce extremely unusual kernel structure layouts (even
+ performance-pathological ones), which is important to know when
+ debugging. Set at build time.
diff --git a/Documentation/arm/kernel_mode_neon.txt b/Documentation/arm/kernel_mode_neon.txt
index 525452726d31..b9e060c5b61e 100644
--- a/Documentation/arm/kernel_mode_neon.txt
+++ b/Documentation/arm/kernel_mode_neon.txt
@@ -6,7 +6,7 @@ TL;DR summary
* Use only NEON instructions, or VFP instructions that don't rely on support
code
* Isolate your NEON code in a separate compilation unit, and compile it with
- '-mfpu=neon -mfloat-abi=softfp'
+ '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'
* Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
NEON code
* Don't sleep in your NEON code, and be aware that it will be executed with
@@ -87,7 +87,7 @@ instructions appearing in unexpected places if no special care is taken.
Therefore, the recommended and only supported way of using NEON/VFP in the
kernel is by adhering to the following rules:
* isolate the NEON code in a separate compilation unit and compile it with
- '-mfpu=neon -mfloat-abi=softfp';
+ '-march=armv7-a -mfpu=neon -mfloat-abi=softfp';
* issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
into the unit containing the NEON code from a compilation unit which is *not*
built with the GCC flag '-mfpu=neon' set.
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 8df9f4658d6f..fbab7e21d116 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -188,6 +188,11 @@ Before jumping into the kernel, the following conditions must be met:
the kernel image will be entered must be initialised by software at a
higher exception level to prevent execution in an UNKNOWN state.
+ - SCR_EL3.FIQ must have the same value across all CPUs the kernel is
+ executing on.
+ - The value of SCR_EL3.FIQ must be the same as the one present at boot
+ time whenever the kernel is executing.
+
For systems with a GICv3 interrupt controller to be used in v3 mode:
- If EL3 is present:
ICC_SRE_EL3.Enable (bit 3) must be initialiased to 0b1.
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
index d4b4dd1fe786..684a0da39378 100644
--- a/Documentation/arm64/cpu-feature-registers.txt
+++ b/Documentation/arm64/cpu-feature-registers.txt
@@ -209,6 +209,22 @@ infrastructure:
| AT | [35-32] | y |
x--------------------------------------------------x
+ 6) ID_AA64ZFR0_EL1 - SVE feature ID register 0
+
+ x--------------------------------------------------x
+ | Name | bits | visible |
+ |--------------------------------------------------|
+ | SM4 | [43-40] | y |
+ |--------------------------------------------------|
+ | SHA3 | [35-32] | y |
+ |--------------------------------------------------|
+ | BitPerm | [19-16] | y |
+ |--------------------------------------------------|
+ | AES | [7-4] | y |
+ |--------------------------------------------------|
+ | SVEVer | [3-0] | y |
+ x--------------------------------------------------x
+
Appendix I: Example
---------------------------
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index 13d6691b37be..b73a2519ecf2 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -13,9 +13,9 @@ architected discovery mechanism available to userspace code at EL0. The
kernel exposes the presence of these features to userspace through a set
of flags called hwcaps, exposed in the auxilliary vector.
-Userspace software can test for features by acquiring the AT_HWCAP entry
-of the auxilliary vector, and testing whether the relevant flags are
-set, e.g.
+Userspace software can test for features by acquiring the AT_HWCAP or
+AT_HWCAP2 entry of the auxiliary vector, and testing whether the relevant
+flags are set, e.g.
bool floating_point_is_present(void)
{
@@ -135,6 +135,10 @@ HWCAP_DCPOP
Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0001.
+HWCAP2_DCPODP
+
+ Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
+
HWCAP_SHA3
Functionality implied by ID_AA64ISAR0_EL1.SHA3 == 0b0001.
@@ -159,6 +163,30 @@ HWCAP_SVE
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001.
+HWCAP2_SVE2
+
+ Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001.
+
+HWCAP2_SVEAES
+
+ Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
+
+HWCAP2_SVEPMULL
+
+ Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010.
+
+HWCAP2_SVEBITPERM
+
+ Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001.
+
+HWCAP2_SVESHA3
+
+ Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001.
+
+HWCAP2_SVESM4
+
+ Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001.
+
HWCAP_ASIMDFHM
Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001.
@@ -194,3 +222,10 @@ HWCAP_PACG
Functionality implied by ID_AA64ISAR1_EL1.GPA == 0b0001 or
ID_AA64ISAR1_EL1.GPI == 0b0001, as described by
Documentation/arm64/pointer-authentication.txt.
+
+
+4. Unused AT_HWCAP bits
+-----------------------
+
+For interoperation with userspace, the kernel guarantees that bits 62
+and 63 of AT_HWCAP will always be returned as 0.
diff --git a/Documentation/arm64/pointer-authentication.txt b/Documentation/arm64/pointer-authentication.txt
index a25cd21290e9..5baca42ba146 100644
--- a/Documentation/arm64/pointer-authentication.txt
+++ b/Documentation/arm64/pointer-authentication.txt
@@ -78,6 +78,11 @@ bits can vary between the two. Note that the masks apply to TTBR0
addresses, and are not valid to apply to TTBR1 addresses (e.g. kernel
pointers).
+Additionally, when CONFIG_CHECKPOINT_RESTORE is also set, the kernel
+will expose the NT_ARM_PACA_KEYS and NT_ARM_PACG_KEYS regsets (struct
+user_pac_address_keys and struct user_pac_generic_keys). These can be
+used to get and set the keys for a thread.
+
Virtualization
--------------
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index ddb8ce5333ba..68d9b74fd751 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -61,6 +61,7 @@ stable kernels.
| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
| ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
+| ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
@@ -77,8 +78,10 @@ stable kernels.
| Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 |
| Hisilicon | Hip0{6,7} | #161010701 | N/A |
| Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 |
+| Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A |
| | | | |
| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 |
+| Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt
index 7169a0ec41d8..9940e924a47e 100644
--- a/Documentation/arm64/sve.txt
+++ b/Documentation/arm64/sve.txt
@@ -34,6 +34,23 @@ model features for SVE is included in Appendix A.
following sections: software that needs to verify that those interfaces are
present must check for HWCAP_SVE instead.
+* On hardware that supports the SVE2 extensions, HWCAP2_SVE2 will also
+ be reported in the AT_HWCAP2 aux vector entry. In addition to this,
+ optional extensions to SVE2 may be reported by the presence of:
+
+ HWCAP2_SVE2
+ HWCAP2_SVEAES
+ HWCAP2_SVEPMULL
+ HWCAP2_SVEBITPERM
+ HWCAP2_SVESHA3
+ HWCAP2_SVESM4
+
+ This list may be extended over time as the SVE architecture evolves.
+
+ These extensions are also reported via the CPU ID register ID_AA64ZFR0_EL1,
+ which userspace can read using an MRS instruction. See elf_hwcaps.txt and
+ cpu-feature-registers.txt for details.
+
* Debuggers should restrict themselves to interacting with the target via the
NT_ARM_SVE regset. The recommended way of detecting support for this regset
is to connect to a target process first and then attempt a
diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt
index 913396ac5824..dca3fb0554db 100644
--- a/Documentation/atomic_t.txt
+++ b/Documentation/atomic_t.txt
@@ -56,6 +56,23 @@ Barriers:
smp_mb__{before,after}_atomic()
+TYPES (signed vs unsigned)
+-----
+
+While atomic_t, atomic_long_t and atomic64_t use int, long and s64
+respectively (for hysterical raisins), the kernel uses -fno-strict-overflow
+(which implies -fwrapv) and defines signed overflow to behave like
+2s-complement.
+
+Therefore, an explicitly unsigned variant of the atomic ops is strictly
+unnecessary and we can simply cast; there is no UB.
+
+There was a bug in UBSAN prior to GCC-8 that would generate UB warnings for
+signed types.
+
+With this we also conform to the C/C++ _Atomic behaviour and things like
+P1236R1.
+
SEMANTICS
---------
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index 9a60a5d60e38..7313d354f20e 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
-for this int. For example, a bitfield struct member has: * btf member bit
-offset 100 from the start of the structure, * btf member pointing to an int
-type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+for this int. For example, a bitfield struct member has:
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
Then in the struct memory layout, this member will occupy ``4`` bits starting
from bits ``100 + 2 = 102``.
Alternatively, the bitfield struct member can be the following to access the
same bits as the above:
-
* btf member bit offset 102,
* btf member pointing to an int type,
* the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
diff --git a/Documentation/cgroup-v1/memory.txt b/Documentation/cgroup-v1/memory.txt
index a347fc9293e5..a33cedf85427 100644
--- a/Documentation/cgroup-v1/memory.txt
+++ b/Documentation/cgroup-v1/memory.txt
@@ -70,7 +70,7 @@ Brief summary of control files.
memory.soft_limit_in_bytes # set/show soft limit of memory usage
memory.stat # show various statistics
memory.use_hierarchy # set/show hierarchical account enabled
- memory.force_empty # trigger forced move charge to parent
+ memory.force_empty # trigger forced page reclaim
memory.pressure_level # set memory pressure notifications
memory.swappiness # set/show swappiness parameter of vmscan
(See sysctl's vm.swappiness)
@@ -459,8 +459,9 @@ About use_hierarchy, see Section 6.
the cgroup will be reclaimed and as many pages reclaimed as possible.
The typical use case for this interface is before calling rmdir().
- Because rmdir() moves all pages to parent, some out-of-use page caches can be
- moved to the parent. If you want to avoid that, force_empty will be useful.
+ Though rmdir() offlines the memcg, the memcg may still stay there due to
+ charged file caches. Some out-of-use page caches may remain charged until
+ memory pressure happens. If you want to avoid that, force_empty will be useful.
Also, note that when memory.kmem.limit_in_bytes is set the charges due to
kernel pages will still be seen. This is not considered a failure and the
diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst
index 6eb9d3f090cd..93cb65d52720 100644
--- a/Documentation/core-api/cachetlb.rst
+++ b/Documentation/core-api/cachetlb.rst
@@ -101,16 +101,6 @@ changes occur:
translations for software managed TLB configurations.
The sparc64 port currently does this.
-6) ``void tlb_migrate_finish(struct mm_struct *mm)``
-
- This interface is called at the end of an explicit
- process migration. This interface provides a hook
- to allow a platform to update TLB or context-specific
- information for the address space.
-
- The ia64 sn2 platform is one example of a platform
- that uses this interface.
-
Next, we have the cache flushing interfaces. In general, when Linux
is changing an existing virtual-->physical mapping to a new value,
the sequence will be in one of the following forms::
diff --git a/Documentation/core-api/flexible-arrays.rst b/Documentation/core-api/flexible-arrays.rst
deleted file mode 100644
index b6b85a1b518e..000000000000
--- a/Documentation/core-api/flexible-arrays.rst
+++ /dev/null
@@ -1,130 +0,0 @@
-
-===================================
-Using flexible arrays in the kernel
-===================================
-
-Large contiguous memory allocations can be unreliable in the Linux kernel.
-Kernel programmers will sometimes respond to this problem by allocating
-pages with :c:func:`vmalloc()`. This solution not ideal, though. On 32-bit
-systems, memory from vmalloc() must be mapped into a relatively small address
-space; it's easy to run out. On SMP systems, the page table changes required
-by vmalloc() allocations can require expensive cross-processor interrupts on
-all CPUs. And, on all systems, use of space in the vmalloc() range increases
-pressure on the translation lookaside buffer (TLB), reducing the performance
-of the system.
-
-In many cases, the need for memory from vmalloc() can be eliminated by piecing
-together an array from smaller parts; the flexible array library exists to make
-this task easier.
-
-A flexible array holds an arbitrary (within limits) number of fixed-sized
-objects, accessed via an integer index. Sparse arrays are handled
-reasonably well. Only single-page allocations are made, so memory
-allocation failures should be relatively rare. The down sides are that the
-arrays cannot be indexed directly, individual object size cannot exceed the
-system page size, and putting data into a flexible array requires a copy
-operation. It's also worth noting that flexible arrays do no internal
-locking at all; if concurrent access to an array is possible, then the
-caller must arrange for appropriate mutual exclusion.
-
-The creation of a flexible array is done with :c:func:`flex_array_alloc()`::
-
- #include <linux/flex_array.h>
-
- struct flex_array *flex_array_alloc(int element_size,
- unsigned int total,
- gfp_t flags);
-
-The individual object size is provided by ``element_size``, while total is the
-maximum number of objects which can be stored in the array. The flags
-argument is passed directly to the internal memory allocation calls. With
-the current code, using flags to ask for high memory is likely to lead to
-notably unpleasant side effects.
-
-It is also possible to define flexible arrays at compile time with::
-
- DEFINE_FLEX_ARRAY(name, element_size, total);
-
-This macro will result in a definition of an array with the given name; the
-element size and total will be checked for validity at compile time.
-
-Storing data into a flexible array is accomplished with a call to
-:c:func:`flex_array_put()`::
-
- int flex_array_put(struct flex_array *array, unsigned int element_nr,
- void *src, gfp_t flags);
-
-This call will copy the data from src into the array, in the position
-indicated by ``element_nr`` (which must be less than the maximum specified when
-the array was created). If any memory allocations must be performed, flags
-will be used. The return value is zero on success, a negative error code
-otherwise.
-
-There might possibly be a need to store data into a flexible array while
-running in some sort of atomic context; in this situation, sleeping in the
-memory allocator would be a bad thing. That can be avoided by using
-``GFP_ATOMIC`` for the flags value, but, often, there is a better way. The
-trick is to ensure that any needed memory allocations are done before
-entering atomic context, using :c:func:`flex_array_prealloc()`::
-
- int flex_array_prealloc(struct flex_array *array, unsigned int start,
- unsigned int nr_elements, gfp_t flags);
-
-This function will ensure that memory for the elements indexed in the range
-defined by ``start`` and ``nr_elements`` has been allocated. Thereafter, a
-``flex_array_put()`` call on an element in that range is guaranteed not to
-block.
-
-Getting data back out of the array is done with :c:func:`flex_array_get()`::
-
- void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
-
-The return value is a pointer to the data element, or NULL if that
-particular element has never been allocated.
-
-Note that it is possible to get back a valid pointer for an element which
-has never been stored in the array. Memory for array elements is allocated
-one page at a time; a single allocation could provide memory for several
-adjacent elements. Flexible array elements are normally initialized to the
-value ``FLEX_ARRAY_FREE`` (defined as 0x6c in <linux/poison.h>), so errors
-involving that number probably result from use of unstored array entries.
-Note that, if array elements are allocated with ``__GFP_ZERO``, they will be
-initialized to zero and this poisoning will not happen.
-
-Individual elements in the array can be cleared with
-:c:func:`flex_array_clear()`::
-
- int flex_array_clear(struct flex_array *array, unsigned int element_nr);
-
-This function will set the given element to ``FLEX_ARRAY_FREE`` and return
-zero. If storage for the indicated element is not allocated for the array,
-``flex_array_clear()`` will return ``-EINVAL`` instead. Note that clearing an
-element does not release the storage associated with it; to reduce the
-allocated size of an array, call :c:func:`flex_array_shrink()`::
-
- int flex_array_shrink(struct flex_array *array);
-
-The return value will be the number of pages of memory actually freed.
-This function works by scanning the array for pages containing nothing but
-``FLEX_ARRAY_FREE`` bytes, so (1) it can be expensive, and (2) it will not work
-if the array's pages are allocated with ``__GFP_ZERO``.
-
-It is possible to remove all elements of an array with a call to
-:c:func:`flex_array_free_parts()`::
-
- void flex_array_free_parts(struct flex_array *array);
-
-This call frees all elements, but leaves the array itself in place.
-Freeing the entire array is done with :c:func:`flex_array_free()`::
-
- void flex_array_free(struct flex_array *array);
-
-As of this writing, there are no users of flexible arrays in the mainline
-kernel. The functions described here are also not exported to modules;
-that will probably be fixed when somebody comes up with a need for it.
-
-
-Flexible array functions
-------------------------
-
-.. kernel-doc:: include/linux/flex_array.h
diff --git a/Documentation/core-api/generic-radix-tree.rst b/Documentation/core-api/generic-radix-tree.rst
new file mode 100644
index 000000000000..ed42839ae42f
--- /dev/null
+++ b/Documentation/core-api/generic-radix-tree.rst
@@ -0,0 +1,12 @@
+=================================
+Generic radix trees/sparse arrays
+=================================
+
+.. kernel-doc:: include/linux/generic-radix-tree.h
+ :doc: Generic radix trees/sparse arrays
+
+generic radix tree functions
+----------------------------
+
+.. kernel-doc:: include/linux/generic-radix-tree.h
+ :functions:
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 3adee82be311..6870baffef82 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -28,6 +28,7 @@ Core utilities
errseq
printk-formats
circular-buffers
+ generic-radix-tree
memory-allocation
mm-api
gfp_mask-from-fs-io
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index cdd24943fbcc..71f5d2fe39b7 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -356,10 +356,6 @@ Read-Copy Update (RCU)
.. kernel-doc:: include/linux/rcupdate.h
-.. kernel-doc:: include/linux/rcupdate_wait.h
-
-.. kernel-doc:: include/linux/rcutree.h
-
.. kernel-doc:: kernel/rcu/tree.c
.. kernel-doc:: kernel/rcu/tree_plugin.h
diff --git a/Documentation/core-api/memory-allocation.rst b/Documentation/core-api/memory-allocation.rst
index 8954a88ff5b7..7744aa3bf2e0 100644
--- a/Documentation/core-api/memory-allocation.rst
+++ b/Documentation/core-api/memory-allocation.rst
@@ -1,4 +1,4 @@
-.. _memory-allocation:
+.. _memory_allocation:
=======================
Memory Allocation Guide
@@ -113,9 +113,11 @@ see :c:func:`kvmalloc_node` reference documentation. Note that
If you need to allocate many identical objects you can use the slab
cache allocator. The cache should be set up with
-:c:func:`kmem_cache_create` before it can be used. Afterwards
-:c:func:`kmem_cache_alloc` and its convenience wrappers can allocate
-memory from that cache.
+:c:func:`kmem_cache_create` or :c:func:`kmem_cache_create_usercopy`
+before it can be used. The second function should be used if a part of
+the cache might be copied to userspace. After the cache is
+created :c:func:`kmem_cache_alloc` and its convenience wrappers can
+allocate memory from that cache.
When the allocated memory is no longer needed it must be freed. You
can use :c:func:`kvfree` for the memory allocated with `kmalloc`,
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index aa8e54b85221..128e8a721c1e 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -35,7 +35,7 @@ users will want to use a plain ``GFP_KERNEL``.
:doc: Reclaim modifiers
.. kernel-doc:: include/linux/gfp.h
- :doc: Common combinations
+ :doc: Useful GFP flag combinations
The Slab Cache
==============
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index 5d54b27c6eba..ef6f9f98f595 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -85,7 +85,7 @@ which was at that index; if it returns the same entry which was passed as
If you want to only store a new entry to an index if the current entry
at that index is ``NULL``, you can use :c:func:`xa_insert` which
-returns ``-EEXIST`` if the entry is not empty.
+returns ``-EBUSY`` if the entry is not empty.
You can enquire whether a mark is set on an entry by using
:c:func:`xa_get_mark`. If the entry is not ``NULL``, you can set a mark
@@ -131,17 +131,23 @@ If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or
initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
the XArray changes to track whether entries are in use or not.
-You can call :c:func:`xa_alloc` to store the entry at any unused index
+You can call :c:func:`xa_alloc` to store the entry at an unused index
in the XArray. If you need to modify the array from interrupt context,
you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable
interrupts while allocating the ID.
-Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert`
-will mark the entry as being allocated. Unlike a normal XArray, storing
+Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert` will
+also mark the entry as being allocated. Unlike a normal XArray, storing
``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`.
To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if
you only want to free the entry if it's ``NULL``).
+By default, the lowest free entry is allocated starting from 0. If you
+want to allocate entries starting at 1, it is more efficient to use
+:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. If you want to
+allocate IDs up to a maximum, then wrap back around to the lowest free
+ID, you can use :c:func:`xa_alloc_cyclic`.
+
You cannot use ``XA_MARK_0`` with an allocating XArray as this mark
is used to track whether an entry is free or not. The other marks are
available for your use.
@@ -209,7 +215,6 @@ Assumes xa_lock held on entry:
* :c:func:`__xa_erase`
* :c:func:`__xa_cmpxchg`
* :c:func:`__xa_alloc`
- * :c:func:`__xa_reserve`
* :c:func:`__xa_set_mark`
* :c:func:`__xa_clear_mark`
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt
index c6e7e9196a8b..cb61277e2308 100644
--- a/Documentation/cputopology.txt
+++ b/Documentation/cputopology.txt
@@ -3,79 +3,79 @@ How CPU topology info is exported via sysfs
===========================================
Export CPU topology info via sysfs. Items (attributes) are similar
-to /proc/cpuinfo output of some architectures:
+to /proc/cpuinfo output of some architectures. They reside in
+/sys/devices/system/cpu/cpuX/topology/:
-1) /sys/devices/system/cpu/cpuX/topology/physical_package_id:
+physical_package_id:
physical package id of cpuX. Typically corresponds to a physical
socket number, but the actual value is architecture and platform
dependent.
-2) /sys/devices/system/cpu/cpuX/topology/core_id:
+core_id:
the CPU core ID of cpuX. Typically it is the hardware platform's
identifier (rather than the kernel's). The actual value is
architecture and platform dependent.
-3) /sys/devices/system/cpu/cpuX/topology/book_id:
+book_id:
the book ID of cpuX. Typically it is the hardware platform's
identifier (rather than the kernel's). The actual value is
architecture and platform dependent.
-4) /sys/devices/system/cpu/cpuX/topology/drawer_id:
+drawer_id:
the drawer ID of cpuX. Typically it is the hardware platform's
identifier (rather than the kernel's). The actual value is
architecture and platform dependent.
-5) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
+thread_siblings:
internal kernel map of cpuX's hardware threads within the same
core as cpuX.
-6) /sys/devices/system/cpu/cpuX/topology/thread_siblings_list:
+thread_siblings_list:
human-readable list of cpuX's hardware threads within the same
core as cpuX.
-7) /sys/devices/system/cpu/cpuX/topology/core_siblings:
+core_siblings:
internal kernel map of cpuX's hardware threads within the same
physical_package_id.
-8) /sys/devices/system/cpu/cpuX/topology/core_siblings_list:
+core_siblings_list:
human-readable list of cpuX's hardware threads within the same
physical_package_id.
-9) /sys/devices/system/cpu/cpuX/topology/book_siblings:
+book_siblings:
internal kernel map of cpuX's hardware threads within the same
book_id.
-10) /sys/devices/system/cpu/cpuX/topology/book_siblings_list:
+book_siblings_list:
human-readable list of cpuX's hardware threads within the same
book_id.
-11) /sys/devices/system/cpu/cpuX/topology/drawer_siblings:
+drawer_siblings:
internal kernel map of cpuX's hardware threads within the same
drawer_id.
-12) /sys/devices/system/cpu/cpuX/topology/drawer_siblings_list:
+drawer_siblings_list:
human-readable list of cpuX's hardware threads within the same
drawer_id.
-To implement it in an architecture-neutral way, a new source file,
-drivers/base/topology.c, is to export the 6 to 12 attributes. The book
-and drawer related sysfs files will only be created if CONFIG_SCHED_BOOK
-and CONFIG_SCHED_DRAWER are selected.
+Architecture-neutral, drivers/base/topology.c, exports these attributes.
+However, the book and drawer related sysfs files will only be created if
+CONFIG_SCHED_BOOK and CONFIG_SCHED_DRAWER are selected, respectively.
-CONFIG_SCHED_BOOK and CONFIG_DRAWER are currently only used on s390, where
-they reflect the cpu and cache hierarchy.
+CONFIG_SCHED_BOOK and CONFIG_SCHED_DRAWER are currently only used on s390,
+where they reflect the cpu and cache hierarchy.
For an architecture to support this feature, it must define some of
these macros in include/asm-XXX/topology.h::
@@ -98,10 +98,10 @@ To be consistent on all architectures, include/linux/topology.h
provides default definitions for any of the above macros that are
not defined by include/asm-XXX/topology.h:
-1) physical_package_id: -1
-2) core_id: 0
-3) sibling_cpumask: just the given CPU
-4) core_cpumask: just the given CPU
+1) topology_physical_package_id: -1
+2) topology_core_id: 0
+3) topology_sibling_cpumask: just the given CPU
+4) topology_core_cpumask: just the given CPU
For architectures that don't support books (CONFIG_SCHED_BOOK) there are no
default definitions for topology_book_id() and topology_book_cpumask().
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 0f6ca8b7261e..f14afaaf2f32 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -133,7 +133,6 @@ Code Example For Use of Operational State Memory With SHASH
if (!sdesc)
return ERR_PTR(-ENOMEM);
sdesc->shash.tfm = alg;
- sdesc->shash.flags = 0x0;
return sdesc;
}
diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index c2f6452e38ed..42b612677799 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -22,7 +22,7 @@ Configure the kernel with::
CONFIG_KCOV=y
-CONFIG_KCOV requires gcc built on revision 231296 or later.
+CONFIG_KCOV requires gcc 6.1.0 or later.
If the comparison operands need to be collected, set::
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index 7756f7a7c23b..c8c03388b9de 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -14,6 +14,10 @@ in safe mode with a limited scope. In limited mode, cpu-hotplug test is
run on a single cpu as opposed to all hotplug capable cpus, and memory
hotplug test is run on 2% of hotplug capable memory instead of 10%.
+kselftest runs as a userspace process. Tests that can be written/run in
+userspace may wish to use the `Test Harness`_. Tests that need to be
+run in kernel space may wish to use a `Test Module`_.
+
Running the selftests (hotplug tests are run in limited mode)
=============================================================
@@ -161,11 +165,97 @@ Contributing new tests (details)
e.g: tools/testing/selftests/android/config
+Test Module
+===========
+
+Kselftest tests the kernel from userspace. Sometimes things need
+testing from within the kernel; one method of doing this is to create a
+test module. We can tie the module into the kselftest framework by
+using a shell script test runner. ``kselftest_module.sh`` is designed
+to facilitate this process. There is also a header file provided to
+assist writing kernel modules that are for use with kselftest:
+
+- ``tools/testing/kselftest/kselftest_module.h``
+- ``tools/testing/kselftest/kselftest_module.sh``
+
+How to use
+----------
+
+Here we show the typical steps to create a test module and tie it into
+kselftest. We use kselftests for lib/ as an example.
+
+1. Create the test module
+
+2. Create the test script that will run (load/unload) the module
+ e.g. ``tools/testing/selftests/lib/printf.sh``
+
+3. Add line to config file e.g. ``tools/testing/selftests/lib/config``
+
+4. Add test script to makefile e.g. ``tools/testing/selftests/lib/Makefile``
+
+5. Verify it works:
+
+.. code-block:: sh
+
+ # Assumes you have booted a fresh build of this kernel tree
+ cd /path/to/linux/tree
+ make kselftest-merge
+ make modules
+ sudo make modules_install
+ make TARGETS=lib kselftest
+
+Example Module
+--------------
+
+A bare bones test module might look like this:
+
+.. code-block:: c
+
+ // SPDX-License-Identifier: GPL-2.0+
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include "../tools/testing/selftests/kselftest_module.h"
+
+ KSTM_MODULE_GLOBALS();
+
+ /*
+ * Kernel module for testing the foobinator
+ */
+
+ static int __init test_function(void)
+ {
+ ...
+ }
+
+ static void __init selftest(void)
+ {
+ KSTM_CHECK_ZERO(do_test_case("", 0));
+ }
+
+ KSTM_MODULE_LOADERS(test_foo);
+ MODULE_AUTHOR("John Developer <jd@fooman.org>");
+ MODULE_LICENSE("GPL");
+
+Example test script
+-------------------
+
+.. code-block:: sh
+
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0+
+ $(dirname $0)/../kselftest_module.sh "foo" test_foo
+
+
Test Harness
============
-The kselftest_harness.h file contains useful helpers to build tests. The tests
-from tools/testing/selftests/seccomp/seccomp_bpf.c can be used as example.
+The kselftest_harness.h file contains useful helpers to build tests. The
+test harness is for userspace testing; for kernel space testing see `Test
+Module`_ above.
+
+The tests from tools/testing/selftests/seccomp/seccomp_bpf.c can be used as
+an example.
Example
-------
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index ff0841711fd5..8ae1cf8e94da 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -206,6 +206,9 @@ Optional feature arguments are:
in a separate btree, which improves speed of shutting
down the cache.
+ no_discard_passdown : disable passing down discards from the cache
+ to the origin's data device.
+
A policy called 'default' is always registered. This is an alias for
the policy we currently think is giving best all round performance.
diff --git a/Documentation/device-mapper/dm-init.txt b/Documentation/device-mapper/dm-init.txt
new file mode 100644
index 000000000000..8464ee7c01b8
--- /dev/null
+++ b/Documentation/device-mapper/dm-init.txt
@@ -0,0 +1,114 @@
+Early creation of mapped devices
+====================================
+
+It is possible to configure a device-mapper device to act as the root device for
+your system in two ways.
+
+The first is to build an initial ramdisk which boots to a minimal userspace
+which configures the device, then pivot_root(8) into it.
+
+The second is to create one or more device-mappers using the module parameter
+"dm-mod.create=" on the kernel boot command line.
+
+The format is specified as a string of data separated by commas and optionally
+semi-colons, where:
+ - a comma is used to separate fields like name, uuid, flags and table
+ (specifies one device)
+ - a semi-colon is used to separate devices.
+
+So the format will look like this:
+
+ dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
+
+Where,
+ <name> ::= The device name.
+ <uuid> ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | ""
+ <minor> ::= The device minor number | ""
+ <flags> ::= "ro" | "rw"
+ <table> ::= <start_sector> <num_sectors> <target_type> <target_args>
+ <target_type> ::= "verity" | "linear" | ... (see list below)
+
+The dm line should be equivalent to the one used by the dmsetup tool with the
+--concise argument.
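+
+For example, the same table string can be tested from userspace first
+(assuming a dmsetup version that supports concise tables):
+
+ dmsetup create --concise "lroot,,,rw, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0"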
+
+Target types
+============
+
+Not all target types are available as there are serious risks in allowing
+activation of certain DM targets without first using userspace tools to check
+the validity of associated metadata.
+
+ "cache": constrained, userspace should verify cache device
+ "crypt": allowed
+ "delay": allowed
+ "era": constrained, userspace should verify metadata device
+ "flakey": constrained, meant for test
+ "linear": allowed
+ "log-writes": constrained, userspace should verify metadata device
+ "mirror": constrained, userspace should verify main/mirror device
+ "raid": constrained, userspace should verify metadata device
+ "snapshot": constrained, userspace should verify src/dst device
+ "snapshot-origin": allowed
+ "snapshot-merge": constrained, userspace should verify src/dst device
+ "striped": allowed
+ "switch": constrained, userspace should verify dev path
+ "thin": constrained, requires dm target message from userspace
+ "thin-pool": constrained, requires dm target message from userspace
+ "verity": allowed
+ "writecache": constrained, userspace should verify cache device
+ "zero": constrained, not meant for rootfs
+
+If the target is not listed above, it is constrained by default (not tested).
+
+Examples
+========
+An example of booting to a linear array made up of user-mode Linux block
+devices:
+
+ dm-mod.create="lroot,,,rw, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" root=/dev/dm-0
+
+This will boot to a rw dm-linear target of 8192 sectors split across two block
+devices identified by their major:minor numbers. After boot, udev will rename
+this target to /dev/mapper/lroot (depending on the rules). No uuid was assigned.
+
+An example of multiple device-mappers, with the dm-mod.create="..." contents
+shown here split over multiple lines for readability:
+
+ vroot,,,ro,
+ 0 1740800 verity 254:0 254:0 1740800 sha1
+ 76e9be054b15884a9fa85973e9cb274c93afadb6
+ 5b3549d54d6c7a3837b9b81ed72e49463a64c03680c47835bef94d768e5646fe;
+ vram,,,rw,
+ 0 32768 linear 1:0 0,
+ 32768 32768 linear 1:1 0
+
+Other examples (per target):
+
+"crypt":
+ dm-crypt,,8,ro,
+ 0 1048576 crypt aes-xts-plain64
+ babebabebabebabebabebabebabebabebabebabebabebabebabebabebabebabe 0
+ /dev/sda 0 1 allow_discards
+
+"delay":
+ dm-delay,,4,ro,0 409600 delay /dev/sda1 0 500
+
+"linear":
+ dm-linear,,,rw,
+ 0 32768 linear /dev/sda1 0,
+ 32768 1024000 linear /dev/sda2 0,
+ 1056768 204800 linear /dev/sda3 0,
+ 1261568 512000 linear /dev/sda4 0
+
+"snapshot-origin":
+ dm-snap-orig,,4,ro,0 409600 snapshot-origin 8:2
+
+"striped":
+ dm-striped,,4,ro,0 1638400 striped 4 4096
+ /dev/sda1 0 /dev/sda2 0 /dev/sda3 0 /dev/sda4 0
+
+"verity":
+ dm-verity,,4,ro,
+ 0 1638400 verity 1 8:1 8:2 4096 4096 204800 1 sha256
+ fb1a5a0f00deb908d8b53cb270858975e76cf64105d412ce764225d53b8f3cfd
+ 51934789604d1b92399c52e7cb149d1b3a1b74bbbcb103b2a0aaacbed5c08584
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 50daa0b3b032..63b139f9ae28 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -15,7 +15,7 @@ DT_TMP_SCHEMA := processed-schema.yaml
extra-y += $(DT_TMP_SCHEMA)
quiet_cmd_mk_schema = SCHEMA $@
- cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^)
+ cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(real-prereqs)
DT_DOCS = $(shell \
cd $(srctree)/$(src) && \
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 365dcf384d73..82dd7582e945 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -228,7 +228,7 @@ patternProperties:
- renesas,r9a06g032-smp
- rockchip,rk3036-smp
- rockchip,rk3066-smp
- - socionext,milbeaut-m10v-smp
+ - socionext,milbeaut-m10v-smp
- ste,dbx500-smp
cpu-release-addr:
diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.txt b/Documentation/devicetree/bindings/arm/l2c2x0.txt
deleted file mode 100644
index fbe6cb21f4cf..000000000000
--- a/Documentation/devicetree/bindings/arm/l2c2x0.txt
+++ /dev/null
@@ -1,114 +0,0 @@
-* ARM L2 Cache Controller
-
-ARM cores often have a separate L2C210/L2C220/L2C310 (also known as PL210/PL220/
-PL310 and variants) based level 2 cache controller. All these various implementations
-of the L2 cache controller have compatible programming models (Note 1).
-Some of the properties that are just prefixed "cache-*" are taken from section
-3.7.3 of the Devicetree Specification which can be found at:
-https://www.devicetree.org/specifications/
-
-The ARM L2 cache representation in the device tree should be done as follows:
-
-Required properties:
-
-- compatible : should be one of:
- "arm,pl310-cache"
- "arm,l220-cache"
- "arm,l210-cache"
- "bcm,bcm11351-a2-pl310-cache": DEPRECATED by "brcm,bcm11351-a2-pl310-cache"
- "brcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an
- offset needs to be added to the address before passing down to the L2
- cache controller
- "marvell,aurora-system-cache": Marvell Controller designed to be
- compatible with the ARM one, with system cache mode (meaning
- maintenance operations on L1 are broadcasted to the L2 and L2
- performs the same operation).
- "marvell,aurora-outer-cache": Marvell Controller designed to be
- compatible with the ARM one with outer cache mode.
- "marvell,tauros3-cache": Marvell Tauros3 cache controller, compatible
- with arm,pl310-cache controller.
-- cache-unified : Specifies the cache is a unified cache.
-- cache-level : Should be set to 2 for a level 2 cache.
-- reg : Physical base address and size of cache controller's memory mapped
- registers.
-
-Optional properties:
-
-- arm,data-latency : Cycles of latency for Data RAM accesses. Specifies 3 cells of
- read, write and setup latencies. Minimum valid values are 1. Controllers
- without setup latency control should use a value of 0.
-- arm,tag-latency : Cycles of latency for Tag RAM accesses. Specifies 3 cells of
- read, write and setup latencies. Controllers without setup latency control
- should use 0. Controllers without separate read and write Tag RAM latency
- values should only use the first cell.
-- arm,dirty-latency : Cycles of latency for Dirty RAMs. This is a single cell.
-- arm,filter-ranges : <start length> Starting address and length of window to
- filter. Addresses in the filter window are directed to the M1 port. Other
- addresses will go to the M0 port.
-- arm,io-coherent : indicates that the system is operating in an hardware
- I/O coherent mode. Valid only when the arm,pl310-cache compatible
- string is used.
-- interrupts : 1 combined interrupt.
-- cache-size : specifies the size in bytes of the cache
-- cache-sets : specifies the number of associativity sets of the cache
-- cache-block-size : specifies the size in bytes of a cache block
-- cache-line-size : specifies the size in bytes of a line in the cache,
- if this is not specified, the line size is assumed to be equal to the
- cache block size
-- cache-id-part: cache id part number to be used if it is not present
- on hardware
-- wt-override: If present then L2 is forced to Write through mode
-- arm,double-linefill : Override double linefill enable setting. Enable if
- non-zero, disable if zero.
-- arm,double-linefill-incr : Override double linefill on INCR read. Enable
- if non-zero, disable if zero.
-- arm,double-linefill-wrap : Override double linefill on WRAP read. Enable
- if non-zero, disable if zero.
-- arm,prefetch-drop : Override prefetch drop enable setting. Enable if non-zero,
- disable if zero.
-- arm,prefetch-offset : Override prefetch offset value. Valid values are
- 0-7, 15, 23, and 31.
-- arm,shared-override : The default behavior of the L220 or PL310 cache
- controllers with respect to the shareable attribute is to transform "normal
- memory non-cacheable transactions" into "cacheable no allocate" (for reads)
- or "write through no write allocate" (for writes).
- On systems where this may cause DMA buffer corruption, this property must be
- specified to indicate that such transforms are precluded.
-- arm,parity-enable : enable parity checking on the L2 cache (L220 or PL310).
-- arm,parity-disable : disable parity checking on the L2 cache (L220 or PL310).
-- arm,outer-sync-disable : disable the outer sync operation on the L2 cache.
- Some core tiles, especially ARM PB11MPCore have a faulty L220 cache that
- will randomly hang unless outer sync operations are disabled.
-- prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
- (forcibly enable), property absent (retain settings set by firmware)
-- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
- <1> (forcibly enable), property absent (retain settings set by
- firmware)
-- arm,dynamic-clock-gating : L2 dynamic clock gating. Value: <0> (forcibly
- disable), <1> (forcibly enable), property absent (OS specific behavior,
- preferably retain firmware settings)
-- arm,standby-mode: L2 standby mode enable. Value <0> (forcibly disable),
- <1> (forcibly enable), property absent (OS specific behavior,
- preferably retain firmware settings)
-- arm,early-bresp-disable : Disable the CA9 optimization Early BRESP (PL310)
-- arm,full-line-zero-disable : Disable the CA9 optimization Full line of zero
- write (PL310)
-
-Example:
-
-L2: cache-controller {
- compatible = "arm,pl310-cache";
- reg = <0xfff12000 0x1000>;
- arm,data-latency = <1 1 1>;
- arm,tag-latency = <2 2 2>;
- arm,filter-ranges = <0x80000000 0x8000000>;
- cache-unified;
- cache-level = <2>;
- interrupts = <45>;
-};
-
-Note 1: The description in this document doesn't apply to integrated L2
- cache controllers as found in e.g. Cortex-A15/A7/A57/A53. These
- integrated L2 controllers are assumed to be all preconfigured by
- early secure boot code. Thus no need to deal with their configuration
- in the kernel at all.
diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.yaml b/Documentation/devicetree/bindings/arm/l2c2x0.yaml
new file mode 100644
index 000000000000..bfc5c185561c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/l2c2x0.yaml
@@ -0,0 +1,248 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/l2c2x0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM L2 Cache Controller
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description: |+
+ ARM cores often have a separate L2C210/L2C220/L2C310 (also known as PL210/
+ PL220/PL310 and variants) based level 2 cache controller. All these various
+ implementations of the L2 cache controller have compatible programming
+ models (Note 1). Some of the properties that are just prefixed "cache-*" are
+ taken from section 3.7.3 of the Devicetree Specification which can be found
+ at:
+ https://www.devicetree.org/specifications/
+
+ Note 1: The description in this document doesn't apply to integrated L2
+ cache controllers as found in e.g. Cortex-A15/A7/A57/A53. These
+ integrated L2 controllers are assumed to be all preconfigured by
+ early secure boot code. Thus no need to deal with their configuration
+ in the kernel at all.
+
+allOf:
+ - $ref: /schemas/cache-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - arm,pl310-cache
+ - arm,l220-cache
+ - arm,l210-cache
+ # DEPRECATED by "brcm,bcm11351-a2-pl310-cache"
+ - bcm,bcm11351-a2-pl310-cache
+ # For Broadcom bcm11351 chipset where an
+ # offset needs to be added to the address before passing down to the L2
+ # cache controller
+ - brcm,bcm11351-a2-pl310-cache
+ # Marvell Controller designed to be
+ # compatible with the ARM one, with system cache mode (meaning
+ # maintenance operations on L1 are broadcasted to the L2 and L2
+ # performs the same operation).
+ - marvell,aurora-system-cache
+ # Marvell Controller designed to be
+ # compatible with the ARM one with outer cache mode.
+ - marvell,aurora-outer-cache
+ # Marvell Tauros3 cache controller, compatible
+ # with arm,pl310-cache controller.
+ - marvell,tauros3-cache
+
+ cache-level:
+ const: 2
+
+ cache-unified: true
+ cache-size: true
+ cache-sets: true
+ cache-block-size: true
+ cache-line-size: true
+
+ reg:
+ maxItems: 1
+
+ arm,data-latency:
+ description: Cycles of latency for Data RAM accesses. Specifies 3 cells of
+ read, write and setup latencies. Minimum valid values are 1. Controllers
+ without setup latency control should use a value of 0.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 2
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 8
+
+ arm,tag-latency:
+ description: Cycles of latency for Tag RAM accesses. Specifies 3 cells of
+ read, write and setup latencies. Controllers without setup latency control
+ should use 0. Controllers without separate read and write Tag RAM latency
+ values should only use the first cell.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 8
+
+ arm,dirty-latency:
+ description: Cycles of latency for Dirty RAMs. This is a single cell.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 1
+ maximum: 8
+
+ arm,filter-ranges:
+ description: <start length> Starting address and length of window to
+ filter. Addresses in the filter window are directed to the M1 port. Other
+ addresses will go to the M0 port.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - items:
+ minItems: 2
+ maxItems: 2
+
+ arm,io-coherent:
+ description: indicates that the system is operating in a hardware
+ I/O coherent mode. Valid only when the arm,pl310-cache compatible
+ string is used.
+ type: boolean
+
+ interrupts:
+ # Either a single combined interrupt or up to 9 individual interrupts
+ minItems: 1
+ maxItems: 9
+
+ cache-id-part:
+ description: cache id part number to be used if it is not present
+ on hardware
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ wt-override:
+ description: If present then L2 is forced to Write through mode
+ type: boolean
+
+ arm,double-linefill:
+ description: Override double linefill enable setting. Enable if
+ non-zero, disable if zero.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+
+ arm,double-linefill-incr:
+ description: Override double linefill on INCR read. Enable
+ if non-zero, disable if zero.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+
+ arm,double-linefill-wrap:
+ description: Override double linefill on WRAP read. Enable
+ if non-zero, disable if zero.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+
+ arm,prefetch-drop:
+ description: Override prefetch drop enable setting. Enable if non-zero,
+ disable if zero.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+
+ arm,prefetch-offset:
+ description: Override prefetch offset value.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1, 2, 3, 4, 5, 6, 7, 15, 23, 31 ]
+
+ arm,shared-override:
+ description: The default behavior of the L220 or PL310 cache
+ controllers with respect to the shareable attribute is to transform "normal
+ memory non-cacheable transactions" into "cacheable no allocate" (for reads)
+ or "write through no write allocate" (for writes).
+ On systems where this may cause DMA buffer corruption, this property must
+ be specified to indicate that such transforms are precluded.
+ type: boolean
+
+ arm,parity-enable:
+ description: enable parity checking on the L2 cache (L220 or PL310).
+ type: boolean
+
+ arm,parity-disable:
+ description: disable parity checking on the L2 cache (L220 or PL310).
+ type: boolean
+
+ arm,outer-sync-disable:
+ description: disable the outer sync operation on the L2 cache.
+ Some core tiles, especially ARM PB11MPCore have a faulty L220 cache that
+ will randomly hang unless outer sync operations are disabled.
+ type: boolean
+
+ prefetch-data:
+ description: |
+ Data prefetch. Value: <0> (forcibly disable), <1>
+ (forcibly enable), property absent (retain settings set by firmware)
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+
+ prefetch-instr:
+ description: |
+ Instruction prefetch. Value: <0> (forcibly disable),
+ <1> (forcibly enable), property absent (retain settings set by
+ firmware)
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+
+ arm,dynamic-clock-gating:
+ description: |
+ L2 dynamic clock gating. Value: <0> (forcibly
+ disable), <1> (forcibly enable), property absent (OS specific behavior,
+ preferably retain firmware settings)
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+
+ arm,standby-mode:
+ description: L2 standby mode enable. Value <0> (forcibly disable),
+ <1> (forcibly enable), property absent (OS specific behavior,
+ preferably retain firmware settings)
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+
+ arm,early-bresp-disable:
+ description: Disable the CA9 optimization Early BRESP (PL310)
+ type: boolean
+
+ arm,full-line-zero-disable:
+ description: Disable the CA9 optimization Full line of zero
+ write (PL310)
+ type: boolean
+
+required:
+ - compatible
+ - cache-unified
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ cache-controller@fff12000 {
+ compatible = "arm,pl310-cache";
+ reg = <0xfff12000 0x1000>;
+ arm,data-latency = <1 1 1>;
+ arm,tag-latency = <2 2 2>;
+ arm,filter-ranges = <0x80000000 0x8000000>;
+ cache-unified;
+ cache-level = <2>;
+ interrupts = <45>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
deleted file mode 100644
index 13611a8199bb..000000000000
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-* ARM Performance Monitor Units
-
-ARM cores often have a PMU for counting cpu and cache events like cache misses
-and hits. The interface to the PMU is part of the ARM ARM. The ARM PMU
-representation in the device tree should be done as under:-
-
-Required properties:
-
-- compatible : should be one of
- "apm,potenza-pmu"
- "arm,armv8-pmuv3"
- "arm,cortex-a73-pmu"
- "arm,cortex-a72-pmu"
- "arm,cortex-a57-pmu"
- "arm,cortex-a53-pmu"
- "arm,cortex-a35-pmu"
- "arm,cortex-a17-pmu"
- "arm,cortex-a15-pmu"
- "arm,cortex-a12-pmu"
- "arm,cortex-a9-pmu"
- "arm,cortex-a8-pmu"
- "arm,cortex-a7-pmu"
- "arm,cortex-a5-pmu"
- "arm,arm11mpcore-pmu"
- "arm,arm1176-pmu"
- "arm,arm1136-pmu"
- "brcm,vulcan-pmu"
- "cavium,thunder-pmu"
- "qcom,scorpion-pmu"
- "qcom,scorpion-mp-pmu"
- "qcom,krait-pmu"
-- interrupts : 1 combined interrupt or 1 per core. If the interrupt is a per-cpu
- interrupt (PPI) then 1 interrupt should be specified.
-
-Optional properties:
-
-- interrupt-affinity : When using SPIs, specifies a list of phandles to CPU
- nodes corresponding directly to the affinity of
- the SPIs listed in the interrupts property.
-
- When using a PPI, specifies a list of phandles to CPU
- nodes corresponding to the set of CPUs which have
- a PMU of this type signalling the PPI listed in the
- interrupts property, unless this is already specified
- by the PPI interrupt specifier itself (in which case
- the interrupt-affinity property shouldn't be present).
-
- This property should be present when there is more than
- a single SPI.
-
-
-- qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
- events.
-
-- secure-reg-access : Indicates that the ARMv7 Secure Debug Enable Register
- (SDER) is accessible. This will cause the driver to do
- any setup required that is only possible in ARMv7 secure
- state. If not present the ARMv7 SDER will not be touched,
- which means the PMU may fail to operate unless external
- code (bootloader or security monitor) has performed the
- appropriate initialisation. Note that this property is
- not valid for non-ARMv7 CPUs or ARMv7 CPUs booting Linux
- in Non-secure state.
-
-Example:
-
-pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupts = <100 101>;
-};
diff --git a/Documentation/devicetree/bindings/arm/pmu.yaml b/Documentation/devicetree/bindings/arm/pmu.yaml
new file mode 100644
index 000000000000..52ae094ce330
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/pmu.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/pmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Performance Monitor Units
+
+maintainers:
+ - Mark Rutland <mark.rutland@arm.com>
+ - Will Deacon <will.deacon@arm.com>
+
+description: |+
+ ARM cores often have a PMU for counting cpu and cache events like cache misses
+ and hits. The interface to the PMU is part of the ARM ARM. The ARM PMU
+ representation in the device tree should be done as follows:
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - apm,potenza-pmu
+ - arm,armv8-pmuv3
+ - arm,cortex-a73-pmu
+ - arm,cortex-a72-pmu
+ - arm,cortex-a57-pmu
+ - arm,cortex-a53-pmu
+ - arm,cortex-a35-pmu
+ - arm,cortex-a17-pmu
+ - arm,cortex-a15-pmu
+ - arm,cortex-a12-pmu
+ - arm,cortex-a9-pmu
+ - arm,cortex-a8-pmu
+ - arm,cortex-a7-pmu
+ - arm,cortex-a5-pmu
+ - arm,arm11mpcore-pmu
+ - arm,arm1176-pmu
+ - arm,arm1136-pmu
+ - brcm,vulcan-pmu
+ - cavium,thunder-pmu
+ - qcom,scorpion-pmu
+ - qcom,scorpion-mp-pmu
+ - qcom,krait-pmu
+
+ interrupts:
+ # Don't know how many CPUs, so no constraints to specify
+ description: 1 per-cpu interrupt (PPI) or 1 interrupt per core.
+
+ interrupt-affinity:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ When using SPIs, specifies a list of phandles to CPU
+ nodes corresponding directly to the affinity of
+ the SPIs listed in the interrupts property.
+
+ When using a PPI, specifies a list of phandles to CPU
+ nodes corresponding to the set of CPUs which have
+ a PMU of this type signalling the PPI listed in the
+ interrupts property, unless this is already specified
+ by the PPI interrupt specifier itself (in which case
+ the interrupt-affinity property shouldn't be present).
+
+ This property should be present when there is more than
+ a single SPI.
+
+ qcom,no-pc-write:
+ type: boolean
+ description:
+ Indicates that this PMU doesn't support the 0xc and 0xd events.
+
+ secure-reg-access:
+ type: boolean
+ description:
+ Indicates that the ARMv7 Secure Debug Enable Register
+ (SDER) is accessible. This will cause the driver to do
+ any setup required that is only possible in ARMv7 secure
+ state. If not present the ARMv7 SDER will not be touched,
+ which means the PMU may fail to operate unless external
+ code (bootloader or security monitor) has performed the
+ appropriate initialisation. Note that this property is
+ not valid for non-ARMv7 CPUs or ARMv7 CPUs booting Linux
+ in Non-secure state.
+
+required:
+ - compatible
+
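+# Example retained from the former pmu.txt binding:
+examples:
+ - |
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts = <100 101>;
+ };
+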
+...
diff --git a/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
index 2ef86ae96df8..d19885b7c73f 100644
--- a/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
+++ b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
@@ -2,13 +2,14 @@
The Actions Semi Owl Clock Management Unit generates and supplies clock
to various controllers within the SoC. The clock binding described here is
-applicable to S900 and S700 SoC's.
+applicable to S900, S700 and S500 SoCs.
Required Properties:
- compatible: should be one of the following,
"actions,s900-cmu"
"actions,s700-cmu"
+ "actions,s500-cmu"
- reg: physical base address of the controller and length of memory mapped
region.
- clocks: Reference to the parent clocks ("hosc", "losc")
@@ -19,8 +20,8 @@ Each clock is assigned an identifier, and client nodes can use this identifier
to specify the clock which they consume.
All available clocks are defined as preprocessor macros in corresponding
-dt-bindings/clock/actions,s900-cmu.h or actions,s700-cmu.h header and can be
-used in device tree sources.
+dt-bindings/clock/actions,s900-cmu.h, actions,s700-cmu.h or
+actions,s500-cmu.h header and can be used in device tree sources.
External clocks:
diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
index 79511d7bb321..c41f0be5d438 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
@@ -10,6 +10,7 @@ Required Properties:
- GXL (S905X, S905D) : "amlogic,meson-gxl-aoclkc"
- GXM (S912) : "amlogic,meson-gxm-aoclkc"
- AXG (A113D, A113X) : "amlogic,meson-axg-aoclkc"
+ - G12A (S905X2, S905D2, S905Y2) : "amlogic,meson-g12a-aoclkc"
followed by the common "amlogic,meson-gx-aoclkc"
- clocks: list of clock phandle, one for each entry clock-names.
- clock-names: should contain the following:
diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
index a6871953bf04..5c8b105be4d6 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
@@ -9,6 +9,7 @@ Required Properties:
"amlogic,gxbb-clkc" for GXBB SoC,
"amlogic,gxl-clkc" for GXL and GXM SoC,
"amlogic,axg-clkc" for AXG SoC.
+ "amlogic,g12a-clkc" for G12A SoC.
- clocks : list of clock phandle, one for each entry clock-names.
- clock-names : should contain the following:
* "xtal": the platform xtal
diff --git a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
index 50d5897c9849..183c327a7d6b 100644
--- a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
@@ -50,6 +50,8 @@ Required Properties:
IPs.
- "samsung,exynos5433-cmu-cam1" - clock controller compatible for CMU_CAM1
which generates clocks for Cortex-A5/MIPI_CSIS2/FIMC-LITE_C/FIMC-FD IPs.
+ - "samsung,exynos5433-cmu-imem" - clock controller compatible for CMU_IMEM
+ which generates clocks for SSS (Security SubSystem) and SlimSSS IPs.
- reg: physical base address of the controller and length of memory mapped
region.
@@ -168,6 +170,12 @@ Required Properties:
- aclk_cam1_400
- aclk_cam1_552
+ Input clocks for imem clock controller:
+ - oscclk
+ - aclk_imem_sssx_266
+ - aclk_imem_266
+ - aclk_imem_200
+
Optional properties:
- power-domains: a phandle to respective power domain node as described by
generic PM domain bindings (see power/power_domain.txt for more
@@ -469,6 +477,21 @@ Example 2: Examples of clock controller nodes are listed below.
power-domains = <&pd_cam1>;
};
+ cmu_imem: clock-controller@11060000 {
+ compatible = "samsung,exynos5433-cmu-imem";
+ reg = <0x11060000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "aclk_imem_sssx_266",
+ "aclk_imem_266",
+ "aclk_imem_200";
+ clocks = <&xxti>,
+ <&cmu_top CLK_DIV_ACLK_IMEM_SSSX_266>,
+ <&cmu_top CLK_DIV_ACLK_IMEM_266>,
+ <&cmu_top CLK_DIV_ACLK_IMEM_200>;
+ };
+
Example 3: UART controller node that consumes the clock generated by the clock
controller.
diff --git a/Documentation/devicetree/bindings/clock/fixed-clock.txt b/Documentation/devicetree/bindings/clock/fixed-clock.txt
deleted file mode 100644
index 0641a663ad69..000000000000
--- a/Documentation/devicetree/bindings/clock/fixed-clock.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Binding for simple fixed-rate clock sources.
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible : shall be "fixed-clock".
-- #clock-cells : from common clock binding; shall be set to 0.
-- clock-frequency : frequency of clock in Hz. Should be a single cell.
-
-Optional properties:
-- clock-accuracy : accuracy of clock in ppb (parts per billion).
- Should be a single cell.
-- clock-output-names : From common clock binding.
-
-Example:
- clock {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <1000000000>;
- clock-accuracy = <100>;
- };
diff --git a/Documentation/devicetree/bindings/clock/fixed-clock.yaml b/Documentation/devicetree/bindings/clock/fixed-clock.yaml
new file mode 100644
index 000000000000..b657ecd0ef1c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fixed-clock.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/fixed-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binding for simple fixed-rate clock sources
+
+maintainers:
+ - Michael Turquette <mturquette@baylibre.com>
+ - Stephen Boyd <sboyd@kernel.org>
+
+properties:
+ compatible:
+ const: fixed-clock
+
+ "#clock-cells":
+ const: 0
+
+ clock-frequency: true
+
+ clock-accuracy:
+ description: accuracy of clock in ppb (parts per billion).
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ clock-output-names:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#clock-cells"
+ - clock-frequency
+
+additionalProperties: false
+
+examples:
+ - |
+ clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000000>;
+ clock-accuracy = <100>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/fixed-factor-clock.txt b/Documentation/devicetree/bindings/clock/fixed-factor-clock.txt
deleted file mode 100644
index 189467a7188a..000000000000
--- a/Documentation/devicetree/bindings/clock/fixed-factor-clock.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Binding for simple fixed factor rate clock sources.
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible : shall be "fixed-factor-clock".
-- #clock-cells : from common clock binding; shall be set to 0.
-- clock-div: fixed divider.
-- clock-mult: fixed multiplier.
-- clocks: parent clock.
-
-Optional properties:
-- clock-output-names : From common clock binding.
-
-Some clocks that require special treatments are also handled by that
-driver, with the compatibles:
- - allwinner,sun4i-a10-pll3-2x-clk
-
-Example:
- clock {
- compatible = "fixed-factor-clock";
- clocks = <&parentclk>;
- #clock-cells = <0>;
- clock-div = <2>;
- clock-mult = <1>;
- };
diff --git a/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
new file mode 100644
index 000000000000..b567f8092f8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/fixed-factor-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binding for simple fixed factor rate clock sources
+
+maintainers:
+ - Michael Turquette <mturquette@baylibre.com>
+ - Stephen Boyd <sboyd@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - allwinner,sun4i-a10-pll3-2x-clk
+ - fixed-factor-clock
+
+ "#clock-cells":
+ const: 0
+
+ clocks:
+ maxItems: 1
+
+ clock-div:
+ description: Fixed divider
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 1
+
+ clock-mult:
+ description: Fixed multiplier
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ clock-output-names:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - "#clock-cells"
+ - clock-div
+ - clock-mult
+
+additionalProperties: false
+
+examples:
+ - |
+ clock {
+ compatible = "fixed-factor-clock";
+ clocks = <&parentclk>;
+ #clock-cells = <0>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt b/Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt
new file mode 100644
index 000000000000..c359367fd1a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fixed-mmio-clock.txt
@@ -0,0 +1,24 @@
+Binding for simple memory mapped io fixed-rate clock sources.
+The driver reads a clock frequency value from a single 32-bit memory mapped
+I/O register and registers it as a fixed rate clock.
+
+It was designed for test systems, like FPGAs, not for complete, finished SoCs.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "fixed-mmio-clock".
+- #clock-cells : from common clock binding; shall be set to 0.
+- reg : Address and length of the clock value register set.
+
+Optional properties:
+- clock-output-names : From common clock binding.
+
+Example:
+sysclock: sysclock@fd020004 {
+ #clock-cells = <0>;
+ compatible = "fixed-mmio-clock";
+ reg = <0xfd020004 0x4>;
+};
diff --git a/Documentation/devicetree/bindings/clock/imx8mm-clock.txt b/Documentation/devicetree/bindings/clock/imx8mm-clock.txt
new file mode 100644
index 000000000000..8e4ab9e619a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx8mm-clock.txt
@@ -0,0 +1,29 @@
+* Clock bindings for NXP i.MX8M Mini
+
+Required properties:
+- compatible: Should be "fsl,imx8mm-ccm"
+- reg: Address and length of the register set
+- #clock-cells: Should be <1>
+- clocks: list of clock specifiers, must contain an entry for each required
+ entry in clock-names
+- clock-names: should include the following entries:
+ - "osc_32k"
+ - "osc_24m"
+ - "clk_ext1"
+ - "clk_ext2"
+ - "clk_ext3"
+ - "clk_ext4"
+
+Example:
+
+clk: clock-controller@30380000 {
+ compatible = "fsl,imx8mm-ccm";
+ reg = <0x0 0x30380000 0x0 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>,
+ <&clk_ext3>, <&clk_ext4>;
+ clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4";
+};
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mm-clock.h
+for the full list of i.MX8M Mini clock IDs.
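+
+For example, a consumer node might reference a clock by ID (sketch only;
+the IDs come from the header above):
+
+serial@30860000 {
+ ...
+ clocks = <&clk IMX8MM_CLK_UART1_ROOT>;
+};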
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
index 87b4949e9bc8..944719bd586f 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
@@ -16,6 +16,7 @@ Required properties :
"qcom,rpmcc-msm8974", "qcom,rpmcc"
"qcom,rpmcc-apq8064", "qcom,rpmcc"
"qcom,rpmcc-msm8996", "qcom,rpmcc"
+ "qcom,rpmcc-msm8998", "qcom,rpmcc"
"qcom,rpmcc-qcs404", "qcom,rpmcc"
- #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt b/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
index f0a5090a3326..cd5c7186890a 100644
--- a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
+++ b/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
@@ -20,7 +20,7 @@ Example:
backlight: backlight {
compatible = "gpio-backlight";
gpios = <&gpio 44 GPIO_ACTIVE_HIGH>;
- }
+ };
...
diff --git a/Documentation/devicetree/bindings/display/ssd1307fb.txt b/Documentation/devicetree/bindings/display/ssd1307fb.txt
index 209d931ef16c..b67f8caa212c 100644
--- a/Documentation/devicetree/bindings/display/ssd1307fb.txt
+++ b/Documentation/devicetree/bindings/display/ssd1307fb.txt
@@ -36,7 +36,6 @@ ssd1307: oled@3c {
reg = <0x3c>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
- reset-active-low;
};
ssd1306: oled@3c {
@@ -44,7 +43,6 @@ ssd1306: oled@3c {
reg = <0x3c>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
- reset-active-low;
solomon,com-lrremap;
solomon,com-invdir;
solomon,com-offset = <32>;
diff --git a/Documentation/devicetree/bindings/dma/dma.txt b/Documentation/devicetree/bindings/dma/dma.txt
index 6312fb00ce8d..eeb4e4d1771e 100644
--- a/Documentation/devicetree/bindings/dma/dma.txt
+++ b/Documentation/devicetree/bindings/dma/dma.txt
@@ -16,6 +16,9 @@ Optional properties:
- dma-channels: Number of DMA channels supported by the controller.
- dma-requests: Number of DMA request signals supported by the
controller.
+- dma-channel-mask: Bitmask of available DMA channels in ascending order
+ that are not reserved by firmware and are available to
+ the kernel, i.e. the first channel corresponds to the LSB.
Example:
@@ -29,6 +32,7 @@ Example:
#dma-cells = <1>;
dma-channels = <32>;
dma-requests = <127>;
+ dma-channel-mask = <0xfffe>;
};
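+
+In this example the mask 0xfffe marks channel 0 as reserved by firmware,
+leaving channels 1-15 available to the kernel.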
* DMA router
diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
new file mode 100644
index 000000000000..6a0ff9059e72
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
@@ -0,0 +1,57 @@
+NXP Layerscape SoC qDMA Controller
+==================================
+
+This device follows the generic DMA bindings defined in dma/dma.txt.
+
+Required properties:
+
+- compatible: Must be one of
+ "fsl,ls1021a-qdma": for LS1021A Board
+ "fsl,ls1043a-qdma": for ls1043A Board
+ "fsl,ls1046a-qdma": for ls1046A Board
+- reg: Should contain the register's base address and length.
+- interrupts: Should contain a reference to the interrupt used by this
+ device.
+- interrupt-names: Should contain interrupt names:
+ "qdma-queue0": the block0 interrupt
+ "qdma-queue1": the block1 interrupt
+ "qdma-queue2": the block2 interrupt
+ "qdma-queue3": the block3 interrupt
+ "qdma-error": the error interrupt
+- fsl,dma-queues: Should contain the number of queues supported.
+- dma-channels: Number of DMA channels supported
+- block-number: the number of virtual blocks
+- block-offset: the offset between virtual blocks
+- status-sizes: status queue size per virtual block
+- queue-sizes: command queue size per virtual block, one cell
+ per queue
+
+Optional properties:
+
+- big-endian: If present, registers and hardware scatter/gather descriptors
+ of the qDMA are implemented in big endian mode; otherwise in little
+ endian mode.
+
+Examples:
+
+ qdma: dma-controller@8390000 {
+ compatible = "fsl,ls1021a-qdma";
+ reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */
+ <0x0 0x8389000 0x0 0x1000>, /* Status regs */
+ <0x0 0x838a000 0x0 0x2000>; /* Block regs */
+ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "qdma-error",
+ "qdma-queue0", "qdma-queue1";
+ dma-channels = <8>;
+ block-number = <2>;
+ block-offset = <0x1000>;
+ fsl,dma-queues = <2>;
+ status-sizes = <64>;
+ queue-sizes = <64 64>;
+ big-endian;
+ };
+
+DMA clients must use the format described in dma/dma.txt file.
diff --git a/Documentation/devicetree/bindings/dma/k3dma.txt b/Documentation/devicetree/bindings/dma/k3dma.txt
index 4945aeac4dc4..10a2f15b08a3 100644
--- a/Documentation/devicetree/bindings/dma/k3dma.txt
+++ b/Documentation/devicetree/bindings/dma/k3dma.txt
@@ -3,7 +3,9 @@
See dma.txt first
Required properties:
-- compatible: Should be "hisilicon,k3-dma-1.0"
+- compatible: Must be one of
+- "hisilicon,k3-dma-1.0"
+- "hisilicon,hisi-pcm-asp-dma-1.0"
- reg: Should contain DMA registers location and length.
- interrupts: Should contain one interrupt shared by all channel
- #dma-cells: see dma.txt, should be 1, para number
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index db757df7057d..0bedceed1963 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -23,8 +23,6 @@ Deprecated properties:
Optional properties:
-- is_private: The device channels should be marked as private and not for by the
- general purpose DMA channel allocator. False if not passed.
- multi-block: Multi block transfers supported by hardware. Array property with
one cell per channel. 0: not supported, 1 (default): supported.
- snps,dma-protection-control: AHB HPROT[3:1] protection setting.
diff --git a/Documentation/devicetree/bindings/dma/sprd-dma.txt b/Documentation/devicetree/bindings/dma/sprd-dma.txt
index 7a10fea2e51b..adccea9941f1 100644
--- a/Documentation/devicetree/bindings/dma/sprd-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sprd-dma.txt
@@ -31,7 +31,7 @@ DMA clients connected to the Spreadtrum DMA controller must use the format
described in the dma.txt file, using a two-cell specifier for each channel.
The two cells in order are:
1. A phandle pointing to the DMA controller.
-2. The channel id.
+2. The slave id.
spi0: spi@70a00000{
...
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index 174af2c45e77..93b6d961dd4f 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -37,10 +37,11 @@ Required properties:
Required properties for VDMA:
- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
-Optional properties:
-- xlnx,include-sg: Tells configured for Scatter-mode in
- the hardware.
Optional properties for AXI DMA:
+- xlnx,sg-length-width: Should be set to the width in bits of the length
+ register as configured in h/w. Takes values {8...26}. If the property
+ is missing or invalid then the default value 23 is used. This is the
+ maximum value that is supported by all IP versions.
- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware.
Optional properties for VDMA:
- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
diff --git a/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt b/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt
index 5626560a6cfd..8f52206cfd2a 100644
--- a/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt
+++ b/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt
@@ -232,37 +232,152 @@ Example:
};
};
-Stratix10 SoCFPGA ECC Manager
+Stratix10 SoCFPGA ECC Manager (ARM64)
The Stratix10 SoC ECC Manager handles the IRQs for each peripheral
-in a shared register similar to the Arria10. However, ECC requires
-access to registers that can only be read from Secure Monitor with
-SMC calls. Therefore the device tree is slightly different.
+in a shared register similar to the Arria10. However, Stratix10 ECC
+requires access to registers that can only be read from Secure Monitor
+with SMC calls. Therefore the device tree is slightly different. Note
+that only 1 interrupt is sent in Stratix10 because the double bit errors
+are treated as SErrors in ARM64 instead of IRQs in ARM32.
Required Properties:
- compatible : Should be "altr,socfpga-s10-ecc-manager"
-- interrupts : Should be single bit error interrupt, then double bit error
- interrupt.
+- altr,sysmgr-syscon : phandle to Stratix10 System Manager Block
+ containing the ECC manager registers.
+- interrupts : Should be single bit error interrupt.
- interrupt-controller : boolean indicator that ECC Manager is an interrupt controller
- #interrupt-cells : must be set to 2.
+- #address-cells: must be 1
+- #size-cells: must be 1
+- ranges : standard definition, should translate from local addresses
Subcomponents:
SDRAM ECC
Required Properties:
- compatible : Should be "altr,sdram-edac-s10"
-- interrupts : Should be single bit error interrupt, then double bit error
- interrupt, in this order.
+- interrupts : Should be single bit error interrupt.
+
+On-Chip RAM ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-ocram-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent OCRAM node.
+- interrupts : Should be single bit error interrupt.
+
+Ethernet FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-eth-mac-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent Ethernet node.
+- interrupts : Should be single bit error interrupt.
+
+NAND FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-nand-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent NAND node.
+- interrupts : Should be single bit error interrupt.
+
+DMA FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-dma-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent DMA node.
+- interrupts : Should be single bit error interrupt.
+
+USB FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-usb-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent USB node.
+- interrupts : Should be single bit error interrupt.
+
+SDMMC FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-sdmmc-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent SD/MMC node.
+- interrupts : Should be single bit error interrupt for port A
+ and then single bit error interrupt for port B.
Example:
eccmgr {
compatible = "altr,socfpga-s10-ecc-manager";
- interrupts = <0 15 4>, <0 95 4>;
+ altr,sysmgr-syscon = <&sysmgr>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupts = <0 15 4>;
interrupt-controller;
#interrupt-cells = <2>;
+ ranges;
sdramedac {
compatible = "altr,sdram-edac-s10";
- interrupts = <16 4>, <48 4>;
+ interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ ocram-ecc@ff8cc000 {
+ compatible = "altr,socfpga-s10-ocram-ecc";
+ reg = <0xff8cc000 0x100>;
+ altr,ecc-parent = <&ocram>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ emac0-rx-ecc@ff8c0000 {
+ compatible = "altr,socfpga-s10-eth-mac-ecc";
+ reg = <0xff8c0000 0x100>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ emac0-tx-ecc@ff8c0400 {
+ compatible = "altr,socfpga-s10-eth-mac-ecc";
+ reg = <0xff8c0400 0x100>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ nand-buf-ecc@ff8c8000 {
+ compatible = "altr,socfpga-s10-nand-ecc";
+ reg = <0xff8c8000 0x100>;
+ altr,ecc-parent = <&nand>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ nand-rd-ecc@ff8c8400 {
+ compatible = "altr,socfpga-s10-nand-ecc";
+ reg = <0xff8c8400 0x100>;
+ altr,ecc-parent = <&nand>;
+ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ nand-wr-ecc@ff8c8800 {
+ compatible = "altr,socfpga-s10-nand-ecc";
+ reg = <0xff8c8800 0x100>;
+ altr,ecc-parent = <&nand>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ dma-ecc@ff8c9000 {
+ compatible = "altr,socfpga-s10-dma-ecc";
+ reg = <0xff8c9000 0x100>;
+ altr,ecc-parent = <&pdma>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ usb0-ecc@ff8c4000 {
+ compatible = "altr,socfpga-s10-usb-ecc";
+ reg = <0xff8c4000 0x100>;
+ altr,ecc-parent = <&usb0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sdmmc-ecc@ff8c8c00 {
+ compatible = "altr,socfpga-s10-sdmmc-ecc";
+ reg = <0xff8c8c00 0x100>;
+ altr,ecc-parent = <&mmc>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/Documentation/devicetree/bindings/hwmon/adc128d818.txt b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
index 08bab0e94d25..d0ae46d7bac3 100644
--- a/Documentation/devicetree/bindings/hwmon/adc128d818.txt
+++ b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
@@ -26,7 +26,7 @@ Required node properties:
Optional node properties:
- - ti,mode: Operation mode (see above).
+ - ti,mode: Operation mode (u8) (see above).
Example (operation mode 2):
@@ -34,5 +34,5 @@ Example (operation mode 2):
adc128d818@1d {
compatible = "ti,adc128d818";
reg = <0x1d>;
- ti,mode = <2>;
+ ti,mode = /bits/ 8 <2>;
};
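+
+Note: the /bits/ 8 annotation stores the value as a single byte so that it
+matches the u8 type the driver reads; a default 32-bit cell would not.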
diff --git a/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
new file mode 100644
index 000000000000..ffb79ccf51ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
@@ -0,0 +1,26 @@
+Cirrus Logic Lochnagar Audio Development Board
+
+Lochnagar is an evaluation and development board for Cirrus Logic
+Smart CODEC and Amp devices. It allows the connection of most Cirrus
+Logic devices on mini-cards, as well as allowing connection of
+various application processor systems to provide a full evaluation
+platform. Audio system topology, clocking and power can all be
+controlled through the Lochnagar, allowing the device under test
+to be used in a variety of possible use cases.
+
+This binding document describes the binding for the hardware monitor
+portion of the driver.
+
+This binding must be part of the Lochnagar MFD binding:
+ [4] ../mfd/cirrus,lochnagar.txt
+
+Required properties:
+
+ - compatible : One of the following strings:
+ "cirrus,lochnagar2-hwmon"
+
+Example:
+
+lochnagar-hwmon {
+ compatible = "cirrus,lochnagar2-hwmon";
+};
diff --git a/Documentation/devicetree/bindings/hwmon/g762.txt b/Documentation/devicetree/bindings/hwmon/g762.txt
index 25cc6d8ee575..6d154c4923de 100644
--- a/Documentation/devicetree/bindings/hwmon/g762.txt
+++ b/Documentation/devicetree/bindings/hwmon/g762.txt
@@ -21,7 +21,7 @@ If an optional property is not set in .dts file, then current value is kept
unmodified (e.g. u-boot installed value).
Additional information on operational parameters for the device is available
-in Documentation/hwmon/g762. A detailed datasheet for the device is available
+in Documentation/hwmon/g762.rst. A detailed datasheet for the device is available
at http://natisbad.org/NAS/refs/GMT_EDS-762_763-080710-0.2.pdf.
Example g762 node:
diff --git a/Documentation/devicetree/bindings/hwmon/lm75.txt b/Documentation/devicetree/bindings/hwmon/lm75.txt
index 12d8cf7cf592..586b5ed70be7 100644
--- a/Documentation/devicetree/bindings/hwmon/lm75.txt
+++ b/Documentation/devicetree/bindings/hwmon/lm75.txt
@@ -25,6 +25,7 @@ Required properties:
"ti,tmp175",
"ti,tmp275",
"ti,tmp75",
+ "ti,tmp75b",
"ti,tmp75c",
- reg: I2C bus address of the device
diff --git a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt b/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
index 49ca5d83ed13..6ced829b0e58 100644
--- a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
+++ b/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
@@ -7,7 +7,16 @@ Required properties:
which correspond to thermal cooling states
Optional properties:
-- fan-supply : phandle to the regulator that provides power to the fan
+- fan-supply : phandle to the regulator that provides power to the fan
+- interrupts : This contains a single interrupt specifier which
+ describes the tachometer output of the fan as an
+ interrupt source. The output signal must generate a
+ defined number of interrupts per fan revolution, which
+ requires the use of self-resetting edge interrupts.
+ See interrupt-controller/interrupts.txt for the format.
+- pulses-per-revolution : define the tachometer pulses per fan revolution as
+ an integer (default is 2 interrupts per revolution).
+ The value must be greater than zero.
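+
+The fan speed in rpm then follows from the interrupt rate roughly as
+rpm = (interrupts per second * 60) / pulses-per-revolution.
+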
Example:
fan0: pwm-fan {
@@ -38,3 +47,13 @@ Example:
};
};
};
+
+Example 2:
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ pwms = <&pwm 0 40000 0>;
+ fan-supply = <&reg_fan>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ pulses-per-revolution = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt
index dcc8390e0d24..dcc8390e0d24 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
index ee4c32454198..ee4c32454198 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
index bd81a482634f..bd81a482634f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt
index 49df0053347a..49df0053347a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
index 94a425eaa6c7..94a425eaa6c7 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
diff --git a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
index 0c252d9306da..ef2ae729718f 100644
--- a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
+++ b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
@@ -1,13 +1,19 @@
Samsung tm2-touchkey
Required properties:
-- compatible: must be "cypress,tm2-touchkey"
+- compatible:
+ * "cypress,tm2-touchkey" - for the touchkey found on the tm2 board
+ * "cypress,midas-touchkey" - for the touchkey found on midas boards
+ * "cypress,aries-touchkey" - for the touchkey found on aries boards
- reg: I2C address of the chip.
- interrupts: interrupt to which the chip is connected (see interrupt
binding[0]).
- vcc-supply : internal regulator output. 1.8V
- vdd-supply : power supply for IC 3.3V
+Optional properties:
+- linux,keycodes: array of keycodes (max 4), default KEY_PHONE and KEY_BACK
+
[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
Example:
@@ -21,5 +27,6 @@ Example:
interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
vcc-supply=<&ldo32_reg>;
vdd-supply=<&ldo33_reg>;
+ linux,keycodes = <KEY_PHONE KEY_BACK>;
};
};
diff --git a/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt b/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt
new file mode 100644
index 000000000000..b2a76301e632
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt
@@ -0,0 +1,25 @@
+Ilitek ILI210x/ILI251x touchscreen controller
+
+Required properties:
+- compatible:
+ ilitek,ili210x for ILI210x
+ ilitek,ili251x for ILI251x
+
+- reg: The I2C address of the device
+
+- interrupts: The sink for the touchscreen's IRQ output
+ See ../interrupt-controller/interrupts.txt
+
+Optional properties for main touchpad device:
+
+- reset-gpios: GPIO specifier for the touchscreen's reset pin (active low)
+
+Example:
+
+ touchscreen@41 {
+ compatible = "ilitek,ili251x";
+ reg = <0x41>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio5 21 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/input/msm-vibrator.txt b/Documentation/devicetree/bindings/input/msm-vibrator.txt
new file mode 100644
index 000000000000..8dcf014ef2e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/msm-vibrator.txt
@@ -0,0 +1,36 @@
+* Device tree bindings for the Qualcomm MSM vibrator
+
+Required properties:
+
+ - compatible: Should be one of
+ "qcom,msm8226-vibrator"
+ "qcom,msm8974-vibrator"
+ - reg: the base address and length of the IO memory for the registers.
+ - pinctrl-names: set to default.
+ - pinctrl-0: phandles pointing to pin configuration nodes. See
+ Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+ - clock-names: set to pwm
+ - clocks: phandle of the clock. See
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - enable-gpios: GPIO that enables the vibrator.
+
+Optional properties:
+
+ - vcc-supply: phandle to the regulator that provides power to the sensor.
+
+Example from a LG Nexus 5 (hammerhead) phone:
+
+vibrator@fd8c3450 {
+ reg = <0xfd8c3450 0x400>;
+ compatible = "qcom,msm8974-vibrator";
+
+ vcc-supply = <&pm8941_l19>;
+
+ clocks = <&mmcc CAMSS_GP1_CLK>;
+ clock-names = "pwm";
+
+ enable-gpios = <&msmgpio 60 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&vibrator_pin>;
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
index da2dc5d6c98b..870b8c5cce9b 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
@@ -1,11 +1,12 @@
FocalTech EDT-FT5x06 Polytouch driver
=====================================
-There are 3 variants of the chip for various touch panel sizes
+There are 5 variants of the chip for various touch panel sizes
FT5206GE1 2.8" .. 3.8"
FT5306DE4 4.3" .. 7"
FT5406EE8 7" .. 8.9"
FT5506EEG 7" .. 8.9"
+FT5726NEI 5.7" .. 11.6"
The software interface is identical for all those chips, so that
currently there is no need for the driver to distinguish between the
@@ -19,6 +20,7 @@ Required properties:
or: "edt,edt-ft5306"
or: "edt,edt-ft5406"
or: "edt,edt-ft5506"
+ or: "evervision,ev-ft5726"
or: "focaltech,ft6236"
- reg: I2C slave address of the chip (0x38)
@@ -42,6 +44,15 @@ Optional properties:
- offset: allows setting the edge compensation in the range from
0 to 31.
+
+ - offset-x: Same as offset, but applies only to the horizontal position.
+ Range from 0 to 80, only supported by evervision,ev-ft5726
+ devices.
+
+ - offset-y: Same as offset, but applies only to the vertical position.
+ Range from 0 to 80, only supported by evervision,ev-ft5726
+ devices.
+
- touchscreen-size-x : See touchscreen.txt
- touchscreen-size-y : See touchscreen.txt
- touchscreen-fuzz-x : See touchscreen.txt
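A minimal sketch of the new per-axis compensation on an evervision,ev-ft5726
panel (the I2C address, interrupt wiring and offset values are assumptions,
not taken from this binding):

	touchscreen@38 {
		compatible = "evervision,ev-ft5726";
		reg = <0x38>;
		interrupt-parent = <&gpio2>;
		interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
		/* independent horizontal/vertical edge compensation, 0 to 80 */
		offset-x = <31>;
		offset-y = <10>;
	};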
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
index f7e95c52f3c7..8cf0b4d38a7e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
@@ -3,6 +3,7 @@ Device tree bindings for Goodix GT9xx series touchscreen controller
Required properties:
- compatible : Should be "goodix,gt1151"
+ or "goodix,gt5688"
or "goodix,gt911"
or "goodix,gt9110"
or "goodix,gt912"
@@ -18,11 +19,14 @@ Optional properties:
- irq-gpios : GPIO pin used for IRQ. The driver uses the
interrupt gpio pin as output to reset the device.
- reset-gpios : GPIO pin used for reset
-
- - touchscreen-inverted-x : X axis is inverted (boolean)
- - touchscreen-inverted-y : Y axis is inverted (boolean)
- - touchscreen-swapped-x-y : X and Y axis are swapped (boolean)
- (swapping is done after inverting the axis)
+ - touchscreen-inverted-x
+ - touchscreen-inverted-y
+ - touchscreen-size-x
+ - touchscreen-size-y
+ - touchscreen-swapped-x-y
+
+The touchscreen-* properties are documented in touchscreen.txt in this
+directory.
Example:
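A minimal sketch of a node using the relocated touchscreen-* properties
(the I2C address and GPIO wiring are assumptions):

	gt911@5d {
		compatible = "goodix,gt911";
		reg = <0x5d>;
		interrupt-parent = <&gpio>;
		interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
		irq-gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>;
		reset-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
		/* the following are documented in touchscreen.txt */
		touchscreen-inverted-x;
		touchscreen-size-x = <1024>;
		touchscreen-size-y = <600>;
	};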
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt b/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt
index 64ad48b824a2..019373253b28 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt
@@ -1,13 +1,17 @@
-* Sitronix st1232 touchscreen controller
+* Sitronix st1232 or st1633 touchscreen controller
Required properties:
-- compatible: must be "sitronix,st1232"
+- compatible: must contain one of
+ * "sitronix,st1232"
+ * "sitronix,st1633"
- reg: I2C address of the chip
- interrupts: interrupt to which the chip is connected
Optional properties:
- gpios: a phandle to the reset GPIO
+For additional optional properties see: touchscreen.txt
+
Example:
i2c@00000000 {
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
index 4886c4aa2906..0ebe6dd043c7 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
@@ -1,10 +1,17 @@
* Semtech SX8654 I2C Touchscreen Controller
Required properties:
-- compatible: must be "semtech,sx8654"
+- compatible: must be one of the following, depending on the model:
+ "semtech,sx8650"
+ "semtech,sx8654"
+ "semtech,sx8655"
+ "semtech,sx8656"
- reg: i2c slave address
- interrupts: touch controller interrupt
+Optional properties:
+ - reset-gpios: GPIO specification for the NRST input
+
Example:
sx8654@48 {
@@ -12,4 +19,5 @@ Example:
reg = <0x48>;
interrupt-parent = <&gpio6>;
interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
deleted file mode 100644
index a3be5298a5eb..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ /dev/null
@@ -1,175 +0,0 @@
-* ARM Generic Interrupt Controller, version 3
-
-AArch64 SMP cores are often associated with a GICv3, providing Private
-Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI),
-Software Generated Interrupts (SGI), and Locality-specific Peripheral
-Interrupts (LPI).
-
-Main node required properties:
-
-- compatible : should at least contain "arm,gic-v3" or either
- "qcom,msm8996-gic-v3", "arm,gic-v3" for msm8996 SoCs
- to address SoC specific bugs/quirks
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. Must be a single cell with a value of at least 3.
- If the system requires describing PPI affinity, then the value must
- be at least 4.
-
- The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
- interrupts. Other values are reserved for future use.
-
- The 2nd cell contains the interrupt number for the interrupt type.
- SPI interrupts are in the range [0-987]. PPI interrupts are in the
- range [0-15].
-
- The 3rd cell is the flags, encoded as follows:
- bits[3:0] trigger type and level flags.
- 1 = edge triggered
- 4 = level triggered
-
- The 4th cell is a phandle to a node describing a set of CPUs this
- interrupt is affine to. The interrupt must be a PPI, and the node
- pointed must be a subnode of the "ppi-partitions" subnode. For
- interrupt types other than PPI or PPIs that are not partitionned,
- this cell must be zero. See the "ppi-partitions" node description
- below.
-
- Cells 5 and beyond are reserved for future use and must have a value
- of 0 if present.
-
-- reg : Specifies base physical address(s) and size of the GIC
- registers, in the following order:
- - GIC Distributor interface (GICD)
- - GIC Redistributors (GICR), one range per redistributor region
- - GIC CPU interface (GICC)
- - GIC Hypervisor interface (GICH)
- - GIC Virtual CPU interface (GICV)
-
- GICC, GICH and GICV are optional.
-
-- interrupts : Interrupt source of the VGIC maintenance interrupt.
-
-Optional
-
-- redistributor-stride : If using padding pages, specifies the stride
- of consecutive redistributors. Must be a multiple of 64kB.
-
-- #redistributor-regions: The number of independent contiguous regions
- occupied by the redistributors. Required if more than one such
- region is present.
-
-- msi-controller: Boolean property. Identifies the node as an MSI
- controller. Only present if the Message Based Interrupt
- functionnality is being exposed by the HW, and the mbi-ranges
- property present.
-
-- mbi-ranges: A list of pairs <intid span>, where "intid" is the first
- SPI of a range that can be used an MBI, and "span" the size of that
- range. Multiple ranges can be provided. Requires "msi-controller" to
- be set.
-
-- mbi-alias: Address property. Base address of an alias of the GICD
- region containing only the {SET,CLR}SPI registers to be used if
- isolation is required, and if supported by the HW.
-
-Sub-nodes:
-
-PPI affinity can be expressed as a single "ppi-partitions" node,
-containing a set of sub-nodes, each with the following property:
-- affinity: Should be a list of phandles to CPU nodes (as described in
- Documentation/devicetree/bindings/arm/cpus.yaml).
-
-GICv3 has one or more Interrupt Translation Services (ITS) that are
-used to route Message Signalled Interrupts (MSI) to the CPUs.
-
-These nodes must have the following properties:
-- compatible : Should at least contain "arm,gic-v3-its".
-- msi-controller : Boolean property. Identifies the node as an MSI controller
-- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
- which will generate the MSI.
-- reg: Specifies the base physical address and size of the ITS
- registers.
-
-Optional:
-- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated
- address and size of the pre-ITS window.
-
-The main GIC node must contain the appropriate #address-cells,
-#size-cells and ranges properties for the reg property of all ITS
-nodes.
-
-Examples:
-
- gic: interrupt-controller@2cf00000 {
- compatible = "arm,gic-v3";
- #interrupt-cells = <3>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- interrupt-controller;
- reg = <0x0 0x2f000000 0 0x10000>, // GICD
- <0x0 0x2f100000 0 0x200000>, // GICR
- <0x0 0x2c000000 0 0x2000>, // GICC
- <0x0 0x2c010000 0 0x2000>, // GICH
- <0x0 0x2c020000 0 0x2000>; // GICV
- interrupts = <1 9 4>;
-
- msi-controller;
- mbi-ranges = <256 128>;
-
- gic-its@2c200000 {
- compatible = "arm,gic-v3-its";
- msi-controller;
- #msi-cells = <1>;
- reg = <0x0 0x2c200000 0 0x20000>;
- };
- };
-
- gic: interrupt-controller@2c010000 {
- compatible = "arm,gic-v3";
- #interrupt-cells = <4>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- interrupt-controller;
- redistributor-stride = <0x0 0x40000>; // 256kB stride
- #redistributor-regions = <2>;
- reg = <0x0 0x2c010000 0 0x10000>, // GICD
- <0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31
- <0x0 0x2e000000 0 0x800000>; // GICR 2: CPUs 32-63
- <0x0 0x2c040000 0 0x2000>, // GICC
- <0x0 0x2c060000 0 0x2000>, // GICH
- <0x0 0x2c080000 0 0x2000>; // GICV
- interrupts = <1 9 4>;
-
- gic-its@2c200000 {
- compatible = "arm,gic-v3-its";
- msi-controller;
- #msi-cells = <1>;
- reg = <0x0 0x2c200000 0 0x20000>;
- };
-
- gic-its@2c400000 {
- compatible = "arm,gic-v3-its";
- msi-controller;
- #msi-cells = <1>;
- reg = <0x0 0x2c400000 0 0x20000>;
- };
-
- ppi-partitions {
- part0: interrupt-partition-0 {
- affinity = <&cpu0 &cpu2>;
- };
-
- part1: interrupt-partition-1 {
- affinity = <&cpu1 &cpu3>;
- };
- };
- };
-
-
- device@0 {
- reg = <0 0 0 4>;
- interrupts = <1 1 4 &part0>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
new file mode 100644
index 000000000000..c34df35a25fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
@@ -0,0 +1,279 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/arm,gic-v3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Generic Interrupt Controller, version 3
+
+maintainers:
+ - Marc Zyngier <marc.zyngier@arm.com>
+
+description: |
+ AArch64 SMP cores are often associated with a GICv3, providing Private
+ Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI),
+ Software Generated Interrupts (SGI), and Locality-specific Peripheral
+ Interrupts (LPI).
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - qcom,msm8996-gic-v3
+ - const: arm,gic-v3
+ - const: arm,gic-v3
+
+ interrupt-controller: true
+
+ "#address-cells":
+ enum: [ 0, 1, 2 ]
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ ranges: true
+
+ "#interrupt-cells":
+ description: |
+ Specifies the number of cells needed to encode an interrupt source.
+ Must be a single cell with a value of at least 3.
+ If the system requires describing PPI affinity, then the value must
+ be at least 4.
+
+ The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
+ interrupts. Other values are reserved for future use.
+
+ The 2nd cell contains the interrupt number for the interrupt type.
+ SPI interrupts are in the range [0-987]. PPI interrupts are in the
+ range [0-15].
+
+ The 3rd cell is the flags, encoded as follows:
+ bits[3:0] trigger type and level flags.
+ 1 = edge triggered
+ 4 = level triggered
+
+ The 4th cell is a phandle to a node describing a set of CPUs this
+ interrupt is affine to. The interrupt must be a PPI, and the node
+ pointed must be a subnode of the "ppi-partitions" subnode. For
+ interrupt types other than PPI or PPIs that are not partitioned,
+ this cell must be zero. See the "ppi-partitions" node description
+ below.
+
+ Cells 5 and beyond are reserved for future use and must have a value
+ of 0 if present.
+ enum: [ 3, 4 ]
+
+ reg:
+ description: |
+ Specifies base physical address(es) and size of the GIC
+ registers, in the following order:
+ - GIC Distributor interface (GICD)
+ - GIC Redistributors (GICR), one range per redistributor region
+ - GIC CPU interface (GICC)
+ - GIC Hypervisor interface (GICH)
+ - GIC Virtual CPU interface (GICV)
+
+ GICC, GICH and GICV are optional.
+ minItems: 2
+ maxItems: 4096 # Should be enough?
+
+ interrupts:
+ description:
+ Interrupt source of the VGIC maintenance interrupt.
+ maxItems: 1
+
+ redistributor-stride:
+ description:
+ If using padding pages, specifies the stride of consecutive
+ redistributors. Must be a multiple of 64kB.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint64
+ - multipleOf: 0x10000
+ exclusiveMinimum: 0
+
+ "#redistributor-regions":
+ description:
+ The number of independent contiguous regions occupied by the
+ redistributors. Required if more than one such region is present.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maximum: 4096 # Should be enough?
+
+ msi-controller:
+ description:
+ Only present if the Message Based Interrupt functionality is
+ being exposed by the HW, and the mbi-ranges property is present.
+
+ mbi-ranges:
+ description:
+ A list of pairs <intid span>, where "intid" is the first SPI of a range
+ that can be used as an MBI, and "span" the size of that range. Multiple
+ ranges can be provided.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ - items:
+ minItems: 2
+ maxItems: 2
+
+ mbi-alias:
+ description:
+ Address property. Base address of an alias of the GICD region containing
+ only the {SET,CLR}SPI registers to be used if isolation is required,
+ and if supported by the HW.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - items:
+ minItems: 1
+ maxItems: 2
+
+ ppi-partitions:
+ type: object
+ description:
+ PPI affinity can be expressed as a single "ppi-partitions" node,
+ containing a set of sub-nodes.
+ patternProperties:
+ "^interrupt-partition-[0-9]+$":
+ properties:
+ affinity:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Should be a list of phandles to CPU nodes (as described in
+ Documentation/devicetree/bindings/arm/cpus.yaml).
+
+ required:
+ - affinity
+
+dependencies:
+ mbi-ranges: [ msi-controller ]
+ msi-controller: [ mbi-ranges ]
+
+required:
+ - compatible
+ - interrupts
+ - reg
+
+patternProperties:
+ "^gic-its@": false
+ "^interrupt-controller@[0-9a-f]+$": false
+ # msi-controller is preferred, but allow other names
+ "^(msi-controller|gic-its|interrupt-controller)@[0-9a-f]+$":
+ type: object
+ description:
+ GICv3 has one or more Interrupt Translation Services (ITS) that are
+ used to route Message Signalled Interrupts (MSI) to the CPUs.
+ properties:
+ compatible:
+ const: arm,gic-v3-its
+
+ msi-controller: true
+
+ "#msi-cells":
+ description:
+ The single msi-cell is the DeviceID of the device which will generate
+ the MSI.
+ const: 1
+
+ reg:
+ description:
+ Specifies the base physical address and size of the ITS registers.
+ maxItems: 1
+
+ socionext,synquacer-pre-its:
+ description:
+ (u32, u32) tuple describing the untranslated
+ address and size of the pre-ITS window.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - items:
+ minItems: 2
+ maxItems: 2
+
+ required:
+ - compatible
+ - msi-controller
+ - "#msi-cells"
+ - reg
+
+ additionalProperties: false
+
+additionalProperties: false
+
+examples:
+ - |
+ gic: interrupt-controller@2cf00000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-controller;
+ reg = <0x2f000000 0x10000>, // GICD
+ <0x2f100000 0x200000>, // GICR
+ <0x2c000000 0x2000>, // GICC
+ <0x2c010000 0x2000>, // GICH
+ <0x2c020000 0x2000>; // GICV
+ interrupts = <1 9 4>;
+
+ msi-controller;
+ mbi-ranges = <256 128>;
+
+ msi-controller@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x2c200000 0x20000>;
+ };
+ };
+
+ interrupt-controller@2c010000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-controller;
+ redistributor-stride = <0x0 0x40000>; // 256kB stride
+ #redistributor-regions = <2>;
+ reg = <0x2c010000 0x10000>, // GICD
+ <0x2d000000 0x800000>, // GICR 1: CPUs 0-31
+ <0x2e000000 0x800000>, // GICR 2: CPUs 32-63
+ <0x2c040000 0x2000>, // GICC
+ <0x2c060000 0x2000>, // GICH
+ <0x2c080000 0x2000>; // GICV
+ interrupts = <1 9 4>;
+
+ msi-controller@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x2c200000 0x20000>;
+ };
+
+ msi-controller@2c400000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0x2c400000 0x20000>;
+ };
+
+ ppi-partitions {
+ part0: interrupt-partition-0 {
+ affinity = <&cpu0 &cpu2>;
+ };
+
+ part1: interrupt-partition-1 {
+ affinity = <&cpu1 &cpu3>;
+ };
+ };
+ };
+
+
+ device@0 {
+ reg = <0 4>;
+ interrupts = <1 1 4 &part0>;
+ };
+
+...
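The examples above do not exercise mbi-alias; where the hardware provides an
isolated alias of the GICD {SET,CLR}SPI page, the first example could carry
it roughly as below (the alias address is an invented placeholder):

	msi-controller;
	mbi-ranges = <256 128>;
	/* assumed alias of the GICD {SET,CLR}SPI registers */
	mbi-alias = <0x2c1f0000>;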
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
deleted file mode 100644
index 2f3244648646..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
+++ /dev/null
@@ -1,171 +0,0 @@
-* ARM Generic Interrupt Controller
-
-ARM SMP cores are often associated with a GIC, providing per processor
-interrupts (PPI), shared processor interrupts (SPI) and software
-generated interrupts (SGI).
-
-Primary GIC is attached directly to the CPU and typically has PPIs and SGIs.
-Secondary GICs are cascaded into the upward interrupt controller and do not
-have PPIs or SGIs.
-
-Main node required properties:
-
-- compatible : should be one of:
- "arm,arm1176jzf-devchip-gic"
- "arm,arm11mp-gic"
- "arm,cortex-a15-gic"
- "arm,cortex-a7-gic"
- "arm,cortex-a9-gic"
- "arm,eb11mp-gic"
- "arm,gic-400"
- "arm,pl390"
- "arm,tc11mp-gic"
- "brcm,brahma-b15-gic"
- "nvidia,tegra210-agic"
- "qcom,msm-8660-qgic"
- "qcom,msm-qgic2"
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The type shall be a <u32> and the value shall be 3.
-
- The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
- interrupts.
-
- The 2nd cell contains the interrupt number for the interrupt type.
- SPI interrupts are in the range [0-987]. PPI interrupts are in the
- range [0-15].
-
- The 3rd cell is the flags, encoded as follows:
- bits[3:0] trigger type and level flags.
- 1 = low-to-high edge triggered
- 2 = high-to-low edge triggered (invalid for SPIs)
- 4 = active high level-sensitive
- 8 = active low level-sensitive (invalid for SPIs).
- bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of
- the 8 possible cpus attached to the GIC. A bit set to '1' indicated
- the interrupt is wired to that CPU. Only valid for PPI interrupts.
- Also note that the configurability of PPI interrupts is IMPLEMENTATION
- DEFINED and as such not guaranteed to be present (most SoC available
- in 2014 seem to ignore the setting of this flag and use the hardware
- default value).
-
-- reg : Specifies base physical address(s) and size of the GIC registers. The
- first region is the GIC distributor register base and size. The 2nd region is
- the GIC cpu interface register base and size.
-
-Optional
-- interrupts : Interrupt source of the parent interrupt controller on
- secondary GICs, or VGIC maintenance interrupt on primary GIC (see
- below).
-
-- cpu-offset : per-cpu offset within the distributor and cpu interface
- regions, used when the GIC doesn't have banked registers. The offset is
- cpu-offset * cpu-nr.
-
-- clocks : List of phandle and clock-specific pairs, one for each entry
- in clock-names.
-- clock-names : List of names for the GIC clock input(s). Valid clock names
- depend on the GIC variant:
- "ic_clk" (for "arm,arm11mp-gic")
- "PERIPHCLKEN" (for "arm,cortex-a15-gic")
- "PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic")
- "clk" (for "arm,gic-400" and "nvidia,tegra210")
- "gclk" (for "arm,pl390")
-
-- power-domains : A phandle and PM domain specifier as defined by bindings of
- the power controller specified by phandle, used when the GIC
- is part of a Power or Clock Domain.
-
-
-Example:
-
- intc: interrupt-controller@fff11000 {
- compatible = "arm,cortex-a9-gic";
- #interrupt-cells = <3>;
- #address-cells = <1>;
- interrupt-controller;
- reg = <0xfff11000 0x1000>,
- <0xfff10100 0x100>;
- };
-
-
-* GIC virtualization extensions (VGIC)
-
-For ARM cores that support the virtualization extensions, additional
-properties must be described (they only exist if the GIC is the
-primary interrupt controller).
-
-Required properties:
-
-- reg : Additional regions specifying the base physical address and
- size of the VGIC registers. The first additional region is the GIC
- virtual interface control register base and size. The 2nd additional
- region is the GIC virtual cpu interface register base and size.
-
-- interrupts : VGIC maintenance interrupt.
-
-Example:
-
- interrupt-controller@2c001000 {
- compatible = "arm,cortex-a15-gic";
- #interrupt-cells = <3>;
- interrupt-controller;
- reg = <0x2c001000 0x1000>,
- <0x2c002000 0x2000>,
- <0x2c004000 0x2000>,
- <0x2c006000 0x2000>;
- interrupts = <1 9 0xf04>;
- };
-
-
-* GICv2m extension for MSI/MSI-x support (Optional)
-
-Certain revisions of GIC-400 supports MSI/MSI-x via V2M register frame(s).
-This is enabled by specifying v2m sub-node(s).
-
-Required properties:
-
-- compatible : The value here should contain "arm,gic-v2m-frame".
-
-- msi-controller : Identifies the node as an MSI controller.
-
-- reg : GICv2m MSI interface register base and size
-
-Optional properties:
-
-- arm,msi-base-spi : When the MSI_TYPER register contains an incorrect
- value, this property should contain the SPI base of
- the MSI frame, overriding the HW value.
-
-- arm,msi-num-spis : When the MSI_TYPER register contains an incorrect
- value, this property should contain the number of
- SPIs assigned to the frame, overriding the HW value.
-
-Example:
-
- interrupt-controller@e1101000 {
- compatible = "arm,gic-400";
- #interrupt-cells = <3>;
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-controller;
- interrupts = <1 8 0xf04>;
- ranges = <0 0 0 0xe1100000 0 0x100000>;
- reg = <0x0 0xe1110000 0 0x01000>,
- <0x0 0xe112f000 0 0x02000>,
- <0x0 0xe1140000 0 0x10000>,
- <0x0 0xe1160000 0 0x10000>;
- v2m0: v2m@8000 {
- compatible = "arm,gic-v2m-frame";
- msi-controller;
- reg = <0x0 0x80000 0 0x1000>;
- };
-
- ....
-
- v2mN: v2m@9000 {
- compatible = "arm,gic-v2m-frame";
- msi-controller;
- reg = <0x0 0x90000 0 0x1000>;
- };
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
new file mode 100644
index 000000000000..758fbd7128e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
@@ -0,0 +1,223 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/arm,gic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Generic Interrupt Controller v1 and v2
+
+maintainers:
+ - Marc Zyngier <marc.zyngier@arm.com>
+
+description: |+
+ ARM SMP cores are often associated with a GIC, providing per processor
+ interrupts (PPI), shared processor interrupts (SPI) and software
+ generated interrupts (SGI).
+
+ Primary GIC is attached directly to the CPU and typically has PPIs and SGIs.
+ Secondary GICs are cascaded into the upward interrupt controller and do not
+ have PPIs or SGIs.
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - arm,arm11mp-gic
+ - arm,cortex-a15-gic
+ - arm,cortex-a7-gic
+ - arm,cortex-a5-gic
+ - arm,cortex-a9-gic
+ - arm,eb11mp-gic
+ - arm,gic-400
+ - arm,pl390
+ - arm,tc11mp-gic
+ - nvidia,tegra210-agic
+ - qcom,msm-8660-qgic
+ - qcom,msm-qgic2
+
+ - items:
+ - const: arm,arm1176jzf-devchip-gic
+ - const: arm,arm11mp-gic
+
+ - items:
+ - const: brcm,brahma-b15-gic
+ - const: arm,cortex-a15-gic
+
+ interrupt-controller: true
+
+ "#address-cells":
+ enum: [ 0, 1 ]
+ "#size-cells":
+ const: 1
+
+ "#interrupt-cells":
+ const: 3
+ description: |
+ The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
+ interrupts.
+
+ The 2nd cell contains the interrupt number for the interrupt type.
+ SPI interrupts are in the range [0-987]. PPI interrupts are in the
+ range [0-15].
+
+ The 3rd cell is the flags, encoded as follows:
+ bits[3:0] trigger type and level flags.
+ 1 = low-to-high edge triggered
+ 2 = high-to-low edge triggered (invalid for SPIs)
+ 4 = active high level-sensitive
+ 8 = active low level-sensitive (invalid for SPIs).
+ bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of
+ the 8 possible cpus attached to the GIC. A bit set to '1' indicates
+ the interrupt is wired to that CPU. Only valid for PPI interrupts.
+ Also note that the configurability of PPI interrupts is IMPLEMENTATION
+ DEFINED and as such not guaranteed to be present (most SoCs available
+ in 2014 seem to ignore the setting of this flag and use the hardware
+ default value).
+
+ reg:
+ description: |
+ Specifies base physical address(es) and size of the GIC registers. The
+ first region is the GIC distributor register base and size. The 2nd region
+ is the GIC cpu interface register base and size.
+
+ For GICv2 with virtualization extensions, additional regions are
+ required for specifying the base physical address and size of the VGIC
+ registers. The first additional region is the GIC virtual interface
+ control register base and size. The 2nd additional region is the GIC
+ virtual cpu interface register base and size.
+ minItems: 2
+ maxItems: 4
+
+ interrupts:
+ description: Interrupt source of the parent interrupt controller on
+ secondary GICs, or VGIC maintenance interrupt on primary GIC (see
+ below).
+ maxItems: 1
+
+ cpu-offset:
+ description: per-cpu offset within the distributor and cpu interface
+ regions, used when the GIC doesn't have banked registers. The offset
+ is cpu-offset * cpu-nr.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ description: List of names for the GIC clock input(s). Valid clock names
+ depend on the GIC variant.
+ oneOf:
+ - const: ic_clk # for "arm,arm11mp-gic"
+ - const: PERIPHCLKEN # for "arm,cortex-a15-gic"
+ - items: # for "arm,cortex-a9-gic"
+ - const: PERIPHCLK
+ - const: PERIPHCLKEN
+ - const: clk # for "arm,gic-400" and "nvidia,tegra210"
+ - const: gclk # for "arm,pl390"
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+patternProperties:
+ "^v2m@[0-9a-f]+$":
+ description: |
+ * GICv2m extension for MSI/MSI-x support (Optional)
+
+ Certain revisions of GIC-400 support MSI/MSI-x via V2M register frame(s).
+ This is enabled by specifying v2m sub-node(s).
+
+ properties:
+ compatible:
+ const: arm,gic-v2m-frame
+
+ msi-controller: true
+
+ reg:
+ maxItems: 1
+ description: GICv2m MSI interface register base and size
+
+ arm,msi-base-spi:
+ description: When the MSI_TYPER register contains an incorrect value,
+ this property should contain the SPI base of the MSI frame, overriding
+ the HW value.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ arm,msi-num-spis:
+ description: When the MSI_TYPER register contains an incorrect value,
+ this property should contain the number of SPIs assigned to the
+ frame, overriding the HW value.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ required:
+ - compatible
+ - msi-controller
+ - reg
+
+ additionalProperties: false
+
+additionalProperties: false
+
+examples:
+ - |
+ // GICv1
+ intc: interrupt-controller@fff11000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ interrupt-controller;
+ reg = <0xfff11000 0x1000>,
+ <0xfff10100 0x100>;
+ };
+
+ - |
+ // GICv2
+ interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x2c001000 0x1000>,
+ <0x2c002000 0x2000>,
+ <0x2c004000 0x2000>,
+ <0x2c006000 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ - |
+ // GICv2m extension for MSI/MSI-x support
+ interrupt-controller@e1101000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-controller;
+ interrupts = <1 8 0xf04>;
+ ranges = <0 0 0 0xe1100000 0 0x100000>;
+ reg = <0x0 0xe1110000 0 0x01000>,
+ <0x0 0xe112f000 0 0x02000>,
+ <0x0 0xe1140000 0 0x10000>,
+ <0x0 0xe1160000 0 0x10000>;
+
+ v2m0: v2m@8000 {
+ compatible = "arm,gic-v2m-frame";
+ msi-controller;
+ reg = <0x0 0x80000 0 0x1000>;
+ };
+
+ //...
+
+ v2mN: v2m@9000 {
+ compatible = "arm,gic-v2m-frame";
+ msi-controller;
+ reg = <0x0 0x90000 0 0x1000>;
+ };
+ };
+...
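None of the examples uses cpu-offset; on a GIC without banked registers each
CPU's register copy sits at cpu-offset * cpu-nr, roughly as in this sketch
(the addresses and offset are invented):

	interrupt-controller@1e001000 {
		compatible = "arm,pl390";
		#interrupt-cells = <3>;
		interrupt-controller;
		reg = <0x1e001000 0x1000>,	/* distributor */
		      <0x1e000100 0x100>;	/* cpu interface */
		/* per-cpu register copies at base + 0x8000 * cpu-nr */
		cpu-offset = <0x8000>;
	};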
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index 8de96a4fb2d5..f977ea7617f6 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -16,6 +16,7 @@ Required properties:
- "renesas,irqc-r8a7793" (R-Car M2-N)
- "renesas,irqc-r8a7794" (R-Car E2)
- "renesas,intc-ex-r8a774a1" (RZ/G2M)
+ - "renesas,intc-ex-r8a774c0" (RZ/G2E)
- "renesas,intc-ex-r8a7795" (R-Car H3)
- "renesas,intc-ex-r8a7796" (R-Car M3-W)
- "renesas,intc-ex-r8a77965" (R-Car M3-N)
diff --git a/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt b/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt
deleted file mode 100644
index 099d9362ebc1..000000000000
--- a/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-NVIDIA Tegra 20 GART
-
-Required properties:
-- compatible: "nvidia,tegra20-gart"
-- reg: Two pairs of cells specifying the physical address and size of
- the memory controller registers and the GART aperture respectively.
-
-Example:
-
- gart {
- compatible = "nvidia,tegra20-gart";
- reg = <0x7000f024 0x00000018 /* controller registers */
- 0x58000000 0x02000000>; /* GART aperture */
- };
diff --git a/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt
new file mode 100644
index 000000000000..4438432bfe9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt
@@ -0,0 +1,127 @@
+Xilinx IPI Mailbox Controller
+========================================
+
+The Xilinx IPI (Inter-Processor Interrupt) mailbox controller manages
+messaging between two Xilinx Zynq UltraScale+ MPSoC IPI agents. Each IPI
+agent owns registers used for notification and buffers for messages.
+
+ +-------------------------------------+
+ | Xilinx ZynqMP IPI Controller |
+ +-------------------------------------+
+ +--------------------------------------------------+
+ATF | |
+ | |
+ | |
+ +--------------------------+ |
+ | |
+ | |
+ +--------------------------------------------------+
+ +------------------------------------------+
+ | +----------------+ +----------------+ |
+Hardware | | IPI Agent | | IPI Buffers | |
+ | | Registers | | | |
+ | | | | | |
+ | +----------------+ +----------------+ |
+ | |
+ | Xilinx IPI Agent Block |
+ +------------------------------------------+
+
+
+Controller Device Node:
+===========================
+Required properties:
+--------------------
+IPI agent node:
+- compatible: Shall be: "xlnx,zynqmp-ipi-mailbox"
+- interrupt-parent: Phandle for the interrupt controller
+- interrupts: Interrupt information corresponding to the
+ interrupt-names property.
+- xlnx,ipi-id: local Xilinx IPI agent ID
+- #address-cells: number of address cells of internal IPI mailbox nodes
+- #size-cells: number of size cells of internal IPI mailbox nodes
+
+Internal IPI mailbox node:
+- reg: IPI buffers address ranges
+- reg-names: Names of the reg resources. It should have:
+ * local_request_region
+ - IPI request msg buffer written by local and read
+ by remote
+ * local_response_region
+ - IPI response msg buffer written by local and read
+ by remote
+ * remote_request_region
+ - IPI request msg buffer written by remote and read
+ by local
+ * remote_response_region
+ - IPI response msg buffer written by remote and read
+ by local
+- #mbox-cells: Shall be 1. It contains:
+ * tx(0) or rx(1) channel
+- xlnx,ipi-id: remote Xilinx IPI agent ID to which the mailbox is
+ connected.
+
+Optional properties:
+--------------------
+- method: The method of accessing the IPI agent registers.
+ Permitted values are: "smc" and "hvc". Default is
+ "smc".
+
+Client Device Node:
+===========================
+Required properties:
+--------------------
+- mboxes: Standard property to specify a mailbox
+ (See ./mailbox.txt)
+- mbox-names: List of identifier strings for each mailbox
+ channel.
+
+Example:
+===========================
+ zynqmp_ipi {
+ compatible = "xlnx,zynqmp-ipi-mailbox";
+ interrupt-parent = <&gic>;
+ interrupts = <0 29 4>;
+ xlnx,ipi-id = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* APU<->RPU0 IPI mailbox controller */
+ ipi_mailbox_rpu0: mailbox@ff990400 {
+ reg = <0xff990400 0x20>,
+ <0xff990420 0x20>,
+ <0xff990080 0x20>,
+ <0xff9900a0 0x20>;
+ reg-names = "local_request_region",
+ "local_response_region",
+ "remote_request_region",
+ "remote_response_region";
+ #mbox-cells = <1>;
+ xlnx,ipi-id = <1>;
+ };
+ /* APU<->RPU1 IPI mailbox controller */
+ ipi_mailbox_rpu1: mailbox@ff990440 {
+ reg = <0xff990440 0x20>,
+ <0xff990460 0x20>,
+ <0xff990280 0x20>,
+ <0xff9902a0 0x20>;
+ reg-names = "local_request_region",
+ "local_response_region",
+ "remote_request_region",
+ "remote_response_region";
+ #mbox-cells = <1>;
+ xlnx,ipi-id = <2>;
+ };
+ };
+ rpu0 {
+ ...
+ mboxes = <&ipi_mailbox_rpu0 0>,
+ <&ipi_mailbox_rpu0 1>;
+ mbox-names = "tx", "rx";
+ };
+ rpu1 {
+ ...
+ mboxes = <&ipi_mailbox_rpu1 0>,
+ <&ipi_mailbox_rpu1 1>;
+ mbox-names = "tx", "rx";
+ };
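The example relies on the default register access method; under a hypervisor
the optional method property could be set explicitly, sketched here (not part
of the binding's own example):

	zynqmp_ipi {
		compatible = "xlnx,zynqmp-ipi-mailbox";
		...
		/* reach the IPI agent registers via HVC instead of the default SMC */
		method = "hvc";
	};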
diff --git a/Documentation/devicetree/bindings/media/i2c/adv748x.txt b/Documentation/devicetree/bindings/media/i2c/adv748x.txt
index 5dddc95f9cc4..4f91686e54a6 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv748x.txt
+++ b/Documentation/devicetree/bindings/media/i2c/adv748x.txt
@@ -48,7 +48,16 @@ are numbered as follows.
TXA source 10
TXB source 11
-The digital output port nodes must contain at least one endpoint.
+The digital output port nodes, when present, shall contain at least one
+endpoint. Each of those endpoints shall contain the data-lanes property as
+described in video-interfaces.txt.
+
+Required source endpoint properties:
+ - data-lanes: an array of physical data lane indexes
+ The accepted value(s) for this property depend on which of the two
+ sources is described. For TXA, 1, 2 or 4 data lanes can be described,
+ while for TXB only 1 data lane is valid. See video-interfaces.txt
+ for a detailed description.
Ports are optional if they are not connected to anything at the hardware level.
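As a sketch of the above (the labels and remote endpoint are assumptions),
a TXA source endpoint describing all four data lanes would carry:

	port@10 {
		reg = <10>;
		txa: endpoint {
			/* TXA accepts 1, 2 or 4 lanes; TXB would allow only <1> */
			data-lanes = <1 2 3 4>;
			remote-endpoint = <&csi40_in>;
		};
	};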
diff --git a/Documentation/devicetree/bindings/media/i2c/melexis,mlx90640.txt b/Documentation/devicetree/bindings/media/i2c/melexis,mlx90640.txt
new file mode 100644
index 000000000000..060d2b7a5893
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/melexis,mlx90640.txt
@@ -0,0 +1,20 @@
+* Melexis MLX90640 FIR Sensor
+
+Melexis MLX90640 FIR sensor support, which allows recording of thermal data
+with 32x24 resolution, excluding 2 lines of coefficient data that are used by
+userspace to render processed frames.
+
+Required Properties:
+ - compatible : Must be "melexis,mlx90640"
+ - reg : i2c address of the device
+
+Example:
+
+ i2c0@1c22000 {
+ ...
+ mlx90640@33 {
+ compatible = "melexis,mlx90640";
+ reg = <0x33>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/mt9m001.txt b/Documentation/devicetree/bindings/media/i2c/mt9m001.txt
new file mode 100644
index 000000000000..c920552b03ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/mt9m001.txt
@@ -0,0 +1,38 @@
+MT9M001: 1/2-Inch Megapixel Digital Image Sensor
+
+The MT9M001 is an SXGA-format, 1/2-inch CMOS active-pixel digital image
+sensor. It is programmable through the I2C interface.
+
+Required Properties:
+
+- compatible: shall be "onnn,mt9m001".
+- clocks: reference to the master clock into sensor
+
+Optional Properties:
+
+- reset-gpios: GPIO handle which is connected to the reset pin of the chip.
+ Active low.
+- standby-gpios: GPIO handle which is connected to the standby pin of the chip.
+ Active high.
+
+The device node must contain one 'port' child node with one 'endpoint' child
+sub-node for its digital output video port, in accordance with the video
+interface bindings defined in:
+Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+
+ &i2c1 {
+ camera-sensor@5d {
+ compatible = "onnn,mt9m001";
+ reg = <0x5d>;
+ reset-gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+ standby-gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+ clocks = <&camera_clk>;
+ port {
+ mt9m001_out: endpoint {
+ remote-endpoint = <&vcap_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/ov5645.txt b/Documentation/devicetree/bindings/media/i2c/ov5645.txt
index fd7aec9f8e24..72ad992f77be 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov5645.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ov5645.txt
@@ -26,9 +26,9 @@ Example:
&i2c1 {
...
- ov5645: ov5645@78 {
+ ov5645: ov5645@3c {
compatible = "ovti,ov5645";
- reg = <0x78>;
+ reg = <0x3c>;
enable-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio5 20 GPIO_ACTIVE_LOW>;
@@ -37,7 +37,7 @@ Example:
clocks = <&clks 200>;
clock-names = "xclk";
- clock-frequency = <23880000>;
+ clock-frequency = <24000000>;
vdddo-supply = <&camera_dovdd_1v8>;
vdda-supply = <&camera_avdd_2v8>;
diff --git a/Documentation/devicetree/bindings/media/imx7-csi.txt b/Documentation/devicetree/bindings/media/imx7-csi.txt
new file mode 100644
index 000000000000..3c07bc676bc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/imx7-csi.txt
@@ -0,0 +1,45 @@
+Freescale i.MX7 CMOS Sensor Interface
+=====================================
+
+csi node
+--------
+
+This is the device node for the CMOS Sensor Interface (CSI), which enables the
+chip to connect directly to external CMOS image sensors.
+to connect directly to external CMOS image sensors.
+
+Required properties:
+
+- compatible : "fsl,imx7-csi";
+- reg : base address and length of the register set for the device;
+- interrupts : should contain CSI interrupt;
+- clocks : list of clock specifiers, see
+ Documentation/devicetree/bindings/clock/clock-bindings.txt for details;
+- clock-names : must contain "axi", "mclk" and "dcic" entries, matching
+ entries in the clock property;
+
+The device node shall contain one 'port' child node with one child 'endpoint'
+node, according to the bindings defined in:
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+In the following example the remote endpoint is a video multiplexer.
+
+example:
+
+ csi: csi@30710000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "fsl,imx7-csi";
+ reg = <0x30710000 0x10000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CSI_MCLK_ROOT_CLK>,
+ <&clks IMX7D_CLK_DUMMY>;
+ clock-names = "axi", "mclk", "dcic";
+
+ port {
+ csi_from_csi_mux: endpoint {
+ remote-endpoint = <&csi_mux_to_csi>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt b/Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt
new file mode 100644
index 000000000000..71fd74ed3ec8
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt
@@ -0,0 +1,90 @@
+Freescale i.MX7 MIPI CSI2
+=========================
+
+mipi_csi2 node
+--------------
+
+This is the device node for the MIPI CSI-2 receiver core in the i.MX7 SoC. It
+is compatible with a previous version of the Samsung D-PHY.
+
+Required properties:
+
+- compatible : "fsl,imx7-mipi-csi2";
+- reg : base address and length of the register set for the device;
+- interrupts : should contain MIPI CSIS interrupt;
+- clocks : list of clock specifiers, see
+ Documentation/devicetree/bindings/clock/clock-bindings.txt for details;
+- clock-names : must contain "pclk", "wrap" and "phy" entries, matching
+ entries in the clock property;
+- power-domains : a phandle to the power domain, see
+ Documentation/devicetree/bindings/power/power_domain.txt for details.
+- reset-names : should include following entry "mrst";
+- resets : a list of phandle, should contain reset entry of
+ reset-names;
+- phy-supply : from the generic phy bindings, a phandle to a regulator that
+ provides power to MIPI CSIS core;
+
+Optional properties:
+
+- clock-frequency : The IP's main (system bus) clock frequency in Hz, default
+ value when this property is not specified is 166 MHz;
+- fsl,csis-hs-settle : differential receiver (HS-RX) settle time;
+
+The device node should contain two 'port' child nodes with one child 'endpoint'
+node, according to the bindings defined in:
+ Documentation/devicetree/bindings/media/video-interfaces.txt.
+The following are properties specific to those nodes.
+
+port node
+---------
+
+- reg : (required) can take the values 0 or 1, where 0 shall be
+ related to the sink port and port 1 shall be the source
+ one;
+
+endpoint node
+-------------
+
+- data-lanes : (required) an array specifying active physical MIPI-CSI2
+ data input lanes and their mapping to logical lanes; this
+ shall only be applied to port 0 (sink port); the array's
+ content is unused, only its length is meaningful
+ (in this case the maximum length supported is 2);
+
+example:
+
+ mipi_csi: mipi-csi@30750000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "fsl,imx7-mipi-csi2";
+ reg = <0x30750000 0x10000>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_IPG_ROOT_CLK>,
+ <&clks IMX7D_MIPI_CSI_ROOT_CLK>,
+ <&clks IMX7D_MIPI_DPHY_ROOT_CLK>;
+ clock-names = "pclk", "wrap", "phy";
+ clock-frequency = <166000000>;
+ power-domains = <&pgc_mipi_phy>;
+ phy-supply = <&reg_1p0d>;
+ resets = <&src IMX7_RESET_MIPI_PHY_MRST>;
+ reset-names = "mrst";
+ fsl,csis-hs-settle = <3>;
+
+ port@0 {
+ reg = <0>;
+
+ mipi_from_sensor: endpoint {
+ remote-endpoint = <&ov2680_to_mipi>;
+ data-lanes = <1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mipi_vc0_to_csi_mux: endpoint {
+ remote-endpoint = <&csi_mux_from_mipi_vc0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
index 2a615d84a682..b6b5dde6abd8 100644
--- a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
+++ b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
@@ -66,6 +66,15 @@ vcodec_dec: vcodec@16000000 {
"vencpll",
"venc_lt_sel",
"vdec_bus_clk_src";
+ assigned-clocks = <&topckgen CLK_TOP_VENC_LT_SEL>,
+ <&topckgen CLK_TOP_CCI400_SEL>,
+ <&topckgen CLK_TOP_VDEC_SEL>,
+ <&apmixedsys CLK_APMIXED_VCODECPLL>,
+ <&apmixedsys CLK_APMIXED_VENCPLL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL_370P5>,
+ <&topckgen CLK_TOP_UNIVPLL_D2>,
+ <&topckgen CLK_TOP_VCODECPLL>;
+ assigned-clock-rates = <0>, <0>, <0>, <1482000000>, <800000000>;
};
vcodec_enc: vcodec@18002000 {
@@ -105,4 +114,8 @@ vcodec_dec: vcodec@16000000 {
"venc_sel",
"venc_lt_sel_src",
"venc_lt_sel";
+ assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>,
+ <&topckgen CLK_TOP_VENC_LT_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>,
+ <&topckgen CLK_TOP_UNIVPLL1_D2>;
};
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index 0dd84a183ca7..224a4615b418 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -7,12 +7,13 @@ family of devices.
Each VIN instance has a single parallel input that supports RGB and YUV video,
with both external synchronization and BT.656 synchronization for the latter.
Depending on the instance the VIN input is connected to external SoC pins, or
-on Gen3 platforms to a CSI-2 receiver.
+on Gen3 and RZ/G2 platforms to a CSI-2 receiver.
- compatible: Must be one or more of the following
- "renesas,vin-r8a7743" for the R8A7743 device
- "renesas,vin-r8a7744" for the R8A7744 device
- "renesas,vin-r8a7745" for the R8A7745 device
+ - "renesas,vin-r8a774c0" for the R8A774C0 device
- "renesas,vin-r8a7778" for the R8A7778 device
- "renesas,vin-r8a7779" for the R8A7779 device
- "renesas,vin-r8a7790" for the R8A7790 device
@@ -61,10 +62,10 @@ The per-board settings Gen2 platforms:
- data-enable-active: polarity of CLKENB signal, see [1] for
description. Default is active high.
-The per-board settings Gen3 platforms:
+The per-board settings Gen3 and RZ/G2 platforms:
-Gen3 platforms can support both a single connected parallel input source
-from external SoC pins (port@0) and/or multiple parallel input sources
+Gen3 and RZ/G2 platforms can support a single connected parallel input
+source from external SoC pins (port@0) and/or multiple parallel input sources
from local SoC CSI-2 receivers (port@1) depending on SoC.
- renesas,id - ID number of the VIN, VINx in the documentation.
diff --git a/Documentation/devicetree/bindings/media/renesas,fcp.txt b/Documentation/devicetree/bindings/media/renesas,fcp.txt
index 3ec91803ba58..79c37395b396 100644
--- a/Documentation/devicetree/bindings/media/renesas,fcp.txt
+++ b/Documentation/devicetree/bindings/media/renesas,fcp.txt
@@ -2,8 +2,9 @@ Renesas R-Car Frame Compression Processor (FCP)
-----------------------------------------------
The FCP is a companion module of video processing modules in the Renesas R-Car
-Gen3 SoCs. It provides data compression and decompression, data caching, and
-conversion of AXI transactions in order to reduce the memory bandwidth.
+Gen3 and RZ/G2 SoCs. It provides data compression and decompression, data
+caching, and conversion of AXI transactions in order to reduce the memory
+bandwidth.
There are three types of FCP: FCP for Codec (FCPC), FCP for VSP (FCPV) and FCP
for FDP (FCPF). Their configuration and behaviour depend on the module they
diff --git a/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
index 541d936b62e8..d63275e17afd 100644
--- a/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
+++ b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
@@ -2,12 +2,13 @@ Renesas R-Car MIPI CSI-2
------------------------
The R-Car CSI-2 receiver device provides MIPI CSI-2 capabilities for the
-Renesas R-Car family of devices. It is used in conjunction with the
+Renesas R-Car and RZ/G2 family of devices. It is used in conjunction with the
R-Car VIN module, which provides the video capture capabilities.
Mandatory properties
--------------------
- compatible: Must be one or more of the following
+ - "renesas,r8a774c0-csi2" for the R8A774C0 device.
- "renesas,r8a7795-csi2" for the R8A7795 device.
- "renesas,r8a7796-csi2" for the R8A7796 device.
- "renesas,r8a77965-csi2" for the R8A77965 device.
diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.txt b/Documentation/devicetree/bindings/media/renesas,vsp1.txt
index 16427017cb45..cd5a955b2ea0 100644
--- a/Documentation/devicetree/bindings/media/renesas,vsp1.txt
+++ b/Documentation/devicetree/bindings/media/renesas,vsp1.txt
@@ -2,13 +2,13 @@
The VSP is a video processing engine that supports up-/down-scaling, alpha
blending, color space conversion and various other image processing features.
-It can be found in the Renesas R-Car second generation SoCs.
+It can be found in the Renesas R-Car Gen2, R-Car Gen3, RZ/G1, and RZ/G2 SoCs.
Required properties:
- compatible: Must contain one of the following values
- - "renesas,vsp1" for the R-Car Gen2 VSP1
- - "renesas,vsp2" for the R-Car Gen3 VSP2
+ - "renesas,vsp1" for the R-Car Gen2 and RZ/G1 VSP1
+ - "renesas,vsp2" for the R-Car Gen3 and RZ/G2 VSP2
- reg: Base address and length of the registers block for the VSP.
- interrupts: VSP interrupt specifier.
diff --git a/Documentation/devicetree/bindings/media/si470x.txt b/Documentation/devicetree/bindings/media/si470x.txt
new file mode 100644
index 000000000000..a9403558362e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/si470x.txt
@@ -0,0 +1,26 @@
+* Silicon Labs FM Radio receiver
+
+The Silicon Labs Si470x is a family of FM radio receivers with receive power
+scan, supporting 76-108 MHz and programmable through an I2C interface.
+Some of them include an RDS decoder.
+
+Required Properties:
+- compatible: Should contain "silabs,si470x"
+- reg: the I2C address of the device
+
+Optional Properties:
+- interrupts : The interrupt number
+- reset-gpios: GPIO specifier for the chip's reset line
+
+Example:
+
+&i2c2 {
+ si470x@63 {
+ compatible = "silabs,si470x";
+ reg = <0x63>;
+
+ interrupt-parent = <&gpj2>;
+ interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpj2 5 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/media/sun6i-csi.txt b/Documentation/devicetree/bindings/media/sun6i-csi.txt
index d4ab34f2240c..0dd540bb03db 100644
--- a/Documentation/devicetree/bindings/media/sun6i-csi.txt
+++ b/Documentation/devicetree/bindings/media/sun6i-csi.txt
@@ -6,8 +6,9 @@ Allwinner V3s SoC features a CSI module(CSI1) with parallel interface.
Required properties:
- compatible: value must be one of:
* "allwinner,sun6i-a31-csi"
- * "allwinner,sun8i-h3-csi", "allwinner,sun6i-a31-csi"
+ * "allwinner,sun8i-h3-csi"
* "allwinner,sun8i-v3s-csi"
+ * "allwinner,sun50i-a64-csi"
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the CSI
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt
index 7d60a50a4fa1..e55328237df4 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt
@@ -1,26 +1,37 @@
NVIDIA Tegra20 MC(Memory Controller)
Required properties:
-- compatible : "nvidia,tegra20-mc"
-- reg : Should contain 2 register ranges(address and length); see the
- example below. Note that the MC registers are interleaved with the
- GART registers, and hence must be represented as multiple ranges.
+- compatible : "nvidia,tegra20-mc-gart"
+- reg : Should contain 2 register ranges: physical base address and length of
+ the controller's registers and the GART aperture respectively.
+- clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - mc: the module's clock input
- interrupts : Should contain MC General interrupt.
- #reset-cells : Should be 1. This cell represents memory client module ID.
The assignments may be found in header file <dt-bindings/memory/tegra20-mc.h>
or in the TRM documentation.
+- #iommu-cells: Should be 0. This cell represents the number of cells in an
+ IOMMU specifier needed to encode an address. GART supports only a single
+ address space that is shared by all devices, therefore no additional
+ information is needed for the address encoding.
Example:
mc: memory-controller@7000f000 {
- compatible = "nvidia,tegra20-mc";
- reg = <0x7000f000 0x024
- 0x7000f03c 0x3c4>;
- interrupts = <0 77 0x04>;
+ compatible = "nvidia,tegra20-mc-gart";
+ reg = <0x7000f000 0x400 /* controller registers */
+ 0x58000000 0x02000000>; /* GART aperture */
+ clocks = <&tegra_car TEGRA20_CLK_MC>;
+ clock-names = "mc";
+ interrupts = <GIC_SPI 77 0x04>;
#reset-cells = <1>;
+ #iommu-cells = <0>;
};
video-codec@6001a000 {
compatible = "nvidia,tegra20-vde";
...
resets = <&mc TEGRA20_MC_RESET_VDE>;
+ iommus = <&mc>;
};
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 24c5cdaba8d2..ca83dcc84fb8 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -20,6 +20,8 @@ Required properties:
Optional properties:
- phy-handle: See ethernet.txt file in the same directory.
If absent, davinci_emac driver defaults to 100/FULL.
+- nvmem-cells: phandle, reference to an nvmem node for the MAC address
+- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
- ti,davinci-rmii-en: 1 byte, 1 means use RMII
- ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
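A sketch of the nvmem hookup (the EEPROM node, cell layout and labels are
assumptions; the macb binding below accepts the same pair of properties):

	eeprom@50 {
		compatible = "atmel,24c256";
		reg = <0x50>;
		#address-cells = <1>;
		#size-cells = <1>;

		macaddr: mac-address@0 {
			reg = <0x0 0x6>;	/* 6-byte MAC address cell */
		};
	};

	&emac0 {
		nvmem-cells = <&macaddr>;
		nvmem-cell-names = "mac-address";
	};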
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index 35694c0c376b..d66a5292b9d3 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -71,6 +71,10 @@ properties, described in binding documents:
Documentation/devicetree/bindings/net/fixed-link.txt
for details.
+- local-mac-address : See
+ Documentation/devicetree/bindings/net/ethernet.txt
+ for details.
+
Example
The following example shows three switches on three MDIO busses,
@@ -97,6 +101,7 @@ linked into one DSA cluster.
port@1 {
reg = <1>;
label = "lan1";
+ local-mac-address = [00 00 00 00 00 00];
};
port@2 {
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
index bbcb255c3150..93a7469e70d4 100644
--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
@@ -12,10 +12,15 @@ Required properties:
Subnodes:
The integrated switch subnode should be specified according to the binding
-described in dsa/dsa.txt. As the QCA8K switches do not have a N:N mapping of
-port and PHY id, each subnode describing a port needs to have a valid phandle
-referencing the internal PHY connected to it. The CPU port of this switch is
-always port 0.
+described in dsa/dsa.txt. If the QCA8K switch is connected to a SoC's external
+mdio-bus, each subnode describing a port needs to have a valid phandle
+referencing the internal PHY it is connected to. This is because there is no
+N:N mapping of port and PHY id.
+
+Don't use mixed external and internal mdio-bus configurations, as this is
+not supported by the hardware.
+
+The CPU port of this switch is always port 0.
A CPU port node has the following optional node:
@@ -31,8 +36,9 @@ For QCA8K the 'fixed-link' sub-node supports only the following properties:
- 'full-duplex' (boolean, optional), to indicate that full duplex is
used. When absent, half duplex is assumed.
-Example:
+Examples:
+for the external mdio-bus configuration:
&mdio0 {
phy_port1: phy@0 {
@@ -55,12 +61,12 @@ Example:
reg = <4>;
};
- switch0@0 {
+ switch@10 {
compatible = "qca,qca8337";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0>;
+ reg = <0x10>;
ports {
#address-cells = <1>;
@@ -108,3 +114,56 @@ Example:
};
};
};
+
+for the internal master mdio-bus configuration:
+
+ &mdio0 {
+ switch@10 {
+ compatible = "qca,qca8337";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0x10>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&gmac1>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "wan";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index cfc376bc977a..a68621580584 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -10,15 +10,14 @@ Documentation/devicetree/bindings/phy/phy-bindings.txt.
the boot program; should be used in cases where the MAC address assigned to
the device by the boot program is different from the "local-mac-address"
property;
-- nvmem-cells: phandle, reference to an nvmem node for the MAC address;
-- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used;
- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
the maximum frame size (there's contradiction in the Devicetree
Specification).
- phy-mode: string, operation mode of the PHY interface. This is now a de-facto
standard property; supported values are:
- * "internal"
+ * "internal" (Internal means there is not a standard bus between the MAC and
+ the PHY, something proprietary is being used to embed the PHY in the MAC.)
* "mii"
* "gmii"
* "sgmii"
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 174f292d8a3e..8b80515729d7 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -26,6 +26,10 @@ Required properties:
Optional elements: 'tsu_clk'
- clocks: Phandles to input clocks.
+Optional properties:
+- nvmem-cells: phandle, reference to an nvmem node for the MAC address
+- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
+
Optional properties for PHY child node:
- reset-gpios : Should specify the gpio for phy reset
- magic-packet : If present, indicates that the hardware supports waking
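For illustration, a hedged sketch of a macb node using the new optional
properties (the node and cell labels are placeholders):

	&gem0 {
		/* MAC address provided by a hypothetical nvmem cell */
		nvmem-cells = <&eth_mac>;
		nvmem-cell-names = "mac-address";
	};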
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.txt b/Documentation/devicetree/bindings/net/stm32-dwmac.txt
index 1341012722aa..a90eef11dc46 100644
--- a/Documentation/devicetree/bindings/net/stm32-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/stm32-dwmac.txt
@@ -14,8 +14,7 @@ Required properties:
- clock-names: Should be "stmmaceth" for the host clock.
Should be "mac-clk-tx" for the MAC TX clock.
Should be "mac-clk-rx" for the MAC RX clock.
- For MPU family need to add also "ethstp" for power mode clock and,
- "syscfg-clk" for SYSCFG clock.
+ For the MPU family, also add "ethstp" for the power mode clock.
- interrupt-names: Should contain a list of interrupt names corresponding to
the interrupts in the interrupts property, if available.
Should be "macirq" for the main MAC IRQ
@@ -24,9 +23,9 @@ Required properties:
encompases the glue register, and the offset of the control register.
Optional properties:
-- clock-names: For MPU family "mac-clk-ck" for PHY without quartz
-- st,int-phyclk (boolean) : valid only where PHY do not have quartz and need to be clock
- by RCC
+- clock-names: For MPU family "eth-ck" for PHY without quartz
+- st,eth-clk-sel (boolean) : set this property on an RGMII PHY to select the RCC clock instead of ETH_CLK125.
+- st,eth-ref-clk-sel (boolean) : set this property in RMII mode when the PHY has no 50MHz crystal, to select the RCC clock instead of ETH_REF_CLK.
Example:
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie.txt b/Documentation/devicetree/bindings/pci/altera-pcie.txt
index 6c396f17c91a..816b244a221e 100644
--- a/Documentation/devicetree/bindings/pci/altera-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/altera-pcie.txt
@@ -1,11 +1,13 @@
* Altera PCIe controller
Required properties:
-- compatible : should contain "altr,pcie-root-port-1.0"
+- compatible : should contain "altr,pcie-root-port-1.0" or "altr,pcie-root-port-2.0"
- reg: a list of physical base address and length for TXS and CRA.
+ For "altr,pcie-root-port-2.0", additional HIP base address and length.
- reg-names: must include the following entries:
"Txs": TX slave port region
"Cra": Control register access region
+ "Hip": Hard IP region (if "altr,pcie-root-port-2.0")
- interrupts: specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends
on the parent interrupt controller.
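A hedged sketch of the reg layout for the new "altr,pcie-root-port-2.0"
variant (all addresses and sizes here are placeholders, not taken from the
binding):

	pcie_0: pcie@c0000000 {
		compatible = "altr,pcie-root-port-2.0";
		reg = <0xc0000000 0x20000000>,
		      <0x20000000 0x00004000>,
		      <0x20004000 0x00002000>;
		reg-names = "Txs", "Cra", "Hip";
	};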
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index d514c1f2365f..a7f5f5afa0e6 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -9,6 +9,7 @@ Required properties:
- "fsl,imx6sx-pcie",
- "fsl,imx6qp-pcie"
- "fsl,imx7d-pcie"
+ - "fsl,imx8mq-pcie"
- reg: base address and length of the PCIe controller
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
@@ -45,7 +46,7 @@ Additional required properties for imx6sx-pcie:
PCIE_PHY power domains
- power-domain-names: Must be "pcie", "pcie_phy"
-Additional required properties for imx7d-pcie:
+Additional required properties for imx7d-pcie and imx8mq-pcie:
- power-domains: Must be set to a phandle pointing to PCIE_PHY power domain
- resets: Must contain phandles to PCIe-related reset lines exposed by SRC
IP block
@@ -53,6 +54,11 @@ Additional required properties for imx7d-pcie:
- "pciephy"
- "apps"
- "turnoff"
+- fsl,imx7d-pcie-phy: A phandle to an fsl,imx7d-pcie-phy node.
+
+Additional required properties for imx8mq-pcie:
+- clock-names: Must include the following additional entries:
+ - "pcie_aux"
Example:
@@ -79,3 +85,13 @@ Example:
clocks = <&clks 144>, <&clks 206>, <&clks 189>;
clock-names = "pcie", "pcie_bus", "pcie_phy";
};
+
+* Freescale i.MX7d PCIe PHY
+
+This is the PHY associated with the i.MX7d PCIe controller. It is used by the
+PCIe controller via the fsl,imx7d-pcie-phy phandle.
+
+Required properties:
+- compatible:
+ - "fsl,imx7d-pcie-phy"
+- reg: base address and length of the PCIe PHY controller
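A hedged sketch of the new PHY node and its reference from the controller
(the unit address and node labels are placeholders):

	pcie_phy: pcie-phy@306d0000 {
		compatible = "fsl,imx7d-pcie-phy";
		reg = <0x306d0000 0x10000>;
	};

	&pcie {
		fsl,imx7d-pcie-phy = <&pcie_phy>;
	};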
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
index 9b2b8d66d1f4..e20ceaab9b38 100644
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
@@ -13,6 +13,7 @@ information.
Required properties:
- compatible: should contain the platform identifier such as:
+ RC mode:
"fsl,ls1021a-pcie"
"fsl,ls2080a-pcie", "fsl,ls2085a-pcie"
"fsl,ls2088a-pcie"
@@ -20,6 +21,8 @@ Required properties:
"fsl,ls1046a-pcie"
"fsl,ls1043a-pcie"
"fsl,ls1012a-pcie"
+ EP mode:
+ "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"
- reg: base addresses and lengths of the PCIe controller register blocks.
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 976ef7bfff93..6904882a0e94 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -3,6 +3,7 @@
Required properties:
compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
"renesas,pcie-r8a7744" for the R8A7744 SoC;
+ "renesas,pcie-r8a774c0" for the R8A774C0 SoC;
"renesas,pcie-r8a7779" for the R8A7779 SoC;
"renesas,pcie-r8a7790" for the R8A7790 SoC;
"renesas,pcie-r8a7791" for the R8A7791 SoC;
@@ -13,7 +14,8 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
"renesas,pcie-r8a77990" for the R8A77990 SoC;
"renesas,pcie-rcar-gen2" for a generic R-Car Gen2 or
RZ/G1 compatible device.
- "renesas,pcie-rcar-gen3" for a generic R-Car Gen3 compatible device.
+ "renesas,pcie-rcar-gen3" for a generic R-Car Gen3 or
+ RZ/G2 compatible device.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt
index 452fe48c4fdd..d5cbfe6b0d89 100644
--- a/Documentation/devicetree/bindings/pci/ti-pci.txt
+++ b/Documentation/devicetree/bindings/pci/ti-pci.txt
@@ -1,14 +1,21 @@
TI PCI Controllers
PCIe DesignWare Controller
- - compatible: Should be "ti,dra7-pcie" for RC
- Should be "ti,dra7-pcie-ep" for EP
+ - compatible: Should be "ti,dra7-pcie" for RC (deprecated)
+ Should be "ti,dra7-pcie-ep" for EP (deprecated)
+ Should be "ti,dra746-pcie-rc" for dra74x/dra76 in RC mode
+ Should be "ti,dra746-pcie-ep" for dra74x/dra76 in EP mode
+ Should be "ti,dra726-pcie-rc" for dra72x in RC mode
+ Should be "ti,dra726-pcie-ep" for dra72x in EP mode
- phys : list of PHY specifiers (used by generic PHY framework)
- phy-names : must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
number of PHYs as specified in *phys* property.
- ti,hwmods : Name of the hwmod associated to the pcie, "pcie<X>",
where <X> is the instance number of the pcie from the HW spec.
- num-lanes as specified in ../designware-pcie.txt
+ - ti,syscon-lane-sel : phandle/offset pair. Phandle to the system control
+ module and the register offset to specify lane
+ selection.
HOST MODE
=========
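For illustration, a hedged sketch of the new RC-mode compatible and the
lane-selection property (the syscon phandle and register offset are
placeholders, not taken from the binding):

	pcie1: pcie@51000000 {
		compatible = "ti,dra746-pcie-rc";
		phys = <&pcie1_phy>;
		phy-names = "pcie-phy0";
		ti,syscon-lane-sel = <&scm_conf 0x558>;
	};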
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
index 3e23fece99da..eb39f5051159 100644
--- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
@@ -19,7 +19,7 @@ such as pull-up, multi drive, etc.
Required properties for iomux controller:
- compatible: "atmel,at91rm9200-pinctrl" or "atmel,at91sam9x5-pinctrl"
- or "atmel,sama5d3-pinctrl"
+ or "atmel,sama5d3-pinctrl" or "microchip,sam9x60-pinctrl"
- atmel,mux-mask: array of mask (periph per bank) to describe if a pin can be
configured in this periph mode. All the periph and bank need to be described.
@@ -100,6 +100,7 @@ DRIVE_STRENGTH (3 << 5): indicate the drive strength of the pin using the
11 - High
OUTPUT (1 << 7): indicate this pin need to be configured as an output.
OUTPUT_VAL (1 << 8): output val (1 = high, 0 = low)
+SLEWRATE (1 << 9): slew rate of the pin: 0 = disable, 1 = enable
DEBOUNCE (1 << 16): indicate this pin needs debounce.
DEBOUNCE_VAL (0x3fff << 17): debounce value.
@@ -116,6 +117,19 @@ Some requirements for using atmel,at91rm9200-pinctrl binding:
configurations by referring to the phandle of that pin configuration node.
4. The gpio controller must be described in the pinctrl simple-bus.
+For each bank the required properties are:
+- compatible: "atmel,at91sam9x5-gpio" or "atmel,at91rm9200-gpio" or
+ "microchip,sam9x60-gpio"
+- reg: physical base address and length of the controller's registers
+- interrupts: interrupt outputs from the controller
+- interrupt-controller: marks the device node as an interrupt controller
+- #interrupt-cells: should be 2; refer to ../interrupt-controller/interrupts.txt
+ for more details.
+- gpio-controller
+- #gpio-cells: should be 2; the first cell is the GPIO number and the second
+ cell specifies GPIO flags as defined in <dt-bindings/gpio/gpio.h>.
+- clocks: bank clock
+
Examples:
pinctrl@fffff400 {
@@ -125,6 +139,17 @@ pinctrl@fffff400 {
compatible = "atmel,at91rm9200-pinctrl", "simple-bus";
reg = <0xfffff400 0x600>;
+ pioA: gpio@fffff400 {
+ compatible = "atmel,at91sam9x5-gpio";
+ reg = <0xfffff400 0x200>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
+ };
+
atmel,mux-mask = <
/* A B */
0xffffffff 0xffc00c3b /* pioA */
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx50-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx50-pinctrl.txt
new file mode 100644
index 000000000000..6da01d619d33
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx50-pinctrl.txt
@@ -0,0 +1,32 @@
+* Freescale IMX50 IOMUX Controller
+
+Please refer to fsl,imx-pinctrl.txt in this directory for common binding part
+and usage.
+
+Required properties:
+- compatible: "fsl,imx50-iomuxc"
+- fsl,pins: array of integer pairs, each representing the mux and config
+ setting for one pin. The format is fsl,pins = <PIN_FUNC_ID CONFIG>, where
+ PIN_FUNC_ID selects a pin working in a specific function and CONFIG is the
+ pad setting value, e.g. pull-up, for this pin. Please refer to the imx50
+ datasheet for the valid pad config settings.
+
+CONFIG bits definition:
+PAD_CTL_HVE (1 << 13)
+PAD_CTL_HYS (1 << 8)
+PAD_CTL_PKE (1 << 7)
+PAD_CTL_PUE (1 << 6)
+PAD_CTL_PUS_100K_DOWN (0 << 4)
+PAD_CTL_PUS_47K_UP (1 << 4)
+PAD_CTL_PUS_100K_UP (2 << 4)
+PAD_CTL_PUS_22K_UP (3 << 4)
+PAD_CTL_ODE (1 << 3)
+PAD_CTL_DSE_LOW (0 << 1)
+PAD_CTL_DSE_MED (1 << 1)
+PAD_CTL_DSE_HIGH (2 << 1)
+PAD_CTL_DSE_MAX (3 << 1)
+PAD_CTL_SRE_FAST (1 << 0)
+PAD_CTL_SRE_SLOW (0 << 0)
+
+Refer to imx50-pinfunc.h in the device tree source folder for all available
+imx50 PIN_FUNC_IDs.
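For illustration, a hedged fsl,pins sketch; the pad macro is a placeholder
name in the imx50-pinfunc.h style, and the CONFIG value 0xe2 is built from
the bits above (PAD_CTL_PKE | PAD_CTL_PUE | PAD_CTL_PUS_100K_UP |
PAD_CTL_DSE_MED = 0x80 + 0x40 + 0x20 + 0x02):

	pinctrl_uart1: uart1grp {
		fsl,pins = <
			MX50_PAD_UART1_TXD__UART1_TXD	0xe2
		>;
	};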
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt
new file mode 100644
index 000000000000..524a16fca666
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt
@@ -0,0 +1,36 @@
+* Freescale IMX8MM IOMUX Controller
+
+Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+for common binding part and usage.
+
+Required properties:
+- compatible: "fsl,imx8mm-iomuxc"
+- reg: should contain the base physical address and size of the iomuxc
+ registers.
+
+Required properties in sub-nodes:
+- fsl,pins: each entry consists of 6 integers and represents the mux and config
+ setting for one pin. The first 5 integers <mux_reg conf_reg input_reg mux_val
+ input_val> are specified using a PIN_FUNC_ID macro, which can be found in
+ <dt-bindings/pinctrl/imx8mm-pinfunc.h>. The last integer CONFIG is
+ the pad setting value like pull-up on this pin. Please refer to i.MX8M Mini
+ Reference Manual for detailed CONFIG settings.
+
+Examples:
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+};
+
+iomuxc: pinctrl@30330000 {
+ compatible = "fsl,imx8mm-iomuxc";
+ reg = <0x0 0x30330000 0x0 0x10000>;
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
+ MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
+ >;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
index c7c088d2dd50..38dc56a57760 100644
--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
@@ -58,11 +58,11 @@ group pwm3
- functions pwm, gpio
group pmic1
- - pin 17
+ - pin 7
- functions pmic, gpio
group pmic0
- - pin 16
+ - pin 6
- functions pmic, gpio
group i2c2
@@ -112,19 +112,31 @@ group usb2_drvvbus1
- functions drvbus, gpio
group sdio_sb
- - pins 60-64
+ - pins 60-65
- functions sdio, gpio
group rgmii
- - pins 42-55
+ - pins 42-53
- functions mii, gpio
group pcie1
- - pins 39-40
+ - pins 39
+ - functions pcie, gpio
+
+group pcie1_clkreq
+ - pins 40
- functions pcie, gpio
+group pcie1_wakeup
+ - pins 41
+ - functions pcie, gpio
+
+group smi
+ - pins 54-55
+ - functions smi, gpio
+
group ptp
- - pins 56-58
+ - pins 56
- functions ptp, gpio
group ptp_clk
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 82ead40311f6..a47dd990a8d3 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -23,11 +23,11 @@ The GPIO bank for the controller is represented as a sub-node and it acts as a
GPIO controller.
Required properties for sub-nodes are:
- - reg: should contain address and size for mux, pull-enable, pull and
- gpio register sets
- - reg-names: an array of strings describing the "reg" entries. Must
- contain "mux", "pull" and "gpio". "pull-enable" is optional and
- when it is missing the "pull" registers are used instead
+ - reg: should contain a list of address and size, one tuple for each entry
+ in reg-names.
+ - reg-names: an array of strings describing the "reg" entries.
+ Must contain "mux" and "gpio".
+ May contain "pull", "pull-enable" and "ds" when appropriate.
- gpio-controller: identifies the node as a gpio controller
- #gpio-cells: must be 2
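A hedged sub-node sketch using the relaxed reg-names (the register offsets
and sizes are placeholders, not taken from the binding):

	gpio: bank@c0 {
		reg = <0xc0 0x40>,
		      <0xe8 0x18>,
		      <0x120 0x18>,
		      <0x14 0x10>;
		reg-names = "mux", "pull", "pull-enable", "gpio";
		gpio-controller;
		#gpio-cells = <2>;
	};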
diff --git a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
index c8c831d7b0d1..591ecdd39c7b 100644
--- a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
@@ -5,6 +5,7 @@ Required properties:
- "atmel,at91sam9rl-pwm"
- "atmel,sama5d3-pwm"
- "atmel,sama5d2-pwm"
+ - "microchip,sam9x60-pwm"
- reg: physical base address and length of the controller's registers
- #pwm-cells: Should be 3. See pwm.txt in this directory for a
description of the cells format.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
index fa7849d67836..daedfef09bb6 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
@@ -5,6 +5,8 @@ Required properties:
The SoC specific strings supported including:
"hisilicon,hi3516cv300-pwm"
"hisilicon,hi3519v100-pwm"
+ "hisilicon,hi3559v100-shub-pwm"
+ "hisilicon,hi3559v100-pwm
- reg: physical base address and length of the controller's registers.
- clocks: phandle and clock specifier of the PWM reference clock.
- resets: phandle and reset specifier for the PWM controller reset.
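A hedged node sketch for the new hi3559v100 compatible (the unit address and
the clock and reset specifiers are placeholders):

	pwm: pwm@12130000 {
		compatible = "hisilicon,hi3559v100-pwm";
		reg = <0x12130000 0x1000>;
		clocks = <&crg_ctrl 100>;
		resets = <&crg_ctrl 0x1dc 4>;
	};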
diff --git a/Documentation/devicetree/bindings/regulator/gpio-regulator.txt b/Documentation/devicetree/bindings/regulator/gpio-regulator.txt
index 1f496159e2bb..dd25e73b5d79 100644
--- a/Documentation/devicetree/bindings/regulator/gpio-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gpio-regulator.txt
@@ -4,16 +4,30 @@ Required properties:
- compatible : Must be "regulator-gpio".
- regulator-name : Defined in regulator.txt as optional, but required
here.
-- states : Selection of available voltages and GPIO configs.
- if there are no states, then use a fixed regulator
+- gpios : Array of one or more GPIO pins used to select the
+ regulator voltage/current listed in "states".
+- states : Selection of available voltages/currents provided by
+ this regulator and matching GPIO configurations to
+ achieve them. If there are no states in the "states"
+ array, use a fixed regulator instead.
Optional properties:
-- enable-gpio : GPIO to use to enable/disable the regulator.
-- gpios : GPIO group used to control voltage.
-- gpios-states : gpios pin's initial states array. 0: LOW, 1: HIGH.
- defualt is LOW if nothing is specified.
+- enable-gpios : GPIO used to enable/disable the regulator.
+ Warning, the GPIO phandle flags are ignored and the
+ GPIO polarity is controlled solely by the presence
+ of "enable-active-high" DT property. This is due to
+ compatibility with old DTs.
+- enable-active-high : Polarity of "enable-gpio" GPIO is active HIGH.
+ Default is active LOW.
+- gpios-states : On operating systems that don't support reading back
+ gpio values in output mode (most notably Linux), this
+ array provides the state to which the GPIO pins are set
+ when requesting them from the gpio controller. Systems
+ that are capable of preserving state when requesting
+ the lines are free to ignore this property.
+ 0: LOW, 1: HIGH. Default is LOW if nothing else
+ is specified.
- startup-delay-us : Startup time in microseconds.
-- enable-active-high : Polarity of GPIO is active high (default is low).
- regulator-type : Specifies what is being regulated, must be either
"voltage" or "current", defaults to voltage.
@@ -30,7 +44,7 @@ Example:
regulator-max-microvolt = <2600000>;
regulator-boot-on;
- enable-gpio = <&gpio0 23 0x4>;
+ enable-gpios = <&gpio0 23 0x4>;
gpios = <&gpio0 24 0x4
&gpio0 25 0x4>;
states = <1800000 0x3
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt
new file mode 100644
index 000000000000..e372dd3f0c8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt
@@ -0,0 +1,43 @@
+STM32MP1 PWR Regulators
+-----------------------
+
+Available Regulators in STM32MP1 PWR block are:
+ - reg11 for regulator 1V1
+ - reg18 for regulator 1V8
+ - usb33 for the switch USB3V3
+
+Required properties:
+- compatible: Must be "st,stm32mp1,pwr-reg"
+- list of child nodes that specify the initialization data for the defined
+ regulators reg11, reg18 and usb33. The definition for each of
+ these nodes is defined using the standard binding for regulators found at
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+- vdd-supply: phandle to the parent supply/regulator node for vdd input
+- vdd_3v3_usbfs-supply: phandle to the parent supply/regulator node for usb33
+
+Example:
+
+pwr_regulators: pwr@50001000 {
+ compatible = "st,stm32mp1,pwr-reg";
+ reg = <0x50001000 0x10>;
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+
+ reg11: reg11 {
+ regulator-name = "reg11";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ reg18: reg18 {
+ regulator-name = "reg18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ usb33: usb33 {
+ regulator-name = "usb33";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp-pil.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp-pil.txt
index a842a782b557..66af2c30944f 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp-pil.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp-pil.txt
@@ -35,7 +35,7 @@ on the Qualcomm Technology Inc. ADSP Hexagon core.
Value type: <stringlist>
Definition: List of clock input name strings sorted in the same
order as the clocks property. Definition must have
- "xo", "sway_cbcr", "lpass_aon", "lpass_ahbs_aon_cbcr",
+ "xo", "sway_cbcr", "lpass_ahbs_aon_cbcr",
"lpass_ahbm_aon_cbcr", "qdsp6ss_xo", "qdsp6ss_sleep"
and "qdsp6ss_core".
@@ -100,13 +100,12 @@ ADSP, as it is found on SDM845 boards.
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_LPASS_SWAY_CLK>,
- <&lpasscc LPASS_AUDIO_WRAPPER_AON_CLK>,
<&lpasscc LPASS_Q6SS_AHBS_AON_CLK>,
<&lpasscc LPASS_Q6SS_AHBM_AON_CLK>,
<&lpasscc LPASS_QDSP6SS_XO_CLK>,
<&lpasscc LPASS_QDSP6SS_SLEEP_CLK>,
<&lpasscc LPASS_QDSP6SS_CORE_CLK>;
- clock-names = "xo", "sway_cbcr", "lpass_aon",
+ clock-names = "xo", "sway_cbcr",
"lpass_ahbs_aon_cbcr",
"lpass_ahbm_aon_cbcr", "qdsp6ss_xo",
"qdsp6ss_sleep", "qdsp6ss_core";
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
index 9c0cff3a5ed8..292dfda9770d 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
@@ -19,13 +19,30 @@ on the Qualcomm ADSP Hexagon core.
- interrupts-extended:
Usage: required
Value type: <prop-encoded-array>
- Definition: must list the watchdog, fatal IRQs ready, handover and
- stop-ack IRQs
+ Definition: reference to the interrupts that match interrupt-names
- interrupt-names:
Usage: required
Value type: <stringlist>
- Definition: must be "wdog", "fatal", "ready", "handover", "stop-ack"
+ Definition: The interrupts needed depend on the compatible
+ string:
+ qcom,msm8974-adsp-pil:
+ qcom,msm8996-adsp-pil:
+ qcom,msm8996-slpi-pil:
+ qcom,qcs404-adsp-pas:
+ qcom,qcs404-cdsp-pas:
+ qcom,sdm845-adsp-pas:
+ qcom,sdm845-cdsp-pas:
+ must be "wdog", "fatal", "ready", "handover", "stop-ack"
+ qcom,qcs404-wcss-pas:
+ must be "wdog", "fatal", "ready", "handover", "stop-ack",
+ "shutdown-ack"
+
+- firmware-name:
+ Usage: optional
+ Value type: <string>
+ Definition: must list the relative firmware image path for the
+ Hexagon Core.
- clocks:
Usage: required
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
index 9ff5b0309417..41ca5df5be5a 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
@@ -28,24 +28,51 @@ on the Qualcomm Hexagon core.
- interrupts-extended:
Usage: required
Value type: <prop-encoded-array>
- Definition: must list the watchdog, fatal IRQs ready, handover and
- stop-ack IRQs
+ Definition: reference to the interrupts that match interrupt-names
- interrupt-names:
Usage: required
Value type: <stringlist>
- Definition: must be "wdog", "fatal", "ready", "handover", "stop-ack"
+ Definition: The interrupts needed depend on the compatible
+ string:
+ qcom,q6v5-pil:
+ qcom,ipq8074-wcss-pil:
+ qcom,msm8916-mss-pil:
+ qcom,msm8974-mss-pil:
+ must be "wdog", "fatal", "ready", "handover", "stop-ack"
+ qcom,msm8996-mss-pil:
+ qcom,sdm845-mss-pil:
+ must be "wdog", "fatal", "ready", "handover", "stop-ack",
+ "shutdown-ack"
+
+- firmware-name:
+ Usage: optional
+ Value type: <stringlist>
+ Definition: must list the relative firmware image paths for mba and
+ modem. They are used for booting and authenticating the
+ Hexagon core.
- clocks:
Usage: required
Value type: <phandle>
- Definition: reference to the iface, bus and mem clocks to be held on
- behalf of the booting of the Hexagon core
+ Definition: reference to the clocks that match clock-names
- clock-names:
Usage: required
Value type: <stringlist>
- Definition: must be "iface", "bus", "mem"
+ Definition: The clocks needed depend on the compatible string:
+ qcom,ipq8074-wcss-pil:
+ no clock names required
+ qcom,q6v5-pil:
+ qcom,msm8916-mss-pil:
+ qcom,msm8974-mss-pil:
+ must be "iface", "bus", "mem", "xo"
+ qcom,msm8996-mss-pil:
+ must be "iface", "bus", "mem", "xo", "gpll0_mss",
+ "snoc_axi", "mnoc_axi", "pnoc", "qdss"
+ qcom,sdm845-mss-pil:
+ must be "iface", "bus", "mem", "xo", "gpll0_mss",
+ "snoc_axi", "mnoc_axi", "prng"
- resets:
Usage: required
@@ -65,6 +92,19 @@ on the Qualcomm Hexagon core.
must be "mss_restart", "pdc_reset" for the modem
sub-system on SDM845 SoCs
+For the compatible strings below the following supplies are required:
+ "qcom,q6v5-pil"
+ "qcom,msm8916-mss-pil",
+- cx-supply:
+- mx-supply:
+- pll-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the regulators to be held on behalf of the
+ booting of the Hexagon core
+
+For the compatible string below the following supplies are required:
+ "qcom,msm8974-mss-pil"
- cx-supply:
- mss-supply:
- mx-supply:
@@ -74,6 +114,33 @@ on the Qualcomm Hexagon core.
Definition: reference to the regulators to be held on behalf of the
booting of the Hexagon core
+For the compatible string below the following supplies are required:
+ "qcom,msm8996-mss-pil"
+- pll-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the regulators to be held on behalf of the
+ booting of the Hexagon core
+
+- power-domains:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to power-domains that match power-domain-names
+
+- power-domain-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: The power-domains needed depend on the compatible string:
+ qcom,q6v5-pil:
+ qcom,ipq8074-wcss-pil:
+ qcom,msm8916-mss-pil:
+ qcom,msm8974-mss-pil:
+ no power-domain names required
+ qcom,msm8996-mss-pil:
+ must be "cx", "mx"
+ qcom,sdm845-mss-pil:
+ must be "cx", "mx", "mss", "load_state"
+
- qcom,smem-states:
Usage: required
Value type: <phandle>
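A hedged fragment showing the new firmware-name and power-domain properties
on an SDM845 modem node (the firmware paths, unit address and power-domain
phandles are placeholders, not taken from the binding):

	remoteproc@4080000 {
		compatible = "qcom,sdm845-mss-pil";
		firmware-name = "qcom/sdm845/mba.mbn",
				"qcom/sdm845/modem.mbn";
		power-domains = <&rpmhpd 0>, <&rpmhpd 1>,
				<&rpmhpd 2>, <&aoss_qmp_pd 0>;
		power-domain-names = "cx", "mx", "mss", "load_state";
	};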
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index 742cb470595b..bcfb13194f16 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -16,6 +16,7 @@ Required properties:
* "mediatek,mt8127-uart" for MT8127 compatible UARTS
* "mediatek,mt8135-uart" for MT8135 compatible UARTS
* "mediatek,mt8173-uart" for MT8173 compatible UARTS
+ * "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
* "mediatek,mt6577-uart" for MT6577 and all of the above
- reg: The base address of the UART register bank.
diff --git a/Documentation/devicetree/bindings/spi/fsl-spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt
index 8854004a1d3a..411375eac54d 100644
--- a/Documentation/devicetree/bindings/spi/fsl-spi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-spi.txt
@@ -18,6 +18,10 @@ Optional properties:
- gpios : specifies the gpio pins to be used for chipselects.
The gpios will be referred to as reg = <index> in the SPI child nodes.
If unspecified, a single SPI device without a chip select can be used.
+- fsl,spisel_boot : for the MPC8306 and MPC8309, specifies that the
+ SPISEL_BOOT signal is used as chip select for a slave device. Use
+ reg = <number of gpios> in the corresponding child node, i.e. 0 if
+ the gpios property is not present.
Example:
spi@4c0 {
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
index 9ba7c5a273b4..db8e0d71c5bc 100644
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
@@ -23,6 +23,18 @@ Required properties:
Recommended properties:
- spi-max-frequency: Definition as per
Documentation/devicetree/bindings/spi/spi-bus.txt
+Optional properties:
+- nvidia,tx-clk-tap-delay: Delays the clock going out to the external device
+ with this tap value. This property is used to tune the outgoing data from
+ the Tegra SPI master with respect to the outgoing Tegra SPI master clock.
+ Tap values vary based on the platform design trace lengths from Tegra SPI
+ to corresponding slave devices. Valid tap values are from 0 through 63.
+- nvidia,rx-clk-tap-delay: Delays the clock coming in from the external device
+ with this tap value. This property is used to adjust the Tegra SPI master
+ clock with respect to the data from the SPI slave device.
+ Tap values vary based on the platform design trace lengths from Tegra SPI
+ to corresponding slave devices. Valid tap values are from 0 through 63.
+
Example:
spi@7000d600 {
@@ -38,4 +50,12 @@ spi@7000d600 {
reset-names = "spi";
dmas = <&apbdma 16>, <&apbdma 16>;
dma-names = "rx", "tx";
+ <spi-client>@<bus_num> {
+ ...
+ ...
+ nvidia,rx-clk-tap-delay = <0>;
+ nvidia,tx-clk-tap-delay = <16>;
+ ...
+ };
+
};
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index 37cf69586d10..18e14ee257b2 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible : "renesas,msiof-r8a7743" (RZ/G1M)
"renesas,msiof-r8a7744" (RZ/G1N)
"renesas,msiof-r8a7745" (RZ/G1E)
+ "renesas,msiof-r8a77470" (RZ/G1C)
"renesas,msiof-r8a774a1" (RZ/G2M)
"renesas,msiof-r8a774c0" (RZ/G2E)
"renesas,msiof-r8a7790" (R-Car H2)
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
index 2864bc6b659c..f54c8c36395e 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
@@ -8,9 +8,16 @@ Required properties:
- interrupts : One interrupt, used by the controller.
- #address-cells : <1>, as required by generic SPI binding.
- #size-cells : <0>, also as required by generic SPI binding.
+- clocks : phandles for the clocks, see the description of clock-names below.
+ The phandle for the "ssi_clk" is required. The phandle for the "pclk" clock
+ is optional. If a single clock is specified but no clock-names, it is the
+ "ssi_clk" clock. If both clocks are listed, the "ssi_clk" must be first.
Optional properties:
-- cs-gpios : Specifies the gpio pis to be used for chipselects.
+- clock-names : Contains the names of the clocks:
+ "ssi_clk", for the core clock used to generate the external SPI clock.
+ "pclk", the interface clock, required for register access.
+- cs-gpios : Specifies the gpio pins to be used for chipselects.
- num-cs : The number of chipselects. If omitted, this will default to 4.
- reg-io-width : The I/O register width (in bytes) implemented by this
device. Supported values are 2 or 4 (the default).
@@ -25,6 +32,7 @@ Example:
interrupts = <0 154 4>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&spi_m_clk>;
num-cs = <2>;
cs-gpios = <&gpio0 13 0>,
<&gpio0 14 0>;
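Where both clocks are wired up, a hedged variant of the example with explicit
clock-names (the clock phandle labels are placeholders):

	spi@fff00000 {
		compatible = "snps,dw-apb-ssi";
		reg = <0xfff00000 0x1000>;
		clocks = <&spi_m_clk>, <&spi_pclk>;
		clock-names = "ssi_clk", "pclk";
	};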
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
index 6cc3c6fe25a3..e71b81a41ac0 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
@@ -7,7 +7,11 @@ Required properties:
- reg : address and length of the lpspi master registers
- interrupt-parent : core interrupt controller
- interrupts : lpspi interrupt
-- clocks : lpspi clock specifier
+- clocks : lpspi clock specifiers. Their number and order must correspond to
+ the entries in clock-names.
+- clock-names : "per" and "ipg", corresponding to the per clock and ipg clock
+ entries in "clocks" respectively. The i.MX7ULP only has a per
+ clock, so use CLK_DUMMY to fill the "ipg" slot.
- spi-slave : spi slave mode support. In slave mode, add this attribute without
value. In master mode, remove it.
@@ -18,6 +22,8 @@ lpspi2: lpspi@40290000 {
reg = <0x40290000 0x10000>;
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7ULP_CLK_LPSPI2>;
+ clocks = <&clks IMX7ULP_CLK_LPSPI2>,
+ <&clks IMX7ULP_CLK_DUMMY>;
+ clock-names = "per", "ipg";
spi-slave;
};
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
index 69c356767cf8..c0f6c8ecfa2e 100644
--- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -10,6 +10,7 @@ Required properties:
- mediatek,mt8135-spi: for mt8135 platforms
- mediatek,mt8173-spi: for mt8173 platforms
- mediatek,mt8183-spi: for mt8183 platforms
+ - "mediatek,mt8516-spi", "mediatek,mt2712-spi": for mt8516 platforms
- #address-cells: should be 1.
diff --git a/Documentation/devicetree/bindings/spi/spi-mt7621.txt b/Documentation/devicetree/bindings/spi/spi-mt7621.txt
new file mode 100644
index 000000000000..d5baec0fa56e
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-mt7621.txt
@@ -0,0 +1,26 @@
+Binding for MTK SPI controller (MT7621 MIPS)
+
+Required properties:
+- compatible: Should be one of the following:
+ - "ralink,mt7621-spi": for mt7621/mt7628/mt7688 platforms
+- #address-cells: should be 1.
+- #size-cells: should be 0.
+- reg: Address and length of the register set for the device
+- resets: phandle to the reset controller asserting this device in
+ reset
+ See ../reset/reset.txt for details.
+
+Optional properties:
+- cs-gpios: see spi-bus.txt.
+
+Example:
+
+- SoC Specific Portion:
+spi0: spi@b00 {
+ compatible = "ralink,mt7621-spi";
+ reg = <0xb00 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ resets = <&rstctrl 18>;
+ reset-names = "spi";
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-zynq-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynq-qspi.txt
new file mode 100644
index 000000000000..16b734ad3102
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-zynq-qspi.txt
@@ -0,0 +1,25 @@
+Xilinx Zynq QSPI controller Device Tree Bindings
+-------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,zynq-qspi-1.0".
+- reg : Physical base address and size of QSPI registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- clock-names : List of input clock names - "ref_clk", "pclk"
+ (See clock bindings for details).
+- clocks : Clock phandles (see clock bindings for details).
+
+Optional properties:
+- num-cs : Number of chip selects used.
+
+Example:
+ qspi: spi@e000d000 {
+ compatible = "xlnx,zynq-qspi-1.0";
+ reg = <0xe000d000 0x1000>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 19 4>;
+ clock-names = "ref_clk", "pclk";
+ clocks = <&clkc 10>, <&clkc 43>;
+ num-cs = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/ufs/ufs-hisi.txt b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt
index a48c44817367..0b83df1a5418 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-hisi.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt
@@ -6,9 +6,10 @@ Each UFS Host Controller should have its own node.
Required properties:
- compatible : compatible list, contains one of the following -
"hisilicon,hi3660-ufs", "jedec,ufs-1.1" for hisi ufs
- host controller present on Hi36xx chipset.
+ host controller present on Hi3660 chipset.
+ "hisilicon,hi3670-ufs", "jedec,ufs-2.1" for hisi ufs
+ host controller present on Hi3670 chipset.
- reg : should contain UFS register address space & UFS SYS CTRL register address,
-- interrupt-parent : interrupt device
- interrupts : interrupt number
- clocks : List of phandle and clock specifier pairs
- clock-names : List of clock input name strings sorted in the same
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 8cf59452c675..5111e9130bc3 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -4,11 +4,14 @@ UFSHC nodes are defined to describe on-chip UFS host controllers.
Each UFS controller instance should have its own node.
Required properties:
-- compatible : must contain "jedec,ufs-1.1" or "jedec,ufs-2.0", may
- also list one or more of the following:
- "qcom,msm8994-ufshc"
- "qcom,msm8996-ufshc"
- "qcom,ufshc"
+- compatible : must contain "jedec,ufs-1.1" or "jedec,ufs-2.0"
+
+ For Qualcomm SoCs must contain, as below, an
+ SoC-specific compatible along with "qcom,ufshc" and
+ the appropriate jedec string:
+ "qcom,msm8994-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+ "qcom,msm8996-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+ "qcom,sdm845-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
- interrupts : <interrupt mapping for UFS host controller IRQ>
- reg : <registers mapping>
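A hedged sketch of a Qualcomm UFS node following the clarified compatible
rule (the unit address and interrupt specifier are placeholders):

	ufshc@624000 {
		compatible = "qcom,msm8996-ufshc", "qcom,ufshc",
			     "jedec,ufs-2.0";
		reg = <0x624000 0x2500>;
		interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
	};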
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 385d22460b1d..8162b0eb4b50 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -140,6 +140,7 @@ fairphone Fairphone B.V.
faraday Faraday Technology Corporation
fastrax Fastrax Oy
fcs Fairchild Semiconductor
+feiyang Shenzhen Fly Young Technology Co.,LTD.
firefly Firefly
focaltech FocalTech Systems Co.,Ltd
friendlyarm Guangzhou FriendlyARM Computer Tech Co., Ltd
@@ -400,6 +401,7 @@ tcl Toby Churchill Ltd.
technexion TechNexion
technologic Technologic Systems
tempo Tempo Semiconductor
+techstar Shenzhen Techstar Electronics Co., Ltd.
terasic Terasic Inc.
thine THine Electronics, Inc.
ti Texas Instruments
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
index ef2b97b72e08..9f365c1a3399 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
@@ -8,6 +8,7 @@ Required properties:
- "renesas,r8a7743-wdt" (RZ/G1M)
- "renesas,r8a7744-wdt" (RZ/G1N)
- "renesas,r8a7745-wdt" (RZ/G1E)
+ - "renesas,r8a77470-wdt" (RZ/G1C)
- "renesas,r8a774a1-wdt" (RZ/G2M)
- "renesas,r8a774c0-wdt" (RZ/G2E)
- "renesas,r8a7790-wdt" (R-Car H2)
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index 51be62aa4385..f96059767c8c 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -490,7 +490,7 @@ doc: *title*
functions: *[ function ...]*
Include documentation for each *function* in *source*.
- If no *function* if specified, the documentaion for all functions
+ If no *function* is specified, the documentation for all functions
and types in the *source* will be included.
Examples::
@@ -517,4 +517,17 @@ How to use kernel-doc to generate man pages
If you just want to use kernel-doc to generate man pages you can do this
from the kernel git tree::
- $ scripts/kernel-doc -man $(git grep -l '/\*\*' -- :^Documentation :^tools) | scripts/split-man.pl /tmp/man
+ $ scripts/kernel-doc -man \
+ $(git grep -l '/\*\*' -- :^Documentation :^tools) \
+ | scripts/split-man.pl /tmp/man
+
+Some older versions of git do not support some of the variants of syntax for
+path exclusion. One of the following commands may work for those versions::
+
+ $ scripts/kernel-doc -man \
+ $(git grep -l '/\*\*' -- . ':!Documentation' ':!tools') \
+ | scripts/split-man.pl /tmp/man
+
+ $ scripts/kernel-doc -man \
+ $(git grep -l '/\*\*' -- . ":(exclude)Documentation" ":(exclude)tools") \
+ | scripts/split-man.pl /tmp/man
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 02605ee1d876..c039224b404e 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -27,8 +27,8 @@ Sphinx Install
==============
The ReST markups currently used by the Documentation/ files are meant to be
-built with ``Sphinx`` version 1.3 or upper. If you're desiring to build
-PDF outputs, it is recommended to use version 1.4.6 or upper.
+built with ``Sphinx`` version 1.3 or higher. If you desire to build
+PDF output, it is recommended to use version 1.4.6 or higher.
There's a script that checks for the Sphinx requirements. Please see
:ref:`sphinx-pre-install` for further details.
@@ -37,15 +37,15 @@ Most distributions are shipped with Sphinx, but its toolchain is fragile,
and it is not uncommon that upgrading it or some other Python packages
on your machine would cause the documentation build to break.
-A way to get rid of that is to use a different version than the one shipped
-on your distributions. In order to do that, it is recommended to install
+A way to avoid that is to use a different version than the one shipped
+with your distribution. In order to do so, it is recommended to install
Sphinx inside a virtual environment, using ``virtualenv-3``
or ``virtualenv``, depending on how your distribution packaged Python 3.
.. note::
#) Sphinx versions below 1.5 don't work properly with Python's
- docutils version 0.13.1 or upper. So, if you're willing to use
+ docutils version 0.13.1 or higher. So, if you're willing to use
those versions, you should run ``pip install 'docutils==0.12'``.
#) It is recommended to use the RTD theme for html output. Depending
@@ -82,7 +82,7 @@ output.
PDF and LaTeX builds
--------------------
-Such builds are currently supported only with Sphinx versions 1.4 and upper.
+Such builds are currently supported only with Sphinx versions 1.4 and higher.
For PDF and LaTeX output, you'll also need ``XeLaTeX`` version 3.14159265.
diff --git a/Documentation/driver-api/acpi/index.rst b/Documentation/driver-api/acpi/index.rst
new file mode 100644
index 000000000000..ace0008e54c2
--- /dev/null
+++ b/Documentation/driver-api/acpi/index.rst
@@ -0,0 +1,9 @@
+============
+ACPI Support
+============
+
+.. toctree::
+ :maxdepth: 2
+
+ linuxized-acpica
+ scan_handlers
diff --git a/Documentation/acpi/linuxized-acpica.txt b/Documentation/driver-api/acpi/linuxized-acpica.rst
index 3ad7b0dfb083..0ca8f1538519 100644
--- a/Documentation/acpi/linuxized-acpica.txt
+++ b/Documentation/driver-api/acpi/linuxized-acpica.rst
@@ -1,31 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+============================================================
Linuxized ACPICA - Introduction to ACPICA Release Automation
+============================================================
-Copyright (C) 2013-2016, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
+:Copyright: |copy| 2013-2016, Intel Corporation
+:Author: Lv Zheng <lv.zheng@intel.com>
-Abstract:
+Abstract
+========
This document describes the ACPICA project and the relationship between
ACPICA and Linux. It also describes how ACPICA code in drivers/acpi/acpica,
include/acpi and tools/power/acpi is automatically updated to follow the
upstream.
+ACPICA Project
+==============
-1. ACPICA Project
-
- The ACPI Component Architecture (ACPICA) project provides an operating
- system (OS)-independent reference implementation of the Advanced
- Configuration and Power Interface Specification (ACPI). It has been
- adapted by various host OSes. By directly integrating ACPICA, Linux can
- also benefit from the application experiences of ACPICA from other host
- OSes.
+The ACPI Component Architecture (ACPICA) project provides an operating
+system (OS)-independent reference implementation of the Advanced
+Configuration and Power Interface Specification (ACPI). It has been
+adapted by various host OSes. By directly integrating ACPICA, Linux can
+also benefit from the application experiences of ACPICA from other host
+OSes.
- The homepage of ACPICA project is: www.acpica.org, it is maintained and
- supported by Intel Corporation.
+The homepage of ACPICA project is: www.acpica.org, it is maintained and
+supported by Intel Corporation.
- The following figure depicts the Linux ACPI subsystem where the ACPICA
- adaptation is included:
+The following figure depicts the Linux ACPI subsystem where the ACPICA
+adaptation is included::
+---------------------------------------------------------+
| |
@@ -71,21 +77,27 @@ upstream.
Figure 1. Linux ACPI Software Components
- NOTE:
+.. note::
A. OS Service Layer - Provided by Linux to offer OS dependent
implementation of the predefined ACPICA interfaces (acpi_os_*).
+ ::
+
include/acpi/acpiosxf.h
drivers/acpi/osl.c
include/acpi/platform
include/asm/acenv.h
B. ACPICA Functionality - Released from ACPICA code base to offer
OS independent implementation of the ACPICA interfaces (acpi_*).
+ ::
+
drivers/acpi/acpica
include/acpi/ac*.h
tools/power/acpi
C. Linux/ACPI Functionality - Providing Linux specific ACPI
functionality to the other Linux kernel subsystems and user space
programs.
+ ::
+
drivers/acpi
include/linux/acpi.h
include/linux/acpi*.h
@@ -95,24 +107,27 @@ upstream.
ACPI subsystem to offer architecture specific implementation of the
ACPI interfaces. They are Linux specific components and are out of
the scope of this document.
+ ::
+
include/asm/acpi.h
include/asm/acpi*.h
arch/*/acpi
-2. ACPICA Release
+ACPICA Release
+==============
- The ACPICA project maintains its code base at the following repository URL:
- https://github.com/acpica/acpica.git. As a rule, a release is made every
- month.
+The ACPICA project maintains its code base at the following repository URL:
+https://github.com/acpica/acpica.git. As a rule, a release is made every
+month.
- As the coding style adopted by the ACPICA project is not acceptable by
- Linux, there is a release process to convert the ACPICA git commits into
- Linux patches. The patches generated by this process are referred to as
- "linuxized ACPICA patches". The release process is carried out on a local
- copy the ACPICA git repository. Each commit in the monthly release is
- converted into a linuxized ACPICA patch. Together, they form the monthly
- ACPICA release patchset for the Linux ACPI community. This process is
- illustrated in the following figure:
+As the coding style adopted by the ACPICA project is not acceptable to
+Linux, there is a release process to convert the ACPICA git commits into
+Linux patches. The patches generated by this process are referred to as
+"linuxized ACPICA patches". The release process is carried out on a local
+copy of the ACPICA git repository. Each commit in the monthly release is
+converted into a linuxized ACPICA patch. Together, they form the monthly
+ACPICA release patchset for the Linux ACPI community. This process is
+illustrated in the following figure::
+-----------------------------+
| acpica / master (-) commits |
@@ -153,7 +168,7 @@ upstream.
Figure 2. ACPICA -> Linux Upstream Process
- NOTE:
+.. note::
A. Linuxize Utilities - Provided by the ACPICA repository, including a
utility located in source/tools/acpisrc folder and a number of
scripts located in generate/linux folder.
@@ -170,19 +185,20 @@ upstream.
following kernel configuration options:
CONFIG_ACPI/CONFIG_ACPI_DEBUG/CONFIG_ACPI_DEBUGGER
-3. ACPICA Divergences
+ACPICA Divergences
+==================
- Ideally, all of the ACPICA commits should be converted into Linux patches
- automatically without manual modifications, the "linux / master" tree should
- contain the ACPICA code that exactly corresponds to the ACPICA code
- contained in "new linuxized acpica" tree and it should be possible to run
- the release process fully automatically.
+Ideally, all of the ACPICA commits should be converted into Linux patches
+automatically without manual modifications; the "linux / master" tree should
+contain the ACPICA code that exactly corresponds to the ACPICA code
+contained in "new linuxized acpica" tree and it should be possible to run
+the release process fully automatically.
- As a matter of fact, however, there are source code differences between
- the ACPICA code in Linux and the upstream ACPICA code, referred to as
- "ACPICA Divergences".
+As a matter of fact, however, there are source code differences between
+the ACPICA code in Linux and the upstream ACPICA code, referred to as
+"ACPICA Divergences".
- The various sources of ACPICA divergences include:
+The various sources of ACPICA divergences include:
1. Legacy divergences - Before the current ACPICA release process was
established, there already had been divergences between Linux and
ACPICA. Over the past several years those divergences have been greatly
@@ -213,11 +229,12 @@ upstream.
rebased on the ACPICA side in order to offer better solutions, new ACPICA
divergences are generated.
-4. ACPICA Development
+ACPICA Development
+==================
- This paragraph guides Linux developers to use the ACPICA upstream release
- utilities to obtain Linux patches corresponding to upstream ACPICA commits
- before they become available from the ACPICA release process.
+This section shows Linux developers how to use the ACPICA upstream release
+utilities to obtain Linux patches corresponding to upstream ACPICA commits
+before they become available from the ACPICA release process.
1. Cherry-pick an ACPICA commit
@@ -225,7 +242,7 @@ upstream.
you want to cherry pick must be committed into the local repository.
Then the gen-patch.sh command can help to cherry-pick an ACPICA commit
- from the ACPICA local repository:
+ from the ACPICA local repository::
$ git clone https://github.com/acpica/acpica
$ cd acpica
@@ -240,7 +257,7 @@ upstream.
changes that haven't been applied to Linux yet.
You can generate the ACPICA release series yourself and rebase your code on
- top of the generated ACPICA release patches:
+ top of the generated ACPICA release patches::
$ git clone https://github.com/acpica/acpica
$ cd acpica
@@ -254,7 +271,7 @@ upstream.
3. Inspect the current divergences
If you have local copies of both Linux and upstream ACPICA, you can generate
- a diff file indicating the state of the current divergences:
+ a diff file indicating the state of the current divergences::
# git clone https://github.com/acpica/acpica
# git clone http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/Documentation/acpi/scan_handlers.txt b/Documentation/driver-api/acpi/scan_handlers.rst
index 3246ccf15992..7a197b3a33fc 100644
--- a/Documentation/acpi/scan_handlers.txt
+++ b/Documentation/driver-api/acpi/scan_handlers.rst
@@ -1,7 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+==================
ACPI Scan Handlers
+==================
+
+:Copyright: |copy| 2012, Intel Corporation
-Copyright (C) 2012, Intel Corporation
-Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
During system initialization and ACPI-based device hot-add, the ACPI namespace
is scanned in search of device objects that generally represent various pieces
@@ -30,14 +36,14 @@ to configure that link so that the kernel can use it.
Those additional configuration tasks usually depend on the type of the hardware
component represented by the given device node which can be determined on the
basis of the device node's hardware ID (HID). They are performed by objects
-called ACPI scan handlers represented by the following structure:
+called ACPI scan handlers represented by the following structure::
-struct acpi_scan_handler {
- const struct acpi_device_id *ids;
- struct list_head list_node;
- int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
- void (*detach)(struct acpi_device *dev);
-};
+ struct acpi_scan_handler {
+ const struct acpi_device_id *ids;
+ struct list_head list_node;
+ int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
+ void (*detach)(struct acpi_device *dev);
+ };
where ids is the list of IDs of device nodes the given handler is supposed to
take care of, list_node is the hook to the global list of ACPI scan handlers
diff --git a/Documentation/driver-api/device-io.rst b/Documentation/driver-api/device-io.rst
index b00b23903078..0e389378f71d 100644
--- a/Documentation/driver-api/device-io.rst
+++ b/Documentation/driver-api/device-io.rst
@@ -103,51 +103,6 @@ continuing execution::
ha->flags.ints_enabled = 0;
}
-In addition to write posting, on some large multiprocessing systems
-(e.g. SGI Challenge, Origin and Altix machines) posted writes won't be
-strongly ordered coming from different CPUs. Thus it's important to
-properly protect parts of your driver that do memory-mapped writes with
-locks and use the :c:func:`mmiowb()` to make sure they arrive in the
-order intended. Issuing a regular readX() will also ensure write ordering,
-but should only be used when the
-driver has to be sure that the write has actually arrived at the device
-(not that it's simply ordered with respect to other writes), since a
-full readX() is a relatively expensive operation.
-
-Generally, one should use :c:func:`mmiowb()` prior to releasing a spinlock
-that protects regions using :c:func:`writeb()` or similar functions that
-aren't surrounded by readb() calls, which will ensure ordering
-and flushing. The following pseudocode illustrates what might occur if
-write ordering isn't guaranteed via :c:func:`mmiowb()` or one of the
-readX() functions::
-
- CPU A: spin_lock_irqsave(&dev_lock, flags)
- CPU A: ...
- CPU A: writel(newval, ring_ptr);
- CPU A: spin_unlock_irqrestore(&dev_lock, flags)
- ...
- CPU B: spin_lock_irqsave(&dev_lock, flags)
- CPU B: writel(newval2, ring_ptr);
- CPU B: ...
- CPU B: spin_unlock_irqrestore(&dev_lock, flags)
-
-In the case above, newval2 could be written to ring_ptr before newval.
-Fixing it is easy though::
-
- CPU A: spin_lock_irqsave(&dev_lock, flags)
- CPU A: ...
- CPU A: writel(newval, ring_ptr);
- CPU A: mmiowb(); /* ensure no other writes beat us to the device */
- CPU A: spin_unlock_irqrestore(&dev_lock, flags)
- ...
- CPU B: spin_lock_irqsave(&dev_lock, flags)
- CPU B: writel(newval2, ring_ptr);
- CPU B: ...
- CPU B: mmiowb();
- CPU B: spin_unlock_irqrestore(&dev_lock, flags)
-
-See tg3.c for a real world example of how to use :c:func:`mmiowb()`
-
PCI ordering rules also guarantee that PIO read responses arrive after any
outstanding DMA writes from that bus, since for some devices the result of
a readb() call may signal to the driver that a DMA transaction is
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
index fbbb2831f29f..45953f171500 100644
--- a/Documentation/driver-api/dmaengine/client.rst
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -168,6 +168,13 @@ The details of these operations are:
dmaengine_submit() will not start the DMA operation, it merely adds
it to the pending queue. For this, see step 5, dma_async_issue_pending.
+ .. note::
+
+ After calling ``dmaengine_submit()`` the submitted transfer descriptor
+ (``struct dma_async_tx_descriptor``) belongs to the DMA engine.
+ Consequently, the client must treat the pointer to that descriptor as
+ invalid.
+
5. Issue pending DMA requests and wait for callback notification
The transactions in the pending queue can be activated by calling the
diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst
index 8d81f1a7169b..e78d070bb468 100644
--- a/Documentation/driver-api/dmaengine/dmatest.rst
+++ b/Documentation/driver-api/dmaengine/dmatest.rst
@@ -59,6 +59,7 @@ parameter, that specific channel is requested using the dmaengine and a thread
is created with the existing parameters. This thread is set as pending
and will be executed once run is set to 1. Any parameters set after the thread
is created are not applied.
+
.. hint::
available channel list could be extracted by running the following command::
diff --git a/Documentation/driver-api/iio/buffers.rst b/Documentation/driver-api/iio/buffers.rst
index 02c99a6bee18..e9036ef9f8f4 100644
--- a/Documentation/driver-api/iio/buffers.rst
+++ b/Documentation/driver-api/iio/buffers.rst
@@ -26,7 +26,7 @@ IIO buffer setup
================
The meta information associated with a channel reading placed in a buffer is
-called a scan element . The important bits configuring scan elements are
+called a scan element. The important bits configuring scan elements are
exposed to userspace applications via the
:file:`/sys/bus/iio/iio:device{X}/scan_elements/*` directory. This file contains
attributes of the following form:
diff --git a/Documentation/driver-api/iio/core.rst b/Documentation/driver-api/iio/core.rst
index 9a34ae03b679..b0bc0c028cc5 100644
--- a/Documentation/driver-api/iio/core.rst
+++ b/Documentation/driver-api/iio/core.rst
@@ -2,8 +2,8 @@
Core elements
=============
-The Industrial I/O core offers a unified framework for writing drivers for
-many different types of embedded sensors. a standard interface to user space
+The Industrial I/O core offers both a unified framework for writing drivers for
+many different types of embedded sensors and a standard interface to user space
applications manipulating sensors. The implementation can be found under
:file:`drivers/iio/industrialio-*`
@@ -11,7 +11,7 @@ Industrial I/O Devices
----------------------
* struct :c:type:`iio_dev` - industrial I/O device
-* :c:func:`iio_device_alloc()` - alocate an :c:type:`iio_dev` from a driver
+* :c:func:`iio_device_alloc()` - allocate an :c:type:`iio_dev` from a driver
* :c:func:`iio_device_free()` - free an :c:type:`iio_dev` from a driver
* :c:func:`iio_device_register()` - register a device with the IIO subsystem
* :c:func:`iio_device_unregister()` - unregister a device from the IIO
diff --git a/Documentation/driver-api/iio/hw-consumer.rst b/Documentation/driver-api/iio/hw-consumer.rst
index 8facce6a6733..e0fe0b98230e 100644
--- a/Documentation/driver-api/iio/hw-consumer.rst
+++ b/Documentation/driver-api/iio/hw-consumer.rst
@@ -1,7 +1,7 @@
===========
HW consumer
===========
-An IIO device can be directly connected to another device in hardware. in this
+An IIO device can be directly connected to another device in hardware. In this
case the buffers between IIO provider and IIO consumer are handled by hardware.
The Industrial I/O HW consumer offers a way to bond these IIO devices without
software buffer for data. The implementation can be found under
diff --git a/Documentation/driver-api/iio/triggers.rst b/Documentation/driver-api/iio/triggers.rst
index f89d37e7dd82..5c2156de6284 100644
--- a/Documentation/driver-api/iio/triggers.rst
+++ b/Documentation/driver-api/iio/triggers.rst
@@ -38,7 +38,7 @@ There are two locations in sysfs related to triggers:
* :file:`/sys/bus/iio/devices/iio:device{X}/trigger/*`, this directory is
created once the device supports a triggered buffer. We can associate a
- trigger with our device by writing the trigger's name in the
+ trigger with our device by writing the trigger's name in the
:file:`current_trigger` file.
IIO trigger setup
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index c0b600ed9961..aa87075c7846 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -56,6 +56,7 @@ available subsections can be seen below.
slimbus
soundwire/index
fpga/index
+ acpi/index
.. only:: subproject and html
diff --git a/Documentation/driver-api/pci/p2pdma.rst b/Documentation/driver-api/pci/p2pdma.rst
index 6d85b5a2598d..44deb52beeb4 100644
--- a/Documentation/driver-api/pci/p2pdma.rst
+++ b/Documentation/driver-api/pci/p2pdma.rst
@@ -132,10 +132,6 @@ precludes passing these pages to userspace.
P2P memory is also technically IO memory but should never have any side
effects behind it. Thus, the order of loads and stores should not be important
and ioreadX(), iowriteX() and friends should not be necessary.
-However, as the memory is not cache coherent, if access ever needs to
-be protected by a spinlock then :c:func:`mmiowb()` must be used before
-unlocking the lock. (See ACQUIRES VS I/O ACCESSES in
-Documentation/memory-barriers.txt)
P2P DMA Support Library
diff --git a/Documentation/driver-api/pinctl.rst b/Documentation/driver-api/pinctl.rst
index 6cb68d67fa75..2bb1bc484278 100644
--- a/Documentation/driver-api/pinctl.rst
+++ b/Documentation/driver-api/pinctl.rst
@@ -274,15 +274,6 @@ configuration in the pin controller ops like this::
.confops = &foo_pconf_ops,
};
-Since some controllers have special logic for handling entire groups of pins
-they can exploit the special whole-group pin control function. The
-pin_config_group_set() callback is allowed to return the error code -EAGAIN,
-for groups it does not want to handle, or if it just wants to do some
-group-level handling and then fall through to iterate over all pins, in which
-case each individual pin will be treated by separate pin_config_set() calls as
-well.
-
-
Interaction with the GPIO subsystem
===================================
diff --git a/Documentation/driver-api/pm/cpuidle.rst b/Documentation/driver-api/pm/cpuidle.rst
index 5842ab621a58..006cf6db40c6 100644
--- a/Documentation/driver-api/pm/cpuidle.rst
+++ b/Documentation/driver-api/pm/cpuidle.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
.. |struct cpuidle_governor| replace:: :c:type:`struct cpuidle_governor <cpuidle_governor>`
.. |struct cpuidle_device| replace:: :c:type:`struct cpuidle_device <cpuidle_device>`
.. |struct cpuidle_driver| replace:: :c:type:`struct cpuidle_driver <cpuidle_driver>`
@@ -7,9 +10,9 @@
CPU Idle Time Management
========================
-::
+:Copyright: |copy| 2019 Intel Corporation
- Copyright (c) 2019 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
CPU Idle Time Management Subsystem
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index 090c151aa86b..30835683616a 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
.. |struct dev_pm_ops| replace:: :c:type:`struct dev_pm_ops <dev_pm_ops>`
.. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain <dev_pm_domain>`
.. |struct bus_type| replace:: :c:type:`struct bus_type <bus_type>`
@@ -12,11 +15,12 @@
Device Power Management Basics
==============================
-::
+:Copyright: |copy| 2010-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+:Copyright: |copy| 2010 Alan Stern <stern@rowland.harvard.edu>
+:Copyright: |copy| 2016 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2010-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- Copyright (c) 2010 Alan Stern <stern@rowland.harvard.edu>
- Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Most of the code in Linux is device drivers, so most of the Linux power
management (PM) code is also driver-specific. Most drivers will do very
diff --git a/Documentation/driver-api/pm/index.rst b/Documentation/driver-api/pm/index.rst
index 56975c6bc789..c2a9ef8d115c 100644
--- a/Documentation/driver-api/pm/index.rst
+++ b/Documentation/driver-api/pm/index.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
===============================
CPU and Device Power Management
===============================
diff --git a/Documentation/driver-api/pm/notifiers.rst b/Documentation/driver-api/pm/notifiers.rst
index 62f860026992..186435c43b77 100644
--- a/Documentation/driver-api/pm/notifiers.rst
+++ b/Documentation/driver-api/pm/notifiers.rst
@@ -1,10 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
=============================
Suspend/Hibernation Notifiers
=============================
-::
+:Copyright: |copy| 2016 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
There are some operations that subsystems or drivers may want to carry out
before hibernation/suspend or after restore/resume, but they require the system
diff --git a/Documentation/driver-api/pm/types.rst b/Documentation/driver-api/pm/types.rst
index 3ebdecc54104..73a231caf764 100644
--- a/Documentation/driver-api/pm/types.rst
+++ b/Documentation/driver-api/pm/types.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
==================================
Device Power Management Data Types
==================================
diff --git a/Documentation/driver-api/usb/power-management.rst b/Documentation/driver-api/usb/power-management.rst
index 79beb807996b..4a74cf6f2797 100644
--- a/Documentation/driver-api/usb/power-management.rst
+++ b/Documentation/driver-api/usb/power-management.rst
@@ -370,11 +370,15 @@ autosuspend the interface's device. When the usage counter is = 0
then the interface is considered to be idle, and the kernel may
autosuspend the device.
-Drivers need not be concerned about balancing changes to the usage
-counter; the USB core will undo any remaining "get"s when a driver
-is unbound from its interface. As a corollary, drivers must not call
-any of the ``usb_autopm_*`` functions after their ``disconnect``
-routine has returned.
+Drivers must be careful to balance their overall changes to the usage
+counter. Unbalanced "get"s will remain in effect when a driver is
+unbound from its interface, preventing the device from going into
+runtime suspend should the interface be bound to a driver again. On
+the other hand, drivers are allowed to achieve this balance by calling
+the ``usb_autopm_*`` functions even after their ``disconnect`` routine
+has returned -- say from within a work-queue routine -- provided they
+retain an active reference to the interface (via ``usb_get_intf`` and
+``usb_put_intf``).
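+
+For example, a driver might balance a leftover "get" from a work-queue
+routine after disconnect (a sketch; the ``my_*`` names are illustrative
+and locking is elided)::
+
+	static void my_balance_work(struct work_struct *work)
+	{
+		struct my_dev *mydev = container_of(work, struct my_dev, work);
+
+		/* drop the outstanding "get" left over from before disconnect */
+		usb_autopm_put_interface(mydev->intf);
+
+		/* release the reference taken earlier with usb_get_intf() */
+		usb_put_intf(mydev->intf);
+	}
+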
Drivers using the async routines are responsible for their own
synchronization and mutual exclusion.
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index b277cafce71e..d7d6f01e81ff 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -242,9 +242,11 @@ certainly invest a bit more effort into libata core layer).
CLOCK
devm_clk_get()
+ devm_clk_get_optional()
devm_clk_put()
devm_clk_hw_register()
devm_of_clk_add_hw_provider()
+ devm_clk_hw_register_clkdev()
DMA
dmaenginem_async_device_register()
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 4d1b7b4ccfaf..a17517a083c3 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -195,7 +195,7 @@ o #include <linux/fault-inject.h>
o define the fault attributes
- DECLARE_FAULT_INJECTION(name);
+ DECLARE_FAULT_ATTR(name);
Please see the definition of struct fault_attr in fault-inject.h
for details.
diff --git a/Documentation/features/time/modern-timekeeping/arch-support.txt b/Documentation/features/time/modern-timekeeping/arch-support.txt
index 2855dfe2464d..1d46da165b75 100644
--- a/Documentation/features/time/modern-timekeeping/arch-support.txt
+++ b/Documentation/features/time/modern-timekeeping/arch-support.txt
@@ -15,7 +15,7 @@
| h8300: | ok |
| hexagon: | ok |
| ia64: | ok |
- | m68k: | TODO |
+ | m68k: | ok |
| microblaze: | ok |
| mips: | ok |
| nds32: | ok |
diff --git a/Documentation/filesystems/api-summary.rst b/Documentation/filesystems/api-summary.rst
new file mode 100644
index 000000000000..aa51ffcfa029
--- /dev/null
+++ b/Documentation/filesystems/api-summary.rst
@@ -0,0 +1,150 @@
+=============================
+Linux Filesystems API summary
+=============================
+
+This section contains API-level documentation, mostly taken from the source
+code itself.
+
+The Linux VFS
+=============
+
+The Filesystem types
+--------------------
+
+.. kernel-doc:: include/linux/fs.h
+ :internal:
+
+The Directory Cache
+-------------------
+
+.. kernel-doc:: fs/dcache.c
+ :export:
+
+.. kernel-doc:: include/linux/dcache.h
+ :internal:
+
+Inode Handling
+--------------
+
+.. kernel-doc:: fs/inode.c
+ :export:
+
+.. kernel-doc:: fs/bad_inode.c
+ :export:
+
+Registration and Superblocks
+----------------------------
+
+.. kernel-doc:: fs/super.c
+ :export:
+
+File Locks
+----------
+
+.. kernel-doc:: fs/locks.c
+ :export:
+
+.. kernel-doc:: fs/locks.c
+ :internal:
+
+Other Functions
+---------------
+
+.. kernel-doc:: fs/mpage.c
+ :export:
+
+.. kernel-doc:: fs/namei.c
+ :export:
+
+.. kernel-doc:: fs/buffer.c
+ :export:
+
+.. kernel-doc:: block/bio.c
+ :export:
+
+.. kernel-doc:: fs/seq_file.c
+ :export:
+
+.. kernel-doc:: fs/filesystems.c
+ :export:
+
+.. kernel-doc:: fs/fs-writeback.c
+ :export:
+
+.. kernel-doc:: fs/block_dev.c
+ :export:
+
+.. kernel-doc:: fs/anon_inodes.c
+ :export:
+
+.. kernel-doc:: fs/attr.c
+ :export:
+
+.. kernel-doc:: fs/d_path.c
+ :export:
+
+.. kernel-doc:: fs/dax.c
+ :export:
+
+.. kernel-doc:: fs/direct-io.c
+ :export:
+
+.. kernel-doc:: fs/file_table.c
+ :export:
+
+.. kernel-doc:: fs/libfs.c
+ :export:
+
+.. kernel-doc:: fs/posix_acl.c
+ :export:
+
+.. kernel-doc:: fs/stat.c
+ :export:
+
+.. kernel-doc:: fs/sync.c
+ :export:
+
+.. kernel-doc:: fs/xattr.c
+ :export:
+
+The proc filesystem
+===================
+
+sysctl interface
+----------------
+
+.. kernel-doc:: kernel/sysctl.c
+ :export:
+
+proc filesystem interface
+-------------------------
+
+.. kernel-doc:: fs/proc/base.c
+ :internal:
+
+Events based on file descriptors
+================================
+
+.. kernel-doc:: fs/eventfd.c
+ :export:
+
+The Filesystem for Exporting Kernel Objects
+===========================================
+
+.. kernel-doc:: fs/sysfs/file.c
+ :export:
+
+.. kernel-doc:: fs/sysfs/symlink.c
+ :export:
+
+The debugfs filesystem
+======================
+
+debugfs interface
+-----------------
+
+.. kernel-doc:: fs/debugfs/inode.c
+ :export:
+
+.. kernel-doc:: fs/debugfs/file.c
+ :export:
diff --git a/Documentation/filesystems/binderfs.rst b/Documentation/filesystems/binderfs.rst
new file mode 100644
index 000000000000..c009671f8434
--- /dev/null
+++ b/Documentation/filesystems/binderfs.rst
@@ -0,0 +1,68 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+The Android binderfs Filesystem
+===============================
+
+Android binderfs is a filesystem for the Android binder IPC mechanism. It
+allows one to dynamically add and remove binder devices at runtime. Binder devices
+located in a new binderfs instance are independent of binder devices located in
+other binderfs instances. Mounting a new binderfs instance makes it possible
+to get a set of private binder devices.
+
+Mounting binderfs
+-----------------
+
+Android binderfs can be mounted with::
+
+ mkdir /dev/binderfs
+ mount -t binder binder /dev/binderfs
+
+at which point a new instance of binderfs will show up at ``/dev/binderfs``.
+In a fresh instance of binderfs no binder devices will be present. There will
+only be a ``binder-control`` device which serves as the request handler for
+binderfs. Mounting another binderfs instance at a different location will
+create a new and separate instance from all other binderfs mounts. This is
+identical to the behavior of e.g. ``devpts`` and ``tmpfs``. The Android
+binderfs filesystem can be mounted in user namespaces.
+
+Options
+-------
+max
+ binderfs instances can be mounted with a limit on the number of binder
+ devices that can be allocated. The ``max=<count>`` mount option serves as
+  a per-instance limit. If ``max=<count>`` is set then only ``<count>``
+  binder devices can be allocated in this binderfs instance (see the
+  example below).
+
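+For example, to mount an instance limited to 4096 binder devices (a usage
+sketch following the ``max`` option described above)::
+
+  mount -t binder -o max=4096 binder /dev/binderfs
+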
+Allocating binder Devices
+-------------------------
+
+.. _ioctl: http://man7.org/linux/man-pages/man2/ioctl.2.html
+
+To allocate a new binder device in a binderfs instance a request needs to be
+sent through the ``binder-control`` device node. A request is sent in the form
+of an `ioctl() <ioctl_>`_.
+
+What a program needs to do is to open the ``binder-control`` device node and
+send a ``BINDER_CTL_ADD`` request to the kernel. Users of binderfs need to
+tell the kernel which name the new binder device should get. By default a name
+can only contain up to ``BINDERFS_MAX_NAME`` chars including the terminating
+zero byte.
+
+Once the request is made via an `ioctl() <ioctl_>`_ passing a ``struct
+binder_device`` with the name to the kernel it will allocate a new binder
+device and return the major and minor number of the new device in the struct
+(this is necessary because binderfs allocates its major device number
+dynamically). After the `ioctl() <ioctl_>`_ returns there will be a new
+binder device located under ``/dev/binderfs`` with the chosen name.
+
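+A minimal sketch of the userspace side might look like this (the header
+path and the ``binderfs_device`` struct name are assumptions that should
+be checked against the installed kernel headers; error handling is
+abbreviated)::
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <string.h>
+  #include <sys/ioctl.h>
+  #include <unistd.h>
+  #include <linux/android/binderfs.h>
+
+  int main(void)
+  {
+          struct binderfs_device device = { 0 };
+          int fd, ret;
+
+          strcpy(device.name, "my-binder");
+
+          fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
+          if (fd < 0)
+                  return 1;
+
+          ret = ioctl(fd, BINDER_CTL_ADD, &device);
+          if (ret < 0)
+                  return 1;
+
+          /* the kernel filled in the allocated device numbers */
+          printf("%s allocated as %u:%u\n",
+                 device.name, device.major, device.minor);
+          close(fd);
+          return 0;
+  }
+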
+Deleting binder Devices
+-----------------------
+
+.. _unlink: http://man7.org/linux/man-pages/man2/unlink.2.html
+.. _rm: http://man7.org/linux/man-pages/man1/rm.1.html
+
+Binderfs binder devices can be deleted via `unlink() <unlink_>`_. This means
+that the `rm <rm_>`_ tool can be used to delete them. Note that the
+``binder-control`` device cannot be deleted since this would make the binderfs
+instance unusable. The ``binder-control`` device will be deleted when the
+binderfs instance is unmounted and all references to it have been dropped.
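+
+For example, to delete the device allocated in the sketch above::
+
+  rm /dev/binderfs/my-binder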
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
index 1177052701e1..d2c6a5ccf0f5 100644
--- a/Documentation/filesystems/ceph.txt
+++ b/Documentation/filesystems/ceph.txt
@@ -22,9 +22,7 @@ In contrast to cluster filesystems like GFS, OCFS2, and GPFS that rely
on symmetric access by all clients to shared block devices, Ceph
separates data and metadata management into independent server
clusters, similar to Lustre. Unlike Lustre, however, metadata and
-storage nodes run entirely as user space daemons. Storage nodes
-utilize btrfs to store data objects, leveraging its advanced features
-(checksumming, metadata replication, etc.). File data is striped
+storage nodes run entirely as user space daemons. File data is striped
across storage nodes in large chunks to distribute workload and
facilitate high throughputs. When storage nodes fail, data is
re-replicated in a distributed fashion by the storage nodes themselves
@@ -118,6 +116,10 @@ Mount Options
of a non-responsive Ceph file system. The default is 30
seconds.
+ caps_max=X
+	Specify the maximum number of caps to hold. Unused caps are released
+	when the number of caps exceeds the limit. The default is 0 (no limit).
+
rbytes
When stat() is called on a directory, set st_size to 'rbytes',
the summation of file sizes over all files nested beneath that
@@ -160,11 +162,11 @@ More Information
================
For more information on Ceph, see the home page at
- http://ceph.newdream.net/
+ https://ceph.com/
The Linux kernel client source tree is available at
- git://ceph.newdream.net/git/ceph-client.git
+ https://github.com/ceph/ceph-client.git
git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
and the source for the full system is at
- git://ceph.newdream.net/git/ceph.git
+ https://github.com/ceph/ceph.git
diff --git a/Documentation/filesystems/cifs/TODO b/Documentation/filesystems/cifs/TODO
index 66b3f54aa6dc..9267f3fb131f 100644
--- a/Documentation/filesystems/cifs/TODO
+++ b/Documentation/filesystems/cifs/TODO
@@ -111,7 +111,8 @@ negotiated size) and send larger write sizes to modern servers.
5) Continue to extend the smb3 "buildbot" which does automated xfstesting
against Windows, Samba and Azure currently - to add additional tests and
-to allow the buildbot to execute the tests faster.
+to allow the buildbot to execute the tests faster. The URL for the
+buildbot is: http://smb3-test-rhel-75.southcentralus.cloudapp.azure.com
6) Address various coverity warnings (most are not bugs per-se, but
the more warnings are addressed, the easier it is to spot real
diff --git a/Documentation/filesystems/cifs/cifs.txt b/Documentation/filesystems/cifs/cifs.txt
index 67756607246e..1be3d21c286e 100644
--- a/Documentation/filesystems/cifs/cifs.txt
+++ b/Documentation/filesystems/cifs/cifs.txt
@@ -1,16 +1,21 @@
This is the client VFS module for the SMB3 NAS protocol as well
- older dialects such as the Common Internet File System (CIFS)
+ as for older dialects such as the Common Internet File System (CIFS)
protocol which was the successor to the Server Message Block
(SMB) protocol, the native file sharing mechanism for most early
PC operating systems. New and improved versions of CIFS are now
- called SMB2 and SMB3. These dialects are also supported by the
- CIFS VFS module. CIFS is fully supported by network
- file servers such as Windows 2000, 2003, 2008, 2012 and 2016
- as well by Samba (which provides excellent CIFS
- server support for Linux and many other operating systems), Apple
- systems, as well as most Network Attached Storage vendors, so
- this network filesystem client can mount to a wide variety of
- servers.
+ called SMB2 and SMB3. Use of SMB3 (and later, including SMB3.1.1)
+ is strongly preferred over using older dialects like CIFS for
+ security reasons. All modern dialects, including the most recent,
+ SMB3.1.1, are supported by the CIFS VFS module. The SMB3 protocol
+ is implemented and supported by all major file servers
+ such as all modern versions of Windows (including Windows 2016
+ Server), as well as by Samba (which provides excellent
+ CIFS/SMB2/SMB3 server support and tools for Linux and many other
+ operating systems). Apple systems also support SMB3 well, as
+ do most Network Attached Storage vendors, so this network
+ filesystem client can mount to a wide variety of systems.
+ It also supports mounting to the cloud (for example
+ Microsoft Azure), including the necessary security features.
The intent of this module is to provide the most advanced network
file system function for SMB3 compliant servers, including advanced
@@ -24,12 +29,17 @@
cluster file systems for fileserving in some Linux to Linux environments,
not just in Linux to Windows (or Linux to Mac) environments.
- This filesystem has an mount utility (mount.cifs) that can be obtained from
+ This filesystem has a mount utility (mount.cifs) and various user space
+ tools (including smbinfo and setcifsacl) that can be obtained from
- https://ftp.samba.org/pub/linux-cifs/cifs-utils/
+ https://git.samba.org/?p=cifs-utils.git
+ or
+ git://git.samba.org/cifs-utils.git
- It must be installed in the directory with the other mount helpers.
+ mount.cifs should be installed in the directory with the other mount helpers.
For more information on the module see the project wiki page at
+ https://wiki.samba.org/index.php/LinuxCIFS
+ and
https://wiki.samba.org/index.php/LinuxCIFS_utils
diff --git a/Documentation/filesystems/exofs.txt b/Documentation/filesystems/exofs.txt
deleted file mode 100644
index 23583a136975..000000000000
--- a/Documentation/filesystems/exofs.txt
+++ /dev/null
@@ -1,185 +0,0 @@
-===============================================================================
-WHAT IS EXOFS?
-===============================================================================
-
-exofs is a file system that uses an OSD and exports the API of a normal Linux
-file system. Users access exofs like any other local file system, and exofs
-will in turn issue commands to the local OSD initiator.
-
-OSD is a new T10 command set that views storage devices not as a large/flat
-array of sectors but as a container of objects, each having a length, quota,
-time attributes and more. Each object is addressed by a 64bit ID, and is
-contained in a 64bit ID partition. Each object has associated attributes
-attached to it, which are integral part of the object and provide metadata about
-the object. The standard defines some common obligatory attributes, but user
-attributes can be added as needed.
-
-===============================================================================
-ENVIRONMENT
-===============================================================================
-
-To use this file system, you need to have an object store to run it on. You
-may download a target from:
-http://open-osd.org
-
-See Documentation/scsi/osd.txt for how to setup a working osd environment.
-
-===============================================================================
-USAGE
-===============================================================================
-
-1. Download and compile exofs and open-osd initiator:
- You need an external Kernel source tree or kernel headers from your
- distribution. (anything based on 2.6.26 or later).
-
- a. download open-osd including exofs source using:
- [parent-directory]$ git clone git://git.open-osd.org/open-osd.git
-
- b. Build the library module like this:
- [parent-directory]$ make -C KSRC=$(KER_DIR) open-osd
-
- This will build both the open-osd initiator as well as the exofs kernel
- module. Use whatever parameters you compiled your Kernel with and
- $(KER_DIR) above pointing to the Kernel you compile against. See the file
- open-osd/top-level-Makefile for an example.
-
-2. Get the OSD initiator and target set up properly, and login to the target.
- See Documentation/scsi/osd.txt for farther instructions. Also see ./do-osd
- for example script that does all these steps.
-
-3. Insmod the exofs.ko module:
- [exofs]$ insmod exofs.ko
-
-4. Make sure the directory where you want to mount exists. If not, create it.
- (For example, mkdir /mnt/exofs)
-
-5. At first run you will need to invoke the mkfs.exofs application
-
- As an example, this will create the file system on:
- /dev/osd0 partition ID 65536
-
- mkfs.exofs --pid=65536 --format /dev/osd0
-
- The --format is optional. If not specified, no OSD_FORMAT will be
- performed and a clean file system will be created in the specified pid,
- in the available space of the target. (Use --format=size_in_meg to limit
- the total LUN space available)
-
- If pid already exists, it will be deleted and a new one will be created in
- its place. Be careful.
-
- An exofs lives inside a single OSD partition. You can create multiple exofs
- filesystems on the same device using multiple pids.
-
- (run mkfs.exofs without any parameters for usage help message)
-
-6. Mount the file system.
-
- For example, to mount /dev/osd0, partition ID 0x10000 on /mnt/exofs:
-
- mount -t exofs -o pid=65536 /dev/osd0 /mnt/exofs/
-
-7. For reference (See do-exofs example script):
- do-exofs start - an example of how to perform the above steps.
- do-exofs stop - an example of how to unmount the file system.
- do-exofs format - an example of how to format and mkfs a new exofs.
-
-8. Extra compilation flags (uncomment in fs/exofs/Kbuild):
- CONFIG_EXOFS_DEBUG - for debug messages and extra checks.
-
-===============================================================================
-exofs mount options
-===============================================================================
-Similar to any mount command:
- mount -t exofs -o exofs_options /dev/osdX mount_exofs_directory
-
-Where:
- -t exofs: specifies the exofs file system
-
- /dev/osdX: X is a decimal number. /dev/osdX was created after a successful
- login into an OSD target.
-
- mount_exofs_directory: The directory to mount the file system on
-
- exofs specific options: Options are separated by commas (,)
- pid=<integer> - The partition number to mount/create as
- container of the filesystem.
- This option is mandatory. integer can be
- Hex by pre-pending an 0x to the number.
- osdname=<id> - Mount by a device's osdname.
- osdname is usually a 36 character uuid of the
- form "d2683732-c906-4ee1-9dbd-c10c27bb40df".
- It is one of the device's uuid specified in the
- mkfs.exofs format command.
- If this option is specified then the /dev/osdX
- above can be empty and is ignored.
- to=<integer> - Timeout in ticks for a single command.
- default is (60 * HZ) [for debugging only]
-
-===============================================================================
-DESIGN
-===============================================================================
-
-* The file system control block (AKA on-disk superblock) resides in an object
- with a special ID (defined in common.h).
- Information included in the file system control block is used to fill the
- in-memory superblock structure at mount time. This object is created before
- the file system is used by mkexofs.c. It contains information such as:
- - The file system's magic number
- - The next inode number to be allocated
-
-* Each file resides in its own object and contains the data (and it will be
- possible to extend the file over multiple objects, though this has not been
- implemented yet).
-
-* A directory is treated as a file, and essentially contains a list of <file
- name, inode #> pairs for files that are found in that directory. The object
- IDs correspond to the files' inode numbers and will be allocated according to
- a bitmap (stored in a separate object). Now they are allocated using a
- counter.
-
-* Each file's control block (AKA on-disk inode) is stored in its object's
- attributes. This applies to both regular files and other types (directories,
- device files, symlinks, etc.).
-
-* Credentials are generated per object (inode and superblock) when they are
- created in memory (read from disk or created). The credential works for all
- operations and is used as long as the object remains in memory.
-
-* Async OSD operations are used whenever possible, but the target may execute
- them out of order. The operations that concern us are create, delete,
- readpage, writepage, update_inode, and truncate. The following pairs of
- operations should execute in the order written, and we need to prevent them
- from executing in reverse order:
- - The following are handled with the OBJ_CREATED and OBJ_2BCREATED
- flags. OBJ_CREATED is set when we know the object exists on the OSD -
- in create's callback function, and when we successfully do a
- read_inode.
- OBJ_2BCREATED is set in the beginning of the create function, so we
- know that we should wait.
- - create/delete: delete should wait until the object is created
- on the OSD.
- - create/readpage: readpage should be able to return a page
- full of zeroes in this case. If there was a write already
- en-route (i.e. create, writepage, readpage) then the page
- would be locked, and so it would really be the same as
- create/writepage.
- - create/writepage: if writepage is called for a sync write, it
- should wait until the object is created on the OSD.
- Otherwise, it should just return.
- - create/truncate: truncate should wait until the object is
- created on the OSD.
- - create/update_inode: update_inode should wait until the
- object is created on the OSD.
- - Handled by VFS locks:
- - readpage/delete: shouldn't happen because of page lock.
- - writepage/delete: shouldn't happen because of page lock.
- - readpage/writepage: shouldn't happen because of page lock.
-
-===============================================================================
-LICENSE/COPYRIGHT
-===============================================================================
-The exofs file system is based on ext2 v0.5b (distributed with the Linux kernel
-version 2.6.10). All files include the original copyrights, and the license
-is GPL version 2 (only version 2, as is true for the Linux kernel). The
-Linux kernel can be downloaded from www.kernel.org.
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e46c2147ddf8..f7b5e4ff0de3 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -126,6 +126,8 @@ disable_ext_identify Disable the extension list configured by mkfs, so f2fs
does not aware of cold files such as media files.
inline_xattr Enable the inline xattrs feature.
noinline_xattr Disable the inline xattrs feature.
+inline_xattr_size=%u Support configuring inline xattr size; it depends on
+                     the flexible inline xattr feature.
inline_data Enable the inline data feature: New created small(<~3.4k)
files can be written into inode block.
inline_dentry Enable the inline dir feature: data in new created
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 3a7b60521b94..08c23b60e016 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -343,9 +343,9 @@ FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors:
- ``ENOTEMPTY``: the file is unencrypted and is a nonempty directory
- ``ENOTTY``: this type of filesystem does not implement encryption
- ``EOPNOTSUPP``: the kernel was not configured with encryption
- support for this filesystem, or the filesystem superblock has not
+ support for filesystems, or the filesystem superblock has not
had encryption enabled on it. (For example, to use encryption on an
- ext4 filesystem, CONFIG_EXT4_ENCRYPTION must be enabled in the
+ ext4 filesystem, CONFIG_FS_ENCRYPTION must be enabled in the
kernel config, and the superblock must have had the "encrypt"
feature flag enabled using ``tune2fs -O encrypt`` or ``mkfs.ext4 -O
encrypt``.)
@@ -451,10 +451,18 @@ astute users may notice some differences in behavior:
- Unencrypted files, or files encrypted with a different encryption
policy (i.e. different key, modes, or flags), cannot be renamed or
linked into an encrypted directory; see `Encryption policy
- enforcement`_. Attempts to do so will fail with EPERM. However,
+ enforcement`_. Attempts to do so will fail with EXDEV. However,
encrypted files can be renamed within an encrypted directory, or
into an unencrypted directory.
+ Note: "moving" an unencrypted file into an encrypted directory, e.g.
+ with the `mv` program, is implemented in userspace by a copy
+ followed by a delete. Be aware that the original unencrypted data
+ may remain recoverable from free space on the disk; prefer to keep
+ all files encrypted from the very beginning. The `shred` program
+ may be used to overwrite the source files but isn't guaranteed to be
+ effective on all filesystems and storage devices.
+
- Direct I/O is not supported on encrypted files. Attempts to use
direct I/O on such files will fall back to buffered I/O.
@@ -541,7 +549,7 @@ not be encrypted.
Except for those special files, it is forbidden to have unencrypted
files, or files encrypted with a different encryption policy, in an
encrypted directory tree. Attempts to link or rename such a file into
-an encrypted directory will fail with EPERM. This is also enforced
+an encrypted directory will fail with EXDEV. This is also enforced
during ->lookup() to provide limited protection against offline
attacks that try to disable or downgrade encryption in known locations
where applications may later write sensitive data. It is recommended
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 605befab300b..1131c34d77f6 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -1,382 +1,43 @@
-=====================
-Linux Filesystems API
-=====================
+===============================
+Filesystems in the Linux kernel
+===============================
-The Linux VFS
-=============
+This under-development manual will, some glorious day, provide
+comprehensive information on how the Linux virtual filesystem (VFS) layer
+works, along with the filesystems that sit below it. For now, what we have
+can be found below.
-The Filesystem types
---------------------
-
-.. kernel-doc:: include/linux/fs.h
- :internal:
-
-The Directory Cache
--------------------
-
-.. kernel-doc:: fs/dcache.c
- :export:
-
-.. kernel-doc:: include/linux/dcache.h
- :internal:
-
-Inode Handling
---------------
-
-.. kernel-doc:: fs/inode.c
- :export:
-
-.. kernel-doc:: fs/bad_inode.c
- :export:
-
-Registration and Superblocks
-----------------------------
-
-.. kernel-doc:: fs/super.c
- :export:
-
-File Locks
-----------
-
-.. kernel-doc:: fs/locks.c
- :export:
-
-.. kernel-doc:: fs/locks.c
- :internal:
-
-Other Functions
----------------
-
-.. kernel-doc:: fs/mpage.c
- :export:
-
-.. kernel-doc:: fs/namei.c
- :export:
-
-.. kernel-doc:: fs/buffer.c
- :export:
-
-.. kernel-doc:: block/bio.c
- :export:
-
-.. kernel-doc:: fs/seq_file.c
- :export:
-
-.. kernel-doc:: fs/filesystems.c
- :export:
-
-.. kernel-doc:: fs/fs-writeback.c
- :export:
-
-.. kernel-doc:: fs/block_dev.c
- :export:
-
-.. kernel-doc:: fs/anon_inodes.c
- :export:
-
-.. kernel-doc:: fs/attr.c
- :export:
-
-.. kernel-doc:: fs/d_path.c
- :export:
-
-.. kernel-doc:: fs/dax.c
- :export:
-
-.. kernel-doc:: fs/direct-io.c
- :export:
-
-.. kernel-doc:: fs/file_table.c
- :export:
-
-.. kernel-doc:: fs/libfs.c
- :export:
-
-.. kernel-doc:: fs/posix_acl.c
- :export:
-
-.. kernel-doc:: fs/stat.c
- :export:
-
-.. kernel-doc:: fs/sync.c
- :export:
-
-.. kernel-doc:: fs/xattr.c
- :export:
-
-The proc filesystem
-===================
-
-sysctl interface
-----------------
-
-.. kernel-doc:: kernel/sysctl.c
- :export:
-
-proc filesystem interface
--------------------------
-
-.. kernel-doc:: fs/proc/base.c
- :internal:
-
-Events based on file descriptors
-================================
-
-.. kernel-doc:: fs/eventfd.c
- :export:
-
-The Filesystem for Exporting Kernel Objects
-===========================================
-
-.. kernel-doc:: fs/sysfs/file.c
- :export:
-
-.. kernel-doc:: fs/sysfs/symlink.c
- :export:
-
-The debugfs filesystem
+Core VFS documentation
======================
-debugfs interface
------------------
+See these manuals for documentation about the VFS layer itself and how its
+algorithms work.
-.. kernel-doc:: fs/debugfs/inode.c
- :export:
+.. toctree::
+ :maxdepth: 2
-.. kernel-doc:: fs/debugfs/file.c
- :export:
+ path-lookup.rst
+ api-summary
+ splice
-The Linux Journalling API
+Filesystem support layers
=========================
-Overview
---------
-
-Details
-~~~~~~~
-
-The journalling layer is easy to use. You need to first of all create a
-journal_t data structure. There are two calls to do this dependent on
-how you decide to allocate the physical media on which the journal
-resides. The :c:func:`jbd2_journal_init_inode` call is for journals stored in
-filesystem inodes, or the :c:func:`jbd2_journal_init_dev` call can be used
-for journal stored on a raw device (in a continuous range of blocks). A
-journal_t is a typedef for a struct pointer, so when you are finally
-finished make sure you call :c:func:`jbd2_journal_destroy` on it to free up
-any used kernel memory.
-
-Once you have got your journal_t object you need to 'mount' or load the
-journal file. The journalling layer expects the space for the journal
-was already allocated and initialized properly by the userspace tools.
-When loading the journal you must call :c:func:`jbd2_journal_load` to process
-journal contents. If the client file system detects the journal contents
-does not need to be processed (or even need not have valid contents), it
-may call :c:func:`jbd2_journal_wipe` to clear the journal contents before
-calling :c:func:`jbd2_journal_load`.
-
-Note that jbd2_journal_wipe(..,0) calls
-:c:func:`jbd2_journal_skip_recovery` for you if it detects any outstanding
-transactions in the journal and similarly :c:func:`jbd2_journal_load` will
-call :c:func:`jbd2_journal_recover` if necessary. I would advise reading
-:c:func:`ext4_load_journal` in fs/ext4/super.c for examples on this stage.
-
-Now you can go ahead and start modifying the underlying filesystem.
-Almost.
-
-You still need to actually journal your filesystem changes, this is done
-by wrapping them into transactions. Additionally you also need to wrap
-the modification of each of the buffers with calls to the journal layer,
-so it knows what the modifications you are actually making are. To do
-this use :c:func:`jbd2_journal_start` which returns a transaction handle.
-
-:c:func:`jbd2_journal_start` and its counterpart :c:func:`jbd2_journal_stop`,
-which indicates the end of a transaction are nestable calls, so you can
-reenter a transaction if necessary, but remember you must call
-:c:func:`jbd2_journal_stop` the same number of times as
-:c:func:`jbd2_journal_start` before the transaction is completed (or more
-accurately leaves the update phase). Ext4/VFS makes use of this feature to
-simplify handling of inode dirtying, quota support, etc.
-
-Inside each transaction you need to wrap the modifications to the
-individual buffers (blocks). Before you start to modify a buffer you
-need to call :c:func:`jbd2_journal_get_create_access()` /
-:c:func:`jbd2_journal_get_write_access()` /
-:c:func:`jbd2_journal_get_undo_access()` as appropriate, this allows the
-journalling layer to copy the unmodified
-data if it needs to. After all the buffer may be part of a previously
-uncommitted transaction. At this point you are at last ready to modify a
-buffer, and once you are have done so you need to call
-:c:func:`jbd2_journal_dirty_metadata`. Or if you've asked for access to a
-buffer you now know is now longer required to be pushed back on the
-device you can call :c:func:`jbd2_journal_forget` in much the same way as you
-might have used :c:func:`bforget` in the past.
-
-A :c:func:`jbd2_journal_flush` may be called at any time to commit and
-checkpoint all your transactions.
-
-Then at umount time , in your :c:func:`put_super` you can then call
-:c:func:`jbd2_journal_destroy` to clean up your in-core journal object.
-
-Unfortunately there a couple of ways the journal layer can cause a
-deadlock. The first thing to note is that each task can only have a
-single outstanding transaction at any one time, remember nothing commits
-until the outermost :c:func:`jbd2_journal_stop`. This means you must complete
-the transaction at the end of each file/inode/address etc. operation you
-perform, so that the journalling system isn't re-entered on another
-journal. Since transactions can't be nested/batched across differing
-journals, and another filesystem other than yours (say ext4) may be
-modified in a later syscall.
-
-The second case to bear in mind is that :c:func:`jbd2_journal_start` can block
-if there isn't enough space in the journal for your transaction (based
-on the passed nblocks param) - when it blocks it merely(!) needs to wait
-for transactions to complete and be committed from other tasks, so
-essentially we are waiting for :c:func:`jbd2_journal_stop`. So to avoid
-deadlocks you must treat :c:func:`jbd2_journal_start` /
-:c:func:`jbd2_journal_stop` as if they were semaphores and include them in
-your semaphore ordering rules to prevent
-deadlocks. Note that :c:func:`jbd2_journal_extend` has similar blocking
-behaviour to :c:func:`jbd2_journal_start` so you can deadlock here just as
-easily as on :c:func:`jbd2_journal_start`.
-
-Try to reserve the right number of blocks the first time. ;-). This will
-be the maximum number of blocks you are going to touch in this
-transaction. I advise having a look at at least ext4_jbd.h to see the
-basis on which ext4 uses to make these decisions.
-
-Another wriggle to watch out for is your on-disk block allocation
-strategy. Why? Because, if you do a delete, you need to ensure you
-haven't reused any of the freed blocks until the transaction freeing
-these blocks commits. If you reused these blocks and crash happens,
-there is no way to restore the contents of the reallocated blocks at the
-end of the last fully committed transaction. One simple way of doing
-this is to mark blocks as free in internal in-memory block allocation
-structures only after the transaction freeing them commits. Ext4 uses
-journal commit callback for this purpose.
-
-With journal commit callbacks you can ask the journalling layer to call
-a callback function when the transaction is finally committed to disk,
-so that you can do some of your own management. You ask the journalling
-layer for calling the callback by simply setting
-``journal->j_commit_callback`` function pointer and that function is
-called after each transaction commit. You can also use
-``transaction->t_private_list`` for attaching entries to a transaction
-that need processing when the transaction commits.
-
-JBD2 also provides a way to block all transaction updates via
-:c:func:`jbd2_journal_lock_updates()` /
-:c:func:`jbd2_journal_unlock_updates()`. Ext4 uses this when it wants a
-window with a clean and stable fs for a moment. E.g.
-
-::
-
-
- jbd2_journal_lock_updates() //stop new stuff happening..
- jbd2_journal_flush() // checkpoint everything.
- ..do stuff on stable fs
- jbd2_journal_unlock_updates() // carry on with filesystem use.
-
-The opportunities for abuse and DOS attacks with this should be obvious,
-if you allow unprivileged userspace to trigger codepaths containing
-these calls.
-
-Summary
-~~~~~~~
-
-Using the journal is a matter of wrapping the different context changes,
-being each mount, each modification (transaction) and each changed
-buffer to tell the journalling layer about them.
-
-Data Types
-----------
-
-The journalling layer uses typedefs to 'hide' the concrete definitions
-of the structures used. As a client of the JBD2 layer you can just rely
-on the using the pointer as a magic cookie of some sort. Obviously the
-hiding is not enforced as this is 'C'.
-
-Structures
-~~~~~~~~~~
-
-.. kernel-doc:: include/linux/jbd2.h
- :internal:
-
-Functions
----------
-
-The functions here are split into two groups those that affect a journal
-as a whole, and those which are used to manage transactions
-
-Journal Level
-~~~~~~~~~~~~~
-
-.. kernel-doc:: fs/jbd2/journal.c
- :export:
-
-.. kernel-doc:: fs/jbd2/recovery.c
- :internal:
-
-Transasction Level
-~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: fs/jbd2/transaction.c
-
-See also
---------
-
-`Journaling the Linux ext2fs Filesystem, LinuxExpo 98, Stephen
-Tweedie <http://kernel.org/pub/linux/kernel/people/sct/ext3/journal-design.ps.gz>`__
-
-`Ext3 Journalling FileSystem, OLS 2000, Dr. Stephen
-Tweedie <http://olstrans.sourceforge.net/release/OLS2000-ext3/OLS2000-ext3.html>`__
-
-splice API
-==========
-
-splice is a method for moving blocks of data around inside the kernel,
-without continually transferring them between the kernel and user space.
-
-.. kernel-doc:: fs/splice.c
-
-pipes API
-=========
-
-Pipe interfaces are all for in-kernel (builtin image) use. They are not
-exported for use by modules.
-
-.. kernel-doc:: include/linux/pipe_fs_i.h
- :internal:
-
-.. kernel-doc:: fs/pipe.c
-
-Encryption API
-==============
-
-A library which filesystems can hook into to support transparent
-encryption of files and directories.
+Documentation for the support code within the filesystem layer for use in
+filesystem implementations.
.. toctree::
- :maxdepth: 2
-
- fscrypt
-
-Pathname lookup
-===============
-
-
-This write-up is based on three articles published at lwn.net:
+ :maxdepth: 2
-- <https://lwn.net/Articles/649115/> Pathname lookup in Linux
-- <https://lwn.net/Articles/649729/> RCU-walk: faster pathname lookup in Linux
-- <https://lwn.net/Articles/650786/> A walk among the symlinks
+ journalling
+ fscrypt
-Written by Neil Brown with help from Al Viro and Jon Corbet.
-It has subsequently been updated to reflect changes in the kernel
-including:
+Filesystem-specific documentation
+=================================
-- per-directory parallel name lookup.
+Documentation for individual filesystem types can be found here.
.. toctree::
:maxdepth: 2
- path-lookup.rst
+ binderfs.rst
diff --git a/Documentation/filesystems/journalling.rst b/Documentation/filesystems/journalling.rst
new file mode 100644
index 000000000000..58ce6b395206
--- /dev/null
+++ b/Documentation/filesystems/journalling.rst
@@ -0,0 +1,184 @@
+The Linux Journalling API
+=========================
+
+Overview
+--------
+
+Details
+~~~~~~~
+
+The journalling layer is easy to use. You need to first of all create a
+journal_t data structure. There are two calls to do this, depending on
+how you decide to allocate the physical media on which the journal
+resides. The :c:func:`jbd2_journal_init_inode` call is for journals stored in
+filesystem inodes, or the :c:func:`jbd2_journal_init_dev` call can be used
+for a journal stored on a raw device (in a contiguous range of blocks). A
+journal_t is a typedef for a struct pointer, so when you are finally
+finished make sure you call :c:func:`jbd2_journal_destroy` on it to free up
+any used kernel memory.
+
+Once you have got your journal_t object you need to 'mount' or load the
+journal file. The journalling layer expects the space for the journal
+was already allocated and initialized properly by the userspace tools.
+When loading the journal you must call :c:func:`jbd2_journal_load` to process
+journal contents. If the client file system detects the journal contents
+does not need to be processed (or even need not have valid contents), it
+may call :c:func:`jbd2_journal_wipe` to clear the journal contents before
+calling :c:func:`jbd2_journal_load`.
+
+Note that jbd2_journal_wipe(..,0) calls
+:c:func:`jbd2_journal_skip_recovery` for you if it detects any outstanding
+transactions in the journal and similarly :c:func:`jbd2_journal_load` will
+call :c:func:`jbd2_journal_recover` if necessary. I would advise reading
+:c:func:`ext4_load_journal` in fs/ext4/super.c for examples on this stage.
+
+Now you can go ahead and start modifying the underlying filesystem.
+Almost.
+
+You still need to actually journal your filesystem changes, this is done
+by wrapping them into transactions. Additionally you also need to wrap
+the modification of each of the buffers with calls to the journal layer,
+so it knows what the modifications you are actually making are. To do
+this use :c:func:`jbd2_journal_start` which returns a transaction handle.
+
+:c:func:`jbd2_journal_start` and its counterpart :c:func:`jbd2_journal_stop`,
+which indicates the end of a transaction, are nestable calls, so you can
+reenter a transaction if necessary, but remember you must call
+:c:func:`jbd2_journal_stop` the same number of times as
+:c:func:`jbd2_journal_start` before the transaction is completed (or more
+accurately leaves the update phase). Ext4/VFS makes use of this feature to
+simplify handling of inode dirtying, quota support, etc.
+
+Inside each transaction you need to wrap the modifications to the
+individual buffers (blocks). Before you start to modify a buffer you
+need to call :c:func:`jbd2_journal_get_create_access()` /
+:c:func:`jbd2_journal_get_write_access()` /
+:c:func:`jbd2_journal_get_undo_access()` as appropriate; this allows the
+journalling layer to copy the unmodified
+data if it needs to. After all, the buffer may be part of a previously
+uncommitted transaction. At this point you are at last ready to modify a
+buffer, and once you have done so you need to call
+:c:func:`jbd2_journal_dirty_metadata`. Or if you've asked for access to a
+buffer you now know is no longer required to be pushed back on the
+device you can call :c:func:`jbd2_journal_forget` in much the same way as you
+might have used :c:func:`bforget` in the past.
+
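+For example, a minimal sketch of one buffer update inside a single
+transaction (error handling abbreviated; ``journal`` and the buffer head
+``bh`` are assumed to exist already)::
+
+	handle_t *handle;
+	int err;
+
+	handle = jbd2_journal_start(journal, 1);	/* reserve one block */
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	err = jbd2_journal_get_write_access(handle, bh);
+	if (!err) {
+		/* ... modify the buffer's contents ... */
+		err = jbd2_journal_dirty_metadata(handle, bh);
+	}
+
+	jbd2_journal_stop(handle);
+	return err;
+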
+A :c:func:`jbd2_journal_flush` may be called at any time to commit and
+checkpoint all your transactions.
+
+Then at umount time, in your :c:func:`put_super`, you can call
+:c:func:`jbd2_journal_destroy` to clean up your in-core journal object.
+
+Unfortunately there are a couple of ways the journal layer can cause a
+deadlock. The first thing to note is that each task can only have a
+single outstanding transaction at any one time, remember nothing commits
+until the outermost :c:func:`jbd2_journal_stop`. This means you must complete
+the transaction at the end of each file/inode/address etc. operation you
+perform, so that the journalling system isn't re-entered on another
+journal, since transactions can't be nested/batched across differing
+journals and another filesystem other than yours (say ext4) may be
+modified in a later syscall.
+
+The second case to bear in mind is that :c:func:`jbd2_journal_start` can block
+if there isn't enough space in the journal for your transaction (based
+on the passed nblocks param) - when it blocks it merely(!) needs to wait
+for transactions to complete and be committed from other tasks, so
+essentially we are waiting for :c:func:`jbd2_journal_stop`. So to avoid
+deadlocks you must treat :c:func:`jbd2_journal_start` /
+:c:func:`jbd2_journal_stop` as if they were semaphores and include them in
+your semaphore ordering rules to prevent
+deadlocks. Note that :c:func:`jbd2_journal_extend` has similar blocking
+behaviour to :c:func:`jbd2_journal_start` so you can deadlock here just as
+easily as on :c:func:`jbd2_journal_start`.
+
+Try to reserve the right number of blocks the first time. ;-). This will
+be the maximum number of blocks you are going to touch in this
+transaction. I advise having a look at at least ext4_jbd.h to see the
+basis on which ext4 makes these decisions.
+
+Another wriggle to watch out for is your on-disk block allocation
+strategy. Why? Because, if you do a delete, you need to ensure you
+haven't reused any of the freed blocks until the transaction freeing
+these blocks commits. If you reused these blocks and a crash happens,
+there is no way to restore the contents of the reallocated blocks at the
+end of the last fully committed transaction. One simple way of doing
+this is to mark blocks as free in internal in-memory block allocation
+structures only after the transaction freeing them commits. Ext4 uses
+the journal commit callback for this purpose.
+
+With journal commit callbacks you can ask the journalling layer to call
+a callback function when the transaction is finally committed to disk,
+so that you can do some of your own management. You ask the journalling
+layer to call the callback by simply setting the
+``journal->j_commit_callback`` function pointer, and that function is
+called after each transaction commit. You can also use
+``transaction->t_private_list`` for attaching entries to a transaction
+that need processing when the transaction commits.
+
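+For example, a filesystem could hook the commit callback like this (a
+sketch; the ``foo_`` names are illustrative)::
+
+	static void foo_commit_callback(journal_t *journal,
+					transaction_t *transaction)
+	{
+		/* e.g. mark blocks freed by this transaction as reusable */
+	}
+
+	journal->j_commit_callback = foo_commit_callback;
+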
+JBD2 also provides a way to block all transaction updates via
+:c:func:`jbd2_journal_lock_updates()` /
+:c:func:`jbd2_journal_unlock_updates()`. Ext4 uses this when it wants a
+window with a clean and stable fs for a moment. E.g.
+
+::
+
+
+ jbd2_journal_lock_updates() //stop new stuff happening..
+ jbd2_journal_flush() // checkpoint everything.
+ ..do stuff on stable fs
+ jbd2_journal_unlock_updates() // carry on with filesystem use.
+
+The opportunities for abuse and DOS attacks with this should be obvious,
+if you allow unprivileged userspace to trigger codepaths containing
+these calls.
+
+Summary
+~~~~~~~
+
+Using the journal is a matter of wrapping the different context changes,
+namely each mount, each modification (transaction) and each changed
+buffer, to tell the journalling layer about them.
+
+Data Types
+----------
+
+The journalling layer uses typedefs to 'hide' the concrete definitions
+of the structures used. As a client of the JBD2 layer you can just rely
+on using the pointer as a magic cookie of some sort. Obviously the
+hiding is not enforced as this is 'C'.
+
+Structures
+~~~~~~~~~~
+
+.. kernel-doc:: include/linux/jbd2.h
+ :internal:
+
+Functions
+---------
+
+The functions here are split into two groups: those that affect a journal
+as a whole, and those which are used to manage transactions.
+
+Journal Level
+~~~~~~~~~~~~~
+
+.. kernel-doc:: fs/jbd2/journal.c
+ :export:
+
+.. kernel-doc:: fs/jbd2/recovery.c
+ :internal:
+
+Transaction Level
+~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: fs/jbd2/transaction.c
+
+See also
+--------
+
+`Journaling the Linux ext2fs Filesystem, LinuxExpo 98, Stephen
+Tweedie <http://kernel.org/pub/linux/kernel/people/sct/ext3/journal-design.ps.gz>`__
+
+`Ext3 Journalling FileSystem, OLS 2000, Dr. Stephen
+Tweedie <http://olstrans.sourceforge.net/release/OLS2000-ext3/OLS2000-ext3.html>`__
+
diff --git a/Documentation/filesystems/mount_api.txt b/Documentation/filesystems/mount_api.txt
new file mode 100644
index 000000000000..00ff0cfccfa7
--- /dev/null
+++ b/Documentation/filesystems/mount_api.txt
@@ -0,0 +1,732 @@
+ ====================
+ FILESYSTEM MOUNT API
+ ====================
+
+CONTENTS
+
+ (1) Overview.
+
+ (2) The filesystem context.
+
+ (3) The filesystem context operations.
+
+ (4) Filesystem context security.
+
+ (5) VFS filesystem context API.
+
+ (6) Superblock creation helpers.
+
+ (7) Parameter description.
+
+ (8) Parameter helper functions.
+
+
+========
+OVERVIEW
+========
+
+The creation of new mounts is now to be done in a multistep process:
+
+ (1) Create a filesystem context.
+
+ (2) Parse the parameters and attach them to the context. Parameters are
+ expected to be passed individually from userspace, though legacy binary
+ parameters can also be handled.
+
+ (3) Validate and pre-process the context.
+
+ (4) Get or create a superblock and mountable root.
+
+ (5) Perform the mount.
+
+ (6) Return an error message attached to the context.
+
+ (7) Destroy the context.
+
+To support this, the file_system_type struct gains two new fields:
+
+ int (*init_fs_context)(struct fs_context *fc);
+ const struct fs_parameter_description *parameters;
+
+The first is invoked to set up the filesystem-specific parts of a filesystem
+context, including the additional space, and the second points to the
+parameter description for validation at registration time and querying by a
+future system call.
+
+Note that security initialisation is done *after* the filesystem is called so
+that the namespaces may be adjusted first.
+
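+For example, a filesystem might wire these up as follows (a sketch only;
+all "myfs" names are hypothetical and the referenced handlers are
+assumed to be defined elsewhere):
+
+	static struct file_system_type myfs_fs_type = {
+		.owner		 = THIS_MODULE,
+		.name		 = "myfs",
+		.init_fs_context = myfs_init_fs_context,
+		.parameters	 = &myfs_fs_parameters,
+		.kill_sb	 = kill_anon_super,
+	};
+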
+
+======================
+THE FILESYSTEM CONTEXT
+======================
+
+The creation and reconfiguration of a superblock is governed by a filesystem
+context. This is represented by the fs_context structure:
+
+ struct fs_context {
+ const struct fs_context_operations *ops;
+ struct file_system_type *fs_type;
+ void *fs_private;
+ struct dentry *root;
+ struct user_namespace *user_ns;
+ struct net *net_ns;
+ const struct cred *cred;
+ char *source;
+ char *subtype;
+ void *security;
+ void *s_fs_info;
+ unsigned int sb_flags;
+ unsigned int sb_flags_mask;
+ unsigned int s_iflags;
+ unsigned int lsm_flags;
+ enum fs_context_purpose purpose:8;
+ ...
+ };
+
+The fs_context fields are as follows:
+
+ (*) const struct fs_context_operations *ops
+
+ These are operations that can be done on a filesystem context (see
+ below). This must be set by the ->init_fs_context() file_system_type
+ operation.
+
+ (*) struct file_system_type *fs_type
+
+ A pointer to the file_system_type of the filesystem that is being
+ constructed or reconfigured. This retains a reference on the type owner.
+
+ (*) void *fs_private
+
+ A pointer to the file system's private data. This is where the filesystem
+ will need to store any options it parses.
+
+ (*) struct dentry *root
+
+ A pointer to the root of the mountable tree (and indirectly, the
+ superblock thereof). This is filled in by the ->get_tree() op. If this
+ is set, an active reference on root->d_sb must also be held.
+
+ (*) struct user_namespace *user_ns
+ (*) struct net *net_ns
+
+ These are a subset of the namespaces in use by the invoking process. They
+ retain references on each namespace. The subscribed namespaces may be
+ replaced by the filesystem to reflect other sources, such as the parent
+ mount superblock on an automount.
+
+ (*) const struct cred *cred
+
+ The mounter's credentials. This retains a reference on the credentials.
+
+ (*) char *source
+
+ This specifies the source. It may be a block device (e.g. /dev/sda1) or
+ something more exotic, such as the "host:/path" that NFS desires.
+
+ (*) char *subtype
+
+ This is a string to be added to the type displayed in /proc/mounts to
+ qualify it (used by FUSE). This is available for the filesystem to set if
+ desired.
+
+ (*) void *security
+
+ A place for the LSMs to hang their security data for the superblock. The
+ relevant security operations are described below.
+
+ (*) void *s_fs_info
+
+ The proposed s_fs_info for a new superblock, set in the superblock by
+ sget_fc(). This can be used to distinguish superblocks.
+
+ (*) unsigned int sb_flags
+ (*) unsigned int sb_flags_mask
+
+ Which SB_* flags are to be set/cleared in super_block::s_flags.
+
+ (*) unsigned int s_iflags
+
+ These will be bitwise-OR'd with s->s_iflags when a superblock is created.
+
+ (*) enum fs_context_purpose
+
+ This indicates the purpose for which the context is intended. The
+ available values are:
+
+ FS_CONTEXT_FOR_MOUNT -- New superblock for explicit mount
+ FS_CONTEXT_FOR_SUBMOUNT -- New automatic submount of extant mount
+ FS_CONTEXT_FOR_RECONFIGURE -- Change an existing mount
+
+The filesystem context is created by calling one of the
+fs_context_for_*() functions described below, or duplicated with
+vfs_dup_fs_context(), and is destroyed with put_fs_context(). Note that
+the structure is not refcounted.
+
+VFS, security and filesystem mount options are set individually with
+vfs_parse_fs_param(). Options provided by the old mount(2) system call as
+a page of data can be parsed with generic_parse_monolithic().
+
+When mounting, the filesystem is allowed to take data from any of the pointers
+and attach it to the superblock (or whatever), provided it clears the pointer
+in the mount context.
+
+The filesystem is also allowed to allocate resources and pin them with the
+mount context. For instance, NFS might pin the appropriate protocol version
+module.
+
+
+=================================
+THE FILESYSTEM CONTEXT OPERATIONS
+=================================
+
+The filesystem context points to a table of operations:
+
+ struct fs_context_operations {
+ void (*free)(struct fs_context *fc);
+ int (*dup)(struct fs_context *fc, struct fs_context *src_fc);
+ int (*parse_param)(struct fs_context *fc,
+ struct fs_parameter *param);
+ int (*parse_monolithic)(struct fs_context *fc, void *data);
+ int (*get_tree)(struct fs_context *fc);
+ int (*reconfigure)(struct fs_context *fc);
+ };
+
+These operations are invoked by the various stages of the mount procedure to
+manage the filesystem context. They are as follows:
+
+ (*) void (*free)(struct fs_context *fc);
+
+ Called to clean up the filesystem-specific part of the filesystem context
+ when the context is destroyed. It should be aware that parts of the
+ context may have been removed and NULL'd out by ->get_tree().
+
+ (*) int (*dup)(struct fs_context *fc, struct fs_context *src_fc);
+
+ Called when a filesystem context has been duplicated to duplicate the
+ filesystem-private data. An error may be returned to indicate failure to
+ do this.
+
+ [!] Note that even if this fails, put_fs_context() will be called
+ immediately thereafter, so ->dup() *must* make the
+ filesystem-private data safe for ->free().
+
+ (*) int (*parse_param)(struct fs_context *fc,
+ struct fs_parameter *param);
+
+ Called when a parameter is being added to the filesystem context. param
+ points to the key name and maybe a value object. VFS-specific options
+ will have been weeded out and fc->sb_flags updated in the context.
+ Security options will also have been weeded out and fc->security updated.
+
+ The parameter can be parsed with fs_parse() and fs_lookup_param(). Note
+ that the source(s) are presented as parameters named "source".
+
+ If successful, 0 should be returned; otherwise, a negative error code.
+
+ (*) int (*parse_monolithic)(struct fs_context *fc, void *data);
+
+ Called when the mount(2) system call is invoked to pass the entire data
+ page in one go. If this is expected to be just a list of "key[=val]"
+ items separated by commas, then this may be set to NULL.
+
+ The return value is as for ->parse_param().
+
+ If the filesystem (e.g. NFS) needs to examine the data first and then
+ finds it's the standard key-val list then it may pass it off to
+ generic_parse_monolithic().
+
+ (*) int (*get_tree)(struct fs_context *fc);
+
+ Called to get or create the mountable root and superblock, using the
+ information stored in the filesystem context (reconfiguration goes via a
+ different vector). It may detach any resources it desires from the
+ filesystem context and transfer them to the superblock it creates.
+
+ On success it should set fc->root to the mountable root and return 0. In
+ the case of an error, it should return a negative error code.
+
+ The phase on a userspace-driven context will be set to only allow this to
+ be called once on any particular context.
+
+ (*) int (*reconfigure)(struct fs_context *fc);
+
+ Called to effect reconfiguration of a superblock using information stored
+ in the filesystem context. It may detach any resources it desires from
+ the filesystem context and transfer them to the superblock. The
+ superblock can be found from fc->root->d_sb.
+
+ On success it should return 0. In the case of an error, it should return
+ a negative error code.
+
+ [NOTE] reconfigure is intended as a replacement for remount_fs.
+
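+As an illustration, a minimal ->init_fs_context() might look like this
+(a sketch only; the "myfs" names are hypothetical and the handlers in
+the operations table are assumed to be defined elsewhere):
+
+	static const struct fs_context_operations myfs_context_ops = {
+		.free		= myfs_free_fc,
+		.parse_param	= myfs_parse_param,
+		.get_tree	= myfs_get_tree,
+		.reconfigure	= myfs_reconfigure,
+	};
+
+	static int myfs_init_fs_context(struct fs_context *fc)
+	{
+		struct myfs_fs_context *ctx;
+
+		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+		if (!ctx)
+			return -ENOMEM;
+
+		fc->fs_private = ctx;	/* cleaned up by ->free() */
+		fc->ops = &myfs_context_ops;
+		return 0;
+	}
+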
+
+===========================
+FILESYSTEM CONTEXT SECURITY
+===========================
+
+The filesystem context contains a security pointer that the LSMs can use for
+building up a security context for the superblock to be mounted. There are a
+number of operations used by the new mount code for this purpose:
+
+ (*) int security_fs_context_alloc(struct fs_context *fc,
+ struct dentry *reference);
+
+ Called to initialise fc->security (which is preset to NULL) and allocate
+ any resources needed. It should return 0 on success or a negative error
+ code on failure.
+
+ reference will be non-NULL if the context is being created for superblock
+ reconfiguration (FS_CONTEXT_FOR_RECONFIGURE) in which case it indicates
+ the root dentry of the superblock to be reconfigured. It will also be
+ non-NULL in the case of a submount (FS_CONTEXT_FOR_SUBMOUNT) in which case
+ it indicates the automount point.
+
+ (*) int security_fs_context_dup(struct fs_context *fc,
+ struct fs_context *src_fc);
+
+ Called to initialise fc->security (which is preset to NULL) and allocate
+ any resources needed. The original filesystem context is pointed to by
+ src_fc and may be used for reference. It should return 0 on success or a
+ negative error code on failure.
+
+ (*) void security_fs_context_free(struct fs_context *fc);
+
+ Called to clean up anything attached to fc->security. Note that the
+ contents may have been transferred to a superblock and the pointer cleared
+ during get_tree.
+
+ (*) int security_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param);
+
+ Called for each mount parameter, including the source. The arguments are
+ as for the ->parse_param() method. It should return 0 to indicate that
+ the parameter should be passed on to the filesystem, 1 to indicate that
+ the parameter should be discarded, or an error to indicate that the
+ parameter should be rejected.
+
+ The value pointed to by param may be modified (if a string) or stolen
+ (provided the value pointer is NULL'd out). If it is stolen, 1 must be
+ returned to prevent it being passed to the filesystem.
+
+ (*) int security_fs_context_validate(struct fs_context *fc);
+
+ Called after all the options have been parsed to validate the collection
+ as a whole and to do any necessary allocation so that
+ security_sb_get_tree() and security_sb_reconfigure() are less likely to
+ fail. It should return 0 or a negative error code.
+
+ In the case of reconfiguration, the target superblock will be accessible
+ via fc->root.
+
+ (*) int security_sb_get_tree(struct fs_context *fc);
+
+ Called during the mount procedure to verify that the specified superblock
+ is allowed to be mounted and to transfer the security data there. It
+ should return 0 or a negative error code.
+
+ (*) void security_sb_reconfigure(struct fs_context *fc);
+
+ Called to apply any reconfiguration to an LSM's context. It must not
+ fail. Error checking and resource allocation must be done in advance by
+ the parameter parsing and validation hooks.
+
+ (*) int security_sb_mountpoint(struct fs_context *fc, struct path *mountpoint,
+ unsigned int mnt_flags);
+
+ Called during the mount procedure to verify that the root dentry attached
+ to the context is permitted to be attached to the specified mountpoint.
+ It should return 0 on success or a negative error code on failure.
+
+
+==========================
+VFS FILESYSTEM CONTEXT API
+==========================
+
+There are four operations for creating a filesystem context and one for
+destroying a context:
+
+ (*) struct fs_context *fs_context_for_mount(
+ struct file_system_type *fs_type,
+ unsigned int sb_flags);
+
+ Allocate a filesystem context for the purpose of setting up a new mount,
+ whether that be with a new superblock or sharing an existing one. This
+ sets the superblock flags, initialises the security and calls
+ fs_type->init_fs_context() to initialise the filesystem private data.
+
+ fs_type specifies the filesystem type that will manage the context and
+ sb_flags presets the superblock flags stored therein.
+
+ (*) struct fs_context *fs_context_for_reconfigure(
+ struct dentry *dentry,
+ unsigned int sb_flags,
+ unsigned int sb_flags_mask);
+
+ Allocate a filesystem context for the purpose of reconfiguring an
+ existing superblock. dentry provides a reference to the superblock to be
+ configured. sb_flags and sb_flags_mask indicate which superblock flags
+ need changing and to what.
+
+ (*) struct fs_context *fs_context_for_submount(
+ struct file_system_type *fs_type,
+ struct dentry *reference);
+
+ Allocate a filesystem context for the purpose of creating a new mount for
+ an automount point or other derived superblock. fs_type specifies the
+ filesystem type that will manage the context and the reference dentry
+ supplies the parameters. Namespaces are propagated from the reference
+ dentry's superblock also.
+
+ Note that it's not a requirement that the reference dentry be of the same
+ filesystem type as fs_type.
+
+ (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc);
+
+ Duplicate a filesystem context, copying any options noted and duplicating
+ or additionally referencing any resources held therein. This is available
+ for use where a filesystem has to get a mount within a mount, such as NFS4
+ does by internally mounting the root of the target server and then doing a
+ private pathwalk to the target directory.
+
+ The purpose in the new context is inherited from the old one.
+
+ (*) void put_fs_context(struct fs_context *fc);
+
+ Destroy a filesystem context, releasing any resources it holds. This
+ calls the ->free() operation. This is intended to be called by anyone who
+ created a filesystem context.
+
+ [!] filesystem contexts are not refcounted, so this causes unconditional
+ destruction.
+
+In all the above operations, apart from the put op, the return is a
+filesystem context pointer or a negative error code.
+
+For the remaining operations, if an error occurs, a negative error code will be
+returned.
+
+ (*) int vfs_parse_fs_param(struct fs_context *fc,
+ struct fs_parameter *param);
+
+ Supply a single mount parameter to the filesystem context. This includes
+ the specification of the source/device which is specified as the "source"
+ parameter (which may be specified multiple times if the filesystem
+ supports that).
+
+ param specifies the parameter key name and the value. The parameter is
+ first checked to see if it corresponds to a standard mount flag (in which
+ case it is used to set an SB_xxx flag and consumed) or a security option
+ (in which case the LSM consumes it) before it is passed on to the
+ filesystem.
+
+ The parameter value is typed and can be one of:
+
+ fs_value_is_flag, Parameter not given a value
+ fs_value_is_string, Value is a string
+ fs_value_is_blob, Value is a binary blob
+ fs_value_is_filename, Value is a filename* + dirfd
+ fs_value_is_filename_empty, Value is a filename* + dirfd + AT_EMPTY_PATH
+ fs_value_is_file, Value is an open file (file*)
+
+ If there is a value, that value is stored in a union in the struct in one
+ of param->{string,blob,name,file}. Note that the function may steal and
+ clear the pointer, but then becomes responsible for disposing of the
+ object.
+
+ (*) int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ const char *value, size_t v_size);
+
+ A wrapper around vfs_parse_fs_param() that copies the value string it is
+ passed.
+
+ (*) int generic_parse_monolithic(struct fs_context *fc, void *data);
+
+ Parse a sys_mount() data page, assuming the form to be a text list
+ consisting of key[=val] options separated by commas. Each item in the
+ list is passed to vfs_parse_fs_string(). This is the default when the
+ ->parse_monolithic() method is NULL.
+
+ (*) int vfs_get_tree(struct fs_context *fc);
+
+ Get or create the mountable root and superblock, using the parameters in
+ the filesystem context to select/configure the superblock. This invokes
+ the ->get_tree() method.
+
+ (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
+
+ Create a mount given the parameters in the specified filesystem context.
+ Note that this does not attach the mount to anything.
+
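+Putting the pieces together, an in-kernel mount might proceed roughly as
+follows (a sketch under the assumption that myfs_fs_type, from the
+earlier example, exists; the source string and flags are illustrative):
+
+	static struct vfsmount *myfs_mount_example(void)
+	{
+		struct fs_context *fc;
+		struct vfsmount *mnt;
+		int err;
+
+		fc = fs_context_for_mount(&myfs_fs_type, SB_RDONLY);
+		if (IS_ERR(fc))
+			return ERR_CAST(fc);
+
+		err = vfs_parse_fs_string(fc, "source", "/dev/sda1", 9);
+		if (!err)
+			err = vfs_get_tree(fc);
+		mnt = err ? ERR_PTR(err) : vfs_create_mount(fc);
+		put_fs_context(fc);
+		return mnt;
+	}
+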
+
+===========================
+SUPERBLOCK CREATION HELPERS
+===========================
+
+A number of VFS helpers are available for use by filesystems for the creation
+or looking up of superblocks.
+
+ (*) struct super_block *
+ sget_fc(struct fs_context *fc,
+ int (*test)(struct super_block *sb, struct fs_context *fc),
+ int (*set)(struct super_block *sb, struct fs_context *fc));
+
+ This is the core routine. If test is non-NULL, it searches for an
+ existing superblock matching the criteria held in the fs_context, using
+ the test function to match them. If no match is found, a new superblock
+ is created and the set function is called to set it up.
+
+ Prior to the set function being called, fc->s_fs_info will be transferred
+ to sb->s_fs_info - and fc->s_fs_info will be cleared if set returns
+ success (ie. 0).
+
+The following helpers all wrap sget_fc():
+
+ (*) int vfs_get_super(struct fs_context *fc,
+ enum vfs_get_super_keying keying,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc))
+
+ This creates/looks up a deviceless superblock. The keying indicates how
+ many superblocks of this type may exist and in what manner they may be
+ shared:
+
+ (1) vfs_get_single_super
+
+ Only one such superblock may exist in the system. Any further
+ attempt to get a new superblock gets this one (and any parameter
+ differences are ignored).
+
+ (2) vfs_get_keyed_super
+
+ Multiple superblocks of this type may exist and they're keyed on
+ their s_fs_info pointer (for example this may refer to a
+ namespace).
+
+ (3) vfs_get_independent_super
+
+ Multiple independent superblocks of this type may exist. This
+ function never matches an existing one and always creates a new
+ one.
+
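+For instance, a deviceless filesystem's ->get_tree() might be as simple
+as the following sketch (the myfs_fill_super() body is illustrative
+only; a real one would also set s_op, s_blocksize and so on):
+
+	static int myfs_fill_super(struct super_block *sb,
+				   struct fs_context *fc)
+	{
+		struct inode *inode;
+
+		sb->s_magic = 0x6d796673;	/* hypothetical magic number */
+		inode = new_inode(sb);
+		if (!inode)
+			return -ENOMEM;
+		inode->i_ino = 1;
+		inode->i_mode = S_IFDIR | 0755;
+		sb->s_root = d_make_root(inode);
+		return sb->s_root ? 0 : -ENOMEM;
+	}
+
+	static int myfs_get_tree(struct fs_context *fc)
+	{
+		return vfs_get_super(fc, vfs_get_independent_super,
+				     myfs_fill_super);
+	}
+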
+
+=====================
+PARAMETER DESCRIPTION
+=====================
+
+Parameters are described using structures defined in linux/fs_parser.h.
+There's a core description struct that links everything together:
+
+ struct fs_parameter_description {
+ const char name[16];
+ const struct fs_parameter_spec *specs;
+ const struct fs_parameter_enum *enums;
+ };
+
+For example:
+
+ enum {
+ Opt_autocell,
+ Opt_bar,
+ Opt_dyn,
+ Opt_foo,
+ Opt_source,
+ };
+
+ static const struct fs_parameter_description afs_fs_parameters = {
+ .name = "kAFS",
+ .specs = afs_param_specs,
+ .enums = afs_param_enums,
+ };
+
+The members are as follows:
+
+ (1) const char name[16];
+
+ The name to be used in error messages generated by the parse helper
+ functions.
+
+ (2) const struct fs_parameter_spec *specs;
+
+ Table of parameter specifications, terminated with a null entry, where the
+ entries are of type:
+
+ struct fs_parameter_spec {
+ const char *name;
+ u8 opt;
+ enum fs_parameter_type type:8;
+ unsigned short flags;
+ };
+
+ The 'name' field is a string to match exactly to the parameter key (no
+ wildcards, patterns and no case-independence) and 'opt' is the value that
+ will be returned by the fs_parser() function in the case of a successful
+ match.
+
+ The 'type' field indicates the desired value type and must be one of:
+
+ TYPE NAME EXPECTED VALUE RESULT IN
+ ======================= ======================= =====================
+ fs_param_is_flag No value n/a
+ fs_param_is_bool Boolean value result->boolean
+ fs_param_is_u32 32-bit unsigned int result->uint_32
+ fs_param_is_u32_octal 32-bit octal int result->uint_32
+ fs_param_is_u32_hex 32-bit hex int result->uint_32
+ fs_param_is_s32 32-bit signed int result->int_32
+ fs_param_is_u64 64-bit unsigned int result->uint_64
+ fs_param_is_enum Enum value name result->uint_32
+ fs_param_is_string Arbitrary string param->string
+ fs_param_is_blob Binary blob param->blob
+ fs_param_is_blockdev Blockdev path * Needs lookup
+ fs_param_is_path Path * Needs lookup
+ fs_param_is_fd File descriptor result->int_32
+
+ Note that if the value is of fs_param_is_bool type, fs_parse() will try
+ to match any string value against "0", "1", "no", "yes", "false", "true".
+
+ Each parameter can also be qualified with 'flags':
+
+ fs_param_v_optional The value is optional
+ fs_param_neg_with_no result->negated set if key is prefixed with "no"
+ fs_param_neg_with_empty result->negated set if value is ""
+ fs_param_deprecated The parameter is deprecated.
+
+ A number of convenience macros wrap common combinations of these:
+
+ MACRO SPECIFIES
+ ======================= ===============================================
+ fsparam_flag() fs_param_is_flag
+ fsparam_flag_no() fs_param_is_flag, fs_param_neg_with_no
+ fsparam_bool() fs_param_is_bool
+ fsparam_u32() fs_param_is_u32
+ fsparam_u32oct() fs_param_is_u32_octal
+ fsparam_u32hex() fs_param_is_u32_hex
+ fsparam_s32() fs_param_is_s32
+ fsparam_u64() fs_param_is_u64
+ fsparam_enum() fs_param_is_enum
+ fsparam_string() fs_param_is_string
+ fsparam_blob() fs_param_is_blob
+ fsparam_bdev() fs_param_is_blockdev
+ fsparam_path() fs_param_is_path
+ fsparam_fd() fs_param_is_fd
+
+ all of which take two arguments, a name string and an option number -
+ for example:
+
+ static const struct fs_parameter_spec afs_param_specs[] = {
+ fsparam_flag ("autocell", Opt_autocell),
+ fsparam_flag ("dyn", Opt_dyn),
+ fsparam_string ("source", Opt_source),
+ fsparam_flag_no ("foo", Opt_foo),
+ {}
+ };
+
+ An additional macro, __fsparam(), is provided that takes an extra pair
+ of arguments to specify the type and the flags for anything that doesn't
+ match one of the above macros.
+
+ (3) const struct fs_parameter_enum *enums;
+
+ Table of enum value names to integer mappings, terminated with a null
+ entry. This is of type:
+
+ struct fs_parameter_enum {
+ u8 opt;
+ char name[14];
+ u8 value;
+ };
+
+ Where the array is an unsorted list of { parameter ID, name }-keyed
+ elements that indicate the value to map to, e.g.:
+
+ static const struct fs_parameter_enum afs_param_enums[] = {
+ { Opt_bar, "x", 1},
+ { Opt_bar, "y", 23},
+ { Opt_bar, "z", 42},
+ };
+
+ If a parameter of type fs_param_is_enum is encountered, fs_parse() will
+ try to look the value up in the enum table and the result will be stored
+ in the parse result.
+
+The description should be pointed to by the parameters pointer in the
+file_system_type struct as this will provide validation on registration (if
+CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from
+userspace using the fsinfo() syscall.
+
+
+==========================
+PARAMETER HELPER FUNCTIONS
+==========================
+
+A number of helper functions are provided to help a filesystem or an LSM
+process the parameters it is given.
+
+ (*) int lookup_constant(const struct constant_table tbl[],
+ const char *name, int not_found);
+
+ Look up a constant by name in a table of name -> integer mappings. The
+ table is an array of elements of the following type:
+
+ struct constant_table {
+ const char *name;
+ int value;
+ };
+
+ If a match is found, the corresponding value is returned. If a match
+ isn't found, the not_found value is returned instead.
+
+ (*) bool validate_constant_table(const struct constant_table *tbl,
+ size_t tbl_size,
+ int low, int high, int special);
+
+ Validate a constant table. Checks that all the elements are appropriately
+ ordered, that there are no duplicates and that the values are between low
+ and high inclusive, though provision is made for one allowable special
+ value outside of that range. If no special value is required, special
+ should just be set to lie inside the low-to-high range.
+
+ If all is good, true is returned. If the table is invalid, errors are
+ logged to dmesg and false is returned.
+
+ (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+
+ This performs some validation checks on a parameter description. It
+ returns true if the description is good and false if it is not. It will
+ log errors to dmesg if validation fails.
+
+ (*) int fs_parse(struct fs_context *fc,
+ const struct fs_parameter_description *desc,
+ struct fs_parameter *param,
+ struct fs_parse_result *result);
+
+ This is the main interpreter of parameters. It uses the parameter
+ description to look up a parameter by key name and to convert that to an
+ option number (which it returns).
+
+ If successful, and if the parameter type indicates the result is a
+ boolean, integer or enum type, the value is converted by this function and
+ the result stored in result->{boolean,int_32,uint_32,uint_64}.
+
+ If a match isn't initially made, but the key is prefixed with "no" and
+ no value is present, then an attempt will be made to look up the key
+ with the prefix removed. If this matches a parameter for which the type
+ has flag fs_param_neg_with_no set, then a match will be made and
+ result->negated will be set to true.
+
+ If the parameter isn't matched, -ENOPARAM will be returned; if the
+ parameter is matched, but the value is erroneous, -EINVAL will be
+ returned; otherwise the parameter's option number will be returned (a
+ sketch of a typical caller is given at the end of this section).
+
+ (*) int fs_lookup_param(struct fs_context *fc,
+ struct fs_parameter *value,
+ bool want_bdev,
+ struct path *_path);
+
+ This takes a parameter that carries a string or filename type and attempts
+ to do a path lookup on it. If the parameter expects a blockdev, a check
+ is made that the inode actually represents one.
+
+ Returns 0 if successful and *_path will be set; returns a negative error
+ code if not.
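+
+As an example, a ->parse_param() implementation built on fs_parse()
+might look like this (a sketch reusing the kAFS-style names from the
+parameter description section above; struct myfs_fs_context and its
+foo member are hypothetical):
+
+	static int myfs_parse_param(struct fs_context *fc,
+				    struct fs_parameter *param)
+	{
+		struct myfs_fs_context *ctx = fc->fs_private;
+		struct fs_parse_result result;
+		int opt;
+
+		opt = fs_parse(fc, &afs_fs_parameters, param, &result);
+		if (opt < 0)
+			return opt;
+
+		switch (opt) {
+		case Opt_foo:
+			ctx->foo = !result.negated;	/* fsparam_flag_no */
+			break;
+		case Opt_source:
+			fc->source = param->string;	/* steal the string */
+			param->string = NULL;
+			break;
+		}
+		return 0;
+	}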
diff --git a/Documentation/filesystems/path-lookup.rst b/Documentation/filesystems/path-lookup.rst
index 9d6b68853f5b..434a07b0002b 100644
--- a/Documentation/filesystems/path-lookup.rst
+++ b/Documentation/filesystems/path-lookup.rst
@@ -1,3 +1,18 @@
+===============
+Pathname lookup
+===============
+
+This write-up is based on three articles published at lwn.net:
+
+- <https://lwn.net/Articles/649115/> Pathname lookup in Linux
+- <https://lwn.net/Articles/649729/> RCU-walk: faster pathname lookup in Linux
+- <https://lwn.net/Articles/650786/> A walk among the symlinks
+
+Written by Neil Brown with help from Al Viro and Jon Corbet.
+It has subsequently been updated to reflect changes in the kernel
+including:
+
+- per-directory parallel name lookup.
Introduction to pathname lookup
===============================
@@ -344,7 +359,7 @@ In particular it is held while scanning chains in the dcache hash
table, and the mount point hash table.
Bringing it together with ``struct nameidata``
---------------------------------------------
+----------------------------------------------
.. _First edition Unix: http://minnie.tuhs.org/cgi-bin/utree.pl?file=V1/u2.s
@@ -355,7 +370,7 @@ converts a "name" to an "inode". ``struct nameidata`` contains (among
other fields):
``struct path path``
-~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~
A ``path`` contains a ``struct vfsmount`` (which is
embedded in a ``struct mount``) and a ``struct dentry``. Together these
@@ -366,13 +381,13 @@ step. A reference through ``d_lockref`` and ``mnt_count`` is always
held.
``struct qstr last``
-~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~
This is a string together with a length (i.e. _not_ ``nul`` terminated)
that is the "next" component in the pathname.
``int last_type``
-~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~
This is one of ``LAST_NORM``, ``LAST_ROOT``, ``LAST_DOT``, ``LAST_DOTDOT``, or
``LAST_BIND``. The ``last`` field is only valid if the type is
@@ -381,7 +396,7 @@ components of the symlink have been processed yet. Others should be
fairly self-explanatory.
``struct path root``
-~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~
This is used to hold a reference to the effective root of the
filesystem. Often that reference won't be needed, so this field is
@@ -510,7 +525,7 @@ potentially interesting things about these dentries corresponding
to three different flags that might be set in ``dentry->d_flags``:
``DCACHE_MANAGE_TRANSIT``
-~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~
If this flag has been set, then the filesystem has requested that the
``d_manage()`` dentry operation be called before handling any possible
@@ -529,7 +544,7 @@ filesystem, which will then give it a special pass through
``d_manage()`` by returning ``-EISDIR``.
``DCACHE_MOUNTED``
-~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~
This flag is set on every dentry that is mounted on. As Linux
supports multiple filesystem namespaces, it is possible that the
@@ -542,7 +557,7 @@ If this flag is set, and ``d_manage()`` didn't return ``-EISDIR``,
and a new ``dentry`` (both with counted references).
``DCACHE_NEED_AUTOMOUNT``
-~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~
If ``d_manage()`` allowed us to get this far, and ``lookup_mnt()`` didn't
find a mount point, then this flag causes the ``d_automount()`` dentry
@@ -698,7 +713,7 @@ With that little refresher on seqlocks out of the way we can look at
the bigger picture of how RCU-walk uses seqlocks.
``mount_lock`` and ``nd->m_seq``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We already met the ``mount_lock`` seqlock when REF-walk used it to
ensure that crossing a mount point is performed safely. RCU-walk uses
@@ -727,7 +742,7 @@ results would have been the same. This ensures the invariant holds,
at least for vfsmount structures.
``dentry->d_seq`` and ``nd->seq``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In place of taking a count or lock on ``d_reflock``, RCU-walk samples
the per-dentry ``d_seq`` seqlock, and stores the sequence number in the
@@ -774,7 +789,7 @@ getting a counted reference to the new dentry before dropping that for
the old dentry which we saw in REF-walk.
No ``inode->i_rwsem`` or even ``rename_lock``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A semaphore is a fairly heavyweight lock that can only be taken when it is
permissible to sleep. As ``rcu_read_lock()`` forbids sleeping,
@@ -796,7 +811,7 @@ locking. This neatly handles all cases, so adding extra checks on
rename_lock would bring no significant value.
``unlazy walk()`` and ``complete_walk()``
--------------------------------------
+-----------------------------------------
That "dropping down to REF-walk" typically involves a call to
``unlazy_walk()``, so named because "RCU-walk" is also sometimes
diff --git a/Documentation/filesystems/splice.rst b/Documentation/filesystems/splice.rst
new file mode 100644
index 000000000000..edd874808472
--- /dev/null
+++ b/Documentation/filesystems/splice.rst
@@ -0,0 +1,22 @@
+================
+splice and pipes
+================
+
+splice API
+==========
+
+splice is a method for moving blocks of data around inside the kernel,
+without continually transferring them between the kernel and user space.
+
+.. kernel-doc:: fs/splice.c
+
+pipes API
+=========
+
+Pipe interfaces are all for in-kernel (builtin image) use. They are not
+exported for use by modules.
+
+.. kernel-doc:: include/linux/pipe_fs_i.h
+ :internal:
+
+.. kernel-doc:: fs/pipe.c
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index 41411b0c60a3..5b5311f9358d 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -116,6 +116,27 @@ static struct device_attribute dev_attr_foo = {
.store = store_foo,
};
+Note that, as stated in include/linux/kernel.h ("OTHER_WRITABLE?
+Generally considered a bad idea."), trying to make a sysfs file
+writable for everyone will fail, reverting it to read-only for "others".
+
+For the common cases sysfs.h provides convenience macros to make
+defining attributes easier as well as making code more concise and
+readable. The above case could be shortened to:
+
+static struct device_attribute dev_attr_foo = __ATTR_RW(foo);
+
+The list of helpers available to define your wrapper function is:
+
+__ATTR_RO(name): assumes default name_show and mode 0444
+__ATTR_WO(name): assumes a name_store only and is restricted to mode
+ 0200, that is, root write access only.
+__ATTR_RO_MODE(name, mode): for more restrictive RO access; currently
+ the only use case is the EFI System Resource Table
+ (see drivers/firmware/efi/esrt.c)
+__ATTR_RW(name): assumes default name_show, name_store and setting
+ mode to 0644.
+__ATTR_NULL: sets the name to NULL and is used as end of list
+ indicator (see: kernel/workqueue.c)
Subsystem-Specific Callbacks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/acpi/DSD-properties-rules.txt b/Documentation/firmware-guide/acpi/DSD-properties-rules.rst
index 3e4862bdad98..4306f29b6103 100644
--- a/Documentation/acpi/DSD-properties-rules.txt
+++ b/Documentation/firmware-guide/acpi/DSD-properties-rules.rst
@@ -1,8 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
_DSD Device Properties Usage Rules
-----------------------------------
+==================================
Properties, Property Sets and Property Subsets
-----------------------------------------------
+==============================================
The _DSD (Device Specific Data) configuration object, introduced in ACPI 5.1,
allows any type of device configuration data to be provided via the ACPI
@@ -18,7 +21,7 @@ specific type) associated with it.
In the ACPI _DSD context it is an element of the sub-package following the
generic Device Properties UUID in the _DSD return package as specified in the
-Device Properties UUID definition document [1].
+Device Properties UUID definition document [1]_.
It also may be regarded as the definition of a key and the associated data type
that can be returned by _DSD in the Device Properties UUID sub-package for a
@@ -33,14 +36,14 @@ Property subsets are nested collections of properties. Each of them is
associated with an additional key (name) allowing the subset to be referred
to as a whole (and to be treated as a separate entity). The canonical
representation of property subsets is via the mechanism specified in the
-Hierarchical Properties Extension UUID definition document [2].
+Hierarchical Properties Extension UUID definition document [2]_.
Property sets may be hierarchical. That is, a property set may contain
multiple property subsets that each may contain property subsets of its
own and so on.
General Validity Rule for Property Sets
----------------------------------------
+=======================================
Valid property sets must follow the guidance given by the Device Properties UUID
definition document [1].
@@ -73,7 +76,7 @@ suitable for the ACPI environment and consequently they cannot belong to a valid
property set.
Property Sets and Device Tree Bindings
---------------------------------------
+======================================
It often is useful to make _DSD return property sets that follow Device Tree
bindings.
@@ -91,7 +94,7 @@ expected to automatically work in the ACPI environment regardless of their
contents.
References
-----------
+==========
-[1] http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
-[2] http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf
+.. [1] http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
+.. [2] http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf
diff --git a/Documentation/acpi/acpi-lid.txt b/Documentation/firmware-guide/acpi/acpi-lid.rst
index effe7af3a5af..874ce0ed340d 100644
--- a/Documentation/acpi/acpi-lid.txt
+++ b/Documentation/firmware-guide/acpi/acpi-lid.rst
@@ -1,13 +1,18 @@
-Special Usage Model of the ACPI Control Method Lid Device
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
-Copyright (C) 2016, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
+=========================================================
+Special Usage Model of the ACPI Control Method Lid Device
+=========================================================
+:Copyright: |copy| 2016, Intel Corporation
-Abstract:
+:Author: Lv Zheng <lv.zheng@intel.com>
-Platforms containing lids convey lid state (open/close) to OSPMs using a
-control method lid device. To implement this, the AML tables issue
+Abstract
+========
+Platforms containing lids convey lid state (open/close) to OSPMs
+using a control method lid device. To implement this, the AML tables issue
Notify(lid_device, 0x80) to notify the OSPMs whenever the lid state has
changed. The _LID control method for the lid device must be implemented to
report the "current" state of the lid as either "opened" or "closed".
@@ -19,7 +24,8 @@ taken into account. This document describes the restrictions and the
expections of the Linux ACPI lid device driver.
-1. Restrictions of the returning value of the _LID control method
+Restrictions of the returning value of the _LID control method
+==============================================================
The _LID control method is described to return the "current" lid state.
However the word of "current" has ambiguity, some buggy AML tables return
@@ -30,7 +36,8 @@ initial returning value. When the AML tables implement this control method
with cached value, the initial returning value is likely not reliable.
There are platforms always retun "closed" as initial lid state.
-2. Restrictions of the lid state change notifications
+Restrictions of the lid state change notifications
+==================================================
There are buggy AML tables never notifying when the lid device state is
changed to "opened". Thus the "opened" notification is not guaranteed. But
@@ -39,18 +46,22 @@ state is changed to "closed". The "closed" notification is normally used to
trigger some system power saving operations on Windows. Since it is fully
tested, it is reliable from all AML tables.
-3. Expections for the userspace users of the ACPI lid device driver
+Expectations for the userspace users of the ACPI lid device driver
+==================================================================
The ACPI button driver exports the lid state to the userspace via the
-following file:
+following file::
+
/proc/acpi/button/lid/LID0/state
+
This file actually calls the _LID control method described above. And given
the previous explanation, it is not reliable enough on some platforms. So
it is advised for the userspace program to not to solely rely on this file
to determine the actual lid state.
The ACPI button driver emits the following input event to the userspace:
- SW_LID
+ * SW_LID
+
The ACPI lid device driver is implemented to try to deliver the platform
triggered events to the userspace. However, given the fact that the buggy
firmware cannot make sure "opened"/"closed" events are paired, the ACPI
@@ -59,20 +70,25 @@ button driver uses the following 3 modes in order not to trigger issues.
If the userspace hasn't been prepared to ignore the unreliable "opened"
events and the unreliable initial state notification, Linux users can use
the following kernel parameters to handle the possible issues:
+
A. button.lid_init_state=method:
When this option is specified, the ACPI button driver reports the
initial lid state using the returning value of the _LID control method
and whether the "opened"/"closed" events are paired fully relies on the
firmware implementation.
+
This option can be used to fix some platforms where the returning value
of the _LID control method is reliable but the initial lid state
notification is missing.
+
This option is the default behavior during the period the userspace
isn't ready to handle the buggy AML tables.
+
B. button.lid_init_state=open:
When this option is specified, the ACPI button driver always reports the
initial lid state as "opened" and whether the "opened"/"closed" events
are paired fully relies on the firmware implementation.
+
This may fix some platforms where the returning value of the _LID
control method is not reliable and the initial lid state notification is
missing.
@@ -80,6 +96,7 @@ B. button.lid_init_state=open:
If the userspace has been prepared to ignore the unreliable "opened" events
and the unreliable initial state notification, Linux users should always
use the following kernel parameter:
+
C. button.lid_init_state=ignore:
When this option is specified, the ACPI button driver never reports the
initial lid state and there is a compensation mechanism implemented to
@@ -89,6 +106,7 @@ C. button.lid_init_state=ignore:
notifications can be delivered to the userspace when the lid is actually
opens given that some AML tables do not send "opened" notifications
reliably.
+
In this mode, if everything is correctly implemented by the platform
firmware, the old userspace programs should still work. Otherwise, the
new userspace programs are required to work with the ACPI button driver.
diff --git a/Documentation/firmware-guide/acpi/aml-debugger.rst b/Documentation/firmware-guide/acpi/aml-debugger.rst
new file mode 100644
index 000000000000..a889d43bc6c5
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/aml-debugger.rst
@@ -0,0 +1,75 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+================
+The AML Debugger
+================
+
+:Copyright: |copy| 2016, Intel Corporation
+:Author: Lv Zheng <lv.zheng@intel.com>
+
+
+This document describes the usage of the AML debugger embedded in the Linux
+kernel.
+
+1. Build the debugger
+=====================
+
+The following kernel configuration items are required to enable the AML
+debugger interface from the Linux kernel::
+
+ CONFIG_ACPI_DEBUGGER=y
+ CONFIG_ACPI_DEBUGGER_USER=m
+
+The userspace utilities can be built from the kernel source tree using
+the following commands::
+
+ $ cd tools
+ $ make acpi
+
+The resultant userspace tool binary is then located at::
+
+ tools/power/acpi/acpidbg
+
+It can be installed to system directories by running "make install" (as a
+sufficiently privileged user).
+
+2. Start the userspace debugger interface
+=========================================
+
+After booting the kernel with the debugger built-in, the debugger can be
+started by using the following commands::
+
+ # mount -t debugfs none /sys/kernel/debug
+ # modprobe acpi_dbg
+ # tools/power/acpi/acpidbg
+
+That spawns the interactive AML debugger environment where you can execute
+debugger commands.
+
+The commands are documented in the "ACPICA Overview and Programmer Reference"
+that can be downloaded from
+
+https://acpica.org/documentation
+
+The detailed debugger commands reference is located in Chapter 12 "ACPICA
+Debugger Reference". The "help" command can be used for a quick reference.
+
+3. Stop the userspace debugger interface
+========================================
+
+The interactive debugger interface can be closed by pressing Ctrl+C or using
+the "quit" or "exit" commands. When finished, unload the module with::
+
+ # rmmod acpi_dbg
+
+The module unloading may fail if there is an acpidbg instance running.
+
+4. Run the debugger in a script
+===============================
+
+It may be useful to run the AML debugger in a test script. "acpidbg" supports
+this in a special "batch" mode. For example, the following command outputs
+the entire ACPI namespace::
+
+ # acpidbg -b "namespace"
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/firmware-guide/acpi/apei/einj.rst
index e550c8b98139..e588bccf5158 100644
--- a/Documentation/acpi/apei/einj.txt
+++ b/Documentation/firmware-guide/acpi/apei/einj.rst
@@ -1,13 +1,16 @@
- APEI Error INJection
- ~~~~~~~~~~~~~~~~~~~~
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+APEI Error INJection
+====================
EINJ provides a hardware error injection mechanism. It is very useful
for debugging and testing APEI and RAS features in general.
You need to check whether your BIOS supports EINJ first. For that, look
-for early boot messages similar to this one:
+for early boot messages similar to this one::
-ACPI: EINJ 0x000000007370A000 000150 (v01 INTEL 00000001 INTL 00000001)
+ ACPI: EINJ 0x000000007370A000 000150 (v01 INTEL 00000001 INTL 00000001)
which shows that the BIOS is exposing an EINJ table - it is the
mechanism through which the injection is done.
@@ -23,11 +26,11 @@ order to see the APEI,EINJ,... functionality supported and exposed by
the BIOS menu.
To use EINJ, make sure the following are options enabled in your kernel
-configuration:
+configuration::
-CONFIG_DEBUG_FS
-CONFIG_ACPI_APEI
-CONFIG_ACPI_APEI_EINJ
+ CONFIG_DEBUG_FS
+ CONFIG_ACPI_APEI
+ CONFIG_ACPI_APEI_EINJ
The EINJ user interface is in <debugfs mount point>/apei/einj.
@@ -37,20 +40,22 @@ The following files belong to it:
This file shows which error types are supported:
+ ================ ===================================
Error Type Value Error Description
- ================ =================
- 0x00000001 Processor Correctable
- 0x00000002 Processor Uncorrectable non-fatal
- 0x00000004 Processor Uncorrectable fatal
- 0x00000008 Memory Correctable
- 0x00000010 Memory Uncorrectable non-fatal
- 0x00000020 Memory Uncorrectable fatal
- 0x00000040 PCI Express Correctable
- 0x00000080 PCI Express Uncorrectable fatal
- 0x00000100 PCI Express Uncorrectable non-fatal
- 0x00000200 Platform Correctable
- 0x00000400 Platform Uncorrectable non-fatal
- 0x00000800 Platform Uncorrectable fatal
+ ================ ===================================
+ 0x00000001 Processor Correctable
+ 0x00000002 Processor Uncorrectable non-fatal
+ 0x00000004 Processor Uncorrectable fatal
+ 0x00000008 Memory Correctable
+ 0x00000010 Memory Uncorrectable non-fatal
+ 0x00000020 Memory Uncorrectable fatal
+ 0x00000040 PCI Express Correctable
+ 0x00000080 PCI Express Uncorrectable fatal
+ 0x00000100 PCI Express Uncorrectable non-fatal
+ 0x00000200 Platform Correctable
+ 0x00000400 Platform Uncorrectable non-fatal
+ 0x00000800 Platform Uncorrectable fatal
+ ================ ===================================
The format of the file contents are as above, except present are only
the available error types.
@@ -73,9 +78,12 @@ The following files belong to it:
injection. Value is a bitmask as specified in ACPI5.0 spec for the
SET_ERROR_TYPE_WITH_ADDRESS data structure:
- Bit 0 - Processor APIC field valid (see param3 below).
- Bit 1 - Memory address and mask valid (param1 and param2).
- Bit 2 - PCIe (seg,bus,dev,fn) valid (see param4 below).
+ Bit 0
+ Processor APIC field valid (see param3 below).
+ Bit 1
+ Memory address and mask valid (param1 and param2).
+ Bit 2
+ PCIe (seg,bus,dev,fn) valid (see param4 below).
If set to zero, legacy behavior is mimicked where the type of
injection specifies just one bit set, and param1 is multiplexed.
@@ -121,7 +129,7 @@ BIOS versions based on the ACPI 5.0 specification have more control over
the target of the injection. For processor-related errors (type 0x1, 0x2
and 0x4), you can set flags to 0x3 (param3 for bit 0, and param1 and
param2 for bit 1) so that you have more information added to the error
-signature being injected. The actual data passed is this:
+signature being injected. The actual data passed is this::
memory_address = param1;
memory_address_range = param2;
@@ -131,7 +139,7 @@ signature being injected. The actual data passed is this:
For memory errors (type 0x8, 0x10 and 0x20) the address is set using
param1 with a mask in param2 (0x0 is equivalent to all ones). For PCI
express errors (type 0x40, 0x80 and 0x100) the segment, bus, device and
-function are specified using param1:
+function are specified using param1::
31 24 23 16 15 11 10 8 7 0
+-------------------------------------------------+
@@ -152,26 +160,26 @@ documentation for details (and expect changes to this API if vendors
creativity in using this feature expands beyond our expectations).
-An error injection example:
+An error injection example::
-# cd /sys/kernel/debug/apei/einj
-# cat available_error_type # See which errors can be injected
-0x00000002 Processor Uncorrectable non-fatal
-0x00000008 Memory Correctable
-0x00000010 Memory Uncorrectable non-fatal
-# echo 0x12345000 > param1 # Set memory address for injection
-# echo $((-1 << 12)) > param2 # Mask 0xfffffffffffff000 - anywhere in this page
-# echo 0x8 > error_type # Choose correctable memory error
-# echo 1 > error_inject # Inject now
+ # cd /sys/kernel/debug/apei/einj
+ # cat available_error_type # See which errors can be injected
+ 0x00000002 Processor Uncorrectable non-fatal
+ 0x00000008 Memory Correctable
+ 0x00000010 Memory Uncorrectable non-fatal
+ # echo 0x12345000 > param1 # Set memory address for injection
+ # echo $((-1 << 12)) > param2 # Mask 0xfffffffffffff000 - anywhere in this page
+ # echo 0x8 > error_type # Choose correctable memory error
+ # echo 1 > error_inject # Inject now
-You should see something like this in dmesg:
+You should see something like this in dmesg::
-[22715.830801] EDAC sbridge MC3: HANDLING MCE MEMORY ERROR
-[22715.834759] EDAC sbridge MC3: CPU 0: Machine Check Event: 0 Bank 7: 8c00004000010090
-[22715.834759] EDAC sbridge MC3: TSC 0
-[22715.834759] EDAC sbridge MC3: ADDR 12345000 EDAC sbridge MC3: MISC 144780c86
-[22715.834759] EDAC sbridge MC3: PROCESSOR 0:306e7 TIME 1422553404 SOCKET 0 APIC 0
-[22716.616173] EDAC MC3: 1 CE memory read error on CPU_SrcID#0_Channel#0_DIMM#0 (channel:0 slot:0 page:0x12345 offset:0x0 grain:32 syndrome:0x0 - area:DRAM err_code:0001:0090 socket:0 channel_mask:1 rank:0)
+ [22715.830801] EDAC sbridge MC3: HANDLING MCE MEMORY ERROR
+ [22715.834759] EDAC sbridge MC3: CPU 0: Machine Check Event: 0 Bank 7: 8c00004000010090
+ [22715.834759] EDAC sbridge MC3: TSC 0
+ [22715.834759] EDAC sbridge MC3: ADDR 12345000 EDAC sbridge MC3: MISC 144780c86
+ [22715.834759] EDAC sbridge MC3: PROCESSOR 0:306e7 TIME 1422553404 SOCKET 0 APIC 0
+ [22716.616173] EDAC MC3: 1 CE memory read error on CPU_SrcID#0_Channel#0_DIMM#0 (channel:0 slot:0 page:0x12345 offset:0x0 grain:32 syndrome:0x0 - area:DRAM err_code:0001:0090 socket:0 channel_mask:1 rank:0)
For more information about EINJ, please refer to ACPI specification
version 4.0, section 17.5 and ACPI 5.0, section 18.6.
diff --git a/Documentation/firmware-guide/acpi/apei/output_format.rst b/Documentation/firmware-guide/acpi/apei/output_format.rst
new file mode 100644
index 000000000000..c2e7ebddb529
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/apei/output_format.rst
@@ -0,0 +1,150 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+APEI output format
+==================
+
+APEI uses printk as the hardware error reporting interface; the output
+format is as follows::
+
+ <error record> :=
+ APEI generic hardware error status
+ severity: <integer>, <severity string>
+ section: <integer>, severity: <integer>, <severity string>
+ flags: <integer>
+ <section flags strings>
+ fru_id: <uuid string>
+ fru_text: <string>
+ section_type: <section type string>
+ <section data>
+
+ <severity string>* := recoverable | fatal | corrected | info
+
+ <section flags strings># :=
+ [primary][, containment warning][, reset][, threshold exceeded]\
+ [, resource not accessible][, latent error]
+
+ <section type string> := generic processor error | memory error | \
+ PCIe error | unknown, <uuid string>
+
+ <section data> :=
+ <generic processor section data> | <memory section data> | \
+ <pcie section data> | <null>
+
+ <generic processor section data> :=
+ [processor_type: <integer>, <proc type string>]
+ [processor_isa: <integer>, <proc isa string>]
+ [error_type: <integer>
+ <proc error type strings>]
+ [operation: <integer>, <proc operation string>]
+ [flags: <integer>
+ <proc flags strings>]
+ [level: <integer>]
+ [version_info: <integer>]
+ [processor_id: <integer>]
+ [target_address: <integer>]
+ [requestor_id: <integer>]
+ [responder_id: <integer>]
+ [IP: <integer>]
+
+ <proc type string>* := IA32/X64 | IA64
+
+ <proc isa string>* := IA32 | IA64 | X64
+
+ <processor error type strings># :=
+ [cache error][, TLB error][, bus error][, micro-architectural error]
+
+ <proc operation string>* := unknown or generic | data read | data write | \
+ instruction execution
+
+ <proc flags strings># :=
+ [restartable][, precise IP][, overflow][, corrected]
+
+ <memory section data> :=
+ [error_status: <integer>]
+ [physical_address: <integer>]
+ [physical_address_mask: <integer>]
+ [node: <integer>]
+ [card: <integer>]
+ [module: <integer>]
+ [bank: <integer>]
+ [device: <integer>]
+ [row: <integer>]
+ [column: <integer>]
+ [bit_position: <integer>]
+ [requestor_id: <integer>]
+ [responder_id: <integer>]
+ [target_id: <integer>]
+ [error_type: <integer>, <mem error type string>]
+
+ <mem error type string>* :=
+ unknown | no error | single-bit ECC | multi-bit ECC | \
+ single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \
+ target abort | parity error | watchdog timeout | invalid address | \
+ mirror Broken | memory sparing | scrub corrected error | \
+ scrub uncorrected error
+
+ <pcie section data> :=
+ [port_type: <integer>, <pcie port type string>]
+ [version: <integer>.<integer>]
+ [command: <integer>, status: <integer>]
+ [device_id: <integer>:<integer>:<integer>.<integer>
+ slot: <integer>
+ secondary_bus: <integer>
+ vendor_id: <integer>, device_id: <integer>
+ class_code: <integer>]
+ [serial number: <integer>, <integer>]
+ [bridge: secondary_status: <integer>, control: <integer>]
+ [aer_status: <integer>, aer_mask: <integer>
+ <aer status string>
+ [aer_uncor_severity: <integer>]
+ aer_layer=<aer layer string>, aer_agent=<aer agent string>
+ aer_tlp_header: <integer> <integer> <integer> <integer>]
+
+ <pcie port type string>* := PCIe end point | legacy PCI end point | \
+ unknown | unknown | root port | upstream switch port | \
+ downstream switch port | PCIe to PCI/PCI-X bridge | \
+ PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
+ root complex event collector
+
+ if section severity is fatal or recoverable
+ <aer status string># :=
+ unknown | unknown | unknown | unknown | Data Link Protocol | \
+ unknown | unknown | unknown | unknown | unknown | unknown | unknown | \
+ Poisoned TLP | Flow Control Protocol | Completion Timeout | \
+ Completer Abort | Unexpected Completion | Receiver Overflow | \
+ Malformed TLP | ECRC | Unsupported Request
+ else
+ <aer status string># :=
+ Receiver Error | unknown | unknown | unknown | unknown | unknown | \
+ Bad TLP | Bad DLLP | RELAY_NUM Rollover | unknown | unknown | unknown | \
+ Replay Timer Timeout | Advisory Non-Fatal
+ fi
+
+ <aer layer string> :=
+ Physical Layer | Data Link Layer | Transaction Layer
+
+ <aer agent string> :=
+ Receiver ID | Requester ID | Completer ID | Transmitter ID
+
+Where [] designates that the corresponding content is optional.
+
+All <field string> descriptions with * have the following format::
+
+ field: <integer>, <field string>
+
+Where the value of <integer> should be the position of "string" in the
+<field string> description. Otherwise, <field string> will be "unknown".
+
+All <field strings> descriptions with # have the following format::
+
+ field: <integer>
+ <field strings>
+
+Where each string in <field strings> corresponds to one set bit of
+<integer>. The bit position is the position of "string" in the <field
+strings> description.
+
+For more detailed explanation of every field, please refer to UEFI
+specification version 2.3 or later, section Appendix N: Common
+Platform Error Record.
diff --git a/Documentation/acpi/debug.txt b/Documentation/firmware-guide/acpi/debug.rst
index 65bf47c46b6d..1a152dd1d765 100644
--- a/Documentation/acpi/debug.txt
+++ b/Documentation/firmware-guide/acpi/debug.rst
@@ -1,18 +1,21 @@
- ACPI Debug Output
+.. SPDX-License-Identifier: GPL-2.0
+=================
+ACPI Debug Output
+=================
The ACPI CA, the Linux ACPI core, and some ACPI drivers can generate debug
output. This document describes how to use this facility.
Compile-time configuration
---------------------------
+==========================
ACPI debug output is globally enabled by CONFIG_ACPI_DEBUG. If this config
option is turned off, the debug messages are not even built into the
kernel.
Boot- and run-time configuration
---------------------------------
+================================
When CONFIG_ACPI_DEBUG=y, you can select the component and level of messages
you're interested in. At boot-time, use the acpi.debug_layer and
@@ -21,7 +24,7 @@ debug_layer and debug_level files in /sys/module/acpi/parameters/ to control
the debug messages.
debug_layer (component)
------------------------
+=======================
The "debug_layer" is a mask that selects components of interest, e.g., a
specific driver or part of the ACPI interpreter. To build the debug_layer
@@ -33,7 +36,7 @@ to /sys/module/acpi/parameters/debug_layer.
The possible components are defined in include/acpi/acoutput.h and
include/acpi/acpi_drivers.h. Reading /sys/module/acpi/parameters/debug_layer
-shows the supported mask values, currently these:
+shows the supported mask values, currently these::
ACPI_UTILITIES 0x00000001
ACPI_HARDWARE 0x00000002
@@ -65,7 +68,7 @@ shows the supported mask values, currently these:
ACPI_PROCESSOR_COMPONENT 0x20000000
debug_level
------------
+===========
The "debug_level" is a mask that selects different types of messages, e.g.,
those related to initialization, method execution, informational messages, etc.
@@ -81,7 +84,7 @@ to /sys/module/acpi/parameters/debug_level.
The possible levels are defined in include/acpi/acoutput.h. Reading
/sys/module/acpi/parameters/debug_level shows the supported mask values,
-currently these:
+currently these::
ACPI_LV_INIT 0x00000001
ACPI_LV_DEBUG_OBJECT 0x00000002
@@ -113,9 +116,9 @@ currently these:
ACPI_LV_EVENTS 0x80000000
Examples
---------
+========
-For example, drivers/acpi/bus.c contains this:
+For example, drivers/acpi/bus.c contains this::
#define _COMPONENT ACPI_BUS_COMPONENT
...
@@ -127,22 +130,22 @@ statement uses ACPI_DB_INFO, which is macro based on the ACPI_LV_INFO
definition.)
Enable all AML "Debug" output (stores to the Debug object while interpreting
-AML) during boot:
+AML) during boot::
acpi.debug_layer=0xffffffff acpi.debug_level=0x2
-Enable PCI and PCI interrupt routing debug messages:
+Enable PCI and PCI interrupt routing debug messages::
acpi.debug_layer=0x400000 acpi.debug_level=0x4
-Enable all ACPI hardware-related messages:
+Enable all ACPI hardware-related messages::
acpi.debug_layer=0x2 acpi.debug_level=0xffffffff
-Enable all ACPI_DB_INFO messages after boot:
+Enable all ACPI_DB_INFO messages after boot::
# echo 0x4 > /sys/module/acpi/parameters/debug_level
-Show all valid component values:
+Show all valid component values::
# cat /sys/module/acpi/parameters/debug_layer
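+
+Enable messages from two components at once by ORing their mask values
+(a sketch using the masks listed above, here ACPI_UTILITIES |
+ACPI_HARDWARE = 0x3)::
+
+	# echo 0x3 > /sys/module/acpi/parameters/debug_layer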
diff --git a/Documentation/acpi/dsd/data-node-references.txt b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
index c3871565c8cf..1351984e767c 100644
--- a/Documentation/acpi/dsd/data-node-references.txt
+++ b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
@@ -1,9 +1,12 @@
-Copyright (C) 2018 Intel Corporation
-Author: Sakari Ailus <sakari.ailus@linux.intel.com>
-
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+===================================
Referencing hierarchical data nodes
------------------------------------
+===================================
+
+:Copyright: |copy| 2018 Intel Corporation
+:Author: Sakari Ailus <sakari.ailus@linux.intel.com>
ACPI in general allows referring to device objects in the tree only.
Hierarchical data extension nodes may not be referred to directly, hence this
@@ -28,13 +31,14 @@ extension key.
Example
--------
+=======
- In the ASL snippet below, the "reference" _DSD property [2] contains a
- device object reference to DEV0 and under that device object, a
- hierarchical data extension key "node@1" referring to the NOD1 object
- and lastly, a hierarchical data extension key "anothernode" referring to
- the ANOD object which is also the final target node of the reference.
+In the ASL snippet below, the "reference" _DSD property [2] contains a
+device object reference to DEV0 and under that device object, a
+hierarchical data extension key "node@1" referring to the NOD1 object
+and lastly, a hierarchical data extension key "anothernode" referring to
+the ANOD object which is also the final target node of the reference.
+::
Device (DEV0)
{
@@ -75,15 +79,15 @@ Example
})
}
-Please also see a graph example in graph.txt .
+Please also see a graph example in :doc:`graph`.
References
-----------
+==========
[1] Hierarchical Data Extension UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
- referenced 2018-07-17.
+<http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
+referenced 2018-07-17.
[2] Device Properties UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf>,
- referenced 2016-10-04.
+<http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf>,
+referenced 2016-10-04.
diff --git a/Documentation/acpi/dsd/graph.txt b/Documentation/firmware-guide/acpi/dsd/graph.rst
index b9ce910781dc..e0baed35b037 100644
--- a/Documentation/acpi/dsd/graph.txt
+++ b/Documentation/firmware-guide/acpi/dsd/graph.rst
@@ -1,8 +1,11 @@
-Graphs
+.. SPDX-License-Identifier: GPL-2.0
+======
+Graphs
+======
_DSD
-----
+====
_DSD (Device Specific Data) [7] is a predefined ACPI device
configuration object that can be used to convey information on
@@ -30,7 +33,7 @@ hierarchical data extension array on each depth.
Ports and endpoints
--------------------
+===================
The port and endpoint concepts are very similar to those in Devicetree
[3]. A port represents an interface in a device, and an endpoint
@@ -38,9 +41,9 @@ represents a connection to that interface.
All port nodes are located under the device's "_DSD" node in the hierarchical
data extension tree. The data extension related to each port node must begin
-with "port" and must be followed by the "@" character and the number of the port
-as its key. The target object it refers to should be called "PRTX", where "X" is
-the number of the port. An example of such a package would be:
+with "port" and must be followed by the "@" character and the number of the
+port as its key. The target object it refers to should be called "PRTX", where
+"X" is the number of the port. An example of such a package would be::
Package() { "port@4", PRT4 }
@@ -49,7 +52,7 @@ data extension key of the endpoint nodes must begin with
"endpoint" and must be followed by the "@" character and the number of the
endpoint. The object it refers to should be called "EPXY", where "X" is the
number of the port and "Y" is the number of the endpoint. An example of such a
-package would be:
+package would be::
Package() { "endpoint@0", EP40 }
@@ -62,85 +65,85 @@ of that port shall be zero. Similarly, if a port may only have a single
endpoint, the number of that endpoint shall be zero.
The endpoint reference uses property extension with "remote-endpoint" property
-name followed by a reference in the same package. Such references consist of the
+name followed by a reference in the same package. Such references consist of
the remote device reference, the first package entry of the port data extension
reference under the device and finally the first package entry of the endpoint
-data extension reference under the port. Individual references thus appear as:
+data extension reference under the port. Individual references thus appear as::
Package() { device, "port@X", "endpoint@Y" }
-In the above example, "X" is the number of the port and "Y" is the number of the
-endpoint.
+In the above example, "X" is the number of the port and "Y" is the number of
+the endpoint.
The references to endpoints must be always done both ways, to the
remote endpoint and back from the referred remote endpoint node.
-A simple example of this is show below:
+A simple example of this is shown below::
Scope (\_SB.PCI0.I2C2)
{
- Device (CAM0)
- {
- Name (_DSD, Package () {
- ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
- Package () {
- Package () { "compatible", Package () { "nokia,smia" } },
- },
- ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
- Package () {
- Package () { "port@0", PRT0 },
- }
- })
- Name (PRT0, Package() {
- ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
- Package () {
- Package () { "reg", 0 },
- },
- ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
- Package () {
- Package () { "endpoint@0", EP00 },
- }
- })
- Name (EP00, Package() {
- ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
- Package () {
- Package () { "reg", 0 },
- Package () { "remote-endpoint", Package() { \_SB.PCI0.ISP, "port@4", "endpoint@0" } },
- }
- })
- }
+ Device (CAM0)
+ {
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "compatible", Package () { "nokia,smia" } },
+ },
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () { "port@0", PRT0 },
+ }
+ })
+ Name (PRT0, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "reg", 0 },
+ },
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () { "endpoint@0", EP00 },
+ }
+ })
+ Name (EP00, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "reg", 0 },
+ Package () { "remote-endpoint", Package() { \_SB.PCI0.ISP, "port@4", "endpoint@0" } },
+ }
+ })
+ }
}
Scope (\_SB.PCI0)
{
- Device (ISP)
- {
- Name (_DSD, Package () {
- ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
- Package () {
- Package () { "port@4", PRT4 },
- }
- })
-
- Name (PRT4, Package() {
- ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
- Package () {
- Package () { "reg", 4 }, /* CSI-2 port number */
- },
- ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
- Package () {
- Package () { "endpoint@0", EP40 },
- }
- })
-
- Name (EP40, Package() {
- ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
- Package () {
- Package () { "reg", 0 },
- Package () { "remote-endpoint", Package () { \_SB.PCI0.I2C2.CAM0, "port@0", "endpoint@0" } },
- }
- })
- }
+ Device (ISP)
+ {
+ Name (_DSD, Package () {
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () { "port@4", PRT4 },
+ }
+ })
+
+ Name (PRT4, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "reg", 4 }, /* CSI-2 port number */
+ },
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () { "endpoint@0", EP40 },
+ }
+ })
+
+ Name (EP40, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "reg", 0 },
+ Package () { "remote-endpoint", Package () { \_SB.PCI0.I2C2.CAM0, "port@0", "endpoint@0" } },
+ }
+ })
+ }
}
Here, the port 0 of the "CAM0" device is connected to the port 4 of
@@ -148,27 +151,27 @@ the "ISP" device and vice versa.
References
-----------
+==========
[1] _DSD (Device Specific Data) Implementation Guide.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-implementation-guide-toplevel-1_1.htm>,
+ http://www.uefi.org/sites/default/files/resources/_DSD-implementation-guide-toplevel-1_1.htm,
referenced 2016-10-03.
-[2] Devicetree. <URL:http://www.devicetree.org>, referenced 2016-10-03.
+[2] Devicetree. http://www.devicetree.org, referenced 2016-10-03.
[3] Documentation/devicetree/bindings/graph.txt
[4] Device Properties UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf>,
+ http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf,
referenced 2016-10-04.
[5] Hierarchical Data Extension UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
+ http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf,
referenced 2016-10-04.
[6] Advanced Configuration and Power Interface Specification.
- <URL:http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf>,
+ http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf,
referenced 2016-10-04.
[7] _DSD Device Properties Usage Rules.
- Documentation/acpi/DSD-properties-rules.txt
+ :doc:`../DSD-properties-rules`
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/firmware-guide/acpi/enumeration.rst
index 7bcf9c3d9fbe..6b32b7be8c85 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/firmware-guide/acpi/enumeration.rst
@@ -1,5 +1,9 @@
-ACPI based device enumeration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================
+ACPI Based Device Enumeration
+=============================
+
ACPI 5 introduced a set of new resources (UartTSerialBus, I2cSerialBus,
SpiSerialBus, GpioIo and GpioInt) which can be used in enumerating slave
devices behind serial bus controllers.
@@ -11,12 +15,12 @@ that are accessed through memory-mapped registers.
In order to support this and re-use the existing drivers as much as
possible we decided to do following:
- o Devices that have no bus connector resource are represented as
- platform devices.
+ - Devices that have no bus connector resource are represented as
+ platform devices.
- o Devices behind real busses where there is a connector resource
- are represented as struct spi_device or struct i2c_device
- (standard UARTs are not busses so there is no struct uart_device).
+ - Devices behind real busses where there is a connector resource
+ are represented as struct spi_device or struct i2c_device
+ (standard UARTs are not busses so there is no struct uart_device).
As both ACPI and Device Tree represent a tree of devices (and their
resources) this implementation follows the Device Tree way as much as
@@ -31,7 +35,8 @@ enumerated from ACPI namespace. This handle can be used to extract other
device-specific configuration. There is an example of this below.
Platform bus support
-~~~~~~~~~~~~~~~~~~~~
+====================
+
Since we are using platform devices to represent devices that are not
connected to any physical bus we only need to implement a platform driver
for the device and add supported ACPI IDs. If this same IP-block is used on
@@ -39,7 +44,7 @@ some other non-ACPI platform, the driver might work out of the box or needs
some minor changes.
Adding ACPI support for an existing driver should be pretty
-straightforward. Here is the simplest example:
+straightforward. Here is the simplest example::
#ifdef CONFIG_ACPI
static const struct acpi_device_id mydrv_acpi_match[] = {
@@ -61,12 +66,13 @@ configuring GPIOs it can get its ACPI handle and extract this information
from ACPI tables.
DMA support
-~~~~~~~~~~~
+===========
+
DMA controllers enumerated via ACPI should be registered in the system to
provide generic access to their resources. For example, a driver that would
like to be accessible to slave devices via generic API call
dma_request_slave_channel() must register itself at the end of the probe
-function like this:
+function like this::
err = devm_acpi_dma_controller_register(dev, xlate_func, dw);
/* Handle the error if it's not a case of !CONFIG_ACPI */
@@ -74,7 +80,7 @@ function like this:
and implement custom xlate function if needed (usually acpi_dma_simple_xlate()
is enough) which converts the FixedDMA resource provided by struct
acpi_dma_spec into the corresponding DMA channel. A piece of code for that case
-could look like:
+could look like::
#ifdef CONFIG_ACPI
struct filter_args {
@@ -114,7 +120,7 @@ provided by struct acpi_dma.
Clients must call dma_request_slave_channel() with the string parameter that
corresponds to a specific FixedDMA resource. By default "tx" means the first
entry of the FixedDMA resource array, "rx" means the second entry. The table
-below shows a layout:
+below shows a layout::
Device (I2C0)
{
@@ -138,12 +144,13 @@ acpi_dma_request_slave_chan_by_index() directly and therefore choose the
specific FixedDMA resource by its index.
SPI serial bus support
-~~~~~~~~~~~~~~~~~~~~~~
+======================
+
Slave devices behind SPI bus have SpiSerialBus resource attached to them.
This is extracted automatically by the SPI core and the slave devices are
enumerated once spi_register_master() is called by the bus driver.
-Here is what the ACPI namespace for a SPI slave might look like:
+Here is what the ACPI namespace for a SPI slave might look like::
Device (EEP0)
{
@@ -163,7 +170,7 @@ Here is what the ACPI namespace for a SPI slave might look like:
The SPI device drivers only need to add ACPI IDs in a similar way than with
the platform device drivers. Below is an example where we add ACPI support
-to at25 SPI eeprom driver (this is meant for the above ACPI snippet):
+to the at25 SPI eeprom driver (this is meant for the above ACPI snippet)::
#ifdef CONFIG_ACPI
static const struct acpi_device_id at25_acpi_match[] = {
@@ -182,7 +189,7 @@ to at25 SPI eeprom driver (this is meant for the above ACPI snippet):
Note that this driver actually needs more information like page size of the
eeprom etc. but at the time writing this there is no standard way of
-passing those. One idea is to return this in _DSM method like:
+passing those. One idea is to return this in a _DSM method like::
Device (EEP0)
{
@@ -202,7 +209,7 @@ passing those. One idea is to return this in _DSM method like:
}
Then the at25 SPI driver can get this configuration by calling _DSM on its
-ACPI handle like:
+ACPI handle like::
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
@@ -220,14 +227,15 @@ ACPI handle like:
kfree(output.pointer);
I2C serial bus support
-~~~~~~~~~~~~~~~~~~~~~~
+======================
+
The slaves behind I2C bus controller only need to add the ACPI IDs like
with the platform and SPI drivers. The I2C core automatically enumerates
any slave devices behind the controller device once the adapter is
registered.
Below is an example of how to add ACPI support to the existing mpu3050
-input driver:
+input driver::
#ifdef CONFIG_ACPI
static const struct acpi_device_id mpu3050_acpi_match[] = {
@@ -251,56 +259,57 @@ input driver:
};
GPIO support
-~~~~~~~~~~~~
+============
+
ACPI 5 introduced two new resources to describe GPIO connections: GpioIo
and GpioInt. These resources can be used to pass GPIO numbers used by
the device to the driver. ACPI 5.1 extended this with _DSD (Device
Specific Data) which made it possible to name the GPIOs among other things.
-For example:
+For example::
-Device (DEV)
-{
- Method (_CRS, 0, NotSerialized)
+ Device (DEV)
{
- Name (SBUF, ResourceTemplate()
+ Method (_CRS, 0, NotSerialized)
{
- ...
- // Used to power on/off the device
- GpioIo (Exclusive, PullDefault, 0x0000, 0x0000,
- IoRestrictionOutputOnly, "\\_SB.PCI0.GPI0",
- 0x00, ResourceConsumer,,)
+ Name (SBUF, ResourceTemplate()
{
- // Pin List
- 0x0055
- }
+ ...
+ // Used to power on/off the device
+ GpioIo (Exclusive, PullDefault, 0x0000, 0x0000,
+ IoRestrictionOutputOnly, "\\_SB.PCI0.GPI0",
+ 0x00, ResourceConsumer,,)
+ {
+ // Pin List
+ 0x0055
+ }
+
+ // Interrupt for the device
+ GpioInt (Edge, ActiveHigh, ExclusiveAndWake, PullNone,
+ 0x0000, "\\_SB.PCI0.GPI0", 0x00, ResourceConsumer,,)
+ {
+ // Pin list
+ 0x0058
+ }
+
+ ...
- // Interrupt for the device
- GpioInt (Edge, ActiveHigh, ExclusiveAndWake, PullNone,
- 0x0000, "\\_SB.PCI0.GPI0", 0x00, ResourceConsumer,,)
- {
- // Pin list
- 0x0058
}
- ...
-
+ Return (SBUF)
}
- Return (SBUF)
- }
-
- // ACPI 5.1 _DSD used for naming the GPIOs
- Name (_DSD, Package ()
- {
- ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
- Package ()
+ // ACPI 5.1 _DSD used for naming the GPIOs
+ Name (_DSD, Package ()
{
- Package () {"power-gpios", Package() {^DEV, 0, 0, 0 }},
- Package () {"irq-gpios", Package() {^DEV, 1, 0, 0 }},
- }
- })
- ...
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package ()
+ {
+ Package () {"power-gpios", Package() {^DEV, 0, 0, 0 }},
+ Package () {"irq-gpios", Package() {^DEV, 1, 0, 0 }},
+ }
+ })
+ ...
These GPIO numbers are controller relative and path "\\_SB.PCI0.GPI0"
specifies the path to the controller. In order to use these GPIOs in Linux
@@ -310,7 +319,7 @@ There is a standard GPIO API for that and is documented in
Documentation/gpio/.
In the above example we can get the corresponding two GPIO descriptors with
-a code like this:
+a code like this::
#include <linux/gpio/consumer.h>
...
@@ -334,21 +343,22 @@ See Documentation/acpi/gpio-properties.txt for more information about the
_DSD binding related to GPIOs.
MFD devices
-~~~~~~~~~~~
+===========
+
The MFD devices register their children as platform devices. For the child
devices there needs to be an ACPI handle that they can use to reference
parts of the ACPI namespace that relate to them. In the Linux MFD subsystem
we provide two ways:
- o The children share the parent ACPI handle.
- o The MFD cell can specify the ACPI id of the device.
+ - The children share the parent ACPI handle.
+ - The MFD cell can specify the ACPI id of the device.
For the first case, the MFD drivers do not need to do anything. The
resulting child platform device will have its ACPI_COMPANION() set to point
to the parent device.
If the ACPI namespace has a device that we can match using an ACPI id or ACPI
-adr, the cell should be set like:
+adr, the cell should be set like::
static struct mfd_cell_acpi_match my_subdevice_cell_acpi_match = {
.pnpid = "XYZ0001",
@@ -366,7 +376,8 @@ the MFD device and if found, that ACPI companion device is bound to the
resulting child platform device.
Device Tree namespace link device ID
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+====================================
+
The Device Tree protocol uses device identification based on the "compatible"
property whose value is a string or an array of strings recognized as device
identifiers by drivers and the driver core. The set of all those strings may be
@@ -410,6 +421,32 @@ Specifically, the device IDs returned by _HID and preceding PRP0001 in the _CID
return package will be checked first. Also in that case the bus type the device
will be enumerated to depends on the device ID returned by _HID.
+For example, the following ACPI sample might be used to enumerate an lm75-type
+I2C temperature sensor and match it to the driver using the Device Tree
+namespace link::
+
+ Device (TMP0)
+ {
+ Name (_HID, "PRP0001")
+ Name (_DSD, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package (2) { "compatible", "ti,tmp75" },
+ }
+ })
+ Method (_CRS, 0, Serialized)
+ {
+ Name (SBUF, ResourceTemplate ()
+ {
+ I2cSerialBusV2 (0x48, ControllerInitiated,
+ 400000, AddressingMode7Bit,
+ "\\_SB.PCI0.I2C1", 0x00,
+ ResourceConsumer, , Exclusive,)
+ })
+ Return (SBUF)
+ }
+ }
+
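+The driver side then only needs a Device Tree match table containing
+that "compatible" string (a minimal sketch; the real lm75 driver's
+table has more entries)::
+
+    static const struct of_device_id lm75_of_match[] = {
+        { .compatible = "ti,tmp75" },
+        { }
+    };
+    MODULE_DEVICE_TABLE(of, lm75_of_match);
+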
It is valid to define device objects with a _HID returning PRP0001 and without
the "compatible" property in the _DSD or a _CID as long as one of their
ancestors provides a _DSD with a valid "compatible" property. Such device
@@ -423,4 +460,4 @@ the _DSD of the device object itself or the _DSD of its ancestor in the
Otherwise, the _DSD itself is regarded as invalid and therefore the "compatible"
property returned by it is meaningless.
-Refer to DSD-properties-rules.txt for more information.
+Refer to :doc:`DSD-properties-rules` for more information.
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/firmware-guide/acpi/gpio-properties.rst
index 88c65cb5bf0a..bb6d74f23ee0 100644
--- a/Documentation/acpi/gpio-properties.txt
+++ b/Documentation/firmware-guide/acpi/gpio-properties.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================
_DSD Device Properties Related to GPIO
---------------------------------------
+======================================
With the release of ACPI 5.1, the _DSD configuration object finally
allows names to be given to GPIOs (and other things as well) returned
@@ -8,7 +11,7 @@ the corresponding GPIO, which is pretty error prone (it depends on
the _CRS output ordering, for example).
With _DSD we can now query GPIOs using a name instead of an integer
-index, like the ASL example below shows:
+index, like the ASL example below shows::
// Bluetooth device with reset and shutdown GPIOs
Device (BTH)
@@ -34,15 +37,19 @@ index, like the ASL example below shows:
})
}
-The format of the supported GPIO property is:
+The format of the supported GPIO property is::
Package () { "name", Package () { ref, index, pin, active_low }}
- ref - The device that has _CRS containing GpioIo()/GpioInt() resources,
- typically this is the device itself (BTH in our case).
- index - Index of the GpioIo()/GpioInt() resource in _CRS starting from zero.
- pin - Pin in the GpioIo()/GpioInt() resource. Typically this is zero.
- active_low - If 1 the GPIO is marked as active_low.
+ref
+ The device that has _CRS containing GpioIo()/GpioInt() resources,
+ typically this is the device itself (BTH in our case).
+index
+ Index of the GpioIo()/GpioInt() resource in _CRS starting from zero.
+pin
+ Pin in the GpioIo()/GpioInt() resource. Typically this is zero.
+active_low
+ If 1 the GPIO is marked as active_low.
Since ACPI GpioIo() resource does not have a field saying whether it is
active low or high, the "active_low" argument can be used here. Setting
@@ -55,7 +62,7 @@ It is possible to leave holes in the array of GPIOs. This is useful in
cases like with SPI host controllers where some chip selects may be
implemented as GPIOs and some as native signals. For example a SPI host
controller can have chip selects 0 and 2 implemented as GPIOs and 1 as
-native:
+native::
Package () {
"cs-gpios",
@@ -67,7 +74,7 @@ native:
}
Other supported properties
---------------------------
+==========================
Following Device Tree compatible device properties are also supported by
_DSD device properties for GPIO controllers:
@@ -78,7 +85,7 @@ _DSD device properties for GPIO controllers:
- input
- line-name
-Example:
+Example::
Name (_DSD, Package () {
// _DSD Hierarchical Properties Extension UUID
@@ -100,7 +107,7 @@ Example:
- gpio-line-names
-Example:
+Example::
Package () {
"gpio-line-names",
@@ -114,7 +121,7 @@ See Documentation/devicetree/bindings/gpio/gpio.txt for more information
about these properties.
ACPI GPIO Mappings Provided by Drivers
---------------------------------------
+======================================
There are systems in which the ACPI tables do not contain _DSD but provide _CRS
with GpioIo()/GpioInt() resources and device drivers still need to work with
@@ -139,16 +146,16 @@ line in that resource starting from zero, and the active-low flag for that line,
respectively, in analogy with the _DSD GPIO property format specified above.
For the example Bluetooth device discussed previously the data structures in
-question would look like this:
+question would look like this::
-static const struct acpi_gpio_params reset_gpio = { 1, 1, false };
-static const struct acpi_gpio_params shutdown_gpio = { 0, 0, false };
+ static const struct acpi_gpio_params reset_gpio = { 1, 1, false };
+ static const struct acpi_gpio_params shutdown_gpio = { 0, 0, false };
-static const struct acpi_gpio_mapping bluetooth_acpi_gpios[] = {
- { "reset-gpios", &reset_gpio, 1 },
- { "shutdown-gpios", &shutdown_gpio, 1 },
- { },
-};
+ static const struct acpi_gpio_mapping bluetooth_acpi_gpios[] = {
+ { "reset-gpios", &reset_gpio, 1 },
+ { "shutdown-gpios", &shutdown_gpio, 1 },
+ { },
+ };
Next, the mapping table needs to be passed as the second argument to
acpi_dev_add_driver_gpios() that will register it with the ACPI device object
@@ -158,12 +165,12 @@ calling acpi_dev_remove_driver_gpios() on the ACPI device object where that
table was previously registered.
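+
+For instance, in a driver's probe path the registration might look like
+this (a sketch; "dev" stands for the driver's struct device)::
+
+	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+					bluetooth_acpi_gpios);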
Using the _CRS fallback
------------------------
+=======================
If a device does not have _DSD or the driver does not create ACPI GPIO
mapping, the Linux GPIO framework refuses to return any GPIOs. This is
because the driver does not know what it actually gets. For example if we
-have a device like below:
+have a device like below::
Device (BTH)
{
@@ -177,7 +184,7 @@ have a device like below:
})
}
-The driver might expect to get the right GPIO when it does:
+The driver might expect to get the right GPIO when it does::
desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -193,22 +200,25 @@ the ACPI GPIO mapping tables are hardly linked to ACPI ID and certain
objects, as listed in the above chapter, of the device in question.
Getting GPIO descriptor
------------------------
+=======================
+
+There are two main approaches to get a GPIO resource from ACPI::
-There are two main approaches to get GPIO resource from ACPI:
- desc = gpiod_get(dev, connection_id, flags);
- desc = gpiod_get_index(dev, connection_id, index, flags);
+ desc = gpiod_get(dev, connection_id, flags);
+ desc = gpiod_get_index(dev, connection_id, index, flags);
We may consider two different cases here, i.e. when connection ID is
provided and otherwise.
-Case 1:
- desc = gpiod_get(dev, "non-null-connection-id", flags);
- desc = gpiod_get_index(dev, "non-null-connection-id", index, flags);
+Case 1::
+
+ desc = gpiod_get(dev, "non-null-connection-id", flags);
+ desc = gpiod_get_index(dev, "non-null-connection-id", index, flags);
+
+Case 2::
-Case 2:
- desc = gpiod_get(dev, NULL, flags);
- desc = gpiod_get_index(dev, NULL, index, flags);
+ desc = gpiod_get(dev, NULL, flags);
+ desc = gpiod_get_index(dev, NULL, index, flags);
Case 1 assumes that corresponding ACPI device description must have
defined device properties and will prevent to getting any GPIO resources
diff --git a/Documentation/firmware-guide/acpi/i2c-muxes.rst b/Documentation/firmware-guide/acpi/i2c-muxes.rst
new file mode 100644
index 000000000000..3a8997ccd7c4
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/i2c-muxes.rst
@@ -0,0 +1,61 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+ACPI I2C Muxes
+==============
+
+Describing an I2C device hierarchy that includes I2C muxes requires an ACPI
+Device () scope per mux channel.
+
+Consider this topology::
+
+ +------+ +------+
+ | SMB1 |-->| MUX0 |--CH00--> i2c client A (0x50)
+ | | | 0x70 |--CH01--> i2c client B (0x50)
+ +------+ +------+
+
+which corresponds to the following ASL::
+
+ Device (SMB1)
+ {
+ Name (_HID, ...)
+ Device (MUX0)
+ {
+ Name (_HID, ...)
+ Name (_CRS, ResourceTemplate () {
+ I2cSerialBus (0x70, ControllerInitiated, I2C_SPEED,
+ AddressingMode7Bit, "^SMB1", 0x00,
+ ResourceConsumer,,)
+            })
+
+ Device (CH00)
+ {
+ Name (_ADR, 0)
+
+ Device (CLIA)
+ {
+ Name (_HID, ...)
+ Name (_CRS, ResourceTemplate () {
+ I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
+ AddressingMode7Bit, "^CH00", 0x00,
+ ResourceConsumer,,)
+                })
+ }
+ }
+
+ Device (CH01)
+ {
+ Name (_ADR, 1)
+
+ Device (CLIB)
+ {
+ Name (_HID, ...)
+ Name (_CRS, ResourceTemplate () {
+ I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
+ AddressingMode7Bit, "^CH01", 0x00,
+ ResourceConsumer,,)
+                })
+ }
+ }
+ }
+ }
diff --git a/Documentation/firmware-guide/acpi/index.rst b/Documentation/firmware-guide/acpi/index.rst
new file mode 100644
index 000000000000..ae609eec4679
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/index.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+ACPI Support
+============
+
+.. toctree::
+ :maxdepth: 1
+
+ namespace
+ dsd/graph
+ dsd/data-node-references
+ enumeration
+ osi
+ method-customizing
+ method-tracing
+ DSD-properties-rules
+ debug
+ aml-debugger
+ apei/output_format
+ apei/einj
+ gpio-properties
+ i2c-muxes
+ acpi-lid
+ lpit
+ video_extension
diff --git a/Documentation/acpi/lpit.txt b/Documentation/firmware-guide/acpi/lpit.rst
index b426398d2e97..aca928fab027 100644
--- a/Documentation/acpi/lpit.txt
+++ b/Documentation/firmware-guide/acpi/lpit.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+Low Power Idle Table (LPIT)
+===========================
+
To enumerate platform Low Power Idle states, Intel platforms are using
“Low Power Idle Table” (LPIT). More details about this table can be
downloaded from:
@@ -8,13 +14,15 @@ Residencies for each low power state can be read via FFH
On platforms supporting S0ix sleep states, there can be two types of
residencies:
-- CPU PKG C10 (Read via FFH interface)
-- Platform Controller Hub (PCH) SLP_S0 (Read via memory mapped interface)
+
+ - CPU PKG C10 (Read via FFH interface)
+ - Platform Controller Hub (PCH) SLP_S0 (Read via memory mapped interface)
The following attributes are added dynamically to the cpuidle
-sysfs attribute group:
- /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
- /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
+sysfs attribute group::
+
+ /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
+ /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
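+
+For example, the accumulated residencies can be read directly (a usage
+sketch; both attributes report time in microseconds)::
+
+	# cat /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us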
The "low_power_idle_cpu_residency_us" attribute shows time spent
by the CPU package in PKG C10
diff --git a/Documentation/firmware-guide/acpi/method-customizing.rst b/Documentation/firmware-guide/acpi/method-customizing.rst
new file mode 100644
index 000000000000..de3ebcaed4cf
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/method-customizing.rst
@@ -0,0 +1,89 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+Linux ACPI Custom Control Method How To
+=======================================
+
+:Author: Zhang Rui <rui.zhang@intel.com>
+
+
+Linux supports customizing ACPI control methods at runtime.
+
+Users can use this to:
+
+1. override an existing method which may not work correctly,
+ or just for debugging purposes.
+2. insert a completely new method in order to create a missing
+ method such as _OFF, _ON, _STA, _INI, etc.
+
+For these cases, it is far simpler to dynamically install a single
+control method rather than override the entire DSDT, because kernel
+rebuild/reboot is not needed and test results can be obtained in minutes.
+
+.. note::
+
+   - Only an ACPI METHOD can be overridden; any other object types,
+     such as "Device" or "OperationRegion", are not recognized.
+     Methods declared inside scope operators are also not supported.
+
+   - The same ACPI control method can be overridden many times, and
+     it is always the latest one that is used by the kernel.
+
+ - To get the ACPI debug object output (Store (AAAA, Debug)),
+ please run::
+
+ echo 1 > /sys/module/acpi/parameters/aml_debug_output
+
+
+1. override an existing method
+==============================
+a) get the ACPI table via ACPI sysfs I/F. e.g. to get the DSDT,
+ just run "cat /sys/firmware/acpi/tables/DSDT > /tmp/dsdt.dat"
+b) disassemble the table by running "iasl -d /tmp/dsdt.dat".
+c) rewrite the ASL code of the method and save it in a new file,
+d) package the new file (psr.asl) to an ACPI table format.
+ Here is an example of a customized \_SB._AC._PSR method::
+
+ DefinitionBlock ("", "SSDT", 1, "", "", 0x20080715)
+ {
+ Method (\_SB_.AC._PSR, 0, NotSerialized)
+ {
+ Store ("In AC _PSR", Debug)
+ Return (ACON)
+ }
+ }
+
+ Note that the full pathname of the method in ACPI namespace
+ should be used.
+e) assemble the file to generate the AML code of the method.
+ e.g. "iasl -vw 6084 psr.asl" (psr.aml is generated as a result)
+ If parameter "-vw 6084" is not supported by your iASL compiler,
+ please try a newer version.
+f) mount debugfs by "mount -t debugfs none /sys/kernel/debug"
+g) override the old method via the debugfs by running
+ "cat /tmp/psr.aml > /sys/kernel/debug/acpi/custom_method"
+
+2. insert a new method
+======================
+This is easier than overriding an existing method.
+We just need to create the ASL code of the method we want to
+insert and then follow steps c) ~ g) in section 1, as shown below.
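+
+For example, a minimal SSDT that inserts a hypothetical \_SB.DEV0._STA
+method (DEV0 is a placeholder device name) could look like::
+
+    DefinitionBlock ("", "SSDT", 1, "", "", 0x20080715)
+    {
+        Method (\_SB_.DEV0._STA, 0, NotSerialized)
+        {
+            Return (0x0F)
+        }
+    }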
+
+3. undo your changes
+====================
+The "undo" operation is not supported for a new inserted method
+right now, i.e. we can not remove a method currently.
+For an overridden method, in order to undo your changes, please
+save a copy of the method original ASL code in step c) section 1,
+and redo step c) ~ g) to override the method with the original one.
+
+
+.. note:: A kernel can run with multiple custom ACPI methods installed,
+   but each individual write to debugfs can implement only a SINGLE
+   method override, i.e. if we want to insert/override multiple
+   ACPI methods, we need to redo steps c) ~ g) multiple times.
+
+.. note:: Be aware that root can misuse this driver to modify arbitrary
+   memory and gain additional rights if root's privileges have been
+   restricted (for example, if root is not allowed to load additional
+   modules after boot).
diff --git a/Documentation/firmware-guide/acpi/method-tracing.rst b/Documentation/firmware-guide/acpi/method-tracing.rst
new file mode 100644
index 000000000000..d0b077b73f5f
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/method-tracing.rst
@@ -0,0 +1,238 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+=====================
+ACPICA Trace Facility
+=====================
+
+:Copyright: |copy| 2015, Intel Corporation
+:Author: Lv Zheng <lv.zheng@intel.com>
+
+
+Abstract
+========
+This document describes the functions and the interfaces of the
+method tracing facility.
+
+Functionalities and usage examples
+==================================
+
+ACPICA provides a method tracing capability, and two functions are
+currently implemented using this capability.
+
+Log reducer
+-----------
+
+The ACPICA subsystem provides debugging output when CONFIG_ACPI_DEBUG
+is enabled. The debugging messages emitted via the ACPI_DEBUG_PRINT()
+macro can be reduced at two levels - the per-component level (known as
+the debug layer, configured via
+/sys/module/acpi/parameters/debug_layer) and the per-type level (known
+as the debug level, configured via /sys/module/acpi/parameters/debug_level).
+
+But when a particular layer/level is applied to control method
+evaluations, the quantity of the debugging output may still be too
+large to fit into the kernel log buffer. The idea thus is to enable the
+particular (normally more detailed) debug layer/level logs only when a
+control method evaluation is started, and to disable the detailed
+logging when the control method evaluation is stopped.
+
+The following command examples illustrate the usage of the "log reducer"
+functionality:
+
+a. Filter out the debug layer/level matched logs when control methods
+ are being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0xXXXXXXXX" > trace_debug_layer
+ # echo "0xYYYYYYYY" > trace_debug_level
+ # echo "enable" > trace_state
+
+b. Filter out the debug layer/level matched logs when the specified
+ control method is being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0xXXXXXXXX" > trace_debug_layer
+ # echo "0xYYYYYYYY" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "method" > /sys/module/acpi/parameters/trace_state
+
+c. Filter out the debug layer/level matched logs when the specified
+ control method is being evaluated for the first time::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0xXXXXXXXX" > trace_debug_layer
+ # echo "0xYYYYYYYY" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "method-once" > /sys/module/acpi/parameters/trace_state
+
+Where:
+
+0xXXXXXXXX/0xYYYYYYYY
+   Refer to :doc:`debug` for possible debug layer/level masking values.
+\PPPP.AAAA.TTTT.HHHH
+   Full path of a control method that can be found in the ACPI
+   namespace. It needn't be an entry of a control method evaluation.
+
+AML tracer
+----------
+
+There are special log entries added by the method tracing facility at
+the "trace points" the AML interpreter starts/stops to execute a control
+method, or an AML opcode. Note that the format of the log entries are
+subject to change::
+
+ [ 0.186427] exdebug-0398 ex_trace_point : Method Begin [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
+ [ 0.186630] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905c88:If] execution.
+ [ 0.186820] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:LEqual] execution.
+ [ 0.187010] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905a20:-NamePath-] execution.
+ [ 0.187214] exdebug-0398 ex_trace_point : Opcode End [0xf5905a20:-NamePath-] execution.
+ [ 0.187407] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
+ [ 0.187594] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
+ [ 0.187789] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:LEqual] execution.
+ [ 0.187980] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:Return] execution.
+ [ 0.188146] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
+ [ 0.188334] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
+ [ 0.188524] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:Return] execution.
+ [ 0.188712] exdebug-0398 ex_trace_point : Opcode End [0xf5905c88:If] execution.
+ [ 0.188903] exdebug-0398 ex_trace_point : Method End [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
+
+Developers can utilize these special log entries to track the AML
+interpretation, which can aid issue debugging and performance tuning.
+Note that, as the "AML tracer" logs are implemented via the
+ACPI_DEBUG_PRINT() macro, CONFIG_ACPI_DEBUG is also required to be
+enabled for the "AML tracer" logs.
+
+The following command examples illustrate the usage of the "AML tracer"
+functionality:
+
+a. Filter out the method start/stop "AML tracer" logs when control
+ methods are being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "enable" > trace_state
+
+b. Filter out the method start/stop "AML tracer" when the specified
+ control method is being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "method" > trace_state
+
+c. Filter out the method start/stop "AML tracer" logs when the specified
+ control method is being evaluated for the first time::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "method-once" > trace_state
+
+d. Filter out the method/opcode start/stop "AML tracer" when the
+ specified control method is being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "opcode" > trace_state
+
+e. Filter out the method/opcode start/stop "AML tracer" when the
+ specified control method is being evaluated for the first time::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "opcode-once" > trace_state
+
+Note that all of the above method tracing related module parameters can
+also be used as boot parameters, for example::
+
+ acpi.trace_debug_layer=0x80 acpi.trace_debug_level=0x10 \
+ acpi.trace_method_name=\_SB.LID0._LID acpi.trace_state=opcode-once
+
+
+Interface descriptions
+======================
+
+All method tracing functions can be configured via ACPI module
+parameters that are accessible at /sys/module/acpi/parameters/:
+
+trace_method_name
+ The full path of the AML method that the user wants to trace.
+
+ Note that the full path shouldn't contain the trailing "_"s in its
+ name segments but may contain "\" to form an absolute path.
+
+trace_debug_layer
+ The temporary debug_layer used when the tracing feature is enabled.
+
+ Using ACPI_EXECUTER (0x80) by default, which is the debug_layer
+ used to match all "AML tracer" logs.
+
+trace_debug_level
+ The temporary debug_level used when the tracing feature is enabled.
+
+ Using ACPI_LV_TRACE_POINT (0x10) by default, which is the
+ debug_level used to match all "AML tracer" logs.
+
+trace_state
+ The status of the tracing feature.
+
+ Users can enable/disable this debug tracing feature by executing
+ the following command::
+
+ # echo string > /sys/module/acpi/parameters/trace_state
+
+Where "string" should be one of the following:
+
+"disable"
+ Disable the method tracing feature.
+
+"enable"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during any method execution will be logged.
+
+"method"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during method execution of "trace_method_name" will be logged.
+
+"method-once"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during method execution of "trace_method_name" will be logged only once.
+
+"opcode"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during method/opcode execution of "trace_method_name" will be logged.
+
+"opcode-once"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during method/opcode execution of "trace_method_name" will be logged only
+ once.
+
+Note that the differences between "enable" and the other feature
+enabling options are:
+
+1. When "enable" is specified, since
+ "trace_debug_layer/trace_debug_level" shall apply to all control
+ method evaluations, after configuring "trace_state" to "enable",
+ "trace_method_name" will be reset to NULL.
+2. When "method/opcode" is specified, if
+ "trace_method_name" is NULL when "trace_state" is configured to
+ these options, the "trace_debug_layer/trace_debug_level" will
+ apply to all control method evaluations.
diff --git a/Documentation/acpi/namespace.txt b/Documentation/firmware-guide/acpi/namespace.rst
index 1860cb3865c6..835521baeb89 100644
--- a/Documentation/acpi/namespace.txt
+++ b/Documentation/firmware-guide/acpi/namespace.rst
@@ -1,85 +1,90 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+===================================================
ACPI Device Tree - Representation of ACPI Namespace
+===================================================
-Copyright (C) 2013, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
+:Copyright: |copy| 2013, Intel Corporation
+:Author: Lv Zheng <lv.zheng@intel.com>
-Abstract:
+:Credit: Thanks for the help from Zhang Rui <rui.zhang@intel.com> and
+         Rafael J. Wysocki <rafael.j.wysocki@intel.com>.
+Abstract
+========
The Linux ACPI subsystem converts ACPI namespace objects into a Linux
device tree under the /sys/devices/LNXSYSTEM:00 and updates it upon
-receiving ACPI hotplug notification events. For each device object in this
-hierarchy there is a corresponding symbolic link in the
+receiving ACPI hotplug notification events. For each device object
+in this hierarchy there is a corresponding symbolic link in the
/sys/bus/acpi/devices.
+
This document illustrates the structure of the ACPI device tree.
+ACPI Definition Blocks
+======================
+
+The ACPI firmware sets up RSDP (Root System Description Pointer) in the
+system memory address space pointing to the XSDT (Extended System
+Description Table). The XSDT always points to the FADT (Fixed ACPI
+Description Table) using its first entry, the data within the FADT
+includes various fixed-length entries that describe fixed ACPI features
+of the hardware. The FADT contains a pointer to the DSDT
+(Differentiated System Description Table). The XSDT also contains
+entries pointing to possibly multiple SSDTs (Secondary System
+Description Table).
+
+The DSDT and SSDT data is organized in data structures called definition
+blocks that contain definitions of various objects, including ACPI
+control methods, encoded in AML (ACPI Machine Language). The data block
+of the DSDT along with the contents of SSDTs represents a hierarchical
+data structure called the ACPI namespace whose topology reflects the
+structure of the underlying hardware platform.
+
+The relationships between ACPI System Definition Tables described above
+are illustrated in the following diagram::
+
+ +---------+ +-------+ +--------+ +------------------------+
+ | RSDP | +->| XSDT | +->| FADT | | +-------------------+ |
+ +---------+ | +-------+ | +--------+ +-|->| DSDT | |
+ | Pointer | | | Entry |-+ | ...... | | | +-------------------+ |
+ +---------+ | +-------+ | X_DSDT |--+ | | Definition Blocks | |
+ | Pointer |-+ | ..... | | ...... | | +-------------------+ |
+ +---------+ +-------+ +--------+ | +-------------------+ |
+ | Entry |------------------|->| SSDT | |
+ +- - - -+ | +-------------------| |
+ | Entry | - - - - - - - -+ | | Definition Blocks | |
+ +- - - -+ | | +-------------------+ |
+ | | +- - - - - - - - - -+ |
+ +-|->| SSDT | |
+ | +-------------------+ |
+ | | Definition Blocks | |
+ | +- - - - - - - - - -+ |
+ +------------------------+
+ |
+ OSPM Loading |
+ \|/
+ +----------------+
+ | ACPI Namespace |
+ +----------------+
+
+ Figure 1. ACPI Definition Blocks
+
+.. note:: RSDP can also contain a pointer to the RSDT (Root System
+ Description Table). Platforms provide RSDT to enable
+ compatibility with ACPI 1.0 operating systems. The OS is expected
+ to use XSDT, if present.
+
+
+Example ACPI Namespace
+======================
+
+All definition blocks are loaded into a single namespace. The namespace
+is a hierarchy of objects identified by names and paths.
+The following naming conventions apply to object names in the ACPI
+namespace:
-Credit:
-
-Thanks for the help from Zhang Rui <rui.zhang@intel.com> and Rafael J.
-Wysocki <rafael.j.wysocki@intel.com>.
-
-
-1. ACPI Definition Blocks
-
- The ACPI firmware sets up RSDP (Root System Description Pointer) in the
- system memory address space pointing to the XSDT (Extended System
- Description Table). The XSDT always points to the FADT (Fixed ACPI
- Description Table) using its first entry, the data within the FADT
- includes various fixed-length entries that describe fixed ACPI features
- of the hardware. The FADT contains a pointer to the DSDT
- (Differentiated System Descripition Table). The XSDT also contains
- entries pointing to possibly multiple SSDTs (Secondary System
- Description Table).
-
- The DSDT and SSDT data is organized in data structures called definition
- blocks that contain definitions of various objects, including ACPI
- control methods, encoded in AML (ACPI Machine Language). The data block
- of the DSDT along with the contents of SSDTs represents a hierarchical
- data structure called the ACPI namespace whose topology reflects the
- structure of the underlying hardware platform.
-
- The relationships between ACPI System Definition Tables described above
- are illustrated in the following diagram.
-
- +---------+ +-------+ +--------+ +------------------------+
- | RSDP | +->| XSDT | +->| FADT | | +-------------------+ |
- +---------+ | +-------+ | +--------+ +-|->| DSDT | |
- | Pointer | | | Entry |-+ | ...... | | | +-------------------+ |
- +---------+ | +-------+ | X_DSDT |--+ | | Definition Blocks | |
- | Pointer |-+ | ..... | | ...... | | +-------------------+ |
- +---------+ +-------+ +--------+ | +-------------------+ |
- | Entry |------------------|->| SSDT | |
- +- - - -+ | +-------------------| |
- | Entry | - - - - - - - -+ | | Definition Blocks | |
- +- - - -+ | | +-------------------+ |
- | | +- - - - - - - - - -+ |
- +-|->| SSDT | |
- | +-------------------+ |
- | | Definition Blocks | |
- | +- - - - - - - - - -+ |
- +------------------------+
- |
- OSPM Loading |
- \|/
- +----------------+
- | ACPI Namespace |
- +----------------+
-
- Figure 1. ACPI Definition Blocks
-
- NOTE: RSDP can also contain a pointer to the RSDT (Root System
- Description Table). Platforms provide RSDT to enable
- compatibility with ACPI 1.0 operating systems. The OS is expected
- to use XSDT, if present.
-
-
-2. Example ACPI Namespace
-
- All definition blocks are loaded into a single namespace. The namespace
- is a hierarchy of objects identified by names and paths.
- The following naming conventions apply to object names in the ACPI
- namespace:
1. All names are 32 bits long.
2. The first byte of a name must be one of 'A' - 'Z', '_'.
3. Each of the remaining bytes of a name must be one of 'A' - 'Z', '0'
@@ -91,7 +96,7 @@ Wysocki <rafael.j.wysocki@intel.com>.
(i.e. names prepended with '^' are relative to the parent of the
current namespace node).
- The figure below shows an example ACPI namespace.
+The figure below shows an example ACPI namespace::
+------+
| \ | Root
@@ -184,19 +189,20 @@ Wysocki <rafael.j.wysocki@intel.com>.
Figure 2. Example ACPI Namespace
-3. Linux ACPI Device Objects
+Linux ACPI Device Objects
+=========================
- The Linux kernel's core ACPI subsystem creates struct acpi_device
- objects for ACPI namespace objects representing devices, power resources
- processors, thermal zones. Those objects are exported to user space via
- sysfs as directories in the subtree under /sys/devices/LNXSYSTM:00. The
- format of their names is <bus_id:instance>, where 'bus_id' refers to the
- ACPI namespace representation of the given object and 'instance' is used
- for distinguishing different object of the same 'bus_id' (it is
- two-digit decimal representation of an unsigned integer).
+The Linux kernel's core ACPI subsystem creates struct acpi_device
+objects for ACPI namespace objects representing devices, power
+resources, processors and thermal zones. Those objects are exported to
+user space via sysfs as directories in the subtree under
+/sys/devices/LNXSYSTM:00. The format of their names is
+<bus_id:instance>, where 'bus_id' refers to the ACPI namespace
+representation of the given object and 'instance' is used for
+distinguishing different objects of the same 'bus_id' (it is a
+two-digit decimal representation of an unsigned integer).
- The value of 'bus_id' depends on the type of the object whose name it is
- part of as listed in the table below.
+The value of 'bus_id' depends on the type of the object whose name it
+is part of, as listed in the table below::
+---+-----------------+-------+----------+
| | Object/Feature | Table | bus_id |
@@ -226,10 +232,11 @@ Wysocki <rafael.j.wysocki@intel.com>.
Table 1. ACPI Namespace Objects Mapping
- The following rules apply when creating struct acpi_device objects on
- the basis of the contents of ACPI System Description Tables (as
- indicated by the letter in the first column and the notation in the
- second column of the table above):
+The following rules apply when creating struct acpi_device objects on
+the basis of the contents of ACPI System Description Tables (as
+indicated by the letter in the first column and the notation in the
+second column of the table above):
+
N:
The object's source is an ACPI namespace node (as indicated by the
named object's type in the second column). In that case the object's
@@ -249,13 +256,14 @@ Wysocki <rafael.j.wysocki@intel.com>.
struct acpi_device object with LNXVIDEO 'bus_id' will be created for
it.
- The third column of the above table indicates which ACPI System
- Description Tables contain information used for the creation of the
- struct acpi_device objects represented by the given row (xSDT means DSDT
- or SSDT).
+The third column of the above table indicates which ACPI System
+Description Tables contain information used for the creation of the
+struct acpi_device objects represented by the given row (xSDT means DSDT
+or SSDT).
+
+The fourth column of the above table indicates the 'bus_id' generation
+rule of the struct acpi_device object:
- The forth column of the above table indicates the 'bus_id' generation
- rule of the struct acpi_device object:
_HID:
_HID in the last column of the table means that the object's bus_id
is derived from the _HID/_CID identification objects present under
@@ -275,45 +283,47 @@ Wysocki <rafael.j.wysocki@intel.com>.
object's bus_id.
-4. Linux ACPI Physical Device Glue
-
- ACPI device (i.e. struct acpi_device) objects may be linked to other
- objects in the Linux' device hierarchy that represent "physical" devices
- (for example, devices on the PCI bus). If that happens, it means that
- the ACPI device object is a "companion" of a device otherwise
- represented in a different way and is used (1) to provide configuration
- information on that device which cannot be obtained by other means and
- (2) to do specific things to the device with the help of its ACPI
- control methods. One ACPI device object may be linked this way to
- multiple "physical" devices.
-
- If an ACPI device object is linked to a "physical" device, its sysfs
- directory contains the "physical_node" symbolic link to the sysfs
- directory of the target device object. In turn, the target device's
- sysfs directory will then contain the "firmware_node" symbolic link to
- the sysfs directory of the companion ACPI device object.
- The linking mechanism relies on device identification provided by the
- ACPI namespace. For example, if there's an ACPI namespace object
- representing a PCI device (i.e. a device object under an ACPI namespace
- object representing a PCI bridge) whose _ADR returns 0x00020000 and the
- bus number of the parent PCI bridge is 0, the sysfs directory
- representing the struct acpi_device object created for that ACPI
- namespace object will contain the 'physical_node' symbolic link to the
- /sys/devices/pci0000:00/0000:00:02:0/ sysfs directory of the
- corresponding PCI device.
-
- The linking mechanism is generally bus-specific. The core of its
- implementation is located in the drivers/acpi/glue.c file, but there are
- complementary parts depending on the bus types in question located
- elsewhere. For example, the PCI-specific part of it is located in
- drivers/pci/pci-acpi.c.
-
-
-5. Example Linux ACPI Device Tree
-
- The sysfs hierarchy of struct acpi_device objects corresponding to the
- example ACPI namespace illustrated in Figure 2 with the addition of
- fixed PWR_BUTTON/SLP_BUTTON devices is shown below.
+Linux ACPI Physical Device Glue
+===============================
+
+ACPI device (i.e. struct acpi_device) objects may be linked to other
+objects in the Linux device hierarchy that represent "physical" devices
+(for example, devices on the PCI bus). If that happens, it means that
+the ACPI device object is a "companion" of a device otherwise
+represented in a different way and is used (1) to provide configuration
+information on that device which cannot be obtained by other means and
+(2) to do specific things to the device with the help of its ACPI
+control methods. One ACPI device object may be linked this way to
+multiple "physical" devices.
+
+If an ACPI device object is linked to a "physical" device, its sysfs
+directory contains the "physical_node" symbolic link to the sysfs
+directory of the target device object. In turn, the target device's
+sysfs directory will then contain the "firmware_node" symbolic link to
+the sysfs directory of the companion ACPI device object.
+
+The linking mechanism relies on device identification provided by the
+ACPI namespace. For example, if there's an ACPI namespace object
+representing a PCI device (i.e. a device object under an ACPI namespace
+object representing a PCI bridge) whose _ADR returns 0x00020000 and the
+bus number of the parent PCI bridge is 0, the sysfs directory
+representing the struct acpi_device object created for that ACPI
+namespace object will contain the 'physical_node' symbolic link to the
+/sys/devices/pci0000:00/0000:00:02:0/ sysfs directory of the
+corresponding PCI device.
+
+The linking mechanism is generally bus-specific. The core of its
+implementation is located in the drivers/acpi/glue.c file, but there are
+complementary parts depending on the bus types in question located
+elsewhere. For example, the PCI-specific part of it is located in
+drivers/pci/pci-acpi.c.
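+
+From a driver's point of view, the companion ACPI device object of a given
+struct device can be obtained with the ACPI_COMPANION() macro from
+<linux/acpi.h>. A minimal sketch (the probe function and message are made
+up for illustration)::
+
+	#include <linux/acpi.h>
+	#include <linux/platform_device.h>
+
+	static int example_probe(struct platform_device *pdev)
+	{
+		/* NULL when the device has no ACPI companion */
+		struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+
+		if (!adev)
+			return -ENODEV;
+		dev_info(&pdev->dev, "ACPI companion: %s\n",
+			 dev_name(&adev->dev));
+		return 0;
+	}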
+
+
+Example Linux ACPI Device Tree
+==============================
+
+The sysfs hierarchy of struct acpi_device objects corresponding to the
+example ACPI namespace illustrated in Figure 2 with the addition of
+fixed PWR_BUTTON/SLP_BUTTON devices is shown below::
+--------------+---+-----------------+
| LNXSYSTEM:00 | \ | acpi:LNXSYSTEM: |
@@ -377,12 +387,14 @@ Wysocki <rafael.j.wysocki@intel.com>.
Figure 3. Example Linux ACPI Device Tree
- NOTE: Each node is represented as "object/path/modalias", where:
- 1. 'object' is the name of the object's directory in sysfs.
- 2. 'path' is the ACPI namespace path of the corresponding
- ACPI namespace object, as returned by the object's 'path'
- sysfs attribute.
- 3. 'modalias' is the value of the object's 'modalias' sysfs
- attribute (as described earlier in this document).
- NOTE: N/A indicates the device object does not have the 'path' or the
- 'modalias' attribute.
+.. note:: Each node is represented as "object/path/modalias", where:
+
+ 1. 'object' is the name of the object's directory in sysfs.
+ 2. 'path' is the ACPI namespace path of the corresponding
+ ACPI namespace object, as returned by the object's 'path'
+ sysfs attribute.
+ 3. 'modalias' is the value of the object's 'modalias' sysfs
+ attribute (as described earlier in this document).
+
+.. note:: N/A indicates the device object does not have the 'path' or the
+ 'modalias' attribute.
diff --git a/Documentation/acpi/osi.txt b/Documentation/firmware-guide/acpi/osi.rst
index 50cde0ceb9b0..29e9ef79ebc0 100644
--- a/Documentation/acpi/osi.txt
+++ b/Documentation/firmware-guide/acpi/osi.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
ACPI _OSI and _REV methods
---------------------------
+==========================
An ACPI BIOS can use the "Operating System Interfaces" method (_OSI)
to find out what the operating system supports. Eg. If BIOS
@@ -14,7 +17,7 @@ This document explains how and why the BIOS and Linux should use these methods.
It also explains how and why they are widely misused.
How to use _OSI
----------------
+===============
Linux runs on two groups of machines -- those that are tested by the OEM
to be compatible with Linux, and those that were never tested with Linux,
@@ -62,7 +65,7 @@ the string when that support is added to the kernel.
That was easy. Read on, to find out how to do it wrong.
Before _OSI, there was _OS
---------------------------
+==========================
ACPI 1.0 specified "_OS" as an
"object that evaluates to a string that identifies the operating system."
@@ -96,7 +99,7 @@ That is the *only* viable strategy, as that is what modern Windows does,
and so doing otherwise could steer the BIOS down an untested path.
_OSI is born, and immediately misused
---------------------------------------
+=====================================
With _OSI, the *BIOS* provides the string describing an interface,
and asks the OS: "YES/NO, are you compatible with this interface?"
@@ -144,7 +147,7 @@ catastrophic failure resulting from the BIOS taking paths that
were never validated under *any* OS.
Do not use _REV
----------------
+===============
Since _OSI("Linux") went away, some BIOS writers used _REV
to support Linux and Windows differences in the same BIOS.
@@ -164,7 +167,7 @@ from mid-2015 onward. The ACPI specification will also be updated
to reflect that _REV is deprecated, and always returns 2.
Apple Mac and _OSI("Darwin")
-----------------------------
+============================
On Apple's Mac platforms, the ACPI BIOS invokes _OSI("Darwin")
to determine if the machine is running Apple OSX.
diff --git a/Documentation/acpi/video_extension.txt b/Documentation/firmware-guide/acpi/video_extension.rst
index 79bf6a4921be..099b8607e07b 100644
--- a/Documentation/acpi/video_extension.txt
+++ b/Documentation/firmware-guide/acpi/video_extension.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
ACPI video extensions
-~~~~~~~~~~~~~~~~~~~~~
+=====================
This driver implements the ACPI Extensions For Display Adapters for
integrated graphics devices on the motherboard, as specified in ACPI 2.0
@@ -8,9 +11,10 @@ defining the video POST device, retrieving EDID information or to
set up a video output, etc. Note that this is a reference implementation
only. It may or may not work for your integrated video device.
-The ACPI video driver does 3 things regarding backlight control:
+The ACPI video driver does 3 things regarding backlight control.
-1 Export a sysfs interface for user space to control backlight level
+Export a sysfs interface for user space to control backlight level
+==================================================================
If the ACPI table has a video device, and acpi_backlight=vendor kernel
command line is not present, the driver will register a backlight device
@@ -22,36 +26,41 @@ The backlight sysfs interface has a standard definition here:
Documentation/ABI/stable/sysfs-class-backlight.
And what ACPI video driver does is:
-actual_brightness: on read, control method _BQC will be evaluated to
-get the brightness level the firmware thinks it is at;
-bl_power: not implemented, will set the current brightness instead;
-brightness: on write, control method _BCM will run to set the requested
-brightness level;
-max_brightness: Derived from the _BCL package(see below);
-type: firmware
+
+actual_brightness:
+ on read, control method _BQC will be evaluated to
+ get the brightness level the firmware thinks it is at;
+bl_power:
+ not implemented, will set the current brightness instead;
+brightness:
+ on write, control method _BCM will run to set the requested brightness level;
+max_brightness:
+ Derived from the _BCL package (see below);
+type:
+ firmware
Note that ACPI video backlight driver will always use index for
brightness, actual_brightness and max_brightness. So if we have
-the following _BCL package:
+the following _BCL package::
-Method (_BCL, 0, NotSerialized)
-{
- Return (Package (0x0C)
+ Method (_BCL, 0, NotSerialized)
{
- 0x64,
- 0x32,
- 0x0A,
- 0x14,
- 0x1E,
- 0x28,
- 0x32,
- 0x3C,
- 0x46,
- 0x50,
- 0x5A,
- 0x64
- })
-}
+ Return (Package (0x0C)
+ {
+ 0x64,
+ 0x32,
+ 0x0A,
+ 0x14,
+ 0x1E,
+ 0x28,
+ 0x32,
+ 0x3C,
+ 0x46,
+ 0x50,
+ 0x5A,
+ 0x64
+ })
+ }
The first two levels are for when the laptop is on AC or on battery and are
not used by Linux currently. The remaining 10 levels are supported levels
@@ -62,13 +71,15 @@ as a "brightness level" indicator. Thus from the user space perspective
the range of available brightness levels is from 0 to 9 (max_brightness)
inclusive.
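+
+From user space this interface can be driven directly through these files.
+A minimal sketch in C (the "acpi_video0" device name is an assumption;
+check /sys/class/backlight on the target system)::
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          int max;
+          FILE *f = fopen("/sys/class/backlight/acpi_video0/max_brightness", "r");
+
+          if (!f || fscanf(f, "%d", &max) != 1)
+                  return 1;
+          fclose(f);
+
+          /* request the middle of the 0..max_brightness index range */
+          f = fopen("/sys/class/backlight/acpi_video0/brightness", "w");
+          if (!f)
+                  return 1;
+          fprintf(f, "%d\n", max / 2);
+          fclose(f);
+          return 0;
+  }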
-2 Notify user space about hotkey event
+Notify user space about hotkey event
+====================================
There are generally two cases for hotkey event reporting:
+
i) For some laptops, when user presses the hotkey, a scancode will be
generated and sent to user space through the input device created by
the keyboard driver as a key type input event, with proper remap, the
- following key code will appear to user space:
+ following key code will appear to user space::
EV_KEY, KEY_BRIGHTNESSUP
EV_KEY, KEY_BRIGHTNESSDOWN
@@ -84,23 +95,27 @@ ii) For some laptops, the press of the hotkey will not generate the
notify value it received and send the event to user space through the
input device it created:
+ ===== ==================
event keycode
+ ===== ==================
0x86 KEY_BRIGHTNESSUP
0x87 KEY_BRIGHTNESSDOWN
etc.
+ ===== ==================
so this would lead to the same effect as case i) now.
Once the user space tool receives this event, it can modify the backlight
level through the sysfs interface.
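+
+A minimal sketch of such a tool, reading key events from the input device
+(the event node path is system specific and only an assumption here)::
+
+  #include <stdio.h>
+  #include <fcntl.h>
+  #include <unistd.h>
+  #include <linux/input.h>
+
+  int main(void)
+  {
+          struct input_event ev;
+          int fd = open("/dev/input/event3", O_RDONLY);
+
+          if (fd < 0)
+                  return 1;
+          while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
+                  if (ev.type != EV_KEY || ev.value != 1)
+                          continue;      /* only key-press events */
+                  if (ev.code == KEY_BRIGHTNESSUP)
+                          puts("up");    /* then raise the index via sysfs */
+                  else if (ev.code == KEY_BRIGHTNESSDOWN)
+                          puts("down");  /* or lower it */
+          }
+          return 0;
+  }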
-3 Change backlight level in the kernel
+Change backlight level in the kernel
+====================================
This works for machines covered by case ii) in Section 2. Once the driver
receives a notification, it will set the backlight level accordingly. This does
not affect the sending of events to user space; they are always sent to user
space regardless of whether or not the video module controls the backlight level
directly. This behaviour can be controlled through the brightness_switch_enabled
-module parameter as documented in admin-guide/kernel-parameters.rst. It is recommended to
-disable this behaviour once a GUI environment starts up and wants to have full
-control of the backlight level.
+module parameter as documented in admin-guide/kernel-parameters.rst. It is
+recommended to disable this behaviour once a GUI environment starts up and
+wants to have full control of the backlight level.
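+For example, booting with video.brightness_switch_enabled=0 (the parameter
+listed for the video module in admin-guide/kernel-parameters.rst) keeps the
+driver from changing the backlight level on these events.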
diff --git a/Documentation/firmware-guide/index.rst b/Documentation/firmware-guide/index.rst
new file mode 100644
index 000000000000..5355784ca0a2
--- /dev/null
+++ b/Documentation/firmware-guide/index.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============================
+The Linux kernel firmware guide
+===============================
+
+This section describes the ACPI subsystem in Linux from the firmware perspective.
+
+.. toctree::
+ :maxdepth: 1
+
+ acpi/index
+
diff --git a/Documentation/flexible-arrays.txt b/Documentation/flexible-arrays.txt
deleted file mode 100644
index a0f2989dd804..000000000000
--- a/Documentation/flexible-arrays.txt
+++ /dev/null
@@ -1,123 +0,0 @@
-===================================
-Using flexible arrays in the kernel
-===================================
-
-:Updated: Last updated for 2.6.32
-:Author: Jonathan Corbet <corbet@lwn.net>
-
-Large contiguous memory allocations can be unreliable in the Linux kernel.
-Kernel programmers will sometimes respond to this problem by allocating
-pages with vmalloc(). This solution not ideal, though. On 32-bit systems,
-memory from vmalloc() must be mapped into a relatively small address space;
-it's easy to run out. On SMP systems, the page table changes required by
-vmalloc() allocations can require expensive cross-processor interrupts on
-all CPUs. And, on all systems, use of space in the vmalloc() range
-increases pressure on the translation lookaside buffer (TLB), reducing the
-performance of the system.
-
-In many cases, the need for memory from vmalloc() can be eliminated by
-piecing together an array from smaller parts; the flexible array library
-exists to make this task easier.
-
-A flexible array holds an arbitrary (within limits) number of fixed-sized
-objects, accessed via an integer index. Sparse arrays are handled
-reasonably well. Only single-page allocations are made, so memory
-allocation failures should be relatively rare. The down sides are that the
-arrays cannot be indexed directly, individual object size cannot exceed the
-system page size, and putting data into a flexible array requires a copy
-operation. It's also worth noting that flexible arrays do no internal
-locking at all; if concurrent access to an array is possible, then the
-caller must arrange for appropriate mutual exclusion.
-
-The creation of a flexible array is done with::
-
- #include <linux/flex_array.h>
-
- struct flex_array *flex_array_alloc(int element_size,
- unsigned int total,
- gfp_t flags);
-
-The individual object size is provided by element_size, while total is the
-maximum number of objects which can be stored in the array. The flags
-argument is passed directly to the internal memory allocation calls. With
-the current code, using flags to ask for high memory is likely to lead to
-notably unpleasant side effects.
-
-It is also possible to define flexible arrays at compile time with::
-
- DEFINE_FLEX_ARRAY(name, element_size, total);
-
-This macro will result in a definition of an array with the given name; the
-element size and total will be checked for validity at compile time.
-
-Storing data into a flexible array is accomplished with a call to::
-
- int flex_array_put(struct flex_array *array, unsigned int element_nr,
- void *src, gfp_t flags);
-
-This call will copy the data from src into the array, in the position
-indicated by element_nr (which must be less than the maximum specified when
-the array was created). If any memory allocations must be performed, flags
-will be used. The return value is zero on success, a negative error code
-otherwise.
-
-There might possibly be a need to store data into a flexible array while
-running in some sort of atomic context; in this situation, sleeping in the
-memory allocator would be a bad thing. That can be avoided by using
-GFP_ATOMIC for the flags value, but, often, there is a better way. The
-trick is to ensure that any needed memory allocations are done before
-entering atomic context, using::
-
- int flex_array_prealloc(struct flex_array *array, unsigned int start,
- unsigned int nr_elements, gfp_t flags);
-
-This function will ensure that memory for the elements indexed in the range
-defined by start and nr_elements has been allocated. Thereafter, a
-flex_array_put() call on an element in that range is guaranteed not to
-block.
-
-Getting data back out of the array is done with::
-
- void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
-
-The return value is a pointer to the data element, or NULL if that
-particular element has never been allocated.
-
-Note that it is possible to get back a valid pointer for an element which
-has never been stored in the array. Memory for array elements is allocated
-one page at a time; a single allocation could provide memory for several
-adjacent elements. Flexible array elements are normally initialized to the
-value FLEX_ARRAY_FREE (defined as 0x6c in <linux/poison.h>), so errors
-involving that number probably result from use of unstored array entries.
-Note that, if array elements are allocated with __GFP_ZERO, they will be
-initialized to zero and this poisoning will not happen.
-
-Individual elements in the array can be cleared with::
-
- int flex_array_clear(struct flex_array *array, unsigned int element_nr);
-
-This function will set the given element to FLEX_ARRAY_FREE and return
-zero. If storage for the indicated element is not allocated for the array,
-flex_array_clear() will return -EINVAL instead. Note that clearing an
-element does not release the storage associated with it; to reduce the
-allocated size of an array, call::
-
- int flex_array_shrink(struct flex_array *array);
-
-The return value will be the number of pages of memory actually freed.
-This function works by scanning the array for pages containing nothing but
-FLEX_ARRAY_FREE bytes, so (1) it can be expensive, and (2) it will not work
-if the array's pages are allocated with __GFP_ZERO.
-
-It is possible to remove all elements of an array with a call to::
-
- void flex_array_free_parts(struct flex_array *array);
-
-This call frees all elements, but leaves the array itself in place.
-Freeing the entire array is done with::
-
- void flex_array_free(struct flex_array *array);
-
-As of this writing, there are no users of flexible arrays in the mainline
-kernel. The functions described here are also not exported to modules;
-that will probably be fixed when somebody comes up with a need for it.
diff --git a/Documentation/hwmon/ab8500 b/Documentation/hwmon/ab8500.rst
index cf169c8ef4e3..33f93a9cec04 100644
--- a/Documentation/hwmon/ab8500
+++ b/Documentation/hwmon/ab8500.rst
@@ -2,19 +2,23 @@ Kernel driver ab8500
====================
Supported chips:
+
* ST-Ericsson AB8500
+
Prefix: 'ab8500'
+
Addresses scanned: -
+
Datasheet: http://www.stericsson.com/developers/documentation.jsp
Authors:
- Martin Persson <martin.persson@stericsson.com>
- Hongbo Zhang <hongbo.zhang@linaro.org>
+ - Martin Persson <martin.persson@stericsson.com>
+ - Hongbo Zhang <hongbo.zhang@linaro.org>
Description
-----------
-See also Documentation/hwmon/abx500. This is the ST-Ericsson AB8500 specific
+See also Documentation/hwmon/abx500.rst. This is the ST-Ericsson AB8500 specific
driver.
Currently only the AB8500 internal sensor and one external sensor for battery
diff --git a/Documentation/hwmon/abituguru b/Documentation/hwmon/abituguru
deleted file mode 100644
index 44013d23b3f0..000000000000
--- a/Documentation/hwmon/abituguru
+++ /dev/null
@@ -1,92 +0,0 @@
-Kernel driver abituguru
-=======================
-
-Supported chips:
- * Abit uGuru revision 1 & 2 (Hardware Monitor part only)
- Prefix: 'abituguru'
- Addresses scanned: ISA 0x0E0
- Datasheet: Not available, this driver is based on reverse engineering.
- A "Datasheet" has been written based on the reverse engineering it
- should be available in the same dir as this file under the name
- abituguru-datasheet.
- Note:
- The uGuru is a microcontroller with onboard firmware which programs
- it to behave as a hwmon IC. There are many different revisions of the
- firmware and thus effectivly many different revisions of the uGuru.
- Below is an incomplete list with which revisions are used for which
- Motherboards:
- uGuru 1.00 ~ 1.24 (AI7, KV8-MAX3, AN7) (1)
- uGuru 2.0.0.0 ~ 2.0.4.2 (KV8-PRO)
- uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
- uGuru 2.2.0.0 ~ 2.2.0.6 (AA8 Fatal1ty)
- uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
- uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
- AW9D-MAX) (2)
- 1) For revisions 2 and 3 uGuru's the driver can autodetect the
- sensortype (Volt or Temp) for bank1 sensors, for revision 1 uGuru's
- this does not always work. For these uGuru's the autodetection can
- be overridden with the bank1_types module param. For all 3 known
- revison 1 motherboards the correct use of this param is:
- bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1
- You may also need to specify the fan_sensors option for these boards
- fan_sensors=5
- 2) There is a separate abituguru3 driver for these motherboards,
- the abituguru (without the 3 !) driver will not work on these
- motherboards (and visa versa)!
-
-Authors:
- Hans de Goede <j.w.r.degoede@hhs.nl>,
- (Initial reverse engineering done by Olle Sandberg
- <ollebull@gmail.com>)
-
-
-Module Parameters
------------------
-
-* force: bool Force detection. Note this parameter only causes the
- detection to be skipped, and thus the insmod to
- succeed. If the uGuru can't be read the actual hwmon
- driver will not load and thus no hwmon device will get
- registered.
-* bank1_types: int[] Bank1 sensortype autodetection override:
- -1 autodetect (default)
- 0 volt sensor
- 1 temp sensor
- 2 not connected
-* fan_sensors: int Tell the driver how many fan speed sensors there are
- on your motherboard. Default: 0 (autodetect).
-* pwms: int Tell the driver how many fan speed controls (fan
- pwms) your motherboard has. Default: 0 (autodetect).
-* verbose: int How verbose should the driver be? (0-3):
- 0 normal output
- 1 + verbose error reporting
- 2 + sensors type probing info (default)
- 3 + retryable error reporting
- Default: 2 (the driver is still in the testing phase)
-
-Notice if you need any of the first three options above please insmod the
-driver with verbose set to 3 and mail me <j.w.r.degoede@hhs.nl> the output of:
-dmesg | grep abituguru
-
-
-Description
------------
-
-This driver supports the hardware monitoring features of the first and
-second revision of the Abit uGuru chip found on Abit uGuru featuring
-motherboards (most modern Abit motherboards).
-
-The first and second revision of the uGuru chip in reality is a Winbond
-W83L950D in disguise (despite Abit claiming it is "a new microprocessor
-designed by the ABIT Engineers"). Unfortunately this doesn't help since the
-W83L950D is a generic microcontroller with a custom Abit application running
-on it.
-
-Despite Abit not releasing any information regarding the uGuru, Olle
-Sandberg <ollebull@gmail.com> has managed to reverse engineer the sensor part
-of the uGuru. Without his work this driver would not have been possible.
-
-Known Issues
-------------
-
-The voltage and frequency control parts of the Abit uGuru are not supported.
diff --git a/Documentation/hwmon/abituguru-datasheet b/Documentation/hwmon/abituguru-datasheet.rst
index 86c0b1251c81..6d5253e2223b 100644
--- a/Documentation/hwmon/abituguru-datasheet
+++ b/Documentation/hwmon/abituguru-datasheet.rst
@@ -1,3 +1,4 @@
+===============
uGuru datasheet
===============
@@ -168,34 +169,35 @@ This bank contains 0 sensors, iow the sensor address is ignored (but must be
written) just use 0. Bank 0x20 contains 3 bytes:
Byte 0:
-This byte holds the alarm flags for sensor 0-7 of Sensor Bank1, with bit 0
-corresponding to sensor 0, 1 to 1, etc.
+ This byte holds the alarm flags for sensor 0-7 of Sensor Bank1, with bit 0
+ corresponding to sensor 0, 1 to 1, etc.
Byte 1:
-This byte holds the alarm flags for sensor 8-15 of Sensor Bank1, with bit 0
-corresponding to sensor 8, 1 to 9, etc.
+ This byte holds the alarm flags for sensor 8-15 of Sensor Bank1, with bit 0
+ corresponding to sensor 8, 1 to 9, etc.
Byte 2:
-This byte holds the alarm flags for sensor 0-5 of Sensor Bank2, with bit 0
-corresponding to sensor 0, 1 to 1, etc.
+ This byte holds the alarm flags for sensor 0-5 of Sensor Bank2, with bit 0
+ corresponding to sensor 0, 1 to 1, etc.
Bank 0x21 Sensor Bank1 Values / Readings (R)
--------------------------------------------
This bank contains 16 sensors, for each sensor it contains 1 byte.
So far the following sensors are known to be available on all motherboards:
-Sensor 0 CPU temp
-Sensor 1 SYS temp
-Sensor 3 CPU core volt
-Sensor 4 DDR volt
-Sensor 10 DDR Vtt volt
-Sensor 15 PWM temp
+
+- Sensor 0 CPU temp
+- Sensor 1 SYS temp
+- Sensor 3 CPU core volt
+- Sensor 4 DDR volt
+- Sensor 10 DDR Vtt volt
+- Sensor 15 PWM temp
Byte 0:
-This byte holds the reading from the sensor. Sensors in Bank1 can be both
-volt and temp sensors, this is motherboard specific. The uGuru however does
-seem to know (be programmed with) what kindoff sensor is attached see Sensor
-Bank1 Settings description.
+ This byte holds the reading from the sensor. Sensors in Bank1 can be both
+ volt and temp sensors; this is motherboard specific. The uGuru however does
+ seem to know (be programmed with) what kind of sensor is attached; see the
+ Sensor Bank1 Settings description.
Volt sensors use a linear scale, a reading 0 corresponds with 0 volt and a
reading of 255 with 3494 mV. The sensors for higher voltages however are
@@ -207,96 +209,118 @@ Temp sensors also use a linear scale, a reading of 0 corresponds with 0 degree
Celsius and a reading of 255 with a reading of 255 degrees Celsius.
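+
+In C, the scaling described above for the basic sensors amounts to (a
+sketch; the helper names are made up here)::
+
+  /* volt reading: 0..255 maps linearly onto 0..3494 mV */
+  static inline int uguru_volt_mv(unsigned char reading)
+  {
+          return reading * 3494 / 255;
+  }
+
+  /* temp reading: 0..255 maps linearly onto 0..255 degrees Celsius */
+  static inline int uguru_temp_c(unsigned char reading)
+  {
+          return reading;
+  }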
-Bank 0x22 Sensor Bank1 Settings (R)
-Bank 0x23 Sensor Bank1 Settings (W)
------------------------------------
+Bank 0x22 Sensor Bank1 Settings (R) and Bank 0x23 Sensor Bank1 Settings (W)
+---------------------------------------------------------------------------
-This bank contains 16 sensors, for each sensor it contains 3 bytes. Each
+Those banks contain 16 sensors, with 3 bytes for each sensor. Each
set of 3 bytes contains the settings for the sensor with the same sensor
address in Bank 0x21 .
Byte 0:
-Alarm behaviour for the selected sensor. A 1 enables the described behaviour.
-Bit 0: Give an alarm if measured temp is over the warning threshold (RW) *
-Bit 1: Give an alarm if measured volt is over the max threshold (RW) **
-Bit 2: Give an alarm if measured volt is under the min threshold (RW) **
-Bit 3: Beep if alarm (RW)
-Bit 4: 1 if alarm cause measured temp is over the warning threshold (R)
-Bit 5: 1 if alarm cause measured volt is over the max threshold (R)
-Bit 6: 1 if alarm cause measured volt is under the min threshold (R)
-Bit 7: Volt sensor: Shutdown if alarm persist for more than 4 seconds (RW)
- Temp sensor: Shutdown if temp is over the shutdown threshold (RW)
-
-* This bit is only honored/used by the uGuru if a temp sensor is connected
-** This bit is only honored/used by the uGuru if a volt sensor is connected
-Note with some trickery this can be used to find out what kinda sensor is
-detected see the Linux kernel driver for an example with many comments on
-how todo this.
+ Alarm behaviour for the selected sensor. A 1 enables the described
+ behaviour.
+
+Bit 0:
+ Give an alarm if measured temp is over the warning threshold (RW) [1]_
+
+Bit 1:
+ Give an alarm if measured volt is over the max threshold (RW) [2]_
+
+Bit 2:
+ Give an alarm if measured volt is under the min threshold (RW) [2]_
+
+Bit 3:
+ Beep if alarm (RW)
+
+Bit 4:
+ 1 if the alarm was caused by measured temp over the warning threshold (R)
+
+Bit 5:
+ 1 if the alarm was caused by measured volt over the max threshold (R)
+
+Bit 6:
+ 1 if the alarm was caused by measured volt under the min threshold (R)
+
+Bit 7:
+ - Volt sensor: Shutdown if alarm persists for more than 4 seconds (RW)
+ - Temp sensor: Shutdown if temp is over the shutdown threshold (RW)
+
+.. [1] This bit is only honored/used by the uGuru if a temp sensor is connected
+
+.. [2] This bit is only honored/used by the uGuru if a volt sensor is connected
+ Note that with some trickery this can be used to find out what kind of
+ sensor is detected; see the Linux kernel driver for an example with many
+ comments on how to do this.
Byte 1:
-Temp sensor: warning threshold (scale as bank 0x21)
-Volt sensor: min threshold (scale as bank 0x21)
+ - Temp sensor: warning threshold (scale as bank 0x21)
+ - Volt sensor: min threshold (scale as bank 0x21)
Byte 2:
-Temp sensor: shutdown threshold (scale as bank 0x21)
-Volt sensor: max threshold (scale as bank 0x21)
+ - Temp sensor: shutdown threshold (scale as bank 0x21)
+ - Volt sensor: max threshold (scale as bank 0x21)
-Bank 0x24 PWM outputs for FAN's (R)
-Bank 0x25 PWM outputs for FAN's (W)
------------------------------------
+Bank 0x24 PWM outputs for FAN's (R) and Bank 0x25 PWM outputs for FAN's (W)
+---------------------------------------------------------------------------
-This bank contains 3 "sensors", for each sensor it contains 5 bytes.
-Sensor 0 usually controls the CPU fan
-Sensor 1 usually controls the NB (or chipset for single chip) fan
-Sensor 2 usually controls the System fan
+Those banks contain 3 "sensors", with 5 bytes for each sensor.
+ - Sensor 0 usually controls the CPU fan
+ - Sensor 1 usually controls the NB (or chipset for single chip) fan
+ - Sensor 2 usually controls the System fan
Byte 0:
-Flag 0x80 to enable control, Fan runs at 100% when disabled.
-low nibble (temp)sensor address at bank 0x21 used for control.
+ Flag 0x80 enables control; the fan runs at 100% when disabled.
+ The low nibble holds the (temp) sensor address at bank 0x21 used for control.
Byte 1:
-0-255 = 0-12v (linear), specify voltage at which fan will rotate when under
-low threshold temp (specified in byte 3)
+ 0-255 = 0-12v (linear), specifies the voltage at which the fan will rotate
+ when under the low threshold temp (specified in byte 3)
Byte 2:
-0-255 = 0-12v (linear), specify voltage at which fan will rotate when above
-high threshold temp (specified in byte 4)
+ 0-255 = 0-12v (linear), specifies the voltage at which the fan will rotate
+ when above the high threshold temp (specified in byte 4)
Byte 3:
-Low threshold temp (scale as bank 0x21)
+ Low threshold temp (scale as bank 0x21)
byte 4:
-High threshold temp (scale as bank 0x21)
+ High threshold temp (scale as bank 0x21)
Bank 0x26 Sensors Bank2 Values / Readings (R)
---------------------------------------------
This bank contains 6 sensors (AFAIK), for each sensor it contains 1 byte.
+
So far the following sensors are known to be available on all motherboards:
-Sensor 0: CPU fan speed
-Sensor 1: NB (or chipset for single chip) fan speed
-Sensor 2: SYS fan speed
+ - Sensor 0: CPU fan speed
+ - Sensor 1: NB (or chipset for single chip) fan speed
+ - Sensor 2: SYS fan speed
Byte 0:
-This byte holds the reading from the sensor. 0-255 = 0-15300 (linear)
+ This byte holds the reading from the sensor. 0-255 = 0-15300 (linear)
-Bank 0x27 Sensors Bank2 Settings (R)
-Bank 0x28 Sensors Bank2 Settings (W)
-------------------------------------
+Bank 0x27 Sensors Bank2 Settings (R) and Bank 0x28 Sensors Bank2 Settings (W)
+-----------------------------------------------------------------------------
-This bank contains 6 sensors (AFAIK), for each sensor it contains 2 bytes.
+Those banks contain 6 sensors (AFAIK), with 2 bytes for each sensor.
Byte 0:
-Alarm behaviour for the selected sensor. A 1 enables the described behaviour.
-Bit 0: Give an alarm if measured rpm is under the min threshold (RW)
-Bit 3: Beep if alarm (RW)
-Bit 7: Shutdown if alarm persist for more than 4 seconds (RW)
+ Alarm behaviour for the selected sensor. A 1 enables the described behaviour.
+
+Bit 0:
+ Give an alarm if measured rpm is under the min threshold (RW)
+
+Bit 3:
+ Beep if alarm (RW)
+
+Bit 7:
+ Shutdown if alarm persists for more than 4 seconds (RW)
Byte 1:
-min threshold (scale as bank 0x26)
+ min threshold (scale as bank 0x26)
Warning for the adventurous
diff --git a/Documentation/hwmon/abituguru.rst b/Documentation/hwmon/abituguru.rst
new file mode 100644
index 000000000000..d8243c827de9
--- /dev/null
+++ b/Documentation/hwmon/abituguru.rst
@@ -0,0 +1,113 @@
+Kernel driver abituguru
+=======================
+
+Supported chips:
+
+ * Abit uGuru revision 1 & 2 (Hardware Monitor part only)
+
+ Prefix: 'abituguru'
+
+ Addresses scanned: ISA 0x0E0
+
+ Datasheet: Not available; this driver is based on reverse engineering.
+ A "datasheet" has been written based on that reverse engineering; it
+ should be available in the same directory as this file under the name
+ abituguru-datasheet.
+
+ Note:
+ The uGuru is a microcontroller with onboard firmware which programs
+ it to behave as a hwmon IC. There are many different revisions of the
+ firmware and thus effectively many different revisions of the uGuru.
+ Below is an incomplete list of which revisions are used for which
+ motherboards:
+
+ - uGuru 1.00 ~ 1.24 (AI7, KV8-MAX3, AN7) [1]_
+ - uGuru 2.0.0.0 ~ 2.0.4.2 (KV8-PRO)
+ - uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
+ - uGuru 2.2.0.0 ~ 2.2.0.6 (AA8 Fatal1ty)
+ - uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
+ - uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
+ AW9D-MAX) [2]_
+
+.. [1] For revision 2 and 3 uGurus the driver can autodetect the
+   sensor type (Volt or Temp) for bank1 sensors; for revision 1 uGurus
+   this does not always work. For these uGurus the autodetection can
+   be overridden with the bank1_types module param. For all 3 known
+   revision 1 motherboards the correct use of this param is:
+   bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1
+   You may also need to specify the fan_sensors option for these boards:
+   fan_sensors=5
+
+.. [2] There is a separate abituguru3 driver for these motherboards,
+ the abituguru (without the 3 !) driver will not work on these
+ motherboards (and vice versa)!
+
+Authors:
+ - Hans de Goede <j.w.r.degoede@hhs.nl>,
+ - (Initial reverse engineering done by Olle Sandberg
+ <ollebull@gmail.com>)
+
+
+Module Parameters
+-----------------
+
+* force: bool
+ Force detection. Note this parameter only causes the
+ detection to be skipped, and thus the insmod to
+ succeed. If the uGuru can't be read the actual hwmon
+ driver will not load and thus no hwmon device will get
+ registered.
+* bank1_types: int[]
+ Bank1 sensortype autodetection override:
+
+ * -1 autodetect (default)
+ * 0 volt sensor
+ * 1 temp sensor
+ * 2 not connected
+* fan_sensors: int
+ Tell the driver how many fan speed sensors there are
+ on your motherboard. Default: 0 (autodetect).
+* pwms: int
+ Tell the driver how many fan speed controls (fan
+ pwms) your motherboard has. Default: 0 (autodetect).
+* verbose: int
+ How verbose should the driver be? (0-3):
+
+ * 0 normal output
+ * 1 + verbose error reporting
+ * 2 + sensors type probing info (default)
+ * 3 + retryable error reporting
+
+ Default: 2 (the driver is still in the testing phase)
+
+Notice: if you need any of the first three options above, please insmod the
+driver with verbose set to 3 and mail me <j.w.r.degoede@hhs.nl> the output of:
+`dmesg | grep abituguru`
+
+
+Description
+-----------
+
+This driver supports the hardware monitoring features of the first and
+second revision of the Abit uGuru chip found on Abit uGuru featuring
+motherboards (most modern Abit motherboards).
+
+The first and second revision of the uGuru chip in reality is a Winbond
+W83L950D in disguise (despite Abit claiming it is "a new microprocessor
+designed by the ABIT Engineers"). Unfortunately this doesn't help since the
+W83L950D is a generic microcontroller with a custom Abit application running
+on it.
+
+Despite Abit not releasing any information regarding the uGuru, Olle
+Sandberg <ollebull@gmail.com> has managed to reverse engineer the sensor part
+of the uGuru. Without his work this driver would not have been possible.
+
+Known Issues
+------------
+
+The voltage and frequency control parts of the Abit uGuru are not supported.
+
+.. toctree::
+ :maxdepth: 1
+
+ abituguru-datasheet.rst
diff --git a/Documentation/hwmon/abituguru3 b/Documentation/hwmon/abituguru3.rst
index a6ccfe4bb6aa..514f11f41e8b 100644
--- a/Documentation/hwmon/abituguru3
+++ b/Documentation/hwmon/abituguru3.rst
@@ -3,41 +3,51 @@ Kernel driver abituguru3
Supported chips:
* Abit uGuru revision 3 (Hardware Monitor part, reading only)
+
Prefix: 'abituguru3'
+
Addresses scanned: ISA 0x0E0
+
Datasheet: Not available, this driver is based on reverse engineering.
+
Note:
The uGuru is a microcontroller with onboard firmware which programs
it to behave as a hwmon IC. There are many different revisions of the
firmware and thus effectively many different revisions of the uGuru.
Below is an incomplete list with which revisions are used for which
Motherboards:
- uGuru 1.00 ~ 1.24 (AI7, KV8-MAX3, AN7)
- uGuru 2.0.0.0 ~ 2.0.4.2 (KV8-PRO)
- uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
- uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
- uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
- AW9D-MAX)
+
+ - uGuru 1.00 ~ 1.24 (AI7, KV8-MAX3, AN7)
+ - uGuru 2.0.0.0 ~ 2.0.4.2 (KV8-PRO)
+ - uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
+ - uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
+ - uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
+ AW9D-MAX)
+
The abituguru3 driver is only for revision 3.0.x.x motherboards,
this driver will not work on older motherboards. For older
motherboards use the abituguru (without the 3 !) driver.
Authors:
- Hans de Goede <j.w.r.degoede@hhs.nl>,
- (Initial reverse engineering done by Louis Kruger)
+ - Hans de Goede <j.w.r.degoede@hhs.nl>,
+ - (Initial reverse engineering done by Louis Kruger)
Module Parameters
-----------------
-* force: bool Force detection. Note this parameter only causes the
+* force: bool
+ Force detection. Note this parameter only causes the
detection to be skipped, and thus the insmod to
succeed. If the uGuru can't be read the actual hwmon
driver will not load and thus no hwmon device will get
registered.
-* verbose: bool Should the driver be verbose?
- 0/off/false normal output
- 1/on/true + verbose error reporting (default)
+* verbose: bool
+ Should the driver be verbose?
+
+ * 0/off/false normal output
+ * 1/on/true + verbose error reporting (default)
+
Default: 1 (the driver is still in the testing phase)
Description
@@ -62,4 +72,4 @@ neither is writing any of the sensor settings and writing / reading the
fanspeed control registers (FanEQ)
If you encounter any problems please mail me <j.w.r.degoede@hhs.nl> and
-include the output of: "dmesg | grep abituguru"
+include the output of: `dmesg | grep abituguru`
diff --git a/Documentation/hwmon/abx500 b/Documentation/hwmon/abx500.rst
index 319a058cec7c..3d88b2ce0f00 100644
--- a/Documentation/hwmon/abx500
+++ b/Documentation/hwmon/abx500.rst
@@ -2,14 +2,18 @@ Kernel driver abx500
====================
Supported chips:
+
* ST-Ericsson ABx500 series
+
Prefix: 'abx500'
+
Addresses scanned: -
+
Datasheet: http://www.stericsson.com/developers/documentation.jsp
Authors:
- Martin Persson <martin.persson@stericsson.com>
- Hongbo Zhang <hongbo.zhang@linaro.org>
+ Martin Persson <martin.persson@stericsson.com>
+ Hongbo Zhang <hongbo.zhang@linaro.org>
Description
-----------
diff --git a/Documentation/hwmon/acpi_power_meter b/Documentation/hwmon/acpi_power_meter.rst
index c80399a00c50..4a0941ade0ca 100644
--- a/Documentation/hwmon/acpi_power_meter
+++ b/Documentation/hwmon/acpi_power_meter.rst
@@ -4,8 +4,11 @@ Kernel driver power_meter
This driver talks to ACPI 4.0 power meters.
Supported systems:
+
* Any recent system with ACPI 4.0.
+
Prefix: 'power_meter'
+
Datasheet: http://acpi.info/, section 10.4.
Author: Darrick J. Wong
@@ -18,26 +21,26 @@ the ACPI 4.0 spec (Chapter 10.4). These devices have a simple set of
features--a power meter that returns average power use over a configurable
interval, an optional capping mechanism, and a couple of trip points. The
sysfs interface conforms with the specification outlined in the "Power" section
-of Documentation/hwmon/sysfs-interface.
+of Documentation/hwmon/sysfs-interface.rst.
Special Features
----------------
-The power[1-*]_is_battery knob indicates if the power supply is a battery.
-Both power[1-*]_average_{min,max} must be set before the trip points will work.
+The `power[1-*]_is_battery` knob indicates if the power supply is a battery.
+Both `power[1-*]_average_{min,max}` must be set before the trip points will work.
When both of them are set, an ACPI event will be broadcast on the ACPI netlink
socket and a poll notification will be sent to the appropriate
-power[1-*]_average sysfs file.
+`power[1-*]_average` sysfs file.
-The power[1-*]_{model_number, serial_number, oem_info} fields display arbitrary
-strings that ACPI provides with the meter. The measures/ directory contains
-symlinks to the devices that this meter measures.
+The `power[1-*]_{model_number, serial_number, oem_info}` fields display
+arbitrary strings that ACPI provides with the meter. The measures/ directory
+contains symlinks to the devices that this meter measures.
Some computers have the ability to enforce a power cap in hardware. If this is
-the case, the power[1-*]_cap and related sysfs files will appear. When the
+the case, the `power[1-*]_cap` and related sysfs files will appear. When the
average power consumption exceeds the cap, an ACPI event will be broadcast on
the netlink event socket and a poll notification will be sent to the
-appropriate power[1-*]_alarm file to indicate that capping has begun, and the
+appropriate `power[1-*]_alarm` file to indicate that capping has begun, and the
hardware has taken action to reduce power consumption. Most likely this will
result in reduced performance.
@@ -46,6 +49,6 @@ all cases the ACPI event will be broadcast on the ACPI netlink event socket as
well as sent as a poll notification to a sysfs file. The events are as
follows:
-power[1-*]_cap will be notified if the firmware changes the power cap.
-power[1-*]_interval will be notified if the firmware changes the averaging
+`power[1-*]_cap` will be notified if the firmware changes the power cap.
+`power[1-*]_interval` will be notified if the firmware changes the averaging
interval.
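+
+Reading the average power from user space is then an ordinary sysfs read.
+A small sketch (the hwmon index is system specific and only an assumption
+here; per the hwmon conventions the value is in microwatts)::
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned long uw;
+          FILE *f = fopen("/sys/class/hwmon/hwmon0/power1_average", "r");
+
+          if (!f || fscanf(f, "%lu", &uw) != 1)
+                  return 1;
+          fclose(f);
+          printf("average power: %lu.%06lu W\n", uw / 1000000, uw % 1000000);
+          return 0;
+  }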
diff --git a/Documentation/hwmon/ad7314 b/Documentation/hwmon/ad7314.rst
index 1912549c7467..bf389736bcd1 100644
--- a/Documentation/hwmon/ad7314
+++ b/Documentation/hwmon/ad7314.rst
@@ -2,14 +2,23 @@ Kernel driver ad7314
====================
Supported chips:
+
* Analog Devices AD7314
+
Prefix: 'ad7314'
+
Datasheet: Publicly available at Analog Devices website.
+
* Analog Devices ADT7301
+
Prefix: 'adt7301'
+
Datasheet: Publicly available at Analog Devices website.
+
* Analog Devices ADT7302
+
Prefix: 'adt7302'
+
Datasheet: Publicly available at Analog Devices website.
Description
diff --git a/Documentation/hwmon/adc128d818 b/Documentation/hwmon/adc128d818.rst
index 39c95004dabc..6753468932ab 100644
--- a/Documentation/hwmon/adc128d818
+++ b/Documentation/hwmon/adc128d818.rst
@@ -2,11 +2,14 @@ Kernel driver adc128d818
========================
Supported chips:
+
* Texas Instruments ADC818D818
+
Prefix: 'adc818d818'
+
Addresses scanned: I2C 0x1d, 0x1e, 0x1f, 0x2d, 0x2e, 0x2f
- Datasheet: Publicly available at the TI website
- http://www.ti.com/
+
+ Datasheet: Publicly available at the TI website http://www.ti.com/
Author: Guenter Roeck
diff --git a/Documentation/hwmon/adm1021 b/Documentation/hwmon/adm1021.rst
index 02ad96cf9b2b..6cbb0f75fe00 100644
--- a/Documentation/hwmon/adm1021
+++ b/Documentation/hwmon/adm1021.rst
@@ -2,51 +2,91 @@ Kernel driver adm1021
=====================
Supported chips:
+
* Analog Devices ADM1021
+
Prefix: 'adm1021'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Analog Devices website
+
* Analog Devices ADM1021A/ADM1023
+
Prefix: 'adm1023'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Analog Devices website
+
* Genesys Logic GL523SM
+
Prefix: 'gl523sm'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet:
+
* Maxim MAX1617
+
Prefix: 'max1617'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Maxim website
+
* Maxim MAX1617A
+
Prefix: 'max1617a'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Maxim website
+
* National Semiconductor LM84
+
Prefix: 'lm84'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the National Semiconductor website
+
* Philips NE1617
+
Prefix: 'max1617' (probably detected as a max1617)
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Philips website
+
* Philips NE1617A
+
Prefix: 'max1617' (probably detected as a max1617)
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Philips website
+
* TI THMC10
+
Prefix: 'thmc10'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the TI website
+
* Onsemi MC1066
+
Prefix: 'mc1066'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Onsemi website
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>
Module Parameters
-----------------
diff --git a/Documentation/hwmon/adm1025 b/Documentation/hwmon/adm1025.rst
index 99f05049c68a..283e65e348a5 100644
--- a/Documentation/hwmon/adm1025
+++ b/Documentation/hwmon/adm1025.rst
@@ -2,23 +2,32 @@ Kernel driver adm1025
=====================
Supported chips:
+
* Analog Devices ADM1025, ADM1025A
+
Prefix: 'adm1025'
+
Addresses scanned: I2C 0x2c - 0x2e
+
Datasheet: Publicly available at the Analog Devices website
+
* Philips NE1619
+
Prefix: 'ne1619'
+
Addresses scanned: I2C 0x2c - 0x2d
+
Datasheet: Publicly available at the Philips website
The NE1619 presents some differences with the original ADM1025:
+
* Only two possible addresses (0x2c - 0x2d).
* No temperature offset register, but we don't use it anyway.
* No INT mode for pin 16. We don't play with it anyway.
Authors:
- Chen-Yuan Wu <gwu@esoft.com>,
- Jean Delvare <jdelvare@suse.de>
+ - Chen-Yuan Wu <gwu@esoft.com>,
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
diff --git a/Documentation/hwmon/adm1026 b/Documentation/hwmon/adm1026.rst
index d8fabe0c23ac..35d63e6498a3 100644
--- a/Documentation/hwmon/adm1026
+++ b/Documentation/hwmon/adm1026.rst
@@ -3,28 +3,36 @@ Kernel driver adm1026
Supported chips:
* Analog Devices ADM1026
+
Prefix: 'adm1026'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: Publicly available at the Analog Devices website
- http://www.onsemi.com/PowerSolutions/product.do?id=ADM1026
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADM1026
Authors:
- Philip Pokorny <ppokorny@penguincomputing.com> for Penguin Computing
- Justin Thiessen <jthiessen@penguincomputing.com>
+ - Philip Pokorny <ppokorny@penguincomputing.com> for Penguin Computing
+ - Justin Thiessen <jthiessen@penguincomputing.com>
Module Parameters
-----------------
* gpio_input: int array (min = 1, max = 17)
- List of GPIO pins (0-16) to program as inputs
+ List of GPIO pins (0-16) to program as inputs
+
* gpio_output: int array (min = 1, max = 17)
- List of GPIO pins (0-16) to program as outputs
+ List of GPIO pins (0-16) to program as outputs
+
* gpio_inverted: int array (min = 1, max = 17)
- List of GPIO pins (0-16) to program as inverted
+ List of GPIO pins (0-16) to program as inverted
+
* gpio_normal: int array (min = 1, max = 17)
- List of GPIO pins (0-16) to program as normal/non-inverted
+ List of GPIO pins (0-16) to program as normal/non-inverted
+
* gpio_fan: int array (min = 1, max = 8)
- List of GPIO pins (0-7) to program as fan tachs
+ List of GPIO pins (0-7) to program as fan tachs
Description
diff --git a/Documentation/hwmon/adm1031 b/Documentation/hwmon/adm1031.rst
index a143117c99cb..a677c3ab5574 100644
--- a/Documentation/hwmon/adm1031
+++ b/Documentation/hwmon/adm1031.rst
@@ -3,20 +3,28 @@ Kernel driver adm1031
Supported chips:
* Analog Devices ADM1030
+
Prefix: 'adm1030'
+
Addresses scanned: I2C 0x2c to 0x2e
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/en/prod/0%2C2877%2CADM1030%2C00.html
+
+ http://www.analog.com/en/prod/0%2C2877%2CADM1030%2C00.html
* Analog Devices ADM1031
+
Prefix: 'adm1031'
+
Addresses scanned: I2C 0x2c to 0x2e
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/en/prod/0%2C2877%2CADM1031%2C00.html
+
+ http://www.analog.com/en/prod/0%2C2877%2CADM1031%2C00.html
Authors:
- Alexandre d'Alton <alex@alexdalton.org>
- Jean Delvare <jdelvare@suse.de>
+ - Alexandre d'Alton <alex@alexdalton.org>
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
diff --git a/Documentation/hwmon/adm1275 b/Documentation/hwmon/adm1275.rst
index 5e277b0d91ce..9a1913e5b4d9 100644
--- a/Documentation/hwmon/adm1275
+++ b/Documentation/hwmon/adm1275.rst
@@ -2,29 +2,53 @@ Kernel driver adm1275
=====================
Supported chips:
+
* Analog Devices ADM1075
+
Prefix: 'adm1075'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1075.pdf
+
* Analog Devices ADM1272
+
Prefix: 'adm1272'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1272.pdf
+
* Analog Devices ADM1275
+
Prefix: 'adm1275'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1275.pdf
+
* Analog Devices ADM1276
+
Prefix: 'adm1276'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf
+
* Analog Devices ADM1278
+
Prefix: 'adm1278'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1278.pdf
+
* Analog Devices ADM1293/ADM1294
+
Prefix: 'adm1293', 'adm1294'
+
Addresses scanned: -
+
Datasheet: http://www.analog.com/media/en/technical-documentation/data-sheets/ADM1293_1294.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -44,7 +68,7 @@ integrated 12 bit analog-to-digital converter (ADC), accessed using a
PMBus interface.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -66,7 +90,7 @@ Platform data support
---------------------
The driver supports standard PMBus driver platform data. Please see
-Documentation/hwmon/pmbus for details.
+Documentation/hwmon/pmbus.rst for details.
Sysfs entries
@@ -75,6 +99,7 @@ Sysfs entries
The following attributes are supported. Limits are read-write, history reset
attributes are write-only, all other attributes are read-only.
+======================= =======================================================
inX_label "vin1" or "vout1" depending on chip variant and
configuration. On ADM1075, ADM1293, and ADM1294,
vout1 reports the voltage on the VAUX pin.
@@ -120,3 +145,4 @@ temp1_reset_history Write any value to reset history.
Temperature attributes are supported on ADM1272 and
ADM1278.
+======================= =======================================================
diff --git a/Documentation/hwmon/adm9240 b/Documentation/hwmon/adm9240.rst
index 9b174fc700cc..91063b0f4c6f 100644
--- a/Documentation/hwmon/adm9240
+++ b/Documentation/hwmon/adm9240.rst
@@ -2,30 +2,43 @@ Kernel driver adm9240
=====================
Supported chips:
+
* Analog Devices ADM9240
+
Prefix: 'adm9240'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/UploadedFiles/Data_Sheets/79857778ADM9240_0.pdf
+
+ http://www.analog.com/UploadedFiles/Data_Sheets/79857778ADM9240_0.pdf
* Dallas Semiconductor DS1780
+
Prefix: 'ds1780'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: Publicly available at the Dallas Semiconductor (Maxim) website
- http://pdfserv.maxim-ic.com/en/ds/DS1780.pdf
+
+ http://pdfserv.maxim-ic.com/en/ds/DS1780.pdf
* National Semiconductor LM81
+
Prefix: 'lm81'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/ds.cgi/LM/LM81.pdf
+
+ http://www.national.com/ds.cgi/LM/LM81.pdf
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Michiel Rook <michiel@grendelproject.nl>,
- Grant Coady <gcoady.lk@gmail.com> with guidance
- from Jean Delvare <jdelvare@suse.de>
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>,
+ - Michiel Rook <michiel@grendelproject.nl>,
+ - Grant Coady <gcoady.lk@gmail.com> with guidance
+ from Jean Delvare <jdelvare@suse.de>
Interface
---------
@@ -87,11 +100,13 @@ rpm = (22500 * 60) / (count * divider)
Automatic fan clock divider
* User sets 0 to fan_min limit
+
- low speed alarm is disabled
- fan clock divider not changed
- auto fan clock adjuster enabled for valid fan speed reading
* User sets fan_min limit too low
+
- low speed alarm is enabled
- fan clock divider set to max
- fan_min set to register value 254 which corresponds
@@ -101,18 +116,20 @@ Automatic fan clock divider
- auto fan clock adjuster disabled
* User sets reasonable fan speed
+
- low speed alarm is enabled
- fan clock divider set to suit fan_min
- auto fan clock adjuster enabled: adjusts fan_min
* User sets unreasonably high low fan speed limit
+
- resolution of the low speed limit may be reduced
- alarm will be asserted
- auto fan clock adjuster enabled: adjusts fan_min
- * fan speed may be displayed as zero until the auto fan clock divider
- adjuster brings fan speed clock divider back into chip measurement
- range, this will occur within a few measurement cycles.
+ * fan speed may be displayed as zero until the auto fan clock divider
+ adjuster brings fan speed clock divider back into chip measurement
+ range, this will occur within a few measurement cycles.
Analog Output
-------------
@@ -122,16 +139,21 @@ power up or reset. This doesn't do much on the test Intel SE440BX-2.
Voltage Monitor
+^^^^^^^^^^^^^^^
+
Voltage (IN) measurement is internally scaled:
+ === =========== =========== ========= ==========
nr label nominal maximum resolution
- mV mV mV
+ mV mV mV
+ === =========== =========== ========= ==========
0 +2.5V 2500 3320 13.0
1 Vccp1 2700 3600 14.1
2 +3.3V 3300 4380 17.2
3 +5V 5000 6640 26.0
4 +12V 12000 15940 62.5
5 Vccp2 2700 3600 14.1
+ === =========== =========== ========= ==========
The reading is an unsigned 8-bit value, nominal voltage measurement is
represented by a reading of 192, being 3/4 of the measurement range.
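+
+In C, that scaling reduces to one expression per input (a sketch; nominal_mv
+is the per-channel nominal value from the table above)::
+
+  /* a reading of 192 equals the nominal voltage, scale linearly from there */
+  static inline unsigned int adm9240_in_mv(unsigned char reading,
+                                           unsigned int nominal_mv)
+  {
+          return reading * nominal_mv / 192;
+  }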
@@ -159,8 +181,9 @@ Clear the CI latch by writing value 0 to the sysfs intrusion0_alarm file.
Alarm flags reported as 16-bit word
+ === ============= ==========================
bit label comment
- --- ------------- --------------------------
+ === ============= ==========================
0 +2.5 V_Error high or low limit exceeded
1 VCCP_Error high or low limit exceeded
2 +3.3 V_Error high or low limit exceeded
@@ -171,6 +194,7 @@ Alarm flags reported as 16-bit word
8 +12 V_Error high or low limit exceeded
9 VCCP2_Error high or low limit exceeded
12 Chassis_Error CI pin went high
+ === ============= ==========================
Remaining bits are reserved and thus undefined. It is important to note
that alarm bits may be cleared on read, user-space may latch alarms and
diff --git a/Documentation/hwmon/ads1015 b/Documentation/hwmon/ads1015.rst
index 02d2a459385f..e0951c4e57bb 100644
--- a/Documentation/hwmon/ads1015
+++ b/Documentation/hwmon/ads1015.rst
@@ -2,17 +2,25 @@ Kernel driver ads1015
=====================
Supported chips:
+
* Texas Instruments ADS1015
+
Prefix: 'ads1015'
- Datasheet: Publicly available at the Texas Instruments website :
- http://focus.ti.com/lit/ds/symlink/ads1015.pdf
+
+ Datasheet: Publicly available at the Texas Instruments website:
+
+ http://focus.ti.com/lit/ds/symlink/ads1015.pdf
+
* Texas Instruments ADS1115
+
Prefix: 'ads1115'
- Datasheet: Publicly available at the Texas Instruments website :
- http://focus.ti.com/lit/ds/symlink/ads1115.pdf
+
+ Datasheet: Publicly available at the Texas Instruments website:
+
+ http://focus.ti.com/lit/ds/symlink/ads1115.pdf
Authors:
- Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
+ Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
Description
-----------
@@ -24,14 +32,15 @@ This device is a 12/16-bit A-D converter with 4 inputs.
The inputs can be used single ended or in certain differential combinations.
The inputs can be made available by 8 sysfs input files in0_input - in7_input:
-in0: Voltage over AIN0 and AIN1.
-in1: Voltage over AIN0 and AIN3.
-in2: Voltage over AIN1 and AIN3.
-in3: Voltage over AIN2 and AIN3.
-in4: Voltage over AIN0 and GND.
-in5: Voltage over AIN1 and GND.
-in6: Voltage over AIN2 and GND.
-in7: Voltage over AIN3 and GND.
+
+ - in0: Voltage over AIN0 and AIN1.
+ - in1: Voltage over AIN0 and AIN3.
+ - in2: Voltage over AIN1 and AIN3.
+ - in3: Voltage over AIN2 and AIN3.
+ - in4: Voltage over AIN0 and GND.
+ - in5: Voltage over AIN1 and GND.
+ - in6: Voltage over AIN2 and GND.
+ - in7: Voltage over AIN3 and GND.
Which inputs are available can be configured using platform data or devicetree.
@@ -42,29 +51,34 @@ Platform Data
In linux/platform_data/ads1015.h platform data is defined, channel_data contains
configuration data for the used input combinations:
+
- pga is the programmable gain amplifier (values are full scale)
- 0: +/- 6.144 V
- 1: +/- 4.096 V
- 2: +/- 2.048 V
- 3: +/- 1.024 V
- 4: +/- 0.512 V
- 5: +/- 0.256 V
+
+ - 0: +/- 6.144 V
+ - 1: +/- 4.096 V
+ - 2: +/- 2.048 V
+ - 3: +/- 1.024 V
+ - 4: +/- 0.512 V
+ - 5: +/- 0.256 V
+
- data_rate in samples per second
- 0: 128
- 1: 250
- 2: 490
- 3: 920
- 4: 1600
- 5: 2400
- 6: 3300
-
-Example:
-struct ads1015_platform_data data = {
+
+ - 0: 128
+ - 1: 250
+ - 2: 490
+ - 3: 920
+ - 4: 1600
+ - 5: 2400
+ - 6: 3300
+
+Example::
+
+ struct ads1015_platform_data data = {
.channel_data = {
[2] = { .enabled = true, .pga = 1, .data_rate = 0 },
[4] = { .enabled = true, .pga = 4, .data_rate = 5 },
}
-};
+ };
In this case only in2_input (FS +/- 4.096 V, 128 SPS) and in4_input
(FS +/- 0.512 V, 2400 SPS) would be created.
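For reference, a raw 12-bit two's-complement result scales linearly within
the selected full-scale range; a rough sketch of the conversion (the helper
name and exact rounding are illustrative, not the driver's internals)::

  /* Full-scale range in microvolts, indexed by the pga value above. */
  static const int fullscale_uV[] = {
          6144000, 4096000, 2048000, 1024000, 512000, 256000,
  };

  static int ads1015_raw_to_uV(short raw, unsigned int pga)
  {
          /* the signed 12-bit code -2048..2047 spans -FS..+FS */
          return (long long)raw * fullscale_uV[pga] / 2048;
  }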
diff --git a/Documentation/hwmon/ads7828 b/Documentation/hwmon/ads7828.rst
index f6e263e0f607..b830b490cfe4 100644
--- a/Documentation/hwmon/ads7828
+++ b/Documentation/hwmon/ads7828.rst
@@ -2,20 +2,27 @@ Kernel driver ads7828
=====================
Supported chips:
+
* Texas Instruments/Burr-Brown ADS7828
+
Prefix: 'ads7828'
+
Datasheet: Publicly available at the Texas Instruments website:
- http://focus.ti.com/lit/ds/symlink/ads7828.pdf
+
+ http://focus.ti.com/lit/ds/symlink/ads7828.pdf
* Texas Instruments ADS7830
+
Prefix: 'ads7830'
+
Datasheet: Publicly available at the Texas Instruments website:
- http://focus.ti.com/lit/ds/symlink/ads7830.pdf
+
+ http://focus.ti.com/lit/ds/symlink/ads7830.pdf
Authors:
- Steve Hardy <shardy@redhat.com>
- Vivien Didelot <vivien.didelot@savoirfairelinux.com>
- Guillaume Roguez <guillaume.roguez@savoirfairelinux.com>
+ - Steve Hardy <shardy@redhat.com>
+ - Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ - Guillaume Roguez <guillaume.roguez@savoirfairelinux.com>
Platform data
-------------
@@ -24,16 +31,16 @@ The ads7828 driver accepts an optional ads7828_platform_data structure (defined
in include/linux/platform_data/ads7828.h). The structure fields are:
* diff_input: (bool) Differential operation
- set to true for differential mode, false for default single ended mode.
+ set to true for differential mode, false for default single ended mode.
* ext_vref: (bool) External reference
- set to true if it operates with an external reference, false for default
- internal reference.
+ set to true if it operates with an external reference, false for default
+ internal reference.
* vref_mv: (unsigned int) Voltage reference
- if using an external reference, set this to the reference voltage in mV,
- otherwise it will default to the internal value (2500mV). This value will be
- bounded with limits accepted by the chip, described in the datasheet.
+ if using an external reference, set this to the reference voltage in mV,
+ otherwise it will default to the internal value (2500mV). This value will be
+ bounded with limits accepted by the chip, described in the datasheet.
If no structure is provided, the configuration defaults to single ended
operation and internal voltage reference (2.5V).
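A minimal sketch of such a structure, assuming a board wired for differential
operation with a 3.0 V external reference::

  #include <linux/platform_data/ads7828.h>

  static struct ads7828_platform_data ads7828_pdata = {
          .diff_input = true,   /* differential mode */
          .ext_vref   = true,   /* external reference in use */
          .vref_mv    = 3000,   /* reference voltage in mV */
  };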
diff --git a/Documentation/hwmon/adt7410 b/Documentation/hwmon/adt7410.rst
index 9817941e5f19..24caaa83c8ec 100644
--- a/Documentation/hwmon/adt7410
+++ b/Documentation/hwmon/adt7410.rst
@@ -2,26 +2,45 @@ Kernel driver adt7410
=====================
Supported chips:
+
* Analog Devices ADT7410
+
Prefix: 'adt7410'
+
Addresses scanned: None
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
+
+ http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
* Analog Devices ADT7420
+
Prefix: 'adt7420'
+
Addresses scanned: None
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf
+
+ http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf
+
* Analog Devices ADT7310
+
Prefix: 'adt7310'
+
Addresses scanned: None
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/static/imported-files/data_sheets/ADT7310.pdf
+
+ http://www.analog.com/static/imported-files/data_sheets/ADT7310.pdf
+
* Analog Devices ADT7320
+
Prefix: 'adt7320'
+
Addresses scanned: None
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/static/imported-files/data_sheets/ADT7320.pdf
+
+ http://www.analog.com/static/imported-files/data_sheets/ADT7320.pdf
Author: Hartmut Knaack <knaack.h@gmx.de>
@@ -61,13 +80,15 @@ The device is set to 16 bit resolution and comparator mode.
sysfs-Interface
---------------
-temp#_input - temperature input
-temp#_min - temperature minimum setpoint
-temp#_max - temperature maximum setpoint
-temp#_crit - critical temperature setpoint
-temp#_min_hyst - hysteresis for temperature minimum (read-only)
-temp#_max_hyst - hysteresis for temperature maximum (read/write)
-temp#_crit_hyst - hysteresis for critical temperature (read-only)
-temp#_min_alarm - temperature minimum alarm flag
-temp#_max_alarm - temperature maximum alarm flag
-temp#_crit_alarm - critical temperature alarm flag
+======================== ====================================================
+temp#_input temperature input
+temp#_min temperature minimum setpoint
+temp#_max temperature maximum setpoint
+temp#_crit critical temperature setpoint
+temp#_min_hyst hysteresis for temperature minimum (read-only)
+temp#_max_hyst hysteresis for temperature maximum (read/write)
+temp#_crit_hyst hysteresis for critical temperature (read-only)
+temp#_min_alarm temperature minimum alarm flag
+temp#_max_alarm temperature maximum alarm flag
+temp#_crit_alarm critical temperature alarm flag
+======================== ====================================================
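All of these are reported in millidegrees Celsius; a small user-space sketch
for reading one of them (the hwmon path is illustrative and depends on how
the device enumerates)::

  #include <stdio.h>

  int main(void)
  {
          long mdeg;
          FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

          if (!f)
                  return 1;
          if (fscanf(f, "%ld", &mdeg) != 1) {
                  fclose(f);
                  return 1;
          }
          fclose(f);
          printf("temp1: %.3f degC\n", mdeg / 1000.0);
          return 0;
  }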
diff --git a/Documentation/hwmon/adt7411 b/Documentation/hwmon/adt7411.rst
index 1632960f9745..57ad16fb216a 100644
--- a/Documentation/hwmon/adt7411
+++ b/Documentation/hwmon/adt7411.rst
@@ -2,9 +2,13 @@ Kernel driver adt7411
=====================
Supported chips:
+
* Analog Devices ADT7411
+
Prefix: 'adt7411'
+
Addresses scanned: 0x48, 0x4a, 0x4b
+
Datasheet: Publicly available at the Analog Devices website
Author: Wolfram Sang (based on adt7470 by Darrick J. Wong)
@@ -26,15 +30,19 @@ Check the datasheet for details.
sysfs-Interface
---------------
-in0_input - vdd voltage input
-in[1-8]_input - analog 1-8 input
-temp1_input - temperature input
+================ =================
+in0_input vdd voltage input
+in[1-8]_input analog 1-8 input
+temp1_input temperature input
+================ =================
Besides standard interfaces, this driver adds (0 = off, 1 = on):
- adc_ref_vdd - Use vdd as reference instead of 2.25 V
- fast_sampling - Sample at 22.5 kHz instead of 1.4 kHz, but drop filters
- no_average - Turn off averaging over 16 samples
+ ============== =======================================================
+ adc_ref_vdd Use vdd as reference instead of 2.25 V
+ fast_sampling Sample at 22.5 kHz instead of 1.4 kHz, but drop filters
+ no_average Turn off averaging over 16 samples
+ ============== =======================================================
Notes
-----
diff --git a/Documentation/hwmon/adt7462 b/Documentation/hwmon/adt7462.rst
index ec660b328275..139e19696188 100644
--- a/Documentation/hwmon/adt7462
+++ b/Documentation/hwmon/adt7462.rst
@@ -1,10 +1,14 @@
Kernel driver adt7462
-======================
+=====================
Supported chips:
+
* Analog Devices ADT7462
+
Prefix: 'adt7462'
+
Addresses scanned: I2C 0x58, 0x5C
+
Datasheet: Publicly available at the Analog Devices website
Author: Darrick J. Wong
@@ -57,11 +61,10 @@ Besides standard interfaces driver adds the following:
* pwm#_auto_point1_pwm and temp#_auto_point1_temp and
* pwm#_auto_point2_pwm and temp#_auto_point2_temp -
-point1: Set the pwm speed at a lower temperature bound.
-point2: Set the pwm speed at a higher temperature bound.
+ - point1: Set the pwm speed at a lower temperature bound.
+ - point2: Set the pwm speed at a higher temperature bound.
The ADT7462 will scale the pwm between the lower and higher pwm speed when
the temperature is between the two temperature boundaries. PWM values range
from 0 (off) to 255 (full speed). Fan speed will be set to maximum when the
temperature sensor associated with the PWM control exceeds temp#_max.
-
diff --git a/Documentation/hwmon/adt7470 b/Documentation/hwmon/adt7470.rst
index fe68e18a0c8d..d225f816e992 100644
--- a/Documentation/hwmon/adt7470
+++ b/Documentation/hwmon/adt7470.rst
@@ -2,9 +2,13 @@ Kernel driver adt7470
=====================
Supported chips:
+
* Analog Devices ADT7470
+
Prefix: 'adt7470'
+
Addresses scanned: I2C 0x2C, 0x2E, 0x2F
+
Datasheet: Publicly available at the Analog Devices website
Author: Darrick J. Wong
@@ -56,8 +60,8 @@ Besides standard interfaces driver adds the following:
* pwm#_auto_point1_pwm and pwm#_auto_point1_temp and
* pwm#_auto_point2_pwm and pwm#_auto_point2_temp -
-point1: Set the pwm speed at a lower temperature bound.
-point2: Set the pwm speed at a higher temperature bound.
+ - point1: Set the pwm speed at a lower temperature bound.
+ - point2: Set the pwm speed at a higher temperature bound.
The ADT7470 will scale the pwm between the lower and higher pwm speed when
the temperature is between the two temperature boundaries. PWM values range
diff --git a/Documentation/hwmon/adt7475 b/Documentation/hwmon/adt7475.rst
index 01b46b290532..ef3ea1ea9bc1 100644
--- a/Documentation/hwmon/adt7475
+++ b/Documentation/hwmon/adt7475.rst
@@ -2,28 +2,44 @@ Kernel driver adt7475
=====================
Supported chips:
+
* Analog Devices ADT7473
+
Prefix: 'adt7473'
+
Addresses scanned: I2C 0x2C, 0x2D, 0x2E
+
Datasheet: Publicly available at the On Semiconductors website
+
* Analog Devices ADT7475
+
Prefix: 'adt7475'
+
Addresses scanned: I2C 0x2E
+
Datasheet: Publicly available at the On Semiconductors website
+
* Analog Devices ADT7476
+
Prefix: 'adt7476'
+
Addresses scanned: I2C 0x2C, 0x2D, 0x2E
+
Datasheet: Publicly available at the On Semiconductors website
+
* Analog Devices ADT7490
+
Prefix: 'adt7490'
+
Addresses scanned: I2C 0x2C, 0x2D, 0x2E
+
Datasheet: Publicly available at the On Semiconductors website
Authors:
- Jordan Crouse
- Hans de Goede
- Darrick J. Wong (documentation)
- Jean Delvare
+ - Jordan Crouse
+ - Hans de Goede
+ - Darrick J. Wong (documentation)
+ - Jean Delvare
Description
@@ -82,14 +98,16 @@ ADT7490:
Sysfs Mapping
-------------
- ADT7490 ADT7476 ADT7475 ADT7473
- ------- ------- ------- -------
+==== =========== =========== ========= ==========
+in ADT7490 ADT7476 ADT7475 ADT7473
+==== =========== =========== ========= ==========
in0 2.5VIN (22) 2.5VIN (22) - -
in1 VCCP (23) VCCP (23) VCCP (14) VCCP (14)
in2 VCC (4) VCC (4) VCC (4) VCC (3)
in3 5VIN (20) 5VIN (20)
in4 12VIN (21) 12VIN (21)
in5 VTT (8)
+==== =========== =========== ========= ==========
Special Features
----------------
@@ -107,8 +125,8 @@ Fan Speed Control
The driver exposes two trip points per PWM channel.
-point1: Set the PWM speed at the lower temperature bound
-point2: Set the PWM speed at the higher temperature bound
+- point1: Set the PWM speed at the lower temperature bound
+- point2: Set the PWM speed at the higher temperature bound
The ADT747x will scale the PWM linearly between the lower and higher PWM
speed when the temperature is between the two temperature boundaries.
@@ -123,12 +141,12 @@ the PWM control exceeds temp#_max.
At Tmin - hysteresis the PWM output can either be off (0% duty cycle) or at the
minimum (i.e. auto_point1_pwm). This behaviour can be configured using the
-pwm[1-*]_stall_disable sysfs attribute. A value of 0 means the fans will shut
+`pwm[1-*]_stall_disable` sysfs attribute. A value of 0 means the fans will shut
off. A value of 1 means the fans will run at auto_point1_pwm.
The responsiveness of the ADT747x to temperature changes can be configured.
This allows smoothing of the fan speed transition. To set the transition time
-set the value in ms in the temp[1-*]_smoothing sysfs attribute.
+set the value in ms in the `temp[1-*]_smoothing` sysfs attribute.
Notes
-----
diff --git a/Documentation/hwmon/amc6821 b/Documentation/hwmon/amc6821.rst
index ced8359c50f8..5ddb2849da90 100644
--- a/Documentation/hwmon/amc6821
+++ b/Documentation/hwmon/amc6821.rst
@@ -2,9 +2,13 @@ Kernel driver amc6821
=====================
Supported chips:
+
Texas Instruments AMC6821
+
Prefix: 'amc6821'
+
Addresses scanned: 0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e, 0x4c, 0x4d, 0x4e
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/amc6821.html
Authors:
@@ -21,10 +25,11 @@ The pwm can be controlled either from software or automatically.
The driver provides the following sensor accesses in sysfs:
+======================= == ===============================================
temp1_input ro on-chip temperature
temp1_min rw "
temp1_max rw "
-temp1_crit rw "
+temp1_crit rw "
temp1_min_alarm ro "
temp1_max_alarm ro "
temp1_crit_alarm ro "
@@ -32,16 +37,16 @@ temp1_crit_alarm ro "
temp2_input ro remote temperature
temp2_min rw "
temp2_max rw "
-temp2_crit rw "
+temp2_crit rw "
temp2_min_alarm ro "
temp2_max_alarm ro "
temp2_crit_alarm ro "
temp2_fault ro "
-fan1_input ro tachometer speed
+fan1_input ro tachometer speed
fan1_min rw "
fan1_max rw "
-fan1_fault ro "
+fan1_fault ro "
fan1_div rw Fan divisor can be either 2 or 4.
pwm1 rw pwm1
@@ -87,6 +92,7 @@ temp2_auto_point3_temp rw Above this temperature fan runs at maximum
values which depend on temp2_auto_point2_temp
and pwm1_auto_point2_pwm. Read it out after
writing to get actual value.
+======================= == ===============================================
Module parameters
@@ -97,6 +103,6 @@ load the module with: init=0.
If your board BIOS doesn't initialize the chip, or you want
different settings, you can set the following parameters:
-init=1,
-pwminv: 0 default pwm output, 1 inverts pwm output.
+- init=1,
+- pwminv: 0 default pwm output, 1 inverts pwm output.
diff --git a/Documentation/hwmon/asb100 b/Documentation/hwmon/asb100.rst
index ab7365e139be..c2d5f97085fe 100644
--- a/Documentation/hwmon/asb100
+++ b/Documentation/hwmon/asb100.rst
@@ -2,9 +2,13 @@ Kernel driver asb100
====================
Supported Chips:
+
* Asus ASB100 and ASB100-A "Bach"
+
Prefix: 'asb100'
+
Addresses scanned: I2C 0x2d
+
Datasheet: none released
Author: Mark M. Hoffman <mhoffman@lightlink.com>
@@ -41,32 +45,29 @@ processor itself. It is a value in volts.
Alarms: (TODO question marks indicate may or may not work)
-0x0001 => in0 (?)
-0x0002 => in1 (?)
-0x0004 => in2
-0x0008 => in3
-0x0010 => temp1 (1)
-0x0020 => temp2
-0x0040 => fan1
-0x0080 => fan2
-0x0100 => in4
-0x0200 => in5 (?) (2)
-0x0400 => in6 (?) (2)
-0x0800 => fan3
-0x1000 => chassis switch
-0x2000 => temp3
-
-Alarm Notes:
-
-(1) This alarm will only trigger if the hysteresis value is 127C.
-I.e. it behaves the same as w83781d.
-
-(2) The min and max registers for these values appear to
-be read-only or otherwise stuck at 0x00.
+- 0x0001 => in0 (?)
+- 0x0002 => in1 (?)
+- 0x0004 => in2
+- 0x0008 => in3
+- 0x0010 => temp1 [1]_
+- 0x0020 => temp2
+- 0x0040 => fan1
+- 0x0080 => fan2
+- 0x0100 => in4
+- 0x0200 => in5 (?) [2]_
+- 0x0400 => in6 (?) [2]_
+- 0x0800 => fan3
+- 0x1000 => chassis switch
+- 0x2000 => temp3
+
+.. [1] This alarm will only trigger if the hysteresis value is 127C.
+ I.e. it behaves the same as w83781d.
+
+.. [2] The min and max registers for these values appear to
+ be read-only or otherwise stuck at 0x00.
TODO:
-* Experiment with fan divisors > 8.
-* Experiment with temp. sensor types.
-* Are there really 13 voltage inputs? Probably not...
-* Cleanups, no doubt...
-
+ * Experiment with fan divisors > 8.
+ * Experiment with temp. sensor types.
+ * Are there really 13 voltage inputs? Probably not...
+ * Cleanups, no doubt...
diff --git a/Documentation/hwmon/asc7621 b/Documentation/hwmon/asc7621.rst
index 7287be7e1f21..b5a9fad0f172 100644
--- a/Documentation/hwmon/asc7621
+++ b/Documentation/hwmon/asc7621.rst
@@ -1,10 +1,15 @@
+=====================
Kernel driver asc7621
-==================
+=====================
Supported chips:
+
Andigilog aSC7621 and aSC7621a
+
Prefix: 'asc7621'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.fairview5.com/linux/asc7621/asc7621.pdf
Author:
@@ -73,8 +78,10 @@ Finally, we have added a tach disable function that turns off the tach
measurement system for individual tachs in order to save power. That is
in register 75h.
---
+--------------------------------------------------------------------------
+
aSC7621 Product Description
+===========================
The aSC7621 has a two wire digital interface compatible with SMBus 2.0.
Using a 10-bit ADC, the aSC7621 measures the temperature of two remote diode
@@ -102,6 +109,8 @@ System voltages of VCCP, 2.5V, 3.3V, 5.0V, and 12V motherboard power are
monitored efficiently with internal scaling resistors.
Features
+--------
+
- Supports PECI interface and monitors internal and remote thermal diodes
- 2-wire, SMBus 2.0 compliant, serial interface
- 10-bit ADC
@@ -110,7 +119,7 @@ Features
- Noise filtering of temperature reading for fan speed control
- 0.25C digital temperature sensor resolution
- 3 PWM fan speed control outputs for 2-, 3- or 4-wire fans and up to 4 fan
- tachometer inputs
+ tachometer inputs
- Enhanced measured temperature to Temperature Zone assignment.
- Provides high and low PWM frequency ranges
- 3 GPIO pins for custom use
@@ -123,17 +132,20 @@ Except where noted below, the sysfs entries created by this driver follow
the standards defined in "sysfs-interface".
temp1_source
+ = ===============================================
0 (default) peci_legacy = 0, Remote 1 Temperature
- peci_legacy = 1, PECI Processor Temperature 0
+ peci_legacy = 1, PECI Processor Temperature 0
1 Remote 1 Temperature
2 Remote 2 Temperature
3 Internal Temperature
4 PECI Processor Temperature 0
5 PECI Processor Temperature 1
6 PECI Processor Temperature 2
- 7 PECI Processor Temperature 3
+ 7 PECI Processor Temperature 3
+ = ===============================================
temp2_source
+ = ===============================================
0 (default) Internal Temperature
1 Remote 1 Temperature
2 Remote 2 Temperature
@@ -142,8 +154,10 @@ temp2_source
5 PECI Processor Temperature 1
6 PECI Processor Temperature 2
7 PECI Processor Temperature 3
+ = ===============================================
temp3_source
+ = ===============================================
0 (default) Remote 2 Temperature
1 Remote 1 Temperature
2 Remote 2 Temperature
@@ -152,10 +166,12 @@ temp3_source
5 PECI Processor Temperature 1
6 PECI Processor Temperature 2
7 PECI Processor Temperature 3
+ = ===============================================
temp4_source
+ = ===============================================
0 (default) peci_legacy = 0, PECI Processor Temperature 0
- peci_legacy = 1, Remote 1 Temperature
+ peci_legacy = 1, Remote 1 Temperature
1 Remote 1 Temperature
2 Remote 2 Temperature
3 Internal Temperature
@@ -163,58 +179,65 @@ temp4_source
5 PECI Processor Temperature 1
6 PECI Processor Temperature 2
7 PECI Processor Temperature 3
+ = ===============================================
-temp[1-4]_smoothing_enable
-temp[1-4]_smoothing_time
+temp[1-4]_smoothing_enable / temp[1-4]_smoothing_time
Smooths spikes in temp readings caused by noise.
Valid values in milliseconds are:
- 35000
- 17600
- 11800
- 7000
- 4400
- 3000
- 1600
- 800
+
+ * 35000
+ * 17600
+ * 11800
+ * 7000
+ * 4400
+ * 3000
+ * 1600
+ * 800
temp[1-4]_crit
When the corresponding zone temperature reaches this value,
ALL pwm outputs will go to 100%.
-temp[5-8]_input
-temp[5-8]_enable
+temp[5-8]_input / temp[5-8]_enable
The aSC7621 can also read temperatures provided by the processor
via the PECI bus. Usually these are "core" temps and are relative
to the point where the automatic thermal control circuit starts
throttling. This means that these are usually negative numbers.
pwm[1-3]_enable
+ =============== ========================================================
0 Fan off.
1 Fan on manual control.
2 Fan on automatic control and will run at the minimum pwm
- if the temperature for the zone is below the minimum.
- 3 Fan on automatic control but will be off if the temperature
- for the zone is below the minimum.
- 4-254 Ignored.
+ if the temperature for the zone is below the minimum.
+ 3 Fan on automatic control but will be off if the
+ temperature for the zone is below the minimum.
+ 4-254 Ignored.
255 Fan on full.
+ =============== ========================================================
pwm[1-3]_auto_channels
Bitmap as described in sysctl-interface with the following
exceptions...
+
Only the following combinations of zones (and their corresponding masks)
are valid:
- 1
- 2
- 3
- 2,3
- 1,2,3
- 4
- 1,2,3,4
- Special values:
- 0 Disabled.
- 16 Fan on manual control.
- 31 Fan on full.
+ * 1
+ * 2
+ * 3
+ * 2,3
+ * 1,2,3
+ * 4
+ * 1,2,3,4
+
+ * Special values:
+
+ == ======================
+ 0 Disabled.
+ 16 Fan on manual control.
+ 31 Fan on full.
+ == ======================
pwm[1-3]_invert
@@ -226,22 +249,22 @@ pwm[1-3]_freq
PWM frequency in Hz
Valid values in Hz are:
- 10
- 15
- 23
- 30 (default)
- 38
- 47
- 62
- 94
- 23000
- 24000
- 25000
- 26000
- 27000
- 28000
- 29000
- 30000
+ * 10
+ * 15
+ * 23
+ * 30 (default)
+ * 38
+ * 47
+ * 62
+ * 94
+ * 23000
+ * 24000
+ * 25000
+ * 26000
+ * 27000
+ * 28000
+ * 29000
+ * 30000
Setting any other value will be ignored.
@@ -251,17 +274,17 @@ peci_enable
peci_avg
Input filter average time.
- 0 0 Sec. (no Smoothing) (default)
- 1 0.25 Sec.
- 2 0.5 Sec.
- 3 1.0 Sec.
- 4 2.0 Sec.
- 5 4.0 Sec.
- 6 8.0 Sec.
- 7 0.0 Sec.
+ * 0 0 Sec. (no Smoothing) (default)
+ * 1 0.25 Sec.
+ * 2 0.5 Sec.
+ * 3 1.0 Sec.
+ * 4 2.0 Sec.
+ * 5 4.0 Sec.
+ * 6 8.0 Sec.
+ * 7 0.0 Sec.
peci_legacy
-
+ = ============================================
0 Standard Mode (default)
Remote Diode 1 reading is associated with
Temperature Zone 1, PECI is associated with
@@ -270,10 +293,12 @@ peci_legacy
1 Legacy Mode
PECI is associated with Temperature Zone 1,
Remote Diode 1 is associated with Zone 4
+ = ============================================
peci_diode
Diode filter
+ = ====================
0 0.25 Sec.
1 1.1 Sec.
2 2.4 Sec. (default)
@@ -282,15 +307,20 @@ peci_diode
5 6.8 Sec.
6 10.2 Sec.
7 16.4 Sec.
+ = ====================
peci_4domain
Four domain enable
+ = ===============================================
0 1 or 2 Domains for enabled processors (default)
1 3 or 4 Domains for enabled processors
+ = ===============================================
peci_domain
Domain
+ = ==================================================
0 Processor contains a single domain (0) (default)
1 Processor contains two domains (0,1)
+ = ==================================================
diff --git a/Documentation/hwmon/aspeed-pwm-tacho b/Documentation/hwmon/aspeed-pwm-tacho.rst
index 7cfb34977460..6dcec845fbc7 100644
--- a/Documentation/hwmon/aspeed-pwm-tacho
+++ b/Documentation/hwmon/aspeed-pwm-tacho.rst
@@ -15,8 +15,10 @@ controller supports up to 16 tachometer inputs.
The driver provides the following sensor accesses in sysfs:
+=============== ======= =====================================================
fanX_input ro provide current fan rotation value in RPM as reported
by the fan to the device.
pwmX rw get or set PWM fan control value. This is an integer
value between 0 (off) and 255 (full speed).
+=============== ======= =====================================================
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp.rst
index fec5a9bf755f..c609329e3bc4 100644
--- a/Documentation/hwmon/coretemp
+++ b/Documentation/hwmon/coretemp.rst
@@ -3,20 +3,29 @@ Kernel driver coretemp
Supported chips:
* All Intel Core family
+
Prefix: 'coretemp'
- CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
- 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
- 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
- 0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
- 0x36 (Cedar Trail Atom)
- Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
- Volume 3A: System Programming Guide
- http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
+
+ CPUID: family 0x6, models
+
+ - 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
+ - 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
+ - 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
+ - 0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
+ - 0x36 (Cedar Trail Atom)
+
+ Datasheet:
+
+ Intel 64 and IA-32 Architectures Software Developer's Manual
+ Volume 3A: System Programming Guide
+
+ http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
Author: Rudolf Marek
Description
-----------
+
This driver permits reading the DTS (Digital Temperature Sensor) embedded
inside Intel CPUs. This driver can read both the per-core and per-package
temperature using the appropriate sensors. The per-package sensor is new;
@@ -35,14 +44,17 @@ may be raised, if the temperature grows enough (more than TjMax) to trigger
the Out-Of-Spec bit. Following table summarizes the exported sysfs files:
All Sysfs entries are named with their core_id (represented here by 'X').
-tempX_input - Core temperature (in millidegrees Celsius).
-tempX_max - All cooling devices should be turned on (on Core2).
-tempX_crit - Maximum junction temperature (in millidegrees Celsius).
-tempX_crit_alarm - Set when Out-of-spec bit is set, never clears.
- Correct CPU operation is no longer guaranteed.
-tempX_label - Contains string "Core X", where X is processor
- number. For Package temp, this will be "Physical id Y",
- where Y is the package number.
+
+================= ========================================================
+tempX_input Core temperature (in millidegrees Celsius).
+tempX_max All cooling devices should be turned on (on Core2).
+tempX_crit Maximum junction temperature (in millidegrees Celsius).
+tempX_crit_alarm Set when Out-of-spec bit is set, never clears.
+ Correct CPU operation is no longer guaranteed.
+tempX_label Contains string "Core X", where X is processor
+ number. For Package temp, this will be "Physical id Y",
+ where Y is the package number.
+================= ========================================================
On CPU models which support it, TjMax is read from a model-specific register.
On other models, it is set to an arbitrary value based on weak heuristics.
@@ -52,6 +64,7 @@ as a module parameter (tjmax).
Appendix A. Known TjMax lists (TBD):
Some information comes from ark.intel.com
+=============== =============================================== ================
Process Processor TjMax(C)
22nm Core i5/i7 Processors
@@ -179,3 +192,4 @@ Process Processor TjMax(C)
65nm Celeron Processors
T1700/1600 100
560/550/540/530 100
+=============== =============================================== ================
diff --git a/Documentation/hwmon/da9052 b/Documentation/hwmon/da9052.rst
index 5bc51346b689..c1c0f1f08904 100644
--- a/Documentation/hwmon/da9052
+++ b/Documentation/hwmon/da9052.rst
@@ -1,6 +1,12 @@
+Kernel driver da9052
+====================
+
Supported chips:
+
* Dialog Semiconductors DA9052-BC and DA9053-AA/Bx PMICs
+
Prefix: 'da9052'
+
Datasheet: Not publicly available.
Authors: David Dajun Chen <dchen@diasemi.com>
@@ -15,17 +21,20 @@ different inputs. The track and hold circuit ensures stable input voltages at
the input of the ADC during the conversion.
The ADC is used to measure the following inputs:
-Channel 0: VDDOUT - measurement of the system voltage
-Channel 1: ICH - internal battery charger current measurement
-Channel 2: TBAT - output from the battery NTC
-Channel 3: VBAT - measurement of the battery voltage
-Channel 4: ADC_IN4 - high impedance input (0 - 2.5V)
-Channel 5: ADC_IN5 - high impedance input (0 - 2.5V)
-Channel 6: ADC_IN6 - high impedance input (0 - 2.5V)
-Channel 7: XY - TSI interface to measure the X and Y voltage of the touch
- screen resistive potentiometers
-Channel 8: Internal Tjunc. - sense (internal temp. sensor)
-Channel 9: VBBAT - measurement of the backup battery voltage
+
+========= ===================================================================
+Channel 0 VDDOUT - measurement of the system voltage
+Channel 1 ICH - internal battery charger current measurement
+Channel 2 TBAT - output from the battery NTC
+Channel 3 VBAT - measurement of the battery voltage
+Channel 4 ADC_IN4 - high impedance input (0 - 2.5V)
+Channel 5 ADC_IN5 - high impedance input (0 - 2.5V)
+Channel 6 ADC_IN6 - high impedance input (0 - 2.5V)
+Channel 7 XY - TSI interface to measure the X and Y voltage of the touch
+ screen resistive potentiometers
+Channel 8 Internal Tjunc. - sense (internal temp. sensor)
+Channel 9 VBBAT - measurement of the backup battery voltage
+========= ===================================================================
By using sysfs attributes we can measure the system voltage VDDOUT, the battery
charging current ICH, battery temperature TBAT, battery junction temperature
@@ -37,12 +46,15 @@ Voltage Monitoring
Voltages are sampled by a 10 bit ADC.
The battery voltage is calculated as:
+
Milli volt = ((ADC value * 1000) / 512) + 2500
The backup battery voltage is calculated as:
+
Milli volt = (ADC value * 2500) / 512;
The voltages on ADC channels 4, 5 and 6 are calculated as:
+
Milli volt = (ADC value * 2500) / 1023
Temperature Monitoring
@@ -52,10 +64,15 @@ Temperatures are sampled by a 10 bit ADC. Junction and battery temperatures
are monitored by the ADC channels.
The junction temperature is calculated:
+
Degrees celsius = 1.708 * (TJUNC_RES - T_OFFSET) - 108.8
+
The junction temperature attribute is supported by the driver.
The battery temperature is calculated:
- Degree Celsius = 1 / (t1 + 1/298)- 273
+
+ Degree Celsius = 1 / (t1 + 1/298) - 273
+
where t1 = (1/B)* ln(( ADCval * 2.5)/(R25*ITBAT*255))
+
Default values of R25, B, ITBAT are 10e3, 3380 and 50e-6 respectively.
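Taken together, the conversions above reduce to a few small helpers (a
sketch; the names are illustrative and the temperature constant is kept in
integer millidegrees)::

  /* 10-bit ADC value to millivolts, per the formulas above */
  static inline int vbat_mV(int adc)    { return adc * 1000 / 512 + 2500; }
  static inline int vbbat_mV(int adc)   { return adc * 2500 / 512; }
  static inline int adc_in_mV(int adc)  { return adc * 2500 / 1023; }

  /* 1.708 * (TJUNC_RES - T_OFFSET) - 108.8, in millidegrees Celsius */
  static inline int tjunc_mC(int tjunc_res, int t_offset)
  {
          return 1708 * (tjunc_res - t_offset) - 108800;
  }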
diff --git a/Documentation/hwmon/da9055 b/Documentation/hwmon/da9055.rst
index 855c3f536e00..beae271a3312 100644
--- a/Documentation/hwmon/da9055
+++ b/Documentation/hwmon/da9055.rst
@@ -1,6 +1,11 @@
+Kernel driver da9055
+====================
+
Supported chips:
* Dialog Semiconductors DA9055 PMIC
+
Prefix: 'da9055'
+
Datasheet: Not publicly available.
Authors: David Dajun Chen <dchen@diasemi.com>
@@ -15,11 +20,12 @@ different inputs. The track and hold circuit ensures stable input voltages at
the input of the ADC during the conversion.
The ADC is used to measure the following inputs:
-Channel 0: VDDOUT - measurement of the system voltage
-Channel 1: ADC_IN1 - high impedance input (0 - 2.5V)
-Channel 2: ADC_IN2 - high impedance input (0 - 2.5V)
-Channel 3: ADC_IN3 - high impedance input (0 - 2.5V)
-Channel 4: Internal Tjunc. - sense (internal temp. sensor)
+
+- Channel 0: VDDOUT - measurement of the system voltage
+- Channel 1: ADC_IN1 - high impedance input (0 - 2.5V)
+- Channel 2: ADC_IN2 - high impedance input (0 - 2.5V)
+- Channel 3: ADC_IN3 - high impedance input (0 - 2.5V)
+- Channel 4: Internal Tjunc. - sense (internal temp. sensor)
By using sysfs attributes we can measure the system voltage VDDOUT,
chip junction temperature and auxiliary channels voltages.
@@ -31,9 +37,11 @@ Voltages are sampled in a AUTO mode it can be manually sampled too and results
are stored in a 10 bit ADC.
The system voltage is calculated as:
+
Milli volt = ((ADC value * 1000) / 85) + 2500
The voltages on ADC channels 1, 2 and 3 are calculated as:
+
Milli volt = (ADC value * 1000) / 102
Temperature Monitoring
@@ -43,5 +51,7 @@ Temperatures are sampled by a 10 bit ADC. Junction temperatures
are monitored by the ADC channels.
The junction temperature is calculated:
+
Degrees celsius = -0.4084 * (ADC_RES - T_OFFSET) + 307.6332
+
The junction temperature attribute is supported by the driver.
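As with the da9052, these formulas reduce to small helpers (a sketch with
illustrative names; the temperature constants are rounded to integer
millidegrees)::

  /* 10-bit ADC value to millivolts, per the formulas above */
  static inline int vddout_mV(int adc)  { return adc * 1000 / 85 + 2500; }
  static inline int adc_in_mV(int adc)  { return adc * 1000 / 102; }

  /* -0.4084 * (ADC_RES - T_OFFSET) + 307.6332, in millidegrees Celsius */
  static inline int tjunc_mC(int adc_res, int t_offset)
  {
          return -4084 * (adc_res - t_offset) / 10 + 307633;
  }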
diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737.rst
index 4d2935145a1c..82fcbc6b2b43 100644
--- a/Documentation/hwmon/dme1737
+++ b/Documentation/hwmon/dme1737.rst
@@ -2,21 +2,37 @@ Kernel driver dme1737
=====================
Supported chips:
+
* SMSC DME1737 and compatibles (like Asus A8000)
+
Prefix: 'dme1737'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: Provided by SMSC upon request and under NDA
+
* SMSC SCH3112, SCH3114, SCH3116
+
Prefix: 'sch311x'
+
Addresses scanned: none, address read from Super-I/O config space
+
Datasheet: Available on the Internet
+
* SMSC SCH5027
+
Prefix: 'sch5027'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: Provided by SMSC upon request and under NDA
+
* SMSC SCH5127
+
Prefix: 'sch5127'
+
Addresses scanned: none, address read from Super-I/O config space
+
Datasheet: Provided by SMSC upon request and under NDA
Authors:
@@ -26,11 +42,14 @@ Authors:
Module Parameters
-----------------
-* force_start: bool Enables the monitoring of voltage, fan and temp inputs
+* force_start: bool
+ Enables the monitoring of voltage, fan and temp inputs
and PWM output control functions. Using this parameter
shouldn't be required since the BIOS usually takes care
of this.
-* probe_all_addr: bool Include non-standard LPC addresses 0x162e and 0x164e
+
+* probe_all_addr: bool
+ Include non-standard LPC addresses 0x162e and 0x164e
when probing for ISA devices. This is required for the
following boards:
- VIA EPIA SN18000
@@ -70,7 +89,8 @@ scaling resistors. The values returned by the driver therefore reflect true
millivolts and don't need scaling. The voltage inputs are mapped as follows
(the last column indicates the input ranges):
-DME1737, A8000:
+DME1737, A8000::
+
in0: +5VTR (+5V standby) 0V - 6.64V
in1: Vccp (processor core) 0V - 3V
in2: VCC (internal +3.3V) 0V - 4.38V
@@ -79,7 +99,8 @@ DME1737, A8000:
in5: VTR (+3.3V standby) 0V - 4.38V
in6: Vbat (+3.0V) 0V - 4.38V
-SCH311x:
+SCH311x::
+
in0: +2.5V 0V - 3.32V
in1: Vccp (processor core) 0V - 2V
in2: VCC (internal +3.3V) 0V - 4.38V
@@ -88,7 +109,8 @@ SCH311x:
in5: VTR (+3.3V standby) 0V - 4.38V
in6: Vbat (+3.0V) 0V - 4.38V
-SCH5027:
+SCH5027::
+
in0: +5VTR (+5V standby) 0V - 6.64V
in1: Vccp (processor core) 0V - 3V
in2: VCC (internal +3.3V) 0V - 4.38V
@@ -97,7 +119,8 @@ SCH5027:
in5: VTR (+3.3V standby) 0V - 4.38V
in6: Vbat (+3.0V) 0V - 4.38V
-SCH5127:
+SCH5127::
+
in0: +2.5 0V - 3.32V
in1: Vccp (processor core) 0V - 3V
in2: VCC (internal +3.3V) 0V - 4.38V
@@ -119,7 +142,7 @@ Celsius. The chip also features offsets for all 3 temperature inputs which -
when programmed - get added to the input readings. The chip does all the
scaling by itself and the driver therefore reports true temperatures that don't
need any user-space adjustments. The temperature inputs are mapped as follows
-(the last column indicates the input ranges):
+(the last column indicates the input ranges)::
temp1: Remote diode 1 (3904 type) temperature -127C - +127C
temp2: DME1737 internal temperature -127C - +127C
@@ -171,6 +194,7 @@ pwm[1-3]_auto_pwm_min, respectively. The thermal thresholds of the zones are
programmed via zone[1-3]_auto_point[1-3]_temp and
zone[1-3]_auto_point1_temp_hyst:
+ =============================== =======================================
pwm[1-3]_auto_point2_pwm full-speed duty-cycle (255, i.e., 100%)
pwm[1-3]_auto_point1_pwm low-speed duty-cycle
pwm[1-3]_auto_pwm_min min-speed duty-cycle
@@ -179,6 +203,7 @@ zone[1-3]_auto_point1_temp_hyst:
zone[1-3]_auto_point2_temp full-speed temp
zone[1-3]_auto_point1_temp low-speed temp
zone[1-3]_auto_point1_temp_hyst min-speed temp
+ =============================== =======================================
The chip adjusts the output duty-cycle linearly in the range of auto_point1_pwm
to auto_point2_pwm if the temperature of the associated zone is between
@@ -192,17 +217,21 @@ all PWM outputs are set to 100% duty-cycle.
Following is another representation of how the chip sets the output duty-cycle
based on the temperature of the associated thermal zone:
- Duty-Cycle Duty-Cycle
- Temperature Rising Temp Falling Temp
- ----------- ----------- ------------
+ =============== =============== =================
+ Temperature Duty-Cycle Duty-Cycle
+ Rising Temp Falling Temp
+ =============== =============== =================
full-speed full-speed full-speed
- < linearly adjusted duty-cycle >
+ - < linearly -
+ adjusted
+ duty-cycle >
low-speed low-speed low-speed
- min-speed low-speed
+ - min-speed low-speed
min-speed min-speed min-speed
- min-speed min-speed
+ - min-speed min-speed
+ =============== =============== =================
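The ramp itself is a plain linear interpolation clamped at both trip points,
roughly as follows (a sketch; the hysteresis and stall_disable behaviour
described above are left out)::

  /* temp between point1/point2 maps linearly onto pwm1..pwm2 */
  static int auto_duty_cycle(int temp, int temp1, int temp2,
                             int pwm1, int pwm2)
  {
          if (temp <= temp1)
                  return pwm1;    /* low-speed duty-cycle */
          if (temp >= temp2)
                  return pwm2;    /* full-speed duty-cycle */
          return pwm1 + (temp - temp1) * (pwm2 - pwm1) / (temp2 - temp1);
  }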
Sysfs Attributes
@@ -211,8 +240,9 @@ Sysfs Attributes
Following is a list of all sysfs attributes that the driver provides, their
permissions and a short description:
+=============================== ======= =======================================
Name Perm Description
----- ---- -----------
+=============================== ======= =======================================
cpu0_vid RO CPU core reference voltage in
millivolts.
vrm RW Voltage regulator module version
@@ -242,9 +272,10 @@ temp[1-3]_fault RO Temp input fault. Returns 1 if the chip
zone[1-3]_auto_channels_temp RO Temperature zone to temperature input
mapping. This attribute is a bitfield
and supports the following values:
- 1: temp1
- 2: temp2
- 4: temp3
+
+ - 1: temp1
+ - 2: temp2
+ - 4: temp3
zone[1-3]_auto_point1_temp_hyst RW Auto PWM temp point1 hysteresis. The
output of the corresponding PWM is set
to the pwm_auto_min value if the temp
@@ -275,9 +306,10 @@ pmw[1-3,5-6] RO/RW Duty-cycle of PWM output. Supported
manual mode.
pwm[1-3]_enable RW Enable of PWM outputs 1-3. Supported
values are:
- 0: turned off (output @ 100%)
- 1: manual mode
- 2: automatic mode
+
+ - 0: turned off (output @ 100%)
+ - 1: manual mode
+ - 2: automatic mode
pwm[5-6]_enable RO Enable of PWM outputs 5-6. Always
returns 1 since these 2 outputs are
hard-wired to manual mode.
@@ -294,11 +326,12 @@ pmw[1-3]_ramp_rate RW Ramp rate of PWM output. Determines how
pwm[1-3]_auto_channels_zone RW PWM output to temperature zone mapping.
This attribute is a bitfield and
supports the following values:
- 1: zone1
- 2: zone2
- 4: zone3
- 6: highest of zone[2-3]
- 7: highest of zone[1-3]
+
+ - 1: zone1
+ - 2: zone2
+ - 4: zone3
+ - 6: highest of zone[2-3]
+ - 7: highest of zone[1-3]
pwm[1-3]_auto_pwm_min RW Auto PWM min pwm. Minimum PWM duty-
cycle. Supported values are 0 or
auto_point1_pwm.
@@ -307,12 +340,14 @@ pwm[1-3]_auto_point1_pwm RW Auto PWM pwm point. Auto_point1 is the
pwm[1-3]_auto_point2_pwm RO Auto PWM pwm point. Auto_point2 is the
full-speed duty-cycle which is hard-
wired to 255 (100% duty-cycle).
+=============================== ======= =======================================
Chip Differences
----------------
+======================= ======= ======= ======= =======
Feature dme1737 sch311x sch5027 sch5127
--------------------------------------------------------
+======================= ======= ======= ======= =======
temp[1-3]_offset yes yes
vid yes
zone3 yes yes yes
@@ -326,3 +361,4 @@ pwm5 opt opt
fan6 opt opt
pwm6 opt opt
in7 yes
+======================= ======= ======= ======= =======
diff --git a/Documentation/hwmon/ds1621 b/Documentation/hwmon/ds1621.rst
index fa3407997795..552b37e9dd34 100644
--- a/Documentation/hwmon/ds1621
+++ b/Documentation/hwmon/ds1621.rst
@@ -2,42 +2,61 @@ Kernel driver ds1621
====================
Supported chips:
+
* Dallas Semiconductor / Maxim Integrated DS1621
+
Prefix: 'ds1621'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.maximintegrated.com
* Dallas Semiconductor DS1625
+
Prefix: 'ds1625'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.datasheetarchive.com
* Maxim Integrated DS1631
+
Prefix: 'ds1631'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.maximintegrated.com
* Maxim Integrated DS1721
+
Prefix: 'ds1721'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.maximintegrated.com
* Maxim Integrated DS1731
+
Prefix: 'ds1731'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.maximintegrated.com
Authors:
- Christian W. Zuckschwerdt <zany@triq.net>
- valuable contributions by Jan M. Sendler <sendler@sendler.de>
- ported to 2.6 by Aurelien Jarno <aurelien@aurel32.net>
- with the help of Jean Delvare <jdelvare@suse.de>
+ - Christian W. Zuckschwerdt <zany@triq.net>
+ - valuable contributions by Jan M. Sendler <sendler@sendler.de>
+ - ported to 2.6 by Aurelien Jarno <aurelien@aurel32.net>
+ with the help of Jean Delvare <jdelvare@suse.de>
Module Parameters
------------------
* polarity int
- Output's polarity: 0 = active high, 1 = active low
+ Output's polarity:
+
+ * 0 = active high,
+ * 1 = active low
Description
-----------
@@ -87,28 +106,31 @@ are used internally, however, these flags do get set and cleared as the actual
temperature crosses the min or max settings (which by default are set to 75
and 80 degrees respectively).
-Temperature Conversion:
------------------------
-DS1621 - 750ms (older devices may take up to 1000ms)
-DS1625 - 500ms
-DS1631 - 93ms..750ms for 9..12 bits resolution, respectively.
-DS1721 - 93ms..750ms for 9..12 bits resolution, respectively.
-DS1731 - 93ms..750ms for 9..12 bits resolution, respectively.
+Temperature Conversion
+----------------------
+
+- DS1621 - 750ms (older devices may take up to 1000ms)
+- DS1625 - 500ms
+- DS1631 - 93ms..750ms for 9..12 bits resolution, respectively.
+- DS1721 - 93ms..750ms for 9..12 bits resolution, respectively.
+- DS1731 - 93ms..750ms for 9..12 bits resolution, respectively.
Note:
On the DS1621, internal access to non-volatile registers may last for 10ms
or less (unverified on the other devices).
-Temperature Accuracy:
----------------------
-DS1621: +/- 0.5 degree Celsius (from 0 to +70 degrees)
-DS1625: +/- 0.5 degree Celsius (from 0 to +70 degrees)
-DS1631: +/- 0.5 degree Celsius (from 0 to +70 degrees)
-DS1721: +/- 1.0 degree Celsius (from -10 to +85 degrees)
-DS1731: +/- 1.0 degree Celsius (from -10 to +85 degrees)
+Temperature Accuracy
+--------------------
-Note:
-Please refer to the device datasheets for accuracy at other temperatures.
+- DS1621: +/- 0.5 degree Celsius (from 0 to +70 degrees)
+- DS1625: +/- 0.5 degree Celsius (from 0 to +70 degrees)
+- DS1631: +/- 0.5 degree Celsius (from 0 to +70 degrees)
+- DS1721: +/- 1.0 degree Celsius (from -10 to +85 degrees)
+- DS1731: +/- 1.0 degree Celsius (from -10 to +85 degrees)
+
+.. Note::
+
+ Please refer to the device datasheets for accuracy at other temperatures.
Temperature Resolution:
-----------------------
@@ -117,60 +139,67 @@ support, which is achieved via the R0 and R1 config register bits, where:
R0..R1
------
- 0 0 => 9 bits, 0.5 degrees Celsius
- 1 0 => 10 bits, 0.25 degrees Celsius
- 0 1 => 11 bits, 0.125 degrees Celsius
- 1 1 => 12 bits, 0.0625 degrees Celsius
-Note:
-At initial device power-on, the default resolution is set to 12-bits.
+== == ===============================
+R0 R1
+== == ===============================
+ 0 0 9 bits, 0.5 degrees Celsius
+ 1 0 10 bits, 0.25 degrees Celsius
+ 0 1 11 bits, 0.125 degrees Celsius
+ 1 1 12 bits, 0.0625 degrees Celsius
+== == ===============================
+
+.. Note::
+
+ At initial device power-on, the default resolution is set to 12-bits.
The resolution mode for the DS1631, DS1721, or DS1731 can be changed from
userspace, via the device 'update_interval' sysfs attribute. This attribute
will normalize the range of input values to the device maximum resolution
values defined in the datasheet as follows:
+============= ================== ===============
Resolution Conversion Time Input Range
(C/LSB) (msec) (msec)
-------------------------------------------------
+============= ================== ===============
0.5 93.75 0....94
0.25 187.5 95...187
0.125 375 188..375
0.0625 750 376..infinity
-------------------------------------------------
+============= ================== ===============
The following examples show how the 'update_interval' attribute can be
-used to change the conversion time:
-
-$ cat update_interval
-750
-$ cat temp1_input
-22062
-$
-$ echo 300 > update_interval
-$ cat update_interval
-375
-$ cat temp1_input
-22125
-$
-$ echo 150 > update_interval
-$ cat update_interval
-188
-$ cat temp1_input
-22250
-$
-$ echo 1 > update_interval
-$ cat update_interval
-94
-$ cat temp1_input
-22000
-$
-$ echo 1000 > update_interval
-$ cat update_interval
-750
-$ cat temp1_input
-22062
-$
+used to change the conversion time::
+
+ $ cat update_interval
+ 750
+ $ cat temp1_input
+ 22062
+ $
+ $ echo 300 > update_interval
+ $ cat update_interval
+ 375
+ $ cat temp1_input
+ 22125
+ $
+ $ echo 150 > update_interval
+ $ cat update_interval
+ 188
+ $ cat temp1_input
+ 22250
+ $
+ $ echo 1 > update_interval
+ $ cat update_interval
+ 94
+ $ cat temp1_input
+ 22000
+ $
+ $ echo 1000 > update_interval
+ $ cat update_interval
+ 750
+ $ cat temp1_input
+ 22062
+ $
As shown, the ds1621 driver automatically adjusts the 'update_interval'
user input, via a step function. Reading back the 'update_interval' value
@@ -182,6 +211,7 @@ via the following function:
g(x) = 0.5 * [minimum_conversion_time/x]
where:
- -> 'x' = the output from 'update_interval'
- -> 'g(x)' = the resolution in degrees C per LSB.
- -> 93.75ms = minimum conversion time
+
+ - 'x' = the output from 'update_interval'
+ - 'g(x)' = the resolution in degrees C per LSB.
+ - 93.75ms = minimum conversion time
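The same step function can be written out explicitly (a sketch; the
thresholds follow the input-range column of the table above)::

  /* normalize a requested update_interval (ms) to a conversion time */
  static unsigned int ds1621_normalize_interval(unsigned int req_ms)
  {
          if (req_ms <= 94)
                  return 94;      /* 93.75 ms, 0.5 C/LSB   */
          if (req_ms <= 187)
                  return 188;     /* 187.5 ms, 0.25 C/LSB  */
          if (req_ms <= 375)
                  return 375;     /* 375 ms,   0.125 C/LSB */
          return 750;             /* 750 ms,   0.0625 C/LSB */
  }

This reproduces the session above: requests of 300, 150, 1 and 1000 ms come
back as 375, 188, 94 and 750 ms respectively.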
diff --git a/Documentation/hwmon/ds620 b/Documentation/hwmon/ds620.rst
index 1fbe3cd916cc..2d686b17b547 100644
--- a/Documentation/hwmon/ds620
+++ b/Documentation/hwmon/ds620.rst
@@ -2,15 +2,19 @@ Kernel driver ds620
===================
Supported chips:
+
* Dallas Semiconductor DS620
+
Prefix: 'ds620'
+
Datasheet: Publicly available at the Dallas Semiconductor website
- http://www.dalsemi.com/
+
+ http://www.dalsemi.com/
Authors:
- Roland Stigge <stigge@antcom.de>
- based on ds1621.c by
- Christian W. Zuckschwerdt <zany@triq.net>
+ Roland Stigge <stigge@antcom.de>
+ based on ds1621.c by
+ Christian W. Zuckschwerdt <zany@triq.net>
Description
-----------
diff --git a/Documentation/hwmon/emc1403 b/Documentation/hwmon/emc1403.rst
index a869b0ef6a9d..3a4913b63ef3 100644
--- a/Documentation/hwmon/emc1403
+++ b/Documentation/hwmon/emc1403.rst
@@ -2,28 +2,48 @@ Kernel driver emc1403
=====================
Supported chips:
+
* SMSC / Microchip EMC1402, EMC1412
+
Addresses scanned: I2C 0x18, 0x1c, 0x29, 0x4c, 0x4d, 0x5c
+
Prefix: 'emc1402'
+
Datasheets:
- http://ww1.microchip.com/downloads/en/DeviceDoc/1412.pdf
- http://ww1.microchip.com/downloads/en/DeviceDoc/1402.pdf
+
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1412.pdf
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1402.pdf
+
* SMSC / Microchip EMC1403, EMC1404, EMC1413, EMC1414
+
Addresses scanned: I2C 0x18, 0x29, 0x4c, 0x4d
+
Prefix: 'emc1403', 'emc1404'
+
Datasheets:
- http://ww1.microchip.com/downloads/en/DeviceDoc/1403_1404.pdf
- http://ww1.microchip.com/downloads/en/DeviceDoc/1413_1414.pdf
+
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1403_1404.pdf
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1413_1414.pdf
+
* SMSC / Microchip EMC1422
+
Addresses scanned: I2C 0x4c
+
Prefix: 'emc1422'
+
Datasheet:
- http://ww1.microchip.com/downloads/en/DeviceDoc/1422.pdf
+
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1422.pdf
+
* SMSC / Microchip EMC1423, EMC1424
+
Addresses scanned: I2C 0x4c
+
Prefix: 'emc1423', 'emc1424'
+
Datasheet:
- http://ww1.microchip.com/downloads/en/DeviceDoc/1423_1424.pdf
+
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1423_1424.pdf
Author:
Kalhan Trisal <kalhan.trisal@intel.com>
@@ -46,6 +66,7 @@ difference between the limit and its hysteresis is always the same for
all three limits.
This implementation detail implies the following:
+
* When setting a limit, its hysteresis will automatically follow, the
difference staying unchanged. For example, if the old critical limit
was 80 degrees C, and the hysteresis was 75 degrees C, and you change
diff --git a/Documentation/hwmon/emc2103 b/Documentation/hwmon/emc2103.rst
index a12b2c127140..6a6ca6d1b34e 100644
--- a/Documentation/hwmon/emc2103
+++ b/Documentation/hwmon/emc2103.rst
@@ -2,13 +2,17 @@ Kernel driver emc2103
======================
Supported chips:
+
* SMSC EMC2103
+
Addresses scanned: I2C 0x2e
+
Prefix: 'emc2103'
+
Datasheet: Not public
Authors:
- Steve Glendinning <steve.glendinning@smsc.com>
+ Steve Glendinning <steve.glendinning@smsc.com>
Description
-----------
diff --git a/Documentation/hwmon/emc6w201 b/Documentation/hwmon/emc6w201.rst
index 757629b12897..a8e1185b9bb6 100644
--- a/Documentation/hwmon/emc6w201
+++ b/Documentation/hwmon/emc6w201.rst
@@ -2,9 +2,13 @@ Kernel driver emc6w201
======================
Supported chips:
+
* SMSC EMC6W201
+
Prefix: 'emc6w201'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: Not public
Author: Jean Delvare <jdelvare@suse.de>
@@ -38,5 +42,6 @@ Known Systems With EMC6W201
The EMC6W201 is a rare device, only found on a few systems, made in
2005 and 2006. Known systems with this device:
+
* Dell Precision 670 workstation
* Gigabyte 2CEWH mainboard
diff --git a/Documentation/hwmon/f71805f b/Documentation/hwmon/f71805f.rst
index 48a356084bc6..1efe5e5d337c 100644
--- a/Documentation/hwmon/f71805f
+++ b/Documentation/hwmon/f71805f.rst
@@ -2,17 +2,29 @@ Kernel driver f71805f
=====================
Supported chips:
+
* Fintek F71805F/FG
+
Prefix: 'f71805f'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71806F/FG
+
Prefix: 'f71872f'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71872F/FG
+
Prefix: 'f71872f'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
Author: Jean Delvare <jdelvare@suse.de>
@@ -64,24 +76,26 @@ you can only set the limits in steps of 32 mV (before scaling).
The wirings and resistor values suggested by Fintek are as follow:
- pin expected
- name use R1 R2 divider raw val.
-
+======= ======= =========== ==== ======= ============ ==============
+in pin expected
+ name use R1 R2 divider raw val.
+======= ======= =========== ==== ======= ============ ==============
in0 VCC VCC3.3V int. int. 2.00 1.65 V
in1 VIN1 VTT1.2V 10K - 1.00 1.20 V
-in2 VIN2 VRAM 100K 100K 2.00 ~1.25 V (1)
-in3 VIN3 VCHIPSET 47K 100K 1.47 2.24 V (2)
+in2 VIN2 VRAM 100K 100K 2.00 ~1.25 V [1]_
+in3 VIN3 VCHIPSET 47K 100K 1.47 2.24 V [2]_
in4 VIN4 VCC5V 200K 47K 5.25 0.95 V
in5 VIN5 +12V 200K 20K 11.00 1.05 V
in6 VIN6 VCC1.5V 10K - 1.00 1.50 V
-in7 VIN7 VCORE 10K - 1.00 ~1.40 V (1)
+in7 VIN7 VCORE 10K - 1.00 ~1.40 V [1]_
in8 VIN8 VSB5V 200K 47K 1.00 0.95 V
-in10 VSB VSB3.3V int. int. 2.00 1.65 V (3)
-in9 VBAT VBATTERY int. int. 2.00 1.50 V (3)
+in10 VSB VSB3.3V int. int. 2.00 1.65 V [3]_
+in9 VBAT VBATTERY int. int. 2.00 1.50 V [3]_
+======= ======= =========== ==== ======= ============ ==============
-(1) Depends on your hardware setup.
-(2) Obviously not correct, swapping R1 and R2 would make more sense.
-(3) F71872F/FG only.
+.. [1] Depends on your hardware setup.
+.. [2] Obviously not correct, swapping R1 and R2 would make more sense.
+.. [3] F71872F/FG only.
These values can be used as hints at best, as motherboard manufacturers
are free to use a completely different setup. As a matter of fact, the
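In code, an external divider simply rescales the measured value (a sketch for
the inputs with external R1/R2 dividers above, where divider = (R1 + R2) / R2)::

  /* measured millivolts back to the actual rail voltage */
  static unsigned int rail_mV(unsigned int raw_mV,
                              unsigned int r1, unsigned int r2)
  {
          return raw_mV * (r1 + r2) / r2;
  }

For in5 (+12V), for instance, 1.05 V measured through the 200K/20K divider
corresponds to about 11.55 V on the rail.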
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg.rst
index de91c0db5846..5c0b7b0db150 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg.rst
@@ -2,60 +2,114 @@ Kernel driver f71882fg
======================
Supported chips:
+
* Fintek F71808E
+
Prefix: 'f71808e'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
+
* Fintek F71808A
+
Prefix: 'f71808a'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
+
* Fintek F71858FG
+
Prefix: 'f71858fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71862FG and F71863FG
+
Prefix: 'f71862fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71869F and F71869E
+
Prefix: 'f71869'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71869A
+
Prefix: 'f71869a'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
+
* Fintek F71882FG and F71883FG
+
Prefix: 'f71882fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71889FG
+
Prefix: 'f71889fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71889ED
+
Prefix: 'f71889ed'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Should become available on the Fintek website soon
+
* Fintek F71889A
+
Prefix: 'f71889a'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Should become available on the Fintek website soon
+
* Fintek F8000
+
Prefix: 'f8000'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
+
* Fintek F81801U
+
Prefix: 'f71889fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
- Note: This is the 64-pin variant of the F71889FG, they have the
+
+ Note:
+ This is the 64-pin variant of the F71889FG, they have the
same device ID and are fully compatible as far as hardware
monitoring is concerned.
+
* Fintek F81865F
+
Prefix: 'f81865f'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
Author: Hans de Goede <hdegoede@redhat.com>
@@ -94,7 +148,7 @@ Note that the lowest numbered temperature zone trip point corresponds to
to the border between the highest and one but highest temperature zones, and
vice versa. So the temperature zone trip points 1-4 (or 1-2) go from high temp
to low temp! This is how things are implemented in the IC, and the driver
-mimicks this.
+mimics this.
There are 2 modes to specify the speed of the fan, PWM duty cycle (or DC
voltage) mode, where 0-100% duty cycle (0-100% of 12V) is specified. And RPM
diff --git a/Documentation/hwmon/fam15h_power b/Documentation/hwmon/fam15h_power.rst
index fb594c281c46..fdde632c93a3 100644
--- a/Documentation/hwmon/fam15h_power
+++ b/Documentation/hwmon/fam15h_power.rst
@@ -2,15 +2,20 @@ Kernel driver fam15h_power
==========================
Supported chips:
+
* AMD Family 15h Processors
+
* AMD Family 16h Processors
Prefix: 'fam15h_power'
+
Addresses scanned: PCI space
+
Datasheets:
- BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors
- BIOS and Kernel Developer's Guide (BKDG) For AMD Family 16h Processors
- AMD64 Architecture Programmer's Manual Volume 2: System Programming
+
+ - BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors
+ - BIOS and Kernel Developer's Guide (BKDG) For AMD Family 16h Processors
+ - AMD64 Architecture Programmer's Manual Volume 2: System Programming
Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
@@ -31,14 +36,19 @@ For AMD Family 15h and 16h processors the following power values can
be calculated using different processor northbridge function
registers:
-* BasePwrWatts: Specifies in watts the maximum amount of power
- consumed by the processor for NB and logic external to the core.
-* ProcessorPwrWatts: Specifies in watts the maximum amount of power
- the processor can support.
-* CurrPwrWatts: Specifies in watts the current amount of power being
- consumed by the processor.
+* BasePwrWatts:
+ Specifies in watts the maximum amount of power
+ consumed by the processor for NB and logic external to the core.
+
+* ProcessorPwrWatts:
+ Specifies in watts the maximum amount of power
+ the processor can support.
+* CurrPwrWatts:
+ Specifies in watts the current amount of power being
+ consumed by the processor.
This driver provides ProcessorPwrWatts and CurrPwrWatts:
+
* power1_crit (ProcessorPwrWatts)
* power1_input (CurrPwrWatts)
@@ -53,35 +63,53 @@ calculate the average power consumed by a processor during a
measurement interval Tm. The feature of accumulated power mechanism is
indicated by CPUID Fn8000_0007_EDX[12].
-* Tsample: compute unit power accumulator sample period
-* Tref: the PTSC counter period
-* PTSC: performance timestamp counter
-* N: the ratio of compute unit power accumulator sample period to the
- PTSC period
-* Jmax: max compute unit accumulated power which is indicated by
- MaxCpuSwPwrAcc MSR C001007b
-* Jx/Jy: compute unit accumulated power which is indicated by
- CpuSwPwrAcc MSR C001007a
-* Tx/Ty: the value of performance timestamp counter which is indicated
- by CU_PTSC MSR C0010280
-* PwrCPUave: CPU average power
+* Tsample:
+ compute unit power accumulator sample period
+
+* Tref:
+ the PTSC counter period
+
+* PTSC:
+ performance timestamp counter
+
+* N:
+ the ratio of compute unit power accumulator sample period to the
+ PTSC period
+
+* Jmax:
+ max compute unit accumulated power which is indicated by
+ MaxCpuSwPwrAcc MSR C001007b
+
+* Jx/Jy:
+ compute unit accumulated power which is indicated by
+ CpuSwPwrAcc MSR C001007a
+
+* Tx/Ty:
+ the value of performance timestamp counter which is indicated
+ by CU_PTSC MSR C0010280
+
+* PwrCPUave:
+ CPU average power
i. Determine the ratio of Tsample to Tref by executing CPUID Fn8000_0007.
+
N = value of CPUID Fn8000_0007_ECX[CpuPwrSampleTimeRatio[15:0]].
ii. Read the full range of the cumulative energy value from the new
-MSR MaxCpuSwPwrAcc.
+ MSR MaxCpuSwPwrAcc.
+
Jmax = value returned.
+
iii. At time x, SW reads CpuSwPwrAcc MSR and samples the PTSC.
- Jx = value read from CpuSwPwrAcc and Tx = value read from
-PTSC.
+
+ Jx = value read from CpuSwPwrAcc and Tx = value read from PTSC.
iv. At time y, SW reads CpuSwPwrAcc MSR and samples the PTSC.
- Jy = value read from CpuSwPwrAcc and Ty = value read from
-PTSC.
+
+ Jy = value read from CpuSwPwrAcc and Ty = value read from PTSC.
v. Calculate the average power consumption for a compute unit over
-time period (y-x). Unit of result is uWatt.
+ time period (y-x). Unit of result is uWatt::
+
if (Jy < Jx) // Rollover has occurred
Jdelta = (Jy + Jmax) - Jx
else
@@ -90,13 +118,14 @@ time period (y-x). Unit of result is uWatt.
This driver provides PwrCPUave and interval (default is 10 milliseconds
and maximum is 1 second):
+
* power1_average (PwrCPUave)
* power1_average_interval (Interval)
The power1_average_interval can be updated in the /etc/sensors3.conf file
as below::

-chip "fam15h_power-*"
+  chip "fam15h_power-*"
    set power1_average_interval 0.01
Then save it with "sensors -s".
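+
+For illustration, the steps above can be replayed from user space via
+the msr driver. This is a minimal sketch, not how the fam15h_power
+driver works internally; it assumes the msr module is loaded, uses the
+MSR indexes listed above, and hard-codes N to 1 instead of reading it
+from CPUID Fn8000_0007_ECX::
+
+	#include <fcntl.h>
+	#include <stdint.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	static uint64_t rdmsr(int fd, uint32_t reg)
+	{
+		uint64_t v = 0;
+
+		/* the msr device seeks by MSR index */
+		pread(fd, &v, sizeof(v), reg);
+		return v;
+	}
+
+	int main(void)
+	{
+		int fd = open("/dev/cpu/0/msr", O_RDONLY);	/* needs root */
+		uint64_t jmax, jx, jy, tx, ty, jdelta;
+		uint64_t n = 1;			/* CpuPwrSampleTimeRatio */
+
+		if (fd < 0)
+			return 1;
+		jmax = rdmsr(fd, 0xc001007b);	/* MaxCpuSwPwrAcc */
+		jx = rdmsr(fd, 0xc001007a);	/* CpuSwPwrAcc at time x */
+		tx = rdmsr(fd, 0xc0010280);	/* CU_PTSC at time x */
+		sleep(1);
+		jy = rdmsr(fd, 0xc001007a);	/* CpuSwPwrAcc at time y */
+		ty = rdmsr(fd, 0xc0010280);	/* CU_PTSC at time y */
+		/* rollover-safe Jdelta, as in step v. above */
+		jdelta = (jy < jx) ? (jy + jmax) - jx : jy - jx;
+		printf("PwrCPUave ~ %llu uW\n",
+		       (unsigned long long)(n * jdelta / (ty - tx)));
+		close(fd);
+		return 0;
+	}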
diff --git a/Documentation/hwmon/ftsteutates b/Documentation/hwmon/ftsteutates.rst
index af54db92391b..58a2483d8d0d 100644
--- a/Documentation/hwmon/ftsteutates
+++ b/Documentation/hwmon/ftsteutates.rst
@@ -1,9 +1,12 @@
Kernel driver ftsteutates
-=====================
+=========================
Supported chips:
+
* FTS Teutates
+
Prefix: 'ftsteutates'
+
Addresses scanned: I2C 0x73 (7-Bit)
Author: Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>
@@ -11,6 +14,7 @@ Author: Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>
Description
-----------
+
The BMC Teutates is the Eleventh generation of Superior System
monitoring and thermal management solution. It builds on the basic
functionality of the BMC Theseus and contains several new features and
@@ -19,9 +23,11 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
implemented in this driver.
To clear a temperature or fan alarm, execute the following command with the
-correct path to the alarm file:
+correct path to the alarm file::
+
echo 0 >XXXX_alarm
Specification of the chip can be found here:
-ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
-ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
+
+- ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
+- ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
diff --git a/Documentation/hwmon/g760a b/Documentation/hwmon/g760a.rst
index cfc894537061..d82952cc8319 100644
--- a/Documentation/hwmon/g760a
+++ b/Documentation/hwmon/g760a.rst
@@ -2,9 +2,13 @@ Kernel driver g760a
===================
Supported chips:
+
* Global Mixed-mode Technology Inc. G760A
+
Prefix: 'g760a'
+
Datasheet: Publicly available at the GMT website
+
http://www.gmt.com.tw/product/datasheet/EDS-760A.pdf
Author: Herbert Valerio Riedel <hvr@gnu.org>
diff --git a/Documentation/hwmon/g762 b/Documentation/hwmon/g762.rst
index 923db9c5b5bc..0371b3365c48 100644
--- a/Documentation/hwmon/g762
+++ b/Documentation/hwmon/g762.rst
@@ -7,7 +7,7 @@ modes - PWM or DC - are supported by the device.
For additional information, a detailed datasheet is available at
http://natisbad.org/NAS/ref/GMT_EDS-762_763-080710-0.2.pdf. sysfs
-bindings are described in Documentation/hwmon/sysfs-interface.
+bindings are described in Documentation/hwmon/sysfs-interface.rst.
The following entries are available to the user in a subdirectory of
/sys/bus/i2c/drivers/g762/ to control the operation of the device.
@@ -21,34 +21,43 @@ documented in Documentation/devicetree/bindings/hwmon/g762.txt or
using a specific platform_data structure in board initialization
file (see include/linux/platform_data/g762.h).
- fan1_target: set desired fan speed. This only makes sense in closed-loop
- fan speed control (i.e. when pwm1_enable is set to 2).
+ fan1_target:
+ set desired fan speed. This only makes sense in closed-loop
+ fan speed control (i.e. when pwm1_enable is set to 2).
- fan1_input: provide current fan rotation value in RPM as reported by
- the fan to the device.
+ fan1_input:
+ provide current fan rotation value in RPM as reported by
+ the fan to the device.
- fan1_div: fan clock divisor. Supported value are 1, 2, 4 and 8.
+ fan1_div:
+ fan clock divisor. Supported values are 1, 2, 4 and 8.
- fan1_pulses: number of pulses per fan revolution. Supported values
- are 2 and 4.
+ fan1_pulses:
+ number of pulses per fan revolution. Supported values
+ are 2 and 4.
- fan1_fault: reports fan failure, i.e. no transition on fan gear pin for
- about 0.7s (if the fan is not voluntarily set off).
+ fan1_fault:
+ reports fan failure, i.e. no transition on fan gear pin for
+ about 0.7s (if the fan is not voluntarily set off).
- fan1_alarm: in closed-loop control mode, if fan RPM value is 25% out
- of the programmed value for over 6 seconds 'fan1_alarm' is
- set to 1.
+ fan1_alarm:
+ in closed-loop control mode, if fan RPM value is 25% out
+ of the programmed value for over 6 seconds 'fan1_alarm' is
+ set to 1.
- pwm1_enable: set current fan speed control mode i.e. 1 for manual fan
- speed control (open-loop) via pwm1 described below, 2 for
- automatic fan speed control (closed-loop) via fan1_target
- above.
+ pwm1_enable:
+ set current fan speed control mode i.e. 1 for manual fan
+ speed control (open-loop) via pwm1 described below, 2 for
+ automatic fan speed control (closed-loop) via fan1_target
+ above.
- pwm1_mode: set or get fan driving mode: 1 for PWM mode, 0 for DC mode.
+ pwm1_mode:
+ set or get fan driving mode: 1 for PWM mode, 0 for DC mode.
- pwm1: get or set PWM fan control value in open-loop mode. This is an
- integer value between 0 and 255. 0 stops the fan, 255 makes
- it run at full speed.
+ pwm1:
+ get or set PWM fan control value in open-loop mode. This is an
+ integer value between 0 and 255. 0 stops the fan, 255 makes
+ it run at full speed.
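+
+For example, a small user-space sketch driving these files; the
+"0-003e" device directory below is hypothetical and depends on the I2C
+bus number and chip address, and error handling is omitted::
+
+	#include <fcntl.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		int fd;
+
+		/* 2 = closed-loop fan speed control via fan1_target */
+		fd = open("/sys/bus/i2c/drivers/g762/0-003e/pwm1_enable",
+			  O_WRONLY);
+		write(fd, "2", 1);
+		close(fd);
+
+		/* request a 2000 RPM fan speed */
+		fd = open("/sys/bus/i2c/drivers/g762/0-003e/fan1_target",
+			  O_WRONLY);
+		write(fd, "2000", 4);
+		close(fd);
+		return 0;
+	}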
Both in PWM mode ('pwm1_mode' set to 1) and DC mode ('pwm1_mode' set to 0),
when current fan speed control mode is open-loop ('pwm1_enable' set to 1),
diff --git a/Documentation/hwmon/gl518sm b/Documentation/hwmon/gl518sm.rst
index 494bb55b6e72..bf1e0b5e824b 100644
--- a/Documentation/hwmon/gl518sm
+++ b/Documentation/hwmon/gl518sm.rst
@@ -2,27 +2,34 @@ Kernel driver gl518sm
=====================
Supported chips:
+
* Genesys Logic GL518SM release 0x00
+
Prefix: 'gl518sm'
+
Addresses scanned: I2C 0x2c and 0x2d
+
* Genesys Logic GL518SM release 0x80
+
Prefix: 'gl518sm'
+
Addresses scanned: I2C 0x2c and 0x2d
+
Datasheet: http://www.genesyslogic.com/
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Kyösti Mälkki <kmalkki@cc.hut.fi>
- Hong-Gunn Chew <hglinux@gunnet.org>
- Jean Delvare <jdelvare@suse.de>
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Kyösti Mälkki <kmalkki@cc.hut.fi>
+ - Hong-Gunn Chew <hglinux@gunnet.org>
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
-IMPORTANT:
+.. important::
-For the revision 0x00 chip, the in0, in1, and in2 values (+5V, +3V,
-and +12V) CANNOT be read. This is a limitation of the chip, not the driver.
+ For the revision 0x00 chip, the in0, in1, and in2 values (+5V, +3V,
+ and +12V) CANNOT be read. This is a limitation of the chip, not the driver.
This driver supports the Genesys Logic GL518SM chip. There are at least
two revisions of this chip, which we call revision 0x00 and 0x80. Revision
diff --git a/Documentation/hwmon/hih6130 b/Documentation/hwmon/hih6130.rst
index 73dae918ea7b..649bd4be4fc2 100644
--- a/Documentation/hwmon/hih6130
+++ b/Documentation/hwmon/hih6130.rst
@@ -2,11 +2,16 @@ Kernel driver hih6130
=====================
Supported chips:
+
* Honeywell HIH-6130 / HIH-6131
+
Prefix: 'hih6130'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Honeywell website
- http://sensing.honeywell.com/index.php?ci_id=3106&la_id=1&defId=44872
+
+ http://sensing.honeywell.com/index.php?ci_id=3106&la_id=1&defId=44872
Author:
Iain Paton <ipaton0@gmail.com>
@@ -28,8 +33,11 @@ instantiate I2C devices.
sysfs-Interface
---------------
-temp1_input - temperature input
-humidity1_input - humidity input
+temp1_input
+ temperature input
+
+humidity1_input
+ humidity input
Notes
-----
diff --git a/Documentation/hwmon/hwmon-kernel-api.txt b/Documentation/hwmon/hwmon-kernel-api.rst
index 8bdefb41be30..c41eb6108103 100644
--- a/Documentation/hwmon/hwmon-kernel-api.txt
+++ b/Documentation/hwmon/hwmon-kernel-api.rst
@@ -1,5 +1,5 @@
-The Linux Hardware Monitoring kernel API.
-=========================================
+The Linux Hardware Monitoring kernel API
+========================================
Guenter Roeck
@@ -12,42 +12,43 @@ drivers that want to use the hardware monitoring framework.
This document does not describe what a hardware monitoring (hwmon) Driver or
Device is. It also does not describe the API which can be used by user space
to communicate with a hardware monitoring device. If you want to know this
-then please read the following file: Documentation/hwmon/sysfs-interface.
+then please read the following file: Documentation/hwmon/sysfs-interface.rst.
For additional guidelines on how to write and improve hwmon drivers, please
-also read Documentation/hwmon/submitting-patches.
+also read Documentation/hwmon/submitting-patches.rst.
The API
-------
Each hardware monitoring driver must #include <linux/hwmon.h> and, in most
cases, <linux/hwmon-sysfs.h>. linux/hwmon.h declares the following
-register/unregister functions:
-
-struct device *
-hwmon_device_register_with_groups(struct device *dev, const char *name,
- void *drvdata,
- const struct attribute_group **groups);
-
-struct device *
-devm_hwmon_device_register_with_groups(struct device *dev,
- const char *name, void *drvdata,
- const struct attribute_group **groups);
-
-struct device *
-hwmon_device_register_with_info(struct device *dev,
- const char *name, void *drvdata,
- const struct hwmon_chip_info *info,
- const struct attribute_group **extra_groups);
-
-struct device *
-devm_hwmon_device_register_with_info(struct device *dev,
- const char *name,
- void *drvdata,
- const struct hwmon_chip_info *info,
- const struct attribute_group **extra_groups);
-
-void hwmon_device_unregister(struct device *dev);
-void devm_hwmon_device_unregister(struct device *dev);
+register/unregister functions::
+
+ struct device *
+ hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups);
+
+ struct device *
+ devm_hwmon_device_register_with_groups(struct device *dev,
+ const char *name, void *drvdata,
+ const struct attribute_group **groups);
+
+ struct device *
+ hwmon_device_register_with_info(struct device *dev,
+ const char *name, void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **extra_groups);
+
+ struct device *
+ devm_hwmon_device_register_with_info(struct device *dev,
+ const char *name,
+ void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **extra_groups);
+
+ void hwmon_device_unregister(struct device *dev);
+
+ void devm_hwmon_device_unregister(struct device *dev);
hwmon_device_register_with_groups registers a hardware monitoring device.
The first parameter of this function is a pointer to the parent device.
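+
+A typical probe-time call looks as follows. This is a minimal sketch;
+"data" and "lm75_chip_info" are placeholders for the driver's private
+data and chip description (see the LM75 example further below)::
+
+	hwmon_dev = devm_hwmon_device_register_with_info(dev, "lm75", data,
+							 &lm75_chip_info,
+							 NULL);
+	if (IS_ERR(hwmon_dev))
+		return PTR_ERR(hwmon_dev);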
@@ -100,78 +101,89 @@ Using devm_hwmon_device_register_with_info()
hwmon_device_register_with_info() registers a hardware monitoring device.
The parameters to this function are
-struct device *dev Pointer to parent device
-const char *name Device name
-void *drvdata Driver private data
-const struct hwmon_chip_info *info
- Pointer to chip description.
-const struct attribute_group **extra_groups
- Null-terminated list of additional non-standard
- sysfs attribute groups.
+=============================================== ===============================================
+`struct device *dev` Pointer to parent device
+`const char *name` Device name
+`void *drvdata` Driver private data
+`const struct hwmon_chip_info *info` Pointer to chip description.
+`const struct attribute_group **extra_groups` Null-terminated list of additional non-standard
+ sysfs attribute groups.
+=============================================== ===============================================
This function returns a pointer to the created hardware monitoring device
on success and a negative error code for failure.
-The hwmon_chip_info structure looks as follows.
+The hwmon_chip_info structure looks as follows::
-struct hwmon_chip_info {
- const struct hwmon_ops *ops;
- const struct hwmon_channel_info **info;
-};
+ struct hwmon_chip_info {
+ const struct hwmon_ops *ops;
+ const struct hwmon_channel_info **info;
+ };
It contains the following fields:
-* ops: Pointer to device operations.
-* info: NULL-terminated list of device channel descriptors.
+* ops:
+ Pointer to device operations.
+* info:
+ NULL-terminated list of device channel descriptors.
-The list of hwmon operations is defined as:
+The list of hwmon operations is defined as::
-struct hwmon_ops {
+ struct hwmon_ops {
umode_t (*is_visible)(const void *, enum hwmon_sensor_types type,
u32 attr, int);
int (*read)(struct device *, enum hwmon_sensor_types type,
u32 attr, int, long *);
int (*write)(struct device *, enum hwmon_sensor_types type,
u32 attr, int, long);
-};
+ };
It defines the following operations.
-* is_visible: Pointer to a function to return the file mode for each supported
- attribute. This function is mandatory.
+* is_visible:
+ Pointer to a function to return the file mode for each supported
+ attribute. This function is mandatory.
-* read: Pointer to a function for reading a value from the chip. This function
- is optional, but must be provided if any readable attributes exist.
+* read:
+ Pointer to a function for reading a value from the chip. This function
+ is optional, but must be provided if any readable attributes exist.
-* write: Pointer to a function for writing a value to the chip. This function is
- optional, but must be provided if any writeable attributes exist.
+* write:
+ Pointer to a function for writing a value to the chip. This function is
+ optional, but must be provided if any writeable attributes exist.
Each sensor channel is described with struct hwmon_channel_info, which is
-defined as follows.
+defined as follows::
-struct hwmon_channel_info {
- enum hwmon_sensor_types type;
- u32 *config;
-};
+ struct hwmon_channel_info {
+ enum hwmon_sensor_types type;
+ u32 *config;
+ };
It contains the following fields:
-* type: The hardware monitoring sensor type.
- Supported sensor types are
- * hwmon_chip A virtual sensor type, used to describe attributes
- * which are not bound to a specific input or output
- * hwmon_temp Temperature sensor
- * hwmon_in Voltage sensor
- * hwmon_curr Current sensor
- * hwmon_power Power sensor
- * hwmon_energy Energy sensor
- * hwmon_humidity Humidity sensor
- * hwmon_fan Fan speed sensor
- * hwmon_pwm PWM control
-
-* config: Pointer to a 0-terminated list of configuration values for each
- sensor of the given type. Each value is a combination of bit values
- describing the attributes supposed by a single sensor.
+* type:
+ The hardware monitoring sensor type.
+
+ Supported sensor types are
+
+ ================== ==================================================
+ hwmon_chip A virtual sensor type, used to describe attributes
+ which are not bound to a specific input or output
+ hwmon_temp Temperature sensor
+ hwmon_in Voltage sensor
+ hwmon_curr Current sensor
+ hwmon_power Power sensor
+ hwmon_energy Energy sensor
+ hwmon_humidity Humidity sensor
+ hwmon_fan Fan speed sensor
+ hwmon_pwm PWM control
+ ================== ==================================================
+
+* config:
+ Pointer to a 0-terminated list of configuration values for each
+ sensor of the given type. Each value is a combination of bit values
+ describing the attributes supported by a single sensor.
As an example, here is the complete description file for an LM75 compatible
sensor chip. The chip has a single temperature sensor. The driver wants to
@@ -179,48 +191,62 @@ register with the thermal subsystem (HWMON_C_REGISTER_TZ), and it supports
the update_interval attribute (HWMON_C_UPDATE_INTERVAL). The chip supports
reading the temperature (HWMON_T_INPUT), it has a maximum temperature
register (HWMON_T_MAX) as well as a maximum temperature hysteresis register
-(HWMON_T_MAX_HYST).
-
-static const u32 lm75_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm75_chip = {
- .type = hwmon_chip,
- .config = lm75_chip_config,
-};
-
-static const u32 lm75_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
- 0
-};
-
-static const struct hwmon_channel_info lm75_temp = {
- .type = hwmon_temp,
- .config = lm75_temp_config,
-};
-
-static const struct hwmon_channel_info *lm75_info[] = {
- &lm75_chip,
- &lm75_temp,
- NULL
-};
-
-static const struct hwmon_ops lm75_hwmon_ops = {
- .is_visible = lm75_is_visible,
- .read = lm75_read,
- .write = lm75_write,
-};
-
-static const struct hwmon_chip_info lm75_chip_info = {
- .ops = &lm75_hwmon_ops,
- .info = lm75_info,
-};
+(HWMON_T_MAX_HYST)::
+
+ static const u32 lm75_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+ };
+
+ static const struct hwmon_channel_info lm75_chip = {
+ .type = hwmon_chip,
+ .config = lm75_chip_config,
+ };
+
+ static const u32 lm75_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
+ 0
+ };
+
+ static const struct hwmon_channel_info lm75_temp = {
+ .type = hwmon_temp,
+ .config = lm75_temp_config,
+ };
+
+ static const struct hwmon_channel_info *lm75_info[] = {
+ &lm75_chip,
+ &lm75_temp,
+ NULL
+ };
+
+The HWMON_CHANNEL_INFO() macro can and should be used when possible.
+With this macro, the above example can be simplified to::
+
+ static const struct hwmon_channel_info *lm75_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
+ NULL
+ };
+
+The remaining declarations are as follows::
+
+ static const struct hwmon_ops lm75_hwmon_ops = {
+ .is_visible = lm75_is_visible,
+ .read = lm75_read,
+ .write = lm75_write,
+ };
+
+ static const struct hwmon_chip_info lm75_chip_info = {
+ .ops = &lm75_hwmon_ops,
+ .info = lm75_info,
+ };
A complete list of bit values indicating individual attribute support
is defined in include/linux/hwmon.h. Definition prefixes are as follows.
+=============== =================================================
HWMON_C_xxxx Chip attributes, for use with hwmon_chip.
HWMON_T_xxxx Temperature attributes, for use with hwmon_temp.
HWMON_I_xxxx Voltage attributes, for use with hwmon_in.
@@ -231,57 +257,76 @@ HWMON_E_xxxx Energy attributes, for use with hwmon_energy.
HWMON_H_xxxx Humidity attributes, for use with hwmon_humidity.
HWMON_F_xxxx Fan speed attributes, for use with hwmon_fan.
HWMON_PWM_xxxx PWM control attributes, for use with hwmon_pwm.
+=============== =================================================
Driver callback functions
-------------------------
Each driver provides is_visible, read, and write functions. Parameters
-and return values for those functions are as follows.
+and return values for those functions are as follows::
-umode_t is_visible_func(const void *data, enum hwmon_sensor_types type,
- u32 attr, int channel)
+ umode_t is_visible_func(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
Parameters:
- data: Pointer to device private data structure.
- type: The sensor type.
- attr: Attribute identifier associated with a specific attribute.
+ data:
+ Pointer to device private data structure.
+ type:
+ The sensor type.
+ attr:
+ Attribute identifier associated with a specific attribute.
For example, the attribute value for HWMON_T_INPUT would be
hwmon_temp_input. For complete mappings of bit fields to
attribute values please see include/linux/hwmon.h.
- channel:The sensor channel number.
+ channel:
+ The sensor channel number.
Return value:
The file mode for this attribute. Typically, this will be 0 (the
attribute will not be created), S_IRUGO, or 'S_IRUGO | S_IWUSR'.
-int read_func(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
+::
+
+ int read_func(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
Parameters:
- dev: Pointer to the hardware monitoring device.
- type: The sensor type.
- attr: Attribute identifier associated with a specific attribute.
+ dev:
+ Pointer to the hardware monitoring device.
+ type:
+ The sensor type.
+ attr:
+ Attribute identifier associated with a specific attribute.
For example, the attribute value for HWMON_T_INPUT would be
hwmon_temp_input. For complete mappings please see
include/linux/hwmon.h.
- channel:The sensor channel number.
- val: Pointer to attribute value.
+ channel:
+ The sensor channel number.
+ val:
+ Pointer to attribute value.
Return value:
0 on success, a negative error number otherwise.
-int write_func(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
+::
+
+ int write_func(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
Parameters:
- dev: Pointer to the hardware monitoring device.
- type: The sensor type.
- attr: Attribute identifier associated with a specific attribute.
+ dev:
+ Pointer to the hardware monitoring device.
+ type:
+ The sensor type.
+ attr:
+ Attribute identifier associated with a specific attribute.
For example, the attribute value for HWMON_T_INPUT would be
hwmon_temp_input. For complete mappings please see
include/linux/hwmon.h.
- channel:The sensor channel number.
- val: The value to write to the chip.
+ channel:
+ The sensor channel number.
+ val:
+ The value to write to the chip.
Return value:
0 on success, a negative error number otherwise.
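+
+To tie these descriptions together, here is a minimal sketch of the
+lm75_is_visible() and lm75_read() callbacks referenced in the LM75
+example above; struct lm75_data and lm75_read_temp() are hypothetical
+helpers, not part of the hwmon API::
+
+	static umode_t lm75_is_visible(const void *data,
+				       enum hwmon_sensor_types type,
+				       u32 attr, int channel)
+	{
+		if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+			return S_IRUGO | S_IWUSR;
+
+		if (type == hwmon_temp) {
+			switch (attr) {
+			case hwmon_temp_input:
+				return S_IRUGO;
+			case hwmon_temp_max:
+			case hwmon_temp_max_hyst:
+				return S_IRUGO | S_IWUSR;
+			}
+		}
+		return 0;
+	}
+
+	static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
+			     u32 attr, int channel, long *val)
+	{
+		struct lm75_data *data = dev_get_drvdata(dev);
+
+		if (type == hwmon_temp && attr == hwmon_temp_input)
+			return lm75_read_temp(data, val); /* millidegrees C */
+
+		return -EOPNOTSUPP;
+	}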
@@ -317,25 +362,25 @@ Standard functions, similar to DEVICE_ATTR_{RW,RO,WO}, have _show and _store
appended to the provided function name.
SENSOR_DEVICE_ATTR and its variants define a struct sensor_device_attribute
-variable. This structure has the following fields.
+variable. This structure has the following fields::
-struct sensor_device_attribute {
- struct device_attribute dev_attr;
- int index;
-};
+ struct sensor_device_attribute {
+ struct device_attribute dev_attr;
+ int index;
+ };
You can use to_sensor_dev_attr to get the pointer to this structure from the
attribute read or write function. Its parameter is the struct
device_attribute pointer passed to that function.
SENSOR_DEVICE_ATTR_2 and its variants define a struct sensor_device_attribute_2
-variable, which is defined as follows.
+variable, which is defined as follows::
-struct sensor_device_attribute_2 {
- struct device_attribute dev_attr;
- u8 index;
- u8 nr;
-};
+ struct sensor_device_attribute_2 {
+ struct device_attribute dev_attr;
+ u8 index;
+ u8 nr;
+ };
Use to_sensor_dev_attr_2 to get the pointer to this structure. Its parameter
is likewise the struct device_attribute pointer passed to the accessor
function.
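+
+A short sketch of how these pieces fit together; "my_data" and its temp
+array are hypothetical driver internals::
+
+	static ssize_t temp_show(struct device *dev,
+				 struct device_attribute *devattr, char *buf)
+	{
+		struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+		struct my_data *data = dev_get_drvdata(dev);
+
+		/* attr->index carries the per-attribute index declared below */
+		return sprintf(buf, "%d\n", data->temp[attr->index]);
+	}
+	static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);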
diff --git a/Documentation/hwmon/ibm-cffps b/Documentation/hwmon/ibm-cffps.rst
index e05ecd8ecfcf..52e74e39463a 100644
--- a/Documentation/hwmon/ibm-cffps
+++ b/Documentation/hwmon/ibm-cffps.rst
@@ -2,6 +2,7 @@ Kernel driver ibm-cffps
=======================
Supported chips:
+
* IBM Common Form Factor power supply
Author: Eddie James <eajames@us.ibm.com>
@@ -24,6 +25,7 @@ Sysfs entries
The following attributes are supported:
+======================= ======================================================
curr1_alarm Output current over-current alarm.
curr1_input Measured output current in mA.
curr1_label "iout1"
@@ -52,3 +54,4 @@ temp2_alarm Secondary rectifier temp over-temperature alarm.
temp2_input Measured secondary rectifier temp in millidegrees C.
temp3_alarm ORing FET temperature over-temperature alarm.
temp3_input Measured ORing FET temperature in millidegrees C.
+======================= ======================================================
diff --git a/Documentation/hwmon/ibmaem b/Documentation/hwmon/ibmaem.rst
index 1e0d59e000b4..f07a14a1c2f5 100644
--- a/Documentation/hwmon/ibmaem
+++ b/Documentation/hwmon/ibmaem.rst
@@ -1,15 +1,21 @@
Kernel driver ibmaem
-======================
+====================
This driver talks to the IBM Systems Director Active Energy Manager, known
henceforth as AEM.
Supported systems:
+
* Any recent IBM System X server with AEM support.
+
This includes the x3350, x3550, x3650, x3655, x3755, x3850 M2,
- x3950 M2, and certain HC10/HS2x/LS2x/QS2x blades. The IPMI host interface
+ x3950 M2, and certain HC10/HS2x/LS2x/QS2x blades.
+
+ The IPMI host interface
driver ("ipmi-si") needs to be loaded for this driver to do anything.
+
Prefix: 'ibmaem'
+
Datasheet: Not available
Author: Darrick J. Wong
diff --git a/Documentation/hwmon/ibmpowernv b/Documentation/hwmon/ibmpowernv.rst
index 56468258711f..5d642bc3dec0 100644
--- a/Documentation/hwmon/ibmpowernv
+++ b/Documentation/hwmon/ibmpowernv.rst
@@ -2,6 +2,7 @@ Kernel Driver IBMPOWERNV
========================
Supported systems:
+
* Any recent IBM P servers based on POWERNV platform
Author: Neelesh Gupta
@@ -29,10 +30,11 @@ CONFIG_SENSORS_IBMPOWERNV. It can also be built as module 'ibmpowernv'.
Sysfs attributes
----------------
+======================= =======================================================
fanX_input Measured RPM value.
fanX_min Threshold RPM for alert generation.
-fanX_fault 0: No fail condition
- 1: Failing fan
+fanX_fault - 0: No fail condition
+ - 1: Failing fan
tempX_input Measured ambient temperature.
tempX_max Threshold ambient temperature for alert generation.
@@ -42,20 +44,22 @@ tempX_enable Enable/disable all temperature sensors belonging to the
sub-group. In POWER9, this attribute corresponds to
each OCC. Using this attribute each OCC can be asked to
disable/enable all of its temperature sensors.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
inX_input Measured power supply voltage (millivolt)
-inX_fault 0: No fail condition.
- 1: Failing power supply.
+inX_fault - 0: No fail condition.
+ - 1: Failing power supply.
inX_highest Historical maximum voltage
inX_lowest Historical minimum voltage
inX_enable Enable/disable all voltage sensors belonging to the
sub-group. In POWER9, this attribute corresponds to
each OCC. Using this attribute each OCC can be asked to
disable/enable all of its voltage sensors.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
powerX_input Power consumption (microWatt)
powerX_input_highest Historical maximum power
@@ -64,8 +68,9 @@ powerX_enable Enable/disable all power sensors belonging to the
sub-group. In POWER9, this attribute corresponds to
each OCC. Using this attribute each OCC can be asked to
disable/enable all of its power sensors.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
currX_input Measured current (milliampere)
currX_highest Historical maximum current
@@ -74,7 +79,9 @@ currX_enable Enable/disable all current sensors belonging to the
sub-group. In POWER9, this attribute corresponds to
each OCC. Using this attribute each OCC can be asked to
disable/enable all of its current sensors.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
energyX_input Cumulative energy (microJoule)
+======================= =======================================================
diff --git a/Documentation/hwmon/ina209 b/Documentation/hwmon/ina209.rst
index 672501de4509..64322075a145 100644
--- a/Documentation/hwmon/ina209
+++ b/Documentation/hwmon/ina209.rst
@@ -1,16 +1,21 @@
Kernel driver ina209
-=====================
+====================
Supported chips:
+
* Burr-Brown / Texas Instruments INA209
+
Prefix: 'ina209'
+
Addresses scanned: -
+
Datasheet:
- http://www.ti.com/lit/gpn/ina209
+ http://www.ti.com/lit/gpn/ina209
-Author: Paul Hays <Paul.Hays@cattail.ca>
-Author: Ira W. Snyder <iws@ovro.caltech.edu>
-Author: Guenter Roeck <linux@roeck-us.net>
+Author:
+ - Paul Hays <Paul.Hays@cattail.ca>
+ - Ira W. Snyder <iws@ovro.caltech.edu>
+ - Guenter Roeck <linux@roeck-us.net>
Description
@@ -31,7 +36,7 @@ the I2C bus. See the datasheet for details.
This driver tries to expose most monitoring features of the hardware via
sysfs. It does not support every feature of this chip.
-
+======================= =======================================================
in0_input shunt voltage (mV)
in0_input_highest shunt voltage historical maximum reading (mV)
in0_input_lowest shunt voltage historical minimum reading (mV)
@@ -70,6 +75,7 @@ curr1_input current measurement (mA)
update_interval data conversion time; affects number of samples used
to average results for shunt and bus voltages.
+======================= =======================================================
General Remarks
---------------
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx.rst
index 0f36c021192d..94b9a260c518 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx.rst
@@ -2,35 +2,56 @@ Kernel driver ina2xx
====================
Supported chips:
+
* Texas Instruments INA219
+
Prefix: 'ina219'
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
* Texas Instruments INA220
+
Prefix: 'ina220'
+
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
* Texas Instruments INA226
+
Prefix: 'ina226'
+
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
* Texas Instruments INA230
+
Prefix: 'ina230'
+
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
* Texas Instruments INA231
+
Prefix: 'ina231'
+
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
Author: Lothar Felten <lothar.felten@gmail.com>
@@ -57,23 +78,27 @@ refer to the Documentation/devicetree/bindings/hwmon/ina2xx.txt for bindings
if the device tree is used.
Additionally, ina226 supports the update_interval attribute as described in
-Documentation/hwmon/sysfs-interface. Internally the interval is the sum of
+Documentation/hwmon/sysfs-interface.rst. Internally the interval is the sum of
bus and shunt voltage conversion times multiplied by the averaging rate. The
driver does not touch the conversion times and only modifies the number of
averages. The lower limit of update_interval is 2 ms; the upper limit is
2253 ms.
The actual programmed interval may vary from the desired value.
General sysfs entries
--------------
+---------------------
+======================= ===============================
in0_input Shunt voltage(mV) channel
in1_input Bus voltage(mV) channel
curr1_input Current(mA) measurement channel
power1_input Power(uW) measurement channel
shunt_resistor Shunt resistance(uOhm) channel
+======================= ===============================
Sysfs entries for ina226, ina230 and ina231 only
--------------
+------------------------------------------------
+======================= ====================================================
update_interval data conversion time; affects number of samples used
to average results for shunt and bus voltages.
+======================= ====================================================
diff --git a/Documentation/hwmon/ina3221 b/Documentation/hwmon/ina3221.rst
index 4b82cbfb551c..f6007ae8f4e2 100644
--- a/Documentation/hwmon/ina3221
+++ b/Documentation/hwmon/ina3221.rst
@@ -2,11 +2,16 @@ Kernel driver ina3221
=====================
Supported chips:
+
* Texas Instruments INA3221
+
Prefix: 'ina3221'
+
Addresses: I2C 0x40 - 0x43
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
Author: Andrew F. Davis <afd@ti.com>
@@ -21,17 +26,37 @@ and power are calculated host-side from these.
Sysfs entries
-------------
+======================= =======================================================
in[123]_label Voltage channel labels
in[123]_enable Voltage channel enable controls
in[123]_input Bus voltage(mV) channels
curr[123]_input Current(mA) measurement channels
shunt[123]_resistor Shunt resistance(uOhm) channels
curr[123]_crit Critical alert current(mA) setting, activates the
- corresponding alarm when the respective current
- is above this value
+ corresponding alarm when the respective current
+ is above this value
curr[123]_crit_alarm Critical alert current limit exceeded
curr[123]_max Warning alert current(mA) setting, activates the
- corresponding alarm when the respective current
- average is above this value.
+ corresponding alarm when the respective current
+ average is above this value.
curr[123]_max_alarm Warning alert current limit exceeded
in[456]_input Shunt voltage(uV) for channels 1, 2, and 3 respectively
+samples Number of samples used in the averaging mode.
+
+ Supported numbers of samples:
+
+ 1, 4, 16, 64, 128, 256, 512, 1024
+
+update_interval Data conversion time in milliseconds, following:
+
+ update_interval = C x S x (BC + SC)
+
+ * C: number of enabled channels
+ * S: number of samples
+ * BC: bus-voltage conversion time in milliseconds
+ * SC: shunt-voltage conversion time in milliseconds
+
+ For example, three enabled channels averaging 64 samples
+ with 1 ms bus and shunt conversion times give
+ update_interval = 3 x 64 x (1 + 1) = 384 ms.
+
+ Affects both Bus- and Shunt-voltage conversion time.
+ Note that setting update_interval to 0 ms sets both BC
+ and SC to 140 us (minimum conversion time).
+======================= =======================================================
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
new file mode 100644
index 000000000000..ee090e51653a
--- /dev/null
+++ b/Documentation/hwmon/index.rst
@@ -0,0 +1,182 @@
+=========================
+Linux Hardware Monitoring
+=========================
+
+.. toctree::
+ :maxdepth: 1
+
+ hwmon-kernel-api
+ pmbus-core
+ submitting-patches
+ sysfs-interface
+ userspace-tools
+
+Hardware Monitoring Kernel Drivers
+==================================
+
+.. toctree::
+ :maxdepth: 1
+
+ ab8500
+ abituguru
+ abituguru3
+ abx500
+ acpi_power_meter
+ ad7314
+ adc128d818
+ adm1021
+ adm1025
+ adm1026
+ adm1031
+ adm1275
+ adm9240
+ ads1015
+ ads7828
+ adt7410
+ adt7411
+ adt7462
+ adt7470
+ adt7475
+ amc6821
+ asb100
+ asc7621
+ aspeed-pwm-tacho
+ coretemp
+ da9052
+ da9055
+ dme1737
+ ds1621
+ ds620
+ emc1403
+ emc2103
+ emc6w201
+ f71805f
+ f71882fg
+ fam15h_power
+ ftsteutates
+ g760a
+ g762
+ gl518sm
+ hih6130
+ ibmaem
+ ibm-cffps
+ ibmpowernv
+ ina209
+ ina2xx
+ ina3221
+ ir35221
+ ir38064
+ isl68137
+ it87
+ jc42
+ k10temp
+ k8temp
+ lineage-pem
+ lm25066
+ lm63
+ lm70
+ lm73
+ lm75
+ lm77
+ lm78
+ lm80
+ lm83
+ lm85
+ lm87
+ lm90
+ lm92
+ lm93
+ lm95234
+ lm95245
+ lochnagar
+ ltc2945
+ ltc2978
+ ltc2990
+ ltc3815
+ ltc4151
+ ltc4215
+ ltc4245
+ ltc4260
+ ltc4261
+ max16064
+ max16065
+ max1619
+ max1668
+ max197
+ max20751
+ max31722
+ max31785
+ max31790
+ max34440
+ max6639
+ max6642
+ max6650
+ max6697
+ max8688
+ mc13783-adc
+ mcp3021
+ menf21bmc
+ mlxreg-fan
+ nct6683
+ nct6775
+ nct7802
+ nct7904
+ npcm750-pwm-fan
+ nsa320
+ ntc_thermistor
+ occ
+ pc87360
+ pc87427
+ pcf8591
+ pmbus
+ powr1220
+ pwm-fan
+ raspberrypi-hwmon
+ sch5627
+ sch5636
+ scpi-hwmon
+ sht15
+ sht21
+ sht3x
+ shtc1
+ sis5595
+ smm665
+ smsc47b397
+ smsc47m192
+ smsc47m1
+ tc654
+ tc74
+ thmc50
+ tmp102
+ tmp103
+ tmp108
+ tmp401
+ tmp421
+ tps40422
+ twl4030-madc-hwmon
+ ucd9000
+ ucd9200
+ vexpress
+ via686a
+ vt1211
+ w83627ehf
+ w83627hf
+ w83773g
+ w83781d
+ w83791d
+ w83792d
+ w83793
+ w83795
+ w83l785ts
+ w83l786ng
+ wm831x
+ wm8350
+ xgene-hwmon
+ zl6100
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/hwmon/ir35221 b/Documentation/hwmon/ir35221.rst
index f7e112752c04..a83922e5ccb5 100644
--- a/Documentation/hwmon/ir35221
+++ b/Documentation/hwmon/ir35221.rst
@@ -2,9 +2,12 @@ Kernel driver ir35221
=====================
Supported chips:
- * Infinion IR35221
+ * Infineon IR35221
+
Prefix: 'ir35221'
+
Addresses scanned: -
+
Datasheet: Datasheet is not publicly available.
Author: Samuel Mendoza-Jonas <sam@mendozajonas.com>
@@ -23,15 +26,16 @@ This driver does not probe for PMBus devices. You will have to instantiate
devices explicitly.
Example: the following commands will load the driver for an IR35221
-at address 0x70 on I2C bus #4:
+at address 0x70 on I2C bus #4::
-# modprobe ir35221
-# echo ir35221 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
+ # modprobe ir35221
+ # echo ir35221 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
Sysfs attributes
----------------
+======================= =======================================================
curr1_label "iin"
curr1_input Measured input current
curr1_max Maximum current
@@ -85,3 +89,4 @@ temp[1-2]_highest Highest temperature
temp[1-2]_lowest Lowest temperature
temp[1-2]_max Maximum temperature
temp[1-2]_max_alarm Chip temperature high alarm
+======================= =======================================================
diff --git a/Documentation/hwmon/ir38064.rst b/Documentation/hwmon/ir38064.rst
new file mode 100644
index 000000000000..c455d755a267
--- /dev/null
+++ b/Documentation/hwmon/ir38064.rst
@@ -0,0 +1,66 @@
+Kernel driver ir38064
+=====================
+
+Supported chips:
+
+ * Infineon IR38064
+
+ Prefix: 'ir38064'
+ Addresses scanned: -
+
+ Datasheet: Publicly available at the Infineon website
+ https://www.infineon.com/dgdl/Infineon-IR38064MTRPBF-DS-v03_07-EN.pdf?fileId=5546d462584d1d4a0158db0d9efb67ca
+
+Authors:
+ - Maxim Sloyko <maxims@google.com>
+ - Patrick Venture <venture@google.com>
+
+Description
+-----------
+
+IR38064 is a single-input voltage, synchronous buck regulator (DC-DC converter).
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have to instantiate
+devices explicitly.
+
+Sysfs attributes
+----------------
+
+======================= ========================================
+curr1_label "iout1"
+curr1_input Measured output current
+curr1_crit Critical maximum current
+curr1_crit_alarm Current critical high alarm
+curr1_max Maximum current
+curr1_max_alarm Current high alarm
+
+in1_label "vin"
+in1_input Measured input voltage
+in1_crit Critical maximum input voltage
+in1_crit_alarm Input voltage critical high alarm
+in1_min Minimum input voltage
+in1_min_alarm Input voltage low alarm
+
+in2_label "vout1"
+in2_input Measured output voltage
+in2_lcrit Critical minimum output voltage
+in2_lcrit_alarm Output voltage critical low alarm
+in2_crit Critical maximum output voltage
+in2_crit_alarm Output voltage critical high alarm
+in2_max Maximum output voltage
+in2_max_alarm Output voltage high alarm
+in2_min Minimum output voltage
+in2_min_alarm Output voltage low alarm
+
+power1_label "pout1"
+power1_input Measured output power
+
+temp1_input Measured temperature
+temp1_crit Critical high temperature
+temp1_crit_alarm Chip temperature critical high alarm
+temp1_max Maximum temperature
+temp1_max_alarm Chip temperature high alarm
+======================= ========================================
diff --git a/Documentation/hwmon/isl68137.rst b/Documentation/hwmon/isl68137.rst
new file mode 100644
index 000000000000..a5a7c8545c9e
--- /dev/null
+++ b/Documentation/hwmon/isl68137.rst
@@ -0,0 +1,80 @@
+Kernel driver isl68137
+======================
+
+Supported chips:
+
+ * Intersil ISL68137
+
+ Prefix: 'isl68137'
+
+ Addresses scanned: -
+
+ Datasheet:
+
+ Publicly available at the Intersil website
+ https://www.intersil.com/content/dam/Intersil/documents/isl6/isl68137.pdf
+
+Authors:
+ - Maxim Sloyko <maxims@google.com>
+ - Robert Lippert <rlippert@google.com>
+ - Patrick Venture <venture@google.com>
+
+Description
+-----------
+
+Intersil ISL68137 is a digital output 7-phase configurable PWM
+controller with an AVSBus interface.
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have to instantiate
+devices explicitly.
+
+The ISL68137 AVS operation mode must be enabled/disabled at runtime.
+
+Beyond the normal sysfs pmbus attributes, the driver exposes a control attribute.
+
+Additional Sysfs attributes
+---------------------------
+
+======================= ====================================
+avs(0|1)_enable Controls the AVS state of each rail.
+
+curr1_label "iin"
+curr1_input Measured input current
+curr1_crit Critical maximum current
+curr1_crit_alarm Current critical high alarm
+
+curr[2-3]_label "iout[1-2]"
+curr[2-3]_input Measured output current
+curr[2-3]_crit Critical maximum current
+curr[2-3]_crit_alarm Current critical high alarm
+
+in1_label "vin"
+in1_input Measured input voltage
+in1_lcrit Critical minimum input voltage
+in1_lcrit_alarm Input voltage critical low alarm
+in1_crit Critical maximum input voltage
+in1_crit_alarm Input voltage critical high alarm
+
+in[2-3]_label "vout[1-2]"
+in[2-3]_input Measured output voltage
+in[2-3]_lcrit Critical minimum output voltage
+in[2-3]_lcrit_alarm Output voltage critical low alarm
+in[2-3]_crit Critical maximum output voltage
+in[2-3]_crit_alarm Output voltage critical high alarm
+
+power1_label "pin"
+power1_input Measured input power
+power1_alarm Input power high alarm
+
+power[2-3]_label "pout[1-2]"
+power[2-3]_input Measured output power
+
+temp[1-3]_input Measured temperature
+temp[1-3]_crit Critical high temperature
+temp[1-3]_crit_alarm Chip temperature critical high alarm
+temp[1-3]_max Maximum temperature
+temp[1-3]_max_alarm Chip temperature high alarm
+======================= ====================================
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87.rst
index fff6f6bf55bc..2d83f23bee93 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87.rst
@@ -2,105 +2,179 @@ Kernel driver it87
==================
Supported chips:
+
* IT8603E/IT8623E
+
Prefix: 'it8603'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8620E
+
Prefix: 'it8620'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
* IT8628E
+
Prefix: 'it8628'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8705F
+
Prefix: 'it87'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Once publicly available at the ITE website, but no longer
+
* IT8712F
+
Prefix: 'it8712'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Once publicly available at the ITE website, but no longer
+
* IT8716F/IT8726F
+
Prefix: 'it8716'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Once publicly available at the ITE website, but no longer
+
* IT8718F
+
Prefix: 'it8718'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Once publicly available at the ITE website, but no longer
+
* IT8720F
+
Prefix: 'it8720'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8721F/IT8758E
+
Prefix: 'it8721'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8728F
+
Prefix: 'it8728'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8732F
+
Prefix: 'it8732'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8771E
+
Prefix: 'it8771'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8772E
+
Prefix: 'it8772'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8781F
+
Prefix: 'it8781'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8782F
+
Prefix: 'it8782'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8783E/F
+
Prefix: 'it8783'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8786E
+
Prefix: 'it8786'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8790E
+
Prefix: 'it8790'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* SiS950 [clone of IT8705F]
+
Prefix: 'it87'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
+ Datasheet: No longer available
+
Authors:
- Christophe Gauthron
- Jean Delvare <jdelvare@suse.de>
+ - Christophe Gauthron
+ - Jean Delvare <jdelvare@suse.de>
Module Parameters
-----------------
* update_vbat: int
-
- 0 if vbat should report power on value, 1 if vbat should be updated after
- each read. Default is 0. On some boards the battery voltage is provided
- by either the battery or the onboard power supply. Only the first reading
- at power on will be the actual battery voltage (which the chip does
- automatically). On other boards the battery voltage is always fed to
- the chip so can be read at any time. Excessive reading may decrease
- battery life but no information is given in the datasheet.
+ 0 if vbat should report power on value, 1 if vbat should be updated after
+ each read. Default is 0. On some boards the battery voltage is provided
+ by either the battery or the onboard power supply. Only the first reading
+ at power on will be the actual battery voltage (which the chip does
+ automatically). On other boards the battery voltage is always fed to
+ the chip so can be read at any time. Excessive reading may decrease
+ battery life but no information is given in the datasheet.
* fix_pwm_polarity: int
-
- Force PWM polarity to active high (DANGEROUS). Some chips are
- misconfigured by BIOS - PWM values would be inverted. This option tries
- to fix this. Please contact your BIOS manufacturer and ask him for fix.
+ Force PWM polarity to active high (DANGEROUS). Some chips are
+ misconfigured by the BIOS, so their PWM values are inverted. This option
+ tries to fix this. Please contact your BIOS manufacturer and ask for a fix.
Hardware Interfaces
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42.rst
index b4b671f22453..5b14b49bb6f7 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42.rst
@@ -2,53 +2,100 @@ Kernel driver jc42
==================
Supported chips:
+
* Analog Devices ADT7408
+
Datasheets:
+
http://www.analog.com/static/imported-files/data_sheets/ADT7408.pdf
+
* Atmel AT30TS00, AT30TS002A/B, AT30TSE004A
+
Datasheets:
+
http://www.atmel.com/Images/doc8585.pdf
+
http://www.atmel.com/Images/doc8711.pdf
+
http://www.atmel.com/Images/Atmel-8852-SEEPROM-AT30TSE002A-Datasheet.pdf
+
http://www.atmel.com/Images/Atmel-8868-DTS-AT30TSE004A-Datasheet.pdf
+
* IDT TSE2002B3, TSE2002GB2, TSE2004GB2, TS3000B3, TS3000GB0, TS3000GB2,
+
TS3001GB2
+
Datasheets:
+
Available from IDT web site
+
* Maxim MAX6604
+
Datasheets:
+
http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf
+
* Microchip MCP9804, MCP9805, MCP9808, MCP98242, MCP98243, MCP98244, MCP9843
+
Datasheets:
+
http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/25095A.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/22327A.pdf
+
* NXP Semiconductors SE97, SE97B, SE98, SE98A
+
Datasheets:
+
http://www.nxp.com/documents/data_sheet/SE97.pdf
+
http://www.nxp.com/documents/data_sheet/SE97B.pdf
+
http://www.nxp.com/documents/data_sheet/SE98.pdf
+
http://www.nxp.com/documents/data_sheet/SE98A.pdf
+
* ON Semiconductor CAT34TS02, CAT6095
+
Datasheet:
+
http://www.onsemi.com/pub_link/Collateral/CAT34TS02-D.PDF
+
http://www.onsemi.com/pub/Collateral/CAT6095-D.PDF
+
* ST Microelectronics STTS424, STTS424E02, STTS2002, STTS2004, STTS3000
+
Datasheets:
+
http://www.st.com/web/en/resource/technical/document/datasheet/CD00157556.pdf
+
http://www.st.com/web/en/resource/technical/document/datasheet/CD00157558.pdf
+
http://www.st.com/web/en/resource/technical/document/datasheet/CD00266638.pdf
+
http://www.st.com/web/en/resource/technical/document/datasheet/CD00225278.pdf
+
http://www.st.com/web/en/resource/technical/document/datasheet/DM00076709.pdf
+
* JEDEC JC 42.4 compliant temperature sensor chips
+
Datasheet:
+
http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
+
Common for all chips:
+
Prefix: 'jc42'
+
Addresses scanned: I2C 0x18 - 0x1f
Author:
@@ -67,10 +114,10 @@ The driver auto-detects the chips listed above, but can be manually instantiated
to support other JC 42.4 compliant chips.
Example: the following will load the driver for a generic JC 42.4 compliant
-temperature sensor at address 0x18 on I2C bus #1:
+temperature sensor at address 0x18 on I2C bus #1::
-# modprobe jc42
-# echo jc42 0x18 > /sys/bus/i2c/devices/i2c-1/new_device
+ # modprobe jc42
+ # echo jc42 0x18 > /sys/bus/i2c/devices/i2c-1/new_device
A JC 42.4 compliant chip supports a single temperature sensor. Minimum, maximum,
and critical temperature can be configured. There are alarms for high, low,
@@ -90,6 +137,7 @@ cannot be changed.
Sysfs entries
-------------
+======================= ===========================================
temp1_input Temperature (RO)
temp1_min Minimum temperature (RO or RW)
temp1_max Maximum temperature (RO or RW)
@@ -101,3 +149,4 @@ temp1_max_hyst Maximum hysteresis temperature (RO)
temp1_min_alarm Temperature low alarm
temp1_max_alarm Temperature high alarm
temp1_crit_alarm Temperature critical alarm
+======================= ===========================================
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp.rst
index 254d2f55345a..12a86ba17de9 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp.rst
@@ -2,42 +2,77 @@ Kernel driver k10temp
=====================
Supported chips:
+
* AMD Family 10h processors:
+
Socket F: Quad-Core/Six-Core/Embedded Opteron (but see below)
+
Socket AM2+: Quad-Core Opteron, Phenom (II) X3/X4, Athlon X2 (but see below)
+
Socket AM3: Quad-Core Opteron, Athlon/Phenom II X2/X3/X4, Sempron II
+
Socket S1G3: Athlon II, Sempron, Turion II
+
* AMD Family 11h processors:
+
Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
+
* AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
+
* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
+
* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri", "Carrizo"
+
* AMD Family 16h processors: "Kabini", "Mullins"
Prefix: 'k10temp'
+
Addresses scanned: PCI space
+
Datasheets:
+
BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h Processors:
+
http://support.amd.com/us/Processor_TechDocs/31116.pdf
+
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
+
http://support.amd.com/us/Processor_TechDocs/41256.pdf
+
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 12h Processors:
+
http://support.amd.com/us/Processor_TechDocs/41131.pdf
+
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
+
http://support.amd.com/us/Processor_TechDocs/43170.pdf
+
Revision Guide for AMD Family 10h Processors:
+
http://support.amd.com/us/Processor_TechDocs/41322.pdf
+
Revision Guide for AMD Family 11h Processors:
+
http://support.amd.com/us/Processor_TechDocs/41788.pdf
+
Revision Guide for AMD Family 12h Processors:
+
http://support.amd.com/us/Processor_TechDocs/44739.pdf
+
Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
+
http://support.amd.com/us/Processor_TechDocs/47534.pdf
+
AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
+
http://support.amd.com/us/Processor_TechDocs/43373.pdf
+
AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
+
http://support.amd.com/us/Processor_TechDocs/43374.pdf
+
AMD Family 10h Desktop Processor Power and Thermal Data Sheet:
+
http://support.amd.com/us/Processor_TechDocs/43375.pdf
Author: Clemens Ladisch <clemens@ladisch.de>
@@ -60,7 +95,7 @@ are using an AM3 processor on an AM2+ mainboard, you can safely use the
There is one temperature measurement value, available as temp1_input in
sysfs. It is measured in degrees Celsius with a resolution of 1/8th degree.
-Please note that it is defined as a relative value; to quote the AMD manual:
+Please note that it is defined as a relative value; to quote the AMD manual::
Tctl is the processor temperature control value, used by the platform to
control cooling systems. Tctl is a non-physical temperature on an
diff --git a/Documentation/hwmon/k8temp b/Documentation/hwmon/k8temp.rst
index 716dc24c7237..72da12aa17e5 100644
--- a/Documentation/hwmon/k8temp
+++ b/Documentation/hwmon/k8temp.rst
@@ -2,12 +2,17 @@ Kernel driver k8temp
====================
Supported chips:
+
* AMD Athlon64/FX or Opteron CPUs
+
Prefix: 'k8temp'
+
Addresses scanned: PCI space
+
Datasheet: http://support.amd.com/us/Processor_TechDocs/32559.pdf
Author: Rudolf Marek
+
Contact: Rudolf Marek <r.marek@assembler.cz>
Description
@@ -27,10 +32,12 @@ implemented sensors.
Mapping of /sys files is as follows:
-temp1_input - temperature of Core 0 and "place" 0
-temp2_input - temperature of Core 0 and "place" 1
-temp3_input - temperature of Core 1 and "place" 0
-temp4_input - temperature of Core 1 and "place" 1
+============= ===================================
+temp1_input temperature of Core 0 and "place" 0
+temp2_input temperature of Core 0 and "place" 1
+temp3_input temperature of Core 1 and "place" 0
+temp4_input temperature of Core 1 and "place" 1
+============= ===================================
Temperatures are measured in degrees Celsius and measurement resolution is
1 degree C. It is expected that future CPUs will have better resolution. The
@@ -48,7 +55,7 @@ computed temperature called TControl, which must be lower than TControlMax.
The relationship is as follows:
-temp1_input - TjOffset*2 < TControlMax,
+ temp1_input - TjOffset*2 < TControlMax,
TjOffset is not yet exported by the driver; TControlMax is usually
70 degrees C. The rule of thumb: the CPU temperature should not cross
diff --git a/Documentation/hwmon/lineage-pem b/Documentation/hwmon/lineage-pem.rst
index 83b2ddc160c8..10c271dc20e8 100644
--- a/Documentation/hwmon/lineage-pem
+++ b/Documentation/hwmon/lineage-pem.rst
@@ -2,11 +2,16 @@ Kernel driver lineage-pem
=========================
Supported devices:
+
* Lineage Compact Power Line Power Entry Modules
+
Prefix: 'lineage-pem'
+
Addresses scanned: -
+
Documentation:
- http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
+
+ http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -31,9 +36,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for a Lineage PEM at address 0x40
-on I2C bus #1:
-$ modprobe lineage-pem
-$ echo lineage-pem 0x40 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe lineage-pem
+ $ echo lineage-pem 0x40 > /sys/bus/i2c/devices/i2c-1/new_device
All Lineage CPL power entry modules have a built-in I2C bus master selector
(PCA9541). To ensure device access, this driver should only be used as client
@@ -51,6 +57,7 @@ Input voltage, input current, input power, and fan speed measurement is only
supported on newer devices. The driver detects if those attributes are supported,
and only creates respective sysfs entries if they are.
+======================= ===============================
in1_input Output voltage (mV)
in1_min_alarm Output undervoltage alarm
in1_max_alarm Output overvoltage alarm
@@ -75,3 +82,4 @@ temp1_crit
temp1_alarm
temp1_crit_alarm
temp1_fault
+======================= ===============================
diff --git a/Documentation/hwmon/lm25066 b/Documentation/hwmon/lm25066.rst
index 51b32aa203a8..da15e3094c8c 100644
--- a/Documentation/hwmon/lm25066
+++ b/Documentation/hwmon/lm25066.rst
@@ -2,34 +2,62 @@ Kernel driver lm25066
=====================
Supported chips:
+
* TI LM25056
+
Prefix: 'lm25056'
+
Addresses scanned: -
+
Datasheets:
+
http://www.ti.com/lit/gpn/lm25056
+
http://www.ti.com/lit/gpn/lm25056a
+
* National Semiconductor LM25066
+
Prefix: 'lm25066'
+
Addresses scanned: -
+
Datasheets:
+
http://www.national.com/pf/LM/LM25066.html
+
http://www.national.com/pf/LM/LM25066A.html
+
* National Semiconductor LM5064
+
Prefix: 'lm5064'
+
Addresses scanned: -
+
Datasheet:
+
http://www.national.com/pf/LM/LM5064.html
+
* National Semiconductor LM5066
+
Prefix: 'lm5066'
+
Addresses scanned: -
+
Datasheet:
+
http://www.national.com/pf/LM/LM5066.html
+
* Texas Instruments LM5066I
+
Prefix: 'lm5066i'
+
Addresses scanned: -
+
Datasheet:
+
http://www.ti.com/product/LM5066I
+
Author: Guenter Roeck <linux@roeck-us.net>
@@ -41,7 +69,7 @@ LM25066, LM5064, and LM5066/LM5066I Power Management, Monitoring,
Control, and Protection ICs.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -64,6 +92,7 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= =======================================================
in1_label "vin"
in1_input Measured input voltage.
in1_average Average measured input voltage.
@@ -105,3 +134,4 @@ temp1_max Maximum temperature.
temp1_crit Critical high temperature.
temp1_max_alarm Chip temperature high alarm.
temp1_crit_alarm Chip temperature critical high alarm.
+======================= =======================================================
diff --git a/Documentation/hwmon/lm63 b/Documentation/hwmon/lm63.rst
index 4a00461512a6..f478132b0408 100644
--- a/Documentation/hwmon/lm63
+++ b/Documentation/hwmon/lm63.rst
@@ -2,26 +2,43 @@ Kernel driver lm63
==================
Supported chips:
+
* National Semiconductor LM63
+
Prefix: 'lm63'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM63.html
+
+ http://www.national.com/pf/LM/LM63.html
+
* National Semiconductor LM64
+
Prefix: 'lm64'
+
Addresses scanned: I2C 0x18 and 0x4e
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM64.html
+
+ http://www.national.com/pf/LM/LM64.html
+
* National Semiconductor LM96163
+
Prefix: 'lm96163'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM96163.html
+
+ http://www.national.com/pf/LM/LM96163.html
+
Author: Jean Delvare <jdelvare@suse.de>
Thanks go to Tyan and especially Alex Buckingham for setting up a remote
access to their S4882 test platform for this driver.
+
http://www.tyan.com/
Description
@@ -32,6 +49,7 @@ and control.
The LM63 is basically an LM86 with fan speed monitoring and control
capabilities added. It misses some of the LM86 features though:
+
- No low limit for local temperature.
- No critical limit for local temperature.
- Critical limit for remote temperature can be changed only once. We
diff --git a/Documentation/hwmon/lm70 b/Documentation/hwmon/lm70.rst
index c3a1f2ea017d..f259bc1fcd91 100644
--- a/Documentation/hwmon/lm70
+++ b/Documentation/hwmon/lm70.rst
@@ -2,19 +2,30 @@ Kernel driver lm70
==================
Supported chips:
+
* National Semiconductor LM70
+
Datasheet: http://www.national.com/pf/LM/LM70.html
+
* Texas Instruments TMP121/TMP123
+
Information: http://focus.ti.com/docs/prod/folders/print/tmp121.html
+
* Texas Instruments TMP122/TMP124
+
Information: http://www.ti.com/product/tmp122
+
* National Semiconductor LM71
+
Datasheet: http://www.ti.com/product/LM71
+
* National Semiconductor LM74
+
Datasheet: http://www.ti.com/product/LM74
+
Author:
- Kaiwan N Billimoria <kaiwan@designergraphix.com>
+ Kaiwan N Billimoria <kaiwan@designergraphix.com>
Description
-----------
diff --git a/Documentation/hwmon/lm73 b/Documentation/hwmon/lm73.rst
index 8af059dcb642..1d6a46844e85 100644
--- a/Documentation/hwmon/lm73
+++ b/Documentation/hwmon/lm73.rst
@@ -2,13 +2,20 @@ Kernel driver lm73
==================
Supported chips:
+
* Texas Instruments LM73
+
Prefix: 'lm73'
+
Addresses scanned: I2C 0x48, 0x49, 0x4a, 0x4c, 0x4d, and 0x4e
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/product/lm73
+
+ http://www.ti.com/product/lm73
+
Author: Guillaume Ligneul <guillaume.ligneul@gmail.com>
+
Documentation: Chris Verges <kg4ysn@gmail.com>
@@ -29,17 +36,18 @@ conversion time via the 'update_interval' sysfs attribute for the
device. This attribute will normalize ranges of input values to the
maximum times defined for the resolution in the datasheet.
+ ============= ============= ============
Resolution Conv. Time Input Range
(C/LSB) (msec) (msec)
- --------------------------------------
+ ============= ============= ============
0.25 14 0..14
0.125 28 15..28
0.0625 56 29..56
0.03125 112 57..infinity
- --------------------------------------
+ ============= ============= ============
The following examples show how the 'update_interval' attribute can be
-used to change the conversion time:
+used to change the conversion time::
$ echo 0 > update_interval
$ cat update_interval
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75.rst
index 010583608f12..ba8acbd2a6cb 100644
--- a/Documentation/hwmon/lm75
+++ b/Documentation/hwmon/lm75.rst
@@ -2,68 +2,132 @@ Kernel driver lm75
==================
Supported chips:
+
* National Semiconductor LM75
+
Prefix: 'lm75'
+
Addresses scanned: I2C 0x48 - 0x4f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
* National Semiconductor LM75A
+
Prefix: 'lm75a'
+
Addresses scanned: I2C 0x48 - 0x4f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
* Dallas Semiconductor (now Maxim) DS75, DS1775, DS7505
+
Prefixes: 'ds75', 'ds1775', 'ds7505'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Maxim website
- http://www.maximintegrated.com/
+
+ http://www.maximintegrated.com/
+
* Maxim MAX6625, MAX6626, MAX31725, MAX31726
+
Prefixes: 'max6625', 'max6626', 'max31725', 'max31726'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/
+
+ http://www.maxim-ic.com/
+
* Microchip (TelCom) TCN75
+
Prefix: 'tcn75'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Microchip website
- http://www.microchip.com/
+
+ http://www.microchip.com/
+
* Microchip MCP9800, MCP9801, MCP9802, MCP9803
+
Prefix: 'mcp980x'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Microchip website
- http://www.microchip.com/
+
+ http://www.microchip.com/
+
* Analog Devices ADT75
+
Prefix: 'adt75'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/adt75
+
+ http://www.analog.com/adt75
+
* ST Microelectronics STDS75
+
Prefix: 'stds75'
+
Addresses scanned: none
+
Datasheet: Publicly available at the ST website
- http://www.st.com/internet/analog/product/121769.jsp
+
+ http://www.st.com/internet/analog/product/121769.jsp
+
* ST Microelectronics STLM75
+
Prefix: 'stlm75'
+
Addresses scanned: none
+
Datasheet: Publicly available at the ST website
+
https://www.st.com/resource/en/datasheet/stlm75.pdf
- * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75C, TMP175, TMP275
- Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75c', 'tmp275'
+
+ * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75B, TMP75C, TMP175, TMP275
+
+ Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75b', 'tmp75c', 'tmp275'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/product/tmp100
- http://www.ti.com/product/tmp101
- http://www.ti.com/product/tmp105
- http://www.ti.com/product/tmp112
- http://www.ti.com/product/tmp75
- http://www.ti.com/product/tmp75c
- http://www.ti.com/product/tmp175
- http://www.ti.com/product/tmp275
+
+ http://www.ti.com/product/tmp100
+
+ http://www.ti.com/product/tmp101
+
+ http://www.ti.com/product/tmp105
+
+ http://www.ti.com/product/tmp112
+
+ http://www.ti.com/product/tmp75
+
+ http://www.ti.com/product/tmp75b
+
+ http://www.ti.com/product/tmp75c
+
+ http://www.ti.com/product/tmp175
+
+ http://www.ti.com/product/tmp275
+
* NXP LM75B
+
Prefix: 'lm75b'
+
Addresses scanned: none
+
Datasheet: Publicly available at the NXP website
- http://www.nxp.com/documents/data_sheet/LM75B.pdf
+
+ http://www.nxp.com/documents/data_sheet/LM75B.pdf
Author: Frodo Looijaard <frodol@dds.nl>
diff --git a/Documentation/hwmon/lm77 b/Documentation/hwmon/lm77.rst
index bfc915fe3639..4ed3fe6b999a 100644
--- a/Documentation/hwmon/lm77
+++ b/Documentation/hwmon/lm77.rst
@@ -2,11 +2,17 @@ Kernel driver lm77
==================
Supported chips:
+
* National Semiconductor LM77
+
Prefix: 'lm77'
+
Addresses scanned: I2C 0x48 - 0x4b
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
Author: Andras BALI <drewie@freemail.hu>
@@ -25,6 +31,7 @@ register on the chip, which means that the relative difference between
the limit and its hysteresis is always the same for all 3 limits.
This implementation detail implies the following:
+
* When setting a limit, its hysteresis will automatically follow, the
difference staying unchanged. For example, if the old critical limit
was 80 degrees C, and the hysteresis was 75 degrees C, and you change
diff --git a/Documentation/hwmon/lm78 b/Documentation/hwmon/lm78.rst
index 4dd47731789f..cb7a4832f35e 100644
--- a/Documentation/hwmon/lm78
+++ b/Documentation/hwmon/lm78.rst
@@ -2,19 +2,31 @@ Kernel driver lm78
==================
Supported chips:
+
* National Semiconductor LM78 / LM78-J
+
Prefix: 'lm78'
+
Addresses scanned: I2C 0x28 - 0x2f, ISA 0x290 (8 I/O ports)
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
* National Semiconductor LM79
+
Prefix: 'lm79'
+
Addresses scanned: I2C 0x28 - 0x2f, ISA 0x290 (8 I/O ports)
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
-Authors: Frodo Looijaard <frodol@dds.nl>
- Jean Delvare <jdelvare@suse.de>
+ http://www.national.com/
+
+
+Authors:
+ - Frodo Looijaard <frodol@dds.nl>
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
diff --git a/Documentation/hwmon/lm80 b/Documentation/hwmon/lm80.rst
index a60b43efc32b..c53186abd82e 100644
--- a/Documentation/hwmon/lm80
+++ b/Documentation/hwmon/lm80.rst
@@ -2,20 +2,31 @@ Kernel driver lm80
==================
Supported chips:
+
* National Semiconductor LM80
+
Prefix: 'lm80'
+
Addresses scanned: I2C 0x28 - 0x2f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
* National Semiconductor LM96080
+
Prefix: 'lm96080'
+
Addresses scanned: I2C 0x28 - 0x2f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>
+  - Frodo Looijaard <frodol@dds.nl>
+  - Philip Edelbrock <phil@netroedge.com>

Description
-----------
diff --git a/Documentation/hwmon/lm83 b/Documentation/hwmon/lm83.rst
index 50be5cb26de9..ecf83819960e 100644
--- a/Documentation/hwmon/lm83
+++ b/Documentation/hwmon/lm83.rst
@@ -2,16 +2,24 @@ Kernel driver lm83
==================
Supported chips:
+
* National Semiconductor LM83
+
Prefix: 'lm83'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM83.html
+
+ http://www.national.com/pf/LM/LM83.html
+
* National Semiconductor LM82
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM82.html
+ http://www.national.com/pf/LM/LM82.html
Author: Jean Delvare <jdelvare@suse.de>
@@ -34,13 +42,17 @@ fact that any of these motherboards do actually have an LM83, please
contact us. Note that the LM90 can easily be misdetected as a LM83.
Confirmed motherboards:
+ === =====
SBS P014
SBS PSL09
+ === =====
Unconfirmed motherboards:
+ =========== ==========
Gigabyte GA-8IK1100
Iwill MPX2
Soltek SL-75DRV5
+ =========== ==========
The LM82 is confirmed to have been found on most AMD Geode reference
designs and test platforms.
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85.rst
index 2329c383efe4..faa92f54431c 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85.rst
@@ -2,49 +2,85 @@ Kernel driver lm85
==================
Supported chips:
+
* National Semiconductor LM85 (B and C versions)
+
Prefix: 'lm85b' or 'lm85c'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.national.com/pf/LM/LM85.html
+
* Texas Instruments LM96000
+
Prefix: 'lm9600'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.ti.com/lit/ds/symlink/lm96000.pdf
+
* Analog Devices ADM1027
+
Prefix: 'adm1027'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.onsemi.com/PowerSolutions/product.do?id=ADM1027
+
* Analog Devices ADT7463
+
Prefix: 'adt7463'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.onsemi.com/PowerSolutions/product.do?id=ADT7463
+
* Analog Devices ADT7468
+
Prefix: 'adt7468'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.onsemi.com/PowerSolutions/product.do?id=ADT7468
+
* SMSC EMC6D100, SMSC EMC6D101
+
Prefix: 'emc6d100'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
- Datasheet: http://www.smsc.com/media/Downloads_Public/discontinued/6d100.pdf
+
+ Datasheet: http://www.smsc.com/media/Downloads_Public/discontinued/6d100.pdf
+
* SMSC EMC6D102
+
Prefix: 'emc6d102'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.smsc.com/main/catalog/emc6d102.html
+
* SMSC EMC6D103
+
Prefix: 'emc6d103'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.smsc.com/main/catalog/emc6d103.html
+
* SMSC EMC6D103S
+
Prefix: 'emc6d103s'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.smsc.com/main/catalog/emc6d103s.html
Authors:
- Philip Pokorny <ppokorny@penguincomputing.com>,
- Frodo Looijaard <frodol@dds.nl>,
- Richard Barrington <rich_b_nz@clear.net.nz>,
- Margit Schubert-While <margitsw@t-online.de>,
- Justin Thiessen <jthiessen@penguincomputing.com>
+  - Philip Pokorny <ppokorny@penguincomputing.com>
+  - Frodo Looijaard <frodol@dds.nl>
+  - Richard Barrington <rich_b_nz@clear.net.nz>
+  - Margit Schubert-While <margitsw@t-online.de>
+  - Justin Thiessen <jthiessen@penguincomputing.com>
Description
-----------
@@ -177,38 +213,50 @@ Each temperature sensor is associated with a Zone. There are three
sensors and therefore three zones (# 1, 2 and 3). Each zone has the following
temperature configuration points:
-* temp#_auto_temp_off - temperature below which fans should be off or spinning very low.
-* temp#_auto_temp_min - temperature over which fans start to spin.
-* temp#_auto_temp_max - temperature when fans spin at full speed.
-* temp#_auto_temp_crit - temperature when all fans will run full speed.
+* temp#_auto_temp_off
+ - temperature below which fans should be off or spinning very low.
+* temp#_auto_temp_min
+ - temperature over which fans start to spin.
+* temp#_auto_temp_max
+ - temperature when fans spin at full speed.
+* temp#_auto_temp_crit
+ - temperature when all fans will run full speed.
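+
+A possible zone setup following these points (a sketch; temperatures are
+written in millidegrees Celsius per the usual hwmon convention, and the
+sysfs path is assumed)::
+
+  # cd /sys/class/hwmon/hwmon0/device
+  # echo 30000 > temp1_auto_temp_off   # fans off below 30 degrees C
+  # echo 40000 > temp1_auto_temp_min   # fans start spinning at 40 degrees C
+  # echo 60000 > temp1_auto_temp_max   # full speed at 60 degrees C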
-* PWM Control
+PWM Control
+^^^^^^^^^^^
There are three PWM outputs. The LM85 datasheet suggests that the
pwm3 output control both fan3 and fan4. Each PWM can be individually
configured and assigned to a zone for its control value. Each PWM can be
configured individually according to the following options.
-* pwm#_auto_pwm_min - this specifies the PWM value for temp#_auto_temp_off
- temperature. (PWM value from 0 to 255)
+* pwm#_auto_pwm_min
+ - this specifies the PWM value for temp#_auto_temp_off
+ temperature. (PWM value from 0 to 255)
+
+* pwm#_auto_pwm_minctl
+  - this flag selects the fan behaviour at the
+    temp#_auto_temp_off temperature. Write 1 to keep fans
+    spinning at pwm#_auto_pwm_min, or write 0 to turn them off.
-* pwm#_auto_pwm_minctl - this flags selects for temp#_auto_temp_off temperature
- the behaviour of fans. Write 1 to let fans spinning at
- pwm#_auto_pwm_min or write 0 to let them off.
+.. note::
-NOTE: It has been reported that there is a bug in the LM85 that causes the flag
-to be associated with the zones not the PWMs. This contradicts all the
-published documentation. Setting pwm#_min_ctl in this case actually affects all
-PWMs controlled by zone '#'.
+ It has been reported that there is a bug in the LM85 that causes
+ the flag to be associated with the zones not the PWMs. This
+ contradicts all the published documentation. Setting pwm#_min_ctl
+ in this case actually affects all PWMs controlled by zone '#'.
-* PWM Controlling Zone selection
+PWM Controlling Zone selection
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-* pwm#_auto_channels - controls zone that is associated with PWM
+* pwm#_auto_channels
+ - controls zone that is associated with PWM
Configuration choices:
- Value Meaning
- ------ ------------------------------------------------
+========== =============================================
+Value Meaning
+========== =============================================
1 Controlled by Zone 1
2 Controlled by Zone 2
3 Controlled by Zone 3
@@ -217,6 +265,7 @@ Configuration choices:
0 PWM always 0% (off)
-1 PWM always 100% (full on)
-2 Manual control (write to 'pwm#' to set)
+========== =============================================
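+
+For example, to take a PWM output out of automatic control and drive it
+directly (a sketch; run from the device's sysfs directory)::
+
+  # echo -2 > pwm1_auto_channels   # -2 selects manual control
+  # echo 128 > pwm1                # ~50% duty cycle on the 0-255 scale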
The National LM85's have two vendor specific configuration
features. Tach. mode and Spinup Control. For more details on these,
diff --git a/Documentation/hwmon/lm87 b/Documentation/hwmon/lm87.rst
index a2339fd9acb9..72fcb577ef2a 100644
--- a/Documentation/hwmon/lm87
+++ b/Documentation/hwmon/lm87.rst
@@ -2,23 +2,32 @@ Kernel driver lm87
==================
Supported chips:
+
* National Semiconductor LM87
+
Prefix: 'lm87'
+
Addresses scanned: I2C 0x2c - 0x2e
+
Datasheet: http://www.national.com/pf/LM/LM87.html
+
* Analog Devices ADM1024
+
Prefix: 'adm1024'
+
Addresses scanned: I2C 0x2c - 0x2e
+
Datasheet: http://www.analog.com/en/prod/0,2877,ADM1024,00.html
+
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Mark Studebaker <mdsxyz123@yahoo.com>,
- Stephen Rousset <stephen.rousset@rocketlogix.com>,
- Dan Eaton <dan.eaton@rocketlogix.com>,
- Jean Delvare <jdelvare@suse.de>,
- Original 2.6 port Jeff Oliver
+  - Frodo Looijaard <frodol@dds.nl>
+  - Philip Edelbrock <phil@netroedge.com>
+  - Mark Studebaker <mdsxyz123@yahoo.com>
+  - Stephen Rousset <stephen.rousset@rocketlogix.com>
+  - Dan Eaton <dan.eaton@rocketlogix.com>
+  - Jean Delvare <jdelvare@suse.de>
+  - Original 2.6 port by Jeff Oliver
Description
-----------
diff --git a/Documentation/hwmon/lm90 b/Documentation/hwmon/lm90.rst
index 8122675d30f6..953315987c06 100644
--- a/Documentation/hwmon/lm90
+++ b/Documentation/hwmon/lm90.rst
@@ -2,132 +2,256 @@ Kernel driver lm90
==================
Supported chips:
+
* National Semiconductor LM90
+
Prefix: 'lm90'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM90.html
+
+ http://www.national.com/pf/LM/LM90.html
+
* National Semiconductor LM89
+
Prefix: 'lm89' (no auto-detection)
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/mpf/LM/LM89.html
+
+ http://www.national.com/mpf/LM/LM89.html
+
* National Semiconductor LM99
+
Prefix: 'lm99'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM99.html
+
+ http://www.national.com/pf/LM/LM99.html
+
* National Semiconductor LM86
+
Prefix: 'lm86'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/mpf/LM/LM86.html
+
+ http://www.national.com/mpf/LM/LM86.html
+
* Analog Devices ADM1032
+
Prefix: 'adm1032'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the ON Semiconductor website
- http://www.onsemi.com/PowerSolutions/product.do?id=ADM1032
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADM1032
+
* Analog Devices ADT7461
+
Prefix: 'adt7461'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the ON Semiconductor website
- http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461
+
* Analog Devices ADT7461A
+
Prefix: 'adt7461a'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the ON Semiconductor website
- http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461A
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461A
+
* ON Semiconductor NCT1008
+
Prefix: 'nct1008'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the ON Semiconductor website
- http://www.onsemi.com/PowerSolutions/product.do?id=NCT1008
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=NCT1008
+
* Maxim MAX6646
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4d
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
* Maxim MAX6647
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
* Maxim MAX6648
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
+
* Maxim MAX6649
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
* Maxim MAX6657
+
Prefix: 'max6657'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
* Maxim MAX6658
+
Prefix: 'max6657'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
* Maxim MAX6659
+
Prefix: 'max6659'
+
Addresses scanned: I2C 0x4c, 0x4d, 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
* Maxim MAX6680
+
Prefix: 'max6680'
+
Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
- 0x4c, 0x4d and 0x4e
+
+ 0x4c, 0x4d and 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+
* Maxim MAX6681
+
Prefix: 'max6680'
+
Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
- 0x4c, 0x4d and 0x4e
+
+ 0x4c, 0x4d and 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+
* Maxim MAX6692
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
+
* Maxim MAX6695
+
Prefix: 'max6695'
+
Addresses scanned: I2C 0x18
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+
+ http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+
* Maxim MAX6696
+
Prefix: 'max6695'
+
Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
- 0x4c, 0x4d and 0x4e
+
+ 0x4c, 0x4d and 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+
+ http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+
* Winbond/Nuvoton W83L771W/G
+
Prefix: 'w83l771'
+
Addresses scanned: I2C 0x4c
+
Datasheet: No longer available
+
* Winbond/Nuvoton W83L771AWG/ASG
+
Prefix: 'w83l771'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Not publicly available, can be requested from Nuvoton
+
* Philips/NXP SA56004X
+
Prefix: 'sa56004'
+
Addresses scanned: I2C 0x48 through 0x4F
+
Datasheet: Publicly available at NXP website
- http://ics.nxp.com/products/interface/datasheet/sa56004x.pdf
+
+ http://ics.nxp.com/products/interface/datasheet/sa56004x.pdf
+
* GMT G781
+
Prefix: 'g781'
+
Addresses scanned: I2C 0x4c, 0x4d
+
Datasheet: Not publicly available from GMT
+
* Texas Instruments TMP451
+
Prefix: 'tmp451'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at TI website
- http://www.ti.com/litv/pdf/sbos686
+ http://www.ti.com/litv/pdf/sbos686
Author: Jean Delvare <jdelvare@suse.de>
diff --git a/Documentation/hwmon/lm92 b/Documentation/hwmon/lm92.rst
index cfa99a353b8c..c131b923ed36 100644
--- a/Documentation/hwmon/lm92
+++ b/Documentation/hwmon/lm92.rst
@@ -2,22 +2,35 @@ Kernel driver lm92
==================
Supported chips:
+
* National Semiconductor LM92
+
Prefix: 'lm92'
+
Addresses scanned: I2C 0x48 - 0x4b
+
Datasheet: http://www.national.com/pf/LM/LM92.html
+
* National Semiconductor LM76
+
Prefix: 'lm92'
+
Addresses scanned: none, force parameter needed
+
Datasheet: http://www.national.com/pf/LM/LM76.html
+
* Maxim MAX6633/MAX6634/MAX6635
+
Prefix: 'max6635'
+
Addresses scanned: none, force parameter needed
+
Datasheet: http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3074
+
Authors:
- Abraham van der Merwe <abraham@2d3d.co.za>
- Jean Delvare <jdelvare@suse.de>
+ - Abraham van der Merwe <abraham@2d3d.co.za>
+ - Jean Delvare <jdelvare@suse.de>
Description
diff --git a/Documentation/hwmon/lm93 b/Documentation/hwmon/lm93.rst
index f3b2ad2ceb01..49d199b45b67 100644
--- a/Documentation/hwmon/lm93
+++ b/Documentation/hwmon/lm93.rst
@@ -2,20 +2,29 @@ Kernel driver lm93
==================
Supported chips:
+
* National Semiconductor LM93
+
Prefix 'lm93'
+
Addresses scanned: I2C 0x2c-0x2e
+
Datasheet: http://www.national.com/ds.cgi/LM/LM93.pdf
+
* National Semiconductor LM94
+
Prefix 'lm94'
+
Addresses scanned: I2C 0x2c-0x2e
+
Datasheet: http://www.national.com/ds.cgi/LM/LM94.pdf
+
Authors:
- Mark M. Hoffman <mhoffman@lightlink.com>
- Ported to 2.6 by Eric J. Bowersox <ericb@aspsys.com>
- Adapted to 2.6.20 by Carsten Emde <ce@osadl.org>
- Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de>
+ - Mark M. Hoffman <mhoffman@lightlink.com>
+ - Ported to 2.6 by Eric J. Bowersox <ericb@aspsys.com>
+ - Adapted to 2.6.20 by Carsten Emde <ce@osadl.org>
+ - Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de>
Module Parameters
-----------------
@@ -67,7 +76,8 @@ LM94 are not supported.
User Interface
--------------
-#PROCHOT:
+#PROCHOT
+^^^^^^^^
The LM93 can monitor two #PROCHOT signals. The results are found in the
sysfs files prochot1, prochot2, prochot1_avg, prochot2_avg, prochot1_max,
@@ -86,7 +96,8 @@ prochot2_interval. The values in these files specify the intervals for
list will cause the driver to use the next largest interval. The available
intervals are (in seconds):
-#PROCHOT intervals: 0.73, 1.46, 2.9, 5.8, 11.7, 23.3, 46.6, 93.2, 186, 372
+#PROCHOT intervals:
+ 0.73, 1.46, 2.9, 5.8, 11.7, 23.3, 46.6, 93.2, 186, 372
It is possible to configure the LM93 to logically short the two #PROCHOT
signals. I.e. when #P1_PROCHOT is asserted, the LM93 will automatically
@@ -105,16 +116,15 @@ contains a value controlling the duty cycle for the PWM signal used when
the override function is enabled. This value ranges from 0 to 15, with 0
indicating minimum duty cycle and 15 indicating maximum.
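+
+For instance, to assert #PROCHOT 1 at roughly half duty cycle (a sketch;
+the boolean write to prochot1_override and the sysfs path are
+assumptions)::
+
+  # echo 8 > prochot_override_duty_cycle   # 8 of 15, about 53% duty
+  # echo 1 > prochot1_override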
-#VRD_HOT:
+#VRD_HOT
+^^^^^^^^
The LM93 can monitor two #VRD_HOT signals. The results are found in the
sysfs files vrdhot1 and vrdhot2. There is one value per file: a boolean for
which 1 indicates #VRD_HOT is asserted and 0 indicates it is negated. These
files are read-only.
-Smart Tach Mode:
-
-(from the datasheet)
+Smart Tach Mode (from the datasheet)::
If a fan is driven using a low-side drive PWM, the tachometer
output of the fan is corrupted. The LM93 includes smart tachometer
@@ -127,7 +137,8 @@ the fan tachometer with a pwm) to the sysfs file fan<n>_smart_tach. A zero
will disable the function for that fan. Note that Smart tach mode cannot be
enabled if the PWM output frequency is 22500 Hz (see below).
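+
+For example, with the PWM output frequency set below 22500 Hz (a sketch;
+run from the device's sysfs directory)::
+
+  # echo 1 > fan1_smart_tach   # fan1's tach reading corrected via pwm1
+  # echo 0 > fan2_smart_tach   # disable the function for fan2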
-Manual PWM:
+Manual PWM
+^^^^^^^^^^
The LM93 has a fixed or override mode for the two PWM outputs (although, there
are still some conditions that will override even this mode - see section
@@ -141,7 +152,8 @@ will cause the driver to use the next largest value. Also note: when manual
PWM mode is disabled, the value of pwm1 and pwm2 indicates the current duty
cycle chosen by the h/w.
-PWM Output Frequency:
+PWM Output Frequency
+^^^^^^^^^^^^^^^^^^^^
The LM93 supports several different frequencies for the PWM output channels.
The sysfs files pwm1_freq and pwm2_freq are used to select the frequency. The
@@ -149,9 +161,11 @@ frequency values are constrained by the hardware. Selecting a value which is
not available will cause the driver to use the next largest value. Also note
that this parameter has implications for the Smart Tach Mode (see above).
-PWM Output Frequencies (in Hz): 12, 36, 48, 60, 72, 84, 96, 22500 (default)
+PWM Output Frequencies (in Hz):
+ 12, 36, 48, 60, 72, 84, 96, 22500 (default)
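+
+As with the other constrained attributes, an unlisted value is rounded up
+by the driver (a sketch; run from the device's sysfs directory)::
+
+  # echo 96 > pwm1_freq    # listed value, used as-is
+  # echo 100 > pwm1_freq   # unlisted; the next largest value is used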
-Automatic PWM:
+Automatic PWM
+^^^^^^^^^^^^^
The LM93 is capable of complex automatic fan control, with many different
points of configuration. To start, each PWM output can be bound to any
@@ -163,14 +177,16 @@ The eight control sources are: temp1-temp4 (aka "zones" in the datasheet),
in the sysfs files pwm<n>_auto_channels, where a "1" enables the binding, and
a "0" disables it. The h/w default is 0x0f (all temperatures bound).
- 0x01 - Temp 1
- 0x02 - Temp 2
- 0x04 - Temp 3
- 0x08 - Temp 4
- 0x10 - #PROCHOT 1
- 0x20 - #PROCHOT 2
- 0x40 - #VRDHOT 1
- 0x80 - #VRDHOT 2
+ ====== ===========
+ 0x01 Temp 1
+ 0x02 Temp 2
+ 0x04 Temp 3
+ 0x08 Temp 4
+ 0x10 #PROCHOT 1
+ 0x20 #PROCHOT 2
+ 0x40 #VRDHOT 1
+ 0x80 #VRDHOT 2
+ ====== ===========
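+
+For example, binding PWM output 1 to Temp 1 and #PROCHOT 1 only means
+writing the mask 0x01 | 0x10 = 0x11. Decimal 17 is used in this sketch,
+since not every sysfs attribute accepts hexadecimal input::
+
+  # echo 17 > pwm1_auto_channels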
The function y = f(x) takes a source temperature x to a PWM output y. This
function of the LM93 is derived from a base temperature and a table of 12
@@ -180,7 +196,9 @@ degrees C, with the value of offset <i> for temperature value <n> being
contained in the file temp<n>_auto_offset<i>. E.g. if the base temperature
is 40C:
+ ========== ======================= =============== =======
offset # temp<n>_auto_offset<i> range pwm
+ ========== ======================= =============== =======
1 0 - 25.00%
2 0 - 28.57%
3 1 40C - 41C 32.14%
@@ -193,7 +211,8 @@ is 40C:
10 2 54C - 56C 57.14%
11 2 56C - 58C 71.43%
12 2 58C - 60C 85.71%
- > 60C 100.00%
+ - - > 60C 100.00%
+ ========== ======================= =============== =======
Valid offsets are in the range 0C <= x <= 7.5C in 0.5C increments.
@@ -213,7 +232,8 @@ temp<n>_auto_pwm_min. Note, there are only two minimums: one each for temp[12]
and temp[34]. Therefore, any change to e.g. temp1_auto_pwm_min will also
affect temp2_auto_pwm_min.
-PWM Spin-Up Cycle:
+PWM Spin-Up Cycle
+^^^^^^^^^^^^^^^^^
A spin-up cycle occurs when a PWM output is commanded from 0% duty cycle to
some value > 0%. The LM93 supports a minimum duty cycle during spin-up. These
@@ -225,10 +245,11 @@ the spin-up time in seconds. The available spin-up times are constrained by
the hardware. Selecting a value which is not available will cause the driver
to use the next largest value.
-Spin-up Durations: 0 (disabled, h/w default), 0.1, 0.25, 0.4, 0.7, 1.0,
- 2.0, 4.0
+Spin-up Durations:
+ 0 (disabled, h/w default), 0.1, 0.25, 0.4, 0.7, 1.0, 2.0, 4.0
-#PROCHOT and #VRDHOT PWM Ramping:
+#PROCHOT and #VRDHOT PWM Ramping
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If the #PROCHOT or #VRDHOT signals are asserted while bound to a PWM output
channel, the LM93 will ramp the PWM output up to 100% duty cycle in discrete
@@ -237,9 +258,11 @@ one value each in seconds: pwm_auto_prochot_ramp and pwm_auto_vrdhot_ramp.
The available ramp times are constrained by the hardware. Selecting a value
which is not available will cause the driver to use the next largest value.
-Ramp Times: 0 (disabled, h/w default) to 0.75 in 0.05 second intervals
+Ramp Times:
+ 0 (disabled, h/w default) to 0.75 in 0.05 second intervals
-Fan Boost:
+Fan Boost
+^^^^^^^^^
For each temperature channel, there is a boost temperature: if the channel
exceeds this limit, the LM93 will immediately drive both PWM outputs to 100%.
@@ -249,7 +272,8 @@ limit is reached, the temperature channel must drop below this value before
the boost function is disabled. This temperature is also expressed in degrees
C in the sysfs files temp<n>_auto_boost_hyst.
-GPIO Pins:
+GPIO Pins
+^^^^^^^^^
The LM93 can monitor the logic level of four dedicated GPIO pins as well as the
four tach input pins. GPIO0-GPIO3 correspond to (fan) tach 1-4, respectively.
@@ -260,50 +284,29 @@ LSB is GPIO0, and the MSB is GPIO7.
LM93 Unique sysfs Files
-----------------------
- file description
- -------------------------------------------------------------
-
- prochot<n> current #PROCHOT %
-
- prochot<n>_avg moving average #PROCHOT %
-
- prochot<n>_max limit #PROCHOT %
-
- prochot_short enable or disable logical #PROCHOT pin short
-
- prochot<n>_override force #PROCHOT assertion as PWM
-
- prochot_override_duty_cycle
- duty cycle for the PWM signal used when
- #PROCHOT is overridden
-
- prochot<n>_interval #PROCHOT PWM sampling interval
-
- vrdhot<n> 0 means negated, 1 means asserted
-
- fan<n>_smart_tach enable or disable smart tach mode
-
- pwm<n>_auto_channels select control sources for PWM outputs
-
- pwm<n>_auto_spinup_min minimum duty cycle during spin-up
-
- pwm<n>_auto_spinup_time duration of spin-up
-
- pwm_auto_prochot_ramp ramp time per step when #PROCHOT asserted
-
- pwm_auto_vrdhot_ramp ramp time per step when #VRDHOT asserted
-
- temp<n>_auto_base temperature channel base
-
- temp<n>_auto_offset[1-12]
- temperature channel offsets
-
- temp<n>_auto_offset_hyst
- temperature channel offset hysteresis
-
- temp<n>_auto_boost temperature channel boost (PWMs to 100%) limit
-
- temp<n>_auto_boost_hyst temperature channel boost hysteresis
-
- gpio input state of 8 GPIO pins; read-only
-
+=========================== ===============================================
+file description
+=========================== ===============================================
+prochot<n> current #PROCHOT %
+prochot<n>_avg moving average #PROCHOT %
+prochot<n>_max limit #PROCHOT %
+prochot_short enable or disable logical #PROCHOT pin short
+prochot<n>_override force #PROCHOT assertion as PWM
+prochot_override_duty_cycle duty cycle for the PWM signal used when
+ #PROCHOT is overridden
+prochot<n>_interval #PROCHOT PWM sampling interval
+vrdhot<n> 0 means negated, 1 means asserted
+fan<n>_smart_tach enable or disable smart tach mode
+pwm<n>_auto_channels select control sources for PWM outputs
+pwm<n>_auto_spinup_min minimum duty cycle during spin-up
+pwm<n>_auto_spinup_time duration of spin-up
+pwm_auto_prochot_ramp ramp time per step when #PROCHOT asserted
+pwm_auto_vrdhot_ramp ramp time per step when #VRDHOT asserted
+temp<n>_auto_base temperature channel base
+temp<n>_auto_offset[1-12] temperature channel offsets
+temp<n>_auto_offset_hyst temperature channel offset hysteresis
+temp<n>_auto_boost temperature channel boost (PWMs to 100%)
+ limit
+temp<n>_auto_boost_hyst temperature channel boost hysteresis
+gpio input state of 8 GPIO pins; read-only
+=========================== ===============================================
diff --git a/Documentation/hwmon/lm95234 b/Documentation/hwmon/lm95234.rst
index 32b777ef224c..e4c14bea5efd 100644
--- a/Documentation/hwmon/lm95234
+++ b/Documentation/hwmon/lm95234.rst
@@ -2,15 +2,22 @@ Kernel driver lm95234
=====================
Supported chips:
+
* National Semiconductor / Texas Instruments LM95233
+
Addresses scanned: I2C 0x18, 0x2a, 0x2b
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/product/lm95233
+
+ http://www.ti.com/product/lm95233
+
* National Semiconductor / Texas Instruments LM95234
+
Addresses scanned: I2C 0x18, 0x4d, 0x4e
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/product/lm95234
+ http://www.ti.com/product/lm95234
Author: Guenter Roeck <linux@roeck-us.net>
diff --git a/Documentation/hwmon/lm95245 b/Documentation/hwmon/lm95245.rst
index d755901f58c4..566d1dc8c5a6 100644
--- a/Documentation/hwmon/lm95245
+++ b/Documentation/hwmon/lm95245.rst
@@ -1,16 +1,23 @@
Kernel driver lm95245
-==================
+=====================
Supported chips:
+
* TI LM95235
+
Addresses scanned: I2C 0x18, 0x29, 0x4c
+
Datasheet: Publicly available at the TI website
- http://www.ti.com/lit/ds/symlink/lm95235.pdf
+
+ http://www.ti.com/lit/ds/symlink/lm95235.pdf
+
* TI / National Semiconductor LM95245
+
Addresses scanned: I2C 0x18, 0x19, 0x29, 0x4c, 0x4d
+
Datasheet: Publicly available at the TI website
- http://www.ti.com/lit/ds/symlink/lm95245.pdf
+ http://www.ti.com/lit/ds/symlink/lm95245.pdf
Author: Alexander Stein <alexander.stein@systec-electronic.com>
diff --git a/Documentation/hwmon/lochnagar.rst b/Documentation/hwmon/lochnagar.rst
new file mode 100644
index 000000000000..1d609c4d18c3
--- /dev/null
+++ b/Documentation/hwmon/lochnagar.rst
@@ -0,0 +1,83 @@
+Kernel Driver Lochnagar
+=======================
+
+Supported systems:
+ * Cirrus Logic: Lochnagar 2
+
+Author: Lucas A. Tanure Alves
+
+Description
+-----------
+
+Lochnagar 2 features built-in Current Monitor circuitry that allows for the
+measurement of both voltage and current on up to eight of the supply voltage
+rails provided to the minicards. The Current Monitor does not require any
+hardware modifications or external circuitry to operate.
+
+The current and voltage measurements are obtained through the standard register
+map interface to the Lochnagar board controller, and can therefore be monitored
+by software.
+
+Sysfs attributes
+----------------
+
+======================= =======================================================
+temp1_input The Lochnagar board temperature (milliCelsius)
+in0_input Measured voltage for DBVDD1 (milliVolts)
+in0_label "DBVDD1"
+curr1_input Measured current for DBVDD1 (milliAmps)
+curr1_label "DBVDD1"
+power1_average Measured average power for DBVDD1 (microWatts)
+power1_average_interval Power averaging time, valid from 1 to 1708 ms
+power1_label "DBVDD1"
+in1_input Measured voltage for 1V8 DSP (milliVolts)
+in1_label "1V8 DSP"
+curr2_input Measured current for 1V8 DSP (milliAmps)
+curr2_label "1V8 DSP"
+power2_average Measured average power for 1V8 DSP (microWatts)
+power2_average_interval Power averaging time, valid from 1 to 1708 ms
+power2_label "1V8 DSP"
+in2_input Measured voltage for 1V8 CDC (milliVolts)
+in2_label "1V8 CDC"
+curr3_input Measured current for 1V8 CDC (milliAmps)
+curr3_label "1V8 CDC"
+power3_average Measured average power for 1V8 CDC (microWatts)
+power3_average_interval Power averaging time, valid from 1 to 1708 ms
+power3_label "1V8 CDC"
+in3_input Measured voltage for VDDCORE DSP (milliVolts)
+in3_label "VDDCORE DSP"
+curr4_input Measured current for VDDCORE DSP (milliAmps)
+curr4_label "VDDCORE DSP"
+power4_average Measured average power for VDDCORE DSP (microWatts)
+power4_average_interval Power averaging time, valid from 1 to 1708 ms
+power4_label "VDDCORE DSP"
+in4_input Measured voltage for AVDD 1V8 (milliVolts)
+in4_label "AVDD 1V8"
+curr5_input Measured current for AVDD 1V8 (milliAmps)
+curr5_label "AVDD 1V8"
+power5_average Measured average power for AVDD 1V8 (microWatts)
+power5_average_interval Power averaging time, valid from 1 to 1708 ms
+power5_label "AVDD 1V8"
+curr6_input Measured current for SYSVDD (milliAmps)
+curr6_label "SYSVDD"
+power6_average Measured average power for SYSVDD (microWatts)
+power6_average_interval Power averaging time, valid from 1 to 1708 ms
+power6_label "SYSVDD"
+in6_input Measured voltage for VDDCORE CDC (milliVolts)
+in6_label "VDDCORE CDC"
+curr7_input Measured current for VDDCORE CDC (milliAmps)
+curr7_label "VDDCORE CDC"
+power7_average Measured average power for VDDCORE CDC (microWatts)
+power7_average_interval Power averaging time, valid from 1 to 1708 ms
+power7_label "VDDCORE CDC"
+in7_input Measured voltage for MICVDD (milliVolts)
+in7_label "MICVDD"
+curr8_input Measured current for MICVDD (milliAmps)
+curr8_label "MICVDD"
+power8_average Measured average power for MICVDD (microWatts)
+power8_average_interval Power averaging time, valid from 1 to 1708 ms
+power8_label "MICVDD"
+======================= =======================================================
+
+Note:
+ It is not possible to measure voltage on the SYSVDD rail.
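+
+A minimal monitoring sketch for one rail (the hwmon node name is an
+assumption; match the device via its name attribute under
+/sys/class/hwmon)::
+
+  # cd /sys/class/hwmon/hwmon3
+  # echo 1000 > power1_average_interval   # average over roughly 1 s
+  # cat power1_label power1_average       # rail name and microWatts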
diff --git a/Documentation/hwmon/ltc2945 b/Documentation/hwmon/ltc2945.rst
index f8d0f7f19adb..20c884985367 100644
--- a/Documentation/hwmon/ltc2945
+++ b/Documentation/hwmon/ltc2945.rst
@@ -2,11 +2,16 @@ Kernel driver ltc2945
=====================
Supported chips:
+
* Linear Technology LTC2945
+
Prefix: 'ltc2945'
+
Addresses scanned: -
+
Datasheet:
- http://cds.linear.com/docs/en/datasheet/2945fa.pdf
+
+ http://cds.linear.com/docs/en/datasheet/2945fa.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -26,9 +31,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for an LTC2945 at address 0x10
-on I2C bus #1:
-$ modprobe ltc2945
-$ echo ltc2945 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe ltc2945
+ $ echo ltc2945 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -45,6 +51,7 @@ Current Sense register. The reported value assumes that a 1 mOhm sense resistor
is installed. If a different sense resistor is installed, calculate the real
current by dividing the reported value by the sense resistor value in mOhm.
+======================= ========================================================
in1_input VIN voltage (mV). Voltage is measured either at
SENSE+ or VDD pin depending on chip configuration.
in1_min Undervoltage threshold
@@ -82,3 +89,4 @@ power1_input_highest Historical maximum power use
power1_reset_history Write 1 to reset power1 history
power1_min_alarm Low power alarm
power1_max_alarm High power alarm
+======================= ========================================================
diff --git a/Documentation/hwmon/ltc2978 b/Documentation/hwmon/ltc2978.rst
index dfb2caa401d9..01a24fd6d5fe 100644
--- a/Documentation/hwmon/ltc2978
+++ b/Documentation/hwmon/ltc2978.rst
@@ -2,85 +2,143 @@ Kernel driver ltc2978
=====================
Supported chips:
+
* Linear Technology LTC2974
+
Prefix: 'ltc2974'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2974
+
* Linear Technology LTC2975
+
Prefix: 'ltc2975'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2975
+
* Linear Technology LTC2977
+
Prefix: 'ltc2977'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2977
+
* Linear Technology LTC2978, LTC2978A
+
Prefix: 'ltc2978'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2978
- http://www.linear.com/product/ltc2978a
+
+ http://www.linear.com/product/ltc2978a
+
* Linear Technology LTC2980
+
Prefix: 'ltc2980'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2980
+
* Linear Technology LTC3880
+
Prefix: 'ltc3880'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3880
+
* Linear Technology LTC3882
+
Prefix: 'ltc3882'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3882
+
* Linear Technology LTC3883
+
Prefix: 'ltc3883'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3883
+
* Linear Technology LTC3886
+
Prefix: 'ltc3886'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3886
+
* Linear Technology LTC3887
+
Prefix: 'ltc3887'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3887
+
* Linear Technology LTM2987
+
Prefix: 'ltm2987'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltm2987
+
* Linear Technology LTM4675
+
Prefix: 'ltm4675'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltm4675
+
* Linear Technology LTM4676
+
Prefix: 'ltm4676'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltm4676
+
* Analog Devices LTM4686
+
Prefix: 'ltm4686'
+
Addresses scanned: -
+
Datasheet: http://www.analog.com/ltm4686
+
Author: Guenter Roeck <linux@roeck-us.net>
Description
-----------
-LTC2974 and LTC2975 are quad digital power supply managers.
-LTC2978 is an octal power supply monitor.
-LTC2977 is a pin compatible replacement for LTC2978.
-LTC2980 is a 16-channel Power System Manager, consisting of two LTC2977
-in a single die. The chip is instantiated and reported as two separate chips
-on two different I2C bus addresses.
-LTC3880, LTC3882, LTC3886, and LTC3887 are dual output poly-phase step-down
-DC/DC controllers.
-LTC3883 is a single phase step-down DC/DC controller.
-LTM2987 is a 16-channel Power System Manager with two LTC2977 plus
-additional components on a single die. The chip is instantiated and reported
-as two separate chips on two different I2C bus addresses.
-LTM4675 is a dual 9A or single 18A μModule regulator
-LTM4676 is a dual 13A or single 26A uModule regulator.
-LTM4686 is a dual 10A or single 20A uModule regulator.
+- LTC2974 and LTC2975 are quad digital power supply managers.
+- LTC2978 is an octal power supply monitor.
+- LTC2977 is a pin compatible replacement for LTC2978.
+- LTC2980 is a 16-channel Power System Manager, consisting of two LTC2977
+  in a single die. The chip is instantiated and reported as two separate
+  chips on two different I2C bus addresses.
+- LTC3880, LTC3882, LTC3886, and LTC3887 are dual output poly-phase
+  step-down DC/DC controllers.
+- LTC3883 is a single phase step-down DC/DC controller.
+- LTM2987 is a 16-channel Power System Manager with two LTC2977 plus
+  additional components on a single die. The chip is instantiated and
+  reported as two separate chips on two different I2C bus addresses.
+- LTM4675 is a dual 9A or single 18A μModule regulator.
+- LTM4676 is a dual 13A or single 26A μModule regulator.
+- LTM4686 is a dual 10A or single 20A μModule regulator.
Usage Notes
@@ -90,127 +148,208 @@ This driver does not probe for PMBus devices. You will have to instantiate
devices explicitly.
Example: the following commands will load the driver for an LTC2978 at address
-0x60 on I2C bus #1:
+0x60 on I2C bus #1::
-# modprobe ltc2978
-# echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
+ # modprobe ltc2978
+ # echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs attributes
----------------
+======================= ========================================================
in1_label "vin"
+
in1_input Measured input voltage.
+
in1_min Minimum input voltage.
+
in1_max Maximum input voltage.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
+
in1_lcrit Critical minimum input voltage.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
+
in1_crit Critical maximum input voltage.
+
in1_min_alarm Input voltage low alarm.
+
in1_max_alarm Input voltage high alarm.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
in1_lcrit_alarm Input voltage critical low alarm.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
in1_crit_alarm Input voltage critical high alarm.
+
in1_lowest Lowest input voltage.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
in1_highest Highest input voltage.
+
in1_reset_history Reset input voltage history.
in[N]_label "vout[1-8]".
- LTC2974, LTC2975: N=2-5
- LTC2977, LTC2980, LTM2987: N=2-9
- LTC2978: N=2-9
- LTC3880, LTC3882, LTC23886 LTC3887, LTM4675, LTM4676:
- N=2-3
- LTC3883: N=2
+
+ - LTC2974, LTC2975: N=2-5
+ - LTC2977, LTC2980, LTM2987: N=2-9
+ - LTC2978: N=2-9
+                          - LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
+ N=2-3
+ - LTC3883: N=2
+
in[N]_input Measured output voltage.
+
in[N]_min Minimum output voltage.
+
in[N]_max Maximum output voltage.
+
in[N]_lcrit Critical minimum output voltage.
+
in[N]_crit Critical maximum output voltage.
+
in[N]_min_alarm Output voltage low alarm.
+
in[N]_max_alarm Output voltage high alarm.
+
in[N]_lcrit_alarm Output voltage critical low alarm.
+
in[N]_crit_alarm Output voltage critical high alarm.
-in[N]_lowest Lowest output voltage. LTC2974, LTC2975,
- and LTC2978 only.
+
+in[N]_lowest Lowest output voltage.
+
+                        LTC2974, LTC2975, and LTC2978 only.
+
in[N]_highest Highest output voltage.
+
in[N]_reset_history Reset output voltage history.
temp[N]_input Measured temperature.
- On LTC2974 and LTC2975, temp[1-4] report external
- temperatures, and temp5 reports the chip temperature.
- On LTC2977, LTC2980, LTC2978, and LTM2987, only one
- temperature measurement is supported and reports
- the chip temperature.
- On LTC3880, LTC3882, LTC3887, LTM4675, and LTM4676,
- temp1 and temp2 report external temperatures, and temp3
- reports the chip temperature.
- On LTC3883, temp1 reports an external temperature,
- and temp2 reports the chip temperature.
-temp[N]_min Mimimum temperature. LTC2974, LCT2977, LTM2980, LTC2978,
- and LTM2987 only.
+
+ - On LTC2974 and LTC2975, temp[1-4] report external
+ temperatures, and temp5 reports the chip temperature.
+ - On LTC2977, LTC2980, LTC2978, and LTM2987, only one
+ temperature measurement is supported and reports
+ the chip temperature.
+ - On LTC3880, LTC3882, LTC3887, LTM4675, and LTM4676,
+ temp1 and temp2 report external temperatures, and
+ temp3 reports the chip temperature.
+ - On LTC3883, temp1 reports an external temperature,
+ and temp2 reports the chip temperature.
+
+temp[N]_min             Minimum temperature.
+
+ LTC2974, LCT2977, LTM2980, LTC2978, and LTM2987 only.
+                        LTC2974, LTC2977, LTC2980, LTC2978, and LTM2987 only.
temp[N]_max Maximum temperature.
+
temp[N]_lcrit Critical low temperature.
+
temp[N]_crit Critical high temperature.
+
temp[N]_min_alarm Temperature low alarm.
+
LTC2974, LTC2975, LTC2977, LTM2980, LTC2978, and
LTM2987 only.
+
temp[N]_max_alarm Temperature high alarm.
+
temp[N]_lcrit_alarm Temperature critical low alarm.
+
temp[N]_crit_alarm Temperature critical high alarm.
+
temp[N]_lowest Lowest measured temperature.
- LTC2974, LTC2975, LTC2977, LTM2980, LTC2978, and
- LTM2987 only.
- Not supported for chip temperature sensor on LTC2974 and
- LTC2975.
-temp[N]_highest Highest measured temperature. Not supported for chip
- temperature sensor on LTC2974 and LTC2975.
-temp[N]_reset_history Reset temperature history. Not supported for chip
- temperature sensor on LTC2974 and LTC2975.
+
+                          - LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
+ LTM2987 only.
+ - Not supported for chip temperature sensor on LTC2974
+ and LTC2975.
+
+temp[N]_highest Highest measured temperature.
+
+ Not supported for chip temperature sensor on
+ LTC2974 and LTC2975.
+
+temp[N]_reset_history Reset temperature history.
+
+ Not supported for chip temperature sensor on
+ LTC2974 and LTC2975.
power1_label "pin". LTC3883 and LTC3886 only.
+
power1_input Measured input power.
power[N]_label "pout[1-4]".
- LTC2974, LTC2975: N=1-4
- LTC2977, LTC2980, LTM2987: Not supported
- LTC2978: Not supported
- LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
- N=1-2
- LTC3883: N=2
+
+ - LTC2974, LTC2975: N=1-4
+ - LTC2977, LTC2980, LTM2987: Not supported
+ - LTC2978: Not supported
+ - LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
+ N=1-2
+ - LTC3883: N=2
+
power[N]_input Measured output power.
-curr1_label "iin". LTC3880, LTC3883, LTC3886, LTC3887, LTM4675,
+curr1_label "iin".
+
+ LTC3880, LTC3883, LTC3886, LTC3887, LTM4675,
and LTM4676 only.
+
curr1_input Measured input current.
+
curr1_max Maximum input current.
+
curr1_max_alarm Input current high alarm.
-curr1_highest Highest input current. LTC3883 and LTC3886 only.
-curr1_reset_history Reset input current history. LTC3883 and LTC3886 only.
+
+curr1_highest Highest input current.
+
+ LTC3883 and LTC3886 only.
+
+curr1_reset_history Reset input current history.
+
+ LTC3883 and LTC3886 only.
curr[N]_label "iout[1-4]".
- LTC2974, LTC2975: N=1-4
- LTC2977, LTC2980, LTM2987: not supported
- LTC2978: not supported
- LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
- N=2-3
- LTC3883: N=2
+
+ - LTC2974, LTC2975: N=1-4
+ - LTC2977, LTC2980, LTM2987: not supported
+ - LTC2978: not supported
+ - LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
+ N=2-3
+ - LTC3883: N=2
+
curr[N]_input Measured output current.
+
curr[N]_max Maximum output current.
+
curr[N]_crit Critical high output current.
-curr[N]_lcrit Critical low output current. LTC2974 and LTC2975 only.
+
+curr[N]_lcrit Critical low output current.
+
+ LTC2974 and LTC2975 only.
+
curr[N]_max_alarm Output current high alarm.
+
curr[N]_crit_alarm Output current critical high alarm.
+
curr[N]_lcrit_alarm Output current critical low alarm.
+
+ LTC2974 and LTC2975 only.
+
+curr[N]_lowest Lowest output current.
+
LTC2974 and LTC2975 only.
-curr[N]_lowest Lowest output current. LTC2974 and LTC2975 only.
+
curr[N]_highest Highest output current.
+
curr[N]_reset_history Reset output current history.
+======================= ========================================================
diff --git a/Documentation/hwmon/ltc2990 b/Documentation/hwmon/ltc2990.rst
index 3ed68f676c0f..e0a369e679d3 100644
--- a/Documentation/hwmon/ltc2990
+++ b/Documentation/hwmon/ltc2990.rst
@@ -1,14 +1,23 @@
Kernel driver ltc2990
=====================
+
Supported chips:
+
* Linear Technology LTC2990
+
Prefix: 'ltc2990'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2990
-Author: Mike Looijmans <mike.looijmans@topic.nl>
- Tom Levens <tom.levens@cern.ch>
+
+Authors:
+
+ - Mike Looijmans <mike.looijmans@topic.nl>
+ - Tom Levens <tom.levens@cern.ch>
Description
@@ -31,17 +40,21 @@ devices explicitly.
Sysfs attributes
----------------
+============= ==================================================
in0_input Voltage at Vcc pin in millivolt (range 2.5V to 5V)
-temp1_input Internal chip temperature in millidegrees Celcius
+temp1_input Internal chip temperature in millidegrees Celsius
+============= ==================================================
A subset of the following attributes are visible, depending on the measurement
mode of the chip.
+============= ==========================================================
in[1-4]_input Voltage at V[1-4] pin in millivolt
-temp2_input External temperature sensor TR1 in millidegrees Celcius
-temp3_input External temperature sensor TR2 in millidegrees Celcius
+temp2_input External temperature sensor TR1 in millidegrees Celsius
+temp3_input External temperature sensor TR2 in millidegrees Celsius
curr1_input Current in mA across V1-V2 assuming a 1mOhm sense resistor
curr2_input Current in mA across V3-V4 assuming a 1mOhm sense resistor
+============= ==========================================================
The "curr*_input" measurements actually report the voltage drop across the
input pins in microvolts. This is equivalent to the current through a 1mOhm
diff --git a/Documentation/hwmon/ltc3815 b/Documentation/hwmon/ltc3815.rst
index eb7db2d13587..fb0135fc1925 100644
--- a/Documentation/hwmon/ltc3815
+++ b/Documentation/hwmon/ltc3815.rst
@@ -2,9 +2,13 @@ Kernel driver ltc3815
=====================
Supported chips:
+
* Linear Technology LTC3815
+
Prefix: 'ltc3815'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3815
Author: Guenter Roeck <linux@roeck-us.net>
@@ -23,15 +27,16 @@ This driver does not probe for PMBus devices. You will have to instantiate
devices explicitly.
Example: the following commands will load the driver for an LTC3815
-at address 0x20 on I2C bus #1:
+at address 0x20 on I2C bus #1::
-# modprobe ltc3815
-# echo ltc3815 0x20 > /sys/bus/i2c/devices/i2c-1/new_device
+ # modprobe ltc3815
+ # echo ltc3815 0x20 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs attributes
----------------
+======================= =======================================================
in1_label "vin"
in1_input Measured input voltage.
in1_alarm Input voltage alarm.
@@ -59,3 +64,4 @@ curr2_input Measured output current.
curr2_alarm Output current alarm.
curr2_highest Highest output current.
curr2_reset_history Reset output current history.
+======================= =======================================================
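As a quick check that the instantiation above worked, the new attributes can
be read through the hwmon class. A minimal sketch, assuming the device bound
as hwmon2 (the hwmon index varies per system)::

	$ grep -l ltc3815 /sys/class/hwmon/hwmon*/name
	/sys/class/hwmon/hwmon2/name
	$ cat /sys/class/hwmon/hwmon2/in1_input      # measured input voltage
	$ cat /sys/class/hwmon/hwmon2/curr2_highest  # highest output current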
diff --git a/Documentation/hwmon/ltc4151 b/Documentation/hwmon/ltc4151.rst
index 43c667e6677a..c39229b19624 100644
--- a/Documentation/hwmon/ltc4151
+++ b/Documentation/hwmon/ltc4151.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4151
=====================
Supported chips:
+
* Linear Technology LTC4151
+
Prefix: 'ltc4151'
+
Addresses scanned: -
+
Datasheet:
- http://www.linear.com/docs/Datasheet/4151fc.pdf
+
+ http://www.linear.com/docs/Datasheet/4151fc.pdf
Author: Per Dalen <per.dalen@appeartv.com>
@@ -25,9 +30,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for an LTC4151 at address 0x6f
-on I2C bus #0:
-# modprobe ltc4151
-# echo ltc4151 0x6f > /sys/bus/i2c/devices/i2c-0/new_device
+on I2C bus #0::
+
+ # modprobe ltc4151
+ # echo ltc4151 0x6f > /sys/bus/i2c/devices/i2c-0/new_device
Sysfs entries
@@ -40,8 +46,10 @@ Current reading provided by this driver is reported as obtained from the Current
Sense register. The reported value assumes that a 1 mOhm sense resistor is
installed.
+======================= ==================
in1_input VDIN voltage (mV)
in2_input ADIN voltage (mV)
curr1_input SENSE current (mA)
+======================= ==================
diff --git a/Documentation/hwmon/ltc4215 b/Documentation/hwmon/ltc4215.rst
index c196a1846259..8d5044d99bab 100644
--- a/Documentation/hwmon/ltc4215
+++ b/Documentation/hwmon/ltc4215.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4215
=====================
Supported chips:
+
* Linear Technology LTC4215
+
Prefix: 'ltc4215'
+
Addresses scanned: 0x44
+
Datasheet:
- http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1163,P17572,D12697
+
+ http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1163,P17572,D12697
Author: Ira W. Snyder <iws@ovro.caltech.edu>
@@ -26,9 +31,10 @@ of the possible addresses are unfriendly to probing. You will have to
instantiate the devices explicitly.
Example: the following will load the driver for an LTC4215 at address 0x44
-on I2C bus #0:
-$ modprobe ltc4215
-$ echo ltc4215 0x44 > /sys/bus/i2c/devices/i2c-0/new_device
+on I2C bus #0::
+
+ $ modprobe ltc4215
+ $ echo ltc4215 0x44 > /sys/bus/i2c/devices/i2c-0/new_device
Sysfs entries
@@ -38,6 +44,7 @@ The LTC4215 has built-in limits for overvoltage, undervoltage, and
undercurrent warnings. This makes it very likely that the reference
circuit will be used.
+======================= =========================
in1_input input voltage
in2_input output voltage
@@ -49,3 +56,4 @@ curr1_max_alarm overcurrent alarm
power1_input power usage
power1_alarm power bad alarm
+======================= =========================
diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245.rst
index 4ca7a9da09f9..3dafd08a4e87 100644
--- a/Documentation/hwmon/ltc4245
+++ b/Documentation/hwmon/ltc4245.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4245
=====================
Supported chips:
+
* Linear Technology LTC4245
+
Prefix: 'ltc4245'
+
Addresses scanned: 0x20-0x3f
+
Datasheet:
- http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1140,P19392,D13517
+
+ http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1140,P19392,D13517
Author: Ira W. Snyder <iws@ovro.caltech.edu>
@@ -27,9 +32,10 @@ of the possible addresses are unfriendly to probing. You will have to
instantiate the devices explicitly.
Example: the following will load the driver for an LTC4245 at address 0x23
-on I2C bus #1:
-$ modprobe ltc4245
-$ echo ltc4245 0x23 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe ltc4245
+ $ echo ltc4245 0x23 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -42,6 +48,7 @@ This driver uses the values in the datasheet to change the register values
into the values specified in the sysfs-interface document. The current readings
rely on the sense resistors listed in Table 2: "Sense Resistor Values".
+======================= =======================================================
in1_input 12v input voltage (mV)
in2_input 5v input voltage (mV)
in3_input 3v input voltage (mV)
@@ -80,6 +87,7 @@ power1_input 12v power usage (mW)
power2_input 5v power usage (mW)
power3_input 3v power usage (mW)
power4_input Vee (-12v) power usage (mW)
+======================= =======================================================
Note 1
@@ -96,6 +104,7 @@ slowly, -EAGAIN will be returned when you read the sysfs attribute containing
the sensor reading.
The LTC4245 chip can be configured to sample all GPIO pins with two methods:
+
1) platform data -- see include/linux/platform_data/ltc4245.h
2) OF device tree -- add the "ltc4245,use-extra-gpios" property to each chip
diff --git a/Documentation/hwmon/ltc4260 b/Documentation/hwmon/ltc4260.rst
index c4ff4ad998b2..4c335b6a51d1 100644
--- a/Documentation/hwmon/ltc4260
+++ b/Documentation/hwmon/ltc4260.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4260
=====================
Supported chips:
+
* Linear Technology LTC4260
+
Prefix: 'ltc4260'
+
Addresses scanned: -
+
Datasheet:
- http://cds.linear.com/docs/en/datasheet/4260fc.pdf
+
+ http://cds.linear.com/docs/en/datasheet/4260fc.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -26,9 +31,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for an LTC4260 at address 0x10
-on I2C bus #1:
-$ modprobe ltc4260
-$ echo ltc4260 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe ltc4260
+ $ echo ltc4260 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -45,6 +51,7 @@ Current Sense register. The reported value assumes that a 1 mOhm sense resistor
is installed. If a different sense resistor is installed, calculate the real
current by dividing the reported value by the sense resistor value in mOhm.
+======================= =======================
in1_input SOURCE voltage (mV)
in1_min_alarm Undervoltage alarm
in1_max_alarm Overvoltage alarm
@@ -54,3 +61,4 @@ in2_alarm Power bad alarm
curr1_input SENSE current (mA)
curr1_alarm SENSE overcurrent alarm
+======================= =======================
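To make the scaling rule above concrete: with a 5 mOhm sense resistor, a
reported value of 1500 corresponds to 1500 / 5 = 300 mA. A shell sketch,
using an illustrative hwmon path::

	$ raw=$(cat /sys/class/hwmon/hwmon3/curr1_input)
	$ echo $((raw / 5))   # real current in mA for a 5 mOhm shunt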
diff --git a/Documentation/hwmon/ltc4261 b/Documentation/hwmon/ltc4261.rst
index 9378a75c6134..c80233f8082e 100644
--- a/Documentation/hwmon/ltc4261
+++ b/Documentation/hwmon/ltc4261.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4261
=====================
Supported chips:
+
* Linear Technology LTC4261
+
Prefix: 'ltc4261'
+
Addresses scanned: -
+
Datasheet:
- http://cds.linear.com/docs/Datasheet/42612fb.pdf
+
+ http://cds.linear.com/docs/Datasheet/42612fb.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -26,9 +31,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for an LTC4261 at address 0x10
-on I2C bus #1:
-$ modprobe ltc4261
-$ echo ltc4261 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe ltc4261
+ $ echo ltc4261 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -51,6 +57,7 @@ the proximity of the ADIN2 pin to the OV pin. ADIN2 is, however, not available
on all chip variants. To ensure that the alarm condition is reported to the user,
report it with both voltage sensors.
+======================= =============================
in1_input ADIN2 voltage (mV)
in1_min_alarm ADIN/ADIN2 Undervoltage alarm
in1_max_alarm ADIN/ADIN2 Overvoltage alarm
@@ -61,3 +68,4 @@ in2_max_alarm ADIN/ADIN2 Overvoltage alarm
curr1_input SENSE current (mA)
curr1_alarm SENSE overcurrent alarm
+======================= =============================
diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064.rst
index 265370f5cb82..6d5e9538991f 100644
--- a/Documentation/hwmon/max16064
+++ b/Documentation/hwmon/max16064.rst
@@ -2,9 +2,13 @@ Kernel driver max16064
======================
Supported chips:
+
* Maxim MAX16064
+
Prefix: 'max16064'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -17,7 +21,7 @@ This driver supports hardware monitoring for Maxim MAX16064 Quad Power-Supply
Controller with Active-Voltage Output Control and PMBus Interface.
The driver is a client driver to the core PMBus driver.
-Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -40,16 +44,20 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in[1-4]_label "vout[1-4]"
in[1-4]_input Measured voltage. From READ_VOUT register.
in[1-4]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in[1-4]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in[1-4]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in[1-4]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in[1-4]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
in[1-4]_highest Historical maximum voltage.
in[1-4]_reset_history Write any value to reset history.
@@ -64,3 +72,4 @@ temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
status is set.
temp1_highest Historical maximum temperature.
temp1_reset_history Write any value to reset history.
+======================= ========================================================
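Since the history attributes are cleared by writing any value, a typical
read-then-reset sequence looks like this (a sketch; the hwmon index is
system-specific)::

	$ cat /sys/class/hwmon/hwmon1/in1_highest
	$ echo 1 > /sys/class/hwmon/hwmon1/in1_reset_history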
diff --git a/Documentation/hwmon/max16065 b/Documentation/hwmon/max16065.rst
index 208a29e43010..fa5c852a178c 100644
--- a/Documentation/hwmon/max16065
+++ b/Documentation/hwmon/max16065.rst
@@ -1,28 +1,48 @@
Kernel driver max16065
======================
+
Supported chips:
+
* Maxim MAX16065, MAX16066
+
Prefixes: 'max16065', 'max16066'
+
Addresses scanned: -
+
Datasheet:
+
http://datasheets.maxim-ic.com/en/ds/MAX16065-MAX16066.pdf
+
* Maxim MAX16067
+
Prefix: 'max16067'
+
Addresses scanned: -
+
Datasheet:
+
http://datasheets.maxim-ic.com/en/ds/MAX16067.pdf
+
* Maxim MAX16068
+
Prefix: 'max16068'
+
Addresses scanned: -
+
Datasheet:
+
http://datasheets.maxim-ic.com/en/ds/MAX16068.pdf
+
* Maxim MAX16070/MAX16071
+
Prefixes: 'max16070', 'max16071'
+
Addresses scanned: -
+
Datasheet:
- http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf
+ http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -73,6 +93,7 @@ turn into a brick.
Sysfs entries
-------------
+======================= ========================================================
in[0-11]_input Input voltage measurements.
in12_input Voltage on CSP (Current Sense Positive) pin.
@@ -103,3 +124,4 @@ curr1_input Current sense input; only if the chip supports current
curr1_alarm Overcurrent alarm; only if the chip supports current
sensing and if current sensing is enabled.
+======================= ========================================================
diff --git a/Documentation/hwmon/max1619 b/Documentation/hwmon/max1619.rst
index 518bae3a80c4..e25956e70f73 100644
--- a/Documentation/hwmon/max1619
+++ b/Documentation/hwmon/max1619.rst
@@ -2,15 +2,20 @@ Kernel driver max1619
=====================
Supported chips:
+
* Maxim MAX1619
+
Prefix: 'max1619'
+
Addresses scanned: I2C 0x18-0x1a, 0x29-0x2b, 0x4c-0x4e
+
Datasheet: Publicly available at the Maxim website
- http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf
+
+ http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf
Authors:
- Oleksij Rempel <bug-track@fisher-privat.net>,
- Jean Delvare <jdelvare@suse.de>
+  - Oleksij Rempel <bug-track@fisher-privat.net>

+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
@@ -26,4 +31,3 @@ Only the external sensor has high and low limits.
The max1619 driver will not update its values more frequently than every
other second; reading them more often will do no harm, but will return
'old' values.
-
diff --git a/Documentation/hwmon/max1668 b/Documentation/hwmon/max1668.rst
index 8f9d570dbfec..417f17d750e6 100644
--- a/Documentation/hwmon/max1668
+++ b/Documentation/hwmon/max1668.rst
@@ -2,12 +2,17 @@ Kernel driver max1668
=====================
Supported chips:
+
* Maxim MAX1668, MAX1805 and MAX1989
+
Prefix: 'max1668'
+
Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX1668-MAX1989.pdf
Author:
+
David George <david.george@ska.ac.za>
Description
@@ -23,8 +28,9 @@ two ICs.
The driver is able to distinguish between the devices and creates sysfs
entries as follows:
-MAX1805, MAX1668 and MAX1989:
+- MAX1805, MAX1668 and MAX1989:
+=============== == ============================================================
temp1_input ro local (ambient) temperature
temp1_max rw local temperature maximum threshold for alarm
temp1_max_alarm ro local temperature maximum threshold alarm
@@ -40,8 +46,11 @@ temp3_max rw remote temperature 2 maximum threshold for alarm
temp3_max_alarm ro remote temperature 2 maximum threshold alarm
temp3_min rw remote temperature 2 minimum threshold for alarm
temp3_min_alarm ro remote temperature 2 minimum threshold alarm
+=============== == ============================================================
+
+- MAX1668 and MAX1989 only:
-MAX1668 and MAX1989 only:
+=============== == ============================================================
temp4_input ro remote temperature 3
temp4_max rw remote temperature 3 maximum threshold for alarm
temp4_max_alarm ro remote temperature 3 maximum threshold alarm
@@ -52,6 +61,7 @@ temp5_max rw remote temperature 4 maximum threshold for alarm
temp5_max_alarm ro remote temperature 4 maximum threshold alarm
temp5_min rw remote temperature 4 minimum threshold for alarm
temp5_min_alarm ro remote temperature 4 minimum threshold alarm
+=============== == ============================================================
Module Parameters
-----------------
diff --git a/Documentation/hwmon/max197 b/Documentation/hwmon/max197.rst
index 8d89b9009df8..02fe19bc3428 100644
--- a/Documentation/hwmon/max197
+++ b/Documentation/hwmon/max197.rst
@@ -1,16 +1,22 @@
-Maxim MAX197 driver
-===================
+Kernel driver max197
+====================
Author:
+
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
Supported chips:
+
* Maxim MAX197
+
Prefix: 'max197'
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX197.pdf
* Maxim MAX199
+
Prefix: 'max199'
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX199.pdf
Description
@@ -26,7 +32,7 @@ Platform data
-------------
The MAX197 platform data (defined in linux/platform_data/max197.h) should be
-filled with a pointer to a conversion function, defined like:
+filled with a pointer to a conversion function, defined like::
int convert(u8 ctrl);
@@ -36,25 +42,29 @@ or a negative error code otherwise.
Control byte format:
+======= ========== ============================================
Bit Name Description
7,6 PD1,PD0 Clock and Power-Down modes
5 ACQMOD Internal or External Controlled Acquisition
4 RNG Full-scale voltage magnitude at the input
3 BIP Unipolar or Bipolar conversion mode
2,1,0 A2,A1,A0 Channel
+======= ========== ============================================
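As a worked example of the layout above: channel 5 (A2,A1,A0 = 101),
BIP = 1, RNG = 1, ACQMOD = 0 and PD1,PD0 = 00 compose to 0b00011101
(bit meanings per the table; the chosen values are purely illustrative)::

	$ printf '0x%02x\n' "$((2#00011101))"
	0x1d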
Sysfs interface
---------------
-* in[0-7]_input: The conversion value for the corresponding channel.
- RO
+ ============== ==============================================================
+ in[0-7]_input The conversion value for the corresponding channel.
+ RO
-* in[0-7]_min: The lower limit (in mV) for the corresponding channel.
- For the MAX197, it will be adjusted to -10000, -5000, or 0.
- For the MAX199, it will be adjusted to -4000, -2000, or 0.
- RW
+ in[0-7]_min The lower limit (in mV) for the corresponding channel.
+ For the MAX197, it will be adjusted to -10000, -5000, or 0.
+ For the MAX199, it will be adjusted to -4000, -2000, or 0.
+ RW
-* in[0-7]_max: The higher limit (in mV) for the corresponding channel.
- For the MAX197, it will be adjusted to 0, 5000, or 10000.
- For the MAX199, it will be adjusted to 0, 2000, or 4000.
- RW
+ in[0-7]_max The higher limit (in mV) for the corresponding channel.
+ For the MAX197, it will be adjusted to 0, 5000, or 10000.
+ For the MAX199, it will be adjusted to 0, 2000, or 4000.
+ RW
+ ============== ==============================================================
diff --git a/Documentation/hwmon/max20751 b/Documentation/hwmon/max20751.rst
index f9fa25ebb521..aa4469be6674 100644
--- a/Documentation/hwmon/max20751
+++ b/Documentation/hwmon/max20751.rst
@@ -2,10 +2,15 @@ Kernel driver max20751
======================
Supported chips:
+
* maxim MAX20751
+
Prefix: 'max20751'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX20751.pdf
+
Application note: http://pdfserv.maximintegrated.com/en/an/AN5941.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -18,7 +23,7 @@ This driver supports MAX20751 Multiphase Master with PMBus Interface
and Internal Buck Converter.
The driver is a client driver to the core PMBus driver.
-Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -40,6 +45,7 @@ Sysfs entries
The following attributes are supported.
+======================= =======================================================
in1_label "vin1"
in1_input Measured voltage.
in1_min Minimum input voltage.
@@ -75,3 +81,4 @@ temp1_crit_alarm Chip temperature critical high alarm.
power1_input Output power.
power1_label "pout1"
+======================= =======================================================
diff --git a/Documentation/hwmon/max31722 b/Documentation/hwmon/max31722.rst
index 090da84538c8..0ab15c00b226 100644
--- a/Documentation/hwmon/max31722
+++ b/Documentation/hwmon/max31722.rst
@@ -2,15 +2,25 @@ Kernel driver max31722
======================
Supported chips:
+
* Maxim Integrated MAX31722
+
Prefix: 'max31722'
+
ACPI ID: MAX31722
+
Addresses scanned: -
+
Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31722-MAX31723.pdf
+
* Maxim Integrated MAX31723
+
Prefix: 'max31723'
+
ACPI ID: MAX31723
+
Addresses scanned: -
+
Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31722-MAX31723.pdf
Author: Tiberiu Breana <tiberiu.a.breana@intel.com>
@@ -31,4 +41,6 @@ Sysfs entries
The following attribute is supported:
+======================= =======================================================
temp1_input Measured temperature. Read-only.
+======================= =======================================================
diff --git a/Documentation/hwmon/max31785 b/Documentation/hwmon/max31785.rst
index 270c5f865261..c8c6756d0ee1 100644
--- a/Documentation/hwmon/max31785
+++ b/Documentation/hwmon/max31785.rst
@@ -2,9 +2,13 @@ Kernel driver max31785
======================
Supported chips:
+
* Maxim MAX31785, MAX31785A
+
Prefix: 'max31785' or 'max31785a'
+
Addresses scanned: -
+
Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31785.pdf
Author: Andrew Jeffery <andrew@aj.id.au>
@@ -30,6 +34,7 @@ devices explicitly.
Sysfs attributes
----------------
+======================= =======================================================
fan[1-4]_alarm Fan alarm.
fan[1-4]_fault Fan fault.
fan[1-8]_input Fan RPM. On the MAX31785A, inputs 5-8 correspond to the
@@ -58,3 +63,4 @@ temp[1-11]_crit_alarm Chip temperature critical high alarm
temp[1-11]_input Measured temperature
temp[1-11]_max Maximum temperature
temp[1-11]_max_alarm Chip temperature high alarm
+======================= =======================================================
diff --git a/Documentation/hwmon/max31790 b/Documentation/hwmon/max31790.rst
index 855e62430da9..84c62a12ef3a 100644
--- a/Documentation/hwmon/max31790
+++ b/Documentation/hwmon/max31790.rst
@@ -2,9 +2,13 @@ Kernel driver max31790
======================
Supported chips:
+
* Maxim MAX31790
+
Prefix: 'max31790'
+
Addresses scanned: -
+
Datasheet: http://pdfserv.maximintegrated.com/en/ds/MAX31790.pdf
Author: Il Han <corone.il.han@gmail.com>
@@ -30,8 +34,10 @@ also be configured to serve as tachometer inputs.
Sysfs entries
-------------
+================== === =======================================================
fan[1-12]_input RO fan tachometer speed in RPM
fan[1-12]_fault RO fan experienced fault
fan[1-6]_target RW desired fan speed in RPM
pwm[1-6]_enable RW regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
pwm[1-6] RW fan target duty cycle (0-255)
+================== === =======================================================
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440.rst
index b2de8fa49273..939138e12b02 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440.rst
@@ -2,34 +2,63 @@ Kernel driver max34440
======================
Supported chips:
+
* Maxim MAX34440
+
Prefixes: 'max34440'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34440.pdf
+
* Maxim MAX34441
+
PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
+
Prefixes: 'max34441'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34441.pdf
+
* Maxim MAX34446
+
PMBus Power-Supply Data Logger
+
Prefixes: 'max34446'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
+
* Maxim MAX34451
+
PMBus 16-Channel V/I Monitor and 12-Channel Sequencer/Marginer
+
Prefixes: 'max34451'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34451.pdf
+
* Maxim MAX34460
+
PMBus 12-Channel Voltage Monitor & Sequencer
+
Prefix: 'max34460'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34460.pdf
+
* Maxim MAX34461
+
PMBus 16-Channel Voltage Monitor & Sequencer
+
Prefix: 'max34461'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -47,7 +76,7 @@ based on GIN pins. The MAX34460 supports 12 voltage channels, and the MAX34461
supports 16 voltage channels.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -77,42 +106,67 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+In
+~~
+
+======================= =======================================================
in[1-6]_label "vout[1-6]".
in[1-6]_input Measured voltage. From READ_VOUT register.
in[1-6]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in[1-6]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in[1-6]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in[1-6]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in[1-6]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
in[1-6]_lowest Historical minimum voltage.
in[1-6]_highest Historical maximum voltage.
in[1-6]_reset_history Write any value to reset history.
+======================= =======================================================
+
+.. note:: MAX34446 only supports in[1-4].
- MAX34446 only supports in[1-4].
+Curr
+~~~~
+======================= ========================================================
curr[1-6]_label "iout[1-6]".
curr[1-6]_input Measured current. From READ_IOUT register.
curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
+ register.
curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
curr[1-4]_average Historical average current (MAX34446/34451 only).
curr[1-6]_highest Historical maximum current.
curr[1-6]_reset_history Write any value to reset history.
+======================= ========================================================
+
+.. note::
+
+ - in6 and curr6 attributes only exist for MAX34440.
+ - MAX34446 only supports curr[1-4].
- in6 and curr6 attributes only exist for MAX34440.
- MAX34446 only supports curr[1-4].
+Power
+~~~~~
+======================= ========================================================
power[1,3]_label "pout[1,3]"
power[1,3]_input Measured power.
power[1,3]_average Historical average power.
power[1,3]_highest Historical maximum power.
+======================= ========================================================
- Power attributes only exist for MAX34446.
+.. note:: Power attributes only exist for MAX34446.
+Temp
+~~~~
+
+======================= ========================================================
temp[1-8]_input Measured temperatures. From READ_TEMPERATURE_1 register.
temp1 is the chip's internal temperature. temp2..temp5
are remote I2C temperature sensors. For MAX34441, temp6
@@ -125,11 +179,17 @@ temp[1-8]_crit_alarm Temperature critical high alarm.
temp[1-8]_average Historical average temperature (MAX34446 only).
temp[1-8]_highest Historical maximum temperature.
temp[1-8]_reset_history Write any value to reset history.
+======================= ========================================================
+
+
+.. note::
+ - temp7 and temp8 attributes only exist for MAX34440.
+ - MAX34446 only supports temp[1-3].
+
- temp7 and temp8 attributes only exist for MAX34440.
- MAX34446 only supports temp[1-3].
+.. note::
-MAX34451 supports attribute groups in[1-16] (or curr[1-16] based on input pins)
-and temp[1-5].
-MAX34460 supports attribute groups in[1-12] and temp[1-5].
-MAX34461 supports attribute groups in[1-16] and temp[1-5].
+ - MAX34451 supports attribute groups in[1-16] (or curr[1-16] based on
+ input pins) and temp[1-5].
+ - MAX34460 supports attribute groups in[1-12] and temp[1-5].
+ - MAX34461 supports attribute groups in[1-16] and temp[1-5].
diff --git a/Documentation/hwmon/max6639 b/Documentation/hwmon/max6639.rst
index dc49f8be7167..3da54225f83c 100644
--- a/Documentation/hwmon/max6639
+++ b/Documentation/hwmon/max6639.rst
@@ -2,14 +2,18 @@ Kernel driver max6639
=====================
Supported chips:
+
* Maxim MAX6639
+
Prefix: 'max6639'
+
Addresses scanned: I2C 0x2c, 0x2e, 0x2f
+
Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6639.pdf
Authors:
- He Changqing <hechangqing@semptian.com>
- Roland Stigge <stigge@antcom.de>
+ - He Changqing <hechangqing@semptian.com>
+ - Roland Stigge <stigge@antcom.de>
Description
-----------
@@ -21,19 +25,20 @@ diode-connected transistors.
The following device attributes are implemented via sysfs:
+====================== ==== ===================================================
Attribute R/W Contents
-----------------------------------------------------------------------------
+====================== ==== ===================================================
temp1_input R Temperature channel 1 input (0..150 C)
temp2_input R Temperature channel 2 input (0..150 C)
temp1_fault R Temperature channel 1 diode fault
temp2_fault R Temperature channel 2 diode fault
temp1_max RW Set THERM temperature for input 1
- (in C, see datasheet)
+ (in C, see datasheet)
temp2_max RW Set THERM temperature for input 2
temp1_crit RW Set ALERT temperature for input 1
temp2_crit RW Set ALERT temperature for input 2
temp1_emergency RW Set OT temperature for input 1
- (in C, see datasheet)
+ (in C, see datasheet)
temp2_emergency RW Set OT temperature for input 2
pwm1 RW Fan 1 target duty cycle (0..255)
pwm2 RW Fan 2 target duty cycle (0..255)
@@ -47,3 +52,4 @@ temp1_crit_alarm R Alarm on ALERT temperature on channel 1
temp2_crit_alarm R Alarm on ALERT temperature on channel 2
temp1_emergency_alarm R Alarm on OT temperature on channel 1
temp2_emergency_alarm R Alarm on OT temperature on channel 2
+====================== ==== ===================================================
diff --git a/Documentation/hwmon/max6642 b/Documentation/hwmon/max6642.rst
index afbd3e4942e2..7e5b7d4f9492 100644
--- a/Documentation/hwmon/max6642
+++ b/Documentation/hwmon/max6642.rst
@@ -2,14 +2,20 @@ Kernel driver max6642
=====================
Supported chips:
+
* Maxim MAX6642
+
Prefix: 'max6642'
+
Addresses scanned: I2C 0x48-0x4f
+
Datasheet: Publicly available at the Maxim website
- http://datasheets.maxim-ic.com/en/ds/MAX6642.pdf
+
+ http://datasheets.maxim-ic.com/en/ds/MAX6642.pdf
Authors:
- Per Dalen <per.dalen@appeartv.com>
+
+ Per Dalen <per.dalen@appeartv.com>
Description
-----------
diff --git a/Documentation/hwmon/max6650 b/Documentation/hwmon/max6650.rst
index dff1d296a48b..253482add082 100644
--- a/Documentation/hwmon/max6650
+++ b/Documentation/hwmon/max6650.rst
@@ -2,19 +2,27 @@ Kernel driver max6650
=====================
Supported chips:
+
* Maxim MAX6650
+
Prefix: 'max6650'
+
Addresses scanned: none
+
Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
+
* Maxim MAX6651
+
Prefix: 'max6651'
+
Addresses scanned: none
+
Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
Authors:
- Hans J. Koch <hjk@hansjkoch.de>
- John Morris <john.morris@spirentcom.com>
- Claus Gindhart <claus.gindhart@kontron.com>
+ - Hans J. Koch <hjk@hansjkoch.de>
+ - John Morris <john.morris@spirentcom.com>
+ - Claus Gindhart <claus.gindhart@kontron.com>
Description
-----------
@@ -28,6 +36,7 @@ The driver is not able to distinguish between the 2 devices.
The driver provides the following sensor accesses in sysfs:
+=============== ======= =======================================================
fan1_input ro fan tachometer speed in RPM
fan2_input ro "
fan3_input ro "
@@ -40,6 +49,7 @@ pwm1 rw relative speed (0-255), 255=max. speed.
fan1_div rw sets the speed range the inputs can handle. Legal
values are 1, 2, 4, and 8. Use lower values for
faster fans.
+=============== ======= =======================================================
Usage notes
-----------
@@ -62,4 +72,3 @@ clock: The clock frequency in Hz of the chip the driver should assume [254000]
Please have a look at the MAX6650/6651 data sheet and make sure that you fully
understand the meaning of these parameters before you attempt to change them.
-
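For instance, the clock the driver should assume can be overridden when
loading the module (a sketch using the documented default of 254000 Hz)::

	# modprobe max6650 clock=254000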
diff --git a/Documentation/hwmon/max6697 b/Documentation/hwmon/max6697.rst
index 6594177ededa..ffc5a7d8d33b 100644
--- a/Documentation/hwmon/max6697
+++ b/Documentation/hwmon/max6697.rst
@@ -2,38 +2,69 @@ Kernel driver max6697
=====================
Supported chips:
+
* Maxim MAX6581
+
Prefix: 'max6581'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6581.pdf
+
* Maxim MAX6602
+
Prefix: 'max6602'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6602.pdf
+
* Maxim MAX6622
+
Prefix: 'max6622'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6622.pdf
+
* Maxim MAX6636
+
Prefix: 'max6636'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6636.pdf
+
* Maxim MAX6689
+
Prefix: 'max6689'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6689.pdf
+
* Maxim MAX6693
+
Prefix: 'max6693'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6693.pdf
+
* Maxim MAX6694
+
Prefix: 'max6694'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6694.pdf
+
* Maxim MAX6697
+
Prefix: 'max6697'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6697.pdf
+
* Maxim MAX6698
+
Prefix: 'max6698'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6698.pdf
+
* Maxim MAX6699
+
Prefix: 'max6699'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6699.pdf
Author:
+
Guenter Roeck <linux@roeck-us.net>
Description
@@ -50,9 +81,11 @@ The driver provides the following sysfs attributes. temp1 is the local (chip)
temperature, temp[2..n] are remote temperatures. The actually supported
per-channel attributes are chip type and channel dependent.
+================ == ==========================================================
tempX_input RO temperature
tempX_max RW temperature maximum threshold
tempX_max_alarm RO temperature maximum threshold alarm
tempX_crit RW temperature critical threshold
tempX_crit_alarm RO temperature critical threshold alarm
tempX_fault RO temperature diode fault (remote sensors only)
+================ == ==========================================================
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688.rst
index ca233bec7a8a..009487759c61 100644
--- a/Documentation/hwmon/max8688
+++ b/Documentation/hwmon/max8688.rst
@@ -2,9 +2,13 @@ Kernel driver max8688
=====================
Supported chips:
+
* Maxim MAX8688
+
Prefix: 'max8688'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -17,7 +21,7 @@ This driver supports hardware monitoring for Maxim MAX8688 Digital Power-Supply
Controller/Monitor with PMBus Interface.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -40,23 +44,28 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in1_label "vout1"
in1_input Measured voltage. From READ_VOUT register.
in1_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in1_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
in1_highest Historical maximum voltage.
in1_reset_history Write any value to reset history.
curr1_label "iout1"
curr1_input Measured current. From READ_IOUT register.
curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
+ register.
curr1_max_alarm Current high alarm. From IOUT_OC_WARN_LIMIT register.
curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
curr1_highest Historical maximum current.
@@ -73,3 +82,4 @@ temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
status is set.
temp1_highest Historical maximum temperature.
temp1_reset_history Write any value to reset history.
+======================= ========================================================
diff --git a/Documentation/hwmon/mc13783-adc b/Documentation/hwmon/mc13783-adc.rst
index 05ccc9f159f1..cae70350ba2f 100644
--- a/Documentation/hwmon/mc13783-adc
+++ b/Documentation/hwmon/mc13783-adc.rst
@@ -2,16 +2,25 @@ Kernel driver mc13783-adc
=========================
Supported chips:
+
* Freescale MC13783
+
Prefix: 'mc13783'
+
Datasheet: https://www.nxp.com/docs/en/data-sheet/MC13783.pdf
+
* Freescale MC13892
+
Prefix: 'mc13892'
+
Datasheet: https://www.nxp.com/docs/en/data-sheet/MC13892.pdf
+
+
Authors:
- Sascha Hauer <s.hauer@pengutronix.de>
- Luotao Fu <l.fu@pengutronix.de>
+
+ - Sascha Hauer <s.hauer@pengutronix.de>
+ - Luotao Fu <l.fu@pengutronix.de>
Description
-----------
@@ -30,9 +39,11 @@ the General Purpose inputs and touchscreen.
See the following tables for the meaning of the different channels and their
chip internal scaling:
-MC13783:
+- MC13783:
+
+======= =============================================== =============== =======
Channel Signal Input Range Scaling
--------------------------------------------------------------------------------
+======= =============================================== =============== =======
0 Battery Voltage (BATT) 2.50 - 4.65V -2.40V
1 Battery Current (BATT - BATTISNS) -50 - 50 mV x20
2 Application Supply (BP) 2.50 - 4.65V -2.40V
@@ -52,10 +63,13 @@ Channel Signal Input Range Scaling
13 General Purpose TSX2 / Touchscreen X-plate 2 0 - 2.30V No
14 General Purpose TSY1 / Touchscreen Y-plate 1 0 - 2.30V No
15 General Purpose TSY2 / Touchscreen Y-plate 2 0 - 2.30V No
+======= =============================================== =============== =======
+
+- MC13892:
-MC13892:
+======= =============================================== =============== =======
Channel Signal Input Range Scaling
--------------------------------------------------------------------------------
+======= =============================================== =============== =======
0 Battery Voltage (BATT) 0 - 4.8V /2
1 Battery Current (BATT - BATTISNSCC) -60 - 60 mV x20
2 Application Supply (BPSNS) 0 - 4.8V /2
@@ -72,3 +86,4 @@ Channel Signal Input Range Scaling
13 General Purpose TSX2 / Touchscreen X-plate 2 0 - 2.4V No
14 General Purpose TSY1 / Touchscreen Y-plate 1 0 - 2.4V No
15 General Purpose TSY2 / Touchscreen Y-plate 2 0 - 2.4V No
+======= =============================================== =============== =======
diff --git a/Documentation/hwmon/mcp3021 b/Documentation/hwmon/mcp3021.rst
index 74a6b72adf5f..83f4bda2f269 100644
--- a/Documentation/hwmon/mcp3021
+++ b/Documentation/hwmon/mcp3021.rst
@@ -1,17 +1,26 @@
Kernel driver MCP3021
-======================
+=====================
Supported chips:
+
* Microchip Technology MCP3021
+
Prefix: 'mcp3021'
+
Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/21805a.pdf
+
* Microchip Technology MCP3221
+
Prefix: 'mcp3221'
+
Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/21732c.pdf
+
+
Authors:
- Mingkai Hu
- Sven Schuchmann <schuchmann@schleissheimer.de>
+
+ - Mingkai Hu
+ - Sven Schuchmann <schuchmann@schleissheimer.de>
Description
-----------
diff --git a/Documentation/hwmon/menf21bmc b/Documentation/hwmon/menf21bmc.rst
index 2a273a065c5e..1f0c6b2235ab 100644
--- a/Documentation/hwmon/menf21bmc
+++ b/Documentation/hwmon/menf21bmc.rst
@@ -2,8 +2,11 @@ Kernel driver menf21bmc_hwmon
=============================
Supported chips:
+
* MEN 14F021P00
+
Prefix: 'menf21bmc_hwmon'
+
Addresses scanned: -
Author: Andreas Werner <andreas.werner@men.de>
@@ -34,6 +37,7 @@ Sysfs entries
The following attributes are supported. All attributes are read only
The Limits are read once by the driver.
+=============== ==========================
in0_input +3.3V input voltage
in1_input +5.0V input voltage
in2_input +12.0V input voltage
@@ -48,3 +52,4 @@ in1_label "MON_5V"
in2_label "MON_12V"
in3_label "5V_STANDBY"
in4_label "VBAT"
+=============== ==========================
diff --git a/Documentation/hwmon/mlxreg-fan b/Documentation/hwmon/mlxreg-fan.rst
index fc531c6978d4..c92b8e885f7e 100644
--- a/Documentation/hwmon/mlxreg-fan
+++ b/Documentation/hwmon/mlxreg-fan.rst
@@ -2,32 +2,38 @@ Kernel driver mlxreg-fan
========================
Provides FAN control for the next Mellanox systems:
-QMB700, equipped with 40x200GbE InfiniBand ports;
-MSN3700, equipped with 32x200GbE or 16x400GbE Ethernet ports;
-MSN3410, equipped with 6x400GbE plus 48x50GbE Ethernet ports;
-MSN3800, equipped with 64x1000GbE Ethernet ports;
+
+- QMB700, equipped with 40x200GbE InfiniBand ports;
+- MSN3700, equipped with 32x200GbE or 16x400GbE Ethernet ports;
+- MSN3410, equipped with 6x400GbE plus 48x50GbE Ethernet ports;
+- MSN3800, equipped with 64x100GbE Ethernet ports;
+
+Author: Vadim Pasternak <vadimp@mellanox.com>
+
These are the Top of the Rack systems, equipped with Mellanox switch
board with Mellanox Quantum or Spectrum-2 devices.
FAN controller is implemented by the programmable device logic.
The default registers offsets set within the programmable device is as
following:
-- pwm1 0xe3
-- fan1 (tacho1) 0xe4
-- fan2 (tacho2) 0xe5
-- fan3 (tacho3) 0xe6
-- fan4 (tacho4) 0xe7
-- fan5 (tacho5) 0xe8
-- fan6 (tacho6) 0xe9
-- fan7 (tacho7) 0xea
-- fan8 (tacho8) 0xeb
-- fan9 (tacho9) 0xec
-- fan10 (tacho10) 0xed
-- fan11 (tacho11) 0xee
-- fan12 (tacho12) 0xef
-This setup can be re-programmed with other registers.
-Author: Vadim Pasternak <vadimp@mellanox.com>
+======================= ====
+pwm1 0xe3
+fan1 (tacho1) 0xe4
+fan2 (tacho2) 0xe5
+fan3 (tacho3) 0xe6
+fan4 (tacho4) 0xe7
+fan5 (tacho5) 0xe8
+fan6 (tacho6) 0xe9
+fan7 (tacho7) 0xea
+fan8 (tacho8) 0xeb
+fan9 (tacho9) 0xec
+fan10 (tacho10) 0xed
+fan11 (tacho11) 0xee
+fan12 (tacho12) 0xef
+======================= ====
+
+This setup can be re-programmed with other registers.
Description
-----------
@@ -48,13 +54,17 @@ thermal's sysfs interfaces.
/sys files in hwmon subsystem
-----------------------------
-fan[1-12]_fault - RO files for tachometers TACH1-TACH12 fault indication
-fan[1-12]_input - RO files for tachometers TACH1-TACH12 input (in RPM)
-pwm1 - RW file for fan[1-12] target duty cycle (0..255)
+================= == ===================================================
+fan[1-12]_fault RO files for tachometers TACH1-TACH12 fault indication
+fan[1-12]_input RO files for tachometers TACH1-TACH12 input (in RPM)
+pwm1 RW file for fan[1-12] target duty cycle (0..255)
+================= == ===================================================
/sys files in thermal subsystem
-------------------------------
-cur_state - RW file for current cooling state of the cooling device
- (0..max_state)
-max_state - RO file for maximum cooling state of the cooling device
+================= == ====================================================
+cur_state RW file for current cooling state of the cooling device
+ (0..max_state)
+max_state RO file for maximum cooling state of the cooling device
+================= == ====================================================
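As an illustration, the cooling device can be inspected and set through the
thermal subsystem files listed above (the cooling_device index varies per
system)::

	$ cat /sys/class/thermal/cooling_device0/max_state
	$ echo 5 > /sys/class/thermal/cooling_device0/cur_state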
diff --git a/Documentation/hwmon/nct6683 b/Documentation/hwmon/nct6683.rst
index c1301d4300cd..efbf7e9703ec 100644
--- a/Documentation/hwmon/nct6683
+++ b/Documentation/hwmon/nct6683.rst
@@ -2,13 +2,18 @@ Kernel driver nct6683
=====================
Supported chips:
+
* Nuvoton NCT6683D
+
Prefix: 'nct6683'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
Authors:
- Guenter Roeck <linux@roeck-us.net>
+
+ Guenter Roeck <linux@roeck-us.net>
Description
-----------
@@ -50,8 +55,10 @@ Tested Boards and Firmware Versions
The driver has been reported to work with the following boards and
firmware versions.
+=============== ===============================================
Board Firmware version
----------------------------------------------------------------
+=============== ===============================================
Intel DH87RL NCT6683D EC firmware version 1.0 build 04/03/13
Intel DH87MC NCT6683D EC firmware version 1.0 build 04/03/13
Intel DB85FL NCT6683D EC firmware version 1.0 build 04/03/13
+=============== ===============================================
diff --git a/Documentation/hwmon/nct6775 b/Documentation/hwmon/nct6775.rst
index bd59834d310f..1d0315c40952 100644
--- a/Documentation/hwmon/nct6775
+++ b/Documentation/hwmon/nct6775.rst
@@ -1,52 +1,90 @@
-Note
-====
-
-This driver supersedes the NCT6775F and NCT6776F support in the W83627EHF
-driver.
-
Kernel driver NCT6775
=====================
+.. note::
+
+ This driver supersedes the NCT6775F and NCT6776F support in the W83627EHF
+ driver.
+
Supported chips:
+
* Nuvoton NCT6102D/NCT6104D/NCT6106D
+
Prefix: 'nct6106'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from the Nuvoton web site
+
* Nuvoton NCT5572D/NCT6771F/NCT6772F/NCT6775F/W83677HG-I
+
Prefix: 'nct6775'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT5573D/NCT5577D/NCT6776D/NCT6776F
+
Prefix: 'nct6776'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT5532D/NCT6779D
+
Prefix: 'nct6779'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6791D
+
Prefix: 'nct6791'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6792D
+
Prefix: 'nct6792'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6793D
+
Prefix: 'nct6793'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6795D
+
Prefix: 'nct6795'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6796D
+
Prefix: 'nct6796'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
+
Authors:
- Guenter Roeck <linux@roeck-us.net>
+
+ Guenter Roeck <linux@roeck-us.net>
Description
-----------
@@ -96,10 +134,14 @@ The mode works for fan1-fan5.
sysfs attributes
----------------
-pwm[1-7] - this file stores PWM duty cycle or DC value (fan speed) in range:
+pwm[1-7]
+ - this file stores PWM duty cycle or DC value (fan speed) in range:
+
0 (lowest speed) to 255 (full)
-pwm[1-7]_enable - this file controls mode of fan/temperature control:
+pwm[1-7]_enable
+ - this file controls mode of fan/temperature control:
+
* 0 Fan control disabled (fans set to maximum speed)
	* 1 Manual mode, write to pwm[1-7] any value 0-255
* 2 "Thermal Cruise" mode
@@ -107,15 +149,19 @@ pwm[1-7]_enable - this file controls mode of fan/temperature control:
* 4 "Smart Fan III" mode (NCT6775F only)
* 5 "Smart Fan IV" mode
-pwm[1-7]_mode - controls if output is PWM or DC level
- * 0 DC output
- * 1 PWM output
+pwm[1-7]_mode
+ - controls if output is PWM or DC level
+
+ * 0 DC output
+ * 1 PWM output
Common fan control attributes
-----------------------------
-pwm[1-7]_temp_sel Temperature source. Value is temperature sensor index.
+pwm[1-7]_temp_sel
+ Temperature source. Value is temperature sensor index.
For example, select '1' for temp1_input.
+
pwm[1-7]_weight_temp_sel
Secondary temperature source. Value is temperature
sensor index. For example, select '1' for temp1_input.
@@ -126,13 +172,16 @@ following attributes.
pwm[1-7]_weight_duty_step
Duty step size.
+
pwm[1-7]_weight_temp_step
Temperature step size. With each step over
temp_step_base, the value of weight_duty_step is added
to the current pwm value.
+
pwm[1-7]_weight_temp_step_base
Temperature at which secondary temperature control kicks
in.
+
pwm[1-7]_weight_temp_step_tol
Temperature step tolerance.
@@ -141,24 +190,35 @@ Thermal Cruise mode (2)
If the temperature is in the range defined by:
-pwm[1-7]_target_temp Target temperature, unit millidegree Celsius
+pwm[1-7]_target_temp
+ Target temperature, unit millidegree Celsius
(range 0 - 127000)
+
pwm[1-7]_temp_tolerance
Target temperature tolerance, unit millidegree Celsius
-there are no changes to fan speed. Once the temperature leaves the interval, fan
+There are no changes to fan speed. Once the temperature leaves the interval, fan
speed increases (if temperature is higher than desired) or decreases (if
temperature is lower than desired), using the following limits and time
intervals.
-pwm[1-7]_start fan pwm start value (range 1 - 255), to start fan
+pwm[1-7]_start
+ fan pwm start value (range 1 - 255), to start fan
when the temperature is above defined range.
-pwm[1-7]_floor lowest fan pwm (range 0 - 255) if temperature is below
+
+pwm[1-7]_floor
+ lowest fan pwm (range 0 - 255) if temperature is below
the defined range. If set to 0, the fan is expected to
stop if the temperature is below the defined range.
-pwm[1-7]_step_up_time milliseconds before fan speed is increased
-pwm[1-7]_step_down_time milliseconds before fan speed is decreased
-pwm[1-7]_stop_time how many milliseconds must elapse to switch
+
+pwm[1-7]_step_up_time
+ milliseconds before fan speed is increased
+
+pwm[1-7]_step_down_time
+ milliseconds before fan speed is decreased
+
+pwm[1-7]_stop_time
+ how many milliseconds must elapse to switch
corresponding fan off (when the temperature was below
defined range).
@@ -167,7 +227,9 @@ Speed Cruise mode (3)
This mode tries to keep the fan speed constant.
-fan[1-7]_target Target fan speed
+fan[1-7]_target
+ Target fan speed
+
fan[1-7]_tolerance
Target speed tolerance
@@ -188,16 +250,22 @@ critical temperature mode, in which the fans should run at full speed.
pwm[1-7]_auto_point[1-7]_pwm
pwm value to be set if temperature reaches matching
temperature range.
+
pwm[1-7]_auto_point[1-7]_temp
Temperature over which the matching pwm is enabled.
+
pwm[1-7]_temp_tolerance
Temperature tolerance, unit millidegree Celsius
+
pwm[1-7]_crit_temp_tolerance
Temperature tolerance for critical temperature,
unit millidegree Celsius
-pwm[1-7]_step_up_time milliseconds before fan speed is increased
-pwm[1-7]_step_down_time milliseconds before fan speed is decreased
+pwm[1-7]_step_up_time
+ milliseconds before fan speed is increased
+
+pwm[1-7]_step_down_time
+ milliseconds before fan speed is decreased
Usage Notes
-----------
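For example, putting a fan into manual mode and setting roughly half duty
cycle uses the pwm attributes described above (a sketch; device paths vary
per system)::

	$ echo 1 > /sys/class/hwmon/hwmon4/pwm2_enable   # manual mode
	$ echo 128 > /sys/class/hwmon/hwmon4/pwm2        # ~50% duty cycle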
diff --git a/Documentation/hwmon/nct7802 b/Documentation/hwmon/nct7802.rst
index 5438deb6be02..8b7365a7cb32 100644
--- a/Documentation/hwmon/nct7802
+++ b/Documentation/hwmon/nct7802.rst
@@ -2,13 +2,18 @@ Kernel driver nct7802
=====================
Supported chips:
+
* Nuvoton NCT7802Y
+
Prefix: 'nct7802'
+
Addresses scanned: I2C 0x28..0x2f
+
Datasheet: Available from Nuvoton web site
Authors:
- Guenter Roeck <linux@roeck-us.net>
+
+ Guenter Roeck <linux@roeck-us.net>
Description
-----------
@@ -25,7 +30,9 @@ Tested Boards and BIOS Versions
The driver has been reported to work with the following boards and
BIOS versions.
+======================= ===============================================
Board BIOS version
----------------------------------------------------------------
+======================= ===============================================
Kontron COMe-bSC2 CHR2E934.001.GGO
Kontron COMe-bIP2 CCR2E212
+======================= ===============================================
diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904.rst
index 57fffe33ebfc..5b2f111582ff 100644
--- a/Documentation/hwmon/nct7904
+++ b/Documentation/hwmon/nct7904.rst
@@ -1,11 +1,16 @@
Kernel driver nct7904
-====================
+=====================
Supported chip:
+
* Nuvoton NCT7904D
+
Prefix: nct7904
+
Addresses: I2C 0x2d, 0x2e
+
Datasheet: Publicly available at Nuvoton website
+
http://www.nuvoton.com/
Author: Vadim V. Vlasov <vvlasov@dev.rtsoft.ru>
@@ -25,6 +30,7 @@ Sysfs entries
Currently, the driver supports only the following features:
+======================= =======================================================
in[1-20]_input Input voltage measurements (mV)
fan[1-12]_input Fan tachometer measurements (rpm)
@@ -40,6 +46,7 @@ pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode
previously configured by BIOS (or configuration EEPROM)
pwm[1-4] R/O in SmartFan mode, R/W in manual control mode
+======================= =======================================================
The driver checks sensor control registers and does not export the sensors
that are not enabled. Anyway, a sensor that is enabled may actually be not
diff --git a/Documentation/hwmon/npcm750-pwm-fan b/Documentation/hwmon/npcm750-pwm-fan.rst
index 6156ef7398e6..c67af08b6773 100644
--- a/Documentation/hwmon/npcm750-pwm-fan
+++ b/Documentation/hwmon/npcm750-pwm-fan.rst
@@ -2,9 +2,11 @@ Kernel driver npcm750-pwm-fan
=============================
Supported chips:
+
NUVOTON NPCM750/730/715/705
Authors:
+
<tomer.maimon@nuvoton.com>
Description:
@@ -15,8 +17,10 @@ controller supports up to 16 tachometer inputs.
The driver provides the following sensor accesses in sysfs:
+=============== ======= =====================================================
fanX_input ro provide current fan rotation value in RPM as reported
by the fan to the device.
pwmX rw get or set PWM fan control value. This is an integer
value between 0(off) and 255(full speed).
+=============== ======= =====================================================
diff --git a/Documentation/hwmon/nsa320 b/Documentation/hwmon/nsa320.rst
index fdbd6947799b..4fe75fd2f937 100644
--- a/Documentation/hwmon/nsa320
+++ b/Documentation/hwmon/nsa320.rst
@@ -2,14 +2,23 @@ Kernel driver nsa320_hwmon
==========================
Supported chips:
+
* Holtek HT46R065 microcontroller with onboard firmware that configures
it to act as a hardware monitor.
+
Prefix: 'nsa320'
+
Addresses scanned: none
+
Datasheet: Not available, driver was reverse engineered based upon the
Zyxel kernel source
+
+
Author:
+
Adam Baker <linux@baker-net.org.uk>
Description
@@ -31,8 +40,10 @@ tenths of a degree.
sysfs-Interface
---------------
-temp1_input - temperature input
-fan1_input - fan speed
+============= =================
+temp1_input temperature input
+fan1_input fan speed
+============= =================
Notes
-----
diff --git a/Documentation/hwmon/ntc_thermistor b/Documentation/hwmon/ntc_thermistor.rst
index 8b9ff23edc32..d0e7f91726b9 100644
--- a/Documentation/hwmon/ntc_thermistor
+++ b/Documentation/hwmon/ntc_thermistor.rst
@@ -1,22 +1,29 @@
Kernel driver ntc_thermistor
-=================
+============================
Supported thermistors from Murata:
+
* Murata NTC Thermistors NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473,
NCP15WL333, NCP03WF104, NCP15XH103
+
Prefixes: 'ncp15wb473', 'ncp18wb473', 'ncp21wb473', 'ncp03wb473',
'ncp15wl333', 'ncp03wf104', 'ncp15xh103'
+
Datasheet: Publicly available at Murata
Supported thermistors from EPCOS:
+
* EPCOS NTC Thermistors B57330V2103
+
Prefixes: b57330v2103
+
Datasheet: Publicly available at EPCOS
Other NTC thermistors can be supported simply by adding compensation
tables; e.g., NCP15WL333 support is added by the table ncpXXwl333.
Authors:
+
MyungJoo Ham <myungjoo.ham@samsung.com>
Description
@@ -29,57 +36,60 @@ compensation table to get the temperature input.
The NTC driver provides lookup tables with a linear approximation function
and four circuit models with an option not to use any of the four models.
+Using the following convention::
+
+ $ resistor
+ [TH] the thermistor
+
The four circuit models provided are:
- $: resister, [TH]: the thermistor
-
- 1. connect = NTC_CONNECTED_POSITIVE, pullup_ohm > 0
-
- [pullup_uV]
- | |
- [TH] $ (pullup_ohm)
- | |
- +----+-----------------------[read_uV]
- |
- $ (pulldown_ohm)
- |
- --- (ground)
-
- 2. connect = NTC_CONNECTED_POSITIVE, pullup_ohm = 0 (not-connected)
-
- [pullup_uV]
- |
- [TH]
- |
- +----------------------------[read_uV]
- |
- $ (pulldown_ohm)
- |
- --- (ground)
-
- 3. connect = NTC_CONNECTED_GROUND, pulldown_ohm > 0
-
- [pullup_uV]
- |
- $ (pullup_ohm)
- |
- +----+-----------------------[read_uV]
- | |
- [TH] $ (pulldown_ohm)
- | |
- -------- (ground)
-
- 4. connect = NTC_CONNECTED_GROUND, pulldown_ohm = 0 (not-connected)
-
- [pullup_uV]
- |
- $ (pullup_ohm)
- |
- +----------------------------[read_uV]
- |
- [TH]
- |
- --- (ground)
+1. connect = NTC_CONNECTED_POSITIVE, pullup_ohm > 0::
+
+ [pullup_uV]
+ | |
+ [TH] $ (pullup_ohm)
+ | |
+ +----+-----------------------[read_uV]
+ |
+ $ (pulldown_ohm)
+ |
+ -+- (ground)
+
+2. connect = NTC_CONNECTED_POSITIVE, pullup_ohm = 0 (not-connected)::
+
+ [pullup_uV]
+ |
+ [TH]
+ |
+ +----------------------------[read_uV]
+ |
+ $ (pulldown_ohm)
+ |
+ -+- (ground)
+
+3. connect = NTC_CONNECTED_GROUND, pulldown_ohm > 0::
+
+ [pullup_uV]
+ |
+ $ (pullup_ohm)
+ |
+ +----+-----------------------[read_uV]
+ | |
+ [TH] $ (pulldown_ohm)
+ | |
+ -+----+- (ground)
+
+4. connect = NTC_CONNECTED_GROUND, pulldown_ohm = 0 (not-connected)::
+
+ [pullup_uV]
+ |
+ $ (pullup_ohm)
+ |
+ +----------------------------[read_uV]
+ |
+ [TH]
+ |
+ -+- (ground)
When one of the four circuit models is used, read_uV, pullup_uV, pullup_ohm,
pulldown_ohm, and connect should be provided. When none of the four models
@@ -88,13 +98,14 @@ provide read_ohm and _not_ provide the others.
Sysfs Interface
---------------
-name the mandatory global attribute, the thermistor name.
-temp1_type always 4 (thermistor)
- RO
+=============== == =============================================================
+name the mandatory global attribute, the thermistor name.
+=============== == =============================================================
+temp1_type RO always 4 (thermistor)
-temp1_input measure the temperature and provide the measured value.
- (reading this file initiates the reading procedure.)
- RO
+temp1_input RO measure the temperature and provide the measured value.
+ (reading this file initiates the reading procedure.)
+=============== == =============================================================
Note that each NTC thermistor has only _one_ thermistor; thus, only temp1 exists.
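+
+To make the divider arithmetic concrete, here is a hypothetical sketch (not
+the driver's actual code) for circuit model 2 above, where the thermistor
+and pulldown_ohm carry the same current::
+
+  /*
+   * Illustration only: the divider current is read_uV / pulldown_ohm and
+   * the thermistor drops pullup_uV - read_uV, so R_th follows from
+   * Ohm's law.
+   */
+  static unsigned int ntc_model2_resistance(unsigned int pullup_uv,
+                                            unsigned int read_uv,
+                                            unsigned int pulldown_ohm)
+  {
+          if (read_uv == 0 || read_uv >= pullup_uv)
+                  return 0;       /* reading out of range */
+          return (unsigned long long)(pullup_uv - read_uv) *
+                  pulldown_ohm / read_uv;
+  }
+
+For example, pullup_uV = 1800000, read_uV = 900000 and pulldown_ohm = 47000
+yield 47000 ohms, as expected when the divider is balanced.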
diff --git a/Documentation/hwmon/occ b/Documentation/hwmon/occ.rst
index e787596e03fe..bf41c162d70e 100644
--- a/Documentation/hwmon/occ
+++ b/Documentation/hwmon/occ.rst
@@ -2,6 +2,7 @@ Kernel driver occ-hwmon
=======================
Supported chips:
+
* POWER8
* POWER9
@@ -37,53 +38,87 @@ Some entries are only present with certain OCC sensor versions or only on
certain OCCs in the system. The version number is not exported to the user
but can be inferred.
-temp[1-n]_label OCC sensor ID.
+temp[1-n]_label
+ OCC sensor ID.
+
[with temperature sensor version 1]
- temp[1-n]_input Measured temperature of the component in millidegrees
+
+ temp[1-n]_input
+ Measured temperature of the component in millidegrees
Celsius.
+
[with temperature sensor version >= 2]
- temp[1-n]_type The FRU (Field Replaceable Unit) type
+
+ temp[1-n]_type
+ The FRU (Field Replaceable Unit) type
(represented by an integer) for the component
that this sensor measures.
- temp[1-n]_fault Temperature sensor fault boolean; 1 to indicate
+ temp[1-n]_fault
+ Temperature sensor fault boolean; 1 to indicate
that a fault is present or 0 to indicate that
no fault is present.
+
[with type == 3 (FRU type is VRM)]
- temp[1-n]_alarm VRM temperature alarm boolean; 1 to indicate
+
+ temp[1-n]_alarm
+ VRM temperature alarm boolean; 1 to indicate
alarm, 0 to indicate no alarm
+
[else]
- temp[1-n]_input Measured temperature of the component in
- millidegrees Celsius.
-freq[1-n]_label OCC sensor ID.
-freq[1-n]_input Measured frequency of the component in MHz.
+ temp[1-n]_input
+ Measured temperature of the component in
+ millidegrees Celsius.
-power[1-n]_input Latest measured power reading of the component in
+freq[1-n]_label
+ OCC sensor ID.
+freq[1-n]_input
+ Measured frequency of the component in MHz.
+power[1-n]_input
+ Latest measured power reading of the component in
microwatts.
-power[1-n]_average Average power of the component in microwatts.
-power[1-n]_average_interval The amount of time over which the power average
+power[1-n]_average
+ Average power of the component in microwatts.
+power[1-n]_average_interval
+ The amount of time over which the power average
was taken in microseconds.
+
[with power sensor version < 2]
- power[1-n]_label OCC sensor ID.
+
+ power[1-n]_label
+ OCC sensor ID.
+
[with power sensor version >= 2]
- power[1-n]_label OCC sensor ID + function ID + channel in the form
+
+ power[1-n]_label
+ OCC sensor ID + function ID + channel in the form
of a string, delimited by underscores, i.e. "0_15_1".
Both the function ID and channel are integers that
further identify the power sensor.
+
[with power sensor version 0xa0]
- power[1-n]_label OCC sensor ID + sensor type in the form of a string,
+
+ power[1-n]_label
+ OCC sensor ID + sensor type in the form of a string,
delimited by an underscore, i.e. "0_system". Sensor
type will be one of "system", "proc", "vdd" or "vdn".
For this sensor version, OCC sensor ID will be the same
for all power sensors.
+
[present only on "master" OCC; represents the whole system power; only one of
- this type of power sensor will be present]
- power[1-n]_label "system"
- power[1-n]_input Latest system output power in microwatts.
- power[1-n]_cap Current system power cap in microwatts.
- power[1-n]_cap_not_redundant System power cap in microwatts when
- there is not redundant power.
- power[1-n]_cap_max Maximum power cap that the OCC can enforce in
+this type of power sensor will be present]
+
+ power[1-n]_label
+ "system"
+ power[1-n]_input
+ Latest system output power in microwatts.
+ power[1-n]_cap
+ Current system power cap in microwatts.
+ power[1-n]_cap_not_redundant
+ System power cap in microwatts when
+ there is not redundant power.
+ power[1-n]_cap_max
+ Maximum power cap that the OCC can enforce in
microwatts.
power[1-n]_cap_min Minimum power cap that the OCC can enforce in
microwatts.
@@ -94,8 +129,11 @@ power[1-n]_average_interval The amount of time over which the power average
ignored, i.e. requesting a power cap of
500900000 microwatts will result in a power cap
request of 500 watts.
+
[with caps sensor version > 1]
- power[1-n]_cap_user_source Indicates how the user power cap was
+
+ power[1-n]_cap_user_source
+ Indicates how the user power cap was
set. This is an integer that maps to
system or firmware components that can
set the user power cap.
@@ -104,9 +142,12 @@ The following "extn" sensors are exported as a way for the OCC to provide data
that doesn't fit anywhere else. The meaning of these sensors is entirely
dependent on their data, and cannot be statically defined.
-extn[1-n]_label ASCII ID or OCC sensor ID.
-extn[1-n]_flags This is one byte hexadecimal value. Bit 7 indicates the
+extn[1-n]_label
+ ASCII ID or OCC sensor ID.
+extn[1-n]_flags
+	This is a one-byte hexadecimal value. Bit 7 indicates the
type of the label attribute; 1 for sensor ID, 0 for
ASCII ID. Other bits are reserved.
-extn[1-n]_input 6 bytes of hexadecimal data, with a meaning defined by
+extn[1-n]_input
+ 6 bytes of hexadecimal data, with a meaning defined by
the sensor ID.
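+
+As a concrete note on the power cap handling above, the rounding down to
+whole watts amounts to the following (hypothetical helper, not driver
+code)::
+
+  static unsigned int occ_round_cap_uw(unsigned int requested_uw)
+  {
+          return (requested_uw / 1000000) * 1000000;  /* truncate to watts */
+  }
+
+  /* occ_round_cap_uw(500900000) == 500000000, i.e. a 500 W cap request */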
diff --git a/Documentation/hwmon/pc87360 b/Documentation/hwmon/pc87360.rst
index d5f5cf16ce59..4bad07bce54b 100644
--- a/Documentation/hwmon/pc87360
+++ b/Documentation/hwmon/pc87360.rst
@@ -2,14 +2,19 @@ Kernel driver pc87360
=====================
Supported chips:
+
* National Semiconductor PC87360, PC87363, PC87364, PC87365 and PC87366
+
Prefixes: 'pc87360', 'pc87363', 'pc87364', 'pc87365', 'pc87366'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheets: No longer available
Authors: Jean Delvare <jdelvare@suse.de>
Thanks to Sandeep Mehta, Tonko de Rooy and Daniel Ceregatti for testing.
+
Thanks to Rudolf Marek for helping me investigate conversion issues.
@@ -17,11 +22,13 @@ Module Parameters
-----------------
* init int
- Chip initialization level:
- 0: None
- *1: Forcibly enable internal voltage and temperature channels, except in9
- 2: Forcibly enable all voltage and temperature channels, except in9
- 3: Forcibly enable all voltage and temperature channels, including in9
+ Chip initialization level:
+
+ - 0: None
+ - **1**: Forcibly enable internal voltage and temperature channels,
+ except in9
+ - 2: Forcibly enable all voltage and temperature channels, except in9
+ - 3: Forcibly enable all voltage and temperature channels, including in9
Note that this parameter has no effect for the PC87360, PC87363 and PC87364
chips.
@@ -43,13 +50,15 @@ hardware monitoring chipsets, not only controlling and monitoring three fans,
but also monitoring eleven voltage inputs and two (PC87365) or up to four
(PC87366) temperatures.
+ =========== ======= ======= ======= ======= =====
Chip #vin #fan #pwm #temp devid
-
+ =========== ======= ======= ======= ======= =====
PC87360 - 2 2 - 0xE1
PC87363 - 2 2 - 0xE8
PC87364 - 3 3 - 0xE4
PC87365 11 3 3 2 0xE5
PC87366 11 3 3 3-4 0xE9
+ =========== ======= ======= ======= ======= =====
The driver assumes that no more than one chip is present, and one of the
standard Super I/O addresses is used (0x2E/0x2F or 0x4E/0x4F)
@@ -68,18 +77,23 @@ have to care no more.
For reference, here are a few values about clock dividers:
- slowest accuracy highest
- measurable around 3000 accurate
+ =========== =============== =============== ===========
+ slowest accuracy highest
+ measurable around 3000 accurate
divider speed (RPM) RPM (RPM) speed (RPM)
- 1 1882 18 6928
- 2 941 37 4898
- 4 470 74 3464
- 8 235 150 2449
+ =========== =============== =============== ===========
+ 1 1882 18 6928
+ 2 941 37 4898
+ 4 470 74 3464
+ 8 235 150 2449
+ =========== =============== =============== ===========
For the curious, here is how the values above were computed:
+
* slowest measurable speed: clock/(255*divider)
* accuracy around 3000 RPM: 3000^2/clock
* highest accurate speed: sqrt(clock*100)
+
The clock speed for the PC87360 family is 480 kHz. I arbitrarily chose 100
RPM as the lowest acceptable accuracy.
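+
+A quick sanity check of the table: applying the three formulas to the
+post-divider clock (clock/divider) reproduces the listed values, give or
+take one RPM of rounding. A user-space sketch (illustration only)::
+
+  #include <math.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          const double clock = 480000.0; /* PC87360 family fan clock, Hz */
+
+          for (int div = 1; div <= 8; div *= 2) {
+                  double eff = clock / div;     /* post-divider clock */
+
+                  printf("div %d: slowest %d, accuracy ~%d, highest %d RPM\n",
+                         div,
+                         (int)(eff / 255.0),            /* clock/(255*divider) */
+                         (int)(3000.0 * 3000.0 / eff),  /* 3000^2/clock */
+                         (int)sqrt(eff * 100.0));       /* sqrt(clock*100) */
+          }
+          return 0;
+  }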
diff --git a/Documentation/hwmon/pc87427 b/Documentation/hwmon/pc87427.rst
index c313eb66e08a..22d8f62d851f 100644
--- a/Documentation/hwmon/pc87427
+++ b/Documentation/hwmon/pc87427.rst
@@ -2,9 +2,13 @@ Kernel driver pc87427
=====================
Supported chips:
+
* National Semiconductor PC87427
+
Prefix: 'pc87427'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: No longer available
Author: Jean Delvare <jdelvare@suse.de>
diff --git a/Documentation/hwmon/pcf8591 b/Documentation/hwmon/pcf8591.rst
index 447c0702c0ec..e98bd542a441 100644
--- a/Documentation/hwmon/pcf8591
+++ b/Documentation/hwmon/pcf8591.rst
@@ -2,16 +2,21 @@ Kernel driver pcf8591
=====================
Supported chips:
+
* Philips/NXP PCF8591
+
Prefix: 'pcf8591'
+
Addresses scanned: none
+
Datasheet: Publicly available at the NXP website
- http://www.nxp.com/pip/PCF8591_6.html
+
+ http://www.nxp.com/pip/PCF8591_6.html
Authors:
- Aurelien Jarno <aurelien@aurel32.net>
- valuable contributions by Jan M. Sendler <sendler@sendler.de>,
- Jean Delvare <jdelvare@suse.de>
+ - Aurelien Jarno <aurelien@aurel32.net>
+ - valuable contributions by Jan M. Sendler <sendler@sendler.de>,
+ - Jean Delvare <jdelvare@suse.de>
Description
@@ -22,24 +27,25 @@ analog output) for the I2C bus produced by Philips Semiconductors (now NXP).
It is designed to provide a byte I2C interface to up to 4 separate devices.
The PCF8591 has 4 analog inputs programmable as single-ended or
-differential inputs :
+differential inputs:
+
- mode 0 : four single ended inputs
- Pins AIN0 to AIN3 are single ended inputs for channels 0 to 3
+ Pins AIN0 to AIN3 are single ended inputs for channels 0 to 3
- mode 1 : three differential inputs
- Pins AIN3 is the common negative differential input
- Pins AIN0 to AIN2 are positive differential inputs for channels 0 to 2
+      Pin AIN3 is the common negative differential input
+ Pins AIN0 to AIN2 are positive differential inputs for channels 0 to 2
- mode 2 : single ended and differential mixed
- Pins AIN0 and AIN1 are single ended inputs for channels 0 and 1
- Pins AIN2 is the positive differential input for channel 3
- Pins AIN3 is the negative differential input for channel 3
+ Pins AIN0 and AIN1 are single ended inputs for channels 0 and 1
+      Pin AIN2 is the positive differential input for channel 3
+      Pin AIN3 is the negative differential input for channel 3
- mode 3 : two differential inputs
- Pins AIN0 is the positive differential input for channel 0
- Pins AIN1 is the negative differential input for channel 0
- Pins AIN2 is the positive differential input for channel 1
- Pins AIN3 is the negative differential input for channel 1
+      Pin AIN0 is the positive differential input for channel 0
+      Pin AIN1 is the negative differential input for channel 0
+      Pin AIN2 is the positive differential input for channel 1
+      Pin AIN3 is the negative differential input for channel 1
See the datasheet for details.
@@ -49,10 +55,11 @@ Module parameters
* input_mode int
Analog input mode:
- 0 = four single ended inputs
- 1 = three differential inputs
- 2 = single ended and differential mixed
- 3 = two differential inputs
+
+ - 0 = four single ended inputs
+ - 1 = three differential inputs
+ - 2 = single ended and differential mixed
+ - 3 = two differential inputs
Accessing PCF8591 via /sys interface
@@ -67,11 +74,12 @@ for details.
Directories are being created for each instantiated PCF8591:
/sys/bus/i2c/devices/<0>-<1>/
-where <0> is the bus the chip is connected to (e. g. i2c-0)
-and <1> the chip address ([48..4f])
+      where <0> is the bus the chip is connected to (e.g. i2c-0)
+ and <1> the chip address ([48..4f])
Inside these directories, there are such files:
-in0_input, in1_input, in2_input, in3_input, out0_enable, out0_output, name
+
+ in0_input, in1_input, in2_input, in3_input, out0_enable, out0_output, name
Name contains chip name.
diff --git a/Documentation/hwmon/pmbus-core b/Documentation/hwmon/pmbus-core.rst
index 8ed10e9ddfb5..92515c446fe3 100644
--- a/Documentation/hwmon/pmbus-core
+++ b/Documentation/hwmon/pmbus-core.rst
@@ -1,3 +1,4 @@
+==================================
PMBus core driver and internal API
==================================
@@ -120,24 +121,24 @@ Specifically, it provides the following information.
non-standard PMBus commands to standard commands, or to augment standard
command return values with device specific information.
- API functions
- -------------
+API functions
+=============
- Functions provided by chip driver
- ---------------------------------
+Functions provided by chip driver
+---------------------------------
- All functions return the command return value (read) or zero (write) if
- successful. A return value of -ENODATA indicates that there is no manufacturer
- specific command, but that a standard PMBus command may exist. Any other
- negative return value indicates that the commands does not exist for this
- chip, and that no attempt should be made to read or write the standard
- command.
+All functions return the command return value (read) or zero (write) if
+successful. A return value of -ENODATA indicates that there is no manufacturer
+specific command, but that a standard PMBus command may exist. Any other
+negative return value indicates that the command does not exist for this
+chip, and that no attempt should be made to read or write the standard
+command.
- As mentioned above, an exception to this rule applies to virtual commands,
- which _must_ be handled in driver specific code. See "Virtual PMBus Commands"
- above for more details.
+As mentioned above, an exception to this rule applies to virtual commands,
+which *must* be handled in driver specific code. See "Virtual PMBus Commands"
+above for more details.
- Command execution in the core PMBus driver code is as follows.
+Command execution in the core PMBus driver code is as follows::
if (chip_access_function) {
status = chip_access_function();
@@ -148,128 +149,160 @@ Specifically, it provides the following information.
return -EINVAL;
return generic_access();
- Chip drivers may provide pointers to the following functions in struct
- pmbus_driver_info. All functions are optional.
+Chip drivers may provide pointers to the following functions in struct
+pmbus_driver_info. All functions are optional.
+
+::
int (*read_byte_data)(struct i2c_client *client, int page, int reg);
- Read byte from page <page>, register <reg>.
- <page> may be -1, which means "current page".
+Read byte from page <page>, register <reg>.
+<page> may be -1, which means "current page".
+
+
+::
int (*read_word_data)(struct i2c_client *client, int page, int reg);
- Read word from page <page>, register <reg>.
+Read word from page <page>, register <reg>.
+
+::
int (*write_word_data)(struct i2c_client *client, int page, int reg,
- u16 word);
+ u16 word);
- Write word to page <page>, register <reg>.
+Write word to page <page>, register <reg>.
+
+::
int (*write_byte)(struct i2c_client *client, int page, u8 value);
- Write byte to page <page>, register <reg>.
- <page> may be -1, which means "current page".
+Write byte to page <page>, register <reg>.
+<page> may be -1, which means "current page".
+
+::
int (*identify)(struct i2c_client *client, struct pmbus_driver_info *info);
- Determine supported PMBus functionality. This function is only necessary
- if a chip driver supports multiple chips, and the chip functionality is not
- pre-determined. It is currently only used by the generic pmbus driver
- (pmbus.c).
+Determine supported PMBus functionality. This function is only necessary
+if a chip driver supports multiple chips, and the chip functionality is not
+pre-determined. It is currently only used by the generic pmbus driver
+(pmbus.c).
+
+Functions exported by core driver
+---------------------------------
- Functions exported by core driver
- ---------------------------------
+Chip drivers are expected to use the following functions to read or write
+PMBus registers. Chip drivers may also use direct I2C commands. If direct I2C
+commands are used, the chip driver code must not directly modify the current
+page, since the selected page is cached in the core driver and the core driver
+will assume that it is selected. Using pmbus_set_page() to select a new page
+is mandatory.
- Chip drivers are expected to use the following functions to read or write
- PMBus registers. Chip drivers may also use direct I2C commands. If direct I2C
- commands are used, the chip driver code must not directly modify the current
- page, since the selected page is cached in the core driver and the core driver
- will assume that it is selected. Using pmbus_set_page() to select a new page
- is mandatory.
+::
int pmbus_set_page(struct i2c_client *client, u8 page);
- Set PMBus page register to <page> for subsequent commands.
+Set PMBus page register to <page> for subsequent commands.
+
+::
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
- Read word data from <page>, <reg>. Similar to i2c_smbus_read_word_data(), but
- selects page first.
+Read word data from <page>, <reg>. Similar to i2c_smbus_read_word_data(), but
+selects page first.
+
+::
int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
u16 word);
- Write word data to <page>, <reg>. Similar to i2c_smbus_write_word_data(), but
- selects page first.
+Write word data to <page>, <reg>. Similar to i2c_smbus_write_word_data(), but
+selects page first.
+
+::
int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
- Read byte data from <page>, <reg>. Similar to i2c_smbus_read_byte_data(), but
- selects page first. <page> may be -1, which means "current page".
+Read byte data from <page>, <reg>. Similar to i2c_smbus_read_byte_data(), but
+selects page first. <page> may be -1, which means "current page".
+
+::
int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
- Write byte data to <page>, <reg>. Similar to i2c_smbus_write_byte(), but
- selects page first. <page> may be -1, which means "current page".
+Write byte data to <page>, <reg>. Similar to i2c_smbus_write_byte(), but
+selects page first. <page> may be -1, which means "current page".
+
+::
void pmbus_clear_faults(struct i2c_client *client);
- Execute PMBus "Clear Fault" command on all chip pages.
- This function calls the device specific write_byte function if defined.
- Therefore, it must _not_ be called from that function.
+Execute PMBus "Clear Fault" command on all chip pages.
+This function calls the device specific write_byte function if defined.
+Therefore, it must *not* be called from that function.
+
+::
bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
- Check if byte register exists. Return true if the register exists, false
- otherwise.
- This function calls the device specific write_byte function if defined to
- obtain the chip status. Therefore, it must _not_ be called from that function.
+Check if byte register exists. Return true if the register exists, false
+otherwise.
+This function calls the device specific write_byte function if defined to
+obtain the chip status. Therefore, it must *not* be called from that function.
+
+::
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
- Check if word register exists. Return true if the register exists, false
- otherwise.
- This function calls the device specific write_byte function if defined to
- obtain the chip status. Therefore, it must _not_ be called from that function.
+Check if word register exists. Return true if the register exists, false
+otherwise.
+This function calls the device specific write_byte function if defined to
+obtain the chip status. Therefore, it must *not* be called from that function.
+
+::
int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
- struct pmbus_driver_info *info);
+ struct pmbus_driver_info *info);
+
+Execute probe function. Similar to standard probe function for other drivers,
+with the pointer to struct pmbus_driver_info as additional argument. Calls
+identify function if supported. Must only be called from device probe
+function.
- Execute probe function. Similar to standard probe function for other drivers,
- with the pointer to struct pmbus_driver_info as additional argument. Calls
- identify function if supported. Must only be called from device probe
- function.
+::
void pmbus_do_remove(struct i2c_client *client);
- Execute driver remove function. Similar to standard driver remove function.
+Execute driver remove function. Similar to standard driver remove function.
+
+::
const struct pmbus_driver_info
*pmbus_get_driver_info(struct i2c_client *client);
- Return pointer to struct pmbus_driver_info as passed to pmbus_do_probe().
+Return pointer to struct pmbus_driver_info as passed to pmbus_do_probe().
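+
+As an illustration of the conventions above, a minimal hypothetical hook
+(chip and register names made up) could map one vendor register onto a
+standard command and defer everything else to the core::
+
+  #define MYCHIP_MFR_TEMP 0xd0    /* hypothetical vendor register */
+
+  static int mychip_read_word_data(struct i2c_client *client, int page, int reg)
+  {
+          switch (reg) {
+          case PMBUS_READ_TEMPERATURE_1:
+                  /* chip reports its temperature in a vendor register */
+                  return pmbus_read_word_data(client, page, MYCHIP_MFR_TEMP);
+          default:
+                  /* no manufacturer specific command; try the standard one */
+                  return -ENODATA;
+          }
+  }
+
+Hooked up via .read_word_data in the chip's struct pmbus_driver_info, this
+follows the -ENODATA fallback rule described above.
+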
PMBus driver platform data
==========================
PMBus platform data is defined in include/linux/pmbus.h. Platform data
-currently only provides a flag field with a single bit used.
+currently only provides a flag field with a single bit used::
-#define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+ #define PMBUS_SKIP_STATUS_CHECK (1 << 0)
-struct pmbus_platform_data {
- u32 flags; /* Device specific flags */
-};
+ struct pmbus_platform_data {
+ u32 flags; /* Device specific flags */
+ };
Flags
-----
PMBUS_SKIP_STATUS_CHECK
-
-During register detection, skip checking the status register for
-communication or command errors.
+ During register detection, skip checking the status register for
+ communication or command errors.
Some PMBus chips respond with valid data when trying to read an unsupported
register. For such chips, checking the status register is mandatory when
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus.rst
index dfd9c65996c0..abfb9dd4857d 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus.rst
@@ -1,42 +1,77 @@
Kernel driver pmbus
-====================
+===================
Supported chips:
+
* Ericsson BMR453, BMR454
+
Prefixes: 'bmr453', 'bmr454'
+
Addresses scanned: -
+
Datasheet:
+
http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146395
+
* ON Semiconductor ADP4000, NCP4200, NCP4208
+
Prefixes: 'adp4000', 'ncp4200', 'ncp4208'
+
Addresses scanned: -
+
Datasheets:
+
http://www.onsemi.com/pub_link/Collateral/ADP4000-D.PDF
+
http://www.onsemi.com/pub_link/Collateral/NCP4200-D.PDF
+
http://www.onsemi.com/pub_link/Collateral/JUNE%202009-%20REV.%200.PDF
+
* Lineage Power
+
Prefixes: 'mdt040', 'pdt003', 'pdt006', 'pdt012', 'udt020'
+
Addresses scanned: -
+
Datasheets:
+
http://www.lineagepower.com/oem/pdf/PDT003A0X.pdf
+
http://www.lineagepower.com/oem/pdf/PDT006A0X.pdf
+
http://www.lineagepower.com/oem/pdf/PDT012A0X.pdf
+
http://www.lineagepower.com/oem/pdf/UDT020A0X.pdf
+
http://www.lineagepower.com/oem/pdf/MDT040A0X.pdf
+
* Texas Instruments TPS40400, TPS544B20, TPS544B25, TPS544C20, TPS544C25
+
Prefixes: 'tps40400', 'tps544b20', 'tps544b25', 'tps544c20', 'tps544c25'
+
Addresses scanned: -
+
Datasheets:
+
http://www.ti.com/lit/gpn/tps40400
+
http://www.ti.com/lit/gpn/tps544b20
+
http://www.ti.com/lit/gpn/tps544b25
+
http://www.ti.com/lit/gpn/tps544c20
+
http://www.ti.com/lit/gpn/tps544c25
+
* Generic PMBus devices
+
Prefix: 'pmbus'
+
Addresses scanned: -
+
Datasheet: n.a.
+
Author: Guenter Roeck <linux@roeck-us.net>
@@ -62,9 +97,10 @@ supported by all chips), and since there is no well defined address range for
PMBus devices. You will have to instantiate the devices explicitly.
Example: the following will load the driver for an LTC2978 at address 0x60
-on I2C bus #1:
-$ modprobe pmbus
-$ echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe pmbus
+ $ echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
Platform data support
@@ -72,9 +108,9 @@ Platform data support
Support for additional PMBus chips can be added by defining chip parameters in
a new chip specific driver file. For example, (untested) code to add support for
-Emerson DS1200 power modules might look as follows.
+Emerson DS1200 power modules might look as follows::
-static struct pmbus_driver_info ds1200_info = {
+ static struct pmbus_driver_info ds1200_info = {
.pages = 1,
/* Note: All other sensors are in linear mode */
.direct[PSC_VOLTAGE_OUT] = true,
@@ -95,45 +131,45 @@ static struct pmbus_driver_info ds1200_info = {
| PMBUS_HAVE_PIN | PMBUS_HAVE_POUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
| PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
-};
+ };
-static int ds1200_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
+ static int ds1200_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
return pmbus_do_probe(client, id, &ds1200_info);
-}
+ }
-static int ds1200_remove(struct i2c_client *client)
-{
+ static int ds1200_remove(struct i2c_client *client)
+ {
return pmbus_do_remove(client);
-}
+ }
-static const struct i2c_device_id ds1200_id[] = {
+ static const struct i2c_device_id ds1200_id[] = {
{"ds1200", 0},
{}
-};
+ };
-MODULE_DEVICE_TABLE(i2c, ds1200_id);
+ MODULE_DEVICE_TABLE(i2c, ds1200_id);
-/* This is the driver that will be inserted */
-static struct i2c_driver ds1200_driver = {
+ /* This is the driver that will be inserted */
+ static struct i2c_driver ds1200_driver = {
.driver = {
.name = "ds1200",
},
.probe = ds1200_probe,
.remove = ds1200_remove,
.id_table = ds1200_id,
-};
+ };
-static int __init ds1200_init(void)
-{
+ static int __init ds1200_init(void)
+ {
return i2c_add_driver(&ds1200_driver);
-}
+ }
-static void __exit ds1200_exit(void)
-{
+ static void __exit ds1200_exit(void)
+ {
i2c_del_driver(&ds1200_driver);
-}
+ }
Sysfs entries
@@ -148,6 +184,7 @@ a given sysfs entry.
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
inX_input Measured voltage. From READ_VIN or READ_VOUT register.
inX_min Minimum Voltage.
From VIN_UV_WARN_LIMIT or VOUT_UV_WARN_LIMIT register.
@@ -214,3 +251,4 @@ tempX_lcrit_alarm Chip temperature critical low alarm. Set by comparing
tempX_crit_alarm Chip temperature critical high alarm. Set by comparing
READ_TEMPERATURE_X with OT_FAULT_LIMIT if
TEMP_OT_FAULT status is set.
+======================= ========================================================
diff --git a/Documentation/hwmon/powr1220 b/Documentation/hwmon/powr1220.rst
index 21e44f71ae6e..a7fc258da0a8 100644
--- a/Documentation/hwmon/powr1220
+++ b/Documentation/hwmon/powr1220.rst
@@ -1,12 +1,17 @@
Kernel driver powr1220
-==================
+======================
Supported chips:
+
* Lattice POWR1220AT8
+
Prefix: 'powr1220'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Lattice website
- http://www.latticesemi.com/
+
+ http://www.latticesemi.com/
Author: Scott Kanowitz <scott.kanowitz@gmail.com>
@@ -26,7 +31,9 @@ value over the low measurement range maximum of 2 V.
The input naming convention is as follows:
+============== ========
driver name pin name
+============== ========
in0 VMON1
in1 VMON2
in2 VMON3
@@ -41,5 +48,6 @@ in10 VMON11
in11 VMON12
in12 VCCA
in13 VCCINP
+============== ========
The ADC readings are updated on request with a minimum period of 1s.
diff --git a/Documentation/hwmon/pwm-fan b/Documentation/hwmon/pwm-fan.rst
index 18529d2e3bcf..82fe96742fee 100644
--- a/Documentation/hwmon/pwm-fan
+++ b/Documentation/hwmon/pwm-fan.rst
@@ -15,3 +15,6 @@ The driver implements a simple interface for driving a fan connected to
a PWM output. It uses the generic PWM interface, thus it can be used with
a range of SoCs. The driver exposes the fan to the user space through
the hwmon's sysfs interface.
+
+The fan rotation speed returned via the optional 'fan1_input' is extrapolated
+from the interrupts sampled on the tachometer signal over a one-second window.
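+
+A back-of-the-envelope sketch of that extrapolation (names hypothetical;
+most fans emit two tachometer pulses per revolution)::
+
+  static unsigned int tach_to_rpm(unsigned long irqs, unsigned long window_ms,
+                                  unsigned int pulses_per_rev)
+  {
+          if (!window_ms || !pulses_per_rev)
+                  return 0;
+          /* revolutions in the window, scaled to one minute (60000 ms) */
+          return irqs * 60000UL / (pulses_per_rev * window_ms);
+  }
+
+  /* 82 interrupts in 1000 ms with 2 pulses/rev gives 2460 RPM */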
diff --git a/Documentation/hwmon/raspberrypi-hwmon b/Documentation/hwmon/raspberrypi-hwmon.rst
index 3c92e2cb52d6..8038ade36490 100644
--- a/Documentation/hwmon/raspberrypi-hwmon
+++ b/Documentation/hwmon/raspberrypi-hwmon.rst
@@ -2,6 +2,7 @@ Kernel driver raspberrypi-hwmon
===============================
Supported boards:
+
* Raspberry Pi A+ (via GPIO on SoC)
* Raspberry Pi B+ (via GPIO on SoC)
* Raspberry Pi 2 B (via GPIO on SoC)
@@ -19,4 +20,6 @@ undervoltage conditions.
Sysfs entries
-------------
+======================= ==================
in0_lcrit_alarm Undervoltage alarm
+======================= ==================
diff --git a/Documentation/hwmon/sch5627 b/Documentation/hwmon/sch5627.rst
index 0551d266c51c..187682e99114 100644
--- a/Documentation/hwmon/sch5627
+++ b/Documentation/hwmon/sch5627.rst
@@ -2,9 +2,13 @@ Kernel driver sch5627
=====================
Supported chips:
+
* SMSC SCH5627
+
Prefix: 'sch5627'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Application Note available upon request
Author: Hans de Goede <hdegoede@redhat.com>
diff --git a/Documentation/hwmon/sch5636 b/Documentation/hwmon/sch5636.rst
index 7b0a01da0717..4aaee3672f13 100644
--- a/Documentation/hwmon/sch5636
+++ b/Documentation/hwmon/sch5636.rst
@@ -2,8 +2,11 @@ Kernel driver sch5636
=====================
Supported chips:
+
* SMSC SCH5636
+
Prefix: 'sch5636'
+
Addresses scanned: none, address read from Super I/O config space
Author: Hans de Goede <hdegoede@redhat.com>
diff --git a/Documentation/hwmon/scpi-hwmon b/Documentation/hwmon/scpi-hwmon.rst
index 4cfcdf2d5eab..eee7022b44db 100644
--- a/Documentation/hwmon/scpi-hwmon
+++ b/Documentation/hwmon/scpi-hwmon.rst
@@ -2,8 +2,11 @@ Kernel driver scpi-hwmon
========================
Supported chips:
+
* Chips based on ARM System Control Processor Interface
+
Addresses scanned: -
+
Datasheet: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0922b/index.html
Author: Punit Agrawal <punit.agrawal@arm.com>
@@ -14,7 +17,7 @@ Description
This driver supports hardware monitoring for SoC's based on the ARM
System Control Processor (SCP) implementing the System Control
Processor Interface (SCPI). The following sensor types are supported
-by the SCP -
+by the SCP:
* temperature
* voltage
@@ -30,4 +33,4 @@ Usage Notes
The driver relies on device tree node to indicate the presence of SCPI
support in the kernel. See
Documentation/devicetree/bindings/arm/arm,scpi.txt for details of the
-devicetree node. \ No newline at end of file
+devicetree node.
diff --git a/Documentation/hwmon/sht15 b/Documentation/hwmon/sht15.rst
index 5e3207c3b177..485abe037f6c 100644
--- a/Documentation/hwmon/sht15
+++ b/Documentation/hwmon/sht15.rst
@@ -2,29 +2,37 @@ Kernel driver sht15
===================
Authors:
+
* Wouter Horre
* Jonathan Cameron
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
* Jerome Oufella <jerome.oufella@savoirfairelinux.com>
Supported chips:
+
* Sensirion SHT10
+
Prefix: 'sht10'
* Sensirion SHT11
+
Prefix: 'sht11'
* Sensirion SHT15
+
Prefix: 'sht15'
* Sensirion SHT71
+
Prefix: 'sht71'
* Sensirion SHT75
+
Prefix: 'sht75'
Datasheet: Publicly available at the Sensirion website
-http://www.sensirion.ch/en/pdf/product_information/Datasheet-humidity-sensor-SHT1x.pdf
+
+ http://www.sensirion.ch/en/pdf/product_information/Datasheet-humidity-sensor-SHT1x.pdf
Description
-----------
@@ -63,11 +71,13 @@ Platform data
Sysfs interface
---------------
-* temp1_input: temperature input
-* humidity1_input: humidity input
-* heater_enable: write 1 in this attribute to enable the on-chip heater,
- 0 to disable it. Be careful not to enable the heater
- for too long.
-* temp1_fault: if 1, this means that the voltage is low (below 2.47V) and
- measurement may be invalid.
-* humidity1_fault: same as temp1_fault.
+================== ==========================================================
+temp1_input temperature input
+humidity1_input humidity input
+heater_enable write 1 in this attribute to enable the on-chip heater,
+ 0 to disable it. Be careful not to enable the heater
+ for too long.
+temp1_fault if 1, this means that the voltage is low (below 2.47V) and
+ measurement may be invalid.
+humidity1_fault same as temp1_fault.
+================== ==========================================================
diff --git a/Documentation/hwmon/sht21 b/Documentation/hwmon/sht21.rst
index 8b3cdda541c1..f1f5da030108 100644
--- a/Documentation/hwmon/sht21
+++ b/Documentation/hwmon/sht21.rst
@@ -2,19 +2,33 @@ Kernel driver sht21
===================
Supported chips:
+
* Sensirion SHT21
+
Prefix: 'sht21'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Sensirion website
+
http://www.sensirion.com/file/datasheet_sht21
+
+
* Sensirion SHT25
+
Prefix: 'sht25'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Sensirion website
+
http://www.sensirion.com/file/datasheet_sht25
+
+
Author:
+
Urs Fleisch <urs.fleisch@sensirion.com>
Description
@@ -33,9 +47,13 @@ in the board setup code.
sysfs-Interface
---------------
-temp1_input - temperature input
-humidity1_input - humidity input
-eic - Electronic Identification Code
+temp1_input
+ - temperature input
+
+humidity1_input
+   - humidity input
+
+eic
+ - Electronic Identification Code
Notes
-----
diff --git a/Documentation/hwmon/sht3x b/Documentation/hwmon/sht3x.rst
index d9daa6ab1e8e..978a7117e4b2 100644
--- a/Documentation/hwmon/sht3x
+++ b/Documentation/hwmon/sht3x.rst
@@ -2,14 +2,19 @@ Kernel driver sht3x
===================
Supported chips:
+
* Sensirion SHT3x-DIS
+
Prefix: 'sht3x'
+
Addresses scanned: none
+
Datasheet: https://www.sensirion.com/file/datasheet_sht3x_digital
Author:
- David Frey <david.frey@sensirion.com>
- Pascal Sachs <pascal.sachs@sensirion.com>
+
+ - David Frey <david.frey@sensirion.com>
+ - Pascal Sachs <pascal.sachs@sensirion.com>
Description
-----------
@@ -24,6 +29,7 @@ addresses 0x44 or 0x45, depending on the wiring. See
Documentation/i2c/instantiating-devices for methods to instantiate the device.
There are two options configurable by means of sht3x_platform_data:
+
1. blocking (pull the I2C clock line down while performing the measurement) or
non-blocking mode. Blocking mode will guarantee the fastest result but
the I2C bus will be busy during that time. By default, non-blocking mode
@@ -35,12 +41,15 @@ There are two options configurable by means of sht3x_platform_data:
The sht3x sensor supports a single shot mode as well as 5 periodic measure
modes, which can be controlled with the update_interval sysfs interface.
The allowed update_interval values in milliseconds are as follows:
- * 0 single shot mode
- * 2000 0.5 Hz periodic measurement
- * 1000 1 Hz periodic measurement
- * 500 2 Hz periodic measurement
- * 250 4 Hz periodic measurement
- * 100 10 Hz periodic measurement
+
+ ===== ======= ====================
+ 0 single shot mode
+ 2000 0.5 Hz periodic measurement
+ 1000 1 Hz periodic measurement
+ 500 2 Hz periodic measurement
+ 250 4 Hz periodic measurement
+ 100 10 Hz periodic measurement
+ ===== ======= ====================
In the periodic measure mode, the sensor automatically triggers a measurement
with the configured update interval on the chip. When a temperature or humidity
@@ -53,6 +62,7 @@ low.
sysfs-Interface
---------------
+=================== ============================================================
temp1_input: temperature input
humidity1_input: humidity input
temp1_max: temperature max value
@@ -64,13 +74,15 @@ temp1_min_hyst: temperature hysteresis value for min limit
humidity1_min: humidity min value
humidity1_min_hyst: humidity hysteresis value for min limit
temp1_alarm: alarm flag is set to 1 if the temperature is outside the
- configured limits. Alarm only works in periodic measure mode
+ configured limits. Alarm only works in periodic measure mode
humidity1_alarm: alarm flag is set to 1 if the humidity is outside the
- configured limits. Alarm only works in periodic measure mode
+ configured limits. Alarm only works in periodic measure mode
heater_enable: heater enable, heating element removes excess humidity from
- sensor
- 0: turned off
- 1: turned on
+ sensor:
+
+ - 0: turned off
+ - 1: turned on
update_interval: update interval, 0 for single shot, interval in msec
- for periodic measurement. If the interval is not supported
- by the sensor, the next faster interval is chosen
+ for periodic measurement. If the interval is not supported
+ by the sensor, the next faster interval is chosen
+=================== ============================================================
diff --git a/Documentation/hwmon/shtc1 b/Documentation/hwmon/shtc1.rst
index 6b1e05458f0f..aa116332ba26 100644
--- a/Documentation/hwmon/shtc1
+++ b/Documentation/hwmon/shtc1.rst
@@ -2,17 +2,29 @@ Kernel driver shtc1
===================
Supported chips:
+
* Sensirion SHTC1
+
Prefix: 'shtc1'
+
Addresses scanned: none
+
Datasheet: http://www.sensirion.com/file/datasheet_shtc1
+
+
* Sensirion SHTW1
+
Prefix: 'shtw1'
+
Addresses scanned: none
+
Datasheet: Not publicly available
+
+
Author:
+
Johannes Winkelmann <johannes.winkelmann@sensirion.com>
Description
@@ -28,6 +40,7 @@ address 0x70. See Documentation/i2c/instantiating-devices for methods to
instantiate the device.
There are two options configurable by means of shtc1_platform_data:
+
1. blocking (pull the I2C clock line down while performing the measurement) or
non-blocking mode. Blocking mode will guarantee the fastest result but
the I2C bus will be busy during that time. By default, non-blocking mode
@@ -39,5 +52,7 @@ There are two options configurable by means of shtc1_platform_data:
sysfs-Interface
---------------
-temp1_input - temperature input
-humidity1_input - humidity input
+temp1_input
+ - temperature input
+humidity1_input
+ - humidity input
diff --git a/Documentation/hwmon/sis5595 b/Documentation/hwmon/sis5595.rst
index 4f8877a34f37..16123b3bfff9 100644
--- a/Documentation/hwmon/sis5595
+++ b/Documentation/hwmon/sis5595.rst
@@ -2,49 +2,67 @@ Kernel driver sis5595
=====================
Supported chips:
+
* Silicon Integrated Systems Corp. SiS5595 Southbridge Hardware Monitor
+
Prefix: 'sis5595'
+
Addresses scanned: ISA in PCI-space encoded address
+
Datasheet: Publicly available at the Silicon Integrated Systems Corp. site.
+
+
Authors:
- Kyösti Mälkki <kmalkki@cc.hut.fi>,
- Mark D. Studebaker <mdsxyz123@yahoo.com>,
- Aurelien Jarno <aurelien@aurel32.net> 2.6 port
+
+ - Kyösti Mälkki <kmalkki@cc.hut.fi>,
+ - Mark D. Studebaker <mdsxyz123@yahoo.com>,
+ - Aurelien Jarno <aurelien@aurel32.net> 2.6 port
SiS southbridge has a LM78-like chip integrated on the same IC.
This driver is a customized copy of lm78.c
Supports following revisions:
+
+ =============== =============== ==============
Version PCI ID PCI Revision
+ =============== =============== ==============
1 1039/0008 AF or less
2 1039/0008 B0 or greater
+ =============== =============== ==============
Note: these chips contain a 0008 device which is incompatible with the
- 5595. We recognize these by the presence of the listed
- "blacklist" PCI ID and refuse to load.
+ 5595. We recognize these by the presence of the listed
+ "blacklist" PCI ID and refuse to load.
+ =================== =============== ================
NOT SUPPORTED PCI ID BLACKLIST PCI ID
- 540 0008 0540
- 550 0008 0550
+ =================== =============== ================
+ 540 0008 0540
+ 550 0008 0550
5513 0008 5511
5581 0008 5597
5582 0008 5597
5597 0008 5597
- 630 0008 0630
- 645 0008 0645
- 730 0008 0730
- 735 0008 0735
+ 630 0008 0630
+ 645 0008 0645
+ 730 0008 0730
+ 735 0008 0735
+ =================== =============== ================
Module Parameters
-----------------
+
+======================= =====================================================
force_addr=0xaddr Set the I/O base address. Useful for boards
that don't set the address in the BIOS. Does not do a
PCI force; the device must still be present in lspci.
Don't use this unless the driver complains that the
base address is not set.
+
Example: 'modprobe sis5595 force_addr=0x290'
+======================= =====================================================
Description
@@ -103,4 +121,3 @@ Problems
--------
Some chips refuse to be enabled. We don't know why.
The driver will recognize this and print a message in dmesg.
-
diff --git a/Documentation/hwmon/smm665 b/Documentation/hwmon/smm665.rst
index a341eeedab75..a0e27f62b57b 100644
--- a/Documentation/hwmon/smm665
+++ b/Documentation/hwmon/smm665.rst
@@ -2,31 +2,57 @@ Kernel driver smm665
====================
Supported chips:
+
* Summit Microelectronics SMM465
+
Prefix: 'smm465'
+
Addresses scanned: -
+
Datasheet:
+
http://www.summitmicro.com/prod_select/summary/SMM465/SMM465DS.pdf
+
* Summit Microelectronics SMM665, SMM665B
+
Prefix: 'smm665'
+
Addresses scanned: -
+
Datasheet:
+
http://www.summitmicro.com/prod_select/summary/SMM665/SMM665B_2089_20.pdf
+
* Summit Microelectronics SMM665C
+
Prefix: 'smm665c'
+
Addresses scanned: -
+
Datasheet:
+
http://www.summitmicro.com/prod_select/summary/SMM665C/SMM665C_2125.pdf
+
* Summit Microelectronics SMM764
+
Prefix: 'smm764'
+
Addresses scanned: -
+
Datasheet:
+
http://www.summitmicro.com/prod_select/summary/SMM764/SMM764_2098.pdf
+
* Summit Microelectronics SMM766, SMM766B
+
Prefix: 'smm766'
+
Addresses scanned: -
+
Datasheets:
+
http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf
+
http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -36,9 +62,10 @@ Module Parameters
-----------------
* vref: int
- Default: 1250 (mV)
- Reference voltage on VREF_ADC pin in mV. It should not be necessary to set
- this parameter unless a non-default reference voltage is used.
+ Default: 1250 (mV)
+
+ Reference voltage on VREF_ADC pin in mV. It should not be necessary to set
+ this parameter unless a non-default reference voltage is used.
Description
@@ -64,9 +91,10 @@ the devices explicitly. When instantiating the device, you have to specify
its configuration register address.
Example: the following will load the driver for an SMM665 at address 0x57
-on I2C bus #1:
-$ modprobe smm665
-$ echo smm665 0x57 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe smm665
+ $ echo smm665 0x57 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -84,6 +112,7 @@ max otherwise. For details please see the SMM665 datasheet.
For SMM465 and SMM764, values for Channel E and F are reported but undefined.
+======================= =======================================================
in1_input 12V input voltage (mV)
in2_input 3.3V (VDD) input voltage (mV)
in3_input Channel A voltage (mV)
temp1_min               Minimum chip temperature
temp1_max Maximum chip temperature
temp1_crit Critical chip temperature
temp1_crit_alarm Temperature critical alarm
+======================= =======================================================
diff --git a/Documentation/hwmon/smsc47b397 b/Documentation/hwmon/smsc47b397.rst
index 3a43b6948924..600194cf1804 100644
--- a/Documentation/hwmon/smsc47b397
+++ b/Documentation/hwmon/smsc47b397.rst
@@ -2,29 +2,38 @@ Kernel driver smsc47b397
========================
Supported chips:
+
* SMSC LPC47B397-NC
+
* SMSC SCH5307-NS
+
* SMSC SCH5317
+
Prefix: 'smsc47b397'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: In this file
-Authors: Mark M. Hoffman <mhoffman@lightlink.com>
- Utilitek Systems, Inc.
+Authors:
+
+ - Mark M. Hoffman <mhoffman@lightlink.com>
+ - Utilitek Systems, Inc.
November 23, 2004
-The following specification describes the SMSC LPC47B397-NC[1] sensor chip
+The following specification describes the SMSC LPC47B397-NC [1]_ sensor chip
(for which there is no public datasheet available). This document was
provided by Craig Kelly (In-Store Broadcast Network) and edited/corrected
by Mark M. Hoffman <mhoffman@lightlink.com>.
-[1] And SMSC SCH5307-NS and SCH5317, which have different device IDs but are
-otherwise compatible.
+.. [1] And SMSC SCH5307-NS and SCH5317, which have different device IDs but are
+ otherwise compatible.
-* * * * *
+-------------------------------------------------------------------------
-Methods for detecting the HP SIO and reading the thermal data on a dc7100.
+Methods for detecting the HP SIO and reading the thermal data on a dc7100
+-------------------------------------------------------------------------
The thermal information on the dc7100 is contained in the SIO Hardware Monitor
(HWM). The information is accessed through an index/data pair. The index/data
@@ -35,18 +44,22 @@ and 0x61 (LSB). Currently we are using 0x480 for the HWM Base Address and
Reading temperature information.
The temperature information is located in the following registers:
+
+=============== ======= =======================================================
Temp1 0x25 (Currently, this reflects the CPU temp on all systems).
Temp2 0x26
Temp3 0x27
Temp4 0x80
+=============== ======= =======================================================
Programming Example
-The following is an example of how to read the HWM temperature registers:
-MOV DX,480H
-MOV AX,25H
-OUT DX,AL
-MOV DX,481H
-IN AL,DX
+The following is an example of how to read the HWM temperature registers::
+
+ MOV DX,480H
+ MOV AX,25H
+ OUT DX,AL
+ MOV DX,481H
+ IN AL,DX
AL contains the data in hex, the temperature in Celsius is the decimal
equivalent.
@@ -55,25 +68,32 @@ Ex: If AL contains 0x2A, the temperature is 42 degrees C.
Reading tach information.
The fan speed information is located in the following registers:
+
+=============== ======= ======= =================================
LSB MSB
Tach1 0x28 0x29 (Currently, this reflects the CPU
fan speed on all systems).
Tach2 0x2A 0x2B
Tach3 0x2C 0x2D
Tach4 0x2E 0x2F
+=============== ======= ======= =================================
-Important!!!
-Reading the tach LSB locks the tach MSB.
-The LSB Must be read first.
+.. Important::
+
+ Reading the tach LSB locks the tach MSB.
+   The LSB must be read first.
+
+How to convert the tach reading to RPM
+--------------------------------------
-How to convert the tach reading to RPM.
The tach reading (TCount) is given by: (Tach MSB * 256) + (Tach LSB)
The SIO counts the number of 90kHz (11.111us) pulses per revolution.
RPM = 60/(TCount * 11.111us)
-Example:
-Reg 0x28 = 0x9B
-Reg 0x29 = 0x08
+Example::
+
+ Reg 0x28 = 0x9B
+ Reg 0x29 = 0x08
TCount = 0x89B = 2203
@@ -81,21 +101,28 @@ RPM = 60 / (2203 * 11.11111 E-6) = 2451 RPM
Obtaining the SIO version.
-CONFIGURATION SEQUENCE
+Configuration Sequence
+----------------------
+
To program the configuration registers, the following sequence must be followed:
1. Enter Configuration Mode
2. Configure the Configuration Registers
3. Exit Configuration Mode.
Enter Configuration Mode
+^^^^^^^^^^^^^^^^^^^^^^^^
+
To place the chip into the Configuration State The config key (0x55) is written
to the CONFIG PORT (0x2E).
Configuration Mode
+^^^^^^^^^^^^^^^^^^
+
In configuration mode, the INDEX PORT is located at the CONFIG PORT address and
the DATA PORT is at INDEX PORT address + 1.
The desired configuration registers are accessed in two steps:
+
a. Write the index of the Logical Device Number Configuration Register
(i.e., 0x07) to the INDEX PORT and then write the number of the
desired logical device to the DATA PORT.
@@ -104,30 +131,35 @@ b. Write the address of the desired configuration register within the
logical device to the INDEX PORT and then write or read the config-
uration register through the DATA PORT.
-Note: If accessing the Global Configuration Registers, step (a) is not required.
+Note:
+ If accessing the Global Configuration Registers, step (a) is not required.
Exit Configuration Mode
+^^^^^^^^^^^^^^^^^^^^^^^
+
To exit the Configuration State, write 0xAA to the CONFIG PORT (0x2E).
The chip returns to the RUN State. (This is important).
Programming Example
-The following is an example of how to read the SIO Device ID located at 0x20
-
-; ENTER CONFIGURATION MODE
-MOV DX,02EH
-MOV AX,055H
-OUT DX,AL
-; GLOBAL CONFIGURATION REGISTER
-MOV DX,02EH
-MOV AL,20H
-OUT DX,AL
-; READ THE DATA
-MOV DX,02FH
-IN AL,DX
-; EXIT CONFIGURATION MODE
-MOV DX,02EH
-MOV AX,0AAH
-OUT DX,AL
+^^^^^^^^^^^^^^^^^^^
+
+The following is an example of how to read the SIO Device ID located at 0x20::
+
+ ; ENTER CONFIGURATION MODE
+ MOV DX,02EH
+ MOV AX,055H
+ OUT DX,AL
+ ; GLOBAL CONFIGURATION REGISTER
+ MOV DX,02EH
+ MOV AL,20H
+ OUT DX,AL
+ ; READ THE DATA
+ MOV DX,02FH
+ IN AL,DX
+ ; EXIT CONFIGURATION MODE
+ MOV DX,02EH
+ MOV AX,0AAH
+ OUT DX,AL
The registers of interest for identifying the SIO on the dc7100 are Device ID
(0x20) and Device Rev (0x21).
@@ -135,29 +167,31 @@ The registers of interest for identifying the SIO on the dc7100 are Device ID
The Device ID will read 0x6F (0x81 for SCH5307-NS, and 0x85 for SCH5317)
The Device Rev currently reads 0x01
-Obtaining the HWM Base Address.
+Obtaining the HWM Base Address
+------------------------------
+
The following is an example of how to read the HWM Base Address located in
-Logical Device 8.
-
-; ENTER CONFIGURATION MODE
-MOV DX,02EH
-MOV AX,055H
-OUT DX,AL
-; CONFIGURE REGISTER CRE0,
-; LOGICAL DEVICE 8
-MOV DX,02EH
-MOV AL,07H
-OUT DX,AL ;Point to LD# Config Reg
-MOV DX,02FH
-MOV AL, 08H
-OUT DX,AL;Point to Logical Device 8
-;
-MOV DX,02EH
-MOV AL,60H
-OUT DX,AL ; Point to HWM Base Addr MSB
-MOV DX,02FH
-IN AL,DX ; Get MSB of HWM Base Addr
-; EXIT CONFIGURATION MODE
-MOV DX,02EH
-MOV AX,0AAH
-OUT DX,AL
+Logical Device 8::
+
+ ; ENTER CONFIGURATION MODE
+ MOV DX,02EH
+ MOV AX,055H
+ OUT DX,AL
+ ; CONFIGURE REGISTER CRE0,
+ ; LOGICAL DEVICE 8
+ MOV DX,02EH
+ MOV AL,07H
+ OUT DX,AL ;Point to LD# Config Reg
+ MOV DX,02FH
+ MOV AL, 08H
+	OUT DX,AL ;Point to Logical Device 8
+ ;
+ MOV DX,02EH
+ MOV AL,60H
+ OUT DX,AL ; Point to HWM Base Addr MSB
+ MOV DX,02FH
+ IN AL,DX ; Get MSB of HWM Base Addr
+ ; EXIT CONFIGURATION MODE
+ MOV DX,02EH
+ MOV AX,0AAH
+ OUT DX,AL
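+
+Tying the tach registers and the RPM formula above together, an
+illustrative helper (not from the driver) would be::
+
+  /* RPM = 60 / (TCount * 11.111us) = 5400000 / TCount */
+  static unsigned int tcount_to_rpm(unsigned char lsb, unsigned char msb)
+  {
+          /* the LSB must be read first, since reading it latches the MSB */
+          unsigned int tcount = ((unsigned int)msb << 8) | lsb;
+
+          return tcount ? 5400000U / tcount : 0;
+  }
+
+  /* lsb = 0x9B, msb = 0x08: tcount = 2203, i.e. 2451 RPM as above */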
diff --git a/Documentation/hwmon/smsc47m1 b/Documentation/hwmon/smsc47m1.rst
index 10a24b420686..c54eabd5eb57 100644
--- a/Documentation/hwmon/smsc47m1
+++ b/Documentation/hwmon/smsc47m1.rst
@@ -2,30 +2,53 @@ Kernel driver smsc47m1
======================
Supported chips:
+
* SMSC LPC47B27x, LPC47M112, LPC47M10x, LPC47M13x, LPC47M14x,
+
LPC47M15x and LPC47M192
+
Addresses scanned: none, address read from Super I/O config space
+
Prefix: 'smsc47m1'
+
Datasheets:
- http://www.smsc.com/media/Downloads_Public/Data_Sheets/47b272.pdf
- http://www.smsc.com/media/Downloads_Public/Data_Sheets/47m10x.pdf
- http://www.smsc.com/media/Downloads_Public/Data_Sheets/47m112.pdf
- http://www.smsc.com/
+
+ http://www.smsc.com/media/Downloads_Public/Data_Sheets/47b272.pdf
+
+ http://www.smsc.com/media/Downloads_Public/Data_Sheets/47m10x.pdf
+
+ http://www.smsc.com/media/Downloads_Public/Data_Sheets/47m112.pdf
+
+ http://www.smsc.com/
+
* SMSC LPC47M292
+
Addresses scanned: none, address read from Super I/O config space
+
Prefix: 'smsc47m2'
+
Datasheet: Not public
+
* SMSC LPC47M997
+
Addresses scanned: none, address read from Super I/O config space
+
Prefix: 'smsc47m1'
+
Datasheet: none
+
+
Authors:
- Mark D. Studebaker <mdsxyz123@yahoo.com>,
- With assistance from Bruce Allen <ballen@uwm.edu>, and his
- fan.c program: http://www.lsc-group.phys.uwm.edu/%7Eballen/driver/
- Gabriele Gorla <gorlik@yahoo.com>,
- Jean Delvare <jdelvare@suse.de>
+
+ - Mark D. Studebaker <mdsxyz123@yahoo.com>,
+ - With assistance from Bruce Allen <ballen@uwm.edu>, and his
+ fan.c program:
+
+ - http://www.lsc-group.phys.uwm.edu/%7Eballen/driver/
+
+ - Gabriele Gorla <gorlik@yahoo.com>,
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
@@ -57,7 +80,7 @@ hardware registers are read whenever any data is read (unless it is less
than 1.5 seconds since the last update). This means that you can easily
miss once-only alarms.
+------------------------------------------------------------------
-**********************
The lm_sensors project gratefully acknowledges the support of
Intel in the development of this driver.
diff --git a/Documentation/hwmon/smsc47m192 b/Documentation/hwmon/smsc47m192
deleted file mode 100644
index 6d54ecb7b3f8..000000000000
--- a/Documentation/hwmon/smsc47m192
+++ /dev/null
@@ -1,103 +0,0 @@
-Kernel driver smsc47m192
-========================
-
-Supported chips:
- * SMSC LPC47M192, LPC47M15x, LPC47M292 and LPC47M997
- Prefix: 'smsc47m192'
- Addresses scanned: I2C 0x2c - 0x2d
- Datasheet: The datasheet for LPC47M192 is publicly available from
- http://www.smsc.com/
- The LPC47M15x, LPC47M292 and LPC47M997 are compatible for
- hardware monitoring.
-
-Author: Hartmut Rick <linux@rick.claranet.de>
- Special thanks to Jean Delvare for careful checking
- of the code and many helpful comments and suggestions.
-
-
-Description
------------
-
-This driver implements support for the hardware sensor capabilities
-of the SMSC LPC47M192 and compatible Super-I/O chips.
-
-These chips support 3 temperature channels and 8 voltage inputs
-as well as CPU voltage VID input.
-
-They do also have fan monitoring and control capabilities, but the
-these features are accessed via ISA bus and are not supported by this
-driver. Use the 'smsc47m1' driver for fan monitoring and control.
-
-Voltages and temperatures are measured by an 8-bit ADC, the resolution
-of the temperatures is 1 bit per degree C.
-Voltages are scaled such that the nominal voltage corresponds to
-192 counts, i.e. 3/4 of the full range. Thus the available range for
-each voltage channel is 0V ... 255/192*(nominal voltage), the resolution
-is 1 bit per (nominal voltage)/192.
-Both voltage and temperature values are scaled by 1000, the sys files
-show voltages in mV and temperatures in units of 0.001 degC.
-
-The +12V analog voltage input channel (in4_input) is multiplexed with
-bit 4 of the encoded CPU voltage. This means that you either get
-a +12V voltage measurement or a 5 bit CPU VID, but not both.
-The default setting is to use the pin as 12V input, and use only 4 bit VID.
-This driver assumes that the information in the configuration register
-is correct, i.e. that the BIOS has updated the configuration if
-the motherboard has this input wired to VID4.
-
-The temperature and voltage readings are updated once every 1.5 seconds.
-Reading them more often repeats the same values.
-
-
-sysfs interface
----------------
-
-in0_input - +2.5V voltage input
-in1_input - CPU voltage input (nominal 2.25V)
-in2_input - +3.3V voltage input
-in3_input - +5V voltage input
-in4_input - +12V voltage input (may be missing if used as VID4)
-in5_input - Vcc voltage input (nominal 3.3V)
- This is the supply voltage of the sensor chip itself.
-in6_input - +1.5V voltage input
-in7_input - +1.8V voltage input
-
-in[0-7]_min,
-in[0-7]_max - lower and upper alarm thresholds for in[0-7]_input reading
-
- All voltages are read and written in mV.
-
-in[0-7]_alarm - alarm flags for voltage inputs
- These files read '1' in case of alarm, '0' otherwise.
-
-temp1_input - chip temperature measured by on-chip diode
-temp[2-3]_input - temperature measured by external diodes (one of these would
- typically be wired to the diode inside the CPU)
-
-temp[1-3]_min,
-temp[1-3]_max - lower and upper alarm thresholds for temperatures
-
-temp[1-3]_offset - temperature offset registers
- The chip adds the offsets stored in these registers to
- the corresponding temperature readings.
- Note that temp1 and temp2 offsets share the same register,
- they cannot both be different from zero at the same time.
- Writing a non-zero number to one of them will reset the other
- offset to zero.
-
- All temperatures and offsets are read and written in
- units of 0.001 degC.
-
-temp[1-3]_alarm - alarm flags for temperature inputs, '1' in case of alarm,
- '0' otherwise.
-temp[2-3]_input_fault - diode fault flags for temperature inputs 2 and 3.
- A fault is detected if the two pins for the corresponding
- sensor are open or shorted, or any of the two is shorted
- to ground or Vcc. '1' indicates a diode fault.
-
-cpu0_vid - CPU voltage as received from the CPU
-
-vrm - CPU VID standard used for decoding CPU voltage
-
- The *_min, *_max, *_offset and vrm files can be read and
- written, all others are read-only.
diff --git a/Documentation/hwmon/smsc47m192.rst b/Documentation/hwmon/smsc47m192.rst
new file mode 100644
index 000000000000..a2e86ab67918
--- /dev/null
+++ b/Documentation/hwmon/smsc47m192.rst
@@ -0,0 +1,116 @@
+Kernel driver smsc47m192
+========================
+
+Supported chips:
+
+ * SMSC LPC47M192, LPC47M15x, LPC47M292 and LPC47M997
+
+ Prefix: 'smsc47m192'
+
+ Addresses scanned: I2C 0x2c - 0x2d
+
+ Datasheet: The datasheet for LPC47M192 is publicly available from
+
+ http://www.smsc.com/
+
+             The LPC47M15x, LPC47M292 and LPC47M997 are compatible for
+             hardware monitoring.
+
+
+
+Author:
+ - Hartmut Rick <linux@rick.claranet.de>
+
+ - Special thanks to Jean Delvare for careful checking
+ of the code and many helpful comments and suggestions.
+
+
+Description
+-----------
+
+This driver implements support for the hardware sensor capabilities
+of the SMSC LPC47M192 and compatible Super-I/O chips.
+
+These chips support 3 temperature channels and 8 voltage inputs
+as well as CPU voltage VID input.
+
+They also have fan monitoring and control capabilities, but these
+features are accessed via the ISA bus and are not supported by this
+driver. Use the 'smsc47m1' driver for fan monitoring and control.
+
+Voltages and temperatures are measured by an 8-bit ADC; the temperature
+resolution is 1 degree C per bit.
+Voltages are scaled such that the nominal voltage corresponds to
+192 counts, i.e. 3/4 of the full range. Thus the available range for
+each voltage channel is 0V ... 255/192*(nominal voltage), with a
+resolution of (nominal voltage)/192 per bit.
+Both voltage and temperature values are scaled by 1000, so the sysfs files
+show voltages in mV and temperatures in units of 0.001 degC.
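+
+As a worked example (with an illustrative raw count, not a value from the
+datasheet), a reading of 160 on the +5V channel (in3) corresponds to::
+
+    160 / 192 * 5000 mV = 4167 mV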
+
+The +12V analog voltage input channel (in4_input) is multiplexed with
+bit 4 of the encoded CPU voltage. This means that you either get
+a +12V voltage measurement or a 5 bit CPU VID, but not both.
+The default setting is to use the pin as 12V input, and use only 4 bit VID.
+This driver assumes that the information in the configuration register
+is correct, i.e. that the BIOS has updated the configuration if
+the motherboard has this input wired to VID4.
+
+The temperature and voltage readings are updated once every 1.5 seconds.
+Reading them more often repeats the same values.
+
+
+sysfs interface
+---------------
+
+===================== ==========================================================
+in0_input +2.5V voltage input
+in1_input CPU voltage input (nominal 2.25V)
+in2_input +3.3V voltage input
+in3_input +5V voltage input
+in4_input +12V voltage input (may be missing if used as VID4)
+in5_input Vcc voltage input (nominal 3.3V)
+ This is the supply voltage of the sensor chip itself.
+in6_input +1.5V voltage input
+in7_input +1.8V voltage input
+
+in[0-7]_min,
+in[0-7]_max lower and upper alarm thresholds for in[0-7]_input reading
+
+ All voltages are read and written in mV.
+
+in[0-7]_alarm alarm flags for voltage inputs
+ These files read '1' in case of alarm, '0' otherwise.
+
+temp1_input chip temperature measured by on-chip diode
+temp[2-3]_input temperature measured by external diodes (one of these
+ would typically be wired to the diode inside the CPU)
+
+temp[1-3]_min,
+temp[1-3]_max lower and upper alarm thresholds for temperatures
+
+temp[1-3]_offset temperature offset registers
+ The chip adds the offsets stored in these registers to
+ the corresponding temperature readings.
+ Note that temp1 and temp2 offsets share the same register,
+ they cannot both be different from zero at the same time.
+ Writing a non-zero number to one of them will reset the other
+ offset to zero.
+
+ All temperatures and offsets are read and written in
+ units of 0.001 degC.
+
+temp[1-3]_alarm alarm flags for temperature inputs, '1' in case of alarm,
+ '0' otherwise.
+temp[2-3]_input_fault diode fault flags for temperature inputs 2 and 3.
+ A fault is detected if the two pins for the corresponding
+ sensor are open or shorted, or any of the two is shorted
+ to ground or Vcc. '1' indicates a diode fault.
+
+cpu0_vid CPU voltage as received from the CPU
+
+vrm CPU VID standard used for decoding CPU voltage
+===================== ==========================================================
+
+The `*_min`, `*_max`, `*_offset` and `vrm` files can be read and written,
+all others are read-only.
diff --git a/Documentation/hwmon/submitting-patches b/Documentation/hwmon/submitting-patches.rst
index f88221b46153..f9796b9d9db6 100644
--- a/Documentation/hwmon/submitting-patches
+++ b/Documentation/hwmon/submitting-patches.rst
@@ -1,5 +1,5 @@
- How to Get Your Patch Accepted Into the Hwmon Subsystem
- -------------------------------------------------------
+How to Get Your Patch Accepted Into the Hwmon Subsystem
+=======================================================
This text is a collection of suggestions for people writing patches or
drivers for the hwmon subsystem. Following these suggestions will greatly
@@ -9,11 +9,12 @@ increase the chances of your change being accepted.
1. General
----------
-* It should be unnecessary to mention, but please read and follow
- Documentation/process/submit-checklist.rst
- Documentation/process/submitting-drivers.rst
- Documentation/process/submitting-patches.rst
- Documentation/process/coding-style.rst
+* It should be unnecessary to mention, but please read and follow:
+
+ - Documentation/process/submit-checklist.rst
+ - Documentation/process/submitting-drivers.rst
+ - Documentation/process/submitting-patches.rst
+ - Documentation/process/coding-style.rst
* Please run your patch through 'checkpatch --strict'. There should be no
errors, no warnings, and few if any check messages. If there are any
@@ -38,7 +39,7 @@ increase the chances of your change being accepted.
2. Adding functionality to existing drivers
-------------------------------------------
-* Make sure the documentation in Documentation/hwmon/<driver_name> is up to
+* Make sure the documentation in Documentation/hwmon/<driver_name>.rst is up to
date.
* Make sure the information in Kconfig is up to date.
@@ -60,7 +61,7 @@ increase the chances of your change being accepted.
* Consider adding yourself to MAINTAINERS.
-* Document the driver in Documentation/hwmon/<driver_name>.
+* Document the driver in Documentation/hwmon/<driver_name>.rst.
* Add the driver to Kconfig and Makefile in alphabetical order.
@@ -133,7 +134,7 @@ increase the chances of your change being accepted.
non-standard attributes, or you believe you do, discuss it on the mailing list
first. Either case, provide a detailed explanation why you need the
non-standard attribute(s).
- Standard attributes are specified in Documentation/hwmon/sysfs-interface.
+ Standard attributes are specified in Documentation/hwmon/sysfs-interface.rst.
* When deciding which sysfs attributes to support, look at the chip's
capabilities. While we do not expect your driver to support everything the
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface.rst
index 2b9e1005d88b..fd590633bb14 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface.rst
@@ -1,5 +1,5 @@
Naming and data format standards for sysfs files
-------------------------------------------------
+================================================
The libsensors library offers an interface to the raw sensors data
through the sysfs interface. Since lm-sensors 3.0.0, libsensors is
@@ -32,7 +32,7 @@ this reason, it is still not recommended to bypass the library.
Each chip gets its own directory in the sysfs /sys/devices tree. To
find all sensor chips, it is easier to follow the device symlinks from
-/sys/class/hwmon/hwmon*.
+`/sys/class/hwmon/hwmon*`.
Up to lm-sensors 3.0.0, libsensors looks for hardware monitoring attributes
in the "physical" device directory. Since lm-sensors 3.0.1, attributes found
@@ -67,11 +67,13 @@ are interpreted as 0! For more on how written strings are interpreted see the
-------------------------------------------------------------------------
-[0-*] denotes any positive number starting from 0
-[1-*] denotes any positive number starting from 1
+======= ===========================================
+`[0-*]` denotes any positive number starting from 0
+`[1-*]` denotes any positive number starting from 1
RO read only value
WO write only value
RW read/write value
+======= ===========================================
Read/write values may be read-only for some chips, depending on the
hardware implementation.
@@ -80,57 +82,82 @@ All entries (except name) are optional, and should only be created in a
given driver if the chip has the feature.
-*********************
-* Global attributes *
-*********************
+*****************
+Global attributes
+*****************
-name The chip name.
+`name`
+ The chip name.
This should be a short, lowercase string, not containing
whitespace, dashes, or the wildcard character '*'.
This attribute represents the chip name. It is the only
mandatory attribute.
I2C devices get this attribute created automatically.
+
RO
-update_interval The interval at which the chip will update readings.
+`update_interval`
+ The interval at which the chip will update readings.
Unit: millisecond
+
RW
+
Some devices have a variable update rate or interval.
This attribute can be used to change it to the desired value.
-************
-* Voltages *
-************
+********
+Voltages
+********
+
+`in[0-*]_min`
+ Voltage min value.
-in[0-*]_min Voltage min value.
Unit: millivolt
+
RW
-
-in[0-*]_lcrit Voltage critical min value.
+
+`in[0-*]_lcrit`
+ Voltage critical min value.
+
Unit: millivolt
+
RW
+
If voltage drops to or below this limit, the system may
take drastic action such as power down or reset. At the very
least, it should report a fault.
-in[0-*]_max Voltage max value.
+`in[0-*]_max`
+ Voltage max value.
+
Unit: millivolt
+
RW
-
-in[0-*]_crit Voltage critical max value.
+
+`in[0-*]_crit`
+ Voltage critical max value.
+
Unit: millivolt
+
RW
+
If voltage reaches or exceeds this limit, the system may
take drastic action such as power down or reset. At the very
least, it should report a fault.
-in[0-*]_input Voltage input value.
+`in[0-*]_input`
+ Voltage input value.
+
Unit: millivolt
+
RO
+
Voltage measured on the chip pin.
+
Actual voltage depends on the scaling resistors on the
motherboard, as recommended in the chip datasheet.
+
This varies by chip and by motherboard.
Because of this variation, values are generally NOT scaled
by the chip driver, and must be done by the application.
@@ -140,166 +167,232 @@ in[0-*]_input Voltage input value.
thumb: drivers should report the voltage values at the
"pins" of the chip.
-in[0-*]_average
+`in[0-*]_average`
Average voltage
+
Unit: millivolt
+
RO
-in[0-*]_lowest
+`in[0-*]_lowest`
Historical minimum voltage
+
Unit: millivolt
+
RO
-in[0-*]_highest
+`in[0-*]_highest`
Historical maximum voltage
+
Unit: millivolt
+
RO
-in[0-*]_reset_history
+`in[0-*]_reset_history`
Reset inX_lowest and inX_highest
+
WO
-in_reset_history
+`in_reset_history`
Reset inX_lowest and inX_highest for all sensors
+
WO
-in[0-*]_label Suggested voltage channel label.
+`in[0-*]_label`
+ Suggested voltage channel label.
+
Text string
+
Should only be created if the driver has hints about what
this voltage channel is being used for, and user-space
doesn't. In all other cases, the label is provided by
user-space.
+
RO
-in[0-*]_enable
+`in[0-*]_enable`
Enable or disable the sensors.
+
When disabled the sensor read will return -ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
-cpu[0-*]_vid CPU core reference voltage.
+`cpu[0-*]_vid`
+ CPU core reference voltage.
+
Unit: millivolt
+
RO
+
Not always correct.
-vrm Voltage Regulator Module version number.
+`vrm`
+ Voltage Regulator Module version number.
+
RW (but changing it should no more be necessary)
+
Originally the VRM standard version multiplied by 10, but now
an arbitrary number, as not all standards have a version
number.
+
Affects the way the driver calculates the CPU core reference
voltage from the vid pins.
Also see the Alarms section for status flags associated with voltages.
-********
-* Fans *
-********
+****
+Fans
+****
+
+`fan[1-*]_min`
+ Fan minimum value
-fan[1-*]_min Fan minimum value
Unit: revolution/min (RPM)
+
RW
-fan[1-*]_max Fan maximum value
+`fan[1-*]_max`
+ Fan maximum value
+
Unit: revolution/min (RPM)
+
Only rarely supported by the hardware.
RW
-fan[1-*]_input Fan input value.
+`fan[1-*]_input`
+ Fan input value.
+
Unit: revolution/min (RPM)
+
RO
-fan[1-*]_div Fan divisor.
+`fan[1-*]_div`
+ Fan divisor.
+
Integer value in powers of two (1, 2, 4, 8, 16, 32, 64, 128).
+
RW
+
Some chips only support values 1, 2, 4 and 8.
Note that this is actually an internal clock divisor, which
affects the measurable speed range, not the read value.
-fan[1-*]_pulses Number of tachometer pulses per fan revolution.
+`fan[1-*]_pulses`
+ Number of tachometer pulses per fan revolution.
+
Integer value, typically between 1 and 4.
+
RW
+
This value is a characteristic of the fan connected to the
device's input, so it has to be set in accordance with the fan
model.
+
Should only be created if the chip has a register to configure
the number of pulses. In the absence of such a register (and
thus attribute) the value assumed by all devices is 2 pulses
per fan revolution.
-fan[1-*]_target
+`fan[1-*]_target`
Desired fan speed
+
Unit: revolution/min (RPM)
+
RW
+
Only makes sense if the chip supports closed-loop fan speed
control based on the measured fan speed.
-fan[1-*]_label Suggested fan channel label.
+`fan[1-*]_label`
+ Suggested fan channel label.
+
Text string
+
Should only be created if the driver has hints about what
this fan channel is being used for, and user-space doesn't.
In all other cases, the label is provided by user-space.
+
RO
-fan[1-*]_enable
+`fan[1-*]_enable`
Enable or disable the sensors.
+
When disabled the sensor read will return -ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
Also see the Alarms section for status flags associated with fans.
-*******
-* PWM *
-*******
+***
+PWM
+***
+
+`pwm[1-*]`
+ Pulse width modulation fan control.
-pwm[1-*] Pulse width modulation fan control.
Integer value in the range 0 to 255
+
RW
+
255 is max or 100%.
-pwm[1-*]_enable
+`pwm[1-*]_enable`
Fan speed control method:
- 0: no fan speed control (i.e. fan at full speed)
- 1: manual fan speed control enabled (using pwm[1-*])
- 2+: automatic fan speed control enabled
+
+ - 0: no fan speed control (i.e. fan at full speed)
+ - 1: manual fan speed control enabled (using `pwm[1-*]`)
+ - 2+: automatic fan speed control enabled
+
Check individual chip documentation files for automatic mode
details.
+
RW
-pwm[1-*]_mode 0: DC mode (direct current)
- 1: PWM mode (pulse-width modulation)
+`pwm[1-*]_mode`
+ - 0: DC mode (direct current)
+ - 1: PWM mode (pulse-width modulation)
+
RW
-pwm[1-*]_freq Base PWM frequency in Hz.
+`pwm[1-*]_freq`
+ Base PWM frequency in Hz.
+
Only possibly available when pwmN_mode is PWM, but not always
present even then.
+
RW
-pwm[1-*]_auto_channels_temp
+`pwm[1-*]_auto_channels_temp`
Select which temperature channels affect this PWM output in
- auto mode. Bitfield, 1 is temp1, 2 is temp2, 4 is temp3 etc...
+ auto mode.
+
+ Bitfield, 1 is temp1, 2 is temp2, 4 is temp3 etc...
Which values are possible depend on the chip used.
+
RW
-pwm[1-*]_auto_point[1-*]_pwm
-pwm[1-*]_auto_point[1-*]_temp
-pwm[1-*]_auto_point[1-*]_temp_hyst
- Define the PWM vs temperature curve. Number of trip points is
- chip-dependent. Use this for chips which associate trip points
- to PWM output channels.
+`pwm[1-*]_auto_point[1-*]_pwm` / `pwm[1-*]_auto_point[1-*]_temp` / `pwm[1-*]_auto_point[1-*]_temp_hyst`
+ Define the PWM vs temperature curve.
+
+ Number of trip points is chip-dependent. Use this for chips
+ which associate trip points to PWM output channels.
+
RW
-temp[1-*]_auto_point[1-*]_pwm
-temp[1-*]_auto_point[1-*]_temp
-temp[1-*]_auto_point[1-*]_temp_hyst
- Define the PWM vs temperature curve. Number of trip points is
- chip-dependent. Use this for chips which associate trip points
- to temperature channels.
+`temp[1-*]_auto_point[1-*]_pwm` / `temp[1-*]_auto_point[1-*]_temp` / `temp[1-*]_auto_point[1-*]_temp_hyst`
+ Define the PWM vs temperature curve.
+
+ Number of trip points is chip-dependent. Use this for chips
+ which associate trip points to temperature channels.
+
RW
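
For example, a simple two-point curve might read back as follows
(illustrative values only, not taken from any particular chip)::

    pwm1_auto_point1_temp = 40000    (40 degC maps to PWM 64)
    pwm1_auto_point1_pwm  = 64
    pwm1_auto_point2_temp = 70000    (70 degC maps to PWM 255)
    pwm1_auto_point2_pwm  = 255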
There is a third case where trip points are associated to both PWM output
@@ -312,122 +405,173 @@ The actual result is up to the chip, but in general the highest candidate
value (fastest fan speed) wins.
-****************
-* Temperatures *
-****************
+************
+Temperatures
+************
+
+`temp[1-*]_type`
+ Sensor type selection.
-temp[1-*]_type Sensor type selection.
Integers 1 to 6
+
RW
- 1: CPU embedded diode
- 2: 3904 transistor
- 3: thermal diode
- 4: thermistor
- 5: AMD AMDSI
- 6: Intel PECI
+
+ - 1: CPU embedded diode
+ - 2: 3904 transistor
+ - 3: thermal diode
+ - 4: thermistor
+ - 5: AMD AMDSI
+ - 6: Intel PECI
+
Not all types are supported by all chips
-temp[1-*]_max Temperature max value.
+`temp[1-*]_max`
+ Temperature max value.
+
Unit: millidegree Celsius (or millivolt, see below)
+
RW
-temp[1-*]_min Temperature min value.
+`temp[1-*]_min`
+ Temperature min value.
+
Unit: millidegree Celsius
+
RW
-temp[1-*]_max_hyst
+`temp[1-*]_max_hyst`
Temperature hysteresis value for max limit.
+
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the max value.
+
RW
-temp[1-*]_min_hyst
+`temp[1-*]_min_hyst`
Temperature hysteresis value for min limit.
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the min value.
+
RW
-temp[1-*]_input Temperature input value.
+`temp[1-*]_input`
+ Temperature input value.
+
Unit: millidegree Celsius
+
RO
-temp[1-*]_crit Temperature critical max value, typically greater than
+`temp[1-*]_crit`
+ Temperature critical max value, typically greater than
corresponding temp_max values.
+
Unit: millidegree Celsius
+
RW
-temp[1-*]_crit_hyst
+`temp[1-*]_crit_hyst`
Temperature hysteresis value for critical limit.
+
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the critical value.
+
RW
-temp[1-*]_emergency
+`temp[1-*]_emergency`
Temperature emergency max value, for chips supporting more than
		two upper temperature limits. Must be equal to or greater than
corresponding temp_crit values.
+
Unit: millidegree Celsius
+
RW
-temp[1-*]_emergency_hyst
+`temp[1-*]_emergency_hyst`
Temperature hysteresis value for emergency limit.
+
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the emergency value.
+
RW
-temp[1-*]_lcrit Temperature critical min value, typically lower than
+`temp[1-*]_lcrit`
+ Temperature critical min value, typically lower than
corresponding temp_min values.
+
Unit: millidegree Celsius
+
RW
-temp[1-*]_lcrit_hyst
+`temp[1-*]_lcrit_hyst`
Temperature hysteresis value for critical min limit.
+
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the critical min value.
+
RW
-temp[1-*]_offset
+`temp[1-*]_offset`
Temperature offset which is added to the temperature reading
by the chip.
+
Unit: millidegree Celsius
+
Read/Write value.
-temp[1-*]_label Suggested temperature channel label.
+`temp[1-*]_label`
+ Suggested temperature channel label.
+
Text string
+
Should only be created if the driver has hints about what
this temperature channel is being used for, and user-space
doesn't. In all other cases, the label is provided by
user-space.
+
RO
-temp[1-*]_lowest
+`temp[1-*]_lowest`
Historical minimum temperature
+
Unit: millidegree Celsius
+
RO
-temp[1-*]_highest
+`temp[1-*]_highest`
Historical maximum temperature
+
Unit: millidegree Celsius
+
RO
-temp[1-*]_reset_history
+`temp[1-*]_reset_history`
Reset temp_lowest and temp_highest
+
WO
-temp_reset_history
+`temp_reset_history`
Reset temp_lowest and temp_highest for all sensors
+
WO
-temp[1-*]_enable
+`temp[1-*]_enable`
Enable or disable the sensors.
+
When disabled the sensor read will return -ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
Some chips measure temperature using external thermistors and an ADC, and
@@ -442,201 +586,300 @@ channels by the driver.
Also see the Alarms section for status flags associated with temperatures.
-************
-* Currents *
-************
+********
+Currents
+********
+
+`curr[1-*]_max`
+ Current max value
-curr[1-*]_max Current max value
Unit: milliampere
+
RW
-curr[1-*]_min Current min value.
+`curr[1-*]_min`
+ Current min value.
+
Unit: milliampere
+
RW
-curr[1-*]_lcrit Current critical low value
+`curr[1-*]_lcrit`
+ Current critical low value
+
Unit: milliampere
+
RW
-curr[1-*]_crit Current critical high value.
+`curr[1-*]_crit`
+ Current critical high value.
+
Unit: milliampere
+
RW
-curr[1-*]_input Current input value
+`curr[1-*]_input`
+ Current input value
+
Unit: milliampere
+
RO
-curr[1-*]_average
+`curr[1-*]_average`
Average current use
+
Unit: milliampere
+
RO
-curr[1-*]_lowest
+`curr[1-*]_lowest`
Historical minimum current
+
Unit: milliampere
+
RO
-curr[1-*]_highest
+`curr[1-*]_highest`
Historical maximum current
Unit: milliampere
RO
-curr[1-*]_reset_history
+`curr[1-*]_reset_history`
Reset currX_lowest and currX_highest
+
WO
-curr_reset_history
+`curr_reset_history`
Reset currX_lowest and currX_highest for all sensors
+
WO
-curr[1-*]_enable
+`curr[1-*]_enable`
Enable or disable the sensors.
+
When disabled the sensor read will return -ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
Also see the Alarms section for status flags associated with currents.
-*********
-* Power *
-*********
+*****
+Power
+*****
+
+`power[1-*]_average`
+ Average power use
-power[1-*]_average Average power use
Unit: microWatt
+
RO
-power[1-*]_average_interval Power use averaging interval. A poll
+`power[1-*]_average_interval`
+ Power use averaging interval. A poll
notification is sent to this file if the
hardware changes the averaging interval.
+
Unit: milliseconds
+
RW
-power[1-*]_average_interval_max Maximum power use averaging interval
+`power[1-*]_average_interval_max`
+ Maximum power use averaging interval
+
Unit: milliseconds
+
RO
-power[1-*]_average_interval_min Minimum power use averaging interval
+`power[1-*]_average_interval_min`
+ Minimum power use averaging interval
+
Unit: milliseconds
+
RO
-power[1-*]_average_highest Historical average maximum power use
+`power[1-*]_average_highest`
+ Historical average maximum power use
+
Unit: microWatt
+
RO
-power[1-*]_average_lowest Historical average minimum power use
+`power[1-*]_average_lowest`
+ Historical average minimum power use
+
Unit: microWatt
+
RO
-power[1-*]_average_max A poll notification is sent to
- power[1-*]_average when power use
+`power[1-*]_average_max`
+ A poll notification is sent to
+ `power[1-*]_average` when power use
rises above this value.
+
Unit: microWatt
+
RW
-power[1-*]_average_min A poll notification is sent to
- power[1-*]_average when power use
+`power[1-*]_average_min`
+ A poll notification is sent to
+ `power[1-*]_average` when power use
sinks below this value.
+
Unit: microWatt
+
RW
-power[1-*]_input Instantaneous power use
+`power[1-*]_input`
+ Instantaneous power use
+
Unit: microWatt
+
RO
-power[1-*]_input_highest Historical maximum power use
+`power[1-*]_input_highest`
+ Historical maximum power use
+
Unit: microWatt
+
RO
-power[1-*]_input_lowest Historical minimum power use
+`power[1-*]_input_lowest`
+ Historical minimum power use
+
Unit: microWatt
+
RO
-power[1-*]_reset_history Reset input_highest, input_lowest,
+`power[1-*]_reset_history`
+ Reset input_highest, input_lowest,
average_highest and average_lowest.
+
WO
-power[1-*]_accuracy Accuracy of the power meter.
+`power[1-*]_accuracy`
+ Accuracy of the power meter.
+
Unit: Percent
+
RO
-power[1-*]_cap If power use rises above this limit, the
+`power[1-*]_cap`
+ If power use rises above this limit, the
system should take action to reduce power use.
A poll notification is sent to this file if the
- cap is changed by the hardware. The *_cap
+ cap is changed by the hardware. The `*_cap`
files only appear if the cap is known to be
enforced by hardware.
+
Unit: microWatt
+
RW
-power[1-*]_cap_hyst Margin of hysteresis built around capping and
+`power[1-*]_cap_hyst`
+ Margin of hysteresis built around capping and
notification.
+
Unit: microWatt
+
RW
-power[1-*]_cap_max Maximum cap that can be set.
+`power[1-*]_cap_max`
+ Maximum cap that can be set.
+
Unit: microWatt
+
RO
-power[1-*]_cap_min Minimum cap that can be set.
+`power[1-*]_cap_min`
+ Minimum cap that can be set.
+
Unit: microWatt
+
RO
-power[1-*]_max Maximum power.
+`power[1-*]_max`
+ Maximum power.
+
Unit: microWatt
+
RW
-power[1-*]_crit Critical maximum power.
+`power[1-*]_crit`
+ Critical maximum power.
+
If power rises to or above this limit, the
		system is expected to take drastic action to reduce
power consumption, such as a system shutdown or
a forced powerdown of some devices.
+
Unit: microWatt
+
RW
-power[1-*]_enable Enable or disable the sensors.
+`power[1-*]_enable`
+ Enable or disable the sensors.
+
When disabled the sensor read will return
-ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
Also see the Alarms section for status flags associated with power readings.
-**********
-* Energy *
-**********
+******
+Energy
+******
+
+`energy[1-*]_input`
+ Cumulative energy use
-energy[1-*]_input Cumulative energy use
Unit: microJoule
+
RO
-energy[1-*]_enable Enable or disable the sensors.
+`energy[1-*]_enable`
+ Enable or disable the sensors.
+
When disabled the sensor read will return
-ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
-************
-* Humidity *
-************
+********
+Humidity
+********
+
+`humidity[1-*]_input`
+ Humidity
-humidity[1-*]_input Humidity
Unit: milli-percent (per cent mille, pcm)
+
RO
-humidity[1-*]_enable Enable or disable the sensors
+`humidity[1-*]_enable`
+ Enable or disable the sensors
+
When disabled the sensor read will return
-ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
-**********
-* Alarms *
-**********
+******
+Alarms
+******
Each channel or limit may have an associated alarm file, containing a
boolean value. 1 means that an alarm condition exists, 0 means no alarm.
@@ -645,67 +888,67 @@ Usually a given chip will either use channel-related alarms, or
limit-related alarms, not both. The driver should just reflect the hardware
implementation.
-in[0-*]_alarm
-curr[1-*]_alarm
-power[1-*]_alarm
-fan[1-*]_alarm
-temp[1-*]_alarm
- Channel alarm
- 0: no alarm
- 1: alarm
- RO
-
-OR
-
-in[0-*]_min_alarm
-in[0-*]_max_alarm
-in[0-*]_lcrit_alarm
-in[0-*]_crit_alarm
-curr[1-*]_min_alarm
-curr[1-*]_max_alarm
-curr[1-*]_lcrit_alarm
-curr[1-*]_crit_alarm
-power[1-*]_cap_alarm
-power[1-*]_max_alarm
-power[1-*]_crit_alarm
-fan[1-*]_min_alarm
-fan[1-*]_max_alarm
-temp[1-*]_min_alarm
-temp[1-*]_max_alarm
-temp[1-*]_lcrit_alarm
-temp[1-*]_crit_alarm
-temp[1-*]_emergency_alarm
- Limit alarm
- 0: no alarm
- 1: alarm
- RO
++-------------------------------+-----------------------+
+| `in[0-*]_alarm`,              | Channel alarm         |
+| `curr[1-*]_alarm`,            |                       |
+| `power[1-*]_alarm`,           | - 0: no alarm         |
+| `fan[1-*]_alarm`,             | - 1: alarm            |
+| `temp[1-*]_alarm`             |                       |
+|                               | RO                    |
++-------------------------------+-----------------------+
+
+**OR**
+
++-------------------------------+-----------------------+
+| `in[0-*]_min_alarm`,          | Limit alarm           |
+| `in[0-*]_max_alarm`,          |                       |
+| `in[0-*]_lcrit_alarm`,        | - 0: no alarm         |
+| `in[0-*]_crit_alarm`,         | - 1: alarm            |
+| `curr[1-*]_min_alarm`,        |                       |
+| `curr[1-*]_max_alarm`,        | RO                    |
+| `curr[1-*]_lcrit_alarm`,      |                       |
+| `curr[1-*]_crit_alarm`,       |                       |
+| `power[1-*]_cap_alarm`,       |                       |
+| `power[1-*]_max_alarm`,       |                       |
+| `power[1-*]_crit_alarm`,      |                       |
+| `fan[1-*]_min_alarm`,         |                       |
+| `fan[1-*]_max_alarm`,         |                       |
+| `temp[1-*]_min_alarm`,        |                       |
+| `temp[1-*]_max_alarm`,        |                       |
+| `temp[1-*]_lcrit_alarm`,      |                       |
+| `temp[1-*]_crit_alarm`,       |                       |
+| `temp[1-*]_emergency_alarm`   |                       |
++-------------------------------+-----------------------+
Each input channel may have an associated fault file. This can be used
to notify open diodes, unconnected fans etc. where the hardware
supports it. When this boolean has value 1, the measurement for that
channel should not be trusted.
-fan[1-*]_fault
-temp[1-*]_fault
+`fan[1-*]_fault` / `temp[1-*]_fault`
Input fault condition
- 0: no fault occurred
- 1: fault condition
+
+ - 0: no fault occurred
+ - 1: fault condition
+
RO
Some chips also offer the possibility to get beeped when an alarm occurs:
-beep_enable Master beep enable
- 0: no beeps
- 1: beeps
+`beep_enable`
+ Master beep enable
+
+ - 0: no beeps
+ - 1: beeps
+
RW
-in[0-*]_beep
-curr[1-*]_beep
-fan[1-*]_beep
-temp[1-*]_beep
+`in[0-*]_beep`, `curr[1-*]_beep`, `fan[1-*]_beep`, `temp[1-*]_beep`
Channel beep
- 0: disable
- 1: enable
+
+ - 0: disable
+ - 1: enable
+
RW
In theory, a chip could provide per-limit beep masking, but no such chip
@@ -715,56 +958,90 @@ Old drivers provided a different, non-standard interface to alarms and
beeps. These interface files are deprecated, but will be kept around
for compatibility reasons:
-alarms Alarm bitmask.
+`alarms`
+ Alarm bitmask.
+
RO
+
Integer representation of one to four bytes.
+
A '1' bit means an alarm.
+
Chips should be programmed for 'comparator' mode so that
the alarm will 'come back' after you read the register
if it is still valid.
+
Generally a direct representation of a chip's internal
alarm registers; there is no standard for the position
of individual bits. For this reason, the use of this
interface file for new drivers is discouraged. Use
- individual *_alarm and *_fault files instead.
+		individual `*_alarm` and `*_fault` files instead.
Bits are defined in kernel/include/sensors.h.
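
As an illustration, a userspace tool might decode such a legacy bitmask
like this (a sketch only; the hwmon path and the tested bit position are
assumptions, since bit layouts are chip-specific)::

    #include <stdio.h>

    int main(void)
    {
        unsigned long alarms;
        FILE *f = fopen("/sys/class/hwmon/hwmon0/device/alarms", "r");

        if (!f || fscanf(f, "%lu", &alarms) != 1)
            return 1;
        fclose(f);
        /* Bit 0 is chosen only as an example; consult the chip datasheet. */
        printf("bit 0: %s\n", (alarms & 1UL) ? "alarm" : "ok");
        return 0;
    }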
-beep_mask Bitmask for beep.
+`beep_mask`
+ Bitmask for beep.
Same format as 'alarms' with the same bit locations,
use discouraged for the same reason. Use individual
- *_beep files instead.
+ `*_beep` files instead.
RW
-***********************
-* Intrusion detection *
-***********************
+*******************
+Intrusion detection
+*******************
-intrusion[0-*]_alarm
+`intrusion[0-*]_alarm`
Chassis intrusion detection
- 0: OK
- 1: intrusion detected
+
+ - 0: OK
+ - 1: intrusion detected
+
RW
+
Contrary to regular alarm flags which clear themselves
automatically when read, this one sticks until cleared by
the user. This is done by writing 0 to the file. Writing
other values is unsupported.
-intrusion[0-*]_beep
+`intrusion[0-*]_beep`
Chassis intrusion beep
+
0: disable
1: enable
+
RW
+****************************
+Average sample configuration
+****************************
+
+Devices that allow reading {in,power,curr,temp}_average values may export
+attributes for controlling the number of samples used to compute the average.
+
++--------------+---------------------------------------------------------------+
+| samples | Sets number of average samples for all types of measurements. |
+| | |
+| | RW |
++--------------+---------------------------------------------------------------+
+| in_samples | Sets number of average samples for specific type of |
+| power_samples| measurements. |
+| curr_samples | |
+| temp_samples | Note that on some devices it won't be possible to set all of |
+| | them to different values so changing one might also change |
+| | some others. |
+| | |
+| | RW |
++--------------+---------------------------------------------------------------+
sysfs attribute writes interpretation
-------------------------------------
hwmon sysfs attributes always contain numbers, so the first thing to do is to
convert the input to a number; there are two ways to do this, depending on whether
-the number can be negative or not:
-unsigned long u = simple_strtoul(buf, NULL, 10);
-long s = simple_strtol(buf, NULL, 10);
+the number can be negative or not::
+
+ unsigned long u = simple_strtoul(buf, NULL, 10);
+ long s = simple_strtol(buf, NULL, 10);
Here, buf is the buffer containing the user input, as passed by the kernel.
Notice that we do not use the second argument of strto[u]l, and thus cannot
@@ -789,13 +1066,13 @@ limits using clamp_val(value, min_limit, max_limit). If it is not continuous
like for example a tempX_type, then when an invalid value is written,
-EINVAL should be returned.
-Example1, temp1_max, register is a signed 8 bit value (-128 - 127 degrees):
+Example1, temp1_max, register is a signed 8 bit value (-128 - 127 degrees)::
long v = simple_strtol(buf, NULL, 10) / 1000;
v = clamp_val(v, -128, 127);
/* write v to register */
-Example2, fan divider setting, valid values 2, 4 and 8:
+Example2, fan divider setting, valid values 2, 4 and 8::
unsigned long v = simple_strtoul(buf, NULL, 10);
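
A complete store() routine built around Example2 might look like this (a
minimal sketch; `struct chip_data`, its `fan_div` field and the
`chip_write_fan_div()` register helper are hypothetical)::

    static ssize_t fan1_div_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
    {
        struct chip_data *data = dev_get_drvdata(dev);
        unsigned long v = simple_strtoul(buf, NULL, 10);

        switch (v) {
        case 2:
        case 4:
        case 8:
            data->fan_div = v;              /* hypothetical driver state */
            chip_write_fan_div(data, v);    /* hypothetical register write */
            return count;
        default:
            return -EINVAL;                 /* only 2, 4 and 8 are valid */
        }
    }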
diff --git a/Documentation/hwmon/tc654 b/Documentation/hwmon/tc654.rst
index 47636a8077b4..ce546ee6dfed 100644
--- a/Documentation/hwmon/tc654
+++ b/Documentation/hwmon/tc654.rst
@@ -2,13 +2,16 @@ Kernel driver tc654
===================
Supported chips:
+
* Microchip TC654 and TC655
+
Prefix: 'tc654'
- Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20001734C.pdf
+    Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20001734C.pdf
Authors:
- Chris Packham <chris.packham@alliedtelesis.co.nz>
- Masahiko Iwamoto <iwamoto@allied-telesis.co.jp>
+ - Chris Packham <chris.packham@alliedtelesis.co.nz>
+ - Masahiko Iwamoto <iwamoto@allied-telesis.co.jp>
Description
-----------
diff --git a/Documentation/hwmon/tc74 b/Documentation/hwmon/tc74.rst
index 43027aad5f8e..f1764211c129 100644
--- a/Documentation/hwmon/tc74
+++ b/Documentation/hwmon/tc74.rst
@@ -2,8 +2,11 @@ Kernel driver tc74
====================
Supported chips:
+
* Microchip TC74
+
Prefix: 'tc74'
+
Datasheet: Publicly available at Microchip website.
Description
diff --git a/Documentation/hwmon/thmc50 b/Documentation/hwmon/thmc50.rst
index 8a7772ade8d0..cfff3885287d 100644
--- a/Documentation/hwmon/thmc50
+++ b/Documentation/hwmon/thmc50.rst
@@ -2,30 +2,41 @@ Kernel driver thmc50
=====================
Supported chips:
+
* Analog Devices ADM1022
+
Prefix: 'adm1022'
+
Addresses scanned: I2C 0x2c - 0x2e
+
Datasheet: http://www.analog.com/en/prod/0,2877,ADM1022,00.html
+
* Texas Instruments THMC50
+
Prefix: 'thmc50'
+
Addresses scanned: I2C 0x2c - 0x2e
- Datasheet: http://www.ti.com/
+
+ Datasheet: http://www.ti.com/
+
Author: Krzysztof Helt <krzysztof.h1@wp.pl>
This driver was derived from the 2.4 kernel thmc50.c source file.
Credits:
+
thmc50.c (2.4 kernel):
- Frodo Looijaard <frodol@dds.nl>
- Philip Edelbrock <phil@netroedge.com>
+
+ - Frodo Looijaard <frodol@dds.nl>
+ - Philip Edelbrock <phil@netroedge.com>
Module Parameters
-----------------
* adm1022_temp3: short array
- List of adapter,address pairs to force chips into ADM1022 mode with
- second remote temperature. This does not work for original THMC50 chips.
+ List of adapter,address pairs to force chips into ADM1022 mode with
+ second remote temperature. This does not work for original THMC50 chips.
Description
-----------
@@ -59,16 +70,20 @@ Driver Features
The driver provides up to three temperatures:
-temp1 -- internal
-temp2 -- remote
-temp3 -- 2nd remote only for ADM1022
+temp1
+ - internal
+temp2
+ - remote
+temp3
+  - 2nd remote (ADM1022 only)
-pwm1 -- fan speed (0 = stop, 255 = full)
-pwm1_mode -- always 0 (DC mode)
+pwm1
+ - fan speed (0 = stop, 255 = full)
+pwm1_mode
+ - always 0 (DC mode)
The value of 0 for pwm1 also forces the FAN_OFF signal from the chip,
so it stops fans even if writing 0 into the ANALOG_OUT register does not.
The driver was tested on Compaq AP550 with two ADM1022 chips (one works
in the temp3 mode), five temperature readings and two fans.
-
diff --git a/Documentation/hwmon/tmp102 b/Documentation/hwmon/tmp102.rst
index 8454a7763122..b1f585531a88 100644
--- a/Documentation/hwmon/tmp102
+++ b/Documentation/hwmon/tmp102.rst
@@ -2,12 +2,17 @@ Kernel driver tmp102
====================
Supported chips:
+
* Texas Instruments TMP102
+
Prefix: 'tmp102'
+
Addresses scanned: none
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp102.html
Author:
+
Steven King <sfking@fdwdc.com>
Description
@@ -23,4 +28,4 @@ The TMP102 has a programmable update rate that can select between 8, 4, 1, and
0.5 Hz. (Currently the driver only supports the default of 4 Hz).
The driver provides the common sysfs-interface for temperatures (see
-Documentation/hwmon/sysfs-interface under Temperatures).
+Documentation/hwmon/sysfs-interface.rst under Temperatures).
diff --git a/Documentation/hwmon/tmp103 b/Documentation/hwmon/tmp103.rst
index ec00a15645ba..15d25806d585 100644
--- a/Documentation/hwmon/tmp103
+++ b/Documentation/hwmon/tmp103.rst
@@ -2,12 +2,17 @@ Kernel driver tmp103
====================
Supported chips:
+
* Texas Instruments TMP103
+
Prefix: 'tmp103'
+
Addresses scanned: none
+
Product info and datasheet: http://www.ti.com/product/tmp103
Author:
+
Heiko Schocher <hs@denx.de>
Description
@@ -22,7 +27,7 @@ Resolution: 8 Bits
Accuracy: ±1°C Typ (–10°C to +100°C)
The driver provides the common sysfs-interface for temperatures (see
-Documentation/hwmon/sysfs-interface under Temperatures).
+Documentation/hwmon/sysfs-interface.rst under Temperatures).
For how to instantiate this driver, please refer to:
Documentation/i2c/instantiating-devices
diff --git a/Documentation/hwmon/tmp108 b/Documentation/hwmon/tmp108.rst
index 25802df23010..5f4266a16cb2 100644
--- a/Documentation/hwmon/tmp108
+++ b/Documentation/hwmon/tmp108.rst
@@ -2,12 +2,17 @@ Kernel driver tmp108
====================
Supported chips:
+
* Texas Instruments TMP108
+
Prefix: 'tmp108'
+
Addresses scanned: none
+
Datasheet: http://www.ti.com/product/tmp108
Author:
+
John Muir <john@jmuir.com>
Description
@@ -33,4 +38,4 @@ and then the device is shut down automatically. (This driver only supports
continuous mode.)
The driver provides the common sysfs-interface for temperatures (see
-Documentation/hwmon/sysfs-interface under Temperatures).
+Documentation/hwmon/sysfs-interface.rst under Temperatures).
diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401.rst
index 2d9ca42213cf..6a05a0719bc7 100644
--- a/Documentation/hwmon/tmp401
+++ b/Documentation/hwmon/tmp401.rst
@@ -2,33 +2,59 @@ Kernel driver tmp401
====================
Supported chips:
+
* Texas Instruments TMP401
+
Prefix: 'tmp401'
+
Addresses scanned: I2C 0x4c
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp401.html
+
* Texas Instruments TMP411
+
Prefix: 'tmp411'
+
Addresses scanned: I2C 0x4c, 0x4d, 0x4e
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp411.html
+
* Texas Instruments TMP431
+
Prefix: 'tmp431'
+
Addresses scanned: I2C 0x4c, 0x4d
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp431.html
+
* Texas Instruments TMP432
+
Prefix: 'tmp432'
+
Addresses scanned: I2C 0x4c, 0x4d
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
+
* Texas Instruments TMP435
+
Prefix: 'tmp435'
+
Addresses scanned: I2C 0x48 - 0x4f
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
+
* Texas Instruments TMP461
+
Prefix: 'tmp461'
+
Datasheet: http://www.ti.com/product/tmp461
+
+
Authors:
- Hans de Goede <hdegoede@redhat.com>
- Andre Prendel <andre.prendel@gmx.de>
+
+ - Hans de Goede <hdegoede@redhat.com>
+ - Andre Prendel <andre.prendel@gmx.de>
Description
-----------
@@ -42,7 +68,7 @@ supported by the driver so far, so using the default resolution of 0.5
degree).
The driver provides the common sysfs-interface for temperatures (see
-Documentation/hwmon/sysfs-interface under Temperatures).
+Documentation/hwmon/sysfs-interface.rst under Temperatures).
The TMP411 and TMP431 chips are compatible with TMP401. TMP411 provides
some additional features.
diff --git a/Documentation/hwmon/tmp421 b/Documentation/hwmon/tmp421.rst
index 9e6fe5549ca1..1ba926a3605c 100644
--- a/Documentation/hwmon/tmp421
+++ b/Documentation/hwmon/tmp421.rst
@@ -2,28 +2,49 @@ Kernel driver tmp421
====================
Supported chips:
+
* Texas Instruments TMP421
+
Prefix: 'tmp421'
+
Addresses scanned: I2C 0x2a, 0x4c, 0x4d, 0x4e and 0x4f
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp421.html
+
* Texas Instruments TMP422
+
Prefix: 'tmp422'
+
Addresses scanned: I2C 0x4c, 0x4d, 0x4e and 0x4f
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp421.html
+
* Texas Instruments TMP423
+
Prefix: 'tmp423'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp421.html
+
* Texas Instruments TMP441
+
Prefix: 'tmp441'
+
Addresses scanned: I2C 0x2a, 0x4c, 0x4d, 0x4e and 0x4f
+
Datasheet: http://www.ti.com/product/tmp441
+
* Texas Instruments TMP442
+
Prefix: 'tmp442'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: http://www.ti.com/product/tmp442
Authors:
+
Andre Prendel <andre.prendel@gmx.de>
Description
@@ -40,5 +61,6 @@ for both the local and remote channels is 0.0625 degree C.
The chips support only temperature measurement. The driver exports
the temperature values via the following sysfs files:
-temp[1-4]_input
-temp[2-4]_fault
+**temp[1-4]_input**
+
+**temp[2-4]_fault**
diff --git a/Documentation/hwmon/tps40422 b/Documentation/hwmon/tps40422.rst
index 24bb0688d515..b691e30479dd 100644
--- a/Documentation/hwmon/tps40422
+++ b/Documentation/hwmon/tps40422.rst
@@ -2,9 +2,13 @@ Kernel driver tps40422
======================
Supported chips:
+
* TI TPS40422
+
Prefix: 'tps40422'
+
Addresses scanned: -
+
Datasheet: http://www.ti.com/lit/gpn/tps40422
Author: Zhu Laiwen <richard.zhu@nsn.com>
@@ -17,7 +21,7 @@ This driver supports TI TPS40422 Dual-Output or Two-Phase Synchronous Buck
Controller with PMBus
The driver is a client driver to the core PMBus driver.
-Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -39,6 +43,7 @@ Sysfs entries
The following attributes are supported.
+======================= =======================================================
in[1-2]_label "vout[1-2]"
in[1-2]_input Measured voltage. From READ_VOUT register.
in[1-2]_alarm voltage alarm.
@@ -46,19 +51,23 @@ in[1-2]_alarm voltage alarm.
curr[1-2]_input Measured current. From READ_IOUT register.
curr[1-2]_label "iout[1-2]"
curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
+ register.
curr1_max_alarm Current high alarm. From IOUT_OC_WARN_LIMIT status.
curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
curr2_alarm Current high alarm. From IOUT_OC_WARNING status.
-temp1_input Measured temperature. From READ_TEMPERATURE_2 register on page 0.
+temp1_input Measured temperature. From READ_TEMPERATURE_2 register
+ on page 0.
temp1_max Maximum temperature. From OT_WARN_LIMIT register.
temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
temp1_max_alarm Chip temperature high alarm. Set by comparing
- READ_TEMPERATURE_2 on page 0 with OT_WARN_LIMIT if TEMP_OT_WARNING
- status is set.
+ READ_TEMPERATURE_2 on page 0 with OT_WARN_LIMIT if
+ TEMP_OT_WARNING status is set.
temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
- READ_TEMPERATURE_2 on page 0 with OT_FAULT_LIMIT if TEMP_OT_FAULT
- status is set.
-temp2_input Measured temperature. From READ_TEMPERATURE_2 register on page 1.
+ READ_TEMPERATURE_2 on page 0 with OT_FAULT_LIMIT if
+ TEMP_OT_FAULT status is set.
+temp2_input Measured temperature. From READ_TEMPERATURE_2 register
+ on page 1.
temp2_alarm Chip temperature alarm on page 1.
+======================= =======================================================
diff --git a/Documentation/hwmon/twl4030-madc-hwmon b/Documentation/hwmon/twl4030-madc-hwmon.rst
index c3a3a5be10ad..22c885383b11 100644
--- a/Documentation/hwmon/twl4030-madc-hwmon
+++ b/Documentation/hwmon/twl4030-madc-hwmon.rst
@@ -1,8 +1,10 @@
Kernel driver twl4030-madc
-=========================
+==========================
Supported chips:
+
* Texas Instruments TWL4030
+
Prefix: 'twl4030-madc'
@@ -19,8 +21,9 @@ channels which can be used in different modes.
See this table for the meaning of the different channels
+======= ==========================================================
Channel Signal
-------------------------------------------
+======= ==========================================================
0 Battery type(BTYPE)
1 BCI: Battery temperature (BTEMP)
2 GP analog input
@@ -37,6 +40,7 @@ Channel Signal
13 Reserved
14 Reserved
15 VRUSB Supply/Speaker left/Speaker right polarization level
+======= ==========================================================
The Sysfs nodes will represent the voltage in the units of mV,
diff --git a/Documentation/hwmon/ucd9000 b/Documentation/hwmon/ucd9000.rst
index 262e713e60ff..ebc4f2b3bfea 100644
--- a/Documentation/hwmon/ucd9000
+++ b/Documentation/hwmon/ucd9000.rst
@@ -2,15 +2,20 @@ Kernel driver ucd9000
=====================
Supported chips:
+
* TI UCD90120, UCD90124, UCD90160, UCD9090, and UCD90910
+
Prefixes: 'ucd90120', 'ucd90124', 'ucd90160', 'ucd9090', 'ucd90910'
+
Addresses scanned: -
+
Datasheets:
- http://focus.ti.com/lit/ds/symlink/ucd90120.pdf
- http://focus.ti.com/lit/ds/symlink/ucd90124.pdf
- http://focus.ti.com/lit/ds/symlink/ucd90160.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
- http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
+
+ - http://focus.ti.com/lit/ds/symlink/ucd90120.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd90124.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd90160.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -52,7 +57,7 @@ system-health monitor. The device integrates a 12-bit ADC for monitoring up to
13 power-supply voltage, current, or temperature inputs.
This driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -67,7 +72,7 @@ Platform data support
---------------------
The driver supports standard PMBus driver platform data. Please see
-Documentation/hwmon/pmbus for details.
+Documentation/hwmon/pmbus.rst for details.
Sysfs entries
@@ -76,23 +81,28 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in[1-12]_label "vout[1-12]".
in[1-12]_input Measured voltage. From READ_VOUT register.
in[1-12]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in[1-12]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in[1-12]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in[1-12]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-12]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in[1-12]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in[1-12]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in[1-12]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in[1-12]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in[1-12]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in[1-12]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
curr[1-12]_label "iout[1-12]".
curr[1-12]_input Measured current. From READ_IOUT register.
curr[1-12]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr[1-12]_lcrit Critical minimum output current. From IOUT_UC_FAULT_LIMIT
+curr[1-12]_lcrit Critical minimum output current. From
+ IOUT_UC_FAULT_LIMIT register.
+curr[1-12]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
register.
-curr[1-12]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
curr[1-12]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
curr[1-12]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
@@ -116,3 +126,4 @@ fan[1-4]_fault Fan fault.
created only for enabled fans.
Note that even though UCD90910 supports up to 10 fans,
only up to four fans are currently supported.
+======================= ========================================================
diff --git a/Documentation/hwmon/ucd9200 b/Documentation/hwmon/ucd9200.rst
index 1e8060e631bd..b819dfd75f71 100644
--- a/Documentation/hwmon/ucd9200
+++ b/Documentation/hwmon/ucd9200.rst
@@ -2,18 +2,23 @@ Kernel driver ucd9200
=====================
Supported chips:
+
* TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
+
Prefixes: 'ucd9220', 'ucd9222', 'ucd9224', 'ucd9240', 'ucd9244', 'ucd9246',
- 'ucd9248'
+ 'ucd9248'
+
Addresses scanned: -
+
Datasheets:
- http://focus.ti.com/lit/ds/symlink/ucd9220.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9222.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9224.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9240.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9244.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9248.pdf
+
+ - http://focus.ti.com/lit/ds/symlink/ucd9220.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9222.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9224.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9240.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9244.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9248.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -28,7 +33,7 @@ dedicated circuitry for DC/DC loop management with flash memory and a serial
interface to support configuration, monitoring and management.
This driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -43,7 +48,7 @@ Platform data support
---------------------
The driver supports standard PMBus driver platform data. Please see
-Documentation/hwmon/pmbus for details.
+Documentation/hwmon/pmbus.rst for details.
Sysfs entries
@@ -52,12 +57,14 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in1_label "vin".
in1_input Measured voltage. From READ_VIN register.
in1_min Minimum Voltage. From VIN_UV_WARN_LIMIT register.
in1_max Maximum voltage. From VIN_OV_WARN_LIMIT register.
in1_lcrit Critical minimum Voltage. VIN_UV_FAULT_LIMIT register.
-in1_crit Critical maximum voltage. From VIN_OV_FAULT_LIMIT register.
+in1_crit Critical maximum voltage. From VIN_OV_FAULT_LIMIT
+ register.
in1_min_alarm Voltage low alarm. From VIN_UV_WARNING status.
in1_max_alarm Voltage high alarm. From VIN_OV_WARNING status.
in1_lcrit_alarm Voltage critical low alarm. From VIN_UV_FAULT status.
@@ -68,11 +75,14 @@ in[2-5]_input Measured voltage. From READ_VOUT register.
in[2-5]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in[2-5]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in[2-5]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in[2-5]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[2-5]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in[2-5]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in[2-5]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in[2-5]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in[2-5]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in[2-5]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in[2-5]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
curr1_label "iin".
curr1_input Measured current. From READ_IIN register.
@@ -80,9 +90,10 @@ curr1_input Measured current. From READ_IIN register.
curr[2-5]_label "iout[1-4]".
curr[2-5]_input Measured current. From READ_IOUT register.
curr[2-5]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr[2-5]_lcrit Critical minimum output current. From IOUT_UC_FAULT_LIMIT
+curr[2-5]_lcrit Critical minimum output current. From
+ IOUT_UC_FAULT_LIMIT register.
+curr[2-5]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
register.
-curr[2-5]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
curr[2-5]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
curr[2-5]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
@@ -97,7 +108,7 @@ power[2-5]_label "pout[1-4]"
rails. See chip datasheets for details.
temp[1-5]_input Measured temperatures. From READ_TEMPERATURE_1 and
- READ_TEMPERATURE_2 registers.
+ READ_TEMPERATURE_2 registers.
temp1 is the chip internal temperature. temp[2-5] are
rail temperatures. temp[2-5] attributes are only
created for enabled rails. See chip datasheets for
@@ -110,3 +121,4 @@ temp[1-5]_crit_alarm Temperature critical high alarm.
fan1_input Fan RPM. ucd9240 only.
fan1_alarm Fan alarm. ucd9240 only.
fan1_fault Fan fault. ucd9240 only.
+======================= ========================================================
diff --git a/Documentation/hwmon/userspace-tools b/Documentation/hwmon/userspace-tools.rst
index 9865aeedc58f..bf3797c8e734 100644
--- a/Documentation/hwmon/userspace-tools
+++ b/Documentation/hwmon/userspace-tools.rst
@@ -1,3 +1,6 @@
+Userspace tools
+===============
+
Introduction
------------
diff --git a/Documentation/hwmon/vexpress b/Documentation/hwmon/vexpress.rst
index 557d6d5ad90d..8c861c8151ac 100644
--- a/Documentation/hwmon/vexpress
+++ b/Documentation/hwmon/vexpress.rst
@@ -2,14 +2,21 @@ Kernel driver vexpress
======================
Supported systems:
+
* ARM Ltd. Versatile Express platform
+
Prefix: 'vexpress'
+
Datasheets:
+
* "Hardware Description" sections of the Technical Reference Manuals
- for the Versatile Express boards:
- http://infocenter.arm.com/help/topic/com.arm.doc.subset.boards.express/index.html
+ for the Versatile Express boards:
+
+ - http://infocenter.arm.com/help/topic/com.arm.doc.subset.boards.express/index.html
+
* Section "4.4.14. System Configuration registers" of the V2M-P1 TRM:
- http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0447-/index.html
+
+ - http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0447-/index.html
Author: Pawel Moll
diff --git a/Documentation/hwmon/via686a b/Documentation/hwmon/via686a.rst
index e5f90ab5c48d..a343c35df740 100644
--- a/Documentation/hwmon/via686a
+++ b/Documentation/hwmon/via686a.rst
@@ -2,29 +2,35 @@ Kernel driver via686a
=====================
Supported chips:
+
* Via VT82C686A, VT82C686B Southbridge Integrated Hardware Monitor
+
Prefix: 'via686a'
+
Addresses scanned: ISA in PCI-space encoded address
+
Datasheet: On request through web form (http://www.via.com.tw/en/resources/download-center/)
Authors:
- Kyösti Mälkki <kmalkki@cc.hut.fi>,
- Mark D. Studebaker <mdsxyz123@yahoo.com>
- Bob Dougherty <bobd@stanford.edu>
- (Some conversion-factor data were contributed by
- Jonathan Teh Soon Yew <j.teh@iname.com>
- and Alex van Kaam <darkside@chello.nl>.)
+ - Kyösti Mälkki <kmalkki@cc.hut.fi>,
+ - Mark D. Studebaker <mdsxyz123@yahoo.com>
+ - Bob Dougherty <bobd@stanford.edu>
+   - (Some conversion-factor data were contributed by
+     Jonathan Teh Soon Yew <j.teh@iname.com>
+     and Alex van Kaam <darkside@chello.nl>.)
Module Parameters
-----------------
+======================= =======================================================
force_addr=0xaddr Set the I/O base address. Useful for boards that
- don't set the address in the BIOS. Look for a BIOS
- upgrade before resorting to this. Does not do a
- PCI force; the via686a must still be present in lspci.
- Don't use this unless the driver complains that the
- base address is not set.
- Example: 'modprobe via686a force_addr=0x6000'
+ don't set the address in the BIOS. Look for a BIOS
+ upgrade before resorting to this. Does not do a
+ PCI force; the via686a must still be present in lspci.
+ Don't use this unless the driver complains that the
+ base address is not set.
+ Example: 'modprobe via686a force_addr=0x6000'
+======================= =======================================================
Description
-----------
diff --git a/Documentation/hwmon/vt1211 b/Documentation/hwmon/vt1211.rst
index 77fa633b97a8..ddbcde7dd642 100644
--- a/Documentation/hwmon/vt1211
+++ b/Documentation/hwmon/vt1211.rst
@@ -2,9 +2,13 @@ Kernel driver vt1211
====================
Supported chips:
+
* VIA VT1211
+
Prefix: 'vt1211'
+
Addresses scanned: none, address read from Super-I/O config space
+
Datasheet: Provided by VIA upon request and under NDA
Authors: Juerg Haefliger <juergh@gmail.com>
@@ -19,14 +23,17 @@ technical support.
Module Parameters
-----------------
-* uch_config: int Override the BIOS default universal channel (UCH)
+
+* uch_config: int
+ Override the BIOS default universal channel (UCH)
configuration for channels 1-5.
Legal values are in the range of 0-31. Bit 0 maps to
UCH1, bit 1 maps to UCH2 and so on. Setting a bit to 1
enables the thermal input of that particular UCH and
setting a bit to 0 enables the voltage input.
-* int_mode: int Override the BIOS default temperature interrupt mode.
+* int_mode: int
+ Override the BIOS default temperature interrupt mode.
The only possible value is 0 which forces interrupt
mode 0. In this mode, any pending interrupt is cleared
when the status register is read but is regenerated as
@@ -55,8 +62,9 @@ connected to the PWM outputs of the VT1211 :-().
The following table shows the relationship between the vt1211 inputs and the
sysfs nodes.
+=============== ============== =========== ================================
Sensor Voltage Mode Temp Mode Default Use (from the datasheet)
------- ------------ --------- --------------------------------
+=============== ============== =========== ================================
Reading 1 temp1 Intel thermal diode
Reading 3 temp2 Internal thermal diode
UCH1/Reading2 in0 temp3 NTC type thermistor
@@ -65,6 +73,7 @@ UCH3 in2 temp5 VccP (processor core)
UCH4 in3 temp6 +5V
UCH5 in4 temp7 +12V
+3.3V in5 Internal VCC (+3.3V)
+=============== ============== =========== ================================
Voltage Monitoring
@@ -82,19 +91,22 @@ follows. And this is of course totally dependent on the actual board
implementation :-) You will have to find documentation for your own
motherboard and edit sensors.conf accordingly.
- Expected
+============= ====== ====== ========= ============
+ Expected
Voltage R1 R2 Divider Raw Value
------------------------------------------------
+============= ====== ====== ========= ============
+2.5V 2K 10K 1.2 2083 mV
-VccP --- --- 1.0 1400 mV (1)
+VccP --- --- 1.0 1400 mV [1]_
+5V 14K 10K 2.4 2083 mV
+12V 47K 10K 5.7 2105 mV
-+3.3V (int) 2K 3.4K 1.588 3300 mV (2)
++3.3V (int) 2K 3.4K 1.588 3300 mV [2]_
+3.3V (ext) 6.8K 10K 1.68 1964 mV
+============= ====== ====== ========= ============
+
+.. [1] Depending on the CPU (1.4V is for a VIA C3 Nehemiah).
-(1) Depending on the CPU (1.4V is for a VIA C3 Nehemiah).
-(2) R1 and R2 for 3.3V (int) are internal to the VT1211 chip and the driver
- performs the scaling and returns the properly scaled voltage value.
+.. [2] R1 and R2 for 3.3V (int) are internal to the VT1211 chip and the driver
+ performs the scaling and returns the properly scaled voltage value.
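+
+For example, assuming your board wires the +2.5V rail to in0 with the
+divider shown above (the in0 mapping is purely illustrative; check your
+own board), the matching sensors.conf entry would be::
+
+ compute in0 @*1.2, @/1.2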
Each measured voltage has an associated low and high limit which triggers an
alarm when crossed.
@@ -124,35 +136,37 @@ compute temp1 (@-Offset)/Gain, (@*Gain)+Offset
According to the VIA VT1211 BIOS porting guide, the following gain and offset
values should be used:
+=============== ======== ===========
Diode Type Offset Gain
----------- ------ ----
+=============== ======== ===========
Intel CPU 88.638 0.9528
- 65.000 0.9686 *)
+ 65.000 0.9686 [3]_
VIA C3 Ezra 83.869 0.9528
VIA C3 Ezra-T 73.869 0.9528
+=============== ======== ===========
-*) This is the formula from the lm_sensors 2.10.0 sensors.conf file. I don't
-know where it comes from or how it was derived, it's just listed here for
-completeness.
+.. [3] This is the formula from the lm_sensors 2.10.0 sensors.conf file. I don't
+   know where it comes from or how it was derived; it's just listed here for
+   completeness.
Temp3-temp7 support NTC thermistors. For these channels, the driver returns
the voltages as seen at the individual pins of UCH1-UCH5. The voltage at the
pin (Vpin) is formed by a voltage divider made of the thermistor (Rth) and a
-scaling resistor (Rs):
+scaling resistor (Rs)::
-Vpin = 2200 * Rth / (Rs + Rth) (2200 is the ADC max limit of 2200 mV)
+ Vpin = 2200 * Rth / (Rs + Rth) (2200 is the ADC max limit of 2200 mV)
The equation for the thermistor is as follows (google it if you want to know
-more about it):
+more about it)::
-Rth = Ro * exp(B * (1 / T - 1 / To)) (To is 298.15K (25C) and Ro is the
- nominal resistance at 25C)
+ Rth = Ro * exp(B * (1 / T - 1 / To)) (To is 298.15K (25C) and Ro is the
+ nominal resistance at 25C)
Mingling the above two equations and assuming Rs = Ro and B = 3435 yields the
-following formula for sensors.conf:
+following formula for sensors.conf::
-compute tempx 1 / (1 / 298.15 - (` (2200 / @ - 1)) / 3435) - 273.15,
- 2200 / (1 + (^ (3435 / 298.15 - 3435 / (273.15 + @))))
+ compute tempx 1 / (1 / 298.15 - (` (2200 / @ - 1)) / 3435) - 273.15,
+ 2200 / (1 + (^ (3435 / 298.15 - 3435 / (273.15 + @))))
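+
+As a quick sanity check of the first expression (with Rs = Ro as assumed
+above): at half scale, @ = 1100 mV, the logarithm term vanishes and the
+computed temperature lands exactly on the nominal point::
+
+ tempx = 1 / (1 / 298.15 - (` (2200 / 1100 - 1)) / 3435) - 273.15
+       = 1 / (1 / 298.15 - 0) - 273.15
+       = 25 degrees C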
Fan Speed Control
@@ -176,31 +190,37 @@ registers in the VT1211 and programming one set is sufficient (actually only
the first set pwm1_auto_point[1-4]_temp is writable, the second set is
read-only).
+========================== =========================================
PWM Auto Point PWM Output Duty-Cycle
-------------------------------------------------
+========================== =========================================
pwm[1-2]_auto_point4_pwm full speed duty-cycle (hard-wired to 255)
pwm[1-2]_auto_point3_pwm high speed duty-cycle
pwm[1-2]_auto_point2_pwm low speed duty-cycle
pwm[1-2]_auto_point1_pwm off duty-cycle (hard-wired to 0)
+========================== =========================================
+========================== =================
Temp Auto Point Thermal Threshold
----------------------------------------------
+========================== =================
pwm[1-2]_auto_point4_temp full speed temp
pwm[1-2]_auto_point3_temp high speed temp
pwm[1-2]_auto_point2_temp low speed temp
pwm[1-2]_auto_point1_temp off temp
+========================== =================
Long story short, the controller implements the following algorithm to set the
PWM output duty-cycle based on the input temperature:
-Thermal Threshold Output Duty-Cycle
- (Rising Temp) (Falling Temp)
-----------------------------------------------------------
- full speed duty-cycle full speed duty-cycle
+=================== ======================= ========================
+Thermal Threshold Output Duty-Cycle Output Duty-Cycle
+ (Rising Temp) (Falling Temp)
+=================== ======================= ========================
+- full speed duty-cycle full speed duty-cycle
full speed temp
- high speed duty-cycle full speed duty-cycle
+- high speed duty-cycle full speed duty-cycle
high speed temp
- low speed duty-cycle high speed duty-cycle
+- low speed duty-cycle high speed duty-cycle
low speed temp
- off duty-cycle low speed duty-cycle
+- off duty-cycle low speed duty-cycle
off temp
+=================== ======================= ========================
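+
+A minimal setup sketch (illustrative values; only pwm1's temperature set
+is writable, and the standard hwmon millidegree Celsius unit is assumed
+for the temp attributes)::
+
+ echo 160   > pwm1_auto_point3_pwm    # high speed duty-cycle
+ echo 96    > pwm1_auto_point2_pwm    # low speed duty-cycle
+ echo 70000 > pwm1_auto_point4_temp   # full speed temp
+ echo 60000 > pwm1_auto_point3_temp   # high speed temp
+ echo 50000 > pwm1_auto_point2_temp   # low speed temp
+ echo 40000 > pwm1_auto_point1_temp   # off temp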
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf.rst
index 735c42a85ead..74d19ef11e1f 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf.rst
@@ -2,45 +2,79 @@ Kernel driver w83627ehf
=======================
Supported chips:
+
* Winbond W83627EHF/EHG (ISA access ONLY)
+
Prefix: 'w83627ehf'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: not available
+
* Winbond W83627DHG
+
Prefix: 'w83627dhg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: not available
+
* Winbond W83627DHG-P
+
Prefix: 'w83627dhg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: not available
+
* Winbond W83627UHG
+
Prefix: 'w83627uhg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: available from www.nuvoton.com
+
* Winbond W83667HG
+
Prefix: 'w83667hg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: not available
+
* Winbond W83667HG-B
+
Prefix: 'w83667hg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6775F/W83667HG-I
+
Prefix: 'nct6775'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6776F
+
Prefix: 'nct6776'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
Authors:
- Jean Delvare <jdelvare@suse.de>
- Yuan Mu (Winbond)
- Rudolf Marek <r.marek@assembler.cz>
- David Hubbard <david.c.hubbard@gmail.com>
- Gong Jun <JGong@nuvoton.com>
+
+ - Jean Delvare <jdelvare@suse.de>
+ - Yuan Mu (Winbond)
+ - Rudolf Marek <r.marek@assembler.cz>
+ - David Hubbard <david.c.hubbard@gmail.com>
+ - Gong Jun <JGong@nuvoton.com>
Description
-----------
@@ -85,25 +119,30 @@ predefined temperature range. If the temperature goes out of range, fan
is driven slower/faster to reach the predefined range again.
The mode works for fan1-fan4. Mapping of temperatures to pwm outputs is as
-follows:
+follows::
-temp1 -> pwm1
-temp2 -> pwm2
-temp3 -> pwm3 (not on 627UHG)
-prog -> pwm4 (not on 667HG and 667HG-B; the programmable setting is not
- supported by the driver)
+ temp1 -> pwm1
+ temp2 -> pwm2
+ temp3 -> pwm3 (not on 627UHG)
+ prog -> pwm4 (not on 667HG and 667HG-B; the programmable setting is not
+ supported by the driver)
/sys files
----------
-name - this is a standard hwmon device entry, it contains the name of
- the device (see the prefix in the list of supported devices at
- the top of this file)
+name
+ this is a standard hwmon device entry, it contains the name of
+ the device (see the prefix in the list of supported devices at
+ the top of this file)
+
+pwm[1-4]
+ this file stores PWM duty cycle or DC value (fan speed) in range:
-pwm[1-4] - this file stores PWM duty cycle or DC value (fan speed) in range:
0 (stop) to 255 (full)
-pwm[1-4]_enable - this file controls mode of fan/temperature control:
+pwm[1-4]_enable
+ this file controls mode of fan/temperature control:
+
* 1 Manual mode, write to pwm file any value 0-255 (full speed)
* 2 "Thermal Cruise" mode
* 3 "Fan Speed Cruise" mode
@@ -121,33 +160,43 @@ pwm[1-4]_enable - this file controls mode of fan/temperature control:
returned when reading pwm attributes is unrelated to SmartFan IV
operation.
-pwm[1-4]_mode - controls if output is PWM or DC level
- * 0 DC output (0 - 12v)
- * 1 PWM output
+pwm[1-4]_mode
+ controls if output is PWM or DC level
+
+ * 0 DC output (0 - 12v)
+ * 1 PWM output
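+
+For example, to drive fan 2 manually at roughly half speed (the hwmon
+sysfs path is system-dependent)::
+
+ echo 1   > pwm2_enable   # manual mode
+ echo 1   > pwm2_mode     # PWM output rather than DC
+ echo 128 > pwm2          # duty cycle, 0 (stop) to 255 (full)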
Thermal Cruise mode
-------------------
If the temperature is in the range defined by:
-pwm[1-4]_target - set target temperature, unit millidegree Celsius
- (range 0 - 127000)
-pwm[1-4]_tolerance - tolerance, unit millidegree Celsius (range 0 - 15000)
+pwm[1-4]_target
+ set target temperature, unit millidegree Celsius
+ (range 0 - 127000)
+pwm[1-4]_tolerance
+ tolerance, unit millidegree Celsius (range 0 - 15000)
there are no changes to fan speed. Once the temperature leaves the interval,
fan speed increases (temp is higher) or decreases if lower than desired.
There are defined steps and times, but not exported by the driver yet.
-pwm[1-4]_min_output - minimum fan speed (range 1 - 255), when the temperature
- is below defined range.
-pwm[1-4]_stop_time - how many milliseconds [ms] must elapse to switch
- corresponding fan off. (when the temperature was below
- defined range).
-pwm[1-4]_start_output-minimum fan speed (range 1 - 255) when spinning up
-pwm[1-4]_step_output- rate of fan speed change (1 - 255)
-pwm[1-4]_stop_output- minimum fan speed (range 1 - 255) when spinning down
-pwm[1-4]_max_output - maximum fan speed (range 1 - 255), when the temperature
- is above defined range.
+pwm[1-4]_min_output
+ minimum fan speed (range 1 - 255), when the temperature
+ is below defined range.
+pwm[1-4]_stop_time
+ how many milliseconds [ms] must elapse to switch
+ corresponding fan off. (when the temperature was below
+ defined range).
+pwm[1-4]_start_output
+ minimum fan speed (range 1 - 255) when spinning up
+pwm[1-4]_step_output
+ rate of fan speed change (1 - 255)
+pwm[1-4]_stop_output
+ minimum fan speed (range 1 - 255) when spinning down
+pwm[1-4]_max_output
+ maximum fan speed (range 1 - 255), when the temperature
+ is above defined range.
Note: last six functions are influenced by other control bits, not yet exported
by the driver, so a change might not have any effect.
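+
+Putting these together, a Thermal Cruise sketch (illustrative values in
+the units documented above, and subject to the note about unexported
+control bits)::
+
+ echo 2     > pwm1_enable      # "Thermal Cruise" mode
+ echo 55000 > pwm1_target      # hold 55 degrees C
+ echo 5000  > pwm1_tolerance   # within +/- 5 degrees C
+ echo 80    > pwm1_min_output  # fan floor below the target range
+ echo 220   > pwm1_max_output  # fan ceiling above the target range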
@@ -161,26 +210,35 @@ different power-on default values, but BIOS should already be loading
appropriate defaults. Note that bank selection must be performed as is currently
done in the driver for all register addresses.
-0x49: only on DHG, selects temperature source for AUX fan, CPU fan0
-0x4a: not completely documented for the EHF and the DHG documentation assigns
- different behavior to bits 7 and 6, including extending the temperature
- input selection to SmartFan I, not just SmartFan III. Testing on the EHF
- will reveal whether they are compatible or not.
-
-0x58: Chip ID: 0xa1=EHF 0xc1=DHG
-0x5e: only on DHG, has bits to enable "current mode" temperature detection and
- critical temperature protection
-0x45b: only on EHF, bit 3, vin4 alarm (EHF supports 10 inputs, only 9 on DHG)
-0x552: only on EHF, vin4
-0x558: only on EHF, vin4 high limit
-0x559: only on EHF, vin4 low limit
-0x6b: only on DHG, SYS fan critical temperature
-0x6c: only on DHG, CPU fan0 critical temperature
-0x6d: only on DHG, AUX fan critical temperature
-0x6e: only on DHG, CPU fan1 critical temperature
-
-0x50-0x55 and 0x650-0x657 are marked "Test Register" for the EHF, but "Reserved
- Register" for the DHG
+========================= =====================================================
+Register(s) Meaning
+========================= =====================================================
+0x49 only on DHG, selects temperature source for AUX fan,
+ CPU fan0
+0x4a not completely documented for the EHF and the DHG
+ documentation assigns different behavior to bits 7
+ and 6, including extending the temperature input
+ selection to SmartFan I, not just SmartFan III.
+ Testing on the EHF will reveal whether they are
+ compatible or not.
+0x58 Chip ID: 0xa1=EHF 0xc1=DHG
+0x5e only on DHG, has bits to enable "current mode"
+ temperature detection and critical temperature
+ protection
+0x45b only on EHF, bit 3, vin4 alarm (EHF supports 10
+ inputs, only 9 on DHG)
+0x552 only on EHF, vin4
+0x558 only on EHF, vin4 high limit
+0x559 only on EHF, vin4 low limit
+0x6b only on DHG, SYS fan critical temperature
+0x6c only on DHG, CPU fan0 critical temperature
+0x6d only on DHG, AUX fan critical temperature
+0x6e only on DHG, CPU fan1 critical temperature
+0x50-0x55 and 0x650-0x657 marked as:
+
+ - "Test Register" for the EHF
+ - "Reserved Register" for the DHG
+========================= =====================================================
The DHG also supports PECI, where the DHG queries Intel CPU temperatures, and
the ICH8 southbridge gets that data via PECI from the DHG, so that the
diff --git a/Documentation/hwmon/w83627hf b/Documentation/hwmon/w83627hf.rst
index 8432e1118173..d1406c28dee7 100644
--- a/Documentation/hwmon/w83627hf
+++ b/Documentation/hwmon/w83627hf.rst
@@ -20,10 +20,10 @@ Supported chips:
Datasheet: Provided by Winbond on request(http://www.winbond.com/hq/enu)
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Mark Studebaker <mdsxyz123@yahoo.com>,
- Bernhard C. Schrenk <clemy@clemy.org>
+ Frodo Looijaard <frodol@dds.nl>,
+ Philip Edelbrock <phil@netroedge.com>,
+ Mark Studebaker <mdsxyz123@yahoo.com>,
+ Bernhard C. Schrenk <clemy@clemy.org>
Module Parameters
-----------------
@@ -52,8 +52,8 @@ If you really want i2c accesses for these Super I/O chips,
use the w83781d driver. However this is not the preferred method
now that this ISA driver has been developed.
-The w83627_HF_ uses pins 110-106 as VID0-VID4. The w83627_THF_ uses the
-same pins as GPIO[0:4]. Technically, the w83627_THF_ does not support a
+The `w83627_HF_` uses pins 110-106 as VID0-VID4. The `w83627_THF_` uses the
+same pins as GPIO[0:4]. Technically, the `w83627_THF_` does not support a
VID reading. However the two chips have the identical 128 pin package. So,
it is possible or even likely for a w83627thf to have the VID signals routed
to these pins despite their not being labeled for that purpose. Therefore,
@@ -75,19 +75,23 @@ module parameter is gone for technical reasons. If you need this feature,
you can obtain the same result by using the isaset tool (part of
lm-sensors) before loading the driver:
-# Enter the Super I/O config space
-isaset -y -f 0x2e 0x87
-isaset -y -f 0x2e 0x87
+Enter the Super I/O config space::
-# Select the hwmon logical device
-isaset -y 0x2e 0x2f 0x07 0x0b
+ isaset -y -f 0x2e 0x87
+ isaset -y -f 0x2e 0x87
-# Set the base I/O address (to 0x290 in this example)
-isaset -y 0x2e 0x2f 0x60 0x02
-isaset -y 0x2e 0x2f 0x61 0x90
+Select the hwmon logical device::
-# Exit the Super-I/O config space
-isaset -y -f 0x2e 0xaa
+ isaset -y 0x2e 0x2f 0x07 0x0b
+
+Set the base I/O address (to 0x290 in this example)::
+
+ isaset -y 0x2e 0x2f 0x60 0x02
+ isaset -y 0x2e 0x2f 0x61 0x90
+
+Exit the Super-I/O config space::
+
+ isaset -y -f 0x2e 0xaa
The above sequence assumes a Super-I/O config space at 0x2e/0x2f, but
0x4e/0x4f is also possible.
@@ -97,18 +101,23 @@ Voltage pin mapping
Here is a summary of the voltage pin mapping for the W83627THF. This
can be useful to convert data provided by board manufacturers into
-working libsensors configuration statements.
-
- W83627THF |
- Pin | Name | Register | Sysfs attribute
------------------------------------------------------
- 100 | CPUVCORE | 20h | in0
- 99 | VIN0 | 21h | in1
- 98 | VIN1 | 22h | in2
- 97 | VIN2 | 24h | in4
- 114 | AVCC | 23h | in3
- 61 | 5VSB | 50h (bank 5) | in7
- 74 | VBAT | 51h (bank 5) | in8
+working libsensors configuration statements:
+
+
+- W83627THF
+
+
+ ======== =============== =============== ===============
+ Pin Name Register Sysfs attribute
+ ======== =============== =============== ===============
+ 100 CPUVCORE 20h in0
+ 99 VIN0 21h in1
+ 98 VIN1 22h in2
+ 97 VIN2 24h in4
+ 114 AVCC 23h in3
+ 61 5VSB 50h (bank 5) in7
+ 74 VBAT 51h (bank 5) in8
+ ======== =============== =============== ===============
For other supported devices, you'll have to take the hard path and
look up the information in the datasheet yourself (and then add it
diff --git a/Documentation/hwmon/w83773g b/Documentation/hwmon/w83773g.rst
index 4cc6c0b8257f..cabaed391414 100644
--- a/Documentation/hwmon/w83773g
+++ b/Documentation/hwmon/w83773g.rst
@@ -1,13 +1,18 @@
Kernel driver w83773g
-====================
+=====================
Supported chips:
+
* Nuvoton W83773G
+
Prefix: 'w83773g'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: https://www.nuvoton.com/resource-files/W83773G_SG_DatasheetV1_2.pdf
Authors:
+
Lei YU <mine260309@gmail.com>
Description
@@ -27,7 +32,4 @@ Resolution for both the local and remote channels is 0.125 degree C.
The chip supports only temperature measurement. The driver exports
the temperature values via the following sysfs files:
-temp[1-3]_input
-temp[2-3]_fault
-temp[2-3]_offset
-update_interval
+- temp[1-3]_input
+- temp[2-3]_fault
+- temp[2-3]_offset
+- update_interval
diff --git a/Documentation/hwmon/w83781d b/Documentation/hwmon/w83781d.rst
index 129b0a3b555b..f36d33dfb704 100644
--- a/Documentation/hwmon/w83781d
+++ b/Documentation/hwmon/w83781d.rst
@@ -2,44 +2,64 @@ Kernel driver w83781d
=====================
Supported chips:
+
* Winbond W83781D
+
Prefix: 'w83781d'
+
Addresses scanned: I2C 0x28 - 0x2f, ISA 0x290 (8 I/O ports)
+
Datasheet: http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/w83781d.pdf
+
* Winbond W83782D
+
Prefix: 'w83782d'
+
Addresses scanned: I2C 0x28 - 0x2f, ISA 0x290 (8 I/O ports)
+
Datasheet: http://www.winbond.com
+
* Winbond W83783S
+
Prefix: 'w83783s'
+
Addresses scanned: I2C 0x2d
+
Datasheet: http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/w83783s.pdf
+
* Asus AS99127F
+
Prefix: 'as99127f'
+
Addresses scanned: I2C 0x28 - 0x2f
+
Datasheet: Unavailable from Asus
+
+
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Mark Studebaker <mdsxyz123@yahoo.com>
+
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>,
+ - Mark Studebaker <mdsxyz123@yahoo.com>
Module parameters
-----------------
* init int
- (default 1)
- Use 'init=0' to bypass initializing the chip.
- Try this if your computer crashes when you load the module.
+ (default 1)
+
+ Use 'init=0' to bypass initializing the chip.
+ Try this if your computer crashes when you load the module.
* reset int
- (default 0)
- The driver used to reset the chip on load, but does no more. Use
- 'reset=1' to restore the old behavior. Report if you need to do this.
+ (default 0)
+ The driver used to reset the chip on load, but no longer does. Use
+ 'reset=1' to restore the old behavior. Report if you need to do this.
force_subclients=bus,caddr,saddr,saddr
This is used to force the i2c addresses for subclients of
- a certain chip. Typical usage is `force_subclients=0,0x2d,0x4a,0x4b'
+ a certain chip. Typical usage is `force_subclients=0,0x2d,0x4a,0x4b`
to force the subclients of chip 0x2d on bus 0 to i2c addresses
0x4a and 0x4b. This parameter is useful for certain Tyan boards.
@@ -54,12 +74,19 @@ There is quite some difference between these chips, but they are similar
enough that it was sensible to put them together in one driver.
The Asus chips are similar to an I2C-only W83782D.
-Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
-as99127f 7 3 0 3 0x31 0x12c3 yes no
-as99127f rev.2 (type_name = as99127f) 0x31 0x5ca3 yes no
-w83781d 7 3 0 3 0x10-1 0x5ca3 yes yes
-w83782d 9 3 2-4 3 0x30 0x5ca3 yes yes
-w83783s 5-6 3 2 1-2 0x40 0x5ca3 yes no
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| Chip | #vin | #fanin | #pwm | #temp | wchipid | vendid | i2c | ISA |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| as99127f | 7 | 3 | 0 | 3 | 0x31 | 0x12c3 | yes | no |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| as99127f rev.2 (type_name = as99127f) | 0x31 | 0x5ca3 | yes | no |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| w83781d | 7 | 3 | 0 | 3 | 0x10-1 | 0x5ca3 | yes | yes |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| w83782d | 9 | 3 | 2-4 | 3 | 0x30 | 0x5ca3 | yes | yes |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| w83783s | 5-6 | 3 | 2 | 1-2 | 0x40 | 0x5ca3 | yes | no |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
Detection of these chips can sometimes be foiled because they can be in
an internal state that allows no clean access. If you know the address
@@ -124,22 +151,24 @@ or only the beeping for some alarms.
Individual alarm and beep bits:
-0x000001: in0
-0x000002: in1
-0x000004: in2
-0x000008: in3
-0x000010: temp1
-0x000020: temp2 (+temp3 on W83781D)
-0x000040: fan1
-0x000080: fan2
-0x000100: in4
-0x000200: in5
-0x000400: in6
-0x000800: fan3
-0x001000: chassis
-0x002000: temp3 (W83782D only)
-0x010000: in7 (W83782D only)
-0x020000: in8 (W83782D only)
+======== ==========================
+0x000001 in0
+0x000002 in1
+0x000004 in2
+0x000008 in3
+0x000010 temp1
+0x000020 temp2 (+temp3 on W83781D)
+0x000040 fan1
+0x000080 fan2
+0x000100 in4
+0x000200 in5
+0x000400 in6
+0x000800 fan3
+0x001000 chassis
+0x002000 temp3 (W83782D only)
+0x010000 in7 (W83782D only)
+0x020000 in8 (W83782D only)
+======== ==========================
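+
+For example, a legacy ``alarms`` reading of 0x000041 decodes, per the
+table above, to fan1 (0x000040) plus in0 (0x000001).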
If an alarm triggers, it will remain triggered until the hardware register
is read at least once. This means that the cause for the alarm may
@@ -179,68 +208,74 @@ Please do not send mail to the author or the sensors group asking for
a datasheet or ideas on how to convince Asus. We can't help.
-NOTES:
+NOTES
-----
783s has no in1 so that in[2-6] are compatible with the 781d/782d.
783s pin is programmable for -5V or temp1; defaults to -5V,
- no control in driver so temp1 doesn't work.
+ no control in driver so temp1 doesn't work.
782d and 783s datasheets differ on which is pwm1 and which is pwm2.
- We chose to follow 782d.
+ We chose to follow 782d.
782d and 783s pin is programmable for fan3 input or pwm2 output;
- defaults to fan3 input.
- If pwm2 is enabled (with echo 255 1 > pwm2), then
- fan3 will report 0.
+ defaults to fan3 input.
+ If pwm2 is enabled (with echo 255 1 > pwm2), then
+ fan3 will report 0.
782d has pwm1-2 for ISA, pwm1-4 for i2c. (pwm3-4 share pins with
- the ISA pins)
+ the ISA pins)
-Data sheet updates:
+Data sheet updates
------------------
- PWM clock registers:
-
- 000: master / 512
- 001: master / 1024
- 010: master / 2048
- 011: master / 4096
- 100: master / 8192
+ * 000: master / 512
+ * 001: master / 1024
+ * 010: master / 2048
+ * 011: master / 4096
+ * 100: master / 8192
Answers from Winbond tech support
---------------------------------
->
-> 1) In the W83781D data sheet section 7.2 last paragraph, it talks about
-> reprogramming the R-T table if the Beta of the thermistor is not
-> 3435K. The R-T table is described briefly in section 8.20.
-> What formulas do I use to program a new R-T table for a given Beta?
->
- We are sorry that the calculation for R-T table value is
-confidential. If you have another Beta value of thermistor, we can help
-to calculate the R-T table for you. But you should give us real R-T
-Table which can be gotten by thermistor vendor. Therefore we will calculate
-them and obtain 32-byte data, and you can fill the 32-byte data to the
-register in Bank0.CR51 of W83781D.
+::
+
+ >
+ > 1) In the W83781D data sheet section 7.2 last paragraph, it talks about
+ > reprogramming the R-T table if the Beta of the thermistor is not
+ > 3435K. The R-T table is described briefly in section 8.20.
+ > What formulas do I use to program a new R-T table for a given Beta?
+ >
+
+ We are sorry that the calculation for R-T table value is
+ confidential. If you have another Beta value of thermistor, we can help
+ to calculate the R-T table for you. But you should give us real R-T
+ Table which can be gotten by thermistor vendor. Therefore we will calculate
+ them and obtain 32-byte data, and you can fill the 32-byte data to the
+ register in Bank0.CR51 of W83781D.
-> 2) In the W83782D data sheet, it mentions that pins 38, 39, and 40 are
-> programmable to be either thermistor or Pentium II diode inputs.
-> How do I program them for diode inputs? I can't find any register
-> to program these to be diode inputs.
- --> You may program Bank0 CR[5Dh] and CR[59h] registers.
- CR[5Dh] bit 1(VTIN1) bit 2(VTIN2) bit 3(VTIN3)
+ > 2) In the W83782D data sheet, it mentions that pins 38, 39, and 40 are
+ > programmable to be either thermistor or Pentium II diode inputs.
+ > How do I program them for diode inputs? I can't find any register
+ > to program these to be diode inputs.
- thermistor 0 0 0
- diode 1 1 1
+ You may program Bank0 CR[5Dh] and CR[59h] registers.
+ =============================== =============== ============== ============
+ CR[5Dh] bit 1(VTIN1) bit 2(VTIN2) bit 3(VTIN3)
-(error) CR[59h] bit 4(VTIN1) bit 2(VTIN2) bit 3(VTIN3)
-(right) CR[59h] bit 4(VTIN1) bit 5(VTIN2) bit 6(VTIN3)
+ thermistor 0 0 0
+ diode 1 1 1
- PII thermal diode 1 1 1
- 2N3904 diode 0 0 0
+
+ (error) CR[59h] bit 4(VTIN1) bit 2(VTIN2) bit 3(VTIN3)
+ (right) CR[59h] bit 4(VTIN1) bit 5(VTIN2) bit 6(VTIN3)
+
+ PII thermal diode 1 1 1
+ 2N3904 diode 0 0 0
+ =============================== =============== ============== ============
Asus Clones
@@ -251,18 +286,21 @@ Here are some very useful information that were given to us by Alex Van
Kaam about how to detect these chips, and how to read their values. He
also gives advice for another Asus chipset, the Mozart-2 (which we
don't support yet). Thanks Alex!
+
I reworded some parts and added personal comments.
-# Detection:
+Detection
+^^^^^^^^^
AS99127F rev.1, AS99127F rev.2 and ASB100:
- I2C address range: 0x29 - 0x2F
-- If register 0x58 holds 0x31 then we have an Asus (either ASB100 or
- AS99127F)
+- If register 0x58 holds 0x31 then we have an Asus (either ASB100 or AS99127F)
- Which one depends on register 0x4F (manufacturer ID):
- 0x06 or 0x94: ASB100
- 0x12 or 0xC3: AS99127F rev.1
- 0x5C or 0xA3: AS99127F rev.2
+
+ - 0x06 or 0x94: ASB100
+ - 0x12 or 0xC3: AS99127F rev.1
+ - 0x5C or 0xA3: AS99127F rev.2
+
Note that 0x5CA3 is Winbond's ID (WEC), which let us think Asus get their
AS99127F rev.2 direct from Winbond. The other codes mean ATT and DVC,
respectively. ATT could stand for Asustek something (although it would be
@@ -273,88 +311,103 @@ Mozart-2:
- I2C address: 0x77
- If register 0x58 holds 0x56 or 0x10 then we have a Mozart-2
- Of the Mozart there are 3 types:
- 0x58=0x56, 0x4E=0x94, 0x4F=0x36: Asus ASM58 Mozart-2
- 0x58=0x56, 0x4E=0x94, 0x4F=0x06: Asus AS2K129R Mozart-2
- 0x58=0x10, 0x4E=0x5C, 0x4F=0xA3: Asus ??? Mozart-2
+
+ - 0x58=0x56, 0x4E=0x94, 0x4F=0x36: Asus ASM58 Mozart-2
+ - 0x58=0x56, 0x4E=0x94, 0x4F=0x06: Asus AS2K129R Mozart-2
+ - 0x58=0x10, 0x4E=0x5C, 0x4F=0xA3: Asus ??? Mozart-2
+
You can handle all 3 the exact same way :)
-# Temperature sensors:
+Temperature sensors
+^^^^^^^^^^^^^^^^^^^
ASB100:
-- sensor 1: register 0x27
-- sensor 2 & 3 are the 2 LM75's on the SMBus
-- sensor 4: register 0x17
-Remark: I noticed that on Intel boards sensor 2 is used for the CPU
+ - sensor 1: register 0x27
+ - sensor 2 & 3 are the 2 LM75's on the SMBus
+ - sensor 4: register 0x17
+
+Remark:
+
+ I noticed that on Intel boards sensor 2 is used for the CPU
and 4 is ignored/stuck, on AMD boards sensor 4 is the CPU and sensor 2 is
either ignored or a socket temperature.
AS99127F (rev.1 and 2 alike):
-- sensor 1: register 0x27
-- sensor 2 & 3 are the 2 LM75's on the SMBus
-Remark: Register 0x5b is suspected to be temperature type selector. Bit 1
+ - sensor 1: register 0x27
+ - sensor 2 & 3 are the 2 LM75's on the SMBus
+
+Remark:
+
+ Register 0x5b is suspected to be temperature type selector. Bit 1
would control temp1, bit 3 temp2 and bit 5 temp3.
Mozart-2:
-- sensor 1: register 0x27
-- sensor 2: register 0x13
+ - sensor 1: register 0x27
+ - sensor 2: register 0x13
-# Fan sensors:
+Fan sensors
+^^^^^^^^^^^
ASB100, AS99127F (rev.1 and 2 alike):
-- 3 fans, identical to the W83781D
+ - 3 fans, identical to the W83781D
Mozart-2:
-- 2 fans only, 1350000/RPM/div
-- fan 1: register 0x28, divisor on register 0xA1 (bits 4-5)
-- fan 2: register 0x29, divisor on register 0xA1 (bits 6-7)
+ - 2 fans only, 1350000/RPM/div
+ - fan 1: register 0x28, divisor on register 0xA1 (bits 4-5)
+ - fan 2: register 0x29, divisor on register 0xA1 (bits 6-7)
-# Voltages:
+Voltages
+^^^^^^^^
This is where there is a difference between AS99127F rev.1 and 2.
-Remark: The difference is similar to the difference between
+
+Remark:
+
+ The difference is similar to the difference between
W83781D and W83782D.
ASB100:
-in0=r(0x20)*0.016
-in1=r(0x21)*0.016
-in2=r(0x22)*0.016
-in3=r(0x23)*0.016*1.68
-in4=r(0x24)*0.016*3.8
-in5=r(0x25)*(-0.016)*3.97
-in6=r(0x26)*(-0.016)*1.666
+ - in0=r(0x20)*0.016
+ - in1=r(0x21)*0.016
+ - in2=r(0x22)*0.016
+ - in3=r(0x23)*0.016*1.68
+ - in4=r(0x24)*0.016*3.8
+ - in5=r(0x25)*(-0.016)*3.97
+ - in6=r(0x26)*(-0.016)*1.666
AS99127F rev.1:
-in0=r(0x20)*0.016
-in1=r(0x21)*0.016
-in2=r(0x22)*0.016
-in3=r(0x23)*0.016*1.68
-in4=r(0x24)*0.016*3.8
-in5=r(0x25)*(-0.016)*3.97
-in6=r(0x26)*(-0.016)*1.503
+ - in0=r(0x20)*0.016
+ - in1=r(0x21)*0.016
+ - in2=r(0x22)*0.016
+ - in3=r(0x23)*0.016*1.68
+ - in4=r(0x24)*0.016*3.8
+ - in5=r(0x25)*(-0.016)*3.97
+ - in6=r(0x26)*(-0.016)*1.503
AS99127F rev.2:
-in0=r(0x20)*0.016
-in1=r(0x21)*0.016
-in2=r(0x22)*0.016
-in3=r(0x23)*0.016*1.68
-in4=r(0x24)*0.016*3.8
-in5=(r(0x25)*0.016-3.6)*5.14+3.6
-in6=(r(0x26)*0.016-3.6)*3.14+3.6
+ - in0=r(0x20)*0.016
+ - in1=r(0x21)*0.016
+ - in2=r(0x22)*0.016
+ - in3=r(0x23)*0.016*1.68
+ - in4=r(0x24)*0.016*3.8
+ - in5=(r(0x25)*0.016-3.6)*5.14+3.6
+ - in6=(r(0x26)*0.016-3.6)*3.14+3.6
Mozart-2:
-in0=r(0x20)*0.016
-in1=255
-in2=r(0x22)*0.016
-in3=r(0x23)*0.016*1.68
-in4=r(0x24)*0.016*4
-in5=255
-in6=255
+ - in0=r(0x20)*0.016
+ - in1=255
+ - in2=r(0x22)*0.016
+ - in3=r(0x23)*0.016*1.68
+ - in4=r(0x24)*0.016*4
+ - in5=255
+ - in6=255
-# PWM
+PWM
+^^^
* Additional info about PWM on the AS99127F (may apply to other Asus
-chips as well) by Jean Delvare as of 2004-04-09:
+ chips as well) by Jean Delvare as of 2004-04-09:
AS99127F revision 2 seems to have two PWM registers at 0x59 and 0x5A,
and a temperature sensor type selector at 0x5B (which basically means
@@ -401,15 +454,20 @@ AS99127F chips at all.
I've been fiddling around with the (in)famous 0x59 register and
found out the following values do work as a form of coarse pwm:
-0x80 - seems to turn fans off after some time(1-2 minutes)... might be
-some form of auto-fan-control based on temp? hmm (Qfan? this mobo is an
-old ASUS, it isn't marketed as Qfan. Maybe some beta pre-attempt at Qfan
-that was dropped at the BIOS)
-0x81 - off
-0x82 - slightly "on-ner" than off, but my fans do not get to move. I can
-hear the high-pitched PWM sound that motors give off at too-low-pwm.
-0x83 - now they do move. Estimate about 70% speed or so.
-0x84-0x8f - full on
+0x80
+ - seems to turn fans off after some time (1-2 minutes)... might be
+ some form of auto-fan-control based on temp? hmm (Qfan? this mobo is an
+ old ASUS, it isn't marketed as Qfan. Maybe some beta pre-attempt at Qfan
+ that was dropped at the BIOS)
+0x81
+ - off
+0x82
+ - slightly "on-ner" than off, but my fans do not get to move. I can
+ hear the high-pitched PWM sound that motors give off at too-low-pwm.
+0x83
+ - now they do move. Estimate about 70% speed or so.
+0x84-0x8f
+ - full on
Changing the high nibble doesn't seem to do much except the high bit
(0x80) must be set for PWM to work, else the current pwm doesn't seem to
@@ -435,6 +493,7 @@ looks like PWM is filtered on this motherboard.
Here are some of measurements:
+==== =========
0x80 20 mV
0x81 20 mV
0x82 232 mV
@@ -451,3 +510,4 @@ Here are some of measurements:
0x8d 12.4 V
0x8e 12.4 V
0x8f 12.4 V
+==== =========
diff --git a/Documentation/hwmon/w83791d b/Documentation/hwmon/w83791d.rst
index f4021a285460..3adaed39b157 100644
--- a/Documentation/hwmon/w83791d
+++ b/Documentation/hwmon/w83791d.rst
@@ -2,9 +2,13 @@ Kernel driver w83791d
=====================
Supported chips:
+
* Winbond W83791D
+
Prefix: 'w83791d'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83791D_W83791Gb.pdf
Author: Charles Spirakis <bezaur@gmail.com>
@@ -12,39 +16,46 @@ Author: Charles Spirakis <bezaur@gmail.com>
This driver was derived from the w83781d.c and w83792d.c source files.
Credits:
+
w83781d.c:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- and Mark Studebaker <mdsxyz123@yahoo.com>
+
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>,
+ - Mark Studebaker <mdsxyz123@yahoo.com>
+
w83792d.c:
- Shane Huang (Winbond),
- Rudolf Marek <r.marek@assembler.cz>
+
+ - Shane Huang (Winbond),
+ - Rudolf Marek <r.marek@assembler.cz>
Additional contributors:
- Sven Anders <anders@anduras.de>
- Marc Hulsman <m.hulsman@tudelft.nl>
+
+ - Sven Anders <anders@anduras.de>
+ - Marc Hulsman <m.hulsman@tudelft.nl>
Module Parameters
-----------------
* init boolean
- (default 0)
- Use 'init=1' to have the driver do extra software initializations.
- The default behavior is to do the minimum initialization possible
- and depend on the BIOS to properly setup the chip. If you know you
- have a w83791d and you're having problems, try init=1 before trying
- reset=1.
+ (default 0)
+
+ Use 'init=1' to have the driver do extra software initializations.
+ The default behavior is to do the minimum initialization possible
+ and depend on the BIOS to properly setup the chip. If you know you
+ have a w83791d and you're having problems, try init=1 before trying
+ reset=1.
* reset boolean
- (default 0)
- Use 'reset=1' to reset the chip (via index 0x40, bit 7). The default
- behavior is no chip reset to preserve BIOS settings.
+ (default 0)
+
+ Use 'reset=1' to reset the chip (via index 0x40, bit 7). The default
+ behavior is no chip reset to preserve BIOS settings.
* force_subclients=bus,caddr,saddr,saddr
- This is used to force the i2c addresses for subclients of
- a certain chip. Example usage is `force_subclients=0,0x2f,0x4a,0x4b'
- to force the subclients of chip 0x2f on bus 0 to i2c addresses
- 0x4a and 0x4b.
+ This is used to force the i2c addresses for subclients of
+ a certain chip. Example usage is `force_subclients=0,0x2f,0x4a,0x4b`
+ to force the subclients of chip 0x2f on bus 0 to i2c addresses
+ 0x4a and 0x4b.
Description
@@ -91,11 +102,11 @@ This file is used for both legacy and new code.
The sysfs interface to the beep bitmask has migrated from the original legacy
method of a single sysfs beep_mask file to a newer method using multiple
-*_beep files as described in .../Documentation/hwmon/sysfs-interface.
+`*_beep` files as described in `Documentation/hwmon/sysfs-interface.rst`.
A similar change has occurred for the bitmap corresponding to the alarms. The
original legacy method used a single sysfs alarms file containing a bitmap
-of triggered alarms. The newer method uses multiple sysfs *_alarm files
+of triggered alarms. The newer method uses multiple sysfs `*_alarm` files
(again following the pattern described in sysfs-interface).
Since both methods read and write the underlying hardware, they can be used
@@ -116,46 +127,54 @@ User mode code requesting values more often will receive cached values.
The sysfs-interface is documented in the 'sysfs-interface' file. Only
chip-specific options are documented here.
-pwm[1-3]_enable - this file controls mode of fan/temperature control for
+======================= =======================================================
+pwm[1-3]_enable this file controls mode of fan/temperature control for
fan 1-3. Fan/PWM 4-5 only support manual mode.
- * 1 Manual mode
- * 2 Thermal Cruise mode
- * 3 Fan Speed Cruise mode (no further support)
-temp[1-3]_target - defines the target temperature for Thermal Cruise mode.
+ * 1 Manual mode
+ * 2 Thermal Cruise mode
+ * 3 Fan Speed Cruise mode (no further support)
+
+temp[1-3]_target defines the target temperature for Thermal Cruise mode.
Unit: millidegree Celsius
RW
-temp[1-3]_tolerance - temperature tolerance for Thermal Cruise mode.
+temp[1-3]_tolerance temperature tolerance for Thermal Cruise mode.
Specifies an interval around the target temperature
in which the fan speed is not changed.
Unit: millidegree Celsius
RW
+======================= =======================================================
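+
+For instance, a Thermal Cruise setup for fan 1 could look like this
+(illustrative values, written from the device's hwmon directory)::
+
+ echo 2     > pwm1_enable      # Thermal Cruise mode
+ echo 50000 > temp1_target     # target 50 degrees C
+ echo 3000  > temp1_tolerance  # hold speed within +/- 3 degrees C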
Alarms bitmap vs. beep_mask bitmask
-------------------------------------
+-----------------------------------
+
For legacy code using the alarms and beep_mask files:
-in0 (VCORE) : alarms: 0x000001 beep_mask: 0x000001
-in1 (VINR0) : alarms: 0x000002 beep_mask: 0x002000 <== mismatch
-in2 (+3.3VIN): alarms: 0x000004 beep_mask: 0x000004
-in3 (5VDD) : alarms: 0x000008 beep_mask: 0x000008
-in4 (+12VIN) : alarms: 0x000100 beep_mask: 0x000100
-in5 (-12VIN) : alarms: 0x000200 beep_mask: 0x000200
-in6 (-5VIN) : alarms: 0x000400 beep_mask: 0x000400
-in7 (VSB) : alarms: 0x080000 beep_mask: 0x010000 <== mismatch
-in8 (VBAT) : alarms: 0x100000 beep_mask: 0x020000 <== mismatch
-in9 (VINR1) : alarms: 0x004000 beep_mask: 0x004000
-temp1 : alarms: 0x000010 beep_mask: 0x000010
-temp2 : alarms: 0x000020 beep_mask: 0x000020
-temp3 : alarms: 0x002000 beep_mask: 0x000002 <== mismatch
-fan1 : alarms: 0x000040 beep_mask: 0x000040
-fan2 : alarms: 0x000080 beep_mask: 0x000080
-fan3 : alarms: 0x000800 beep_mask: 0x000800
-fan4 : alarms: 0x200000 beep_mask: 0x200000
-fan5 : alarms: 0x400000 beep_mask: 0x400000
-tart1 : alarms: 0x010000 beep_mask: 0x040000 <== mismatch
-tart2 : alarms: 0x020000 beep_mask: 0x080000 <== mismatch
-tart3 : alarms: 0x040000 beep_mask: 0x100000 <== mismatch
-case_open : alarms: 0x001000 beep_mask: 0x001000
-global_enable: alarms: -------- beep_mask: 0x800000 (modified via beep_enable)
+============= ======== ========= ==========================
+Signal Alarms beep_mask Obs
+============= ======== ========= ==========================
+in0 (VCORE) 0x000001 0x000001
+in1 (VINR0) 0x000002 0x002000 <== mismatch
+in2 (+3.3VIN) 0x000004 0x000004
+in3 (5VDD) 0x000008 0x000008
+in4 (+12VIN) 0x000100 0x000100
+in5 (-12VIN) 0x000200 0x000200
+in6 (-5VIN) 0x000400 0x000400
+in7 (VSB) 0x080000 0x010000 <== mismatch
+in8 (VBAT) 0x100000 0x020000 <== mismatch
+in9 (VINR1) 0x004000 0x004000
+temp1 0x000010 0x000010
+temp2 0x000020 0x000020
+temp3 0x002000 0x000002 <== mismatch
+fan1 0x000040 0x000040
+fan2 0x000080 0x000080
+fan3 0x000800 0x000800
+fan4 0x200000 0x200000
+fan5 0x400000 0x400000
+tart1 0x010000 0x040000 <== mismatch
+tart2 0x020000 0x080000 <== mismatch
+tart3 0x040000 0x100000 <== mismatch
+case_open 0x001000 0x001000
+global_enable - 0x800000 (modified via beep_enable)
+============= ======== ========= ==========================
diff --git a/Documentation/hwmon/w83792d b/Documentation/hwmon/w83792d.rst
index f2ffc402ea45..92c4bfe4968c 100644
--- a/Documentation/hwmon/w83792d
+++ b/Documentation/hwmon/w83792d.rst
@@ -2,9 +2,13 @@ Kernel driver w83792d
=====================
Supported chips:
+
* Winbond W83792D
+
Prefix: 'w83792d'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: http://www.winbond.com.tw
Author: Shane Huang (Winbond)
@@ -15,15 +19,16 @@ Module Parameters
-----------------
* init int
- (default 1)
- Use 'init=0' to bypass initializing the chip.
- Try this if your computer crashes when you load the module.
+ (default 1)
+
+ Use 'init=0' to bypass initializing the chip.
+ Try this if your computer crashes when you load the module.
* force_subclients=bus,caddr,saddr,saddr
- This is used to force the i2c addresses for subclients of
- a certain chip. Example usage is `force_subclients=0,0x2f,0x4a,0x4b'
- to force the subclients of chip 0x2f on bus 0 to i2c addresses
- 0x4a and 0x4b.
+ This is used to force the i2c addresses for subclients of
+ a certain chip. Example usage is `force_subclients=0,0x2f,0x4a,0x4b`
+ to force the subclients of chip 0x2f on bus 0 to i2c addresses
+ 0x4a and 0x4b.
Description
@@ -67,31 +72,34 @@ or maximum limit.
Alarms are provided as output from "realtime status register". Following bits
are defined:
-bit - alarm on:
-0 - in0
-1 - in1
-2 - temp1
-3 - temp2
-4 - temp3
-5 - fan1
-6 - fan2
-7 - fan3
-8 - in2
-9 - in3
-10 - in4
-11 - in5
-12 - in6
-13 - VID change
-14 - chassis
-15 - fan7
-16 - tart1
-17 - tart2
-18 - tart3
-19 - in7
-20 - in8
-21 - fan4
-22 - fan5
-23 - fan6
+==== ==========
+bit alarm on
+==== ==========
+0 in0
+1 in1
+2 temp1
+3 temp2
+4 temp3
+5 fan1
+6 fan2
+7 fan3
+8 in2
+9 in3
+10 in4
+11 in5
+12 in6
+13 VID change
+14 chassis
+15 fan7
+16 tart1
+17 tart2
+18 tart3
+19 in7
+20 in8
+21 fan4
+22 fan5
+23 fan6
+==== ==========
Tart will be asserted while target temperature cannot be achieved after 3 minutes
of full speed rotation of corresponding fan.
@@ -114,7 +122,7 @@ Known problems:
by CR[0x49h].
- The function of vid and vrm has not been finished, because I'm NOT
very familiar with them. Adding support is welcome.
-  - The function of chassis open detection needs more tests.
+ - The function of chassis open detection needs more tests.
- If you have ASUS server board and chip was not found: Then you will
need to upgrade to latest (or beta) BIOS. If it does not help please
contact us.
@@ -165,17 +173,27 @@ for each fan.
/sys files
----------
-pwm[1-7] - this file stores PWM duty cycle or DC value (fan speed) in range:
- 0 (stop) to 255 (full)
-pwm[1-3]_enable - this file controls mode of fan/temperature control:
- * 0 Disabled
- * 1 Manual mode
- * 2 Smart Fan II
- * 3 Thermal Cruise
-pwm[1-7]_mode - Select PWM or DC mode
- * 0 DC
- * 1 PWM
-thermal_cruise[1-3] - Selects the desired temperature for cruise (degC)
-tolerance[1-3] - Value in degrees of Celsius (degC) for +- T
-sf2_point[1-4]_fan[1-3] - four temperature points for each fan for Smart Fan II
-sf2_level[1-3]_fan[1-3] - three PWM/DC levels for each fan for Smart Fan II
+pwm[1-7]
+ - this file stores PWM duty cycle or DC value (fan speed) in range:
+
+ 0 (stop) to 255 (full)
+pwm[1-3]_enable
+ - this file controls mode of fan/temperature control:
+
+ * 0 Disabled
+ * 1 Manual mode
+ * 2 Smart Fan II
+ * 3 Thermal Cruise
+pwm[1-7]_mode
+ - Select PWM or DC mode
+
+ * 0 DC
+ * 1 PWM
+thermal_cruise[1-3]
+ - Selects the desired temperature for cruise (degC)
+tolerance[1-3]
+ - Value in degrees of Celsius (degC) for +- T
+sf2_point[1-4]_fan[1-3]
+ - four temperature points for each fan for Smart Fan II
+sf2_level[1-3]_fan[1-3]
+ - three PWM/DC levels for each fan for Smart Fan II
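+
+For example, to pin fan 1 at full speed in manual PWM mode (the hwmon
+sysfs path is system-dependent)::
+
+ echo 1   > pwm1_enable   # manual mode
+ echo 1   > pwm1_mode     # PWM rather than DC
+ echo 255 > pwm1          # full speed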
diff --git a/Documentation/hwmon/w83793 b/Documentation/hwmon/w83793
deleted file mode 100644
index 6cc5f639b721..000000000000
--- a/Documentation/hwmon/w83793
+++ /dev/null
@@ -1,106 +0,0 @@
-Kernel driver w83793
-====================
-
-Supported chips:
- * Winbond W83793G/W83793R
- Prefix: 'w83793'
- Addresses scanned: I2C 0x2c - 0x2f
- Datasheet: Still not published
-
-Authors:
- Yuan Mu (Winbond Electronics)
- Rudolf Marek <r.marek@assembler.cz>
-
-
-Module parameters
------------------
-
-* reset int
- (default 0)
- This parameter is not recommended, it will lose motherboard specific
- settings. Use 'reset=1' to reset the chip when loading this module.
-
-* force_subclients=bus,caddr,saddr1,saddr2
- This is used to force the i2c addresses for subclients of
- a certain chip. Typical usage is `force_subclients=0,0x2f,0x4a,0x4b'
- to force the subclients of chip 0x2f on bus 0 to i2c addresses
- 0x4a and 0x4b.
-
-
-Description
------------
-
-This driver implements support for Winbond W83793G/W83793R chips.
-
-* Exported features
- This driver exports 10 voltage sensors, up to 12 fan tachometer inputs,
- 6 remote temperatures, up to 8 sets of PWM fan controls, SmartFan
- (automatic fan speed control) on all temperature/PWM combinations, 2
- sets of 6-pin CPU VID input.
-
-* Sensor resolutions
- If your motherboard maker used the reference design, the resolution of
- voltage0-2 is 2mV, resolution of voltage3/4/5 is 16mV, 8mV for voltage6,
- 24mV for voltage7/8. Temp1-4 have a 0.25 degree Celsius resolution,
- temp5-6 have a 1 degree Celsiis resolution.
-
-* Temperature sensor types
- Temp1-4 have 2 possible types. It can be read from (and written to)
- temp[1-4]_type.
- - If the value is 3, it starts monitoring using a remote termal diode
- (default).
- - If the value is 6, it starts monitoring using the temperature sensor
- in Intel CPU and get result by PECI.
- Temp5-6 can be connected to external thermistors (value of
- temp[5-6]_type is 4).
-
-* Alarm mechanism
- For voltage sensors, an alarm triggers if the measured value is below
- the low voltage limit or over the high voltage limit.
- For temperature sensors, an alarm triggers if the measured value goes
- above the high temperature limit, and wears off only after the measured
- value drops below the hysteresis value.
- For fan sensors, an alarm triggers if the measured value is below the
- low speed limit.
-
-* SmartFan/PWM control
- If you want to set a pwm fan to manual mode, you just need to make sure it
- is not controlled by any temp channel, for example, you want to set fan1
- to manual mode, you need to check the value of temp[1-6]_fan_map, make
- sure bit 0 is cleared in the 6 values. And then set the pwm1 value to
- control the fan.
-
- Each temperature channel can control all the 8 PWM outputs (by setting the
- corresponding bit in tempX_fan_map), you can set the temperature channel
- mode using temp[1-6]_pwm_enable, 2 is Thermal Cruise mode and 3
- is the SmartFanII mode. Temperature channels will try to speed up or
- slow down all controlled fans, this means one fan can receive different
- PWM value requests from different temperature channels, but the chip
- will always pick the safest (max) PWM value for each fan.
-
- In Thermal Cruise mode, the chip attempts to keep the temperature at a
- predefined value, within a tolerance margin. So if tempX_input >
- thermal_cruiseX + toleranceX, the chip will increase the PWM value,
- if tempX_input < thermal_cruiseX - toleranceX, the chip will decrease
- the PWM value. If the temperature is within the tolerance range, the PWM
- value is left unchanged.
-
- SmartFanII works differently, you have to define up to 7 PWM, temperature
- trip points, defining a PWM/temperature curve which the chip will follow.
- While not fundamentally different from the Thermal Cruise mode, the
- implementation is quite different, giving you a finer-grained control.
-
-* Chassis
- If the case open alarm triggers, it will stay in this state unless cleared
- by writing 0 to the sysfs file "intrusion0_alarm".
-
-* VID and VRM
- The VRM version is detected automatically, don't modify the it unless you
- *do* know the cpu VRM version and it's not properly detected.
-
-
-Notes
------
-
- Only Fan1-5 and PWM1-3 are guaranteed to always exist, other fan inputs and
- PWM outputs may or may not exist depending on the chip pin configuration.
diff --git a/Documentation/hwmon/w83793.rst b/Documentation/hwmon/w83793.rst
new file mode 100644
index 000000000000..83bb40c48645
--- /dev/null
+++ b/Documentation/hwmon/w83793.rst
@@ -0,0 +1,113 @@
+Kernel driver w83793
+====================
+
+Supported chips:
+
+ * Winbond W83793G/W83793R
+
+ Prefix: 'w83793'
+
+ Addresses scanned: I2C 0x2c - 0x2f
+
+ Datasheet: Still not published
+
+Authors:
+ - Yuan Mu (Winbond Electronics)
+ - Rudolf Marek <r.marek@assembler.cz>
+
+
+Module parameters
+-----------------
+
+* reset int
+ (default 0)
+
+ This parameter is not recommended, as it will lose motherboard-specific
+ settings. Use 'reset=1' to reset the chip when loading this module.
+
+* force_subclients=bus,caddr,saddr1,saddr2
+ This is used to force the i2c addresses for subclients of
+ a certain chip. Typical usage is `force_subclients=0,0x2f,0x4a,0x4b`
+ to force the subclients of chip 0x2f on bus 0 to i2c addresses
+ 0x4a and 0x4b.
+
+
+Description
+-----------
+
+This driver implements support for Winbond W83793G/W83793R chips.
+
+* Exported features
+ This driver exports 10 voltage sensors, up to 12 fan tachometer inputs,
+ 6 remote temperatures, up to 8 sets of PWM fan controls, SmartFan
+ (automatic fan speed control) on all temperature/PWM combinations, 2
+ sets of 6-pin CPU VID input.
+
+* Sensor resolutions
+ If your motherboard maker used the reference design, the resolution of
+ voltage0-2 is 2mV, resolution of voltage3/4/5 is 16mV, 8mV for voltage6,
+ 24mV for voltage7/8. Temp1-4 have a 0.25 degree Celsius resolution,
+ temp5-6 have a 1 degree Celsius resolution.
+
+* Temperature sensor types
+ Temp1-4 have 2 possible types. The type can be read from (and written
+ to) temp[1-4]_type.
+
+ - If the value is 3, it starts monitoring using a remote thermal diode
+ (default).
+ - If the value is 6, it starts monitoring using the temperature sensor
+ in the Intel CPU and gets the result via PECI.
+
+ Temp5-6 can be connected to external thermistors (value of
+ temp[5-6]_type is 4).
+
+* Alarm mechanism
+ For voltage sensors, an alarm triggers if the measured value is below
+ the low voltage limit or over the high voltage limit.
+ For temperature sensors, an alarm triggers if the measured value goes
+ above the high temperature limit, and wears off only after the measured
+ value drops below the hysteresis value.
+ For fan sensors, an alarm triggers if the measured value is below the
+ low speed limit.
+
+* SmartFan/PWM control
+ To set a pwm fan to manual mode, just make sure it is not controlled by
+ any temp channel. For example, to set fan1 to manual mode, check the
+ value of temp[1-6]_fan_map and make sure bit 0 is cleared in all 6
+ values; then set the pwm1 value to control the fan (a sketch follows at
+ the end of this list).
+
+ Each temperature channel can control all 8 PWM outputs (by setting the
+ corresponding bit in tempX_fan_map). You can set the temperature channel
+ mode using temp[1-6]_pwm_enable; 2 is Thermal Cruise mode and 3
+ is the SmartFanII mode. Temperature channels will try to speed up or
+ slow down all controlled fans; this means one fan can receive different
+ PWM value requests from different temperature channels, but the chip
+ will always pick the safest (max) PWM value for each fan.
+
+ In Thermal Cruise mode, the chip attempts to keep the temperature at a
+ predefined value, within a tolerance margin. So if tempX_input >
+ thermal_cruiseX + toleranceX, the chip will increase the PWM value;
+ if tempX_input < thermal_cruiseX - toleranceX, the chip will decrease
+ the PWM value. If the temperature is within the tolerance range, the PWM
+ value is left unchanged.
+
+ SmartFanII works differently: you define up to 7 PWM/temperature
+ trip points, describing a PWM/temperature curve which the chip will follow.
+ While not fundamentally different from the Thermal Cruise mode, the
+ implementation is quite different, giving you a finer-grained control.
+
+* Chassis
+ If the case open alarm triggers, it will stay in this state unless cleared
+ by writing 0 to the sysfs file "intrusion0_alarm".
+
+* VID and VRM
+ The VRM version is detected automatically; don't modify it unless you
+ *do* know the cpu VRM version and it's not properly detected.
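+
+A sketch of the manual-mode procedure described under "SmartFan/PWM
+control" above (the hwmon path and the shell bit-twiddling are
+illustrative; the fan_map attributes are assumed to read back as plain
+numbers)::
+
+ cd /sys/class/hwmon/hwmon0/device   # adjust for your system
+ for i in 1 2 3 4 5 6; do            # detach fan1 from every temp channel
+     val=$(cat temp${i}_fan_map)
+     echo $((val & ~1)) > temp${i}_fan_map   # bit 0 selects fan1
+ done
+ echo 128 > pwm1                     # now drive fan1 manually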
+
+
+Notes
+-----
+
+ Only Fan1-5 and PWM1-3 are guaranteed to always exist, other fan inputs and
+ PWM outputs may or may not exist depending on the chip pin configuration.
diff --git a/Documentation/hwmon/w83795 b/Documentation/hwmon/w83795
deleted file mode 100644
index d3e678216b9a..000000000000
--- a/Documentation/hwmon/w83795
+++ /dev/null
@@ -1,127 +0,0 @@
-Kernel driver w83795
-====================
-
-Supported chips:
- * Winbond/Nuvoton W83795G
- Prefix: 'w83795g'
- Addresses scanned: I2C 0x2c - 0x2f
- Datasheet: Available for download on nuvoton.com
- * Winbond/Nuvoton W83795ADG
- Prefix: 'w83795adg'
- Addresses scanned: I2C 0x2c - 0x2f
- Datasheet: Available for download on nuvoton.com
-
-Authors:
- Wei Song (Nuvoton)
- Jean Delvare <jdelvare@suse.de>
-
-
-Pin mapping
------------
-
-Here is a summary of the pin mapping for the W83795G and W83795ADG.
-This can be useful to convert data provided by board manufacturers
-into working libsensors configuration statements.
-
- W83795G |
- Pin | Name | Register | Sysfs attribute
-------------------------------------------------------------------
- 13 | VSEN1 (VCORE1) | 10h | in0
- 14 | VSEN2 (VCORE2) | 11h | in1
- 15 | VSEN3 (VCORE3) | 12h | in2
- 16 | VSEN4 | 13h | in3
- 17 | VSEN5 | 14h | in4
- 18 | VSEN6 | 15h | in5
- 19 | VSEN7 | 16h | in6
- 20 | VSEN8 | 17h | in7
- 21 | VSEN9 | 18h | in8
- 22 | VSEN10 | 19h | in9
- 23 | VSEN11 | 1Ah | in10
- 28 | VTT | 1Bh | in11
- 24 | 3VDD | 1Ch | in12
- 25 | 3VSB | 1Dh | in13
- 26 | VBAT | 1Eh | in14
- 3 | VSEN12/TR5 | 1Fh | in15/temp5
- 4 | VSEN13/TR5 | 20h | in16/temp6
- 5/ 6 | VDSEN14/TR1/TD1 | 21h | in17/temp1
- 7/ 8 | VDSEN15/TR2/TD2 | 22h | in18/temp2
- 9/ 10 | VDSEN16/TR3/TD3 | 23h | in19/temp3
- 11/ 12 | VDSEN17/TR4/TD4 | 24h | in20/temp4
- 40 | FANIN1 | 2Eh | fan1
- 42 | FANIN2 | 2Fh | fan2
- 44 | FANIN3 | 30h | fan3
- 46 | FANIN4 | 31h | fan4
- 48 | FANIN5 | 32h | fan5
- 50 | FANIN6 | 33h | fan6
- 52 | FANIN7 | 34h | fan7
- 54 | FANIN8 | 35h | fan8
- 57 | FANIN9 | 36h | fan9
- 58 | FANIN10 | 37h | fan10
- 59 | FANIN11 | 38h | fan11
- 60 | FANIN12 | 39h | fan12
- 31 | FANIN13 | 3Ah | fan13
- 35 | FANIN14 | 3Bh | fan14
- 41 | FANCTL1 | 10h (bank 2) | pwm1
- 43 | FANCTL2 | 11h (bank 2) | pwm2
- 45 | FANCTL3 | 12h (bank 2) | pwm3
- 47 | FANCTL4 | 13h (bank 2) | pwm4
- 49 | FANCTL5 | 14h (bank 2) | pwm5
- 51 | FANCTL6 | 15h (bank 2) | pwm6
- 53 | FANCTL7 | 16h (bank 2) | pwm7
- 55 | FANCTL8 | 17h (bank 2) | pwm8
- 29/ 30 | PECI/TSI (DTS1) | 26h | temp7
- 29/ 30 | PECI/TSI (DTS2) | 27h | temp8
- 29/ 30 | PECI/TSI (DTS3) | 28h | temp9
- 29/ 30 | PECI/TSI (DTS4) | 29h | temp10
- 29/ 30 | PECI/TSI (DTS5) | 2Ah | temp11
- 29/ 30 | PECI/TSI (DTS6) | 2Bh | temp12
- 29/ 30 | PECI/TSI (DTS7) | 2Ch | temp13
- 29/ 30 | PECI/TSI (DTS8) | 2Dh | temp14
- 27 | CASEOPEN# | 46h | intrusion0
-
- W83795ADG |
- Pin | Name | Register | Sysfs attribute
-------------------------------------------------------------------
- 10 | VSEN1 (VCORE1) | 10h | in0
- 11 | VSEN2 (VCORE2) | 11h | in1
- 12 | VSEN3 (VCORE3) | 12h | in2
- 13 | VSEN4 | 13h | in3
- 14 | VSEN5 | 14h | in4
- 15 | VSEN6 | 15h | in5
- 16 | VSEN7 | 16h | in6
- 17 | VSEN8 | 17h | in7
- 22 | VTT | 1Bh | in11
- 18 | 3VDD | 1Ch | in12
- 19 | 3VSB | 1Dh | in13
- 20 | VBAT | 1Eh | in14
- 48 | VSEN12/TR5 | 1Fh | in15/temp5
- 1 | VSEN13/TR5 | 20h | in16/temp6
- 2/ 3 | VDSEN14/TR1/TD1 | 21h | in17/temp1
- 4/ 5 | VDSEN15/TR2/TD2 | 22h | in18/temp2
- 6/ 7 | VDSEN16/TR3/TD3 | 23h | in19/temp3
- 8/ 9 | VDSEN17/TR4/TD4 | 24h | in20/temp4
- 32 | FANIN1 | 2Eh | fan1
- 34 | FANIN2 | 2Fh | fan2
- 36 | FANIN3 | 30h | fan3
- 37 | FANIN4 | 31h | fan4
- 38 | FANIN5 | 32h | fan5
- 39 | FANIN6 | 33h | fan6
- 40 | FANIN7 | 34h | fan7
- 41 | FANIN8 | 35h | fan8
- 43 | FANIN9 | 36h | fan9
- 44 | FANIN10 | 37h | fan10
- 45 | FANIN11 | 38h | fan11
- 46 | FANIN12 | 39h | fan12
- 24 | FANIN13 | 3Ah | fan13
- 28 | FANIN14 | 3Bh | fan14
- 33 | FANCTL1 | 10h (bank 2) | pwm1
- 35 | FANCTL2 | 11h (bank 2) | pwm2
- 23 | PECI (DTS1) | 26h | temp7
- 23 | PECI (DTS2) | 27h | temp8
- 23 | PECI (DTS3) | 28h | temp9
- 23 | PECI (DTS4) | 29h | temp10
- 23 | PECI (DTS5) | 2Ah | temp11
- 23 | PECI (DTS6) | 2Bh | temp12
- 23 | PECI (DTS7) | 2Ch | temp13
- 23 | PECI (DTS8) | 2Dh | temp14
- 21 | CASEOPEN# | 46h | intrusion0
diff --git a/Documentation/hwmon/w83795.rst b/Documentation/hwmon/w83795.rst
new file mode 100644
index 000000000000..d0615e2fabb9
--- /dev/null
+++ b/Documentation/hwmon/w83795.rst
@@ -0,0 +1,142 @@
+Kernel driver w83795
+====================
+
+Supported chips:
+
+ * Winbond/Nuvoton W83795G
+
+ Prefix: 'w83795g'
+
+ Addresses scanned: I2C 0x2c - 0x2f
+
+ Datasheet: Available for download on nuvoton.com
+
+ * Winbond/Nuvoton W83795ADG
+
+ Prefix: 'w83795adg'
+
+ Addresses scanned: I2C 0x2c - 0x2f
+
+ Datasheet: Available for download on nuvoton.com
+
+Authors:
+ - Wei Song (Nuvoton)
+ - Jean Delvare <jdelvare@suse.de>
+
+
+Pin mapping
+-----------
+
+Here is a summary of the pin mapping for the W83795G and W83795ADG.
+This can be useful to convert data provided by board manufacturers
+into working libsensors configuration statements.
+
+
+- W83795G
+
+========= ======================= =============== ================
+Pin Name Register Sysfs attribute
+========= ======================= =============== ================
+ 13 VSEN1 (VCORE1) 10h in0
+ 14 VSEN2 (VCORE2) 11h in1
+ 15 VSEN3 (VCORE3) 12h in2
+ 16 VSEN4 13h in3
+ 17 VSEN5 14h in4
+ 18 VSEN6 15h in5
+ 19 VSEN7 16h in6
+ 20 VSEN8 17h in7
+ 21 VSEN9 18h in8
+ 22 VSEN10 19h in9
+ 23 VSEN11 1Ah in10
+ 28 VTT 1Bh in11
+ 24 3VDD 1Ch in12
+ 25 3VSB 1Dh in13
+ 26 VBAT 1Eh in14
+ 3 VSEN12/TR5 1Fh in15/temp5
+ 4 VSEN13/TR5 20h in16/temp6
+ 5/ 6 VDSEN14/TR1/TD1 21h in17/temp1
+ 7/ 8 VDSEN15/TR2/TD2 22h in18/temp2
+ 9/ 10 VDSEN16/TR3/TD3 23h in19/temp3
+ 11/ 12 VDSEN17/TR4/TD4 24h in20/temp4
+ 40 FANIN1 2Eh fan1
+ 42 FANIN2 2Fh fan2
+ 44 FANIN3 30h fan3
+ 46 FANIN4 31h fan4
+ 48 FANIN5 32h fan5
+ 50 FANIN6 33h fan6
+ 52 FANIN7 34h fan7
+ 54 FANIN8 35h fan8
+ 57 FANIN9 36h fan9
+ 58 FANIN10 37h fan10
+ 59 FANIN11 38h fan11
+ 60 FANIN12 39h fan12
+ 31 FANIN13 3Ah fan13
+ 35 FANIN14 3Bh fan14
+ 41 FANCTL1 10h (bank 2) pwm1
+ 43 FANCTL2 11h (bank 2) pwm2
+ 45 FANCTL3 12h (bank 2) pwm3
+ 47 FANCTL4 13h (bank 2) pwm4
+ 49 FANCTL5 14h (bank 2) pwm5
+ 51 FANCTL6 15h (bank 2) pwm6
+ 53 FANCTL7 16h (bank 2) pwm7
+ 55 FANCTL8 17h (bank 2) pwm8
+ 29/ 30 PECI/TSI (DTS1) 26h temp7
+ 29/ 30 PECI/TSI (DTS2) 27h temp8
+ 29/ 30 PECI/TSI (DTS3) 28h temp9
+ 29/ 30 PECI/TSI (DTS4) 29h temp10
+ 29/ 30 PECI/TSI (DTS5) 2Ah temp11
+ 29/ 30 PECI/TSI (DTS6) 2Bh temp12
+ 29/ 30 PECI/TSI (DTS7) 2Ch temp13
+ 29/ 30 PECI/TSI (DTS8) 2Dh temp14
+ 27 CASEOPEN# 46h intrusion0
+========= ======================= =============== ================
+
+- W83795ADG
+
+========= ======================= =============== ================
+Pin Name Register Sysfs attribute
+========= ======================= =============== ================
+ 10 VSEN1 (VCORE1) 10h in0
+ 11 VSEN2 (VCORE2) 11h in1
+ 12 VSEN3 (VCORE3) 12h in2
+ 13 VSEN4 13h in3
+ 14 VSEN5 14h in4
+ 15 VSEN6 15h in5
+ 16 VSEN7 16h in6
+ 17 VSEN8 17h in7
+ 22 VTT 1Bh in11
+ 18 3VDD 1Ch in12
+ 19 3VSB 1Dh in13
+ 20 VBAT 1Eh in14
+ 48 VSEN12/TR5 1Fh in15/temp5
+ 1 VSEN13/TR5 20h in16/temp6
+ 2/ 3 VDSEN14/TR1/TD1 21h in17/temp1
+ 4/ 5 VDSEN15/TR2/TD2 22h in18/temp2
+ 6/ 7 VDSEN16/TR3/TD3 23h in19/temp3
+ 8/ 9 VDSEN17/TR4/TD4 24h in20/temp4
+ 32 FANIN1 2Eh fan1
+ 34 FANIN2 2Fh fan2
+ 36 FANIN3 30h fan3
+ 37 FANIN4 31h fan4
+ 38 FANIN5 32h fan5
+ 39 FANIN6 33h fan6
+ 40 FANIN7 34h fan7
+ 41 FANIN8 35h fan8
+ 43 FANIN9 36h fan9
+ 44 FANIN10 37h fan10
+ 45 FANIN11 38h fan11
+ 46 FANIN12 39h fan12
+ 24 FANIN13 3Ah fan13
+ 28 FANIN14 3Bh fan14
+ 33 FANCTL1 10h (bank 2) pwm1
+ 35 FANCTL2 11h (bank 2) pwm2
+ 23 PECI (DTS1) 26h temp7
+ 23 PECI (DTS2) 27h temp8
+ 23 PECI (DTS3) 28h temp9
+ 23 PECI (DTS4) 29h temp10
+ 23 PECI (DTS5) 2Ah temp11
+ 23 PECI (DTS6) 2Bh temp12
+ 23 PECI (DTS7) 2Ch temp13
+ 23 PECI (DTS8) 2Dh temp14
+ 21 CASEOPEN# 46h intrusion0
+========= ======================= =============== ================
diff --git a/Documentation/hwmon/w83l785ts b/Documentation/hwmon/w83l785ts.rst
index c8978478871f..7fa5418fed11 100644
--- a/Documentation/hwmon/w83l785ts
+++ b/Documentation/hwmon/w83l785ts.rst
@@ -2,14 +2,19 @@ Kernel driver w83l785ts
=======================
Supported chips:
+
* Winbond W83L785TS-S
+
Prefix: 'w83l785ts'
+
Addresses scanned: I2C 0x2e
+
Datasheet: Publicly available at the Winbond USA website
- http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
+
+ http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
Authors:
- Jean Delvare <jdelvare@suse.de>
+ Jean Delvare <jdelvare@suse.de>
Description
-----------
diff --git a/Documentation/hwmon/w83l786ng b/Documentation/hwmon/w83l786ng.rst
index d8f55d7fff10..2b7776190de3 100644
--- a/Documentation/hwmon/w83l786ng
+++ b/Documentation/hwmon/w83l786ng.rst
@@ -1,10 +1,14 @@
Kernel driver w83l786ng
-=====================
+=======================
Supported chips:
+
* Winbond W83L786NG/W83L786NR
+
Prefix: 'w83l786ng'
+
Addresses scanned: I2C 0x2e - 0x2f
+
Datasheet: http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L786NRNG09.pdf
Author: Kevin Lo <kevlo@kevlo.org>
@@ -14,9 +18,10 @@ Module Parameters
-----------------
* reset boolean
- (default 0)
- Use 'reset=1' to reset the chip (via index 0x40, bit 7). The default
- behavior is no chip reset to preserve BIOS settings
+ (default 0)
+
+ Use 'reset=1' to reset the chip (via index 0x40, bit 7). The default
+ behavior is no chip reset, to preserve the BIOS settings.
Description
@@ -41,14 +46,21 @@ or maximum limit.
/sys files
----------
-pwm[1-2] - this file stores PWM duty cycle or DC value (fan speed) in range:
- 0 (stop) to 255 (full)
-pwm[1-2]_enable - this file controls mode of fan/temperature control:
- * 0 Manual Mode
- * 1 Thermal Cruise
- * 2 Smart Fan II
- * 4 FAN_SET
-pwm[1-2]_mode - Select PWM of DC mode
- * 0 DC
- * 1 PWM
-tolerance[1-2] - Value in degrees of Celsius (degC) for +- T
+pwm[1-2]
+ - this file stores PWM duty cycle or DC value (fan speed) in range:
+
+ 0 (stop) to 255 (full)
+pwm[1-2]_enable
+ - this file controls mode of fan/temperature control:
+
+ * 0 Manual Mode
+ * 1 Thermal Cruise
+ * 2 Smart Fan II
+ * 4 FAN_SET
+pwm[1-2]_mode
+ - Select PWM or DC mode
+
+ * 0 DC
+ * 1 PWM
+tolerance[1-2]
+ - Value in degrees Celsius (degC) for +/- T
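+
+As a rough illustration, the attributes above can be driven from user
+space like this (a sketch only; the hwmon number and paths are
+assumptions and vary between systems)::
+
+    #include <stdio.h>
+
+    static int write_attr(const char *path, const char *val)
+    {
+            FILE *f = fopen(path, "w");
+
+            if (!f)
+                    return -1;
+            fputs(val, f);
+            return fclose(f);
+    }
+
+    int main(void)
+    {
+            /* Paths are assumptions; select Thermal Cruise (1), then
+             * a mid-range duty cycle. */
+            if (write_attr("/sys/class/hwmon/hwmon0/pwm1_enable", "1"))
+                    return 1;
+            return write_attr("/sys/class/hwmon/hwmon0/pwm1", "128") ? 1 : 0;
+    }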
diff --git a/Documentation/hwmon/wm831x b/Documentation/hwmon/wm831x.rst
index 11446757c8c8..c56fb35a2fb3 100644
--- a/Documentation/hwmon/wm831x
+++ b/Documentation/hwmon/wm831x.rst
@@ -3,11 +3,14 @@ Kernel driver wm831x-hwmon
Supported chips:
* Wolfson Microelectronics WM831x PMICs
+
Prefix: 'wm831x'
+
Datasheet:
- http://www.wolfsonmicro.com/products/WM8310
- http://www.wolfsonmicro.com/products/WM8311
- http://www.wolfsonmicro.com/products/WM8312
+
+ - http://www.wolfsonmicro.com/products/WM8310
+ - http://www.wolfsonmicro.com/products/WM8311
+ - http://www.wolfsonmicro.com/products/WM8312
Authors: Mark Brown <broonie@opensource.wolfsonmicro.com>
diff --git a/Documentation/hwmon/wm8350 b/Documentation/hwmon/wm8350.rst
index 98f923bd2e92..cec044ca5900 100644
--- a/Documentation/hwmon/wm8350
+++ b/Documentation/hwmon/wm8350.rst
@@ -2,12 +2,16 @@ Kernel driver wm8350-hwmon
==========================
Supported chips:
+
* Wolfson Microelectronics WM835x PMICs
+
Prefix: 'wm8350'
+
Datasheet:
- http://www.wolfsonmicro.com/products/WM8350
- http://www.wolfsonmicro.com/products/WM8351
- http://www.wolfsonmicro.com/products/WM8352
+
+ - http://www.wolfsonmicro.com/products/WM8350
+ - http://www.wolfsonmicro.com/products/WM8351
+ - http://www.wolfsonmicro.com/products/WM8352
Authors: Mark Brown <broonie@opensource.wolfsonmicro.com>
diff --git a/Documentation/hwmon/xgene-hwmon b/Documentation/hwmon/xgene-hwmon.rst
index 6ec50ed7cc8f..439b30b881b6 100644
--- a/Documentation/hwmon/xgene-hwmon
+++ b/Documentation/hwmon/xgene-hwmon.rst
@@ -1,7 +1,8 @@
Kernel driver xgene-hwmon
-========================
+=========================
Supported chips:
+
* APM X-Gene SoC
Description
@@ -15,16 +16,21 @@ For ACPI, it is the PCC mailbox.
The following sensors are supported
* Temperature
- - SoC on-die temperature in milli-degree C
- - Alarm when high/over temperature occurs
+ - SoC on-die temperature in milli-degree C
+ - Alarm when high/over temperature occurs
+
* Power
- - CPU power in uW
- - IO power in uW
+ - CPU power in uW
+ - IO power in uW
sysfs-Interface
---------------
-temp0_input - SoC on-die temperature (milli-degree C)
-temp0_critical_alarm - An 1 would indicates on-die temperature exceeded threshold
-power0_input - CPU power in (uW)
-power1_input - IO power in (uW)
+temp0_input
+ - SoC on-die temperature (milli-degree C)
+temp0_critical_alarm
+ - A value of 1 indicates that the on-die temperature has exceeded the threshold
+power0_input
+ - CPU power in uW
+power1_input
+ - IO power in uW
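+
+Reading and scaling these values from user space can be sketched as
+follows (the device path is an assumption; the hwmon number varies)::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            /* Path is an assumption; look up the right hwmonX first. */
+            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp0_input", "r");
+            long mdegc;
+
+            if (!f || fscanf(f, "%ld", &mdegc) != 1)
+                    return 1;
+            fclose(f);
+            /* Value is in milli-degree C, e.g. 45500 means 45.5 degC. */
+            printf("%.1f degC\n", mdegc / 1000.0);
+            return 0;
+    }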
diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100.rst
index 477a94b131ae..41513bb7fe51 100644
--- a/Documentation/hwmon/zl6100
+++ b/Documentation/hwmon/zl6100.rst
@@ -2,57 +2,106 @@ Kernel driver zl6100
====================
Supported chips:
+
* Intersil / Zilker Labs ZL2004
+
Prefix: 'zl2004'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6847.pdf
+
* Intersil / Zilker Labs ZL2005
+
Prefix: 'zl2005'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6848.pdf
+
* Intersil / Zilker Labs ZL2006
+
Prefix: 'zl2006'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6850.pdf
+
* Intersil / Zilker Labs ZL2008
+
Prefix: 'zl2008'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6859.pdf
+
* Intersil / Zilker Labs ZL2105
+
Prefix: 'zl2105'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6851.pdf
+
* Intersil / Zilker Labs ZL2106
+
Prefix: 'zl2106'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6852.pdf
+
* Intersil / Zilker Labs ZL6100
+
Prefix: 'zl6100'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6876.pdf
+
* Intersil / Zilker Labs ZL6105
+
Prefix: 'zl6105'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6906.pdf
+
* Intersil / Zilker Labs ZL9101M
+
Prefix: 'zl9101'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn7669.pdf
+
* Intersil / Zilker Labs ZL9117M
+
Prefix: 'zl9117'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn7914.pdf
+
* Ericsson BMR450, BMR451
+
Prefix: 'bmr450', 'bmr451'
+
Addresses scanned: -
+
Datasheet:
+
http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
+
* Ericsson BMR462, BMR463, BMR464
+
Prefixes: 'bmr462', 'bmr463', 'bmr464'
+
Addresses scanned: -
+
Datasheet:
-http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
+ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
Author: Guenter Roeck <linux@roeck-us.net>
@@ -64,7 +113,7 @@ This driver supports hardware monitoring for Intersil / Zilker Labs ZL6100 and
compatible digital DC-DC controllers.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus and Documentation.hwmon/pmbus-core for details
+Documentation/hwmon/pmbus.rst and Documentation/hwmon/pmbus-core.rst for details
on PMBus client drivers.
@@ -75,13 +124,15 @@ This driver does not auto-detect devices. You will have to instantiate the
devices explicitly. Please see Documentation/i2c/instantiating-devices for
details.
-WARNING: Do not access chip registers using the i2cdump command, and do not use
-any of the i2ctools commands on a command register used to save and restore
-configuration data (0x11, 0x12, 0x15, 0x16, and 0xf4). The chips supported by
-this driver interpret any access to those command registers (including read
-commands) as request to execute the command in question. Unless write accesses
-to those registers are protected, this may result in power loss, board resets,
-and/or Flash corruption. Worst case, your board may turn into a brick.
+.. warning::
+
+ Do not access chip registers using the i2cdump command, and do not use
+ any of the i2ctools commands on a command register used to save and restore
+ configuration data (0x11, 0x12, 0x15, 0x16, and 0xf4). The chips supported by
+ this driver interpret any access to those command registers (including read
+ commands) as a request to execute the command in question. Unless write accesses
+ to those registers are protected, this may result in power loss, board resets,
+ and/or Flash corruption. Worst case, your board may turn into a brick.
Platform data support
@@ -110,6 +161,7 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in1_label "vin"
in1_input Measured input voltage.
in1_min Minimum input voltage.
@@ -158,3 +210,4 @@ temp[12]_min_alarm Chip temperature low alarm.
temp[12]_max_alarm Chip temperature high alarm.
temp[12]_lcrit_alarm Chip temperature critical low alarm.
temp[12]_crit_alarm Chip temperature critical high alarm.
+======================= ========================================================
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d1ee484a787d..ee9984f35868 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -36,6 +36,7 @@ Supported adapters:
* Intel Cannon Lake (PCH)
* Intel Cedar Fork (PCH)
* Intel Ice Lake (PCH)
+ * Intel Comet Lake (PCH)
Datasheets: Publicly available at the Intel website
On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/index.rst b/Documentation/index.rst
index c858c2e66e36..fec80fee512a 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -35,6 +35,16 @@ trying to get it to work optimally on a given system.
admin-guide/index
+Firmware-related documentation
+------------------------------
+The following holds information on the kernel's expectations regarding
+platform firmware.
+
+.. toctree::
+ :maxdepth: 2
+
+ firmware-guide/index
+
Application-developer documentation
-----------------------------------
@@ -83,6 +93,7 @@ needed).
media/index
networking/index
input/index
+ hwmon/index
gpu/index
security/index
sound/index
@@ -90,6 +101,7 @@ needed).
filesystems/index
vm/index
bpf/index
+ misc-devices/index
Architecture-specific documentation
-----------------------------------
diff --git a/Documentation/infiniband/user_verbs.txt b/Documentation/infiniband/user_verbs.txt
index df049b9f5b6e..47ebf2f80b2b 100644
--- a/Documentation/infiniband/user_verbs.txt
+++ b/Documentation/infiniband/user_verbs.txt
@@ -46,11 +46,11 @@ Memory pinning
I/O targets be kept resident at the same physical address. The
ib_uverbs module manages pinning and unpinning memory regions via
get_user_pages() and put_page() calls. It also accounts for the
- amount of memory pinned in the process's locked_vm, and checks that
+ amount of memory pinned in the process's pinned_vm, and checks that
unprivileged processes do not exceed their RLIMIT_MEMLOCK limit.
Pages that are pinned multiple times are counted each time they are
- pinned, so the value of locked_vm may be an overestimate of the
+ pinned, so the value of pinned_vm may be an overestimate of the
number of pages pinned by a process.
/dev files
diff --git a/Documentation/input/devices/xpad.rst b/Documentation/input/devices/xpad.rst
index b8bd65962dd8..173c2acda9fd 100644
--- a/Documentation/input/devices/xpad.rst
+++ b/Documentation/input/devices/xpad.rst
@@ -218,7 +218,7 @@ References
.. [1] http://euc.jp/periphs/xbox-controller.ja.html (ITO Takayuki)
.. [2] http://xpad.xbox-scene.com/
.. [3] http://www.markosweb.com/www/xboxhackz.com/
-.. [4] http://lxr.free-electrons.com/ident?i=xpad_device
+.. [4] https://elixir.bootlin.com/linux/latest/ident/xpad_device
Historic Edits
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index c9e3d93e7a89..8a3830b39c7d 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -232,17 +232,12 @@ KBUILD_LDS
--------------------------------------------------
The linker script with full path. Assigned by the top-level Makefile.
-KBUILD_VMLINUX_INIT
+KBUILD_VMLINUX_OBJS
--------------------------------------------------
-All object files for the init (first) part of vmlinux.
-Files specified with KBUILD_VMLINUX_INIT are linked first.
-
-KBUILD_VMLINUX_MAIN
---------------------------------------------------
-All object files for the main part of vmlinux.
+All object files for vmlinux. They are linked to vmlinux in the same
+order as listed in KBUILD_VMLINUX_OBJS.
KBUILD_VMLINUX_LIBS
--------------------------------------------------
-All .a "lib" files for vmlinux.
-KBUILD_VMLINUX_INIT, KBUILD_VMLINUX_MAIN, and KBUILD_VMLINUX_LIBS together
-specify all the object files used to link vmlinux.
+All .a "lib" files for vmlinux. KBUILD_VMLINUX_OBJS and KBUILD_VMLINUX_LIBS
+together specify all the object files used to link vmlinux.
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index bf28c47bfd72..03c065855eaf 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -154,13 +154,8 @@ more details, with real examples.
Kbuild compiles all the $(obj-y) files. It then calls
"$(AR) rcSTP" to merge these files into one built-in.a file.
- This is a thin archive without a symbol table, which makes it
- unsuitable as a linker input.
-
- The scripts/link-vmlinux.sh script later makes an aggregate
- built-in.a with "${AR} rcsTP", which creates the thin archive
- with a symbol table and an index, making it a valid input for
- the final vmlinux link passes.
+ This is a thin archive without a symbol table. It will later be
+ linked into vmlinux by scripts/link-vmlinux.sh.
The order of files in $(obj-y) is significant. Duplicates in
the lists are allowed: the first instance will be linked into
@@ -504,23 +499,6 @@ more details, with real examples.
In the above example, -Wno-unused-but-set-variable will be added to
KBUILD_CFLAGS only if gcc really accepts it.
- cc-version
- cc-version returns a numerical version of the $(CC) compiler version.
- The format is <major><minor> where both are two digits. So for example
- gcc 3.41 would return 0341.
- cc-version is useful when a specific $(CC) version is faulty in one
- area, for example -mregparm=3 was broken in some gcc versions
- even though the option was accepted by gcc.
-
- Example:
- #arch/x86/Makefile
- cflags-y += $(shell \
- if [ $(cc-version) -ge 0300 ] ; then \
- echo "-mregparm=3"; fi ;)
-
- In the above example, -mregparm=3 is only used for gcc version greater
- than or equal to gcc 3.0.
-
cc-ifversion
cc-ifversion tests the version of $(CC) and equals the fourth parameter
if version expression is true, or the fifth (if given) if the version
@@ -1296,7 +1274,7 @@ See subsequent chapter for the syntax of the Kbuild file.
--- 7.4 mandatory-y
- mandatory-y is essentially used by include/(uapi/)asm-generic/Kbuild.asm
+ mandatory-y is essentially used by include/(uapi/)asm-generic/Kbuild
to define the minimum set of ASM headers that all architectures must have.
This works like optional generic-y. If a mandatory header is missing
diff --git a/Documentation/kbuild/modules.txt b/Documentation/kbuild/modules.txt
index 3fb39e0116b4..80295c613e37 100644
--- a/Documentation/kbuild/modules.txt
+++ b/Documentation/kbuild/modules.txt
@@ -140,7 +140,7 @@ executed to make module versioning work.
make -C $KDIR M=$PWD bar.lst
make -C $KDIR M=$PWD baz.o
make -C $KDIR M=$PWD foo.ko
- make -C $KDIR M=$PWD /
+ make -C $KDIR M=$PWD ./
=== 3. Creating a Kbuild File for an External Module
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 10f4499e677c..ee60e519438a 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -243,10 +243,10 @@ Optimization
^^^^^^^^^^^^
The Kprobe-optimizer doesn't insert the jump instruction immediately;
-rather, it calls synchronize_sched() for safety first, because it's
+rather, it calls synchronize_rcu() for safety first, because it's
possible for a CPU to be interrupted in the middle of executing the
-optimized region [3]_. As you know, synchronize_sched() can ensure
-that all interruptions that were active when synchronize_sched()
+optimized region [3]_. As you know, synchronize_rcu() can ensure
+that all interruptions that were active when synchronize_rcu()
was called are done, but only if CONFIG_PREEMPT=n. So, this version
of kprobe optimization supports only kernels with CONFIG_PREEMPT=n [4]_.
diff --git a/Documentation/laptops/lg-laptop.rst b/Documentation/laptops/lg-laptop.rst
index e486fe7ddc35..aa503ee9b3bc 100644
--- a/Documentation/laptops/lg-laptop.rst
+++ b/Documentation/laptops/lg-laptop.rst
@@ -1,4 +1,5 @@
.. SPDX-License-Identifier: GPL-2.0+
+
LG Gram laptop extra features
=============================
@@ -9,6 +10,7 @@ Hotkeys
-------
The following FN keys are ignored by the kernel without this driver:
+
- FN-F1 (LG control panel) - Generates F15
- FN-F5 (Touchpad toggle) - Generates F13
- FN-F6 (Airplane mode) - Generates RFKILL
@@ -16,7 +18,7 @@ The following FN keys are ignored by the kernel without this driver:
This key also changes keyboard backlight mode.
- FN-F9 (Reader mode) - Generates F14
-The rest of the FN key work without a need for a special driver.
+The rest of the FN keys work without a need for a special driver.
Reader mode
diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index 49f58a07ee7b..39fae143c9cb 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -45,10 +45,10 @@ When locking rules are violated, these state bits are presented in the
locking error messages, inside curlies. A contrived example:
modprobe/2287 is trying to acquire lock:
- (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
+ (&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
but task is already holding lock:
- (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
+ (&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
The bit position indicates STATE, STATE-read, for each of the states listed
diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
index f79934225d8d..ca983328976b 100644
--- a/Documentation/lzo.txt
+++ b/Documentation/lzo.txt
@@ -102,9 +102,11 @@ Byte sequences
dictionary which is empty, and that it will always be
invalid at this place.
- 17 : bitstream version. If the first byte is 17, the next byte
- gives the bitstream version (version 1 only). If the first byte
- is not 17, the bitstream version is 0.
+ 17 : bitstream version. If the first byte is 17, and the
+ compressed stream length is at least 5 bytes (the length of
+ the shortest possible versioned bitstream), the next byte
+ gives the bitstream version (version 1 only).
+ Otherwise, the bitstream version is 0.
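+
+ In code, this check reads roughly as follows (a sketch of the rule
+ above, not the kernel's implementation):
+
+     #include <stddef.h>
+
+     /* Returns the bitstream version implied by the stream header. */
+     static int lzo_stream_version(const unsigned char *in, size_t len)
+     {
+             /* 5 bytes is the shortest possible versioned bitstream. */
+             if (len >= 5 && in[0] == 17)
+                     return in[1]; /* only version 1 is defined so far */
+             return 0;
+     }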
18..21 : copy 0..3 literals
state = (byte - 17) = 0..3 [ copy <state> literals ]
diff --git a/Documentation/media/dvb-drivers/dvb-usb.rst b/Documentation/media/dvb-drivers/dvb-usb.rst
index 6679191819aa..b2d5d9e62b30 100644
--- a/Documentation/media/dvb-drivers/dvb-usb.rst
+++ b/Documentation/media/dvb-drivers/dvb-usb.rst
@@ -125,7 +125,7 @@ https://linuxtv.org/wiki/index.php/DVB_USB
2004-12-26
- - refactored the dibusb-driver, splitted into separate files
+ - refactored the dibusb-driver, split into separate files
- i2c-probing enabled
2004-12-06
diff --git a/Documentation/media/kapi/dtv-core.rst b/Documentation/media/kapi/dtv-core.rst
index 17454a2cf6b0..ac005b46f23e 100644
--- a/Documentation/media/kapi/dtv-core.rst
+++ b/Documentation/media/kapi/dtv-core.rst
@@ -12,7 +12,7 @@ Digital TV devices are implemented by several different drivers:
- Frontend drivers that are usually implemented as two separate drivers:
- A tuner driver that implements the logic with commands the part of the
- hardware with is reponsible to tune into a digital TV transponder or
+ hardware with is responsible to tune into a digital TV transponder or
physical channel. The output of a tuner is usually a baseband or
Intermediate Frequency (IF) signal;
diff --git a/Documentation/media/kapi/dtv-frontend.rst b/Documentation/media/kapi/dtv-frontend.rst
index 8ea64742c7ba..fbc5517c8d5a 100644
--- a/Documentation/media/kapi/dtv-frontend.rst
+++ b/Documentation/media/kapi/dtv-frontend.rst
@@ -328,7 +328,7 @@ Statistics collect
On almost all frontend hardware, the bit and byte counts are stored by
the hardware after a certain amount of time or after the total bit/block
-counter reaches a certain value (usually programable), for example, on
+counter reaches a certain value (usually programmable), for example, on
every 1000 ms or after receiving 1,000,000 bits.
So, if you read the registers too soon, you'll end by reading the same
diff --git a/Documentation/media/kapi/mc-core.rst b/Documentation/media/kapi/mc-core.rst
index 0bcfeadbc52d..f930725e0d6b 100644
--- a/Documentation/media/kapi/mc-core.rst
+++ b/Documentation/media/kapi/mc-core.rst
@@ -60,7 +60,7 @@ Drivers initialize entity pads by calling
Drivers register entities with a media device by calling
:c:func:`media_device_register_entity()`
-and unregistred by calling
+and unregistered by calling
:c:func:`media_device_unregister_entity()`.
Interfaces
diff --git a/Documentation/media/kapi/v4l2-device.rst b/Documentation/media/kapi/v4l2-device.rst
index c4311f0421be..5e25bf182c18 100644
--- a/Documentation/media/kapi/v4l2-device.rst
+++ b/Documentation/media/kapi/v4l2-device.rst
@@ -93,7 +93,7 @@ You can iterate over all registered devices as follows:
int err;
/* Find driver 'ivtv' on the PCI bus.
- pci_bus_type is a global. For USB busses use usb_bus_type. */
+ pci_bus_type is a global. For USB buses use usb_bus_type. */
drv = driver_find("ivtv", &pci_bus_type);
/* iterate over all ivtv device instances */
err = driver_for_each_device(drv, NULL, p, callback);
diff --git a/Documentation/media/kapi/v4l2-intro.rst b/Documentation/media/kapi/v4l2-intro.rst
index cea3e263e48b..4d54fa9d7a12 100644
--- a/Documentation/media/kapi/v4l2-intro.rst
+++ b/Documentation/media/kapi/v4l2-intro.rst
@@ -11,7 +11,7 @@ hardware: most devices have multiple ICs, export multiple device nodes in
Especially the fact that V4L2 drivers have to setup supporting ICs to
do audio/video muxing/encoding/decoding makes it more complex than most.
Usually these ICs are connected to the main bridge driver through one or
-more I2C busses, but other busses can also be used. Such devices are
+more I2C buses, but other buses can also be used. Such devices are
called 'sub-devices'.
For a long time the framework was limited to the video_device struct for
diff --git a/Documentation/media/kapi/v4l2-subdev.rst b/Documentation/media/kapi/v4l2-subdev.rst
index be4970909f40..29e07e23f888 100644
--- a/Documentation/media/kapi/v4l2-subdev.rst
+++ b/Documentation/media/kapi/v4l2-subdev.rst
@@ -23,7 +23,7 @@ device data.
You also need a way to go from the low-level struct to :c:type:`v4l2_subdev`.
For the common i2c_client struct the i2c_set_clientdata() call is used to store
-a :c:type:`v4l2_subdev` pointer, for other busses you may have to use other
+a :c:type:`v4l2_subdev` pointer, for other buses you may have to use other
methods.
Bridges might also need to store per-subdev private data, such as a pointer to
@@ -33,7 +33,7 @@ provides host private data for that purpose that can be accessed with
From the bridge driver perspective, you load the sub-device module and somehow
obtain the :c:type:`v4l2_subdev` pointer. For i2c devices this is easy: you call
-``i2c_get_clientdata()``. For other busses something similar needs to be done.
+``i2c_get_clientdata()``. For other buses something similar needs to be done.
Helper functions exists for sub-devices on an I2C bus that do most of this
tricky work for you.
diff --git a/Documentation/media/lirc.h.rst.exceptions b/Documentation/media/lirc.h.rst.exceptions
index 379b9e7df5d0..7a8b8ff4f076 100644
--- a/Documentation/media/lirc.h.rst.exceptions
+++ b/Documentation/media/lirc.h.rst.exceptions
@@ -60,6 +60,9 @@ ignore symbol RC_PROTO_SHARP
ignore symbol RC_PROTO_XMP
ignore symbol RC_PROTO_CEC
ignore symbol RC_PROTO_IMON
+ignore symbol RC_PROTO_RCMM12
+ignore symbol RC_PROTO_RCMM24
+ignore symbol RC_PROTO_RCMM32
# Undocumented macros
diff --git a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
index d537da90acf5..d68f05d48d12 100644
--- a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
+++ b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
@@ -57,7 +57,7 @@ Description
This ioctl call asks the Audio Device to bypass the Audio decoder and
forward the stream without decoding. This mode shall be used if streams
-that can’t be handled by the Digial TV system shall be decoded. Dolby
+that can’t be handled by the Digital TV system shall be decoded. Dolby
DigitalTM streams are automatically forwarded by the Digital TV subsystem if
the hardware can handle it.
diff --git a/Documentation/media/uapi/dvb/ca-set-descr.rst b/Documentation/media/uapi/dvb/ca-set-descr.rst
index 22c8b8f94c7e..d36464ba2317 100644
--- a/Documentation/media/uapi/dvb/ca-set-descr.rst
+++ b/Documentation/media/uapi/dvb/ca-set-descr.rst
@@ -39,7 +39,7 @@ Description
-----------
CA_SET_DESCR is used for feeding descrambler CA slots with descrambling
-keys (refered as control words).
+keys (referred to as control words).
Return Value
------------
diff --git a/Documentation/media/uapi/dvb/dmx-qbuf.rst b/Documentation/media/uapi/dvb/dmx-qbuf.rst
index 9a1d85147c25..9dc845daa59d 100644
--- a/Documentation/media/uapi/dvb/dmx-qbuf.rst
+++ b/Documentation/media/uapi/dvb/dmx-qbuf.rst
@@ -61,7 +61,7 @@ the device is closed.
Applications call the ``DMX_DQBUF`` ioctl to dequeue a filled
(capturing) buffer from the driver's outgoing queue.
-They just set the ``index`` field withe the buffer ID to be queued.
+They just set the ``index`` field with the buffer ID to be dequeued.
When ``DMX_DQBUF`` is called with a pointer to struct :c:type:`dmx_buffer`,
the driver fills the remaining fields or returns an error code.
diff --git a/Documentation/media/uapi/dvb/dvbproperty.rst b/Documentation/media/uapi/dvb/dvbproperty.rst
index 371c72bb9419..0c4f5598f2be 100644
--- a/Documentation/media/uapi/dvb/dvbproperty.rst
+++ b/Documentation/media/uapi/dvb/dvbproperty.rst
@@ -44,7 +44,7 @@ with supports all digital TV delivery systems.
struct :c:type:`dvb_frontend_parameters`.
2. Don't use DVB API version 3 calls on hardware with supports
- newer standards. Such API provides no suport or a very limited
+ newer standards. Such an API provides either no support or only limited
support to new standards and/or new hardware.
3. Nowadays, most frontends support multiple delivery systems.
diff --git a/Documentation/media/uapi/dvb/video_types.rst b/Documentation/media/uapi/dvb/video_types.rst
index 2ed8aad84003..479942ce6fb8 100644
--- a/Documentation/media/uapi/dvb/video_types.rst
+++ b/Documentation/media/uapi/dvb/video_types.rst
@@ -202,7 +202,7 @@ If video_blank is set video will be blanked out if the channel is
changed or if playback is stopped. Otherwise, the last picture will be
displayed. play_state indicates if the video is currently frozen,
stopped, or being played back. The stream_source corresponds to the
-seleted source for the video stream. It can come either from the
+selected source for the video stream. It can come either from the
demultiplexer or from memory. The video_format indicates the aspect
ratio (one of 4:3 or 16:9) of the currently played video stream.
Finally, display_format corresponds to the selected cropping mode in
diff --git a/Documentation/media/uapi/fdl-appendix.rst b/Documentation/media/uapi/fdl-appendix.rst
index f8dc85d3939c..9316b8617502 100644
--- a/Documentation/media/uapi/fdl-appendix.rst
+++ b/Documentation/media/uapi/fdl-appendix.rst
@@ -363,7 +363,7 @@ various documents with a single copy that is included in the collection,
provided that you follow the rules of this License for verbatim copying
of each of the documents in all other respects.
-You may extract a single document from such a collection, and dispbibute
+You may extract a single document from such a collection, and distribute
it individually under this License, provided you insert a copy of this
License into the extracted document, and follow this License in all
other respects regarding verbatim copying of that document.
diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst
index 8627587b7075..3af6a414b501 100644
--- a/Documentation/media/uapi/mediactl/media-types.rst
+++ b/Documentation/media/uapi/mediactl/media-types.rst
@@ -164,7 +164,7 @@ Types and flags used to represent the media graph elements
* - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV``
- Video pixel encoding converter. An entity capable of pixel
- enconding conversion must have at least one sink pad and one
+ encoding conversion must have at least one sink pad and one
source pad, and convert the encoding of pixels received on
its sink pad(s) to a different encoding output on its source
pad(s). Pixel encoding conversion includes but isn't limited
diff --git a/Documentation/media/uapi/mediactl/request-api.rst b/Documentation/media/uapi/mediactl/request-api.rst
index 4b25ad03f45a..1ad631e549fe 100644
--- a/Documentation/media/uapi/mediactl/request-api.rst
+++ b/Documentation/media/uapi/mediactl/request-api.rst
@@ -91,7 +91,7 @@ A request must contain at least one buffer, otherwise ``ENOENT`` is returned.
A queued request cannot be modified anymore.
.. caution::
- For :ref:`memory-to-memory devices <codec>` you can use requests only for
+ For :ref:`memory-to-memory devices <mem2mem>` you can use requests only for
output buffers, not for capture buffers. Attempting to add a capture buffer
to a request will result in an ``EACCES`` error.
@@ -152,7 +152,7 @@ if it had just been allocated.
Example for a Codec Device
--------------------------
-For use-cases such as :ref:`codecs <codec>`, the request API can be used
+For use-cases such as :ref:`codecs <mem2mem>`, the request API can be used
to associate specific controls to
be applied by the driver for the OUTPUT buffer, allowing user-space
to queue many such buffers in advance. It can also take advantage of requests'
diff --git a/Documentation/media/uapi/rc/rc-tables.rst b/Documentation/media/uapi/rc/rc-tables.rst
index cb670d10998b..177ac44fa0fa 100644
--- a/Documentation/media/uapi/rc/rc-tables.rst
+++ b/Documentation/media/uapi/rc/rc-tables.rst
@@ -385,7 +385,7 @@ the remote via /dev/input/event devices.
- ``KEY_CHANNELDOWN``
- - Decrease channel sequencially
+ - Decrease channel sequentially
- CHANNEL - / CHANNEL DOWN / DOWN
@@ -393,7 +393,7 @@ the remote via /dev/input/event devices.
- ``KEY_CHANNELUP``
- - Increase channel sequencially
+ - Increase channel sequentially
- CHANNEL + / CHANNEL UP / UP
@@ -623,7 +623,7 @@ the remote via /dev/input/event devices.
- .. row 78
- - ``KEY_SCREEN``
+ - ``KEY_ASPECT_RATIO``
- Select screen aspect ratio
@@ -631,7 +631,7 @@ the remote via /dev/input/event devices.
- .. row 79
- - ``KEY_ZOOM``
+ - ``KEY_FULL_SCREEN``
- Put device into zoom/full screen mode
diff --git a/Documentation/media/uapi/v4l/buffer.rst b/Documentation/media/uapi/v4l/buffer.rst
index 86878bb0087f..81ffdcb89057 100644
--- a/Documentation/media/uapi/v4l/buffer.rst
+++ b/Documentation/media/uapi/v4l/buffer.rst
@@ -230,8 +230,7 @@ struct v4l2_buffer
* - struct :c:type:`v4l2_timecode`
- ``timecode``
-
- - When ``type`` is ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` and the
- ``V4L2_BUF_FLAG_TIMECODE`` flag is set in ``flags``, this
+ - When the ``V4L2_BUF_FLAG_TIMECODE`` flag is set in ``flags``, this
structure contains a frame timecode. In
:c:type:`V4L2_FIELD_ALTERNATE <v4l2_field>` mode the top and
bottom field contain the same timecode. Timecodes are intended to
@@ -714,10 +713,10 @@ enum v4l2_memory
Timecodes
=========
-The struct :c:type:`v4l2_timecode` structure is designed to hold a
-:ref:`smpte12m` or similar timecode. (struct
-struct :c:type:`timeval` timestamps are stored in struct
-:c:type:`v4l2_buffer` field ``timestamp``.)
+The struct :c:type:`v4l2_timecode` structure is designed to hold a
+:ref:`smpte12m` or similar timecode.
+(struct :c:type:`timeval` timestamps are stored in the struct
+:c:type:`v4l2_buffer` ``timestamp`` field.)
.. c:type:: v4l2_timecode
diff --git a/Documentation/media/uapi/v4l/common.rst b/Documentation/media/uapi/v4l/common.rst
index 889f2f2632a1..5e87ae24e4b4 100644
--- a/Documentation/media/uapi/v4l/common.rst
+++ b/Documentation/media/uapi/v4l/common.rst
@@ -46,6 +46,17 @@ applicable to all devices.
dv-timings
control
extended-controls
+ ext-ctrls-camera
+ ext-ctrls-flash
+ ext-ctrls-image-source
+ ext-ctrls-image-process
+ ext-ctrls-codec
+ ext-ctrls-jpeg
+ ext-ctrls-dv
+ ext-ctrls-rf-tuner
+ ext-ctrls-fm-tx
+ ext-ctrls-fm-rx
+ ext-ctrls-detect
format
planar-apis
selection-api
diff --git a/Documentation/media/uapi/v4l/control.rst b/Documentation/media/uapi/v4l/control.rst
index 0d46526b5935..71417bba028c 100644
--- a/Documentation/media/uapi/v4l/control.rst
+++ b/Documentation/media/uapi/v4l/control.rst
@@ -499,7 +499,7 @@ Example: Changing controls
.. [#f1]
The use of ``V4L2_CID_PRIVATE_BASE`` is problematic because different
drivers may use the same ``V4L2_CID_PRIVATE_BASE`` ID for different
- controls. This makes it hard to programatically set such controls
+ controls. This makes it hard to programmatically set such controls
since the meaning of the control with that ID is driver dependent. In
order to resolve this drivers use unique IDs and the
``V4L2_CID_PRIVATE_BASE`` IDs are mapped to those unique IDs by the
diff --git a/Documentation/media/uapi/v4l/dev-effect.rst b/Documentation/media/uapi/v4l/dev-effect.rst
deleted file mode 100644
index b165e2c20910..000000000000
--- a/Documentation/media/uapi/v4l/dev-effect.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-.. Permission is granted to copy, distribute and/or modify this
-.. document under the terms of the GNU Free Documentation License,
-.. Version 1.1 or any later version published by the Free Software
-.. Foundation, with no Invariant Sections, no Front-Cover Texts
-.. and no Back-Cover Texts. A copy of the license is included at
-.. Documentation/media/uapi/fdl-appendix.rst.
-..
-.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
-
-.. _effect:
-
-************************
-Effect Devices Interface
-************************
-
-.. note::
- This interface has been be suspended from the V4L2 API.
- The implementation for such effects should be done
- via mem2mem devices.
-
-A V4L2 video effect device can do image effects, filtering, or combine
-two or more images or image streams. For example video transitions or
-wipes. Applications send data to be processed and receive the result
-data either with :ref:`read() <func-read>` and
-:ref:`write() <func-write>` functions, or through the streaming I/O
-mechanism.
-
-[to do]
diff --git a/Documentation/media/uapi/v4l/dev-codec.rst b/Documentation/media/uapi/v4l/dev-mem2mem.rst
index b5e017c17834..67a980818dc8 100644
--- a/Documentation/media/uapi/v4l/dev-codec.rst
+++ b/Documentation/media/uapi/v4l/dev-mem2mem.rst
@@ -7,37 +7,36 @@
..
.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
-.. _codec:
+.. _mem2mem:
-***************
-Codec Interface
-***************
+********************************
+Video Memory-To-Memory Interface
+********************************
-A V4L2 codec can compress, decompress, transform, or otherwise convert
-video data from one format into another format, in memory. Typically
-such devices are memory-to-memory devices (i.e. devices with the
-``V4L2_CAP_VIDEO_M2M`` or ``V4L2_CAP_VIDEO_M2M_MPLANE`` capability set).
+A V4L2 memory-to-memory device can compress, decompress, transform, or
+otherwise convert video data from one format into another format, in memory.
+Such memory-to-memory devices set the ``V4L2_CAP_VIDEO_M2M`` or
+``V4L2_CAP_VIDEO_M2M_MPLANE`` capability. Examples of memory-to-memory
+devices are codecs, scalers, deinterlacers or format converters (e.g.
+converting from YUV to RGB).
A memory-to-memory video node acts just like a normal video node, but it
-supports both output (sending frames from memory to the codec hardware)
-and capture (receiving the processed frames from the codec hardware into
+supports both output (sending frames from memory to the hardware)
+and capture (receiving the processed frames from the hardware into
memory) stream I/O. An application will have to setup the stream I/O for
both sides and finally call :ref:`VIDIOC_STREAMON <VIDIOC_STREAMON>`
-for both capture and output to start the codec.
-
-Video compression codecs use the MPEG controls to setup their codec
-parameters
-
-.. note::
-
- The MPEG controls actually support many more codecs than
- just MPEG. See :ref:`mpeg-controls`.
+for both capture and output to start the hardware.
Memory-to-memory devices function as a shared resource: you can
open the video node multiple times, each application setting up their
-own codec properties that are local to the file handle, and each can use
+own properties that are local to the file handle, and each can use
it independently from the others. The driver will arbitrate access to
-the codec and reprogram it whenever another file handler gets access.
+the hardware and reprogram it whenever another file handler gets access.
This is different from the usual video node behavior where the video
properties are global to the device (i.e. changing something through one
file handle is visible through another file handle).
+
+One of the most common memory-to-memory devices is the codec. Codecs
+are more complicated than most and require additional setup for
+their codec parameters. This is done through codec controls.
+See :ref:`mpeg-controls`.
diff --git a/Documentation/media/uapi/v4l/dev-teletext.rst b/Documentation/media/uapi/v4l/dev-teletext.rst
deleted file mode 100644
index 35e8c4b35458..000000000000
--- a/Documentation/media/uapi/v4l/dev-teletext.rst
+++ /dev/null
@@ -1,41 +0,0 @@
-.. Permission is granted to copy, distribute and/or modify this
-.. document under the terms of the GNU Free Documentation License,
-.. Version 1.1 or any later version published by the Free Software
-.. Foundation, with no Invariant Sections, no Front-Cover Texts
-.. and no Back-Cover Texts. A copy of the license is included at
-.. Documentation/media/uapi/fdl-appendix.rst.
-..
-.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
-
-.. _ttx:
-
-******************
-Teletext Interface
-******************
-
-This interface was aimed at devices receiving and demodulating Teletext
-data [:ref:`ets300706`, :ref:`itu653`], evaluating the Teletext
-packages and storing formatted pages in cache memory. Such devices are
-usually implemented as microcontrollers with serial interface
-(I\ :sup:`2`\ C) and could be found on old TV cards, dedicated Teletext
-decoding cards and home-brew devices connected to the PC parallel port.
-
-The Teletext API was designed by Martin Buck. It was defined in the
-kernel header file ``linux/videotext.h``, the specification is available
-from
-`ftp://ftp.gwdg.de/pub/linux/misc/videotext/ <ftp://ftp.gwdg.de/pub/linux/misc/videotext/>`__.
-(Videotext is the name of the German public television Teletext
-service.)
-
-Eventually the Teletext API was integrated into the V4L API with
-character device file names ``/dev/vtx0`` to ``/dev/vtx31``, device
-major number 81, minor numbers 192 to 223.
-
-However, teletext decoders were quickly replaced by more generic VBI
-demodulators and those dedicated teletext decoders no longer exist. For
-many years the vtx devices were still around, even though nobody used
-them. So the decision was made to finally remove support for the
-Teletext API in kernel 2.6.37.
-
-Modern devices all use the :ref:`raw <raw-vbi>` or
-:ref:`sliced` VBI API.
diff --git a/Documentation/media/uapi/v4l/devices.rst b/Documentation/media/uapi/v4l/devices.rst
index 5dbe9d13b6e6..07f8d047662b 100644
--- a/Documentation/media/uapi/v4l/devices.rst
+++ b/Documentation/media/uapi/v4l/devices.rst
@@ -21,11 +21,9 @@ Interfaces
dev-overlay
dev-output
dev-osd
- dev-codec
- dev-effect
+ dev-mem2mem
dev-raw-vbi
dev-sliced-vbi
- dev-teletext
dev-radio
dev-rds
dev-sdr
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-camera.rst b/Documentation/media/uapi/v4l/ext-ctrls-camera.rst
new file mode 100644
index 000000000000..d3a553cd86c9
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-camera.rst
@@ -0,0 +1,508 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _camera-controls:
+
+************************
+Camera Control Reference
+************************
+
+The Camera class includes controls for mechanical (or equivalent
+digital) features of a device such as controllable lenses or sensors.
+
+
+.. _camera-control-id:
+
+Camera Control IDs
+==================
+
+``V4L2_CID_CAMERA_CLASS (class)``
+ The Camera class descriptor. Calling
+ :ref:`VIDIOC_QUERYCTRL` for this control will
+ return a description of this control class.
+
+.. _v4l2-exposure-auto-type:
+
+``V4L2_CID_EXPOSURE_AUTO``
+ (enum)
+
+enum v4l2_exposure_auto_type -
+ Enables automatic adjustments of the exposure time and/or iris
+ aperture. The effect of manual changes of the exposure time or iris
+ aperture while these features are enabled is undefined; drivers
+ should ignore such requests. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_EXPOSURE_AUTO``
+ - Automatic exposure time, automatic iris aperture.
+ * - ``V4L2_EXPOSURE_MANUAL``
+ - Manual exposure time, manual iris.
+ * - ``V4L2_EXPOSURE_SHUTTER_PRIORITY``
+ - Manual exposure time, auto iris.
+ * - ``V4L2_EXPOSURE_APERTURE_PRIORITY``
+ - Auto exposure time, manual iris.
+
+
+
+``V4L2_CID_EXPOSURE_ABSOLUTE (integer)``
+ Determines the exposure time of the camera sensor. The exposure time
+ is limited by the frame interval. Drivers should interpret the
+ values as 100 µs units, where the value 1 stands for 1/10000th of a
+ second, 10000 for 1 second and 100000 for 10 seconds.
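+
+ For example, requesting a 20 ms exposure means writing the value 200
+ (200 * 100 µs). A minimal sketch (the device node is an assumption)::
+
+     #include <fcntl.h>
+     #include <sys/ioctl.h>
+     #include <linux/videodev2.h>
+
+     int main(void)
+     {
+             struct v4l2_control ctrl = {
+                     .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+                     .value = 200, /* 200 * 100 us = 20 ms */
+             };
+             /* Device node is an assumption. */
+             int fd = open("/dev/video0", O_RDWR);
+
+             if (fd < 0)
+                     return 1;
+             return ioctl(fd, VIDIOC_S_CTRL, &ctrl) ? 1 : 0;
+     }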
+
+``V4L2_CID_EXPOSURE_AUTO_PRIORITY (boolean)``
+ When ``V4L2_CID_EXPOSURE_AUTO`` is set to ``AUTO`` or
+ ``APERTURE_PRIORITY``, this control determines if the device may
+ dynamically vary the frame rate. By default this feature is disabled
+ (0) and the frame rate must remain constant.
+
+``V4L2_CID_AUTO_EXPOSURE_BIAS (integer menu)``
+ Determines the automatic exposure compensation; it is effective only
+ when ``V4L2_CID_EXPOSURE_AUTO`` control is set to ``AUTO``,
+ ``SHUTTER_PRIORITY`` or ``APERTURE_PRIORITY``. It is expressed in
+ terms of EV; drivers should interpret the values as 0.001 EV units,
+ where the value 1000 stands for +1 EV.
+
+ Increasing the exposure compensation value is equivalent to
+ decreasing the exposure value (EV) and will increase the amount of
+ light at the image sensor. The camera performs the exposure
+ compensation by adjusting absolute exposure time and/or aperture.
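+
+ For example, a compensation of +1.5 EV corresponds to a control value
+ of 1500 (1500 * 0.001 EV).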
+
+.. _v4l2-exposure-metering:
+
+``V4L2_CID_EXPOSURE_METERING``
+ (enum)
+
+enum v4l2_exposure_metering -
+ Determines how the camera measures the amount of light available for
+ the frame exposure. Possible values are:
+
+.. tabularcolumns:: |p{8.5cm}|p{9.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_EXPOSURE_METERING_AVERAGE``
+ - Use the light information coming from the entire frame and average
+ it, giving no weighting to any particular portion of the metered area.
+ * - ``V4L2_EXPOSURE_METERING_CENTER_WEIGHTED``
+ - Average the light information coming from the entire frame, giving
+ priority to the center of the metered area.
+ * - ``V4L2_EXPOSURE_METERING_SPOT``
+ - Measure only a very small area at the center of the frame.
+ * - ``V4L2_EXPOSURE_METERING_MATRIX``
+ - A multi-zone metering. The light intensity is measured in several
+ points of the frame and the results are combined. The zone
+ selection algorithm and the significance of each zone in
+ calculating the final value are device dependent.
+
+
+
+``V4L2_CID_PAN_RELATIVE (integer)``
+ This control turns the camera horizontally by the specified amount.
+ The unit is undefined. A positive value moves the camera to the
+ right (clockwise when viewed from above), a negative value to the
+ left. A value of zero does not cause motion. This is a write-only
+ control.
+
+``V4L2_CID_TILT_RELATIVE (integer)``
+ This control turns the camera vertically by the specified amount.
+ The unit is undefined. A positive value moves the camera up, a
+ negative value down. A value of zero does not cause motion. This is
+ a write-only control.
+
+``V4L2_CID_PAN_RESET (button)``
+ When this control is set, the camera moves horizontally to the
+ default position.
+
+``V4L2_CID_TILT_RESET (button)``
+ When this control is set, the camera moves vertically to the default
+ position.
+
+``V4L2_CID_PAN_ABSOLUTE (integer)``
+ This control turns the camera horizontally to the specified
+ position. Positive values move the camera to the right (clockwise
+ when viewed from above), negative values to the left. Drivers should
+ interpret the values as arc seconds, with valid values between -180
+ * 3600 and +180 * 3600 inclusive.
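+
+ As a worked example, panning 30 degrees to the right means writing
+ 30 * 3600 = 108000 arc seconds. A minimal sketch (the device node is
+ an assumption)::
+
+     #include <fcntl.h>
+     #include <sys/ioctl.h>
+     #include <linux/videodev2.h>
+
+     int main(void)
+     {
+             struct v4l2_control ctrl = {
+                     .id = V4L2_CID_PAN_ABSOLUTE,
+                     .value = 30 * 3600, /* 30 degrees, in arc seconds */
+             };
+             /* Device node is an assumption. */
+             int fd = open("/dev/video0", O_RDWR);
+
+             return fd < 0 || ioctl(fd, VIDIOC_S_CTRL, &ctrl) ? 1 : 0;
+     }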
+
+``V4L2_CID_TILT_ABSOLUTE (integer)``
+ This control turns the camera vertically to the specified position.
+ Positive values move the camera up, negative values down. Drivers
+ should interpret the values as arc seconds, with valid values
+ between -180 * 3600 and +180 * 3600 inclusive.
+
+``V4L2_CID_FOCUS_ABSOLUTE (integer)``
+ This control sets the focal point of the camera to the specified
+ position. The unit is undefined. Positive values set the focus
+ closer to the camera, negative values towards infinity.
+
+``V4L2_CID_FOCUS_RELATIVE (integer)``
+ This control moves the focal point of the camera by the specified
+ amount. The unit is undefined. Positive values move the focus closer
+ to the camera, negative values towards infinity. This is a
+ write-only control.
+
+``V4L2_CID_FOCUS_AUTO (boolean)``
+ Enables continuous automatic focus adjustments. The effect of manual
+ focus adjustments while this feature is enabled is undefined;
+ drivers should ignore such requests.
+
+``V4L2_CID_AUTO_FOCUS_START (button)``
+ Starts single auto focus process. The effect of setting this control
+ when ``V4L2_CID_FOCUS_AUTO`` is set to ``TRUE`` (1) is undefined;
+ drivers should ignore such requests.
+
+``V4L2_CID_AUTO_FOCUS_STOP (button)``
+ Aborts automatic focusing started with ``V4L2_CID_AUTO_FOCUS_START``
+ control. It is effective only when the continuous autofocus is
+ disabled, that is when ``V4L2_CID_FOCUS_AUTO`` control is set to
+ ``FALSE`` (0).
+
+.. _v4l2-auto-focus-status:
+
+``V4L2_CID_AUTO_FOCUS_STATUS (bitmask)``
+ The automatic focus status. This is a read-only control.
+
+ Setting ``V4L2_LOCK_FOCUS`` lock bit of the ``V4L2_CID_3A_LOCK``
+ control may stop updates of the ``V4L2_CID_AUTO_FOCUS_STATUS``
+ control value.
+
+.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_AUTO_FOCUS_STATUS_IDLE``
+ - Automatic focus is not active.
+ * - ``V4L2_AUTO_FOCUS_STATUS_BUSY``
+ - Automatic focusing is in progress.
+ * - ``V4L2_AUTO_FOCUS_STATUS_REACHED``
+ - Focus has been reached.
+ * - ``V4L2_AUTO_FOCUS_STATUS_FAILED``
+ - Automatic focus has failed, the driver will not transition from
+ this state until another action is performed by an application.
+
+
+
+.. _v4l2-auto-focus-range:
+
+``V4L2_CID_AUTO_FOCUS_RANGE``
+ (enum)
+
+enum v4l2_auto_focus_range -
+ Determines the auto focus distance range within which the lens may be adjusted.
+
+.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_AUTO_FOCUS_RANGE_AUTO``
+ - The camera automatically selects the focus range.
+ * - ``V4L2_AUTO_FOCUS_RANGE_NORMAL``
+ - Normal distance range, limited for best automatic focus
+ performance.
+ * - ``V4L2_AUTO_FOCUS_RANGE_MACRO``
+ - Macro (close-up) auto focus. The camera will use its minimum
+ possible distance for auto focus.
+ * - ``V4L2_AUTO_FOCUS_RANGE_INFINITY``
+ - The lens is set to focus on an object at infinite distance.
+
+
+
+``V4L2_CID_ZOOM_ABSOLUTE (integer)``
+ Specify the objective lens focal length as an absolute value. The
+ zoom unit is driver-specific and its value should be a positive
+ integer.
+
+``V4L2_CID_ZOOM_RELATIVE (integer)``
+ Specify the objective lens focal length relative to the current
+ value. Positive values move the zoom lens group towards the
+ telephoto direction, negative values towards the wide-angle
+ direction. The zoom unit is driver-specific. This is a write-only
+ control.
+
+``V4L2_CID_ZOOM_CONTINUOUS (integer)``
+ Move the objective lens group at the specified speed until it
+ reaches physical device limits or until an explicit request to stop
+ the movement. A positive value moves the zoom lens group towards the
+ telephoto direction. A value of zero stops the zoom lens group
+ movement. A negative value moves the zoom lens group towards the
+ wide-angle direction. The zoom speed unit is driver-specific.
+
+``V4L2_CID_IRIS_ABSOLUTE (integer)``
+ This control sets the camera's aperture to the specified value. The
+ unit is undefined. Larger values open the iris wider, smaller values
+ close it.
+
+``V4L2_CID_IRIS_RELATIVE (integer)``
+ This control modifies the camera's aperture by the specified amount.
+ The unit is undefined. Positive values open the iris one step
+ further, negative values close it one step further. This is a
+ write-only control.
+
+``V4L2_CID_PRIVACY (boolean)``
+ Prevent video from being acquired by the camera. When this control
+ is set to ``TRUE`` (1), no image can be captured by the camera.
+ Common means to enforce privacy are mechanical obturation of the
+ sensor and firmware image processing, but the device is not
+ restricted to these methods. Devices that implement the privacy
+ control must support read access and may support write access.
+
+``V4L2_CID_BAND_STOP_FILTER (integer)``
+ Switch the band-stop filter of a camera sensor on or off, or specify
+ its strength. Such band-stop filters can be used, for example, to
+ filter out the fluorescent light component.
+
+.. _v4l2-auto-n-preset-white-balance:
+
+``V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE``
+ (enum)
+
+enum v4l2_auto_n_preset_white_balance -
+ Sets white balance to automatic, manual or a preset. The presets
+ determine the color temperature of the light as a hint to the camera
+ for white balance adjustments, resulting in the most accurate color
+ representation. The following white balance presets are listed in
+ order of increasing color temperature.
+
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_WHITE_BALANCE_MANUAL``
+ - Manual white balance.
+ * - ``V4L2_WHITE_BALANCE_AUTO``
+ - Automatic white balance adjustments.
+ * - ``V4L2_WHITE_BALANCE_INCANDESCENT``
+ - White balance setting for incandescent (tungsten) lighting. It
+ generally cools down the colors and corresponds approximately to
+ 2500...3500 K color temperature range.
+ * - ``V4L2_WHITE_BALANCE_FLUORESCENT``
+ - White balance preset for fluorescent lighting. It corresponds
+ approximately to 4000...5000 K color temperature.
+ * - ``V4L2_WHITE_BALANCE_FLUORESCENT_H``
+ - With this setting the camera will compensate for fluorescent H
+ lighting.
+ * - ``V4L2_WHITE_BALANCE_HORIZON``
+ - White balance setting for horizon daylight. It corresponds
+ approximately to 5000 K color temperature.
+ * - ``V4L2_WHITE_BALANCE_DAYLIGHT``
+ - White balance preset for daylight (with clear sky). It corresponds
+ approximately to 5000...6500 K color temperature.
+ * - ``V4L2_WHITE_BALANCE_FLASH``
+ - With this setting the camera will compensate for the flash light.
+ It slightly warms up the colors and corresponds roughly to
+ 5000...5500 K color temperature.
+ * - ``V4L2_WHITE_BALANCE_CLOUDY``
+ - White balance preset for moderately overcast sky. This option
+ corresponds approximately to 6500...8000 K color temperature
+ range.
+ * - ``V4L2_WHITE_BALANCE_SHADE``
+ - White balance preset for shade or heavily overcast sky. It
+ corresponds approximately to 9000...10000 K color temperature.
+
+
+
+.. _v4l2-wide-dynamic-range:
+
+``V4L2_CID_WIDE_DYNAMIC_RANGE (boolean)``
+ Enables or disables the camera's wide dynamic range feature. This
+ feature allows obtaining clear images in situations where the intensity
+ of the illumination varies significantly throughout the scene, i.e.
+ there are simultaneously very dark and very bright areas. It is most
+ commonly realized in cameras by combining two subsequent frames with
+ different exposure times. [#f1]_
+
+.. _v4l2-image-stabilization:
+
+``V4L2_CID_IMAGE_STABILIZATION (boolean)``
+ Enables or disables image stabilization.
+
+``V4L2_CID_ISO_SENSITIVITY (integer menu)``
+ Determines the ISO equivalent of an image sensor, indicating the
+ sensor's sensitivity to light. The numbers are expressed in
+ arithmetic scale, as per the :ref:`iso12232` standard, where
+ doubling the sensor
+ sensitivity is represented by doubling the numerical ISO value.
+ Applications should interpret the values as standard ISO values
+ multiplied by 1000, e.g. control value 800 stands for ISO 0.8.
+ Drivers will usually support only a subset of standard ISO values.
+ The effect of setting this control while the
+ ``V4L2_CID_ISO_SENSITIVITY_AUTO`` control is set to a value other
+ than ``V4L2_CID_ISO_SENSITIVITY_MANUAL`` is undefined; drivers
+ should ignore such requests.
+
+.. _v4l2-iso-sensitivity-auto-type:
+
+``V4L2_CID_ISO_SENSITIVITY_AUTO``
+ (enum)
+
+enum v4l2_iso_sensitivity_type -
+ Enables or disables automatic ISO sensitivity adjustments.
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_CID_ISO_SENSITIVITY_MANUAL``
+ - Manual ISO sensitivity.
+ * - ``V4L2_CID_ISO_SENSITIVITY_AUTO``
+ - Automatic ISO sensitivity adjustments.
+
+
+
+.. _v4l2-scene-mode:
+
+``V4L2_CID_SCENE_MODE``
+ (enum)
+
+enum v4l2_scene_mode -
+ This control allows selecting scene programs as the camera automatic
+ modes optimized for common shooting scenes. Within these modes the
+ camera determines the best exposure, aperture, focusing, light
+ metering, white balance and equivalent sensitivity. The controls of
+ those parameters are influenced by the scene mode control. The exact
+ behavior in each mode is subject to the camera specification.
+
+ When the scene mode feature is not used, this control should be set
+ to ``V4L2_SCENE_MODE_NONE`` to make sure the other possibly related
+ controls are accessible. The following scene programs are defined:
+
+.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_SCENE_MODE_NONE``
+ - The scene mode feature is disabled.
+ * - ``V4L2_SCENE_MODE_BACKLIGHT``
+ - Backlight. Compensates for dark shadows when light is coming from
+ behind a subject, for example by automatically turning on the flash.
+ * - ``V4L2_SCENE_MODE_BEACH_SNOW``
+ - Beach and snow. This mode compensates for all-white or bright
+ scenes, which tend to look gray and low contrast, when the camera's
+ automatic exposure is based on an average scene brightness. To
+ compensate, this mode automatically slightly overexposes the
+ frames. The white balance may also be adjusted to compensate for
+ the fact that reflected snow looks bluish rather than white.
+ * - ``V4L2_SCENE_MODE_CANDLELIGHT``
+ - Candle light. The camera generally raises the ISO sensitivity and
+ lowers the shutter speed. This mode compensates for a relatively
+ close subject in the scene. The flash is disabled in order to
+ preserve the ambiance of the light.
+ * - ``V4L2_SCENE_MODE_DAWN_DUSK``
+ - Dawn and dusk. Preserves the colors seen in low natural light
+ before dusk and after dawn. The camera may turn off the flash, and
+ automatically focus at infinity. It will usually boost saturation
+ and lower the shutter speed.
+ * - ``V4L2_SCENE_MODE_FALL_COLORS``
+ - Fall colors. Increases saturation and adjusts white balance for
+ color enhancement. Pictures of autumn leaves get saturated reds
+ and yellows.
+ * - ``V4L2_SCENE_MODE_FIREWORKS``
+ - Fireworks. Long exposure times are used to capture the expanding
+ burst of light from a firework. The camera may invoke image
+ stabilization.
+ * - ``V4L2_SCENE_MODE_LANDSCAPE``
+ - Landscape. The camera may choose a small aperture to provide deep
+ depth of field and long exposure duration to help capture detail
+ in dim light conditions. The focus is fixed at infinity. Suitable
+ for distant and wide scenery.
+ * - ``V4L2_SCENE_MODE_NIGHT``
+ - Night, also known as Night Landscape. Designed for low light
+ conditions, it preserves detail in the dark areas without blowing
+ out bright objects. The camera generally sets itself to a
+ medium-to-high ISO sensitivity, with a relatively long exposure
+ time, and turns flash off. As such, there will be increased image
+ noise and the possibility of a blurred image.
+ * - ``V4L2_SCENE_MODE_PARTY_INDOOR``
+ - Party and indoor. Designed to capture indoor scenes that are lit
+ by indoor background lighting as well as the flash. The camera
+ usually increases ISO sensitivity, and adjusts exposure for the
+ low light conditions.
+ * - ``V4L2_SCENE_MODE_PORTRAIT``
+ - Portrait. The camera adjusts the aperture so that the depth of
+ field is reduced, which helps to isolate the subject against a
+ smooth background. Most cameras recognize the presence of faces in
+ the scene and focus on them. The color hue is adjusted to enhance
+ skin tones. The intensity of the flash is often reduced.
+ * - ``V4L2_SCENE_MODE_SPORTS``
+ - Sports. Significantly increases ISO and uses a fast shutter speed
+ to freeze motion of rapidly-moving subjects. Increased image noise
+ may be seen in this mode.
+ * - ``V4L2_SCENE_MODE_SUNSET``
+ - Sunset. Preserves deep hues seen in sunsets and sunrises. It bumps
+ up the saturation.
+ * - ``V4L2_SCENE_MODE_TEXT``
+ - Text. It applies extra contrast and sharpness; it is typically a
+ black-and-white mode optimized for readability. Automatic focus
+ may be switched to close-up mode and this setting may also involve
+ some lens-distortion correction.
+
+
+
+``V4L2_CID_3A_LOCK (bitmask)``
+ This control locks or unlocks the automatic focus, exposure and
+ white balance. The automatic adjustments can be paused independently
+ by setting the corresponding lock bit to 1. The camera then retains
+ the settings until the lock bit is cleared.
+
+ When a given algorithm is not enabled, drivers should ignore
+ requests to lock it and should return no error. An example might be
+ an application setting bit ``V4L2_LOCK_WHITE_BALANCE`` when the
+ ``V4L2_CID_AUTO_WHITE_BALANCE`` control is set to ``FALSE``. The
+ value of this control may be changed by exposure, white balance or
+ focus controls. The following lock bits are defined:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_LOCK_EXPOSURE``
+ - Automatic exposure adjustments lock.
+ * - ``V4L2_LOCK_WHITE_BALANCE``
+ - Automatic white balance adjustments lock.
+ * - ``V4L2_LOCK_FOCUS``
+ - Automatic focus lock.
+
+
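+ As a hedged sketch (not taken from any driver), an application could
+ pause automatic exposure and white balance while leaving automatic
+ focus running; ``fd`` is assumed to be an open V4L2 device file
+ descriptor:
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Pause AE and AWB; AF keeps running since its lock bit stays 0. */
+    static int lock_ae_awb(int fd)
+    {
+        struct v4l2_control ctrl;
+
+        ctrl.id = V4L2_CID_3A_LOCK;
+        ctrl.value = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE;
+        return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+    }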
+
+``V4L2_CID_PAN_SPEED (integer)``
+ This control turns the camera horizontally at the specified speed.
+ The unit is undefined. A positive value moves the camera to the
+ right (clockwise when viewed from above), a negative value to the
+ left. A value of zero stops the motion if one is in progress and has
+ no effect otherwise.
+
+``V4L2_CID_TILT_SPEED (integer)``
+ This control turns the camera vertically at the specified speed. The
+ unit is undefined. A positive value moves the camera up, a negative
+ value down. A value of zero stops the motion if one is in progress
+ and has no effect otherwise.
+
+.. [#f1]
+ This control may be changed to a menu control in the future, if more
+ options are required.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-codec.rst b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
new file mode 100644
index 000000000000..c97fb7923be5
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
@@ -0,0 +1,2451 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _mpeg-controls:
+
+***********************
+Codec Control Reference
+***********************
+
+All controls within the Codec control class are described below: first
+the generic controls, then controls specific to certain hardware.
+
+.. note::
+
+ These controls are applicable to all codecs and not just MPEG. The
+ defines are prefixed with V4L2_CID_MPEG/V4L2_MPEG as the controls
+ were originally made for MPEG codecs and later extended to cover all
+ encoding formats.
+
+
+Generic Codec Controls
+======================
+
+
+.. _mpeg-control-id:
+
+Codec Control IDs
+-----------------
+
+``V4L2_CID_MPEG_CLASS (class)``
+ The Codec class descriptor. Calling
+ :ref:`VIDIOC_QUERYCTRL` for this control will
+ return a description of this control class. This description can be
+ used as the caption of a Tab page in a GUI, for example.
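+
+ As an illustrative sketch, the class description could be queried as
+ follows; ``fd`` is assumed to be an open V4L2 device file descriptor:
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Print the codec control class description, e.g. for a GUI tab. */
+    static int show_codec_class(int fd)
+    {
+        struct v4l2_queryctrl qctrl;
+
+        memset(&qctrl, 0, sizeof(qctrl));
+        qctrl.id = V4L2_CID_MPEG_CLASS;
+        if (ioctl(fd, VIDIOC_QUERYCTRL, &qctrl))
+            return -1;
+        printf("Control class: %s\n", (const char *)qctrl.name);
+        return 0;
+    }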
+
+.. _v4l2-mpeg-stream-type:
+
+``V4L2_CID_MPEG_STREAM_TYPE``
+ (enum)
+
+enum v4l2_mpeg_stream_type -
+ The MPEG-1, -2 or -4 output stream type. One cannot assume anything
+ here. Each hardware MPEG encoder tends to support different subsets
+ of the available MPEG stream types. This control is specific to
+ multiplexed MPEG streams. The currently defined stream types are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_PS``
+ - MPEG-2 program stream
+ * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_TS``
+ - MPEG-2 transport stream
+ * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_SS``
+ - MPEG-1 system stream
+ * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_DVD``
+ - MPEG-2 DVD-compatible stream
+ * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_VCD``
+ - MPEG-1 VCD-compatible stream
+ * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD``
+ - MPEG-2 SVCD-compatible stream
+
+
+
+``V4L2_CID_MPEG_STREAM_PID_PMT (integer)``
+ Program Map Table Packet ID for the MPEG transport stream (default
+ 16)
+
+``V4L2_CID_MPEG_STREAM_PID_AUDIO (integer)``
+ Audio Packet ID for the MPEG transport stream (default 256)
+
+``V4L2_CID_MPEG_STREAM_PID_VIDEO (integer)``
+ Video Packet ID for the MPEG transport stream (default 260)
+
+``V4L2_CID_MPEG_STREAM_PID_PCR (integer)``
+ Packet ID for the MPEG transport stream carrying PCR fields (default
+ 259)
+
+``V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (integer)``
+ Audio ID for MPEG PES
+
+``V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (integer)``
+ Video ID for MPEG PES
+
+.. _v4l2-mpeg-stream-vbi-fmt:
+
+``V4L2_CID_MPEG_STREAM_VBI_FMT``
+ (enum)
+
+enum v4l2_mpeg_stream_vbi_fmt -
+ Some cards can embed VBI data (e.g. Closed Caption, Teletext) into
+ the MPEG stream. This control selects whether VBI data should be
+ embedded, and if so, what embedding method should be used. The list
+ of possible VBI formats depends on the driver. The currently defined
+ VBI format types are:
+
+
+
+.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_STREAM_VBI_FMT_NONE``
+ - No VBI in the MPEG stream
+ * - ``V4L2_MPEG_STREAM_VBI_FMT_IVTV``
+ - VBI in private packets, IVTV format (documented in the kernel
+ sources in the file
+ ``Documentation/media/v4l-drivers/cx2341x.rst``)
+
+
+
+.. _v4l2-mpeg-audio-sampling-freq:
+
+``V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ``
+ (enum)
+
+enum v4l2_mpeg_audio_sampling_freq -
+ MPEG Audio sampling frequency. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100``
+ - 44.1 kHz
+ * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000``
+ - 48 kHz
+ * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000``
+ - 32 kHz
+
+
+
+.. _v4l2-mpeg-audio-encoding:
+
+``V4L2_CID_MPEG_AUDIO_ENCODING``
+ (enum)
+
+enum v4l2_mpeg_audio_encoding -
+ MPEG Audio encoding. This control is specific to multiplexed MPEG
+ streams. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_1``
+ - MPEG-1/2 Layer I encoding
+ * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_2``
+ - MPEG-1/2 Layer II encoding
+ * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_3``
+ - MPEG-1/2 Layer III encoding
+ * - ``V4L2_MPEG_AUDIO_ENCODING_AAC``
+ - MPEG-2/4 AAC (Advanced Audio Coding)
+ * - ``V4L2_MPEG_AUDIO_ENCODING_AC3``
+ - AC-3 aka ATSC A/52 encoding
+
+
+
+.. _v4l2-mpeg-audio-l1-bitrate:
+
+``V4L2_CID_MPEG_AUDIO_L1_BITRATE``
+ (enum)
+
+enum v4l2_mpeg_audio_l1_bitrate -
+ MPEG-1/2 Layer I bitrate. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_32K``
+ - 32 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_64K``
+ - 64 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_96K``
+ - 96 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_128K``
+ - 128 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_160K``
+ - 160 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_192K``
+ - 192 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_224K``
+ - 224 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_256K``
+ - 256 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_288K``
+ - 288 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_320K``
+ - 320 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_352K``
+ - 352 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_384K``
+ - 384 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_416K``
+ - 416 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L1_BITRATE_448K``
+ - 448 kbit/s
+
+
+
+.. _v4l2-mpeg-audio-l2-bitrate:
+
+``V4L2_CID_MPEG_AUDIO_L2_BITRATE``
+ (enum)
+
+enum v4l2_mpeg_audio_l2_bitrate -
+ MPEG-1/2 Layer II bitrate. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_32K``
+ - 32 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_48K``
+ - 48 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_56K``
+ - 56 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_64K``
+ - 64 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_80K``
+ - 80 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_96K``
+ - 96 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_112K``
+ - 112 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_128K``
+ - 128 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_160K``
+ - 160 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_192K``
+ - 192 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_224K``
+ - 224 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_256K``
+ - 256 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_320K``
+ - 320 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L2_BITRATE_384K``
+ - 384 kbit/s
+
+
+
+.. _v4l2-mpeg-audio-l3-bitrate:
+
+``V4L2_CID_MPEG_AUDIO_L3_BITRATE``
+ (enum)
+
+enum v4l2_mpeg_audio_l3_bitrate -
+ MPEG-1/2 Layer III bitrate. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_32K``
+ - 32 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_40K``
+ - 40 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_48K``
+ - 48 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_56K``
+ - 56 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_64K``
+ - 64 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_80K``
+ - 80 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_96K``
+ - 96 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_112K``
+ - 112 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_128K``
+ - 128 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_160K``
+ - 160 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_192K``
+ - 192 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_224K``
+ - 224 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_256K``
+ - 256 kbit/s
+ * - ``V4L2_MPEG_AUDIO_L3_BITRATE_320K``
+ - 320 kbit/s
+
+
+
+``V4L2_CID_MPEG_AUDIO_AAC_BITRATE (integer)``
+ AAC bitrate in bits per second.
+
+.. _v4l2-mpeg-audio-ac3-bitrate:
+
+``V4L2_CID_MPEG_AUDIO_AC3_BITRATE``
+ (enum)
+
+enum v4l2_mpeg_audio_ac3_bitrate -
+ AC-3 bitrate. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_32K``
+ - 32 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_40K``
+ - 40 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_48K``
+ - 48 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_56K``
+ - 56 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_64K``
+ - 64 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_80K``
+ - 80 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_96K``
+ - 96 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_112K``
+ - 112 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_128K``
+ - 128 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_160K``
+ - 160 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_192K``
+ - 192 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_224K``
+ - 224 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_256K``
+ - 256 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_320K``
+ - 320 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_384K``
+ - 384 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_448K``
+ - 448 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_512K``
+ - 512 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_576K``
+ - 576 kbit/s
+ * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_640K``
+ - 640 kbit/s
+
+
+
+.. _v4l2-mpeg-audio-mode:
+
+``V4L2_CID_MPEG_AUDIO_MODE``
+ (enum)
+
+enum v4l2_mpeg_audio_mode -
+ MPEG Audio mode. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_MODE_STEREO``
+ - Stereo
+ * - ``V4L2_MPEG_AUDIO_MODE_JOINT_STEREO``
+ - Joint Stereo
+ * - ``V4L2_MPEG_AUDIO_MODE_DUAL``
+ - Bilingual
+ * - ``V4L2_MPEG_AUDIO_MODE_MONO``
+ - Mono
+
+
+
+.. _v4l2-mpeg-audio-mode-extension:
+
+``V4L2_CID_MPEG_AUDIO_MODE_EXTENSION``
+ (enum)
+
+enum v4l2_mpeg_audio_mode_extension -
+ Joint Stereo audio mode extension. In Layer I and II it indicates
+ which subbands are in intensity stereo. All other subbands are coded
+ in stereo. Layer III is not (yet) supported. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4``
+ - Subbands 4-31 in intensity stereo
+ * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8``
+ - Subbands 8-31 in intensity stereo
+ * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12``
+ - Subbands 12-31 in intensity stereo
+ * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16``
+ - Subbands 16-31 in intensity stereo
+
+
+
+.. _v4l2-mpeg-audio-emphasis:
+
+``V4L2_CID_MPEG_AUDIO_EMPHASIS``
+ (enum)
+
+enum v4l2_mpeg_audio_emphasis -
+ Audio Emphasis. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_EMPHASIS_NONE``
+ - None
+ * - ``V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS``
+ - 50/15 microsecond emphasis
+ * - ``V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17``
+ - CCITT J.17
+
+
+
+.. _v4l2-mpeg-audio-crc:
+
+``V4L2_CID_MPEG_AUDIO_CRC``
+ (enum)
+
+enum v4l2_mpeg_audio_crc -
+ CRC method. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_CRC_NONE``
+ - None
+ * - ``V4L2_MPEG_AUDIO_CRC_CRC16``
+ - 16-bit parity check
+
+
+
+``V4L2_CID_MPEG_AUDIO_MUTE (boolean)``
+ Mutes the audio when capturing. This is not done by muting audio
+ hardware, which can still produce a slight hiss, but in the encoder
+ itself, guaranteeing a fixed and reproducible audio bitstream. 0 =
+ unmuted, 1 = muted.
+
+.. _v4l2-mpeg-audio-dec-playback:
+
+``V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK``
+ (enum)
+
+enum v4l2_mpeg_audio_dec_playback -
+ Determines how monolingual audio should be played back. Possible
+ values are:
+
+
+
+.. tabularcolumns:: |p{9.0cm}|p{8.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO``
+ - Automatically determines the best playback mode.
+ * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO``
+ - Stereo playback.
+ * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT``
+ - Left channel playback.
+ * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT``
+ - Right channel playback.
+ * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO``
+ - Mono playback.
+ * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO``
+ - Stereo playback with swapped left and right channels.
+
+
+
+.. _v4l2-mpeg-audio-dec-multilingual-playback:
+
+``V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK``
+ (enum)
+
+enum v4l2_mpeg_audio_dec_playback -
+ Determines how multilingual audio should be played back.
+
+.. _v4l2-mpeg-video-encoding:
+
+``V4L2_CID_MPEG_VIDEO_ENCODING``
+ (enum)
+
+enum v4l2_mpeg_video_encoding -
+ MPEG Video encoding method. This control is specific to multiplexed
+ MPEG streams. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_1``
+ - MPEG-1 Video encoding
+ * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_2``
+ - MPEG-2 Video encoding
+ * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC``
+ - MPEG-4 AVC (H.264) Video encoding
+
+
+
+.. _v4l2-mpeg-video-aspect:
+
+``V4L2_CID_MPEG_VIDEO_ASPECT``
+ (enum)
+
+enum v4l2_mpeg_video_aspect -
+ Video aspect. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_ASPECT_1x1``
+ * - ``V4L2_MPEG_VIDEO_ASPECT_4x3``
+ * - ``V4L2_MPEG_VIDEO_ASPECT_16x9``
+ * - ``V4L2_MPEG_VIDEO_ASPECT_221x100``
+
+
+
+``V4L2_CID_MPEG_VIDEO_B_FRAMES (integer)``
+ Number of B-Frames (default 2)
+
+``V4L2_CID_MPEG_VIDEO_GOP_SIZE (integer)``
+ GOP size (default 12)
+
+``V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (boolean)``
+ GOP closure (default 1)
+
+``V4L2_CID_MPEG_VIDEO_PULLDOWN (boolean)``
+ Enable 3:2 pulldown (default 0)
+
+.. _v4l2-mpeg-video-bitrate-mode:
+
+``V4L2_CID_MPEG_VIDEO_BITRATE_MODE``
+ (enum)
+
+enum v4l2_mpeg_video_bitrate_mode -
+ Video bitrate mode. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_VBR``
+ - Variable bitrate
+ * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_CBR``
+ - Constant bitrate
+
+
+
+``V4L2_CID_MPEG_VIDEO_BITRATE (integer)``
+ Video bitrate in bits per second.
+
+``V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (integer)``
+ Peak video bitrate in bits per second. Must be larger than or equal to
+ the average video bitrate. It is ignored if the video bitrate mode
+ is set to constant bitrate.
+
+``V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (integer)``
+ For every captured frame, skip this many subsequent frames (default
+ 0).
+
+``V4L2_CID_MPEG_VIDEO_MUTE (boolean)``
+ "Mutes" the video to a fixed color when capturing. This is useful
+ for testing, to produce a fixed video bitstream. 0 = unmuted, 1 =
+ muted.
+
+``V4L2_CID_MPEG_VIDEO_MUTE_YUV (integer)``
+ Sets the "mute" color of the video. The supplied 32-bit integer is
+ interpreted as follows (bit 0 = least significant bit):
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - Bit 0:7
+ - V chrominance information
+ * - Bit 8:15
+ - U chrominance information
+ * - Bit 16:23
+ - Y luminance information
+ * - Bit 24:31
+ - Must be zero.
+
+
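+ A small sketch of packing the components into this layout; the helper
+ name is illustrative and not part of the V4L2 API:
+
+.. code-block:: c
+
+    #include <linux/types.h>
+
+    /* Pack Y, U and V into the layout above; bits 24:31 stay zero. */
+    static __u32 pack_mute_yuv(__u8 y, __u8 u, __u8 v)
+    {
+        return ((__u32)y << 16) | ((__u32)u << 8) | (__u32)v;
+    }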
+
+.. _v4l2-mpeg-video-dec-pts:
+
+``V4L2_CID_MPEG_VIDEO_DEC_PTS (integer64)``
+ This read-only control returns the 33-bit video Presentation Time
+ Stamp as defined in ITU T-REC-H.222.0 and ISO/IEC 13818-1 of the
+ currently displayed frame. This is the same PTS as is used in
+ :ref:`VIDIOC_DECODER_CMD`.
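+
+ Being a 64-bit control, it has to be read through the extended
+ control interface. A minimal sketch, assuming ``fd`` is an open V4L2
+ decoder device:
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Read the current 33-bit PTS into *pts; returns 0 on success. */
+    static int get_dec_pts(int fd, __s64 *pts)
+    {
+        struct v4l2_ext_control ctrl;
+        struct v4l2_ext_controls ctrls;
+
+        memset(&ctrl, 0, sizeof(ctrl));
+        memset(&ctrls, 0, sizeof(ctrls));
+        ctrl.id = V4L2_CID_MPEG_VIDEO_DEC_PTS;
+        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+        ctrls.count = 1;
+        ctrls.controls = &ctrl;
+        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls))
+            return -1;
+        *pts = ctrl.value64;
+        return 0;
+    }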
+
+.. _v4l2-mpeg-video-dec-frame:
+
+``V4L2_CID_MPEG_VIDEO_DEC_FRAME (integer64)``
+ This read-only control returns the frame counter of the frame that
+ is currently displayed (decoded). This value is reset to 0 whenever
+ the decoder is started.
+
+``V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (boolean)``
+ If enabled the decoder expects to receive a single slice per buffer;
+ otherwise the decoder expects a single frame per buffer.
+ Applicable to the decoder, all codecs.
+
+``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (boolean)``
+ Enable writing sample aspect ratio in the Video Usability
+ Information. Applicable to the H264 encoder.
+
+.. _v4l2-mpeg-video-h264-vui-sar-idc:
+
+``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC``
+ (enum)
+
+enum v4l2_mpeg_video_h264_vui_sar_idc -
+ VUI sample aspect ratio indicator for H.264 encoding. The value is
+ defined in table E-1 of the standard. Applicable to the H264
+ encoder.
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED``
+ - Unspecified
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1``
+ - 1x1
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11``
+ - 12x11
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11``
+ - 10x11
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11``
+ - 16x11
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33``
+ - 40x33
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11``
+ - 24x11
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11``
+ - 20x11
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11``
+ - 32x11
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33``
+ - 80x33
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11``
+ - 18x11
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11``
+ - 15x11
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33``
+ - 64x33
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99``
+ - 160x99
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3``
+ - 4x3
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2``
+ - 3x2
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1``
+ - 2x1
+ * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED``
+ - Extended SAR
+
+
+
+``V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (integer)``
+ Extended sample aspect ratio width for H.264 VUI encoding.
+ Applicable to the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (integer)``
+ Extended sample aspect ratio height for H.264 VUI encoding.
+ Applicable to the H264 encoder.
+
+.. _v4l2-mpeg-video-h264-level:
+
+``V4L2_CID_MPEG_VIDEO_H264_LEVEL``
+ (enum)
+
+enum v4l2_mpeg_video_h264_level -
+ The level information for the H264 video elementary stream.
+ Applicable to the H264 encoder. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_0``
+ - Level 1.0
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1B``
+ - Level 1B
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_1``
+ - Level 1.1
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_2``
+ - Level 1.2
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_3``
+ - Level 1.3
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_0``
+ - Level 2.0
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_1``
+ - Level 2.1
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_2``
+ - Level 2.2
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_0``
+ - Level 3.0
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_1``
+ - Level 3.1
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_2``
+ - Level 3.2
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_0``
+ - Level 4.0
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_1``
+ - Level 4.1
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_2``
+ - Level 4.2
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_0``
+ - Level 5.0
+ * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_1``
+ - Level 5.1
+
+
+
+.. _v4l2-mpeg-video-mpeg4-level:
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL``
+ (enum)
+
+enum v4l2_mpeg_video_mpeg4_level -
+ The level information for the MPEG4 elementary stream. Applicable to
+ the MPEG4 encoder. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0``
+ - Level 0
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B``
+ - Level 0b
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_1``
+ - Level 1
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_2``
+ - Level 2
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3``
+ - Level 3
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B``
+ - Level 3b
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_4``
+ - Level 4
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_5``
+ - Level 5
+
+
+
+.. _v4l2-mpeg-video-h264-profile:
+
+``V4L2_CID_MPEG_VIDEO_H264_PROFILE``
+ (enum)
+
+enum v4l2_mpeg_video_h264_profile -
+ The profile information for H264. Applicable to the H264 encoder.
+ Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE``
+ - Baseline profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE``
+ - Constrained Baseline profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MAIN``
+ - Main profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED``
+ - Extended profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH``
+ - High profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10``
+ - High 10 profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422``
+ - High 422 profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE``
+ - High 444 Predictive profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA``
+ - High 10 Intra profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA``
+ - High 422 Intra profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA``
+ - High 444 Intra profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA``
+ - CAVLC 444 Intra profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE``
+ - Scalable Baseline profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH``
+ - Scalable High profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA``
+ - Scalable High Intra profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH``
+ - Stereo High profile
+ * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH``
+ - Multiview High profile
+
+
+
+.. _v4l2-mpeg-video-mpeg4-profile:
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE``
+ (enum)
+
+enum v4l2_mpeg_video_mpeg4_profile -
+ The profile information for MPEG4. Applicable to the MPEG4 encoder.
+ Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE``
+ - Simple profile
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE``
+ - Advanced Simple profile
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE``
+ - Core profile
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE``
+ - Simple Scalable profile
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY``
+ - Advanced Coding Efficiency profile
+
+
+
+``V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (integer)``
+ The maximum number of reference pictures used for encoding.
+ Applicable to the encoder.
+
+.. _v4l2-mpeg-video-multi-slice-mode:
+
+``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE``
+ (enum)
+
+enum v4l2_mpeg_video_multi_slice_mode -
+ Determines how the encoder should handle division of a frame into
+ slices. Applicable to the encoder. Possible values are:
+
+
+
+.. tabularcolumns:: |p{8.7cm}|p{8.8cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE``
+ - Single slice per frame.
+ * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB``
+ - Multiple slices with set maximum number of macroblocks per slice.
+ * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES``
+ - Multiple slices with set maximum size in bytes per slice.
+
+
+
+``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (integer)``
+ The maximum number of macroblocks in a slice. Used when
+ ``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` is set to
+ ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB``. Applicable to the
+ encoder.
+
+``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (integer)``
+ The maximum size of a slice in bytes. Used when
+ ``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` is set to
+ ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES``. Applicable to the
+ encoder.
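+
+ Both the mode and the limit can be set atomically with
+ ``VIDIOC_S_EXT_CTRLS``. A sketch with an assumed limit of 120
+ macroblocks per slice; ``fd`` is an open encoder device:
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Request multiple slices of at most 120 macroblocks each. */
+    static int set_slice_mode(int fd)
+    {
+        struct v4l2_ext_control ctrl[2];
+        struct v4l2_ext_controls ctrls;
+
+        memset(ctrl, 0, sizeof(ctrl));
+        memset(&ctrls, 0, sizeof(ctrls));
+        ctrl[0].id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE;
+        ctrl[0].value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB;
+        ctrl[1].id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB;
+        ctrl[1].value = 120;
+        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+        ctrls.count = 2;
+        ctrls.controls = ctrl;
+        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+    }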
+
+.. _v4l2-mpeg-video-h264-loop-filter-mode:
+
+``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE``
+ (enum)
+
+enum v4l2_mpeg_video_h264_loop_filter_mode -
+ Loop filter mode for H264 encoder. Possible values are:
+
+
+
+.. tabularcolumns:: |p{14.0cm}|p{3.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED``
+ - Loop filter is enabled.
+ * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED``
+ - Loop filter is disabled.
+ * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY``
+ - Loop filter is disabled at the slice boundary.
+
+
+
+``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (integer)``
+ Loop filter alpha coefficient, defined in the H264 standard.
+ This value corresponds to the slice_alpha_c0_offset_div2 slice header
+ field, and should be in the range of -6 to +6, inclusive. The actual alpha
+ offset FilterOffsetA is twice this value.
+ Applicable to the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (integer)``
+ Loop filter beta coefficient, defined in the H264 standard.
+ This corresponds to the slice_beta_offset_div2 slice header field, and
+ should be in the range of -6 to +6, inclusive. The actual beta offset
+ FilterOffsetB is twice this value.
+ Applicable to the H264 encoder.
+
+.. _v4l2-mpeg-video-h264-entropy-mode:
+
+``V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE``
+ (enum)
+
+enum v4l2_mpeg_video_h264_entropy_mode -
+ Entropy coding mode for H264 - CABAC/CAVLC. Applicable to the H264
+ encoder. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC``
+ - Use CAVLC entropy coding.
+ * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC``
+ - Use CABAC entropy coding.
+
+
+
+``V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (boolean)``
+ Enable 8X8 transform for H264. Applicable to the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (boolean)``
+ Enable constrained intra prediction for H264. Applicable to the H264
+ encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (integer)``
+ Specify the offset that should be added to the luma quantization
+ parameter to determine the chroma quantization parameter. Applicable
+ to the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (integer)``
+ Cyclic intra macroblock refresh. This is the number of continuous
+ macroblocks refreshed every frame. Each frame a successive set of
+ macroblocks is refreshed until the cycle completes and starts from
+ the top of the frame. Applicable to the H264, H263 and MPEG4 encoders.
+
+``V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (boolean)``
+ Frame level rate control enable. If this control is disabled then
+ the quantization parameter for each frame type is constant and set
+ with appropriate controls (e.g.
+ ``V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP``). If frame rate control is
+ enabled then the quantization parameter is adjusted to meet the
+ chosen bitrate. Minimum and maximum values for the quantization
+ parameter
+ can be set with appropriate controls (e.g.
+ ``V4L2_CID_MPEG_VIDEO_H263_MIN_QP``). Applicable to encoders.
+
+``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (boolean)``
+ Macroblock level rate control enable. Applicable to the MPEG4 and
+ H264 encoders.
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (boolean)``
+ Quarter pixel motion estimation for MPEG4. Applicable to the MPEG4
+ encoder.
+
+``V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (integer)``
+ Quantization parameter for an I frame for H263. Valid range: from 1
+ to 31.
+
+``V4L2_CID_MPEG_VIDEO_H263_MIN_QP (integer)``
+ Minimum quantization parameter for H263. Valid range: from 1 to 31.
+
+``V4L2_CID_MPEG_VIDEO_H263_MAX_QP (integer)``
+ Maximum quantization parameter for H263. Valid range: from 1 to 31.
+
+``V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (integer)``
+ Quantization parameter for a P frame for H263. Valid range: from 1
+ to 31.
+
+``V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (integer)``
+ Quantization parameter for a B frame for H263. Valid range: from 1
+ to 31.
+
+``V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (integer)``
+ Quantization parameter for an I frame for H264. Valid range: from 0
+ to 51.
+
+``V4L2_CID_MPEG_VIDEO_H264_MIN_QP (integer)``
+ Minimum quantization parameter for H264. Valid range: from 0 to 51.
+
+``V4L2_CID_MPEG_VIDEO_H264_MAX_QP (integer)``
+ Maximum quantization parameter for H264. Valid range: from 0 to 51.
+
+``V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (integer)``
+ Quantization parameter for a P frame for H264. Valid range: from 0
+ to 51.
+
+``V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (integer)``
+ Quantization parameter for a B frame for H264. Valid range: from 0
+ to 51.
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (integer)``
+ Quantization parameter for an I frame for MPEG4. Valid range: from 1
+ to 31.
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (integer)``
+ Minimum quantization parameter for MPEG4. Valid range: from 1 to 31.
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (integer)``
+ Maximum quantization parameter for MPEG4. Valid range: from 1 to 31.
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (integer)``
+ Quantization parameter for a P frame for MPEG4. Valid range: from 1
+ to 31.
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (integer)``
+ Quantization parameter for a B frame for MPEG4. Valid range: from 1
+ to 31.
+
+``V4L2_CID_MPEG_VIDEO_VBV_SIZE (integer)``
+ The Video Buffer Verifier size in kilobytes; it is used as a
+ limitation of frame skip. The VBV is defined in the standard as a
+ means to verify that the produced stream will be successfully
+ decoded. The standard describes it as "Part of a hypothetical
+ decoder that is conceptually connected to the output of the encoder.
+ Its purpose is to provide a constraint on the variability of the
+ data rate that an encoder or editing process may produce."
+ Applicable to the MPEG1, MPEG2, MPEG4 encoders.
+
+.. _v4l2-mpeg-video-vbv-delay:
+
+``V4L2_CID_MPEG_VIDEO_VBV_DELAY (integer)``
+ Sets the initial delay in milliseconds for VBV buffer control.
+
+.. _v4l2-mpeg-video-hor-search-range:
+
+``V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (integer)``
+ The horizontal search range defines the maximum horizontal search
+ area, in pixels, used to search for and match the current
+ macroblock (MB) in the reference picture. This V4L2 control is used
+ to set the horizontal search range for the motion estimation module
+ in the video encoder.
+
+.. _v4l2-mpeg-video-vert-search-range:
+
+``V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (integer)``
+ The vertical search range defines the maximum vertical search area,
+ in pixels, used to search for and match the current macroblock (MB)
+ in the reference picture. This V4L2 control is used to set the
+ vertical search range for the motion estimation module in the video
+ encoder.
+
+.. _v4l2-mpeg-video-force-key-frame:
+
+``V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (button)``
+ Force a key frame for the next queued buffer. Applicable to
+ encoders. This is a general, codec-agnostic keyframe control.
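+
+ The value written to a button control is ignored. A minimal sketch,
+ with ``fd`` assumed to be an open encoder device:
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Ask the encoder to make the next queued frame a key frame. */
+    static int force_key_frame(int fd)
+    {
+        struct v4l2_control ctrl;
+
+        ctrl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;
+        ctrl.value = 0; /* ignored for button controls */
+        return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+    }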
+
+``V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (integer)``
+ The Coded Picture Buffer size in kilobytes; it is used as a
+ limitation of frame skip. The CPB is defined in the H264 standard as
+ a means to verify that the produced stream will be successfully
+ decoded. Applicable to the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (integer)``
+ Period between I-frames in the open GOP for H264. In case of an open
+ GOP this is the period between two I-frames. The period between IDR
+ (Instantaneous Decoding Refresh) frames is taken from the GOP_SIZE
+ control. An IDR frame is an I-frame after which no prior frames are
+ referenced; this means that a stream can be restarted from an IDR
+ frame without the need to store or decode any previous frames.
+ Applicable to the H264 encoder.
+
+.. _v4l2-mpeg-video-header-mode:
+
+``V4L2_CID_MPEG_VIDEO_HEADER_MODE``
+ (enum)
+
+enum v4l2_mpeg_video_header_mode -
+ Determines whether the header is returned as the first buffer or
+ together with the first frame. Applicable to encoders.
+ Possible values are:
+
+
+
+.. tabularcolumns:: |p{10.3cm}|p{7.2cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE``
+ - The stream header is returned separately in the first buffer.
+ * - ``V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME``
+ - The stream header is returned together with the first encoded
+ frame.
+
+
+
+``V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (boolean)``
+ Repeat the video sequence headers. Repeating these headers makes
+ random access to the video stream easier. Applicable to the MPEG1, 2
+ and 4 encoders.
+
+``V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (boolean)``
+ Enables the deblocking post-processing filter. Applicable to the
+ MPEG4 decoder.
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_RES (integer)``
+ vop_time_increment_resolution value for MPEG4. Applicable to the
+ MPEG4 encoder.
+
+``V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_INC (integer)``
+ vop_time_increment value for MPEG4. Applicable to the MPEG4
+ encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (boolean)``
+ Enable generation of frame packing supplemental enhancement
+ information in the encoded bitstream. The frame packing SEI message
+ contains the arrangement of L and R planes for 3D viewing.
+ Applicable to the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (boolean)``
+ Sets current frame as frame0 in frame packing SEI. Applicable to the
+ H264 encoder.
+
+.. _v4l2-mpeg-video-h264-sei-fp-arrangement-type:
+
+``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE``
+ (enum)
+
+enum v4l2_mpeg_video_h264_sei_fp_arrangement_type -
+ Frame packing arrangement type for H264 SEI. Applicable to the H264
+ encoder. Possible values are:
+
+.. tabularcolumns:: |p{12cm}|p{5.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHEKERBOARD``
+ - Pixels are alternately from L and R.
+ * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN``
+ - L and R are interlaced by column.
+ * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW``
+ - L and R are interlaced by row.
+ * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE``
+ - L is on the left, R on the right.
+ * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM``
+ - L is on top, R on bottom.
+ * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL``
+ - One view per frame.
+
+
+
+``V4L2_CID_MPEG_VIDEO_H264_FMO (boolean)``
+ Enables flexible macroblock ordering in the encoded bitstream. It is
+ a technique used for restructuring the ordering of macroblocks in
+ pictures. Applicable to the H264 encoder.
+
+.. _v4l2-mpeg-video-h264-fmo-map-type:
+
+``V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE``
+ (enum)
+
+enum v4l2_mpeg_video_h264_fmo_map_type -
+ When using FMO, the map type divides the image into different scan
+ patterns of macroblocks. Applicable to the H264 encoder. Possible
+ values are:
+
+.. tabularcolumns:: |p{12.5cm}|p{5.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES``
+ - Slices are interleaved one after another with macroblocks in run
+ length order.
+ * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES``
+ - Scatters the macroblocks based on a mathematical function known to
+ both encoder and decoder.
+ * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER``
+ - Macroblocks arranged in rectangular areas or regions of interest.
+ * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT``
+ - Slice groups grow in a cyclic way from the centre outwards.
+ * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN``
+ - Slice groups grow in raster scan pattern from left to right.
+ * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN``
+ - Slice groups grow in wipe scan pattern from top to bottom.
+ * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT``
+ - User defined map type.
+
+
+
+``V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (integer)``
+ Number of slice groups in FMO. Applicable to the H264 encoder.
+
+.. _v4l2-mpeg-video-h264-fmo-change-direction:
+
+``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION``
+ (enum)
+
+enum v4l2_mpeg_video_h264_fmo_change_dir -
+ Specifies the direction of the slice group change for raster and wipe
+ maps. Applicable to the H264 encoder. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT``
+ - Raster scan or wipe right.
+ * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT``
+ - Reverse raster scan or wipe left.
+
+
+
+``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (integer)``
+ Specifies the size of the first slice group for raster and wipe map.
+ Applicable to the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (integer)``
+ Specifies the number of consecutive macroblocks for the interleaved
+ map. Applicable to the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_ASO (boolean)``
+ Enables arbitrary slice ordering in encoded bitstream. Applicable to
+ the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (integer)``
+ Specifies the slice order in ASO. Applicable to the H264 encoder.
+ The supplied 32-bit integer is interpreted as follows (bit 0 = least
+ significant bit):
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - Bit 0:15
+ - Slice ID
+ * - Bit 16:31
+ - Slice position or order
+
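+ A sketch of building one such slice-order entry; the helper name is
+ illustrative and not part of the V4L2 API:
+
+.. code-block:: c
+
+    #include <linux/types.h>
+
+    /* Slice ID in bits 0:15, slice position/order in the upper bits. */
+    static __u32 aso_slice_entry(__u16 slice_id, __u16 position)
+    {
+        return ((__u32)position << 16) | slice_id;
+    }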
+
+
+``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (boolean)``
+ Enables H264 hierarchical coding. Applicable to the H264 encoder.
+
+.. _v4l2-mpeg-video-h264-hierarchical-coding-type:
+
+``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE``
+ (enum)
+
+enum v4l2_mpeg_video_h264_hierarchical_coding_type -
+ Specifies the hierarchical coding type. Applicable to the H264
+ encoder. Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B``
+ - Hierarchical B coding.
+ * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P``
+ - Hierarchical P coding.
+
+
+
+``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (integer)``
+ Specifies the number of hierarchical coding layers. Applicable to
+ the H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (integer)``
+ Specifies a user defined QP for each layer. Applicable to the H264
+ encoder. The supplied 32-bit integer is interpreted as follows (bit
+ 0 = least significant bit):
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - Bit 0:15
+ - QP value
+ * - Bit 16:31
+ - Layer number
+
+
+
+.. _v4l2-mpeg-mpeg2:
+
+``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (struct)``
+ Specifies the slice parameters (as extracted from the bitstream) for the
+ associated MPEG-2 slice data. This includes the necessary parameters for
+ configuring a stateless hardware decoding pipeline for MPEG-2.
+ The bitstream parameters are defined according to :ref:`mpeg2part2`.
+
+ .. note::
+
+ This compound control is not yet part of the public kernel API and
+ it is expected to change.
+
+.. c:type:: v4l2_ctrl_mpeg2_slice_params
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_mpeg2_slice_params
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u32
+ - ``bit_size``
+ - Size (in bits) of the current slice data.
+ * - __u32
+ - ``data_bit_offset``
+ - Offset (in bits) to the video data in the current slice data.
+ * - struct :c:type:`v4l2_mpeg2_sequence`
+ - ``sequence``
+ - Structure with MPEG-2 sequence metadata, merging relevant fields from
+ the sequence header and sequence extension parts of the bitstream.
+ * - struct :c:type:`v4l2_mpeg2_picture`
+ - ``picture``
+ - Structure with MPEG-2 picture metadata, merging relevant fields from
+ the picture header and picture coding extension parts of the bitstream.
+ * - __u64
+ - ``backward_ref_ts``
+ - Timestamp of the V4L2 capture buffer to use as backward reference, used
+ with B-coded and P-coded frames. The timestamp refers to the
+ ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
+ :c:func:`v4l2_timeval_to_ns()` function to convert the struct
+ :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
+ * - __u64
+ - ``forward_ref_ts``
+ - Timestamp for the V4L2 capture buffer to use as forward reference, used
+ with B-coded frames. The timestamp refers to the ``timestamp`` field in
+ struct :c:type:`v4l2_buffer`. Use the :c:func:`v4l2_timeval_to_ns()`
+ function to convert the struct :c:type:`timeval` in struct
+ :c:type:`v4l2_buffer` to a __u64.
+ * - __u32
+ - ``quantiser_scale_code``
+ - Code used to determine the quantization scale to use for the IDCT.
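+
+ In userspace, where :c:func:`v4l2_timeval_to_ns()` is not available,
+ the same conversion can be done by hand. A hedged sketch, with
+ ``ref_buf`` standing for a previously dequeued reference capture
+ buffer:
+
+.. code-block:: c
+
+    #include <linux/videodev2.h>
+
+    /* Convert a buffer timestamp to the __u64 nanosecond value
+     * expected in backward_ref_ts / forward_ref_ts. */
+    static __u64 buf_timestamp_ns(const struct v4l2_buffer *ref_buf)
+    {
+        return (__u64)ref_buf->timestamp.tv_sec * 1000000000ULL +
+               (__u64)ref_buf->timestamp.tv_usec * 1000ULL;
+    }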
+
+.. c:type:: v4l2_mpeg2_sequence
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_mpeg2_sequence
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u16
+ - ``horizontal_size``
+ - The width of the displayable part of the frame's luminance component.
+ * - __u16
+ - ``vertical_size``
+ - The height of the displayable part of the frame's luminance component.
+ * - __u32
+ - ``vbv_buffer_size``
+ - Used to calculate the required size of the video buffering verifier,
+ defined (in bits) as: 16 * 1024 * vbv_buffer_size.
+ * - __u16
+ - ``profile_and_level_indication``
+ - The current profile and level indication as extracted from the
+ bitstream.
+ * - __u8
+ - ``progressive_sequence``
+ - Indication that all the frames for the sequence are progressive instead
+ of interlaced.
+ * - __u8
+ - ``chroma_format``
+ - The chrominance sub-sampling format (1: 4:2:0, 2: 4:2:2, 3: 4:4:4).
+
+.. c:type:: v4l2_mpeg2_picture
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_mpeg2_picture
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``picture_coding_type``
+ - Picture coding type for the frame covered by the current slice
+ (V4L2_MPEG2_PICTURE_CODING_TYPE_I, V4L2_MPEG2_PICTURE_CODING_TYPE_P or
+ V4L2_MPEG2_PICTURE_CODING_TYPE_B).
+ * - __u8
+ - ``f_code[2][2]``
+ - Motion vector codes.
+ * - __u8
+ - ``intra_dc_precision``
+ - Precision of the Discrete Cosine Transform (0: 8 bits precision,
+ 1: 9 bits precision, 2: 10 bits precision, 3: 11 bits precision).
+ * - __u8
+ - ``picture_structure``
+ - Picture structure (1: interlaced top field, 2: interlaced bottom field,
+ 3: progressive frame).
+ * - __u8
+ - ``top_field_first``
+ - If set to 1 for an interlaced stream, the top field is output first.
+ * - __u8
+ - ``frame_pred_frame_dct``
+ - If set to 1, only frame-DCT and frame prediction are used.
+ * - __u8
+ - ``concealment_motion_vectors``
+ - If set to 1, motion vectors are coded for intra macroblocks.
+ * - __u8
+ - ``q_scale_type``
+ - This flag affects the inverse quantization process.
+ * - __u8
+ - ``intra_vlc_format``
+ - This flag affects the decoding of transform coefficient data.
+ * - __u8
+ - ``alternate_scan``
+ - This flag affects the decoding of transform coefficient data.
+ * - __u8
+ - ``repeat_first_field``
+ - This flag affects the decoding process of progressive frames.
+ * - __u16
+ - ``progressive_frame``
+ - Indicates whether the current frame is progressive.
+
+``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (struct)``
+ Specifies quantization matrices (as extracted from the bitstream) for the
+ associated MPEG-2 slice data.
+
+ .. note::
+
+ This compound control is not yet part of the public kernel API and
+ it is expected to change.
+
+.. c:type:: v4l2_ctrl_mpeg2_quantization
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_mpeg2_quantization
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``load_intra_quantiser_matrix``
+ - One bit to indicate whether to load the ``intra_quantiser_matrix`` data.
+ * - __u8
+ - ``load_non_intra_quantiser_matrix``
+ - One bit to indicate whether to load the ``non_intra_quantiser_matrix``
+ data.
+ * - __u8
+ - ``load_chroma_intra_quantiser_matrix``
+ - One bit to indicate whether to load the
+ ``chroma_intra_quantiser_matrix`` data, only relevant for non-4:2:0 YUV
+ formats.
+ * - __u8
+ - ``load_chroma_non_intra_quantiser_matrix``
+ - One bit to indicate whether to load the
+ ``chroma_non_intra_quantiser_matrix`` data, only relevant for non-4:2:0
+ YUV formats.
+ * - __u8
+ - ``intra_quantiser_matrix[64]``
+ - The quantization matrix coefficients for intra-coded frames, in zigzag
+ scanning order. It is relevant for both luma and chroma components,
+ although it can be superseded by the chroma-specific matrix for
+ non-4:2:0 YUV formats.
+ * - __u8
+ - ``non_intra_quantiser_matrix[64]``
+ - The quantization matrix coefficients for non-intra-coded frames, in
+ zigzag scanning order. It is relevant for both luma and chroma
+ components, although it can be superseded by the chroma-specific matrix
+ for non-4:2:0 YUV formats.
+ * - __u8
+ - ``chroma_intra_quantiser_matrix[64]``
+ - The quantization matrix coefficients for the chrominance component of
+ intra-coded frames, in zigzag scanning order. Only relevant for
+ non-4:2:0 YUV formats.
+ * - __u8
+ - ``chroma_non_intra_quantiser_matrix[64]``
+ - The quantization matrix coefficients for the chrominance component of
+ non-intra-coded frames, in zigzag scanning order. Only relevant for
+ non-4:2:0 YUV formats.
+
+MFC 5.1 MPEG Controls
+=====================
+
+The following MPEG class controls deal with MPEG decoding and encoding
+settings that are specific to the Multi Format Codec 5.1 device present
+in the S5P family of SoCs by Samsung.
+
+
+.. _mfc51-control-id:
+
+MFC 5.1 Control IDs
+-------------------
+
+``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (boolean)``
+ If the display delay is enabled then the decoder is forced to return
+ a CAPTURE buffer (decoded frame) after processing a certain number
+ of OUTPUT buffers. The delay can be set through
+ ``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY``. This
+ feature can be used, for example, for generating thumbnails of videos.
+ Applicable to the H264 decoder.
+
+``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (integer)``
+ Display delay value for H264 decoder. The decoder is forced to
+ return a decoded frame after the set 'display delay' number of
+ frames. If this number is low it may result in frames returned out
+ of display order; in addition the hardware may still be using the
+ returned buffer as a reference picture for subsequent frames.
+
+``V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (integer)``
+ The number of reference pictures used for encoding a P picture.
+ Applicable to the H264 encoder.
+
+``V4L2_CID_MPEG_MFC51_VIDEO_PADDING (boolean)``
+ Padding enable in the encoder - use a color instead of repeating
+ border pixels. Applicable to encoders.
+
+``V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (integer)``
+ Padding color in the encoder. Applicable to encoders. The supplied
+ 32-bit integer is interpreted as follows (bit 0 = least significant
+ bit):
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - Bit 0:7
+ - V chrominance information
+ * - Bit 8:15
+ - U chrominance information
+ * - Bit 16:23
+ - Y luminance information
+ * - Bit 24:31
+ - Must be zero.
+
+
+
+``V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (integer)``
+ Reaction coefficient for MFC rate control. Applicable to encoders.
+
+ .. note::
+
+ #. Valid only when the frame level RC is enabled.
+
+ #. For tight CBR, this field must be small (e.g. 2 to 10). For
+ VBR, this field must be large (e.g. 100 to 1000).
+
+ #. It is not recommended to use a number greater than
+ FRAME_RATE * (10^9 / BIT_RATE); for example, at 30 frames per
+ second and a 6 Mbit/s bitrate this evaluates to 5000.
+
+``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (boolean)``
+ Adaptive rate control for dark region. Valid only when H.264 and
+ macroblock level RC is enabled
+ (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264
+ encoder.
+
+``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (boolean)``
+ Adaptive rate control for smooth region. Valid only when H.264 and
+ macroblock level RC is enabled
+ (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264
+ encoder.
+
+``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (boolean)``
+ Adaptive rate control for static region. Valid only when H.264 and
+ macroblock level RC is enabled
+ (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264
+ encoder.
+
+``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (boolean)``
+ Adaptive rate control for activity region. Valid only when H.264 and
+ macroblock level RC is enabled
+ (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264
+ encoder.
+
+.. _v4l2-mpeg-mfc51-video-frame-skip-mode:
+
+``V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE``
+ (enum)
+
+enum v4l2_mpeg_mfc51_video_frame_skip_mode -
+ Indicates in what conditions the encoder should skip frames. If
+ encoding a frame would cause the encoded stream to be larger than a
+ chosen data limit then the frame will be skipped. Possible values
+ are:
+
+
+.. tabularcolumns:: |p{9.0cm}|p{8.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_DISABLED``
+ - Frame skip mode is disabled.
+ * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_LEVEL_LIMIT``
+ - Frame skip mode enabled and buffer limit is set by the chosen
+ level and is defined by the standard.
+ * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_BUF_LIMIT``
+ - Frame skip mode enabled and buffer limit is set by the VBV
+ (MPEG1/2/4) or CPB (H264) buffer size control.
+
+
+
+``V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (integer)``
+ Enable rate-control with fixed target bit. If this setting is
+ enabled, then the rate control logic of the encoder will calculate
+ the average bitrate for a GOP and keep it below or equal to the set
+ bitrate target. Otherwise the rate control logic calculates the
+ overall average bitrate for the stream and keeps it below or equal
+ to the set bitrate. In the first case the average bitrate for the
+ whole stream will be smaller than the set bitrate, because the
+ average is calculated over a smaller number of frames; on the other
+ hand, enabling this setting ensures that the stream will meet tight
+ bandwidth constraints. Applicable to encoders.
+
+.. _v4l2-mpeg-mfc51-video-force-frame-type:
+
+``V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE``
+ (enum)
+
+enum v4l2_mpeg_mfc51_video_force_frame_type -
+ Force a frame type for the next queued buffer. Applicable to
+ encoders (a usage sketch follows the table). Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_DISABLED``
+ - Forcing a specific frame type disabled.
+ * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME``
+ - Force an I-frame.
+ * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_NOT_CODED``
+ - Force a non-coded frame.
+
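+For example, to request a key frame before queueing a raw frame
+(``fd`` is an open encoder node; error handling omitted):
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void force_i_frame(int fd)
+    {
+        struct v4l2_ext_control ctrl = {
+            .id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE,
+            .value = V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME,
+        };
+        struct v4l2_ext_controls arg = {
+            .ctrl_class = V4L2_CTRL_CLASS_MPEG,
+            .count = 1,
+            .controls = &ctrl,
+        };
+
+        ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
+        /* ...then queue the raw frame with VIDIOC_QBUF as usual. */
+    }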
+
+
+
+CX2341x MPEG Controls
+=====================
+
+The following MPEG class controls deal with MPEG encoding settings that
+are specific to the Conexant CX23415 and CX23416 MPEG encoding chips.
+
+
+.. _cx2341x-control-id:
+
+CX2341x Control IDs
+-------------------
+
+.. _v4l2-mpeg-cx2341x-video-spatial-filter-mode:
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE``
+ (enum)
+
+enum v4l2_mpeg_cx2341x_video_spatial_filter_mode -
+ Sets the Spatial Filter mode (default ``MANUAL``). Possible values
+ are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL``
+ - Choose the filter manually
+ * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO``
+ - Choose the filter automatically
+
+
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (integer (0-15))``
+ The setting for the Spatial Filter. 0 = off, 15 = maximum. (Default
+ is 0.)
+
+.. _luma-spatial-filter-type:
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE``
+ (enum)
+
+enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type -
+ Select the algorithm to use for the Luma Spatial Filter (default
+ ``1D_HOR``). Possible values:
+
+
+
+.. tabularcolumns:: |p{14.5cm}|p{3.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF``
+ - No filter
+ * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR``
+ - One-dimensional horizontal
+ * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT``
+ - One-dimensional vertical
+ * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE``
+ - Two-dimensional separable
+ * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE``
+ - Two-dimensional symmetrical non-separable
+
+
+
+.. _chroma-spatial-filter-type:
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE``
+ (enum)
+
+enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type -
+ Select the algorithm for the Chroma Spatial Filter (default
+ ``1D_HOR``). Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF``
+ - No filter
+ * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR``
+ - One-dimensional horizontal
+
+
+
+.. _v4l2-mpeg-cx2341x-video-temporal-filter-mode:
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE``
+ (enum)
+
+enum v4l2_mpeg_cx2341x_video_temporal_filter_mode -
+ Sets the Temporal Filter mode (default ``MANUAL``). Possible values
+ are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL``
+ - Choose the filter manually
+ * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO``
+ - Choose the filter automatically
+
+
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (integer (0-31))``
+ The setting for the Temporal Filter. 0 = off, 31 = maximum. (Default
+ is 8 for full-scale capturing and 0 for scaled capturing.)
+
+.. _v4l2-mpeg-cx2341x-video-median-filter-type:
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE``
+ (enum)
+
+enum v4l2_mpeg_cx2341x_video_median_filter_type -
+ Median Filter Type (default ``OFF``). Possible values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF``
+ - No filter
+ * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR``
+ - Horizontal filter
+ * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT``
+ - Vertical filter
+ * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT``
+ - Horizontal and vertical filter
+ * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG``
+ - Diagonal filter
+
+
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (integer (0-255))``
+ Threshold above which the luminance median filter is enabled
+ (default 0)
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (integer (0-255))``
+ Threshold below which the luminance median filter is enabled
+ (default 255)
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (integer (0-255))``
+ Threshold above which the chroma median filter is enabled (default
+ 0)
+
+``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (integer (0-255))``
+ Threshold below which the chroma median filter is enabled (default
+ 255)
+
+``V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (boolean)``
+ The CX2341X MPEG encoder can insert one empty MPEG-2 PES packet into
+ the stream between every four video frames. The packet size is 2048
+ bytes, including the packet_start_code_prefix and stream_id
+ fields. The stream_id is 0xBF (private stream 2). The payload
+ consists of 0x00 bytes, to be filled in by the application. 0 = do
+ not insert, 1 = insert packets.
+
+
+VPX Control Reference
+=====================
+
+The VPX controls include controls for encoding parameters of the VPx
+video codecs.
+
+
+.. _vpx-control-id:
+
+VPX Control IDs
+---------------
+
+.. _v4l2-vpx-num-partitions:
+
+``V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS``
+ (enum)
+
+enum v4l2_vp8_num_partitions -
+ The number of token partitions to use in the VP8 encoder. Possible
+ values are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION``
+ - 1 coefficient partition
+ * - ``V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS``
+ - 2 coefficient partitions
+ * - ``V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS``
+ - 4 coefficient partitions
+ * - ``V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS``
+ - 8 coefficient partitions
+
+
+
+``V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (boolean)``
+ Setting this prevents intra 4x4 mode in the intra mode decision.
+
+.. _v4l2-vpx-num-ref-frames:
+
+``V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES``
+ (enum)
+
+enum v4l2_vp8_num_ref_frames -
+ The number of reference pictures for encoding P frames. Possible
+ values are:
+
+.. tabularcolumns:: |p{7.9cm}|p{9.6cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME``
+ - Last encoded frame will be searched
+ * - ``V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME``
+ - Two frames will be searched among the last encoded frame, the
+ golden frame and the alternate reference (altref) frame. The
+ encoder implementation will decide which two are chosen.
+ * - ``V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME``
+ - The last encoded frame, the golden frame and the altref frame will
+ be searched.
+
+
+
+``V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (integer)``
+ Indicates the loop filter level. The adjustment of the loop filter
+ level is done via a delta value against a baseline loop filter
+ value.
+
+``V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (integer)``
+ This parameter affects the loop filter. Anything above zero weakens
+ the deblocking effect on the loop filter.
+
+``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (integer)``
+ Sets the refresh period for the golden frame. The period is defined
+ in number of frames. For a value of 'n', every nth frame starting
+ from the first key frame will be taken as a golden frame. For
+ example, for an encoding sequence of 0, 1, 2, 3, 4, 5, 6, 7 where
+ the golden frame refresh period is set to 4, the frames 0, 4, 8,
+ etc. will be taken as the golden frames, as frame 0 is always a key
+ frame.
+
+.. _v4l2-vpx-golden-frame-sel:
+
+``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL``
+ (enum)
+
+enum v4l2_vp8_golden_frame_sel -
+ Selects the golden frame for encoding. Possible values are:
+
+.. raw:: latex
+
+ \footnotesize
+
+.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV``
+ - Use the (n-2)th frame as a golden frame, current frame index being
+ 'n'.
+ * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD``
+ - Use the previous specific frame indicated by
+ ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD`` as a
+ golden frame.
+
+.. raw:: latex
+
+ \normalsize
+
+
+``V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (integer)``
+ Minimum quantization parameter for VP8.
+
+``V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (integer)``
+ Maximum quantization parameter for VP8.
+
+``V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (integer)``
+ Quantization parameter for an I frame for VP8.
+
+``V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (integer)``
+ Quantization parameter for a P frame for VP8.
+
+.. _v4l2-mpeg-video-vp8-profile:
+
+``V4L2_CID_MPEG_VIDEO_VP8_PROFILE``
+ (enum)
+
+enum v4l2_mpeg_video_vp8_profile -
+ This control allows selecting the profile for the VP8 encoder.
+ It is also used to enumerate the profiles supported by a VP8 encoder
+ or decoder. Possible values are:
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_0``
+ - Profile 0
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_1``
+ - Profile 1
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_2``
+ - Profile 2
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_3``
+ - Profile 3
+
+.. _v4l2-mpeg-video-vp9-profile:
+
+``V4L2_CID_MPEG_VIDEO_VP9_PROFILE``
+ (enum)
+
+enum v4l2_mpeg_video_vp9_profile -
+ This control allows selecting the profile for the VP9 encoder.
+ It is also used to enumerate the profiles supported by a VP9 encoder
+ or decoder (a query sketch follows the table). Possible values are:
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_0``
+ - Profile 0
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_1``
+ - Profile 1
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_2``
+ - Profile 2
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_3``
+ - Profile 3
+
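+A sketch of listing the VP8 or VP9 profiles a device supports: menu
+indices that the driver does not support make VIDIOC_QUERYMENU fail,
+so only the supported ones are printed (``fd`` is an open codec node):
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void list_vp9_profiles(int fd)
+    {
+        struct v4l2_querymenu menu = {
+            .id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+        };
+
+        for (menu.index = V4L2_MPEG_VIDEO_VP9_PROFILE_0;
+             menu.index <= V4L2_MPEG_VIDEO_VP9_PROFILE_3; menu.index++) {
+            if (ioctl(fd, VIDIOC_QUERYMENU, &menu) == 0)
+                printf("supported: %s\n", (const char *)menu.name);
+        }
+    }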
+
+High Efficiency Video Coding (HEVC/H.265) Control Reference
+===========================================================
+
+The HEVC/H.265 controls include controls for encoding parameters of HEVC/H.265
+video codec.
+
+
+.. _hevc-control-id:
+
+HEVC/H.265 Control IDs
+----------------------
+
+``V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (integer)``
+ Minimum quantization parameter for HEVC.
+ Valid range: from 0 to 51.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (integer)``
+ Maximum quantization parameter for HEVC.
+ Valid range: from 0 to 51.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (integer)``
+ Quantization parameter for an I frame for HEVC.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
+``V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (integer)``
+ Quantization parameter for a P frame for HEVC.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
+``V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (integer)``
+ Quantization parameter for a B frame for HEVC.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
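+A sketch of a consistent QP configuration, assuming ``fd`` is an open
+HEVC encoder node; the per-frame-type QPs must lie inside the
+[MIN_QP, MAX_QP] range set here (error handling omitted):
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void set_hevc_qp(int fd)
+    {
+        struct v4l2_ext_control ctrls[4] = {
+            { .id = V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,     .value = 10 },
+            { .id = V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP,     .value = 42 },
+            { .id = V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP, .value = 26 },
+            { .id = V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP, .value = 30 },
+        };
+        struct v4l2_ext_controls arg = {
+            .ctrl_class = V4L2_CTRL_CLASS_MPEG,
+            .count = 4,
+            .controls = ctrls,
+        };
+
+        ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
+    }
+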
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (boolean)``
+ HIERARCHICAL_QP allows the host to specify the quantization parameter
+ values for each temporal layer through HIERARCHICAL_QP_LAYER. This is
+ valid only if HIERARCHICAL_CODING_LAYER is greater than 1. Setting the
+ control value to 1 enables setting of the QP values for the layers.
+
+.. _v4l2-hevc-hier-coding-type:
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE``
+ (enum)
+
+enum v4l2_mpeg_video_hevc_hier_coding_type -
+ Selects the hierarchical coding type for encoding. Possible values are:
+
+.. raw:: latex
+
+ \footnotesize
+
+.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B``
+ - Use the B frame for hierarchical coding.
+ * - ``V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P``
+ - Use the P frame for hierarchical coding.
+
+.. raw:: latex
+
+ \normalsize
+
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (integer)``
+ Selects the hierarchical coding layer. In normal encoding
+ (non-hierarchical coding), it should be zero. Possible values are [0, 6].
+ 0 indicates HIERARCHICAL CODING LAYER 0, 1 indicates HIERARCHICAL CODING
+ LAYER 1 and so on.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (integer)``
+ Indicates quantization parameter for hierarchical coding layer 0.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (integer)``
+ Indicates quantization parameter for hierarchical coding layer 1.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (integer)``
+ Indicates quantization parameter for hierarchical coding layer 2.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (integer)``
+ Indicates quantization parameter for hierarchical coding layer 3.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (integer)``
+ Indicates quantization parameter for hierarchical coding layer 4.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (integer)``
+ Indicates quantization parameter for hierarchical coding layer 5.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (integer)``
+ Indicates quantization parameter for hierarchical coding layer 6.
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+
+.. _v4l2-hevc-profile:
+
+``V4L2_CID_MPEG_VIDEO_HEVC_PROFILE``
+ (enum)
+
+enum v4l2_mpeg_video_hevc_profile -
+ Select the desired profile for HEVC encoder.
+
+.. raw:: latex
+
+ \footnotesize
+
+.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN``
+ - Main profile.
+ * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE``
+ - Main still picture profile.
+ * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10``
+ - Main 10 profile.
+
+.. raw:: latex
+
+ \normalsize
+
+
+.. _v4l2-hevc-level:
+
+``V4L2_CID_MPEG_VIDEO_HEVC_LEVEL``
+ (enum)
+
+enum v4l2_mpeg_video_hevc_level -
+ Selects the desired level for HEVC encoder.
+
+.. raw:: latex
+
+ \footnotesize
+
+.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_1``
+ - Level 1.0
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_2``
+ - Level 2.0
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1``
+ - Level 2.1
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_3``
+ - Level 3.0
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1``
+ - Level 3.1
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_4``
+ - Level 4.0
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1``
+ - Level 4.1
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5``
+ - Level 5.0
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1``
+ - Level 5.1
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2``
+ - Level 5.2
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6``
+ - Level 6.0
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1``
+ - Level 6.1
+ * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2``
+ - Level 6.2
+
+.. raw:: latex
+
+ \normalsize
+
+
+``V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (integer)``
+ Indicates the number of evenly spaced subintervals, called ticks, within
+ one second. This is a 16-bit unsigned integer with a maximum value of
+ 0xffff and a minimum value of 1.
+
+.. _v4l2-hevc-tier:
+
+``V4L2_CID_MPEG_VIDEO_HEVC_TIER``
+ (enum)
+
+enum v4l2_mpeg_video_hevc_tier -
+ TIER_FLAG specifies the tier information of the HEVC encoded picture.
+ Tiers were made to deal with applications that differ in terms of
+ maximum bit rate. Setting the flag to 0 selects the Main tier and
+ setting it to 1 selects the High tier. The High tier is for
+ applications requiring high bit rates.
+
+.. raw:: latex
+
+ \footnotesize
+
+.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_TIER_MAIN``
+ - Main tier.
+ * - ``V4L2_MPEG_VIDEO_HEVC_TIER_HIGH``
+ - High tier.
+
+.. raw:: latex
+
+ \normalsize
+
+
+``V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (integer)``
+ Selects HEVC maximum coding unit depth.
+
+.. _v4l2-hevc-loop-filter-mode:
+
+``V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE``
+ (enum)
+
+enum v4l2_mpeg_video_hevc_loop_filter_mode -
+ Loop filter mode for HEVC encoder. Possible values are:
+
+.. raw:: latex
+
+ \footnotesize
+
+.. tabularcolumns:: |p{10.7cm}|p{6.3cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED``
+ - Loop filter is disabled.
+ * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED``
+ - Loop filter is enabled.
+ * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY``
+ - Loop filter is disabled at the slice boundary.
+
+.. raw:: latex
+
+ \normalsize
+
+
+``V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (integer)``
+ Selects HEVC loop filter beta offset. The valid range is [-6, +6].
+
+``V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (integer)``
+ Selects HEVC loop filter tc offset. The valid range is [-6, +6].
+
+.. _v4l2-hevc-refresh-type:
+
+``V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE``
+ (enum)
+
+enum v4l2_mpeg_video_hevc_hier_refresh_type -
+ Selects the refresh type for the HEVC encoder.
+ The host has to specify the period in
+ V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD.
+
+.. raw:: latex
+
+ \footnotesize
+
+.. tabularcolumns:: |p{8.0cm}|p{9.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE``
+ - Don't use refresh.
+ * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA``
+ - Use CRA (Clean Random Access Unit) picture encoding.
+ * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR``
+ - Use IDR (Instantaneous Decoding Refresh) picture encoding.
+
+.. raw:: latex
+
+ \normalsize
+
+
+``V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (integer)``
+ Selects the refresh period for the HEVC encoder.
+ This specifies the number of I pictures between two CRA/IDR pictures.
+ This is valid only if REFRESH_TYPE is not 0.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (boolean)``
+ Indicates HEVC lossless encoding. Setting it to 0 disables lossless
+ encoding. Setting it to 1 enables lossless encoding.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (boolean)``
+ Indicates constant intra prediction for HEVC encoder. Specifies the
+ constrained intra prediction in which intra largest coding unit (LCU)
+ prediction is performed by using residual data and decoded samples of
+ neighboring intra LCU only. Setting the value to 1 enables constant intra
+ prediction and setting the value to 0 disables constant intra prediction.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (boolean)``
+ Indicates wavefront parallel processing for HEVC encoder. Setting it to 0
+ disables the feature and setting it to 1 enables the wavefront parallel
+ processing.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (boolean)``
+ Setting the value to 1 enables combination of P and B frame for HEVC
+ encoder.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (boolean)``
+ Indicates temporal identifier for HEVC encoder which is enabled by
+ setting the value to 1.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (boolean)``
+ Indicates bi-linear interpolation is conditionally used in the intra
+ prediction filtering process in the CVS when set to 1. Indicates bi-linear
+ interpolation is not used in the CVS when set to 0.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (integer)``
+ Indicates maximum number of merge candidate motion vectors.
+ Values are from 0 to 4.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (boolean)``
+ Indicates temporal motion vector prediction for HEVC encoder. Setting it to
+ 1 enables the prediction. Setting it to 0 disables the prediction.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (boolean)``
+ Specifies whether the HEVC encoder generates a stream with a length
+ field instead of a start code pattern. The size of the length field
+ is configurable through the
+ V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD control. Setting the
+ value to 0 disables encoding without a startcode pattern. Setting
+ the value to 1 enables encoding without a startcode pattern.
+
+.. _v4l2-hevc-size-of-length-field:
+
+``V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD``
+ (enum)
+
+enum v4l2_mpeg_video_hevc_size_of_length_field -
+ Indicates the size of the length field.
+ This is valid when encoding without a startcode pattern
+ (``V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE``) is enabled.
+
+.. raw:: latex
+
+ \footnotesize
+
+.. tabularcolumns:: |p{6.0cm}|p{11.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_0``
+ - Generate start code pattern (Normal).
+ * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_1``
+ - Generate size of length field instead of start code pattern and length is 1.
+ * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_2``
+ - Generate size of length field instead of start code pattern and length is 2.
+ * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_4``
+ - Generate size of length field instead of start code pattern and length is 4.
+
+.. raw:: latex
+
+ \normalsize
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (integer)``
+ Indicates bit rate for hierarchical coding layer 0 for HEVC encoder.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (integer)``
+ Indicates bit rate for hierarchical coding layer 1 for HEVC encoder.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (integer)``
+ Indicates bit rate for hierarchical coding layer 2 for HEVC encoder.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (integer)``
+ Indicates bit rate for hierarchical coding layer 3 for HEVC encoder.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (integer)``
+ Indicates bit rate for hierarchical coding layer 4 for HEVC encoder.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (integer)``
+ Indicates bit rate for hierarchical coding layer 5 for HEVC encoder.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (integer)``
+ Indicates bit rate for hierarchical coding layer 6 for HEVC encoder.
+
+``V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (integer)``
+ Selects the number of P reference pictures required for the HEVC
+ encoder. A P-frame can use 1 or 2 frames for reference.
+
+``V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (integer)``
+ Indicates whether to generate SPS and PPS at every IDR. Setting it
+ to 0 disables generating SPS and PPS at every IDR. Setting it to 1
+ enables generating SPS and PPS at every IDR.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-detect.rst b/Documentation/media/uapi/v4l/ext-ctrls-detect.rst
new file mode 100644
index 000000000000..8a45ce642829
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-detect.rst
@@ -0,0 +1,71 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _detect-controls:
+
+************************
+Detect Control Reference
+************************
+
+The Detect class includes controls for common features of various motion
+or object detection capable devices.
+
+
+.. _detect-control-id:
+
+Detect Control IDs
+==================
+
+``V4L2_CID_DETECT_CLASS (class)``
+ The Detect class descriptor. Calling
+ :ref:`VIDIOC_QUERYCTRL` for this control will
+ return a description of this control class.
+
+``V4L2_CID_DETECT_MD_MODE (menu)``
+ Sets the motion detection mode.
+
+.. tabularcolumns:: |p{7.5cm}|p{10.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_DETECT_MD_MODE_DISABLED``
+ - Disable motion detection.
+ * - ``V4L2_DETECT_MD_MODE_GLOBAL``
+ - Use a single motion detection threshold.
+ * - ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID``
+ - The image is divided into a grid, each cell with its own motion
+ detection threshold. These thresholds are set through the
+ ``V4L2_CID_DETECT_MD_THRESHOLD_GRID`` matrix control.
+ * - ``V4L2_DETECT_MD_MODE_REGION_GRID``
+ - The image is divided into a grid, each cell with its own region
+ value that specifies which per-region motion detection thresholds
+ should be used. Each region has its own thresholds. How these
+ per-region thresholds are set up is driver-specific. The region
+ values for the grid are set through the
+ ``V4L2_CID_DETECT_MD_REGION_GRID`` matrix control.
+
+
+
+``V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD (integer)``
+ Sets the global motion detection threshold to be used with the
+ ``V4L2_DETECT_MD_MODE_GLOBAL`` motion detection mode.
+
+``V4L2_CID_DETECT_MD_THRESHOLD_GRID (__u16 matrix)``
+ Sets the motion detection thresholds for each cell in the grid. To
+ be used with the ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID`` motion
+ detection mode. Matrix element (0, 0) represents the cell at the
+ top-left of the grid.
+
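+A sketch of filling the grid, assuming ``fd`` is an open device node;
+the grid dimensions are queried rather than hard-coded (error
+handling trimmed):
+
+.. code-block:: c
+
+    #include <stdlib.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void set_md_thresholds(int fd, __u16 threshold)
+    {
+        struct v4l2_query_ext_ctrl q = {
+            .id = V4L2_CID_DETECT_MD_THRESHOLD_GRID,
+        };
+
+        if (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &q))
+            return;
+
+        __u16 *grid = calloc(q.elems, sizeof(*grid));
+        for (unsigned int i = 0; i < q.elems; i++)
+            grid[i] = threshold;        /* same threshold for every cell */
+
+        struct v4l2_ext_control ctrl = {
+            .id = V4L2_CID_DETECT_MD_THRESHOLD_GRID,
+            .size = q.elems * sizeof(__u16),
+            .p_u16 = grid,
+        };
+        struct v4l2_ext_controls arg = {
+            .ctrl_class = V4L2_CTRL_CLASS_DETECT,
+            .count = 1,
+            .controls = &ctrl,
+        };
+
+        ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
+        free(grid);
+    }
+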
+``V4L2_CID_DETECT_MD_REGION_GRID (__u8 matrix)``
+ Sets the motion detection region value for each cell in the grid. To
+ be used with the ``V4L2_DETECT_MD_MODE_REGION_GRID`` motion
+ detection mode. Matrix element (0, 0) represents the cell at the
+ top-left of the grid.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-dv.rst b/Documentation/media/uapi/v4l/ext-ctrls-dv.rst
new file mode 100644
index 000000000000..57edf211875c
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-dv.rst
@@ -0,0 +1,166 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _dv-controls:
+
+*******************************
+Digital Video Control Reference
+*******************************
+
+The Digital Video control class is intended to control receivers and
+transmitters for `VGA <http://en.wikipedia.org/wiki/Vga>`__,
+`DVI <http://en.wikipedia.org/wiki/Digital_Visual_Interface>`__
+(Digital Visual Interface), HDMI (:ref:`hdmi`) and DisplayPort
+(:ref:`dp`). These controls are generally expected to be private to
+the receiver or transmitter subdevice that implements them, so they are
+only exposed on the ``/dev/v4l-subdev*`` device node.
+
+.. note::
+
+ These devices can have multiple input or output pads which are
+ hooked up to e.g. HDMI connectors. Even though the subdevice will
+ receive or transmit video from/to only one of those pads, the other pads
+ can still be active when it comes to EDID (Extended Display
+ Identification Data, :ref:`vesaedid`) and HDCP (High-bandwidth Digital
+ Content Protection System, :ref:`hdcp`) processing, allowing the
+ device to do the fairly slow EDID/HDCP handling in advance. This allows
+ for quick switching between connectors.
+
+These pads appear in several of the controls in this section as
+bitmasks, one bit for each pad. Bit 0 corresponds to pad 0, bit 1 to pad
+1, etc. The maximum value of the control is the bitmask of all valid pads.
+
+
+.. _dv-control-id:
+
+Digital Video Control IDs
+=========================
+
+``V4L2_CID_DV_CLASS (class)``
+ The Digital Video class descriptor.
+
+``V4L2_CID_DV_TX_HOTPLUG (bitmask)``
+ Many connectors have a hotplug pin which is high if EDID information
+ is available from the source. This control shows the state of the
+ hotplug pin as seen by the transmitter. Each bit corresponds to an
+ output pad on the transmitter. If an output pad does not have an
+ associated hotplug pin, then the bit for that pad will be 0. This
+ read-only control is applicable to DVI-D, HDMI and DisplayPort
+ connectors.
+
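+A sketch of polling the hotplug state of one output pad on a
+transmitter sub-device (the device path is hypothetical):
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void check_hotplug(unsigned int pad)
+    {
+        int fd = open("/dev/v4l-subdev0", O_RDWR);
+        struct v4l2_ext_control ctrl = { .id = V4L2_CID_DV_TX_HOTPLUG };
+        struct v4l2_ext_controls arg = {
+            .ctrl_class = V4L2_CTRL_CLASS_DV,
+            .count = 1,
+            .controls = &ctrl,
+        };
+
+        if (fd >= 0 && ioctl(fd, VIDIOC_G_EXT_CTRLS, &arg) == 0)
+            printf("pad %u: hotplug %s\n", pad,
+                   (ctrl.value >> pad) & 1 ? "detected" : "absent");
+    }
+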
+``V4L2_CID_DV_TX_RXSENSE (bitmask)``
+ Rx Sense is the detection of pull-ups on the TMDS clock lines. This
+ normally means that the sink has left/entered standby (i.e. the
+ transmitter can sense that the receiver is ready to receive video).
+ Each bit corresponds to an output pad on the transmitter. If an
+ output pad does not have an associated Rx Sense, then the bit for
+ that pad will be 0. This read-only control is applicable to DVI-D
+ and HDMI devices.
+
+``V4L2_CID_DV_TX_EDID_PRESENT (bitmask)``
+ When the transmitter sees the hotplug signal from the receiver it
+ will attempt to read the EDID. If set, then the transmitter has read
+ at least the first block (= 128 bytes). Each bit corresponds to an
+ output pad on the transmitter. If an output pad does not support
+ EDIDs, then the bit for that pad will be 0. This read-only control
+ is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors.
+
+``V4L2_CID_DV_TX_MODE``
+ (enum)
+
+enum v4l2_dv_tx_mode -
+ HDMI transmitters can transmit in DVI-D mode (just video) or in HDMI
+ mode (video + audio + auxiliary data). This control selects which
+ mode to use: V4L2_DV_TX_MODE_DVI_D or V4L2_DV_TX_MODE_HDMI.
+ This control is applicable to HDMI connectors.
+
+``V4L2_CID_DV_TX_RGB_RANGE``
+ (enum)
+
+enum v4l2_dv_rgb_range -
+ Select the quantization range for RGB output. V4L2_DV_RANGE_AUTO
+ follows the RGB quantization range specified in the standard for the
+ video interface (i.e. :ref:`cea861` for HDMI).
+ V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the
+ standard to be compatible with sinks that have not implemented the
+ standard correctly (unfortunately quite common for HDMI and DVI-D).
+ Full range allows all possible values to be used whereas limited
+ range sets the range to (16 << (N-8)) to (235 << (N-8)) where N is
+ the number of bits per component: 16 to 235 for 8-bit components, 64
+ to 940 for 10-bit. This control is applicable to VGA,
+ DVI-A/D, HDMI and DisplayPort connectors.
+
+``V4L2_CID_DV_TX_IT_CONTENT_TYPE``
+ (enum)
+
+enum v4l2_dv_it_content_type -
+ Configures the IT Content Type of the transmitted video. This
+ information is sent over HDMI and DisplayPort connectors as part of
+ the AVI InfoFrame. The term 'IT Content' is used for content that
+ originates from a computer as opposed to content from a TV broadcast
+ or an analog source. The enum v4l2_dv_it_content_type defines
+ the possible content types:
+
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_DV_IT_CONTENT_TYPE_GRAPHICS``
+ - Graphics content. Pixel data should be passed unfiltered and
+ without analog reconstruction.
+ * - ``V4L2_DV_IT_CONTENT_TYPE_PHOTO``
+ - Photo content. The content is derived from digital still pictures.
+ The content should be passed through with minimal scaling and
+ picture enhancements.
+ * - ``V4L2_DV_IT_CONTENT_TYPE_CINEMA``
+ - Cinema content.
+ * - ``V4L2_DV_IT_CONTENT_TYPE_GAME``
+ - Game content. Audio and video latency should be minimized.
+ * - ``V4L2_DV_IT_CONTENT_TYPE_NO_ITC``
+ - No IT Content information is available and the ITC bit in the AVI
+ InfoFrame is set to 0.
+
+
+
+``V4L2_CID_DV_RX_POWER_PRESENT (bitmask)``
+ Detects whether the receiver receives power from the source (e.g.
+ HDMI carries 5V on one of the pins). This is often used to power an
+ eeprom which contains EDID information, such that the source can
+ read the EDID even if the sink is in standby/power off. Each bit
+ corresponds to an input pad on the receiver. If an input pad
+ cannot detect whether power is present, then the bit for that pad
+ will be 0. This read-only control is applicable to DVI-D, HDMI and
+ DisplayPort connectors.
+
+``V4L2_CID_DV_RX_RGB_RANGE``
+ (enum)
+
+enum v4l2_dv_rgb_range -
+ Select the quantization range for RGB input. V4L2_DV_RANGE_AUTO
+ follows the RGB quantization range specified in the standard for the
+ video interface (i.e. :ref:`cea861` for HDMI).
+ V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the
+ standard to be compatible with sources that have not implemented the
+ standard correctly (unfortunately quite common for HDMI and DVI-D).
+ Full range allows all possible values to be used whereas limited
+ range sets the range to (16 << (N-8)) to (235 << (N-8)) where N is
+ the number of bits per component: 16 to 235 for 8-bit components, 64
+ to 940 for 10-bit. This control is applicable to VGA,
+ DVI-A/D, HDMI and DisplayPort connectors.
+
+``V4L2_CID_DV_RX_IT_CONTENT_TYPE``
+ (enum)
+
+enum v4l2_dv_it_content_type -
+ Reads the IT Content Type of the received video. This information is
+ sent over HDMI and DisplayPort connectors as part of the AVI
+ InfoFrame. The term 'IT Content' is used for content that originates
+ from a computer as opposed to content from a TV broadcast or an
+ analog source. See ``V4L2_CID_DV_TX_IT_CONTENT_TYPE`` for the
+ available content types.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-flash.rst b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
new file mode 100644
index 000000000000..5f30791c35b5
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
@@ -0,0 +1,192 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _flash-controls:
+
+***********************
+Flash Control Reference
+***********************
+
+The V4L2 flash controls are intended to provide generic access to flash
+controller devices. Flash controller devices are typically used in
+digital cameras.
+
+The interface can support both LED and xenon flash devices. As of
+this writing, there is no xenon flash driver using this interface.
+
+
+.. _flash-controls-use-cases:
+
+Supported use cases
+===================
+
+
+Unsynchronised LED flash (software strobe)
+------------------------------------------
+
+Unsynchronised LED flash is controlled directly by the host, just as
+the sensor is. The flash must be enabled by the host before the
+exposure of the image starts and disabled once it ends. The host is
+fully responsible for the timing of the flash.
+
+Example of such device: Nokia N900.
+
+
+Synchronised LED flash (hardware strobe)
+----------------------------------------
+
+The synchronised LED flash is pre-programmed by the host (power and
+timeout) but controlled by the sensor through a strobe signal from the
+sensor to the flash.
+
+The sensor controls the flash duration and timing. This information
+typically must be made available to the sensor.
+
+
+LED flash as torch
+------------------
+
+LED flash may be used as a torch, either in conjunction with another
+use case involving the camera or on its own.
+
+
+.. _flash-control-id:
+
+Flash Control IDs
+-----------------
+
+``V4L2_CID_FLASH_CLASS (class)``
+ The FLASH class descriptor.
+
+``V4L2_CID_FLASH_LED_MODE (menu)``
+ Defines the mode of the flash LED, the high-power white LED attached
+ to the flash controller. Setting this control may not be possible in
+ the presence of some faults. See V4L2_CID_FLASH_FAULT.
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_FLASH_LED_MODE_NONE``
+ - Off.
+ * - ``V4L2_FLASH_LED_MODE_FLASH``
+ - Flash mode.
+ * - ``V4L2_FLASH_LED_MODE_TORCH``
+ - Torch mode. See V4L2_CID_FLASH_TORCH_INTENSITY.
+
+
+
+``V4L2_CID_FLASH_STROBE_SOURCE (menu)``
+ Defines the source of the flash LED strobe.
+
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_FLASH_STROBE_SOURCE_SOFTWARE``
+ - The flash strobe is triggered by using the
+ V4L2_CID_FLASH_STROBE control.
+ * - ``V4L2_FLASH_STROBE_SOURCE_EXTERNAL``
+ - The flash strobe is triggered by an external source. Typically
+ this is a sensor, which makes it possible to synchronise the
+ flash strobe start with the exposure start.
+
+
+
+``V4L2_CID_FLASH_STROBE (button)``
+ Strobe flash. Valid when V4L2_CID_FLASH_LED_MODE is set to
+ V4L2_FLASH_LED_MODE_FLASH and V4L2_CID_FLASH_STROBE_SOURCE
+ is set to V4L2_FLASH_STROBE_SOURCE_SOFTWARE. Setting this
+ control may not be possible in the presence of some faults. See
+ V4L2_CID_FLASH_FAULT.
+
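+A sketch of a software-strobe sequence: configure the mode and strobe
+source first, then trigger the strobe (``fd`` is an open flash
+sub-device node; error handling omitted):
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void sw_strobe(int fd)
+    {
+        struct v4l2_ext_control setup[2] = {
+            { .id = V4L2_CID_FLASH_LED_MODE,
+              .value = V4L2_FLASH_LED_MODE_FLASH },
+            { .id = V4L2_CID_FLASH_STROBE_SOURCE,
+              .value = V4L2_FLASH_STROBE_SOURCE_SOFTWARE },
+        };
+        struct v4l2_ext_control strobe = {
+            .id = V4L2_CID_FLASH_STROBE,    /* button: value is ignored */
+        };
+        struct v4l2_ext_controls arg = {
+            .ctrl_class = V4L2_CTRL_CLASS_FLASH,
+            .count = 2,
+            .controls = setup,
+        };
+
+        ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
+
+        arg.count = 1;
+        arg.controls = &strobe;
+        ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
+    }
+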
+``V4L2_CID_FLASH_STROBE_STOP (button)``
+ Stop flash strobe immediately.
+
+``V4L2_CID_FLASH_STROBE_STATUS (boolean)``
+ Strobe status: whether the flash is strobing at the moment or not.
+ This is a read-only control.
+
+``V4L2_CID_FLASH_TIMEOUT (integer)``
+ Hardware timeout for flash. The flash strobe is stopped after this
+ period of time has passed from the start of the strobe.
+
+``V4L2_CID_FLASH_INTENSITY (integer)``
+ Intensity of the flash strobe when the flash LED is in flash mode
+ (V4L2_FLASH_LED_MODE_FLASH). The unit should be milliamps (mA)
+ if possible.
+
+``V4L2_CID_FLASH_TORCH_INTENSITY (integer)``
+ Intensity of the flash LED in torch mode
+ (V4L2_FLASH_LED_MODE_TORCH). The unit should be milliamps (mA)
+ if possible. Setting this control may not be possible in the
+ presence of some faults. See V4L2_CID_FLASH_FAULT.
+
+``V4L2_CID_FLASH_INDICATOR_INTENSITY (integer)``
+ Intensity of the indicator LED. The indicator LED may be fully
+ independent of the flash LED. The unit should be microamps (uA) if
+ possible.
+
+``V4L2_CID_FLASH_FAULT (bitmask)``
+ Faults related to the flash. The faults indicate specific problems
+ in the flash chip itself or the LEDs attached to it. Faults may
+ prevent further use of some of the flash controls. In particular,
+ V4L2_CID_FLASH_LED_MODE is set to V4L2_FLASH_LED_MODE_NONE
+ if the fault affects the flash LED. Exactly which faults have such
+ an effect is chip dependent. Reading the faults resets the control
+ and returns the chip to a usable state if possible; a read sketch
+ follows the table below.
+
+.. tabularcolumns:: |p{8.0cm}|p{9.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_FLASH_FAULT_OVER_VOLTAGE``
+ - Flash controller voltage to the flash LED has exceeded the limit
+ specific to the flash controller.
+ * - ``V4L2_FLASH_FAULT_TIMEOUT``
+ - The flash strobe was still on when the timeout set by the user
+ (the V4L2_CID_FLASH_TIMEOUT control) expired. Not all flash
+ controllers may set this in all such conditions.
+ * - ``V4L2_FLASH_FAULT_OVER_TEMPERATURE``
+ - The flash controller has overheated.
+ * - ``V4L2_FLASH_FAULT_SHORT_CIRCUIT``
+ - The short circuit protection of the flash controller has been
+ triggered.
+ * - ``V4L2_FLASH_FAULT_OVER_CURRENT``
+ - Current in the LED power supply has exceeded the limit specific to
+ the flash controller.
+ * - ``V4L2_FLASH_FAULT_INDICATOR``
+ - The flash controller has detected a short or open circuit
+ condition on the indicator LED.
+ * - ``V4L2_FLASH_FAULT_UNDER_VOLTAGE``
+ - Flash controller voltage to the flash LED has been below the
+ minimum limit specific to the flash controller.
+ * - ``V4L2_FLASH_FAULT_INPUT_VOLTAGE``
+ - The input voltage of the flash controller is below the limit under
+ which strobing the flash at full current will not be possible. The
+ condition persists until this flag is no longer set.
+ * - ``V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE``
+ - The temperature of the LED has exceeded its allowed upper limit.
+
+
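+A sketch of reading and decoding the fault bitmask; per the text
+above, the read itself attempts to reset the reported faults:
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void report_faults(int fd)
+    {
+        struct v4l2_ext_control ctrl = { .id = V4L2_CID_FLASH_FAULT };
+        struct v4l2_ext_controls arg = {
+            .ctrl_class = V4L2_CTRL_CLASS_FLASH,
+            .count = 1,
+            .controls = &ctrl,
+        };
+
+        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &arg))
+            return;
+        if (ctrl.value & V4L2_FLASH_FAULT_OVER_TEMPERATURE)
+            fprintf(stderr, "flash controller overheated\n");
+        if (ctrl.value & V4L2_FLASH_FAULT_TIMEOUT)
+            fprintf(stderr, "strobe timeout expired\n");
+    }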
+
+``V4L2_CID_FLASH_CHARGE (boolean)``
+ Enable or disable charging of the xenon flash capacitor.
+
+``V4L2_CID_FLASH_READY (boolean)``
+ Is the flash ready to strobe? Xenon flashes require their capacitors
+ charged before strobing. LED flashes often require a cooldown period
+ after strobe during which another strobe will not be possible. This
+ is a read-only control.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-fm-rx.rst b/Documentation/media/uapi/v4l/ext-ctrls-fm-rx.rst
new file mode 100644
index 000000000000..3ed6dd7f586d
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-fm-rx.rst
@@ -0,0 +1,95 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _fm-rx-controls:
+
+*****************************
+FM Receiver Control Reference
+*****************************
+
+The FM Receiver (FM_RX) class includes controls for common features of
+FM reception capable devices.
+
+
+.. _fm-rx-control-id:
+
+FM_RX Control IDs
+=================
+
+``V4L2_CID_FM_RX_CLASS (class)``
+ The FM_RX class descriptor. Calling
+ :ref:`VIDIOC_QUERYCTRL` for this control will
+ return a description of this control class.
+
+``V4L2_CID_RDS_RECEPTION (boolean)``
+ Enables/disables RDS reception by the radio tuner.
+
+``V4L2_CID_RDS_RX_PTY (integer)``
+ Gets the RDS Programme Type field. This encodes up to 31 pre-defined
+ programme types.
+
+``V4L2_CID_RDS_RX_PS_NAME (string)``
+ Gets the Programme Service name (PS_NAME). It is intended for
+ static display on a receiver. It is the primary aid to listeners in
+ programme service identification and selection. In Annex E of
+ :ref:`iec62106`, the RDS specification, there is a full
+ description of the correct character encoding for Programme Service
+ name strings. Also according to the RDS specification, PS is usually
+ a single eight-character text. However, it is also possible to find
+ receivers which can scroll strings sized as 8 x N characters. So,
+ this control must be configured with steps of 8 characters. As a
+ result, it must always contain a string whose size is a multiple
+ of 8.
+
+``V4L2_CID_RDS_RX_RADIO_TEXT (string)``
+ Gets the Radio Text info. It is a textual description of what is
+ being broadcast. RDS Radio Text can be applied when the broadcaster
+ wishes to transmit longer PS names, programme-related information or
+ any other text. In these cases, RadioText can be used in addition to
+ ``V4L2_CID_RDS_RX_PS_NAME``. The encoding for Radio Text strings is
+ also fully described in Annex E of :ref:`iec62106`. The length of
+ Radio Text strings depends on which RDS Block is being used to
+ transmit it, either 32 (2A block) or 64 (2B block). However, it is
+ also possible to find receivers which can scroll strings sized as 32
+ x N or 64 x N characters. So, this control must be configured with
+ steps of 32 or 64 characters. As a result, it must always contain a
+ string whose size is a multiple of 32 or 64.
+
+``V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT (boolean)``
+ If set, then a traffic announcement is in progress.
+
+``V4L2_CID_RDS_RX_TRAFFIC_PROGRAM (boolean)``
+ If set, then the tuned programme carries traffic announcements.
+
+``V4L2_CID_RDS_RX_MUSIC_SPEECH (boolean)``
+ If set, then this channel broadcasts music. If cleared, then it
+ broadcasts speech. If the transmitter doesn't make this distinction,
+ then it will be set.
+
+``V4L2_CID_TUNE_DEEMPHASIS``
+ (enum)
+
+enum v4l2_deemphasis -
+ Configures the de-emphasis value for reception. A de-emphasis filter
+ is applied to the broadcast to accentuate the high audio
+ frequencies. Depending on the region, a time constant of either 50
+ or 75 useconds is used. The enum v4l2_deemphasis defines possible
+ values for de-emphasis. Here they are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_DEEMPHASIS_DISABLED``
+ - No de-emphasis is applied.
+ * - ``V4L2_DEEMPHASIS_50_uS``
+ - A de-emphasis of 50 uS is used.
+ * - ``V4L2_DEEMPHASIS_75_uS``
+ - A de-emphasis of 75 uS is used.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-fm-tx.rst b/Documentation/media/uapi/v4l/ext-ctrls-fm-tx.rst
new file mode 100644
index 000000000000..db88346d99fd
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-fm-tx.rst
@@ -0,0 +1,188 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _fm-tx-controls:
+
+********************************
+FM Transmitter Control Reference
+********************************
+
+The FM Transmitter (FM_TX) class includes controls for common features
+of FM transmission capable devices. Currently this class includes
+parameters for audio compression, pilot tone generation, audio deviation
+limiter, RDS transmission and tuning power features.
+
+
+.. _fm-tx-control-id:
+
+FM_TX Control IDs
+=================
+
+``V4L2_CID_FM_TX_CLASS (class)``
+ The FM_TX class descriptor. Calling
+ :ref:`VIDIOC_QUERYCTRL` for this control will
+ return a description of this control class.
+
+``V4L2_CID_RDS_TX_DEVIATION (integer)``
+ Configures RDS signal frequency deviation level in Hz. The range and
+ step are driver-specific.
+
+``V4L2_CID_RDS_TX_PI (integer)``
+ Sets the RDS Programme Identification field for transmission.
+
+``V4L2_CID_RDS_TX_PTY (integer)``
+ Sets the RDS Programme Type field for transmission. This encodes up
+ to 31 pre-defined programme types.
+
+``V4L2_CID_RDS_TX_PS_NAME (string)``
+ Sets the Programme Service name (PS_NAME) for transmission. It is
+ intended for static display on a receiver. It is the primary aid to
+ listeners in programme service identification and selection. In
+ Annex E of :ref:`iec62106`, the RDS specification, there is a full
+ description of the correct character encoding for Programme Service
+ name strings. Also according to the RDS specification, PS is usually
+ a single eight-character text. However, it is also possible to find
+ receivers which can scroll strings sized as 8 x N characters. So,
+ this control must be configured with steps of 8 characters. As a
+ result, it must always contain a string whose size is a multiple of
+ 8 (see the sketch below).
+
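+A sketch of setting an 8-character PS name, assuming ``fd`` is an
+open radio transmitter node (error handling omitted):
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void set_ps_name(int fd)
+    {
+        char ps_name[9] = "RADIO FM";   /* 8 characters plus NUL */
+        struct v4l2_ext_control ctrl = {
+            .id = V4L2_CID_RDS_TX_PS_NAME,
+            .size = sizeof(ps_name),
+            .string = ps_name,
+        };
+        struct v4l2_ext_controls arg = {
+            .ctrl_class = V4L2_CTRL_CLASS_FM_TX,
+            .count = 1,
+            .controls = &ctrl,
+        };
+
+        ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
+    }
+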
+``V4L2_CID_RDS_TX_RADIO_TEXT (string)``
+ Sets the Radio Text info for transmission. It is a textual
+ description of what is being broadcast. RDS Radio Text can be
+ applied when the broadcaster wishes to transmit longer PS names,
+ programme-related information or any other text. In these cases,
+ RadioText should be used in addition to ``V4L2_CID_RDS_TX_PS_NAME``.
+ The encoding for Radio Text strings is also fully described in Annex
+ E of :ref:`iec62106`. The length of Radio Text strings depends on
+ which RDS Block is being used to transmit it, either 32 (2A block)
+ or 64 (2B block). However, it is also possible to find receivers
+ which can scroll strings sized as 32 x N or 64 x N characters. So,
+ this control must be configured with steps of 32 or 64 characters.
+ As a result, it must always contain a string whose size is a
+ multiple of 32 or 64.
+
+``V4L2_CID_RDS_TX_MONO_STEREO (boolean)``
+ Sets the Mono/Stereo bit of the Decoder Identification code. If set,
+ then the audio was recorded as stereo.
+
+``V4L2_CID_RDS_TX_ARTIFICIAL_HEAD (boolean)``
+ Sets the
+ `Artificial Head <http://en.wikipedia.org/wiki/Artificial_head>`__
+ bit of the Decoder Identification code. If set, then the audio was
+ recorded using an artificial head.
+
+``V4L2_CID_RDS_TX_COMPRESSED (boolean)``
+ Sets the Compressed bit of the Decoder Identification code. If set,
+ then the audio is compressed.
+
+``V4L2_CID_RDS_TX_DYNAMIC_PTY (boolean)``
+ Sets the Dynamic PTY bit of the Decoder Identification code. If set,
+ then the PTY code is dynamically switched.
+
+``V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT (boolean)``
+ If set, then a traffic announcement is in progress.
+
+``V4L2_CID_RDS_TX_TRAFFIC_PROGRAM (boolean)``
+ If set, then the tuned programme carries traffic announcements.
+
+``V4L2_CID_RDS_TX_MUSIC_SPEECH (boolean)``
+ If set, then this channel broadcasts music. If cleared, then it
+ broadcasts speech. If the transmitter doesn't make this distinction,
+ then it should be set.
+
+``V4L2_CID_RDS_TX_ALT_FREQS_ENABLE (boolean)``
+ If set, then transmit alternate frequencies.
+
+``V4L2_CID_RDS_TX_ALT_FREQS (__u32 array)``
+ The alternate frequencies in kHz units. The RDS standard allows for
+ up to 25 frequencies to be defined. Drivers may support fewer
+ frequencies so check the array size.
+
+``V4L2_CID_AUDIO_LIMITER_ENABLED (boolean)``
+ Enables or disables the audio deviation limiter feature. The limiter
+ is useful when trying to maximize the audio volume, minimize
+ receiver-generated distortion and prevent overmodulation.
+
+``V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (integer)``
+ Sets the audio deviation limiter feature release time. Unit is in
+ useconds. Step and range are driver-specific.
+
+``V4L2_CID_AUDIO_LIMITER_DEVIATION (integer)``
+ Configures audio frequency deviation level in Hz. The range and step
+ are driver-specific.
+
+``V4L2_CID_AUDIO_COMPRESSION_ENABLED (boolean)``
+ Enables or disables the audio compression feature. This feature
+ amplifies signals below the threshold by a fixed gain and compresses
+ audio signals above the threshold by the ratio of Threshold/(Gain +
+ Threshold).
+
+``V4L2_CID_AUDIO_COMPRESSION_GAIN (integer)``
+ Sets the gain for audio compression feature. It is a dB value. The
+ range and step are driver-specific.
+
+``V4L2_CID_AUDIO_COMPRESSION_THRESHOLD (integer)``
+ Sets the threshold level for the audio compression feature. It is a dB
+ value. The range and step are driver-specific.
+
+``V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME (integer)``
+ Sets the attack time for audio compression feature. It is a useconds
+ value. The range and step are driver-specific.
+
+``V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME (integer)``
+ Sets the release time for audio compression feature. It is a
+ useconds value. The range and step are driver-specific.
+
+``V4L2_CID_PILOT_TONE_ENABLED (boolean)``
+ Enables or disables the pilot tone generation feature.
+
+``V4L2_CID_PILOT_TONE_DEVIATION (integer)``
+ Configures pilot tone frequency deviation level. Unit is in Hz. The
+ range and step are driver-specific.
+
+``V4L2_CID_PILOT_TONE_FREQUENCY (integer)``
+ Configures pilot tone frequency value. Unit is in Hz. The range and
+ step are driver-specific.
+
+``V4L2_CID_TUNE_PREEMPHASIS``
+ (enum)
+
+enum v4l2_preemphasis -
+ Configures the pre-emphasis value for broadcasting. A pre-emphasis
+ filter is applied to the broadcast to accentuate the high audio
+ frequencies. Depending on the region, a time constant of either 50
+ or 75 useconds is used. The enum v4l2_preemphasis defines possible
+ values for pre-emphasis. Here they are:
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_PREEMPHASIS_DISABLED``
+ - No pre-emphasis is applied.
+ * - ``V4L2_PREEMPHASIS_50_uS``
+ - A pre-emphasis of 50 uS is used.
+ * - ``V4L2_PREEMPHASIS_75_uS``
+ - A pre-emphasis of 75 uS is used.
+
+
+
+``V4L2_CID_TUNE_POWER_LEVEL (integer)``
+ Sets the output power level for signal transmission. Unit is in
+ dBuV. Range and step are driver-specific.
+
+``V4L2_CID_TUNE_ANTENNA_CAPACITOR (integer)``
+ Selects the value of the antenna tuning capacitor manually, or
+ automatically if set to zero. Unit, range and step are
+ driver-specific.
+
+For more details about RDS specification, refer to :ref:`iec62106`
+document, from CENELEC.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-image-process.rst b/Documentation/media/uapi/v4l/ext-ctrls-image-process.rst
new file mode 100644
index 000000000000..22fc2d3e433d
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-image-process.rst
@@ -0,0 +1,63 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _image-process-controls:
+
+*******************************
+Image Process Control Reference
+*******************************
+
+The Image Process control class is intended for low-level control of
+image processing functions. Unlike ``V4L2_CID_IMAGE_SOURCE_CLASS``, the
+controls in this class affect the processing of the image, and do not
+control the capture of it.
+
+
+.. _image-process-control-id:
+
+Image Process Control IDs
+=========================
+
+``V4L2_CID_IMAGE_PROC_CLASS (class)``
+ The IMAGE_PROC class descriptor.
+
+``V4L2_CID_LINK_FREQ (integer menu)``
+ Data bus frequency. Together with the media bus pixel code and the
+ bus type (clock cycles per sample), the data bus frequency defines
+ the pixel rate (``V4L2_CID_PIXEL_RATE``) in the pixel array (or possibly
+ elsewhere, if the device is not an image sensor). The frame rate can
+ be calculated from the pixel clock, image width and height and
+ horizontal and vertical blanking. While the pixel rate control may
+ be defined elsewhere than in the subdev containing the pixel array,
+ the frame rate cannot be obtained from that information. This is
+ because only on the pixel array can it be assumed that the vertical
+ and horizontal blanking information is exact: no other blanking is
+ allowed in the pixel array. The selection of frame rate is performed
+ by selecting the desired horizontal and vertical blanking. The unit
+ of this control is Hz.
+
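+A sketch of the resulting frame rate computation; ``width`` and
+``height`` come from the sub-device format, ``hblank`` and ``vblank``
+from the Image Source class blanking controls, and ``pixel_rate``
+from the ``V4L2_CID_PIXEL_RATE`` control:
+
+.. code-block:: c
+
+    #include <linux/types.h>
+
+    /* One frame occupies (width + hblank) * (height + vblank) pixel
+     * periods, so the frame rate follows directly from the pixel rate.
+     */
+    double frame_rate(__u64 pixel_rate, __u32 width, __u32 hblank,
+                      __u32 height, __u32 vblank)
+    {
+        return (double)pixel_rate /
+               ((double)(width + hblank) * (height + vblank));
+    }
+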
+``V4L2_CID_PIXEL_RATE (64-bit integer)``
+ Pixel rate in the source pads of the subdev. This control is
+ read-only and its unit is pixels / second.
+
+``V4L2_CID_TEST_PATTERN (menu)``
+ Some capture/display/sensor devices have the capability to generate
+ test pattern images. These hardware specific test patterns can be
+ used to test if a device is working properly.
+
+``V4L2_CID_DEINTERLACING_MODE (menu)``
+ The video deinterlacing mode (such as Bob, Weave, ...). The menu items are
+ driver specific and are documented in :ref:`v4l-drivers`.
+
+``V4L2_CID_DIGITAL_GAIN (integer)``
+ Digital gain is the value by which all colour components
+ are multiplied. Typically the digital gain applied is the
+ control value divided by e.g. 0x100, meaning that to get no
+ digital gain the control value needs to be 0x100. The no-gain
+ configuration is also typically the default.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst b/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst
new file mode 100644
index 000000000000..2c3ab5796d76
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst
@@ -0,0 +1,57 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _image-source-controls:
+
+******************************
+Image Source Control Reference
+******************************
+
+The Image Source control class is intended for low-level control of
+image source devices such as image sensors. Such devices feature an
+analogue-to-digital converter and a bus transmitter to transmit the
+image data out of the device.
+
+
+.. _image-source-control-id:
+
+Image Source Control IDs
+========================
+
+``V4L2_CID_IMAGE_SOURCE_CLASS (class)``
+ The IMAGE_SOURCE class descriptor.
+
+``V4L2_CID_VBLANK (integer)``
+ Vertical blanking. The idle period after every frame during which no
+ image data is produced. The unit of vertical blanking is a line.
+ Every line has the length of the image width plus the horizontal
+ blanking, at the pixel rate defined by the ``V4L2_CID_PIXEL_RATE``
+ control in the same sub-device.
+
+``V4L2_CID_HBLANK (integer)``
+ Horizontal blanking. The idle period after every line of image data
+ during which no image data is produced. The unit of horizontal
+ blanking is pixels. See the sketch below for reading the blanking
+ and pixel rate controls together.
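+
+ The following sketch reads the blanking and pixel rate controls in
+ a single ``VIDIOC_G_EXT_CTRLS`` call and prints them; the
+ sub-device node path is hypothetical.
+
+ .. code-block:: c
+
+     #include <fcntl.h>
+     #include <stdio.h>
+     #include <unistd.h>
+     #include <sys/ioctl.h>
+     #include <linux/videodev2.h>
+
+     int main(void)
+     {
+             struct v4l2_ext_control ctrl[3] = {
+                     { .id = V4L2_CID_VBLANK },
+                     { .id = V4L2_CID_HBLANK },
+                     { .id = V4L2_CID_PIXEL_RATE },
+             };
+             struct v4l2_ext_controls ctrls = {
+                     /* allows mixing control classes in one call */
+                     .which = V4L2_CTRL_WHICH_CUR_VAL,
+                     .count = 3,
+                     .controls = ctrl,
+             };
+             int fd = open("/dev/v4l-subdev0", O_RDWR); /* hypothetical */
+
+             if (fd < 0 || ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) < 0)
+                     return 1;
+             printf("vblank %d lines, hblank %d pixels, %lld pixels/s\n",
+                    ctrl[0].value, ctrl[1].value,
+                    (long long)ctrl[2].value64);
+             close(fd);
+             return 0;
+     }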
+
+``V4L2_CID_ANALOGUE_GAIN (integer)``
+ Analogue gain is a gain that affects all colour components in the
+ pixel matrix. The gain operation is performed in the analogue
+ domain before A/D conversion.
+
+``V4L2_CID_TEST_PATTERN_RED (integer)``
+ Test pattern red colour component.
+
+``V4L2_CID_TEST_PATTERN_GREENR (integer)``
+ Test pattern green (next to red) colour component.
+
+``V4L2_CID_TEST_PATTERN_BLUE (integer)``
+ Test pattern blue colour component.
+
+``V4L2_CID_TEST_PATTERN_GREENB (integer)``
+ Test pattern green (next to blue) colour component.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst b/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst
new file mode 100644
index 000000000000..cf9cd8a9f9b4
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst
@@ -0,0 +1,113 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _jpeg-controls:
+
+**********************
+JPEG Control Reference
+**********************
+
+The JPEG class includes controls for common features of JPEG encoders
+and decoders. Currently it includes features for codecs implementing
+the progressive baseline DCT compression process with Huffman entropy
+coding.
+
+
+.. _jpeg-control-id:
+
+JPEG Control IDs
+================
+
+``V4L2_CID_JPEG_CLASS (class)``
+ The JPEG class descriptor. Calling
+ :ref:`VIDIOC_QUERYCTRL` for this control will
+ return a description of this control class.
+
+``V4L2_CID_JPEG_CHROMA_SUBSAMPLING (menu)``
+ The chroma subsampling factors describe how each component of an
+ input image is sampled, with respect to the maximum sample rate in
+ each spatial dimension. See :ref:`itu-t81`, clause A.1.1, for more
+ details. The ``V4L2_CID_JPEG_CHROMA_SUBSAMPLING`` control determines
+ how the Cb and Cr components are downsampled after converting an
+ input image from RGB to Y'CbCr color space.
+
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_444``
+ - No chroma subsampling, each pixel has Y, Cr and Cb values.
+ * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_422``
+ - Horizontally subsample Cr, Cb components by a factor of 2.
+ * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_420``
+ - Subsample Cr, Cb components horizontally and vertically by 2.
+ * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_411``
+ - Horizontally subsample Cr, Cb components by a factor of 4.
+ * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_410``
+ - Subsample Cr, Cb components horizontally by 4 and vertically by 2.
+ * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY``
+ - Use only luminance component.
+
+
+
+``V4L2_CID_JPEG_RESTART_INTERVAL (integer)``
+ The restart interval determines the interval at which RSTm markers
+ (m = 0..7) are inserted. The purpose of these markers is to
+ periodically reinitialize the encoding process, so that blocks of
+ an image can be processed independently. For the lossy compression
+ processes the restart interval unit is the MCU (Minimum Coded
+ Unit) and its value is contained in the DRI (Define Restart
+ Interval) marker. If the ``V4L2_CID_JPEG_RESTART_INTERVAL`` control
+ is set to 0, DRI and RSTm markers will not be inserted.
+
+.. _jpeg-quality-control:
+
+``V4L2_CID_JPEG_COMPRESSION_QUALITY (integer)``
+ The ``V4L2_CID_JPEG_COMPRESSION_QUALITY`` control determines the
+ trade-off between image quality and size. It provides a simpler
+ method for applications to control image quality, without the need
+ to reconfigure the luminance and chrominance quantization tables
+ directly. In cases where a driver uses quantization tables
+ configured directly by the application, using interfaces defined
+ elsewhere, the driver should set the
+ ``V4L2_CID_JPEG_COMPRESSION_QUALITY`` control to 0.
+
+ The value range of this control is driver-specific. Only positive,
+ non-zero values are meaningful. The recommended range is 1 to 100,
+ where larger values correspond to better image quality. A usage
+ sketch follows.
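+
+ Because the range is driver-specific, a portable application can
+ first query the advertised range and then pick a value inside it.
+ The sketch below does so; the device node path is hypothetical.
+
+ .. code-block:: c
+
+     #include <fcntl.h>
+     #include <stdio.h>
+     #include <unistd.h>
+     #include <sys/ioctl.h>
+     #include <linux/videodev2.h>
+
+     int main(void)
+     {
+             struct v4l2_query_ext_ctrl qc = {
+                     .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
+             };
+             struct v4l2_ext_control ctrl = {
+                     .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
+             };
+             struct v4l2_ext_controls ctrls = {
+                     .ctrl_class = V4L2_CTRL_CLASS_JPEG,
+                     .count = 1,
+                     .controls = &ctrl,
+             };
+             int fd = open("/dev/video0", O_RDWR); /* hypothetical */
+
+             if (fd < 0 || ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qc) < 0)
+                     return 1;
+             /* pick the upper quarter of the advertised range */
+             ctrl.value = qc.maximum - (qc.maximum - qc.minimum) / 4;
+             if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
+                     return 1;
+             printf("quality %d (range %lld..%lld)\n", ctrl.value,
+                    (long long)qc.minimum, (long long)qc.maximum);
+             close(fd);
+             return 0;
+     }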
+
+.. _jpeg-active-marker-control:
+
+``V4L2_CID_JPEG_ACTIVE_MARKER (bitmask)``
+ Specifies which JPEG markers are included in the compressed
+ stream. This control is valid only for encoders; a sketch of
+ composing the bitmask follows the table below.
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_JPEG_ACTIVE_MARKER_APP0``
+ - Application data segment APP\ :sub:`0`.
+ * - ``V4L2_JPEG_ACTIVE_MARKER_APP1``
+ - Application data segment APP\ :sub:`1`.
+ * - ``V4L2_JPEG_ACTIVE_MARKER_COM``
+ - Comment segment.
+ * - ``V4L2_JPEG_ACTIVE_MARKER_DQT``
+ - Quantization tables segment.
+ * - ``V4L2_JPEG_ACTIVE_MARKER_DHT``
+ - Huffman tables segment.
+
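+As a sketch, the bitmask is composed by OR-ing the marker flags and
+set like any other control; which markers a given encoder honours is
+driver-dependent.
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Request a typical marker set on an already-open encoder fd. */
+    static int enable_common_markers(int fd)
+    {
+            struct v4l2_ext_control ctrl = {
+                    .id = V4L2_CID_JPEG_ACTIVE_MARKER,
+                    .value = V4L2_JPEG_ACTIVE_MARKER_APP0 |
+                             V4L2_JPEG_ACTIVE_MARKER_COM |
+                             V4L2_JPEG_ACTIVE_MARKER_DQT |
+                             V4L2_JPEG_ACTIVE_MARKER_DHT,
+            };
+            struct v4l2_ext_controls ctrls = {
+                    .ctrl_class = V4L2_CTRL_CLASS_JPEG,
+                    .count = 1,
+                    .controls = &ctrl,
+            };
+
+            return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+    }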
+
+
+For more details about the JPEG specification, refer to :ref:`itu-t81`,
+:ref:`jfif`, and :ref:`w3c-jpeg-jfif`.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-rf-tuner.rst b/Documentation/media/uapi/v4l/ext-ctrls-rf-tuner.rst
new file mode 100644
index 000000000000..0fb85ba878dd
--- /dev/null
+++ b/Documentation/media/uapi/v4l/ext-ctrls-rf-tuner.rst
@@ -0,0 +1,96 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _rf-tuner-controls:
+
+**************************
+RF Tuner Control Reference
+**************************
+
+The RF Tuner (RF_TUNER) class includes controls for common features of
+devices that have an RF tuner.
+
+In this context, an RF tuner is the radio receiver circuit between the
+antenna and the demodulator. It receives the radio frequency (RF)
+signal from the antenna and converts it to a lower intermediate
+frequency (IF) or to a baseband frequency (BB). Tuners that can
+produce baseband output are often called zero-IF tuners. Older tuners
+were typically simple PLL tuners inside a metal box, while newer ones
+are highly integrated chips without a metal box, so-called "silicon
+tuners". These controls are mostly applicable to newer, feature-rich
+silicon tuners, simply because older tuners do not have many
+adjustable features.
+
+For more information about RF tuners, see
+`Tuner (radio) <http://en.wikipedia.org/wiki/Tuner_%28radio%29>`__
+and `RF front end <http://en.wikipedia.org/wiki/RF_front_end>`__
+on Wikipedia.
+
+
+.. _rf-tuner-control-id:
+
+RF_TUNER Control IDs
+====================
+
+``V4L2_CID_RF_TUNER_CLASS (class)``
+ The RF_TUNER class descriptor. Calling
+ :ref:`VIDIOC_QUERYCTRL` for this control will
+ return a description of this control class.
+
+``V4L2_CID_RF_TUNER_BANDWIDTH_AUTO (boolean)``
+ Enables/disables automatic configuration of the tuner radio channel
+ bandwidth. In automatic mode the bandwidth configuration is
+ performed by the driver.
+
+``V4L2_CID_RF_TUNER_BANDWIDTH (integer)``
+ Filters on the tuner signal path are used to filter the signal
+ according to the needs of the receiving party. The driver
+ configures the filters to fulfill the desired bandwidth
+ requirement. Used when ``V4L2_CID_RF_TUNER_BANDWIDTH_AUTO`` is not
+ set; see the sketch below. The unit is Hz. The range and step are
+ driver-specific.
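+
+ In manual mode a typical sequence is to clear the auto control and
+ set the bandwidth in the same ``VIDIOC_S_EXT_CTRLS`` call, as in
+ the sketch below; the device node path and the bandwidth value are
+ hypothetical.
+
+ .. code-block:: c
+
+     #include <fcntl.h>
+     #include <unistd.h>
+     #include <sys/ioctl.h>
+     #include <linux/videodev2.h>
+
+     int main(void)
+     {
+             struct v4l2_ext_control ctrl[2] = {
+                     { .id = V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, .value = 0 },
+                     { .id = V4L2_CID_RF_TUNER_BANDWIDTH, .value = 200000 },
+             };
+             struct v4l2_ext_controls ctrls = {
+                     .ctrl_class = V4L2_CTRL_CLASS_RF_TUNER,
+                     .count = 2,
+                     .controls = ctrl,
+             };
+             int fd = open("/dev/swradio0", O_RDWR); /* hypothetical */
+
+             if (fd < 0 || ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
+                     return 1;
+             close(fd);
+             return 0;
+     }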
+
+``V4L2_CID_RF_TUNER_LNA_GAIN_AUTO (boolean)``
+ Enables/disables the LNA automatic gain control (AGC).
+
+``V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO (boolean)``
+ Enables/disables the mixer automatic gain control (AGC).
+
+``V4L2_CID_RF_TUNER_IF_GAIN_AUTO (boolean)``
+ Enables/disables the IF automatic gain control (AGC).
+
+``V4L2_CID_RF_TUNER_RF_GAIN (integer)``
+ The RF amplifier is the very first amplifier on the receiver signal
+ path, right after the antenna input. The difference between the
+ LNA gain and the RF gain in this document is that the LNA gain is
+ integrated in the tuner chip while the RF gain comes from a
+ separate chip. There may be both RF and LNA gain controls in the
+ same device. The range and step are driver-specific.
+
+``V4L2_CID_RF_TUNER_LNA_GAIN (integer)``
+ The LNA (low noise amplifier) gain is the first gain stage on the
+ RF tuner signal path. It is located very close to the tuner antenna
+ input. Used when ``V4L2_CID_RF_TUNER_LNA_GAIN_AUTO`` is not set.
+ See ``V4L2_CID_RF_TUNER_RF_GAIN`` for how the RF gain and the LNA
+ gain differ from each other. The range and step are
+ driver-specific.
+
+``V4L2_CID_RF_TUNER_MIXER_GAIN (integer)``
+ The mixer gain is the second gain stage on the RF tuner signal
+ path. It is located inside the mixer block, where the RF signal is
+ down-converted. Used when ``V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO`` is
+ not set. The range and step are driver-specific.
+
+``V4L2_CID_RF_TUNER_IF_GAIN (integer)``
+ The IF gain is the last gain stage on the RF tuner signal path. It
+ is located at the output of the RF tuner and controls the signal
+ level of the intermediate frequency or baseband output. Used when
+ ``V4L2_CID_RF_TUNER_IF_GAIN_AUTO`` is not set. The range and step
+ are driver-specific.
+
+``V4L2_CID_RF_TUNER_PLL_LOCK (boolean)``
+ Indicates whether the synthesizer PLL is locked. The RF tuner is
+ receiving the given frequency when this control is set. This is a
+ read-only control.
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index 286a2dd7ec36..24274b398e63 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -9,9 +9,9 @@
.. _extended-controls:
-*****************
-Extended Controls
-*****************
+*********************
+Extended Controls API
+*********************
Introduction
@@ -181,3902 +181,3 @@ The flags field of struct :ref:`v4l2_queryctrl <v4l2-queryctrl>` also
contains hints on the behavior of the control. See the
:ref:`VIDIOC_QUERYCTRL` documentation for more
details.
-
-
-.. _mpeg-controls:
-
-Codec Control Reference
-=======================
-
-Below all controls within the Codec control class are described. First
-the generic controls, then controls specific for certain hardware.
-
-.. note::
-
- These controls are applicable to all codecs and not just MPEG. The
- defines are prefixed with V4L2_CID_MPEG/V4L2_MPEG as the controls
- were originally made for MPEG codecs and later extended to cover all
- encoding formats.
-
-
-Generic Codec Controls
-----------------------
-
-
-.. _mpeg-control-id:
-
-Codec Control IDs
-^^^^^^^^^^^^^^^^^
-
-``V4L2_CID_MPEG_CLASS (class)``
- The Codec class descriptor. Calling
- :ref:`VIDIOC_QUERYCTRL` for this control will
- return a description of this control class. This description can be
- used as the caption of a Tab page in a GUI, for example.
-
-.. _v4l2-mpeg-stream-type:
-
-``V4L2_CID_MPEG_STREAM_TYPE``
- (enum)
-
-enum v4l2_mpeg_stream_type -
- The MPEG-1, -2 or -4 output stream type. One cannot assume anything
- here. Each hardware MPEG encoder tends to support different subsets
- of the available MPEG stream types. This control is specific to
- multiplexed MPEG streams. The currently defined stream types are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_PS``
- - MPEG-2 program stream
- * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_TS``
- - MPEG-2 transport stream
- * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_SS``
- - MPEG-1 system stream
- * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_DVD``
- - MPEG-2 DVD-compatible stream
- * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_VCD``
- - MPEG-1 VCD-compatible stream
- * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD``
- - MPEG-2 SVCD-compatible stream
-
-
-
-``V4L2_CID_MPEG_STREAM_PID_PMT (integer)``
- Program Map Table Packet ID for the MPEG transport stream (default
- 16)
-
-``V4L2_CID_MPEG_STREAM_PID_AUDIO (integer)``
- Audio Packet ID for the MPEG transport stream (default 256)
-
-``V4L2_CID_MPEG_STREAM_PID_VIDEO (integer)``
- Video Packet ID for the MPEG transport stream (default 260)
-
-``V4L2_CID_MPEG_STREAM_PID_PCR (integer)``
- Packet ID for the MPEG transport stream carrying PCR fields (default
- 259)
-
-``V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (integer)``
- Audio ID for MPEG PES
-
-``V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (integer)``
- Video ID for MPEG PES
-
-.. _v4l2-mpeg-stream-vbi-fmt:
-
-``V4L2_CID_MPEG_STREAM_VBI_FMT``
- (enum)
-
-enum v4l2_mpeg_stream_vbi_fmt -
- Some cards can embed VBI data (e. g. Closed Caption, Teletext) into
- the MPEG stream. This control selects whether VBI data should be
- embedded, and if so, what embedding method should be used. The list
- of possible VBI formats depends on the driver. The currently defined
- VBI format types are:
-
-
-
-.. tabularcolumns:: |p{6 cm}|p{11.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_STREAM_VBI_FMT_NONE``
- - No VBI in the MPEG stream
- * - ``V4L2_MPEG_STREAM_VBI_FMT_IVTV``
- - VBI in private packets, IVTV format (documented in the kernel
- sources in the file
- ``Documentation/media/v4l-drivers/cx2341x.rst``)
-
-
-
-.. _v4l2-mpeg-audio-sampling-freq:
-
-``V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ``
- (enum)
-
-enum v4l2_mpeg_audio_sampling_freq -
- MPEG Audio sampling frequency. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100``
- - 44.1 kHz
- * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000``
- - 48 kHz
- * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000``
- - 32 kHz
-
-
-
-.. _v4l2-mpeg-audio-encoding:
-
-``V4L2_CID_MPEG_AUDIO_ENCODING``
- (enum)
-
-enum v4l2_mpeg_audio_encoding -
- MPEG Audio encoding. This control is specific to multiplexed MPEG
- streams. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_1``
- - MPEG-1/2 Layer I encoding
- * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_2``
- - MPEG-1/2 Layer II encoding
- * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_3``
- - MPEG-1/2 Layer III encoding
- * - ``V4L2_MPEG_AUDIO_ENCODING_AAC``
- - MPEG-2/4 AAC (Advanced Audio Coding)
- * - ``V4L2_MPEG_AUDIO_ENCODING_AC3``
- - AC-3 aka ATSC A/52 encoding
-
-
-
-.. _v4l2-mpeg-audio-l1-bitrate:
-
-``V4L2_CID_MPEG_AUDIO_L1_BITRATE``
- (enum)
-
-enum v4l2_mpeg_audio_l1_bitrate -
- MPEG-1/2 Layer I bitrate. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_32K``
- - 32 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_64K``
- - 64 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_96K``
- - 96 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_128K``
- - 128 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_160K``
- - 160 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_192K``
- - 192 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_224K``
- - 224 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_256K``
- - 256 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_288K``
- - 288 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_320K``
- - 320 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_352K``
- - 352 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_384K``
- - 384 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_416K``
- - 416 kbit/s
- * - ``V4L2_MPEG_AUDIO_L1_BITRATE_448K``
- - 448 kbit/s
-
-
-
-.. _v4l2-mpeg-audio-l2-bitrate:
-
-``V4L2_CID_MPEG_AUDIO_L2_BITRATE``
- (enum)
-
-enum v4l2_mpeg_audio_l2_bitrate -
- MPEG-1/2 Layer II bitrate. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_32K``
- - 32 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_48K``
- - 48 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_56K``
- - 56 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_64K``
- - 64 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_80K``
- - 80 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_96K``
- - 96 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_112K``
- - 112 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_128K``
- - 128 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_160K``
- - 160 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_192K``
- - 192 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_224K``
- - 224 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_256K``
- - 256 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_320K``
- - 320 kbit/s
- * - ``V4L2_MPEG_AUDIO_L2_BITRATE_384K``
- - 384 kbit/s
-
-
-
-.. _v4l2-mpeg-audio-l3-bitrate:
-
-``V4L2_CID_MPEG_AUDIO_L3_BITRATE``
- (enum)
-
-enum v4l2_mpeg_audio_l3_bitrate -
- MPEG-1/2 Layer III bitrate. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_32K``
- - 32 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_40K``
- - 40 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_48K``
- - 48 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_56K``
- - 56 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_64K``
- - 64 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_80K``
- - 80 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_96K``
- - 96 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_112K``
- - 112 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_128K``
- - 128 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_160K``
- - 160 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_192K``
- - 192 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_224K``
- - 224 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_256K``
- - 256 kbit/s
- * - ``V4L2_MPEG_AUDIO_L3_BITRATE_320K``
- - 320 kbit/s
-
-
-
-``V4L2_CID_MPEG_AUDIO_AAC_BITRATE (integer)``
- AAC bitrate in bits per second.
-
-.. _v4l2-mpeg-audio-ac3-bitrate:
-
-``V4L2_CID_MPEG_AUDIO_AC3_BITRATE``
- (enum)
-
-enum v4l2_mpeg_audio_ac3_bitrate -
- AC-3 bitrate. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_32K``
- - 32 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_40K``
- - 40 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_48K``
- - 48 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_56K``
- - 56 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_64K``
- - 64 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_80K``
- - 80 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_96K``
- - 96 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_112K``
- - 112 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_128K``
- - 128 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_160K``
- - 160 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_192K``
- - 192 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_224K``
- - 224 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_256K``
- - 256 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_320K``
- - 320 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_384K``
- - 384 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_448K``
- - 448 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_512K``
- - 512 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_576K``
- - 576 kbit/s
- * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_640K``
- - 640 kbit/s
-
-
-
-.. _v4l2-mpeg-audio-mode:
-
-``V4L2_CID_MPEG_AUDIO_MODE``
- (enum)
-
-enum v4l2_mpeg_audio_mode -
- MPEG Audio mode. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_MODE_STEREO``
- - Stereo
- * - ``V4L2_MPEG_AUDIO_MODE_JOINT_STEREO``
- - Joint Stereo
- * - ``V4L2_MPEG_AUDIO_MODE_DUAL``
- - Bilingual
- * - ``V4L2_MPEG_AUDIO_MODE_MONO``
- - Mono
-
-
-
-.. _v4l2-mpeg-audio-mode-extension:
-
-``V4L2_CID_MPEG_AUDIO_MODE_EXTENSION``
- (enum)
-
-enum v4l2_mpeg_audio_mode_extension -
- Joint Stereo audio mode extension. In Layer I and II they indicate
- which subbands are in intensity stereo. All other subbands are coded
- in stereo. Layer III is not (yet) supported. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4``
- - Subbands 4-31 in intensity stereo
- * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8``
- - Subbands 8-31 in intensity stereo
- * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12``
- - Subbands 12-31 in intensity stereo
- * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16``
- - Subbands 16-31 in intensity stereo
-
-
-
-.. _v4l2-mpeg-audio-emphasis:
-
-``V4L2_CID_MPEG_AUDIO_EMPHASIS``
- (enum)
-
-enum v4l2_mpeg_audio_emphasis -
- Audio Emphasis. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_EMPHASIS_NONE``
- - None
- * - ``V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS``
- - 50/15 microsecond emphasis
- * - ``V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17``
- - CCITT J.17
-
-
-
-.. _v4l2-mpeg-audio-crc:
-
-``V4L2_CID_MPEG_AUDIO_CRC``
- (enum)
-
-enum v4l2_mpeg_audio_crc -
- CRC method. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_CRC_NONE``
- - None
- * - ``V4L2_MPEG_AUDIO_CRC_CRC16``
- - 16 bit parity check
-
-
-
-``V4L2_CID_MPEG_AUDIO_MUTE (boolean)``
- Mutes the audio when capturing. This is not done by muting audio
- hardware, which can still produce a slight hiss, but in the encoder
- itself, guaranteeing a fixed and reproducible audio bitstream. 0 =
- unmuted, 1 = muted.
-
-.. _v4l2-mpeg-audio-dec-playback:
-
-``V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK``
- (enum)
-
-enum v4l2_mpeg_audio_dec_playback -
- Determines how monolingual audio should be played back. Possible
- values are:
-
-
-
-.. tabularcolumns:: |p{9.0cm}|p{8.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO``
- - Automatically determines the best playback mode.
- * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO``
- - Stereo playback.
- * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT``
- - Left channel playback.
- * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT``
- - Right channel playback.
- * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO``
- - Mono playback.
- * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO``
- - Stereo playback with swapped left and right channels.
-
-
-
-.. _v4l2-mpeg-audio-dec-multilingual-playback:
-
-``V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK``
- (enum)
-
-enum v4l2_mpeg_audio_dec_playback -
- Determines how multilingual audio should be played back.
-
-.. _v4l2-mpeg-video-encoding:
-
-``V4L2_CID_MPEG_VIDEO_ENCODING``
- (enum)
-
-enum v4l2_mpeg_video_encoding -
- MPEG Video encoding method. This control is specific to multiplexed
- MPEG streams. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_1``
- - MPEG-1 Video encoding
- * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_2``
- - MPEG-2 Video encoding
- * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC``
- - MPEG-4 AVC (H.264) Video encoding
-
-
-
-.. _v4l2-mpeg-video-aspect:
-
-``V4L2_CID_MPEG_VIDEO_ASPECT``
- (enum)
-
-enum v4l2_mpeg_video_aspect -
- Video aspect. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_ASPECT_1x1``
- * - ``V4L2_MPEG_VIDEO_ASPECT_4x3``
- * - ``V4L2_MPEG_VIDEO_ASPECT_16x9``
- * - ``V4L2_MPEG_VIDEO_ASPECT_221x100``
-
-
-
-``V4L2_CID_MPEG_VIDEO_B_FRAMES (integer)``
- Number of B-Frames (default 2)
-
-``V4L2_CID_MPEG_VIDEO_GOP_SIZE (integer)``
- GOP size (default 12)
-
-``V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (boolean)``
- GOP closure (default 1)
-
-``V4L2_CID_MPEG_VIDEO_PULLDOWN (boolean)``
- Enable 3:2 pulldown (default 0)
-
-.. _v4l2-mpeg-video-bitrate-mode:
-
-``V4L2_CID_MPEG_VIDEO_BITRATE_MODE``
- (enum)
-
-enum v4l2_mpeg_video_bitrate_mode -
- Video bitrate mode. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_VBR``
- - Variable bitrate
- * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_CBR``
- - Constant bitrate
-
-
-
-``V4L2_CID_MPEG_VIDEO_BITRATE (integer)``
- Video bitrate in bits per second.
-
-``V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (integer)``
- Peak video bitrate in bits per second. Must be larger or equal to
- the average video bitrate. It is ignored if the video bitrate mode
- is set to constant bitrate.
-
-``V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (integer)``
- For every captured frame, skip this many subsequent frames (default
- 0).
-
-``V4L2_CID_MPEG_VIDEO_MUTE (boolean)``
- "Mutes" the video to a fixed color when capturing. This is useful
- for testing, to produce a fixed video bitstream. 0 = unmuted, 1 =
- muted.
-
-``V4L2_CID_MPEG_VIDEO_MUTE_YUV (integer)``
- Sets the "mute" color of the video. The supplied 32-bit integer is
- interpreted as follows (bit 0 = least significant bit):
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - Bit 0:7
- - V chrominance information
- * - Bit 8:15
- - U chrominance information
- * - Bit 16:23
- - Y luminance information
- * - Bit 24:31
- - Must be zero.
-
-
-
-.. _v4l2-mpeg-video-dec-pts:
-
-``V4L2_CID_MPEG_VIDEO_DEC_PTS (integer64)``
- This read-only control returns the 33-bit video Presentation Time
- Stamp as defined in ITU T-REC-H.222.0 and ISO/IEC 13818-1 of the
- currently displayed frame. This is the same PTS as is used in
- :ref:`VIDIOC_DECODER_CMD`.
-
-.. _v4l2-mpeg-video-dec-frame:
-
-``V4L2_CID_MPEG_VIDEO_DEC_FRAME (integer64)``
- This read-only control returns the frame counter of the frame that
- is currently displayed (decoded). This value is reset to 0 whenever
- the decoder is started.
-
-``V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (boolean)``
- If enabled the decoder expects to receive a single slice per buffer,
- otherwise the decoder expects a single frame in per buffer.
- Applicable to the decoder, all codecs.
-
-``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (boolean)``
- Enable writing sample aspect ratio in the Video Usability
- Information. Applicable to the H264 encoder.
-
-.. _v4l2-mpeg-video-h264-vui-sar-idc:
-
-``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC``
- (enum)
-
-enum v4l2_mpeg_video_h264_vui_sar_idc -
- VUI sample aspect ratio indicator for H.264 encoding. The value is
- defined in the table E-1 in the standard. Applicable to the H264
- encoder.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED``
- - Unspecified
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1``
- - 1x1
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11``
- - 12x11
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11``
- - 10x11
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11``
- - 16x11
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33``
- - 40x33
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11``
- - 24x11
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11``
- - 20x11
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11``
- - 32x11
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33``
- - 80x33
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11``
- - 18x11
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11``
- - 15x11
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33``
- - 64x33
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99``
- - 160x99
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3``
- - 4x3
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2``
- - 3x2
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1``
- - 2x1
- * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED``
- - Extended SAR
-
-
-
-``V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (integer)``
- Extended sample aspect ratio width for H.264 VUI encoding.
- Applicable to the H264 encoder.
-
-``V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (integer)``
- Extended sample aspect ratio height for H.264 VUI encoding.
- Applicable to the H264 encoder.
-
-.. _v4l2-mpeg-video-h264-level:
-
-``V4L2_CID_MPEG_VIDEO_H264_LEVEL``
- (enum)
-
-enum v4l2_mpeg_video_h264_level -
- The level information for the H264 video elementary stream.
- Applicable to the H264 encoder. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_0``
- - Level 1.0
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1B``
- - Level 1B
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_1``
- - Level 1.1
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_2``
- - Level 1.2
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_3``
- - Level 1.3
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_0``
- - Level 2.0
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_1``
- - Level 2.1
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_2``
- - Level 2.2
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_0``
- - Level 3.0
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_1``
- - Level 3.1
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_2``
- - Level 3.2
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_0``
- - Level 4.0
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_1``
- - Level 4.1
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_2``
- - Level 4.2
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_0``
- - Level 5.0
- * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_1``
- - Level 5.1
-
-
-
-.. _v4l2-mpeg-video-mpeg4-level:
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL``
- (enum)
-
-enum v4l2_mpeg_video_mpeg4_level -
- The level information for the MPEG4 elementary stream. Applicable to
- the MPEG4 encoder. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0``
- - Level 0
- * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B``
- - Level 0b
- * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_1``
- - Level 1
- * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_2``
- - Level 2
- * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3``
- - Level 3
- * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B``
- - Level 3b
- * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_4``
- - Level 4
- * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_5``
- - Level 5
-
-
-
-.. _v4l2-mpeg-video-h264-profile:
-
-``V4L2_CID_MPEG_VIDEO_H264_PROFILE``
- (enum)
-
-enum v4l2_mpeg_video_h264_profile -
- The profile information for H264. Applicable to the H264 encoder.
- Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE``
- - Baseline profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE``
- - Constrained Baseline profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MAIN``
- - Main profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED``
- - Extended profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH``
- - High profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10``
- - High 10 profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422``
- - High 422 profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE``
- - High 444 Predictive profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA``
- - High 10 Intra profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA``
- - High 422 Intra profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA``
- - High 444 Intra profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA``
- - CAVLC 444 Intra profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE``
- - Scalable Baseline profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH``
- - Scalable High profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA``
- - Scalable High Intra profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH``
- - Stereo High profile
- * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH``
- - Multiview High profile
-
-
-
-.. _v4l2-mpeg-video-mpeg4-profile:
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE``
- (enum)
-
-enum v4l2_mpeg_video_mpeg4_profile -
- The profile information for MPEG4. Applicable to the MPEG4 encoder.
- Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE``
- - Simple profile
- * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE``
- - Advanced Simple profile
- * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE``
- - Core profile
- * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE``
- - Simple Scalable profile
- * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY``
- -
-
-
-
-``V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (integer)``
- The maximum number of reference pictures used for encoding.
- Applicable to the encoder.
-
-.. _v4l2-mpeg-video-multi-slice-mode:
-
-``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE``
- (enum)
-
-enum v4l2_mpeg_video_multi_slice_mode -
- Determines how the encoder should handle division of frame into
- slices. Applicable to the encoder. Possible values are:
-
-
-
-.. tabularcolumns:: |p{8.7cm}|p{8.8cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE``
- - Single slice per frame.
- * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB``
- - Multiple slices with set maximum number of macroblocks per slice.
- * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES``
- - Multiple slice with set maximum size in bytes per slice.
-
-
-
-``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (integer)``
- The maximum number of macroblocks in a slice. Used when
- ``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` is set to
- ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB``. Applicable to the
- encoder.
-
-``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (integer)``
- The maximum size of a slice in bytes. Used when
- ``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` is set to
- ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES``. Applicable to the
- encoder.
-
-.. _v4l2-mpeg-video-h264-loop-filter-mode:
-
-``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE``
- (enum)
-
-enum v4l2_mpeg_video_h264_loop_filter_mode -
- Loop filter mode for H264 encoder. Possible values are:
-
-
-
-.. tabularcolumns:: |p{14.0cm}|p{3.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED``
- - Loop filter is enabled.
- * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED``
- - Loop filter is disabled.
- * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY``
- - Loop filter is disabled at the slice boundary.
-
-
-
-``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (integer)``
- Loop filter alpha coefficient, defined in the H264 standard.
- This value corresponds to the slice_alpha_c0_offset_div2 slice header
- field, and should be in the range of -6 to +6, inclusive. The actual alpha
- offset FilterOffsetA is twice this value.
- Applicable to the H264 encoder.
-
-``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (integer)``
- Loop filter beta coefficient, defined in the H264 standard.
- This corresponds to the slice_beta_offset_div2 slice header field, and
- should be in the range of -6 to +6, inclusive. The actual beta offset
- FilterOffsetB is twice this value.
- Applicable to the H264 encoder.
-
-.. _v4l2-mpeg-video-h264-entropy-mode:
-
-``V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE``
- (enum)
-
-enum v4l2_mpeg_video_h264_entropy_mode -
- Entropy coding mode for H264 - CABAC/CAVALC. Applicable to the H264
- encoder. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC``
- - Use CAVLC entropy coding.
- * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC``
- - Use CABAC entropy coding.
-
-
-
-``V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (boolean)``
- Enable 8X8 transform for H264. Applicable to the H264 encoder.
-
-``V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (integer)``
- Cyclic intra macroblock refresh. This is the number of continuous
- macroblocks refreshed every frame. Each frame a successive set of
- macroblocks is refreshed until the cycle completes and starts from
- the top of the frame. Applicable to H264, H263 and MPEG4 encoder.
-
-``V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (boolean)``
- Frame level rate control enable. If this control is disabled then
- the quantization parameter for each frame type is constant and set
- with appropriate controls (e.g.
- ``V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP``). If frame rate control is
- enabled then quantization parameter is adjusted to meet the chosen
- bitrate. Minimum and maximum value for the quantization parameter
- can be set with appropriate controls (e.g.
- ``V4L2_CID_MPEG_VIDEO_H263_MIN_QP``). Applicable to encoders.
-
-``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (boolean)``
- Macroblock level rate control enable. Applicable to the MPEG4 and
- H264 encoders.
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (boolean)``
- Quarter pixel motion estimation for MPEG4. Applicable to the MPEG4
- encoder.
-
-``V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (integer)``
- Quantization parameter for an I frame for H263. Valid range: from 1
- to 31.
-
-``V4L2_CID_MPEG_VIDEO_H263_MIN_QP (integer)``
- Minimum quantization parameter for H263. Valid range: from 1 to 31.
-
-``V4L2_CID_MPEG_VIDEO_H263_MAX_QP (integer)``
- Maximum quantization parameter for H263. Valid range: from 1 to 31.
-
-``V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (integer)``
- Quantization parameter for an P frame for H263. Valid range: from 1
- to 31.
-
-``V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (integer)``
- Quantization parameter for an B frame for H263. Valid range: from 1
- to 31.
-
-``V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (integer)``
- Quantization parameter for an I frame for H264. Valid range: from 0
- to 51.
-
-``V4L2_CID_MPEG_VIDEO_H264_MIN_QP (integer)``
- Minimum quantization parameter for H264. Valid range: from 0 to 51.
-
-``V4L2_CID_MPEG_VIDEO_H264_MAX_QP (integer)``
- Maximum quantization parameter for H264. Valid range: from 0 to 51.
-
-``V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (integer)``
- Quantization parameter for an P frame for H264. Valid range: from 0
- to 51.
-
-``V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (integer)``
- Quantization parameter for an B frame for H264. Valid range: from 0
- to 51.
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (integer)``
- Quantization parameter for an I frame for MPEG4. Valid range: from 1
- to 31.
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (integer)``
- Minimum quantization parameter for MPEG4. Valid range: from 1 to 31.
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (integer)``
- Maximum quantization parameter for MPEG4. Valid range: from 1 to 31.
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (integer)``
- Quantization parameter for an P frame for MPEG4. Valid range: from 1
- to 31.
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (integer)``
- Quantization parameter for an B frame for MPEG4. Valid range: from 1
- to 31.
-
-``V4L2_CID_MPEG_VIDEO_VBV_SIZE (integer)``
- The Video Buffer Verifier size in kilobytes, it is used as a
- limitation of frame skip. The VBV is defined in the standard as a
- mean to verify that the produced stream will be successfully
- decoded. The standard describes it as "Part of a hypothetical
- decoder that is conceptually connected to the output of the encoder.
- Its purpose is to provide a constraint on the variability of the
- data rate that an encoder or editing process may produce.".
- Applicable to the MPEG1, MPEG2, MPEG4 encoders.
-
-.. _v4l2-mpeg-video-vbv-delay:
-
-``V4L2_CID_MPEG_VIDEO_VBV_DELAY (integer)``
- Sets the initial delay in milliseconds for VBV buffer control.
-
-.. _v4l2-mpeg-video-hor-search-range:
-
-``V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (integer)``
- Horizontal search range defines maximum horizontal search area in
- pixels to search and match for the present Macroblock (MB) in the
- reference picture. This V4L2 control macro is used to set horizontal
- search range for motion estimation module in video encoder.
-
-.. _v4l2-mpeg-video-vert-search-range:
-
-``V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (integer)``
- Vertical search range defines maximum vertical search area in pixels
- to search and match for the present Macroblock (MB) in the reference
- picture. This V4L2 control macro is used to set vertical search
- range for motion estimation module in video encoder.
-
-.. _v4l2-mpeg-video-force-key-frame:
-
-``V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (button)``
- Force a key frame for the next queued buffer. Applicable to
- encoders. This is a general, codec-agnostic keyframe control.
-
-``V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (integer)``
- The Coded Picture Buffer size in kilobytes, it is used as a
- limitation of frame skip. The CPB is defined in the H264 standard as
- a mean to verify that the produced stream will be successfully
- decoded. Applicable to the H264 encoder.
-
-``V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (integer)``
- Period between I-frames in the open GOP for H264. In case of an open
- GOP this is the period between two I-frames. The period between IDR
- (Instantaneous Decoding Refresh) frames is taken from the GOP_SIZE
- control. An IDR frame, which stands for Instantaneous Decoding
- Refresh is an I-frame after which no prior frames are referenced.
- This means that a stream can be restarted from an IDR frame without
- the need to store or decode any previous frames. Applicable to the
- H264 encoder.
-
-.. _v4l2-mpeg-video-header-mode:
-
-``V4L2_CID_MPEG_VIDEO_HEADER_MODE``
- (enum)
-
-enum v4l2_mpeg_video_header_mode -
- Determines whether the header is returned as the first buffer or is
- it returned together with the first frame. Applicable to encoders.
- Possible values are:
-
-
-
-.. tabularcolumns:: |p{10.3cm}|p{7.2cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE``
- - The stream header is returned separately in the first buffer.
- * - ``V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME``
- - The stream header is returned together with the first encoded
- frame.
-
-
-
-``V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (boolean)``
- Repeat the video sequence headers. Repeating these headers makes
- random access to the video stream easier. Applicable to the MPEG1, 2
- and 4 encoder.
-
-``V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (boolean)``
- Enabled the deblocking post processing filter for MPEG4 decoder.
- Applicable to the MPEG4 decoder.
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_RES (integer)``
- vop_time_increment_resolution value for MPEG4. Applicable to the
- MPEG4 encoder.
-
-``V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_INC (integer)``
- vop_time_increment value for MPEG4. Applicable to the MPEG4
- encoder.
-
-``V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (boolean)``
- Enable generation of frame packing supplemental enhancement
- information in the encoded bitstream. The frame packing SEI message
- contains the arrangement of L and R planes for 3D viewing.
- Applicable to the H264 encoder.
-
-``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (boolean)``
- Sets current frame as frame0 in frame packing SEI. Applicable to the
- H264 encoder.
-
-.. _v4l2-mpeg-video-h264-sei-fp-arrangement-type:
-
-``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE``
- (enum)
-
-enum v4l2_mpeg_video_h264_sei_fp_arrangement_type -
- Frame packing arrangement type for H264 SEI. Applicable to the H264
- encoder. Possible values are:
-
-.. tabularcolumns:: |p{12cm}|p{5.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHEKERBOARD``
- - Pixels are alternatively from L and R.
- * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN``
- - L and R are interlaced by column.
- * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW``
- - L and R are interlaced by row.
- * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE``
- - L is on the left, R on the right.
- * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM``
- - L is on top, R on bottom.
- * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL``
- - One view per frame.
-
-
-
-``V4L2_CID_MPEG_VIDEO_H264_FMO (boolean)``
- Enables flexible macroblock ordering in the encoded bitstream. It is
- a technique used for restructuring the ordering of macroblocks in
- pictures. Applicable to the H264 encoder.
-
-.. _v4l2-mpeg-video-h264-fmo-map-type:
-
-``V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE``
- (enum)
-
-enum v4l2_mpeg_video_h264_fmo_map_type -
- When using FMO, the map type divides the image in different scan
- patterns of macroblocks. Applicable to the H264 encoder. Possible
- values are:
-
-.. tabularcolumns:: |p{12.5cm}|p{5.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES``
- - Slices are interleaved one after other with macroblocks in run
- length order.
- * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES``
- - Scatters the macroblocks based on a mathematical function known to
- both encoder and decoder.
- * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER``
- - Macroblocks arranged in rectangular areas or regions of interest.
- * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT``
- - Slice groups grow in a cyclic way from centre to outwards.
- * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN``
- - Slice groups grow in raster scan pattern from left to right.
- * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN``
- - Slice groups grow in wipe scan pattern from top to bottom.
- * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT``
- - User defined map type.
-
-
-
-``V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (integer)``
- Number of slice groups in FMO. Applicable to the H264 encoder.
-
-.. _v4l2-mpeg-video-h264-fmo-change-direction:
-
-``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION``
- (enum)
-
-enum v4l2_mpeg_video_h264_fmo_change_dir -
- Specifies a direction of the slice group change for raster and wipe
- maps. Applicable to the H264 encoder. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT``
- - Raster scan or wipe right.
- * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT``
- - Reverse raster scan or wipe left.
-
-
-
-``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (integer)``
- Specifies the size of the first slice group for raster and wipe map.
- Applicable to the H264 encoder.
-
-``V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (integer)``
- Specifies the number of consecutive macroblocks for the interleaved
- map. Applicable to the H264 encoder.
-
-``V4L2_CID_MPEG_VIDEO_H264_ASO (boolean)``
- Enables arbitrary slice ordering in encoded bitstream. Applicable to
- the H264 encoder.
-
-``V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (integer)``
- Specifies the slice order in ASO. Applicable to the H264 encoder.
- The supplied 32-bit integer is interpreted as follows (bit 0 = least
- significant bit):
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - Bit 0:15
- - Slice ID
- * - Bit 16:32
- - Slice position or order
-
-
-
-``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (boolean)``
- Enables H264 hierarchical coding. Applicable to the H264 encoder.
-
-.. _v4l2-mpeg-video-h264-hierarchical-coding-type:
-
-``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE``
- (enum)
-
-enum v4l2_mpeg_video_h264_hierarchical_coding_type -
- Specifies the hierarchical coding type. Applicable to the H264
- encoder. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B``
- - Hierarchical B coding.
- * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P``
- - Hierarchical P coding.
-
-
-
-``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (integer)``
- Specifies the number of hierarchical coding layers. Applicable to
- the H264 encoder.
-
-``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (integer)``
- Specifies a user defined QP for each layer. Applicable to the H264
- encoder. The supplied 32-bit integer is interpreted as follows (bit
- 0 = least significant bit):
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - Bit 0:15
- - QP value
- * - Bit 16:32
- - Layer number
-
-
-
-.. _v4l2-mpeg-mpeg2:
-
-``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (struct)``
- Specifies the slice parameters (as extracted from the bitstream) for the
- associated MPEG-2 slice data. This includes the necessary parameters for
- configuring a stateless hardware decoding pipeline for MPEG-2.
- The bitstream parameters are defined according to :ref:`mpeg2part2`.
-
- .. note::
-
- This compound control is not yet part of the public kernel API and
- it is expected to change.
-
-.. c:type:: v4l2_ctrl_mpeg2_slice_params
-
-.. cssclass:: longtable
-
-.. flat-table:: struct v4l2_ctrl_mpeg2_slice_params
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
- * - __u32
- - ``bit_size``
- - Size (in bits) of the current slice data.
- * - __u32
- - ``data_bit_offset``
- - Offset (in bits) to the video data in the current slice data.
- * - struct :c:type:`v4l2_mpeg2_sequence`
- - ``sequence``
- - Structure with MPEG-2 sequence metadata, merging relevant fields from
- the sequence header and sequence extension parts of the bitstream.
- * - struct :c:type:`v4l2_mpeg2_picture`
- - ``picture``
- - Structure with MPEG-2 picture metadata, merging relevant fields from
- the picture header and picture coding extension parts of the bitstream.
- * - __u8
- - ``quantiser_scale_code``
- - Code used to determine the quantization scale to use for the IDCT.
- * - __u8
- - ``backward_ref_index``
- - Index for the V4L2 buffer to use as backward reference, used with
- B-coded and P-coded frames.
- * - __u8
- - ``forward_ref_index``
- - Index for the V4L2 buffer to use as forward reference, used with
- B-coded frames.
-
-.. c:type:: v4l2_mpeg2_sequence
-
-.. cssclass:: longtable
-
-.. flat-table:: struct v4l2_mpeg2_sequence
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
- * - __u16
- - ``horizontal_size``
- - The width of the displayable part of the frame's luminance component.
- * - __u16
- - ``vertical_size``
- - The height of the displayable part of the frame's luminance component.
- * - __u32
- - ``vbv_buffer_size``
- - Used to calculate the required size of the video buffering verifier,
- defined (in bits) as: 16 * 1024 * vbv_buffer_size.
- * - __u8
- - ``profile_and_level_indication``
- - The current profile and level indication as extracted from the
- bitstream.
- * - __u8
- - ``progressive_sequence``
- - Indication that all the frames for the sequence are progressive instead
- of interlaced.
- * - __u8
- - ``chroma_format``
- - The chrominance sub-sampling format (1: 4:2:0, 2: 4:2:2, 3: 4:4:4).
-
-.. c:type:: v4l2_mpeg2_picture
-
-.. cssclass:: longtable
-
-.. flat-table:: struct v4l2_mpeg2_picture
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
- * - __u8
- - ``picture_coding_type``
- - Picture coding type for the frame covered by the current slice
- (V4L2_MPEG2_PICTURE_CODING_TYPE_I, V4L2_MPEG2_PICTURE_CODING_TYPE_P or
- V4L2_MPEG2_PICTURE_CODING_TYPE_B).
- * - __u8
- - ``f_code[2][2]``
- - Motion vector codes.
- * - __u8
- - ``intra_dc_precision``
- - Precision of Discrete Cosine transform (0: 8 bits precision,
- 1: 9 bits precision, 2: 10 bits precision, 3: 11 bits precision).
- * - __u8
- - ``picture_structure``
- - Picture structure (1: interlaced top field, 2: interlaced bottom field,
- 3: progressive frame).
- * - __u8
- - ``top_field_first``
- - If set to 1 and interlaced stream, top field is output first.
- * - __u8
- - ``frame_pred_frame_dct``
- - If set to 1, only frame-DCT and frame prediction are used.
- * - __u8
- - ``concealment_motion_vectors``
- - If set to 1, motion vectors are coded for intra macroblocks.
- * - __u8
- - ``q_scale_type``
- - This flag affects the inverse quantization process.
- * - __u8
- - ``intra_vlc_format``
- - This flag affects the decoding of transform coefficient data.
- * - __u8
- - ``alternate_scan``
- - This flag affects the decoding of transform coefficient data.
- * - __u8
- - ``repeat_first_field``
- - This flag affects the decoding process of progressive frames.
- * - __u8
- - ``progressive_frame``
- - Indicates whether the current frame is progressive.
-
-``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (struct)``
- Specifies quantization matrices (as extracted from the bitstream) for the
- associated MPEG-2 slice data.
-
- .. note::
-
- This compound control is not yet part of the public kernel API and
- it is expected to change.
-
-.. c:type:: v4l2_ctrl_mpeg2_quantization
-
-.. cssclass:: longtable
-
-.. flat-table:: struct v4l2_ctrl_mpeg2_quantization
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
- * - __u8
- - ``load_intra_quantiser_matrix``
- - One bit to indicate whether to load the ``intra_quantiser_matrix`` data.
- * - __u8
- - ``load_non_intra_quantiser_matrix``
- - One bit to indicate whether to load the ``non_intra_quantiser_matrix``
- data.
- * - __u8
- - ``load_chroma_intra_quantiser_matrix``
- - One bit to indicate whether to load the
- ``chroma_intra_quantiser_matrix`` data, only relevant for non-4:2:0 YUV
- formats.
- * - __u8
- - ``load_chroma_non_intra_quantiser_matrix``
- - One bit to indicate whether to load the
- ``chroma_non_intra_quantiser_matrix`` data, only relevant for non-4:2:0
- YUV formats.
- * - __u8
- - ``intra_quantiser_matrix[64]``
- - The quantization matrix coefficients for intra-coded frames, in zigzag
- scanning order. It is relevant for both luma and chroma components,
- although it can be superseded by the chroma-specific matrix for
- non-4:2:0 YUV formats.
- * - __u8
- - ``non_intra_quantiser_matrix[64]``
- - The quantization matrix coefficients for non-intra-coded frames, in
- zigzag scanning order. It is relevant for both luma and chroma
- components, although it can be superseded by the chroma-specific matrix
- for non-4:2:0 YUV formats.
- * - __u8
- - ``chroma_intra_quantiser_matrix[64]``
- - The quantization matrix coefficients for the chominance component of
- intra-coded frames, in zigzag scanning order. Only relevant for
- non-4:2:0 YUV formats.
- * - __u8
- - ``chroma_non_intra_quantiser_matrix[64]``
- - The quantization matrix coefficients for the chrominance component of
- non-intra-coded frames, in zigzag scanning order. Only relevant for
- non-4:2:0 YUV formats.
-
-MFC 5.1 MPEG Controls
----------------------
-
-The following MPEG class controls deal with MPEG decoding and encoding
-settings that are specific to the Multi Format Codec 5.1 device present
-in the S5P family of SoCs by Samsung.
-
-
-.. _mfc51-control-id:
-
-MFC 5.1 Control IDs
-^^^^^^^^^^^^^^^^^^^
-
-``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (boolean)``
- If the display delay is enabled then the decoder is forced to return
- a CAPTURE buffer (decoded frame) after processing a certain number
- of OUTPUT buffers. The delay can be set through
- ``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY``. This
- feature can be used for example for generating thumbnails of videos.
- Applicable to the H264 decoder.
-
-``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (integer)``
- Display delay value for H264 decoder. The decoder is forced to
- return a decoded frame after the set 'display delay' number of
- frames. If this number is low it may result in frames returned out
- of dispaly order, in addition the hardware may still be using the
- returned buffer as a reference picture for subsequent frames.
-
-``V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (integer)``
- The number of reference pictures used for encoding a P picture.
- Applicable to the H264 encoder.
-
-``V4L2_CID_MPEG_MFC51_VIDEO_PADDING (boolean)``
- Padding enable in the encoder - use a color instead of repeating
- border pixels. Applicable to encoders.
-
-``V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (integer)``
- Padding color in the encoder. Applicable to encoders. The supplied
- 32-bit integer is interpreted as follows (bit 0 = least significant
- bit):
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - Bit 0:7
- - V chrominance information
- * - Bit 8:15
- - U chrominance information
- * - Bit 16:23
- - Y luminance information
- * - Bit 24:31
- - Must be zero.
-
-
-
-``V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (integer)``
- Reaction coefficient for MFC rate control. Applicable to encoders.
-
- .. note::
-
- #. Valid only when the frame level RC is enabled.
-
- #. For tight CBR, this field must be small (ex. 2 ~ 10). For
- VBR, this field must be large (ex. 100 ~ 1000).
-
- #. It is not recommended to use the greater number than
- FRAME_RATE * (10^9 / BIT_RATE).
-
-``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (boolean)``
- Adaptive rate control for dark region. Valid only when H.264 and
- macroblock level RC is enabled
- (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264
- encoder.
-
-``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (boolean)``
- Adaptive rate control for smooth region. Valid only when H.264 and
- macroblock level RC is enabled
- (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264
- encoder.
-
-``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (boolean)``
- Adaptive rate control for static region. Valid only when H.264 and
- macroblock level RC is enabled
- (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264
- encoder.
-
-``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (boolean)``
- Adaptive rate control for activity region. Valid only when H.264 and
- macroblock level RC is enabled
- (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264
- encoder.
-
-.. _v4l2-mpeg-mfc51-video-frame-skip-mode:
-
-``V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE``
- (enum)
-
-enum v4l2_mpeg_mfc51_video_frame_skip_mode -
- Indicates under which conditions the encoder should skip frames. If
- encoding a frame would cause the encoded stream to be larger than a
- chosen data limit then the frame will be skipped. Possible values
- are:
-
-
-.. tabularcolumns:: |p{9.0cm}|p{8.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_DISABLED``
- - Frame skip mode is disabled.
- * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_LEVEL_LIMIT``
- - Frame skip mode enabled; the buffer limit is set by the chosen
- level and is defined by the standard.
- * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_BUF_LIMIT``
- - Frame skip mode enabled; the buffer limit is set by the VBV
- (MPEG1/2/4) or CPB (H264) buffer size control.
-
-
-
-``V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (integer)``
- Enable rate-control with a fixed target bit. If this setting is
- enabled, then the rate control logic of the encoder will calculate
- the average bitrate for a GOP and keep it below or equal to the set
- bitrate target. Otherwise the rate control logic calculates the
- overall average bitrate for the stream and keeps it below or equal
- to the set bitrate. In the first case the average bitrate for the
- whole stream will be smaller than the set bitrate, because the
- average is calculated over a smaller number of frames; on the other
- hand, enabling this setting ensures that the stream meets tight
- bandwidth constraints. Applicable to encoders.
-
-.. _v4l2-mpeg-mfc51-video-force-frame-type:
-
-``V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE``
- (enum)
-
-enum v4l2_mpeg_mfc51_video_force_frame_type -
- Force a frame type for the next queued buffer. Applicable to
- encoders. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_DISABLED``
- - Forcing a specific frame type is disabled.
- * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME``
- - Force an I-frame.
- * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_NOT_CODED``
- - Force a non-coded frame.
-
-
-
-
-CX2341x MPEG Controls
----------------------
-
-The following MPEG class controls deal with MPEG encoding settings that
-are specific to the Conexant CX23415 and CX23416 MPEG encoding chips.
-
-
-.. _cx2341x-control-id:
-
-CX2341x Control IDs
-^^^^^^^^^^^^^^^^^^^
-
-.. _v4l2-mpeg-cx2341x-video-spatial-filter-mode:
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE``
- (enum)
-
-enum v4l2_mpeg_cx2341x_video_spatial_filter_mode -
- Sets the Spatial Filter mode (default ``MANUAL``). Possible values
- are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL``
- - Choose the filter manually
- * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO``
- - Choose the filter automatically
-
-
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (integer (0-15))``
- The setting for the Spatial Filter. 0 = off, 15 = maximum. (Default
- is 0.)
-
-.. _luma-spatial-filter-type:
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE``
- (enum)
-
-enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type -
- Select the algorithm to use for the Luma Spatial Filter (default
- ``1D_HOR``). Possible values:
-
-
-
-.. tabularcolumns:: |p{14.5cm}|p{3.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF``
- - No filter
- * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR``
- - One-dimensional horizontal
- * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT``
- - One-dimensional vertical
- * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE``
- - Two-dimensional separable
- * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE``
- - Two-dimensional symmetrical non-separable
-
-
-
-.. _chroma-spatial-filter-type:
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE``
- (enum)
-
-enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type -
- Select the algorithm for the Chroma Spatial Filter (default
- ``1D_HOR``). Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF``
- - No filter
- * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR``
- - One-dimensional horizontal
-
-
-
-.. _v4l2-mpeg-cx2341x-video-temporal-filter-mode:
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE``
- (enum)
-
-enum v4l2_mpeg_cx2341x_video_temporal_filter_mode -
- Sets the Temporal Filter mode (default ``MANUAL``). Possible values
- are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL``
- - Choose the filter manually
- * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO``
- - Choose the filter automatically
-
-
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (integer (0-31))``
- The setting for the Temporal Filter. 0 = off, 31 = maximum. (Default
- is 8 for full-scale capturing and 0 for scaled capturing.)
-
-.. _v4l2-mpeg-cx2341x-video-median-filter-type:
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE``
- (enum)
-
-enum v4l2_mpeg_cx2341x_video_median_filter_type -
- Median Filter Type (default ``OFF``). Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF``
- - No filter
- * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR``
- - Horizontal filter
- * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT``
- - Vertical filter
- * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT``
- - Horizontal and vertical filter
- * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG``
- - Diagonal filter
-
-
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (integer (0-255))``
- Threshold above which the luminance median filter is enabled
- (default 0)
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (integer (0-255))``
- Threshold below which the luminance median filter is enabled
- (default 255)
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (integer (0-255))``
- Threshold above which the chroma median filter is enabled (default
- 0)
-
-``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (integer (0-255))``
- Threshold below which the chroma median filter is enabled (default
- 255)
-
-``V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (boolean)``
- The CX2341X MPEG encoder can insert one empty MPEG-2 PES packet into
- the stream between every four video frames. The packet size is 2048
- bytes, including the packet_start_code_prefix and stream_id
- fields. The stream_id is 0xBF (private stream 2). The payload
- consists of 0x00 bytes, to be filled in by the application. 0 = do
- not insert, 1 = insert packets.
-
-
-VPX Control Reference
----------------------
-
-The VPX controls include controls for encoding parameters of VPx video
-codec.
-
-
-.. _vpx-control-id:
-
-VPX Control IDs
-^^^^^^^^^^^^^^^
-
-.. _v4l2-vpx-num-partitions:
-
-``V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS``
- (enum)
-
-enum v4l2_vp8_num_partitions -
- The number of token partitions to use in the VP8 encoder. Possible
- values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION``
- - 1 coefficient partition
- * - ``V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS``
- - 2 coefficient partitions
- * - ``V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS``
- - 4 coefficient partitions
- * - ``V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS``
- - 8 coefficient partitions
-
-
-
-``V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (boolean)``
- Setting this prevents intra 4x4 mode in the intra mode decision.
-
-.. _v4l2-vpx-num-ref-frames:
-
-``V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES``
- (enum)
-
-enum v4l2_vp8_num_ref_frames -
- The number of reference pictures for encoding P frames. Possible
- values are:
-
-.. tabularcolumns:: |p{7.9cm}|p{9.6cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME``
- - Last encoded frame will be searched
- * - ``V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME``
- - Two frames will be searched among the last encoded frame, the
- golden frame and the alternate reference (altref) frame. The
- encoder implementation will decide which two are chosen.
- * - ``V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME``
- - The last encoded frame, the golden frame and the altref frame will
- be searched.
-
-
-
-``V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (integer)``
- Indicates the loop filter level. The adjustment of the loop filter
- level is done via a delta value against a baseline loop filter
- value.
-
-``V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (integer)``
- This parameter affects the loop filter. Anything above zero weakens
- the deblocking effect on the loop filter.
-
-``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (integer)``
- Sets the refresh period for the golden frame. The period is defined
- in number of frames. For a value of 'n', every nth frame starting
- from the first key frame will be taken as a golden frame. For
- example, for an encoding sequence of frames 0, 1, 2, 3, 4, 5, 6, 7
- with the golden frame refresh period set to 4, frames 0, 4, 8, etc.
- will be taken as the golden frames, as frame 0 is always a key frame.
-
-.. _v4l2-vpx-golden-frame-sel:
-
-``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL``
- (enum)
-
-enum v4l2_vp8_golden_frame_sel -
- Selects the golden frame for encoding. Possible values are:
-
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV``
- - Use the (n-2)th frame as a golden frame, current frame index being
- 'n'.
- * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD``
- - Use the previous specific frame indicated by
- ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD`` as a
- golden frame.
-
-.. raw:: latex
-
- \normalsize
-
-
-``V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (integer)``
- Minimum quantization parameter for VP8.
-
-``V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (integer)``
- Maximum quantization parameter for VP8.
-
-``V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (integer)``
- Quantization parameter for an I frame for VP8.
-
-``V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (integer)``
- Quantization parameter for a P frame for VP8.
-
-.. _v4l2-mpeg-video-vp8-profile:
-
-``V4L2_CID_MPEG_VIDEO_VP8_PROFILE``
- (enum)
-
-enum v4l2_mpeg_video_vp8_profile -
- This control allows selecting the profile for the VP8 encoder.
- It is also used to enumerate the profiles supported by a VP8
- encoder or decoder.
- Possible values are:
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_0``
- - Profile 0
- * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_1``
- - Profile 1
- * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_2``
- - Profile 2
- * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_3``
- - Profile 3
-
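-The profiles supported by a given device can be enumerated with
-``VIDIOC_QUERYMENU``. A sketch, with error handling elided and a
-hypothetical helper name:
-
-.. code-block:: c
-
-    #include <stdio.h>
-    #include <string.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    /* Print every VP8 profile the driver reports as supported. */
-    static void list_vp8_profiles(int fd)
-    {
-        struct v4l2_querymenu menu;
-        __u32 i;
-
-        for (i = V4L2_MPEG_VIDEO_VP8_PROFILE_0;
-             i <= V4L2_MPEG_VIDEO_VP8_PROFILE_3; i++) {
-            memset(&menu, 0, sizeof(menu));
-            menu.id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE;
-            menu.index = i;
-            if (ioctl(fd, VIDIOC_QUERYMENU, &menu) == 0)
-                printf("supported: %s\n", (const char *)menu.name);
-        }
-    }
-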
-.. _v4l2-mpeg-video-vp9-profile:
-
-``V4L2_CID_MPEG_VIDEO_VP9_PROFILE``
- (enum)
-
-enum v4l2_mpeg_video_vp9_profile -
- This control allows selecting the profile for the VP9 encoder.
- It is also used to enumerate the profiles supported by a VP9
- encoder or decoder.
- Possible values are:
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_0``
- - Profile 0
- * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_1``
- - Profile 1
- * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_2``
- - Profile 2
- * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_3``
- - Profile 3
-
-
-High Efficiency Video Coding (HEVC/H.265) Control Reference
------------------------------------------------------------
-
-The HEVC/H.265 controls include controls for encoding parameters of HEVC/H.265
-video codec.
-
-
-.. _hevc-control-id:
-
-HEVC/H.265 Control IDs
-^^^^^^^^^^^^^^^^^^^^^^
-
-``V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (integer)``
- Minimum quantization parameter for HEVC.
- Valid range: from 0 to 51.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (integer)``
- Maximum quantization parameter for HEVC.
- Valid range: from 0 to 51.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (integer)``
- Quantization parameter for an I frame for HEVC.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
-
-``V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (integer)``
- Quantization parameter for a P frame for HEVC.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
-
-``V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (integer)``
- Quantization parameter for a B frame for HEVC.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
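-
-A sketch of programming the QP range and an I-frame QP inside it in a
-single extended-controls call (the helper name and QP values are
-examples, not requirements):
-
-.. code-block:: c
-
-    #include <string.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    /* Set MIN/MAX QP and an I-frame QP that lies inside the range. */
-    static int set_hevc_qp(int fd)
-    {
-        struct v4l2_ext_control ctrl[3];
-        struct v4l2_ext_controls ctrls;
-
-        memset(ctrl, 0, sizeof(ctrl));
-        ctrl[0].id = V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP;
-        ctrl[0].value = 10;              /* example values */
-        ctrl[1].id = V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP;
-        ctrl[1].value = 42;
-        ctrl[2].id = V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP;
-        ctrl[2].value = 30;              /* must lie in [10, 42] */
-
-        memset(&ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-        ctrls.count = 3;
-        ctrls.controls = ctrl;
-
-        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-    }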
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (boolean)``
- HIERARCHICAL_QP allows the host to specify the quantization parameter
- values for each temporal layer through HIERARCHICAL_QP_LAYER. This is
- valid only if HIERARCHICAL_CODING_LAYER is greater than 1. Setting the
- control value to 1 enables setting of the QP values for the layers.
-
-.. _v4l2-hevc-hier-coding-type:
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE``
- (enum)
-
-enum v4l2_mpeg_video_hevc_hier_coding_type -
- Selects the hierarchical coding type for encoding. Possible values are:
-
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B``
- - Use the B frame for hierarchical coding.
- * - ``V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P``
- - Use the P frame for hierarchical coding.
-
-.. raw:: latex
-
- \normalsize
-
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (integer)``
- Selects the hierarchical coding layer. In normal encoding
- (non-hierarchical coding), it should be zero. Possible values are [0, 6].
- 0 indicates HIERARCHICAL CODING LAYER 0, 1 indicates HIERARCHICAL CODING
- LAYER 1 and so on.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (integer)``
- Indicates quantization parameter for hierarchical coding layer 0.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (integer)``
- Indicates quantization parameter for hierarchical coding layer 1.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (integer)``
- Indicates quantization parameter for hierarchical coding layer 2.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (integer)``
- Indicates quantization parameter for hierarchical coding layer 3.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (integer)``
- Indicates quantization parameter for hierarchical coding layer 4.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (integer)``
- Indicates quantization parameter for hierarchical coding layer 5.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (integer)``
- Indicates quantization parameter for hierarchical coding layer 6.
- Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
- V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
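-
-A sketch of enabling hierarchical QP and programming the first three
-layers (the helper name and QP values are hypothetical; the
-hierarchical coding type and layer count are assumed to be configured
-already):
-
-.. code-block:: c
-
-    #include <string.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    /* Enable per-layer QP and program layers 0..2. */
-    static int set_hier_qp(int fd)
-    {
-        struct v4l2_ext_control ctrl[4];
-        struct v4l2_ext_controls ctrls;
-
-        memset(ctrl, 0, sizeof(ctrl));
-        ctrl[0].id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP;
-        ctrl[0].value = 1;
-        ctrl[1].id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP;
-        ctrl[1].value = 24;              /* example values */
-        ctrl[2].id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP;
-        ctrl[2].value = 28;
-        ctrl[3].id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP;
-        ctrl[3].value = 32;
-
-        memset(&ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-        ctrls.count = 4;
-        ctrls.controls = ctrl;
-
-        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-    }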
-
-.. _v4l2-hevc-profile:
-
-``V4L2_CID_MPEG_VIDEO_HEVC_PROFILE``
- (enum)
-
-enum v4l2_mpeg_video_hevc_profile -
- Select the desired profile for the HEVC encoder.
-
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN``
- - Main profile.
- * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE``
- - Main still picture profile.
- * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10``
- - Main 10 profile.
-
-.. raw:: latex
-
- \normalsize
-
-
-.. _v4l2-hevc-level:
-
-``V4L2_CID_MPEG_VIDEO_HEVC_LEVEL``
- (enum)
-
-enum v4l2_mpeg_video_hevc_level -
- Selects the desired level for the HEVC encoder.
-
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_1``
- - Level 1.0
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_2``
- - Level 2.0
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1``
- - Level 2.1
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_3``
- - Level 3.0
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1``
- - Level 3.1
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_4``
- - Level 4.0
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1``
- - Level 4.1
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5``
- - Level 5.0
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1``
- - Level 5.1
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2``
- - Level 5.2
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6``
- - Level 6.0
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1``
- - Level 6.1
- * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2``
- - Level 6.2
-
-.. raw:: latex
-
- \normalsize
-
-
-``V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (integer)``
- Indicates the number of evenly spaced subintervals, called ticks, within
- one second. This is a 16-bit unsigned integer with a minimum value of 1
- and a maximum value of 0xffff.
-
-.. _v4l2-hevc-tier:
-
-``V4L2_CID_MPEG_VIDEO_HEVC_TIER``
- (enum)
-
-enum v4l2_mpeg_video_hevc_tier -
- TIER_FLAG specifies the tier information of the HEVC encoded picture.
- Tiers were made to deal with applications that differ in terms of
- maximum bit rate. Setting the flag to 0 selects the Main tier and
- setting it to 1 selects the High tier. The High tier is for
- applications requiring high bit rates.
-
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_HEVC_TIER_MAIN``
- - Main tier.
- * - ``V4L2_MPEG_VIDEO_HEVC_TIER_HIGH``
- - High tier.
-
-.. raw:: latex
-
- \normalsize
-
-
-``V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (integer)``
- Selects HEVC maximum coding unit depth.
-
-.. _v4l2-hevc-loop-filter-mode:
-
-``V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE``
- (enum)
-
-enum v4l2_mpeg_video_hevc_loop_filter_mode -
- Loop filter mode for HEVC encoder. Possible values are:
-
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{10.7cm}|p{6.3cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED``
- - Loop filter is disabled.
- * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED``
- - Loop filter is enabled.
- * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY``
- - Loop filter is disabled at the slice boundary.
-
-.. raw:: latex
-
- \normalsize
-
-
-``V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (integer)``
- Selects HEVC loop filter beta offset. The valid range is [-6, +6].
-
-``V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (integer)``
- Selects HEVC loop filter tc offset. The valid range is [-6, +6].
-
-.. _v4l2-hevc-refresh-type:
-
-``V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE``
- (enum)
-
-enum v4l2_mpeg_video_hevc_hier_refresh_type -
- Selects the refresh type for the HEVC encoder.
- The host has to specify the period in
- V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD.
-
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{8.0cm}|p{9.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE``
- - No periodic refresh pictures are generated.
- * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA``
- - Use CRA (Clean Random Access Unit) picture encoding.
- * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR``
- - Use IDR (Instantaneous Decoding Refresh) picture encoding.
-
-.. raw:: latex
-
- \normalsize
-
-
-``V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (integer)``
- Selects the refresh period for the HEVC encoder.
- This specifies the number of I pictures between two CRA/IDR pictures.
- This is valid only if REFRESH_TYPE is not 0.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (boolean)``
- Indicates HEVC lossless encoding. Setting it to 0 disables lossless
- encoding. Setting it to 1 enables lossless encoding.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (boolean)``
- Indicates constrained intra prediction for the HEVC encoder, in which
- intra largest coding unit (LCU) prediction is performed by using
- residual data and decoded samples of neighboring intra LCUs only.
- Setting the value to 1 enables constrained intra prediction and
- setting the value to 0 disables it.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (boolean)``
- Indicates wavefront parallel processing for HEVC encoder. Setting it to 0
- disables the feature and setting it to 1 enables the wavefront parallel
- processing.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (boolean)``
- Setting the value to 1 enables the combination of P and B frames for
- the HEVC encoder.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (boolean)``
- Indicates the temporal identifier for the HEVC encoder, which is
- enabled by setting the value to 1.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (boolean)``
- Indicates bi-linear interpolation is conditionally used in the intra
- prediction filtering process in the CVS when set to 1. Indicates bi-linear
- interpolation is not used in the CVS when set to 0.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (integer)``
- Indicates maximum number of merge candidate motion vectors.
- Values are from 0 to 4.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (boolean)``
- Indicates temporal motion vector prediction for HEVC encoder. Setting it to
- 1 enables the prediction. Setting it to 0 disables the prediction.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (boolean)``
- Specifies whether HEVC generates a stream with a length field instead
- of a start code pattern. The size of the length field is configurable
- through the V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD control.
- Setting the value to 0 disables encoding without a start code pattern.
- Setting the value to 1 enables encoding without a start code pattern.
-
-.. _v4l2-hevc-size-of-length-field:
-
-``V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD``
- (enum)
-
-enum v4l2_mpeg_video_hevc_size_of_length_field -
- Indicates the size of the length field. This is valid only when
- encoding without a start code pattern
- (``V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE``) is enabled.
-
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{6.0cm}|p{11.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_0``
- - Generate start code pattern (Normal).
- * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_1``
- - Generate size of length field instead of start code pattern and length is 1.
- * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_2``
- - Generate size of length field instead of start code pattern and length is 2.
- * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_4``
- - Generate size of length field instead of start code pattern and length is 4.
-
-.. raw:: latex
-
- \normalsize
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (integer)``
- Indicates bit rate for hierarchical coding layer 0 for HEVC encoder.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (integer)``
- Indicates bit rate for hierarchical coding layer 1 for HEVC encoder.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (integer)``
- Indicates bit rate for hierarchical coding layer 2 for HEVC encoder.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (integer)``
- Indicates bit rate for hierarchical coding layer 3 for HEVC encoder.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (integer)``
- Indicates bit rate for hierarchical coding layer 4 for HEVC encoder.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (integer)``
- Indicates bit rate for hierarchical coding layer 5 for HEVC encoder.
-
-``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (integer)``
- Indicates bit rate for hierarchical coding layer 6 for HEVC encoder.
-
-``V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (integer)``
- Selects the number of P reference pictures required by the HEVC
- encoder. A P frame can use 1 or 2 frames as reference.
-
-``V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (integer)``
- Indicates whether to generate SPS and PPS at every IDR. Setting it to 0
- disables generating SPS and PPS at every IDR. Setting it to 1 enables
- generating SPS and PPS at every IDR.
-
-
-.. _camera-controls:
-
-Camera Control Reference
-========================
-
-The Camera class includes controls for mechanical (or equivalent
-digital) features of a device such as controllable lenses or sensors.
-
-
-.. _camera-control-id:
-
-Camera Control IDs
-------------------
-
-``V4L2_CID_CAMERA_CLASS (class)``
- The Camera class descriptor. Calling
- :ref:`VIDIOC_QUERYCTRL` for this control will
- return a description of this control class.
-
-.. _v4l2-exposure-auto-type:
-
-``V4L2_CID_EXPOSURE_AUTO``
- (enum)
-
-enum v4l2_exposure_auto_type -
- Enables automatic adjustments of the exposure time and/or iris
- aperture. The effect of manual changes of the exposure time or iris
- aperture while these features are enabled is undefined; drivers
- should ignore such requests. Possible values are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_EXPOSURE_AUTO``
- - Automatic exposure time, automatic iris aperture.
- * - ``V4L2_EXPOSURE_MANUAL``
- - Manual exposure time, manual iris.
- * - ``V4L2_EXPOSURE_SHUTTER_PRIORITY``
- - Manual exposure time, auto iris.
- * - ``V4L2_EXPOSURE_APERTURE_PRIORITY``
- - Auto exposure time, manual iris.
-
-
-
-``V4L2_CID_EXPOSURE_ABSOLUTE (integer)``
- Determines the exposure time of the camera sensor. The exposure time
- is limited by the frame interval. Drivers should interpret the
- values as 100 µs units, where the value 1 stands for 1/10000th of a
- second, 10000 for 1 second and 100000 for 10 seconds.
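-
-As a worked example, an exposure of 1/100 s is 10 ms, i.e. 100 units of
-100 µs. A hypothetical conversion helper:
-
-.. code-block:: c
-
-    #include <linux/types.h>
-
-    /* Convert an exposure time in microseconds to the 100 us units
-     * of V4L2_CID_EXPOSURE_ABSOLUTE. */
-    static __s32 exposure_100us_units(__u64 exposure_us)
-    {
-        return (__s32)(exposure_us / 100);   /* 10000 us -> 100 */
-    }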
-
-``V4L2_CID_EXPOSURE_AUTO_PRIORITY (boolean)``
- When ``V4L2_CID_EXPOSURE_AUTO`` is set to ``AUTO`` or
- ``APERTURE_PRIORITY``, this control determines if the device may
- dynamically vary the frame rate. By default this feature is disabled
- (0) and the frame rate must remain constant.
-
-``V4L2_CID_AUTO_EXPOSURE_BIAS (integer menu)``
- Determines the automatic exposure compensation. It is effective only
- when the ``V4L2_CID_EXPOSURE_AUTO`` control is set to ``AUTO``,
- ``SHUTTER_PRIORITY`` or ``APERTURE_PRIORITY``. It is expressed in
- terms of EV; drivers should interpret the values as 0.001 EV units,
- where the value 1000 stands for +1 EV.
-
- Increasing the exposure compensation value is equivalent to
- decreasing the exposure value (EV) and will increase the amount of
- light at the image sensor. The camera performs the exposure
- compensation by adjusting absolute exposure time and/or aperture.
-
-.. _v4l2-exposure-metering:
-
-``V4L2_CID_EXPOSURE_METERING``
- (enum)
-
-enum v4l2_exposure_metering -
- Determines how the camera measures the amount of light available for
- the frame exposure. Possible values are:
-
-.. tabularcolumns:: |p{8.5cm}|p{9.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_EXPOSURE_METERING_AVERAGE``
- - Use the light information coming from the entire frame and average
- it, giving no weighting to any particular portion of the metered area.
- * - ``V4L2_EXPOSURE_METERING_CENTER_WEIGHTED``
- - Average the light information coming from the entire frame giving
- priority to the center of the metered area.
- * - ``V4L2_EXPOSURE_METERING_SPOT``
- - Measure only a very small area at the center of the frame.
- * - ``V4L2_EXPOSURE_METERING_MATRIX``
- - A multi-zone metering. The light intensity is measured in several
- points of the frame and the results are combined. The zone selection
- algorithm and the significance of each zone in calculating the
- final value are device dependent.
-
-
-
-``V4L2_CID_PAN_RELATIVE (integer)``
- This control turns the camera horizontally by the specified amount.
- The unit is undefined. A positive value moves the camera to the
- right (clockwise when viewed from above), a negative value to the
- left. A value of zero does not cause motion. This is a write-only
- control.
-
-``V4L2_CID_TILT_RELATIVE (integer)``
- This control turns the camera vertically by the specified amount.
- The unit is undefined. A positive value moves the camera up, a
- negative value down. A value of zero does not cause motion. This is
- a write-only control.
-
-``V4L2_CID_PAN_RESET (button)``
- When this control is set, the camera moves horizontally to the
- default position.
-
-``V4L2_CID_TILT_RESET (button)``
- When this control is set, the camera moves vertically to the default
- position.
-
-``V4L2_CID_PAN_ABSOLUTE (integer)``
- This control turns the camera horizontally to the specified
- position. Positive values move the camera to the right (clockwise
- when viewed from above), negative values to the left. Drivers should
- interpret the values as arc seconds, with valid values between -180
- * 3600 and +180 * 3600 inclusive.
-
-``V4L2_CID_TILT_ABSOLUTE (integer)``
- This control turns the camera vertically to the specified position.
- Positive values move the camera up, negative values down. Drivers
- should interpret the values as arc seconds, with valid values
- between -180 * 3600 and +180 * 3600 inclusive.
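-
-Since one degree is 3600 arc seconds, the valid range above corresponds
-to -180..+180 degrees. A hypothetical conversion helper:
-
-.. code-block:: c
-
-    #include <linux/types.h>
-
-    /* Convert degrees to the arc-second units used by
-     * V4L2_CID_PAN_ABSOLUTE and V4L2_CID_TILT_ABSOLUTE. */
-    static __s32 arcsec_from_degrees(double degrees)
-    {
-        return (__s32)(degrees * 3600.0);    /* 90 deg -> 324000 */
-    }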
-
-``V4L2_CID_FOCUS_ABSOLUTE (integer)``
- This control sets the focal point of the camera to the specified
- position. The unit is undefined. Positive values set the focus
- closer to the camera, negative values towards infinity.
-
-``V4L2_CID_FOCUS_RELATIVE (integer)``
- This control moves the focal point of the camera by the specified
- amount. The unit is undefined. Positive values move the focus closer
- to the camera, negative values towards infinity. This is a
- write-only control.
-
-``V4L2_CID_FOCUS_AUTO (boolean)``
- Enables continuous automatic focus adjustments. The effect of manual
- focus adjustments while this feature is enabled is undefined;
- drivers should ignore such requests.
-
-``V4L2_CID_AUTO_FOCUS_START (button)``
- Starts a single auto focus process. The effect of setting this control
- when ``V4L2_CID_FOCUS_AUTO`` is set to ``TRUE`` (1) is undefined;
- drivers should ignore such requests.
-
-``V4L2_CID_AUTO_FOCUS_STOP (button)``
- Aborts automatic focusing started with ``V4L2_CID_AUTO_FOCUS_START``
- control. It is effective only when the continuous autofocus is
- disabled, that is when ``V4L2_CID_FOCUS_AUTO`` control is set to
- ``FALSE`` (0).
-
-.. _v4l2-auto-focus-status:
-
-``V4L2_CID_AUTO_FOCUS_STATUS (bitmask)``
- The automatic focus status. This is a read-only control.
-
- Setting ``V4L2_LOCK_FOCUS`` lock bit of the ``V4L2_CID_3A_LOCK``
- control may stop updates of the ``V4L2_CID_AUTO_FOCUS_STATUS``
- control value.
-
-.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_AUTO_FOCUS_STATUS_IDLE``
- - Automatic focus is not active.
- * - ``V4L2_AUTO_FOCUS_STATUS_BUSY``
- - Automatic focusing is in progress.
- * - ``V4L2_AUTO_FOCUS_STATUS_REACHED``
- - Focus has been reached.
- * - ``V4L2_AUTO_FOCUS_STATUS_FAILED``
- - Automatic focus has failed, the driver will not transition from
- this state until another action is performed by an application.
-
-
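-A sketch of a single autofocus cycle driven by these controls
-(hypothetical helpers; a real application might wait for control events
-instead of polling):
-
-.. code-block:: c
-
-    #include <string.h>
-    #include <unistd.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    /* Set or get one camera-class control via the extended control API. */
-    static int camera_ctrl(int fd, unsigned long req, __u32 id, __s32 *value)
-    {
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-        int ret;
-
-        memset(&ctrl, 0, sizeof(ctrl));
-        ctrl.id = id;
-        ctrl.value = *value;
-
-        memset(&ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_CAMERA;
-        ctrls.count = 1;
-        ctrls.controls = &ctrl;
-
-        ret = ioctl(fd, req, &ctrls);
-        *value = ctrl.value;
-        return ret;
-    }
-
-    /* Trigger one autofocus run and wait for a terminal state. */
-    static int autofocus_once(int fd)
-    {
-        __s32 v = 1;
-
-        if (camera_ctrl(fd, VIDIOC_S_EXT_CTRLS, V4L2_CID_AUTO_FOCUS_START, &v))
-            return -1;
-
-        for (;;) {
-            v = 0;
-            if (camera_ctrl(fd, VIDIOC_G_EXT_CTRLS, V4L2_CID_AUTO_FOCUS_STATUS, &v))
-                return -1;
-            if (v & V4L2_AUTO_FOCUS_STATUS_REACHED)
-                return 0;
-            if (v & V4L2_AUTO_FOCUS_STATUS_FAILED)
-                return 1;
-            usleep(10000);               /* still IDLE or BUSY */
-        }
-    }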
-
-.. _v4l2-auto-focus-range:
-
-``V4L2_CID_AUTO_FOCUS_RANGE``
- (enum)
-
-enum v4l2_auto_focus_range -
- Determines the auto focus distance range within which the lens may be
- adjusted.
-
-.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_AUTO_FOCUS_RANGE_AUTO``
- - The camera automatically selects the focus range.
- * - ``V4L2_AUTO_FOCUS_RANGE_NORMAL``
- - Normal distance range, limited for best automatic focus
- performance.
- * - ``V4L2_AUTO_FOCUS_RANGE_MACRO``
- - Macro (close-up) auto focus. The camera will use its minimum
- possible distance for auto focus.
- * - ``V4L2_AUTO_FOCUS_RANGE_INFINITY``
- - The lens is set to focus on an object at infinite distance.
-
-
-
-``V4L2_CID_ZOOM_ABSOLUTE (integer)``
- Specify the objective lens focal length as an absolute value. The
- zoom unit is driver-specific and its value should be a positive
- integer.
-
-``V4L2_CID_ZOOM_RELATIVE (integer)``
- Specify the objective lens focal length relatively to the current
- value. Positive values move the zoom lens group towards the
- telephoto direction, negative values towards the wide-angle
- direction. The zoom unit is driver-specific. This is a write-only
- control.
-
-``V4L2_CID_ZOOM_CONTINUOUS (integer)``
- Move the objective lens group at the specified speed until it
- reaches physical device limits or until an explicit request to stop
- the movement. A positive value moves the zoom lens group towards the
- telephoto direction. A value of zero stops the zoom lens group
- movement. A negative value moves the zoom lens group towards the
- wide-angle direction. The zoom speed unit is driver-specific.
-
-``V4L2_CID_IRIS_ABSOLUTE (integer)``
- This control sets the camera's aperture to the specified value. The
- unit is undefined. Larger values open the iris wider, smaller values
- close it.
-
-``V4L2_CID_IRIS_RELATIVE (integer)``
- This control modifies the camera's aperture by the specified amount.
- The unit is undefined. Positive values open the iris one step
- further, negative values close it one step further. This is a
- write-only control.
-
-``V4L2_CID_PRIVACY (boolean)``
- Prevent video from being acquired by the camera. When this control
- is set to ``TRUE`` (1), no image can be captured by the camera.
- Common means to enforce privacy are mechanical obturation of the
- sensor and firmware image processing, but the device is not
- restricted to these methods. Devices that implement the privacy
- control must support read access and may support write access.
-
-``V4L2_CID_BAND_STOP_FILTER (integer)``
- Switch the band-stop filter of a camera sensor on or off, or specify
- its strength. Such band-stop filters can be used, for example, to
- filter out the fluorescent light component.
-
-.. _v4l2-auto-n-preset-white-balance:
-
-``V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE``
- (enum)
-
-enum v4l2_auto_n_preset_white_balance -
- Sets white balance to automatic, manual or a preset. The presets
- determine the color temperature of the light as a hint to the camera
- for white balance adjustments, resulting in the most accurate color
- representation. The following white balance presets are listed in
- order of increasing color temperature.
-
-.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_WHITE_BALANCE_MANUAL``
- - Manual white balance.
- * - ``V4L2_WHITE_BALANCE_AUTO``
- - Automatic white balance adjustments.
- * - ``V4L2_WHITE_BALANCE_INCANDESCENT``
- - White balance setting for incandescent (tungsten) lighting. It
- generally cools down the colors and corresponds approximately to
- 2500...3500 K color temperature range.
- * - ``V4L2_WHITE_BALANCE_FLUORESCENT``
- - White balance preset for fluorescent lighting. It corresponds
- approximately to 4000...5000 K color temperature.
- * - ``V4L2_WHITE_BALANCE_FLUORESCENT_H``
- - With this setting the camera will compensate for fluorescent H
- lighting.
- * - ``V4L2_WHITE_BALANCE_HORIZON``
- - White balance setting for horizon daylight. It corresponds
- approximately to 5000 K color temperature.
- * - ``V4L2_WHITE_BALANCE_DAYLIGHT``
- - White balance preset for daylight (with clear sky). It corresponds
- approximately to 5000...6500 K color temperature.
- * - ``V4L2_WHITE_BALANCE_FLASH``
- - With this setting the camera will compensate for the flash light.
- It slightly warms up the colors and corresponds roughly to
- 5000...5500 K color temperature.
- * - ``V4L2_WHITE_BALANCE_CLOUDY``
- - White balance preset for moderately overcast sky. This option
- corresponds approximately to 6500...8000 K color temperature
- range.
- * - ``V4L2_WHITE_BALANCE_SHADE``
- - White balance preset for shade or heavily overcast sky. It
- corresponds approximately to 9000...10000 K color temperature.
-
-
-
-.. _v4l2-wide-dynamic-range:
-
-``V4L2_CID_WIDE_DYNAMIC_RANGE (boolean)``
- Enables or disables the camera's wide dynamic range feature. This
- feature makes it possible to obtain clear images in situations where
- the intensity of the illumination varies significantly throughout the
- scene, i.e. there are simultaneously very dark and very bright areas.
- It is most commonly realized in cameras by combining two subsequent
- frames with different exposure times. [#f1]_
-
-.. _v4l2-image-stabilization:
-
-``V4L2_CID_IMAGE_STABILIZATION (boolean)``
- Enables or disables image stabilization.
-
-``V4L2_CID_ISO_SENSITIVITY (integer menu)``
- Determines the ISO equivalent of an image sensor, indicating the sensor's
- sensitivity to light. The numbers are expressed in arithmetic scale,
- as per :ref:`iso12232` standard, where doubling the sensor
- sensitivity is represented by doubling the numerical ISO value.
- Applications should interpret the values as standard ISO values
- multiplied by 1000, e.g. control value 800 stands for ISO 0.8.
- Drivers will usually support only a subset of standard ISO values.
- The effect of setting this control while the
- ``V4L2_CID_ISO_SENSITIVITY_AUTO`` control is set to a value other
- than ``V4L2_ISO_SENSITIVITY_MANUAL`` is undefined; drivers
- should ignore such requests.
-
-.. _v4l2-iso-sensitivity-auto-type:
-
-``V4L2_CID_ISO_SENSITIVITY_AUTO``
- (enum)
-
-enum v4l2_iso_sensitivity_auto_type -
- Enables or disables automatic ISO sensitivity adjustments.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_ISO_SENSITIVITY_MANUAL``
- - Manual ISO sensitivity.
- * - ``V4L2_ISO_SENSITIVITY_AUTO``
- - Automatic ISO sensitivity adjustments.
-
-
-
-.. _v4l2-scene-mode:
-
-``V4L2_CID_SCENE_MODE``
- (enum)
-
-enum v4l2_scene_mode -
- This control allows selecting scene programs, the camera's automatic
- modes optimized for common shooting scenes. Within these modes the
- camera determines the best exposure, aperture, focusing, light
- metering, white balance and equivalent sensitivity. The controls of
- those parameters are influenced by the scene mode control. The exact
- behavior in each mode is subject to the camera specification.
-
- When the scene mode feature is not used, this control should be set
- to ``V4L2_SCENE_MODE_NONE`` to make sure the other possibly related
- controls are accessible. The following scene programs are defined:
-
-.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_SCENE_MODE_NONE``
- - The scene mode feature is disabled.
- * - ``V4L2_SCENE_MODE_BACKLIGHT``
- - Backlight. Compensates for dark shadows when light is coming from
- behind a subject, also by automatically turning on the flash.
- * - ``V4L2_SCENE_MODE_BEACH_SNOW``
- - Beach and snow. This mode compensates for all-white or bright
- scenes, which tend to look gray and low contrast, when the camera's
- automatic exposure is based on an average scene brightness. To
- compensate, this mode automatically slightly overexposes the
- frames. The white balance may also be adjusted to compensate for
- the fact that reflected snow looks bluish rather than white.
- * - ``V4L2_SCENE_MODE_CANDLELIGHT``
- - Candle light. The camera generally raises the ISO sensitivity and
- lowers the shutter speed. This mode compensates for a relatively
- close subject in the scene. The flash is disabled in order to
- preserve the ambiance of the light.
- * - ``V4L2_SCENE_MODE_DAWN_DUSK``
- - Dawn and dusk. Preserves the colors seen in low natural light
- before dusk and after dawn. The camera may turn off the flash, and
- automatically focus at infinity. It will usually boost saturation
- and lower the shutter speed.
- * - ``V4L2_SCENE_MODE_FALL_COLORS``
- - Fall colors. Increases saturation and adjusts white balance for
- color enhancement. Pictures of autumn leaves get saturated reds
- and yellows.
- * - ``V4L2_SCENE_MODE_FIREWORKS``
- - Fireworks. Long exposure times are used to capture the expanding
- burst of light from a firework. The camera may invoke image
- stabilization.
- * - ``V4L2_SCENE_MODE_LANDSCAPE``
- - Landscape. The camera may choose a small aperture to provide deep
- depth of field and long exposure duration to help capture detail
- in dim light conditions. The focus is fixed at infinity. Suitable
- for distant and wide scenery.
- * - ``V4L2_SCENE_MODE_NIGHT``
- - Night, also known as Night Landscape. Designed for low light
- conditions, it preserves detail in the dark areas without blowing
- out bright objects. The camera generally sets itself to a
- medium-to-high ISO sensitivity, with a relatively long exposure
- time, and turns flash off. As such, there will be increased image
- noise and the possibility of blurred image.
- * - ``V4L2_SCENE_MODE_PARTY_INDOOR``
- - Party and indoor. Designed to capture indoor scenes that are lit
- by indoor background lighting as well as the flash. The camera
- usually increases ISO sensitivity, and adjusts exposure for the
- low light conditions.
- * - ``V4L2_SCENE_MODE_PORTRAIT``
- - Portrait. The camera adjusts the aperture so that the depth of
- field is reduced, which helps to isolate the subject against a
- smooth background. Most cameras recognize the presence of faces in
- the scene and focus on them. The color hue is adjusted to enhance
- skin tones. The intensity of the flash is often reduced.
- * - ``V4L2_SCENE_MODE_SPORTS``
- - Sports. Significantly increases ISO and uses a fast shutter speed
- to freeze motion of rapidly-moving subjects. Increased image noise
- may be seen in this mode.
- * - ``V4L2_SCENE_MODE_SUNSET``
- - Sunset. Preserves deep hues seen in sunsets and sunrises. It bumps
- up the saturation.
- * - ``V4L2_SCENE_MODE_TEXT``
- - Text. Applies extra contrast and sharpness; it is typically a
- black-and-white mode optimized for readability. Automatic focus
- may be switched to close-up mode and this setting may also involve
- some lens-distortion correction.
-
-
-
-``V4L2_CID_3A_LOCK (bitmask)``
- This control locks or unlocks the automatic focus, exposure and
- white balance. The automatic adjustments can be paused independently
- by setting the corresponding lock bit to 1. The camera then retains
- the settings until the lock bit is cleared. The following lock bits
- are defined:
-
- When a given algorithm is not enabled, drivers should ignore
- requests to lock it and should return no error. An example might be
- an application setting bit ``V4L2_LOCK_WHITE_BALANCE`` when the
- ``V4L2_CID_AUTO_WHITE_BALANCE`` control is set to ``FALSE``. The
- value of this control may be changed by exposure, white balance or
- focus controls.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_LOCK_EXPOSURE``
- - Automatic exposure adjustments lock.
- * - ``V4L2_LOCK_WHITE_BALANCE``
- - Automatic white balance adjustments lock.
- * - ``V4L2_LOCK_FOCUS``
- - Automatic focus lock.
-
-
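-A sketch of pausing automatic exposure and white balance while leaving
-focus running (hypothetical helper name):
-
-.. code-block:: c
-
-    #include <string.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    /* Lock AE and AWB; focus keeps adjusting. */
-    static int lock_ae_awb(int fd)
-    {
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-
-        memset(&ctrl, 0, sizeof(ctrl));
-        ctrl.id = V4L2_CID_3A_LOCK;
-        ctrl.value = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE;
-
-        memset(&ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_CAMERA;
-        ctrls.count = 1;
-        ctrls.controls = &ctrl;
-
-        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-    }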
-
-``V4L2_CID_PAN_SPEED (integer)``
- This control turns the camera horizontally at the specified speed.
- The unit is undefined. A positive value moves the camera to the
- right (clockwise when viewed from above), a negative value to the
- left. A value of zero stops the motion if one is in progress and has
- no effect otherwise.
-
-``V4L2_CID_TILT_SPEED (integer)``
- This control turns the camera vertically at the specified speed. The
- unit is undefined. A positive value moves the camera up, a negative
- value down. A value of zero stops the motion if one is in progress
- and has no effect otherwise.
-
-
-.. _fm-tx-controls:
-
-FM Transmitter Control Reference
-================================
-
-The FM Transmitter (FM_TX) class includes controls for common features
-of devices capable of FM transmission. Currently this class includes
-parameters for audio compression, pilot tone generation, audio deviation
-limiter, RDS transmission and tuning power features.
-
-
-.. _fm-tx-control-id:
-
-FM_TX Control IDs
------------------
-
-``V4L2_CID_FM_TX_CLASS (class)``
- The FM_TX class descriptor. Calling
- :ref:`VIDIOC_QUERYCTRL` for this control will
- return a description of this control class.
-
-``V4L2_CID_RDS_TX_DEVIATION (integer)``
- Configures RDS signal frequency deviation level in Hz. The range and
- step are driver-specific.
-
-``V4L2_CID_RDS_TX_PI (integer)``
- Sets the RDS Programme Identification field for transmission.
-
-``V4L2_CID_RDS_TX_PTY (integer)``
- Sets the RDS Programme Type field for transmission. This encodes up
- to 31 pre-defined programme types.
-
-``V4L2_CID_RDS_TX_PS_NAME (string)``
- Sets the Programme Service name (PS_NAME) for transmission. It is
- intended for static display on a receiver. It is the primary aid to
- listeners in programme service identification and selection. In
- Annex E of :ref:`iec62106`, the RDS specification, there is a full
- description of the correct character encoding for Programme Service
- name strings. According to the RDS specification, PS is usually a
- single eight character text. However, it is also possible to find
- receivers which can scroll strings sized as 8 x N characters. So,
- this control must be configured with steps of 8 characters, and as a
- result it must always contain a string whose size is a multiple of 8.
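-
-A sketch of padding a PS name to a multiple of 8 characters before
-setting it (the helper name is hypothetical, and whether ``size`` must
-include the terminating NUL is assumed here):
-
-.. code-block:: c
-
-    #include <string.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    /* Pad the PS name with spaces to the next multiple of 8. */
-    static int set_ps_name(int fd, const char *name)
-    {
-        char buf[65];
-        size_t len = strlen(name);
-        size_t padded = (len + 7) & ~(size_t)7;
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-
-        if (padded >= sizeof(buf))
-            return -1;
-        memset(buf, ' ', padded);
-        memcpy(buf, name, len);
-        buf[padded] = '\0';
-
-        memset(&ctrl, 0, sizeof(ctrl));
-        ctrl.id = V4L2_CID_RDS_TX_PS_NAME;
-        ctrl.size = padded + 1;          /* assumed to include the NUL */
-        ctrl.string = buf;
-
-        memset(&ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
-        ctrls.count = 1;
-        ctrls.controls = &ctrl;
-
-        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-    }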
-
-``V4L2_CID_RDS_TX_RADIO_TEXT (string)``
- Sets the Radio Text info for transmission. It is a textual
- description of what is being broadcasted. RDS Radio Text can be
- applied when the broadcaster wishes to transmit longer PS names,
- programme-related information or any other text. In these cases,
- RadioText should be used in addition to ``V4L2_CID_RDS_TX_PS_NAME``.
- The encoding for Radio Text strings is also fully described in Annex
- E of :ref:`iec62106`. The length of Radio Text strings depends on
- which RDS Block is being used to transmit it, either 32 (2A block)
- or 64 (2B block). However, it is also possible to find receivers
- which can scroll strings sized as 32 x N or 64 x N characters. So,
- this control must be configured with steps of 32 or 64 characters,
- and as a result it must always contain a string whose size is a
- multiple of 32 or 64.
-
-``V4L2_CID_RDS_TX_MONO_STEREO (boolean)``
- Sets the Mono/Stereo bit of the Decoder Identification code. If set,
- then the audio was recorded as stereo.
-
-``V4L2_CID_RDS_TX_ARTIFICIAL_HEAD (boolean)``
- Sets the
- `Artificial Head <http://en.wikipedia.org/wiki/Artificial_head>`__
- bit of the Decoder Identification code. If set, then the audio was
- recorded using an artificial head.
-
-``V4L2_CID_RDS_TX_COMPRESSED (boolean)``
- Sets the Compressed bit of the Decoder Identification code. If set,
- then the audio is compressed.
-
-``V4L2_CID_RDS_TX_DYNAMIC_PTY (boolean)``
- Sets the Dynamic PTY bit of the Decoder Identification code. If set,
- then the PTY code is dynamically switched.
-
-``V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT (boolean)``
- If set, then a traffic announcement is in progress.
-
-``V4L2_CID_RDS_TX_TRAFFIC_PROGRAM (boolean)``
- If set, then the tuned programme carries traffic announcements.
-
-``V4L2_CID_RDS_TX_MUSIC_SPEECH (boolean)``
- If set, then this channel broadcasts music. If cleared, then it
- broadcasts speech. If the transmitter doesn't make this distinction,
- then it should be set.
-
-``V4L2_CID_RDS_TX_ALT_FREQS_ENABLE (boolean)``
- If set, then transmit alternate frequencies.
-
-``V4L2_CID_RDS_TX_ALT_FREQS (__u32 array)``
- The alternate frequencies in kHz units. The RDS standard allows for
- up to 25 frequencies to be defined. Drivers may support fewer
- frequencies, so check the array size.
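-
-A sketch of programming two alternate frequencies (hypothetical helper;
-the frequency values are examples, in kHz):
-
-.. code-block:: c
-
-    #include <string.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    /* Transmit two alternate frequencies. */
-    static int set_alt_freqs(int fd)
-    {
-        __u32 freqs[2] = { 87900, 106500 };
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-
-        memset(&ctrl, 0, sizeof(ctrl));
-        ctrl.id = V4L2_CID_RDS_TX_ALT_FREQS;
-        ctrl.size = sizeof(freqs);
-        ctrl.p_u32 = freqs;
-
-        memset(&ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
-        ctrls.count = 1;
-        ctrls.controls = &ctrl;
-
-        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-    }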
-
-``V4L2_CID_AUDIO_LIMITER_ENABLED (boolean)``
- Enables or disables the audio deviation limiter feature. The limiter
- is useful when trying to maximize the audio volume, minimize
- receiver-generated distortion and prevent overmodulation.
-
-``V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (integer)``
- Sets the audio deviation limiter feature release time. Unit is in
- useconds. Step and range are driver-specific.
-
-``V4L2_CID_AUDIO_LIMITER_DEVIATION (integer)``
- Configures audio frequency deviation level in Hz. The range and step
- are driver-specific.
-
-``V4L2_CID_AUDIO_COMPRESSION_ENABLED (boolean)``
- Enables or disables the audio compression feature. This feature
- amplifies signals below the threshold by a fixed gain and compresses
- audio signals above the threshold by the ratio of Threshold/(Gain +
- Threshold).
-
-``V4L2_CID_AUDIO_COMPRESSION_GAIN (integer)``
- Sets the gain for audio compression feature. It is a dB value. The
- range and step are driver-specific.
-
-``V4L2_CID_AUDIO_COMPRESSION_THRESHOLD (integer)``
- Sets the threshold level for the audio compression feature. It is a dB
- value. The range and step are driver-specific.
-
-``V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME (integer)``
- Sets the attack time for audio compression feature. It is a useconds
- value. The range and step are driver-specific.
-
-``V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME (integer)``
- Sets the release time for audio compression feature. It is a
- useconds value. The range and step are driver-specific.
-
-``V4L2_CID_PILOT_TONE_ENABLED (boolean)``
- Enables or disables the pilot tone generation feature.
-
-``V4L2_CID_PILOT_TONE_DEVIATION (integer)``
- Configures pilot tone frequency deviation level. Unit is in Hz. The
- range and step are driver-specific.
-
-``V4L2_CID_PILOT_TONE_FREQUENCY (integer)``
- Configures pilot tone frequency value. Unit is in Hz. The range and
- step are driver-specific.
-
-``V4L2_CID_TUNE_PREEMPHASIS``
- (enum)
-
-enum v4l2_preemphasis -
- Configures the pre-emphasis value for broadcasting. A pre-emphasis
- filter is applied to the broadcast to accentuate the high audio
- frequencies. Depending on the region, a time constant of either 50
- or 75 useconds is used. The enum v4l2_preemphasis defines the
- possible values:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_PREEMPHASIS_DISABLED``
- - No pre-emphasis is applied.
- * - ``V4L2_PREEMPHASIS_50_uS``
- - A pre-emphasis of 50 uS is used.
- * - ``V4L2_PREEMPHASIS_75_uS``
- - A pre-emphasis of 75 uS is used.
-
-
-
-``V4L2_CID_TUNE_POWER_LEVEL (integer)``
- Sets the output power level for signal transmission. Unit is in
- dBuV. Range and step are driver-specific.
-
-``V4L2_CID_TUNE_ANTENNA_CAPACITOR (integer)``
- Selects the value of the antenna tuning capacitor manually, or
- automatically if set to zero. Unit, range and step are
- driver-specific.
-
-For more details about the RDS specification, refer to the
-:ref:`iec62106` document from CENELEC.
-
-
-.. _flash-controls:
-
-Flash Control Reference
-=======================
-
-The V4L2 flash controls are intended to provide generic access to flash
-controller devices. Flash controller devices are typically used in
-digital cameras.
-
-The interface can support both LED and xenon flash devices. At the time
-of writing, there is no xenon flash driver using this interface.
-
-
-.. _flash-controls-use-cases:
-
-Supported use cases
--------------------
-
-
-Unsynchronised LED flash (software strobe)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Unsynchronised LED flash is controlled directly by the host; the flash
-is not synchronised to the sensor by hardware. The flash must be
-enabled by the host before the exposure of the image starts and
-disabled once it ends. The host is fully responsible for the timing of
-the flash.
-
-Example of such device: Nokia N900.
-
-
-Synchronised LED flash (hardware strobe)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The synchronised LED flash is pre-programmed by the host (power and
-timeout) but controlled by the sensor through a strobe signal from the
-sensor to the flash.
-
-The sensor controls the flash duration and timing. This information
-typically must be made available to the sensor.
-
-
-LED flash as torch
-^^^^^^^^^^^^^^^^^^
-
-An LED flash may be used as a torch, either individually or in
-conjunction with another use case involving the camera.
-
-
-.. _flash-control-id:
-
-Flash Control IDs
-"""""""""""""""""
-
-``V4L2_CID_FLASH_CLASS (class)``
- The FLASH class descriptor.
-
-``V4L2_CID_FLASH_LED_MODE (menu)``
- Defines the mode of the flash LED, the high-power white LED attached
- to the flash controller. Setting this control may not be possible in
- the presence of some faults. See V4L2_CID_FLASH_FAULT.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_FLASH_LED_MODE_NONE``
- - Off.
- * - ``V4L2_FLASH_LED_MODE_FLASH``
- - Flash mode.
- * - ``V4L2_FLASH_LED_MODE_TORCH``
- - Torch mode. See V4L2_CID_FLASH_TORCH_INTENSITY.
-
-
-
-``V4L2_CID_FLASH_STROBE_SOURCE (menu)``
- Defines the source of the flash LED strobe.
-
-.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_FLASH_STROBE_SOURCE_SOFTWARE``
- - The flash strobe is triggered by using the
- V4L2_CID_FLASH_STROBE control.
- * - ``V4L2_FLASH_STROBE_SOURCE_EXTERNAL``
- - The flash strobe is triggered by an external source. Typically
- this is a sensor, which makes it possible to synchronise the
- flash strobe start to the exposure start.
-
-
-
-``V4L2_CID_FLASH_STROBE (button)``
- Strobe flash. Valid when V4L2_CID_FLASH_LED_MODE is set to
- V4L2_FLASH_LED_MODE_FLASH and V4L2_CID_FLASH_STROBE_SOURCE
- is set to V4L2_FLASH_STROBE_SOURCE_SOFTWARE. Setting this
- control may not be possible in the presence of some faults. See
- V4L2_CID_FLASH_FAULT.
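-
-A sketch of the software strobe sequence (hypothetical helper name):
-
-.. code-block:: c
-
-    #include <string.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    /* Select flash mode with a software strobe source, then fire. */
-    static int flash_strobe_sw(int fd)
-    {
-        struct v4l2_ext_control ctrl[2];
-        struct v4l2_ext_controls ctrls;
-
-        memset(ctrl, 0, sizeof(ctrl));
-        ctrl[0].id = V4L2_CID_FLASH_LED_MODE;
-        ctrl[0].value = V4L2_FLASH_LED_MODE_FLASH;
-        ctrl[1].id = V4L2_CID_FLASH_STROBE_SOURCE;
-        ctrl[1].value = V4L2_FLASH_STROBE_SOURCE_SOFTWARE;
-
-        memset(&ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_FLASH;
-        ctrls.count = 2;
-        ctrls.controls = ctrl;
-
-        if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls))
-            return -1;
-
-        /* V4L2_CID_FLASH_STROBE is a button; the value is ignored. */
-        memset(ctrl, 0, sizeof(ctrl));
-        ctrl[0].id = V4L2_CID_FLASH_STROBE;
-        ctrls.count = 1;
-        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-    }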
-
-``V4L2_CID_FLASH_STROBE_STOP (button)``
- Stop flash strobe immediately.
-
-``V4L2_CID_FLASH_STROBE_STATUS (boolean)``
- Strobe status: whether the flash is strobing at the moment or not.
- This is a read-only control.
-
-``V4L2_CID_FLASH_TIMEOUT (integer)``
- Hardware timeout for flash. The flash strobe is stopped after this
- period of time has passed from the start of the strobe.
-
-``V4L2_CID_FLASH_INTENSITY (integer)``
- Intensity of the flash strobe when the flash LED is in flash mode
- (V4L2_FLASH_LED_MODE_FLASH). The unit should be milliamps (mA)
- if possible.
-
-``V4L2_CID_FLASH_TORCH_INTENSITY (integer)``
- Intensity of the flash LED in torch mode
- (V4L2_FLASH_LED_MODE_TORCH). The unit should be milliamps (mA)
- if possible. Setting this control may not be possible in the presence
- of some faults. See V4L2_CID_FLASH_FAULT.
-
-``V4L2_CID_FLASH_INDICATOR_INTENSITY (integer)``
- Intensity of the indicator LED. The indicator LED may be fully
- independent of the flash LED. The unit should be microamps (uA) if
- possible.
-
-``V4L2_CID_FLASH_FAULT (bitmask)``
- Faults related to the flash. The faults tell about specific problems
- in the flash chip itself or the LEDs attached to it. Faults may
- prevent further use of some of the flash controls. In particular,
- V4L2_CID_FLASH_LED_MODE is set to V4L2_FLASH_LED_MODE_NONE
- if the fault affects the flash LED. Exactly which faults have such
- an effect is chip dependent. Reading the faults resets the control
- and returns the chip to a usable state if possible.
-
-.. tabularcolumns:: |p{8.0cm}|p{9.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_FLASH_FAULT_OVER_VOLTAGE``
- - Flash controller voltage to the flash LED has exceeded the limit
- specific to the flash controller.
- * - ``V4L2_FLASH_FAULT_TIMEOUT``
- - The flash strobe was still on when the timeout set by the user ---
- the V4L2_CID_FLASH_TIMEOUT control --- expired. Not all flash
- controllers may set this in all such conditions.
- * - ``V4L2_FLASH_FAULT_OVER_TEMPERATURE``
- - The flash controller has overheated.
- * - ``V4L2_FLASH_FAULT_SHORT_CIRCUIT``
- - The short circuit protection of the flash controller has been
- triggered.
- * - ``V4L2_FLASH_FAULT_OVER_CURRENT``
- - Current in the LED power supply has exceeded the limit specific to
- the flash controller.
- * - ``V4L2_FLASH_FAULT_INDICATOR``
- - The flash controller has detected a short or open circuit
- condition on the indicator LED.
- * - ``V4L2_FLASH_FAULT_UNDER_VOLTAGE``
- - Flash controller voltage to the flash LED has been below the
- minimum limit specific to the flash controller.
- * - ``V4L2_FLASH_FAULT_INPUT_VOLTAGE``
- - The input voltage of the flash controller is below the limit at
- which strobing the flash at full current is no longer possible. The
- flag remains set as long as the condition persists.
- * - ``V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE``
- - The temperature of the LED has exceeded its allowed upper limit.
-
-
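-As noted above, reading ``V4L2_CID_FLASH_FAULT`` clears the faults when
-possible. A brief sketch of inspecting two of the fault bits, reusing
-the hypothetical file descriptor from the earlier example:
-
-.. code-block:: c
-
-    #include <stdio.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    void check_flash_faults(int fd)
-    {
-        struct v4l2_control ctrl = { .id = V4L2_CID_FLASH_FAULT };
-
-        if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) == 0) {
-            if (ctrl.value & V4L2_FLASH_FAULT_TIMEOUT)
-                fprintf(stderr, "flash strobe timed out\n");
-            if (ctrl.value & V4L2_FLASH_FAULT_OVER_TEMPERATURE)
-                fprintf(stderr, "flash controller overheated\n");
-        }
-    }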
-
-``V4L2_CID_FLASH_CHARGE (boolean)``
- Enable or disable charging of the xenon flash capacitor.
-
-``V4L2_CID_FLASH_READY (boolean)``
- Is the flash ready to strobe? Xenon flashes require their capacitors
- charged before strobing. LED flashes often require a cooldown period
- after strobe during which another strobe will not be possible. This
- is a read-only control.
-
-
-.. _jpeg-controls:
-
-JPEG Control Reference
-======================
-
-The JPEG class includes controls for common features of JPEG encoders
-and decoders. Currently it includes features for codecs implementing
-the progressive baseline DCT compression process with Huffman entropy
-coding.
-
-
-.. _jpeg-control-id:
-
-JPEG Control IDs
-----------------
-
-``V4L2_CID_JPEG_CLASS (class)``
- The JPEG class descriptor. Calling
- :ref:`VIDIOC_QUERYCTRL` for this control will
- return a description of this control class.
-
-``V4L2_CID_JPEG_CHROMA_SUBSAMPLING (menu)``
- The chroma subsampling factors describe how each component of an
- input image is sampled, with respect to the maximum sample rate in
- each spatial dimension. See :ref:`itu-t81`, clause A.1.1, for more
- details. The ``V4L2_CID_JPEG_CHROMA_SUBSAMPLING`` control determines
- how Cb and Cr components are downsampled after converting an input
- image from RGB to Y'CbCr color space.
-
-.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_444``
- - No chroma subsampling, each pixel has Y, Cr and Cb values.
- * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_422``
- - Horizontally subsample Cr, Cb components by a factor of 2.
- * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_420``
- - Subsample Cr, Cb components horizontally and vertically by 2.
- * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_411``
- - Horizontally subsample Cr, Cb components by a factor of 4.
- * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_410``
- - Subsample Cr, Cb components horizontally by 4 and vertically by 2.
- * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY``
- - Use only luminance component.
-
-
-
-``V4L2_CID_JPEG_RESTART_INTERVAL (integer)``
- The restart interval determines the interval at which RSTm markers
- (m = 0..7) are inserted. The purpose of these markers is to allow
- the entropy coding process to be reinitialized, so that blocks of an
- image can be processed independently. For the lossy compression
- processes the restart interval unit is the MCU (Minimum Coded Unit)
- and its value is contained in the DRI (Define Restart Interval)
- marker. If the ``V4L2_CID_JPEG_RESTART_INTERVAL`` control is set to
- 0, DRI and RSTm markers will not be inserted.
-
-.. _jpeg-quality-control:
-
-``V4L2_CID_JPEG_COMPRESSION_QUALITY (integer)``
- The ``V4L2_CID_JPEG_COMPRESSION_QUALITY`` control determines the
- trade-off between image quality and size. It provides a simpler
- method for applications to control image quality, without the need
- to reconfigure the luminance and chrominance quantization tables
- directly. In cases where a driver uses quantization tables configured
- directly by the application, using interfaces defined elsewhere, the
- ``V4L2_CID_JPEG_COMPRESSION_QUALITY`` control should be set by the
- driver to 0.
-
- The value range of this control is driver-specific. Only positive,
- non-zero values are meaningful. The recommended range is 1 - 100,
- where larger values correspond to better image quality.
-
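-As a sketch, an application could request a quality of 80 (on the
-recommended 1 - 100 scale) with a plain control write, where ``fd`` is
-an open handle on the encoder's video node (error handling omitted):
-
-.. code-block:: c
-
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    void set_jpeg_quality(int fd)
-    {
-        struct v4l2_control ctrl = {
-            .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
-            .value = 80, /* driver-specific scale, commonly 1 - 100 */
-        };
-
-        ioctl(fd, VIDIOC_S_CTRL, &ctrl);
-    }
-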
-.. _jpeg-active-marker-control:
-
-``V4L2_CID_JPEG_ACTIVE_MARKER (bitmask)``
- Specifies which JPEG markers are included in the compressed stream. This
- control is valid only for encoders.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_JPEG_ACTIVE_MARKER_APP0``
- - Application data segment APP\ :sub:`0`.
- * - ``V4L2_JPEG_ACTIVE_MARKER_APP1``
- - Application data segment APP\ :sub:`1`.
- * - ``V4L2_JPEG_ACTIVE_MARKER_COM``
- - Comment segment.
- * - ``V4L2_JPEG_ACTIVE_MARKER_DQT``
- - Quantization tables segment.
- * - ``V4L2_JPEG_ACTIVE_MARKER_DHT``
- - Huffman tables segment.
-
-
-
-For more details about JPEG specification, refer to :ref:`itu-t81`,
-:ref:`jfif`, :ref:`w3c-jpeg-jfif`.
-
-
-.. _image-source-controls:
-
-Image Source Control Reference
-==============================
-
-The Image Source control class is intended for low-level control of
-image source devices such as image sensors. The devices feature an
-analogue to digital converter and a bus transmitter to transmit the
-image data out of the device.
-
-
-.. _image-source-control-id:
-
-Image Source Control IDs
-------------------------
-
-``V4L2_CID_IMAGE_SOURCE_CLASS (class)``
- The IMAGE_SOURCE class descriptor.
-
-``V4L2_CID_VBLANK (integer)``
- Vertical blanking. The idle period after every frame during which no
- image data is produced. The unit of vertical blanking is a line.
- Every line has a length of the image width plus the horizontal
- blanking, at the pixel rate defined by the ``V4L2_CID_PIXEL_RATE``
- control in the same sub-device.
-
-``V4L2_CID_HBLANK (integer)``
- Horizontal blanking. The idle period after every line of image data
- during which no image data is produced. The unit of horizontal
- blanking is pixels.
-
-``V4L2_CID_ANALOGUE_GAIN (integer)``
- Analogue gain is gain affecting all colour components in the pixel
- matrix. The gain operation is performed in the analogue domain
- before A/D conversion.
-
-``V4L2_CID_TEST_PATTERN_RED (integer)``
- Test pattern red colour component.
-
-``V4L2_CID_TEST_PATTERN_GREENR (integer)``
- Test pattern green (next to red) colour component.
-
-``V4L2_CID_TEST_PATTERN_BLUE (integer)``
- Test pattern blue colour component.
-
-``V4L2_CID_TEST_PATTERN_GREENB (integer)``
- Test pattern green (next to blue) colour component.
-
-
-.. _image-process-controls:
-
-Image Process Control Reference
-===============================
-
-The Image Process control class is intended for low-level control of
-image processing functions. Unlike ``V4L2_CID_IMAGE_SOURCE_CLASS``, the
-controls in this class affect the processing of the image, not the
-capturing of it.
-
-
-.. _image-process-control-id:
-
-Image Process Control IDs
--------------------------
-
-``V4L2_CID_IMAGE_PROC_CLASS (class)``
- The IMAGE_PROC class descriptor.
-
-``V4L2_CID_LINK_FREQ (integer menu)``
- Data bus frequency. Together with the media bus pixel code and the
- bus type (clock cycles per sample), the data bus frequency defines
- the pixel rate (``V4L2_CID_PIXEL_RATE``) in the pixel array (or
- possibly elsewhere, if the device is not an image sensor). The frame
- rate can be calculated from the pixel clock, image width and height,
- and horizontal and vertical blanking. While the pixel rate control
- may be defined elsewhere than in the subdev containing the pixel
- array, the frame rate cannot be obtained from that information. This
- is because only in the pixel array can it be assumed that the
- vertical and horizontal blanking information is exact: no other
- blanking is allowed in the pixel array. The frame rate is therefore
- selected by choosing the desired horizontal and vertical blanking.
- The unit of this control is Hz.
-
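-A sketch of the frame rate computation described above, assuming a
-pixel-array sub-device, open as ``fd``, that exposes all three of the
-referenced controls, with ``width`` and ``height`` taken from the
-active pad format (not shown; error handling omitted):
-
-.. code-block:: c
-
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    double frame_rate(int fd, unsigned int width, unsigned int height)
-    {
-        struct v4l2_ext_control ctrls[3] = {
-            { .id = V4L2_CID_PIXEL_RATE }, /* 64-bit, pixels/second */
-            { .id = V4L2_CID_HBLANK },
-            { .id = V4L2_CID_VBLANK },
-        };
-        struct v4l2_ext_controls arg = {
-            .which = V4L2_CTRL_WHICH_CUR_VAL,
-            .count = 3,
-            .controls = ctrls,
-        };
-
-        ioctl(fd, VIDIOC_G_EXT_CTRLS, &arg);
-
-        return (double)ctrls[0].value64 /
-               ((width + ctrls[1].value) * (height + ctrls[2].value));
-    }
-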
-``V4L2_CID_PIXEL_RATE (64-bit integer)``
- Pixel rate in the source pads of the subdev. This control is
- read-only and its unit is pixels / second.
-
-``V4L2_CID_TEST_PATTERN (menu)``
- Some capture/display/sensor devices have the capability to generate
- test pattern images. These hardware-specific test patterns can be
- used to test if a device is working properly.
-
-``V4L2_CID_DEINTERLACING_MODE (menu)``
- The video deinterlacing mode (such as Bob, Weave, ...). The menu items are
- driver specific and are documented in :ref:`v4l-drivers`.
-
-``V4L2_CID_DIGITAL_GAIN (integer)``
- Digital gain is the value by which all colour components
- are multiplied. Typically the digital gain applied is the
- control value divided by e.g. 0x100, meaning that a control value
- of 0x100 yields no digital gain. The no-gain
- configuration is also typically the default.
-
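-For example, with the typical 0x100 scale, a control value of 0x180
-would apply a digital gain of 1.5. A hypothetical sketch:
-
-.. code-block:: c
-
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    void set_digital_gain(int fd, double gain)
-    {
-        struct v4l2_control ctrl = {
-            .id = V4L2_CID_DIGITAL_GAIN,
-            /* Assumes a driver scaling by 0x100 (0x100 == no gain). */
-            .value = (__s32)(gain * 0x100),
-        };
-
-        ioctl(fd, VIDIOC_S_CTRL, &ctrl);
-    }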
-
-.. _dv-controls:
-
-Digital Video Control Reference
-===============================
-
-The Digital Video control class is intended to control receivers and
-transmitters for `VGA <http://en.wikipedia.org/wiki/Vga>`__,
-`DVI <http://en.wikipedia.org/wiki/Digital_Visual_Interface>`__
-(Digital Visual Interface), HDMI (:ref:`hdmi`) and DisplayPort
-(:ref:`dp`). These controls are generally expected to be private to
-the receiver or transmitter subdevice that implements them, so they are
-only exposed on the ``/dev/v4l-subdev*`` device node.
-
-.. note::
-
- Note that these devices can have multiple input or output pads which are
- hooked up to e.g. HDMI connectors. Even though the subdevice will
- receive or transmit video from/to only one of those pads, the other pads
- can still be active when it comes to EDID (Extended Display
- Identification Data, :ref:`vesaedid`) and HDCP (High-bandwidth Digital
- Content Protection System, :ref:`hdcp`) processing, allowing the
- device to do the fairly slow EDID/HDCP handling in advance. This allows
- for quick switching between connectors.
-
-These pads appear in several of the controls in this section as
-bitmasks, one bit for each pad. Bit 0 corresponds to pad 0, bit 1 to pad
-1, and so on. The maximum value of the control has the bits for all
-valid pads set.
-
-
-.. _dv-control-id:
-
-Digital Video Control IDs
--------------------------
-
-``V4L2_CID_DV_CLASS (class)``
- The Digital Video class descriptor.
-
-``V4L2_CID_DV_TX_HOTPLUG (bitmask)``
- Many connectors have a hotplug pin which is high if EDID information
- is available from the source. This control shows the state of the
- hotplug pin as seen by the transmitter. Each bit corresponds to an
- output pad on the transmitter. If an output pad does not have an
- associated hotplug pin, then the bit for that pad will be 0. This
- read-only control is applicable to DVI-D, HDMI and DisplayPort
- connectors.
-
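-A sketch of testing the hotplug state of one output pad of a
-transmitter sub-device open as ``fd``:
-
-.. code-block:: c
-
-    #include <stdio.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    void report_hotplug(int fd, unsigned int pad)
-    {
-        struct v4l2_control ctrl = { .id = V4L2_CID_DV_TX_HOTPLUG };
-
-        if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) == 0 &&
-            (ctrl.value & (1U << pad)))
-            printf("hotplug detected on pad %u\n", pad);
-    }
-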
-``V4L2_CID_DV_TX_RXSENSE (bitmask)``
- Rx Sense is the detection of pull-ups on the TMDS clock lines. This
- normally means that the sink has left/entered standby (i.e. the
- transmitter can sense that the receiver is ready to receive video).
- Each bit corresponds to an output pad on the transmitter. If an
- output pad does not have an associated Rx Sense, then the bit for
- that pad will be 0. This read-only control is applicable to DVI-D
- and HDMI devices.
-
-``V4L2_CID_DV_TX_EDID_PRESENT (bitmask)``
- When the transmitter sees the hotplug signal from the receiver it
- will attempt to read the EDID. If set, then the transmitter has read
- at least the first block (= 128 bytes). Each bit corresponds to an
- output pad on the transmitter. If an output pad does not support
- EDIDs, then the bit for that pad will be 0. This read-only control
- is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors.
-
-``V4L2_CID_DV_TX_MODE``
- (enum)
-
-enum v4l2_dv_tx_mode -
- HDMI transmitters can transmit in DVI-D mode (just video) or in HDMI
- mode (video + audio + auxiliary data). This control selects which
- mode to use: V4L2_DV_TX_MODE_DVI_D or V4L2_DV_TX_MODE_HDMI.
- This control is applicable to HDMI connectors.
-
-``V4L2_CID_DV_TX_RGB_RANGE``
- (enum)
-
-enum v4l2_dv_rgb_range -
- Select the quantization range for RGB output. V4L2_DV_RANGE_AUTO
- follows the RGB quantization range specified in the standard for the
- video interface (i.e. :ref:`cea861` for HDMI).
- V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the
- standard to be compatible with sinks that have not implemented the
- standard correctly (unfortunately quite common for HDMI and DVI-D).
- Full range allows all possible values to be used whereas limited
- range sets the range to (16 << (N-8)) to (235 << (N-8)), where N is
- the number of bits per component; for example, with 8 bits per
- component the limited range is 16 to 235. This control is applicable
- to VGA, DVI-A/D, HDMI and DisplayPort connectors.
-
-``V4L2_CID_DV_TX_IT_CONTENT_TYPE``
- (enum)
-
-enum v4l2_dv_it_content_type -
- Configures the IT Content Type of the transmitted video. This
- information is sent over HDMI and DisplayPort connectors as part of
- the AVI InfoFrame. The term 'IT Content' is used for content that
- originates from a computer as opposed to content from a TV broadcast
- or an analog source. The enum v4l2_dv_it_content_type defines
- the possible content types:
-
-.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_DV_IT_CONTENT_TYPE_GRAPHICS``
- - Graphics content. Pixel data should be passed unfiltered and
- without analog reconstruction.
- * - ``V4L2_DV_IT_CONTENT_TYPE_PHOTO``
- - Photo content. The content is derived from digital still pictures.
- The content should be passed through with minimal scaling and
- picture enhancements.
- * - ``V4L2_DV_IT_CONTENT_TYPE_CINEMA``
- - Cinema content.
- * - ``V4L2_DV_IT_CONTENT_TYPE_GAME``
- - Game content. Audio and video latency should be minimized.
- * - ``V4L2_DV_IT_CONTENT_TYPE_NO_ITC``
- - No IT Content information is available and the ITC bit in the AVI
- InfoFrame is set to 0.
-
-
-
-``V4L2_CID_DV_RX_POWER_PRESENT (bitmask)``
- Detects whether the receiver receives power from the source (e.g.
- HDMI carries 5V on one of the pins). This is often used to power an
- EEPROM which contains EDID information, such that the source can
- read the EDID even if the sink is in standby/power off. Each bit
- corresponds to an input pad on the receiver. If an input pad
- cannot detect whether power is present, then the bit for that pad
- will be 0. This read-only control is applicable to DVI-D, HDMI and
- DisplayPort connectors.
-
-``V4L2_CID_DV_RX_RGB_RANGE``
- (enum)
-
-enum v4l2_dv_rgb_range -
- Select the quantization range for RGB input. V4L2_DV_RANGE_AUTO
- follows the RGB quantization range specified in the standard for the
- video interface (i.e. :ref:`cea861` for HDMI).
- V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the
- standard to be compatible with sources that have not implemented the
- standard correctly (unfortunately quite common for HDMI and DVI-D).
- Full range allows all possible values to be used whereas limited
- range sets the range to (16 << (N-8)) to (235 << (N-8)), where N is
- the number of bits per component; for example, with 8 bits per
- component the limited range is 16 to 235. This control is applicable
- to VGA, DVI-A/D, HDMI and DisplayPort connectors.
-
-``V4L2_CID_DV_RX_IT_CONTENT_TYPE``
- (enum)
-
-enum v4l2_dv_it_content_type -
- Reads the IT Content Type of the received video. This information is
- sent over HDMI and DisplayPort connectors as part of the AVI
- InfoFrame. The term 'IT Content' is used for content that originates
- from a computer as opposed to content from a TV broadcast or an
- analog source. See ``V4L2_CID_DV_TX_IT_CONTENT_TYPE`` for the
- available content types.
-
-
-.. _fm-rx-controls:
-
-FM Receiver Control Reference
-=============================
-
-The FM Receiver (FM_RX) class includes controls for common features of
-FM Reception capable devices.
-
-
-.. _fm-rx-control-id:
-
-FM_RX Control IDs
------------------
-
-``V4L2_CID_FM_RX_CLASS (class)``
- The FM_RX class descriptor. Calling
- :ref:`VIDIOC_QUERYCTRL` for this control will
- return a description of this control class.
-
-``V4L2_CID_RDS_RECEPTION (boolean)``
- Enables/disables RDS reception by the radio tuner.
-
-``V4L2_CID_RDS_RX_PTY (integer)``
- Gets the RDS Programme Type field. This encodes up to 31 pre-defined
- programme types.
-
-``V4L2_CID_RDS_RX_PS_NAME (string)``
- Gets the Programme Service name (PS_NAME). It is intended for
- static display on a receiver and is the primary aid to listeners in
- programme service identification and selection. Annex E of
- :ref:`iec62106`, the RDS specification, contains a full
- description of the correct character encoding for Programme Service
- name strings. Also according to the RDS specification, PS is usually
- a single eight-character text. However, it is also possible to find
- receivers which can scroll strings sized as 8 x N characters. So,
- this control must be configured with steps of 8 characters; the
- resulting string size is always a multiple of 8.
-
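-Reading string controls such as the PS name requires the extended
-control interface. A sketch assuming a simple, non-scrolling
-eight-character name (a scrolling name would need a buffer sized in
-multiples of 8):
-
-.. code-block:: c
-
-    #include <stdio.h>
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    void print_ps_name(int fd)
-    {
-        char ps_name[9] = "";
-        struct v4l2_ext_control ctrl = {
-            .id = V4L2_CID_RDS_RX_PS_NAME,
-            .size = sizeof(ps_name),
-            .string = ps_name,
-        };
-        struct v4l2_ext_controls arg = {
-            .which = V4L2_CTRL_WHICH_CUR_VAL,
-            .count = 1,
-            .controls = &ctrl,
-        };
-
-        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &arg) == 0)
-            printf("PS: %s\n", ps_name);
-    }
-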
-``V4L2_CID_RDS_RX_RADIO_TEXT (string)``
- Gets the Radio Text info. It is a textual description of what is
- being broadcast. RDS Radio Text can be applied when the broadcaster
- wishes to transmit longer PS names, programme-related information or
- any other text. In these cases, RadioText can be used in addition to
- ``V4L2_CID_RDS_RX_PS_NAME``. The encoding for Radio Text strings is
- also fully described in Annex E of :ref:`iec62106`. The length of
- Radio Text strings depends on which RDS Block is being used to
- transmit it, either 32 (2A block) or 64 (2B block) characters.
- However, it is also possible to find receivers which can scroll
- strings sized as 32 x N or 64 x N characters. So, this control must
- be configured with steps of 32 or 64 characters; the resulting
- string size is always a multiple of 32 or 64.
-
-``V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT (boolean)``
- If set, then a traffic announcement is in progress.
-
-``V4L2_CID_RDS_RX_TRAFFIC_PROGRAM (boolean)``
- If set, then the tuned programme carries traffic announcements.
-
-``V4L2_CID_RDS_RX_MUSIC_SPEECH (boolean)``
- If set, then this channel broadcasts music. If cleared, then it
- broadcasts speech. If the transmitter doesn't make this distinction,
- then it will be set.
-
-``V4L2_CID_TUNE_DEEMPHASIS``
- (enum)
-
-enum v4l2_deemphasis -
- Configures the de-emphasis value for reception. A de-emphasis filter
- is applied to the received broadcast to compensate for the
- pre-emphasis (accentuation of the high audio frequencies) applied at
- the transmitter. Depending on the region, a time constant of either
- 50 or 75 microseconds is used. The enum v4l2_deemphasis defines the
- possible values for de-emphasis. Here they are:
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_DEEMPHASIS_DISABLED``
- - No de-emphasis is applied.
- * - ``V4L2_DEEMPHASIS_50_uS``
- - A de-emphasis time constant of 50 µs is used.
- * - ``V4L2_DEEMPHASIS_75_uS``
- - A de-emphasis time constant of 75 µs is used.
-
-
-
-
-.. _detect-controls:
-
-Detect Control Reference
-========================
-
-The Detect class includes controls for common features of various motion
-or object detection capable devices.
-
-
-.. _detect-control-id:
-
-Detect Control IDs
-------------------
-
-``V4L2_CID_DETECT_CLASS (class)``
- The Detect class descriptor. Calling
- :ref:`VIDIOC_QUERYCTRL` for this control will
- return a description of this control class.
-
-``V4L2_CID_DETECT_MD_MODE (menu)``
- Sets the motion detection mode.
-
-.. tabularcolumns:: |p{7.5cm}|p{10.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
- * - ``V4L2_DETECT_MD_MODE_DISABLED``
- - Disable motion detection.
- * - ``V4L2_DETECT_MD_MODE_GLOBAL``
- - Use a single motion detection threshold.
- * - ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID``
- - The image is divided into a grid, each cell with its own motion
- detection threshold. These thresholds are set through the
- ``V4L2_CID_DETECT_MD_THRESHOLD_GRID`` matrix control.
- * - ``V4L2_DETECT_MD_MODE_REGION_GRID``
- - The image is divided into a grid, each cell with its own region
- value that specifies which per-region motion detection thresholds
- should be used. Each region has its own thresholds. How these
- per-region thresholds are set up is driver-specific. The region
- values for the grid are set through the
- ``V4L2_CID_DETECT_MD_REGION_GRID`` matrix control.
-
-
-
-``V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD (integer)``
- Sets the global motion detection threshold to be used with the
- ``V4L2_DETECT_MD_MODE_GLOBAL`` motion detection mode.
-
-``V4L2_CID_DETECT_MD_THRESHOLD_GRID (__u16 matrix)``
- Sets the motion detection thresholds for each cell in the grid. To
- be used with the ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID`` motion
- detection mode. Matrix element (0, 0) represents the cell at the
- top-left of the grid.
-
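-A sketch of filling the threshold grid with one uniform value. The 4x4
-grid dimensions and the threshold are made up for the example; the real
-dimensions are reported by the driver when the control is queried:
-
-.. code-block:: c
-
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    void set_uniform_md_grid(int fd)
-    {
-        __u16 grid[4][4]; /* hypothetical 4x4 grid */
-        unsigned int i, j;
-        struct v4l2_ext_control ctrl = {
-            .id = V4L2_CID_DETECT_MD_THRESHOLD_GRID,
-            .size = sizeof(grid),
-            .p_u16 = &grid[0][0],
-        };
-        struct v4l2_ext_controls arg = {
-            .which = V4L2_CTRL_WHICH_CUR_VAL,
-            .count = 1,
-            .controls = &ctrl,
-        };
-
-        for (i = 0; i < 4; i++)
-            for (j = 0; j < 4; j++)
-                grid[i][j] = 150; /* same threshold everywhere */
-
-        ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
-    }
-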
-``V4L2_CID_DETECT_MD_REGION_GRID (__u8 matrix)``
- Sets the motion detection region value for each cell in the grid. To
- be used with the ``V4L2_DETECT_MD_MODE_REGION_GRID`` motion
- detection mode. Matrix element (0, 0) represents the cell at the
- top-left of the grid.
-
-
-.. _rf-tuner-controls:
-
-RF Tuner Control Reference
-==========================
-
-The RF Tuner (RF_TUNER) class includes controls for common features of
-devices that have an RF tuner.
-
-In this context, an RF tuner is the radio receiver circuit between the
-antenna and the demodulator. It receives radio frequency (RF) from the
-antenna and converts the received signal to a lower intermediate
-frequency (IF) or to baseband frequency (BB). Tuners that can output
-baseband are often called zero-IF tuners. Older tuners were typically
-simple PLL tuners inside a metal box, while newer ones are highly
-integrated chips without a metal box, so-called "silicon tuners". These
-controls are mostly applicable to the newer, feature-rich silicon
-tuners, simply because older tuners do not have many adjustable
-features.
-
-For more information about RF tuners see
-`Tuner (radio) <http://en.wikipedia.org/wiki/Tuner_%28radio%29>`__
-and `RF front end <http://en.wikipedia.org/wiki/RF_front_end>`__
-from Wikipedia.
-
-
-.. _rf-tuner-control-id:
-
-RF_TUNER Control IDs
---------------------
-
-``V4L2_CID_RF_TUNER_CLASS (class)``
- The RF_TUNER class descriptor. Calling
- :ref:`VIDIOC_QUERYCTRL` for this control will
- return a description of this control class.
-
-``V4L2_CID_RF_TUNER_BANDWIDTH_AUTO (boolean)``
- Enables/disables automatic tuner radio channel bandwidth
- configuration. In automatic mode the bandwidth configuration is
- performed by the driver.
-
-``V4L2_CID_RF_TUNER_BANDWIDTH (integer)``
- Filters on the tuner signal path are used to filter the signal
- according to the needs of the receiving party. The driver configures
- the filters to fulfil the desired bandwidth requirement. Used when
- V4L2_CID_RF_TUNER_BANDWIDTH_AUTO is not set. The unit is Hz. The
- range and step are driver-specific.
-
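-A sketch of switching to manual bandwidth configuration and requesting
-a 200 kHz channel bandwidth (the value is only an example; the valid
-range is driver-specific):
-
-.. code-block:: c
-
-    #include <sys/ioctl.h>
-    #include <linux/videodev2.h>
-
-    void set_manual_bandwidth(int fd)
-    {
-        struct v4l2_control ctrl = { 0 };
-
-        ctrl.id = V4L2_CID_RF_TUNER_BANDWIDTH_AUTO;
-        ctrl.value = 0; /* manual mode */
-        ioctl(fd, VIDIOC_S_CTRL, &ctrl);
-
-        ctrl.id = V4L2_CID_RF_TUNER_BANDWIDTH;
-        ctrl.value = 200000; /* 200 kHz */
-        ioctl(fd, VIDIOC_S_CTRL, &ctrl);
-    }
-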
-``V4L2_CID_RF_TUNER_LNA_GAIN_AUTO (boolean)``
- Enables/disables the LNA automatic gain control (AGC).
-
-``V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO (boolean)``
- Enables/disables the mixer automatic gain control (AGC).
-
-``V4L2_CID_RF_TUNER_IF_GAIN_AUTO (boolean)``
- Enables/disables the IF automatic gain control (AGC).
-
-``V4L2_CID_RF_TUNER_RF_GAIN (integer)``
- The RF amplifier is the very first amplifier on the receiver signal
- path, right after the antenna input. The difference between the
- LNA gain and the RF gain in this document is that the LNA gain is
- integrated in the tuner chip while the RF gain is provided by a
- separate chip. There may be both RF and LNA gain controls in the
- same device. The range and step are driver-specific.
-
-``V4L2_CID_RF_TUNER_LNA_GAIN (integer)``
- The LNA (low noise amplifier) gain is the first gain stage on the RF
- tuner signal path. It is located very close to the tuner's antenna
- input. Used when ``V4L2_CID_RF_TUNER_LNA_GAIN_AUTO`` is not set. See
- ``V4L2_CID_RF_TUNER_RF_GAIN`` for how the RF gain and the LNA gain
- differ from each other. The range and step are
- driver-specific.
-
-``V4L2_CID_RF_TUNER_MIXER_GAIN (integer)``
- The mixer gain is the second gain stage on the RF tuner signal path.
- It is located inside the mixer block, where the RF signal is
- down-converted. Used when ``V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO`` is
- not set. The range and step are driver-specific.
-
-``V4L2_CID_RF_TUNER_IF_GAIN (integer)``
- The IF gain is the last gain stage on the RF tuner signal path. It
- is located at the output of the RF tuner and controls the signal
- level of the intermediate frequency or baseband output. Used when
- ``V4L2_CID_RF_TUNER_IF_GAIN_AUTO`` is not set. The range and step
- are driver-specific.
-
-``V4L2_CID_RF_TUNER_PLL_LOCK (boolean)``
- Is the synthesizer PLL locked? The RF tuner is receiving the given
- frequency when this control is set. This is a read-only control.
-
-.. [#f1]
- This control may be changed to a menu control in the future, if more
- options are required.
diff --git a/Documentation/media/uapi/v4l/meta-formats.rst b/Documentation/media/uapi/v4l/meta-formats.rst
index 5f956fa784b7..b10ca9ee3968 100644
--- a/Documentation/media/uapi/v4l/meta-formats.rst
+++ b/Documentation/media/uapi/v4l/meta-formats.rst
@@ -19,8 +19,8 @@ These formats are used for the :ref:`metadata` interface only.
.. toctree::
:maxdepth: 1
- pixfmt-meta-intel-ipu3
pixfmt-meta-d4xx
+ pixfmt-meta-intel-ipu3
pixfmt-meta-uvc
pixfmt-meta-vsp1-hgo
pixfmt-meta-vsp1-hgt
diff --git a/Documentation/media/uapi/v4l/pixfmt-compressed.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
index e4c5e456df59..2675bef3eefe 100644
--- a/Documentation/media/uapi/v4l/pixfmt-compressed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
@@ -73,7 +73,7 @@ Compressed Formats
- 'MG2S'
- MPEG-2 parsed slice data, as extracted from the MPEG-2 bitstream.
This format is adapted for stateless video decoders that implement a
- MPEG-2 pipeline (using the :ref:`codec` and :ref:`media-request-api`).
+ MPEG-2 pipeline (using the :ref:`mem2mem` and :ref:`media-request-api`).
Metadata associated with the frame to decode is required to be passed
through the ``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS`` control and
quantization matrices can optionally be specified through the
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst b/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst
index dc871006b41a..7fb54339f4a7 100644
--- a/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst
@@ -1,4 +1,27 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License version
+.. 2.0 as published by the Free Software Foundation.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License version 2.0 for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-meta-fmt-params:
.. _v4l2-meta-fmt-stat-3a:
@@ -7,21 +30,22 @@
V4L2_META_FMT_IPU3_PARAMS ('ip3p'), V4L2_META_FMT_IPU3_3A ('ip3s')
******************************************************************
-.. c:type:: ipu3_uapi_stats_3a
+.. ipu3_uapi_stats_3a
3A statistics
=============
-For IPU3 ImgU, the 3A statistics accelerators collect different statistics over
-an input bayer frame. Those statistics, defined in data struct :c:type:`ipu3_uapi_stats_3a`,
-are obtained from "ipu3-imgu 3a stat" metadata capture video node, which are then
-passed to user space for statistics analysis using :c:type:`v4l2_meta_format` interface.
+The IPU3 ImgU 3A statistics accelerators collect different statistics over
+an input Bayer frame. Those statistics are obtained from the "ipu3-imgu [01] 3a
+stat" metadata capture video nodes, using the :c:type:`v4l2_meta_format`
+interface. They are formatted as described by the :c:type:`ipu3_uapi_stats_3a`
+structure.
The statistics collected are AWB (Auto-white balance) RGBS (Red, Green, Blue and
Saturation measure) cells, AWB filter response, AF (Auto-focus) filter response,
and AE (Auto-exposure) histogram.
-struct :c:type:`ipu3_uapi_4a_config` saves configurable parameters for all above.
+The struct :c:type:`ipu3_uapi_4a_config` saves all configurable parameters.
.. code-block:: c
@@ -37,105 +61,14 @@ struct :c:type:`ipu3_uapi_4a_config` saves configurable parameters for all above
struct ipu3_uapi_ff_status stats_3a_status;
};
-.. c:type:: ipu3_uapi_params
+.. ipu3_uapi_params
Pipeline parameters
===================
-IPU3 pipeline has a number of image processing stages, each of which takes a
-set of parameters as input. The major stages of pipelines are shown here:
-
-Raw pixels -> Bayer Downscaling -> Optical Black Correction ->
-
-Linearization -> Lens Shading Correction -> White Balance / Exposure /
-
-Focus Apply -> Bayer Noise Reduction -> ANR -> Demosaicing -> Color
-
-Correction Matrix -> Gamma correction -> Color Space Conversion ->
-
-Chroma Down Scaling -> Chromatic Noise Reduction -> Total Color
-
-Correction -> XNR3 -> TNR -> DDR
-
-The table below presents a description of the above algorithms.
-
-======================== =======================================================
-Name Description
-======================== =======================================================
-Optical Black Correction Optical Black Correction block subtracts a pre-defined
- value from the respective pixel values to obtain better
- image quality.
- Defined in :c:type:`ipu3_uapi_obgrid_param`.
-Linearization This block uses linearization parameters to
- correct sensor non-linearity effects. The lookup
- table is defined in
- :c:type:`ipu3_uapi_isp_lin_vmem_params`.
-SHD Lens shading correction is used to correct spatial
- non-uniformity of the pixel response due to optical
- lens shading. This is done by applying a different gain
- for each pixel. The gain, black level etc are
- configured in :c:type:`ipu3_uapi_shd_config_static`.
-BNR Bayer noise reduction block removes image noise by
- applying a bilateral filter.
- See :c:type:`ipu3_uapi_bnr_static_config` for details.
-ANR Advanced Noise Reduction is a block based algorithm
- that performs noise reduction in the Bayer domain. The
- convolution matrix etc can be found in
- :c:type:`ipu3_uapi_anr_config`.
-Demosaicing Demosaicing converts raw sensor data in Bayer format
- into an RGB (Red, Green, Blue) representation. It
- also outputs an estimate of the Y channel for
- subsequent stream processing by the firmware. The
- struct is defined as
- :c:type:`ipu3_uapi_dm_config`. (TODO)
-Color Correction The Color Correction algorithm transforms the sensor
- specific color space to the standard "sRGB" color
- space. This is done by applying a 3x3 matrix defined
- in :c:type:`ipu3_uapi_ccm_mat_config`.
-Gamma correction Gamma correction :c:type:`ipu3_uapi_gamma_config` is a
- basic non-linear tone mapping correction that is
- applied per pixel for each pixel component.
-CSC Color space conversion transforms each pixel from the
- RGB primary representation to a YUV (Y: luminance,
- UV: chrominance) representation. This is done by
- applying a 3x3 matrix defined in
- :c:type:`ipu3_uapi_csc_mat_config`
-CDS Chroma down sampling
- After the CSC is performed, the Chroma Down Sampling
- is applied for a UV plane down sampling by a factor
- of 2 in each direction for YUV 4:2:0 using a 4x2
- configurable filter :c:type:`ipu3_uapi_cds_params`.
-CHNR Chroma noise reduction
- This block processes only the chrominance pixels and
- performs noise reduction by cleaning the high
- frequency noise.
- See struct :c:type:`ipu3_uapi_yuvp1_chnr_config`.
-TCC Total color correction as defined in struct
- :c:type:`ipu3_uapi_yuvp2_tcc_static_config`.
-XNR3 eXtreme Noise Reduction V3 is the third revision of
- noise reduction algorithm used to improve image
- quality. This removes the low frequency noise in the
- captured image. Two related structs are being defined,
- :c:type:`ipu3_uapi_isp_xnr3_params` for ISP data memory
- and :c:type:`ipu3_uapi_isp_xnr3_vmem_params` for vector
- memory.
-TNR Temporal Noise Reduction block compares successive
- frames in time to remove anomalies / noise in pixel
- values. :c:type:`ipu3_uapi_isp_tnr3_vmem_params` and
- :c:type:`ipu3_uapi_isp_tnr3_params` are defined for ISP
- vector and data memory respectively.
-======================== =======================================================
-
-A few stages of the pipeline will be executed by firmware running on the ISP
-processor, while many others will use a set of fixed hardware blocks also
-called accelerator cluster (ACC) to crunch pixel data and produce statistics.
-
-ACC parameters of individual algorithms, as defined by
-:c:type:`ipu3_uapi_acc_param`, can be chosen to be applied by the user
-space through struct :c:type:`ipu3_uapi_flags` embedded in
-:c:type:`ipu3_uapi_params` structure. For parameters that are configured as
-not enabled by the user space, the corresponding structs are ignored by the
-driver, in which case the existing configuration of the algorithm will be
-preserved.
+The pipeline parameters are passed to the "ipu3-imgu [01] parameters" metadata
+output video nodes, using the :c:type:`v4l2_meta_format` interface. They are
+formatted as described by the :c:type:`ipu3_uapi_params` structure.
Both 3A statistics and pipeline parameters described here are closely tied to
the underlying camera sub-system (CSS) APIs. They are usually consumed and
@@ -143,13 +76,6 @@ produced by dedicated user space libraries that comprise the important tuning
tools, thus freeing the developers from being bothered with the low level
hardware and algorithm details.
-It should be noted that IPU3 DMA operations require the addresses of all data
-structures (that includes both input and output) to be aligned on 32 byte
-boundaries.
-
-The meta data :c:type:`ipu3_uapi_params` will be sent to "ipu3-imgu parameters"
-video node in ``V4L2_BUF_TYPE_META_CAPTURE`` format.
-
.. code-block:: c
struct ipu3_uapi_params {
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
index f53e8f57a003..7fcee1c11ac4 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
@@ -190,6 +190,170 @@ component of each pixel in one 16 or 32 bit word.
- Cr\ :sub:`2`
- Cr\ :sub:`1`
- Cr\ :sub:`0`
+ -
+ * .. _V4L2-PIX-FMT-AYUV32:
+
+ - ``V4L2_PIX_FMT_AYUV32``
+ - 'AYUV'
+
+ - a\ :sub:`7`
+ - a\ :sub:`6`
+ - a\ :sub:`5`
+ - a\ :sub:`4`
+ - a\ :sub:`3`
+ - a\ :sub:`2`
+ - a\ :sub:`1`
+ - a\ :sub:`0`
+
+ - Y'\ :sub:`7`
+ - Y'\ :sub:`6`
+ - Y'\ :sub:`5`
+ - Y'\ :sub:`4`
+ - Y'\ :sub:`3`
+ - Y'\ :sub:`2`
+ - Y'\ :sub:`1`
+ - Y'\ :sub:`0`
+
+ - Cb\ :sub:`7`
+ - Cb\ :sub:`6`
+ - Cb\ :sub:`5`
+ - Cb\ :sub:`4`
+ - Cb\ :sub:`3`
+ - Cb\ :sub:`2`
+ - Cb\ :sub:`1`
+ - Cb\ :sub:`0`
+
+ - Cr\ :sub:`7`
+ - Cr\ :sub:`6`
+ - Cr\ :sub:`5`
+ - Cr\ :sub:`4`
+ - Cr\ :sub:`3`
+ - Cr\ :sub:`2`
+ - Cr\ :sub:`1`
+ - Cr\ :sub:`0`
+ -
+ * .. _V4L2-PIX-FMT-XYUV32:
+
+ - ``V4L2_PIX_FMT_XYUV32``
+ - 'XYUV'
+
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+
+ - Y'\ :sub:`7`
+ - Y'\ :sub:`6`
+ - Y'\ :sub:`5`
+ - Y'\ :sub:`4`
+ - Y'\ :sub:`3`
+ - Y'\ :sub:`2`
+ - Y'\ :sub:`1`
+ - Y'\ :sub:`0`
+
+ - Cb\ :sub:`7`
+ - Cb\ :sub:`6`
+ - Cb\ :sub:`5`
+ - Cb\ :sub:`4`
+ - Cb\ :sub:`3`
+ - Cb\ :sub:`2`
+ - Cb\ :sub:`1`
+ - Cb\ :sub:`0`
+
+ - Cr\ :sub:`7`
+ - Cr\ :sub:`6`
+ - Cr\ :sub:`5`
+ - Cr\ :sub:`4`
+ - Cr\ :sub:`3`
+ - Cr\ :sub:`2`
+ - Cr\ :sub:`1`
+ - Cr\ :sub:`0`
+ -
+ * .. _V4L2-PIX-FMT-VUYA32:
+
+ - ``V4L2_PIX_FMT_VUYA32``
+ - 'VUYA'
+
+ - Cr\ :sub:`7`
+ - Cr\ :sub:`6`
+ - Cr\ :sub:`5`
+ - Cr\ :sub:`4`
+ - Cr\ :sub:`3`
+ - Cr\ :sub:`2`
+ - Cr\ :sub:`1`
+ - Cr\ :sub:`0`
+
+ - Cb\ :sub:`7`
+ - Cb\ :sub:`6`
+ - Cb\ :sub:`5`
+ - Cb\ :sub:`4`
+ - Cb\ :sub:`3`
+ - Cb\ :sub:`2`
+ - Cb\ :sub:`1`
+ - Cb\ :sub:`0`
+
+ - Y'\ :sub:`7`
+ - Y'\ :sub:`6`
+ - Y'\ :sub:`5`
+ - Y'\ :sub:`4`
+ - Y'\ :sub:`3`
+ - Y'\ :sub:`2`
+ - Y'\ :sub:`1`
+ - Y'\ :sub:`0`
+
+ - a\ :sub:`7`
+ - a\ :sub:`6`
+ - a\ :sub:`5`
+ - a\ :sub:`4`
+ - a\ :sub:`3`
+ - a\ :sub:`2`
+ - a\ :sub:`1`
+ - a\ :sub:`0`
+ -
+ * .. _V4L2-PIX-FMT-VUYX32:
+
+ - ``V4L2_PIX_FMT_VUYX32``
+ - 'VUYX'
+
+ - Cr\ :sub:`7`
+ - Cr\ :sub:`6`
+ - Cr\ :sub:`5`
+ - Cr\ :sub:`4`
+ - Cr\ :sub:`3`
+ - Cr\ :sub:`2`
+ - Cr\ :sub:`1`
+ - Cr\ :sub:`0`
+
+ - Cb\ :sub:`7`
+ - Cb\ :sub:`6`
+ - Cb\ :sub:`5`
+ - Cb\ :sub:`4`
+ - Cb\ :sub:`3`
+ - Cb\ :sub:`2`
+ - Cb\ :sub:`1`
+ - Cb\ :sub:`0`
+
+ - Y'\ :sub:`7`
+ - Y'\ :sub:`6`
+ - Y'\ :sub:`5`
+ - Y'\ :sub:`4`
+ - Y'\ :sub:`3`
+ - Y'\ :sub:`2`
+ - Y'\ :sub:`1`
+ - Y'\ :sub:`0`
+
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
.. raw:: latex
@@ -202,4 +366,8 @@ component of each pixel in one 16 or 32 bit word.
#) The value of a = alpha bits is undefined when reading from the driver,
ignored when writing to the driver, except when alpha blending has
been negotiated for a :ref:`Video Overlay <overlay>` or
- :ref:`Video Output Overlay <osd>`.
+ :ref:`Video Output Overlay <osd>` for the formats Y444, YUV555 and
+ YUV4. However, for formats AYUV32 and VUYA32, the alpha component is
+ expected to contain a meaningful value that can be used by drivers
+ and applications. And, the formats XYUV32 and VUYX32 contain undefined
+ alpha values that must be ignored by all applications and drivers.
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index ff4b2a972fd2..f5440d55d510 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -75,15 +75,15 @@ Media Bus Pixel Codes
---------------------
The media bus pixel codes describe image formats as flowing over
-physical busses (both between separate physical components and inside
+physical buses (both between separate physical components and inside
SoC devices). This should not be confused with the V4L2 pixel formats
that describe, using four character codes, image formats as stored in
memory.
-While there is a relationship between image formats on busses and image
+While there is a relationship between image formats on buses and image
formats in memory (a raw Bayer image won't be magically converted to
JPEG just by storing it to memory), there is no one-to-one
-correspondance between them.
+correspondence between them.
Packed RGB Formats
diff --git a/Documentation/media/uapi/v4l/vidioc-g-parm.rst b/Documentation/media/uapi/v4l/vidioc-g-parm.rst
index 0d2593176c90..d9d5d97848d3 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-parm.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-parm.rst
@@ -213,7 +213,7 @@ union holding separate parameters for input and output devices.
.. _parm-caps:
-.. flat-table:: Streaming Parameters Capabilites
+.. flat-table:: Streaming Parameters Capabilities
:header-rows: 0
:stub-columns: 0
:widths: 3 1 4
diff --git a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
index 60986710967b..7c6b5f4e1011 100644
--- a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
@@ -43,10 +43,7 @@ Applications can optionally call the :ref:`VIDIOC_PREPARE_BUF` ioctl to
pass ownership of the buffer to the driver before actually enqueuing it,
using the :ref:`VIDIOC_QBUF <VIDIOC_QBUF>` ioctl, and to prepare it for future I/O. Such
preparations may include cache invalidation or cleaning. Performing them
-in advance saves time during the actual I/O. In case such cache
-operations are not required, the application can use one of
-``V4L2_BUF_FLAG_NO_CACHE_INVALIDATE`` and
-``V4L2_BUF_FLAG_NO_CACHE_CLEAN`` flags to skip the respective step.
+in advance saves time during the actual I/O.
The struct :c:type:`v4l2_buffer` structure is specified in
:ref:`buffer`.
diff --git a/Documentation/media/uapi/v4l/vidioc-qbuf.rst b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
index 3259168a7358..c138d149faea 100644
--- a/Documentation/media/uapi/v4l/vidioc-qbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
@@ -123,7 +123,7 @@ then ``EINVAL`` will be returned.
:ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` or calling :ref:`VIDIOC_REQBUFS`
the check for this will be reset.
- For :ref:`memory-to-memory devices <codec>` you can specify the
+ For :ref:`memory-to-memory devices <mem2mem>` you can specify the
``request_fd`` only for output buffers, not for capture buffers. Attempting
to specify this for a capture buffer will result in an ``EACCES`` error.
diff --git a/Documentation/media/v4l-drivers/bttv.rst b/Documentation/media/v4l-drivers/bttv.rst
index d72a0f8fd267..f956ee264099 100644
--- a/Documentation/media/v4l-drivers/bttv.rst
+++ b/Documentation/media/v4l-drivers/bttv.rst
@@ -85,7 +85,7 @@ same card listens there is much higher...
For problems with sound: There are a lot of different systems used
for TV sound all over the world. And there are also different chips
which decode the audio signal. Reports about sound problems ("stereo
-does'nt work") are pretty useless unless you include some details
+doesn't work") are pretty useless unless you include some details
about your hardware and the TV sound scheme used in your country (or
at least the country you are living in).
@@ -771,7 +771,7 @@ Identifying:
- Lifeview.com.tw states (Feb. 2002):
"The FlyVideo2000 and FlyVideo2000s product name have renamed to FlyVideo98."
Their Bt8x8 cards are listed as discontinued.
- - Flyvideo 2000S was probably sold as Flyvideo 3000 in some contries(Europe?).
+ - Flyvideo 2000S was probably sold as Flyvideo 3000 in some countries(Europe?).
The new Flyvideo 2000/3000 are SAA7130/SAA7134 based.
"Flyvideo II" had been the name for the 848 cards, nowadays (in Germany)
diff --git a/Documentation/media/v4l-drivers/imx.rst b/Documentation/media/v4l-drivers/imx.rst
index 6922dde4a82b..1d7eb8c7bd5c 100644
--- a/Documentation/media/v4l-drivers/imx.rst
+++ b/Documentation/media/v4l-drivers/imx.rst
@@ -24,12 +24,12 @@ memory. Various dedicated DMA channels exist for both video capture and
display paths. During transfer, the IDMAC is also capable of vertical
image flip, 8x8 block transfer (see IRT description), pixel component
re-ordering (for example UYVY to YUYV) within the same colorspace, and
-even packed <--> planar conversion. It can also perform a simple
-de-interlacing by interleaving even and odd lines during transfer
+packed <--> planar conversion. The IDMAC can also perform a simple
+de-interlacing by interweaving even and odd lines during transfer
(without motion compensation which requires the VDIC).
The CSI is the backend capture unit that interfaces directly with
-camera sensors over Parallel, BT.656/1120, and MIPI CSI-2 busses.
+camera sensors over Parallel, BT.656/1120, and MIPI CSI-2 buses.
The IC handles color-space conversion, resizing (downscaling and
upscaling), horizontal flip, and 90/270 degree rotation operations.
@@ -175,15 +175,21 @@ via the SMFC and an IDMAC channel, bypassing IC pre-processing. This
source pad is routed to a capture device node, with a node name of the
format "ipuX_csiY capture".
-Note that since the IDMAC source pad makes use of an IDMAC channel, it
-can do pixel reordering within the same colorspace. For example, the
-sink pad can take UYVY2X8, but the IDMAC source pad can output YUYV2X8.
-If the sink pad is receiving YUV, the output at the capture device can
-also be converted to a planar YUV format such as YUV420.
-
-It will also perform simple de-interlace without motion compensation,
-which is activated if the sink pad's field type is an interlaced type,
-and the IDMAC source pad field type is set to none.
+Note that since the IDMAC source pad makes use of an IDMAC channel,
+pixel reordering within the same colorspace can be carried out by the
+IDMAC channel. For example, if the CSI sink pad is receiving in UYVY
+order, the capture device linked to the IDMAC source pad can capture
+in YUYV order. Also, if the CSI sink pad is receiving a packed YUV
+format, the capture device can capture a planar YUV format such as
+YUV420.
+
+The IDMAC channel at the IDMAC source pad also supports simple
+interweave without motion compensation, which is activated if the source
+pad's field type is sequential top-bottom or bottom-top, and the
+requested capture interface field type is set to interlaced (t-b, b-t,
+or unqualified interlaced). The capture interface will enforce the same
+field order as the source pad field order (interlaced-bt if source pad
+is seq-bt, interlaced-tb if source pad is seq-tb).
This subdev can generate the following event when enabling the second
IDMAC source pad:
@@ -201,7 +207,7 @@ The CSI supports cropping the incoming raw sensor frames. This is
implemented in the ipuX_csiY entities at the sink pad, using the
crop selection subdev API.
-The CSI also supports fixed divide-by-two downscaling indepently in
+The CSI also supports fixed divide-by-two downscaling independently in
width and height. This is implemented in the ipuX_csiY entities at
the sink pad, using the compose selection subdev API.
@@ -325,14 +331,14 @@ ipuX_vdic
The VDIC carries out motion compensated de-interlacing, with three
motion compensation modes: low, medium, and high motion. The mode is
-specified with the menu control V4L2_CID_DEINTERLACING_MODE. It has
-two sink pads and a single source pad.
+specified with the menu control V4L2_CID_DEINTERLACING_MODE. The VDIC
+has two sink pads and a single source pad.
The direct sink pad receives from an ipuX_csiY direct pad. With this
link the VDIC can only operate in high motion mode.
When the IDMAC sink pad is activated, it receives from an output
-or mem2mem device node. With this pipeline, it can also operate
+or mem2mem device node. With this pipeline, the VDIC can also operate
in low and medium modes, because these modes require receiving
frames from memory buffers. Note that an output or mem2mem device
is not implemented yet, so this sink pad currently has no links.
@@ -345,8 +351,8 @@ ipuX_ic_prp
This is the IC pre-processing entity. It acts as a router, routing
data from its sink pad to one or both of its source pads.
-It has a single sink pad. The sink pad can receive from the ipuX_csiY
-direct pad, or from ipuX_vdic.
+This entity has a single sink pad. The sink pad can receive from the
+ipuX_csiY direct pad, or from ipuX_vdic.
This entity has two source pads. One source pad routes to the
pre-process encode task entity (ipuX_ic_prpenc), the other to the
@@ -369,8 +375,8 @@ color-space conversion, resizing (downscaling and upscaling),
horizontal and vertical flip, and 90/270 degree rotation. Flip
and rotation are provided via standard V4L2 controls.
-Like the ipuX_csiY IDMAC source, it can also perform simple de-interlace
-without motion compensation, and pixel reordering.
+Like the ipuX_csiY IDMAC source, this entity also supports simple
+de-interlace without motion compensation, and pixel reordering.
ipuX_ic_prpvf
-------------
@@ -380,18 +386,18 @@ pad from ipuX_ic_prp, and a single source pad. The source pad is routed
to a capture device node, with a node name of the format
"ipuX_ic_prpvf capture".
-It is identical in operation to ipuX_ic_prpenc, with the same resizing
-and CSC operations and flip/rotation controls. It will receive and
-process de-interlaced frames from the ipuX_vdic if ipuX_ic_prp is
+This entity is identical in operation to ipuX_ic_prpenc, with the same
+resizing and CSC operations and flip/rotation controls. It will receive
+and process de-interlaced frames from the ipuX_vdic if ipuX_ic_prp is
receiving from ipuX_vdic.
-Like the ipuX_csiY IDMAC source, it can perform simple de-interlace
-without motion compensation. However, note that if the ipuX_vdic is
-included in the pipeline (ipuX_ic_prp is receiving from ipuX_vdic),
-it's not possible to use simple de-interlace in ipuX_ic_prpvf, since
-the ipuX_vdic has already carried out de-interlacing (with motion
-compensation) and therefore the field type output from ipuX_ic_prp can
-only be none.
+Like the ipuX_csiY IDMAC source, this entity supports simple
+interweaving without motion compensation. However, note that if the
+ipuX_vdic is included in the pipeline (ipuX_ic_prp is receiving from
+ipuX_vdic), it's not possible to use interweave in ipuX_ic_prpvf,
+since the ipuX_vdic has already carried out de-interlacing (with
+motion compensation) and therefore the field type output from
+ipuX_vdic can only be none (progressive).
Capture Pipelines
-----------------
@@ -516,10 +522,33 @@ On the SabreAuto, an on-board ADV7180 SD decoder is connected to the
parallel bus input on the internal video mux to IPU1 CSI0.
The following example configures a pipeline to capture from the ADV7180
-video decoder, assuming NTSC 720x480 input signals, with Motion
-Compensated de-interlacing. Pad field types assume the adv7180 outputs
-"interlaced". $outputfmt can be any format supported by the ipu1_ic_prpvf
-entity at its output pad:
+video decoder, assuming NTSC 720x480 input signals, using simple
+interweave (unconverted and without motion compensation). The adv7180
+must output sequential or alternating fields (field type 'seq-bt' for
+NTSC, or 'alternate'):
+
+.. code-block:: none
+
+ # Setup links
+ media-ctl -l "'adv7180 3-0021':0 -> 'ipu1_csi0_mux':1[1]"
+ media-ctl -l "'ipu1_csi0_mux':2 -> 'ipu1_csi0':0[1]"
+ media-ctl -l "'ipu1_csi0':2 -> 'ipu1_csi0 capture':0[1]"
+ # Configure pads
+ media-ctl -V "'adv7180 3-0021':0 [fmt:UYVY2X8/720x480 field:seq-bt]"
+ media-ctl -V "'ipu1_csi0_mux':2 [fmt:UYVY2X8/720x480]"
+ media-ctl -V "'ipu1_csi0':2 [fmt:AYUV32/720x480]"
+ # Configure "ipu1_csi0 capture" interface (assumed at /dev/video4)
+ v4l2-ctl -d4 --set-fmt-video=field=interlaced_bt
+
+Streaming can then begin on /dev/video4. The v4l2-ctl tool can also be
+used to select any supported YUV pixelformat on /dev/video4.
+
+This example configures a pipeline to capture from the ADV7180
+video decoder, assuming PAL 720x576 input signals, with Motion
+Compensated de-interlacing. The adv7180 must output sequential or
+alternating fields (field type 'seq-tb' for PAL, or 'alternate').
+$outputfmt can be any format supported by the ipu1_ic_prpvf entity
+at its output pad:
.. code-block:: none
@@ -531,11 +560,11 @@ entity at its output pad:
media-ctl -l "'ipu1_ic_prp':2 -> 'ipu1_ic_prpvf':0[1]"
media-ctl -l "'ipu1_ic_prpvf':1 -> 'ipu1_ic_prpvf capture':0[1]"
# Configure pads
- media-ctl -V "'adv7180 3-0021':0 [fmt:UYVY2X8/720x480]"
- media-ctl -V "'ipu1_csi0_mux':2 [fmt:UYVY2X8/720x480 field:interlaced]"
- media-ctl -V "'ipu1_csi0':1 [fmt:AYUV32/720x480 field:interlaced]"
- media-ctl -V "'ipu1_vdic':2 [fmt:AYUV32/720x480 field:none]"
- media-ctl -V "'ipu1_ic_prp':2 [fmt:AYUV32/720x480 field:none]"
+ media-ctl -V "'adv7180 3-0021':0 [fmt:UYVY2X8/720x576 field:seq-tb]"
+ media-ctl -V "'ipu1_csi0_mux':2 [fmt:UYVY2X8/720x576]"
+ media-ctl -V "'ipu1_csi0':1 [fmt:AYUV32/720x576]"
+ media-ctl -V "'ipu1_vdic':2 [fmt:AYUV32/720x576 field:none]"
+ media-ctl -V "'ipu1_ic_prp':2 [fmt:AYUV32/720x576 field:none]"
media-ctl -V "'ipu1_ic_prpvf':1 [fmt:$outputfmt field:none]"
Streaming can then begin on the capture device node at
diff --git a/Documentation/media/v4l-drivers/imx7.rst b/Documentation/media/v4l-drivers/imx7.rst
new file mode 100644
index 000000000000..fe411f65c01c
--- /dev/null
+++ b/Documentation/media/v4l-drivers/imx7.rst
@@ -0,0 +1,162 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+i.MX7 Video Capture Driver
+==========================
+
+Introduction
+------------
+
+The i.MX7, contrary to the i.MX5/6 family, does not contain an Image Processing
+Unit (IPU); because of that, its capabilities for processing or manipulating
+the captured frames are more limited.
+
+For image capture the i.MX7 has three units:
+- CMOS Sensor Interface (CSI)
+- Video Multiplexer
+- MIPI CSI-2 Receiver
+
+.. code-block:: none
+
+   MIPI Camera Input ---> MIPI CSI-2 ----> |\
+                                           | \
+                                           |  \
+                                           | M |
+                                           | U | ------> CSI ---> Capture
+                                           | X |
+                                           |  /
+   Parallel Camera Input ----------------> | /
+                                           |/
+
+For additional information, please refer to the latest versions of the i.MX7
+reference manual [#f1]_.
+
+Entities
+--------
+
+imx7-mipi-csi2
+--------------
+
+This is the MIPI CSI-2 receiver entity. It has one sink pad to receive the
+pixel data from a MIPI CSI-2 camera sensor, and one source pad, corresponding
+to virtual channel 0. This module is compliant with a previous version of the
+Samsung D-PHY, and supports two D-PHY Rx data lanes.
+
+csi_mux
+-------
+
+This is the video multiplexer. It has two sink pads to select from either camera
+sensor with a parallel interface or from MIPI CSI-2 virtual channel 0. It has
+a single source pad that routes to the CSI.
+
+csi
+---
+
+The CSI enables the chip to connect directly to an external CMOS image sensor.
+The CSI can interface directly with Parallel and MIPI CSI-2 buses. It has a
+256 x 64 FIFO to store the received image pixel data, and embedded DMA
+controllers to transfer the data from the FIFO through the AHB bus.
+
+This entity has one sink pad that receives from the csi_mux entity and a single
+source pad that routes video frames directly to memory buffers. This pad is
+routed to a capture device node.
+
+Usage Notes
+-----------
+
+To aid in configuration and for backward compatibility with V4L2 applications
+that access controls only from video device nodes, the capture device interfaces
+inherit controls from the active entities in the current pipeline, so controls
+can be accessed either directly from the subdev or from the active capture
+device interface. For example, the sensor controls are available either from the
+sensor subdevs or from the active capture device.
+
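+As an illustrative sketch (not part of the driver), an inherited sensor
+control such as exposure can be read through the capture node, assuming
+the sensor in the active pipeline exposes it:
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    void read_inherited_exposure(void)
+    {
+        int fd = open("/dev/video0", O_RDWR); /* hypothetical node */
+        struct v4l2_control ctrl = { .id = V4L2_CID_EXPOSURE };
+
+        /* Works on the video node because the control is inherited
+         * from the sensor sub-device in the active pipeline. */
+        ioctl(fd, VIDIOC_G_CTRL, &ctrl);
+    }
+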
+Warp7 with OV2680
+-----------------
+
+On this platform an OV2680 MIPI CSI-2 module is connected to the internal MIPI
+CSI-2 receiver. The following example configures a video capture pipeline with
+an output resolution of 800x600 in the 10-bit BGGR Bayer format:
+
+.. code-block:: none
+
+ # Setup links
+ media-ctl -l "'ov2680 1-0036':0 -> 'imx7-mipi-csis.0':0[1]"
+ media-ctl -l "'imx7-mipi-csis.0':1 -> 'csi_mux':1[1]"
+ media-ctl -l "'csi_mux':2 -> 'csi':0[1]"
+ media-ctl -l "'csi':1 -> 'csi capture':0[1]"
+
+ # Configure pads for pipeline
+ media-ctl -V "'ov2680 1-0036':0 [fmt:SBGGR10_1X10/800x600 field:none]"
+ media-ctl -V "'csi_mux':1 [fmt:SBGGR10_1X10/800x600 field:none]"
+ media-ctl -V "'csi_mux':2 [fmt:SBGGR10_1X10/800x600 field:none]"
+ media-ctl -V "'imx7-mipi-csis.0':0 [fmt:SBGGR10_1X10/800x600 field:none]"
+ media-ctl -V "'csi':0 [fmt:SBGGR10_1X10/800x600 field:none]"
+
+After this, streaming can start. The v4l2-ctl tool can be used to select any of
+the resolutions supported by the sensor.
+
+.. code-block:: none
+
+ root@imx7s-warp:~# media-ctl -p
+ Media controller API version 4.17.0
+
+ Media device information
+ ------------------------
+ driver imx-media
+ model imx-media
+ serial
+ bus info
+ hw revision 0x0
+ driver version 4.17.0
+
+ Device topology
+ - entity 1: csi (2 pads, 2 links)
+ type V4L2 subdev subtype Unknown flags 0
+ device node name /dev/v4l-subdev0
+ pad0: Sink
+ [fmt:SBGGR10_1X10/800x600 field:none]
+ <- "csi_mux":2 [ENABLED]
+ pad1: Source
+ [fmt:SBGGR10_1X10/800x600 field:none]
+ -> "csi capture":0 [ENABLED]
+
+ - entity 4: csi capture (1 pad, 1 link)
+ type Node subtype V4L flags 0
+ device node name /dev/video0
+ pad0: Sink
+ <- "csi":1 [ENABLED]
+
+ - entity 10: csi_mux (3 pads, 2 links)
+ type V4L2 subdev subtype Unknown flags 0
+ device node name /dev/v4l-subdev1
+ pad0: Sink
+ [fmt:unknown/0x0]
+ pad1: Sink
+ [fmt:unknown/800x600 field:none]
+ <- "imx7-mipi-csis.0":1 [ENABLED]
+ pad2: Source
+ [fmt:unknown/800x600 field:none]
+ -> "csi":0 [ENABLED]
+
+ - entity 14: imx7-mipi-csis.0 (2 pads, 2 links)
+ type V4L2 subdev subtype Unknown flags 0
+ device node name /dev/v4l-subdev2
+ pad0: Sink
+ [fmt:SBGGR10_1X10/800x600 field:none]
+ <- "ov2680 1-0036":0 [ENABLED]
+ pad1: Source
+ [fmt:SBGGR10_1X10/800x600 field:none]
+ -> "csi_mux":1 [ENABLED]
+
+ - entity 17: ov2680 1-0036 (1 pad, 1 link)
+ type V4L2 subdev subtype Sensor flags 0
+ device node name /dev/v4l-subdev3
+ pad0: Source
+ [fmt:SBGGR10_1X10/800x600 field:none]
+ -> "imx7-mipi-csis.0":0 [ENABLED]
+
+
+References
+----------
+
+.. [#f1] https://www.nxp.com/docs/en/reference-manual/IMX7SRM.pdf
diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst
index f28570ec9e42..dfd4b205937c 100644
--- a/Documentation/media/v4l-drivers/index.rst
+++ b/Documentation/media/v4l-drivers/index.rst
@@ -44,6 +44,7 @@ For more details see the file COPYING in the source distribution of Linux.
davinci-vpbe
fimc
imx
+ imx7
ipu3
ivtv
max2175
diff --git a/Documentation/media/v4l-drivers/ipu3.rst b/Documentation/media/v4l-drivers/ipu3.rst
index f89b51dafadd..c9f780404eee 100644
--- a/Documentation/media/v4l-drivers/ipu3.rst
+++ b/Documentation/media/v4l-drivers/ipu3.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. include:: <isonum.txt>
===============================================================
@@ -355,10 +357,157 @@ https://chromium.googlesource.com/chromiumos/platform/arc-camera/+/master/
The source can be located under hal/intel directory.
+Overview of IPU3 pipeline
+=========================
+
+IPU3 pipeline has a number of image processing stages, each of which takes a
+set of parameters as input. The major stages of pipelines are shown here:
+
+.. kernel-render:: DOT
+ :alt: IPU3 ImgU Pipeline
+ :caption: IPU3 ImgU Pipeline Diagram
+
+ digraph "IPU3 ImgU" {
+ node [shape=box]
+ splines="ortho"
+ rankdir="LR"
+
+ a [label="Raw pixels"]
+ b [label="Bayer Downscaling"]
+ c [label="Optical Black Correction"]
+ d [label="Linearization"]
+ e [label="Lens Shading Correction"]
+ f [label="White Balance / Exposure / Focus Apply"]
+ g [label="Bayer Noise Reduction"]
+ h [label="ANR"]
+ i [label="Demosaicing"]
+ j [label="Color Correction Matrix"]
+ k [label="Gamma correction"]
+ l [label="Color Space Conversion"]
+ m [label="Chroma Down Scaling"]
+ n [label="Chromatic Noise Reduction"]
+ o [label="Total Color Correction"]
+ p [label="XNR3"]
+ q [label="TNR"]
+ r [label="DDR"]
+
+ { rank=same; a -> b -> c -> d -> e -> f }
+ { rank=same; g -> h -> i -> j -> k -> l }
+ { rank=same; m -> n -> o -> p -> q -> r }
+
+ a -> g -> m [style=invis, weight=10]
+
+ f -> g
+ l -> m
+ }
+
+The table below presents a description of the above algorithms.
+
+======================== =======================================================
+Name Description
+======================== =======================================================
+Optical Black Correction The Optical Black Correction block subtracts a
+ pre-defined value from the respective pixel values to
+ obtain better image quality.
+ Defined in :c:type:`ipu3_uapi_obgrid_param`.
+Linearization This block uses linearization parameters to correct
+ non-linear sensor effects. The lookup table is
+ defined in :c:type:`ipu3_uapi_isp_lin_vmem_params`.
+SHD Lens shading correction is used to correct spatial
+ non-uniformity of the pixel response due to optical
+ lens shading. This is done by applying a different gain
+ for each pixel. The gain, black level, etc. are
+ configured in :c:type:`ipu3_uapi_shd_config_static`.
+BNR Bayer noise reduction block removes image noise by
+ applying a bilateral filter.
+ See :c:type:`ipu3_uapi_bnr_static_config` for details.
+ANR Advanced Noise Reduction is a block-based algorithm
+ that performs noise reduction in the Bayer domain. The
+ convolution matrix etc can be found in
+ :c:type:`ipu3_uapi_anr_config`.
+DM Demosaicing converts raw sensor data in Bayer format
+ into an RGB (Red, Green, Blue) representation. It
+ also outputs an estimate of the Y channel for
+ subsequent stream processing by firmware. The struct
+ is defined as :c:type:`ipu3_uapi_dm_config`.
+Color Correction Color Correction transforms the sensor-specific color
+ space to the standard "sRGB" color space. This is done
+ by applying the 3x3 matrix defined in
+ :c:type:`ipu3_uapi_ccm_mat_config`.
+Gamma correction Gamma correction :c:type:`ipu3_uapi_gamma_config` is a
+ basic non-linear tone mapping correction that is
+ applied per pixel for each pixel component.
+CSC Color space conversion transforms each pixel from the
+ RGB primary representation to the YUV (Y: luminance,
+ UV: chrominance) representation. This is done by
+ applying the 3x3 matrix defined in
+ :c:type:`ipu3_uapi_csc_mat_config`.
+CDS Chroma down sampling
+ After the CSC is performed, chroma down sampling
+ down-samples the UV plane by a factor of 2 in each
+ direction to produce YUV 4:2:0, using the 4x2
+ configurable filter :c:type:`ipu3_uapi_cds_params`.
+CHNR Chroma noise reduction
+ This block processes only the chrominance pixels and
+ performs noise reduction by cleaning the high
+ frequency noise.
+ See struct :c:type:`ipu3_uapi_yuvp1_chnr_config`.
+TCC Total color correction as defined in struct
+ :c:type:`ipu3_uapi_yuvp2_tcc_static_config`.
+XNR3 eXtreme Noise Reduction V3 is the third revision of
+ noise reduction algorithm used to improve image
+ quality. This removes the low frequency noise in the
+ captured image. Two related structs are defined,
+ :c:type:`ipu3_uapi_isp_xnr3_params` for ISP data memory
+ and :c:type:`ipu3_uapi_isp_xnr3_vmem_params` for vector
+ memory.
+TNR Temporal Noise Reduction block compares successive
+ frames in time to remove anomalies / noise in pixel
+ values. :c:type:`ipu3_uapi_isp_tnr3_vmem_params` and
+ :c:type:`ipu3_uapi_isp_tnr3_params` are defined for ISP
+ vector and data memory respectively.
+======================== =======================================================
+
+Other often-encountered acronyms not listed in the above table:
+
+ ACC
+ Accelerator cluster
+ AWB_FR
+ Auto white balance filter response statistics
+ BDS
+ Bayer downscaler parameters
+ CCM
+ Color correction matrix coefficients
+ IEFd
+ Image enhancement filter directed
+ Obgrid
+ Optical black level compensation
+ OSYS
+ Output system configuration
+ ROI
+ Region of interest
+ YDS
+ Y down sampling
+ YTM
+ Y-tone mapping
+
+A few stages of the pipeline will be executed by firmware running on the ISP
+processor, while many others will use a set of fixed hardware blocks, also
+called the accelerator cluster (ACC), to crunch pixel data and produce
+statistics.
+
+User space selects which ACC parameters of the individual algorithms, as
+defined by :c:type:`ipu3_uapi_acc_param`, are applied via struct
+:c:type:`ipu3_uapi_flags` embedded in the :c:type:`ipu3_uapi_params`
+structure. For parameters that user space leaves disabled, the corresponding
+structs are ignored by the driver and the existing configuration of those
+algorithms is preserved.
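+
+As an illustration, enabling only the BNR stage while leaving every other
+stage untouched could look like the sketch below. The field names
+(``use.acc_bnr``, ``acc_param.bnr``) follow the staging header
+drivers/staging/media/ipu3/include/intel-ipu3.h at the time of writing and
+should be treated as illustrative rather than as a stable ABI:
+
+.. code-block:: c
+
+    struct ipu3_uapi_params p = {};
+
+    p.use.acc_bnr = 1;           /* apply the BNR config in this buffer */
+    p.acc_param.bnr = bnr_cfg;   /* a struct ipu3_uapi_bnr_static_config
+                                  * assumed to be filled in elsewhere */
+
+    /* Stages whose 'use' flag stays zero are ignored by the driver and
+     * keep their existing configuration. */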
+
References
==========
-.. [#f5] include/uapi/linux/intel-ipu3.h
+.. [#f5] drivers/staging/media/ipu3/include/intel-ipu3.h
.. [#f1] https://github.com/intel/nvt
diff --git a/Documentation/media/v4l-drivers/pxa_camera.rst b/Documentation/media/v4l-drivers/pxa_camera.rst
index e4fbca755e1a..ee1bd96b66dd 100644
--- a/Documentation/media/v4l-drivers/pxa_camera.rst
+++ b/Documentation/media/v4l-drivers/pxa_camera.rst
@@ -18,7 +18,7 @@ Global video workflow
---------------------
a) QCI stopped
- Initialy, the QCI interface is stopped.
+ Initially, the QCI interface is stopped.
When a buffer is queued (pxa_videobuf_ops->buf_queue), the QCI starts.
b) QCI started
diff --git a/Documentation/media/v4l-drivers/qcom_camss.rst b/Documentation/media/v4l-drivers/qcom_camss.rst
index 6b15385b12b3..a72e17d09cb7 100644
--- a/Documentation/media/v4l-drivers/qcom_camss.rst
+++ b/Documentation/media/v4l-drivers/qcom_camss.rst
@@ -123,7 +123,7 @@ The considerations to split the driver in this particular way are as follows:
- representing CSIPHY and CSID modules by a separate sub-device for each module
allows to model the hardware links between these modules;
- representing VFE by a separate sub-devices for each input interface allows
- to use the input interfaces concurently and independently as this is
+ to use the input interfaces concurrently and independently as this is
supported by the hardware;
- representing ISPIF by a number of sub-devices equal to the number of CSID
sub-devices allows to create linear media controller pipelines when using two
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 1c22b21ae922..f70ebcdfe592 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1937,21 +1937,6 @@ There are some more advanced barrier functions:
information on consistent memory.
-MMIO WRITE BARRIER
-------------------
-
-The Linux kernel also has a special barrier for use with memory-mapped I/O
-writes:
-
- mmiowb();
-
-This is a variation on the mandatory write barrier that causes writes to weakly
-ordered I/O regions to be partially ordered. Its effects may go beyond the
-CPU->Hardware interface and actually affect the hardware at some level.
-
-See the subsection "Acquires vs I/O accesses" for more information.
-
-
===============================
IMPLICIT KERNEL MEMORY BARRIERS
===============================
@@ -2317,75 +2302,6 @@ But it won't see any of:
*E, *F or *G following RELEASE Q
-
-ACQUIRES VS I/O ACCESSES
-------------------------
-
-Under certain circumstances (especially involving NUMA), I/O accesses within
-two spinlocked sections on two different CPUs may be seen as interleaved by the
-PCI bridge, because the PCI bridge does not necessarily participate in the
-cache-coherence protocol, and is therefore incapable of issuing the required
-read memory barriers.
-
-For example:
-
- CPU 1 CPU 2
- =============================== ===============================
- spin_lock(Q)
- writel(0, ADDR)
- writel(1, DATA);
- spin_unlock(Q);
- spin_lock(Q);
- writel(4, ADDR);
- writel(5, DATA);
- spin_unlock(Q);
-
-may be seen by the PCI bridge as follows:
-
- STORE *ADDR = 0, STORE *ADDR = 4, STORE *DATA = 1, STORE *DATA = 5
-
-which would probably cause the hardware to malfunction.
-
-
-What is necessary here is to intervene with an mmiowb() before dropping the
-spinlock, for example:
-
- CPU 1 CPU 2
- =============================== ===============================
- spin_lock(Q)
- writel(0, ADDR)
- writel(1, DATA);
- mmiowb();
- spin_unlock(Q);
- spin_lock(Q);
- writel(4, ADDR);
- writel(5, DATA);
- mmiowb();
- spin_unlock(Q);
-
-this will ensure that the two stores issued on CPU 1 appear at the PCI bridge
-before either of the stores issued on CPU 2.
-
-
-Furthermore, following a store by a load from the same device obviates the need
-for the mmiowb(), because the load forces the store to complete before the load
-is performed:
-
- CPU 1 CPU 2
- =============================== ===============================
- spin_lock(Q)
- writel(0, ADDR)
- a = readl(DATA);
- spin_unlock(Q);
- spin_lock(Q);
- writel(4, ADDR);
- b = readl(DATA);
- spin_unlock(Q);
-
-
-See Documentation/driver-api/device-io.rst for more information.
-
-
=================================
WHERE ARE MEMORY BARRIERS NEEDED?
=================================
@@ -2532,16 +2448,9 @@ the device to malfunction.
Inside of the Linux kernel, I/O should be done through the appropriate accessor
routines - such as inb() or writel() - which know how to make such accesses
appropriately sequential. While this, for the most part, renders the explicit
-use of memory barriers unnecessary, there are a couple of situations where they
-might be needed:
-
- (1) On some systems, I/O stores are not strongly ordered across all CPUs, and
- so for _all_ general drivers locks should be used and mmiowb() must be
- issued prior to unlocking the critical section.
-
- (2) If the accessor functions are used to refer to an I/O memory window with
- relaxed memory access properties, then _mandatory_ memory barriers are
- required to enforce ordering.
+use of memory barriers unnecessary, if the accessor functions are used to refer
+to an I/O memory window with relaxed memory access properties, then _mandatory_
+memory barriers are required to enforce ordering.
See Documentation/driver-api/device-io.rst for more information.
@@ -2586,8 +2495,7 @@ explicit barriers are used.
Normally this won't be a problem because the I/O accesses done inside such
sections will include synchronous load operations on strictly ordered I/O
-registers that form implicit I/O barriers. If this isn't sufficient then an
-mmiowb() may need to be used explicitly.
+registers that form implicit I/O barriers.
A similar situation may occur between an interrupt routine and two routines
@@ -2599,71 +2507,114 @@ likely, then interrupt-disabling locks should be used to guarantee ordering.
KERNEL I/O BARRIER EFFECTS
==========================
-When accessing I/O memory, drivers should use the appropriate accessor
-functions:
-
- (*) inX(), outX():
-
- These are intended to talk to I/O space rather than memory space, but
- that's primarily a CPU-specific concept. The i386 and x86_64 processors
- do indeed have special I/O space access cycles and instructions, but many
- CPUs don't have such a concept.
-
- The PCI bus, amongst others, defines an I/O space concept which - on such
- CPUs as i386 and x86_64 - readily maps to the CPU's concept of I/O
- space. However, it may also be mapped as a virtual I/O space in the CPU's
- memory map, particularly on those CPUs that don't support alternate I/O
- spaces.
-
- Accesses to this space may be fully synchronous (as on i386), but
- intermediary bridges (such as the PCI host bridge) may not fully honour
- that.
-
- They are guaranteed to be fully ordered with respect to each other.
-
- They are not guaranteed to be fully ordered with respect to other types of
- memory and I/O operation.
+Interfacing with peripherals via I/O accesses is deeply architecture and device
+specific. Therefore, drivers which are inherently non-portable may rely on
+specific behaviours of their target systems in order to achieve synchronization
+in the most lightweight manner possible. For drivers intending to be portable
+between multiple architectures and bus implementations, the kernel offers a
+series of accessor functions that provide various degrees of ordering
+guarantees:
(*) readX(), writeX():
- Whether these are guaranteed to be fully ordered and uncombined with
- respect to each other on the issuing CPU depends on the characteristics
- defined for the memory window through which they're accessing. On later
- i386 architecture machines, for example, this is controlled by way of the
- MTRR registers.
+ The readX() and writeX() MMIO accessors take a pointer to the
+ peripheral being accessed as an __iomem * parameter. For pointers
+ mapped with the default I/O attributes (e.g. those returned by
+ ioremap()), the ordering guarantees are as follows:
+
+ 1. All readX() and writeX() accesses to the same peripheral are ordered
+ with respect to each other. This ensures that MMIO register accesses
+ by the same CPU thread to a particular device will arrive in program
+ order.
+
+ 2. A writeX() issued by a CPU thread holding a spinlock is ordered
+ before a writeX() to the same peripheral from another CPU thread
+ issued after a later acquisition of the same spinlock. This ensures
+ that MMIO register writes to a particular device issued while holding
+ a spinlock will arrive in an order consistent with acquisitions of
+ the lock.
+
+ 3. A writeX() by a CPU thread to the peripheral will first wait for the
+ completion of all prior writes to memory either issued by, or
+ propagated to, the same thread. This ensures that writes by the CPU
+ to an outbound DMA buffer allocated by dma_alloc_coherent() will be
+ visible to a DMA engine when the CPU writes to its MMIO control
+ register to trigger the transfer.
+
+ 4. A readX() by a CPU thread from the peripheral will complete before
+ any subsequent reads from memory by the same thread can begin. This
+ ensures that reads by the CPU from an incoming DMA buffer allocated
+ by dma_alloc_coherent() will not see stale data after reading from
+ the DMA engine's MMIO status register to establish that the DMA
+ transfer has completed.
+
+ 5. A readX() by a CPU thread from the peripheral will complete before
+ any subsequent delay() loop can begin execution on the same thread.
+ This ensures that two MMIO register writes by the CPU to a peripheral
+ will arrive at least 1us apart if the first write is immediately read
+ back with readX() and udelay(1) is called prior to the second
+ writeX():
+
+ writel(42, DEVICE_REGISTER_0); // Arrives at the device...
+ readl(DEVICE_REGISTER_0);
+ udelay(1);
+ writel(42, DEVICE_REGISTER_1); // ...at least 1us before this.
+
+ The ordering properties of __iomem pointers obtained with non-default
+ attributes (e.g. those returned by ioremap_wc()) are specific to the
+ underlying architecture and therefore the guarantees listed above cannot
+ generally be relied upon for accesses to these types of mappings.
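+
+     As an example of properties 2 and 3 above, the following driver
+     sketch (the device structure and register offset are invented for
+     illustration) needs no extra barriers either to publish a coherent
+     DMA descriptor before kicking the device or to keep doorbell
+     writes from different CPUs ordered:
+
+	spin_lock(&dev->lock);
+	dev->desc->len = len;			// coherent DMA memory (3)
+	writel(1, dev->regs + DOORBELL);	// kicks the transfer
+	spin_unlock(&dev->lock);		// orders against writel()
+						// from other CPUs (2)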
+
+ (*) readX_relaxed(), writeX_relaxed():
+
+ These are similar to readX() and writeX(), but provide weaker memory
+ ordering guarantees. Specifically, they do not guarantee ordering with
+ respect to locking, normal memory accesses or delay() loops (i.e.
+ bullets 2-5 above) but they are still guaranteed to be ordered with
+ respect to other accesses from the same CPU thread to the same
+ peripheral when operating on __iomem pointers mapped with the default
+ I/O attributes.
+
+ (*) readsX(), writesX():
+
+ The readsX() and writesX() MMIO accessors are designed for accessing
+ register-based, memory-mapped FIFOs residing on peripherals that are not
+ capable of performing DMA. Consequently, they provide only the ordering
+ guarantees of readX_relaxed() and writeX_relaxed(), as documented above.
- Ordinarily, these will be guaranteed to be fully ordered and uncombined,
- provided they're not accessing a prefetchable device.
+ (*) inX(), outX():
- However, intermediary hardware (such as a PCI bridge) may indulge in
- deferral if it so wishes; to flush a store, a load from the same location
- is preferred[*], but a load from the same device or from configuration
- space should suffice for PCI.
+ The inX() and outX() accessors are intended to access legacy port-mapped
+ I/O peripherals, which may require special instructions on some
+ architectures (notably x86). The port number of the peripheral being
+ accessed is passed as an argument.
- [*] NOTE! attempting to load from the same location as was written to may
- cause a malfunction - consider the 16550 Rx/Tx serial registers for
- example.
+ Since many CPU architectures ultimately access these peripherals via an
+ internal virtual memory mapping, the portable ordering guarantees
+ provided by inX() and outX() are the same as those provided by readX()
+ and writeX() respectively when accessing a mapping with the default I/O
+ attributes.
- Used with prefetchable I/O memory, an mmiowb() barrier may be required to
- force stores to be ordered.
+ Device drivers may expect outX() to emit a non-posted write transaction
+ that waits for a completion response from the I/O peripheral before
+ returning. This is not guaranteed by all architectures and is therefore
+ not part of the portable ordering semantics.
- Please refer to the PCI specification for more information on interactions
- between PCI transactions.
+ (*) insX(), outsX():
- (*) readX_relaxed(), writeX_relaxed()
+ As above, the insX() and outsX() accessors provide the same ordering
+ guarantees as readsX() and writesX() respectively when accessing a
+ mapping with the default I/O attributes.
- These are similar to readX() and writeX(), but provide weaker memory
- ordering guarantees. Specifically, they do not guarantee ordering with
- respect to normal memory accesses (e.g. DMA buffers) nor do they guarantee
- ordering with respect to LOCK or UNLOCK operations. If the latter is
- required, an mmiowb() barrier can be used. Note that relaxed accesses to
- the same peripheral are guaranteed to be ordered with respect to each
- other.
+ (*) ioreadX(), iowriteX():
- (*) ioreadX(), iowriteX()
+ These will perform appropriately for the type of access they're actually
+ doing, be it inX()/outX() or readX()/writeX().
- These will perform appropriately for the type of access they're actually
- doing, be it inX()/outX() or readX()/writeX().
+With the exception of the string accessors (insX(), outsX(), readsX() and
+writesX()), all of the above assume that the underlying peripheral is
+little-endian and will therefore perform byte-swapping operations on big-endian
+architectures.
========================================
diff --git a/Documentation/misc-devices/ibmvmc.rst b/Documentation/misc-devices/ibmvmc.rst
index 46ded79554d4..b46df4ea2b81 100644
--- a/Documentation/misc-devices/ibmvmc.rst
+++ b/Documentation/misc-devices/ibmvmc.rst
@@ -1,4 +1,5 @@
.. SPDX-License-Identifier: GPL-2.0+
+
======================================================
IBM Virtual Management Channel Kernel Driver (IBMVMC)
======================================================
diff --git a/Documentation/misc-devices/index.rst b/Documentation/misc-devices/index.rst
new file mode 100644
index 000000000000..dfd1f45a3127
--- /dev/null
+++ b/Documentation/misc-devices/index.rst
@@ -0,0 +1,17 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================
+Assorted Miscellaneous Devices Documentation
+============================================
+
+This documentation contains information for assorted devices that do not
+fit into other categories.
+
+.. class:: toc-title
+
+ Table of contents
+
+.. toctree::
+ :maxdepth: 2
+
+ ibmvmc
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/networking/bpf_flow_dissector.rst
new file mode 100644
index 000000000000..b375ae2ec2c4
--- /dev/null
+++ b/Documentation/networking/bpf_flow_dissector.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+BPF Flow Dissector
+==================
+
+Overview
+========
+
+The flow dissector is a routine that parses metadata out of packets. It is
+used in various places in the networking subsystem (RFS, flow hash, etc).
+
+BPF flow dissector is an attempt to reimplement C-based flow dissector logic
+in BPF to gain all the benefits of BPF verifier (namely, limits on the
+number of instructions and tail calls).
+
+API
+===
+
+BPF flow dissector programs operate on an ``__sk_buff``. However, only a
+limited set of fields is accessible: ``data``, ``data_end`` and ``flow_keys``.
+``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
+and output arguments.
+
+The inputs are:
+ * ``nhoff`` - initial offset of the networking header
+ * ``thoff`` - initial offset of the transport header, initialized to nhoff
+ * ``n_proto`` - L3 protocol type, parsed out of L2 header
+
+The BPF flow dissector program should fill out the rest of the ``struct
+bpf_flow_keys`` fields. The input arguments ``nhoff/thoff/n_proto`` should
+also be adjusted accordingly.
+
+The return code of the BPF program is either BPF_OK to indicate successful
+dissection, or BPF_DROP to indicate a parsing error.
+
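+Below is a minimal sketch of such a program that handles plain IPv4 only; the
+in-tree reference implementation (see below) is far more complete, and the
+header paths and section name follow common libbpf/selftest conventions:
+
+.. code:: c
+
+    #include <linux/bpf.h>
+    #include <linux/if_ether.h>
+    #include <linux/ip.h>
+    #include <bpf/bpf_helpers.h>
+    #include <bpf/bpf_endian.h>
+
+    SEC("flow_dissector")
+    int dissect(struct __sk_buff *skb)
+    {
+        struct bpf_flow_keys *keys = skb->flow_keys;
+        void *data_end = (void *)(long)skb->data_end;
+        void *data = (void *)(long)skb->data;
+        struct iphdr *iph;
+
+        if (keys->n_proto != bpf_htons(ETH_P_IP))
+            return BPF_DROP;                 /* unhandled L3 protocol */
+
+        iph = data + keys->nhoff;
+        if ((void *)(iph + 1) > data_end)
+            return BPF_DROP;                 /* truncated header */
+
+        keys->addr_proto = ETH_P_IP;
+        keys->ip_proto = iph->protocol;
+        keys->ipv4_src = iph->saddr;
+        keys->ipv4_dst = iph->daddr;
+        keys->thoff = keys->nhoff + (iph->ihl << 2);  /* past the IP header */
+
+        return BPF_OK;
+    }
+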
+__sk_buff->data
+===============
+
+In the VLAN-less case, this is what the initial state of the BPF flow
+dissector looks like::
+
+ +------+------+------------+-----------+
+ | DMAC | SMAC | ETHER_TYPE | L3_HEADER |
+ +------+------+------------+-----------+
+ ^
+ |
+ +-- flow dissector starts here
+
+
+.. code:: c
+
+ skb->data + flow_keys->nhoff points to the first byte of L3_HEADER
+ flow_keys->thoff = nhoff
+ flow_keys->n_proto = ETHER_TYPE
+
+In case of VLAN, flow dissector can be called with the two different states.
+
+Pre-VLAN parsing::
+
+ +------+------+------+-----+-----------+-----------+
+ | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+ +------+------+------+-----+-----------+-----------+
+ ^
+ |
+ +-- flow dissector starts here
+
+.. code:: c
+
+ skb->data + flow_keys->nhoff points to the first byte of TCI
+ flow_keys->thoff = nhoff
+ flow_keys->n_proto = TPID
+
+Please note that TPID can be 802.1AD and, hence, BPF program would
+have to parse VLAN information twice for double tagged packets.
+
+
+Post-VLAN parsing::
+
+ +------+------+------+-----+-----------+-----------+
+ | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+ +------+------+------+-----+-----------+-----------+
+ ^
+ |
+ +-- flow dissector starts here
+
+.. code:: c
+
+ skb->data + flow_keys->nhoff points to the first byte of L3_HEADER
+ flow_keys->thoff = nhoff
+ flow_keys->n_proto = ETHER_TYPE
+
+In this case VLAN information has been processed before the flow dissector
+and BPF flow dissector is not required to handle it.
+
+
+The takeaway here is as follows: the BPF flow dissector program may be called
+with or without the VLAN header(s), so it has to be written carefully to
+gracefully handle all three cases: no VLAN, single VLAN and double VLAN.
+
+
+Reference Implementation
+========================
+
+See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference
+implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]``
+for the loader. bpftool can be used to load BPF flow dissector program as well.
+
+The reference implementation is organized as follows:
+ * ``jmp_table`` map that contains sub-programs for each supported L3 protocol
+ * ``_dissect`` routine - entry point; it parses the input ``n_proto`` and
+ uses ``bpf_tail_call`` to jump to the appropriate L3 handler
+
+Since BPF at this point doesn't support looping (or any jumping back),
+jmp_table is used instead to handle multiple levels of encapsulation (and
+IPv6 options).
+
+
+Current Limitations
+===================
+
+The BPF flow dissector doesn't support exporting all the metadata that the
+in-kernel C-based implementation can export. A notable example is single VLAN
+(802.1Q) and double VLAN (802.1AD) tags. Please refer to ``struct
+bpf_flow_keys`` for the set of information that can currently be exported from
+the BPF context.
diff --git a/Documentation/networking/checksum-offloads.rst b/Documentation/networking/checksum-offloads.rst
new file mode 100644
index 000000000000..905c8a84b103
--- /dev/null
+++ b/Documentation/networking/checksum-offloads.rst
@@ -0,0 +1,143 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Checksum Offloads
+=================
+
+
+Introduction
+============
+
+This document describes a set of techniques in the Linux networking stack to
+take advantage of checksum offload capabilities of various NICs.
+
+The following technologies are described:
+
+* TX Checksum Offload
+* LCO: Local Checksum Offload
+* RCO: Remote Checksum Offload
+
+Things that should be documented here but aren't yet:
+
+* RX Checksum Offload
+* CHECKSUM_UNNECESSARY conversion
+
+
+TX Checksum Offload
+===================
+
+The interface for offloading a transmit checksum to a device is explained in
+detail in comments near the top of include/linux/skbuff.h.
+
+In brief, it allows one to request that the device fill in a single
+ones-complement checksum defined by the sk_buff fields skb->csum_start and
+skb->csum_offset.
+The device should compute the 16-bit ones-complement checksum (i.e. the
+'IP-style' checksum) from csum_start to the end of the packet, and fill in the
+result at (csum_start + csum_offset).
+
+Because csum_offset cannot be negative, this ensures that the previous value of
+the checksum field is included in the checksum computation, thus it can be used
+to supply any needed corrections to the checksum (such as the sum of the
+pseudo-header for UDP or TCP).
+
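+Expressed as code, the device-side computation amounts to the classic
+ones-complement folding loop; the sketch below is illustrative only (in-kernel
+code uses helpers such as csum_partial() instead)::
+
+    static u16 csum_over_range(const u8 *p, const u8 *end)
+    {
+            u32 sum = 0;
+
+            for (; p + 1 < end; p += 2)     /* sum 16-bit big-endian words */
+                    sum += (p[0] << 8) | p[1];
+            if (p < end)                    /* odd trailing byte */
+                    sum += p[0] << 8;
+            while (sum >> 16)               /* fold the carries back in */
+                    sum = (sum & 0xffff) + (sum >> 16);
+            return ~sum;                    /* complement: 'IP-style' result */
+    }
+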
+This interface only allows a single checksum to be offloaded. Where
+encapsulation is used, the packet may have multiple checksum fields in
+different header layers, and the rest will have to be handled by another
+mechanism such as LCO or RCO.
+
+CRC32c can also be offloaded using this interface, by means of filling
+skb->csum_start and skb->csum_offset as described above, and setting
+skb->csum_not_inet: see skbuff.h comment (section 'D') for more details.
+
+No offloading of the IP header checksum is performed; it is always done in
+software. This is OK because when we build the IP header, we obviously have it
+in cache, so summing it isn't expensive. It's also rather short.
+
+The requirements for GSO are more complicated, because when segmenting an
+encapsulated packet both the inner and outer checksums may need to be edited or
+recomputed for each resulting segment. See the skbuff.h comment (section 'E')
+for more details.
+
+A driver declares its offload capabilities in netdev->hw_features; see
+Documentation/networking/netdev-features.txt for more. Note that a device
+which only advertises NETIF_F_IP[V6]_CSUM must still obey the csum_start and
+csum_offset given in the SKB; if it tries to deduce these itself in hardware
+(as some NICs do) the driver should check that the values in the SKB match
+those which the hardware will deduce, and if not, fall back to checksumming in
+software instead (with skb_csum_hwoffload_help() or one of the
+skb_checksum_help() / skb_crc32c_csum_help() functions, as mentioned in
+include/linux/skbuff.h).
+
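+When that check fails, the fallback looks roughly like the following sketch,
+where hw_can_offload_csum() stands in for a driver-specific capability check::
+
+    if (skb->ip_summed == CHECKSUM_PARTIAL &&
+        !hw_can_offload_csum(dev, skb)) {
+            if (skb_checksum_help(skb))
+                    goto drop;      /* software checksum also failed */
+    }
+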
+The stack should, for the most part, assume that checksum offload is supported
+by the underlying device. The only place that should check is
+validate_xmit_skb(), and the functions it calls directly or indirectly. That
+function compares the offload features requested by the SKB (which may include
+other offloads besides TX Checksum Offload) and, if they are not supported or
+enabled on the device (determined by netdev->features), performs the
+corresponding offload in software. In the case of TX Checksum Offload, that
+means calling skb_csum_hwoffload_help(skb, features).
+
+
+LCO: Local Checksum Offload
+===========================
+
+LCO is a technique for efficiently computing the outer checksum of an
+encapsulated datagram when the inner checksum is due to be offloaded.
+
+The ones-complement sum of a correctly checksummed TCP or UDP packet is equal
+to the complement of the sum of the pseudo header, because everything else gets
+'cancelled out' by the checksum field. This is because the sum was
+complemented before being written to the checksum field.
+
+More generally, this holds in any case where the 'IP-style' ones complement
+checksum is used, and thus any checksum that TX Checksum Offload supports.
+
+That is, if we have set up TX Checksum Offload with a start/offset pair, we
+know that after the device has filled in that checksum, the ones complement sum
+from csum_start to the end of the packet will be equal to the complement of
+whatever value we put in the checksum field beforehand. This allows us to
+compute the outer checksum without looking at the payload: we simply stop
+summing when we get to csum_start, then add the complement of the 16-bit word
+at (csum_start + csum_offset).
+
+Then, when the true inner checksum is filled in (either by hardware or by
+skb_checksum_help()), the outer checksum will become correct by virtue of the
+arithmetic.
+
+LCO is performed by the stack when constructing an outer UDP header for an
+encapsulation such as VXLAN or GENEVE, in udp_set_csum(). Similarly for the
+IPv6 equivalents, in udp6_set_csum().
+
+It is also performed when constructing an IPv4 GRE header, in
+net/ipv4/ip_gre.c:build_header(). It is *not* currently performed when
+constructing an IPv6 GRE header; the GRE checksum is computed over the whole
+packet in net/ipv6/ip6_gre.c:ip6gre_xmit2(), but it should be possible to use
+LCO here as IPv6 GRE still uses an IP-style checksum.
+
+All of the LCO implementations use a helper function lco_csum(), in
+include/linux/skbuff.h.
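+A simplified sketch of what it computes (paraphrased; see the header for the
+authoritative version)::
+
+    unsigned char *csum_start = skb_checksum_start(skb);
+    unsigned char *l4_hdr = skb_transport_header(skb);
+    __wsum partial;
+
+    /* start from the complement of the inner checksum field ... */
+    partial = ~csum_unfold(*(__sum16 *)(csum_start + skb->csum_offset));
+
+    /* ... then sum the outer headers, up to where the inner checksum
+     * coverage begins */
+    return csum_partial(l4_hdr, csum_start - l4_hdr, partial);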
+
+LCO can safely be used for nested encapsulations; in this case, the outer
+encapsulation layer will sum over both its own header and the 'middle' header.
+This does mean that the 'middle' header will get summed multiple times, but
+there doesn't seem to be a way to avoid that without incurring bigger costs
+(e.g. in SKB bloat).
+
+
+RCO: Remote Checksum Offload
+============================
+
+RCO is a technique for eliding the inner checksum of an encapsulated datagram,
+allowing the outer checksum to be offloaded. It does, however, involve a
+change to the encapsulation protocols, which the receiver must also support.
+For this reason, it is disabled by default.
+
+RCO is detailed in the following Internet-Drafts:
+
+* https://tools.ietf.org/html/draft-herbert-remotecsumoffload-00
+* https://tools.ietf.org/html/draft-herbert-vxlan-rco-00
+
+In Linux, RCO is implemented individually in each encapsulation protocol, and
+most tunnel types have flags controlling its use. For instance, VXLAN has the
+flag VXLAN_F_REMCSUM_TX (per struct vxlan_rdst) to indicate that RCO should be
+used when transmitting to a given remote destination.
diff --git a/Documentation/networking/checksum-offloads.txt b/Documentation/networking/checksum-offloads.txt
deleted file mode 100644
index 27bc09cfcf6d..000000000000
--- a/Documentation/networking/checksum-offloads.txt
+++ /dev/null
@@ -1,122 +0,0 @@
-Checksum Offloads in the Linux Networking Stack
-
-
-Introduction
-============
-
-This document describes a set of techniques in the Linux networking stack
- to take advantage of checksum offload capabilities of various NICs.
-
-The following technologies are described:
- * TX Checksum Offload
- * LCO: Local Checksum Offload
- * RCO: Remote Checksum Offload
-
-Things that should be documented here but aren't yet:
- * RX Checksum Offload
- * CHECKSUM_UNNECESSARY conversion
-
-
-TX Checksum Offload
-===================
-
-The interface for offloading a transmit checksum to a device is explained
- in detail in comments near the top of include/linux/skbuff.h.
-In brief, it allows to request the device fill in a single ones-complement
- checksum defined by the sk_buff fields skb->csum_start and
- skb->csum_offset. The device should compute the 16-bit ones-complement
- checksum (i.e. the 'IP-style' checksum) from csum_start to the end of the
- packet, and fill in the result at (csum_start + csum_offset).
-Because csum_offset cannot be negative, this ensures that the previous
- value of the checksum field is included in the checksum computation, thus
- it can be used to supply any needed corrections to the checksum (such as
- the sum of the pseudo-header for UDP or TCP).
-This interface only allows a single checksum to be offloaded. Where
- encapsulation is used, the packet may have multiple checksum fields in
- different header layers, and the rest will have to be handled by another
- mechanism such as LCO or RCO.
-CRC32c can also be offloaded using this interface, by means of filling
- skb->csum_start and skb->csum_offset as described above, and setting
- skb->csum_not_inet: see skbuff.h comment (section 'D') for more details.
-No offloading of the IP header checksum is performed; it is always done in
- software. This is OK because when we build the IP header, we obviously
- have it in cache, so summing it isn't expensive. It's also rather short.
-The requirements for GSO are more complicated, because when segmenting an
- encapsulated packet both the inner and outer checksums may need to be
- edited or recomputed for each resulting segment. See the skbuff.h comment
- (section 'E') for more details.
-
-A driver declares its offload capabilities in netdev->hw_features; see
- Documentation/networking/netdev-features.txt for more. Note that a device
- which only advertises NETIF_F_IP[V6]_CSUM must still obey the csum_start
- and csum_offset given in the SKB; if it tries to deduce these itself in
- hardware (as some NICs do) the driver should check that the values in the
- SKB match those which the hardware will deduce, and if not, fall back to
- checksumming in software instead (with skb_csum_hwoffload_help() or one of
- the skb_checksum_help() / skb_crc32c_csum_help functions, as mentioned in
- include/linux/skbuff.h).
-
-The stack should, for the most part, assume that checksum offload is
- supported by the underlying device. The only place that should check is
- validate_xmit_skb(), and the functions it calls directly or indirectly.
- That function compares the offload features requested by the SKB (which
- may include other offloads besides TX Checksum Offload) and, if they are
- not supported or enabled on the device (determined by netdev->features),
- performs the corresponding offload in software. In the case of TX
- Checksum Offload, that means calling skb_csum_hwoffload_help(skb, features).
-
-
-LCO: Local Checksum Offload
-===========================
-
-LCO is a technique for efficiently computing the outer checksum of an
- encapsulated datagram when the inner checksum is due to be offloaded.
-The ones-complement sum of a correctly checksummed TCP or UDP packet is
- equal to the complement of the sum of the pseudo header, because everything
- else gets 'cancelled out' by the checksum field. This is because the sum was
- complemented before being written to the checksum field.
-More generally, this holds in any case where the 'IP-style' ones complement
- checksum is used, and thus any checksum that TX Checksum Offload supports.
-That is, if we have set up TX Checksum Offload with a start/offset pair, we
- know that after the device has filled in that checksum, the ones
- complement sum from csum_start to the end of the packet will be equal to
- the complement of whatever value we put in the checksum field beforehand.
- This allows us to compute the outer checksum without looking at the payload:
- we simply stop summing when we get to csum_start, then add the complement of
- the 16-bit word at (csum_start + csum_offset).
-Then, when the true inner checksum is filled in (either by hardware or by
- skb_checksum_help()), the outer checksum will become correct by virtue of
- the arithmetic.
-
-LCO is performed by the stack when constructing an outer UDP header for an
- encapsulation such as VXLAN or GENEVE, in udp_set_csum(). Similarly for
- the IPv6 equivalents, in udp6_set_csum().
-It is also performed when constructing an IPv4 GRE header, in
- net/ipv4/ip_gre.c:build_header(). It is *not* currently performed when
- constructing an IPv6 GRE header; the GRE checksum is computed over the
- whole packet in net/ipv6/ip6_gre.c:ip6gre_xmit2(), but it should be
- possible to use LCO here as IPv6 GRE still uses an IP-style checksum.
-All of the LCO implementations use a helper function lco_csum(), in
- include/linux/skbuff.h.
-
-LCO can safely be used for nested encapsulations; in this case, the outer
- encapsulation layer will sum over both its own header and the 'middle'
- header. This does mean that the 'middle' header will get summed multiple
- times, but there doesn't seem to be a way to avoid that without incurring
- bigger costs (e.g. in SKB bloat).
-
-
-RCO: Remote Checksum Offload
-============================
-
-RCO is a technique for eliding the inner checksum of an encapsulated
- datagram, allowing the outer checksum to be offloaded. It does, however,
- involve a change to the encapsulation protocols, which the receiver must
- also support. For this reason, it is disabled by default.
-RCO is detailed in the following Internet-Drafts:
-https://tools.ietf.org/html/draft-herbert-remotecsumoffload-00
-https://tools.ietf.org/html/draft-herbert-vxlan-rco-00
-In Linux, RCO is implemented individually in each encapsulation protocol,
- and most tunnel types have flags controlling its use. For instance, VXLAN
- has the flag VXLAN_F_REMCSUM_TX (per struct vxlan_rdst) to indicate that
- RCO should be used when transmitting to a given remote destination.
diff --git a/Documentation/networking/decnet.txt b/Documentation/networking/decnet.txt
index e12a4900cf72..d192f8b9948b 100644
--- a/Documentation/networking/decnet.txt
+++ b/Documentation/networking/decnet.txt
@@ -22,8 +22,6 @@ you'll need the following options as well...
CONFIG_DECNET_ROUTER (to be able to add/delete routes)
CONFIG_NETFILTER (will be required for the DECnet routing daemon)
- CONFIG_DECNET_ROUTE_FWMARK is optional
-
Don't turn on SIOCGIFCONF support for DECnet unless you are really sure
that you need it, in general you won't and it can cause ifconfig to
malfunction.
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index f0da1b001514..984e68f9e026 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,6 +9,7 @@ Contents:
netdev-FAQ
af_xdp
batman-adv
+ bpf_flow_dissector
can
can_ucan_protocol
device_drivers/freescale/dpaa2/index
@@ -36,6 +37,9 @@ Contents:
alias
bridge
snmp_counter
+ checksum-offloads
+ segmentation-offloads
+ scaling
.. only:: subproject
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index acdfb5d2bcaa..c4ac35234f05 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -422,6 +422,7 @@ tcp_min_rtt_wlen - INTEGER
minimum RTT when it is moved to a longer path (e.g., due to traffic
engineering). A longer window makes the filter more resistant to RTT
inflations such as transient congestion. The unit is seconds.
+ Possible values: 0 - 86400 (1 day)
Default: 300
tcp_moderate_rcvbuf - BOOLEAN
@@ -1336,6 +1337,7 @@ tag - INTEGER
Default value is 0.
xfrm4_gc_thresh - INTEGER
+ (Obsolete since linux-4.14)
The threshold at which we will start garbage collecting for IPv4
destination cache entries. At twice this value the system will
refuse new allocations.
@@ -1919,6 +1921,7 @@ echo_ignore_all - BOOLEAN
Default: 0
xfrm6_gc_thresh - INTEGER
+ (Obsolete since linux-4.14)
The threshold at which we will start garbage collecting for IPv6
destination cache entries. At twice this value the system will
refuse new allocations.
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index 18c1415e7bfa..ace56204dd03 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -50,7 +50,7 @@ the excellent reporting over at LWN.net or read the original code.
patchset
[PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY
- http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
+ https://lkml.kernel.org/netdev/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
Interface
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
index 0ac5fa77f501..642fa963be3c 100644
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -131,6 +131,19 @@ it to the maintainer to figure out what is the most recent and current
version that should be applied. If there is any doubt, the maintainer
will reply and ask what should be done.
+Q: I made changes to only a few patches in a patch series, should I resend only those changed?
+-----------------------------------------------------------------------------------------------
+A: No, please resend the entire patch series and make sure you number your
+patches such that it is clear this is the latest and greatest set of patches
+that can be applied.
+
+Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
+-------------------------------------------------------------------------------------------------------------------------------------------
+A: No revert is possible; once it is pushed out, it stays like that. Please
+send incremental versions on top of what has been merged in order to fix the
+patches so that they end up the way they would have looked had your latest
+patch series been merged.
+
Q: How can I tell what patches are queued up for backporting to the various stable releases?
--------------------------------------------------------------------------------------------
A: Normally Greg Kroah-Hartman collects stable commits himself, but for
diff --git a/Documentation/networking/nf_flowtable.txt b/Documentation/networking/nf_flowtable.txt
index 54128c50d508..ca2136c76042 100644
--- a/Documentation/networking/nf_flowtable.txt
+++ b/Documentation/networking/nf_flowtable.txt
@@ -44,10 +44,10 @@ including the Netfilter hooks and the flowtable fastpath bypass.
/ \ / \ |Routing | / \
--> ingress ---> prerouting ---> |decision| | postrouting |--> neigh_xmit
\_________/ \__________/ ---------- \____________/ ^
- | ^ | | ^ |
- flowtable | | ____\/___ | |
- | | | / \ | |
- __\/___ | --------->| forward |------------ |
+ | ^ | ^ |
+ flowtable | ____\/___ | |
+ | | / \ | |
+ __\/___ | | forward |------------ |
|-----| | \_________/ |
|-----| | 'flow offload' rule |
|-----| | adds entry to |
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 2df5894353d6..cd7303d7fa25 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
(*) Check call still alive.
- u32 rxrpc_kernel_check_life(struct socket *sock,
- struct rxrpc_call *call);
+ bool rxrpc_kernel_check_life(struct socket *sock,
+ struct rxrpc_call *call,
+ u32 *_life);
void rxrpc_kernel_probe_life(struct socket *sock,
struct rxrpc_call *call);
- The first function returns a number that is updated when ACKs are received
- from the peer (notably including PING RESPONSE ACKs which we can elicit by
- sending PING ACKs to see if the call still exists on the server). The
- caller should compare the numbers of two calls to see if the call is still
- alive after waiting for a suitable interval.
+ The first function passes back in *_life a number that is updated when
+ ACKs are received from the peer (notably including PING RESPONSE ACKs
+ which we can elicit by sending PING ACKs to see if the call still exists
+ on the server). The caller should compare the numbers of two calls to see
+ if the call is still alive after waiting for a suitable interval. It also
+ returns true as long as the call hasn't yet reached the completed state.
This allows the caller to work out if the server is still contactable and
if the call is still alive on the server while waiting for the server to
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.rst
index b7056a8a0540..f78d7bf27ff5 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.rst
@@ -1,4 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
Scaling in the Linux Networking Stack
+=====================================
Introduction
@@ -10,11 +14,11 @@ multi-processor systems.
The following technologies are described:
- RSS: Receive Side Scaling
- RPS: Receive Packet Steering
- RFS: Receive Flow Steering
- Accelerated Receive Flow Steering
- XPS: Transmit Packet Steering
+- RSS: Receive Side Scaling
+- RPS: Receive Packet Steering
+- RFS: Receive Flow Steering
+- Accelerated Receive Flow Steering
+- XPS: Transmit Packet Steering
RSS: Receive Side Scaling
@@ -45,7 +49,9 @@ programmable filters. For example, webserver bound TCP port 80 packets
can be directed to their own receive queue. Such “n-tuple” filters can
be configured from ethtool (--config-ntuple).
-==== RSS Configuration
+
+RSS Configuration
+-----------------
The driver for a multi-queue capable NIC typically provides a kernel
module parameter for specifying the number of hardware queues to
@@ -63,7 +69,9 @@ commands (--show-rxfh-indir and --set-rxfh-indir). Modifying the
indirection table could be done to give different queues different
relative weights.
-== RSS IRQ Configuration
+
+RSS IRQ Configuration
+~~~~~~~~~~~~~~~~~~~~~
Each receive queue has a separate IRQ associated with it. The NIC triggers
this to notify a CPU when new packets arrive on the given queue. The
@@ -77,7 +85,9 @@ affinity of each interrupt see Documentation/IRQ-affinity.txt. Some systems
will be running irqbalance, a daemon that dynamically optimizes IRQ
assignments and as a result may override any manual settings.
-== Suggested Configuration
+
+Suggested Configuration
+~~~~~~~~~~~~~~~~~~~~~~~
RSS should be enabled when latency is a concern or whenever receive
interrupt processing forms a bottleneck. Spreading load between CPUs
@@ -105,10 +115,12 @@ Whereas RSS selects the queue and hence CPU that will run the hardware
interrupt handler, RPS selects the CPU to perform protocol processing
above the interrupt handler. This is accomplished by placing the packet
on the desired CPU’s backlog queue and waking up the CPU for processing.
-RPS has some advantages over RSS: 1) it can be used with any NIC,
-2) software filters can easily be added to hash over new protocols,
+RPS has some advantages over RSS:
+
+1) it can be used with any NIC
+2) software filters can easily be added to hash over new protocols
3) it does not increase hardware device interrupt rate (although it does
-introduce inter-processor interrupts (IPIs)).
+ introduce inter-processor interrupts (IPIs))
RPS is called during bottom half of the receive interrupt handler, when
a driver sends a packet up the network stack with netif_rx() or
@@ -135,21 +147,25 @@ packets have been queued to their backlog queue. The IPI wakes backlog
processing on the remote CPU, and any queued packets are then processed
up the networking stack.
-==== RPS Configuration
+
+RPS Configuration
+-----------------
RPS requires a kernel compiled with the CONFIG_RPS kconfig symbol (on
by default for SMP). Even when compiled in, RPS remains disabled until
explicitly configured. The list of CPUs to which RPS may forward traffic
-can be configured for each receive queue using a sysfs file entry:
+can be configured for each receive queue using a sysfs file entry::
- /sys/class/net/<dev>/queues/rx-<n>/rps_cpus
+ /sys/class/net/<dev>/queues/rx-<n>/rps_cpus
This file implements a bitmap of CPUs. RPS is disabled when it is zero
(the default), in which case packets are processed on the interrupting
CPU. Documentation/IRQ-affinity.txt explains how CPUs are assigned to
the bitmap.
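+
+For illustration, the same bitmap write expressed in C rather than via echo
+from a shell (the interface name, queue and mask are examples)::
+
+	#include <fcntl.h>
+	#include <unistd.h>
+
+	int enable_rps(void)
+	{
+		/* steer eth0's rx-0 protocol processing to CPUs 0-3 */
+		int fd = open("/sys/class/net/eth0/queues/rx-0/rps_cpus",
+			      O_WRONLY);
+		int ret;
+
+		if (fd < 0)
+			return -1;
+		ret = write(fd, "f", 1) == 1 ? 0 : -1;
+		close(fd);
+		return ret;
+	}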
-== Suggested Configuration
+
+Suggested Configuration
+~~~~~~~~~~~~~~~~~~~~~~~
For a single queue device, a typical RPS configuration would be to set
the rps_cpus to the CPUs in the same memory domain of the interrupting
@@ -163,7 +179,9 @@ and unnecessary. If there are fewer hardware queues than CPUs, then
RPS might be beneficial if the rps_cpus for each queue are the ones that
share the same memory domain as the interrupting CPU for that queue.
-==== RPS Flow Limit
+
+RPS Flow Limit
+--------------
RPS scales kernel receive processing across CPUs without introducing
reordering. The trade-off to sending all packets from the same flow
@@ -187,29 +205,33 @@ No packets are dropped when the input packet queue length is below
the threshold, so flow limit does not sever connections outright:
even large flows maintain connectivity.
-== Interface
+
+Interface
+~~~~~~~~~
Flow limit is compiled in by default (CONFIG_NET_FLOW_LIMIT), but not
turned on. It is implemented for each CPU independently (to avoid lock
and cache contention) and toggled per CPU by setting the relevant bit
in sysctl net.core.flow_limit_cpu_bitmap. It exposes the same CPU
-bitmap interface as rps_cpus (see above) when called from procfs:
+bitmap interface as rps_cpus (see above) when called from procfs::
- /proc/sys/net/core/flow_limit_cpu_bitmap
+ /proc/sys/net/core/flow_limit_cpu_bitmap
Per-flow rate is calculated by hashing each packet into a hashtable
bucket and incrementing a per-bucket counter. The hash function is
the same that selects a CPU in RPS, but as the number of buckets can
be much larger than the number of CPUs, flow limit has finer-grained
identification of large flows and fewer false positives. The default
-table has 4096 buckets. This value can be modified through sysctl
+table has 4096 buckets. This value can be modified through sysctl::
- net.core.flow_limit_table_len
+ net.core.flow_limit_table_len
The value is only consulted when a new table is allocated. Modifying
it does not update active tables.
-== Suggested Configuration
+
+Suggested Configuration
+~~~~~~~~~~~~~~~~~~~~~~~
Flow limit is useful on systems with many concurrent connections,
where a single connection taking up 50% of a CPU indicates a problem.
@@ -280,10 +302,10 @@ table), the packet is enqueued onto that CPU’s backlog. If they differ,
the current CPU is updated to match the desired CPU if one of the
following is true:
-- The current CPU's queue head counter >= the recorded tail counter
- value in rps_dev_flow[i]
-- The current CPU is unset (>= nr_cpu_ids)
-- The current CPU is offline
+ - The current CPU's queue head counter >= the recorded tail counter
+ value in rps_dev_flow[i]
+ - The current CPU is unset (>= nr_cpu_ids)
+ - The current CPU is offline
After this check, the packet is sent to the (possibly updated) current
CPU. These rules aim to ensure that a flow only moves to a new CPU when
@@ -291,19 +313,23 @@ there are no packets outstanding on the old CPU, as the outstanding
packets could arrive later than those about to be processed on the new
CPU.
-==== RFS Configuration
+
+RFS Configuration
+-----------------
RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on
by default for SMP). The functionality remains disabled until explicitly
-configured. The number of entries in the global flow table is set through:
+configured. The number of entries in the global flow table is set through::
+
+ /proc/sys/net/core/rps_sock_flow_entries
- /proc/sys/net/core/rps_sock_flow_entries
+The number of entries in the per-queue flow table are set through::
-The number of entries in the per-queue flow table are set through:
+ /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
- /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
-== Suggested Configuration
+Suggested Configuration
+~~~~~~~~~~~~~~~~~~~~~~~
Both of these need to be set before RFS is enabled for a receive queue.
Values for both are rounded up to the nearest power of two. The
functions in the cpu_rmap (“CPU affinity reverse map”) kernel library
to populate the map. For each CPU, the corresponding queue in the map is
set to be one whose processing CPU is closest in cache locality.
-==== Accelerated RFS Configuration
+
+Accelerated RFS Configuration
+-----------------------------
Accelerated RFS is only available if the kernel is compiled with
CONFIG_RFS_ACCEL and support is provided by the NIC device and driver.
@@ -356,11 +384,14 @@ of CPU to queues is automatically deduced from the IRQ affinities
configured for each receive queue by the driver, so no additional
configuration should be necessary.
-== Suggested Configuration
+
+Suggested Configuration
+~~~~~~~~~~~~~~~~~~~~~~~
This technique should be enabled whenever one wants to use RFS and the
NIC supports hardware acceleration.
+
XPS: Transmit Packet Steering
=============================
@@ -430,20 +461,25 @@ transport layer is responsible for setting ooo_okay appropriately. TCP,
for instance, sets the flag when all data for a connection has been
acknowledged.
-==== XPS Configuration
+XPS Configuration
+-----------------
XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by
default for SMP). The functionality remains disabled until explicitly
configured. To enable XPS, the bitmap of CPUs/receive-queues that may
use a transmit queue is configured using the sysfs file entry:
-For selection based on CPUs map:
-/sys/class/net/<dev>/queues/tx-<n>/xps_cpus
+For selection based on CPUs map::
+
+ /sys/class/net/<dev>/queues/tx-<n>/xps_cpus
+
+For selection based on receive-queues map::
+
+ /sys/class/net/<dev>/queues/tx-<n>/xps_rxqs
-For selection based on receive-queues map:
-/sys/class/net/<dev>/queues/tx-<n>/xps_rxqs
-== Suggested Configuration
+Suggested Configuration
+~~~~~~~~~~~~~~~~~~~~~~~
For a network device with a single transmission queue, XPS configuration
has no effect, since there is no choice in this case. In a multi-queue
@@ -460,16 +496,18 @@ explicitly configured mapping receive-queue(s) to transmit queue(s). If the
user configuration for receive-queue map does not apply, then the transmit
queue is selected based on the CPUs map.
-Per TX Queue rate limitation:
-=============================
+
+Per TX Queue rate limitation
+============================
These are rate-limitation mechanisms implemented by HW, where currently
-a max-rate attribute is supported, by setting a Mbps value to
+a max-rate attribute is supported, by setting a Mbps value to::
-/sys/class/net/<dev>/queues/tx-<n>/tx_maxrate
+ /sys/class/net/<dev>/queues/tx-<n>/tx_maxrate
A value of zero means disabled, and this is the default.
+
Further Information
===================
RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into
@@ -480,5 +518,6 @@ Accelerated RFS was introduced in 2.6.35. Original patches were
submitted by Ben Hutchings (bwh@kernel.org)
Authors:
-Tom Herbert (therbert@google.com)
-Willem de Bruijn (willemb@google.com)
+
+- Tom Herbert (therbert@google.com)
+- Willem de Bruijn (willemb@google.com)
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.rst
index aca542ec125c..89d1ee933e9f 100644
--- a/Documentation/networking/segmentation-offloads.txt
+++ b/Documentation/networking/segmentation-offloads.rst
@@ -1,4 +1,9 @@
-Segmentation Offloads in the Linux Networking Stack
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+Segmentation Offloads
+=====================
+
Introduction
============
@@ -15,6 +20,7 @@ The following technologies are described:
* Partial Generic Segmentation Offload - GSO_PARTIAL
* SCTP acceleration with GSO - GSO_BY_FRAGS
+
TCP Segmentation Offload
========================
@@ -42,6 +48,7 @@ NETIF_F_TSO_MANGLEID set then the IP ID can be ignored when performing TSO
and we will either increment the IP ID for all frames, or leave it at a
static value based on driver preference.
+
UDP Fragmentation Offload
=========================
@@ -54,6 +61,7 @@ UFO is deprecated: modern kernels will no longer generate UFO skbs, but can
still receive them from tuntap and similar devices. Offload of UDP-based
tunnel protocols is still supported.
+
IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
========================================================
@@ -71,17 +79,19 @@ refer to the tunnel headers as the outer headers, while the encapsulated
data is normally referred to as the inner headers. Below is the list of
calls to access the given headers:
-IPIP/SIT Tunnel:
- Outer Inner
-MAC skb_mac_header
-Network skb_network_header skb_inner_network_header
-Transport skb_transport_header
+IPIP/SIT Tunnel::
+
+ Outer Inner
+ MAC skb_mac_header
+ Network skb_network_header skb_inner_network_header
+ Transport skb_transport_header
-UDP/GRE Tunnel:
- Outer Inner
-MAC skb_mac_header skb_inner_mac_header
-Network skb_network_header skb_inner_network_header
-Transport skb_transport_header skb_inner_transport_header
+UDP/GRE Tunnel::
+
+ Outer Inner
+ MAC skb_mac_header skb_inner_mac_header
+ Network skb_network_header skb_inner_network_header
+ Transport skb_transport_header skb_inner_transport_header
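+A brief kernel-context sketch of how these helpers are used (the
+function name is hypothetical, not part of the stack)::
+
+    #include <linux/skbuff.h>
+    #include <linux/ip.h>
+
+    /* For an encapsulated packet, the inner_* helpers point past the
+     * outer tunnel headers to the encapsulated (inner) headers. */
+    static struct iphdr *example_inner_iphdr(struct sk_buff *skb)
+    {
+        return (struct iphdr *)skb_inner_network_header(skb);
+    }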
In addition to the above tunnel types there are also SKB_GSO_GRE_CSUM and
SKB_GSO_UDP_TUNNEL_CSUM. These two additional tunnel types reflect the
@@ -93,6 +103,7 @@ header has requested a remote checksum offload. In this case the inner
headers will be left with a partial checksum and only the outer header
checksum will be computed.
+
Generic Segmentation Offload
============================
@@ -106,6 +117,7 @@ Before enabling any hardware segmentation offload a corresponding software
offload is required in GSO. Otherwise it becomes possible for a frame to
be re-routed between devices and end up being unable to be transmitted.
+
Generic Receive Offload
=======================
@@ -117,6 +129,7 @@ this is IPv4 ID in the case that the DF bit is set for a given IP header.
If the value of the IPv4 ID is not sequentially incrementing it will be
altered so that it is when a frame assembled via GRO is segmented via GSO.
+
Partial Generic Segmentation Offload
====================================
@@ -134,6 +147,7 @@ is the outer IPv4 ID field. It is up to the device drivers to guarantee
that the IPv4 ID field is incremented in the case that a given header does
not have the DF bit set.
+
SCTP acceleration with GSO
==========================
@@ -157,14 +171,14 @@ appropriately.
There are some helpers to make this easier:
- - skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if
- an skb is an SCTP GSO skb.
+- skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if
+ an skb is an SCTP GSO skb.
- - For size checks, the skb_gso_validate_*_len family of helpers correctly
- considers GSO_BY_FRAGS.
+- For size checks, the skb_gso_validate_*_len family of helpers correctly
+ considers GSO_BY_FRAGS.
- - For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
- will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
+- For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
+ will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
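A small kernel-context sketch of the first helper above (the function
name is hypothetical)::

    #include <linux/skbuff.h>

    /* GSO_BY_FRAGS (SCTP) skbs must not have their gso_size adjusted;
     * skb_increase_gso_size()/skb_decrease_gso_size() would WARN. */
    static bool example_may_adjust_gso_size(const struct sk_buff *skb)
    {
        if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
            return false;
        return skb_is_gso(skb);
    }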
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index 52b026be028f..38a4edc4522b 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -413,7 +413,7 @@ algorithm.
.. _F-RTO: https://tools.ietf.org/html/rfc5682
TCP Fast Path
-============
+=============
When the kernel receives a TCP packet, it has two paths to handle the
packet: one is the fast path, the other is the slow path. The comment in
the kernel code provides a good explanation of them; I pasted them below::
@@ -681,6 +681,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a
DSACK to the sender.
* TcpExtTCPDSACKRecv
+
The TCP stack receives a DSACK, which indicates an acknowledged
duplicate packet is received.
@@ -690,7 +691,7 @@ The TCP stack receives a DSACK, which indicates an out of order
duplicate packet is received.
invalid SACK and DSACK
-====================
+======================
When a SACK (or DSACK) block is invalid, a corresponding counter would
be updated. The validation method is based on the start/end sequence
number of the SACK block. For more details, please refer to the comment
@@ -704,11 +705,13 @@ explanation:
.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
* TcpExtTCPSACKDiscard
+
This counter indicates how many SACK blocks are invalid. If the invalid
SACK block is caused by ACK recording, the TCP stack will only ignore
it and won't update this counter.
* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
+
When a DSACK block is invalid, one of these two counters would be
updated. Which counter will be updated depends on the undo_marker flag
of the TCP socket. If the undo_marker is not set, the TCP stack isn't
@@ -719,7 +722,7 @@ will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
will be updated. As implied in its name, it might be an old packet.
SACK shift
-=========
+==========
The Linux networking stack stores data in sk_buff structs (skb for
short). If a SACK block crosses multiple skbs, the TCP stack will try
to re-arrange data in these skbs. E.g. if a SACK block acknowledges seq
@@ -730,12 +733,15 @@ seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
discarded; this operation is 'merge'.
* TcpExtTCPSackShifted
+
A skb is shifted
* TcpExtTCPSackMerged
+
A skb is merged
* TcpExtTCPSackShiftFallback
+
A skb should be shifted or merged, but the TCP stack doesn't do it for
some reason.
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index b78dd680c038..8ea913e99fa1 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -443,7 +443,7 @@ In function prototypes, include parameter names with their data types.
Although this is not required by the C language, it is preferred in Linux
because it is a simple way to add valuable information for the reader.
-Do not use the `extern' keyword with function prototypes as this makes
+Do not use the ``extern`` keyword with function prototypes as this makes
lines longer and isn't strictly necessary.
@@ -595,26 +595,43 @@ values. To do the latter, you can stick the following in your .emacs file:
(* (max steps 1)
c-basic-offset)))
- (add-hook 'c-mode-common-hook
- (lambda ()
- ;; Add kernel style
- (c-add-style
- "linux-tabs-only"
- '("linux" (c-offsets-alist
- (arglist-cont-nonempty
- c-lineup-gcc-asm-reg
- c-lineup-arglist-tabs-only))))))
-
- (add-hook 'c-mode-hook
- (lambda ()
- (let ((filename (buffer-file-name)))
- ;; Enable kernel mode for the appropriate files
- (when (and filename
- (string-match (expand-file-name "~/src/linux-trees")
- filename))
- (setq indent-tabs-mode t)
- (setq show-trailing-whitespace t)
- (c-set-style "linux-tabs-only")))))
+ (dir-locals-set-class-variables
+ 'linux-kernel
+ '((c-mode . (
+ (c-basic-offset . 8)
+ (c-label-minimum-indentation . 0)
+ (c-offsets-alist . (
+ (arglist-close . c-lineup-arglist-tabs-only)
+ (arglist-cont-nonempty .
+ (c-lineup-gcc-asm-reg c-lineup-arglist-tabs-only))
+ (arglist-intro . +)
+ (brace-list-intro . +)
+ (c . c-lineup-C-comments)
+ (case-label . 0)
+ (comment-intro . c-lineup-comment)
+ (cpp-define-intro . +)
+ (cpp-macro . -1000)
+ (cpp-macro-cont . +)
+ (defun-block-intro . +)
+ (else-clause . 0)
+ (func-decl-cont . +)
+ (inclass . +)
+ (inher-cont . c-lineup-multi-inher)
+ (knr-argdecl-intro . 0)
+ (label . -1000)
+ (statement . 0)
+ (statement-block-intro . +)
+ (statement-case-intro . +)
+ (statement-cont . +)
+ (substatement . +)
+ ))
+ (indent-tabs-mode . t)
+ (show-trailing-whitespace . t)
+ ))))
+
+ (dir-locals-set-directory-class
+ (expand-file-name "~/src/linux-trees")
+ 'linux-kernel)
This will make emacs go better with the kernel coding style for C
files below ``~/src/linux-trees``.
@@ -921,7 +938,37 @@ result. Typical examples would be functions that return pointers; they use
NULL or the ERR_PTR mechanism to report failure.
-17) Don't re-invent the kernel macros
+17) Using bool
+--------------
+
+The Linux kernel bool type is an alias for the C99 _Bool type. bool values can
+only evaluate to 0 or 1, and implicit or explicit conversion to bool
+automatically converts the value to true or false. When using bool types the
+!! construction is not needed, which eliminates a class of bugs.
+
+When working with bool values the true and false definitions should be used
+instead of 1 and 0.
+
+bool function return types and stack variables are always fine to use whenever
+appropriate. Use of bool is encouraged to improve readability and is often a
+better option than 'int' for storing boolean values.
+
+Do not use bool if cache line layout or size of the value matters, as its size
+and alignment varies based on the compiled architecture. Structures that are
+optimized for alignment and size should not use bool.
+
+If a structure has many true/false values, consider consolidating them into a
+bitfield with 1 bit members, or using an appropriate fixed width type, such as
+u8.
+
+Similarly for function arguments, many true/false values can be consolidated
+into a single bitwise 'flags' argument and 'flags' can often be a more
+readable alternative if the call-sites have naked true/false constants.
+
+Otherwise limited use of bool in structures and arguments can improve
+readability.
+
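+A short sketch of the consolidation suggested above (names are
+illustrative only)::
+
+    struct widget_state {
+        unsigned int powered:1;      /* 1-bit members instead of   */
+        unsigned int suspended:1;    /* three separate bool fields */
+        unsigned int removable:1;
+    };
+
+    static bool widget_ready(const struct widget_state *w)
+    {
+        /* a bool return type reads naturally at call sites */
+        return w->powered && !w->suspended;
+    }
+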
+18) Don't re-invent the kernel macros
-------------------------------------
The header file include/linux/kernel.h contains a number of macros that
@@ -944,7 +991,7 @@ need them. Feel free to peruse that header file to see what else is already
defined that you shouldn't reproduce in your code.
-18) Editor modelines and other cruft
+19) Editor modelines and other cruft
------------------------------------
Some editors can interpret configuration information embedded in source files,
@@ -978,7 +1025,7 @@ own custom mode, or may have some other magic method for making indentation
work correctly.
-19) Inline assembly
+20) Inline assembly
-------------------
In architecture-specific code, you may need to use inline assembly to interface
@@ -1010,7 +1057,7 @@ the next instruction in the assembly output:
: /* outputs */ : /* inputs */ : /* clobbers */);
-20) Conditional Compilation
+21) Conditional Compilation
---------------------------
Wherever possible, don't use preprocessor conditionals (#if, #ifdef) in .c
diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst
index 58b2f46c4f98..ad2b6c852b95 100644
--- a/Documentation/process/howto.rst
+++ b/Documentation/process/howto.rst
@@ -225,7 +225,7 @@ Cross-Reference project, which is able to present source code in a
self-referential, indexed webpage format. An excellent up-to-date
repository of the kernel code may be found at:
- http://lxr.free-electrons.com/
+ https://elixir.bootlin.com/
The development process
@@ -235,23 +235,21 @@ Linux kernel development process currently consists of a few different
main kernel "branches" and lots of different subsystem-specific kernel
branches. These different branches are:
- - main 4.x kernel tree
- - 4.x.y -stable kernel tree
- - 4.x -git kernel patches
- - subsystem specific kernel trees and patches
- - the 4.x -next kernel tree for integration tests
+ - Linus's mainline tree
+ - Various stable trees with multiple major numbers
+ - Subsystem-specific trees
+ - linux-next integration testing tree
-4.x kernel tree
-~~~~~~~~~~~~~~~
+Mainline tree
+~~~~~~~~~~~~~
-4.x kernels are maintained by Linus Torvalds, and can be found on
-https://kernel.org in the pub/linux/kernel/v4.x/ directory. Its development
-process is as follows:
+The mainline tree is maintained by Linus Torvalds, and can be found at
+https://kernel.org or in its git repository. Its development process is as
+follows:
- As soon as a new kernel is released, a two-week window opens,
during which maintainers can submit big diffs to
Linus, usually the patches that have already been included in the
- -next kernel for a few weeks. The preferred way to submit big changes
+ linux-next for a few weeks. The preferred way to submit big changes
is using git (the kernel's source management tool, more information
can be found at https://git-scm.com/) but plain patches are also just
fine.
@@ -278,21 +276,19 @@ mailing list about kernel releases:
released according to perceived bug status, not according to a
preconceived timeline."*
-4.x.y -stable kernel tree
-~~~~~~~~~~~~~~~~~~~~~~~~~
+Various stable trees with multiple major numbers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Kernels with 3-part versions are -stable kernels. They contain
relatively small and critical fixes for security problems or significant
-regressions discovered in a given 4.x kernel.
+regressions discovered in a given major mainline release, with the first
+two parts of the version number matching that release.
This is the recommended branch for users who want the most recent stable
kernel and are not interested in helping test development/experimental
versions.
-If no 4.x.y kernel is available, then the highest numbered 4.x
-kernel is the current stable kernel.
-
-4.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+Stable trees are maintained by the "stable" team <stable@vger.kernel.org>, and
are released as needs dictate. The normal release period is approximately
two weeks, but it can be longer if there are no pressing problems. A
security-related problem, instead, can cause a release to happen almost
@@ -302,17 +298,8 @@ The file :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rule
in the kernel tree documents what kinds of changes are acceptable for
the -stable tree, and how the release process works.
-4.x -git patches
-~~~~~~~~~~~~~~~~
-
-These are daily snapshots of Linus' kernel tree which are managed in a
-git repository (hence the name.) These patches are usually released
-daily and represent the current state of Linus' tree. They are more
-experimental than -rc kernels since they are generated automatically
-without even a cursory glance to see if they are sane.
-
-Subsystem Specific kernel trees and patches
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Subsystem-specific trees
+~~~~~~~~~~~~~~~~~~~~~~~~
The maintainers of the various kernel subsystems --- and also many
kernel subsystem developers --- expose their current state of
@@ -336,19 +323,19 @@ revisions to it, and maintainers can mark patches as under review,
accepted, or rejected. Most of these patchwork sites are listed at
https://patchwork.kernel.org/.
-4.x -next kernel tree for integration tests
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+linux-next integration testing tree
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Before updates from subsystem trees are merged into the mainline 4.x
-tree, they need to be integration-tested. For this purpose, a special
+Before updates from subsystem trees are merged into the mainline tree,
+they need to be integration-tested. For this purpose, a special
testing repository exists into which virtually all subsystem trees are
pulled on an almost daily basis:
https://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
-This way, the -next kernel gives a summary outlook onto what will be
+This way, linux-next gives a summary outlook onto what will be
expected to go into the mainline kernel at the next merge period.
-Adventurous testers are very welcome to runtime-test the -next kernel.
+Adventurous testers are very welcome to runtime-test linux-next.
Bug Reporting
diff --git a/Documentation/process/kernel-docs.rst b/Documentation/process/kernel-docs.rst
index 3fb28de556e4..ab12dddc773e 100644
--- a/Documentation/process/kernel-docs.rst
+++ b/Documentation/process/kernel-docs.rst
@@ -565,7 +565,7 @@ Miscellaneous
* Name: **Cross-Referencing Linux**
- :URL: http://lxr.free-electrons.com/
+ :URL: https://elixir.bootlin.com/
:Keywords: Browsing source code.
:Description: Another web-based Linux kernel source code browser.
Lots of cross references to variables and functions. You can see
diff --git a/Documentation/process/license-rules.rst b/Documentation/process/license-rules.rst
index 2bb8c0fc2238..6b09033a8e9e 100644
--- a/Documentation/process/license-rules.rst
+++ b/Documentation/process/license-rules.rst
@@ -62,7 +62,7 @@ License identifier syntax
The SPDX license identifier in kernel files shall be added at the first
possible line in a file which can contain a comment. For the majority
- or files this is the first line, except for scripts which require the
+ of files this is the first line, except for scripts which require the
'#!PATH_TO_INTERPRETER' in the first line. For those scripts the SPDX
identifier goes into the second line.
@@ -368,7 +368,69 @@ kernel, can be broken down into:
All SPDX license identifiers and exceptions must have a corresponding file
-in the LICENSE subdirectories. This is required to allow tool
+in the LICENSES subdirectories. This is required to allow tool
verification (e.g. checkpatch.pl) and to have the licenses ready to read
and extract right from the source, which is recommended by various FOSS
organizations, e.g. the `FSFE REUSE initiative <https://reuse.software/>`_.
+
+_`MODULE_LICENSE`
+-----------------
+
+ Loadable kernel modules also require a MODULE_LICENSE() tag. This tag is
+ neither a replacement for proper source code license information
+ (SPDX-License-Identifier) nor in any way relevant for expressing or
+ determining the exact license under which the source code of the module
+ is provided.
+
+  The sole purpose of this tag is to provide sufficient information for
+  the kernel module loader and for user space tools to tell whether the
+  module is free software or proprietary.
+
+ The valid license strings for MODULE_LICENSE() are:
+
+ ============================= =============================================
+ "GPL" Module is licensed under GPL version 2. This
+ does not express any distinction between
+ GPL-2.0-only or GPL-2.0-or-later. The exact
+ license information can only be determined
+ via the license information in the
+ corresponding source files.
+
+ "GPL v2" Same as "GPL". It exists for historic
+ reasons.
+
+ "GPL and additional rights" Historical variant of expressing that the
+ module source is dual licensed under a
+ GPL v2 variant and MIT license. Please do
+ not use in new code.
+
+ "Dual MIT/GPL" The correct way of expressing that the
+ module is dual licensed under a GPL v2
+ variant or MIT license choice.
+
+ "Dual BSD/GPL" The module is dual licensed under a GPL v2
+ variant or BSD license choice. The exact
+ variant of the BSD license can only be
+ determined via the license information
+ in the corresponding source files.
+
+ "Dual MPL/GPL" The module is dual licensed under a GPL v2
+ variant or Mozilla Public License (MPL)
+ choice. The exact variant of the MPL
+ license can only be determined via the
+ license information in the corresponding
+ source files.
+
+ "Proprietary" The module is under a proprietary license.
+ This string is solely for proprietary third
+ party modules and cannot be used for modules
+ which have their source code in the kernel
+ tree. Modules tagged that way are tainting
+ the kernel with the 'P' flag when loaded and
+ the kernel module loader refuses to link such
+ modules against symbols which are exported
+ with EXPORT_SYMBOL_GPL().
+ ============================= =============================================
+
+
+
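+A minimal sketch of how these tags appear together in a module source
+file (the module itself is a placeholder)::
+
+    // SPDX-License-Identifier: GPL-2.0
+    #include <linux/module.h>
+
+    static int __init example_init(void)
+    {
+        return 0;
+    }
+
+    static void __exit example_exit(void)
+    {
+    }
+
+    module_init(example_init);
+    module_exit(example_exit);
+
+    /* "GPL" means GPL v2 here; the SPDX line above carries the exact
+     * license information. */
+    MODULE_LICENSE("GPL");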
diff --git a/Documentation/process/stable-api-nonsense.rst b/Documentation/process/stable-api-nonsense.rst
index 24f5aeecee91..a9625ab1fdc2 100644
--- a/Documentation/process/stable-api-nonsense.rst
+++ b/Documentation/process/stable-api-nonsense.rst
@@ -169,14 +169,13 @@ driver for every different kernel version for every distribution is a
nightmare, and trying to keep up with an ever changing kernel interface
is also a rough job.
-Simple, get your kernel driver into the main kernel tree (remember we
-are talking about GPL released drivers here, if your code doesn't fall
-under this category, good luck, you are on your own here, you leech
-<insert link to leech comment from Andrew and Linus here>.) If your
-driver is in the tree, and a kernel interface changes, it will be fixed
-up by the person who did the kernel change in the first place. This
-ensures that your driver is always buildable, and works over time, with
-very little effort on your part.
+Simple, get your kernel driver into the main kernel tree (remember we are
+talking about drivers released under a GPL-compatible license here, if your
+code doesn't fall under this category, good luck, you are on your own here,
+you leech). If your driver is in the tree, and a kernel interface changes,
+it will be fixed up by the person who did the kernel change in the first
+place. This ensures that your driver is always buildable, and works over
+time, with very little effort on your part.
The very good side effects of having your driver in the main kernel tree
are:
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 0de6f6145cc6..06f743b612c4 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -38,6 +38,9 @@ Procedure for submitting patches to the -stable tree
- If the patch covers files in net/ or drivers/net please follow netdev stable
submission guidelines as described in
:ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
+ after first checking the stable networking queue at
+ https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
+ to ensure the requested patch is not already queued up.
- Security patches should not be handled (solely) by the -stable review
process but should follow the procedures in
:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
@@ -98,9 +101,9 @@ text, like this:
commit <sha1> upstream.
-Additionally, some patches submitted via Option 1 may have additional patch
-prerequisites which can be cherry-picked. This can be specified in the following
-format in the sign-off area:
+Additionally, some patches submitted via :ref:`option_1` may have additional
+patch prerequisites which can be cherry-picked. This can be specified in the
+following format in the sign-off area:
.. code-block:: none
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index 30dc00a364e8..be7d1829c3af 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -182,9 +182,11 @@ change five years from now.
If your patch fixes a bug in a specific commit, e.g. you found an issue using
``git bisect``, please use the 'Fixes:' tag with the first 12 characters of
-the SHA-1 ID, and the one line summary. For example::
+the SHA-1 ID, and the one line summary. Do not split the tag across multiple
+lines, tags are exempt from the "wrap at 75 columns" rule in order to simplify
+parsing scripts. For example::
- Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
+ Fixes: 54a4f0239f2e ("KVM: MMU: make kvm_mmu_zap_page() return the number of pages it actually freed")
The following ``git config`` settings can be used to add a pretty format for
outputting the above style in the ``git log`` or ``git show`` commands::
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
index 6c42c75103eb..6361fb01c9c1 100644
--- a/Documentation/robust-futexes.txt
+++ b/Documentation/robust-futexes.txt
@@ -218,5 +218,4 @@ All other architectures should build just fine too - but they won't have
the new syscalls yet.
Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
-inline function before writing up the syscalls (that function returns
--ENOSYS right now).
+inline function before writing up the syscalls.
diff --git a/Documentation/scsi/osd.txt b/Documentation/scsi/osd.txt
deleted file mode 100644
index 5a9879bad073..000000000000
--- a/Documentation/scsi/osd.txt
+++ /dev/null
@@ -1,197 +0,0 @@
-The OSD Standard
-================
-OSD (Object-Based Storage Device) is a T10 SCSI command set that is designed
-to provide efficient operation of input/output logical units that manage the
-allocation, placement, and accessing of variable-size data-storage containers,
-called objects. Objects are intended to contain operating system and application
-constructs. Each object has associated attributes attached to it, which are
-integral part of the object and provide metadata about the object. The standard
-defines some common obligatory attributes, but user attributes can be added as
-needed.
-
-See: http://www.t10.org/ftp/t10/drafts/osd2/ for the latest draft for OSD 2
-or search the web for "OSD SCSI"
-
-OSD in the Linux Kernel
-=======================
-osd-initiator:
- The main component of OSD in Kernel is the osd-initiator library. Its main
-user is intended to be the pNFS-over-objects layout driver, which uses objects
-as its back-end data storage. Other clients are the other osd parts listed below.
-
-osd-uld:
- This is a SCSI ULD that registers for OSD type devices and provides a testing
-platform, both for the in-kernel initiator as well as connected targets. It
-currently has no useful user-mode API, though it could have if need be.
-
-exofs:
- Is an OSD based Linux file system. It uses the osd-initiator and osd-uld,
-to export a usable file system for users.
-See Documentation/filesystems/exofs.txt for more details
-
-osd target:
- There are no current plans for an OSD target implementation in kernel. For all
-needs, a user-mode target that is based on the scsi tgt target framework is
-available from Ohio Supercomputer Center (OSC) at:
-http://www.open-osd.org/bin/view/Main/OscOsdProject
-There are several other target implementations. See http://open-osd.org for more
-links.
-
-Files and Folders
-=================
-This is the complete list of files included in this work:
-include/scsi/
- osd_initiator.h Main API for the initiator library
- osd_types.h Common OSD types
- osd_sec.h Security Manager API
- osd_protocol.h Wire definitions of the OSD standard protocol
- osd_attributes.h Wire definitions of OSD attributes
-
-drivers/scsi/osd/
- osd_initiator.c OSD-Initiator library implementation
- osd_uld.c The OSD scsi ULD
- osd_ktest.{h,c} In-kernel test suite (called by osd_uld)
- osd_debug.h Some printk macros
- Makefile For both in-tree and out-of-tree compilation
- Kconfig Enables inclusion of the different pieces
- osd_test.c User-mode application to call the kernel tests
-
-The OSD-Initiator Library
-=========================
-osd_initiator is a low level implementation of an osd initiator encoder.
-But even though, it should be intuitive and easy to use. Perhaps over time an
-higher lever will form that automates some of the more common recipes.
-
-init/fini:
-- osd_dev_init() associates a scsi_device with an osd_dev structure
- and initializes some global pools. This should be done once per scsi_device
- (OSD LUN). The osd_dev structure is needed for calling osd_start_request().
-
-- osd_dev_fini() cleans up before a osd_dev/scsi_device destruction.
-
-OSD commands encoding, execution, and decoding of results:
-
-struct osd_request's is used to iteratively encode an OSD command and carry
-its state throughout execution. Each request goes through these stages:
-
-a. osd_start_request() allocates the request.
-
-b. Any of the osd_req_* methods is used to encode a request of the specified
- type.
-
-c. osd_req_add_{get,set}_attr_* may be called to add get/set attributes to the
- CDB. "List" or "Page" mode can be used exclusively. The attribute-list API
- can be called multiple times on the same request. However, only one
- attribute-page can be read, as mandated by the OSD standard.
-
-d. osd_finalize_request() computes offsets into the data-in and data-out buffers
- and signs the request using the provided capability key and integrity-
- check parameters.
-
-e. osd_execute_request() may be called to execute the request via the block
- layer and wait for its completion. The request can be executed
- asynchronously by calling the block layer API directly.
-
-f. After execution, osd_req_decode_sense() can be called to decode the request's
- sense information.
-
-g. osd_req_decode_get_attr() may be called to retrieve osd_add_get_attr_list()
- values.
-
-h. osd_end_request() must be called to deallocate the request and any resource
- associated with it. Note that osd_end_request cleans up the request at any
- stage and it must always be called after a successful osd_start_request().
-
-osd_request's structure:
-
-The OSD standard defines a complex structure of IO segments pointed to by
-members in the CDB. Up to 3 segments can be deployed in the IN-Buffer and up to
-4 in the OUT-Buffer. The ASCII illustration below depicts a secure-read with
-associated get+set of attributes-lists. Other combinations very on the same
-basic theme. From no-segments-used up to all-segments-used.
-
-|________OSD-CDB__________|
-| |
-|read_len (offset=0) -|---------\
-| | |
-|get_attrs_list_length | |
-|get_attrs_list_offset -|----\ |
-| | | |
-|retrieved_attrs_alloc_len| | |
-|retrieved_attrs_offset -|----|----|-\
-| | | | |
-|set_attrs_list_length | | | |
-|set_attrs_list_offset -|-\ | | |
-| | | | | |
-|in_data_integ_offset -|-|--|----|-|-\
-|out_data_integ_offset -|-|--|--\ | | |
-\_________________________/ | | | | | |
- | | | | | |
-|_______OUT-BUFFER________| | | | | | |
-| Set attr list |</ | | | | |
-| | | | | | |
-|-------------------------| | | | | |
-| Get attr descriptors |<---/ | | | |
-| | | | | |
-|-------------------------| | | | |
-| Out-data integrity |<------/ | | |
-| | | | |
-\_________________________/ | | |
- | | |
-|________IN-BUFFER________| | | |
-| In-Data read |<--------/ | |
-| | | |
-|-------------------------| | |
-| Get attr list |<----------/ |
-| | |
-|-------------------------| |
-| In-data integrity |<------------/
-| |
-\_________________________/
-
-A block device request can carry bidirectional payload by means of associating
-a bidi_read request with a main write-request. Each in/out request is described
-by a chain of BIOs associated with each request.
-The CDB is of a SCSI VARLEN CDB format, as described by OSD standard.
-The OSD standard also mandates alignment restrictions at start of each segment.
-
-In the code, in struct osd_request, there are two _osd_io_info structures to
-describe the IN/OUT buffers above, two BIOs for the data payload and up to five
-_osd_req_data_segment structures to hold the different segments allocation and
-information.
-
-Important: We have chosen to disregard the assumption that a BIO-chain (and
-the resulting sg-list) describes a linear memory buffer. Meaning only first and
-last scatter chain can be incomplete and all the middle chains are of PAGE_SIZE.
-For us, a scatter-gather-list, as its name implies and as used by the Networking
-layer, is to describe a vector of buffers that will be transferred to/from the
-wire. It works very well with current iSCSI transport. iSCSI is currently the
-only deployed OSD transport. In the future we anticipate SAS and FC attached OSD
-devices as well.
-
-The OSD Testing ULD
-===================
-TODO: More user-mode control on tests.
-
-Authors, Mailing list
-=====================
-Please communicate with us on any deployment of osd, whether using this code
-or not.
-
-Any problems, questions, bug reports, lonely OSD nights, please email:
- OSD Dev List <osd-dev@open-osd.org>
-
-More up-to-date information can be found on:
-http://open-osd.org
-
-Boaz Harrosh <ooo@electrozaur.com>
-
-References
-==========
-Weber, R., "SCSI Object-Based Storage Device Commands",
-T10/1355-D ANSI/INCITS 400-2004,
-http://www.t10.org/ftp/t10/drafts/osd/osd-r10.pdf
-
-Weber, R., "SCSI Object-Based Storage Device Commands -2 (OSD-2)"
-T10/1729-D, Working Draft, rev. 3
-http://www.t10.org/ftp/t10/drafts/osd2/osd2r03.pdf
diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.txt
index 520b5b033256..1769f71c4c20 100644
--- a/Documentation/scsi/ufs.txt
+++ b/Documentation/scsi/ufs.txt
@@ -147,6 +147,17 @@ send SG_IO with the applicable sg_io_v4:
io_hdr_v4.max_response_len = reply_len;
io_hdr_v4.request_len = request_len;
io_hdr_v4.request = (__u64)request_upiu;
+ if (dir == SG_DXFER_TO_DEV) {
+ io_hdr_v4.dout_xfer_len = (uint32_t)byte_cnt;
+ io_hdr_v4.dout_xferp = (uintptr_t)(__u64)buff;
+ } else {
+ io_hdr_v4.din_xfer_len = (uint32_t)byte_cnt;
+ io_hdr_v4.din_xferp = (uintptr_t)(__u64)buff;
+ }
+
+If you wish to read or write a descriptor, use the appropriate xferp of
+sg_io_v4.
+
UFS Specifications can be found at,
UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
diff --git a/Documentation/security/LSM.rst b/Documentation/security/LSM.rst
index 8b9ee597e9d0..31d92bc5fdd2 100644
--- a/Documentation/security/LSM.rst
+++ b/Documentation/security/LSM.rst
@@ -11,4 +11,7 @@ that end users and distros can make a more informed decision about which
LSMs suit their requirements.
For extensive documentation on the available LSM hook interfaces, please
-see ``include/linux/lsm_hooks.h``.
+see ``include/linux/lsm_hooks.h`` and associated structures:
+
+.. kernel-doc:: include/linux/lsm_hooks.h
+ :internal:
diff --git a/Documentation/security/LSM-sctp.rst b/Documentation/security/SCTP.rst
index 6e5a3925a860..d903eb97fcf3 100644
--- a/Documentation/security/LSM-sctp.rst
+++ b/Documentation/security/SCTP.rst
@@ -1,6 +1,15 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====
+SCTP
+====
+
SCTP LSM Support
================
+Security Hooks
+--------------
+
For security module support, three SCTP specific hooks have been implemented::
security_sctp_assoc_request()
@@ -12,11 +21,11 @@ Also the following security hook has been utilised::
security_inet_conn_established()
The usage of these hooks are described below with the SELinux implementation
-described in ``Documentation/security/SELinux-sctp.rst``
+described in the `SCTP SELinux Support`_ chapter.
security_sctp_assoc_request()
------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Passes the ``@ep`` and ``@chunk->skb`` of the association INIT packet to the
security module. Returns 0 on success, error on failure.
::
@@ -26,7 +35,7 @@ security module. Returns 0 on success, error on failure.
security_sctp_bind_connect()
------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Passes one or more ipv4/ipv6 addresses to the security module for validation
based on the ``@optname`` that will result in either a bind or connect
service as shown in the permission check tables below.
@@ -102,7 +111,7 @@ ASCONF chunk when the corresponding ``@optname``'s are present::
security_sctp_sk_clone()
--------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~
Called whenever a new socket is created by **accept**\(2)
(i.e. a TCP style socket) or when a socket is 'peeled off', e.g. userspace
calls **sctp_peeloff**\(3).
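For reference, a minimal userspace sketch of the peel-off case (assumes
the lksctp-tools library; error handling trimmed)::

    #include <netinet/sctp.h>
    #include <unistd.h>

    /* Peeling off an association yields a new one-to-one style socket,
     * for which the kernel invokes security_sctp_sk_clone(). */
    int example_peeloff(int sd, sctp_assoc_t assoc_id)
    {
        int peeled = sctp_peeloff(sd, assoc_id);

        if (peeled < 0)
            return -1;
        close(peeled);
        return 0;
    }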
@@ -114,7 +123,7 @@ calls **sctp_peeloff**\(3).
security_inet_conn_established()
----------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Called when a COOKIE ACK is received::
@sk - pointer to sock structure.
@@ -122,7 +131,8 @@ Called when a COOKIE ACK is received::
Security Hooks used for Association Establishment
-=================================================
+-------------------------------------------------
+
The following diagram shows the use of ``security_sctp_bind_connect()``,
``security_sctp_assoc_request()``, ``security_inet_conn_established()`` when
establishing an association.
@@ -173,3 +183,161 @@ establishing an association.
------------------------------------------------------------------
+SCTP SELinux Support
+====================
+
+Security Hooks
+--------------
+
+The `SCTP LSM Support`_ chapter above describes the following SCTP security
+hooks with the SELinux specifics expanded below::
+
+ security_sctp_assoc_request()
+ security_sctp_bind_connect()
+ security_sctp_sk_clone()
+ security_inet_conn_established()
+
+
+security_sctp_assoc_request()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Passes the ``@ep`` and ``@chunk->skb`` of the association INIT packet to the
+security module. Returns 0 on success, error on failure.
+::
+
+ @ep - pointer to sctp endpoint structure.
+ @skb - pointer to skbuff of association packet.
+
+The security module performs the following operations:
+ IF this is the first association on ``@ep->base.sk``, then set the peer
+ sid to that in ``@skb``. This will ensure there is only one peer sid
+ assigned to ``@ep->base.sk`` that may support multiple associations.
+
+ ELSE validate the ``@ep->base.sk peer_sid`` against the ``@skb peer sid``
+ to determine whether the association should be allowed or denied.
+
+ Set the sctp ``@ep sid`` to socket's sid (from ``ep->base.sk``) with
+ MLS portion taken from ``@skb peer sid``. This will be used by SCTP
+ TCP style sockets and peeled off connections as they cause a new socket
+ to be generated.
+
+ If IP security options are configured (CIPSO/CALIPSO), then the ip
+ options are set on the socket.
+
+
+security_sctp_bind_connect()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Checks permissions required for ipv4/ipv6 addresses based on the ``@optname``
+as follows::
+
+ ------------------------------------------------------------------
+ | BIND Permission Checks |
+ | @optname | @address contains |
+ |----------------------------|-----------------------------------|
+ | SCTP_SOCKOPT_BINDX_ADD | One or more ipv4 / ipv6 addresses |
+ | SCTP_PRIMARY_ADDR | Single ipv4 or ipv6 address |
+ | SCTP_SET_PEER_PRIMARY_ADDR | Single ipv4 or ipv6 address |
+ ------------------------------------------------------------------
+
+ ------------------------------------------------------------------
+ | CONNECT Permission Checks |
+ | @optname | @address contains |
+ |----------------------------|-----------------------------------|
+ | SCTP_SOCKOPT_CONNECTX | One or more ipv4 / ipv6 addresses |
+ | SCTP_PARAM_ADD_IP | One or more ipv4 / ipv6 addresses |
+ | SCTP_SENDMSG_CONNECT | Single ipv4 or ipv6 address |
+ | SCTP_PARAM_SET_PRIMARY | Single ipv4 or ipv6 address |
+ ------------------------------------------------------------------
+
+
+`SCTP LSM Support`_ gives a summary of the ``@optname``
+entries and also describes ASCONF chunk processing when Dynamic Address
+Reconfiguration is enabled.
+
+
+security_sctp_sk_clone()
+~~~~~~~~~~~~~~~~~~~~~~~~
+Called whenever a new socket is created by **accept**\(2) (i.e. a TCP style
+socket) or when a socket is 'peeled off', e.g. when userspace calls
+**sctp_peeloff**\(3). ``security_sctp_sk_clone()`` will set the new
+socket's sid and peer sid to that contained in the ``@ep sid`` and
+``@ep peer sid`` respectively.
+::
+
+ @ep - pointer to current sctp endpoint structure.
+ @sk - pointer to current sock structure.
+  @newsk - pointer to new sock structure.
+
+
+security_inet_conn_established()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Called when a COOKIE ACK is received where it sets the connection's peer sid
+to that in ``@skb``::
+
+ @sk - pointer to sock structure.
+ @skb - pointer to skbuff of the COOKIE ACK packet.
+
+
+Policy Statements
+-----------------
+The following class and permissions to support SCTP are available within the
+kernel::
+
+ class sctp_socket inherits socket { node_bind }
+
+whenever the following policy capability is enabled::
+
+ policycap extended_socket_class;
+
+SELinux SCTP support adds the ``name_connect`` permission for connecting
+to a specific port type and the ``association`` permission that is explained
+in the section below.
+
+If userspace tools have been updated, SCTP will support the ``portcon``
+statement as shown in the following example::
+
+ portcon sctp 1024-1036 system_u:object_r:sctp_ports_t:s0
+
+
+SCTP Peer Labeling
+------------------
+An SCTP socket will only have one peer label assigned to it. This will be
+assigned during the establishment of the first association. Any further
+associations on this socket will have their packet peer label compared to
+the socket's peer label, and only if they are different will the
+``association`` permission be validated. This is validated by checking the
+socket peer sid against the received packet's peer sid to determine whether
+the association should be allowed or denied.
+
+NOTES:
+ 1) If peer labeling is not enabled, then the peer context will always be
+ ``SECINITSID_UNLABELED`` (``unlabeled_t`` in Reference Policy).
+
+ 2) As SCTP can support more than one transport address per endpoint
+ (multi-homing) on a single socket, it is possible to configure policy
+ and NetLabel to provide different peer labels for each of these. As the
+     socket peer label is determined by the first association's transport
+ address, it is recommended that all peer labels are consistent.
+
+  3) **getpeercon**\(3) may be used by userspace to retrieve the socket's peer
+ context.
+
+ 4) While not SCTP specific, be aware when using NetLabel that if a label
+ is assigned to a specific interface, and that interface 'goes down',
+ then the NetLabel service will remove the entry. Therefore ensure that
+ the network startup scripts call **netlabelctl**\(8) to set the required
+ label (see **netlabel-config**\(8) helper script for details).
+
+ 5) The NetLabel SCTP peer labeling rules apply as discussed in the following
+ set of posts tagged "netlabel" at: http://www.paul-moore.com/blog/t.
+
+ 6) CIPSO is only supported for IPv4 addressing: ``socket(AF_INET, ...)``
+ CALIPSO is only supported for IPv6 addressing: ``socket(AF_INET6, ...)``
+
+ Note the following when testing CIPSO/CALIPSO:
+ a) CIPSO will send an ICMP packet if an SCTP packet cannot be
+ delivered because of an invalid label.
+ b) CALIPSO does not send an ICMP packet, just silently discards it.
+
+ 7) IPSEC is not supported as RFC 3554 - sctp/ipsec support has not been
+ implemented in userspace (**racoon**\(8) or **ipsec_pluto**\(8)),
+ although the kernel supports SCTP/IPSEC.
diff --git a/Documentation/security/SELinux-sctp.rst b/Documentation/security/SELinux-sctp.rst
deleted file mode 100644
index a332cb1c5334..000000000000
--- a/Documentation/security/SELinux-sctp.rst
+++ /dev/null
@@ -1,158 +0,0 @@
-SCTP SELinux Support
-=====================
-
-Security Hooks
-===============
-
-``Documentation/security/LSM-sctp.rst`` describes the following SCTP security
-hooks with the SELinux specifics expanded below::
-
- security_sctp_assoc_request()
- security_sctp_bind_connect()
- security_sctp_sk_clone()
- security_inet_conn_established()
-
-
-security_sctp_assoc_request()
------------------------------
-Passes the ``@ep`` and ``@chunk->skb`` of the association INIT packet to the
-security module. Returns 0 on success, error on failure.
-::
-
- @ep - pointer to sctp endpoint structure.
- @skb - pointer to skbuff of association packet.
-
-The security module performs the following operations:
- IF this is the first association on ``@ep->base.sk``, then set the peer
- sid to that in ``@skb``. This will ensure there is only one peer sid
- assigned to ``@ep->base.sk`` that may support multiple associations.
-
- ELSE validate the ``@ep->base.sk peer_sid`` against the ``@skb peer sid``
- to determine whether the association should be allowed or denied.
-
- Set the sctp ``@ep sid`` to socket's sid (from ``ep->base.sk``) with
- MLS portion taken from ``@skb peer sid``. This will be used by SCTP
- TCP style sockets and peeled off connections as they cause a new socket
- to be generated.
-
- If IP security options are configured (CIPSO/CALIPSO), then the ip
- options are set on the socket.
-
-
-security_sctp_bind_connect()
------------------------------
-Checks permissions required for ipv4/ipv6 addresses based on the ``@optname``
-as follows::
-
- ------------------------------------------------------------------
- | BIND Permission Checks |
- | @optname | @address contains |
- |----------------------------|-----------------------------------|
- | SCTP_SOCKOPT_BINDX_ADD | One or more ipv4 / ipv6 addresses |
- | SCTP_PRIMARY_ADDR | Single ipv4 or ipv6 address |
- | SCTP_SET_PEER_PRIMARY_ADDR | Single ipv4 or ipv6 address |
- ------------------------------------------------------------------
-
- ------------------------------------------------------------------
- | CONNECT Permission Checks |
- | @optname | @address contains |
- |----------------------------|-----------------------------------|
- | SCTP_SOCKOPT_CONNECTX | One or more ipv4 / ipv6 addresses |
- | SCTP_PARAM_ADD_IP | One or more ipv4 / ipv6 addresses |
- | SCTP_SENDMSG_CONNECT | Single ipv4 or ipv6 address |
- | SCTP_PARAM_SET_PRIMARY | Single ipv4 or ipv6 address |
- ------------------------------------------------------------------
-
-
-``Documentation/security/LSM-sctp.rst`` gives a summary of the ``@optname``
-entries and also describes ASCONF chunk processing when Dynamic Address
-Reconfiguration is enabled.
-
-
-security_sctp_sk_clone()
--------------------------
-Called whenever a new socket is created by **accept**\(2) (i.e. a TCP style
-socket) or when a socket is 'peeled off' e.g userspace calls
-**sctp_peeloff**\(3). ``security_sctp_sk_clone()`` will set the new
-sockets sid and peer sid to that contained in the ``@ep sid`` and
-``@ep peer sid`` respectively.
-::
-
- @ep - pointer to current sctp endpoint structure.
- @sk - pointer to current sock structure.
- @sk - pointer to new sock structure.
-
-
-security_inet_conn_established()
----------------------------------
-Called when a COOKIE ACK is received where it sets the connection's peer sid
-to that in ``@skb``::
-
- @sk - pointer to sock structure.
- @skb - pointer to skbuff of the COOKIE ACK packet.
-
-
-Policy Statements
-==================
-The following class and permissions to support SCTP are available within the
-kernel::
-
- class sctp_socket inherits socket { node_bind }
-
-whenever the following policy capability is enabled::
-
- policycap extended_socket_class;
-
-SELinux SCTP support adds the ``name_connect`` permission for connecting
-to a specific port type and the ``association`` permission that is explained
-in the section below.
-
-If userspace tools have been updated, SCTP will support the ``portcon``
-statement as shown in the following example::
-
- portcon sctp 1024-1036 system_u:object_r:sctp_ports_t:s0
-
-
-SCTP Peer Labeling
-===================
-An SCTP socket will only have one peer label assigned to it. This will be
-assigned during the establishment of the first association. Any further
-associations on this socket will have their packet peer label compared to
-the sockets peer label, and only if they are different will the
-``association`` permission be validated. This is validated by checking the
-socket peer sid against the received packets peer sid to determine whether
-the association should be allowed or denied.
-
-NOTES:
- 1) If peer labeling is not enabled, then the peer context will always be
- ``SECINITSID_UNLABELED`` (``unlabeled_t`` in Reference Policy).
-
- 2) As SCTP can support more than one transport address per endpoint
- (multi-homing) on a single socket, it is possible to configure policy
- and NetLabel to provide different peer labels for each of these. As the
- socket peer label is determined by the first associations transport
- address, it is recommended that all peer labels are consistent.
-
- 3) **getpeercon**\(3) may be used by userspace to retrieve the sockets peer
- context.
-
- 4) While not SCTP specific, be aware when using NetLabel that if a label
- is assigned to a specific interface, and that interface 'goes down',
- then the NetLabel service will remove the entry. Therefore ensure that
- the network startup scripts call **netlabelctl**\(8) to set the required
- label (see **netlabel-config**\(8) helper script for details).
-
- 5) The NetLabel SCTP peer labeling rules apply as discussed in the following
- set of posts tagged "netlabel" at: http://www.paul-moore.com/blog/t.
-
- 6) CIPSO is only supported for IPv4 addressing: ``socket(AF_INET, ...)``
- CALIPSO is only supported for IPv6 addressing: ``socket(AF_INET6, ...)``
-
- Note the following when testing CIPSO/CALIPSO:
- a) CIPSO will send an ICMP packet if an SCTP packet cannot be
- delivered because of an invalid label.
- b) CALIPSO does not send an ICMP packet, just silently discards it.
-
- 7) IPSEC is not supported as RFC 3554 - sctp/ipsec support has not been
- implemented in userspace (**racoon**\(8) or **ipsec_pluto**\(8)),
- although the kernel supports SCTP/IPSEC.
diff --git a/Documentation/security/index.rst b/Documentation/security/index.rst
index 85492bfca530..aad6d92ffe31 100644
--- a/Documentation/security/index.rst
+++ b/Documentation/security/index.rst
@@ -9,7 +9,6 @@ Security Documentation
IMA-templates
keys/index
LSM
- LSM-sctp
- SELinux-sctp
+ SCTP
self-protection
tpm/index
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index 1721c1b570c3..1a63194b74d7 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -572,6 +572,12 @@ SPI MASTER METHODS
0: transfer is finished
1: transfer is still in progress
+ master->set_cs_timing(struct spi_device *spi, u8 setup_clk_cycles,
+ u8 hold_clk_cycles, u8 inactive_clk_cycles)
+	This method allows SPI client drivers to request that the SPI master
+	controller configure device-specific CS setup, hold and inactive
+	timing requirements.
+
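+	A hedged sketch of a controller-side implementation (the function
+	name is hypothetical; the prototype follows the description above,
+	so check the SPI core headers for the authoritative one):
+
+	static void example_set_cs_timing(struct spi_device *spi, u8 setup,
+					  u8 hold, u8 inactive)
+	{
+		/* program controller registers with the requested CS
+		 * setup/hold/inactive times, in clock cycles */
+	}
+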
DEPRECATED METHODS
master->transfer(struct spi_device *spi, struct spi_message *message)
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index d68135560895..9803e14639bf 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -159,7 +159,7 @@ particularly the CPU hotplug lock (in order to avoid races against
CPUs being brought in the kernel while the kernel is getting
patched). Calling the static key API from within a hotplug notifier is
thus a sure deadlock recipe. In order to still allow use of the
-functionnality, the following functions are provided:
+functionality, the following functions are provided:
static_key_enable_cpuslocked()
static_key_disable_cpuslocked()
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 379063e58326..aa058aa7bf28 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -95,7 +95,7 @@ show up in /proc/sys/kernel:
- stop-a [ SPARC only ]
- sysrq ==> Documentation/admin-guide/sysrq.rst
- sysctl_writes_strict
-- tainted
+- tainted ==> Documentation/admin-guide/tainted-kernels.rst
- threads-max
- unknown_nmi_panic
- watchdog
@@ -1031,39 +1031,33 @@ compilation sees a 1% slowdown, other systems and workloads may vary.
1: kernel stack erasing is enabled (default), it is performed before
returning to the userspace at the end of syscalls.
-
==============================================================
-tainted:
+tainted
Non-zero if the kernel has been tainted. Numeric values, which can be
ORed together. The letters are seen in "Tainted" line of Oops reports.
- 1 (P): A module with a non-GPL license has been loaded, this
- includes modules with no license.
- Set by modutils >= 2.4.9 and module-init-tools.
- 2 (F): A module was force loaded by insmod -f.
- Set by modutils >= 2.4.9 and module-init-tools.
- 4 (S): Unsafe SMP processors: SMP with CPUs not designed for SMP.
- 8 (R): A module was forcibly unloaded from the system by rmmod -f.
- 16 (M): A hardware machine check error occurred on the system.
- 32 (B): A bad page was discovered on the system.
- 64 (U): The user has asked that the system be marked "tainted". This
- could be because they are running software that directly modifies
- the hardware, or for other reasons.
- 128 (D): The system has died.
- 256 (A): The ACPI DSDT has been overridden with one supplied by the user
- instead of using the one provided by the hardware.
- 512 (W): A kernel warning has occurred.
- 1024 (C): A module from drivers/staging was loaded.
- 2048 (I): The system is working around a severe firmware bug.
- 4096 (O): An out-of-tree module has been loaded.
- 8192 (E): An unsigned module has been loaded in a kernel supporting module
- signature.
- 16384 (L): A soft lockup has previously occurred on the system.
- 32768 (K): The kernel has been live patched.
- 65536 (X): Auxiliary taint, defined and used by for distros.
-131072 (T): The kernel was built with the struct randomization plugin.
+ 1 (P): proprietary module was loaded
+ 2 (F): module was force loaded
+ 4 (S): SMP kernel oops on an officially SMP incapable processor
+ 8 (R): module was force unloaded
+ 16 (M): processor reported a Machine Check Exception (MCE)
+ 32 (B): bad page referenced or some unexpected page flags
+ 64 (U): taint requested by userspace application
+ 128 (D): kernel died recently, i.e. there was an OOPS or BUG
+ 256 (A): an ACPI table was overridden by user
+ 512 (W): kernel issued warning
+ 1024 (C): staging driver was loaded
+ 2048 (I): workaround for bug in platform firmware applied
+ 4096 (O): externally-built ("out-of-tree") module was loaded
+ 8192 (E): unsigned module was loaded
+ 16384 (L): soft lockup occurred
+ 32768 (K): kernel has been live patched
+ 65536 (X): auxiliary taint, defined and used by distros
+131072 (T): The kernel was built with the struct randomization plugin
+
+See Documentation/admin-guide/tainted-kernels.rst for more information.
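+A small illustrative decoder for this value (the flag order follows the
+table above):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		static const char flags[] = "PFSRMBUDAWCIOELKXT";
+		unsigned long tainted = 0;
+		FILE *f = fopen("/proc/sys/kernel/tainted", "r");
+		int i;
+
+		if (!f || fscanf(f, "%lu", &tainted) != 1)
+			return 1;
+		fclose(f);
+		for (i = 0; flags[i]; i++)
+			if (tainted & (1UL << i))
+				printf("%c (%lu)\n", flags[i], 1UL << i);
+		return 0;
+	}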
==============================================================
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 187ce4f599a2..3f13d8599337 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -237,7 +237,7 @@ used:
cat (1234): drop_caches: 3
These are informational only. They do not mean that anything is wrong
-with your system. To disable them, echo 4 (bit 3) into drop_caches.
+with your system. To disable them, echo 4 (bit 2) into drop_caches.
==============================================================
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
increase the success rate of future high-order allocations such as SLUB
allocations, THP and hugetlbfs pages.
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
=============================================================
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 94bf6944bb1e..95d6e31f1e3a 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -297,7 +297,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
- buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
@@ -479,13 +478,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
- if re.search('write_pending_status\)\(', fo):
- buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
-
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index 911399730c1c..c3fa500df92c 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -316,7 +316,7 @@ ACPI thermal zones.
|---temp[1-*]_input: The current temperature of thermal zone [1-*]
|---temp[1-*]_critical: The critical trip point of thermal zone [1-*]
-Please read Documentation/hwmon/sysfs-interface for additional information.
+Please read Documentation/hwmon/sysfs-interface.rst for additional information.
***************************
* Thermal zone attributes *
diff --git a/Documentation/timers/highres.txt b/Documentation/timers/highres.txt
index 9d88f67781c2..8f9741592123 100644
--- a/Documentation/timers/highres.txt
+++ b/Documentation/timers/highres.txt
@@ -231,7 +231,7 @@ in the idle period to make sure that jiffies are up to date and the interrupt
handler has not to deal with an eventually stale jiffy value.
The dynamic tick feature provides statistical values which are exported to
-userspace via /proc/stats and can be made available for enhanced power
+userspace via /proc/stat and can be made available for enhanced power
management control.
The implementation leaves room for further development like full tickless
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 0131df7f5968..7c5e6d6ab5d1 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -233,6 +233,12 @@ of ftrace. Here is a list of some of the key files:
This interface also allows for commands to be used. See the
"Filter commands" section for more details.
+ As a speed up, since processing strings can be quite expensive
+ and requires a check of all functions registered to tracing, an
+ index can be written into this file instead. A number (starting
+ with "1") written here will select the function at the corresponding
+ line position of the "available_filter_functions" file.
+
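+ A minimal sketch (the function name here is taken from the example
+ given later in this document)::
+
+   # echo 1 > set_ftrace_filter
+   # cat set_ftrace_filter
+   trace_initcall_finish_cb
+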
set_ftrace_notrace:
This has an effect opposite to that of
@@ -1396,6 +1402,57 @@ enabling function tracing, we incur an added overhead. This
overhead may extend the latency times. But nevertheless, this
trace has provided some very helpful debugging information.
+If we prefer function graph output instead of function output, we can
+set the display-graph option::
+
+ echo 1 > options/display-graph
+
+ # tracer: irqsoff
+ #
+ # irqsoff latency trace v1.1.5 on 4.20.0-rc6+
+ # --------------------------------------------------------------------
+ # latency: 3751 us, #274/274, CPU#0 | (M:desktop VP:0, KP:0, SP:0 HP:0 #P:4)
+ # -----------------
+ # | task: bash-1507 (uid:0 nice:0 policy:0 rt_prio:0)
+ # -----------------
+ # => started at: free_debug_processing
+ # => ended at: return_to_handler
+ #
+ #
+ # _-----=> irqs-off
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # ||| /
+ # REL TIME CPU TASK/PID |||| DURATION FUNCTION CALLS
+ # | | | | |||| | | | | | |
+ 0 us | 0) bash-1507 | d... | 0.000 us | _raw_spin_lock_irqsave();
+ 0 us | 0) bash-1507 | d..1 | 0.378 us | do_raw_spin_trylock();
+ 1 us | 0) bash-1507 | d..2 | | set_track() {
+ 2 us | 0) bash-1507 | d..2 | | save_stack_trace() {
+ 2 us | 0) bash-1507 | d..2 | | __save_stack_trace() {
+ 3 us | 0) bash-1507 | d..2 | | __unwind_start() {
+ 3 us | 0) bash-1507 | d..2 | | get_stack_info() {
+ 3 us | 0) bash-1507 | d..2 | 0.351 us | in_task_stack();
+ 4 us | 0) bash-1507 | d..2 | 1.107 us | }
+ [...]
+ 3750 us | 0) bash-1507 | d..1 | 0.516 us | do_raw_spin_unlock();
+ 3750 us | 0) bash-1507 | d..1 | 0.000 us | _raw_spin_unlock_irqrestore();
+ 3764 us | 0) bash-1507 | d..1 | 0.000 us | tracer_hardirqs_on();
+ bash-1507 0d..1 3792us : <stack trace>
+ => free_debug_processing
+ => __slab_free
+ => kmem_cache_free
+ => vm_area_free
+ => remove_vma
+ => exit_mmap
+ => mmput
+ => flush_old_exec
+ => load_elf_binary
+ => search_binary_handler
+ => __do_execve_file.isra.32
+ => __x64_sys_execve
+ => do_syscall_64
+ => entry_SYSCALL_64_after_hwframe
preemptoff
----------
@@ -2784,6 +2841,38 @@ Produces::
We can see that there's no more lock or preempt tracing.
+Selecting function filters via index
+------------------------------------
+
+Because processing of strings is expensive (the address of the function
+needs to be looked up before comparing to the string being passed in),
+an index can be used as well to enable functions. This is useful in the
+case of setting thousands of specific functions at a time. By passing
+in a list of numbers, no string processing will occur. Instead, the function
+at the specific location in the internal array (which corresponds to the
+functions in the "available_filter_functions" file) is selected.
+
+::
+
+ # echo 1 > set_ftrace_filter
+
+This will select the first function listed in "available_filter_functions".
+
+::
+
+ # head -1 available_filter_functions
+ trace_initcall_finish_cb
+
+ # cat set_ftrace_filter
+ trace_initcall_finish_cb
+
+ # head -50 available_filter_functions | tail -1
+ x86_pmu_commit_txn
+
+ # echo 1 50 > set_ftrace_filter
+ # cat set_ftrace_filter
+ trace_initcall_finish_cb
+ x86_pmu_commit_txn
Dynamic ftrace with the function graph tracer
---------------------------------------------
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index 7dda76503127..0ea59d45aef1 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -25,7 +25,7 @@ Documentation written by Tom Zanussi
hist:keys=<field1[,field2,...]>[:values=<field1[,field2,...]>]
[:sort=<field1[,field2,...]>][:size=#entries][:pause][:continue]
- [:clear][:name=histname1] [if <filter>]
+ [:clear][:name=histname1][:<handler>.<action>] [if <filter>]
When a matching event is hit, an entry is added to a hash table
using the key(s) and value(s) named. Keys and values correspond to
@@ -1831,41 +1831,87 @@ and looks and behaves just like any other event::
Like any other event, once a histogram is enabled for the event, the
output can be displayed by reading the event's 'hist' file.
-2.2.3 Hist trigger 'actions'
-----------------------------
+2.2.3 Hist trigger 'handlers' and 'actions'
+-------------------------------------------
-A hist trigger 'action' is a function that's executed whenever a
-histogram entry is added or updated.
+A hist trigger 'action' is a function that's executed (in most cases
+conditionally) whenever a histogram entry is added or updated.
-The default 'action' if no special function is explicitly specified is
-as it always has been, to simply update the set of values associated
-with an entry. Some applications, however, may want to perform
-additional actions at that point, such as generate another event, or
-compare and save a maximum.
+When a histogram entry is added or updated, a hist trigger 'handler'
+is what decides whether the corresponding action is actually invoked
+or not.
-The following additional actions are available. To specify an action
-for a given event, simply specify the action between colons in the
-hist trigger specification.
+Hist trigger handlers and actions are paired together in the general
+form:
- - onmatch(matching.event).<synthetic_event_name>(param list)
+ <handler>.<action>
- The 'onmatch(matching.event).<synthetic_event_name>(params)' hist
- trigger action is invoked whenever an event matches and the
- histogram entry would be added or updated. It causes the named
- synthetic event to be generated with the values given in the
+To specify a handler.action pair for a given event, simply specify
+that handler.action pair between colons in the hist trigger
+specification.
+
+In theory, any handler can be combined with any action, but in
+practice, not every handler.action combination is currently supported;
+if a given handler.action combination isn't supported, the hist
+trigger will fail with -EINVAL.
+
+The default 'handler.action' if none is explicitly specified is as it
+always has been, to simply update the set of values associated with an
+entry. Some applications, however, may want to perform additional
+actions at that point, such as generate another event, or compare and
+save a maximum.
+
+The supported handlers and actions are listed below, and each is
+described in more detail in the following paragraphs, in the context
+of descriptions of some common and useful handler.action combinations.
+
+The available handlers are:
+
+ - onmatch(matching.event) - invoke action on any addition or update
+ - onmax(var) - invoke action if var exceeds current max
+ - onchange(var) - invoke action if var changes
+
+The available actions are:
+
+ - trace(<synthetic_event_name>,param list) - generate synthetic event
+ - save(field,...) - save current event fields
+ - snapshot() - snapshot the trace buffer
+
+The following commonly-used handler.action pairs are available:
+
+ - onmatch(matching.event).trace(<synthetic_event_name>,param list)
+
+ The 'onmatch(matching.event).trace(<synthetic_event_name>,param
+ list)' hist trigger action is invoked whenever an event matches
+ and the histogram entry would be added or updated. It causes the
+ named synthetic event to be generated with the values given in the
'param list'. The result is the generation of a synthetic event
that consists of the values contained in those variables at the
- time the invoking event was hit.
-
- The 'param list' consists of one or more parameters which may be
- either variables or fields defined on either the 'matching.event'
- or the target event. The variables or fields specified in the
- param list may be either fully-qualified or unqualified. If a
- variable is specified as unqualified, it must be unique between
- the two events. A field name used as a param can be unqualified
- if it refers to the target event, but must be fully qualified if
- it refers to the matching event. A fully-qualified name is of the
- form 'system.event_name.$var_name' or 'system.event_name.field'.
+ time the invoking event was hit. For example, if the synthetic
+ event name is 'wakeup_latency', a wakeup_latency event is
+ generated using onmatch(event).trace(wakeup_latency,arg1,arg2).
+
+ There is also an equivalent alternative form available for
+ generating synthetic events. In this form, the synthetic event
+ name is used as if it were a function name. For example, using
+ the 'wakeup_latency' synthetic event name again, the
+ wakeup_latency event would be generated by invoking it as if it
+ were a function call, with the event field values passed in as
+ arguments: onmatch(event).wakeup_latency(arg1,arg2). The syntax
+ for this form is:
+
+ onmatch(matching.event).<synthetic_event_name>(param list)
+
+ In either case, the 'param list' consists of one or more
+ parameters which may be either variables or fields defined on
+ either the 'matching.event' or the target event. The variables or
+ fields specified in the param list may be either fully-qualified
+ or unqualified. If a variable is specified as unqualified, it
+ must be unique between the two events. A field name used as a
+ param can be unqualified if it refers to the target event, but
+ must be fully qualified if it refers to the matching event. A
+ fully-qualified name is of the form 'system.event_name.$var_name'
+ or 'system.event_name.field'.
The 'matching.event' specification is simply the fully qualified
event name of the event that matches the target event for the
@@ -1896,6 +1942,12 @@ hist trigger specification.
wakeup_new_test($testpid) if comm=="cyclictest"' >> \
/sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
+ Or, equivalently, using the 'trace' keyword syntax:
+
+ # echo 'hist:keys=$testpid:testpid=pid:onmatch(sched.sched_wakeup_new).\
+ trace(wakeup_new_test,$testpid) if comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
+
Creating and displaying a histogram based on those events is now
just a matter of using the fields and new synthetic event in the
tracing/events/synthetic directory, as usual::
@@ -2000,6 +2052,212 @@ hist trigger specification.
Entries: 2
Dropped: 0
+ - onmax(var).snapshot()
+
+ The 'onmax(var).snapshot()' hist trigger action is invoked
+ whenever the value of 'var' associated with a histogram entry
+ exceeds the current maximum contained in that variable.
+
+ The end result is that a global snapshot of the trace buffer will
+ be saved in the tracing/snapshot file if 'var' exceeds the current
+ maximum for any hist trigger entry.
+
+ Note that in this case the maximum is a global maximum for the
+ current trace instance, which is the maximum across all buckets of
+ the histogram. The key of the specific trace event that caused
+ the global maximum and the global maximum itself are displayed,
+ along with a message stating that a snapshot has been taken and
+ where to find it. The user can use the key information displayed
+ to locate the corresponding bucket in the histogram for even more
+ detail.
+
+ As an example the below defines a couple of hist triggers, one for
+ sched_waking and another for sched_switch, keyed on pid. Whenever
+ a sched_waking event occurs, the timestamp is saved in the entry
+ corresponding to the current pid, and when the scheduler switches
+ back to that pid, the timestamp difference is calculated. If the
+ resulting latency, stored in wakeup_lat, exceeds the current
+ maximum latency, a snapshot is taken. As part of the setup, all
+ the scheduler events are also enabled, which are the events that
+ will show up in the snapshot when it is taken at some point:
+
+ # echo 1 > /sys/kernel/debug/tracing/events/sched/enable
+
+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
+ if comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0: \
+ onmax($wakeup_lat).save(next_prio,next_comm,prev_pid,prev_prio, \
+ prev_comm):onmax($wakeup_lat).snapshot() \
+ if next_comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+
+ When the histogram is displayed, for each bucket the max value
+ and the saved values corresponding to the max are displayed
+ following the rest of the fields.
+
+ If a snapshot was taken, there is also a message indicating that,
+ along with the value and event that triggered the global maximum:
+
+ # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist
+ { next_pid: 2101 } hitcount: 200
+ max: 52 next_prio: 120 next_comm: cyclictest \
+ prev_pid: 0 prev_prio: 120 prev_comm: swapper/6
+
+ { next_pid: 2103 } hitcount: 1326
+ max: 572 next_prio: 19 next_comm: cyclictest \
+ prev_pid: 0 prev_prio: 120 prev_comm: swapper/1
+
+ { next_pid: 2102 } hitcount: 1982 \
+ max: 74 next_prio: 19 next_comm: cyclictest \
+ prev_pid: 0 prev_prio: 120 prev_comm: swapper/5
+
+ Snapshot taken (see tracing/snapshot). Details:
+ triggering value { onmax($wakeup_lat) }: 572 \
+ triggered by event with key: { next_pid: 2103 }
+
+ Totals:
+ Hits: 3508
+ Entries: 3
+ Dropped: 0
+
+ In the above case, the event that triggered the global maximum has
+ the key with next_pid == 2103. If you look at the bucket that has
+ 2103 as the key, you'll find the additional values save()'d along
+ with the local maximum for that bucket, which should be the same
+ as the global maximum (since that was the same value that
+ triggered the global snapshot).
+
+ And finally, looking at the snapshot data should show at or near
+ the end the event that triggered the snapshot (in this case you
+ can verify the timestamps between the sched_waking and
+ sched_switch events, which should match the time displayed in the
+ global maximum):
+
+ # cat /sys/kernel/debug/tracing/snapshot
+
+ <...>-2103 [005] d..3 309.873125: sched_switch: prev_comm=cyclictest prev_pid=2103 prev_prio=19 prev_state=D ==> next_comm=swapper/5 next_pid=0 next_prio=120
+ <idle>-0 [005] d.h3 309.873611: sched_waking: comm=cyclictest pid=2102 prio=19 target_cpu=005
+ <idle>-0 [005] dNh4 309.873613: sched_wakeup: comm=cyclictest pid=2102 prio=19 target_cpu=005
+ <idle>-0 [005] d..3 309.873616: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2102 next_prio=19
+ <...>-2102 [005] d..3 309.873625: sched_switch: prev_comm=cyclictest prev_pid=2102 prev_prio=19 prev_state=D ==> next_comm=swapper/5 next_pid=0 next_prio=120
+ <idle>-0 [005] d.h3 309.874624: sched_waking: comm=cyclictest pid=2102 prio=19 target_cpu=005
+ <idle>-0 [005] dNh4 309.874626: sched_wakeup: comm=cyclictest pid=2102 prio=19 target_cpu=005
+ <idle>-0 [005] dNh3 309.874628: sched_waking: comm=cyclictest pid=2103 prio=19 target_cpu=005
+ <idle>-0 [005] dNh4 309.874630: sched_wakeup: comm=cyclictest pid=2103 prio=19 target_cpu=005
+ <idle>-0 [005] d..3 309.874633: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2102 next_prio=19
+ <idle>-0 [004] d.h3 309.874757: sched_waking: comm=gnome-terminal- pid=1699 prio=120 target_cpu=004
+ <idle>-0 [004] dNh4 309.874762: sched_wakeup: comm=gnome-terminal- pid=1699 prio=120 target_cpu=004
+ <idle>-0 [004] d..3 309.874766: sched_switch: prev_comm=swapper/4 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=gnome-terminal- next_pid=1699 next_prio=120
+ gnome-terminal--1699 [004] d.h2 309.874941: sched_stat_runtime: comm=gnome-terminal- pid=1699 runtime=180706 [ns] vruntime=1126870572 [ns]
+ <idle>-0 [003] d.s4 309.874956: sched_waking: comm=rcu_sched pid=9 prio=120 target_cpu=007
+ <idle>-0 [003] d.s5 309.874960: sched_wake_idle_without_ipi: cpu=7
+ <idle>-0 [003] d.s5 309.874961: sched_wakeup: comm=rcu_sched pid=9 prio=120 target_cpu=007
+ <idle>-0 [007] d..3 309.874963: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=rcu_sched next_pid=9 next_prio=120
+ rcu_sched-9 [007] d..3 309.874973: sched_stat_runtime: comm=rcu_sched pid=9 runtime=13646 [ns] vruntime=22531430286 [ns]
+ rcu_sched-9 [007] d..3 309.874978: sched_switch: prev_comm=rcu_sched prev_pid=9 prev_prio=120 prev_state=R+ ==> next_comm=swapper/7 next_pid=0 next_prio=120
+ <...>-2102 [005] d..4 309.874994: sched_migrate_task: comm=cyclictest pid=2103 prio=19 orig_cpu=5 dest_cpu=1
+ <...>-2102 [005] d..4 309.875185: sched_wake_idle_without_ipi: cpu=1
+ <idle>-0 [001] d..3 309.875200: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2103 next_prio=19
+
+ - onchange(var).save(field,...)
+
+ The 'onchange(var).save(field,...)' hist trigger action is invoked
+ whenever the value of 'var' associated with a histogram entry
+ changes.
+
+ The end result is that the trace event fields specified as the
+ onchange.save() params will be saved if 'var' changes for that
+ hist trigger entry. This allows context from the event that
+ changed the value to be saved for later reference. When the
+ histogram is displayed, additional fields displaying the saved
+ values will be printed.
+
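+ As a minimal sketch (reusing the tcp_probe field names from the
+ snapshot example below):
+
+ # echo 'hist:keys=dport:cwnd=snd_cwnd: \
+ onchange($cwnd).save(snd_wnd,srtt,rcv_wnd)' >> \
+ /sys/kernel/debug/tracing/events/tcp/tcp_probe/trigger
+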
+ - onchange(var).snapshot()
+
+ The 'onchange(var).snapshot()' hist trigger action is invoked
+ whenever the value of 'var' associated with a histogram entry
+ changes.
+
+ The end result is that a global snapshot of the trace buffer will
+ be saved in the tracing/snapshot file if 'var' changes for any
+ hist trigger entry.
+
+ Note that in this case the changed value is a global variable
+ associated with the current trace instance. The key of the specific
+ trace event that caused the value to change and the global value
+ itself are displayed, along with a message stating that a snapshot
+ has been taken and where to find it. The user can use the key
+ information displayed to locate the corresponding bucket in the
+ histogram for even more detail.
+
+ As an example the below defines a hist trigger on the tcp_probe
+ event, keyed on dport. Whenever a tcp_probe event occurs, the
+ cwnd field is checked against the current value stored in the
+ $cwnd variable. If the value has changed, a snapshot is taken.
+ As part of the setup, all the scheduler and tcp events are also
+ enabled, which are the events that will show up in the snapshot
+ when it is taken at some point:
+
+ # echo 1 > /sys/kernel/debug/tracing/events/sched/enable
+ # echo 1 > /sys/kernel/debug/tracing/events/tcp/enable
+
+ # echo 'hist:keys=dport:cwnd=snd_cwnd: \
+ onchange($cwnd).save(snd_wnd,srtt,rcv_wnd): \
+ onchange($cwnd).snapshot()' >> \
+ /sys/kernel/debug/tracing/events/tcp/tcp_probe/trigger
+
+ When the histogram is displayed, for each bucket the tracked value
+ and the saved values corresponding to that value are displayed
+ following the rest of the fields.
+
+ If a snapshot was taken, there is also a message indicating that,
+ along with the value and event that triggered the snapshot:
+
+ # cat /sys/kernel/debug/tracing/events/tcp/tcp_probe/hist
+ { dport: 1521 } hitcount: 8
+ changed: 10 snd_wnd: 35456 srtt: 154262 rcv_wnd: 42112
+
+ { dport: 80 } hitcount: 23
+ changed: 10 snd_wnd: 28960 srtt: 19604 rcv_wnd: 29312
+
+ { dport: 9001 } hitcount: 172
+ changed: 10 snd_wnd: 48384 srtt: 260444 rcv_wnd: 55168
+
+ { dport: 443 } hitcount: 211
+ changed: 10 snd_wnd: 26960 srtt: 17379 rcv_wnd: 28800
+
+ Snapshot taken (see tracing/snapshot). Details:
+ triggering value { onchange($cwnd) }: 10
+ triggered by event with key: { dport: 80 }
+
+ Totals:
+ Hits: 414
+ Entries: 4
+ Dropped: 0
+
+ In the above case, the event that triggered the snapshot has the
+ key with dport == 80. If you look at the bucket that has 80 as
+ the key, you'll find the additional values save()'d along with the
+ changed value for that bucket, which should be the same as the
+ global changed value (since that was the same value that triggered
+ the global snapshot).
+
+ And finally, looking at the snapshot data should show at or near
+ the end the event that triggered the snapshot:
+
+ # cat /sys/kernel/debug/tracing/snapshot
+
+ gnome-shell-1261 [006] dN.3 49.823113: sched_stat_runtime: comm=gnome-shell pid=1261 runtime=49347 [ns] vruntime=1835730389 [ns]
+ kworker/u16:4-773 [003] d..3 49.823114: sched_switch: prev_comm=kworker/u16:4 prev_pid=773 prev_prio=120 prev_state=R+ ==> next_comm=kworker/3:2 next_pid=135 next_prio=120
+ gnome-shell-1261 [006] d..3 49.823114: sched_switch: prev_comm=gnome-shell prev_pid=1261 prev_prio=120 prev_state=R+ ==> next_comm=kworker/6:2 next_pid=387 next_prio=120
+ kworker/3:2-135 [003] d..3 49.823118: sched_stat_runtime: comm=kworker/3:2 pid=135 runtime=5339 [ns] vruntime=17815800388 [ns]
+ kworker/6:2-387 [006] d..3 49.823120: sched_stat_runtime: comm=kworker/6:2 pid=387 runtime=9594 [ns] vruntime=14589605367 [ns]
+ kworker/6:2-387 [006] d..3 49.823122: sched_switch: prev_comm=kworker/6:2 prev_pid=387 prev_prio=120 prev_state=R+ ==> next_comm=gnome-shell next_pid=1261 next_prio=120
+ kworker/3:2-135 [003] d..3 49.823123: sched_switch: prev_comm=kworker/3:2 prev_pid=135 prev_prio=120 prev_state=T ==> next_comm=swapper/3 next_pid=0 next_prio=120
+ <idle>-0 [004] ..s7 49.823798: tcp_probe: src=10.0.0.10:54326 dest=23.215.104.193:80 mark=0x0 length=32 snd_nxt=0xe3ae2ff5 snd_una=0xe3ae2ecd snd_cwnd=10 ssthresh=2147483647 snd_wnd=28960 srtt=19604 rcv_wnd=29312
+
3. User space creating a trigger
--------------------------------
diff --git a/Documentation/trace/uprobetracer.rst b/Documentation/trace/uprobetracer.rst
index 4c3bfde2ba47..4346e23e3ae7 100644
--- a/Documentation/trace/uprobetracer.rst
+++ b/Documentation/trace/uprobetracer.rst
@@ -73,10 +73,9 @@ For $comm, the default type is "string"; any other type is invalid.
Event Profiling
---------------
-You can check the total number of probe hits and probe miss-hits via
-/sys/kernel/debug/tracing/uprobe_profile.
-The first column is event name, the second is the number of probe hits,
-the third is the number of probe miss-hits.
+You can check the total number of probe hits per event via
+/sys/kernel/debug/tracing/uprobe_profile. The first column is the filename,
+the second is the event name, the third is the number of probe hits.
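+
+A hypothetical example (the path, probe name and count are invented
+for illustration)::
+
+ # cat /sys/kernel/debug/tracing/uprobe_profile
+ /bin/bash p_bash_0x4245c0 234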
Usage examples
--------------
diff --git a/Documentation/translations/it_IT/doc-guide/sphinx.rst b/Documentation/translations/it_IT/doc-guide/sphinx.rst
index 474b7e127893..793b5cc33403 100644
--- a/Documentation/translations/it_IT/doc-guide/sphinx.rst
+++ b/Documentation/translations/it_IT/doc-guide/sphinx.rst
@@ -3,6 +3,8 @@
.. note:: Per leggere la documentazione originale in inglese:
:ref:`Documentation/doc-guide/index.rst <doc_guide>`
+.. _it_sphinxdoc:
+
Introduzione
============
diff --git a/Documentation/translations/it_IT/process/applying-patches.rst b/Documentation/translations/it_IT/process/applying-patches.rst
index f5e9c7d0b16d..1d30e5cd2a3d 100644
--- a/Documentation/translations/it_IT/process/applying-patches.rst
+++ b/Documentation/translations/it_IT/process/applying-patches.rst
@@ -1,13 +1,15 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/applying-patches.rst <applying_patches>`
-
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_applying_patches:
-Applicare modifiche al kernel Linux
-===================================
+Applicare patch al kernel Linux
++++++++++++++++++++++++++++++++
-.. warning::
+.. note::
- TODO ancora da tradurre
+ Questo documento è obsoleto. Nella maggior parte dei casi, piuttosto
+ che usare ``patch`` manualmente, vorrete usare Git. Per questo motivo
+ il documento non verrà tradotto.
diff --git a/Documentation/translations/it_IT/process/changes.rst b/Documentation/translations/it_IT/process/changes.rst
index 956cf95a1214..d0874327f301 100644
--- a/Documentation/translations/it_IT/process/changes.rst
+++ b/Documentation/translations/it_IT/process/changes.rst
@@ -1,12 +1,495 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/changes.rst <changes>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_changes:
Requisiti minimi per compilare il kernel
++++++++++++++++++++++++++++++++++++++++
-.. warning::
+Introduzione
+============
- TODO ancora da tradurre
+Questo documento fornisce una lista dei software necessari per eseguire i
+kernel 4.x.
+
+Questo documento è basato sul file "Changes" del kernel 2.0.x e quindi le
+persone che lo scrissero meritano credito (Jared Mauch, Axel Boldt,
+Alessandro Sigala, e tanti altri nella rete).
+
+Requisiti minimi correnti
+*************************
+
+Prima di pensare d'avere trovato un baco, aggiornate i seguenti programmi
+**almeno** alla versione indicata! Se non siete certi della versione che state
+usando, il comando indicato dovrebbe dirvelo.
+
+Questa lista presume che abbiate già un kernel Linux funzionante. In aggiunta,
+non tutti gli strumenti sono necessari ovunque; ovviamente, se non avete un
+modem ISDN, per esempio, probabilmente non dovreste preoccuparvi di
+isdn4k-utils.
+
+====================== ================= ========================================
+ Programma Versione minima Comando per verificare la versione
+====================== ================= ========================================
+GNU C 4.6 gcc --version
+GNU make 3.81 make --version
+binutils 2.20 ld -v
+flex 2.5.35 flex --version
+bison 2.0 bison --version
+util-linux 2.10o fdformat --version
+kmod 13 depmod -V
+e2fsprogs 1.41.4 e2fsck -V
+jfsutils 1.1.3 fsck.jfs -V
+reiserfsprogs 3.6.3 reiserfsck -V
+xfsprogs 2.6.0 xfs_db -V
+squashfs-tools 4.0 mksquashfs -version
+btrfs-progs 0.18 btrfsck
+pcmciautils 004 pccardctl -V
+quota-tools 3.09 quota -V
+PPP 2.4.0 pppd --version
+isdn4k-utils 3.1pre1 isdnctrl 2>&1|grep version
+nfs-utils 1.0.5 showmount --version
+procps 3.2.0 ps --version
+oprofile 0.9 oprofiled --version
+udev 081 udevd --version
+grub 0.93 grub --version || grub-install --version
+mcelog 0.6 mcelog --version
+iptables 1.4.2 iptables -V
+openssl & libcrypto 1.0.0 openssl version
+bc 1.06.95 bc --version
+Sphinx\ [#f1]_ 1.3 sphinx-build --version
+====================== ================= ========================================
+
+.. [#f1] Sphinx è necessario solo per produrre la documentazione del Kernel
+
+Compilazione del kernel
+***********************
+
+GCC
+---
+
+La versione necessaria di gcc potrebbe variare a seconda del tipo di CPU nel
+vostro calcolatore.
+
+Make
+----
+
+Per compilare il kernel vi servirà GNU make 3.81 o successivo.
+
+Binutils
+--------
+
+Il sistema di compilazione, dalla versione 4.13, per la produzione dei passi
+intermedi, si è convertito all'uso di *thin archive* (`ar T`) piuttosto che
+all'uso del *linking* incrementale (`ld -r`). Questo richiede binutils 2.20 o
+successivo.
+
+pkg-config
+----------
+
+Il sistema di compilazione, dalla versione 4.18, richiede pkg-config per
+verificare l'esistenza degli strumenti kconfig e per determinare le
+impostazioni da usare in 'make {g,x}config'. Precedentemente pkg-config
+veniva usato ma non verificato o documentato.
+
+Flex
+----
+
+Dalla versione 4.16, il sistema di compilazione, durante l'esecuzione, genera
+un analizzatore lessicale. Questo richiede flex 2.5.35 o successivo.
+
+Bison
+-----
+
+Dalla versione 4.16, il sistema di compilazione, durante l'esecuzione, genera
+un parsificatore. Questo richiede bison 2.0 o successivo.
+
+Perl
+----
+
+Per compilare il kernel vi servirà perl 5 e i seguenti moduli ``Getopt::Long``,
+``Getopt::Std``, ``File::Basename``, e ``File::Find``.
+
+BC
+--
+
+Vi servirà bc per compilare i kernel dal 3.10 in poi.
+
+OpenSSL
+-------
+
+Il programma OpenSSL e la libreria crypto vengono usati per la firma dei moduli
+e la gestione dei certificati; sono usati per la creazione della chiave e
+la generazione della firma.
+
+Se la firma dei moduli è abilitata, allora vi servirà openssl per compilare il
+kernel 3.7 e successivi. Vi serviranno anche i pacchetti di sviluppo di
+openssl per compilare il kernel 4.3 o successivi.
+
+
+Strumenti di sistema
+********************
+
+Modifiche architetturali
+------------------------
+
+DevFS è stato reso obsoleto da udev
+(http://www.kernel.org/pub/linux/utils/kernel/hotplug/)
+
+Il supporto per UID a 32-bit è ora disponibile. Divertitevi!
+
+La documentazione delle funzioni in Linux è in una fase di transizione
+verso una documentazione integrata nei sorgenti stessi usando dei commenti
+formattati in modo speciale e posizionati vicino alle funzioni che descrivono.
+Al fine di arricchire la documentazione, questi commenti possono essere
+combinati con i file ReST presenti in Documentation/; questi potranno
+poi essere convertiti in formato PostScript, HTML, LaTex, ePUB o PDF.
+Per convertire i documenti da ReST al formato che volete, avete bisogno di
+Sphinx.
+
+Util-linux
+----------
+
+Le versioni più recenti di util-linux: forniscono il supporto a ``fdisk`` per
+dischi di grandi dimensioni; supportano le nuove opzioni di mount; riconoscono
+più tipi di partizioni; hanno un fdformat che funziona con i kernel 2.4;
+e altre chicche. Probabilmente vorrete aggiornarlo.
+
+Ksymoops
+--------
+
+Se l'impensabile succede e il kernel va in oops, potrebbe servirvi lo strumento
+ksymoops per decodificarlo, ma nella maggior parte dei casi non vi servirà.
+Generalmente è preferibile compilare il kernel con l'opzione ``CONFIG_KALLSYMS``
+cosicché venga prodotto un output più leggibile che può essere usato così com'è
+(produce anche un output migliore di ksymoops). Se per qualche motivo il
+vostro kernel non è stato compilato con ``CONFIG_KALLSYMS`` e non avete modo di
+ricompilarlo e riprodurre l'oops con quell'opzione abilitata, allora potete
+usare ksymoops per decodificare l'oops.
+
+Mkinitrd
+--------
+
+I cambiamenti della struttura in ``/lib/modules`` necessitano l'aggiornamento di
+mkinitrd.
+
+E2fsprogs
+---------
+
+L'ultima versione di ``e2fsprogs`` corregge diversi bachi in fsck e debugfs.
+Ovviamente, aggiornarlo è una buona idea.
+
+JFSutils
+--------
+
+Il pacchetto ``jfsutils`` contiene programmi per il file-system JFS.
+Sono disponibili i seguenti strumenti:
+
+- ``fsck.jfs`` - avvia la ripetizione del log delle transazioni, e verifica e
+ ripara una partizione formattata secondo JFS
+
+- ``mkfs.jfs`` - crea una partizione formattata secondo JFS
+
+- sono disponibili altri strumenti per il file-system.
+
+Reiserfsprogs
+-------------
+
+Il pacchetto reiserfsprogs dovrebbe essere usato con reiserfs-3.6.x (Linux
+kernel 2.4.x). Questo è un pacchetto combinato che contiene versioni
+funzionanti di ``mkreiserfs``, ``resize_reiserfs``, ``debugreiserfs`` e
+``reiserfsck``. Questi programmi funzionano sulle piattaforme i386 e alpha.
+
+Xfsprogs
+--------
+
+L'ultima versione di ``xfsprogs`` contiene, fra i tanti, i programmi
+``mkfs.xfs``, ``xfs_db`` e ``xfs_repair`` per il file-system XFS.
+Dipendono dall'architettura e qualsiasi versione dalla 2.0.0 in poi
+dovrebbe funzionare correttamente con la versione corrente del codice
+XFS nel kernel (sono raccomandate le versioni 2.6.0 o successive per via
+di importanti miglioramenti).
+
+PCMCIAutils
+-----------
+
+PCMCIAutils sostituisce ``pcmcia-cs``. Serve ad impostare correttamente i
+connettori PCMCIA all'avvio del sistema e a caricare i moduli necessari per
+i dispositivi a 16-bit se il kernel è stato modularizzato e il sottosistema
+hotplug è in uso.
+
+Quota-tools
+-----------
+
+Il supporto per uid e gid a 32 bit richiede l'uso della versione 2 del
+formato quota. La versione 3.07 e successive di quota-tools supportano
+questo formato. Usate la versione raccomandata nella lista qui sopra o una
+successiva.
+
+Micro codice per Intel IA32
+---------------------------
+
+Per poter aggiornare il micro codice per Intel IA32, è stato aggiunto un
+apposito driver; il driver è accessibile come un normale dispositivo a
+caratteri (misc). Se non state usando udev probabilmente sarà necessario
+eseguire i seguenti comandi come root prima di poterlo aggiornare::
+
+ mkdir /dev/cpu
+ mknod /dev/cpu/microcode c 10 184
+ chmod 0644 /dev/cpu/microcode
+
+Probabilmente, vorrete anche il programma microcode_ctl da usare con questo
+dispositivo.
+
+udev
+----
+
+``udev`` è un programma in spazio utente il cui scopo è quello di popolare
+dinamicamente la cartella ``/dev`` coi dispositivi effettivamente presenti.
+``udev`` sostituisce le funzionalità base di devfs, consentendo comunque
+nomi persistenti per i dispositivi.
+
+FUSE
+----
+
+Serve libfuse 2.4.0 o successiva. Il requisito minimo assoluto è 2.3.0 ma
+le opzioni di mount ``direct_io`` e ``kernel_cache`` non funzioneranno.
+
+
+Rete
+****
+
+Cambiamenti generali
+--------------------
+
+Se per quanto riguarda la configurazione di rete avete esigenze di un certo
+livello dovreste prendere in considerazione l'uso degli strumenti in ip-route2.
+
+Filtro dei pacchetti / NAT
+--------------------------
+
+Il codice per filtraggio dei pacchetti e il NAT fanno uso degli stessi
+strumenti come nelle versioni del kernel antecedenti la 2.4.x (iptables).
+Include ancora moduli di compatibilità per 2.2.x ipchains e 2.0.x ipfwadm.
+
+PPP
+---
+
+Il driver per PPP è stato ristrutturato per supportare collegamenti multipli e
+per funzionare su diversi livelli. Se usate PPP, aggiornate pppd almeno alla
+versione 2.4.0.
+
+Se non usate udev, dovete avere un file /dev/ppp che può essere creato da root
+col seguente comando::
+
+ mknod /dev/ppp c 108 0
+
+Isdn4k-utils
+------------
+
+Per via della modifica del campo per il numero di telefono, il pacchetto
+isdn4k-utils dev'essere ricompilato o (preferibilmente) aggiornato.
+
+NFS-utils
+---------
+
+Nei kernel più antichi (2.4 e precedenti), il server NFS doveva essere
+informato sui clienti ai quali si voleva fornire accesso via NFS. Questa
+informazione veniva passata al kernel quando un cliente montava un file-system
+mediante ``mountd``, oppure usando ``exportfs`` all'avvio del sistema.
+exportfs prende le informazioni circa i clienti attivi da ``/var/lib/nfs/rmtab``.
+
+Questo approccio è piuttosto delicato perché dipende dalla correttezza di
+rmtab, che non è facile da garantire, in particolare quando si cerca di
+implementare un *failover*. Anche quando il sistema funziona bene, ``rmtab``
+ha il problema di accumulare vecchie voci inutilizzate.
+
+Sui kernel più recenti il kernel ha la possibilità di informare mountd quando
+arriva una richiesta da una macchina sconosciuta, e mountd può dare al kernel
+le informazioni corrette per l'esportazione. Questo rimuove la dipendenza con
+``rmtab`` e significa che il kernel deve essere al corrente solo dei clienti
+attivi.
+
+Per attivare questa funzionalità, dovete eseguire il seguente comando prima di
+usare exportfs o mountd::
+
+ mount -t nfsd nfsd /proc/fs/nfsd
+
+Dove possibile, raccomandiamo di proteggere tutti i servizi NFS dall'accesso
+via internet mediante un firewall.
+
+mcelog
+------
+
+Quando ``CONFIG_X86_MCE`` è attivo, il programma mcelog processa e registra
+gli eventi *machine check*. Gli eventi *machine check* sono errori riportati
+dalla CPU. Incoraggiamo l'analisi di questi errori.
+
+
+Documentazione del kernel
+*************************
+
+Sphinx
+------
+
+Per i dettagli sui requisiti di Sphinx, fate riferimento a :ref:`it_sphinx_install`
+in :ref:`Documentation/translations/it_IT/doc-guide/sphinx.rst <it_sphinxdoc>`.
+
+Ottenere software aggiornato
+============================
+
+Compilazione del kernel
+***********************
+
+gcc
+---
+
+- <ftp://ftp.gnu.org/gnu/gcc/>
+
+Make
+----
+
+- <ftp://ftp.gnu.org/gnu/make/>
+
+Binutils
+--------
+
+- <https://www.kernel.org/pub/linux/devel/binutils/>
+
+Flex
+----
+
+- <https://github.com/westes/flex/releases>
+
+Bison
+-----
+
+- <ftp://ftp.gnu.org/gnu/bison/>
+
+OpenSSL
+-------
+
+- <https://www.openssl.org/>
+
+Strumenti di sistema
+********************
+
+Util-linux
+----------
+
+- <https://www.kernel.org/pub/linux/utils/util-linux/>
+
+Kmod
+----
+
+- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
+- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
+
+Ksymoops
+--------
+
+- <https://www.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
+
+Mkinitrd
+--------
+
+- <https://code.launchpad.net/initrd-tools/main>
+
+E2fsprogs
+---------
+
+- <http://prdownloads.sourceforge.net/e2fsprogs/e2fsprogs-1.29.tar.gz>
+
+JFSutils
+--------
+
+- <http://jfs.sourceforge.net/>
+
+Reiserfsprogs
+-------------
+
+- <http://www.kernel.org/pub/linux/utils/fs/reiserfs/>
+
+Xfsprogs
+--------
+
+- <ftp://oss.sgi.com/projects/xfs/>
+
+Pcmciautils
+-----------
+
+- <https://www.kernel.org/pub/linux/utils/kernel/pcmcia/>
+
+Quota-tools
+-----------
+
+- <http://sourceforge.net/projects/linuxquota/>
+
+
+Microcodice Intel P6
+--------------------
+
+- <https://downloadcenter.intel.com/>
+
+udev
+----
+
+- <http://www.freedesktop.org/software/systemd/man/udev.html>
+
+FUSE
+----
+
+- <https://github.com/libfuse/libfuse/releases>
+
+mcelog
+------
+
+- <http://www.mcelog.org/>
+
+Rete
+****
+
+PPP
+---
+
+- <ftp://ftp.samba.org/pub/ppp/>
+
+Isdn4k-utils
+------------
+
+- <ftp://ftp.isdn4linux.de/pub/isdn4linux/utils/>
+
+NFS-utils
+---------
+
+- <http://sourceforge.net/project/showfiles.php?group_id=14>
+
+Iptables
+--------
+
+- <http://www.iptables.org/downloads.html>
+
+Ip-route2
+---------
+
+- <https://www.kernel.org/pub/linux/utils/net/iproute2/>
+
+OProfile
+--------
+
+- <http://oprofile.sf.net/download/>
+
+NFS-Utils
+---------
+
+- <http://nfs.sourceforge.net/>
+
+Documentazione del kernel
+*************************
+
+Sphinx
+------
+
+- <http://www.sphinx-doc.org/>
diff --git a/Documentation/translations/it_IT/process/coding-style.rst b/Documentation/translations/it_IT/process/coding-style.rst
index b707bdbe178c..2fd0e7f79d55 100644
--- a/Documentation/translations/it_IT/process/coding-style.rst
+++ b/Documentation/translations/it_IT/process/coding-style.rst
@@ -449,6 +449,9 @@ Nonostante questo non sia richiesto dal linguaggio C, in Linux viene preferito
perché è un modo semplice per aggiungere informazioni importanti per il
lettore.
+Non usate la parola chiave ``extern`` coi prototipi di funzione perché
+rende le righe più lunghe e non è strettamente necessario.
+
7) Centralizzare il ritorno delle funzioni
------------------------------------------
@@ -600,26 +603,43 @@ segue nel vostro file .emacs:
(* (max steps 1)
c-basic-offset)))
- (add-hook 'c-mode-common-hook
- (lambda ()
- ;; Add kernel style
- (c-add-style
- "linux-tabs-only"
- '("linux" (c-offsets-alist
- (arglist-cont-nonempty
- c-lineup-gcc-asm-reg
- c-lineup-arglist-tabs-only))))))
-
- (add-hook 'c-mode-hook
- (lambda ()
- (let ((filename (buffer-file-name)))
- ;; Enable kernel mode for the appropriate files
- (when (and filename
- (string-match (expand-file-name "~/src/linux-trees")
- filename))
- (setq indent-tabs-mode t)
- (setq show-trailing-whitespace t)
- (c-set-style "linux-tabs-only")))))
+ (dir-locals-set-class-variables
+ 'linux-kernel
+ '((c-mode . (
+ (c-basic-offset . 8)
+ (c-label-minimum-indentation . 0)
+ (c-offsets-alist . (
+ (arglist-close . c-lineup-arglist-tabs-only)
+ (arglist-cont-nonempty .
+ (c-lineup-gcc-asm-reg c-lineup-arglist-tabs-only))
+ (arglist-intro . +)
+ (brace-list-intro . +)
+ (c . c-lineup-C-comments)
+ (case-label . 0)
+ (comment-intro . c-lineup-comment)
+ (cpp-define-intro . +)
+ (cpp-macro . -1000)
+ (cpp-macro-cont . +)
+ (defun-block-intro . +)
+ (else-clause . 0)
+ (func-decl-cont . +)
+ (inclass . +)
+ (inher-cont . c-lineup-multi-inher)
+ (knr-argdecl-intro . 0)
+ (label . -1000)
+ (statement . 0)
+ (statement-block-intro . +)
+ (statement-case-intro . +)
+ (statement-cont . +)
+ (substatement . +)
+ ))
+ (indent-tabs-mode . t)
+ (show-trailing-whitespace . t)
+ ))))
+
+ (dir-locals-set-directory-class
+ (expand-file-name "~/src/linux-trees")
+ 'linux-kernel)
Questo farà funzionare meglio emacs con lo stile del kernel per i file che
si trovano nella cartella ``~/src/linux-trees``.
@@ -929,7 +949,40 @@ qualche valore fuori dai limiti. Un tipico esempio è quello delle funzioni
che ritornano un puntatore; queste utilizzano NULL o ERR_PTR come meccanismo
di notifica degli errori.
-17) Non reinventate le macro del kernel
+17) L'uso di bool
+-----------------
+
+Nel kernel Linux il tipo bool deriva dal tipo _Bool dello standard C99.
+Un valore bool può assumere solo i valori 0 o 1, e implicitamente o
+esplicitamente la conversione a bool converte i valori in vero (*true*) o
+falso (*false*). Quando si usa un tipo bool il costrutto !! non sarà più
+necessario, e questo va ad eliminare una certa serie di bachi.
+
+Quando si usano i valori booleani, dovreste utilizzare le definizioni di true
+e false al posto dei valori 1 e 0.
+
+Per il valore di ritorno delle funzioni e per le variabili sullo stack, l'uso
+del tipo bool è sempre appropriato. L'uso di bool viene incoraggiato per
+migliorare la leggibilità e spesso è molto meglio di 'int' nella gestione di
+valori booleani.
+
+Non usate bool se per voi sono importanti l'ordine delle righe di cache o
+la loro dimensione; la dimensione e l'allineamento cambiano a seconda
+dell'architettura per la quale è stato compilato. Le strutture che sono state
+ottimizzate per l'allineamento o la dimensione non dovrebbero usare bool.
+
+Se una struttura ha molti valori true/false, considerate l'idea di raggrupparli
+in un intero usando campi da 1 bit, oppure usate un tipo dalla larghezza fissa,
+come u8.
+
+Come per gli argomenti delle funzioni, molti valori true/false possono essere
+raggruppati in un singolo argomento a bit denominato 'flags'; spesso 'flags' è
+un'alternativa molto più leggibile se si hanno valori costanti per true/false.
+
+Detto ciò, un uso parsimonioso di bool nelle strutture dati e negli argomenti
+può migliorare la leggibilità.
+
+18) Non reinventate le macro del kernel
---------------------------------------
Il file di intestazione include/linux/kernel.h contiene un certo numero
@@ -953,7 +1006,7 @@ rigido sui tipi. Sentitevi liberi di leggere attentamente questo file
d'intestazione per scoprire cos'altro è stato definito che non dovreste
reinventare nel vostro codice.
-18) Linee di configurazione degli editor e altre schifezze
+19) Linee di configurazione degli editor e altre schifezze
-----------------------------------------------------------
Alcuni editor possono interpretare dei parametri di configurazione integrati
@@ -987,8 +1040,8 @@ d'indentazione e di modalità d'uso. Le persone potrebbero aver configurato una
modalità su misura, oppure potrebbero avere qualche altra magia per far
funzionare bene l'indentazione.
-19) Inline assembly
----------------------
+20) Inline assembly
+-------------------
Nel codice specifico per un'architettura, potreste aver bisogno di codice
*inline assembly* per interfacciarvi col processore o con una funzionalità
@@ -1020,7 +1073,7 @@ al fine di allineare correttamente l'assembler che verrà generato:
"more_magic %reg2, %reg3"
: /* outputs */ : /* inputs */ : /* clobbers */);
-20) Compilazione sotto condizione
+21) Compilazione sotto condizione
---------------------------------
Ovunque sia possibile, non usate le direttive condizionali del preprocessore
diff --git a/Documentation/translations/it_IT/process/howto.rst b/Documentation/translations/it_IT/process/howto.rst
index 909e6a55bc43..9903ac7c566b 100644
--- a/Documentation/translations/it_IT/process/howto.rst
+++ b/Documentation/translations/it_IT/process/howto.rst
@@ -234,7 +234,7 @@ il progetto Linux Cross-Reference, che è in grado di presentare codice
sorgente in un formato autoreferenziale ed indicizzato. Un eccellente ed
aggiornata fonte di consultazione del codice del kernel la potete trovare qui:
- http://lxr.free-electrons.com/
+ https://elixir.bootlin.com/
Il processo di sviluppo
@@ -244,7 +244,6 @@ e di molti altri rami per specifici sottosistemi. Questi rami sono:
- I sorgenti kernel 4.x
- I sorgenti stabili del kernel 4.x.y -stable
- - Le modifiche in 4.x -git
- Sorgenti dei sottosistemi del kernel e le loro modifiche
- Il kernel 4.x -next per test d'integrazione
@@ -313,16 +312,6 @@ Il file Documentation/process/stable-kernel-rules.rst (nei sorgenti) documenta
quali tipologie di modifiche sono accettate per i sorgenti -stable, e come
avviene il processo di rilascio.
-Le modifiche in 4.x -git
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Queste sono istantanee quotidiane del kernel di Linus e sono gestite in
-una repositorio git (da qui il nome). Queste modifiche sono solitamente
-rilasciate giornalmente e rappresentano l'attuale stato dei sorgenti di
-Linus. Queste sono da considerarsi più sperimentali di un -rc in quanto
-generate automaticamente senza nemmeno aver dato una rapida occhiata
-per verificarne lo stato.
-
Sorgenti dei sottosistemi del kernel e le loro patch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/translations/it_IT/process/stable-api-nonsense.rst b/Documentation/translations/it_IT/process/stable-api-nonsense.rst
index d4fa4abf8dd3..cdfc509b8566 100644
--- a/Documentation/translations/it_IT/process/stable-api-nonsense.rst
+++ b/Documentation/translations/it_IT/process/stable-api-nonsense.rst
@@ -1,13 +1,209 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/stable-api-nonsense.rst <stable_api_nonsense>`
-
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_stable_api_nonsense:
L'interfaccia dei driver per il kernel Linux
============================================
-.. warning::
+(tutte le risposte alle vostre domande e altro)
+
+Greg Kroah-Hartman <greg@kroah.com>
+
+Questo è stato scritto per cercare di spiegare perché Linux **non ha
+un'interfaccia binaria, e non ha nemmeno un'interfaccia stabile**.
+
+.. note::
+
+ Questo articolo parla di interfacce **interne al kernel**, non delle
+ interfacce verso lo spazio utente.
+
+ L'interfaccia del kernel verso lo spazio utente è quella usata dai
+ programmi, ovvero le chiamate di sistema. Queste interfacce sono **molto**
+ stabili nel tempo e non verranno modificate. Ho vecchi programmi che sono
+ stati compilati su un kernel 0.9 (circa) e tuttora funzionano sulle versioni
+ 2.6 del kernel. Queste interfacce sono quelle che gli utenti e i
+ programmatori possono considerare stabili.
+
+Riepilogo generale
+------------------
+
+Pensate di volere un'interfaccia del kernel stabile, ma in realtà non la
+volete, e nemmeno sapete di non volerla. Quello che volete è un driver
+stabile che funzioni, e questo può essere ottenuto solo se il driver si trova
+nei sorgenti del kernel. Ci sono altri vantaggi nell'avere il proprio driver
+nei sorgenti del kernel, ognuno dei quali hanno reso Linux un sistema operativo
+robusto, stabile e maturo; questi sono anche i motivi per cui avete scelto
+Linux.
+
+Introduzione
+------------
+
+Solo le persone un po' strambe vorrebbero scrivere driver per il kernel con
+la costante preoccupazione per i cambiamenti alle interfacce interne. Per il
+resto del mondo, queste interfacce sono invisibili o non di particolare
+interesse.
+
+Innanzitutto, non tratterò **alcun** problema legale riguardante codice
+chiuso, nascosto, avvolto, blocchi binari, o qualsia altra cosa che descrive
+driver che non hanno i propri sorgenti rilasciati con licenza GPL. Per favore
+fate riferimento ad un avvocato per qualsiasi questione legale, io sono un
+programmatore e perciò qui vi parlerò soltanto delle questioni tecniche (non
+per essere superficiali sui problemi legali, sono veri e dovete esserne a
+conoscenza in ogni circostanza).
+
+Dunque, ci sono due tematiche principali: interfacce binarie del kernel e
+interfacce stabili nei sorgenti. Ognuna dipende dall'altra, ma discuteremo
+prima delle cose binarie per toglierle di mezzo.
+
+Interfaccia binaria del kernel
+------------------------------
+
+Supponiamo d'avere un'interfaccia stabile nei sorgenti del kernel, di
+conseguenza un'interfaccia binaria dovrebbe essere anch'essa stabile, giusto?
+Sbagliato. Prendete in considerazione i seguenti fatti che riguardano il
+kernel Linux:
+
+ - A seconda della versione del compilatore C che state utilizzando, diverse
+ strutture dati del kernel avranno un allineamento diverso, e possibilmente
+ un modo diverso di includere le funzioni (renderle inline oppure no).
+ L'organizzazione delle singole funzioni non è poi così importante, ma la
+ spaziatura (*padding*) nelle strutture dati, invece, lo è.
+
+ - In base alle opzioni che sono state selezionate per generare il kernel,
+ un certo numero di cose potrebbero succedere:
+
+ - strutture dati differenti potrebbero contenere campi differenti
+ - alcune funzioni potrebbero non essere implementate (per esempio,
+ alcuni *lock* spariscono se compilati su sistemi mono-processore)
+ - la memoria interna del kernel può essere allineata in differenti modi
+ a seconda delle opzioni di compilazione.
+
+ - Linux funziona su una vasta gamma di architetture di processore. Non esiste
+ alcuna possibilità che il binario di un driver per un'architettura funzioni
+ correttamente su un'altra.
+
+Alcuni di questi problemi possono essere risolti compilando il proprio modulo
+con la stessa identica configurazione del kernel, ed usando la stessa versione
+del compilatore usato per compilare il kernel. Questo è sufficiente se volete
+fornire un modulo per uno specifico rilascio su una specifica distribuzione
+Linux. Ma moltiplicate questa singola compilazione per il numero di
+distribuzioni Linux e il numero dei rilasci supportati da quest'ultime e vi
+troverete rapidamente in un incubo fatto di configurazioni e piattaforme
+hardware (differenti processori con differenti opzioni); dunque, anche per il
+singolo rilascio di un modulo, dovreste creare differenti versioni dello
+stesso.
+
+Fidatevi, se tenterete questa via, col tempo, diventerete pazzi; l'ho imparato
+a mie spese molto tempo fa...
+
+
+Interfaccia stabile nei sorgenti del kernel
+-------------------------------------------
+
+Se parlate con le persone che cercano di mantenere aggiornato un driver per
+Linux ma che non si trova nei sorgenti, allora per queste persone l'argomento
+sarà "ostico".
+
+Lo sviluppo del kernel Linux è continuo e viaggia ad un ritmo sostenuto, e non
+rallenta mai. Perciò, gli sviluppatori del kernel trovano bachi nelle
+interfacce attuali, o trovano modi migliori per fare le cose. Se le trovano,
+allora le correggeranno per migliorarle. In questo frangente, i nomi delle
+funzioni potrebbero cambiare, le strutture dati potrebbero diventare più grandi
+o più piccole, e gli argomenti delle funzioni potrebbero essere ripensati.
+Se questo dovesse succedere, nello stesso momento, tutte le istanze dove questa
+interfaccia viene utilizzata verranno corrette, garantendo che tutto continui
+a funzionare senza problemi.
+
+Portiamo ad esempio l'interfaccia interna per il sottosistema USB che ha subito
+tre ristrutturazioni nel corso della sua vita. Queste ristrutturazioni furono
+fatte per risolvere diversi problemi:
+
+ - È stato fatto un cambiamento da un flusso di dati sincrono ad uno
+ asincrono. Questo ha ridotto la complessità di molti driver e ha
+ aumentato la capacità di trasmissione di tutti i driver fino a raggiungere
+ quasi la velocità massima possibile.
+ - È stato fatto un cambiamento nell'allocazione dei pacchetti da parte del
+ sottosistema USB per conto dei driver, cosicché ora i driver devono fornire
+ più informazioni al sottosistema USB al fine di correggere un certo numero
+ di stalli.
+
+Questo è completamente l'opposto di quello che succede in alcuni sistemi
+operativi proprietari che hanno dovuto mantenere, nel tempo, il supporto alle
+vecchie interfacce USB. I nuovi sviluppatori potrebbero usare accidentalmente
+le vecchie interfacce e sviluppare codice nel modo sbagliato, portando, di
+conseguenza, all'instabilità del sistema.
+
+In entrambi gli scenari, gli sviluppatori hanno ritenuto che queste importanti
+modifiche fossero necessarie, e quindi le hanno fatte con qualche sofferenza.
+Se Linux avesse assicurato di mantenere stabile l'interfaccia interna, si
+sarebbe dovuto procedere alla creazione di una nuova, e quelle vecchie, e
+mal funzionanti, avrebbero dovuto ricevere manutenzione, creando lavoro
+aggiuntivo per gli sviluppatori del sottosistema USB. Dato che gli
+sviluppatori devono dedicare il proprio tempo a questo genere di lavoro,
+chiedergli di dedicarne dell'altro, senza benefici, magari gratuitamente, non
+è contemplabile.
+
+Le problematiche relative alla sicurezza sono molto importanti per Linux.
+Quando viene trovato un problema di sicurezza viene corretto in breve tempo.
+A volte, per prevenire il problema di sicurezza, si sono dovute cambiare
+delle interfacce interne al kernel. Quando è successo, allo stesso tempo,
+tutti i driver che usavano quelle interfacce sono stati aggiornati, garantendo
+la correzione definitiva del problema senza doversi preoccupare di rivederlo
+per sbaglio in futuro. Se non si fossero cambiate le interfacce interne,
+sarebbe stato impossibile correggere il problema e garantire che non si sarebbe
+più ripetuto.
+
+Nel tempo le interfacce del kernel subiscono qualche ripulita. Se nessuno
+sta più usando un'interfaccia, allora questa verrà rimossa. Questo permette
+al kernel di rimanere il più piccolo possibile, e garantisce che tutte le
+potenziali interfacce siano state verificate nel limite del possibile (le
+interfacce inutilizzate sono impossibili da verificare).
+
+
+Cosa fare
+---------
+
+Dunque, se avete un driver per il kernel Linux che non si trova nei sorgenti
+principali del kernel, come sviluppatori, cosa dovreste fare? Rilasciare un
+file binario del driver per ogni versione del kernel e per ogni distribuzione,
+è un incubo; inoltre, tenere il passo con tutti i cambiamenti del kernel è un
+brutto lavoro.
+
+Semplicemente, fate sì che il vostro driver per il kernel venga incluso nei
+sorgenti principali (ricordatevi, stiamo parlando di driver rilasciati secondo
+una licenza compatibile con la GPL; se il vostro codice non ricade in questa
+categoria: buona fortuna, arrangiatevi, siete delle sanguisughe).
+
+If your driver is in the kernel tree and an interface changes, the
+driver will be fixed immediately by the person who changed it. This
+guarantees that the driver always builds and works, all with minimal
+effort on your part.
+
+Having your driver in the main kernel tree has the following
+advantages:
+
+ - The quality of the driver will rise, and the maintenance costs (for
+   the original developer) will drop.
+ - Other developers will add new features to your driver.
+ - Other people will find and fix bugs in your driver.
+ - Other people will find improvements to make to your driver.
+ - Other people will update the driver whenever an interface change
+   requires it.
+ - The driver is automatically made available in all Linux
+   distributions, without having to ask any of them to add it.
+
+Given that Linux supports more devices than any other operating
+system, and that those devices run on more processor types than any
+other operating system, this development model must, after all, be
+doing something right :)
+
+
+
+------
-   TODO still to be translated
+Thanks to Randy Dunlap, Andrew Morton, David Brownell, Hanna Linder,
+Robert Love, and Nishanth Aravamudan for their review of and comments
+on early drafts of this article.
diff --git a/Documentation/translations/it_IT/process/submit-checklist.rst b/Documentation/translations/it_IT/process/submit-checklist.rst
index b6b4dd94a660..70e65a7b3620 100644
--- a/Documentation/translations/it_IT/process/submit-checklist.rst
+++ b/Documentation/translations/it_IT/process/submit-checklist.rst
@@ -1,12 +1,131 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/submit-checklist.rst <submitchecklist>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_submitchecklist:
-Checklist for submitting a change to the Linux kernel
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Checklist of things to verify before submitting a Linux kernel patch
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. warning::
+Here you will find a list of things that developers should do in order
+to see their patches accepted more quickly.
-   TODO still to be translated
+All of these points supplement the documentation provided about patch
+submission, in particular
+:ref:`Documentation/translations/it_IT/process/submitting-patches.rst <it_submittingpatches>`.
+
+1) If you use a kernel facility, then #include the files that
+   declare/define it. Don't depend on a header file pulling in the
+   ones you use.
+
+2) Builds cleanly (see the sketch after this list):
+
+  a) with the ``CONFIG`` options in the ``=y``, ``=m`` and ``=n``
+     states. No ``gcc`` warnings/errors and no linker warnings/errors.
+
+  b) with ``allnoconfig`` and ``allmodconfig``
+
+  c) when using ``O=builddir``
+
+3) Build for multiple processor architectures, using cross-compilation
+   tools or similar.
+
+4) ppc64 is a good architecture for cross-compilation checking,
+   because it tends to use ``unsigned long`` for 64-bit quantities.
+
+5) Check the coding style of your patch against the guidelines written
+   in :ref:`Documentation/translations/it_IT/process/coding-style.rst <it_codingstyle>`.
+   Before submitting the patch, use the style checker
+   (``scripts/checkpatch.pl``) to catch the most trivial violations.
+   You should be able to justify all the violations that remain in
+   your patch.
+
+6) Any new or modified ``CONFIG`` options do not muck up the config
+   menu and default to off, unless they meet the criteria documented
+   in ``Documentation/kbuild/kconfig-language.txt`` under
+   "Menu attributes: default value".
+
+7) All new ``Kconfig`` options have help text.
+
+8) The patch has been carefully reviewed with respect to the most
+   relevant ``Kconfig`` combinations. This is very hard to get
+   right - brainpower pays off here.
+
+9) Check cleanly with sparse.
+
+10) Use ``make checkstack`` and ``make namespacecheck`` and fix any
+    problems that they report.
+
+    .. note::
+
+       ``checkstack`` does not point out problems explicitly, but any
+       function that uses more than 512 bytes on the stack is a good
+       candidate for a fix.
+
+11) Include :ref:`kernel-doc <kernel_doc>` comments to document global
+    kernel APIs. Use ``make htmldocs`` or ``make pdfdocs`` to check
+    the :ref:`kernel-doc <kernel_doc>` comments and fix any issues.
+
+12) The patch has been tested with the following options enabled
+    simultaneously: ``CONFIG_PREEMPT``, ``CONFIG_DEBUG_PREEMPT``,
+    ``CONFIG_DEBUG_SLAB``, ``CONFIG_DEBUG_PAGEALLOC``, ``CONFIG_DEBUG_MUTEXES``,
+    ``CONFIG_DEBUG_SPINLOCK``, ``CONFIG_DEBUG_ATOMIC_SLEEP``,
+    ``CONFIG_PROVE_RCU`` and ``CONFIG_DEBUG_OBJECTS_RCU_HEAD``.
+
+13) The patch has been build- and runtime-tested with and without the
+    ``CONFIG_SMP`` and ``CONFIG_PREEMPT`` options.
+
+14) If the patch affects disk I/O and the like: it has been tested
+    with and without the ``CONFIG_LBDAF`` option.
+
+15) All code paths have been exercised with all lockdep features
+    enabled.
+
+16) All new ``/proc`` entries are documented under ``Documentation/``.
+
+17) All new kernel boot parameters are documented in
+    ``Documentation/admin-guide/kernel-parameters.rst``.
+
+18) All new module parameters are documented with ``MODULE_PARM_DESC()``.
+
+19) All new userspace interfaces are documented in
+    ``Documentation/ABI/``. See ``Documentation/ABI/README`` for more
+    information. Patches that change userspace interfaces should also
+    be copied to linux-api@vger.kernel.org.
+
+20) Check that the kernel passes ``make headers_check``.
+
+21) The patch has been checked with the injection of slab and
+    page-allocation failures. See ``Documentation/fault-injection/``.
+
+    If the new code is substantial, adding subsystem-specific fault
+    injection might be appropriate.
+
+22) The newly added code has been compiled with ``gcc -W`` (use
+    ``make EXTRA_CFLAGS=-W``). This will generate lots of noise, but
+    is good for finding bugs like "warning: comparison between signed
+    and unsigned".
+
+23) The patch has been tested after being merged into the -mm patch
+    series, to make sure that it still works together with all the
+    other queued patches and the various changes in the VM, VFS and
+    other subsystems.
+
+24) All memory barriers {e.g., ``barrier()``, ``rmb()``, ``wmb()``}
+    need a comment in the source code that explains the logic of what
+    they are doing and why.
+
+25) If the patch adds any new ioctl calls, then also update
+    ``Documentation/ioctl/ioctl-number.txt``.
+
+26) If your modified source code depends on or uses any kernel
+    interface or feature that is associated with any of the following
+    ``Kconfig`` symbols, then test multiple builds with the related
+    ``Kconfig`` symbols disabled and/or ``=m`` (if the option is
+    available) [not all at the same time, just various random
+    combinations of them]:
+
+    ``CONFIG_SMP``, ``CONFIG_SYSFS``, ``CONFIG_PROC_FS``, ``CONFIG_INPUT``,
+    ``CONFIG_PCI``, ``CONFIG_BLOCK``, ``CONFIG_PM``, ``CONFIG_MAGIC_SYSRQ``,
+    ``CONFIG_NET``, ``CONFIG_INET=n`` (but the latter with ``CONFIG_NET=y``).
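+
+As a quick illustration, a few of the checks above (items 2b, 5 and 9)
+can be driven from the shell roughly like this; the patch file name
+``0001-example.patch`` is only a placeholder::
+
+    # item 2b: build with minimal and with fully modular configurations
+    make allnoconfig && make
+    make allmodconfig && make
+
+    # item 5: run the style checker on the patch before sending it
+    ./scripts/checkpatch.pl 0001-example.patch
+
+    # item 9: rebuild while running sparse on every C file
+    make C=2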
diff --git a/Documentation/translations/it_IT/process/submitting-drivers.rst b/Documentation/translations/it_IT/process/submitting-drivers.rst
index 16df950ef808..dadd77e47613 100644
--- a/Documentation/translations/it_IT/process/submitting-drivers.rst
+++ b/Documentation/translations/it_IT/process/submitting-drivers.rst
@@ -1,12 +1,16 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/submitting-drivers.rst <submittingdrivers>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_submittingdrivers:
Submitting drivers for the Linux kernel
=======================================
-.. warning::
+.. note::
-   TODO still to be translated
+   This document is old and has not been updated in recent years; it
+   should be updated or, perhaps better, removed. Most of what it
+   says can also be found in the other documents dedicated to
+   development. For this reason the document will not be translated.
diff --git a/Documentation/translations/it_IT/process/submitting-patches.rst b/Documentation/translations/it_IT/process/submitting-patches.rst
index d633775ed556..2ab9c1401aa1 100644
--- a/Documentation/translations/it_IT/process/submitting-patches.rst
+++ b/Documentation/translations/it_IT/process/submitting-patches.rst
@@ -1,13 +1,867 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
-
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_submittingpatches:
-Submitting changes: the essential guide to getting your code into the kernel
-=============================================================================
+Submitting patches: the essential guide to getting your code into the kernel
+=============================================================================
+
+A person or company wishing to submit a patch to the kernel might feel
+daunted by the submission process, especially when some familiarity
+with "the system" is missing. This text is a collection of
+suggestions which will significantly increase the chances of your
+patches being accepted.
+
+This document contains a large number of concise suggestions. For
+more details on how the kernel development process works, read
+:ref:`Documentation/translations/it_IT/process <it_development_process_main>`.
+Also read :ref:`Documentation/translations/it_IT/process/submit-checklist.rst <it_submitchecklist>`
+for a list of items to check before submitting code. If you are
+submitting a driver, then also read :ref:`Documentation/translations/it_IT/process/submitting-drivers.rst <it_submittingdrivers>`;
+for patches related to Device Tree bindings read
+Documentation/devicetree/bindings/submitting-patches.txt.
+
+Many of these steps describe the default behaviour of the ``git``
+version control system; if you use ``git`` to prepare your patches,
+you will find much of the repetitive work already done for you, though
+you will still have to prepare and document a sensible set of patches.
+In general, using ``git`` will make your life as a kernel developer
+easier.
+
+0) Obtain a current source tree
+-------------------------------
+
+If you do not have a repository with the latest kernel sources, use
+``git`` to obtain one. You will want to start with the mainline
+repository, which can be fetched with::
+
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+Note, however, that you may not want to develop directly against the
+mainline kernel tree. Most subsystem maintainers have their own trees
+and want patches prepared against them. Look at the **T:** entry for
+the subsystem in question in the MAINTAINERS file found in the source
+tree, or simply ask the maintainer if the tree to use is not listed in
+that file.
+
+It is still possible to download a kernel release as a tar archive (as
+described in one of the following sections), but that is the harder
+way to do kernel development.
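+
+For completeness, fetching a release tarball might look like the
+following sketch (the version number is only an example)::
+
+    wget https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.1.tar.xz
+    tar xf linux-5.1.tar.xz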
+
+1) ``diff -up``
+---------------
+
+If you have to produce your patches by hand, use ``diff -up`` or
+``diff -uprN`` to create them. Git produces patches in this format by
+default; if you are using ``git``, you can skip this section entirely.
+
+All changes to the Linux kernel occur in the form of patches, as
+described in :manpage:`diff(1)`. When creating your patch, make sure
+to create it in "unified diff" format, as supplied by the ``-u``
+argument to :manpage:`diff(1)`.
+Also, please use the ``-p`` argument to show which C function each
+change refers to - that makes the resulting ``diff`` much easier to
+read. Patches should be based on the root of the kernel source tree,
+not on any of its subdirectories.
+
+To create a patch for a single file, it is often sufficient to do::
+
+ SRCTREE= linux
+ MYFILE= drivers/net/mydriver.c
+
+ cd $SRCTREE
+ cp $MYFILE $MYFILE.orig
+ vi $MYFILE # make your change
+ cd ..
+ diff -up $SRCTREE/$MYFILE{.orig,} > /tmp/patch
+
+To create a patch for multiple files, you should unpack a "vanilla",
+i.e. unmodified, source tree and run ``diff`` against your own.
+For example::
+
+ MYSRC= /devel/linux
+
+ tar xvfz linux-3.19.tar.gz
+ mv linux-3.19 linux-3.19-vanilla
+ diff -uprN -X linux-3.19-vanilla/Documentation/dontdiff \
+ linux-3.19-vanilla $MYSRC > /tmp/patch
+
+``dontdiff`` is a list of files that are generated during the kernel
+build process; they should be ignored in any patch generated with
+:manpage:`diff(1)`.
+
+Make sure your patch does not include any file that does not genuinely
+belong to it. To check its correctness, also make sure to review your
+patch -after- generating it with :manpage:`diff(1)`.
+
+If your changes produce a lot of deltas, you will have to split them
+into independent patches that modify things in logical stages; read
+:ref:`it_split_changes`. This will make review by other developers
+easier, which is very important if you want the patch accepted.
+
+If you are using ``git``, ``git rebase -i`` can help you with this
+process. If you are not using ``git``, a popular alternative is
+``quilt``
+<http://savannah.nongnu.org/projects/quilt>.
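+
+With ``git``, the same results can be obtained with commands along
+these lines (a sketch, not the only possible workflow)::
+
+    # one mailable patch per commit, here for the two most recent commits
+    git format-patch -2
+
+    # or a plain unified diff of the not-yet-committed changes
+    git diff > /tmp/patch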
+
+.. _it_describe_changes:
+
+2) Describe your changes
+------------------------
+
+Describe your problem. There is always a problem that prompted you to
+do this work, whether it is a one-line bug fix or a new 5000-line
+feature. Convince the reviewers that the problem is worth fixing and
+that it makes sense to read past the first paragraph.
+
+Describe what will be visible to users. Straight-up system crashes
+and lockups are fairly convincing, but not all bugs are that blatant.
+Even if the problem was spotted during code review, describe the
+impact it will have on users. Keep in mind that most Linux
+installations run kernels that come from the stable trees or from a
+particular distribution's tree that cherry-picks individual patches
+from mainline; so include everything that can help understand your
+changes: the circumstances that trigger the problem, excerpts from
+dmesg, descriptions of a system crash, performance regressions,
+latency spikes, lockups, etc.
+
+Quantify optimizations and trade-offs. If you claim improvements in
+performance, memory consumption, stack footprint, or binary size,
+include numbers that back up your claim. But also remember to
+describe any non-obvious costs. Optimizations are usually not free,
+but a trade-off between CPU, memory, and readability; or, when it
+comes to heuristics, between different workloads. Describe the
+downsides you expect from the optimization, so that the reviewers can
+weigh the costs against the benefits.
+
+Once the problem is clear, describe in technical detail how you are
+solving it. It is very important that you describe the change in
+plain English, so that reviewers can verify that the code behaves as
+described.
+
+Maintainers will be grateful if you write your patch description in a
+form that can be pulled into the source code management system used by
+the kernel, ``git``, as a "commit log". See
+:ref:`it_explicit_in_reply_to`.
+
+Solve only one problem per patch. If your description starts getting
+long, that is a sign that your patch probably needs to be split up.
+See :ref:`it_split_changes`.
+
+When you send or resend a patch or a series, include the complete
+description of the changes and their justification. Don't just say
+that this is version N of the patch (or series). Don't expect the
+subsystem maintainers to dig through previous versions looking for the
+description to add. In other words, the patch (or series) and its
+description should be self-contained. This helps the maintainers and
+the reviewers. Some reviewers probably did not even receive or see
+the previous versions of the patch.
+
+Describe your changes in the imperative mood, e.g. "make xyzzy do
+frotz" instead of "[This patch] makes xyzzy do frotz" or "[I] changed
+xyzzy to do frotz", as if you were giving the codebase orders to
+change its behaviour.
+
+If the patch fixes a known bug, refer to that bug by its number or its
+URL. If the patch follows from a mailing list discussion, then give
+the URL of the archive of that discussion; use the links to
+https://lkml.kernel.org/ with the ``Message-Id``, so you can be sure
+the link will not become stale over time.
+
+However, try to make your explanation understandable even without
+external references. In addition to the links to bugs and mailing
+lists, summarize the most important points of the discussion that led
+to the creation of the patch.
+
+If you want to refer to a specific commit, don't use only the SHA-1
+ID. Please also add the one-line summary of the commit, to make it
+clear to reviewers what it is about.
+For example::
+
+    Commit e21d2170f36602ae2708 ("video: remove unnecessary
+    platform_set_drvdata()") removed the unnecessary
+    platform_set_drvdata(), but left the variable "dev" unused,
+    delete it.
+
+You should also make sure to use at least the first 12 characters of
+the SHA-1 ID. The kernel repository holds *a lot* of objects, which
+makes collisions between shorter IDs a real possibility. Bear in mind
+that even if there is no collision with your 6-character ID today,
+there may be one five years from now.
+
+If your patch fixes a bug in a specific commit, e.g. you found a
+problem using ``git bisect``, please use the 'Fixes:' tag with the
+first 12 characters of the SHA-1 ID followed by the one-line summary.
+For example::
+
+    Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
+
+The following ``git config`` settings can be used to format the output
+of the ``git log`` or ``git show`` commands as in the example above::
+
+ [core]
+ abbrev = 12
+ [pretty]
+ fixes = Fixes: %h (\"%s\")
+
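+With that configuration in place, a single command prints a ready-made
+``Fixes:`` line (the commit ID below is illustrative)::
+
+    $ git log -1 --pretty=fixes 54a4f0239f2e
+    Fixes: 54a4f0239f2e ("KVM: MMU: make kvm_mmu_zap_page() return the number of pages it actually freed")
+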
+.. _it_split_changes:
+
+3) Separate your changes
+------------------------
+
+Separate each **logical change** into a separate patch.
+
+For example, if your changes for a single driver include both bug
+fixes and performance enhancements, separate them into two or more
+patches. If your changes include an API update and a new driver that
+uses it, separate them into two patches.
+
+On the other hand, if you make a single change to several files, group
+all those changes into a single patch. Thus a single logical change
+is contained in a single patch.
+
+The point to remember is that each patch should make changes that are
+easily understood and that can be verified by reviewers. Each patch
+should be justifiable on its own merits.
+
+If one patch depends on another in order for a change to be complete,
+that is fine. Simply note **"this patch depends on patch X"** in the
+patch description.
+
+When dividing your changes into a series of patches, take special care
+to verify every patch in the series; for each one the kernel must
+build and run correctly. Developers using ``git bisect`` to track
+down a problem may end up in the middle of your series at any point;
+they will not thank you if you introduced bugs half way through.
+
+If you cannot condense your patch series into a smaller one, then post
+around fifteen at a time and wait for them to be reviewed and
+integrated.
+
+
+4) Style-check your changes
+---------------------------
+
+Check that your patch does not violate the coding style; details are
+available in :ref:`Documentation/translations/it_IT/process/coding-style.rst <it_codingstyle>`.
+Failing to do so simply wastes the reviewers' time and will get your
+patch rejected, probably without even being read.
+
+One important exception is when code is moved from one file to
+another -- in that case you should not modify the moved code at all,
+at least not in the patch that moves it. This clearly separates the
+act of moving the code from your changes. It greatly helps the review
+of the real differences and allows tools to better track the history
+of the code itself.
+
+Before submitting a patch, check its style with the style checker
+(scripts/checkpatch.pl). Note, though, that the style checker should
+be seen as a guide, not as a substitute for human judgement. If your
+code is better despite a style violation, it is probably best left
+alone.
+
+The checker reports at three severity levels:
+ - ERROR: things are very likely to be wrong
+ - WARNING: things need to be reviewed carefully
+ - CHECK: things need some thought
+
+You should be able to justify all the violations that remain in
+your patch.
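+
+A quick sketch of running the checker on a patch file (the file name
+is illustrative); ``--strict`` also enables the CHECK level messages::
+
+    $ ./scripts/checkpatch.pl --strict 0001-example.patch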
+
+
+5) Select the recipients of your patch
+--------------------------------------
+
+You should always send a copy of the patch to the maintainers of the
+subsystems affected by the changes; have a look at the MAINTAINERS
+file and the revision history to find out who looks after the code.
+The script scripts/get_maintainer.pl can help here. If you cannot
+find a maintainer for the subsystem you are working on, then Andrew
+Morton (akpm@linux-foundation.org) is your last resort.
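+
+For example, the script can be pointed either at a patch file or, with
+``-f``, at a source file (both names below are illustrative)::
+
+    $ ./scripts/get_maintainer.pl 0001-example.patch
+    $ ./scripts/get_maintainer.pl -f drivers/net/mydriver.c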
+
+Normally, you should also pick a mailing list to send your patch
+series to. The linux-kernel@vger.kernel.org list really is the list
+of last resort; the volume of e-mail on it means that several
+developers do not follow it. Look in the MAINTAINERS file for the
+mailing list dedicated to a subsystem; your patch will probably get
+much more attention there. Please do not spam lists that are not
+interested in your work, though.
+
+Many kernel-related mailing lists are hosted on vger.kernel.org; you
+can find a list of them at http://vger.kernel.org/vger-lists.html.
+There are, however, mailing lists hosted elsewhere.
+
+Do not send more than 15 patches at a time to the vger mailing
+lists!!!
+
+The final say on integrating accepted changes belongs to Linus
+Torvalds. His e-mail address is <torvalds@linux-foundation.org>.
+He gets a huge amount of e-mail and, at this point, very few patches
+pass directly through his judgement; so you should do your best to
+-avoid- sending him e-mail.
+
+If you have a patch that fixes an exploitable security bug, send it to
+security@kernel.org. For severe bugs, a short embargo may be
+considered, to give distributions time to pick up the patch and make
+it available to their users; in that case, obviously, the patch should
+not be sent to any public mailing list.
+
+Patches that fix severe bugs in an already released kernel should be
+directed to the stable kernel maintainers by adding the following
+line::
+
+    Cc: stable@vger.kernel.org
+
+in the sign-off area of your patch (note, NOT as an e-mail recipient).
+In addition to this file, you should also read
+:ref:`Documentation/translations/it_IT/process/stable-kernel-rules.rst <it_stable_kernel_rules>`
+
+Note, however, that some subsystem maintainers prefer to have the last
+word on which patches should go to the stable kernels. The networking
+maintainer, in particular, would rather not see individual developers
+adding lines like the one above to their patches.
+
+If the changes affect the interface with user space, please send the
+man-pages maintainers (listed in the MAINTAINERS file) a patch for the
+man pages, or at least a notification of your change, so that the
+information can find its way into the manual. User-space API changes
+should also be copied to linux-api@vger.kernel.org.
+
+For small patches you may CC the *Trivial Patch Monkey
+trivial@kernel.org*, which collects "trivial" patches. Have a look at
+the MAINTAINERS file to see who the current administrator is.
+
+Trivial patches must fall into one of the following categories:
+
+- spelling fixes in documentation
+- spelling fixes in error messages that could break :manpage:`grep(1)`
+- warning fixes (filling up with useless warnings is bad)
+- compilation fixes (only if they actually fix something)
+- removal of deprecated functions/macros
+- replacement of non-portable code with portable code (even in
+  architecture-specific code, since people copy, as long as the change
+  is trivial)
+- any change by the author/maintainer of a file (i.e. "patch monkey"
+  in re-transmission mode)
+
+
+6) No MIME, no links, no compression, no attachments. Just plain text
+----------------------------------------------------------------------
+
+Linus and the other kernel developers need to be able to comment on
+the changes you submit. For a developer it is important to be able
+to "quote" your changes, using ordinary e-mail programs, so that it
+is possible to comment on a specific portion of your code.
+
+For this reason all patches must be sent by e-mail
+as text.
.. warning::
-   TODO still to be translated
+   If you choose to copy and paste the patch into the body of the
+   e-mail, beware of your mail program corrupting the contents with
+   automatic line wrapping.
+
+The patch must not be a MIME attachment, compressed or not. Many of
+the most popular e-mail programs do not transmit a MIME attachment as
+plain text, and that makes commenting on your code impossible. A MIME
+attachment also makes Linus's work more laborious, reducing the
+likelihood of your MIME-attached change being accepted.
+
+Exception: if your mail provider mangles patches, then someone may ask
+you to resend them as MIME attachments.
+
+Read :ref:`Documentation/translations/it_IT/process/email-clients.rst <it_email_clients>`
+for hints on configuring your e-mail program so that it sends your
+patches untouched.
+
+7) E-mail size
+--------------
+
+Large changes are not appropriate for mailing lists, nor for some
+maintainers. If your patch, uncompressed, exceeds 300 kB, then upload
+it to an Internet-accessible location and provide the URL (link) to
+it. But note that if your patch exceeds 300 kB, it almost certainly
+needs to be split up anyway.
+
+8) Respond to review comments
+-----------------------------
+
+Reviewers will almost certainly send you comments on how to improve
+your patch. You must respond to those comments; ignoring reviewers is
+an excellent way to get ignored yourself. Feedback or questions that
+do not lead to a code change should almost certainly bring about a
+comment in the changelog, so that the next reviewer can better
+understand what is going on.
+
+Be sure to tell the reviewers what changes you are making and to thank
+them for their time. Reviewing code is tiring and time-consuming
+work, and reviewers sometimes get grumpy. Even in that case, though,
+respond politely and focus on the problem they have pointed out.
+
+9) Don't get discouraged - or impatient
+---------------------------------------
+
+After you have submitted your changes, be patient and wait. Reviewers
+are busy people and may not get to your patch right away.
+
+Once upon a time, patches used to disappear into the void without a
+comment, but the development process works better now. You should
+receive comments within a week or so; if that does not happen, make
+sure you sent the patches correctly. Wait at least one week before
+resubmitting the changes or pinging the reviewers - probably even
+longer during the merge window.
+
+10) Add PATCH to the subject
+----------------------------
+
+Due to the high volume of e-mail to Linus and to the linux-kernel
+list, it is conventional to prefix your subject with [PATCH]. This
+lets Linus and the other kernel developers easily tell patches apart
+from other discussions.
+
+
+11) Sign your work - the Developer's Certificate of Origin
+----------------------------------------------------------
+
+To improve the tracking of "who did what", especially for patches that
+pass through several layers of maintainers before reaching their final
+destination, we have introduced a "sign-off" procedure for patches
+sent around by e-mail.
+
+The sign-off is a simple line at the end of the patch description that
+certifies that you wrote it or otherwise have the right to pass it on
+as an open-source patch. The rules are fairly simple: if you can
+certify the following:
+
+Developer's Certificate of Origin 1.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By making a contribution to this project, I certify that:
+
+ (a) The contribution was created in whole or in part by me and I
+     have the right to submit it under the open-source licence
+     indicated in the file; or
+
+ (b) The contribution is based upon previous work that, to the best
+     of my knowledge, is covered under an appropriate open-source
+     licence that gives me the right to modify and submit it, with
+     modifications made in whole or in part by me, under the
+     open-source licence (unless I am permitted to submit under a
+     different licence) indicated in the file; or
+
+ (c) The contribution was provided directly to me by some other
+     person who certified (a), (b) or (c) and I have not modified it.
+
+ (d) I understand and agree that this project and the contribution
+     are public, and that a record of the contribution (including all
+     personal information I submit with it, including my sign-off)
+     will be maintained indefinitely and may be redistributed
+     consistent with this project or the open-source licences
+     involved.
+
+then you just add a line saying::
+
+ Signed-off-by: Random J Developer <random@developer.example.org>
+
+using your real name (sorry, no pseudonyms or anonymous contributions
+are accepted).
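+
+If you commit with ``git``, the sign-off line can be added for you; a
+small sketch::
+
+    $ git commit -s            # new commit, with your Signed-off-by
+    $ git commit --amend -s    # add it to the most recent commit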
+
+Some people also add extra tags at the end. They will be ignored for
+now, but you can do this to mark internal company procedures or to add
+details about the sign-off.
+
+If you are a subsystem or branch maintainer, sometimes you will have
+to slightly modify the patches you receive in order to merge them;
+this is because the code is not exactly the same in your tree and in
+your contributors'. If you stick rigidly to rule (c), you should ask
+the submitter to rediff, but that is counter-productive and a total
+waste of time and energy. Rule (b) allows you to adjust the code, but
+then it is very impolite to change somebody's patch and make them
+carry the blame for your bugs. To solve this problem, you should add
+a line, between the last Signed-off-by and your own, explaining your
+change. While there is nothing mandatory about it, an effective way
+is to put your name or e-mail address in square brackets, followed by
+a short description; this makes it visible enough who is responsible
+for the last-minute changes. For example::
+
+ Signed-off-by: Random J Developer <random@developer.example.org>
+ [lucky@maintainer.example.org: struct foo moved from foo.c to foo.h]
+ Signed-off-by: Lucky K Maintainer <lucky@maintainer.example.org>
+
+This practice is particularly useful if you maintain a stable branch
+and want at the same time to credit the authors, track and merge the
+changes, and protect the submitters from complaints. Note that under
+no circumstances may you change the author's identity (the From
+header), since it is the one that appears in the changelogs.
+
+A special note for back-porters. It seems to be a common and useful
+practice to insert an indication of the origin of the patch at the top
+of the commit message (right after the subject line) to improve
+tracking. For example, this is what we see in a 3.x-stable release::
+
+ Date: Tue Oct 7 07:26:38 2014 -0400
+
+ libata: Un-break ATA blacklist
+
+ commit 1c40279960bcd7d52dbdf1d466b20d24b99176c8 upstream.
+
+And here is what might be seen on an older kernel where the patch has
+been backported::
+
+ Date: Tue May 13 22:12:27 2008 +0200
+
+ wireless, airo: waitbusy() won't delay
+
+ [backport of 2.6 commit b7acbdfbd1f277c1eb23f344f899cfa4cd0bf36a]
+
+Whatever the format, this information provides valuable help to the
+people tracking your trees, and to those hunting for bugs.
+
+
+12) When to use Acked-by:, Cc:, and Co-developed-by:
+----------------------------------------------------
+
+The Signed-off-by: tag indicates that the signer was involved in the
+development of the patch, or was in its delivery path.
+
+If a person was not directly involved in the preparation or handling
+of the patch, but wishes to sign off on and record their approval of
+it, then they can ask to have an Acked-by: line added to the patch's
+changelog.
+
+Acked-by: is often used by the maintainer of the affected subsystem
+when that maintainer has neither contributed to nor forwarded the
+patch.
+
+Acked-by: is not as formal as Signed-off-by:. It indicates that the
+person has reviewed the patch and found it acceptable. Hence patch
+mergers will sometimes convert a "yep, looks good to me" into an
+Acked-by: (but keep in mind that it is usually better to ask
+explicitly).
+
+Acked-by: does not indicate acceptance of an entire patch. For
+example, when a patch affects several subsystems and has an Acked-by:
+from the maintainer of one of them, it means that the maintainer
+accepts the part of the code relating to the subsystem they maintain.
+Judgement should be used here. When in doubt, refer to the original
+discussion in the mailing list archives.
+
+If a person has had the opportunity to comment on a patch but has not
+done so, you may add the ``Cc:`` tag to the patch. This is the only
+tag that can be added without any action on the part of the person in
+question - but it should indicate that this person received a copy of
+the patch. This tag documents that potentially interested third
+parties have been included in the discussion.
+
+The Co-developed-by: tag indicates that the patch was written by the
+author in collaboration with another developer. It is sometimes
+useful when several people work on the same patch. Note, this person
+must also have a Signed-off-by: line in the patch.
+
+
+13) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
+--------------------------------------------------------------------------
+
+The Reported-by tag gives credit to the people who find and report
+bugs, in the hope that this will inspire them to help us again in the
+future. Remember that if the bug was reported in private, you will
+have to ask for permission before you can use the Reported-by tag.
+
+The Tested-by: tag indicates that the patch was successfully tested
+(on some system) by the person named. This tag informs maintainers
+that some testing has been done, provides a way to find people who
+can test the code in the future, and ensures that those people get
+credit for their work.
+
+Reviewed-by:, instead, indicates that the patch has been reviewed and
+found acceptable according to the reviewers' statement:
+
+Reviewer's statement of oversight
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By offering my Reviewed-by: tag, I state that:
+
+ (a) I have carried out a technical review of this patch to evaluate
+     its appropriateness for inclusion in the mainline kernel.
+
+ (b) Any problems and questions relating to the patch have been
+     communicated to the submitter. I am satisfied with the
+     submitter's responses.
+
+ (c) While there may be things that could be improved in this
+     submission, I believe that it is, at this time, (1) a worthwhile
+     change for the kernel, and (2) free of issues that would argue
+     against its integration.
+
+ (d) While I have reviewed the patch and believe it to be sound, I do
+     not (unless explicitly stated elsewhere) guarantee that it will
+     achieve what it promises or work correctly in every possible
+     situation.
+
+The Reviewed-by tag is a statement of opinion that the change is an
+appropriate modification, free of major technical issues. Any
+interested reviewer (who has done the work) can offer a Reviewed-by
+tag for a patch. This tag serves to give credit to reviewers and to
+inform maintainers of the level of review that has been done on the
+patch. The Reviewed-by tag, when supplied by reviewers known for
+their knowledge of the subject matter and for the thoroughness of
+their reviews, will increase the likelihood of your patch being
+integrated into the kernel.
+
+The Suggested-by: tag indicates that the idea for the patch was
+suggested by the person named, and gives them credit for it. Keep in
+mind that this tag should not be added without explicit permission,
+especially if the idea was not posted in a public forum. That said,
+by crediting the people who give us ideas, we hope to inspire them to
+help us again in the future.
+
+The Fixes: tag indicates that the patch fixes a problem in a previous
+commit. It serves to clarify the origin of a bug, which helps with
+reviewing the bug fix. This tag also helps the stable kernel
+maintainers work out which kernels should receive the fix. This is
+the preferred way to indicate that a bug was fixed by the patch. For
+more details read :ref:`it_describe_changes`
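+
+Putting the tags from the last two sections together, the tail of a
+patch description might look like this purely hypothetical example
+(all names and the commit ID are invented)::
+
+    Fixes: 1234567890ab ("example: fix off-by-one in foo_bar()")
+    Reported-by: Alice Reporter <alice@example.com>
+    Reviewed-by: Bob Reviewer <bob@example.com>
+    Signed-off-by: Carol Developer <carol@example.com>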
+
+
+14) The canonical patch format
+------------------------------
+
+This section describes the format that should be used for patches.
+Note that if you keep your patches in a ``git`` repository, you can
+use the ``git format-patch`` command to obtain patches in the
+appropriate format. The tool does not create the necessary text,
+though, so read the following instructions anyway.
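+
+For instance, a command along these lines (the numbers are
+illustrative) generates a properly formatted, versioned three-patch
+series with a cover letter::
+
+    $ git format-patch -v2 --cover-letter -3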
+
+The canonical patch subject line is::
+
+ Subject: [PATCH 001/123] subsystem: summary phrase
+
+The canonical patch message body contains the following elements:
+
+ - A ``from`` line specifying the patch author, followed by an empty
+   line (needed only if the person sending the patch is not the
+   author).
+
+ - The body of the explanation, with lines no longer than 75
+   characters, which will be copied permanently into the changelog to
+   describe the patch.
+
+ - An empty line.
+
+ - The ``Signed-off-by:`` lines, described above, which will also end
+   up in the changelog.
+
+ - A marker line containing simply ``---``.
+
+ - Any additional comment that should not end up in the changelog.
+
+ - The actual code changes (the output of ``diff``).
+
+The format used for the subject line makes it easy for e-mail programs
+to sort the patches alphabetically - pretty much every e-mail program
+supports this - because the sequence number is zero-padded, so the
+numerical and alphabetical orders coincide.
+
+The ``subsystem`` in the e-mail subject should identify the area or
+subsystem being modified by the patch.
+
+The ``summary phrase`` in the e-mail subject should concisely describe
+the content of the patch. The ``summary phrase`` should not be a file
+name. Do not use the same ``summary phrase`` for every patch in a
+series (where a ``patch series`` is an ordered sequence of several
+related patches).
+
+Bear in mind that the ``summary phrase`` of your e-mail will become a
+globally unique identifier for that patch. It propagates all the way
+into the ``git`` changelog. The ``summary phrase`` may later be used
+by developers to refer to the patch. People will want to search the
+internet for the ``summary phrase`` to read the discussions regarding
+it. It may also be the only thing people see when, two or three
+months later, they are going through hundreds of patches using tools
+such as ``gitk`` or ``git log --oneline``.
+
+For these reasons, it should be between 70 and 75 characters long, and
+it must describe both what is changed and why it is necessary. Being
+both succinct and descriptive is quite a challenge, but that is what
+a well-written summary does.
+
+The ``summary phrase`` may be prefixed by a tag enclosed in square
+brackets: "Subject: [PATCH <tag>...] <summary phrase>".
+Tags are not considered part of the summary phrase, but indicate how
+the patch should be treated. Common tags include version descriptors,
+used when a patch has been sent several times (for example, "v1, v2,
+v3"), or "RFC" to indicate that comments are requested (*Request For
+Comments*). If there are four patches in the series, they should be
+numbered like this: 1/4, 2/4, 3/4, 4/4. This makes sure that
+developers understand the order in which the patches should be
+applied, and lets them track which ones they have reviewed or applied.
+
+A couple of example subjects::
+
+ Subject: [PATCH 2/5] ext2: improve scalability of bitmap searching
+ Subject: [PATCH v2 01/27] x86: fix eflags tracking
+
+The ``from`` line must be the very first line in the message body,
+and has the form:
+
+ From: Original Author <author@example.com>
+
+The ``from`` line specifies who will be credited in the permanent
+changelog as the author of the patch. If the ``from`` line is
+missing, then the ``From`` line in the e-mail header will be used to
+determine the author to put in the changelog.
+
+The explanation body will be included in the permanent changelog, so
+it must make sense to a competent reader who has long since forgotten
+the details of the discussion that led to the patch. Including the
+symptoms of the problem the patch addresses (kernel messages, oops
+messages, etc.) is especially useful for people who might search the
+commit logs for the patch that deals with them. If the patch fixes a
+compile error, it is not necessary to include absolutely _everything_
+the compiler emitted; just enough that your patch is likely to be
+found. As with the ``summary phrase``, it is important to be both
+succinct and descriptive.
+
+The ``---`` marker line essentially serves to mark where the
+changelog message ends.
+
+Adding a ``diffstat`` after the ``---`` is a good use of this space,
+to show the files that have changed, and the number of lines added
+and removed per file. A ``diffstat`` is especially useful for large
+patches. Other comments that matter only to the maintainers, and are
+therefore unsuitable for the permanent changelog, should also go
+here. A good example of this kind of comment is describing the
+differences between versions of the patch.
+
+If you include a ``diffstat`` after the ``---``, use the
+``-p 1 -w 70`` options, so that the file names listed do not take up
+too much space (they will easily fit within 80 characters, maybe with
+some indentation).
+(``git`` generates suitable diffstats by default.)
+
+More details on the proper patch format can be found in the
+references below.
+
+.. _it_explicit_in_reply_to:
+
+15) Explicit In-Reply-To headers
+--------------------------------
+
+Manually adding In-Reply-To: headers to an e-mail can be helpful to
+associate a patch with an earlier discussion, for example to link a
+bug fix to the e-mail that reported the bug. However, for a
+multi-patch series it is generally best to avoid using In-Reply-To:
+to link to previous versions. That way multiple versions of a patch
+do not become an unmanageable jungle of references inside e-mail
+programs. If a link is helpful, you can use https://lkml.kernel.org/
+to obtain links to an earlier version of the patch series (for
+example, you can use it for the introductory e-mail of the series).
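+
+When mailing patches with ``git send-email``, the header can be set
+explicitly; the Message-Id below is a placeholder::
+
+    $ git send-email --in-reply-to='<20190101120000.1234-1-me@example.com>' \
+          0001-example.patch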
+
+16) Sending ``git pull`` requests
+---------------------------------
+
+If you have a series of patches, it may be more convenient for a
+maintainer to pull them into the subsystem repository with a
+``git pull`` operation. Keep in mind, however, that taking patches
+from a developer this way requires a higher level of trust than
+taking them from a mailing list. As a result, many maintainers are
+reluctant to accept pull requests, especially from new, unknown
+developers. If in doubt, you can make the pull request the cover
+letter of an ordinary posting of the patch series, so the maintainer
+can choose how to integrate them.
+
+A pull request should have [GIT] or [PULL] in the subject. The
+request itself should include the repository name and the branch on a
+single line; it should look more or less like this::
+
+ Please pull from
+
+ git://jdelvare.pck.nerim.net/jdelvare-2.6 i2c-for-linus
+
+ to get these changes:
+
+A pull request should also include an overall message saying what is
+included, a list of the patches made with ``git shortlog``, and an
+overview of the effects of the series made with ``diffstat``. The
+easiest way to gather all this information is, of course, to let
+``git`` do it for you with the ``git request-pull`` command.
+
+Some maintainers (including Linus) want to see pull requests for
+GPG-signed commits; this gives a stronger guarantee that it was
+really you who made the request. In the absence of such a signed
+tag, Linus, in particular, will not pull from public sites like
+GitHub.
+
+The first step toward creating such a signed tag is to create a GNUPG
+key and have it signed by one or more core kernel developers. This
+may be hard for new developers, but there is no way around it.
+Attending conferences can be a good way to find developers who can
+sign your key.
+
+Once you have prepared, in ``git``, the patch series you want someone
+to pull, create a signed tag with the ``git tag -s`` command. This
+creates a new tag identifying the last commit of the series,
+containing a signature made with your private key. You will also
+have the chance to add a changelog message to the tag; this is the
+ideal place to describe the effects of the pull request.
+
+If the tree the maintainer will pull from is not the repository you
+are working on, then do not forget to push the signed tag to the
+public tree as well.
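+
+A short sketch of these two steps (the tag name, message and remote
+are illustrative)::
+
+    $ git tag -s -m "Fixes for the foo subsystem" my-signed-tag
+    $ git push origin my-signed-tag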
+
+When generating your pull request, use the signed tag as the target.
+A command like the following will do the job::
+
+ git request-pull master git://my.public.tree/linux.git my-signed-tag
+
+
+References
+----------
+
+Andrew Morton, "The perfect patch" (tpp).
+ <http://www.ozlabs.org/~akpm/stuff/tpp.txt>
+
+Jeff Garzik, "Linux kernel patch submission format"
+ <http://linux.yyz.us/patch-format.html>
+
+Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer"
+ <http://www.kroah.com/log/linux/maintainer.html>
+
+ <http://www.kroah.com/log/linux/maintainer-02.html>
+
+ <http://www.kroah.com/log/linux/maintainer-03.html>
+
+ <http://www.kroah.com/log/linux/maintainer-04.html>
+
+ <http://www.kroah.com/log/linux/maintainer-05.html>
+
+ <http://www.kroah.com/log/linux/maintainer-06.html>
+
+NO!!!! No more huge patch bombs to the people on linux-kernel@vger.kernel.org!
+ <https://lkml.org/lkml/2005/7/11/336>
+
+Kernel Documentation/translations/it_IT/process/coding-style.rst:
+ :ref:`Documentation/translations/it_IT/process/coding-style.rst <it_codingstyle>`
+
+Linus Torvalds's mail on the canonical patch format:
+ <http://lkml.org/lkml/2005/4/7/183>
+
+Andi Kleen, "On submitting kernel patches"
+  Some strategies for submitting difficult or controversial changes.
+
+ http://halobates.de/on-submitting-patches.pdf
diff --git a/Documentation/translations/ja_JP/howto.rst b/Documentation/translations/ja_JP/howto.rst
index f3116381c26b..2621b770a745 100644
--- a/Documentation/translations/ja_JP/howto.rst
+++ b/Documentation/translations/ja_JP/howto.rst
@@ -245,7 +245,7 @@ presents the clean, maintained code in the Linux kernel source tree
The repository with this latest, wonderful kernel code can be found
at -
- http://lxr.free-electrons.com/
+ https://elixir.bootlin.com/
開発プロセス
------------
@@ -256,7 +256,6 @@ The Linux kernel development process currently consists of a few main
- the main 4.x kernel tree
- the 4.x.y -stable kernel tree
- - the 4.x -git kernel patches
- subsystem-specific kernel trees and patches
- the 4.x -next kernel tree for integration tests
@@ -319,15 +318,6 @@ The Documentation/process/stable-kernel-rules.rst file describes what
kinds of changes are acceptable for the -stable tree, and how the
release process works.
-4.x -git patches
-~~~~~~~~~~~~~~~~
-
-There are daily snapshots of Linus' kernel tree, managed in a git
-repository (hence the name -git). These patches are usually released
-daily and represent the current state of Linus' tree. They are more
-experimental than the -rc kernels, since they are generated
-automatically, without even a glance to see whether they are sane.
-
Subsystem-specific kernel trees and patches
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst
index a8197e072599..bcd63731b80a 100644
--- a/Documentation/translations/ko_KR/howto.rst
+++ b/Documentation/translations/ko_KR/howto.rst
@@ -77,10 +77,12 @@ Documentation/process/howto.rst
리눅스 ì»¤ë„ ì†ŒìŠ¤ 코드는 GPLë¡œ ë°°í¬(release)ë˜ì—ˆë‹¤. ì†ŒìŠ¤íŠ¸ë¦¬ì˜ ë©”ì¸
ë””ë ‰í† ë¦¬ì— ìžˆëŠ” ë¼ì´ì„¼ìŠ¤ì— 관하여 ìƒì„¸í•˜ê²Œ ì“°ì—¬ 있는 COPYINGì´ë¼ëŠ”
-파ì¼ì„ ë´ë¼. ì—¬ëŸ¬ë¶„ì´ ë¼ì´ì„¼ìŠ¤ì— 관한 ë” ê¹Šì€ ë¬¸ì œë¥¼ 가지고 있다면
-리눅스 ì»¤ë„ ë©”ì¼ë§ ë¦¬ìŠ¤íŠ¸ì— ë¬»ì§€ë§ê³  변호사와 ì—°ë½í•˜ë¼. ë©”ì¼ë§
-ë¦¬ìŠ¤íŠ¸ë“¤ì— ìžˆëŠ” ì‚¬ëžŒë“¤ì€ ë³€í˜¸ì‚¬ê°€ 아니기 ë•Œë¬¸ì— ë²•ì  ë¬¸ì œì— ê´€í•˜ì—¬
-ê·¸ë“¤ì˜ ë§ì— ì˜ì§€í•´ì„œëŠ” 안ëœë‹¤.
+파ì¼ì„ ë´ë¼. 리눅스 ì»¤ë„ ë¼ì´ì„¼ì‹± 규칙과 소스 코드 ì•ˆì˜ `SPDX
+<https://spdx.org/>`_ ì‹ë³„ìž ì‚¬ìš©ë²•ì€
+:ref:`Documentation/process/license-rules.rst <kernel_licensing>` ì— ì„¤ëª…ë˜ì–´
+있다. ì—¬ëŸ¬ë¶„ì´ ë¼ì´ì„¼ìŠ¤ì— 관한 ë” ê¹Šì€ ë¬¸ì œë¥¼ 가지고 있다면 리눅스 ì»¤ë„ ë©”ì¼ë§
+ë¦¬ìŠ¤íŠ¸ì— ë¬»ì§€ë§ê³  변호사와 ì—°ë½í•˜ë¼. ë©”ì¼ë§ ë¦¬ìŠ¤íŠ¸ë“¤ì— ìžˆëŠ” ì‚¬ëžŒë“¤ì€ ë³€í˜¸ì‚¬ê°€
+아니기 ë•Œë¬¸ì— ë²•ì  ë¬¸ì œì— ê´€í•˜ì—¬ ê·¸ë“¤ì˜ ë§ì— ì˜ì§€í•´ì„œëŠ” 안ëœë‹¤.
GPLì— ê´€í•œ ìž¦ì€ ì§ˆë¬¸ë“¤ê³¼ ë‹µë³€ë“¤ì€ ë‹¤ìŒì„ 참조하ë¼.
@@ -99,7 +101,7 @@ it is recommended to send them to the maintainer at mtk.manpages@gmail.com.
Here is a list of files in the kernel source tree that are required reading:
- README
+ :ref:`Documentation/admin-guide/README.rst <readme>`
This file gives a short background on the Linux kernel, and describes
what is necessary to configure and build the kernel. People who are
new to the kernel should start here.
@@ -220,13 +222,6 @@ Documents using ReST markup are generated in Documentation/output.
in mind, you can learn what direction to take next.
-If you already have a chunk of code that you want to get into the
-kernel tree but need help packaging it in the proper form, the
-kernel-mentors project was created to help with exactly that. It is
-a mailing list, and can be found at:
-
- https://selenic.com/mailman/listinfo/kernel-mentors
-
Before making actual changes to the Linux kernel code, you absolutely
must understand how the code in question works. For analysing the
code, nothing beats reading it directly, even with the help of
specific tools (most of the
@@ -235,7 +230,7 @@ Documents using ReST markup are generated in Documentation/output.
presents the source code in the form of indexed web pages. The
latest, excellent kernel code repository can be found at:
- http://lxr.free-electrons.com/
+ https://elixir.bootlin.com/
개발 프로세스
@@ -247,7 +242,6 @@ Documents using ReST markup are generated in Documentation/output.
- the main 4.x kernel tree
- the 4.x.y stable kernel tree
- - the 4.x -git kernel patches
- subsystem kernel trees and patches
- the 4.x -next kernel tree for integration tests
@@ -303,17 +297,9 @@ there is a posting by Andrew Morton.
4.x.y kernels are maintained by the "stable" team
<stable@vger.kernel.org>, and are released roughly every other week.
-The file Documentation/process/stable-kernel-rules.rst in the kernel
-tree documents what kinds of changes are acceptable for the -stable
-tree, and how the release process works.
-
-4.x -git patches
-~~~~~~~~~~~~~~~~
-
-There are daily snapshots of Linus' kernel tree, managed in a git
-repository (hence the name -git). These patches are usually released
-daily and represent the current state of Linus' tree. They are more
-experimental than the -rc kernels, since they are generated
-automatically, without even a glance to see whether they are sane.
+The file :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+in the kernel tree documents what kinds of changes are acceptable for
+the -stable tree, and how the release process works.
Subsystem kernel trees and patches
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -360,9 +346,10 @@ https://bugzilla.kernel.org is where the Linux kernel developers track kernel bugs.
https://bugzilla.kernel.org/page.cgi?id=faq.html
-The admin-guide/reporting-bugs.rst file in the main kernel source
-directory is a good template for reporting a possible kernel bug, and
-details what kind of information the kernel developers need to track
-down the problem.
+The :ref:`admin-guide/reporting-bugs.rst <reportingbugs>` file in the
+main kernel source directory is a good template for reporting a
+possible kernel bug, and details what kind of information the kernel
+developers need to track down the problem.
Managing bug reports
@@ -377,15 +364,7 @@ https://bugzilla.kernel.org is where Linux kernel developers track kernel
do not waste their time fixing other people's bugs.
To work on bug reports that have already been reported, see https://bugzilla.kernel.org
-.  If you want to be an adviser for bug reports that will come up in the
-future, you can subscribe to the bugme-new mailing list (only new bug
-reports are mailed there) or to the bugme-janitor mailing list (every
-change in bugzilla is mailed there):
-
-	https://lists.linux-foundation.org/mailman/listinfo/bugme-new
-
-	https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
-
+.
Mailing lists
@@ -430,7 +409,8 @@ the bugme-janitor mailing list (every change in bugzilla is mailed
keep "John Kernelhacker wrote ...:" intact and put your comments between
each quoted section instead of at the top of the mail.
-If you put patches into your mail, they must be, as stated in Documentation/process/submitting-patches.rst,
+If you put patches into your mail, they must be, as stated in
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`,
plainly readable text.  Kernel developers do not want attachments or
compressed patches; they want to comment on individual lines of your
patch, and sending it uncompressed and not attached is the
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index 7f01fb1c1084..db0b9d8619f1 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -493,10 +493,8 @@ There are some minimal guarantees that may be expected of a CPU
This type of operation acts as a one-way permeable barrier.  It guarantees
that all memory operations after the ACQUIRE operation will appear to
happen after it with respect to the other components of the system.
- LOCK operations and the smp_load_acquire() and smp_cond_acquire()
- operations are also counted as ACQUIRE operations.  smp_cond_acquire()
- satisfies the semantic requirements of ACQUIRE by using a control
- dependency and smp_rmb().
+ LOCK operations and the smp_load_acquire() and smp_cond_load_acquire()
+ operations are also counted as ACQUIRE operations.
Memory operations that occur before an ACQUIRE operation may appear to
happen after it completes.
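As an aside, a minimal sketch of an ACQUIRE pairing, assuming the writer
publishes with a RELEASE operation (the names flag and data are
illustrative, not from the hunk above):

	CPU 1 (writer)			CPU 2 (reader)
	===============================	===============================
	data = 42;			while (!smp_load_acquire(&flag))
	smp_store_release(&flag, 1);		cpu_relax();
					r1 = data;  /* guaranteed to see 42 */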
@@ -2146,33 +2144,40 @@ set_current_state() may be wrapped by:
event_indicated = 1;
wake_up_process(event_daemon);
-A write memory barrier is implied by the wake_up() class of functions, if
-they wake something up.  This barrier is executed before the task state is
-cleared, and so sits between the STORE to indicate the event and the STORE
-to set the state to TASK_RUNNING.
+If wake_up() does wake something up, this function executes a general
+memory barrier.  If it wakes nothing up then a memory barrier may or may
+not be executed; you must not assume that one is.  The barrier is executed
+before the task state is accessed, in particular it sits between the STORE
+to indicate the event and the STORE to set the state to TASK_RUNNING:
- CPU 1 CPU 2
+ CPU 1 (Sleeper) CPU 2 (Waker)
=============================== ===============================
set_current_state(); STORE event_indicated
smp_store_mb(); wake_up();
-	    STORE current->state	  <write barrier>
-	    <general barrier>		  STORE current->state
- LOAD event_indicated
+ STORE current->state ...
+	    <general barrier>		  <general barrier>
+ LOAD event_indicated if ((LOAD task->state) & TASK_NORMAL)
+ STORE task->state
-Once again, this write memory barrier is executed by this code only if
-something is actually awakened.  To see this, consider the following
-sequence of events, where X and Y are both initially zero:
+where "task" is the thread being woken up and is equal to CPU 1's "current".
+
+To repeat, a general memory barrier is guaranteed to be executed by
+wake_up() if something is actually awakened, but otherwise there is no
+such guarantee.  To see this, consider the following sequence of events,
+where X and Y are both initially zero:
CPU 1 CPU 2
=============================== ===============================
- X = 1; STORE event_indicated
+ X = 1; Y = 1;
smp_mb(); wake_up();
- Y = 1; wait_event(wq, Y == 1);
- wake_up(); load from Y sees 1, no memory barrier
- load from X might see 0
+ LOAD Y LOAD X
+
+If a wakeup is actually performed, (at least) one of the two loads must see 1.
+If, on the other hand, no actual wakeup is performed, both loads may see 0.
-In contrast to the case above, if a wakeup is actually performed, CPU 2's
-load from X would be guaranteed to see 1.
+wake_up_process() always executes a general memory barrier.  This barrier,
+too, is executed before the task state is accessed.  In particular, if the
+wake_up() in the previous snippet were replaced by wake_up_process(),
+one of the two loads would be guaranteed to see 1.
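+
+For reference, a minimal sketch of the sleeper/waker pairing discussed
+here, using the illustrative names event_indicated and event_daemon from
+the examples above:
+
+	/* sleeper */
+	for (;;) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (event_indicated)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+
+	/* waker */
+	event_indicated = 1;		/* STORE to indicate the event */
+	wake_up_process(event_daemon);	/* general barrier, then STORE task->state */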
The available waker functions include:
@@ -2192,6 +2197,8 @@ A write memory barrier is implied by the wake_up() class of functions, if
wake_up_poll();
wake_up_process();
+In terms of memory ordering, these functions all provide the same
+guarantees as a wake_up() or stronger.
[!] Note that the memory barriers implied by the sleeper and the waker do
_not_ order stores performed before the wake-up with respect to loads
performed after the sleeper calls set_current_state()
diff --git a/Documentation/translations/zh_CN/HOWTO b/Documentation/translations/zh_CN/HOWTO
index 5f6d09edc9ac..7a00a8a4eb15 100644
--- a/Documentation/translations/zh_CN/HOWTO
+++ b/Documentation/translations/zh_CN/HOWTO
@@ -192,7 +192,6 @@ The Linux kernel source code contains a large number of documents, invaluable for learning how to interact with
these branches include:
- the 2.6.x main kernel source tree
- the 2.6.x.y -stable kernel source trees
- - the 2.6.x -git kernel patches
- the 2.6.x -mm kernel patches
- subsystem-specific kernel source trees and patches
@@ -240,14 +239,6 @@ it can be found in the pub/linux/kernel/v2.6/ directory on kernel.org.  Its development follows
the types of changes accepted by the -stable kernels and how the release process works.
-2.6.x -git patches
------------------
-These are daily snapshots of Linus' kernel source tree, which is managed
-by the git tool (hence the name).  These patches are usually updated daily
-to reflect the latest state of Linus' tree.  They are more experimental
-than -rc kernels, since nobody checks whether this fully automated patch set is really sane.
-
-
2.6.x -mm patches
---------------
These are experimental kernel patches maintained by Andrew Morton.  Andrew takes all of the subsystem kernel source
diff --git a/Documentation/translations/zh_CN/coding-style.rst b/Documentation/translations/zh_CN/coding-style.rst
index 1466aa64b8b4..3cb09803e084 100644
--- a/Documentation/translations/zh_CN/coding-style.rst
+++ b/Documentation/translations/zh_CN/coding-style.rst
@@ -535,26 +535,43 @@ Documentation/doc-guide/ and scripts/kernel-doc for more details.
(* (max steps 1)
c-basic-offset)))
- (add-hook 'c-mode-common-hook
- (lambda ()
- ;; Add kernel style
- (c-add-style
- "linux-tabs-only"
- '("linux" (c-offsets-alist
- (arglist-cont-nonempty
- c-lineup-gcc-asm-reg
- c-lineup-arglist-tabs-only))))))
-
- (add-hook 'c-mode-hook
- (lambda ()
- (let ((filename (buffer-file-name)))
- ;; Enable kernel mode for the appropriate files
- (when (and filename
- (string-match (expand-file-name "~/src/linux-trees")
- filename))
- (setq indent-tabs-mode t)
- (setq show-trailing-whitespace t)
- (c-set-style "linux-tabs-only")))))
+ (dir-locals-set-class-variables
+ 'linux-kernel
+ '((c-mode . (
+ (c-basic-offset . 8)
+ (c-label-minimum-indentation . 0)
+ (c-offsets-alist . (
+ (arglist-close . c-lineup-arglist-tabs-only)
+ (arglist-cont-nonempty .
+ (c-lineup-gcc-asm-reg c-lineup-arglist-tabs-only))
+ (arglist-intro . +)
+ (brace-list-intro . +)
+ (c . c-lineup-C-comments)
+ (case-label . 0)
+ (comment-intro . c-lineup-comment)
+ (cpp-define-intro . +)
+ (cpp-macro . -1000)
+ (cpp-macro-cont . +)
+ (defun-block-intro . +)
+ (else-clause . 0)
+ (func-decl-cont . +)
+ (inclass . +)
+ (inher-cont . c-lineup-multi-inher)
+ (knr-argdecl-intro . 0)
+ (label . -1000)
+ (statement . 0)
+ (statement-block-intro . +)
+ (statement-case-intro . +)
+ (statement-cont . +)
+ (substatement . +)
+ ))
+ (indent-tabs-mode . t)
+ (show-trailing-whitespace . t)
+ ))))
+
+ (dir-locals-set-directory-class
+ (expand-file-name "~/src/linux-trees")
+ 'linux-kernel)
This will make emacs use a better kernel coding style for C source files under ``~/src/linux-trees``.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 356156f5c52d..64b38dfcc243 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -5,25 +5,32 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
----------------------
The kvm API is a set of ioctls that are issued to control various aspects
-of a virtual machine. The ioctls belong to three classes
+of a virtual machine. The ioctls belong to four classes:
- System ioctls: These query and set global attributes which affect the
whole kvm subsystem. In addition a system ioctl is used to create
- virtual machines
+ virtual machines.
- VM ioctls: These query and set attributes that affect an entire virtual
machine, for example memory layout. In addition a VM ioctl is used to
- create virtual cpus (vcpus).
+ create virtual cpus (vcpus) and devices.
- Only run VM ioctls from the same process (address space) that was used
- to create the VM.
+ VM ioctls must be issued from the same process (address space) that was
+ used to create the VM.
- vcpu ioctls: These query and set attributes that control the operation
of a single virtual cpu.
- Only run vcpu ioctls from the same thread that was used to create the
- vcpu.
+ vcpu ioctls should be issued from the same thread that was used to create
+ the vcpu, except for asynchronous vcpu ioctls that are marked as such in
+ the documentation. Otherwise, the first ioctl after switching threads
+ could see a performance impact.
+ - device ioctls: These query and set attributes that control the operation
+ of a single device.
+
+ device ioctls must be issued from the same process (address space) that
+ was used to create the VM.
2. File descriptors
-------------------
@@ -32,17 +39,51 @@ The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
handle will create a VM file descriptor which can be used to issue VM
-ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
-and return a file descriptor pointing to it. Finally, ioctls on a vcpu
-fd can be used to control the vcpu, including the important task of
-actually running guest code.
+ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
+create a virtual cpu or device and return a file descriptor pointing to
+the new resource. Finally, ioctls on a vcpu or device fd can be used
+to control the vcpu or device. For vcpus, this includes the important
+task of actually running guest code.
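+
+A minimal userspace sketch of this file descriptor hierarchy (error
+handling omitted; the fd variable names are illustrative):
+
+	#include <fcntl.h>
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	int sys_fd  = open("/dev/kvm", O_RDWR);            /* system ioctls */
+	int vm_fd   = ioctl(sys_fd, KVM_CREATE_VM, 0);     /* VM ioctls     */
+	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);    /* vcpu ioctls   */
+
+	ioctl(vcpu_fd, KVM_RUN, 0);     /* actually runs guest code */
+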
In general file descriptors can be migrated among processes by means
of fork() and the SCM_RIGHTS facility of unix domain socket. These
kinds of tricks are explicitly not supported by kvm. While they will
not cause harm to the host, their actual behavior is not guaranteed by
-the API. The only supported use is one virtual machine per process,
-and one vcpu per thread.
+the API. See "General description" for details on the ioctl usage
+model that is supported by KVM.
+
+It is important to note that although VM ioctls may only be issued from
+the process that created the VM, a VM's lifecycle is associated with its
+file descriptor, not its creator (process). In other words, the VM and
+its resources, *including the associated address space*, are not freed
+until the last reference to the VM's file descriptor has been released.
+For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will
+not be freed until both the parent (original) process and its child have
+put their references to the VM's file descriptor.
+
+Because a VM's resources are not freed until the last reference to its
+file descriptor is released, creating additional references to a VM via
+fork(), dup(), etc... without careful consideration is strongly
+discouraged and may have unwanted side effects, e.g. memory allocated
+by and on behalf of the VM's process may not be freed/unaccounted when
+the VM is shut down.
+
3. Extensions
@@ -280,7 +321,7 @@ cpu's hardware control block.
4.8 KVM_GET_DIRTY_LOG (vm ioctl)
Capability: basic
-Architectures: x86
+Architectures: all
Type: vm ioctl
Parameters: struct kvm_dirty_log (in/out)
Returns: 0 on success, -1 on error
@@ -498,11 +539,15 @@ c) KVM_INTERRUPT_SET_LEVEL
Note that any value for 'irq' other than the ones stated above is invalid
and incurs unexpected behavior.
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
MIPS:
Queues an external interrupt to be injected into the virtual CPU. A negative
interrupt number dequeues the interrupt.
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
4.17 KVM_DEBUG_GUEST
@@ -1069,14 +1114,12 @@ struct kvm_userspace_memory_region {
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
#define KVM_MEM_READONLY (1UL << 1)
-This ioctl allows the user to create or modify a guest physical memory
-slot. When changing an existing slot, it may be moved in the guest
-physical memory space, or its flags may be modified. It may not be
-resized. Slots may not overlap in guest physical address space.
-Bits 0-15 of "slot" specifies the slot id and this value should be
-less than the maximum number of user memory slots supported per VM.
-The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
-if this capability is supported by the architecture.
+This ioctl allows the user to create, modify or delete a guest physical
+memory slot. Bits 0-15 of "slot" specify the slot id and this value
+should be less than the maximum number of user memory slots supported per
+VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
+if this capability is supported by the architecture. Slots may not
+overlap in guest physical address space.
If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
specifies the address space which is being modified. They must be
@@ -1085,6 +1128,10 @@ KVM_CAP_MULTI_ADDRESS_SPACE capability. Slots in separate address spaces
are unrelated; the restriction on overlapping slots only applies within
each address space.
+Deleting a slot is done by passing zero for memory_size. When changing
+an existing slot, it may be moved in the guest physical memory space,
+or its flags may be modified, but it may not be resized.
+
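+A sketch of such a deletion (the fd and the guest physical address are
+illustrative):
+
+	struct kvm_userspace_memory_region region = {
+		.slot            = 0,
+		.flags           = 0,
+		.guest_phys_addr = 0x100000,
+		.memory_size     = 0,   /* zero size deletes the slot */
+		.userspace_addr  = 0,
+	};
+
+	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
+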
Memory for the region is taken starting at the address denoted by the
field userspace_addr, which must point at user addressable memory for
the entire memory slot size. Any object may back this memory, including
@@ -2476,7 +2523,7 @@ KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
machine checks needing further payload are not
supported by this ioctl)
-Note that the vcpu ioctl is asynchronous to vcpu execution.
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
4.78 KVM_PPC_GET_HTAB_FD
@@ -3025,8 +3072,7 @@ KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
KVM_S390_MCHK - machine check interrupt; parameters in .mchk
-
-Note that the vcpu ioctl is asynchronous to vcpu execution.
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
4.94 KVM_S390_GET_IRQ_STATE
@@ -3764,7 +3810,7 @@ to I/O ports.
4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl)
Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
-Architectures: x86
+Architectures: x86, arm, arm64, mips
Type: vm ioctl
Parameters: struct kvm_dirty_log (in)
Returns: 0 on success, -1 on error
@@ -3784,8 +3830,9 @@ The ioctl clears the dirty status of pages in a memory slot, according to
the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap
field. Bit 0 of the bitmap corresponds to page "first_page" in the
memory slot, and num_pages is the size in bits of the input bitmap.
-Both first_page and num_pages must be a multiple of 64. For each bit
-that is set in the input bitmap, the corresponding page is marked "clean"
+first_page must be a multiple of 64; num_pages must also be a multiple of
+64 unless first_page + num_pages is the size of the memory slot. For each
+bit that is set in the input bitmap, the corresponding page is marked "clean"
in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
(for example via write-protection, or by clearing the dirty bit in
a page table entry).
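The alignment rule above can be summarized as follows (a sketch, where
slot_npages stands for the size of the memory slot in pages):

	bool valid = (first_page % 64 == 0) &&
		     (num_pages % 64 == 0 ||
		      first_page + num_pages == slot_npages);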
@@ -4753,7 +4800,7 @@ and injected exceptions.
7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
-Architectures: all
+Architectures: x86, arm, arm64, mips
Parameters: args[0] whether feature should be enabled or not
With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
diff --git a/Documentation/virtual/kvm/halt-polling.txt b/Documentation/virtual/kvm/halt-polling.txt
index 4a8418318769..4f791b128dd2 100644
--- a/Documentation/virtual/kvm/halt-polling.txt
+++ b/Documentation/virtual/kvm/halt-polling.txt
@@ -53,7 +53,8 @@ the global max polling interval then the polling interval can be increased in
the hope that next time during the longer polling interval the wake up source
will be received while the host is polling and the latency benefits will be
received. The polling interval is grown in the function grow_halt_poll_ns() and
-is multiplied by the module parameter halt_poll_ns_grow.
+is multiplied by the module parameter halt_poll_ns_grow; when growing
+from zero, halt_poll_ns_grow_start supplies the initial value.
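+
+Roughly, the growth rule reads as follows (a sketch of grow_halt_poll_ns()
+under the default parameter values listed in the table below):
+
+	val = vcpu_halt_poll_ns;                /* current interval */
+	val *= halt_poll_ns_grow;               /* default: 2 */
+	if (val < halt_poll_ns_grow_start)      /* default: 10000 ns */
+		val = halt_poll_ns_grow_start;  /* growing from zero starts here */
+	vcpu_halt_poll_ns = val;
+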
In the event that the total block time was greater than the global max polling
interval then the host will never poll for long enough (limited by the global
@@ -80,22 +81,30 @@ shrunk. These variables are defined in include/linux/kvm_host.h and as module
parameters in virt/kvm/kvm_main.c, or arch/powerpc/kvm/book3s_hv.c in the
powerpc kvm-hv case.
-Module Parameter | Description | Default Value
+Module Parameter | Description | Default Value
--------------------------------------------------------------------------------
-halt_poll_ns | The global max polling interval | KVM_HALT_POLL_NS_DEFAULT
- | which defines the ceiling value |
- | of the polling interval for | (per arch value)
- | each vcpu. |
+halt_poll_ns | The global max polling | KVM_HALT_POLL_NS_DEFAULT
+ | interval which defines |
+ | the ceiling value of the |
+ | polling interval for | (per arch value)
+ | each vcpu. |
--------------------------------------------------------------------------------
-halt_poll_ns_grow | The value by which the halt | 2
- | polling interval is multiplied |
- | in the grow_halt_poll_ns() |
- | function. |
+halt_poll_ns_grow | The value by which the | 2
+ | halt polling interval is |
+ | multiplied in the |
+ | grow_halt_poll_ns() |
+ | function. |
--------------------------------------------------------------------------------
-halt_poll_ns_shrink | The value by which the halt | 0
- | polling interval is divided in |
- | the shrink_halt_poll_ns() |
- | function. |
+halt_poll_ns_grow_start | The initial value to grow | 10000
+ | to from zero in the |
+ | grow_halt_poll_ns() |
+ | function. |
+--------------------------------------------------------------------------------
+halt_poll_ns_shrink | The value by which the | 0
+ | halt polling interval is |
+ | divided in the |
+ | shrink_halt_poll_ns() |
+ | function. |
--------------------------------------------------------------------------------
These module parameters can be set from the debugfs files in:
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index e507a9e0421e..2efe0efc516e 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -142,7 +142,7 @@ Shadow pages contain the following information:
If clear, this page corresponds to a guest page table denoted by the gfn
field.
role.quadrant:
- When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit
+ When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit
sptes. That means a guest page table contains more ptes than the host,
so multiple shadow pages are needed to shadow one guest page.
For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
@@ -158,9 +158,9 @@ Shadow pages contain the following information:
The page is invalid and should not be used. It is a root page that is
currently pinned (by a cpu hardware register pointing to it); once it is
unpinned it will be destroyed.
- role.cr4_pae:
- Contains the value of cr4.pae for which the page is valid (e.g. whether
- 32-bit or 64-bit gptes are in use).
+ role.gpte_is_8_bytes:
+ Reflects the size of the guest PTE for which the page is valid, i.e. '1'
+ if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
role.nxe:
Contains the value of efer.nxe for which the page is valid.
role.cr0_wp:
@@ -173,6 +173,9 @@ Shadow pages contain the following information:
Contains the value of cr4.smap && !cr0.wp for which the page is valid
(pages for which this is true are different from other pages; see the
treatment of cr0.wp=0 below).
+ role.ept_sp:
+ This is a virtual flag to denote a shadowed nested EPT page. ept_sp
+ is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination.
role.smm:
Is 1 if the page is valid in system management mode. This field
determines which of the kvm_memslots array was used to build this
@@ -224,10 +227,6 @@ Shadow pages contain the following information:
A bitmap indicating which sptes in spt point (directly or indirectly) at
pages that may be unsynchronized. Used to quickly locate all unsychronized
pages reachable from a given page.
- mmu_valid_gen:
- Generation number of the page. It is compared with kvm->arch.mmu_valid_gen
- during hash table lookup, and used to skip invalidated shadow pages (see
- "Zapping all pages" below.)
clear_spte_count:
Only present on 32-bit hosts, where a 64-bit spte cannot be written
atomically. The reader uses this while running out of the MMU lock
@@ -402,27 +401,6 @@ causes its disallow_lpage to be incremented, thus preventing instantiation of
a large spte. The frames at the end of an unaligned memory slot have
artificially inflated ->disallow_lpages so they can never be instantiated.
-Zapping all pages (page generation count)
-=========================================
-
-For the large memory guests, walking and zapping all pages is really slow
-(because there are a lot of pages), and also blocks memory accesses of
-all VCPUs because it needs to hold the MMU lock.
-
-To make it be more scalable, kvm maintains a global generation number
-which is stored in kvm->arch.mmu_valid_gen. Every shadow page stores
-the current global generation-number into sp->mmu_valid_gen when it
-is created. Pages with a mismatching generation number are "obsolete".
-
-When KVM need zap all shadow pages sptes, it just simply increases the global
-generation-number then reload root shadow pages on all vcpus. As the VCPUs
-create new shadow page tables, the old pages are not used because of the
-mismatching generation number.
-
-KVM then walks through all pages and zaps obsolete pages. While the zap
-operation needs to take the MMU lock, the lock can be released periodically
-so that the VCPUs can make progress.
-
Fast invalidation of MMIO sptes
===============================
@@ -435,8 +413,7 @@ shadow pages, and is made more scalable with a similar technique.
MMIO sptes have a few spare bits, which are used to store a
generation number. The global generation number is stored in
kvm_memslots(kvm)->generation, and increased whenever guest memory info
-changes. This generation number is distinct from the one described in
-the previous section.
+changes.
When KVM finds an MMIO spte, it checks the generation number of the spte.
If the generation number of the spte does not equal the global generation
@@ -452,13 +429,16 @@ stored into the MMIO spte. Thus, the MMIO spte might be created based on
out-of-date information, but with an up-to-date generation number.
To avoid this, the generation number is incremented again after synchronize_srcu
-returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a
+returns; thus, bit 63 of kvm_memslots(kvm)->generation is set to 1 only during a
memslot update, while some SRCU readers might be using the old copy. We do not
want to use an MMIO sptes created with an odd generation number, and we can do
-this without losing a bit in the MMIO spte. The low bit of the generation
-is not stored in MMIO spte, and presumed zero when it is extracted out of the
-spte. If KVM is unlucky and creates an MMIO spte while the low bit is 1,
-the next access to the spte will always be a cache miss.
+this without losing a bit in the MMIO spte. The "update in-progress" bit of the
+generation is not stored in MMIO spte, and so is implicitly zero when the
+generation is extracted out of the spte. If KVM is unlucky and creates an MMIO
+spte while an update is in-progress, the next access to the spte will always be
+a cache miss. For example, a subsequent access during the update window will
+miss due to the in-progress flag diverging, while an access after the update
+window closes will have a higher generation number (as compared to the spte).
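+
+Schematically the check reads (a sketch; the helper names are
+illustrative, not the exact identifiers in arch/x86/kvm/mmu.c):
+
+	u64 spte_gen   = generation_stored_in(spte);   /* in-progress bit implicitly 0 */
+	u64 global_gen = memslots_generation(kvm);     /* may carry the in-progress bit */
+
+	if (spte_gen != global_gen)     /* also diverges during an update window */
+		treat_as_cache_miss();  /* the stale spte is rebuilt */
+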
Further reading
diff --git a/Documentation/virtual/kvm/s390-diag.txt b/Documentation/virtual/kvm/s390-diag.txt
index 48c4921794ed..7c52e5f8b210 100644
--- a/Documentation/virtual/kvm/s390-diag.txt
+++ b/Documentation/virtual/kvm/s390-diag.txt
@@ -68,7 +68,8 @@ Subcode 3 - virtio-ccw notification
identifier, it is ignored.
After completion of the DIAGNOSE call, general register 2 may contain
- a 64bit identifier (in the kvm_io_bus cookie case).
+ a 64bit identifier (in the kvm_io_bus cookie case), or a negative
+ error value, if an internal error occurred.
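+
+	A guest can thus distinguish the two cases by sign (a sketch; read_gr2()
+	is an illustrative accessor for general register 2):
+
+		long reg2 = read_gr2();
+		bool failed = reg2 < 0;  /* negative -> internal host error */
+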
See also the virtio standard for a discussion of this hypercall.
diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
index 2b3ab3a1ccf3..b58cc3bfe777 100644
--- a/Documentation/vm/index.rst
+++ b/Documentation/vm/index.rst
@@ -4,7 +4,7 @@ Linux Memory Management Documentation
This is a collection of documents about the Linux memory management (mm)
subsystem. If you are looking for advice on simply allocating memory,
-see the :ref:`memory-allocation`.
+see the :ref:`memory_allocation`.
User guides for MM features
===========================
diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst
index 195928808bac..933ada4368ff 100644
--- a/Documentation/vm/slub.rst
+++ b/Documentation/vm/slub.rst
@@ -66,7 +66,7 @@ Trying to find an issue in the dentry cache? Try::
to only enable debugging on the dentry cache. You may use an asterisk at the
end of the slab name, in order to cover all slabs with the same prefix. For
example, here's how you can poison the dentry cache as well as all kmalloc
-slabs:
+slabs::
slub_debug=P,kmalloc-*,dentry
@@ -141,7 +141,7 @@ can be influenced by kernel parameters:
(list_lock) where contention may occur.
``slub_min_order``
- specifies a minim order of slabs. A similar effect like
+ specifies a minimum order of slabs. A similar effect like
``slub_min_objects``.
``slub_max_order``
diff --git a/Documentation/watchdog/mlx-wdt.txt b/Documentation/watchdog/mlx-wdt.txt
new file mode 100644
index 000000000000..66eeb78505c3
--- /dev/null
+++ b/Documentation/watchdog/mlx-wdt.txt
@@ -0,0 +1,52 @@
+ Mellanox watchdog drivers
+ for x86 based system switches
+
+This driver provides watchdog functionality for various Mellanox
+Ethernet and Infiniband switch systems.
+
+The Mellanox watchdog device is implemented in a programmable logic device.
+
+There are two types of HW watchdog implementations:
+
+Type 1:
+The actual HW timeout is defined as a power of 2 msec;
+e.g. a timeout of 20 sec will be rounded up to 32768 msec.
+The maximum timeout period is 32 sec (32768 msec).
+Get time-left is not supported.
+
+Type 2:
+The actual HW timeout is defined in seconds and is the same as
+the user-defined timeout.
+The maximum timeout is 255 sec.
+Get time-left is supported.
+
+The type 1 HW watchdog implementation exists in old systems, while
+all new systems have the type 2 HW watchdog.
+The two types of HW implementation also have different register maps.
+
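+For the type 1 rounding described above, the effective timeout is the next
+power of two in milliseconds (an illustrative sketch using the kernel's
+roundup_pow_of_two() helper):
+
+	u32 effective_ms = roundup_pow_of_two(timeout_sec * 1000);
+	/* e.g. timeout_sec = 20 -> 20000 ms -> rounded up to 32768 ms */
+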
+A Mellanox system can have two watchdogs: main and auxiliary.
+The main and auxiliary watchdog devices can be enabled together
+on the same system.
+Several actions can be defined in the watchdog:
+system reset, starting the fans at full speed and increasing a register
+counter.  The last two actions are performed without a system reset.
+Actions without reset are provided for the auxiliary watchdog device,
+which is optional.
+A watchdog can be started during probe; in this case it will be
+pinged by the watchdog core until the watchdog device is opened by a
+user space application.
+A watchdog can be initialised in nowayout mode, i.e. once started
+it cannot be stopped.
+
+This mlx-wdt driver supports both HW watchdog implementations.
+
+The watchdog driver is probed from the common mlx_platform driver.
+The mlx_platform driver provides the appropriate set of registers for the
+Mellanox watchdog device, the identity name (mlx-wdt-main or mlx-wdt-aux),
+the initial timeout, the action performed on expiration, the configuration
+flags (nowayout and start_at_boot) and the HW watchdog version (type 1 or
+type 2).
+The driver checks during initialization whether the previous system reset
+was caused by the watchdog. If so, it issues a notification about this event.
+
+Access to HW registers is performed through a generic regmap interface.
diff --git a/Documentation/x86/kernel-stacks b/Documentation/x86/kernel-stacks
index 9a0aa4d3a866..d1bfb0b95ee0 100644
--- a/Documentation/x86/kernel-stacks
+++ b/Documentation/x86/kernel-stacks
@@ -59,7 +59,7 @@ If that assumption is ever broken then the stacks will become corrupt.
The currently assigned IST stacks are :-
-* DOUBLEFAULT_STACK. EXCEPTION_STKSZ (PAGE_SIZE).
+* ESTACK_DF. EXCEPTION_STKSZ (PAGE_SIZE).
Used for interrupt 8 - Double Fault Exception (#DF).
@@ -68,7 +68,7 @@ The currently assigned IST stacks are :-
Using a separate stack allows the kernel to recover from it well enough
in many cases to still output an oops.
-* NMI_STACK. EXCEPTION_STKSZ (PAGE_SIZE).
+* ESTACK_NMI. EXCEPTION_STKSZ (PAGE_SIZE).
Used for non-maskable interrupts (NMI).
@@ -76,7 +76,7 @@ The currently assigned IST stacks are :-
middle of switching stacks. Using IST for NMI events avoids making
assumptions about the previous state of the kernel stack.
-* DEBUG_STACK. DEBUG_STKSZ
+* ESTACK_DB. EXCEPTION_STKSZ (PAGE_SIZE).
Used for hardware debug interrupts (interrupt 1) and for software
debug interrupts (INT3).
@@ -86,7 +86,12 @@ The currently assigned IST stacks are :-
avoids making assumptions about the previous state of the kernel
stack.
-* MCE_STACK. EXCEPTION_STKSZ (PAGE_SIZE).
+ To handle nested #DB correctly there exist two instances of DB stacks. On
+ #DB entry the IST stackpointer for #DB is switched to the second instance
+ so a nested #DB starts from a clean stack. The nested #DB switches
+ the IST stackpointer to a guard hole to catch triple nesting.
+
+* ESTACK_MCE. EXCEPTION_STKSZ (PAGE_SIZE).
Used for interrupt 18 - Machine Check Exception (#MC).
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt
index 2953e3ec9a02..06b3cdbc4048 100644
--- a/Documentation/x86/topology.txt
+++ b/Documentation/x86/topology.txt
@@ -51,7 +51,7 @@ The topology of a system is described in the units of:
The physical ID of the package. This information is retrieved via CPUID
and deduced from the APIC IDs of the cores in the package.
- - cpuinfo_x86.logical_id:
+ - cpuinfo_x86.logical_proc_id:
The logical ID of the package. As we do not trust BIOSes to enumerate the
packages in a consistent way, we introduced the concept of logical package
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 804f9426ed17..6cbe652d7a49 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -72,7 +72,7 @@ Complete virtual memory map with 5-level page tables
Notes:
- With 56-bit addresses, user-space memory gets expanded by a factor of 512x,
- from 0.125 PB to 64 PB. All kernel mappings shift down to the -64 PT starting
+ from 0.125 PB to 64 PB. All kernel mappings shift down to the -64 PB starting
offset and many of the regions expand to support the much larger physical
memory supported.
@@ -83,7 +83,7 @@ Notes:
0000000000000000 | 0 | 00ffffffffffffff | 64 PB | user-space virtual memory, different per mm
__________________|____________|__________________|_________|___________________________________________________________
| | | |
- 0000800000000000 | +64 PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
+ 0100000000000000 | +64 PB | feffffffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
| | | | virtual memory addresses up to the -64 PB
| | | | starting offset of kernel mappings.
__________________|____________|__________________|_________|___________________________________________________________
@@ -99,7 +99,7 @@ ____________________________________________________________|___________________
ffd2000000000000 | -11.5 PB | ffd3ffffffffffff | 0.5 PB | ... unused hole
ffd4000000000000 | -11 PB | ffd5ffffffffffff | 0.5 PB | virtual memory map (vmemmap_base)
ffd6000000000000 | -10.5 PB | ffdeffffffffffff | 2.25 PB | ... unused hole
- ffdf000000000000 | -8.25 PB | fffffdffffffffff | ~8 PB | KASAN shadow memory
+ ffdf000000000000 | -8.25 PB | fffffbffffffffff | ~8 PB | KASAN shadow memory
__________________|____________|__________________|_________|____________________________________________________________
|
| Identical layout to the 47-bit one from here on:
diff --git a/Kbuild b/Kbuild
index 4a4c47c38d1d..8637fd14135f 100644
--- a/Kbuild
+++ b/Kbuild
@@ -1,16 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
#
# Kbuild for top-level directory of the kernel
-# This file takes care of the following:
-# 1) Generate bounds.h
-# 2) Generate timeconst.h
-# 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
-# 4) Check for missing system calls
-# 5) check atomics headers are up-to-date
-# 6) Generate constants.py (may need bounds.h)
#####
-# 1) Generate bounds.h
+# Generate bounds.h
bounds-file := include/generated/bounds.h
@@ -21,7 +14,7 @@ $(bounds-file): kernel/bounds.s FORCE
$(call filechk,offsets,__LINUX_BOUNDS_H__)
#####
-# 2) Generate timeconst.h
+# Generate timeconst.h
timeconst-file := include/generated/timeconst.h
@@ -33,8 +26,7 @@ $(timeconst-file): kernel/time/timeconst.bc FORCE
$(call filechk,gentimeconst)
#####
-# 3) Generate asm-offsets.h
-#
+# Generate asm-offsets.h
offsets-file := include/generated/asm-offsets.h
@@ -47,8 +39,7 @@ $(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
$(call filechk,offsets,__ASM_OFFSETS_H__)
#####
-# 4) Check for missing system calls
-#
+# Check for missing system calls
always += missing-syscalls
targets += missing-syscalls
@@ -60,8 +51,7 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
$(call cmd,syscalls)
#####
-# 5) Check atomic headers are up-to-date
-#
+# Check atomic headers are up-to-date
always += old-atomics
targets += old-atomics
@@ -72,14 +62,5 @@ quiet_cmd_atomics = CALL $<
old-atomics: scripts/atomic/check-atomics.sh FORCE
$(call cmd,atomics)
-#####
-# 6) Generate constants for Python GDB integration
-#
-
-extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py
-
-build_constants_py: $(timeconst-file) $(bounds-file)
- @$(MAKE) $(build)=scripts/gdb/linux $@
-
# Keep these three files during make clean
no-clean-files := $(bounds-file) $(offsets-file) $(timeconst-file)
diff --git a/LICENSES/exceptions/GCC-exception-2.0 b/LICENSES/exceptions/GCC-exception-2.0
new file mode 100644
index 000000000000..422914a88c36
--- /dev/null
+++ b/LICENSES/exceptions/GCC-exception-2.0
@@ -0,0 +1,18 @@
+SPDX-Exception-Identifier: GCC-exception-2.0
+SPDX-URL: https://spdx.org/licenses/GCC-exception-2.0.html
+SPDX-Licenses: GPL-2.0, GPL-2.0+, GPL-2.0-only, GPL-2.0-or-later
+Usage-Guide:
+ This exception is used together with one of the above SPDX-Licenses to
+ allow linking the compiled version of code to non GPL compliant code.
+ To use this exception add it with the keyword WITH to one of the
+ identifiers in the SPDX-Licenses tag:
+ SPDX-License-Identifier: <SPDX-License> WITH GCC-exception-2.0
+License-Text:
+
+In addition to the permissions in the GNU Library General Public License,
+the Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs, and to
+distribute those programs without any restriction coming from the use of
+this file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
diff --git a/MAINTAINERS b/MAINTAINERS
index c4fc97a9797a..0a52061eacae 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -366,6 +366,7 @@ M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
M: Hanjun Guo <hanjun.guo@linaro.org>
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-acpi@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/acpi/arm64
@@ -467,7 +468,7 @@ ADM1025 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/adm1025
+F: Documentation/hwmon/adm1025.rst
F: drivers/hwmon/adm1025.c
ADM1029 HARDWARE MONITOR DRIVER
@@ -519,7 +520,7 @@ ADS1015 HARDWARE MONITOR DRIVER
M: Dirk Eibach <eibach@gdsys.de>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/ads1015
+F: Documentation/hwmon/ads1015.rst
F: drivers/hwmon/ads1015.c
F: include/linux/platform_data/ads1015.h
@@ -532,7 +533,7 @@ ADT7475 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/adt7475
+F: Documentation/hwmon/adt7475.rst
F: drivers/hwmon/adt7475.c
ADVANSYS SCSI DRIVER
@@ -763,7 +764,7 @@ AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Huang Rui <ray.huang@amd.com>
L: linux-hwmon@vger.kernel.org
S: Supported
-F: Documentation/hwmon/fam15h_power
+F: Documentation/hwmon/fam15h_power.rst
F: drivers/hwmon/fam15h_power.c
AMD FCH GPIO DRIVER
@@ -1058,6 +1059,8 @@ L: netdev@vger.kernel.org
S: Odd fixes
F: drivers/net/appletalk/
F: net/appletalk/
+F: include/linux/atalk.h
+F: include/uapi/linux/atalk.h
APPLIED MICRO (APM) X-GENE DEVICE TREE SUPPORT
M: Khuong Dinh <khuong@os.amperecomputing.com>
@@ -1195,7 +1198,7 @@ F: arch/arm*/include/asm/hw_breakpoint.h
F: arch/arm*/include/asm/perf_event.h
F: drivers/perf/*
F: include/linux/perf/arm_pmu.h
-F: Documentation/devicetree/bindings/arm/pmu.txt
+F: Documentation/devicetree/bindings/arm/pmu.yaml
F: Documentation/devicetree/bindings/perf/
ARM PORT
@@ -1890,14 +1893,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
ARM/NUVOTON NPCM ARCHITECTURE
M: Avi Fishman <avifishman70@gmail.com>
M: Tomer Maimon <tmaimon77@gmail.com>
+M: Tali Perry <tali.perry1@gmail.com>
R: Patrick Venture <venture@google.com>
R: Nancy Yuen <yuenn@google.com>
-R: Brendan Higgins <brendanhiggins@google.com>
+R: Benjamin Fair <benjaminfair@google.com>
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
F: arch/arm/mach-npcm/
F: arch/arm/boot/dts/nuvoton-npcm*
-F: include/dt-bindings/clock/nuvoton,npcm7xx-clks.h
+F: include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
F: drivers/*/*npcm*
F: Documentation/devicetree/bindings/*/*npcm*
F: Documentation/devicetree/bindings/*/*/*npcm*
@@ -2140,8 +2144,9 @@ F: drivers/media/platform/s5p-cec/
F: Documentation/devicetree/bindings/media/s5p-cec.txt
ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
-M: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+M: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
@@ -2352,7 +2357,7 @@ F: arch/arm/mm/cache-uniphier.c
F: arch/arm64/boot/dts/socionext/uniphier*
F: drivers/bus/uniphier-system-bus.c
F: drivers/clk/uniphier/
-F: drivers/dmaengine/uniphier-mdmac.c
+F: drivers/dma/uniphier-mdmac.c
F: drivers/gpio/gpio-uniphier.c
F: drivers/i2c/busses/i2c-uniphier*
F: drivers/irqchip/irq-uniphier-aidet.c
@@ -2508,7 +2513,7 @@ ASC7621 HARDWARE MONITOR DRIVER
M: George Joseph <george.joseph@fairview5.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/asc7621
+F: Documentation/hwmon/asc7621.rst
F: drivers/hwmon/asc7621.c
ASPEED VIDEO ENGINE DRIVER
@@ -3116,6 +3121,7 @@ F: drivers/cpufreq/bmips-cpufreq.c
BROADCOM BMIPS MIPS ARCHITECTURE
M: Kevin Cernekee <cernekee@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
+L: bcm-kernel-feedback-list@broadcom.com
L: linux-mips@vger.kernel.org
T: git git://github.com/broadcom/stblinux.git
S: Maintained
@@ -3200,6 +3206,7 @@ F: drivers/phy/broadcom/phy-brcm-usb*
BROADCOM GENET ETHERNET DRIVER
M: Doug Berger <opendmb@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
+L: bcm-kernel-feedback-list@broadcom.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/genet/
@@ -3307,6 +3314,7 @@ F: drivers/spi/spi-iproc-qspi.c
BROADCOM SYSTEMPORT ETHERNET DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
+L: bcm-kernel-feedback-list@broadcom.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bcmsysport.*
@@ -3751,7 +3759,7 @@ CHROME HARDWARE PLATFORM SUPPORT
M: Benson Leung <bleung@chromium.org>
M: Enric Balletbo i Serra <enric.balletbo@collabora.com>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/bleung/chrome-platform.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git
F: drivers/platform/chrome/
CHROMEOS EC SUBDRIVERS
@@ -3790,6 +3798,7 @@ M: Richard Fitzgerald <rf@opensource.cirrus.com>
L: patches@opensource.cirrus.com
S: Supported
F: drivers/clk/clk-lochnagar.c
+F: drivers/hwmon/lochnagar-hwmon.c
F: drivers/mfd/lochnagar-i2c.c
F: drivers/pinctrl/cirrus/pinctrl-lochnagar.c
F: drivers/regulator/lochnagar-regulator.c
@@ -3798,8 +3807,10 @@ F: include/dt-bindings/pinctrl/lochnagar.h
F: include/linux/mfd/lochnagar*
F: Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
F: Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
+F: Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
F: Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
F: Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
+F: Documentation/hwmon/lochnagar
CISCO FCOE HBA DRIVER
M: Satish Kharat <satishkh@cisco.com>
@@ -4037,7 +4048,7 @@ CORETEMP HARDWARE MONITORING DRIVER
M: Fenghua Yu <fenghua.yu@intel.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/coretemp
+F: Documentation/hwmon/coretemp.rst
F: drivers/hwmon/coretemp.c
COSA/SRP SYNC SERIAL DRIVER
@@ -4123,7 +4134,7 @@ F: drivers/cpuidle/*
F: include/linux/cpuidle.h
CRAMFS FILESYSTEM
-M: Nicolas Pitre <nico@linaro.org>
+M: Nicolas Pitre <nico@fluxnic.net>
S: Maintained
F: Documentation/filesystems/cramfs.txt
F: fs/cramfs/
@@ -4545,6 +4556,7 @@ S: Maintained
F: drivers/devfreq/
F: include/linux/devfreq.h
F: Documentation/devicetree/bindings/devfreq/
+F: include/trace/events/devfreq.h
DEVICE FREQUENCY EVENT (DEVFREQ-EVENT)
M: Chanwoo Choi <cw00.choi@samsung.com>
@@ -4592,7 +4604,7 @@ DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource <support.opensource@diasemi.com>
W: http://www.dialog-semiconductor.com/products
S: Supported
-F: Documentation/hwmon/da90??
+F: Documentation/hwmon/da90??.rst
F: Documentation/devicetree/bindings/mfd/da90*.txt
F: Documentation/devicetree/bindings/input/da90??-onkey.txt
F: Documentation/devicetree/bindings/thermal/da90??-thermal.txt
@@ -4639,10 +4651,11 @@ S: Maintained
F: drivers/i2c/busses/i2c-diolan-u2c.c
FILESYSTEM DIRECT ACCESS (DAX)
-M: Matthew Wilcox <willy@infradead.org>
-M: Ross Zwisler <zwisler@kernel.org>
-M: Jan Kara <jack@suse.cz>
+M: Dan Williams <dan.j.williams@intel.com>
+R: Matthew Wilcox <willy@infradead.org>
+R: Jan Kara <jack@suse.cz>
L: linux-fsdevel@vger.kernel.org
+L: linux-nvdimm@lists.01.org
S: Supported
F: fs/dax.c
F: include/linux/dax.h
@@ -4650,9 +4663,9 @@ F: include/trace/events/fs_dax.h
DEVICE DIRECT ACCESS (DAX)
M: Dan Williams <dan.j.williams@intel.com>
-M: Dave Jiang <dave.jiang@intel.com>
-M: Ross Zwisler <zwisler@kernel.org>
M: Vishal Verma <vishal.l.verma@intel.com>
+M: Keith Busch <keith.busch@intel.com>
+M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
S: Supported
F: drivers/dax/
@@ -4742,7 +4755,7 @@ DME1737 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/dme1737
+F: Documentation/hwmon/dme1737.rst
F: drivers/hwmon/dme1737.c
DMI/SMBIOS SUPPORT
@@ -5271,7 +5284,7 @@ DRM DRIVERS FOR VIVANTE GPU IP
M: Lucas Stach <l.stach@pengutronix.de>
R: Russell King <linux+etnaviv@armlinux.org.uk>
R: Christian Gmeiner <christian.gmeiner@gmail.com>
-L: etnaviv@lists.freedesktop.org
+L: etnaviv@lists.freedesktop.org (moderated for non-subscribers)
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/etnaviv/
@@ -5589,6 +5602,12 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/ghes_edac.c
+EDAC-I10NM
+M: Tony Luck <tony.luck@intel.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/i10nm_base.c
+
EDAC-I3000
L: linux-edac@vger.kernel.org
S: Orphan
@@ -5670,7 +5689,7 @@ EDAC-SKYLAKE
M: Tony Luck <tony.luck@intel.com>
L: linux-edac@vger.kernel.org
S: Maintained
-F: drivers/edac/skx_edac.c
+F: drivers/edac/skx_*.c
EDAC-TI
M: Tero Kristo <t-kristo@ti.com>
@@ -5826,7 +5845,7 @@ L: netdev@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-mdio
F: Documentation/devicetree/bindings/net/mdio*
-F: Documentation/networking/phy.txt
+F: Documentation/networking/phy.rst
F: drivers/net/phy/
F: drivers/of/of_mdio.c
F: drivers/of/of_net.c
@@ -5928,7 +5947,7 @@ F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/f71805f
+F: Documentation/hwmon/f71805f.rst
F: drivers/hwmon/f71805f.c
FADDR2LINE
@@ -5989,7 +6008,7 @@ S: Maintained
F: drivers/media/tuners/fc2580*
FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
-M: Johannes Thumshirn <jth@kernel.org>
+M: Hannes Reinecke <hare@suse.de>
L: linux-scsi@vger.kernel.org
W: www.Open-FCoE.org
S: Supported
@@ -6337,9 +6356,10 @@ F: include/linux/fscache*.h
FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
M: Theodore Y. Ts'o <tytso@mit.edu>
M: Jaegeuk Kim <jaegeuk@kernel.org>
+M: Eric Biggers <ebiggers@kernel.org>
L: linux-fscrypt@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-fscrypt/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/fscrypt.git
+T: git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git
S: Supported
F: fs/crypto/
F: include/linux/fscrypt*.h
@@ -6400,7 +6420,6 @@ L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
S: Maintained
F: kernel/futex.c
-F: kernel/futex_compat.c
F: include/asm-generic/futex.h
F: include/linux/futex.h
F: include/uapi/linux/futex.h
@@ -6453,7 +6472,7 @@ S: Maintained
F: drivers/media/radio/radio-gemtek*
GENERIC GPIO I2C DRIVER
-M: Haavard Skinnemoen <hskinnemoen@gmail.com>
+M: Wolfram Sang <wsa+renesas@sang-engineering.com>
S: Supported
F: drivers/i2c/busses/i2c-gpio.c
F: include/linux/platform_data/i2c-gpio.h
@@ -6585,7 +6604,7 @@ M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
L: linux-gpio@vger.kernel.org
L: linux-acpi@vger.kernel.org
S: Maintained
-F: Documentation/acpi/gpio-properties.txt
+F: Documentation/firmware-guide/acpi/gpio-properties.rst
F: drivers/gpio/gpiolib-acpi.c
GPIO IR Transmitter
@@ -7167,6 +7186,7 @@ F: drivers/net/hyperv/
F: drivers/scsi/storvsc_drv.c
F: drivers/uio/uio_hv_generic.c
F: drivers/video/fbdev/hyperv_fb.c
+F: drivers/iommu/hyperv_iommu.c
F: net/vmw_vsock/hyperv_transport.c
F: include/linux/hyperv.h
F: include/uapi/linux/hyperv.h
@@ -7324,7 +7344,6 @@ F: Documentation/devicetree/bindings/i3c/
F: Documentation/driver-api/i3c
F: drivers/i3c/
F: include/linux/i3c/
-F: include/dt-bindings/i3c/
I3C DRIVER FOR SYNOPSYS DESIGNWARE
M: Vitor Soares <vitor.soares@synopsys.com>
@@ -7507,7 +7526,7 @@ F: include/net/mac802154.h
F: include/net/af_ieee802154.h
F: include/net/cfg802154.h
F: include/net/ieee802154_netdev.h
-F: Documentation/networking/ieee802154.txt
+F: Documentation/networking/ieee802154.rst
IFE PROTOCOL
M: Yotam Gigi <yotam.gi@gmail.com>
@@ -7609,7 +7628,7 @@ INA209 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/ina209
+F: Documentation/hwmon/ina209.rst
F: Documentation/devicetree/bindings/hwmon/ina2xx.txt
F: drivers/hwmon/ina209.c
@@ -7617,7 +7636,7 @@ INA2XX HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/ina2xx
+F: Documentation/hwmon/ina2xx.rst
F: drivers/hwmon/ina2xx.c
F: include/linux/platform_data/ina2xx.h
@@ -7851,7 +7870,6 @@ M: Yong Zhi <yong.zhi@intel.com>
M: Sakari Ailus <sakari.ailus@linux.intel.com>
M: Bingbu Cao <bingbu.cao@intel.com>
R: Tian Shu Qiu <tian.shu.qiu@intel.com>
-R: Jian Xu Zheng <jian.xu.zheng@intel.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/pci/intel/ipu3/
@@ -8088,6 +8106,16 @@ F: include/linux/iommu.h
F: include/linux/of_iommu.h
F: include/linux/iova.h
+IO_URING
+M: Jens Axboe <axboe@kernel.dk>
+L: linux-block@vger.kernel.org
+L: linux-fsdevel@vger.kernel.org
+T: git git://git.kernel.dk/linux-block
+T: git git://git.kernel.dk/liburing
+S: Maintained
+F: fs/io_uring.c
+F: include/uapi/linux/io_uring.h
+
IP MASQUERADING
M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
S: Maintained
@@ -8236,7 +8264,7 @@ IT87 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/it87
+F: Documentation/hwmon/it87.rst
F: drivers/hwmon/it87.c
IT913X MEDIA DRIVER
@@ -8280,7 +8308,7 @@ M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/jc42.c
-F: Documentation/hwmon/jc42
+F: Documentation/hwmon/jc42.rst
JFS FILESYSTEM
M: Dave Kleikamp <shaggy@kernel.org>
@@ -8328,14 +8356,14 @@ K10TEMP HARDWARE MONITORING DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/k10temp
+F: Documentation/hwmon/k10temp.rst
F: drivers/hwmon/k10temp.c
K8TEMP HARDWARE MONITORING DRIVER
M: Rudolf Marek <r.marek@assembler.cz>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/k8temp
+F: Documentation/hwmon/k8temp.rst
F: drivers/hwmon/k8temp.c
KASAN
@@ -8453,6 +8481,7 @@ F: include/linux/kvm*
F: include/kvm/iodev.h
F: virt/kvm/*
F: tools/kvm/
+F: tools/testing/selftests/kvm/
KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
M: Joerg Roedel <joro@8bytes.org>
@@ -8462,29 +8491,25 @@ S: Maintained
F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
-KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
+KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
M: Christoffer Dall <christoffer.dall@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
+R: James Morse <james.morse@arm.com>
+R: Julien Thierry <julien.thierry@arm.com>
+R:	Suzuki K Poulose <suzuki.poulose@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
W: http://systems.cs.columbia.edu/projects/kvm-arm
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
-S: Supported
+S: Maintained
F: arch/arm/include/uapi/asm/kvm*
F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/
-F: virt/kvm/arm/
-F: include/kvm/arm_*
-
-KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
-M: Christoffer Dall <christoffer.dall@arm.com>
-M: Marc Zyngier <marc.zyngier@arm.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: kvmarm@lists.cs.columbia.edu
-S: Maintained
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/include/asm/kvm*
F: arch/arm64/kvm/
+F: virt/kvm/arm/
+F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
M: James Hogan <jhogan@kernel.org>
@@ -8693,6 +8718,7 @@ F: scripts/leaking_addresses.pl
LED SUBSYSTEM
M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
M: Pavel Machek <pavel@ucw.cz>
+R: Dan Murphy <dmurphy@ti.com>
L: linux-leds@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
S: Maintained
@@ -8807,7 +8833,6 @@ S: Maintained
F: tools/lib/lockdep/
LIBNVDIMM BLK: MMIO-APERTURE DRIVER
-M: Ross Zwisler <zwisler@kernel.org>
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
@@ -8820,7 +8845,6 @@ F: drivers/nvdimm/region_devs.c
LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
-M: Ross Zwisler <zwisler@kernel.org>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
@@ -8828,7 +8852,6 @@ S: Supported
F: drivers/nvdimm/btt*
LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
-M: Ross Zwisler <zwisler@kernel.org>
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
@@ -8847,9 +8870,10 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt
LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
M: Dan Williams <dan.j.williams@intel.com>
-M: Ross Zwisler <zwisler@kernel.org>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
+M: Keith Busch <keith.busch@intel.com>
+M: Ira Weiny <ira.weiny@intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
@@ -8980,7 +9004,7 @@ R: Daniel Lustig <dlustig@nvidia.com>
L: linux-kernel@vger.kernel.org
L: linux-arch@vger.kernel.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: tools/memory-model/
F: Documentation/atomic_bitops.txt
F: Documentation/atomic_t.txt
@@ -9031,21 +9055,21 @@ LM78 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/lm78
+F: Documentation/hwmon/lm78.rst
F: drivers/hwmon/lm78.c
LM83 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/lm83
+F: Documentation/hwmon/lm83.rst
F: drivers/hwmon/lm83.c
LM90 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/lm90
+F: Documentation/hwmon/lm90.rst
F: Documentation/devicetree/bindings/hwmon/lm90.txt
F: drivers/hwmon/lm90.c
F: include/dt-bindings/thermal/lm90.h
@@ -9054,7 +9078,7 @@ LM95234 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/lm95234
+F: Documentation/hwmon/lm95234.rst
F: drivers/hwmon/lm95234.c
LME2510 MEDIA DRIVER
@@ -9086,7 +9110,6 @@ F: arch/*/include/asm/spinlock*.h
F: include/linux/rwlock*.h
F: include/linux/mutex*.h
F: include/linux/rwsem*.h
-F: arch/*/include/asm/rwsem.h
F: include/linux/seqlock.h
F: lib/locking*.[ch]
F: kernel/locking/
@@ -9128,7 +9151,7 @@ LTC4261 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/ltc4261
+F: Documentation/hwmon/ltc4261.rst
F: drivers/hwmon/ltc4261.c
LTC4306 I2C MULTIPLEXER DRIVER
@@ -9359,7 +9382,7 @@ MAX16065 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/max16065
+F: Documentation/hwmon/max16065.rst
F: drivers/hwmon/max16065.c
MAX2175 SDR TUNER DRIVER
@@ -9375,14 +9398,14 @@ F: include/uapi/linux/max2175.h
MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
L: linux-hwmon@vger.kernel.org
S: Orphan
-F: Documentation/hwmon/max6650
+F: Documentation/hwmon/max6650.rst
F: drivers/hwmon/max6650.c
MAX6697 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/max6697
+F: Documentation/hwmon/max6697.rst
F: Documentation/devicetree/bindings/hwmon/max6697.txt
F: drivers/hwmon/max6697.c
F: include/linux/platform_data/max6697.h
@@ -9525,6 +9548,17 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/platform/imx-pxp.[ch]
+MEDIA DRIVERS FOR FREESCALE IMX7
+M: Rui Miguel Silva <rmfrfs@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: Documentation/devicetree/bindings/media/imx7-csi.txt
+F: Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt
+F: Documentation/media/v4l-drivers/imx7.rst
+F: drivers/staging/media/imx/imx7-media-csi.c
+F: drivers/staging/media/imx/imx7-mipi-csis.c
+
MEDIA DRIVERS FOR HELENE
M: Abylay Ospan <aospan@netup.ru>
L: linux-media@vger.kernel.org
@@ -9886,6 +9920,7 @@ M: Vadim Pasternak <vadimp@mellanox.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
F: drivers/platform/mellanox/
+F: include/linux/platform_data/mlxreg.h
MELLANOX MLX4 core VPI driver
M: Tariq Toukan <tariqt@mellanox.com>
@@ -10016,7 +10051,7 @@ F: drivers/mfd/menf21bmc.c
F: drivers/watchdog/menf21bmc_wdt.c
F: drivers/leds/leds-menf21bmc.c
F: drivers/hwmon/menf21bmc_hwmon.c
-F: Documentation/hwmon/menf21bmc
+F: Documentation/hwmon/menf21bmc.rst
MEN Z069 WATCHDOG DRIVER
M: Johannes Thumshirn <jth@kernel.org>
@@ -10120,7 +10155,7 @@ F: drivers/spi/spi-at91-usart.c
F: Documentation/devicetree/bindings/mfd/atmel-usart.txt
MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
-M: Woojung Huh <Woojung.Huh@microchip.com>
+M: Woojung Huh <woojung.huh@microchip.com>
M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
L: netdev@vger.kernel.org
S: Maintained
@@ -10644,7 +10679,7 @@ NCT6775 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/nct6775
+F: Documentation/hwmon/nct6775.rst
F: drivers/hwmon/nct6775.c
NET_FAILOVER MODULE
@@ -11440,6 +11475,19 @@ S: Maintained
F: drivers/media/i2c/ov7740.c
F: Documentation/devicetree/bindings/media/i2c/ov7740.txt
+OMNIVISION OV9640 SENSOR DRIVER
+M: Petr Cvek <petrcvekcz@gmail.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/ov9640.*
+
+OMNIVISION OV8856 SENSOR DRIVER
+M: Ben Kao <ben.kao@intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ov8856.c
+
OMNIVISION OV9650 SENSOR DRIVER
M: Sakari Ailus <sakari.ailus@linux.intel.com>
R: Akinobu Mita <akinobu.mita@gmail.com>
@@ -11604,13 +11652,6 @@ W: http://www.nongnu.org/orinoco/
S: Orphan
F: drivers/net/wireless/intersil/orinoco/
-OSD LIBRARY and FILESYSTEM
-M: Boaz Harrosh <ooo@electrozaur.com>
-S: Maintained
-F: drivers/scsi/osd/
-F: include/scsi/osd_*
-F: fs/exofs/
-
OV2659 OMNIVISION SENSOR DRIVER
M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
L: linux-media@vger.kernel.org
@@ -11732,7 +11773,7 @@ PC87360 HARDWARE MONITORING DRIVER
M: Jim Cromie <jim.cromie@gmail.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/pc87360
+F: Documentation/hwmon/pc87360.rst
F: drivers/hwmon/pc87360.c
PC8736x GPIO DRIVER
@@ -11744,7 +11785,7 @@ PC87427 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/pc87427
+F: Documentation/hwmon/pc87427.rst
F: drivers/hwmon/pc87427.c
PCA9532 LED DRIVER
@@ -11805,7 +11846,7 @@ F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
F: drivers/pci/controller/dwc/pcie-armada8k.c
PCI DRIVER FOR CADENCE PCIE IP
-M: Alan Douglas <adouglas@cadence.com>
+M: Tom Joseph <tjoseph@cadence.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/cdns,*.txt
@@ -12144,6 +12185,7 @@ F: arch/*/kernel/*/*/perf_event*.c
F: arch/*/include/asm/perf_event.h
F: arch/*/kernel/perf_callchain.c
F: arch/*/events/*
+F: arch/*/events/*/*
F: tools/perf/
PERSONALITY HANDLING
@@ -12312,23 +12354,23 @@ S: Maintained
F: Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
F: Documentation/devicetree/bindings/hwmon/max31785.txt
F: Documentation/devicetree/bindings/hwmon/ltc2978.txt
-F: Documentation/hwmon/adm1275
-F: Documentation/hwmon/ibm-cffps
-F: Documentation/hwmon/ir35221
-F: Documentation/hwmon/lm25066
-F: Documentation/hwmon/ltc2978
-F: Documentation/hwmon/ltc3815
-F: Documentation/hwmon/max16064
-F: Documentation/hwmon/max20751
-F: Documentation/hwmon/max31785
-F: Documentation/hwmon/max34440
-F: Documentation/hwmon/max8688
-F: Documentation/hwmon/pmbus
-F: Documentation/hwmon/pmbus-core
-F: Documentation/hwmon/tps40422
-F: Documentation/hwmon/ucd9000
-F: Documentation/hwmon/ucd9200
-F: Documentation/hwmon/zl6100
+F: Documentation/hwmon/adm1275.rst
+F: Documentation/hwmon/ibm-cffps.rst
+F: Documentation/hwmon/ir35221.rst
+F: Documentation/hwmon/lm25066.rst
+F: Documentation/hwmon/ltc2978.rst
+F: Documentation/hwmon/ltc3815.rst
+F: Documentation/hwmon/max16064.rst
+F: Documentation/hwmon/max20751.rst
+F: Documentation/hwmon/max31785.rst
+F: Documentation/hwmon/max34440.rst
+F: Documentation/hwmon/max8688.rst
+F: Documentation/hwmon/pmbus.rst
+F: Documentation/hwmon/pmbus-core.rst
+F: Documentation/hwmon/tps40422.rst
+F: Documentation/hwmon/ucd9000.rst
+F: Documentation/hwmon/ucd9200.rst
+F: Documentation/hwmon/zl6100.rst
F: drivers/hwmon/pmbus/
F: include/linux/pmbus.h
@@ -12384,7 +12426,7 @@ M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
-F: drivers/firmware/psci*.c
+F: drivers/firmware/psci/
F: include/linux/psci.h
F: include/uapi/linux/psci.h
@@ -12584,6 +12626,7 @@ L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Odd Fixes
F: drivers/media/usb/pwc/*
+F: include/trace/events/pwc.h
PWM FAN DRIVER
M: Kamil Debski <kamil@wypas.org>
@@ -12591,7 +12634,7 @@ M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
-F: Documentation/hwmon/pwm-fan
+F: Documentation/hwmon/pwm-fan.rst
F: drivers/hwmon/pwm-fan.c
PWM IR Transmitter
@@ -13009,9 +13052,9 @@ M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R: Lai Jiangshan <jiangshanlai@gmail.com>
-L: linux-kernel@vger.kernel.org
+L: rcu@vger.kernel.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: tools/testing/selftests/rcutorture
RDC R-321X SoC
@@ -13057,10 +13100,10 @@ R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R: Lai Jiangshan <jiangshanlai@gmail.com>
R: Joel Fernandes <joel@joelfernandes.org>
-L: linux-kernel@vger.kernel.org
+L: rcu@vger.kernel.org
W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: Documentation/RCU/
X: Documentation/RCU/torture.txt
F: include/linux/rcu*
@@ -13740,6 +13783,7 @@ M: "James E.J. Bottomley" <jejb@linux.ibm.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
M: "Martin K. Petersen" <martin.petersen@oracle.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+Q: https://patchwork.kernel.org/project/linux-scsi/list/
L: linux-scsi@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/scsi/
@@ -13754,6 +13798,18 @@ F: Documentation/scsi/st.txt
F: drivers/scsi/st.*
F: drivers/scsi/st_*.h
+SCSI TARGET SUBSYSTEM
+M: "Martin K. Petersen" <martin.petersen@oracle.com>
+L: linux-scsi@vger.kernel.org
+L: target-devel@vger.kernel.org
+W: http://www.linux-iscsi.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+Q: https://patchwork.kernel.org/project/target-devel/list/
+S: Supported
+F: drivers/target/
+F: include/target/
+F: Documentation/target/
+
SCTP PROTOCOL
M: Vlad Yasevich <vyasevich@gmail.com>
M: Neil Horman <nhorman@tuxdriver.com>
@@ -13937,7 +13993,7 @@ F: drivers/media/rc/serial_ir.c
SFC NETWORK DRIVER
M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
M: Edward Cree <ecree@solarflare.com>
-M: Bert Kenward <bkenward@solarflare.com>
+M: Martin Habets <mhabets@solarflare.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/sfc/
@@ -14199,10 +14255,10 @@ M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-L: linux-kernel@vger.kernel.org
+L: rcu@vger.kernel.org
W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: include/linux/srcu*.h
F: kernel/rcu/srcu*.c
@@ -14243,21 +14299,21 @@ SMM665 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/smm665
+F: Documentation/hwmon/smm665.rst
F: drivers/hwmon/smm665.c
SMSC EMC2103 HARDWARE MONITOR DRIVER
M: Steve Glendinning <steve.glendinning@shawell.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/emc2103
+F: Documentation/hwmon/emc2103.rst
F: drivers/hwmon/emc2103.c
SMSC SCH5627 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-hwmon@vger.kernel.org
S: Supported
-F: Documentation/hwmon/sch5627
+F: Documentation/hwmon/sch5627.rst
F: drivers/hwmon/sch5627.c
SMSC UFX6000 and UFX7000 USB to VGA DRIVER
@@ -14270,7 +14326,7 @@ SMSC47B397 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/smsc47b397
+F: Documentation/hwmon/smsc47b397.rst
F: drivers/hwmon/smsc47b397.c
SMSC911x ETHERNET DRIVER
@@ -15025,18 +15081,6 @@ F: Documentation/filesystems/sysv-fs.txt
F: fs/sysv/
F: include/linux/sysv_fs.h
-TARGET SUBSYSTEM
-M: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
-L: linux-scsi@vger.kernel.org
-L: target-devel@vger.kernel.org
-W: http://www.linux-iscsi.org
-W: http://groups.google.com/group/linux-iscsi-target-dev
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
-S: Supported
-F: drivers/target/
-F: include/target/
-F: Documentation/target/
-
TASKSTATS STATISTICS INTERFACE
M: Balbir Singh <bsingharora@gmail.com>
S: Maintained
@@ -15628,7 +15672,7 @@ TMP401 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/tmp401
+F: Documentation/hwmon/tmp401.rst
F: drivers/hwmon/tmp401.c
TMPFS (SHMEM FILESYSTEM)
@@ -15661,7 +15705,7 @@ M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
L: linux-kernel@vger.kernel.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: Documentation/RCU/torture.txt
F: kernel/torture.c
F: kernel/rcu/rcutorture.c
@@ -15934,14 +15978,16 @@ F: drivers/visorbus/
F: drivers/staging/unisys/
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
-M: Vinayak Holikatti <vinholikatti@gmail.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
+R: Avri Altman <avri.altman@wdc.com>
+R: Pedro Sousa <pedrom.sousa@synopsys.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: Documentation/scsi/ufs.txt
F: drivers/scsi/ufs/
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
-M: Joao Pinto <jpinto@synopsys.com>
+M: Pedro Sousa <pedrom.sousa@synopsys.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ufs/*dwc*
@@ -16474,7 +16520,7 @@ F: drivers/char/virtio_console.c
F: include/linux/virtio_console.h
F: include/uapi/linux/virtio_console.h
-VIRTIO CORE, NET AND BLOCK DRIVERS
+VIRTIO CORE AND NET DRIVERS
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
L: virtualization@lists.linux-foundation.org
@@ -16489,6 +16535,19 @@ F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/
F: mm/balloon_compaction.c
+VIRTIO BLOCK AND SCSI DRIVERS
+M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
+R: Paolo Bonzini <pbonzini@redhat.com>
+R: Stefan Hajnoczi <stefanha@redhat.com>
+L: virtualization@lists.linux-foundation.org
+S: Maintained
+F: drivers/block/virtio_blk.c
+F: drivers/scsi/virtio_scsi.c
+F: include/uapi/linux/virtio_blk.h
+F: include/uapi/linux/virtio_scsi.h
+F: drivers/vhost/scsi.c
+
VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
L: virtualization@lists.linux-foundation.org
@@ -16651,7 +16710,7 @@ VT1211 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/vt1211
+F: Documentation/hwmon/vt1211.rst
F: drivers/hwmon/vt1211.c
VT8231 HARDWARE MONITOR DRIVER
@@ -16679,14 +16738,14 @@ W83791D HARDWARE MONITORING DRIVER
M: Marc Hulsman <m.hulsman@tudelft.nl>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/w83791d
+F: Documentation/hwmon/w83791d.rst
F: drivers/hwmon/w83791d.c
W83793 HARDWARE MONITORING DRIVER
M: Rudolf Marek <r.marek@assembler.cz>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/w83793
+F: Documentation/hwmon/w83793.rst
F: drivers/hwmon/w83793.c
W83795 HARDWARE MONITORING DRIVER
@@ -16763,6 +16822,11 @@ M: David Härdeman <david@hardeman.nu>
S: Maintained
F: drivers/media/rc/winbond-cir.c
+RCMM REMOTE CONTROLS DECODER
+M: Patrick Lerda <patrick9876@free.fr>
+S: Maintained
+F: drivers/media/rc/ir-rcmm-decoder.c
+
WINSYSTEMS EBC-C384 WATCHDOG DRIVER
M: William Breathitt Gray <vilhelm.gray@gmail.com>
L: linux-watchdog@vger.kernel.org
@@ -16790,7 +16854,7 @@ L: patches@opensource.cirrus.com
T: git https://github.com/CirrusLogic/linux-drivers.git
W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported
-F: Documentation/hwmon/wm83??
+F: Documentation/hwmon/wm83??.rst
F: Documentation/devicetree/bindings/extcon/extcon-arizona.txt
F: Documentation/devicetree/bindings/regulator/arizona-regulator.txt
F: Documentation/devicetree/bindings/mfd/arizona.txt
@@ -16880,7 +16944,7 @@ M: Tony Luck <tony.luck@intel.com>
M: Borislav Petkov <bp@alien8.de>
L: linux-edac@vger.kernel.org
S: Maintained
-F: arch/x86/kernel/cpu/mcheck/*
+F: arch/x86/kernel/cpu/mce/*
X86 MICROCODE UPDATE SUPPORT
M: Borislav Petkov <bp@alien8.de>
diff --git a/Makefile b/Makefile
index f070e0d65186..f4b0ae1b5c6a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
-PATCHLEVEL = 0
+PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION =
NAME = Shy Crocodile
@@ -15,19 +15,6 @@ NAME = Shy Crocodile
PHONY := _all
_all:
-# Do not use make's built-in rules and variables
-# (this increases performance and avoids hard-to-debug behaviour)
-MAKEFLAGS += -rR
-
-# Avoid funny character set dependencies
-unexport LC_ALL
-LC_COLLATE=C
-LC_NUMERIC=C
-export LC_COLLATE LC_NUMERIC
-
-# Avoid interference with shell env settings
-unexport GREP_OPTIONS
-
# We are using a recursive build, so we need to do a little thinking
# to get the ordering right.
#
@@ -44,6 +31,21 @@ unexport GREP_OPTIONS
# descending is started. They are now explicitly listed as the
# prepare rule.
+ifneq ($(sub_make_done),1)
+
+# Do not use make's built-in rules and variables
+# (this increases performance and avoids hard-to-debug behaviour)
+MAKEFLAGS += -rR
+
+# Avoid funny character set dependencies
+unexport LC_ALL
+LC_COLLATE=C
+LC_NUMERIC=C
+export LC_COLLATE LC_NUMERIC
+
+# Avoid interference with shell env settings
+unexport GREP_OPTIONS
+
# Beautify output
# ---------------------------------------------------------------------------
#
@@ -90,7 +92,6 @@ endif
ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
quiet=silent_
- tools_silent=s
endif
export quiet Q KBUILD_VERBOSE
@@ -112,7 +113,6 @@ export quiet Q KBUILD_VERBOSE
# KBUILD_SRC is not intended to be used by the regular user (for now),
# it is set on invocation of make with KBUILD_OUTPUT or O= specified.
-ifeq ($(KBUILD_SRC),)
# OK, Make called in directory where kernel src resides
# Do we want to locate output files in a separate directory?
@@ -120,9 +120,6 @@ ifeq ("$(origin O)", "command line")
KBUILD_OUTPUT := $(O)
endif
-# Cancel implicit rules on top Makefile
-$(CURDIR)/Makefile Makefile: ;
-
ifneq ($(words $(subst :, ,$(CURDIR))), 1)
$(error main directory cannot contain spaces nor colons)
endif
@@ -142,6 +139,26 @@ $(if $(KBUILD_OUTPUT),, \
# 'sub-make' below.
MAKEFLAGS += --include-dir=$(CURDIR)
+need-sub-make := 1
+else
+
+# Do not print "Entering directory ..." at all for in-tree build.
+MAKEFLAGS += --no-print-directory
+
+endif # ifneq ($(KBUILD_OUTPUT),)
+
+ifneq ($(filter 3.%,$(MAKE_VERSION)),)
+# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
+# We need to invoke sub-make to avoid implicit rules in the top Makefile.
+need-sub-make := 1
+# Cancel implicit rules for this Makefile.
+$(lastword $(MAKEFILE_LIST)): ;
+endif
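A note on the idiom above (an illustration, not part of the patch): in GNU Make, a rule with an empty recipe marks the target as explicitly handled, so Make stops searching its implicit rules to rebuild it — which is what the $(lastword $(MAKEFILE_LIST)) line does for this Makefile under Make 3.x, where -rR is not yet effective. A minimal sketch with a hypothetical target name:

	# empty recipe: Make no longer tries an implicit rule for this file
	Makefile: ;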
+
+export sub_make_done := 1
+
+ifeq ($(need-sub-make),1)
+
PHONY += $(MAKECMDGOALS) sub-make
$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@@ -149,16 +166,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
# Invoke a second make in the output directory, passing relevant variables
sub-make:
- $(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \
+ $(Q)$(MAKE) \
+ $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
-f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
-# Leave processing to above invocation of make
-skip-makefile := 1
-endif # ifneq ($(KBUILD_OUTPUT),)
-endif # ifeq ($(KBUILD_SRC),)
+endif # need-sub-make
+endif # sub_make_done
# We process the rest of the Makefile if this is the final invocation of make
-ifeq ($(skip-makefile),)
+ifeq ($(need-sub-make),)
# Do not print "Entering directory ...",
# but we want to display it when entering to the output directory
@@ -215,7 +231,7 @@ objtree := .
src := $(srctree)
obj := $(objtree)
-VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD))
+VPATH := $(srctree)
export srctree objtree VPATH
@@ -300,8 +316,6 @@ __build_one_by_one:
else
-# We need some generic definitions (do not try to remake the file).
-scripts/Kbuild.include: ;
include scripts/Kbuild.include
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
@@ -390,7 +404,6 @@ OBJDUMP = $(CROSS_COMPILE)objdump
LEX = flex
YACC = bison
AWK = awk
-GENKSYMS = scripts/genksyms/genksyms
INSTALLKERNEL := installkernel
DEPMOD = /sbin/depmod
PERL = perl
@@ -401,7 +414,7 @@ CHECK = sparse
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
-NOSTDINC_FLAGS =
+NOSTDINC_FLAGS :=
CFLAGS_MODULE =
AFLAGS_MODULE =
LDFLAGS_MODULE =
@@ -429,7 +442,7 @@ LINUXINCLUDE := \
KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE
KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \
- -Werror-implicit-function-declaration -Werror=implicit-int \
+ -Werror=implicit-function-declaration -Werror=implicit-int \
-Wno-format-security \
-std=gnu89
KBUILD_CPPFLAGS := -D__KERNEL__
@@ -443,7 +456,7 @@ GCC_PLUGINS_CFLAGS :=
export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
-export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
+export MAKE LEX YACC AWK INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
@@ -476,23 +489,24 @@ scripts_basic:
$(Q)$(MAKE) $(build)=scripts/basic
$(Q)rm -f .tmp_quiet_recordmcount
-# To avoid any implicit rule to kick in, define an empty command.
-scripts/basic/%: scripts_basic ;
-
PHONY += outputmakefile
# outputmakefile generates a Makefile in the output directory, if using a
# separate output directory. This allows convenient use of make in the
# output directory.
+# When the output Makefile is generated, also generate a .gitignore that
+# ignores the whole output directory
outputmakefile:
ifneq ($(KBUILD_SRC),)
$(Q)ln -fsn $(srctree) source
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
+ $(Q)test -e .gitignore || \
+ { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
endif
ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
ifneq ($(CROSS_COMPILE),)
CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
@@ -624,13 +638,22 @@ ifeq ($(may-sync-config),1)
# because some architectures define CROSS_COMPILE there.
-include include/config/auto.conf.cmd
-# To avoid any implicit rule to kick in, define an empty command
-$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
+$(KCONFIG_CONFIG):
+ @echo >&2 '***'
+ @echo >&2 '*** Configuration file "$@" not found!'
+ @echo >&2 '***'
+ @echo >&2 '*** Please run some configurator (e.g. "make oldconfig" or'
+ @echo >&2 '*** "make menuconfig" or "make xconfig").'
+ @echo >&2 '***'
+ @/bin/false
# The actual configuration files used during the build are stored in
# include/generated/ and include/config/. Update them if .config is newer than
# include/config/auto.conf (which mirrors .config).
-include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
+#
+# This exploits the 'multi-target pattern rule' trick.
+# The syncconfig should be executed only once to make all the targets.
+%/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG)
$(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
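The "multi-target pattern rule" trick named in the comment is documented GNU Make behavior: when a pattern rule lists several targets, one execution of the recipe is assumed to update all of them, whereas an explicit rule with several targets runs its recipe once per target. A minimal sketch with hypothetical file names:

	# a single run of ./gen is assumed to produce both out.c and out.h
	%.c %.h: %.def
		./gen $<

Here that property guarantees syncconfig runs only once even when auto.conf, auto.conf.cmd and tristate.conf are all out of date.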
else
# External modules and some install targets need include/generated/autoconf.h
@@ -655,20 +678,17 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
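For orientation, a hedged C sketch of what -Waddress-of-packed-member fires on (struct and names are made up):

	struct __attribute__((packed)) hdr {
		unsigned char tag;
		unsigned int  len;	/* may land at a misaligned offset */
	} h;

	/* taking the address of a packed member can yield a misaligned
	 * pointer; gcc 9 and clang both warn here, and the kernel does this
	 * deliberately, hence the global cc-disable-warning above (the
	 * clang-only copy of the same option is removed further below) */
	unsigned int *plen = &h.len;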
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
-else
-ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += -Os
else
KBUILD_CFLAGS += -O2
endif
-endif
-KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
- $(call cc-disable-warning,maybe-uninitialized,))
+ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED
+KBUILD_CFLAGS += -Wno-maybe-uninitialized
+endif
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
@@ -700,7 +720,6 @@ ifdef CONFIG_CC_IS_CLANG
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
# Quiet clang warning: comparison of unsigned expression < 0 is always false
KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
@@ -729,25 +748,28 @@ KBUILD_CFLAGS += -fomit-frame-pointer
endif
endif
-KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
+DEBUG_CFLAGS := $(call cc-option, -fno-var-tracking-assignments)
ifdef CONFIG_DEBUG_INFO
ifdef CONFIG_DEBUG_INFO_SPLIT
-KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
+DEBUG_CFLAGS += -gsplit-dwarf
else
-KBUILD_CFLAGS += -g
+DEBUG_CFLAGS += -g
endif
KBUILD_AFLAGS += -Wa,-gdwarf-2
endif
ifdef CONFIG_DEBUG_INFO_DWARF4
-KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
+DEBUG_CFLAGS += -gdwarf-4
endif
ifdef CONFIG_DEBUG_INFO_REDUCED
-KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
+DEBUG_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
$(call cc-option,-fno-var-tracking)
endif
+KBUILD_CFLAGS += $(DEBUG_CFLAGS)
+export DEBUG_CFLAGS
+
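Collecting the debug options into a single DEBUG_CFLAGS and exporting it, rather than appending each flag to KBUILD_CFLAGS in place, presumably lets other parts of the build reuse exactly the debug-info flags the kernel proper was compiled with. A hypothetical consumer, illustration only:

	# hypothetical sub-Makefile fragment reusing the exported flags
	CFLAGS_my_helper.o += $(DEBUG_CFLAGS)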
ifdef CONFIG_FUNCTION_TRACER
ifdef CONFIG_FTRACE_MCOUNT_RECORD
# gcc 5 supports generating the mcount tables directly
@@ -789,6 +811,10 @@ KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
LDFLAGS_vmlinux += --gc-sections
endif
+ifdef CONFIG_LIVEPATCH
+KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone)
+endif
+
# arch Makefile may override CC so keep this after arch Makefile is included
NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
@@ -922,19 +948,6 @@ ifdef CONFIG_MODULE_COMPRESS
endif # CONFIG_MODULE_COMPRESS
export mod_compress_cmd
-# Select initial ramdisk compression format, default is gzip(1).
-# This shall be used by the dracut(8) tool while creating an initramfs image.
-#
-INITRD_COMPRESS-y := gzip
-INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
-INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma
-INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz
-INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo
-INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4
-# do not export INITRD_COMPRESS, since we didn't actually
-# choose a sane default compression above.
-# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
-
ifdef CONFIG_MODULE_SIG_ALL
$(eval $(call config_filename,MODULE_SIG_KEY))
@@ -944,9 +957,11 @@ mod_sign_cmd = true
endif
export mod_sign_cmd
+HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
ifdef CONFIG_STACK_VALIDATION
has_libelf := $(call try-run,\
- echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
+ echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
ifeq ($(has_libelf),1)
objtool_target := tools/objtool FORCE
else
@@ -976,15 +991,15 @@ libs-y2 := $(patsubst %/, %/built-in.a, $(filter-out %.a, $(libs-y)))
virt-y := $(patsubst %/, %/built-in.a, $(virt-y))
# Externally visible symbols (used by link-vmlinux.sh)
-export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
-export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-y)
+export KBUILD_VMLINUX_OBJS := $(head-y) $(init-y) $(core-y) $(libs-y2) \
+ $(drivers-y) $(net-y) $(virt-y)
export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
# used by scripts/package/Makefile
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)
-vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS)
+vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
# Recurse until adjust_autoksyms.sh is satisfied
PHONY += autoksyms_recursive
@@ -1015,9 +1030,6 @@ cmd_link-vmlinux = \
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
vmlinux: scripts/link-vmlinux.sh autoksyms_recursive $(vmlinux-deps) FORCE
-ifdef CONFIG_GDB_SCRIPTS
- $(Q)ln -fsn $(abspath $(srctree)/scripts/gdb/vmlinux-gdb.py)
-endif
+$(call if_changed,link-vmlinux)
targets := vmlinux
@@ -1062,7 +1074,7 @@ scripts: scripts_basic scripts_dtc
# archprepare is used in arch Makefiles and when processed asm symlink,
# version.h and scripts_basic is processed / created.
-PHONY += prepare archprepare prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare1 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
@@ -1077,12 +1089,8 @@ ifneq ($(KBUILD_SRC),)
fi;
endif
-# prepare2 creates a makefile if using a separate output directory.
-# From this point forward, .config has been reprocessed, so any rules
-# that need to depend on updated CONFIG_* values can be checked here.
-prepare2: prepare3 outputmakefile asm-generic
-
-prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h
+prepare1: prepare3 outputmakefile asm-generic $(version_h) $(autoksyms_h) \
+ include/generated/utsrelease.h
$(cmd_crmodverdir)
archprepare: archheaders archscripts prepare1 scripts
@@ -1099,9 +1107,11 @@ asm-generic := -f $(srctree)/scripts/Makefile.asm-generic obj
PHONY += asm-generic uapi-asm-generic
asm-generic: uapi-asm-generic
- $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/asm
+ $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/asm \
+ generic=include/asm-generic
uapi-asm-generic:
- $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm
+ $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \
+ generic=include/uapi/asm-generic
PHONY += prepare-objtool
prepare-objtool: $(objtool_target)
@@ -1517,6 +1527,18 @@ PHONY += $(DOC_TARGETS)
$(DOC_TARGETS): scripts_basic FORCE
$(Q)$(MAKE) $(build)=Documentation $@
+# Misc
+# ---------------------------------------------------------------------------
+
+PHONY += scripts_gdb
+scripts_gdb: prepare
+ $(Q)$(MAKE) $(build)=scripts/gdb
+ $(Q)ln -fsn $(abspath $(srctree)/scripts/gdb/vmlinux-gdb.py)
+
+ifdef CONFIG_GDB_SCRIPTS
+all: scripts_gdb
+endif
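With CONFIG_GDB_SCRIPTS enabled the new target now hangs off "all"; it can also be invoked directly to build the GDB helpers and symlink vmlinux-gdb.py into the build directory (usage sketch):

	make scripts_gdb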
+
else # KBUILD_EXTMOD
###
@@ -1668,6 +1690,11 @@ image_name:
@echo $(KBUILD_IMAGE)
# Clear a bunch of variables before executing the submake
+
+ifeq ($(quiet),silent_)
+tools_silent=s
+endif
+
tools/: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(src)/tools/
@@ -1686,45 +1713,32 @@ tools/%: FORCE
# target-dir => where to store outputfile
# build-dir => directory in kernel source tree to use
-ifeq ($(KBUILD_EXTMOD),)
- build-dir = $(patsubst %/,%,$(dir $@))
- target-dir = $(dir $@)
-else
- zap-slash=$(filter-out .,$(patsubst %/,%,$(dir $@)))
- build-dir = $(KBUILD_EXTMOD)$(if $(zap-slash),/$(zap-slash))
- target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
-endif
-
-%.s: %.c prepare FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.i: %.c prepare FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.lst: %.c prepare FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.symtypes: %.c prepare FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.ll: %.c prepare FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+build-target = $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD)/)$@
+build-dir = $(patsubst %/,%,$(dir $(build-target)))
+
+%.i: prepare FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
+%.ll: prepare FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
+%.lst: prepare FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
+%.o: prepare FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
+%.s: prepare FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
+%.symtypes: prepare FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
+%.ko: %.o
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
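These are the single-target rules used when a developer builds one file straight from the top level; the new %.ko rule first makes the matching %.o through the pattern rule above it, then runs modpost. For example (hypothetical invocation):

	make drivers/net/dummy.ko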
# Modules
-/: prepare FORCE
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
- $(build)=$(build-dir)
+PHONY += /
+/: ./
+
# Make sure the latest headers are built for Documentation
Documentation/ samples/: headers_install
%/: prepare FORCE
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
- $(build)=$(build-dir)
-%.ko: prepare FORCE
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
- $(build)=$(build-dir) $(@:.ko=.o)
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+ $(Q)$(MAKE) KBUILD_MODULES=1 $(build)=$(build-dir)
# FIXME Should go into a make.lib or something
# ===========================================================================
@@ -1748,13 +1762,11 @@ cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
# read saved command lines for existing targets
existing-targets := $(wildcard $(sort $(targets)))
-cmd_files := $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
-$(cmd_files): ; # Do not try to update included dependency files
--include $(cmd_files)
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
endif # ifeq ($(config-targets),1)
endif # ifeq ($(mixed-targets),1)
-endif # skip-makefile
+endif # need-sub-make
PHONY += FORCE
FORCE:
diff --git a/arch/Kconfig b/arch/Kconfig
index 33687dddd86a..5e43fcbad4ca 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -249,6 +249,10 @@ config ARCH_HAS_FORTIFY_SOURCE
config ARCH_HAS_SET_MEMORY
bool
+# Select if arch has all set_direct_map_invalid/default() functions
+config ARCH_HAS_SET_DIRECT_MAP
+ bool
+
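A sketch of the interface this symbol is expected to advertise; the prototypes below are an assumption based on the helper names in the comment, not part of this hunk:

	/* assumed per-arch helpers behind ARCH_HAS_SET_DIRECT_MAP */
	int set_direct_map_invalid_noflush(struct page *page);
	int set_direct_map_default_noflush(struct page *page);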
# Select if arch init_task must go in the __init_task_data section
config ARCH_TASK_STRUCT_ON_STACK
bool
@@ -383,7 +387,13 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
config HAVE_RCU_TABLE_FREE
bool
-config HAVE_RCU_TABLE_INVALIDATE
+config HAVE_RCU_TABLE_NO_INVALIDATE
+ bool
+
+config HAVE_MMU_GATHER_PAGE_SIZE
+ bool
+
+config HAVE_MMU_GATHER_NO_GATHER
bool
config ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -901,6 +911,15 @@ config HAVE_ARCH_PREL32_RELOCATIONS
config ARCH_USE_MEMREMAP_PROT
bool
+config LOCK_EVENT_COUNTS
+ bool "Locking event counts collection"
+ depends on DEBUG_FS
+ ---help---
+ Enable light-weight counting of various locking related events
+ in the system with minimal performance impact. This reduces
+ the chance of application behavior change because of timing
+ differences. The counts are reported via debugfs.
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 584a6e114853..f7b19b813a70 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -36,6 +36,7 @@ config ALPHA
select ODD_RT_SIGACTION
select OLD_SIGSUSPEND
select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
+ select MMU_GATHER_NO_RANGE
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
@@ -49,13 +50,6 @@ config MMU
bool
default y
-config RWSEM_GENERIC_SPINLOCK
- bool
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y
-
config ARCH_HAS_ILOG2_U32
bool
default n
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index dc0ab28baca1..89e87bbc987f 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -6,8 +6,10 @@ generic-y += exec.h
generic-y += export.h
generic-y += fb.h
generic-y += irq_work.h
+generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 4c533fc94d62..ccf9d65166bb 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -513,8 +513,6 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define writel_relaxed(b, addr) __raw_writel(b, addr)
#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
-#define mmiowb()
-
/*
* String version of IO memory access ops:
*/
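Dropping the empty alpha mmiowb() follows from the generic mmiowb.h added to the Kbuild list above: the ordering drivers used to request by hand is now folded into the spinlock release. A hedged before/after sketch of a driver unlock path (illustrative only):

	/* before: the driver ordered its MMIO write against the unlock */
	writel(val, base + REG);
	mmiowb();
	spin_unlock(&lock);

	/* after: generic mmiowb tracking performs this in spin_unlock() */
	writel(val, base + REG);
	spin_unlock(&lock);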
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
deleted file mode 100644
index cf8fc8f9a2ed..000000000000
--- a/arch/alpha/include/asm/rwsem.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_RWSEM_H
-#define _ALPHA_RWSEM_H
-
-/*
- * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
- */
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#include <linux/compiler.h>
-
-#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
-#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
-#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
-#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-static inline int ___down_read(struct rw_semaphore *sem)
-{
- long oldcount;
-#ifndef CONFIG_SMP
- oldcount = sem->count.counter;
- sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
-#else
- long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- " mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
- :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
- return (oldcount < 0);
-}
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
- if (unlikely(___down_read(sem)))
- rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
- if (unlikely(___down_read(sem)))
- if (IS_ERR(rwsem_down_read_failed_killable(sem)))
- return -EINTR;
-
- return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- long old, new, res;
-
- res = atomic_long_read(&sem->count);
- do {
- new = res + RWSEM_ACTIVE_READ_BIAS;
- if (new <= 0)
- break;
- old = res;
- res = atomic_long_cmpxchg(&sem->count, old, new);
- } while (res != old);
- return res >= 0 ? 1 : 0;
-}
-
-static inline long ___down_write(struct rw_semaphore *sem)
-{
- long oldcount;
-#ifndef CONFIG_SMP
- oldcount = sem->count.counter;
- sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
-#else
- long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- " mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
- :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
- return oldcount;
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
- if (unlikely(___down_write(sem)))
- rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
- if (unlikely(___down_write(sem))) {
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
- return -EINTR;
- }
-
- return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
- if (ret == RWSEM_UNLOCKED_VALUE)
- return 1;
- return 0;
-}
-
-static inline void __up_read(struct rw_semaphore *sem)
-{
- long oldcount;
-#ifndef CONFIG_SMP
- oldcount = sem->count.counter;
- sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
-#else
- long temp;
- __asm__ __volatile__(
- " mb\n"
- "1: ldq_l %0,%1\n"
- " subq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
- :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
- if (unlikely(oldcount < 0))
- if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
- rwsem_wake(sem);
-}
-
-static inline void __up_write(struct rw_semaphore *sem)
-{
- long count;
-#ifndef CONFIG_SMP
- sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
- count = sem->count.counter;
-#else
- long temp;
- __asm__ __volatile__(
- " mb\n"
- "1: ldq_l %0,%1\n"
- " subq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- " subq %0,%3,%0\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (count), "=m" (sem->count), "=&r" (temp)
- :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
- if (unlikely(count))
- if ((int)count == 0)
- rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- long oldcount;
-#ifndef CONFIG_SMP
- oldcount = sem->count.counter;
- sem->count.counter -= RWSEM_WAITING_BIAS;
-#else
- long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- " mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
- :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
-#endif
- if (unlikely(oldcount < 0))
- rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ALPHA_RWSEM_H */
diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h
index 8f5042b61875..4f79e331af5e 100644
--- a/arch/alpha/include/asm/tlb.h
+++ b/arch/alpha/include/asm/tlb.h
@@ -2,12 +2,6 @@
#ifndef _ALPHA_TLB_H
#define _ALPHA_TLB_H
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild
index 439f5157aa35..7417847dc438 100644
--- a/arch/alpha/include/uapi/asm/Kbuild
+++ b/arch/alpha/include/uapi/asm/Kbuild
@@ -1,3 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_32.h
diff --git a/arch/alpha/include/uapi/asm/kvm_para.h b/arch/alpha/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/alpha/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 0d0fddb7e738..976e89b116e5 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -2,8 +2,8 @@
#ifndef _UAPI_ASM_SOCKET_H
#define _UAPI_ASM_SOCKET_H
+#include <linux/posix_types.h>
#include <asm/sockios.h>
-#include <asm/bitsperlong.h>
/* For setsockopt(2) */
/*
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index 867e8730b0c5..f489170201c3 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -331,7 +331,10 @@ cia_prepare_tbia_workaround(int window)
long i;
/* Use minimal 1K map. */
- ppte = memblock_alloc_from(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+ ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768);
+ if (!ppte)
+ panic("%s: Failed to allocate %u bytes align=0x%x\n",
+ __func__, CIA_BROKEN_TBIA_SIZE, 32768);
pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
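The alpha hunks in this and the following files all apply one pattern: memblock_alloc() can return NULL, so each early-boot caller now checks the result and panics with a uniform message instead of dereferencing a NULL pointer later. The pattern, distilled:

	ptr = memblock_alloc(size, align);
	if (!ptr)
		panic("%s: Failed to allocate %zu bytes\n", __func__, size);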
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index c1d0c18c71ca..1db9d0eb2922 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -83,6 +83,9 @@ mk_resource_name(int pe, int port, char *str)
sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES);
+ if (!name)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ strlen(tmp) + 1);
strcpy(name, tmp);
return name;
@@ -118,6 +121,9 @@ alloc_io7(unsigned int pe)
}
io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES);
+ if (!io7)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*io7));
io7->pe = pe;
raw_spin_lock_init(&io7->irq_lock);
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 091cff3c68fd..ae82061edae9 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -34,6 +34,9 @@ alloc_pci_controller(void)
struct pci_controller *hose;
hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
+ if (!hose)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*hose));
*hose_tail = hose;
hose_tail = &hose->next;
@@ -44,7 +47,13 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
+
+ return ptr;
}
SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
@@ -54,7 +63,7 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
/* from hose or from bus.devfn */
if (which & IOBASE_FROM_HOSE) {
- for (hose = hose_head; hose; hose = hose->next)
+ for (hose = hose_head; hose; hose = hose->next)
if (hose->index == bus)
break;
if (!hose)
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 97098127df83..64fbfb0763b2 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -393,6 +393,9 @@ alloc_pci_controller(void)
struct pci_controller *hose;
hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
+ if (!hose)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*hose));
*hose_tail = hose;
hose_tail = &hose->next;
@@ -403,7 +406,13 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
+
+ return ptr;
}
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 4a2ae862b19e..242108439f42 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -80,6 +80,9 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
" falling back to system-wide allocation\n",
__func__, nid);
arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
+ if (!arena)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*arena));
}
arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
@@ -87,13 +90,22 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
printk("%s: couldn't allocate arena ptes from node %d\n"
" falling back to system-wide allocation\n",
__func__, nid);
- arena->ptes = memblock_alloc_from(mem_size, align, 0);
+ arena->ptes = memblock_alloc(mem_size, align);
+ if (!arena->ptes)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, mem_size, align);
}
#else /* CONFIG_DISCONTIGMEM */
arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
- arena->ptes = memblock_alloc_from(mem_size, align, 0);
+ if (!arena)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*arena));
+ arena->ptes = memblock_alloc(mem_size, align);
+ if (!arena->ptes)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, mem_size, align);
#endif /* CONFIG_DISCONTIGMEM */
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 4b5b1b244f86..5d4c76a77a9f 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -293,7 +293,7 @@ move_initrd(unsigned long mem_limit)
unsigned long size;
size = initrd_end - initrd_start;
- start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0);
+ start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
if (!start || __pa(start) + size > mem_limit) {
initrd_start = initrd_end = 0;
return NULL;
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 63ed39cbd3bd..165f268beafc 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -463,3 +463,7 @@
532 common getppid sys_getppid
# all other architectures have common numbers for new syscall, alpha
# is the exception.
+534 common pidfd_send_signal sys_pidfd_send_signal
+535 common io_uring_setup sys_io_uring_setup
+536 common io_uring_enter sys_io_uring_enter
+537 common io_uring_register sys_io_uring_register
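For orientation, the raw prototypes behind the four new entries, as merged this cycle (shown as an aid, not part of the table):

	int pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
			      unsigned int flags);
	int io_uring_setup(u32 entries, struct io_uring_params *p);
	int io_uring_enter(unsigned int fd, u32 to_submit, u32 min_complete,
			   u32 flags, const sigset_t *sig, size_t sigsz);
	int io_uring_register(unsigned int fd, unsigned int opcode,
			      void *arg, unsigned int nr_args);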
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2061b652d9c3..23e063df5d2c 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -11,6 +11,7 @@ config ARC
select ARC_TIMERS
select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
@@ -31,7 +32,6 @@ config ARC
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_FUTEX_CMPXCHG if FUTEX
- select HAVE_GENERIC_DMA_COHERENT
select HAVE_IOREMAP_PROT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
@@ -45,7 +45,6 @@ config ARC
select MODULES_USE_ELF_RELA
select OF
select OF_EARLY_FLATTREE
- select OF_RESERVED_MEM
select PCI_SYSCALL if PCI
select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
@@ -64,9 +63,6 @@ config SCHED_OMIT_FRAME_POINTER
config GENERIC_CSUM
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config ARCH_DISCONTIGMEM_ENABLE
def_bool n
@@ -145,11 +141,11 @@ config ARC_CPU_770
Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
This core has a bunch of cool new features:
-MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
- Shared Address Spaces (for sharing TLB entries in MMU)
+ Shared Address Spaces (for sharing TLB entries in MMU)
-Caches: New Prog Model, Region Flush
-Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
-endif #ISA_ARCOMPACT
+endif #ISA_ARCOMPACT
config ARC_CPU_HS
bool "ARC-HS"
@@ -199,7 +195,7 @@ config ARC_SMP_HALT_ON_RESET
at designated entry point. For other case, all jump to common
entry point and spin wait for Master's signal.
-endif #SMP
+endif #SMP
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
@@ -250,7 +246,7 @@ config ARC_CACHE_VIPT_ALIASING
bool "Support VIPT Aliasing D$"
depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
-endif #ARC_CACHE
+endif #ARC_CACHE
config ARC_HAS_ICCM
bool "Use ICCM"
@@ -371,7 +367,7 @@ config ARC_FPU_SAVE_RESTORE
based on actual usage of FPU by a task. Thus our implemn does
this for all tasks in system.
-endif #ISA_ARCOMPACT
+endif #ISA_ARCOMPACT
config ARC_CANT_LLSC
def_bool n
@@ -387,6 +383,15 @@ config ARC_HAS_SWAPE
if ISA_ARCV2
+config ARC_USE_UNALIGNED_MEM_ACCESS
+ bool "Enable unaligned access in HW"
+ default y
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ help
+	  The ARC HS architecture supports unaligned memory access
+	  which is disabled by default. Enable unaligned access in
+	  hardware and allow software to make use of it.
+
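Selecting HAVE_EFFICIENT_UNALIGNED_ACCESS changes how the generic unaligned helpers compile: with it, get_unaligned()-style accessors become plain loads; without it, they fall back to byte-wise accesses the CPU can always perform. A small consumer sketch (generic kernel API, hypothetical function name):

	#include <asm/unaligned.h>

	/* compiles to a single load once unaligned access is enabled in HW */
	static inline u32 read_pkt_len(const void *p)
	{
		return get_unaligned_le32(p);
	}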
config ARC_HAS_LL64
bool "Insn: 64bit LDD/STD"
help
@@ -415,7 +420,7 @@ config ARC_IRQ_NO_AUTOSAVE
This is programmable and can be optionally disabled in which case
software INTERRUPT_PROLOGUE/EPILGUE do the needed work
-endif # ISA_ARCV2
+endif # ISA_ARCV2
endmenu # "ARC CPU Configuration"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index df00578c279d..e2b991f75bc5 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -28,6 +28,12 @@ cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
ifdef CONFIG_ISA_ARCV2
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+cflags-y += -munaligned-access
+else
+cflags-y += -mno-unaligned-access
+endif
+
ifndef CONFIG_ARC_HAS_LL64
cflags-y += -mno-ll64
endif
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
index 02410b211433..c0bcd97522bb 100644
--- a/arch/arc/boot/dts/abilis_tb100.dtsi
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -38,7 +38,7 @@
clock-div = <6>;
};
- iomux: iomux@FF10601c {
+ iomux: iomux@ff10601c {
/* Port 1 */
pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
abilis,function = "mis0";
@@ -162,182 +162,182 @@
};
};
- gpioa: gpio@FF140000 {
+ gpioa: gpio@ff140000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF140000 0x1000>;
+ reg = <0xff140000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <3>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioa";
};
- gpiob: gpio@FF141000 {
+ gpiob: gpio@ff141000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF141000 0x1000>;
+ reg = <0xff141000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <2>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiob";
};
- gpioc: gpio@FF142000 {
+ gpioc: gpio@ff142000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF142000 0x1000>;
+ reg = <0xff142000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <3>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioc";
};
- gpiod: gpio@FF143000 {
+ gpiod: gpio@ff143000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF143000 0x1000>;
+ reg = <0xff143000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <2>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiod";
};
- gpioe: gpio@FF144000 {
+ gpioe: gpio@ff144000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF144000 0x1000>;
+ reg = <0xff144000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <3>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioe";
};
- gpiof: gpio@FF145000 {
+ gpiof: gpio@ff145000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF145000 0x1000>;
+ reg = <0xff145000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <2>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiof";
};
- gpiog: gpio@FF146000 {
+ gpiog: gpio@ff146000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF146000 0x1000>;
+ reg = <0xff146000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <3>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiog";
};
- gpioh: gpio@FF147000 {
+ gpioh: gpio@ff147000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF147000 0x1000>;
+ reg = <0xff147000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <2>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioh";
};
- gpioi: gpio@FF148000 {
+ gpioi: gpio@ff148000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF148000 0x1000>;
+ reg = <0xff148000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <12>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioi";
};
- gpioj: gpio@FF149000 {
+ gpioj: gpio@ff149000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF149000 0x1000>;
+ reg = <0xff149000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <32>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioj";
};
- gpiok: gpio@FF14a000 {
+ gpiok: gpio@ff14a000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF14A000 0x1000>;
+ reg = <0xff14a000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <22>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiok";
};
- gpiol: gpio@FF14b000 {
+ gpiol: gpio@ff14b000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF14B000 0x1000>;
+ reg = <0xff14b000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <4>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiol";
};
- gpiom: gpio@FF14c000 {
+ gpiom: gpio@ff14c000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF14C000 0x1000>;
+ reg = <0xff14c000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <4>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiom";
};
- gpion: gpio@FF14d000 {
+ gpion: gpio@ff14d000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF14D000 0x1000>;
+ reg = <0xff14d000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <5>;
diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts
index 3acf04db8030..c968e677db46 100644
--- a/arch/arc/boot/dts/abilis_tb100_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts
@@ -37,27 +37,27 @@
};
soc100 {
- uart@FF100000 {
+ uart@ff100000 {
pinctrl-names = "default";
pinctrl-0 = <&pctl_uart0>;
};
- ethernet@FE100000 {
+ ethernet@fe100000 {
phy-mode = "rgmii";
};
- i2c0: i2c@FF120000 {
+ i2c0: i2c@ff120000 {
i2c-sda-hold-time-ns = <432>;
};
- i2c1: i2c@FF121000 {
+ i2c1: i2c@ff121000 {
i2c-sda-hold-time-ns = <432>;
};
- i2c2: i2c@FF122000 {
+ i2c2: i2c@ff122000 {
i2c-sda-hold-time-ns = <432>;
};
- i2c3: i2c@FF123000 {
+ i2c3: i2c@ff123000 {
i2c-sda-hold-time-ns = <432>;
};
- i2c4: i2c@FF124000 {
+ i2c4: i2c@ff124000 {
i2c-sda-hold-time-ns = <432>;
};
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
index f9e7686044eb..6a1615f58f05 100644
--- a/arch/arc/boot/dts/abilis_tb101.dtsi
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -38,7 +38,7 @@
clock-div = <6>;
};
- iomux: iomux@FF10601c {
+ iomux: iomux@ff10601c {
/* Port 1 */
pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
abilis,function = "mis0";
@@ -171,182 +171,182 @@
};
};
- gpioa: gpio@FF140000 {
+ gpioa: gpio@ff140000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF140000 0x1000>;
+ reg = <0xff140000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <3>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioa";
};
- gpiob: gpio@FF141000 {
+ gpiob: gpio@ff141000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF141000 0x1000>;
+ reg = <0xff141000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <2>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiob";
};
- gpioc: gpio@FF142000 {
+ gpioc: gpio@ff142000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF142000 0x1000>;
+ reg = <0xff142000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <3>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioc";
};
- gpiod: gpio@FF143000 {
+ gpiod: gpio@ff143000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF143000 0x1000>;
+ reg = <0xff143000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <2>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiod";
};
- gpioe: gpio@FF144000 {
+ gpioe: gpio@ff144000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF144000 0x1000>;
+ reg = <0xff144000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <3>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioe";
};
- gpiof: gpio@FF145000 {
+ gpiof: gpio@ff145000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF145000 0x1000>;
+ reg = <0xff145000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <2>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiof";
};
- gpiog: gpio@FF146000 {
+ gpiog: gpio@ff146000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF146000 0x1000>;
+ reg = <0xff146000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <3>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiog";
};
- gpioh: gpio@FF147000 {
+ gpioh: gpio@ff147000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF147000 0x1000>;
+ reg = <0xff147000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <2>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioh";
};
- gpioi: gpio@FF148000 {
+ gpioi: gpio@ff148000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF148000 0x1000>;
+ reg = <0xff148000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <12>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioi";
};
- gpioj: gpio@FF149000 {
+ gpioj: gpio@ff149000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF149000 0x1000>;
+ reg = <0xff149000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <32>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioj";
};
- gpiok: gpio@FF14a000 {
+ gpiok: gpio@ff14a000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF14A000 0x1000>;
+ reg = <0xff14a000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <22>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiok";
};
- gpiol: gpio@FF14b000 {
+ gpiol: gpio@ff14b000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF14B000 0x1000>;
+ reg = <0xff14b000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <4>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiol";
};
- gpiom: gpio@FF14c000 {
+ gpiom: gpio@ff14c000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF14C000 0x1000>;
+ reg = <0xff14c000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <4>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpiom";
};
- gpion: gpio@FF14d000 {
+ gpion: gpio@ff14d000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <27 2>;
- reg = <0xFF14D000 0x1000>;
+ reg = <0xff14d000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <5>;
diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts
index 37d88c5dd181..05143ce9c120 100644
--- a/arch/arc/boot/dts/abilis_tb101_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts
@@ -37,27 +37,27 @@
};
soc100 {
- uart@FF100000 {
+ uart@ff100000 {
pinctrl-names = "default";
pinctrl-0 = <&pctl_uart0>;
};
- ethernet@FE100000 {
+ ethernet@fe100000 {
phy-mode = "rgmii";
};
- i2c0: i2c@FF120000 {
+ i2c0: i2c@ff120000 {
i2c-sda-hold-time-ns = <432>;
};
- i2c1: i2c@FF121000 {
+ i2c1: i2c@ff121000 {
i2c-sda-hold-time-ns = <432>;
};
- i2c2: i2c@FF122000 {
+ i2c2: i2c@ff122000 {
i2c-sda-hold-time-ns = <432>;
};
- i2c3: i2c@FF123000 {
+ i2c3: i2c@ff123000 {
i2c-sda-hold-time-ns = <432>;
};
- i2c4: i2c@FF124000 {
+ i2c4: i2c@ff124000 {
i2c-sda-hold-time-ns = <432>;
};
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index 3121536b25a3..2fbf1bdfe6de 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -54,7 +54,7 @@
#size-cells = <1>;
device_type = "soc";
ranges = <0xfe000000 0xfe000000 0x02000000
- 0x000F0000 0x000F0000 0x00010000>;
+ 0x000f0000 0x000f0000 0x00010000>;
compatible = "abilis,tb10x", "simple-bus";
pll0: oscillator {
@@ -75,10 +75,10 @@
clock-output-names = "ahb_clk";
};
- iomux: iomux@FF10601c {
+ iomux: iomux@ff10601c {
compatible = "abilis,tb10x-iomux";
#gpio-range-cells = <3>;
- reg = <0xFF10601c 0x4>;
+ reg = <0xff10601c 0x4>;
};
intc: interrupt-controller {
@@ -88,7 +88,7 @@
};
tb10x_ictl: pic@fe002000 {
compatible = "abilis,tb10x-ictl";
- reg = <0xFE002000 0x20>;
+ reg = <0xfe002000 0x20>;
interrupt-controller;
#interrupt-cells = <2>;
interrupt-parent = <&intc>;
@@ -96,27 +96,27 @@
20 21 22 23 24 25 26 27 28 29 30 31>;
};
- uart@FF100000 {
+ uart@ff100000 {
compatible = "snps,dw-apb-uart";
- reg = <0xFF100000 0x100>;
+ reg = <0xff100000 0x100>;
clock-frequency = <166666666>;
interrupts = <25 8>;
reg-shift = <2>;
reg-io-width = <4>;
interrupt-parent = <&tb10x_ictl>;
};
- ethernet@FE100000 {
+ ethernet@fe100000 {
compatible = "snps,dwmac-3.70a","snps,dwmac";
- reg = <0xFE100000 0x1058>;
+ reg = <0xfe100000 0x1058>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <6 8>;
interrupt-names = "macirq";
clocks = <&ahb_clk>;
clock-names = "stmmaceth";
};
- dma@FE000000 {
+ dma@fe000000 {
compatible = "snps,dma-spear1340";
- reg = <0xFE000000 0x400>;
+ reg = <0xfe000000 0x400>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <14 8>;
dma-channels = <6>;
@@ -132,70 +132,70 @@
multi-block = <1 1 1 1 1 1>;
};
- i2c0: i2c@FF120000 {
+ i2c0: i2c@ff120000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,designware-i2c";
- reg = <0xFF120000 0x1000>;
+ reg = <0xff120000 0x1000>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <12 8>;
clocks = <&ahb_clk>;
};
- i2c1: i2c@FF121000 {
+ i2c1: i2c@ff121000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,designware-i2c";
- reg = <0xFF121000 0x1000>;
+ reg = <0xff121000 0x1000>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <12 8>;
clocks = <&ahb_clk>;
};
- i2c2: i2c@FF122000 {
+ i2c2: i2c@ff122000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,designware-i2c";
- reg = <0xFF122000 0x1000>;
+ reg = <0xff122000 0x1000>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <12 8>;
clocks = <&ahb_clk>;
};
- i2c3: i2c@FF123000 {
+ i2c3: i2c@ff123000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,designware-i2c";
- reg = <0xFF123000 0x1000>;
+ reg = <0xff123000 0x1000>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <12 8>;
clocks = <&ahb_clk>;
};
- i2c4: i2c@FF124000 {
+ i2c4: i2c@ff124000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,designware-i2c";
- reg = <0xFF124000 0x1000>;
+ reg = <0xff124000 0x1000>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <12 8>;
clocks = <&ahb_clk>;
};
- spi0: spi@0xFE010000 {
+ spi0: spi@fe010000 {
#address-cells = <1>;
#size-cells = <0>;
cell-index = <0>;
compatible = "abilis,tb100-spi";
num-cs = <1>;
- reg = <0xFE010000 0x20>;
+ reg = <0xfe010000 0x20>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <26 8>;
clocks = <&ahb_clk>;
};
- spi1: spi@0xFE011000 {
+ spi1: spi@fe011000 {
#address-cells = <1>;
#size-cells = <0>;
cell-index = <1>;
compatible = "abilis,tb100-spi";
num-cs = <2>;
- reg = <0xFE011000 0x20>;
+ reg = <0xfe011000 0x20>;
interrupt-parent = <&tb10x_ictl>;
interrupts = <10 8>;
clocks = <&ahb_clk>;
@@ -226,23 +226,23 @@
interrupts = <20 2>, <19 2>;
interrupt-names = "cmd_irq", "event_irq";
};
- tb10x_mdsc0: tb10x-mdscr@FF300000 {
+ tb10x_mdsc0: tb10x-mdscr@ff300000 {
compatible = "abilis,tb100-mdscr";
- reg = <0xFF300000 0x7000>;
+ reg = <0xff300000 0x7000>;
tb100-mdscr-manage-tsin;
};
- tb10x_mscr0: tb10x-mdscr@FF307000 {
+ tb10x_mscr0: tb10x-mdscr@ff307000 {
compatible = "abilis,tb100-mdscr";
- reg = <0xFF307000 0x7000>;
+ reg = <0xff307000 0x7000>;
};
tb10x_scr0: tb10x-mdscr@ff30e000 {
compatible = "abilis,tb100-mdscr";
- reg = <0xFF30e000 0x4000>;
+ reg = <0xff30e000 0x4000>;
tb100-mdscr-manage-tsin;
};
tb10x_scr1: tb10x-mdscr@ff312000 {
compatible = "abilis,tb100-mdscr";
- reg = <0xFF312000 0x4000>;
+ reg = <0xff312000 0x4000>;
tb100-mdscr-manage-tsin;
};
tb10x_wfb: tb10x-wfb@ff319000 {
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index fdc266504ada..37be3bf03ad6 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -41,7 +41,7 @@
* this GPIO block ORs all interrupts on CPU card (creg,..)
* to uplink only 1 IRQ to ARC core intc
*/
- dw-apb-gpio@0x2000 {
+ dw-apb-gpio@2000 {
compatible = "snps,dw-apb-gpio";
reg = < 0x2000 0x80 >;
#address-cells = <1>;
@@ -60,7 +60,7 @@
};
};
- debug_uart: dw-apb-uart@0x5000 {
+ debug_uart: dw-apb-uart@5000 {
compatible = "snps,dw-apb-uart";
reg = <0x5000 0x100>;
clock-frequency = <33333000>;
@@ -88,7 +88,7 @@
* avoid duplicating the MB dtsi file given that IRQ from
* this intc to cpu intc are different for axs101 and axs103
*/
- mb_intc: dw-apb-ictl@0xe0012000 {
+ mb_intc: dw-apb-ictl@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0x0 0xe0012000 0x0 0x200 >;
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index d75d65ddf8e3..effa37536d7a 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -55,7 +55,7 @@
* this GPIO block ORs all interrupts on CPU card (creg,..)
* to uplink only 1 IRQ to ARC core intc
*/
- dw-apb-gpio@0x2000 {
+ dw-apb-gpio@2000 {
compatible = "snps,dw-apb-gpio";
reg = < 0x2000 0x80 >;
#address-cells = <1>;
@@ -74,7 +74,7 @@
};
};
- debug_uart: dw-apb-uart@0x5000 {
+ debug_uart: dw-apb-uart@5000 {
compatible = "snps,dw-apb-uart";
reg = <0x5000 0x100>;
clock-frequency = <33333000>;
@@ -102,19 +102,19 @@
* external DMA buffer located outside of IOC aperture.
*/
axs10x_mb {
- ethernet@0x18000 {
+ ethernet@18000 {
dma-coherent;
};
- ehci@0x40000 {
+ ehci@40000 {
dma-coherent;
};
- ohci@0x60000 {
+ ohci@60000 {
dma-coherent;
};
- mmc@0x15000 {
+ mmc@15000 {
dma-coherent;
};
};
@@ -132,7 +132,7 @@
* avoid duplicating the MB dtsi file given that IRQ from
* this intc to cpu intc are different for axs101 and axs103
*/
- mb_intc: dw-apb-ictl@0xe0012000 {
+ mb_intc: dw-apb-ictl@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0x0 0xe0012000 0x0 0x200 >;
@@ -153,7 +153,7 @@
#size-cells = <2>;
ranges;
/*
- * Move frame buffer out of IOC aperture (0x8z-0xAz).
+ * Move frame buffer out of IOC aperture (0x8z-0xaz).
*/
frame_buffer: frame_buffer@be000000 {
compatible = "shared-dma-pool";
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index a05bb737ea63..e401e59f6180 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -62,7 +62,7 @@
* this GPIO block ORs all interrupts on CPU card (creg,..)
* to uplink only 1 IRQ to ARC core intc
*/
- dw-apb-gpio@0x2000 {
+ dw-apb-gpio@2000 {
compatible = "snps,dw-apb-gpio";
reg = < 0x2000 0x80 >;
#address-cells = <1>;
@@ -81,7 +81,7 @@
};
};
- debug_uart: dw-apb-uart@0x5000 {
+ debug_uart: dw-apb-uart@5000 {
compatible = "snps,dw-apb-uart";
reg = <0x5000 0x100>;
clock-frequency = <33333000>;
@@ -109,19 +109,19 @@
* external DMA buffer located outside of IOC aperture.
*/
axs10x_mb {
- ethernet@0x18000 {
+ ethernet@18000 {
dma-coherent;
};
- ehci@0x40000 {
+ ehci@40000 {
dma-coherent;
};
- ohci@0x60000 {
+ ohci@60000 {
dma-coherent;
};
- mmc@0x15000 {
+ mmc@15000 {
dma-coherent;
};
};
@@ -138,7 +138,7 @@
* avoid duplicating the MB dtsi file given that IRQ from
* this intc to cpu intc are different for axs101 and axs103
*/
- mb_intc: dw-apb-ictl@0xe0012000 {
+ mb_intc: dw-apb-ictl@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0x0 0xe0012000 0x0 0x200 >;
@@ -159,7 +159,7 @@
#size-cells = <2>;
ranges;
/*
- * Move frame buffer out of IOC aperture (0x8z-0xAz).
+ * Move frame buffer out of IOC aperture (0x8z-0xaz).
*/
frame_buffer: frame_buffer@be000000 {
compatible = "shared-dma-pool";
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 37bafd44e36d..4ead6dc9af2f 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -72,7 +72,7 @@
};
};
- gmac: ethernet@0x18000 {
+ gmac: ethernet@18000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = < 0x18000 0x2000 >;
@@ -88,13 +88,13 @@
mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
};
- ehci@0x40000 {
+ ehci@40000 {
compatible = "generic-ehci";
reg = < 0x40000 0x100 >;
interrupts = < 8 >;
};
- ohci@0x60000 {
+ ohci@60000 {
compatible = "generic-ohci";
reg = < 0x60000 0x100 >;
interrupts = < 8 >;
@@ -118,7 +118,7 @@
* dw_mci_pltfm_prepare_command() is used in generic platform
* code.
*/
- mmc@0x15000 {
+ mmc@15000 {
compatible = "altr,socfpga-dw-mshc";
reg = < 0x15000 0x400 >;
fifo-depth = < 16 >;
@@ -129,7 +129,7 @@
bus-width = < 4 >;
};
- uart@0x20000 {
+ uart@20000 {
compatible = "snps,dw-apb-uart";
reg = <0x20000 0x100>;
clock-frequency = <33333333>;
@@ -139,7 +139,7 @@
reg-io-width = <4>;
};
- uart@0x21000 {
+ uart@21000 {
compatible = "snps,dw-apb-uart";
reg = <0x21000 0x100>;
clock-frequency = <33333333>;
@@ -150,7 +150,7 @@
};
/* UART muxed with USB data port (ttyS3) */
- uart@0x22000 {
+ uart@22000 {
compatible = "snps,dw-apb-uart";
reg = <0x22000 0x100>;
clock-frequency = <33333333>;
@@ -160,7 +160,7 @@
reg-io-width = <4>;
};
- i2c@0x1d000 {
+ i2c@1d000 {
compatible = "snps,designware-i2c";
reg = <0x1d000 0x100>;
clock-frequency = <400000>;
@@ -177,7 +177,7 @@
#sound-dai-cells = <0>;
};
- i2c@0x1f000 {
+ i2c@1f000 {
compatible = "snps,designware-i2c";
#address-cells = <1>;
#size-cells = <0>;
@@ -218,13 +218,13 @@
};
};
- eeprom@0x54{
+ eeprom@54{
compatible = "atmel,24c01";
reg = <0x54>;
pagesize = <0x8>;
};
- eeprom@0x57{
+ eeprom@57{
compatible = "atmel,24c04";
reg = <0x57>;
pagesize = <0x8>;
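
The unit-address renames in this file and the abilis ones above all enforce the same devicetree convention: the part after "@" is plain lowercase hex with no 0x prefix, and it must match the first cell of the node's reg property. The unit address is only part of the node name; what a driver actually consumes is reg, as in this illustrative sketch, where sketch_eeprom_addr() is hypothetical and of_property_read_u32() is the stock helper:

    #include <linux/errno.h>
    #include <linux/of.h>

    /* Illustrative only: "eeprom@54" is just a node name; the value the
     * driver reads is the reg property, which the unit address mirrors. */
    static int sketch_eeprom_addr(struct device_node *np)
    {
            u32 addr;

            if (of_property_read_u32(np, "reg", &addr))
                    return -EINVAL;

            return addr;    /* 0x54 for the eeprom@54 node above */
    }
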
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 43f17b51ee89..7425bb0f2d1b 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -18,8 +18,8 @@
model = "snps,hsdk";
compatible = "snps,hsdk";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
chosen {
bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
@@ -105,17 +105,17 @@
#size-cells = <1>;
interrupt-parent = <&idu_intc>;
- ranges = <0x00000000 0xf0000000 0x10000000>;
+ ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
cgu_rst: reset-controller@8a0 {
compatible = "snps,hsdk-reset";
#reset-cells = <1>;
- reg = <0x8A0 0x4>, <0xFF0 0x4>;
+ reg = <0x8a0 0x4>, <0xff0 0x4>;
};
core_clk: core-clk@0 {
compatible = "snps,hsdk-core-pll-clock";
- reg = <0x00 0x10>, <0x14B8 0x4>;
+ reg = <0x00 0x10>, <0x14b8 0x4>;
#clock-cells = <0>;
clocks = <&input_clk>;
@@ -167,6 +167,18 @@
#clock-cells = <0>;
};
+ dmac_core_clk: dmac-core-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <400000000>;
+ #clock-cells = <0>;
+ };
+
+ dmac_cfg_clk: dmac-gpu-cfg-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <200000000>;
+ #clock-cells = <0>;
+ };
+
gmac: ethernet@8000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
@@ -200,6 +212,7 @@
compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
reg = <0x60000 0x100>;
interrupts = <15>;
+ resets = <&cgu_rst HSDK_USB_RESET>;
dma-coherent;
};
@@ -207,6 +220,7 @@
compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
reg = <0x40000 0x100>;
interrupts = <15>;
+ resets = <&cgu_rst HSDK_USB_RESET>;
dma-coherent;
};
@@ -237,12 +251,28 @@
reg = <0>;
};
};
+
+ dmac: dmac@80000 {
+ compatible = "snps,axi-dma-1.01a";
+ reg = <0x80000 0x400>;
+ interrupts = <27>;
+ clocks = <&dmac_core_clk>, <&dmac_cfg_clk>;
+ clock-names = "core-clk", "cfgr-clk";
+
+ dma-channels = <4>;
+ snps,dma-masters = <2>;
+ snps,data-width = <3>;
+ snps,block-size = <4096 4096 4096 4096>;
+ snps,priority = <0 1 2 3>;
+ snps,axi-max-burst-len = <16>;
+ };
};
memory@80000000 {
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
device_type = "memory";
- reg = <0x80000000 0x40000000>; /* 1 GiB */
+ reg = <0x0 0x80000000 0x0 0x40000000>; /* 1 GB lowmem */
+ /* 0x1 0x00000000 0x0 0x40000000>; 1 GB highmem */
};
};
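
Beyond the hex-case fixes, the hsdk.dts hunk above widens #address-cells and #size-cells to 2, so every address and size becomes a <hi lo> pair of 32-bit cells; that is what lets the memory node describe the second gigabyte of RAM above the 4 GB boundary (kept commented out for now). A minimal sketch of how a consumer reassembles such a pair, assuming only the stock of_read_number() helper; the wrapper function itself is hypothetical:

    #include <linux/of.h>

    /* Sketch: fold the first <hi lo> cell pair of reg into a 64-bit base,
     * e.g. <0x0 0x80000000> -> 0x80000000 and <0x1 0x0> -> 4 GB. */
    static u64 sketch_reg_base(struct device_node *np)
    {
            const __be32 *reg = of_get_property(np, "reg", NULL);

            return reg ? of_read_number(reg, 2) : 0;
    }
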
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index 0fd6ba985b16..84e8766c8ca2 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -36,7 +36,7 @@
#interrupt-cells = <1>;
};
- debug_uart: dw-apb-uart@0x5000 {
+ debug_uart: dw-apb-uart@5000 {
compatible = "snps,dw-apb-uart";
reg = <0x5000 0x100>;
clock-frequency = <2403200>;
@@ -49,7 +49,7 @@
};
- mb_intc: dw-apb-ictl@0xe0012000 {
+ mb_intc: dw-apb-ictl@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0xe0012000 0x200 >;
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index 28956f9a9f3d..eb7e705e8a27 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -44,7 +44,7 @@
#interrupt-cells = <1>;
};
- debug_uart: dw-apb-uart@0x5000 {
+ debug_uart: dw-apb-uart@5000 {
compatible = "snps,dw-apb-uart";
reg = <0x5000 0x100>;
clock-frequency = <2403200>;
@@ -57,7 +57,7 @@
};
- mb_intc: dw-apb-ictl@0xe0012000 {
+ mb_intc: dw-apb-ictl@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0xe0012000 0x200 >;
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index 48bb4b4cd234..925d5cc95dbb 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -36,7 +36,7 @@
};
};
- ethernet@0x18000 {
+ ethernet@18000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = < 0x18000 0x2000 >;
@@ -49,13 +49,13 @@
clock-names = "stmmaceth";
};
- ehci@0x40000 {
+ ehci@40000 {
compatible = "generic-ehci";
reg = < 0x40000 0x100 >;
interrupts = < 8 >;
};
- uart@0x20000 {
+ uart@20000 {
compatible = "snps,dw-apb-uart";
reg = <0x20000 0x100>;
clock-frequency = <2403200>;
@@ -65,7 +65,7 @@
reg-io-width = <4>;
};
- uart@0x21000 {
+ uart@21000 {
compatible = "snps,dw-apb-uart";
reg = <0x21000 0x100>;
clock-frequency = <2403200>;
@@ -75,7 +75,7 @@
reg-io-width = <4>;
};
- uart@0x22000 {
+ uart@22000 {
compatible = "snps,dw-apb-uart";
reg = <0x22000 0x100>;
clock-frequency = <2403200>;
@@ -101,7 +101,7 @@
interrupt-names = "arc_ps2_irq";
};
- mmc@0x15000 {
+ mmc@15000 {
compatible = "snps,dw-mshc";
reg = <0x15000 0x400>;
fifo-depth = <1024>;
@@ -117,11 +117,11 @@
* Embedded Vision subsystem UIO mappings; only relevant for EV VDK
*
* This node is intentionally put outside of MB above because
- * it maps areas outside of MB's 0xEz-0xFz.
+ * it maps areas outside of MB's 0xez-0xfz.
*/
- uio_ev: uio@0xD0000000 {
+ uio_ev: uio@d0000000 {
compatible = "generic-uio";
- reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
+ reg = <0xd0000000 0x2000 0xd1000000 0x2000 0x90000000 0x10000000 0xc0000000 0x10000000>;
reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
interrupt-parent = <&mb_intc>;
interrupts = <23>;
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 6fd3d29546af..0e5fd29ed238 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index caa270261521..393d4f5e1450 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -3,6 +3,7 @@ generic-y += bugs.h
generic-y += compat.h
generic-y += device.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += extable.h
generic-y += ftrace.h
@@ -10,10 +11,12 @@ generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index a27eafdc8260..a7d4be87b2f0 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -82,6 +82,7 @@
#define ECR_V_DTLB_MISS 0x05
#define ECR_V_PROTV 0x06
#define ECR_V_TRAP 0x09
+#define ECR_V_MISALIGN 0x0d
#endif
/* DTLB Miss and Protection Violation Cause Codes */
@@ -167,14 +168,6 @@ struct bcr_mpy {
#endif
};
-struct bcr_extn_xymem {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
-#else
- unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
-#endif
-};
-
struct bcr_iccm_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int base:16, pad:5, sz:3, ver:8;
@@ -312,7 +305,7 @@ struct cpuinfo_arc {
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa_arcv2 isa;
- const char *details, *name;
+ const char *release, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
@@ -322,7 +315,6 @@ struct cpuinfo_arc {
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
struct bcr_mpy extn_mpy;
- struct bcr_extn_xymem extn_xymem;
};
extern struct cpuinfo_arc cpuinfo_arc700[];
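
Each bcr_* struct in this header, including the bcr_extn_xymem one being dropped, follows one pattern: READ_BCR() copies a Build Configuration Register into a C struct whose bitfield order is mirrored between big- and little-endian builds, so the same register bits land in the same named fields either way. A sketch of the pattern with a made-up register; the struct and its fields are hypothetical:

    /* Hypothetical BCR with ver in bits [7:0] and cfg in bits [15:8]. */
    struct bcr_example {
    #ifdef CONFIG_CPU_BIG_ENDIAN
            unsigned int pad:16, cfg:8, ver:8;
    #else
            unsigned int ver:8, cfg:8, pad:16;
    #endif
    };

READ_BCR() then simply type-puns the raw 32-bit aux-register value into the struct, which keeps the decode code free of shift-and-mask noise.
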
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
deleted file mode 100644
index c946c0a83e76..000000000000
--- a/arch/arc/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,13 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// (C) 2018 Synopsys, Inc. (www.synopsys.com)
-
-#ifndef ASM_ARC_DMA_MAPPING_H
-#define ASM_ARC_DMA_MAPPING_H
-
-#include <asm-generic/dma-mapping.h>
-
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
-#define arch_setup_dma_ops arch_setup_dma_ops
-
-#endif
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 8a4f77ea3238..e66d0339e1d8 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -44,7 +44,13 @@
#define ARCV2_IRQ_DEF_PRIO 1
/* seed value for status register */
-#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+#define __AD_ENB STATUS_AD_MASK
+#else
+#define __AD_ENB 0
+#endif
+
+#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | __AD_ENB | \
(ARCV2_IRQ_DEF_PRIO << 1))
#ifndef __ASSEMBLY__
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 6958545390f0..9cd7ee4fad39 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -105,10 +105,10 @@ static const char * const arc_pmu_ev_hw_map[] = {
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
/* All jump instructions that are taken */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
- [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
#ifdef CONFIG_ISA_ARCV2
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
#else
+ [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
#endif
[PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 2ba04a7db621..daa914da7968 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -21,8 +21,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
- smp_mb();
-
__asm__ __volatile__(
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
@@ -34,6 +32,14 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
+ /*
+ * ACQUIRE barrier to ensure loads/stores after taking the lock
+ * don't "bleed up" out of the critical section (leak-in is allowed)
+ * http://www.spinics.net/lists/kernel/msg2010409.html
+ *
+ * ARCv2 only has load-load, store-store and all-all barriers,
+ * thus we need the full all-all barrier
+ */
smp_mb();
}
@@ -42,8 +48,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int val, got_it = 0;
- smp_mb();
-
__asm__ __volatile__(
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
@@ -67,9 +71,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
- smp_mb();
+ WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}
/*
@@ -81,8 +83,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int val;
- smp_mb();
-
/*
* zero means writer holds the lock exclusively, deny Reader.
* Otherwise grant lock to first/subseq reader
@@ -113,8 +113,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
- smp_mb();
-
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
@@ -140,8 +138,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int val;
- smp_mb();
-
/*
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
* deny writer. Otherwise if unlocked grant to writer
@@ -175,8 +171,6 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
- smp_mb();
-
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
@@ -217,17 +211,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter))
: "memory", "cc");
-
- smp_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
- rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-
- smp_mb();
+ WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}
#else /* !CONFIG_ARC_HAS_LLSC */
@@ -237,10 +227,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
/*
- * This smp_mb() is technically superfluous, we only need the one
- * after the lock for providing the ACQUIRE semantics.
- * However doing the "right" thing was regressing hackbench
- * so keeping this, pending further investigation
+ * Per lkmm, smp_mb() is only required after _lock (and before _unlock)
+ * for ACQ and REL semantics respectively. However EX-based spinlocks
+ * need the extra smp_mb to work around a hardware quirk.
*/
smp_mb();
@@ -257,14 +246,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
#endif
: "memory");
- /*
- * ACQUIRE barrier to ensure load/store after taking the lock
- * don't "bleed-up" out of the critical section (leak-in is allowed)
- * http://www.spinics.net/lists/kernel/msg2010409.html
- *
- * ARCv2 only has load-load, store-store and all-all barrier
- * thus need the full all-all barrier
- */
smp_mb();
}
@@ -309,8 +290,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
: "memory");
/*
- * superfluous, but keeping for now - see pairing version in
- * arch_spin_lock above
+ * see pairing version/comment in arch_spin_lock above
*/
smp_mb();
}
@@ -344,7 +324,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
arch_spin_unlock(&(rw->lock_mutex));
local_irq_restore(flags);
- smp_mb();
return ret;
}
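
The net effect of the spinlock rework is the minimal barrier discipline the new comments describe: one full barrier after acquiring, one before releasing, and a plain WRITE_ONCE() store to release. Restated with the portable atomic API instead of ARC's LLOCK/SCOND, purely as a hedged sketch and not the actual implementation:

    #include <linux/atomic.h>
    #include <asm/processor.h>

    /* Sketch: a test-and-set lock with the same barrier placement. */
    static inline void sketch_lock(atomic_t *l)
    {
            while (atomic_cmpxchg(l, 0, 1) != 0)
                    cpu_relax();
            smp_mb();       /* ACQUIRE: critical section can't bleed up */
    }

    static inline void sketch_unlock(atomic_t *l)
    {
            smp_mb();       /* RELEASE: critical section can't bleed down */
            atomic_set(l, 0);       /* plain store, like the WRITE_ONCE() above */
    }
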
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 29de09804306..c7a4201ed62b 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
*/
static inline void
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, unsigned long *args)
+ unsigned long *args)
{
unsigned long *inside_ptregs = &(regs->r0);
- inside_ptregs -= i;
-
- BUG_ON((i + n) > 6);
+ unsigned int n = 6;
+ unsigned int i = 0;
while (n--) {
args[i++] = (*inside_ptregs);
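
This hunk is ARC's side of the treewide syscall_get_arguments() signature change: the i/n window parameters are gone and callers always receive all six arguments. From the caller's side the change reduces to the following sketch; the tracer function is hypothetical:

    #include <asm/syscall.h>

    static void sketch_trace_entry(struct task_struct *task, struct pt_regs *regs)
    {
            unsigned long args[6];

            /* old: syscall_get_arguments(task, regs, 0, 6, args); */
            syscall_get_arguments(task, regs, args);
            /* args[0..5] now hold the six syscall arguments */
    }
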
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index a9db5f62aaf3..90cac97643a4 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -9,38 +9,6 @@
#ifndef _ASM_ARC_TLB_H
#define _ASM_ARC_TLB_H
-#define tlb_flush(tlb) \
-do { \
- if (tlb->fullmm) \
- flush_tlb_mm((tlb)->mm); \
-} while (0)
-
-/*
- * This pair is called at time of munmap/exit to flush cache and TLB entries
- * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
- * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
- *
- * Note, read http://lkml.org/lkml/2004/1/15/6
- */
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-#define tlb_start_vma(tlb, vma)
-#else
-#define tlb_start_vma(tlb, vma) \
-do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while(0)
-#endif
-
-#define tlb_end_vma(tlb, vma) \
-do { \
- if (!tlb->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, ptep, address)
-
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index 0febf1a07c30..1c72f04ff75d 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,4 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
-generic-y += kvm_para.h
generic-y += ucontext.h
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 30e090625916..8f6e0447dd17 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -54,7 +54,12 @@
; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
; by default
lr r5, [status32]
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
bset r5, r5, STATUS_AD_BIT
+#else
+ ; Although disabled at reset, bootloader might have enabled it
+ bclr r5, r5, STATUS_AD_BIT
+#endif
kflag r5
#endif
.endm
@@ -106,6 +111,7 @@ ENTRY(stext)
; r2 = pointer to uboot provided cmdline or external DTB in mem
; These are handled later in handle_uboot_args()
st r0, [@uboot_tag]
+ st r1, [@uboot_magic]
st r2, [@uboot_arg]
; setup "current" tsk and optionally cache it in dedicated r25
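
The handoff ABI documented here is three registers wide: r0 carries a tag describing what r2 points at, r1 a magic value, and r2 the cmdline or DTB pointer; each is stashed into a kernel variable before anything is trusted. handle_uboot_args() in setup.c (patched below) then validates the triple, roughly as in this condensed sketch; the helper is hypothetical, but the UBOOT_* constants and uboot_arg_invalid() appear in that file:

    #include <linux/types.h>

    /* Condensed sketch of the checks in handle_uboot_args(). */
    static bool sketch_uboot_args_ok(int tag, int magic, unsigned long arg)
    {
            if (tag != UBOOT_TAG_NONE && tag != UBOOT_TAG_CMDLINE &&
                tag != UBOOT_TAG_DTB)
                    return false;                   /* unknown tag */

            if (magic != UBOOT_MAGIC_VALUE)         /* the new check */
                    return false;

            if (tag != UBOOT_TAG_NONE && uboot_arg_invalid(arg))
                    return false;                   /* bogus pointer */

            return true;
    }
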
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index cf18b3e5a934..c0d0124de089 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -95,7 +95,7 @@ void arc_init_IRQ(void)
/* setup status32, don't enable intr yet as kernel doesn't want */
tmp = read_aux_reg(ARC_REG_STATUS32);
- tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
+ tmp |= ARCV2_IRQ_DEF_PRIO << 1;
tmp &= ~STATUS_IE_MASK;
asm volatile("kflag %0 \n"::"r"(tmp));
}
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 7b2340996cf8..a9c88b7e9182 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -36,6 +36,7 @@ unsigned int intr_to_DE_cnt;
/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
+int __initdata uboot_magic;
char __initdata *uboot_arg;
const struct machine_desc *machine_desc;
@@ -44,29 +45,24 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
-static const struct id_to_str arc_cpu_rel[] = {
+static const struct id_to_str arc_legacy_rel[] = {
+ /* ID.ARCVER, Release */
#ifdef CONFIG_ISA_ARCOMPACT
- { 0x34, "R4.10"},
- { 0x35, "R4.11"},
+ { 0x34, "R4.10"},
+ { 0x35, "R4.11"},
#else
- { 0x51, "R2.0" },
- { 0x52, "R2.1" },
- { 0x53, "R3.0" },
- { 0x54, "R3.10a" },
+ { 0x51, "R2.0" },
+ { 0x52, "R2.1" },
+ { 0x53, "R3.0" },
#endif
- { 0x00, NULL }
+ { 0x00, NULL }
};
-static const struct id_to_str arc_cpu_nm[] = {
-#ifdef CONFIG_ISA_ARCOMPACT
- { 0x20, "ARC 600" },
- { 0x30, "ARC 770" }, /* 750 identified seperately */
-#else
- { 0x40, "ARC EM" },
- { 0x50, "ARC HS38" },
- { 0x54, "ARC HS48" },
-#endif
- { 0x00, "Unknown" }
+static const struct id_to_str arc_cpu_rel[] = {
+ /* UARCH.MAJOR, Release */
+ { 0, "R3.10a"},
+ { 1, "R3.50a"},
+ { 0xFF, NULL }
};
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
@@ -116,31 +112,72 @@ static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
}
}
+static void decode_arc_core(struct cpuinfo_arc *cpu)
+{
+ struct bcr_uarch_build_arcv2 uarch;
+ const struct id_to_str *tbl;
+
+ /*
+ * Up to (and including) the first core4 release (0x54), things were
+ * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
+ * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
+ */
+
+ if (cpu->core.family < 0x54) { /* includes arc700 */
+
+ for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
+ if (cpu->core.family == tbl->id) {
+ cpu->release = tbl->str;
+ break;
+ }
+ }
+
+ if (is_isa_arcompact())
+ cpu->name = "ARC700";
+ else if (tbl->str)
+ cpu->name = "HS38";
+ else
+ cpu->name = cpu->release = "Unknown";
+
+ return;
+ }
+
+ /*
+ * However the subsequent HS release (same 0x54) allows HS38 or HS48
+ * configurations and encodes this info in a different BCR.
+ * The BCR was introduced in 0x54 so can't be read unconditionally.
+ */
+
+ READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+ if (uarch.prod == 4) {
+ cpu->name = "HS48";
+ cpu->extn.dual = 1;
+
+ } else {
+ cpu->name = "HS38";
+ }
+
+ for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
+ if (uarch.maj == tbl->id) {
+ cpu->release = tbl->str;
+ break;
+ }
+ }
+}
+
static void read_arc_build_cfg_regs(void)
{
struct bcr_timer timer;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
- const struct id_to_str *tbl;
struct bcr_isa_arcv2 isa;
struct bcr_actionpoint ap;
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
-
- for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
- if (cpu->core.family == tbl->id) {
- cpu->details = tbl->str;
- break;
- }
- }
-
- for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
- if ((cpu->core.family & 0xF4) == tbl->id)
- break;
- }
- cpu->name = tbl->str;
+ decode_arc_core(cpu);
READ_BCR(ARC_REG_TIMERS_BCR, timer);
cpu->extn.timer0 = timer.t0;
@@ -151,16 +188,6 @@ static void read_arc_build_cfg_regs(void)
READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
- cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */
- cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */
- cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
- cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
- cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
- cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
- IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
-
- READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
-
/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
read_decode_ccm_bcr(cpu);
@@ -198,30 +225,12 @@ static void read_arc_build_cfg_regs(void)
cpu->bpu.num_pred = 2048 << bpu.pte;
cpu->bpu.ret_stk = 4 << bpu.rse;
- if (cpu->core.family >= 0x54) {
-
- struct bcr_uarch_build_arcv2 uarch;
-
- /*
- * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
- * dual issue only (HS4x). But next uarch rev (1:0)
- * allows it be configured for single issue (HS3x)
- * Ensure we fiddle with dual issue only on HS4x
- */
- READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
-
- if (uarch.prod == 4) {
- unsigned int exec_ctrl;
-
- /* dual issue hardware always present */
- cpu->extn.dual = 1;
-
- READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+ /* if dual-issue hardware is present, is it enabled? */
+ if (cpu->extn.dual) {
+ unsigned int exec_ctrl;
- /* dual issue hardware enabled ? */
- cpu->extn.dual_enb = !(exec_ctrl & 1);
-
- }
+ READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+ cpu->extn.dual_enb = !(exec_ctrl & 1);
}
}
@@ -263,7 +272,8 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
- int i, n = 0, ua = 0;
+ char mpy_opt[16];
+ int n = 0;
FIX_PTR(cpu);
@@ -272,7 +282,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
core->family, core->cpu_id, core->chip_id);
n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
- cpu_id, cpu->name, cpu->details,
+ cpu_id, cpu->name, cpu->release,
is_isa_arcompact() ? "ARCompact" : "ARCv2",
IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
@@ -283,61 +293,50 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
-#ifdef __ARC_UNALIGNED__
- ua = 1;
-#endif
- n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
- IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
- IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
- IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
-
- if (i)
- n += scnprintf(buf + n, len - n, "\n\t\t: ");
-
if (cpu->extn_mpy.ver) {
- if (cpu->extn_mpy.ver <= 0x2) { /* ARCompact */
- n += scnprintf(buf + n, len - n, "mpy ");
+ if (is_isa_arcompact()) {
+ scnprintf(mpy_opt, 16, "mpy");
} else {
+
int opt = 2; /* stock MPY/MPYH */
if (cpu->extn_mpy.dsp) /* OPT 7-9 */
opt = cpu->extn_mpy.dsp + 6;
- n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
+ scnprintf(mpy_opt, 16, "mpy[opt %d] ", opt);
}
}
n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
- IS_AVAIL1(cpu->isa.div_rem, "div_rem "),
- IS_AVAIL1(cpu->extn.norm, "norm "),
- IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
- IS_AVAIL1(cpu->extn.swap, "swap "),
- IS_AVAIL1(cpu->extn.minmax, "minmax "),
- IS_AVAIL1(cpu->extn.crc, "crc "),
- IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
-
- if (cpu->bpu.ver)
+ IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+ IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
+ IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
+ IS_AVAIL1(cpu->extn_mpy.ver, mpy_opt),
+ IS_AVAIL1(cpu->isa.div_rem, "div_rem "));
+
+ if (cpu->bpu.ver) {
n += scnprintf(buf + n, len - n,
"BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
IS_AVAIL1(cpu->bpu.full, "full"),
IS_AVAIL1(!cpu->bpu.full, "partial"),
cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
- if (is_isa_arcv2()) {
- struct bcr_lpb lpb;
+ if (is_isa_arcv2()) {
+ struct bcr_lpb lpb;
- READ_BCR(ARC_REG_LPB_BUILD, lpb);
- if (lpb.ver) {
- unsigned int ctl;
- ctl = read_aux_reg(ARC_REG_LPB_CTRL);
+ READ_BCR(ARC_REG_LPB_BUILD, lpb);
+ if (lpb.ver) {
+ unsigned int ctl;
+ ctl = read_aux_reg(ARC_REG_LPB_CTRL);
- n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
- lpb.entries,
- IS_DISABLED_RUN(!ctl));
+ n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
+ lpb.entries,
+ IS_DISABLED_RUN(!ctl));
+ }
}
+ n += scnprintf(buf + n, len - n, "\n");
}
- n += scnprintf(buf + n, len - n, "\n");
return buf;
}
@@ -390,11 +389,6 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
}
}
- n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
- EF_ARC_OSABI_CURRENT >> 8,
- EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
- "no-legacy-syscalls" : "64-bit data any register aligned");
-
return buf;
}
@@ -497,6 +491,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
#define UBOOT_TAG_NONE 0
#define UBOOT_TAG_CMDLINE 1
#define UBOOT_TAG_DTB 2
+/* We always pass 0 as magic from U-boot */
+#define UBOOT_MAGIC_VALUE 0
void __init handle_uboot_args(void)
{
@@ -511,6 +507,11 @@ void __init handle_uboot_args(void)
goto ignore_uboot_args;
}
+ if (uboot_magic != UBOOT_MAGIC_VALUE) {
+ pr_warn(IGNORE_ARGS "non zero uboot magic\n");
+ goto ignore_uboot_args;
+ }
+
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_arg_invalid((unsigned long)uboot_arg)) {
pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 215f515442e0..b0aa8c028331 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -145,7 +145,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
} else if (vec == ECR_V_PROTV) {
if (cause_code == ECR_C_PROTV_INST_FETCH)
pr_cont("Execute from Non-exec Page\n");
- else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+ else if (cause_code == ECR_C_PROTV_MISALIG_DATA &&
+ IS_ENABLED(CONFIG_ISA_ARCOMPACT))
pr_cont("Misaligned r/w from 0x%08lx\n", address);
else
pr_cont("%s access not allowed on page\n",
@@ -161,6 +162,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
pr_cont("Bus Error from Data Mem\n");
else
pr_cont("Bus Error, check PRM\n");
+ } else if (vec == ECR_V_MISALIGN) {
+ pr_cont("Misaligned r/w from 0x%08lx\n", address);
#endif
} else if (vec == ECR_V_TRAP) {
if (regs->ecr_param == 5)
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index d34f69eb1a95..271e9fafa479 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -181,8 +181,7 @@ static void init_unwind_hdr(struct unwind_table *table,
*/
static void *__init unw_hdr_alloc_early(unsigned long sz)
{
- return memblock_alloc_from_nopanic(sz, sizeof(unsigned int),
- MAX_DMA_ADDRESS);
+ return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
}
static void *unw_hdr_alloc(unsigned long sz)
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
index b1656d156097..f7537b466b23 100644
--- a/arch/arc/lib/Makefile
+++ b/arch/arc/lib/Makefile
@@ -8,4 +8,10 @@
lib-y := strchr-700.o strcpy-700.o strlen.o memcmp.o
lib-$(CONFIG_ISA_ARCOMPACT) += memcpy-700.o memset.o strcmp.o
-lib-$(CONFIG_ISA_ARCV2) += memcpy-archs.o memset-archs.o strcmp-archs.o
+lib-$(CONFIG_ISA_ARCV2) += memset-archs.o strcmp-archs.o
+
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+lib-$(CONFIG_ISA_ARCV2) += memcpy-archs-unaligned.o
+else
+lib-$(CONFIG_ISA_ARCV2) += memcpy-archs.o
+endif
diff --git a/arch/arc/lib/memcpy-archs-unaligned.S b/arch/arc/lib/memcpy-archs-unaligned.S
new file mode 100644
index 000000000000..28993a73fdde
--- /dev/null
+++ b/arch/arc/lib/memcpy-archs-unaligned.S
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ARCv2 memcpy implementation optimized for unaligned memory access.
+ *
+ * Copyright (C) 2019 Synopsys
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+
+#include <linux/linkage.h>
+
+#ifdef CONFIG_ARC_HAS_LL64
+# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
+# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
+# define ZOLSHFT 5
+# define ZOLAND 0x1F
+#else
+# define LOADX(DST,RX) ld.ab DST, [RX, 4]
+# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
+# define ZOLSHFT 4
+# define ZOLAND 0xF
+#endif
+
+ENTRY_CFI(memcpy)
+ mov r3, r0 ; don't clobber ret val
+
+ lsr.f lp_count, r2, ZOLSHFT
+ lpnz @.Lcopy32_64bytes
+ ;; LOOP START
+ LOADX (r6, r1)
+ LOADX (r8, r1)
+ LOADX (r10, r1)
+ LOADX (r4, r1)
+ STOREX (r6, r3)
+ STOREX (r8, r3)
+ STOREX (r10, r3)
+ STOREX (r4, r3)
+.Lcopy32_64bytes:
+
+ and.f lp_count, r2, ZOLAND ;last remaining bytes (up to ZOLAND)
+ lpnz @.Lcopyremainingbytes
+ ;; LOOP START
+ ldb.ab r5, [r1, 1]
+ stb.ab r5, [r3, 1]
+.Lcopyremainingbytes:
+
+ j [blink]
+END_CFI(memcpy)
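
Because this variant presumes hardware unaligned-access support, it needs none of the alignment fix-up code in the original memcpy-archs.S: just a wide bulk loop (32 bytes per iteration with LL64, 16 without) followed by a byte tail. Its C shape, as a hedged sketch of the LL64 case; the unaligned u64 dereferences are the whole point on this hardware, though they would be non-portable C elsewhere:

    #include <linux/types.h>

    /* C shape of the assembly above (LL64: four 8-byte moves per pass). */
    static void *sketch_memcpy(void *dst, const void *src, size_t n)
    {
            u64 *d = dst;
            const u64 *s = src;
            char *db;
            const char *sb;
            size_t i;

            for (i = 0; i < (n >> 5); i++) {        /* 32 bytes per pass */
                    *d++ = *s++; *d++ = *s++;
                    *d++ = *s++; *d++ = *s++;
            }

            db = (char *)d;
            sb = (const char *)s;
            for (i = 0; i < (n & 0x1f); i++)        /* byte tail */
                    *db++ = *sb++;

            return dst;
    }
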
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index f230bb7092fd..b3373f5c88e0 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -30,10 +30,10 @@
#else
-.macro PREALLOC_INSTR
+.macro PREALLOC_INSTR reg, off
.endm
-.macro PREFETCHW_INSTR
+.macro PREFETCHW_INSTR reg, off
.endm
#endif
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 4135abec3fb0..63e6e6504699 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -113,10 +113,24 @@ static void read_decode_cache_bcr_arcv2(int cpu)
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
- if (cbcr.c)
+ if (cbcr.c) {
ioc_exists = 1;
- else
+
+ /*
+ * As of today we don't support both IOC and ZONE_HIGHMEM enabled
+ * simultaneously. This happens because as of today IOC aperture covers
+ * only ZONE_NORMAL (low mem) and any dma transactions outside this
+ * region won't be HW coherent.
+ * If we want to use both IOC and ZONE_HIGHMEM we can use
+ * bounce_buffer to handle dma transactions to HIGHMEM.
+ * Also it is possible to modify dma_direct cache ops or increase IOC
+ * aperture size if we are planning to use HIGHMEM without PAE.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
+ ioc_enable = 0;
+ } else {
ioc_enable = 0;
+ }
/* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1158,19 +1172,6 @@ noinline void __init arc_ioc_setup(void)
if (!ioc_enable)
return;
- /*
- * As for today we don't support both IOC and ZONE_HIGHMEM enabled
- * simultaneously. This happens because as of today IOC aperture covers
- * only ZONE_NORMAL (low mem) and any dma transactions outside this
- * region won't be HW coherent.
- * If we want to use both IOC and ZONE_HIGHMEM we can use
- * bounce_buffer to handle dma transactions to HIGHMEM.
- * Also it is possible to modify dma_direct cache ops or increase IOC
- * aperture size if we are planning to use HIGHMEM without PAE.
- */
- if (IS_ENABLED(CONFIG_HIGHMEM))
- panic("IOC and HIGHMEM can't be used simultaneously");
-
/* Flush + invalidate + disable L1 dcache */
__dc_disable();
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 48e700151810..11f57e2ced8a 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -124,6 +124,10 @@ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
pmd_k = pmd_offset(pud_k, kvaddr);
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pte_k)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
pmd_populate_kernel(&init_mm, pmd_k, pte_k);
return pte_k;
}
diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig
index 8eff057efcae..2eaecfb063a7 100644
--- a/arch/arc/plat-eznps/Kconfig
+++ b/arch/arc/plat-eznps/Kconfig
@@ -26,8 +26,8 @@ config EZNPS_MTM_EXT
help
Here we add new hierarchy for CPUs topology.
We got:
- Core
- Thread
+ Core
+ Thread
At the new thread level each CPU represent one HW thread.
At highest hierarchy each core contain 16 threads,
any of them seem like CPU from Linux point of view.
@@ -35,10 +35,10 @@ config EZNPS_MTM_EXT
core and HW scheduler round robin between them.
config EZNPS_MEM_ERROR_ALIGN
- bool "ARC-EZchip Memory error as an exception"
- depends on EZNPS_MTM_EXT
- default n
- help
+ bool "ARC-EZchip Memory error as an exception"
+ depends on EZNPS_MTM_EXT
+ default n
+ help
On the real chip of the NPS, user memory errors are handled
as a machine check exception, which is fatal, whereas on
simulator platform for NPS, is handled as a Level 2 interrupt
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e8a4d3ffb6eb..dc9855c4a3b4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -13,9 +13,11 @@ config ARM
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
select ARCH_HAS_PHYS_TO_DMA
+ select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
+ select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -31,6 +33,7 @@ config ARM
select CLONE_BACKWARDS
select CPU_PM if SUSPEND || CPU_IDLE
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select DMA_DECLARE_COHERENT
select DMA_REMAP if MMU
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
@@ -70,10 +73,9 @@ config ARM
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
- select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL
+ select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_GCC_PLUGINS
- select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_IRQ_TIME_ACCOUNTING
@@ -102,7 +104,6 @@ config ARM
select MODULES_USE_ELF_REL
select NEED_DMA_MAP_STATE
select OF_EARLY_FLATTREE if OF
- select OF_RESERVED_MEM if OF
select OLD_SIGACTION
select OLD_SIGSUSPEND3
select PCI_SYSCALL if PCI
@@ -177,10 +178,6 @@ config TRACE_IRQFLAGS_SUPPORT
bool
default !CPU_V7M
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y
-
config ARCH_HAS_ILOG2_U32
bool
@@ -595,6 +592,7 @@ config ARCH_DAVINCI
select HAVE_IDE
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
+ select REGMAP_MMIO
select RESET_CONTROLLER
select SPARSE_IRQ
select USE_OF
@@ -1309,7 +1307,7 @@ config SCHED_SMT
config HAVE_ARM_SCU
bool
help
- This option enables support for the ARM system coherency unit
+ This option enables support for the ARM snoop control unit
config HAVE_ARM_ARCH_TIMER
bool "Architected timer support"
@@ -1321,7 +1319,6 @@ config HAVE_ARM_ARCH_TIMER
config HAVE_ARM_TWD
bool
- select TIMER_OF if OF
help
This options enables support for the ARM timer and watchdog unit
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index 1168a03c8525..36c80d3dd93f 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -20,10 +20,12 @@ config DRAM_SIZE
config FLASH_MEM_BASE
hex 'FLASH Base Address' if SET_MEM_PARAM
+ depends on CPU_ARM740T || CPU_ARM946E || CPU_ARM940T
default 0x00400000
config FLASH_SIZE
hex 'FLASH Size' if SET_MEM_PARAM
+ depends on CPU_ARM740T || CPU_ARM946E || CPU_ARM940T
default 0x00400000
config PROCESSOR_ID
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 6d6e0330930b..e388af4594a6 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -47,8 +47,8 @@ config DEBUG_WX
choice
prompt "Choose kernel unwinder"
- default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
- default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
+ default UNWINDER_ARM if AEABI
+ default UNWINDER_FRAME_POINTER if !AEABI
help
This determines which method will be used for unwinding kernel stack
traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
@@ -65,7 +65,7 @@ config UNWINDER_FRAME_POINTER
config UNWINDER_ARM
bool "ARM EABI stack unwinder"
- depends on AEABI
+ depends on AEABI && !FUNCTION_GRAPH_TRACER
select ARM_UNWIND
help
This option enables stack unwinding support in the kernel
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 00000e91ad65..807a7d06c2a0 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
-LDFLAGS_vmlinux :=-p --no-undefined -X --pic-veneer
+LDFLAGS_vmlinux := --no-undefined -X --pic-veneer
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
KBUILD_LDFLAGS_MODULE += --be8
diff --git a/arch/arm/boot/bootp/Makefile b/arch/arm/boot/bootp/Makefile
index 83e1a076a5d6..981a8d03f064 100644
--- a/arch/arm/boot/bootp/Makefile
+++ b/arch/arm/boot/bootp/Makefile
@@ -8,7 +8,7 @@
GCOV_PROFILE := n
-LDFLAGS_bootp :=-p --no-undefined -X \
+LDFLAGS_bootp := --no-undefined -X \
--defsym initrd_phys=$(INITRD_PHYS) \
--defsym params_phys=$(PARAMS_PHYS) -T
AFLAGS_initrd.o :=-DINITRD=\"$(INITRD)\"
diff --git a/arch/arm/boot/bootp/init.S b/arch/arm/boot/bootp/init.S
index 78b508075161..142927e5f485 100644
--- a/arch/arm/boot/bootp/init.S
+++ b/arch/arm/boot/bootp/init.S
@@ -44,7 +44,7 @@ _start: add lr, pc, #-0x8 @ lr = current load addr
*/
movne r10, #0 @ terminator
movne r4, #2 @ Size of this entry (2 words)
- stmneia r9, {r4, r5, r10} @ Size, ATAG_CORE, terminator
+ stmiane r9, {r4, r5, r10} @ Size, ATAG_CORE, terminator
/*
* find the end of the tag list, and then add an INITRD tag on the end.
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 6114ae6ea466..9219389bbe61 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -132,8 +132,6 @@ endif
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
-# ?
-LDFLAGS_vmlinux += -p
# Report unresolved symbol references
LDFLAGS_vmlinux += --no-undefined
# Delete all temporary local symbols
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6c7ccb428c07..7135820f76d4 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
@ Preserve return value of efi_entry() in r4
mov r4, r0
- bl cache_clean_flush
+
+ @ our cache maintenance code relies on CP15 barrier instructions
+ @ but since we arrived here with the MMU and caches configured
+ @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+ @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+ @ the enable path will be executed on v7+ only.
+ mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
+ tst r1, #(1 << 5) @ CP15BEN bit set?
+ bne 0f
+ orr r1, r1, #(1 << 5) @ CP15 barrier instructions
+ mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
+ ARM( .inst 0xf57ff06f @ v7+ isb )
+ THUMB( isb )
+
+0: bl cache_clean_flush
bl cache_off
@ Set parameters for booting zImage according to boot protocol
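
In C terms the new preamble amounts to the following; read_sctlr(), write_sctlr() and isb() here are hypothetical stand-ins for the mrc/mcr p15 accesses and the barrier pair in the assembly above:

    /* Pseudo-C of the CP15BEN check: UEFI may hand over with the bit
     * clear, and the cache maintenance below needs CP15 barriers. */
    static void sketch_ensure_cp15ben(void)
    {
            u32 sctlr = read_sctlr();               /* mrc p15, 0, ..., c1, c0, 0 */

            if (!(sctlr & (1u << 5))) {             /* CP15BEN clear? */
                    sctlr |= 1u << 5;
                    write_sctlr(sctlr);             /* mcr p15, 0, ..., c1, c0, 0 */
                    isb();                          /* v7+: flush the pipeline */
            }
    }
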
diff --git a/arch/arm/boot/compressed/ll_char_wr.S b/arch/arm/boot/compressed/ll_char_wr.S
index 8517c8606b4a..b1dcdb9f4030 100644
--- a/arch/arm/boot/compressed/ll_char_wr.S
+++ b/arch/arm/boot/compressed/ll_char_wr.S
@@ -75,7 +75,7 @@ Lrow4bpplp:
tst r1, #7 @ avoid using r7 directly after
str r7, [r0, -r5]!
subne r1, r1, #1
- ldrneb r7, [r6, r1]
+ ldrbne r7, [r6, r1]
bne Lrow4bpplp
ldmfd sp!, {r4 - r7, pc}
@@ -103,7 +103,7 @@ Lrow8bpplp:
sub r0, r0, r5 @ avoid ip
stmia r0, {r4, ip}
subne r1, r1, #1
- ldrneb r7, [r6, r1]
+ ldrbne r7, [r6, r1]
bne Lrow8bpplp
ldmfd sp!, {r4 - r7, pc}
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index dce5be5df97b..edcff79879e7 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -57,6 +57,24 @@
enable-active-high;
};
+ /* TPS79501 */
+ v1_8d_reg: fixedregulator-v1_8d {
+ compatible = "regulator-fixed";
+ regulator-name = "v1_8d";
+ vin-supply = <&vbat>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ /* TPS79501 */
+ v3_3d_reg: fixedregulator-v3_3d {
+ compatible = "regulator-fixed";
+ regulator-name = "v3_3d";
+ vin-supply = <&vbat>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
matrix_keypad: matrix_keypad0 {
compatible = "gpio-matrix-keypad";
debounce-delay-ms = <5>;
@@ -499,10 +517,10 @@
status = "okay";
/* Regulators */
- AVDD-supply = <&vaux2_reg>;
- IOVDD-supply = <&vaux2_reg>;
- DRVDD-supply = <&vaux2_reg>;
- DVDD-supply = <&vbat>;
+ AVDD-supply = <&v3_3d_reg>;
+ IOVDD-supply = <&v3_3d_reg>;
+ DRVDD-supply = <&v3_3d_reg>;
+ DVDD-supply = <&v1_8d_reg>;
};
};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index b128998097ce..2c2d8b5b8cf5 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -73,6 +73,24 @@
enable-active-high;
};
+ /* TPS79518 */
+ v1_8d_reg: fixedregulator-v1_8d {
+ compatible = "regulator-fixed";
+ regulator-name = "v1_8d";
+ vin-supply = <&vbat>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ /* TPS78633 */
+ v3_3d_reg: fixedregulator-v3_3d {
+ compatible = "regulator-fixed";
+ regulator-name = "v3_3d";
+ vin-supply = <&vbat>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
leds {
pinctrl-names = "default";
pinctrl-0 = <&user_leds_s0>;
@@ -501,10 +519,10 @@
status = "okay";
/* Regulators */
- AVDD-supply = <&vaux2_reg>;
- IOVDD-supply = <&vaux2_reg>;
- DRVDD-supply = <&vaux2_reg>;
- DVDD-supply = <&vbat>;
+ AVDD-supply = <&v3_3d_reg>;
+ IOVDD-supply = <&v3_3d_reg>;
+ DRVDD-supply = <&v3_3d_reg>;
+ DVDD-supply = <&v1_8d_reg>;
};
};
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index f459ec316a22..ca6d9f02a800 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -1762,7 +1762,7 @@
reg = <0xcc000 0x4>;
reg-names = "rev";
/* Domains (P, C): per_pwrdm, l4ls_clkdm */
- clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>;
+ clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
@@ -1785,7 +1785,7 @@
reg = <0xd0000 0x4>;
reg-names = "rev";
/* Domains (P, C): per_pwrdm, l4ls_clkdm */
- clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>;
+ clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index 5641d162dfdb..28e7513ce617 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -93,7 +93,7 @@
};
&hdmi {
- hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+ hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
};
&pwm {
diff --git a/arch/arm/boot/dts/imx28-cfa10036.dts b/arch/arm/boot/dts/imx28-cfa10036.dts
index d3e3622979c5..de48b5808ef6 100644
--- a/arch/arm/boot/dts/imx28-cfa10036.dts
+++ b/arch/arm/boot/dts/imx28-cfa10036.dts
@@ -11,6 +11,7 @@
/dts-v1/;
#include "imx28.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "Crystalfontz CFA-10036 Board";
@@ -96,7 +97,7 @@
pinctrl-names = "default";
pinctrl-0 = <&ssd1306_cfa10036>;
reg = <0x3c>;
- reset-gpios = <&gpio2 7 0>;
+ reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
solomon,height = <32>;
solomon,width = <128>;
solomon,page-offset = <0>;
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index b715ab0fa1ff..e8d800fec637 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -114,9 +114,9 @@
reg = <2>;
};
- switch@0 {
+ switch@10 {
compatible = "qca,qca8334";
- reg = <0>;
+ reg = <10>;
switch_ports: ports {
#address-cells = <1>;
@@ -125,7 +125,7 @@
ethphy0: port@0 {
reg = <0>;
label = "cpu";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
ethernet = <&fec>;
fixed-link {
diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
index 1d1b4bd0670f..a4217f564a53 100644
--- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
@@ -264,7 +264,7 @@
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
vmcc-supply = <&reg_sd3_vmmc>;
cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
- bus-witdh = <4>;
+ bus-width = <4>;
no-1-8-v;
status = "okay";
};
@@ -275,7 +275,7 @@
pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
vmcc-supply = <&reg_sd4_vmmc>;
- bus-witdh = <8>;
+ bus-width = <8>;
no-1-8-v;
non-removable;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 433bf09a1954..027df06c5dc7 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -91,6 +91,7 @@
pinctrl-0 = <&pinctrl_enet>;
phy-handle = <&ethphy>;
phy-mode = "rgmii";
+ phy-reset-duration = <10>; /* in msecs */
phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
phy-supply = <&vdd_eth_io_reg>;
status = "disabled";
diff --git a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
index f6fb6783c193..54cfe72295aa 100644
--- a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
+++ b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright (C) 2017 NXP
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 6b298e388f4b..6eb98e7c568d 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -96,6 +96,14 @@
};
};
+&aips2 {
+ pcie_phy: pcie-phy@306d0000 {
+ compatible = "fsl,imx7d-pcie-phy";
+ reg = <0x306d0000 0x10000>;
+ status = "disabled";
+ };
+};
+
&aips3 {
usbotg2: usb@30b20000 {
compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
@@ -173,6 +181,7 @@
<&src IMX7_RESET_PCIE_CTRL_APPS_EN>,
<&src IMX7_RESET_PCIE_CTRL_APPS_TURNOFF>;
reset-names = "pciephy", "apps", "turnoff";
+ fsl,imx7d-pcie-phy = <&pcie_phy>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi
index aa107ee41b8b..ef653c3209bc 100644
--- a/arch/arm/boot/dts/rk3288-tinker.dtsi
+++ b/arch/arm/boot/dts/rk3288-tinker.dtsi
@@ -254,6 +254,7 @@
};
vccio_sd: LDO_REG5 {
+ regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-name = "vccio_sd";
@@ -430,7 +431,7 @@
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
- card-detect-delay = <200>;
+ broken-cd;
disable-wp; /* wp not hooked up */
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 0bc2409f6903..192dbc089ade 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -25,8 +25,6 @@
gpio_keys: gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&pwr_key_l>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index ca7d52daa8fb..a024d1e7e74c 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -70,7 +70,7 @@
compatible = "arm,cortex-a12";
reg = <0x501>;
resets = <&cru SRST_CORE1>;
- operating-points = <&cpu_opp_table>;
+ operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;
@@ -80,7 +80,7 @@
compatible = "arm,cortex-a12";
reg = <0x502>;
resets = <&cru SRST_CORE2>;
- operating-points = <&cpu_opp_table>;
+ operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;
@@ -90,7 +90,7 @@
compatible = "arm,cortex-a12";
reg = <0x503>;
resets = <&cru SRST_CORE3>;
- operating-points = <&cpu_opp_table>;
+ operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;
@@ -1119,8 +1119,6 @@
clock-names = "ref", "pclk";
power-domains = <&power RK3288_PD_VIO>;
rockchip,grf = <&grf>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
ports {
@@ -1282,27 +1280,27 @@
gpu_opp_table: gpu-opp-table {
compatible = "operating-points-v2";
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <950000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <950000>;
};
- opp@300000000 {
+ opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <1000000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1100000>;
};
- opp@500000000 {
+ opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <1200000>;
};
- opp@600000000 {
+ opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <1250000>;
};
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index 1c01a6f843d8..28a2e45752fe 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -518,7 +518,7 @@
#define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
#define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
#define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
-#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
+#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
#define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
#define PIN_PC10 74
#define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
diff --git a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
index 24be8e63dec8..980b2769caf9 100644
--- a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
@@ -173,6 +173,21 @@
};
};
+ ethernet_rmii: rmii@0 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 11, AF11)>,
+ <STM32_PINMUX('G', 13, AF11)>,
+ <STM32_PINMUX('G', 12, AF11)>,
+ <STM32_PINMUX('C', 4, AF11)>,
+ <STM32_PINMUX('C', 5, AF11)>,
+ <STM32_PINMUX('A', 7, AF11)>,
+ <STM32_PINMUX('C', 1, AF11)>,
+ <STM32_PINMUX('A', 2, AF11)>,
+ <STM32_PINMUX('A', 1, AF11)>;
+ slew-rate = <2>;
+ };
+ };
+
usart1_pins: usart1@0 {
pins1 {
pinmux = <STM32_PINMUX('B', 14, AF4)>; /* USART1_TX */
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index 299af0723790..5cac79ebebb1 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -513,6 +513,19 @@
status = "disabled";
};
};
+
+ mac: ethernet@40028000 {
+ compatible = "st,stm32-dwmac", "snps,dwmac-4.10a";
+ reg = <0x40028000 0x8000>;
+ reg-names = "stmmaceth";
+ interrupts = <61>;
+ interrupt-names = "macirq";
+ clock-names = "stmmaceth", "mac-clk-tx", "mac-clk-rx";
+ clocks = <&rcc ETH1MAC_CK>, <&rcc ETH1TX_CK>, <&rcc ETH1RX_CK>;
+ st,syscon = <&syscfg 0x4>;
+ snps,pbl = <8>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/stm32h743i-disco.dts b/arch/arm/boot/dts/stm32h743i-disco.dts
index f8040356fe2d..dd06c8f3d09a 100644
--- a/arch/arm/boot/dts/stm32h743i-disco.dts
+++ b/arch/arm/boot/dts/stm32h743i-disco.dts
@@ -67,6 +67,23 @@
clock-frequency = <25000000>;
};
+&mac {
+ status = "disabled";
+ pinctrl-0 = <&ethernet_rmii>;
+ pinctrl-names = "default";
+ phy-mode = "rmii";
+ phy-handle = <&phy0>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+};
+
&usart2 {
pinctrl-0 = <&usart2_pins>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32h743i-eval.dts b/arch/arm/boot/dts/stm32h743i-eval.dts
index ef34fa2f79ea..ebc3f0933f5c 100644
--- a/arch/arm/boot/dts/stm32h743i-eval.dts
+++ b/arch/arm/boot/dts/stm32h743i-eval.dts
@@ -105,6 +105,23 @@
status = "okay";
};
+&mac {
+ status = "disabled";
+ pinctrl-0 = <&ethernet_rmii>;
+ pinctrl-names = "default";
+ phy-mode = "rmii";
+ phy-handle = <&phy0>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+};
+
&usart1 {
pinctrl-0 = <&usart1_pins>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index dcad6d6128cf..8c942e60703e 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -616,17 +616,14 @@
};
mc: memory-controller@7000f000 {
- compatible = "nvidia,tegra20-mc";
- reg = <0x7000f000 0x024
- 0x7000f03c 0x3c4>;
+ compatible = "nvidia,tegra20-mc-gart";
+ reg = <0x7000f000 0x400 /* controller registers */
+ 0x58000000 0x02000000>; /* GART aperture */
+ clocks = <&tegra_car TEGRA20_CLK_MC>;
+ clock-names = "mc";
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
#reset-cells = <1>;
- };
-
- iommu@7000f024 {
- compatible = "nvidia,tegra20-gart";
- reg = <0x7000f024 0x00000018 /* controller registers */
- 0x58000000 0x02000000>; /* GART aperture */
+ #iommu-cells = <0>;
};
memory-controller@7000f400 {
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index ad574d20415c..1b1b82b37ce0 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -381,7 +381,7 @@ static int __init nocache_trampoline(unsigned long _arg)
unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
phys_reset_t phys_reset;
- mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+ mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
setup_mm_for_reboot();
__mcpm_cpu_going_down(cpu, cluster);
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 8661dd9b064a..b37f8e675e40 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_IIO=y
CONFIG_FSL_MX25_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_IMX1=y
+CONFIG_PWM_IMX27=y
CONFIG_EXT4_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 5586a5074a96..50fb01d70b10 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -398,7 +398,7 @@ CONFIG_MAG3110=y
CONFIG_MPL3115=y
CONFIG_PWM=y
CONFIG_PWM_FSL_FTM=y
-CONFIG_PWM_IMX=y
+CONFIG_PWM_IMX27=y
CONFIG_NVMEM_IMX_OCOTP=y
CONFIG_NVMEM_VF610_OCOTP=y
CONFIG_TEE=y
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 07e31941dc67..617c2c99ebfb 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
int err;
err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
diff --git a/arch/arm/crypto/chacha-neon-glue.c b/arch/arm/crypto/chacha-neon-glue.c
index 9d6fda81986d..48a89537b828 100644
--- a/arch/arm/crypto/chacha-neon-glue.c
+++ b/arch/arm/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@
#include <crypto/algapi.h>
#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -93,7 +94,7 @@ static int chacha_neon(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_chacha_crypt(req);
return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -107,7 +108,7 @@ static int xchacha_neon(struct skcipher_request *req)
u32 state[16];
u8 real_iv[16];
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_xchacha_crypt(req);
crypto_chacha_init(state, ctx, req->iv);
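
The may_use_simd() to crypto_simd_usable() substitution seen here repeats across the glue files that follow; the new helper from <crypto/internal/simd.h> additionally reports false while the crypto self-tests simulate no-SIMD conditions. A generic sketch of the dispatch pattern these drivers share, with scalar_fallback() and neon_core() as hypothetical stand-ins for each driver's real routines:

#include <asm/neon.h>
#include <crypto/internal/simd.h>

static int example_update(struct shash_desc *desc, const u8 *data,
                          unsigned int len)
{
        if (!crypto_simd_usable())      /* e.g. called from hard IRQ context */
                return scalar_fallback(desc, data, len);   /* hypothetical */

        kernel_neon_begin();
        neon_core(desc, data, len);     /* hypothetical NEON implementation */
        kernel_neon_end();
        return 0;
}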
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index cd9e93b46c2d..e712c2a7d387 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/hwcap.h>
#include <asm/neon.h>
@@ -113,7 +114,7 @@ static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
u32 *crc = shash_desc_ctx(desc);
unsigned int l;
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
if ((u32)data % SCALE_F) {
l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
@@ -147,7 +148,7 @@ static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
u32 *crc = shash_desc_ctx(desc);
unsigned int l;
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
if ((u32)data % SCALE_F) {
l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
index 3d6b800b8396..3b24f2872592 100644
--- a/arch/arm/crypto/crct10dif-ce-glue.c
+++ b/arch/arm/crypto/crct10dif-ce-glue.c
@@ -15,6 +15,7 @@
#include <linux/string.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/neon.h>
#include <asm/simd.h>
@@ -36,7 +37,7 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
{
u16 *crc = shash_desc_ctx(desc);
- if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
*crc = crc_t10dif_pmull(*crc, data, length);
kernel_neon_end();
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index b7d30b6cf49c..39d1ccec1aab 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -14,6 +14,7 @@
#include <asm/unaligned.h>
#include <crypto/cryptd.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/gf128mul.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
@@ -185,7 +186,6 @@ static int ghash_async_init(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
- desc->flags = req->base.flags;
return crypto_shash_init(desc);
}
@@ -196,7 +196,7 @@ static int ghash_async_update(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -214,7 +214,7 @@ static int ghash_async_final(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -232,7 +232,7 @@ static int ghash_async_digest(struct ahash_request *req)
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -242,7 +242,6 @@ static int ghash_async_digest(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
- desc->flags = req->base.flags;
return shash_ahash_digest(req, desc);
}
}
@@ -255,7 +254,6 @@ static int ghash_async_import(struct ahash_request *req, const void *in)
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
- desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
}
diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c
index 49aae87cb2bc..ae5aefc44a4d 100644
--- a/arch/arm/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
static int nhpoly1305_neon_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- if (srclen < 64 || !may_use_simd())
+ if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index b732522e20f8..4c6c6900853c 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -9,6 +9,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
@@ -33,7 +34,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return sha1_update_arm(desc, data, len);
@@ -47,7 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha1_finup_arm(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index d15e0ea2c95e..d6c95c213d42 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -19,6 +19,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -39,7 +40,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return sha1_update_arm(desc, data, len);
@@ -54,7 +55,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha1_finup_arm(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index 1211a5c129fc..a47a9d4b663e 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -9,6 +9,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
@@ -34,7 +35,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_arm_update(desc, data, len);
@@ -49,7 +50,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha256_arm_finup(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 1d82c6cd31a4..f3f6b1624fc3 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -15,6 +15,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -34,7 +35,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_arm_update(desc, data, len);
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
static int sha256_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha256_arm_finup(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index 8a5642b41fd6..d33ab59c26c0 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -9,6 +9,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha512_base.h>
#include <linux/crypto.h>
@@ -30,7 +31,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
return sha512_arm_update(desc, data, len);
@@ -45,7 +46,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha512_arm_finup(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 1d66db9c9db5..41deac2451af 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -9,16 +9,15 @@ generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += msi.h
generic-y += parport.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += seccomp.h
generic-y += segment.h
generic-y += serial.h
generic-y += simd.h
generic-y += sizes.h
-generic-y += timex.h
generic-y += trace_clock.h
generated-y += mach-types.h
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 0bd530702118..d15b8c99f1b3 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -34,6 +34,7 @@
#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5)
#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7)
#define ICC_BPR1 __ACCESS_CP15(c12, 0, c12, 3)
+#define ICC_RPR __ACCESS_CP15(c12, 0, c11, 3)
#define __ICC_AP0Rx(x) __ACCESS_CP15(c12, 0, c8, 4 | x)
#define ICC_AP0R0 __ICC_AP0Rx(0)
@@ -54,7 +55,7 @@
#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
-#define ICH_ELSR __ACCESS_CP15(c12, 4, c11, 5)
+#define ICH_ELRSR __ACCESS_CP15(c12, 4, c11, 5)
#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
@@ -151,7 +152,7 @@ CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
-CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
@@ -245,6 +246,21 @@ static inline void gic_write_bpr1(u32 val)
write_sysreg(val, ICC_BPR1);
}
+static inline u32 gic_read_pmr(void)
+{
+ return read_sysreg(ICC_PMR);
+}
+
+static inline void gic_write_pmr(u32 val)
+{
+ write_sysreg(val, ICC_PMR);
+}
+
+static inline u32 gic_read_rpr(void)
+{
+ return read_sysreg(ICC_RPR);
+}
+
/*
* Even in 32bit systems that use LPAE, there is no guarantee that the I/O
* interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
@@ -347,5 +363,22 @@ static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
#define gits_read_vpendbaser(c) __gic_readq_nonatomic(c)
+static inline bool gic_prio_masking_enabled(void)
+{
+ return false;
+}
+
+static inline void gic_pmr_mask_irqs(void)
+{
+ /* Should not get called. */
+ WARN_ON_ONCE(true);
+}
+
+static inline void gic_arch_enable_irqs(void)
+{
+ /* Should not get called. */
+ WARN_ON_ONCE(true);
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */
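
For context on the atomicity comment above: the header's nonatomic helpers split each 64-bit GIC register into two 32-bit MMIO accesses, lower word first. A sketch consistent with that scheme (the function name is illustrative):

static inline void example_gic_writeq_nonatomic(u64 val,
                                                volatile void __iomem *addr)
{
        writel_relaxed((u32)val, addr);             /* lower 32 bits first */
        writel_relaxed((u32)(val >> 32), addr + 4); /* then the upper half */
}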
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 0a8d7bba2cb0..4b66ecd6be99 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -11,6 +11,10 @@
#include <clocksource/arm_arch_timer.h>
#ifdef CONFIG_ARM_ARCH_TIMER
+/* 32bit ARM doesn't know anything about timer errata... */
+#define has_erratum_handler(h) (false)
+#define erratum_handler(h) (arch_timer_##h)
+
int arch_timer_arch_init(void);
/*
@@ -79,7 +83,7 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline u64 arch_counter_get_cntpct(void)
+static inline u64 __arch_counter_get_cntpct(void)
{
u64 cval;
@@ -88,7 +92,12 @@ static inline u64 arch_counter_get_cntpct(void)
return cval;
}
-static inline u64 arch_counter_get_cntvct(void)
+static inline u64 __arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static inline u64 __arch_counter_get_cntvct(void)
{
u64 cval;
@@ -97,6 +106,11 @@ static inline u64 arch_counter_get_cntvct(void)
return cval;
}
+static inline u64 __arch_counter_get_cntvct_stable(void)
+{
+ return __arch_counter_get_cntvct();
+}
+
static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 28a48e0d4cca..b59921a560da 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -376,9 +376,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
.if \inc == 1
- \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
+ \instr\()b\t\cond\().w \reg, [\ptr, #\off]
.elseif \inc == 4
- \instr\cond\()\t\().w \reg, [\ptr, #\off]
+ \instr\t\cond\().w \reg, [\ptr, #\off]
.else
.error "Unsupported inc macro argument"
.endif
@@ -417,9 +417,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.rept \rept
9999:
.if \inc == 1
- \instr\cond\()b\()\t \reg, [\ptr], #\inc
+ \instr\()b\t\cond \reg, [\ptr], #\inc
.elseif \inc == 4
- \instr\cond\()\t \reg, [\ptr], #\inc
+ \instr\t\cond \reg, [\ptr], #\inc
.else
.error "Unsupported inc macro argument"
.endif
@@ -460,7 +460,7 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
adds \tmp, \addr, #\size - 1
- sbcccs \tmp, \tmp, \limit
+ sbcscc \tmp, \tmp, \limit
bcs \bad
#ifdef CONFIG_CPU_SPECTRE
movcs \addr, #0
@@ -474,7 +474,7 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
sub \tmp, \limit, #1
subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
- subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
movlo \addr, #0 @ if (tmp < 0) addr = NULL
csdb
#endif
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 69772e742a0a..83ae97c049d9 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -11,6 +11,8 @@
#define sev() __asm__ __volatile__ ("sev" : : : "memory")
#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+#else
+#define wfe() do { } while (0)
#endif
#if __LINUX_ARM_ARCH__ >= 7
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 07e27f212dc7..d2453e2d3f1f 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -68,6 +68,8 @@
#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+#define CNTVCT __ACCESS_CP15_64(1, c14)
+
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned long get_cr(void)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 31d3b96f0f4b..03ba90ffc0f8 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -96,15 +96,6 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
-#define arch_setup_dma_ops arch_setup_dma_ops
-extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
-
-#ifdef CONFIG_MMU
-#define arch_teardown_dma_ops arch_teardown_dma_ops
-extern void arch_teardown_dma_ops(struct device *dev);
-#endif
-
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
diff --git a/arch/arm/include/asm/hardware/entry-macro-iomd.S b/arch/arm/include/asm/hardware/entry-macro-iomd.S
index 8c215acd9b57..f7692731e514 100644
--- a/arch/arm/include/asm/hardware/entry-macro-iomd.S
+++ b/arch/arm/include/asm/hardware/entry-macro-iomd.S
@@ -16,25 +16,25 @@
ldr \tmp, =irq_prio_h
teq \irqstat, #0
#ifdef IOMD_BASE
- ldreqb \irqstat, [\base, #IOMD_DMAREQ] @ get dma
+ ldrbeq \irqstat, [\base, #IOMD_DMAREQ] @ get dma
addeq \tmp, \tmp, #256 @ irq_prio_h table size
teqeq \irqstat, #0
bne 2406f
#endif
- ldreqb \irqstat, [\base, #IOMD_IRQREQA] @ get low priority
+ ldrbeq \irqstat, [\base, #IOMD_IRQREQA] @ get low priority
addeq \tmp, \tmp, #256 @ irq_prio_d table size
teqeq \irqstat, #0
#ifdef IOMD_IRQREQC
- ldreqb \irqstat, [\base, #IOMD_IRQREQC]
+ ldrbeq \irqstat, [\base, #IOMD_IRQREQC]
addeq \tmp, \tmp, #256 @ irq_prio_l table size
teqeq \irqstat, #0
#endif
#ifdef IOMD_IRQREQD
- ldreqb \irqstat, [\base, #IOMD_IRQREQD]
+ ldrbeq \irqstat, [\base, #IOMD_IRQREQD]
addeq \tmp, \tmp, #256 @ irq_prio_lc table size
teqeq \irqstat, #0
#endif
-2406: ldrneb \irqnr, [\tmp, \irqstat] @ get IRQ number
+2406: ldrbne \irqnr, [\tmp, \irqstat] @ get IRQ number
.endm
/*
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 6b51826ab3d1..7e22c81398c4 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -281,8 +281,6 @@ extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void _memset_io(volatile void __iomem *, int, size_t);
-#define mmiowb()
-
/*
* Memory access primitives
* ------------------------
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 77121b713bef..8927cae7c966 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -265,6 +265,14 @@ static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
}
}
+static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ return false;
+
+ return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 50e89869178a..770d73257ad9 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -26,6 +26,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
+#include <asm/smp_plat.h>
#include <kvm/arm_arch_timer.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -57,10 +58,13 @@ int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-struct kvm_arch {
- /* VTTBR value associated with below pgd and vmid */
- u64 vttbr;
+struct kvm_vmid {
+ /* The VMID generation used for the virt. memory system */
+ u64 vmid_gen;
+ u32 vmid;
+};
+struct kvm_arch {
/* The last vcpu id that ran on each physical CPU */
int __percpu *last_vcpu_ran;
@@ -70,11 +74,11 @@ struct kvm_arch {
*/
/* The VMID generation used for the virt. memory system */
- u64 vmid_gen;
- u32 vmid;
+ struct kvm_vmid vmid;
/* Stage-2 page table */
pgd_t *pgd;
+ phys_addr_t pgd_phys;
/* Interrupt controller */
struct vgic_dist vgic;
@@ -148,6 +152,13 @@ struct kvm_cpu_context {
typedef struct kvm_cpu_context kvm_cpu_context_t;
+static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+ int cpu)
+{
+ /* The host's MPIDR is immutable, so let's set it up at boot time */
+ cpu_ctxt->cp15[c0_MPIDR] = cpu_logical_map(cpu);
+}
+
struct vcpu_reset_state {
unsigned long pc;
unsigned long r0;
@@ -224,7 +235,35 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-unsigned long kvm_call_hyp(void *hypfn, ...);
+
+unsigned long __kvm_call_hyp(void *hypfn, ...);
+
+/*
+ * The has_vhe() part doesn't get emitted, but is used for type-checking.
+ */
+#define kvm_call_hyp(f, ...) \
+ do { \
+ if (has_vhe()) { \
+ f(__VA_ARGS__); \
+ } else { \
+ __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+ } \
+ } while(0)
+
+#define kvm_call_hyp_ret(f, ...) \
+ ({ \
+ typeof(f(__VA_ARGS__)) ret; \
+ \
+ if (has_vhe()) { \
+ ret = f(__VA_ARGS__); \
+ } else { \
+ ret = __kvm_call_hyp(kvm_ksym_ref(f), \
+ ##__VA_ARGS__); \
+ } \
+ \
+ ret; \
+ })
+
void force_vm_exit(const cpumask_t *mask);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
@@ -275,7 +314,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
* compliant with the PCS!).
*/
- kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
+ __kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
static inline void __cpu_init_stage2(void)
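
The macro split above lets shared KVM code stop special-casing VHE: under VHE the hyp function is an ordinary call, otherwise it traps through __kvm_call_hyp. A hedged usage sketch (both hyp symbols exist in this tree, though the exact call sites are illustrative):

/* void-returning hyp call */
kvm_call_hyp(__kvm_flush_vm_context);

/* value-returning variant; ret takes the callee's own return type */
ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);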
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index e93a0cac9add..87bcd18df8d5 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -40,6 +40,7 @@
#define TTBR1 __ACCESS_CP15_64(1, c2)
#define VTTBR __ACCESS_CP15_64(6, c2)
#define PAR __ACCESS_CP15_64(0, c7)
+#define CNTP_CVAL __ACCESS_CP15_64(2, c14)
#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
#define CNTVOFF __ACCESS_CP15_64(4, c14)
@@ -85,6 +86,7 @@
#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
+#define CNTP_CTL __ACCESS_CP15(c14, 0, c2, 1)
#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
@@ -94,6 +96,8 @@
#define read_sysreg_el0(r) read_sysreg(r##_el0)
#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0)
+#define cntp_ctl_el0 CNTP_CTL
+#define cntp_cval_el0 CNTP_CVAL
#define cntv_ctl_el0 CNTV_CTL
#define cntv_cval_el0 CNTV_CVAL
#define cntvoff_el2 CNTVOFF
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 3a875fc1b63c..31de4ab93005 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
return ret;
}
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+ const void *data, unsigned long len)
+{
+ int srcu_idx = srcu_read_lock(&kvm->srcu);
+ int ret = kvm_write_guest(kvm, gpa, data, len);
+
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+ return ret;
+}
+
static inline void *kvm_get_hyp_vector(void)
{
switch(read_cpuid_part()) {
@@ -421,9 +432,14 @@ static inline int hyp_map_aux_data(void)
static inline void kvm_set_ipa_limit(void) {}
-static inline bool kvm_cpu_has_cnp(void)
+static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
{
- return false;
+ struct kvm_vmid *vmid = &kvm->arch.vmid;
+ u64 vmid_field, baddr;
+
+ baddr = kvm->arch.pgd_phys;
+ vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+ return kvm_phys_to_vttbr(baddr) | vmid_field;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a757401129f9..48ce1b19069b 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -125,6 +125,9 @@ extern pgprot_t pgprot_s2_device;
#define pgprot_stronglyordered(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+#define pgprot_device(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)
+
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 120f4c9bbfde..57fe73ea0f72 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -89,7 +89,11 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax() smp_mb()
+#define cpu_relax() \
+ do { \
+ smp_mb(); \
+ __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
+ } while (0)
#else
#define cpu_relax() barrier()
#endif
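
The cpu_relax() change above targets busy-wait loops: on ARMv6 cores subject to erratum 754327, a polling loop needs a full barrier plus enough straight-line instructions for another core's store to become observable, hence the nop padding. A generic polling sketch, where flag is a placeholder variable:

while (!READ_ONCE(flag))
        cpu_relax();    /* smp_mb() + nop padding on v6 / erratum 754327 */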
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 709a55989cb0..451ae684aaf4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -67,7 +67,6 @@ struct secondary_data {
void *stack;
};
extern struct secondary_data secondary_data;
-extern volatile int pen_release;
extern void secondary_startup(void);
extern void secondary_startup_arm(void);
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 312784ee9936..c729d2113a24 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -19,20 +19,4 @@
#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
-#include <linux/ioport.h>
-
-struct twd_local_timer {
- struct resource res[2];
-};
-
-#define DEFINE_TWD_LOCAL_TIMER(name,base,irq) \
-struct twd_local_timer name __initdata = { \
- .res = { \
- DEFINE_RES_MEM(base, 0x10), \
- DEFINE_RES_IRQ(irq), \
- }, \
-};
-
-int twd_local_timer_register(struct twd_local_timer *);
-
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 099c78fcf62d..8f009e788ad4 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -210,11 +210,12 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
prefetchw(&rw->lock);
__asm__ __volatile__(
+" .syntax unified\n"
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
WFE("mi")
-" rsbpls %0, %1, #0\n"
+" rsbspl %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index de2089501b8b..9587517649bd 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -32,14 +32,14 @@
#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud)
#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address)
-#define stage2_pud_free(kvm, pud) pud_free(NULL, pud)
+#define stage2_pud_free(kvm, pud) do { } while (0)
#define stage2_pud_none(kvm, pud) pud_none(pud)
#define stage2_pud_clear(kvm, pud) pud_clear(pud)
#define stage2_pud_present(kvm, pud) pud_present(pud)
#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address)
-#define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd)
+#define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd)
#define stage2_pud_huge(kvm, pud) pud_huge(pud)
@@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
#define S2_PMD_MASK PMD_MASK
#define S2_PMD_SIZE PMD_SIZE
+#define S2_PUD_MASK PUD_MASK
+#define S2_PUD_SIZE PUD_SIZE
static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
{
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
index 452bbdcbcc83..506314265c6f 100644
--- a/arch/arm/include/asm/suspend.h
+++ b/arch/arm/include/asm/suspend.h
@@ -10,6 +10,7 @@ struct sleep_save_sp {
};
extern void cpu_resume(void);
+extern void cpu_resume_no_hyp(void);
extern void cpu_resume_arm(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 06dea6bce293..080ce70cab12 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -55,53 +55,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- if (n == 0)
- return;
-
- if (i + n > SYSCALL_MAX_ARGS) {
- unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
- unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
- pr_warn("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- memset(args_bad, 0, n_bad * sizeof(args[0]));
- n = SYSCALL_MAX_ARGS - i;
- }
-
- if (i == 0) {
- args[0] = regs->ARM_ORIG_r0;
- args++;
- i++;
- n--;
- }
-
- memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+ args[0] = regs->ARM_ORIG_r0;
+ args++;
+
+ memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- if (n == 0)
- return;
-
- if (i + n > SYSCALL_MAX_ARGS) {
- pr_warn("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- n = SYSCALL_MAX_ARGS - i;
- }
-
- if (i == 0) {
- regs->ARM_ORIG_r0 = args[0];
- args++;
- i++;
- n--;
- }
-
- memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+ regs->ARM_ORIG_r0 = args[0];
+ args++;
+
+ memcpy(&regs->ARM_r0 + 1, args, 5 * sizeof(args[0]));
}
static inline int syscall_get_arch(void)
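
With the i/n parameters gone, syscall_get_arguments() and syscall_set_arguments() always transfer all six argument slots; slot 0 maps to ARM_ORIG_r0 because ARM_r0 is clobbered by the syscall return value. A minimal caller sketch:

unsigned long args[6];

syscall_get_arguments(current, regs, args);
/* args[0] = regs->ARM_ORIG_r0, args[1..5] = regs->ARM_r1..ARM_r5 */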
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f854148c8d7c..bc6d04a09899 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,271 +33,42 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-#define MMU_GATHER_BUNDLE 8
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
free_page_and_swap_cache((struct page *)_table);
}
-struct mmu_table_batch {
- struct rcu_head rcu;
- unsigned int nr;
- void *tables[0];
-};
-
-#define MAX_TABLE_BATCH \
- ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry)
-#else
-#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
- struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- struct mmu_table_batch *batch;
- unsigned int need_flush;
-#endif
- unsigned int fullmm;
- struct vm_area_struct *vma;
- unsigned long start, end;
- unsigned long range_start;
- unsigned long range_end;
- unsigned int nr;
- unsigned int max;
- struct page **pages;
- struct page *local[MMU_GATHER_BUNDLE];
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-/*
- * This is unnecessarily complex. There's three ways the TLB shootdown
- * code is used:
- * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
- * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL.
- * 2. Unmapping all vmas. See exit_mmap().
- * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL. Additionally, page tables will be freed.
- * 3. Unmapping argument pages. See shift_arg_pages().
- * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- * tlb->vma will be NULL.
- */
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
- if (tlb->fullmm || !tlb->vma)
- flush_tlb_mm(tlb->mm);
- else if (tlb->range_end > 0) {
- flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
- tlb->range_start = TASK_SIZE;
- tlb->range_end = 0;
- }
-}
-
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
- if (!tlb->fullmm) {
- if (addr < tlb->range_start)
- tlb->range_start = addr;
- if (addr + PAGE_SIZE > tlb->range_end)
- tlb->range_end = addr + PAGE_SIZE;
- }
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
- unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
- if (addr) {
- tlb->pages = (void *)addr;
- tlb->max = PAGE_SIZE / sizeof(struct page *);
- }
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- tlb_flush(tlb);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb_table_flush(tlb);
-#endif
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- free_pages_and_swap_cache(tlb->pages, tlb->nr);
- tlb->nr = 0;
- if (tlb->pages == tlb->local)
- __tlb_alloc_page(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->fullmm = !(start | (end+1));
- tlb->start = start;
- tlb->end = end;
- tlb->vma = NULL;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
- tlb->nr = 0;
- __tlb_alloc_page(tlb);
+#include <asm-generic/tlb.h>
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb->batch = NULL;
+#ifndef CONFIG_HAVE_RCU_TABLE_FREE
+#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
#endif
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (force) {
- tlb->range_start = start;
- tlb->range_end = end;
- }
-
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
- if (tlb->pages != tlb->local)
- free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
-{
- tlb_add_flush(tlb, addr);
-}
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm) {
- flush_cache_range(vma, vma->vm_start, vma->vm_end);
- tlb->vma = vma;
- tlb->range_start = TASK_SIZE;
- tlb->range_end = 0;
- }
-}
static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm)
- tlb_flush(tlb);
-}
-
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->pages[tlb->nr++] = page;
- VM_WARN_ON(tlb->nr > tlb->max);
- if (tlb->nr == tlb->max)
- return true;
- return false;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- if (__tlb_remove_page(tlb, page))
- tlb_flush_mmu(tlb);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return tlb_remove_page(tlb, page);
-}
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
- unsigned long addr)
+__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
pgtable_page_dtor(pte);
-#ifdef CONFIG_ARM_LPAE
- tlb_add_flush(tlb, addr);
-#else
+#ifndef CONFIG_ARM_LPAE
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
*/
- addr &= PMD_MASK;
- tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
- tlb_add_flush(tlb, addr + SZ_1M);
+ addr = (addr & PMD_MASK) + SZ_1M;
+ __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif
- tlb_remove_entry(tlb, pte);
-}
-
-static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
- unsigned long addr)
-{
-#ifdef CONFIG_ARM_LPAE
- tlb_add_flush(tlb, addr);
- tlb_remove_entry(tlb, virt_to_page(pmdp));
-#endif
+ tlb_remove_table(tlb, pte);
}
static inline void
-tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
-{
- tlb_add_flush(tlb, addr);
-}
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm) do { } while (0)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
+__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
-}
-
-static inline void tlb_flush_remove_tables(struct mm_struct *mm)
-{
-}
+#ifdef CONFIG_ARM_LPAE
+ struct page *page = virt_to_page(pmdp);
-static inline void tlb_flush_remove_tables_local(void *arg)
-{
+ tlb_remove_table(tlb, page);
+#endif
}
#endif /* CONFIG_MMU */
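
One reading of the classic-MMU branch in __pte_free_tlb() above, stated as arithmetic: PMD_SIZE is 2MB there and the pte page serves two 1MB hardware entries, so the gather range is widened to one page on each side of the midpoint, which makes the eventual range flush touch both halves. A worked sketch with hypothetical numbers:

/*
 * base     = addr & PMD_MASK;            e.g. 0x00200000 (hypothetical)
 * midpoint = base + SZ_1M;                    0x00300000
 * gather  += [midpoint - PAGE_SIZE,           0x002ff000 ..
 *             midpoint + PAGE_SIZE)           0x00301000
 */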
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index ae5a0df5316e..dff49845eb87 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -85,7 +85,8 @@ static inline void set_fs(mm_segment_t fs)
#define __range_ok(addr, size) ({ \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
- __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
+ __asm__(".syntax unified\n" \
+ "adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (roksum) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
: "cc"); \
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index 187ccf6496ad..2cb00d15831b 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -49,7 +49,7 @@
* (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
*/
#define EXC_RET_STACK_MASK 0x00000004
-#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
+#define EXC_RET_THREADMODE_PROCESSSTACK (3 << 2)
/* Cache related definitions */
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index ef5dfedacd8d..628c336e8e3b 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -29,13 +29,13 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ ldclne p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ ldcleq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
@@ -53,13 +53,13 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ stclne p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ stcleq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
index 3bc80599c022..4a5a645c76e2 100644
--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -173,7 +173,7 @@
.macro senduart, rd, rx
cmp \rx, #0
- strneb \rd, [\rx, #UART_TX << UART_SHIFT]
+ strbne \rd, [\rx, #UART_TX << UART_SHIFT]
1001:
.endm
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index eee8f7d23899..ce8573157774 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd-common.h
generated-y += unistd-oabi.h
generated-y += unistd-eabi.h
+generic-y += kvm_para.h
diff --git a/arch/arm/include/uapi/asm/kvm_para.h b/arch/arm/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/arm/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index b795dc2408c0..b9f94e03d916 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -86,7 +86,7 @@ hexbuf_rel: .long hexbuf_addr - .
ENTRY(printascii)
addruart_current r3, r1, r2
1: teq r0, #0
- ldrneb r1, [r0], #1
+ ldrbne r1, [r0], #1
teqne r1, #0
reteq lr
2: teq r1, #'\n'
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e85a3af9ddeb..ce4aea57130a 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -636,7 +636,7 @@ call_fpe:
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
- movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
+ movscs r7, r5, lsr #(TIF_USING_IWMMXT + 1)
bcs iwmmxt_task_enable
#endif
ARM( add pc, pc, r8, lsr #6 )
@@ -872,7 +872,7 @@ __kuser_cmpxchg64: @ 0xffff0f60
smp_dmb arm
1: ldrexd r0, r1, [r2] @ load current val
eors r3, r0, r4 @ compare with oldval (1)
- eoreqs r3, r1, r5 @ compare with oldval (2)
+ eorseq r3, r1, r5 @ compare with oldval (2)
strexdeq r3, r6, r7, [r2] @ store newval if eq
teqeq r3, #1 @ success?
beq 1b @ if no then retry
@@ -896,8 +896,8 @@ __kuser_cmpxchg64: @ 0xffff0f60
ldmia r1, {r6, lr} @ load new val
1: ldmia r2, {r0, r1} @ load current val
eors r3, r0, r4 @ compare with oldval (1)
- eoreqs r3, r1, r5 @ compare with oldval (2)
-2: stmeqia r2, {r6, lr} @ store newval if eq
+ eorseq r3, r1, r5 @ compare with oldval (2)
+2: stmiaeq r2, {r6, lr} @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
ldmfd sp!, {r4, r5, r6, pc}
@@ -911,7 +911,7 @@ kuser_cmpxchg64_fixup:
mov r7, #0xffff0fff
sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
subs r8, r4, r7
- rsbcss r8, r8, #(2b - 1b)
+ rsbscs r8, r8, #(2b - 1b)
strcs r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
bcc kuser_cmpxchg32_fixup
@@ -969,7 +969,7 @@ kuser_cmpxchg32_fixup:
mov r7, #0xffff0fff
sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
subs r8, r4, r7
- rsbcss r8, r8, #(2b - 1b)
+ rsbscs r8, r8, #(2b - 1b)
strcs r7, [sp, #S_PC]
ret lr
.previous
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 0465d65d23de..f7649adef505 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -373,7 +373,7 @@ sys_syscall:
movhs scno, #0
csdb
#endif
- stmloia sp, {r5, r6} @ shuffle args
+ stmialo sp, {r5, r6} @ shuffle args
movlo r0, r1
movlo r1, r2
movlo r2, r3
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 773424843d6e..32051ec5b33f 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -127,7 +127,8 @@
*/
.macro v7m_exception_slow_exit ret_r0
cpsid i
- ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK
+ ldr lr, =exc_ret
+ ldr lr, [lr]
@ read original r12, sp, lr, pc and xPSR
add r12, sp, #S_IP
@@ -387,8 +388,8 @@
badr lr, \ret @ return address
.if \reload
add r1, sp, #S_R0 + S_OFF @ pointer to regs
- ldmccia r1, {r0 - r6} @ reload r0-r6
- stmccia sp, {r4, r5} @ update stack arguments
+ ldmiacc r1, {r0 - r6} @ reload r0-r6
+ stmiacc sp, {r4, r5} @ update stack arguments
.endif
ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
#else
@@ -396,8 +397,8 @@
badr lr, \ret @ return address
.if \reload
add r1, sp, #S_R0 + S_OFF @ pointer to regs
- ldmccia r1, {r0 - r6} @ reload r0-r6
- stmccia sp, {r4, r5} @ update stack arguments
+ ldmiacc r1, {r0 - r6} @ reload r0-r6
+ stmiacc sp, {r4, r5} @ update stack arguments
.endif
ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
#endif
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index abcf47848525..19d2dcd6530d 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -146,3 +146,7 @@ ENTRY(vector_table)
.rept CONFIG_CPU_V7M_NUM_IRQ
.long __irq_entry @ External Interrupts
.endr
+ .align 2
+ .globl exc_ret
+exc_ret:
+ .space 4
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index ec29de250076..b38bbd011b35 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -133,9 +133,9 @@ __secondary_data:
*/
.text
__after_proc_init:
-#ifdef CONFIG_ARM_MPU
M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
+#ifdef CONFIG_ARM_MPU
M_CLASS(ldr r3, [r12, 0x50])
AR_CLASS(mrc p15, 0, r3, c0, c1, 4) @ Read ID_MMFR0
and r3, r3, #(MMFR0_PMSA) @ PMSA field
@@ -439,8 +439,8 @@ M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)])
str r5, [r12, #PMSAv8_RBAR_A(0)]
str r6, [r12, #PMSAv8_RLAR_A(0)]
#else
- mcr p15, 0, r5, c6, c10, 1 @ PRBAR4
- mcr p15, 0, r6, c6, c10, 2 @ PRLAR4
+ mcr p15, 0, r5, c6, c10, 0 @ PRBAR4
+ mcr p15, 0, r6, c6, c10, 1 @ PRLAR4
#endif
#endif
ret lr
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 60146e32619a..82a942894fc0 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -180,8 +180,8 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
@ Check whether GICv3 system registers are available
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
ubfx r7, r7, #28, #4
- cmp r7, #1
- bne 2f
+ teq r7, #0
+ beq 2f
@ Enable system register accesses
mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index dd2eb5f76b9f..76300f3813e8 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -91,8 +91,11 @@ void machine_crash_nonpanic_core(void *unused)
set_cpu_online(smp_processor_id(), false);
atomic_dec(&waiting_for_crash_ipi);
- while (1)
+
+ while (1) {
cpu_relax();
+ wfe();
+ }
}
void crash_smp_send_stop(void)
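The same two-line change recurs in ipi_cpu_stop() and omap_prm_reset_system() further down: a CPU parked in a bare cpu_relax() loop spins at full power, whereas an added wfe() lets the core sleep until the next event or interrupt. The resulting idiom, as a sketch with a hypothetical name:

	/* Park this CPU for good; wfe() keeps the busy-wait cheap. */
	static void park_cpu(void)
	{
		while (1) {
			cpu_relax();
			wfe();
		}
	}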
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index a50dc00d79a2..d0a05a3bdb96 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -16,7 +16,7 @@ struct patch {
unsigned int insn;
};
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
__acquires(&patch_lock)
@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
return addr;
if (flags)
- spin_lock_irqsave(&patch_lock, *flags);
+ raw_spin_lock_irqsave(&patch_lock, *flags);
else
__acquire(&patch_lock);
@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
clear_fixmap(fixmap);
if (flags)
- spin_unlock_irqrestore(&patch_lock, *flags);
+ raw_spin_unlock_irqrestore(&patch_lock, *flags);
else
__release(&patch_lock);
}
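patch.c here (and unwind.c below) switch from spinlock_t to raw_spinlock_t because these paths can run in hard atomic context -- text patching under stop_machine(), unwinding from the oops path -- where PREEMPT_RT's sleeping spinlocks must not be taken. A sketch of the pattern, hypothetical names:

	static DEFINE_RAW_SPINLOCK(demo_lock);

	static void demo_atomic_section(void)
	{
		unsigned long flags;

		/* raw_ variants stay true spinning locks on PREEMPT_RT */
		raw_spin_lock_irqsave(&demo_lock, flags);
		/* ... work that may run with IRQs/preemption off ... */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}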
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 375b13f7e780..5d78b6ac0429 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -867,6 +867,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
boot_alias_start = phys_to_idmap(start);
if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, sizeof(*res));
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
res->end = phys_to_idmap(end);
@@ -875,6 +878,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
}
res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*res));
res->name = "System RAM";
res->start = start;
res->end = end;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 76bb8de6bf6b..be5edfdde558 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -549,8 +549,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
int ret;
/*
- * Increment event counter and perform fixup for the pre-signal
- * frame.
+ * Perform fixup for the pre-signal frame.
*/
rseq_signal_deliver(ksig, regs);
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index a8257fc9cf2a..5dc8b80bb693 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -120,6 +120,14 @@ ENDPROC(cpu_resume_after_mmu)
.text
.align
+#ifdef CONFIG_MCPM
+ .arm
+THUMB( .thumb )
+ENTRY(cpu_resume_no_hyp)
+ARM_BE8(setend be) @ ensure we are in BE mode
+ b no_hyp
+#endif
+
#ifdef CONFIG_MMU
.arm
ENTRY(cpu_resume_arm)
@@ -135,6 +143,7 @@ ARM_BE8(setend be) @ ensure we are in BE mode
bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r1
+no_hyp:
mov r1, #0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
ALT_UP_B(1f)
@@ -164,6 +173,9 @@ ENDPROC(cpu_resume)
#ifdef CONFIG_MMU
ENDPROC(cpu_resume_arm)
#endif
+#ifdef CONFIG_MCPM
+ENDPROC(cpu_resume_no_hyp)
+#endif
.align 2
_sleep_save_sp:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1d6f5ea522f4..facd4240ca02 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -62,12 +62,6 @@
*/
struct secondary_data secondary_data;
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int pen_release = -1;
-
enum ipi_msg_type {
IPI_WAKEUP,
IPI_TIMER,
@@ -604,8 +598,10 @@ static void ipi_cpu_stop(unsigned int cpu)
local_fiq_disable();
local_irq_disable();
- while (1)
+ while (1) {
cpu_relax();
+ wfe();
+ }
}
static DEFINE_PER_CPU(struct completion *, cpu_completion);
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index b30eafeef096..3cdc399b9fc3 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -100,8 +100,6 @@ static void twd_timer_stop(void)
disable_percpu_irq(clk->irq);
}
-#ifdef CONFIG_COMMON_CLK
-
/*
* Updates clockevent frequency when the cpu frequency changes.
* Called on the cpu that is changing frequency with interrupts disabled.
@@ -143,54 +141,6 @@ static int twd_clk_init(void)
}
core_initcall(twd_clk_init);
-#elif defined (CONFIG_CPU_FREQ)
-
-#include <linux/cpufreq.h>
-
-/*
- * Updates clockevent frequency when the cpu frequency changes.
- * Called on the cpu that is changing frequency with interrupts disabled.
- */
-static void twd_update_frequency(void *data)
-{
- twd_timer_rate = clk_get_rate(twd_clk);
-
- clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate);
-}
-
-static int twd_cpufreq_transition(struct notifier_block *nb,
- unsigned long state, void *data)
-{
- struct cpufreq_freqs *freqs = data;
-
- /*
- * The twd clock events must be reprogrammed to account for the new
- * frequency. The timer is local to a cpu, so cross-call to the
- * changing cpu.
- */
- if (state == CPUFREQ_POSTCHANGE)
- smp_call_function_single(freqs->cpu, twd_update_frequency,
- NULL, 1);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block twd_cpufreq_nb = {
- .notifier_call = twd_cpufreq_transition,
-};
-
-static int twd_cpufreq_init(void)
-{
- if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
- return cpufreq_register_notifier(&twd_cpufreq_nb,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- return 0;
-}
-core_initcall(twd_cpufreq_init);
-
-#endif
-
static void twd_calibrate_rate(void)
{
unsigned long count;
@@ -366,21 +316,6 @@ out_free:
return err;
}
-int __init twd_local_timer_register(struct twd_local_timer *tlt)
-{
- if (twd_base || twd_evt)
- return -EBUSY;
-
- twd_ppi = tlt->res[1].start;
-
- twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
- if (!twd_base)
- return -ENOMEM;
-
- return twd_local_timer_common_register(NULL);
-}
-
-#ifdef CONFIG_OF
static int __init twd_local_timer_of_register(struct device_node *np)
{
int err;
@@ -406,4 +341,3 @@ out:
TIMER_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
TIMER_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
TIMER_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
-#endif
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index a56e7c856ab5..86870f40f9a0 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -115,8 +115,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
* running on another CPU? For now, ignore it as we
* can't guarantee we won't explode.
*/
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
return;
#else
frame.fp = thread_saved_fp(tsk);
@@ -134,8 +132,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
}
walk_stackframe(&frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
@@ -153,8 +149,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
frame.pc = regs->ARM_pc;
walk_stackframe(&frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 0bee233fef9a..314cfb232a63 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
/* Convert a prel31 symbol to an absolute address */
@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
/* module unwind tables */
struct unwind_table *table;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->begin_addr &&
addr < table->end_addr) {
@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
break;
}
}
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
}
pr_debug("%s: idx = %p\n", __func__, idx);
@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
return tab;
}
@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
if (!tab)
return;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_del(&tab->list);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
kfree(tab);
}
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 48de846f2246..531e59f5be9c 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -8,9 +8,8 @@ ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1
endif
-ccflags-y += -Iarch/arm/kvm -Ivirt/kvm/arm/vgic
-CFLAGS_arm.o := -I. $(plus_virt_def)
-CFLAGS_mmu.o := -I.
+ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
+CFLAGS_arm.o := $(plus_virt_def)
AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index e8bd288fd5be..14915c78bd99 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -293,15 +293,16 @@ static bool access_cntp_tval(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
- u64 now = kvm_phys_timer_read();
- u64 val;
+ u32 val;
if (p->is_write) {
val = *vcpu_reg(vcpu, p->Rt1);
- kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val + now);
+ kvm_arm_timer_write_sysreg(vcpu,
+ TIMER_PTIMER, TIMER_REG_TVAL, val);
} else {
- val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
- *vcpu_reg(vcpu, p->Rt1) = val - now;
+ val = kvm_arm_timer_read_sysreg(vcpu,
+ TIMER_PTIMER, TIMER_REG_TVAL);
+ *vcpu_reg(vcpu, p->Rt1) = val;
}
return true;
@@ -315,9 +316,11 @@ static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
if (p->is_write) {
val = *vcpu_reg(vcpu, p->Rt1);
- kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, val);
+ kvm_arm_timer_write_sysreg(vcpu,
+ TIMER_PTIMER, TIMER_REG_CTL, val);
} else {
- val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
+ val = kvm_arm_timer_read_sysreg(vcpu,
+ TIMER_PTIMER, TIMER_REG_CTL);
*vcpu_reg(vcpu, p->Rt1) = val;
}
@@ -333,9 +336,11 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
if (p->is_write) {
val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
val |= *vcpu_reg(vcpu, p->Rt1);
- kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val);
+ kvm_arm_timer_write_sysreg(vcpu,
+ TIMER_PTIMER, TIMER_REG_CVAL, val);
} else {
- val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+ val = kvm_arm_timer_read_sysreg(vcpu,
+ TIMER_PTIMER, TIMER_REG_CVAL);
*vcpu_reg(vcpu, p->Rt1) = val;
*vcpu_reg(vcpu, p->Rt2) = val >> 32;
}
diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c
index c4782812714c..8bf895ec6e04 100644
--- a/arch/arm/kvm/hyp/cp15-sr.c
+++ b/arch/arm/kvm/hyp/cp15-sr.c
@@ -27,7 +27,6 @@ static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
{
- ctxt->cp15[c0_MPIDR] = read_sysreg(VMPIDR);
ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index aa3f9a9837ac..6ed3cf23fe89 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -176,7 +176,7 @@ THUMB( orr lr, lr, #PSR_T_BIT )
msr spsr_cxsf, lr
ldr lr, =panic
msr ELR_hyp, lr
- ldr lr, =kvm_call_hyp
+ ldr lr, =__kvm_call_hyp
clrex
eret
ENDPROC(__hyp_do_panic)
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index acf1c37fa49c..3b058a5d7c5f 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -77,7 +77,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
- write_sysreg(kvm->arch.vttbr, VTTBR);
+ write_sysreg(kvm_get_vttbr(kvm), VTTBR);
write_sysreg(vcpu->arch.midr, VPIDR);
}
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index c0edd450e104..8e4afba73635 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -41,7 +41,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- write_sysreg(kvm->arch.vttbr, VTTBR);
+ write_sysreg(kvm_get_vttbr(kvm), VTTBR);
isb();
write_sysreg(0, TLBIALLIS);
@@ -61,7 +61,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
/* Switch to requested VMID */
- write_sysreg(kvm->arch.vttbr, VTTBR);
+ write_sysreg(kvm_get_vttbr(kvm), VTTBR);
isb();
write_sysreg(0, TLBIALL);
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 80a1d6cd261c..a08e6419ebe9 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -42,7 +42,7 @@
* r12: caller save
* rest: callee save
*/
-ENTRY(kvm_call_hyp)
+ENTRY(__kvm_call_hyp)
hvc #0
bx lr
-ENDPROC(kvm_call_hyp)
+ENDPROC(__kvm_call_hyp)
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index ad25fd1872c7..0bff0176db2c 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -39,7 +39,7 @@ $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
- NEON_FLAGS := -mfloat-abi=softfp -mfpu=neon
+ NEON_FLAGS := -march=armv7-a -mfloat-abi=softfp -mfpu=neon
CFLAGS_xor-neon.o += $(NEON_FLAGS)
obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
endif
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 93cddab73072..95bd35991288 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -7,7 +7,7 @@
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
- strneb r1, [ip] @ assert word-aligned
+ strbne r1, [ip] @ assert word-aligned
mov r2, #1
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
@@ -32,7 +32,7 @@ ENDPROC(\name )
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
- strneb r1, [ip] @ assert word-aligned
+ strbne r1, [ip] @ assert word-aligned
mov r2, #1
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
@@ -62,7 +62,7 @@ ENDPROC(\name )
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
- strneb r1, [ip] @ assert word-aligned
+ strbne r1, [ip] @ assert word-aligned
and r2, r0, #31
mov r0, r0, lsr #5
mov r3, #1
@@ -89,7 +89,7 @@ ENDPROC(\name )
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
- strneb r1, [ip] @ assert word-aligned
+ strbne r1, [ip] @ assert word-aligned
and r3, r0, #31
mov r0, r0, lsr #5
save_and_disable_irqs ip
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index e936352ccb00..55946e3fa2ba 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -44,7 +44,7 @@ UNWIND(.save {r1, lr})
strusr r2, r0, 1, ne, rept=2
tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
it ne @ explicit IT needed for the label
-USER( strnebt r2, [r0])
+USER( strbtne r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
UNWIND(.fnend)
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 0d4c189c7f4f..6a3419e2c6d8 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -91,7 +91,7 @@
.endm
.macro str1b ptr reg cond=al abort
- str\cond\()b \reg, [\ptr], #1
+ strb\cond \reg, [\ptr], #1
.endm
.macro enter reg1 reg2
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 6ee2f6706f86..b84ce1792043 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -39,9 +39,9 @@ ENTRY(copy_page)
.endr
subs r2, r2, #1 @ 1
stmia r0!, {r3, r4, ip, lr} @ 4
- ldmgtia r1!, {r3, r4, ip, lr} @ 4
+ ldmiagt r1!, {r3, r4, ip, lr} @ 4
bgt 1b @ 1
- PLD( ldmeqia r1!, {r3, r4, ip, lr} )
+ PLD( ldmiaeq r1!, {r3, r4, ip, lr} )
PLD( beq 2b )
ldmfd sp!, {r4, pc} @ 3
ENDPROC(copy_page)
diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S
index 652e4d98cd47..a11f2c25e03a 100644
--- a/arch/arm/lib/copy_template.S
+++ b/arch/arm/lib/copy_template.S
@@ -99,7 +99,7 @@
CALGN( ands ip, r0, #31 )
CALGN( rsb r3, ip, #32 )
- CALGN( sbcnes r4, r3, r2 ) @ C is always set here
+ CALGN( sbcsne r4, r3, r2 ) @ C is always set here
CALGN( bcs 2f )
CALGN( adr r4, 6f )
CALGN( subs r2, r2, r3 ) @ C gets set
@@ -204,7 +204,7 @@
CALGN( ands ip, r0, #31 )
CALGN( rsb ip, ip, #32 )
- CALGN( sbcnes r4, ip, r2 ) @ C is always set here
+ CALGN( sbcsne r4, ip, r2 ) @ C is always set here
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )
@@ -241,7 +241,7 @@
orr r9, r9, ip, lspush #\push
mov ip, ip, lspull #\pull
orr ip, ip, lr, lspush #\push
- str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f
+ str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, abort=19f
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 97a6ff4b7e3c..c7d08096e354 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -49,7 +49,7 @@
.endm
.macro ldr1b ptr reg cond=al abort
- ldr\cond\()b \reg, [\ptr], #1
+ ldrb\cond \reg, [\ptr], #1
.endm
#ifdef CONFIG_CPU_USE_DOMAINS
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index 984e0f29d548..bd84e2db353b 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -40,9 +40,9 @@ td3 .req lr
/* we must have at least one byte. */
tst buf, #1 @ odd address?
movne sum, sum, ror #8
- ldrneb td0, [buf], #1
+ ldrbne td0, [buf], #1
subne len, len, #1
- adcnes sum, sum, td0, put_byte_1
+ adcsne sum, sum, td0, put_byte_1
.Lless4: tst len, #6
beq .Lless8_byte
@@ -68,8 +68,8 @@ td3 .req lr
bne .Lless8_wordlp
.Lless8_byte: tst len, #1 @ odd number of bytes
- ldrneb td0, [buf], #1 @ include last byte
- adcnes sum, sum, td0, put_byte_0 @ update checksum
+ ldrbne td0, [buf], #1 @ include last byte
+ adcsne sum, sum, td0, put_byte_0 @ update checksum
.Ldone: adc r0, sum, #0 @ collect up the last carry
ldr td0, [sp], #4
@@ -78,17 +78,17 @@ td3 .req lr
ldr pc, [sp], #4 @ return
.Lnot_aligned: tst buf, #1 @ odd address
- ldrneb td0, [buf], #1 @ make even
+ ldrbne td0, [buf], #1 @ make even
subne len, len, #1
- adcnes sum, sum, td0, put_byte_1 @ update checksum
+ adcsne sum, sum, td0, put_byte_1 @ update checksum
tst buf, #2 @ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
- ldrneh td0, [buf], #2 @ make 32-bit aligned
+ ldrhne td0, [buf], #2 @ make 32-bit aligned
subne len, len, #2
#else
- ldrneb td0, [buf], #1
- ldrneb ip, [buf], #1
+ ldrbne td0, [buf], #1
+ ldrbne ip, [buf], #1
subne len, len, #2
#ifndef __ARMEB__
orrne td0, td0, ip, lsl #8
@@ -96,7 +96,7 @@ td3 .req lr
orrne td0, ip, td0, lsl #8
#endif
#endif
- adcnes sum, sum, td0 @ update checksum
+ adcsne sum, sum, td0 @ update checksum
ret lr
ENTRY(csum_partial)
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 10b45909610c..08e17758cbea 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -148,9 +148,9 @@ FN_ENTRY
strb r5, [dst], #1
mov r5, r4, get_byte_2
.Lexit: tst len, #1
- strneb r5, [dst], #1
+ strbne r5, [dst], #1
andne r5, r5, #255
- adcnes sum, sum, r5, put_byte_0
+ adcsne sum, sum, r5, put_byte_0
/*
* If the dst pointer was not 16-bit aligned, we
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index b83fdc06286a..f4716d98e0b4 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -95,7 +95,7 @@
add r2, r2, r1
mov r0, #0 @ zero the buffer
9002: teq r2, r1
- strneb r0, [r1], #1
+ strbne r0, [r1], #1
bne 9002b
load_regs
.popsection
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index a9eafe4981eb..4d80f690c48b 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -88,8 +88,8 @@ UNWIND(.fnstart)
@ Break out early if dividend reaches 0.
2: cmp xh, yl
orrcs yh, yh, ip
- subcss xh, xh, yl
- movnes ip, ip, lsr #1
+ subscs xh, xh, yl
+ movsne ip, ip, lsr #1
mov yl, yl, lsr #1
bne 2b
diff --git a/arch/arm/lib/floppydma.S b/arch/arm/lib/floppydma.S
index 617150b1baef..de68d3b343e3 100644
--- a/arch/arm/lib/floppydma.S
+++ b/arch/arm/lib/floppydma.S
@@ -14,8 +14,8 @@
.global floppy_fiqin_end
ENTRY(floppy_fiqin_start)
subs r9, r9, #1
- ldrgtb r12, [r11, #-4]
- ldrleb r12, [r11], #0
+ ldrbgt r12, [r11, #-4]
+ ldrble r12, [r11], #0
strb r12, [r10], #1
subs pc, lr, #4
floppy_fiqin_end:
@@ -23,10 +23,10 @@ floppy_fiqin_end:
.global floppy_fiqout_end
ENTRY(floppy_fiqout_start)
subs r9, r9, #1
- ldrgeb r12, [r10], #1
+ ldrbge r12, [r10], #1
movlt r12, #0
- strleb r12, [r11], #0
- subles pc, lr, #4
+ strble r12, [r11], #0
+ subsle pc, lr, #4
strb r12, [r11, #-4]
subs pc, lr, #4
floppy_fiqout_end:
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index c31b2f3153f1..91038a0a77b5 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -16,10 +16,10 @@
cmp ip, #2
ldrb r3, [r0]
strb r3, [r1], #1
- ldrgeb r3, [r0]
- strgeb r3, [r1], #1
- ldrgtb r3, [r0]
- strgtb r3, [r1], #1
+ ldrbge r3, [r0]
+ strbge r3, [r1], #1
+ ldrbgt r3, [r0]
+ strbgt r3, [r1], #1
subs r2, r2, ip
bne .Linsb_aligned
@@ -72,7 +72,7 @@ ENTRY(__raw_readsb)
bpl .Linsb_16_lp
tst r2, #15
- ldmeqfd sp!, {r4 - r6, pc}
+ ldmfdeq sp!, {r4 - r6, pc}
.Linsb_no_16: tst r2, #8
beq .Linsb_no_8
@@ -109,15 +109,15 @@ ENTRY(__raw_readsb)
str r3, [r1], #4
.Linsb_no_4: ands r2, r2, #3
- ldmeqfd sp!, {r4 - r6, pc}
+ ldmfdeq sp!, {r4 - r6, pc}
cmp r2, #2
ldrb r3, [r0]
strb r3, [r1], #1
- ldrgeb r3, [r0]
- strgeb r3, [r1], #1
- ldrgtb r3, [r0]
- strgtb r3, [r1]
+ ldrbge r3, [r0]
+ strbge r3, [r1], #1
+ ldrbgt r3, [r0]
+ strbgt r3, [r1]
ldmfd sp!, {r4 - r6, pc}
ENDPROC(__raw_readsb)
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S
index 2ed86fa5465f..f2e2064318d2 100644
--- a/arch/arm/lib/io-readsl.S
+++ b/arch/arm/lib/io-readsl.S
@@ -30,7 +30,7 @@ ENTRY(__raw_readsl)
2: movs r2, r2, lsl #31
ldrcs r3, [r0, #0]
ldrcs ip, [r0, #0]
- stmcsia r1!, {r3, ip}
+ stmiacs r1!, {r3, ip}
ldrne r3, [r0, #0]
strne r3, [r1, #0]
ret lr
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 413da9914529..8b25b69c516e 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -68,7 +68,7 @@ ENTRY(__raw_readsw)
bpl .Linsw_8_lp
tst r2, #7
- ldmeqfd sp!, {r4, r5, r6, pc}
+ ldmfdeq sp!, {r4, r5, r6, pc}
.Lno_insw_8: tst r2, #4
beq .Lno_insw_4
@@ -97,9 +97,9 @@ ENTRY(__raw_readsw)
.Lno_insw_2: tst r2, #1
ldrne r3, [r0]
- strneb r3, [r1], #1
+ strbne r3, [r1], #1
movne r3, r3, lsr #8
- strneb r3, [r1]
+ strbne r3, [r1]
ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S
index d9a45e9692ae..5efdd66f5dcd 100644
--- a/arch/arm/lib/io-readsw-armv4.S
+++ b/arch/arm/lib/io-readsw-armv4.S
@@ -76,8 +76,8 @@ ENTRY(__raw_readsw)
pack r3, r3, ip
str r3, [r1], #4
-.Lno_insw_2: ldrneh r3, [r0]
- strneh r3, [r1]
+.Lno_insw_2: ldrhne r3, [r0]
+ strhne r3, [r1]
ldmfd sp!, {r4, r5, pc}
@@ -94,7 +94,7 @@ ENTRY(__raw_readsw)
#endif
.Linsw_noalign: stmfd sp!, {r4, lr}
- ldrccb ip, [r1, #-1]!
+ ldrbcc ip, [r1, #-1]!
bcc 1f
ldrh ip, [r0]
@@ -121,11 +121,11 @@ ENTRY(__raw_readsw)
3: tst r2, #1
strb ip, [r1], #1
- ldrneh ip, [r0]
+ ldrhne ip, [r0]
_BE_ONLY_( movne ip, ip, ror #8 )
- strneb ip, [r1], #1
+ strbne ip, [r1], #1
_LE_ONLY_( movne ip, ip, lsr #8 )
_BE_ONLY_( movne ip, ip, lsr #24 )
- strneb ip, [r1]
+ strbne ip, [r1]
ldmfd sp!, {r4, pc}
ENDPROC(__raw_readsw)
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index a46bbc9b168b..7d2881a2381e 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -36,10 +36,10 @@
cmp ip, #2
ldrb r3, [r1], #1
strb r3, [r0]
- ldrgeb r3, [r1], #1
- strgeb r3, [r0]
- ldrgtb r3, [r1], #1
- strgtb r3, [r0]
+ ldrbge r3, [r1], #1
+ strbge r3, [r0]
+ ldrbgt r3, [r1], #1
+ strbgt r3, [r0]
subs r2, r2, ip
bne .Loutsb_aligned
@@ -64,7 +64,7 @@ ENTRY(__raw_writesb)
bpl .Loutsb_16_lp
tst r2, #15
- ldmeqfd sp!, {r4, r5, pc}
+ ldmfdeq sp!, {r4, r5, pc}
.Loutsb_no_16: tst r2, #8
beq .Loutsb_no_8
@@ -80,15 +80,15 @@ ENTRY(__raw_writesb)
outword r3
.Loutsb_no_4: ands r2, r2, #3
- ldmeqfd sp!, {r4, r5, pc}
+ ldmfdeq sp!, {r4, r5, pc}
cmp r2, #2
ldrb r3, [r1], #1
strb r3, [r0]
- ldrgeb r3, [r1], #1
- strgeb r3, [r0]
- ldrgtb r3, [r1]
- strgtb r3, [r0]
+ ldrbge r3, [r1], #1
+ strbge r3, [r0]
+ ldrbgt r3, [r1]
+ strbgt r3, [r0]
ldmfd sp!, {r4, r5, pc}
ENDPROC(__raw_writesb)
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S
index 4ea2435988c1..7596ac0c90b0 100644
--- a/arch/arm/lib/io-writesl.S
+++ b/arch/arm/lib/io-writesl.S
@@ -28,7 +28,7 @@ ENTRY(__raw_writesl)
bpl 1b
ldmfd sp!, {r4, lr}
2: movs r2, r2, lsl #31
- ldmcsia r1!, {r3, ip}
+ ldmiacs r1!, {r3, ip}
strcs r3, [r0, #0]
ldrne r3, [r1, #0]
strcs ip, [r0, #0]
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 121789eb6802..cb94b9b49405 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -79,7 +79,7 @@ ENTRY(__raw_writesw)
bpl .Loutsw_8_lp
tst r2, #7
- ldmeqfd sp!, {r4, r5, r6, pc}
+ ldmfdeq sp!, {r4, r5, r6, pc}
.Lno_outsw_8: tst r2, #4
beq .Lno_outsw_4
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index 269f90c51ad2..e6645b2f249e 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -61,8 +61,8 @@ ENTRY(__raw_writesw)
ldr r3, [r1], #4
outword r3
-.Lno_outsw_2: ldrneh r3, [r1]
- strneh r3, [r0]
+.Lno_outsw_2: ldrhne r3, [r1]
+ strhne r3, [r0]
ldmfd sp!, {r4, r5, pc}
@@ -95,6 +95,6 @@ ENTRY(__raw_writesw)
tst r2, #1
3: movne ip, r3, lsr #8
- strneh ip, [r0]
+ strhne ip, [r0]
ret lr
ENDPROC(__raw_writesw)
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index 9397b2e532af..c23f9d9e2970 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -96,7 +96,7 @@ Boston, MA 02111-1307, USA. */
subhs \dividend, \dividend, \divisor, lsr #3
orrhs \result, \result, \curbit, lsr #3
cmp \dividend, #0 @ Early termination?
- movnes \curbit, \curbit, lsr #4 @ No, any more bits to do?
+ movsne \curbit, \curbit, lsr #4 @ No, any more bits to do?
movne \divisor, \divisor, lsr #4
bne 1b
@@ -182,7 +182,7 @@ Boston, MA 02111-1307, USA. */
subhs \dividend, \dividend, \divisor, lsr #3
cmp \dividend, #1
mov \divisor, \divisor, lsr #4
- subges \order, \order, #4
+ subsge \order, \order, #4
bge 1b
tst \order, #3
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 64111bd4440b..4a6997bb4404 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -30,7 +30,7 @@
.endm
.macro ldr1b ptr reg cond=al abort
- ldr\cond\()b \reg, [\ptr], #1
+ ldrb\cond \reg, [\ptr], #1
.endm
.macro str1w ptr reg abort
@@ -42,7 +42,7 @@
.endm
.macro str1b ptr reg cond=al abort
- str\cond\()b \reg, [\ptr], #1
+ strb\cond \reg, [\ptr], #1
.endm
.macro enter reg1 reg2
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 69a9d47fc5ab..d70304cb2cd0 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -59,7 +59,7 @@ ENTRY(memmove)
blt 5f
CALGN( ands ip, r0, #31 )
- CALGN( sbcnes r4, ip, r2 ) @ C is always set here
+ CALGN( sbcsne r4, ip, r2 ) @ C is always set here
CALGN( bcs 2f )
CALGN( adr r4, 6f )
CALGN( subs r2, r2, ip ) @ C is set here
@@ -114,20 +114,20 @@ ENTRY(memmove)
UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
8: movs r2, r2, lsl #31
- ldrneb r3, [r1, #-1]!
- ldrcsb r4, [r1, #-1]!
- ldrcsb ip, [r1, #-1]
- strneb r3, [r0, #-1]!
- strcsb r4, [r0, #-1]!
- strcsb ip, [r0, #-1]
+ ldrbne r3, [r1, #-1]!
+ ldrbcs r4, [r1, #-1]!
+ ldrbcs ip, [r1, #-1]
+ strbne r3, [r0, #-1]!
+ strbcs r4, [r0, #-1]!
+ strbcs ip, [r0, #-1]
ldmfd sp!, {r0, r4, pc}
9: cmp ip, #2
- ldrgtb r3, [r1, #-1]!
- ldrgeb r4, [r1, #-1]!
+ ldrbgt r3, [r1, #-1]!
+ ldrbge r4, [r1, #-1]!
ldrb lr, [r1, #-1]!
- strgtb r3, [r0, #-1]!
- strgeb r4, [r0, #-1]!
+ strbgt r3, [r0, #-1]!
+ strbge r4, [r0, #-1]!
subs r2, r2, ip
strb lr, [r0, #-1]!
blt 8b
@@ -150,7 +150,7 @@ ENTRY(memmove)
blt 14f
CALGN( ands ip, r0, #31 )
- CALGN( sbcnes r4, ip, r2 ) @ C is always set here
+ CALGN( sbcsne r4, ip, r2 ) @ C is always set here
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index ed6d35d9cdb5..5593a45e0a8c 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -44,20 +44,20 @@ UNWIND( .save {r8, lr} )
mov lr, r3
2: subs r2, r2, #64
- stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
- stmgeia ip!, {r1, r3, r8, lr}
- stmgeia ip!, {r1, r3, r8, lr}
- stmgeia ip!, {r1, r3, r8, lr}
+ stmiage ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
+ stmiage ip!, {r1, r3, r8, lr}
+ stmiage ip!, {r1, r3, r8, lr}
+ stmiage ip!, {r1, r3, r8, lr}
bgt 2b
- ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
+ ldmfdeq sp!, {r8, pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r2, #32
- stmneia ip!, {r1, r3, r8, lr}
- stmneia ip!, {r1, r3, r8, lr}
+ stmiane ip!, {r1, r3, r8, lr}
+ stmiane ip!, {r1, r3, r8, lr}
tst r2, #16
- stmneia ip!, {r1, r3, r8, lr}
+ stmiane ip!, {r1, r3, r8, lr}
ldmfd sp!, {r8, lr}
UNWIND( .fnend )
@@ -87,22 +87,22 @@ UNWIND( .save {r4-r8, lr} )
rsb r8, r8, #32
sub r2, r2, r8
movs r8, r8, lsl #(32 - 4)
- stmcsia ip!, {r4, r5, r6, r7}
- stmmiia ip!, {r4, r5}
+ stmiacs ip!, {r4, r5, r6, r7}
+ stmiami ip!, {r4, r5}
tst r8, #(1 << 30)
mov r8, r1
strne r1, [ip], #4
3: subs r2, r2, #64
- stmgeia ip!, {r1, r3-r8, lr}
- stmgeia ip!, {r1, r3-r8, lr}
+ stmiage ip!, {r1, r3-r8, lr}
+ stmiage ip!, {r1, r3-r8, lr}
bgt 3b
- ldmeqfd sp!, {r4-r8, pc}
+ ldmfdeq sp!, {r4-r8, pc}
tst r2, #32
- stmneia ip!, {r1, r3-r8, lr}
+ stmiane ip!, {r1, r3-r8, lr}
tst r2, #16
- stmneia ip!, {r4-r7}
+ stmiane ip!, {r4-r7}
ldmfd sp!, {r4-r8, lr}
UNWIND( .fnend )
@@ -110,7 +110,7 @@ UNWIND( .fnend )
UNWIND( .fnstart )
4: tst r2, #8
- stmneia ip!, {r1, r3}
+ stmiane ip!, {r1, r3}
tst r2, #4
strne r1, [ip], #4
/*
@@ -118,17 +118,17 @@ UNWIND( .fnstart )
* may have an unaligned pointer as well.
*/
5: tst r2, #2
- strneb r1, [ip], #1
- strneb r1, [ip], #1
+ strbne r1, [ip], #1
+ strbne r1, [ip], #1
tst r2, #1
- strneb r1, [ip], #1
+ strbne r1, [ip], #1
ret lr
6: subs r2, r2, #4 @ 1 do we have enough
blt 5b @ 1 bytes to align with?
cmp r3, #2 @ 1
- strltb r1, [ip], #1 @ 1
- strleb r1, [ip], #1 @ 1
+ strblt r1, [ip], #1 @ 1
+ strble r1, [ip], #1 @ 1
strb r1, [ip], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
index 2c40aeab3eaa..c691b901092f 100644
--- a/arch/arm/lib/xor-neon.c
+++ b/arch/arm/lib/xor-neon.c
@@ -14,7 +14,7 @@
MODULE_LICENSE("GPL");
#ifndef __ARM_NEON__
-#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
+#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
#endif
/*
diff --git a/arch/arm/mach-actions/platsmp.c b/arch/arm/mach-actions/platsmp.c
index 3efaa10efc43..4fd479c948e6 100644
--- a/arch/arm/mach-actions/platsmp.c
+++ b/arch/arm/mach-actions/platsmp.c
@@ -39,10 +39,6 @@ static void __iomem *sps_base_addr;
static void __iomem *timer_base_addr;
static int ncores;
-static DEFINE_SPINLOCK(boot_lock);
-
-void owl_secondary_startup(void);
-
static int s500_wakeup_secondary(unsigned int cpu)
{
int ret;
@@ -84,7 +80,6 @@ static int s500_wakeup_secondary(unsigned int cpu)
static int s500_smp_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned long timeout;
int ret;
ret = s500_wakeup_secondary(cpu);
@@ -93,21 +88,11 @@ static int s500_smp_boot_secondary(unsigned int cpu, struct task_struct *idle)
udelay(10);
- spin_lock(&boot_lock);
-
smp_send_reschedule(cpu);
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (pen_release == -1)
- break;
- }
-
writel(0, timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4);
writel(0, timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4);
- spin_unlock(&boot_lock);
-
return 0;
}
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 51e808adb00c..2a757dcaa1a5 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void)
np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
if (!np)
- goto securam_fail;
+ goto securam_fail_no_ref_dev;
pdev = of_find_device_by_node(np);
of_node_put(np);
if (!pdev) {
pr_warn("%s: failed to find securam device!\n", __func__);
- goto securam_fail;
+ goto securam_fail_no_ref_dev;
}
sram_pool = gen_pool_get(&pdev->dev, NULL);
@@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void)
return 0;
securam_fail:
+ put_device(&pdev->dev);
+securam_fail_no_ref_dev:
iounmap(pm_data.sfrbu);
pm_data.sfrbu = NULL;
return ret;
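The at91 fix splits the error label in two because of_find_device_by_node() returns its device with a reference held: failures after that call must put_device(), failures before it must not. A condensed sketch of that split (hypothetical names, compatible string, and setup step):

	static int __init demo_backup_init(void)
	{
		struct device_node *np;
		struct platform_device *pdev;
		int ret = -ENODEV;

		np = of_find_compatible_node(NULL, NULL, "vendor,securam");
		if (!np)
			goto fail_no_ref;

		pdev = of_find_device_by_node(np);	/* takes a ref */
		of_node_put(np);
		if (!pdev)
			goto fail_no_ref;

		if (demo_setup(pdev))	/* hypothetical setup step */
			goto fail;

		return 0;
	fail:
		put_device(&pdev->dev);	/* drop the reference we hold */
	fail_no_ref:
		return ret;
	}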
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 7d5a44a06648..f676592d8402 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -90,7 +90,7 @@ void __init cns3xxx_map_io(void)
/* used by entry-macro.S */
void __init cns3xxx_init_irq(void)
{
- gic_init(0, 29, IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
+ gic_init(IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
IOMEM(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));
}
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index 8e89ec8b6f0f..34e18e9556d9 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
+#include <linux/gpio/machine.h>
#include <sound/cs4271.h>
@@ -105,13 +106,16 @@ static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
},
};
-static int edb93xx_spi_chipselects[] __initdata = {
- EP93XX_GPIO_LINE_EGPIO6,
+static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = {
+ .dev_id = "ep93xx-spi.0",
+ .table = {
+ GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
+ { },
+ },
};
static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
- .chipselect = edb93xx_spi_chipselects,
- .num_chipselect = ARRAY_SIZE(edb93xx_spi_chipselects),
+ /* Intentionally left blank */
};
static void __init edb93xx_register_spi(void)
@@ -123,6 +127,7 @@ static void __init edb93xx_register_spi(void)
else if (machine_is_edb9315a())
edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO14;
+ gpiod_add_lookup_table(&edb93xx_spi_cs_gpio_table);
ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info,
ARRAY_SIZE(edb93xx_spi_board_info));
}
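This and the following ep93xx boards all make the same conversion: the SPI chip selects move out of ep93xx_spi_info's raw GPIO numbers into a gpiod_lookup_table keyed by the controller's dev_id, from which the spi-ep93xx driver can request its "cs" descriptors by name. The board-side half of the pattern, as a sketch with a hypothetical table name:

	#include <linux/gpio/machine.h>

	static struct gpiod_lookup_table demo_spi_cs_gpio_table = {
		.dev_id = "ep93xx-spi.0",	/* matches the SPI master */
		.table = {
			/* gpiochip "A", line 6, consumer id "cs" */
			GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
			{ },			/* sentinel */
		},
	};

	static void __init demo_register_spi(void)
	{
		gpiod_add_lookup_table(&demo_spi_cs_gpio_table);
		/* then ep93xx_register_spi(...) as in the hunks above */
	}

Boards with several chip selects use GPIO_LOOKUP_IDX() instead, as the vision_ep9307 hunk below shows.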
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index 80ccb984d521..f0f38c0dba52 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -77,13 +77,15 @@ static struct spi_board_info simone_spi_devices[] __initdata = {
* low between multi-message command blocks. From v1.4, it uses a GPIO instead.
* v1.3 parts will still work, since the signal on SFRMOUT is automatic.
*/
-static int simone_spi_chipselects[] __initdata = {
- EP93XX_GPIO_LINE_EGPIO1,
+static struct gpiod_lookup_table simone_spi_cs_gpio_table = {
+ .dev_id = "ep93xx-spi.0",
+ .table = {
+ GPIO_LOOKUP("A", 1, "cs", GPIO_ACTIVE_LOW),
+ { },
+ },
};
static struct ep93xx_spi_info simone_spi_info __initdata = {
- .chipselect = simone_spi_chipselects,
- .num_chipselect = ARRAY_SIZE(simone_spi_chipselects),
.use_dma = 1,
};
@@ -113,6 +115,7 @@ static void __init simone_init_machine(void)
ep93xx_register_i2c(simone_i2c_board_info,
ARRAY_SIZE(simone_i2c_board_info));
gpiod_add_lookup_table(&simone_mmc_spi_gpio_table);
+ gpiod_add_lookup_table(&simone_spi_cs_gpio_table);
ep93xx_register_spi(&simone_spi_info, simone_spi_devices,
ARRAY_SIZE(simone_spi_devices));
simone_register_audio();
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 85b74ac943f0..a3a20c83c6b8 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -22,6 +22,7 @@
#include <linux/spi/mmc_spi.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/spi-ep93xx.h>
+#include <linux/gpio/machine.h>
#include <mach/gpio-ep93xx.h>
#include <mach/hardware.h>
@@ -269,13 +270,15 @@ static struct spi_board_info bk3_spi_board_info[] __initdata = {
* All the work is performed automatically by !SPI_FRAME (SFRM1) and
* goes through the CPLD

*/
-static int bk3_spi_chipselects[] __initdata = {
- EP93XX_GPIO_LINE_F(3),
+static struct gpiod_lookup_table bk3_spi_cs_gpio_table = {
+ .dev_id = "ep93xx-spi.0",
+ .table = {
+ GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW),
+ { },
+ },
};
static struct ep93xx_spi_info bk3_spi_master __initdata = {
- .chipselect = bk3_spi_chipselects,
- .num_chipselect = ARRAY_SIZE(bk3_spi_chipselects),
.use_dma = 1,
};
@@ -316,13 +319,17 @@ static struct spi_board_info ts72xx_spi_devices[] __initdata = {
},
};
-static int ts72xx_spi_chipselects[] __initdata = {
- EP93XX_GPIO_LINE_F(2), /* DIO_17 */
+static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = {
+ .dev_id = "ep93xx-spi.0",
+ .table = {
+ /* DIO_17 */
+ GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW),
+ { },
+ },
};
static struct ep93xx_spi_info ts72xx_spi_info __initdata = {
- .chipselect = ts72xx_spi_chipselects,
- .num_chipselect = ARRAY_SIZE(ts72xx_spi_chipselects),
+ /* Intentionally left blank */
};
static void __init ts72xx_init_machine(void)
@@ -339,6 +346,7 @@ static void __init ts72xx_init_machine(void)
if (board_is_ts7300())
platform_device_register(&ts73xx_fpga_device);
#endif
+ gpiod_add_lookup_table(&ts72xx_spi_cs_gpio_table);
ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
ARRAY_SIZE(ts72xx_spi_devices));
}
@@ -398,6 +406,7 @@ static void __init bk3_init_machine(void)
ep93xx_register_eth(&ts72xx_eth_data, 1);
+ gpiod_add_lookup_table(&bk3_spi_cs_gpio_table);
ep93xx_register_spi(&bk3_spi_master, bk3_spi_board_info,
ARRAY_SIZE(bk3_spi_board_info));
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index 767ee64628dc..f95a644769e4 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -245,15 +245,17 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
},
};
-static int vision_spi_chipselects[] __initdata = {
- EP93XX_GPIO_LINE_EGPIO6,
- EP93XX_GPIO_LINE_EGPIO7,
- EP93XX_GPIO_LINE_G(2),
+static struct gpiod_lookup_table vision_spi_cs_gpio_table = {
+ .dev_id = "ep93xx-spi.0",
+ .table = {
+ GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("G", 2, "cs", 2, GPIO_ACTIVE_LOW),
+ { },
+ },
};
static struct ep93xx_spi_info vision_spi_master __initdata = {
- .chipselect = vision_spi_chipselects,
- .num_chipselect = ARRAY_SIZE(vision_spi_chipselects),
.use_dma = 1,
};
@@ -295,6 +297,7 @@ static void __init vision_init_machine(void)
ep93xx_register_i2c(vision_i2c_info,
ARRAY_SIZE(vision_i2c_info));
gpiod_add_lookup_table(&vision_spi_mmc_gpio_table);
+ gpiod_add_lookup_table(&vision_spi_cs_gpio_table);
ep93xx_register_spi(&vision_spi_master, vision_spi_board_info,
ARRAY_SIZE(vision_spi_board_info));
vision_register_i2s();
diff --git a/arch/arm/mach-exynos/headsmp.S b/arch/arm/mach-exynos/headsmp.S
index 005695c9bf40..0ac2cb9a7355 100644
--- a/arch/arm/mach-exynos/headsmp.S
+++ b/arch/arm/mach-exynos/headsmp.S
@@ -36,4 +36,4 @@ ENDPROC(exynos4_secondary_startup)
.align 2
1: .long .
- .long pen_release
+ .long exynos_pen_release
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index b6da7edbbd2f..abcac6164233 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -28,6 +28,9 @@
extern void exynos4_secondary_startup(void);
+/* XXX exynos_pen_release is cargo culted code - DO NOT COPY XXX */
+volatile int exynos_pen_release = -1;
+
#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
@@ -57,7 +60,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
wfi();
- if (pen_release == core_id) {
+ if (exynos_pen_release == core_id) {
/*
* OK, proper wakeup, we're done
*/
@@ -228,15 +231,17 @@ void exynos_core_restart(u32 core_id)
}
/*
- * Write pen_release in a way that is guaranteed to be visible to all
- * observers, irrespective of whether they're taking part in coherency
+ * XXX CARGO CULTED CODE - DO NOT COPY XXX
+ *
+ * Write exynos_pen_release in a way that is guaranteed to be visible to
+ * all observers, irrespective of whether they're taking part in coherency
* or not. This is necessary for the hotplug code to work reliably.
*/
-static void write_pen_release(int val)
+static void exynos_write_pen_release(int val)
{
- pen_release = val;
+ exynos_pen_release = val;
smp_wmb();
- sync_cache_w(&pen_release);
+ sync_cache_w(&exynos_pen_release);
}
static DEFINE_SPINLOCK(boot_lock);
@@ -247,7 +252,7 @@ static void exynos_secondary_init(unsigned int cpu)
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- write_pen_release(-1);
+ exynos_write_pen_release(-1);
/*
* Synchronise with the boot thread.
@@ -322,12 +327,12 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
/*
* The secondary processor is waiting to be released from
* the holding pen - release it, then wait for it to flag
- * that it has been released by resetting pen_release.
+ * that it has been released by resetting exynos_pen_release.
*
- * Note that "pen_release" is the hardware CPU core ID, whereas
+ * Note that "exynos_pen_release" is the hardware CPU core ID, whereas
* "cpu" is Linux's internal ID.
*/
- write_pen_release(core_id);
+ exynos_write_pen_release(core_id);
if (!exynos_cpu_power_state(core_id)) {
exynos_cpu_power_up(core_id);
@@ -376,13 +381,13 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
else
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
- if (pen_release == -1)
+ if (exynos_pen_release == -1)
break;
udelay(10);
}
- if (pen_release != -1)
+ if (exynos_pen_release != -1)
ret = -ETIMEDOUT;
/*
@@ -392,7 +397,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
fail:
spin_unlock(&boot_lock);
- return pen_release != -1 ? ret : 0;
+ return exynos_pen_release != -1 ? ret : 0;
}
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index bfeb25aaf9a2..326e870d7123 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -16,30 +16,23 @@
#include "cpuidle.h"
#include "hardware.h"
-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);
static int imx6q_enter_wait(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- if (atomic_inc_return(&master) == num_online_cpus()) {
- /*
- * With this lock, we prevent other cpu to exit and enter
- * this function again and become the master.
- */
- if (!spin_trylock(&master_lock))
- goto idle;
+ spin_lock(&cpuidle_lock);
+ if (++num_idle_cpus == num_online_cpus())
imx6_set_lpm(WAIT_UNCLOCKED);
- cpu_do_idle();
- imx6_set_lpm(WAIT_CLOCKED);
- spin_unlock(&master_lock);
- goto done;
- }
+ spin_unlock(&cpuidle_lock);
-idle:
cpu_do_idle();
-done:
- atomic_dec(&master);
+
+ spin_lock(&cpuidle_lock);
+ if (num_idle_cpus-- == num_online_cpus())
+ imx6_set_lpm(WAIT_CLOCKED);
+ spin_unlock(&cpuidle_lock);
return index;
}
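The imx6q cpuidle rework trades the atomic "master" counter plus trylock for one spinlock around a plain idle-CPU count: the last CPU entering idle drops the SoC to WAIT_UNCLOCKED, and the first CPU leaving idle restores WAIT_CLOCKED. The asymmetry is what makes it work -- pre-increment on entry, post-decrement on exit, so both tests compare against "all CPUs idle". Condensed from the hunk above:

	spin_lock(&cpuidle_lock);
	if (++num_idle_cpus == num_online_cpus())	/* last one in */
		imx6_set_lpm(WAIT_UNCLOCKED);
	spin_unlock(&cpuidle_lock);

	cpu_do_idle();

	spin_lock(&cpuidle_lock);
	if (num_idle_cpus-- == num_online_cpus())	/* first one out */
		imx6_set_lpm(WAIT_CLOCKED);
	spin_unlock(&cpuidle_lock);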
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 5169dfba9718..07d4fcfe5c2e 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -258,8 +258,7 @@ static void __init visstrim_analog_camera_init(void)
return;
dma_declare_coherent_memory(&pdev->dev, mx2_camera_base,
- mx2_camera_base, MX2_CAMERA_BUF_SIZE,
- DMA_MEMORY_EXCLUSIVE);
+ mx2_camera_base, MX2_CAMERA_BUF_SIZE);
}
static void __init visstrim_reserve(void)
@@ -445,8 +444,7 @@ static void __init visstrim_coda_init(void)
dma_declare_coherent_memory(&pdev->dev,
mx2_camera_base + MX2_CAMERA_BUF_SIZE,
mx2_camera_base + MX2_CAMERA_BUF_SIZE,
- MX2_CAMERA_BUF_SIZE,
- DMA_MEMORY_EXCLUSIVE);
+ MX2_CAMERA_BUF_SIZE);
}
/* DMA deinterlace */
@@ -465,8 +463,7 @@ static void __init visstrim_deinterlace_init(void)
dma_declare_coherent_memory(&pdev->dev,
mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE,
mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE,
- MX2_CAMERA_BUF_SIZE,
- DMA_MEMORY_EXCLUSIVE);
+ MX2_CAMERA_BUF_SIZE);
}
/* Emma-PrP for format conversion */
@@ -485,8 +482,7 @@ static void __init visstrim_emmaprp_init(void)
*/
ret = dma_declare_coherent_memory(&pdev->dev,
mx2_camera_base, mx2_camera_base,
- MX2_CAMERA_BUF_SIZE,
- DMA_MEMORY_EXCLUSIVE);
+ MX2_CAMERA_BUF_SIZE);
if (ret)
pr_err("Failed to declare memory for emmaprp\n");
}
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
index c7169c2f94c4..08c7892866c2 100644
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
return;
m4if_base = of_iomap(np, 0);
+ of_node_put(np);
if (!m4if_base) {
pr_err("Unable to map M4IF registers\n");
return;
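of_find_compatible_node() hands back a node with its refcount raised; once of_iomap() has run, the node itself is no longer needed, so the added of_node_put() drops the reference on every path (the omap2 display.c hunk below makes the matching fix for an early return). A sketch with a hypothetical compatible string:

	static void __init demo_map(void)
	{
		struct device_node *np;
		void __iomem *base;

		np = of_find_compatible_node(NULL, NULL, "vendor,dev");
		if (!np)
			return;
		base = of_iomap(np, 0);
		of_node_put(np);	/* mapping stays valid without the ref */
		if (!base)
			pr_err("Unable to map registers\n");
	}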
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index 643a3d749703..fe50f4cf00a7 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -475,8 +475,7 @@ static int __init mx31moboard_init_cam(void)
ret = dma_declare_coherent_memory(&pdev->dev,
mx3_camera_base, mx3_camera_base,
- MX3_CAMERA_BUF_SIZE,
- DMA_MEMORY_EXCLUSIVE);
+ MX3_CAMERA_BUF_SIZE);
if (ret)
goto err;
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 53c316f7301e..fe4932fda01d 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
}
};
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
static struct iop_adma_platform_data iop13xx_adma_0_data = {
.hw_id = 0,
.pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
.resource = iop13xx_adma_0_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_0_data,
},
};
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
.resource = iop13xx_adma_1_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_1_data,
},
};
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
.resource = iop13xx_adma_2_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_2_data,
},
};
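Here and in tpmi.c below, every DMA_BIT_MASK(64) becomes DMA_BIT_MASK(32) -- presumably because this 32-bit platform cannot address more than 32 bits and modern DMA mapping code rejects masks wider than the bus supports (an inference; the hunks themselves give no rationale). The resulting device template:

	static u64 demo_dmamask = DMA_BIT_MASK(32);

	static struct platform_device demo_channel = {
		.name = "iop-adma",
		.dev = {
			.dma_mask = &demo_dmamask,
			.coherent_dma_mask = DMA_BIT_MASK(32),
		},
	};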
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
index db511ec2b1df..116feb6b261e 100644
--- a/arch/arm/mach-iop13xx/tpmi.c
+++ b/arch/arm/mach-iop13xx/tpmi.c
@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
}
};
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
static struct platform_device iop13xx_tpmi_0_device = {
.name = "iop-tpmi",
.id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
.resource = iop13xx_tpmi_0_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
.resource = iop13xx_tpmi_1_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
.resource = iop13xx_tpmi_2_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
.resource = iop13xx_tpmi_3_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-ks8695/include/mach/entry-macro.S b/arch/arm/mach-ks8695/include/mach/entry-macro.S
index 8315b34f32ff..7ff812cb010b 100644
--- a/arch/arm/mach-ks8695/include/mach/entry-macro.S
+++ b/arch/arm/mach-ks8695/include/mach/entry-macro.S
@@ -42,6 +42,6 @@
moveq \irqstat, \irqstat, lsr #2
addeq \irqnr, \irqnr, #2
tst \irqstat, #0x01
- addeqs \irqnr, \irqnr, #1
+ addseq \irqnr, \irqnr, #1
1001:
.endm
diff --git a/arch/arm/mach-milbeaut/platsmp.c b/arch/arm/mach-milbeaut/platsmp.c
index 591543c81399..3ea880f5fcb7 100644
--- a/arch/arm/mach-milbeaut/platsmp.c
+++ b/arch/arm/mach-milbeaut/platsmp.c
@@ -65,6 +65,7 @@ static void m10v_smp_init(unsigned int max_cpus)
writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
}
+#ifdef CONFIG_HOTPLUG_CPU
static void m10v_cpu_die(unsigned int l_cpu)
{
gic_cpu_if_down(0);
@@ -83,12 +84,15 @@ static int m10v_cpu_kill(unsigned int l_cpu)
return 1;
}
+#endif
static struct smp_operations m10v_smp_ops __initdata = {
.smp_prepare_cpus = m10v_smp_init,
.smp_boot_secondary = m10v_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = m10v_cpu_die,
.cpu_kill = m10v_cpu_kill,
+#endif
};
CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops);
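The milbeaut guards fix !CONFIG_HOTPLUG_CPU builds: struct smp_operations only has .cpu_die and .cpu_kill members when hotplug is enabled, so the callbacks and their initializers need the same #ifdef. The shape of the fix, hypothetical names:

	#ifdef CONFIG_HOTPLUG_CPU
	static void demo_cpu_die(unsigned int cpu)
	{
		/* ... power the core down ... */
	}
	#endif

	static struct smp_operations demo_smp_ops __initdata = {
		.smp_boot_secondary	= demo_boot_secondary,
	#ifdef CONFIG_HOTPLUG_CPU
		.cpu_die		= demo_cpu_die,
	#endif
	};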
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index be30c3c061b4..1b15d593837e 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -182,6 +182,7 @@ static struct resource latch1_resources[] = {
static struct bgpio_pdata latch1_pdata = {
.label = LATCH1_LABEL,
+ .base = -1,
.ngpio = LATCH1_NGPIO,
};
@@ -219,6 +220,7 @@ static struct resource latch2_resources[] = {
static struct bgpio_pdata latch2_pdata = {
.label = LATCH2_LABEL,
+ .base = -1,
.ngpio = LATCH2_NGPIO,
};
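
Both latch hunks set .base = -1, which asks gpiolib to allocate the GPIO number range dynamically instead of claiming a fixed base. A hedged sketch of such platform data; the label and ngpio are made up, and the header location is an assumption for this kernel vintage:

#include <linux/basic_mmio_gpio.h>	/* assumed home of bgpio_pdata here */

static struct bgpio_pdata example_latch_pdata = {
	.label = "example-latch",	/* hypothetical */
	.base = -1,			/* let gpiolib pick the base */
	.ngpio = 8,
};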
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 1444b4b4bd9f..439e143cad7b 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -250,8 +250,10 @@ static int __init omapdss_init_of(void)
if (!node)
return 0;
- if (!of_device_is_available(node))
+ if (!of_device_is_available(node)) {
+ of_node_put(node);
return 0;
+ }
pdev = of_find_device_by_node(node);
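
The omapdss hunk plugs a device-node refcount leak: of_find_* lookups return the node with an elevated refcount, so every early-return path must drop it. A hedged sketch of the rule with a hypothetical compatible string:

#include <linux/of.h>

static int example_check_node(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "example,dss"); /* hypothetical */
	if (!node)
		return 0;

	if (!of_device_is_available(node)) {
		of_node_put(node);	/* balance the lookup on this path too */
		return 0;
	}

	/* ... use the node ... */
	of_node_put(node);
	return 0;
}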
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index b5531dd3ae9c..3a04c73ac03c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1002,8 +1002,10 @@ static int _enable_clocks(struct omap_hwmod *oh)
clk_enable(oh->_clk);
list_for_each_entry(os, &oh->slave_ports, node) {
- if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE))
+ if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
+ omap2_clk_deny_idle(os->_clk);
clk_enable(os->_clk);
+ }
}
/* The opt clocks are controlled by the device driver. */
@@ -1055,8 +1057,10 @@ static int _disable_clocks(struct omap_hwmod *oh)
clk_disable(oh->_clk);
list_for_each_entry(os, &oh->slave_ports, node) {
- if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE))
+ if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
clk_disable(os->_clk);
+ omap2_clk_allow_idle(os->_clk);
+ }
}
if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
@@ -2436,9 +2440,13 @@ static void _setup_iclk_autoidle(struct omap_hwmod *oh)
continue;
if (os->flags & OCPIF_SWSUP_IDLE) {
- /* XXX omap_iclk_deny_idle(c); */
+ /*
+ * We might have multiple users of one iclk with
+ * different requirements; disable autoidle when
+ * the module is enabled, e.g. the DSS iclk.
+ */
} else {
- /* XXX omap_iclk_allow_idle(c); */
+ /* we are enabling autoidle afterwards anyway */
clk_enable(os->_clk);
}
}
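
The two hwmod hunks pair omap2_clk_deny_idle() and omap2_clk_allow_idle() with the enable/disable of software-supervised interface clocks, so an iclk can no longer auto-idle underneath an active module. A reduced sketch of that pairing; the prototypes are assumed to come from mach-omap2's clock headers:

#include <linux/clk.h>

void omap2_clk_deny_idle(struct clk *clk);	/* assumed mach-omap2 helpers */
void omap2_clk_allow_idle(struct clk *clk);

static void example_iclk_enable(struct clk *iclk)
{
	omap2_clk_deny_idle(iclk);	/* block autoidle while in use */
	clk_enable(iclk);
}

static void example_iclk_disable(struct clk *iclk)
{
	clk_disable(iclk);
	omap2_clk_allow_idle(iclk);	/* hardware may gate it again */
}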
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 058a37e6d11c..fd6e0671f957 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -523,8 +523,10 @@ void omap_prm_reset_system(void)
prm_ll_data->reset_system();
- while (1)
+ while (1) {
cpu_relax();
+ wfe();
+ }
}
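
Adding wfe() to the post-reset spin parks the CPU in a low-power wait instead of burning power until the reset bites. A hedged sketch of the terminal loop; the header locations are assumptions for 32-bit ARM:

#include <linux/compiler.h>
#include <asm/barrier.h>	/* wfe() on 32-bit ARM, an assumption here */
#include <asm/processor.h>	/* cpu_relax() */

static void __noreturn example_wait_for_reset(void)
{
	while (1) {
		cpu_relax();
		wfe();		/* sleep until an event or interrupt */
	}
}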
/**
diff --git a/arch/arm/mach-oxnas/Makefile b/arch/arm/mach-oxnas/Makefile
index b625906a9970..61a34e1c0f22 100644
--- a/arch/arm/mach-oxnas/Makefile
+++ b/arch/arm/mach-oxnas/Makefile
@@ -1,2 +1 @@
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
-obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-oxnas/hotplug.c b/arch/arm/mach-oxnas/hotplug.c
deleted file mode 100644
index 854f29b8cba6..000000000000
--- a/arch/arm/mach-oxnas/hotplug.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2002 ARM Ltd.
- * All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-
-#include <asm/cp15.h>
-#include <asm/smp_plat.h>
-
-static inline void cpu_enter_lowpower(void)
-{
- unsigned int v;
-
- asm volatile(
- " mcr p15, 0, %1, c7, c5, 0\n"
- " mcr p15, 0, %1, c7, c10, 4\n"
- /*
- * Turn off coherency
- */
- " mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, #0x20\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- " mrc p15, 0, %0, c1, c0, 0\n"
- " bic %0, %0, %2\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- : "=&r" (v)
- : "r" (0), "Ir" (CR_C)
- : "cc");
-}
-
-static inline void cpu_leave_lowpower(void)
-{
- unsigned int v;
-
- asm volatile( "mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, #0x20\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (CR_C)
- : "cc");
-}
-
-static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
-{
- /*
- * there is no power-control hardware on this platform, so all
- * we can do is put the core into WFI; this is safe as the calling
- * code will have already disabled interrupts
- */
- for (;;) {
- /*
- * here's the WFI
- */
- asm(".word 0xe320f003\n"
- :
- :
- : "memory", "cc");
-
- if (pen_release == cpu_logical_map(cpu)) {
- /*
- * OK, proper wakeup, we're done
- */
- break;
- }
-
- /*
- * Getting here, means that we have come out of WFI without
- * having been woken up - this shouldn't happen
- *
- * Just note it happening - when we're woken, we can report
- * its occurrence.
- */
- (*spurious)++;
- }
-}
-
-/*
- * platform-specific code to shutdown a CPU
- *
- * Called with IRQs disabled
- */
-void ox820_cpu_die(unsigned int cpu)
-{
- int spurious = 0;
-
- /*
- * we're ready for shutdown now, so do it
- */
- cpu_enter_lowpower();
- platform_do_lowpower(cpu, &spurious);
-
- /*
- * bring this CPU back into the world of cache
- * coherency, and then restore interrupts
- */
- cpu_leave_lowpower();
-
- if (spurious)
- pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
-}
diff --git a/arch/arm/mach-oxnas/platsmp.c b/arch/arm/mach-oxnas/platsmp.c
index 442cc8a2f7dc..735141c0e3a3 100644
--- a/arch/arm/mach-oxnas/platsmp.c
+++ b/arch/arm/mach-oxnas/platsmp.c
@@ -19,7 +19,6 @@
#include <asm/smp_scu.h>
extern void ox820_secondary_startup(void);
-extern void ox820_cpu_die(unsigned int cpu);
static void __iomem *cpu_ctrl;
static void __iomem *gic_cpu_ctrl;
@@ -94,9 +93,6 @@ unmap_scu:
static const struct smp_operations ox820_smp_ops __initconst = {
.smp_prepare_cpus = ox820_smp_prepare_cpus,
.smp_boot_secondary = ox820_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_die = ox820_cpu_die,
-#endif
};
CPU_METHOD_OF_DECLARE(ox820_smp, "oxsemi,ox820-smp", &ox820_smp_ops);
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
index 6d77b622d168..457eb7b18160 100644
--- a/arch/arm/mach-prima2/common.h
+++ b/arch/arm/mach-prima2/common.h
@@ -15,6 +15,8 @@
#include <asm/mach/time.h>
#include <asm/exception.h>
+extern volatile int prima2_pen_release;
+
extern const struct smp_operations sirfsoc_smp_ops;
extern void sirfsoc_secondary_startup(void);
extern void sirfsoc_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-prima2/headsmp.S b/arch/arm/mach-prima2/headsmp.S
index 209d9fc5c16c..6cf4fc60347b 100644
--- a/arch/arm/mach-prima2/headsmp.S
+++ b/arch/arm/mach-prima2/headsmp.S
@@ -34,4 +34,4 @@ ENDPROC(sirfsoc_secondary_startup)
.align
1: .long .
- .long pen_release
+ .long prima2_pen_release
diff --git a/arch/arm/mach-prima2/hotplug.c b/arch/arm/mach-prima2/hotplug.c
index a728c78b996f..b6cf1527e330 100644
--- a/arch/arm/mach-prima2/hotplug.c
+++ b/arch/arm/mach-prima2/hotplug.c
@@ -11,6 +11,7 @@
#include <linux/smp.h>
#include <asm/smp_plat.h>
+#include "common.h"
static inline void platform_do_lowpower(unsigned int cpu)
{
@@ -18,7 +19,7 @@ static inline void platform_do_lowpower(unsigned int cpu)
for (;;) {
__asm__ __volatile__("dsb\n\t" "wfi\n\t"
: : : "memory");
- if (pen_release == cpu_logical_map(cpu)) {
+ if (prima2_pen_release == cpu_logical_map(cpu)) {
/*
* OK, proper wakeup, we're done
*/
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index 75ef5d4be554..d1f8b5168083 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -24,13 +24,16 @@ static void __iomem *clk_base;
static DEFINE_SPINLOCK(boot_lock);
+/* XXX prima2_pen_release is cargo culted code - DO NOT COPY XXX */
+volatile int prima2_pen_release = -1;
+
static void sirfsoc_secondary_init(unsigned int cpu)
{
/*
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- pen_release = -1;
+ prima2_pen_release = -1;
smp_wmb();
/*
@@ -80,13 +83,13 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
/*
* The secondary processor is waiting to be released from
* the holding pen - release it, then wait for it to flag
- * that it has been released by resetting pen_release.
+ * that it has been released by resetting prima2_pen_release.
*
- * Note that "pen_release" is the hardware CPU ID, whereas
+ * Note that "prima2_pen_release" is the hardware CPU ID, whereas
* "cpu" is Linux's internal ID.
*/
- pen_release = cpu_logical_map(cpu);
- sync_cache_w(&pen_release);
+ prima2_pen_release = cpu_logical_map(cpu);
+ sync_cache_w(&prima2_pen_release);
/*
* Send the secondary CPU SEV, thereby causing the boot monitor to read
@@ -97,7 +100,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
smp_rmb();
- if (pen_release == -1)
+ if (prima2_pen_release == -1)
break;
udelay(10);
@@ -109,7 +112,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
*/
spin_unlock(&boot_lock);
- return pen_release != -1 ? -ENOSYS : 0;
+ return prima2_pen_release != -1 ? -ENOSYS : 0;
}
const struct smp_operations sirfsoc_smp_ops __initconst = {
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index 5494c9e0c909..99a6a5e809e0 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -46,8 +46,6 @@
extern void secondary_startup_arm(void);
-static DEFINE_SPINLOCK(boot_lock);
-
#ifdef CONFIG_HOTPLUG_CPU
static void qcom_cpu_die(unsigned int cpu)
{
@@ -55,15 +53,6 @@ static void qcom_cpu_die(unsigned int cpu)
}
#endif
-static void qcom_secondary_init(unsigned int cpu)
-{
- /*
- * Synchronise with the boot thread.
- */
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
-}
-
static int scss_release_secondary(unsigned int cpu)
{
struct device_node *node;
@@ -281,24 +270,12 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
}
/*
- * set synchronisation state between this boot processor
- * and the secondary one
- */
- spin_lock(&boot_lock);
-
- /*
* Send the secondary CPU a soft interrupt, thereby causing
* the boot monitor to read the system wide flags register,
* and branch to the address found there.
*/
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
- /*
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- spin_unlock(&boot_lock);
-
return ret;
}
@@ -334,7 +311,6 @@ static void __init qcom_smp_prepare_cpus(unsigned int max_cpus)
static const struct smp_operations smp_msm8660_ops __initconst = {
.smp_prepare_cpus = qcom_smp_prepare_cpus,
- .smp_secondary_init = qcom_secondary_init,
.smp_boot_secondary = msm8660_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = qcom_cpu_die,
@@ -344,7 +320,6 @@ CPU_METHOD_OF_DECLARE(qcom_smp, "qcom,gcc-msm8660", &smp_msm8660_ops);
static const struct smp_operations qcom_smp_kpssv1_ops __initconst = {
.smp_prepare_cpus = qcom_smp_prepare_cpus,
- .smp_secondary_init = qcom_secondary_init,
.smp_boot_secondary = kpssv1_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = qcom_cpu_die,
@@ -354,7 +329,6 @@ CPU_METHOD_OF_DECLARE(qcom_smp_kpssv1, "qcom,kpss-acc-v1", &qcom_smp_kpssv1_ops)
static const struct smp_operations qcom_smp_kpssv2_ops __initconst = {
.smp_prepare_cpus = qcom_smp_prepare_cpus,
- .smp_secondary_init = qcom_secondary_init,
.smp_boot_secondary = kpssv2_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = qcom_cpu_die,
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h
index 909b97c0b237..25b4c5e66e39 100644
--- a/arch/arm/mach-spear/generic.h
+++ b/arch/arm/mach-spear/generic.h
@@ -20,6 +20,8 @@
#include <asm/mach/time.h>
+extern volatile int spear_pen_release;
+
extern void spear13xx_timer_init(void);
extern void spear3xx_timer_init(void);
extern struct pl022_ssp_controller pl022_plat_data;
diff --git a/arch/arm/mach-spear/headsmp.S b/arch/arm/mach-spear/headsmp.S
index c52192dc3d9f..6e250b6c0aa2 100644
--- a/arch/arm/mach-spear/headsmp.S
+++ b/arch/arm/mach-spear/headsmp.S
@@ -43,5 +43,5 @@ pen: ldr r7, [r6]
.align
1: .long .
- .long pen_release
+ .long spear_pen_release
ENDPROC(spear13xx_secondary_startup)
diff --git a/arch/arm/mach-spear/hotplug.c b/arch/arm/mach-spear/hotplug.c
index 12edd1cf8a12..0dd84f609627 100644
--- a/arch/arm/mach-spear/hotplug.c
+++ b/arch/arm/mach-spear/hotplug.c
@@ -16,6 +16,8 @@
#include <asm/cp15.h>
#include <asm/smp_plat.h>
+#include "generic.h"
+
static inline void cpu_enter_lowpower(void)
{
unsigned int v;
@@ -57,7 +59,7 @@ static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
for (;;) {
wfi();
- if (pen_release == cpu) {
+ if (spear_pen_release == cpu) {
/*
* OK, proper wakeup, we're done
*/
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
index 39038a03836a..b1ff4bb86f6d 100644
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -20,16 +20,21 @@
#include <mach/spear.h>
#include "generic.h"
+/* XXX spear_pen_release is cargo culted code - DO NOT COPY XXX */
+volatile int spear_pen_release = -1;
+
/*
- * Write pen_release in a way that is guaranteed to be visible to all
- * observers, irrespective of whether they're taking part in coherency
+ * XXX CARGO CULTED CODE - DO NOT COPY XXX
+ *
+ * Write spear_pen_release in a way that is guaranteed to be visible to
+ * all observers, irrespective of whether they're taking part in coherency
* or not. This is necessary for the hotplug code to work reliably.
*/
-static void write_pen_release(int val)
+static void spear_write_pen_release(int val)
{
- pen_release = val;
+ spear_pen_release = val;
smp_wmb();
- sync_cache_w(&pen_release);
+ sync_cache_w(&spear_pen_release);
}
static DEFINE_SPINLOCK(boot_lock);
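
The prima2 and spear conversions keep the same publish pattern, just through a platform-private variable: store the value, order the store, then clean the line to the point of coherency so a secondary core that has not yet joined coherency still observes it. Hedged sketch:

#include <linux/smp.h>
#include <asm/cacheflush.h>	/* sync_cache_w() on 32-bit ARM */

static volatile int example_pen_release = -1;

static void example_write_pen_release(int val)
{
	example_pen_release = val;
	smp_wmb();				/* order the store */
	sync_cache_w(&example_pen_release);	/* visible pre-coherency */
}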
@@ -42,7 +47,7 @@ static void spear13xx_secondary_init(unsigned int cpu)
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- write_pen_release(-1);
+ spear_write_pen_release(-1);
/*
* Synchronise with the boot thread.
@@ -64,17 +69,17 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
/*
* The secondary processor is waiting to be released from
* the holding pen - release it, then wait for it to flag
- * that it has been released by resetting pen_release.
+ * that it has been released by resetting spear_pen_release.
*
- * Note that "pen_release" is the hardware CPU ID, whereas
+ * Note that "spear_pen_release" is the hardware CPU ID, whereas
* "cpu" is Linux's internal ID.
*/
- write_pen_release(cpu);
+ spear_write_pen_release(cpu);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
smp_rmb();
- if (pen_release == -1)
+ if (spear_pen_release == -1)
break;
udelay(10);
@@ -86,7 +91,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
*/
spin_unlock(&boot_lock);
- return pen_release != -1 ? -ENOSYS : 0;
+ return spear_pen_release != -1 ? -ENOSYS : 0;
}
/*
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index 805f306fa6f7..e22ccf87eded 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -172,7 +172,7 @@ after_errata:
mov32 r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
mov r0, #CPU_NOT_RESETTABLE
cmp r10, #0
- strneb r0, [r5, #__tegra20_cpu1_resettable_status_offset]
+ strbne r0, [r5, #__tegra20_cpu1_resettable_status_offset]
1:
#endif
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 24659952c278..be68d62566c7 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -215,8 +215,8 @@ v6_dma_inv_range:
#endif
tst r1, #D_CACHE_LINE_SIZE - 1
#ifdef CONFIG_DMA_CACHE_RWFO
- ldrneb r2, [r1, #-1] @ read for ownership
- strneb r2, [r1, #-1] @ write for ownership
+ ldrbne r2, [r1, #-1] @ read for ownership
+ strbne r2, [r1, #-1] @ write for ownership
#endif
bic r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
@@ -284,8 +284,8 @@ ENTRY(v6_dma_flush_range)
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
- ldrlob r2, [r0] @ read for ownership
- strlob r2, [r0] @ write for ownership
+ ldrblo r2, [r0] @ read for ownership
+ strblo r2, [r0] @ write for ownership
#endif
blo 1b
mov r0, #0
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index b03202cddddb..f74cdce6d4da 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -45,6 +45,7 @@ static void mc_copy_user_page(void *from, void *to)
int tmp;
asm volatile ("\
+ .syntax unified\n\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
@@ -56,7 +57,7 @@ static void mc_copy_user_page(void *from, void *to)
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
subs %2, %2, #1 @ 1\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
- ldmneia %0!, {r2, r3, ip, lr} @ 4\n\
+ ldmiane %0!, {r2, r3, ip, lr} @ 4\n\
bne 1b @ "
: "+&r" (from), "+&r" (to), "=&r" (tmp)
: "2" (PAGE_SIZE / 64)
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index cd3e165afeed..6d336740aae4 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -27,6 +27,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
int tmp;
asm volatile ("\
+ .syntax unified\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
@@ -38,7 +39,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
subs %2, %2, #1 @ 1\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
- ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
+ ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
bne 1b @ 1\n\
mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB"
: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 8614572e1296..3851bb396442 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -25,6 +25,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
int tmp;
asm volatile ("\
+ .syntax unified\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1: stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\
@@ -34,7 +35,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
subs %2, %2, #1 @ 1\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
- ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
+ ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
bne 1b @ 1\n\
mcr p15, 0, %2, c7, c7, 0 @ flush ID cache"
: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1e3e08a1c456..43f46aa7ef33 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -188,6 +188,7 @@ const struct dma_map_ops arm_dma_ops = {
.unmap_page = arm_dma_unmap_page,
.map_sg = arm_dma_map_sg,
.unmap_sg = arm_dma_unmap_sg,
+ .map_resource = dma_direct_map_resource,
.sync_single_for_cpu = arm_dma_sync_single_for_cpu,
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
@@ -211,6 +212,7 @@ const struct dma_map_ops arm_coherent_dma_ops = {
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
+ .map_resource = dma_direct_map_resource,
.dma_supported = arm_dma_supported,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
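
With .map_resource wired to dma_direct_map_resource, drivers on these buses can hand a physical MMIO address (a device FIFO, say) to the DMA API. A hedged usage sketch; the size and the FIFO address are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_map_fifo(struct device *dev, phys_addr_t fifo_phys)
{
	dma_addr_t dma;

	dma = dma_map_resource(dev, fifo_phys, SZ_4K, DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... point the DMA engine at "dma" ... */

	dma_unmap_resource(dev, dma, SZ_4K, DMA_FROM_DEVICE, 0);
	return 0;
}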
@@ -2277,7 +2279,7 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
* @dev: valid struct device pointer
*
* Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
+ * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
*/
void arm_iommu_detach_device(struct device *dev)
{
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 1d1edd064199..a033f6134a64 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -6,6 +6,7 @@
#include <asm/cputype.h>
#include <asm/idmap.h>
+#include <asm/hwcap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
@@ -110,7 +111,8 @@ static int __init init_static_idmap(void)
__idmap_text_end, 0);
/* Flush L1 for the hardware to see this page table content */
- flush_cache_louis();
+ if (!(elf_hwcap & HWCAP_LPAE))
+ flush_cache_louis();
return 0;
}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 478ea8b7db87..c2daabbe0af0 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -205,7 +205,11 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
BUG_ON(!arm_memblock_steal_permitted);
- phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, align);
+ if (!phys)
+ panic("Failed to steal %pa bytes at %pS\n",
+ &size, (void *)_RET_IP_);
+
memblock_free(phys, size);
memblock_remove(phys, size);
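
memblock_alloc_base() used to panic internally on failure; memblock_phys_alloc() instead returns 0, so the new convention, repeated in the mmu.c hunks further down, is an explicit check-and-panic at the call site. Minimal sketch:

#include <linux/kernel.h>
#include <linux/memblock.h>

static phys_addr_t __init example_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys = memblock_phys_alloc(size, align);

	if (!phys)
		panic("Failed to steal %pa bytes\n", &size);

	memblock_free(phys, size);	/* out of the allocator... */
	memblock_remove(phys, size);	/* ...and out of the memory map */
	return phys;
}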
@@ -278,15 +282,12 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
void __init bootmem_init(void)
{
- unsigned long min, max_low, max_high;
-
memblock_allow_resize();
- max_low = max_high = 0;
- find_limits(&min, &max_low, &max_high);
+ find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);
- early_memtest((phys_addr_t)min << PAGE_SHIFT,
- (phys_addr_t)max_low << PAGE_SHIFT);
+ early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
+ (phys_addr_t)max_low_pfn << PAGE_SHIFT);
/*
* Sparsemem tries to allocate bootmem in memory_present(),
@@ -304,16 +305,7 @@ void __init bootmem_init(void)
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
- zone_sizes_init(min, max_low, max_high);
-
- /*
- * This doesn't seem to be used by the Linux memory manager any
- * more, but is used by ll_rw_block. If we can get rid of it, we
- * also get rid of some of the stuff above as well.
- */
- min_low_pfn = min;
- max_low_pfn = max_low;
- max_pfn = max_high;
+ zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}
/*
@@ -494,55 +486,6 @@ void __init mem_init(void)
mem_init_print_info(NULL);
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
- pr_notice("Virtual kernel memory layout:\n"
- " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#ifdef CONFIG_HAVE_TCM
- " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#endif
- " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
- " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
- " .text : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .init : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
-
- MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
-#ifdef CONFIG_HAVE_TCM
- MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
- MLK(ITCM_OFFSET, (unsigned long) itcm_end),
-#endif
- MLK(FIXADDR_START, FIXADDR_END),
- MLM(VMALLOC_START, VMALLOC_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
-#ifdef CONFIG_HIGHMEM
- MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
- (PAGE_SIZE)),
-#endif
-#ifdef CONFIG_MODULES
- MLM(MODULES_VADDR, MODULES_END),
-#endif
-
- MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(__bss_start, __bss_stop));
-
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 57de0dde3ae0..f3ce34113f89 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -721,7 +721,13 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc(unsigned long sz)
{
- return memblock_alloc(sz, sz);
+ void *ptr = memblock_alloc(sz, sz);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, sz, sz);
+
+ return ptr;
}
static void *__init late_alloc(unsigned long sz)
@@ -994,6 +1000,9 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
return;
svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
+ if (!svm)
+ panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+ __func__, sizeof(*svm) * nr, __alignof__(*svm));
for (md = io_desc; nr; md++, nr--) {
create_mapping(md);
@@ -1016,6 +1025,9 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
struct static_vm *svm;
svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
+ if (!svm)
+ panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+ __func__, sizeof(*svm), __alignof__(*svm));
vm = &svm->vm;
vm->addr = (void *)addr;
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
index 617a83def88a..0d7d5fb59247 100644
--- a/arch/arm/mm/pmsa-v8.c
+++ b/arch/arm/mm/pmsa-v8.c
@@ -165,7 +165,7 @@ static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start,phys_a
return -EINVAL;
bar = start;
- lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);;
+ lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
@@ -181,7 +181,7 @@ static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start,phys_ad
return -EINVAL;
bar = start;
- lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);;
+ lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 47a5acc64433..acd5a66dfc23 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -139,6 +139,9 @@ __v7m_setup_cont:
cpsie i
svc #0
1: cpsid i
+ ldr r0, =exc_ret
+ orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
+ str lr, [r0]
ldmia sp, {r0-r3, r12}
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
mov lr, r6 @ restore LR
@@ -149,10 +152,10 @@ __v7m_setup_cont:
@ Configure caches (if implemented)
teq r8, #0
- stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
+ stmiane sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
blne v7m_invalidate_l1
	teq r8, #0 @ re-evaluate condition
- ldmneia sp, {r0-r6, lr}
+ ldmiane sp, {r0-r6, lr}
@ Configure the System Control Register to ensure 8-byte stack alignment
@ Note the STKALIGN bit is either RW or RAO.
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index a4d1f8de3b5b..d9612221e484 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
.resource = iop3xx_dma_0_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_0_data,
},
};
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
.resource = iop3xx_dma_1_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_1_data,
},
};
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
.resource = iop3xx_aau_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_aau_data,
},
};
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index a6c81ce00f52..8647cb80a93b 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
.resource = orion_xor0_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor0_pdata,
},
};
@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
.resource = orion_xor1_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor1_pdata,
},
};
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 9016f4081bb9..0393917eaa57 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -437,3 +437,7 @@
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
422 common futex_time64 sys_futex
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
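
Once a number is wired into syscall.tbl, userspace can reach the call through syscall(2) before libc grows a wrapper. A hedged userspace sketch using pidfd_send_signal (424 in the table above); on kernels of this vintage the pidfd argument is a /proc/<pid> directory fd, and error handling is minimal:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_send_signal
#define __NR_pidfd_send_signal 424	/* from the table above */
#endif

int main(void)
{
	int pidfd = open("/proc/self", O_DIRECTORY | O_CLOEXEC);
	long ret;

	/* signal 0 probes permissions; nothing is actually delivered */
	ret = syscall(__NR_pidfd_send_signal, pidfd, 0, NULL, 0);
	printf("pidfd_send_signal -> %ld\n", ret);
	close(pidfd);
	return 0;
}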
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index a9dd619c6c29..7bdbf5d5c47d 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -18,9 +18,9 @@
#include <linux/compiler.h>
#include <linux/hrtimer.h>
#include <linux/time.h>
-#include <asm/arch_timer.h>
#include <asm/barrier.h>
#include <asm/bug.h>
+#include <asm/cp15.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/vdso_datapage.h>
@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
u64 cycle_now;
u64 nsec;
- cycle_now = arch_counter_get_cntvct();
+ isb();
+ cycle_now = read_sysreg(CNTVCT);
cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
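
Replacing arch_counter_get_cntvct() with a raw CP15 read keeps the vDSO free of kernel-only headers; the explicit isb() stops the counter read from being speculated ahead of the vDSO data loads. A hedged AArch32 sketch of the same access:

#include <linux/types.h>

static inline u64 example_read_cntvct(void)
{
	u64 cval;

	asm volatile("isb" : : : "memory");
	/* CNTVCT is the 64-bit CP15 register: op1=1, CRm=c14 */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}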
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index cfbf307d6dc4..df350f4e1e7a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -22,12 +22,14 @@ config ARM64
select ARCH_HAS_KCOV
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYSCALL_WRAPPER
+ select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK if !PREEMPT
@@ -88,6 +90,7 @@ config ARM64
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_MULTI_HANDLER
@@ -137,7 +140,6 @@ config ARM64
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GCC_PLUGINS
- select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK_NODE_MAP if NUMA
@@ -147,8 +149,8 @@ config ARM64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_RCU_TABLE_FREE
- select HAVE_RCU_TABLE_INVALIDATE
select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
@@ -158,12 +160,10 @@ config ARM64
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA
- select MULTI_IRQ_HANDLER
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
select OF
select OF_EARLY_FLATTREE
- select OF_RESERVED_MEM
select PCI_DOMAINS_GENERIC if PCI
select PCI_ECAM if (ACPI && PCI)
select PCI_SYSCALL if PCI
@@ -238,9 +238,6 @@ config LOCKDEP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config GENERIC_BUG
def_bool y
depends on BUG
@@ -298,7 +295,7 @@ menu "Kernel Features"
menu "ARM errata workarounds via the alternatives framework"
config ARM64_WORKAROUND_CLEAN_CACHE
- def_bool n
+ bool
config ARM64_ERRATUM_826319
bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted"
@@ -465,26 +462,28 @@ config ARM64_ERRATUM_1024718
bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
default y
help
- This option adds work around for Arm Cortex-A55 Erratum 1024718.
+ This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
update of the hardware dirty bit when the DBM/AP bits are updated
- without a break-before-make. The work around is to disable the usage
+ without a break-before-make. The workaround is to disable the usage
of hardware DBM locally on the affected cores. CPUs not affected by
- erratum will continue to use the feature.
+ this erratum will continue to use the feature.
If unsure, say Y.
config ARM64_ERRATUM_1188873
- bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+ bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
default y
+ depends on COMPAT
select ARM_ARCH_TIMER_OOL_WORKAROUND
help
- This option adds work arounds for ARM Cortex-A76 erratum 1188873
+ This option adds a workaround for ARM Cortex-A76/Neoverse-N1
+ erratum 1188873.
- Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause
- register corruption when accessing the timer registers from
- AArch32 userspace.
+ Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could
+ cause register corruption when accessing the timer registers
+ from AArch32 userspace.
If unsure, say Y.
@@ -492,7 +491,7 @@ config ARM64_ERRATUM_1165522
bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
default y
help
- This option adds work arounds for ARM Cortex-A76 erratum 1165522
+ This option adds a workaround for ARM Cortex-A76 erratum 1165522.
Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end-up with
corrupted TLBs by speculating an AT instruction during a guest
@@ -505,7 +504,7 @@ config ARM64_ERRATUM_1286807
default y
select ARM64_WORKAROUND_REPEAT_TLBI
help
- This option adds workaround for ARM Cortex-A76 erratum 1286807
+ This option adds a workaround for ARM Cortex-A76 erratum 1286807.
On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual
address for a cacheable mapping of a location is being
@@ -522,10 +521,10 @@ config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
help
- Enable workaround for erratum 22375, 24313.
+ Enable workaround for errata 22375 and 24313.
This implements two gicv3-its errata workarounds for ThunderX. Both
- with small impact affecting only ITS table allocation.
+ with a small impact affecting only ITS table allocation.
erratum 22375: only alloc 8MB table size
erratum 24313: ignore memory access type
@@ -589,9 +588,6 @@ config QCOM_FALKOR_ERRATUM_1003
config ARM64_WORKAROUND_REPEAT_TLBI
bool
- help
- Enable the repeat TLBI workaround for Falkor erratum 1009 and
- Cortex-A76 erratum 1286807.
config QCOM_FALKOR_ERRATUM_1009
bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
@@ -627,7 +623,7 @@ config HISILICON_ERRATUM_161600802
bool "Hip07 161600802: Erroneous redistributor VLPI base"
default y
help
- The HiSilicon Hip07 SoC usees the wrong redistributor base
+ The HiSilicon Hip07 SoC uses the wrong redistributor base
when issuing ITS commands such as VMOVP and VMAPP, and requires
a 128kB offset to be applied to the target address in these commands.
@@ -643,6 +639,25 @@ config QCOM_FALKOR_ERRATUM_E1041
If unsure, say Y.
+config FUJITSU_ERRATUM_010001
+ bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly"
+ default y
+ help
+ This option adds a workaround for Fujitsu-A64FX erratum E#010001.
+ On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory
+ accesses may cause undefined fault (Data abort, DFSC=0b111111).
+ This fault occurs under a specific hardware condition when a
+ load/store instruction performs an address translation using:
+ case-1 TTBR0_EL1 with TCR_EL1.NFD0 == 1.
+ case-2 TTBR0_EL2 with TCR_EL2.NFD0 == 1.
+ case-3 TTBR1_EL1 with TCR_EL1.NFD1 == 1.
+ case-4 TTBR1_EL2 with TCR_EL2.NFD1 == 1.
+
+ The workaround is to ensure these bits are clear in TCR_ELx.
+ The workaround only affects the Fujitsu-A64FX.
+
+ If unsure, say Y.
+
endmenu
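
The stated E#010001 workaround is to keep the TCR_ELx NFD bits clear. A hedged arm64 fragment; the bit positions (NFD0 = bit 53, NFD1 = bit 54 of TCR_EL1) come from the Arm ARM, not from this patch:

#include <linux/bits.h>
#include <asm/barrier.h>
#include <asm/sysreg.h>

#define EXAMPLE_TCR_NFD0	BIT(53)	/* assumption: per the Arm ARM */
#define EXAMPLE_TCR_NFD1	BIT(54)

static inline void example_clear_nfd(void)
{
	u64 tcr = read_sysreg(tcr_el1);

	tcr &= ~(EXAMPLE_TCR_NFD0 | EXAMPLE_TCR_NFD1);
	write_sysreg(tcr, tcr_el1);
	isb();
}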
@@ -792,8 +807,7 @@ config SCHED_SMT
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
- # These have to remain sorted largest to smallest
- default "64"
+ default "256"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
@@ -872,6 +886,9 @@ config ARCH_WANT_HUGE_PMD_SHARE
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
+config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+ def_bool y if PGTABLE_LEVELS > 2
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
---help---
@@ -1061,9 +1078,65 @@ config RODATA_FULL_DEFAULT_ENABLED
This requires the linear region to be mapped down to pages,
which may adversely affect performance in some cases.
+config ARM64_SW_TTBR0_PAN
+ bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+ help
+ Enabling this option prevents the kernel from accessing
+ user-space memory directly by pointing TTBR0_EL1 to a reserved
+ zeroed area and reserved ASID. The user access routines
+ restore the valid TTBR0_EL1 temporarily.
+
+menuconfig COMPAT
+ bool "Kernel support for 32-bit EL0"
+ depends on ARM64_4K_PAGES || EXPERT
+ select COMPAT_BINFMT_ELF if BINFMT_ELF
+ select HAVE_UID16
+ select OLD_SIGSUSPEND3
+ select COMPAT_OLD_SIGACTION
+ help
+ This option enables support for a 32-bit EL0 running under a 64-bit
+ kernel at EL1. AArch32-specific components such as system calls,
+ the user helper functions, VFP support and the ptrace interface are
+ handled appropriately by the kernel.
+
+ If you use a page size other than 4KB (i.e, 16KB or 64KB), please be aware
+ that you will only be able to execute AArch32 binaries that were compiled
+ with page size aligned segments.
+
+ If you want to execute 32-bit userspace applications, say Y.
+
+if COMPAT
+
+config KUSER_HELPERS
+ bool "Enable kuser helpers page for 32 bit applications"
+ default y
+ help
+ Warning: disabling this option may break 32-bit user programs.
+
+ Provide kuser helpers to compat tasks. The kernel provides
+ helper code to userspace in read only form at a fixed location
+ to allow userspace to be independent of the CPU type fitted to
+ the system. This permits binaries to be run on ARMv4 through
+ to ARMv8 without modification.
+
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
+ However, the fixed address nature of these helpers can be used
+ by ROP (return orientated programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+ are built specifically for your platform, and make no use of
+ these helpers, then you can turn this option off to hinder
+ such exploits. However, in that case, if a binary or library
+ relying on those helpers is run, it will not function correctly.
+
+ Say N here only if you are absolutely certain that you do not
+ need these helpers; otherwise, the safe option is to say Y.
+
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
- depends on COMPAT
depends on SYSCTL
help
Legacy software support may require certain instructions
@@ -1129,13 +1202,7 @@ config SETEND_EMULATION
If unsure, say Y
endif
-config ARM64_SW_TTBR0_PAN
- bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
- help
- Enabling this option prevents the kernel from accessing
- user-space memory directly by pointing TTBR0_EL1 to a reserved
- zeroed area and reserved ASID. The user access routines
- restore the valid TTBR0_EL1 temporarily.
+endif
menu "ARMv8.1 architectural features"
@@ -1301,6 +1368,9 @@ config ARM64_SVE
To enable use of this extension on CPUs that implement it, say Y.
+ On CPUs that support the SVE2 extensions, this option will enable
+ those too.
+
Note that for architectural reasons, firmware _must_ implement SVE
support when running on SVE capable hardware. The required support
is present in:
@@ -1328,6 +1398,20 @@ config ARM64_MODULE_PLTS
bool
select HAVE_MOD_ARCH_SPECIFIC
+config ARM64_PSEUDO_NMI
+ bool "Support for NMI-like interrupts"
+ select ARM_GIC_V3
+ help
+ Adds support for mimicking Non-Maskable Interrupts through the use of
+ GIC interrupt priority. This support requires version 3 or later of
+ ARM GIC.
+
+ This high priority configuration for interrupts needs to be
+ explicitly enabled by setting the kernel parameter
+ "irqchip.gicv3_pseudo_nmi" to 1.
+
+ If unsure, say N.
+
config RELOCATABLE
bool
help
@@ -1444,25 +1528,6 @@ config DMI
endmenu
-config COMPAT
- bool "Kernel support for 32-bit EL0"
- depends on ARM64_4K_PAGES || EXPERT
- select COMPAT_BINFMT_ELF if BINFMT_ELF
- select HAVE_UID16
- select OLD_SIGSUSPEND3
- select COMPAT_OLD_SIGACTION
- help
- This option enables support for a 32-bit EL0 running under a 64-bit
- kernel at EL1. AArch32-specific components such as system calls,
- the user helper functions, VFP support and the ptrace interface are
- handled appropriately by the kernel.
-
- If you use a page size other than 4KB (i.e, 16KB or 64KB), please be aware
- that you will only be able to execute AArch32 binaries that were compiled
- with page size aligned segments.
-
- If you want to execute 32-bit userspace applications, say Y.
-
config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index c5f6a57f16b8..b5ca9c50876d 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -27,6 +27,7 @@ config ARCH_BCM2835
bool "Broadcom BCM2835 family"
select TIMER_OF
select GPIOLIB
+ select MFD_CORE
select PINCTRL
select PINCTRL_BCM2835
select ARM_AMBA
@@ -151,7 +152,7 @@ config ARCH_MVEBU
config ARCH_MXC
bool "ARMv8 based NXP i.MX SoC family"
select ARM64_ERRATUM_843419
- select ARM64_ERRATUM_845719
+ select ARM64_ERRATUM_845719 if COMPAT
select IMX_GPCV2
select IMX_GPCV2_PM_DOMAINS
select PM
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 7c649f6b14cb..a2cec6218211 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -162,6 +162,7 @@
rx-fifo-depth = <16384>;
snps,multicast-filter-bins = <256>;
iommus = <&smmu 1>;
+ altr,sysmgr-syscon = <&sysmgr 0x44 0>;
status = "disabled";
};
@@ -179,6 +180,7 @@
rx-fifo-depth = <16384>;
snps,multicast-filter-bins = <256>;
iommus = <&smmu 2>;
+ altr,sysmgr-syscon = <&sysmgr 0x48 0>;
status = "disabled";
};
@@ -196,6 +198,7 @@
rx-fifo-depth = <16384>;
snps,multicast-filter-bins = <256>;
iommus = <&smmu 3>;
+ altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
status = "disabled";
};
@@ -531,11 +534,12 @@
};
eccmgr {
- compatible = "altr,socfpga-a10-ecc-manager";
+ compatible = "altr,socfpga-s10-ecc-manager",
+ "altr,socfpga-a10-ecc-manager";
altr,sysmgr-syscon = <&sysmgr>;
#address-cells = <1>;
#size-cells = <1>;
- interrupts = <0 15 4>, <0 95 4>;
+ interrupts = <0 15 4>;
interrupt-controller;
#interrupt-cells = <2>;
ranges;
@@ -543,31 +547,31 @@
sdramedac {
compatible = "altr,sdram-edac-s10";
altr,sdr-syscon = <&sdr>;
- interrupts = <16 4>, <48 4>;
+ interrupts = <16 4>;
};
usb0-ecc@ff8c4000 {
- compatible = "altr,socfpga-usb-ecc";
+ compatible = "altr,socfpga-s10-usb-ecc",
+ "altr,socfpga-usb-ecc";
reg = <0xff8c4000 0x100>;
altr,ecc-parent = <&usb0>;
- interrupts = <2 4>,
- <34 4>;
+ interrupts = <2 4>;
};
emac0-rx-ecc@ff8c0000 {
- compatible = "altr,socfpga-eth-mac-ecc";
+ compatible = "altr,socfpga-s10-eth-mac-ecc",
+ "altr,socfpga-eth-mac-ecc";
reg = <0xff8c0000 0x100>;
altr,ecc-parent = <&gmac0>;
- interrupts = <4 4>,
- <36 4>;
+ interrupts = <4 4>;
};
emac0-tx-ecc@ff8c0400 {
- compatible = "altr,socfpga-eth-mac-ecc";
+ compatible = "altr,socfpga-s10-eth-mac-ecc",
+ "altr,socfpga-eth-mac-ecc";
reg = <0xff8c0400 0x100>;
altr,ecc-parent = <&gmac0>;
- interrupts = <5 4>,
- <37 4>;
+ interrupts = <5 4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 0e762ca92558..cb7185014d3a 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -666,6 +666,17 @@
status = "disabled";
};
+ pcie_ep@3400000 {
+ compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
+ reg = <0x00 0x03400000 0x0 0x00100000
+ 0x40 0x00000000 0x8 0x00000000>;
+ reg-names = "regs", "addr_space";
+ num-ib-windows = <6>;
+ num-ob-windows = <8>;
+ num-lanes = <2>;
+ status = "disabled";
+ };
+
pcie@3500000 {
compatible = "fsl,ls1046a-pcie";
reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
@@ -693,6 +704,17 @@
status = "disabled";
};
+ pcie_ep@3500000 {
+ compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
+ reg = <0x00 0x03500000 0x0 0x00100000
+ 0x48 0x00000000 0x8 0x00000000>;
+ reg-names = "regs", "addr_space";
+ num-ib-windows = <6>;
+ num-ob-windows = <8>;
+ num-lanes = <2>;
+ status = "disabled";
+ };
+
pcie@3600000 {
compatible = "fsl,ls1046a-pcie";
reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
@@ -720,6 +742,17 @@
status = "disabled";
};
+ pcie_ep@3600000 {
+ compatible = "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep";
+ reg = <0x00 0x03600000 0x0 0x00100000
+ 0x50 0x00000000 0x8 0x00000000>;
+ reg-names = "regs", "addr_space";
+ num-ib-windows = <6>;
+ num-ob-windows = <8>;
+ num-lanes = <2>;
+ status = "disabled";
+ };
+
qdma: dma-controller@8380000 {
compatible = "fsl,ls1046a-qdma", "fsl,ls1021a-qdma";
reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
@@ -740,7 +773,6 @@
queue-sizes = <64 64>;
big-endian;
};
-
};
reserved-memory {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
new file mode 100644
index 000000000000..e25f7fcd7997
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef __DTS_IMX8MM_PINFUNC_H
+#define __DTS_IMX8MM_PINFUNC_H
+
+/*
+ * The pin function ID is a tuple of
+ * <mux_reg conf_reg input_reg mux_mode input_val>
+ */
+
+#define MX8MM_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x028 0x290 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO00_CCMSRCGPCMIX_ENET_PHY_REF_CLK_ROOT 0x028 0x290 0x4C0 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K 0x028 0x290 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO00_CCMSRCGPCMIX_EXT_CLK1 0x028 0x290 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO00_SJC_FAIL 0x028 0x290 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO01_GPIO1_IO1 0x02C 0x294 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO01_PWM1_OUT 0x02C 0x294 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO01_ANAMIX_REF_CLK_24M 0x02C 0x294 0x4BC 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO01_CCMSRCGPCMIX_EXT_CLK2 0x02C 0x294 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO01_SJC_ACTIVE 0x02C 0x294 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO02_GPIO1_IO2 0x030 0x298 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0x030 0x298 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_ANY 0x030 0x298 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO02_SJC_DE_B 0x030 0x298 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x034 0x29C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO03_USDHC1_VSELECT 0x034 0x29C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO03_SDMA1_EXT_EVENT0 0x034 0x29C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO03_ANAMIX_XTAL_OK 0x034 0x29C 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO03_SJC_DONE 0x034 0x29C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x038 0x2A0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x038 0x2A0 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO04_SDMA1_EXT_EVENT1 0x038 0x2A0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO04_ANAMIX_XTAL_OK_LV 0x038 0x2A0 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO04_USDHC1_TEST_TRIG 0x038 0x2A0 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO05_GPIO1_IO5 0x03C 0x2A4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO05_M4_NMI 0x03C 0x2A4 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO05_CCMSRCGPCMIX_PMIC_READY 0x03C 0x2A4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO05_CCMSRCGPCMIX_INT_BOOT 0x03C 0x2A4 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO05_USDHC2_TEST_TRIG 0x03C 0x2A4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x040 0x2A8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO06_ENET1_MDC 0x040 0x2A8 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO06_USDHC1_CD_B 0x040 0x2A8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO06_CCMSRCGPCMIX_EXT_CLK3 0x040 0x2A8 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO06_ECSPI1_TEST_TRIG 0x040 0x2A8 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x044 0x2AC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO07_ENET1_MDIO 0x044 0x2AC 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO07_USDHC1_WP 0x044 0x2AC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO07_CCMSRCGPCMIX_EXT_CLK4 0x044 0x2AC 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO07_ECSPI2_TEST_TRIG 0x044 0x2AC 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x048 0x2B0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO08_ENET1_1588_EVENT0_IN 0x048 0x2B0 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO08_USDHC2_RESET_B 0x048 0x2B0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO08_CCMSRCGPCMIX_WAIT 0x048 0x2B0 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO08_QSPI_TEST_TRIG 0x048 0x2B0 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x04C 0x2B4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO09_ENET1_1588_EVENT0_OUT 0x04C 0x2B4 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO09_SDMA2_EXT_EVENT0 0x04C 0x2B4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO09_CCMSRCGPCMIX_STOP 0x04C 0x2B4 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO09_RAWNAND_TEST_TRIG 0x04C 0x2B4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x050 0x2B8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO10_USB1_OTG_ID 0x050 0x2B8 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO10_OCOTP_CTRL_WRAPPER_FUSE_LATCHED 0x050 0x2B8 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x054 0x2BC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO11_USB2_OTG_ID 0x054 0x2BC 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO11_CCMSRCGPCMIX_PMIC_READY 0x054 0x2BC 0x4BC 0x5 0x1
+#define MX8MM_IOMUXC_GPIO1_IO11_CCMSRCGPCMIX_OUT0 0x054 0x2BC 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO11_CAAM_WRAPPER_RNG_OSC_OBS 0x054 0x2BC 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x058 0x2C0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x058 0x2C0 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO12_SDMA2_EXT_EVENT1 0x058 0x2C0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO12_CCMSRCGPCMIX_OUT1 0x058 0x2C0 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO12_CSU_CSU_ALARM_AUT0 0x058 0x2C0 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x05C 0x2C4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x05C 0x2C4 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO13_PWM2_OUT 0x05C 0x2C4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO13_CCMSRCGPCMIX_OUT2 0x05C 0x2C4 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO13_CSU_CSU_ALARM_AUT1 0x05C 0x2C4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO14_GPIO1_IO14 0x060 0x2C8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO14_USB2_OTG_PWR 0x060 0x2C8 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO14_PWM3_OUT 0x060 0x2C8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO14_CCMSRCGPCMIX_CLKO1 0x060 0x2C8 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO14_CSU_CSU_ALARM_AUT2 0x060 0x2C8 0x000 0x7 0x0
+#define MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x064 0x2CC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_GPIO1_IO15_USB2_OTG_OC 0x064 0x2CC 0x000 0x1 0x0
+#define MX8MM_IOMUXC_GPIO1_IO15_PWM4_OUT 0x064 0x2CC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_GPIO1_IO15_CCMSRCGPCMIX_CLKO2 0x064 0x2CC 0x000 0x6 0x0
+#define MX8MM_IOMUXC_GPIO1_IO15_CSU_CSU_INT_DEB 0x064 0x2CC 0x000 0x7 0x0
+#define MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x068 0x2D0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_MDC_GPIO1_IO16 0x068 0x2D0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x06C 0x2D4 0x4C0 0x0 0x1
+#define MX8MM_IOMUXC_ENET_MDIO_GPIO1_IO17 0x06C 0x2D4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x070 0x2D8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_TD3_GPIO1_IO18 0x070 0x2D8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x074 0x2DC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_TD2_ENET1_TX_CLK 0x074 0x2DC 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ENET_TD2_GPIO1_IO19 0x074 0x2DC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x078 0x2E0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_TD1_GPIO1_IO20 0x078 0x2E0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x07C 0x2E4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_TD0_GPIO1_IO21 0x07C 0x2E4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x080 0x2E8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_TX_CTL_GPIO1_IO22 0x080 0x2E8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x084 0x2EC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_TXC_ENET1_TX_ER 0x084 0x2EC 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ENET_TXC_GPIO1_IO23 0x084 0x2EC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x088 0x2F0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_RX_CTL_GPIO1_IO24 0x088 0x2F0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x08C 0x2F4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_RXC_ENET1_RX_ER 0x08C 0x2F4 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ENET_RXC_GPIO1_IO25 0x08C 0x2F4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x090 0x2F8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_RD0_GPIO1_IO26 0x090 0x2F8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x094 0x2FC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_RD1_GPIO1_IO27 0x094 0x2FC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x098 0x300 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_RD2_GPIO1_IO28 0x098 0x300 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x09C 0x304 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ENET_RD3_GPIO1_IO29 0x09C 0x304 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x0A0 0x308 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_CLK_GPIO2_IO0 0x0A0 0x308 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_DATA2_GPIO2_IO4 0x0B0 0x318 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x0B4 0x31C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_DATA3_GPIO2_IO5 0x0B4 0x31C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x0B8 0x320 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_DATA4_GPIO2_IO6 0x0B8 0x320 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x0BC 0x324 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_DATA5_GPIO2_IO7 0x0BC 0x324 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x0C0 0x328 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_DATA6_GPIO2_IO8 0x0C0 0x328 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x0C4 0x32C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_DATA7_GPIO2_IO9 0x0C4 0x32C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0x0C8 0x330 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_RESET_B_GPIO2_IO10 0x0C8 0x330 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x0CC 0x334 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD1_STROBE_GPIO2_IO11 0x0CC 0x334 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_CD_B_USDHC2_CD_B 0x0D0 0x338 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x0D0 0x338 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0D4 0x33C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD2_CLK_GPIO2_IO13 0x0D4 0x33C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_CLK_CCMSRCGPCMIX_OBSERVE0 0x0D4 0x33C 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SD2_CLK_OBSERVE_MUX_OUT0 0x0D4 0x33C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0D8 0x340 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD2_CMD_GPIO2_IO14 0x0D8 0x340 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_CMD_CCMSRCGPCMIX_OBSERVE1 0x0D8 0x340 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SD2_CMD_OBSERVE_MUX_OUT1 0x0D8 0x340 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0DC 0x344 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD2_DATA0_GPIO2_IO15 0x0DC 0x344 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_DATA0_CCMSRCGPCMIX_OBSERVE2 0x0DC 0x344 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SD2_DATA0_OBSERVE_MUX_OUT2 0x0DC 0x344 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x0E0 0x348 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD2_DATA1_GPIO2_IO16 0x0E0 0x348 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_DATA1_CCMSRCGPCMIX_WAIT 0x0E0 0x348 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SD2_DATA1_OBSERVE_MUX_OUT3 0x0E0 0x348 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x0E4 0x34C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD2_DATA2_GPIO2_IO17 0x0E4 0x34C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_DATA2_CCMSRCGPCMIX_STOP 0x0E4 0x34C 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SD2_DATA2_OBSERVE_MUX_OUT4 0x0E4 0x34C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x0E8 0x350 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD2_DATA3_GPIO2_IO18 0x0E8 0x350 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_DATA3_CCMSRCGPCMIX_EARLY_RESET 0x0E8 0x350 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SD2_RESET_B_USDHC2_RESET_B 0x0EC 0x354 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x0EC 0x354 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_RESET_B_CCMSRCGPCMIX_SYSTEM_RESET 0x0EC 0x354 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SD2_WP_USDHC2_WP 0x0F0 0x358 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SD2_WP_GPIO2_IO20 0x0F0 0x358 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD2_WP_SIM_M_HMASTLOCK 0x0F0 0x358 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_ALE_RAWNAND_ALE 0x0F4 0x35C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x0F4 0x35C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_ALE_GPIO3_IO0 0x0F4 0x35C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_ALE_SIM_M_HPROT0 0x0F4 0x35C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_CE0_B_RAWNAND_CE0_B 0x0F8 0x360 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x0F8 0x360 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_CE0_B_GPIO3_IO1 0x0F8 0x360 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_CE0_B_SIM_M_HPROT1 0x0F8 0x360 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_CE1_B_RAWNAND_CE1_B 0x0FC 0x364 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_CE1_B_QSPI_A_SS1_B 0x0FC 0x364 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x0FC 0x364 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x0FC 0x364 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_CE1_B_SIM_M_HPROT2 0x0FC 0x364 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_CE2_B_RAWNAND_CE2_B 0x100 0x368 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_CE2_B_QSPI_B_SS0_B 0x100 0x368 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x100 0x368 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_CE2_B_GPIO3_IO3 0x100 0x368 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_CE2_B_SIM_M_HPROT3 0x100 0x368 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_CE3_B_RAWNAND_CE3_B 0x104 0x36C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_CE3_B_QSPI_B_SS1_B 0x104 0x36C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x104 0x36C 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_CE3_B_GPIO3_IO4 0x104 0x36C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_CE3_B_SIM_M_HADDR0 0x104 0x36C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_CLE_RAWNAND_CLE 0x108 0x370 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_CLE_QSPI_B_SCLK 0x108 0x370 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x108 0x370 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x108 0x370 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_CLE_SIM_M_HADDR1 0x108 0x370 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_DATA00_RAWNAND_DATA00 0x10C 0x374 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x10C 0x374 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_DATA00_GPIO3_IO6 0x10C 0x374 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_DATA00_SIM_M_HADDR2 0x10C 0x374 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_DATA01_RAWNAND_DATA01 0x110 0x378 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x110 0x378 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_DATA01_GPIO3_IO7 0x110 0x378 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_DATA01_SIM_M_HADDR3 0x110 0x378 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_DATA02_RAWNAND_DATA02 0x114 0x37C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x114 0x37C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_DATA02_GPIO3_IO8 0x114 0x37C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_DATA02_SIM_M_HADDR4 0x114 0x37C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_DATA03_RAWNAND_DATA03 0x118 0x380 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x118 0x380 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_DATA03_GPIO3_IO9 0x118 0x380 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_DATA03_SIM_M_HADDR5 0x118 0x380 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_DATA04_RAWNAND_DATA04 0x11C 0x384 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_DATA04_QSPI_B_DATA0 0x11C 0x384 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x11C 0x384 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_DATA04_GPIO3_IO10 0x11C 0x384 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_DATA04_SIM_M_HADDR6 0x11C 0x384 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_DATA05_RAWNAND_DATA05 0x120 0x388 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_DATA05_QSPI_B_DATA1 0x120 0x388 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x120 0x388 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_DATA05_GPIO3_IO11 0x120 0x388 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_DATA05_SIM_M_HADDR7 0x120 0x388 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_DATA06_RAWNAND_DATA06 0x124 0x38C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_DATA06_QSPI_B_DATA2 0x124 0x38C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x124 0x38C 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_DATA06_GPIO3_IO12 0x124 0x38C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_DATA06_SIM_M_HADDR8 0x124 0x38C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_DATA07_RAWNAND_DATA07 0x128 0x390 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_DATA07_QSPI_B_DATA3 0x128 0x390 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x128 0x390 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_DATA07_GPIO3_IO13 0x128 0x390 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_DATA07_SIM_M_HADDR9 0x128 0x390 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_DQS_RAWNAND_DQS 0x12C 0x394 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_DQS_QSPI_A_DQS 0x12C 0x394 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_DQS_GPIO3_IO14 0x12C 0x394 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_DQS_SIM_M_HADDR10 0x12C 0x394 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_RE_B_RAWNAND_RE_B 0x130 0x398 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_RE_B_QSPI_B_DQS 0x130 0x398 0x000 0x1 0x0
+#define MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x130 0x398 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x130 0x398 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_RE_B_SIM_M_HADDR11 0x130 0x398 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_READY_B_RAWNAND_READY_B 0x134 0x39C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_READY_B_GPIO3_IO16 0x134 0x39C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_READY_B_SIM_M_HADDR12 0x134 0x39C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_WE_B_RAWNAND_WE_B 0x138 0x3A0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x138 0x3A0 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_WE_B_GPIO3_IO17 0x138 0x3A0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_WE_B_SIM_M_HADDR13 0x138 0x3A0 0x000 0x7 0x0
+#define MX8MM_IOMUXC_NAND_WP_B_RAWNAND_WP_B 0x13C 0x3A4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x13C 0x3A4 0x000 0x2 0x0
+#define MX8MM_IOMUXC_NAND_WP_B_GPIO3_IO18 0x13C 0x3A4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_NAND_WP_B_SIM_M_HADDR14 0x13C 0x3A4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI5_RXFS_SAI5_RX_SYNC 0x140 0x3A8 0x4E4 0x0 0x0
+#define MX8MM_IOMUXC_SAI5_RXFS_SAI1_TX_DATA0 0x140 0x3A8 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI5_RXFS_GPIO3_IO19 0x140 0x3A8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI5_RXC_SAI5_RX_BCLK 0x144 0x3AC 0x4D0 0x0 0x0
+#define MX8MM_IOMUXC_SAI5_RXC_SAI1_TX_DATA1 0x144 0x3AC 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI5_RXC_PDM_CLK 0x144 0x3AC 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x144 0x3AC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI5_RXD0_SAI5_RX_DATA0 0x148 0x3B0 0x4D4 0x0 0x0
+#define MX8MM_IOMUXC_SAI5_RXD0_SAI1_TX_DATA2 0x148 0x3B0 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI5_RXD0_PDM_DATA0 0x148 0x3B0 0x534 0x4 0x0
+#define MX8MM_IOMUXC_SAI5_RXD0_GPIO3_IO21 0x148 0x3B0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI5_RXD1_SAI5_RX_DATA1 0x14C 0x3B4 0x4D8 0x0 0x0
+#define MX8MM_IOMUXC_SAI5_RXD1_SAI1_TX_DATA3 0x14C 0x3B4 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI5_RXD1_SAI1_TX_SYNC 0x14C 0x3B4 0x4CC 0x2 0x0
+#define MX8MM_IOMUXC_SAI5_RXD1_SAI5_TX_SYNC 0x14C 0x3B4 0x4EC 0x3 0x0
+#define MX8MM_IOMUXC_SAI5_RXD1_PDM_DATA1 0x14C 0x3B4 0x538 0x4 0x0
+#define MX8MM_IOMUXC_SAI5_RXD1_GPIO3_IO22 0x14C 0x3B4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI5_RXD2_SAI5_RX_DATA2 0x150 0x3B8 0x4DC 0x0 0x0
+#define MX8MM_IOMUXC_SAI5_RXD2_SAI1_TX_DATA4 0x150 0x3B8 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI5_RXD2_SAI1_TX_SYNC 0x150 0x3B8 0x4CC 0x2 0x1
+#define MX8MM_IOMUXC_SAI5_RXD2_SAI5_TX_BCLK 0x150 0x3B8 0x4E8 0x3 0x0
+#define MX8MM_IOMUXC_SAI5_RXD2_PDM_DATA2 0x150 0x3B8 0x53C 0x4 0x0
+#define MX8MM_IOMUXC_SAI5_RXD2_GPIO3_IO23 0x150 0x3B8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI5_RXD3_SAI5_RX_DATA3 0x154 0x3BC 0x4E0 0x0 0x0
+#define MX8MM_IOMUXC_SAI5_RXD3_SAI1_TX_DATA5 0x154 0x3BC 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI5_RXD3_SAI1_TX_SYNC 0x154 0x3BC 0x4CC 0x2 0x2
+#define MX8MM_IOMUXC_SAI5_RXD3_SAI5_TX_DATA0 0x154 0x3BC 0x000 0x3 0x0
+#define MX8MM_IOMUXC_SAI5_RXD3_PDM_DATA3 0x154 0x3BC 0x540 0x4 0x0
+#define MX8MM_IOMUXC_SAI5_RXD3_GPIO3_IO24 0x154 0x3BC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI5_MCLK_SAI5_MCLK 0x158 0x3C0 0x52C 0x0 0x0
+#define MX8MM_IOMUXC_SAI5_MCLK_SAI1_TX_BCLK 0x158 0x3C0 0x4C8 0x1 0x0
+#define MX8MM_IOMUXC_SAI5_MCLK_SAI4_MCLK 0x158 0x3C0 0x000 0x2 0x0
+#define MX8MM_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x158 0x3C0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI5_MCLK_CCMSRCGPCMIX_TESTER_ACK 0x158 0x3C0 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_RXFS_SAI1_RX_SYNC 0x15C 0x3C4 0x4C4 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXFS_SAI5_RX_SYNC 0x15C 0x3C4 0x4E4 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_RXFS_CORESIGHT_TRACE_CLK 0x15C 0x3C4 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x15C 0x3C4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXFS_SIM_M_HADDR15 0x15C 0x3C4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_RXC_SAI1_RX_BCLK 0x160 0x3C8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXC_SAI5_RX_BCLK 0x160 0x3C8 0x4D0 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_RXC_CORESIGHT_TRACE_CTL 0x160 0x3C8 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXC_GPIO4_IO1 0x160 0x3C8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXC_SIM_M_HADDR16 0x160 0x3C8 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_RXD0_SAI1_RX_DATA0 0x164 0x3CC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXD0_SAI5_RX_DATA0 0x164 0x3CC 0x4D4 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_RXD0_PDM_DATA0 0x164 0x3CC 0x534 0x3 0x1
+#define MX8MM_IOMUXC_SAI1_RXD0_CORESIGHT_TRACE0 0x164 0x3CC 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXD0_GPIO4_IO2 0x164 0x3CC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXD0_CCMSRCGPCMIX_BOOT_CFG0 0x164 0x3CC 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_RXD0_SIM_M_HADDR17 0x164 0x3CC 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_RXD1_SAI1_RX_DATA1 0x168 0x3D0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXD1_SAI5_RX_DATA1 0x168 0x3D0 0x4D8 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_RXD1_PDM_DATA1 0x168 0x3D0 0x538 0x3 0x1
+#define MX8MM_IOMUXC_SAI1_RXD1_CORESIGHT_TRACE1 0x168 0x3D0 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXD1_GPIO4_IO3 0x168 0x3D0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXD1_CCMSRCGPCMIX_BOOT_CFG1 0x168 0x3D0 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_RXD1_SIM_M_HADDR18 0x168 0x3D0 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_RXD2_SAI1_RX_DATA2 0x16C 0x3D4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXD2_SAI5_RX_DATA2 0x16C 0x3D4 0x4DC 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_RXD2_PDM_DATA2 0x16C 0x3D4 0x53C 0x3 0x1
+#define MX8MM_IOMUXC_SAI1_RXD2_CORESIGHT_TRACE2 0x16C 0x3D4 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXD2_GPIO4_IO4 0x16C 0x3D4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXD2_CCMSRCGPCMIX_BOOT_CFG2 0x16C 0x3D4 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_RXD2_SIM_M_HADDR19 0x16C 0x3D4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_RXD3_SAI1_RX_DATA3 0x170 0x3D8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXD3_SAI5_RX_DATA3 0x170 0x3D8 0x4E0 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_RXD3_PDM_DATA3 0x170 0x3D8 0x540 0x3 0x1
+#define MX8MM_IOMUXC_SAI1_RXD3_CORESIGHT_TRACE3 0x170 0x3D8 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x170 0x3D8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXD3_CCMSRCGPCMIX_BOOT_CFG3 0x170 0x3D8 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_RXD3_SIM_M_HADDR20 0x170 0x3D8 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_RXD4_SAI1_RX_DATA4 0x174 0x3DC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXD4_SAI6_TX_BCLK 0x174 0x3DC 0x51C 0x1 0x0
+#define MX8MM_IOMUXC_SAI1_RXD4_SAI6_RX_BCLK 0x174 0x3DC 0x510 0x2 0x0
+#define MX8MM_IOMUXC_SAI1_RXD4_CORESIGHT_TRACE4 0x174 0x3DC 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXD4_GPIO4_IO6 0x174 0x3DC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXD4_CCMSRCGPCMIX_BOOT_CFG4 0x174 0x3DC 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_RXD4_SIM_M_HADDR21 0x174 0x3DC 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_RXD5_SAI1_RX_DATA5 0x178 0x3E0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXD5_SAI6_TX_DATA0 0x178 0x3E0 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI1_RXD5_SAI6_RX_DATA0 0x178 0x3E0 0x514 0x2 0x0
+#define MX8MM_IOMUXC_SAI1_RXD5_SAI1_RX_SYNC 0x178 0x3E0 0x4C4 0x3 0x1
+#define MX8MM_IOMUXC_SAI1_RXD5_CORESIGHT_TRACE5 0x178 0x3E0 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x178 0x3E0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXD5_CCMSRCGPCMIX_BOOT_CFG5 0x178 0x3E0 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_RXD5_SIM_M_HADDR22 0x178 0x3E0 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_RXD6_SAI1_RX_DATA6 0x17C 0x3E4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXD6_SAI6_TX_SYNC 0x17C 0x3E4 0x520 0x1 0x0
+#define MX8MM_IOMUXC_SAI1_RXD6_SAI6_RX_SYNC 0x17C 0x3E4 0x518 0x2 0x0
+#define MX8MM_IOMUXC_SAI1_RXD6_CORESIGHT_TRACE6 0x17C 0x3E4 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXD6_GPIO4_IO8 0x17C 0x3E4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXD6_CCMSRCGPCMIX_BOOT_CFG6 0x17C 0x3E4 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_RXD6_SIM_M_HADDR23 0x17C 0x3E4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_RXD7_SAI1_RX_DATA7 0x180 0x3E8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_RXD7_SAI6_MCLK 0x180 0x3E8 0x530 0x1 0x0
+#define MX8MM_IOMUXC_SAI1_RXD7_SAI1_TX_SYNC 0x180 0x3E8 0x4CC 0x2 0x4
+#define MX8MM_IOMUXC_SAI1_RXD7_SAI1_TX_DATA4 0x180 0x3E8 0x000 0x3 0x0
+#define MX8MM_IOMUXC_SAI1_RXD7_CORESIGHT_TRACE7 0x180 0x3E8 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_RXD7_GPIO4_IO9 0x180 0x3E8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_RXD7_CCMSRCGPCMIX_BOOT_CFG7 0x180 0x3E8 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_RXD7_SIM_M_HADDR24 0x180 0x3E8 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXFS_SAI1_TX_SYNC 0x184 0x3EC 0x4CC 0x0 0x3
+#define MX8MM_IOMUXC_SAI1_TXFS_SAI5_TX_SYNC 0x184 0x3EC 0x4EC 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_TXFS_CORESIGHT_EVENTO 0x184 0x3EC 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXFS_GPIO4_IO10 0x184 0x3EC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXFS_SIM_M_HADDR25 0x184 0x3EC 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXC_SAI1_TX_BCLK 0x188 0x3F0 0x4C8 0x0 0x1
+#define MX8MM_IOMUXC_SAI1_TXC_SAI5_TX_BCLK 0x188 0x3F0 0x4E8 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_TXC_CORESIGHT_EVENTI 0x188 0x3F0 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXC_GPIO4_IO11 0x188 0x3F0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXC_SIM_M_HADDR26 0x188 0x3F0 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXD0_SAI1_TX_DATA0 0x18C 0x3F4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_TXD0_SAI5_TX_DATA0 0x18C 0x3F4 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI1_TXD0_CORESIGHT_TRACE8 0x18C 0x3F4 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXD0_GPIO4_IO12 0x18C 0x3F4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXD0_CCMSRCGPCMIX_BOOT_CFG8 0x18C 0x3F4 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_TXD0_SIM_M_HADDR27 0x18C 0x3F4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXD1_SAI1_TX_DATA1 0x190 0x3F8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_TXD1_SAI5_TX_DATA1 0x190 0x3F8 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI1_TXD1_CORESIGHT_TRACE9 0x190 0x3F8 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXD1_GPIO4_IO13 0x190 0x3F8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXD1_CCMSRCGPCMIX_BOOT_CFG9 0x190 0x3F8 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_TXD1_SIM_M_HADDR28 0x190 0x3F8 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXD2_SAI1_TX_DATA2 0x194 0x3FC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_TXD2_SAI5_TX_DATA2 0x194 0x3FC 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI1_TXD2_CORESIGHT_TRACE10 0x194 0x3FC 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x194 0x3FC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXD2_CCMSRCGPCMIX_BOOT_CFG10 0x194 0x3FC 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_TXD2_SIM_M_HADDR29 0x194 0x3FC 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXD3_SAI1_TX_DATA3 0x198 0x400 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_TXD3_SAI5_TX_DATA3 0x198 0x400 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI1_TXD3_CORESIGHT_TRACE11 0x198 0x400 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXD3_GPIO4_IO15 0x198 0x400 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXD3_CCMSRCGPCMIX_BOOT_CFG11 0x198 0x400 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_TXD3_SIM_M_HADDR30 0x198 0x400 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXD4_SAI1_TX_DATA4 0x19C 0x404 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_TXD4_SAI6_RX_BCLK 0x19C 0x404 0x510 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_TXD4_SAI6_TX_BCLK 0x19C 0x404 0x51C 0x2 0x1
+#define MX8MM_IOMUXC_SAI1_TXD4_CORESIGHT_TRACE12 0x19C 0x404 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXD4_GPIO4_IO16 0x19C 0x404 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXD4_CCMSRCGPCMIX_BOOT_CFG12 0x19C 0x404 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_TXD4_SIM_M_HADDR31 0x19C 0x404 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXD5_SAI1_TX_DATA5 0x1A0 0x408 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_TXD5_SAI6_RX_DATA0 0x1A0 0x408 0x514 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_TXD5_SAI6_TX_DATA0 0x1A0 0x408 0x000 0x2 0x0
+#define MX8MM_IOMUXC_SAI1_TXD5_CORESIGHT_TRACE13 0x1A0 0x408 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXD5_GPIO4_IO17 0x1A0 0x408 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXD5_CCMSRCGPCMIX_BOOT_CFG13 0x1A0 0x408 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_TXD5_SIM_M_HBURST0 0x1A0 0x408 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXD6_SAI1_TX_DATA6 0x1A4 0x40C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_TXD6_SAI6_RX_SYNC 0x1A4 0x40C 0x518 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_TXD6_SAI6_TX_SYNC 0x1A4 0x40C 0x520 0x2 0x1
+#define MX8MM_IOMUXC_SAI1_TXD6_CORESIGHT_TRACE14 0x1A4 0x40C 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXD6_GPIO4_IO18 0x1A4 0x40C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXD6_CCMSRCGPCMIX_BOOT_CFG14 0x1A4 0x40C 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_TXD6_SIM_M_HBURST1 0x1A4 0x40C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_TXD7_SAI1_TX_DATA7 0x1A8 0x410 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_TXD7_SAI6_MCLK 0x1A8 0x410 0x530 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_TXD7_PDM_CLK 0x1A8 0x410 0x000 0x3 0x0
+#define MX8MM_IOMUXC_SAI1_TXD7_CORESIGHT_TRACE15 0x1A8 0x410 0x000 0x4 0x0
+#define MX8MM_IOMUXC_SAI1_TXD7_GPIO4_IO19 0x1A8 0x410 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_TXD7_CCMSRCGPCMIX_BOOT_CFG15 0x1A8 0x410 0x000 0x6 0x0
+#define MX8MM_IOMUXC_SAI1_TXD7_SIM_M_HBURST2 0x1A8 0x410 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI1_MCLK_SAI1_MCLK 0x1AC 0x414 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI1_MCLK_SAI5_MCLK 0x1AC 0x414 0x52C 0x1 0x1
+#define MX8MM_IOMUXC_SAI1_MCLK_SAI1_TX_BCLK 0x1AC 0x414 0x4C8 0x2 0x2
+#define MX8MM_IOMUXC_SAI1_MCLK_PDM_CLK 0x1AC 0x414 0x000 0x3 0x0
+#define MX8MM_IOMUXC_SAI1_MCLK_GPIO4_IO20 0x1AC 0x414 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI1_MCLK_SIM_M_HRESP 0x1AC 0x414 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI2_RXFS_SAI2_RX_SYNC 0x1B0 0x418 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI2_RXFS_SAI5_TX_SYNC 0x1B0 0x418 0x4EC 0x1 0x2
+#define MX8MM_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x1B0 0x418 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI2_RXFS_SIM_M_HSIZE0 0x1B0 0x418 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI2_RXC_SAI2_RX_BCLK 0x1B4 0x41C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI2_RXC_SAI5_TX_BCLK 0x1B4 0x41C 0x4E8 0x1 0x2
+#define MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x1B4 0x41C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI2_RXC_SIM_M_HSIZE1 0x1B4 0x41C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI2_RXD0_SAI2_RX_DATA0 0x1B8 0x420 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI2_RXD0_SAI5_TX_DATA0 0x1B8 0x420 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI2_RXD0_GPIO4_IO23 0x1B8 0x420 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI2_RXD0_SIM_M_HSIZE2 0x1B8 0x420 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0x1BC 0x424 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI2_TXFS_SAI5_TX_DATA1 0x1BC 0x424 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI2_TXFS_GPIO4_IO24 0x1BC 0x424 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI2_TXFS_SIM_M_HWRITE 0x1BC 0x424 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0x1C0 0x428 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI2_TXC_SAI5_TX_DATA2 0x1C0 0x428 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI2_TXC_GPIO4_IO25 0x1C0 0x428 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI2_TXC_SIM_M_HREADYOUT 0x1C0 0x428 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0x1C4 0x42C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI2_TXD0_SAI5_TX_DATA3 0x1C4 0x42C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI2_TXD0_GPIO4_IO26 0x1C4 0x42C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI2_TXD0_TPSMP_CLK 0x1C4 0x42C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI2_MCLK_SAI2_MCLK 0x1C8 0x430 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI2_MCLK_SAI5_MCLK 0x1C8 0x430 0x52C 0x1 0x2
+#define MX8MM_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x1C8 0x430 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI2_MCLK_TPSMP_HDATA_DIR 0x1C8 0x430 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI3_RXFS_SAI3_RX_SYNC 0x1CC 0x434 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI3_RXFS_GPT1_CAPTURE1 0x1CC 0x434 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_RXFS_SAI5_RX_SYNC 0x1CC 0x434 0x4E4 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0x1D4 0x43C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI3_RXD_GPT1_COMPARE1 0x1D4 0x43C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_RXD_SAI5_RX_DATA0 0x1D4 0x43C 0x4D4 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0x1DC 0x444 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI3_TXC_GPT1_COMPARE2 0x1DC 0x444 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_TXC_SAI5_RX_DATA2 0x1DC 0x444 0x4DC 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_TXC_GPIO5_IO0 0x1DC 0x444 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI3_TXC_TPSMP_HDATA2 0x1DC 0x444 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0x1E0 0x448 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI3_TXD_GPT1_COMPARE3 0x1E0 0x448 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_TXD_SAI5_RX_DATA3 0x1E0 0x448 0x4E0 0x2 0x2
+#define MX8MM_IOMUXC_SAI3_TXD_GPIO5_IO1 0x1E0 0x448 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI3_TXD_TPSMP_HDATA3 0x1E0 0x448 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SAI3_MCLK_SAI3_MCLK 0x1E4 0x44C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SAI3_MCLK_PWM4_OUT 0x1E4 0x44C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_MCLK_SAI5_MCLK 0x1E4 0x44C 0x52C 0x2 0x3
+#define MX8MM_IOMUXC_SAI3_MCLK_GPIO5_IO2 0x1E4 0x44C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SAI3_MCLK_TPSMP_HDATA4 0x1E4 0x44C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SPDIF_TX_SPDIF1_OUT 0x1E8 0x450 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SPDIF_TX_PWM3_OUT 0x1E8 0x450 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SPDIF_TX_GPIO5_IO3 0x1E8 0x450 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SPDIF_TX_TPSMP_HDATA5 0x1E8 0x450 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SPDIF_RX_SPDIF1_IN 0x1EC 0x454 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SPDIF_RX_PWM2_OUT 0x1EC 0x454 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SPDIF_RX_GPIO5_IO4 0x1EC 0x454 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SPDIF_RX_TPSMP_HDATA6 0x1EC 0x454 0x000 0x7 0x0
+#define MX8MM_IOMUXC_SPDIF_EXT_CLK_SPDIF1_EXT_CLK 0x1F0 0x458 0x000 0x0 0x0
+#define MX8MM_IOMUXC_SPDIF_EXT_CLK_PWM1_OUT 0x1F0 0x458 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x1F0 0x458 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SPDIF_EXT_CLK_TPSMP_HDATA7 0x1F0 0x458 0x000 0x7 0x0
+#define MX8MM_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x1F4 0x45C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x1F4 0x45C 0x504 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DTE_TX 0x1F4 0x45C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI1_SCLK_GPIO5_IO6 0x1F4 0x45C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ECSPI1_SCLK_TPSMP_HDATA8 0x1F4 0x45C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x1F8 0x460 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x1F8 0x460 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DTE_RX 0x1F8 0x460 0x504 0x1 0x1
+#define MX8MM_IOMUXC_ECSPI1_MOSI_GPIO5_IO7 0x1F8 0x460 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ECSPI1_MOSI_TPSMP_HDATA9 0x1F8 0x460 0x000 0x7 0x0
+#define MX8MM_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x1FC 0x464 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x1FC 0x464 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI1_MISO_UART3_DTE_RTS_B 0x1FC 0x464 0x500 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI1_MISO_GPIO5_IO8 0x1FC 0x464 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ECSPI1_MISO_TPSMP_HDATA10 0x1FC 0x464 0x000 0x7 0x0
+#define MX8MM_IOMUXC_ECSPI1_SS0_ECSPI1_SS0 0x200 0x468 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x200 0x468 0x500 0x1 0x1
+#define MX8MM_IOMUXC_ECSPI1_SS0_UART3_DTE_CTS_B 0x200 0x468 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x200 0x468 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ECSPI1_SS0_TPSMP_HDATA11 0x200 0x468 0x000 0x7 0x0
+#define MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x204 0x46C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ECSPI2_SCLK_UART4_DCE_RX 0x204 0x46C 0x50C 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI2_SCLK_UART4_DTE_TX 0x204 0x46C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI2_SCLK_GPIO5_IO10 0x204 0x46C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ECSPI2_SCLK_TPSMP_HDATA12 0x204 0x46C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x208 0x470 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ECSPI2_MOSI_UART4_DCE_TX 0x208 0x470 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI2_MOSI_UART4_DTE_RX 0x208 0x470 0x50C 0x1 0x1
+#define MX8MM_IOMUXC_ECSPI2_MOSI_GPIO5_IO11 0x208 0x470 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ECSPI2_MOSI_TPSMP_HDATA13 0x208 0x470 0x000 0x7 0x0
+#define MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x20C 0x474 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ECSPI2_MISO_UART4_DCE_CTS_B 0x20C 0x474 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI2_MISO_UART4_DTE_RTS_B 0x20C 0x474 0x508 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI2_MISO_GPIO5_IO12 0x20C 0x474 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ECSPI2_MISO_TPSMP_HDATA14 0x20C 0x474 0x000 0x7 0x0
+#define MX8MM_IOMUXC_ECSPI2_SS0_ECSPI2_SS0 0x210 0x478 0x000 0x0 0x0
+#define MX8MM_IOMUXC_ECSPI2_SS0_UART4_DCE_RTS_B 0x210 0x478 0x508 0x1 0x1
+#define MX8MM_IOMUXC_ECSPI2_SS0_UART4_DTE_CTS_B 0x210 0x478 0x000 0x1 0x0
+#define MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x210 0x478 0x000 0x5 0x0
+#define MX8MM_IOMUXC_ECSPI2_SS0_TPSMP_HDATA15 0x210 0x478 0x000 0x7 0x0
+#define MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x214 0x47C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_I2C1_SCL_ENET1_MDC 0x214 0x47C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_I2C1_SCL_GPIO5_IO14 0x214 0x47C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_I2C1_SCL_TPSMP_HDATA16 0x214 0x47C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x218 0x480 0x000 0x0 0x0
+#define MX8MM_IOMUXC_I2C1_SDA_ENET1_MDIO 0x218 0x480 0x4C0 0x1 0x2
+#define MX8MM_IOMUXC_I2C1_SDA_GPIO5_IO15 0x218 0x480 0x000 0x5 0x0
+#define MX8MM_IOMUXC_I2C1_SDA_TPSMP_HDATA17 0x218 0x480 0x000 0x7 0x0
+#define MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x21C 0x484 0x000 0x0 0x0
+#define MX8MM_IOMUXC_I2C2_SCL_ENET1_1588_EVENT1_IN 0x21C 0x484 0x000 0x1 0x0
+#define MX8MM_IOMUXC_I2C2_SCL_GPIO5_IO16 0x21C 0x484 0x000 0x5 0x0
+#define MX8MM_IOMUXC_I2C2_SCL_TPSMP_HDATA18 0x21C 0x484 0x000 0x7 0x0
+#define MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x220 0x488 0x000 0x0 0x0
+#define MX8MM_IOMUXC_I2C2_SDA_ENET1_1588_EVENT1_OUT 0x220 0x488 0x000 0x1 0x0
+#define MX8MM_IOMUXC_I2C2_SDA_GPIO5_IO17 0x220 0x488 0x000 0x5 0x0
+#define MX8MM_IOMUXC_I2C2_SDA_TPSMP_HDATA19 0x220 0x488 0x000 0x7 0x0
+#define MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x224 0x48C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_I2C3_SCL_PWM4_OUT 0x224 0x48C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_I2C3_SCL_GPT2_CLK 0x224 0x48C 0x000 0x2 0x0
+#define MX8MM_IOMUXC_I2C3_SCL_GPIO5_IO18 0x224 0x48C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_I2C3_SCL_TPSMP_HDATA20 0x224 0x48C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x228 0x490 0x000 0x0 0x0
+#define MX8MM_IOMUXC_I2C3_SDA_PWM3_OUT 0x228 0x490 0x000 0x1 0x0
+#define MX8MM_IOMUXC_I2C3_SDA_GPT3_CLK 0x228 0x490 0x000 0x2 0x0
+#define MX8MM_IOMUXC_I2C3_SDA_GPIO5_IO19 0x228 0x490 0x000 0x5 0x0
+#define MX8MM_IOMUXC_I2C3_SDA_TPSMP_HDATA21 0x228 0x490 0x000 0x7 0x0
+#define MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x22C 0x494 0x000 0x0 0x0
+#define MX8MM_IOMUXC_I2C4_SCL_PWM2_OUT 0x22C 0x494 0x000 0x1 0x0
+#define MX8MM_IOMUXC_I2C4_SCL_PCIE1_CLKREQ_B 0x22C 0x494 0x524 0x2 0x0
+#define MX8MM_IOMUXC_I2C4_SCL_GPIO5_IO20 0x22C 0x494 0x000 0x5 0x0
+#define MX8MM_IOMUXC_I2C4_SCL_TPSMP_HDATA22 0x22C 0x494 0x000 0x7 0x0
+#define MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x230 0x498 0x000 0x0 0x0
+#define MX8MM_IOMUXC_I2C4_SDA_PWM1_OUT 0x230 0x498 0x000 0x1 0x0
+#define MX8MM_IOMUXC_I2C4_SDA_PCIE2_CLKREQ_B 0x230 0x498 0x528 0x2 0x0
+#define MX8MM_IOMUXC_I2C4_SDA_GPIO5_IO21 0x230 0x498 0x000 0x5 0x0
+#define MX8MM_IOMUXC_I2C4_SDA_TPSMP_HDATA23 0x230 0x498 0x000 0x7 0x0
+#define MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x234 0x49C 0x4F4 0x0 0x0
+#define MX8MM_IOMUXC_UART1_RXD_UART1_DTE_TX 0x234 0x49C 0x000 0x0 0x0
+#define MX8MM_IOMUXC_UART1_RXD_ECSPI3_SCLK 0x234 0x49C 0x000 0x1 0x0
+#define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0
+#define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0
+#define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1
+#define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0
+#define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0
+#define MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x23C 0x4A4 0x4FC 0x0 0x0
+#define MX8MM_IOMUXC_UART2_RXD_UART2_DTE_TX 0x23C 0x4A4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_UART2_RXD_ECSPI3_MISO 0x23C 0x4A4 0x000 0x1 0x0
+#define MX8MM_IOMUXC_UART2_RXD_GPIO5_IO24 0x23C 0x4A4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_UART2_RXD_TPSMP_HDATA26 0x23C 0x4A4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x240 0x4A8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_UART2_TXD_UART2_DTE_RX 0x240 0x4A8 0x4FC 0x0 0x1
+#define MX8MM_IOMUXC_UART2_TXD_ECSPI3_SS0 0x240 0x4A8 0x000 0x1 0x0
+#define MX8MM_IOMUXC_UART2_TXD_GPIO5_IO25 0x240 0x4A8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_UART2_TXD_TPSMP_HDATA27 0x240 0x4A8 0x000 0x7 0x0
+#define MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x244 0x4AC 0x504 0x0 0x2
+#define MX8MM_IOMUXC_UART3_RXD_UART3_DTE_TX 0x244 0x4AC 0x000 0x0 0x0
+#define MX8MM_IOMUXC_UART3_RXD_UART1_DCE_CTS_B 0x244 0x4AC 0x000 0x1 0x0
+#define MX8MM_IOMUXC_UART3_RXD_UART1_DTE_RTS_B 0x244 0x4AC 0x4F0 0x1 0x0
+#define MX8MM_IOMUXC_UART3_RXD_GPIO5_IO26 0x244 0x4AC 0x000 0x5 0x0
+#define MX8MM_IOMUXC_UART3_RXD_TPSMP_HDATA28 0x244 0x4AC 0x000 0x7 0x0
+#define MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x248 0x4B0 0x000 0x0 0x0
+#define MX8MM_IOMUXC_UART3_TXD_UART3_DTE_RX 0x248 0x4B0 0x504 0x0 0x3
+#define MX8MM_IOMUXC_UART3_TXD_UART1_DCE_RTS_B 0x248 0x4B0 0x4F0 0x1 0x1
+#define MX8MM_IOMUXC_UART3_TXD_UART1_DTE_CTS_B 0x248 0x4B0 0x000 0x1 0x0
+#define MX8MM_IOMUXC_UART3_TXD_GPIO5_IO27 0x248 0x4B0 0x000 0x5 0x0
+#define MX8MM_IOMUXC_UART3_TXD_TPSMP_HDATA29 0x248 0x4B0 0x000 0x7 0x0
+#define MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x24C 0x4B4 0x50C 0x0 0x2
+#define MX8MM_IOMUXC_UART4_RXD_UART4_DTE_TX 0x24C 0x4B4 0x000 0x0 0x0
+#define MX8MM_IOMUXC_UART4_RXD_UART2_DCE_CTS_B 0x24C 0x4B4 0x000 0x1 0x0
+#define MX8MM_IOMUXC_UART4_RXD_UART2_DTE_RTS_B 0x24C 0x4B4 0x4F8 0x1 0x0
+#define MX8MM_IOMUXC_UART4_RXD_PCIE1_CLKREQ_B 0x24C 0x4B4 0x524 0x2 0x1
+#define MX8MM_IOMUXC_UART4_RXD_GPIO5_IO28 0x24C 0x4B4 0x000 0x5 0x0
+#define MX8MM_IOMUXC_UART4_RXD_TPSMP_HDATA30 0x24C 0x4B4 0x000 0x7 0x0
+#define MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x250 0x4B8 0x000 0x0 0x0
+#define MX8MM_IOMUXC_UART4_TXD_UART4_DTE_RX 0x250 0x4B8 0x50C 0x0 0x3
+#define MX8MM_IOMUXC_UART4_TXD_UART2_DCE_RTS_B 0x250 0x4B8 0x4F8 0x1 0x1
+#define MX8MM_IOMUXC_UART4_TXD_UART2_DTE_CTS_B 0x250 0x4B8 0x000 0x1 0x0
+#define MX8MM_IOMUXC_UART4_TXD_PCIE2_CLKREQ_B 0x250 0x4B8 0x528 0x2 0x1
+#define MX8MM_IOMUXC_UART4_TXD_GPIO5_IO29 0x250 0x4B8 0x000 0x5 0x0
+#define MX8MM_IOMUXC_UART4_TXD_TPSMP_HDATA31 0x250 0x4B8 0x000 0x7 0x0
+
+#endif /* __DTS_IMX8MM_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h
index 1b4cb0c55744..385c455a7c98 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h
+++ b/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 MediaTek Inc.
* Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index bb2045be8814..97aeb946ed5e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -321,7 +321,6 @@
nvidia,default-trim = <0x9>;
nvidia,dqs-trim = <63>;
mmc-hs400-1_8v;
- supports-cqe;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 61a0afb74e63..1ea684af99c4 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -2,7 +2,7 @@
/*
* Device Tree Source for the RZ/G2E (R8A774C0) SoC
*
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
*/
#include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
@@ -1150,9 +1150,8 @@
<&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
- <&dmac2 0x5b>, <&dmac2 0x5a>;
- dma-names = "tx", "rx", "tx", "rx";
+ dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+ dma-names = "tx", "rx";
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
resets = <&cpg 202>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index a69faa60ea4d..d2ad665fe2d9 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -2,7 +2,7 @@
/*
* Device Tree Source for the R-Car E3 (R8A77990) SoC
*
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
*/
#include <dt-bindings/clock/r8a77990-cpg-mssr.h>
@@ -1067,9 +1067,8 @@
<&cpg CPG_CORE R8A77990_CLK_S3D1C>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
- <&dmac2 0x5b>, <&dmac2 0x5a>;
- dma-names = "tx", "rx", "tx", "rx";
+ dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+ dma-names = "tx", "rx";
power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
resets = <&cpg 202>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index 33c44e857247..0e34354b2092 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -108,8 +108,8 @@
snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 50000>;
- tx_delay = <0x25>;
- rx_delay = <0x11>;
+ tx_delay = <0x24>;
+ rx_delay = <0x18>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 2157a528276b..79b4d1d4b5d6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -46,8 +46,7 @@
vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
compatible = "regulator-fixed";
- enable-active-high;
- gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&usb20_host_drv>;
regulator-name = "vcc_host1_5v";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 84f14b132e8f..dabef1a21649 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -1445,11 +1445,11 @@
sdmmc0 {
sdmmc0_clk: sdmmc0-clk {
- rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
+ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
};
sdmmc0_cmd: sdmmc0-cmd {
- rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
+ rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
};
sdmmc0_dectn: sdmmc0-dectn {
@@ -1461,14 +1461,14 @@
};
sdmmc0_bus1: sdmmc0-bus1 {
- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
+ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
};
sdmmc0_bus4: sdmmc0-bus4 {
- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
- <1 RK_PA1 1 &pcfg_pull_up_4ma>,
- <1 RK_PA2 1 &pcfg_pull_up_4ma>,
- <1 RK_PA3 1 &pcfg_pull_up_4ma>;
+ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
+ <1 RK_PA1 1 &pcfg_pull_up_8ma>,
+ <1 RK_PA2 1 &pcfg_pull_up_8ma>,
+ <1 RK_PA3 1 &pcfg_pull_up_8ma>;
};
sdmmc0_gpio: sdmmc0-gpio {
@@ -1642,50 +1642,50 @@
rgmiim1_pins: rgmiim1-pins {
rockchip,pins =
/* mac_txclk */
- <1 RK_PB4 2 &pcfg_pull_none_12ma>,
+ <1 RK_PB4 2 &pcfg_pull_none_8ma>,
/* mac_rxclk */
- <1 RK_PB5 2 &pcfg_pull_none_2ma>,
+ <1 RK_PB5 2 &pcfg_pull_none_4ma>,
/* mac_mdio */
- <1 RK_PC3 2 &pcfg_pull_none_2ma>,
+ <1 RK_PC3 2 &pcfg_pull_none_4ma>,
/* mac_txen */
- <1 RK_PD1 2 &pcfg_pull_none_12ma>,
+ <1 RK_PD1 2 &pcfg_pull_none_8ma>,
/* mac_clk */
- <1 RK_PC5 2 &pcfg_pull_none_2ma>,
+ <1 RK_PC5 2 &pcfg_pull_none_4ma>,
/* mac_rxdv */
- <1 RK_PC6 2 &pcfg_pull_none_2ma>,
+ <1 RK_PC6 2 &pcfg_pull_none_4ma>,
/* mac_mdc */
- <1 RK_PC7 2 &pcfg_pull_none_2ma>,
+ <1 RK_PC7 2 &pcfg_pull_none_4ma>,
/* mac_rxd1 */
- <1 RK_PB2 2 &pcfg_pull_none_2ma>,
+ <1 RK_PB2 2 &pcfg_pull_none_4ma>,
/* mac_rxd0 */
- <1 RK_PB3 2 &pcfg_pull_none_2ma>,
+ <1 RK_PB3 2 &pcfg_pull_none_4ma>,
/* mac_txd1 */
- <1 RK_PB0 2 &pcfg_pull_none_12ma>,
+ <1 RK_PB0 2 &pcfg_pull_none_8ma>,
/* mac_txd0 */
- <1 RK_PB1 2 &pcfg_pull_none_12ma>,
+ <1 RK_PB1 2 &pcfg_pull_none_8ma>,
/* mac_rxd3 */
- <1 RK_PB6 2 &pcfg_pull_none_2ma>,
+ <1 RK_PB6 2 &pcfg_pull_none_4ma>,
/* mac_rxd2 */
- <1 RK_PB7 2 &pcfg_pull_none_2ma>,
+ <1 RK_PB7 2 &pcfg_pull_none_4ma>,
/* mac_txd3 */
- <1 RK_PC0 2 &pcfg_pull_none_12ma>,
+ <1 RK_PC0 2 &pcfg_pull_none_8ma>,
/* mac_txd2 */
- <1 RK_PC1 2 &pcfg_pull_none_12ma>,
+ <1 RK_PC1 2 &pcfg_pull_none_8ma>,
/* mac_txclk */
- <0 RK_PB0 1 &pcfg_pull_none>,
+ <0 RK_PB0 1 &pcfg_pull_none_8ma>,
/* mac_txen */
- <0 RK_PB4 1 &pcfg_pull_none>,
+ <0 RK_PB4 1 &pcfg_pull_none_8ma>,
/* mac_clk */
- <0 RK_PD0 1 &pcfg_pull_none>,
+ <0 RK_PD0 1 &pcfg_pull_none_4ma>,
/* mac_txd1 */
- <0 RK_PC0 1 &pcfg_pull_none>,
+ <0 RK_PC0 1 &pcfg_pull_none_8ma>,
/* mac_txd0 */
- <0 RK_PC1 1 &pcfg_pull_none>,
+ <0 RK_PC1 1 &pcfg_pull_none_8ma>,
/* mac_txd3 */
- <0 RK_PC7 1 &pcfg_pull_none>,
+ <0 RK_PC7 1 &pcfg_pull_none_8ma>,
/* mac_txd2 */
- <0 RK_PC6 1 &pcfg_pull_none>;
+ <0 RK_PC6 1 &pcfg_pull_none_8ma>;
};
rmiim1_pins: rmiim1-pins {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
index 4a543f2117d4..844eac939a97 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
@@ -158,6 +158,7 @@
};
&hdmi {
+ ddc-i2c-bus = <&i2c3>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_cec>;
status = "okay";
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 5fc6f51908fd..cb89c80800b5 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
@@ -109,7 +110,7 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
u32 abytes, u32 *macp)
{
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
kernel_neon_begin();
ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
num_rounds(key));
@@ -255,7 +256,7 @@ static int ccm_encrypt(struct aead_request *req)
err = skcipher_walk_aead_encrypt(&walk, req, false);
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -313,7 +314,7 @@ static int ccm_decrypt(struct aead_request *req)
err = skcipher_walk_aead_decrypt(&walk, req, false);
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -372,7 +373,7 @@ static struct aead_alg ccm_aes_alg = {
static int __init aes_mod_init(void)
{
- if (!(elf_hwcap & HWCAP_AES))
+ if (!cpu_have_named_feature(AES))
return -ENODEV;
return crypto_register_aead(&ccm_aes_alg);
}
diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
index e6b3227bbf57..3213843fcb46 100644
--- a/arch/arm64/crypto/aes-ce-glue.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
@@ -52,7 +53,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
return;
}
@@ -66,7 +67,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
return;
}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 1e676625ef33..f0ceb545bd1e 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -405,7 +405,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return aes_ctr_encrypt_fallback(ctx, req);
return ctr_encrypt(req);
@@ -642,7 +642,7 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
{
int rounds = 6 + ctx->key_length / 4;
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
kernel_neon_begin();
aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
enc_after);
@@ -707,7 +707,7 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
- mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
+ mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);
memcpy(out, ctx->dg, AES_BLOCK_SIZE);
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index e7a95a566462..02b65d9eb947 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -288,7 +288,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return aes_ctr_encrypt_fallback(&ctx->fallback, req);
return ctr_encrypt(req);
@@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
int err;
err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
kernel_neon_begin();
neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
@@ -440,7 +442,7 @@ static int __init aes_init(void)
int err;
int i;
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index bece1d85bd81..82029cda2e77 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@
#include <crypto/algapi.h>
#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -90,7 +91,7 @@ static int chacha_neon(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_chacha_crypt(req);
return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -104,7 +105,7 @@ static int xchacha_neon(struct skcipher_request *req)
u32 state[16];
u8 real_iv[16];
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_xchacha_crypt(req);
crypto_chacha_init(state, ctx, req->iv);
@@ -173,7 +174,7 @@ static struct skcipher_alg algs[] = {
static int __init chacha_simd_mod_init(void)
{
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index dd325829ee44..2e0a7d2eee24 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/neon.h>
#include <asm/simd.h>
@@ -38,7 +39,7 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
{
u16 *crc = shash_desc_ctx(desc);
- if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
*crc = crc_t10dif_pmull_p8(*crc, data, length);
kernel_neon_end();
@@ -54,7 +55,7 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
{
u16 *crc = shash_desc_ctx(desc);
- if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
*crc = crc_t10dif_pmull_p64(*crc, data, length);
kernel_neon_end();
@@ -101,7 +102,7 @@ static struct shash_alg crc_t10dif_alg[] = {{
static int __init crc_t10dif_mod_init(void)
{
- if (elf_hwcap & HWCAP_PMULL)
+ if (cpu_have_named_feature(PMULL))
return crypto_register_shashes(crc_t10dif_alg,
ARRAY_SIZE(crc_t10dif_alg));
else
@@ -111,7 +112,7 @@ static int __init crc_t10dif_mod_init(void)
static void __exit crc_t10dif_mod_exit(void)
{
- if (elf_hwcap & HWCAP_PMULL)
+ if (cpu_have_named_feature(PMULL))
crypto_unregister_shashes(crc_t10dif_alg,
ARRAY_SIZE(crc_t10dif_alg));
else
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 791ad422c427..b39ed99b06fb 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -17,6 +17,7 @@
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
@@ -89,7 +90,7 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
struct ghash_key const *k,
const char *head))
{
- if (likely(may_use_simd())) {
+ if (likely(crypto_simd_usable())) {
kernel_neon_begin();
simd_update(blocks, dg, src, key, head);
kernel_neon_end();
@@ -441,7 +442,7 @@ static int gcm_encrypt(struct aead_request *req)
err = skcipher_walk_aead_encrypt(&walk, req, false);
- if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
u32 const *rk = NULL;
kernel_neon_begin();
@@ -473,9 +474,11 @@ static int gcm_encrypt(struct aead_request *req)
put_unaligned_be32(2, iv + GCM_IV_SIZE);
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ const int blocks =
+ walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
+ int remaining = blocks;
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -485,9 +488,9 @@ static int gcm_encrypt(struct aead_request *req)
dst += AES_BLOCK_SIZE;
src += AES_BLOCK_SIZE;
- } while (--blocks > 0);
+ } while (--remaining > 0);
- ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+ ghash_do_update(blocks, dg,
walk.dst.virt.addr, &ctx->ghash_key,
NULL, pmull_ghash_update_p64);
@@ -563,7 +566,7 @@ static int gcm_decrypt(struct aead_request *req)
err = skcipher_walk_aead_decrypt(&walk, req, false);
- if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
u32 const *rk = NULL;
kernel_neon_begin();
@@ -609,7 +612,7 @@ static int gcm_decrypt(struct aead_request *req)
put_unaligned_be32(2, iv + GCM_IV_SIZE);
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
@@ -704,10 +707,10 @@ static int __init ghash_ce_mod_init(void)
{
int ret;
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
- if (elf_hwcap & HWCAP_PMULL)
+ if (cpu_have_named_feature(PMULL))
ret = crypto_register_shashes(ghash_alg,
ARRAY_SIZE(ghash_alg));
else
@@ -717,7 +720,7 @@ static int __init ghash_ce_mod_init(void)
if (ret)
return ret;
- if (elf_hwcap & HWCAP_PMULL) {
+ if (cpu_have_named_feature(PMULL)) {
ret = crypto_register_aead(&gcm_aes_alg);
if (ret)
crypto_unregister_shashes(ghash_alg,
@@ -728,7 +731,7 @@ static int __init ghash_ce_mod_init(void)
static void __exit ghash_ce_mod_exit(void)
{
- if (elf_hwcap & HWCAP_PMULL)
+ if (cpu_have_named_feature(PMULL))
crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg));
else
crypto_unregister_shash(ghash_alg);
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
index 22cc32ac9448..895d3727c1fb 100644
--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
static int nhpoly1305_neon_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- if (srclen < 64 || !may_use_simd())
+ if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
@@ -56,7 +57,7 @@ static struct shash_alg nhpoly1305_alg = {
static int __init nhpoly1305_mod_init(void)
{
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 17fac2889f56..eaa7a8258f1c 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
@@ -38,7 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha1_update(desc, data, len);
sctx->finalize = 0;
@@ -56,7 +57,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha1_finup(desc, data, len, out);
/*
@@ -78,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha1_finup(desc, NULL, 0, out);
sctx->finalize = 0;
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 261f5195cab7..a725997e55f2 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
@@ -42,7 +43,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha256_block_data_order);
@@ -61,7 +62,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha256_block_data_order);
@@ -90,7 +91,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
sha256_base_do_finalize(desc,
(sha256_block_fn *)sha256_block_data_order);
return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 4aedeaefd61f..e62298740e31 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -14,6 +14,7 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cryptohash.h>
@@ -89,7 +90,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha256_block_data_order);
@@ -119,7 +120,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha256_block_data_order);
@@ -173,7 +174,7 @@ static int __init sha256_mod_init(void)
if (ret)
return ret;
- if (elf_hwcap & HWCAP_ASIMD) {
+ if (cpu_have_named_feature(ASIMD)) {
ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
if (ret)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
@@ -183,7 +184,7 @@ static int __init sha256_mod_init(void)
static void __exit sha256_mod_fini(void)
{
- if (elf_hwcap & HWCAP_ASIMD)
+ if (cpu_have_named_feature(ASIMD))
crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index a336feac0f59..9a4bbfc45f40 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -14,6 +14,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha3.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
@@ -32,7 +33,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
struct sha3_state *sctx = shash_desc_ctx(desc);
unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha3_update(desc, data, len);
if ((sctx->partial + len) >= sctx->rsiz) {
@@ -76,7 +77,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
__le64 *digest = (__le64 *)out;
int i;
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha3_final(desc, out);
sctx->buf[sctx->partial++] = 0x06;
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index f2c5f28c622a..2369540040aa 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -13,6 +13,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha512_base.h>
#include <linux/cpufeature.h>
@@ -31,7 +32,7 @@ asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha512_base_do_update(desc, data, len,
(sha512_block_fn *)sha512_block_data_order);
@@ -46,7 +47,7 @@ static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
if (len)
sha512_base_do_update(desc, data, len,
(sha512_block_fn *)sha512_block_data_order);
@@ -65,7 +66,7 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
{
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
sha512_base_do_finalize(desc,
(sha512_block_fn *)sha512_block_data_order);
return sha512_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 88938a20d9b2..5d15533799a2 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <linux/cpufeature.h>
@@ -28,7 +29,7 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sm3_update(desc, data, len);
kernel_neon_begin();
@@ -40,7 +41,7 @@ static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
static int sm3_ce_final(struct shash_desc *desc, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sm3_finup(desc, NULL, 0, out);
kernel_neon_begin();
@@ -53,7 +54,7 @@ static int sm3_ce_final(struct shash_desc *desc, u8 *out)
static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sm3_finup(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 0c4fc223f225..2754c875d39c 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -3,6 +3,7 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/sm4.h>
+#include <crypto/internal/simd.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
@@ -20,7 +21,7 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
crypto_sm4_encrypt(tfm, out, in);
} else {
kernel_neon_begin();
@@ -33,7 +34,7 @@ static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
crypto_sm4_decrypt(tfm, out, in);
} else {
kernel_neon_begin();
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 1e17ea5c372b..eb0df239a759 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -13,10 +13,10 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += msi.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += rwsem.h
generic-y += segment.h
generic-y += serial.h
generic-y += set_memory.h
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 4b650ec1d7dd..b9f8d787eea9 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -14,8 +14,6 @@
#include <linux/stddef.h>
#include <linux/stringify.h>
-extern int alternatives_applied;
-
struct alt_instr {
s32 orig_offset; /* offset to original instruction */
s32 alt_offset; /* offset to replacement instruction */
@@ -27,7 +25,9 @@ struct alt_instr {
typedef void (*alternative_cb_t)(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
+void __init apply_boot_alternatives(void);
void __init apply_alternatives_all(void);
+bool alternative_is_applied(u16 cpufeature);
#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length);
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index e278f94df0c9..14b41ddc68ba 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -22,6 +22,7 @@
#ifndef __ASSEMBLY__
+#include <linux/irqchip/arm-gic-common.h>
#include <linux/stringify.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
@@ -114,6 +115,21 @@ static inline void gic_write_bpr1(u32 val)
write_sysreg_s(val, SYS_ICC_BPR1_EL1);
}
+static inline u32 gic_read_pmr(void)
+{
+ return read_sysreg_s(SYS_ICC_PMR_EL1);
+}
+
+static inline void gic_write_pmr(u32 val)
+{
+ write_sysreg_s(val, SYS_ICC_PMR_EL1);
+}
+
+static inline u32 gic_read_rpr(void)
+{
+ return read_sysreg_s(SYS_ICC_RPR_EL1);
+}
+
#define gic_read_typer(c) readq_relaxed(c)
#define gic_write_irouter(v, c) writeq_relaxed(v, c)
#define gic_read_lpir(c) readq_relaxed(c)
@@ -140,5 +156,21 @@ static inline void gic_write_bpr1(u32 val)
#define gits_write_vpendbaser(v, c) writeq_relaxed(v, c)
#define gits_read_vpendbaser(c) readq_relaxed(c)
+static inline bool gic_prio_masking_enabled(void)
+{
+ return system_uses_irq_prio_masking();
+}
+
+static inline void gic_pmr_mask_irqs(void)
+{
+ BUILD_BUG_ON(GICD_INT_DEF_PRI <= GIC_PRIO_IRQOFF);
+ gic_write_pmr(GIC_PRIO_IRQOFF);
+}
+
+static inline void gic_arch_enable_irqs(void)
+{
+ asm volatile ("msr daifclr, #2" : : : "memory");
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
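The new gic_pmr_mask_irqs() helper leans on the GIC rule that an interrupt is only signalled to the CPU when its priority value is numerically below the current PMR, which is also why the BUILD_BUG_ON insists the default interrupt priority sits above GIC_PRIO_IRQOFF. A minimal standalone C model of that rule (compilable in userspace; the GICD_INT_DEF_PRI value is an assumed stand-in, not taken from this patch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Values mirroring GIC_PRIO_IRQON/IRQOFF from the ptrace.h hunk below. */
#define GIC_PRIO_IRQON   0xf0
#define GIC_PRIO_IRQOFF  (GIC_PRIO_IRQON & ~0x80)   /* 0x70 */
#define GICD_INT_DEF_PRI 0xa0                       /* assumed default priority */

static uint8_t pmr = GIC_PRIO_IRQON;                /* models ICC_PMR_EL1 */

/* GIC rule: signal an IRQ only if its priority is strictly below PMR. */
static bool irq_signalled(uint8_t irq_prio) { return irq_prio < pmr; }

static void gic_pmr_mask_irqs(void) { pmr = GIC_PRIO_IRQOFF; }

int main(void)
{
	/* Mirrors the BUILD_BUG_ON: the default priority must be maskable. */
	assert(GICD_INT_DEF_PRI > GIC_PRIO_IRQOFF);

	printf("unmasked: %d\n", irq_signalled(GICD_INT_DEF_PRI));  /* 1 */
	gic_pmr_mask_irqs();
	printf("masked:   %d\n", irq_signalled(GICD_INT_DEF_PRI));  /* 0 */
	return 0;
}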
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index f2a234d6516c..b7bca1ae09e6 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -31,11 +31,23 @@
#include <clocksource/arm_arch_timer.h>
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
-extern struct static_key_false arch_timer_read_ool_enabled;
-#define needs_unstable_timer_counter_workaround() \
- static_branch_unlikely(&arch_timer_read_ool_enabled)
+#define has_erratum_handler(h) \
+ ({ \
+ const struct arch_timer_erratum_workaround *__wa; \
+ __wa = __this_cpu_read(timer_unstable_counter_workaround); \
+ (__wa && __wa->h); \
+ })
+
+#define erratum_handler(h) \
+ ({ \
+ const struct arch_timer_erratum_workaround *__wa; \
+ __wa = __this_cpu_read(timer_unstable_counter_workaround); \
+ (__wa && __wa->h) ? __wa->h : arch_timer_##h; \
+ })
+
#else
-#define needs_unstable_timer_counter_workaround() false
+#define has_erratum_handler(h) false
+#define erratum_handler(h) (arch_timer_##h)
#endif
enum arch_timer_erratum_match_type {
@@ -61,23 +73,37 @@ struct arch_timer_erratum_workaround {
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
timer_unstable_counter_workaround);
+/* inline sysreg accessors that make erratum_handler() work */
+static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
+{
+ return read_sysreg(cntp_tval_el0);
+}
+
+static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
+{
+ return read_sysreg(cntv_tval_el0);
+}
+
+static inline notrace u64 arch_timer_read_cntpct_el0(void)
+{
+ return read_sysreg(cntpct_el0);
+}
+
+static inline notrace u64 arch_timer_read_cntvct_el0(void)
+{
+ return read_sysreg(cntvct_el0);
+}
+
#define arch_timer_reg_read_stable(reg) \
-({ \
- u64 _val; \
- if (needs_unstable_timer_counter_workaround()) { \
- const struct arch_timer_erratum_workaround *wa; \
+ ({ \
+ u64 _val; \
+ \
preempt_disable_notrace(); \
- wa = __this_cpu_read(timer_unstable_counter_workaround); \
- if (wa && wa->read_##reg) \
- _val = wa->read_##reg(); \
- else \
- _val = read_sysreg(reg); \
+ _val = erratum_handler(read_ ## reg)(); \
preempt_enable_notrace(); \
- } else { \
- _val = read_sysreg(reg); \
- } \
- _val; \
-})
+ \
+ _val; \
+ })
/*
* These register accessors are marked inline so the compiler can
@@ -148,18 +174,67 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
isb();
}
-static inline u64 arch_counter_get_cntpct(void)
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do { \
+ u64 tmp, _val = (val); \
+ \
+ asm volatile( \
+ " eor %0, %1, %1\n" \
+ " add %0, sp, %0\n" \
+ " ldr xzr, [%0]" \
+ : "=r" (tmp) : "r" (_val)); \
+} while (0)
+
+static inline u64 __arch_counter_get_cntpct_stable(void)
+{
+ u64 cnt;
+
+ isb();
+ cnt = arch_timer_reg_read_stable(cntpct_el0);
+ arch_counter_enforce_ordering(cnt);
+ return cnt;
+}
+
+static inline u64 __arch_counter_get_cntpct(void)
{
+ u64 cnt;
+
isb();
- return arch_timer_reg_read_stable(cntpct_el0);
+ cnt = read_sysreg(cntpct_el0);
+ arch_counter_enforce_ordering(cnt);
+ return cnt;
}
-static inline u64 arch_counter_get_cntvct(void)
+static inline u64 __arch_counter_get_cntvct_stable(void)
{
+ u64 cnt;
+
isb();
- return arch_timer_reg_read_stable(cntvct_el0);
+ cnt = arch_timer_reg_read_stable(cntvct_el0);
+ arch_counter_enforce_ordering(cnt);
+ return cnt;
}
+static inline u64 __arch_counter_get_cntvct(void)
+{
+ u64 cnt;
+
+ isb();
+ cnt = read_sysreg(cntvct_el0);
+ arch_counter_enforce_ordering(cnt);
+ return cnt;
+}
+
+#undef arch_counter_enforce_ordering
+
static inline int arch_timer_arch_init(void)
{
return 0;
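The erratum_handler() macro above picks the per-CPU override when the workaround structure supplies one and otherwise falls back to the matching arch_timer_read_* accessor, which is exactly why those trivial sysreg wrappers were introduced. A compilable userspace sketch of the dispatch pattern, with the per-CPU lookup reduced to a plain pointer argument:

#include <stdint.h>
#include <stdio.h>

struct timer_workaround {
	uint64_t (*read_cntvct_el0)(void);	/* NULL if no override is needed */
};

static uint64_t arch_timer_read_cntvct_el0(void) { return 42; /* stands in for mrs */ }
static uint64_t wa_read_cntvct_el0(void) { return 43; /* erratum-safe variant */ }

/* Models erratum_handler(read_cntvct_el0): override if set, default otherwise. */
#define erratum_handler(wa, h) (((wa) && (wa)->h) ? (wa)->h : arch_timer_##h)

int main(void)
{
	struct timer_workaround wa = { .read_cntvct_el0 = wa_read_cntvct_el0 };
	struct timer_workaround *none = NULL;

	printf("%llu\n", (unsigned long long)erratum_handler(none, read_cntvct_el0)());
	printf("%llu\n", (unsigned long long)erratum_handler(&wa, read_cntvct_el0)());
	return 0;
}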
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 4128bec033f6..f74909ba29bd 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -24,7 +24,7 @@
.endm
.macro __uaccess_ttbr0_enable, tmp1, tmp2
- get_thread_info \tmp1
+ get_current_task \tmp1
ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
mrs \tmp2, ttbr1_el1
extr \tmp2, \tmp2, \tmp1, #48
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 4feb6119c3c9..039fbd822ec6 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -27,6 +27,7 @@
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
+#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
@@ -62,16 +63,8 @@
.endm
/*
- * Enable and disable interrupts.
+ * Save/restore interrupts.
*/
- .macro disable_irq
- msr daifset, #2
- .endm
-
- .macro enable_irq
- msr daifclr, #2
- .endm
-
.macro save_and_disable_irq, flags
mrs \flags, daif
msr daifset, #2
@@ -414,10 +407,14 @@ alternative_endif
.ifc \op, cvap
sys 3, c7, c12, 1, \kaddr // dc cvap
.else
+ .ifc \op, cvadp
+ sys 3, c7, c13, 1, \kaddr // dc cvadp
+ .else
dc \op, \kaddr
.endif
.endif
.endif
+ .endif
add \kaddr, \kaddr, \tmp1
cmp \kaddr, \size
b.lo 9998b
@@ -449,8 +446,8 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
* reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
*/
.macro reset_pmuserenr_el0, tmpreg
- mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
- sbfx \tmpreg, \tmpreg, #8, #4
+ mrs \tmpreg, id_aa64dfr0_el1
+ sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
cmp \tmpreg, #1 // Skip if no PMU present
b.lt 9000f
msr pmuserenr_el0, xzr // Disable PMU access from EL0
@@ -536,9 +533,9 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm
/*
- * Return the current thread_info.
+ * Return the current task_struct.
*/
- .macro get_thread_info, rd
+ .macro get_current_task, rd
mrs \rd, sp_el0
.endm
@@ -604,6 +601,25 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
#endif
.endm
+/*
+ * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
+ */
+ .macro tcr_clear_errata_bits, tcr, tmp1, tmp2
+#ifdef CONFIG_FUJITSU_ERRATUM_010001
+ mrs \tmp1, midr_el1
+
+ mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
+ and \tmp1, \tmp1, \tmp2
+ mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001
+ cmp \tmp1, \tmp2
+ b.ne 10f
+
+ mov_q \tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
+ bic \tcr, \tcr, \tmp2
+10:
+#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
+ .endm
+
/**
* Errata workaround prior to disabling the MMU. Insert an ISB immediately prior
* to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
@@ -721,7 +737,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.macro if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
- get_thread_info x0
+ get_current_task x0
ldr x0, [x0, #TSK_TI_PREEMPT]
sub x0, x0, #PREEMPT_DISABLE_OFFSET
cbz x0, .Lyield_\@
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index f66bb04fdf2d..85b6bedbcc68 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -20,6 +20,8 @@
#ifndef __ASSEMBLY__
+#include <linux/kasan-checks.h>
+
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) asm volatile(__nops(n))
@@ -72,31 +74,33 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
#define __smp_store_release(p, v) \
do { \
+ typeof(p) __p = (p); \
union { typeof(*p) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(*p)) (v) }; \
+ { .__val = (__force typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
+ kasan_check_write(__p, sizeof(*p)); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) \
+ : "=Q" (*__p) \
: "r" (*(__u8 *)__u.__c) \
: "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) \
+ : "=Q" (*__p) \
: "r" (*(__u16 *)__u.__c) \
: "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) \
+ : "=Q" (*__p) \
: "r" (*(__u32 *)__u.__c) \
: "memory"); \
break; \
case 8: \
asm volatile ("stlr %1, %0" \
- : "=Q" (*p) \
+ : "=Q" (*__p) \
: "r" (*(__u64 *)__u.__c) \
: "memory"); \
break; \
@@ -106,27 +110,29 @@ do { \
#define __smp_load_acquire(p) \
({ \
union { typeof(*p) __val; char __c[1]; } __u; \
+ typeof(p) __p = (p); \
compiletime_assert_atomic_type(*p); \
+ kasan_check_read(__p, sizeof(*p)); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("ldarb %w0, %1" \
: "=r" (*(__u8 *)__u.__c) \
- : "Q" (*p) : "memory"); \
+ : "Q" (*__p) : "memory"); \
break; \
case 2: \
asm volatile ("ldarh %w0, %1" \
: "=r" (*(__u16 *)__u.__c) \
- : "Q" (*p) : "memory"); \
+ : "Q" (*__p) : "memory"); \
break; \
case 4: \
asm volatile ("ldar %w0, %1" \
: "=r" (*(__u32 *)__u.__c) \
- : "Q" (*p) : "memory"); \
+ : "Q" (*__p) : "memory"); \
break; \
case 8: \
asm volatile ("ldar %0, %1" \
: "=r" (*(__u64 *)__u.__c) \
- : "Q" (*p) : "memory"); \
+ : "Q" (*__p) : "memory"); \
break; \
} \
__u.__val; \
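A detail worth calling out in the reworked barriers: capturing the macro argument once into __p guarantees p is evaluated exactly once (so side-effecting arguments behave) and gives kasan_check_read()/kasan_check_write() a single stable pointer to validate. A tiny standalone illustration of the single-evaluation idiom, using the same GNU __typeof__ extension:

#include <stdio.h>

/* Without the local capture, a macro that both checks and stores through p
 * would evaluate p twice. Capturing once, as __p does in the patch, makes
 * side-effecting arguments safe. */
#define STORE_RELEASE(p, v) do {					\
	__typeof__(p) __p = (p);	/* p evaluated exactly once */	\
	/* kasan_check_write(__p, sizeof(*__p)) would go here */	\
	*__p = (v);							\
} while (0)

int main(void)
{
	int buf[2] = { 0, 0 };
	int *cursor = buf;

	STORE_RELEASE(cursor++, 1);	/* cursor advances once, not twice */
	printf("buf[0]=%d advanced=%td\n", buf[0], cursor - buf);	/* 1 1 */
	return 0;
}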
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index 2945fe6cd863..d84294064e6a 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -11,6 +11,8 @@
/*
* #imm16 values used for BRK instruction generation
+ * 0x004: for installing kprobes
+ * 0x005: for installing uprobes
* Allowed values for kgdb are 0x400 - 0x7ff
* 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
@@ -18,10 +20,13 @@
* 0x800: kernel-mode BUG() and WARN() traps
* 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
*/
+#define KPROBES_BRK_IMM 0x004
+#define UPROBES_BRK_IMM 0x005
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800
#define KASAN_BRK_IMM 0x900
+#define KASAN_BRK_MASK 0x0ff
#endif
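These immediates end up in bits [20:5] of the generated BRK instruction (see the BRK64_OPCODE_* rework in debug-monitors.h further down), and the trap handler recovers them from the low 16 bits of the ESR comment field. A standalone sketch of the encoding; the 0xd4200000 base opcode is an assumption standing in for AARCH64_BREAK_MON:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* A64 BRK #imm16: the immediate occupies bits [20:5] of the instruction. */
#define AARCH64_BRK_BASE 0xd4200000u	/* assumed BRK base opcode */
#define KPROBES_BRK_IMM  0x004
#define UPROBES_BRK_IMM  0x005

static uint32_t brk_opcode(uint16_t imm16)
{
	return AARCH64_BRK_BASE | ((uint32_t)imm16 << 5);
}

int main(void)
{
	uint32_t insn = brk_opcode(KPROBES_BRK_IMM);

	/* Decoding the comment field recovers the immediate on trap. */
	assert(((insn >> 5) & 0xffff) == KPROBES_BRK_IMM);
	printf("kprobes BRK: 0x%08x, uprobes BRK: 0x%08x\n",
	       insn, brk_opcode(UPROBES_BRK_IMM));
	return 0;
}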
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 82e9099834ae..defdc67d9ab4 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -60,7 +60,9 @@
#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
#define ARM64_HAS_GENERIC_AUTH_ARCH 40
#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
+#define ARM64_HAS_IRQ_PRIO_MASKING 42
+#define ARM64_HAS_DCPODP 43
-#define ARM64_NCAPS 42
+#define ARM64_NCAPS 44
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index dfcfba725d72..f210bcf096f7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -14,15 +14,8 @@
#include <asm/hwcap.h>
#include <asm/sysreg.h>
-/*
- * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
- * in the kernel and for user space to keep track of which optional features
- * are supported by the current system. So let's map feature 'x' to HWCAP_x.
- * Note that HWCAP_x constants are bit fields so we need to take the log.
- */
-
-#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
-#define cpu_feature(x) ilog2(HWCAP_ ## x)
+#define MAX_CPU_FEATURES 64
+#define cpu_feature(x) KERNEL_HWCAP_ ## x
#ifndef __ASSEMBLY__
@@ -391,15 +384,21 @@ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
+/* ARM64 CAPS + alternative_cb */
+#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
+extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+
#define for_each_available_cap(cap) \
for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
bool this_cpu_has_cap(unsigned int cap);
+void cpu_set_feature(unsigned int num);
+bool cpu_have_feature(unsigned int num);
+unsigned long cpu_get_elf_hwcap(void);
+unsigned long cpu_get_elf_hwcap2(void);
-static inline bool cpu_have_feature(unsigned int num)
-{
- return elf_hwcap & (1UL << num);
-}
+#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
+#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
/* System capability check for constant caps */
static inline bool __cpus_have_const_cap(int num)
@@ -612,6 +611,12 @@ static inline bool system_supports_generic_auth(void)
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
}
+static inline bool system_uses_irq_prio_masking(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+ cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
+}
+
#define ARM64_SSBD_UNKNOWN -1
#define ARM64_SSBD_FORCE_DISABLE 0
#define ARM64_SSBD_KERNEL 1
@@ -628,11 +633,7 @@ static inline int arm64_get_ssbd_state(void)
#endif
}
-#ifdef CONFIG_ARM64_SSBD
void arm64_set_ssbd_mitigation(bool state);
-#else
-static inline void arm64_set_ssbd_mitigation(bool state) {}
-#endif
extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 951ed1a4e5c9..2602bae334fb 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -76,6 +76,8 @@
#define ARM_CPU_IMP_BRCM 0x42
#define ARM_CPU_IMP_QCOM 0x51
#define ARM_CPU_IMP_NVIDIA 0x4E
+#define ARM_CPU_IMP_FUJITSU 0x46
+#define ARM_CPU_IMP_HISI 0x48
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -87,6 +89,7 @@
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
#define ARM_CPU_PART_CORTEX_A76 0xD0B
+#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define APM_CPU_PART_POTENZA 0x000
@@ -104,6 +107,10 @@
#define NVIDIA_CPU_PART_DENVER 0x003
#define NVIDIA_CPU_PART_CARMEL 0x004
+#define FUJITSU_CPU_PART_A64FX 0x001
+
+#define HISI_CPU_PART_TSV110 0xD01
+
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -112,6 +119,7 @@
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
@@ -122,6 +130,13 @@
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
+#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+
+/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1 (v0r0 and v1r0) */
+#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
+#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0))
+#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index fa90779fc752..db452aa9e651 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -18,6 +18,8 @@
#include <linux/irqflags.h>
+#include <asm/cpufeature.h>
+
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
@@ -37,31 +39,61 @@ static inline unsigned long local_daif_save(void)
{
unsigned long flags;
- flags = arch_local_save_flags();
+ flags = read_sysreg(daif);
+
+ if (system_uses_irq_prio_masking()) {
+ /* If IRQs are masked with PMR, reflect it in the flags */
+ if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF)
+ flags |= PSR_I_BIT;
+ }
local_daif_mask();
return flags;
}
-static inline void local_daif_unmask(void)
-{
- trace_hardirqs_on();
- asm volatile(
- "msr daifclr, #0xf // local_daif_unmask"
- :
- :
- : "memory");
-}
-
static inline void local_daif_restore(unsigned long flags)
{
- if (!arch_irqs_disabled_flags(flags))
+ bool irq_disabled = flags & PSR_I_BIT;
+
+ if (!irq_disabled) {
trace_hardirqs_on();
- arch_local_irq_restore(flags);
+ if (system_uses_irq_prio_masking())
+ arch_local_irq_enable();
+ } else if (!(flags & PSR_A_BIT)) {
+ /*
+ * If interrupts are disabled but we can take
+ * asynchronous errors, we can take NMIs
+ */
+ if (system_uses_irq_prio_masking()) {
+ flags &= ~PSR_I_BIT;
+ /*
+ * There has been concern that the write to daif
+ * might be reordered before this write to PMR.
+ * From the ARM ARM DDI 0487D.a, section D1.7.1
+ * "Accessing PSTATE fields":
+ * Writes to the PSTATE fields have side-effects on
+ * various aspects of the PE operation. All of these
+ * side-effects are guaranteed:
+ * - Not to be visible to earlier instructions in
+ * the execution stream.
+ * - To be visible to later instructions in the
+ * execution stream.
+ *
+ * Also, writes to PMR are self-synchronizing, so no
+ * interrupts with a lower priority than PMR are signaled
+ * to the PE after the write.
+ *
+ * So we don't need additional synchronization here.
+ */
+ arch_local_irq_disable();
+ }
+ }
+
+ write_sysreg(flags, daif);
- if (arch_irqs_disabled_flags(flags))
+ if (irq_disabled)
trace_hardirqs_off();
}
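The reworked local_daif_save() folds the PMR state into the returned flags: when interrupts are masked only through priority masking, the snapshot still reports PSR_I_BIT so callers get one consistent view. A standalone model of that fold, with plain variables standing in for the DAIF and ICC_PMR_EL1 registers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSR_I_BIT	0x80
#define GIC_PRIO_IRQON	0xf0
#define GIC_PRIO_IRQOFF	0x70

static uint64_t daif;				/* toy DAIF register */
static uint8_t  pmr = GIC_PRIO_IRQON;		/* toy ICC_PMR_EL1 */
static bool     uses_prio_masking = true;

/* Mirrors the flag-building part of local_daif_save(). */
static uint64_t local_daif_save_flags(void)
{
	uint64_t flags = daif;

	if (uses_prio_masking && pmr <= GIC_PRIO_IRQOFF)
		flags |= PSR_I_BIT;		/* reflect PMR masking in the I bit */
	return flags;
}

int main(void)
{
	pmr = GIC_PRIO_IRQOFF;			/* masked via PMR, DAIF.I still clear */
	printf("flags report I bit: %d\n",
	       !!(local_daif_save_flags() & PSR_I_BIT));	/* 1 */
	return 0;
}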
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index a44cf5225429..0679f781696d 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -65,12 +65,9 @@
#define CACHE_FLUSH_IS_SAFE 1
/* kprobes BRK opcodes with ESR encoding */
-#define BRK64_ESR_MASK 0xFFFF
-#define BRK64_ESR_KPROBES 0x0004
-#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
/* uprobes BRK opcodes with ESR encoding */
-#define BRK64_ESR_UPROBES 0x0005
-#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (BRK64_ESR_UPROBES << 5))
+#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
@@ -94,18 +91,24 @@ struct step_hook {
int (*fn)(struct pt_regs *regs, unsigned int esr);
};
-void register_step_hook(struct step_hook *hook);
-void unregister_step_hook(struct step_hook *hook);
+void register_user_step_hook(struct step_hook *hook);
+void unregister_user_step_hook(struct step_hook *hook);
+
+void register_kernel_step_hook(struct step_hook *hook);
+void unregister_kernel_step_hook(struct step_hook *hook);
struct break_hook {
struct list_head node;
- u32 esr_val;
- u32 esr_mask;
int (*fn)(struct pt_regs *regs, unsigned int esr);
+ u16 imm;
+ u16 mask; /* These bits are ignored when comparing with imm */
};
-void register_break_hook(struct break_hook *hook);
-void unregister_break_hook(struct break_hook *hook);
+void register_user_break_hook(struct break_hook *hook);
+void unregister_user_break_hook(struct break_hook *hook);
+
+void register_kernel_break_hook(struct break_hook *hook);
+void unregister_kernel_break_hook(struct break_hook *hook);
u8 debug_monitors_arch(void);
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 95dbf3ef735a..de98191e4c7d 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -29,15 +29,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return NULL;
}
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
-#define arch_setup_dma_ops arch_setup_dma_ops
-
-#ifdef CONFIG_IOMMU_DMA
-void arch_teardown_dma_ops(struct device *dev);
-#define arch_teardown_dma_ops arch_teardown_dma_ops
-#endif
-
/*
* Do not use this function in a driver; it is only provided for
* arch/arm/mm/xen.c, which is used by arm64 as well.
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed320895d1f..c9e9a6978e73 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -44,6 +44,17 @@ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+/*
+ * Even when Linux uses IRQ priorities for IRQ disabling, EFI does not.
+ * And EFI shouldn't really play around with priority masking as it is not aware
+ * which priorities the OS has assigned to its interrupts.
+ */
+#define arch_efi_save_flags(state_flags) \
+ ((void)((state_flags) = read_sysreg(daif)))
+
+#define arch_efi_restore_flags(state_flags) write_sysreg(state_flags, daif)
+
+
/* arch specific definitions used by the stub code */
/*
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 6adc1a90e7e6..355d120b78cb 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -214,10 +214,10 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
set_thread_flag(TIF_32BIT); \
})
#define COMPAT_ARCH_DLINFO
-extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
- int uses_interp);
+extern int aarch32_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
#define compat_arch_setup_additional_pages \
- aarch32_setup_vectors_page
+ aarch32_setup_additional_pages
#endif /* CONFIG_COMPAT */
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 52233f00d53d..0e27fe91d5ea 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -156,9 +156,7 @@
ESR_ELx_WFx_ISS_WFI)
/* BRK instruction trap from AArch64 state */
-#define ESR_ELx_VAL_BRK64(imm) \
- ((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \
- ((imm) & 0xffff))
+#define ESR_ELx_BRK64_ISS_COMMENT_MASK 0xffff
/* ISS field definitions for System instruction traps */
#define ESR_ELx_SYS64_ISS_RES0_SHIFT 22
@@ -198,9 +196,10 @@
/*
* User space cache operations have the following sysreg encoding
* in System instructions.
- * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 12, 14 }, WRITE (L=0)
+ * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 12, 13, 14 }, WRITE (L=0)
*/
#define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC 14
+#define ESR_ELx_SYS64_ISS_CRM_DC_CVADP 13
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAP 12
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAU 11
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAC 10
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index cccb83ad7fa8..a56efb5626fa 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -23,26 +23,34 @@
#include <asm/errno.h>
+#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
+
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
do { \
+ unsigned int loops = FUTEX_MAX_LOOPS; \
+ \
uaccess_enable(); \
asm volatile( \
" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
insn "\n" \
-"2: stlxr %w3, %w0, %2\n" \
-" cbnz %w3, 1b\n" \
-" dmb ish\n" \
+"2: stlxr %w0, %w3, %2\n" \
+" cbz %w0, 3f\n" \
+" sub %w4, %w4, %w0\n" \
+" cbnz %w4, 1b\n" \
+" mov %w0, %w7\n" \
"3:\n" \
+" dmb ish\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
-"4: mov %w0, %w5\n" \
+"4: mov %w0, %w6\n" \
" b 3b\n" \
" .popsection\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
- : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
- : "r" (oparg), "Ir" (-EFAULT) \
+ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \
+ "+r" (loops) \
+ : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
: "memory"); \
uaccess_disable(); \
} while (0)
@@ -57,23 +65,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("mov %w0, %w4",
+ __futex_atomic_op("mov %w3, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("add %w0, %w1, %w4",
+ __futex_atomic_op("add %w3, %w1, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("orr %w0, %w1, %w4",
+ __futex_atomic_op("orr %w3, %w1, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("and %w0, %w1, %w4",
+ __futex_atomic_op("and %w3, %w1, %w5",
ret, oldval, uaddr, tmp, ~oparg);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("eor %w0, %w1, %w4",
+ __futex_atomic_op("eor %w3, %w1, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
default:
@@ -93,6 +101,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
+ unsigned int loops = FUTEX_MAX_LOOPS;
u32 val, tmp;
u32 __user *uaddr;
@@ -104,24 +113,30 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
-" sub %w3, %w1, %w4\n"
-" cbnz %w3, 3f\n"
-"2: stlxr %w3, %w5, %2\n"
-" cbnz %w3, 1b\n"
-" dmb ish\n"
+" sub %w3, %w1, %w5\n"
+" cbnz %w3, 4f\n"
+"2: stlxr %w3, %w6, %2\n"
+" cbz %w3, 3f\n"
+" sub %w4, %w4, %w3\n"
+" cbnz %w4, 1b\n"
+" mov %w0, %w8\n"
"3:\n"
+" dmb ish\n"
+"4:\n"
" .pushsection .fixup,\"ax\"\n"
-"4: mov %w0, %w6\n"
-" b 3b\n"
+"5: mov %w0, %w7\n"
+" b 4b\n"
" .popsection\n"
- _ASM_EXTABLE(1b, 4b)
- _ASM_EXTABLE(2b, 4b)
- : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
- : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
+ _ASM_EXTABLE(1b, 5b)
+ _ASM_EXTABLE(2b, 5b)
+ : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
+ : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
: "memory");
uaccess_disable();
- *uval = val;
+ if (!ret)
+ *uval = val;
+
return ret;
}
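The futex rework bounds the exclusive-store retry loop at FUTEX_MAX_LOOPS and fails with -EAGAIN rather than spinning indefinitely under contention (the caller can then reschedule and retry). The same bounded-retry shape can be sketched portably with a C11 compare-exchange standing in for the LDXR/STLXR pair; this models the control flow only, not the kernel's uaccess-wrapped asm:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define FUTEX_MAX_LOOPS 128

/* Bounded fetch-add: give up with -EAGAIN when the update keeps failing. */
static int bounded_fetch_add(_Atomic unsigned int *uaddr, unsigned int oparg,
			     unsigned int *oldval)
{
	unsigned int loops = FUTEX_MAX_LOOPS;
	unsigned int old = atomic_load(uaddr);

	/* On failure, old is refreshed with the current value, much like
	 * re-running the LDXR. */
	while (!atomic_compare_exchange_weak(uaddr, &old, old + oparg)) {
		if (--loops == 0)
			return -EAGAIN;
	}
	*oldval = old;
	return 0;
}

int main(void)
{
	_Atomic unsigned int futex_word = 5;
	unsigned int old;

	printf("ret=%d old=%u new=%u\n",
	       bounded_fetch_add(&futex_word, 3, &old), old,
	       (unsigned int)atomic_load(&futex_word));	/* ret=0 old=5 new=8 */
	return 0;
}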
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 1473fc2f7ab7..89691c86640a 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -17,8 +17,12 @@
#define __ASM_HARDIRQ_H
#include <linux/cache.h>
+#include <linux/percpu.h>
#include <linux/threads.h>
+#include <asm/barrier.h>
#include <asm/irq.h>
+#include <asm/kvm_arm.h>
+#include <asm/sysreg.h>
#define NR_IPI 7
@@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+struct nmi_ctx {
+ u64 hcr;
+};
+
+DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
+
+#define arch_nmi_enter() \
+ do { \
+ if (is_kernel_in_hyp_mode()) { \
+ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
+ nmi_ctx->hcr = read_sysreg(hcr_el2); \
+ if (!(nmi_ctx->hcr & HCR_TGE)) { \
+ write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
+ isb(); \
+ } \
+ } \
+ } while (0)
+
+#define arch_nmi_exit() \
+ do { \
+ if (is_kernel_in_hyp_mode()) { \
+ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
+ if (!(nmi_ctx->hcr & HCR_TGE)) \
+ write_sysreg(nmi_ctx->hcr, hcr_el2); \
+ } \
+ } while (0)
+
static inline void ack_bad_irq(unsigned int irq)
{
extern unsigned long irq_err_count;
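arch_nmi_enter()/arch_nmi_exit() follow a plain save-force-restore pattern on HCR_EL2.TGE so a VHE kernel keeps trapping to itself while the NMI is handled. A userspace model of the pattern; the HCR_TGE bit position is assumed from the architecture rather than quoted from this patch:

#include <stdint.h>
#include <stdio.h>

#define HCR_TGE (1ull << 27)	/* assumed bit position of HCR_EL2.TGE */

static uint64_t hcr_el2;	/* toy stand-in for the real register */

struct nmi_ctx { uint64_t hcr; };

/* Mirrors arch_nmi_enter(): stash HCR_EL2 and force TGE for the NMI. */
static void nmi_enter(struct nmi_ctx *ctx)
{
	ctx->hcr = hcr_el2;
	if (!(ctx->hcr & HCR_TGE))
		hcr_el2 = ctx->hcr | HCR_TGE;
}

/* Mirrors arch_nmi_exit(): restore the saved value if we changed it. */
static void nmi_exit(const struct nmi_ctx *ctx)
{
	if (!(ctx->hcr & HCR_TGE))
		hcr_el2 = ctx->hcr;
}

int main(void)
{
	struct nmi_ctx ctx;

	nmi_enter(&ctx);
	printf("in NMI, TGE set: %d\n", !!(hcr_el2 & HCR_TGE));		/* 1 */
	nmi_exit(&ctx);
	printf("after NMI, TGE set: %d\n", !!(hcr_el2 & HCR_TGE));	/* 0 */
	return 0;
}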
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 400b80b49595..b4bfb6672168 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -17,6 +17,7 @@
#define __ASM_HWCAP_H
#include <uapi/asm/hwcap.h>
+#include <asm/cpufeature.h>
#define COMPAT_HWCAP_HALF (1 << 1)
#define COMPAT_HWCAP_THUMB (1 << 2)
@@ -40,11 +41,67 @@
#define COMPAT_HWCAP2_CRC32 (1 << 4)
#ifndef __ASSEMBLY__
+#include <linux/log2.h>
+
+/*
+ * For userspace we represent hwcaps as a collection of HWCAP{,2}_x bitfields
+ * as described in uapi/asm/hwcap.h. For the kernel we represent hwcaps as
+ * natural numbers (in a single range of size MAX_CPU_FEATURES) defined here
+ * with prefix KERNEL_HWCAP_ mapped to their HWCAP{,2}_x counterpart.
+ *
+ * Hwcaps should be set and tested within the kernel via the
+ * cpu_{set,have}_named_feature(feature) macros, where feature is the
+ * unique suffix of KERNEL_HWCAP_{feature}.
+ */
+#define __khwcap_feature(x) const_ilog2(HWCAP_ ## x)
+#define KERNEL_HWCAP_FP __khwcap_feature(FP)
+#define KERNEL_HWCAP_ASIMD __khwcap_feature(ASIMD)
+#define KERNEL_HWCAP_EVTSTRM __khwcap_feature(EVTSTRM)
+#define KERNEL_HWCAP_AES __khwcap_feature(AES)
+#define KERNEL_HWCAP_PMULL __khwcap_feature(PMULL)
+#define KERNEL_HWCAP_SHA1 __khwcap_feature(SHA1)
+#define KERNEL_HWCAP_SHA2 __khwcap_feature(SHA2)
+#define KERNEL_HWCAP_CRC32 __khwcap_feature(CRC32)
+#define KERNEL_HWCAP_ATOMICS __khwcap_feature(ATOMICS)
+#define KERNEL_HWCAP_FPHP __khwcap_feature(FPHP)
+#define KERNEL_HWCAP_ASIMDHP __khwcap_feature(ASIMDHP)
+#define KERNEL_HWCAP_CPUID __khwcap_feature(CPUID)
+#define KERNEL_HWCAP_ASIMDRDM __khwcap_feature(ASIMDRDM)
+#define KERNEL_HWCAP_JSCVT __khwcap_feature(JSCVT)
+#define KERNEL_HWCAP_FCMA __khwcap_feature(FCMA)
+#define KERNEL_HWCAP_LRCPC __khwcap_feature(LRCPC)
+#define KERNEL_HWCAP_DCPOP __khwcap_feature(DCPOP)
+#define KERNEL_HWCAP_SHA3 __khwcap_feature(SHA3)
+#define KERNEL_HWCAP_SM3 __khwcap_feature(SM3)
+#define KERNEL_HWCAP_SM4 __khwcap_feature(SM4)
+#define KERNEL_HWCAP_ASIMDDP __khwcap_feature(ASIMDDP)
+#define KERNEL_HWCAP_SHA512 __khwcap_feature(SHA512)
+#define KERNEL_HWCAP_SVE __khwcap_feature(SVE)
+#define KERNEL_HWCAP_ASIMDFHM __khwcap_feature(ASIMDFHM)
+#define KERNEL_HWCAP_DIT __khwcap_feature(DIT)
+#define KERNEL_HWCAP_USCAT __khwcap_feature(USCAT)
+#define KERNEL_HWCAP_ILRCPC __khwcap_feature(ILRCPC)
+#define KERNEL_HWCAP_FLAGM __khwcap_feature(FLAGM)
+#define KERNEL_HWCAP_SSBS __khwcap_feature(SSBS)
+#define KERNEL_HWCAP_SB __khwcap_feature(SB)
+#define KERNEL_HWCAP_PACA __khwcap_feature(PACA)
+#define KERNEL_HWCAP_PACG __khwcap_feature(PACG)
+
+#define __khwcap2_feature(x) (const_ilog2(HWCAP2_ ## x) + 32)
+#define KERNEL_HWCAP_DCPODP __khwcap2_feature(DCPODP)
+#define KERNEL_HWCAP_SVE2 __khwcap2_feature(SVE2)
+#define KERNEL_HWCAP_SVEAES __khwcap2_feature(SVEAES)
+#define KERNEL_HWCAP_SVEPMULL __khwcap2_feature(SVEPMULL)
+#define KERNEL_HWCAP_SVEBITPERM __khwcap2_feature(SVEBITPERM)
+#define KERNEL_HWCAP_SVESHA3 __khwcap2_feature(SVESHA3)
+#define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4)
+
/*
* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports.
*/
-#define ELF_HWCAP (elf_hwcap)
+#define ELF_HWCAP cpu_get_elf_hwcap()
+#define ELF_HWCAP2 cpu_get_elf_hwcap2()
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
@@ -60,6 +117,5 @@ enum {
#endif
};
-extern unsigned long elf_hwcap;
#endif
#endif
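The scheme maps each userspace HWCAP{,2}_x bitfield to a single natural number: the bit index for HWCAP, the bit index plus 32 for HWCAP2. That lets the kernel track every feature in one 64-bit set and split it back into the two ELF hwcap words on demand. A compilable sketch, using __builtin_ctz as a stand-in for const_ilog2 and assuming the HWCAP values from the uapi header:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace-visible bitfields, as in uapi/asm/hwcap.h (assumed values). */
#define HWCAP_ASIMD	(1u << 1)
#define HWCAP2_SVE2	(1u << 1)

/* Kernel-internal numbering: bit index, with HWCAP2 offset by 32. */
#define khwcap(x)	__builtin_ctz(x)	/* stands in for const_ilog2 */
#define khwcap2(x)	(__builtin_ctz(x) + 32)

static uint64_t cpu_hwcaps;			/* one 64-bit feature set */

static void cpu_set_feature(unsigned int num) { cpu_hwcaps |= 1ull << num; }

int main(void)
{
	cpu_set_feature(khwcap(HWCAP_ASIMD));
	cpu_set_feature(khwcap2(HWCAP2_SVE2));

	/* Splitting back out into the two ELF hwcap words. */
	uint32_t elf_hwcap  = (uint32_t)cpu_hwcaps;
	uint32_t elf_hwcap2 = (uint32_t)(cpu_hwcaps >> 32);

	assert(elf_hwcap & HWCAP_ASIMD);
	assert(elf_hwcap2 & HWCAP2_SVE2);
	printf("hwcap=0x%x hwcap2=0x%x\n", elf_hwcap, elf_hwcap2);
	return 0;
}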
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index ee723835c1f4..b807cb9b517d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -121,10 +121,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
: "memory"); \
})
+#define __io_par(v) __iormb(v)
#define __iowmb() wmb()
-#define mmiowb() do { } while (0)
-
/*
* Relaxed I/O memory access primitives. These follow the Device memory
* ordering rules but do not guarantee any ordering relative to Normal memory
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 24692edf1a69..629963189085 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -18,7 +18,9 @@
#ifdef __KERNEL__
+#include <asm/alternative.h>
#include <asm/ptrace.h>
+#include <asm/sysreg.h>
/*
* Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
@@ -36,33 +38,27 @@
/*
* CPU interrupt mask handling.
*/
-static inline unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
- asm volatile(
- "mrs %0, daif // arch_local_irq_save\n"
- "msr daifset, #2"
- : "=r" (flags)
- :
- : "memory");
- return flags;
-}
-
static inline void arch_local_irq_enable(void)
{
- asm volatile(
- "msr daifclr, #2 // arch_local_irq_enable"
- :
+ asm volatile(ALTERNATIVE(
+ "msr daifclr, #2 // arch_local_irq_enable\n"
+ "nop",
+ __msr_s(SYS_ICC_PMR_EL1, "%0")
+ "dsb sy",
+ ARM64_HAS_IRQ_PRIO_MASKING)
:
+ : "r" ((unsigned long) GIC_PRIO_IRQON)
: "memory");
}
static inline void arch_local_irq_disable(void)
{
- asm volatile(
- "msr daifset, #2 // arch_local_irq_disable"
- :
+ asm volatile(ALTERNATIVE(
+ "msr daifset, #2 // arch_local_irq_disable",
+ __msr_s(SYS_ICC_PMR_EL1, "%0"),
+ ARM64_HAS_IRQ_PRIO_MASKING)
:
+ : "r" ((unsigned long) GIC_PRIO_IRQOFF)
: "memory");
}
@@ -71,12 +67,44 @@ static inline void arch_local_irq_disable(void)
*/
static inline unsigned long arch_local_save_flags(void)
{
+ unsigned long daif_bits;
unsigned long flags;
- asm volatile(
- "mrs %0, daif // arch_local_save_flags"
- : "=r" (flags)
- :
+
+ daif_bits = read_sysreg(daif);
+
+ /*
+ * The asm is logically equivalent to:
+ *
+ * if (system_uses_irq_prio_masking())
+ * flags = (daif_bits & PSR_I_BIT) ?
+ * GIC_PRIO_IRQOFF :
+ * read_sysreg_s(SYS_ICC_PMR_EL1);
+ * else
+ * flags = daif_bits;
+ */
+ asm volatile(ALTERNATIVE(
+ "mov %0, %1\n"
+ "nop\n"
+ "nop",
+ __mrs_s("%0", SYS_ICC_PMR_EL1)
+ "ands %1, %1, " __stringify(PSR_I_BIT) "\n"
+ "csel %0, %0, %2, eq",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "=&r" (flags), "+r" (daif_bits)
+ : "r" ((unsigned long) GIC_PRIO_IRQOFF)
: "memory");
+
+ return flags;
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = arch_local_save_flags();
+
+ arch_local_irq_disable();
+
return flags;
}
@@ -85,16 +113,32 @@ static inline unsigned long arch_local_save_flags(void)
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile(
- "msr daif, %0 // arch_local_irq_restore"
- :
- : "r" (flags)
- : "memory");
+ asm volatile(ALTERNATIVE(
+ "msr daif, %0\n"
+ "nop",
+ __msr_s(SYS_ICC_PMR_EL1, "%0")
+ "dsb sy",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "+r" (flags)
+ :
+ : "memory");
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return flags & PSR_I_BIT;
+ int res;
+
+ asm volatile(ALTERNATIVE(
+ "and %w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+ "nop",
+ "cmp %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
+ "cset %w0, ls",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "=&r" (res)
+ : "r" ((int) flags)
+ : "memory");
+
+ return res;
}
#endif
#endif
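With priority masking enabled, the flags word produced by arch_local_save_flags() holds a PMR value rather than a DAIF snapshot, so arch_irqs_disabled_flags() compares against GIC_PRIO_IRQOFF instead of testing PSR_I_BIT. A plain C rendering of the two ALTERNATIVE branches:

#include <stdbool.h>
#include <stdio.h>

#define PSR_I_BIT	0x80
#define GIC_PRIO_IRQON	0xf0
#define GIC_PRIO_IRQOFF	0x70

static bool uses_irq_prio_masking = true;

/* flags is either a DAIF snapshot or a PMR value, depending on the cap. */
static bool irqs_disabled_flags(unsigned long flags)
{
	if (uses_irq_prio_masking)
		return flags <= GIC_PRIO_IRQOFF;	/* "cmp ...; cset ..., ls" */
	return flags & PSR_I_BIT;			/* "and %w0, %w1, #PSR_I_BIT" */
}

int main(void)
{
	printf("%d %d\n", irqs_disabled_flags(GIC_PRIO_IRQOFF),
	       irqs_disabled_flags(GIC_PRIO_IRQON));	/* 1 0 */
	return 0;
}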
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index d5a44cf859e9..21721fbf44e7 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -54,8 +54,6 @@ void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
-int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr);
-int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr);
void kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 506386a3edde..d3842791e1c4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -77,6 +77,10 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
*/
if (!vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID3;
+
+ if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
+ vcpu_el1_is_32bit(vcpu))
+ vcpu->arch.hcr_el2 |= HCR_TID2;
}
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
@@ -331,6 +335,14 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
return ESR_ELx_SYS64_ISS_RT(esr);
}
+static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ return false;
+
+ return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index da3fc7324d68..a01fe087e022 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,12 +24,14 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#include <asm/smp_plat.h>
#include <asm/thread_info.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -57,16 +59,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
-struct kvm_arch {
+struct kvm_vmid {
/* The VMID generation used for the virt. memory system */
u64 vmid_gen;
u32 vmid;
+};
+
+struct kvm_arch {
+ struct kvm_vmid vmid;
/* stage2 entry level table */
pgd_t *pgd;
+ phys_addr_t pgd_phys;
- /* VTTBR value associated with above pgd and vmid */
- u64 vttbr;
/* VTCR_EL2 value for this VM */
u64 vtcr;
@@ -381,7 +386,36 @@ void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
u64 __kvm_call_hyp(void *hypfn, ...);
-#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
+/*
+ * The couple of isb() below are there to guarantee the same behaviour
+ * on VHE as on !VHE, where the eret to EL1 acts as a context
+ * synchronization event.
+ */
+#define kvm_call_hyp(f, ...) \
+ do { \
+ if (has_vhe()) { \
+ f(__VA_ARGS__); \
+ isb(); \
+ } else { \
+ __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define kvm_call_hyp_ret(f, ...) \
+ ({ \
+ typeof(f(__VA_ARGS__)) ret; \
+ \
+ if (has_vhe()) { \
+ ret = f(__VA_ARGS__); \
+ isb(); \
+ } else { \
+ ret = __kvm_call_hyp(kvm_ksym_ref(f), \
+ ##__VA_ARGS__); \
+ } \
+ \
+ ret; \
+ })
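kvm_call_hyp_ret() needs a GNU statement expression so the macro can branch on has_vhe() and still yield the callee's return value; the isb() gives the VHE path the same context synchronization that the eret to EL1 provides on !VHE. A compilable model of that shape, with a trivial stand-in for the __kvm_call_hyp() trap:

#include <stdio.h>

static int has_vhe_flag = 1;
static int has_vhe(void) { return has_vhe_flag; }
static void isb(void) { /* context-synchronization stand-in */ }

static int hyp_vcpu_run(int x) { return x + 1; }
static int call_via_trap(int (*f)(int), int x) { return f(x); /* models the hvc path */ }

/* Statement-expression version: direct call plus isb() on VHE, trap into
 * EL2 otherwise, evaluating to the callee's result either way. */
#define kvm_call_hyp_ret(f, x)				\
	({						\
		__typeof__(f(x)) __ret;			\
		if (has_vhe()) {			\
			__ret = f(x);			\
			isb();				\
		} else {				\
			__ret = call_via_trap(f, x);	\
		}					\
		__ret;					\
	})

int main(void)
{
	printf("%d\n", kvm_call_hyp_ret(hyp_vcpu_run, 41));	/* 42 */
	return 0;
}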
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
@@ -400,6 +434,13 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+ int cpu)
+{
+ /* The host's MPIDR is immutable, so let's set it up at boot time */
+ cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
+}
+
void __kvm_enable_ssbs(void);
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
@@ -485,10 +526,25 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
static inline void kvm_arm_vhe_guest_enter(void)
{
local_daif_mask();
+
+ /*
+ * Having IRQs masked via PMR when entering the guest means the GIC
+ * will not signal the CPU of interrupts of lower priority, and the
+ * only way to get out will be via guest exceptions.
+ * Naturally, we want to avoid this.
+ */
+ if (system_uses_irq_prio_masking()) {
+ gic_write_pmr(GIC_PRIO_IRQON);
+ dsb(sy);
+ }
}
static inline void kvm_arm_vhe_guest_exit(void)
{
+ /*
+ * local_daif_restore() takes care to properly restore PSTATE.DAIF
+ * and the GIC PMR if the host is using IRQ priorities.
+ */
local_daif_restore(DAIF_PROCCTX_NOIRQ);
/*
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index a80a7ef57325..c3060833b7a5 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -21,6 +21,7 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
+#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
#define __hyp_text __section(.hyp.text) notrace
@@ -29,7 +30,7 @@
({ \
u64 reg; \
asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
- "mrs_s %0, " __stringify(r##vh),\
+ __mrs_s("%0", r##vh), \
ARM64_HAS_VIRT_HOST_EXTN) \
: "=r" (reg)); \
reg; \
@@ -39,7 +40,7 @@
do { \
u64 __val = (u64)(v); \
asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
- "msr_s " __stringify(r##vh) ", %x0",\
+ __msr_s(r##vh, "%x0"), \
ARM64_HAS_VIRT_HOST_EXTN) \
: : "rZ" (__val)); \
} while (0)
@@ -163,7 +164,7 @@ void __noreturn __hyp_do_panic(unsigned long, ...);
static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
{
write_sysreg(kvm->arch.vtcr, vtcr_el2);
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
/*
* ARM erratum 1165522 requires the actual execution of the above
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 8af4b1befa42..ebeefcf835e8 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -138,7 +138,8 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
})
/*
- * We currently only support a 40bit IPA.
+ * We currently support using a VM-specified IPA size. For backward
+ * compatibility, the default IPA size is fixed to 40 bits.
*/
#define KVM_PHYS_SHIFT (40)
@@ -444,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
return ret;
}
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+ const void *data, unsigned long len)
+{
+ int srcu_idx = srcu_read_lock(&kvm->srcu);
+ int ret = kvm_write_guest(kvm, gpa, data, len);
+
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+ return ret;
+}
+
#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
* EL2 vectors can be mapped and rerouted in a number of ways,
@@ -591,9 +603,15 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
}
-static inline bool kvm_cpu_has_cnp(void)
+static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
{
- return system_supports_cnp();
+ struct kvm_vmid *vmid = &kvm->arch.vmid;
+ u64 vmid_field, baddr;
+ u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
+
+ baddr = kvm->arch.pgd_phys;
+ vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+ return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
#endif /* __ASSEMBLY__ */
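kvm_get_vttbr() now assembles VTTBR_EL2 at load time from the pieces kept in the VM: the stage-2 pgd address, the VMID shifted into its field, and the CnP hint when the system supports it. A standalone sketch of the composition, omitting the kvm_phys_to_vttbr() address formatting used for 52-bit PA configurations:

#include <stdint.h>
#include <stdio.h>

#define VTTBR_VMID_SHIFT 48
#define VTTBR_CNP_BIT    1ull		/* assumed: CnP is bit 0 */

struct kvm_vmid { uint64_t vmid; };

/* Mirrors kvm_get_vttbr(): baddr | VMID field | CnP. */
static uint64_t get_vttbr(uint64_t pgd_phys, const struct kvm_vmid *vmid,
			  int supports_cnp)
{
	uint64_t vmid_field = vmid->vmid << VTTBR_VMID_SHIFT;
	uint64_t cnp = supports_cnp ? VTTBR_CNP_BIT : 0;

	return pgd_phys | vmid_field | cnp;
}

int main(void)
{
	struct kvm_vmid vmid = { .vmid = 0x2a };

	printf("vttbr=0x%016llx\n",
	       (unsigned long long)get_vttbr(0x40000000ull, &vmid, 1));
	return 0;
}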
diff --git a/arch/arm64/include/asm/memblock.h b/arch/arm64/include/asm/memblock.h
deleted file mode 100644
index 6afeed2467f1..000000000000
--- a/arch/arm64/include/asm/memblock.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_MEMBLOCK_H
-#define __ASM_MEMBLOCK_H
-
-extern void arm64_memblock_init(void);
-
-#endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b01ef0180a03..2cb8248fa2c8 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -302,7 +302,7 @@ static inline void *phys_to_virt(phys_addr_t x)
*/
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
+#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
@@ -312,8 +312,9 @@ static inline void *phys_to_virt(phys_addr_t x)
#define page_to_virt(page) ({ \
unsigned long __addr = \
((__page_to_voff(page)) | PAGE_OFFSET); \
- __addr = __tag_set(__addr, page_kasan_tag(page)); \
- ((void *)__addr); \
+ unsigned long __addr_tag = \
+ __tag_set(__addr, page_kasan_tag(page)); \
+ ((void *)__addr_tag); \
})
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3e8063f4f9d3..67ef25d037ea 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -129,6 +129,7 @@ static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
static inline void arm64_apply_bp_hardening(void) { }
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 905e1bb0e7bd..cd9f4e9d04d3 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
struct plt_entry get_plt_entry(u64 dst, void *pc);
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
+static inline bool plt_entry_is_initialized(const struct plt_entry *e)
+{
+ return e->adrp || e->add || e->br;
+}
+
#endif /* __ASM_MODULE_H */
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 52fa47c73bf0..dabba4b2c61f 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -33,12 +33,22 @@
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pmd_t *)__get_free_page(PGALLOC_GFP);
+ struct page *page;
+
+ page = alloc_page(PGALLOC_GFP);
+ if (!page)
+ return NULL;
+ if (!pgtable_pmd_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ return page_address(page);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
{
BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
+ pgtable_pmd_page_dtor(virt_to_page(pmdp));
free_page((unsigned long)pmdp);
}
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index e9b0a7d75184..a69259cc1f16 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -302,6 +302,7 @@
#define TCR_TBI1 (UL(1) << 38)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
+#define TCR_NFD0 (UL(1) << 53)
#define TCR_NFD1 (UL(1) << 54)
/*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index de70c1eabf33..2c41b04708fe 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -478,6 +478,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
return __pmd_to_phys(pmd);
}
+static inline void pte_unmap(pte_t *pte) { }
+
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -485,9 +487,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index 15d49515efdd..d328540cb85e 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POINTER_AUTH_H
#define __ASM_POINTER_AUTH_H
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f1a7ab18faf3..fcd0e691b1ea 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -57,7 +57,15 @@
#define TASK_SIZE_64 (UL(1) << vabits_user)
#ifdef CONFIG_COMPAT
+#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
+/*
+ * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
+ * by the compat vectors page.
+ */
#define TASK_SIZE_32 UL(0x100000000)
+#else
+#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE)
+#endif /* CONFIG_ARM64_64K_PAGES */
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
@@ -191,6 +199,9 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
memset(regs, 0, sizeof(*regs));
forget_syscall(regs);
regs->pc = pc;
+
+ if (system_uses_irq_prio_masking())
+ regs->pmr_save = GIC_PRIO_IRQON;
}
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 6afd8476c60c..9e948a93d26c 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -34,13 +34,10 @@ struct ptdump_info {
void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
-int ptdump_debugfs_register(struct ptdump_info *info, const char *name);
+void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
-static inline int ptdump_debugfs_register(struct ptdump_info *info,
- const char *name)
-{
- return 0;
-}
+static inline void ptdump_debugfs_register(struct ptdump_info *info,
+ const char *name) { }
#endif
void ptdump_check_wx(void);
#endif /* CONFIG_ARM64_PTDUMP_CORE */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index fce22c4b2f73..b2de32939ada 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -19,12 +19,26 @@
#ifndef __ASM_PTRACE_H
#define __ASM_PTRACE_H
+#include <asm/cpufeature.h>
+
#include <uapi/asm/ptrace.h>
/* Current Exception Level values, as contained in CurrentEL */
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)
+/*
+ * PMR values used to mask/unmask interrupts.
+ *
+ * GIC priority masking works as follows: if an IRQ's priority is a higher value
+ * than the value held in PMR, that IRQ is masked. Lowering the value of PMR
+ * means masking more IRQs (or at least that the same IRQs remain masked).
+ *
+ * To mask interrupts, we clear the most significant bit of PMR.
+ */
+#define GIC_PRIO_IRQON 0xf0
+#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
+
/* Additional SPSR bits not exposed in the UABI */
#define PSR_IL_BIT (1 << 20)
@@ -167,7 +181,8 @@ struct pt_regs {
#endif
u64 orig_addr_limit;
- u64 unused; // maintain 16 byte alignment
+ /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
+ u64 pmr_save;
u64 stackframe[2];
};
@@ -202,8 +217,13 @@ static inline void forget_syscall(struct pt_regs *regs)
#define processor_mode(regs) \
((regs)->pstate & PSR_MODE_MASK)
-#define interrupts_enabled(regs) \
- (!((regs)->pstate & PSR_I_BIT))
+#define irqs_priority_unmasked(regs) \
+ (system_uses_irq_prio_masking() ? \
+ (regs)->pmr_save == GIC_PRIO_IRQON : \
+ true)
+
+#define interrupts_enabled(regs) \
+ (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs))
#define fast_interrupts_enabled(regs) \
(!((regs)->pstate & PSR_F_BIT))
@@ -285,6 +305,28 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->regs[0];
}
+/**
+ * regs_get_kernel_argument() - get Nth function argument in kernel
+ * @regs: pt_regs of that context
+ * @n: function argument number (starting from 0)
+ *
+ * regs_get_kernel_argument() returns the @n'th argument of the function call.
+ *
+ * Note that this chooses the most likely register mapping. In very rare
+ * cases this may not return correct data, for example, if one of the
+ * function parameters is 16 bytes or bigger. In such cases, we cannot
+ * access the parameter correctly and the register assignment of
+ * subsequent parameters will be shifted.
+ */
+static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
+ unsigned int n)
+{
+#define NR_REG_ARGUMENTS 8
+ if (n < NR_REG_ARGUMENTS)
+ return pt_regs_read_reg(regs, n);
+ return 0;
+}
+
/* We must avoid circular header include via sched.h */
struct task_struct;
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
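regs_get_kernel_argument() exists mainly for kprobes-based tracing. A hedged sketch of a pre-handler using it — the probed symbol and argument index are placeholders, and module boilerplate is omitted:

    #include <linux/kprobes.h>
    #include <linux/ptrace.h>

    static int pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
        /* Second argument (n == 1) of the probed function, taken from x1. */
        unsigned long arg1 = regs_get_kernel_argument(regs, 1);

        pr_info("%s: arg1=0x%lx\n", p->symbol_name, arg1);
        return 0;
    }

    static struct kprobe kp = {
        .symbol_name = "do_sys_open",   /* placeholder target */
        .pre_handler = pre_handler,
    };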
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index ffe47d766c25..63e0b92a5fbb 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2017 Arm Ltd.
#ifndef __ASM_SDEI_H
#define __ASM_SDEI_H
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index 81abea0b7650..58e288aaf0ba 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -20,8 +20,6 @@
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
-
int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs);
int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index 5412fa40825e..915809e4ac32 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -119,7 +119,7 @@ static inline pud_t *stage2_pud_offset(struct kvm *kvm,
static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
{
if (kvm_stage2_has_pud(kvm))
- pud_free(NULL, pud);
+ free_page((unsigned long)pud);
}
static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
@@ -192,7 +192,7 @@ static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
{
if (kvm_stage2_has_pmd(kvm))
- pmd_free(NULL, pmd);
+ free_page((unsigned long)pmd);
}
static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index ad8be16a39c9..a179df3674a1 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -65,52 +65,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- if (n == 0)
- return;
-
- if (i + n > SYSCALL_MAX_ARGS) {
- unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
- unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
- pr_warning("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- memset(args_bad, 0, n_bad * sizeof(args[0]));
- }
-
- if (i == 0) {
- args[0] = regs->orig_x0;
- args++;
- i++;
- n--;
- }
-
- memcpy(args, &regs->regs[i], n * sizeof(args[0]));
+ args[0] = regs->orig_x0;
+ args++;
+
+ memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- if (n == 0)
- return;
-
- if (i + n > SYSCALL_MAX_ARGS) {
- pr_warning("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- n = SYSCALL_MAX_ARGS - i;
- }
-
- if (i == 0) {
- regs->orig_x0 = args[0];
- args++;
- i++;
- n--;
- }
-
- memcpy(&regs->regs[i], args, n * sizeof(args[0]));
+ regs->orig_x0 = args[0];
+ args++;
+
+ memcpy(&regs->regs[1], args, 5 * sizeof(args[0]));
}
/*
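The rewritten helpers rely on the arm64 syscall ABI fixing the argument count at six: x0 at entry is preserved in orig_x0 (x0 itself is later overwritten by the return value), while x1-x5 still hold the remaining arguments. An open-coded sketch of what the new syscall_get_arguments() does:

    /* Equivalent open-coded form of the new syscall_get_arguments(): */
    static void get_args_sketch(struct pt_regs *regs, unsigned long args[6])
    {
        args[0] = regs->orig_x0;   /* x0 as at syscall entry */
        args[1] = regs->regs[1];
        args[2] = regs->regs[2];
        args[3] = regs->regs[3];
        args[4] = regs->regs[4];
        args[5] = regs->regs[5];
    }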
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 72dc4c011014..3f7b917e8f3a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -361,6 +361,7 @@
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
+#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
@@ -392,6 +393,10 @@
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
+#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
+#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
+#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
+
#define __PMEV_op2(n) ((n) & 0x7)
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
@@ -426,7 +431,7 @@
#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
-#define SYS_ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
+#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
@@ -601,6 +606,20 @@
#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
+/* id_aa64zfr0 */
+#define ID_AA64ZFR0_SM4_SHIFT 40
+#define ID_AA64ZFR0_SHA3_SHIFT 32
+#define ID_AA64ZFR0_BITPERM_SHIFT 16
+#define ID_AA64ZFR0_AES_SHIFT 4
+#define ID_AA64ZFR0_SVEVER_SHIFT 0
+
+#define ID_AA64ZFR0_SM4 0x1
+#define ID_AA64ZFR0_SHA3 0x1
+#define ID_AA64ZFR0_BITPERM 0x1
+#define ID_AA64ZFR0_AES 0x1
+#define ID_AA64ZFR0_AES_PMULL 0x2
+#define ID_AA64ZFR0_SVEVER_SVE2 0x1
+
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
@@ -741,20 +760,39 @@
#include <linux/build_bug.h>
#include <linux/types.h>
-asm(
-" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
-" .equ .L__reg_num_x\\num, \\num\n"
-" .endr\n"
+#define __DEFINE_MRS_MSR_S_REGNUM \
+" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
+" .equ .L__reg_num_x\\num, \\num\n" \
+" .endr\n" \
" .equ .L__reg_num_xzr, 31\n"
-"\n"
-" .macro mrs_s, rt, sreg\n"
- __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
+
+#define DEFINE_MRS_S \
+ __DEFINE_MRS_MSR_S_REGNUM \
+" .macro mrs_s, rt, sreg\n" \
+ __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \
" .endm\n"
-"\n"
-" .macro msr_s, sreg, rt\n"
- __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
+
+#define DEFINE_MSR_S \
+ __DEFINE_MRS_MSR_S_REGNUM \
+" .macro msr_s, sreg, rt\n" \
+ __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \
" .endm\n"
-);
+
+#define UNDEFINE_MRS_S \
+" .purgem mrs_s\n"
+
+#define UNDEFINE_MSR_S \
+" .purgem msr_s\n"
+
+#define __mrs_s(v, r) \
+ DEFINE_MRS_S \
+" mrs_s " v ", " __stringify(r) "\n" \
+ UNDEFINE_MRS_S
+
+#define __msr_s(r, v) \
+ DEFINE_MSR_S \
+" msr_s " __stringify(r) ", " v "\n" \
+ UNDEFINE_MSR_S
/*
* Unlike read_cpuid, calls to read_sysreg are never expected to be
@@ -782,13 +820,13 @@ asm(
*/
#define read_sysreg_s(r) ({ \
u64 __val; \
- asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \
+ asm volatile(__mrs_s("%0", r) : "=r" (__val)); \
__val; \
})
#define write_sysreg_s(v, r) do { \
u64 __val = (u64)(v); \
- asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
+ asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \
} while (0)
/*
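The macro rework above replaces the single file-scope asm() block with a define/use/purge sequence emitted at every use site, so each inline asm statement carries its own mrs_s/msr_s definitions instead of relying on a global asm() having been assembled first. A hedged usage sketch, reading an ID register introduced earlier in this patch:

    /* read_sysreg_s() works for any sysreg encoding, even ones the
     * assembler has no name for; SYS_ID_AA64ZFR0_EL1 is defined above. */
    static inline u64 read_sve_features(void)
    {
        return read_sysreg_s(SYS_ID_AA64ZFR0_EL1);
    }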
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 32693f34f431..fca95424e873 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -41,7 +41,6 @@ void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
int sig, int code, const char *name);
struct mm_struct;
-extern void show_pte(unsigned long addr);
extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index bbca68b54732..eb3ef73e07cf 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -79,7 +79,6 @@ void arch_release_task_struct(struct task_struct *tsk);
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
- * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
*/
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 106fdc951b6e..a287189ca8b4 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -27,6 +27,7 @@ static inline void __tlb_remove_table(void *_table)
free_page_and_swap_cache((struct page *)_table);
}
+#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);
#include <asm-generic/tlb.h>
@@ -62,7 +63,10 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
- tlb_remove_table(tlb, virt_to_page(pmdp));
+ struct page *page = virt_to_page(pmdp);
+
+ pgtable_pmd_page_dtor(page);
+ tlb_remove_table(tlb, page);
}
#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index f1e5c9165809..e5d5f31c6d36 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -267,7 +267,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT))
-#define __get_user_err(x, ptr, err) \
+#define __raw_get_user(x, ptr, err) \
do { \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
@@ -296,28 +296,22 @@ do { \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user_check(x, ptr, err) \
-({ \
+#define __get_user_error(x, ptr, err) \
+do { \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
- __get_user_err((x), __p, (err)); \
+ __raw_get_user((x), __p, (err)); \
} else { \
(x) = 0; (err) = -EFAULT; \
} \
-})
-
-#define __get_user_error(x, ptr, err) \
-({ \
- __get_user_check((x), (ptr), (err)); \
- (void)0; \
-})
+} while (0)
#define __get_user(x, ptr) \
({ \
int __gu_err = 0; \
- __get_user_check((x), (ptr), __gu_err); \
+ __get_user_error((x), (ptr), __gu_err); \
__gu_err; \
})
@@ -337,7 +331,7 @@ do { \
: "+r" (err) \
: "r" (x), "r" (addr), "i" (-EFAULT))
-#define __put_user_err(x, ptr, err) \
+#define __raw_put_user(x, ptr, err) \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
@@ -365,28 +359,22 @@ do { \
uaccess_disable_not_uao(); \
} while (0)
-#define __put_user_check(x, ptr, err) \
-({ \
+#define __put_user_error(x, ptr, err) \
+do { \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
- __put_user_err((x), __p, (err)); \
+ __raw_put_user((x), __p, (err)); \
} else { \
(err) = -EFAULT; \
} \
-})
-
-#define __put_user_error(x, ptr, err) \
-({ \
- __put_user_check((x), (ptr), (err)); \
- (void)0; \
-})
+} while (0)
#define __put_user(x, ptr) \
({ \
int __pu_err = 0; \
- __put_user_check((x), (ptr), __pu_err); \
+ __put_user_error((x), (ptr), __pu_err); \
__pu_err; \
})
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index d1dd93436e1e..f2a83ff6b73c 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 424
+#define __NR_compat_syscalls 428
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 5590f2623690..23f1a44acada 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -866,6 +866,14 @@ __SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64)
__SYSCALL(__NR_futex_time64, sys_futex)
#define __NR_sched_rr_get_interval_time64 423
__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
+#define __NR_io_uring_setup 425
+__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
+#define __NR_io_uring_enter 426
+__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
+#define __NR_io_uring_register 427
+__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index 2b9a63771eda..f89263c8e11a 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -38,6 +38,7 @@ struct vdso_data {
__u32 tz_minuteswest; /* Whacky timezone stuff */
__u32 tz_dsttime;
__u32 use_syscall;
+ __u32 hrtimer_res;
};
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/vmap_stack.h b/arch/arm64/include/asm/vmap_stack.h
index 0b5ec6e08c10..0a12115d9638 100644
--- a/arch/arm64/include/asm/vmap_stack.h
+++ b/arch/arm64/include/asm/vmap_stack.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2017 Arm Ltd.
#ifndef __ASM_VMAP_STACK_H
#define __ASM_VMAP_STACK_H
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 87eea29b24ab..602d137932dc 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 5f0750c2199c..1a772b162191 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -18,7 +18,7 @@
#define _UAPI__ASM_HWCAP_H
/*
- * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ * HWCAP flags - for AT_HWCAP
*/
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
@@ -53,4 +53,15 @@
#define HWCAP_PACA (1 << 30)
#define HWCAP_PACG (1UL << 31)
+/*
+ * HWCAP2 flags - for AT_HWCAP2
+ */
+#define HWCAP2_DCPODP (1 << 0)
+#define HWCAP2_SVE2 (1 << 1)
+#define HWCAP2_SVEAES (1 << 2)
+#define HWCAP2_SVEPMULL (1 << 3)
+#define HWCAP2_SVEBITPERM (1 << 4)
+#define HWCAP2_SVESHA3 (1 << 5)
+#define HWCAP2_SVESM4 (1 << 6)
+
#endif /* _UAPI__ASM_HWCAP_H */
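Userspace picks these new bits up through the second auxiliary-vector word. A minimal, runnable sketch (the fallback defines only cover builds against older headers):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef AT_HWCAP2
    #define AT_HWCAP2 26
    #endif
    #ifndef HWCAP2_SVE2
    #define HWCAP2_SVE2 (1 << 1)
    #endif

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("SVE2 %ssupported\n", (hwcap2 & HWCAP2_SVE2) ? "" : "not ");
        return 0;
    }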
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 28d77c9ed531..d78623acb649 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -233,6 +233,19 @@ struct user_pac_mask {
__u64 insn_mask;
};
+/* pointer authentication keys (NT_ARM_PACA_KEYS, NT_ARM_PACG_KEYS) */
+
+struct user_pac_address_keys {
+ __uint128_t apiakey;
+ __uint128_t apibkey;
+ __uint128_t apdakey;
+ __uint128_t apdbkey;
+};
+
+struct user_pac_generic_keys {
+ __uint128_t apgakey;
+};
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI__ASM_PTRACE_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index cd434d0719c1..9e7dcb2c31c7 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -7,9 +7,9 @@ CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_armv8_deprecated.o := -I$(src)
-CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_insn.o = -pg
-CFLAGS_REMOVE_return_address.o = -pg
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
# Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
@@ -27,8 +27,9 @@ OBJCOPYFLAGS := --prefix-symbols=__efistub_
$(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)
-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o
+obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
+ sigreturn32.o sys_compat.o
+obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index b5d603992d40..a9b467763153 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -32,13 +32,23 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
-int alternatives_applied;
+static int all_alternatives_applied;
+
+static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
struct alt_region {
struct alt_instr *begin;
struct alt_instr *end;
};
+bool alternative_is_applied(u16 cpufeature)
+{
+ if (WARN_ON(cpufeature >= ARM64_NCAPS))
+ return false;
+
+ return test_bit(cpufeature, applied_alternatives);
+}
+
/*
* Check if the target PC is within an alternative block.
*/
@@ -145,7 +155,8 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
} while (cur += d_size, cur < end);
}
-static void __apply_alternatives(void *alt_region, bool is_module)
+static void __apply_alternatives(void *alt_region, bool is_module,
+ unsigned long *feature_mask)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
@@ -155,6 +166,9 @@ static void __apply_alternatives(void *alt_region, bool is_module)
for (alt = region->begin; alt < region->end; alt++) {
int nr_inst;
+ if (!test_bit(alt->cpufeature, feature_mask))
+ continue;
+
/* Use ARM64_CB_PATCH as an unconditional patch */
if (alt->cpufeature < ARM64_CB_PATCH &&
!cpus_have_cap(alt->cpufeature))
@@ -192,6 +206,12 @@ static void __apply_alternatives(void *alt_region, bool is_module)
dsb(ish);
__flush_icache_all();
isb();
+
+ /* Ignore ARM64_CB bit from feature mask */
+ bitmap_or(applied_alternatives, applied_alternatives,
+ feature_mask, ARM64_NCAPS);
+ bitmap_and(applied_alternatives, applied_alternatives,
+ cpu_hwcaps, ARM64_NCAPS);
}
}
@@ -208,14 +228,19 @@ static int __apply_alternatives_multi_stop(void *unused)
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
- while (!READ_ONCE(alternatives_applied))
+ while (!READ_ONCE(all_alternatives_applied))
cpu_relax();
isb();
} else {
- BUG_ON(alternatives_applied);
- __apply_alternatives(&region, false);
+ DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
+
+ bitmap_complement(remaining_capabilities, boot_capabilities,
+ ARM64_NPATCHABLE);
+
+ BUG_ON(all_alternatives_applied);
+ __apply_alternatives(&region, false, remaining_capabilities);
/* Barriers provided by the cache flushing */
- WRITE_ONCE(alternatives_applied, 1);
+ WRITE_ONCE(all_alternatives_applied, 1);
}
return 0;
@@ -227,6 +252,24 @@ void __init apply_alternatives_all(void)
stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
+/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+void __init apply_boot_alternatives(void)
+{
+ struct alt_region region = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+ };
+
+ /* If called on a non-boot CPU, things could go wrong */
+ WARN_ON(smp_processor_id() != 0);
+
+ __apply_alternatives(&region, false, &boot_capabilities[0]);
+}
+
#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length)
{
@@ -234,7 +277,10 @@ void apply_alternatives_module(void *start, size_t length)
.begin = start,
.end = start + length,
};
+ DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+
+ bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
- __apply_alternatives(&region, true);
+ __apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif
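The net effect of the alternative.c changes is a two-phase patching scheme tracked per capability: apply_boot_alternatives() patches only the boot-detected capabilities, and the later stop_machine() pass patches the complement, with applied_alternatives recording what has been done. A standalone sketch of that mask split, with a made-up bitmap:

    #include <stdio.h>

    int main(void)
    {
        /* made-up capability bitmap detected on the boot CPU */
        unsigned long boot_caps = 0x29UL;
        /* what __apply_alternatives_multi_stop() is left to patch */
        unsigned long remaining = ~boot_caps;   /* bitmap_complement() */

        printf("patched at boot:            %#lx\n", boot_caps);
        printf("patched under stop_machine: %#lx\n", remaining);
        return 0;
    }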
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 65b8afc84466..e10e2a5d9ddc 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -53,13 +53,9 @@ int main(void)
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();
DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
- DEFINE(S_X1, offsetof(struct pt_regs, regs[1]));
DEFINE(S_X2, offsetof(struct pt_regs, regs[2]));
- DEFINE(S_X3, offsetof(struct pt_regs, regs[3]));
DEFINE(S_X4, offsetof(struct pt_regs, regs[4]));
- DEFINE(S_X5, offsetof(struct pt_regs, regs[5]));
DEFINE(S_X6, offsetof(struct pt_regs, regs[6]));
- DEFINE(S_X7, offsetof(struct pt_regs, regs[7]));
DEFINE(S_X8, offsetof(struct pt_regs, regs[8]));
DEFINE(S_X10, offsetof(struct pt_regs, regs[10]));
DEFINE(S_X12, offsetof(struct pt_regs, regs[12]));
@@ -73,14 +69,11 @@ int main(void)
DEFINE(S_X28, offsetof(struct pt_regs, regs[28]));
DEFINE(S_LR, offsetof(struct pt_regs, regs[30]));
DEFINE(S_SP, offsetof(struct pt_regs, sp));
-#ifdef CONFIG_COMPAT
- DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp));
-#endif
DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
DEFINE(S_PC, offsetof(struct pt_regs, pc));
- DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
+ DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
@@ -93,7 +86,6 @@ int main(void)
BLANK();
DEFINE(PAGE_SZ, PAGE_SIZE);
BLANK();
- DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
@@ -102,7 +94,7 @@ int main(void)
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+ DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
@@ -110,25 +102,18 @@ int main(void)
BLANK();
DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last));
DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec));
- DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec));
DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec));
- DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec));
- DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec));
DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count));
DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult));
- DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult));
DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift));
DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest));
- DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall));
BLANK();
DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec));
- DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec));
- DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec));
BLANK();
DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
@@ -142,13 +127,9 @@ int main(void)
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
- DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
- DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
- DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
#endif
#ifdef CONFIG_CPU_PM
- DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 9950bb0cbd52..e88d4e7bdfc7 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -19,6 +19,7 @@
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
+#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
@@ -109,7 +110,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -131,9 +131,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
{
static DEFINE_RAW_SPINLOCK(bp_lock);
int cpu, slot = -1;
@@ -169,7 +169,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
#define __smccc_workaround_1_smc_start NULL
#define __smccc_workaround_1_smc_end NULL
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
@@ -177,23 +177,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
}
#endif /* CONFIG_KVM_INDIRECT_VECTORS */
-static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
- bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
-{
- u64 pfr0;
-
- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
- return;
-
- pfr0 = read_cpuid(ID_AA64PFR0_EL1);
- if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
- return;
-
- __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>
@@ -220,60 +203,83 @@ static void qcom_link_stack_sanitization(void)
: "=&r" (tmp));
}
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+static bool __nospectre_v2;
+static int __init parse_nospectre_v2(char *str)
+{
+ __nospectre_v2 = true;
+ return 0;
+}
+early_param("nospectre_v2", parse_nospectre_v2);
+
+/*
+ * -1: No workaround
+ * 0: No workaround required
+ * 1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
{
bp_hardening_cb_t cb;
void *smccc_start, *smccc_end;
struct arm_smccc_res res;
u32 midr = read_cpuid_id();
- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
- return;
-
if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
- return;
+ return -1;
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
- return;
- cb = call_hvc_arch_workaround_1;
- /* This is a guest, no need to patch KVM vectors */
- smccc_start = NULL;
- smccc_end = NULL;
+ switch ((int)res.a0) {
+ case 1:
+ /* Firmware says we're just fine */
+ return 0;
+ case 0:
+ cb = call_hvc_arch_workaround_1;
+ /* This is a guest, no need to patch KVM vectors */
+ smccc_start = NULL;
+ smccc_end = NULL;
+ break;
+ default:
+ return -1;
+ }
break;
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
- return;
- cb = call_smc_arch_workaround_1;
- smccc_start = __smccc_workaround_1_smc_start;
- smccc_end = __smccc_workaround_1_smc_end;
+ switch ((int)res.a0) {
+ case 1:
+ /* Firmware says we're just fine */
+ return 0;
+ case 0:
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
+ smccc_end = __smccc_workaround_1_smc_end;
+ break;
+ default:
+ return -1;
+ }
break;
default:
- return;
+ return -1;
}
if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
cb = qcom_link_stack_sanitization;
- install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+ if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
+ install_bp_hardening_cb(cb, smccc_start, smccc_end);
- return;
+ return 1;
}
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
-#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
static const struct ssbd_options {
const char *str;
@@ -343,6 +349,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
void arm64_set_ssbd_mitigation(bool state)
{
+ if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+ pr_info_once("SSBD disabled by kernel configuration\n");
+ return;
+ }
+
if (this_cpu_has_cap(ARM64_SSBS)) {
if (state)
asm volatile(SET_PSTATE_SSBS(0));
@@ -372,16 +383,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
struct arm_smccc_res res;
bool required = true;
s32 val;
+ bool this_cpu_safe = false;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ if (cpu_mitigations_off())
+ ssbd_state = ARM64_SSBD_FORCE_DISABLE;
+
+ /* delay setting __ssb_safe until we get a firmware response */
+ if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+ this_cpu_safe = true;
+
if (this_cpu_has_cap(ARM64_SSBS)) {
+ if (!this_cpu_safe)
+ __ssb_safe = false;
required = false;
goto out_printmsg;
}
if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
ssbd_state = ARM64_SSBD_UNKNOWN;
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
}
@@ -398,6 +421,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
default:
ssbd_state = ARM64_SSBD_UNKNOWN;
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
}
@@ -406,14 +431,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
switch (val) {
case SMCCC_RET_NOT_SUPPORTED:
ssbd_state = ARM64_SSBD_UNKNOWN;
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
+ /* machines with mixed mitigation requirements must not return this */
case SMCCC_RET_NOT_REQUIRED:
pr_info_once("%s mitigation not required\n", entry->desc);
ssbd_state = ARM64_SSBD_MITIGATED;
return false;
case SMCCC_RET_SUCCESS:
+ __ssb_safe = false;
required = true;
break;
@@ -423,6 +452,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
default:
WARN_ON(1);
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
}
@@ -462,7 +493,14 @@ out_printmsg:
return required;
}
-#endif /* CONFIG_ARM64_SSBD */
+
+/* known invulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ {},
+};
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
@@ -507,26 +545,67 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+static bool __spectrev2_safe = true;
/*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
*/
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
- MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
- MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
- MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
- {},
+static const struct midr_range spectre_v2_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ { /* sentinel */ }
};
-#endif
+/*
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
+ */
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ int need_wa;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ /* If the CPU has CSV2 set, we're safe */
+ if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+ ID_AA64PFR0_CSV2_SHIFT))
+ return false;
+
+ /* Alternatively, we have a list of unaffected CPUs */
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+ return false;
+
+ /* Fallback to firmware detection */
+ need_wa = detect_harden_bp_fw();
+ if (!need_wa)
+ return false;
+
+ __spectrev2_safe = false;
+
+ if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+ pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+ __hardenbp_enab = false;
+ return false;
+ }
+
+ /* forced off */
+ if (__nospectre_v2 || cpu_mitigations_off()) {
+ pr_info_once("spectrev2 mitigation disabled by command line option\n");
+ __hardenbp_enab = false;
+ return false;
+ }
+
+ if (need_wa < 0) {
+ pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+ __hardenbp_enab = false;
+ }
+
+ return (need_wa > 0);
+}
#ifdef CONFIG_HARDEN_EL2_VECTORS
@@ -603,6 +682,16 @@ static const struct midr_range workaround_clean_cache[] = {
};
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+static const struct midr_range erratum_1188873_list[] = {
+ /* Cortex-A76 r0p0 to r2p0 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ /* Neoverse-N1 r0p0 to r2p0 */
+ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -701,13 +790,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
},
#endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
- .cpu_enable = enable_smccc_arch_workaround_1,
- ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = check_branch_predictor,
},
-#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
{
.desc = "EL2 vector hardening",
@@ -715,20 +802,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
},
#endif
-#ifdef CONFIG_ARM64_SSBD
{
.desc = "Speculative Store Bypass Disable",
.capability = ARM64_SSBD,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_ssbd_mitigation,
+ .midr_range_list = arm64_ssb_cpus,
},
-#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
{
- /* Cortex-A76 r0p0 to r2p0 */
.desc = "ARM erratum 1188873",
.capability = ARM64_WORKAROUND_1188873,
- ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
@@ -742,3 +827,38 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
}
};
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (__spectrev2_safe)
+ return sprintf(buf, "Not affected\n");
+
+ if (__hardenbp_enab)
+ return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (__ssb_safe)
+ return sprintf(buf, "Not affected\n");
+
+ switch (ssbd_state) {
+ case ARM64_SSBD_KERNEL:
+ case ARM64_SSBD_FORCE_ENABLE:
+ if (IS_ENABLED(CONFIG_ARM64_SSBD))
+ return sprintf(buf,
+ "Mitigation: Speculative Store Bypass disabled via prctl\n");
+ }
+
+ return sprintf(buf, "Vulnerable\n");
+}
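These cpu_show_*() hooks back the generic sysfs vulnerabilities interface. A quick, runnable way to exercise them from userspace (the output depends on the hardware and firmware):

    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/devices/system/cpu/vulnerabilities/spectre_v2";
        char line[128];
        FILE *f = fopen(path, "r");

        if (f && fgets(line, sizeof(line), f))
            printf("%s: %s", path, line);   /* e.g. "Not affected" */
        if (f)
            fclose(f);
        return 0;
    }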
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index ea001241bdd4..00f8b8612b69 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
pr_err("%pOF: missing enable-method property\n",
dn);
}
+ of_node_put(dn);
} else {
enable_method = acpi_get_enable_method(cpu);
if (!enable_method) {
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f6d84e2c92fe..2b807f129e60 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -25,6 +25,7 @@
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -35,8 +36,8 @@
#include <asm/traps.h>
#include <asm/virt.h>
-unsigned long elf_hwcap __read_mostly;
-EXPORT_SYMBOL_GPL(elf_hwcap);
+/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
+static unsigned long elf_hwcap __read_mostly;
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT \
@@ -54,6 +55,9 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
+/* Need also bit for ARM64_CB_PATCH */
+DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+
/*
* Flag to indicate if we have computed the system wide
* capabilities based on the boot time active CPUs. This
@@ -181,6 +185,15 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
@@ -389,7 +402,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
- ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
+ ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -944,7 +957,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
return has_cpuid_feature(entry, scope);
}
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __meltdown_safe = true;
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -960,9 +973,20 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
{ /* sentinel */ }
};
- char const *str = "command line option";
+ char const *str = "kpti command line option";
+ bool meltdown_safe;
+
+ meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+ /* Defer to CPU feature registers */
+ if (has_cpuid_feature(entry, scope))
+ meltdown_safe = true;
+
+ if (!meltdown_safe)
+ __meltdown_safe = false;
/*
* For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -974,6 +998,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
__kpti_forced = -1;
}
+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+ if (!__kpti_forced) {
+ str = "KASLR";
+ __kpti_forced = 1;
+ }
+ }
+
+ if (cpu_mitigations_off() && !__kpti_forced) {
+ str = "mitigations=off";
+ __kpti_forced = -1;
+ }
+
+ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+ pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+ return false;
+ }
+
/* Forced? */
if (__kpti_forced) {
pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -981,18 +1023,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
return __kpti_forced > 0;
}
- /* Useful for KASLR robustness */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return kaslr_offset() > 0;
-
- /* Don't force KPTI for CPUs that are not vulnerable */
- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
- return false;
-
- /* Defer to CPU feature registers */
- return !has_cpuid_feature(entry, scope);
+ return !meltdown_safe;
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
@@ -1022,6 +1056,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
return;
}
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
static int __init parse_kpti(char *str)
{
@@ -1035,7 +1075,6 @@ static int __init parse_kpti(char *str)
return 0;
}
early_param("kpti", parse_kpti);
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
#ifdef CONFIG_ARM64_HW_AFDBM
static inline void __cpu_enable_hw_dbm(void)
@@ -1118,7 +1157,7 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
* that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
* do anything here.
*/
- if (!alternatives_applied)
+ if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}
#endif
@@ -1203,11 +1242,27 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
}
#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+static bool enable_pseudo_nmi;
+
+static int __init early_enable_pseudo_nmi(char *p)
+{
+ return strtobool(p, &enable_pseudo_nmi);
+}
+early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
+
+static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
+}
+#endif
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_GIC_SHIFT,
@@ -1286,7 +1341,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64PFR0_EL0_SHIFT,
.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
},
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
{
.desc = "Kernel page table isolation (KPTI)",
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1302,7 +1356,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = unmap_kernel_at_el0,
.cpu_enable = kpti_install_ng_mappings,
},
-#endif
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
@@ -1320,6 +1373,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64ISAR1_DPB_SHIFT,
.min_field_value = 1,
},
+ {
+ .desc = "Data cache clean to Point of Deep Persistence",
+ .capability = ARM64_HAS_DCPODP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+ .min_field_value = 2,
+ },
#endif
#ifdef CONFIG_ARM64_SVE
{
@@ -1480,6 +1543,21 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
},
#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ {
+ /*
+ * Depends on having GICv3
+ */
+ .desc = "IRQ priority masking",
+ .capability = ARM64_HAS_IRQ_PRIO_MASKING,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = can_use_gic_priorities,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .field_pos = ID_AA64PFR0_GIC_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#endif
{},
};
@@ -1536,39 +1614,46 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
#endif
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
- HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
+ HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
#endif
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
#ifdef CONFIG_ARM64_PTR_AUTH
- HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA),
- HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, HWCAP_PACG),
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif
{},
};
@@ -1588,7 +1673,7 @@ static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
- elf_hwcap |= cap->hwcap;
+ cpu_set_feature(cap->hwcap);
break;
#ifdef CONFIG_COMPAT
case CAP_COMPAT_HWCAP:
@@ -1611,7 +1696,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
switch (cap->hwcap_type) {
case CAP_HWCAP:
- rc = (elf_hwcap & cap->hwcap) != 0;
+ rc = cpu_have_feature(cap->hwcap);
break;
#ifdef CONFIG_COMPAT
case CAP_COMPAT_HWCAP:
@@ -1632,7 +1717,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
/* We support emulation of accesses to CPU ID feature registers */
- elf_hwcap |= HWCAP_CPUID;
+ cpu_set_named_feature(CPUID);
for (; hwcaps->matches; hwcaps++)
if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
cap_set_elf_hwcap(hwcaps);
@@ -1654,6 +1739,9 @@ static void update_cpu_capabilities(u16 scope_mask)
if (caps->desc)
pr_info("detected: %s\n", caps->desc);
cpus_set_cap(caps->capability);
+
+ if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
+ set_bit(caps->capability, boot_capabilities);
}
}
@@ -1909,6 +1997,35 @@ bool this_cpu_has_cap(unsigned int n)
return false;
}
+void cpu_set_feature(unsigned int num)
+{
+ WARN_ON(num >= MAX_CPU_FEATURES);
+ elf_hwcap |= BIT(num);
+}
+EXPORT_SYMBOL_GPL(cpu_set_feature);
+
+bool cpu_have_feature(unsigned int num)
+{
+ WARN_ON(num >= MAX_CPU_FEATURES);
+ return elf_hwcap & BIT(num);
+}
+EXPORT_SYMBOL_GPL(cpu_have_feature);
+
+unsigned long cpu_get_elf_hwcap(void)
+{
+ /*
+ * We currently only populate the first 32 bits of AT_HWCAP. Please
+ * note that for userspace compatibility we guarantee that bits 62
+ * and 63 will always be returned as 0.
+ */
+ return lower_32_bits(elf_hwcap);
+}
+
+unsigned long cpu_get_elf_hwcap2(void)
+{
+ return upper_32_bits(elf_hwcap);
+}
+
static void __init setup_system_capabilities(void)
{
/*
@@ -2063,3 +2180,15 @@ static int __init enable_mrs_emulation(void)
}
core_initcall(enable_mrs_emulation);
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (__meltdown_safe)
+ return sprintf(buf, "Not affected\n");
+
+ if (arm64_kernel_unmapped_at_el0())
+ return sprintf(buf, "Mitigation: PTI\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
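cpu_get_elf_hwcap() and cpu_get_elf_hwcap2() simply split the internal 64-bit word into the two 32-bit auxv values. A standalone sketch with made-up bits:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t elf_hwcap = (1ULL << 33) | (1ULL << 1);   /* made-up bits */

        /* lower_32_bits() -> AT_HWCAP, upper_32_bits() -> AT_HWCAP2 */
        printf("AT_HWCAP : %#x\n", (unsigned)elf_hwcap);
        printf("AT_HWCAP2: %#x\n", (unsigned)(elf_hwcap >> 32));
        return 0;
    }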
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index ca0685f33900..f6f7936be6e7 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -85,6 +85,13 @@ static const char *const hwcap_str[] = {
"sb",
"paca",
"pacg",
+ "dcpodp",
+ "sve2",
+ "sveaes",
+ "svepmull",
+ "svebitperm",
+ "svesha3",
+ "svesm4",
NULL
};
@@ -167,7 +174,7 @@ static int c_show(struct seq_file *m, void *v)
#endif /* CONFIG_COMPAT */
} else {
for (j = 0; hwcap_str[j]; j++)
- if (elf_hwcap & (1 << j))
+ if (cpu_have_feature(j))
seq_printf(m, " %s", hwcap_str[j]);
}
seq_puts(m, "\n");
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index d7bb6aefae0a..555b6bd2f3d6 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
*/
static int clear_os_lock(unsigned int cpu)
{
+ write_sysreg(0, osdlr_el1);
write_sysreg(0, oslar_el1);
isb();
return 0;
@@ -163,25 +164,46 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(clear_regs_spsr_ss);
-/* EL1 Single Step Handler hooks */
-static LIST_HEAD(step_hook);
-static DEFINE_SPINLOCK(step_hook_lock);
+static DEFINE_SPINLOCK(debug_hook_lock);
+static LIST_HEAD(user_step_hook);
+static LIST_HEAD(kernel_step_hook);
-void register_step_hook(struct step_hook *hook)
+static void register_debug_hook(struct list_head *node, struct list_head *list)
{
- spin_lock(&step_hook_lock);
- list_add_rcu(&hook->node, &step_hook);
- spin_unlock(&step_hook_lock);
+ spin_lock(&debug_hook_lock);
+ list_add_rcu(node, list);
+ spin_unlock(&debug_hook_lock);
+
}
-void unregister_step_hook(struct step_hook *hook)
+static void unregister_debug_hook(struct list_head *node)
{
- spin_lock(&step_hook_lock);
- list_del_rcu(&hook->node);
- spin_unlock(&step_hook_lock);
+ spin_lock(&debug_hook_lock);
+ list_del_rcu(node);
+ spin_unlock(&debug_hook_lock);
synchronize_rcu();
}
+void register_user_step_hook(struct step_hook *hook)
+{
+ register_debug_hook(&hook->node, &user_step_hook);
+}
+
+void unregister_user_step_hook(struct step_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
+void register_kernel_step_hook(struct step_hook *hook)
+{
+ register_debug_hook(&hook->node, &kernel_step_hook);
+}
+
+void unregister_kernel_step_hook(struct step_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
/*
* Call registered single step handlers
* There is no Syndrome info to check for determining the handler.
@@ -191,11 +213,14 @@ void unregister_step_hook(struct step_hook *hook)
static int call_step_hook(struct pt_regs *regs, unsigned int esr)
{
struct step_hook *hook;
+ struct list_head *list;
int retval = DBG_HOOK_ERROR;
+ list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
+
rcu_read_lock();
- list_for_each_entry_rcu(hook, &step_hook, node) {
+ list_for_each_entry_rcu(hook, list, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
@@ -222,7 +247,7 @@ static void send_user_sigtrap(int si_code)
"User debug trap");
}
-static int single_step_handler(unsigned long addr, unsigned int esr,
+static int single_step_handler(unsigned long unused, unsigned int esr,
struct pt_regs *regs)
{
bool handler_found = false;
@@ -234,10 +259,6 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
if (!reinstall_suspended_bps(regs))
return 0;
-#ifdef CONFIG_KPROBES
- if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
-#endif
if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
handler_found = true;
@@ -264,61 +285,59 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
}
NOKPROBE_SYMBOL(single_step_handler);
-/*
- * Breakpoint handler is re-entrant as another breakpoint can
- * hit within breakpoint handler, especically in kprobes.
- * Use reader/writer locks instead of plain spinlock.
- */
-static LIST_HEAD(break_hook);
-static DEFINE_SPINLOCK(break_hook_lock);
+static LIST_HEAD(user_break_hook);
+static LIST_HEAD(kernel_break_hook);
-void register_break_hook(struct break_hook *hook)
+void register_user_break_hook(struct break_hook *hook)
{
- spin_lock(&break_hook_lock);
- list_add_rcu(&hook->node, &break_hook);
- spin_unlock(&break_hook_lock);
+ register_debug_hook(&hook->node, &user_break_hook);
}
-void unregister_break_hook(struct break_hook *hook)
+void unregister_user_break_hook(struct break_hook *hook)
{
- spin_lock(&break_hook_lock);
- list_del_rcu(&hook->node);
- spin_unlock(&break_hook_lock);
- synchronize_rcu();
+ unregister_debug_hook(&hook->node);
+}
+
+void register_kernel_break_hook(struct break_hook *hook)
+{
+ register_debug_hook(&hook->node, &kernel_break_hook);
+}
+
+void unregister_kernel_break_hook(struct break_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
struct break_hook *hook;
+ struct list_head *list;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+ list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
+
rcu_read_lock();
- list_for_each_entry_rcu(hook, &break_hook, node)
- if ((esr & hook->esr_mask) == hook->esr_val)
+ list_for_each_entry_rcu(hook, list, node) {
+ unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+
+ if ((comment & ~hook->mask) == hook->imm)
fn = hook->fn;
+ }
rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
NOKPROBE_SYMBOL(call_break_hook);
-static int brk_handler(unsigned long addr, unsigned int esr,
+static int brk_handler(unsigned long unused, unsigned int esr,
struct pt_regs *regs)
{
- bool handler_found = false;
-
-#ifdef CONFIG_KPROBES
- if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
- if (kprobe_breakpoint_handler(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
- }
-#endif
- if (!handler_found && call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
- if (!handler_found && user_mode(regs)) {
+ if (user_mode(regs)) {
send_user_sigtrap(TRAP_BRKPT);
- } else if (!handler_found) {
+ } else {
pr_warn("Unexpected kernel BRK exception at EL1\n");
return -EFAULT;
}
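With this rework a hook identifies its breakpoint by BRK immediate (plus an optional mask over the ESR comment field) rather than by a raw ESR value/mask pair, and registers against either the user or the kernel list. A hypothetical built-in sketch of the kernel-side interface; the 0x900 immediate and the handler are invented for illustration:

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/ptrace.h>
#include <asm/debug-monitors.h>

static int demo_brk_handler(struct pt_regs *regs, unsigned int esr)
{
	pr_info("demo BRK hit at %pS\n", (void *)instruction_pointer(regs));
	/* Step over the 4-byte BRK instruction so we do not re-trap. */
	instruction_pointer_set(regs, instruction_pointer(regs) + 4);
	return DBG_HOOK_HANDLED;
}

static struct break_hook demo_brk_hook = {
	.fn	= demo_brk_handler,
	.imm	= 0x900,	/* hypothetical BRK #imm, matched against the comment field */
};

static int __init demo_brk_init(void)
{
	register_kernel_break_hook(&demo_brk_hook);
	return 0;
}
arch_initcall(demo_brk_init);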
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 0ec0c46b2c0c..1a7811b7e3c4 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -185,7 +185,7 @@ alternative_cb_end
.else
add x21, sp, #S_FRAME_SIZE
- get_thread_info tsk
+ get_current_task tsk
/* Save the task's original addr_limit and set USER_DS */
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
@@ -249,6 +249,12 @@ alternative_else_nop_endif
msr sp_el0, tsk
.endif
+ /* Save PMR */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ mrs_s x20, SYS_ICC_PMR_EL1
+ str x20, [sp, #S_PMR_SAVE]
+alternative_else_nop_endif
+
/*
* Registers that may be useful after this macro is invoked:
*
@@ -269,6 +275,14 @@ alternative_else_nop_endif
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
+ /* Restore PMR */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ ldr x20, [sp, #S_PMR_SAVE]
+ msr_s SYS_ICC_PMR_EL1, x20
+ /* Ensure priority change is seen by redistributor */
+ dsb sy
+alternative_else_nop_endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
@@ -322,6 +336,21 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+alternative_if_not ARM64_WORKAROUND_1188873
+ b 4f
+alternative_else_nop_endif
+ /*
+ * if (x22.mode32 == cntkctl_el1.el0vcten)
+ * cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
+ */
+ mrs x1, cntkctl_el1
+ eon x0, x1, x22, lsr #3
+ tbz x0, #1, 4f
+ eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
+ msr cntkctl_el1, x1
+4:
+#endif
apply_ssbd 0, x0, x1
.endif
@@ -348,11 +377,11 @@ alternative_else_nop_endif
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- bne 4f
+ bne 5f
msr far_el1, x30
tramp_alias x30, tramp_exit_native
br x30
-4:
+5:
tramp_alias x30, tramp_exit_compat
br x30
#endif
@@ -603,32 +632,52 @@ el1_irq:
kernel_entry 1
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ ldr x20, [sp, #S_PMR_SAVE]
+alternative_else
+ mov x20, #GIC_PRIO_IRQON
+alternative_endif
+ cmp x20, #GIC_PRIO_IRQOFF
+ /* Irqs were disabled, don't trace */
+ b.ls 1f
+#endif
bl trace_hardirqs_off
+1:
#endif
irq_handler
#ifdef CONFIG_PREEMPT
ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
- cbnz x24, 1f // preempt count != 0
- bl el1_preempt
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ /*
+ * DA_F were cleared at the start of handling. If anything is set in DAIF,
+ * we took an NMI, so skip preemption
+ */
+ mrs x0, daif
+ orr x24, x24, x0
+alternative_else_nop_endif
+ cbnz x24, 1f // preempt count != 0 || NMI return path
+ bl preempt_schedule_irq // irq en/disable is done inside
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ /*
+ * If IRQs were disabled when we received the interrupt, we have an NMI
+ * and we are not re-enabling interrupts upon eret. Skip tracing.
+ */
+ cmp x20, #GIC_PRIO_IRQOFF
+ b.ls 1f
+#endif
bl trace_hardirqs_on
+1:
#endif
+
kernel_exit 1
ENDPROC(el1_irq)
-#ifdef CONFIG_PREEMPT
-el1_preempt:
- mov x24, lr
-1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
- tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
- ret x24
-#endif
-
/*
* EL0 mode handlers.
*/
@@ -1070,7 +1119,7 @@ ENTRY(ret_from_fork)
cbz x19, 1f // not a kernel thread
mov x0, x20
blr x19
-1: get_thread_info tsk
+1: get_current_task tsk
b ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
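The PMR checks inserted into el1_irq reduce to one comparison: with priority masking in use, interrupts count as enabled only while the saved PMR value is above GIC_PRIO_IRQOFF (the "cmp x20, #GIC_PRIO_IRQOFF; b.ls" pairs above). The same test in C, as a sketch against the pt_regs field added by this series:

/* Sketch: true if IRQs were enabled when the exception was taken. */
static inline bool irqs_enabled_at_entry(const struct pt_regs *regs)
{
	return regs->pmr_save > GIC_PRIO_IRQOFF;
}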
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 5ebe73b69961..735cf1f8b109 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1258,14 +1258,14 @@ static inline void fpsimd_hotplug_init(void) { }
*/
static int __init fpsimd_init(void)
{
- if (elf_hwcap & HWCAP_FP) {
+ if (cpu_have_named_feature(FP)) {
fpsimd_pm_init();
fpsimd_hotplug_init();
} else {
pr_notice("Floating-point is not implemented\n");
}
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
pr_notice("Advanced SIMD is not implemented\n");
return sve_sysctl_init();
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 8e4431a8821f..65a51331088e 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -103,12 +103,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* to be revisited if support for multiple ftrace entry points
* is added in the future, but for now, the pr_err() below
* deals with a theoretical issue only.
+ *
+ * Note that PLTs are place relative, and plt_entries_equal()
+ * checks whether they point to the same target. Here, we need
+ * to check if the actual opcodes are in fact identical,
+ * regardless of the offset in memory, so use memcmp() instead.
*/
trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &trampoline)) {
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &(struct plt_entry){})) {
+ if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
+ sizeof(trampoline))) {
+ if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
return -EINVAL;
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index eecf7927dab0..fcae3f85c6cd 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -505,7 +505,7 @@ ENTRY(el2_setup)
* kernel is intended to run at EL2.
*/
mrs x2, id_aa64mmfr1_el1
- ubfx x2, x2, #8, #4
+ ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
#else
mov x2, xzr
#endif
@@ -538,7 +538,7 @@ set_hcr:
#ifdef CONFIG_ARM_GIC_V3
/* GICv3 system register access */
mrs x0, id_aa64pfr0_el1
- ubfx x0, x0, #24, #4
+ ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
cbz x0, 3f
mrs_s x0, SYS_ICC_SRE_EL2
@@ -564,8 +564,8 @@ set_hcr:
#endif
/* EL2 debug */
- mrs x1, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
- sbfx x0, x1, #8, #4
+ mrs x1, id_aa64dfr0_el1
+ sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
cmp x0, #1
b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
@@ -574,7 +574,7 @@ set_hcr:
csel x3, xzr, x0, lt // all PMU counters from EL1
/* Statistical profiling */
- ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer
+ ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
cbz x0, 7f // Skip if SPE not present
cbnz x2, 6f // VHE?
mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2,
@@ -684,7 +684,7 @@ ENTRY(__boot_cpu_mode)
* with MMU turned off.
*/
ENTRY(__early_cpu_boot_status)
- .long 0
+ .quad 0
.popsection
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 780a12f59a8f..92fa81798fb9 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -33,6 +33,9 @@
unsigned long irq_err_count;
+/* Only access this in an NMI enter/exit */
+DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
+
DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
int arch_show_interrupts(struct seq_file *p, int prec)
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index ce46c4cdf368..30853d5b7859 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -245,7 +245,7 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
{
kgdb_handle_exception(1, SIGTRAP, 0, regs);
- return 0;
+ return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_brk_fn)
@@ -254,7 +254,7 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
compiled_break = 1;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
- return 0;
+ return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
@@ -264,20 +264,18 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
return DBG_HOOK_ERROR;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
- return 0;
+ return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
static struct break_hook kgdb_brkpt_hook = {
- .esr_mask = 0xffffffff,
- .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
- .fn = kgdb_brk_fn
+ .fn = kgdb_brk_fn,
+ .imm = KGDB_DYN_DBG_BRK_IMM,
};
static struct break_hook kgdb_compiled_brkpt_hook = {
- .esr_mask = 0xffffffff,
- .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
- .fn = kgdb_compiled_brk_fn
+ .fn = kgdb_compiled_brk_fn,
+ .imm = KGDB_COMPILED_DBG_BRK_IMM,
};
static struct step_hook kgdb_step_hook = {
@@ -326,9 +324,9 @@ int kgdb_arch_init(void)
if (ret != 0)
return ret;
- register_break_hook(&kgdb_brkpt_hook);
- register_break_hook(&kgdb_compiled_brkpt_hook);
- register_step_hook(&kgdb_step_hook);
+ register_kernel_break_hook(&kgdb_brkpt_hook);
+ register_kernel_break_hook(&kgdb_compiled_brkpt_hook);
+ register_kernel_step_hook(&kgdb_step_hook);
return 0;
}
@@ -339,9 +337,9 @@ int kgdb_arch_init(void)
*/
void kgdb_arch_exit(void)
{
- unregister_break_hook(&kgdb_brkpt_hook);
- unregister_break_hook(&kgdb_compiled_brkpt_hook);
- unregister_step_hook(&kgdb_step_hook);
+ unregister_kernel_break_hook(&kgdb_brkpt_hook);
+ unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook);
+ unregister_kernel_step_hook(&kgdb_step_hook);
unregister_die_notifier(&kgdb_notifier);
}
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 997e6b27ff6a..49825e9e421e 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -1,29 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Low-level user helpers placed in the vectors page for AArch32.
+ * AArch32 user helpers.
* Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
*
* Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2012-2018 ARM Ltd.
*
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * AArch32 user helpers.
- *
- * Each segment is 32-byte aligned and will be moved to the top of the high
- * vector page. New segments (if ever needed) must be added in front of
- * existing ones. This mechanism should be used only for things that are
- * really small and justified, and not be abused freely.
+ * The kuser helpers below are mapped at a fixed address by
+ * aarch32_setup_additional_pages() and are provided for compatibility
+ * with 32-bit (AArch32) applications that need them.
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
@@ -77,42 +62,3 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
-
-/*
- * AArch32 sigreturn code
- *
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- *
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
- .globl __aarch32_sigret_code_start
-__aarch32_sigret_code_start:
-
- /*
- * ARM Code
- */
- .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn
-
- /*
- * ARM code
- */
- .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn
-
- .globl __aarch32_sigret_code_end
-__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 1620a371b1f5..6164d389eed6 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -431,7 +431,7 @@ static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
return val;
}
-static inline u64 armv8pmu_read_counter(struct perf_event *event)
+static u64 armv8pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -468,7 +468,7 @@ static inline void armv8pmu_write_hw_counter(struct perf_event *event,
}
}
-static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
+static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -810,7 +810,7 @@ static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
}
/*
- * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ * Add an event filter to a given event.
*/
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index f17afb99890c..2509fcb6d404 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -91,8 +91,6 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long probe_addr = (unsigned long)p->addr;
- extern char __start_rodata[];
- extern char __end_rodata[];
if (probe_addr & 0x3)
return -EINVAL;
@@ -100,10 +98,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
/* copy instruction */
p->opcode = le32_to_cpu(*p->addr);
- if (in_exception_text(probe_addr))
- return -EINVAL;
- if (probe_addr >= (unsigned long) __start_rodata &&
- probe_addr <= (unsigned long) __end_rodata)
+ if (search_exception_tables(probe_addr))
return -EINVAL;
/* decode instruction */
@@ -444,7 +439,7 @@ kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
return DBG_HOOK_ERROR;
}
-int __kprobes
+static int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -463,33 +458,53 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
return retval;
}
-int __kprobes
+static struct step_hook kprobes_step_hook = {
+ .fn = kprobe_single_step_handler,
+};
+
+static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
kprobe_handler(regs);
return DBG_HOOK_HANDLED;
}
-bool arch_within_kprobe_blacklist(unsigned long addr)
-{
- if ((addr >= (unsigned long)__kprobes_text_start &&
- addr < (unsigned long)__kprobes_text_end) ||
- (addr >= (unsigned long)__entry_text_start &&
- addr < (unsigned long)__entry_text_end) ||
- (addr >= (unsigned long)__idmap_text_start &&
- addr < (unsigned long)__idmap_text_end) ||
- (addr >= (unsigned long)__hyp_text_start &&
- addr < (unsigned long)__hyp_text_end) ||
- !!search_exception_tables(addr))
- return true;
-
- if (!is_kernel_in_hyp_mode()) {
- if ((addr >= (unsigned long)__hyp_idmap_text_start &&
- addr < (unsigned long)__hyp_idmap_text_end))
- return true;
- }
+static struct break_hook kprobes_break_hook = {
+ .imm = KPROBES_BRK_IMM,
+ .fn = kprobe_breakpoint_handler,
+};
- return false;
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+ int ret;
+
+ ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
+ (unsigned long)__entry_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
+ (unsigned long)__exception_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
+ (unsigned long)__idmap_text_end);
+ if (ret)
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
+ (unsigned long)__hyp_text_end);
+ if (ret || is_kernel_in_hyp_mode())
+ return ret;
+ ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
+ (unsigned long)__hyp_idmap_text_end);
+ return ret;
}
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
@@ -587,5 +602,8 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
int __init arch_init_kprobes(void)
{
+ register_kernel_break_hook(&kprobes_break_hook);
+ register_kernel_step_hook(&kprobes_step_hook);
+
return 0;
}
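Routing kprobes through the generic kernel break/step hooks does not change the client API. A standard module-style sketch; the _do_fork symbol name is an assumption appropriate to kernels of this vintage:

#include <linux/kprobes.h>
#include <linux/module.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit: %s\n", p->symbol_name);
	return 0;
}

static struct kprobe demo_kp = {
	.symbol_name	= "_do_fork",	/* assumption: symbol name for this era */
	.pre_handler	= demo_pre,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");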
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index 636ca0119c0e..605945eac1f8 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -171,7 +171,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
static int uprobe_breakpoint_handler(struct pt_regs *regs,
unsigned int esr)
{
- if (user_mode(regs) && uprobe_pre_sstep_notifier(regs))
+ if (uprobe_pre_sstep_notifier(regs))
return DBG_HOOK_HANDLED;
return DBG_HOOK_ERROR;
@@ -182,21 +182,16 @@ static int uprobe_single_step_handler(struct pt_regs *regs,
{
struct uprobe_task *utask = current->utask;
- if (user_mode(regs)) {
- WARN_ON(utask &&
- (instruction_pointer(regs) != utask->xol_vaddr + 4));
-
- if (uprobe_post_sstep_notifier(regs))
- return DBG_HOOK_HANDLED;
- }
+ WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
+ if (uprobe_post_sstep_notifier(regs))
+ return DBG_HOOK_HANDLED;
return DBG_HOOK_ERROR;
}
/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
- .esr_mask = BRK64_ESR_MASK,
- .esr_val = BRK64_ESR_UPROBES,
+ .imm = UPROBES_BRK_IMM,
.fn = uprobe_breakpoint_handler,
};
@@ -207,8 +202,8 @@ static struct step_hook uprobes_step_hook = {
static int __init arch_init_uprobes(void)
{
- register_break_hook(&uprobes_break_hook);
- register_step_hook(&uprobes_step_hook);
+ register_user_break_hook(&uprobes_break_hook);
+ register_user_step_hook(&uprobes_step_hook);
return 0;
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a0f985a6ac50..3767fb21a5b8 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -51,6 +51,7 @@
#include <linux/thread_info.h>
#include <asm/alternative.h>
+#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
@@ -74,6 +75,50 @@ EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+static void __cpu_do_idle(void)
+{
+ dsb(sy);
+ wfi();
+}
+
+static void __cpu_do_idle_irqprio(void)
+{
+ unsigned long pmr;
+ unsigned long daif_bits;
+
+ daif_bits = read_sysreg(daif);
+ write_sysreg(daif_bits | PSR_I_BIT, daif);
+
+ /*
+ * Unmask PMR before going idle to make sure interrupts can
+ * be raised.
+ */
+ pmr = gic_read_pmr();
+ gic_write_pmr(GIC_PRIO_IRQON);
+
+ __cpu_do_idle();
+
+ gic_write_pmr(pmr);
+ write_sysreg(daif_bits, daif);
+}
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (wait for interrupt).
+ *
+ * If the CPU supports priority masking, we must do additional work to
+ * ensure that interrupts are not masked at the PMR (because the core will
+ * not wake up if we block the wake up signal in the interrupt controller).
+ */
+void cpu_do_idle(void)
+{
+ if (system_uses_irq_prio_masking())
+ __cpu_do_idle_irqprio();
+ else
+ __cpu_do_idle();
+}
+
/*
* This is our default idle handler.
*/
@@ -232,6 +277,9 @@ void __show_regs(struct pt_regs *regs)
printk("sp : %016llx\n", sp);
+ if (system_uses_irq_prio_masking())
+ printk("pmr_save: %08llx\n", regs->pmr_save);
+
i = top_reg;
while (i >= 0) {
@@ -363,6 +411,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
childregs->pstate |= PSR_SSBS_BIT;
+ if (system_uses_irq_prio_masking())
+ childregs->pmr_save = GIC_PRIO_IRQON;
+
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
}
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index ddaea0fd2fa4..b82e0a9b3da3 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -979,6 +979,131 @@ static int pac_mask_get(struct task_struct *target,
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1);
}
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
+{
+ return (__uint128_t)key->hi << 64 | key->lo;
+}
+
+static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
+{
+ struct ptrauth_key key = {
+ .lo = (unsigned long)ukey,
+ .hi = (unsigned long)(ukey >> 64),
+ };
+
+ return key;
+}
+
+static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
+ const struct ptrauth_keys *keys)
+{
+ ukeys->apiakey = pac_key_to_user(&keys->apia);
+ ukeys->apibkey = pac_key_to_user(&keys->apib);
+ ukeys->apdakey = pac_key_to_user(&keys->apda);
+ ukeys->apdbkey = pac_key_to_user(&keys->apdb);
+}
+
+static void pac_address_keys_from_user(struct ptrauth_keys *keys,
+ const struct user_pac_address_keys *ukeys)
+{
+ keys->apia = pac_key_from_user(ukeys->apiakey);
+ keys->apib = pac_key_from_user(ukeys->apibkey);
+ keys->apda = pac_key_from_user(ukeys->apdakey);
+ keys->apdb = pac_key_from_user(ukeys->apdbkey);
+}
+
+static int pac_address_keys_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct user_pac_address_keys user_keys;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ pac_address_keys_to_user(&user_keys, keys);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+}
+
+static int pac_address_keys_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct user_pac_address_keys user_keys;
+ int ret;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ pac_address_keys_to_user(&user_keys, keys);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+ if (ret)
+ return ret;
+ pac_address_keys_from_user(keys, &user_keys);
+
+ return 0;
+}
+
+static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
+ const struct ptrauth_keys *keys)
+{
+ ukeys->apgakey = pac_key_to_user(&keys->apga);
+}
+
+static void pac_generic_keys_from_user(struct ptrauth_keys *keys,
+ const struct user_pac_generic_keys *ukeys)
+{
+ keys->apga = pac_key_from_user(ukeys->apgakey);
+}
+
+static int pac_generic_keys_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct user_pac_generic_keys user_keys;
+
+ if (!system_supports_generic_auth())
+ return -EINVAL;
+
+ pac_generic_keys_to_user(&user_keys, keys);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+}
+
+static int pac_generic_keys_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct ptrauth_keys *keys = &target->thread.keys_user;
+ struct user_pac_generic_keys user_keys;
+ int ret;
+
+ if (!system_supports_generic_auth())
+ return -EINVAL;
+
+ pac_generic_keys_to_user(&user_keys, keys);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &user_keys, 0, -1);
+ if (ret)
+ return ret;
+ pac_generic_keys_from_user(keys, &user_keys);
+
+ return 0;
+}
+#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
enum aarch64_regset {
@@ -995,6 +1120,10 @@ enum aarch64_regset {
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
REGSET_PAC_MASK,
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ REGSET_PACA_KEYS,
+ REGSET_PACG_KEYS,
+#endif
#endif
};
@@ -1074,6 +1203,24 @@ static const struct user_regset aarch64_regsets[] = {
.get = pac_mask_get,
/* this cannot be set dynamically */
},
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ [REGSET_PACA_KEYS] = {
+ .core_note_type = NT_ARM_PACA_KEYS,
+ .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
+ .size = sizeof(__uint128_t),
+ .align = sizeof(__uint128_t),
+ .get = pac_address_keys_get,
+ .set = pac_address_keys_set,
+ },
+ [REGSET_PACG_KEYS] = {
+ .core_note_type = NT_ARM_PACG_KEYS,
+ .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
+ .size = sizeof(__uint128_t),
+ .align = sizeof(__uint128_t),
+ .get = pac_generic_keys_get,
+ .set = pac_generic_keys_set,
+ },
+#endif
#endif
};
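A tracer reaches the new regsets through PTRACE_GETREGSET/PTRACE_SETREGSET with an iovec. A hedged tracer-side sketch, assuming the NT_ARM_PACA_KEYS note type (0x407 in this series) and the uapi struct user_pac_address_keys are visible:

#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* struct user_pac_address_keys (uapi) */

#ifndef NT_ARM_PACA_KEYS
#define NT_ARM_PACA_KEYS 0x407	/* assumption: value from this series */
#endif

static long read_pac_address_keys(pid_t pid, struct user_pac_address_keys *keys)
{
	struct iovec iov = {
		.iov_base = keys,
		.iov_len  = sizeof(*keys),
	};

	/* Target must be ptrace-stopped; fails (EINVAL) without address auth. */
	return ptrace(PTRACE_GETREGSET, pid, NT_ARM_PACA_KEYS, &iov);
}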
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 5ba4465e44f0..ea94cf8f9dc6 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
+ if (!low)
+ return false;
+
if (sp < low || sp >= high)
return false;
@@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
+ if (!low)
+ return false;
+
if (sp < low || sp >= high)
return false;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 009849328289..413d566405d1 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -58,7 +58,6 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
-#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>
@@ -209,6 +208,7 @@ static void __init request_standard_resources(void)
struct memblock_region *region;
struct resource *res;
unsigned long i = 0;
+ size_t res_size;
kernel_code.start = __pa_symbol(_text);
kernel_code.end = __pa_symbol(__init_begin - 1);
@@ -216,9 +216,10 @@ static void __init request_standard_resources(void)
kernel_data.end = __pa_symbol(_end - 1);
num_standard_resources = memblock.memory.cnt;
- standard_resources = memblock_alloc_low(num_standard_resources *
- sizeof(*standard_resources),
- SMP_CACHE_BYTES);
+ res_size = num_standard_resources * sizeof(*standard_resources);
+ standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+ if (!standard_resources)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
for_each_memblock(memory, region) {
res = &standard_resources[i++];
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index cb7800acd19f..caea6e25db2a 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -403,8 +403,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
if (ka->sa.sa_flags & SA_SIGINFO)
idx += 3;
- retcode = AARCH32_VECTORS_BASE +
- AARCH32_KERN_SIGRET_CODE_OFFSET +
+ retcode = (unsigned long)current->mm->context.vdso +
(idx << 2) + thumb;
}
diff --git a/arch/arm64/kernel/sigreturn32.S b/arch/arm64/kernel/sigreturn32.S
new file mode 100644
index 000000000000..475d30d471ac
--- /dev/null
+++ b/arch/arm64/kernel/sigreturn32.S
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AArch32 sigreturn code.
+ * Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
+ *
+ * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright (C) 2012-2018 ARM Ltd.
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+
+#include <asm/unistd.h>
+
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+ * ARM Code
+ */
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 1598d6f7200a..824de7038967 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -35,6 +35,7 @@
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
@@ -180,6 +181,24 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
return ret;
}
+static void init_gic_priority_masking(void)
+{
+ u32 cpuflags;
+
+ if (WARN_ON(!gic_enable_sre()))
+ return;
+
+ cpuflags = read_sysreg(daif);
+
+ WARN_ON(!(cpuflags & PSR_I_BIT));
+
+ gic_write_pmr(GIC_PRIO_IRQOFF);
+
+ /* We can only unmask PSR.I if we can take aborts */
+ if (!(cpuflags & PSR_A_BIT))
+ write_sysreg(cpuflags & ~PSR_I_BIT, daif);
+}
+
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
@@ -206,6 +225,9 @@ asmlinkage notrace void secondary_start_kernel(void)
*/
cpu_uninstall_idmap();
+ if (system_uses_irq_prio_masking())
+ init_gic_priority_masking();
+
preempt_disable();
trace_hardirqs_off();
@@ -419,6 +441,17 @@ void __init smp_prepare_boot_cpu(void)
*/
jump_label_init();
cpuinfo_store_boot_cpu();
+
+ /*
+ * We now know enough about the boot CPU to apply the
+ * alternatives that cannot wait until interrupt handling
+ * and/or scheduling is enabled.
+ */
+ apply_boot_alternatives();
+
+ /* Conditionally switch to GIC PMR for interrupt masking */
+ if (system_uses_irq_prio_masking())
+ init_gic_priority_masking();
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 1a29f2695ff2..b00ec7d483d1 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -140,9 +140,8 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
#endif
walk_stackframe(current, &frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
static noinline void __save_stack_trace(struct task_struct *tsk,
struct stack_trace *trace, unsigned int nosched)
@@ -171,8 +170,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
#endif
walk_stackframe(tsk, &frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
put_task_stack(tsk);
}
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index b44065fb1616..6f91e8116514 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -31,7 +31,7 @@
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
- unsigned long, fd, off_t, off)
+ unsigned long, fd, unsigned long, off)
{
if (offset_in_page(off) != 0)
return -EINVAL;
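Widening off from off_t to unsigned long avoids sign-extension of large 32-bit offsets; the page-alignment rule is unchanged. A trivial userspace sketch of the constraint the check enforces:

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* off must be a multiple of the page size or mmap() fails with EINVAL. */
static void *map_at_offset(int fd, off_t off, size_t len)
{
	if (off % sysconf(_SC_PAGESIZE))
		return MAP_FAILED;
	return mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, off);
}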
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 4e2fb877f8d5..ade32046f3fe 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- int skip;
+ int skip = 0;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+ if (regs) {
+ if (user_mode(regs))
+ return;
+ skip = 1;
+ }
+
if (!tsk)
tsk = current;
@@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.graph = 0;
#endif
- skip = !!regs;
printk("Call trace:\n");
do {
/* skip until specified stack frame */
@@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
return ret;
print_modules();
- __show_regs(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
end_of_stack(tsk));
+ show_regs(regs);
- if (!user_mode(regs)) {
- dump_backtrace(regs, tsk);
+ if (!user_mode(regs))
dump_instr(KERN_EMERG, regs);
- }
return ret;
}
@@ -459,6 +462,9 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */
__user_cache_maint("dc civac", address, ret);
break;
+ case ESR_ELx_SYS64_ISS_CRM_DC_CVADP: /* DC CVADP */
+ __user_cache_maint("sys 3, c7, c13, 1", address, ret);
+ break;
case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */
__user_cache_maint("sys 3, c7, c12, 1", address, ret);
break;
@@ -493,7 +499,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = ESR_ELx_SYS64_ISS_RT(esr);
- pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
+ pt_regs_write_reg(regs, rt, arch_timer_read_counter());
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
@@ -665,7 +671,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
- u64 val = arch_counter_get_cntvct();
+ u64 val = arch_timer_read_counter();
pt_regs_write_reg(regs, rt, lower_32_bits(val));
pt_regs_write_reg(regs, rt2, upper_32_bits(val));
@@ -898,13 +904,17 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
- nmi_enter();
+ const bool was_in_nmi = in_nmi();
+
+ if (!was_in_nmi)
+ nmi_enter();
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
- nmi_exit();
+ if (!was_in_nmi)
+ nmi_exit();
}
void __pte_error(const char *file, int line, unsigned long val)
@@ -943,9 +953,6 @@ int is_valid_bugaddr(unsigned long addr)
static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
- if (user_mode(regs))
- return DBG_HOOK_ERROR;
-
switch (report_bug(regs->pc, regs)) {
case BUG_TRAP_TYPE_BUG:
die("Oops - BUG", regs, 0);
@@ -965,9 +972,8 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
}
static struct break_hook bug_break_hook = {
- .esr_val = 0xf2000000 | BUG_BRK_IMM,
- .esr_mask = 0xffffffff,
.fn = bug_handler,
+ .imm = BUG_BRK_IMM,
};
#ifdef CONFIG_KASAN_SW_TAGS
@@ -985,9 +991,6 @@ static int kasan_handler(struct pt_regs *regs, unsigned int esr)
u64 addr = regs->regs[0];
u64 pc = regs->pc;
- if (user_mode(regs))
- return DBG_HOOK_ERROR;
-
kasan_report(addr, size, write, pc);
/*
@@ -1012,13 +1015,10 @@ static int kasan_handler(struct pt_regs *regs, unsigned int esr)
return DBG_HOOK_HANDLED;
}
-#define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM)
-#define KASAN_ESR_MASK 0xffffff00
-
static struct break_hook kasan_break_hook = {
- .esr_val = KASAN_ESR_VAL,
- .esr_mask = KASAN_ESR_MASK,
- .fn = kasan_handler,
+ .fn = kasan_handler,
+ .imm = KASAN_BRK_IMM,
+ .mask = KASAN_BRK_MASK,
};
#endif
@@ -1030,7 +1030,9 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
- if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL)
+ unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+
+ if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
@@ -1039,8 +1041,8 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
- register_break_hook(&bug_break_hook);
+ register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
- register_break_hook(&kasan_break_hook);
+ register_kernel_break_hook(&kasan_break_hook);
#endif
}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 2d419006ad43..8074cbd3a3a8 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -1,5 +1,5 @@
/*
- * VDSO implementation for AArch64 and vector page setup for AArch32.
+ * VDSO implementations.
*
* Copyright (C) 2012 ARM Limited
*
@@ -53,61 +53,129 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
/*
* Create and map the vectors page for AArch32 tasks.
*/
-static struct page *vectors_page[1] __ro_after_init;
+#define C_VECTORS 0
+#define C_SIGPAGE 1
+#define C_PAGES (C_SIGPAGE + 1)
+static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
+static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
+ {
+ .name = "[vectors]", /* ABI */
+ .pages = &aarch32_vdso_pages[C_VECTORS],
+ },
+ {
+ .name = "[sigpage]", /* ABI */
+ .pages = &aarch32_vdso_pages[C_SIGPAGE],
+ },
+};
-static int __init alloc_vectors_page(void)
+static int aarch32_alloc_kuser_vdso_page(void)
{
extern char __kuser_helper_start[], __kuser_helper_end[];
- extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
- int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
- unsigned long vpage;
+ unsigned long vdso_page;
- vpage = get_zeroed_page(GFP_ATOMIC);
+ if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
+ return 0;
- if (!vpage)
+ vdso_page = get_zeroed_page(GFP_ATOMIC);
+ if (!vdso_page)
return -ENOMEM;
- /* kuser helpers */
- memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
- kuser_sz);
+ memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
+ kuser_sz);
+ aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
+ flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
+ return 0;
+}
- /* sigreturn code */
- memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- __aarch32_sigret_code_start, sigret_sz);
+static int __init aarch32_alloc_vdso_pages(void)
+{
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+ int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+ unsigned long sigpage;
+ int ret;
- flush_icache_range(vpage, vpage + PAGE_SIZE);
- vectors_page[0] = virt_to_page(vpage);
+ sigpage = get_zeroed_page(GFP_ATOMIC);
+ if (!sigpage)
+ return -ENOMEM;
- return 0;
+ memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
+ aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
+ flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);
+
+ ret = aarch32_alloc_kuser_vdso_page();
+ if (ret)
+ free_page(sigpage);
+
+ return ret;
}
-arch_initcall(alloc_vectors_page);
+arch_initcall(aarch32_alloc_vdso_pages);
-int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
+static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
- struct mm_struct *mm = current->mm;
- unsigned long addr = AARCH32_VECTORS_BASE;
- static const struct vm_special_mapping spec = {
- .name = "[vectors]",
- .pages = vectors_page,
+ void *ret;
+
+ if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
+ return 0;
+
+ /*
+ * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
+ * not safe to CoW the page containing the CPU exception vectors.
+ */
+ ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYEXEC,
+ &aarch32_vdso_spec[C_VECTORS]);
- };
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+static int aarch32_sigreturn_setup(struct mm_struct *mm)
+{
+ unsigned long addr;
void *ret;
- if (down_write_killable(&mm->mmap_sem))
- return -EINTR;
- current->mm->context.vdso = (void *)addr;
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = ERR_PTR(addr);
+ goto out;
+ }
- /* Map vectors page at the high address. */
+ /*
+ * VM_MAYWRITE is required so that gdb can copy-on-write the
+ * page and set breakpoints.
+ */
ret = _install_special_mapping(mm, addr, PAGE_SIZE,
- VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
- &spec);
+ VM_READ | VM_EXEC | VM_MAYREAD |
+ VM_MAYWRITE | VM_MAYEXEC,
+ &aarch32_vdso_spec[C_SIGPAGE]);
+ if (IS_ERR(ret))
+ goto out;
- up_write(&mm->mmap_sem);
+ mm->context.vdso = (void *)addr;
+out:
return PTR_ERR_OR_ZERO(ret);
}
+
+int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+
+ ret = aarch32_kuser_helpers_setup(mm);
+ if (ret)
+ goto out;
+
+ ret = aarch32_sigreturn_setup(mm);
+
+out:
+ up_write(&mm->mmap_sem);
+ return ret;
+}
#endif /* CONFIG_COMPAT */
static int vdso_mremap(const struct vm_special_mapping *sm,
@@ -146,8 +214,6 @@ static int __init vdso_init(void)
}
vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
- pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
- vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
/* Allocate the vDSO pagelist, plus a page for the data. */
vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -232,6 +298,9 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+ /* Read without the seqlock held by clock_getres() */
+ WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
+
if (!use_syscall) {
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
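Snapshotting hrtimer_resolution into the vDSO data page lets clock_getres() be answered without a syscall for the supported clocks. The consumer side is just the ordinary libc wrapper:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec res;

	if (clock_getres(CLOCK_MONOTONIC, &res))
		return 1;
	printf("resolution: %ld ns\n", res.tv_nsec);
	return 0;
}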
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index b215c712d897..744b9dbaba03 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -12,17 +12,12 @@ obj-vdso := gettimeofday.o note.o sigreturn.o
targets := $(obj-vdso) vdso.so vdso.so.dbg
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-ccflags-y := -shared -fno-common -fno-builtin
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \
+ $(call ld-option, --hash-style=sysv) -n -T
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
-# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
-# down to collect2, resulting in silent corruption of the vDSO image.
-ccflags-y += -Wl,-shared
-
obj-y += vdso.o
extra-y += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
@@ -31,8 +26,8 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
$(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file, .lds has to be first
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
- $(call if_changed,vdsold)
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,ld)
# Strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -42,9 +37,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
- $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-endef
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
@@ -54,8 +47,6 @@ $(obj-vdso): %.o: %.S FORCE
$(call if_changed_dep,vdsoas)
# Actual build commands
-quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index c39872a7b03c..856fee6d3512 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -73,6 +73,13 @@ x_tmp .req x8
movn x_tmp, #0xff00, lsl #48
and \res, x_tmp, \res
mul \res, \res, \mult
+ /*
+ * Fake address dependency from the value computed from the counter
+ * register to subsequent data page accesses so that the sequence
+ * locking also orders the read of the counter.
+ */
+ and x_tmp, \res, xzr
+ add vdso_data, vdso_data, x_tmp
.endm
/*
@@ -147,12 +154,12 @@ ENTRY(__kernel_gettimeofday)
/* w11 = cs_mono_mult, w12 = cs_shift */
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
- seqcnt_check fail=1b
get_nsec_per_sec res=x9
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ seqcnt_check fail=1b
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
@@ -211,13 +218,13 @@ realtime:
/* w11 = cs_mono_mult, w12 = cs_shift */
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
- seqcnt_check fail=realtime
/* All computations are done with left-shifted nsecs. */
get_nsec_per_sec res=x9
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ seqcnt_check fail=realtime
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
clock_gettime_return, shift=1
@@ -231,7 +238,6 @@ monotonic:
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
- seqcnt_check fail=monotonic
/* All computations are done with left-shifted nsecs. */
lsl x4, x4, x12
@@ -239,6 +245,7 @@ monotonic:
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ seqcnt_check fail=monotonic
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
@@ -253,13 +260,13 @@ monotonic_raw:
/* w11 = cs_raw_mult, w12 = cs_shift */
ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
- seqcnt_check fail=monotonic_raw
/* All computations are done with left-shifted nsecs. */
get_nsec_per_sec res=x9
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ seqcnt_check fail=monotonic_raw
get_ts_clock_raw res_sec=x10, res_nsec=x11, \
clock_nsec=x15, nsec_to_sec=x9
@@ -301,13 +308,14 @@ ENTRY(__kernel_clock_getres)
ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
b.ne 1f
- ldr x2, 5f
+ adr vdso_data, _vdso_data
+ ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
b 2f
1:
cmp w0, #CLOCK_REALTIME_COARSE
ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
b.ne 4f
- ldr x2, 6f
+ ldr x2, 5f
2:
cbz x1, 3f
stp xzr, x2, [x1]
@@ -321,8 +329,6 @@ ENTRY(__kernel_clock_getres)
svc #0
ret
5:
- .quad CLOCK_REALTIME_RES
-6:
.quad CLOCK_COARSE_RES
.cfi_endproc
ENDPROC(__kernel_clock_getres)
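The fake address dependency added to get_clock_shifted_nsec is the usual trick for ordering a counter read against later loads without a heavy barrier: compute a value that is data-dependent on the counter but always zero, and fold it into the base pointer of the subsequent accesses. A C rendering of the idiom, as a sketch only (a C compiler may legally fold the zero away, which is why the real code stays in assembly):

static inline const struct vdso_data *
order_after_counter(const struct vdso_data *vd, unsigned long cnt)
{
	/* (cnt & 0) is always zero but keeps vd data-dependent on cnt. */
	return (const struct vdso_data *)((unsigned long)vd + (cnt & 0));
}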
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 0f2a135ba15b..690e033a91c0 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -3,9 +3,7 @@
# Makefile for Kernel-based Virtual Machine module
#
-ccflags-y += -Iarch/arm64/kvm -Ivirt/kvm/arm/vgic
-CFLAGS_arm.o := -I.
-CFLAGS_mmu.o := -I.
+ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
KVM=../../../virt/kvm
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index f39801e4136c..fd917d6d12af 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -76,7 +76,7 @@ static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
void kvm_arm_init_debug(void)
{
- __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
+ __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}
/**
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 952f6cb9cf72..2845aa680841 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -40,9 +40,6 @@
* arch/arm64/kernel/hyp_stub.S.
*/
ENTRY(__kvm_call_hyp)
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
hvc #0
ret
-alternative_else_nop_endif
- b __vhe_hyp_call
ENDPROC(__kvm_call_hyp)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 73c1b483ec39..2b1e686772bf 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -43,18 +43,6 @@
ldr lr, [sp], #16
.endm
-ENTRY(__vhe_hyp_call)
- do_el2_call
- /*
- * We used to rely on having an exception return to get
- * an implicit isb. In the E2H case, we don't have it anymore.
- * rather than changing all the leaf functions, just do it here
- * before returning to the rest of the kernel.
- */
- isb
- ret
-ENDPROC(__vhe_hyp_call)
-
el1_sync: // Guest trapped into EL2
mrs x0, esr_el2
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 421ebf6f7086..3563fe655cd5 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -22,6 +22,7 @@
#include <kvm/arm_psci.h>
+#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
@@ -525,6 +526,17 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
+ /*
+ * Having IRQs masked via PMR when entering the guest means the GIC
+ * will not signal lower-priority interrupts to the CPU, and the only
+ * way out will be via guest exceptions.
+ * Naturally, we want to avoid this.
+ */
+ if (system_uses_irq_prio_masking()) {
+ gic_write_pmr(GIC_PRIO_IRQON);
+ dsb(sy);
+ }
+
vcpu = kern_hyp_va(vcpu);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
@@ -577,6 +589,10 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
*/
__debug_switch_to_host(vcpu);
+ /* Returning to host will clear PSR.I, remask PMR if needed */
+ if (system_uses_irq_prio_masking())
+ gic_write_pmr(GIC_PRIO_IRQOFF);
+
return exit_code;
}
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index b426e2cf973c..c52a8451637c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -53,7 +53,6 @@ static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
- ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index f16a5f8ff2b4..e2a0500cd7a2 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
int ret = -EINVAL;
bool loaded;
+ /* Reset PMU outside of the non-preemptible section */
+ kvm_pmu_vcpu_reset(vcpu);
+
preempt_disable();
loaded = (vcpu->cpu != -1);
if (loaded)
@@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.reset_state.reset = false;
}
- /* Reset PMU */
- kvm_pmu_vcpu_reset(vcpu);
-
/* Default workaround setup is enabled (if supported) */
if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c936aa40c3f4..539feecda5b8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -982,6 +982,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+#define reg_to_encoding(x) \
+ sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
+ (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
@@ -1003,44 +1007,38 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
-static bool access_cntp_tval(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *r)
+static bool access_arch_timer(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
{
- u64 now = kvm_phys_timer_read();
- u64 cval;
+ enum kvm_arch_timers tmr;
+ enum kvm_arch_timer_regs treg;
+ u64 reg = reg_to_encoding(r);
- if (p->is_write) {
- kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
- p->regval + now);
- } else {
- cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
- p->regval = cval - now;
+ switch (reg) {
+ case SYS_CNTP_TVAL_EL0:
+ case SYS_AARCH32_CNTP_TVAL:
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_TVAL;
+ break;
+ case SYS_CNTP_CTL_EL0:
+ case SYS_AARCH32_CNTP_CTL:
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_CTL;
+ break;
+ case SYS_CNTP_CVAL_EL0:
+ case SYS_AARCH32_CNTP_CVAL:
+ tmr = TIMER_PTIMER;
+ treg = TIMER_REG_CVAL;
+ break;
+ default:
+ BUG();
}
- return true;
-}
-
-static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-{
- if (p->is_write)
- kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
- else
- p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
-
- return true;
-}
-
-static bool access_cntp_cval(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-{
if (p->is_write)
- kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
+ kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
else
- p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+ p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
return true;
}
@@ -1160,6 +1158,64 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return __set_id_reg(rd, uaddr, true);
}
+static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
+ return true;
+}
+
+static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = read_sysreg(clidr_el1);
+ return true;
+}
+
+static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ vcpu_write_sys_reg(vcpu, p->regval, r->reg);
+ else
+ p->regval = vcpu_read_sys_reg(vcpu, r->reg);
+ return true;
+}
+
+static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u32 csselr;
+
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
+ p->regval = get_ccsidr(csselr);
+
+ /*
+ * Guests should not be doing cache operations by set/way at all, and
+ * for this reason, we trap them and attempt to infer the intent, so
+ * that we can flush the entire guest's address space at the appropriate
+ * time.
+ * To prevent this trapping from causing performance problems, let's
+ * expose the geometry of all data and unified caches (which are
+ * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
+ * [If guests should attempt to infer aliasing properties from the
+ * geometry (which is not permitted by the architecture), they would
+ * only do so for virtually indexed caches.]
+ */
+ if (!(csselr & 1)) // data or unified cache
+ p->regval &= ~GENMASK(27, 3);
+ return true;
+}
+
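A worked example of the masking, assuming the ARMv8 non-CCIDX CCSIDR_EL1 layout (LineSize in bits [2:0], Associativity-1 in [12:3], NumSets-1 in [27:13]):

	u64 ccsidr = (127UL << 13) | (3UL << 3) | 2;	/* hypothetical 32KB, 4-way, 64B-line cache: 0x000fe01a */

	ccsidr &= ~GENMASK(27, 3);	/* -> 0x2: 1 set, 1 way, line size preserved */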
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
SYS_DESC(SYS_##name), \
@@ -1377,7 +1433,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
- { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
+ { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
+ { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
+ { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
+ { SYS_DESC(SYS_CTR_EL0), access_ctr },
{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
@@ -1400,9 +1459,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
- { SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
- { SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
- { SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
+ { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
+ { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
/* PMEVCNTRn_EL0 */
PMU_PMEVCNTR_EL0(0),
@@ -1476,7 +1535,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
- { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
+ { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
@@ -1677,6 +1736,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
* register).
*/
static const struct sys_reg_desc cp15_regs[] = {
+ { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
@@ -1723,10 +1783,9 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
- /* CNTP_TVAL */
- { Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
- /* CNTP_CTL */
- { Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },
+ /* Arch timers */
+ { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
+ { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
/* PMEVCNTRn */
PMU_PMEVCNTR(0),
@@ -1794,6 +1853,10 @@ static const struct sys_reg_desc cp15_regs[] = {
PMU_PMEVTYPER(30),
/* PMCCFILTR */
{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
+
+ { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
+ { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
+ { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
};
static const struct sys_reg_desc cp15_64_regs[] = {
@@ -1803,7 +1866,7 @@ static const struct sys_reg_desc cp15_64_regs[] = {
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
- { Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
+ { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};
/* Target specific emulation tables */
@@ -1832,30 +1895,19 @@ static const struct sys_reg_desc *get_target_table(unsigned target,
}
}
-#define reg_to_match_value(x) \
- ({ \
- unsigned long val; \
- val = (x)->Op0 << 14; \
- val |= (x)->Op1 << 11; \
- val |= (x)->CRn << 7; \
- val |= (x)->CRm << 3; \
- val |= (x)->Op2; \
- val; \
- })
-
static int match_sys_reg(const void *key, const void *elt)
{
const unsigned long pval = (unsigned long)key;
const struct sys_reg_desc *r = elt;
- return pval - reg_to_match_value(r);
+ return pval - reg_to_encoding(r);
}
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
const struct sys_reg_desc table[],
unsigned int num)
{
- unsigned long pval = reg_to_match_value(params);
+ unsigned long pval = reg_to_encoding(params);
return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
@@ -2218,11 +2270,15 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
}
FUNCTION_INVARIANT(midr_el1)
-FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
+static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
+{
+ ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
+}
+
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
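With each descriptor reduced to its architectural encoding, sorting the tables and searching them now share a single definition of the key. A hedged usage sketch (in the real code the params are filled from a trapped access; the values below are CNTP_TVAL_EL0's encoding):

	struct sys_reg_params p = {
		.Op0 = 3, .Op1 = 3, .CRn = 14, .CRm = 2, .Op2 = 0,
	};
	const struct sys_reg_desc *r;

	r = find_reg(&p, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
	/* r->access is access_arch_timer for this encoding */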
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 5540a1638baf..33c2a4abda04 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -24,7 +24,7 @@ CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
-fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
-fcall-saved-x18 -fomit-frame-pointer
-CFLAGS_REMOVE_atomic_ll_sc.o := -pg
+CFLAGS_REMOVE_atomic_ll_sc.o := $(CC_FLAGS_FTRACE)
GCOV_PROFILE_atomic_ll_sc.o := n
KASAN_SANITIZE_atomic_ll_sc.o := n
KCOV_INSTRUMENT_atomic_ll_sc.o := n
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 99bb8facb5cb..14fe23cd5932 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -406,7 +406,7 @@ void ptdump_check_wx(void)
static int ptdump_init(void)
{
ptdump_initialize();
- return ptdump_debugfs_register(&kernel_ptdump_info,
- "kernel_page_tables");
+ ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
+ return 0;
}
device_initcall(ptdump_init);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index e1c84c2e1cab..0cb0e09995e1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -148,7 +148,7 @@ static inline bool is_ttbr1_addr(unsigned long addr)
/*
* Dump out the page tables associated with 'addr' in the currently active mm.
*/
-void show_pte(unsigned long addr)
+static void show_pte(unsigned long addr)
{
struct mm_struct *mm;
pgd_t *pgdp;
@@ -810,12 +810,12 @@ void __init hook_debug_fault_code(int nr,
debug_fault_info[nr].name = name;
}
-asmlinkage int __exception do_debug_exception(unsigned long addr,
- unsigned int esr,
- struct pt_regs *regs)
+asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
+ unsigned int esr,
+ struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_debug_fault_info(esr);
- int rv;
+ unsigned long pc = instruction_pointer(regs);
/*
* Tell lockdep we disabled irqs in entry.S. Do nothing if they were
@@ -824,20 +824,15 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
if (interrupts_enabled(regs))
trace_hardirqs_off();
- if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
+ if (user_mode(regs) && !is_ttbr0_addr(pc))
arm64_apply_bp_hardening();
- if (!inf->fn(addr, esr, regs)) {
- rv = 1;
- } else {
+ if (inf->fn(addr_if_watchpoint, esr, regs)) {
arm64_notify_die(inf->name, regs,
- inf->sig, inf->code, (void __user *)addr, esr);
- rv = 0;
+ inf->sig, inf->code, (void __user *)pc, esr);
}
if (interrupts_enabled(regs))
trace_hardirqs_on();
-
- return rv;
}
NOKPROBE_SYMBOL(do_debug_exception);
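The parameter rename carries the point: the reported address is only trustworthy for watchpoint exceptions, which is also why the die notification now reports the PC. A hypothetical handler shape, assuming the fault_info callback signature used above:

	static int watchpoint_handler_sketch(unsigned long addr,
					     unsigned int esr,
					     struct pt_regs *regs)
	{
		/* addr may be relied on only in this, the watchpoint, case */
		return 0;	/* handled */
	}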
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index c38976b70069..40e2d7e5efcb 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -260,24 +260,6 @@ int pfn_valid(unsigned long pfn)
}
EXPORT_SYMBOL(pfn_valid);
-#ifndef CONFIG_SPARSEMEM
-static void __init arm64_memory_present(void)
-{
-}
-#else
-static void __init arm64_memory_present(void)
-{
- struct memblock_region *reg;
-
- for_each_memblock(memory, reg) {
- int nid = memblock_get_region_node(reg);
-
- memory_present(nid, memblock_region_memory_base_pfn(reg),
- memblock_region_memory_end_pfn(reg));
- }
-}
-#endif
-
static phys_addr_t memory_limit = PHYS_ADDR_MAX;
/*
@@ -381,7 +363,7 @@ void __init arm64_memblock_init(void)
* Otherwise, this is a no-op
*/
u64 base = phys_initrd_start & PAGE_MASK;
- u64 size = PAGE_ALIGN(phys_initrd_size);
+ u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
/*
* We can only add back the initrd memory if we don't end up
@@ -395,7 +377,7 @@ void __init arm64_memblock_init(void)
base + size > memblock_start_of_DRAM() +
linear_region_size,
"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
- initrd_start = 0;
+ phys_initrd_size = 0;
} else {
memblock_remove(base, size); /* clear MEMBLOCK_ flags */
memblock_add(base, size);
@@ -458,13 +440,14 @@ void __init bootmem_init(void)
early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);
max_pfn = max_low_pfn = max;
+ min_low_pfn = min;
arm64_numa_init();
/*
* Sparsemem tries to allocate bootmem in memory_present(), so must be
* done after the fixed reservations.
*/
- arm64_memory_present();
+ memblocks_present();
sparse_init();
zone_sizes_init(min, max);
@@ -553,7 +536,7 @@ void __init mem_init(void)
else
swiotlb_force = SWIOTLB_NO_FORCE;
- set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
+ set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
#ifndef CONFIG_SPARSEMEM_VMEMMAP
free_unused_memmap();
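A worked example of the initrd rounding fix (values hypothetical, PAGE_SIZE = 0x1000): an initrd at physical 0x41800 with size 0x2000 ends at 0x43800.

	base = 0x41800 & PAGE_MASK;	/* 0x41000 */
	/* old: size = PAGE_ALIGN(0x2000)         = 0x2000 -> covers only up to 0x43000, tail lost */
	/* new: size = PAGE_ALIGN(0x43800) - base = 0x3000 -> covers up to 0x44000 */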
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index f37a86d2a69d..296de39ddee5 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -40,6 +40,11 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_KASAN, node);
+ if (!p)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node,
+ __pa(MAX_DMA_ADDRESS));
+
return __pa(p);
}
@@ -48,6 +53,11 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_KASAN, node);
+ if (!p)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node,
+ __pa(MAX_DMA_ADDRESS));
+
return __pa(p);
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b6f5aa52ac67..ef82312860ac 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -42,7 +42,6 @@
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
-#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
@@ -98,12 +97,14 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
EXPORT_SYMBOL(phys_mem_access_prot);
-static phys_addr_t __init early_pgtable_alloc(void)
+static phys_addr_t __init early_pgtable_alloc(int shift)
{
phys_addr_t phys;
void *ptr;
phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate page table page\n");
/*
* The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
@@ -173,7 +174,7 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void),
+ phys_addr_t (*pgtable_alloc)(int),
int flags)
{
unsigned long next;
@@ -183,7 +184,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
if (pmd_none(pmd)) {
phys_addr_t pte_phys;
BUG_ON(!pgtable_alloc);
- pte_phys = pgtable_alloc();
+ pte_phys = pgtable_alloc(PAGE_SHIFT);
__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
pmd = READ_ONCE(*pmdp);
}
@@ -207,7 +208,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void), int flags)
+ phys_addr_t (*pgtable_alloc)(int), int flags)
{
unsigned long next;
pmd_t *pmdp;
@@ -245,7 +246,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void), int flags)
+ phys_addr_t (*pgtable_alloc)(int), int flags)
{
unsigned long next;
pud_t pud = READ_ONCE(*pudp);
@@ -257,7 +258,7 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
if (pud_none(pud)) {
phys_addr_t pmd_phys;
BUG_ON(!pgtable_alloc);
- pmd_phys = pgtable_alloc();
+ pmd_phys = pgtable_alloc(PMD_SHIFT);
__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
pud = READ_ONCE(*pudp);
}
@@ -293,7 +294,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void),
+ phys_addr_t (*pgtable_alloc)(int),
int flags)
{
unsigned long next;
@@ -303,7 +304,7 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
if (pgd_none(pgd)) {
phys_addr_t pud_phys;
BUG_ON(!pgtable_alloc);
- pud_phys = pgtable_alloc();
+ pud_phys = pgtable_alloc(PUD_SHIFT);
__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
pgd = READ_ONCE(*pgdp);
}
@@ -344,7 +345,7 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void),
+ phys_addr_t (*pgtable_alloc)(int),
int flags)
{
unsigned long addr, length, end, next;
@@ -370,17 +371,36 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
} while (pgdp++, addr = next, addr != end);
}
-static phys_addr_t pgd_pgtable_alloc(void)
+static phys_addr_t __pgd_pgtable_alloc(int shift)
{
void *ptr = (void *)__get_free_page(PGALLOC_GFP);
- if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
- BUG();
+ BUG_ON(!ptr);
/* Ensure the zeroed page is visible to the page table walker */
dsb(ishst);
return __pa(ptr);
}
+static phys_addr_t pgd_pgtable_alloc(int shift)
+{
+ phys_addr_t pa = __pgd_pgtable_alloc(shift);
+
+ /*
+ * Call the proper page table ctor in case we later need to call
+ * core mm functions like apply_to_page_range() on this
+ * pre-allocated page table.
+ *
+ * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if the pmd is
+ * folded; in that case pgtable_pmd_page_ctor() becomes a nop.
+ */
+ if (shift == PAGE_SHIFT)
+ BUG_ON(!pgtable_page_ctor(phys_to_page(pa)));
+ else if (shift == PMD_SHIFT)
+ BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));
+
+ return pa;
+}
+
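Why the ctor variant matters, as a sketch (the caller and some_pte_fn, a pte_fn_t callback, are hypothetical):

	static int touch_range_sketch(struct mm_struct *mm, unsigned long addr,
				      unsigned long size)
	{
		/*
		 * apply_to_page_range() takes the per-page ptl on each PTE
		 * page it walks; that lock is only initialised if the page
		 * went through pgtable_page_ctor() at allocation time.
		 */
		return apply_to_page_range(mm, addr, size, some_pte_fn, NULL);
	}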
/*
* This function can only be used to modify existing table entries,
* without allocating new levels of table. Note that this permits the
@@ -582,7 +602,7 @@ static int __init map_entry_trampoline(void)
/* Map only the text into the trampoline page table */
memset(tramp_pg_dir, 0, PGD_SIZE);
__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
- prot, pgd_pgtable_alloc, 0);
+ prot, __pgd_pgtable_alloc, 0);
/* Map both the text and data into the kernel page table */
__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
@@ -655,10 +675,6 @@ static void __init map_kernel(pgd_t *pgdp)
kasan_copy_shadow(pgdp);
}
-/*
- * paging_init() sets up the page tables, initialises the zone memory
- * maps and sets up the zero page.
- */
void __init paging_init(void)
{
pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
@@ -1058,7 +1074,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
- size, PAGE_KERNEL, pgd_pgtable_alloc, flags);
+ size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);
return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
altmap, want_memblock);
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 7a0a555b366a..5202f63c29c9 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -124,7 +124,7 @@ static void __init setup_node_to_cpumask_map(void)
}
/*
- * Set the cpu to node and mem mapping
+ * Set the cpu to node and mem mapping
*/
void numa_store_cpu_info(unsigned int cpu)
{
@@ -200,7 +200,7 @@ void __init setup_per_cpu_areas(void)
#endif
/**
- * numa_add_memblk - Set node id to memblk
+ * numa_add_memblk() - Set node id to memblk
* @nid: NUMA node ID of the new memblk
* @start: Start address of the new memblk
* @end: End address of the new memblk
@@ -223,7 +223,7 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
return ret;
}
-/**
+/*
* Initialize NODE_DATA for a node on the local memory
*/
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
@@ -237,6 +237,10 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa)
+ panic("Cannot allocate %zu bytes for node %d data\n",
+ nd_size, nid);
+
nd = __va(nd_pa);
/* report and initialize */
@@ -253,7 +257,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
-/**
+/*
* numa_free_distance
*
* The current table is freed.
@@ -273,10 +277,8 @@ void __init numa_free_distance(void)
numa_distance = NULL;
}
-/**
- *
+/*
* Create a new NUMA distance table.
- *
*/
static int __init numa_alloc_distance(void)
{
@@ -307,7 +309,7 @@ static int __init numa_alloc_distance(void)
}
/**
- * numa_set_distance - Set inter node NUMA distance from node to node.
+ * numa_set_distance() - Set inter node NUMA distance from node to node.
* @from: the 'from' node to set distance
* @to: the 'to' node to set distance
* @distance: NUMA distance
@@ -317,7 +319,6 @@ static int __init numa_alloc_distance(void)
*
* If @from or @to is higher than the highest known node or lower than zero
* or @distance doesn't make sense, the call is ignored.
- *
*/
void __init numa_set_distance(int from, int to, int distance)
{
@@ -343,7 +344,7 @@ void __init numa_set_distance(int from, int to, int distance)
numa_distance[from * numa_distance_cnt + to] = distance;
}
-/**
+/*
* Return NUMA distance @from to @to
*/
int __node_distance(int from, int to)
@@ -418,13 +419,15 @@ out_free_distance:
}
/**
- * dummy_numa_init - Fallback dummy NUMA init
+ * dummy_numa_init() - Fallback dummy NUMA init
*
* Used if there's no underlying NUMA architecture, NUMA initialization
* fails, or NUMA is disabled on the command line.
*
* Must online at least one node (node 0) and add memory blocks that cover all
* allowed memory. It is unlikely that this function fails.
+ *
+ * Return: 0 on success, -errno on failure.
*/
static int __init dummy_numa_init(void)
{
@@ -450,9 +453,9 @@ static int __init dummy_numa_init(void)
}
/**
- * arm64_numa_init - Initialize NUMA
+ * arm64_numa_init() - Initialize NUMA
*
- * Try each configured NUMA initialization method until one succeeds. The
+ * Try each configured NUMA initialization method until one succeeds. The
* last fallback is a dummy single node config encompassing the whole memory.
*/
void __init arm64_numa_init(void)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 73886a5f1f30..fdd626d34274 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -55,17 +55,6 @@
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
-/*
- * cpu_do_idle()
- *
- * Idle the processor (wait for interrupt).
- */
-ENTRY(cpu_do_idle)
- dsb sy // WFI may enter a low-power mode
- wfi
- ret
-ENDPROC(cpu_do_idle)
-
#ifdef CONFIG_CPU_PM
/**
* cpu_do_suspend - save CPU registers context
@@ -76,24 +65,25 @@ ENTRY(cpu_do_suspend)
mrs x2, tpidr_el0
mrs x3, tpidrro_el0
mrs x4, contextidr_el1
- mrs x5, cpacr_el1
- mrs x6, tcr_el1
- mrs x7, vbar_el1
- mrs x8, mdscr_el1
- mrs x9, oslsr_el1
- mrs x10, sctlr_el1
+ mrs x5, osdlr_el1
+ mrs x6, cpacr_el1
+ mrs x7, tcr_el1
+ mrs x8, vbar_el1
+ mrs x9, mdscr_el1
+ mrs x10, oslsr_el1
+ mrs x11, sctlr_el1
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x11, tpidr_el1
+ mrs x12, tpidr_el1
alternative_else
- mrs x11, tpidr_el2
+ mrs x12, tpidr_el2
alternative_endif
- mrs x12, sp_el0
+ mrs x13, sp_el0
stp x2, x3, [x0]
- stp x4, xzr, [x0, #16]
- stp x5, x6, [x0, #32]
- stp x7, x8, [x0, #48]
- stp x9, x10, [x0, #64]
- stp x11, x12, [x0, #80]
+ stp x4, x5, [x0, #16]
+ stp x6, x7, [x0, #32]
+ stp x8, x9, [x0, #48]
+ stp x10, x11, [x0, #64]
+ stp x12, x13, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@@ -116,8 +106,8 @@ ENTRY(cpu_do_resume)
msr cpacr_el1, x6
/* Don't change t0sz here, mask those bits when restoring */
- mrs x5, tcr_el1
- bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+ mrs x7, tcr_el1
+ bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
msr tcr_el1, x8
msr vbar_el1, x9
@@ -141,6 +131,7 @@ alternative_endif
/*
* Restore oslsr_el1 by writing oslar_el1
*/
+ msr osdlr_el1, x5
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
@@ -456,6 +447,7 @@ ENTRY(__cpu_setup)
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
+ tcr_clear_errata_bits x10, x9, x5
#ifdef CONFIG_ARM64_USER_VA_BITS_52
ldr_l x9, vabits_user
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index 24d786fc3a4c..064163f25592 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -12,10 +12,7 @@ static int ptdump_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
-int ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+void ptdump_debugfs_register(struct ptdump_info *info, const char *name)
{
- struct dentry *pe;
- pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
- return pe ? 0 : -ENOMEM;
-
+ debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
}
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index e5cd3c5f8399..eeb0471268a0 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -20,6 +20,7 @@ config C6X
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
select ARCH_NO_COHERENT_DMA_MMAP
+ select MMU_GATHER_NO_RANGE if MMU
config MMU
def_bool n
@@ -27,9 +28,6 @@ config MMU
config FPU
def_bool n
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config GENERIC_CALIBRATE_DELAY
def_bool y
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 63b4a1705182..6b168d32fbff 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -19,9 +19,11 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += mmu.h
generic-y += mmu_context.h
generic-y += pci.h
diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h
index ae2be315ee9c..15ba8599858e 100644
--- a/arch/c6x/include/asm/syscall.h
+++ b/arch/c6x/include/asm/syscall.h
@@ -46,78 +46,27 @@ static inline void syscall_set_return_value(struct task_struct *task,
}
static inline void syscall_get_arguments(struct task_struct *task,
- struct pt_regs *regs, unsigned int i,
- unsigned int n, unsigned long *args)
+ struct pt_regs *regs,
+ unsigned long *args)
{
- switch (i) {
- case 0:
- if (!n--)
- break;
- *args++ = regs->a4;
- case 1:
- if (!n--)
- break;
- *args++ = regs->b4;
- case 2:
- if (!n--)
- break;
- *args++ = regs->a6;
- case 3:
- if (!n--)
- break;
- *args++ = regs->b6;
- case 4:
- if (!n--)
- break;
- *args++ = regs->a8;
- case 5:
- if (!n--)
- break;
- *args++ = regs->b8;
- case 6:
- if (!n--)
- break;
- default:
- BUG();
- }
+ *args++ = regs->a4;
+ *args++ = regs->b4;
+ *args++ = regs->a6;
+ *args++ = regs->b6;
+ *args++ = regs->a8;
+ *args = regs->b8;
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- switch (i) {
- case 0:
- if (!n--)
- break;
- regs->a4 = *args++;
- case 1:
- if (!n--)
- break;
- regs->b4 = *args++;
- case 2:
- if (!n--)
- break;
- regs->a6 = *args++;
- case 3:
- if (!n--)
- break;
- regs->b6 = *args++;
- case 4:
- if (!n--)
- break;
- regs->a8 = *args++;
- case 5:
- if (!n--)
- break;
- regs->a9 = *args++;
- case 6:
- if (!n)
- break;
- default:
- BUG();
- }
+ regs->a4 = *args++;
+ regs->b4 = *args++;
+ regs->a6 = *args++;
+ regs->b6 = *args++;
+ regs->a8 = *args++;
+ regs->a9 = *args;
}
#endif /* __ASM_C6X_SYSCALLS_H */
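The same simplification repeats for each architecture in this series: the (i, n) window is gone and all six arguments are always transferred. A hedged caller-side sketch:

	unsigned long args[6];

	syscall_get_arguments(task, regs, args);	/* fills args[0..5] in one call */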
diff --git a/arch/c6x/include/asm/tlb.h b/arch/c6x/include/asm/tlb.h
index 34525dea1356..240ba0febb57 100644
--- a/arch/c6x/include/asm/tlb.h
+++ b/arch/c6x/include/asm/tlb.h
@@ -2,8 +2,6 @@
#ifndef _ASM_C6X_TLB_H
#define _ASM_C6X_TLB_H
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#endif /* _ASM_C6X_TLB_H */
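This removal works because the Kconfig hunk above selects MMU_GATHER_NO_RANGE, under which asm-generic/tlb.h itself supplies a whole-mm fallback, roughly (paraphrased, not copied from the generic header):

	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		flush_tlb_mm(tlb->mm);	/* every flush degrades to a full-mm flush */
	}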
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index 0febf1a07c30..1c72f04ff75d 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -1,4 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
-generic-y += kvm_para.h
generic-y += ucontext.h
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 0be289839ce0..0d3701bc88f6 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -138,6 +138,10 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
sizeof(long));
+ if (!dma_bitmap)
+ panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+ __func__, BITS_TO_LONGS(dma_pages) * sizeof(long),
+ sizeof(long));
}
static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index af5ada0520be..fe582c3a1794 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -40,7 +40,9 @@ void __init paging_init(void)
empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE,
PAGE_SIZE);
- memset((void *)empty_zero_page, 0, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
/*
* Set up user data space
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 6959e0b1e956..6555d1781132 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -31,7 +31,6 @@ config CSKY
select HAVE_ARCH_TRACEHOOK
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
@@ -43,7 +42,6 @@ config CSKY
select MODULES_USE_ELF_RELA if MODULES
select OF
select OF_EARLY_FLATTREE
- select OF_RESERVED_MEM
select PERF_USE_VMALLOC if CPU_CK610
select RTC_LIB
select TIMER_OF
@@ -94,9 +92,6 @@ config GENERIC_HWEIGHT
config MMU
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config STACKTRACE_SUPPORT
def_bool y
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 2a0abe8f2a35..95f4e550db8a 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
generic-y += mutex.h
generic-y += pci.h
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
index d637445737b7..bda0a446c63e 100644
--- a/arch/csky/include/asm/syscall.h
+++ b/arch/csky/include/asm/syscall.h
@@ -43,30 +43,20 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
static inline void
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, unsigned long *args)
+ unsigned long *args)
{
- BUG_ON(i + n > 6);
- if (i == 0) {
- args[0] = regs->orig_a0;
- args++;
- i++;
- n--;
- }
- memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
+ args[0] = regs->orig_a0;
+ args++;
+ memcpy(args, &regs->a1, 5 * sizeof(args[0]));
}
static inline void
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, const unsigned long *args)
+ const unsigned long *args)
{
- BUG_ON(i + n > 6);
- if (i == 0) {
- regs->orig_a0 = args[0];
- args++;
- i++;
- n--;
- }
- memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
+ regs->orig_a0 = args[0];
+ args++;
+ memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
}
static inline int
diff --git a/arch/csky/include/uapi/asm/Kbuild b/arch/csky/include/uapi/asm/Kbuild
index c1b06dcf6cf8..1c72f04ff75d 100644
--- a/arch/csky/include/uapi/asm/Kbuild
+++ b/arch/csky/include/uapi/asm/Kbuild
@@ -1,3 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generic-y += ucontext.h
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index 53b1bfa4c462..3317b774f6dc 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -141,6 +141,11 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE,
+ PAGE_SIZE);
+
set_pmd(pmd, __pmd(__pa(pte)));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index c071da34e081..61c01db6c292 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -27,9 +27,6 @@ config H8300
config CPU_BIG_ENDIAN
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config GENERIC_HWEIGHT
def_bool y
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index f801f3708a89..ba0f26cfad61 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -27,7 +27,7 @@ KBUILD_LDFLAGS += $(ldflags-y)
CHECKFLAGS += -msize-long
ifeq ($(CROSS_COMPILE),)
-CROSS_COMPILE := h8300-unknown-linux-
+CROSS_COMPILE := $(call cc-cross-prefix, h8300-unknown-linux- h8300-linux-)
endif
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 961c1dc064e1..123d8f54be4a 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -17,18 +17,19 @@ generic-y += fb.h
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
+generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += mmu.h
generic-y += mmu_context.h
generic-y += module.h
diff --git a/arch/h8300/include/asm/syscall.h b/arch/h8300/include/asm/syscall.h
index 924990401237..ddd483c6ca95 100644
--- a/arch/h8300/include/asm/syscall.h
+++ b/arch/h8300/include/asm/syscall.h
@@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
static inline void
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, unsigned long *args)
+ unsigned long *args)
{
- BUG_ON(i + n > 6);
-
- while (n > 0) {
- switch (i) {
- case 0:
- *args++ = regs->er1;
- break;
- case 1:
- *args++ = regs->er2;
- break;
- case 2:
- *args++ = regs->er3;
- break;
- case 3:
- *args++ = regs->er4;
- break;
- case 4:
- *args++ = regs->er5;
- break;
- case 5:
- *args++ = regs->er6;
- break;
- }
- i++;
- n--;
- }
+ *args++ = regs->er1;
+ *args++ = regs->er2;
+ *args++ = regs->er3;
+ *args++ = regs->er4;
+ *args++ = regs->er5;
+ *args = regs->er6;
}
diff --git a/arch/h8300/include/asm/tlb.h b/arch/h8300/include/asm/tlb.h
index 98f344279904..d8201ca31206 100644
--- a/arch/h8300/include/asm/tlb.h
+++ b/arch/h8300/include/asm/tlb.h
@@ -2,8 +2,6 @@
#ifndef __H8300_TLB_H__
#define __H8300_TLB_H__
-#define tlb_flush(tlb) do { } while (0)
-
#include <asm-generic/tlb.h>
#endif
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index 0febf1a07c30..1c72f04ff75d 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,4 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
-generic-y += kvm_para.h
generic-y += ucontext.h
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 6519252ac4db..0f04a5e9aa4f 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -68,7 +68,9 @@ void __init paging_init(void)
* to a couple of allocated pages.
*/
empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- memset((void *)empty_zero_page, 0, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space).
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index ac441680dcc0..3e54a53208d5 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -65,12 +65,6 @@ config GENERIC_CSUM
config GENERIC_IRQ_PROBE
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool n
-
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config GENERIC_HWEIGHT
def_bool y
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index b25fd42aa0f4..6234a303d2a3 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -19,14 +19,15 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index e17262ad125e..3d0ae09c2b8e 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -184,8 +184,6 @@ static inline void writel(u32 data, volatile void __iomem *addr)
#define writew_relaxed __raw_writew
#define writel_relaxed __raw_writel
-#define mmiowb()
-
/*
* Need an mtype somewhere in here, for cache type deals?
* This is probably too long for an inline.
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h
index 4af9c7b6f13a..ae3a1e24fabd 100644
--- a/arch/hexagon/include/asm/syscall.h
+++ b/arch/hexagon/include/asm/syscall.h
@@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- BUG_ON(i + n > 6);
- memcpy(args, &(&regs->r00)[i], n * sizeof(args[0]));
+ memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
}
#endif
diff --git a/arch/hexagon/include/asm/tlb.h b/arch/hexagon/include/asm/tlb.h
index 2f00772cc08a..f71c4ba83614 100644
--- a/arch/hexagon/include/asm/tlb.h
+++ b/arch/hexagon/include/asm/tlb.h
@@ -22,18 +22,6 @@
#include <linux/pagemap.h>
#include <asm/tlbflush.h>
-/*
- * We don't need any special per-pte or per-vma handling...
- */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
-/*
- * .. because we flush the whole mm when it fills up
- */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#endif
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index c1b06dcf6cf8..1c72f04ff75d 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -1,3 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generic-y += ucontext.h
diff --git a/arch/hexagon/include/uapi/asm/kvm_para.h b/arch/hexagon/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/hexagon/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 8d7396bd1790..73a26f04644e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -83,10 +83,6 @@ config STACKTRACE_SUPPORT
config GENERIC_LOCKBREAK
def_bool n
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y
-
config HUGETLB_PAGE_SIZE_VARIABLE
bool
depends on HUGETLB_PAGE
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index f86844fc0725..0a8a74271173 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -105,7 +105,8 @@ simscsi_interrupt (unsigned long val)
atomic_dec(&num_reqs);
queue[rd].sc = NULL;
if (DBG)
- printk("simscsi_interrupt: done with %ld\n", sc->serial_number);
+ printk("simscsi_interrupt: done with %u\n",
+ sc->request->tag);
(*sc->scsi_done)(sc);
rd = (rd + 1) % SIMSCSI_REQ_QUEUE_LEN;
}
@@ -214,8 +215,8 @@ simscsi_queuecommand_lck (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)
register long sp asm ("sp");
if (DBG)
- printk("simscsi_queuecommand: target=%d,cmnd=%u,sc=%lu,sp=%lx,done=%p\n",
- target_id, sc->cmnd[0], sc->serial_number, sp, done);
+ printk("simscsi_queuecommand: target=%d,cmnd=%u,sc=%u,sp=%lx,done=%p\n",
+ target_id, sc->cmnd[0], sc->request->tag, sp, done);
#endif
sc->result = DID_BAD_TARGET << 16;
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 43e21fe3499c..11f191689c9e 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -2,6 +2,7 @@ generated-y += syscall_table.h
generic-y += compat.h
generic-y += exec.h
generic-y += irq_work.h
+generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 1e6fef69bb01..a511d62d447a 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -113,20 +113,6 @@ extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
*/
#define __ia64_mf_a() ia64_mfa()
-/**
- * ___ia64_mmiowb - I/O write barrier
- *
- * Ensure ordering of I/O space writes. This will make sure that writes
- * following the barrier will arrive after all previous writes. For most
- * ia64 platforms, this is a simple 'mf.a' instruction.
- *
- * See Documentation/driver-api/device-io.rst for more information.
- */
-static inline void ___ia64_mmiowb(void)
-{
- ia64_mfa();
-}
-
static inline void*
__ia64_mk_io_addr (unsigned long port)
{
@@ -161,7 +147,6 @@ __ia64_mk_io_addr (unsigned long port)
#define __ia64_writew ___ia64_writew
#define __ia64_writel ___ia64_writel
#define __ia64_writeq ___ia64_writeq
-#define __ia64_mmiowb ___ia64_mmiowb
/*
* For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
@@ -296,7 +281,6 @@ __outsl (unsigned long port, const void *src, unsigned long count)
#define __outb platform_outb
#define __outw platform_outw
#define __outl platform_outl
-#define __mmiowb platform_mmiowb
#define inb(p) __inb(p)
#define inw(p) __inw(p)
@@ -310,7 +294,6 @@ __outsl (unsigned long port, const void *src, unsigned long count)
#define outsb(p,s,c) __outsb(p,s,c)
#define outsw(p,s,c) __outsw(p,s,c)
#define outsl(p,s,c) __outsl(p,s,c)
-#define mmiowb() __mmiowb()
/*
* The address passed to these functions are ioremap()ped already.
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 5133739966bc..beae261fbcb4 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -30,7 +30,6 @@ typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *);
typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
-typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef u8 ia64_mv_irq_to_vector (int);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
@@ -80,11 +79,6 @@ machvec_noop (void)
}
static inline void
-machvec_noop_mm (struct mm_struct *mm)
-{
-}
-
-static inline void
machvec_noop_task (struct task_struct *task)
{
}
@@ -96,7 +90,6 @@ machvec_noop_bus (struct pci_bus *bus)
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *);
-extern void machvec_tlb_migrate_finish (struct mm_struct *);
# if defined (CONFIG_IA64_HP_SIM)
# include <asm/machvec_hpsim.h>
@@ -124,7 +117,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# define platform_send_ipi ia64_mv.send_ipi
# define platform_timer_interrupt ia64_mv.timer_interrupt
# define platform_global_tlb_purge ia64_mv.global_tlb_purge
-# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
# define platform_dma_init ia64_mv.dma_init
# define platform_dma_get_ops ia64_mv.dma_get_ops
# define platform_irq_to_vector ia64_mv.irq_to_vector
@@ -167,7 +159,6 @@ struct ia64_machine_vector {
ia64_mv_send_ipi_t *send_ipi;
ia64_mv_timer_interrupt_t *timer_interrupt;
ia64_mv_global_tlb_purge_t *global_tlb_purge;
- ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
ia64_mv_dma_init *dma_init;
ia64_mv_dma_get_ops *dma_get_ops;
ia64_mv_irq_to_vector *irq_to_vector;
@@ -206,7 +197,6 @@ struct ia64_machine_vector {
platform_send_ipi, \
platform_timer_interrupt, \
platform_global_tlb_purge, \
- platform_tlb_migrate_finish, \
platform_dma_init, \
platform_dma_get_ops, \
platform_irq_to_vector, \
@@ -270,9 +260,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
#ifndef platform_global_tlb_purge
# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */
#endif
-#ifndef platform_tlb_migrate_finish
-# define platform_tlb_migrate_finish machvec_noop_mm
-#endif
#ifndef platform_kernel_launch_event
# define platform_kernel_launch_event machvec_noop
#endif
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index b5153d300289..a243e4fb4877 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -34,7 +34,6 @@ extern ia64_mv_irq_init_t sn_irq_init;
extern ia64_mv_send_ipi_t sn2_send_IPI;
extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
-extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
@@ -77,7 +76,6 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
#define platform_send_ipi sn2_send_IPI
#define platform_timer_interrupt sn_timer_interrupt
#define platform_global_tlb_purge sn2_global_tlb_purge
-#define platform_tlb_migrate_finish sn_tlb_migrate_finish
#define platform_pci_fixup sn_pci_fixup
#define platform_inb __sn_inb
#define platform_inw __sn_inw
diff --git a/arch/ia64/include/asm/mmiowb.h b/arch/ia64/include/asm/mmiowb.h
new file mode 100644
index 000000000000..297b85ac84a0
--- /dev/null
+++ b/arch/ia64/include/asm/mmiowb.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_IA64_MMIOWB_H
+#define _ASM_IA64_MMIOWB_H
+
+#include <asm/machvec.h>
+
+/**
+ * ___ia64_mmiowb - I/O write barrier
+ *
+ * Ensure ordering of I/O space writes. This will make sure that writes
+ * following the barrier will arrive after all previous writes. For most
+ * ia64 platforms, this is a simple 'mf.a' instruction.
+ */
+static inline void ___ia64_mmiowb(void)
+{
+ ia64_mfa();
+}
+
+#define __ia64_mmiowb ___ia64_mmiowb
+#define mmiowb() platform_mmiowb()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_IA64_MMIOWB_H */
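The classic pattern mmiowb() exists for, as a hedged driver sketch (names hypothetical): without the barrier, a posted MMIO write from the CPU releasing the lock can reach the device after a write from the CPU that takes the lock next.

	spin_lock(&dev->lock);
	writel(val, dev->reg);
	mmiowb();			/* order the MMIO write before the unlock */
	spin_unlock(&dev->lock);

With the mmiowb tracking this series introduces, the barrier is issued by the spinlock code itself (see the ia64 spinlock.h hunk below), so drivers no longer need the explicit call.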
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
deleted file mode 100644
index 917910607e0e..000000000000
--- a/arch/ia64/include/asm/rwsem.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * R/W semaphores for ia64
- *
- * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
- * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 2005 Christoph Lameter <cl@linux.com>
- *
- * Based on asm-i386/rwsem.h and other architecture implementation.
- *
- * The MSW of the count is the negated number of active writers and
- * waiting lockers, and the LSW is the total number of active locks.
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
- * the case of an uncontended lock. Readers increment by 1 and see a positive
- * value when uncontended, negative if there are writers (and maybe) readers
- * waiting (in which case it goes to sleep).
- */
-
-#ifndef _ASM_IA64_RWSEM_H
-#define _ASM_IA64_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#include <asm/intrinsics.h>
-
-#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
-#define RWSEM_ACTIVE_BIAS (1L)
-#define RWSEM_ACTIVE_MASK (0xffffffffL)
-#define RWSEM_WAITING_BIAS (-0x100000000L)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline int
-___down_read (struct rw_semaphore *sem)
-{
- long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
-
- return (result < 0);
-}
-
-static inline void
-__down_read (struct rw_semaphore *sem)
-{
- if (___down_read(sem))
- rwsem_down_read_failed(sem);
-}
-
-static inline int
-__down_read_killable (struct rw_semaphore *sem)
-{
- if (___down_read(sem))
- if (IS_ERR(rwsem_down_read_failed_killable(sem)))
- return -EINTR;
-
- return 0;
-}
-
-/*
- * lock for writing
- */
-static inline long
-___down_write (struct rw_semaphore *sem)
-{
- long old, new;
-
- do {
- old = atomic_long_read(&sem->count);
- new = old + RWSEM_ACTIVE_WRITE_BIAS;
- } while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);
-
- return old;
-}
-
-static inline void
-__down_write (struct rw_semaphore *sem)
-{
- if (___down_write(sem))
- rwsem_down_write_failed(sem);
-}
-
-static inline int
-__down_write_killable (struct rw_semaphore *sem)
-{
- if (___down_write(sem)) {
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
- return -EINTR;
- }
-
- return 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void
-__up_read (struct rw_semaphore *sem)
-{
- long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);
-
- if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void
-__up_write (struct rw_semaphore *sem)
-{
- long old, new;
-
- do {
- old = atomic_long_read(&sem->count);
- new = old - RWSEM_ACTIVE_WRITE_BIAS;
- } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
- if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_read_trylock (struct rw_semaphore *sem)
-{
- long tmp;
- while ((tmp = atomic_long_read(&sem->count)) >= 0) {
- if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_write_trylock (struct rw_semaphore *sem)
-{
- long tmp = atomic_long_cmpxchg_acquire(&sem->count,
- RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
- return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void
-__downgrade_write (struct rw_semaphore *sem)
-{
- long old, new;
-
- do {
- old = atomic_long_read(&sem->count);
- new = old - RWSEM_WAITING_BIAS;
- } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
- if (old < 0)
- rwsem_downgrade_wake(sem);
-}
-
-#endif /* _ASM_IA64_RWSEM_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index afd0b3121b4c..5f620e66384e 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -73,6 +73,8 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
WRITE_ONCE(*p, (tmp + 2) & ~1);
}
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
index 1d0b875fec44..0d9e7fab4a79 100644
--- a/arch/ia64/include/asm/syscall.h
+++ b/arch/ia64/include/asm/syscall.h
@@ -59,26 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task,
}
extern void ia64_syscall_get_set_arguments(struct task_struct *task,
- struct pt_regs *regs, unsigned int i, unsigned int n,
- unsigned long *args, int rw);
+ struct pt_regs *regs, unsigned long *args, int rw);
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- BUG_ON(i + n > 6);
-
- ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
+ ia64_syscall_get_set_arguments(task, regs, args, 0);
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- BUG_ON(i + n > 6);
-
- ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
+ ia64_syscall_get_set_arguments(task, regs, args, 1);
}
static inline int syscall_get_arch(void)
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 516355a774bf..86ec034ba499 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -47,263 +47,6 @@
#include <asm/tlbflush.h>
#include <asm/machvec.h>
-/*
- * If we can't allocate a page to make a big batch of page pointers
- * to work on, then just handle a few from the on-stack structure.
- */
-#define IA64_GATHER_BUNDLE 8
-
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int nr;
- unsigned int max;
- unsigned char fullmm; /* non-zero means full mm flush */
- unsigned char need_flush; /* really unmapped some PTEs? */
- unsigned long start, end;
- unsigned long start_addr;
- unsigned long end_addr;
- struct page **pages;
- struct page *local[IA64_GATHER_BUNDLE];
-};
-
-struct ia64_tr_entry {
- u64 ifa;
- u64 itir;
- u64 pte;
- u64 rr;
-}; /*Record for tr entry!*/
-
-extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
-extern void ia64_ptr_entry(u64 target_mask, int slot);
-
-extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
-
-/*
- region register macros
-*/
-#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
-#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
-#define RR_VE_MASK 0x0000000000000001L
-#define RR_VE_SHIFT 0
-#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
-#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
-#define RR_PS_MASK 0x00000000000000fcL
-#define RR_PS_SHIFT 2
-#define RR_RID_MASK 0x00000000ffffff00L
-#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
-
-static inline void
-ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- tlb->need_flush = 0;
-
- if (tlb->fullmm) {
- /*
- * Tearing down the entire address space. This happens both as a result
- * of exit() and execve(). The latter case necessitates the call to
- * flush_tlb_mm() here.
- */
- flush_tlb_mm(tlb->mm);
- } else if (unlikely (end - start >= 1024*1024*1024*1024UL
- || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
- {
- /*
- * If we flush more than a tera-byte or across regions, we're probably
- * better off just flushing the entire TLB(s). This should be very rare
- * and is not worth optimizing for.
- */
- flush_tlb_all();
- } else {
- /*
- * flush_tlb_range() takes a vma instead of a mm pointer because
- * some architectures want the vm_flags for ITLB/DTLB flush.
- */
- struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
-
- /* flush the address range from the tlb: */
- flush_tlb_range(&vma, start, end);
- /* now flush the virt. page-table area mapping the address range: */
- flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
- }
-
-}
-
-static inline void
-ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- unsigned long i;
- unsigned int nr;
-
- /* lastly, release the freed pages */
- nr = tlb->nr;
-
- tlb->nr = 0;
- tlb->start_addr = ~0UL;
- for (i = 0; i < nr; ++i)
- free_page_and_swap_cache(tlb->pages[i]);
-}
-
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
-static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- if (!tlb->need_flush)
- return;
- ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
- ia64_tlb_flush_mmu_free(tlb);
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
- unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
- if (addr) {
- tlb->pages = (void *)addr;
- tlb->max = PAGE_SIZE / sizeof(void *);
- }
-}
-
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
- tlb->nr = 0;
- tlb->fullmm = !(start | (end+1));
- tlb->start = start;
- tlb->end = end;
- tlb->start_addr = ~0UL;
-}
-
-/*
- * Called at the end of the shootdown operation to free up any resources that were
- * collected.
- */
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (force)
- tlb->need_flush = 1;
- /*
- * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
- * tlb->end_addr.
- */
- ia64_tlb_flush_mmu(tlb, start, end);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
- if (tlb->pages != tlb->local)
- free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
- * must be delayed until after the TLB has been flushed (see comments at the beginning of
- * this file).
- */
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->need_flush = 1;
-
- if (!tlb->nr && tlb->pages == tlb->local)
- __tlb_alloc_page(tlb);
-
- tlb->pages[tlb->nr++] = page;
- VM_WARN_ON(tlb->nr > tlb->max);
- if (tlb->nr == tlb->max)
- return true;
- return false;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- ia64_tlb_flush_mmu_free(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- if (__tlb_remove_page(tlb, page))
- tlb_flush_mmu(tlb);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return tlb_remove_page(tlb, page);
-}
-
-/*
- * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
- * PTE, not just those pointing to (normal) physical memory.
- */
-static inline void
-__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
-{
- if (tlb->start_addr == ~0UL)
- tlb->start_addr = address;
- tlb->end_addr = address + PAGE_SIZE;
-}
-
-#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-
-#define tlb_remove_tlb_entry(tlb, ptep, addr) \
-do { \
- tlb->need_flush = 1; \
- __tlb_remove_tlb_entry(tlb, ptep, addr); \
-} while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, address) \
-do { \
- tlb->need_flush = 1; \
- __pte_free_tlb(tlb, ptep, address); \
-} while (0)
-
-#define pmd_free_tlb(tlb, ptep, address) \
-do { \
- tlb->need_flush = 1; \
- __pmd_free_tlb(tlb, ptep, address); \
-} while (0)
-
-#define pud_free_tlb(tlb, pudp, address) \
-do { \
- tlb->need_flush = 1; \
- __pud_free_tlb(tlb, pudp, address); \
-} while (0)
+#include <asm-generic/tlb.h>
#endif /* _ASM_IA64_TLB_H */
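The header removed above hand-rolled the mmu_gather shootdown: queue the pages freed by an unmap, flush the TLB once for the whole batch, and only then return the pages. With the range tracking now handled by asm-generic/tlb.h, the ia64 copy can go. A minimal userspace sketch of the batching idea (illustrative names, stubbed flush; not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    #define BATCH_MAX 8

    struct gather {
        void *pages[BATCH_MAX];
        unsigned int nr;
        int need_flush;
    };

    /* Stand-in for the expensive TLB flush: runs once per batch. */
    static void flush_tlb(struct gather *g)
    {
        if (!g->need_flush)
            return;
        printf("flush TLB covering %u queued pages\n", g->nr);
        g->need_flush = 0;
    }

    /* Return the batched pages once the flush made them unreachable. */
    static void release_pages(struct gather *g)
    {
        unsigned int i;

        for (i = 0; i < g->nr; i++)
            free(g->pages[i]);
        g->nr = 0;
    }

    /* Queue one page; flush and free only when the batch fills up. */
    static void gather_page(struct gather *g, void *page)
    {
        g->need_flush = 1;
        g->pages[g->nr++] = page;
        if (g->nr == BATCH_MAX) {
            flush_tlb(g);
            release_pages(g);
        }
    }

    int main(void)
    {
        struct gather g = { .nr = 0 };
        int i;

        for (i = 0; i < 20; i++)
            gather_page(&g, malloc(64));
        flush_tlb(&g);      /* final flush for the partial batch */
        release_pages(&g);
        return 0;
    }

The point of the batch is that the expensive flush runs once per BATCH_MAX pages rather than once per page, which is what the removed ia64_tlb_flush_mmu()/__tlb_remove_page() pair implemented by hand.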
diff --git a/arch/ia64/include/asm/tlbflush.h b/arch/ia64/include/asm/tlbflush.h
index 25e280810f6c..ceac10c4d6e2 100644
--- a/arch/ia64/include/asm/tlbflush.h
+++ b/arch/ia64/include/asm/tlbflush.h
@@ -14,6 +14,31 @@
#include <asm/mmu_context.h>
#include <asm/page.h>
+struct ia64_tr_entry {
+ u64 ifa;
+ u64 itir;
+ u64 pte;
+ u64 rr;
+}; /* Record for TR entry */
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
+
+/*
+ * Region register macros
+ */
+#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK 0x0000000000000001L
+#define RR_VE_SHIFT 0
+#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK 0x00000000000000fcL
+#define RR_PS_SHIFT 2
+#define RR_RID_MASK 0x00000000ffffff00L
+#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
+
/*
* Now for some TLB flushing routines. This is the kind of stuff that
* can be very expensive, so try to avoid them whenever possible.
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index b71c5f787783..62a9522af51e 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -1,5 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_64.h
-generic-y += kvm_para.h
-generic-y += socket.h
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 91bd1e129379..5cabb3fd159f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -359,11 +359,6 @@ typedef struct ia64_state_log_s
static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
-#define IA64_LOG_ALLOCATE(it, size) \
- {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
- (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \
- ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
- (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -378,6 +373,19 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
#define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count
+static inline void ia64_log_allocate(int it, u64 size)
+{
+ ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
+ (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
+ panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+
+ ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
+ (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
+ panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+}
+
/*
* ia64_log_init
* Reset the OS ia64 log buffer
@@ -399,9 +407,7 @@ ia64_log_init(int sal_info_type)
return;
// set up OS data structures to hold error info
- IA64_LOG_ALLOCATE(sal_info_type, max_size);
- memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
- memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
+ ia64_log_allocate(sal_info_type, max_size);
}
/*
@@ -1835,8 +1841,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
/* Caller prevents this from being called after init */
static void * __ref mca_bootmem(void)
{
- return memblock_alloc_from(sizeof(struct ia64_mca_cpu),
- KERNEL_STACK_SIZE, 0);
+ return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
}
/* Do per-CPU MCA-related initialization. */
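The IA64_LOG_ALLOCATE macro becomes a function that checks the memblock_alloc() result and panics on failure, so a NULL return can no longer propagate into the log buffers (memblock_alloc() also zeroes the memory, which is why the memset() calls could be dropped above). A userspace sketch of the same check-or-die wrapper pattern (boot_alloc() is a hypothetical stand-in for memblock_alloc()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for memblock_alloc(): zeroed memory or NULL. */
    static void *boot_alloc(size_t size)
    {
        return calloc(1, size);
    }

    /* The pattern above: early-boot allocation failure is fatal, so the
     * caller never has to cope with a NULL log buffer.
     */
    static void *boot_alloc_or_die(size_t size)
    {
        void *p = boot_alloc(size);

        if (!p) {
            fprintf(stderr, "%s: failed to allocate %zu bytes\n",
                    __func__, size);
            abort();    /* the kernel calls panic() instead */
        }
        return p;
    }

    int main(void)
    {
        char *buf = boot_alloc_or_die(128);

        strcpy(buf, "ok");
        puts(buf);
        free(buf);
        return 0;
    }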
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 6d50ede0ed69..bf9c24d9ce84 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
}
void ia64_syscall_get_set_arguments(struct task_struct *task,
- struct pt_regs *regs, unsigned int i, unsigned int n,
- unsigned long *args, int rw)
+ struct pt_regs *regs, unsigned long *args, int rw)
{
struct syscall_get_set_args data = {
- .i = i,
- .n = n,
+ .i = 0,
+ .n = 6,
.args = args,
.regs = regs,
.rw = rw,
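The (i, n) pair disappears because the generic syscall_get_arguments()/syscall_set_arguments() interface was reduced to always operating on all six possible arguments; hard-coding .i = 0 and .n = 6 keeps the ia64 unwinder callback unchanged. A sketch of the fixed-slot convention (struct regs here is illustrative, not pt_regs):

    #include <stdio.h>

    #define SYSCALL_MAX_ARGS 6

    /* Illustrative register file; the real code reads the out
     * registers from the unwound pt_regs frame.
     */
    struct regs {
        unsigned long out[SYSCALL_MAX_ARGS];
    };

    /* Always copy all six slots, as the reworked helper does; callers
     * that need fewer arguments simply ignore the tail.
     */
    static void syscall_get_args(const struct regs *r,
                                 unsigned long args[SYSCALL_MAX_ARGS])
    {
        int i;

        for (i = 0; i < SYSCALL_MAX_ARGS; i++)
            args[i] = r->out[i];
    }

    int main(void)
    {
        struct regs r = { .out = { 1, 2, 3, 4, 5, 6 } };
        unsigned long args[SYSCALL_MAX_ARGS];

        syscall_get_args(&r, args);
        printf("arg0=%lu arg5=%lu\n", args[0], args[5]);
        return 0;
    }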
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 583a3746d70b..c9cfa760cd57 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1058,9 +1058,7 @@ check_bugs (void)
static int __init run_dmi_scan(void)
{
- dmi_scan_machine();
- dmi_memdev_walk();
- dmi_set_dump_stack_arch_desc();
+ dmi_setup();
return 0;
}
core_initcall(run_dmi_scan);
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index ab9cda5f6136..56e3d0b685e1 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -344,3 +344,7 @@
332 common pkey_free sys_pkey_free
333 common rseq sys_rseq
# 334 through 423 are reserved to sync up with other architectures
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
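These four entries wire ia64 up to the syscalls added elsewhere in this release. As a quick check that the numbers resolve, pidfd_send_signal() can be exercised through syscall(2); in this kernel generation a /proc/<pid> directory fd serves as the pidfd. A hedged example, assuming a libc new enough to define SYS_pidfd_send_signal (there is no wrapper yet):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        /* A /proc/<pid> directory fd acts as the pidfd here. */
        int pidfd = open("/proc/self", O_RDONLY | O_DIRECTORY);

        if (pidfd < 0) {
            perror("open");
            return 1;
        }
        /* Signal 0: existence/permission check, nothing delivered. */
        if (syscall(SYS_pidfd_send_signal, pidfd, 0, NULL, 0) < 0)
            perror("pidfd_send_signal");
        else
            puts("pidfd_send_signal: target exists");
        close(pidfd);
        return 0;
    }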
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 6e447234205c..d29fb6b9fa33 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -84,9 +84,13 @@ skip:
static inline void
alloc_per_cpu_data(void)
{
- cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(),
- PERCPU_PAGE_SIZE,
+ size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
+
+ cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
__pa(MAX_DMA_ADDRESS));
+ if (!cpu_data)
+ panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+ __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
/**
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index f9c36750c6a4..05490dd073e6 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -454,6 +454,10 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
__pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_ACCESSIBLE,
bestnode);
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
+ __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
+ __pa(MAX_DMA_ADDRESS));
return ptr;
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 29d841525ca1..e49200e31750 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -444,23 +444,45 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
for (address = start_page; address < end_page; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
- if (pgd_none(*pgd))
- pgd_populate(&init_mm, pgd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+ if (pgd_none(*pgd)) {
+ pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pud)
+ goto err_alloc;
+ pgd_populate(&init_mm, pgd, pud);
+ }
pud = pud_offset(pgd, address);
- if (pud_none(*pud))
- pud_populate(&init_mm, pud, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+ if (pud_none(*pud)) {
+ pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pmd)
+ goto err_alloc;
+ pud_populate(&init_mm, pud, pmd);
+ }
pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd))
- pmd_populate_kernel(&init_mm, pmd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+ if (pmd_none(*pmd)) {
+ pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pte)
+ goto err_alloc;
+ pmd_populate_kernel(&init_mm, pmd, pte);
+ }
pte = pte_offset_kernel(pmd, address);
- if (pte_none(*pte))
- set_pte(pte, pfn_pte(__pa(memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)) >> PAGE_SHIFT,
+ if (pte_none(*pte)) {
+ void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
+ node);
+ if (!page)
+ goto err_alloc;
+ set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
PAGE_KERNEL));
+ }
}
return 0;
+
+err_alloc:
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node);
+ return -ENOMEM;
}
struct memmap_init_callback_data {
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 9340bcb4f29c..5158bd28de05 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -61,8 +61,14 @@ mmu_context_init (void)
{
ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
SMP_CACHE_BYTES);
+ if (!ia64_ctx.bitmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ (ia64_ctx.max_ctx + 1) >> 3);
ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
SMP_CACHE_BYTES);
+ if (!ia64_ctx.flushmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ (ia64_ctx.max_ctx + 1) >> 3);
}
/*
@@ -299,8 +305,8 @@ local_flush_tlb_all (void)
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
-void
-flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
+static void
+__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
@@ -337,6 +343,25 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
preempt_enable();
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (unlikely(end - start >= 1024*1024*1024*1024UL
+ || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
+ /*
+ * If we flush more than a tera-byte or across regions, we're
+ * probably better off just flushing the entire TLB(s). This
+ * should be very rare and is not worth optimizing for.
+ */
+ flush_tlb_all();
+ } else {
+ /* flush the address range from the tlb */
+ __flush_tlb_range(vma, start, end);
+ /* flush the virt. page-table area mapping the addr range */
+ __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
+ }
+}
EXPORT_SYMBOL(flush_tlb_range);
void ia64_tlb_init(void)
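flush_tlb_range() now owns the too-big-to-walk heuristic that used to live in the removed ia64_tlb_flush_mmu_tlbonly(): ranges of a terabyte or more, or ranges crossing a region boundary, fall back to a full flush. The dispatch shape, as a standalone sketch (threshold value illustrative):

    #include <stdio.h>

    #define FLUSH_ALL_THRESHOLD (1UL << 20)    /* illustrative cutoff */

    static void flush_all(void)
    {
        puts("flush entire TLB");
    }

    static void flush_span(unsigned long start, unsigned long end)
    {
        printf("flush [%#lx, %#lx)\n", start, end);
    }

    /* Same shape as the new flush_tlb_range() wrapper: fall back to a
     * full flush when the range is too large to walk entry by entry.
     */
    static void flush_range(unsigned long start, unsigned long end)
    {
        if (end - start >= FLUSH_ALL_THRESHOLD)
            flush_all();
        else
            flush_span(start, end);
    }

    int main(void)
    {
        flush_range(0x1000, 0x9000);    /* small: ranged flush */
        flush_range(0, 64UL << 20);     /* big: full flush */
        return 0;
    }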
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index d27df1d45da7..9c349dd23265 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -7,7 +7,7 @@
# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc. All Rights Reserved.
#
-ccflags-y := -Iarch/ia64/sn/include
+ccflags-y := -I $(srctree)/arch/ia64/sn/include
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_acpi_init.o io_common.o \
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 8df13d0d96fa..d46847323ef6 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -394,6 +394,9 @@ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
hubdev_info = (struct hubdev_info *)memblock_alloc_node(size,
SMP_CACHE_BYTES,
node);
+ if (!hubdev_info)
+ panic("%s: Failed to allocate %d bytes align=0x%x nid=%d\n",
+ __func__, size, SMP_CACHE_BYTES, node);
npda->pdinfo = (void *)hubdev_info;
}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index a6d40a2c5bff..e6a5049ef503 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -513,6 +513,10 @@ static void __init sn_init_pdas(char **cmdline_p)
nodepdaindr[cnode] =
memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES,
cnode);
+ if (!nodepdaindr[cnode])
+ panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+ __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+ cnode);
memset(nodepdaindr[cnode]->phys_cpuid, -1,
sizeof(nodepdaindr[cnode]->phys_cpuid));
spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
@@ -521,9 +525,15 @@ static void __init sn_init_pdas(char **cmdline_p)
/*
* Allocate & initialize nodepda for TIOs. For now, put them on node 0.
*/
- for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
+ for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
nodepdaindr[cnode] =
memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0);
+ if (!nodepdaindr[cnode])
+ panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+ __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+ cnode);
+ }
+
/*
* Now copy the array of nodepda pointers to each nodepda.
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
index 3d09108d4277..170bde4549da 100644
--- a/arch/ia64/sn/kernel/sn2/Makefile
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -9,7 +9,5 @@
# sn2 specific kernel files
#
-ccflags-y := -Iarch/ia64/sn/include
-
obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index b73b0ebf8214..b510f4f17fd4 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -120,13 +120,6 @@ void sn_migrate(struct task_struct *task)
cpu_relax();
}
-void sn_tlb_migrate_finish(struct mm_struct *mm)
-{
- /* flush_tlb_mm is inefficient if more than 1 users of mm */
- if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
- flush_tlb_mm(mm);
-}
-
static void
sn2_ipi_flush_all_tlb(struct mm_struct *mm)
{
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index df2a90145426..321576b1b425 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,6 +7,4 @@
#
# Makefile for the sn pci general routines.
-ccflags-y := -Iarch/ia64/sn/include
-
obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
index 396bcae36309..712f6af7c6e0 100644
--- a/arch/ia64/sn/pci/pcibr/Makefile
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -7,7 +7,7 @@
#
# Makefile for the sn2 io routines.
-ccflags-y := -Iarch/ia64/sn/include
+ccflags-y := -I $(srctree)/arch/ia64/sn/include
obj-y += pcibr_dma.o pcibr_reg.o \
pcibr_ate.o pcibr_provider.o
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b54206408f91..fe5cc2da6d10 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -20,7 +20,6 @@ config M68K
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
select ARCH_WANT_IPC_PARSE_VERSION
- select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
@@ -28,17 +27,11 @@ config M68K
select OLD_SIGSUSPEND3
select OLD_SIGACTION
select ARCH_DISCARD_MEMBLOCK
+ select MMU_GATHER_NO_RANGE if MMU
config CPU_BIG_ENDIAN
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- bool
- default y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config ARCH_HAS_ILOG2_U32
bool
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index 2081b8cd5591..b9aee983e6f4 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -88,10 +88,19 @@ static irqreturn_t cia_handler(int irq, void *dev_id)
struct ciabase *base = dev_id;
int mach_irq;
unsigned char ints;
+ unsigned long flags;
+ /* Interrupts get disabled while the timer irq flag is cleared and
+ * the timer interrupt is serviced.
+ */
mach_irq = base->cia_irq;
+ local_irq_save(flags);
ints = cia_set_irq(base, CIA_ICR_ALL);
amiga_custom.intreq = base->int_mask;
+ if (ints & 1)
+ generic_handle_irq(mach_irq);
+ local_irq_restore(flags);
+ mach_irq++, ints >>= 1;
for (; ints; mach_irq++, ints >>= 1) {
if (ints & 1)
generic_handle_irq(mach_irq);
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 65f63a457130..c32ab8041cf6 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
+#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/rtc.h>
#include <linux/init.h>
@@ -95,8 +96,6 @@ static char amiga_model_name[13] = "Amiga ";
static void amiga_sched_init(irq_handler_t handler);
static void amiga_get_model(char *model);
static void amiga_get_hardware_list(struct seq_file *m);
-/* amiga specific timer functions */
-static u32 amiga_gettimeoffset(void);
extern void amiga_mksound(unsigned int count, unsigned int ticks);
static void amiga_reset(void);
extern void amiga_init_sound(void);
@@ -386,7 +385,6 @@ void __init config_amiga(void)
mach_init_IRQ = amiga_init_IRQ;
mach_get_model = amiga_get_model;
mach_get_hardware_list = amiga_get_hardware_list;
- arch_gettimeoffset = amiga_gettimeoffset;
/*
* default MAX_DMA=0xffffffff on all machines. If we don't do so, the SCSI
@@ -464,7 +462,29 @@ void __init config_amiga(void)
*(unsigned char *)ZTWO_VADDR(0xde0002) |= 0x80;
}
+static u64 amiga_read_clk(struct clocksource *cs);
+
+static struct clocksource amiga_clk = {
+ .name = "ciab",
+ .rating = 250,
+ .read = amiga_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
static unsigned short jiffy_ticks;
+static u32 clk_total, clk_offset;
+
+static irqreturn_t ciab_timer_handler(int irq, void *dev_id)
+{
+ irq_handler_t timer_routine = dev_id;
+
+ clk_total += jiffy_ticks;
+ clk_offset = 0;
+ timer_routine(0, NULL);
+
+ return IRQ_HANDLED;
+}
static void __init amiga_sched_init(irq_handler_t timer_routine)
{
@@ -484,19 +504,22 @@ static void __init amiga_sched_init(irq_handler_t timer_routine)
* Please don't change this to use ciaa, as it interferes with the
* SCSI code. We'll have to take a look at this later
*/
- if (request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL))
+ if (request_irq(IRQ_AMIGA_CIAB_TA, ciab_timer_handler, IRQF_TIMER,
+ "timer", timer_routine))
pr_err("Couldn't register timer interrupt\n");
/* start timer */
ciab.cra |= 0x11;
-}
-#define TICK_SIZE 10000
+ clocksource_register_hz(&amiga_clk, amiga_eclock);
+}
-/* This is always executed with interrupts disabled. */
-static u32 amiga_gettimeoffset(void)
+static u64 amiga_read_clk(struct clocksource *cs)
{
unsigned short hi, lo, hi2;
- u32 ticks, offset = 0;
+ unsigned long flags;
+ u32 ticks;
+
+ local_irq_save(flags);
/* read CIA B timer A current value */
hi = ciab.tahi;
@@ -513,12 +536,14 @@ static u32 amiga_gettimeoffset(void)
if (ticks > jiffy_ticks / 2)
/* check for pending interrupt */
if (cia_set_irq(&ciab_base, 0) & CIA_ICR_TA)
- offset = 10000;
+ clk_offset = jiffy_ticks;
ticks = jiffy_ticks - ticks;
- ticks = (10000 * ticks) / jiffy_ticks;
+ ticks += clk_offset + clk_total;
+
+ local_irq_restore(flags);
- return (ticks + offset) * 1000;
+ return ticks;
}
static void amiga_reset(void) __noreturn;
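This is the recurring shape of the m68k conversions in this series: the old arch_gettimeoffset() (microseconds since the last tick) becomes a clocksource whose read() returns a free-running cycle count, built as cycles from completed periods (clk_total, bumped in the timer handler) plus progress into the current period, with clk_offset compensating for a tick interrupt that has fired but not yet been serviced. A minimal model of the accumulation (no pending-irq handling, illustrative values):

    #include <stdio.h>

    #define PERIOD 10000U    /* timer reload value, in timer cycles */

    static unsigned int downcount = PERIOD; /* hardware down-counter */
    static unsigned int clk_total;          /* completed-period cycles */

    /* Tick interrupt: a full period has elapsed. */
    static void timer_tick(void)
    {
        clk_total += PERIOD;
        downcount = PERIOD;
    }

    /* Clocksource-style read: completed periods plus progress into
     * the current one; the result only ever moves forward.
     */
    static unsigned long long read_clk(void)
    {
        return clk_total + (PERIOD - downcount);
    }

    int main(void)
    {
        downcount = 7500;               /* 2500 cycles into period */
        printf("t0 = %llu\n", read_clk());
        timer_tick();
        downcount = 4000;
        printf("t1 = %llu\n", read_clk());
        return 0;
    }

clocksource_register_hz() then lets the timekeeping core convert those raw cycles to nanoseconds using the E-clock rate, instead of each platform scaling to microseconds by hand.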
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index aef8d42e078d..7d168e6dfb01 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -29,7 +29,6 @@ u_long apollo_model;
extern void dn_sched_init(irq_handler_t handler);
extern void dn_init_IRQ(void);
-extern u32 dn_gettimeoffset(void);
extern int dn_dummy_hwclk(int, struct rtc_time *);
extern void dn_dummy_reset(void);
#ifdef CONFIG_HEARTBEAT
@@ -152,7 +151,6 @@ void __init config_apollo(void)
mach_sched_init=dn_sched_init; /* */
mach_init_IRQ=dn_init_IRQ;
- arch_gettimeoffset = dn_gettimeoffset;
mach_max_dma_address = 0xffffffff;
mach_hwclk = dn_dummy_hwclk; /* */
mach_reset = dn_dummy_reset; /* */
@@ -205,11 +203,6 @@ void dn_sched_init(irq_handler_t timer_routine)
pr_err("Couldn't register timer interrupt\n");
}
-u32 dn_gettimeoffset(void)
-{
- return 0xdeadbeef;
-}
-
int dn_dummy_hwclk(int op, struct rtc_time *t) {
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 3d2b63bedf05..56f02ea2c248 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -142,7 +142,7 @@ struct mfptimerbase {
.name = "MFP Timer D"
};
-static irqreturn_t mfptimer_handler(int irq, void *dev_id)
+static irqreturn_t mfp_timer_d_handler(int irq, void *dev_id)
{
struct mfptimerbase *base = dev_id;
int mach_irq;
@@ -344,7 +344,7 @@ void __init atari_init_IRQ(void)
st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 0xf0) | 0x6;
/* request timer D dispatch handler */
- if (request_irq(IRQ_MFP_TIMD, mfptimer_handler, IRQF_SHARED,
+ if (request_irq(IRQ_MFP_TIMD, mfp_timer_d_handler, IRQF_SHARED,
stmfp_base.name, &stmfp_base))
pr_err("Couldn't register %s interrupt\n", stmfp_base.name);
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 4fcc4b1df1c0..902255e7b5b2 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -78,7 +78,6 @@ static void atari_heartbeat(int on);
/* atari specific timer functions (in time.c) */
extern void atari_sched_init(irq_handler_t);
-extern u32 atari_gettimeoffset(void);
extern int atari_mste_hwclk (int, struct rtc_time *);
extern int atari_tt_hwclk (int, struct rtc_time *);
@@ -205,7 +204,6 @@ void __init config_atari(void)
mach_init_IRQ = atari_init_IRQ;
mach_get_model = atari_get_model;
mach_get_hardware_list = atari_get_hardware_list;
- arch_gettimeoffset = atari_gettimeoffset;
mach_reset = atari_reset;
mach_max_dma_address = 0xffffff;
#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index 6ffc204eb07d..6152f9f631d2 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -97,6 +97,10 @@ void __init atari_stram_reserve_pages(void *start_mem)
pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n");
stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size,
PAGE_SIZE);
+ if (!stram_pool.start)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, pool_size, PAGE_SIZE);
+
stram_pool.end = stram_pool.start + pool_size - 1;
request_resource(&iomem_resource, &stram_pool);
stram_virt_offset = 0;
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index 9cca64286464..ce923a523695 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
+#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/export.h>
@@ -24,6 +25,35 @@
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
+static u64 atari_read_clk(struct clocksource *cs);
+
+static struct clocksource atari_clk = {
+ .name = "mfp",
+ .rating = 100,
+ .read = atari_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total;
+static u8 last_timer_count;
+
+static irqreturn_t mfp_timer_c_handler(int irq, void *dev_id)
+{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ do {
+ last_timer_count = st_mfp.tim_dt_c;
+ } while (last_timer_count == 1);
+ clk_total += INT_TICKS;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
+}
+
void __init
atari_sched_init(irq_handler_t timer_routine)
{
@@ -32,31 +62,33 @@ atari_sched_init(irq_handler_t timer_routine)
/* start timer C, div = 1:100 */
st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
/* install interrupt service routine for MFP Timer C */
- if (request_irq(IRQ_MFP_TIMC, timer_routine, 0, "timer", timer_routine))
+ if (request_irq(IRQ_MFP_TIMC, mfp_timer_c_handler, IRQF_TIMER, "timer",
+ timer_routine))
pr_err("Couldn't register timer interrupt\n");
+
+ clocksource_register_hz(&atari_clk, INT_CLK);
}
/* ++andreas: gettimeoffset fixed to check for pending interrupt */
-#define TICK_SIZE 10000
-
-/* This is always executed with interrupts disabled. */
-u32 atari_gettimeoffset(void)
+static u64 atari_read_clk(struct clocksource *cs)
{
- u32 ticks, offset = 0;
-
- /* read MFP timer C current value */
- ticks = st_mfp.tim_dt_c;
- /* The probability of underflow is less than 2% */
- if (ticks > INT_TICKS - INT_TICKS / 50)
- /* Check for pending timer interrupt */
- if (st_mfp.int_pn_b & (1 << 5))
- offset = TICK_SIZE;
-
- ticks = INT_TICKS - ticks;
- ticks = ticks * 10000L / INT_TICKS;
-
- return (ticks + offset) * 1000;
+ unsigned long flags;
+ u8 count;
+ u32 ticks;
+
+ local_irq_save(flags);
+ /* Ensure that the count is monotonically decreasing, even though
+ * the result may briefly stop changing after counter wrap-around.
+ */
+ count = min(st_mfp.tim_dt_c, last_timer_count);
+ last_timer_count = count;
+
+ ticks = INT_TICKS - count;
+ ticks += clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
}
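The min() against last_timer_count deals with reading a down-counter that the tick handler reloads: a raw read taken just around wrap-around could come back larger than the previous one and make the clock jump backwards, so the read is clamped to be non-increasing within a period. The same idea in isolation:

    #include <stdio.h>

    static unsigned char last_count = 255;

    /* The MFP counter counts down, so within one period a later read
     * is never larger than an earlier one; clamp out the glitch.
     */
    static unsigned char stable_read(unsigned char hw_count)
    {
        if (hw_count > last_count)
            hw_count = last_count;  /* torn/wrapped read: hold */
        last_count = hw_count;
        return hw_count;
    }

    int main(void)
    {
        printf("%u\n", stable_read(150));   /* normal: 150 */
        printf("%u\n", stable_read(180));   /* glitch: clamped to 150 */
        return 0;
    }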
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 143ee9fa3893..8ebaabc931cd 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
+#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/linkage.h>
#include <linux/init.h>
@@ -39,16 +40,10 @@
static void bvme6000_get_model(char *model);
extern void bvme6000_sched_init(irq_handler_t handler);
-extern u32 bvme6000_gettimeoffset(void);
extern int bvme6000_hwclk (int, struct rtc_time *);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/timer/timekeeping.c, called via bvme6000_process_int() */
-
-static irq_handler_t tick_handler;
-
int __init bvme6000_parse_bootinfo(const struct bi_record *bi)
{
@@ -110,7 +105,6 @@ void __init config_bvme6000(void)
mach_max_dma_address = 0xffffffff;
mach_sched_init = bvme6000_sched_init;
mach_init_IRQ = bvme6000_init_IRQ;
- arch_gettimeoffset = bvme6000_gettimeoffset;
mach_hwclk = bvme6000_hwclk;
mach_reset = bvme6000_reset;
mach_get_model = bvme6000_get_model;
@@ -154,15 +148,38 @@ irqreturn_t bvme6000_abort_int (int irq, void *dev_id)
return IRQ_HANDLED;
}
+static u64 bvme6000_read_clk(struct clocksource *cs);
+
+static struct clocksource bvme6000_clk = {
+ .name = "rtc",
+ .rating = 250,
+ .read = bvme6000_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total, clk_offset;
+
+#define RTC_TIMER_CLOCK_FREQ 8000000
+#define RTC_TIMER_CYCLES (RTC_TIMER_CLOCK_FREQ / HZ)
+#define RTC_TIMER_COUNT ((RTC_TIMER_CYCLES / 2) - 1)
static irqreturn_t bvme6000_timer_int (int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
- unsigned char msr = rtc->msr & 0xc0;
+ unsigned char msr;
+ local_irq_save(flags);
+ msr = rtc->msr & 0xc0;
rtc->msr = msr | 0x20; /* Ack the interrupt */
+ clk_total += RTC_TIMER_CYCLES;
+ clk_offset = 0;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
- return tick_handler(irq, dev_id);
+ return IRQ_HANDLED;
}
/*
@@ -181,14 +198,13 @@ void bvme6000_sched_init (irq_handler_t timer_routine)
rtc->msr = 0; /* Ensure timer registers accessible */
- tick_handler = timer_routine;
- if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, 0,
- "timer", bvme6000_timer_int))
+ if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, IRQF_TIMER, "timer",
+ timer_routine))
panic ("Couldn't register timer int");
rtc->t1cr_omr = 0x04; /* Mode 2, ext clk */
- rtc->t1msb = 39999 >> 8;
- rtc->t1lsb = 39999 & 0xff;
+ rtc->t1msb = RTC_TIMER_COUNT >> 8;
+ rtc->t1lsb = RTC_TIMER_COUNT & 0xff;
rtc->irr_icr1 &= 0xef; /* Route timer 1 to INTR pin */
rtc->msr = 0x40; /* Access int.cntrl, etc */
rtc->pfr_icr0 = 0x80; /* Just timer 1 ints enabled */
@@ -200,14 +216,14 @@ void bvme6000_sched_init (irq_handler_t timer_routine)
rtc->msr = msr;
+ clocksource_register_hz(&bvme6000_clk, RTC_TIMER_CLOCK_FREQ);
+
if (request_irq(BVME_IRQ_ABORT, bvme6000_abort_int, 0,
"abort", bvme6000_abort_int))
panic ("Couldn't register abort int");
}
-/* This is always executed with interrupts disabled. */
-
/*
* NOTE: Don't accept any readings within 5us of rollover, as
* the T1INT bit may be a little slow getting set. There is also
@@ -215,14 +231,18 @@ void bvme6000_sched_init (irq_handler_t timer_routine)
* results...
*/
-u32 bvme6000_gettimeoffset(void)
+static u64 bvme6000_read_clk(struct clocksource *cs)
{
+ unsigned long flags;
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
- unsigned char msr = rtc->msr & 0xc0;
+ unsigned char msr, msb;
unsigned char t1int, t1op;
u32 v = 800000, ov;
+ local_irq_save(flags);
+
+ msr = rtc->msr & 0xc0;
rtc->msr = 0; /* Ensure timer registers accessible */
do {
@@ -230,22 +250,25 @@ u32 bvme6000_gettimeoffset(void)
t1int = rtc->msr & 0x20;
t1op = pit->pcdr & 0x04;
rtc->t1cr_omr |= 0x40; /* Latch timer1 */
- v = rtc->t1msb << 8; /* Read timer1 */
- v |= rtc->t1lsb; /* Read timer1 */
+ msb = rtc->t1msb; /* Read timer1 */
+ v = (msb << 8) | rtc->t1lsb; /* Read timer1 */
} while (t1int != (rtc->msr & 0x20) ||
t1op != (pit->pcdr & 0x04) ||
abs(ov-v) > 80 ||
- v > 39960);
+ v > RTC_TIMER_COUNT - (RTC_TIMER_COUNT / 100));
- v = 39999 - v;
+ v = RTC_TIMER_COUNT - v;
if (!t1op) /* If in second half cycle.. */
- v += 40000;
- v /= 8; /* Convert ticks to microseconds */
- if (t1int)
- v += 10000; /* Int pending, + 10ms */
+ v += RTC_TIMER_CYCLES / 2;
+ if (msb > 0 && t1int)
+ clk_offset = RTC_TIMER_CYCLES;
rtc->msr = msr;
- return v * 1000;
+ v += clk_offset + clk_total;
+
+ local_irq_restore(flags);
+
+ return v;
}
/*
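The do/while above re-reads the latched timer until two consecutive samples agree (plus the rollover and sanity limits in the loop condition), because a 16-bit value read as two 8-bit halves can tear. The core of that technique, with the extra guards dropped and a fake racy register (read_hw() is illustrative):

    #include <stdio.h>

    /* Simulated racy 16-bit latch: the first sample is bogus. */
    static unsigned int read_hw(void)
    {
        static int calls;

        return (++calls == 1) ? 0xFFFF : 1234;
    }

    /* Same idea as the bvme6000 loop: re-read until two consecutive
     * samples agree before trusting the value.
     */
    static unsigned int read_stable(void)
    {
        unsigned int v = read_hw(), ov;

        do {
            ov = v;
            v = read_hw();
        } while (v != ov);
        return v;
    }

    int main(void)
    {
        printf("stable value: %u\n", read_stable());
        return 0;
    }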
diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
index 908d58347790..b4103b6bfdeb 100644
--- a/arch/m68k/coldfire/device.c
+++ b/arch/m68k/coldfire/device.c
@@ -14,11 +14,14 @@
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/fec.h>
+#include <linux/dmaengine.h>
#include <asm/traps.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/mcfqspi.h>
+#include <linux/platform_data/edma.h>
+#include <linux/platform_data/dma-mcf-edma.h>
/*
* All current ColdFire parts contain 2, 3, 4 or 10 UARTs.
@@ -476,6 +479,81 @@ static struct platform_device mcf_i2c5 = {
#endif /* MCFI2C_BASE5 */
#endif /* IS_ENABLED(CONFIG_I2C_IMX) */
+#if IS_ENABLED(CONFIG_MCF_EDMA)
+
+static const struct dma_slave_map mcf_edma_map[] = {
+ { "dreq0", "rx-tx", MCF_EDMA_FILTER_PARAM(0) },
+ { "dreq1", "rx-tx", MCF_EDMA_FILTER_PARAM(1) },
+ { "uart.0", "rx", MCF_EDMA_FILTER_PARAM(2) },
+ { "uart.0", "tx", MCF_EDMA_FILTER_PARAM(3) },
+ { "uart.1", "rx", MCF_EDMA_FILTER_PARAM(4) },
+ { "uart.1", "tx", MCF_EDMA_FILTER_PARAM(5) },
+ { "uart.2", "rx", MCF_EDMA_FILTER_PARAM(6) },
+ { "uart.2", "tx", MCF_EDMA_FILTER_PARAM(7) },
+ { "timer0", "rx-tx", MCF_EDMA_FILTER_PARAM(8) },
+ { "timer1", "rx-tx", MCF_EDMA_FILTER_PARAM(9) },
+ { "timer2", "rx-tx", MCF_EDMA_FILTER_PARAM(10) },
+ { "timer3", "rx-tx", MCF_EDMA_FILTER_PARAM(11) },
+ { "fsl-dspi.0", "rx", MCF_EDMA_FILTER_PARAM(12) },
+ { "fsl-dspi.0", "tx", MCF_EDMA_FILTER_PARAM(13) },
+ { "fsl-dspi.1", "rx", MCF_EDMA_FILTER_PARAM(14) },
+ { "fsl-dspi.1", "tx", MCF_EDMA_FILTER_PARAM(15) },
+};
+
+static struct mcf_edma_platform_data mcf_edma_data = {
+ .dma_channels = 64,
+ .slave_map = mcf_edma_map,
+ .slavecnt = ARRAY_SIZE(mcf_edma_map),
+};
+
+static struct resource mcf_edma_resources[] = {
+ {
+ .start = MCFEDMA_BASE,
+ .end = MCFEDMA_BASE + MCFEDMA_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFEDMA_IRQ_INTR0,
+ .end = MCFEDMA_IRQ_INTR0 + 15,
+ .flags = IORESOURCE_IRQ,
+ .name = "edma-tx-00-15",
+ },
+ {
+ .start = MCFEDMA_IRQ_INTR16,
+ .end = MCFEDMA_IRQ_INTR16 + 39,
+ .flags = IORESOURCE_IRQ,
+ .name = "edma-tx-16-55",
+ },
+ {
+ .start = MCFEDMA_IRQ_INTR56,
+ .end = MCFEDMA_IRQ_INTR56,
+ .flags = IORESOURCE_IRQ,
+ .name = "edma-tx-56-63",
+ },
+ {
+ .start = MCFEDMA_IRQ_ERR,
+ .end = MCFEDMA_IRQ_ERR,
+ .flags = IORESOURCE_IRQ,
+ .name = "edma-err",
+ },
+};
+
+static u64 mcf_edma_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device mcf_edma = {
+ .name = "mcf-edma",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(mcf_edma_resources),
+ .resource = mcf_edma_resources,
+ .dev = {
+ .dma_mask = &mcf_edma_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &mcf_edma_data,
+ }
+};
+
+#endif /* IS_ENABLED(CONFIG_MCF_EDMA) */
+
static struct platform_device *mcf_devices[] __initdata = {
&mcf_uart,
#if IS_ENABLED(CONFIG_FEC)
@@ -505,6 +583,9 @@ static struct platform_device *mcf_devices[] __initdata = {
&mcf_i2c5,
#endif
#endif
+#if IS_ENABLED(CONFIG_MCF_EDMA)
+ &mcf_edma,
+#endif
};
/*
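The dma_slave_map table is how non-DT platforms route a driver's dma_request_chan(dev, "rx") call to an eDMA channel: the core matches the requesting device name and role string, then hands the filter parameter to the controller driver. A miniature of that lookup (structure mirrors the table above; values illustrative):

    #include <stdio.h>
    #include <string.h>

    struct dma_slave_map {
        const char *devname;
        const char *slave;
        unsigned int param;    /* channel request line */
    };

    /* Subset of the table added above; values are illustrative. */
    static const struct dma_slave_map map[] = {
        { "uart.0", "rx", 2 },
        { "uart.0", "tx", 3 },
        { "fsl-dspi.0", "rx", 12 },
    };

    /* What the DMA core does with such a table, in miniature: match
     * device name plus role, return the routing parameter.
     */
    static int lookup(const char *dev, const char *slave)
    {
        size_t i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
            if (!strcmp(map[i].devname, dev) &&
                !strcmp(map[i].slave, slave))
                return map[i].param;
        return -1;
    }

    int main(void)
    {
        printf("uart.0 tx -> channel %d\n", lookup("uart.0", "tx"));
        return 0;
    }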
diff --git a/arch/m68k/coldfire/m5441x.c b/arch/m68k/coldfire/m5441x.c
index 55392af845fb..5bd24c9b865d 100644
--- a/arch/m68k/coldfire/m5441x.c
+++ b/arch/m68k/coldfire/m5441x.c
@@ -137,6 +137,8 @@ struct clk *mcf_clks[] = {
static struct clk * const enable_clks[] __initconst = {
/* make sure these clocks are enabled */
+ &__clk_0_15, /* dspi.1 */
+ &__clk_0_17, /* eDMA */
&__clk_0_18, /* intc0 */
&__clk_0_19, /* intc0 */
&__clk_0_20, /* intc0 */
@@ -157,8 +159,6 @@ static struct clk * const disable_clks[] __initconst = {
&__clk_0_8, /* can.0 */
&__clk_0_9, /* can.1 */
&__clk_0_14, /* i2c.1 */
- &__clk_0_15, /* dspi.1 */
- &__clk_0_17, /* eDMA */
&__clk_0_22, /* i2c.0 */
&__clk_0_23, /* dspi.0 */
&__clk_0_28, /* tmr.1 */
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 525421ae277d..fea392cfcf1b 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -56,6 +56,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -210,9 +211,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -234,9 +232,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -313,7 +308,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -460,12 +454,12 @@ CONFIG_RTC_DRV_RP5C01=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -573,9 +567,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -640,6 +636,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -649,4 +646,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index db0e654a88d5..2474d267460e 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -52,6 +52,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -206,9 +207,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -230,9 +228,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -309,7 +304,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -420,12 +414,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -533,9 +527,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -600,6 +596,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -609,4 +606,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 1451168eb789..0fc7d2992fe0 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -59,6 +59,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -213,9 +214,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -237,9 +235,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -316,7 +311,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -442,12 +436,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -555,9 +549,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -622,6 +618,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -631,4 +628,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index b0d3609f5bb3..699df9fdf866 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -49,6 +49,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -203,9 +204,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -227,9 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -306,7 +301,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -413,12 +407,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -526,9 +520,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -593,6 +589,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -602,4 +599,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 4ed7c151347c..b50802255324 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -51,6 +51,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -205,9 +206,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -229,9 +227,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -308,7 +303,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -422,12 +416,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -535,9 +529,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -602,6 +598,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -611,4 +608,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 0dc544e1ce1f..04e7d70f6030 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -50,6 +50,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -204,9 +205,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -228,9 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -310,7 +305,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -444,12 +438,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -557,9 +551,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -624,6 +620,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -633,4 +630,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 5a7b7b0d6e72..5e1cc4c17852 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -70,6 +70,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -224,9 +225,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -248,9 +246,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -330,7 +325,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -526,12 +520,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -639,9 +633,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -706,6 +702,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -715,4 +712,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 71eb9be1803b..170ac8792c2d 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -48,6 +48,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -202,9 +203,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -226,9 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -305,7 +300,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -412,12 +406,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -525,9 +519,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -592,6 +588,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -601,4 +598,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index ea2ebd4241c0..d865592a423e 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -49,6 +49,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -203,9 +204,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -227,9 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -306,7 +301,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -413,12 +407,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -526,9 +520,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -593,6 +589,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -602,4 +599,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index cef6dc47c725..034a9de90484 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -50,6 +50,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -204,9 +205,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -228,9 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -307,7 +302,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -431,12 +425,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -544,9 +538,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -611,6 +607,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -620,4 +617,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 69f2282dc4e9..49be0f9fcd8d 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -46,6 +46,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -200,9 +201,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -224,9 +222,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -303,7 +298,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -415,12 +409,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -528,9 +522,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -595,6 +591,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -604,3 +601,4 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index e91267e868b2..a71acf4a6004 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -46,6 +46,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -200,9 +201,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -224,9 +222,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -303,7 +298,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -414,12 +408,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -527,9 +521,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -594,6 +590,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -603,4 +600,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index a19bcd23f80b..a161d44fd20b 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -254,7 +254,6 @@ void __init config_hp300(void)
mach_sched_init = hp300_sched_init;
mach_init_IRQ = hp300_init_IRQ;
mach_get_model = hp300_get_model;
- arch_gettimeoffset = hp300_gettimeoffset;
mach_hwclk = hp300_hwclk;
mach_get_ss = hp300_get_ss;
mach_reset = hp300_reset;
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
index 289d928a46cb..bfee13e1d0fe 100644
--- a/arch/m68k/hp300/time.c
+++ b/arch/m68k/hp300/time.c
@@ -8,6 +8,7 @@
*/
#include <asm/ptrace.h>
+#include <linux/clocksource.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -19,6 +20,18 @@
#include <asm/traps.h>
#include <asm/blinken.h>
+static u64 hp300_read_clk(struct clocksource *cs);
+
+static struct clocksource hp300_clk = {
+ .name = "timer",
+ .rating = 250,
+ .read = hp300_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total, clk_offset;
+
/* Clock hardware definitions */
#define CLOCKBASE 0xf05f8000
@@ -28,39 +41,61 @@
#define CLKCR3 CLKCR1
#define CLKSR CLKCR2
#define CLKMSB1 0x5
+#define CLKLSB1 0x7
#define CLKMSB2 0x9
#define CLKMSB3 0xD
+#define CLKSR_INT1 BIT(0)
+
/* This is for machines which generate the exact clock. */
-#define USECS_PER_JIFFY (1000000/HZ)
-#define INTVAL ((10000 / 4) - 1)
+#define HP300_TIMER_CLOCK_FREQ 250000
+#define HP300_TIMER_CYCLES (HP300_TIMER_CLOCK_FREQ / HZ)
+#define INTVAL (HP300_TIMER_CYCLES - 1)
static irqreturn_t hp300_tick(int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
unsigned long tmp;
- irq_handler_t vector = dev_id;
+
+ local_irq_save(flags);
in_8(CLOCKBASE + CLKSR);
asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE));
+ clk_total += INTVAL;
+ clk_offset = 0;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
/* Turn off the network and SCSI leds */
blinken_leds(0, 0xe0);
- return vector(irq, NULL);
+ return IRQ_HANDLED;
}
-u32 hp300_gettimeoffset(void)
+static u64 hp300_read_clk(struct clocksource *cs)
{
- /* Read current timer 1 value */
- unsigned char lsb, msb1, msb2;
- unsigned short ticks;
-
- msb1 = in_8(CLOCKBASE + 5);
- lsb = in_8(CLOCKBASE + 7);
- msb2 = in_8(CLOCKBASE + 5);
- if (msb1 != msb2)
- /* A carry happened while we were reading. Read it again */
- lsb = in_8(CLOCKBASE + 7);
- ticks = INTVAL - ((msb2 << 8) | lsb);
- return ((USECS_PER_JIFFY * ticks) / INTVAL) * 1000;
+ unsigned long flags;
+ unsigned char lsb, msb, msb_new;
+ u32 ticks;
+
+ local_irq_save(flags);
+ /* Read current timer 1 value */
+ msb = in_8(CLOCKBASE + CLKMSB1);
+again:
+ if ((in_8(CLOCKBASE + CLKSR) & CLKSR_INT1) && msb > 0)
+ clk_offset = INTVAL;
+ lsb = in_8(CLOCKBASE + CLKLSB1);
+ msb_new = in_8(CLOCKBASE + CLKMSB1);
+ if (msb_new != msb) {
+ msb = msb_new;
+ goto again;
+ }
+
+ ticks = INTVAL - ((msb << 8) | lsb);
+ ticks += clk_offset + clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
}
void __init hp300_sched_init(irq_handler_t vector)
@@ -70,9 +105,11 @@ void __init hp300_sched_init(irq_handler_t vector)
asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
- if (request_irq(IRQ_AUTO_6, hp300_tick, 0, "timer tick", vector))
+ if (request_irq(IRQ_AUTO_6, hp300_tick, IRQF_TIMER, "timer tick", vector))
pr_err("Couldn't register timer interrupt\n");
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */
+
+ clocksource_register_hz(&hp300_clk, HP300_TIMER_CLOCK_FREQ);
}
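
The hp300 conversion above is the template repeated for mac, mvme147 and mvme16x further down: the timer interrupt accumulates whole timer periods in clk_total, and the clocksource read callback adds the position within the current period. A minimal sketch of the shared pattern; read_hw_counter() stands in for the board-specific register access and is not a real API:

#include <linux/clocksource.h>
#include <linux/interrupt.h>

#define TIMER_FREQ	250000			/* counter ticks per second */
#define TIMER_CYCLES	(TIMER_FREQ / HZ)	/* ticks per timer interrupt */

extern u16 read_hw_counter(void);	/* placeholder for the register read */

static u32 clk_total;			/* completed periods, in ticks */

static u64 example_read_clk(struct clocksource *cs)
{
	/* ticks elapsed in the current period + all completed periods */
	return (TIMER_CYCLES - read_hw_counter()) + clk_total;
}

static struct clocksource example_clk = {
	.name	= "example",
	.rating	= 250,
	.read	= example_read_clk,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static irqreturn_t example_tick(int irq, void *dev_id)
{
	irq_handler_t timer_routine = dev_id;

	clk_total += TIMER_CYCLES;	/* one full period has elapsed */
	timer_routine(0, NULL);		/* legacy m68k tick handler */
	return IRQ_HANDLED;
}

/* at init: clocksource_register_hz(&example_clk, TIMER_FREQ); */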
diff --git a/arch/m68k/hp300/time.h b/arch/m68k/hp300/time.h
index f5583ec4033d..1d77b55cc72a 100644
--- a/arch/m68k/hp300/time.h
+++ b/arch/m68k/hp300/time.h
@@ -1,2 +1 @@
extern void hp300_sched_init(irq_handler_t vector);
-extern u32 hp300_gettimeoffset(void);
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 95f8f631c4df..0ddae4a74adb 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -13,10 +13,12 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 782b78f8a048..6c03ca5bc436 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -377,8 +377,6 @@ static inline void isa_delay(void)
#define writesw(port, buf, nr) raw_outsw((port), (u16 *)(buf), (nr))
#define writesl(port, buf, nr) raw_outsl((port), (u32 *)(buf), (nr))
-#define mmiowb()
-
#ifndef CONFIG_SUN3
#define IO_SPACE_LIMIT 0xffff
#else
diff --git a/arch/m68k/include/asm/m5441xsim.h b/arch/m68k/include/asm/m5441xsim.h
index c87556d5581c..4892f314ff38 100644
--- a/arch/m68k/include/asm/m5441xsim.h
+++ b/arch/m68k/include/asm/m5441xsim.h
@@ -282,6 +282,21 @@
* DSPI module.
*/
#define MCFDSPI_BASE0 0xfc05c000
+#define MCFDSPI_BASE1 0xfc03c000
#define MCF_IRQ_DSPI0 (MCFINT0_VECBASE + MCFINT0_DSPI0)
+#define MCF_IRQ_DSPI1 (MCFINT1_VECBASE + MCFINT1_DSPI1)
+/*
+ * eDMA module.
+ */
+#define MCFEDMA_BASE 0xfc044000
+#define MCFEDMA_SIZE 0x4000
+#define MCFINT0_EDMA_INTR0 8
+#define MCFINT0_EDMA_ERR 24
+#define MCFEDMA_EDMA_INTR16 8
+#define MCFEDMA_EDMA_INTR56 0
+#define MCFEDMA_IRQ_INTR0 (MCFINT0_VECBASE + MCFINT0_EDMA_INTR0)
+#define MCFEDMA_IRQ_INTR16 (MCFINT1_VECBASE + MCFEDMA_EDMA_INTR16)
+#define MCFEDMA_IRQ_INTR56 (MCFINT2_VECBASE + MCFEDMA_EDMA_INTR56)
+#define MCFEDMA_IRQ_ERR (MCFINT0_VECBASE + MCFINT0_EDMA_ERR)
#endif /* m5441xsim_h */
diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
index 9c7ff67c5ffd..257b29184af9 100644
--- a/arch/m68k/include/asm/mvme147hw.h
+++ b/arch/m68k/include/asm/mvme147hw.h
@@ -66,7 +66,7 @@ struct pcc_regs {
#define PCC_INT_ENAB 0x08
#define PCC_TIMER_INT_CLR 0x80
-#define PCC_TIMER_PRELOAD 63936l
+#define PCC_TIMER_CLR_OVF 0x04
#define PCC_LEVEL_ABORT 0x07
#define PCC_LEVEL_SERIAL 0x04
diff --git a/arch/m68k/include/asm/tlb.h b/arch/m68k/include/asm/tlb.h
index b4b9efb6f963..3c81f6adfc8b 100644
--- a/arch/m68k/include/asm/tlb.h
+++ b/arch/m68k/include/asm/tlb.h
@@ -2,20 +2,6 @@
#ifndef _M68K_TLB_H
#define _M68K_TLB_H
-/*
- * m68k doesn't need any special per-pte or
- * per-vma handling..
- */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
-/*
- * .. because we flush the whole mm when it
- * fills up.
- */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#endif /* _M68K_TLB_H */
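
Dropping these macros leaves m68k on the asm-generic mmu_gather defaults; under the no-range variant (microblaze selects MMU_GATHER_NO_RANGE further down, and m68k gets the equivalent treatment in this series) the generic header preserves the old whole-mm flush policy. Roughly, as a sketch; see include/asm-generic/tlb.h for the authoritative version:

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)			/* something was actually unmapped */
		flush_tlb_mm(tlb->mm);	/* same policy the deleted macro encoded */
}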
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 960bf1e4be53..7417847dc438 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -1,4 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_32.h
-generic-y += kvm_para.h
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 125c14178979..df4ec3ec71d1 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -423,3 +423,7 @@
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
422 common futex_time64 sys_futex
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
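
Wiring numbers 424-427 makes the new calls reachable through syscall(2) even before libc grows wrappers. A hypothetical userspace check for pidfd_send_signal on m68k, where the table above makes it number 424 (the pid and path are made up; at this kernel version a pidfd is obtained by opening a /proc/<pid> directory):

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int pidfd = open("/proc/1234", O_DIRECTORY);	/* 1234: made-up pid */

	if (pidfd < 0)
		return 1;
	/* args: pidfd, signal, siginfo (NULL behaves like kill(2)), flags */
	if (syscall(424, pidfd, SIGTERM, NULL, 0) < 0)
		perror("pidfd_send_signal");
	return 0;
}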
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index cd9317d53276..11be08f4f750 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -54,8 +54,6 @@ struct mac_booter_data mac_bi_data;
/* The phys. video addr. - might be bogus on some machines */
static unsigned long mac_orig_videoaddr;
-/* Mac specific timer functions */
-extern u32 mac_gettimeoffset(void);
extern int mac_hwclk(int, struct rtc_time *);
extern void iop_preinit(void);
extern void iop_init(void);
@@ -155,7 +153,6 @@ void __init config_mac(void)
mach_sched_init = mac_sched_init;
mach_init_IRQ = mac_init_IRQ;
mach_get_model = mac_get_model;
- arch_gettimeoffset = mac_gettimeoffset;
mach_hwclk = mac_hwclk;
mach_reset = mac_reset;
mach_halt = mac_poweroff;
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 0b0289459173..3c2cfcb74982 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/clocksource.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -55,16 +56,6 @@ static __u8 rbv_clear;
static int gIER,gIFR,gBufA,gBufB;
/*
- * Timer defs.
- */
-
-#define TICK_SIZE 10000
-#define MAC_CLOCK_TICK (783300/HZ) /* ticks per HZ */
-#define MAC_CLOCK_LOW (MAC_CLOCK_TICK&0xFF)
-#define MAC_CLOCK_HIGH (MAC_CLOCK_TICK>>8)
-
-
-/*
* On Macs with a genuine VIA chip there is no way to mask an individual slot
* interrupt. This limitation also seems to apply to VIA clone logic cores in
* Quadra-like ASICs. (RBV and OSS machines don't have this limitation.)
@@ -272,22 +263,6 @@ void __init via_init(void)
}
/*
- * Start the 100 Hz clock
- */
-
-void __init via_init_clock(irq_handler_t func)
-{
- via1[vACR] |= 0x40;
- via1[vT1LL] = MAC_CLOCK_LOW;
- via1[vT1LH] = MAC_CLOCK_HIGH;
- via1[vT1CL] = MAC_CLOCK_LOW;
- via1[vT1CH] = MAC_CLOCK_HIGH;
-
- if (request_irq(IRQ_MAC_TIMER_1, func, 0, "timer", func))
- pr_err("Couldn't register %s interrupt\n", "timer");
-}
-
-/*
* Debugging dump, used in various places to see what's going on.
*/
@@ -315,29 +290,6 @@ void via_debug_dump(void)
}
/*
- * This is always executed with interrupts disabled.
- *
- * TBI: get time offset between scheduling timer ticks
- */
-
-u32 mac_gettimeoffset(void)
-{
- unsigned long ticks, offset = 0;
-
- /* read VIA1 timer 2 current value */
- ticks = via1[vT1CL] | (via1[vT1CH] << 8);
- /* The probability of underflow is less than 2% */
- if (ticks > MAC_CLOCK_TICK - MAC_CLOCK_TICK / 50)
- /* Check for pending timer interrupt in VIA1 IFR */
- if (via1[vIFR] & 0x40) offset = TICK_SIZE;
-
- ticks = MAC_CLOCK_TICK - ticks;
- ticks = ticks * 10000L / MAC_CLOCK_TICK;
-
- return (ticks + offset) * 1000;
-}
-
-/*
* Flush the L2 cache on Macs that have it by flipping
* the system into 24-bit mode for an instant.
*/
@@ -440,6 +392,8 @@ void via_nubus_irq_shutdown(int irq)
* via6522.c :-), disable/pending masks added.
*/
+#define VIA_TIMER_1_INT BIT(6)
+
void via1_irq(struct irq_desc *desc)
{
int irq_num;
@@ -449,6 +403,21 @@ void via1_irq(struct irq_desc *desc)
if (!events)
return;
+ irq_num = IRQ_MAC_TIMER_1;
+ irq_bit = VIA_TIMER_1_INT;
+ if (events & irq_bit) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ via1[vIFR] = irq_bit;
+ generic_handle_irq(irq_num);
+ local_irq_restore(flags);
+
+ events &= ~irq_bit;
+ if (!events)
+ return;
+ }
+
irq_num = VIA1_SOURCE_BASE;
irq_bit = 1;
do {
@@ -605,3 +574,82 @@ int via2_scsi_drq_pending(void)
return via2[gIFR] & (1 << IRQ_IDX(IRQ_MAC_SCSIDRQ));
}
EXPORT_SYMBOL(via2_scsi_drq_pending);
+
+/* timer and clock source */
+
+#define VIA_CLOCK_FREQ 783360 /* VIA "phase 2" clock in Hz */
+#define VIA_TIMER_CYCLES (VIA_CLOCK_FREQ / HZ) /* clock cycles per jiffy */
+
+#define VIA_TC (VIA_TIMER_CYCLES - 2) /* including 0 and -1 */
+#define VIA_TC_LOW (VIA_TC & 0xFF)
+#define VIA_TC_HIGH (VIA_TC >> 8)
+
+static u64 mac_read_clk(struct clocksource *cs);
+
+static struct clocksource mac_clk = {
+ .name = "via1",
+ .rating = 250,
+ .read = mac_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total, clk_offset;
+
+static irqreturn_t via_timer_handler(int irq, void *dev_id)
+{
+ irq_handler_t timer_routine = dev_id;
+
+ clk_total += VIA_TIMER_CYCLES;
+ clk_offset = 0;
+ timer_routine(0, NULL);
+
+ return IRQ_HANDLED;
+}
+
+void __init via_init_clock(irq_handler_t timer_routine)
+{
+ if (request_irq(IRQ_MAC_TIMER_1, via_timer_handler, IRQF_TIMER, "timer",
+ timer_routine)) {
+ pr_err("Couldn't register %s interrupt\n", "timer");
+ return;
+ }
+
+ via1[vT1LL] = VIA_TC_LOW;
+ via1[vT1LH] = VIA_TC_HIGH;
+ via1[vT1CL] = VIA_TC_LOW;
+ via1[vT1CH] = VIA_TC_HIGH;
+ via1[vACR] |= 0x40;
+
+ clocksource_register_hz(&mac_clk, VIA_CLOCK_FREQ);
+}
+
+static u64 mac_read_clk(struct clocksource *cs)
+{
+ unsigned long flags;
+ u8 count_high;
+ u16 count;
+ u32 ticks;
+
+ /*
+ * Timer counter wrap-around is detected with the timer interrupt flag
+ * but reading the counter low byte (vT1CL) would reset the flag.
+ * Also, accessing both counter registers is essentially a data race.
+ * These problems are avoided by ignoring the low byte. Clock accuracy
+ * is 256 times worse (error can reach 0.327 ms) but CPU overhead is
+ * reduced by avoiding slow VIA register accesses.
+ */
+
+ local_irq_save(flags);
+ count_high = via1[vT1CH];
+ if (count_high == 0xFF)
+ count_high = 0;
+ if (count_high > 0 && (via1[vIFR] & VIA_TIMER_1_INT))
+ clk_offset = VIA_TIMER_CYCLES;
+ count = count_high << 8;
+ ticks = VIA_TIMER_CYCLES - count;
+ ticks += clk_offset + clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
+}
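
A quick sanity check of the figure in the comment above: ignoring the low byte leaves up to 256 timer cycles of uncertainty, and at the 783360 Hz VIA clock that works out to the quoted value:

/* worst-case error from ignoring vT1CL: 256 cycles at VIA_CLOCK_FREQ */
u32 max_err_us = 256 * 1000000U / VIA_CLOCK_FREQ;	/* = 326 us, i.e. ~0.327 ms */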
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 933c33e76a48..8868a4c9adae 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -94,6 +94,9 @@ void __init paging_init(void)
high_memory = (void *) end_mem;
empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space).
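
This NULL check, repeated in the mm hunks below and again in the microblaze and mips files later in the patch, is part of a tree-wide sweep: memblock_alloc() can fail, and early-boot callers have no fallback, so failure is promoted to an explicit panic. (memblock_alloc() also returns zeroed memory, which is why the mcfmmu.c hunk below can drop its memset().) The shape of the pattern, reduced to a sketch:

void *p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);	/* zeroed on success */

if (!p)
	panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
	      __func__, PAGE_SIZE, PAGE_SIZE);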
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 0de4999a3810..6cb1e41d58d0 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -44,7 +44,9 @@ void __init paging_init(void)
int i;
empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- memset((void *) empty_zero_page, 0, PAGE_SIZE);

+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
pg_dir = swapper_pg_dir;
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
@@ -52,6 +54,9 @@ void __init paging_init(void)
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
+ if (!next_pgtable)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 3f3d0bf36091..356601bf96d9 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -55,6 +55,9 @@ static pte_t * __init kernel_page_table(void)
pte_t *ptablep;
ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!ptablep)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
clear_page(ptablep);
__flush_page_to_ram(ptablep);
@@ -96,6 +99,9 @@ static pmd_t * __init kernel_ptr_table(void)
if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
+ if (!last_pgtable)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
clear_page(last_pgtable);
__flush_page_to_ram(last_pgtable);
@@ -278,6 +284,9 @@ void __init paging_init(void)
* to a couple of allocated pages
*/
empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index f736db48a2e1..eca1c46bb90a 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -46,6 +46,9 @@ void __init paging_init(void)
unsigned long size;
empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
address = PAGE_OFFSET;
pg_dir = swapper_pg_dir;
@@ -56,6 +59,9 @@ void __init paging_init(void)
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
+ if (!next_pgtable)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
/* Map whole memory from PAGE_OFFSET (0x0E000000) */
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index adea549d240e..545a1fe0e119 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
+#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/linkage.h>
#include <linux/init.h>
@@ -38,18 +39,12 @@
static void mvme147_get_model(char *model);
extern void mvme147_sched_init(irq_handler_t handler);
-extern u32 mvme147_gettimeoffset(void);
extern int mvme147_hwclk (int, struct rtc_time *);
extern void mvme147_reset (void);
static int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/time/timekeeping.c, called via mvme147_process_int() */
-
-irq_handler_t tick_handler;
-
int __init mvme147_parse_bootinfo(const struct bi_record *bi)
{
@@ -89,7 +84,6 @@ void __init config_mvme147(void)
mach_max_dma_address = 0x01000000;
mach_sched_init = mvme147_sched_init;
mach_init_IRQ = mvme147_init_IRQ;
- arch_gettimeoffset = mvme147_gettimeoffset;
mach_hwclk = mvme147_hwclk;
mach_reset = mvme147_reset;
mach_get_model = mvme147_get_model;
@@ -99,45 +93,76 @@ void __init config_mvme147(void)
vme_brdtype = VME_TYPE_MVME147;
}
+static u64 mvme147_read_clk(struct clocksource *cs);
+
+static struct clocksource mvme147_clk = {
+ .name = "pcc",
+ .rating = 250,
+ .read = mvme147_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total;
+
+#define PCC_TIMER_CLOCK_FREQ 160000
+#define PCC_TIMER_CYCLES (PCC_TIMER_CLOCK_FREQ / HZ)
+#define PCC_TIMER_PRELOAD (0x10000 - PCC_TIMER_CYCLES)
/* Using pcc tick timer 1 */
static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
+
+ local_irq_save(flags);
m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
- m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
- return tick_handler(irq, dev_id);
+ m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
+ clk_total += PCC_TIMER_CYCLES;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
}
void mvme147_sched_init (irq_handler_t timer_routine)
{
- tick_handler = timer_routine;
- if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1", NULL))
+ if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, IRQF_TIMER,
+ "timer 1", timer_routine))
pr_err("Couldn't register timer interrupt\n");
/* Init the clock with a value */
- /* our clock goes off every 6.25us */
+ /* The clock counter increments until 0xFFFF then reloads */
m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
m147_pcc->t1_cntrl = 0x0; /* clear timer */
m147_pcc->t1_cntrl = 0x3; /* start timer */
m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; /* clear pending ints */
m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
+
+ clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
}
-/* This is always executed with interrupts disabled. */
-/* XXX There are race hazards in this code XXX */
-u32 mvme147_gettimeoffset(void)
+static u64 mvme147_read_clk(struct clocksource *cs)
{
- volatile unsigned short *cp = (volatile unsigned short *)0xfffe1012;
- unsigned short n;
-
- n = *cp;
- while (n != *cp)
- n = *cp;
-
- n -= PCC_TIMER_PRELOAD;
- return ((unsigned long)n * 25 / 4) * 1000;
+ unsigned long flags;
+ u8 overflow, tmp;
+ u16 count;
+ u32 ticks;
+
+ local_irq_save(flags);
+ tmp = m147_pcc->t1_cntrl >> 4;
+ count = m147_pcc->t1_count;
+ overflow = m147_pcc->t1_cntrl >> 4;
+ if (overflow != tmp)
+ count = m147_pcc->t1_count;
+ count -= PCC_TIMER_PRELOAD;
+ ticks = count + overflow * PCC_TIMER_CYCLES;
+ ticks += clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
}
static int bcd2int (unsigned char b)
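
mvme147_read_clk() above (and mvme16x_read_clk() below) must read a counter and an overflow field that the hardware can update between the two accesses. Rather than stopping the timer, the new code uses a read/read/re-check idiom. In generic form, with read_ovf()/read_count() as placeholders for the PCC register reads:

#define TIMER_CYCLES	1600	/* e.g. mvme147: 160 kHz / HZ */

extern u8 read_ovf(void);	/* placeholder: overflow counter */
extern u16 read_count(void);	/* placeholder: tick counter */

u32 example_ticks(void)
{
	u8 ovf_before, ovf_after;
	u16 count;

	ovf_before = read_ovf();
	count = read_count();
	ovf_after = read_ovf();
	if (ovf_after != ovf_before)	/* counter wrapped between the reads */
		count = read_count();	/* re-read: now consistent with ovf_after */
	return count + ovf_after * TIMER_CYCLES;
}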
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 6ee36a5b528d..9bc2da69f80c 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
+#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/linkage.h>
#include <linux/init.h>
@@ -44,17 +45,11 @@ static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE;
static void mvme16x_get_model(char *model);
extern void mvme16x_sched_init(irq_handler_t handler);
-extern u32 mvme16x_gettimeoffset(void);
extern int mvme16x_hwclk (int, struct rtc_time *);
extern void mvme16x_reset (void);
int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/time/timekeeping.c, called via mvme16x_process_int() */
-
-static irq_handler_t tick_handler;
-
unsigned short mvme16x_config;
EXPORT_SYMBOL(mvme16x_config);
@@ -120,11 +115,11 @@ static void __init mvme16x_init_IRQ (void)
m68k_setup_user_interrupt(VEC_USER, 192);
}
-#define pcc2chip ((volatile u_char *)0xfff42000)
-#define PccSCCMICR 0x1d
-#define PccSCCTICR 0x1e
-#define PccSCCRICR 0x1f
-#define PccTPIACKR 0x25
+#define PCC2CHIP (0xfff42000)
+#define PCCSCCMICR (PCC2CHIP + 0x1d)
+#define PCCSCCTICR (PCC2CHIP + 0x1e)
+#define PCCSCCRICR (PCC2CHIP + 0x1f)
+#define PCCTPIACKR (PCC2CHIP + 0x25)
#ifdef CONFIG_EARLY_PRINTK
@@ -232,10 +227,10 @@ void mvme16x_cons_write(struct console *co, const char *str, unsigned count)
base_addr[CyIER] = CyTxMpty;
while (1) {
- if (pcc2chip[PccSCCTICR] & 0x20)
+ if (in_8(PCCSCCTICR) & 0x20)
{
/* We have a Tx int. Acknowledge it */
- sink = pcc2chip[PccTPIACKR];
+ sink = in_8(PCCTPIACKR);
if ((base_addr[CyLICR] >> 2) == port) {
if (i == count) {
/* Last char of string is now output */
@@ -277,7 +272,6 @@ void __init config_mvme16x(void)
mach_max_dma_address = 0xffffffff;
mach_sched_init = mvme16x_sched_init;
mach_init_IRQ = mvme16x_init_IRQ;
- arch_gettimeoffset = mvme16x_gettimeoffset;
mach_hwclk = mvme16x_hwclk;
mach_reset = mvme16x_reset;
mach_get_model = mvme16x_get_model;
@@ -350,10 +344,46 @@ static irqreturn_t mvme16x_abort_int (int irq, void *dev_id)
return IRQ_HANDLED;
}
+static u64 mvme16x_read_clk(struct clocksource *cs);
+
+static struct clocksource mvme16x_clk = {
+ .name = "pcc",
+ .rating = 250,
+ .read = mvme16x_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total;
+
+#define PCC_TIMER_CLOCK_FREQ 1000000
+#define PCC_TIMER_CYCLES (PCC_TIMER_CLOCK_FREQ / HZ)
+
+#define PCCTCMP1 (PCC2CHIP + 0x04)
+#define PCCTCNT1 (PCC2CHIP + 0x08)
+#define PCCTOVR1 (PCC2CHIP + 0x17)
+#define PCCTIC1 (PCC2CHIP + 0x1b)
+
+#define PCCTOVR1_TIC_EN 0x01
+#define PCCTOVR1_COC_EN 0x02
+#define PCCTOVR1_OVR_CLR 0x04
+
+#define PCCTIC1_INT_CLR 0x08
+#define PCCTIC1_INT_EN 0x10
+
static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
{
- *(volatile unsigned char *)0xfff4201b |= 8;
- return tick_handler(irq, dev_id);
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
+ out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
+ clk_total += PCC_TIMER_CYCLES;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
}
void mvme16x_sched_init (irq_handler_t timer_routine)
@@ -361,16 +391,17 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
uint16_t brdno = be16_to_cpu(mvme_bdid.brdno);
int irq;
- tick_handler = timer_routine;
/* Using PCCchip2 or MC2 chip tick timer 1 */
- *(volatile unsigned long *)0xfff42008 = 0;
- *(volatile unsigned long *)0xfff42004 = 10000; /* 10ms */
- *(volatile unsigned char *)0xfff42017 |= 3;
- *(volatile unsigned char *)0xfff4201b = 0x16;
- if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, 0,
- "timer", mvme16x_timer_int))
+ out_be32(PCCTCNT1, 0);
+ out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
+ out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+ out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
+ if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
+ timer_routine))
panic ("Couldn't register timer int");
+ clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
+
if (brdno == 0x0162 || brdno == 0x172)
irq = MVME162_IRQ_ABORT;
else
@@ -380,11 +411,23 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
panic ("Couldn't register abort int");
}
-
-/* This is always executed with interrupts disabled. */
-u32 mvme16x_gettimeoffset(void)
+static u64 mvme16x_read_clk(struct clocksource *cs)
{
- return (*(volatile u32 *)0xfff42008) * 1000;
+ unsigned long flags;
+ u8 overflow, tmp;
+ u32 ticks;
+
+ local_irq_save(flags);
+ tmp = in_8(PCCTOVR1) >> 4;
+ ticks = in_be32(PCCTCNT1);
+ overflow = in_8(PCCTOVR1) >> 4;
+ if (overflow != tmp)
+ ticks = in_be32(PCCTCNT1);
+ ticks += overflow * PCC_TIMER_CYCLES;
+ ticks += clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
}
int bcd2int (unsigned char b)
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 96810d91da2b..e63eb5f06999 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -40,7 +40,6 @@ extern void q40_init_IRQ(void);
static void q40_get_model(char *model);
extern void q40_sched_init(irq_handler_t handler);
-static u32 q40_gettimeoffset(void);
static int q40_hwclk(int, struct rtc_time *);
static unsigned int q40_get_ss(void);
static int q40_get_rtc_pll(struct rtc_pll_info *pll);
@@ -169,7 +168,6 @@ void __init config_q40(void)
mach_sched_init = q40_sched_init;
mach_init_IRQ = q40_init_IRQ;
- arch_gettimeoffset = q40_gettimeoffset;
mach_hwclk = q40_hwclk;
mach_get_ss = q40_get_ss;
mach_get_rtc_pll = q40_get_rtc_pll;
@@ -201,13 +199,6 @@ int __init q40_parse_bootinfo(const struct bi_record *rec)
return 1;
}
-
-static u32 q40_gettimeoffset(void)
-{
- return 5000 * (ql_ticks != 0) * 1000;
-}
-
-
/*
* Looks like op is non-zero for setting the clock, and zero for
* reading the clock.
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index 3e7603202977..1c696906c159 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -127,10 +127,10 @@ void q40_mksound(unsigned int hz, unsigned int ticks)
sound_ticks = ticks << 1;
}
-static irq_handler_t q40_timer_routine;
-
-static irqreturn_t q40_timer_int (int irq, void * dev)
+static irqreturn_t q40_timer_int(int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+
ql_ticks = ql_ticks ? 0 : 1;
if (sound_ticks) {
unsigned char sval=(sound_ticks & 1) ? 128-SVOL : 128+SVOL;
@@ -139,8 +139,13 @@ static irqreturn_t q40_timer_int (int irq, void * dev)
*DAC_RIGHT=sval;
}
- if (!ql_ticks)
- q40_timer_routine(irq, dev);
+ if (!ql_ticks) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+ }
return IRQ_HANDLED;
}
@@ -148,11 +153,9 @@ void q40_sched_init (irq_handler_t timer_routine)
{
int timer_irq;
- q40_timer_routine = timer_routine;
timer_irq = Q40_IRQ_FRAME;
- if (request_irq(timer_irq, q40_timer_int, 0,
- "timer", q40_timer_int))
+ if (request_irq(timer_irq, q40_timer_int, 0, "timer", timer_routine))
panic("Couldn't register timer int");
master_outb(-1, FRAME_CLEAR_REG);
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 542c4404861c..229ea37dfe1b 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -37,7 +37,6 @@
char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
-extern u32 sun3_gettimeoffset(void);
static void sun3_sched_init(irq_handler_t handler);
extern void sun3_get_model (char* model);
extern int sun3_hwclk(int set, struct rtc_time *t);
@@ -138,7 +137,6 @@ void __init config_sun3(void)
mach_sched_init = sun3_sched_init;
mach_init_IRQ = sun3_init_IRQ;
mach_reset = sun3_reboot;
- arch_gettimeoffset = sun3_gettimeoffset;
mach_get_model = sun3_get_model;
mach_hwclk = sun3_hwclk;
mach_halt = sun3_halt;
diff --git a/arch/m68k/sun3/intersil.c b/arch/m68k/sun3/intersil.c
index d911070af02a..8fc74864de81 100644
--- a/arch/m68k/sun3/intersil.c
+++ b/arch/m68k/sun3/intersil.c
@@ -22,13 +22,6 @@
#define STOP_VAL (INTERSIL_STOP | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE)
#define START_VAL (INTERSIL_RUN | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE)
-/* does this need to be implemented? */
-u32 sun3_gettimeoffset(void)
-{
- return 1000;
-}
-
-
/* get/set hwclock */
int sun3_hwclk(int set, struct rtc_time *t)
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index 4d64711d3d47..399f3d06125f 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -269,6 +269,9 @@ void __init dvma_init(void)
iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
SMP_CACHE_BYTES);
+ if (!iommu_use)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 6bbca30c9188..a5824abb4a39 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -61,8 +61,10 @@ static irqreturn_t sun3_int7(int irq, void *dev_id)
static irqreturn_t sun3_int5(int irq, void *dev_id)
{
+ unsigned long flags;
unsigned int cnt;
+ local_irq_save(flags);
#ifdef CONFIG_SUN3
intersil_clear();
#endif
@@ -76,6 +78,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
cnt = kstat_irqs_cpu(irq, 0);
if (!(cnt % 20))
sun3_leds(led_pattern[cnt % 160 / 20]);
+ local_irq_restore(flags);
return IRQ_HANDLED;
}
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
index 33d3a1c6fba0..03ce7f9facfe 100644
--- a/arch/m68k/sun3x/config.c
+++ b/arch/m68k/sun3x/config.c
@@ -49,7 +49,6 @@ void __init config_sun3x(void)
mach_sched_init = sun3x_sched_init;
mach_init_IRQ = sun3_init_IRQ;
- arch_gettimeoffset = sun3x_gettimeoffset;
mach_reset = sun3x_reboot;
mach_hwclk = sun3x_hwclk;
diff --git a/arch/m68k/sun3x/time.c b/arch/m68k/sun3x/time.c
index 047e2bcee3d7..9163294b0fb6 100644
--- a/arch/m68k/sun3x/time.c
+++ b/arch/m68k/sun3x/time.c
@@ -73,22 +73,21 @@ int sun3x_hwclk(int set, struct rtc_time *t)
return 0;
}
-/* Not much we can do here */
-u32 sun3x_gettimeoffset(void)
-{
- return 0L;
-}
#if 0
-static void sun3x_timer_tick(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t sun3x_timer_tick(int irq, void *dev_id)
{
- void (*vector)(int, void *, struct pt_regs *) = dev_id;
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
- /* Clear the pending interrupt - pulse the enable line low */
- disable_irq(5);
- enable_irq(5);
+ local_irq_save(flags);
+ /* Clear the pending interrupt - pulse the enable line low */
+ disable_irq(5);
+ enable_irq(5);
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
- vector(irq, NULL, regs);
+ return IRQ_HANDLED;
}
#endif
diff --git a/arch/m68k/sun3x/time.h b/arch/m68k/sun3x/time.h
index 496f406412ad..86ce78bb3c28 100644
--- a/arch/m68k/sun3x/time.h
+++ b/arch/m68k/sun3x/time.h
@@ -3,7 +3,6 @@
#define SUN3X_TIME_H
extern int sun3x_hwclk(int set, struct rtc_time *t);
-u32 sun3x_gettimeoffset(void);
void sun3x_sched_init(irq_handler_t vector);
struct mostek_dt {
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index a51b965b3b82..adb179f519f9 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -41,6 +41,7 @@ config MICROBLAZE
select TRACING_SUPPORT
select VIRT_TO_BUS
select CPU_NO_EFFICIENT_FFS
+ select MMU_GATHER_NO_RANGE if MMU
# Endianness selection
choice
@@ -58,15 +59,9 @@ config CPU_LITTLE_ENDIAN
endchoice
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config ZONE_DMA
def_bool y
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config ARCH_HAS_ILOG2_U32
def_bool n
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 791cc8d54d0a..17a8d0a62038 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -17,11 +17,13 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
+generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h
index 220decd605a4..833d3a53dab3 100644
--- a/arch/microblaze/include/asm/syscall.h
+++ b/arch/microblaze/include/asm/syscall.h
@@ -82,18 +82,22 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
+ unsigned int i = 0;
+ unsigned int n = 6;
+
while (n--)
*args++ = microblaze_get_syscall_arg(regs, i++);
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
+ unsigned int i = 0;
+ unsigned int n = 6;
+
while (n--)
microblaze_set_syscall_arg(regs, i++, *args++);
}
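
This is part of a cross-arch interface change: syscall_get_arguments() and syscall_set_arguments() lose their (i, n) window parameters and now always transfer arguments 0 through 5, since no caller ever fetched a partial range. A caller after the change looks like this (see the mips ptrace.c hunk later in this patch for the real thing):

unsigned long args[6];

syscall_get_arguments(current, regs, args);	/* always all six */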
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index 99b6ded54849..628a78ee0a72 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -11,16 +11,7 @@
#ifndef _ASM_MICROBLAZE_TLB_H
#define _ASM_MICROBLAZE_TLB_H
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <linux/pagemap.h>
-
-#ifdef CONFIG_MMU
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-#endif
-
#include <asm-generic/tlb.h>
#endif /* _ASM_MICROBLAZE_TLB_H */
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 97823ec46e97..13f59631c576 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -1,5 +1,2 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_32.h
-generic-y += kvm_para.h
generic-y += ucontext.h
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index bbd6968ce55b..522a0c5d9c59 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -192,23 +192,14 @@ struct dentry *of_debugfs_root;
static int microblaze_debugfs_init(void)
{
of_debugfs_root = debugfs_create_dir("microblaze", NULL);
-
- return of_debugfs_root == NULL;
+ return 0;
}
arch_initcall(microblaze_debugfs_init);
# ifdef CONFIG_MMU
static int __init debugfs_tlb(void)
{
- struct dentry *d;
-
- if (!of_debugfs_root)
- return -ENODEV;
-
- d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip);
- if (!d)
- return -ENOMEM;
-
+ debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip);
return 0;
}
device_initcall(debugfs_tlb);
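
Both hunks follow the current debugfs convention: creation calls are fire-and-forget, and callers should not change behaviour based on the return value, since code is expected to work the same whether debugfs is available or not. The resulting shape, as a sketch:

#include <linux/debugfs.h>

static u32 tlb_skip;

static int __init example_debugfs_init(void)
{
	struct dentry *root = debugfs_create_dir("microblaze", NULL);

	/* no error checking: a missing debugfs node is not fatal */
	debugfs_create_u32("tlb_skip", S_IRUGO, root, &tlb_skip);
	return 0;
}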
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 8ee3a8c18498..4964947732af 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -429,3 +429,7 @@
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
422 common futex_time64 sys_futex
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 44f4b8910c21..7e97d44f6538 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -374,12 +374,14 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
- if (mem_init_done)
+ if (mem_init_done) {
p = kzalloc(size, mask);
- else {
+ } else {
p = memblock_alloc(size, SMP_CACHE_BYTES);
- if (p)
- memset(p, 0, size);
+ if (!p)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, size);
}
+
return p;
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3d7f1153155f..b9c48b27162d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -57,7 +57,6 @@ config MIPS
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
- select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
@@ -1038,13 +1037,6 @@ source "arch/mips/paravirt/Kconfig"
endmenu
-config RWSEM_GENERIC_SPINLOCK
- bool
- default y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config GENERIC_HWEIGHT
bool
default y
@@ -1119,6 +1111,7 @@ config DMA_MAYBE_COHERENT
config DMA_PERDEV_COHERENT
bool
+ select ARCH_HAS_SETUP_DMA_OPS
select DMA_NONCOHERENT
config DMA_NONCOHERENT
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 4a70c5de8c92..25a57895a3a3 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -210,12 +210,6 @@ const char *get_system_type(void)
return ath79_sys_type;
}
-int get_c0_perfcount_int(void)
-{
- return ATH79_MISC_IRQ(5);
-}
-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
-
unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
index 46eddbec8d9f..0ab95dd431b3 100644
--- a/arch/mips/bcm47xx/workarounds.c
+++ b/arch/mips/bcm47xx/workarounds.c
@@ -24,6 +24,7 @@ void __init bcm47xx_workarounds(void)
case BCM47XX_BOARD_NETGEAR_WNR3500L:
bcm47xx_workarounds_enable_usb_power(12);
break;
+ case BCM47XX_BOARD_NETGEAR_WNDR3400V2:
case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
bcm47xx_workarounds_enable_usb_power(21);
break;
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 35704c28a28b..3ce4dd578370 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -115,7 +115,7 @@ endif
targets += vmlinux.its.S
quiet_cmd_its_cat = CAT $@
- cmd_its_cat = cat $(filter-out $(PHONY), $^) >$@
+ cmd_its_cat = cat $(real-prereqs) >$@
$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) FORCE
$(call if_changed,its_cat)
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index e8eb60ed99f2..11d5a4e90736 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -245,6 +245,9 @@ void __init plat_swiotlb_setup(void)
swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
+ if (!octeon_swiotlb)
+ panic("%s: Failed to allocate %zu bytes align=%lx\n",
+ __func__, swiotlbsize, PAGE_SIZE);
if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
panic("Cannot allocate SWIOTLB buffer");
diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config
index f607888d2483..184eb65a6ba7 100644
--- a/arch/mips/configs/generic/board-ocelot.config
+++ b/arch/mips/configs/generic/board-ocelot.config
@@ -1,6 +1,10 @@
# require CONFIG_CPU_MIPS32_R2=y
CONFIG_LEGACY_BOARD_OCELOT=y
+CONFIG_FIT_IMAGE_FDT_OCELOT=y
+
+CONFIG_BRIDGE=y
+CONFIG_GENERIC_PHY=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -19,6 +23,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_NETDEVICES=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_DSA=y
CONFIG_MSCC_OCELOT_SWITCH=y
CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
CONFIG_MDIO_MSCC_MIIM=y
@@ -35,6 +41,8 @@ CONFIG_SPI_DESIGNWARE=y
CONFIG_SPI_DW_MMIO=y
CONFIG_SPI_SPIDEV=y
+CONFIG_PINCTRL_OCELOT=y
+
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index 7c138dab87df..5d80521e5d5a 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -59,7 +59,7 @@ CONFIG_HID_MONTEREY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
CONFIG_FANOTIFY=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 20dfaad3a55d..34de7b17b41b 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -15,14 +15,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
#endif
}
-#define arch_setup_dma_ops arch_setup_dma_ops
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu,
- bool coherent)
-{
-#ifdef CONFIG_DMA_PERDEV_COHERENT
- dev->dma_coherent = coherent;
-#endif
-}
-
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 845fbbc7a2e3..29997e42480e 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -102,9 +102,6 @@ static inline void set_io_port_base(unsigned long base)
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()
-/* Some callers use this older API instead. */
-#define mmiowb() iobarrier_w()
-
/*
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e77672539e8e..e4456e450f94 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -21,15 +21,15 @@
#endif
#ifdef CONFIG_CPU_MICROMIPS
-#define NOP_INSN "nop32"
+#define B_INSN "b32"
#else
-#define NOP_INSN "nop"
+#define B_INSN "b"
#endif
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\t" NOP_INSN "\n\t"
- "nop\n\t"
+ asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
+ "2:\tnop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t"
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index d2abd98471e8..41204a49cf95 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -1134,7 +1134,7 @@ static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
diff --git a/arch/mips/include/asm/mmiowb.h b/arch/mips/include/asm/mmiowb.h
new file mode 100644
index 000000000000..a40824e3ef8e
--- /dev/null
+++ b/arch/mips/include/asm/mmiowb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MMIOWB_H
+#define _ASM_MMIOWB_H
+
+#include <asm/io.h>
+
+#define mmiowb() iobarrier_w()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_MMIOWB_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index ee81297d9117..8a88eb265516 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -11,6 +11,21 @@
#include <asm/processor.h>
#include <asm/qrwlock.h>
+
+#include <asm-generic/qspinlock_types.h>
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
+ smp_store_release(&lock->locked, 0);
+}
+
#include <asm/qspinlock.h>
#endif /* _ASM_SPINLOCK_H */
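
With mmiowb() folded into the generic lock code (mips supplies it through the new asm/mmiowb.h above, and queued_spin_unlock() issues it here), drivers no longer need the explicit barrier before releasing a lock that orders MMIO. What this retires, sketched with hypothetical names (dev_lock, dev_regs, REG_CTRL):

#define REG_CTRL	0x00	/* hypothetical register offset */

static DEFINE_SPINLOCK(dev_lock);

void example_mmio_update(void __iomem *dev_regs, u32 val)
{
	spin_lock(&dev_lock);
	writel(val, dev_regs + REG_CTRL);	/* posted MMIO write */
	/* mmiowb();  -- formerly required here; now implied by the unlock */
	spin_unlock(&dev_lock);
}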
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 6cf8ffb5367e..a2b4748655df 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
+ unsigned int i = 0;
+ unsigned int n = 6;
int ret;
/* O32 ABI syscall() */
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index b6823b9e94da..90f3ad76d9e0 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -5,23 +5,6 @@
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
-/*
- * MIPS doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for area to be unmapped.
- */
-#define tlb_start_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
-/*
- * .. because we flush the whole mm when it fills up.
- */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#define _UNIQUE_ENTRYHI(base, idx) \
(((base) + ((idx) << (PAGE_SHIFT + 1))) | \
(cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index 0851c103a8ce..c3798bfe0486 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -1,5 +1,3 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_n32.h
generated-y += unistd_n64.h
generated-y += unistd_o32.h
diff --git a/arch/mips/include/uapi/asm/posix_types.h b/arch/mips/include/uapi/asm/posix_types.h
index 6aa49c10f88f..f0ccb5b90ce9 100644
--- a/arch/mips/include/uapi/asm/posix_types.h
+++ b/arch/mips/include/uapi/asm/posix_types.h
@@ -21,13 +21,6 @@
typedef long __kernel_daddr_t;
#define __kernel_daddr_t __kernel_daddr_t
-#if (_MIPS_SZLONG == 32)
-typedef struct {
- long val[2];
-} __kernel_fsid_t;
-#define __kernel_fsid_t __kernel_fsid_t
-#endif
-
#include <asm-generic/posix_types.h>
#endif /* _ASM_POSIX_TYPES_H */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index eb9f33f8a8b3..d41765cfbc6e 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -10,8 +10,8 @@
#ifndef _UAPI_ASM_SOCKET_H
#define _UAPI_ASM_SOCKET_H
+#include <linux/posix_types.h>
#include <asm/sockios.h>
-#include <asm/bitsperlong.h>
/*
* For setsockopt(2)
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 6e574c02e4c3..ea781b29f7f1 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -33,6 +33,7 @@
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <linux/uaccess.h>
+#include <asm/irq_regs.h>
static struct hard_trap_info {
unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
@@ -214,7 +215,7 @@ void kgdb_call_nmi_hook(void *ignored)
old_fs = get_fs();
set_fs(KERNEL_DS);
- kgdb_nmicallback(raw_smp_processor_id(), NULL);
+ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
set_fs(old_fs);
}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0057c910bc2f..3a62f80958e1 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
sd.nr = syscall;
sd.arch = syscall_get_arch();
- syscall_get_arguments(current, regs, 0, 6, args);
+ syscall_get_arguments(current, regs, args);
for (i = 0; i < 6; i++)
sd.args[i] = args[i];
sd.instruction_pointer = KSTK_EIP(current);
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f158c5894a9a..feb2653490df 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -125,7 +125,7 @@ trace_a_syscall:
subu t1, v0, __NR_O32_Linux
move a1, v0
bnez t1, 1f /* __NR_syscall at offset 0 */
- lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
.set pop
1: jal syscall_trace_enter
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 5151532ad959..8d1dc6c71173 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -919,6 +919,9 @@ static void __init resource_init(void)
end = HIGHMEM_START - 1;
res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
res->start = start;
res->end = end;
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 15f4117900ee..9392dfe33f97 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -362,3 +362,7 @@
421 n32 rt_sigtimedwait_time64 compat_sys_rt_sigtimedwait_time64
422 n32 futex_time64 sys_futex
423 n32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 n32 pidfd_send_signal sys_pidfd_send_signal
+425 n32 io_uring_setup sys_io_uring_setup
+426 n32 io_uring_enter sys_io_uring_enter
+427 n32 io_uring_register sys_io_uring_register
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index c85502e67b44..cd0c8aa21fba 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -338,3 +338,7 @@
327 n64 rseq sys_rseq
328 n64 io_pgetevents sys_io_pgetevents
# 329 through 423 are reserved to sync up with other architectures
+424 n64 pidfd_send_signal sys_pidfd_send_signal
+425 n64 io_uring_setup sys_io_uring_setup
+426 n64 io_uring_enter sys_io_uring_enter
+427 n64 io_uring_register sys_io_uring_register
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 2e063d0f837e..e849e8ffe4a2 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -411,3 +411,7 @@
421 o32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
422 o32 futex_time64 sys_futex sys_futex
423 o32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
+424 o32 pidfd_send_signal sys_pidfd_send_signal
+425 o32 io_uring_setup sys_io_uring_setup
+426 o32 io_uring_enter sys_io_uring_enter
+427 o32 io_uring_register sys_io_uring_register
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 42d411125690..98ca55d62201 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2293,7 +2293,10 @@ void __init trap_init(void)
phys_addr_t ebase_pa;
ebase = (unsigned long)
- memblock_alloc_from(size, 1 << fls(size), 0);
+ memblock_alloc(size, 1 << fls(size));
+ if (!ebase)
+ panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+ __func__, size, 1 << fls(size));
/*
* Try to ensure ebase resides in KSeg0 if possible.
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index cb7e9ed7a453..33ee0d18fb0a 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -140,6 +140,13 @@ SECTIONS
PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
#endif
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+ .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
+ *(.appended_dtb)
+ KEEP(*(.appended_dtb))
+ }
+#endif
+
#ifdef CONFIG_RELOCATABLE
. = ALIGN(4);
@@ -164,11 +171,6 @@ SECTIONS
__appended_dtb = .;
/* leave space for appended DTB */
. += 0x100000;
-#elif defined(CONFIG_MIPS_ELF_APPENDED_DTB)
- .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
- *(.appended_dtb)
- KEEP(*(.appended_dtb))
- }
#endif
/*
* Align to 64K in attempt to eliminate holes before the
diff --git a/arch/mips/loongson64/lemote-2f/irq.c b/arch/mips/loongson64/lemote-2f/irq.c
index 9e33e45aa17c..b213cecb8e3a 100644
--- a/arch/mips/loongson64/lemote-2f/irq.c
+++ b/arch/mips/loongson64/lemote-2f/irq.c
@@ -103,7 +103,7 @@ static struct irqaction ip6_irqaction = {
static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
- .flags = IRQF_NO_THREAD,
+ .flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND,
};
void __init mach_init_irq(void)
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index b57465733e87..f9549d2fbea3 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -156,3 +156,11 @@ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
dma_sync_virt(vaddr, size, direction);
}
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
+{
+ dev->dma_coherent = coherent;
+}
+#endif
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c3b45e248806..bbb196ad5f26 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -252,6 +252,11 @@ void __init fixrange_init(unsigned long start, unsigned long end,
if (pmd_none(*pmd)) {
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE,
+ PAGE_SIZE);
+
set_pmd(pmd, __pmd((unsigned long)pte));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 0effd3cba9a7..98bf0c222b5f 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -186,8 +186,9 @@ enum which_ebpf_reg {
* separate frame pointer, so BPF_REG_10 relative accesses are
* adjusted to be $sp relative.
*/
-int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
- enum which_ebpf_reg w)
+static int ebpf_to_mips_reg(struct jit_ctx *ctx,
+ const struct bpf_insn *insn,
+ enum which_ebpf_reg w)
{
int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
insn->src_reg : insn->dst_reg;
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 710a59764b01..a32f843cdbe0 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -118,7 +118,6 @@ static void shutdown_bridge_irq(struct irq_data *d)
{
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
struct bridge_controller *bc;
- int pin = hd->pin;
if (!hd)
return;
@@ -126,7 +125,7 @@ static void shutdown_bridge_irq(struct irq_data *d)
disable_hub_irq(d);
bc = hd->bc;
- bridge_clr(bc, b_int_enable, (1 << pin));
+ bridge_clr(bc, b_int_enable, (1 << hd->pin));
bridge_read(bc, b_wid_tflush);
}
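
The shutdown_bridge_irq() change above fixes a dereference-before-NULL-check bug: hd->pin was read before the !hd guard that is meant to protect it. The safe shape, sketched with the same structures (the _fixed name is hypothetical):

	static void shutdown_bridge_irq_fixed(struct irq_data *d)
	{
		struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);

		if (!hd)
			return;

		/* Only dereference hd once it is known to be non-NULL. */
		disable_hub_irq(d);
		bridge_clr(hd->bc, b_int_enable, 1 << hd->pin);
		bridge_read(hd->bc, b_wid_tflush);
	}
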
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index addb7f5f5264..55559ca0efe4 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -60,9 +60,6 @@ config GENERIC_LOCKBREAK
def_bool y
depends on PREEMPT
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config TRACE_IRQFLAGS_SUPPORT
def_bool y
diff --git a/arch/nds32/configs/defconfig b/arch/nds32/configs/defconfig
index 2546d8770785..65ce9259081b 100644
--- a/arch/nds32/configs/defconfig
+++ b/arch/nds32/configs/defconfig
@@ -74,7 +74,7 @@ CONFIG_GENERIC_PHY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 64ceff7ab99b..688b6ed26227 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -31,6 +31,7 @@ generic-y += limits.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += parport.h
generic-y += pci.h
generic-y += percpu.h
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h
index 71cd226d6863..5ef8ae5ba833 100644
--- a/arch/nds32/include/asm/io.h
+++ b/arch/nds32/include/asm/io.h
@@ -55,8 +55,6 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
#define __iormb() rmb()
#define __iowmb() wmb()
-#define mmiowb() __asm__ __volatile__ ("msync all" : : : "memory");
-
/*
* {read,write}{b,w,l,q}_relaxed() are like the regular version, but
* are not guaranteed to provide ordering against spinlocks or memory
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h
index f7e5e86765fe..671ebd357496 100644
--- a/arch/nds32/include/asm/syscall.h
+++ b/arch/nds32/include/asm/syscall.h
@@ -108,81 +108,41 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
* syscall_get_arguments - extract system call parameter values
* @task: task of interest, must be blocked
* @regs: task_pt_regs() of @task
- * @i: argument index [0,5]
- * @n: number of arguments; n+i must be [1,6].
* @args: array filled with argument values
*
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5). Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches 6 arguments to the system call (from 0 through 5). The first
+ * argument is stored in @args[0], and so on.
*
* It's only valid to call this when @task is stopped for tracing on
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
*/
#define SYSCALL_MAX_ARGS 6
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, unsigned long *args)
+ unsigned long *args)
{
- if (n == 0)
- return;
- if (i + n > SYSCALL_MAX_ARGS) {
- unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
- unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
- pr_warning("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- memset(args_bad, 0, n_bad * sizeof(args[0]));
- memset(args_bad, 0, n_bad * sizeof(args[0]));
- }
-
- if (i == 0) {
- args[0] = regs->orig_r0;
- args++;
- i++;
- n--;
- }
-
- memcpy(args, &regs->uregs[0] + i, n * sizeof(args[0]));
+ args[0] = regs->orig_r0;
+ args++;
+ memcpy(args, &regs->uregs[0] + 1, 5 * sizeof(args[0]));
}
/**
* syscall_set_arguments - change system call parameter value
* @task: task of interest, must be in system call entry tracing
* @regs: task_pt_regs() of @task
- * @i: argument index [0,5]
- * @n: number of arguments; n+i must be [1,6].
* @args: array of argument values to store
*
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call. The first argument gets value
+ * @args[0], and so on.
*
* It's only valid to call this when @task is stopped for tracing on
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
*/
void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- if (n == 0)
- return;
-
- if (i + n > SYSCALL_MAX_ARGS) {
- pr_warn("%s called with max args %d, handling only %d\n",
- __func__, i + n, SYSCALL_MAX_ARGS);
- n = SYSCALL_MAX_ARGS - i;
- }
-
- if (i == 0) {
- regs->orig_r0 = args[0];
- args++;
- i++;
- n--;
- }
+ regs->orig_r0 = args[0];
+ args++;
- memcpy(&regs->uregs[0] + i, args, n * sizeof(args[0]));
+ memcpy(&regs->uregs[0] + 1, args, 5 * sizeof(args[0]));
}
#endif /* _ASM_NDS32_SYSCALL_H */
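
This hunk is one instance of a tree-wide interface change: syscall_get_arguments() and syscall_set_arguments() drop the (i, n) window parameters and always transfer all six arguments. A caller-side sketch of the new contract (task and regs must belong to a tracee stopped at syscall entry):

	unsigned long args[6];

	/* Always fetches all six arguments; args[0] is the first. */
	syscall_get_arguments(task, regs, args);

	/* Rewrite, say, the third argument and push all six back. */
	args[2] = 0;
	syscall_set_arguments(task, regs, args);
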
diff --git a/arch/nds32/include/asm/tlb.h b/arch/nds32/include/asm/tlb.h
index b35ae5eae3ab..d5ae571c8d30 100644
--- a/arch/nds32/include/asm/tlb.h
+++ b/arch/nds32/include/asm/tlb.h
@@ -4,22 +4,6 @@
#ifndef __ASMNDS32_TLB_H
#define __ASMNDS32_TLB_H
-#define tlb_start_vma(tlb,vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
-
-#define tlb_end_vma(tlb,vma) \
- do { \
- if(!tlb->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
diff --git a/arch/nds32/include/asm/tlbflush.h b/arch/nds32/include/asm/tlbflush.h
index 9b411f401903..38ee769b18d8 100644
--- a/arch/nds32/include/asm/tlbflush.h
+++ b/arch/nds32/include/asm/tlbflush.h
@@ -42,6 +42,5 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t * pte);
-void tlb_migrate_finish(struct mm_struct *mm);
#endif
diff --git a/arch/nds32/include/uapi/asm/Kbuild b/arch/nds32/include/uapi/asm/Kbuild
index c1b06dcf6cf8..1c72f04ff75d 100644
--- a/arch/nds32/include/uapi/asm/Kbuild
+++ b/arch/nds32/include/uapi/asm/Kbuild
@@ -1,3 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generic-y += ucontext.h
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index d1e521cce317..1d03633f89a9 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -79,6 +79,9 @@ static void __init map_ram(void)
/* Alloc one page for holding PTE's... */
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
/* Fill the newly allocated page with PTE'S */
@@ -111,6 +114,9 @@ static void __init fixedrange_init(void)
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!fixmap_pmd_p)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));
#ifdef CONFIG_HIGHMEM
@@ -123,6 +129,9 @@ static void __init fixedrange_init(void)
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
pkmap_page_table = pte;
#endif /* CONFIG_HIGHMEM */
@@ -148,6 +157,9 @@ void __init paging_init(void)
/* allocate space for empty_zero_page */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
zone_sizes_init();
empty_zero_page = virt_to_page(zero_page);
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index c3e913ef4f0c..ea37394ff3ea 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -24,6 +24,7 @@ config NIOS2
select USB_ARCH_HAS_HCD if USB_SUPPORT
select CPU_NO_EFFICIENT_FFS
select ARCH_DISCARD_MEMBLOCK
+ select MMU_GATHER_NO_RANGE if MMU
config GENERIC_CSUM
def_bool y
@@ -40,9 +41,6 @@ config NO_IOPORT_MAP
config FPU
def_bool n
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config TRACE_IRQFLAGS_SUPPORT
def_bool n
@@ -123,7 +121,6 @@ config NIOS2_CMDLINE_IGNORE_DTB
config NIOS2_PASS_CMDLINE
bool "Passed kernel command line from u-boot"
- default n
help
Use the bootargs env variable from u-boot for the kernel command line.
This will override the "Default kernel command string".
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 8fde4fa2c34f..d7ef3512504a 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -23,9 +23,11 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
generic-y += pci.h
generic-y += percpu.h
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index db4f7d179220..95237b7f6fc1 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -232,7 +232,6 @@ static inline void pte_clear(struct mm_struct *mm,
pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;
set_pte_at(mm, addr, ptep, null);
- flush_tlb_one(addr);
}
/*
diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h
index 9de220854c4a..d7624ed06efb 100644
--- a/arch/nios2/include/asm/syscall.h
+++ b/arch/nios2/include/asm/syscall.h
@@ -58,81 +58,25 @@ static inline void syscall_set_return_value(struct task_struct *task,
}
static inline void syscall_get_arguments(struct task_struct *task,
- struct pt_regs *regs, unsigned int i, unsigned int n,
- unsigned long *args)
+ struct pt_regs *regs, unsigned long *args)
{
- BUG_ON(i + n > 6);
-
- switch (i) {
- case 0:
- if (!n--)
- break;
- *args++ = regs->r4;
- case 1:
- if (!n--)
- break;
- *args++ = regs->r5;
- case 2:
- if (!n--)
- break;
- *args++ = regs->r6;
- case 3:
- if (!n--)
- break;
- *args++ = regs->r7;
- case 4:
- if (!n--)
- break;
- *args++ = regs->r8;
- case 5:
- if (!n--)
- break;
- *args++ = regs->r9;
- case 6:
- if (!n--)
- break;
- default:
- BUG();
- }
+ *args++ = regs->r4;
+ *args++ = regs->r5;
+ *args++ = regs->r6;
+ *args++ = regs->r7;
+ *args++ = regs->r8;
+ *args = regs->r9;
}
static inline void syscall_set_arguments(struct task_struct *task,
- struct pt_regs *regs, unsigned int i, unsigned int n,
- const unsigned long *args)
+ struct pt_regs *regs, const unsigned long *args)
{
- BUG_ON(i + n > 6);
-
- switch (i) {
- case 0:
- if (!n--)
- break;
- regs->r4 = *args++;
- case 1:
- if (!n--)
- break;
- regs->r5 = *args++;
- case 2:
- if (!n--)
- break;
- regs->r6 = *args++;
- case 3:
- if (!n--)
- break;
- regs->r7 = *args++;
- case 4:
- if (!n--)
- break;
- regs->r8 = *args++;
- case 5:
- if (!n--)
- break;
- regs->r9 = *args++;
- case 6:
- if (!n)
- break;
- default:
- BUG();
- }
+ regs->r4 = *args++;
+ regs->r5 = *args++;
+ regs->r6 = *args++;
+ regs->r7 = *args++;
+ regs->r8 = *args++;
+ regs->r9 = *args;
}
#endif
diff --git a/arch/nios2/include/asm/tlb.h b/arch/nios2/include/asm/tlb.h
index d3bc648e08b5..f9f2e27e32dd 100644
--- a/arch/nios2/include/asm/tlb.h
+++ b/arch/nios2/include/asm/tlb.h
@@ -11,22 +11,12 @@
#ifndef _ASM_NIOS2_TLB_H
#define _ASM_NIOS2_TLB_H
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
extern void set_mmu_pid(unsigned long pid);
/*
- * NiosII doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for the area to be unmapped.
+ * NIOS2 does have flush_tlb_range(), but it lacks a range-size limit and a
+ * fallback to full mm invalidation, so use flush_tlb_mm() for everything.
*/
-#define tlb_start_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
-
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
diff --git a/arch/nios2/include/asm/tlbflush.h b/arch/nios2/include/asm/tlbflush.h
index e19652fca1c6..b4bf487b9832 100644
--- a/arch/nios2/include/asm/tlbflush.h
+++ b/arch/nios2/include/asm/tlbflush.h
@@ -26,21 +26,32 @@ struct mm_struct;
*
* - flush_tlb_all() flushes all processes TLB entries
* - flush_tlb_mm(mm) flushes the specified mm context TLB entries
- * - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_page(vma, address) flushes a page
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_kernel_page(address) flushes a kernel page
+ *
+ * - reload_tlb_page(vma, address, pte) flushes the TLB for address like
+ * flush_tlb_page, then replaces it with a TLB entry for pte.
*/
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void flush_tlb_one(unsigned long vaddr);
static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
+ unsigned long address)
{
- flush_tlb_one(addr);
+ flush_tlb_range(vma, address, address + PAGE_SIZE);
}
+static inline void flush_tlb_kernel_page(unsigned long address)
+{
+ flush_tlb_kernel_range(address, address + PAGE_SIZE);
+}
+
+extern void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+
#endif /* _ASM_NIOS2_TLBFLUSH_H */
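
With this rework, the single-page flushes are thin wrappers over the range primitives, so callers such as the vmalloc fault path (see the arch/nios2/mm/fault.c hunk below) can simply do:

	/* Sync one kernel page translation after fixing up the PTE. */
	flush_tlb_kernel_page(address);
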
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 0febf1a07c30..1c72f04ff75d 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,4 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
-generic-y += kvm_para.h
generic-y += ucontext.h
diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c
index bf2f55d10a4d..4e704046a150 100644
--- a/arch/nios2/kernel/nios2_ksyms.c
+++ b/arch/nios2/kernel/nios2_ksyms.c
@@ -9,12 +9,20 @@
#include <linux/export.h>
#include <linux/string.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
/* string functions */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
+/* memory management */
+
+EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(flush_icache_range);
+
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
@@ -31,3 +39,7 @@ DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__umoddi3);
DECLARE_EXPORT(__umodsi3);
DECLARE_EXPORT(__muldi3);
+DECLARE_EXPORT(__ucmpdi2);
+DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ashldi3);
+DECLARE_EXPORT(__ashrdi3);
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 506f6e1c86d5..65de1bd6a760 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -198,12 +198,15 @@ void flush_dcache_page(struct page *page)
EXPORT_SYMBOL(flush_dcache_page);
void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t *pte)
+ unsigned long address, pte_t *ptep)
{
- unsigned long pfn = pte_pfn(*pte);
+ pte_t pte = *ptep;
+ unsigned long pfn = pte_pfn(pte);
struct page *page;
struct address_space *mapping;
+ reload_tlb_page(vma, address, pte);
+
if (!pfn_valid(pfn))
return;
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index eb65f17c158d..6a2e716b959f 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -270,7 +270,7 @@ vmalloc_fault:
if (!pte_present(*pte_k))
goto no_context;
- flush_tlb_one(address);
+ flush_tlb_kernel_page(address);
return;
}
}
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
index cf10326aab1c..7fea59e53f94 100644
--- a/arch/nios2/mm/tlb.c
+++ b/arch/nios2/mm/tlb.c
@@ -23,10 +23,6 @@
((((1UL << (cpuinfo.tlb_ptr_sz - cpuinfo.tlb_num_ways_log2))) - 1) \
<< PAGE_SHIFT)
-/* Used as illegal PHYS_ADDR for TLB mappings
- */
-#define MAX_PHYS_ADDR 0
-
static void get_misc_and_pid(unsigned long *misc, unsigned long *pid)
{
*misc = RDCTL(CTL_TLBMISC);
@@ -35,28 +31,23 @@ static void get_misc_and_pid(unsigned long *misc, unsigned long *pid)
}
/*
- * All entries common to a mm share an asid. To effectively flush these
- * entries, we just bump the asid.
+ * This provides a PTEADDR value for addr that will cause a TLB miss
+ * (fast TLB miss). TLB invalidation replaces entries with this value.
*/
-void flush_tlb_mm(struct mm_struct *mm)
+static unsigned long pteaddr_invalid(unsigned long addr)
{
- if (current->mm == mm)
- flush_tlb_all();
- else
- memset(&mm->context, 0, sizeof(mm_context_t));
+ return ((addr | 0xC0000000UL) >> PAGE_SHIFT) << 2;
}
/*
* This one is only used for pages with the global bit set so we don't care
* much about the ASID.
*/
-void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+static void replace_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, unsigned long tlbacc)
{
unsigned int way;
unsigned long org_misc, pid_misc;
- pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
-
/* remember pid/way until we return. */
get_misc_and_pid(&org_misc, &pid_misc);
@@ -67,30 +58,48 @@ void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
unsigned long tlbmisc;
unsigned long pid;
- tlbmisc = pid_misc | TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);
+ tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);
WRCTL(CTL_TLBMISC, tlbmisc);
+
pteaddr = RDCTL(CTL_PTEADDR);
+ if (((pteaddr >> 2) & 0xfffff) != (addr >> PAGE_SHIFT))
+ continue;
+
tlbmisc = RDCTL(CTL_TLBMISC);
pid = (tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK;
- if (((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) &&
- pid == mmu_pid) {
- unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE +
- ((PAGE_SIZE * cpuinfo.tlb_num_lines) * way) +
- (addr & TLB_INDEX_MASK);
- pr_debug("Flush entry by writing %#lx way=%dl pid=%ld\n",
- vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT));
-
- WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2);
- tlbmisc = pid_misc | TLBMISC_WE |
- (way << TLBMISC_WAY_SHIFT);
- WRCTL(CTL_TLBMISC, tlbmisc);
- WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT));
- }
+ if (pid != mmu_pid)
+ continue;
+
+ tlbmisc = (mmu_pid << TLBMISC_PID_SHIFT) | TLBMISC_WE |
+ (way << TLBMISC_WAY_SHIFT);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ if (tlbacc == 0)
+ WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
+ WRCTL(CTL_TLBACC, tlbacc);
+ /*
+ * There should be only a single entry that maps a
+ * particular {address,pid} so break after a match.
+ */
+ break;
}
WRCTL(CTL_TLBMISC, org_misc);
}
+static void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+{
+ pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
+
+ replace_tlb_one_pid(addr, mmu_pid, 0);
+}
+
+static void reload_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, pte_t pte)
+{
+ pr_debug("Reload tlb-entry for vaddr=%#lx\n", addr);
+
+ replace_tlb_one_pid(addr, mmu_pid, pte_val(pte));
+}
+
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
@@ -102,19 +111,18 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
}
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
- while (start < end) {
- flush_tlb_one(start);
- start += PAGE_SIZE;
- }
+ unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
+
+ reload_tlb_one_pid(addr, mmu_pid, pte);
}
/*
* This one is only used for pages with the global bit set so we don't care
* much about the ASID.
*/
-void flush_tlb_one(unsigned long addr)
+static void flush_tlb_one(unsigned long addr)
{
unsigned int way;
unsigned long org_misc, pid_misc;
@@ -130,30 +138,33 @@ void flush_tlb_one(unsigned long addr)
unsigned long pteaddr;
unsigned long tlbmisc;
- tlbmisc = pid_misc | TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);
+ tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);
WRCTL(CTL_TLBMISC, tlbmisc);
- pteaddr = RDCTL(CTL_PTEADDR);
- tlbmisc = RDCTL(CTL_TLBMISC);
- if ((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) {
- unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE +
- ((PAGE_SIZE * cpuinfo.tlb_num_lines) * way) +
- (addr & TLB_INDEX_MASK);
+ pteaddr = RDCTL(CTL_PTEADDR);
+ if (((pteaddr >> 2) & 0xfffff) != (addr >> PAGE_SHIFT))
+ continue;
- pr_debug("Flush entry by writing %#lx way=%dl pid=%ld\n",
- vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT));
+ pr_debug("Flush entry by writing way=%d pid=%ld\n",
+ way, (pid_misc >> TLBMISC_PID_SHIFT));
- tlbmisc = pid_misc | TLBMISC_WE |
- (way << TLBMISC_WAY_SHIFT);
- WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2);
- WRCTL(CTL_TLBMISC, tlbmisc);
- WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT));
- }
+ tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
+ WRCTL(CTL_TLBACC, 0);
}
WRCTL(CTL_TLBMISC, org_misc);
}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ while (start < end) {
+ flush_tlb_one(start);
+ start += PAGE_SIZE;
+ }
+}
+
void dump_tlb_line(unsigned long line)
{
unsigned int way;
@@ -177,7 +188,7 @@ void dump_tlb_line(unsigned long line)
tlbmisc = RDCTL(CTL_TLBMISC);
tlbacc = RDCTL(CTL_TLBACC);
- if ((tlbacc << PAGE_SHIFT) != (MAX_PHYS_ADDR & PAGE_MASK)) {
+ if ((tlbacc << PAGE_SHIFT) != 0) {
pr_debug("-- way:%02x vpn:0x%08lx phys:0x%08lx pid:0x%02lx flags:%c%c%c%c%c\n",
way,
(pteaddr << (PAGE_SHIFT-2)),
@@ -203,8 +214,9 @@ void dump_tlb(void)
dump_tlb_line(i);
}
-void flush_tlb_pid(unsigned long pid)
+void flush_tlb_pid(unsigned long mmu_pid)
{
+ unsigned long addr = 0;
unsigned int line;
unsigned int way;
unsigned long org_misc, pid_misc;
@@ -213,55 +225,65 @@ void flush_tlb_pid(unsigned long pid)
get_misc_and_pid(&org_misc, &pid_misc);
for (line = 0; line < cpuinfo.tlb_num_lines; line++) {
- WRCTL(CTL_PTEADDR, line << 2);
+ WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
- unsigned long pteaddr;
unsigned long tlbmisc;
- unsigned long tlbacc;
+ unsigned long pid;
- tlbmisc = pid_misc | TLBMISC_RD |
- (way << TLBMISC_WAY_SHIFT);
+ tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);
WRCTL(CTL_TLBMISC, tlbmisc);
- pteaddr = RDCTL(CTL_PTEADDR);
tlbmisc = RDCTL(CTL_TLBMISC);
- tlbacc = RDCTL(CTL_TLBACC);
-
- if (((tlbmisc>>TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK)
- == pid) {
- tlbmisc = pid_misc | TLBMISC_WE |
- (way << TLBMISC_WAY_SHIFT);
- WRCTL(CTL_TLBMISC, tlbmisc);
- WRCTL(CTL_TLBACC,
- (MAX_PHYS_ADDR >> PAGE_SHIFT));
- }
+ pid = (tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK;
+ if (pid != mmu_pid)
+ continue;
+
+ tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+ WRCTL(CTL_TLBMISC, tlbmisc);
+ WRCTL(CTL_TLBACC, 0);
}
- WRCTL(CTL_TLBMISC, org_misc);
+ addr += PAGE_SIZE;
+ }
+
+ WRCTL(CTL_TLBMISC, org_misc);
+}
+
+/*
+ * All entries common to a mm share an asid. To effectively flush these
+ * entries, we just bump the asid.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (current->mm == mm) {
+ unsigned long mmu_pid = get_pid_from_context(&mm->context);
+ flush_tlb_pid(mmu_pid);
+ } else {
+ memset(&mm->context, 0, sizeof(mm_context_t));
}
}
void flush_tlb_all(void)
{
- int i;
- unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE;
+ unsigned long addr = 0;
+ unsigned int line;
unsigned int way;
- unsigned long org_misc, pid_misc, tlbmisc;
+ unsigned long org_misc, pid_misc;
/* remember pid/way until we return */
get_misc_and_pid(&org_misc, &pid_misc);
- pid_misc |= TLBMISC_WE;
+
+ /* Start at way 0, way is auto-incremented after each TLBACC write */
+ WRCTL(CTL_TLBMISC, TLBMISC_WE);
/* Map each TLB entry to physical address 0 with no-access and a
bad ptbase */
- for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
- tlbmisc = pid_misc | (way << TLBMISC_WAY_SHIFT);
- for (i = 0; i < cpuinfo.tlb_num_lines; i++) {
- WRCTL(CTL_PTEADDR, ((vaddr) >> PAGE_SHIFT) << 2);
- WRCTL(CTL_TLBMISC, tlbmisc);
- WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT));
- vaddr += 1UL << 12;
- }
+ for (line = 0; line < cpuinfo.tlb_num_lines; line++) {
+ WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
+ for (way = 0; way < cpuinfo.tlb_num_ways; way++)
+ WRCTL(CTL_TLBACC, 0);
+
+ addr += PAGE_SIZE;
}
/* restore pid/way */
@@ -270,6 +292,10 @@ void flush_tlb_all(void)
void set_mmu_pid(unsigned long pid)
{
- WRCTL(CTL_TLBMISC, (RDCTL(CTL_TLBMISC) & TLBMISC_WAY) |
- ((pid & TLBMISC_PID_MASK) << TLBMISC_PID_SHIFT));
+ unsigned long tlbmisc;
+
+ tlbmisc = RDCTL(CTL_TLBMISC);
+ tlbmisc = (tlbmisc & TLBMISC_WAY);
+ tlbmisc |= (pid & TLBMISC_PID_MASK) << TLBMISC_PID_SHIFT;
+ WRCTL(CTL_TLBMISC, tlbmisc);
}
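
The invalidation trick above works because pteaddr_invalid() points the entry's PTEADDR into the 0xC0000000 uncached I/O region, where a TLB lookup can never hit. A worked example, assuming PAGE_SHIFT == 12:

	/* (0x1000 | 0xC0000000) >> 12 = 0xC0001; 0xC0001 << 2 = 0x300004 */
	unsigned long inval = pteaddr_invalid(0x1000UL);	/* == 0x300004 */
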
diff --git a/arch/nios2/platform/Kconfig.platform b/arch/nios2/platform/Kconfig.platform
index 74c1aaf588b8..c72074f8bdd9 100644
--- a/arch/nios2/platform/Kconfig.platform
+++ b/arch/nios2/platform/Kconfig.platform
@@ -17,7 +17,6 @@ comment "Device tree"
config NIOS2_DTB_AT_PHYS_ADDR
bool "DTB at physical address"
- default n
help
When enabled you can select a physical address to load the dtb from.
Normally this address is passed by a bootloader such as u-boot but
@@ -37,7 +36,6 @@ config NIOS2_DTB_PHYS_ADDR
config NIOS2_DTB_SOURCE_BOOL
bool "Compile and link device tree into kernel image"
- default n
help
This allows you to specify a dts (device tree source) file
which will be compiled and linked into the kernel image.
@@ -62,21 +60,18 @@ config NIOS2_ARCH_REVISION
config NIOS2_HW_MUL_SUPPORT
bool "Enable MUL instruction"
- default n
help
Set to true if you configured the Nios II to include the MUL
instruction. This will enable the -mhw-mul compiler flag.
config NIOS2_HW_MULX_SUPPORT
bool "Enable MULX instruction"
- default n
help
Set to true if you configured the Nios II to include the MULX
instruction. Enables the -mhw-mulx compiler flag.
config NIOS2_HW_DIV_SUPPORT
bool "Enable DIV instruction"
- default n
help
Set to true if you configured the Nios II to include the DIV
instruction. Enables the -mhw-div compiler flag.
@@ -84,7 +79,6 @@ config NIOS2_HW_DIV_SUPPORT
config NIOS2_BMX_SUPPORT
bool "Enable BMX instructions"
depends on NIOS2_ARCH_REVISION = 2
- default n
help
Set to true if you configured the Nios II R2 to include
the BMX Bit Manipulation Extension instructions. Enables
@@ -93,7 +87,6 @@ config NIOS2_BMX_SUPPORT
config NIOS2_CDX_SUPPORT
bool "Enable CDX instructions"
depends on NIOS2_ARCH_REVISION = 2
- default n
help
Set to true if you configured the Nios II R2 to include
the CDX Bit Manipulation Extension instructions. Enables
@@ -101,13 +94,11 @@ config NIOS2_CDX_SUPPORT
config NIOS2_FPU_SUPPORT
bool "Custom floating point instr support"
- default n
help
Enables the -mcustom-fpu-cfg=60-1 compiler flag.
config NIOS2_CI_SWAB_SUPPORT
bool "Byteswap custom instruction"
- default n
help
Use the byteswap (endian converter) Nios II custom instruction provided
by Altera, which can be enabled in the QSYS builder. This accelerates
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index a5e361fbb75a..7cfb20555b10 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -36,6 +36,7 @@ config OPENRISC
select OMPIC if SMP
select ARCH_WANT_FRAME_POINTERS
select GENERIC_IRQ_MULTI_HANDLER
+ select MMU_GATHER_NO_RANGE if MMU
config CPU_BIG_ENDIAN
def_bool y
@@ -43,12 +44,6 @@ config CPU_BIG_ENDIAN
config MMU
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
- def_bool n
-
config GENERIC_HWEIGHT
def_bool y
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 1f04844b6b82..1919cc5e0f11 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -15,15 +15,16 @@ generic-y += fb.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
-generic-y += irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
generic-y += pci.h
generic-y += percpu.h
@@ -35,7 +36,6 @@ generic-y += qrwlock.h
generic-y += sections.h
generic-y += segment.h
generic-y += shmparam.h
-generic-y += string.h
generic-y += switch_to.h
generic-y += topology.h
generic-y += trace_clock.h
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h
index 2db9f1cf0694..b4ff07c1baed 100644
--- a/arch/openrisc/include/asm/syscall.h
+++ b/arch/openrisc/include/asm/syscall.h
@@ -56,20 +56,16 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
static inline void
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, unsigned long *args)
+ unsigned long *args)
{
- BUG_ON(i + n > 6);
-
- memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+ memcpy(args, &regs->gpr[3], 6 * sizeof(args[0]));
}
static inline void
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, const unsigned long *args)
+ const unsigned long *args)
{
- BUG_ON(i + n > 6);
-
- memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+ memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
}
static inline int syscall_get_arch(void)
diff --git a/arch/openrisc/include/asm/tlb.h b/arch/openrisc/include/asm/tlb.h
index fa4376a4515d..92d8a4209884 100644
--- a/arch/openrisc/include/asm/tlb.h
+++ b/arch/openrisc/include/asm/tlb.h
@@ -20,14 +20,10 @@
#define __ASM_OPENRISC_TLB_H__
/*
- * or32 doesn't need any special per-pte or
- * per-vma handling..
+ * OpenRISC doesn't have an efficient flush_tlb_range() so use flush_tlb_mm()
+ * for everything.
*/
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index 0febf1a07c30..1c72f04ff75d 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,4 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
-generic-y += kvm_para.h
generic-y += ucontext.h
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index d157310eb377..caeb4184e8a6 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -105,7 +105,10 @@ static void __init map_ram(void)
}
/* Alloc one page for holding PTE's... */
- pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate page for PTEs\n",
+ __func__);
set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
/* Fill the newly allocated page with PTE'S */
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 051bcb4fefd3..a8509950dbbc 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -122,10 +122,14 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
- if (likely(mem_init_done))
+ if (likely(mem_init_done)) {
pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
- else
+ } else {
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+ }
return pte;
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index c8e621296092..f1ed8ddfe486 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -75,12 +75,6 @@ config GENERIC_LOCKBREAK
default y
depends on SMP && PREEMPT
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config ARCH_HAS_ILOG2_U32
bool
default n
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 0b1e354c8c24..b8c7db777144 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,7 +1,6 @@
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
-generic-y += barrier.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
@@ -12,15 +11,16 @@ generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kprobes.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += seccomp.h
generic-y += segment.h
-generic-y += topology.h
generic-y += trace_clock.h
generic-y += user.h
generic-y += vga.h
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 30a8315d5c07..93d37010b375 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -229,8 +229,6 @@ static inline void writeq(unsigned long long q, volatile void __iomem *addr)
#define writel_relaxed(l, addr) writel(l, addr)
#define writeq_relaxed(q, addr) writeq(q, addr)
-#define mmiowb() do { } while (0)
-
void memset_io(volatile void __iomem *addr, unsigned char val, int count);
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 2a27b275ab09..9ff033d261ab 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
- return regs->gr[20];
+ return regs->gr[28];
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- regs->iaoq[0] = val;
+ regs->iaoq[0] = val;
+ regs->iaoq[1] = val + 4;
}
/* Query offset/name of register from its name/offset */
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
index 8bff1a58c97f..62a6d477fae0 100644
--- a/arch/parisc/include/asm/syscall.h
+++ b/arch/parisc/include/asm/syscall.h
@@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk,
}
static inline void syscall_get_arguments(struct task_struct *tsk,
- struct pt_regs *regs, unsigned int i,
- unsigned int n, unsigned long *args)
+ struct pt_regs *regs,
+ unsigned long *args)
{
- BUG_ON(i);
-
- switch (n) {
- case 6:
- args[5] = regs->gr[21];
- case 5:
- args[4] = regs->gr[22];
- case 4:
- args[3] = regs->gr[23];
- case 3:
- args[2] = regs->gr[24];
- case 2:
- args[1] = regs->gr[25];
- case 1:
- args[0] = regs->gr[26];
- case 0:
- break;
- default:
- BUG();
- }
+ args[5] = regs->gr[21];
+ args[4] = regs->gr[22];
+ args[3] = regs->gr[23];
+ args[2] = regs->gr[24];
+ args[1] = regs->gr[25];
+ args[0] = regs->gr[26];
}
static inline long syscall_get_return_value(struct task_struct *task,
diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h
index 0c881e74d8a6..8c0446b04c9e 100644
--- a/arch/parisc/include/asm/tlb.h
+++ b/arch/parisc/include/asm/tlb.h
@@ -2,24 +2,6 @@
#ifndef _PARISC_TLB_H
#define _PARISC_TLB_H
-#define tlb_flush(tlb) \
-do { if ((tlb)->fullmm) \
- flush_tlb_mm((tlb)->mm);\
-} while (0)
-
-#define tlb_start_vma(tlb, vma) \
-do { if (!(tlb)->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define tlb_end_vma(tlb, vma) \
-do { if (!(tlb)->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address) \
- do { } while (0)
-
#include <asm-generic/tlb.h>
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index c54353d390ff..2bd5b392277c 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -1,5 +1,2 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_32.h
generated-y += unistd_64.h
-generic-y += kvm_para.h
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 16e428f03526..66c5dd245ac7 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -2,8 +2,8 @@
#ifndef _UAPI_ASM_SOCKET_H
#define _UAPI_ASM_SOCKET_H
+#include <linux/posix_types.h>
#include <asm/sockios.h>
-#include <asm/bitsperlong.h>
/* For setsockopt(2) */
#define SOL_SOCKET 0xffff
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index eb39e7e380d7..841db71958cd 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
static int __init parisc_idle_init(void)
{
- const char *marker;
-
- /* check QEMU/SeaBIOS marker in PAGE0 */
- marker = (char *) &PAGE0->pad0;
- running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
-
if (!running_on_qemu)
cpu_idle_poll_ctrl(1);
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 15dd9e21be7e..d908058d05c1 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -397,6 +397,9 @@ void __init start_parisc(void)
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
+ /* check QEMU/SeaBIOS marker in PAGE0 */
+ running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
+
cpunum = smp_processor_id();
init_cpu_topology();
diff --git a/arch/parisc/kernel/stacktrace.c b/arch/parisc/kernel/stacktrace.c
index ec5835e83a7a..6f0b9c8d8052 100644
--- a/arch/parisc/kernel/stacktrace.c
+++ b/arch/parisc/kernel/stacktrace.c
@@ -29,22 +29,17 @@ static void dump_trace(struct task_struct *task, struct stack_trace *trace)
}
}
-
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
void save_stack_trace(struct stack_trace *trace)
{
dump_trace(current, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index b26766c6647d..fe8ca623add8 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -420,3 +420,7 @@
421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
422 32 futex_time64 sys_futex sys_futex
423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b5dce13a6132..fa7219ffeadc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -103,13 +103,6 @@ config LOCKDEP_SUPPORT
bool
default y
-config RWSEM_GENERIC_SPINLOCK
- bool
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y
-
config GENERIC_LOCKBREAK
bool
default y
@@ -132,6 +125,7 @@ config PPC
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
+ select ARCH_HAS_MMIOWB if PPC64
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if PPC64
select ARCH_HAS_PTE_SPECIAL
@@ -218,6 +212,8 @@ config PPC
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if SMP
+ select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_MMU_GATHER_PAGE_SIZE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
select HAVE_SYSCALL_TRACEPOINTS
@@ -232,7 +228,6 @@ config PPC
select NEED_SG_DMA_LENGTH
select OF
select OF_EARLY_FLATTREE
- select OF_RESERVED_MEM
select OLD_SIGACTION if PPC32
select OLD_SIGSUSPEND
select PCI_DOMAINS if PCI
@@ -319,6 +314,10 @@ config ARCH_SUSPEND_POSSIBLE
(PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \
|| 44x || 40x
+config ARCH_SUSPEND_NONZERO_CPU
+ def_bool y
+ depends on PPC_POWERNV || PPC_PSERIES
+
config PPC_DCR_NATIVE
bool
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 0e8dadd011bc..73d1f3562978 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -218,7 +218,7 @@ quiet_cmd_bootas = BOOTAS $@
cmd_bootas = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $<
quiet_cmd_bootar = BOOTAR $@
- cmd_bootar = $(BOOTAR) $(BOOTARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@
+ cmd_bootar = $(BOOTAR) $(BOOTARFLAGS) $@.$$$$ $(real-prereqs); mv $@.$$$$ $@
$(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE
$(call if_changed_dep,bootcc)
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index cfdd08897a06..1bcd468ab422 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -37,7 +37,8 @@ CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
# CONFIG_PPC_VAS is not set
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
@@ -49,7 +50,6 @@ CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
-# CONFIG_BOUNCE is not set
CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_CMDLINE_BOOL=y
@@ -136,9 +136,11 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_AURORA is not set
CONFIG_TIGON3=m
CONFIG_BNX2X=m
# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
CONFIG_CHELSIO_T1=m
@@ -151,6 +153,7 @@ CONFIG_BE2NET=m
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_E1000=m
+CONFIG_E1000E=m
CONFIG_IGB=m
CONFIG_IXGB=m
CONFIG_IXGBE=m
@@ -161,15 +164,18 @@ CONFIG_MLX4_EN=m
# CONFIG_MLX4_CORE_GEN2 is not set
CONFIG_MLX5_CORE=m
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MYRI10GE=m
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
CONFIG_QLGE=m
CONFIG_NETXEN_NIC=m
+CONFIG_QED=m
+CONFIG_QEDE=m
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
@@ -260,6 +266,7 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS=y
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index fd1d6c83f0c0..c4fa242dd652 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -1,10 +1,12 @@
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#define CHKSUM_BLOCK_SIZE 1
@@ -22,7 +24,7 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
unsigned int prealign;
unsigned int tail;
- if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+ if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
return __crc32c_le(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
index 02ea277863d1..e27ff16573b5 100644
--- a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
@@ -12,11 +12,13 @@
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#define VMX_ALIGN 16
@@ -32,7 +34,7 @@ static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
unsigned int tail;
u32 crc = crci;
- if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+ if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
return crc_t10dif_generic(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 77ff7fb24823..b9f6e72bf4e5 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -5,10 +5,9 @@ generated-y += syscall_table_spu.h
generic-y += div64.h
generic-y += export.h
generic-y += irq_regs.h
-generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += vtime.h
generic-y += msi.h
+generic-y += simd.h
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 66c1e4f88d65..ec2a55a553c7 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -39,6 +39,14 @@ static inline int hstate_get_psize(struct hstate *hstate)
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
+ /*
+ * We use gigantic page reservation with hypervisor assist in some cases;
+ * we cannot use runtime allocation of gigantic pages on those platforms.
+ * This is the case for hash translation mode LPARs.
+ */
+ if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
+ return false;
+
return true;
}
#endif
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 4b73847e9b95..1fad67b46409 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -34,14 +34,11 @@ extern struct pci_dev *isa_bridge_pcidev;
#include <asm/byteorder.h>
#include <asm/synch.h>
#include <asm/delay.h>
+#include <asm/mmiowb.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/pgtable.h>
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-#endif
-
#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399
@@ -107,12 +104,6 @@ extern bool isa_io_special;
*
*/
-#ifdef CONFIG_PPC64
-#define IO_SET_SYNC_FLAG() do { local_paca->io_sync = 1; } while(0)
-#else
-#define IO_SET_SYNC_FLAG()
-#endif
-
#define DEF_MMIO_IN_X(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
@@ -127,7 +118,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn" %1,%y0" \
: "=Z" (*addr) : "r" (val) : "memory"); \
- IO_SET_SYNC_FLAG(); \
+ mmiowb_set_pending(); \
}
#define DEF_MMIO_IN_D(name, size, insn) \
@@ -144,7 +135,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \
: "=m" (*addr) : "r" (val) : "memory"); \
- IO_SET_SYNC_FLAG(); \
+ mmiowb_set_pending(); \
}
DEF_MMIO_IN_D(in_8, 8, lbz);
@@ -652,24 +643,6 @@ static inline void name at \
#include <asm-generic/iomap.h>
-#ifdef CONFIG_PPC32
-#define mmiowb()
-#else
-/*
- * Enforce synchronisation of stores vs. spin_unlock
- * (this does it explicitly, though our implementation of spin_unlock
- * does it implicitely too)
- */
-static inline void mmiowb(void)
-{
- unsigned long tmp;
-
- __asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
- : "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
- : "memory");
-}
-#endif /* !CONFIG_PPC32 */
-
static inline void iosync(void)
{
__asm__ __volatile__ ("sync" : : : "memory");
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0f98f00da2ea..e6b5bb012ccb 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -99,6 +99,8 @@ struct kvm_nested_guest;
struct kvm_vm_stat {
ulong remote_tlb_flush;
+ ulong num_2M_pages;
+ ulong num_1G_pages;
};
struct kvm_vcpu_stat {
@@ -377,6 +379,7 @@ struct kvmppc_mmu {
void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
+ int (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
void (*slbia)(struct kvm_vcpu *vcpu);
/* book3s */
@@ -837,7 +840,7 @@ struct kvm_vcpu_arch {
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a6c8548ed9fa..ac22b28ae78d 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -36,6 +36,8 @@
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
+#include <asm/xive.h>
+#include <asm/cpu_has_feature.h>
#endif
/*
@@ -617,6 +619,18 @@ static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 ir
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
#endif /* CONFIG_KVM_XIVE */
+#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
+static inline bool xics_on_xive(void)
+{
+ return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool xics_on_xive(void)
+{
+ return false;
+}
+#endif
+
/*
* Prototypes for functions called only from assembler code.
* Having prototypes reduces sparse errors.
diff --git a/arch/powerpc/include/asm/mmiowb.h b/arch/powerpc/include/asm/mmiowb.h
new file mode 100644
index 000000000000..74a00127eb20
--- /dev/null
+++ b/arch/powerpc/include/asm/mmiowb.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMIOWB_H
+#define _ASM_POWERPC_MMIOWB_H
+
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#include <asm/paca.h>
+
+#define arch_mmiowb_state() (&local_paca->mmiowb_state)
+#define mmiowb() mb()
+
+#endif /* CONFIG_MMIOWB */
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_POWERPC_MMIOWB_H */
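
For context, the generic mmiowb machinery this header plugs into keeps a small per-CPU state (stashed in the paca here): every MMIO write marks it pending via mmiowb_set_pending(), and spin_unlock issues the barrier only if MMIO actually happened inside the critical section. Roughly, paraphrased from include/asm-generic/mmiowb.h (details such as likely() hints omitted):

	static inline void mmiowb_spin_lock(void)
	{
		arch_mmiowb_state()->nesting_count++;
	}

	static inline void mmiowb_set_pending(void)	/* called by writel() etc. */
	{
		struct mmiowb_state *ms = arch_mmiowb_state();

		if (ms->nesting_count)
			ms->mmiowb_pending = ms->nesting_count;
	}

	static inline void mmiowb_spin_unlock(void)
	{
		struct mmiowb_state *ms = arch_mmiowb_state();

		if (ms->mmiowb_pending) {
			ms->mmiowb_pending = 0;
			mmiowb();	/* mb() on Power, per the #define above */
		}
		ms->nesting_count--;
	}
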
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index d34ad1657d7b..8ddd4a91bdc1 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void)
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
defined (CONFIG_PPC_64K_PAGES)
#define MAX_PHYSMEM_BITS 51
-#else
+#elif defined(CONFIG_PPC64)
#define MAX_PHYSMEM_BITS 46
#endif
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e843bc5d1a0f..134e912d403f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -34,6 +34,8 @@
#include <asm/cpuidle.h>
#include <asm/atomic.h>
+#include <asm-generic/mmiowb_types.h>
+
register struct paca_struct *local_paca asm("r13");
#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
@@ -171,7 +173,6 @@ struct paca_struct {
u16 trap_save; /* Used when bad stack is encountered */
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
- u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
u8 nap_state_lost; /* NV GPR values lost in power7_idle */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -264,6 +265,9 @@ struct paca_struct {
#ifdef CONFIG_STACKPROTECTOR
unsigned long canary;
#endif
+#ifdef CONFIG_MMIOWB
+ struct mmiowb_state mmiowb_state;
+#endif
} ____cacheline_aligned;
extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c5698a523bb1..23f7ed796f38 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -302,6 +302,7 @@
/* Misc instructions for BPF compiler */
#define PPC_INST_LBZ 0x88000000
#define PPC_INST_LD 0xe8000000
+#define PPC_INST_LDX 0x7c00002a
#define PPC_INST_LHZ 0xa0000000
#define PPC_INST_LWZ 0x80000000
#define PPC_INST_LHBRX 0x7c00062c
@@ -309,6 +310,7 @@
#define PPC_INST_STB 0x98000000
#define PPC_INST_STH 0xb0000000
#define PPC_INST_STD 0xf8000000
+#define PPC_INST_STDX 0x7c00012a
#define PPC_INST_STDU 0xf8000001
#define PPC_INST_STW 0x90000000
#define PPC_INST_STWU 0x94000000
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 685c72310f5d..15b39c407c4e 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -39,19 +39,6 @@
#define LOCK_TOKEN 1
#endif
-#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
-#define CLEAR_IO_SYNC (get_paca()->io_sync = 0)
-#define SYNC_IO do { \
- if (unlikely(get_paca()->io_sync)) { \
- mb(); \
- get_paca()->io_sync = 0; \
- } \
- } while (0)
-#else
-#define CLEAR_IO_SYNC
-#define SYNC_IO
-#endif
-
#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
@@ -99,7 +86,6 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- CLEAR_IO_SYNC;
return __arch_spin_trylock(lock) == 0;
}
@@ -130,7 +116,6 @@ extern void __rw_yield(arch_rwlock_t *lock);
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
- CLEAR_IO_SYNC;
while (1) {
if (likely(__arch_spin_trylock(lock) == 0))
break;
@@ -148,7 +133,6 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long flags_dis;
- CLEAR_IO_SYNC;
while (1) {
if (likely(__arch_spin_trylock(lock) == 0))
break;
@@ -167,7 +151,6 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- SYNC_IO;
__asm__ __volatile__("# arch_spin_unlock\n\t"
PPC_RELEASE_BARRIER: : :"memory");
lock->slock = 0;
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 1a0e7a8b1c81..1243045bad2d 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
unsigned long val, mask = -1UL;
-
- BUG_ON(i + n > 6);
+ unsigned int n = 6;
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_32BIT))
mask = 0xffffffff;
#endif
while (n--) {
- if (n == 0 && i == 0)
+ if (n == 0)
val = regs->orig_gpr3;
else
- val = regs->gpr[3 + i + n];
+ val = regs->gpr[3 + n];
args[n] = val & mask;
}
@@ -88,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- BUG_ON(i + n > 6);
- memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+ memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
/* Also copy the first argument into orig_gpr3 */
- if (i == 0 && n > 0)
- regs->orig_gpr3 = args[0];
+ regs->orig_gpr3 = args[0];
}
static inline int syscall_get_arch(void)
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e24c67d5ba75..34fba1ce27f7 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -27,8 +27,8 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+#define tlb_flush tlb_flush
extern void tlb_flush(struct mmu_gather *tlb);
/* Get the generic bits... */
@@ -46,22 +46,6 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
#endif
}
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
- if (!tlb->page_size)
- tlb->page_size = page_size;
- else if (tlb->page_size != page_size) {
- if (!tlb->fullmm)
- tlb_flush_mmu(tlb);
- /*
- * update the page size after flush for the new
- * mmu_gather.
- */
- tlb->page_size = page_size;
- }
-}
-
#ifdef CONFIG_SMP
static inline int mm_is_core_local(struct mm_struct *mm)
{
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 1afe90ade595..bbc06bd72b1f 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -82,10 +82,10 @@ struct vdso_data {
__u32 icache_block_size; /* L1 i-cache block size */
__u32 dcache_log_block_size; /* L1 d-cache log block size */
__u32 icache_log_block_size; /* L1 i-cache log block size */
- __s32 wtom_clock_sec; /* Wall to monotonic clock */
- __s32 wtom_clock_nsec;
- struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
- __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
+ __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
+ __s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */
+ __s64 wtom_clock_sec; /* Wall to monotonic clock sec */
+ struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
};
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 214a39acdf25..2bd5b392277c 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -1,4 +1,2 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_32.h
generated-y += unistd_64.h
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 8c876c166ef2..26ca425f4c2c 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char {
#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
+#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54)
#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
+#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58)
/* Per-vcpu XICS interrupt controller state */
#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 6f1c11e0691f..7534ecff5e92 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -24,9 +24,6 @@ BEGIN_MMU_FTR_SECTION
li r10,0
mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
- lis r10, (swapper_pg_dir - PAGE_OFFSET)@h
- ori r10, r10, (swapper_pg_dir - PAGE_OFFSET)@l
- mtspr SPRN_SPRG_PGDIR, r10
BEGIN_FTR_SECTION
bl __init_fpu_registers
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index e49bd5efcfe6..c66fd3ce6478 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -810,7 +810,6 @@ static int __init process_cpufeatures_node(unsigned long node,
int len;
f = &dt_cpu_features[i];
- memset(f, 0, sizeof(struct dt_cpu_feature));
f->node = node;
@@ -1005,7 +1004,12 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
/* Count and allocate space for cpu features */
of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
&nr_dt_cpu_features);
- dt_cpu_features = __va(memblock_phys_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE));
+ dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
+ if (!dt_cpu_features)
+ panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
+ __func__,
+ sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
+ PAGE_SIZE);
cpufeatures_setup_start(isa);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a5b8fbae56a0..9481a117e242 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -656,11 +656,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
ld r4,PACA_EXSLB+EX_DAR(r13)
std r4,_DAR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+ /* HPT case, do SLB fault */
bl do_slb_fault
cmpdi r3,0
bne- 1f
b fast_exception_return
1: /* Error case */
+MMU_FTR_SECTION_ELSE
+ /* Radix case, access is outside page table range */
+ li r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
@@ -705,11 +711,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
ld r4,_NIP(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+ /* HPT case, do SLB fault */
bl do_slb_fault
cmpdi r3,0
bne- 1f
b fast_exception_return
1: /* Error case */
+MMU_FTR_SECTION_ELSE
+ /* Radix case, access is outside page table range */
+ li r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index ce6a972f2584..e25b615e9f9e 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -851,10 +851,9 @@ __secondary_start:
tophys(r4,r2)
addi r4,r4,THREAD /* phys address of our thread_struct */
mtspr SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
- li r3,0
- stw r3, RTAS_SP(r4) /* 0 => not in RTAS */
-#endif
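+	/* give the hash/TLB miss handlers the physical address of the kernel PGD */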
+ lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
+ ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
+ mtspr SPRN_SPRG_PGDIR, r4
/* enable MMU and jump to start_secondary */
li r4,MSR_KERNEL
@@ -938,10 +937,9 @@ start_here:
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
mtspr SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
- li r3,0
- stw r3, RTAS_SP(r4) /* 0 => not in RTAS */
-#endif
+ lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
+ ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
+ mtspr SPRN_SPRG_PGDIR, r4
/* stack */
lis r1,init_thread_union@ha
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 1881127682e9..32332e24e421 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -194,13 +194,6 @@ set_ivor:
#endif
mtspr SPRN_MAS4, r2
-#if 0
- /* Enable DOZE */
- mfspr r2,SPRN_HID0
- oris r2,r2,HID0_DOZE@h
- mtspr SPRN_HID0, r2
-#endif
-
#if !defined(CONFIG_BDI_SWITCH)
/*
* The Abatron BDI JTAG debugger does not tolerate others
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 683b5b3805bd..cd381e2291df 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -22,6 +22,7 @@
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
static __init void kvm_free_tmp(void)
{
+ /*
+ * Inform kmemleak about the hole in the .bss section since the
+ * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
+ */
+ kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
+ ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
free_reserved_area(&kvm_tmp[kvm_tmp_index],
&kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 8c890c6557ed..e7382abee868 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -196,7 +196,11 @@ void __init allocate_paca_ptrs(void)
paca_nr_cpu_ids = nr_cpu_ids;
paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
- paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, SMP_CACHE_BYTES));
+ paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
+ if (!paca_ptrs)
+ panic("Failed to allocate %d bytes for paca pointers\n",
+ paca_ptrs_size);
+
memset(paca_ptrs, 0x88, paca_ptrs_size);
}
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index d3f04f2d8249..0417fda13636 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -205,6 +205,9 @@ pci_create_OF_bus_map(void)
of_prop = memblock_alloc(sizeof(struct property) + 256,
SMP_CACHE_BYTES);
+ if (!of_prop)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct property) + 256);
dn = of_find_node_by_path("/");
if (dn) {
memset(of_prop, -1, sizeof(struct property) + 256);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4181ec715f88..4221527b082f 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -126,7 +126,10 @@ static void __init move_device_tree(void)
if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
!memblock_is_memory(start + size - 1) ||
overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
- p = __va(memblock_phys_alloc(size, PAGE_SIZE));
+ p = memblock_alloc_raw(size, PAGE_SIZE);
+ if (!p)
+ panic("Failed to allocate %lu bytes to move device tree\n",
+ size);
memcpy(p, initial_boot_params, size);
initial_boot_params = p;
DBG("Moved device tree to 0x%px\n", p);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index de35bd8f047f..fbc676160adf 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1187,7 +1187,11 @@ void __init rtas_initialize(void)
ibm_suspend_me_token = rtas_token("ibm,suspend-me");
}
#endif
- rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
+ rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE,
+ 0, rtas_region);
+ if (!rtas_rmo_buf)
+ panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
+ RTAS_RMOBUF_MAX, &rtas_region);
#ifdef CONFIG_RTAS_ERROR_LOGGING
rtas_last_error_token = rtas_token("rtas-last-error");
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 9b8631533e02..70568ccbd9fd 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -57,7 +57,7 @@ void setup_barrier_nospec(void)
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
- if (!no_nospec)
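+	/* also respect the generic "mitigations=off" command line option */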
+ if (!no_nospec && !cpu_mitigations_off())
enable_barrier_nospec(enable);
}
@@ -116,7 +116,7 @@ static int __init handle_nospectre_v2(char *p)
early_param("nospectre_v2", handle_nospectre_v2);
void setup_spectre_v2(void)
{
- if (no_spectrev2)
+ if (no_spectrev2 || cpu_mitigations_off())
do_btb_flush_fixups();
else
btb_flush_enabled = true;
@@ -190,29 +190,22 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
- if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
- bool comma = false;
+ if (bcs || ccd) {
seq_buf_printf(&s, "Mitigation: ");
- if (bcs) {
+ if (bcs)
seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
- comma = true;
- }
- if (ccd) {
- if (comma)
- seq_buf_printf(&s, ", ");
- seq_buf_printf(&s, "Indirect branch cache disabled");
- comma = true;
- }
-
- if (comma)
+ if (bcs && ccd)
seq_buf_printf(&s, ", ");
- seq_buf_printf(&s, "Software count cache flush");
+ if (ccd)
+ seq_buf_printf(&s, "Indirect branch cache disabled");
+ } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+ seq_buf_printf(&s, "Mitigation: Software count cache flush");
if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
- seq_buf_printf(&s, "(hardware accelerated)");
+ seq_buf_printf(&s, " (hardware accelerated)");
} else if (btb_flush_enabled) {
seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
} else {
@@ -307,7 +300,7 @@ void setup_stf_barrier(void)
stf_enabled_flush_types = type;
- if (!no_stf_barrier)
+ if (!no_stf_barrier && !cpu_mitigations_off())
stf_barrier_enable(enable);
}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index f17868e19e2c..2e5dfb6e0823 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -461,6 +461,9 @@ void __init smp_setup_cpu_maps(void)
cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
__alignof__(u32));
+ if (!cpu_to_phys_id)
+ panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+ __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
for_each_node_by_type(dn, "cpu") {
const __be32 *intserv;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ff0aac42bb33..4f49e1a3594c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -905,6 +905,10 @@ static void __ref init_fallback_flush(void)
l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
l1d_size, MEMBLOCK_LOW_LIMIT,
limit, NUMA_NO_NODE);
+ if (!l1d_flush_fallback_area)
+ panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
+ __func__, l1d_size * 2, l1d_size, &limit);
+
for_each_possible_cpu(cpu) {
struct paca_struct *paca = paca_ptrs[cpu];
@@ -928,7 +932,7 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
enabled_flush_types = types;
- if (!no_rfi_flush)
+ if (!no_rfi_flush && !cpu_mitigations_off())
rfi_flush_enable(enable);
}
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index b18abb0c3dae..00f5a63c8d9a 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -505,3 +505,7 @@
421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
422 32 futex_time64 sys_futex sys_futex
423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a21200c6aaea..1fd45a8650e1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -71,6 +71,7 @@
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
+#include <asm/nmi.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 1e0bc5955a40..afd516b572f8 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
* can be used, r7 contains NSEC_PER_SEC.
*/
- lwz r5,WTOM_CLOCK_SEC(r9)
+ lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9)
lwz r6,WTOM_CLOCK_NSEC(r9)
/* We now have our offset in r5,r6. We create a fake dependency
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index a4ed9edfd5f0..1f324c28705b 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -92,7 +92,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
* At this point, r4,r5 contain our sec/nsec values.
*/
- lwa r6,WTOM_CLOCK_SEC(r3)
+ ld r6,WTOM_CLOCK_SEC(r3)
lwa r9,WTOM_CLOCK_NSEC(r3)
/* We now have our result in r6,r9. We create a fake dependency
@@ -125,7 +125,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
bne cr6,75f
/* CLOCK_MONOTONIC_COARSE */
- lwa r6,WTOM_CLOCK_SEC(r3)
+ ld r6,WTOM_CLOCK_SEC(r3)
lwa r9,WTOM_CLOCK_NSEC(r3)
/* check if counter has updated */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 9a7dadbe1f17..10c5579d20ce 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -39,6 +39,7 @@
#include "book3s.h"
#include "trace.h"
+#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/* #define EXIT_DEBUG */
@@ -71,6 +72,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pthru_all", VCPU_STAT(pthru_all) },
{ "pthru_host", VCPU_STAT(pthru_host) },
{ "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
+ { "largepages_2M", VM_STAT(num_2M_pages) },
+ { "largepages_1G", VM_STAT(num_1G_pages) },
{ NULL }
};
@@ -642,7 +645,7 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
r = -ENXIO;
break;
}
- if (xive_enabled())
+ if (xics_on_xive())
*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
else
*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
@@ -715,7 +718,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
r = -ENXIO;
break;
}
- if (xive_enabled())
+ if (xics_on_xive())
r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
else
r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
@@ -991,7 +994,7 @@ int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
bool line_status)
{
- if (xive_enabled())
+ if (xics_on_xive())
return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
line_status);
else
@@ -1044,7 +1047,7 @@ static int kvmppc_book3s_init(void)
#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
- if (xive_enabled()) {
+ if (xics_on_xive()) {
kvmppc_xive_init_module();
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
} else
@@ -1057,7 +1060,7 @@ static int kvmppc_book3s_init(void)
static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
- if (xive_enabled())
+ if (xics_on_xive())
kvmppc_xive_exit_module();
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 612169988a3d..6f789f674048 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -425,6 +425,7 @@ void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
mmu->slbmte = NULL;
mmu->slbmfee = NULL;
mmu->slbmfev = NULL;
+ mmu->slbfee = NULL;
mmu->slbie = NULL;
mmu->slbia = NULL;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index c92dd25bed23..d4b967f0e8d4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -435,6 +435,19 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}
+static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
+ ulong *ret_slb)
+{
+ struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
+
+ if (slbe) {
+ *ret_slb = slbe->origv;
+ return 0;
+ }
+ *ret_slb = 0;
+ return -ENOENT;
+}
+
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
struct kvmppc_slb *slbe;
@@ -670,6 +683,7 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
+ mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
mmu->slbie = kvmppc_mmu_book3s_64_slbie;
mmu->slbia = kvmppc_mmu_book3s_64_slbia;
mmu->xlate = kvmppc_mmu_book3s_64_xlate;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index bd2dcfbf00cd..be7bc070eae5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -442,6 +442,24 @@ int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
u32 last_inst;
/*
+ * Fast path - check if the guest physical address corresponds to a
+ * device on the FAST_MMIO_BUS; if so, we can avoid loading the
+ * instruction altogether and can just handle it and return.
+ */
+ if (is_store) {
+ int idx, ret;
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,
+ NULL);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ if (!ret) {
+ kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+ return RESUME_GUEST;
+ }
+ }
+
+ /*
* If we fail, we just return to the guest and try executing it again.
*/
if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 1b821c6efdef..f55ef071883f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -403,8 +403,13 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
if (!memslot)
return;
}
- if (shift)
+ if (shift) { /* 1GB or 2MB page */
page_size = 1ul << shift;
+ if (shift == PMD_SHIFT)
+ kvm->stat.num_2M_pages--;
+ else if (shift == PUD_SHIFT)
+ kvm->stat.num_1G_pages--;
+ }
gpa &= ~(page_size - 1);
hpa = old & PTE_RPN_MASK;
@@ -878,6 +883,14 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
put_page(page);
}
+ /* Increment number of large pages if we (successfully) inserted one */
+ if (!ret) {
+ if (level == 1)
+ kvm->stat.num_2M_pages++;
+ else if (level == 2)
+ kvm->stat.num_1G_pages++;
+ }
+
return ret;
}
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 532ab79734c7..f100e331e69b 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -133,7 +133,6 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
continue;
kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
- return;
}
}
}
@@ -338,14 +337,15 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
}
}
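+	/* the new fd owns a kvm reference, so take it before the fd is visible */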
+ kvm_get_kvm(kvm);
if (!ret)
ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
stt, O_RDWR | O_CLOEXEC);
- if (ret >= 0) {
+ if (ret >= 0)
list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
- kvm_get_kvm(kvm);
- }
+ else
+ kvm_put_kvm(kvm);
mutex_unlock(&kvm->lock);
@@ -543,14 +543,14 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
if (ret != H_SUCCESS)
return ret;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
ret = kvmppc_tce_validate(stt, tce);
if (ret != H_SUCCESS)
- return ret;
+ goto unlock_exit;
dir = iommu_tce_direction(tce);
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
ret = H_PARAMETER;
goto unlock_exit;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 8c7e933e942e..6ef7c5f00a49 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -47,6 +47,7 @@
#define OP_31_XOP_SLBMFEV 851
#define OP_31_XOP_EIOIO 854
#define OP_31_XOP_SLBMFEE 915
+#define OP_31_XOP_SLBFEE 979
#define OP_31_XOP_TBEGIN 654
#define OP_31_XOP_TABORT 910
@@ -416,6 +417,23 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.mmu.slbia(vcpu);
break;
+ case OP_31_XOP_SLBFEE:
+ if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
+ return EMULATE_FAIL;
+ } else {
+ ulong b, t;
+ ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;
+
+ b = kvmppc_get_gpr(vcpu, rb);
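+			/* set CR0[EQ] if slbfee finds a matching SLB entry */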
+ if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
+ cr |= 2 << CR0_SHIFT;
+ kvmppc_set_gpr(vcpu, rt, t);
+ /* copy XER[SO] bit to CR0[SO] */
+ cr |= (vcpu->arch.regs.xer & 0x80000000) >>
+ (31 - CR0_SHIFT);
+ kvmppc_set_cr(vcpu, cr);
+ }
+ break;
case OP_31_XOP_SLBMFEE:
if (!vcpu->arch.mmu.slbmfee) {
emulated = EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index a3d5318f5d1e..b2b29d4f9842 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -922,7 +922,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_IPOLL:
case H_XIRR_X:
if (kvmppc_xics_enabled(vcpu)) {
- if (xive_enabled()) {
+ if (xics_on_xive()) {
ret = H_NOT_AVAILABLE;
return RESUME_GUEST;
}
@@ -937,6 +937,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
break;
+#ifdef CONFIG_SPAPR_TCE_IOMMU
case H_GET_TCE:
ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
@@ -966,6 +967,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
+#endif
case H_RANDOM:
if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
ret = H_HARDWARE;
@@ -1445,7 +1447,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
case BOOK3S_INTERRUPT_HV_RM_HARD:
vcpu->arch.trap = 0;
r = RESUME_GUEST;
- if (!xive_enabled())
+ if (!xics_on_xive())
kvmppc_xics_rm_complete(vcpu, 0);
break;
default:
@@ -3421,7 +3423,9 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
- mtspr(SPRN_PSSCR, host_psscr);
+ /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
+ mtspr(SPRN_PSSCR, host_psscr |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
mtspr(SPRN_HFSCR, host_hfscr);
mtspr(SPRN_CIABR, host_ciabr);
mtspr(SPRN_DAWR, host_dawr);
@@ -3648,11 +3652,12 @@ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
{
- /* 10us base */
- if (vc->halt_poll_ns == 0 && halt_poll_ns_grow)
- vc->halt_poll_ns = 10000;
- else
- vc->halt_poll_ns *= halt_poll_ns_grow;
+ if (!halt_poll_ns_grow)
+ return;
+
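+	/* grow geometrically, starting no lower than halt_poll_ns_grow_start */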
+ vc->halt_poll_ns *= halt_poll_ns_grow;
+ if (vc->halt_poll_ns < halt_poll_ns_grow_start)
+ vc->halt_poll_ns = halt_poll_ns_grow_start;
}
static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
@@ -3666,7 +3671,7 @@ static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
#ifdef CONFIG_KVM_XICS
static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
{
- if (!xive_enabled())
+ if (!xics_on_xive())
return false;
return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
vcpu->arch.xive_saved_state.cppr;
@@ -4226,7 +4231,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
srcu_read_unlock(&kvm->srcu, srcu_idx);
} else if (r == RESUME_PASSTHROUGH) {
- if (WARN_ON(xive_enabled()))
+ if (WARN_ON(xics_on_xive()))
r = H_SUCCESS;
else
r = kvmppc_xics_rm_complete(vcpu, 0);
@@ -4750,7 +4755,7 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
* If xive is enabled, we route 0x500 interrupts directly
* to the guest.
*/
- if (xive_enabled())
+ if (xics_on_xive())
lpcr |= LPCR_LPES;
}
@@ -4986,7 +4991,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
if (i == pimap->n_mapped)
pimap->n_mapped++;
- if (xive_enabled())
+ if (xics_on_xive())
rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
else
kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
@@ -5027,7 +5032,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
return -ENODEV;
}
- if (xive_enabled())
+ if (xics_on_xive())
rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
else
kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
@@ -5359,13 +5364,11 @@ static int kvm_init_subcore_bitmap(void)
continue;
sibling_subcore_state =
- kmalloc_node(sizeof(struct sibling_subcore_state),
+ kzalloc_node(sizeof(struct sibling_subcore_state),
GFP_KERNEL, node);
if (!sibling_subcore_state)
return -ENOMEM;
- memset(sibling_subcore_state, 0,
- sizeof(struct sibling_subcore_state));
for (j = 0; j < threads_per_core; j++) {
int cpu = first_cpu + j;
@@ -5406,7 +5409,7 @@ static int kvmppc_book3s_init_hv(void)
* indirectly, via OPAL.
*/
#ifdef CONFIG_SMP
- if (!xive_enabled() && !kvmhv_on_pseries() &&
+ if (!xics_on_xive() && !kvmhv_on_pseries() &&
!local_paca->kvm_hstate.xics_phys) {
struct device_node *np;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index a71e2fc00a4e..b0cf22477e87 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -257,7 +257,7 @@ void kvmhv_rm_send_ipi(int cpu)
}
/* We should never reach this */
- if (WARN_ON_ONCE(xive_enabled()))
+ if (WARN_ON_ONCE(xics_on_xive()))
return;
/* Else poke the target with an IPI */
@@ -577,7 +577,7 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xive_enabled()) {
+ if (xics_on_xive()) {
if (is_rm())
return xive_rm_h_xirr(vcpu);
if (unlikely(!__xive_vm_h_xirr))
@@ -592,7 +592,7 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
vcpu->arch.regs.gpr[5] = get_tb();
- if (xive_enabled()) {
+ if (xics_on_xive()) {
if (is_rm())
return xive_rm_h_xirr(vcpu);
if (unlikely(!__xive_vm_h_xirr))
@@ -606,7 +606,7 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xive_enabled()) {
+ if (xics_on_xive()) {
if (is_rm())
return xive_rm_h_ipoll(vcpu, server);
if (unlikely(!__xive_vm_h_ipoll))
@@ -621,7 +621,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xive_enabled()) {
+ if (xics_on_xive()) {
if (is_rm())
return xive_rm_h_ipi(vcpu, server, mfrr);
if (unlikely(!__xive_vm_h_ipi))
@@ -635,7 +635,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xive_enabled()) {
+ if (xics_on_xive()) {
if (is_rm())
return xive_rm_h_cppr(vcpu, cppr);
if (unlikely(!__xive_vm_h_cppr))
@@ -649,7 +649,7 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xive_enabled()) {
+ if (xics_on_xive()) {
if (is_rm())
return xive_rm_h_eoi(vcpu, xirr);
if (unlikely(!__xive_vm_h_eoi))
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index b3f5786b20dc..3b9662a4207e 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -144,6 +144,13 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
return;
}
+ if (xive_enabled() && kvmhv_on_pseries()) {
+ /* No XICS access or hypercalls available, too hard */
+ this_icp->rm_action |= XICS_RM_KICK_VCPU;
+ this_icp->rm_kick_target = vcpu;
+ return;
+ }
+
/*
* Check if the core is loaded,
* if not, find an available host core to post to wake the VCPU,
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 25043b50cb30..3a5e719ef032 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2272,8 +2272,13 @@ hcall_real_table:
.long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
.long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
.long DOTSYM(kvmppc_h_protect) - hcall_real_table
+#ifdef CONFIG_SPAPR_TCE_IOMMU
.long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
+#else
+ .long 0 /* 0x1c */
+ .long 0 /* 0x20 */
+#endif
.long 0 /* 0x24 - H_SET_SPRG0 */
.long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
.long 0 /* 0x2c */
@@ -2351,8 +2356,13 @@ hcall_real_table:
.long 0 /* 0x12c */
.long 0 /* 0x130 */
.long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
+#ifdef CONFIG_SPAPR_TCE_IOMMU
.long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
+#else
+ .long 0 /* 0x138 */
+ .long 0 /* 0x13c */
+#endif
.long 0 /* 0x140 */
.long 0 /* 0x144 */
.long 0 /* 0x148 */
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 2d3b2b1cc272..4e178c4c1ea5 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -33,7 +33,7 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
server = be32_to_cpu(args->args[1]);
priority = be32_to_cpu(args->args[2]);
- if (xive_enabled())
+ if (xics_on_xive())
rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority);
else
rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
@@ -56,7 +56,7 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
server = priority = 0;
- if (xive_enabled())
+ if (xics_on_xive())
rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority);
else
rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -83,7 +83,7 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
- if (xive_enabled())
+ if (xics_on_xive())
rc = kvmppc_xive_int_off(vcpu->kvm, irq);
else
rc = kvmppc_xics_int_off(vcpu->kvm, irq);
@@ -105,7 +105,7 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
- if (xive_enabled())
+ if (xics_on_xive())
rc = kvmppc_xive_int_on(vcpu->kvm, irq);
else
rc = kvmppc_xics_int_on(vcpu->kvm, irq);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index b90a7d154180..8885377ec3e0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -748,7 +748,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
break;
case KVMPPC_IRQ_XICS:
- if (xive_enabled())
+ if (xics_on_xive())
kvmppc_xive_cleanup_vcpu(vcpu);
else
kvmppc_xics_free_icp(vcpu);
@@ -1931,7 +1931,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EPERM;
dev = kvm_device_from_filp(f.file);
if (dev) {
- if (xive_enabled())
+ if (xics_on_xive())
r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
else
r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
@@ -2189,10 +2189,12 @@ static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
- KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+ KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
+ KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
- KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+ KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
+ KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
}
return 0;
}
@@ -2251,12 +2253,16 @@ static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
if (have_fw_feat(fw_features, "enabled",
"fw-count-cache-disabled"))
cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+ if (have_fw_feat(fw_features, "enabled",
+ "fw-count-cache-flush-bcctr2,0,0"))
+ cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
- KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+ KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
+ KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
if (have_fw_feat(fw_features, "enabled",
"speculation-policy-favor-security"))
@@ -2267,9 +2273,13 @@ static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
if (!have_fw_feat(fw_features, "disabled",
"needs-spec-barrier-for-bound-checks"))
cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+ if (have_fw_feat(fw_features, "enabled",
+ "needs-count-cache-flush-on-context-switch"))
+ cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
- KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+ KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
+ KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
of_node_put(fw_features);
}
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index dedf88a76f58..ce180870bd52 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -15,6 +15,9 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
p = kzalloc(size, mask);
else {
p = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!p)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ size);
}
return p;
}
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 844d8e774492..b7f6f6e0b6e8 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
beq .Lzero
.Lcmp_rest_lt8bytes:
- /* Here we have only less than 8 bytes to compare with. at least s1
- * Address is aligned with 8 bytes.
- * The next double words are load and shift right with appropriate
- * bits.
+ /*
+ * Here we have less than 8 bytes to compare. At least s1 is aligned to
+ * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
+ * page boundary, otherwise we might read past the end of the buffer and
+ * trigger a page fault. We use 4K as the conservative minimum page
+ * size. If we detect that case we go to the byte-by-byte loop.
+ *
+ * Otherwise the next double word is loaded from s1 and s2, and shifted
+ * right to compare the appropriate bits.
*/
+ clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
+ cmpdi r6,0xff8
+ bgt .Lshort
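+	// an 8-byte load at s2 may cross a 4K page, take the byte-by-byte loop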
+
subfic r6,r5,8
slwi r6,r6,3
LD rA,0,r3
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index d52ec118e09d..3c1bd9fa23cd 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -52,3 +52,6 @@ obj-$(CONFIG_PPC_MEM_KEYS) += pkeys.o
# This is necessary for booting with kcov enabled on book3e machines
KCOV_INSTRUMENT_tlb_nohash.o := n
KCOV_INSTRUMENT_fsl_booke_mmu.o := n
+
+# Instrumenting the SLB fault path can lead to duplicate SLB entries
+KCOV_INSTRUMENT_slb.o := n
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 1f13494efb2b..a6c491f18a04 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -70,12 +70,12 @@ _GLOBAL(hash_page)
lis r0,KERNELBASE@h /* check if kernel address */
cmplw 0,r4,r0
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
- mfspr r5, SPRN_SPRG_PGDIR /* virt page-table root */
+ mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */
blt+ 112f /* assume user more likely */
- lis r5,swapper_pg_dir@ha /* if kernel address, use */
- addi r5,r5,swapper_pg_dir@l /* kernel page table */
+ lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ addi r5, r5, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
-112: tophys(r5, r5)
+112:
#ifndef CONFIG_PTE_64BIT
rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
lwz r8,0(r5) /* get pmd entry */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 3d4b2399192f..0a4f939a8161 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -882,8 +882,12 @@ static void __init htab_initialize(void)
}
#endif /* CONFIG_PPC_CELL */
- table = memblock_alloc_base(htab_size_bytes, htab_size_bytes,
- limit);
+ table = memblock_phys_alloc_range(htab_size_bytes,
+ htab_size_bytes,
+ 0, limit);
+ if (!table)
+ panic("ERROR: Failed to allocate %pa bytes below %pa\n",
+ &htab_size_bytes, &limit);
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
@@ -911,6 +915,9 @@ static void __init htab_initialize(void)
linear_map_hash_slots = memblock_alloc_try_nid(
linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
ppc64_rma_size, NUMA_NO_NODE);
+ if (!linear_map_hash_slots)
+ panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
+ __func__, linear_map_hash_count, &ppc64_rma_size);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index e7a9c4f6bfca..8330f135294f 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -95,28 +95,15 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
unsigned long entries, unsigned long dev_hpa,
struct mm_iommu_table_group_mem_t **pmem)
{
- struct mm_iommu_table_group_mem_t *mem;
- long i, ret, locked_entries = 0;
+ struct mm_iommu_table_group_mem_t *mem, *mem2;
+ long i, ret, locked_entries = 0, pinned = 0;
unsigned int pageshift;
-
- mutex_lock(&mem_list_mutex);
-
- list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
- next) {
- /* Overlap? */
- if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
- (ua < (mem->ua +
- (mem->entries << PAGE_SHIFT)))) {
- ret = -EINVAL;
- goto unlock_exit;
- }
-
- }
+ unsigned long entry, chunk;
if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
ret = mm_iommu_adjust_locked_vm(mm, entries, true);
if (ret)
- goto unlock_exit;
+ return ret;
locked_entries = entries;
}
@@ -148,17 +135,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
}
down_read(&mm->mmap_sem);
- ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
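+	/*
+	 * Pin in chunks small enough that the vma pointer array which
+	 * get_user_pages_longterm() may allocate internally still fits in a
+	 * maximum-order page allocation.
+	 */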
+ chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+ sizeof(struct vm_area_struct *);
+ chunk = min(chunk, entries);
+ for (entry = 0; entry < entries; entry += chunk) {
+ unsigned long n = min(entries - entry, chunk);
+
+ ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
+ FOLL_WRITE, mem->hpages + entry, NULL);
+ if (ret == n) {
+ pinned += n;
+ continue;
+ }
+ if (ret > 0)
+ pinned += ret;
+ break;
+ }
up_read(&mm->mmap_sem);
- if (ret != entries) {
- /* free the reference taken */
- for (i = 0; i < ret; i++)
- put_page(mem->hpages[i]);
-
- vfree(mem->hpas);
- kfree(mem);
- ret = -EFAULT;
- goto unlock_exit;
+ if (pinned != entries) {
+ if (!ret)
+ ret = -EFAULT;
+ goto free_exit;
}
pageshift = PAGE_SHIFT;
@@ -183,21 +180,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
}
good_exit:
- ret = 0;
atomic64_set(&mem->mapped, 1);
mem->used = 1;
mem->ua = ua;
mem->entries = entries;
- *pmem = mem;
- list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+ mutex_lock(&mem_list_mutex);
-unlock_exit:
- if (locked_entries && ret)
- mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+ list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+ /* Overlap? */
+ if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
+ (ua < (mem2->ua +
+ (mem2->entries << PAGE_SHIFT)))) {
+ ret = -EINVAL;
+ mutex_unlock(&mem_list_mutex);
+ goto free_exit;
+ }
+ }
+
+ list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
mutex_unlock(&mem_list_mutex);
+ *pmem = mem;
+
+ return 0;
+
+free_exit:
+ /* free the reference taken */
+ for (i = 0; i < pinned; i++)
+ put_page(mem->hpages[i]);
+
+ vfree(mem->hpas);
+ kfree(mem);
+
+unlock_exit:
+ mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+
return ret;
}
@@ -266,7 +285,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
long ret = 0;
- unsigned long entries, dev_hpa;
+ unsigned long unlock_entries = 0;
mutex_lock(&mem_list_mutex);
@@ -287,17 +306,17 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
goto unlock_exit;
}
+ if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+ unlock_entries = mem->entries;
+
/* @mapped became 0 so now mappings are disabled, release the region */
- entries = mem->entries;
- dev_hpa = mem->dev_hpa;
mm_iommu_release(mem);
- if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
- mm_iommu_adjust_locked_vm(mm, entries, false);
-
unlock_exit:
mutex_unlock(&mem_list_mutex);
+ mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
+
return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 22d71a58167f..1945c5f19f5e 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -461,10 +461,19 @@ void __init mmu_context_init(void)
* Allocate the maps used by context management
*/
context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ if (!context_map)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ CTX_MAP_SIZE);
context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
SMP_CACHE_BYTES);
+ if (!context_mm)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(void *) * (LAST_CONTEXT + 1));
#ifdef CONFIG_SMP
stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ if (!stale_map[boot_cpuid])
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ CTX_MAP_SIZE);
cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
"powerpc/mmu/ctx:prepare",
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index ac49e4158e50..f976676004ad 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -788,6 +788,10 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
int tnid;
nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa)
+ panic("Cannot allocate %zu bytes for node %d data\n",
+ nd_size, nid);
+
nd = __va(nd_pa);
/* report and initialize */
diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/pgtable-book3e.c
index 53cbc7dc2df2..1032ef7aaf62 100644
--- a/arch/powerpc/mm/pgtable-book3e.c
+++ b/arch/powerpc/mm/pgtable-book3e.c
@@ -57,8 +57,16 @@ void vmemmap_remove_mapping(unsigned long start,
static __ref void *early_alloc_pgtable(unsigned long size)
{
- return memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
- __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
+ void *ptr;
+
+ ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
+ __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
+ __func__, size, size, __pa(MAX_DMA_ADDRESS));
+
+ return ptr;
}
/*
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 92a3e4c39540..a4341aba0af4 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -197,6 +197,9 @@ void __init mmu_partition_table_init(void)
BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
/* Initialize the Partition Table with no entries */
partition_tb = memblock_alloc(patb_size, patb_size);
+ if (!partition_tb)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, patb_size, patb_size);
/*
* update partition table control register,
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index e377684ac6ad..154472a28c77 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -53,13 +53,20 @@ static __ref void *early_alloc_pgtable(unsigned long size, int nid,
{
phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
+ void *ptr;
if (region_start)
min_addr = region_start;
if (region_end)
max_addr = region_end;
- return memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
+ ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
+ __func__, size, size, nid, &min_addr, &max_addr);
+
+ return ptr;
}
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 6c8a60b1e31d..5d9c3ff728c9 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -98,10 +98,20 @@ static int find_free_bat(void)
return -1;
}
+/*
+ * This function calculates the size of the largest block usable to map the
+ * beginning of an area based on the start address and size of that area:
+ * - max block size is 8M on 601 and 256M on other 6xx.
+ * - base address must be aligned to the block size. So the maximum block size
+ * is identified by the lowest bit set to 1 in the base address (for instance
+ * if base is 0x16000000, max size is 0x02000000).
+ * - block size has to be a power of two. This is calculated by finding the
+ * highest bit set to 1.
+ */
static unsigned int block_size(unsigned long base, unsigned long top)
{
unsigned int max_size = (cpu_has_feature(CPU_FTR_601) ? 8 : 256) << 20;
- unsigned int base_shift = (fls(base) - 1) & 31;
+ unsigned int base_shift = (ffs(base) - 1) & 31;
unsigned int block_shift = (fls(top - base) - 1) & 31;
return min3(max_size, 1U << base_shift, 1U << block_shift);
@@ -157,7 +167,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
- int done;
+ unsigned long done;
unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
if (__map_without_bats) {
@@ -169,10 +179,10 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return __mmu_mapin_ram(base, top);
done = __mmu_mapin_ram(base, border);
- if (done != border - base)
+ if (done != border)
return done;
- return done + __mmu_mapin_ram(border, top);
+ return __mmu_mapin_ram(border, top);
}
void mmu_mark_initmem_nx(void)
@@ -340,6 +350,9 @@ void __init MMU_init_hw(void)
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
Hash = memblock_alloc(Hash_size, Hash_size);
+ if (!Hash)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, Hash_size, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 549e9490ff2a..dcac37745b05 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -51,6 +51,8 @@
#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
+ ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
@@ -65,7 +67,9 @@
#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
- ___PPC_RA(base) | IMM_L(i))
+ ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
+ ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
@@ -85,17 +89,6 @@
___PPC_RA(a) | ___PPC_RB(b))
#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
___PPC_RA(a) | ___PPC_RB(b))
-
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
-#else
-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
-#endif
-
#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
index dc50a8d4b3b9..21744d8aa053 100644
--- a/arch/powerpc/net/bpf_jit32.h
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
#endif
+#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+
#define SEEN_DATAREF 0x10000 /* might call external helpers */
#define SEEN_XREG 0x20000 /* X reg is used */
#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 3609be4692b3..47f441f351a6 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -68,6 +68,26 @@ static const int b2p[] = {
/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN 27
+/*
+ * WARNING: These can use TMP_REG_2 if the offset is not at a word boundary,
+ * so ensure that it isn't in use already.
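+ * ld/std are DS-form instructions whose displacement must be a multiple
+ * of 4, so non-word-aligned offsets go through an indexed ldx/stdx.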
+ */
+#define PPC_BPF_LL(r, base, i) do { \
+ if ((i) % 4) { \
+ PPC_LI(b2p[TMP_REG_2], (i)); \
+ PPC_LDX(r, base, b2p[TMP_REG_2]); \
+ } else \
+ PPC_LD(r, base, i); \
+ } while(0)
+#define PPC_BPF_STL(r, base, i) do { \
+ if ((i) % 4) { \
+ PPC_LI(b2p[TMP_REG_2], (i)); \
+ PPC_STDX(r, base, b2p[TMP_REG_2]); \
+ } else \
+ PPC_STD(r, base, i); \
+ } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+
#define SEEN_FUNC 0x1000 /* might call external helpers */
#define SEEN_STACK 0x2000 /* uses BPF stack */
#define SEEN_TAILCALL 0x4000 /* uses tail calls */
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 4194d3cfb60c..21a1dcd4b156 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+ PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
PPC_BCC(COND_GT, out);
@@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
/* prog = array->ptrs[index]; */
PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
/*
* if (prog == NULL)
@@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
PPC_BCC(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
+ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
/* skip past the function descriptor */
PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
@@ -606,7 +606,7 @@ bpf_alu32_trunc:
* the instructions generated will remain the
* same across all passes
*/
- PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
+ PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
break;
@@ -662,7 +662,7 @@ emit_clear:
PPC_LI32(b2p[TMP_REG_1], imm);
src_reg = b2p[TMP_REG_1];
}
- PPC_STD(src_reg, dst_reg, off);
+ PPC_BPF_STL(src_reg, dst_reg, off);
break;
/*
@@ -709,7 +709,7 @@ emit_clear:
break;
/* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_DW:
- PPC_LD(dst_reg, src_reg, off);
+ PPC_BPF_LL(dst_reg, src_reg, off);
break;
/*
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 842b2c7e156a..50cd09b4e05d 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -324,7 +324,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
config PPC_RADIX_MMU
bool "Radix MMU Support"
- depends on PPC_BOOK3S_64
+ depends on PPC_BOOK3S_64 && HUGETLB_PAGE
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
default y
help
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 86368e238f6e..044c6089462c 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -211,6 +211,9 @@ static int __init iob_init(struct device_node *dn)
iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21,
MEMBLOCK_LOW_LIMIT, 0x80000000,
NUMA_NO_NODE);
+ if (!iob_l2_base)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%x\n",
+ __func__, 1UL << 21, 1UL << 21, 0x80000000);
pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 9360cdc408c1..86989c5779c2 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -519,6 +519,9 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
return -EINVAL;
}
nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES);
+ if (!nvram_image)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ NVRAM_SIZE);
nvram_data = ioremap(addr, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
index 578757d403ab..daad8c45c8e7 100644
--- a/arch/powerpc/platforms/powernv/opal-call.c
+++ b/arch/powerpc/platforms/powernv/opal-call.c
@@ -86,6 +86,7 @@ static s64 __opal_call_trace(s64 a0, s64 a1, s64 a2, s64 a3,
s64 a4, s64 a5, s64 a6, s64 a7,
unsigned long opcode, unsigned long msr)
{
+ return 0;
}
#define DO_TRACE false
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 727a7de08635..2b0eca104f86 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -171,6 +171,9 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
* Allocate a buffer to hold the MC recoverable ranges.
*/
mc_recoverable_range = memblock_alloc(size, __alignof__(u64));
+ if (!mc_recoverable_range)
+ panic("%s: Failed to allocate %u bytes align=0x%lx\n",
+ __func__, size, __alignof__(u64));
for (i = 0; i < mc_recoverable_range_len; i++) {
mc_recoverable_range[i].start_addr =
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index fa6af52b5219..3ead4c237ed0 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3657,6 +3657,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
+ if (!phb)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*phb));
/* Allocate PCI controller */
phb->hose = hose = pcibios_alloc_controller(np);
@@ -3703,6 +3706,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
+ if (!phb->diag_data)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ phb->diag_data_size);
/* Parse 32-bit and IO ranges (if any) */
pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
@@ -3762,6 +3768,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
pemap_off = size;
size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
aux = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!aux)
+ panic("%s: Failed to allocate %lu bytes\n", __func__, size);
phb->ioda.pe_alloc = aux;
phb->ioda.m64_segmap = aux + m64map_off;
phb->ioda.m32_segmap = aux + m32map_off;
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 658bfab3350b..4ce5458eb0f8 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -127,6 +127,9 @@ static void __init prealloc(struct ps3_prealloc *p)
return;
p->address = memblock_alloc(p->size, p->align);
+ if (!p->address)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, p->size, p->align);
printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
p->address);
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index bba281b1fe1b..96c53b23e58f 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -239,6 +239,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.attr_groups = region_attr_groups;
ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
+ ndr_desc.target_node = ndr_desc.numa_node;
ndr_desc.res = &p->res;
ndr_desc.of_node = p->dn;
ndr_desc.provider_data = p;
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index 6ed22127391b..921f12182f3e 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
} else {
- const __be32 *indexes;
-
- indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
- if (indexes == NULL)
- goto err_of_node_put;
+ u32 nr_drc_indexes, thread_drc_index;
/*
- * The first element indexes[0] is the number of drc_indexes
- * returned in the list. Hence thread_index+1 will get the
- * drc_index corresponding to core number thread_index.
+ * The first element of ibm,drc-indexes array is the
+ * number of drc_indexes returned in the list. Hence
+ * thread_index+1 will get the drc_index corresponding
+ * to core number thread_index.
*/
- ret = indexes[thread_index + 1];
+ rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+ 0, &nr_drc_indexes);
+ if (rc)
+ goto err_of_node_put;
+
+ WARN_ON_ONCE(thread_index > nr_drc_indexes);
+ rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+ thread_index + 1,
+ &thread_drc_index);
+ if (rc)
+ goto err_of_node_put;
+
+ ret = thread_drc_index;
}
rc = 0;
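
The rewrite above stops indexing the raw ibm,drc-indexes property, whose cells are big-endian __be32 values, and goes through of_property_read_u32_index(), which does the endian conversion and bounds checking. A minimal usage sketch, where dn and idx stand in for a valid device node and a caller-supplied index:

	u32 count, value;
	int rc;

	/* cell 0 holds the number of entries that follow */
	rc = of_property_read_u32_index(dn, "ibm,drc-indexes", 0, &count);
	if (rc)
		return rc;
	if (idx >= count)
		return -EINVAL;
	/* entry idx lives in cell idx + 1 */
	rc = of_property_read_u32_index(dn, "ibm,drc-indexes", idx + 1, &value);
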
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index d97d52772789..452dcfd7e5dd 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
"UE",
"SLB",
"ERAT",
+ "Unknown",
"TLB",
"D-Cache",
"Unknown",
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index fc5c5c23303e..2a751795ec1e 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -265,6 +265,9 @@ static void allocate_dart(void)
* prefetching into invalid pages and corrupting data
*/
tmp = memblock_phys_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
+ if (!tmp)
+ panic("DART: table allocation failed\n");
+
dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
DARTMAP_RPNMASK);
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index d45450f6666a..51a679a1c403 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -129,6 +129,9 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
bmp->bitmap = kzalloc(size, GFP_KERNEL);
else {
bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!bmp->bitmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ size);
/* the bitmap won't be freed from memblock allocator */
kmemleak_not_leak(bmp->bitmap);
}
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a0f44f992360..13c6a47e6150 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2429,7 +2429,10 @@ static void dump_one_paca(int cpu)
DUMP(p, trap_save, "%#-*x");
DUMP(p, irq_soft_mask, "%#-*x");
DUMP(p, irq_happened, "%#-*x");
- DUMP(p, io_sync, "%#-*x");
+#ifdef CONFIG_MMIOWB
+ DUMP(p, mmiowb_state.nesting_count, "%#-*x");
+ DUMP(p, mmiowb_state.mmiowb_pending, "%#-*x");
+#endif
DUMP(p, irq_work_pending, "%#-*x");
DUMP(p, nap_state_lost, "%#-*x");
DUMP(p, sprg_vdso, "%#-*llx");
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index b41311f6a94f..e66745decea1 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -32,7 +32,6 @@ config RISCV
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
select HAVE_FUTEX_CMPXCHG if FUTEX
- select HAVE_GENERIC_DMA_COHERENT
select HAVE_PERF_EVENTS
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
@@ -49,6 +48,7 @@ config RISCV
select RISCV_TIMER
select GENERIC_IRQ_MULTI_HANDLER
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_MMIOWB
select HAVE_EBPF_JIT if 64BIT
config MMU
@@ -70,9 +70,6 @@ config STACKTRACE_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config GENERIC_BUG
def_bool y
depends on BUG
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
new file mode 100644
index 000000000000..1a911ed8e772
--- /dev/null
+++ b/arch/riscv/configs/rv32_defconfig
@@ -0,0 +1,84 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_ARCH_RV32I=y
+CONFIG_SMP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_HVC_RISCV_SBI=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_SIFIVE_PLIC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 57afe604b495..c207f6634b91 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -26,7 +26,7 @@ enum fixed_addresses {
};
#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
-#define FIXADDR_TOP (PAGE_OFFSET)
+#define FIXADDR_TOP (VMALLOC_START)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define FIXMAP_PAGE_IO PAGE_KERNEL
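
Before this change FIXADDR_TOP sat at PAGE_OFFSET, which placed the fixmap window inside the vmalloc area (whose top end is just below PAGE_OFFSET); anchoring it at VMALLOC_START moves the fixmap below the vmalloc area instead. The resulting layout, highest address first (a sketch; the exact constants depend on the configuration):

	/* PAGE_OFFSET ........ start of the linear mapping
	 * VMALLOC_END ........ PAGE_OFFSET - 1
	 * VMALLOC_START ...... VMALLOC_END - VMALLOC_SIZE + 1
	 * FIXADDR_TOP ........ == VMALLOC_START
	 * FIXADDR_START ...... FIXADDR_TOP - FIXADDR_SIZE */
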
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index b269451e7e85..744fd92e77bc 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -20,6 +20,7 @@
#define _ASM_RISCV_IO_H
#include <linux/types.h>
+#include <asm/mmiowb.h>
extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
@@ -100,18 +101,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#endif
/*
- * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
- * the ordering with I/O on spinlocks like PowerPC does. The worry is that
- * drivers won't get this correct, but I also don't want to introduce a fence
- * into the lock code that otherwise only uses AMOs (and is essentially defined
- * by the ISA to be correct). For now I'm leaving this here: "o,w" is
- * sufficient to ensure that all writes to the device have completed before the
- * write to the spinlock is allowed to commit. I surmised this from reading
- * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
- */
-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
-
-/*
* Unordered I/O memory access primitives. These are even more relaxed than
* the relaxed versions, as they don't even order accesses between successive
* operations to the I/O regions.
@@ -163,20 +152,20 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
-#define __io_ar() __asm__ __volatile__ ("fence i,r" : : : "memory");
+#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory");
#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw() do {} while (0)
+#define __io_aw() mmiowb_set_pending()
-#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(); __v; })
-#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(); __v; })
-#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(); __v; })
+#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
+#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
#define writeb(v,c) ({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
#define writew(v,c) ({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
#define writel(v,c) ({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
#ifdef CONFIG_64BIT
-#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(); __v; })
+#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
#define writeq(v,c) ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
#endif
@@ -198,20 +187,20 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* writes.
*/
#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
-#define __io_par() __asm__ __volatile__ ("fence i,ior" : : : "memory");
+#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
-#define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
-#define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
-#define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
+#define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
#define outb(v,c) ({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
#define outw(v,c) ({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
#define outl(v,c) ({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
#ifdef CONFIG_64BIT
-#define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(); __v; })
+#define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(__v); __v; })
#define outq(v,c) ({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); })
#endif
@@ -254,16 +243,16 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
afence; \
}
-__io_reads_ins(reads, u8, b, __io_br(), __io_ar())
-__io_reads_ins(reads, u16, w, __io_br(), __io_ar())
-__io_reads_ins(reads, u32, l, __io_br(), __io_ar())
+__io_reads_ins(reads, u8, b, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
#define readsl(addr, buffer, count) __readsl(addr, buffer, count)
-__io_reads_ins(ins, u8, b, __io_pbr(), __io_par())
-__io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
-__io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
+__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
@@ -283,10 +272,10 @@ __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
#ifdef CONFIG_64BIT
-__io_reads_ins(reads, u64, q, __io_br(), __io_ar())
+__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
-__io_reads_ins(ins, u64, q, __io_pbr(), __io_par())
+__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
new file mode 100644
index 000000000000..5d7e3a2b4e3b
--- /dev/null
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* ASM_RISCV_MMIOWB_H */
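
With ARCH_HAS_MMIOWB selected, asm-generic/mmiowb.h keeps per-CPU state (the nesting_count and mmiowb_pending fields dumped in the xmon hunk earlier) so the spinlock core only pays for an mmiowb() when an MMIO write actually happened inside the critical section. A simplified paraphrase of that bookkeeping; the real implementation also tracks lock nesting, so treat these bodies as illustrative:

	static inline void mmiowb_set_pending(void)	/* called from __io_aw() */
	{
		__this_cpu_write(__mmiowb_state.mmiowb_pending, 1);
	}

	static inline void mmiowb_spin_unlock(void)	/* called on unlock */
	{
		if (__this_cpu_read(__mmiowb_state.mmiowb_pending)) {
			__this_cpu_write(__mmiowb_state.mmiowb_pending, 0);
			mmiowb();	/* "fence o,w" on riscv, as defined above */
		}
	}
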
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index bba3da6ef157..a3d5273ded7c 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -72,32 +72,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- BUG_ON(i + n > 6);
- if (i == 0) {
- args[0] = regs->orig_a0;
- args++;
- i++;
- n--;
- }
- memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
+ args[0] = regs->orig_a0;
+ args++;
+ memcpy(args, &regs->a1, 5 * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- BUG_ON(i + n > 6);
- if (i == 0) {
- regs->orig_a0 = args[0];
- args++;
- i++;
- n--;
- }
- memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
+ regs->orig_a0 = args[0];
+ args++;
+ memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
}
static inline int syscall_get_arch(void)
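
Besides dropping the i/n parameters (part of a tree-wide syscall_get_arguments() interface change), the new body fixes a latent scaling bug: regs->a1 is an unsigned long, so pointer arithmetic on &regs->a1 is already scaled by the element size, and the removed `&regs->a1 + i * sizeof(regs->a1)` overshot by a factor of eight on rv64. A small contrast with invented names:

	/* Sketch: dst, a1 and the counts are hypothetical. */
	static void copy_syscall_args(unsigned long *dst, unsigned long *a1,
				      unsigned int i, unsigned int n)
	{
		memcpy(dst, a1 + i, n * sizeof(*dst));	/* correct: +i elements */
		/* the removed code computed a1 + i * sizeof(*a1), i.e.
		 * i * 8 elements too far on a 64-bit build */
	}
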
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 439dc7072e05..1ad8d093c58b 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -18,6 +18,7 @@ struct mmu_gather;
static void tlb_flush(struct mmu_gather *tlb);
+#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>
static inline void tlb_flush(struct mmu_gather *tlb)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index a00168b980d2..fb53a8089e76 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -300,7 +300,7 @@ do { \
" .balign 4\n" \
"4:\n" \
" li %0, %6\n" \
- " jump 2b, %1\n" \
+ " jump 3b, %1\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .balign " RISCV_SZPTR "\n" \
diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild
index d2ee86b4c091..e69de29bb2d1 100644
--- a/arch/riscv/include/uapi/asm/Kbuild
+++ b/arch/riscv/include/uapi/asm/Kbuild
@@ -1 +0,0 @@
-include include/uapi/asm-generic/Kbuild.asm
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index f13f7f276639..598568168d35 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -4,7 +4,6 @@
ifdef CONFIG_FTRACE
CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_setup.o = -pg
endif
extra-y += head.o
@@ -29,8 +28,6 @@ obj-y += vdso.o
obj-y += cacheinfo.o
obj-y += vdso/
-CFLAGS_setup.o := -mcmodel=medany
-
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 7dd308129b40..2872edce894d 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
{
s32 hi20;
- if (IS_ENABLED(CMODEL_MEDLOW)) {
+ if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
me->name, (long long)v, location);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index ecb654f6a79e..540a331d1376 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -48,14 +48,6 @@ struct screen_info screen_info = {
};
#endif
-unsigned long va_pa_offset;
-EXPORT_SYMBOL(va_pa_offset);
-unsigned long pfn_base;
-EXPORT_SYMBOL(pfn_base);
-
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
/* The lucky hart to first increment this variable will boot the other cores */
atomic_t hart_lottery;
unsigned long boot_cpu_hartid;
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index a4b1d94371a0..4d403274c2e8 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -169,8 +169,6 @@ static bool save_trace(unsigned long pc, void *arg)
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
walk_stackframe(tsk, NULL, save_trace, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index eb22ab49b3e0..b68aac701803 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -1,3 +1,9 @@
+
+CFLAGS_init.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_init.o = -pg
+endif
+
obj-y += init.o
obj-y += fault.o
obj-y += extable.o
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index b379a75ac6a6..bc7b77e34d09 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -25,6 +25,10 @@
#include <asm/pgtable.h>
#include <asm/io.h>
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
@@ -117,6 +121,14 @@ void __init setup_bootmem(void)
*/
memblock_reserve(reg->base, vmlinux_end - reg->base);
mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
+
+ /*
+	 * Remove memblock from the end of the usable area to the
+	 * end of the region
+ */
+ if (reg->base + mem_size < end)
+ memblock_remove(reg->base + mem_size,
+ end - reg->base - mem_size);
}
}
BUG_ON(mem_size == 0);
@@ -143,6 +155,11 @@ void __init setup_bootmem(void)
}
}
+unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
+unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
@@ -172,6 +189,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
}
}
+/*
+ * setup_vm() is called from head.S with MMU-off.
+ *
+ * The following requirements should be honoured for setup_vm() to work
+ * correctly:
+ * 1) It should use PC-relative addressing for accessing kernel symbols.
+ * To achieve this we always use GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
+ * so disable compiler instrumentation when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for init.o in mm/Makefile.
+ */
+
+#ifndef __riscv_cmodel_medany
+#error "setup_vm() is called from head.S before relocate so it should "
+ "not use absolute addressing."
+#endif
+
asmlinkage void __init setup_vm(void)
{
extern char _start;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b6e3d0653002..07485582d027 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -14,12 +14,6 @@ config LOCKDEP_SUPPORT
config STACKTRACE_SUPPORT
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- bool
-
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config ARCH_HAS_ILOG2_U32
def_bool n
@@ -149,6 +143,7 @@ config S390
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
+ select HAVE_GENERIC_GUP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -164,11 +159,13 @@ config S390
select HAVE_PERF_USER_STACK_DUMP
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MEMBLOCK_PHYS_MAP
+ select HAVE_MMU_GATHER_NO_GATHER
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NOP_MCOUNT
select HAVE_OPROFILE
select HAVE_PCI
select HAVE_PERF_EVENTS
+ select HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
@@ -188,7 +185,6 @@ config S390
select TTY
select VIRT_CPU_ACCOUNTING
select ARCH_HAS_SCALED_CPUTIME
- select VIRT_TO_BUS
select HAVE_NMI
@@ -240,6 +236,7 @@ choice
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
+ depends on !CC_IS_CLANG
select HAVE_MARCH_Z900_FEATURES
help
Select this to enable optimizations for model z800/z900 (2064 and
@@ -248,6 +245,7 @@ config MARCH_Z900
config MARCH_Z990
bool "IBM zSeries model z890 and z990"
+ depends on !CC_IS_CLANG
select HAVE_MARCH_Z990_FEATURES
help
Select this to enable optimizations for model z890/z990 (2084 and
@@ -256,6 +254,7 @@ config MARCH_Z990
config MARCH_Z9_109
bool "IBM System z9"
+ depends on !CC_IS_CLANG
select HAVE_MARCH_Z9_109_FEATURES
help
Select this to enable optimizations for IBM System z9 (2094 and
@@ -347,12 +346,15 @@ config TUNE_DEFAULT
config TUNE_Z900
bool "IBM zSeries model z800 and z900"
+ depends on !CC_IS_CLANG
config TUNE_Z990
bool "IBM zSeries model z890 and z990"
+ depends on !CC_IS_CLANG
config TUNE_Z9_109
bool "IBM System z9"
+ depends on !CC_IS_CLANG
config TUNE_Z10
bool "IBM System z10"
@@ -388,6 +390,9 @@ config COMPAT
(and some other stuff like libraries and such) is needed for
executing 31 bit applications. It is safe to say "Y".
+config COMPAT_VDSO
+ def_bool COMPAT && !CC_IS_CLANG
+
config SYSVIPC_COMPAT
def_bool y if COMPAT && SYSVIPC
@@ -549,6 +554,17 @@ config ARCH_HAS_KEXEC_PURGATORY
def_bool y
depends on KEXEC_FILE
+config KEXEC_VERIFY_SIG
+ bool "Verify kernel signature during kexec_file_load() syscall"
+ depends on KEXEC_FILE && SYSTEM_DATA_VERIFICATION
+ help
+ This option makes kernel signature verification mandatory for
+ the kexec_file_load() syscall.
+
+ In addition to that option, you need to enable signature
+ verification for the corresponding kernel image type being
+ loaded in order for this to work.
+
config ARCH_RANDOM
def_bool y
prompt "s390 architectural random number generation API"
@@ -609,6 +625,29 @@ config EXPOLINE_FULL
endchoice
+config RELOCATABLE
+ bool "Build a relocatable kernel"
+ select MODULE_REL_CRCS if MODVERSIONS
+ default y
+ help
+ This builds a kernel image that retains relocation information
+ so it can be loaded at an arbitrary address.
+ The kernel is linked as a position-independent executable (PIE)
+ and contains dynamic relocations which are processed early in the
+ bootup process.
+ The relocations make the kernel image about 15% larger (compressed
+ 10%), but are discarded at runtime.
+
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image (KASLR)"
+ depends on RELOCATABLE
+ default y
+ help
+ In support of Kernel Address Space Layout Randomization (KASLR),
+ this randomizes the address at which the kernel image is loaded,
+ as a security feature that deters exploit attempts relying on
+ knowledge of the location of kernel internals.
+
endmenu
menu "Memory setup"
@@ -837,6 +876,17 @@ config HAVE_PNETID
menu "Virtualization"
+config PROTECTED_VIRTUALIZATION_GUEST
+ def_bool n
+ prompt "Protected virtualization guest support"
+ help
+ Select this option, if you want to be able to run this
+	  Select this option if you want to be able to run this
+ Protected virtualization capable machines have a mini hypervisor
+ located at machine level (an ultravisor). With help of the
+	  located at machine level (an ultravisor). With the help of the
+	  ultravisor, KVM will be able to run "protected" VMs, special
+
config PFAULT
def_bool y
prompt "Pseudo page fault support"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index e21053e5e0da..df1d6a150f30 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -16,10 +16,14 @@ KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
+ifeq ($(CONFIG_RELOCATABLE),y)
+KBUILD_CFLAGS += -fPIE
+LDFLAGS_vmlinux := -pie
+endif
aflags_dwarf := -Wa,-gdwarf-2
-KBUILD_AFLAGS_DECOMPRESSOR := -m64 -D__ASSEMBLY__
+KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
-KBUILD_CFLAGS_DECOMPRESSOR := -m64 -O2
+KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
@@ -111,7 +115,7 @@ endif
cfi := $(call as-instr,.cfi_startproc\n.cfi_val_offset 15$(comma)-160\n.cfi_endproc,-DCONFIG_AS_CFI_VAL_OFFSET=1)
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
-KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
+KBUILD_CFLAGS += -pipe -Wno-sign-compare
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables $(cfi)
KBUILD_AFLAGS += $(aflags-y) $(cfi)
export KBUILD_AFLAGS_DECOMPRESSOR
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index d5ad724f5c96..c51496bbac19 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -12,25 +12,35 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
#
-# Use -march=z900 for als.c to be able to print an error
+# Use minimum architecture for als.c to be able to print an error
# message if the kernel is started on a machine which is too old
#
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
+ifndef CONFIG_CC_IS_CLANG
+CC_FLAGS_MARCH_MINIMUM := -march=z900
+else
+CC_FLAGS_MARCH_MINIMUM := -march=z10
+endif
+
+ifneq ($(CC_FLAGS_MARCH),$(CC_FLAGS_MARCH_MINIMUM))
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
+AFLAGS_head.o += $(CC_FLAGS_MARCH_MINIMUM)
AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH)
-AFLAGS_mem.o += -march=z900
+AFLAGS_mem.o += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += -march=z900
+CFLAGS_als.o += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp_early_core.o += -march=z900
+CFLAGS_sclp_early_core.o += $(CC_FLAGS_MARCH_MINIMUM)
endif
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
-obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o string.o ebcdic.o
-obj-y += sclp_early_core.o mem.o ipl_vmparm.o cmdline.o ctype.o
-targets := bzImage startup.a section_cmp.boot.data $(obj-y)
+obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
+obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
+obj-y += ctype.o text_dma.o
+obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
+obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
+obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
subdir- := compressed
OBJECTS := $(addprefix $(obj)/,$(obj-y))
@@ -48,7 +58,8 @@ define cmd_section_cmp
touch $@
endef
-$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data FORCE
+OBJCOPYFLAGS_bzImage := --pad-to $$(readelf -s $(obj)/compressed/vmlinux | awk '/\<_end\>/ {print or(strtonum("0x"$$2),4095)+1}')
+$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE
$(call if_changed,objcopy)
$(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE
@@ -57,9 +68,6 @@ $(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE
$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
-quiet_cmd_ar = AR $@
- cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(filter $(OBJECTS), $^)
-
$(obj)/startup.a: $(OBJECTS) FORCE
$(call if_changed,ar)
@@ -67,6 +75,6 @@ install: $(CONFIGURE) $(obj)/bzImage
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
System.map "$(INSTALL_PATH)"
-chkbss := $(OBJECTS)
-chkbss-target := $(obj)/startup.a
+chkbss := $(obj-y)
+chkbss-target := startup.a
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index f902215e9cd9..ff6801d401c4 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -99,7 +99,7 @@ static void facility_mismatch(void)
print_machine_type();
print_missing_facilities();
sclp_early_printk("See Principles of Operations for facility bits\n");
- disabled_wait(0x8badcccc);
+ disabled_wait();
}
void verify_facilities(void)
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 82bc06346e05..ad57c2205a71 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -9,5 +9,10 @@ void setup_boot_command_line(void);
void parse_boot_command_line(void);
void setup_memory_end(void);
void print_missing_facilities(void);
+unsigned long get_random_base(unsigned long safe_addr);
+
+extern int kaslr_enabled;
+
+unsigned long read_ipl_report(unsigned long safe_offset);
#endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index b1bdd15e3429..fa529c5b4486 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -63,6 +63,6 @@ OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section
$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
$(call if_changed,objcopy)
-chkbss := $(filter-out $(obj)/piggy.o $(obj)/info.o,$(OBJECTS))
-chkbss-target := $(obj)/vmlinux.bin
+chkbss := $(filter-out piggy.o info.o, $(obj-y))
+chkbss-target := vmlinux.bin
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/compressed/decompressor.h
index e1c1f2ec60f4..c15eb7114d83 100644
--- a/arch/s390/boot/compressed/decompressor.h
+++ b/arch/s390/boot/compressed/decompressor.h
@@ -17,6 +17,11 @@ struct vmlinux_info {
unsigned long bss_size; /* uncompressed image .bss size */
unsigned long bootdata_off;
unsigned long bootdata_size;
+ unsigned long bootdata_preserved_off;
+ unsigned long bootdata_preserved_size;
+ unsigned long dynsym_start;
+ unsigned long rela_dyn_start;
+ unsigned long rela_dyn_end;
};
extern char _vmlinux_info[];
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index 7efc3938f595..112b8d9f1e4c 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -33,7 +33,29 @@ SECTIONS
*(.data.*)
_edata = . ;
}
+ /*
+ * .dma section for code, data, ex_table that need to stay below 2 GB,
+	 * even when the kernel is relocated above 2 GB.
+ */
+ _sdma = .;
+ .dma.text : {
+ . = ALIGN(PAGE_SIZE);
+ _stext_dma = .;
+ *(.dma.text)
+ . = ALIGN(PAGE_SIZE);
+ _etext_dma = .;
+ }
+ . = ALIGN(16);
+ .dma.ex_table : {
+ _start_dma_ex_table = .;
+ KEEP(*(.dma.ex_table))
+ _stop_dma_ex_table = .;
+ }
+ .dma.data : { *(.dma.data) }
+ _edma = .;
+
BOOT_DATA
+ BOOT_DATA_PRESERVED
/*
	 * uncompressed image info used by the decompressor; it should match struct vmlinux_info
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index ce2cbbc41742..028aab03a9e7 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -305,7 +305,7 @@ ENTRY(startup_kdump)
xc 0x300(256),0x300
xc 0xe00(256),0xe00
xc 0xf00(256),0xf00
- lctlg %c0,%c15,0x200(%r0) # initialize control registers
+ lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers
stcke __LC_BOOT_CLOCK
mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
spt 6f-.LPG0(%r13)
@@ -319,20 +319,54 @@ ENTRY(startup_kdump)
.align 8
6: .long 0x7fffffff,0xffffffff
+.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
+ .quad 0 # cr1: primary space segment table
+ .quad .Lduct # cr2: dispatchable unit control table
+ .quad 0 # cr3: instruction authorization
+ .quad 0xffff # cr4: instruction authorization
+ .quad .Lduct # cr5: primary-aste origin
+ .quad 0 # cr6: I/O interrupts
+ .quad 0 # cr7: secondary space segment table
+ .quad 0 # cr8: access registers translation
+ .quad 0 # cr9: tracing off
+ .quad 0 # cr10: tracing off
+ .quad 0 # cr11: tracing off
+ .quad 0 # cr12: tracing off
+ .quad 0 # cr13: home space segment table
+ .quad 0xc0000000 # cr14: machine check handling off
+ .quad .Llinkage_stack # cr15: linkage stack operations
+
+ .section .dma.data,"aw",@progbits
+.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
+ .long 0,0,0,0,0,0,0,0
+.Llinkage_stack:
+ .long 0,0,0x89000000,0,0,0,0x8a000000,0
+ .align 64
+.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
+ .align 128
+.Lduald:.rept 8
+ .long 0x80000000,0,0,0 # invalid access-list entries
+ .endr
+ .previous
+
#include "head_kdump.S"
#
# params at 10400 (setup.h)
+# Must be kept in sync with struct parmarea in setup.h
#
.org PARMAREA
- .long 0,0 # IPL_DEVICE
- .long 0,0 # INITRD_START
- .long 0,0 # INITRD_SIZE
- .long 0,0 # OLDMEM_BASE
- .long 0,0 # OLDMEM_SIZE
+ .quad 0 # IPL_DEVICE
+ .quad 0 # INITRD_START
+ .quad 0 # INITRD_SIZE
+ .quad 0 # OLDMEM_BASE
+ .quad 0 # OLDMEM_SIZE
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
.byte 0
- .org 0x11000
+ .org EARLY_SCCB_OFFSET
+ .fill 4096
+
+ .org HEAD_END
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 36beb56de021..3c49bde8aa5e 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -7,16 +7,19 @@
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/facility.h>
+#include <asm/uv.h>
#include "boot.h"
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
-struct ipl_parameter_block __bootdata(early_ipl_block);
-int __bootdata(early_ipl_block_valid);
+struct ipl_parameter_block __bootdata_preserved(ipl_block);
+int __bootdata_preserved(ipl_block_valid);
unsigned long __bootdata(memory_end);
int __bootdata(memory_end_set);
int __bootdata(noexec_disabled);
+int kaslr_enabled __section(.data);
+
static inline int __diag308(unsigned long subcode, void *addr)
{
register unsigned long _addr asm("0") = (unsigned long)addr;
@@ -45,13 +48,15 @@ void store_ipl_parmblock(void)
{
int rc;
- rc = __diag308(DIAG308_STORE, &early_ipl_block);
+ uv_set_shared(__pa(&ipl_block));
+ rc = __diag308(DIAG308_STORE, &ipl_block);
+ uv_remove_shared(__pa(&ipl_block));
if (rc == DIAG308_RC_OK &&
- early_ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
- early_ipl_block_valid = 1;
+ ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
+ ipl_block_valid = 1;
}
-static size_t scpdata_length(const char *buf, size_t count)
+static size_t scpdata_length(const u8 *buf, size_t count)
{
while (count) {
if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
@@ -68,26 +73,26 @@ static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size,
size_t i;
int has_lowercase;
- count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
- ipb->ipl_info.fcp.scp_data_len));
+ count = min(size - 1, scpdata_length(ipb->fcp.scp_data,
+ ipb->fcp.scp_data_len));
if (!count)
goto out;
has_lowercase = 0;
for (i = 0; i < count; i++) {
- if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
+ if (!isascii(ipb->fcp.scp_data[i])) {
count = 0;
goto out;
}
- if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
+ if (!has_lowercase && islower(ipb->fcp.scp_data[i]))
has_lowercase = 1;
}
if (has_lowercase)
- memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
+ memcpy(dest, ipb->fcp.scp_data, count);
else
for (i = 0; i < count; i++)
- dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
+ dest[i] = tolower(ipb->fcp.scp_data[i]);
out:
dest[count] = '\0';
return count;
@@ -103,14 +108,14 @@ static void append_ipl_block_parm(void)
delim = early_command_line + len; /* '\0' character position */
parm = early_command_line + len + 1; /* append right after '\0' */
- switch (early_ipl_block.hdr.pbt) {
- case DIAG308_IPL_TYPE_CCW:
+ switch (ipl_block.pb0_hdr.pbt) {
+ case IPL_PBT_CCW:
rc = ipl_block_get_ascii_vmparm(
- parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
+ parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
break;
- case DIAG308_IPL_TYPE_FCP:
+ case IPL_PBT_FCP:
rc = ipl_block_get_ascii_scpdata(
- parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
+ parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
break;
}
if (rc) {
@@ -141,7 +146,7 @@ void setup_boot_command_line(void)
strcpy(early_command_line, strim(COMMAND_LINE));
/* append IPL PARM data to the boot command line */
- if (early_ipl_block_valid)
+ if (!is_prot_virt_guest() && ipl_block_valid)
append_ipl_block_parm();
}
@@ -211,6 +216,7 @@ void parse_boot_command_line(void)
char *args;
int rc;
+ kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
args = strcpy(command_line_buf, early_command_line);
while (*args) {
args = next_arg(args, &param, &val);
@@ -228,15 +234,21 @@ void parse_boot_command_line(void)
if (!strcmp(param, "facilities"))
modify_fac_list(val);
+
+ if (!strcmp(param, "nokaslr"))
+ kaslr_enabled = 0;
}
}
void setup_memory_end(void)
{
#ifdef CONFIG_CRASH_DUMP
- if (!OLDMEM_BASE && early_ipl_block_valid &&
- early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP &&
- early_ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) {
+ if (OLDMEM_BASE) {
+ kaslr_enabled = 0;
+ } else if (ipl_block_valid &&
+ ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
+ ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP) {
+ kaslr_enabled = 0;
if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
memory_end_set = 1;
}
diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
new file mode 100644
index 000000000000..0b4965573656
--- /dev/null
+++ b/arch/s390/boot/ipl_report.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/boot_data.h>
+#include <uapi/asm/ipl.h>
+#include "boot.h"
+
+int __bootdata_preserved(ipl_secure_flag);
+
+unsigned long __bootdata_preserved(ipl_cert_list_addr);
+unsigned long __bootdata_preserved(ipl_cert_list_size);
+
+unsigned long __bootdata(early_ipl_comp_list_addr);
+unsigned long __bootdata(early_ipl_comp_list_size);
+
+#define for_each_rb_entry(entry, rb) \
+ for (entry = rb->entries; \
+ (void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
+ entry++)
+
+static inline bool intersects(unsigned long addr0, unsigned long size0,
+ unsigned long addr1, unsigned long size1)
+{
+ return addr0 + size0 > addr1 && addr1 + size1 > addr0;
+}
+
+static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
+ struct ipl_rb_certificates *certs,
+ unsigned long safe_addr)
+{
+ struct ipl_rb_certificate_entry *cert;
+ struct ipl_rb_component_entry *comp;
+ size_t size;
+
+ /*
+ * Find the length for the IPL report boot data
+ */
+ early_ipl_comp_list_size = 0;
+ for_each_rb_entry(comp, comps)
+ early_ipl_comp_list_size += sizeof(*comp);
+ ipl_cert_list_size = 0;
+ for_each_rb_entry(cert, certs)
+ ipl_cert_list_size += sizeof(unsigned int) + cert->len;
+ size = ipl_cert_list_size + early_ipl_comp_list_size;
+
+ /*
+ * Start from safe_addr to find a free memory area large
+ * enough for the IPL report boot data. This area is used
+ * for ipl_cert_list_addr/ipl_cert_list_size and
+ * early_ipl_comp_list_addr/early_ipl_comp_list_size. It must
+ * not overlap with any component or any certificate.
+ */
+repeat:
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+ intersects(INITRD_START, INITRD_SIZE, safe_addr, size))
+ safe_addr = INITRD_START + INITRD_SIZE;
+ for_each_rb_entry(comp, comps)
+ if (intersects(safe_addr, size, comp->addr, comp->len)) {
+ safe_addr = comp->addr + comp->len;
+ goto repeat;
+ }
+ for_each_rb_entry(cert, certs)
+ if (intersects(safe_addr, size, cert->addr, cert->len)) {
+ safe_addr = cert->addr + cert->len;
+ goto repeat;
+ }
+ early_ipl_comp_list_addr = safe_addr;
+ ipl_cert_list_addr = safe_addr + early_ipl_comp_list_size;
+
+ return safe_addr + size;
+}
+
+static void copy_components_bootdata(struct ipl_rb_components *comps)
+{
+ struct ipl_rb_component_entry *comp, *ptr;
+
+ ptr = (struct ipl_rb_component_entry *) early_ipl_comp_list_addr;
+ for_each_rb_entry(comp, comps)
+ memcpy(ptr++, comp, sizeof(*ptr));
+}
+
+static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
+{
+ struct ipl_rb_certificate_entry *cert;
+ void *ptr;
+
+ ptr = (void *) ipl_cert_list_addr;
+ for_each_rb_entry(cert, certs) {
+ *(unsigned int *) ptr = cert->len;
+ ptr += sizeof(unsigned int);
+ memcpy(ptr, (void *) cert->addr, cert->len);
+ ptr += cert->len;
+ }
+}
+
+unsigned long read_ipl_report(unsigned long safe_addr)
+{
+ struct ipl_rb_certificates *certs;
+ struct ipl_rb_components *comps;
+ struct ipl_pl_hdr *pl_hdr;
+ struct ipl_rl_hdr *rl_hdr;
+ struct ipl_rb_hdr *rb_hdr;
+ unsigned long tmp;
+ void *rl_end;
+
+ /*
+	 * Check if there is an IPL report by looking at the copy
+ * of the IPL parameter information block.
+ */
+ if (!ipl_block_valid ||
+ !(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
+ return safe_addr;
+ ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
+ /*
+	 * There is an IPL report; to find it, load the pointer to the
+ * IPL parameter information block from lowcore and skip past
+ * the IPL parameter list, then align the address to a double
+ * word boundary.
+ */
+ tmp = (unsigned long) S390_lowcore.ipl_parmblock_ptr;
+ pl_hdr = (struct ipl_pl_hdr *) tmp;
+ tmp = (tmp + pl_hdr->len + 7) & -8UL;
+ rl_hdr = (struct ipl_rl_hdr *) tmp;
+ /* Walk through the IPL report blocks in the IPL Report list */
+ certs = NULL;
+ comps = NULL;
+ rl_end = (void *) rl_hdr + rl_hdr->len;
+ rb_hdr = (void *) rl_hdr + sizeof(*rl_hdr);
+ while ((void *) rb_hdr + sizeof(*rb_hdr) < rl_end &&
+ (void *) rb_hdr + rb_hdr->len <= rl_end) {
+
+ switch (rb_hdr->rbt) {
+ case IPL_RBT_CERTIFICATES:
+ certs = (struct ipl_rb_certificates *) rb_hdr;
+ break;
+ case IPL_RBT_COMPONENTS:
+ comps = (struct ipl_rb_components *) rb_hdr;
+ break;
+ default:
+ break;
+ }
+
+ rb_hdr = (void *) rb_hdr + rb_hdr->len;
+ }
+
+ /*
+ * With either the component list or the certificate list
+	 * missing, the kernel will stay ignorant of secure IPL.
+ */
+ if (!comps || !certs)
+ return safe_addr;
+
+ /*
+ * Copy component and certificate list to a safe area
+ * where the decompressed kernel can find them.
+ */
+ safe_addr = find_bootdata_space(comps, certs, safe_addr);
+ copy_components_bootdata(comps);
+ copy_certificates_bootdata(certs);
+
+ return safe_addr;
+}
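
find_bootdata_space() above depends on intersects() treating its ranges as half-open intervals: [addr0, addr0 + size0) and [addr1, addr1 + size1) overlap iff each starts strictly before the other ends. A quick boundary check with made-up addresses:

	/* adjacent ranges do not intersect ... */
	intersects(0x1000, 0x1000, 0x2000, 0x1000); /* [0x1000,0x2000) vs [0x2000,0x3000): false */
	/* ... but a single byte of overlap flips the result */
	intersects(0x1000, 0x1001, 0x2000, 0x1000); /* [0x1000,0x2001) vs [0x2000,0x3000): true */
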
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
new file mode 100644
index 000000000000..3bdd8132e56b
--- /dev/null
+++ b/arch/s390/boot/kaslr.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2019
+ */
+#include <asm/mem_detect.h>
+#include <asm/cpacf.h>
+#include <asm/timex.h>
+#include <asm/sclp.h>
+#include "compressed/decompressor.h"
+
+#define PRNG_MODE_TDES 1
+#define PRNG_MODE_SHA512 2
+#define PRNG_MODE_TRNG 3
+
+struct prno_parm {
+ u32 res;
+ u32 reseed_counter;
+ u64 stream_bytes;
+ u8 V[112];
+ u8 C[112];
+};
+
+struct prng_parm {
+ u8 parm_block[32];
+ u32 reseed_counter;
+ u64 byte_counter;
+};
+
+static int check_prng(void)
+{
+ if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
+ sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
+ return 0;
+ }
+ if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+ return PRNG_MODE_TRNG;
+ if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
+ return PRNG_MODE_SHA512;
+ else
+ return PRNG_MODE_TDES;
+}
+
+static unsigned long get_random(unsigned long limit)
+{
+ struct prng_parm prng = {
+ /* initial parameter block for tdes mode, copied from libica */
+ .parm_block = {
+ 0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
+ 0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
+ 0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
+ 0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
+ },
+ };
+ unsigned long seed, random;
+ struct prno_parm prno;
+ __u64 entropy[4];
+ int mode, i;
+
+ mode = check_prng();
+ seed = get_tod_clock_fast();
+ switch (mode) {
+ case PRNG_MODE_TRNG:
+ cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
+ break;
+ case PRNG_MODE_SHA512:
+ cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
+ (u8 *) &seed, sizeof(seed));
+ cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
+ sizeof(random), NULL, 0);
+ break;
+ case PRNG_MODE_TDES:
+ /* add entropy */
+ *(unsigned long *) prng.parm_block ^= seed;
+ for (i = 0; i < 16; i++) {
+ cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
+ (char *) entropy, (char *) entropy,
+ sizeof(entropy));
+ memcpy(prng.parm_block, entropy, sizeof(entropy));
+ }
+ random = seed;
+ cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
+ (u8 *) &random, sizeof(random));
+ break;
+ default:
+ random = 0;
+ }
+ return random % limit;
+}
+
+unsigned long get_random_base(unsigned long safe_addr)
+{
+ unsigned long base, start, end, kernel_size;
+ unsigned long block_sum, offset;
+ int i;
+
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
+ if (safe_addr < INITRD_START + INITRD_SIZE)
+ safe_addr = INITRD_START + INITRD_SIZE;
+ }
+ safe_addr = ALIGN(safe_addr, THREAD_SIZE);
+
+ kernel_size = vmlinux.image_size + vmlinux.bss_size;
+ block_sum = 0;
+ for_each_mem_detect_block(i, &start, &end) {
+ if (memory_end_set) {
+ if (start >= memory_end)
+ break;
+ if (end > memory_end)
+ end = memory_end;
+ }
+ if (end - start < kernel_size)
+ continue;
+ block_sum += end - start - kernel_size;
+ }
+ if (!block_sum) {
+ sclp_early_printk("KASLR disabled: not enough memory\n");
+ return 0;
+ }
+
+ base = get_random(block_sum);
+ if (base == 0)
+ return 0;
+ if (base < safe_addr)
+ base = safe_addr;
+ block_sum = offset = 0;
+ for_each_mem_detect_block(i, &start, &end) {
+ if (memory_end_set) {
+ if (start >= memory_end)
+ break;
+ if (end > memory_end)
+ end = memory_end;
+ }
+ if (end - start < kernel_size)
+ continue;
+ block_sum += end - start - kernel_size;
+ if (base <= block_sum) {
+ base = start + base - offset;
+ base = ALIGN_DOWN(base, THREAD_SIZE);
+ break;
+ }
+ offset = block_sum;
+ }
+ return base;
+}
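
get_random_base() selects the base in two passes: the first sums, over every usable memory block, the number of addresses at which a kernel of kernel_size bytes could still start (end - start - kernel_size per block); the second maps a random offset into that total back to a concrete address. A worked example with hypothetical numbers:

	/* Two blocks, kernel_size = 0x1000000 (16 MiB):
	 *   A: [0x02000000, 0x08000000)  ->  0x05000000 candidate bytes
	 *   B: [0x10000000, 0x14000000)  ->  0x03000000 candidate bytes
	 * block_sum = 0x08000000; suppose get_random() returns 0x06000000.
	 * Pass 2: after A the running sum (0x05000000) is still below the
	 * target, so offset = 0x05000000; after B it reaches 0x08000000,
	 * so the base lands in block B:
	 *   base = 0x10000000 + (0x06000000 - 0x05000000) = 0x11000000,
	 * rounded down to THREAD_SIZE alignment. */
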
diff --git a/arch/s390/boot/machine_kexec_reloc.c b/arch/s390/boot/machine_kexec_reloc.c
new file mode 100644
index 000000000000..b7a5d0f72097
--- /dev/null
+++ b/arch/s390/boot/machine_kexec_reloc.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/machine_kexec_reloc.c"
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
index 4cb771ba13fa..5d316fe40480 100644
--- a/arch/s390/boot/mem_detect.c
+++ b/arch/s390/boot/mem_detect.c
@@ -25,7 +25,7 @@ static void *mem_detect_alloc_extended(void)
{
unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
- if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
INITRD_START < offset + ENTRIES_EXTENDED_MAX)
offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index bdfc5549a299..7b0d05414618 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -1,11 +1,55 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
+#include <linux/elf.h>
+#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/kexec.h>
#include <asm/sclp.h>
+#include <asm/diag.h>
+#include <asm/uv.h>
#include "compressed/decompressor.h"
#include "boot.h"
extern char __boot_data_start[], __boot_data_end[];
+extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
+unsigned long __bootdata_preserved(__kaslr_offset);
+
+/*
+ * Some code and data needs to stay below 2 GB, even when the kernel would be
+ * relocated above 2 GB, because it has to use 31 bit addresses.
+ * Such code and data is part of the .dma section, and its location is passed
+ * over to the decompressed / relocated kernel via the .boot.preserved.data
+ * section.
+ */
+extern char _sdma[], _edma[];
+extern char _stext_dma[], _etext_dma[];
+extern struct exception_table_entry _start_dma_ex_table[];
+extern struct exception_table_entry _stop_dma_ex_table[];
+unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
+unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
+unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
+unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
+struct exception_table_entry *
+ __bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
+struct exception_table_entry *
+ __bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
+
+int _diag210_dma(struct diag210 *addr);
+int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
+int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
+void _diag0c_dma(struct hypfs_diag0c_entry *entry);
+void _diag308_reset_dma(void);
+struct diag_ops __bootdata_preserved(diag_dma_ops) = {
+ .diag210 = _diag210_dma,
+ .diag26c = _diag26c_dma,
+ .diag14 = _diag14_dma,
+ .diag0c = _diag0c_dma,
+ .diag308_reset = _diag308_reset_dma
+};
+static struct diag210 _diag210_tmp_dma __section(".dma.data");
+struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
+void _swsusp_reset_dma(void);
+unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);
void error(char *x)
{
@@ -13,7 +57,7 @@ void error(char *x)
sclp_early_printk(x);
sclp_early_printk("\n\n -- System halted");
- disabled_wait(0xdeadbeef);
+ disabled_wait();
}
#ifdef CONFIG_KERNEL_UNCOMPRESSED
@@ -23,19 +67,16 @@ unsigned long mem_safe_offset(void)
}
#endif
-static void rescue_initrd(void)
+static void rescue_initrd(unsigned long addr)
{
- unsigned long min_initrd_addr;
-
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
return;
if (!INITRD_START || !INITRD_SIZE)
return;
- min_initrd_addr = mem_safe_offset();
- if (min_initrd_addr <= INITRD_START)
+ if (addr <= INITRD_START)
return;
- memmove((void *)min_initrd_addr, (void *)INITRD_START, INITRD_SIZE);
- INITRD_START = min_initrd_addr;
+ memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
+ INITRD_START = addr;
}
static void copy_bootdata(void)
@@ -43,23 +84,81 @@ static void copy_bootdata(void)
if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
error(".boot.data section size mismatch");
memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
+ if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
+ error(".boot.preserved.data section size mismatch");
+ memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
+}
+
+static void handle_relocs(unsigned long offset)
+{
+ Elf64_Rela *rela_start, *rela_end, *rela;
+ int r_type, r_sym, rc;
+ Elf64_Addr loc, val;
+ Elf64_Sym *dynsym;
+
+ rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
+ rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
+ dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
+ for (rela = rela_start; rela < rela_end; rela++) {
+ loc = rela->r_offset + offset;
+ val = rela->r_addend + offset;
+ r_sym = ELF64_R_SYM(rela->r_info);
+ if (r_sym)
+ val += dynsym[r_sym].st_value;
+ r_type = ELF64_R_TYPE(rela->r_info);
+ rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
+ if (rc)
+ error("Unknown relocation type");
+ }
}
void startup_kernel(void)
{
+ unsigned long random_lma;
+ unsigned long safe_addr;
void *img;
- rescue_initrd();
- sclp_early_read_info();
store_ipl_parmblock();
+ safe_addr = mem_safe_offset();
+ safe_addr = read_ipl_report(safe_addr);
+ uv_query_info();
+ rescue_initrd(safe_addr);
+ sclp_early_read_info();
setup_boot_command_line();
parse_boot_command_line();
setup_memory_end();
detect_memory();
+
+ random_lma = __kaslr_offset = 0;
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
+ random_lma = get_random_base(safe_addr);
+ if (random_lma) {
+ __kaslr_offset = random_lma - vmlinux.default_lma;
+ img = (void *)vmlinux.default_lma;
+ vmlinux.default_lma += __kaslr_offset;
+ vmlinux.entry += __kaslr_offset;
+ vmlinux.bootdata_off += __kaslr_offset;
+ vmlinux.bootdata_preserved_off += __kaslr_offset;
+ vmlinux.rela_dyn_start += __kaslr_offset;
+ vmlinux.rela_dyn_end += __kaslr_offset;
+ vmlinux.dynsym_start += __kaslr_offset;
+ }
+ }
+
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel();
memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
- }
+ } else if (__kaslr_offset)
+ memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
+
copy_bootdata();
+ if (IS_ENABLED(CONFIG_RELOCATABLE))
+ handle_relocs(__kaslr_offset);
+
+ if (__kaslr_offset) {
+ /* Clear non-relocated kernel */
+ if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
+ memset(img, 0, vmlinux.image_size);
+ }
vmlinux.entry();
}
diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S
new file mode 100644
index 000000000000..9715715c4c28
--- /dev/null
+++ b/arch/s390/boot/text_dma.S
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code that needs to run below 2 GB.
+ *
+ * Copyright IBM Corp. 2019
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+#include <asm/sigp.h>
+
+#ifdef CC_USING_EXPOLINE
+ .pushsection .dma.text.__s390_indirect_jump_r14,"axG"
+__dma__s390_indirect_jump_r14:
+ larl %r1,0f
+ ex 0,0(%r1)
+ j .
+0: br %r14
+ .popsection
+#endif
+
+ .section .dma.text,"ax"
+/*
+ * Simplified version of the expoline thunk. The normal thunks cannot be used
+ * here, because they might be more than 2 GB away and thus not reachable by
+ * the relative branch. No comdat, exrl, etc. optimizations are used here,
+ * because they only affect a few functions that are not performance-relevant.
+ */
+ .macro BR_EX_DMA_r14
+#ifdef CC_USING_EXPOLINE
+ jg __dma__s390_indirect_jump_r14
+#else
+ br %r14
+#endif
+ .endm
+
+/*
+ * int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
+ */
+ENTRY(_diag14_dma)
+ lgr %r1,%r2
+ lgr %r2,%r3
+ lgr %r3,%r4
+ lhi %r5,-EIO
+ sam31
+ diag %r1,%r2,0x14
+.Ldiag14_ex:
+ ipm %r5
+ srl %r5,28
+.Ldiag14_fault:
+ sam64
+ lgfr %r2,%r5
+ BR_EX_DMA_r14
+ EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
+ENDPROC(_diag14_dma)
+
+/*
+ * int _diag210_dma(struct diag210 *addr)
+ */
+ENTRY(_diag210_dma)
+ lgr %r1,%r2
+ lhi %r2,-1
+ sam31
+ diag %r1,%r0,0x210
+.Ldiag210_ex:
+ ipm %r2
+ srl %r2,28
+.Ldiag210_fault:
+ sam64
+ lgfr %r2,%r2
+ BR_EX_DMA_r14
+ EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
+ENDPROC(_diag210_dma)
+
+/*
+ * int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
+ */
+ENTRY(_diag26c_dma)
+ lghi %r5,-EOPNOTSUPP
+ sam31
+ diag %r2,%r4,0x26c
+.Ldiag26c_ex:
+ sam64
+ lgfr %r2,%r5
+ BR_EX_DMA_r14
+ EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
+ENDPROC(_diag26c_dma)
+
+/*
+ * void _diag0c_dma(struct hypfs_diag0c_entry *entry)
+ */
+ENTRY(_diag0c_dma)
+ sam31
+ diag %r2,%r2,0x0c
+ sam64
+ BR_EX_DMA_r14
+ENDPROC(_diag0c_dma)
+
+/*
+ * void _swsusp_reset_dma(void)
+ */
+ENTRY(_swsusp_reset_dma)
+ larl %r1,restart_entry
+ larl %r2,.Lrestart_diag308_psw
+ og %r1,0(%r2)
+ stg %r1,0(%r0)
+ lghi %r0,0
+ diag %r0,%r0,0x308
+restart_entry:
+ lhi %r1,1
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE
+ sam64
+ BR_EX_DMA_r14
+ENDPROC(_swsusp_reset_dma)
+
+/*
+ * void _diag308_reset_dma(void)
+ *
+ * Calls diag 308 subcode 1 and continues execution
+ */
+ENTRY(_diag308_reset_dma)
+ larl %r4,.Lctlregs # Save control registers
+ stctg %c0,%c15,0(%r4)
+ lg %r2,0(%r4) # Disable lowcore protection
+ nilh %r2,0xefff
+ larl %r4,.Lctlreg0
+ stg %r2,0(%r4)
+ lctlg %c0,%c0,0(%r4)
+ larl %r4,.Lfpctl # Floating point control register
+ stfpc 0(%r4)
+ larl %r4,.Lprefix # Save prefix register
+ stpx 0(%r4)
+ larl %r4,.Lprefix_zero # Set prefix register to 0
+ spx 0(%r4)
+ larl %r4,.Lcontinue_psw # Save PSW flags
+ epsw %r2,%r3
+ stm %r2,%r3,0(%r4)
+ larl %r4,restart_part2 # Setup restart PSW at absolute 0
+ larl %r3,.Lrestart_diag308_psw
+ og %r4,0(%r3) # Save PSW
+ lghi %r3,0
+ sturg %r4,%r3 # Use sturg, because of large pages
+ lghi %r1,1
+ lghi %r0,0
+ diag %r0,%r1,0x308
+restart_part2:
+ lhi %r0,0 # Load r0 with zero
+ lhi %r1,2 # Use mode 2 = ESAME (dump)
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
+ sam64 # Switch to 64 bit addressing mode
+ larl %r4,.Lctlregs # Restore control registers
+ lctlg %c0,%c15,0(%r4)
+ larl %r4,.Lfpctl # Restore floating point ctl register
+ lfpc 0(%r4)
+ larl %r4,.Lprefix # Restore prefix register
+ spx 0(%r4)
+ larl %r4,.Lcontinue_psw # Restore PSW flags
+ lpswe 0(%r4)
+.Lcontinue:
+ BR_EX_DMA_r14
+ENDPROC(_diag308_reset_dma)
+
+ .section .dma.data,"aw",@progbits
+.align 8
+.Lrestart_diag308_psw:
+ .long 0x00080000,0x80000000
+
+.align 8
+.Lcontinue_psw:
+ .quad 0,.Lcontinue
+
+.align 8
+.Lctlreg0:
+ .quad 0
+.Lctlregs:
+ .rept 16
+ .quad 0
+ .endr
+.Lfpctl:
+ .long 0
+.Lprefix:
+ .long 0
+.Lprefix_zero:
+ .long 0
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
new file mode 100644
index 000000000000..ed007f4a6444
--- /dev/null
+++ b/arch/s390/boot/uv.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <asm/uv.h>
+#include <asm/facility.h>
+#include <asm/sections.h>
+
+int __bootdata_preserved(prot_virt_guest);
+
+void uv_query_info(void)
+{
+ struct uv_cb_qui uvcb = {
+ .header.cmd = UVC_CMD_QUI,
+ .header.len = sizeof(uvcb)
+ };
+
+ if (!test_facility(158))
+ return;
+
+ if (uv_call(0, (uint64_t)&uvcb))
+ return;
+
+ if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
+ test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
+ prot_virt_guest = 1;
+}
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index c69cb04b7a59..b0920b35f87b 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -64,6 +64,7 @@ CONFIG_NUMA=y
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_VERIFY_SIG=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
@@ -500,7 +501,6 @@ CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -520,6 +520,7 @@ CONFIG_BTRFS_DEBUG=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FS_ENCRYPTION=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 32f539dc9c19..09aa5cb14873 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,7 @@ CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_VERIFY_SIG=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
@@ -497,7 +498,6 @@ CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -515,6 +515,7 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FS_ENCRYPTION=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index 2bf01ba44107..0099044e2c86 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -207,5 +207,6 @@ ENTRY(crc32_be_vgfm_16)
.Ldone:
VLGVF %r2,%v2,3
BR_EX %r14
+ENDPROC(crc32_be_vgfm_16)
.previous
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
index 7d6f568bd3ad..71caf0f4ec08 100644
--- a/arch/s390/crypto/crc32le-vx.S
+++ b/arch/s390/crypto/crc32le-vx.S
@@ -105,13 +105,14 @@
ENTRY(crc32_le_vgfm_16)
larl %r5,.Lconstants_CRC_32_LE
j crc32_le_vgfm_generic
+ENDPROC(crc32_le_vgfm_16)
ENTRY(crc32c_le_vgfm_16)
larl %r5,.Lconstants_CRC_32C_LE
j crc32_le_vgfm_generic
+ENDPROC(crc32c_le_vgfm_16)
-
-crc32_le_vgfm_generic:
+ENTRY(crc32_le_vgfm_generic)
/* Load CRC-32 constants */
VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
@@ -267,5 +268,6 @@ crc32_le_vgfm_generic:
.Ldone:
VLGVF %r2,%v2,2
BR_EX %r14
+ENDPROC(crc32_le_vgfm_generic)
.previous
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 0d15383d0ff1..1f9ab24dc048 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -224,24 +224,11 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ int err;
- if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
- crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
- DES_KEY_SIZE)) &&
- (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
-
- /* in fips mode, ensure k1 != k2 and k2 != k3 and k1 != k3 */
- if (fips_enabled &&
- !(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
- crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
- DES_KEY_SIZE) &&
- crypto_memneq(key, &key[DES_KEY_SIZE * 2], DES_KEY_SIZE))) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ err = __des3_verify_key(&tfm->crt_flags, key);
+ if (unlikely(err))
+ return err;
memcpy(ctx->key, key, key_len);
return 0;
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index a97a1802cfb4..12cca467af7d 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -61,6 +61,7 @@ static unsigned int prng_reseed_limit;
module_param_named(reseed_limit, prng_reseed_limit, int, 0);
MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
+static bool trng_available;
/*
* Any one who considers arithmetical methods of producing random digits is,
@@ -115,46 +116,68 @@ static const u8 initial_parm_block[32] __initconst = {
/*
* generate_entropy:
- * This algorithm produces 64 bytes of entropy data based on 1024
- * individual stckf() invocations assuming that each stckf() value
- * contributes 0.25 bits of entropy. So the caller gets 256 bit
- * entropy per 64 byte or 4 bits entropy per byte.
+ * This function fills a given buffer with random bytes. The entropy within
+ * the returned random bytes is assumed to be at least 50%, meaning
+ * a 64-byte buffer has at least 64 * 8 / 2 = 256 bits of entropy.
+ * Within the function the entropy generation is done in chunks of 64 bytes.
+ * So the caller should also ask for buffer fills in multiples of 64 bytes.
+ * The entropy generation is based on the assumption that every stckf()
+ * invocation produces 0.5 bits of entropy. To accumulate 256 bits of entropy
+ * at least 512 stckf() values are needed. The entropy-relevant part of the
+ * stckf value is bit 51 (counting starts at the left with bit nr 0), so
+ * here we use the lower 4 bytes and exor the values into 2k of buffer space.
+ * To be on the safe side, in case there is ever a problem with stckf(), the
+ * other half of the page buffer is filled with bytes from urandom via
+ * get_random_bytes(), so this function consumes 2k of urandom for each
+ * requested 64 bytes of output data. Finally the buffer page is condensed
+ * into a 64-byte value by hashing with SHA-512.
*/
static int generate_entropy(u8 *ebuf, size_t nbytes)
{
int n, ret = 0;
- u8 *pg, *h, hash[64];
-
- /* allocate 2 pages */
- pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
+ u8 *pg, pblock[80] = {
+ /* 8 x 64 bit init values */
+ 0x6A, 0x09, 0xE6, 0x67, 0xF3, 0xBC, 0xC9, 0x08,
+ 0xBB, 0x67, 0xAE, 0x85, 0x84, 0xCA, 0xA7, 0x3B,
+ 0x3C, 0x6E, 0xF3, 0x72, 0xFE, 0x94, 0xF8, 0x2B,
+ 0xA5, 0x4F, 0xF5, 0x3A, 0x5F, 0x1D, 0x36, 0xF1,
+ 0x51, 0x0E, 0x52, 0x7F, 0xAD, 0xE6, 0x82, 0xD1,
+ 0x9B, 0x05, 0x68, 0x8C, 0x2B, 0x3E, 0x6C, 0x1F,
+ 0x1F, 0x83, 0xD9, 0xAB, 0xFB, 0x41, 0xBD, 0x6B,
+ 0x5B, 0xE0, 0xCD, 0x19, 0x13, 0x7E, 0x21, 0x79,
+ /* 128 bit counter total message bit length */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 };
+
+ /* allocate one page stckf buffer */
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
if (!pg) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
return -ENOMEM;
}
+ /* fill the ebuf in chunks of 64 bytes each */
while (nbytes) {
- /* fill pages with urandom bytes */
- get_random_bytes(pg, 2*PAGE_SIZE);
- /* exor pages with 1024 stckf values */
- for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
- u64 *p = ((u64 *)pg) + n;
+ /* fill lower 2k with urandom bytes */
+ get_random_bytes(pg, PAGE_SIZE / 2);
+ /* exor upper 2k with 512 stckf values, offset 4 bytes each */
+ for (n = 0; n < 512; n++) {
+ int offset = (PAGE_SIZE / 2) + (n * 4) - 4;
+ u64 *p = (u64 *)(pg + offset);
*p ^= get_tod_clock_fast();
}
- n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash);
- if (n < sizeof(hash))
- h = hash;
- else
- h = ebuf;
- /* hash over the filled pages */
- cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
- if (n < sizeof(hash))
- memcpy(ebuf, hash, n);
+ /* hash over the filled page */
+ cpacf_klmd(CPACF_KLMD_SHA_512, pblock, pg, PAGE_SIZE);
+ n = (nbytes < 64) ? nbytes : 64;
+ memcpy(ebuf, pblock, n);
ret += n;
ebuf += n;
nbytes -= n;
}
- free_pages((unsigned long)pg, 1);
+ memzero_explicit(pblock, sizeof(pblock));
+ memzero_explicit(pg, PAGE_SIZE);
+ free_page((unsigned long)pg);
return ret;
}
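As a cross-check of the accounting in the comment above: at 0.5 bits of entropy per stckf() value, each 64-byte output block needs 512 values. A minimal user-space sketch of that arithmetic alone, with no kernel calls:

	#include <stdio.h>

	/* Entropy bookkeeping of generate_entropy(): 50% entropy per output
	 * byte, 0.5 bits of entropy per stckf() value. */
	static unsigned long stckf_values_needed(unsigned long out_bytes)
	{
		unsigned long entropy_bits = out_bytes * 8 / 2;	/* 50% of 8 bits */

		return entropy_bits * 2;	/* 0.5 bits per stckf() value */
	}

	int main(void)
	{
		/* 64 bytes of output -> 256 bits of entropy -> 512 values */
		printf("%lu\n", stckf_values_needed(64));
		return 0;
	}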
@@ -344,8 +367,8 @@ static int __init prng_sha512_selftest(void)
static int __init prng_sha512_instantiate(void)
{
- int ret, datalen;
- u8 seed[64 + 32 + 16];
+ int ret, datalen, seedlen;
+ u8 seed[128 + 16];
pr_debug("prng runs in SHA-512 mode "
"with chunksize=%d and reseed_limit=%u\n",
@@ -368,16 +391,36 @@ static int __init prng_sha512_instantiate(void)
if (ret)
goto outfree;
- /* generate initial seed bytestring, with 256 + 128 bits entropy */
- ret = generate_entropy(seed, 64 + 32);
- if (ret != 64 + 32)
- goto outfree;
- /* followed by 16 bytes of unique nonce */
- get_tod_clock_ext(seed + 64 + 32);
+ /* generate initial seed, we need at least 256 + 128 bits of entropy */
+ if (trng_available) {
+ /*
+ * Trng available, so use it. The trng works in chunks of
+ * 32 bytes and produces 100% entropy. So we pull 64 bytes
+ * which gives us 512 bits of entropy.
+ */
+ seedlen = 2 * 32;
+ cpacf_trng(NULL, 0, seed, seedlen);
+ } else {
+ /*
+ * No trng available, so use the generate_entropy() function.
+ * This function works in 64-byte chunks and produces
+ * 50% entropy. So we pull 2*64 bytes which gives us 512 bits
+ * of entropy.
+ */
+ seedlen = 2 * 64;
+ ret = generate_entropy(seed, seedlen);
+ if (ret != seedlen)
+ goto outfree;
+ }
+
+ /* append 16 bytes of unique nonce to the seed */
+ get_tod_clock_ext(seed + seedlen);
+ seedlen += 16;
- /* initial seed of the prno drng */
+ /* now do the initial seeding of the prno drng */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
- &prng_data->prnows, NULL, 0, seed, sizeof(seed));
+ &prng_data->prnows, NULL, 0, seed, seedlen);
+ memzero_explicit(seed, sizeof(seed));
/* if fips mode is enabled, generate a first block of random
bytes for the FIPS 140-2 Conditional Self Test */
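The seedlen values in the two branches above follow directly from the entropy rates quoted in the comments: 100% for the trng, 50% for generate_entropy(), against a 512-bit target. A small sketch of that computation, with illustrative names:

	#include <stdio.h>

	/* Seed bytes needed for a 512-bit entropy target at a given entropy
	 * rate; rate_percent is 100 for the trng, 50 for generate_entropy(). */
	static int seed_bytes_needed(int rate_percent)
	{
		return 512 / 8 * 100 / rate_percent;
	}

	int main(void)
	{
		/* 64 (matches seedlen = 2 * 32), 128 (matches seedlen = 2 * 64) */
		printf("%d %d\n", seed_bytes_needed(100), seed_bytes_needed(50));
		return 0;
	}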
@@ -405,17 +448,26 @@ static void prng_sha512_deinstantiate(void)
static int prng_sha512_reseed(void)
{
- int ret;
+ int ret, seedlen;
u8 seed[64];
- /* fetch 256 bits of fresh entropy */
- ret = generate_entropy(seed, sizeof(seed));
- if (ret != sizeof(seed))
- return ret;
+ /* We need at least 256 bits of fresh entropy for reseeding */
+ if (trng_available) {
+ /* trng produces 256 bits of entropy in 32 bytes */
+ seedlen = 32;
+ cpacf_trng(NULL, 0, seed, seedlen);
+ } else {
+ /* generate_entropy() produces 256 bits of entropy in 64 bytes */
+ seedlen = 64;
+ ret = generate_entropy(seed, seedlen);
+ if (ret != seedlen)
+ return ret;
+ }
/* do a reseed of the prno drng with this bytestring */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
- &prng_data->prnows, NULL, 0, seed, sizeof(seed));
+ &prng_data->prnows, NULL, 0, seed, seedlen);
+ memzero_explicit(seed, sizeof(seed));
return 0;
}
@@ -592,6 +644,7 @@ static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
ret = -EFAULT;
break;
}
+ memzero_explicit(p, n);
ubuf += n;
nbytes -= n;
ret += n;
@@ -773,6 +826,10 @@ static int __init prng_init(void)
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
return -EOPNOTSUPP;
+ /* check if TRNG subfunction is available */
+ if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+ trng_available = true;
+
/* choose prng mode */
if (prng_mode != PRNG_MODE_TDES) {
/* check for MSA5 support for PRNO operations */
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 4d58a92b5d97..c59b922cb6c5 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -39,6 +39,7 @@ CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_VERIFY_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_HIBERNATION=y
CONFIG_PM_DEBUG=y
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
index 72e3140fafb5..3235e4d82f2d 100644
--- a/arch/s390/hypfs/hypfs_diag0c.c
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -16,26 +16,12 @@
#define DBFS_D0C_HDR_VERSION 0
/*
- * Execute diagnose 0c in 31 bit mode
- */
-static void diag0c(struct hypfs_diag0c_entry *entry)
-{
- diag_stat_inc(DIAG_STAT_X00C);
- asm volatile (
- " sam31\n"
- " diag %0,%0,0x0c\n"
- " sam64\n"
- : /* no output register */
- : "a" (entry)
- : "memory");
-}
-
-/*
* Get hypfs_diag0c_entry from CPU vector and store diag0c data
*/
static void diag0c_fn(void *data)
{
- diag0c(((void **) data)[smp_processor_id()]);
+ diag_stat_inc(DIAG_STAT_X00C);
+ diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
}
/*
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index e3239772887a..2531f673f099 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -20,8 +20,7 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += preempt.h
-generic-y += rwsem.h
+generic-y += mmiowb.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index fcf539efb32f..c10d2ee2dfda 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -14,7 +14,7 @@
struct airq_struct {
struct hlist_node list; /* Handler queueing. */
- void (*handler)(struct airq_struct *); /* Thin-interrupt handler */
+ void (*handler)(struct airq_struct *airq, bool floating);
u8 *lsi_ptr; /* Local-Summary-Indicator pointer */
u8 lsi_mask; /* Local-Summary-Indicator mask */
u8 isc; /* Interrupt-subclass */
@@ -35,13 +35,15 @@ struct airq_iv {
unsigned int *data; /* 32 bit value associated with each bit */
unsigned long bits; /* Number of bits in the vector */
unsigned long end; /* Number of highest allocated bit + 1 */
+ unsigned long flags; /* Allocation flags */
spinlock_t lock; /* Lock to protect alloc & free */
};
-#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
-#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
-#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
-#define AIRQ_IV_DATA 8 /* Allocate the data array */
+#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
+#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
+#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
+#define AIRQ_IV_DATA 8 /* Allocate the data array */
+#define AIRQ_IV_CACHELINE 16 /* Cacheline alignment for the vector */
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
void airq_iv_release(struct airq_iv *iv);
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 1a6a7092d942..e94a0a28b5eb 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
return reg1;
}
+/*
+ * Interface to tell the AP bus code that a configuration
+ * change has happened. The bus code should at least do
+ * an ap bus resource rescan.
+ */
+#if IS_ENABLED(CONFIG_ZCRYPT)
+void ap_bus_cfg_chg(void);
+#else
+static inline void ap_bus_cfg_chg(void) { }
+#endif
+
#endif /* _ASM_S390_AP_H_ */
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index d1f8a4d94cca..9900d655014c 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -73,7 +73,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __atomic64_or(mask, addr);
+ __atomic64_or(mask, (long *)addr);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -94,7 +94,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- __atomic64_and(mask, addr);
+ __atomic64_and(mask, (long *)addr);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -115,7 +115,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __atomic64_xor(mask, addr);
+ __atomic64_xor(mask, (long *)addr);
}
static inline int
@@ -125,7 +125,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __atomic64_or_barrier(mask, addr);
+ old = __atomic64_or_barrier(mask, (long *)addr);
return (old & mask) != 0;
}
@@ -136,7 +136,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- old = __atomic64_and_barrier(mask, addr);
+ old = __atomic64_and_barrier(mask, (long *)addr);
return (old & ~mask) != 0;
}
@@ -147,7 +147,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __atomic64_xor_barrier(mask, addr);
+ old = __atomic64_xor_barrier(mask, (long *)addr);
return (old & mask) != 0;
}
diff --git a/arch/s390/include/asm/boot_data.h b/arch/s390/include/asm/boot_data.h
index 2d999ccb977a..f7eed27b3220 100644
--- a/arch/s390/include/asm/boot_data.h
+++ b/arch/s390/include/asm/boot_data.h
@@ -5,7 +5,14 @@
#include <asm/ipl.h>
extern char early_command_line[COMMAND_LINE_SIZE];
-extern struct ipl_parameter_block early_ipl_block;
-extern int early_ipl_block_valid;
+extern struct ipl_parameter_block ipl_block;
+extern int ipl_block_valid;
+extern int ipl_secure_flag;
+
+extern unsigned long ipl_cert_list_addr;
+extern unsigned long ipl_cert_list_size;
+
+extern unsigned long early_ipl_comp_list_addr;
+extern unsigned long early_ipl_comp_list_size;
#endif /* _ASM_S390_BOOT_DATA_H */
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 429f43a8a8e8..713fc9735ffb 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -15,7 +15,7 @@
".section .rodata.str,\"aMS\",@progbits,1\n" \
"2: .asciz \""__FILE__"\"\n" \
".previous\n" \
- ".section __bug_table,\"aw\"\n" \
+ ".section __bug_table,\"awM\",@progbits,%2\n" \
"3: .long 1b-3b,2b-3b\n" \
" .short %0,%1\n" \
" .org 3b+%2\n" \
@@ -27,17 +27,17 @@
#else /* CONFIG_DEBUG_BUGVERBOSE */
-#define __EMIT_BUG(x) do { \
- asm volatile( \
- "0: j 0b+2\n" \
- "1:\n" \
- ".section __bug_table,\"aw\"\n" \
- "2: .long 1b-2b\n" \
- " .short %0\n" \
- " .org 2b+%1\n" \
- ".previous\n" \
- : : "i" (x), \
- "i" (sizeof(struct bug_entry))); \
+#define __EMIT_BUG(x) do { \
+ asm volatile( \
+ "0: j 0b+2\n" \
+ "1:\n" \
+ ".section __bug_table,\"awM\",@progbits,%1\n" \
+ "2: .long 1b-2b\n" \
+ " .short %0\n" \
+ " .org 2b+%1\n" \
+ ".previous\n" \
+ : : "i" (x), \
+ "i" (sizeof(struct bug_entry))); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 225667652069..1727180e8ca1 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -331,5 +331,6 @@ extern void css_schedule_reprobe(void);
/* Function from drivers/s390/cio/chsc.c */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
int chsc_sstpi(void *page, void *result, size_t size);
+int chsc_sgib(u32 origin);
#endif
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 19562be22b7e..0036eab14391 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -308,4 +308,17 @@ union diag318_info {
int diag204(unsigned long subcode, unsigned long size, void *addr);
int diag224(void *ptr);
int diag26c(void *req, void *resp, enum diag26c_sc subcode);
+
+struct hypfs_diag0c_entry;
+
+struct diag_ops {
+ int (*diag210)(struct diag210 *addr);
+ int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
+ int (*diag14)(unsigned long rx, unsigned long ry1, unsigned long subcode);
+ void (*diag0c)(struct hypfs_diag0c_entry *entry);
+ void (*diag308_reset)(void);
+};
+
+extern struct diag_ops diag_dma_ops;
+extern struct diag210 *__diag210_tmp_dma;
#endif /* _ASM_S390_DIAG_H */
diff --git a/arch/s390/include/asm/ebcdic.h b/arch/s390/include/asm/ebcdic.h
index 29441beb92e6..efb50fc6866c 100644
--- a/arch/s390/include/asm/ebcdic.h
+++ b/arch/s390/include/asm/ebcdic.h
@@ -20,7 +20,7 @@ extern __u8 _ebc_tolower[256]; /* EBCDIC -> lowercase */
extern __u8 _ebc_toupper[256]; /* EBCDIC -> uppercase */
static inline void
-codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
+codepage_convert(const __u8 *codepage, volatile char *addr, unsigned long nr)
{
if (nr-- <= 0)
return;
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 7d22a474a040..5775fc22f410 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -107,6 +107,10 @@
#define HWCAP_S390_VXRS_BCD 4096
#define HWCAP_S390_VXRS_EXT 8192
#define HWCAP_S390_GS 16384
+#define HWCAP_S390_VXRS_EXT2 32768
+#define HWCAP_S390_VXRS_PDE 65536
+#define HWCAP_S390_SORT 131072
+#define HWCAP_S390_DFLT 262144
/* Internal bits, not exposed via elf */
#define HWCAP_INT_SIE 1UL
@@ -252,11 +256,14 @@ do { \
/*
* Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
*/
-#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL)
#define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK MMAP_RND_MASK
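The masks above are in units of pages, so the ranges named in the comment can be checked directly. A small user-space sketch of the arithmetic, assuming the 4 KB page size the comment's figures imply:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		/* BRK_RND_MASK 0x1fff: (0x1fff + 1) * 4K = 32MB of brk range */
		printf("brk 64-bit:  %lu MB\n", (0x1fffUL + 1) * PAGE_SIZE >> 20);
		/* compat mask 0x7ff: 8MB */
		printf("brk 31-bit:  %lu MB\n", (0x7ffUL + 1) * PAGE_SIZE >> 20);
		/* MMAP_RND_MASK 0x3ff80: ~1GB range in 512KB steps, since the
		 * low 7 bits of the mask are clear (128 pages = 512KB) */
		printf("mmap 64-bit: %lu MB\n", (0x3ff80UL + 0x80) * PAGE_SIZE >> 20);
		return 0;
	}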
diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h
index 80a4e5a9cb46..ae27f756b409 100644
--- a/arch/s390/include/asm/extable.h
+++ b/arch/s390/include/asm/extable.h
@@ -19,6 +19,11 @@ struct exception_table_entry
int insn, fixup;
};
+extern struct exception_table_entry *__start_dma_ex_table;
+extern struct exception_table_entry *__stop_dma_ex_table;
+
+const struct exception_table_entry *s390_search_extables(unsigned long addr);
+
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 5a3c95b11952..68d362f8d6c1 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -11,9 +11,16 @@
#define MCOUNT_RETURN_FIXUP 18
#endif
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
#ifndef __ASSEMBLY__
+#ifdef CONFIG_CC_IS_CLANG
+/* https://bugs.llvm.org/show_bug.cgi?id=41424 */
+#define ftrace_return_address(n) 0UL
+#else
#define ftrace_return_address(n) __builtin_return_address(n)
+#endif
void _mcount(void);
void ftrace_caller(void);
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index f34d729347e4..ca421614722f 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -30,14 +30,8 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
-{
- return (void __iomem *) offset;
-}
-
-static inline void iounmap(volatile void __iomem *addr)
-{
-}
+void __iomem *ioremap(unsigned long offset, unsigned long size);
+void iounmap(volatile void __iomem *addr);
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
@@ -57,14 +51,17 @@ static inline void ioport_unmap(void __iomem *p)
* the corresponding device and create the mapping cookie.
*/
#define pci_iomap pci_iomap
+#define pci_iomap_range pci_iomap_range
#define pci_iounmap pci_iounmap
-#define pci_iomap_wc pci_iomap
-#define pci_iomap_wc_range pci_iomap_range
+#define pci_iomap_wc pci_iomap_wc
+#define pci_iomap_wc_range pci_iomap_wc_range
#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
#define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
+#define mmiowb() zpci_barrier()
+
#define __raw_readb zpci_read_u8
#define __raw_readw zpci_read_u16
#define __raw_readl zpci_read_u32
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index a8389e2d2f03..084e71b7272a 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -12,74 +12,36 @@
#include <asm/types.h>
#include <asm/cio.h>
#include <asm/setup.h>
+#include <uapi/asm/ipl.h>
-#define NSS_NAME_SIZE 8
-
-#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
- sizeof(struct ipl_block_fcp))
-
-#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
+struct ipl_parameter_block {
+ struct ipl_pl_hdr hdr;
+ union {
+ struct ipl_pb_hdr pb0_hdr;
+ struct ipl_pb0_common common;
+ struct ipl_pb0_fcp fcp;
+ struct ipl_pb0_ccw ccw;
+ char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)];
+ };
+} __packed __aligned(PAGE_SIZE);
-#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
- sizeof(struct ipl_block_ccw))
+#define NSS_NAME_SIZE 8
-#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
+#define IPL_BP_FCP_LEN (sizeof(struct ipl_pl_hdr) + \
+ sizeof(struct ipl_pb0_fcp))
+#define IPL_BP0_FCP_LEN (sizeof(struct ipl_pb0_fcp))
+#define IPL_BP_CCW_LEN (sizeof(struct ipl_pl_hdr) + \
+ sizeof(struct ipl_pb0_ccw))
+#define IPL_BP0_CCW_LEN (sizeof(struct ipl_pb0_ccw))
#define IPL_MAX_SUPPORTED_VERSION (0)
-struct ipl_list_hdr {
- u32 len;
- u8 reserved1[3];
- u8 version;
- u32 blk0_len;
- u8 pbt;
- u8 flags;
- u16 reserved2;
- u8 loadparm[8];
-} __attribute__((packed));
-
-struct ipl_block_fcp {
- u8 reserved1[305-1];
- u8 opt;
- u8 reserved2[3];
- u16 reserved3;
- u16 devno;
- u8 reserved4[4];
- u64 wwpn;
- u64 lun;
- u32 bootprog;
- u8 reserved5[12];
- u64 br_lba;
- u32 scp_data_len;
- u8 reserved6[260];
- u8 scp_data[];
-} __attribute__((packed));
-
-#define DIAG308_VMPARM_SIZE 64
-#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \
- offsetof(struct ipl_block_fcp, scp_data)))
-
-struct ipl_block_ccw {
- u8 reserved1[84];
- u16 reserved2 : 13;
- u8 ssid : 3;
- u16 devno;
- u8 vm_flags;
- u8 reserved3[3];
- u32 vm_parm_len;
- u8 nss_name[8];
- u8 vm_parm[DIAG308_VMPARM_SIZE];
- u8 reserved4[8];
-} __attribute__((packed));
+#define IPL_RB_CERT_UNKNOWN ((unsigned short)-1)
-struct ipl_parameter_block {
- struct ipl_list_hdr hdr;
- union {
- struct ipl_block_fcp fcp;
- struct ipl_block_ccw ccw;
- char raw[PAGE_SIZE - sizeof(struct ipl_list_hdr)];
- } ipl_info;
-} __packed __aligned(PAGE_SIZE);
+#define DIAG308_VMPARM_SIZE (64)
+#define DIAG308_SCPDATA_OFFSET offsetof(struct ipl_parameter_block, \
+ fcp.scp_data)
+#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - DIAG308_SCPDATA_OFFSET)
struct save_area;
struct save_area * __init save_area_alloc(bool is_boot_cpu);
@@ -88,7 +50,6 @@ void __init save_area_add_regs(struct save_area *, void *regs);
void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void s390_reset_system(void);
-extern void ipl_store_parameters(void);
extern size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb);
@@ -122,6 +83,33 @@ extern struct ipl_info ipl_info;
extern void setup_ipl(void);
extern void set_os_info_reipl_block(void);
+struct ipl_report {
+ struct ipl_parameter_block *ipib;
+ struct list_head components;
+ struct list_head certificates;
+ size_t size;
+};
+
+struct ipl_report_component {
+ struct list_head list;
+ struct ipl_rb_component_entry entry;
+};
+
+struct ipl_report_certificate {
+ struct list_head list;
+ struct ipl_rb_certificate_entry entry;
+ void *key;
+};
+
+struct kexec_buf;
+struct ipl_report *ipl_report_init(struct ipl_parameter_block *ipib);
+void *ipl_report_finish(struct ipl_report *report);
+int ipl_report_free(struct ipl_report *report);
+int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf,
+ unsigned char flags, unsigned short cert);
+int ipl_report_add_certificate(struct ipl_report *report, void *key,
+ unsigned long addr, unsigned long len);
+
/*
* DIAG 308 support
*/
@@ -133,32 +121,12 @@ enum diag308_subcode {
DIAG308_STORE = 6,
};
-enum diag308_ipl_type {
- DIAG308_IPL_TYPE_FCP = 0,
- DIAG308_IPL_TYPE_CCW = 2,
-};
-
-enum diag308_opt {
- DIAG308_IPL_OPT_IPL = 0x10,
- DIAG308_IPL_OPT_DUMP = 0x20,
-};
-
-enum diag308_flags {
- DIAG308_FLAGS_LP_VALID = 0x80,
-};
-
-enum diag308_vm_flags {
- DIAG308_VM_FLAGS_NSS_VALID = 0x80,
- DIAG308_VM_FLAGS_VP_VALID = 0x40,
-};
-
enum diag308_rc {
DIAG308_RC_OK = 0x0001,
DIAG308_RC_NOCONFIG = 0x0102,
};
extern int diag308(unsigned long subcode, void *addr);
-extern void diag308_reset(void);
extern void store_status(void (*fn)(void *), void *data);
extern void lgr_info_log(void);
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 2f7f27e5493f..9f75d67b8c20 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -47,7 +47,6 @@ enum interruption_class {
IRQEXT_CMC,
IRQEXT_FTP,
IRQIO_CIO,
- IRQIO_QAI,
IRQIO_DAS,
IRQIO_C15,
IRQIO_C70,
@@ -55,13 +54,16 @@ enum interruption_class {
IRQIO_VMR,
IRQIO_LCS,
IRQIO_CTC,
- IRQIO_APB,
IRQIO_ADM,
IRQIO_CSC,
- IRQIO_PCI,
- IRQIO_MSI,
IRQIO_VIR,
+ IRQIO_QAI,
+ IRQIO_APB,
+ IRQIO_PCF,
+ IRQIO_PCD,
+ IRQIO_MSI,
IRQIO_VAI,
+ IRQIO_GAL,
NMI_NMI,
CPU_RST,
NR_ARCH_IRQS
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 6cb9e2ed05b6..b2cc1ec78d06 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -21,6 +21,7 @@
/* Adapter interrupts. */
#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
#define PCI_ISC 2 /* PCI I/O subchannels */
+#define GAL_ISC 5 /* GIB alert */
#define AP_ISC 6 /* adjunct processor (crypto) devices */
/* Functions for registration of I/O interruption subclasses */
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 825dd0f7f221..ea398a05f643 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/page.h>
+#include <asm/setup.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
* I.e. Maximum page that is mapped directly into kernel memory,
@@ -42,6 +43,9 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_S390
+/* Allow kexec_file to load a segment to 0 */
+#define KEXEC_BUF_MEM_UNKNOWN -1
+
/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs) { }
@@ -51,20 +55,24 @@ struct s390_load_data {
/* Pointer to the kernel buffer. Used to register cmdline etc.. */
void *kernel_buf;
+ /* Load address of the kernel_buf. */
+ unsigned long kernel_mem;
+
+ /* Parmarea in the kernel buffer. */
+ struct parmarea *parm;
+
/* Total size of loaded segments in memory. Used as an offset. */
size_t memsz;
- /* Load address of initrd. Used to register INITRD_START in kernel. */
- unsigned long initrd_load_addr;
+ struct ipl_report *report;
};
-int kexec_file_add_purgatory(struct kimage *image,
- struct s390_load_data *data);
-int kexec_file_add_initrd(struct kimage *image,
- struct s390_load_data *data,
- char *initrd, unsigned long initrd_len);
-int *kexec_file_update_kernel(struct kimage *iamge,
- struct s390_load_data *data);
+int s390_verify_sig(const char *kernel, unsigned long kernel_len);
+void *kexec_file_add_components(struct kimage *image,
+ int (*add_kernel)(struct kimage *image,
+ struct s390_load_data *data));
+int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
+ unsigned long addr);
extern const struct kexec_file_ops s390_kexec_image_ops;
extern const struct kexec_file_ops s390_kexec_elf_ops;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d5d24889c3bc..c47e22bba87f 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -591,7 +591,6 @@ struct kvm_s390_float_interrupt {
struct kvm_s390_mchk_info mchk;
struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
- unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
struct mutex ais_lock;
u8 simm;
u8 nimm;
@@ -712,6 +711,7 @@ struct s390_io_adapter {
struct kvm_s390_cpu_model {
/* facility mask supported by kvm & hosting machine */
__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
+ struct kvm_s390_vm_cpu_subfunc subfuncs;
/* facility list requested by guest (in dma page) */
__u64 *fac_list;
u64 cpuid;
@@ -782,9 +782,21 @@ struct kvm_s390_gisa {
u8 reserved03[11];
u32 airq_count;
} g1;
+ struct {
+ u64 word[4];
+ } u64;
};
};
+struct kvm_s390_gib {
+ u32 alert_list_origin;
+ u32 reserved01;
+ u8:5;
+ u8 nisc:3;
+ u8 reserved03[3];
+ u32 reserved04[5];
+};
+
/*
* sie_page2 has to be allocated as DMA because fac_list, crycb and
* gisa need 31bit addresses in the sie control block.
@@ -793,7 +805,8 @@ struct sie_page2 {
__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64]; /* 0x0000 */
struct kvm_s390_crypto_cb crycb; /* 0x0800 */
struct kvm_s390_gisa gisa; /* 0x0900 */
- u8 reserved920[0x1000 - 0x920]; /* 0x0920 */
+ struct kvm *kvm; /* 0x0920 */
+ u8 reserved928[0x1000 - 0x928]; /* 0x0928 */
};
struct kvm_s390_vsie {
@@ -804,6 +817,20 @@ struct kvm_s390_vsie {
struct page *pages[KVM_MAX_VCPUS];
};
+struct kvm_s390_gisa_iam {
+ u8 mask;
+ spinlock_t ref_lock;
+ u32 ref_count[MAX_ISC + 1];
+};
+
+struct kvm_s390_gisa_interrupt {
+ struct kvm_s390_gisa *origin;
+ struct kvm_s390_gisa_iam alert;
+ struct hrtimer timer;
+ u64 expires;
+ DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
+};
+
struct kvm_arch{
void *sca;
int use_esca;
@@ -837,7 +864,8 @@ struct kvm_arch{
atomic64_t cmma_dirty_pages;
/* subset of available cpu features enabled by user space */
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
- struct kvm_s390_gisa *gisa;
+ DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
+ struct kvm_s390_gisa_interrupt gisa_int;
};
#define KVM_HVA_ERR_BAD (-1UL)
@@ -871,6 +899,9 @@ void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;
+extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
+extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
+
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_check_processor_compat(void *rtn) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
@@ -878,7 +909,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index 1b95da3fdd64..7f22262b0e46 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -28,5 +28,12 @@
.long (_target) - . ; \
.previous
+#define EX_TABLE_DMA(_fault, _target) \
+ .section .dma.ex_table, "a" ; \
+ .align 4 ; \
+ .long (_fault) - . ; \
+ .long (_target) - . ; \
+ .previous
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index cc0947e08b6f..237ee0c4169f 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -91,52 +91,53 @@ struct lowcore {
__u64 hardirq_timer; /* 0x02e8 */
__u64 softirq_timer; /* 0x02f0 */
__u64 steal_timer; /* 0x02f8 */
- __u64 last_update_timer; /* 0x0300 */
- __u64 last_update_clock; /* 0x0308 */
- __u64 int_clock; /* 0x0310 */
- __u64 mcck_clock; /* 0x0318 */
- __u64 clock_comparator; /* 0x0320 */
- __u64 boot_clock[2]; /* 0x0328 */
+ __u64 avg_steal_timer; /* 0x0300 */
+ __u64 last_update_timer; /* 0x0308 */
+ __u64 last_update_clock; /* 0x0310 */
+ __u64 int_clock; /* 0x0318 */
+ __u64 mcck_clock; /* 0x0320 */
+ __u64 clock_comparator; /* 0x0328 */
+ __u64 boot_clock[2]; /* 0x0330 */
/* Current process. */
- __u64 current_task; /* 0x0338 */
- __u64 kernel_stack; /* 0x0340 */
+ __u64 current_task; /* 0x0340 */
+ __u64 kernel_stack; /* 0x0348 */
/* Interrupt, DAT-off and restartstack. */
- __u64 async_stack; /* 0x0348 */
- __u64 nodat_stack; /* 0x0350 */
- __u64 restart_stack; /* 0x0358 */
+ __u64 async_stack; /* 0x0350 */
+ __u64 nodat_stack; /* 0x0358 */
+ __u64 restart_stack; /* 0x0360 */
/* Restart function and parameter. */
- __u64 restart_fn; /* 0x0360 */
- __u64 restart_data; /* 0x0368 */
- __u64 restart_source; /* 0x0370 */
+ __u64 restart_fn; /* 0x0368 */
+ __u64 restart_data; /* 0x0370 */
+ __u64 restart_source; /* 0x0378 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0378 */
- __u64 user_asce; /* 0x0380 */
- __u64 vdso_asce; /* 0x0388 */
+ __u64 kernel_asce; /* 0x0380 */
+ __u64 user_asce; /* 0x0388 */
+ __u64 vdso_asce; /* 0x0390 */
/*
* The lpp and current_pid fields form a
* 64-bit value that is set as program
* parameter with the LPP instruction.
*/
- __u32 lpp; /* 0x0390 */
- __u32 current_pid; /* 0x0394 */
+ __u32 lpp; /* 0x0398 */
+ __u32 current_pid; /* 0x039c */
/* SMP info area */
- __u32 cpu_nr; /* 0x0398 */
- __u32 softirq_pending; /* 0x039c */
- __u32 preempt_count; /* 0x03a0 */
- __u32 spinlock_lockval; /* 0x03a4 */
- __u32 spinlock_index; /* 0x03a8 */
- __u32 fpu_flags; /* 0x03ac */
- __u64 percpu_offset; /* 0x03b0 */
- __u64 vdso_per_cpu_data; /* 0x03b8 */
- __u64 machine_flags; /* 0x03c0 */
- __u64 gmap; /* 0x03c8 */
- __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */
+ __u32 cpu_nr; /* 0x03a0 */
+ __u32 softirq_pending; /* 0x03a4 */
+ __s32 preempt_count; /* 0x03a8 */
+ __u32 spinlock_lockval; /* 0x03ac */
+ __u32 spinlock_index; /* 0x03b0 */
+ __u32 fpu_flags; /* 0x03b4 */
+ __u64 percpu_offset; /* 0x03b8 */
+ __u64 vdso_per_cpu_data; /* 0x03c0 */
+ __u64 machine_flags; /* 0x03c8 */
+ __u64 gmap; /* 0x03d0 */
+ __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
/* br %r1 trampoline */
__u16 br_r1_trampoline; /* 0x0400 */
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index 123dac3717b3..0033dcd663b1 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -32,23 +32,23 @@ _LC_BR_R1 = __LC_BR_R1
.endm
.macro __THUNK_PROLOG_BR r1,r2
- __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
+ __THUNK_PROLOG_NAME __s390_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_PROLOG_BC d0,r1,r2
- __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+ __THUNK_PROLOG_NAME __s390_indirect_branch_\d0\()_\r2\()use_\r1
.endm
.macro __THUNK_BR r1,r2
- jg __s390x_indirect_jump_r\r2\()use_r\r1
+ jg __s390_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_BC d0,r1,r2
- jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+ jg __s390_indirect_branch_\d0\()_\r2\()use_\r1
.endm
.macro __THUNK_BRASL r1,r2,r3
- brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
+ brasl \r1,__s390_indirect_jump_r\r3\()use_r\r2
.endm
.macro __DECODE_RR expand,reg,ruse
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 4e0efebc56a9..305befd55326 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -26,6 +26,9 @@ int pci_proc_domain(struct pci_bus *);
#define ZPCI_BUS_NR 0 /* default bus number */
#define ZPCI_DEVFN 0 /* default device number */
+#define ZPCI_NR_DMA_SPACES 1
+#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
+
/* PCI Function Controls */
#define ZPCI_FC_FN_ENABLED 0x80
#define ZPCI_FC_ERROR 0x40
@@ -83,6 +86,8 @@ enum zpci_state {
struct zpci_bar_struct {
struct resource *res; /* bus resource */
+ void __iomem *mio_wb;
+ void __iomem *mio_wt;
u32 val; /* bar start & 3 flag bits */
u16 map_idx; /* index into bar mapping array */
u8 size; /* order 2 exponent */
@@ -112,6 +117,8 @@ struct zpci_dev {
/* IRQ stuff */
u64 msi_addr; /* MSI address */
unsigned int max_msi; /* maximum number of MSI's */
+ unsigned int msi_first_bit;
+ unsigned int msi_nr_irqs;
struct airq_iv *aibv; /* adapter interrupt bit vector */
unsigned long aisb; /* number of the summary bit */
@@ -130,6 +137,7 @@ struct zpci_dev {
struct iommu_device iommu_dev; /* IOMMU core handle */
char res_name[16];
+ bool mio_capable;
struct zpci_bar_struct bars[PCI_BAR_COUNT];
u64 start_dma; /* Start of available DMA addresses */
@@ -158,6 +166,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
}
extern const struct attribute_group *zpci_attr_groups[];
+extern unsigned int s390_pci_force_floating __initdata;
/* -----------------------------------------------------------------------------
Prototypes
@@ -219,6 +228,9 @@ struct zpci_dev *get_zdev_by_fid(u32);
int zpci_dma_init(void);
void zpci_dma_exit(void);
+int __init zpci_irq_init(void);
+void __init zpci_irq_exit(void);
+
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
int zpci_fmb_disable_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index b3b31b31f0d3..3ec52a05d500 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -43,6 +43,8 @@ struct clp_fh_list_entry {
#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
+#define CLP_SET_ENABLE_MIO 2
+#define CLP_SET_DISABLE_MIO 3
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
@@ -80,7 +82,8 @@ struct clp_req_query_pci {
struct clp_rsp_query_pci {
struct clp_rsp_hdr hdr;
u16 vfn; /* virtual fn number */
- u16 : 7;
+ u16 : 6;
+ u16 mio_addr_avail : 1;
u16 util_str_avail : 1; /* utility string available? */
u16 pfgid : 8; /* pci function group id */
u32 fid; /* pci function id */
@@ -96,6 +99,15 @@ struct clp_rsp_query_pci {
u32 reserved[11];
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
+ u32 reserved2[16];
+ u32 mio_valid : 6;
+ u32 : 26;
+ u32 : 32;
+ struct {
+ u64 wb;
+ u64 wt;
+ } addr[PCI_BAR_COUNT];
+ u32 reserved3[6];
} __packed;
/* Query PCI function group request */
@@ -118,7 +130,11 @@ struct clp_rsp_query_pci_grp {
u8 refresh : 1; /* TLB refresh mode */
u16 reserved2;
u16 mui;
- u64 reserved3;
+ u16 : 16;
+ u16 maxfaal;
+ u16 : 4;
+ u16 dnoi : 12;
+ u16 maxcpu;
u64 dasm; /* dma address space mask */
u64 msia; /* MSI address */
u64 reserved4;
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index ba22a6ea51a1..ff81ed19c506 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -2,6 +2,8 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
+#include <linux/jump_label.h>
+
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@@ -38,6 +40,8 @@
#define ZPCI_MOD_FC_RESET_ERROR 7
#define ZPCI_MOD_FC_RESET_BLOCK 9
#define ZPCI_MOD_FC_SET_MEASURE 10
+#define ZPCI_MOD_FC_REG_INT_D 16
+#define ZPCI_MOD_FC_DEREG_INT_D 17
/* FIB function controls */
#define ZPCI_FIB_FC_ENABLED 0x80
@@ -51,16 +55,7 @@
#define ZPCI_FIB_FC_LS_BLOCKED 0x20
#define ZPCI_FIB_FC_DMAAS_REG 0x10
-/* Function Information Block */
-struct zpci_fib {
- u32 fmt : 8; /* format */
- u32 : 24;
- u32 : 32;
- u8 fc; /* function controls */
- u64 : 56;
- u64 pba; /* PCI base address */
- u64 pal; /* PCI address limit */
- u64 iota; /* I/O Translation Anchor */
+struct zpci_fib_fmt0 {
u32 : 1;
u32 isc : 3; /* Interrupt subclass */
u32 noi : 12; /* Number of interrupts */
@@ -72,16 +67,90 @@ struct zpci_fib {
u32 : 32;
u64 aibv; /* Adapter int bit vector address */
u64 aisb; /* Adapter int summary bit address */
+};
+
+struct zpci_fib_fmt1 {
+ u32 : 4;
+ u32 noi : 12;
+ u32 : 16;
+ u32 dibvo : 16;
+ u32 : 16;
+ u64 : 64;
+ u64 : 64;
+};
+
+/* Function Information Block */
+struct zpci_fib {
+ u32 fmt : 8; /* format */
+ u32 : 24;
+ u32 : 32;
+ u8 fc; /* function controls */
+ u64 : 56;
+ u64 pba; /* PCI base address */
+ u64 pal; /* PCI address limit */
+ u64 iota; /* I/O Translation Anchor */
+ union {
+ struct zpci_fib_fmt0 fmt0;
+ struct zpci_fib_fmt1 fmt1;
+ };
u64 fmb_addr; /* Function measurement block address and key */
u32 : 32;
u32 gd;
} __packed __aligned(8);
+/* directed interruption information block */
+struct zpci_diib {
+ u32 : 1;
+ u32 isc : 3;
+ u32 : 28;
+ u16 : 16;
+ u16 nr_cpus;
+ u64 disb_addr;
+ u64 : 64;
+ u64 : 64;
+} __packed __aligned(8);
+
+/* cpu directed interruption information block */
+struct zpci_cdiib {
+ u64 : 64;
+ u64 dibv_addr;
+ u64 : 64;
+ u64 : 64;
+ u64 : 64;
+} __packed __aligned(8);
+
+union zpci_sic_iib {
+ struct zpci_diib diib;
+ struct zpci_cdiib cdiib;
+};
+
+DECLARE_STATIC_KEY_FALSE(have_mio);
+
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status);
int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
-int zpci_load(u64 *data, u64 req, u64 offset);
-int zpci_store(u64 data, u64 req, u64 offset);
-int zpci_store_block(const u64 *data, u64 req, u64 offset);
-int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int __zpci_load(u64 *data, u64 req, u64 offset);
+int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len);
+int __zpci_store(u64 data, u64 req, u64 offset);
+int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len);
+int __zpci_store_block(const u64 *data, u64 req, u64 offset);
+void zpci_barrier(void);
+int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib);
+
+static inline int zpci_set_irq_ctrl(u16 ctl, u8 isc)
+{
+ union zpci_sic_iib iib = {{0}};
+
+ return __zpci_set_irq_ctrl(ctl, isc, &iib);
+}
+
+#ifdef CONFIG_PCI
+static inline void enable_mio_ctl(void)
+{
+ if (static_branch_likely(&have_mio))
+ __ctl_set_bit(2, 5);
+}
+#else /* CONFIG_PCI */
+static inline void enable_mio_ctl(void) {}
+#endif /* CONFIG_PCI */
#endif
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index cbb9cb9c6547..cd060b5dd8fd 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -37,12 +37,10 @@ extern struct zpci_iomap_entry *zpci_iomap_start;
#define zpci_read(LENGTH, RETTYPE) \
static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
{ \
- struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
- u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data; \
int rc; \
\
- rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
+ rc = zpci_load(&data, addr, LENGTH); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
@@ -52,11 +50,9 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
static inline void zpci_write_##VALTYPE(VALTYPE val, \
const volatile void __iomem *addr) \
{ \
- struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
- u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
- zpci_store(data, req, ZPCI_OFFSET(addr)); \
+ zpci_store(addr, data, LENGTH); \
}
zpci_read(8, u64)
@@ -68,36 +64,38 @@ zpci_write(4, u32)
zpci_write(2, u16)
zpci_write(1, u8)
-static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
+static inline int zpci_write_single(volatile void __iomem *dst, const void *src,
+ unsigned long len)
{
u64 val;
switch (len) {
case 1:
- val = (u64) *((u8 *) data);
+ val = (u64) *((u8 *) src);
break;
case 2:
- val = (u64) *((u16 *) data);
+ val = (u64) *((u16 *) src);
break;
case 4:
- val = (u64) *((u32 *) data);
+ val = (u64) *((u32 *) src);
break;
case 8:
- val = (u64) *((u64 *) data);
+ val = (u64) *((u64 *) src);
break;
default:
val = 0; /* let FW report error */
break;
}
- return zpci_store(val, req, offset);
+ return zpci_store(dst, val, len);
}
-static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
+static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
+ unsigned long len)
{
u64 data;
int cc;
- cc = zpci_load(&data, req, offset);
+ cc = zpci_load(&data, src, len);
if (cc)
goto out;
@@ -119,10 +117,8 @@ out:
return cc;
}
-static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
-{
- return zpci_store_block(data, req, offset);
-}
+int zpci_write_block(volatile void __iomem *dst, const void *src,
+ unsigned long len);
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
{
@@ -140,18 +136,15 @@ static inline int zpci_memcpy_fromio(void *dst,
const volatile void __iomem *src,
unsigned long n)
{
- struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
- u64 req, offset = ZPCI_OFFSET(src);
int size, rc = 0;
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) src,
(u64) dst, n, 8);
- req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
- rc = zpci_read_single(req, dst, offset, size);
+ rc = zpci_read_single(dst, src, size);
if (rc)
break;
- offset += size;
+ src += size;
dst += size;
n -= size;
}
@@ -161,8 +154,6 @@ static inline int zpci_memcpy_fromio(void *dst,
static inline int zpci_memcpy_toio(volatile void __iomem *dst,
const void *src, unsigned long n)
{
- struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
- u64 req, offset = ZPCI_OFFSET(dst);
int size, rc = 0;
if (!src)
@@ -171,16 +162,14 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) dst,
(u64) src, n, 128);
- req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
-
if (size > 8) /* main path */
- rc = zpci_write_block(req, src, offset);
+ rc = zpci_write_block(dst, src, size);
else
- rc = zpci_write_single(req, src, offset, size);
+ rc = zpci_write_single(dst, src, size);
if (rc)
break;
- offset += size;
src += size;
+ dst += size;
n -= size;
}
return rc;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 76dc344edb8c..9f0195d5fa16 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -238,7 +238,7 @@ static inline int is_module_addr(void *addr)
#define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */
#define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */
#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
-#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
+#define _REGION_ENTRY_TYPE_MASK 0x0c /* region table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
@@ -277,6 +277,7 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
+#define _SEGMENT_ENTRY_TYPE_MASK 0x0c /* segment table type mask */
#define _SEGMENT_ENTRY (0)
#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
@@ -614,15 +615,9 @@ static inline int pgd_none(pgd_t pgd)
static inline int pgd_bad(pgd_t pgd)
{
- /*
- * With dynamic page table levels the pgd can be a region table
- * entry or a segment table entry. Check for the bit that are
- * invalid for either table entry.
- */
- unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
- ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
- return (pgd_val(pgd) & mask) != 0;
+ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
+ return 0;
+ return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}
static inline unsigned long pgd_pfn(pgd_t pgd)
@@ -703,6 +698,8 @@ static inline int pmd_large(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
+ if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
+ return 1;
if (pmd_large(pmd))
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
@@ -710,8 +707,12 @@ static inline int pmd_bad(pmd_t pmd)
static inline int pud_bad(pud_t pud)
{
- if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
- return pmd_bad(__pmd(pud_val(pud)));
+ unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
+
+ if (type > _REGION_ENTRY_TYPE_R3)
+ return 1;
+ if (type < _REGION_ENTRY_TYPE_R3)
+ return 0;
if (pud_large(pud))
return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
@@ -719,8 +720,12 @@ static inline int pud_bad(pud_t pud)
static inline int p4d_bad(p4d_t p4d)
{
- if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
- return pud_bad(__pud(p4d_val(p4d)));
+ unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
+
+ if (type > _REGION_ENTRY_TYPE_R2)
+ return 1;
+ if (type < _REGION_ENTRY_TYPE_R2)
+ return 0;
return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}
@@ -1204,41 +1209,78 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
-
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+/*
+ * The pgd_offset function *always* adds the index for the top-level
+ * region/segment table. This is done to get a sequence like the
+ * following to work:
+ * pgdp = pgd_offset(current->mm, addr);
+ * pgd = READ_ONCE(*pgdp);
+ * p4dp = p4d_offset(&pgd, addr);
+ * ...
+ * The subsequent p4d_offset, pud_offset and pmd_offset functions
+ * only add an index if they dereferenced the pointer.
+ */
+static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
- p4d_t *p4d = (p4d_t *) pgd;
+ unsigned long rste;
+ unsigned int shift;
- if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
- p4d = (p4d_t *) pgd_deref(*pgd);
- return p4d + p4d_index(address);
+ /* Get the first entry of the top level table */
+ rste = pgd_val(*pgd);
+ /* Pick up the shift from the table type of the first entry */
+ shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
+ return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}
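/*
 * Worked example for the shift computation above, using the
 * _REGION_ENTRY_TYPE_* values defined earlier: the type bits are
 * 0x0c, 0x08, 0x04 or 0x00, so (type >> 2) yields 3, 2, 1 or 0 and
 * the shift 3*11+20 = 53, 2*11+20 = 42, 1*11+20 = 31 or 0*11+20 = 20,
 * i.e. the index position of the region-first, region-second,
 * region-third and segment level within a virtual address.
 */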
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
- pud_t *pud = (pud_t *) p4d;
+ if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+ return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
+ return (p4d_t *) pgd;
+}
- if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
- pud = (pud_t *) p4d_deref(*p4d);
- return pud + pud_index(address);
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+ return (pud_t *) p4d_deref(*p4d) + pud_index(address);
+ return (pud_t *) p4d;
}
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
- pmd_t *pmd = (pmd_t *) pud;
+ if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+ return (pmd_t *) pud_deref(*pud) + pmd_index(address);
+ return (pmd_t *) pud;
+}
- if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
- pmd = (pmd_t *) pud_deref(*pud);
- return pmd + pmd_index(address);
+static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
+{
+ return (pte_t *) pmd_deref(*pmd) + pte_index(address);
+}
+
+#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
+#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
+#define pte_unmap(pte) do { } while (0)
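/*
 * A minimal walk sketch following the READ_ONCE pattern described
 * above pgd_offset_raw; the function name is hypothetical and the
 * checks are reduced to the none/bad tests.
 */
static inline pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp, pgd;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	pgdp = pgd_offset(mm, addr);	/* always indexes the top level */
	pgd = READ_ONCE(*pgdp);
	if (pgd_none(pgd) || pgd_bad(pgd))
		return NULL;
	p4dp = p4d_offset(&pgd, addr);	/* indexes only after a dereference */
	if (p4d_none(*p4dp))
		return NULL;
	pudp = pud_offset(p4dp, addr);
	if (pud_none(*pudp) || pud_bad(*pudp))
		return NULL;
	pmdp = pmd_offset(pudp, addr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return NULL;
	return pte_offset(pmdp, addr);	/* caller tests pte_present */
}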
+
+static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
+{
+ unsigned long len, end;
+
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ if (end < start)
+ return false;
+ return end <= current->mm->context.asce_limit;
}
+#define gup_fast_permitted gup_fast_permitted
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
@@ -1249,12 +1291,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
-/* Find an entry in the lowest level page table.. */
-#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
-#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
-#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_unmap(pte) do { } while (0)
-
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 81038ab357ce..b0fcbc37b637 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -156,25 +156,6 @@ struct thread_struct {
typedef struct thread_struct thread_struct;
-/*
- * Stack layout of a C stack frame.
- */
-#ifndef __PACK_STACK
-struct stack_frame {
- unsigned long back_chain;
- unsigned long empty1[5];
- unsigned long gprs[10];
- unsigned int empty2[8];
-};
-#else
-struct stack_frame {
- unsigned long empty1[5];
- unsigned int empty2[8];
- unsigned long gprs[10];
- unsigned long back_chain;
-};
-#endif
-
#define ARCH_MIN_TASKALIGN 8
#define INIT_THREAD { \
@@ -206,11 +187,7 @@ struct mm_struct;
struct seq_file;
struct pt_regs;
-typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
-void dump_trace(dump_trace_func_t func, void *data,
- struct task_struct *task, unsigned long sp);
void show_registers(struct pt_regs *regs);
-
void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
@@ -244,55 +221,6 @@ static __no_kasan_or_inline unsigned short stap(void)
return cpu_address;
}
-#define CALL_ARGS_0() \
- register unsigned long r2 asm("2")
-#define CALL_ARGS_1(arg1) \
- register unsigned long r2 asm("2") = (unsigned long)(arg1)
-#define CALL_ARGS_2(arg1, arg2) \
- CALL_ARGS_1(arg1); \
- register unsigned long r3 asm("3") = (unsigned long)(arg2)
-#define CALL_ARGS_3(arg1, arg2, arg3) \
- CALL_ARGS_2(arg1, arg2); \
- register unsigned long r4 asm("4") = (unsigned long)(arg3)
-#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
- CALL_ARGS_3(arg1, arg2, arg3); \
- register unsigned long r4 asm("5") = (unsigned long)(arg4)
-#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
- CALL_ARGS_4(arg1, arg2, arg3, arg4); \
- register unsigned long r4 asm("6") = (unsigned long)(arg5)
-
-#define CALL_FMT_0
-#define CALL_FMT_1 CALL_FMT_0, "0" (r2)
-#define CALL_FMT_2 CALL_FMT_1, "d" (r3)
-#define CALL_FMT_3 CALL_FMT_2, "d" (r4)
-#define CALL_FMT_4 CALL_FMT_3, "d" (r5)
-#define CALL_FMT_5 CALL_FMT_4, "d" (r6)
-
-#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
-#define CALL_CLOBBER_4 CALL_CLOBBER_5
-#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
-#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
-#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
-#define CALL_CLOBBER_0 CALL_CLOBBER_1
-
-#define CALL_ON_STACK(fn, stack, nr, args...) \
-({ \
- CALL_ARGS_##nr(args); \
- unsigned long prev; \
- \
- asm volatile( \
- " la %[_prev],0(15)\n" \
- " la 15,0(%[_stack])\n" \
- " stg %[_prev],%[_bc](15)\n" \
- " brasl 14,%[_fn]\n" \
- " la 15,0(%[_prev])\n" \
- : "+&d" (r2), [_prev] "=&a" (prev) \
- : [_stack] "a" (stack), \
- [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
- [_fn] "X" (fn) CALL_FMT_##nr : CALL_CLOBBER_##nr); \
- r2; \
-})
-
/*
* Give up the time slice of the virtual PU.
*/
@@ -339,10 +267,10 @@ static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
asm volatile(
" larl %0,1f\n"
- " stg %0,%O1+8(%R1)\n"
- " lpswe %1\n"
+ " stg %0,%1\n"
+ " lpswe %2\n"
"1:"
- : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
+ : "=&d" (addr), "=Q" (psw.addr) : "Q" (psw) : "memory", "cc");
}
/*
@@ -387,12 +315,12 @@ void enabled_wait(void);
/*
* Function to drop a processor into disabled wait state
*/
-static inline void __noreturn disabled_wait(unsigned long code)
+static inline void __noreturn disabled_wait(void)
{
psw_t psw;
psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
- psw.addr = code;
+ psw.addr = _THIS_IP_;
__load_psw(psw);
while (1);
}
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index ef4c9dec06a4..f577c5f6031a 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -79,6 +79,9 @@ struct sclp_info {
unsigned char has_kss : 1;
unsigned char has_gisaf : 1;
unsigned char has_diag318 : 1;
+ unsigned char has_sipl : 1;
+ unsigned char has_sipl_g2 : 1;
+ unsigned char has_dirq : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 7afe4620685c..42de04ad9c07 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -2,8 +2,20 @@
#ifndef _S390_SECTIONS_H
#define _S390_SECTIONS_H
+#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+
#include <asm-generic/sections.h>
+extern bool initmem_freed;
+
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+ if (!initmem_freed)
+ return 0;
+ return addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end;
+}
+
/*
* .boot.data section contains variables "shared" between the decompressor and
* the decompressed kernel. The decompressor will store values in them, and
@@ -16,4 +28,14 @@
*/
#define __bootdata(var) __section(.boot.data.var) var
+/*
+ * .boot.preserved.data is similar to .boot.data, but it is not part of the
+ * .init section and thus will be preserved for later use in the decompressed
+ * kernel.
+ */
+#define __bootdata_preserved(var) __section(.boot.preserved.data.var) var
+
+extern unsigned long __sdma, __edma;
+extern unsigned long __stext_dma, __etext_dma;
+
#endif
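/*
 * A minimal usage sketch with a hypothetical variable: the decompressor
 * stores into it via the shared section, and the decompressed kernel
 * can keep reading it after initmem has been freed.
 */
unsigned long __bootdata_preserved(example_preserved_value);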
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index efda97804aa4..925889d360c1 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -12,7 +12,10 @@
#define EP_OFFSET 0x10008
#define EP_STRING "S390EP"
#define PARMAREA 0x10400
-#define PARMAREA_END 0x11000
+#define EARLY_SCCB_OFFSET 0x11000
+#define HEAD_END 0x12000
+
+#define EARLY_SCCB_SIZE PAGE_SIZE
/*
* Machine features detected in early.c
@@ -65,6 +68,16 @@
#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET))
#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET))
+struct parmarea {
+ unsigned long ipl_device; /* 0x10400 */
+ unsigned long initrd_start; /* 0x10408 */
+ unsigned long initrd_size; /* 0x10410 */
+ unsigned long oldmem_base; /* 0x10418 */
+ unsigned long oldmem_size; /* 0x10420 */
+ char pad1[0x10480 - 0x10428]; /* 0x10428 - 0x10480 */
+ char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
+};
+
extern int noexec_disabled;
extern int memory_end_set;
extern unsigned long memory_end;
@@ -134,6 +147,12 @@ extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
+extern unsigned long __kaslr_offset;
+static inline unsigned long kaslr_offset(void)
+{
+ return __kaslr_offset;
+}
+
#else /* __ASSEMBLY__ */
#define IPL_DEVICE (IPL_DEVICE_OFFSET)
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
new file mode 100644
index 000000000000..49634bfbecdd
--- /dev/null
+++ b/arch/s390/include/asm/stacktrace.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_STACKTRACE_H
+#define _ASM_S390_STACKTRACE_H
+
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <asm/switch_to.h>
+
+enum stack_type {
+ STACK_TYPE_UNKNOWN,
+ STACK_TYPE_TASK,
+ STACK_TYPE_IRQ,
+ STACK_TYPE_NODAT,
+ STACK_TYPE_RESTART,
+};
+
+struct stack_info {
+ enum stack_type type;
+ unsigned long begin, end;
+};
+
+const char *stack_type_name(enum stack_type type);
+int get_stack_info(unsigned long sp, struct task_struct *task,
+ struct stack_info *info, unsigned long *visit_mask);
+
+static inline bool on_stack(struct stack_info *info,
+ unsigned long addr, size_t len)
+{
+ if (info->type == STACK_TYPE_UNKNOWN)
+ return false;
+ if (addr + len < addr)
+ return false;
+ return addr >= info->begin && addr + len < info->end;
+}
+
+static inline unsigned long get_stack_pointer(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if (regs)
+ return (unsigned long) kernel_stack_pointer(regs);
+ if (task == current)
+ return current_stack_pointer();
+ return (unsigned long) task->thread.ksp;
+}
+
+/*
+ * Stack layout of a C stack frame.
+ */
+#ifndef __PACK_STACK
+struct stack_frame {
+ unsigned long back_chain;
+ unsigned long empty1[5];
+ unsigned long gprs[10];
+ unsigned int empty2[8];
+};
+#else
+struct stack_frame {
+ unsigned long empty1[5];
+ unsigned int empty2[8];
+ unsigned long gprs[10];
+ unsigned long back_chain;
+};
+#endif
+
+#define CALL_ARGS_0() \
+ register unsigned long r2 asm("2")
+#define CALL_ARGS_1(arg1) \
+ register unsigned long r2 asm("2") = (unsigned long)(arg1)
+#define CALL_ARGS_2(arg1, arg2) \
+ CALL_ARGS_1(arg1); \
+ register unsigned long r3 asm("3") = (unsigned long)(arg2)
+#define CALL_ARGS_3(arg1, arg2, arg3) \
+ CALL_ARGS_2(arg1, arg2); \
+ register unsigned long r4 asm("4") = (unsigned long)(arg3)
+#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
+ CALL_ARGS_3(arg1, arg2, arg3); \
+ register unsigned long r4 asm("5") = (unsigned long)(arg4)
+#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
+ CALL_ARGS_4(arg1, arg2, arg3, arg4); \
+ register unsigned long r4 asm("6") = (unsigned long)(arg5)
+
+#define CALL_FMT_0 "=&d" (r2) :
+#define CALL_FMT_1 "+&d" (r2) :
+#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
+#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
+#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
+#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
+
+#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
+#define CALL_CLOBBER_4 CALL_CLOBBER_5
+#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
+#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
+#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
+#define CALL_CLOBBER_0 CALL_CLOBBER_1
+
+#define CALL_ON_STACK(fn, stack, nr, args...) \
+({ \
+ CALL_ARGS_##nr(args); \
+ unsigned long prev; \
+ \
+ asm volatile( \
+ " la %[_prev],0(15)\n" \
+ " la 15,0(%[_stack])\n" \
+ " stg %[_prev],%[_bc](15)\n" \
+ " brasl 14,%[_fn]\n" \
+ " la 15,0(%[_prev])\n" \
+ : [_prev] "=&a" (prev), CALL_FMT_##nr \
+ [_stack] "a" (stack), \
+ [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
+ [_fn] "X" (fn) : CALL_CLOBBER_##nr); \
+ r2; \
+})
+
+#endif /* _ASM_S390_STACKTRACE_H */
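/*
 * A minimal usage sketch; my_helper is hypothetical. CALL_ON_STACK
 * loads the given stack pointer into r15, chains the old frame via
 * back_chain, branches to the function with up to five arguments in
 * r2-r6 and yields the return value from r2.
 */
static inline int example_call(unsigned long arg1, unsigned long arg2)
{
	return (int) CALL_ON_STACK(my_helper, S390_lowcore.nodat_stack, 2,
				   arg1, arg2);
}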
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 96f9a9151fde..ab3407aa4fd8 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -14,13 +14,8 @@
#include <linux/err.h>
#include <asm/ptrace.h>
-/*
- * The syscall table always contains 32 bit pointers since we know that the
- * address of the function to be called is (way) below 4GB. So the "int"
- * type here is what we want [need] for both 32 bit and 64 bit systems.
- */
-extern const unsigned int sys_call_table[];
-extern const unsigned int sys_call_table_emu[];
+extern const unsigned long sys_call_table[];
+extern const unsigned long sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
@@ -56,40 +51,32 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
unsigned long mask = -1UL;
+ unsigned int n = 6;
- /*
- * No arguments for this syscall, there's nothing to do.
- */
- if (!n)
- return;
-
- BUG_ON(i + n > 6);
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT))
mask = 0xffffffff;
#endif
while (n-- > 0)
- if (i + n > 0)
- args[n] = regs->gprs[2 + i + n] & mask;
- if (i == 0)
- args[0] = regs->orig_gpr2 & mask;
+ if (n > 0)
+ args[n] = regs->gprs[2 + n] & mask;
+
+ args[0] = regs->orig_gpr2 & mask;
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- BUG_ON(i + n > 6);
+ unsigned int n = 6;
+
while (n-- > 0)
- if (i + n > 0)
- regs->gprs[2 + i + n] = args[n];
- if (i == 0)
- regs->orig_gpr2 = args[0];
+ if (n > 0)
+ regs->gprs[2 + n] = args[n];
+ regs->orig_gpr2 = args[0];
}
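/*
 * For reference, the mapping both loops implement: args[0] pairs with
 * orig_gpr2 (the saved first syscall argument) and args[1]..args[5]
 * pair with gprs[3]..gprs[7]; on the get side each value is masked to
 * 32 bits for 31-bit (TIF_31BIT) tasks.
 */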
static inline int syscall_get_arch(void)
diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h
index 5596c5c625d2..3c3d6fe8e2f0 100644
--- a/arch/s390/include/asm/syscall_wrapper.h
+++ b/arch/s390/include/asm/syscall_wrapper.h
@@ -119,8 +119,8 @@
"Type aliasing is used to sanitize syscall arguments");\
asmlinkage long __s390x_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_sys##name)))); \
- ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
- static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
+ long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
__S390_SYS_STUBx(x, name, __VA_ARGS__) \
asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b31c779cf581..aa406c05a350 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,98 +22,39 @@
* Pages used for the page tables are a different story. FIXME: more
*/
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-struct mmu_gather {
- struct mm_struct *mm;
- struct mmu_table_batch *batch;
- unsigned int fullmm;
- unsigned long start, end;
-};
-
-struct mmu_table_batch {
- struct rcu_head rcu;
- unsigned int nr;
- void *tables[0];
-};
-
-#define MAX_TABLE_BATCH \
- ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->start = start;
- tlb->end = end;
- tlb->fullmm = !(start | (end+1));
- tlb->batch = NULL;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- __tlb_flush_mm_lazy(tlb->mm);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- tlb_table_flush(tlb);
-}
-
+void __tlb_remove_table(void *_table);
+static inline void tlb_flush(struct mmu_gather *tlb);
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size);
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (force) {
- tlb->start = start;
- tlb->end = end;
- }
+#define tlb_flush tlb_flush
+#define pte_free_tlb pte_free_tlb
+#define pmd_free_tlb pmd_free_tlb
+#define p4d_free_tlb p4d_free_tlb
+#define pud_free_tlb pud_free_tlb
- tlb_flush_mmu(tlb);
-}
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/tlb.h>
/*
* Release the page cache reference for a pte removed by
* tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
* has already been freed, so just do free_page_and_swap_cache.
*/
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- free_page_and_swap_cache(page);
- return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- free_page_and_swap_cache(page);
-}
-
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
- return __tlb_remove_page(tlb, page);
+ free_page_and_swap_cache(page);
+ return false;
}
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
+static inline void tlb_flush(struct mmu_gather *tlb)
{
- return tlb_remove_page(tlb, page);
+ __tlb_flush_mm_lazy(tlb->mm);
}
/*
@@ -121,8 +62,17 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
* page table from the tlb.
*/
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
- unsigned long address)
+ unsigned long address)
{
+ __tlb_adjust_range(tlb, address, PAGE_SIZE);
+ tlb->mm->context.flush_mm = 1;
+ tlb->freed_tables = 1;
+ tlb->cleared_ptes = 1;
+ /*
+ * page_table_free_rcu takes care of the allocation bit masks
+ * of the 2K table fragments in the 4K page table page,
+ * then calls tlb_remove_table.
+ */
page_table_free_rcu(tlb, (unsigned long *) pte, address);
}
@@ -139,6 +89,10 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
if (mm_pmd_folded(tlb->mm))
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
+ __tlb_adjust_range(tlb, address, PAGE_SIZE);
+ tlb->mm->context.flush_mm = 1;
+ tlb->freed_tables = 1;
+ tlb->cleared_puds = 1;
tlb_remove_table(tlb, pmd);
}
@@ -154,6 +108,10 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
{
if (mm_p4d_folded(tlb->mm))
return;
+ __tlb_adjust_range(tlb, address, PAGE_SIZE);
+ tlb->mm->context.flush_mm = 1;
+ tlb->freed_tables = 1;
+ tlb->cleared_p4ds = 1;
tlb_remove_table(tlb, p4d);
}
@@ -169,21 +127,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
{
if (mm_pud_folded(tlb->mm))
return;
+ tlb->mm->context.flush_mm = 1;
+ tlb->freed_tables = 1;
+ tlb->cleared_puds = 1;
tlb_remove_table(tlb, pud);
}
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
-#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
-#define tlb_migrate_finish(mm) do { } while (0)
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
-}
#endif /* _S390_TLB_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 007fcb9aeeb8..bd2fd9a7821d 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -55,8 +55,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
+#endif
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
new file mode 100644
index 000000000000..6eb2ef105d87
--- /dev/null
+++ b/arch/s390/include/asm/unwind.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_UNWIND_H
+#define _ASM_S390_UNWIND_H
+
+#include <linux/sched.h>
+#include <linux/ftrace.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+
+/*
+ * To use the stack unwinder it has to be initialized with unwind_start.
+ * There four combinations for task and regs:
+ * 1) task==NULL, regs==NULL: the unwind starts for the task that is currently
+ * running, sp/ip picked up from the CPU registers
+ * 2) task==NULL, regs!=NULL: the unwind starts from the sp/ip found in
+ * the struct pt_regs of an interrupt frame for the current task
+ * 3) task!=NULL, regs==NULL: the unwind starts for an inactive task with
+ * the sp picked up from task->thread.ksp and the ip picked up from the
+ * return address stored by __switch_to
+ * 4) task!=NULL, regs!=NULL: the sp/ip are picked up from the interrupt
+ * frame 'regs' of an inactive task
+ * If 'first_frame' is not zero, unwind_start skips unwind frames until it
+ * reaches the specified stack pointer.
+ * The end of the unwinding is indicated with unwind_done; this can be true
+ * right after unwind_start, e.g. when a non-zero first_frame cannot be found.
+ * unwind_next_frame skips to the next frame.
+ * Once the unwind is completed unwind_error() can be used to check if there
+ * has been a situation where the unwinder could not correctly understand
+ * the task's call chain.
+ */
+
+struct unwind_state {
+ struct stack_info stack_info;
+ unsigned long stack_mask;
+ struct task_struct *task;
+ struct pt_regs *regs;
+ unsigned long sp, ip;
+ int graph_idx;
+ bool reliable;
+ bool error;
+};
+
+void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs, unsigned long first_frame);
+bool unwind_next_frame(struct unwind_state *state);
+unsigned long unwind_get_return_address(struct unwind_state *state);
+
+static inline bool unwind_done(struct unwind_state *state)
+{
+ return state->stack_info.type == STACK_TYPE_UNKNOWN;
+}
+
+static inline bool unwind_error(struct unwind_state *state)
+{
+ return state->error;
+}
+
+static inline void unwind_start(struct unwind_state *state,
+ struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long sp)
+{
+ sp = sp ? : get_stack_pointer(task, regs);
+ __unwind_start(state, task, regs, sp);
+}
+
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+{
+ return unwind_done(state) ? NULL : state->regs;
+}
+
+#define unwind_for_each_frame(state, task, regs, first_frame) \
+ for (unwind_start(state, task, regs, first_frame); \
+ !unwind_done(state); \
+ unwind_next_frame(state))
+
+static inline void unwind_init(void) {}
+static inline void unwind_module_init(struct module *mod, void *orc_ip,
+ size_t orc_ip_size, void *orc,
+ size_t orc_size) {}
+
+#ifdef CONFIG_KASAN
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+ unsigned long val; \
+ if (task == current) \
+ val = READ_ONCE(x); \
+ else \
+ val = READ_ONCE_NOCHECK(x); \
+ val; \
+})
+#else
+#define READ_ONCE_TASK_STACK(task, x) READ_ONCE(x)
+#endif
+
+#endif /* _ASM_S390_UNWIND_H */
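/*
 * A minimal usage sketch; the function name is hypothetical. With
 * regs==NULL and first_frame==0, unwind_start picks the sp up itself,
 * i.e. case 1) or 3) of the comment at the top of this file.
 */
static inline void example_dump_task(struct task_struct *task)
{
	struct unwind_state state;

	unwind_for_each_frame(&state, task, NULL, 0)
		printk("%c [<%016lx>] %pSR\n",
		       state.reliable ? ' ' : '?', state.ip, (void *) state.ip);
	if (unwind_error(&state))
		printk("unwind error\n");
}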
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
new file mode 100644
index 000000000000..ef3c00b049ab
--- /dev/null
+++ b/arch/s390/include/asm/uv.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ultravisor Interfaces
+ *
+ * Copyright IBM Corp. 2019
+ *
+ * Author(s):
+ * Vasily Gorbik <gor@linux.ibm.com>
+ * Janosch Frank <frankja@linux.ibm.com>
+ */
+#ifndef _ASM_S390_UV_H
+#define _ASM_S390_UV_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <asm/page.h>
+
+#define UVC_RC_EXECUTED 0x0001
+#define UVC_RC_INV_CMD 0x0002
+#define UVC_RC_INV_STATE 0x0003
+#define UVC_RC_INV_LEN 0x0005
+#define UVC_RC_NO_RESUME 0x0007
+
+#define UVC_CMD_QUI 0x0001
+#define UVC_CMD_SET_SHARED_ACCESS 0x1000
+#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001
+
+/* Bits in installed uv calls */
+enum uv_cmds_inst {
+ BIT_UVC_CMD_QUI = 0,
+ BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
+ BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
+};
+
+struct uv_cb_header {
+ u16 len;
+ u16 cmd; /* Command Code */
+ u16 rc; /* Response Code */
+ u16 rrc; /* Return Reason Code */
+} __packed __aligned(8);
+
+struct uv_cb_qui {
+ struct uv_cb_header header;
+ u64 reserved08;
+ u64 inst_calls_list[4];
+ u64 reserved30[15];
+} __packed __aligned(8);
+
+struct uv_cb_share {
+ struct uv_cb_header header;
+ u64 reserved08[3];
+ u64 paddr;
+ u64 reserved28;
+} __packed __aligned(8);
+
+static inline int uv_call(unsigned long r1, unsigned long r2)
+{
+ int cc;
+
+ asm volatile(
+ "0: .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
+ " brc 3,0b\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc)
+ : [r1] "a" (r1), [r2] "a" (r2)
+ : "memory", "cc");
+ return cc;
+}
+
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
+extern int prot_virt_guest;
+
+static inline int is_prot_virt_guest(void)
+{
+ return prot_virt_guest;
+}
+
+static inline int share(unsigned long addr, u16 cmd)
+{
+ struct uv_cb_share uvcb = {
+ .header.cmd = cmd,
+ .header.len = sizeof(uvcb),
+ .paddr = addr
+ };
+
+ if (!is_prot_virt_guest())
+ return -ENOTSUPP;
+ /*
+ * Sharing is page wise; if we encounter addresses that are
+ * not page aligned, we assume something went wrong. If
+ * malloced structs are passed to this function, we could leak
+ * data to the hypervisor.
+ */
+ BUG_ON(addr & ~PAGE_MASK);
+
+ if (!uv_call(0, (u64)&uvcb))
+ return 0;
+ return -EINVAL;
+}
+
+/*
+ * Guest 2 request to the Ultravisor to make a page shared with the
+ * hypervisor for IO.
+ *
+ * @addr: Real or absolute address of the page to be shared
+ */
+static inline int uv_set_shared(unsigned long addr)
+{
+ return share(addr, UVC_CMD_SET_SHARED_ACCESS);
+}
+
+/*
+ * Guest 2 request to the Ultravisor to make a page unshared.
+ *
+ * @addr: Real or absolute address of the page to be unshared
+ */
+static inline int uv_remove_shared(unsigned long addr)
+{
+ return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
+}
+
+void uv_query_info(void);
+#else
+#define is_prot_virt_guest() 0
+static inline int uv_set_shared(unsigned long addr) { return 0; }
+static inline int uv_remove_shared(unsigned long addr) { return 0; }
+static inline void uv_query_info(void) {}
+#endif
+
+#endif /* _ASM_S390_UV_H */
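/*
 * A minimal usage sketch; the function is hypothetical. It shares one
 * page-aligned buffer with the hypervisor for IO and withdraws the
 * page again afterwards.
 */
static inline int example_share_page(void *buf)
{
	int rc;

	rc = uv_set_shared(__pa(buf));	/* buf must be page aligned */
	if (rc)
		return rc;
	/* ... perform IO on the shared page ... */
	return uv_remove_shared(__pa(buf));
}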
diff --git a/arch/s390/include/asm/vmlinux.lds.h b/arch/s390/include/asm/vmlinux.lds.h
index 2d127f900352..cbe670a6861b 100644
--- a/arch/s390/include/asm/vmlinux.lds.h
+++ b/arch/s390/include/asm/vmlinux.lds.h
@@ -18,3 +18,16 @@
*(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.data*))) \
__boot_data_end = .; \
}
+
+/*
+ * .boot.preserved.data is similar to .boot.data, but it is not part of the
+ * .init section and thus will be preserved for later use in the decompressed
+ * kernel.
+ */
+#define BOOT_DATA_PRESERVED \
+ . = ALIGN(PAGE_SIZE); \
+ .boot.preserved.data : { \
+ __boot_data_preserved_start = .; \
+ *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.preserved.data*))) \
+ __boot_data_preserved_end = .; \
+ }
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 6b0f30b14642..46c1ff0b842a 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generated-y += unistd_64.h
-generic-y += socket.h
diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h
new file mode 100644
index 000000000000..fd32b1cd80d2
--- /dev/null
+++ b/arch/s390/include/uapi/asm/ipl.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_UAPI_IPL_H
+#define _ASM_S390_UAPI_IPL_H
+
+#include <linux/types.h>
+
+/* IPL Parameter List header */
+struct ipl_pl_hdr {
+ __u32 len;
+ __u8 flags;
+ __u8 reserved1[2];
+ __u8 version;
+} __packed;
+
+#define IPL_PL_FLAG_IPLPS 0x80
+#define IPL_PL_FLAG_SIPL 0x40
+#define IPL_PL_FLAG_IPLSR 0x20
+
+/* IPL Parameter Block header */
+struct ipl_pb_hdr {
+ __u32 len;
+ __u8 pbt;
+} __packed;
+
+/* IPL Parameter Block types */
+enum ipl_pbt {
+ IPL_PBT_FCP = 0,
+ IPL_PBT_SCP_DATA = 1,
+ IPL_PBT_CCW = 2,
+};
+
+/* IPL Parameter Block 0 with common fields */
+struct ipl_pb0_common {
+ __u32 len;
+ __u8 pbt;
+ __u8 flags;
+ __u8 reserved1[2];
+ __u8 loadparm[8];
+ __u8 reserved2[84];
+} __packed;
+
+#define IPL_PB0_FLAG_LOADPARM 0x80
+
+/* IPL Parameter Block 0 for FCP */
+struct ipl_pb0_fcp {
+ __u32 len;
+ __u8 pbt;
+ __u8 reserved1[3];
+ __u8 loadparm[8];
+ __u8 reserved2[304];
+ __u8 opt;
+ __u8 reserved3[3];
+ __u8 cssid;
+ __u8 reserved4[1];
+ __u16 devno;
+ __u8 reserved5[4];
+ __u64 wwpn;
+ __u64 lun;
+ __u32 bootprog;
+ __u8 reserved6[12];
+ __u64 br_lba;
+ __u32 scp_data_len;
+ __u8 reserved7[260];
+ __u8 scp_data[];
+} __packed;
+
+#define IPL_PB0_FCP_OPT_IPL 0x10
+#define IPL_PB0_FCP_OPT_DUMP 0x20
+
+/* IPL Parameter Block 0 for CCW */
+struct ipl_pb0_ccw {
+ __u32 len;
+ __u8 pbt;
+ __u8 flags;
+ __u8 reserved1[2];
+ __u8 loadparm[8];
+ __u8 reserved2[84];
+ __u16 reserved3 : 13;
+ __u8 ssid : 3;
+ __u16 devno;
+ __u8 vm_flags;
+ __u8 reserved4[3];
+ __u32 vm_parm_len;
+ __u8 nss_name[8];
+ __u8 vm_parm[64];
+ __u8 reserved5[8];
+} __packed;
+
+#define IPL_PB0_CCW_VM_FLAG_NSS 0x80
+#define IPL_PB0_CCW_VM_FLAG_VP 0x40
+
+/* IPL Parameter Block 1 for additional SCP data */
+struct ipl_pb1_scp_data {
+ __u32 len;
+ __u8 pbt;
+ __u8 scp_data[];
+} __packed;
+
+/* IPL Report List header */
+struct ipl_rl_hdr {
+ __u32 len;
+ __u8 flags;
+ __u8 reserved1[2];
+ __u8 version;
+ __u8 reserved2[8];
+} __packed;
+
+/* IPL Report Block header */
+struct ipl_rb_hdr {
+ __u32 len;
+ __u8 rbt;
+ __u8 reserved1[11];
+} __packed;
+
+/* IPL Report Block types */
+enum ipl_rbt {
+ IPL_RBT_CERTIFICATES = 1,
+ IPL_RBT_COMPONENTS = 2,
+};
+
+/* IPL Report Block for the certificate list */
+struct ipl_rb_certificate_entry {
+ __u64 addr;
+ __u64 len;
+} __packed;
+
+struct ipl_rb_certificates {
+ __u32 len;
+ __u8 rbt;
+ __u8 reserved1[11];
+ struct ipl_rb_certificate_entry entries[];
+} __packed;
+
+/* IPL Report Block for the component list */
+struct ipl_rb_component_entry {
+ __u64 addr;
+ __u64 len;
+ __u8 flags;
+ __u8 reserved1[5];
+ __u16 certificate_index;
+ __u8 reserved2[8];
+};
+
+#define IPL_RB_COMPONENT_FLAG_SIGNED 0x80
+#define IPL_RB_COMPONENT_FLAG_VERIFIED 0x40
+
+struct ipl_rb_components {
+ __u32 len;
+ __u8 rbt;
+ __u8 reserved1[11];
+ struct ipl_rb_component_entry entries[];
+} __packed;
+
+#endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 8a62c7f72e1b..b0478d01a0c5 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -39,6 +39,7 @@ CFLAGS_smp.o := -Wno-nonnull
#
CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
+CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
#
# Pass UTS_MACHINE for user_regset definition
@@ -51,7 +52,7 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
-obj-y += nospec-branch.o ipl_vmparm.o
+obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
extra-y += head64.o vmlinux.lds
@@ -77,6 +78,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
+obj-$(CONFIG_IMA) += ima_arch.o
+
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
@@ -86,7 +89,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o
# vdso
obj-y += vdso64/
-obj-$(CONFIG_COMPAT) += vdso32/
+obj-$(CONFIG_COMPAT_VDSO) += vdso32/
chkbss := head64.o early_nobss.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 164bec175628..41ac4ad21311 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
+#include <asm/stacktrace.h>
int main(void)
{
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f268fca67e82..2f39ea57f358 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -28,6 +28,7 @@ ENTRY(s390_base_mcck_handler)
1: la %r1,4095
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
lpswe __LC_MCK_OLD_PSW
+ENDPROC(s390_base_mcck_handler)
.section .bss
.align 8
@@ -48,6 +49,7 @@ ENTRY(s390_base_ext_handler)
1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
lpswe __LC_EXT_OLD_PSW
+ENDPROC(s390_base_ext_handler)
.section .bss
.align 8
@@ -68,6 +70,7 @@ ENTRY(s390_base_pgm_handler)
lmg %r0,%r15,__LC_SAVE_AREA_SYNC
lpswe __LC_PGM_OLD_PSW
1: lpswe disabled_wait_psw-0b(%r13)
+ENDPROC(s390_base_pgm_handler)
.align 8
disabled_wait_psw:
@@ -79,71 +82,3 @@ disabled_wait_psw:
s390_base_pgm_handler_fn:
.quad 0
.previous
-
-#
-# Calls diag 308 subcode 1 and continues execution
-#
-ENTRY(diag308_reset)
- larl %r4,.Lctlregs # Save control registers
- stctg %c0,%c15,0(%r4)
- lg %r2,0(%r4) # Disable lowcore protection
- nilh %r2,0xefff
- larl %r4,.Lctlreg0
- stg %r2,0(%r4)
- lctlg %c0,%c0,0(%r4)
- larl %r4,.Lfpctl # Floating point control register
- stfpc 0(%r4)
- larl %r4,.Lprefix # Save prefix register
- stpx 0(%r4)
- larl %r4,.Lprefix_zero # Set prefix register to 0
- spx 0(%r4)
- larl %r4,.Lcontinue_psw # Save PSW flags
- epsw %r2,%r3
- stm %r2,%r3,0(%r4)
- larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
- lghi %r3,0
- lg %r4,0(%r4) # Save PSW
- sturg %r4,%r3 # Use sturg, because of large pages
- lghi %r1,1
- lghi %r0,0
- diag %r0,%r1,0x308
-.Lrestart_part2:
- lhi %r0,0 # Load r0 with zero
- lhi %r1,2 # Use mode 2 = ESAME (dump)
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
- sam64 # Switch to 64 bit addressing mode
- larl %r4,.Lctlregs # Restore control registers
- lctlg %c0,%c15,0(%r4)
- larl %r4,.Lfpctl # Restore floating point ctl register
- lfpc 0(%r4)
- larl %r4,.Lprefix # Restore prefix register
- spx 0(%r4)
- larl %r4,.Lcontinue_psw # Restore PSW flags
- lpswe 0(%r4)
-.Lcontinue:
- BR_EX %r14
-.align 16
-.Lrestart_psw:
- .long 0x00080000,0x80000000 + .Lrestart_part2
-
- .section .data..nosave,"aw",@progbits
-.align 8
-.Lcontinue_psw:
- .quad 0,.Lcontinue
- .previous
-
- .section .bss
-.align 8
-.Lctlreg0:
- .quad 0
-.Lctlregs:
- .rept 16
- .quad 0
- .endr
-.Lfpctl:
- .long 0
-.Lprefix:
- .long 0
-.Lprefix_zero:
- .long 0
- .previous
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 97eae3871868..f96a5857bbfd 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -61,6 +61,9 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
struct save_area *sa;
sa = (void *) memblock_phys_alloc(sizeof(*sa), 8);
+ if (!sa)
+ panic("Failed to allocate save area\n");
+
if (is_boot_cpu)
list_add(&sa->list, &dump_save_areas);
else
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 7edaa733a77f..e9dac9a24d3f 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -13,6 +13,7 @@
#include <linux/debugfs.h>
#include <asm/diag.h>
#include <asm/trace/diag.h>
+#include <asm/sections.h>
struct diag_stat {
unsigned int counter[NR_DIAG_STAT];
@@ -49,6 +50,9 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
+struct diag_ops __bootdata_preserved(diag_dma_ops);
+struct diag210 *__bootdata_preserved(__diag210_tmp_dma);
+
static int show_diag_stat(struct seq_file *m, void *v)
{
struct diag_stat *stat;
@@ -139,30 +143,10 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
/*
* Diagnose 14: Input spool file manipulation
*/
-static inline int __diag14(unsigned long rx, unsigned long ry1,
- unsigned long subcode)
-{
- register unsigned long _ry1 asm("2") = ry1;
- register unsigned long _ry2 asm("3") = subcode;
- int rc = 0;
-
- asm volatile(
- " sam31\n"
- " diag %2,2,0x14\n"
- " sam64\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (rc), "+d" (_ry2)
- : "d" (rx), "d" (_ry1)
- : "cc");
-
- return rc;
-}
-
int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
{
diag_stat_inc(DIAG_STAT_X014);
- return __diag14(rx, ry1, subcode);
+ return diag_dma_ops.diag14(rx, ry1, subcode);
}
EXPORT_SYMBOL(diag14);
@@ -195,30 +179,17 @@ EXPORT_SYMBOL(diag204);
*/
int diag210(struct diag210 *addr)
{
- /*
- * diag 210 needs its data below the 2GB border, so we
- * use a static data area to be sure
- */
- static struct diag210 diag210_tmp;
static DEFINE_SPINLOCK(diag210_lock);
unsigned long flags;
int ccode;
spin_lock_irqsave(&diag210_lock, flags);
- diag210_tmp = *addr;
+ *__diag210_tmp_dma = *addr;
diag_stat_inc(DIAG_STAT_X210);
- asm volatile(
- " lhi %0,-1\n"
- " sam31\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1: sam64\n"
- EX_TABLE(0b, 1b)
- : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-
- *addr = diag210_tmp;
+ ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
+
+ *addr = *__diag210_tmp_dma;
spin_unlock_irqrestore(&diag210_lock, flags);
return ccode;
@@ -243,27 +214,9 @@ EXPORT_SYMBOL(diag224);
/*
* Diagnose 26C: Access Certain System Information
*/
-static inline int __diag26c(void *req, void *resp, enum diag26c_sc subcode)
-{
- register unsigned long _req asm("2") = (addr_t) req;
- register unsigned long _resp asm("3") = (addr_t) resp;
- register unsigned long _subcode asm("4") = subcode;
- register unsigned long _rc asm("5") = -EOPNOTSUPP;
-
- asm volatile(
- " sam31\n"
- " diag %[rx],%[ry],0x26c\n"
- "0: sam64\n"
- EX_TABLE(0b,0b)
- : "+d" (_rc)
- : [rx] "d" (_req), "d" (_resp), [ry] "d" (_subcode)
- : "cc", "memory");
- return _rc;
-}
-
int diag26c(void *req, void *resp, enum diag26c_sc subcode)
{
diag_stat_inc(DIAG_STAT_X26C);
- return __diag26c(req, resp, subcode);
+ return diag_dma_ops.diag26c(req, resp, subcode);
}
EXPORT_SYMBOL(diag26c);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index cb7f55bbe06e..9e87b68be21c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -21,95 +21,124 @@
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>
+#include <asm/unwind.h>
-/*
- * For dump_trace we have tree different stack to consider:
- * - the panic stack which is used if the kernel stack has overflown
- * - the asynchronous interrupt stack (cpu related)
- * - the synchronous kernel stack (process related)
- * The stack trace can start at any of the three stacks and can potentially
- * touch all of them. The order is: panic stack, async stack, sync stack.
- */
-static unsigned long __no_sanitize_address
-__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
- unsigned long low, unsigned long high)
+const char *stack_type_name(enum stack_type type)
{
- struct stack_frame *sf;
- struct pt_regs *regs;
-
- while (1) {
- if (sp < low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- if (func(data, sf->gprs[8], 0))
- return sp;
- /* Follow the backchain. */
- while (1) {
- low = sp;
- sp = sf->back_chain;
- if (!sp)
- break;
- if (sp <= low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- if (func(data, sf->gprs[8], 1))
- return sp;
- }
- /* Zero backchain detected, check for interrupt frame. */
- sp = (unsigned long) (sf + 1);
- if (sp <= low || sp > high - sizeof(*regs))
- return sp;
- regs = (struct pt_regs *) sp;
- if (!user_mode(regs)) {
- if (func(data, regs->psw.addr, 1))
- return sp;
- }
- low = sp;
- sp = regs->gprs[15];
+ switch (type) {
+ case STACK_TYPE_TASK:
+ return "task";
+ case STACK_TYPE_IRQ:
+ return "irq";
+ case STACK_TYPE_NODAT:
+ return "nodat";
+ case STACK_TYPE_RESTART:
+ return "restart";
+ default:
+ return "unknown";
}
}
-void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
- unsigned long sp)
+static inline bool in_stack(unsigned long sp, struct stack_info *info,
+ enum stack_type type, unsigned long low,
+ unsigned long high)
+{
+ if (sp < low || sp >= high)
+ return false;
+ info->type = type;
+ info->begin = low;
+ info->end = high;
+ return true;
+}
+
+static bool in_task_stack(unsigned long sp, struct task_struct *task,
+ struct stack_info *info)
+{
+ unsigned long stack;
+
+ stack = (unsigned long) task_stack_page(task);
+ return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE);
+}
+
+static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long frame_size;
+ unsigned long frame_size, top;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-#ifdef CONFIG_CHECK_STACK
- sp = __dump_trace(func, data, sp,
- S390_lowcore.nodat_stack + frame_size - THREAD_SIZE,
- S390_lowcore.nodat_stack + frame_size);
-#endif
- sp = __dump_trace(func, data, sp,
- S390_lowcore.async_stack + frame_size - THREAD_SIZE,
- S390_lowcore.async_stack + frame_size);
- task = task ?: current;
- __dump_trace(func, data, sp,
- (unsigned long)task_stack_page(task),
- (unsigned long)task_stack_page(task) + THREAD_SIZE);
+ top = S390_lowcore.async_stack + frame_size;
+ return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top);
+}
+
+static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
+{
+ unsigned long frame_size, top;
+
+ frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ top = S390_lowcore.nodat_stack + frame_size;
+ return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
}
-EXPORT_SYMBOL_GPL(dump_trace);
-static int show_address(void *data, unsigned long address, int reliable)
+static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
- if (reliable)
- printk(" [<%016lx>] %pSR \n", address, (void *)address);
- else
- printk("([<%016lx>] %pSR)\n", address, (void *)address);
+ unsigned long frame_size, top;
+
+ frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ top = S390_lowcore.restart_stack + frame_size;
+ return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top);
+}
+
+int get_stack_info(unsigned long sp, struct task_struct *task,
+ struct stack_info *info, unsigned long *visit_mask)
+{
+ if (!sp)
+ goto unknown;
+
+ task = task ? : current;
+
+ /* Check per-task stack */
+ if (in_task_stack(sp, task, info))
+ goto recursion_check;
+
+ if (task != current)
+ goto unknown;
+
+ /* Check per-cpu stacks */
+ if (!in_irq_stack(sp, info) &&
+ !in_nodat_stack(sp, info) &&
+ !in_restart_stack(sp, info))
+ goto unknown;
+
+recursion_check:
+ /*
+ * Make sure we don't iterate through any given stack more than once.
+ * If it comes up a second time then there's something wrong going on:
+ * just break out and report an unknown stack type.
+ */
+ if (*visit_mask & (1UL << info->type)) {
+ printk_deferred_once(KERN_WARNING
+ "WARNING: stack recursion on stack type %d\n",
+ info->type);
+ goto unknown;
+ }
+ *visit_mask |= 1UL << info->type;
return 0;
+unknown:
+ info->type = STACK_TYPE_UNKNOWN;
+ return -EINVAL;
}
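/*
 * Worked example for the visit_mask above: walking from the IRQ stack
 * (STACK_TYPE_IRQ) sets bit 1UL << STACK_TYPE_IRQ; if a corrupt back
 * chain later points into the IRQ stack again, the second lookup hits
 * the mask and the walk ends with STACK_TYPE_UNKNOWN instead of
 * looping forever.
 */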
void show_stack(struct task_struct *task, unsigned long *stack)
{
- unsigned long sp = (unsigned long) stack;
+ struct unwind_state state;
- if (!sp)
- sp = task ? task->thread.ksp : current_stack_pointer();
printk("Call Trace:\n");
- dump_trace(show_address, NULL, task, sp);
if (!task)
task = current;
- debug_show_held_locks(task);
+ unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
+ printk(state.reliable ? " [<%016lx>] %pSR \n" :
+ "([<%016lx>] %pSR)\n",
+ state.ip, (void *) state.ip);
+ debug_show_held_locks(task ? : current);
}
static void show_last_breaking_event(struct pt_regs *regs)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d6edf45f93b9..629f173f60cd 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -30,6 +30,7 @@
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
+#include <asm/pci_insn.h>
#include "entry.h"
/*
@@ -138,9 +139,9 @@ static void early_pgm_check_handler(void)
unsigned long addr;
addr = S390_lowcore.program_old_psw.addr;
- fixup = search_exception_tables(addr);
+ fixup = s390_search_extables(addr);
if (!fixup)
- disabled_wait(0);
+ disabled_wait();
/* Disable low address protection before storing into lowcore. */
__ctl_store(cr0, 0, 0);
cr0_new = cr0 & ~(1UL << 28);
@@ -235,6 +236,7 @@ static __init void detect_machine_facilities(void)
clock_comparator_max = -1ULL >> 1;
__ctl_set_bit(0, 53);
}
+ enable_mio_ctl();
}
static inline void save_vector_registers(void)
@@ -296,7 +298,7 @@ static void __init check_image_bootable(void)
sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
- disabled_wait(0xbadb007);
+ disabled_wait();
}
void __init startup_init(void)
@@ -309,7 +311,6 @@ void __init startup_init(void)
setup_facility_list();
detect_machine_type();
setup_arch_string();
- ipl_store_parameters();
setup_boot_command_line();
detect_diag9c();
detect_diag44();
diff --git a/arch/s390/kernel/early_nobss.c b/arch/s390/kernel/early_nobss.c
index 8d73f7fae16e..52a3ef959341 100644
--- a/arch/s390/kernel/early_nobss.c
+++ b/arch/s390/kernel/early_nobss.c
@@ -25,7 +25,7 @@ static void __init reset_tod_clock(void)
return;
/* TOD clock not running. Set the clock to Unix Epoch. */
if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
- disabled_wait(0);
+ disabled_wait();
memset(tod_clock_base, 0, 16);
*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 583d65ef5007..3f4d272577d3 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -224,6 +224,7 @@ ENTRY(__bpon)
.globl __bpon
BPON
BR_EX %r14
+ENDPROC(__bpon)
/*
* Scheduler resume function, called by switch_to
@@ -248,6 +249,7 @@ ENTRY(__switch_to)
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
BR_EX %r14
+ENDPROC(__switch_to)
.L__critical_start:
@@ -324,6 +326,7 @@ sie_exit:
EX_TABLE(.Lrewind_pad4,.Lsie_fault)
EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
+ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
@@ -358,19 +361,19 @@ ENTRY(system_call)
# load address of system call table
lg %r10,__THREAD_sysc_table(%r13,%r12)
llgh %r8,__PT_INT_CODE+2(%r11)
- slag %r8,%r8,2 # shift and test for svc 0
+ slag %r8,%r8,3 # shift and test for svc 0
jnz .Lsysc_nr_ok
# svc 0: system call number in %r1
llgfr %r1,%r1 # clear high word in r1
cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok
sth %r1,__PT_INT_CODE+2(%r11)
- slag %r8,%r1,2
+ slag %r8,%r1,3
.Lsysc_nr_ok:
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lgf %r9,0(%r8,%r10) # get system call add.
+ lg %r9,0(%r8,%r10) # get system call add.
TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
BASR_EX %r14,%r9 # call sys_xxxx
@@ -556,8 +559,8 @@ ENTRY(system_call)
lghi %r0,NR_syscalls
clgr %r0,%r2
jnh .Lsysc_tracenogo
- sllg %r8,%r2,2
- lgf %r9,0(%r8,%r10)
+ sllg %r8,%r2,3
+ lg %r9,0(%r8,%r10)
.Lsysc_tracego:
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -570,6 +573,7 @@ ENTRY(system_call)
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,.Lsysc_return
jg do_syscall_trace_exit
+ENDPROC(system_call)
#
# a new process exits the kernel with ret_from_fork
@@ -584,10 +588,16 @@ ENTRY(ret_from_fork)
jne .Lsysc_tracenogo
# it's a kernel thread
lmg %r9,%r10,__PT_R9(%r11) # load gprs
+ la %r2,0(%r10)
+ BASR_EX %r14,%r9
+ j .Lsysc_tracenogo
+ENDPROC(ret_from_fork)
+
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
BASR_EX %r14,%r9
j .Lsysc_tracenogo
+ENDPROC(kernel_thread_starter)
/*
* Program check handler routine
@@ -665,9 +675,9 @@ ENTRY(pgm_check_handler)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
nill %r10,0x007f
- sll %r10,2
+ sll %r10,3
je .Lpgm_return
- lgf %r9,0(%r10,%r1) # load address of handler routine
+ lg %r9,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
BASR_EX %r14,%r9 # branch to interrupt-handler
.Lpgm_return:
@@ -698,6 +708,7 @@ ENTRY(pgm_check_handler)
stg %r14,__LC_RETURN_PSW+8
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
+ENDPROC(pgm_check_handler)
/*
* IO interrupt handler routine
@@ -926,6 +937,7 @@ ENTRY(io_int_handler)
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
+ENDPROC(io_int_handler)
/*
* External interrupt handler routine
@@ -965,6 +977,7 @@ ENTRY(ext_int_handler)
lghi %r3,EXT_INTERRUPT
brasl %r14,do_IRQ
j .Lio_return
+ENDPROC(ext_int_handler)
/*
* Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
@@ -989,6 +1002,7 @@ ENTRY(psw_idle)
lpswe __SF_EMPTY(%r15)
BR_EX %r14
.Lpsw_idle_end:
+ENDPROC(psw_idle)
/*
* Store floating-point controls and floating-point or vector register
@@ -1031,6 +1045,7 @@ ENTRY(save_fpu_regs)
.Lsave_fpu_regs_exit:
BR_EX %r14
.Lsave_fpu_regs_end:
+ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)
/*
@@ -1077,6 +1092,7 @@ load_fpu_regs:
.Lload_fpu_regs_exit:
BR_EX %r14
.Lload_fpu_regs_end:
+ENDPROC(load_fpu_regs)
.L__critical_end:
@@ -1206,6 +1222,7 @@ ENTRY(mcck_int_handler)
lg %r15,__LC_NODAT_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
+ENDPROC(mcck_int_handler)
#
# PSW restart interrupt handler
@@ -1232,6 +1249,7 @@ ENTRY(restart_int_handler)
2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
brc 2,2b
3: j 3b
+ENDPROC(restart_int_handler)
.section .kprobes.text, "ax"
@@ -1241,7 +1259,7 @@ ENTRY(restart_int_handler)
* No need to properly save the registers; we are going to panic anyway.
* Set up a pt_regs so that show_trace can provide a good call trace.
*/
-stack_overflow:
+ENTRY(stack_overflow)
lg %r15,__LC_NODAT_STACK # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
@@ -1251,9 +1269,10 @@ stack_overflow:
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
+ENDPROC(stack_overflow)
#endif
-cleanup_critical:
+ENTRY(cleanup_critical)
#if IS_ENABLED(CONFIG_KVM)
clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
jl 0f
@@ -1289,6 +1308,7 @@ cleanup_critical:
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
0: BR_EX %r14,%r11
+ENDPROC(cleanup_critical)
.align 8
.Lcleanup_table:
@@ -1512,7 +1532,7 @@ cleanup_critical:
.quad .Lsie_skip - .Lsie_entry
#endif
.section .rodata, "a"
-#define SYSCALL(esame,emu) .long __s390x_ ## esame
+#define SYSCALL(esame,emu) .quad __s390x_ ## esame
.globl sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
@@ -1520,7 +1540,7 @@ sys_call_table:
#ifdef CONFIG_COMPAT
-#define SYSCALL(esame,emu) .long __s390_ ## emu
+#define SYSCALL(esame,emu) .quad __s390_ ## emu
.globl sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index c3816ae108b0..20420c2b8a14 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -65,7 +65,7 @@ int setup_profiling_timer(unsigned int multiplier);
void __init time_init(void);
int pfn_is_nosave(unsigned long);
void s390_early_resume(void);
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long sp, unsigned long ip);
struct s390_mmap_arg_struct;
struct fadvise64_64_args;
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index 594464f2129d..0da378e2eb25 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -23,7 +23,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
if (flags & KERNEL_FPC)
/* Save floating point control */
- asm volatile("stfpc %0" : "=m" (state->fpc));
+ asm volatile("stfpc %0" : "=Q" (state->fpc));
if (!MACHINE_HAS_VX) {
if (flags & KERNEL_VXR_V0V7) {
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 39b13d71a8fe..1bb85f60c0dd 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -201,17 +201,18 @@ device_initcall(ftrace_plt_init);
* Hook the return address and push it in the stack of return addresses
* in current thread info.
*/
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
+unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
+ unsigned long ip)
{
if (unlikely(ftrace_graph_is_dead()))
goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
ip -= MCOUNT_INSN_SIZE;
- if (!function_graph_enter(parent, ip, 0, NULL))
- parent = (unsigned long) return_to_handler;
+ if (!function_graph_enter(ra, ip, 0, (void *) sp))
+ ra = (unsigned long) return_to_handler;
out:
- return parent;
+ return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 56491e636eab..5aea1a527443 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -26,7 +26,6 @@ ENTRY(startup_continue)
0: larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK
larl %r13,.LPG1 # get base
- lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
larl %r0,boot_vdso_data
stg %r0,__LC_VDSO_PER_CPU
#
@@ -61,22 +60,6 @@ ENTRY(startup_continue)
.align 16
.LPG1:
-.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
- .quad 0 # cr1: primary space segment table
- .quad .Lduct # cr2: dispatchable unit control table
- .quad 0 # cr3: instruction authorization
- .quad 0xffff # cr4: instruction authorization
- .quad .Lduct # cr5: primary-aste origin
- .quad 0 # cr6: I/O interrupts
- .quad 0 # cr7: secondary space segment table
- .quad 0 # cr8: access registers translation
- .quad 0 # cr9: tracing off
- .quad 0 # cr10: tracing off
- .quad 0 # cr11: tracing off
- .quad 0 # cr12: tracing off
- .quad 0 # cr13: home space segment table
- .quad 0xc0000000 # cr14: machine check handling off
- .quad .Llinkage_stack # cr15: linkage stack operations
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -84,14 +67,5 @@ ENTRY(startup_continue)
.Lparmaddr:
.quad PARMAREA
.align 64
-.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
- .long 0,0,0,0,0,0,0,0
-.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
- .align 128
-.Lduald:.rept 8
- .long 0x80000000,0,0,0 # invalid access-list entries
- .endr
-.Llinkage_stack:
- .long 0,0,0x89000000,0,0,0,0x8a000000,0
.Ldw: .quad 0x0002000180000000,0x0000000000000000
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/ima_arch.c b/arch/s390/kernel/ima_arch.c
new file mode 100644
index 000000000000..f3c3e6e1c5d3
--- /dev/null
+++ b/arch/s390/kernel/ima_arch.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ima.h>
+#include <asm/boot_data.h>
+
+bool arch_ima_get_secureboot(void)
+{
+ return ipl_secure_flag;
+}
+
+const char * const *arch_get_ima_policy(void)
+{
+ return NULL;
+}
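A minimal sketch (not part of this patch; the caller name is
hypothetical) of how generic IMA code can consume these two hooks:

	static void example_apply_arch_policy(void)
	{
		const char * const *rules = arch_get_ima_policy();

		if (!arch_ima_get_secureboot())
			return;	/* nothing to enforce without secure IPL */
		for (; rules && *rules; rules++)
			pr_info("ima: arch policy rule: %s\n", *rules);
	}

On s390 arch_get_ima_policy() returns NULL for now, so only the secure
boot state is exported.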
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 18a5d6317acc..d836af3ccc38 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -31,6 +31,7 @@
#include <asm/os_info.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
+#include <asm/uv.h>
#include "entry.h"
#define IPL_PARM_BLOCK_VERSION 0
@@ -119,11 +120,15 @@ static char *dump_type_str(enum dump_type type)
}
}
-struct ipl_parameter_block __bootdata(early_ipl_block);
-int __bootdata(early_ipl_block_valid);
+int __bootdata_preserved(ipl_block_valid);
+struct ipl_parameter_block __bootdata_preserved(ipl_block);
+int __bootdata_preserved(ipl_secure_flag);
-static int ipl_block_valid;
-static struct ipl_parameter_block ipl_block;
+unsigned long __bootdata_preserved(ipl_cert_list_addr);
+unsigned long __bootdata_preserved(ipl_cert_list_size);
+
+unsigned long __bootdata(early_ipl_comp_list_addr);
+unsigned long __bootdata(early_ipl_comp_list_size);
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
@@ -246,11 +251,11 @@ static __init enum ipl_type get_ipl_type(void)
if (!ipl_block_valid)
return IPL_TYPE_UNKNOWN;
- switch (ipl_block.hdr.pbt) {
- case DIAG308_IPL_TYPE_CCW:
+ switch (ipl_block.pb0_hdr.pbt) {
+ case IPL_PBT_CCW:
return IPL_TYPE_CCW;
- case DIAG308_IPL_TYPE_FCP:
- if (ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
+ case IPL_PBT_FCP:
+ if (ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP)
return IPL_TYPE_FCP_DUMP;
else
return IPL_TYPE_FCP;
@@ -269,12 +274,35 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
+static ssize_t ipl_secure_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%i\n", !!ipl_secure_flag);
+}
+
+static struct kobj_attribute sys_ipl_secure_attr =
+ __ATTR(secure, 0444, ipl_secure_show, NULL);
+
+static ssize_t ipl_has_secure_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ if (MACHINE_IS_LPAR)
+ return sprintf(page, "%i\n", !!sclp.has_sipl);
+ else if (MACHINE_IS_VM)
+ return sprintf(page, "%i\n", !!sclp.has_sipl_g2);
+ else
+ return sprintf(page, "%i\n", 0);
+}
+
+static struct kobj_attribute sys_ipl_has_secure_attr =
+ __ATTR(has_secure, 0444, ipl_has_secure_show, NULL);
+
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
- if (ipl_block_valid && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
+ if (ipl_block_valid && (ipl_block.pb0_hdr.pbt == IPL_PBT_CCW))
ipl_block_get_ascii_vmparm(parm, sizeof(parm), &ipl_block);
return sprintf(page, "%s\n", parm);
}
@@ -287,12 +315,11 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
{
switch (ipl_info.type) {
case IPL_TYPE_CCW:
- return sprintf(page, "0.%x.%04x\n", ipl_block.ipl_info.ccw.ssid,
- ipl_block.ipl_info.ccw.devno);
+ return sprintf(page, "0.%x.%04x\n", ipl_block.ccw.ssid,
+ ipl_block.ccw.devno);
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
- return sprintf(page, "0.0.%04x\n",
- ipl_block.ipl_info.fcp.devno);
+ return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno);
default:
return 0;
}
@@ -316,8 +343,8 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- unsigned int size = ipl_block.ipl_info.fcp.scp_data_len;
- void *scp_data = &ipl_block.ipl_info.fcp.scp_data;
+ unsigned int size = ipl_block.fcp.scp_data_len;
+ void *scp_data = &ipl_block.fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
@@ -333,13 +360,13 @@ static struct bin_attribute *ipl_fcp_bin_attrs[] = {
/* FCP ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n",
- (unsigned long long)ipl_block.ipl_info.fcp.wwpn);
+ (unsigned long long)ipl_block.fcp.wwpn);
DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n",
- (unsigned long long)ipl_block.ipl_info.fcp.lun);
+ (unsigned long long)ipl_block.fcp.lun);
DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n",
- (unsigned long long)ipl_block.ipl_info.fcp.bootprog);
+ (unsigned long long)ipl_block.fcp.bootprog);
DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n",
- (unsigned long long)ipl_block.ipl_info.fcp.br_lba);
+ (unsigned long long)ipl_block.fcp.br_lba);
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
@@ -365,6 +392,8 @@ static struct attribute *ipl_fcp_attrs[] = {
&sys_ipl_fcp_bootprog_attr.attr,
&sys_ipl_fcp_br_lba_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -380,6 +409,8 @@ static struct attribute *ipl_ccw_attrs_vm[] = {
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -387,6 +418,8 @@ static struct attribute *ipl_ccw_attrs_lpar[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -495,14 +528,14 @@ static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
return -EINVAL;
- memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
- ipb->ipl_info.ccw.vm_parm_len = ip_len;
+ memset(ipb->ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
+ ipb->ccw.vm_parm_len = ip_len;
if (ip_len > 0) {
- ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
- memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len);
- ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len);
+ ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP;
+ memcpy(ipb->ccw.vm_parm, buf, ip_len);
+ ASCEBC(ipb->ccw.vm_parm, ip_len);
} else {
- ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID;
+ ipb->ccw.vm_flags &= ~IPL_PB0_CCW_VM_FLAG_VP;
}
return len;
@@ -549,8 +582,8 @@ static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
- size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len;
- void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data;
+ size_t size = reipl_block_fcp->fcp.scp_data_len;
+ void *scp_data = reipl_block_fcp->fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
@@ -566,17 +599,17 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
if (off)
return -EINVAL;
- memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
+ memcpy(reipl_block_fcp->fcp.scp_data, buf, count);
if (scpdata_len % 8) {
padding = 8 - (scpdata_len % 8);
- memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
+ memset(reipl_block_fcp->fcp.scp_data + scpdata_len,
0, padding);
scpdata_len += padding;
}
- reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len;
- reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len;
- reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len;
+ reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
+ reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN + scpdata_len;
+ reipl_block_fcp->fcp.scp_data_len = scpdata_len;
return count;
}
@@ -590,20 +623,20 @@ static struct bin_attribute *reipl_fcp_bin_attrs[] = {
};
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
- reipl_block_fcp->ipl_info.fcp.wwpn);
+ reipl_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
- reipl_block_fcp->ipl_info.fcp.lun);
+ reipl_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
- reipl_block_fcp->ipl_info.fcp.bootprog);
+ reipl_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
- reipl_block_fcp->ipl_info.fcp.br_lba);
+ reipl_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
- reipl_block_fcp->ipl_info.fcp.devno);
+ reipl_block_fcp->fcp.devno);
static void reipl_get_ascii_loadparm(char *loadparm,
struct ipl_parameter_block *ibp)
{
- memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN);
+ memcpy(loadparm, ibp->common.loadparm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
loadparm[LOADPARM_LEN] = 0;
strim(loadparm);
@@ -638,11 +671,11 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
return -EINVAL;
}
/* initialize loadparm with blanks */
- memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN);
+ memset(ipb->common.loadparm, ' ', LOADPARM_LEN);
/* copy and convert to ebcdic */
- memcpy(ipb->hdr.loadparm, buf, lp_len);
- ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
- ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
+ memcpy(ipb->common.loadparm, buf, lp_len);
+ ASCEBC(ipb->common.loadparm, LOADPARM_LEN);
+ ipb->common.flags |= IPL_PB0_FLAG_LOADPARM;
return len;
}
@@ -680,7 +713,7 @@ static struct attribute_group reipl_fcp_attr_group = {
};
/* CCW reipl device attributes */
-DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
+DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
/* NSS wrapper */
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
@@ -742,7 +775,7 @@ static struct attribute_group reipl_ccw_attr_group_lpar = {
static void reipl_get_ascii_nss_name(char *dst,
struct ipl_parameter_block *ipb)
{
- memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
+ memcpy(dst, ipb->ccw.nss_name, NSS_NAME_SIZE);
EBCASC(dst, NSS_NAME_SIZE);
dst[NSS_NAME_SIZE] = 0;
}
@@ -770,16 +803,14 @@ static ssize_t reipl_nss_name_store(struct kobject *kobj,
if (nss_len > NSS_NAME_SIZE)
return -EINVAL;
- memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE);
+ memset(reipl_block_nss->ccw.nss_name, 0x40, NSS_NAME_SIZE);
if (nss_len > 0) {
- reipl_block_nss->ipl_info.ccw.vm_flags |=
- DIAG308_VM_FLAGS_NSS_VALID;
- memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, nss_len);
- ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
- EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
+ reipl_block_nss->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_NSS;
+ memcpy(reipl_block_nss->ccw.nss_name, buf, nss_len);
+ ASCEBC(reipl_block_nss->ccw.nss_name, nss_len);
+ EBC_TOUPPER(reipl_block_nss->ccw.nss_name, nss_len);
} else {
- reipl_block_nss->ipl_info.ccw.vm_flags &=
- ~DIAG308_VM_FLAGS_NSS_VALID;
+ reipl_block_nss->ccw.vm_flags &= ~IPL_PB0_CCW_VM_FLAG_NSS;
}
return len;
@@ -866,15 +897,21 @@ static void __reipl_run(void *unused)
{
switch (reipl_type) {
case IPL_TYPE_CCW:
+ uv_set_shared(__pa(reipl_block_ccw));
diag308(DIAG308_SET, reipl_block_ccw);
+ uv_remove_shared(__pa(reipl_block_ccw));
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_FCP:
+ uv_set_shared(__pa(reipl_block_fcp));
diag308(DIAG308_SET, reipl_block_fcp);
+ uv_remove_shared(__pa(reipl_block_fcp));
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_NSS:
+ uv_set_shared(__pa(reipl_block_nss));
diag308(DIAG308_SET, reipl_block_nss);
+ uv_remove_shared(__pa(reipl_block_nss));
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_UNKNOWN:
@@ -883,7 +920,7 @@ static void __reipl_run(void *unused)
case IPL_TYPE_FCP_DUMP:
break;
}
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
}
static void reipl_run(struct shutdown_trigger *trigger)
@@ -893,10 +930,10 @@ static void reipl_run(struct shutdown_trigger *trigger)
static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
{
- ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
+ ipb->hdr.len = IPL_BP_CCW_LEN;
ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
- ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
- ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+ ipb->pb0_hdr.len = IPL_BP0_CCW_LEN;
+ ipb->pb0_hdr.pbt = IPL_PBT_CCW;
}
static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
@@ -904,21 +941,20 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
/* LOADPARM */
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
- memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
+ memcpy(ipb->ccw.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
else
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
- memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN);
- ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
+ memset(ipb->ccw.loadparm, 0x40, LOADPARM_LEN);
+ ipb->ccw.flags = IPL_PB0_FLAG_LOADPARM;
/* VM PARM */
if (MACHINE_IS_VM && ipl_block_valid &&
- (ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
+ (ipl_block.ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP)) {
- ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
- ipb->ipl_info.ccw.vm_parm_len =
- ipl_block.ipl_info.ccw.vm_parm_len;
- memcpy(ipb->ipl_info.ccw.vm_parm,
- ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE);
+ ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP;
+ ipb->ccw.vm_parm_len = ipl_block.ccw.vm_parm_len;
+ memcpy(ipb->ccw.vm_parm,
+ ipl_block.ccw.vm_parm, DIAG308_VMPARM_SIZE);
}
}
@@ -958,8 +994,8 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw_init(reipl_block_ccw);
if (ipl_info.type == IPL_TYPE_CCW) {
- reipl_block_ccw->ipl_info.ccw.ssid = ipl_block.ipl_info.ccw.ssid;
- reipl_block_ccw->ipl_info.ccw.devno = ipl_block.ipl_info.ccw.devno;
+ reipl_block_ccw->ccw.ssid = ipl_block.ccw.ssid;
+ reipl_block_ccw->ccw.devno = ipl_block.ccw.devno;
reipl_block_ccw_fill_parms(reipl_block_ccw);
}
@@ -997,14 +1033,14 @@ static int __init reipl_fcp_init(void)
* is invalid in the SCSI IPL parameter block, so take it
* always from sclp_ipl_info.
*/
- memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm,
+ memcpy(reipl_block_fcp->fcp.loadparm, sclp_ipl_info.loadparm,
LOADPARM_LEN);
} else {
- reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
+ reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
- reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
- reipl_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
- reipl_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_IPL;
+ reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN;
+ reipl_block_fcp->fcp.pbt = IPL_PBT_FCP;
+ reipl_block_fcp->fcp.opt = IPL_PB0_FCP_OPT_IPL;
}
reipl_capabilities |= IPL_TYPE_FCP;
return 0;
@@ -1022,10 +1058,10 @@ static int __init reipl_type_init(void)
/*
* If we have an OS info reipl block, this will be used
*/
- if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_FCP) {
+ if (reipl_block->pb0_hdr.pbt == IPL_PBT_FCP) {
memcpy(reipl_block_fcp, reipl_block, size);
reipl_type = IPL_TYPE_FCP;
- } else if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_CCW) {
+ } else if (reipl_block->pb0_hdr.pbt == IPL_PBT_CCW) {
memcpy(reipl_block_ccw, reipl_block, size);
reipl_type = IPL_TYPE_CCW;
}
@@ -1070,15 +1106,15 @@ static struct shutdown_action __refdata reipl_action = {
/* FCP dump device attributes */
DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
- dump_block_fcp->ipl_info.fcp.wwpn);
+ dump_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
- dump_block_fcp->ipl_info.fcp.lun);
+ dump_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
- dump_block_fcp->ipl_info.fcp.bootprog);
+ dump_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
- dump_block_fcp->ipl_info.fcp.br_lba);
+ dump_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
- dump_block_fcp->ipl_info.fcp.devno);
+ dump_block_fcp->fcp.devno);
static struct attribute *dump_fcp_attrs[] = {
&sys_dump_fcp_device_attr.attr,
@@ -1095,7 +1131,7 @@ static struct attribute_group dump_fcp_attr_group = {
};
/* CCW dump device attributes */
-DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
+DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ccw);
static struct attribute *dump_ccw_attrs[] = {
&sys_dump_ccw_device_attr.attr,
@@ -1145,7 +1181,9 @@ static struct kset *dump_kset;
static void diag308_dump(void *dump_block)
{
+ uv_set_shared(__pa(dump_block));
diag308(DIAG308_SET, dump_block);
+ uv_remove_shared(__pa(dump_block));
while (1) {
if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
break;
@@ -1187,10 +1225,10 @@ static int __init dump_ccw_init(void)
free_page((unsigned long)dump_block_ccw);
return rc;
}
- dump_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
+ dump_block_ccw->hdr.len = IPL_BP_CCW_LEN;
dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
- dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
- dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+ dump_block_ccw->ccw.len = IPL_BP0_CCW_LEN;
+ dump_block_ccw->ccw.pbt = IPL_PBT_CCW;
dump_capabilities |= DUMP_TYPE_CCW;
return 0;
}
@@ -1209,11 +1247,11 @@ static int __init dump_fcp_init(void)
free_page((unsigned long)dump_block_fcp);
return rc;
}
- dump_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
+ dump_block_fcp->hdr.len = IPL_BP_FCP_LEN;
dump_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
- dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
- dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
- dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
+ dump_block_fcp->fcp.len = IPL_BP0_FCP_LEN;
+ dump_block_fcp->fcp.pbt = IPL_PBT_FCP;
+ dump_block_fcp->fcp.opt = IPL_PB0_FCP_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_FCP;
return 0;
}
@@ -1337,7 +1375,7 @@ static void stop_run(struct shutdown_trigger *trigger)
{
if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
strcmp(trigger->name, ON_RESTART_STR) == 0)
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
smp_stop_cpu();
}
@@ -1572,7 +1610,7 @@ static int __init s390_ipl_init(void)
* READ SCP info provides the correct value.
*/
if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && ipl_block_valid)
- memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm, LOADPARM_LEN);
+ memcpy(sclp_ipl_info.loadparm, ipl_block.ccw.loadparm, LOADPARM_LEN);
shutdown_actions_init();
shutdown_triggers_init();
return 0;
@@ -1657,15 +1695,15 @@ void __init setup_ipl(void)
ipl_info.type = get_ipl_type();
switch (ipl_info.type) {
case IPL_TYPE_CCW:
- ipl_info.data.ccw.dev_id.ssid = ipl_block.ipl_info.ccw.ssid;
- ipl_info.data.ccw.dev_id.devno = ipl_block.ipl_info.ccw.devno;
+ ipl_info.data.ccw.dev_id.ssid = ipl_block.ccw.ssid;
+ ipl_info.data.ccw.dev_id.devno = ipl_block.ccw.devno;
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
ipl_info.data.fcp.dev_id.ssid = 0;
- ipl_info.data.fcp.dev_id.devno = ipl_block.ipl_info.fcp.devno;
- ipl_info.data.fcp.wwpn = ipl_block.ipl_info.fcp.wwpn;
- ipl_info.data.fcp.lun = ipl_block.ipl_info.fcp.lun;
+ ipl_info.data.fcp.dev_id.devno = ipl_block.fcp.devno;
+ ipl_info.data.fcp.wwpn = ipl_block.fcp.wwpn;
+ ipl_info.data.fcp.lun = ipl_block.fcp.lun;
break;
case IPL_TYPE_NSS:
case IPL_TYPE_UNKNOWN:
@@ -1675,14 +1713,6 @@ void __init setup_ipl(void)
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
}
-void __init ipl_store_parameters(void)
-{
- if (early_ipl_block_valid) {
- memcpy(&ipl_block, &early_ipl_block, sizeof(ipl_block));
- ipl_block_valid = 1;
- }
-}
-
void s390_reset_system(void)
{
/* Disable prefixing */
@@ -1690,5 +1720,139 @@ void s390_reset_system(void)
/* Disable lowcore protection */
__ctl_clear_bit(0, 28);
- diag308_reset();
+ diag_dma_ops.diag308_reset();
+}
+
+#ifdef CONFIG_KEXEC_FILE
+
+int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf,
+ unsigned char flags, unsigned short cert)
+{
+ struct ipl_report_component *comp;
+
+ comp = vzalloc(sizeof(*comp));
+ if (!comp)
+ return -ENOMEM;
+ list_add_tail(&comp->list, &report->components);
+
+ comp->entry.addr = kbuf->mem;
+ comp->entry.len = kbuf->memsz;
+ comp->entry.flags = flags;
+ comp->entry.certificate_index = cert;
+
+ report->size += sizeof(comp->entry);
+
+ return 0;
+}
+
+int ipl_report_add_certificate(struct ipl_report *report, void *key,
+ unsigned long addr, unsigned long len)
+{
+ struct ipl_report_certificate *cert;
+
+ cert = vzalloc(sizeof(*cert));
+ if (!cert)
+ return -ENOMEM;
+ list_add_tail(&cert->list, &report->certificates);
+
+ cert->entry.addr = addr;
+ cert->entry.len = len;
+ cert->key = key;
+
+ report->size += sizeof(cert->entry);
+ report->size += cert->entry.len;
+
+ return 0;
+}
+
+struct ipl_report *ipl_report_init(struct ipl_parameter_block *ipib)
+{
+ struct ipl_report *report;
+
+ report = vzalloc(sizeof(*report));
+ if (!report)
+ return ERR_PTR(-ENOMEM);
+
+ report->ipib = ipib;
+ INIT_LIST_HEAD(&report->components);
+ INIT_LIST_HEAD(&report->certificates);
+
+ report->size = ALIGN(ipib->hdr.len, 8);
+ report->size += sizeof(struct ipl_rl_hdr);
+ report->size += sizeof(struct ipl_rb_components);
+ report->size += sizeof(struct ipl_rb_certificates);
+
+ return report;
+}
+
+void *ipl_report_finish(struct ipl_report *report)
+{
+ struct ipl_report_certificate *cert;
+ struct ipl_report_component *comp;
+ struct ipl_rb_certificates *certs;
+ struct ipl_parameter_block *ipib;
+ struct ipl_rb_components *comps;
+ struct ipl_rl_hdr *rl_hdr;
+ void *buf, *ptr;
+
+ buf = vzalloc(report->size);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ ptr = buf;
+
+ memcpy(ptr, report->ipib, report->ipib->hdr.len);
+ ipib = ptr;
+ if (ipl_secure_flag)
+ ipib->hdr.flags |= IPL_PL_FLAG_SIPL;
+ ipib->hdr.flags |= IPL_PL_FLAG_IPLSR;
+ ptr += report->ipib->hdr.len;
+ ptr = PTR_ALIGN(ptr, 8);
+
+ rl_hdr = ptr;
+ ptr += sizeof(*rl_hdr);
+
+ comps = ptr;
+ comps->rbt = IPL_RBT_COMPONENTS;
+ ptr += sizeof(*comps);
+ list_for_each_entry(comp, &report->components, list) {
+ memcpy(ptr, &comp->entry, sizeof(comp->entry));
+ ptr += sizeof(comp->entry);
+ }
+ comps->len = ptr - (void *)comps;
+
+ certs = ptr;
+ certs->rbt = IPL_RBT_CERTIFICATES;
+ ptr += sizeof(*certs);
+ list_for_each_entry(cert, &report->certificates, list) {
+ memcpy(ptr, &cert->entry, sizeof(cert->entry));
+ ptr += sizeof(cert->entry);
+ }
+ certs->len = ptr - (void *)certs;
+ rl_hdr->len = ptr - (void *)rl_hdr;
+
+ list_for_each_entry(cert, &report->certificates, list) {
+ memcpy(ptr, cert->key, cert->entry.len);
+ ptr += cert->entry.len;
+ }
+
+ BUG_ON(ptr > buf + report->size);
+ return buf;
+}
+
+int ipl_report_free(struct ipl_report *report)
+{
+ struct ipl_report_component *comp, *ncomp;
+ struct ipl_report_certificate *cert, *ncert;
+
+ list_for_each_entry_safe(comp, ncomp, &report->components, list)
+ vfree(comp);
+
+ list_for_each_entry_safe(cert, ncert, &report->certificates, list)
+ vfree(cert);
+
+ vfree(report);
+
+ return 0;
}
+
+#endif
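Taken together, the new CONFIG_KEXEC_FILE helpers form a small builder
API. A minimal usage sketch (kbuf, key, key_addr and key_len stand in
for values supplied by the kexec_file loader):

	struct ipl_report *report;
	void *buf;

	report = ipl_report_init(&ipl_block);
	if (IS_ERR(report))
		return PTR_ERR(report);
	ipl_report_add_component(report, &kbuf,
				 IPL_RB_COMPONENT_FLAG_SIGNED,
				 IPL_RB_CERT_UNKNOWN);
	ipl_report_add_certificate(report, key, key_addr, key_len);
	buf = ipl_report_finish(report);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* buf now holds the flat blob handed to kexec_add_buffer() */
	ipl_report_free(report);

ipl_report_finish() lays out the parameter block first, then the
component and certificate lists, then the certificate payloads, in the
order built by the function above.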
diff --git a/arch/s390/kernel/ipl_vmparm.c b/arch/s390/kernel/ipl_vmparm.c
index 411838c0a0af..af43535a976d 100644
--- a/arch/s390/kernel/ipl_vmparm.c
+++ b/arch/s390/kernel/ipl_vmparm.c
@@ -11,11 +11,11 @@ size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
char has_lowercase = 0;
len = 0;
- if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
- (ipb->ipl_info.ccw.vm_parm_len > 0)) {
+ if ((ipb->ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP) &&
+ (ipb->ccw.vm_parm_len > 0)) {
- len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
- memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
+ len = min_t(size_t, size - 1, ipb->ccw.vm_parm_len);
+ memcpy(dest, ipb->ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
*/
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 0e8d68bac82c..8371855042dc 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -26,6 +26,7 @@
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
+#include <asm/stacktrace.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -73,7 +74,6 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
- {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
{.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
{.irq = IRQIO_C70, .name = "C70", .desc = "[I/O] 3270"},
@@ -81,13 +81,16 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
{.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
{.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
- {.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
{.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
{.irq = IRQIO_CSC, .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
- {.irq = IRQIO_PCI, .name = "PCI", .desc = "[I/O] PCI Interrupt" },
- {.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
{.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
- {.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
+ {.irq = IRQIO_QAI, .name = "QAI", .desc = "[AIO] QDIO Adapter Interrupt"},
+ {.irq = IRQIO_APB, .name = "APB", .desc = "[AIO] AP Bus"},
+ {.irq = IRQIO_PCF, .name = "PCF", .desc = "[AIO] PCI Floating Interrupt"},
+ {.irq = IRQIO_PCD, .name = "PCD", .desc = "[AIO] PCI Directed Interrupt"},
+ {.irq = IRQIO_MSI, .name = "MSI", .desc = "[AIO] MSI Interrupt"},
+ {.irq = IRQIO_VAI, .name = "VAI", .desc = "[AIO] Virtual I/O Devices AI"},
+ {.irq = IRQIO_GAL, .name = "GAL", .desc = "[AIO] GIB Alert"},
{.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
};
@@ -115,6 +118,34 @@ void do_IRQ(struct pt_regs *regs, int irq)
set_irq_regs(old_regs);
}
+static void show_msi_interrupt(struct seq_file *p, int irq)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+ int cpu;
+
+ irq_lock_sparse();
+ desc = irq_to_desc(irq);
+ if (!desc)
+ goto out;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ seq_printf(p, "%3d: ", irq);
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+
+ if (desc->irq_data.chip)
+ seq_printf(p, " %8s", desc->irq_data.chip->name);
+
+ if (desc->action)
+ seq_printf(p, " %s", desc->action->name);
+
+ seq_putc(p, '\n');
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+out:
+ irq_unlock_sparse();
+}
+
/*
* show_interrupts is needed by /proc/interrupts.
*/
@@ -127,7 +158,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (index == 0) {
seq_puts(p, " ");
for_each_online_cpu(cpu)
- seq_printf(p, "CPU%d ", cpu);
+ seq_printf(p, "CPU%-8d", cpu);
seq_putc(p, '\n');
}
if (index < NR_IRQS_BASE) {
@@ -138,9 +169,10 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
goto out;
}
- if (index > NR_IRQS_BASE)
+ if (index < nr_irqs) {
+ show_msi_interrupt(p, index);
goto out;
-
+ }
for (index = 0; index < NR_ARCH_IRQS; index++) {
seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
irq = irqclass_sub_desc[index].irq;
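With the wider "CPU%-8d" heading and the new show_msi_interrupt() rows,
/proc/interrupts lists one line per allocated MSI before the per-class
summary rows. An illustrative row (counts and names invented) in the
"%3d:" / "%10u" / chip / action format printed above:

	  3:        42          0  PCI-MSI  virtio0-config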
diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
index 5a286b012043..6d0635ceddd0 100644
--- a/arch/s390/kernel/kexec_elf.c
+++ b/arch/s390/kernel/kexec_elf.c
@@ -10,19 +10,26 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
+#include <asm/ipl.h>
#include <asm/setup.h>
-static int kexec_file_add_elf_kernel(struct kimage *image,
- struct s390_load_data *data,
- char *kernel, unsigned long kernel_len)
+static int kexec_file_add_kernel_elf(struct kimage *image,
+ struct s390_load_data *data)
{
struct kexec_buf buf;
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
+ Elf_Addr entry;
+ void *kernel;
int i, ret;
+ kernel = image->kernel_buf;
ehdr = (Elf_Ehdr *)kernel;
buf.image = image;
+ if (image->type == KEXEC_TYPE_CRASH)
+ entry = STARTUP_KDUMP_OFFSET;
+ else
+ entry = ehdr->e_entry;
phdr = (void *)ehdr + ehdr->e_phoff;
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
@@ -33,30 +40,27 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
buf.bufsz = phdr->p_filesz;
buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
+ if (image->type == KEXEC_TYPE_CRASH)
+ buf.mem += crashk_res.start;
buf.memsz = phdr->p_memsz;
+ data->memsz = ALIGN(data->memsz, phdr->p_align) + buf.memsz;
- if (phdr->p_paddr == 0) {
+ if (entry - phdr->p_paddr < phdr->p_memsz) {
data->kernel_buf = buf.buffer;
- data->memsz += STARTUP_NORMAL_OFFSET;
-
- buf.buffer += STARTUP_NORMAL_OFFSET;
- buf.bufsz -= STARTUP_NORMAL_OFFSET;
-
- buf.mem += STARTUP_NORMAL_OFFSET;
- buf.memsz -= STARTUP_NORMAL_OFFSET;
+ data->kernel_mem = buf.mem;
+ data->parm = buf.buffer + PARMAREA;
}
- if (image->type == KEXEC_TYPE_CRASH)
- buf.mem += crashk_res.start;
-
+ ipl_report_add_component(data->report, &buf,
+ IPL_RB_COMPONENT_FLAG_SIGNED |
+ IPL_RB_COMPONENT_FLAG_VERIFIED,
+ IPL_RB_CERT_UNKNOWN);
ret = kexec_add_buffer(&buf);
if (ret)
return ret;
-
- data->memsz += buf.memsz;
}
- return 0;
+ return data->memsz ? 0 : -EINVAL;
}
static void *s390_elf_load(struct kimage *image,
@@ -64,11 +68,10 @@ static void *s390_elf_load(struct kimage *image,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
- struct s390_load_data data = {0};
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
size_t size;
- int i, ret;
+ int i;
/* image->fobs->probe already checked for valid ELF magic number. */
ehdr = (Elf_Ehdr *)kernel;
@@ -101,24 +104,7 @@ static void *s390_elf_load(struct kimage *image,
if (size > kernel_len)
return ERR_PTR(-EINVAL);
- ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len);
- if (ret)
- return ERR_PTR(ret);
-
- if (!data.memsz)
- return ERR_PTR(-EINVAL);
-
- if (initrd) {
- ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
- if (ret)
- return ERR_PTR(ret);
- }
-
- ret = kexec_file_add_purgatory(image, &data);
- if (ret)
- return ERR_PTR(ret);
-
- return kexec_file_update_kernel(image, &data);
+ return kexec_file_add_components(image, kexec_file_add_kernel_elf);
}
static int s390_elf_probe(const char *buf, unsigned long len)
@@ -144,4 +130,7 @@ static int s390_elf_probe(const char *buf, unsigned long len)
const struct kexec_file_ops s390_kexec_elf_ops = {
.probe = s390_elf_probe,
.load = s390_elf_load,
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+ .verify_sig = s390_verify_sig,
+#endif /* CONFIG_KEXEC_VERIFY_SIG */
};
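The new entry-point test replaces the old p_paddr == 0 special case
with an overflow-safe containment check. A sketch of why a single
unsigned comparison suffices:

	/*
	 * With unsigned arithmetic, entry < p_paddr makes the
	 * subtraction wrap to a huge value, so
	 *
	 *	entry - p_paddr < p_memsz
	 *
	 * is equivalent to
	 *
	 *	p_paddr <= entry && entry < p_paddr + p_memsz
	 *
	 * without risking overflow in p_paddr + p_memsz.
	 */
	static bool segment_contains_entry(Elf_Addr entry,
					   const Elf_Phdr *phdr)
	{
		return entry - phdr->p_paddr < phdr->p_memsz;
	}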
diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c
index 3800852595e8..58318bf89fd9 100644
--- a/arch/s390/kernel/kexec_image.c
+++ b/arch/s390/kernel/kexec_image.c
@@ -10,31 +10,34 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
+#include <asm/ipl.h>
#include <asm/setup.h>
-static int kexec_file_add_image_kernel(struct kimage *image,
- struct s390_load_data *data,
- char *kernel, unsigned long kernel_len)
+static int kexec_file_add_kernel_image(struct kimage *image,
+ struct s390_load_data *data)
{
struct kexec_buf buf;
- int ret;
buf.image = image;
- buf.buffer = kernel + STARTUP_NORMAL_OFFSET;
- buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET;
+ buf.buffer = image->kernel_buf;
+ buf.bufsz = image->kernel_buf_len;
- buf.mem = STARTUP_NORMAL_OFFSET;
+ buf.mem = 0;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
- ret = kexec_add_buffer(&buf);
+ data->kernel_buf = image->kernel_buf;
+ data->kernel_mem = buf.mem;
+ data->parm = image->kernel_buf + PARMAREA;
+ data->memsz += buf.memsz;
- data->kernel_buf = kernel;
- data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET;
-
- return ret;
+ ipl_report_add_component(data->report, &buf,
+ IPL_RB_COMPONENT_FLAG_SIGNED |
+ IPL_RB_COMPONENT_FLAG_VERIFIED,
+ IPL_RB_CERT_UNKNOWN);
+ return kexec_add_buffer(&buf);
}
static void *s390_image_load(struct kimage *image,
@@ -42,24 +45,7 @@ static void *s390_image_load(struct kimage *image,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
- struct s390_load_data data = {0};
- int ret;
-
- ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len);
- if (ret)
- return ERR_PTR(ret);
-
- if (initrd) {
- ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
- if (ret)
- return ERR_PTR(ret);
- }
-
- ret = kexec_file_add_purgatory(image, &data);
- if (ret)
- return ERR_PTR(ret);
-
- return kexec_file_update_kernel(image, &data);
+ return kexec_file_add_components(image, kexec_file_add_kernel_image);
}
static int s390_image_probe(const char *buf, unsigned long len)
@@ -73,4 +59,7 @@ static int s390_image_probe(const char *buf, unsigned long len)
const struct kexec_file_ops s390_kexec_image_ops = {
.probe = s390_image_probe,
.load = s390_image_load,
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+ .verify_sig = s390_verify_sig,
+#endif /* CONFIG_KEXEC_VERIFY_SIG */
};
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 7c0a095e9c5f..6f1388391620 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -27,29 +27,30 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = { };
-DEFINE_INSN_CACHE_OPS(dmainsn);
+DEFINE_INSN_CACHE_OPS(s390_insn);
-static void *alloc_dmainsn_page(void)
-{
- void *page;
+static int insn_page_in_use;
+static char insn_page[PAGE_SIZE] __aligned(PAGE_SIZE);
- page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
- if (page)
- set_memory_x((unsigned long) page, 1);
- return page;
+static void *alloc_s390_insn_page(void)
+{
+ if (xchg(&insn_page_in_use, 1) == 1)
+ return NULL;
+ set_memory_x((unsigned long) &insn_page, 1);
+ return &insn_page;
}
-static void free_dmainsn_page(void *page)
+static void free_s390_insn_page(void *page)
{
set_memory_nx((unsigned long) page, 1);
- free_page((unsigned long)page);
+ xchg(&insn_page_in_use, 0);
}
-struct kprobe_insn_cache kprobe_dmainsn_slots = {
- .mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
- .alloc = alloc_dmainsn_page,
- .free = free_dmainsn_page,
- .pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
+struct kprobe_insn_cache kprobe_s390_insn_slots = {
+ .mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
+ .alloc = alloc_s390_insn_page,
+ .free = free_s390_insn_page,
+ .pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
};
@@ -102,7 +103,7 @@ static int s390_get_insn_slot(struct kprobe *p)
*/
p->ainsn.insn = NULL;
if (is_kernel_addr(p->addr))
- p->ainsn.insn = get_dmainsn_slot();
+ p->ainsn.insn = get_s390_insn_slot();
else if (is_module_addr(p->addr))
p->ainsn.insn = get_insn_slot();
return p->ainsn.insn ? 0 : -ENOMEM;
@@ -114,7 +115,7 @@ static void s390_free_insn_slot(struct kprobe *p)
if (!p->ainsn.insn)
return;
if (is_kernel_addr(p->addr))
- free_dmainsn_slot(p->ainsn.insn, 0);
+ free_s390_insn_slot(p->ainsn.insn, 0);
else
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
@@ -572,7 +573,7 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
- entry = search_exception_tables(regs->psw.addr);
+ entry = s390_search_extables(regs->psw.addr);
if (entry) {
regs->psw.addr = extable_fixup(entry);
return 1;
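The replacement insn slot allocator above trades the old GFP_DMA page
for a single static page guarded by an atomic flag. The same pattern in
isolation:

	static int slot_in_use;
	static char slot[PAGE_SIZE] __aligned(PAGE_SIZE);

	static void *slot_alloc(void)
	{
		/* xchg() returns the previous value, so only one caller
		 * can win the 0 -> 1 transition; later callers get NULL
		 * until slot_free() resets the flag. */
		return xchg(&slot_in_use, 1) ? NULL : slot;
	}

	static void slot_free(void *page)
	{
		xchg(&slot_in_use, 0);
	}

One backing page suffices because kprobe_insn_cache carves
MAX_INSN_SIZE slots out of each page, so the single static page still
serves many kprobes.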
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index cb582649aba6..8a1ae140c5e2 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -27,6 +27,7 @@
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/set_memory.h>
+#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
@@ -95,7 +96,7 @@ static void __do_machine_kdump(void *image)
start_kdump(1);
/* Die if start_kdump returns */
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
}
/*
@@ -253,6 +254,9 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
+ vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
+ vmcoreinfo_append_str("EDMA=%lx\n", __edma);
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}
void machine_shutdown(void)
@@ -280,7 +284,7 @@ static void __do_machine_kexec(void *data)
(*data_mover)(&image->head, image->start);
/* Die if kexec returns */
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
}
/*
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index 32023b4f9dc0..fbdd3ea73667 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -8,7 +8,12 @@
*/
#include <linux/elf.h>
+#include <linux/errno.h>
#include <linux/kexec.h>
+#include <linux/module.h>
+#include <linux/verification.h>
+#include <asm/boot_data.h>
+#include <asm/ipl.h>
#include <asm/setup.h>
const struct kexec_file_ops * const kexec_file_loaders[] = {
@@ -17,38 +22,78 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
NULL,
};
-int *kexec_file_update_kernel(struct kimage *image,
- struct s390_load_data *data)
-{
- unsigned long *loc;
-
- if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE)
- return ERR_PTR(-EINVAL);
-
- if (image->cmdline_buf_len)
- memcpy(data->kernel_buf + COMMAND_LINE_OFFSET,
- image->cmdline_buf, image->cmdline_buf_len);
-
- if (image->type == KEXEC_TYPE_CRASH) {
- loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET);
- *loc = crashk_res.start;
-
- loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET);
- *loc = crashk_res.end - crashk_res.start + 1;
- }
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+/*
+ * Module signature information block.
+ *
+ * The constituents of the signature section are, in order:
+ *
+ * - Signer's name
+ * - Key identifier
+ * - Signature data
+ * - Information block
+ */
+struct module_signature {
+ u8 algo; /* Public-key crypto algorithm [0] */
+ u8 hash; /* Digest algorithm [0] */
+ u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */
+ u8 signer_len; /* Length of signer's name [0] */
+ u8 key_id_len; /* Length of key identifier [0] */
+ u8 __pad[3];
+ __be32 sig_len; /* Length of signature data */
+};
- if (image->initrd_buf) {
- loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET);
- *loc = data->initrd_load_addr;
+#define PKEY_ID_PKCS7 2
- loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET);
- *loc = image->initrd_buf_len;
+int s390_verify_sig(const char *kernel, unsigned long kernel_len)
+{
+ const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
+ struct module_signature *ms;
+ unsigned long sig_len;
+
+ /* Skip signature verification when not secure IPLed. */
+ if (!ipl_secure_flag)
+ return 0;
+
+ if (marker_len > kernel_len)
+ return -EKEYREJECTED;
+
+ if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
+ marker_len))
+ return -EKEYREJECTED;
+ kernel_len -= marker_len;
+
+ ms = (void *)kernel + kernel_len - sizeof(*ms);
+ kernel_len -= sizeof(*ms);
+
+ sig_len = be32_to_cpu(ms->sig_len);
+ if (sig_len >= kernel_len)
+ return -EKEYREJECTED;
+ kernel_len -= sig_len;
+
+ if (ms->id_type != PKEY_ID_PKCS7)
+ return -EKEYREJECTED;
+
+ if (ms->algo != 0 ||
+ ms->hash != 0 ||
+ ms->signer_len != 0 ||
+ ms->key_id_len != 0 ||
+ ms->__pad[0] != 0 ||
+ ms->__pad[1] != 0 ||
+ ms->__pad[2] != 0) {
+ return -EBADMSG;
}
- return NULL;
+ return verify_pkcs7_signature(kernel, kernel_len,
+ kernel + kernel_len, sig_len,
+ VERIFY_USE_PLATFORM_KEYRING,
+ VERIFYING_MODULE_SIGNATURE,
+ NULL, NULL);
}
+#endif /* CONFIG_KEXEC_VERIFY_SIG */
-static int kexec_file_update_purgatory(struct kimage *image)
+static int kexec_file_update_purgatory(struct kimage *image,
+ struct s390_load_data *data)
{
u64 entry, type;
int ret;
@@ -90,7 +135,8 @@ static int kexec_file_update_purgatory(struct kimage *image)
return ret;
}
-int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data)
+static int kexec_file_add_purgatory(struct kimage *image,
+ struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
@@ -105,21 +151,21 @@ int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data)
ret = kexec_load_purgatory(image, &buf);
if (ret)
return ret;
+ data->memsz += buf.memsz;
- ret = kexec_file_update_purgatory(image);
- return ret;
+ return kexec_file_update_purgatory(image, data);
}
-int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data,
- char *initrd, unsigned long initrd_len)
+static int kexec_file_add_initrd(struct kimage *image,
+ struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
buf.image = image;
- buf.buffer = initrd;
- buf.bufsz = initrd_len;
+ buf.buffer = image->initrd_buf;
+ buf.bufsz = image->initrd_buf_len;
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
@@ -127,11 +173,115 @@ int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data,
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
- data->initrd_load_addr = buf.mem;
+ data->parm->initrd_start = buf.mem;
+ data->parm->initrd_size = buf.memsz;
data->memsz += buf.memsz;
ret = kexec_add_buffer(&buf);
- return ret;
+ if (ret)
+ return ret;
+
+ return ipl_report_add_component(data->report, &buf, 0, 0);
+}
+
+static int kexec_file_add_ipl_report(struct kimage *image,
+ struct s390_load_data *data)
+{
+ __u32 *lc_ipl_parmblock_ptr;
+ unsigned int len, ncerts;
+ struct kexec_buf buf;
+ unsigned long addr;
+ void *ptr, *end;
+
+ buf.image = image;
+
+ data->memsz = ALIGN(data->memsz, PAGE_SIZE);
+ buf.mem = data->memsz;
+ if (image->type == KEXEC_TYPE_CRASH)
+ buf.mem += crashk_res.start;
+
+ ptr = (void *)ipl_cert_list_addr;
+ end = ptr + ipl_cert_list_size;
+ ncerts = 0;
+ while (ptr < end) {
+ ncerts++;
+ len = *(unsigned int *)ptr;
+ ptr += sizeof(len);
+ ptr += len;
+ }
+
+ addr = data->memsz + data->report->size;
+ addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
+ ptr = (void *)ipl_cert_list_addr;
+ while (ptr < end) {
+ len = *(unsigned int *)ptr;
+ ptr += sizeof(len);
+ ipl_report_add_certificate(data->report, ptr, addr, len);
+ addr += len;
+ ptr += len;
+ }
+
+ buf.buffer = ipl_report_finish(data->report);
+ buf.bufsz = data->report->size;
+ buf.memsz = buf.bufsz;
+
+ data->memsz += buf.memsz;
+
+ lc_ipl_parmblock_ptr =
+ data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
+ *lc_ipl_parmblock_ptr = (__u32)buf.mem;
+
+ return kexec_add_buffer(&buf);
+}
+
+void *kexec_file_add_components(struct kimage *image,
+ int (*add_kernel)(struct kimage *image,
+ struct s390_load_data *data))
+{
+ struct s390_load_data data = {0};
+ int ret;
+
+ data.report = ipl_report_init(&ipl_block);
+ if (IS_ERR(data.report))
+ return data.report;
+
+ ret = add_kernel(image, &data);
+ if (ret)
+ goto out;
+
+ if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(data.parm->command_line, image->cmdline_buf,
+ image->cmdline_buf_len);
+
+ if (image->type == KEXEC_TYPE_CRASH) {
+ data.parm->oldmem_base = crashk_res.start;
+ data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
+ }
+
+ if (image->initrd_buf) {
+ ret = kexec_file_add_initrd(image, &data);
+ if (ret)
+ goto out;
+ }
+
+ ret = kexec_file_add_purgatory(image, &data);
+ if (ret)
+ goto out;
+
+ if (data.kernel_mem == 0) {
+ unsigned long restart_psw = 0x0008000080000000UL;
+ restart_psw += image->start;
+ memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
+ image->start = 0;
+ }
+
+ ret = kexec_file_add_ipl_report(image, &data);
+out:
+ ipl_report_free(data.report);
+ return ERR_PTR(ret);
}
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
@@ -140,7 +290,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
const Elf_Shdr *symtab)
{
Elf_Rela *relas;
- int i;
+ int i, r_type;
relas = (void *)pi->ehdr + relsec->sh_offset;
@@ -174,46 +324,8 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
addr = section->sh_addr + relas[i].r_offset;
- switch (ELF64_R_TYPE(relas[i].r_info)) {
- case R_390_8: /* Direct 8 bit. */
- *(u8 *)loc = val;
- break;
- case R_390_12: /* Direct 12 bit. */
- *(u16 *)loc &= 0xf000;
- *(u16 *)loc |= val & 0xfff;
- break;
- case R_390_16: /* Direct 16 bit. */
- *(u16 *)loc = val;
- break;
- case R_390_20: /* Direct 20 bit. */
- *(u32 *)loc &= 0xf00000ff;
- *(u32 *)loc |= (val & 0xfff) << 16; /* DL */
- *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */
- break;
- case R_390_32: /* Direct 32 bit. */
- *(u32 *)loc = val;
- break;
- case R_390_64: /* Direct 64 bit. */
- *(u64 *)loc = val;
- break;
- case R_390_PC16: /* PC relative 16 bit. */
- *(u16 *)loc = (val - addr);
- break;
- case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
- *(u16 *)loc = (val - addr) >> 1;
- break;
- case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
- *(u32 *)loc = (val - addr) >> 1;
- break;
- case R_390_PC32: /* PC relative 32 bit. */
- *(u32 *)loc = (val - addr);
- break;
- case R_390_PC64: /* PC relative 64 bit. */
- *(u64 *)loc = (val - addr);
- break;
- default:
- break;
- }
+ r_type = ELF64_R_TYPE(relas[i].r_info);
+ arch_kexec_do_relocs(r_type, loc, val, addr);
}
return 0;
}
@@ -225,10 +337,8 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
* load memory in head.S will be accessed, e.g. to register the next
* command line. If the next kernel were smaller the current kernel
* will panic at load.
- *
- * 0x11000 = sizeof(head.S)
*/
- if (buf_len < 0x11000)
+ if (buf_len < HEAD_END)
return -ENOEXEC;
return kexec_image_probe_default(image, buf, buf_len);
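s390_verify_sig() above walks the image back to front using the same
trailer format as signed kernel modules:

	+-------------------------+
	| kernel image            |
	+-------------------------+
	| PKCS#7 signature        |  sig_len bytes
	+-------------------------+
	| struct module_signature |  carries sig_len (big endian)
	+-------------------------+
	| MODULE_SIG_STRING       |  "~Module signature appended~\n"
	+-------------------------+

A hedged helper sketch for locating the trailer, mirroring the checks
in the function above:

	static const struct module_signature *find_ms(const char *buf,
						      size_t len)
	{
		const size_t marker = sizeof(MODULE_SIG_STRING) - 1;

		if (len < marker + sizeof(struct module_signature))
			return NULL;
		if (memcmp(buf + len - marker, MODULE_SIG_STRING, marker))
			return NULL;
		return (const void *)(buf + len - marker -
				      sizeof(struct module_signature));
	}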
diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c
new file mode 100644
index 000000000000..1dded39239f8
--- /dev/null
+++ b/arch/s390/kernel/machine_kexec_reloc.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/elf.h>
+
+int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
+ unsigned long addr)
+{
+ switch (r_type) {
+ case R_390_NONE:
+ break;
+ case R_390_8: /* Direct 8 bit. */
+ *(u8 *)loc = val;
+ break;
+ case R_390_12: /* Direct 12 bit. */
+ *(u16 *)loc &= 0xf000;
+ *(u16 *)loc |= val & 0xfff;
+ break;
+ case R_390_16: /* Direct 16 bit. */
+ *(u16 *)loc = val;
+ break;
+ case R_390_20: /* Direct 20 bit. */
+ *(u32 *)loc &= 0xf00000ff;
+ *(u32 *)loc |= (val & 0xfff) << 16; /* DL */
+ *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */
+ break;
+ case R_390_32: /* Direct 32 bit. */
+ *(u32 *)loc = val;
+ break;
+ case R_390_64: /* Direct 64 bit. */
+ *(u64 *)loc = val;
+ break;
+ case R_390_PC16: /* PC relative 16 bit. */
+ *(u16 *)loc = (val - addr);
+ break;
+ case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
+ *(u16 *)loc = (val - addr) >> 1;
+ break;
+ case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
+ *(u32 *)loc = (val - addr) >> 1;
+ break;
+ case R_390_PC32: /* PC relative 32 bit. */
+ *(u32 *)loc = (val - addr);
+ break;
+ case R_390_PC64: /* PC relative 64 bit. */
+ *(u64 *)loc = (val - addr);
+ break;
+ case R_390_RELATIVE:
+ *(unsigned long *) loc = val;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
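A worked example for the R_390_20 case, which scatters a 20-bit value
across two fields of the 32-bit word at loc (the instruction word is
illustrative):

	u32 insn = 0xe300f000;

	arch_kexec_do_relocs(R_390_20, &insn, 0x54321, 0);
	/*
	 * DL = 0x54321 & 0xfff          = 0x321  -> bits 16..27
	 * DH = (0x54321 & 0xff000) >> 4 = 0x5400 -> bits 8..15
	 * insn is masked with 0xf00000ff, then ORed with
	 * 0x03210000 | 0x00005400, yielding 0xe3215400.
	 */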
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index e93fbf02490c..9e1660a6b9db 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -20,6 +20,7 @@
ENTRY(ftrace_stub)
BR_EX %r14
+ENDPROC(ftrace_stub)
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
@@ -28,7 +29,7 @@ ENTRY(ftrace_stub)
ENTRY(_mcount)
BR_EX %r14
-
+ENDPROC(_mcount)
EXPORT_SYMBOL(_mcount)
ENTRY(ftrace_caller)
@@ -61,10 +62,11 @@ ENTRY(ftrace_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller.
-ENTRY(ftrace_graph_caller)
+ .globl ftrace_graph_caller
+ftrace_graph_caller:
j ftrace_graph_caller_end
- lg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
- lg %r3,(STACK_PTREGS_PSW+8)(%r15)
+ lmg %r2,%r3,(STACK_PTREGS_GPRS+14*8)(%r15)
+ lg %r4,(STACK_PTREGS_PSW+8)(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
ftrace_graph_caller_end:
@@ -73,6 +75,7 @@ ftrace_graph_caller_end:
lg %r1,(STACK_PTREGS_PSW+8)(%r15)
lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
BR_EX %r1
+ENDPROC(ftrace_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -86,5 +89,6 @@ ENTRY(return_to_handler)
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
BR_EX %r14
+ENDPROC(return_to_handler)
#endif
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 8c867b43c8eb..0a487fae763e 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -125,7 +125,7 @@ void nmi_free_per_cpu(struct lowcore *lc)
static notrace void s390_handle_damage(void)
{
smp_emergency_stop();
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index bdddaae96559..29e511f5bf06 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/cpu.h>
#include <asm/nospec-branch.h>
static int __init nobp_setup_early(char *str)
@@ -37,7 +38,7 @@ static int __init nospec_report(void)
{
if (test_facility(156))
pr_info("Spectre V2 mitigation: etokens\n");
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
pr_info("Spectre V2 mitigation: limited branch prediction\n");
@@ -58,15 +59,15 @@ early_param("nospectre_v2", nospectre_v2_setup_early);
void __init nospec_auto_detect(void)
{
- if (test_facility(156)) {
+ if (test_facility(156) || cpu_mitigations_off()) {
/*
* The machine supports etokens.
* Disable expolines and disable nobp.
*/
- if (IS_ENABLED(CC_USING_EXPOLINE))
+ if (__is_defined(CC_USING_EXPOLINE))
nospec_disable = 1;
__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
- } else if (IS_ENABLED(CC_USING_EXPOLINE)) {
+ } else if (__is_defined(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
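The IS_ENABLED() -> __is_defined() switch is a semantic cleanup:
CC_USING_EXPOLINE is a plain compiler flag define (-DCC_USING_EXPOLINE
from the Makefile), not a Kconfig symbol, so the kconfig.h helper for
arbitrary macros is the right primitive:

	/* from include/linux/kconfig.h */
	#define __is_defined(x)		___is_defined(x)

	__is_defined(CC_USING_EXPOLINE)	/* 1 iff defined (to 1) */

IS_ENABLED() additionally probes a CONFIG_..._MODULE variant, which is
meaningless for a compiler-provided macro.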
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index e30e580ae362..48f472bf9290 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -15,7 +15,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
{
if (test_facility(156))
return sprintf(buf, "Mitigation: etokens\n");
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction\n");
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index e1c54d28713a..48d48b6187c0 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -2,8 +2,8 @@
/*
* Performance event support for s390x - CPU-measurement Counter Facility
*
- * Copyright IBM Corp. 2012, 2017
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ * Copyright IBM Corp. 2012, 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
*/
#define KMSG_COMPONENT "cpum_cf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
@@ -26,7 +26,7 @@ static enum cpumf_ctr_set get_counter_set(u64 event)
set = CPUMF_CTR_SET_USER;
else if (event < 128)
set = CPUMF_CTR_SET_CRYPTO;
- else if (event < 256)
+ else if (event < 288)
set = CPUMF_CTR_SET_EXT;
else if (event >= 448 && event < 496)
set = CPUMF_CTR_SET_MT_DIAG;
@@ -50,12 +50,19 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_CRYPTO:
+ if ((cpuhw->info.csvn >= 1 && cpuhw->info.csvn <= 5 &&
+ hwc->config > 79) ||
+ (cpuhw->info.csvn >= 6 && hwc->config > 83))
+ err = -EOPNOTSUPP;
+ break;
case CPUMF_CTR_SET_EXT:
if (cpuhw->info.csvn < 1)
err = -EOPNOTSUPP;
if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
(cpuhw->info.csvn == 2 && hwc->config > 175) ||
- (cpuhw->info.csvn > 2 && hwc->config > 255))
+ (cpuhw->info.csvn >= 3 && cpuhw->info.csvn <= 5
+ && hwc->config > 255) ||
+ (cpuhw->info.csvn >= 6 && hwc->config > 287))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_MT_DIAG:
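Summarizing the counter-number ranges after this change (csvn is the
counter second version number reported by the hardware):

	counter number	counter set
	------------	---------------------
	     ... <128	CPUMF_CTR_SET_CRYPTO
	 128 ... <288	CPUMF_CTR_SET_EXT      (was <256)
	 448 ... <496	CPUMF_CTR_SET_MT_DIAG

For csvn >= 6 the crypto set gains counters 80-83, which correspond to
the new ECC_* events added to perf_cpum_cf_events.c below.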
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index c6fad208c2fa..d4e031f7b9c8 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
*/
static int __hw_perf_event_init(struct perf_event *event)
{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct perf_event_attr *attr = &event->attr;
+ struct cpu_cf_events *cpuhw;
enum cpumf_ctr_set i;
int err = 0;
- debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d authorized %#x\n", __func__,
- event, event->cpu, cpuhw->info.auth_ctl);
+ debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+ event, event->cpu);
event->hw.config = attr->config;
event->hw.config_base = 0;
- local64_set(&event->count, 0);
- /* Add all authorized counter sets to config_base */
+	/* Add all authorized counter sets to config_base. The
+	 * hardware init function is either called per-cpu or just once
+	 * for all CPUs (event->cpu == -1). This depends on whether
+	 * counting is started for all CPUs or on a per-workload basis,
+	 * where the perf event moves from one CPU to another CPU.
+ * Checking the authorization on any CPU is fine as the hardware
+ * applies the same authorization settings to all CPUs.
+ */
+ cpuhw = &get_cpu_var(cpu_cf_events);
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
event->hw.config_base |= cpumf_ctr_ctl[i];
+ put_cpu_var(cpu_cf_events);
/* No authorized counter sets, nothing to count/sample */
if (!event->hw.config_base) {
@@ -299,15 +306,20 @@ static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset,
ctrset_size = 2;
break;
case CPUMF_CTR_SET_CRYPTO:
- ctrset_size = 16;
+ if (info->csvn >= 1 && info->csvn <= 5)
+ ctrset_size = 16;
+ else if (info->csvn == 6)
+ ctrset_size = 20;
break;
case CPUMF_CTR_SET_EXT:
if (info->csvn == 1)
ctrset_size = 32;
else if (info->csvn == 2)
ctrset_size = 48;
- else if (info->csvn >= 3)
+ else if (info->csvn >= 3 && info->csvn <= 5)
ctrset_size = 128;
+ else if (info->csvn == 6)
+ ctrset_size = 160;
break;
case CPUMF_CTR_SET_MT_DIAG:
if (info->csvn > 3)
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index b45238c89728..34cc96449b30 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -31,22 +31,26 @@ CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES, 0x0020);
CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_DIR_WRITES, 0x0004);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_PENALTY_CYCLES, 0x0005);
-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_FUNCTIONS, 0x0040);
-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_CYCLES, 0x0041);
-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS, 0x0042);
-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_CYCLES, 0x0043);
-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_FUNCTIONS, 0x0044);
-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_CYCLES, 0x0045);
-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS, 0x0046);
-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_CYCLES, 0x0047);
-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_FUNCTIONS, 0x0048);
-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_CYCLES, 0x0049);
-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS, 0x004a);
-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_CYCLES, 0x004b);
-CPUMF_EVENT_ATTR(cf_svn_generic, AES_FUNCTIONS, 0x004c);
-CPUMF_EVENT_ATTR(cf_svn_generic, AES_CYCLES, 0x004d);
-CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS, 0x004e);
-CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_CYCLES, 0x004f);
+CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_FUNCTIONS, 0x0040);
+CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_CYCLES, 0x0041);
+CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS, 0x0042);
+CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_CYCLES, 0x0043);
+CPUMF_EVENT_ATTR(cf_svn_12345, SHA_FUNCTIONS, 0x0044);
+CPUMF_EVENT_ATTR(cf_svn_12345, SHA_CYCLES, 0x0045);
+CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS, 0x0046);
+CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_CYCLES, 0x0047);
+CPUMF_EVENT_ATTR(cf_svn_12345, DEA_FUNCTIONS, 0x0048);
+CPUMF_EVENT_ATTR(cf_svn_12345, DEA_CYCLES, 0x0049);
+CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS, 0x004a);
+CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_CYCLES, 0x004b);
+CPUMF_EVENT_ATTR(cf_svn_12345, AES_FUNCTIONS, 0x004c);
+CPUMF_EVENT_ATTR(cf_svn_12345, AES_CYCLES, 0x004d);
+CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS, 0x004e);
+CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_CYCLES, 0x004f);
+CPUMF_EVENT_ATTR(cf_svn_6, ECC_FUNCTION_COUNT, 0x0050);
+CPUMF_EVENT_ATTR(cf_svn_6, ECC_CYCLES_COUNT, 0x0051);
+CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT, 0x0052);
+CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT, 0x0053);
CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
@@ -262,23 +266,47 @@ static struct attribute *cpumcf_fvn3_pmu_event_attr[] __initdata = {
NULL,
};
-static struct attribute *cpumcf_svn_generic_pmu_event_attr[] __initdata = {
- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, SHA_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, SHA_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, DEA_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, DEA_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, AES_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, AES_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_CYCLES),
+static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
+ NULL,
+};
+
+static struct attribute *cpumcf_svn_6_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_6, ECC_FUNCTION_COUNT),
+ CPUMF_EVENT_PTR(cf_svn_6, ECC_CYCLES_COUNT),
+ CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT),
+ CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT),
NULL,
};
@@ -562,7 +590,18 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
default:
cfvn = none;
}
- csvn = cpumcf_svn_generic_pmu_event_attr;
+
+ /* Determine version specific crypto set */
+ switch (ci.csvn) {
+ case 1 ... 5:
+ csvn = cpumcf_svn_12345_pmu_event_attr;
+ break;
+ case 6:
+ csvn = cpumcf_svn_6_pmu_event_attr;
+ break;
+ default:
+ csvn = none;
+ }
/* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
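
A hypothetical userspace sketch for counting the new ECC_FUNCTION_COUNT event (raw code 0x50, CSVN 6 machines only). The cpum_cf PMU type id is not fixed; it must be read from /sys/bus/event_source/devices/cpum_cf/type and passed in:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	static int open_ecc_counter(int cpum_cf_type, int cpu)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = cpum_cf_type;	/* from sysfs, see above */
		attr.config = 0x50;		/* ECC_FUNCTION_COUNT */
		/* pid == -1 with a CPU number: count all tasks on that CPU */
		return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
	}
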
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 0d770e513abf..fcb6c2e92b07 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -21,6 +21,7 @@
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sysinfo.h>
+#include <asm/unwind.h>
const char *perf_pmu_name(void)
{
@@ -219,20 +220,13 @@ static int __init service_level_perf_register(void)
}
arch_initcall(service_level_perf_register);
-static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
-{
- struct perf_callchain_entry_ctx *entry = data;
-
- perf_callchain_store(entry, address);
- return 0;
-}
-
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- if (user_mode(regs))
- return;
- dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, current, regs, 0)
+ perf_callchain_store(entry, state.ip);
}
/* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 3e62aae34ea3..59dee9d3bebf 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -7,7 +7,7 @@
#include <linux/linkage.h>
-#define PGM_CHECK(handler) .long handler
+#define PGM_CHECK(handler) .quad handler
#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
/*
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6e758bb6cd29..63873aa6693f 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -37,6 +37,7 @@
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
+#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include "entry.h"
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 6fe2e1875058..5de13307b703 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -109,7 +109,8 @@ static void show_cpu_summary(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
- "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs"
+ "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs",
+ "vxe2", "vxp", "sort", "dflt"
};
static const char * const int_hwcap_str[] = {
"sie"
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 7f14adf512c6..4a22163962eb 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -73,6 +73,7 @@ ENTRY(store_status)
lgr %r9,%r2
lgr %r2,%r3
BR_EX %r9
+ENDPROC(store_status)
.section .bss
.align 8
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index c97c2d40fe15..fe396673e8a6 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -58,11 +58,15 @@ ENTRY(relocate_kernel)
j .base
.done:
sgr %r0,%r0 # clear register r0
+ cghi %r3,0
+ je .diag
la %r4,load_psw-.base(%r13) # load psw-address into the register
o %r3,4(%r4) # or load address into psw
st %r3,4(%r4)
mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
+ .diag:
diag %r0,%r0,0x308
+ENDPROC(relocate_kernel)
.align 8
load_psw:
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 12934e8fbb91..f8544d517430 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -50,6 +50,7 @@
#include <linux/compat.h>
#include <linux/start_kernel.h>
+#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
@@ -65,11 +66,13 @@
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
+#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
+#include <asm/uv.h>
#include "entry.h"
/*
@@ -89,12 +92,25 @@ char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0;
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
+int __bootdata_preserved(prot_virt_guest);
+#endif
+
int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);
+struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
+struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
+unsigned long __bootdata_preserved(__swsusp_reset_dma);
+unsigned long __bootdata_preserved(__stext_dma);
+unsigned long __bootdata_preserved(__etext_dma);
+unsigned long __bootdata_preserved(__sdma);
+unsigned long __bootdata_preserved(__edma);
+unsigned long __bootdata_preserved(__kaslr_offset);
+
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@@ -378,6 +394,10 @@ static void __init setup_lowcore_dat_off(void)
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
+ if (!lc)
+ panic("%s: Failed to allocate %zu bytes align=%zx\n",
+ __func__, sizeof(*lc), sizeof(*lc));
+
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
@@ -419,6 +439,9 @@ static void __init setup_lowcore_dat_off(void)
* all CPUs in case *one* of them does a PSW restart.
*/
restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+ if (!restart_stack)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, THREAD_SIZE, THREAD_SIZE);
restart_stack += STACK_INIT_OFFSET;
/*
@@ -495,6 +518,9 @@ static void __init setup_resources(void)
for_each_memblock(memory, reg) {
res = memblock_alloc(sizeof(*res), 8);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
@@ -509,6 +535,9 @@ static void __init setup_resources(void)
continue;
if (std_res->end > res->end) {
sub_res = memblock_alloc(sizeof(*sub_res), 8);
+ if (!sub_res)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@@ -723,6 +752,15 @@ static void __init reserve_initrd(void)
#endif
}
+/*
+ * Reserve the memory area used to pass the certificate lists
+ */
+static void __init reserve_certificate_list(void)
+{
+ if (ipl_cert_list_addr)
+ memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
+}
+
static void __init reserve_mem_detect_info(void)
{
unsigned long start, size;
@@ -801,9 +839,10 @@ static void __init reserve_kernel(void)
{
unsigned long start_pfn = PFN_UP(__pa(_end));
- memblock_reserve(0, PARMAREA_END);
+ memblock_reserve(0, HEAD_END);
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- (unsigned long)_stext);
+ memblock_reserve(__sdma, __edma - __sdma);
}
static void __init setup_memory(void)
@@ -901,7 +940,15 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_S390_VXRS_EXT;
if (test_facility(135))
elf_hwcap |= HWCAP_S390_VXRS_BCD;
+ if (test_facility(148))
+ elf_hwcap |= HWCAP_S390_VXRS_EXT2;
+ if (test_facility(152))
+ elf_hwcap |= HWCAP_S390_VXRS_PDE;
}
+ if (test_facility(150))
+ elf_hwcap |= HWCAP_S390_SORT;
+ if (test_facility(151))
+ elf_hwcap |= HWCAP_S390_DFLT;
/*
* Guarded storage support HWCAP_S390_GS is bit 12.
@@ -966,6 +1013,9 @@ static void __init setup_randomness(void)
vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
PAGE_SIZE);
+ if (!vmms)
+ panic("Failed to allocate memory for sysinfo structure\n");
+
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free((unsigned long) vmms, PAGE_SIZE);
@@ -1007,6 +1057,38 @@ static void __init setup_control_program_code(void)
}
/*
+ * Print the component list from the IPL report
+ */
+static void __init log_component_list(void)
+{
+ struct ipl_rb_component_entry *ptr, *end;
+ char *str;
+
+ if (!early_ipl_comp_list_addr)
+ return;
+ if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR)
+ pr_info("Linux is running with Secure-IPL enabled\n");
+ else
+ pr_info("Linux is running with Secure-IPL disabled\n");
+ ptr = (void *) early_ipl_comp_list_addr;
+ end = (void *) ptr + early_ipl_comp_list_size;
+ pr_info("The IPL report contains the following components:\n");
+ while (ptr < end) {
+ if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
+ if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
+ str = "signed, verified";
+ else
+ str = "signed, verification failed";
+ } else {
+ str = "not signed";
+ }
+ pr_info("%016llx - %016llx (%s)\n",
+ ptr->addr, ptr->addr + ptr->len, str);
+ ptr++;
+ }
+}
+
+/*
* Setup function called from init/main.c just after the banner
* was printed.
*/
@@ -1026,6 +1108,8 @@ void __init setup_arch(char **cmdline_p)
else
pr_info("Linux is running as a guest in 64-bit mode\n");
+ log_component_list();
+
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
@@ -1057,6 +1141,7 @@ void __init setup_arch(char **cmdline_p)
reserve_oldmem();
reserve_kernel();
reserve_initrd();
+ reserve_certificate_list();
reserve_mem_detect_info();
memblock_allow_resize();
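
The new test_facility() checks surface as ELF hwcap bits that userspace can query with getauxval(3). A detection sketch; the bit value is an assumption derived from the hwcap_str order in processor.c, check asm/elf.h before relying on it:

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP_S390_VXRS_EXT2
	#define HWCAP_S390_VXRS_EXT2	(1UL << 15)	/* assumed bit */
	#endif

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		printf("vxe2: %s\n",
		       hwcap & HWCAP_S390_VXRS_EXT2 ? "yes" : "no");
		return 0;
	}
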
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b198ece2aad6..35fafa2b91a8 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -53,6 +53,7 @@
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
+#include <asm/stacktrace.h>
#include <asm/topology.h>
#include "entry.h"
@@ -266,7 +267,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->machine_flags = S390_lowcore.machine_flags;
- lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+ lc->user_timer = lc->system_timer =
+ lc->steal_timer = lc->avg_steal_timer = 0;
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
@@ -656,7 +658,11 @@ void __init smp_save_dump_cpus(void)
/* No previous system present, normal boot. */
return;
/* Allocate a page as dumping area for the store status sigps */
- page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
+ page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
+ if (!page)
+ panic("ERROR: Failed to allocate %lx bytes below %lx\n",
+ PAGE_SIZE, 1UL << 31);
+
/* Set multi-threading state to the previous system. */
pcpu_set_smt(sclp.mtid_prev);
boot_cpu_addr = stap();
@@ -684,7 +690,7 @@ void __init smp_save_dump_cpus(void)
smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
}
memblock_free(page, PAGE_SIZE);
- diag308_reset();
+ diag_dma_ops.diag308_reset();
pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */
@@ -766,6 +772,9 @@ void __init smp_detect_cpus(void)
/* Get CPU information */
info = memblock_alloc(sizeof(*info), 8);
+ if (!info)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*info), 8);
smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 460dcfba7d4e..f6a620f854e1 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -11,65 +11,52 @@
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
-
-static int __save_address(void *data, unsigned long address, int nosched)
-{
- struct stack_trace *trace = data;
-
- if (nosched && in_sched_functions(address))
- return 0;
- if (trace->skip > 0) {
- trace->skip--;
- return 0;
- }
- if (trace->nr_entries < trace->max_entries) {
- trace->entries[trace->nr_entries++] = address;
- return 0;
- }
- return 1;
-}
-
-static int save_address(void *data, unsigned long address, int reliable)
-{
- return __save_address(data, address, 0);
-}
-
-static int save_address_nosched(void *data, unsigned long address, int reliable)
-{
- return __save_address(data, address, 1);
-}
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
void save_stack_trace(struct stack_trace *trace)
{
- unsigned long sp;
-
- sp = current_stack_pointer();
- dump_trace(save_address, trace, NULL, sp);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, current, NULL, 0) {
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = state.ip;
+ }
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
- unsigned long sp;
-
- sp = tsk->thread.ksp;
- if (tsk == current)
- sp = current_stack_pointer();
- dump_trace(save_address_nosched, trace, tsk, sp);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, tsk, NULL, 0) {
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ if (in_sched_functions(state.ip))
+ continue;
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = state.ip;
+ }
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
- unsigned long sp;
-
- sp = kernel_stack_pointer(regs);
- dump_trace(save_address, trace, NULL, sp);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, current, regs, 0) {
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = state.ip;
+ }
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
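
The three savers above share one skeleton; a consolidated helper (hypothetical, not part of this patch) would capture the common loop:

	static void __save_stack_trace(struct stack_trace *trace,
				       struct task_struct *task,
				       struct pt_regs *regs, bool nosched)
	{
		struct unwind_state state;

		unwind_for_each_frame(&state, task, regs, 0) {
			if (trace->nr_entries >= trace->max_entries)
				break;
			if (nosched && in_sched_functions(state.ip))
				continue;
			if (trace->skip > 0)
				trace->skip--;
			else
				trace->entries[trace->nr_entries++] = state.ip;
		}
	}
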
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 993100c31d65..19a3c427801a 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -108,6 +108,7 @@ ENTRY(swsusp_arch_suspend)
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
BR_EX %r14
+ENDPROC(swsusp_arch_suspend)
/*
* Restore saved memory image to correct place and restore register context.
@@ -154,20 +155,13 @@ ENTRY(swsusp_arch_resume)
ptlb /* flush tlb */
/* Reset System */
- larl %r1,restart_entry
- larl %r2,.Lrestart_diag308_psw
- og %r1,0(%r2)
- stg %r1,0(%r0)
larl %r1,.Lnew_pgm_check_psw
epsw %r2,%r3
stm %r2,%r3,0(%r1)
mvc __LC_PGM_NEW_PSW(16,%r0),0(%r1)
- lghi %r0,0
- diag %r0,%r0,0x308
-restart_entry:
- lhi %r1,1
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE
- sam64
+ larl %r1,__swsusp_reset_dma
+ lg %r1,0(%r1)
+ BASR_EX %r14,%r1
#ifdef CONFIG_SMP
larl %r1,smp_cpu_mt_shift
icm %r1,15,0(%r1)
@@ -267,6 +261,7 @@ restore_registers:
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
BR_EX %r14
+ENDPROC(swsusp_arch_resume)
.section .data..nosave,"aw",@progbits
.align 8
@@ -275,8 +270,6 @@ restore_registers:
.Lpanic_string:
.asciz "Resume not possible because suspend CPU is no longer available\n"
.align 8
-.Lrestart_diag308_psw:
- .long 0x00080000,0x80000000
.Lrestart_suspend_psw:
.quad 0x0000000180000000,restart_suspend
.Lnew_pgm_check_psw:
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 02579f95f391..061418f787c3 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -426,3 +426,7 @@
421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64
422 32 futex_time64 - sys_futex
423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register sys_io_uring_register
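
With the table entries wired up, userspace can reach the new calls through syscall(2) before libc grows wrappers. A sketch for pidfd_send_signal, using syscall number 424 from the table above; the wrapper name is illustrative:

	#include <signal.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int my_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
					unsigned int flags)
	{
		return syscall(424, pidfd, sig, info, flags);
	}
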
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 8992b04c0ade..8964a3f60aad 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -520,6 +520,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
mask->next = memblock_alloc(sizeof(*mask->next), 8);
+ if (!mask->next)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*mask->next), 8);
mask = mask->next;
}
}
@@ -538,6 +541,9 @@ void __init topology_init_early(void)
if (!MACHINE_HAS_TOPOLOGY)
goto out;
tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!tl_info)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 8003b38c1688..82e81a9f7112 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -49,7 +49,7 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
report_user_fault(regs, si_signo, 0);
} else {
const struct exception_table_entry *fixup;
- fixup = search_exception_tables(regs->psw.addr);
+ fixup = s390_search_extables(regs->psw.addr);
if (fixup)
regs->psw.addr = extable_fixup(fixup);
else {
@@ -263,5 +263,6 @@ NOKPROBE_SYMBOL(kernel_stack_overflow);
void __init trap_init(void)
{
+ sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
local_mcck_enable();
}
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
new file mode 100644
index 000000000000..57fd4e902f1f
--- /dev/null
+++ b/arch/s390/kernel/unwind_bc.c
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/interrupt.h>
+#include <asm/sections.h>
+#include <asm/ptrace.h>
+#include <asm/bitops.h>
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return 0;
+ return __kernel_text_address(state->ip) ? state->ip : 0;
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+static bool outside_of_stack(struct unwind_state *state, unsigned long sp)
+{
+ return (sp <= state->sp) ||
+ (sp + sizeof(struct stack_frame) > state->stack_info.end);
+}
+
+static bool update_stack_info(struct unwind_state *state, unsigned long sp)
+{
+ struct stack_info *info = &state->stack_info;
+ unsigned long *mask = &state->stack_mask;
+
+ /* New stack pointer leaves the current stack */
+ if (get_stack_info(sp, state->task, info, mask) != 0 ||
+ !on_stack(info, sp, sizeof(struct stack_frame)))
+ /* 'sp' does not point to a valid stack */
+ return false;
+ return true;
+}
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ struct stack_frame *sf;
+ struct pt_regs *regs;
+ unsigned long sp, ip;
+ bool reliable;
+
+ regs = state->regs;
+ if (unlikely(regs)) {
+ sp = READ_ONCE_TASK_STACK(state->task, regs->gprs[15]);
+ if (unlikely(outside_of_stack(state, sp))) {
+ if (!update_stack_info(state, sp))
+ goto out_err;
+ }
+ sf = (struct stack_frame *) sp;
+ ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
+ reliable = false;
+ regs = NULL;
+ } else {
+ sf = (struct stack_frame *) state->sp;
+ sp = READ_ONCE_TASK_STACK(state->task, sf->back_chain);
+ if (likely(sp)) {
+ /* Non-zero back-chain points to the previous frame */
+ if (unlikely(outside_of_stack(state, sp))) {
+ if (!update_stack_info(state, sp))
+ goto out_err;
+ }
+ sf = (struct stack_frame *) sp;
+ ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
+ reliable = true;
+ } else {
+ /* No back-chain, look for a pt_regs structure */
+ sp = state->sp + STACK_FRAME_OVERHEAD;
+ if (!on_stack(info, sp, sizeof(struct pt_regs)))
+ goto out_stop;
+ regs = (struct pt_regs *) sp;
+ if (user_mode(regs))
+ goto out_stop;
+ ip = READ_ONCE_TASK_STACK(state->task, regs->psw.addr);
+ reliable = true;
+ }
+ }
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Decode any ftrace redirection */
+ if (ip == (unsigned long) return_to_handler)
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
+ ip, (void *) sp);
+#endif
+
+ /* Update unwind state */
+ state->sp = sp;
+ state->ip = ip;
+ state->regs = regs;
+ state->reliable = reliable;
+ return true;
+
+out_err:
+ state->error = true;
+out_stop:
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
+
+void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs, unsigned long sp)
+{
+ struct stack_info *info = &state->stack_info;
+ unsigned long *mask = &state->stack_mask;
+ struct stack_frame *sf;
+ unsigned long ip;
+ bool reliable;
+
+ memset(state, 0, sizeof(*state));
+ state->task = task;
+ state->regs = regs;
+
+ /* Don't even attempt to start from user mode regs: */
+ if (regs && user_mode(regs)) {
+ info->type = STACK_TYPE_UNKNOWN;
+ return;
+ }
+
+ /* Get current stack pointer and initialize stack info */
+ if (get_stack_info(sp, task, info, mask) != 0 ||
+ !on_stack(info, sp, sizeof(struct stack_frame))) {
+ /* Something is wrong with the stack pointer */
+ info->type = STACK_TYPE_UNKNOWN;
+ state->error = true;
+ return;
+ }
+
+ /* Get the instruction pointer from pt_regs or the stack frame */
+ if (regs) {
+ ip = READ_ONCE_TASK_STACK(state->task, regs->psw.addr);
+ reliable = true;
+ } else {
+ sf = (struct stack_frame *) sp;
+ ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
+ reliable = false;
+ }
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Decode any ftrace redirection */
+ if (ip == (unsigned long) return_to_handler)
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
+ ip, NULL);
+#endif
+
+ /* Update unwind state */
+ state->sp = sp;
+ state->ip = ip;
+ state->reliable = reliable;
+}
+EXPORT_SYMBOL_GPL(__unwind_start);
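
A minimal in-kernel usage sketch of the new unwinder, mirroring the converted callers in stacktrace.c and perf_event.c; the function is illustrative only:

	static void dump_current_stack(void)
	{
		struct unwind_state state;

		unwind_for_each_frame(&state, current, NULL, 0)
			pr_info("ip=%pSR reliable=%d\n",
				(void *) state.ip, state.reliable);
	}
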
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index e7920a68a12e..243d8b1185bf 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -29,7 +29,7 @@
#include <asm/vdso.h>
#include <asm/facility.h>
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
@@ -55,7 +55,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
vdso_pagelist = vdso64_pagelist;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
if (vma->vm_mm->context.compat_mm) {
vdso_pagelist = vdso32_pagelist;
vdso_pages = vdso32_pages;
@@ -76,7 +76,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
unsigned long vdso_pages;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
if (vma->vm_mm->context.compat_mm)
vdso_pages = vdso32_pages;
#endif
@@ -223,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return 0;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
mm->context.compat_mm = is_compat_task();
if (mm->context.compat_mm)
vdso_pages = vdso32_pages;
@@ -280,7 +280,7 @@ static int __init vdso_init(void)
int i;
vdso_init_data(vdso_data);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
/* Calculate the size of the 32 bit vDSO */
vdso32_pages = ((&vdso32_end - &vdso32_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index e76309fbbcb3..aee9ffbccb54 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -19,7 +19,7 @@ KBUILD_AFLAGS_31 += -m31 -s
KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ -Wl,--hash-style=both
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31)
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index f849ac61c5da..bec19e7e6e1c 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -19,7 +19,7 @@ KBUILD_AFLAGS_64 += -m64 -s
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ -Wl,--hash-style=both
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 8429ab079715..49d55327de0b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -72,6 +72,7 @@ SECTIONS
__end_ro_after_init = .;
RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
+ BOOT_DATA_PRESERVED
_edata = .; /* End of data section */
@@ -143,6 +144,18 @@ SECTIONS
INIT_DATA_SECTION(0x100)
PERCPU_SECTION(0x100)
+
+ .dynsym ALIGN(8) : {
+ __dynsym_start = .;
+ *(.dynsym)
+ __dynsym_end = .;
+ }
+ .rela.dyn ALIGN(8) : {
+ __rela_dyn_start = .;
+ *(.rela*)
+ __rela_dyn_end = .;
+ }
+
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
@@ -161,6 +174,12 @@ SECTIONS
QUAD(__bss_stop - __bss_start) /* bss_size */
QUAD(__boot_data_start) /* bootdata_off */
QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */
+ QUAD(__boot_data_preserved_start) /* bootdata_preserved_off */
+ QUAD(__boot_data_preserved_end -
+ __boot_data_preserved_start) /* bootdata_preserved_size */
+ QUAD(__dynsym_start) /* dynsym_start */
+ QUAD(__rela_dyn_start) /* rela_dyn_start */
+ QUAD(__rela_dyn_end) /* rela_dyn_end */
} :NONE
/* Debugging sections. */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 98f850e00008..c475ca49cfc6 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -37,7 +37,7 @@ static inline u64 get_vtimer(void)
{
u64 timer;
- asm volatile("stpt %0" : "=m" (timer));
+ asm volatile("stpt %0" : "=Q" (timer));
return timer;
}
@@ -48,7 +48,7 @@ static inline void set_vtimer(u64 expires)
asm volatile(
" stpt %0\n" /* Store current cpu timer value */
" spt %1" /* Set new value imm. afterwards */
- : "=m" (timer) : "m" (expires));
+ : "=Q" (timer) : "Q" (expires));
S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
S390_lowcore.last_update_timer = expires;
}
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
*/
static int do_account_vtime(struct task_struct *tsk)
{
- u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+ u64 timer, clock, user, guest, system, hardirq, softirq;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -135,8 +135,8 @@ static int do_account_vtime(struct task_struct *tsk)
#else
" stck %1" /* Store current tod clock value */
#endif
- : "=m" (S390_lowcore.last_update_timer),
- "=m" (S390_lowcore.last_update_clock));
+ : "=Q" (S390_lowcore.last_update_timer),
+ "=Q" (S390_lowcore.last_update_clock));
clock = S390_lowcore.last_update_clock - clock;
timer -= S390_lowcore.last_update_timer;
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
if (softirq)
account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
- steal = S390_lowcore.steal_timer;
- if ((s64) steal > 0) {
- S390_lowcore.steal_timer = 0;
- account_steal_time(cputime_to_nsecs(steal));
- }
-
return virt_timer_forward(user + guest + system + hardirq + softirq);
}
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
*/
void vtime_flush(struct task_struct *tsk)
{
+ u64 steal, avg_steal;
+
if (do_account_vtime(tsk))
virt_timer_expire();
+
+ steal = S390_lowcore.steal_timer;
+ avg_steal = S390_lowcore.avg_steal_timer / 2;
+ if ((s64) steal > 0) {
+ S390_lowcore.steal_timer = 0;
+ account_steal_time(steal);
+ avg_steal += steal;
+ }
+ S390_lowcore.avg_steal_timer = avg_steal;
}
/*
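
The averaging added to vtime_flush() halves the previous value before adding the newly accounted steal time, i.e. an exponentially decaying average avg_n = avg_(n-1)/2 + steal_n. As a standalone sketch:

	static u64 update_avg_steal(u64 avg, u64 steal)
	{
		avg /= 2;
		if ((s64) steal > 0)
			avg += steal;
		return avg;
	}
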
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcb55b02990e..37503ae62486 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -7,6 +7,9 @@
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
+#define KMSG_COMPONENT "kvm-s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
@@ -23,6 +26,7 @@
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
+#include <asm/airq.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -31,6 +35,8 @@
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
+static struct kvm_s390_gib *gib;
+
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
@@ -217,22 +223,100 @@ static inline u8 int_word_to_isc(u32 int_word)
*/
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
-static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+/**
+ * gisa_set_iam - change the GISA interruption alert mask
+ *
+ * @gisa: gisa to operate on
+ * @iam: new IAM value to use
+ *
+ * Change the IAM atomically with the next alert address and the IPM
+ * of the GISA if the GISA is not part of the GIB alert list. All three
+ * fields are located in the first long word of the GISA.
+ *
+ * Returns: 0 on success
+ * -EBUSY in case the gisa is part of the alert list
+ */
+static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
+{
+ u64 word, _word;
+
+ do {
+ word = READ_ONCE(gisa->u64.word[0]);
+ if ((u64)gisa != word >> 32)
+ return -EBUSY;
+ _word = (word & ~0xffUL) | iam;
+ } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+
+ return 0;
+}
+
+/**
+ * gisa_clear_ipm - clear the GISA interruption pending mask
+ *
+ * @gisa: gisa to operate on
+ *
+ * Clear the IPM atomically with the next alert address and the IAM
+ * of the GISA unconditionally. All three fields are located in the
+ * first long word of the GISA.
+ */
+static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
+{
+ u64 word, _word;
+
+ do {
+ word = READ_ONCE(gisa->u64.word[0]);
+ _word = word & ~(0xffUL << 24);
+ } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+}
+
+/**
+ * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
+ *
+ * @gi: gisa interrupt struct to work on
+ *
+ * Atomically restores the interruption alert mask if none of the
+ * relevant ISCs are pending and returns the IPM.
+ *
+ * Returns: the relevant pending ISCs
+ */
+static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
+{
+ u8 pending_mask, alert_mask;
+ u64 word, _word;
+
+ do {
+ word = READ_ONCE(gi->origin->u64.word[0]);
+ alert_mask = READ_ONCE(gi->alert.mask);
+ pending_mask = (u8)(word >> 24) & alert_mask;
+ if (pending_mask)
+ return pending_mask;
+ _word = (word & ~0xffUL) | alert_mask;
+ } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);
+
+ return 0;
+}
+
+static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
+{
+ return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
+}
+
+static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
-static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
+static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
return READ_ONCE(gisa->ipm);
}
-static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
-static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
@@ -245,8 +329,13 @@ static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
- return pending_irqs_no_gisa(vcpu) |
- kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
+ struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
+ unsigned long pending_mask;
+
+ pending_mask = pending_irqs_no_gisa(vcpu);
+ if (gi->origin)
+ pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
+ return pending_mask;
}
static inline int isc_to_irq_type(unsigned long isc)
@@ -318,13 +407,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
- set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+ set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
- clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+ clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
@@ -345,7 +434,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
return;
- else if (psw_ioint_disabled(vcpu))
+ if (psw_ioint_disabled(vcpu))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
else
vcpu->arch.sie_block->lctl |= LCTL_CR6;
@@ -353,7 +442,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
- if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+ if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
return;
if (psw_extint_disabled(vcpu))
kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
@@ -363,7 +452,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
- if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+ if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
return;
if (psw_mchk_disabled(vcpu))
vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -956,6 +1045,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
{
struct list_head *isc_list;
struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
struct kvm_s390_interrupt_info *inti = NULL;
struct kvm_s390_io_info io;
u32 isc;
@@ -998,8 +1088,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
goto out;
}
- if (vcpu->kvm->arch.gisa &&
- kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
+ if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
/*
* in case an adapter interrupt was not delivered
* in SIE context KVM will handle the delivery
@@ -1089,6 +1178,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
u64 sltime;
vcpu->stat.exit_wait_state++;
@@ -1102,6 +1192,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP; /* disabled wait */
}
+ if (gi->origin &&
+ (gisa_get_ipm_or_restore_iam(gi) &
+ vcpu->arch.sie_block->gcr[6] >> 24))
+ return 0;
+
if (!ckc_interrupts_enabled(vcpu) &&
!cpu_timer_interrupts_enabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
@@ -1533,18 +1628,19 @@ static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
unsigned long active_mask;
int isc;
if (schid)
goto out;
- if (!kvm->arch.gisa)
+ if (!gi->origin)
goto out;
- active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
+ active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
while (active_mask) {
isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
- if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
+ if (gisa_tac_ipm_gisc(gi->origin, isc))
return isc;
clear_bit_inv(isc, &active_mask);
}
@@ -1567,6 +1663,7 @@ out:
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 isc_mask, u32 schid)
{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_interrupt_info *inti, *tmp_inti;
int isc;
@@ -1584,7 +1681,7 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
/* both types of interrupts present */
if (int_word_to_isc(inti->io.io_int_word) <= isc) {
/* classical IO int with higher priority */
- kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+ gisa_set_ipm_gisc(gi->origin, isc);
goto out;
}
gisa_out:
@@ -1596,7 +1693,7 @@ gisa_out:
kvm_s390_reinject_io_int(kvm, inti);
inti = tmp_inti;
} else
- kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+ gisa_set_ipm_gisc(gi->origin, isc);
out:
return inti;
}
@@ -1685,6 +1782,7 @@ static int __inject_float_mchk(struct kvm *kvm,
static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_float_interrupt *fi;
struct list_head *list;
int isc;
@@ -1692,9 +1790,9 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
kvm->stat.inject_io++;
isc = int_word_to_isc(inti->io.io_int_word);
- if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
+ if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
- kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+ gisa_set_ipm_gisc(gi->origin, isc);
kfree(inti);
return 0;
}
@@ -1726,7 +1824,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
*/
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
- struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
struct kvm_vcpu *dst_vcpu;
int sigcpu, online_vcpus, nr_tries = 0;
@@ -1735,11 +1832,11 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
return;
/* find idle VCPUs first, then round robin */
- sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
+ sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
if (sigcpu == online_vcpus) {
do {
- sigcpu = fi->next_rr_cpu;
- fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
+ sigcpu = kvm->arch.float_int.next_rr_cpu++;
+ kvm->arch.float_int.next_rr_cpu %= online_vcpus;
/* avoid endless loops if all vcpus are stopped */
if (nr_tries++ >= online_vcpus)
return;
@@ -1753,7 +1850,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
+ if (!(type & KVM_S390_INT_IO_AI_MASK &&
+ kvm->arch.gisa_int.origin))
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
break;
default:
@@ -2003,6 +2101,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_float_interrupt *fi;
struct kvm_s390_irq *buf;
@@ -2026,15 +2125,14 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
max_irqs = len / sizeof(struct kvm_s390_irq);
- if (kvm->arch.gisa &&
- kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
+ if (gi->origin && gisa_get_ipm(gi->origin)) {
for (i = 0; i <= MAX_ISC; i++) {
if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
goto out_nolock;
}
- if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
+ if (gisa_tac_ipm_gisc(gi->origin, i)) {
irq = (struct kvm_s390_irq *) &buf[n];
irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
irq->u.io.io_int_word = isc_to_int_word(i);
@@ -2831,7 +2929,7 @@ static void store_local_irq(struct kvm_s390_local_interrupt *li,
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
int scn;
- unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
unsigned long pending_irqs;
struct kvm_s390_irq irq;
@@ -2884,27 +2982,278 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
return n;
}
-void kvm_s390_gisa_clear(struct kvm *kvm)
+static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
{
- if (kvm->arch.gisa) {
- memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
- kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
- VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
+ int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+ struct kvm_vcpu *vcpu;
+
+ for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
+ vcpu = kvm_get_vcpu(kvm, vcpu_id);
+ if (psw_ioint_disabled(vcpu))
+ continue;
+ deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
+ if (deliverable_mask) {
+ /* recently kicked but not yet running */
+ if (test_and_set_bit(vcpu_id, gi->kicked_mask))
+ return;
+ kvm_s390_vcpu_wakeup(vcpu);
+ return;
+ }
}
}
+static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
+{
+ struct kvm_s390_gisa_interrupt *gi =
+ container_of(timer, struct kvm_s390_gisa_interrupt, timer);
+ struct kvm *kvm =
+ container_of(gi->origin, struct sie_page2, gisa)->kvm;
+ u8 pending_mask;
+
+ pending_mask = gisa_get_ipm_or_restore_iam(gi);
+ if (pending_mask) {
+ __airqs_kick_single_vcpu(kvm, pending_mask);
+ hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
+ return HRTIMER_RESTART;
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+#define NULL_GISA_ADDR 0x00000000UL
+#define NONE_GISA_ADDR 0x00000001UL
+#define GISA_ADDR_MASK 0xfffff000UL
+
+static void process_gib_alert_list(void)
+{
+ struct kvm_s390_gisa_interrupt *gi;
+ struct kvm_s390_gisa *gisa;
+ struct kvm *kvm;
+ u32 final, origin = 0UL;
+
+ do {
+ /*
+ * If the NONE_GISA_ADDR is still stored in the alert list
+ * origin, we will leave the outer loop. No further GISA has
+ * been added to the alert list by millicode while processing
+ * the current alert list.
+ */
+ final = (origin & NONE_GISA_ADDR);
+ /*
+ * Cut off the alert list and store the NONE_GISA_ADDR in the
+ * alert list origin to avoid further GAL interruptions.
+ * A new alert list can be built up by millicode in parallel
+ * for guests not yet in the cut-off alert list. When in the
+ * final loop, store the NULL_GISA_ADDR instead. This re-
+ * enables GAL interruptions on the host.
+ */
+ origin = xchg(&gib->alert_list_origin,
+ (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
+ /*
+ * Loop through the just cut-off alert list and start the
+ * gisa timers to kick idle vcpus to consume the pending
+ * interruptions asap.
+ */
+ while (origin & GISA_ADDR_MASK) {
+ gisa = (struct kvm_s390_gisa *)(u64)origin;
+ origin = gisa->next_alert;
+ gisa->next_alert = (u32)(u64)gisa;
+ kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
+ gi = &kvm->arch.gisa_int;
+ if (hrtimer_active(&gi->timer))
+ hrtimer_cancel(&gi->timer);
+ hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
+ }
+ } while (!final);
+}
+
+void kvm_s390_gisa_clear(struct kvm *kvm)
+{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+ if (!gi->origin)
+ return;
+ gisa_clear_ipm(gi->origin);
+ VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
+}
+
void kvm_s390_gisa_init(struct kvm *kvm)
{
- if (css_general_characteristics.aiv) {
- kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
- VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
- kvm_s390_gisa_clear(kvm);
- }
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+ if (!css_general_characteristics.aiv)
+ return;
+ gi->origin = &kvm->arch.sie_page2->gisa;
+ gi->alert.mask = 0;
+ spin_lock_init(&gi->alert.ref_lock);
+ gi->expires = 50 * 1000; /* 50 usec */
+ hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ gi->timer.function = gisa_vcpu_kicker;
+ memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
+ gi->origin->next_alert = (u32)(u64)gi->origin;
+ VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
}
void kvm_s390_gisa_destroy(struct kvm *kvm)
{
- if (!kvm->arch.gisa)
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+ if (!gi->origin)
+ return;
+ if (gi->alert.mask)
+ KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
+ kvm, gi->alert.mask);
+ while (gisa_in_alert_list(gi->origin))
+ cpu_relax();
+ hrtimer_cancel(&gi->timer);
+ gi->origin = NULL;
+}
+
+/**
+ * kvm_s390_gisc_register - register a guest ISC
+ *
+ * @kvm: the kernel vm to work with
+ * @gisc: the guest interruption sub class to register
+ *
+ * The function extends the vm-specific alert mask in use.
+ * The effective IAM mask in the GISA is updated as well
+ * in case the GISA is not part of the GIB alert list.
+ * It will be updated at the latest when the IAM gets restored
+ * by gisa_get_ipm_or_restore_iam().
+ *
+ * Returns: the nonspecific ISC (NISC) the gib alert mechanism
+ * has registered with the channel subsystem.
+ * -ENODEV in case the vm uses no GISA
+ * -ERANGE in case the guest ISC is invalid
+ */
+int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
+{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+ if (!gi->origin)
+ return -ENODEV;
+ if (gisc > MAX_ISC)
+ return -ERANGE;
+
+ spin_lock(&gi->alert.ref_lock);
+ gi->alert.ref_count[gisc]++;
+ if (gi->alert.ref_count[gisc] == 1) {
+ gi->alert.mask |= 0x80 >> gisc;
+ gisa_set_iam(gi->origin, gi->alert.mask);
+ }
+ spin_unlock(&gi->alert.ref_lock);
+
+ return gib->nisc;
+}
+EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);
+
+/**
+ * kvm_s390_gisc_unregister - unregister a guest ISC
+ *
+ * @kvm: the kernel vm to work with
+ * @gisc: the guest interruption sub class to unregister
+ *
+ * The function reduces the vm-specific alert mask in use.
+ * The effective IAM mask in the GISA is updated as well
+ * in case the GISA is not part of the GIB alert list.
+ * It will be updated at the latest when the IAM gets restored
+ * by gisa_get_ipm_or_restore_iam().
+ *
+ * Returns: 0 on success
+ * -ENODEV in case the vm uses no GISA
+ * -ERANGE in case the guest ISC is invalid
+ * -EINVAL in case the guest ISC is not registered
+ */
+int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
+{
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+ int rc = 0;
+
+ if (!gi->origin)
+ return -ENODEV;
+ if (gisc > MAX_ISC)
+ return -ERANGE;
+
+ spin_lock(&gi->alert.ref_lock);
+ if (gi->alert.ref_count[gisc] == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ gi->alert.ref_count[gisc]--;
+ if (gi->alert.ref_count[gisc] == 0) {
+ gi->alert.mask &= ~(0x80 >> gisc);
+ gisa_set_iam(gi->origin, gi->alert.mask);
+ }
+out:
+ spin_unlock(&gi->alert.ref_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
+
+static void gib_alert_irq_handler(struct airq_struct *airq, bool floating)
+{
+ inc_irq_stat(IRQIO_GAL);
+ process_gib_alert_list();
+}
+
+static struct airq_struct gib_alert_irq = {
+ .handler = gib_alert_irq_handler,
+ .lsi_ptr = &gib_alert_irq.lsi_mask,
+};
+
+void kvm_s390_gib_destroy(void)
+{
+ if (!gib)
return;
- kvm->arch.gisa = NULL;
+ chsc_sgib(0);
+ unregister_adapter_interrupt(&gib_alert_irq);
+ free_page((unsigned long)gib);
+ gib = NULL;
+}
+
+int kvm_s390_gib_init(u8 nisc)
+{
+ int rc = 0;
+
+ if (!css_general_characteristics.aiv) {
+ KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
+ goto out;
+ }
+
+ gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!gib) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ gib_alert_irq.isc = nisc;
+ if (register_adapter_interrupt(&gib_alert_irq)) {
+ pr_err("Registering the GIB alert interruption handler failed\n");
+ rc = -EIO;
+ goto out_free_gib;
+ }
+
+ gib->nisc = nisc;
+ if (chsc_sgib((u32)(u64)gib)) {
+ pr_err("Associating the GIB with the AIV facility failed\n");
+ free_page((unsigned long)gib);
+ gib = NULL;
+ rc = -EIO;
+ goto out_unreg_gal;
+ }
+
+ KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
+ goto out;
+
+out_unreg_gal:
+ unregister_adapter_interrupt(&gib_alert_irq);
+out_free_gib:
+ free_page((unsigned long)gib);
+ gib = NULL;
+out:
+ return rc;
}
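
A usage sketch for the newly exported GISC API from a hypothetical in-kernel user; the driver function names are illustrative:

	static int my_driver_attach(struct kvm *kvm, u32 guest_isc, u8 *nisc)
	{
		int ret = kvm_s390_gisc_register(kvm, guest_isc);

		if (ret < 0)
			return ret;	/* -ENODEV or -ERANGE */
		*nisc = ret;		/* NISC to use with the channel subsystem */
		return 0;
	}

	static void my_driver_detach(struct kvm *kvm, u32 guest_isc)
	{
		kvm_s390_gisc_unregister(kvm, guest_isc);
	}
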
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7f4bc58a53b9..4638303ba6a8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -432,11 +432,18 @@ int kvm_arch_init(void *opaque)
/* Register floating interrupt controller interface. */
rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
if (rc) {
- pr_err("Failed to register FLIC rc=%d\n", rc);
+ pr_err("A FLIC registration call failed with rc=%d\n", rc);
goto out_debug_unreg;
}
+
+ rc = kvm_s390_gib_init(GAL_ISC);
+ if (rc)
+ goto out_gib_destroy;
+
return 0;
+out_gib_destroy:
+ kvm_s390_gib_destroy();
out_debug_unreg:
debug_unregister(kvm_s390_dbf);
return rc;
@@ -444,6 +451,7 @@ out_debug_unreg:
void kvm_arch_exit(void)
{
+ kvm_s390_gib_destroy();
debug_unregister(kvm_s390_dbf);
}
@@ -1258,11 +1266,65 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
struct kvm_device_attr *attr)
{
- /*
- * Once supported by kernel + hw, we have to store the subfunctions
- * in kvm->arch and remember that user space configured them.
- */
- return -ENXIO;
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus) {
+ mutex_unlock(&kvm->lock);
+ return -EBUSY;
+ }
+
+ if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
+ sizeof(struct kvm_s390_vm_cpu_subfunc))) {
+ mutex_unlock(&kvm->lock);
+ return -EFAULT;
+ }
+ mutex_unlock(&kvm->lock);
+
+ VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
+ VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
+ VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
+ VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
+ VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+
+ return 0;
}
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
@@ -1381,12 +1443,56 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm,
static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
struct kvm_device_attr *attr)
{
- /*
- * Once we can actually configure subfunctions (kernel + hw support),
- * we have to check if they were already set by user space, if so copy
- * them from kvm->arch.
- */
- return -ENXIO;
+ if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
+ sizeof(struct kvm_s390_vm_cpu_subfunc)))
+ return -EFAULT;
+
+ VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
+ VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
+ VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
+ VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
+ VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+
+ return 0;
}
static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
@@ -1395,8 +1501,55 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
sizeof(struct kvm_s390_vm_cpu_subfunc)))
return -EFAULT;
+
+ VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
+ ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
+ ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
+ VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
+ VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
+ VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
+ VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
+ VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
+ VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
+ VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
+ VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
+
return 0;
}
+
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret = -ENXIO;
@@ -1514,10 +1667,9 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CPU_PROCESSOR_FEAT:
case KVM_S390_VM_CPU_MACHINE_FEAT:
case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
+ case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
ret = 0;
break;
- /* configuring subfunctions is not supported yet */
- case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
default:
ret = -ENXIO;
break;
@@ -2209,6 +2361,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.sie_page2)
goto out_err;
+ kvm->arch.sie_page2->kvm = kvm;
kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
for (i = 0; i < kvm_s390_fac_size(); i++) {
@@ -2218,6 +2371,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
kvm_s390_fac_base[i];
}
+ kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
/* we are always in czam mode - even on pre z14 machines */
set_kvm_facility(kvm->arch.model.fac_mask, 138);
@@ -2812,7 +2966,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->icpua = id;
spin_lock_init(&vcpu->arch.local_int.lock);
- vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
+ vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
vcpu->arch.sie_block->gd |= GISA_FORMAT1;
seqcount_init(&vcpu->arch.cputm_seqcount);
@@ -3458,6 +3612,8 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
kvm_s390_patch_guest_per_regs(vcpu);
}
+ clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
+
vcpu->arch.sie_block->icptcode = 0;
cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
@@ -4293,12 +4449,12 @@ static int __init kvm_s390_init(void)
int i;
if (!sclp.has_sief2) {
- pr_info("SIE not available\n");
+ pr_info("SIE is not available\n");
return -ENODEV;
}
if (nested && hpage) {
- pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently");
+ pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
return -EINVAL;
}
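
The new processor-subfunc setter is a check-then-copy under a single lock: it takes kvm->lock, refuses with -EBUSY once any vCPU exists (the CPU model must be frozen before vCPUs can observe it), and only then copies the blob in; copy_from_user() returns the number of bytes left uncopied, so any nonzero result becomes -EFAULT. A compilable userspace sketch of the same pattern (all names hypothetical, not KVM's actual structures):

#include <errno.h>
#include <pthread.h>
#include <string.h>

struct demo_vm {
	pthread_mutex_t lock;
	int created_vcpus;
	unsigned long subfuncs[32];
};

/* Configuration may only be replaced while no vCPUs exist; the check
 * and the copy happen under the same lock so a vCPU cannot be created
 * in between. */
static int demo_set_subfuncs(struct demo_vm *vm, const unsigned long *src)
{
	pthread_mutex_lock(&vm->lock);
	if (vm->created_vcpus) {
		pthread_mutex_unlock(&vm->lock);
		return -EBUSY;	/* model is already visible to vCPUs */
	}
	memcpy(vm->subfuncs, src, sizeof(vm->subfuncs));
	pthread_mutex_unlock(&vm->lock);
	return 0;
}
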
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 1f6e36cdce0d..6d9448dbd052 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -67,7 +67,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
- return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+ return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}
static inline int kvm_is_ucontrol(struct kvm *kvm)
@@ -381,6 +381,8 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
+int kvm_s390_gib_init(u8 nisc);
+void kvm_s390_gib_destroy(void);
/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 53008da05190..dc0874f2e203 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -178,6 +178,7 @@ ENTRY(__memset\bits)
BR_EX %r14
.L__memset_mvc\bits:
mvc \bytes(1,%r1),0(%r1)
+ENDPROC(__memset\bits)
.endm
__MEMSET 16,2,sth
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index f5880bfd1b0c..3175413186b9 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -4,7 +4,7 @@
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o maccess.o
-obj-y += page-states.o gup.o pageattr.o pgtable.o pgalloc.o
+obj-y += page-states.o pageattr.o pgtable.o pgalloc.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 11613362c4e7..c220399ae196 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -247,12 +247,24 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
current);
}
+const struct exception_table_entry *s390_search_extables(unsigned long addr)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_extable(__start_dma_ex_table,
+ __stop_dma_ex_table - __start_dma_ex_table,
+ addr);
+ if (!fixup)
+ fixup = search_exception_tables(addr);
+ return fixup;
+}
+
static noinline void do_no_context(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
/* Are we prepared to handle this kernel fault? */
- fixup = search_exception_tables(regs->psw.addr);
+ fixup = s390_search_extables(regs->psw.addr);
if (fixup) {
regs->psw.addr = extable_fixup(fixup);
return;
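
s390_search_extables() gives the DMA-section exception table priority and falls back to the generic kernel/module tables only when no fixup is found there. The real tables are sorted and binary-searched via search_extable(); the sketch below uses a linear walk purely to show the two-stage "first table wins" chaining (all types and names are illustrative):

#include <stddef.h>

struct demo_extable_entry {
	unsigned long insn;
	unsigned long fixup;
};

/* Linear stand-in for search_extable(). */
static const struct demo_extable_entry *
demo_search_table(const struct demo_extable_entry *tab, size_t n,
		  unsigned long addr)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tab[i].insn == addr)
			return &tab[i];
	return NULL;
}

/* Two-stage lookup mirroring s390_search_extables(): the DMA-section
 * table is consulted first, the generic tables only on a miss. */
static const struct demo_extable_entry *
demo_search_extables(const struct demo_extable_entry *dma, size_t ndma,
		     const struct demo_extable_entry *gen, size_t ngen,
		     unsigned long addr)
{
	const struct demo_extable_entry *fixup;

	fixup = demo_search_table(dma, ndma, addr);
	if (!fixup)
		fixup = demo_search_table(gen, ngen, addr);
	return fixup;
}
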
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
deleted file mode 100644
index 2809d11c7a28..000000000000
--- a/arch/s390/mm/gup.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lockless get_user_pages_fast for s390
- *
- * Copyright IBM Corp. 2010
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/vmstat.h>
-#include <linux/pagemap.h>
-#include <linux/rwsem.h>
-#include <asm/pgtable.h>
-
-/*
- * The performance-critical leaf functions are made noinline; otherwise gcc
- * inlines everything into a single function, which results in too much
- * register pressure.
- */
-static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- struct page *head, *page;
- unsigned long mask;
- pte_t *ptep, pte;
-
- mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
-
- ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
- do {
- pte = *ptep;
- barrier();
- /* Similar to the PMD case, NUMA hinting must take slow path */
- if (pte_protnone(pte))
- return 0;
- if ((pte_val(pte) & mask) != 0)
- return 0;
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- return 0;
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- put_page(head);
- return 0;
- }
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-
- return 1;
-}
-
-static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- struct page *head, *page;
- unsigned long mask;
- int refs;
-
- mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
- if ((pmd_val(pmd) & mask) != 0)
- return 0;
- VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
-
- refs = 0;
- head = pmd_page(pmd);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-
-static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp, pmd;
-
- pmdp = (pmd_t *) pudp;
- if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
- pmdp = (pmd_t *) pud_deref(pud);
- pmdp += pmd_index(addr);
- do {
- pmd = *pmdp;
- barrier();
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (unlikely(pmd_large(pmd))) {
- /*
- * NUMA hinting faults need to be handled in the GUP
- * slowpath for accounting purposes and so that they
- * can be serialised against THP migration.
- */
- if (pmd_protnone(pmd))
- return 0;
- if (!gup_huge_pmd(pmdp, pmd, addr, next,
- write, pages, nr))
- return 0;
- } else if (!gup_pte_range(pmdp, pmd, addr, next,
- write, pages, nr))
- return 0;
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- struct page *head, *page;
- unsigned long mask;
- int refs;
-
- mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
- if ((pud_val(pud) & mask) != 0)
- return 0;
- VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
-
- refs = 0;
- head = pud_page(pud);
- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pud_val(pud) != pud_val(*pudp))) {
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp, pud;
-
- pudp = (pud_t *) p4dp;
- if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
- pudp = (pud_t *) p4d_deref(p4d);
- pudp += pud_index(addr);
- do {
- pud = *pudp;
- barrier();
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (unlikely(pud_large(pud))) {
- if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
- nr))
- return 0;
- } else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
- nr))
- return 0;
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long next;
- p4d_t *p4dp, p4d;
-
- p4dp = (p4d_t *) pgdp;
- if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
- p4dp = (p4d_t *) pgd_deref(pgd);
- p4dp += p4d_index(addr);
- do {
- p4d = *p4dp;
- barrier();
- next = p4d_addr_end(addr, end);
- if (p4d_none(p4d))
- return 0;
- if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr))
- return 0;
- } while (p4dp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next, flags;
- pgd_t *pgdp, pgd;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if ((end <= start) || (end > mm->context.asce_limit))
- return 0;
- /*
- * local_irq_save() doesn't prevent pagetable teardown, but does
- * prevent the pagetables from being freed on s390.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the page and take a ref on it.
- */
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd = *pgdp;
- barrier();
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- int nr, ret;
-
- might_sleep();
- start &= PAGE_MASK;
- nr = __get_user_pages_fast(start, nr_pages, write, pages);
- if (nr == nr_pages)
- return nr;
-
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
- ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
- write ? FOLL_WRITE : 0);
- /* Have to be a bit careful with return values */
- if (nr > 0)
- ret = (ret < 0) ? nr : ret + nr;
- return ret;
-}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3e82f66d5c61..7cf48eefec8f 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -49,6 +49,8 @@ unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);
+bool initmem_freed;
+
static void __init setup_zero_pages(void)
{
unsigned int order;
@@ -148,6 +150,7 @@ void __init mem_init(void)
void free_initmem(void)
{
+ initmem_freed = true;
__set_memory((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RW | SET_MEMORY_NX);
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 97b3ee53852b..818deeb1ebc3 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -16,6 +16,7 @@
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
+#include <asm/stacktrace.h>
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index db6bb2f97a2c..99e06213a22b 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -290,7 +290,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
tlb_remove_table(tlb, table);
}
-static void __tlb_remove_table(void *_table)
+void __tlb_remove_table(void *_table)
{
unsigned int mask = (unsigned long) _table & 3;
void *table = (void *)((unsigned long) _table ^ mask);
@@ -316,67 +316,6 @@ static void __tlb_remove_table(void *_table)
}
}
-static void tlb_remove_table_smp_sync(void *arg)
-{
- /* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
- /*
- * This isn't an RCU grace period and hence the page-tables cannot be
- * assumed to be actually RCU-freed.
- *
- * It is however sufficient for software page-table walkers that rely
- * on IRQ disabling. See the comment near struct mmu_table_batch.
- */
- smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
- __tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
- struct mmu_table_batch *batch;
- int i;
-
- batch = container_of(head, struct mmu_table_batch, rcu);
-
- for (i = 0; i < batch->nr; i++)
- __tlb_remove_table(batch->tables[i]);
-
- free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- if (*batch) {
- call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
- *batch = NULL;
- }
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- tlb->mm->context.flush_mm = 1;
- if (*batch == NULL) {
- *batch = (struct mmu_table_batch *)
- __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
- if (*batch == NULL) {
- __tlb_flush_mm_lazy(tlb->mm);
- tlb_remove_table_one(table);
- return;
- }
- (*batch)->nr = 0;
- }
- (*batch)->tables[(*batch)->nr++] = table;
- if ((*batch)->nr == MAX_TABLE_BATCH)
- tlb_flush_mmu(tlb);
-}
-
/*
* Base infrastructure required to generate basic asces, region, segment,
* and page tables that do not make use of enhanced features like EDAT1.
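
With the RCU-batching helpers deleted above, the batching presumably moves to the common mmu_gather code, and s390 keeps only __tlb_remove_table() (made non-static here) as the callback. Its job is to decode the low two pointer bits, which are free because page tables are at least 4-byte aligned, into the kind of table being freed. A small sketch of that tagging trick (values illustrative):

#include <stdio.h>

/* The two low pointer bits carry a type code; __tlb_remove_table()
 * strips and decodes them the same way. */
static void demo_remove_table(void *tagged)
{
	unsigned long mask = (unsigned long)tagged & 3;		/* type */
	void *table = (void *)((unsigned long)tagged ^ mask);	/* pointer */

	printf("table %p, kind %lu\n", table, mask);
}

int main(void)
{
	static unsigned long buf[8];	/* stands in for a page table */

	demo_remove_table((void *)((unsigned long)buf | 2));
	return 0;
}
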
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 8485d6dc2754..9ebd01219812 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
return old;
}
+#ifdef CONFIG_PGSTE
static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
@@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
pmd = pmd_alloc(mm, pud, addr);
return pmd;
}
+#endif
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t new)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 0472e27febdf..b403fa14847d 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -413,6 +413,8 @@ void __init vmem_map_init(void)
__set_memory((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
+ __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+ SET_MEMORY_RO | SET_MEMORY_X);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 51dd0267d014..5e7c63033159 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -455,7 +455,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
save_restore_regs(jit, REGS_RESTORE, stack_depth);
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
if (test_facility(35)) {
@@ -473,7 +473,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
/* br %r14 */
_EMIT2(0x07fe);
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
(jit->seen & SEEN_FUNC)) {
jit->r1_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r1 thunk */
@@ -999,7 +999,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
/* lg %w1,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
EMIT_CONST_U64(func));
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
/* brasl %r14,__s390_indirect_jump_r1 */
EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
} else {
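
The IS_ENABLED() to __is_defined() switch is about intent: IS_ENABLED() is meant for CONFIG_* symbols (it also ORs in the *_MODULE variant), whereas CC_USING_EXPOLINE is a plain compiler define (-DCC_USING_EXPOLINE, i.e. defined to 1), for which __is_defined() is the direct test. A self-contained sketch of the preprocessor trick, simplified from include/linux/kconfig.h:

#include <stdio.h>

/* Expands to 1 if `x` is #defined to 1 (as -DX does on the command
 * line), else to 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define DEMO_ON 1	/* as if built with -DDEMO_ON */

int main(void)
{
	printf("DEMO_ON:  %d\n", __is_defined(DEMO_ON));	/* prints 1 */
	printf("DEMO_OFF: %d\n", __is_defined(DEMO_OFF));	/* prints 0 */
	return 0;
}
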
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index bfba273c32c0..71a12a4f4906 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -313,6 +313,9 @@ static void __ref create_core_to_node_map(void)
int i;
emu_cores = memblock_alloc(sizeof(*emu_cores), 8);
+ if (!emu_cores)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 2d1271e2a70d..8eb9e9743f5d 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -92,8 +92,12 @@ static void __init numa_setup_memory(void)
} while (cur_base < end_of_dram);
/* Allocate and fill out node_data */
- for (nid = 0; nid < MAX_NUMNODES; nid++)
+ for (nid = 0; nid < MAX_NUMNODES; nid++) {
NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
+ if (!NODE_DATA(nid))
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(pg_data_t), 8);
+ }
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 43d9525c36fc..7441857df51b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -13,23 +13,17 @@
#include <linux/oprofile.h>
#include <linux/init.h>
#include <asm/processor.h>
-
-static int __s390_backtrace(void *data, unsigned long address, int reliable)
-{
- unsigned int *depth = data;
-
- if (*depth == 0)
- return 1;
- (*depth)--;
- oprofile_add_trace(address);
- return 0;
-}
+#include <asm/unwind.h>
static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
{
- if (user_mode(regs))
- return;
- dump_trace(__s390_backtrace, &depth, NULL, regs->gprs[15]);
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, current, regs, 0) {
+ if (depth-- == 0)
+ break;
+ oprofile_add_trace(state.ip);
+ }
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 22d0871291ee..748626a33028 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -3,5 +3,5 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
+obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o pci_mmio.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index dc9bc82c072c..0ebb7c405a25 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -24,11 +24,9 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
+#include <linux/jump_label.h>
#include <linux/pci.h>
-#include <linux/msi.h>
#include <asm/isc.h>
#include <asm/airq.h>
@@ -37,30 +35,13 @@
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>
-#define DEBUG /* enable pr_debug */
-
-#define SIC_IRQ_MODE_ALL 0
-#define SIC_IRQ_MODE_SINGLE 1
-
-#define ZPCI_NR_DMA_SPACES 1
-#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
-
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
-static struct irq_chip zpci_irq_chip = {
- .name = "zPCI",
- .irq_unmask = pci_msi_unmask_irq,
- .irq_mask = pci_msi_mask_irq,
-};
-
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);
-static struct airq_iv *zpci_aisb_iv;
-static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];
-
#define ZPCI_IOMAP_ENTRIES \
min(((unsigned long) ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2), \
ZPCI_IOMAP_MAX_ENTRIES)
@@ -70,6 +51,8 @@ static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);
+DEFINE_STATIC_KEY_FALSE(have_mio);
+
static struct kmem_cache *zdev_fmb_cache;
struct zpci_dev *get_zdev_by_fid(u32 fid)
@@ -123,39 +106,6 @@ int pci_proc_domain(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
-/* Modify PCI: Register adapter interruptions */
-static int zpci_set_airq(struct zpci_dev *zdev)
-{
- u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
- struct zpci_fib fib = {0};
- u8 status;
-
- fib.isc = PCI_ISC;
- fib.sum = 1; /* enable summary notifications */
- fib.noi = airq_iv_end(zdev->aibv);
- fib.aibv = (unsigned long) zdev->aibv->vector;
- fib.aibvo = 0; /* each zdev has its own interrupt vector */
- fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
- fib.aisbo = zdev->aisb & 63;
-
- return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
-}
-
-/* Modify PCI: Unregister adapter interruptions */
-static int zpci_clear_airq(struct zpci_dev *zdev)
-{
- u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
- struct zpci_fib fib = {0};
- u8 cc, status;
-
- cc = zpci_mod_fc(req, &fib, &status);
- if (cc == 3 || (cc == 1 && status == 24))
- /* Function already gone or IRQs already deregistered. */
- cc = 0;
-
- return cc ? -EIO : 0;
-}
-
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
u64 base, u64 limit, u64 iota)
@@ -241,7 +191,7 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
u64 data;
int rc;
- rc = zpci_load(&data, req, offset);
+ rc = __zpci_load(&data, req, offset);
if (!rc) {
data = le64_to_cpu((__force __le64) data);
data >>= (8 - len) * 8;
@@ -259,7 +209,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
data <<= (8 - len) * 8;
data = (__force u64) cpu_to_le64(data);
- rc = zpci_store(data, req, offset);
+ rc = __zpci_store(data, req, offset);
return rc;
}
@@ -276,18 +226,48 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
zpci_memcpy_toio(to, from, count);
}
+void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
+{
+ struct vm_struct *area;
+ unsigned long offset;
+
+ if (!size)
+ return NULL;
+
+ if (!static_branch_unlikely(&have_mio))
+ return (void __iomem *) ioaddr;
+
+ offset = ioaddr & ~PAGE_MASK;
+ ioaddr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset);
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ if (ioremap_page_range((unsigned long) area->addr,
+ (unsigned long) area->addr + size,
+ ioaddr, PAGE_KERNEL)) {
+ vunmap(area->addr);
+ return NULL;
+ }
+ return (void __iomem *) ((unsigned long) area->addr + offset);
+}
+EXPORT_SYMBOL(ioremap);
+
+void iounmap(volatile void __iomem *addr)
+{
+ if (static_branch_likely(&have_mio))
+ vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
+
/* Create a virtual mapping cookie for a PCI BAR */
-void __iomem *pci_iomap_range(struct pci_dev *pdev,
- int bar,
- unsigned long offset,
- unsigned long max)
+static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
{
struct zpci_dev *zdev = to_zpci(pdev);
int idx;
- if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
- return NULL;
-
idx = zdev->bars[bar].map_idx;
spin_lock(&zpci_iomap_lock);
/* Detect overrun */
@@ -298,6 +278,30 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
return (void __iomem *) ZPCI_ADDR(idx) + offset;
}
+
+static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
+ unsigned long offset,
+ unsigned long max)
+{
+ unsigned long barsize = pci_resource_len(pdev, bar);
+ struct zpci_dev *zdev = to_zpci(pdev);
+ void __iomem *iova;
+
+ iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
+ return iova ? iova + offset : iova;
+}
+
+void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
+ return NULL;
+
+ if (static_branch_likely(&have_mio))
+ return pci_iomap_range_mio(pdev, bar, offset, max);
+ else
+ return pci_iomap_range_fh(pdev, bar, offset, max);
+}
EXPORT_SYMBOL(pci_iomap_range);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
@@ -306,7 +310,37 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
}
EXPORT_SYMBOL(pci_iomap);
-void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
+static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ unsigned long barsize = pci_resource_len(pdev, bar);
+ struct zpci_dev *zdev = to_zpci(pdev);
+ void __iomem *iova;
+
+ iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
+ return iova ? iova + offset : iova;
+}
+
+void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
+ return NULL;
+
+ if (static_branch_likely(&have_mio))
+ return pci_iomap_wc_range_mio(pdev, bar, offset, max);
+ else
+ return pci_iomap_range_fh(pdev, bar, offset, max);
+}
+EXPORT_SYMBOL(pci_iomap_wc_range);
+
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap_wc);
+
+static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
unsigned int idx = ZPCI_IDX(addr);
@@ -319,6 +353,19 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
}
spin_unlock(&zpci_iomap_lock);
}
+
+static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
+{
+ iounmap(addr);
+}
+
+void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ if (static_branch_likely(&have_mio))
+ pci_iounmap_mio(pdev, addr);
+ else
+ pci_iounmap_fh(pdev, addr);
+}
EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
@@ -354,136 +401,6 @@ static struct pci_ops pci_root_ops = {
.write = pci_write,
};
-static void zpci_irq_handler(struct airq_struct *airq)
-{
- unsigned long si, ai;
- struct airq_iv *aibv;
- int irqs_on = 0;
-
- inc_irq_stat(IRQIO_PCI);
- for (si = 0;;) {
- /* Scan adapter summary indicator bit vector */
- si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
- if (si == -1UL) {
- if (irqs_on++)
- /* End of second scan with interrupts on. */
- break;
- /* First scan complete, reenable interrupts. */
- if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
- break;
- si = 0;
- continue;
- }
-
- /* Scan the adapter interrupt vector for this device. */
- aibv = zpci_aibv[si];
- for (ai = 0;;) {
- ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
- if (ai == -1UL)
- break;
- inc_irq_stat(IRQIO_MSI);
- airq_iv_lock(aibv, ai);
- generic_handle_irq(airq_iv_get_data(aibv, ai));
- airq_iv_unlock(aibv, ai);
- }
- }
-}
-
-int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- struct zpci_dev *zdev = to_zpci(pdev);
- unsigned int hwirq, msi_vecs;
- unsigned long aisb;
- struct msi_desc *msi;
- struct msi_msg msg;
- int rc, irq;
-
- zdev->aisb = -1UL;
- if (type == PCI_CAP_ID_MSI && nvec > 1)
- return 1;
- msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
-
- /* Allocate adapter summary indicator bit */
- aisb = airq_iv_alloc_bit(zpci_aisb_iv);
- if (aisb == -1UL)
- return -EIO;
- zdev->aisb = aisb;
-
- /* Create adapter interrupt vector */
- zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
- if (!zdev->aibv)
- return -ENOMEM;
-
- /* Wire up shortcut pointer */
- zpci_aibv[aisb] = zdev->aibv;
-
- /* Request MSI interrupts */
- hwirq = 0;
- for_each_pci_msi_entry(msi, pdev) {
- if (hwirq >= msi_vecs)
- break;
- irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
- if (irq < 0)
- return -ENOMEM;
- rc = irq_set_msi_desc(irq, msi);
- if (rc)
- return rc;
- irq_set_chip_and_handler(irq, &zpci_irq_chip,
- handle_simple_irq);
- msg.data = hwirq;
- msg.address_lo = zdev->msi_addr & 0xffffffff;
- msg.address_hi = zdev->msi_addr >> 32;
- pci_write_msi_msg(irq, &msg);
- airq_iv_set_data(zdev->aibv, hwirq, irq);
- hwirq++;
- }
-
- /* Enable adapter interrupts */
- rc = zpci_set_airq(zdev);
- if (rc)
- return rc;
-
- return (msi_vecs == nvec) ? 0 : msi_vecs;
-}
-
-void arch_teardown_msi_irqs(struct pci_dev *pdev)
-{
- struct zpci_dev *zdev = to_zpci(pdev);
- struct msi_desc *msi;
- int rc;
-
- /* Disable adapter interrupts */
- rc = zpci_clear_airq(zdev);
- if (rc)
- return;
-
- /* Release MSI interrupts */
- for_each_pci_msi_entry(msi, pdev) {
- if (!msi->irq)
- continue;
- if (msi->msi_attrib.is_msix)
- __pci_msix_desc_mask_irq(msi, 1);
- else
- __pci_msi_desc_mask_irq(msi, 1, 1);
- irq_set_msi_desc(msi->irq, NULL);
- irq_free_desc(msi->irq);
- msi->msg.address_lo = 0;
- msi->msg.address_hi = 0;
- msi->msg.data = 0;
- msi->irq = 0;
- }
-
- if (zdev->aisb != -1UL) {
- zpci_aibv[zdev->aisb] = NULL;
- airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
- zdev->aisb = -1UL;
- }
- if (zdev->aibv) {
- airq_iv_release(zdev->aibv);
- zdev->aibv = NULL;
- }
-}
-
#ifdef CONFIG_PCI_IOV
static struct resource iov_res = {
.name = "PCI IOV res",
@@ -495,6 +412,7 @@ static struct resource iov_res = {
static void zpci_map_resources(struct pci_dev *pdev)
{
+ struct zpci_dev *zdev = to_zpci(pdev);
resource_size_t len;
int i;
@@ -502,8 +420,13 @@ static void zpci_map_resources(struct pci_dev *pdev)
len = pci_resource_len(pdev, i);
if (!len)
continue;
- pdev->resource[i].start =
- (resource_size_t __force) pci_iomap(pdev, i, 0);
+
+ if (static_branch_likely(&have_mio))
+ pdev->resource[i].start =
+ (resource_size_t __force) zdev->bars[i].mio_wb;
+ else
+ pdev->resource[i].start =
+ (resource_size_t __force) pci_iomap(pdev, i, 0);
pdev->resource[i].end = pdev->resource[i].start + len - 1;
}
@@ -524,6 +447,9 @@ static void zpci_unmap_resources(struct pci_dev *pdev)
resource_size_t len;
int i;
+ if (static_branch_likely(&have_mio))
+ return;
+
for (i = 0; i < PCI_BAR_COUNT; i++) {
len = pci_resource_len(pdev, i);
if (!len)
@@ -533,41 +459,6 @@ static void zpci_unmap_resources(struct pci_dev *pdev)
}
}
-static struct airq_struct zpci_airq = {
- .handler = zpci_irq_handler,
- .isc = PCI_ISC,
-};
-
-static int __init zpci_irq_init(void)
-{
- int rc;
-
- rc = register_adapter_interrupt(&zpci_airq);
- if (rc)
- goto out;
- /* Set summary to 1 to be called every time for the ISC. */
- *zpci_airq.lsi_ptr = 1;
-
- rc = -ENOMEM;
- zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
- if (!zpci_aisb_iv)
- goto out_airq;
-
- zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
- return 0;
-
-out_airq:
- unregister_adapter_interrupt(&zpci_airq);
-out:
- return rc;
-}
-
-static void zpci_irq_exit(void)
-{
- airq_iv_release(zpci_aisb_iv);
- unregister_adapter_interrupt(&zpci_airq);
-}
-
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
unsigned long entry;
@@ -958,7 +849,9 @@ static void zpci_mem_exit(void)
kmem_cache_destroy(zdev_fmb_cache);
}
-static unsigned int s390_pci_probe = 1;
+static unsigned int s390_pci_probe __initdata = 1;
+static unsigned int s390_pci_no_mio __initdata;
+unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;
char * __init pcibios_setup(char *str)
@@ -967,6 +860,14 @@ char * __init pcibios_setup(char *str)
s390_pci_probe = 0;
return NULL;
}
+ if (!strcmp(str, "nomio")) {
+ s390_pci_no_mio = 1;
+ return NULL;
+ }
+ if (!strcmp(str, "force_floating")) {
+ s390_pci_force_floating = 1;
+ return NULL;
+ }
return str;
}
@@ -985,6 +886,9 @@ static int __init pci_base_init(void)
if (!test_facility(69) || !test_facility(71))
return 0;
+ if (test_facility(153) && !s390_pci_no_mio)
+ static_branch_enable(&have_mio);
+
rc = zpci_debug_init();
if (rc)
goto out;
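
The pci.c rework funnels every I/O entry point through one boot-time decision: if facility 153 is present and "nomio" was not given, the have_mio static key is enabled once, and thereafter each call site is a patched jump rather than a load-and-test, choosing between the new MIO path and the legacy function-handle path. A sketch of that dispatch shape using the real static-key API (all demo_* names are hypothetical):

#include <linux/init.h>
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(demo_have_feature);

static bool demo_hw_has_feature(void)   { return true; }  /* think test_facility(153) */
static bool demo_feature_disabled(void) { return false; } /* think "nomio" on cmdline */

static int demo_op_new(void)    { return 0; }	/* MIO-style path */
static int demo_op_legacy(void) { return 0; }	/* function-handle path */

int demo_op(void)
{
	/* After boot this is a single patched branch, not a test. */
	if (static_branch_likely(&demo_have_feature))
		return demo_op_new();
	return demo_op_legacy();
}

static int __init demo_feature_init(void)
{
	if (demo_hw_has_feature() && !demo_feature_disabled())
		static_branch_enable(&demo_have_feature);	/* flipped once */
	return 0;
}
early_initcall(demo_feature_init);
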
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index eeb7450db18c..3a36b07a5571 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -163,7 +163,14 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
memcpy(zdev->util_str, response->util_str,
sizeof(zdev->util_str));
}
+ zdev->mio_capable = response->mio_addr_avail;
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ if (!(response->mio_valid & (1 << (PCI_BAR_COUNT - i - 1))))
+ continue;
+ zdev->bars[i].mio_wb = (void __iomem *) response->addr[i].wb;
+ zdev->bars[i].mio_wt = (void __iomem *) response->addr[i].wt;
+ }
return 0;
}
@@ -279,11 +286,18 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
int rc;
rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
- if (!rc)
- /* Success -> store enabled handle in zdev */
- zdev->fh = fh;
+ zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
+ if (rc)
+ goto out;
- zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+ zdev->fh = fh;
+ if (zdev->mio_capable) {
+ rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_MIO);
+ zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
+ if (rc)
+ clp_disable_fh(zdev);
+ }
+out:
return rc;
}
@@ -296,11 +310,10 @@ int clp_disable_fh(struct zpci_dev *zdev)
return 0;
rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
+ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
if (!rc)
- /* Success -> store disabled handle in zdev */
zdev->fh = fh;
- zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
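
Note the bit numbering when the CLP response is parsed: mio_valid is consumed most-significant-bit-first, so BAR i corresponds to bit (PCI_BAR_COUNT - i - 1); assuming PCI_BAR_COUNT is 6 on s390, BAR 0 maps to bit 5 (0x20) and BAR 5 to bit 0. A small worked example:

#include <stdio.h>

#define PCI_BAR_COUNT 6		/* assumed s390 value, for illustration */

int main(void)
{
	unsigned int mio_valid = 0x24;	/* sample response: bits 5 and 2 */
	int i;

	/* MSB-first mapping from the patch: BAR i <-> bit PCI_BAR_COUNT-i-1,
	 * so 0x24 marks BAR 0 (bit 5) and BAR 3 (bit 2) as MIO-capable. */
	for (i = 0; i < PCI_BAR_COUNT; i++)
		printf("BAR %d: %s\n", i,
		       (mio_valid & (1u << (PCI_BAR_COUNT - i - 1))) ?
		       "MIO addresses valid" : "skipped");
	return 0;
}
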
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index f069929e8211..02f9505c99a8 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -8,9 +8,11 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <linux/jump_label.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
+#include <asm/pci_io.h>
#include <asm/processor.h>
#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
@@ -96,13 +98,15 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
}
/* Set Interruption Controls */
-int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
if (!test_facility(72))
return -EIO;
- asm volatile (
- " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
- : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+
+ asm volatile(
+ ".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));
+
return 0;
}
@@ -140,7 +144,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
return cc;
}
-int zpci_load(u64 *data, u64 req, u64 offset)
+int __zpci_load(u64 *data, u64 req, u64 offset)
{
u8 status;
int cc;
@@ -156,6 +160,52 @@ int zpci_load(u64 *data, u64 req, u64 offset)
return (cc > 0) ? -EIO : cc;
}
+EXPORT_SYMBOL_GPL(__zpci_load);
+
+static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
+ unsigned long len)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+ return __zpci_load(data, req, ZPCI_OFFSET(addr));
+}
+
+static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
+{
+ register u64 addr asm("2") = ioaddr;
+ register u64 r3 asm("3") = len;
+ int cc = -ENXIO;
+ u64 __data;
+
+ asm volatile (
+ " .insn rre,0xb9d60000,%[data],%[ioaddr]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [data] "=d" (__data), "+d" (r3)
+ : [ioaddr] "d" (addr)
+ : "cc");
+ *status = r3 >> 24 & 0xff;
+ *data = __data;
+ return cc;
+}
+
+int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
+{
+ u8 status;
+ int cc;
+
+ if (!static_branch_unlikely(&have_mio))
+ return zpci_load_fh(data, addr, len);
+
+ cc = __pcilg_mio(data, (__force u64) addr, len, &status);
+ if (cc)
+ zpci_err_insn(cc, status, 0, (__force u64) addr);
+
+ return (cc > 0) ? -EIO : cc;
+}
EXPORT_SYMBOL_GPL(zpci_load);
/* PCI Store */
@@ -178,7 +228,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
return cc;
}
-int zpci_store(u64 data, u64 req, u64 offset)
+int __zpci_store(u64 data, u64 req, u64 offset)
{
u8 status;
int cc;
@@ -194,6 +244,50 @@ int zpci_store(u64 data, u64 req, u64 offset)
return (cc > 0) ? -EIO : cc;
}
+EXPORT_SYMBOL_GPL(__zpci_store);
+
+static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
+ unsigned long len)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+ return __zpci_store(data, req, ZPCI_OFFSET(addr));
+}
+
+static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
+{
+ register u64 addr asm("2") = ioaddr;
+ register u64 r3 asm("3") = len;
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rre,0xb9d40000,%[data],%[ioaddr]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), "+d" (r3)
+ : [data] "d" (data), [ioaddr] "d" (addr)
+ : "cc");
+ *status = r3 >> 24 & 0xff;
+ return cc;
+}
+
+int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
+{
+ u8 status;
+ int cc;
+
+ if (!static_branch_unlikely(&have_mio))
+ return zpci_store_fh(addr, data, len);
+
+ cc = __pcistg_mio(data, (__force u64) addr, len, &status);
+ if (cc)
+ zpci_err_insn(cc, status, 0, (__force u64) addr);
+
+ return (cc > 0) ? -EIO : cc;
+}
EXPORT_SYMBOL_GPL(zpci_store);
/* PCI Store Block */
@@ -214,7 +308,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
return cc;
}
-int zpci_store_block(const u64 *data, u64 req, u64 offset)
+int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
u8 status;
int cc;
@@ -230,4 +324,63 @@ int zpci_store_block(const u64 *data, u64 req, u64 offset)
return (cc > 0) ? -EIO : cc;
}
-EXPORT_SYMBOL_GPL(zpci_store_block);
+EXPORT_SYMBOL_GPL(__zpci_store_block);
+
+static inline int zpci_write_block_fh(volatile void __iomem *dst,
+ const void *src, unsigned long len)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+ u64 offset = ZPCI_OFFSET(dst);
+
+ return __zpci_store_block(src, req, offset);
+}
+
+static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
+{
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [len] "+d" (len)
+ : [ioaddr] "d" (ioaddr), [data] "Q" (*data)
+ : "cc");
+ *status = len >> 24 & 0xff;
+ return cc;
+}
+
+int zpci_write_block(volatile void __iomem *dst,
+ const void *src, unsigned long len)
+{
+ u8 status;
+ int cc;
+
+ if (!static_branch_unlikely(&have_mio))
+ return zpci_write_block_fh(dst, src, len);
+
+ cc = __pcistb_mio(src, (__force u64) dst, len, &status);
+ if (cc)
+ zpci_err_insn(cc, status, 0, (__force u64) dst);
+
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(zpci_write_block);
+
+static inline void __pciwb_mio(void)
+{
+ unsigned long unused = 0;
+
+ asm volatile (".insn rre,0xb9d50000,%[op],%[op]\n"
+ : [op] "+d" (unused));
+}
+
+void zpci_barrier(void)
+{
+ if (static_branch_likely(&have_mio))
+ __pciwb_mio();
+}
+EXPORT_SYMBOL_GPL(zpci_barrier);
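
All the new MIO wrappers share one return convention: cc is preset to -ENXIO, so if the instruction faults, the exception-table fixup branches past the ipm and the preset survives; on normal completion ipm/srl extract the 2-bit condition code, and the caller maps any positive cc to -EIO while passing 0 and negative values through. A sketch of that mapping (assumed from the code above, not a documented API):

#include <errno.h>

/*   cc == 0 -> 0     instruction completed successfully
 *   cc  > 0 -> -EIO  completed with a bad condition code
 *   cc  < 0 -> cc    never completed; the -ENXIO preset survives
 */
static int demo_map_cc(int cc)
{
	return (cc > 0) ? -EIO : cc;
}
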
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
new file mode 100644
index 000000000000..d80616ae8dd8
--- /dev/null
+++ b/arch/s390/pci/pci_irq.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/smp.h>
+
+#include <asm/isc.h>
+#include <asm/airq.h>
+
+static enum {FLOATING, DIRECTED} irq_delivery;
+
+#define SIC_IRQ_MODE_ALL 0
+#define SIC_IRQ_MODE_SINGLE 1
+#define SIC_IRQ_MODE_DIRECT 4
+#define SIC_IRQ_MODE_D_ALL 16
+#define SIC_IRQ_MODE_D_SINGLE 17
+#define SIC_IRQ_MODE_SET_CPU 18
+
+/*
+ * summary bit vector
+ * FLOATING - summary bit per function
+ * DIRECTED - summary bit per cpu (only used in fallback path)
+ */
+static struct airq_iv *zpci_sbv;
+
+/*
+ * interrupt bit vectors
+ * FLOATING - interrupt bit vector per function
+ * DIRECTED - interrupt bit vector per cpu
+ */
+static struct airq_iv **zpci_ibv;
+
+/* Modify PCI: Register adapter interruptions */
+static int zpci_set_airq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
+ struct zpci_fib fib = {0};
+ u8 status;
+
+ fib.fmt0.isc = PCI_ISC;
+ fib.fmt0.sum = 1; /* enable summary notifications */
+ fib.fmt0.noi = airq_iv_end(zdev->aibv);
+ fib.fmt0.aibv = (unsigned long) zdev->aibv->vector;
+ fib.fmt0.aibvo = 0; /* each zdev has its own interrupt vector */
+ fib.fmt0.aisb = (unsigned long) zpci_sbv->vector + (zdev->aisb/64)*8;
+ fib.fmt0.aisbo = zdev->aisb & 63;
+
+ return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+}
+
+/* Modify PCI: Unregister adapter interruptions */
+static int zpci_clear_airq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
+ struct zpci_fib fib = {0};
+ u8 cc, status;
+
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc == 3 || (cc == 1 && status == 24))
+ /* Function already gone or IRQs already deregistered. */
+ cc = 0;
+
+ return cc ? -EIO : 0;
+}
+
+/* Modify PCI: Register CPU directed interruptions */
+static int zpci_set_directed_irq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT_D);
+ struct zpci_fib fib = {0};
+ u8 status;
+
+ fib.fmt = 1;
+ fib.fmt1.noi = zdev->msi_nr_irqs;
+ fib.fmt1.dibvo = zdev->msi_first_bit;
+
+ return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+}
+
+/* Modify PCI: Unregister CPU directed interruptions */
+static int zpci_clear_directed_irq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT_D);
+ struct zpci_fib fib = {0};
+ u8 cc, status;
+
+ fib.fmt = 1;
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc == 3 || (cc == 1 && status == 24))
+ /* Function already gone or IRQs already deregistered. */
+ cc = 0;
+
+ return cc ? -EIO : 0;
+}
+
+static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
+ bool force)
+{
+ struct msi_desc *entry = irq_get_msi_desc(data->irq);
+ struct msi_msg msg = entry->msg;
+
+ msg.address_lo &= 0xff0000ff;
+ msg.address_lo |= (cpumask_first(dest) << 8);
+ pci_write_msi_msg(data->irq, &msg);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip zpci_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_unmask = pci_msi_unmask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_set_affinity = zpci_set_irq_affinity,
+};
+
+static void zpci_handle_cpu_local_irq(bool rescan)
+{
+ struct airq_iv *dibv = zpci_ibv[smp_processor_id()];
+ unsigned long bit;
+ int irqs_on = 0;
+
+ for (bit = 0;;) {
+ /* Scan the directed IRQ bit vector */
+ bit = airq_iv_scan(dibv, bit, airq_iv_end(dibv));
+ if (bit == -1UL) {
+ if (!rescan || irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC))
+ break;
+ bit = 0;
+ continue;
+ }
+ inc_irq_stat(IRQIO_MSI);
+ generic_handle_irq(airq_iv_get_data(dibv, bit));
+ }
+}
+
+struct cpu_irq_data {
+ call_single_data_t csd;
+ atomic_t scheduled;
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data, irq_data);
+
+static void zpci_handle_remote_irq(void *data)
+{
+ atomic_t *scheduled = data;
+
+ do {
+ zpci_handle_cpu_local_irq(false);
+ } while (atomic_dec_return(scheduled));
+}
+
+static void zpci_handle_fallback_irq(void)
+{
+ struct cpu_irq_data *cpu_data;
+ unsigned long cpu;
+ int irqs_on = 0;
+
+ for (cpu = 0;;) {
+ cpu = airq_iv_scan(zpci_sbv, cpu, airq_iv_end(zpci_sbv));
+ if (cpu == -1UL) {
+ if (irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
+ break;
+ cpu = 0;
+ continue;
+ }
+ cpu_data = &per_cpu(irq_data, cpu);
+ if (atomic_inc_return(&cpu_data->scheduled) > 1)
+ continue;
+
+ cpu_data->csd.func = zpci_handle_remote_irq;
+ cpu_data->csd.info = &cpu_data->scheduled;
+ cpu_data->csd.flags = 0;
+ smp_call_function_single_async(cpu, &cpu_data->csd);
+ }
+}
+
+static void zpci_directed_irq_handler(struct airq_struct *airq, bool floating)
+{
+ if (floating) {
+ inc_irq_stat(IRQIO_PCF);
+ zpci_handle_fallback_irq();
+ } else {
+ inc_irq_stat(IRQIO_PCD);
+ zpci_handle_cpu_local_irq(true);
+ }
+}
+
+static void zpci_floating_irq_handler(struct airq_struct *airq, bool floating)
+{
+ unsigned long si, ai;
+ struct airq_iv *aibv;
+ int irqs_on = 0;
+
+ inc_irq_stat(IRQIO_PCF);
+ for (si = 0;;) {
+ /* Scan adapter summary indicator bit vector */
+ si = airq_iv_scan(zpci_sbv, si, airq_iv_end(zpci_sbv));
+ if (si == -1UL) {
+ if (irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
+ break;
+ si = 0;
+ continue;
+ }
+
+ /* Scan the adapter interrupt vector for this device. */
+ aibv = zpci_ibv[si];
+ for (ai = 0;;) {
+ ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
+ if (ai == -1UL)
+ break;
+ inc_irq_stat(IRQIO_MSI);
+ airq_iv_lock(aibv, ai);
+ generic_handle_irq(airq_iv_get_data(aibv, ai));
+ airq_iv_unlock(aibv, ai);
+ }
+ }
+}
+
+int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ unsigned int hwirq, msi_vecs, cpu;
+ unsigned long bit;
+ struct msi_desc *msi;
+ struct msi_msg msg;
+ int rc, irq;
+
+ zdev->aisb = -1UL;
+ zdev->msi_first_bit = -1U;
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+ msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
+
+ if (irq_delivery == DIRECTED) {
+ /* Allocate cpu vector bits */
+ bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
+ if (bit == -1UL)
+ return -EIO;
+ } else {
+ /* Allocate adapter summary indicator bit */
+ bit = airq_iv_alloc_bit(zpci_sbv);
+ if (bit == -1UL)
+ return -EIO;
+ zdev->aisb = bit;
+
+ /* Create adapter interrupt vector */
+ zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
+ if (!zdev->aibv)
+ return -ENOMEM;
+
+ /* Wire up shortcut pointer */
+ zpci_ibv[bit] = zdev->aibv;
+ /* Each function has its own interrupt vector */
+ bit = 0;
+ }
+
+ /* Request MSI interrupts */
+ hwirq = bit;
+ for_each_pci_msi_entry(msi, pdev) {
+ rc = -EIO;
+ if (hwirq - bit >= msi_vecs)
+ break;
+ irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, msi->affinity);
+ if (irq < 0)
+ return -ENOMEM;
+ rc = irq_set_msi_desc(irq, msi);
+ if (rc)
+ return rc;
+ irq_set_chip_and_handler(irq, &zpci_irq_chip,
+ handle_percpu_irq);
+ msg.data = hwirq;
+ if (irq_delivery == DIRECTED) {
+ msg.address_lo = zdev->msi_addr & 0xff0000ff;
+ msg.address_lo |= msi->affinity ?
+ (cpumask_first(&msi->affinity->mask) << 8) : 0;
+ for_each_possible_cpu(cpu) {
+ airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
+ }
+ } else {
+ msg.address_lo = zdev->msi_addr & 0xffffffff;
+ airq_iv_set_data(zdev->aibv, hwirq, irq);
+ }
+ msg.address_hi = zdev->msi_addr >> 32;
+ pci_write_msi_msg(irq, &msg);
+ hwirq++;
+ }
+
+ zdev->msi_first_bit = bit;
+ zdev->msi_nr_irqs = msi_vecs;
+
+ if (irq_delivery == DIRECTED)
+ rc = zpci_set_directed_irq(zdev);
+ else
+ rc = zpci_set_airq(zdev);
+ if (rc)
+ return rc;
+
+ return (msi_vecs == nvec) ? 0 : msi_vecs;
+}
+
+void arch_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ struct msi_desc *msi;
+ int rc;
+
+ /* Disable interrupts */
+ if (irq_delivery == DIRECTED)
+ rc = zpci_clear_directed_irq(zdev);
+ else
+ rc = zpci_clear_airq(zdev);
+ if (rc)
+ return;
+
+ /* Release MSI interrupts */
+ for_each_pci_msi_entry(msi, pdev) {
+ if (!msi->irq)
+ continue;
+ if (msi->msi_attrib.is_msix)
+ __pci_msix_desc_mask_irq(msi, 1);
+ else
+ __pci_msi_desc_mask_irq(msi, 1, 1);
+ irq_set_msi_desc(msi->irq, NULL);
+ irq_free_desc(msi->irq);
+ msi->msg.address_lo = 0;
+ msi->msg.address_hi = 0;
+ msi->msg.data = 0;
+ msi->irq = 0;
+ }
+
+ if (zdev->aisb != -1UL) {
+ zpci_ibv[zdev->aisb] = NULL;
+ airq_iv_free_bit(zpci_sbv, zdev->aisb);
+ zdev->aisb = -1UL;
+ }
+ if (zdev->aibv) {
+ airq_iv_release(zdev->aibv);
+ zdev->aibv = NULL;
+ }
+
+ if ((irq_delivery == DIRECTED) && zdev->msi_first_bit != -1U)
+ airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs);
+}
+
+static struct airq_struct zpci_airq = {
+ .handler = zpci_floating_irq_handler,
+ .isc = PCI_ISC,
+};
+
+static void __init cpu_enable_directed_irq(void *unused)
+{
+ union zpci_sic_iib iib = {{0}};
+
+ iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
+
+ __zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC);
+}
+
+static int __init zpci_directed_irq_init(void)
+{
+ union zpci_sic_iib iib = {{0}};
+ unsigned int cpu;
+
+ zpci_sbv = airq_iv_create(num_possible_cpus(), 0);
+ if (!zpci_sbv)
+ return -ENOMEM;
+
+ iib.diib.isc = PCI_ISC;
+ iib.diib.nr_cpus = num_possible_cpus();
+ iib.diib.disb_addr = (u64) zpci_sbv->vector;
+ __zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);
+
+ zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
+ GFP_KERNEL);
+ if (!zpci_ibv)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ /*
+ * Per-CPU IRQ vectors look the same, but bit allocation
+ * is only done on the first vector.
+ */
+ zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
+ AIRQ_IV_DATA |
+ AIRQ_IV_CACHELINE |
+ (!cpu ? AIRQ_IV_ALLOC : 0));
+ if (!zpci_ibv[cpu])
+ return -ENOMEM;
+ }
+ on_each_cpu(cpu_enable_directed_irq, NULL, 1);
+
+ zpci_irq_chip.irq_set_affinity = zpci_set_irq_affinity;
+
+ return 0;
+}
+
+static int __init zpci_floating_irq_init(void)
+{
+ zpci_ibv = kcalloc(ZPCI_NR_DEVICES, sizeof(*zpci_ibv), GFP_KERNEL);
+ if (!zpci_ibv)
+ return -ENOMEM;
+
+ zpci_sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
+ if (!zpci_sbv)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ kfree(zpci_ibv);
+ return -ENOMEM;
+}
+
+int __init zpci_irq_init(void)
+{
+ int rc;
+
+ irq_delivery = sclp.has_dirq ? DIRECTED : FLOATING;
+ if (s390_pci_force_floating)
+ irq_delivery = FLOATING;
+
+ if (irq_delivery == DIRECTED)
+ zpci_airq.handler = zpci_directed_irq_handler;
+
+ rc = register_adapter_interrupt(&zpci_airq);
+ if (rc)
+ goto out;
+ /* Set the summary indicator to 1 so we are called for every interrupt on the ISC. */
+ *zpci_airq.lsi_ptr = 1;
+
+ switch (irq_delivery) {
+ case FLOATING:
+ rc = zpci_floating_irq_init();
+ break;
+ case DIRECTED:
+ rc = zpci_directed_irq_init();
+ break;
+ }
+
+ if (rc)
+ goto out_airq;
+
+ /*
+ * Enable floating IRQs (with suppression after one IRQ). When using
+ * directed IRQs this enables the fallback path.
+ */
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC);
+
+ return 0;
+out_airq:
+ unregister_adapter_interrupt(&zpci_airq);
+out:
+ return rc;
+}
+
+void __init zpci_irq_exit(void)
+{
+ unsigned int cpu;
+
+ if (irq_delivery == DIRECTED) {
+ for_each_possible_cpu(cpu) {
+ airq_iv_release(zpci_ibv[cpu]);
+ }
+ }
+ kfree(zpci_ibv);
+ if (zpci_sbv)
+ airq_iv_release(zpci_sbv);
+ unregister_adapter_interrupt(&zpci_airq);
+}
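
The floating handler above drains the summary vector in two passes: scan with the interrupt source suppressed, re-enable the source once, then scan again so a summary bit raised between the last hit and the re-enable is not lost. A minimal user-space sketch of that pattern follows; scan_and_clear() and reenable_source() are toy stand-ins for airq_iv_scan() and zpci_set_irq_ctrl(), not kernel APIs.

    #include <stdbool.h>
    #include <stdio.h>

    #define NBITS ((int)(8 * sizeof(unsigned long)))

    static unsigned long summary;          /* toy summary indicator vector */

    static bool reenable_source(void)      /* stands in for zpci_set_irq_ctrl() */
    {
            return true;
    }

    static int scan_and_clear(unsigned long *vec)  /* stands in for airq_iv_scan() */
    {
            for (int i = 0; i < NBITS; i++)
                    if (*vec & (1UL << i)) {
                            *vec &= ~(1UL << i);
                            return i;
                    }
            return -1;
    }

    int main(void)
    {
            int irqs_on = 0, bit;

            summary = 0x30;                /* two pending "devices" */
            for (;;) {
                    bit = scan_and_clear(&summary);
                    if (bit == -1) {
                            if (irqs_on++) /* second empty scan: done */
                                    break;
                            if (!reenable_source())
                                    break;
                            continue;      /* rescan with the source enabled */
                    }
                    printf("handle summary bit %d\n", bit);
            }
            return 0;
    }
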
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index ce6a3f75065b..dc1ae4ff79d7 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -4,7 +4,7 @@ OBJECT_FILES_NON_STANDARD := y
purgatory-y := head.o purgatory.o string.o sha256.o mem.o
-targets += $(purgatory-y) purgatory.ro kexec-purgatory.c
+targets += $(purgatory-y) purgatory.lds purgatory purgatory.ro
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
@@ -16,22 +16,26 @@ $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
$(call if_changed_rule,cc_o_c)
-LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
-LDFLAGS_purgatory.ro += -z nodefaultlib
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
+KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
-$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+LDFLAGS_purgatory := -r --no-undefined -nostdlib -z nodefaultlib -T
+$(obj)/purgatory: $(obj)/purgatory.lds $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
-quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
+OBJCOPYFLAGS_purgatory.ro := -O elf64-s390
+OBJCOPYFLAGS_purgatory.ro += --remove-section='*debug*'
+OBJCOPYFLAGS_purgatory.ro += --remove-section='.comment'
+OBJCOPYFLAGS_purgatory.ro += --remove-section='.note.*'
+$(obj)/purgatory.ro: $(obj)/purgatory FORCE
+ $(call if_changed,objcopy)
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
- $(call if_changed,bin2c)
+$(obj)/kexec-purgatory.o: $(obj)/kexec-purgatory.S $(obj)/purgatory.ro FORCE
+ $(call if_changed_rule,as_o_S)
obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o
diff --git a/arch/s390/purgatory/kexec-purgatory.S b/arch/s390/purgatory/kexec-purgatory.S
new file mode 100644
index 000000000000..8293753100ae
--- /dev/null
+++ b/arch/s390/purgatory/kexec-purgatory.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ .section .rodata, "a"
+
+ .align 8
+kexec_purgatory:
+ .globl kexec_purgatory
+ .incbin "arch/s390/purgatory/purgatory.ro"
+.Lkexec_purgatory_end:
+
+ .align 8
+kexec_purgatory_size:
+ .globl kexec_purgatory_size
+ .quad .Lkexec_purgatory_end - kexec_purgatory
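
This replaces the bin2c step: the assembler embeds purgatory.ro directly via .incbin and exports a size symbol next to it, so no generated C file is needed. A self-contained C model of the resulting interface (the kernel's actual declarations live in its kexec headers and may differ in type):

    #include <stdio.h>

    /* Toy blob standing in for the .incbin'd purgatory.ro. */
    const char kexec_purgatory[] = { 0x7f, 'E', 'L', 'F' };
    const unsigned long kexec_purgatory_size = sizeof(kexec_purgatory);

    int main(void)
    {
            printf("purgatory blob: %lu bytes, first byte %#x\n",
                   kexec_purgatory_size, (unsigned char)kexec_purgatory[0]);
            return 0;
    }
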
diff --git a/arch/s390/purgatory/purgatory.lds.S b/arch/s390/purgatory/purgatory.lds.S
new file mode 100644
index 000000000000..482eb4fbcef1
--- /dev/null
+++ b/arch/s390/purgatory/purgatory.lds.S
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
+OUTPUT_ARCH(s390:64-bit)
+
+ENTRY(purgatory_start)
+
+SECTIONS
+{
+ . = 0;
+ .head.text : {
+ _head = . ;
+ HEAD_TEXT
+ _ehead = . ;
+ }
+ .text : {
+ _text = .; /* Text */
+ *(.text)
+ *(.text.*)
+ _etext = . ;
+ }
+ .rodata : {
+ _rodata = . ;
+ *(.rodata) /* read-only data */
+ *(.rodata.*)
+ _erodata = . ;
+ }
+ .data : {
+ _data = . ;
+ *(.data)
+ *(.data.*)
+ _edata = . ;
+ }
+
+ . = ALIGN(256);
+ .bss : {
+ _bss = . ;
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ . = ALIGN(8); /* For convenience during zeroing */
+ _ebss = .;
+ }
+ _end = .;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.eh_frame)
+ *(*__ksymtab*)
+ *(___kcrctab*)
+ }
+}
diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss
index 9bba2c14e0ca..884a9caff5fb 100644
--- a/arch/s390/scripts/Makefile.chkbss
+++ b/arch/s390/scripts/Makefile.chkbss
@@ -1,23 +1,21 @@
# SPDX-License-Identifier: GPL-2.0
+chkbss-target ?= built-in.a
+$(obj)/$(chkbss-target): chkbss
+
+chkbss-files := $(addsuffix .chkbss, $(chkbss))
+clean-files += $(chkbss-files)
+
+PHONY += chkbss
+chkbss: $(addprefix $(obj)/, $(chkbss-files))
+
quiet_cmd_chkbss = CHKBSS $<
-define cmd_chkbss
- rm -f $@; \
- if ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
+ cmd_chkbss = \
+ if $(OBJDUMP) -h $< | grep -q "\.bss" && \
+ ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
echo "error: $< .bss section is not empty" >&2; exit 1; \
fi; \
touch $@;
-endef
-
-chkbss-target ?= $(obj)/built-in.a
-ifneq (,$(findstring /,$(chkbss)))
-chkbss-files := $(patsubst %, %.chkbss, $(chkbss))
-else
-chkbss-files := $(patsubst %, $(obj)/%.chkbss, $(chkbss))
-endif
-
-$(chkbss-target): $(chkbss-files)
-targets += $(notdir $(chkbss-files))
-%.o.chkbss: %.o
+$(obj)/%.o.chkbss: $(obj)/%.o
$(call cmd,chkbss)
diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt
index 1cbed82cd17b..64638b764d1c 100644
--- a/arch/s390/tools/opcodes.txt
+++ b/arch/s390/tools/opcodes.txt
@@ -1,3 +1,5 @@
+0000 illegal E
+0002 brkpt E
0101 pr E
0102 upt E
0104 ptff E
@@ -257,6 +259,7 @@ b258 bsg RRE_RR
b25a bsa RRE_RR
b25d clst RRE_RR
b25e srst RRE_RR
+b25f chsc RRE_R0
b263 cmpsc RRE_RR
b274 siga S_RD
b276 xsch S_00
@@ -277,6 +280,9 @@ b29d lfpc S_RD
b2a5 tre RRE_RR
b2a6 cu21 RRF_U0RR
b2a7 cu12 RRF_U0RR
+b2ad nqap RRE_RR
+b2ae dqap RRE_RR
+b2af pqap RRE_RR
b2b0 stfle S_RD
b2b1 stfl S_RD
b2b2 lpswe S_RD
@@ -290,6 +296,7 @@ b2e5 epctr RRE_RR
b2e8 ppa RRF_U0RR
b2ec etnd RRE_R0
b2ed ecpga RRE_RR
+b2f0 iucv RRE_RR
b2f8 tend S_00
b2fa niai IE_UU
b2fc tabort S_RD
@@ -559,12 +566,15 @@ b998 alcr RRE_RR
b999 slbr RRE_RR
b99a epair RRE_R0
b99b esair RRE_R0
+b99c eqbs RRF_U0RR
b99d esea RRE_R0
b99e pti RRE_RR
b99f ssair RRE_R0
+b9a0 clp RRF_U0RR
b9a1 tpei RRE_RR
b9a2 ptf RRE_R0
b9aa lptea RRF_RURR2
+b9ab essa RRF_U0RR
b9ac irbm RRE_RR
b9ae rrbm RRE_RR
b9af pfmf RRE_RR
@@ -1039,6 +1049,7 @@ eb7a agsi SIY_IRD
eb7e algsi SIY_IRD
eb80 icmh RSY_RURD
eb81 icmy RSY_RURD
+eb8a sqbs RSY_RDRU
eb8e mvclu RSY_RRRD
eb8f clclu RSY_RRRD
eb90 stmy RSY_RRRD
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index d9a9144dec35..0be08d586d40 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -7,11 +7,11 @@ config SUPERH
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select HAVE_PATA_PLATFORM
select CLKDEV_LOOKUP
+ select DMA_DECLARE_COHERENT
select HAVE_IDE if HAS_IOPORT_MAP
select HAVE_MEMBLOCK_NODE_MAP
select ARCH_DISCARD_MEMBLOCK
select HAVE_OPROFILE
- select HAVE_GENERIC_DMA_COHERENT
select HAVE_ARCH_TRACEHOOK
select HAVE_PERF_EVENTS
select HAVE_DEBUG_BUGVERBOSE
@@ -90,12 +90,6 @@ config ARCH_DEFCONFIG
default "arch/sh/configs/shx3_defconfig" if SUPERH32
default "arch/sh/configs/cayman_defconfig" if SUPERH64
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config GENERIC_BUG
def_bool y
depends on BUG && SUPERH32
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 8f234d0435aa..8301a4378f50 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -529,9 +529,8 @@ static int __init ap325rxa_devices_setup(void)
device_initialize(&ap325rxa_ceu_device.dev);
arch_setup_pdev_archdata(&ap325rxa_ceu_device);
dma_declare_coherent_memory(&ap325rxa_ceu_device.dev,
- ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1,
- DMA_MEMORY_EXCLUSIVE);
+ ceu_dma_membase, ceu_dma_membase,
+ ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
platform_device_add(&ap325rxa_ceu_device);
@@ -557,7 +556,10 @@ static void __init ap325rxa_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 5495efa07335..f402aa741bf3 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -806,7 +806,6 @@ static struct spi_board_info spi_bus[] = {
.platform_data = &mmc_spi_info,
.max_speed_hz = 5000000,
.mode = SPI_MODE_0,
- .controller_data = (void *) GPIO_PTM4,
},
};
@@ -838,6 +837,14 @@ static struct platform_device msiof0_device = {
.resource = msiof0_resources,
};
+static struct gpiod_lookup_table msiof_gpio_table = {
+ .dev_id = "spi_sh_msiof.0",
+ .table = {
+ GPIO_LOOKUP("sh7724_pfc", GPIO_PTM4, "cs", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
#endif
/* FSI */
@@ -1296,12 +1303,11 @@ static int __init arch_setup(void)
gpio_request(GPIO_FN_MSIOF0_TXD, NULL);
gpio_request(GPIO_FN_MSIOF0_RXD, NULL);
gpio_request(GPIO_FN_MSIOF0_TSCK, NULL);
- gpio_request(GPIO_PTM4, NULL); /* software CS control of TSYNC pin */
- gpio_direction_output(GPIO_PTM4, 1); /* active low CS */
gpio_request(GPIO_PTB6, NULL); /* 3.3V power control */
gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */
gpiod_add_lookup_table(&mmc_spi_gpio_table);
+ gpiod_add_lookup_table(&msiof_gpio_table);
spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
#endif
@@ -1438,8 +1444,7 @@ static int __init arch_setup(void)
dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
ceu0_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1,
- DMA_MEMORY_EXCLUSIVE);
+ CEU_BUFFER_MEMORY_SIZE - 1);
platform_device_add(ecovec_ceu_devices[0]);
device_initialize(&ecovec_ceu_devices[1]->dev);
@@ -1447,8 +1452,7 @@ static int __init arch_setup(void)
dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
ceu1_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1,
- DMA_MEMORY_EXCLUSIVE);
+ CEU_BUFFER_MEMORY_SIZE - 1);
platform_device_add(ecovec_ceu_devices[1]);
gpiod_add_lookup_table(&cn12_power_gpiod_table);
@@ -1478,12 +1482,18 @@ static void __init ecovec_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU0 memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
ceu0_dma_membase = phys;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU1 memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
ceu1_dma_membase = phys;
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 203d249a0a2b..1cf9a47ac90e 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -603,9 +603,8 @@ static int __init kfr2r09_devices_setup(void)
device_initialize(&kfr2r09_ceu_device.dev);
arch_setup_pdev_archdata(&kfr2r09_ceu_device);
dma_declare_coherent_memory(&kfr2r09_ceu_device.dev,
- ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1,
- DMA_MEMORY_EXCLUSIVE);
+ ceu_dma_membase, ceu_dma_membase,
+ ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
platform_device_add(&kfr2r09_ceu_device);
@@ -631,7 +630,10 @@ static void __init kfr2r09_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index f4ad33c6d2aa..90702740f207 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -5,6 +5,7 @@
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/clkdev.h>
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -603,9 +604,8 @@ static int __init migor_devices_setup(void)
device_initialize(&migor_ceu_device.dev);
arch_setup_pdev_archdata(&migor_ceu_device);
dma_declare_coherent_memory(&migor_ceu_device.dev,
- ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1,
- DMA_MEMORY_EXCLUSIVE);
+ ceu_dma_membase, ceu_dma_membase,
+ ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
platform_device_add(&migor_ceu_device);
@@ -630,7 +630,10 @@ static void __init migor_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index fdbec22ae687..3674064816c7 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -941,8 +941,7 @@ static int __init devices_setup(void)
dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
ceu0_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1,
- DMA_MEMORY_EXCLUSIVE);
+ CEU_BUFFER_MEMORY_SIZE - 1);
platform_device_add(ms7724se_ceu_devices[0]);
device_initialize(&ms7724se_ceu_devices[1]->dev);
@@ -950,8 +949,7 @@ static int __init devices_setup(void)
dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
ceu1_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1,
- DMA_MEMORY_EXCLUSIVE);
+ CEU_BUFFER_MEMORY_SIZE - 1);
platform_device_add(ms7724se_ceu_devices[1]);
return platform_add_devices(ms7724se_devices,
@@ -965,12 +963,18 @@ static void __init ms7724se_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU0 memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
ceu0_dma_membase = phys;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU1 memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
ceu1_dma_membase = phys;
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index 958f46da3a79..d91065e81a4e 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
struct sh_clk_ops;
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
}
-void __init plat_irq_setup(void)
+void __init __weak plat_irq_setup(void)
{
}
diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c
index dfdbd05b6eb1..7be8694c0d13 100644
--- a/arch/sh/drivers/pci/fixups-dreamcast.c
+++ b/arch/sh/drivers/pci/fixups-dreamcast.c
@@ -63,8 +63,7 @@ static void gapspci_fixup_resources(struct pci_dev *dev)
BUG_ON(dma_declare_coherent_memory(&dev->dev,
res.start,
region.start,
- resource_size(&res),
- DMA_MEMORY_EXCLUSIVE));
+ resource_size(&res)));
break;
default:
printk("PCI: Failed resource fixup\n");
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index a6ef3fee5f85..73fff39a0122 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += emergency-restart.h
generic-y += exec.h
generic-y += irq_regs.h
generic-y += irq_work.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
@@ -16,7 +17,6 @@ generic-y += mm-arch-hooks.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += serial.h
generic-y += sizes.h
generic-y += trace_clock.h
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 4f7f235f15f8..c28e37a344ad 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -229,9 +229,6 @@ __BUILD_IOPORT_STRING(q, u64)
#define IO_SPACE_LIMIT 0xffffffff
-/* synco on SH-4A, otherwise a nop */
-#define mmiowb() wmb()
-
/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
diff --git a/arch/sh/include/asm/mmiowb.h b/arch/sh/include/asm/mmiowb.h
new file mode 100644
index 000000000000..535d59735f1d
--- /dev/null
+++ b/arch/sh/include/asm/mmiowb.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SH_MMIOWB_H
+#define __ASM_SH_MMIOWB_H
+
+#include <asm/barrier.h>
+
+/* synco on SH-4A, otherwise a nop */
+#define mmiowb() wmb()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* __ASM_SH_MMIOWB_H */
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8ad73cb31121..b56f908b1395 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -70,6 +70,15 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
+#if CONFIG_PGTABLE_LEVELS > 2
+#define __pmd_free_tlb(tlb, pmdp, addr) \
+do { \
+ struct page *page = virt_to_page(pmdp); \
+ pgtable_pmd_page_dtor(page); \
+ tlb_remove_page((tlb), page); \
+} while (0)
+#endif
+
static inline void check_pgt_cache(void)
{
quicklist_trim(QUICK_PT, NULL, 25, 16);
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
index 786ee0fde3b0..7fd929cd2e7a 100644
--- a/arch/sh/include/asm/spinlock-llsc.h
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -47,6 +47,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
__asm__ __volatile__ (
"mov #1, %0 ! arch_spin_unlock \n\t"
"mov.l %0, @%1 \n\t"
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
index 6e118799831c..8c9d7e5e5dcc 100644
--- a/arch/sh/include/asm/syscall_32.h
+++ b/arch/sh/include/asm/syscall_32.h
@@ -48,51 +48,28 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- /*
- * Do this simply for now. If we need to start supporting
- * fetching arguments from arbitrary indices, this will need some
- * extra logic. Presently there are no in-tree users that depend
- * on this behaviour.
- */
- BUG_ON(i);
/* Argument pattern is: R4, R5, R6, R7, R0, R1 */
- switch (n) {
- case 6: args[5] = regs->regs[1];
- case 5: args[4] = regs->regs[0];
- case 4: args[3] = regs->regs[7];
- case 3: args[2] = regs->regs[6];
- case 2: args[1] = regs->regs[5];
- case 1: args[0] = regs->regs[4];
- case 0:
- break;
- default:
- BUG();
- }
+ args[5] = regs->regs[1];
+ args[4] = regs->regs[0];
+ args[3] = regs->regs[7];
+ args[2] = regs->regs[6];
+ args[1] = regs->regs[5];
+ args[0] = regs->regs[4];
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- /* Same note as above applies */
- BUG_ON(i);
-
- switch (n) {
- case 6: regs->regs[1] = args[5];
- case 5: regs->regs[0] = args[4];
- case 4: regs->regs[7] = args[3];
- case 3: regs->regs[6] = args[2];
- case 2: regs->regs[5] = args[1];
- case 1: regs->regs[4] = args[0];
- break;
- default:
- BUG();
- }
+ regs->regs[1] = args[5];
+ regs->regs[0] = args[4];
+ regs->regs[7] = args[3];
+ regs->regs[6] = args[2];
+ regs->regs[5] = args[1];
+ regs->regs[4] = args[0];
}
static inline int syscall_get_arch(void)
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h
index 43882580c7f9..22fad97da066 100644
--- a/arch/sh/include/asm/syscall_64.h
+++ b/arch/sh/include/asm/syscall_64.h
@@ -47,20 +47,16 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- BUG_ON(i + n > 6);
- memcpy(args, &regs->regs[2 + i], n * sizeof(args[0]));
+ memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- BUG_ON(i + n > 6);
- memcpy(&regs->regs[2 + i], args, n * sizeof(args[0]));
+ memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
}
static inline int syscall_get_arch(void)
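
Both sh variants now follow the tracing core's revised contract: syscall_get_arguments() and syscall_set_arguments() always transfer all six arguments, so the i/n windowing and its BUG_ON() guards disappear. A stand-alone round-trip model of the fixed-six convention, with an illustrative register layout:

    #include <assert.h>
    #include <string.h>

    struct toy_regs {
            unsigned long regs[8];  /* args live in regs[2..7], as on sh64 */
    };

    static void get_args(struct toy_regs *r, unsigned long *args)
    {
            memcpy(args, &r->regs[2], 6 * sizeof(args[0]));
    }

    static void set_args(struct toy_regs *r, const unsigned long *args)
    {
            memcpy(&r->regs[2], args, 6 * sizeof(args[0]));
    }

    int main(void)
    {
            struct toy_regs r = { { 0 } };
            unsigned long in[6] = { 1, 2, 3, 4, 5, 6 }, out[6];

            set_args(&r, in);
            get_args(&r, out);
            assert(memcmp(in, out, sizeof(in)) == 0);
            return 0;
    }
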
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 77abe192fb43..bc77f3dd4261 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -11,133 +11,8 @@
#ifdef CONFIG_MMU
#include <linux/swap.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int fullmm;
- unsigned long start, end;
-};
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
- tlb->start = TASK_SIZE;
- tlb->end = 0;
-
- if (tlb->fullmm) {
- tlb->start = 0;
- tlb->end = TASK_SIZE;
- }
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->start = start;
- tlb->end = end;
- tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (tlb->fullmm || force)
- flush_tlb_mm(tlb->mm);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
-{
- if (tlb->start > address)
- tlb->start = address;
- if (tlb->end < address + PAGE_SIZE)
- tlb->end = address + PAGE_SIZE;
-}
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm)
- flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
-
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm && tlb->end) {
- flush_tlb_range(vma, tlb->start, tlb->end);
- init_tlb_gather(tlb);
- }
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-}
-
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- free_page_and_swap_cache(page);
- return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- __tlb_remove_page(tlb, page);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return tlb_remove_page(tlb, page);
-}
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
-#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm) do { } while (0)
+#include <asm-generic/tlb.h>
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
@@ -157,11 +32,6 @@ static inline void tlb_unwire_entry(void)
#else /* CONFIG_MMU */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-#define tlb_flush(tlb) do { } while (0)
-
#include <asm-generic/tlb.h>
#endif /* CONFIG_MMU */
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild
index eaa30bcd93bf..b8812c74c1de 100644
--- a/arch/sh/include/uapi/asm/Kbuild
+++ b/arch/sh/include/uapi/asm/Kbuild
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
-generic-y += kvm_para.h
generic-y += ucontext.h
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index b9f9f1a5afdc..63d63a36f6f2 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -168,7 +168,8 @@ void __init reserve_crashkernel(void)
crash_size = PAGE_ALIGN(resource_size(&crashk_res));
if (!crashk_res.start) {
unsigned long max = memblock_end_of_DRAM() - memory_limit;
- crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
+ crashk_res.start = memblock_phys_alloc_range(crash_size,
+ PAGE_SIZE, 0, max);
if (!crashk_res.start) {
pr_err("crashkernel allocation failed\n");
goto disable;
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index f3cb2cccb262..2950b19ad077 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -49,8 +49,6 @@ void save_stack_trace(struct stack_trace *trace)
unsigned long *sp = (unsigned long *)current_stack_pointer;
unwind_stack(current, NULL, sp, &save_stack_ops, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
@@ -84,7 +82,5 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
unsigned long *sp = (unsigned long *)tsk->thread.sp;
unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index bfda678576e4..480b057556ee 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -426,3 +426,7 @@
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
422 common futex_time64 sys_futex
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index a0fa4de03dd5..70621324db41 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -128,6 +128,9 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
pmd_t *pmd;
pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pmd)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, pmd);
BUG_ON(pmd != pmd_offset(pud, 0));
}
@@ -141,6 +144,9 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
pte_t *pte;
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
@@ -196,7 +202,7 @@ void __init allocate_pgdat(unsigned int nid)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
#ifdef CONFIG_NEED_MULTIPLE_NODES
- NODE_DATA(nid) = memblock_alloc_try_nid_nopanic(
+ NODE_DATA(nid) = memblock_alloc_try_nid(
sizeof(struct pglist_data),
SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index c4bde6148810..f7e4439deb17 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -43,6 +43,10 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* Node-local pgdat */
NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
SMP_CACHE_BYTES, nid);
+ if (!NODE_DATA(nid))
+ panic("%s: Failed to allocate %zu bytes align=0x%x nid=%d\n",
+ __func__, sizeof(struct pglist_data), SMP_CACHE_BYTES,
+ nid);
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 40f8f4f73fe8..f6421c9ce5d3 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -63,6 +63,7 @@ config SPARC64
select HAVE_KRETPROBES
select HAVE_KPROBES
select HAVE_RCU_TABLE_FREE if SMP
+ select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_DYNAMIC_FTRACE
@@ -191,14 +192,6 @@ config NR_CPUS
source "kernel/Kconfig.hz"
-config RWSEM_GENERIC_SPINLOCK
- bool
- default y if SPARC32
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y if SPARC64
-
config GENERIC_HWEIGHT
bool
default y
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 4884315daff4..453a4cf5492a 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -201,18 +201,15 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
- const u32 *K = (const u32 *)key;
u32 *flags = &tfm->crt_flags;
u64 k1[DES_EXPKEY_WORDS / 2];
u64 k2[DES_EXPKEY_WORDS / 2];
u64 k3[DES_EXPKEY_WORDS / 2];
+ int err;
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ err = __des3_verify_key(flags, key);
+ if (unlikely(err))
+ return err;
des_sparc64_key_expand((const u32 *)key, k1);
key += DES_KEY_SIZE;
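
The removed open-coded test rejected 3DES keys with K1 == K2 or K2 == K3 when weak keys are forbidden; __des3_verify_key() centralises the same check. A stand-alone model of just the weak-key condition (des3_key_is_weak() is an illustrative name, not the crypto API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DES_KEY_SIZE 8

    static int des3_key_is_weak(const uint8_t *key)
    {
            return !memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) ||
                   !memcmp(key + DES_KEY_SIZE, key + 2 * DES_KEY_SIZE,
                           DES_KEY_SIZE);
    }

    int main(void)
    {
            uint8_t key[3 * DES_KEY_SIZE] = { 0 };

            printf("all-equal key weak:  %d\n", des3_key_is_weak(key));
            key[2 * DES_KEY_SIZE] = 1;          /* make K3 differ */
            printf("K1 == K2 still weak: %d\n", des3_key_is_weak(key));
            key[0] = 2;                         /* make K1 differ too */
            printf("distinct halves:     %d\n", des3_key_is_weak(key));
            return 0;
    }
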
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index b82f64e28f55..95c44380b1d6 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -9,15 +9,16 @@ generic-y += exec.h
generic-y += export.h
generic-y += irq_regs.h
generic-y += irq_work.h
+generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
generic-y += msi.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += serial.h
generic-y += trace_clock.h
generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index b162c23ae8c2..688911051b44 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -396,8 +396,6 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
}
}
-#define mmiowb()
-
#ifdef __KERNEL__
/* On sparc64 we have the whole physical IO address space accessible
diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h
index 053989e3f6a6..4d075434e816 100644
--- a/arch/sparc/include/asm/syscall.h
+++ b/arch/sparc/include/asm/syscall.h
@@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
int zero_extend = 0;
unsigned int j;
+ unsigned int n = 6;
#ifdef CONFIG_SPARC64
if (test_tsk_thread_flag(task, TIF_32BIT))
@@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
#endif
for (j = 0; j < n; j++) {
- unsigned long val = regs->u_regs[UREG_I0 + i + j];
+ unsigned long val = regs->u_regs[UREG_I0 + j];
if (zero_extend)
args[j] = (u32) val;
@@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
- unsigned int j;
+ unsigned int i;
- for (j = 0; j < n; j++)
- regs->u_regs[UREG_I0 + i + j] = args[j];
+ for (i = 0; i < 6; i++)
+ regs->u_regs[UREG_I0 + i] = args[i];
}
static inline int syscall_get_arch(void)
diff --git a/arch/sparc/include/asm/tlb_32.h b/arch/sparc/include/asm/tlb_32.h
index 343cea19e573..5cd28a8793e3 100644
--- a/arch/sparc/include/asm/tlb_32.h
+++ b/arch/sparc/include/asm/tlb_32.h
@@ -2,24 +2,6 @@
#ifndef _SPARC_TLB_H
#define _SPARC_TLB_H
-#define tlb_start_vma(tlb, vma) \
-do { \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define tlb_end_vma(tlb, vma) \
-do { \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address) \
- do { } while (0)
-
-#define tlb_flush(tlb) \
-do { \
- flush_tlb_mm((tlb)->mm); \
-} while (0)
-
#include <asm-generic/tlb.h>
#endif /* _SPARC_TLB_H */
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index 214a39acdf25..2bd5b392277c 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -1,4 +1,2 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_32.h
generated-y += unistd_64.h
diff --git a/arch/sparc/include/uapi/asm/kvm_para.h b/arch/sparc/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/sparc/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 88fe4f978aca..9265a9eece15 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -2,8 +2,8 @@
#ifndef _ASM_SOCKET_H
#define _ASM_SOCKET_H
+#include <linux/posix_types.h>
#include <asm/sockios.h>
-#include <asm/bitsperlong.h>
/* For setsockopt(2) */
#define SOL_SOCKET 0xffff
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index b1a09080e8da..4ae7388b1bff 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -745,15 +745,12 @@ static int dma_4u_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
- if (device_mask > DMA_BIT_MASK(32))
- return 0;
- if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask)
+ if (ali_sound_dma_hack(dev, device_mask))
return 1;
-#ifdef CONFIG_PCI
- if (dev_is_pci(dev))
- return pci64_dma_supported(to_pci_dev(dev), device_mask);
-#endif
- return 0;
+
+ if (device_mask < iommu->dma_addr_mask)
+ return 0;
+ return 1;
}
static const struct dma_map_ops sun4u_dma_ops = {
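
The sun4u path (and the sun4v path later in this series) reduces dma_supported to one comparison: a device mask is acceptable when it covers at least the IOMMU's DMA address mask, with the ALI M5451 special case peeled out into ali_sound_dma_hack(). A worked example of the comparison with illustrative mask values:

    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    static int dma_supported(unsigned long long device_mask,
                             unsigned long long iommu_mask)
    {
            return device_mask >= iommu_mask;
    }

    int main(void)
    {
            unsigned long long iommu_mask = DMA_BIT_MASK(32);

            printf("32-bit device: %d\n", dma_supported(DMA_BIT_MASK(32), iommu_mask));
            printf("24-bit device: %d\n", dma_supported(DMA_BIT_MASK(24), iommu_mask));
            printf("64-bit device: %d\n", dma_supported(DMA_BIT_MASK(64), iommu_mask));
            return 0;
    }
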
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index ddffd368e057..f6f498ba3198 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -45,7 +45,11 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs);
/* pci.c */
-int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+#ifdef CONFIG_PCI
+int ali_sound_dma_hack(struct device *dev, u64 device_mask);
+#else
+#define ali_sound_dma_hack(dev, mask) (0)
+#endif
/* signal32.c */
void do_sigreturn32(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index bcfec6a85d23..5ed43828e078 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -956,51 +956,35 @@ void arch_teardown_msi_irq(unsigned int irq)
}
#endif /* !(CONFIG_PCI_MSI) */
-static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
+/* ALI sound chips generate 31 bits of DMA; a special register
+ * determines what bit 31 is emitted as.
+ */
+int ali_sound_dma_hack(struct device *dev, u64 device_mask)
{
+ struct iommu *iommu = dev->archdata.iommu;
struct pci_dev *ali_isa_bridge;
u8 val;
- /* ALI sound chips generate 31-bits of DMA, a special register
- * determines what bit 31 is emitted as.
- */
+ if (!dev_is_pci(dev))
+ return 0;
+
+ if (to_pci_dev(dev)->vendor != PCI_VENDOR_ID_AL ||
+ to_pci_dev(dev)->device != PCI_DEVICE_ID_AL_M5451 ||
+ device_mask != 0x7fffffff)
+ return 0;
+
ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
PCI_DEVICE_ID_AL_M1533,
NULL);
pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
- if (set_bit)
+ if (iommu->dma_addr_mask & 0x80000000)
val |= 0x01;
else
val &= ~0x01;
pci_write_config_byte(ali_isa_bridge, 0x7e, val);
pci_dev_put(ali_isa_bridge);
-}
-
-int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
-{
- u64 dma_addr_mask;
-
- if (pdev == NULL) {
- dma_addr_mask = 0xffffffff;
- } else {
- struct iommu *iommu = pdev->dev.archdata.iommu;
-
- dma_addr_mask = iommu->dma_addr_mask;
-
- if (pdev->vendor == PCI_VENDOR_ID_AL &&
- pdev->device == PCI_DEVICE_ID_AL_M5451 &&
- device_mask == 0x7fffffff) {
- ali_sound_dma_hack(pdev,
- (dma_addr_mask & 0x80000000) != 0);
- return 1;
- }
- }
-
- if (device_mask >= (1UL << 32UL))
- return 0;
-
- return (device_mask & dma_addr_mask) == dma_addr_mask;
+ return 1;
}
void pci_resource_to_user(const struct pci_dev *pdev, int bar,
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index fa0e42b4cbfb..14b93c5564e3 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
p->npages = 0;
}
+static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
+{
+ return iommu->atu && mask > DMA_BIT_MASK(32);
+}
+
/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
@@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) {
- if (mask <= DMA_BIT_MASK(32)) {
+ if (!iommu_use_atu(pbm->iommu, mask)) {
num = pci_sun4v_iommu_map(devhandle,
HV_PCI_TSBID(0, entry),
npages,
@@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0;
struct iommu *iommu;
- struct atu *atu;
struct iommu_map_table *tbl;
struct page *page;
void *ret;
@@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
- atu = iommu->atu;
-
mask = dev->coherent_dma_mask;
- if (mask <= DMA_BIT_MASK(32))
+ if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl;
else
- tbl = &atu->tbl;
+ tbl = &iommu->atu->tbl;
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
@@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
atu = iommu->atu;
devhandle = pbm->devhandle;
- if (dvma <= DMA_BIT_MASK(32)) {
+ if (!iommu_use_atu(iommu, dvma)) {
tbl = &iommu->tbl;
iotsb_num = 0; /* we don't care for legacy iommu */
} else {
@@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages >>= IO_PAGE_SHIFT;
mask = *dev->dma_mask;
- if (mask <= DMA_BIT_MASK(32))
+ if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl;
else
tbl = &atu->tbl;
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
mask = *dev->dma_mask;
- if (mask <= DMA_BIT_MASK(32))
+ if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl;
else
tbl = &atu->tbl;
@@ -674,18 +676,12 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
static int dma_4v_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
- u64 dma_addr_mask = iommu->dma_addr_mask;
-
- if (device_mask > DMA_BIT_MASK(32)) {
- if (iommu->atu)
- dma_addr_mask = iommu->atu->dma_addr_mask;
- else
- return 0;
- }
- if ((device_mask & dma_addr_mask) == dma_addr_mask)
+ if (ali_sound_dma_hack(dev, device_mask))
return 1;
- return pci64_dma_supported(to_pci_dev(dev), device_mask);
+ if (device_mask < iommu->dma_addr_mask)
+ return 0;
+ return 1;
}
static const struct dma_map_ops sun4v_dma_ops = {
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index 42d7f2a7da6d..869b16c96157 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -32,9 +32,9 @@ void * __init prom_early_alloc(unsigned long size)
{
void *ret;
- ret = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
- if (ret != NULL)
- memset(ret, 0, size);
+ ret = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!ret)
+ panic("%s: Failed to allocate %lu bytes\n", __func__, size);
prom_early_allocated += size;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 51c4d12c0853..fd2182a5c32d 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -624,8 +624,14 @@ void __init alloc_irqstack_bootmem(void)
softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
THREAD_SIZE, node);
+ if (!softirq_stack[i])
+ panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+ __func__, THREAD_SIZE, THREAD_SIZE, node);
hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
THREAD_SIZE, node);
+ if (!hardirq_stack[i])
+ panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+ __func__, THREAD_SIZE, THREAD_SIZE, node);
}
}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f45d876983f1..a8275fea4b70 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1628,6 +1628,8 @@ static void __init pcpu_populate_pte(unsigned long addr)
pud_t *new;
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
pgd_populate(&init_mm, pgd, new);
}
@@ -1636,6 +1638,8 @@ static void __init pcpu_populate_pte(unsigned long addr)
pmd_t *new;
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
pud_populate(&init_mm, pud, new);
}
@@ -1644,8 +1648,16 @@ static void __init pcpu_populate_pte(unsigned long addr)
pte_t *new;
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
pmd_populate_kernel(&init_mm, pmd, new);
}
+
+ return;
+
+err_alloc:
+ panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
}
void __init setup_per_cpu_areas(void)
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index b9a5a04b2d2c..a1dd24307b00 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -469,3 +469,7 @@
421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
422 32 futex_time64 sys_futex sys_futex
423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index d900952bfc5f..a8ff29821bdb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -264,7 +264,7 @@ void __init mem_init(void)
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;
sparc_valid_addr_bitmap = (unsigned long *)
- memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL);
+ memblock_alloc(i << 2, SMP_CACHE_BYTES);
if (sparc_valid_addr_bitmap == NULL) {
prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ef340e8f209f..f2d70ff7a284 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1809,6 +1809,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
alloc_bytes += PAGE_SIZE;
pgd_populate(&init_mm, pgd, new);
}
@@ -1822,6 +1824,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
}
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
alloc_bytes += PAGE_SIZE;
pud_populate(&init_mm, pud, new);
}
@@ -1836,6 +1840,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
}
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
alloc_bytes += PAGE_SIZE;
pmd_populate_kernel(&init_mm, pmd, new);
}
@@ -1855,6 +1861,11 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
}
return alloc_bytes;
+
+err_alloc:
+ panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ return -ENOMEM;
}
static void __init flush_all_kernel_tsbs(void)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index b609362e846f..aaebbc00d262 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -303,13 +303,19 @@ static void __init srmmu_nocache_init(void)
bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
- srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size,
- SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+ srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
+ SRMMU_NOCACHE_ALIGN_MAX);
+ if (!srmmu_nocache_pool)
+ panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+ __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
srmmu_nocache_bitmap =
- memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
- SMP_CACHE_BYTES, 0UL);
+ memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+ SMP_CACHE_BYTES);
+ if (!srmmu_nocache_bitmap)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ BITS_TO_LONGS(bitmap_bits) * sizeof(long));
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -467,7 +473,9 @@ static void __init sparc_context_init(int numctx)
unsigned long size;
size = numctx * sizeof(struct ctx_list);
- ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
+ ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!ctx_list_pool)
+ panic("%s: Failed to allocate %lu bytes\n", __func__, size);
for (ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index d80cfb1d9430..6e5be5fb4143 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -649,6 +649,9 @@ static int __init eth_setup(char *str)
}
new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
+ if (!new)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*new));
INIT_LIST_HEAD(&new->list);
new->index = n;
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index a4a41421c5e2..aca09be2373e 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -938,7 +938,7 @@ static int ubd_add(int n, char **error_out)
ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
if (IS_ERR(ubd_dev->queue)) {
err = PTR_ERR(ubd_dev->queue);
- goto out_cleanup;
+ goto out_cleanup_tags;
}
ubd_dev->queue->queuedata = ubd_dev;
@@ -968,8 +968,8 @@ out:
out_cleanup_tags:
blk_mq_free_tag_set(&ubd_dev->tag_set);
-out_cleanup:
- blk_cleanup_queue(ubd_dev->queue);
+ if (!IS_ERR(ubd_dev->queue))
+ blk_cleanup_queue(ubd_dev->queue);
goto out;
}
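
The ubd fix restores the usual unwind discipline: a blk_mq_init_queue() failure now jumps to the label that frees only the tag set, and blk_cleanup_queue() is guarded so it never runs on an ERR_PTR. A stand-alone model of the reverse-order error-label pattern:

    #include <stdio.h>

    static int alloc_tags(void)  { return 0; }   /* succeeds */
    static int init_queue(void)  { return -1; }  /* fails, like blk_mq_init_queue() */
    static void free_tags(void)  { puts("free tag set"); }

    static int setup(void)
    {
            int err;

            err = alloc_tags();
            if (err)
                    goto out;
            err = init_queue();
            if (err)
                    goto out_free_tags;  /* unwind only what was set up */
            return 0;

    out_free_tags:
            free_tags();
    out:
            return err;
    }

    int main(void)
    {
            printf("setup() = %d\n", setup());
            return 0;
    }
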
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 046fa9ea0ccc..596e7056f376 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1576,6 +1576,9 @@ static int __init vector_setup(char *str)
return 1;
}
new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
+ if (!new)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*new));
INIT_LIST_HEAD(&new->list);
new->unit = n;
new->arguments = str;
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
index d2c17dd74620..b3f7b3ca896d 100644
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -16,14 +16,12 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <sys/types.h>
#include <sys/socket.h>
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <netinet/ether.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
-#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <linux/virtio_net.h>
@@ -31,7 +29,6 @@
#include <stdlib.h>
#include <os.h>
#include <um_malloc.h>
-#include <sys/uio.h>
#include "vector_user.h"
#define ID_GRE 0
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 00bcbe2326d9..b506ad06aefc 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h
index 9fb9cf8cd39a..98e50c50c12e 100644
--- a/arch/um/include/asm/syscall-generic.h
+++ b/arch/um/include/asm/syscall-generic.h
@@ -53,84 +53,30 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
const struct uml_pt_regs *r = &regs->regs;
- switch (i) {
- case 0:
- if (!n--)
- break;
- *args++ = UPT_SYSCALL_ARG1(r);
- case 1:
- if (!n--)
- break;
- *args++ = UPT_SYSCALL_ARG2(r);
- case 2:
- if (!n--)
- break;
- *args++ = UPT_SYSCALL_ARG3(r);
- case 3:
- if (!n--)
- break;
- *args++ = UPT_SYSCALL_ARG4(r);
- case 4:
- if (!n--)
- break;
- *args++ = UPT_SYSCALL_ARG5(r);
- case 5:
- if (!n--)
- break;
- *args++ = UPT_SYSCALL_ARG6(r);
- case 6:
- if (!n--)
- break;
- default:
- BUG();
- break;
- }
+ *args++ = UPT_SYSCALL_ARG1(r);
+ *args++ = UPT_SYSCALL_ARG2(r);
+ *args++ = UPT_SYSCALL_ARG3(r);
+ *args++ = UPT_SYSCALL_ARG4(r);
+ *args++ = UPT_SYSCALL_ARG5(r);
+ *args = UPT_SYSCALL_ARG6(r);
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
struct uml_pt_regs *r = &regs->regs;
- switch (i) {
- case 0:
- if (!n--)
- break;
- UPT_SYSCALL_ARG1(r) = *args++;
- case 1:
- if (!n--)
- break;
- UPT_SYSCALL_ARG2(r) = *args++;
- case 2:
- if (!n--)
- break;
- UPT_SYSCALL_ARG3(r) = *args++;
- case 3:
- if (!n--)
- break;
- UPT_SYSCALL_ARG4(r) = *args++;
- case 4:
- if (!n--)
- break;
- UPT_SYSCALL_ARG5(r) = *args++;
- case 5:
- if (!n--)
- break;
- UPT_SYSCALL_ARG6(r) = *args++;
- case 6:
- if (!n--)
- break;
- default:
- BUG();
- break;
- }
+ UPT_SYSCALL_ARG1(r) = *args++;
+ UPT_SYSCALL_ARG2(r) = *args++;
+ UPT_SYSCALL_ARG3(r) = *args++;
+ UPT_SYSCALL_ARG4(r) = *args++;
+ UPT_SYSCALL_ARG5(r) = *args++;
+ UPT_SYSCALL_ARG6(r) = *args;
}
/* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index dce6db147f24..70ee60383900 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -2,162 +2,8 @@
#ifndef __UM_TLB_H
#define __UM_TLB_H
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/percpu.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
- */
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int need_flush; /* Really unmapped some ptes? */
- unsigned long start;
- unsigned long end;
- unsigned int fullmm; /* non-zero means full mm flush */
-};
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long address)
-{
- if (tlb->start > address)
- tlb->start = address;
- if (tlb->end < address + PAGE_SIZE)
- tlb->end = address + PAGE_SIZE;
-}
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
- tlb->need_flush = 0;
-
- tlb->start = TASK_SIZE;
- tlb->end = 0;
-
- if (tlb->fullmm) {
- tlb->start = 0;
- tlb->end = TASK_SIZE;
- }
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->start = start;
- tlb->end = end;
- tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
-}
-
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-
-static inline void
-tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-}
-
-static inline void
-tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- init_tlb_gather(tlb);
-}
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
- if (!tlb->need_flush)
- return;
-
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-/* arch_tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (force) {
- tlb->start = start;
- tlb->end = end;
- tlb->need_flush = 1;
- }
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-/* tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- * while handling the additional races in SMP caused by other CPUs
- * caching valid mappings in their TLBs.
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->need_flush = 1;
- free_page_and_swap_cache(page);
- return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- __tlb_remove_page(tlb, page);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return tlb_remove_page(tlb, page);
-}
-
-/**
- * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- *
- * Record the fact that pte's were really unmapped in ->need_flush, so we can
- * later optimise away the tlb invalidate. This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
- */
-#define tlb_remove_tlb_entry(tlb, ptep, address) \
- do { \
- tlb->need_flush = 1; \
- __tlb_remove_tlb_entry(tlb, ptep, address); \
- } while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-
-#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
-
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-
-#define tlb_migrate_finish(mm) do {} while (0)
+#include <asm-generic/cacheflush.h>
+#include <asm-generic/tlb.h>
#endif
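The UM-private mmu_gather deleted above tracked a [start, end) flush range by widening it one PTE at a time; asm-generic/tlb.h now provides the same bookkeeping. A stand-alone sketch of that range-accumulation idea (hypothetical struct and names, not the generic implementation):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define TASK_SIZE (1UL << 47)   /* illustrative value */

    struct gather { unsigned long start, end; };

    static void gather_init(struct gather *g)
    {
            g->start = TASK_SIZE;   /* empty range: start > end */
            g->end = 0;
    }

    /* Widen the pending flush range to cover one unmapped PTE. */
    static void gather_track(struct gather *g, unsigned long addr)
    {
            if (g->start > addr)
                    g->start = addr;
            if (g->end < addr + PAGE_SIZE)
                    g->end = addr + PAGE_SIZE;
    }

    int main(void)
    {
            struct gather g;

            gather_init(&g);
            gather_track(&g, 0x201000);
            gather_track(&g, 0x1ff000);
            /* prints: flush [0x1ff000, 0x202000) */
            printf("flush [%#lx, %#lx)\n", g.start, g.end);
            return 0;
    }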
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index ce169ea87e61..1dcd310cb34d 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -37,6 +37,8 @@ int __init read_initrd(void)
}
area = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!area)
+ panic("%s: Failed to allocate %llu bytes\n", __func__, size);
if (load_initrd(initrd, area, size) == -1)
return 0;
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 799b571a8f88..99aa11bf53d1 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -66,6 +66,10 @@ static void __init one_page_table_init(pmd_t *pmd)
if (pmd_none(*pmd)) {
pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
set_pmd(pmd, __pmd(_KERNPG_TABLE +
(unsigned long) __pa(pte)));
if (pte != pte_offset_kernel(pmd, 0))
@@ -77,6 +81,10 @@ static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pmd_table)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
if (pmd_table != pmd_offset(pud, 0))
BUG();
@@ -126,6 +134,10 @@ static void __init fixaddr_user_init( void)
fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
+ if (!v)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, size, PAGE_SIZE);
+
memcpy((void *) v , (void *) FIXADDR_USER_START, size);
p = __pa(v);
for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
@@ -146,6 +158,10 @@ void __init paging_init(void)
empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
for (i = 0; i < ARRAY_SIZE(zones_size); i++)
zones_size[i] = 0;
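The hunks above all follow the same shape: memblock_alloc*() can return NULL this early in boot, and since no caller can recover, the new checks panic with the size and alignment that failed. A condensed user-space model of the pattern (alloc_or_panic is a hypothetical wrapper, and panic() here is a stand-in for the kernel's variadic panic()):

    #include <stdio.h>
    #include <stdlib.h>

    /* Model of the boot-time allocator; may return NULL on failure. */
    static void *memblock_alloc_low(size_t size, size_t align)
    {
            return aligned_alloc(align, size);
    }

    static void panic(const char *msg)  /* stand-in for kernel panic() */
    {
            fprintf(stderr, "panic: %s\n", msg);
            abort();
    }

    /* The pattern added above: allocate or die loudly, naming the caller. */
    static void *alloc_or_panic(size_t size, size_t align)
    {
            void *p = memblock_alloc_low(size, align);

            if (!p)
                    panic("Failed to allocate");  /* real code prints size/align */
            return p;
    }

    int main(void)
    {
            void *pte = alloc_or_panic(4096, 4096);

            printf("got %p\n", pte);
            free(pte);
            return 0;
    }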
diff --git a/arch/um/kernel/stacktrace.c b/arch/um/kernel/stacktrace.c
index ebe7bcf62684..bd95e020d509 100644
--- a/arch/um/kernel/stacktrace.c
+++ b/arch/um/kernel/stacktrace.c
@@ -63,8 +63,6 @@ static const struct stacktrace_ops dump_ops = {
static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, &dump_ops, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index a7f1ae58d211..2445dfcf6444 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -5,7 +5,6 @@ config UNICORE32
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
- select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select GENERIC_ATOMIC64
@@ -21,6 +20,7 @@ config UNICORE32
select GENERIC_IOMAP
select MODULES_USE_ELF_REL
select NEED_DMA_MAP_STATE
+ select MMU_GATHER_NO_RANGE if MMU
help
UniCore-32 is a 32-bit Instruction Set Architecture,
including a series of low-power-consumption RISC chip
@@ -39,12 +39,6 @@ config STACKTRACE_SUPPORT
config LOCKDEP_SUPPORT
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config ARCH_HAS_ILOG2_U32
bool
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
index 9aecdd3ddc48..150fafc32fb0 100644
--- a/arch/unicore32/boot/compressed/Makefile
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -61,7 +61,4 @@ $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \
ZTEXTADDR := 0x03000000
ZBSSADDR := ALIGN(4)
-SEDFLAGS_lds = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/unicore32/boot/Makefile $(KCONFIG_CONFIG)
- @sed "$(SEDFLAGS_lds)" < $< > $@
-
+CPPFLAGS_vmlinux.lds = -DTEXT_START="$(ZTEXTADDR)" -DBSS_START="$(ZBSSADDR)"
diff --git a/arch/unicore32/boot/compressed/vmlinux.lds.in b/arch/unicore32/boot/compressed/vmlinux.lds.S
index d5a3ce296239..d5a3ce296239 100644
--- a/arch/unicore32/boot/compressed/vmlinux.lds.in
+++ b/arch/unicore32/boot/compressed/vmlinux.lds.S
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1d1544b6ca74..b301a0b3c0b2 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -18,9 +18,11 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
generic-y += parport.h
generic-y += percpu.h
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
index 9cca15cdae94..00a8477333f6 100644
--- a/arch/unicore32/include/asm/tlb.h
+++ b/arch/unicore32/include/asm/tlb.h
@@ -12,10 +12,9 @@
#ifndef __UNICORE_TLB_H__
#define __UNICORE_TLB_H__
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+/*
+ * unicore32 lacks an efficient flush_tlb_range(), so use flush_tlb_mm().
+ */
#define __pte_free_tlb(tlb, pte, addr) \
do { \
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 0febf1a07c30..1c72f04ff75d 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,4 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
-generic-y += kvm_para.h
generic-y += ucontext.h
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index 4b0cb68c355a..d3239cf2e837 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -207,6 +207,10 @@ request_standard_resources(struct meminfo *mi)
continue;
res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes align=%x\n",
+ __func__, sizeof(*res), SMP_CACHE_BYTES);
+
res->name = "System RAM";
res->start = mi->bank[i].start;
res->end = mi->bank[i].start + mi->bank[i].size - 1;
diff --git a/arch/unicore32/kernel/stacktrace.c b/arch/unicore32/kernel/stacktrace.c
index 9976e767d51c..e37da8c6837b 100644
--- a/arch/unicore32/kernel/stacktrace.c
+++ b/arch/unicore32/kernel/stacktrace.c
@@ -120,8 +120,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
}
walk_stackframe(&frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index a40219291965..aa2060beb408 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -145,8 +145,13 @@ static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
unsigned long prot)
{
if (pmd_none(*pmd)) {
- pte_t *pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t),
- PTRS_PER_PTE * sizeof(pte_t));
+ size_t size = PTRS_PER_PTE * sizeof(pte_t);
+ pte_t *pte = memblock_alloc(size, size);
+
+ if (!pte)
+ panic("%s: Failed to allocate %zu bytes align=%zx\n",
+ __func__, size, size);
+
__pmd_populate(pmd, __pa(pte) | prot);
}
BUG_ON(pmd_bad(*pmd));
@@ -349,6 +354,9 @@ static void __init devicemaps_init(void)
* Allocate the vector page early.
*/
vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!vectors)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
@@ -426,6 +434,9 @@ void __init paging_init(void)
/* allocate the zero page. */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
bootmem_init();
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 90b562a34d65..0a3cc347143f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -14,7 +14,7 @@ config X86_32
select ARCH_WANT_IPC_PARSE_VERSION
select CLKSRC_I8253
select CLONE_BACKWARDS
- select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_DEBUG_STACKOVERFLOW
select MODULES_USE_ELF_REL
select OLD_SIGACTION
@@ -29,7 +29,6 @@ config X86_64
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
select SWIOTLB
- select X86_DEV_DMA_OPS
select ARCH_HAS_SYSCALL_WRAPPER
#
@@ -66,6 +65,7 @@ config X86
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
@@ -75,6 +75,7 @@ config X86
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_STACKWALK
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
@@ -139,7 +140,6 @@ config X86
select HAVE_COPY_THREAD_TLS
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
- select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
@@ -184,7 +184,6 @@ config X86
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if PARAVIRT
- select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_FUNCTION_ARG_ACCESS_API
@@ -269,9 +268,6 @@ config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config GENERIC_CALIBRATE_DELAY
def_bool y
@@ -704,8 +700,6 @@ config STA2X11
bool "STA2X11 Companion Chip Support"
depends on X86_32_NON_STANDARD && PCI
select ARCH_HAS_PHYS_TO_DMA
- select X86_DEV_DMA_OPS
- select X86_DMA_REMAP
select SWIOTLB
select MFD_STA2X11
select GPIOLIB
@@ -784,14 +778,6 @@ config PARAVIRT_SPINLOCKS
If you are unsure how to answer this question, answer Y.
-config QUEUED_LOCK_STAT
- bool "Paravirt queued spinlock statistics"
- depends on PARAVIRT_SPINLOCKS && DEBUG_FS
- ---help---
- Enable the collection of statistical data on the slowpath
- behavior of paravirtualized queued spinlocks and report
- them on debugfs.
-
source "arch/x86/xen/Kconfig"
config KVM_GUEST
@@ -1331,8 +1317,16 @@ config MICROCODE_AMD
processors will be enabled.
config MICROCODE_OLD_INTERFACE
- def_bool y
+ bool "Ancient loading interface (DEPRECATED)"
+ default n
depends on MICROCODE
+ ---help---
+ DO NOT USE THIS! This is the ancient /dev/cpu/microcode interface
+ which was used by userspace tools like iucode_tool and microcode.ctl.
+ It is inadequate because it runs too late in the boot process to
+ load microcode reliably, and it needs special tools. Instead, you
+ should have switched to the early loading method with the initrd or
+ builtin microcode by now: Documentation/x86/microcode.txt
config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support"
@@ -1500,7 +1494,7 @@ config X86_CPA_STATISTICS
depends on DEBUG_FS
---help---
Expose statistics about the Change Page Attribute mechanism, which
- helps to determine the effectivness of preserving large and huge
+ helps to determine the effectiveness of preserving large and huge
page mappings when mapping protections are changed.
config ARCH_HAS_MEM_ENCRYPT
@@ -1607,12 +1601,9 @@ config ARCH_FLATMEM_ENABLE
depends on X86_32 && !NUMA
config ARCH_DISCONTIGMEM_ENABLE
- def_bool y
- depends on NUMA && X86_32
-
-config ARCH_DISCONTIGMEM_DEFAULT
- def_bool y
+ def_bool n
depends on NUMA && X86_32
+ depends on BROKEN
config ARCH_SPARSEMEM_ENABLE
def_bool y
@@ -1621,8 +1612,7 @@ config ARCH_SPARSEMEM_ENABLE
select SPARSEMEM_VMEMMAP_ENABLE if X86_64
config ARCH_SPARSEMEM_DEFAULT
- def_bool y
- depends on X86_64
+ def_bool X86_64 || (NUMA && X86_32)
config ARCH_SELECT_MEMORY_MODEL
def_bool y
@@ -2218,14 +2208,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
If unsure, leave at the default value.
config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs"
+ def_bool y
depends on SMP
- ---help---
- Say Y here to allow turning CPUs off and on. CPUs can be
- controlled through /sys/devices/system/cpu.
- ( Note: power management support will enable this option
- automatically on SMP systems. )
- Say N if you want to disable CPU hotplug.
config BOOTPARAM_HOTPLUG_CPU0
bool "Set default setting of cpu0_hotpluggable"
@@ -2885,11 +2869,6 @@ config HAVE_ATOMIC_IOMAP
config X86_DEV_DMA_OPS
bool
- depends on X86_64 || STA2X11
-
-config X86_DMA_REMAP
- bool
- depends on STA2X11
config HAVE_GENERIC_GUP
def_bool y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2d8b9d8ca4f8..56e748a7679f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -47,7 +47,7 @@ export REALMODE_CFLAGS
export BITS
ifdef CONFIG_X86_NEED_RELOCS
- LDFLAGS_vmlinux := --emit-relocs
+ LDFLAGS_vmlinux := --emit-relocs --discard-none
endif
#
@@ -219,8 +219,12 @@ ifdef CONFIG_RETPOLINE
# Additionally, avoid generating expensive indirect jumps which
# are subject to retpolines, for a small number of switch cases.
# clang turns off jump table generation by default when under
- # retpoline builds, however, gcc does not for x86.
- KBUILD_CFLAGS += $(call cc-option,--param=case-values-threshold=20)
+ # retpoline builds; gcc, however, does not for x86. This was
+ # only fixed in gcc stable version 8.4.0 and onwards, so older
+ # compilers still need the workaround. See gcc bug #86952.
+ ifndef CONFIG_CC_IS_CLANG
+ KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+ endif
endif
archscripts: scripts_basic
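The Makefile comment above is about code shape rather than semantics: a dense switch like the one below is a candidate for a jump table, i.e. an indirect jump through an array of labels, which is exactly what retpolines make expensive. Building with -fno-jump-tables (or with a fixed gcc >= 8.4.0 under retpoline) keeps it as compare-and-branch chains. Illustrative only:

    #include <stdio.h>

    /* A dense switch that gcc may lower to a jump table. Compile with
     * and without -fno-jump-tables and compare the generated assembly.
     */
    int classify(int x)
    {
            switch (x) {
            case 0: return 10;
            case 1: return 11;
            case 2: return 12;
            case 3: return 13;
            case 4: return 14;
            case 5: return 15;
            case 6: return 16;
            case 7: return 17;
            default: return -1;
            }
    }

    int main(void)
    {
            printf("%d\n", classify(3));
            return 0;
    }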
diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
index 0ef4ad55b29b..ad84239e595e 100644
--- a/arch/x86/boot/compressed/acpi.c
+++ b/arch/x86/boot/compressed/acpi.c
@@ -276,7 +276,7 @@ static unsigned long get_acpi_srat_table(void)
if (acpi_table) {
header = (struct acpi_table_header *)acpi_table;
- if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_SRAT))
+ if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_SRAT))
return acpi_table;
}
entry += size;
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index fa0332dda9f2..2e53c056ba20 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -697,8 +697,8 @@ static bool process_mem_region(struct mem_vector *region,
return 1;
}
}
- return 0;
#endif
+ return 0;
}
#ifdef CONFIG_EFI
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index c0d6c560df69..5a237e8dbf8d 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -352,7 +352,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
boot_params->hdr.loadflags &= ~KASLR_FLAG;
/* Save RSDP address for later use. */
- boot_params->acpi_rsdp_addr = get_rsdp_addr();
+ /* boot_params->acpi_rsdp_addr = get_rsdp_addr(); */
sanitize_boot_params(boot_params);
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index fd13655e0f9b..d2f184165934 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -120,8 +120,6 @@ static inline void console_init(void)
void set_sev_encryption_mask(void);
-#endif
-
/* acpi.c */
#ifdef CONFIG_ACPI
acpi_physical_address get_rsdp_addr(void);
@@ -135,3 +133,5 @@ int count_immovable_mem_regions(void);
#else
static inline int count_immovable_mem_regions(void) { return 0; }
#endif
+
+#endif /* BOOT_COMPRESSED_MISC_H */
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 315a67b8896b..90154df8f125 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -13,8 +13,9 @@
*/
#include <linux/types.h>
-#include <linux/kernel.h>
+#include <linux/compiler.h>
#include <linux/errno.h>
+#include <linux/limits.h>
#include <asm/asm.h>
#include "ctype.h"
#include "string.h"
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 9f908112bbb9..2b2481acc661 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -25,18 +25,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_SMP=y
CONFIG_X86_GENERIC=y
CONFIG_HPET_TIMER=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 1d3badfda09e..e8829abf063a 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -24,18 +24,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_SMP=y
CONFIG_CALGARY_IOMMU=y
CONFIG_NR_CPUS=64
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 3ea71b871813..bdeee1b830be 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -11,8 +11,8 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
{
}
-static int cryptd_aegis128_aesni_setkey(struct crypto_aead *aead,
- const u8 *key, unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-
-static int cryptd_aegis128_aesni_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
-static int cryptd_aegis128_aesni_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-
-static int cryptd_aegis128_aesni_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-
-static int cryptd_aegis128_aesni_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__aegis128-aesni", CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-
-static void cryptd_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg crypto_aegis128_aesni_alg[] = {
- {
- .setkey = crypto_aegis128_aesni_setkey,
- .setauthsize = crypto_aegis128_aesni_setauthsize,
- .encrypt = crypto_aegis128_aesni_encrypt,
- .decrypt = crypto_aegis128_aesni_decrypt,
- .init = crypto_aegis128_aesni_init_tfm,
- .exit = crypto_aegis128_aesni_exit_tfm,
-
- .ivsize = AEGIS128_NONCE_SIZE,
- .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
- .chunksize = AEGIS128_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aegis_ctx) +
- __alignof__(struct aegis_ctx),
- .cra_alignmask = 0,
-
- .cra_name = "__aegis128",
- .cra_driver_name = "__aegis128-aesni",
-
- .cra_module = THIS_MODULE,
- }
- }, {
- .setkey = cryptd_aegis128_aesni_setkey,
- .setauthsize = cryptd_aegis128_aesni_setauthsize,
- .encrypt = cryptd_aegis128_aesni_encrypt,
- .decrypt = cryptd_aegis128_aesni_decrypt,
- .init = cryptd_aegis128_aesni_init_tfm,
- .exit = cryptd_aegis128_aesni_exit_tfm,
-
- .ivsize = AEGIS128_NONCE_SIZE,
- .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
- .chunksize = AEGIS128_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_alignmask = 0,
-
- .cra_priority = 400,
-
- .cra_name = "aegis128",
- .cra_driver_name = "aegis128-aesni",
-
- .cra_module = THIS_MODULE,
- }
+static struct aead_alg crypto_aegis128_aesni_alg = {
+ .setkey = crypto_aegis128_aesni_setkey,
+ .setauthsize = crypto_aegis128_aesni_setauthsize,
+ .encrypt = crypto_aegis128_aesni_encrypt,
+ .decrypt = crypto_aegis128_aesni_decrypt,
+ .init = crypto_aegis128_aesni_init_tfm,
+ .exit = crypto_aegis128_aesni_exit_tfm,
+
+ .ivsize = AEGIS128_NONCE_SIZE,
+ .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
+ .chunksize = AEGIS128_BLOCK_SIZE,
+
+ .base = {
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aegis_ctx) +
+ __alignof__(struct aegis_ctx),
+ .cra_alignmask = 0,
+ .cra_priority = 400,
+
+ .cra_name = "__aegis128",
+ .cra_driver_name = "__aegis128-aesni",
+
+ .cra_module = THIS_MODULE,
}
};
+static struct simd_aead_alg *simd_alg;
+
static int __init crypto_aegis128_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis128_aesni_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_aegis128_aesni_alg,
- ARRAY_SIZE(crypto_aegis128_aesni_alg));
+ return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1,
+ &simd_alg);
}
static void __exit crypto_aegis128_aesni_module_exit(void)
{
- crypto_unregister_aeads(crypto_aegis128_aesni_alg,
- ARRAY_SIZE(crypto_aegis128_aesni_alg));
+ simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
}
module_init(crypto_aegis128_aesni_module_init);
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 1b1b39c66c5e..80d917f7e467 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -11,8 +11,8 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
{
}
-static int cryptd_aegis128l_aesni_setkey(struct crypto_aead *aead,
- const u8 *key, unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-
-static int cryptd_aegis128l_aesni_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
-static int cryptd_aegis128l_aesni_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-
-static int cryptd_aegis128l_aesni_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-
-static int cryptd_aegis128l_aesni_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__aegis128l-aesni", CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-
-static void cryptd_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg crypto_aegis128l_aesni_alg[] = {
- {
- .setkey = crypto_aegis128l_aesni_setkey,
- .setauthsize = crypto_aegis128l_aesni_setauthsize,
- .encrypt = crypto_aegis128l_aesni_encrypt,
- .decrypt = crypto_aegis128l_aesni_decrypt,
- .init = crypto_aegis128l_aesni_init_tfm,
- .exit = crypto_aegis128l_aesni_exit_tfm,
-
- .ivsize = AEGIS128L_NONCE_SIZE,
- .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
- .chunksize = AEGIS128L_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aegis_ctx) +
- __alignof__(struct aegis_ctx),
- .cra_alignmask = 0,
-
- .cra_name = "__aegis128l",
- .cra_driver_name = "__aegis128l-aesni",
-
- .cra_module = THIS_MODULE,
- }
- }, {
- .setkey = cryptd_aegis128l_aesni_setkey,
- .setauthsize = cryptd_aegis128l_aesni_setauthsize,
- .encrypt = cryptd_aegis128l_aesni_encrypt,
- .decrypt = cryptd_aegis128l_aesni_decrypt,
- .init = cryptd_aegis128l_aesni_init_tfm,
- .exit = cryptd_aegis128l_aesni_exit_tfm,
-
- .ivsize = AEGIS128L_NONCE_SIZE,
- .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
- .chunksize = AEGIS128L_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_alignmask = 0,
-
- .cra_priority = 400,
-
- .cra_name = "aegis128l",
- .cra_driver_name = "aegis128l-aesni",
-
- .cra_module = THIS_MODULE,
- }
+static struct aead_alg crypto_aegis128l_aesni_alg = {
+ .setkey = crypto_aegis128l_aesni_setkey,
+ .setauthsize = crypto_aegis128l_aesni_setauthsize,
+ .encrypt = crypto_aegis128l_aesni_encrypt,
+ .decrypt = crypto_aegis128l_aesni_decrypt,
+ .init = crypto_aegis128l_aesni_init_tfm,
+ .exit = crypto_aegis128l_aesni_exit_tfm,
+
+ .ivsize = AEGIS128L_NONCE_SIZE,
+ .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
+ .chunksize = AEGIS128L_BLOCK_SIZE,
+
+ .base = {
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aegis_ctx) +
+ __alignof__(struct aegis_ctx),
+ .cra_alignmask = 0,
+ .cra_priority = 400,
+
+ .cra_name = "__aegis128l",
+ .cra_driver_name = "__aegis128l-aesni",
+
+ .cra_module = THIS_MODULE,
}
};
+static struct simd_aead_alg *simd_alg;
+
static int __init crypto_aegis128l_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis128l_aesni_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_aegis128l_aesni_alg,
- ARRAY_SIZE(crypto_aegis128l_aesni_alg));
+ return simd_register_aeads_compat(&crypto_aegis128l_aesni_alg, 1,
+ &simd_alg);
}
static void __exit crypto_aegis128l_aesni_module_exit(void)
{
- crypto_unregister_aeads(crypto_aegis128l_aesni_alg,
- ARRAY_SIZE(crypto_aegis128l_aesni_alg));
+ simd_unregister_aeads(&crypto_aegis128l_aesni_alg, 1, &simd_alg);
}
module_init(crypto_aegis128l_aesni_module_init);
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 6227ca3220a0..716eecb66bd5 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -11,8 +11,8 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
{
}
-static int cryptd_aegis256_aesni_setkey(struct crypto_aead *aead,
- const u8 *key, unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-
-static int cryptd_aegis256_aesni_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
-static int cryptd_aegis256_aesni_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-
-static int cryptd_aegis256_aesni_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-
-static int cryptd_aegis256_aesni_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__aegis256-aesni", CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-
-static void cryptd_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg crypto_aegis256_aesni_alg[] = {
- {
- .setkey = crypto_aegis256_aesni_setkey,
- .setauthsize = crypto_aegis256_aesni_setauthsize,
- .encrypt = crypto_aegis256_aesni_encrypt,
- .decrypt = crypto_aegis256_aesni_decrypt,
- .init = crypto_aegis256_aesni_init_tfm,
- .exit = crypto_aegis256_aesni_exit_tfm,
-
- .ivsize = AEGIS256_NONCE_SIZE,
- .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
- .chunksize = AEGIS256_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aegis_ctx) +
- __alignof__(struct aegis_ctx),
- .cra_alignmask = 0,
-
- .cra_name = "__aegis256",
- .cra_driver_name = "__aegis256-aesni",
-
- .cra_module = THIS_MODULE,
- }
- }, {
- .setkey = cryptd_aegis256_aesni_setkey,
- .setauthsize = cryptd_aegis256_aesni_setauthsize,
- .encrypt = cryptd_aegis256_aesni_encrypt,
- .decrypt = cryptd_aegis256_aesni_decrypt,
- .init = cryptd_aegis256_aesni_init_tfm,
- .exit = cryptd_aegis256_aesni_exit_tfm,
-
- .ivsize = AEGIS256_NONCE_SIZE,
- .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
- .chunksize = AEGIS256_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_alignmask = 0,
-
- .cra_priority = 400,
-
- .cra_name = "aegis256",
- .cra_driver_name = "aegis256-aesni",
-
- .cra_module = THIS_MODULE,
- }
+static struct aead_alg crypto_aegis256_aesni_alg = {
+ .setkey = crypto_aegis256_aesni_setkey,
+ .setauthsize = crypto_aegis256_aesni_setauthsize,
+ .encrypt = crypto_aegis256_aesni_encrypt,
+ .decrypt = crypto_aegis256_aesni_decrypt,
+ .init = crypto_aegis256_aesni_init_tfm,
+ .exit = crypto_aegis256_aesni_exit_tfm,
+
+ .ivsize = AEGIS256_NONCE_SIZE,
+ .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
+ .chunksize = AEGIS256_BLOCK_SIZE,
+
+ .base = {
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aegis_ctx) +
+ __alignof__(struct aegis_ctx),
+ .cra_alignmask = 0,
+ .cra_priority = 400,
+
+ .cra_name = "__aegis256",
+ .cra_driver_name = "__aegis256-aesni",
+
+ .cra_module = THIS_MODULE,
}
};
+static struct simd_aead_alg *simd_alg;
+
static int __init crypto_aegis256_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis256_aesni_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_aegis256_aesni_alg,
- ARRAY_SIZE(crypto_aegis256_aesni_alg));
+ return simd_register_aeads_compat(&crypto_aegis256_aesni_alg, 1,
+ &simd_alg);
}
static void __exit crypto_aegis256_aesni_module_exit(void)
{
- crypto_unregister_aeads(crypto_aegis256_aesni_alg,
- ARRAY_SIZE(crypto_aegis256_aesni_alg));
+ simd_unregister_aeads(&crypto_aegis256_aesni_alg, 1, &simd_alg);
}
module_init(crypto_aegis256_aesni_module_init);
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1e3d2102033a..21c246799aa5 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -25,14 +25,13 @@
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
-#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
+#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
@@ -333,7 +332,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
return -EINVAL;
}
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
err = crypto_aes_expand_key(ctx, in_key, key_len);
else {
kernel_fpu_begin();
@@ -354,7 +353,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
crypto_aes_encrypt_x86(ctx, dst, src);
else {
kernel_fpu_begin();
@@ -367,7 +366,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
crypto_aes_decrypt_x86(ctx, dst, src);
else {
kernel_fpu_begin();
@@ -643,29 +642,6 @@ static int xts_decrypt(struct skcipher_request *req)
aes_ctx(ctx->raw_crypt_ctx));
}
-static int rfc4106_init(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-
-static void rfc4106_exit(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
@@ -710,15 +686,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
-static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
- unsigned int key_len)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(parent);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
-}
-
+/* This is the Integrity Check Value (aka the authentication tag) length and can
+ * be 8, 12 or 16 bytes long. */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
unsigned int authsize)
{
@@ -734,17 +703,6 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
return 0;
}
-/* This is the Integrity Check Value (aka the authentication tag length and can
- * be 8, 12 or 16 bytes long. */
-static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(parent);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
@@ -964,38 +922,6 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
aes_ctx);
}
-
-static int gcmaes_wrapper_encrypt(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- tfm = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- tfm = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, tfm);
-
- return crypto_aead_encrypt(req);
-}
-
-static int gcmaes_wrapper_decrypt(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- tfm = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- tfm = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, tfm);
-
- return crypto_aead_decrypt(req);
-}
#endif
static struct crypto_alg aesni_algs[] = { {
@@ -1148,31 +1074,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
aes_ctx);
}
-static int generic_gcmaes_init(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-
- return 0;
-}
-
-static void generic_gcmaes_exit(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg aesni_aead_algs[] = { {
+static struct aead_alg aesni_aeads[] = { {
.setkey = common_rfc4106_set_key,
.setauthsize = common_rfc4106_set_authsize,
.encrypt = helper_rfc4106_encrypt,
@@ -1180,8 +1082,9 @@ static struct aead_alg aesni_aead_algs[] = { {
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.base = {
- .cra_name = "__gcm-aes-aesni",
- .cra_driver_name = "__driver-gcm-aes-aesni",
+ .cra_name = "__rfc4106(gcm(aes))",
+ .cra_driver_name = "__rfc4106-gcm-aesni",
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
@@ -1189,24 +1092,6 @@ static struct aead_alg aesni_aead_algs[] = { {
.cra_module = THIS_MODULE,
},
}, {
- .init = rfc4106_init,
- .exit = rfc4106_exit,
- .setkey = gcmaes_wrapper_set_key,
- .setauthsize = gcmaes_wrapper_set_authsize,
- .encrypt = gcmaes_wrapper_encrypt,
- .decrypt = gcmaes_wrapper_decrypt,
- .ivsize = GCM_RFC4106_IV_SIZE,
- .maxauthsize = 16,
- .base = {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_module = THIS_MODULE,
- },
-}, {
.setkey = generic_gcmaes_set_key,
.setauthsize = generic_gcmaes_set_authsize,
.encrypt = generic_gcmaes_encrypt,
@@ -1214,38 +1099,21 @@ static struct aead_alg aesni_aead_algs[] = { {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = 16,
.base = {
- .cra_name = "__generic-gcm-aes-aesni",
- .cra_driver_name = "__driver-generic-gcm-aes-aesni",
- .cra_priority = 0,
+ .cra_name = "__gcm(aes)",
+ .cra_driver_name = "__generic-gcm-aesni",
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
.cra_alignmask = AESNI_ALIGN - 1,
.cra_module = THIS_MODULE,
},
-}, {
- .init = generic_gcmaes_init,
- .exit = generic_gcmaes_exit,
- .setkey = gcmaes_wrapper_set_key,
- .setauthsize = gcmaes_wrapper_set_authsize,
- .encrypt = gcmaes_wrapper_encrypt,
- .decrypt = gcmaes_wrapper_decrypt,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = 16,
- .base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "generic-gcm-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_module = THIS_MODULE,
- },
} };
#else
-static struct aead_alg aesni_aead_algs[0];
+static struct aead_alg aesni_aeads[0];
#endif
+static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
static const struct x86_cpu_id aesni_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_AES),
@@ -1253,23 +1121,9 @@ static const struct x86_cpu_id aesni_cpu_id[] = {
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-static void aesni_free_simds(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
- aesni_simd_skciphers[i]; i++)
- simd_skcipher_free(aesni_simd_skciphers[i]);
-}
-
static int __init aesni_init(void)
{
- struct simd_skcipher_alg *simd;
- const char *basename;
- const char *algname;
- const char *drvname;
int err;
- int i;
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
@@ -1304,36 +1158,22 @@ static int __init aesni_init(void)
if (err)
return err;
- err = crypto_register_skciphers(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers));
+ err = simd_register_skciphers_compat(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers),
+ aesni_simd_skciphers);
if (err)
goto unregister_algs;
- err = crypto_register_aeads(aesni_aead_algs,
- ARRAY_SIZE(aesni_aead_algs));
+ err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
+ aesni_simd_aeads);
if (err)
goto unregister_skciphers;
- for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
- algname = aesni_skciphers[i].base.cra_name + 2;
- drvname = aesni_skciphers[i].base.cra_driver_name + 2;
- basename = aesni_skciphers[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto unregister_simds;
-
- aesni_simd_skciphers[i] = simd;
- }
-
return 0;
-unregister_simds:
- aesni_free_simds();
- crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
unregister_skciphers:
- crypto_unregister_skciphers(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers));
+ simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
+ aesni_simd_skciphers);
unregister_algs:
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
return err;
@@ -1341,10 +1181,10 @@ unregister_algs:
static void __exit aesni_exit(void)
{
- aesni_free_simds();
- crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
- crypto_unregister_skciphers(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers));
+ simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
+ aesni_simd_aeads);
+ simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
+ aesni_simd_skciphers);
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}
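The relocated ICV comment in this file states the contract that common_rfc4106_set_authsize() enforces; a minimal stand-alone version of that 8/12/16-byte check (a sketch mirroring the stated rule, not the driver code itself):

    #include <stdio.h>

    /* RFC 4106 GCM: the ICV (authentication tag) must be 8, 12 or 16 bytes. */
    static int rfc4106_check_authsize(unsigned int authsize)
    {
            switch (authsize) {
            case 8:
            case 12:
            case 16:
                    return 0;
            default:
                    return -22;     /* -EINVAL */
            }
    }

    int main(void)
    {
            printf("16 -> %d, 10 -> %d\n",
                   rfc4106_check_authsize(16), rfc4106_check_authsize(10));
            return 0;
    }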
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 45c1c4143176..4967ad620775 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -12,10 +12,10 @@
#include <crypto/algapi.h>
#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/fpu/api.h>
#include <asm/simd.h>
#define CHACHA_STATE_ALIGN 16
@@ -170,7 +170,7 @@ static int chacha_simd(struct skcipher_request *req)
struct skcipher_walk walk;
int err;
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_chacha_crypt(req);
err = skcipher_walk_virt(&walk, req, true);
@@ -193,7 +193,7 @@ static int xchacha_simd(struct skcipher_request *req)
u8 real_iv[16];
int err;
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_xchacha_crypt(req);
err = skcipher_walk_virt(&walk, req, true);
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index c8d9cdacbf10..cb4ab6645106 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -32,10 +32,11 @@
#include <linux/kernel.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
@@ -54,7 +55,7 @@ static u32 __attribute__((pure))
unsigned int iremainder;
unsigned int prealign;
- if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !irq_fpu_usable())
+ if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !crypto_simd_usable())
return crc32_le(crc, p, len);
if ((long)p & SCALE_F_MASK) {
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 5773e1161072..a58fe217c856 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -29,10 +29,11 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
-#include <asm/fpu/internal.h>
+#include <asm/simd.h>
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
@@ -177,7 +178,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
* use faster PCL version if datasize is large enough to
* overcome kernel fpu state save/restore overhead
*/
- if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
kernel_fpu_begin();
*crcp = crc_pcl(data, len, *crcp);
kernel_fpu_end();
@@ -189,7 +190,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
kernel_fpu_begin();
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
kernel_fpu_end();
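The conversions in this file and the neighbouring CRC glue code gate the SIMD path on two things: the buffer being long enough to amortize the FPU save/restore, and crypto_simd_usable() saying the FPU may be touched in this context. A schematic of that dispatch (simd_ok(), the breakeven value, and the byte-at-a-time CRC are all placeholders for the driver's versions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PCL_BREAKEVEN 512   /* placeholder for CRC32C_PCL_BREAKEVEN */

    static bool simd_ok(void)   /* stands in for crypto_simd_usable() */
    {
            return true;
    }

    /* Toy bitwise CRC32C, standing in for both crc32c_le() and crc_pcl(). */
    static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
    {
            while (len--) {
                    crc ^= *p++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
            }
            return crc;
    }

    static uint32_t crc_update(uint32_t crc, const uint8_t *p, size_t len)
    {
            if (len >= PCL_BREAKEVEN && simd_ok()) {
                    /* kernel: kernel_fpu_begin(); crc_pcl(); kernel_fpu_end(); */
                    return crc32c(crc, p, len);
            }
            return crc32c(crc, p, len);  /* generic fallback */
    }

    int main(void)
    {
            const uint8_t buf[4] = "abcd";

            printf("%#x\n", crc_update(~0u, buf, sizeof(buf)));
            return 0;
    }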
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index 0e785c0b2354..3c81e15b0873 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -26,12 +26,13 @@
#include <linux/module.h>
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
-#include <asm/fpu/api.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
+#include <asm/simd.h>
asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
@@ -53,7 +54,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- if (length >= 16 && irq_fpu_usable()) {
+ if (length >= 16 && crypto_simd_usable()) {
kernel_fpu_begin();
ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
kernel_fpu_end();
@@ -70,15 +71,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
return 0;
}
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
- u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
- if (len >= 16 && irq_fpu_usable()) {
+ if (len >= 16 && crypto_simd_usable()) {
kernel_fpu_begin();
- *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+ *(__u16 *)out = crc_t10dif_pcl(crc, data, len);
kernel_fpu_end();
} else
- *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+ *(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
@@ -87,15 +87,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- return __chksum_finup(&ctx->crc, data, len, out);
+ return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(&ctx->crc, data, length, out);
+ return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {
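The crct10dif change above switches __chksum_finup() from a pointer to a by-value seed, which lets chksum_digest() become a true one-shot: it hands in the fixed initial CRC (0) instead of reading per-request state. A sketch of the resulting call shape (the CRC routine below is an illustrative stand-in for crc_t10dif_generic()):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy bitwise CRC-T10DIF; the real polynomial is 0x8BB7. */
    static uint16_t crc_t10dif(uint16_t crc, const uint8_t *p, size_t len)
    {
            while (len--) {
                    crc ^= (uint16_t)(*p++ << 8);
                    for (int i = 0; i < 8; i++)
                            crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7
                                                 : crc << 1;
            }
            return crc;
    }

    /* By-value seed: finup no longer dereferences or mutates desc state. */
    static uint16_t finup(uint16_t crc, const uint8_t *p, size_t len)
    {
            return crc_t10dif(crc, p, len);
    }

    int main(void)
    {
            const uint8_t msg[3] = "abc";

            /* digest == finup with the fixed initial value 0 */
            printf("digest=%#x\n", finup(0, msg, sizeof(msg)));
            return 0;
    }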
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 3582ae885ee1..e3f3e6fd9d65 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -19,8 +19,9 @@
#include <crypto/cryptd.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/hash.h>
-#include <asm/fpu/api.h>
+#include <crypto/internal/simd.h>
#include <asm/cpu_device_id.h>
+#include <asm/simd.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
@@ -171,7 +172,6 @@ static int ghash_async_init(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
- desc->flags = req->base.flags;
return crypto_shash_init(desc);
}
@@ -182,7 +182,7 @@ static int ghash_async_update(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -200,7 +200,7 @@ static int ghash_async_final(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -241,7 +241,7 @@ static int ghash_async_digest(struct ahash_request *req)
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -251,7 +251,6 @@ static int ghash_async_digest(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
- desc->flags = req->base.flags;
return shash_ahash_digest(req, desc);
}
}
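ghash keeps its cryptd wrapper, but the gate changes from irq_fpu_usable() to crypto_simd_usable(): take the synchronous SIMD child only when the FPU is usable and doing so would not starve requests already queued on cryptd. The decision in isolation (all three predicates are stand-ins for the kernel helpers named in the hunks above):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for crypto_simd_usable(), in_atomic(), cryptd_ahash_queued(). */
    static bool simd_usable(void)   { return true; }
    static bool atomic_ctx(void)    { return false; }
    static bool cryptd_queued(void) { return false; }

    /* true  -> defer to the async cryptd queue
     * false -> call the SIMD child synchronously
     */
    static bool must_defer(void)
    {
            return !simd_usable() || (atomic_ctx() && cryptd_queued());
    }

    int main(void)
    {
            printf(must_defer() ? "queue to cryptd\n"
                                : "run SIMD child now\n");
            return 0;
    }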
diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c
index 6634907d6ccd..679627a2a824 100644
--- a/arch/x86/crypto/morus1280-avx2-glue.c
+++ b/arch/x86/crypto/morus1280-avx2-glue.c
@@ -12,6 +12,7 @@
*/
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/morus1280_glue.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_avx2_dec_tail(void *state, const void *src,
asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
u64 assoclen, u64 cryptlen);
-MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
+MORUS1280_DECLARE_ALG(avx2, "morus1280-avx2", 400);
+
+static struct simd_aead_alg *simd_alg;
static int __init crypto_morus1280_avx2_module_init(void)
{
@@ -44,14 +47,13 @@ static int __init crypto_morus1280_avx2_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_morus1280_avx2_algs,
- ARRAY_SIZE(crypto_morus1280_avx2_algs));
+ return simd_register_aeads_compat(&crypto_morus1280_avx2_alg, 1,
+ &simd_alg);
}
static void __exit crypto_morus1280_avx2_module_exit(void)
{
- crypto_unregister_aeads(crypto_morus1280_avx2_algs,
- ARRAY_SIZE(crypto_morus1280_avx2_algs));
+ simd_unregister_aeads(&crypto_morus1280_avx2_alg, 1, &simd_alg);
}
module_init(crypto_morus1280_avx2_module_init);
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index f40244eaf14d..c35c0638d0bb 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -12,6 +12,7 @@
*/
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/morus1280_glue.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_sse2_dec_tail(void *state, const void *src,
asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
u64 assoclen, u64 cryptlen);
-MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
+MORUS1280_DECLARE_ALG(sse2, "morus1280-sse2", 350);
+
+static struct simd_aead_alg *simd_alg;
static int __init crypto_morus1280_sse2_module_init(void)
{
@@ -43,14 +46,13 @@ static int __init crypto_morus1280_sse2_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_morus1280_sse2_algs,
- ARRAY_SIZE(crypto_morus1280_sse2_algs));
+ return simd_register_aeads_compat(&crypto_morus1280_sse2_alg, 1,
+ &simd_alg);
}
static void __exit crypto_morus1280_sse2_module_exit(void)
{
- crypto_unregister_aeads(crypto_morus1280_sse2_algs,
- ARRAY_SIZE(crypto_morus1280_sse2_algs));
+ simd_unregister_aeads(&crypto_morus1280_sse2_alg, 1, &simd_alg);
}
module_init(crypto_morus1280_sse2_module_init);
diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
index 7e600f8bcdad..30fc1bd98ec3 100644
--- a/arch/x86/crypto/morus1280_glue.c
+++ b/arch/x86/crypto/morus1280_glue.c
@@ -11,7 +11,6 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus1280_glue.h>
@@ -205,90 +204,6 @@ void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);
-int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey);
-
-int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize);
-
-int cryptd_morus1280_glue_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt);
-
-int cryptd_morus1280_glue_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt);
-
-int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
- char internal_name[CRYPTO_MAX_ALG_NAME];
-
- if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
- >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm);
-
-void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 9afaf8f8565a..32da56b3bdad 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -12,6 +12,7 @@
*/
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/morus640_glue.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus640_sse2_dec_tail(void *state, const void *src,
asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
u64 assoclen, u64 cryptlen);
-MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
+MORUS640_DECLARE_ALG(sse2, "morus640-sse2", 400);
+
+static struct simd_aead_alg *simd_alg;
static int __init crypto_morus640_sse2_module_init(void)
{
@@ -43,14 +46,13 @@ static int __init crypto_morus640_sse2_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_morus640_sse2_algs,
- ARRAY_SIZE(crypto_morus640_sse2_algs));
+ return simd_register_aeads_compat(&crypto_morus640_sse2_alg, 1,
+ &simd_alg);
}
static void __exit crypto_morus640_sse2_module_exit(void)
{
- crypto_unregister_aeads(crypto_morus640_sse2_algs,
- ARRAY_SIZE(crypto_morus640_sse2_algs));
+ simd_unregister_aeads(&crypto_morus640_sse2_alg, 1, &simd_alg);
}
module_init(crypto_morus640_sse2_module_init);
diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
index cb3a81732016..1dea33d84426 100644
--- a/arch/x86/crypto/morus640_glue.c
+++ b/arch/x86/crypto/morus640_glue.c
@@ -11,7 +11,6 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus640_glue.h>
@@ -200,90 +199,6 @@ void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops);
-int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey);
-
-int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize);
-
-int cryptd_morus640_glue_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt);
-
-int cryptd_morus640_glue_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt);
-
-int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
- char internal_name[CRYPTO_MAX_ALG_NAME];
-
- if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
- >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm);
-
-void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");
diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
index 20d815ea4b6a..f7567cbd35b6 100644
--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
@@ -7,9 +7,10 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_avx2(const u32 *key, const u8 *message, size_t message_len,
static int nhpoly1305_avx2_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- if (srclen < 64 || !irq_fpu_usable())
+ if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c
index ed68d164ce14..a661ede3b5cf 100644
--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c
@@ -7,9 +7,10 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_sse2(const u32 *key, const u8 *message, size_t message_len,
static int nhpoly1305_sse2_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- if (srclen < 64 || !irq_fpu_usable())
+ if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
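The hash-glue conversions in this series all share this shape: irq_fpu_usable()/may_use_simd() become crypto_simd_usable(), which additionally respects the crypto self-test infrastructure's simulated no-SIMD mode. As a sketch, the nhpoly1305 SSE2 update above expands to roughly the following (the SZ_4K batching matches how this glue bounds its FPU sections):

	static int nhpoly1305_sse2_update_sketch(struct shash_desc *desc,
						 const u8 *src, unsigned int srclen)
	{
		/* Short updates, or contexts where the FPU must not be
		 * touched, take the portable C fallback. */
		if (srclen < 64 || !crypto_simd_usable())
			return crypto_nhpoly1305_update(desc, src, srclen);

		do {
			unsigned int n = min_t(unsigned int, srclen, SZ_4K);

			kernel_fpu_begin();
			crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);
			kernel_fpu_end();

			src += n;
			srclen -= n;
		} while (srclen);

		return 0;
	}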
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
index 3b6e70d085da..8457cdd47f75 100644
--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
+++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
@@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
vpaddq t2,t1,t1
vmovq t1x,d4
+ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+ # amount. Careful: we must not assume the carry bits 'd0 >> 26',
+ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+ # integers. It's true in a single-block implementation, but not here.
+
# d1 += d0 >> 26
mov d0,%rax
shr $26,%rax
@@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
# h0 += (d4 >> 26) * 5
mov d4,%rax
shr $26,%rax
- lea (%eax,%eax,4),%eax
- add %eax,%ebx
+ lea (%rax,%rax,4),%rax
+ add %rax,%rbx
# h4 = d4 & 0x3ffffff
mov d4,%rax
and $0x3ffffff,%eax
mov %eax,h4
# h1 += h0 >> 26
- mov %ebx,%eax
- shr $26,%eax
+ mov %rbx,%rax
+ shr $26,%rax
add %eax,h1
# h0 = h0 & 0x3ffffff
andl $0x3ffffff,%ebx
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
index e6add74d78a5..6f0be7a86964 100644
--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
+++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
# h0 += (d4 >> 26) * 5
mov d4,%rax
shr $26,%rax
- lea (%eax,%eax,4),%eax
- add %eax,%ebx
+ lea (%rax,%rax,4),%rax
+ add %rax,%rbx
# h4 = d4 & 0x3ffffff
mov d4,%rax
and $0x3ffffff,%eax
mov %eax,h4
# h1 += h0 >> 26
- mov %ebx,%eax
- shr $26,%eax
+ mov %rbx,%rax
+ shr $26,%rax
add %eax,h1
# h0 = h0 & 0x3ffffff
andl $0x3ffffff,%ebx
@@ -524,6 +524,12 @@ ENTRY(poly1305_2block_sse2)
paddq t2,t1
movq t1,d4
+ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+ # amount. Careful: we must not assume the carry bits 'd0 >> 26',
+ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+ # integers. It's true in a single-block implementation, but not here.
+
# d1 += d0 >> 26
mov d0,%rax
shr $26,%rax
@@ -562,16 +568,16 @@ ENTRY(poly1305_2block_sse2)
# h0 += (d4 >> 26) * 5
mov d4,%rax
shr $26,%rax
- lea (%eax,%eax,4),%eax
- add %eax,%ebx
+ lea (%rax,%rax,4),%rax
+ add %rax,%rbx
# h4 = d4 & 0x3ffffff
mov d4,%rax
and $0x3ffffff,%eax
mov %eax,h4
# h1 += h0 >> 26
- mov %ebx,%eax
- shr $26,%eax
+ mov %rbx,%rax
+ shr $26,%rax
add %eax,h1
# h0 = h0 & 0x3ffffff
andl $0x3ffffff,%ebx
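Context for the register widening above: in the 2-block (and 4-block AVX2) code the unreduced limbs d0..d4 each accumulate several 26-bit by 26-bit products, so the carries 'd_i >> 26' and '(d4 >> 26) * 5' can exceed 32 bits, which the old %eax/%ebx arithmetic silently truncated. A C sketch of the full carry chain the new comments describe (h0 -> h1 -> h2 -> h3 -> h4 -> h0 -> h1), kept in 64-bit arithmetic throughout:

	static void poly1305_partial_reduce(u64 d[5], u32 h[5])
	{
		u64 c;

		c = d[0] >> 26; d[0] &= 0x3ffffff; d[1] += c;
		c = d[1] >> 26; d[1] &= 0x3ffffff; d[2] += c;
		c = d[2] >> 26; d[2] &= 0x3ffffff; d[3] += c;
		c = d[3] >> 26; d[3] &= 0x3ffffff; d[4] += c;
		c = (d[4] >> 26) * 5; d[4] &= 0x3ffffff; d[0] += c; /* 2^130 == 5 (mod p) */
		c = d[0] >> 26; d[0] &= 0x3ffffff; d[1] += c;

		h[0] = d[0]; h[1] = d[1]; h[2] = d[2];
		h[3] = d[3]; h[4] = d[4];
	}

After this pass h0, h2, h3 and h4 are below 2^26 and h1 is below 2^26 plus a small carry, matching the bound stated in the new assembly comments.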
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 88cc01506c84..6eb65b237b3c 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -11,11 +11,11 @@
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/poly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/fpu/api.h>
#include <asm/simd.h>
struct poly1305_simd_desc_ctx {
@@ -126,7 +126,7 @@ static int poly1305_simd_update(struct shash_desc *desc,
unsigned int bytes;
/* kernel_fpu_begin/end is costly, use fallback for small updates */
- if (srclen <= 288 || !may_use_simd())
+ if (srclen <= 288 || !crypto_simd_usable())
return crypto_poly1305_update(desc, src, srclen);
kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 7391c7de72c7..42f177afc33a 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -29,7 +30,7 @@
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
typedef void (sha1_transform_fn)(u32 *digest, const char *data,
unsigned int rounds);
@@ -39,7 +40,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_state *sctx = shash_desc_ctx(desc);
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return crypto_sha1_update(desc, data, len);
@@ -57,7 +58,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
static int sha1_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha1_transform_fn *sha1_xform)
{
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
return crypto_sha1_finup(desc, data, len, out);
kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 773a873d2b28..73867da3cbee 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -37,8 +38,8 @@
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
-#include <asm/fpu/api.h>
#include <linux/string.h>
+#include <asm/simd.h>
asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
u64 rounds);
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_update(desc, data, len);
@@ -67,7 +68,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
static int sha256_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha256_transform_fn *sha256_xform)
{
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
return crypto_sha256_finup(desc, data, len, out);
kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index f1b811b60ba6..458356a3f124 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -28,16 +28,16 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha512_base.h>
-#include <asm/fpu/api.h>
-
-#include <linux/string.h>
+#include <asm/simd.h>
asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
u64 rounds);
@@ -49,7 +49,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
return crypto_sha512_update(desc, data, len);
@@ -67,7 +67,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
static int sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha512_transform_fn *sha512_xform)
{
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
return crypto_sha512_finup(desc, data, len, out);
kernel_fpu_begin();
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d309f30cf7af..7b23431be5cb 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -650,6 +650,7 @@ ENTRY(__switch_to_asm)
pushl %ebx
pushl %edi
pushl %esi
+ pushfl
/* switch stack */
movl %esp, TASK_threadsp(%eax)
@@ -672,6 +673,7 @@ ENTRY(__switch_to_asm)
#endif
/* restore callee-saved registers */
+ popfl
popl %esi
popl %edi
popl %ebx
@@ -766,13 +768,12 @@ END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
-.Lneed_resched:
cmpl $0, PER_CPU_VAR(__preempt_count)
jnz restore_all_kernel
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all_kernel
call preempt_schedule_irq
- jmp .Lneed_resched
+ jmp restore_all_kernel
END(resume_kernel)
#endif
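Two separate fixes land in this file. The pushfl/popfl pair preserves EFLAGS across __switch_to_asm(), so per-task flag state (for instance the AC bit) cannot leak from one task to the next. The .Lneed_resched loop, meanwhile, is redundant because preempt_schedule_irq() does not return until need_resched() is clear; roughly, paraphrased from its loop in kernel/sched/core.c:

	do {
		preempt_disable();
		local_irq_enable();
		__schedule(true);	/* a preemption, not a voluntary switch */
		local_irq_disable();
		sched_preempt_enable_no_resched();
	} while (need_resched());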
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1f0efdb7b629..20e45d9b4e15 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -298,7 +298,7 @@ ENTRY(__switch_to_asm)
#ifdef CONFIG_STACKPROTECTOR
movq TASK_stack_canary(%rsi), %rbx
- movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
+ movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif
#ifdef CONFIG_RETPOLINE
@@ -430,8 +430,8 @@ END(irq_entries_start)
* it before we actually move ourselves to the IRQ stack.
*/
- movq \old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
- movq PER_CPU_VAR(irq_stack_ptr), %rsp
+ movq \old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
+ movq PER_CPU_VAR(hardirq_stack_ptr), %rsp
#ifdef CONFIG_DEBUG_ENTRY
/*
@@ -645,10 +645,9 @@ retint_kernel:
/* Check if we need preemption */
btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
-0: cmpl $0, PER_CPU_VAR(__preempt_count)
+ cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f
call preempt_schedule_irq
- jmp 0b
1:
#endif
/*
@@ -841,7 +840,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
/*
* Exception entry points.
*/
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)
/**
* idtentry - Generate an IDT entry stub
@@ -879,7 +878,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
* @paranoid == 2 is special: the stub will never switch stacks. This is for
* #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
*/
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0
ENTRY(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -925,13 +924,13 @@ ENTRY(\sym)
.endif
.if \shift_ist != -1
- subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+ subq $\ist_offset, CPU_TSS_IST(\shift_ist)
.endif
call \do_sym
.if \shift_ist != -1
- addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+ addq $\ist_offset, CPU_TSS_IST(\shift_ist)
.endif
/* these procedures expect "no swapgs" flag in ebx */
@@ -1129,7 +1128,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
hv_stimer0_callback_vector hv_stimer0_vector_handler
#endif /* CONFIG_HYPERV */
-idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
+idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
idtentry int3 do_int3 has_error_code=0
idtentry stack_segment do_stack_segment has_error_code=1
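The CPU_TSS_IST() change reflects the IST indices becoming zero-based IST_INDEX_* values that match the hardware tss.ist[] array directly, removing the old "(x) - 1" bias; the shift_ist path likewise now moves the #DB stack by an explicit ist_offset rather than a full EXCEPTION_STKSZ. A sketch of what the macro addresses after the change (hypothetical helper for illustration; the zero-based IST_INDEX_DB etc. are assumed from this series):

	static u64 *cpu_tss_ist_slot(int index)	/* zero-based IST_INDEX_* */
	{
		/* CPU_TSS_IST(x) resolves to this per-CPU slot. */
		return &this_cpu_ptr(&cpu_tss_rw)->x86_tss.ist[index];
	}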
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 8da78595d69d..1f9607ed087c 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -429,6 +429,7 @@
421 i386 rt_sigtimedwait_time64 sys_rt_sigtimedwait __ia32_compat_sys_rt_sigtimedwait_time64
422 i386 futex_time64 sys_futex __ia32_sys_futex
423 i386 sched_rr_get_interval_time64 sys_sched_rr_get_interval __ia32_sys_sched_rr_get_interval
+424 i386 pidfd_send_signal sys_pidfd_send_signal __ia32_sys_pidfd_send_signal
425 i386 io_uring_setup sys_io_uring_setup __ia32_sys_io_uring_setup
426 i386 io_uring_enter sys_io_uring_enter __ia32_sys_io_uring_enter
427 i386 io_uring_register sys_io_uring_register __ia32_sys_io_uring_register
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index c768447f97ec..92ee0b4378d4 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -345,6 +345,7 @@
334 common rseq __x64_sys_rseq
# don't use numbers 387 through 423, add new calls after the last
# 'common' entry
+424 common pidfd_send_signal __x64_sys_pidfd_send_signal
425 common io_uring_setup __x64_sys_io_uring_setup
426 common io_uring_enter __x64_sys_io_uring_enter
427 common io_uring_register __x64_sys_io_uring_register
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 5bfe2243a08f..42fe42e82baf 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -116,7 +116,7 @@ $(obj)/%-x32.o: $(obj)/%.o FORCE
targets += vdsox32.lds $(vobjx32s-y)
$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
$(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 007b3fe9d727..98c7d12b945c 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);
#ifdef CONFIG_PARAVIRT_CLOCK
-extern u8 pvclock_page
+extern u8 pvclock_page[PAGE_SIZE]
__attribute__((visibility("hidden")));
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
-extern u8 hvclock_page
+extern u8 hvclock_page[PAGE_SIZE]
__attribute__((visibility("hidden")));
#endif
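Declaring the vvar pages with their true PAGE_SIZE bounds matters to newer compilers (GCC 9), which track object sizes across extern declarations and warn when a one-byte object is dereferenced through a page-sized structure. A hypothetical minimal illustration of the access pattern this legitimizes:

	extern u8 hvclock_page[PAGE_SIZE] __attribute__((visibility("hidden")));

	static inline u64 hvclock_scale_sketch(void)
	{
		const struct ms_hyperv_tsc_page *tsc_pg =
			(const struct ms_hyperv_tsc_page *)&hvclock_page;

		/* Now provably within the declared bounds of hvclock_page. */
		return READ_ONCE(tsc_pg->tsc_scale);
	}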
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index fa847a620f40..a20b134de2a8 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -7,7 +7,7 @@
static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
void *stripped_addr, size_t stripped_len,
- FILE *outfile, const char *name)
+ FILE *outfile, const char *image_name)
{
int found_load = 0;
unsigned long load_size = -1; /* Work around bogus warning */
@@ -93,11 +93,12 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
int k;
ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
GET_LE(&symtab_hdr->sh_entsize) * i;
- const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
- GET_LE(&sym->st_name);
+ const char *sym_name = raw_addr +
+ GET_LE(&strtab_hdr->sh_offset) +
+ GET_LE(&sym->st_name);
for (k = 0; k < NSYMS; k++) {
- if (!strcmp(name, required_syms[k].name)) {
+ if (!strcmp(sym_name, required_syms[k].name)) {
if (syms[k]) {
fail("duplicate symbol %s\n",
required_syms[k].name);
@@ -134,7 +135,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
if (syms[sym_vvar_start] % 4096)
fail("vvar_begin must be a multiple of 4096\n");
- if (!name) {
+ if (!image_name) {
fwrite(stripped_addr, stripped_len, 1, outfile);
return;
}
@@ -157,7 +158,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
}
fprintf(outfile, "\n};\n\n");
- fprintf(outfile, "const struct vdso_image %s = {\n", name);
+ fprintf(outfile, "const struct vdso_image %s = {\n", image_name);
fprintf(outfile, "\t.data = raw_data,\n");
fprintf(outfile, "\t.size = %lu,\n", mapping_size);
if (alt_sec) {
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 7d2d7c801dba..f15441b07dad 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -3,10 +3,14 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <asm/apicdef.h>
+#include <asm/nmi.h>
#include "../perf_event.h"
+static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
+
static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -112,23 +116,144 @@ static __initconst const u64 amd_hw_cache_event_ids
},
};
+static __initconst const u64 amd_hw_cache_event_ids_f17h
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
+ [C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
+ [C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
+ [C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
+ [C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+},
+[C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
+ [C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+},
+[C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+},
+};
+
/*
- * AMD Performance Monitor K7 and later.
+ * AMD Performance Monitor K7 and later, up to and including Family 16h:
*/
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
+};
+
+/*
+ * AMD Performance Monitor Family 17h and later:
+ */
+static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
};
static u64 amd_pmu_event_map(int hw_event)
{
+ if (boot_cpu_data.x86 >= 0x17)
+ return amd_f17h_perfmon_event_map[hw_event];
+
return amd_perfmon_event_map[hw_event];
}
@@ -429,6 +554,132 @@ static void amd_pmu_cpu_dead(int cpu)
}
}
+/*
+ * When a PMC counter overflows, an NMI is used to process the event and
+ * reset the counter. NMI latency can result in the counter being updated
+ * before the NMI can run, which can result in what appear to be spurious
+ * NMIs. This function is intended to wait for the NMI to run and reset
+ * the counter to avoid possible unhandled NMI messages.
+ */
+#define OVERFLOW_WAIT_COUNT 50
+
+static void amd_pmu_wait_on_overflow(int idx)
+{
+ unsigned int i;
+ u64 counter;
+
+ /*
+ * Wait for the counter to be reset if it has overflowed. This loop
+ * should exit very, very quickly, but just in case, don't wait
+ * forever...
+ */
+ for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
+ rdmsrl(x86_pmu_event_addr(idx), counter);
+ if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
+ break;
+
+ /* Might be in IRQ context, so can't sleep */
+ udelay(1);
+ }
+}
+
+static void amd_pmu_disable_all(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int idx;
+
+ x86_pmu_disable_all();
+
+ /*
+ * This shouldn't be called from NMI context, but add a safeguard here
+ * to return, since if we're in NMI context we can't wait for an NMI
+ * to reset an overflowed counter value.
+ */
+ if (in_nmi())
+ return;
+
+ /*
+ * Check each counter for overflow and wait for it to be reset by the
+ * NMI if it has overflowed. This relies on the fact that all active
+ * counters are always enabled when this function is called and
+ * ARCH_PERFMON_EVENTSEL_INT is always set.
+ */
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ amd_pmu_wait_on_overflow(idx);
+ }
+}
+
+static void amd_pmu_disable_event(struct perf_event *event)
+{
+ x86_pmu_disable_event(event);
+
+ /*
+ * This can be called from NMI context (via x86_pmu_stop). The counter
+ * may have overflowed, but either way, we'll never see it get reset
+ * by the NMI if we're already in the NMI. And the NMI latency support
+ * below will take care of any pending NMI that might have been
+ * generated by the overflow.
+ */
+ if (in_nmi())
+ return;
+
+ amd_pmu_wait_on_overflow(event->hw.idx);
+}
+
+/*
+ * Because of NMI latency, if multiple PMC counters are active or other sources
+ * of NMIs are received, the perf NMI handler can handle one or more overflowed
+ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
+ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
+ * back-to-back NMI support won't be active. This PMC handler needs to take into
+ * account that this can occur, otherwise this could result in unknown NMI
+ * messages being issued. Examples of this are PMC overflow while in the NMI
+ * handler when multiple PMCs are active or PMC overflow while handling some
+ * other source of an NMI.
+ *
+ * Attempt to mitigate this by using the number of active PMCs to determine
+ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
+ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
+ * number of active PMCs or 2. The value of 2 is used in case an NMI does not
+ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
+ */
+static int amd_pmu_handle_irq(struct pt_regs *regs)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int active, handled;
+
+ /*
+ * Obtain the active count before calling x86_pmu_handle_irq() since
+ * it is possible that x86_pmu_handle_irq() may make a counter
+ * inactive (through x86_pmu_stop).
+ */
+ active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
+
+ /* Process any counter overflows */
+ handled = x86_pmu_handle_irq(regs);
+
+ /*
+ * If a counter was handled, record the number of possible remaining
+ * NMIs that can occur.
+ */
+ if (handled) {
+ this_cpu_write(perf_nmi_counter,
+ min_t(unsigned int, 2, active));
+
+ return handled;
+ }
+
+ if (!this_cpu_read(perf_nmi_counter))
+ return NMI_DONE;
+
+ this_cpu_dec(perf_nmi_counter);
+
+ return NMI_HANDLED;
+}
+
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
@@ -621,11 +872,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = x86_pmu_disable_all,
+ .handle_irq = amd_pmu_handle_irq,
+ .disable_all = amd_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
- .disable = x86_pmu_disable_event,
+ .disable = amd_pmu_disable_event,
.hw_config = amd_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_K7_EVNTSEL0,
@@ -718,9 +969,10 @@ __init int amd_pmu_init(void)
x86_pmu.amd_nb_constraints = 0;
}
- /* Events are common for all AMDs */
- memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
+ if (boot_cpu_data.x86 >= 0x17)
+ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
+ else
+ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
return 0;
}
@@ -732,7 +984,7 @@ void amd_pmu_enable_virt(void)
cpuc->perf_ctr_virt_mask = 0;
/* Reload all events */
- x86_pmu_disable_all();
+ amd_pmu_disable_all();
x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
@@ -750,7 +1002,7 @@ void amd_pmu_disable_virt(void)
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
/* Reload all events */
- x86_pmu_disable_all();
+ amd_pmu_disable_all();
x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
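The sign-bit test in amd_pmu_wait_on_overflow() works because perf arms each PMC with the negated sample period: while armed, the top implemented bit of the counter is set, and it only reads as clear in the window between the counter wrapping past zero and the NMI handler reloading it. A sketch of the predicate, with cntval_bits as in struct x86_pmu:

	static bool pmc_rearmed(u64 counter, int cntval_bits)
	{
		/* Top implemented bit set => counter holds a large negative
		 * (reloaded) value; clear => overflowed, NMI still pending. */
		return counter & (1ULL << (cntval_bits - 1));
	}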
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index b684f0294f35..f315425d8468 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -560,6 +560,21 @@ int x86_pmu_hw_config(struct perf_event *event)
return -EINVAL;
}
+ /* sample_regs_user never supports XMM registers */
+ if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS))
+ return -EINVAL;
+ /*
+ * Besides the general purpose registers, XMM registers may
+ * be collected in PEBS on some platforms, e.g. Icelake
+ */
+ if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) {
+ if (x86_pmu.pebs_no_xmm_regs)
+ return -EINVAL;
+
+ if (!event->attr.precise_ip)
+ return -EINVAL;
+ }
+
return x86_setup_perfctr(event);
}
@@ -661,6 +676,10 @@ static inline int is_x86_event(struct perf_event *event)
return event->pmu == &pmu;
}
+struct pmu *x86_get_pmu(void)
+{
+ return &pmu;
+}
/*
* Event scheduler state:
*
@@ -849,18 +868,43 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
struct event_constraint *c;
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
struct perf_event *e;
- int i, wmin, wmax, unsched = 0;
+ int n0, i, wmin, wmax, unsched = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+ /*
+ * Compute the number of events already present; see x86_pmu_add(),
+ * validate_group() and x86_pmu_commit_txn(). For the former two
+ * cpuc->n_events hasn't been updated yet, while for the latter
+ * cpuc->n_txn contains the number of events added in the current
+ * transaction.
+ */
+ n0 = cpuc->n_events;
+ if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
+ n0 -= cpuc->n_txn;
+
if (x86_pmu.start_scheduling)
x86_pmu.start_scheduling(cpuc);
for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
- cpuc->event_constraint[i] = NULL;
- c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
- cpuc->event_constraint[i] = c;
+ c = cpuc->event_constraint[i];
+
+ /*
+ * Previously scheduled events should have a cached constraint,
+ * while new events should not have one.
+ */
+ WARN_ON_ONCE((c && i >= n0) || (!c && i < n0));
+
+ /*
+ * Request constraints for new events; or for those events that
+ * have a dynamic constraint -- for those the constraint can
+ * change due to external factors (sibling state, allow_tfa).
+ */
+ if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
+ c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
+ cpuc->event_constraint[i] = c;
+ }
wmin = min(wmin, c->weight);
wmax = max(wmax, c->weight);
@@ -925,25 +969,20 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (!unsched && assign) {
for (i = 0; i < n; i++) {
e = cpuc->event_list[i];
- e->hw.flags |= PERF_X86_EVENT_COMMITTED;
if (x86_pmu.commit_scheduling)
x86_pmu.commit_scheduling(cpuc, i, assign[i]);
}
} else {
- for (i = 0; i < n; i++) {
+ for (i = n0; i < n; i++) {
e = cpuc->event_list[i];
- /*
- * do not put_constraint() on comitted events,
- * because they are good to go
- */
- if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
- continue;
/*
* release events that failed scheduling
*/
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, e);
+
+ cpuc->event_constraint[i] = NULL;
}
}
@@ -1349,8 +1388,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+ if (test_bit(hwc->idx, cpuc->active_mask)) {
x86_pmu.disable(event);
+ __clear_bit(hwc->idx, cpuc->active_mask);
cpuc->events[hwc->idx] = NULL;
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
@@ -1372,11 +1412,6 @@ static void x86_pmu_del(struct perf_event *event, int flags)
int i;
/*
- * event is descheduled
- */
- event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
-
- /*
* If we're called during a txn, we only need to undo x86_pmu.add.
* The events never got scheduled and ->cancel_txn will truncate
* the event_list.
@@ -1412,6 +1447,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
}
+ cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;
perf_event_update_userpage(event);
@@ -1447,16 +1483,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
apic_write(APIC_LVTPC, APIC_DM_NMI);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- if (!test_bit(idx, cpuc->active_mask)) {
- /*
- * Though we deactivated the counter some cpus
- * might still deliver spurious interrupts still
- * in flight. Catch them:
- */
- if (__test_and_clear_bit(idx, cpuc->running))
- handled++;
+ if (!test_bit(idx, cpuc->active_mask))
continue;
- }
event = cpuc->events[idx];
@@ -1995,7 +2023,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
*/
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
- kfree(cpuc->shared_regs);
+ intel_cpuc_finish(cpuc);
kfree(cpuc);
}
@@ -2007,14 +2035,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
if (!cpuc)
return ERR_PTR(-ENOMEM);
-
- /* only needed, if we have extra_regs */
- if (x86_pmu.extra_regs) {
- cpuc->shared_regs = allocate_shared_regs(cpu);
- if (!cpuc->shared_regs)
- goto error;
- }
cpuc->is_fake = 1;
+
+ if (intel_cpuc_prepare(cpuc, cpu))
+ goto error;
+
return cpuc;
error:
free_fake_cpuc(cpuc);
@@ -2034,7 +2059,7 @@ static int validate_event(struct perf_event *event)
if (IS_ERR(fake_cpuc))
return PTR_ERR(fake_cpuc);
- c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
+ c = x86_pmu.get_event_constraints(fake_cpuc, 0, event);
if (!c || !c->weight)
ret = -EINVAL;
@@ -2082,8 +2107,7 @@ static int validate_group(struct perf_event *event)
if (n < 0)
goto out;
- fake_cpuc->n_events = n;
-
+ fake_cpuc->n_events = 0;
ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
out:
@@ -2358,6 +2382,15 @@ void arch_perf_update_userpage(struct perf_event *event,
cyc2ns_read_end();
}
+/*
+ * Determine whether the regs were taken from an irq/exception handler rather
+ * than from perf_arch_fetch_caller_regs().
+ */
+static bool perf_hw_regs(struct pt_regs *regs)
+{
+ return regs->flags & X86_EFLAGS_FIXED;
+}
+
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
@@ -2369,11 +2402,15 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
return;
}
- if (perf_callchain_store(entry, regs->ip))
- return;
+ if (perf_hw_regs(regs)) {
+ if (perf_callchain_store(entry, regs->ip))
+ return;
+ unwind_start(&state, current, regs, NULL);
+ } else {
+ unwind_start(&state, current, NULL, (void *)regs->sp);
+ }
- for (unwind_start(&state, current, regs, NULL); !unwind_done(&state);
- unwind_next_frame(&state)) {
+ for (; !unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr || perf_callchain_store(entry, addr))
return;
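perf_hw_regs() can key off X86_EFLAGS_FIXED because bit 1 of EFLAGS is architecturally always 1: any flags word saved by a real interrupt or exception frame has it set, while the synthetic pt_regs fabricated by perf_arch_fetch_caller_regs() leaves flags zeroed, in which case only regs->sp is meaningful and the unwind starts from the stack pointer instead. A simplified sketch of the fabricated-regs side (field choice assumed from this series):

	static __always_inline void fetch_caller_regs_sketch(struct pt_regs *regs)
	{
		regs->ip = _THIS_IP_;
		regs->sp = (unsigned long)__builtin_frame_address(0);
		regs->flags = 0;	/* X86_EFLAGS_FIXED never set here */
	}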
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 7ec265bacb6a..ef763f535e3a 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -239,6 +239,35 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+static struct event_constraint intel_icl_event_constraints[] = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
+ INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
+ INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
+ INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
+ INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
+ INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
+ INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
+ EVENT_CONSTRAINT_END
+};
+
+static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+ EVENT_EXTRA_END
+};
+
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
@@ -1827,6 +1856,45 @@ static __initconst const u64 glp_hw_cache_extra_regs
},
};
+#define TNT_LOCAL_DRAM BIT_ULL(26)
+#define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
+#define TNT_DEMAND_WRITE GLM_DEMAND_RFO
+#define TNT_LLC_ACCESS GLM_ANY_RESPONSE
+#define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
+ SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
+#define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
+
+static __initconst const u64 tnt_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
+ TNT_LLC_ACCESS,
+ [C(RESULT_MISS)] = TNT_DEMAND_READ|
+ TNT_LLC_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
+ TNT_LLC_ACCESS,
+ [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
+ TNT_LLC_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+};
+
+static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
+ /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
+ EVENT_EXTRA_END
+};
+
#define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
#define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
#define KNL_MCDRAM_LOCAL BIT_ULL(21)
@@ -2000,6 +2068,39 @@ static void intel_pmu_nhm_enable_all(int added)
intel_pmu_enable_all(added);
}
+static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
+{
+ u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
+
+ if (cpuc->tfa_shadow != val) {
+ cpuc->tfa_shadow = val;
+ wrmsrl(MSR_TSX_FORCE_ABORT, val);
+ }
+}
+
+static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
+{
+ /*
+ * We're going to use PMC3, make sure TFA is set before we touch it.
+ */
+ if (cntr == 3)
+ intel_set_tfa(cpuc, true);
+}
+
+static void intel_tfa_pmu_enable_all(int added)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ /*
+ * If we find PMC3 is no longer used when we enable the PMU, we can
+ * clear TFA.
+ */
+ if (!test_bit(3, cpuc->active_mask))
+ intel_set_tfa(cpuc, false);
+
+ intel_pmu_enable_all(added);
+}
+
static void enable_counter_freeze(void)
{
update_debugctlmsr(get_debugctlmsr() |
@@ -2058,15 +2159,19 @@ static void intel_pmu_disable_event(struct perf_event *event)
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
cpuc->intel_cp_status &= ~(1ull << hwc->idx);
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_disable(event);
-
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
}
x86_pmu_disable_event(event);
+
+ /*
+ * Needs to be called after x86_pmu_disable_event,
+ * so we don't trigger the event without PEBS bit set.
+ */
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_disable(event);
}
static void intel_pmu_del_event(struct perf_event *event)
@@ -2112,6 +2217,11 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
bits <<= (idx * 4);
mask = 0xfULL << (idx * 4);
+ if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
+ bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+ mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+ }
+
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
ctrl_val |= bits;
@@ -2655,7 +2765,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
- if ((event->hw.config & c->cmask) == c->code) {
+ if (constraint_match(c, event->hw.config)) {
event->hw.flags |= c->flags;
return c;
}
@@ -2770,13 +2880,42 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
}
static struct event_constraint *
+dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
+{
+ WARN_ON_ONCE(!cpuc->constraint_list);
+
+ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+ struct event_constraint *cx;
+
+ /*
+ * grab pre-allocated constraint entry
+ */
+ cx = &cpuc->constraint_list[idx];
+
+ /*
+ * initialize dynamic constraint
+ * with static constraint
+ */
+ *cx = *c;
+
+ /*
+ * mark constraint as dynamic
+ */
+ cx->flags |= PERF_X86_EVENT_DYNAMIC;
+ c = cx;
+ }
+
+ return c;
+}
+
+static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
int idx, struct event_constraint *c)
{
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
struct intel_excl_states *xlo;
int tid = cpuc->excl_thread_id;
- int is_excl, i;
+ int is_excl, i, w;
/*
* validating a group does not require
@@ -2799,27 +2938,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
* only needed when constraint has not yet
* been cloned (marked dynamic)
*/
- if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
- struct event_constraint *cx;
-
- /*
- * grab pre-allocated constraint entry
- */
- cx = &cpuc->constraint_list[idx];
-
- /*
- * initialize dynamic constraint
- * with static constraint
- */
- *cx = *c;
-
- /*
- * mark constraint as dynamic, so we
- * can free it later on
- */
- cx->flags |= PERF_X86_EVENT_DYNAMIC;
- c = cx;
- }
+ c = dyn_constraint(cpuc, c, idx);
/*
* From here on, the constraint is dynamic.
@@ -2852,36 +2971,40 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
* SHARED : sibling counter measuring non-exclusive event
* UNUSED : sibling counter unused
*/
+ w = c->weight;
for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
/*
* exclusive event in sibling counter
* our corresponding counter cannot be used
* regardless of our event
*/
- if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
+ if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
__clear_bit(i, c->idxmsk);
+ w--;
+ continue;
+ }
/*
* if measuring an exclusive event, sibling
* measuring non-exclusive, then counter cannot
* be used
*/
- if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
+ if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
__clear_bit(i, c->idxmsk);
+ w--;
+ continue;
+ }
}
/*
- * recompute actual bit weight for scheduling algorithm
- */
- c->weight = hweight64(c->idxmsk64);
-
- /*
* if we return an empty mask, then switch
* back to static empty constraint to avoid
* the cost of freeing later on
*/
- if (c->weight == 0)
+ if (!w)
c = &emptyconstraint;
+ c->weight = w;
+
return c;
}
@@ -2889,11 +3012,9 @@ static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
- struct event_constraint *c1 = NULL;
- struct event_constraint *c2;
+ struct event_constraint *c1, *c2;
- if (idx >= 0) /* fake does < 0 */
- c1 = cpuc->event_constraint[idx];
+ c1 = cpuc->event_constraint[idx];
/*
* first time only
@@ -2901,7 +3022,8 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
* - dynamic constraint: handled by intel_get_excl_constraints()
*/
c2 = __intel_get_event_constraints(cpuc, idx, event);
- if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
+ if (c1) {
+ WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
c1->weight = c2->weight;
c2 = c1;
@@ -3089,7 +3211,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
flags &= ~PERF_SAMPLE_TIME;
if (!event->attr.exclude_kernel)
flags &= ~PERF_SAMPLE_REGS_USER;
- if (event->attr.sample_regs_user & ~PEBS_REGS)
+ if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
return flags;
}
@@ -3143,7 +3265,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
return ret;
if (event->attr.precise_ip) {
- if (!event->attr.freq) {
+ if (!(event->attr.freq || event->attr.wakeup_events)) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
~intel_pmu_large_pebs_flags(event)))
@@ -3324,6 +3446,12 @@ static struct event_constraint counter0_constraint =
static struct event_constraint counter2_constraint =
EVENT_CONSTRAINT(0, 0x4, 0);
+static struct event_constraint fixed0_constraint =
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0);
+
+static struct event_constraint fixed0_counter0_constraint =
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
+
static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
@@ -3343,6 +3471,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
}
static struct event_constraint *
+icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ /*
+ * Fixed counter 0 has less skid.
+ * Force instruction:ppp in Fixed counter 0
+ */
+ if ((event->attr.precise_ip == 3) &&
+ constraint_match(&fixed0_constraint, event->hw.config))
+ return &fixed0_constraint;
+
+ return hsw_get_event_constraints(cpuc, idx, event);
+}
+
+static struct event_constraint *
glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
@@ -3357,6 +3500,49 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
return c;
}
+static struct event_constraint *
+tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct event_constraint *c;
+
+ /*
+ * :ppp means to do reduced skid PEBS,
+ * which is available on PMC0 and fixed counter 0.
+ */
+ if (event->attr.precise_ip == 3) {
+ /* Force instruction:ppp on PMC0 and Fixed counter 0 */
+ if (constraint_match(&fixed0_constraint, event->hw.config))
+ return &fixed0_counter0_constraint;
+
+ return &counter0_constraint;
+ }
+
+ c = intel_get_event_constraints(cpuc, idx, event);
+
+ return c;
+}
+
+static bool allow_tsx_force_abort = true;
+
+static struct event_constraint *
+tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
+
+ /*
+ * Without TFA we must not use PMC3.
+ */
+ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
+ c = dyn_constraint(cpuc, c, idx);
+ c->idxmsk64 &= ~(1ULL << 3);
+ c->weight--;
+ }
+
+ return c;
+}
+
/*
* Broadwell:
*
@@ -3410,7 +3596,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
return x86_event_sysfs_show(page, config, event);
}
-struct intel_shared_regs *allocate_shared_regs(int cpu)
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
{
struct intel_shared_regs *regs;
int i;
@@ -3442,9 +3628,10 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
return c;
}
-static int intel_pmu_cpu_prepare(int cpu)
+
+int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ cpuc->pebs_record_size = x86_pmu.pebs_record_size;
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
@@ -3452,13 +3639,15 @@ static int intel_pmu_cpu_prepare(int cpu)
goto err;
}
- if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
- cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+ cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
if (!cpuc->constraint_list)
goto err_shared_regs;
+ }
+ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
if (!cpuc->excl_cntrs)
goto err_constraint_list;
@@ -3480,6 +3669,11 @@ err:
return -ENOMEM;
}
+static int intel_pmu_cpu_prepare(int cpu)
+{
+ return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
+}
+
static void flip_smm_bit(void *data)
{
unsigned long set = *(unsigned long *)data;
@@ -3507,6 +3701,12 @@ static void intel_pmu_cpu_starting(int cpu)
cpuc->lbr_sel = NULL;
+ if (x86_pmu.flags & PMU_FL_TFA) {
+ WARN_ON_ONCE(cpuc->tfa_shadow);
+ cpuc->tfa_shadow = ~0ULL;
+ intel_set_tfa(cpuc, false);
+ }
+
if (x86_pmu.version > 1)
flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
@@ -3554,9 +3754,8 @@ static void intel_pmu_cpu_starting(int cpu)
}
}
-static void free_excl_cntrs(int cpu)
+static void free_excl_cntrs(struct cpu_hw_events *cpuc)
{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_excl_cntrs *c;
c = cpuc->excl_cntrs;
@@ -3564,9 +3763,10 @@ static void free_excl_cntrs(int cpu)
if (c->core_id == -1 || --c->refcnt == 0)
kfree(c);
cpuc->excl_cntrs = NULL;
- kfree(cpuc->constraint_list);
- cpuc->constraint_list = NULL;
}
+
+ kfree(cpuc->constraint_list);
+ cpuc->constraint_list = NULL;
}
static void intel_pmu_cpu_dying(int cpu)
@@ -3577,9 +3777,8 @@ static void intel_pmu_cpu_dying(int cpu)
disable_counter_freeze();
}
-static void intel_pmu_cpu_dead(int cpu)
+void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_shared_regs *pc;
pc = cpuc->shared_regs;
@@ -3589,7 +3788,12 @@ static void intel_pmu_cpu_dead(int cpu)
cpuc->shared_regs = NULL;
}
- free_excl_cntrs(cpu);
+ free_excl_cntrs(cpuc);
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
+ intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -4036,6 +4240,42 @@ static struct attribute *hsw_tsx_events_attrs[] = {
NULL
};
+EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
+EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
+EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
+EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
+
+static struct attribute *icl_events_attrs[] = {
+ EVENT_PTR(mem_ld_hsw),
+ EVENT_PTR(mem_st_hsw),
+ NULL,
+};
+
+static struct attribute *icl_tsx_events_attrs[] = {
+ EVENT_PTR(tx_start),
+ EVENT_PTR(tx_abort),
+ EVENT_PTR(tx_commit),
+ EVENT_PTR(tx_capacity_read),
+ EVENT_PTR(tx_capacity_write),
+ EVENT_PTR(tx_conflict),
+ EVENT_PTR(el_start),
+ EVENT_PTR(el_abort),
+ EVENT_PTR(el_commit),
+ EVENT_PTR(el_capacity_read),
+ EVENT_PTR(el_capacity_write),
+ EVENT_PTR(el_conflict),
+ EVENT_PTR(cycles_t),
+ EVENT_PTR(cycles_ct),
+ NULL,
+};
+
+static __init struct attribute **get_icl_events_attrs(void)
+{
+ return boot_cpu_has(X86_FEATURE_RTM) ?
+ merge_attr(icl_events_attrs, icl_tsx_events_attrs) :
+ icl_events_attrs;
+}
+
static ssize_t freeze_on_smi_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
@@ -4075,6 +4315,50 @@ done:
return count;
}
+static void update_tfa_sched(void *ignored)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ /*
+ * check if PMC3 is used
+ * and if so force schedule out for all event types in all contexts
+ */
+ if (test_bit(3, cpuc->active_mask))
+ perf_pmu_resched(x86_get_pmu());
+}
+
+static ssize_t show_sysctl_tfa(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
+}
+
+static ssize_t set_sysctl_tfa(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool val;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ /* no change */
+ if (val == allow_tsx_force_abort)
+ return count;
+
+ allow_tsx_force_abort = val;
+
+ get_online_cpus();
+ on_each_cpu(update_tfa_sched, NULL, 1);
+ put_online_cpus();
+
+ return count;
+}
+
static DEVICE_ATTR_RW(freeze_on_smi);
static ssize_t branches_show(struct device *cdev,
@@ -4107,8 +4391,13 @@ static struct attribute *intel_pmu_caps_attrs[] = {
NULL
};
+static DEVICE_ATTR(allow_tsx_force_abort, 0644,
+ show_sysctl_tfa,
+ set_sysctl_tfa);
+
static struct attribute *intel_pmu_attrs[] = {
&dev_attr_freeze_on_smi.attr,
+ NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
NULL,
};
@@ -4365,6 +4654,32 @@ __init int intel_pmu_init(void)
name = "goldmont_plus";
break;
+ case INTEL_FAM6_ATOM_TREMONT_X:
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
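+ /* a value of -1 marks the generic cache event as unsupported */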
+ hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+
+ intel_pmu_lbr_init_skl();
+
+ x86_pmu.event_constraints = intel_slm_event_constraints;
+ x86_pmu.extra_regs = intel_tnt_extra_regs;
+ /*
+ * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
+ * for precise cycles.
+ */
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.lbr_pt_coexist = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.get_event_constraints = tnt_get_event_constraints;
+ extra_attr = slm_format_attr;
+ pr_cont("Tremont events, ");
+ name = "tremont";
+ break;
+
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_WESTMERE_EP:
case INTEL_FAM6_WESTMERE_EX:
@@ -4607,10 +4922,47 @@ __init int intel_pmu_init(void)
tsx_attr = hsw_tsx_events_attrs;
intel_pmu_pebs_data_source_skl(
boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
+
+ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ x86_pmu.flags |= PMU_FL_TFA;
+ x86_pmu.get_event_constraints = tfa_get_event_constraints;
+ x86_pmu.enable_all = intel_tfa_pmu_enable_all;
+ x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
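+ /* expose the attribute by filling the placeholder slot in intel_pmu_attrs[] */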
+ intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr;
+ }
+
pr_cont("Skylake events, ");
name = "skylake";
break;
+ case INTEL_FAM6_ICELAKE_MOBILE:
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+ hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+ intel_pmu_lbr_init_skl();
+
+ x86_pmu.event_constraints = intel_icl_event_constraints;
+ x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_icl_extra_regs;
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
+
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = icl_get_event_constraints;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
+ extra_attr = merge_attr(extra_attr, skl_format_attr);
+ x86_pmu.cpu_events = get_icl_events_attrs();
+ x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
+ x86_pmu.lbr_pt_coexist = true;
+ intel_pmu_pebs_data_source_skl(false);
+ pr_cont("Icelake events, ");
+ name = "icelake";
+ break;
+
default:
switch (x86_pmu.version) {
case 1:
@@ -4758,7 +5110,7 @@ static __init int fixup_ht_bug(void)
hardlockup_detector_perf_restart();
for_each_online_cpu(c)
- free_excl_cntrs(c);
+ free_excl_cntrs(&per_cpu(cpu_hw_events, c));
cpus_read_unlock();
pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 94a4b7fc75d0..6072f92cb8ea 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -76,15 +76,15 @@
* Scope: Package (physical package)
* MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
* perf code: 0x04
- * Available model: HSW ULT,CNL
+ * Available model: HSW ULT,KBL,CNL
* Scope: Package (physical package)
* MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
* perf code: 0x05
- * Available model: HSW ULT,CNL
+ * Available model: HSW ULT,KBL,CNL
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
- * Available model: HSW ULT,GLM,CNL
+ * Available model: HSW ULT,KBL,GLM,CNL
* Scope: Package (physical package)
*
*/
@@ -566,8 +566,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
@@ -578,6 +578,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 10c99ce1fead..7a9f5dac5abe 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -849,6 +849,26 @@ struct event_constraint intel_skl_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_icl_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */
+
+ INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */
+
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
+
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+
+ /*
+ * Everything else is handled by PMU_FL_PEBS_ALL, because we
+ * need the full constraints from the main table.
+ */
+
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
@@ -858,7 +878,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
if (x86_pmu.pebs_constraints) {
for_each_event_constraint(c, x86_pmu.pebs_constraints) {
- if ((event->hw.config & c->cmask) == c->code) {
+ if (constraint_match(c, event->hw.config)) {
event->hw.flags |= c->flags;
return c;
}
@@ -906,17 +926,87 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
if (cpuc->n_pebs == cpuc->n_large_pebs) {
threshold = ds->pebs_absolute_maximum -
- reserved * x86_pmu.pebs_record_size;
+ reserved * cpuc->pebs_record_size;
} else {
- threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
+ threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
}
ds->pebs_interrupt_threshold = threshold;
}
+static void adaptive_pebs_record_size_update(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ u64 pebs_data_cfg = cpuc->pebs_data_cfg;
+ int sz = sizeof(struct pebs_basic);
+
+ if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
+ sz += sizeof(struct pebs_meminfo);
+ if (pebs_data_cfg & PEBS_DATACFG_GP)
+ sz += sizeof(struct pebs_gprs);
+ if (pebs_data_cfg & PEBS_DATACFG_XMMS)
+ sz += sizeof(struct pebs_xmm);
+ if (pebs_data_cfg & PEBS_DATACFG_LBRS)
+ sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);
+
+ cpuc->pebs_record_size = sz;
+}
+
+#define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \
+ PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
+ PERF_SAMPLE_TRANSACTION)
+
+static u64 pebs_update_adaptive_cfg(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u64 sample_type = attr->sample_type;
+ u64 pebs_data_cfg = 0;
+ bool gprs, tsx_weight;
+
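+ /* the basic record already carries IP and TSC; a purely precise-IP request needs no extra sections */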
+ if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
+ attr->precise_ip > 1)
+ return pebs_data_cfg;
+
+ if (sample_type & PERF_PEBS_MEMINFO_TYPE)
+ pebs_data_cfg |= PEBS_DATACFG_MEMINFO;
+
+ /*
+ * We need GPRs when:
+ * + the user requested them
+ * + precise_ip < 2, for the non-event IP
+ * + RTM TSX weight is requested, for the abort code in AX
+ */
+ gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
+ (attr->sample_regs_intr & PEBS_GP_REGS);
+
+ tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
+ ((attr->config & INTEL_ARCH_EVENT_MASK) ==
+ x86_pmu.rtm_abort_event);
+
+ if (gprs || (attr->precise_ip < 2) || tsx_weight)
+ pebs_data_cfg |= PEBS_DATACFG_GP;
+
+ if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
+ (attr->sample_regs_intr & PEBS_XMM_REGS))
+ pebs_data_cfg |= PEBS_DATACFG_XMMS;
+
+ if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
+ /*
+ * For now, always log all LBRs. This could be made
+ * configurable later.
+ */
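+ /* the hardware expects the number of LBR entries minus one */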
+ pebs_data_cfg |= PEBS_DATACFG_LBRS |
+ ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
+ }
+
+ return pebs_data_cfg;
+}
+
static void
-pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
+pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
+ struct perf_event *event, bool add)
{
+ struct pmu *pmu = event->ctx->pmu;
/*
* Make sure we get updated with the first PEBS
* event. It will trigger also during removal, but
@@ -933,6 +1023,29 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
update = true;
}
+ /*
+ * The PEBS record doesn't shrink on pmu::del(). Doing so would require
+ * iterating all remaining PEBS events to reconstruct the config.
+ */
+ if (x86_pmu.intel_cap.pebs_baseline && add) {
+ u64 pebs_data_cfg;
+
+ /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
+ if (cpuc->n_pebs == 1) {
+ cpuc->pebs_data_cfg = 0;
+ cpuc->pebs_record_size = sizeof(struct pebs_basic);
+ }
+
+ pebs_data_cfg = pebs_update_adaptive_cfg(event);
+
+ /* Update pebs_record_size if new event requires more data. */
+ if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
+ cpuc->pebs_data_cfg |= pebs_data_cfg;
+ adaptive_pebs_record_size_update();
+ update = true;
+ }
+ }
+
if (update)
pebs_update_threshold(cpuc);
}
@@ -947,7 +1060,7 @@ void intel_pmu_pebs_add(struct perf_event *event)
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs++;
- pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
+ pebs_update_state(needed_cb, cpuc, event, true);
}
void intel_pmu_pebs_enable(struct perf_event *event)
@@ -960,11 +1073,19 @@ void intel_pmu_pebs_enable(struct perf_event *event)
cpuc->pebs_enabled |= 1ULL << hwc->idx;
- if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
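+ /* PMU v5+ no longer uses the separate load-latency enable bits in MSR_PEBS_ENABLE */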
+ if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
cpuc->pebs_enabled |= 1ULL << 63;
+ if (x86_pmu.intel_cap.pebs_baseline) {
+ hwc->config |= ICL_EVENTSEL_ADAPTIVE;
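+ /* skip the MSR write when the active configuration already matches */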
+ if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+ wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
+ cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
+ }
+ }
+
/*
* Use auto-reload if possible to save a MSR write in the PMI.
* This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
@@ -991,7 +1112,7 @@ void intel_pmu_pebs_del(struct perf_event *event)
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs--;
- pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
+ pebs_update_state(needed_cb, cpuc, event, false);
}
void intel_pmu_pebs_disable(struct perf_event *event)
@@ -1004,7 +1125,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
- if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
+ if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
+ (x86_pmu.version < 5))
cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
cpuc->pebs_enabled &= ~(1ULL << 63);
@@ -1125,34 +1247,57 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
return 0;
}
-static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{
- if (pebs->tsx_tuning) {
- union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
+ if (tsx_tuning) {
+ union hsw_tsx_tuning tsx = { .value = tsx_tuning };
return tsx.cycles_last_block;
}
return 0;
}
-static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{
- u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
+ u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
/* For RTM XABORTs also log the abort code from AX */
- if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
- txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
+ if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
+ txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
return txn;
}
-static void setup_pebs_sample_data(struct perf_event *event,
- struct pt_regs *iregs, void *__pebs,
- struct perf_sample_data *data,
- struct pt_regs *regs)
+static inline u64 get_pebs_status(void *n)
{
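+ /* on format 4 the counter bitmap lives in the basic record's applicable_counters field */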
+ if (x86_pmu.intel_cap.pebs_format < 4)
+ return ((struct pebs_record_nhm *)n)->status;
+ return ((struct pebs_basic *)n)->applicable_counters;
+}
+
#define PERF_X86_EVENT_PEBS_HSW_PREC \
(PERF_X86_EVENT_PEBS_ST_HSW | \
PERF_X86_EVENT_PEBS_LD_HSW | \
PERF_X86_EVENT_PEBS_NA_HSW)
+
+static u64 get_data_src(struct perf_event *event, u64 aux)
+{
+ u64 val = PERF_MEM_NA;
+ int fl = event->hw.flags;
+ bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+
+ if (fl & PERF_X86_EVENT_PEBS_LDLAT)
+ val = load_latency_data(aux);
+ else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
+ val = precise_datala_hsw(event, aux);
+ else if (fst)
+ val = precise_store_data(aux);
+ return val;
+}
+
+static void setup_pebs_fixed_sample_data(struct perf_event *event,
+ struct pt_regs *iregs, void *__pebs,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
/*
* We cast to the biggest pebs_record but are careful not to
* unconditionally access the 'extra' entries.
@@ -1160,17 +1305,13 @@ static void setup_pebs_sample_data(struct perf_event *event,
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct pebs_record_skl *pebs = __pebs;
u64 sample_type;
- int fll, fst, dsrc;
- int fl = event->hw.flags;
+ int fll;
if (pebs == NULL)
return;
sample_type = event->attr.sample_type;
- dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
-
- fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
- fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+ fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
perf_sample_data_init(data, 0, event->hw.last_period);
@@ -1185,16 +1326,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
/*
* data.data_src encodes the data source
*/
- if (dsrc) {
- u64 val = PERF_MEM_NA;
- if (fll)
- val = load_latency_data(pebs->dse);
- else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
- val = precise_datala_hsw(event, pebs->dse);
- else if (fst)
- val = precise_store_data(pebs->dse);
- data->data_src.val = val;
- }
+ if (sample_type & PERF_SAMPLE_DATA_SRC)
+ data->data_src.val = get_data_src(event, pebs->dse);
/*
* We must however always use iregs for the unwinder to stay sane; the
@@ -1281,10 +1414,11 @@ static void setup_pebs_sample_data(struct perf_event *event,
if (x86_pmu.intel_cap.pebs_format >= 2) {
/* Only set the TSX weight when no memory weight. */
if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
- data->weight = intel_hsw_weight(pebs);
+ data->weight = intel_get_tsx_weight(pebs->tsx_tuning);
if (sample_type & PERF_SAMPLE_TRANSACTION)
- data->txn = intel_hsw_transaction(pebs);
+ data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
+ pebs->ax);
}
/*
@@ -1301,6 +1435,140 @@ static void setup_pebs_sample_data(struct perf_event *event,
data->br_stack = &cpuc->lbr_stack;
}
+static void adaptive_pebs_save_regs(struct pt_regs *regs,
+ struct pebs_gprs *gprs)
+{
+ regs->ax = gprs->ax;
+ regs->bx = gprs->bx;
+ regs->cx = gprs->cx;
+ regs->dx = gprs->dx;
+ regs->si = gprs->si;
+ regs->di = gprs->di;
+ regs->bp = gprs->bp;
+ regs->sp = gprs->sp;
+#ifndef CONFIG_X86_32
+ regs->r8 = gprs->r8;
+ regs->r9 = gprs->r9;
+ regs->r10 = gprs->r10;
+ regs->r11 = gprs->r11;
+ regs->r12 = gprs->r12;
+ regs->r13 = gprs->r13;
+ regs->r14 = gprs->r14;
+ regs->r15 = gprs->r15;
+#endif
+}
+
+/*
+ * With adaptive PEBS the layout depends on what fields are configured.
+ */
+
+static void setup_pebs_adaptive_sample_data(struct perf_event *event,
+ struct pt_regs *iregs, void *__pebs,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct pebs_basic *basic = __pebs;
+ void *next_record = basic + 1;
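+ /* optional sections follow the basic record in fixed order: meminfo, GPRs, XMMs, LBRs */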
+ u64 sample_type;
+ u64 format_size;
+ struct pebs_meminfo *meminfo = NULL;
+ struct pebs_gprs *gprs = NULL;
+ struct x86_perf_regs *perf_regs;
+
+ if (basic == NULL)
+ return;
+
+ perf_regs = container_of(regs, struct x86_perf_regs, regs);
+ perf_regs->xmm_regs = NULL;
+
+ sample_type = event->attr.sample_type;
+ format_size = basic->format_size;
+ perf_sample_data_init(data, 0, event->hw.last_period);
+ data->period = event->hw.last_period;
+
+ if (event->attr.use_clockid == 0)
+ data->time = native_sched_clock_from_tsc(basic->tsc);
+
+ /*
+ * We must however always use iregs for the unwinder to stay sane; the
+ * record BP,SP,IP can point into thin air when the record is from a
+ * previous PMI context or an (I)RET happened between the record and
+ * PMI.
+ */
+ if (sample_type & PERF_SAMPLE_CALLCHAIN)
+ data->callchain = perf_callchain(event, iregs);
+
+ *regs = *iregs;
+ /* The ip in basic is EventingIP */
+ set_linear_ip(regs, basic->ip);
+ regs->flags = PERF_EFLAGS_EXACT;
+
+ /*
+ * The MEMINFO record comes before the GP record, but
+ * PERF_SAMPLE_TRANSACTION needs gprs->ax. Save the
+ * pointer here and process it later.
+ */
+ if (format_size & PEBS_DATACFG_MEMINFO) {
+ meminfo = next_record;
+ next_record = meminfo + 1;
+ }
+
+ if (format_size & PEBS_DATACFG_GP) {
+ gprs = next_record;
+ next_record = gprs + 1;
+
+ if (event->attr.precise_ip < 2) {
+ set_linear_ip(regs, gprs->ip);
+ regs->flags &= ~PERF_EFLAGS_EXACT;
+ }
+
+ if (sample_type & PERF_SAMPLE_REGS_INTR)
+ adaptive_pebs_save_regs(regs, gprs);
+ }
+
+ if (format_size & PEBS_DATACFG_MEMINFO) {
+ if (sample_type & PERF_SAMPLE_WEIGHT)
+ data->weight = meminfo->latency ?:
+ intel_get_tsx_weight(meminfo->tsx_tuning);
+
+ if (sample_type & PERF_SAMPLE_DATA_SRC)
+ data->data_src.val = get_data_src(event, meminfo->aux);
+
+ if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
+ data->addr = meminfo->address;
+
+ if (sample_type & PERF_SAMPLE_TRANSACTION)
+ data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
+ gprs ? gprs->ax : 0);
+ }
+
+ if (format_size & PEBS_DATACFG_XMMS) {
+ struct pebs_xmm *xmm = next_record;
+
+ next_record = xmm + 1;
+ perf_regs->xmm_regs = xmm->xmm;
+ }
+
+ if (format_size & PEBS_DATACFG_LBRS) {
+ struct pebs_lbr *lbr = next_record;
+ int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
+ & 0xff) + 1;
+ next_record = next_record + num_lbr*sizeof(struct pebs_lbr_entry);
+
+ if (has_branch_stack(event)) {
+ intel_pmu_store_pebs_lbrs(lbr);
+ data->br_stack = &cpuc->lbr_stack;
+ }
+ }
+
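+ /* the record's true size is carried in the top 16 bits of format_size */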
+ WARN_ONCE(next_record != __pebs + (format_size >> 48),
+ "PEBS record size %llu, expected %llu, config %llx\n",
+ format_size >> 48,
+ (u64)(next_record - __pebs),
+ basic->format_size);
+}
+
static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{
@@ -1318,19 +1586,19 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
if (base == NULL)
return NULL;
- for (at = base; at < top; at += x86_pmu.pebs_record_size) {
- struct pebs_record_nhm *p = at;
+ for (at = base; at < top; at += cpuc->pebs_record_size) {
+ unsigned long status = get_pebs_status(at);
- if (test_bit(bit, (unsigned long *)&p->status)) {
+ if (test_bit(bit, (unsigned long *)&status)) {
/* PEBS v3 has accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3)
return at;
- if (p->status == (1 << bit))
+ if (status == (1 << bit))
return at;
/* clear non-PEBS bit and re-check */
- pebs_status = p->status & cpuc->pebs_enabled;
+ pebs_status = status & cpuc->pebs_enabled;
pebs_status &= PEBS_COUNTER_MASK;
if (pebs_status == (1 << bit))
return at;
@@ -1410,11 +1678,18 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs,
void *base, void *top,
- int bit, int count)
+ int bit, int count,
+ void (*setup_sample)(struct perf_event *,
+ struct pt_regs *,
+ void *,
+ struct perf_sample_data *,
+ struct pt_regs *))
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
struct perf_sample_data data;
- struct pt_regs regs;
+ struct x86_perf_regs perf_regs;
+ struct pt_regs *regs = &perf_regs.regs;
void *at = get_next_pebs_record_by_bit(base, top, bit);
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
@@ -1429,20 +1704,20 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
return;
while (count > 1) {
- setup_pebs_sample_data(event, iregs, at, &data, &regs);
- perf_event_output(event, &data, &regs);
- at += x86_pmu.pebs_record_size;
+ setup_sample(event, iregs, at, &data, regs);
+ perf_event_output(event, &data, regs);
+ at += cpuc->pebs_record_size;
at = get_next_pebs_record_by_bit(at, top, bit);
count--;
}
- setup_pebs_sample_data(event, iregs, at, &data, &regs);
+ setup_sample(event, iregs, at, &data, regs);
/*
* All but the last records are processed.
* The last one is left to be able to call the overflow handler.
*/
- if (perf_event_overflow(event, &data, &regs)) {
+ if (perf_event_overflow(event, &data, regs)) {
x86_pmu_stop(event, 0);
return;
}
@@ -1483,7 +1758,27 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
return;
}
- __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
+ __intel_pmu_pebs_event(event, iregs, at, top, 0, n,
+ setup_pebs_fixed_sample_data);
+}
+
+static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
+{
+ struct perf_event *event;
+ int bit;
+
+ /*
+ * drain_pebs() can be called twice in a short period for an
+ * auto-reload event in pmu::read(), with no overflows having
+ * happened in between. intel_pmu_save_and_restart_reload()
+ * still needs to be called to update event->count in that
+ * case.
+ */
+ for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
+ event = cpuc->events[bit];
+ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+ intel_pmu_save_and_restart_reload(event, 0);
+ }
}
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
@@ -1513,19 +1808,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
}
if (unlikely(base >= top)) {
- /*
- * The drain_pebs() could be called twice in a short period
- * for auto-reload event in pmu::read(). There are no
- * overflows have happened in between.
- * It needs to call intel_pmu_save_and_restart_reload() to
- * update the event->count for this case.
- */
- for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
- size) {
- event = cpuc->events[bit];
- if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
- intel_pmu_save_and_restart_reload(event, 0);
- }
+ intel_pmu_pebs_event_update_no_drain(cpuc, size);
return;
}
@@ -1538,8 +1821,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
/* PEBS v3 has more accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3) {
- for_each_set_bit(bit, (unsigned long *)&pebs_status,
- size)
+ for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
counts[bit]++;
continue;
@@ -1578,8 +1860,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* If collision happened, the record will be dropped.
*/
if (p->status != (1ULL << bit)) {
- for_each_set_bit(i, (unsigned long *)&pebs_status,
- x86_pmu.max_pebs_events)
+ for_each_set_bit(i, (unsigned long *)&pebs_status, size)
error[i]++;
continue;
}
@@ -1587,7 +1868,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
counts[bit]++;
}
- for (bit = 0; bit < size; bit++) {
+ for_each_set_bit(bit, (unsigned long *)&mask, size) {
if ((counts[bit] == 0) && (error[bit] == 0))
continue;
@@ -1608,11 +1889,66 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
if (counts[bit]) {
__intel_pmu_pebs_event(event, iregs, base,
- top, bit, counts[bit]);
+ top, bit, counts[bit],
+ setup_pebs_fixed_sample_data);
}
}
}
+static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
+{
+ short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct perf_event *event;
+ void *base, *at, *top;
+ int bit, size;
+ u64 mask;
+
+ if (!x86_pmu.pebs_active)
+ return;
+
+ base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
+ top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
+
+ ds->pebs_index = ds->pebs_buffer_base;
+
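+ /* with extended PEBS, fixed counters can also generate PEBS records */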
+ mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
+ (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
+ size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+
+ if (unlikely(base >= top)) {
+ intel_pmu_pebs_event_update_no_drain(cpuc, size);
+ return;
+ }
+
+ for (at = base; at < top; at += cpuc->pebs_record_size) {
+ u64 pebs_status;
+
+ pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
+ pebs_status &= mask;
+
+ for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
+ counts[bit]++;
+ }
+
+ for_each_set_bit(bit, (unsigned long *)&mask, size) {
+ if (counts[bit] == 0)
+ continue;
+
+ event = cpuc->events[bit];
+ if (WARN_ON_ONCE(!event))
+ continue;
+
+ if (WARN_ON_ONCE(!event->attr.precise_ip))
+ continue;
+
+ __intel_pmu_pebs_event(event, iregs, base,
+ top, bit, counts[bit],
+ setup_pebs_adaptive_sample_data);
+ }
+}
+
/*
* BTS, PEBS probe and setup
*/
@@ -1628,12 +1964,18 @@ void __init intel_ds_init(void)
x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
- if (x86_pmu.version <= 4)
+ if (x86_pmu.version <= 4) {
x86_pmu.pebs_no_isolation = 1;
+ x86_pmu.pebs_no_xmm_regs = 1;
+ }
if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
+ char *pebs_qual = "";
int format = x86_pmu.intel_cap.pebs_format;
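+ /* the baseline capability bit is only defined for record format 4 and later */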
+ if (format < 4)
+ x86_pmu.intel_cap.pebs_baseline = 0;
+
switch (format) {
case 0:
pr_cont("PEBS fmt0%c, ", pebs_type);
@@ -1669,6 +2011,29 @@ void __init intel_ds_init(void)
x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
break;
+ case 4:
+ x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
+ x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
+ if (x86_pmu.intel_cap.pebs_baseline) {
+ x86_pmu.large_pebs_flags |=
+ PERF_SAMPLE_BRANCH_STACK |
+ PERF_SAMPLE_TIME;
+ x86_pmu.flags |= PMU_FL_PEBS_ALL;
+ pebs_qual = "-baseline";
+ } else {
+ /* Only basic record supported */
+ x86_pmu.pebs_no_xmm_regs = 1;
+ x86_pmu.large_pebs_flags &=
+ ~(PERF_SAMPLE_ADDR |
+ PERF_SAMPLE_TIME |
+ PERF_SAMPLE_DATA_SRC |
+ PERF_SAMPLE_TRANSACTION |
+ PERF_SAMPLE_REGS_USER |
+ PERF_SAMPLE_REGS_INTR);
+ }
+ pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
+ break;
+
default:
pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
x86_pmu.pebs = 0;
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 580c1b91c454..6f814a27416b 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -488,6 +488,8 @@ void intel_pmu_lbr_add(struct perf_event *event)
* be 'new'. Conversely, a new event can get installed through the
* context switch path for the first time.
*/
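+ /* count LBR users whose branch records are delivered via adaptive PEBS */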
+ if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
+ cpuc->lbr_pebs_users++;
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
@@ -507,8 +509,11 @@ void intel_pmu_lbr_del(struct perf_event *event)
task_ctx->lbr_callstack_users--;
}
+ if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
+ cpuc->lbr_pebs_users--;
cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0);
+ WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
perf_sched_cb_dec(event->ctx->pmu);
}
@@ -658,7 +663,13 @@ void intel_pmu_lbr_read(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (!cpuc->lbr_users)
+ /*
+ * Don't read when all LBR users are using adaptive PEBS.
+ *
+ * This could be smarter and actually check the event,
+ * but this simple approach seems to work for now.
+ */
+ if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users)
return;
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
@@ -1080,6 +1091,28 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
}
}
+void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int i;
+
+ cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ u64 info = lbr->lbr[i].info;
+ struct perf_branch_entry *e = &cpuc->lbr_entries[i];
+
+ e->from = lbr->lbr[i].from;
+ e->to = lbr->lbr[i].to;
+ e->mispred = !!(info & LBR_INFO_MISPRED);
+ e->predicted = !(info & LBR_INFO_MISPRED);
+ e->in_tx = !!(info & LBR_INFO_IN_TX);
+ e->abort = !!(info & LBR_INFO_ABORT);
+ e->cycles = info & LBR_INFO_CYCLES;
+ e->reserved = 0;
+ }
+ intel_pmu_lbr_filter(cpuc);
+}
+
/*
* Map interface branch filters onto LBR filters
*/
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index fb3a2f13fc70..339d7628080c 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1525,8 +1525,7 @@ static __init int pt_init(void)
}
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
- pt_pmu.pmu.capabilities =
- PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
+ pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
pt_pmu.pmu.attr_groups = pt_attr_groups;
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 94dc564146ca..37ebf6fc5415 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -775,6 +775,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
+
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
{},
};
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index d516161c00c4..fc40a1473058 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -732,6 +732,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
/* fixed counters have event field hardcoded to zero */
hwc->config = 0ULL;
} else if (is_freerunning_event(event)) {
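+ /* populate hw.config first: the freerunning helpers validate against it */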
+ hwc->config = event->attr.config;
if (!check_valid_freerunning_event(box, event))
return -EINVAL;
event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
@@ -1366,6 +1367,11 @@ static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
.pci_init = skx_uncore_pci_init,
};
+static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
+ .cpu_init = icl_uncore_cpu_init,
+ .pci_init = skl_uncore_pci_init,
+};
+
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
@@ -1392,6 +1398,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
{},
};
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index cb46d602a6b8..79eb2e21e4f0 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -292,8 +292,8 @@ static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
struct perf_event *event)
{
- unsigned int type = uncore_freerunning_type(event->attr.config);
- unsigned int idx = uncore_freerunning_idx(event->attr.config);
+ unsigned int type = uncore_freerunning_type(event->hw.config);
+ unsigned int idx = uncore_freerunning_idx(event->hw.config);
struct intel_uncore_pmu *pmu = box->pmu;
return pmu->type->freerunning[type].counter_base +
@@ -377,7 +377,7 @@ static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
struct perf_event *event)
{
- unsigned int type = uncore_freerunning_type(event->attr.config);
+ unsigned int type = uncore_freerunning_type(event->hw.config);
return box->pmu->type->freerunning[type].bits;
}
@@ -385,7 +385,7 @@ unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
static inline int uncore_num_freerunning(struct intel_uncore_box *box,
struct perf_event *event)
{
- unsigned int type = uncore_freerunning_type(event->attr.config);
+ unsigned int type = uncore_freerunning_type(event->hw.config);
return box->pmu->type->freerunning[type].num_counters;
}
@@ -399,8 +399,8 @@ static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
struct perf_event *event)
{
- unsigned int type = uncore_freerunning_type(event->attr.config);
- unsigned int idx = uncore_freerunning_idx(event->attr.config);
+ unsigned int type = uncore_freerunning_type(event->hw.config);
+ unsigned int idx = uncore_freerunning_idx(event->hw.config);
return (type < uncore_num_freerunning_types(box, event)) &&
(idx < uncore_num_freerunning(box, event));
@@ -512,6 +512,7 @@ int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
+void icl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);
/* uncore_snbep.c */
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index b12517fae77a..f8431819b3e1 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -34,6 +34,8 @@
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
+#define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02
+#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -93,6 +95,12 @@
#define SKL_UNC_PERF_GLOBAL_CTL 0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1)
+/* ICL Cbo register */
+#define ICL_UNC_CBO_CONFIG 0x396
+#define ICL_UNC_NUM_CBO_MASK 0xf
+#define ICL_UNC_CBO_0_PER_CTR0 0x702
+#define ICL_UNC_CBO_MSR_OFFSET 0x8
+
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
@@ -280,6 +288,70 @@ void skl_uncore_cpu_init(void)
snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
+static struct intel_uncore_type icl_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 4,
+ .perf_ctr_bits = 44,
+ .perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
+ .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
+ .msr_offset = ICL_UNC_CBO_MSR_OFFSET,
+ .ops = &skl_uncore_msr_ops,
+ .format_group = &snb_uncore_format_group,
+};
+
+static struct uncore_event_desc icl_uncore_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
+ { /* end: all zeroes */ },
+};
+
+static struct attribute *icl_uncore_clock_formats_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group icl_uncore_clock_format_group = {
+ .name = "format",
+ .attrs = icl_uncore_clock_formats_attr,
+};
+
+static struct intel_uncore_type icl_uncore_clockbox = {
+ .name = "clock",
+ .num_counters = 1,
+ .num_boxes = 1,
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNB_UNC_FIXED_CTR,
+ .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
+ .single_fixed = 1,
+ .event_mask = SNB_UNC_CTL_EV_SEL_MASK,
+ .format_group = &icl_uncore_clock_format_group,
+ .ops = &skl_uncore_msr_ops,
+ .event_descs = icl_uncore_events,
+};
+
+static struct intel_uncore_type *icl_msr_uncores[] = {
+ &icl_uncore_cbox,
+ &snb_uncore_arb,
+ &icl_uncore_clockbox,
+ NULL,
+};
+
+static int icl_get_cbox_num(void)
+{
+ u64 num_boxes;
+
+ rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
+
+ return num_boxes & ICL_UNC_NUM_CBO_MASK;
+}
+
+void icl_uncore_cpu_init(void)
+{
+ uncore_msr_uncores = icl_msr_uncores;
+ icl_uncore_cbox.num_boxes = icl_get_cbox_num();
+ snb_uncore_arb.ops = &skl_uncore_msr_ops;
+}
+
enum {
SNB_PCI_UNCORE_IMC,
};
@@ -442,9 +514,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
/* must be done before validate_group */
event->hw.event_base = base;
- event->hw.config = cfg;
event->hw.idx = idx;
+ /* Convert to standard encoding format for freerunning counters */
+ event->hw.config = ((cfg - 1) << 8) | 0x10ff;
+
/* no group validation needed, we have free running counters */
return 0;
@@ -666,6 +740,18 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
{ /* end: all zeroes */ },
};
+static const struct pci_device_id icl_uncore_pci_ids[] = {
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* end: all zeroes */ },
+};
+
static struct pci_driver snb_uncore_pci_driver = {
.name = "snb_uncore",
.id_table = snb_uncore_pci_ids,
@@ -691,6 +777,11 @@ static struct pci_driver skl_uncore_pci_driver = {
.id_table = skl_uncore_pci_ids,
};
+static struct pci_driver icl_uncore_pci_driver = {
+ .name = "icl_uncore",
+ .id_table = icl_uncore_pci_ids,
+};
+
struct imc_uncore_pci_dev {
__u32 pci_id;
struct pci_driver *driver;
@@ -730,6 +821,8 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
+ IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
+ IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
{ /* end marker */ }
};
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index a878e6286e4a..f3f4c2263501 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -89,6 +89,7 @@ static bool test_intel(int idx)
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_KABYLAKE_MOBILE:
case INTEL_FAM6_KABYLAKE_DESKTOP:
+ case INTEL_FAM6_ICELAKE_MOBILE:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true;
break;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 7e75f474b076..07fc84bb85c1 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -49,28 +49,33 @@ struct event_constraint {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
u64 idxmsk64;
};
- u64 code;
- u64 cmask;
- int weight;
- int overlap;
- int flags;
+ u64 code;
+ u64 cmask;
+ int weight;
+ int overlap;
+ int flags;
+ unsigned int size;
};
+
+static inline bool constraint_match(struct event_constraint *c, u64 ecode)
+{
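+ /* unsigned wrap-around makes this match any masked code in [c->code, c->code + c->size] */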
+ return ((ecode & c->cmask) - c->code) <= (u64)c->size;
+}
+
/*
* struct hw_perf_event.flags flags
*/
#define PERF_X86_EVENT_PEBS_LDLAT 0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST 0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW 0x0004 /* haswell style datala, store */
-#define PERF_X86_EVENT_COMMITTED 0x0008 /* event passed commit_txn */
-#define PERF_X86_EVENT_PEBS_LD_HSW 0x0010 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW 0x0020 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */
-#define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */
-#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */
-#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
-#define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */
-#define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */
-
+#define PERF_X86_EVENT_PEBS_LD_HSW 0x0008 /* haswell style datala, load */
+#define PERF_X86_EVENT_PEBS_NA_HSW 0x0010 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_EXCL 0x0020 /* HT exclusivity on counter */
+#define PERF_X86_EVENT_DYNAMIC 0x0040 /* dynamic alloc'd constraint */
+#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0080 /* grant rdpmc permission */
+#define PERF_X86_EVENT_EXCL_ACCT 0x0100 /* accounted EXCL event */
+#define PERF_X86_EVENT_AUTO_RELOAD 0x0200 /* use PEBS auto-reload */
+#define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */
struct amd_nb {
int nb_id; /* NorthBridge id */
@@ -96,25 +101,43 @@ struct amd_nb {
PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
PERF_SAMPLE_PERIOD)
-#define PEBS_REGS \
- (PERF_REG_X86_AX | \
- PERF_REG_X86_BX | \
- PERF_REG_X86_CX | \
- PERF_REG_X86_DX | \
- PERF_REG_X86_DI | \
- PERF_REG_X86_SI | \
- PERF_REG_X86_SP | \
- PERF_REG_X86_BP | \
- PERF_REG_X86_IP | \
- PERF_REG_X86_FLAGS | \
- PERF_REG_X86_R8 | \
- PERF_REG_X86_R9 | \
- PERF_REG_X86_R10 | \
- PERF_REG_X86_R11 | \
- PERF_REG_X86_R12 | \
- PERF_REG_X86_R13 | \
- PERF_REG_X86_R14 | \
- PERF_REG_X86_R15)
+#define PEBS_GP_REGS \
+ ((1ULL << PERF_REG_X86_AX) | \
+ (1ULL << PERF_REG_X86_BX) | \
+ (1ULL << PERF_REG_X86_CX) | \
+ (1ULL << PERF_REG_X86_DX) | \
+ (1ULL << PERF_REG_X86_DI) | \
+ (1ULL << PERF_REG_X86_SI) | \
+ (1ULL << PERF_REG_X86_SP) | \
+ (1ULL << PERF_REG_X86_BP) | \
+ (1ULL << PERF_REG_X86_IP) | \
+ (1ULL << PERF_REG_X86_FLAGS) | \
+ (1ULL << PERF_REG_X86_R8) | \
+ (1ULL << PERF_REG_X86_R9) | \
+ (1ULL << PERF_REG_X86_R10) | \
+ (1ULL << PERF_REG_X86_R11) | \
+ (1ULL << PERF_REG_X86_R12) | \
+ (1ULL << PERF_REG_X86_R13) | \
+ (1ULL << PERF_REG_X86_R14) | \
+ (1ULL << PERF_REG_X86_R15))
+
+#define PEBS_XMM_REGS \
+ ((1ULL << PERF_REG_X86_XMM0) | \
+ (1ULL << PERF_REG_X86_XMM1) | \
+ (1ULL << PERF_REG_X86_XMM2) | \
+ (1ULL << PERF_REG_X86_XMM3) | \
+ (1ULL << PERF_REG_X86_XMM4) | \
+ (1ULL << PERF_REG_X86_XMM5) | \
+ (1ULL << PERF_REG_X86_XMM6) | \
+ (1ULL << PERF_REG_X86_XMM7) | \
+ (1ULL << PERF_REG_X86_XMM8) | \
+ (1ULL << PERF_REG_X86_XMM9) | \
+ (1ULL << PERF_REG_X86_XMM10) | \
+ (1ULL << PERF_REG_X86_XMM11) | \
+ (1ULL << PERF_REG_X86_XMM12) | \
+ (1ULL << PERF_REG_X86_XMM13) | \
+ (1ULL << PERF_REG_X86_XMM14) | \
+ (1ULL << PERF_REG_X86_XMM15))
/*
* Per register state.
@@ -207,10 +230,16 @@ struct cpu_hw_events {
int n_pebs;
int n_large_pebs;
+ /* Current superset of the hardware configuration of all events */
+ u64 pebs_data_cfg;
+ u64 active_pebs_data_cfg;
+ int pebs_record_size;
+
/*
* Intel LBR bits
*/
int lbr_users;
+ int lbr_pebs_users;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
struct er_account *lbr_sel;
@@ -243,6 +272,11 @@ struct cpu_hw_events {
int excl_thread_id; /* 0 or 1 */
/*
+ * SKL TSX_FORCE_ABORT shadow
+ */
+ u64 tfa_shadow;
+
+ /*
* AMD specific bits
*/
struct amd_nb *amd_nb;
@@ -252,18 +286,29 @@ struct cpu_hw_events {
void *kfree_on_online[X86_PERF_KFREE_MAX];
};
-#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
+#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \
{ .idxmsk64 = (n) }, \
.code = (c), \
+ .size = (e) - (c), \
.cmask = (m), \
.weight = (w), \
.overlap = (o), \
.flags = f, \
}
+#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
+ __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
+
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
+/*
+ * The constraint_match() function only works for 'simple' event codes
+ * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
+ */
+#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
+ __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
+
#define INTEL_EXCLEVT_CONSTRAINT(c, n) \
__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
0, PERF_X86_EVENT_EXCL)
@@ -299,6 +344,12 @@ struct cpu_hw_events {
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
/*
+ * Constraint on a range of Event codes
+ */
+#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) \
+ EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
+
+/*
* Constraint on the Event code + UMask + fixed-mask
*
* filter mask to validate fixed counter events.
@@ -345,6 +396,9 @@ struct cpu_hw_events {
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \
+ EVENT_CONSTRAINT_RANGE(c, e, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+
/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n) \
EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
@@ -361,6 +415,11 @@ struct cpu_hw_events {
ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
+ __EVENT_CONSTRAINT_RANGE(code, end, n, \
+ ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
__EVENT_CONSTRAINT(code, n, \
ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
@@ -468,6 +527,7 @@ union perf_capabilities {
* values > 32bit.
*/
u64 full_width_write:1;
+ u64 pebs_baseline:1;
};
u64 capabilities;
};
@@ -608,14 +668,16 @@ struct x86_pmu {
pebs_broken :1,
pebs_prec_dist :1,
pebs_no_tlb :1,
- pebs_no_isolation :1;
+ pebs_no_isolation :1,
+ pebs_no_xmm_regs :1;
int pebs_record_size;
int pebs_buffer_size;
+ int max_pebs_events;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
- int max_pebs_events;
unsigned long large_pebs_flags;
+ u64 rtm_abort_event;
/*
* Intel LBR
@@ -682,6 +744,7 @@ do { \
#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
+#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -708,6 +771,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = { \
.event_str_ht = ht, \
}
+struct pmu *x86_get_pmu(void);
extern struct x86_pmu x86_pmu __read_mostly;
static inline bool x86_pmu_has_lbr_callstack(void)
@@ -890,7 +954,8 @@ struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event);
-struct intel_shared_regs *allocate_shared_regs(int cpu);
+extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
+extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
int intel_pmu_init(void);
@@ -934,6 +999,8 @@ extern struct event_constraint intel_bdw_pebs_event_constraints[];
extern struct event_constraint intel_skl_pebs_event_constraints[];
+extern struct event_constraint intel_icl_pebs_event_constraints[];
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_add(struct perf_event *event);
@@ -952,6 +1019,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
void intel_pmu_auto_reload_read(struct perf_event *event);
+void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
+
void intel_ds_init(void);
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
@@ -1026,9 +1095,13 @@ static inline int intel_pmu_init(void)
return 0;
}
-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
+static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
+ return 0;
+}
+
+static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
- return NULL;
}
static inline int is_ht_workaround_enabled(void)
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 8eb6fbee8e13..5c056b8aebef 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -86,6 +86,11 @@ static void hv_apic_write(u32 reg, u32 val)
static void hv_apic_eoi_write(u32 reg, u32 val)
{
+ struct hv_vp_assist_page *hvp = hv_vp_assist_page[smp_processor_id()];
+
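+ /* bit 0 of apic_assist set means the hypervisor needs no explicit EOI write; consume it and skip the MSR */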
+ if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1))
+ return;
+
wrmsr(HV_X64_MSR_EOI, val, 0);
}
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 7abb09e2eeb8..e4ba467a9fc6 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -96,15 +96,20 @@ void __percpu **hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);
u32 hv_max_vp_index;
+EXPORT_SYMBOL_GPL(hv_max_vp_index);
static int hv_cpu_init(unsigned int cpu)
{
u64 msr_vp_index;
struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
void **input_arg;
+ struct page *pg;
input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
- *input_arg = page_address(alloc_page(GFP_KERNEL));
+ pg = alloc_page(GFP_KERNEL);
+ if (unlikely(!pg))
+ return -ENOMEM;
+ *input_arg = page_address(pg);
hv_get_vp_index(msr_vp_index);
@@ -406,6 +411,13 @@ void hyperv_cleanup(void)
/* Reset our OS id */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+ /*
+ * Clear the hypercall page reference before resetting the
+ * page, so that hypercall operations fail safely rather than
+ * panicking the kernel by using an invalid hypercall page.
+ */
+ hv_hypercall_pg = NULL;
+
/* Reset the hypercall page */
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
index a861b0456b1a..07f21a06392f 100644
--- a/arch/x86/hyperv/hv_spinlock.c
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -56,7 +56,7 @@ static void hv_qlock_wait(u8 *byte, u8 val)
/*
* Hyper-V does not support this so far.
*/
-bool hv_vcpu_is_preempted(int vcpu)
+__visible bool hv_vcpu_is_preempted(int vcpu)
{
return false;
}
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 321fe5f5d0e9..4d5fcd47ab75 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -61,9 +61,8 @@
} while (0)
#define RELOAD_SEG(seg) { \
- unsigned int pre = GET_SEG(seg); \
+ unsigned int pre = (seg) | 3; \
unsigned int cur = get_user_seg(seg); \
- pre |= 3; \
if (pre != cur) \
set_user_seg(seg, pre); \
}
@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
struct sigcontext_32 __user *sc)
{
unsigned int tmpflags, err = 0;
+ u16 gs, fs, es, ds;
void __user *buf;
u32 tmp;
@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
current->restart_block.fn = do_no_restart_syscall;
get_user_try {
- /*
- * Reload fs and gs if they have changed in the signal
- * handler. This does not handle long fs/gs base changes in
- * the handler, but does not clobber them at least in the
- * normal case.
- */
- RELOAD_SEG(gs);
- RELOAD_SEG(fs);
- RELOAD_SEG(ds);
- RELOAD_SEG(es);
+ gs = GET_SEG(gs);
+ fs = GET_SEG(fs);
+ ds = GET_SEG(ds);
+ es = GET_SEG(es);
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip); COPY(ax);
@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
buf = compat_ptr(tmp);
} get_user_catch(err);
+ /*
+ * Reload fs and gs if they have changed in the signal
+ * handler. This does not handle long fs/gs base changes in
+ * the handler, but does not clobber them at least in the
+ * normal case.
+ */
+ RELOAD_SEG(gs);
+ RELOAD_SEG(fs);
+ RELOAD_SEG(ds);
+ RELOAD_SEG(es);
+
err |= fpu__restore_sig(buf, 1);
force_iret();
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index a0ab9ab61c75..eebd05942e6c 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -11,3 +11,4 @@ generic-y += early_ioremap.h
generic-y += export.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 31b627b43a8e..464034db299f 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -20,6 +20,17 @@
#endif
/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+.macro ANNOTATE_IGNORE_ALTERNATIVE
+ .Lannotate_\@:
+ .pushsection .discard.ignore_alts
+ .long .Lannotate_\@ - .
+ .popsection
+.endm
+
+/*
* Issue one struct alt_instr descriptor entry (need to put it into
* the section .altinstructions, see below). This entry contains
* enough information for the alternatives patching code to patch an
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4c74073a19cc..094fbc9c0b1c 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -45,6 +45,16 @@
#define LOCK_PREFIX ""
#endif
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+#define ANNOTATE_IGNORE_ALTERNATIVE \
+ "999:\n\t" \
+ ".pushsection .discard.ignore_alts\n\t" \
+ ".long 999b - .\n\t" \
+ ".popsection\n\t"
+
struct alt_instr {
s32 instr_offset; /* original instruction */
s32 repl_offset; /* offset to replacement instruction */
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 6467757bb39f..3ff577c0b102 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -148,30 +148,6 @@
_ASM_PTR (entry); \
.popsection
-.macro ALIGN_DESTINATION
- /* check for bad alignment of destination */
- movl %edi,%ecx
- andl $7,%ecx
- jz 102f /* already aligned */
- subl $8,%ecx
- negl %ecx
- subl %ecx,%edx
-100: movb (%rsi),%al
-101: movb %al,(%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz 100b
-102:
- .section .fixup,"ax"
-103: addl %ecx,%edx /* ecx is zerorest also */
- jmp copy_user_handle_tail
- .previous
-
- _ASM_EXTABLE_UA(100b, 103b)
- _ASM_EXTABLE_UA(101b, 103b)
- .endm
-
#else
# define _EXPAND_EXTABLE_HANDLE(x) #x
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index ad7b210aa3f6..8e790ec219a5 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -36,22 +36,17 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
-/* Technically wrong, but this avoids compilation errors on some gcc
- versions. */
-#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
-#else
-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
-#endif
+#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
+#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
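+/* RLONG_ADDR is input-only; writes at arbitrary offsets are covered by the "memory" clobber at the use sites */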
-#define ADDR BITOP_ADDR(addr)
+#define ADDR RLONG_ADDR(addr)
/*
* We do the locked ops that don't return the old value as
* a mask operation on a byte.
*/
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
/**
@@ -79,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
: "memory");
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
- : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+ : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@@ -94,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
*/
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
- asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
+ asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
/**
@@ -116,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
: "iq" ((u8)~CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
- : BITOP_ADDR(addr)
- : "Ir" (nr));
+ : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@@ -137,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
- asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
+ asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@@ -145,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
bool negative;
asm volatile(LOCK_PREFIX "andb %2,%1"
CC_SET(s)
- : CC_OUT(s) (negative), ADDR
+ : CC_OUT(s) (negative), WBYTE_ADDR(addr)
: "ir" ((char) ~(1 << nr)) : "memory");
return negative;
}
@@ -161,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
* __clear_bit() is non-atomic and implies release semantics before the memory
* operation. It can be used for an unlock if no other CPUs can concurrently
* modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
*/
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- barrier();
__clear_bit(nr, addr);
}
@@ -182,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
*/
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
- asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
+ asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
/**
@@ -202,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
: "iq" ((u8)CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
- : BITOP_ADDR(addr)
- : "Ir" (nr));
+ : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@@ -248,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
asm(__ASM_SIZE(bts) " %2,%1"
CC_SET(c)
- : CC_OUT(c) (oldbit), ADDR
- : "Ir" (nr));
+ : CC_OUT(c) (oldbit)
+ : ADDR, "Ir" (nr) : "memory");
return oldbit;
}
@@ -288,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
asm volatile(__ASM_SIZE(btr) " %2,%1"
CC_SET(c)
- : CC_OUT(c) (oldbit), ADDR
- : "Ir" (nr));
+ : CC_OUT(c) (oldbit)
+ : ADDR, "Ir" (nr) : "memory");
return oldbit;
}
@@ -300,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
asm volatile(__ASM_SIZE(btc) " %2,%1"
CC_SET(c)
- : CC_OUT(c) (oldbit), ADDR
- : "Ir" (nr) : "memory");
+ : CC_OUT(c) (oldbit)
+ : ADDR, "Ir" (nr) : "memory");
return oldbit;
}
@@ -332,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
asm volatile(__ASM_SIZE(bt) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit)
- : "m" (*(unsigned long *)addr), "Ir" (nr));
+ : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
return oldbit;
}
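The constraint change matters because bts/btr/btc can write to a long other than the one named by the "m" operand. A sketch of the hazard (assuming 64-bit longs; not taken from the patch):

        static void sketch_bitmap_write(void)
        {
                unsigned long map[2] = { 0, 0 };

                /* Bit 65 lives in map[1], but the "m" operand only names
                 * map[0]; with the old "+m" output constraint the compiler
                 * could miss the write. The new input-operand-plus-"memory"
                 * clobber scheme (RLONG_ADDR) makes it visible. */
                __set_bit(65, map);
        }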
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index 3417110574c1..31c379c1da41 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CPU_DEVICE_ID
-#define _CPU_DEVICE_ID 1
+#ifndef _ASM_X86_CPU_DEVICE_ID
+#define _ASM_X86_CPU_DEVICE_ID
/*
* Declare drivers belonging to specific x86 CPUs
@@ -9,8 +9,6 @@
#include <linux/mod_devicetable.h>
-extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
-
/*
* Match specific microcode revisions.
*
@@ -22,21 +20,22 @@ extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
*/
struct x86_cpu_desc {
- __u8 x86_family;
- __u8 x86_vendor;
- __u8 x86_model;
- __u8 x86_stepping;
- __u32 x86_microcode_rev;
+ u8 x86_family;
+ u8 x86_vendor;
+ u8 x86_model;
+ u8 x86_stepping;
+ u32 x86_microcode_rev;
};
-#define INTEL_CPU_DESC(mod, step, rev) { \
- .x86_family = 6, \
- .x86_vendor = X86_VENDOR_INTEL, \
- .x86_model = mod, \
- .x86_stepping = step, \
- .x86_microcode_rev = rev, \
+#define INTEL_CPU_DESC(model, stepping, revision) { \
+ .x86_family = 6, \
+ .x86_vendor = X86_VENDOR_INTEL, \
+ .x86_model = (model), \
+ .x86_stepping = (stepping), \
+ .x86_microcode_rev = (revision), \
}
+extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
-#endif
+#endif /* _ASM_X86_CPU_DEVICE_ID */
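A hypothetical driver table showing the renamed INTEL_CPU_DESC() parameters in use; the model, stepping and microcode revision values below are made up for illustration:

        static const struct x86_cpu_desc example_ucodes[] = {
                INTEL_CPU_DESC(0x55, 4, 0x0000004e),    /* model, stepping, revision */
                {}
        };

        static bool example_microcode_ok(void)
        {
                return x86_cpu_has_min_microcode_rev(example_ucodes);
        }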
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 29c706415443..cff3f3f3bfe0 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -7,6 +7,64 @@
#include <asm/processor.h>
#include <asm/intel_ds.h>
+#ifdef CONFIG_X86_64
+
+/* Macro to enforce the same ordering and stack sizes */
+#define ESTACKS_MEMBERS(guardsize, db2_holesize)\
+ char DF_stack_guard[guardsize]; \
+ char DF_stack[EXCEPTION_STKSZ]; \
+ char NMI_stack_guard[guardsize]; \
+ char NMI_stack[EXCEPTION_STKSZ]; \
+ char DB2_stack_guard[guardsize]; \
+ char DB2_stack[db2_holesize]; \
+ char DB1_stack_guard[guardsize]; \
+ char DB1_stack[EXCEPTION_STKSZ]; \
+ char DB_stack_guard[guardsize]; \
+ char DB_stack[EXCEPTION_STKSZ]; \
+ char MCE_stack_guard[guardsize]; \
+ char MCE_stack[EXCEPTION_STKSZ]; \
+ char IST_top_guard[guardsize]; \
+
+/* The exception stacks' physical storage. No guard pages required */
+struct exception_stacks {
+ ESTACKS_MEMBERS(0, 0)
+};
+
+/* The effective cpu entry area mapping with guard pages. */
+struct cea_exception_stacks {
+ ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
+};
+
+/*
+ * The exception stack ordering in [cea_]exception_stacks
+ */
+enum exception_stack_ordering {
+ ESTACK_DF,
+ ESTACK_NMI,
+ ESTACK_DB2,
+ ESTACK_DB1,
+ ESTACK_DB,
+ ESTACK_MCE,
+ N_EXCEPTION_STACKS
+};
+
+#define CEA_ESTACK_SIZE(st) \
+ sizeof(((struct cea_exception_stacks *)0)->st## _stack)
+
+#define CEA_ESTACK_BOT(ceastp, st) \
+ ((unsigned long)&(ceastp)->st## _stack)
+
+#define CEA_ESTACK_TOP(ceastp, st) \
+ (CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))
+
+#define CEA_ESTACK_OFFS(st) \
+ offsetof(struct cea_exception_stacks, st## _stack)
+
+#define CEA_ESTACK_PAGES \
+ (sizeof(struct cea_exception_stacks) / PAGE_SIZE)
+
+#endif
+
/*
* cpu_entry_area is a percpu region that contains things needed by the CPU
* and early entry/exit code. Real types aren't used for all fields here
@@ -32,12 +90,9 @@ struct cpu_entry_area {
#ifdef CONFIG_X86_64
/*
- * Exception stacks used for IST entries.
- *
- * In the future, this should have a separate slot for each stack
- * with guard pages between them.
+ * Exception stacks used for IST entries with guard pages.
*/
- char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+ struct cea_exception_stacks estacks;
#endif
#ifdef CONFIG_CPU_SUP_INTEL
/*
@@ -57,6 +112,7 @@ struct cpu_entry_area {
#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
@@ -76,4 +132,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu)
return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
+#define __this_cpu_ist_top_va(name) \
+ CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
+
#endif
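A sketch of how the accessor macros compose (illustrative, not from the patch): CEA_ESTACK_TOP() yields the first byte above a stack, which is the value an IST slot wants, while CEA_ESTACK_SIZE() recovers the per-stack size from the struct layout.

        static unsigned long sketch_df_stack_top(struct cea_exception_stacks *cea)
        {
                BUILD_BUG_ON(CEA_ESTACK_SIZE(DF) != EXCEPTION_STKSZ);
                return CEA_ESTACK_TOP(cea, DF);
        }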
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index ce95b8cbd229..1d337c51f7e6 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -112,8 +112,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
test_cpu_cap(c, bit))
#define this_cpu_has(bit) \
- (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
- x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
+ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
+ x86_this_cpu_test_bit(bit, \
+ (unsigned long __percpu *)&cpu_info.x86_capability))
/*
* This macro is for detection of features which need kernel
@@ -155,11 +156,14 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
#else
/*
- * Static testing of CPU features. Used the same as boot_cpu_has().
- * These will statically patch the target code for additional
- * performance.
+ * Static testing of CPU features. Used the same as boot_cpu_has(). It
+ * statically patches the target code for additional performance. Use
+ * static_cpu_has() only in fast paths, where every cycle counts. For the
+ * majority of cases the boot_cpu_has() variant is already fast enough and
+ * you should stick to it, as it is generally only two instructions: a
+ * RIP-relative MOV and a TEST.
*/
-static __always_inline __pure bool _static_cpu_has(u16 bit)
+static __always_inline bool _static_cpu_has(u16 bit)
{
asm_volatile_goto("1: jmp 6f\n"
"2:\n"
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 6d6122524711..981ff9479648 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -344,6 +344,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 9e5ca30738e5..1a8609a15856 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -104,11 +104,9 @@ static inline void debug_stack_usage_dec(void)
{
__this_cpu_dec(debug_stack_usage);
}
-int is_debug_stack(unsigned long addr);
void debug_stack_set_zero(void);
void debug_stack_reset(void);
#else /* !X86_64 */
-static inline int is_debug_stack(unsigned long addr) { return 0; }
static inline void debug_stack_set_zero(void) { }
static inline void debug_stack_reset(void) { }
static inline void debug_stack_usage_inc(void) { }
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 50ba74a34a37..9da8cccdf3fb 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -103,8 +103,6 @@ enum fixed_addresses {
#ifdef CONFIG_PARAVIRT
FIX_PARAVIRT_BOOTMAP,
#endif
- FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
- FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
#ifdef CONFIG_X86_INTEL_MID
FIX_LNW_VRTC,
#endif
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index fb04a3ded7dd..745a19d34f23 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -253,7 +253,7 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
WARN_ON(system_state != SYSTEM_BOOTING);
- if (static_cpu_has(X86_FEATURE_XSAVES))
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
else
XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
@@ -275,7 +275,7 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
WARN_ON(system_state != SYSTEM_BOOTING);
- if (static_cpu_has(X86_FEATURE_XSAVES))
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
else
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@ -497,8 +497,7 @@ static inline void fpregs_activate(struct fpu *fpu)
* - switch_fpu_finish() restores the new state as
* necessary.
*/
-static inline void
-switch_fpu_prepare(struct fpu *old_fpu, int cpu)
+static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
if (!copy_fpregs_to_fpstate(old_fpu))
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
index ae26df1c2789..8380c3ddd4b2 100644
--- a/arch/x86/include/asm/intel_ds.h
+++ b/arch/x86/include/asm/intel_ds.h
@@ -8,7 +8,7 @@
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 8
-#define MAX_FIXED_PEBS_EVENTS 3
+#define MAX_FIXED_PEBS_EVENTS 4
/*
* A debug store configuration.
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 686247db3106..a06a9f8294ea 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -90,8 +90,6 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
#define __raw_writew __writew
#define __raw_writel __writel
-#define mmiowb() barrier()
-
#ifdef CONFIG_X86_64
build_mmio_read(readq, "q", u64, "=r", :"memory")
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index fbb16e6b6c18..8f95686ec27e 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -16,11 +16,7 @@ static inline int irq_canonicalize(int irq)
return ((irq == 2) ? 9 : irq);
}
-#ifdef CONFIG_X86_32
-extern void irq_ctx_init(int cpu);
-#else
-# define irq_ctx_init(cpu) do { } while (0)
-#endif
+extern int irq_init_percpu_irqstack(unsigned int cpu);
#define __ARCH_HAS_DO_SOFTIRQ
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 548d90bbf919..889f8b1b5b7f 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -18,8 +18,8 @@
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
- * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts
- * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
+ * Vectors 129 ... LOCAL_TIMER_VECTOR-1 : device interrupts
+ * Vectors LOCAL_TIMER_VECTOR ... 255 : special interrupts
*
* 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
*
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 93c4bf598fb0..feab24cac610 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -226,7 +226,9 @@ struct x86_emulate_ops {
unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
- int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+ int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+ const char *smstate);
+ void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
};
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 180373360e34..c79abe7ca093 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -35,6 +35,7 @@
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
+#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>
#define KVM_MAX_VCPUS 288
@@ -125,7 +126,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
}
#define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
@@ -137,23 +138,23 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
#define ASYNC_PF_PER_VCPU 64
enum kvm_reg {
- VCPU_REGS_RAX = 0,
- VCPU_REGS_RCX = 1,
- VCPU_REGS_RDX = 2,
- VCPU_REGS_RBX = 3,
- VCPU_REGS_RSP = 4,
- VCPU_REGS_RBP = 5,
- VCPU_REGS_RSI = 6,
- VCPU_REGS_RDI = 7,
+ VCPU_REGS_RAX = __VCPU_REGS_RAX,
+ VCPU_REGS_RCX = __VCPU_REGS_RCX,
+ VCPU_REGS_RDX = __VCPU_REGS_RDX,
+ VCPU_REGS_RBX = __VCPU_REGS_RBX,
+ VCPU_REGS_RSP = __VCPU_REGS_RSP,
+ VCPU_REGS_RBP = __VCPU_REGS_RBP,
+ VCPU_REGS_RSI = __VCPU_REGS_RSI,
+ VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
- VCPU_REGS_R8 = 8,
- VCPU_REGS_R9 = 9,
- VCPU_REGS_R10 = 10,
- VCPU_REGS_R11 = 11,
- VCPU_REGS_R12 = 12,
- VCPU_REGS_R13 = 13,
- VCPU_REGS_R14 = 14,
- VCPU_REGS_R15 = 15,
+ VCPU_REGS_R8 = __VCPU_REGS_R8,
+ VCPU_REGS_R9 = __VCPU_REGS_R9,
+ VCPU_REGS_R10 = __VCPU_REGS_R10,
+ VCPU_REGS_R11 = __VCPU_REGS_R11,
+ VCPU_REGS_R12 = __VCPU_REGS_R12,
+ VCPU_REGS_R13 = __VCPU_REGS_R13,
+ VCPU_REGS_R14 = __VCPU_REGS_R14,
+ VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
VCPU_REGS_RIP,
NR_VCPU_REGS
@@ -252,14 +253,14 @@ struct kvm_mmu_memory_cache {
* kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
* by indirect shadow page can not be more than 15 bits.
*
- * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access,
+ * Currently, we use 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
* @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
*/
union kvm_mmu_page_role {
u32 word;
struct {
unsigned level:4;
- unsigned cr4_pae:1;
+ unsigned gpte_is_8_bytes:1;
unsigned quadrant:2;
unsigned direct:1;
unsigned access:3;
@@ -294,6 +295,7 @@ union kvm_mmu_extended_role {
unsigned int valid:1;
unsigned int execonly:1;
unsigned int cr0_pg:1;
+ unsigned int cr4_pae:1;
unsigned int cr4_pse:1;
unsigned int cr4_pke:1;
unsigned int cr4_smap:1;
@@ -319,6 +321,7 @@ struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
bool unsync;
+ bool mmio_cached;
/*
* The following two entries are used to key the shadow page in the
@@ -333,10 +336,6 @@ struct kvm_mmu_page {
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
-
- /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
- unsigned long mmu_valid_gen;
-
DECLARE_BITMAP(unsync_child_bitmap, 512);
#ifdef CONFIG_X86_32
@@ -352,6 +351,7 @@ struct kvm_mmu_page {
};
struct kvm_pio_request {
+ unsigned long linear_rip;
unsigned long count;
int in;
int port;
@@ -570,6 +570,7 @@ struct kvm_vcpu_arch {
bool tpr_access_reporting;
u64 ia32_xss;
u64 microcode_version;
+ u64 arch_capabilities;
/*
* Paging state of the vcpu
@@ -844,17 +845,15 @@ enum kvm_irqchip_mode {
};
struct kvm_arch {
- unsigned int n_used_mmu_pages;
- unsigned int n_requested_mmu_pages;
- unsigned int n_max_mmu_pages;
+ unsigned long n_used_mmu_pages;
+ unsigned long n_requested_mmu_pages;
+ unsigned long n_max_mmu_pages;
unsigned int indirect_shadow_pages;
- unsigned long mmu_valid_gen;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
*/
struct list_head active_mmu_pages;
- struct list_head zapped_obsolete_pages;
struct kvm_page_track_notifier_node mmu_sp_tracker;
struct kvm_page_track_notifier_head track_notifier_head;
@@ -1184,7 +1183,7 @@ struct kvm_x86_ops {
int (*smi_allowed)(struct kvm_vcpu *vcpu);
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
- int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+ int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
int (*enable_smi_window)(struct kvm_vcpu *vcpu);
int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1196,6 +1195,8 @@ struct kvm_x86_ops {
int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version);
uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
+
+ bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
@@ -1255,9 +1256,9 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);
@@ -1592,4 +1593,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
#define put_smstate(type, buf, offset, val) \
*(type *)((buf) + (offset) - 0x7e00) = val
+#define GET_SMSTATE(type, buf, offset) \
+ (*(type *)((buf) + (offset) - 0x7e00))
+
#endif /* _ASM_X86_KVM_HOST_H */
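GET_SMSTATE() is the read-side counterpart of put_smstate(): both address the SMM state-save area by architectural offset relative to the 0x7e00 base. A sketch (the offset shown is illustrative):

        static void sketch_smstate_roundtrip(char *smstate)
        {
                u64 val = GET_SMSTATE(u64, smstate, 0x7f58);

                put_smstate(u64, smstate, 0x7f58, val);
        }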
diff --git a/arch/x86/include/asm/kvm_vcpu_regs.h b/arch/x86/include/asm/kvm_vcpu_regs.h
new file mode 100644
index 000000000000..1af2cb59233b
--- /dev/null
+++ b/arch/x86/include/asm/kvm_vcpu_regs.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_KVM_VCPU_REGS_H
+#define _ASM_X86_KVM_VCPU_REGS_H
+
+#define __VCPU_REGS_RAX 0
+#define __VCPU_REGS_RCX 1
+#define __VCPU_REGS_RDX 2
+#define __VCPU_REGS_RBX 3
+#define __VCPU_REGS_RSP 4
+#define __VCPU_REGS_RBP 5
+#define __VCPU_REGS_RSI 6
+#define __VCPU_REGS_RDI 7
+
+#ifdef CONFIG_X86_64
+#define __VCPU_REGS_R8 8
+#define __VCPU_REGS_R9 9
+#define __VCPU_REGS_R10 10
+#define __VCPU_REGS_R11 11
+#define __VCPU_REGS_R12 12
+#define __VCPU_REGS_R13 13
+#define __VCPU_REGS_R14 14
+#define __VCPU_REGS_R15 15
+#endif
+
+#endif /* _ASM_X86_KVM_VCPU_REGS_H */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 22d05e3835f0..dc2d4b206ab7 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -210,16 +210,6 @@ static inline void cmci_rediscover(void) {}
static inline void cmci_recheck(void) {}
#endif
-#ifdef CONFIG_X86_MCE_AMD
-void mce_amd_feature_init(struct cpuinfo_x86 *c);
-int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
-#else
-static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
-static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
-#endif
-
-static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
-
int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
bool mce_is_correctable(struct mce *m);
@@ -345,12 +335,19 @@ extern bool amd_mce_is_memory_error(struct mce *m);
extern int mce_threshold_create_device(unsigned int cpu);
extern int mce_threshold_remove_device(unsigned int cpu);
-#else
+void mce_amd_feature_init(struct cpuinfo_x86 *c);
+int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
-static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
-static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
-static inline bool amd_mce_is_memory_error(struct mce *m) { return false; };
+#else
+static inline int mce_threshold_create_device(unsigned int cpu) { return 0; }
+static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; }
+static inline bool amd_mce_is_memory_error(struct mce *m) { return false; }
+static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
+static inline int
+umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; }
#endif
+static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
+
#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 19d18fae6ec6..93dff1963337 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -13,6 +13,7 @@
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
+#include <asm/debugreg.h>
extern atomic64_t last_mm_ctx_id;
@@ -356,4 +357,59 @@ static inline unsigned long __get_current_cr3_fast(void)
return cr3;
}
+typedef struct {
+ struct mm_struct *mm;
+} temp_mm_state_t;
+
+/*
+ * Using a temporary mm allows setting temporary mappings that are not
+ * accessible by other CPUs. Such mappings are needed to perform sensitive
+ * memory writes that override the kernel memory protections (e.g., W^X),
+ * without exposing the temporary page-table mappings that are required for
+ * these write operations to other CPUs. Using a temporary mm also avoids
+ * TLB shootdowns when the mapping is torn down.
+ *
+ * Context: The temporary mm needs to be used exclusively by a single core. To
+ *          harden security, IRQs must be disabled while the temporary mm is
+ *          loaded, thereby preventing interrupt handler bugs from overriding
+ *          the kernel memory protection.
+ */
+static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
+{
+ temp_mm_state_t temp_state;
+
+ lockdep_assert_irqs_disabled();
+ temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ switch_mm_irqs_off(NULL, mm, current);
+
+ /*
+ * If breakpoints are enabled, disable them while the temporary mm is
+ * used. Userspace might set up watchpoints on addresses that are used
+ * in the temporary mm, which would lead to wrong signals being sent or
+ * crashes.
+ *
+ * Note that breakpoints are not disabled selectively, which also causes
+ * kernel breakpoints (e.g., perf's) to be disabled. This might be
+ * undesirable, but still seems reasonable as the code that runs in the
+ * temporary mm should be short.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_disable();
+
+ return temp_state;
+}
+
+static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
+{
+ lockdep_assert_irqs_disabled();
+ switch_mm_irqs_off(NULL, prev_state.mm, current);
+
+ /*
+ * Restore the breakpoints if they were disabled before the temporary mm
+ * was loaded.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_restore();
+}
+
#endif /* _ASM_X86_MMU_CONTEXT_H */
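A sketch of the intended calling pattern; poking_mm is assumed to be a dedicated mm prepared elsewhere (e.g. by poking_init()):

        static void sketch_text_poke(void)
        {
                temp_mm_state_t prev;
                unsigned long flags;

                local_irq_save(flags);          /* contract: IRQs off */
                prev = use_temporary_mm(poking_mm);

                /* ... write through the temporary mapping ... */

                unuse_temporary_mm(prev);
                local_irq_restore(flags);
        }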
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 8e40c2446fd1..1378518cf63f 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -116,6 +116,7 @@
#define LBR_INFO_CYCLES 0xffff
#define MSR_IA32_PEBS_ENABLE 0x000003f1
+#define MSR_PEBS_DATA_CFG 0x000003f2
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
@@ -666,6 +667,12 @@
#define MSR_IA32_TSC_DEADLINE 0x000006E0
+
+#define MSR_TSX_FORCE_ABORT 0x0000010F
+
+#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
+#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
#define MSR_IA32_MCG_EBX 0x00000181
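A sketch of a likely consumer of the new MSR, guarded by the matching feature bit (not taken from this patch):

        static void sketch_set_force_abort(bool enable)
        {
                if (!boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT))
                        return;

                wrmsrl(MSR_TSX_FORCE_ABORT,
                       enable ? MSR_TFA_RTM_FORCE_ABORT : 0);
        }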
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index dad12b767ba0..daf25b60c9e3 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -11,6 +11,15 @@
#include <asm/msr-index.h>
/*
+ * This should be used immediately before a retpoline alternative. It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+#define ANNOTATE_NOSPEC_ALTERNATIVE \
+ ANNOTATE_IGNORE_ALTERNATIVE
+
+/*
* Fill the CPU return stack buffer.
*
* Each entry in the RSB, if used for a speculative 'ret', contains an
@@ -57,19 +66,6 @@
#ifdef __ASSEMBLY__
/*
- * This should be used immediately before a retpoline alternative. It tells
- * objtool where the retpolines are so that it can make sense of the control
- * flow by just reading the original instruction(s) and ignoring the
- * alternatives.
- */
-.macro ANNOTATE_NOSPEC_ALTERNATIVE
- .Lannotate_\@:
- .pushsection .discard.nospec
- .long .Lannotate_\@ - .
- .popsection
-.endm
-
-/*
* This should be used immediately before an indirect jump/call. It tells
* objtool the subsequent indirect jump/call is vouched safe for retpoline
* builds.
@@ -152,12 +148,6 @@
#else /* __ASSEMBLY__ */
-#define ANNOTATE_NOSPEC_ALTERNATIVE \
- "999:\n\t" \
- ".pushsection .discard.nospec\n\t" \
- ".long 999b - .\n\t" \
- ".popsection\n\t"
-
#define ANNOTATE_RETPOLINE_SAFE \
"999:\n\t" \
".pushsection .discard.retpoline_safe\n\t" \
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 0d5c739eebd7..565ad755c785 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -22,11 +22,9 @@
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define DOUBLEFAULT_STACK 1
-#define NMI_STACK 0
-#define DEBUG_STACK 0
-#define MCE_STACK 0
-#define N_EXCEPTION_STACKS 1
+#define IRQ_STACK_SIZE THREAD_SIZE
+
+#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/*
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 0ce558a8150d..793c14c372cb 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -7,33 +7,27 @@
#endif
#ifdef CONFIG_KASAN
-#ifdef CONFIG_KASAN_EXTRA
-#define KASAN_STACK_ORDER 2
-#else
#define KASAN_STACK_ORDER 1
-#endif
#else
#define KASAN_STACK_ORDER 0
#endif
#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER)
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
-#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-
#define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER)
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
-#define DOUBLEFAULT_STACK 1
-#define NMI_STACK 2
-#define DEBUG_STACK 3
-#define MCE_STACK 4
-#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */
+/*
+ * The indices for the tss.ist[] array. The hardware limit is 7 entries.
+ */
+#define IST_INDEX_DF 0
+#define IST_INDEX_NMI 1
+#define IST_INDEX_DB 2
+#define IST_INDEX_MCE 3
/*
* Set __PAGE_OFFSET to the most negative possible address +
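A sketch of how the new IST indices pair with the cpu_entry_area stack tops when the hardware IST slots are populated (helper usage assumed from the cpu_entry_area changes above):

        static void sketch_ist_init(struct tss_struct *tss)
        {
                tss->x86_tss.ist[IST_INDEX_DF]  = __this_cpu_ist_top_va(DF);
                tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
                tss->x86_tss.ist[IST_INDEX_DB]  = __this_cpu_ist_top_va(DB);
                tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
        }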
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8bdf74902293..1392d5e6e8d6 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -7,7 +7,7 @@
*/
#define INTEL_PMC_MAX_GENERIC 32
-#define INTEL_PMC_MAX_FIXED 3
+#define INTEL_PMC_MAX_FIXED 4
#define INTEL_PMC_IDX_FIXED 32
#define X86_PMC_IDX_MAX 64
@@ -32,6 +32,8 @@
#define HSW_IN_TX (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
+#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
+#define ICL_FIXED_0_ADAPTIVE (1ULL << 32)
#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
@@ -87,6 +89,12 @@
#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
#define ARCH_PERFMON_EVENTS_COUNT 7
+#define PEBS_DATACFG_MEMINFO BIT_ULL(0)
+#define PEBS_DATACFG_GP BIT_ULL(1)
+#define PEBS_DATACFG_XMMS BIT_ULL(2)
+#define PEBS_DATACFG_LBRS BIT_ULL(3)
+#define PEBS_DATACFG_LBR_SHIFT 24
+
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:
@@ -177,6 +185,41 @@ struct x86_pmu_capability {
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
/*
+ * Adaptive PEBS v4
+ */
+
+struct pebs_basic {
+ u64 format_size;
+ u64 ip;
+ u64 applicable_counters;
+ u64 tsc;
+};
+
+struct pebs_meminfo {
+ u64 address;
+ u64 aux;
+ u64 latency;
+ u64 tsx_tuning;
+};
+
+struct pebs_gprs {
+ u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
+ u64 r8, r9, r10, r11, r12, r13, r14, r15;
+};
+
+struct pebs_xmm {
+ u64 xmm[16*2]; /* two entries for each register */
+};
+
+struct pebs_lbr_entry {
+ u64 from, to, info;
+};
+
+struct pebs_lbr {
+ struct pebs_lbr_entry lbr[0]; /* Variable length */
+};
+
+/*
* IBS cpuid feature detection
*/
@@ -248,6 +291,11 @@ extern void perf_events_lapic_init(void);
#define PERF_EFLAGS_VM (1UL << 5)
struct pt_regs;
+struct x86_perf_regs {
+ struct pt_regs regs;
+ u64 *xmm_regs;
+};
+
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
@@ -260,14 +308,9 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
*/
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ip = (__ip); \
- (regs)->bp = caller_frame_pointer(); \
+ (regs)->sp = (unsigned long)__builtin_frame_address(0); \
(regs)->cs = __KERNEL_CS; \
regs->flags = 0; \
- asm volatile( \
- _ASM_MOV "%%"_ASM_SP ", %0\n" \
- : "=m" ((regs)->sp) \
- :: "memory" \
- ); \
}
struct perf_guest_switch_msr {
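A sketch of how an adaptive PEBS v4 record is laid out: the optional groups follow pebs_basic in a fixed order and are present only when the corresponding PEBS_DATACFG_* bit was set in MSR_PEBS_DATA_CFG (illustrative, not the driver's actual parser):

        static void *sketch_pebs_skip_groups(void *record, u64 data_cfg)
        {
                struct pebs_basic *basic = record;
                void *next = basic + 1;

                if (data_cfg & PEBS_DATACFG_MEMINFO)
                        next = (struct pebs_meminfo *)next + 1;
                if (data_cfg & PEBS_DATACFG_GP)
                        next = (struct pebs_gprs *)next + 1;
                if (data_cfg & PEBS_DATACFG_XMMS)
                        next = (struct pebs_xmm *)next + 1;

                /* struct pebs_lbr entries, if any, start here; their count
                 * is encoded above PEBS_DATACFG_LBR_SHIFT. */
                return next;
        }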
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 2779ace16d23..3a221942f805 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -46,7 +46,7 @@ void ptdump_walk_user_pgd_level_checkwx(void);
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__visible;
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#define ZERO_PAGE(vaddr) ((void)(vaddr), virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
@@ -1021,6 +1021,9 @@ static inline void __meminit init_trampoline_default(void)
/* Default trampoline pgd value */
trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
+
+void __init poking_init(void);
+
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
diff --git a/arch/x86/include/asm/processor-cyrix.h b/arch/x86/include/asm/processor-cyrix.h
index aaedd73ea2c6..df700a6cc869 100644
--- a/arch/x86/include/asm/processor-cyrix.h
+++ b/arch/x86/include/asm/processor-cyrix.h
@@ -3,19 +3,6 @@
* NSC/Cyrix CPU indexed register access. Must be inlined instead of
* macros to ensure correct access ordering
* Access order is always 0x22 (=offset), 0x23 (=value)
- *
- * When using the old macros a line like
- * setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
- * gets expanded to:
- * do {
- * outb((CX86_CCR2), 0x22);
- * outb((({
- * outb((CX86_CCR2), 0x22);
- * inb(0x23);
- * }) | 0x88), 0x23);
- * } while (0);
- *
- * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
*/
static inline u8 getCx86(u8 reg)
@@ -29,11 +16,3 @@ static inline void setCx86(u8 reg, u8 data)
outb(reg, 0x22);
outb(data, 0x23);
}
-
-#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86_old(reg, data) do { \
- outb((reg), 0x22); \
- outb((data), 0x23); \
-} while (0)
-
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2bb3a648fc12..7e99ef67bff0 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -367,6 +367,13 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
#define __KERNEL_TSS_LIMIT \
(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)
+/* Per CPU interrupt stacks */
+struct irq_stack {
+ char stack[IRQ_STACK_SIZE];
+} __aligned(IRQ_STACK_SIZE);
+
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
+
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
@@ -374,38 +381,25 @@ DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
#endif
-/*
- * Save the original ist values for checking stack pointers during debugging
- */
-struct orig_ist {
- unsigned long ist[7];
-};
-
#ifdef CONFIG_X86_64
-DECLARE_PER_CPU(struct orig_ist, orig_ist);
-
-union irq_stack_union {
- char irq_stack[IRQ_STACK_SIZE];
+struct fixed_percpu_data {
/*
* GCC hardcodes the stack canary as %gs:40. Since the
* irq_stack is the object at %gs:0, we reserve the bottom
* 48 bytes of the irq stack for the canary.
*/
- struct {
- char gs_base[40];
- unsigned long stack_canary;
- };
+ char gs_base[40];
+ unsigned long stack_canary;
};
-DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
-DECLARE_INIT_PER_CPU(irq_stack_union);
+DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
+DECLARE_INIT_PER_CPU(fixed_percpu_data);
static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
- return (unsigned long)per_cpu(irq_stack_union.gs_base, cpu);
+ return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}
-DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
@@ -427,15 +421,8 @@ struct stack_canary {
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
-/*
- * per-CPU IRQ handling stacks
- */
-struct irq_stack {
- u32 stack[THREAD_SIZE/sizeof(u32)];
-} __aligned(THREAD_SIZE);
-
-DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
-DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
+/* Per CPU softirq stack pointer */
+DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
#endif /* X86_64 */
extern unsigned int fpu_kernel_xstate_size;
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 63b3393bd98e..c53682303c9c 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -77,7 +77,11 @@ static inline size_t real_mode_size_needed(void)
return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
}
-void set_real_mode_mem(phys_addr_t mem, size_t size);
+static inline void set_real_mode_mem(phys_addr_t mem)
+{
+ real_mode_header = (struct real_mode_header *) __va(mem);
+}
+
void reserve_real_mode(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
deleted file mode 100644
index 4c25cf6caefa..000000000000
--- a/arch/x86/include/asm/rwsem.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
- *
- * Written by David Howells (dhowells@redhat.com).
- *
- * Derived from asm-x86/semaphore.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _ASM_X86_RWSEM_H
-#define _ASM_X86_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-#include <asm/asm.h>
-
-/*
- * The bias values and the counter type limits the number of
- * potential readers/writers to 32767 for 32 bits and 2147483647
- * for 64 bits.
- */
-
-#ifdef CONFIG_X86_64
-# define RWSEM_ACTIVE_MASK 0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK 0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000L
-#define RWSEM_ACTIVE_BIAS 0x00000001L
-#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-#define ____down_read(sem, slow_path) \
-({ \
- struct rw_semaphore* ret; \
- asm volatile("# beginning down_read\n\t" \
- LOCK_PREFIX _ASM_INC "(%[sem])\n\t" \
- /* adds 0x00000001 */ \
- " jns 1f\n" \
- " call " slow_path "\n" \
- "1:\n\t" \
- "# ending down_read\n\t" \
- : "+m" (sem->count), "=a" (ret), \
- ASM_CALL_CONSTRAINT \
- : [sem] "a" (sem) \
- : "memory", "cc"); \
- ret; \
-})
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
- ____down_read(sem, "call_rwsem_down_read_failed");
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
- if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable")))
- return -EINTR;
- return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_read_trylock(struct rw_semaphore *sem)
-{
- long result, tmp;
- asm volatile("# beginning __down_read_trylock\n\t"
- " mov %[count],%[result]\n\t"
- "1:\n\t"
- " mov %[result],%[tmp]\n\t"
- " add %[inc],%[tmp]\n\t"
- " jle 2f\n\t"
- LOCK_PREFIX " cmpxchg %[tmp],%[count]\n\t"
- " jnz 1b\n\t"
- "2:\n\t"
- "# ending __down_read_trylock\n\t"
- : [count] "+m" (sem->count), [result] "=&a" (result),
- [tmp] "=&r" (tmp)
- : [inc] "i" (RWSEM_ACTIVE_READ_BIAS)
- : "memory", "cc");
- return result >= 0;
-}
-
-/*
- * lock for writing
- */
-#define ____down_write(sem, slow_path) \
-({ \
- long tmp; \
- struct rw_semaphore* ret; \
- \
- asm volatile("# beginning down_write\n\t" \
- LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" \
- /* adds 0xffff0001, returns the old value */ \
- " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
- /* was the active mask 0 before? */\
- " jz 1f\n" \
- " call " slow_path "\n" \
- "1:\n" \
- "# ending down_write" \
- : "+m" (sem->count), [tmp] "=d" (tmp), \
- "=a" (ret), ASM_CALL_CONSTRAINT \
- : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \
- : "memory", "cc"); \
- ret; \
-})
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
- ____down_write(sem, "call_rwsem_down_write_failed");
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
- if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
- return -EINTR;
-
- return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_write_trylock(struct rw_semaphore *sem)
-{
- bool result;
- long tmp0, tmp1;
- asm volatile("# beginning __down_write_trylock\n\t"
- " mov %[count],%[tmp0]\n\t"
- "1:\n\t"
- " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
- /* was the active mask 0 before? */
- " jnz 2f\n\t"
- " mov %[tmp0],%[tmp1]\n\t"
- " add %[inc],%[tmp1]\n\t"
- LOCK_PREFIX " cmpxchg %[tmp1],%[count]\n\t"
- " jnz 1b\n\t"
- "2:\n\t"
- CC_SET(e)
- "# ending __down_write_trylock\n\t"
- : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
- [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
- : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS)
- : "memory");
- return result;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- long tmp;
- asm volatile("# beginning __up_read\n\t"
- LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
- /* subtracts 1, returns the old value */
- " jns 1f\n\t"
- " call call_rwsem_wake\n" /* expects old value in %edx */
- "1:\n"
- "# ending __up_read\n"
- : "+m" (sem->count), [tmp] "=d" (tmp)
- : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS)
- : "memory", "cc");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- long tmp;
- asm volatile("# beginning __up_write\n\t"
- LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
- /* subtracts 0xffff0001, returns the old value */
- " jns 1f\n\t"
- " call call_rwsem_wake\n" /* expects old value in %edx */
- "1:\n\t"
- "# ending __up_write\n"
- : "+m" (sem->count), [tmp] "=d" (tmp)
- : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS)
- : "memory", "cc");
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- asm volatile("# beginning __downgrade_write\n\t"
- LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t"
- /*
- * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
- * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
- */
- " jns 1f\n\t"
- " call call_rwsem_downgrade_wake\n"
- "1:\n\t"
- "# ending __downgrade_write\n"
- : "+m" (sem->count)
- : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS)
- : "memory", "cc");
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_X86_RWSEM_H */
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 07a25753e85c..ae7b909dc242 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -85,6 +85,9 @@ int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index db333300bd4b..f94a7d0ddd49 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -13,13 +13,12 @@
#ifndef _ASM_X86_SMAP_H
#define _ASM_X86_SMAP_H
-#include <linux/stringify.h>
#include <asm/nops.h>
#include <asm/cpufeatures.h>
/* "Raw" instruction opcodes */
-#define __ASM_CLAC .byte 0x0f,0x01,0xca
-#define __ASM_STAC .byte 0x0f,0x01,0xcb
+#define __ASM_CLAC ".byte 0x0f,0x01,0xca"
+#define __ASM_STAC ".byte 0x0f,0x01,0xcb"
#ifdef __ASSEMBLY__
@@ -28,10 +27,10 @@
#ifdef CONFIG_X86_SMAP
#define ASM_CLAC \
- ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
+ ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
+ ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP
#else /* CONFIG_X86_SMAP */
@@ -49,26 +48,46 @@
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+ alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+ alternative("", __ASM_STAC, X86_FEATURE_SMAP);
+}
+
+static __always_inline unsigned long smap_save(void)
+{
+ unsigned long flags;
+
+ asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
+ X86_FEATURE_SMAP)
+ : "=rm" (flags) : : "memory", "cc");
+
+ return flags;
+}
+
+static __always_inline void smap_restore(unsigned long flags)
+{
+ asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
+ : : "g" (flags) : "memory", "cc");
}
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+ ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+ ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)
#else /* CONFIG_X86_SMAP */
static inline void clac(void) { }
static inline void stac(void) { }
+static inline unsigned long smap_save(void) { return 0; }
+static inline void smap_restore(unsigned long flags) { }
+
#define ASM_CLAC
#define ASM_STAC
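A sketch of the save/restore pair in use, e.g. around a call that must not inherit an open user-access window; the callee is hypothetical:

        static void sketch_nested_region(void)
        {
                unsigned long flags = smap_save();      /* pushf + CLAC */

                do_work_without_user_access();          /* hypothetical */

                smap_restore(flags);                    /* popf */
        }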
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 2e95b6c1bca3..da545df207b2 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -131,7 +131,7 @@ void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus);
-void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
+int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 8ec97a62c245..91e29b6a86a5 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -13,7 +13,7 @@
* On x86_64, %gs is shared by percpu area and stack canary. All
* percpu symbols are zero based and %gs points to the base of percpu
* area. The first occupant of the percpu area is always
- * irq_stack_union which contains stack_canary at offset 40. Userland
+ * fixed_percpu_data which contains stack_canary at offset 40. Userland
* %gs is always saved and restored on kernel entry and exit using
* swapgs, so stack protector doesn't add any complexity there.
*
@@ -64,7 +64,7 @@ static __always_inline void boot_init_stack_canary(void)
u64 tsc;
#ifdef CONFIG_X86_64
- BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+ BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
#endif
/*
* We both use the random pool and the current TSC as a source
@@ -79,7 +79,7 @@ static __always_inline void boot_init_stack_canary(void)
current->stack_canary = canary;
#ifdef CONFIG_X86_64
- this_cpu_write(irq_stack_union.stack_canary, canary);
+ this_cpu_write(fixed_percpu_data.stack_canary, canary);
#else
this_cpu_write(stack_canary.canary, canary);
#endif
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index f335aad404a4..a8d0cdf48616 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -9,6 +9,8 @@
#include <linux/uaccess.h>
#include <linux/ptrace.h>
+
+#include <asm/cpu_entry_area.h>
#include <asm/switch_to.h>
enum stack_type {
@@ -98,19 +100,6 @@ struct stack_frame_ia32 {
u32 return_address;
};
-static inline unsigned long caller_frame_pointer(void)
-{
- struct stack_frame *frame;
-
- frame = __builtin_frame_address(0);
-
-#ifdef CONFIG_FRAME_POINTER
- frame = frame->next_frame;
-#endif
-
- return (unsigned long)frame;
-}
-
void show_opcodes(struct pt_regs *regs, const char *loglvl);
void show_ip(struct pt_regs *regs, const char *loglvl);
#endif /* _ASM_X86_STACKTRACE_H */
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 55d392c6bd29..f74362b05619 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
* No 3D Now!
*/
-#if (__GNUC__ >= 4)
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
-#else
-#define memcpy(t, f, n) \
- (__builtin_constant_p((n)) \
- ? __constant_memcpy((t), (f), (n)) \
- : __memcpy((t), (f), (n)))
-#endif
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
@@ -216,29 +209,6 @@ static inline void *__memset_generic(void *s, char c, size_t count)
/* we might want to write optimized versions of these later */
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))
-/*
- * memset(x, 0, y) is a reasonably common thing to do, so we want to fill
- * things 32 bits at a time even when we don't know the size of the
- * area at compile-time..
- */
-static __always_inline
-void *__constant_c_memset(void *s, unsigned long c, size_t count)
-{
- int d0, d1;
- asm volatile("rep ; stosl\n\t"
- "testb $2,%b3\n\t"
- "je 1f\n\t"
- "stosw\n"
- "1:\ttestb $1,%b3\n\t"
- "je 2f\n\t"
- "stosb\n"
- "2:"
- : "=&c" (d0), "=&D" (d1)
- : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
- : "memory");
- return s;
-}
-
/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
extern size_t strnlen(const char *s, size_t count);
@@ -247,72 +217,6 @@ extern size_t strnlen(const char *s, size_t count);
#define __HAVE_ARCH_STRSTR
extern char *strstr(const char *cs, const char *ct);
-/*
- * This looks horribly ugly, but the compiler can optimize it totally,
- * as we by now know that both pattern and count is constant..
- */
-static __always_inline
-void *__constant_c_and_count_memset(void *s, unsigned long pattern,
- size_t count)
-{
- switch (count) {
- case 0:
- return s;
- case 1:
- *(unsigned char *)s = pattern & 0xff;
- return s;
- case 2:
- *(unsigned short *)s = pattern & 0xffff;
- return s;
- case 3:
- *(unsigned short *)s = pattern & 0xffff;
- *((unsigned char *)s + 2) = pattern & 0xff;
- return s;
- case 4:
- *(unsigned long *)s = pattern;
- return s;
- }
-
-#define COMMON(x) \
- asm volatile("rep ; stosl" \
- x \
- : "=&c" (d0), "=&D" (d1) \
- : "a" (eax), "0" (count/4), "1" ((long)s) \
- : "memory")
-
- {
- int d0, d1;
-#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
- /* Workaround for broken gcc 4.0 */
- register unsigned long eax asm("%eax") = pattern;
-#else
- unsigned long eax = pattern;
-#endif
-
- switch (count % 4) {
- case 0:
- COMMON("");
- return s;
- case 1:
- COMMON("\n\tstosb");
- return s;
- case 2:
- COMMON("\n\tstosw");
- return s;
- default:
- COMMON("\n\tstosw\n\tstosb");
- return s;
- }
- }
-
-#undef COMMON
-}
-
-#define __constant_c_x_memset(s, c, count) \
- (__builtin_constant_p(count) \
- ? __constant_c_and_count_memset((s), (c), (count)) \
- : __constant_c_memset((s), (c), (count)))
-
#define __memset(s, c, count) \
(__builtin_constant_p(count) \
? __constant_count_memset((s), (c), (count)) \
@@ -321,15 +225,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, size_t);
#ifndef CONFIG_FORTIFY_SOURCE
-#if (__GNUC__ >= 4)
#define memset(s, c, count) __builtin_memset(s, c, count)
-#else
-#define memset(s, c, count) \
- (__builtin_constant_p(c) \
- ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
- (count)) \
- : __memset((s), (c), (count)))
-#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMSET16
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 4e4194e21a09..75314c3dbe47 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -14,21 +14,6 @@
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
-#ifndef CONFIG_FORTIFY_SOURCE
-#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
-#define memcpy(dst, src, len) \
-({ \
- size_t __len = (len); \
- void *__ret; \
- if (__builtin_constant_p(len) && __len >= 64) \
- __ret = __memcpy((dst), (src), __len); \
- else \
- __ret = __builtin_memcpy((dst), (src), __len); \
- __ret; \
-})
-#endif
-#endif /* !CONFIG_FORTIFY_SOURCE */
-
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 7cf1a270d891..18a4b6890fa8 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -46,6 +46,7 @@ struct inactive_task_frame {
unsigned long r13;
unsigned long r12;
#else
+ unsigned long flags;
unsigned long si;
unsigned long di;
#endif
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 2fe745356fb1..6d8d6bc183b7 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -14,6 +14,8 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
+#include <asm/rmwcc.h>
+
#define ADDR (*(volatile long *)addr)
/**
@@ -29,7 +31,7 @@
*/
static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; bts %1,%0"
+ asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -47,7 +49,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
*/
static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btr %1,%0"
+ asm volatile("lock; " __ASM_SIZE(btr) " %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -64,7 +66,7 @@ static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btc %1,%0"
+ asm volatile("lock; " __ASM_SIZE(btc) " %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -78,14 +80,9 @@ static inline void sync_change_bit(long nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
+static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
- unsigned char oldbit;
-
- asm volatile("lock; bts %2,%1\n\tsetc %0"
- : "=qm" (oldbit), "+m" (ADDR)
- : "Ir" (nr) : "memory");
- return oldbit;
+ return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
/**
@@ -98,12 +95,7 @@ static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- unsigned char oldbit;
-
- asm volatile("lock; btr %2,%1\n\tsetc %0"
- : "=qm" (oldbit), "+m" (ADDR)
- : "Ir" (nr) : "memory");
- return oldbit;
+ return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
/**
@@ -116,12 +108,7 @@ static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
- unsigned char oldbit;
-
- asm volatile("lock; btc %2,%1\n\tsetc %0"
- : "=qm" (oldbit), "+m" (ADDR)
- : "Ir" (nr) : "memory");
- return oldbit;
+ return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
#define sync_test_bit(nr, addr) test_bit(nr, addr)
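Roughly what GEN_BINARY_RMWcc() produces here (an approximation, not the macro's literal expansion): the carry flag set by bts is extracted with CC_SET()/CC_OUT() instead of a separate setc instruction:

        static inline bool sketch_sync_test_and_set(long nr, volatile unsigned long *addr)
        {
                bool oldbit;

                asm volatile("lock; " __ASM_SIZE(bts) " %2,%1"
                             CC_SET(c)
                             : CC_OUT(c) (oldbit), "+m" (*addr)
                             : "Ir" (nr) : "memory");
                return oldbit;
        }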
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d653139857af..4c305471ec33 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
- BUG_ON(i + n > 6);
- memcpy(args, &regs->bx + i, n * sizeof(args[0]));
+ memcpy(args, &regs->bx, 6 * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
@@ -116,124 +114,50 @@ static inline int syscall_get_arch(void)
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
# ifdef CONFIG_IA32_EMULATION
- if (task->thread_info.status & TS_COMPAT)
- switch (i) {
- case 0:
- if (!n--) break;
- *args++ = regs->bx;
- case 1:
- if (!n--) break;
- *args++ = regs->cx;
- case 2:
- if (!n--) break;
- *args++ = regs->dx;
- case 3:
- if (!n--) break;
- *args++ = regs->si;
- case 4:
- if (!n--) break;
- *args++ = regs->di;
- case 5:
- if (!n--) break;
- *args++ = regs->bp;
- case 6:
- if (!n--) break;
- default:
- BUG();
- break;
- }
- else
+ if (task->thread_info.status & TS_COMPAT) {
+ *args++ = regs->bx;
+ *args++ = regs->cx;
+ *args++ = regs->dx;
+ *args++ = regs->si;
+ *args++ = regs->di;
+ *args = regs->bp;
+ } else
# endif
- switch (i) {
- case 0:
- if (!n--) break;
- *args++ = regs->di;
- case 1:
- if (!n--) break;
- *args++ = regs->si;
- case 2:
- if (!n--) break;
- *args++ = regs->dx;
- case 3:
- if (!n--) break;
- *args++ = regs->r10;
- case 4:
- if (!n--) break;
- *args++ = regs->r8;
- case 5:
- if (!n--) break;
- *args++ = regs->r9;
- case 6:
- if (!n--) break;
- default:
- BUG();
- break;
- }
+ {
+ *args++ = regs->di;
+ *args++ = regs->si;
+ *args++ = regs->dx;
+ *args++ = regs->r10;
+ *args++ = regs->r8;
+ *args = regs->r9;
+ }
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
# ifdef CONFIG_IA32_EMULATION
- if (task->thread_info.status & TS_COMPAT)
- switch (i) {
- case 0:
- if (!n--) break;
- regs->bx = *args++;
- case 1:
- if (!n--) break;
- regs->cx = *args++;
- case 2:
- if (!n--) break;
- regs->dx = *args++;
- case 3:
- if (!n--) break;
- regs->si = *args++;
- case 4:
- if (!n--) break;
- regs->di = *args++;
- case 5:
- if (!n--) break;
- regs->bp = *args++;
- case 6:
- if (!n--) break;
- default:
- BUG();
- break;
- }
- else
+ if (task->thread_info.status & TS_COMPAT) {
+ regs->bx = *args++;
+ regs->cx = *args++;
+ regs->dx = *args++;
+ regs->si = *args++;
+ regs->di = *args++;
+ regs->bp = *args;
+ } else
# endif
- switch (i) {
- case 0:
- if (!n--) break;
- regs->di = *args++;
- case 1:
- if (!n--) break;
- regs->si = *args++;
- case 2:
- if (!n--) break;
- regs->dx = *args++;
- case 3:
- if (!n--) break;
- regs->r10 = *args++;
- case 4:
- if (!n--) break;
- regs->r8 = *args++;
- case 5:
- if (!n--) break;
- regs->r9 = *args++;
- case 6:
- if (!n--) break;
- default:
- BUG();
- break;
- }
+ {
+ regs->di = *args++;
+ regs->si = *args++;
+ regs->dx = *args++;
+ regs->r10 = *args++;
+ regs->r8 = *args++;
+ regs->r9 = *args;
+ }
}
static inline int syscall_get_arch(void)
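
With the (i, n) parameters gone, callers always receive all six syscall arguments in one fixed-size copy, which is what allows the fall-through switch statements and their BUG() paths to be deleted. A self-contained sketch of the simplified contract (the struct below is an illustrative mock, not the kernel's struct pt_regs):

    /* Illustrative mock of the x86-64 argument registers; field names
     * follow struct pt_regs, but this is not the kernel definition. */
    struct regs_sketch {
            unsigned long di, si, dx, r10, r8, r9;
    };

    /* Sketch of the simplified contract: the callee always fills all six
     * slots, so no (i, n) range checking or BUG() path is needed. */
    static void get_args_sketch(const struct regs_sketch *regs,
                                unsigned long *args)
    {
            args[0] = regs->di;
            args[1] = regs->si;
            args[2] = regs->dx;
            args[3] = regs->r10;
            args[4] = regs->r8;
            args[5] = regs->r9;
    }
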
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index e85ff65c43c3..c90678fd391a 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -18,7 +18,7 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
#define __parainstructions_end NULL
#endif
-extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+extern void text_poke_early(void *addr, const void *opcode, size_t len);
/*
* Clear and restore the kernel write-protection flag on the local CPU.
@@ -35,8 +35,11 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
* inconsistent instruction while you patch.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
-extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
extern int after_bootmem;
+extern __ro_after_init struct mm_struct *poking_mm;
+extern __ro_after_init unsigned long poking_addr;
#endif /* _ASM_X86_TEXT_PATCHING_H */
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 404b8b1d44f5..f23e7aaff4cd 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -6,6 +6,7 @@
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb);
#include <asm-generic/tlb.h>
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index f4204bf377fc..dee375831962 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -167,7 +167,7 @@ struct tlb_state {
*/
struct mm_struct *loaded_mm;
-#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)
/* Last user mm for optimizing IBPB */
union {
@@ -274,6 +274,8 @@ static inline bool nmi_uaccess_okay(void)
return true;
}
+#define nmi_uaccess_okay nmi_uaccess_okay
+
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1954dd5552a2..c82abd6e4ca3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -427,10 +427,11 @@ do { \
({ \
__label__ __pu_label; \
int __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __pu_val; \
- __pu_val = x; \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ __typeof__(ptr) __pu_ptr = (ptr); \
+ __typeof__(size) __pu_size = (size); \
__uaccess_begin(); \
- __put_user_size(__pu_val, (ptr), (size), __pu_label); \
+ __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
__pu_err = 0; \
__pu_label: \
__uaccess_end(); \
@@ -585,7 +586,6 @@ extern void __cmpxchg_wrong_size(void)
#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
({ \
int __ret = 0; \
- __typeof__(ptr) __uval = (uval); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__uaccess_begin_nospec(); \
@@ -661,7 +661,7 @@ extern void __cmpxchg_wrong_size(void)
__cmpxchg_wrong_size(); \
} \
__uaccess_end(); \
- *__uval = __old; \
+ *(uval) = __old; \
__ret; \
})
@@ -705,7 +705,7 @@ extern struct movsl_mask {
* checking before using them, but you have to surround them with the
* user_access_begin/end() pair.
*/
-static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
if (unlikely(!access_ok(ptr,len)))
return 0;
@@ -715,6 +715,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
#define user_access_begin(a,b) user_access_begin(a,b)
#define user_access_end() __uaccess_end()
+#define user_access_save() smap_save()
+#define user_access_restore(x) smap_restore(x)
+
#define unsafe_put_user(x, ptr, label) \
__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
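
Capturing ptr and size into __pu_ptr and __pu_size means each macro argument is evaluated exactly once, so arguments with side effects behave as they would for a real function call. A standalone illustration of the double-evaluation hazard this pattern avoids (macro names here are invented for the example):

    #include <stdio.h>

    /* Evaluates 'a' twice when it is the smaller value. */
    #define MIN_BAD(a, b)   ((a) < (b) ? (a) : (b))

    /* GNU C statement expression: each argument evaluated exactly once,
     * the same idiom the __pu_* temporaries above rely on. */
    #define MIN_GOOD(a, b)  ({ __typeof__(a) _a = (a);      \
                               __typeof__(b) _b = (b);      \
                               _a < _b ? _a : _b; })

    int main(void)
    {
            int i = 1, j = 1;

            printf("%d\n", MIN_BAD(i++, 10));  /* prints 2, leaves i == 3 */
            printf("%d\n", MIN_GOOD(j++, 10)); /* prints 1, leaves j == 2 */
            return 0;
    }
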
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index a9d637bc301d..5cd1caa8bc65 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,9 +208,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
}
unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len);
-
-unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 1f86e1b0a5cd..499578f7e6d7 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -23,6 +23,12 @@ struct unwind_state {
#elif defined(CONFIG_UNWINDER_FRAME_POINTER)
bool got_irq;
unsigned long *bp, *orig_sp, ip;
+ /*
+ * If non-NULL: The current frame is incomplete and doesn't contain a
+ * valid BP. When looking for the next frame, use this instead of the
+ * non-existent saved BP.
+ */
+ unsigned long *next_bp;
struct pt_regs *regs;
#else
unsigned long *sp;
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index ef05bea7010d..d50c7b747d8b 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+ return -EINVAL;
+
asm volatile(CALL_NOSPEC
: __HYPERCALL_5PARAM
: [thunk_target] "a" (&hypercall_page[call])
@@ -214,6 +217,22 @@ xen_single_call(unsigned int call,
return (long)__res;
}
+static __always_inline void __xen_stac(void)
+{
+ /*
+	 * Hide the STAC/CLAC from objtool, so that it does not get confused
+	 * and complain about calling random code with AC=1.
+ */
+ asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+ ASM_STAC ::: "memory", "flags");
+}
+
+static __always_inline void __xen_clac(void)
+{
+ asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+ ASM_CLAC ::: "memory", "flags");
+}
+
static inline long
privcmd_call(unsigned int call,
unsigned long a1, unsigned long a2,
@@ -222,9 +241,9 @@ privcmd_call(unsigned int call,
{
long res;
- stac();
+ __xen_stac();
res = xen_single_call(call, a1, a2, a3, a4, a5);
- clac();
+ __xen_clac();
return res;
}
@@ -332,15 +351,11 @@ HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
return _hypercall4(int, update_va_mapping, va,
new_val.pte, new_val.pte >> 32, flags);
}
-extern int __must_check xen_event_channel_op_compat(int, void *);
static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
- int rc = _hypercall2(int, event_channel_op, cmd, arg);
- if (unlikely(rc == -ENOSYS))
- rc = xen_event_channel_op_compat(cmd, arg);
- return rc;
+ return _hypercall2(int, event_channel_op, cmd, arg);
}
static inline int
@@ -355,15 +370,10 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
return _hypercall3(int, console_io, cmd, count, str);
}
-extern int __must_check xen_physdev_op_compat(int, void *);
-
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
- int rc = _hypercall2(int, physdev_op, cmd, arg);
- if (unlikely(rc == -ENOSYS))
- rc = xen_physdev_op_compat(cmd, arg);
- return rc;
+ return _hypercall2(int, physdev_op, cmd, arg);
}
static inline int
@@ -430,9 +440,9 @@ HYPERVISOR_dm_op(
domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
{
int ret;
- stac();
+ __xen_stac();
ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
- clac();
+ __xen_clac();
return ret;
}
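
The new range check bounds the attacker-controllable hypercall number before it is used to index hypercall_page through an indirect call. The same validate-before-index shape in plain, self-contained C (all names illustrative):

    #include <errno.h>
    #include <stddef.h>

    #define NR_OPS 128

    typedef long (*op_fn)(void);
    static op_fn op_table[NR_OPS];

    /* Validate an untrusted index before it selects a function pointer,
     * the same shape as the hypercall-number check above. */
    static long dispatch(unsigned int idx)
    {
            if (idx >= NR_OPS)
                    return -EINVAL;

            if (!op_table[idx])
                    return -ENOSYS;

            return op_table[idx]();
    }
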
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index efe701b7c6ce..59b5ad310f78 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -1,6 +1,3 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_32.h
generated-y += unistd_64.h
generated-y += unistd_x32.h
-generic-y += socket.h
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index dabfcf7c3941..7a0e64ccd6ff 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -381,6 +381,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h
index f3329cabce5c..ac67bbea10ca 100644
--- a/arch/x86/include/uapi/asm/perf_regs.h
+++ b/arch/x86/include/uapi/asm/perf_regs.h
@@ -27,8 +27,29 @@ enum perf_event_x86_regs {
PERF_REG_X86_R13,
PERF_REG_X86_R14,
PERF_REG_X86_R15,
-
+ /* These are the limits for the GPRs. */
PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
+
+	/* These all need two bits set because they are 128 bits wide */
+ PERF_REG_X86_XMM0 = 32,
+ PERF_REG_X86_XMM1 = 34,
+ PERF_REG_X86_XMM2 = 36,
+ PERF_REG_X86_XMM3 = 38,
+ PERF_REG_X86_XMM4 = 40,
+ PERF_REG_X86_XMM5 = 42,
+ PERF_REG_X86_XMM6 = 44,
+ PERF_REG_X86_XMM7 = 46,
+ PERF_REG_X86_XMM8 = 48,
+ PERF_REG_X86_XMM9 = 50,
+ PERF_REG_X86_XMM10 = 52,
+ PERF_REG_X86_XMM11 = 54,
+ PERF_REG_X86_XMM12 = 56,
+ PERF_REG_X86_XMM13 = 58,
+ PERF_REG_X86_XMM14 = 60,
+ PERF_REG_X86_XMM15 = 62,
+
+	/* These include both GPRs and XMM registers */
+ PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
};
#endif /* _ASM_X86_PERF_REGS_H */
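
Since each 128-bit XMM register occupies two consecutive 64-bit sample slots, a perf tool requesting XMMn has to set two adjacent bits in its sample-regs mask. A small sketch of building such a mask from the enum above (the helper name is hypothetical):

    #include <stdint.h>

    #define PERF_REG_X86_XMM0 32

    /* Set both sample-mask bits for the 128-bit register XMMn (n = 0..15):
     * each XMM register spans two consecutive 64-bit sample slots. */
    static uint64_t xmm_regs_mask(unsigned int n)
    {
            unsigned int base = PERF_REG_X86_XMM0 + 2 * n;

            return (1ULL << base) | (1ULL << (base + 1));
    }
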
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index f0b0c90dd398..d213ec5c3766 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -146,6 +146,7 @@
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
+#define VMX_ABORT_VMCS_CORRUPTED 3
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
#endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 2624de16cd7a..8dcbf6890714 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -935,6 +935,9 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
#define HPET_RESOURCE_NAME_SIZE 9
hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
SMP_CACHE_BYTES);
+ if (!hpet_res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
hpet_res->name = (void *)&hpet_res[1];
hpet_res->flags = IORESOURCE_MEM;
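
This hunk, like the io_apic.c ones further down, makes boot-time memblock allocation failures panic at the call site rather than crash later on a NULL dereference. A user-space analogue of the check-or-die pattern (names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    /* User-space analogue: when an early allocation failure is
     * unrecoverable, fail loudly at the call site instead of crashing
     * later on a NULL dereference. */
    static void *alloc_or_die(size_t size)
    {
            void *buf = malloc(size);

            if (!buf) {
                    fprintf(stderr, "%s: failed to allocate %zu bytes\n",
                            __func__, size);
                    abort();
            }
            return buf;
    }
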
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 158ad1483c43..cb6e076a6d39 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -51,6 +51,18 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
if (c->x86_vendor == X86_VENDOR_INTEL &&
(c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
flags->bm_control = 0;
+ /*
+	 * On all recent Centaur CPUs, the microcode ensures that the cores
+	 * stay cache-coherent with one another while entering a C3-type
+	 * state. So set bm_check to 1 to indicate that the kernel does not
+	 * need to execute a cache flush operation (WBINVD) when entering a
+	 * C3-type state.
+ */
+ if (c->x86_vendor == X86_VENDOR_CENTAUR) {
+ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
+ c->x86_stepping >= 0x0e))
+ flags->bm_check = 1;
+ }
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 9a79c7808f9c..7b9b49dfc05a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
+#include <linux/mmu_context.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
@@ -264,7 +265,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
-void *text_poke_early(void *addr, const void *opcode, size_t len);
+void text_poke_early(void *addr, const void *opcode, size_t len);
/*
* Are we looking at a near JMP with a 1 or 4-byte displacement.
@@ -666,16 +667,136 @@ void __init alternative_instructions(void)
 * instructions. And on the local CPU you need to be protected against NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
-void *__init_or_module text_poke_early(void *addr, const void *opcode,
- size_t len)
+void __init_or_module text_poke_early(void *addr, const void *opcode,
+ size_t len)
{
unsigned long flags;
+
+ if (boot_cpu_has(X86_FEATURE_NX) &&
+ is_module_text_address((unsigned long)addr)) {
+ /*
+ * Modules text is marked initially as non-executable, so the
+ * code cannot be running and speculative code-fetches are
+ * prevented. Just change the code.
+ */
+ memcpy(addr, opcode, len);
+ } else {
+ local_irq_save(flags);
+ memcpy(addr, opcode, len);
+ local_irq_restore(flags);
+ sync_core();
+
+ /*
+ * Could also do a CLFLUSH here to speed up CPU recovery; but
+ * that causes hangs on some VIA CPUs.
+ */
+ }
+}
+
+__ro_after_init struct mm_struct *poking_mm;
+__ro_after_init unsigned long poking_addr;
+
+static void *__text_poke(void *addr, const void *opcode, size_t len)
+{
+ bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
+ struct page *pages[2] = {NULL};
+ temp_mm_state_t prev;
+ unsigned long flags;
+ pte_t pte, *ptep;
+ spinlock_t *ptl;
+ pgprot_t pgprot;
+
+ /*
+	 * While the boot memory allocator is running we cannot use struct pages as
+ * they are not yet initialized. There is no way to recover.
+ */
+ BUG_ON(!after_bootmem);
+
+ if (!core_kernel_text((unsigned long)addr)) {
+ pages[0] = vmalloc_to_page(addr);
+ if (cross_page_boundary)
+ pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
+ } else {
+ pages[0] = virt_to_page(addr);
+ WARN_ON(!PageReserved(pages[0]));
+ if (cross_page_boundary)
+ pages[1] = virt_to_page(addr + PAGE_SIZE);
+ }
+ /*
+ * If something went wrong, crash and burn since recovery paths are not
+ * implemented.
+ */
+ BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
+
local_irq_save(flags);
- memcpy(addr, opcode, len);
+
+ /*
+ * Map the page without the global bit, as TLB flushing is done with
+ * flush_tlb_mm_range(), which is intended for non-global PTEs.
+ */
+ pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
+
+ /*
+	 * The lock is not really needed, but it lets us avoid open-coding.
+ */
+ ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+
+ /*
+ * This must not fail; preallocated in poking_init().
+ */
+ VM_BUG_ON(!ptep);
+
+ pte = mk_pte(pages[0], pgprot);
+ set_pte_at(poking_mm, poking_addr, ptep, pte);
+
+ if (cross_page_boundary) {
+ pte = mk_pte(pages[1], pgprot);
+ set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
+ }
+
+ /*
+ * Loading the temporary mm behaves as a compiler barrier, which
+ * guarantees that the PTE will be set at the time memcpy() is done.
+ */
+ prev = use_temporary_mm(poking_mm);
+
+ kasan_disable_current();
+ memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
+ kasan_enable_current();
+
+ /*
+	 * Use a compiler barrier to ensure that the PTE is cleared only
+	 * after the memcpy() instructions have been issued.
+ */
+ barrier();
+
+ pte_clear(poking_mm, poking_addr, ptep);
+ if (cross_page_boundary)
+ pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
+
+ /*
+ * Loading the previous page-table hierarchy requires a serializing
+ * instruction that already allows the core to see the updated version.
+ * Xen-PV is assumed to serialize execution in a similar manner.
+ */
+ unuse_temporary_mm(prev);
+
+ /*
+ * Flushing the TLB might involve IPIs, which would require enabled
+	 * IRQs, but not if the mm is unused, as it is at this point.
+ */
+ flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
+ (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
+ PAGE_SHIFT, false);
+
+ /*
+ * If the text does not match what we just wrote then something is
+ * fundamentally screwy; there's nothing we can really do about that.
+ */
+ BUG_ON(memcmp(addr, opcode, len));
+
+ pte_unmap_unlock(ptep, ptl);
local_irq_restore(flags);
- sync_core();
- /* Could also do a CLFLUSH here to speed up CPU recovery; but
- that causes hangs on some VIA CPUs. */
return addr;
}
@@ -689,48 +810,36 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
* It means the size must be writable atomically and the address must be aligned
* in a way that permits an atomic write. It also makes sure we fit on a single
* page.
+ *
+ * Note that the caller must ensure that if the modified code is part of a
+ * module, the module is not removed while poking. This can be achieved by
+ * registering a module notifier, and ordering module removal and patching
+ * through a mutex.
*/
void *text_poke(void *addr, const void *opcode, size_t len)
{
- unsigned long flags;
- char *vaddr;
- struct page *pages[2];
- int i;
-
- /*
- * While boot memory allocator is runnig we cannot use struct
- * pages as they are not yet initialized.
- */
- BUG_ON(!after_bootmem);
-
lockdep_assert_held(&text_mutex);
- if (!core_kernel_text((unsigned long)addr)) {
- pages[0] = vmalloc_to_page(addr);
- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
- } else {
- pages[0] = virt_to_page(addr);
- WARN_ON(!PageReserved(pages[0]));
- pages[1] = virt_to_page(addr + PAGE_SIZE);
- }
- BUG_ON(!pages[0]);
- local_irq_save(flags);
- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
- if (pages[1])
- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
- clear_fixmap(FIX_TEXT_POKE0);
- if (pages[1])
- clear_fixmap(FIX_TEXT_POKE1);
- local_flush_tlb();
- sync_core();
- /* Could also do a CLFLUSH here to speed up CPU recovery; but
- that causes hangs on some VIA CPUs. */
- for (i = 0; i < len; i++)
- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
- local_irq_restore(flags);
- return addr;
+ return __text_poke(addr, opcode, len);
+}
+
+/**
+ * text_poke_kgdb - Update instructions on a live kernel by kgdb
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy
+ *
+ * Only atomic text poke/set should be allowed when not doing early patching.
+ * It means the size must be writable atomically and the address must be aligned
+ * in a way that permits an atomic write. It also makes sure we fit on a single
+ * page.
+ *
+ * Context: should only be used by kgdb, which ensures no other core is running,
+ * even though it does not hold the text_mutex.
+ */
+void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
+{
+ return __text_poke(addr, opcode, len);
}
static void do_sync_core(void *info)
@@ -788,7 +897,7 @@ NOKPROBE_SYMBOL(poke_int3_handler);
* replacing opcode
* - sync cores
*/
-void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
+void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
unsigned char int3 = 0xcc;
@@ -830,7 +939,5 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
* the writing of the new instruction.
*/
bp_patching_in_progress = false;
-
- return addr;
}
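
Both text_poke() and text_poke_kgdb() now funnel into __text_poke(); the only difference is the locking contract, with text_poke() asserting text_mutex and the kgdb variant relying on every other CPU being stopped. A kernel-context sketch (not a standalone program) of what a regular caller is expected to look like:

    /* Kernel-context sketch of a regular caller: live patching must be
     * serialized by text_mutex, which text_poke() now asserts. */
    static void patch_site(void *addr, const void *insn, size_t len)
    {
            mutex_lock(&text_mutex);
            text_poke(addr, insn, len);
            mutex_unlock(&text_mutex);
    }
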
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 58176b56354e..294ed4392a0e 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) "AGP: " fmt
#include <linux/kernel.h>
+#include <linux/kcore.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
@@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
int fix_aperture __initdata = 1;
-#ifdef CONFIG_PROC_VMCORE
+#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
/*
* If the first kernel maps the aperture over e820 RAM, the kdump kernel will
* use the same range because it will remain configured in the northbridge.
@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
*/
static unsigned long aperture_pfn_start, aperture_page_count;
-static int gart_oldmem_pfn_is_ram(unsigned long pfn)
+static int gart_mem_pfn_is_ram(unsigned long pfn)
{
return likely((pfn < aperture_pfn_start) ||
(pfn >= aperture_pfn_start + aperture_page_count));
}
-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+static void __init exclude_from_core(u64 aper_base, u32 aper_order)
{
aperture_pfn_start = aper_base >> PAGE_SHIFT;
aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
- WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
+#ifdef CONFIG_PROC_VMCORE
+ WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
+#endif
+#ifdef CONFIG_PROC_KCORE
+ WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
+#endif
}
#else
-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+static void exclude_from_core(u64 aper_base, u32 aper_order)
{
}
#endif
@@ -474,7 +480,7 @@ out:
* may have allocated the range over its e820 RAM
* and fixed up the northbridge
*/
- exclude_from_vmcore(last_aper_base, last_aper_order);
+ exclude_from_core(last_aper_base, last_aper_order);
return 1;
}
@@ -520,7 +526,7 @@ out:
* overlap with the first kernel's memory. We can't access the
* range through vmcore even though it should be part of the dump.
*/
- exclude_from_vmcore(aper_alloc, aper_order);
+ exclude_from_core(aper_alloc, aper_order);
/* Fix up the north bridges */
for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b7bcdd781651..ab6af775f06c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -802,6 +802,24 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
return 0;
}
+static int __init lapic_init_clockevent(void)
+{
+ if (!lapic_timer_frequency)
+ return -1;
+
+ /* Calculate the scaled math multiplication factor */
+ lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
+ TICK_NSEC, lapic_clockevent.shift);
+ lapic_clockevent.max_delta_ns =
+ clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
+ lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
+ lapic_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xF, &lapic_clockevent);
+ lapic_clockevent.min_delta_ticks = 0xF;
+
+ return 0;
+}
+
static int __init calibrate_APIC_clock(void)
{
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
@@ -810,25 +828,21 @@ static int __init calibrate_APIC_clock(void)
long delta, deltatsc;
int pm_referenced = 0;
- /**
- * check if lapic timer has already been calibrated by platform
- * specific routine, such as tsc calibration code. if so, we just fill
+ if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+ return 0;
+
+ /*
+ * Check if lapic timer has already been calibrated by platform
+ * specific routine, such as tsc calibration code. If so just fill
* in the clockevent structure and return.
*/
-
- if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
- return 0;
- } else if (lapic_timer_frequency) {
+ if (!lapic_init_clockevent()) {
apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
- lapic_timer_frequency);
- lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
- TICK_NSEC, lapic_clockevent.shift);
- lapic_clockevent.max_delta_ns =
- clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
- lapic_clockevent.max_delta_ticks = 0x7FFFFF;
- lapic_clockevent.min_delta_ns =
- clockevent_delta2ns(0xF, &lapic_clockevent);
- lapic_clockevent.min_delta_ticks = 0xF;
+ lapic_timer_frequency);
+ /*
+		 * Direct calibration methods must have an always-running local
+		 * APIC timer, so there is no need for the broadcast timer.
+ */
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
return 0;
}
@@ -869,17 +883,8 @@ static int __init calibrate_APIC_clock(void)
pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
&delta, &deltatsc);
- /* Calculate the scaled math multiplication factor */
- lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
- lapic_clockevent.shift);
- lapic_clockevent.max_delta_ns =
- clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
- lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
- lapic_clockevent.min_delta_ns =
- clockevent_delta2ns(0xF, &lapic_clockevent);
- lapic_clockevent.min_delta_ticks = 0xF;
-
lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
+ lapic_init_clockevent();
apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 78778b54f904..a5464b8b6c46 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -175,7 +175,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
this_cpu_write(cpu_llc_id, node);
/* Account for nodes per socket in multi-core-module processors */
- if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
+ if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
rdmsrl(MSR_FAM10H_NODE_ID, val);
nodes = ((val >> 3) & 7) + 1;
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 264e3221d923..53aa234a6803 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2581,6 +2581,8 @@ static struct resource * __init ioapic_setup_resources(void)
n *= nr_ioapics;
mem = memblock_alloc(n, SMP_CACHE_BYTES);
+ if (!mem)
+ panic("%s: Failed to allocate %lu bytes\n", __func__, n);
res = (void *)mem;
mem += sizeof(struct resource) * nr_ioapics;
@@ -2625,6 +2627,9 @@ fake_ioapic_page:
#endif
ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
PAGE_SIZE);
+ if (!ioapic_phys)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
ioapic_phys = __pa(ioapic_phys);
}
set_fixmap_nocache(idx, ioapic_phys);
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index ddced33184b5..d3d075226c0a 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -68,10 +68,12 @@ int main(void)
#undef ENTRY
OFFSET(TSS_ist, tss_struct, x86_tss.ist);
+ DEFINE(DB_STACK_OFFSET, offsetof(struct cea_exception_stacks, DB_stack) -
+ offsetof(struct cea_exception_stacks, DB1_stack));
BLANK();
#ifdef CONFIG_STACKPROTECTOR
- DEFINE(stack_canary_offset, offsetof(union irq_stack_union, stack_canary));
+ DEFINE(stack_canary_offset, offsetof(struct fixed_percpu_data, stack_canary));
BLANK();
#endif
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index cfd24f9f7614..1796d2bdcaaa 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -28,7 +28,7 @@ obj-y += cpuid-deps.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
-obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o
+obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o intel_epb.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 01004bfb1a1b..fb6a64bd765f 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -82,11 +82,14 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
* performance at the same time..
*/
+#ifdef CONFIG_X86_32
extern __visible void vide(void);
-__asm__(".globl vide\n"
+__asm__(".text\n"
+ ".globl vide\n"
".type vide, @function\n"
".align 4\n"
"vide: ret\n");
+#endif
static void init_amd_k5(struct cpuinfo_x86 *c)
{
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 804c49493938..64d5aec24203 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -83,7 +83,7 @@ unsigned int aperfmperf_get_khz(int cpu)
if (!cpu_khz)
return 0;
- if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
@@ -99,7 +99,7 @@ void arch_freq_prepare_all(void)
if (!cpu_khz)
return;
- if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
return;
for_each_online_cpu(cpu)
@@ -115,7 +115,7 @@ unsigned int arch_freq_get_on_cpu(int cpu)
if (!cpu_khz)
return 0;
- if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 2da82eff0eb4..29630393f300 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -275,7 +275,7 @@ static const struct {
const char *option;
enum spectre_v2_user_cmd cmd;
bool secure;
-} v2_user_options[] __initdata = {
+} v2_user_options[] __initconst = {
{ "auto", SPECTRE_V2_USER_CMD_AUTO, false },
{ "off", SPECTRE_V2_USER_CMD_NONE, false },
{ "on", SPECTRE_V2_USER_CMD_FORCE, true },
@@ -419,7 +419,7 @@ static const struct {
const char *option;
enum spectre_v2_mitigation_cmd cmd;
bool secure;
-} mitigation_options[] __initdata = {
+} mitigation_options[] __initconst = {
{ "off", SPECTRE_V2_CMD_NONE, false },
{ "on", SPECTRE_V2_CMD_FORCE, true },
{ "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
@@ -440,7 +440,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
char arg[20];
int ret, i;
- if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+ if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
+ cpu_mitigations_off())
return SPECTRE_V2_CMD_NONE;
ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
@@ -658,7 +659,7 @@ static const char * const ssb_strings[] = {
static const struct {
const char *option;
enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[] __initdata = {
+} ssb_mitigation_options[] __initconst = {
{ "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
{ "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
{ "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
@@ -672,7 +673,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
char arg[20];
int ret, i;
- if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
+ cpu_mitigations_off()) {
return SPEC_STORE_BYPASS_CMD_NONE;
} else {
ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
@@ -1008,6 +1010,11 @@ static void __init l1tf_select_mitigation(void)
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
+ if (cpu_mitigations_off())
+ l1tf_mitigation = L1TF_MITIGATION_OFF;
+ else if (cpu_mitigations_auto_nosmt())
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+
override_cache_bits(&boot_cpu_data);
switch (l1tf_mitigation) {
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cb28e98a0659..37640544e12f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -507,19 +507,6 @@ void load_percpu_segment(int cpu)
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif
-#ifdef CONFIG_X86_64
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
- [DEBUG_STACK - 1] = DEBUG_STKSZ
-};
-#endif
-
/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
@@ -1511,9 +1498,9 @@ static __init int setup_clearcpuid(char *arg)
__setup("clearcpuid=", setup_clearcpuid);
#ifdef CONFIG_X86_64
-DEFINE_PER_CPU_FIRST(union irq_stack_union,
- irq_stack_union) __aligned(PAGE_SIZE) __visible;
-EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);
+DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
+ fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
+EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
/*
* The following percpu variables are hot. Align current_task to
@@ -1523,9 +1510,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
-DEFINE_PER_CPU(char *, irq_stack_ptr) =
- init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;
-
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
@@ -1562,23 +1547,7 @@ void syscall_init(void)
X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}
-/*
- * Copies of the original ist values from the tss are only accessed during
- * debugging, no special alignment required.
- */
-DEFINE_PER_CPU(struct orig_ist, orig_ist);
-
-static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);
-
-int is_debug_stack(unsigned long addr)
-{
- return __this_cpu_read(debug_stack_usage) ||
- (addr <= __this_cpu_read(debug_stack_addr) &&
- addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
-}
-NOKPROBE_SYMBOL(is_debug_stack);
-
DEFINE_PER_CPU(u32, debug_idt_ctr);
void debug_stack_set_zero(void)
@@ -1668,7 +1637,7 @@ static void setup_getcpu(int cpu)
unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
struct desc_struct d = { };
- if (static_cpu_has(X86_FEATURE_RDTSCP))
+ if (boot_cpu_has(X86_FEATURE_RDTSCP))
write_rdtscp_aux(cpudata);
/* Store CPU and node number in limit. */
@@ -1690,17 +1659,14 @@ static void setup_getcpu(int cpu)
* initialized (naturally) in the bootstrap process, such as the GDT
* and IDT. We reload them nevertheless, this function acts as a
* 'CPU state barrier', nothing should get across.
- * A lot of state is already set up in PDA init for 64 bit
*/
#ifdef CONFIG_X86_64
void cpu_init(void)
{
- struct orig_ist *oist;
+ int cpu = raw_smp_processor_id();
struct task_struct *me;
struct tss_struct *t;
- unsigned long v;
- int cpu = raw_smp_processor_id();
int i;
wait_for_master_cpu(cpu);
@@ -1715,7 +1681,6 @@ void cpu_init(void)
load_ucode_ap();
t = &per_cpu(cpu_tss_rw, cpu);
- oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
if (this_cpu_read(numa_node) == 0 &&
@@ -1753,16 +1718,11 @@ void cpu_init(void)
/*
* set up and load the per-CPU TSS
*/
- if (!oist->ist[0]) {
- char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
-
- for (v = 0; v < N_EXCEPTION_STACKS; v++) {
- estacks += exception_stack_sizes[v];
- oist->ist[v] = t->x86_tss.ist[v] =
- (unsigned long)estacks;
- if (v == DEBUG_STACK-1)
- per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
- }
+ if (!t->x86_tss.ist[0]) {
+ t->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
+ t->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
+ t->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
+ t->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
}
t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
@@ -1864,23 +1824,6 @@ void cpu_init(void)
}
#endif
-static void bsp_resume(void)
-{
- if (this_cpu->c_bsp_resume)
- this_cpu->c_bsp_resume(&boot_cpu_data);
-}
-
-static struct syscore_ops cpu_syscore_ops = {
- .resume = bsp_resume,
-};
-
-static int __init init_cpu_syscore(void)
-{
- register_syscore_ops(&cpu_syscore_ops);
- return 0;
-}
-core_initcall(init_cpu_syscore);
-
/*
* The microcode loader calls this upon late microcode load to recheck features,
* only when microcode has been updated. Caller holds microcode_mutex and CPU
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 5eb946b9a9f3..c0e2407abdd6 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -14,7 +14,6 @@ struct cpu_dev {
void (*c_init)(struct cpuinfo_x86 *);
void (*c_identify)(struct cpuinfo_x86 *);
void (*c_detect_tlb)(struct cpuinfo_x86 *);
- void (*c_bsp_resume)(struct cpuinfo_x86 *);
int c_x86_vendor;
#ifdef CONFIG_X86_32
/* Optional vendor specific routine to obtain the cache size. */
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index d12226f60168..1d9b8aaea06c 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* Load/Store Serialize to mem access disable (=reorder it) */
- setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
+ setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
/* set load/store serialize from 1GB to 4GB */
ccr3 |= 0xe0;
setCx86(CX86_CCR3, ccr3);
@@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
/* CCR2 bit 2: unlock NW bit */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
/* set 'Not Write-through' */
write_cr0(read_cr0() | X86_CR0_NW);
/* CCR2 bit 2: lock NW bit and set WT1 */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
}
/*
@@ -153,14 +153,14 @@ static void geode_configure(void)
local_irq_save(flags);
/* Suspend on halt power saving and enable #SUSP pin */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* FPU fast, DTE cache, Mem bypass */
- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
+ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
set_cx86_memwb();
@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
/* Enable cxMMX extensions (GX1 Datasheet 54) */
- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
+ setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
/*
* GXm : 0x30 ... 0x5f GXm datasheet 51
@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
if (dir1 > 7) {
dir0_msn++; /* M II */
/* Enable MMX extensions (App note 108) */
- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
+ setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
} else {
/* A 6x86MX - it has the bug. */
set_cpu_bug(c, X86_BUG_COMA);
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index cf25405444ab..415621ddb8a2 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -19,6 +19,8 @@
#include "cpu.h"
+#define APICID_SOCKET_ID_BIT 6
+
/*
* nodes_per_socket: Stores the number of nodes per socket.
* Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
@@ -87,6 +89,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
if (!err)
c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+ /* Socket ID is ApicId[6] for these processors. */
+ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+
cacheinfo_hygon_init_llc_id(c, cpu, node_id);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fc3c07fe7df5..f17c1a714779 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -596,36 +596,6 @@ detect_keyid_bits:
c->x86_phys_bits -= keyid_bits;
}
-static void init_intel_energy_perf(struct cpuinfo_x86 *c)
-{
- u64 epb;
-
- /*
- * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
- * (x86_energy_perf_policy(8) is available to change it at run-time.)
- */
- if (!cpu_has(c, X86_FEATURE_EPB))
- return;
-
- rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
- if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
- return;
-
- pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
- pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
- epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
- wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
-}
-
-static void intel_bsp_resume(struct cpuinfo_x86 *c)
-{
- /*
- * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
- * so reinitialize it properly like during bootup:
- */
- init_intel_energy_perf(c);
-}
-
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
u64 msr;
@@ -763,8 +733,6 @@ static void init_intel(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_TME))
detect_tme(c);
- init_intel_energy_perf(c);
-
init_intel_misc_features(c);
}
@@ -1023,9 +991,7 @@ static const struct cpu_dev intel_cpu_dev = {
.c_detect_tlb = intel_detect_tlb,
.c_early_init = early_init_intel,
.c_init = init_intel,
- .c_bsp_resume = intel_bsp_resume,
.c_x86_vendor = X86_VENDOR_INTEL,
};
cpu_dev_register(intel_cpu_dev);
-
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
new file mode 100644
index 000000000000..f4dd73396f28
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Performance and Energy Bias Hint support.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/syscore_ops.h>
+#include <linux/pm.h>
+
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
+
+/**
+ * DOC: overview
+ *
+ * The Performance and Energy Bias Hint (EPB) allows software to specify its
+ * preference with respect to the power-performance tradeoffs present in the
+ * processor. Generally, the EPB is expected to be set by user space (directly
+ * via sysfs or with the help of the x86_energy_perf_policy tool), but there are
+ * two reasons for the kernel to update it.
+ *
+ * First, there are systems where the platform firmware resets the EPB during
+ * system-wide transitions from sleep states back into the working state
+ * effectively causing the previous EPB updates by user space to be lost.
+ * Thus the kernel needs to save the current EPB values for all CPUs during
+ * system-wide transitions to sleep states and restore them on the way back to
+ * the working state. That can be achieved by saving EPB for secondary CPUs
+ * when they are taken offline during transitions into system sleep states and
+ * for the boot CPU in a syscore suspend operation, so that it can be restored
+ * for the boot CPU in a syscore resume operation and for the other CPUs when
+ * they are brought back online. However, CPUs that are already offline when
+ * a system-wide PM transition is started are not taken offline again, but their
+ * EPB values may still be reset by the platform firmware during the transition,
+ * so in fact it is always necessary to save the EPB of any CPU taken
+ * offline and to restore it when that CPU goes back online.
+ *
+ * Second, on many systems the initial EPB value coming from the platform
+ * firmware is 0 ('performance') and at least on some of them that is because
+ * the platform firmware does not initialize EPB at all with the assumption that
+ * the OS will do that anyway. That is sometimes problematic, as it may cause
+ * the system battery to drain too fast, for example, so it is better to
+ * adjust the value on CPU bring-up: if the initial EPB value for a given CPU
+ * is 0, the kernel changes it to 6 ('normal').
+ */
+
+static DEFINE_PER_CPU(u8, saved_epb);
+
+#define EPB_MASK 0x0fULL
+#define EPB_SAVED 0x10ULL
+#define MAX_EPB EPB_MASK
+
+static int intel_epb_save(void)
+{
+ u64 epb;
+
+ rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+ /*
+ * Ensure that saved_epb will always be nonzero after this write even if
+ * the EPB value read from the MSR is 0.
+ */
+ this_cpu_write(saved_epb, (epb & EPB_MASK) | EPB_SAVED);
+
+ return 0;
+}
+
+static void intel_epb_restore(void)
+{
+ u64 val = this_cpu_read(saved_epb);
+ u64 epb;
+
+ rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+ if (val) {
+ val &= EPB_MASK;
+ } else {
+ /*
+ * Because intel_epb_save() has not run for the current CPU yet,
+ * it is going online for the first time, so if its EPB value is
+ * 0 ('performance') at this point, assume that it has not been
+ * initialized by the platform firmware and set it to 6
+ * ('normal').
+ */
+ val = epb & EPB_MASK;
+ if (val == ENERGY_PERF_BIAS_PERFORMANCE) {
+ val = ENERGY_PERF_BIAS_NORMAL;
+ pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
+ }
+ }
+ wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
+}
+
+static struct syscore_ops intel_epb_syscore_ops = {
+ .suspend = intel_epb_save,
+ .resume = intel_epb_restore,
+};
+
+static const char * const energy_perf_strings[] = {
+ "performance",
+ "balance-performance",
+ "normal",
+ "balance-power",
+ "power"
+};
+static const u8 energ_perf_values[] = {
+ ENERGY_PERF_BIAS_PERFORMANCE,
+ ENERGY_PERF_BIAS_BALANCE_PERFORMANCE,
+ ENERGY_PERF_BIAS_NORMAL,
+ ENERGY_PERF_BIAS_BALANCE_POWERSAVE,
+ ENERGY_PERF_BIAS_POWERSAVE
+};
+
+static ssize_t energy_perf_bias_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned int cpu = dev->id;
+ u64 epb;
+ int ret;
+
+ ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%llu\n", epb);
+}
+
+static ssize_t energy_perf_bias_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int cpu = dev->id;
+ u64 epb, val;
+ int ret;
+
+ ret = __sysfs_match_string(energy_perf_strings,
+ ARRAY_SIZE(energy_perf_strings), buf);
+ if (ret >= 0)
+ val = energ_perf_values[ret];
+ else if (kstrtou64(buf, 0, &val) || val > MAX_EPB)
+ return -EINVAL;
+
+ ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret < 0)
+ return ret;
+
+ ret = wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
+ (epb & ~EPB_MASK) | val);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(energy_perf_bias);
+
+static struct attribute *intel_epb_attrs[] = {
+ &dev_attr_energy_perf_bias.attr,
+ NULL
+};
+
+static const struct attribute_group intel_epb_attr_group = {
+ .name = power_group_name,
+ .attrs = intel_epb_attrs
+};
+
+static int intel_epb_online(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ intel_epb_restore();
+ if (!cpuhp_tasks_frozen)
+ sysfs_merge_group(&cpu_dev->kobj, &intel_epb_attr_group);
+
+ return 0;
+}
+
+static int intel_epb_offline(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpuhp_tasks_frozen)
+ sysfs_unmerge_group(&cpu_dev->kobj, &intel_epb_attr_group);
+
+ intel_epb_save();
+ return 0;
+}
+
+static __init int intel_epb_init(void)
+{
+ int ret;
+
+ if (!boot_cpu_has(X86_FEATURE_EPB))
+ return -ENODEV;
+
+ ret = cpuhp_setup_state(CPUHP_AP_X86_INTEL_EPB_ONLINE,
+ "x86/intel/epb:online", intel_epb_online,
+ intel_epb_offline);
+ if (ret < 0)
+ goto err_out_online;
+
+ register_syscore_ops(&intel_epb_syscore_ops);
+ return 0;
+
+err_out_online:
+ cpuhp_remove_state(CPUHP_AP_X86_INTEL_EPB_ONLINE);
+ return ret;
+}
+subsys_initcall(intel_epb_init);
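
The merged attribute group places the knob under each CPU's power directory in sysfs, so user space can inspect or set the bias without the x86_energy_perf_policy tool. A minimal user-space sketch, assuming the usual sysfs mount point and root privileges:

    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/devices/system/cpu/cpu0/power/energy_perf_bias";
            unsigned int epb;
            FILE *f = fopen(path, "r+");

            if (!f)
                    return 1;

            if (fscanf(f, "%u", &epb) == 1)
                    printf("cpu0 EPB: %u\n", epb);

            /* The store accepts 0..15 or a policy name such as "normal". */
            rewind(f);
            fprintf(f, "normal\n");
            fclose(f);
            return 0;
    }
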
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index e64de5149e50..d904aafe6409 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -563,33 +563,59 @@ out:
return offset;
}
+bool amd_filter_mce(struct mce *m)
+{
+ enum smca_bank_types bank_type = smca_get_bank_type(m->bank);
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ u8 xec = (m->status >> 16) & 0x3F;
+
+ /* See Family 17h Models 10h-2Fh Erratum #1114. */
+ if (c->x86 == 0x17 &&
+ c->x86_model >= 0x10 && c->x86_model <= 0x2F &&
+ bank_type == SMCA_IF && xec == 10)
+ return true;
+
+ return false;
+}
+
/*
- * Turn off MC4_MISC thresholding banks on all family 0x15 models since
- * they're not supported there.
+ * Turn off thresholding banks for the following conditions:
+ * - MC4_MISC thresholding is not supported on Family 0x15.
+ * - Prevent possible spurious interrupts from the IF bank on Family 0x17
+ * Models 0x10-0x2F due to Erratum #1114.
*/
-void disable_err_thresholding(struct cpuinfo_x86 *c)
+void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
{
- int i;
+ int i, num_msrs;
u64 hwcr;
bool need_toggle;
- u32 msrs[] = {
- 0x00000413, /* MC4_MISC0 */
- 0xc0000408, /* MC4_MISC1 */
- };
+ u32 msrs[NR_BLOCKS];
+
+ if (c->x86 == 0x15 && bank == 4) {
+ msrs[0] = 0x00000413; /* MC4_MISC0 */
+ msrs[1] = 0xc0000408; /* MC4_MISC1 */
+ num_msrs = 2;
+ } else if (c->x86 == 0x17 &&
+ (c->x86_model >= 0x10 && c->x86_model <= 0x2F)) {
- if (c->x86 != 0x15)
+ if (smca_get_bank_type(bank) != SMCA_IF)
+ return;
+
+ msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank);
+ num_msrs = 1;
+ } else {
return;
+ }
rdmsrl(MSR_K7_HWCR, hwcr);
/* McStatusWrEn has to be set */
need_toggle = !(hwcr & BIT(18));
-
if (need_toggle)
wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
/* Clear CntP bit safely */
- for (i = 0; i < ARRAY_SIZE(msrs); i++)
+ for (i = 0; i < num_msrs; i++)
msr_clear_bit(msrs[i], 62);
/* restore old settings */
@@ -604,12 +630,12 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
unsigned int bank, block, cpu = smp_processor_id();
int offset = -1;
- disable_err_thresholding(c);
-
for (bank = 0; bank < mca_cfg.banks; ++bank) {
if (mce_flags.smca)
smca_configure(bank, cpu);
+ disable_err_thresholding(c, bank);
+
for (block = 0; block < NR_BLOCKS; ++block) {
address = get_block_address(address, low, high, bank, block);
if (!address)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index b7fb541a4873..5112a50e6486 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -460,23 +460,6 @@ static void mce_irq_work_cb(struct irq_work *entry)
mce_schedule_work();
}
-static void mce_report_event(struct pt_regs *regs)
-{
- if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
- mce_notify_irq();
- /*
- * Triggering the work queue here is just an insurance
- * policy in case the syscall exit notify handler
- * doesn't run soon enough or ends up running on the
- * wrong CPU (can happen when audit sleeps)
- */
- mce_schedule_work();
- return;
- }
-
- irq_work_queue(&mce_irq_work);
-}
-
/*
* Check if the address reported by the CPU is in a format we can parse.
* It would be possible to add code for most other cases, but all would
@@ -712,19 +695,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
barrier();
m.status = mce_rdmsrl(msr_ops.status(i));
+
+ /* If this entry is not valid, ignore it */
if (!(m.status & MCI_STATUS_VAL))
continue;
/*
- * Uncorrected or signalled events are handled by the exception
- * handler when it is enabled, so don't process those here.
- *
- * TBD do the same check for MCI_STATUS_EN here?
+ * If we are logging everything (at CPU online) or this
+ * is a corrected error, then we must log it.
*/
- if (!(flags & MCP_UC) &&
- (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
- continue;
+ if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
+ goto log_it;
+
+ /*
+ * Newer Intel systems that support software error
+ * recovery need to make additional checks. Other
+ * CPUs should skip over uncorrected errors, but log
+ * everything else.
+ */
+ if (!mca_cfg.ser) {
+ if (m.status & MCI_STATUS_UC)
+ continue;
+ goto log_it;
+ }
+ /* Log "not enabled" (speculative) errors */
+ if (!(m.status & MCI_STATUS_EN))
+ goto log_it;
+
+ /*
+ * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
+ * UC == 1 && PCC == 0 && S == 0
+ */
+ if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
+ goto log_it;
+
+ /*
+ * Skip anything else. Presumption is that our read of this
+ * bank is racing with a machine check. Leave the log alone
+ * for do_machine_check() to deal with it.
+ */
+ continue;
+
+log_it:
error_seen = true;
mce_read_aux(&m, i);
@@ -1301,7 +1314,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
mce_panic("Fatal machine check on current CPU", &m, msg);
if (worst > 0)
- mce_report_event(regs);
+ irq_work_queue(&mce_irq_work);
+
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
sync_core();
@@ -1451,13 +1465,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
static int __mcheck_cpu_mce_banks_init(void)
{
int i;
- u8 num_banks = mca_cfg.banks;
- mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
+ mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
if (!mce_banks)
return -ENOMEM;
- for (i = 0; i < num_banks; i++) {
+ for (i = 0; i < MAX_NR_BANKS; i++) {
struct mce_bank *b = &mce_banks[i];
b->ctl = -1ULL;
@@ -1471,28 +1484,19 @@ static int __mcheck_cpu_mce_banks_init(void)
*/
static int __mcheck_cpu_cap_init(void)
{
- unsigned b;
u64 cap;
+ u8 b;
rdmsrl(MSR_IA32_MCG_CAP, cap);
b = cap & MCG_BANKCNT_MASK;
- if (!mca_cfg.banks)
- pr_info("CPU supports %d MCE banks\n", b);
-
- if (b > MAX_NR_BANKS) {
- pr_warn("Using only %u machine check banks out of %u\n",
- MAX_NR_BANKS, b);
+ if (WARN_ON_ONCE(b > MAX_NR_BANKS))
b = MAX_NR_BANKS;
- }
- /* Don't support asymmetric configurations today */
- WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
- mca_cfg.banks = b;
+ mca_cfg.banks = max(mca_cfg.banks, b);
if (!mce_banks) {
int err = __mcheck_cpu_mce_banks_init();
-
if (err)
return err;
}
@@ -1771,6 +1775,14 @@ static void __mcheck_cpu_init_timer(void)
mce_start_timer(t);
}
+bool filter_mce(struct mce *m)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return amd_filter_mce(m);
+
+ return false;
+}
+
/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
@@ -2425,8 +2437,8 @@ static int fake_panic_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
- fake_panic_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
+ "%llu\n");
static int __init mcheck_debugfs_init(void)
{
@@ -2435,8 +2447,8 @@ static int __init mcheck_debugfs_init(void)
dmce = mce_get_debugfs_dir();
if (!dmce)
return -ENOMEM;
- ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
- &fake_panic_fops);
+ ffake_panic = debugfs_create_file_unsafe("fake_panic", 0444, dmce,
+ NULL, &fake_panic_fops);
if (!ffake_panic)
return -ENOMEM;
@@ -2451,6 +2463,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
static int __init mcheck_late_init(void)
{
+ pr_info("Using %d MCE banks\n", mca_cfg.banks);
+
if (mca_cfg.recovery)
static_branch_inc(&mcsafe_key);
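
Note: the polling rework above replaces one dense conditional with an explicit decision ladder. Flattened into a standalone predicate for readability; should_log() is a made-up name, the real code uses goto log_it inside machine_check_poll():

	/* Condensed sketch of the machine_check_poll() decision ladder. */
	static bool should_log(u64 status, unsigned int flags, bool ser)
	{
		/* Log everything at CPU online, and all corrected errors. */
		if ((flags & MCP_UC) || !(status & MCI_STATUS_UC))
			return true;
		/* Below here the error is uncorrected (UC == 1). */
		if (!ser)
			return false;	/* no SER support: skip UC errors */
		/* Log "not enabled" (speculative) errors. */
		if (!(status & MCI_STATUS_EN))
			return true;
		/* Log UCNA: UC == 1 && PCC == 0 && S == 0. */
		if (!(status & MCI_STATUS_PCC) && !(status & MCI_STATUS_S))
			return true;
		/* Likely racing with an in-flight #MC; leave it alone. */
		return false;
	}
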
diff --git a/arch/x86/kernel/cpu/mce/genpool.c b/arch/x86/kernel/cpu/mce/genpool.c
index 3395549c51d3..64d1d5a00f39 100644
--- a/arch/x86/kernel/cpu/mce/genpool.c
+++ b/arch/x86/kernel/cpu/mce/genpool.c
@@ -99,6 +99,9 @@ int mce_gen_pool_add(struct mce *mce)
{
struct mce_evt_llist *node;
+ if (filter_mce(mce))
+ return -EINVAL;
+
if (!mce_evt_pool)
return -EINVAL;
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 8492ef7d9015..a6026170af92 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -46,8 +46,6 @@
static struct mce i_mce;
static struct dentry *dfs_inj;
-static u8 n_banks;
-
#define MAX_FLAG_OPT_SIZE 4
#define NBCFG 0x44
@@ -528,7 +526,7 @@ static void do_inject(void)
* only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
* Fam10h and later BKDGs.
*/
- if (static_cpu_has(X86_FEATURE_AMD_DCM) &&
+ if (boot_cpu_has(X86_FEATURE_AMD_DCM) &&
b == 4 &&
boot_cpu_data.x86 < 0x17) {
toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
@@ -570,9 +568,15 @@ err:
static int inj_bank_set(void *data, u64 val)
{
struct mce *m = (struct mce *)data;
+ u8 n_banks;
+ u64 cap;
+
+ /* Get bank count on target CPU so we can handle non-uniform values. */
+ rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
+ n_banks = cap & MCG_BANKCNT_MASK;
if (val >= n_banks) {
- pr_err("Non-existent MCE bank: %llu\n", val);
+ pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
return -EINVAL;
}
@@ -665,10 +669,6 @@ static struct dfs_node {
static int __init debugfs_init(void)
{
unsigned int i;
- u64 cap;
-
- rdmsrl(MSR_IA32_MCG_CAP, cap);
- n_banks = cap & MCG_BANKCNT_MASK;
dfs_inj = debugfs_create_dir("mce-inject", NULL);
if (!dfs_inj)
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index af5eab1e65e2..a34b55baa7aa 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -173,4 +173,13 @@ struct mca_msr_regs {
extern struct mca_msr_regs msr_ops;
+/* Decide whether to add MCE record to MCE event pool or filter it out. */
+extern bool filter_mce(struct mce *m);
+
+#ifdef CONFIG_X86_MCE_AMD
+extern bool amd_filter_mce(struct mce *m);
+#else
+static inline bool amd_filter_mce(struct mce *m) { return false; }
+#endif
+
#endif /* __X86_MCE_INTERNAL_H__ */
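
Note: filter_mce() gives vendor code a veto before a record enters the MCE event pool (see the mce_gen_pool_add() hunk above). The real amd_filter_mce() lives in mce/amd.c and is not part of this excerpt; a minimal sketch of the shape such a filter takes, with a purely illustrative predicate:

	/* Sketch only: the bank/status test below is hypothetical, not
	 * AMD's actual filtering criterion. Returning true means "drop
	 * this record" (see mce_gen_pool_add()). */
	bool amd_filter_mce(struct mce *m)
	{
		if (m->bank == 4 && (m->status & MCI_STATUS_DEFERRED))
			return true;

		return false;
	}
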
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 97f9ada9ceda..8a4a7823451a 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
if (ustate == UCODE_ERROR) {
error = -1;
break;
- } else if (ustate == UCODE_OK)
+ } else if (ustate == UCODE_NEW) {
apply_microcode_on_target(cpu);
+ }
}
return error;
@@ -608,6 +609,8 @@ static int microcode_reload_late(void)
if (ret > 0)
microcode_check();
+ pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode);
+
return ret;
}
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 16936a24795c..a44bdbe7c55e 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/uio.h>
#include <linux/mm.h>
#include <asm/microcode_intel.h>
@@ -861,32 +862,33 @@ out:
return ret;
}
-static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
- int (*get_ucode_data)(void *, const void *, size_t))
+static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
- int new_rev = uci->cpu_sig.rev;
- unsigned int leftover = size;
unsigned int curr_mc_size = 0, new_mc_size = 0;
- unsigned int csig, cpf;
enum ucode_state ret = UCODE_OK;
+ int new_rev = uci->cpu_sig.rev;
+ u8 *new_mc = NULL, *mc = NULL;
+ unsigned int csig, cpf;
- while (leftover) {
+ while (iov_iter_count(iter)) {
struct microcode_header_intel mc_header;
- unsigned int mc_size;
+ unsigned int mc_size, data_size;
+ u8 *data;
- if (leftover < sizeof(mc_header)) {
- pr_err("error! Truncated header in microcode data file\n");
+ if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
+ pr_err("error! Truncated or inaccessible header in microcode data file\n");
break;
}
- if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
- break;
-
mc_size = get_totalsize(&mc_header);
- if (!mc_size || mc_size > leftover) {
- pr_err("error! Bad data in microcode data file\n");
+ if (mc_size < sizeof(mc_header)) {
+ pr_err("error! Bad data in microcode data file (totalsize too small)\n");
+ break;
+ }
+ data_size = mc_size - sizeof(mc_header);
+ if (data_size > iov_iter_count(iter)) {
+ pr_err("error! Bad data in microcode data file (truncated file?)\n");
break;
}
@@ -899,7 +901,9 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
curr_mc_size = mc_size;
}
- if (get_ucode_data(mc, ucode_ptr, mc_size) ||
+ memcpy(mc, &mc_header, sizeof(mc_header));
+ data = mc + sizeof(mc_header);
+ if (!copy_from_iter_full(data, data_size, iter) ||
microcode_sanity_check(mc, 1) < 0) {
break;
}
@@ -914,14 +918,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
mc = NULL; /* trigger new vmalloc */
ret = UCODE_NEW;
}
-
- ucode_ptr += mc_size;
- leftover -= mc_size;
}
vfree(mc);
- if (leftover) {
+ if (iov_iter_count(iter)) {
vfree(new_mc);
return UCODE_ERROR;
}
@@ -945,12 +946,6 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
return ret;
}
-static int get_ucode_fw(void *to, const void *from, size_t n)
-{
- memcpy(to, from, n);
- return 0;
-}
-
static bool is_blacklisted(unsigned int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -977,10 +972,12 @@ static bool is_blacklisted(unsigned int cpu)
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
bool refresh_fw)
{
- char name[30];
struct cpuinfo_x86 *c = &cpu_data(cpu);
const struct firmware *firmware;
+ struct iov_iter iter;
enum ucode_state ret;
+ struct kvec kvec;
+ char name[30];
if (is_blacklisted(cpu))
return UCODE_NFOUND;
@@ -993,26 +990,30 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
return UCODE_NFOUND;
}
- ret = generic_load_microcode(cpu, (void *)firmware->data,
- firmware->size, &get_ucode_fw);
+ kvec.iov_base = (void *)firmware->data;
+ kvec.iov_len = firmware->size;
+ iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
+ ret = generic_load_microcode(cpu, &iter);
release_firmware(firmware);
return ret;
}
-static int get_ucode_user(void *to, const void *from, size_t n)
-{
- return copy_from_user(to, from, n);
-}
-
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
+ struct iov_iter iter;
+ struct iovec iov;
+
if (is_blacklisted(cpu))
return UCODE_NFOUND;
- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
+ iov.iov_base = (void __user *)buf;
+ iov.iov_len = size;
+ iov_iter_init(&iter, WRITE, &iov, 1, size);
+
+ return generic_load_microcode(cpu, &iter);
}
static struct microcode_ops microcode_intel_ops = {
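
Note: the conversion above folds the get_ucode_fw()/get_ucode_user() callback pair into a single iov_iter walk, so one loader handles both kernel and user buffers. The core pattern, as a self-contained sketch (consume_buffer() is a hypothetical name):

	#include <linux/uio.h>

	static int consume_buffer(const void *buf, size_t len)
	{
		struct kvec kvec = {
			.iov_base = (void *)buf,
			.iov_len  = len,
		};
		struct iov_iter iter;
		u32 header;

		/* Wrap the kernel buffer; WRITE matches the usage above. */
		iov_iter_kvec(&iter, WRITE, &kvec, 1, len);

		while (iov_iter_count(&iter)) {
			/* Fails cleanly on truncated or inaccessible data. */
			if (!copy_from_iter_full(&header, sizeof(header), &iter))
				return -EFAULT;
			/* ... validate header, then consume the record body ... */
		}
		return 0;
	}
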
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index e81a2db42df7..3fa238a137d2 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -328,6 +328,18 @@ static void __init ms_hyperv_init_platform(void)
# ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
# endif
+
+ /*
+	 * Hyper-V doesn't provide irq remapping for the IO-APIC. To enable
+	 * x2apic, set the x2apic destination mode to physical mode when
+	 * x2apic is available; the Hyper-V IOMMU driver makes sure that
+	 * CPUs assigned IO-APIC irqs have an 8-bit APIC id.
+ */
+# ifdef CONFIG_X86_X2APIC
+ if (x2apic_supported())
+ x2apic_phys = 1;
+# endif
+
#endif
}
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 2c8522a39ed5..cb2e49810d68 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -35,11 +35,11 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
"fpu_exception\t: %s\n"
"cpuid level\t: %d\n"
"wp\t\t: yes\n",
- static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
- static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
- static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
- static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
- static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
+ boot_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
+ boot_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
+ boot_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
+ boot_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
+ boot_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
c->cpuid_level);
}
#else
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 2dbd990a2eb7..89320c0396b1 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
if (cpumask_empty(cpu_mask) || mba_sc)
goto done;
cpu = get_cpu();
- /* Update CBM on this cpu if it's in cpu_mask. */
+ /* Update resource control msr on this CPU if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
rdt_ctrl_update(&msr_param);
- /* Update CBM on other cpus. */
+ /* Update resource control msr on other CPUs. */
smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
put_cpu();
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 822b7db634ee..e49b77283924 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/kernfs.h>
+#include <linux/fs_context.h>
#include <linux/jump_label.h>
#define MSR_IA32_L3_QOS_CFG 0xc81
@@ -40,6 +41,21 @@
#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)
+
+struct rdt_fs_context {
+ struct kernfs_fs_context kfc;
+ bool enable_cdpl2;
+ bool enable_cdpl3;
+ bool enable_mba_mbps;
+};
+
+static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
+{
+ struct kernfs_fs_context *kfc = fc->fs_private;
+
+ return container_of(kfc, struct rdt_fs_context, kfc);
+}
+
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
/**
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index f33f11f69078..1573a0a6b525 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -501,11 +501,8 @@ out_unlock:
void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
unsigned long delay = msecs_to_jiffies(delay_ms);
- struct rdt_resource *r;
int cpu;
- r = &rdt_resources_all[RDT_RESOURCE_L3];
-
cpu = cpumask_any(&dom->cpu_mask);
dom->cqm_work_cpu = cpu;
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 8388adf241b2..333c177a2471 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -24,6 +24,7 @@
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
+#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
@@ -32,6 +33,7 @@
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
+#include <linux/user_namespace.h>
#include <uapi/linux/magic.h>
@@ -1858,46 +1860,6 @@ static void cdp_disable_all(void)
cdpl2_disable();
}
-static int parse_rdtgroupfs_options(char *data)
-{
- char *token, *o = data;
- int ret = 0;
-
- while ((token = strsep(&o, ",")) != NULL) {
- if (!*token) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!strcmp(token, "cdp")) {
- ret = cdpl3_enable();
- if (ret)
- goto out;
- } else if (!strcmp(token, "cdpl2")) {
- ret = cdpl2_enable();
- if (ret)
- goto out;
- } else if (!strcmp(token, "mba_MBps")) {
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- ret = set_mba_sc(true);
- else
- ret = -EINVAL;
- if (ret)
- goto out;
- } else {
- ret = -EINVAL;
- goto out;
- }
- }
-
- return 0;
-
-out:
- pr_err("Invalid mount option \"%s\"\n", token);
-
- return ret;
-}
-
/*
* We don't allow rdtgroup directories to be created anywhere
* except the root directory. Thus when looking for the rdtgroup
@@ -1969,13 +1931,27 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn,
struct rdtgroup *prgrp,
struct kernfs_node **mon_data_kn);
-static struct dentry *rdt_mount(struct file_system_type *fs_type,
- int flags, const char *unused_dev_name,
- void *data)
+static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
+ int ret = 0;
+
+ if (ctx->enable_cdpl2)
+ ret = cdpl2_enable();
+
+ if (!ret && ctx->enable_cdpl3)
+ ret = cdpl3_enable();
+
+ if (!ret && ctx->enable_mba_mbps)
+ ret = set_mba_sc(true);
+
+ return ret;
+}
+
+static int rdt_get_tree(struct fs_context *fc)
+{
+ struct rdt_fs_context *ctx = rdt_fc2context(fc);
struct rdt_domain *dom;
struct rdt_resource *r;
- struct dentry *dentry;
int ret;
cpus_read_lock();
@@ -1984,53 +1960,42 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
* resctrl file system can only be mounted once.
*/
if (static_branch_unlikely(&rdt_enable_key)) {
- dentry = ERR_PTR(-EBUSY);
+ ret = -EBUSY;
goto out;
}
- ret = parse_rdtgroupfs_options(data);
- if (ret) {
- dentry = ERR_PTR(ret);
+ ret = rdt_enable_ctx(ctx);
+ if (ret < 0)
goto out_cdp;
- }
closid_init();
ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
- if (ret) {
- dentry = ERR_PTR(ret);
- goto out_cdp;
- }
+ if (ret < 0)
+ goto out_mba;
if (rdt_mon_capable) {
ret = mongroup_create_dir(rdtgroup_default.kn,
NULL, "mon_groups",
&kn_mongrp);
- if (ret) {
- dentry = ERR_PTR(ret);
+ if (ret < 0)
goto out_info;
- }
kernfs_get(kn_mongrp);
ret = mkdir_mondata_all(rdtgroup_default.kn,
&rdtgroup_default, &kn_mondata);
- if (ret) {
- dentry = ERR_PTR(ret);
+ if (ret < 0)
goto out_mongrp;
- }
kernfs_get(kn_mondata);
rdtgroup_default.mon.mon_data_kn = kn_mondata;
}
ret = rdt_pseudo_lock_init();
- if (ret) {
- dentry = ERR_PTR(ret);
+ if (ret)
goto out_mondata;
- }
- dentry = kernfs_mount(fs_type, flags, rdt_root,
- RDTGROUP_SUPER_MAGIC, NULL);
- if (IS_ERR(dentry))
+ ret = kernfs_get_tree(fc);
+ if (ret < 0)
goto out_psl;
if (rdt_alloc_capable)
@@ -2059,14 +2024,95 @@ out_mongrp:
kernfs_remove(kn_mongrp);
out_info:
kernfs_remove(kn_info);
+out_mba:
+ if (ctx->enable_mba_mbps)
+ set_mba_sc(false);
out_cdp:
cdp_disable_all();
out:
rdt_last_cmd_clear();
mutex_unlock(&rdtgroup_mutex);
cpus_read_unlock();
+ return ret;
+}
+
+enum rdt_param {
+ Opt_cdp,
+ Opt_cdpl2,
+ Opt_mba_mbps,
+ nr__rdt_params
+};
+
+static const struct fs_parameter_spec rdt_param_specs[] = {
+ fsparam_flag("cdp", Opt_cdp),
+ fsparam_flag("cdpl2", Opt_cdpl2),
+ fsparam_flag("mba_MBps", Opt_mba_mbps),
+ {}
+};
+
+static const struct fs_parameter_description rdt_fs_parameters = {
+ .name = "rdt",
+ .specs = rdt_param_specs,
+};
+
+static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct rdt_fs_context *ctx = rdt_fc2context(fc);
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, &rdt_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_cdp:
+ ctx->enable_cdpl3 = true;
+ return 0;
+ case Opt_cdpl2:
+ ctx->enable_cdpl2 = true;
+ return 0;
+ case Opt_mba_mbps:
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -EINVAL;
+ ctx->enable_mba_mbps = true;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void rdt_fs_context_free(struct fs_context *fc)
+{
+ struct rdt_fs_context *ctx = rdt_fc2context(fc);
+
+ kernfs_free_fs_context(fc);
+ kfree(ctx);
+}
+
+static const struct fs_context_operations rdt_fs_context_ops = {
+ .free = rdt_fs_context_free,
+ .parse_param = rdt_parse_param,
+ .get_tree = rdt_get_tree,
+};
- return dentry;
+static int rdt_init_fs_context(struct fs_context *fc)
+{
+ struct rdt_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->kfc.root = rdt_root;
+ ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
+ fc->fs_private = &ctx->kfc;
+ fc->ops = &rdt_fs_context_ops;
+ if (fc->user_ns)
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(&init_user_ns);
+ fc->global = true;
+ return 0;
}
static int reset_all_ctrls(struct rdt_resource *r)
@@ -2239,9 +2285,10 @@ static void rdt_kill_sb(struct super_block *sb)
}
static struct file_system_type rdt_fs_type = {
- .name = "resctrl",
- .mount = rdt_mount,
- .kill_sb = rdt_kill_sb,
+ .name = "resctrl",
+ .init_fs_context = rdt_init_fs_context,
+ .parameters = &rdt_fs_parameters,
+ .kill_sb = rdt_kill_sb,
};
static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
@@ -2469,103 +2516,131 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}
-/**
- * rdtgroup_init_alloc - Initialize the new RDT group's allocations
- *
- * A new RDT group is being created on an allocation capable (CAT)
- * supporting system. Set this group up to start off with all usable
- * allocations. That is, all shareable and unused bits.
+/*
+ * Initialize cache resources per RDT domain
*
- * All-zero CBM is invalid. If there are no more shareable bits available
- * on any domain then the entire allocation will fail.
+ * Set the RDT domain up to start off with all usable allocations. That is,
+ * all shareable and unused bits. All-zero CBM is invalid.
*/
-static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
+ u32 closid)
{
struct rdt_resource *r_cdp = NULL;
struct rdt_domain *d_cdp = NULL;
u32 used_b = 0, unused_b = 0;
- u32 closid = rdtgrp->closid;
- struct rdt_resource *r;
unsigned long tmp_cbm;
enum rdtgrp_mode mode;
- struct rdt_domain *d;
u32 peer_ctl, *ctrl;
- int i, ret;
+ int i;
- for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
- list_for_each_entry(d, &r->domains, list) {
- rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
- d->have_new_ctrl = false;
- d->new_ctrl = r->cache.shareable_bits;
- used_b = r->cache.shareable_bits;
- ctrl = d->ctrl_val;
- for (i = 0; i < closids_supported(); i++, ctrl++) {
- if (closid_allocated(i) && i != closid) {
- mode = rdtgroup_mode_by_closid(i);
- if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
- break;
- /*
- * If CDP is active include peer
- * domain's usage to ensure there
- * is no overlap with an exclusive
- * group.
- */
- if (d_cdp)
- peer_ctl = d_cdp->ctrl_val[i];
- else
- peer_ctl = 0;
- used_b |= *ctrl | peer_ctl;
- if (mode == RDT_MODE_SHAREABLE)
- d->new_ctrl |= *ctrl | peer_ctl;
- }
- }
- if (d->plr && d->plr->cbm > 0)
- used_b |= d->plr->cbm;
- unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
- unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
- d->new_ctrl |= unused_b;
- /*
- * Force the initial CBM to be valid, user can
- * modify the CBM based on system availability.
- */
- cbm_ensure_valid(&d->new_ctrl, r);
+ rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
+ d->have_new_ctrl = false;
+ d->new_ctrl = r->cache.shareable_bits;
+ used_b = r->cache.shareable_bits;
+ ctrl = d->ctrl_val;
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+ break;
/*
- * Assign the u32 CBM to an unsigned long to ensure
- * that bitmap_weight() does not access out-of-bound
- * memory.
+ * If CDP is active include peer domain's
+ * usage to ensure there is no overlap
+ * with an exclusive group.
*/
- tmp_cbm = d->new_ctrl;
- if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
- r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("No space on %s:%d\n",
- r->name, d->id);
- return -ENOSPC;
- }
- d->have_new_ctrl = true;
+ if (d_cdp)
+ peer_ctl = d_cdp->ctrl_val[i];
+ else
+ peer_ctl = 0;
+ used_b |= *ctrl | peer_ctl;
+ if (mode == RDT_MODE_SHAREABLE)
+ d->new_ctrl |= *ctrl | peer_ctl;
}
}
+ if (d->plr && d->plr->cbm > 0)
+ used_b |= d->plr->cbm;
+ unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+ unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+ d->new_ctrl |= unused_b;
+ /*
+	 * Force the initial CBM to be valid; the user can modify the
+	 * CBM based on system availability.
+ */
+ cbm_ensure_valid(&d->new_ctrl, r);
+ /*
+ * Assign the u32 CBM to an unsigned long to ensure that
+ * bitmap_weight() does not access out-of-bound memory.
+ */
+ tmp_cbm = d->new_ctrl;
+ if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
+ return -ENOSPC;
+ }
+ d->have_new_ctrl = true;
+
+ return 0;
+}
+
+/*
+ * Initialize cache resources with default values.
+ *
+ * A new RDT group is being created on an allocation capable (CAT)
+ * supporting system. Set this group up to start off with all usable
+ * allocations.
+ *
+ * If there are no more shareable bits available on any domain then
+ * the entire allocation will fail.
+ */
+static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
+{
+ struct rdt_domain *d;
+ int ret;
+
+ list_for_each_entry(d, &r->domains, list) {
+ ret = __init_one_rdt_domain(d, r, closid);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Initialize MBA resource with default values. */
+static void rdtgroup_init_mba(struct rdt_resource *r)
+{
+ struct rdt_domain *d;
+
+ list_for_each_entry(d, &r->domains, list) {
+ d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+ d->have_new_ctrl = true;
+ }
+}
+
+/* Initialize the RDT group's allocations. */
+static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+{
+ struct rdt_resource *r;
+ int ret;
for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
+ if (r->rid == RDT_RESOURCE_MBA) {
+ rdtgroup_init_mba(r);
+ } else {
+ ret = rdtgroup_init_cat(r, rdtgrp->closid);
+ if (ret < 0)
+ return ret;
+ }
+
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret;
}
- rdtgrp->mode = RDT_MODE_SHAREABLE;
+
}
+ rdtgrp->mode = RDT_MODE_SHAREABLE;
+
return 0;
}
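
Note: with resctrl converted to fs_context, the three mount flags become fs_parameters, which also makes them reachable through the new mount API syscalls. A hypothetical userspace sketch using raw syscalls (assumes kernel headers that define the __NR_fs* numbers and the FSCONFIG_*/MOVE_MOUNT_* constants; error handling omitted):

	#include <unistd.h>
	#include <fcntl.h>
	#include <sys/syscall.h>
	#include <linux/mount.h>

	int main(void)
	{
		int fsfd, mntfd;

		fsfd = syscall(__NR_fsopen, "resctrl", 0);
		/* Flag names match rdt_param_specs: "cdp", "cdpl2", "mba_MBps". */
		syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_FLAG, "cdp", NULL, 0);
		syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
		mntfd = syscall(__NR_fsmount, fsfd, 0, 0);
		syscall(__NR_move_mount, mntfd, "", AT_FDCWD, "/sys/fs/resctrl",
			MOVE_MOUNT_F_EMPTY_PATH);
		return 0;
	}
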
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 17ffc869cab8..a96ca8584803 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -204,8 +204,7 @@ static struct crash_mem *fill_up_crash_elf_data(void)
* another range split. So add extra two slots here.
*/
nr_ranges += 2;
- cmem = vzalloc(sizeof(struct crash_mem) +
- sizeof(struct crash_mem_range) * nr_ranges);
+ cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
if (!cmem)
return NULL;
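
Note: struct_size() computes the same total as the open-coded expression it replaces, but saturates on overflow so a huge nr_ranges cannot wrap into a short allocation. For a flexible-array tail it is roughly:

	/* What struct_size(cmem, ranges, nr_ranges) evaluates to,
	 * minus the overflow saturation: */
	size_t total = sizeof(struct crash_mem) +
		       nr_ranges * sizeof(struct crash_mem_range);
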
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index cd53f3030e40..64a59d726639 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -34,14 +34,14 @@ const char *stack_type_name(enum stack_type type)
static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack);
+ unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
/*
* This is a software stack, so 'end' can be a valid stack pointer.
* It just means the stack is empty.
*/
- if (stack <= begin || stack > end)
+ if (stack < begin || stack > end)
return false;
info->type = STACK_TYPE_IRQ;
@@ -59,14 +59,14 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack);
+ unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack_ptr);
unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
/*
* This is a software stack, so 'end' can be a valid stack pointer.
* It just means the stack is empty.
*/
- if (stack <= begin || stack > end)
+ if (stack < begin || stack > end)
return false;
info->type = STACK_TYPE_SOFTIRQ;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 5cdb9e84da57..753b8cfe8b8a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -16,23 +16,21 @@
#include <linux/bug.h>
#include <linux/nmi.h>
+#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
-static char *exception_stack_names[N_EXCEPTION_STACKS] = {
- [ DOUBLEFAULT_STACK-1 ] = "#DF",
- [ NMI_STACK-1 ] = "NMI",
- [ DEBUG_STACK-1 ] = "#DB",
- [ MCE_STACK-1 ] = "#MC",
-};
-
-static unsigned long exception_stack_sizes[N_EXCEPTION_STACKS] = {
- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
- [DEBUG_STACK - 1] = DEBUG_STKSZ
+static const char * const exception_stack_names[] = {
+ [ ESTACK_DF ] = "#DF",
+ [ ESTACK_NMI ] = "NMI",
+ [ ESTACK_DB2 ] = "#DB2",
+ [ ESTACK_DB1 ] = "#DB1",
+ [ ESTACK_DB ] = "#DB",
+ [ ESTACK_MCE ] = "#MC",
};
const char *stack_type_name(enum stack_type type)
{
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
if (type == STACK_TYPE_IRQ)
return "IRQ";
@@ -52,43 +50,84 @@ const char *stack_type_name(enum stack_type type)
return NULL;
}
+/**
+ * struct estack_pages - Page descriptor for exception stacks
+ * @offs: Offset from the start of the exception stack area
+ * @size: Size of the exception stack
+ * @type: Type to store in the stack_info struct
+ */
+struct estack_pages {
+ u32 offs;
+ u16 size;
+ u16 type;
+};
+
+#define EPAGERANGE(st) \
+ [PFN_DOWN(CEA_ESTACK_OFFS(st)) ... \
+ PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = { \
+ .offs = CEA_ESTACK_OFFS(st), \
+ .size = CEA_ESTACK_SIZE(st), \
+ .type = STACK_TYPE_EXCEPTION + ESTACK_ ##st, }
+
+/*
+ * Array of exception stack page descriptors. If the stack is larger than
+ * PAGE_SIZE, all pages covering a particular stack will have the same
+ * info. The guard pages including the not mapped DB2 stack are zeroed
+ * info. The guard pages, including the unmapped DB2 stack, are
+ * zeroed out.
+static const
+struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
+ EPAGERANGE(DF),
+ EPAGERANGE(NMI),
+ EPAGERANGE(DB1),
+ EPAGERANGE(DB),
+ EPAGERANGE(MCE),
+};
+
static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *begin, *end;
+ unsigned long begin, end, stk = (unsigned long)stack;
+ const struct estack_pages *ep;
struct pt_regs *regs;
- unsigned k;
+ unsigned int k;
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
- for (k = 0; k < N_EXCEPTION_STACKS; k++) {
- end = (unsigned long *)raw_cpu_ptr(&orig_ist)->ist[k];
- begin = end - (exception_stack_sizes[k] / sizeof(long));
- regs = (struct pt_regs *)end - 1;
-
- if (stack <= begin || stack >= end)
- continue;
+ begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
+ end = begin + sizeof(struct cea_exception_stacks);
+ /* Bail if @stack is outside the exception stack area. */
+ if (stk < begin || stk >= end)
+ return false;
- info->type = STACK_TYPE_EXCEPTION + k;
- info->begin = begin;
- info->end = end;
- info->next_sp = (unsigned long *)regs->sp;
+ /* Calc page offset from start of exception stacks */
+ k = (stk - begin) >> PAGE_SHIFT;
+ /* Lookup the page descriptor */
+ ep = &estack_pages[k];
+ /* Guard page? */
+ if (!ep->size)
+ return false;
- return true;
- }
+ begin += (unsigned long)ep->offs;
+ end = begin + (unsigned long)ep->size;
+ regs = (struct pt_regs *)end - 1;
- return false;
+ info->type = ep->type;
+ info->begin = (unsigned long *)begin;
+ info->end = (unsigned long *)end;
+ info->next_sp = (unsigned long *)regs->sp;
+ return true;
}
static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *end = (unsigned long *)this_cpu_read(irq_stack_ptr);
+ unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long));
/*
* This is a software stack, so 'end' can be a valid stack pointer.
* It just means the stack is empty.
*/
- if (stack <= begin || stack > end)
+ if (stack < begin || stack >= end)
return false;
info->type = STACK_TYPE_IRQ;
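
Note: the estack_pages[] table turns the old per-stack loop into an O(1) lookup: page index in, descriptor out. Assuming, purely for illustration, a 4 KiB #DF stack at offset 0 of the cea_exception_stacks area, EPAGERANGE(DF) would expand to roughly:

	/* Illustrative expansion; real offsets and sizes come from
	 * CEA_ESTACK_OFFS() and CEA_ESTACK_SIZE(). */
	[0 ... 0] = {
		.offs = 0,
		.size = 4096,
		.type = STACK_TYPE_EXCEPTION + ESTACK_DF,
	},
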
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index a687d10da417..2879e234e193 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/firmware-map.h>
#include <linux/sort.h>
+#include <linux/memory_hotplug.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
@@ -775,7 +776,7 @@ u64 __init e820__memblock_alloc_reserved(u64 size, u64 align)
{
u64 addr;
- addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+ addr = memblock_phys_alloc(size, align);
if (addr) {
e820__range_update_kexec(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
pr_info("update e820_table_kexec for e820__memblock_alloc_reserved()\n");
@@ -878,6 +879,10 @@ static int __init parse_memopt(char *p)
e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1);
+#ifdef CONFIG_MEMORY_HOTPLUG
+ max_mem_size = mem_size;
+#endif
+
return 0;
}
early_param("mem", parse_memopt);
@@ -1092,6 +1097,9 @@ void __init e820__reserve_resources(void)
res = memblock_alloc(sizeof(*res) * e820_table->nr_entries,
SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*res) * e820_table->nr_entries);
e820_res = res;
for (i = 0; i < e820_table->nr_entries; i++) {
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 3e3789c8f8e1..0caf8122d680 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -49,7 +49,7 @@ int ftrace_arch_code_modify_post_process(void)
union ftrace_code_union {
char code[MCOUNT_INSN_SIZE];
struct {
- unsigned char e8;
+ unsigned char op;
int offset;
} __attribute__((packed));
};
@@ -59,20 +59,23 @@ static int ftrace_calc_offset(long ip, long addr)
return (int)(addr - ip);
}
-static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *
+ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
{
static union ftrace_code_union calc;
- calc.e8 = 0xe8;
+ calc.op = op;
calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
- /*
- * No locking needed, this must be called via kstop_machine
- * which in essence is like running on a uniprocessor machine.
- */
return calc.code;
}
+static unsigned char *
+ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+ return ftrace_text_replace(0xe8, ip, addr);
+}
+
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
@@ -665,22 +668,6 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
-#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
-static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
-{
- static union ftrace_code_union calc;
-
- /* Jmp not a call (ignore the .e8) */
- calc.e8 = 0xe9;
- calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
-
- /*
- * ftrace external locks synchronize the access to the static variable.
- */
- return calc.code;
-}
-#endif
-
/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64
@@ -691,12 +678,8 @@ static inline void *alloc_tramp(unsigned long size)
{
return module_alloc(size);
}
-static inline void tramp_free(void *tramp, int size)
+static inline void tramp_free(void *tramp)
{
- int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- set_memory_nx((unsigned long)tramp, npages);
- set_memory_rw((unsigned long)tramp, npages);
module_memfree(tramp);
}
#else
@@ -705,7 +688,7 @@ static inline void *alloc_tramp(unsigned long size)
{
return NULL;
}
-static inline void tramp_free(void *tramp, int size) { }
+static inline void tramp_free(void *tramp) { }
#endif
/* Defined as markers to the end of the ftrace default trampolines */
@@ -743,6 +726,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
unsigned long end_offset;
unsigned long op_offset;
unsigned long offset;
+ unsigned long npages;
unsigned long size;
unsigned long retq;
unsigned long *ptr;
@@ -775,6 +759,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
return 0;
*tramp_size = size + RET_SIZE + sizeof(void *);
+ npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
/* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size);
@@ -819,9 +804,17 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* ALLOC_TRAMP flags lets us know we created it */
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+ set_vm_flush_reset_perms(trampoline);
+
+ /*
+ * Module allocation needs to be completed by making the page
+ * executable. The page is still writable, which is a security hazard,
+ * but anyhow ftrace breaks W^X completely.
+ */
+ set_memory_x((unsigned long)trampoline, npages);
return (unsigned long)trampoline;
fail:
- tramp_free(trampoline, *tramp_size);
+ tramp_free(trampoline);
return 0;
}
@@ -892,8 +885,8 @@ static void *addr_from_call(void *ptr)
return NULL;
/* Make sure this is a call */
- if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
- pr_warn("Expected e8, got %x\n", calc.e8);
+ if (WARN_ON_ONCE(calc.op != 0xe8)) {
+ pr_warn("Expected e8, got %x\n", calc.op);
return NULL;
}
@@ -952,7 +945,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return;
- tramp_free((void *)ops->trampoline, ops->trampoline_size);
+ tramp_free((void *)ops->trampoline);
ops->trampoline = 0;
}
@@ -964,6 +957,11 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
+{
+ return ftrace_text_replace(0xe9, ip, addr);
+}
+
static int ftrace_mod_jmp(unsigned long ip, void *func)
{
unsigned char *new;
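
Note: the ftrace_text_replace() consolidation works because the x86 near CALL (0xe8) and near JMP (0xe9) share the same 5-byte layout: one opcode byte plus a signed 32-bit displacement measured from the end of the instruction, which is why ftrace_calc_offset() is passed ip + MCOUNT_INSN_SIZE. In sketch form:

	/* 5-byte near call/jmp: opcode plus a rel32 displacement that is
	 * relative to the address of the *next* instruction. */
	union insn {
		unsigned char bytes[5];
		struct {
			unsigned char op;	/* 0xe8 = call, 0xe9 = jmp */
			int offset;		/* target - (ip + 5) */
		} __attribute__((packed));
	};
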
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d1dbe8e4eb82..bcd206c8ac90 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -265,7 +265,7 @@ ENDPROC(start_cpu0)
GLOBAL(initial_code)
.quad x86_64_start_kernel
GLOBAL(initial_gs)
- .quad INIT_PER_CPU_VAR(irq_stack_union)
+ .quad INIT_PER_CPU_VAR(fixed_percpu_data)
GLOBAL(initial_stack)
/*
* The SIZEOF_PTREGS gap is a convention which helps the in-kernel
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index dfd3aca82c61..fb32925a2e62 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -905,6 +905,8 @@ int __init hpet_enable(void)
return 0;
hpet_set_mapping();
+ if (!hpet_virt_address)
+ return 0;
/*
* Read the period and check for a sane value:
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index ff9bfd40429e..d73083021002 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -354,6 +354,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
#endif
default:
WARN_ON_ONCE(1);
+ return -EINVAL;
}
/*
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 01adea278a71..6d8917875f44 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -41,13 +41,12 @@ struct idt_data {
#define SYSG(_vector, _addr) \
G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS)
-/* Interrupt gate with interrupt stack */
+/*
+ * Interrupt gate with interrupt stack. The _ist index is the index in
+ * the tss.ist[] array, but for the descriptor it needs to start at 1.
+ */
#define ISTG(_vector, _addr, _ist) \
- G(_vector, _addr, _ist, GATE_INTERRUPT, DPL0, __KERNEL_CS)
-
-/* System interrupt gate with interrupt stack */
-#define SISTG(_vector, _addr, _ist) \
- G(_vector, _addr, _ist, GATE_INTERRUPT, DPL3, __KERNEL_CS)
+ G(_vector, _addr, _ist + 1, GATE_INTERRUPT, DPL0, __KERNEL_CS)
/* Task gate */
#define TSKG(_vector, _gdt) \
@@ -184,11 +183,11 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
* cpu_init() when the TSS has been initialized.
*/
static const __initconst struct idt_data ist_idts[] = {
- ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
- ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
- ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK),
+ ISTG(X86_TRAP_DB, debug, IST_INDEX_DB),
+ ISTG(X86_TRAP_NMI, nmi, IST_INDEX_NMI),
+ ISTG(X86_TRAP_DF, double_fault, IST_INDEX_DF),
#ifdef CONFIG_X86_MCE
- ISTG(X86_TRAP_MC, &machine_check, MCE_STACK),
+ ISTG(X86_TRAP_MC, &machine_check, IST_INDEX_MCE),
#endif
};
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 95600a99ae93..fc34816c6f04 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -51,8 +51,8 @@ static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif
-DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
-DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
+DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
static void call_on_stack(void *func, void *stack)
{
@@ -76,7 +76,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
u32 *isp, *prev_esp, arg1;
curstk = (struct irq_stack *) current_stack();
- irqstk = __this_cpu_read(hardirq_stack);
+ irqstk = __this_cpu_read(hardirq_stack_ptr);
/*
* this is where we switch to the IRQ stack. However, if we are
@@ -107,27 +107,28 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
}
/*
- * allocate per-cpu stacks for hardirq and for softirq processing
+ * Allocate per-cpu stacks for hardirq and softirq processing
*/
-void irq_ctx_init(int cpu)
+int irq_init_percpu_irqstack(unsigned int cpu)
{
- struct irq_stack *irqstk;
-
- if (per_cpu(hardirq_stack, cpu))
- return;
+ int node = cpu_to_node(cpu);
+ struct page *ph, *ps;
- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
- THREADINFO_GFP,
- THREAD_SIZE_ORDER));
- per_cpu(hardirq_stack, cpu) = irqstk;
+ if (per_cpu(hardirq_stack_ptr, cpu))
+ return 0;
- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
- THREADINFO_GFP,
- THREAD_SIZE_ORDER));
- per_cpu(softirq_stack, cpu) = irqstk;
+ ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+ if (!ph)
+ return -ENOMEM;
+ ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+ if (!ps) {
+ __free_pages(ph, THREAD_SIZE_ORDER);
+ return -ENOMEM;
+ }
- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
+ per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
+ per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
+ return 0;
}
void do_softirq_own_stack(void)
@@ -135,7 +136,7 @@ void do_softirq_own_stack(void)
struct irq_stack *irqstk;
u32 *isp, *prev_esp;
- irqstk = __this_cpu_read(softirq_stack);
+ irqstk = __this_cpu_read(softirq_stack_ptr);
/* build the stack frame on the softirq stack */
isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 4dff56658427..6bf6517a05bb 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -18,63 +18,64 @@
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>
+
+#include <asm/cpu_entry_area.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
-int sysctl_panic_on_stackoverflow;
+DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
+DECLARE_INIT_PER_CPU(irq_stack_backing_store);
-/*
- * Probabilistic stack overflow check:
- *
- * Only check the stack in process context, because everything else
- * runs on the big interrupt stacks. Checking reliably is too expensive,
- * so we just check from interrupts.
- */
-static inline void stack_overflow_check(struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-#define STACK_TOP_MARGIN 128
- struct orig_ist *oist;
- u64 irq_stack_top, irq_stack_bottom;
- u64 estack_top, estack_bottom;
- u64 curbase = (u64)task_stack_page(current);
+ if (IS_ERR_OR_NULL(desc))
+ return false;
- if (user_mode(regs))
- return;
+ generic_handle_irq_desc(desc);
+ return true;
+}
- if (regs->sp >= curbase + sizeof(struct pt_regs) + STACK_TOP_MARGIN &&
- regs->sp <= curbase + THREAD_SIZE)
- return;
+#ifdef CONFIG_VMAP_STACK
+/*
+ * VMAP the backing store with guard pages
+ */
+static int map_irq_stack(unsigned int cpu)
+{
+ char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
+ struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
+ void *va;
+ int i;
- irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) +
- STACK_TOP_MARGIN;
- irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr);
- if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
- return;
+ for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) {
+ phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT));
- oist = this_cpu_ptr(&orig_ist);
- estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
- estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
- if (regs->sp >= estack_top && regs->sp <= estack_bottom)
- return;
+ pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
+ }
- WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pS)\n",
- current->comm, curbase, regs->sp,
- irq_stack_top, irq_stack_bottom,
- estack_top, estack_bottom, (void *)regs->ip);
+ va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
+ if (!va)
+ return -ENOMEM;
- if (sysctl_panic_on_stackoverflow)
- panic("low stack detected by irq handler - check messages\n");
-#endif
+ per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
+ return 0;
}
-
-bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+#else
+/*
+ * If VMAP stacks are disabled due to KASAN, just use the per-CPU
+ * backing store without guard pages.
+ */
+static int map_irq_stack(unsigned int cpu)
{
- stack_overflow_check(regs);
+ void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
- if (IS_ERR_OR_NULL(desc))
- return false;
+ per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
+ return 0;
+}
+#endif
- generic_handle_irq_desc(desc);
- return true;
+int irq_init_percpu_irqstack(unsigned int cpu)
+{
+ if (per_cpu(hardirq_stack_ptr, cpu))
+ return 0;
+ return map_irq_stack(cpu);
}
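
Note: both map_irq_stack() variants store va + IRQ_STACK_SIZE because x86 stacks grow down, so hardirq_stack_ptr holds the top of the stack. With CONFIG_VMAP_STACK the vmap() placement additionally yields unmapped guard pages around the stack, so an overflow faults immediately instead of silently corrupting neighbouring data, which is what allowed the probabilistic stack_overflow_check() to be deleted. Roughly:

	/*
	 * Sketch of the resulting layout (VMAP_STACK case):
	 *
	 *   [ guard page ][ IRQ_STACK_SIZE stack ][ guard page ]
	 *                                         ^
	 *                      hardirq_stack_ptr (stack grows down)
	 */
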
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index a0693b71cfc1..16919a9671fa 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -91,6 +91,8 @@ void __init init_IRQ(void)
for (i = 0; i < nr_legacy_irqs(); i++)
per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
+ BUG_ON(irq_init_percpu_irqstack(smp_processor_id()));
+
x86_init.irqs.intr_init();
}
@@ -104,6 +106,4 @@ void __init native_init_IRQ(void)
if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
setup_irq(2, &irq2);
-
- irq_ctx_init(smp_processor_id());
}
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index f99bd26bd3f1..e631c358f7f4 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -37,7 +37,6 @@ static void bug_at(unsigned char *ip, int line)
static void __ref __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
- void *(*poker)(void *, const void *, size_t),
int init)
{
union jump_code_union jmp;
@@ -50,9 +49,6 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
jmp.offset = jump_entry_target(entry) -
(jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
- if (early_boot_irqs_disabled)
- poker = text_poke_early;
-
if (type == JUMP_LABEL_JMP) {
if (init) {
expect = default_nop; line = __LINE__;
@@ -75,16 +71,19 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
bug_at((void *)jump_entry_code(entry), line);
/*
- * Make text_poke_bp() a default fallback poker.
+ * As long as only a single processor is running and the code is still
+	 * not marked as RO, text_poke_early() can be used; checking that
+	 * system_state is SYSTEM_BOOTING guarantees it. It will be set to
+	 * SYSTEM_SCHEDULING before other cores are awakened and before the
+	 * code is write-protected.
*
* At the time the change is being done, just ignore whether we
* are doing nop -> jump or jump -> nop transition, and assume
* always nop being the 'currently valid' instruction
- *
*/
- if (poker) {
- (*poker)((void *)jump_entry_code(entry), code,
- JUMP_LABEL_NOP_SIZE);
+ if (init || system_state == SYSTEM_BOOTING) {
+ text_poke_early((void *)jump_entry_code(entry), code,
+ JUMP_LABEL_NOP_SIZE);
return;
}
@@ -96,7 +95,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
mutex_lock(&text_mutex);
- __jump_label_transform(entry, type, NULL, 0);
+ __jump_label_transform(entry, type, 0);
mutex_unlock(&text_mutex);
}
@@ -126,5 +125,5 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
jlstate = JL_STATE_NO_UPDATE;
}
if (jlstate == JL_STATE_UPDATE)
- __jump_label_transform(entry, type, text_poke_early, 1);
+ __jump_label_transform(entry, type, 1);
}
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 1f3b77367948..22f60dd26460 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -538,9 +538,17 @@ static int bzImage64_cleanup(void *loader_data)
#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{
- return verify_pefile_signature(kernel, kernel_len,
- VERIFY_USE_SECONDARY_KEYRING,
- VERIFYING_KEXEC_PE_SIGNATURE);
+ int ret;
+
+ ret = verify_pefile_signature(kernel, kernel_len,
+ VERIFY_USE_SECONDARY_KEYRING,
+ VERIFYING_KEXEC_PE_SIGNATURE);
+ if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
+ ret = verify_pefile_signature(kernel, kernel_len,
+ VERIFY_USE_PLATFORM_KEYRING,
+ VERIFYING_KEXEC_PE_SIGNATURE);
+ }
+ return ret;
}
#endif
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 4ff6b4cdb941..13b13311b792 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -747,7 +747,6 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
- char opc[BREAK_INSTR_SIZE];
bpt->type = BP_BREAKPOINT;
err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
@@ -759,18 +758,13 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (!err)
return err;
/*
- * It is safe to call text_poke() because normal kernel execution
+ * It is safe to call text_poke_kgdb() because normal kernel execution
* is stopped on all cores, so long as the text_mutex is not locked.
*/
if (mutex_is_locked(&text_mutex))
return -EBUSY;
- text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE);
- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
- if (err)
- return err;
- if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
- return -EINVAL;
+ text_poke_kgdb((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
bpt->type = BP_POKE_BREAKPOINT;
return err;
@@ -778,22 +772,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- int err;
- char opc[BREAK_INSTR_SIZE];
-
if (bpt->type != BP_POKE_BREAKPOINT)
goto knl_write;
/*
- * It is safe to call text_poke() because normal kernel execution
+ * It is safe to call text_poke_kgdb() because normal kernel execution
* is stopped on all cores, so long as the text_mutex is not locked.
*/
if (mutex_is_locked(&text_mutex))
goto knl_write;
- text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
- if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
- goto knl_write;
- return err;
+ text_poke_kgdb((void *)bpt->bpt_addr, bpt->saved_instr,
+ BREAK_INSTR_SIZE);
+ return 0;
knl_write:
return probe_kernel_write((char *)bpt->bpt_addr,
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index a034cb808e7e..cf52ee0d8711 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -431,8 +431,21 @@ void *alloc_insn_page(void)
void *page;
page = module_alloc(PAGE_SIZE);
- if (page)
- set_memory_ro((unsigned long)page & PAGE_MASK, 1);
+ if (!page)
+ return NULL;
+
+ set_vm_flush_reset_perms(page);
+ /*
+ * First make the page read-only, and only then make it executable to
+ * prevent it from being W+X in between.
+ */
+ set_memory_ro((unsigned long)page, 1);
+
+ /*
+ * TODO: Once additional kernel code protection mechanisms are set, ensure
+ * that the page was not maliciously altered and it is still zeroed.
+ */
+ set_memory_x((unsigned long)page, 1);
return page;
}
@@ -440,8 +453,6 @@ void *alloc_insn_page(void)
/* Recover page to RW mode before releasing it */
void free_insn_page(void *page)
{
- set_memory_nx((unsigned long)page & PAGE_MASK, 1);
- set_memory_rw((unsigned long)page & PAGE_MASK, 1);
module_memfree(page);
}
@@ -569,6 +580,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
unsigned long *sara = stack_addr(regs);
ri->ret_addr = (kprobe_opcode_t *) *sara;
+ ri->fp = sara;
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) &kretprobe_trampoline;
@@ -715,6 +727,7 @@ NOKPROBE_SYMBOL(kprobe_int3_handler);
* calls trampoline_handler() runs, which calls the kretprobe's handler.
*/
asm(
+ ".text\n"
".global kretprobe_trampoline\n"
".type kretprobe_trampoline, @function\n"
"kretprobe_trampoline:\n"
@@ -748,26 +761,48 @@ asm(
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
+static struct kprobe kretprobe_kprobe = {
+ .addr = (void *)kretprobe_trampoline,
+};
+
/*
* Called from kretprobe_trampoline
*/
static __used void *trampoline_handler(struct pt_regs *regs)
{
+ struct kprobe_ctlblk *kcb;
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
+ void *frame_pointer;
+ bool skipped = false;
+
+ preempt_disable();
+
+ /*
+	 * Set a dummy kprobe to avoid kretprobe recursion. Since a
+	 * kretprobe never runs in a kprobe handler, no kprobe must be
+	 * running at this point.
+ */
+ kcb = get_kprobe_ctlblk();
+ __this_cpu_write(current_kprobe, &kretprobe_kprobe);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/* fixup registers */
#ifdef CONFIG_X86_64
regs->cs = __KERNEL_CS;
+	/* On x86-64, we use pt_regs->sp as the return-address holder. */
+ frame_pointer = &regs->sp;
#else
regs->cs = __KERNEL_CS | get_kernel_rpl();
regs->gs = 0;
+	/* On x86-32, we use pt_regs->flags as the return-address holder. */
+ frame_pointer = &regs->flags;
#endif
regs->ip = trampoline_address;
regs->orig_ax = ~0UL;
@@ -789,8 +824,25 @@ static __used void *trampoline_handler(struct pt_regs *regs)
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
+ /*
+		 * Return probes must be pushed on this hash list in the
+		 * correct order (same as return order) so that they can be
+		 * popped correctly. However, if we find one pushed in the
+		 * wrong order, it means we found a function which should not
+		 * be probed, because the out-of-order entry was pushed on the
+		 * path of processing another kretprobe itself.
+ */
+ if (ri->fp != frame_pointer) {
+ if (!skipped)
+ pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
+ skipped = true;
+ continue;
+ }
orig_ret_address = (unsigned long)ri->ret_addr;
+ if (skipped)
+ pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
+ ri->rp->kp.addr);
if (orig_ret_address != trampoline_address)
/*
@@ -808,14 +860,15 @@ static __used void *trampoline_handler(struct pt_regs *regs)
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
+ if (ri->fp != frame_pointer)
+ continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
- get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
- __this_cpu_write(current_kprobe, NULL);
+ __this_cpu_write(current_kprobe, &kretprobe_kprobe);
}
recycle_rp_inst(ri, &empty_rp);
@@ -831,6 +884,9 @@ static __used void *trampoline_handler(struct pt_regs *regs)
kretprobe_hash_unlock(current, &flags);
+ __this_cpu_write(current_kprobe, NULL);
+ preempt_enable();
+
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
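
Note: the new ri->fp check pairs each kretprobe instance with the stack slot that held its return address: arch_prepare_kretprobe() records sara, and on the trampoline's fabricated pt_regs the matching slot is &regs->sp on x86-64 (or &regs->flags on x86-32). A condensed sketch of the two halves:

	/* At probe hit (arch_prepare_kretprobe): remember the slot. */
	ri->ret_addr = (kprobe_opcode_t *)*sara;
	ri->fp = sara;			/* stack slot, used as a cookie */

	/* At trampoline time: consume only instances whose cookie
	 * matches this frame; anything else was pushed out of order. */
	if (ri->fp != frame_pointer)
		continue;
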
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5c93a65ee1e5..3f0cc828cc36 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -67,7 +67,7 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
-static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
+DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
/*
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index e811d4d1c824..904494b924c1 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -104,12 +104,8 @@ static u64 kvm_sched_clock_read(void)
static inline void kvm_sched_clock_init(bool stable)
{
- if (!stable) {
- pv_ops.time.sched_clock = kvm_clock_read;
+ if (!stable)
clear_sched_clock_stable();
- return;
- }
-
kvm_sched_clock_offset = kvm_clock_read();
pv_ops.time.sched_clock = kvm_sched_clock_read;
@@ -355,6 +351,20 @@ void __init kvmclock_init(void)
machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif
kvm_get_preset_lpj();
+
+ /*
+	 * X86_FEATURE_NONSTOP_TSC means the TSC runs at a constant rate
+	 * across P/T states and does not stop in deep C-states.
+	 *
+	 * An invariant TSC exposed by the host means kvmclock is not
+	 * necessary: the TSC can be used as the clocksource instead.
+ */
+ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+ boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+ !check_tsc_unstable())
+ kvm_clock.rating = 299;
+
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
pv_info.name = "KVM";
}
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 6135ae8ce036..b2463fcb20a8 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -113,7 +113,7 @@ static void do_sanity_check(struct mm_struct *mm,
* tables.
*/
WARN_ON(!had_kernel_mapping);
- if (static_cpu_has(X86_FEATURE_PTI))
+ if (boot_cpu_has(X86_FEATURE_PTI))
WARN_ON(!had_user_mapping);
} else {
/*
@@ -121,7 +121,7 @@ static void do_sanity_check(struct mm_struct *mm,
* Sync the pgd to the usermode tables.
*/
WARN_ON(had_kernel_mapping);
- if (static_cpu_has(X86_FEATURE_PTI))
+ if (boot_cpu_has(X86_FEATURE_PTI))
WARN_ON(had_user_mapping);
}
}
@@ -156,7 +156,7 @@ static void map_ldt_struct_to_user(struct mm_struct *mm)
k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
- if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
set_pmd(u_pmd, *k_pmd);
}
@@ -181,7 +181,7 @@ static void map_ldt_struct_to_user(struct mm_struct *mm)
{
pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
- if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}
@@ -208,7 +208,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
spinlock_t *ptl;
int i, nr_pages;
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return 0;
/*
@@ -271,7 +271,7 @@ static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
return;
/* LDT map/unmap is only required for PTI */
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return;
nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
@@ -311,7 +311,7 @@ static void free_ldt_pgtables(struct mm_struct *mm)
unsigned long start = LDT_BASE_ADDR;
unsigned long end = LDT_END_ADDR;
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return;
tlb_gather_mmu(&tlb, mm, start, end);
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index b052e883dd8c..cfa3106faee4 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
p = __vmalloc_node_range(size, MODULE_ALIGN,
MODULES_VADDR + get_module_load_offset(),
MODULES_END, GFP_KERNEL,
- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (p && (kasan_module_alloc(p, size) < 0)) {
vfree(p);
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 3482460d984d..1bfe5c6e6cfe 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -598,8 +598,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
mpf_base = base;
mpf_found = true;
- pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
- base, base + sizeof(*mpf) - 1, mpf);
+ pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
+ base, base + sizeof(*mpf) - 1);
memblock_reserve(base, sizeof(*mpf));
if (mpf->physptr)
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 18bc9b51ac9b..3755d0310026 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -21,13 +21,14 @@
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/atomic.h>
#include <linux/sched/clock.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
-#include <linux/atomic.h>
+#include <asm/cpu_entry_area.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
@@ -487,6 +488,23 @@ static DEFINE_PER_CPU(unsigned long, nmi_cr2);
* switch back to the original IDT.
*/
static DEFINE_PER_CPU(int, update_debug_stack);
+
+static bool notrace is_debug_stack(unsigned long addr)
+{
+ struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks);
+ unsigned long top = CEA_ESTACK_TOP(cs, DB);
+ unsigned long bot = CEA_ESTACK_BOT(cs, DB1);
+
+ if (__this_cpu_read(debug_stack_usage))
+ return true;
+ /*
+ * Note: this covers the guard page between DB and DB1 as well, to
+ * avoid two checks. A valid @addr can never point into the guard
+ * page anyway.
+ */
+ return addr >= bot && addr < top;
+}
+NOKPROBE_SYMBOL(is_debug_stack);
#endif
dotraplinkage notrace void
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c0e0101133f3..7bbaa6baf37f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -121,7 +121,7 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
void __init native_pv_lock_init(void)
{
- if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_disable(&virt_spin_lock_key);
}
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index c06c4c16c6b6..07c30ee17425 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -59,18 +59,34 @@ static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
+ struct x86_perf_regs *perf_regs;
+
+ if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
+ perf_regs = container_of(regs, struct x86_perf_regs, regs);
+ if (!perf_regs->xmm_regs)
+ return 0;
+ return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
+ }
+
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
return 0;
return regs_get_register(regs, pt_regs_offset[idx]);
}
-#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL))
-
#ifdef CONFIG_X86_32
+#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
+ (1ULL << PERF_REG_X86_R9) | \
+ (1ULL << PERF_REG_X86_R10) | \
+ (1ULL << PERF_REG_X86_R11) | \
+ (1ULL << PERF_REG_X86_R12) | \
+ (1ULL << PERF_REG_X86_R13) | \
+ (1ULL << PERF_REG_X86_R14) | \
+ (1ULL << PERF_REG_X86_R15))
+
int perf_reg_validate(u64 mask)
{
- if (!mask || mask & REG_RESERVED)
+ if (!mask || (mask & REG_NOSUPPORT))
return -EINVAL;
return 0;
@@ -96,10 +112,7 @@ void perf_get_regs_user(struct perf_regs *regs_user,
int perf_reg_validate(u64 mask)
{
- if (!mask || mask & REG_RESERVED)
- return -EINVAL;
-
- if (mask & REG_NOSUPPORT)
+ if (!mask || (mask & REG_NOSUPPORT))
return -EINVAL;
return 0;
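
perf_reg_value() now recovers the XMM array through container_of(), relying on pt_regs being embedded in x86_perf_regs. A self-contained sketch of that pointer arithmetic, with a simplified struct layout:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pt_regs { uint64_t ip, sp; };

    struct x86_perf_regs {
        struct pt_regs regs;
        uint64_t *xmm_regs;   /* NULL when XMM sampling is off */
    };

    int main(void)
    {
        uint64_t xmm[16] = { [2] = 0xdeadbeef };
        struct x86_perf_regs pr = { .xmm_regs = xmm };
        struct pt_regs *regs = &pr.regs;

        /* Recover the wrapper from the embedded pt_regs: */
        struct x86_perf_regs *back =
            container_of(regs, struct x86_perf_regs, regs);
        printf("%llx\n", (unsigned long long)back->xmm_regs[2]);
        return 0;
    }
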
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 58ac7be52c7a..d1d312d012a6 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -236,7 +236,7 @@ static int get_cpuid_mode(void)
static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
- if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
+ if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
return -ENODEV;
if (cpuid_enabled)
@@ -426,6 +426,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
u64 msr = x86_spec_ctrl_base;
bool updmsr = false;
+ lockdep_assert_irqs_disabled();
+
/*
* If TIF_SSBD is different, select the proper mitigation
* method. Note that if SSBD mitigation is disabled or permanently
@@ -477,10 +479,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
void speculation_ctrl_update(unsigned long tif)
{
+ unsigned long flags;
+
/* Forced update. Make sure all relevant TIF flags are different */
- preempt_disable();
+ local_irq_save(flags);
__speculation_ctrl_update(~tif, tif);
- preempt_enable();
+ local_irq_restore(flags);
}
/* Called from seccomp/prctl update */
@@ -666,7 +670,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
if (c->x86_vendor != X86_VENDOR_INTEL)
return 0;
- if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
+ if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
return 0;
return 1;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index e471d8e6f0b2..70933193878c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -127,6 +127,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
struct task_struct *tsk;
int err;
+ /*
+ * For a new task use the RESET flags value since the task has no
+ * prior state. All the status flags are zero; DF and all the system
+ * flags must also be 0, specifically IF must be 0 because we context
+ * switch to the new task with interrupts disabled.
+ */
+ frame->flags = X86_EFLAGS_FIXED;
frame->bp = 0;
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6a62f4af9fcf..844a28b29967 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -392,6 +392,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
childregs = task_pt_regs(p);
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
+
frame->bp = 0;
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 725624b6c0c0..09d6bded3c1e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -81,6 +81,19 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
return 0;
}
+/*
+ * Some machines don't handle the default ACPI reboot method and
+ * require the EFI reboot method:
+ */
+static int __init set_efi_reboot(const struct dmi_system_id *d)
+{
+ if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) {
+ reboot_type = BOOT_EFI;
+ pr_info("%s series board detected. Selecting EFI-method for reboot.\n", d->ident);
+ }
+ return 0;
+}
+
void __noreturn machine_real_restart(unsigned int type)
{
local_irq_disable();
@@ -108,7 +121,7 @@ void __noreturn machine_real_restart(unsigned int type)
write_cr3(real_mode_header->trampoline_pgd);
/* Exiting long mode will fail if CR4.PCIDE is set. */
- if (static_cpu_has(X86_FEATURE_PCID))
+ if (boot_cpu_has(X86_FEATURE_PCID))
cr4_clear_bits(X86_CR4_PCIDE);
#endif
@@ -166,6 +179,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
},
},
+ { /* Handle reboot issue on Acer TravelMate X514-51T */
+ .callback = set_efi_reboot,
+ .ident = "Acer TravelMate X514-51T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"),
+ },
+ },
/* Apple */
{ /* Handle problems with rebooting on Apple MacBook5 */
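
set_efi_reboot() is wired up through the same DMI quirk table as the surrounding entries. A compact stand-alone sketch of this table-driven matching, simplified to plain string compares rather than the kernel's dmi_check_system():

    #include <stdio.h>
    #include <string.h>

    struct quirk {
        const char *vendor, *product;
        int (*callback)(const struct quirk *q);
    };

    static int set_efi_reboot(const struct quirk *q)
    {
        printf("%s %s: selecting EFI reboot\n", q->vendor, q->product);
        return 0;
    }

    static const struct quirk quirks[] = {
        { "Acer", "TravelMate X514-51T", set_efi_reboot },
        { 0 }   /* table terminator */
    };

    static void check_quirks(const char *vendor, const char *product)
    {
        for (const struct quirk *q = quirks; q->vendor; q++)
            if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                q->callback(q);
    }

    int main(void)
    {
        check_quirks("Acer", "TravelMate X514-51T");
        return 0;
    }
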
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 3d872a527cd9..905dae880563 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -71,6 +71,7 @@
#include <linux/tboot.h>
#include <linux/jiffies.h>
#include <linux/mem_encrypt.h>
+#include <linux/sizes.h>
#include <linux/usb/xhci-dbgp.h>
#include <video/edid.h>
@@ -448,18 +449,17 @@ static void __init memblock_x86_reserve_range_setup_data(void)
#ifdef CONFIG_KEXEC_CORE
/* 16M alignment for crash kernel regions */
-#define CRASH_ALIGN (16 << 20)
+#define CRASH_ALIGN SZ_16M
/*
* Keep the crash kernel below this limit. On 32-bit, earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions.
- * On 64bit, old kexec-tools need to under 896MiB.
*/
#ifdef CONFIG_X86_32
-# define CRASH_ADDR_LOW_MAX (512 << 20)
-# define CRASH_ADDR_HIGH_MAX (512 << 20)
+# define CRASH_ADDR_LOW_MAX SZ_512M
+# define CRASH_ADDR_HIGH_MAX SZ_512M
#else
-# define CRASH_ADDR_LOW_MAX (896UL << 20)
+# define CRASH_ADDR_LOW_MAX SZ_4G
# define CRASH_ADDR_HIGH_MAX MAXMEM
#endif
@@ -541,21 +541,27 @@ static void __init reserve_crashkernel(void)
}
/* 0 means: find the address automatically */
- if (crash_base <= 0) {
+ if (!crash_base) {
/*
* Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
- * as old kexec-tools loads bzImage below that, unless
- * "crashkernel=size[KMG],high" is specified.
+ * crashkernel=x,high reserves memory over 4G, and also allocates
+ * 256M of extra low memory for DMA buffers and swiotlb.
+ * But that extra memory is not required on all machines, so try
+ * low memory first and fall back to high memory unless
+ * "crashkernel=size[KMG],high" is specified.
*/
- crash_base = memblock_find_in_range(CRASH_ALIGN,
- high ? CRASH_ADDR_HIGH_MAX
- : CRASH_ADDR_LOW_MAX,
- crash_size, CRASH_ALIGN);
+ if (!high)
+ crash_base = memblock_find_in_range(CRASH_ALIGN,
+ CRASH_ADDR_LOW_MAX,
+ crash_size, CRASH_ALIGN);
+ if (!crash_base)
+ crash_base = memblock_find_in_range(CRASH_ALIGN,
+ CRASH_ADDR_HIGH_MAX,
+ crash_size, CRASH_ALIGN);
if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
return;
}
-
} else {
unsigned long long start;
@@ -1005,13 +1011,11 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled(EFI_BOOT))
efi_init();
- dmi_scan_machine();
- dmi_memdev_walk();
- dmi_set_dump_stack_arch_desc();
+ dmi_setup();
/*
* VMware detection requires dmi to be available, so this
- * needs to be done after dmi_scan_machine(), for the boot CPU.
+ * needs to be done after dmi_setup(), for the boot CPU.
*/
init_hypervisor_platform();
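
The reservation above now tries low memory first and only then the full range. A sketch of that two-step fallback, with a hypothetical find_in_range() allocator that pretends low memory is exhausted so the fallback path runs:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_16M        (16ULL << 20)
    #define SZ_4G         (4ULL << 30)
    #define ADDR_HIGH_MAX (64ULL << 40)

    /* Stand-in allocator: low allocations "fail" here. */
    static uint64_t find_in_range(uint64_t start, uint64_t end,
                                  uint64_t size, uint64_t align)
    {
        (void)size;
        if (end <= SZ_4G)
            return 0;
        return (start + align - 1) & ~(align - 1);
    }

    static uint64_t reserve_crashkernel(uint64_t size, int high)
    {
        uint64_t base = 0;

        /* Unless ",high" was requested, prefer memory below 4G ... */
        if (!high)
            base = find_in_range(SZ_16M, SZ_4G, size, SZ_16M);
        /* ... and fall back to the full range if that failed. */
        if (!base)
            base = find_in_range(SZ_16M, ADDR_HIGH_MAX, size, SZ_16M);
        return base;
    }

    int main(void)
    {
        printf("base=%#llx\n",
               (unsigned long long)reserve_crashkernel(256ULL << 20, 0));
        return 0;
    }
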
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 13af08827eef..86663874ef04 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -106,22 +106,22 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
void *ptr;
if (!node_online(node) || !NODE_DATA(node)) {
- ptr = memblock_alloc_from_nopanic(size, align, goal);
+ ptr = memblock_alloc_from(size, align, goal);
pr_info("cpu %d has no node %d or node-local memory\n",
cpu, node);
pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
cpu, size, __pa(ptr));
} else {
- ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
- MEMBLOCK_ALLOC_ACCESSIBLE,
- node);
+ ptr = memblock_alloc_try_nid(size, align, goal,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ node);
pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
cpu, size, node, __pa(ptr));
}
return ptr;
#else
- return memblock_alloc_from_nopanic(size, align, goal);
+ return memblock_alloc_from(size, align, goal);
#endif
}
@@ -244,11 +244,6 @@ void __init setup_per_cpu_areas(void)
per_cpu(x86_cpu_to_logical_apicid, cpu) =
early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
-#ifdef CONFIG_X86_64
- per_cpu(irq_stack_ptr, cpu) =
- per_cpu(irq_stack_union.irq_stack, cpu) +
- IRQ_STACK_SIZE;
-#endif
#ifdef CONFIG_NUMA
per_cpu(x86_cpu_to_node_map, cpu) =
early_per_cpu_map(x86_cpu_to_node_map, cpu);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 08dfd4c1a4f9..dff90fb6a9af 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
-#ifdef CONFIG_X86_64
- /*
- * Fix up SS if needed for the benefit of old DOSEMU and
- * CRIU.
- */
- if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
- user_64bit_mode(regs)))
- force_valid_ss(regs);
-#endif
-
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
regs->orig_ax = -1; /* disable syscall checks */
@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
buf = (void __user *)buf_val;
} get_user_catch(err);
+#ifdef CONFIG_X86_64
+ /*
+ * Fix up SS if needed for the benefit of old DOSEMU and
+ * CRIU.
+ */
+ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
+ force_valid_ss(regs);
+#endif
+
err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
force_iret();
@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
{
struct rt_sigframe __user *frame;
void __user *fp = NULL;
+ unsigned long uc_flags;
int err = 0;
frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
return -EFAULT;
}
+ uc_flags = frame_uc_flags(regs);
+
put_user_try {
/* Create the ucontext. */
- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+ put_user_ex(uc_flags, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
save_altstack_ex(&frame->uc.uc_stack, regs->sp);
@@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
{
#ifdef CONFIG_X86_X32_ABI
struct rt_sigframe_x32 __user *frame;
+ unsigned long uc_flags;
void __user *restorer;
int err = 0;
void __user *fpstate = NULL;
@@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
return -EFAULT;
}
+ uc_flags = frame_uc_flags(regs);
+
put_user_try {
/* Create the ucontext. */
- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+ put_user_ex(uc_flags, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
put_user_ex(0, &frame->uc.uc__pad0);
@@ -688,10 +693,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *set = sigmask_to_save();
compat_sigset_t *cset = (compat_sigset_t *) set;
- /*
- * Increment event counter and perform fixup for the pre-signal
- * frame.
- */
+ /* Perform fixup for the pre-signal frame. */
rseq_signal_deliver(ksig, regs);
/* Set up the stack frame */
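
Hoisting frame_uc_flags() out of put_user_try keeps function calls outside the user-access window. A toy illustration of the pattern; uaccess_begin()/uaccess_end() are stand-ins here (e.g. for STAC/CLAC), not the kernel's API:

    #include <stdio.h>

    static int expensive_flags(void) { return 0x5; }

    static void uaccess_begin(void) { /* e.g. STAC on x86 */ }
    static void uaccess_end(void)   { /* e.g. CLAC on x86 */ }

    int main(void)
    {
        /* Hoisted: no function calls inside the window below. */
        int uc_flags = expensive_flags();
        int frame[4];

        uaccess_begin();
        frame[0] = uc_flags;   /* only plain stores in the window */
        frame[1] = 0;
        uaccess_end();

        printf("%d\n", frame[0]);
        return 0;
    }
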
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ce1a67b70168..73e69aaaa117 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -455,7 +455,7 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
* multicore group inside a NUMA node. If this happens, we will
* discard the MC level of the topology later.
*/
-static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
if (c->phys_proc_id == o->phys_proc_id)
return true;
@@ -546,7 +546,7 @@ void set_cpu_sibling_map(int cpu)
for_each_cpu(i, cpu_sibling_setup_mask) {
o = &cpu_data(i);
- if ((i == cpu) || (has_mp && match_die(c, o))) {
+ if ((i == cpu) || (has_mp && match_pkg(c, o))) {
link_mask(topology_core_cpumask, cpu, i);
/*
@@ -570,7 +570,7 @@ void set_cpu_sibling_map(int cpu)
} else if (i != cpu && !c->booted_cores)
c->booted_cores = cpu_data(i).booted_cores;
}
- if (match_die(c, o) && !topology_same_node(c, o))
+ if (match_pkg(c, o) && !topology_same_node(c, o))
x86_has_numa_in_package = true;
}
@@ -935,20 +935,27 @@ out:
return boot_error;
}
-void common_cpu_up(unsigned int cpu, struct task_struct *idle)
+int common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
+ int ret;
+
/* Just in case we booted with a single CPU. */
alternatives_enable_smp();
per_cpu(current_task, cpu) = idle;
+ /* Initialize the interrupt stack(s) */
+ ret = irq_init_percpu_irqstack(cpu);
+ if (ret)
+ return ret;
+
#ifdef CONFIG_X86_32
/* Stack for startup_32 can be just as for start_secondary onwards */
- irq_ctx_init(cpu);
per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
#else
initial_gs = per_cpu_offset(cpu);
#endif
+ return 0;
}
/*
@@ -1106,7 +1113,9 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
/* the FPU context is blank, nobody can own it */
per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
- common_cpu_up(cpu, tidle);
+ err = common_cpu_up(cpu, tidle);
+ if (err)
+ return err;
err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
if (err) {
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 5c2d71a1dc06..2abf27d7df6b 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -12,78 +12,31 @@
#include <asm/stacktrace.h>
#include <asm/unwind.h>
-static int save_stack_address(struct stack_trace *trace, unsigned long addr,
- bool nosched)
-{
- if (nosched && in_sched_functions(addr))
- return 0;
-
- if (trace->skip > 0) {
- trace->skip--;
- return 0;
- }
-
- if (trace->nr_entries >= trace->max_entries)
- return -1;
-
- trace->entries[trace->nr_entries++] = addr;
- return 0;
-}
-
-static void noinline __save_stack_trace(struct stack_trace *trace,
- struct task_struct *task, struct pt_regs *regs,
- bool nosched)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
struct unwind_state state;
unsigned long addr;
- if (regs)
- save_stack_address(trace, regs->ip, nosched);
+ if (regs && !consume_entry(cookie, regs->ip, false))
+ return;
for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
- if (!addr || save_stack_address(trace, addr, nosched))
+ if (!addr || !consume_entry(cookie, addr, false))
break;
}
-
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
/*
- * Save stack-backtrace addresses into a stack_trace buffer.
+ * This function returns an error if it detects any unreliable features of the
+ * stack. Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
*/
-void save_stack_trace(struct stack_trace *trace)
-{
- trace->skip++;
- __save_stack_trace(trace, current, NULL, false);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
- __save_stack_trace(trace, current, regs, false);
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
- if (!try_get_task_stack(tsk))
- return;
-
- if (tsk == current)
- trace->skip++;
- __save_stack_trace(trace, tsk, NULL, true);
-
- put_task_stack(tsk);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-
-static int __always_inline
-__save_stack_trace_reliable(struct stack_trace *trace,
- struct task_struct *task)
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
{
struct unwind_state state;
struct pt_regs *regs;
@@ -97,7 +50,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
if (regs) {
/* Success path for user tasks */
if (user_mode(regs))
- goto success;
+ return 0;
/*
* Kernel mode registers on the stack indicate an
@@ -120,7 +73,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
if (!addr)
return -EINVAL;
- if (save_stack_address(trace, addr, false))
+ if (!consume_entry(cookie, addr, false))
return -EINVAL;
}
@@ -132,39 +85,9 @@ __save_stack_trace_reliable(struct stack_trace *trace,
if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
return -EINVAL;
-success:
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
-
return 0;
}
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack. Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
-int save_stack_trace_tsk_reliable(struct task_struct *tsk,
- struct stack_trace *trace)
-{
- int ret;
-
- /*
- * If the task doesn't have a stack (e.g., a zombie), the stack is
- * "reliably" empty.
- */
- if (!try_get_task_stack(tsk))
- return 0;
-
- ret = __save_stack_trace_reliable(trace, tsk);
-
- put_task_stack(tsk);
-
- return ret;
-}
-#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
-
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
struct stack_frame_user {
@@ -189,15 +112,15 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
return ret;
}
-static inline void __save_stack_trace_user(struct stack_trace *trace)
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
{
- const struct pt_regs *regs = task_pt_regs(current);
const void __user *fp = (const void __user *)regs->bp;
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = regs->ip;
+ if (!consume_entry(cookie, regs->ip, false))
+ return;
- while (trace->nr_entries < trace->max_entries) {
+ while (1) {
struct stack_frame_user frame;
frame.next_fp = NULL;
@@ -207,8 +130,8 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
if ((unsigned long)fp < regs->sp)
break;
if (frame.ret_addr) {
- trace->entries[trace->nr_entries++] =
- frame.ret_addr;
+ if (!consume_entry(cookie, frame.ret_addr, false))
+ return;
}
if (fp == frame.next_fp)
break;
@@ -216,14 +139,3 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
}
}
-void save_stack_trace_user(struct stack_trace *trace)
-{
- /*
- * Trace user stack if we are not a kernel thread
- */
- if (current->mm) {
- __save_stack_trace_user(trace);
- }
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
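
The stacktrace rework above replaces buffer bookkeeping with a consume-callback walk that the callback can stop early. A simplified sketch; the real stack_trace_consume_fn also takes a 'reliable' flag, dropped here:

    #include <stdbool.h>
    #include <stdio.h>

    typedef bool (*consume_fn)(void *cookie, unsigned long addr);

    struct trace_buf { unsigned long entries[8]; unsigned int nr; };

    static bool store_entry(void *cookie, unsigned long addr)
    {
        struct trace_buf *t = cookie;
        if (t->nr >= 8)
            return false;    /* stop the walk: buffer full */
        t->entries[t->nr++] = addr;
        return true;
    }

    static void walk(consume_fn consume, void *cookie)
    {
        /* Stand-in for unwinding frame by frame: */
        unsigned long fake[] = { 0xf1, 0xf2, 0xf3 };
        for (int i = 0; i < 3; i++)
            if (!consume(cookie, fake[i]))
                break;
    }

    int main(void)
    {
        struct trace_buf t = { .nr = 0 };
        walk(store_entry, &t);
        printf("%u entries\n", t.nr);
        return 0;
    }
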
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 738bf42b0218..be5bc2e47c71 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -71,7 +71,7 @@ int _debug_hotplug_cpu(int cpu, int action)
case 0:
ret = cpu_down(cpu);
if (!ret) {
- pr_info("CPU %u is now offline\n", cpu);
+ pr_info("DEBUG_HOTPLUG_CPU0: CPU %u is now offline\n", cpu);
dev->offline = true;
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
} else
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3fae23834069..15b5e98a86f9 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -185,8 +185,7 @@ static void __init cyc2ns_init_boot_cpu(void)
/*
* Secondary CPUs do not run through tsc_init(), so set up
* all the scale factors for all CPUs, assuming the same
- * speed as the bootup CPU. (cpufreq notifiers will fix this
- * up if their speed diverges)
+ * speed as the bootup CPU.
*/
static void __init cyc2ns_init_secondary_cpus(void)
{
@@ -283,6 +282,7 @@ int __init notsc_setup(char *str)
__setup("notsc", notsc_setup);
static int no_sched_irq_time;
+static int no_tsc_watchdog;
static int __init tsc_setup(char *str)
{
@@ -292,6 +292,8 @@ static int __init tsc_setup(char *str)
no_sched_irq_time = 1;
if (!strcmp(str, "unstable"))
mark_tsc_unstable("boot parameter");
+ if (!strcmp(str, "nowatchdog"))
+ no_tsc_watchdog = 1;
return 1;
}
@@ -937,12 +939,12 @@ void tsc_restore_sched_clock_state(void)
}
#ifdef CONFIG_CPU_FREQ
-/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
+/*
+ * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
* changes.
*
- * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
- * not that important because current Opteron setups do not support
- * scaling on SMP anyroads.
+ * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
+ * as unstable and give up in those cases.
*
* Should fix up last_tsc too. Currently gettimeofday in the
* first tick after the change will be slightly wrong.
@@ -956,22 +958,22 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
- unsigned long *lpj;
- lpj = &boot_cpu_data.loops_per_jiffy;
-#ifdef CONFIG_SMP
- if (!(freq->flags & CPUFREQ_CONST_LOOPS))
- lpj = &cpu_data(freq->cpu).loops_per_jiffy;
-#endif
+ if (num_online_cpus() > 1) {
+ mark_tsc_unstable("cpufreq changes on SMP");
+ return 0;
+ }
if (!ref_freq) {
ref_freq = freq->old;
- loops_per_jiffy_ref = *lpj;
+ loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
tsc_khz_ref = tsc_khz;
}
+
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
- (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
- *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+ boot_cpu_data.loops_per_jiffy =
+ cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
@@ -1349,7 +1351,7 @@ static int __init init_tsc_clocksource(void)
if (tsc_unstable)
goto unreg;
- if (tsc_clocksource_reliable)
+ if (tsc_clocksource_reliable || no_tsc_watchdog)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 3dc26f95d46e..9b9fd4826e7a 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -320,10 +320,14 @@ bool unwind_next_frame(struct unwind_state *state)
}
/* Get the next frame pointer: */
- if (state->regs)
+ if (state->next_bp) {
+ next_bp = state->next_bp;
+ state->next_bp = NULL;
+ } else if (state->regs) {
next_bp = (unsigned long *)state->regs->bp;
- else
+ } else {
next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
+ }
/* Move to the next frame if it's safe: */
if (!update_stack_state(state, next_bp))
@@ -398,6 +402,21 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
bp = get_frame_pointer(task, regs);
+ /*
+ * If we crash with IP==0, the last successfully executed instruction
+ * was probably an indirect function call with a NULL function pointer.
+ * That means that SP points into the middle of an incomplete frame:
+ * *SP is a return pointer, and *(SP-sizeof(unsigned long)) is where we
+ * would have written a frame pointer if we hadn't crashed.
+ * Pretend that the frame is complete and that BP points to it, but save
+ * the real BP so that we can use it when looking for the next frame.
+ */
+ if (regs && regs->ip == 0 &&
+ (unsigned long *)kernel_stack_pointer(regs) >= first_frame) {
+ state->next_bp = bp;
+ bp = ((unsigned long *)kernel_stack_pointer(regs)) - 1;
+ }
+
/* Initialize stack info and make sure the frame data is accessible: */
get_stack_info(bp, state->task, &state->stack_info,
&state->stack_mask);
@@ -410,7 +429,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
*/
while (!unwind_done(state) &&
(!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
- state->bp < first_frame))
+ (state->next_bp == NULL && state->bp < first_frame)))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(__unwind_start);
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 26038eacf74a..89be1be1790c 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -113,6 +113,20 @@ static struct orc_entry *orc_ftrace_find(unsigned long ip)
}
#endif
+/*
+ * If we crash with IP==0, the last successfully executed instruction
+ * was probably an indirect function call with a NULL function pointer,
+ * and we don't have unwind information for NULL.
+ * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
+ * pointer into its parent and then continue normally from there.
+ */
+static struct orc_entry null_orc_entry = {
+ .sp_offset = sizeof(long),
+ .sp_reg = ORC_REG_SP,
+ .bp_reg = ORC_REG_UNDEFINED,
+ .type = ORC_TYPE_CALL
+};
+
static struct orc_entry *orc_find(unsigned long ip)
{
static struct orc_entry *orc;
@@ -120,6 +134,9 @@ static struct orc_entry *orc_find(unsigned long ip)
if (!orc_init)
return NULL;
+ if (ip == 0)
+ return &null_orc_entry;
+
/* For non-init vmlinux addresses, use the fast lookup table: */
if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
unsigned int idx, start, stop;
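
A minimal model of the IP==0 fallback above: rather than failing the lookup, return a static entry that treats the word at SP as the return address. This is a sketch only, with a single-field orc_entry:

    #include <stdio.h>

    struct orc_entry {
        int sp_offset;   /* previous SP, relative to current SP */
    };

    static struct orc_entry null_orc_entry = {
        .sp_offset = sizeof(long),   /* skip the return-address slot */
    };

    static struct orc_entry *orc_find(unsigned long ip)
    {
        if (ip == 0)
            return &null_orc_entry;
        return NULL;   /* stand-in for the real table lookup */
    }

    int main(void)
    {
        struct orc_entry *e = orc_find(0);
        printf("sp_offset=%d\n", e ? e->sp_offset : -1);
        return 0;
    }
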
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index a092b6b40c6b..6a38717d179c 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -369,7 +369,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
preempt_disable();
tsk->thread.sp0 += 16;
- if (static_cpu_has(X86_FEATURE_SEP)) {
+ if (boot_cpu_has(X86_FEATURE_SEP)) {
tsk->thread.sysenter_cs = 0;
refresh_sysenter_cs(&tsk->thread);
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index bad8c51fee6e..0850b5149345 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -141,11 +141,11 @@ SECTIONS
*(.text.__x86.indirect_thunk)
__indirect_thunk_end = .;
#endif
-
- /* End of text section */
- _etext = .;
} :text = 0x9090
+ /* End of text section */
+ _etext = .;
+
NOTES :text :note
EXCEPTION_TABLE(16) :text = 0x9090
@@ -362,7 +362,7 @@ SECTIONS
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
*(.bss..page_aligned)
- *(.bss)
+ *(BSS_MAIN)
BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);
__bss_stop = .;
@@ -403,7 +403,8 @@ SECTIONS
*/
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
-INIT_PER_CPU(irq_stack_union);
+INIT_PER_CPU(fixed_percpu_data);
+INIT_PER_CPU(irq_stack_backing_store);
/*
* Build-time check on the image size:
@@ -412,8 +413,8 @@ INIT_PER_CPU(irq_stack_union);
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
-. = ASSERT((irq_stack_union == 0),
- "irq_stack_union is not at start of per-cpu area");
+. = ASSERT((fixed_percpu_data == 0),
+ "fixed_percpu_data is not at start of per-cpu area");
#endif
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index c07958b59f50..fd3951638ae4 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -405,7 +405,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
- F(CLDEMOTE);
+ F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B);
/* cpuid 7.0.edx */
const u32 kvm_cpuid_7_0_edx_x86_features =
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index c338984c850d..d0d5dd44b4f4 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2331,24 +2331,18 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
+#ifdef CONFIG_X86_64
u32 eax, ebx, ecx, edx;
eax = 0x80000001;
ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
return edx & bit(X86_FEATURE_LM);
+#else
+ return false;
+#endif
}
-#define GET_SMSTATE(type, smbase, offset) \
- ({ \
- type __val; \
- int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
- sizeof(__val)); \
- if (r != X86EMUL_CONTINUE) \
- return X86EMUL_UNHANDLEABLE; \
- __val; \
- })
-
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
desc->g = (flags >> 23) & 1;
@@ -2361,27 +2355,30 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
desc->type = (flags >> 8) & 15;
}
-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
+ int n)
{
struct desc_struct desc;
int offset;
u16 selector;
- selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
+ selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
if (n < 3)
offset = 0x7f84 + n * 12;
else
offset = 0x7f2c + (n - 3) * 12;
- set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
- set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
+ set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
+ set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
+ rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
return X86EMUL_CONTINUE;
}
-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+#ifdef CONFIG_X86_64
+static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
+ int n)
{
struct desc_struct desc;
int offset;
@@ -2390,15 +2387,16 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
offset = 0x7e00 + n * 16;
- selector = GET_SMSTATE(u16, smbase, offset);
- rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
- set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
- set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
- base3 = GET_SMSTATE(u32, smbase, offset + 12);
+ selector = GET_SMSTATE(u16, smstate, offset);
+ rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
+ set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
+ set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
+ base3 = GET_SMSTATE(u32, smstate, offset + 12);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
return X86EMUL_CONTINUE;
}
+#endif
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
u64 cr0, u64 cr3, u64 cr4)
@@ -2445,7 +2443,8 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
return X86EMUL_CONTINUE;
}
-static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
+ const char *smstate)
{
struct desc_struct desc;
struct desc_ptr dt;
@@ -2453,53 +2452,55 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
u32 val, cr0, cr3, cr4;
int i;
- cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
- cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
- ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
- ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
+ cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
+ cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
+ ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
+ ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
for (i = 0; i < 8; i++)
- *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
+ *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
- val = GET_SMSTATE(u32, smbase, 0x7fcc);
+ val = GET_SMSTATE(u32, smstate, 0x7fcc);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
- val = GET_SMSTATE(u32, smbase, 0x7fc8);
+ val = GET_SMSTATE(u32, smstate, 0x7fc8);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
- selector = GET_SMSTATE(u32, smbase, 0x7fc4);
- set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
- set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
+ selector = GET_SMSTATE(u32, smstate, 0x7fc4);
+ set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
+ set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
+ rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
- selector = GET_SMSTATE(u32, smbase, 0x7fc0);
- set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
- set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
+ selector = GET_SMSTATE(u32, smstate, 0x7fc0);
+ set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
+ set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
+ rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
- dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
- dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
+ dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
+ dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
ctxt->ops->set_gdt(ctxt, &dt);
- dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
- dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
+ dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
+ dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
ctxt->ops->set_idt(ctxt, &dt);
for (i = 0; i < 6; i++) {
- int r = rsm_load_seg_32(ctxt, smbase, i);
+ int r = rsm_load_seg_32(ctxt, smstate, i);
if (r != X86EMUL_CONTINUE)
return r;
}
- cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
+ cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
- ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
+ ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}
-static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
+#ifdef CONFIG_X86_64
+static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
+ const char *smstate)
{
struct desc_struct desc;
struct desc_ptr dt;
@@ -2509,43 +2510,43 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
int i, r;
for (i = 0; i < 16; i++)
- *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
+ *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
- ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
- ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
+ ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
+ ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
- val = GET_SMSTATE(u32, smbase, 0x7f68);
+ val = GET_SMSTATE(u32, smstate, 0x7f68);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
- val = GET_SMSTATE(u32, smbase, 0x7f60);
+ val = GET_SMSTATE(u32, smstate, 0x7f60);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
- cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
- cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
- cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
- ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
- val = GET_SMSTATE(u64, smbase, 0x7ed0);
+ cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
+ cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
+ cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
+ ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
+ val = GET_SMSTATE(u64, smstate, 0x7ed0);
ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
- selector = GET_SMSTATE(u32, smbase, 0x7e90);
- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
- set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
- set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
- base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
+ selector = GET_SMSTATE(u32, smstate, 0x7e90);
+ rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
+ set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
+ set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
+ base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
- dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
- dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
+ dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
+ dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
ctxt->ops->set_idt(ctxt, &dt);
- selector = GET_SMSTATE(u32, smbase, 0x7e70);
- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
- set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
- set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
- base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
+ selector = GET_SMSTATE(u32, smstate, 0x7e70);
+ rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
+ set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
+ set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
+ base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
- dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
- dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
+ dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
+ dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
@@ -2553,37 +2554,49 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
return r;
for (i = 0; i < 6; i++) {
- r = rsm_load_seg_64(ctxt, smbase, i);
+ r = rsm_load_seg_64(ctxt, smstate, i);
if (r != X86EMUL_CONTINUE)
return r;
}
return X86EMUL_CONTINUE;
}
+#endif
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
unsigned long cr0, cr4, efer;
+ char buf[512];
u64 smbase;
int ret;
if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
+ smbase = ctxt->ops->get_smbase(ctxt);
+
+ ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
+ if (ret != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+ ctxt->ops->set_nmi_mask(ctxt, false);
+
+ ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+ ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
/*
* Get back to real mode, to prepare a safe state in which to load
* CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
* supports long mode.
*/
- cr4 = ctxt->ops->get_cr(ctxt, 4);
if (emulator_has_longmode(ctxt)) {
struct desc_struct cs_desc;
/* Zero CR4.PCIDE before CR0.PG. */
- if (cr4 & X86_CR4_PCIDE) {
+ cr4 = ctxt->ops->get_cr(ctxt, 4);
+ if (cr4 & X86_CR4_PCIDE)
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
- cr4 &= ~X86_CR4_PCIDE;
- }
/* A 32-bit code segment is required to clear EFER.LMA. */
memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2597,39 +2610,39 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
if (cr0 & X86_CR0_PE)
ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
- /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
- if (cr4 & X86_CR4_PAE)
- ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
-
- /* And finally go back to 32-bit mode. */
- efer = 0;
- ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+ if (emulator_has_longmode(ctxt)) {
+ /* Clear CR4.PAE before clearing EFER.LME. */
+ cr4 = ctxt->ops->get_cr(ctxt, 4);
+ if (cr4 & X86_CR4_PAE)
+ ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
- smbase = ctxt->ops->get_smbase(ctxt);
+ /* And finally go back to 32-bit mode. */
+ efer = 0;
+ ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+ }
/*
* Give pre_leave_smm() a chance to make ISA-specific changes to the
* vCPU state (e.g. enter guest mode) before loading state from the SMM
* state-save area.
*/
- if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+ if (ctxt->ops->pre_leave_smm(ctxt, buf))
return X86EMUL_UNHANDLEABLE;
+#ifdef CONFIG_X86_64
if (emulator_has_longmode(ctxt))
- ret = rsm_load_state_64(ctxt, smbase + 0x8000);
+ ret = rsm_load_state_64(ctxt, buf);
else
- ret = rsm_load_state_32(ctxt, smbase + 0x8000);
+#endif
+ ret = rsm_load_state_32(ctxt, buf);
if (ret != X86EMUL_CONTINUE) {
/* FIXME: should triple fault */
return X86EMUL_UNHANDLEABLE;
}
- if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
- ctxt->ops->set_nmi_mask(ctxt, false);
+ ctxt->ops->post_leave_smm(ctxt);
- ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
- ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
return X86EMUL_CONTINUE;
}
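
em_rsm() now reads the 512-byte state-save area once and decodes fields from a local buffer; the new GET_SMSTATE(type, buf, offset) rebases offsets by 0x7e00, the lowest offset used. A self-contained sketch of that decoding:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define GET_SMSTATE(type, buf, offset) \
        (*(type *)((buf) + (offset) - 0x7e00))

    int main(void)
    {
        char buf[512];

        /* Stand-in for the single read_phys() of smbase + 0xfe00: */
        memset(buf, 0, sizeof(buf));
        *(uint32_t *)(buf + 0x7ffc - 0x7e00) = 0x80000011; /* fake CR0 */

        uint32_t cr0 = GET_SMSTATE(uint32_t, buf, 0x7ffc);
        printf("cr0=%#x\n", cr0);
        return 0;
    }
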
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 89d20ed1d2e8..cc24b3a32c44 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
new_config.enable = 0;
stimer->config.as_uint64 = new_config.as_uint64;
- stimer_mark_pending(stimer, false);
+ if (stimer->config.enable)
+ stimer_mark_pending(stimer, false);
+
return 0;
}
@@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
stimer->config.enable = 0;
else if (stimer->config.auto_enable)
stimer->config.enable = 1;
- stimer_mark_pending(stimer, false);
+
+ if (stimer->config.enable)
+ stimer_mark_pending(stimer, false);
+
return 0;
}
@@ -1366,7 +1371,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
valid_bank_mask = BIT_ULL(0);
sparse_banks[0] = flush.processor_mask;
- all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
+
+ /*
+ * Work around possible WS2012 bug: it sends hypercalls
+ * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
+ * while also expecting us to flush something and crashing if
+ * we don't. Let's treat processor_mask == 0 the same as
+ * HV_FLUSH_ALL_PROCESSORS.
+ */
+ all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
+ flush.processor_mask == 0;
} else {
if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
sizeof(flush_ex))))
@@ -1729,7 +1743,7 @@ static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
mutex_lock(&hv->hv_lock);
ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
mutex_unlock(&hv->hv_lock);
if (ret >= 0)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index af192895b1fc..4a6dc54cc12b 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -653,7 +653,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
pid_t pid_nr;
int ret;
- pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
+ pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL_ACCOUNT);
if (!pit)
return NULL;
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index bdcd4139eca9..8b38bb4868a6 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -583,7 +583,7 @@ int kvm_pic_init(struct kvm *kvm)
struct kvm_pic *s;
int ret;
- s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
+ s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL_ACCOUNT);
if (!s)
return -ENOMEM;
spin_lock_init(&s->lock);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 4e822ad363f3..1add1bc881e2 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -622,7 +622,7 @@ int kvm_ioapic_init(struct kvm *kvm)
struct kvm_ioapic *ioapic;
int ret;
- ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
+ ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
if (!ioapic)
return -ENOMEM;
spin_lock_init(&ioapic->lock);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4b6c2da7265c..bd13fdddbdc4 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -70,7 +70,6 @@
#define APIC_BROADCAST 0xFF
#define X2APIC_BROADCAST 0xFFFFFFFFul
-static bool lapic_timer_advance_adjust_done = false;
#define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
@@ -138,6 +137,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
if (offset <= max_apic_id) {
u8 cluster_size = min(max_apic_id - offset + 1, 16U);
+ offset = array_index_nospec(offset, map->max_apic_id + 1);
*cluster = &map->phys_map[offset];
*mask = dest_id & (0xffff >> (16 - cluster_size));
} else {
@@ -181,7 +181,8 @@ static void recalculate_apic_map(struct kvm *kvm)
max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
new = kvzalloc(sizeof(struct kvm_apic_map) +
- sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL);
+ sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
+ GFP_KERNEL_ACCOUNT);
if (!new)
goto out;
@@ -900,7 +901,8 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
if (irq->dest_id > map->max_apic_id) {
*bitmap = 0;
} else {
- *dst = &map->phys_map[irq->dest_id];
+ u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
+ *dst = &map->phys_map[dest_id];
*bitmap = 1;
}
return true;
@@ -1479,14 +1481,32 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
return false;
}
+static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
+{
+ u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
+
+ /*
+ * If the guest TSC is running at a different ratio than the host, then
+ * convert the delay to nanoseconds to achieve an accurate delay. Note
+ * that __delay() uses delay_tsc whenever the hardware has TSC, thus
+ * always for VMX enabled hardware.
+ */
+ if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
+ __delay(min(guest_cycles,
+ nsec_to_cycles(vcpu, timer_advance_ns)));
+ } else {
+ u64 delay_ns = guest_cycles * 1000000ULL;
+ do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
+ ndelay(min_t(u32, delay_ns, timer_advance_ns));
+ }
+}
+
void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
u64 guest_tsc, tsc_deadline, ns;
- if (!lapic_in_kernel(vcpu))
- return;
-
if (apic->lapic_timer.expired_tscdeadline == 0)
return;
@@ -1498,33 +1518,37 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
- /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
if (guest_tsc < tsc_deadline)
- __delay(min(tsc_deadline - guest_tsc,
- nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
+ __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
- if (!lapic_timer_advance_adjust_done) {
+ if (!apic->lapic_timer.timer_advance_adjust_done) {
/* too early */
if (guest_tsc < tsc_deadline) {
ns = (tsc_deadline - guest_tsc) * 1000000ULL;
do_div(ns, vcpu->arch.virtual_tsc_khz);
- lapic_timer_advance_ns -= min((unsigned int)ns,
- lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+ timer_advance_ns -= min((u32)ns,
+ timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
} else {
/* too late */
ns = (guest_tsc - tsc_deadline) * 1000000ULL;
do_div(ns, vcpu->arch.virtual_tsc_khz);
- lapic_timer_advance_ns += min((unsigned int)ns,
- lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+ timer_advance_ns += min((u32)ns,
+ timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
}
if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
- lapic_timer_advance_adjust_done = true;
+ apic->lapic_timer.timer_advance_adjust_done = true;
+ if (unlikely(timer_advance_ns > 5000)) {
+ timer_advance_ns = 0;
+ apic->lapic_timer.timer_advance_adjust_done = true;
+ }
+ apic->lapic_timer.timer_advance_ns = timer_advance_ns;
}
}
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
- u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
+ struct kvm_timer *ktimer = &apic->lapic_timer;
+ u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
u64 ns = 0;
ktime_t expire;
struct kvm_vcpu *vcpu = apic->vcpu;
@@ -1539,13 +1563,15 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
now = ktime_get();
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
- if (likely(tscdeadline > guest_tsc)) {
- ns = (tscdeadline - guest_tsc) * 1000000ULL;
- do_div(ns, this_tsc_khz);
+
+ ns = (tscdeadline - guest_tsc) * 1000000ULL;
+ do_div(ns, this_tsc_khz);
+
+ if (likely(tscdeadline > guest_tsc) &&
+ likely(ns > apic->lapic_timer.timer_advance_ns)) {
expire = ktime_add_ns(now, ns);
- expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
- hrtimer_start(&apic->lapic_timer.timer,
- expire, HRTIMER_MODE_ABS_PINNED);
+ expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
+ hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_PINNED);
} else
apic_timer_expired(apic);
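
The guest-cycles-to-nanoseconds conversion used in __wait_lapic_expire() above is plain arithmetic: ns = cycles * 1,000,000 / tsc_khz. A worked example:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cycles_to_ns(uint64_t cycles, uint64_t tsc_khz)
    {
        return cycles * 1000000ULL / tsc_khz;
    }

    int main(void)
    {
        /* At 2.5 GHz (2,500,000 kHz), 5000 cycles is 2000 ns. */
        printf("%llu ns\n",
               (unsigned long long)cycles_to_ns(5000, 2500000));
        return 0;
    }
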
@@ -2252,20 +2278,20 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}
-int kvm_create_lapic(struct kvm_vcpu *vcpu)
+int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
{
struct kvm_lapic *apic;
ASSERT(vcpu != NULL);
apic_debug("apic_init %d\n", vcpu->vcpu_id);
- apic = kzalloc(sizeof(*apic), GFP_KERNEL);
+ apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
if (!apic)
goto nomem;
vcpu->arch.apic = apic;
- apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
+ apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!apic->regs) {
printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
vcpu->vcpu_id);
@@ -2276,6 +2302,14 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_PINNED);
apic->lapic_timer.timer.function = apic_timer_fn;
+ if (timer_advance_ns == -1) {
+ apic->lapic_timer.timer_advance_ns = 1000;
+ apic->lapic_timer.timer_advance_adjust_done = false;
+ } else {
+ apic->lapic_timer.timer_advance_ns = timer_advance_ns;
+ apic->lapic_timer.timer_advance_adjust_done = true;
+ }
+
/*
* APIC is created enabled. This will prevent kvm_lapic_set_base from
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index ff6ef9c3d760..d6d049ba3045 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -31,8 +31,10 @@ struct kvm_timer {
u32 timer_mode_mask;
u64 tscdeadline;
u64 expired_tscdeadline;
+ u32 timer_advance_ns;
atomic_t pending; /* accumulated triggered timers */
bool hv_timer_in_use;
+ bool timer_advance_adjust_done;
};
struct kvm_lapic {
@@ -62,7 +64,7 @@ struct kvm_lapic {
struct dest_map;
-int kvm_create_lapic(struct kvm_vcpu *vcpu);
+int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f2d1d230d5b8..d9c7b45d231f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -109,9 +109,11 @@ module_param(dbg, bool, 0644);
(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
-#define PT64_BASE_ADDR_MASK __sme_clr((((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
-#define PT64_DIR_BASE_ADDR_MASK \
- (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
+#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
+#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
+#else
+#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+#endif
#define PT64_LVL_ADDR_MASK(level) \
(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT64_LEVEL_BITS))) - 1))
@@ -180,7 +182,7 @@ struct kvm_shadow_walk_iterator {
static const union kvm_mmu_page_role mmu_base_role_mask = {
.cr0_wp = 1,
- .cr4_pae = 1,
+ .gpte_is_8_bytes = 1,
.nxe = 1,
.smep_andnot_wp = 1,
.smap_andnot_wp = 1,
@@ -330,53 +332,56 @@ static inline bool is_access_track_spte(u64 spte)
}
/*
- * the low bit of the generation number is always presumed to be zero.
- * This disables mmio caching during memslot updates. The concept is
- * similar to a seqcount but instead of retrying the access we just punt
- * and ignore the cache.
+ * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
+ * the memslots generation and is derived as follows:
*
- * spte bits 3-11 are used as bits 1-9 of the generation number,
- * the bits 52-61 are used as bits 10-19 of the generation number.
+ * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
+ * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61
+ *
+ * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
+ * the MMIO generation number, as doing so would require stealing a bit from
+ * the "real" generation number and thus effectively halve the maximum number
+ * of MMIO generations that can be handled before encountering a wrap (which
+ * requires a full MMU zap). The flag is instead explicitly queried when
+ * checking for MMIO spte cache hits.
*/
-#define MMIO_SPTE_GEN_LOW_SHIFT 2
-#define MMIO_SPTE_GEN_HIGH_SHIFT 52
+#define MMIO_SPTE_GEN_MASK GENMASK_ULL(18, 0)
-#define MMIO_GEN_SHIFT 20
-#define MMIO_GEN_LOW_SHIFT 10
-#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2)
-#define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
+#define MMIO_SPTE_GEN_LOW_START 3
+#define MMIO_SPTE_GEN_LOW_END 11
+#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
+ MMIO_SPTE_GEN_LOW_START)
-static u64 generation_mmio_spte_mask(unsigned int gen)
+#define MMIO_SPTE_GEN_HIGH_START 52
+#define MMIO_SPTE_GEN_HIGH_END 61
+#define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
+ MMIO_SPTE_GEN_HIGH_START)
+static u64 generation_mmio_spte_mask(u64 gen)
{
u64 mask;
- WARN_ON(gen & ~MMIO_GEN_MASK);
+ WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
- mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
- mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
+ mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
+ mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
return mask;
}
-static unsigned int get_mmio_spte_generation(u64 spte)
+static u64 get_mmio_spte_generation(u64 spte)
{
- unsigned int gen;
+ u64 gen;
spte &= ~shadow_mmio_mask;
- gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
- gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
+ gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
+ gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
return gen;
}
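
The pack/unpack pair above is easy to sanity-check outside the kernel. The following user-space sketch re-derives GENMASK_ULL and shortens the mask names (both local assumptions, not kernel code), and asserts the round-trip for one small sample generation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local re-derivation of the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define GEN_MASK	GENMASK_ULL(18, 0)	/* MMIO_SPTE_GEN_MASK      */
#define LOW_START	3
#define LOW_MASK	GENMASK_ULL(11, 3)	/* MMIO_SPTE_GEN_LOW_MASK  */
#define HIGH_START	52
#define HIGH_MASK	GENMASK_ULL(61, 52)	/* MMIO_SPTE_GEN_HIGH_MASK */

/* Mirrors generation_mmio_spte_mask() above. */
static uint64_t pack_gen(uint64_t gen)
{
	uint64_t mask;

	mask  = (gen << LOW_START) & LOW_MASK;
	mask |= (gen << HIGH_START) & HIGH_MASK;
	return mask;
}

/* Mirrors get_mmio_spte_generation(), minus the shadow_mmio_mask strip. */
static uint64_t unpack_gen(uint64_t spte)
{
	uint64_t gen;

	gen  = (spte & LOW_MASK) >> LOW_START;
	gen |= (spte & HIGH_MASK) >> HIGH_START;
	return gen;
}

int main(void)
{
	uint64_t gen = 0x2ab;	/* arbitrary small sample generation */

	printf("gen mask %#llx, low %#llx, high %#llx\n",
	       (unsigned long long)GEN_MASK,
	       (unsigned long long)LOW_MASK,
	       (unsigned long long)HIGH_MASK);
	assert(unpack_gen(pack_gen(gen)) == gen);
	return 0;
}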
-static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
-}
-
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
unsigned access)
{
- unsigned int gen = kvm_current_mmio_generation(vcpu);
+ u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
u64 mask = generation_mmio_spte_mask(gen);
u64 gpa = gfn << PAGE_SHIFT;
@@ -386,6 +391,8 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
<< shadow_nonpresent_or_rsvd_mask_len;
+ page_header(__pa(sptep))->mmio_cached = true;
+
trace_mark_mmio_spte(sptep, gfn, access, gen);
mmu_spte_set(sptep, mask);
}
@@ -407,7 +414,7 @@ static gfn_t get_mmio_spte_gfn(u64 spte)
static unsigned get_mmio_spte_access(u64 spte)
{
- u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
+ u64 mask = generation_mmio_spte_mask(MMIO_SPTE_GEN_MASK) | shadow_mmio_mask;
return (spte & ~mask) & ~PAGE_MASK;
}
@@ -424,9 +431,13 @@ static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
- unsigned int kvm_gen, spte_gen;
+ u64 kvm_gen, spte_gen, gen;
+
+ gen = kvm_vcpu_memslots(vcpu)->generation;
+ if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
+ return false;
- kvm_gen = kvm_current_mmio_generation(vcpu);
+ kvm_gen = gen & MMIO_SPTE_GEN_MASK;
spte_gen = get_mmio_spte_generation(spte);
trace_check_mmio_spte(spte, kvm_gen, spte_gen);
@@ -959,7 +970,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
- obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
+ obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT);
if (!obj)
return cache->nobjs >= min ? 0 : -ENOMEM;
cache->objects[cache->nobjs++] = obj;
@@ -1996,7 +2007,7 @@ static int is_empty_shadow_page(u64 *spt)
* aggregate version in order to make the slab shrinker
* faster
*/
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
{
kvm->arch.n_used_mmu_pages += nr;
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2049,12 +2060,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
if (!direct)
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-
- /*
- * The active_mmu_pages list is the FIFO list, do not move the
- * page until it is zapped. kvm_zap_obsolete_pages depends on
- * this feature. See the comments in kvm_zap_obsolete_pages().
- */
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
kvm_mod_used_mmu_pages(vcpu->kvm, +1);
return sp;
@@ -2195,35 +2200,33 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
--kvm->stat.mmu_unsync;
}
-static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
- struct list_head *invalid_list);
+static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list);
-/*
- * NOTE: we should pay more attention on the zapped-obsolete page
- * (is_obsolete_sp(sp) && sp->role.invalid) when you do hash list walk
- * since it has been deleted from active_mmu_pages but still can be found
- * at hast list.
- *
- * for_each_valid_sp() has skipped that kind of pages.
- */
+
#define for_each_valid_sp(_kvm, _sp, _gfn) \
hlist_for_each_entry(_sp, \
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
- if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \
+ if ((_sp)->role.invalid) { \
} else
#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
for_each_valid_sp(_kvm, _sp, _gfn) \
if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
+static inline bool is_ept_sp(struct kvm_mmu_page *sp)
+{
+ return sp->role.cr0_wp && sp->role.smap_andnot_wp;
+}
+
/* @sp->gfn should be write-protected at the call site */
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
- if (sp->role.cr4_pae != !!is_pae(vcpu)
- || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+ if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
+ vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
@@ -2231,18 +2234,28 @@ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
return true;
}
+static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
+ struct list_head *invalid_list,
+ bool remote_flush)
+{
+ if (!remote_flush && list_empty(invalid_list))
+ return false;
+
+ if (!list_empty(invalid_list))
+ kvm_mmu_commit_zap_page(kvm, invalid_list);
+ else
+ kvm_flush_remote_tlbs(kvm);
+ return true;
+}
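
The helper's logic is worth spelling out: committing a non-empty invalid_list already flushes remote TLBs as part of kvm_mmu_commit_zap_page(), so the explicit flush is only issued when a flush was requested with nothing queued. A minimal stand-alone model of that decision (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Models the decision in kvm_mmu_remote_flush_or_zap(). */
static const char *action(bool remote_flush, bool list_nonempty)
{
	if (!remote_flush && !list_nonempty)
		return "nothing (returns false)";
	return list_nonempty ? "commit zap (flush implied)" : "remote flush";
}

int main(void)
{
	printf("%s\n", action(false, false));
	printf("%s\n", action(true,  false));
	printf("%s\n", action(false, true));
	printf("%s\n", action(true,  true));
	return 0;
}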
+
static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
struct list_head *invalid_list,
bool remote_flush, bool local_flush)
{
- if (!list_empty(invalid_list)) {
- kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
+ if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
return;
- }
- if (remote_flush)
- kvm_flush_remote_tlbs(vcpu->kvm);
- else if (local_flush)
+ if (local_flush)
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
@@ -2253,11 +2266,6 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif
-static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
- return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
-}
-
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
@@ -2421,7 +2429,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
role.level = level;
role.direct = direct;
if (role.direct)
- role.cr4_pae = 0;
+ role.gpte_is_8_bytes = true;
role.access = access;
if (!vcpu->arch.mmu->direct_map
&& vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
@@ -2482,7 +2490,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
- sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
clear_page(sp->spt);
trace_kvm_mmu_get_page(sp, true);
@@ -2668,17 +2675,22 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
return zapped;
}
-static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
- struct list_head *invalid_list)
+static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
+ struct kvm_mmu_page *sp,
+ struct list_head *invalid_list,
+ int *nr_zapped)
{
- int ret;
+ bool list_unstable;
trace_kvm_mmu_prepare_zap_page(sp);
++kvm->stat.mmu_shadow_zapped;
- ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
+ *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
kvm_mmu_page_unlink_children(kvm, sp);
kvm_mmu_unlink_parents(kvm, sp);
+ /* Zapping children means active_mmu_pages has become unstable. */
+ list_unstable = *nr_zapped;
+
if (!sp->role.invalid && !sp->role.direct)
unaccount_shadowed(kvm, sp);
@@ -2686,22 +2698,27 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
kvm_unlink_unsync_page(kvm, sp);
if (!sp->root_count) {
/* Count self */
- ret++;
+ (*nr_zapped)++;
list_move(&sp->link, invalid_list);
kvm_mod_used_mmu_pages(kvm, -1);
} else {
list_move(&sp->link, &kvm->arch.active_mmu_pages);
- /*
- * The obsolete pages can not be used on any vcpus.
- * See the comments in kvm_mmu_invalidate_zap_all_pages().
- */
- if (!sp->role.invalid && !is_obsolete_sp(kvm, sp))
+ if (!sp->role.invalid)
kvm_reload_remote_mmus(kvm);
}
sp->role.invalid = 1;
- return ret;
+ return list_unstable;
+}
+
+static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ struct list_head *invalid_list)
+{
+ int nr_zapped;
+
+ __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
+ return nr_zapped;
}
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
@@ -2746,7 +2763,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
* Changing the number of mmu pages allocated to the vm
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
*/
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
LIST_HEAD(invalid_list);
@@ -3703,7 +3720,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
u64 *lm_root;
- lm_root = (void*)get_zeroed_page(GFP_KERNEL);
+ lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (lm_root == NULL)
return 1;
@@ -4204,14 +4221,6 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
return false;
if (cached_root_available(vcpu, new_cr3, new_role)) {
- /*
- * It is possible that the cached previous root page is
- * obsolete because of a change in the MMU
- * generation number. However, that is accompanied by
- * KVM_REQ_MMU_RELOAD, which will free the root that we
- * have set here and allocate a new one.
- */
-
kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
if (!skip_tlb_flush) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@ -4772,6 +4781,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
union kvm_mmu_extended_role ext = {0};
ext.cr0_pg = !!is_paging(vcpu);
+ ext.cr4_pae = !!is_pae(vcpu);
ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
ext.cr4_pse = !!is_pse(vcpu);
@@ -4791,7 +4801,6 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
role.base.access = ACC_ALL;
role.base.nxe = !!is_nx(vcpu);
- role.base.cr4_pae = !!is_pae(vcpu);
role.base.cr0_wp = is_write_protection(vcpu);
role.base.smm = is_smm(vcpu);
role.base.guest_mode = is_guest_mode(vcpu);
@@ -4812,6 +4821,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
role.base.ad_disabled = (shadow_accessed_mask == 0);
role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
role.base.direct = true;
+ role.base.gpte_is_8_bytes = true;
return role;
}
@@ -4876,6 +4886,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
role.base.smap_andnot_wp = role.ext.cr4_smap &&
!is_write_protection(vcpu);
role.base.direct = !is_paging(vcpu);
+ role.base.gpte_is_8_bytes = !!is_pae(vcpu);
if (!is_long_mode(vcpu))
role.base.level = PT32E_ROOT_LEVEL;
@@ -4915,18 +4926,26 @@ static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
bool execonly)
{
- union kvm_mmu_role role;
+ union kvm_mmu_role role = {0};
- /* Base role is inherited from root_mmu */
- role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
- role.ext = kvm_calc_mmu_role_ext(vcpu);
+ /* SMM flag is inherited from root_mmu */
+ role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
role.base.level = PT64_ROOT_4LEVEL;
+ role.base.gpte_is_8_bytes = true;
role.base.direct = false;
role.base.ad_disabled = !accessed_dirty;
role.base.guest_mode = true;
role.base.access = ACC_ALL;
+ /*
+ * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
+ * SMAP variation to denote shadow EPT entries.
+ */
+ role.base.cr0_wp = true;
+ role.base.smap_andnot_wp = true;
+
+ role.ext = kvm_calc_mmu_role_ext(vcpu);
role.ext.execonly = execonly;
return role;
@@ -5176,7 +5195,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
gpa, bytes, sp->role.word);
offset = offset_in_page(gpa);
- pte_size = sp->role.cr4_pae ? 8 : 4;
+ pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
/*
	 * Sometimes, the OS only writes the last few bytes to update status
@@ -5200,7 +5219,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
page_offset = offset_in_page(gpa);
level = sp->role.level;
*nspte = 1;
- if (!sp->role.cr4_pae) {
+ if (!sp->role.gpte_is_8_bytes) {
page_offset <<= 1; /* 32->64 */
/*
* A 32-bit pde maps 4MB while the shadow pdes map
@@ -5390,10 +5409,12 @@ emulate:
* This can happen if a guest gets a page-fault on data access but the HW
 * table walker is not able to read the instruction page (e.g. the instruction
* page is not present in memory). In those cases we simply restart the
- * guest.
+ * guest, with the exception of AMD Erratum 1096, which is unrecoverable.
*/
- if (unlikely(insn && !insn_len))
- return 1;
+ if (unlikely(insn && !insn_len)) {
+ if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
+ return 1;
+ }
er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
@@ -5486,6 +5507,79 @@ void kvm_disable_tdp(void)
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);
+
+/* The return value indicates whether a TLB flush on all vcpus is needed. */
+typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+
+/* The caller should hold mmu-lock before calling this function. */
+static __always_inline bool
+slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, int start_level, int end_level,
+ gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+{
+ struct slot_rmap_walk_iterator iterator;
+ bool flush = false;
+
+ for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
+ end_gfn, &iterator) {
+ if (iterator.rmap)
+ flush |= fn(kvm, iterator.rmap);
+
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (flush && lock_flush_tlb) {
+ kvm_flush_remote_tlbs_with_address(kvm,
+ start_gfn,
+ iterator.gfn - start_gfn + 1);
+ flush = false;
+ }
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
+
+ if (flush && lock_flush_tlb) {
+ kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
+ end_gfn - start_gfn + 1);
+ flush = false;
+ }
+
+ return flush;
+}
+
+static __always_inline bool
+slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, int start_level, int end_level,
+ bool lock_flush_tlb)
+{
+ return slot_handle_level_range(kvm, memslot, fn, start_level,
+ end_level, memslot->base_gfn,
+ memslot->base_gfn + memslot->npages - 1,
+ lock_flush_tlb);
+}
+
+static __always_inline bool
+slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, bool lock_flush_tlb)
+{
+ return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
+ PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+}
+
+static __always_inline bool
+slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, bool lock_flush_tlb)
+{
+ return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
+ PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+}
+
+static __always_inline bool
+slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, bool lock_flush_tlb)
+{
+ return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
+ PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
+}
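
These wrappers differ only in the level range they pass down. Below is a rough user-space model of the composition; the memslot struct, handler type, and per-gfn walk are simplified stand-ins, and the rmap iterator plus the lock-break and flush logic are omitted:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel types; illustration only. */
struct memslot {
	unsigned long base_gfn;
	unsigned long npages;
};

typedef bool (*level_handler)(struct memslot *slot, int level,
			      unsigned long gfn);

/* Folds the handler's "flush needed" results over every level and gfn,
 * the way slot_handle_level_range() folds them over rmap ranges. */
static bool handle_level_range(struct memslot *slot, level_handler fn,
			       int start_level, int end_level,
			       unsigned long start_gfn, unsigned long end_gfn)
{
	bool flush = false;
	unsigned long gfn;
	int level;

	for (level = start_level; level <= end_level; level++)
		for (gfn = start_gfn; gfn <= end_gfn; gfn++)
			flush |= fn(slot, level, gfn);
	return flush;
}

static bool noop_handler(struct memslot *slot, int level, unsigned long gfn)
{
	(void)slot; (void)level; (void)gfn;
	return false;	/* nothing zapped, no flush needed */
}

int main(void)
{
	struct memslot slot = { .base_gfn = 0x1000, .npages = 16 };

	/* slot_handle_all_level() is this call with levels 1 through 3
	 * (4 KiB, 2 MiB, 1 GiB), assuming three page sizes. */
	bool flush = handle_level_range(&slot, noop_handler, 1, 3,
					slot.base_gfn,
					slot.base_gfn + slot.npages - 1);

	printf("flush=%d\n", flush);
	return 0;
}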
+
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
free_page((unsigned long)vcpu->arch.mmu->pae_root);
@@ -5505,7 +5599,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
* Therefore we need to allocate shadow page tables in the first
* 4GB of memory, which happens to fit the DMA32 zone.
*/
- page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+ page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
if (!page)
return -ENOMEM;
@@ -5543,105 +5637,62 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node)
{
- kvm_mmu_invalidate_zap_all_pages(kvm);
-}
-
-void kvm_mmu_init_vm(struct kvm *kvm)
-{
- struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
-
- node->track_write = kvm_mmu_pte_write;
- node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
- kvm_page_track_register_notifier(kvm, node);
-}
+ struct kvm_mmu_page *sp;
+ LIST_HEAD(invalid_list);
+ unsigned long i;
+ bool flush;
+ gfn_t gfn;
-void kvm_mmu_uninit_vm(struct kvm *kvm)
-{
- struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+ spin_lock(&kvm->mmu_lock);
- kvm_page_track_unregister_notifier(kvm, node);
-}
+ if (list_empty(&kvm->arch.active_mmu_pages))
+ goto out_unlock;
-/* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+ flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-/* The caller should hold mmu-lock before calling this function. */
-static __always_inline bool
-slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, int start_level, int end_level,
- gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
-{
- struct slot_rmap_walk_iterator iterator;
- bool flush = false;
+ for (i = 0; i < slot->npages; i++) {
+ gfn = slot->base_gfn + i;
- for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
- end_gfn, &iterator) {
- if (iterator.rmap)
- flush |= fn(kvm, iterator.rmap);
+ for_each_valid_sp(kvm, sp, gfn) {
+ if (sp->gfn != gfn)
+ continue;
+ kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+ }
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
- if (flush && lock_flush_tlb) {
- kvm_flush_remote_tlbs(kvm);
- flush = false;
- }
+ kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+ flush = false;
cond_resched_lock(&kvm->mmu_lock);
}
}
+ kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
- if (flush && lock_flush_tlb) {
- kvm_flush_remote_tlbs(kvm);
- flush = false;
- }
-
- return flush;
+out_unlock:
+ spin_unlock(&kvm->mmu_lock);
}
-static __always_inline bool
-slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, int start_level, int end_level,
- bool lock_flush_tlb)
+void kvm_mmu_init_vm(struct kvm *kvm)
{
- return slot_handle_level_range(kvm, memslot, fn, start_level,
- end_level, memslot->base_gfn,
- memslot->base_gfn + memslot->npages - 1,
- lock_flush_tlb);
-}
+ struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
-static __always_inline bool
-slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, bool lock_flush_tlb)
-{
- return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
- PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+ node->track_write = kvm_mmu_pte_write;
+ node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
+ kvm_page_track_register_notifier(kvm, node);
}
-static __always_inline bool
-slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, bool lock_flush_tlb)
+void kvm_mmu_uninit_vm(struct kvm *kvm)
{
- return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
- PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
-}
+ struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
-static __always_inline bool
-slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, bool lock_flush_tlb)
-{
- return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
- PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
+ kvm_page_track_unregister_notifier(kvm, node);
}
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- bool flush_tlb = true;
- bool flush = false;
int i;
- if (kvm_available_flush_tlb_with_range())
- flush_tlb = false;
-
spin_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
@@ -5653,17 +5704,12 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
if (start >= end)
continue;
- flush |= slot_handle_level_range(kvm, memslot,
- kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
- PT_MAX_HUGEPAGE_LEVEL, start,
- end - 1, flush_tlb);
+ slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+ PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
+ start, end - 1, true);
}
}
- if (flush)
- kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
- gfn_end - gfn_start + 1);
-
spin_unlock(&kvm->mmu_lock);
}
@@ -5815,101 +5861,58 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
-#define BATCH_ZAP_PAGES 10
-static void kvm_zap_obsolete_pages(struct kvm *kvm)
+static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
{
struct kvm_mmu_page *sp, *node;
- int batch = 0;
+ LIST_HEAD(invalid_list);
+ int ign;
+ spin_lock(&kvm->mmu_lock);
restart:
- list_for_each_entry_safe_reverse(sp, node,
- &kvm->arch.active_mmu_pages, link) {
- int ret;
-
- /*
- * No obsolete page exists before new created page since
- * active_mmu_pages is the FIFO list.
- */
- if (!is_obsolete_sp(kvm, sp))
- break;
-
- /*
- * Since we are reversely walking the list and the invalid
- * list will be moved to the head, skip the invalid page
- * can help us to avoid the infinity list walking.
- */
- if (sp->role.invalid)
+ list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+ if (mmio_only && !sp->mmio_cached)
continue;
-
- /*
- * Need not flush tlb since we only zap the sp with invalid
- * generation number.
- */
- if (batch >= BATCH_ZAP_PAGES &&
- cond_resched_lock(&kvm->mmu_lock)) {
- batch = 0;
+ if (sp->role.invalid && sp->root_count)
+ continue;
+ if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
+ WARN_ON_ONCE(mmio_only);
goto restart;
}
-
- ret = kvm_mmu_prepare_zap_page(kvm, sp,
- &kvm->arch.zapped_obsolete_pages);
- batch += ret;
-
- if (ret)
+ if (cond_resched_lock(&kvm->mmu_lock))
goto restart;
}
- /*
- * Should flush tlb before free page tables since lockless-walking
- * may use the pages.
- */
- kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
-}
-
-/*
- * Fast invalidate all shadow pages and use lock-break technique
- * to zap obsolete pages.
- *
- * It's required when memslot is being deleted or VM is being
- * destroyed, in these cases, we should ensure that KVM MMU does
- * not use any resource of the being-deleted slot or all slots
- * after calling the function.
- */
-void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
-{
- spin_lock(&kvm->mmu_lock);
- trace_kvm_mmu_invalidate_zap_all_pages(kvm);
- kvm->arch.mmu_valid_gen++;
-
- /*
- * Notify all vcpus to reload its shadow page table
- * and flush TLB. Then all vcpus will switch to new
- * shadow page table with the new mmu_valid_gen.
- *
- * Note: we should do this under the protection of
- * mmu-lock, otherwise, vcpu would purge shadow page
- * but miss tlb flush.
- */
- kvm_reload_remote_mmus(kvm);
-
- kvm_zap_obsolete_pages(kvm);
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
spin_unlock(&kvm->mmu_lock);
}
-static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
+void kvm_mmu_zap_all(struct kvm *kvm)
{
- return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
+ return __kvm_mmu_zap_all(kvm, false);
}
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{
+ WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
+
+ gen &= MMIO_SPTE_GEN_MASK;
+
/*
- * The very rare case: if the generation-number is round,
+ * Generation numbers are incremented in multiples of the number of
+ * address spaces in order to provide unique generations across all
+ * address spaces. Strip what is effectively the address space
+ * modifier prior to checking for a wrap of the MMIO generation so
+ * that a wrap in any address space is detected.
+ */
+ gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
+
+ /*
+ * The very rare case: if the MMIO generation number has wrapped,
* zap all shadow pages.
*/
- if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
+ if (unlikely(gen == 0)) {
kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
- kvm_mmu_invalidate_zap_all_pages(kvm);
+ __kvm_mmu_zap_all(kvm, true);
}
}
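
Concretely, with KVM_ADDRESS_SPACE_NUM == 2 (x86 with SMM) the modifier is the low bit, so generations belonging to either address space collapse onto the same even value before the wrap test. A small model of the check (constants assumed locally, not taken from the headers):

#include <stdint.h>
#include <stdio.h>

#define MMIO_GEN_MASK	((1ULL << 19) - 1)	/* 19-bit MMIO generation */
#define ADDRESS_SPACES	2			/* x86 with SMM */

static int gen_wrapped(uint64_t memslots_gen)
{
	uint64_t gen = memslots_gen & MMIO_GEN_MASK;

	/* Strip the address-space modifier (the low bit when N == 2). */
	gen &= ~(uint64_t)(ADDRESS_SPACES - 1);
	return gen == 0;
}

int main(void)
{
	printf("%d\n", gen_wrapped(1ULL << 19));	/* wrapped to 0 -> 1 */
	printf("%d\n", gen_wrapped((1ULL << 19) + 1));	/* wrapped to 1 -> 1 */
	printf("%d\n", gen_wrapped(42));		/* mid-stream   -> 0 */
	return 0;
}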
@@ -5940,24 +5943,16 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
* want to shrink a VM that only started to populate its MMU
* anyway.
*/
- if (!kvm->arch.n_used_mmu_pages &&
- !kvm_has_zapped_obsolete_pages(kvm))
+ if (!kvm->arch.n_used_mmu_pages)
continue;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
- if (kvm_has_zapped_obsolete_pages(kvm)) {
- kvm_mmu_commit_zap_page(kvm,
- &kvm->arch.zapped_obsolete_pages);
- goto unlock;
- }
-
if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
freed++;
kvm_mmu_commit_zap_page(kvm, &invalid_list);
-unlock:
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
@@ -6037,10 +6032,10 @@ out:
/*
* Calculate mmu pages needed for kvm.
*/
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
{
- unsigned int nr_mmu_pages;
- unsigned int nr_pages = 0;
+ unsigned long nr_mmu_pages;
+ unsigned long nr_pages = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i;
@@ -6053,8 +6048,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
}
nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
- nr_mmu_pages = max(nr_mmu_pages,
- (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+ nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
return nr_mmu_pages;
}
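
As a worked example, KVM_PERMILLE_MMU_PAGES is 20 (i.e. 2%) and KVM_MIN_ALLOC_MMU_PAGES is 64 in kernels of this era; treat the exact values below as assumptions. A 4 GiB guest therefore gets about 20971 shadow pages by default, while very small guests hit the floor:

#include <stdio.h>

/* Assumed constants: 20 permille (2%) and a 64-page floor. */
#define KVM_PERMILLE_MMU_PAGES	20
#define KVM_MIN_ALLOC_MMU_PAGES	64UL

static unsigned long default_mmu_pages(unsigned long nr_guest_pages)
{
	unsigned long nr = nr_guest_pages * KVM_PERMILLE_MMU_PAGES / 1000;

	return nr > KVM_MIN_ALLOC_MMU_PAGES ? nr : KVM_MIN_ALLOC_MMU_PAGES;
}

int main(void)
{
	/* 4 GiB guest: 1048576 4-KiB pages -> 20971 MMU pages. */
	printf("%lu\n", default_mmu_pages(1048576UL));
	/* 8 MiB guest: 2048 pages -> 40, raised to the 64-page floor. */
	printf("%lu\n", default_mmu_pages(2048UL));
	return 0;
}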
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index c7b333147c4a..54c2a377795b 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -64,7 +64,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
u64 fault_address, char *insn, int insn_len);
-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
return kvm->arch.n_max_mmu_pages -
@@ -203,7 +203,6 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return -(u32)fault & errcode;
}
-void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index c73bf4e4988c..dd30dccd2ad5 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -8,18 +8,16 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
-#define KVM_MMU_PAGE_FIELDS \
- __field(unsigned long, mmu_valid_gen) \
- __field(__u64, gfn) \
- __field(__u32, role) \
- __field(__u32, root_count) \
+#define KVM_MMU_PAGE_FIELDS \
+ __field(__u64, gfn) \
+ __field(__u32, role) \
+ __field(__u32, root_count) \
__field(bool, unsync)
-#define KVM_MMU_PAGE_ASSIGN(sp) \
- __entry->mmu_valid_gen = sp->mmu_valid_gen; \
- __entry->gfn = sp->gfn; \
- __entry->role = sp->role.word; \
- __entry->root_count = sp->root_count; \
+#define KVM_MMU_PAGE_ASSIGN(sp) \
+ __entry->gfn = sp->gfn; \
+ __entry->role = sp->role.word; \
+ __entry->root_count = sp->root_count; \
__entry->unsync = sp->unsync;
#define KVM_MMU_PAGE_PRINTK() ({ \
@@ -31,11 +29,10 @@
\
role.word = __entry->role; \
\
- trace_seq_printf(p, "sp gen %lx gfn %llx l%u%s q%u%s %s%s" \
+ trace_seq_printf(p, "sp gfn %llx l%u %u-byte q%u%s %s%s" \
" %snxe %sad root %u %s%c", \
- __entry->mmu_valid_gen, \
__entry->gfn, role.level, \
- role.cr4_pae ? " pae" : "", \
+ role.gpte_is_8_bytes ? 8 : 4, \
role.quadrant, \
role.direct ? " direct" : "", \
access_str[role.access], \
@@ -283,27 +280,6 @@ TRACE_EVENT(
);
TRACE_EVENT(
- kvm_mmu_invalidate_zap_all_pages,
- TP_PROTO(struct kvm *kvm),
- TP_ARGS(kvm),
-
- TP_STRUCT__entry(
- __field(unsigned long, mmu_valid_gen)
- __field(unsigned int, mmu_used_pages)
- ),
-
- TP_fast_assign(
- __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
- __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
- ),
-
- TP_printk("kvm-mmu-valid-gen %lx used_pages %x",
- __entry->mmu_valid_gen, __entry->mmu_used_pages
- )
-);
-
-
-TRACE_EVENT(
check_mmio_spte,
TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
TP_ARGS(spte, kvm_gen, spte_gen),
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 3052a59a3065..fd04d462fdae 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -42,7 +42,7 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
slot->arch.gfn_track[i] =
kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!slot->arch.gfn_track[i])
goto track_free;
}
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 58ead7db71a3..e39741997893 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -281,9 +281,13 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
bool fast_mode = idx & (1u << 31);
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
u64 ctr_val;
+ if (!pmu->version)
+ return 1;
+
if (is_vmware_backdoor_pmc(idx))
return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f13a3a24d360..406b558abfef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -145,7 +145,6 @@ struct kvm_svm {
/* Struct members for AVIC */
u32 avic_vm_id;
- u32 ldr_mode;
struct page *avic_logical_id_table_page;
struct page *avic_physical_id_table_page;
struct hlist_node hnode;
@@ -236,6 +235,7 @@ struct vcpu_svm {
bool nrips_enabled : 1;
u32 ldr_reg;
+ u32 dfr_reg;
struct page *avic_backing_page;
u64 *avic_physical_id_cache;
bool avic_is_running;
@@ -262,6 +262,7 @@ struct amd_svm_iommu_ir {
};
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
@@ -1795,9 +1796,10 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
/* Avoid using vmalloc for smaller buffers. */
size = npages * sizeof(struct page *);
if (size > PAGE_SIZE)
- pages = vmalloc(size);
+ pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+ PAGE_KERNEL);
else
- pages = kmalloc(size, GFP_KERNEL);
+ pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
if (!pages)
return NULL;
@@ -1865,7 +1867,9 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
static struct kvm *svm_vm_alloc(void)
{
- struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
+ struct kvm_svm *kvm_svm = __vmalloc(sizeof(struct kvm_svm),
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+ PAGE_KERNEL);
return &kvm_svm->kvm;
}
@@ -1940,7 +1944,7 @@ static int avic_vm_init(struct kvm *kvm)
return 0;
/* Allocating physical APIC ID table (4KB) */
- p_page = alloc_page(GFP_KERNEL);
+ p_page = alloc_page(GFP_KERNEL_ACCOUNT);
if (!p_page)
goto free_avic;
@@ -1948,7 +1952,7 @@ static int avic_vm_init(struct kvm *kvm)
clear_page(page_address(p_page));
/* Allocating logical APIC ID table (4KB) */
- l_page = alloc_page(GFP_KERNEL);
+ l_page = alloc_page(GFP_KERNEL_ACCOUNT);
if (!l_page)
goto free_avic;
@@ -2106,6 +2110,7 @@ static int avic_init_vcpu(struct vcpu_svm *svm)
INIT_LIST_HEAD(&svm->ir_list);
spin_lock_init(&svm->ir_list_lock);
+ svm->dfr_reg = APIC_DFR_FLAT;
return ret;
}
@@ -2119,13 +2124,14 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
struct page *nested_msrpm_pages;
int err;
- svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
if (!svm) {
err = -ENOMEM;
goto out;
}
- svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+ svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
+ GFP_KERNEL_ACCOUNT);
if (!svm->vcpu.arch.guest_fpu) {
printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
err = -ENOMEM;
@@ -2137,19 +2143,19 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_svm;
err = -ENOMEM;
- page = alloc_page(GFP_KERNEL);
+ page = alloc_page(GFP_KERNEL_ACCOUNT);
if (!page)
goto uninit;
- msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
if (!msrpm_pages)
goto free_page1;
- nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
if (!nested_msrpm_pages)
goto free_page2;
- hsave_page = alloc_page(GFP_KERNEL);
+ hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
if (!hsave_page)
goto free_page3;
@@ -2687,6 +2693,7 @@ static int npf_interception(struct vcpu_svm *svm)
static int db_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
if (!(svm->vcpu.guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
@@ -2697,6 +2704,8 @@ static int db_interception(struct vcpu_svm *svm)
if (svm->nmi_singlestep) {
disable_nmi_singlestep(svm);
+ /* Make sure we check for pending NMIs upon entry */
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
if (svm->vcpu.guest_debug &
@@ -4512,14 +4521,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break;
case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+ int i;
+ struct kvm_vcpu *vcpu;
+ struct kvm *kvm = svm->vcpu.kvm;
struct kvm_lapic *apic = svm->vcpu.arch.apic;
/*
- * Update ICR high and low, then emulate sending IPI,
- * which is handled when writing APIC_ICR.
+ * At this point, we expect that the AVIC HW has already
+ * set the appropriate IRR bits on the valid target
+ * vcpus. So, we just need to kick the appropriate vcpu.
*/
- kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
- kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ bool m = kvm_apic_match_dest(vcpu, apic,
+ icrl & KVM_APIC_SHORT_MASK,
+ GET_APIC_DEST_FIELD(icrh),
+ icrl & KVM_APIC_DEST_MASK);
+
+ if (m && !avic_vcpu_is_running(vcpu))
+ kvm_vcpu_wake_up(vcpu);
+ }
break;
}
case AVIC_IPI_FAILURE_INVALID_TARGET:
@@ -4565,8 +4585,7 @@ static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
return &logical_apic_id_table[index];
}
-static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
- bool valid)
+static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
{
bool flat;
u32 *entry, new_entry;
@@ -4579,31 +4598,39 @@ static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
new_entry = READ_ONCE(*entry);
new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
- if (valid)
- new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
- else
- new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
+ new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
WRITE_ONCE(*entry, new_entry);
return 0;
}
+static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ bool flat = svm->dfr_reg == APIC_DFR_FLAT;
+ u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
+
+ if (entry)
+ clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
+}
+
static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
{
- int ret;
+ int ret = 0;
struct vcpu_svm *svm = to_svm(vcpu);
u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
- if (!ldr)
- return 1;
+ if (ldr == svm->ldr_reg)
+ return 0;
- ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
- if (ret && svm->ldr_reg) {
- avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
- svm->ldr_reg = 0;
- } else {
+ avic_invalidate_logical_id_entry(vcpu);
+
+ if (ldr)
+ ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr);
+
+ if (!ret)
svm->ldr_reg = ldr;
- }
+
return ret;
}
@@ -4637,27 +4664,16 @@ static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
return 0;
}
-static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
+static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
- u32 mod = (dfr >> 28) & 0xf;
- /*
- * We assume that all local APICs are using the same type.
- * If this changes, we need to flush the AVIC logical
- * APID id table.
- */
- if (kvm_svm->ldr_mode == mod)
- return 0;
-
- clear_page(page_address(kvm_svm->avic_logical_id_table_page));
- kvm_svm->ldr_mode = mod;
+ if (svm->dfr_reg == dfr)
+ return;
- if (svm->ldr_reg)
- avic_handle_ldr_update(vcpu);
- return 0;
+ avic_invalidate_logical_id_entry(vcpu);
+ svm->dfr_reg = dfr;
}
static int avic_unaccel_trap_write(struct vcpu_svm *svm)
@@ -5125,11 +5141,11 @@ static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
- if (!kvm_vcpu_apicv_active(&svm->vcpu))
- return;
-
- vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
- mark_dirty(vmcb, VMCB_INTR);
+ if (kvm_vcpu_apicv_active(vcpu))
+ vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+ else
+ vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
+ mark_dirty(vmcb, VMCB_AVIC);
}
static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -5195,7 +5211,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
 * Allocate a new amd_iommu_pi_data, which will get
 * added to the per-vcpu ir_list.
*/
- ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
+ ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
if (!ir) {
ret = -ENOMEM;
goto out;
@@ -5620,6 +5636,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
svm->vmcb->save.cr2 = vcpu->arch.cr2;
clgi();
+ kvm_load_guest_xcr0(vcpu);
/*
* If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -5765,6 +5782,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_interrupt(&svm->vcpu);
+ kvm_put_guest_xcr0(vcpu);
stgi();
/* Any pending NMI will happen here */
@@ -6163,8 +6181,7 @@ static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
{
if (avic_handle_apic_id_update(vcpu) != 0)
return;
- if (avic_handle_dfr_update(vcpu) != 0)
- return;
+ avic_handle_dfr_update(vcpu);
avic_handle_ldr_update(vcpu);
}
@@ -6215,32 +6232,24 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *nested_vmcb;
struct page *page;
- struct {
- u64 guest;
- u64 vmcb;
- } svm_state_save;
- int ret;
+ u64 guest;
+ u64 vmcb;
- ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
- sizeof(svm_state_save));
- if (ret)
- return ret;
+ guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+ vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
- if (svm_state_save.guest) {
- vcpu->arch.hflags &= ~HF_SMM_MASK;
- nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
- if (nested_vmcb)
- enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
- else
- ret = 1;
- vcpu->arch.hflags |= HF_SMM_MASK;
+ if (guest) {
+ nested_vmcb = nested_svm_map(svm, vmcb, &page);
+ if (!nested_vmcb)
+ return 1;
+ enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
}
- return ret;
+ return 0;
}
static int enable_smi_window(struct kvm_vcpu *vcpu)
@@ -6311,7 +6320,7 @@ static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
if (ret)
return ret;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
if (!data)
return -ENOMEM;
@@ -6361,7 +6370,7 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
return -EFAULT;
- start = kzalloc(sizeof(*start), GFP_KERNEL);
+ start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
if (!start)
return -ENOMEM;
@@ -6422,11 +6431,11 @@ e_free:
return ret;
}
-static int get_num_contig_pages(int idx, struct page **inpages,
- unsigned long npages)
+static unsigned long get_num_contig_pages(unsigned long idx,
+ struct page **inpages, unsigned long npages)
{
unsigned long paddr, next_paddr;
- int i = idx + 1, pages = 1;
+ unsigned long i = idx + 1, pages = 1;
/* find the number of contiguous pages starting from idx */
paddr = __sme_page_pa(inpages[idx]);
@@ -6445,12 +6454,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
+ unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_launch_update_data params;
struct sev_data_launch_update_data *data;
struct page **inpages;
- int i, ret, pages;
+ int ret;
if (!sev_guest(kvm))
return -ENOTTY;
@@ -6458,7 +6467,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
if (!data)
return -ENOMEM;
@@ -6535,7 +6544,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, measure, sizeof(params)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
if (!data)
return -ENOMEM;
@@ -6597,7 +6606,7 @@ static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!sev_guest(kvm))
return -ENOTTY;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
if (!data)
return -ENOMEM;
@@ -6618,7 +6627,7 @@ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!sev_guest(kvm))
return -ENOTTY;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
if (!data)
return -ENOMEM;
@@ -6646,7 +6655,7 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
struct sev_data_dbg *data;
int ret;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
if (!data)
return -ENOMEM;
@@ -6799,7 +6808,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
struct page **src_p, **dst_p;
struct kvm_sev_dbg debug;
unsigned long n;
- int ret, size;
+ unsigned int size;
+ int ret;
if (!sev_guest(kvm))
return -ENOTTY;
@@ -6807,6 +6817,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
return -EFAULT;
+ if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
+ return -EINVAL;
+ if (!debug.dst_uaddr)
+ return -EINVAL;
+
vaddr = debug.src_uaddr;
size = debug.len;
vaddr_end = vaddr + size;
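
The new src_uaddr check is the standard unsigned-wraparound guard: if uaddr + len wrapped, the sum lands below uaddr. Note that the separate !debug.len test is still needed, since a zero-length range never wraps. A self-contained illustration:

#include <stdint.h>
#include <stdio.h>

/* Nonzero when [uaddr, uaddr + len) wraps the 64-bit address space.
 * Unsigned overflow is well-defined in C, so the comparison is safe. */
static int range_wraps(uint64_t uaddr, uint64_t len)
{
	return uaddr + len < uaddr;
}

int main(void)
{
	printf("%d\n", range_wraps(0x1000, 0x2000));		/* 0: fine  */
	printf("%d\n", range_wraps(UINT64_MAX - 16, 4096));	/* 1: wraps */
	return 0;
}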
@@ -6857,8 +6872,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
dst_vaddr,
len, &argp->error);
- sev_unpin_memory(kvm, src_p, 1);
- sev_unpin_memory(kvm, dst_p, 1);
+ sev_unpin_memory(kvm, src_p, n);
+ sev_unpin_memory(kvm, dst_p, n);
if (ret)
goto err;
@@ -6901,7 +6916,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
}
ret = -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
if (!data)
goto e_unpin_memory;
@@ -7007,7 +7022,7 @@ static int svm_register_enc_region(struct kvm *kvm,
if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
return -EINVAL;
- region = kzalloc(sizeof(*region), GFP_KERNEL);
+ region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
if (!region)
return -ENOMEM;
@@ -7098,6 +7113,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
return -ENODEV;
}
+static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+ bool is_user, smap;
+
+ is_user = svm_get_cpl(vcpu) == 3;
+ smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+
+	/*
+	 * Detect and work around Erratum 1096 Fam_17h_00_0Fh.
+	 *
+	 * In a non-SEV guest, the hypervisor can read guest memory to
+	 * decode the instruction pointer when insn_len is zero, so we
+	 * return true to indicate that decoding is possible.
+	 *
+	 * But in an SEV guest, guest memory is encrypted with a
+	 * guest-specific key and the hypervisor cannot decode the
+	 * instruction pointer, so there is no way to work around the
+	 * erratum. Print an error and request that the guest be killed.
+	 */
+ if (is_user && smap) {
+ if (!sev_guest(vcpu->kvm))
+ return true;
+
+ pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ }
+
+ return false;
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -7231,6 +7276,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.nested_enable_evmcs = nested_enable_evmcs,
.nested_get_evmcs_version = nested_get_evmcs_version,
+
+ .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 6432d08c7de7..4d47a2631d1f 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -438,13 +438,13 @@ TRACE_EVENT(kvm_apic_ipi,
);
TRACE_EVENT(kvm_apic_accept_irq,
- TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+ TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
TP_ARGS(apicid, dm, tm, vec),
TP_STRUCT__entry(
__field( __u32, apicid )
__field( __u16, dm )
- __field( __u8, tm )
+ __field( __u16, tm )
__field( __u8, vec )
),
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d737a51a53ca..0c601d079cd2 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -211,7 +211,6 @@ static void free_nested(struct kvm_vcpu *vcpu)
if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
return;
- hrtimer_cancel(&vmx->nested.preemption_timer);
vmx->nested.vmxon = false;
vmx->nested.smm.vmxon = false;
free_vpid(vmx->nested.vpid02);
@@ -274,6 +273,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
vcpu_load(vcpu);
+ vmx_leave_nested(vcpu);
vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
free_nested(vcpu);
vcpu_put(vcpu);
@@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
}
}
+static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
+{
+ int msr;
+
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+
+ msr_bitmap[word] = ~0;
+ msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
+ }
+}
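
The two indices per iteration come from the VMX MSR-bitmap layout: read-intercept bits for low MSRs occupy the first 1 KiB of the page and the corresponding write-intercept bits start 0x800 bytes in, hence the word + 0x800 / sizeof(long) offset. A user-space model of the index arithmetic (a 64-bit host is assumed for the printed values):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))

int main(void)
{
	unsigned int msr;

	/* One full-word write to the read bitmap (byte offset 0) and one
	 * to the write bitmap (byte offset 0x800) per iteration, exactly
	 * as in enable_x2apic_msr_intercepts(). */
	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		size_t word = msr / BITS_PER_LONG;

		printf("msr %#x: read word %zu, write word %zu\n",
		       msr, word, word + 0x800 / sizeof(long));
	}
	return 0;
}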
+
/*
* Merge L0's and L1's MSR bitmap, return false to indicate that
* we do not use the hardware.
@@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
return false;
msr_bitmap_l1 = (unsigned long *)kmap(page);
- if (nested_cpu_has_apic_reg_virt(vmcs12)) {
- /*
- * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
- * just lets the processor take the value from the virtual-APIC page;
- * take those 256 bits directly from the L1 bitmap.
- */
- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
- unsigned word = msr / BITS_PER_LONG;
- msr_bitmap_l0[word] = msr_bitmap_l1[word];
- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
- }
- } else {
- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
- unsigned word = msr / BITS_PER_LONG;
- msr_bitmap_l0[word] = ~0;
- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
- }
- }
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- X2APIC_MSR(APIC_TASKPRI),
- MSR_TYPE_W);
+ /*
+ * To keep the control flow simple, pay eight 8-byte writes (sixteen
+ * 4-byte writes on 32-bit systems) up front to enable intercepts for
+ * the x2APIC MSR range and selectively disable them below.
+ */
+ enable_x2apic_msr_intercepts(msr_bitmap_l0);
+
+ if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
+ if (nested_cpu_has_apic_reg_virt(vmcs12)) {
+ /*
+ * L0 need not intercept reads for MSRs between 0x800
+ * and 0x8ff, it just lets the processor take the value
+ * from the virtual-APIC page; take those 256 bits
+ * directly from the L1 bitmap.
+ */
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+
+ msr_bitmap_l0[word] = msr_bitmap_l1[word];
+ }
+ }
- if (nested_cpu_has_vid(vmcs12)) {
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- X2APIC_MSR(APIC_EOI),
- MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
- X2APIC_MSR(APIC_SELF_IPI),
- MSR_TYPE_W);
+ X2APIC_MSR(APIC_TASKPRI),
+ MSR_TYPE_R | MSR_TYPE_W);
+
+ if (nested_cpu_has_vid(vmcs12)) {
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ X2APIC_MSR(APIC_EOI),
+ MSR_TYPE_W);
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ X2APIC_MSR(APIC_SELF_IPI),
+ MSR_TYPE_W);
+ }
}
if (spec_ctrl)
@@ -1980,17 +1996,6 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
prepare_vmcs02_early_full(vmx, vmcs12);
/*
- * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
- * entry, but only if the current (host) sp changed from the value
- * we wrote last (vmx->host_rsp). This cache is no longer relevant
- * if we switch vmcs, and rather than hold a separate cache per vmcs,
- * here we just force the write to happen on entry. host_rsp will
- * also be written unconditionally by nested_vmx_check_vmentry_hw()
- * if we are doing early consistency checks via hardware.
- */
- vmx->host_rsp = 0;
-
- /*
* PIN CONTROLS
*/
exec_control = vmcs12->pin_based_vm_exec_control;
@@ -2289,10 +2294,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
vmx_set_rflags(vcpu, vmcs12->guest_rflags);
- vmx->nested.preemption_timer_expired = false;
- if (nested_cpu_has_preemption_timer(vmcs12))
- vmx_start_preemption_timer(vcpu);
-
/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
* bitwise-or of what L1 wants to trap for L2, and what we want to
* trap. Note that CR0.TS also needs updating - we do this later.
@@ -2600,6 +2601,11 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
!nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
!nested_cr3_valid(vcpu, vmcs12->host_cr3))
return -EINVAL;
+
+ if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
+ is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
+ return -EINVAL;
+
/*
* If the load IA32_EFER VM-exit control is 1, bits reserved in the
* IA32_EFER MSR must be 0 in the field for that register. In addition,
@@ -2722,6 +2728,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long cr3, cr4;
+ bool vm_fail;
if (!nested_early_check)
return 0;
@@ -2755,29 +2762,34 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->host_state.cr4 = cr4;
}
- vmx->__launched = vmx->loaded_vmcs->launched;
-
asm(
- /* Set HOST_RSP */
"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
- __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
- "mov %%" _ASM_SP ", %c[host_rsp](%1)\n\t"
+ "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
+ "je 1f \n\t"
+ __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
+ "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
+ "1: \n\t"
"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
/* Check if vmlaunch or vmresume is needed */
- "cmpl $0, %c[launched](%% " _ASM_CX")\n\t"
+ "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
+ /*
+ * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
+ * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
+ * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
+	 * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
+ */
"call vmx_vmenter\n\t"
- /* Set vmx->fail accordingly */
- "setbe %c[fail](%% " _ASM_CX")\n\t"
- : ASM_CALL_CONSTRAINT
- : "c"(vmx), "d"((unsigned long)HOST_RSP),
- [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
- [fail]"i"(offsetof(struct vcpu_vmx, fail)),
- [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
+ CC_SET(be)
+ : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
+ : [HOST_RSP]"r"((unsigned long)HOST_RSP),
+ [loaded_vmcs]"r"(vmx->loaded_vmcs),
+ [launched]"i"(offsetof(struct loaded_vmcs, launched)),
+ [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
[wordsize]"i"(sizeof(ulong))
- : "rax", "cc", "memory"
+ : "cc", "memory"
);
preempt_enable();
@@ -2787,10 +2799,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
if (vmx->msr_autoload.guest.nr)
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
- if (vmx->fail) {
+ if (vm_fail) {
WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- vmx->fail = 0;
return 1;
}
@@ -2813,8 +2824,6 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
return 0;
}
-STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw);
-
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12);
@@ -2864,20 +2873,27 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
/*
* If translation failed, VM entry will fail because
* prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
- * Failing the vm entry is _not_ what the processor
- * does but it's basically the only possibility we
- * have. We could still enter the guest if CR8 load
- * exits are enabled, CR8 store exits are enabled, and
- * virtualize APIC access is disabled; in this case
- * the processor would never use the TPR shadow and we
- * could simply clear the bit from the execution
- * control. But such a configuration is useless, so
- * let's keep the code simple.
*/
if (!is_error_page(page)) {
vmx->nested.virtual_apic_page = page;
hpa = page_to_phys(vmx->nested.virtual_apic_page);
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
+ } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
+ nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
+ !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ /*
+ * The processor will never use the TPR shadow, simply
+ * clear the bit from the execution control. Such a
+ * configuration is useless, but it happens in tests.
+ * For any other configuration, failing the vm entry is
+ * _not_ what the processor does but it's basically the
+ * only possibility we have.
+ */
+ vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_TPR_SHADOW);
+ } else {
+ printk("bad virtual-APIC page address\n");
+ dump_vmcs();
}
}
@@ -3031,6 +3047,15 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
kvm_make_request(KVM_REQ_EVENT, vcpu);
/*
+ * Do not start the preemption timer hrtimer until after we know
+ * we are successful, so that only nested_vmx_vmexit needs to cancel
+ * the timer.
+ */
+ vmx->nested.preemption_timer_expired = false;
+ if (nested_cpu_has_preemption_timer(vmcs12))
+ vmx_start_preemption_timer(vcpu);
+
+ /*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
* returned as far as L1 is concerned. It will only return (and set
@@ -3450,13 +3475,10 @@ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
else
vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
- if (nested_cpu_has_preemption_timer(vmcs12)) {
- if (vmcs12->vm_exit_controls &
- VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
+ if (nested_cpu_has_preemption_timer(vmcs12) &&
+ vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
vmcs12->vmx_preemption_timer_value =
vmx_get_preemption_timer_value(vcpu);
- hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
- }
/*
* In some cases (usually, nested EPT), L2 is allowed to change its
@@ -3774,8 +3796,18 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
nested_ept_uninit_mmu_context(vcpu);
- vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+ /*
+ * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
+ * points to shadow pages! Fortunately we only get here after a WARN_ON
+ * if EPT is disabled, so a VMabort is perfectly fine.
+ */
+ if (enable_ept) {
+ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+ } else {
+ nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
+ }
/*
* Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
@@ -3864,6 +3896,9 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
leave_guest_mode(vcpu);
+ if (nested_cpu_has_preemption_timer(vmcs12))
+ hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
+
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
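
The hunks above re-pair the preemption timer: it is armed only once VM entry is committed, and nested_vmx_vmexit becomes the sole cancellation point. The arm-late/cancel-once pattern with the hrtimer API, as a minimal generic sketch (not KVM code):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer timer;

    static enum hrtimer_restart timer_fn(struct hrtimer *t)
    {
            return HRTIMER_NORESTART;       /* one-shot */
    }

    static void arm_after_commit(void)
    {
            hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            timer.function = timer_fn;
            /* started only on the success path, so one cancel site suffices */
            hrtimer_start(&timer, ms_to_ktime(1), HRTIMER_MODE_REL);
    }

    static void cancel_on_exit(void)
    {
            hrtimer_cancel(&timer); /* safe even if the timer already fired */
    }
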
@@ -3915,9 +3950,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmx_flush_tlb(vcpu, true);
}
- /* This is needed for same reason as it was needed in prepare_vmcs02 */
- vmx->host_rsp = 0;
-
/* Unpin physical memory we referred to in vmcs02 */
if (vmx->nested.apic_access_page) {
kvm_release_page_dirty(vmx->nested.apic_access_page);
@@ -4035,25 +4067,50 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
/* Addr = segment_base + offset */
/* offset = base + [index * scale] + displacement */
off = exit_qualification; /* holds the displacement */
+ if (addr_size == 1)
+ off = (gva_t)sign_extend64(off, 31);
+ else if (addr_size == 0)
+ off = (gva_t)sign_extend64(off, 15);
if (base_is_valid)
off += kvm_register_read(vcpu, base_reg);
if (index_is_valid)
off += kvm_register_read(vcpu, index_reg)<<scaling;
vmx_get_segment(vcpu, &s, seg_reg);
- *ret = s.base + off;
+ /*
+ * The effective address, i.e. @off, of a memory operand is truncated
+ * based on the address size of the instruction. Note that this is
+ * the *effective address*, i.e. the address prior to accounting for
+ * the segment's base.
+ */
if (addr_size == 1) /* 32 bit */
- *ret &= 0xffffffff;
+ off &= 0xffffffff;
+ else if (addr_size == 0) /* 16 bit */
+ off &= 0xffff;
/* Checks for #GP/#SS exceptions. */
exn = false;
if (is_long_mode(vcpu)) {
+ /*
+ * The virtual/linear address is never truncated in 64-bit
+ * mode, e.g. a 32-bit address size can yield a 64-bit virtual
+ * address when using FS/GS with a non-zero base.
+ */
+ *ret = s.base + off;
+
/* Long mode: #GP(0)/#SS(0) if the memory address is in a
* non-canonical form. This is the only check on the memory
* destination for long mode!
*/
exn = is_noncanonical_address(*ret, vcpu);
- } else if (is_protmode(vcpu)) {
+ } else {
+ /*
+ * When not in long mode, the virtual/linear address is
+ * unconditionally truncated to 32 bits regardless of the
+ * address size.
+ */
+ *ret = (s.base + off) & 0xffffffff;
+
/* Protected mode: apply checks for segment validity in the
* following order:
* - segment type check (#GP(0) may be thrown)
@@ -4077,10 +4134,16 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
- /* Protected mode: #GP(0)/#SS(0) if the memory
- * operand is outside the segment limit.
+
+ /*
+ * Protected mode: #GP(0)/#SS(0) if the memory operand is
+ * outside the segment limit. All CPUs that support VMX ignore
+ * limit checks for flat segments, i.e. segments with base==0,
+ * limit==0xffffffff and of type expand-up data or code.
*/
- exn = exn || (off + sizeof(u64) > s.limit);
+ if (!(s.base == 0 && s.limit == 0xffffffff &&
+ ((s.type & 8) || !(s.type & 4))))
+ exn = exn || (off + sizeof(u64) > s.limit);
}
if (exn) {
kvm_queue_exception_e(vcpu,
@@ -4145,11 +4208,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (r < 0)
goto out_vmcs02;
- vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
+ vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
if (!vmx->nested.cached_vmcs12)
goto out_cached_vmcs12;
- vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
+ vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
if (!vmx->nested.cached_shadow_vmcs12)
goto out_cached_shadow_vmcs12;
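
This hunk is one instance of a theme running through the series: allocations whose lifetime is tied to a VM move to GFP_KERNEL_ACCOUNT (i.e. GFP_KERNEL | __GFP_ACCOUNT), which charges the memory to the caller's memcg. The convention, side by side (illustrative helper names):

    #include <linux/slab.h>

    static void *alloc_vm_scoped(size_t size)
    {
            /* tied to a VM: charged to the VM's cgroup, uncharged on free */
            return kzalloc(size, GFP_KERNEL_ACCOUNT);
    }

    static void *alloc_module_global(size_t size)
    {
            /* module-global state: deliberately left unaccounted */
            return kzalloc(size, GFP_KERNEL);
    }
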
@@ -5360,7 +5423,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
return ret;
/* Empty 'VMXON' state is permitted */
- if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
+ if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
return 0;
if (kvm_state->vmx.vmcs_pa != -1ull) {
@@ -5404,7 +5467,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
vmcs12->vmcs_link_pointer != -1ull) {
struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
- if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
+ if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
return -EINVAL;
if (copy_from_user(shadow_vmcs12,
@@ -5692,10 +5755,22 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{
int i;
+ /*
+	 * Without EPT it is not possible to restore L1's CR3 and PDPTRs on
+ * VMfail, because they are not available in vmcs01. Just always
+ * use hardware checks.
+ */
+ if (!enable_ept)
+ nested_early_check = 1;
+
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
if (enable_shadow_vmcs) {
for (i = 0; i < VMX_BITMAP_NR; i++) {
+ /*
+ * The vmx_bitmap is not tied to a VM and so should
+ * not be charged to a memcg.
+ */
vmx_bitmap[i] = (unsigned long *)
__get_free_page(GFP_KERNEL);
if (!vmx_bitmap[i]) {
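
For the get_vmx_mem_address() changes earlier in this file, the displacement handling is easy to sanity-check in userspace. A standalone sketch (plain C, mirroring the kernel's sign_extend64() semantics) of a 16-bit address-size operand with displacement -2 and base 0x1000:

    #include <stdint.h>
    #include <stdio.h>

    /* same semantics as the kernel helper: 'index' is the sign-bit position */
    static int64_t sign_extend64(uint64_t value, int index)
    {
            int shift = 63 - index;
            return (int64_t)(value << shift) >> shift;
    }

    int main(void)
    {
            uint64_t disp = 0xfffe;                 /* -2 as a 16-bit displacement */
            int64_t off = sign_extend64(disp, 15);  /* 0xfffffffffffffffe */
            uint64_t ea = ((uint64_t)0x1000 + off) & 0xffff;

            printf("ea = %#llx\n", (unsigned long long)ea);  /* ea = 0xffe */
            return 0;
    }

The sign extension plus the final mask implement the architectural wrap-around: base 0x1000 with displacement -2 lands at 0xffe within the 16-bit effective-address space.
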
diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
index 6def3ba88e3b..cb6079f8a227 100644
--- a/arch/x86/kvm/vmx/vmcs.h
+++ b/arch/x86/kvm/vmx/vmcs.h
@@ -34,6 +34,7 @@ struct vmcs_host_state {
unsigned long cr4; /* May not match real cr4 */
unsigned long gs_base;
unsigned long fs_base;
+ unsigned long rsp;
u16 fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index bcef2c7e9bc4..d4cb1945b2e3 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -1,6 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
+#include <asm/bitsperlong.h>
+#include <asm/kvm_vcpu_regs.h>
+#include <asm/nospec-branch.h>
+
+#define WORD_SIZE (BITS_PER_LONG / 8)
+
+#define VCPU_RAX __VCPU_REGS_RAX * WORD_SIZE
+#define VCPU_RCX __VCPU_REGS_RCX * WORD_SIZE
+#define VCPU_RDX __VCPU_REGS_RDX * WORD_SIZE
+#define VCPU_RBX __VCPU_REGS_RBX * WORD_SIZE
+/* Intentionally omit RSP as it's context switched by hardware */
+#define VCPU_RBP __VCPU_REGS_RBP * WORD_SIZE
+#define VCPU_RSI __VCPU_REGS_RSI * WORD_SIZE
+#define VCPU_RDI __VCPU_REGS_RDI * WORD_SIZE
+
+#ifdef CONFIG_X86_64
+#define VCPU_R8 __VCPU_REGS_R8 * WORD_SIZE
+#define VCPU_R9 __VCPU_REGS_R9 * WORD_SIZE
+#define VCPU_R10 __VCPU_REGS_R10 * WORD_SIZE
+#define VCPU_R11 __VCPU_REGS_R11 * WORD_SIZE
+#define VCPU_R12 __VCPU_REGS_R12 * WORD_SIZE
+#define VCPU_R13 __VCPU_REGS_R13 * WORD_SIZE
+#define VCPU_R14 __VCPU_REGS_R14 * WORD_SIZE
+#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
+#endif
.text
@@ -53,5 +78,159 @@ ENDPROC(vmx_vmenter)
* referred to by VMCS.HOST_RIP.
*/
ENTRY(vmx_vmexit)
+#ifdef CONFIG_RETPOLINE
+ ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
+ /* Preserve guest's RAX, it's used to stuff the RSB. */
+ push %_ASM_AX
+
+ /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+ FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+
+ pop %_ASM_AX
+.Lvmexit_skip_rsb:
+#endif
ret
ENDPROC(vmx_vmexit)
+
+/**
+ * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
+ * @vmx: struct vcpu_vmx *
+ * @regs: unsigned long * (to guest registers)
+ * @launched: %true if the VMCS has been launched
+ *
+ * Returns:
+ * 0 on VM-Exit, 1 on VM-Fail
+ */
+ENTRY(__vmx_vcpu_run)
+ push %_ASM_BP
+ mov %_ASM_SP, %_ASM_BP
+#ifdef CONFIG_X86_64
+ push %r15
+ push %r14
+ push %r13
+ push %r12
+#else
+ push %edi
+ push %esi
+#endif
+ push %_ASM_BX
+
+ /*
+ * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
+ * @regs is needed after VM-Exit to save the guest's register values.
+ */
+ push %_ASM_ARG2
+
+ /* Copy @launched to BL, _ASM_ARG3 is volatile. */
+ mov %_ASM_ARG3B, %bl
+
+ /* Adjust RSP to account for the CALL to vmx_vmenter(). */
+ lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
+ call vmx_update_host_rsp
+
+ /* Load @regs to RAX. */
+ mov (%_ASM_SP), %_ASM_AX
+
+ /* Check if vmlaunch or vmresume is needed */
+ cmpb $0, %bl
+
+ /* Load guest registers. Don't clobber flags. */
+ mov VCPU_RBX(%_ASM_AX), %_ASM_BX
+ mov VCPU_RCX(%_ASM_AX), %_ASM_CX
+ mov VCPU_RDX(%_ASM_AX), %_ASM_DX
+ mov VCPU_RSI(%_ASM_AX), %_ASM_SI
+ mov VCPU_RDI(%_ASM_AX), %_ASM_DI
+ mov VCPU_RBP(%_ASM_AX), %_ASM_BP
+#ifdef CONFIG_X86_64
+ mov VCPU_R8 (%_ASM_AX), %r8
+ mov VCPU_R9 (%_ASM_AX), %r9
+ mov VCPU_R10(%_ASM_AX), %r10
+ mov VCPU_R11(%_ASM_AX), %r11
+ mov VCPU_R12(%_ASM_AX), %r12
+ mov VCPU_R13(%_ASM_AX), %r13
+ mov VCPU_R14(%_ASM_AX), %r14
+ mov VCPU_R15(%_ASM_AX), %r15
+#endif
+ /* Load guest RAX. This kills the vmx_vcpu pointer! */
+ mov VCPU_RAX(%_ASM_AX), %_ASM_AX
+
+ /* Enter guest mode */
+ call vmx_vmenter
+
+ /* Jump on VM-Fail. */
+ jbe 2f
+
+ /* Temporarily save guest's RAX. */
+ push %_ASM_AX
+
+ /* Reload @regs to RAX. */
+ mov WORD_SIZE(%_ASM_SP), %_ASM_AX
+
+ /* Save all guest registers, including RAX from the stack */
+ __ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
+ mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
+ mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
+ mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
+ mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
+ mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
+ mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
+#ifdef CONFIG_X86_64
+ mov %r8, VCPU_R8 (%_ASM_AX)
+ mov %r9, VCPU_R9 (%_ASM_AX)
+ mov %r10, VCPU_R10(%_ASM_AX)
+ mov %r11, VCPU_R11(%_ASM_AX)
+ mov %r12, VCPU_R12(%_ASM_AX)
+ mov %r13, VCPU_R13(%_ASM_AX)
+ mov %r14, VCPU_R14(%_ASM_AX)
+ mov %r15, VCPU_R15(%_ASM_AX)
+#endif
+
+ /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
+ xor %eax, %eax
+
+ /*
+ * Clear all general purpose registers except RSP and RAX to prevent
+ * speculative use of the guest's values, even those that are reloaded
+ * via the stack. In theory, an L1 cache miss when restoring registers
+ * could lead to speculative execution with the guest's values.
+ * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
+ * free. RSP and RAX are exempt as RSP is restored by hardware during
+ * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
+ */
+1: xor %ebx, %ebx
+ xor %ecx, %ecx
+ xor %edx, %edx
+ xor %esi, %esi
+ xor %edi, %edi
+ xor %ebp, %ebp
+#ifdef CONFIG_X86_64
+ xor %r8d, %r8d
+ xor %r9d, %r9d
+ xor %r10d, %r10d
+ xor %r11d, %r11d
+ xor %r12d, %r12d
+ xor %r13d, %r13d
+ xor %r14d, %r14d
+ xor %r15d, %r15d
+#endif
+
+ /* "POP" @regs. */
+ add $WORD_SIZE, %_ASM_SP
+ pop %_ASM_BX
+
+#ifdef CONFIG_X86_64
+ pop %r12
+ pop %r13
+ pop %r14
+ pop %r15
+#else
+ pop %esi
+ pop %edi
+#endif
+ pop %_ASM_BP
+ ret
+
+ /* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
+2: mov $1, %eax
+ jmp 1b
+ENDPROC(__vmx_vcpu_run)
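
The new asm routine has a small C-facing contract; the declaration and the call site appear in vmx.c below. Restated as a sketch (signature taken from the patch; the wrapper function is hypothetical):

    /* returns 0 on VM-Exit, 1 on VM-Fail, mirroring EFLAGS.CF/ZF */
    bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);

    static void run_guest_once(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
    {
            vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
                                       vmx->loaded_vmcs->launched);
    }
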
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 30a6bcd735ec..0c955bb286ff 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -246,6 +246,10 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ /*
+ * This allocation for vmx_l1d_flush_pages is not tied to a VM
+ * lifetime and so should not be charged to a memcg.
+ */
page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
if (!page)
return -ENOMEM;
@@ -1679,12 +1683,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = to_vmx(vcpu)->spec_ctrl;
break;
- case MSR_IA32_ARCH_CAPABILITIES:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
- return 1;
- msr_info->data = to_vmx(vcpu)->arch_capabilities;
- break;
case MSR_IA32_SYSENTER_CS:
msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
break;
@@ -1891,11 +1889,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
MSR_TYPE_W);
break;
- case MSR_IA32_ARCH_CAPABILITIES:
- if (!msr_info->host_initiated)
- return 1;
- vmx->arch_capabilities = data;
- break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -2387,13 +2380,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
return 0;
}
-struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu)
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
{
int node = cpu_to_node(cpu);
struct page *pages;
struct vmcs *vmcs;
- pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
+ pages = __alloc_pages_node(node, flags, vmcs_config.order);
if (!pages)
return NULL;
vmcs = page_address(pages);
@@ -2440,7 +2433,8 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
loaded_vmcs_init(loaded_vmcs);
if (cpu_has_vmx_msr_bitmap()) {
- loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+ loaded_vmcs->msr_bitmap = (unsigned long *)
+ __get_free_page(GFP_KERNEL_ACCOUNT);
if (!loaded_vmcs->msr_bitmap)
goto out_vmcs;
memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
@@ -2481,7 +2475,7 @@ static __init int alloc_kvm_area(void)
for_each_possible_cpu(cpu) {
struct vmcs *vmcs;
- vmcs = alloc_vmcs_cpu(false, cpu);
+ vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
if (!vmcs) {
free_kvm_area();
return -ENOMEM;
@@ -4083,8 +4077,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
- vmx->arch_capabilities = kvm_get_arch_capabilities();
-
vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
/* 22.2.1, 20.8.1 */
@@ -5611,7 +5603,7 @@ static void vmx_dump_dtsel(char *name, uint32_t limit)
vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
}
-static void dump_vmcs(void)
+void dump_vmcs(void)
{
u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
@@ -6360,150 +6352,15 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->hv_timer_armed = false;
}
-static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
+void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
{
- unsigned long evmcs_rsp;
-
- vmx->__launched = vmx->loaded_vmcs->launched;
-
- evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
- (unsigned long)&current_evmcs->host_rsp : 0;
-
- if (static_branch_unlikely(&vmx_l1d_should_flush))
- vmx_l1d_flush(vcpu);
-
- asm(
- /* Store host registers */
- "push %%" _ASM_DX "; push %%" _ASM_BP ";"
- "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
- "push %%" _ASM_CX " \n\t"
- "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
- "cmp %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
- "je 1f \n\t"
- "mov %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
- /* Avoid VMWRITE when Enlightened VMCS is in use */
- "test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
- "jz 2f \n\t"
- "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
- "jmp 1f \n\t"
- "2: \n\t"
- __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
- "1: \n\t"
- "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
-
- /* Reload cr2 if changed */
- "mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
- "mov %%cr2, %%" _ASM_DX " \n\t"
- "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
- "je 3f \n\t"
- "mov %%" _ASM_AX", %%cr2 \n\t"
- "3: \n\t"
- /* Check if vmlaunch or vmresume is needed */
- "cmpl $0, %c[launched](%%" _ASM_CX ") \n\t"
- /* Load guest registers. Don't clobber flags. */
- "mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
- "mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
- "mov %c[rdx](%%" _ASM_CX "), %%" _ASM_DX " \n\t"
- "mov %c[rsi](%%" _ASM_CX "), %%" _ASM_SI " \n\t"
- "mov %c[rdi](%%" _ASM_CX "), %%" _ASM_DI " \n\t"
- "mov %c[rbp](%%" _ASM_CX "), %%" _ASM_BP " \n\t"
-#ifdef CONFIG_X86_64
- "mov %c[r8](%%" _ASM_CX "), %%r8 \n\t"
- "mov %c[r9](%%" _ASM_CX "), %%r9 \n\t"
- "mov %c[r10](%%" _ASM_CX "), %%r10 \n\t"
- "mov %c[r11](%%" _ASM_CX "), %%r11 \n\t"
- "mov %c[r12](%%" _ASM_CX "), %%r12 \n\t"
- "mov %c[r13](%%" _ASM_CX "), %%r13 \n\t"
- "mov %c[r14](%%" _ASM_CX "), %%r14 \n\t"
- "mov %c[r15](%%" _ASM_CX "), %%r15 \n\t"
-#endif
- /* Load guest RCX. This kills the vmx_vcpu pointer! */
- "mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t"
-
- /* Enter guest mode */
- "call vmx_vmenter\n\t"
-
- /* Save guest's RCX to the stack placeholder (see above) */
- "mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t"
-
- /* Load host's RCX, i.e. the vmx_vcpu pointer */
- "pop %%" _ASM_CX " \n\t"
-
- /* Set vmx->fail based on EFLAGS.{CF,ZF} */
- "setbe %c[fail](%%" _ASM_CX ")\n\t"
-
- /* Save all guest registers, including RCX from the stack */
- "mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t"
- "mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t"
- __ASM_SIZE(pop) " %c[rcx](%%" _ASM_CX ") \n\t"
- "mov %%" _ASM_DX ", %c[rdx](%%" _ASM_CX ") \n\t"
- "mov %%" _ASM_SI ", %c[rsi](%%" _ASM_CX ") \n\t"
- "mov %%" _ASM_DI ", %c[rdi](%%" _ASM_CX ") \n\t"
- "mov %%" _ASM_BP ", %c[rbp](%%" _ASM_CX ") \n\t"
-#ifdef CONFIG_X86_64
- "mov %%r8, %c[r8](%%" _ASM_CX ") \n\t"
- "mov %%r9, %c[r9](%%" _ASM_CX ") \n\t"
- "mov %%r10, %c[r10](%%" _ASM_CX ") \n\t"
- "mov %%r11, %c[r11](%%" _ASM_CX ") \n\t"
- "mov %%r12, %c[r12](%%" _ASM_CX ") \n\t"
- "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
- "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
- "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
- /*
- * Clear host registers marked as clobbered to prevent
- * speculative use.
- */
- "xor %%r8d, %%r8d \n\t"
- "xor %%r9d, %%r9d \n\t"
- "xor %%r10d, %%r10d \n\t"
- "xor %%r11d, %%r11d \n\t"
- "xor %%r12d, %%r12d \n\t"
- "xor %%r13d, %%r13d \n\t"
- "xor %%r14d, %%r14d \n\t"
- "xor %%r15d, %%r15d \n\t"
-#endif
- "mov %%cr2, %%" _ASM_AX " \n\t"
- "mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t"
-
- "xor %%eax, %%eax \n\t"
- "xor %%ebx, %%ebx \n\t"
- "xor %%esi, %%esi \n\t"
- "xor %%edi, %%edi \n\t"
- "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
- : ASM_CALL_CONSTRAINT
- : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
- [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
- [fail]"i"(offsetof(struct vcpu_vmx, fail)),
- [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
- [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
- [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
- [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
- [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
- [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
- [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
- [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
-#ifdef CONFIG_X86_64
- [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
- [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
- [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
- [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
- [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
- [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
- [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
- [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
-#endif
- [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
- [wordsize]"i"(sizeof(ulong))
- : "cc", "memory"
-#ifdef CONFIG_X86_64
- , "rax", "rbx", "rdi"
- , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-#else
- , "eax", "ebx", "edi"
-#endif
- );
+ if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
+ vmx->loaded_vmcs->host_state.rsp = host_rsp;
+ vmcs_writel(HOST_RSP, host_rsp);
+ }
}
-STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
+
+bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -6553,6 +6410,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
+ kvm_load_guest_xcr0(vcpu);
+
if (static_cpu_has(X86_FEATURE_PKU) &&
kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
vcpu->arch.pkru != vmx->host_pkru)
@@ -6572,7 +6431,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
*/
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
- __vmx_vcpu_run(vcpu, vmx);
+ if (static_branch_unlikely(&vmx_l1d_should_flush))
+ vmx_l1d_flush(vcpu);
+
+ if (vcpu->arch.cr2 != read_cr2())
+ write_cr2(vcpu->arch.cr2);
+
+ vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
+ vmx->loaded_vmcs->launched);
+
+ vcpu->arch.cr2 = read_cr2();
/*
* We do not use IBRS in the kernel. If this vCPU has used the
@@ -6594,9 +6462,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
- /* Eliminate branch target predictions from guest mode */
- vmexit_fill_RSB();
-
/* All fields are clean at this point */
if (static_branch_unlikely(&enable_evmcs))
current_evmcs->hv_clean_fields |=
@@ -6640,6 +6505,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
__write_pkru(vmx->host_pkru);
}
+ kvm_put_guest_xcr0(vcpu);
+
vmx->nested.nested_run_pending = 0;
vmx->idt_vectoring_info = 0;
@@ -6657,7 +6524,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
static struct kvm *vmx_vm_alloc(void)
{
- struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
+ struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx),
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+ PAGE_KERNEL);
return &kvm_vmx->kvm;
}
@@ -6673,7 +6542,6 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
if (enable_pml)
vmx_destroy_pml_buffer(vmx);
free_vpid(vmx->vpid);
- leave_guest_mode(vcpu);
nested_vmx_free_vcpu(vcpu);
free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs);
@@ -6685,14 +6553,16 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
- struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ struct vcpu_vmx *vmx;
unsigned long *msr_bitmap;
int cpu;
+ vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
if (!vmx)
return ERR_PTR(-ENOMEM);
- vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+ vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
+ GFP_KERNEL_ACCOUNT);
if (!vmx->vcpu.arch.guest_fpu) {
printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
err = -ENOMEM;
@@ -6714,12 +6584,12 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
* for the guest, etc.
*/
if (enable_pml) {
- vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!vmx->pml_pg)
goto uninit_vcpu;
}
- vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
> PAGE_SIZE);
@@ -6983,6 +6853,30 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
}
}
+static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *entry;
+ union cpuid10_eax eax;
+
+ entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+ if (!entry)
+ return false;
+
+ eax.full = entry->eax;
+ return (eax.split.version_id > 0);
+}
+
+static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool pmu_enabled = guest_cpuid_has_pmu(vcpu);
+
+ if (pmu_enabled)
+ vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING;
+ else
+ vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING;
+}
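
guest_cpuid_has_pmu() above keys off CPUID.0xA:EAX[7:0], the architectural PMU version ID; zero means no PMU is exposed. The same probe from userspace, as a sketch (GCC's cpuid.h; illustrative only):

    #include <cpuid.h>
    #include <stdbool.h>

    static bool cpu_reports_pmu(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
                    return false;
            return (eax & 0xff) > 0;        /* version_id, bits 7:0 */
    }
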
+
static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7071,6 +6965,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
if (nested_vmx_allowed(vcpu)) {
nested_vmx_cr_fixed1_bits_update(vcpu);
nested_vmx_entry_exit_ctls_update(vcpu);
+ nested_vmx_procbased_ctls_update(vcpu);
}
if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
@@ -7134,6 +7029,7 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
{
struct vcpu_vmx *vmx;
u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
+ struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
if (kvm_mwait_in_guest(vcpu->kvm))
return -EOPNOTSUPP;
@@ -7142,7 +7038,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
tscl = rdtsc();
guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
- lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns);
+ lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
+ ktimer->timer_advance_ns);
if (delta_tsc > lapic_timer_advance_cycles)
delta_tsc -= lapic_timer_advance_cycles;
@@ -7500,7 +7397,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int ret;
@@ -7511,9 +7408,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
}
if (vmx->nested.smm.guest_mode) {
- vcpu->arch.hflags &= ~HF_SMM_MASK;
ret = nested_vmx_enter_non_root_mode(vcpu, false);
- vcpu->arch.hflags |= HF_SMM_MASK;
if (ret)
return ret;
@@ -7527,6 +7422,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
return 0;
}
+static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
static __init int hardware_setup(void)
{
unsigned long host_bndcfgs;
@@ -7829,6 +7729,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.set_nested_state = NULL,
.get_vmcs12_pages = NULL,
.nested_enable_evmcs = NULL,
+ .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
};
static void vmx_cleanup_l1d_flush(void)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 0ac0a64c7790..f879529906b4 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -175,7 +175,6 @@ struct nested_vmx {
struct vcpu_vmx {
struct kvm_vcpu vcpu;
- unsigned long host_rsp;
u8 fail;
u8 msr_bitmap_mode;
u32 exit_intr_info;
@@ -191,7 +190,6 @@ struct vcpu_vmx {
u64 msr_guest_kernel_gs_base;
#endif
- u64 arch_capabilities;
u64 spec_ctrl;
u32 vm_entry_controls_shadow;
@@ -209,7 +207,7 @@ struct vcpu_vmx {
struct loaded_vmcs vmcs01;
struct loaded_vmcs *loaded_vmcs;
struct loaded_vmcs *loaded_cpu_state;
- bool __launched; /* temporary, used in vmx_vcpu_run */
+
struct msr_autoload {
struct vmx_msrs guest;
struct vmx_msrs host;
@@ -339,8 +337,8 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
static inline void pi_set_sn(struct pi_desc *pi_desc)
{
- return set_bit(POSTED_INTR_SN,
- (unsigned long *)&pi_desc->control);
+ set_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
}
static inline void pi_set_on(struct pi_desc *pi_desc)
@@ -445,7 +443,8 @@ static inline u32 vmx_vmentry_ctrl(void)
{
u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
if (pt_mode == PT_MODE_SYSTEM)
- vmentry_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP | VM_EXIT_CLEAR_IA32_RTIT_CTL);
+ vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
+ VM_ENTRY_LOAD_IA32_RTIT_CTL);
/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
return vmentry_ctrl &
~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
@@ -455,9 +454,10 @@ static inline u32 vmx_vmexit_ctrl(void)
{
u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
if (pt_mode == PT_MODE_SYSTEM)
- vmexit_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP | VM_ENTRY_LOAD_IA32_RTIT_CTL);
+ vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
+ VM_EXIT_CLEAR_IA32_RTIT_CTL);
/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
- return vmcs_config.vmexit_ctrl &
+ return vmexit_ctrl &
~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}
@@ -478,7 +478,7 @@ static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
return &(to_vmx(vcpu)->pi_desc);
}
-struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu);
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
@@ -487,7 +487,8 @@ void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
static inline struct vmcs *alloc_vmcs(bool shadow)
{
- return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
+ return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
+ GFP_KERNEL_ACCOUNT);
}
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
@@ -516,4 +517,6 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}
+void dump_vmcs(void);
+
#endif /* __KVM_X86_VMX_H */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 941f932373d0..b5edc8e3ce1d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -136,10 +136,14 @@ EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
-/* lapic timer advance (tscdeadline mode only) in nanoseconds */
-unsigned int __read_mostly lapic_timer_advance_ns = 1000;
+/*
+ * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
+ * adaptive tuning starting from default advancement of 1000ns. '0' disables
+ * advancement entirely. Any other value is used as-is and disables adaptive
+ * tuning, i.e. allows privileged userspace to set an exact advancement time.
+ */
+static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
-EXPORT_SYMBOL_GPL(lapic_timer_advance_ns);
static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);
@@ -800,7 +804,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
!vcpu->guest_xcr0_loaded) {
@@ -810,8 +814,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
vcpu->guest_xcr0_loaded = 1;
}
}
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
if (vcpu->guest_xcr0_loaded) {
if (vcpu->arch.xcr0 != host_xcr0)
@@ -819,6 +824,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
vcpu->guest_xcr0_loaded = 0;
}
}
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
@@ -1125,7 +1131,7 @@ static u32 msrs_to_save[] = {
#endif
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
- MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES,
+ MSR_IA32_SPEC_CTRL,
MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
@@ -1158,6 +1164,7 @@ static u32 emulated_msrs[] = {
MSR_IA32_TSC_ADJUST,
MSR_IA32_TSCDEADLINE,
+ MSR_IA32_ARCH_CAPABILITIES,
MSR_IA32_MISC_ENABLE,
MSR_IA32_MCG_STATUS,
MSR_IA32_MCG_CTL,
@@ -2443,6 +2450,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (msr_info->host_initiated)
vcpu->arch.microcode_version = data;
break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated)
+ return 1;
+ vcpu->arch.arch_capabilities = data;
+ break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
@@ -2747,6 +2759,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_UCODE_REV:
msr_info->data = vcpu->arch.microcode_version;
break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
+ return 1;
+ msr_info->data = vcpu->arch.arch_capabilities;
+ break;
case MSR_IA32_TSC:
msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
break;
@@ -3081,7 +3099,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_NESTED_STATE:
r = kvm_x86_ops->get_nested_state ?
- kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+ kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
break;
default:
break;
@@ -3516,7 +3534,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
memset(&events->reserved, 0, sizeof(events->reserved));
}
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu);
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
@@ -3576,12 +3594,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.apic->sipi_vector = events->sipi_vector;
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
- u32 hflags = vcpu->arch.hflags;
- if (events->smi.smm)
- hflags |= HF_SMM_MASK;
- else
- hflags &= ~HF_SMM_MASK;
- kvm_set_hflags(vcpu, hflags);
+ if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
+ if (events->smi.smm)
+ vcpu->arch.hflags |= HF_SMM_MASK;
+ else
+ vcpu->arch.hflags &= ~HF_SMM_MASK;
+ kvm_smm_changed(vcpu);
+ }
vcpu->arch.smi_pending = events->smi.pending;
@@ -3879,7 +3898,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EINVAL;
if (!lapic_in_kernel(vcpu))
goto out;
- u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
+ u.lapic = kzalloc(sizeof(struct kvm_lapic_state),
+ GFP_KERNEL_ACCOUNT);
r = -ENOMEM;
if (!u.lapic)
@@ -4066,7 +4086,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_GET_XSAVE: {
- u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
+ u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT);
r = -ENOMEM;
if (!u.xsave)
break;
@@ -4090,7 +4110,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_GET_XCRS: {
- u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
+ u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT);
r = -ENOMEM;
if (!u.xcrs)
break;
@@ -4257,7 +4277,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
}
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
- u32 kvm_nr_mmu_pages)
+ unsigned long kvm_nr_mmu_pages)
{
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;
@@ -4271,7 +4291,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
return 0;
}
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
return kvm->arch.n_max_mmu_pages;
}
@@ -5945,12 +5965,18 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
{
- kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+ emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
}
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+ const char *smstate)
{
- return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+ return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
+}
+
+static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+{
+ kvm_smm_changed(emul_to_vcpu(ctxt));
}
static const struct x86_emulate_ops emulate_ops = {
@@ -5993,6 +6019,7 @@ static const struct x86_emulate_ops emulate_ops = {
.get_hflags = emulator_get_hflags,
.set_hflags = emulator_set_hflags,
.pre_leave_smm = emulator_pre_leave_smm,
+ .post_leave_smm = emulator_post_leave_smm,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6234,16 +6261,6 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
kvm_mmu_reset_context(vcpu);
}
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
-{
- unsigned changed = vcpu->arch.hflags ^ emul_flags;
-
- vcpu->arch.hflags = emul_flags;
-
- if (changed & HF_SMM_MASK)
- kvm_smm_changed(vcpu);
-}
-
static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
unsigned long *db)
{
@@ -6522,15 +6539,45 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
+static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pio.count = 0;
+ return 1;
+}
+
+static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pio.count = 0;
+
+ if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
+ return 1;
+
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port)
{
unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
size, port, &val, 1);
- /* do not return to emulator after return from userspace */
- vcpu->arch.pio.count = 0;
- return ret;
+ if (ret)
+ return ret;
+
+ /*
+	 * Work around userspace that relies on old KVM behavior of %rip being
+ * incremented prior to exiting to userspace to handle "OUT 0x7e".
+ */
+ if (port == 0x7e &&
+ kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
+ vcpu->arch.complete_userspace_io =
+ complete_fast_pio_out_port_0x7e;
+ kvm_skip_emulated_instruction(vcpu);
+ } else {
+ vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
+ vcpu->arch.complete_userspace_io = complete_fast_pio_out;
+ }
+ return 0;
}
static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
@@ -6540,6 +6587,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
/* We should only ever be called with arch.pio.count equal to 1 */
BUG_ON(vcpu->arch.pio.count != 1);
+ if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
+ vcpu->arch.pio.count = 0;
+ return 1;
+ }
+
/* For size less than 4 we merge, else we zero extend */
val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
: 0;
@@ -6552,7 +6604,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
vcpu->arch.pio.port, &val, 1);
kvm_register_write(vcpu, VCPU_REGS_RAX, val);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
@@ -6571,6 +6623,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
return ret;
}
+ vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
vcpu->arch.complete_userspace_io = complete_fast_pio_in;
return 0;
@@ -6578,16 +6631,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
{
- int ret = kvm_skip_emulated_instruction(vcpu);
+ int ret;
- /*
- * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
- * KVM_EXIT_DEBUG here.
- */
if (in)
- return kvm_fast_pio_in(vcpu, size, port) && ret;
+ ret = kvm_fast_pio_in(vcpu, size, port);
else
- return kvm_fast_pio_out(vcpu, size, port) && ret;
+ ret = kvm_fast_pio_out(vcpu, size, port);
+ return ret && kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_fast_pio);
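
The net effect of the fast-PIO rework is that the instruction is skipped only once the I/O has fully completed. The resulting control flow, annotated (a sketch of the function above, not a verbatim excerpt):

    int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
    {
            int ret = in ? kvm_fast_pio_in(vcpu, size, port)
                         : kvm_fast_pio_out(vcpu, size, port);

            /*
             * ret == 0: exiting to userspace; complete_fast_pio_{in,out}
             * re-checks RIP and skips the instruction on the way back in.
             * ret == 1: handled entirely in-kernel; skip it now.
             */
            return ret && kvm_skip_emulated_instruction(vcpu);
    }
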
@@ -7055,6 +7105,13 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
{
+ if (!lapic_in_kernel(vcpu)) {
+ WARN_ON_ONCE(vcpu->arch.apicv_active);
+ return;
+ }
+ if (!vcpu->arch.apicv_active)
+ return;
+
vcpu->arch.apicv_active = false;
kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
}
@@ -7405,9 +7462,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
}
+#ifdef CONFIG_X86_64
static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
{
-#ifdef CONFIG_X86_64
struct desc_ptr dt;
struct kvm_segment seg;
unsigned long val;
@@ -7457,10 +7514,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
for (i = 0; i < 6; i++)
enter_smm_save_seg_64(vcpu, buf, i);
-#else
- WARN_ON_ONCE(1);
-#endif
}
+#endif
static void enter_smm(struct kvm_vcpu *vcpu)
{
@@ -7471,9 +7526,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
enter_smm_save_state_64(vcpu, buf);
else
+#endif
enter_smm_save_state_32(vcpu, buf);
/*
@@ -7531,8 +7588,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
+#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
kvm_x86_ops->set_efer(vcpu, 0);
+#endif
kvm_update_cpuid(vcpu);
kvm_mmu_reset_context(vcpu);
@@ -7829,15 +7888,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto cancel_injection;
}
- kvm_load_guest_xcr0(vcpu);
-
if (req_immediate_exit) {
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_x86_ops->request_immediate_exit(vcpu);
}
trace_kvm_entry(vcpu->vcpu_id);
- if (lapic_timer_advance_ns)
+ if (lapic_in_kernel(vcpu) &&
+ vcpu->arch.apic->lapic_timer.timer_advance_ns)
wait_lapic_expire(vcpu);
guest_enter_irqoff();
@@ -7883,8 +7941,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
- kvm_put_guest_xcr0(vcpu);
-
kvm_before_interrupt(vcpu);
kvm_x86_ops->handle_external_intr(vcpu);
kvm_after_interrupt(vcpu);
@@ -8725,6 +8781,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
kvm_vcpu_mtrr_init(vcpu);
vcpu_load(vcpu);
@@ -9005,7 +9062,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
struct page *page;
int r;
- vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -9026,21 +9082,23 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_pio_data;
if (irqchip_in_kernel(vcpu->kvm)) {
- r = kvm_create_lapic(vcpu);
+ vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
+ r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
if (r < 0)
goto fail_mmu_destroy;
} else
static_key_slow_inc(&kvm_no_apic_vcpu);
vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!vcpu->arch.mce_banks) {
r = -ENOMEM;
goto fail_free_lapic;
}
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
- if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
+ GFP_KERNEL_ACCOUNT)) {
r = -ENOMEM;
goto fail_free_mce_banks;
}
@@ -9104,7 +9162,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
- INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
atomic_set(&kvm->arch.noncoherent_dma_count, 0);
@@ -9299,13 +9356,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
slot->arch.rmap[i] =
kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!slot->arch.rmap[i])
goto out_free;
if (i == 0)
continue;
- linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL);
+ linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
if (!linfo)
goto out_free;
@@ -9348,13 +9405,13 @@ out_free:
return -ENOMEM;
}
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
/*
* memslots->generation has been incremented.
* mmio generation may have reached its maximum value.
*/
- kvm_mmu_invalidate_mmio_sptes(kvm, slots);
+ kvm_mmu_invalidate_mmio_sptes(kvm, gen);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -9421,13 +9478,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- int nr_mmu_pages = 0;
-
if (!kvm->arch.n_requested_mmu_pages)
- nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
-
- if (nr_mmu_pages)
- kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+ kvm_mmu_change_mmu_pages(kvm,
+ kvm_mmu_calculate_default_mmu_pages(kvm));
/*
* Dirty logging tracks sptes in 4k granularity, meaning that large
@@ -9462,7 +9515,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
- kvm_mmu_invalidate_zap_all_pages(kvm);
+ kvm_mmu_zap_all(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 224cd0a47568..534d3f28bb01 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -181,6 +181,11 @@ static inline bool emul_is_noncanonical_address(u64 la,
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
gva_t gva, gfn_t gfn, unsigned access)
{
+ u64 gen = kvm_memslots(vcpu->kvm)->generation;
+
+ if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
+ return;
+
/*
* If this is a shadow nested page table, the "GVA" is
* actually a nGPA.
@@ -188,7 +193,7 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
vcpu->arch.access = access;
vcpu->arch.mmio_gfn = gfn;
- vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
+ vcpu->arch.mmio_gen = gen;
}
static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
@@ -289,8 +294,6 @@ extern u64 kvm_supported_xcr0(void);
extern unsigned int min_timer_period_us;
-extern unsigned int lapic_timer_advance_ns;
-
extern bool enable_vmware_backdoor;
extern struct static_key kvm_no_apic_vcpu;
@@ -342,4 +345,6 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
__this_cpu_write(current_vcpu, NULL);
}
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
#endif
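
The vcpu_cache_mmio_info() hunk above relies on the memslot generation carrying an update-in-progress flag. The guard, isolated (flag name as used in the patch; the helper itself is hypothetical):

    static bool mmio_info_cacheable(struct kvm_vcpu *vcpu)
    {
            u64 gen = kvm_memslots(vcpu->kvm)->generation;

            /* an entry cached mid-update would be stale immediately */
            return !(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
    }
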
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 140e61843a07..5246db42de45 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -6,6 +6,18 @@
# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n
+# Early boot use of cmdline; don't instrument it
+ifdef CONFIG_AMD_MEM_ENCRYPT
+KCOV_INSTRUMENT_cmdline.o := n
+KASAN_SANITIZE_cmdline.o := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_cmdline.o = -pg
+endif
+
+CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector)
+endif
+
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN $@
@@ -23,7 +35,6 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o misc.o cmdline.o cpu.o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index db4e5aa0858b..b2f1822084ae 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -16,6 +16,30 @@
#include <asm/smap.h>
#include <asm/export.h>
+.macro ALIGN_DESTINATION
+ /* check for bad alignment of destination */
+ movl %edi,%ecx
+ andl $7,%ecx
+ jz 102f /* already aligned */
+ subl $8,%ecx
+ negl %ecx
+ subl %ecx,%edx
+100: movb (%rsi),%al
+101: movb %al,(%rdi)
+ incq %rsi
+ incq %rdi
+ decl %ecx
+ jnz 100b
+102:
+ .section .fixup,"ax"
+103: addl %ecx,%edx /* ecx is zerorest also */
+ jmp copy_user_handle_tail
+ .previous
+
+ _ASM_EXTABLE_UA(100b, 103b)
+ _ASM_EXTABLE_UA(101b, 103b)
+ .endm
+
/*
* copy_user_generic_unrolled - memory copy with exception handling.
* This version is for CPUs like P4 that don't have efficient micro
@@ -194,6 +218,30 @@ ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
+ * Try to copy last bytes and clear the rest if needed.
+ * Since a protection fault in copy_from/to_user is not a normal situation,
+ * it is not necessary to optimize tail handling.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+ALIGN;
+copy_user_handle_tail:
+ movl %edx,%ecx
+1: rep movsb
+2: mov %ecx,%eax
+ ASM_CLAC
+ ret
+
+ _ASM_EXTABLE_UA(1b, 2b)
+ENDPROC(copy_user_handle_tail)
+
+/*
* copy_user_nocache - Uncached memory copy with exception handling
* This will force destination out of cache for more performance.
*
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 9baca3e054be..e7925d668b68 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -94,7 +94,7 @@ static unsigned do_csum(const unsigned char *buff, unsigned len)
: "m" (*(unsigned long *)buff),
"r" (zero), "0" (result));
--count;
- buff += 8;
+ buff += 8;
}
result = add32_with_carry(result>>32,
result&0xffffffff);
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index f5b7f1b3b6d7..b7375dc6898f 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -162,7 +162,7 @@ void __delay(unsigned long loops)
}
EXPORT_SYMBOL(__delay);
-void __const_udelay(unsigned long xloops)
+noinline void __const_udelay(unsigned long xloops)
{
unsigned long lpj = this_cpu_read(cpu_info.loops_per_jiffy) ? : loops_per_jiffy;
int d0;
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
index 3cdf06128d13..be5b5fb1598b 100644
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -6,6 +6,7 @@
asmlinkage void just_return_func(void);
asm(
+ ".text\n"
".type just_return_func, @function\n"
".globl just_return_func\n"
"just_return_func:\n"
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 3b24dc05251c..9d05572370ed 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
/* Copy successful. Return zero */
.L_done_memcpy_trap:
xorl %eax, %eax
+.L_done:
ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
addl %edx, %ecx
.E_trailing_bytes:
mov %ecx, %eax
- ret
+ jmp .L_done
/*
* For write fault handling, given the destination is unaligned,
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
deleted file mode 100644
index dc2ab6ea6768..000000000000
--- a/arch/x86/lib/rwsem.S
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * x86 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-
-#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-
-#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
-#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
-
-#ifdef CONFIG_X86_32
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * %eax contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax which is either a return
- * value or just gets clobbered. Same is true for %edx so make sure GCC
- * reloads it after the slow path, by making it hold a temporary, for
- * example see ____down_write().
- */
-
-#define save_common_regs \
- pushl %ecx
-
-#define restore_common_regs \
- popl %ecx
-
- /* Avoid uglifying the argument copying x86-64 needs to do. */
- .macro movq src, dst
- .endm
-
-#else
-
-/*
- * x86-64 rwsem wrappers
- *
- * This interfaces the inline asm code to the slow-path
- * C routines. We need to save the call-clobbered regs
- * that the asm does not mark as clobbered, and move the
- * argument from %rax to %rdi.
- *
- * NOTE! We don't need to save %rax, because the functions
- * will always return the semaphore pointer in %rax (which
- * is also the input argument to these helpers)
- *
- * The following can clobber %rdx because the asm clobbers it:
- * call_rwsem_down_write_failed
- * call_rwsem_wake
- * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
- */
-
-#define save_common_regs \
- pushq %rdi; \
- pushq %rsi; \
- pushq %rcx; \
- pushq %r8; \
- pushq %r9; \
- pushq %r10; \
- pushq %r11
-
-#define restore_common_regs \
- popq %r11; \
- popq %r10; \
- popq %r9; \
- popq %r8; \
- popq %rcx; \
- popq %rsi; \
- popq %rdi
-
-#endif
-
-/* Fix up special calling conventions */
-ENTRY(call_rwsem_down_read_failed)
- FRAME_BEGIN
- save_common_regs
- __ASM_SIZE(push,) %__ASM_REG(dx)
- movq %rax,%rdi
- call rwsem_down_read_failed
- __ASM_SIZE(pop,) %__ASM_REG(dx)
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_down_read_failed)
-
-ENTRY(call_rwsem_down_read_failed_killable)
- FRAME_BEGIN
- save_common_regs
- __ASM_SIZE(push,) %__ASM_REG(dx)
- movq %rax,%rdi
- call rwsem_down_read_failed_killable
- __ASM_SIZE(pop,) %__ASM_REG(dx)
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_down_read_failed_killable)
-
-ENTRY(call_rwsem_down_write_failed)
- FRAME_BEGIN
- save_common_regs
- movq %rax,%rdi
- call rwsem_down_write_failed
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_down_write_failed)
-
-ENTRY(call_rwsem_down_write_failed_killable)
- FRAME_BEGIN
- save_common_regs
- movq %rax,%rdi
- call rwsem_down_write_failed_killable
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_down_write_failed_killable)
-
-ENTRY(call_rwsem_wake)
- FRAME_BEGIN
- /* do nothing if still outstanding active readers */
- __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
- jnz 1f
- save_common_regs
- movq %rax,%rdi
- call rwsem_wake
- restore_common_regs
-1: FRAME_END
- ret
-ENDPROC(call_rwsem_wake)
-
-ENTRY(call_rwsem_downgrade_wake)
- FRAME_BEGIN
- save_common_regs
- __ASM_SIZE(push,) %__ASM_REG(dx)
- movq %rax,%rdi
- call rwsem_downgrade_wake
- __ASM_SIZE(pop,) %__ASM_REG(dx)
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index ee42bb0cbeb3..9952a01cad24 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -55,26 +55,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
EXPORT_SYMBOL(clear_user);
/*
- * Try to copy last bytes and clear the rest if needed.
- * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
- */
-__visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len)
-{
- for (; len; --len, to++) {
- char c;
-
- if (__get_user_nocheck(c, from++, sizeof(char)))
- break;
- if (__put_user_nocheck(c, to, sizeof(char)))
- break;
- }
- clac();
- return len;
-}
-
-/*
* Similar to copy_user_handle_tail, probe for the write fault point,
* but reuse __memcpy_mcsafe in case a new read error is encountered.
* clac() is handled in _copy_to_iter_mcsafe().
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 19c6abf9ea31..752ad11d6868 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -13,8 +13,8 @@
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
#ifdef CONFIG_X86_64
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
- [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
+DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif
struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -52,10 +52,10 @@ cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
-static void __init percpu_setup_debug_store(int cpu)
+static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
- int npages;
+ unsigned int npages;
void *cea;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
@@ -78,9 +78,43 @@ static void __init percpu_setup_debug_store(int cpu)
#endif
}
+#ifdef CONFIG_X86_64
+
+#define cea_map_stack(name) do { \
+ npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
+ cea_map_percpu_pages(cea->estacks.name## _stack, \
+ estacks->name## _stack, npages, PAGE_KERNEL); \
+ } while (0)
+
+static void __init percpu_setup_exception_stacks(unsigned int cpu)
+{
+ struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
+ struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
+ unsigned int npages;
+
+ BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+
+ per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
+
+ /*
+	 * The exception stack mappings in the per cpu area are protected
+ * by guard pages so each stack must be mapped separately. DB2 is
+ * not mapped; it just exists to catch triple nesting of #DB.
+ */
+ cea_map_stack(DF);
+ cea_map_stack(NMI);
+ cea_map_stack(DB1);
+ cea_map_stack(DB);
+ cea_map_stack(MCE);
+}
+#else
+static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
+#endif
+
/* Setup the fixmap mappings only once per-processor */
-static void __init setup_cpu_entry_area(int cpu)
+static void __init setup_cpu_entry_area(unsigned int cpu)
{
+ struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
pgprot_t gdt_prot = PAGE_KERNEL_RO;
@@ -101,10 +135,9 @@ static void __init setup_cpu_entry_area(int cpu)
pgprot_t tss_prot = PAGE_KERNEL;
#endif
- cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
- gdt_prot);
+ cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);
- cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
+ cea_map_percpu_pages(&cea->entry_stack_page,
per_cpu_ptr(&entry_stack_storage, cpu), 1,
PAGE_KERNEL);
@@ -128,22 +161,15 @@ static void __init setup_cpu_entry_area(int cpu)
BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
- cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
- &per_cpu(cpu_tss_rw, cpu),
+ cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
#ifdef CONFIG_X86_32
- per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+ per_cpu(cpu_entry_area, cpu) = cea;
#endif
-#ifdef CONFIG_X86_64
- BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
- BUILD_BUG_ON(sizeof(exception_stacks) !=
- sizeof(((struct cpu_entry_area *)0)->exception_stacks));
- cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
- &per_cpu(exception_stacks, cpu),
- sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
-#endif
+ percpu_setup_exception_stacks(cpu);
+
percpu_setup_debug_store(cpu);
}
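
The cea_map_stack() macro above is worth a second look: it uses C token pasting so that a single name selects both the per-cpu source stack and its slot in the cpu entry area, letting each IST stack get its own guard-page-separated mapping. A minimal standalone sketch of the pattern; the struct, field names, and map_pages() helper here are illustrative, not the kernel's:

struct stacks {
	char DF_stack[4096];
	char NMI_stack[4096];
};

static void map_pages(void *dst, const void *src, unsigned int npages)
{
	/* stand-in for cea_map_percpu_pages() */
}

/* map_stack(dst, src, DF) expands to references to the DF_stack members. */
#define map_stack(dst, src, name)					\
	map_pages((dst)->name##_stack, (src)->name##_stack,		\
		  sizeof((src)->name##_stack) / 4096)
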
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index ee8f8ab46941..6a7302d1161f 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -259,7 +259,8 @@ static void note_wx(struct pg_state *st)
#endif
/* Account the WX pages */
st->wx_pages += npages;
- WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
+ WARN_ONCE(__supported_pte_mask & _PAGE_NX,
+ "x86/mm: Found insecure W+X mapping at address %pS\n",
(void *)st->start_address);
}
@@ -577,7 +578,7 @@ void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
- if (user && static_cpu_has(X86_FEATURE_PTI))
+ if (user && boot_cpu_has(X86_FEATURE_PTI))
pgd = kernel_to_user_pgdp(pgd);
#endif
ptdump_walk_pgd_level_core(m, pgd, false, false);
@@ -590,7 +591,7 @@ void ptdump_walk_user_pgd_level_checkwx(void)
pgd_t *pgd = INIT_PGD;
if (!(__supported_pte_mask & _PAGE_NX) ||
- !static_cpu_has(X86_FEATURE_PTI))
+ !boot_cpu_has(X86_FEATURE_PTI))
return;
pr_info("x86/mm: Checking user space page tables\n");
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 667f1da36208..46df4c6aae46 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -28,6 +28,7 @@
#include <asm/mmu_context.h> /* vma_pkey() */
#include <asm/efi.h> /* efi_recover_from_page_fault()*/
#include <asm/desc.h> /* store_idt(), ... */
+#include <asm/cpu_entry_area.h> /* exception stack */
#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
@@ -359,8 +360,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
- WARN_ON_ONCE(in_nmi());
-
/*
* Copy kernel mappings over when needed. This can also
* happen within a race in page table update. In the latter
@@ -603,24 +602,9 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
}
-/*
- * This helper function transforms the #PF error_code bits into
- * "[PROT] [USER]" type of descriptive, almost human-readable error strings:
- */
-static void err_str_append(unsigned long error_code, char *buf, unsigned long mask, const char *txt)
-{
- if (error_code & mask) {
- if (buf[0])
- strcat(buf, " ");
- strcat(buf, txt);
- }
-}
-
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
- char err_txt[64];
-
if (!oops_may_print())
return;
@@ -644,31 +628,29 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad
from_kuid(&init_user_ns, current_uid()));
}
- pr_alert("BUG: unable to handle kernel %s at %px\n",
- address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
- (void *)address);
-
- err_txt[0] = 0;
-
- /*
- * Note: length of these appended strings including the separation space and the
- * zero delimiter must fit into err_txt[].
- */
- err_str_append(error_code, err_txt, X86_PF_PROT, "[PROT]" );
- err_str_append(error_code, err_txt, X86_PF_WRITE, "[WRITE]");
- err_str_append(error_code, err_txt, X86_PF_USER, "[USER]" );
- err_str_append(error_code, err_txt, X86_PF_RSVD, "[RSVD]" );
- err_str_append(error_code, err_txt, X86_PF_INSTR, "[INSTR]");
- err_str_append(error_code, err_txt, X86_PF_PK, "[PK]" );
-
- pr_alert("#PF error: %s\n", error_code ? err_txt : "[normal kernel read fault]");
+ if (address < PAGE_SIZE && !user_mode(regs))
+ pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
+ (void *)address);
+ else
+ pr_alert("BUG: unable to handle page fault for address: %px\n",
+ (void *)address);
+
+ pr_alert("#PF: %s %s in %s mode\n",
+ (error_code & X86_PF_USER) ? "user" : "supervisor",
+ (error_code & X86_PF_INSTR) ? "instruction fetch" :
+ (error_code & X86_PF_WRITE) ? "write access" :
+ "read access",
+ user_mode(regs) ? "user" : "kernel");
+ pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
+ !(error_code & X86_PF_PROT) ? "not-present page" :
+ (error_code & X86_PF_RSVD) ? "reserved bit violation" :
+ (error_code & X86_PF_PK) ? "protection keys violation" :
+ "permissions violation");
if (!(error_code & X86_PF_USER) && user_mode(regs)) {
struct desc_ptr idt, gdt;
u16 ldtr, tr;
- pr_alert("This was a system access from user code\n");
-
/*
* This can happen for quite a few reasons. The more obvious
* ones are faults accessing the GDT, or LDT. Perhaps
@@ -793,7 +775,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
if (is_vmalloc_addr((void *)address) &&
(((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
- unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
+ unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
/*
* We're likely to be running with very little stack space
* left. It's plausible that we'd hit this condition but
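
The rewritten show_fault_oops() above drops the strcat()-based err_str_append() buffer in favor of decoding the #PF error-code bits directly and exhaustively. A hedged standalone sketch of the same decode; the X86_PF_* values match the kernel's page-fault error-code layout, and printf() stands in for pr_alert():

#include <stdio.h>

#define X86_PF_PROT	(1UL << 0)	/* 0: not-present page */
#define X86_PF_WRITE	(1UL << 1)	/* 0: read, 1: write */
#define X86_PF_USER	(1UL << 2)	/* 0: supervisor, 1: user */
#define X86_PF_RSVD	(1UL << 3)	/* reserved bit set */
#define X86_PF_INSTR	(1UL << 4)	/* instruction fetch */
#define X86_PF_PK	(1UL << 5)	/* protection keys violation */

static void decode_pf(unsigned long ec)
{
	printf("#PF: %s %s\n",
	       (ec & X86_PF_USER) ? "user" : "supervisor",
	       (ec & X86_PF_INSTR) ? "instruction fetch" :
	       (ec & X86_PF_WRITE) ? "write access" : "read access");
	printf("#PF: error_code(0x%04lx) - %s\n", ec,
	       !(ec & X86_PF_PROT) ? "not-present page" :
	       (ec & X86_PF_RSVD) ? "reserved bit violation" :
	       (ec & X86_PF_PK) ? "protection keys violation" :
	       "permissions violation");
}
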
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f905a2371080..fd10d91a6115 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -5,6 +5,8 @@
#include <linux/memblock.h>
#include <linux/swapfile.h>
#include <linux/swapops.h>
+#include <linux/kmemleak.h>
+#include <linux/sched/task.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
@@ -22,6 +24,7 @@
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/pti.h>
+#include <asm/text-patching.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -701,6 +704,41 @@ void __init init_mem_mapping(void)
}
/*
+ * Initialize an mm_struct to be used during poking and a pointer to be used
+ * during patching.
+ */
+void __init poking_init(void)
+{
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+ poking_mm = copy_init_mm();
+ BUG_ON(!poking_mm);
+
+ /*
+ * Randomize the poking address, but make sure that the following page
+ * will be mapped at the same PMD. We need 2 pages, so find space for 3,
+ * and adjust the address if the PMD ends after the first one.
+ */
+ poking_addr = TASK_UNMAPPED_BASE;
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
+ (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);
+
+ if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
+ poking_addr += PAGE_SIZE;
+
+ /*
+ * We need to trigger the allocation of the page-tables that will be
+ * needed for poking now. Later, poking may be performed in an atomic
+ * section, which might cause allocation to fail.
+ */
+ ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+ BUG_ON(!ptep);
+ pte_unmap_unlock(ptep, ptl);
+}
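
The boundary dance in poking_init() deserves a note: the code needs two consecutive pages that live in the same PMD, so it leaves randomization room for three pages and then checks whether the second page would start exactly on a PMD boundary; if so, both pages are shifted up by one page. A small sketch of that check, assuming 4K pages and 2M PMDs (the trailing underscores mark these as local stand-ins):

#define PAGE_SIZE_	4096UL
#define PMD_SIZE_	(2UL << 20)
#define PMD_MASK_	(~(PMD_SIZE_ - 1))

/* True if the page at addr and the next page fall into different PMDs. */
static int straddles_pmd(unsigned long addr)
{
	return ((addr + PAGE_SIZE_) & ~PMD_MASK_) == 0;
}
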
+
+/*
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
@@ -766,6 +804,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
if (debug_pagealloc_enabled()) {
pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
begin, end - 1);
+ /*
+ * Inform kmemleak about the hole in the memory since the
+ * corresponding pages will be unmapped.
+ */
+ kmemleak_free_part((void *)begin, end - begin);
set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
} else {
/*
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0029604af8a4..dd73d5d74393 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -825,7 +825,7 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
pte = early_ioremap_pte(addr);
/* Sanitize 'prot' against any unsupported bits: */
- pgprot_val(flags) &= __default_kernel_pte_mask;
+ pgprot_val(flags) &= __supported_pte_mask;
if (pgprot_val(flags))
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 462fde83b515..8dc0fc0b1382 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -24,14 +24,16 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
-static __init void *early_alloc(size_t size, int nid, bool panic)
+static __init void *early_alloc(size_t size, int nid, bool should_panic)
{
- if (panic)
- return memblock_alloc_try_nid(size, size,
- __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
- else
- return memblock_alloc_try_nid_nopanic(size, size,
+ void *ptr = memblock_alloc_try_nid(size, size,
__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+
+ if (!ptr && should_panic)
+ panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
+ (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
+
+ return ptr;
}
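
Since memblock_alloc_try_nid() no longer comes in panicking and non-panicking variants, early_alloc() above folds the failure policy into one helper: try the allocation, and panic only when the caller asked for it, naming the call site via _RET_IP_. The same "optional panic" wrapper in generic form; alloc_backend() is a hypothetical stand-in for the real allocator:

static void *early_alloc_sketch(size_t size, int nid, bool should_panic)
{
	void *ptr = alloc_backend(size, nid);	/* may return NULL */

	if (!ptr && should_panic)
		panic("%pS: Failed to allocate %zu bytes, nid=%d\n",
		      (void *)_RET_IP_, size, nid);

	return ptr;
}
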
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 3f452ffed7e9..dc3f058bdf9b 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -94,7 +94,7 @@ void __init kernel_randomize_memory(void)
if (!kaslr_memory_enabled())
return;
- kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
+ kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
/*
@@ -125,10 +125,7 @@ void __init kernel_randomize_memory(void)
*/
entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
prandom_bytes_state(&rand_state, &rand, sizeof(rand));
- if (pgtable_l5_enabled())
- entropy = (rand % (entropy + 1)) & P4D_MASK;
- else
- entropy = (rand % (entropy + 1)) & PUD_MASK;
+ entropy = (rand % (entropy + 1)) & PUD_MASK;
vaddr += entropy;
*kaslr_regions[i].base = vaddr;
@@ -137,84 +134,71 @@ void __init kernel_randomize_memory(void)
* randomization alignment.
*/
vaddr += get_padding(&kaslr_regions[i]);
- if (pgtable_l5_enabled())
- vaddr = round_up(vaddr + 1, P4D_SIZE);
- else
- vaddr = round_up(vaddr + 1, PUD_SIZE);
+ vaddr = round_up(vaddr + 1, PUD_SIZE);
remain_entropy -= entropy;
}
}
static void __meminit init_trampoline_pud(void)
{
- unsigned long paddr, paddr_next;
+ pud_t *pud_page_tramp, *pud, *pud_tramp;
+ p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
+ unsigned long paddr, vaddr;
pgd_t *pgd;
- pud_t *pud_page, *pud_page_tramp;
- int i;
pud_page_tramp = alloc_low_page();
+ /*
+ * There are two mappings for the low 1MB area, the direct mapping
+ * and the 1:1 mapping for the real mode trampoline:
+ *
+ * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET
+ * 1:1 mapping: virt_addr = phys_addr
+ */
paddr = 0;
- pgd = pgd_offset_k((unsigned long)__va(paddr));
- pud_page = (pud_t *) pgd_page_vaddr(*pgd);
-
- for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
- pud_t *pud, *pud_tramp;
- unsigned long vaddr = (unsigned long)__va(paddr);
+ vaddr = (unsigned long)__va(paddr);
+ pgd = pgd_offset_k(vaddr);
- pud_tramp = pud_page_tramp + pud_index(paddr);
- pud = pud_page + pud_index(vaddr);
- paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
-
- *pud_tramp = *pud;
- }
+ p4d = p4d_offset(pgd, vaddr);
+ pud = pud_offset(p4d, vaddr);
- set_pgd(&trampoline_pgd_entry,
- __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
-}
-
-static void __meminit init_trampoline_p4d(void)
-{
- unsigned long paddr, paddr_next;
- pgd_t *pgd;
- p4d_t *p4d_page, *p4d_page_tramp;
- int i;
+ pud_tramp = pud_page_tramp + pud_index(paddr);
+ *pud_tramp = *pud;
- p4d_page_tramp = alloc_low_page();
-
- paddr = 0;
- pgd = pgd_offset_k((unsigned long)__va(paddr));
- p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);
-
- for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
- p4d_t *p4d, *p4d_tramp;
- unsigned long vaddr = (unsigned long)__va(paddr);
+ if (pgtable_l5_enabled()) {
+ p4d_page_tramp = alloc_low_page();
p4d_tramp = p4d_page_tramp + p4d_index(paddr);
- p4d = p4d_page + p4d_index(vaddr);
- paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
- *p4d_tramp = *p4d;
- }
+ set_p4d(p4d_tramp,
+ __p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));
- set_pgd(&trampoline_pgd_entry,
- __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+ set_pgd(&trampoline_pgd_entry,
+ __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+ } else {
+ set_pgd(&trampoline_pgd_entry,
+ __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
+ }
}
/*
- * Create PGD aligned trampoline table to allow real mode initialization
- * of additional CPUs. Consume only 1 low memory page.
+ * The real mode trampoline, which is required for bootstrapping CPUs,
+ * occupies only a small area under the low 1MB. See reserve_real_mode()
+ * for details.
+ *
+ * If KASLR is disabled the first PGD entry of the direct mapping is copied
+ * to map the real mode trampoline.
+ *
+ * If KASLR is enabled, copy only the PUD which covers the low 1MB
+ * area. This limits the randomization granularity to 1GB for both 4-level
+ * and 5-level paging.
*/
void __meminit init_trampoline(void)
{
-
if (!kaslr_memory_enabled()) {
init_trampoline_default();
return;
}
- if (pgtable_l5_enabled())
- init_trampoline_p4d();
- else
- init_trampoline_pud();
+ init_trampoline_pud();
}
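
The randomization loop in kernel_randomize_memory() (first hunk above) apportions the leftover virtual-address slack: each region gets an even share of what remains, consumes a PUD-aligned random slice of it, and the loop rounds up past the region before placing the next one. With the P4D rounding removed, 1GB is now the granularity for both 4-level and 5-level paging. A condensed sketch; prng_next() and the region arrays are illustrative:

#define PUD_SIZE_	(1UL << 30)
#define PUD_MASK_	(~(PUD_SIZE_ - 1))

static unsigned long prng_state = 12345;
static unsigned long prng_next(void)
{
	/* Knuth's 64-bit LCG constants; any PRNG would do for a sketch. */
	prng_state = prng_state * 6364136223846793005UL + 1442695040888963407UL;
	return prng_state;
}

static void place_regions(unsigned long vaddr, unsigned long *bases,
			  const unsigned long *sizes, unsigned int n,
			  unsigned long remain_entropy)
{
	for (unsigned int i = 0; i < n; i++) {
		/* Fair share of the remaining slack for this region. */
		unsigned long entropy = remain_entropy / (n - i);

		entropy = (prng_next() % (entropy + 1)) & PUD_MASK_;
		vaddr += entropy;
		bases[i] = vaddr;

		/* Step past the region, then round up to the next PUD. */
		vaddr += sizes[i];
		vaddr = (vaddr + PUD_SIZE_) & PUD_MASK_;
		remain_entropy -= entropy;
	}
}
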
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index db3165714521..dc726e07d8ba 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
/* Can we access it for direct reading/writing? Must be RAM: */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
- return addr + count <= __pa(high_memory);
+ return addr + count - 1 <= __pa(high_memory - 1);
}
/* Can we access it through mmap? Must be a valid physical address: */
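
The one-line valid_phys_addr_range() change above is an overflow fix: when RAM is populated up to the very top of the physical address space, __pa(high_memory) is one past the last valid address and can overflow phys_addr_t, so the old comparison misbehaves. Comparing the last byte of the request against the last valid byte keeps every term representable. A self-contained illustration in 32-bit arithmetic; range_ok() assumes count >= 1, as the kernel caller does:

#include <stdint.h>
#include <stdbool.h>

/*
 * last_valid plays the role of __pa(high_memory - 1). If memory ends at
 * 2^32, "one past the end" wraps to 0 and a naive addr + count <= end
 * test rejects everything; last-byte-to-last-byte comparison does not.
 */
static bool range_ok(uint32_t addr, uint32_t count, uint32_t last_valid)
{
	return addr + count - 1 <= last_valid;
}
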
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 12c1b7a83ed7..dfb6c4df639a 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -195,15 +195,11 @@ static void __init alloc_node_data(int nid)
* Allocate node data. Try node-local memory and then any node.
* Never allocate in DMA zone.
*/
- nd_pa = memblock_phys_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
- nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
- MEMBLOCK_ALLOC_ACCESSIBLE);
- if (!nd_pa) {
- pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
- nd_size, nid);
- return;
- }
+ pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
+ nd_size, nid);
+ return;
}
nd = __va(nd_pa);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 14e6119838a6..daf4d645e537 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -738,7 +738,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
{
unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
pgprot_t old_prot, new_prot, req_prot, chk_prot;
- pte_t new_pte, old_pte, *tmp;
+ pte_t new_pte, *tmp;
enum pg_level level;
/*
@@ -781,7 +781,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
* Convert protection attributes to 4k-format, as cpa->mask* are set
* up accordingly.
*/
- old_pte = *kpte;
+
/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
req_prot = pgprot_large_2_4k(old_prot);
@@ -2209,8 +2209,6 @@ int set_pages_rw(struct page *page, int numpages)
return set_memory_rw(addr, numpages);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-
static int __set_pages_p(struct page *page, int numpages)
{
unsigned long tempaddr = (unsigned long) page_address(page);
@@ -2249,6 +2247,16 @@ static int __set_pages_np(struct page *page, int numpages)
return __change_page_attr_set_clr(&cpa, 0);
}
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ return __set_pages_np(page, 1);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ return __set_pages_p(page, 1);
+}
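
set_direct_map_invalid_noflush() and set_direct_map_default_noflush() above export the existing __set_pages_np()/__set_pages_p() helpers under an arch-neutral name, and the surrounding hunks move them out from under CONFIG_DEBUG_PAGEALLOC so they are always built. A hedged usage sketch; the caller is hypothetical, and the _noflush suffix means the TLB flush is the caller's job:

/* Hypothetical caller: run fn() with @page dropped from the direct map. */
static int with_page_unmapped(struct page *page, void (*fn)(void))
{
	int ret = set_direct_map_invalid_noflush(page);

	if (ret)
		return ret;
	flush_tlb_all();	/* _noflush: flushing is on us */

	fn();

	ret = set_direct_map_default_noflush(page);
	flush_tlb_all();
	return ret;
}
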
+
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
@@ -2282,7 +2290,6 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
}
#ifdef CONFIG_HIBERNATION
-
bool kernel_page_present(struct page *page)
{
unsigned int level;
@@ -2294,11 +2301,8 @@ bool kernel_page_present(struct page *page)
pte = lookup_address((unsigned long)page_address(page), &level);
return (pte_val(*pte) & _PAGE_PRESENT);
}
-
#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags)
{
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 7bd01709a091..1f67b1e15bf6 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -190,7 +190,7 @@ static void pgd_dtor(pgd_t *pgd)
* when PTI is enabled. We need them to map the per-process LDT into the
* user-space page-table.
*/
-#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
+#define PREALLOCATED_USER_PMDS (boot_cpu_has(X86_FEATURE_PTI) ? \
KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
@@ -292,7 +292,7 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return;
pgdp = kernel_to_user_pgdp(pgdp);
@@ -373,14 +373,14 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
static struct kmem_cache *pgd_cache;
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
{
/*
* When PAE kernel is running as a Xen domain, it does not use
* shared kernel pmd. And this requires a whole page for pgd.
*/
if (!SHARED_KERNEL_PMD)
- return 0;
+ return;
/*
* when PAE kernel is not running as a Xen domain, it uses
@@ -390,9 +390,7 @@ static int __init pgd_cache_init(void)
*/
pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
SLAB_PANIC, NULL);
- return 0;
}
-core_initcall(pgd_cache_init);
static inline pgd_t *_pgd_alloc(void)
{
@@ -420,6 +418,10 @@ static inline void _pgd_free(pgd_t *pgd)
}
#else
+void __init pgd_cache_init(void)
+{
+}
+
static inline pgd_t *_pgd_alloc(void)
{
return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 4fee5c3003ed..9c2463bc158f 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -35,6 +35,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/cpu.h>
#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
@@ -77,7 +78,7 @@ static void __init pti_print_if_secure(const char *reason)
pr_info("%s\n", reason);
}
-enum pti_mode {
+static enum pti_mode {
PTI_AUTO = 0,
PTI_FORCE_OFF,
PTI_FORCE_ON
@@ -115,7 +116,8 @@ void __init pti_check_boottime_disable(void)
}
}
- if (cmdline_find_option_bool(boot_command_line, "nopti")) {
+ if (cmdline_find_option_bool(boot_command_line, "nopti") ||
+ cpu_mitigations_off()) {
pti_mode = PTI_FORCE_OFF;
pti_print_if_insecure("disabled on command line.");
return;
@@ -602,7 +604,7 @@ static void pti_clone_kernel_text(void)
set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}
-void pti_set_kernel_image_nonglobal(void)
+static void pti_set_kernel_image_nonglobal(void)
{
/*
* The identity map is created with PMDs, regardless of the
@@ -626,7 +628,7 @@ void pti_set_kernel_image_nonglobal(void)
*/
void __init pti_init(void)
{
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return;
pr_info("enabled\n");
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index bc4bc7b2f075..7f61431c75fb 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -634,7 +634,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
-static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
{
const struct flush_tlb_info *f = info;
@@ -722,43 +722,81 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
*/
unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
+
+#ifdef CONFIG_DEBUG_VM
+static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
+#endif
+
+static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned int stride_shift, bool freed_tables,
+ u64 new_tlb_gen)
+{
+ struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
+
+#ifdef CONFIG_DEBUG_VM
+ /*
+ * Ensure that the following code is non-reentrant and flush_tlb_info
+ * is not overwritten. This means no TLB flushing is initiated by
+ * interrupt handlers and machine-check exception handlers.
+ */
+ BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
+#endif
+
+ info->start = start;
+ info->end = end;
+ info->mm = mm;
+ info->stride_shift = stride_shift;
+ info->freed_tables = freed_tables;
+ info->new_tlb_gen = new_tlb_gen;
+
+ return info;
+}
+
+static inline void put_flush_tlb_info(void)
+{
+#ifdef CONFIG_DEBUG_VM
+	/* Complete reentrancy prevention checks */
+ barrier();
+ this_cpu_dec(flush_tlb_info_idx);
+#endif
+}
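
These two helpers replace a stack-allocated, cacheline-aligned flush_tlb_info with one shared per-CPU slot, and under CONFIG_DEBUG_VM a per-CPU counter asserts that get/put pairs never nest: a nested user (say, a flush initiated from an interrupt handler) would silently overwrite the slot. The same guard in generic form, using the kernel's this_cpu accessors:

static DEFINE_PER_CPU(struct flush_tlb_info, shared_slot);
static DEFINE_PER_CPU(unsigned int, shared_slot_busy);

static struct flush_tlb_info *get_shared_slot(void)
{
	/* Trips if a user of the slot interrupts another user. */
	BUG_ON(this_cpu_inc_return(shared_slot_busy) != 1);
	return this_cpu_ptr(&shared_slot);
}

static void put_shared_slot(void)
{
	barrier();	/* order all stores to the slot before release */
	this_cpu_dec(shared_slot_busy);
}
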
+
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned int stride_shift,
bool freed_tables)
{
+ struct flush_tlb_info *info;
+ u64 new_tlb_gen;
int cpu;
- struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
- .mm = mm,
- .stride_shift = stride_shift,
- .freed_tables = freed_tables,
- };
-
cpu = get_cpu();
- /* This is also a barrier that synchronizes with switch_mm(). */
- info.new_tlb_gen = inc_mm_tlb_gen(mm);
-
/* Should we flush just the requested range? */
- if ((end != TLB_FLUSH_ALL) &&
- ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
- info.start = start;
- info.end = end;
- } else {
- info.start = 0UL;
- info.end = TLB_FLUSH_ALL;
+ if ((end == TLB_FLUSH_ALL) ||
+ ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
+ start = 0;
+ end = TLB_FLUSH_ALL;
}
+ /* This is also a barrier that synchronizes with switch_mm(). */
+ new_tlb_gen = inc_mm_tlb_gen(mm);
+
+ info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
+ new_tlb_gen);
+
if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
- VM_WARN_ON(irqs_disabled());
+ lockdep_assert_irqs_enabled();
local_irq_disable();
- flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
+ flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
local_irq_enable();
}
if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), &info);
+ flush_tlb_others(mm_cpumask(mm), info);
+ put_flush_tlb_info();
put_cpu();
}
@@ -787,38 +825,48 @@ static void do_kernel_range_flush(void *info)
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
-
/* Balance as user space task's flush, a bit conservative */
if (end == TLB_FLUSH_ALL ||
(end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
on_each_cpu(do_flush_tlb_all, NULL, 1);
} else {
- struct flush_tlb_info info;
- info.start = start;
- info.end = end;
- on_each_cpu(do_kernel_range_flush, &info, 1);
+ struct flush_tlb_info *info;
+
+ preempt_disable();
+ info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
+
+ on_each_cpu(do_kernel_range_flush, info, 1);
+
+ put_flush_tlb_info();
+ preempt_enable();
}
}
+/*
+ * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
+ * This means that the 'struct flush_tlb_info' that describes which mappings to
+ * flush is actually fixed. We therefore set a single fixed struct and use it in
+ * arch_tlbbatch_flush().
+ */
+static const struct flush_tlb_info full_flush_tlb_info = {
+ .mm = NULL,
+ .start = 0,
+ .end = TLB_FLUSH_ALL,
+};
+
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
- struct flush_tlb_info info = {
- .mm = NULL,
- .start = 0UL,
- .end = TLB_FLUSH_ALL,
- };
-
int cpu = get_cpu();
if (cpumask_test_cpu(cpu, &batch->cpumask)) {
- VM_WARN_ON(irqs_disabled());
+ lockdep_assert_irqs_enabled();
local_irq_disable();
- flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
+ flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
local_irq_enable();
}
if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
- flush_tlb_others(&batch->cpumask, &info);
+ flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
cpumask_clear(&batch->cpumask);
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 30a5111ae5fd..527e69b12002 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -635,6 +635,22 @@ static void quirk_no_aersid(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
+static void quirk_intel_th_dnv(struct pci_dev *dev)
+{
+ struct resource *r = &dev->resource[4];
+
+ /*
+ * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
+ * appears to be 4 MB in reality.
+ */
+ if (r->end == r->start + 0x7ff) {
+ r->start = 0;
+ r->end = 0x3fffff;
+ r->flags |= IORESOURCE_UNSET;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
+
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 458a0e2bcc57..a25a9fd987a9 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -449,7 +449,7 @@ void __init efi_free_boot_services(void)
*/
rm_size = real_mode_size_needed();
if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
- set_real_mode_mem(start, rm_size);
+ set_real_mode_mem(start);
start += rm_size;
size -= rm_size;
}
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index b4ab779f1d47..ac9e7bf49b66 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -141,6 +141,9 @@ void * __init prom_early_alloc(unsigned long size)
* wasted bootmem) and hand off chunks of it to callers.
*/
res = memblock_alloc(chunk_size, SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ chunk_size);
BUG_ON(!res);
prom_early_allocated += chunk_size;
memset(res, 0, chunk_size);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 2c53b0f19329..1297e185b8c8 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -2133,14 +2133,19 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
*/
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
{
- unsigned char *uvhub_mask;
struct uvhub_desc *uvhub_descs;
+ unsigned char *uvhub_mask = NULL;
if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
timeout_us = calculate_destination_timeout();
uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
+ if (!uvhub_descs)
+ goto fail;
+
uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
+ if (!uvhub_mask)
+ goto fail;
if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
goto fail;
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index bcddf09b5aa3..4845b8c7be7f 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -90,7 +90,6 @@ static int get_e820_md5(struct e820_table *table, void *buf)
}
desc->tfm = tfm;
- desc->flags = 0;
size = offsetof(struct e820_table, entries) +
sizeof(struct e820_entry) * table->nr_entries;
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index d10105825d57..7dce39c8c034 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -15,15 +15,6 @@ u32 *trampoline_cr4_features;
/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;
-void __init set_real_mode_mem(phys_addr_t mem, size_t size)
-{
- void *base = __va(mem);
-
- real_mode_header = (struct real_mode_header *) base;
- printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
- base, (unsigned long long)mem, size);
-}
-
void __init reserve_real_mode(void)
{
phys_addr_t mem;
@@ -42,7 +33,7 @@ void __init reserve_real_mode(void)
}
memblock_reserve(mem, size);
- set_real_mode_mem(mem, size);
+ set_real_mode_mem(mem);
}
static void __init setup_real_mode(void)
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 96cb20de08af..f60501a384f9 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -37,8 +37,7 @@ REALMODE_OBJS = $(addprefix $(obj)/,$(realmode-y))
sed-pasyms := -n -r -e 's/^([0-9a-fA-F]+) [ABCDGRSTVW] (.+)$$/pa_\2 = \2;/p'
quiet_cmd_pasyms = PASYMS $@
- cmd_pasyms = $(NM) $(filter-out FORCE,$^) | \
- sed $(sed-pasyms) | sort | uniq > $@
+ cmd_pasyms = $(NM) $(real-prereqs) | sed $(sed-pasyms) | sort | uniq > $@
targets += pasyms.h
$(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index b629f6992d9f..ce7188cbdae5 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -11,7 +11,9 @@
#define Elf_Shdr ElfW(Shdr)
#define Elf_Sym ElfW(Sym)
-static Elf_Ehdr ehdr;
+static Elf_Ehdr ehdr;
+static unsigned long shnum;
+static unsigned int shstrndx;
struct relocs {
uint32_t *offset;
@@ -241,9 +243,9 @@ static const char *sec_name(unsigned shndx)
{
const char *sec_strtab;
const char *name;
- sec_strtab = secs[ehdr.e_shstrndx].strtab;
+ sec_strtab = secs[shstrndx].strtab;
name = "<noname>";
- if (shndx < ehdr.e_shnum) {
+ if (shndx < shnum) {
name = sec_strtab + secs[shndx].shdr.sh_name;
}
else if (shndx == SHN_ABS) {
@@ -271,7 +273,7 @@ static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
static Elf_Sym *sym_lookup(const char *symname)
{
int i;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
long nsyms;
char *strtab;
@@ -366,27 +368,41 @@ static void read_ehdr(FILE *fp)
ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum);
ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx);
- if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
+ shnum = ehdr.e_shnum;
+ shstrndx = ehdr.e_shstrndx;
+
+ if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN))
die("Unsupported ELF header type\n");
- }
- if (ehdr.e_machine != ELF_MACHINE) {
+ if (ehdr.e_machine != ELF_MACHINE)
die("Not for %s\n", ELF_MACHINE_NAME);
- }
- if (ehdr.e_version != EV_CURRENT) {
+ if (ehdr.e_version != EV_CURRENT)
die("Unknown ELF version\n");
- }
- if (ehdr.e_ehsize != sizeof(Elf_Ehdr)) {
+ if (ehdr.e_ehsize != sizeof(Elf_Ehdr))
die("Bad Elf header size\n");
- }
- if (ehdr.e_phentsize != sizeof(Elf_Phdr)) {
+ if (ehdr.e_phentsize != sizeof(Elf_Phdr))
die("Bad program header entry\n");
- }
- if (ehdr.e_shentsize != sizeof(Elf_Shdr)) {
+ if (ehdr.e_shentsize != sizeof(Elf_Shdr))
die("Bad section header entry\n");
+
+
+ if (shnum == SHN_UNDEF || shstrndx == SHN_XINDEX) {
+ Elf_Shdr shdr;
+
+ if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno));
+
+ if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
+ die("Cannot read initial ELF section header: %s\n", strerror(errno));
+
+ if (shnum == SHN_UNDEF)
+ shnum = elf_xword_to_cpu(shdr.sh_size);
+
+ if (shstrndx == SHN_XINDEX)
+ shstrndx = elf_word_to_cpu(shdr.sh_link);
}
- if (ehdr.e_shstrndx >= ehdr.e_shnum) {
+
+ if (shstrndx >= shnum)
die("String table index out of bounds\n");
- }
}
static void read_shdrs(FILE *fp)
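
The read_ehdr() change above wires up the ELF escape hatch for large files: when e_shnum reads as 0 (SHN_UNDEF) the real section count is in section header 0's sh_size, and when e_shstrndx reads as SHN_XINDEX (0xffff) the real string-table index is in that header's sh_link. A hedged standalone sketch using the 64-bit structures from <elf.h>; it assumes the header was already read and byte-swapped:

#include <elf.h>
#include <stdio.h>

static int resolve_counts(FILE *fp, const Elf64_Ehdr *ehdr,
			  unsigned long *shnum, unsigned int *shstrndx)
{
	*shnum = ehdr->e_shnum;
	*shstrndx = ehdr->e_shstrndx;

	if (*shnum == SHN_UNDEF || *shstrndx == SHN_XINDEX) {
		Elf64_Shdr shdr;

		if (fseek(fp, (long)ehdr->e_shoff, SEEK_SET) < 0 ||
		    fread(&shdr, sizeof(shdr), 1, fp) != 1)
			return -1;
		if (*shnum == SHN_UNDEF)
			*shnum = shdr.sh_size;
		if (*shstrndx == SHN_XINDEX)
			*shstrndx = shdr.sh_link;
	}
	/* The string table index must name an existing section. */
	return *shstrndx < *shnum ? 0 : -1;
}
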
@@ -394,20 +410,20 @@ static void read_shdrs(FILE *fp)
int i;
Elf_Shdr shdr;
- secs = calloc(ehdr.e_shnum, sizeof(struct section));
+ secs = calloc(shnum, sizeof(struct section));
if (!secs) {
die("Unable to allocate %d section headers\n",
- ehdr.e_shnum);
+ shnum);
}
if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
die("Seek to %d failed: %s\n",
ehdr.e_shoff, strerror(errno));
}
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
die("Cannot read ELF section headers %d/%d: %s\n",
- i, ehdr.e_shnum, strerror(errno));
+ i, shnum, strerror(errno));
sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type);
sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags);
@@ -418,7 +434,7 @@ static void read_shdrs(FILE *fp)
sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info);
sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign);
sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize);
- if (sec->shdr.sh_link < ehdr.e_shnum)
+ if (sec->shdr.sh_link < shnum)
sec->link = &secs[sec->shdr.sh_link];
}
@@ -427,7 +443,7 @@ static void read_shdrs(FILE *fp)
static void read_strtabs(FILE *fp)
{
int i;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_STRTAB) {
continue;
@@ -452,7 +468,7 @@ static void read_strtabs(FILE *fp)
static void read_symtabs(FILE *fp)
{
int i,j;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_SYMTAB) {
continue;
@@ -485,7 +501,7 @@ static void read_symtabs(FILE *fp)
static void read_relocs(FILE *fp)
{
int i,j;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_REL_TYPE) {
continue;
@@ -528,7 +544,7 @@ static void print_absolute_symbols(void)
printf("Absolute symbols\n");
printf(" Num: Value Size Type Bind Visibility Name\n");
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
char *sym_strtab;
int j;
@@ -566,7 +582,7 @@ static void print_absolute_relocs(void)
else
format = "%08"PRIx32" %08"PRIx32" %10s %08"PRIx32" %s\n";
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
struct section *sec_applies, *sec_symtab;
char *sym_strtab;
@@ -650,7 +666,7 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
{
int i;
/* Walk through the relocations */
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
char *sym_strtab;
Elf_Sym *sh_symtab;
struct section *sec_applies, *sec_symtab;
@@ -706,7 +722,7 @@ static Elf_Addr per_cpu_load_addr;
static void percpu_init(void)
{
int i;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
ElfW(Sym) *sym;
if (strcmp(sec_name(i), ".data..percpu"))
continue;
@@ -738,7 +754,7 @@ static void percpu_init(void)
* __per_cpu_load
*
* The "gold" linker incorrectly associates:
- * init_per_cpu__irq_stack_union
+ * init_per_cpu__fixed_percpu_data
* init_per_cpu__gdt_page
*/
static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index a9e80e44178c..a8985e1f7432 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -32,12 +32,6 @@ config ARCH_DEFCONFIG
default "arch/um/configs/i386_defconfig" if X86_32
default "arch/um/configs/x86_64_defconfig" if X86_64
-config RWSEM_XCHGADD_ALGORITHM
- def_bool 64BIT
-
-config RWSEM_GENERIC_SPINLOCK
- def_bool !RWSEM_XCHGADD_ALGORITHM
-
config 3_LEVEL_PGTABLES
bool "Three-level pagetables" if !64BIT
default 64BIT
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index 2d686ae54681..33c51c064c77 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -21,14 +21,12 @@ obj-y += checksum_32.o syscalls_32.o
obj-$(CONFIG_ELF_CORE) += elfcore.o
subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
-subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o
else
obj-y += syscalls_64.o vdso/
-subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
- ../lib/rwsem.o
+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o
endif
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index bf94060fc06f..0caddd6acb22 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -62,7 +62,7 @@ quiet_cmd_vdso = VDSO $@
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv
GCOV_PROFILE := n
#
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 856a85814f00..beb44e22afdf 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2114,10 +2114,10 @@ void __init xen_relocate_p2m(void)
pt = early_memremap(pt_phys, PAGE_SIZE);
clear_page(pt);
for (idx_pte = 0;
- idx_pte < min(n_pte, PTRS_PER_PTE);
- idx_pte++) {
- set_pte(pt + idx_pte,
- pfn_pte(p2m_pfn, PAGE_KERNEL));
+ idx_pte < min(n_pte, PTRS_PER_PTE);
+ idx_pte++) {
+ pt[idx_pte] = pfn_pte(p2m_pfn,
+ PAGE_KERNEL);
p2m_pfn++;
}
n_pte -= PTRS_PER_PTE;
@@ -2125,8 +2125,7 @@ void __init xen_relocate_p2m(void)
make_lowmem_page_readonly(__va(pt_phys));
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
PFN_DOWN(pt_phys));
- set_pmd(pmd + idx_pt,
- __pmd(_PAGE_TABLE | pt_phys));
+ pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
pt_phys += PAGE_SIZE;
}
n_pt -= PTRS_PER_PMD;
@@ -2134,7 +2133,7 @@ void __init xen_relocate_p2m(void)
make_lowmem_page_readonly(__va(pmd_phys));
pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
PFN_DOWN(pmd_phys));
- set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
+ pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
pmd_phys += PAGE_SIZE;
}
n_pmd -= PTRS_PER_PUD;
@@ -2319,8 +2318,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
case VSYSCALL_PAGE:
#endif
- case FIX_TEXT_POKE0:
- case FIX_TEXT_POKE1:
/* All local page mappings */
pte = pfn_pte(phys, prot);
break;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 055e37e43541..95ce9b5be411 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -181,8 +181,15 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
static void * __ref alloc_p2m_page(void)
{
- if (unlikely(!slab_is_available()))
- return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (unlikely(!slab_is_available())) {
+ void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
+ return ptr;
+ }
return (void *)__get_free_page(GFP_KERNEL);
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index d5f303c0e656..548d1e0a5ba1 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -12,6 +12,7 @@
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
+#include <linux/memory_hotplug.h>
#include <asm/elf.h>
#include <asm/vdso.h>
@@ -589,6 +590,14 @@ static void __init xen_align_and_add_e820_region(phys_addr_t start,
if (type == E820_TYPE_RAM) {
start = PAGE_ALIGN(start);
end &= ~((phys_addr_t)PAGE_SIZE - 1);
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+ * Don't allow adding memory not in E820 map while booting the
+ * system. Once the balloon driver is up it will remove that
+ * restriction again.
+ */
+ max_mem_size = end;
+#endif
}
e820__range_add(start, end - start, type);
@@ -748,6 +757,10 @@ char * __init xen_memory_setup(void)
memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
+ xen_saved_max_mem_size = max_mem_size;
+#endif
+
op = xen_initial_domain() ?
XENMEM_machine_memory_map :
XENMEM_memory_map;
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 145506f9fdbe..590fcf863006 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -361,7 +361,9 @@ static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
{
int rc;
- common_cpu_up(cpu, idle);
+ rc = common_cpu_up(cpu, idle);
+ if (rc)
+ return rc;
xen_setup_runstate_info(cpu);
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 5077ead5e59c..c1d8b90aa4e2 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -40,13 +40,13 @@ ENTRY(startup_xen)
#ifdef CONFIG_X86_64
/* Set up %gs.
*
- * The base of %gs always points to the bottom of the irqstack
- * union. If the stack protector canary is enabled, it is
- * located at %gs:40. Note that, on SMP, the boot cpu uses
- * init data section till per cpu areas are set up.
+ * The base of %gs always points to fixed_percpu_data. If the
+ * stack protector canary is enabled, it is located at %gs:40.
+ * Note that, on SMP, the boot cpu uses init data section until
+ * the per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
- movq $INIT_PER_CPU_VAR(irq_stack_union),%rax
+ movq $INIT_PER_CPU_VAR(fixed_percpu_data),%rax
cdq
wrmsr
#endif
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index bacf87ee7975..35c8d91e6106 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -46,9 +46,6 @@ config XTENSA
with reasonable minimum requirements. The Xtensa Linux project has
a home page at <http://www.linux-xtensa.org/>.
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config GENERIC_HWEIGHT
def_bool y
@@ -450,7 +447,6 @@ config USE_OF
bool "Flattened Device Tree support"
select OF
select OF_EARLY_FLATTREE
- select OF_RESERVED_MEM
help
Include support for flattened device tree machine descriptions.
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index d939e13e8d84..35f83c4bf239 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -15,17 +15,17 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
-generic-y += linkage.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += param.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += rwsem.h
generic-y += sections.h
generic-y += socket.h
generic-y += topology.h
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index f7dd895b2353..0c14018d1c26 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -187,15 +187,18 @@ struct thread_struct {
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
- memset(regs, 0, sizeof(*regs)); \
- regs->pc = new_pc; \
- regs->ps = USER_PS_VALUE; \
- regs->areg[1] = new_sp; \
- regs->areg[0] = 0; \
- regs->wmask = 1; \
- regs->depc = 0; \
- regs->windowbase = 0; \
- regs->windowstart = 1;
+ do { \
+ memset((regs), 0, sizeof(*(regs))); \
+ (regs)->pc = (new_pc); \
+ (regs)->ps = USER_PS_VALUE; \
+ (regs)->areg[1] = (new_sp); \
+ (regs)->areg[0] = 0; \
+ (regs)->wmask = 1; \
+ (regs)->depc = 0; \
+ (regs)->windowbase = 0; \
+ (regs)->windowstart = 1; \
+ (regs)->syscall = NO_SYSCALL; \
+ } while (0)
/* Forward declaration */
struct task_struct;
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index a168bf81c7f4..91dc06d58060 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -59,45 +59,24 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
unsigned long *args)
{
static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
- unsigned int j;
+ unsigned int i;
- if (n == 0)
- return;
-
- WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS);
-
- for (j = 0; j < n; ++j) {
- if (i + j < SYSCALL_MAX_ARGS)
- args[j] = regs->areg[reg[i + j]];
- else
- args[j] = 0;
- }
+ for (i = 0; i < 6; ++i)
+ args[i] = regs->areg[reg[i]];
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args)
{
static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
- unsigned int j;
-
- if (n == 0)
- return;
-
- if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) {
- if (i < SYSCALL_MAX_ARGS)
- n = SYSCALL_MAX_ARGS - i;
- else
- return;
- }
+ unsigned int i;
- for (j = 0; j < n; ++j)
- regs->areg[reg[i + j]] = args[j];
+ for (i = 0; i < 6; ++i)
+ regs->areg[reg[i]] = args[i];
}
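
This is part of a tree-wide cleanup that drops the (i, n) window parameters from syscall_get_arguments()/syscall_set_arguments(): every caller now moves all six arguments (SYSCALL_MAX_ARGS) at once, so the bounds juggling above disappears. A sketch of the resulting calling convention; the argument rewrite itself is a hypothetical example:

static void bump_first_arg(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];

	syscall_get_arguments(task, regs, args);	/* always all six */
	args[0]++;					/* hypothetical tweak */
	syscall_set_arguments(task, regs, args);
}
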
asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
index 0d766f9c1083..50889935138a 100644
--- a/arch/xtensa/include/asm/tlb.h
+++ b/arch/xtensa/include/asm/tlb.h
@@ -14,32 +14,6 @@
#include <asm/cache.h>
#include <asm/page.h>
-#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
-
-/* Note, read http://lkml.org/lkml/2004/1/15/6 */
-
-# define tlb_start_vma(tlb,vma) do { } while (0)
-# define tlb_end_vma(tlb,vma) do { } while (0)
-
-#else
-
-# define tlb_start_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while(0)
-
-# define tlb_end_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
- } while(0)
-
-#endif
-
-#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
index 6b43e5049ff7..7417847dc438 100644
--- a/arch/xtensa/include/uapi/asm/Kbuild
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -1,5 +1 @@
-include include/uapi/asm-generic/Kbuild.asm
-
generated-y += unistd_32.h
-generic-y += kvm_para.h
-generic-y += socket.h
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index e50f5124dc6f..e54af8b7e0f8 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1860,6 +1860,8 @@ ENTRY(system_call)
l32i a7, a2, PT_SYSCALL
1:
+ s32i a7, a1, 4
+
/* syscall = sys_call_table[syscall_nr] */
movi a4, sys_call_table
@@ -1893,8 +1895,12 @@ ENTRY(system_call)
retw
1:
+ l32i a4, a1, 4
+ l32i a3, a2, PT_SYSCALL
+ s32i a4, a2, PT_SYSCALL
mov a6, a2
call4 do_syscall_trace_leave
+ s32i a3, a2, PT_SYSCALL
retw
ENDPROC(system_call)
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index 174c11f13bba..b9f82510c650 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
return 1;
}
+/*
+ * level == 0 is for the return address from the caller of this function,
+ * not from this function itself.
+ */
unsigned long return_address(unsigned level)
{
struct return_addr_data r = {
- .skip = level + 1,
+ .skip = level,
};
walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
return r.addr;
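
The stacktrace fix above re-bases the level argument so it matches what callers of return_address() expect on other architectures: level 0 is the caller's return address, not return_address()'s own. A brief usage sketch:

noinline void who_called_me(void)
{
	/* 0 = our caller, 1 = our caller's caller, and so on. */
	pr_info("called from %pS\n", (void *)return_address(0));
}
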
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 6af49929de85..30084eaf8422 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -394,3 +394,7 @@
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
422 common futex_time64 sys_futex
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index 1734cda6bc4a..af7152560bc3 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -45,6 +45,10 @@ static void __init populate(void *start, void *end)
pmd_t *pmd = pmd_offset(pgd, vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
+
pr_debug("%s: %p - %p\n", __func__, start, end);
for (i = j = 0; i < n_pmds; ++i) {
@@ -52,8 +56,10 @@ static void __init populate(void *start, void *end)
for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
phys_addr_t phys =
- memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
- MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+
+ if (!phys)
+ panic("Failed to allocate page table page\n");
set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
}
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index a4dcfd39bc5c..03678c4afc39 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -32,6 +32,9 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
__func__, vaddr, n_pages);
pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
for (i = 0; i < n_pages; ++i)
pte_clear(NULL, 0, pte + i);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4c592496a16a..5ba1e0d841b4 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
* at least two nodes.
*/
return !(varied_queue_weights || multiple_classes_busy
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
|| bfqd->num_groups_with_pending_reqs > 0
#endif
);
@@ -2822,7 +2822,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
bfq_remove_request(q, rq);
}
-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
/*
* If this bfqq is shared between multiple processes, check
@@ -2855,9 +2855,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
/*
* All in-service entities must have been properly deactivated
* or requeued before executing the next function, which
- * resets all in-service entites as no more in service.
+ * resets all in-service entities as no more in service. This
+ * may cause bfqq to be freed. If this happens, the next
+ * function returns true.
*/
- __bfq_bfqd_reset_in_service(bfqd);
+ return __bfq_bfqd_reset_in_service(bfqd);
}
/**
@@ -3262,7 +3264,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
bool slow;
unsigned long delta = 0;
struct bfq_entity *entity = &bfqq->entity;
- int ref;
/*
* Check whether the process is slow (see bfq_bfqq_is_slow).
@@ -3347,10 +3348,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
* reason.
*/
__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
- ref = bfqq->ref;
- __bfq_bfqq_expire(bfqd, bfqq);
-
- if (ref == 1) /* bfqq is gone, no more actions on it */
+ if (__bfq_bfqq_expire(bfqd, bfqq))
+ /* bfqq is gone, no more actions on it */
return;
bfqq->injected_service = 0;
@@ -5397,7 +5396,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
return min_shallow;
}
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
@@ -5405,6 +5404,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+ bfq_depth_updated(hctx);
return 0;
}
@@ -5827,6 +5831,7 @@ static struct elevator_type iosched_bfq_mq = {
.requests_merged = bfq_requests_merged,
.request_merged = bfq_request_merged,
.has_work = bfq_has_work,
+ .depth_updated = bfq_depth_updated,
.init_hctx = bfq_init_hctx,
.init_sched = bfq_init_queue,
.exit_sched = bfq_exit_queue,
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 062e1c4787f4..86394e503ca9 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -995,7 +995,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
bool ins_into_idle_tree);
bool next_queue_may_preempt(struct bfq_data *bfqd);
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool ins_into_idle_tree, bool expiration);
void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 63311d1ff1ed..ae4d000ac0af 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
entity->on_st = true;
}
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
struct bfq_group *bfqg =
container_of(entity, struct bfq_group, entity);
@@ -1605,7 +1605,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
return bfqq;
}
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+/* returns true if the in-service queue gets freed */
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
{
struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
@@ -1629,8 +1630,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
* service tree either, then release the service reference to
* the queue it represents (taken with bfq_get_entity).
*/
- if (!in_serv_entity->on_st)
+ if (!in_serv_entity->on_st) {
+ /*
+ * If no process is referencing in_serv_bfqq any
+ * longer, then the service reference may be the only
+ * reference to the queue. If this is the case, then
+ * bfqq gets freed here.
+ */
+ int ref = in_serv_bfqq->ref;
bfq_put_queue(in_serv_bfqq);
+ if (ref == 1)
+ return true;
+ }
+
+ return false;
}
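
The pattern above, sampling bfqq->ref before the final bfq_put_queue() and reporting ref == 1 back to the caller, lets bfq_bfqq_expire() learn that the queue was freed without ever touching the dead object. The same "observe the count before the last put" idiom in isolation; struct obj and put_obj() are illustrative:

struct obj { int ref; };

static void put_obj(struct obj *obj);	/* frees obj when ref hits zero */

/* True if the put dropped the last reference and freed @obj. */
static bool put_and_report(struct obj *obj)
{
	int ref = obj->ref;	/* sample before the final put */

	put_obj(obj);		/* obj may be gone after this */
	return ref == 1;
}
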
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
diff --git a/block/bio.c b/block/bio.c
index 71a78d9fb8b7..716510ecd7ff 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -849,20 +849,14 @@ static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
size = bio_add_page(bio, bv->bv_page, len,
bv->bv_offset + iter->iov_offset);
if (size == len) {
- struct page *page;
- int i;
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+ struct page *page;
+ int i;
+
+ mp_bvec_for_each_page(page, bv, i)
+ get_page(page);
+ }
- /*
- * For the normal O_DIRECT case, we could skip grabbing this
- * reference and then not have to put them again when IO
- * completes. But this breaks some in-kernel users, like
- * splicing to/from a loop device, where we release the pipe
- * pages unconditionally. If we can fix that case, we can
- * get rid of the get here and the need to call
- * bio_release_pages() at IO completion time.
- */
- mp_bvec_for_each_page(page, bv, i)
- get_page(page);
iov_iter_advance(iter, size);
return 0;
}
@@ -925,10 +919,12 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
* This takes either an iterator pointing to user memory, or one pointing to
* kernel pages (BVEC iterator). If we're adding user pages, we pin them and
* map them into the kernel. On IO completion, the caller should put those
- * pages. For now, when adding kernel pages, we still grab a reference to the
- * page. This isn't strictly needed for the common case, but some call paths
- * end up releasing pages from eg a pipe and we can't easily control these.
- * See comment in __bio_iov_bvec_add_pages().
+ * pages. If we're adding kernel pages, and the caller told us it's safe to
+ * do so, we just have to add the pages to the bio directly. We don't grab an
+ * extra reference to those pages (the user should already have that), and we
+ * don't put the page on IO completion. The caller needs to check if the bio is
+ * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
+ * released.
*
* The function tries, but does not guarantee, to pin as many pages as
* fit into the bio, or are requested in *iter, whatever is smaller. If
@@ -940,6 +936,13 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
const bool is_bvec = iov_iter_is_bvec(iter);
unsigned short orig_vcnt = bio->bi_vcnt;
+ /*
+	 * If this is a BVEC iter, then the pages are kernel pages. If the
+	 * caller asked us to, don't take extra page references and don't
+	 * release them on IO completion.
+ */
+ if (is_bvec && iov_iter_bvec_no_ref(iter))
+ bio_set_flag(bio, BIO_NO_PAGE_REF);
+
do {
int ret;
@@ -1295,8 +1298,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
}
}
- if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+ if (!map_data)
+ __free_page(page);
break;
+ }
len -= bytes;
offset = 0;
@@ -1696,7 +1702,8 @@ static void bio_dirty_fn(struct work_struct *work)
next = bio->bi_private;
bio_set_pages_dirty(bio);
- bio_release_pages(bio);
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+ bio_release_pages(bio);
bio_put(bio);
}
}
@@ -1713,7 +1720,8 @@ void bio_check_pages_dirty(struct bio *bio)
goto defer;
}
- bio_release_pages(bio);
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+ bio_release_pages(bio);
bio_put(bio);
return;
defer:
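Editor's note: BIO_NO_PAGE_REF makes page-reference handling symmetric: if the submission path skipped get_page(), every completion path must skip the release as well, which is why both bio_dirty_fn() and bio_check_pages_dirty() grow the same check. A minimal userspace analogue of that ownership flag (all names hypothetical):

#include <stdbool.h>
#include <stdlib.h>

struct buf {
	void *pages;
	bool no_page_ref;	/* caller keeps ownership of the pages */
};

static void complete_buf(struct buf *b)
{
	if (!b->no_page_ref)
		free(b->pages);	/* we took the reference, we drop it */
	free(b);
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	b->pages = malloc(4096);
	b->no_page_ref = false;	/* normal case: we own the pages */
	complete_buf(b);
	return 0;
}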
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 77f37ef8ef06..617a2b3f7582 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1736,8 +1736,8 @@ out:
/**
* blkcg_schedule_throttle - this task needs to check for throttling
- * @q - the request queue IO was submitted on
- * @use_memdelay - do we charge this to memory delay for PSI
+ * @q: the request queue IO was submitted on
+ * @use_memdelay: do we charge this to memory delay for PSI
*
* This is called by the IO controller when we know there's delay accumulated
* for the blkg for this task. We do not pass the blkg because there are places
@@ -1769,8 +1769,9 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
/**
* blkcg_add_delay - add delay to this blkg
- * @now - the current time in nanoseconds
- * @delta - how many nanoseconds of delay to add
+ * @blkg: blkg of interest
+ * @now: the current time in nanoseconds
+ * @delta: how many nanoseconds of delay to add
*
* Charge @delta to the blkg's current delay accumulation. This is used to
* throttle tasks if an IO controller thinks we need more throttling.
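Editor's note: the two blk-cgroup.c hunks are pure kernel-doc fixes: parameter descriptions must use the "@name:" form, not "@name -", or the documentation build complains. A minimal example of the expected form, using a hypothetical function:

/**
 * demo_add_delay - add delay to a group (hypothetical example)
 * @grp: group of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Kernel-doc only recognizes the "@arg:" form; "@arg -" breaks the
 * generated documentation.
 */
static void demo_add_delay(void *grp, unsigned long long now,
			   unsigned long long delta)
{
}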
diff --git a/block/blk-core.c b/block/blk-core.c
index 6b78ec56a4f2..a55389ba8779 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -500,8 +500,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q->stats)
goto fail_stats;
- q->backing_dev_info->ra_pages =
- (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+ q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
@@ -1246,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
*/
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
- blk_qc_t unused;
-
if (blk_cloned_rq_check_limits(q, rq))
return BLK_STS_IOERR;
@@ -1263,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
* bypass a potential scheduler on the bottom device for
* insert.
*/
- return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
+ return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6e0f2d97fc6d..d95f94892015 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -220,7 +220,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
flush_rq->tag = -1;
} else {
- blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+ blk_mq_put_driver_tag(flush_rq);
flush_rq->internal_tag = -1;
}
@@ -324,7 +324,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
if (q->elevator) {
WARN_ON(rq->tag < 0);
- blk_mq_put_driver_tag_hctx(hctx, rq);
+ blk_mq_put_driver_tag(rq);
}
/*
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 2620baa1f699..507212d75ee2 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -75,6 +75,7 @@
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
+#include "blk.h"
#define DEFAULT_SCALE_COOKIE 1000000U
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 22467f475ab4..1c9d4f0f96ea 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -180,7 +180,7 @@ static unsigned get_max_segment_size(struct request_queue *q,
*/
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
unsigned *nsegs, unsigned *last_seg_size,
- unsigned *front_seg_size, unsigned *sectors)
+ unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
{
unsigned len = bv->bv_len;
unsigned total_len = 0;
@@ -190,7 +190,7 @@ static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
* A multi-page bvec may be too big to hold in one segment, so the
* current bvec has to be split into multiple segments.
*/
- while (len && new_nsegs + *nsegs < queue_max_segments(q)) {
+ while (len && new_nsegs + *nsegs < max_segs) {
seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
seg_size = min(seg_size, len);
@@ -240,6 +240,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
+ const unsigned max_segs = queue_max_segments(q);
bio_for_each_bvec(bv, bio, iter) {
/*
@@ -254,14 +255,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
* Consider this a new segment if we're splitting in
* the middle of this vector.
*/
- if (nsegs < queue_max_segments(q) &&
+ if (nsegs < max_segs &&
sectors < max_sectors) {
/* split in the middle of bvec */
bv.bv_len = (max_sectors - sectors) << 9;
bvec_split_segs(q, &bv, &nsegs,
&seg_size,
&front_seg_size,
- &sectors);
+ &sectors, max_segs);
}
goto split;
}
@@ -283,7 +284,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
continue;
}
new_segment:
- if (nsegs == queue_max_segments(q))
+ if (nsegs == max_segs)
goto split;
bvprv = bv;
@@ -296,7 +297,7 @@ new_segment:
if (nsegs == 1 && seg_size > front_seg_size)
front_seg_size = seg_size;
} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
- &front_seg_size, &sectors)) {
+ &front_seg_size, &sectors, max_segs)) {
goto split;
}
}
@@ -415,7 +416,7 @@ new_segment:
bvprv = bv;
prev = 1;
bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
- &front_seg_size, NULL);
+ &front_seg_size, NULL, UINT_MAX);
}
bbio = bio;
}
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index bac34b72b33b..ec1d18cb643c 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -115,7 +115,6 @@ static int queue_pm_only_show(void *data, struct seq_file *m)
static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(STOPPED),
QUEUE_FLAG_NAME(DYING),
- QUEUE_FLAG_NAME(BIDI),
QUEUE_FLAG_NAME(NOMERGES),
QUEUE_FLAG_NAME(SAME_COMP),
QUEUE_FLAG_NAME(FAIL_IO),
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 40905539afed..aa6bc5c02643 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
* busy in case of 'none' scheduler, and this way may save
* us one extra enqueue & dequeue to sw queue.
*/
- if (!hctx->dispatch_busy && !e && !run_queue_async)
+ if (!hctx->dispatch_busy && !e && !run_queue_async) {
blk_mq_try_issue_list_directly(hctx, list);
- else
- blk_mq_insert_requests(hctx, ctx, list);
+ if (list_empty(list))
+ return;
+ }
+ blk_mq_insert_requests(hctx, ctx, list);
}
blk_mq_run_hw_queue(hctx, run_queue_async);
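Editor's note: the blk_mq_sched_insert_requests() change means direct issue may now consume only part of the list; anything left over must still go through the regular insert path rather than being dropped. A small self-contained sketch of that issue-then-fall-back shape (the names and the budget are invented):

#include <stdio.h>

#define BUDGET 2	/* pretend driver resources run out after two */

/* Issue items from the front of the list until the budget is gone;
 * returns how many are left for the caller to insert the slow way. */
static int try_issue_directly(const int *list, int len)
{
	int i;

	for (i = 0; i < len && i < BUDGET; i++)
		printf("issued %d directly\n", list[i]);
	return len - i;
}

int main(void)
{
	int list[] = { 1, 2, 3, 4 };
	int left = try_issue_directly(list, 4);

	if (left > 0)	/* mirrors: if (!list_empty(list)) insert */
		printf("%d requests fall back to insert\n", left);
	return 0;
}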
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4e502db8b10c..fc60ed7e940e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
}
/*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctxs, the dispatch list, or the elevator
+ * has pending work in this hardware queue.
*/
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
@@ -331,7 +332,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
#if defined(CONFIG_BLK_DEV_INTEGRITY)
rq->nr_integrity_segments = 0;
#endif
- rq->special = NULL;
/* tag was already set */
rq->extra_len = 0;
WRITE_ONCE(rq->deadline, 0);
@@ -340,7 +340,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->end_io = NULL;
rq->end_io_data = NULL;
- rq->next_rq = NULL;
data->ctx->rq_dispatched[op_is_sync(op)]++;
refcount_set(&rq->ref, 1);
@@ -550,8 +549,6 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
rq_qos_done(rq->q, rq);
rq->end_io(rq, error);
} else {
- if (unlikely(blk_bidi_rq(rq)))
- blk_mq_free_request(rq->next_rq);
blk_mq_free_request(rq);
}
}
@@ -657,6 +654,13 @@ bool blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
+void blk_mq_complete_request_sync(struct request *rq)
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+ rq->q->mq_ops->complete(rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
+
int blk_mq_request_started(struct request *rq)
{
return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
@@ -786,7 +790,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
if (kick_requeue_list)
blk_mq_kick_requeue_list(q);
}
-EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
@@ -1076,7 +1079,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
spin_lock(&hctx->dispatch_wait_lock);
- list_del_init(&wait->entry);
+ if (!list_empty(&wait->entry)) {
+ struct sbitmap_queue *sbq;
+
+ list_del_init(&wait->entry);
+ sbq = &hctx->tags->bitmap_tags;
+ atomic_dec(&sbq->ws_active);
+ }
spin_unlock(&hctx->dispatch_wait_lock);
blk_mq_run_hw_queue(hctx, true);
@@ -1092,13 +1101,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
+ struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
struct wait_queue_head *wq;
wait_queue_entry_t *wait;
bool ret;
if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
- if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ blk_mq_sched_mark_restart_hctx(hctx);
/*
* It's possible that a tag was freed in the window between the
@@ -1115,7 +1124,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
if (!list_empty_careful(&wait->entry))
return false;
- wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+ wq = &bt_wait_ptr(sbq, hctx)->wait;
spin_lock_irq(&wq->lock);
spin_lock(&hctx->dispatch_wait_lock);
@@ -1125,6 +1134,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
return false;
}
+ atomic_inc(&sbq->ws_active);
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
__add_wait_queue(wq, wait);
@@ -1145,6 +1155,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
* someone else gets the wakeup.
*/
list_del_init(&wait->entry);
+ atomic_dec(&sbq->ws_active);
spin_unlock(&hctx->dispatch_wait_lock);
spin_unlock_irq(&wq->lock);
@@ -1707,11 +1718,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
unsigned int depth;
list_splice_init(&plug->mq_list, &list);
- plug->rq_count = 0;
if (plug->rq_count > 2 && plug->multiple_queues)
list_sort(NULL, &list, plug_rq_cmp);
+ plug->rq_count = 0;
+
this_q = NULL;
this_hctx = NULL;
this_ctx = NULL;
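Editor's note: the flush_plug_list hunk is an ordering fix: plug->rq_count was zeroed before the "rq_count > 2" test, so the list was never sorted. A tiny demo of why the reset has to come after the test:

#include <stdio.h>

/* Demonstrates the ordering bug fixed above: resetting the counter
 * before testing it makes the sort condition unreachable. */
int main(void)
{
	int rq_count = 5;
	int sorted = 0;

	/* buggy order was: rq_count = 0; if (rq_count > 2) ... */
	if (rq_count > 2)	/* fixed order: test first ... */
		sorted = 1;
	rq_count = 0;		/* ... then reset */

	printf("sorted=%d rq_count=%d\n", sorted, rq_count);
	return 0;
}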
@@ -1796,74 +1808,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
return ret;
}
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
- bool bypass, bool last)
+ bool bypass_insert, bool last)
{
struct request_queue *q = rq->q;
bool run_queue = true;
- blk_status_t ret = BLK_STS_RESOURCE;
- int srcu_idx;
- bool force = false;
- hctx_lock(hctx, &srcu_idx);
/*
- * hctx_lock is needed before checking quiesced flag.
+ * RCU or SRCU read lock is needed before checking quiesced flag.
*
- * When queue is stopped or quiesced, ignore 'bypass', insert
- * and return BLK_STS_OK to caller, and avoid driver to try to
- * dispatch again.
+ * When the queue is stopped or quiesced, ignore 'bypass_insert' from
+ * blk_mq_request_issue_directly(), return BLK_STS_OK to the caller,
+ * and avoid having the driver try to dispatch again.
*/
- if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
+ if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
run_queue = false;
- bypass = false;
- goto out_unlock;
+ bypass_insert = false;
+ goto insert;
}
- if (unlikely(q->elevator && !bypass))
- goto out_unlock;
+ if (q->elevator && !bypass_insert)
+ goto insert;
if (!blk_mq_get_dispatch_budget(hctx))
- goto out_unlock;
+ goto insert;
if (!blk_mq_get_driver_tag(rq)) {
blk_mq_put_dispatch_budget(hctx);
- goto out_unlock;
+ goto insert;
}
- /*
- * Always add a request that has been through
- *.queue_rq() to the hardware dispatch list.
- */
- force = true;
- ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
-out_unlock:
+ return __blk_mq_issue_directly(hctx, rq, cookie, last);
+insert:
+ if (bypass_insert)
+ return BLK_STS_RESOURCE;
+
+ blk_mq_request_bypass_insert(rq, run_queue);
+ return BLK_STS_OK;
+}
+
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, blk_qc_t *cookie)
+{
+ blk_status_t ret;
+ int srcu_idx;
+
+ might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
+ hctx_lock(hctx, &srcu_idx);
+
+ ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+ blk_mq_request_bypass_insert(rq, true);
+ else if (ret != BLK_STS_OK)
+ blk_mq_end_request(rq, ret);
+
+ hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+ blk_status_t ret;
+ int srcu_idx;
+ blk_qc_t unused_cookie;
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+ hctx_lock(hctx, &srcu_idx);
+ ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
hctx_unlock(hctx, srcu_idx);
- switch (ret) {
- case BLK_STS_OK:
- break;
- case BLK_STS_DEV_RESOURCE:
- case BLK_STS_RESOURCE:
- if (force) {
- blk_mq_request_bypass_insert(rq, run_queue);
- /*
- * We have to return BLK_STS_OK for the DM
- * to avoid livelock. Otherwise, we return
- * the real result to indicate whether the
- * request is direct-issued successfully.
- */
- ret = bypass ? BLK_STS_OK : ret;
- } else if (!bypass) {
- blk_mq_sched_insert_request(rq, false,
- run_queue, false);
- }
- break;
- default:
- if (!bypass)
- blk_mq_end_request(rq, ret);
- break;
- }
return ret;
}
@@ -1871,20 +1885,22 @@ out_unlock:
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
- blk_qc_t unused;
- blk_status_t ret = BLK_STS_OK;
-
while (!list_empty(list)) {
+ blk_status_t ret;
struct request *rq = list_first_entry(list, struct request,
queuelist);
list_del_init(&rq->queuelist);
- if (ret == BLK_STS_OK)
- ret = blk_mq_try_issue_directly(hctx, rq, &unused,
- false,
+ ret = blk_mq_request_issue_directly(rq, list_empty(list));
+ if (ret != BLK_STS_OK) {
+ if (ret == BLK_STS_RESOURCE ||
+ ret == BLK_STS_DEV_RESOURCE) {
+ blk_mq_request_bypass_insert(rq,
list_empty(list));
- else
- blk_mq_sched_insert_request(rq, false, true, false);
+ break;
+ }
+ blk_mq_end_request(rq, ret);
+ }
}
/*
@@ -1892,7 +1908,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
* the driver there was more coming, but that turned out to
* be a lie.
*/
- if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+ if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
hctx->queue->mq_ops->commit_rqs(hctx);
}
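Editor's note: the commit_rqs() condition changes from "last status was not OK" to "requests remain on the list": if the loop stopped early, the driver was told more requests were coming (last == false) and needs an explicit flush. A hypothetical sketch of that last/commit protocol:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical driver hook: after a request is queued with last == true,
 * no more are coming in this batch. */
static void queue_rq(int rq, bool last)
{
	printf("queue rq %d last=%d\n", rq, last);
}

static void commit_rqs(void)
{
	printf("commit: flush doorbell, batch ended early\n");
}

int main(void)
{
	int list[3] = { 1, 2, 3 };
	int i, len = 3;

	for (i = 0; i < len; i++) {
		queue_rq(list[i], i == len - 1);
		if (i == 1)
			break;	/* resources ran out mid-batch */
	}
	if (i < len - 1)	/* "last" was never sent: tell the driver */
		commit_rqs();
	return 0;
}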
@@ -1999,19 +2015,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
plug->rq_count--;
}
blk_add_rq_to_plug(plug, rq);
+ trace_block_plug(q);
blk_mq_put_ctx(data.ctx);
if (same_queue_rq) {
data.hctx = same_queue_rq->mq_hctx;
+ trace_block_unplug(q, 1, true);
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
- &cookie, false, true);
+ &cookie);
}
} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
!data.hctx->dispatch_busy)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
+ blk_mq_try_issue_directly(data.hctx, rq, &cookie);
} else {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
@@ -2328,7 +2346,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
return 0;
free_fq:
- kfree(hctx->fq);
+ blk_free_flush_queue(hctx->fq);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@@ -2861,7 +2879,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/*
* Default to classic polling
*/
- q->poll_nsec = -1;
+ q->poll_nsec = BLK_MQ_POLL_CLASSIC;
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q);
@@ -3117,6 +3135,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
}
if (ret)
break;
+ if (q->elevator && q->elevator->type->ops.depth_updated)
+ q->elevator->type->ops.depth_updated(hctx);
}
if (!ret)
@@ -3396,7 +3416,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
{
struct request *rq;
- if (q->poll_nsec == -1)
+ if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
return false;
if (!blk_qc_t_is_internal(cookie))
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c11353a3749d..423ea88ab6fb 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -41,6 +41,8 @@ void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+ bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
@@ -68,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
- struct request *rq,
- blk_qc_t *cookie,
- bool bypass, bool last);
+/* Used by blk_insert_cloned_request() to issue request directly */
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);
@@ -222,15 +222,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
}
}
-static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
-{
- if (rq->tag == -1 || rq->internal_tag == -1)
- return;
-
- __blk_mq_put_driver_tag(hctx, rq);
-}
-
static inline void blk_mq_put_driver_tag(struct request *rq)
{
if (rq->tag == -1 || rq->internal_tag == -1)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 59685918167e..422327089e0f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -360,8 +360,8 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
int val;
- if (q->poll_nsec == -1)
- val = -1;
+ if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
+ val = BLK_MQ_POLL_CLASSIC;
else
val = q->poll_nsec / 1000;
@@ -380,10 +380,12 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
if (err < 0)
return err;
- if (val == -1)
- q->poll_nsec = -1;
- else
+ if (val == BLK_MQ_POLL_CLASSIC)
+ q->poll_nsec = BLK_MQ_POLL_CLASSIC;
+ else if (val >= 0)
q->poll_nsec = val * 1000;
+ else
+ return -EINVAL;
return count;
}
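Editor's note: the sysfs hunks replace the bare -1 with the BLK_MQ_POLL_CLASSIC sentinel and, importantly, make the store path reject other negative values instead of silently accepting them. A minimal userspace sketch of the parse-and-validate shape (names are made up):

#include <stdio.h>

#define POLL_CLASSIC (-1)	/* named sentinel instead of a bare -1 */

static long poll_nsec = POLL_CLASSIC;

/* Mirrors the fixed store path: sentinel, else non-negative, else error. */
static int store_poll_delay(long val)
{
	if (val == POLL_CLASSIC)
		poll_nsec = POLL_CLASSIC;
	else if (val >= 0)
		poll_nsec = val * 1000;
	else
		return -1;	/* was silently accepted before the fix */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", store_poll_delay(-1), store_poll_delay(5),
	       store_poll_delay(-7));
	return 0;
}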
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 192129856342..005e2b75d775 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -51,11 +51,40 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
fmode_t mode)
{
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+ int ret;
job->request_len = hdr->request_len;
job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
+ if (IS_ERR(job->request))
+ return PTR_ERR(job->request);
+
+ if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+ job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
+ if (IS_ERR(job->bidi_rq)) {
+ ret = PTR_ERR(job->bidi_rq);
+ goto out;
+ }
+
+ ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
+ uptr64(hdr->din_xferp), hdr->din_xfer_len,
+ GFP_KERNEL);
+ if (ret)
+ goto out_free_bidi_rq;
+
+ job->bidi_bio = job->bidi_rq->bio;
+ } else {
+ job->bidi_rq = NULL;
+ job->bidi_bio = NULL;
+ }
- return PTR_ERR_OR_ZERO(job->request);
+ return 0;
+
+out_free_bidi_rq:
+ if (job->bidi_rq)
+ blk_put_request(job->bidi_rq);
+out:
+ kfree(job->request);
+ return ret;
}
static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
@@ -93,7 +122,7 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
/* we assume all request payload was transferred, residual == 0 */
hdr->dout_resid = 0;
- if (rq->next_rq) {
+ if (job->bidi_rq) {
unsigned int rsp_len = job->reply_payload.payload_len;
if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
@@ -111,6 +140,11 @@ static void bsg_transport_free_rq(struct request *rq)
{
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+ if (job->bidi_rq) {
+ blk_rq_unmap_user(job->bidi_bio);
+ blk_put_request(job->bidi_rq);
+ }
+
kfree(job->request);
}
@@ -200,7 +234,6 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
*/
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
- struct request *rsp = req->next_rq;
struct bsg_job *job = blk_mq_rq_to_pdu(req);
int ret;
@@ -211,8 +244,8 @@ static bool bsg_prepare_job(struct device *dev, struct request *req)
if (ret)
goto failjob_rls_job;
}
- if (rsp && rsp->bio) {
- ret = bsg_map_buffer(&job->reply_payload, rsp);
+ if (job->bidi_rq) {
+ ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
if (ret)
goto failjob_rls_rqst_payload;
}
@@ -369,7 +402,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
}
q->queuedata = dev;
- blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
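Editor's note: bsg_transport_fill_hdr() now acquires several resources in sequence (the request copy, the bidi request, the user mapping), so it adopts the usual goto-unwind layout: each failure jumps to a label that releases everything acquired so far, in reverse order. A generic, runnable sketch of the pattern:

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_map)
{
	char *request, *bidi;

	request = malloc(16);
	if (!request)
		return -1;

	bidi = malloc(16);
	if (!bidi)
		goto out_free_request;

	if (fail_map)		/* stands in for blk_rq_map_user() failing */
		goto out_free_bidi;

	free(bidi);		/* demo only: release on success too */
	free(request);
	return 0;

out_free_bidi:
	free(bidi);
out_free_request:
	free(request);
	return -1;
}

int main(void)
{
	printf("ok=%d fail=%d\n", setup(0), setup(1));
	return 0;
}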
diff --git a/block/bsg.c b/block/bsg.c
index 50e5f8f666f2..f306853c6b08 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -74,6 +74,11 @@ static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
{
struct scsi_request *sreq = scsi_req(rq);
+ if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+ pr_warn_once("BIDI support in bsg has been removed.\n");
+ return -EOPNOTSUPP;
+ }
+
sreq->cmd_len = hdr->request_len;
if (sreq->cmd_len > BLK_MAX_CDB) {
sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
@@ -114,14 +119,10 @@ static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
hdr->response_len = len;
}
- if (rq->next_rq) {
- hdr->dout_resid = sreq->resid_len;
- hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
- } else if (rq_data_dir(rq) == READ) {
+ if (rq_data_dir(rq) == READ)
hdr->din_resid = sreq->resid_len;
- } else {
+ else
hdr->dout_resid = sreq->resid_len;
- }
return ret;
}
@@ -138,32 +139,35 @@ static const struct bsg_ops bsg_scsi_ops = {
.free_rq = bsg_scsi_free_rq,
};
-static struct request *
-bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
+static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
{
- struct request *rq, *next_rq = NULL;
+ struct request *rq;
+ struct bio *bio;
+ struct sg_io_v4 hdr;
int ret;
- if (!q->bsg_dev.class_dev)
- return ERR_PTR(-ENXIO);
+ if (copy_from_user(&hdr, uarg, sizeof(hdr)))
+ return -EFAULT;
- if (hdr->guard != 'Q')
- return ERR_PTR(-EINVAL);
+ if (!q->bsg_dev.class_dev)
+ return -ENXIO;
- ret = q->bsg_dev.ops->check_proto(hdr);
+ if (hdr.guard != 'Q')
+ return -EINVAL;
+ ret = q->bsg_dev.ops->check_proto(&hdr);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- rq = blk_get_request(q, hdr->dout_xfer_len ?
+ rq = blk_get_request(q, hdr.dout_xfer_len ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(rq))
- return rq;
+ return PTR_ERR(rq);
- ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
+ ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
if (ret)
- goto out;
+ return ret;
- rq->timeout = msecs_to_jiffies(hdr->timeout);
+ rq->timeout = msecs_to_jiffies(hdr.timeout);
if (!rq->timeout)
rq->timeout = q->sg_timeout;
if (!rq->timeout)
@@ -171,68 +175,28 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
- if (hdr->dout_xfer_len && hdr->din_xfer_len) {
- if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- pr_warn_once(
- "BIDI support in bsg has been deprecated and might be removed. "
- "Please report your use case to linux-scsi@vger.kernel.org\n");
-
- next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
- if (IS_ERR(next_rq)) {
- ret = PTR_ERR(next_rq);
- goto out;
- }
-
- rq->next_rq = next_rq;
- ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
- hdr->din_xfer_len, GFP_KERNEL);
- if (ret)
- goto out_free_nextrq;
- }
-
- if (hdr->dout_xfer_len) {
- ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
- hdr->dout_xfer_len, GFP_KERNEL);
- } else if (hdr->din_xfer_len) {
- ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
- hdr->din_xfer_len, GFP_KERNEL);
+ if (hdr.dout_xfer_len) {
+ ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
+ hdr.dout_xfer_len, GFP_KERNEL);
+ } else if (hdr.din_xfer_len) {
+ ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
+ hdr.din_xfer_len, GFP_KERNEL);
}
if (ret)
- goto out_unmap_nextrq;
- return rq;
-
-out_unmap_nextrq:
- if (rq->next_rq)
- blk_rq_unmap_user(rq->next_rq->bio);
-out_free_nextrq:
- if (rq->next_rq)
- blk_put_request(rq->next_rq);
-out:
- q->bsg_dev.ops->free_rq(rq);
- blk_put_request(rq);
- return ERR_PTR(ret);
-}
+ goto out_free_rq;
-static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
- struct bio *bio, struct bio *bidi_bio)
-{
- int ret;
-
- ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);
-
- if (rq->next_rq) {
- blk_rq_unmap_user(bidi_bio);
- blk_put_request(rq->next_rq);
- }
+ bio = rq->bio;
+ blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
+ ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
blk_rq_unmap_user(bio);
+
+out_free_rq:
rq->q->bsg_dev.ops->free_rq(rq);
blk_put_request(rq);
+ if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
+ return -EFAULT;
return ret;
}
@@ -367,31 +331,39 @@ static int bsg_release(struct inode *inode, struct file *file)
return bsg_put_device(bd);
}
+static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
+{
+ return put_user(bd->max_queue, uarg);
+}
+
+static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
+{
+ int queue;
+
+ if (get_user(queue, uarg))
+ return -EFAULT;
+ if (queue < 1)
+ return -EINVAL;
+
+ spin_lock_irq(&bd->lock);
+ bd->max_queue = queue;
+ spin_unlock_irq(&bd->lock);
+ return 0;
+}
+
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
- int __user *uarg = (int __user *) arg;
- int ret;
+ void __user *uarg = (void __user *) arg;
switch (cmd) {
- /*
- * our own ioctls
- */
+ /*
+ * Our own ioctls
+ */
case SG_GET_COMMAND_Q:
- return put_user(bd->max_queue, uarg);
- case SG_SET_COMMAND_Q: {
- int queue;
-
- if (get_user(queue, uarg))
- return -EFAULT;
- if (queue < 1)
- return -EINVAL;
-
- spin_lock_irq(&bd->lock);
- bd->max_queue = queue;
- spin_unlock_irq(&bd->lock);
- return 0;
- }
+ return bsg_get_command_q(bd, uarg);
+ case SG_SET_COMMAND_Q:
+ return bsg_set_command_q(bd, uarg);
/*
* SCSI/sg ioctls
@@ -404,36 +376,10 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case SG_GET_RESERVED_SIZE:
case SG_SET_RESERVED_SIZE:
case SG_EMULATED_HOST:
- case SCSI_IOCTL_SEND_COMMAND: {
- void __user *uarg = (void __user *) arg;
+ case SCSI_IOCTL_SEND_COMMAND:
return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
- }
- case SG_IO: {
- struct request *rq;
- struct bio *bio, *bidi_bio = NULL;
- struct sg_io_v4 hdr;
- int at_head;
-
- if (copy_from_user(&hdr, uarg, sizeof(hdr)))
- return -EFAULT;
-
- rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- bio = rq->bio;
- if (rq->next_rq)
- bidi_bio = rq->next_rq->bio;
-
- at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
- blk_execute_rq(bd->queue, NULL, rq, at_head);
- ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
-
- if (copy_to_user(uarg, &hdr, sizeof(hdr)))
- return -EFAULT;
-
- return ret;
- }
+ case SG_IO:
+ return bsg_sg_io(bd->queue, file->f_mode, uarg);
default:
return -ENOTTY;
}
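Editor's note: the SG_IO refactor pulls the whole flow into one helper, bsg_sg_io(), which copies the header in at the top, executes the request, and copies the updated header back out at the bottom, instead of scattering the copies across the ioctl switch. A userspace analogue of that copy-in/process/copy-out bracket (memcpy stands in for copy_from_user/copy_to_user; the struct is invented):

#include <stdio.h>
#include <string.h>

struct hdr {
	int guard;
	int timeout;
	int resid;	/* filled in on completion */
};

static int sg_io(void *uarg)
{
	struct hdr hdr;

	memcpy(&hdr, uarg, sizeof(hdr));	/* copy_from_user() */
	if (hdr.guard != 'Q')
		return -1;

	hdr.resid = 0;				/* execute + complete */

	memcpy(uarg, &hdr, sizeof(hdr));	/* copy_to_user() */
	return 0;
}

int main(void)
{
	struct hdr h = { .guard = 'Q', .timeout = 30 };

	printf("%d resid=%d\n", sg_io(&h), h.resid);
	return 0;
}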
diff --git a/certs/system_keyring.c b/certs/system_keyring.c
index 81728717523d..c05c29ae4d5d 100644
--- a/certs/system_keyring.c
+++ b/certs/system_keyring.c
@@ -24,6 +24,9 @@ static struct key *builtin_trusted_keys;
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
static struct key *secondary_trusted_keys;
#endif
+#ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING
+static struct key *platform_trusted_keys;
+#endif
extern __initconst const u8 system_certificate_list[];
extern __initconst const unsigned long system_certificate_list_size;
@@ -237,11 +240,22 @@ int verify_pkcs7_signature(const void *data, size_t len,
#else
trusted_keys = builtin_trusted_keys;
#endif
+ } else if (trusted_keys == VERIFY_USE_PLATFORM_KEYRING) {
+#ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING
+ trusted_keys = platform_trusted_keys;
+#else
+ trusted_keys = NULL;
+#endif
+ if (!trusted_keys) {
+ ret = -ENOKEY;
+ pr_devel("PKCS#7 platform keyring is not available\n");
+ goto error;
+ }
}
ret = pkcs7_validate_trust(pkcs7, trusted_keys);
if (ret < 0) {
if (ret == -ENOKEY)
- pr_err("PKCS#7 signature not signed with a trusted key\n");
+ pr_devel("PKCS#7 signature not signed with a trusted key\n");
goto error;
}
@@ -266,3 +280,10 @@ error:
EXPORT_SYMBOL_GPL(verify_pkcs7_signature);
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
+
+#ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING
+void __init set_platform_trusted_keys(struct key *keyring)
+{
+ platform_trusted_keys = keyring;
+}
+#endif
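Editor's note: with platform_trusted_keys wired up, callers can pass VERIFY_USE_PLATFORM_KEYRING to anchor PKCS#7 trust in the platform (e.g. UEFI db) keyring, getting -ENOKEY when that keyring is not configured. A hedged kernel-side sketch of such a call; demo_verify is invented, the API is the one patched above, and this is not standalone-runnable:

#include <linux/verification.h>

static int demo_verify(const void *data, size_t len,
		       const void *pkcs7, size_t pkcs7_len)
{
	/* NULL view_content: we only care about the trust decision. */
	return verify_pkcs7_signature(data, len, pkcs7, pkcs7_len,
				      VERIFY_USE_PLATFORM_KEYRING,
				      VERIFYING_UNSPECIFIED_SIGNATURE,
				      NULL, NULL);
}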
diff --git a/crypto/842.c b/crypto/842.c
index bc26dc942821..5f98393b65d1 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -144,7 +144,7 @@ static int __init crypto842_mod_init(void)
return ret;
}
-module_init(crypto842_mod_init);
+subsys_initcall(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
diff --git a/crypto/Kconfig b/crypto/Kconfig
index bbab6bf33519..3d056e7da65f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -27,8 +27,8 @@ config CRYPTO_FIPS
depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
depends on (MODULE_SIG || !MODULES)
help
- This options enables the fips boot option which is
- required if you want to system to operate in a FIPS 200
+ This option enables the fips boot option which is
+ required if you want the system to operate in a FIPS 200
certification. You should say no unless you know what
this is.
@@ -113,29 +113,6 @@ config CRYPTO_ACOMP
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
-config CRYPTO_RSA
- tristate "RSA algorithm"
- select CRYPTO_AKCIPHER
- select CRYPTO_MANAGER
- select MPILIB
- select ASN1
- help
- Generic implementation of the RSA public key algorithm.
-
-config CRYPTO_DH
- tristate "Diffie-Hellman algorithm"
- select CRYPTO_KPP
- select MPILIB
- help
- Generic implementation of the Diffie-Hellman algorithm.
-
-config CRYPTO_ECDH
- tristate "ECDH algorithm"
- select CRYPTO_KPP
- select CRYPTO_RNG_DEFAULT
- help
- Generic implementation of the ECDH algorithm
-
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_MANAGER2
@@ -253,6 +230,48 @@ config CRYPTO_GLUE_HELPER_X86
config CRYPTO_ENGINE
tristate
+comment "Public-key cryptography"
+
+config CRYPTO_RSA
+ tristate "RSA algorithm"
+ select CRYPTO_AKCIPHER
+ select CRYPTO_MANAGER
+ select MPILIB
+ select ASN1
+ help
+ Generic implementation of the RSA public key algorithm.
+
+config CRYPTO_DH
+ tristate "Diffie-Hellman algorithm"
+ select CRYPTO_KPP
+ select MPILIB
+ help
+ Generic implementation of the Diffie-Hellman algorithm.
+
+config CRYPTO_ECC
+ tristate
+
+config CRYPTO_ECDH
+ tristate "ECDH algorithm"
+ select CRYPTO_ECC
+ select CRYPTO_KPP
+ select CRYPTO_RNG_DEFAULT
+ help
+ Generic implementation of the ECDH algorithm.
+
+config CRYPTO_ECRDSA
+ tristate "EC-RDSA (GOST 34.10) algorithm"
+ select CRYPTO_ECC
+ select CRYPTO_AKCIPHER
+ select CRYPTO_STREEBOG
+ select OID_REGISTRY
+ select ASN1
+ help
+ Elliptic Curve Russian Digital Signature Algorithm (GOST R 34.10-2012,
+ RFC 7091, ISO/IEC 14888-3:2018) is one of the Russian cryptographic
+ standard algorithms (called GOST algorithms). Only signature verification
+ is implemented.
+
comment "Authenticated Encryption with Associated Data"
config CRYPTO_CCM
@@ -310,25 +329,25 @@ config CRYPTO_AEGIS128_AESNI_SSE2
tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
- AESNI+SSE2 implementation of the AEGSI-128 dedicated AEAD algorithm.
+ AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm.
config CRYPTO_AEGIS128L_AESNI_SSE2
tristate "AEGIS-128L AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
- AESNI+SSE2 implementation of the AEGSI-128L dedicated AEAD algorithm.
+ AESNI+SSE2 implementation of the AEGIS-128L dedicated AEAD algorithm.
config CRYPTO_AEGIS256_AESNI_SSE2
tristate "AEGIS-256 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
- AESNI+SSE2 implementation of the AEGSI-256 dedicated AEAD algorithm.
+ AESNI+SSE2 implementation of the AEGIS-256 dedicated AEAD algorithm.
config CRYPTO_MORUS640
tristate "MORUS-640 AEAD algorithm"
@@ -340,7 +359,7 @@ config CRYPTO_MORUS640_GLUE
tristate
depends on X86
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD
algorithm.
@@ -363,7 +382,7 @@ config CRYPTO_MORUS1280_GLUE
tristate
depends on X86
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD
algorithm.
diff --git a/crypto/Makefile b/crypto/Makefile
index fb5bf2a3a666..266a4cdbb9e2 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -147,12 +147,20 @@ obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
obj-$(CONFIG_CRYPTO_OFB) += ofb.o
+obj-$(CONFIG_CRYPTO_ECC) += ecc.o
-ecdh_generic-y := ecc.o
ecdh_generic-y += ecdh.o
ecdh_generic-y += ecdh_helper.o
obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
+$(obj)/ecrdsa_params.asn1.o: $(obj)/ecrdsa_params.asn1.c $(obj)/ecrdsa_params.asn1.h
+$(obj)/ecrdsa_pub_key.asn1.o: $(obj)/ecrdsa_pub_key.asn1.c $(obj)/ecrdsa_pub_key.asn1.h
+$(obj)/ecrdsa.o: $(obj)/ecrdsa_params.asn1.h $(obj)/ecrdsa_pub_key.asn1.h
+ecrdsa_generic-y += ecrdsa.o
+ecrdsa_generic-y += ecrdsa_params.asn1.o
+ecrdsa_generic-y += ecrdsa_pub_key.asn1.o
+obj-$(CONFIG_CRYPTO_ECRDSA) += ecrdsa_generic.o
+
#
# generic algorithms and the async_tx api
#
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 5564e73266a6..395a3ddd3707 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -265,7 +265,6 @@ static int adiantum_hash_message(struct skcipher_request *req,
int err;
hash_desc->tfm = tctx->hash;
- hash_desc->flags = 0;
err = crypto_shash_init(hash_desc);
if (err)
@@ -659,7 +658,7 @@ static void __exit adiantum_module_exit(void)
crypto_unregister_template(&adiantum_tmpl);
}
-module_init(adiantum_module_init);
+subsys_initcall(adiantum_module_init);
module_exit(adiantum_module_exit);
MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 3718a8341303..d78f77fc5dd1 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -448,7 +448,7 @@ static void __exit crypto_aegis128_module_exit(void)
crypto_unregister_aead(&crypto_aegis128_alg);
}
-module_init(crypto_aegis128_module_init);
+subsys_initcall(crypto_aegis128_module_init);
module_exit(crypto_aegis128_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index 275a8616d71b..9bca3d619a22 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -512,7 +512,7 @@ static void __exit crypto_aegis128l_module_exit(void)
crypto_unregister_aead(&crypto_aegis128l_alg);
}
-module_init(crypto_aegis128l_module_init);
+subsys_initcall(crypto_aegis128l_module_init);
module_exit(crypto_aegis128l_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index ecd6b7f34a2d..b47fd39595ad 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -463,7 +463,7 @@ static void __exit crypto_aegis256_module_exit(void)
crypto_unregister_aead(&crypto_aegis256_alg);
}
-module_init(crypto_aegis256_module_init);
+subsys_initcall(crypto_aegis256_module_init);
module_exit(crypto_aegis256_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 13df33aca463..f217568917e4 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -64,7 +64,7 @@ static inline u8 byte(const u32 x, const unsigned n)
static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
/* cacheline-aligned to facilitate prefetching into cache */
-__visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = {
{
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
@@ -328,7 +328,7 @@ __visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
}
};
-__visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_fl_tab[4][256] ____cacheline_aligned = {
{
0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
@@ -592,7 +592,7 @@ __visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
}
};
-__visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_it_tab[4][256] ____cacheline_aligned = {
{
0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
@@ -856,7 +856,7 @@ __visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
}
};
-__visible const u32 crypto_il_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_il_tab[4][256] ____cacheline_aligned = {
{
0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
0x00000030, 0x00000036, 0x000000a5, 0x00000038,
@@ -1470,7 +1470,7 @@ static void __exit aes_fini(void)
crypto_unregister_alg(&aes_alg);
}
-module_init(aes_init);
+subsys_initcall(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 0cbeae137e0a..780daa436dac 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -119,10 +119,24 @@ static void akcipher_prepare_alg(struct akcipher_alg *alg)
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
}
+static int akcipher_default_op(struct akcipher_request *req)
+{
+ return -ENOSYS;
+}
+
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
+ if (!alg->sign)
+ alg->sign = akcipher_default_op;
+ if (!alg->verify)
+ alg->verify = akcipher_default_op;
+ if (!alg->encrypt)
+ alg->encrypt = akcipher_default_op;
+ if (!alg->decrypt)
+ alg->decrypt = akcipher_default_op;
+
akcipher_prepare_alg(alg);
return crypto_register_alg(base);
}
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 527b44d0af21..bb97cfb38836 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -296,7 +296,13 @@ static void __exit cryptomgr_exit(void)
BUG_ON(err);
}
-subsys_initcall(cryptomgr_init);
+/*
+ * This is arch_initcall() so that the crypto self-tests are run on algorithms
+ * registered early by subsys_initcall(). subsys_initcall() is needed for
+ * generic implementations so that they're available for comparison tests when
+ * other implementations are registered later by module_init().
+ */
+arch_initcall(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
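Editor's note: the algboss.c comment spells out the ordering contract the subsys_initcall conversions in this patch rely on: initcall levels run strictly in sequence (arch before subsys before device/module), so moving cryptomgr to arch_initcall() guarantees the self-test machinery exists before any subsys_initcall() algorithm registers. A userspace toy that mimics the level ordering (names invented):

#include <stdio.h>

static void cryptomgr_init(void) { printf("cryptomgr ready\n"); }
static void aes_init(void)       { printf("aes registered, self-test runs\n"); }

/* The kernel runs all arch_initcall functions before any subsys_initcall. */
static void (*arch_initcalls[])(void)   = { cryptomgr_init };
static void (*subsys_initcalls[])(void) = { aes_init };

int main(void)
{
	unsigned int i;

	for (i = 0; i < 1; i++)
		arch_initcalls[i]();
	for (i = 0; i < 1; i++)
		subsys_initcalls[i]();
	return 0;
}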
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index eff337ce9003..e7c43ea4ce9d 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -472,7 +472,7 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-module_init(prng_mod_init);
+subsys_initcall(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 4bb187c2a902..673927de0eb9 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -699,7 +699,7 @@ static void __exit anubis_mod_fini(void)
crypto_unregister_alg(&anubis_alg);
}
-module_init(anubis_mod_init);
+subsys_initcall(anubis_mod_init);
module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 6c93342e3405..2233d36456e2 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -163,7 +163,7 @@ static void __exit arc4_exit(void)
crypto_unregister_skcipher(&arc4_skcipher);
}
-module_init(arc4_init);
+subsys_initcall(arc4_init);
module_exit(arc4_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
index 5d4c270463f6..76d2ce3a1b5b 100644
--- a/crypto/asymmetric_keys/asym_tpm.c
+++ b/crypto/asymmetric_keys/asym_tpm.c
@@ -276,6 +276,10 @@ static int tpm_sign(struct tpm_buf *tb,
return datalen;
}
+
+/* Room to fit two u32 zeros for algo id and parameters length. */
+#define SETKEY_PARAMS_SIZE (sizeof(u32) * 2)
+
/*
* Maximum buffer size for the BER/DER encoded public key. The public key
* is of the form SEQUENCE { INTEGER n, INTEGER e } where n is a maximum 2048
@@ -286,8 +290,9 @@ static int tpm_sign(struct tpm_buf *tb,
* - 257 bytes of n
* - max 2 bytes for INTEGER e type/length
* - 3 bytes of e
+ * - 4+4 of zeros for set_pub_key parameters (SETKEY_PARAMS_SIZE)
*/
-#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3)
+#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3 + SETKEY_PARAMS_SIZE)
/*
* Provide a part of a description of the key for /proc/keys.
@@ -364,6 +369,8 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
cur = encode_tag_length(cur, 0x02, sizeof(e));
memcpy(cur, e, sizeof(e));
cur += sizeof(e);
+ /* Zero parameters to satisfy set_pub_key ABI. */
+ memset(cur, 0, SETKEY_PARAMS_SIZE);
return cur - buf;
}
@@ -744,12 +751,10 @@ static int tpm_key_verify_signature(const struct key *key,
struct crypto_wait cwait;
struct crypto_akcipher *tfm;
struct akcipher_request *req;
- struct scatterlist sig_sg, digest_sg;
+ struct scatterlist src_sg[2];
char alg_name[CRYPTO_MAX_ALG_NAME];
uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
uint32_t der_pub_key_len;
- void *output;
- unsigned int outlen;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -781,37 +786,17 @@ static int tpm_key_verify_signature(const struct key *key,
if (!req)
goto error_free_tfm;
- ret = -ENOMEM;
- outlen = crypto_akcipher_maxsize(tfm);
- output = kmalloc(outlen, GFP_KERNEL);
- if (!output)
- goto error_free_req;
-
- sg_init_one(&sig_sg, sig->s, sig->s_size);
- sg_init_one(&digest_sg, output, outlen);
- akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
- outlen);
+ sg_init_table(src_sg, 2);
+ sg_set_buf(&src_sg[0], sig->s, sig->s_size);
+ sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
+ akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
+ sig->digest_size);
crypto_init_wait(&cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &cwait);
-
- /* Perform the verification calculation. This doesn't actually do the
- * verification, but rather calculates the hash expected by the
- * signature and returns that to us.
- */
ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
- if (ret)
- goto out_free_output;
-
- /* Do the actual verification step. */
- if (req->dst_len != sig->digest_size ||
- memcmp(sig->digest, output, sig->digest_size) != 0)
- ret = -EKEYREJECTED;
-out_free_output:
- kfree(output);
-error_free_req:
akcipher_request_free(req);
error_free_tfm:
crypto_free_akcipher(tfm);
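Editor's note: both verification sites in this patch switch to the new akcipher convention: the signature and the expected digest are passed as a single two-entry source scatterlist with a NULL destination, and the algorithm itself performs the comparison, returning -EKEYREJECTED on mismatch. A hedged kernel-style sketch of the call shape (demo_verify is invented; error handling elided, not standalone-runnable):

#include <crypto/akcipher.h>
#include <linux/scatterlist.h>

static int demo_verify(struct akcipher_request *req, struct crypto_wait *wait,
		       void *s, unsigned int s_size,
		       void *digest, unsigned int digest_size)
{
	struct scatterlist src_sg[2];

	sg_init_table(src_sg, 2);
	sg_set_buf(&src_sg[0], s, s_size);
	sg_set_buf(&src_sg[1], digest, digest_size);
	akcipher_request_set_crypt(req, src_sg, NULL, s_size, digest_size);
	return crypto_wait_req(crypto_akcipher_verify(req), wait);
}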
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 97c77f66b20d..f7b0980bf02d 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -56,7 +56,6 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
goto error_no_desc;
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
/* Digest the message [RFC2315 9.3] */
ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index f5d85b47fcc6..77e0ae7840ff 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -45,6 +45,7 @@ void public_key_free(struct public_key *key)
{
if (key) {
kfree(key->key);
+ kfree(key->params);
kfree(key);
}
}
@@ -94,6 +95,12 @@ int software_key_determine_akcipher(const char *encoding,
return -ENOPKG;
}
+static u8 *pkey_pack_u32(u8 *dst, u32 val)
+{
+ memcpy(dst, &val, sizeof(val));
+ return dst + sizeof(val);
+}
+
/*
* Query information about a key.
*/
@@ -103,6 +110,7 @@ static int software_key_query(const struct kernel_pkey_params *params,
struct crypto_akcipher *tfm;
struct public_key *pkey = params->key->payload.data[asym_crypto];
char alg_name[CRYPTO_MAX_ALG_NAME];
+ u8 *key, *ptr;
int ret, len;
ret = software_key_determine_akcipher(params->encoding,
@@ -115,14 +123,22 @@ static int software_key_query(const struct kernel_pkey_params *params,
if (IS_ERR(tfm))
return PTR_ERR(tfm);
+ key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+ GFP_KERNEL);
+ if (!key)
+ goto error_free_tfm;
+ memcpy(key, pkey->key, pkey->keylen);
+ ptr = key + pkey->keylen;
+ ptr = pkey_pack_u32(ptr, pkey->algo);
+ ptr = pkey_pack_u32(ptr, pkey->paramlen);
+ memcpy(ptr, pkey->params, pkey->paramlen);
+
if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
else
- ret = crypto_akcipher_set_pub_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret < 0)
- goto error_free_tfm;
+ goto error_free_key;
len = crypto_akcipher_maxsize(tfm);
info->key_size = len * 8;
@@ -137,6 +153,8 @@ static int software_key_query(const struct kernel_pkey_params *params,
KEYCTL_SUPPORTS_SIGN);
ret = 0;
+error_free_key:
+ kfree(key);
error_free_tfm:
crypto_free_akcipher(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
@@ -155,6 +173,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
struct crypto_wait cwait;
struct scatterlist in_sg, out_sg;
char alg_name[CRYPTO_MAX_ALG_NAME];
+ char *key, *ptr;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -173,14 +192,23 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
if (!req)
goto error_free_tfm;
+ key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+ GFP_KERNEL);
+ if (!key)
+ goto error_free_req;
+
+ memcpy(key, pkey->key, pkey->keylen);
+ ptr = key + pkey->keylen;
+ ptr = pkey_pack_u32(ptr, pkey->algo);
+ ptr = pkey_pack_u32(ptr, pkey->paramlen);
+ memcpy(ptr, pkey->params, pkey->paramlen);
+
if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
else
- ret = crypto_akcipher_set_pub_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret)
- goto error_free_req;
+ goto error_free_key;
sg_init_one(&in_sg, in, params->in_len);
sg_init_one(&out_sg, out, params->out_len);
@@ -210,6 +238,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
if (ret == 0)
ret = req->dst_len;
+error_free_key:
+ kfree(key);
error_free_req:
akcipher_request_free(req);
error_free_tfm:
@@ -227,10 +257,9 @@ int public_key_verify_signature(const struct public_key *pkey,
struct crypto_wait cwait;
struct crypto_akcipher *tfm;
struct akcipher_request *req;
- struct scatterlist sig_sg, digest_sg;
+ struct scatterlist src_sg[2];
char alg_name[CRYPTO_MAX_ALG_NAME];
- void *output;
- unsigned int outlen;
+ char *key, *ptr;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -254,45 +283,37 @@ int public_key_verify_signature(const struct public_key *pkey,
if (!req)
goto error_free_tfm;
+ key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+ GFP_KERNEL);
+ if (!key)
+ goto error_free_req;
+
+ memcpy(key, pkey->key, pkey->keylen);
+ ptr = key + pkey->keylen;
+ ptr = pkey_pack_u32(ptr, pkey->algo);
+ ptr = pkey_pack_u32(ptr, pkey->paramlen);
+ memcpy(ptr, pkey->params, pkey->paramlen);
+
if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
else
- ret = crypto_akcipher_set_pub_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret)
- goto error_free_req;
-
- ret = -ENOMEM;
- outlen = crypto_akcipher_maxsize(tfm);
- output = kmalloc(outlen, GFP_KERNEL);
- if (!output)
- goto error_free_req;
+ goto error_free_key;
- sg_init_one(&sig_sg, sig->s, sig->s_size);
- sg_init_one(&digest_sg, output, outlen);
- akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
- outlen);
+ sg_init_table(src_sg, 2);
+ sg_set_buf(&src_sg[0], sig->s, sig->s_size);
+ sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
+ akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
+ sig->digest_size);
crypto_init_wait(&cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &cwait);
-
- /* Perform the verification calculation. This doesn't actually do the
- * verification, but rather calculates the hash expected by the
- * signature and returns that to us.
- */
ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
- if (ret)
- goto out_free_output;
-
- /* Do the actual verification step. */
- if (req->dst_len != sig->digest_size ||
- memcmp(sig->digest, output, sig->digest_size) != 0)
- ret = -EKEYREJECTED;
-out_free_output:
- kfree(output);
+error_free_key:
+ kfree(key);
error_free_req:
akcipher_request_free(req);
error_free_tfm:
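Editor's note: public_key.c now builds the same packed buffer in three places: the raw key followed by two u32s (algorithm OID and parameter length) and the DER-encoded parameters, which is the layout the new set_{pub,priv}_key consumers expect. A runnable userspace sketch of that layout (all values invented):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mirrors pkey_pack_u32(): append a u32 and return the advanced cursor. */
static uint8_t *pack_u32(uint8_t *dst, uint32_t val)
{
	memcpy(dst, &val, sizeof(val));
	return dst + sizeof(val);
}

int main(void)
{
	const uint8_t raw_key[4] = { 1, 2, 3, 4 };
	const uint8_t params[2] = { 9, 9 };
	uint32_t algo = 7;
	uint8_t *buf, *p;

	/* [ raw key | u32 algo | u32 paramlen | params ] */
	buf = malloc(sizeof(raw_key) + 2 * sizeof(uint32_t) + sizeof(params));
	if (!buf)
		return 1;

	memcpy(buf, raw_key, sizeof(raw_key));
	p = buf + sizeof(raw_key);
	p = pack_u32(p, algo);
	p = pack_u32(p, sizeof(params));
	memcpy(p, params, sizeof(params));

	printf("packed %zu bytes\n", (size_t)(p + sizeof(params) - buf));
	free(buf);
	return 0;
}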
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index d178650fd524..f8e4a932bcfb 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -354,7 +354,6 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
goto error_no_desc;
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_init(desc);
if (ret < 0)
goto error;
diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1
index aae0cde414e2..5c9f4e4a5231 100644
--- a/crypto/asymmetric_keys/x509.asn1
+++ b/crypto/asymmetric_keys/x509.asn1
@@ -22,7 +22,7 @@ CertificateSerialNumber ::= INTEGER
AlgorithmIdentifier ::= SEQUENCE {
algorithm OBJECT IDENTIFIER ({ x509_note_OID }),
- parameters ANY OPTIONAL
+ parameters ANY OPTIONAL ({ x509_note_params })
}
Name ::= SEQUENCE OF RelativeDistinguishedName
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 991f4d735a4e..5b7bfd95c334 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -26,6 +26,9 @@ struct x509_parse_context {
const void *cert_start; /* Start of cert content */
const void *key; /* Key data */
size_t key_size; /* Size of key data */
+ const void *params; /* Key parameters */
+ size_t params_size; /* Size of key parameters */
+ enum OID key_algo; /* Public key algorithm */
enum OID last_oid; /* Last OID encountered */
enum OID algo_oid; /* Algorithm OID */
unsigned char nr_mpi; /* Number of MPIs stored */
@@ -109,6 +112,13 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
cert->pub->keylen = ctx->key_size;
+ cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL);
+ if (!cert->pub->params)
+ goto error_decode;
+
+ cert->pub->paramlen = ctx->params_size;
+ cert->pub->algo = ctx->key_algo;
+
/* Grab the signature bits */
ret = x509_get_sig_params(cert);
if (ret < 0)
@@ -220,6 +230,14 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
case OID_sha224WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha224";
goto rsa_pkcs1;
+
+ case OID_gost2012Signature256:
+ ctx->cert->sig->hash_algo = "streebog256";
+ goto ecrdsa;
+
+ case OID_gost2012Signature512:
+ ctx->cert->sig->hash_algo = "streebog512";
+ goto ecrdsa;
}
rsa_pkcs1:
@@ -227,6 +245,11 @@ rsa_pkcs1:
ctx->cert->sig->encoding = "pkcs1";
ctx->algo_oid = ctx->last_oid;
return 0;
+ecrdsa:
+ ctx->cert->sig->pkey_algo = "ecrdsa";
+ ctx->cert->sig->encoding = "raw";
+ ctx->algo_oid = ctx->last_oid;
+ return 0;
}
/*
@@ -246,7 +269,8 @@ int x509_note_signature(void *context, size_t hdrlen,
return -EINVAL;
}
- if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
+ if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
+ strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0) {
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
return -EBADMSG;
@@ -401,6 +425,27 @@ int x509_note_subject(void *context, size_t hdrlen,
}
/*
+ * Extract the parameters for the public key
+ */
+int x509_note_params(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct x509_parse_context *ctx = context;
+
+ /*
+ * AlgorithmIdentifier appears three times in an X.509 certificate; skip
+ * the first and ignore the third, using the second one, which comes
+ * after subject and before subjectPublicKey.
+ */
+ if (!ctx->cert->raw_subject || ctx->key)
+ return 0;
+ ctx->params = value - hdrlen;
+ ctx->params_size = vlen + hdrlen;
+ return 0;
+}
+
+/*
* Extract the data for the public key algorithm
*/
int x509_extract_key_data(void *context, size_t hdrlen,
@@ -409,11 +454,15 @@ int x509_extract_key_data(void *context, size_t hdrlen,
{
struct x509_parse_context *ctx = context;
- if (ctx->last_oid != OID_rsaEncryption)
+ ctx->key_algo = ctx->last_oid;
+ if (ctx->last_oid == OID_rsaEncryption)
+ ctx->cert->pub->pkey_algo = "rsa";
+ else if (ctx->last_oid == OID_gost2012PKey256 ||
+ ctx->last_oid == OID_gost2012PKey512)
+ ctx->cert->pub->pkey_algo = "ecrdsa";
+ else
return -ENOPKG;
- ctx->cert->pub->pkey_algo = "rsa";
-
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
return -EBADMSG;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 9338b4558cdc..bd96683d8cde 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -77,7 +77,6 @@ int x509_get_sig_params(struct x509_certificate *cert)
goto error;
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
if (ret < 0)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 4be293a4b5f0..b3eddac7fa3a 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -508,7 +508,7 @@ static void __exit crypto_authenc_module_exit(void)
crypto_unregister_template(&crypto_authenc_tmpl);
}
-module_init(crypto_authenc_module_init);
+subsys_initcall(crypto_authenc_module_init);
module_exit(crypto_authenc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 4741fe89ba2c..58074308e535 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -523,7 +523,7 @@ static void __exit crypto_authenc_esn_module_exit(void)
crypto_unregister_template(&crypto_authenc_esn_tmpl);
}
-module_init(crypto_authenc_esn_module_init);
+subsys_initcall(crypto_authenc_esn_module_init);
module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 87b392a77a93..8548ced8b074 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -133,7 +133,7 @@ static void __exit blowfish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-module_init(blowfish_mod_init);
+subsys_initcall(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 32ddd4836ff5..15ce1281f5d9 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1092,7 +1092,7 @@ static void __exit camellia_fini(void)
crypto_unregister_alg(&camellia_alg);
}
-module_init(camellia_init);
+subsys_initcall(camellia_init);
module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 66169c178314..24bc7d4e33be 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -543,7 +543,7 @@ static void __exit cast5_mod_fini(void)
crypto_unregister_alg(&alg);
}
-module_init(cast5_mod_init);
+subsys_initcall(cast5_mod_init);
module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index c8e5ec69790e..edd59cc34991 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -285,7 +285,7 @@ static void __exit cast6_mod_fini(void)
crypto_unregister_alg(&alg);
}
-module_init(cast6_mod_init);
+subsys_initcall(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index d12efaac9230..129f79d03365 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -98,7 +98,7 @@ static void __exit crypto_cbc_module_exit(void)
crypto_unregister_template(&crypto_cbc_tmpl);
}
-module_init(crypto_cbc_module_init);
+subsys_initcall(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 50df8f001c1c..c1ef9d0b4271 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -458,7 +458,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
static int crypto_ccm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
- const char *full_name,
const char *ctr_name,
const char *mac_name)
{
@@ -486,7 +485,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
mac = __crypto_hash_alg_common(mac_alg);
err = -EINVAL;
- if (mac->digestsize != 16)
+ if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
+ mac->digestsize != 16)
goto out_put_mac;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
@@ -509,23 +509,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
- /* Not a stream cipher? */
+ /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
- if (ctr->base.cra_blocksize != 1)
+ if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+ crypto_skcipher_alg_ivsize(ctr) != 16 ||
+ ctr->base.cra_blocksize != 1)
goto err_drop_ctr;
- /* We want the real thing! */
- if (crypto_skcipher_alg_ivsize(ctr) != 16)
+ /* ctr and cbcmac must use the same underlying block cipher. */
+ if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
goto err_drop_ctr;
err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+ goto err_drop_ctr;
+
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"ccm_base(%s,%s)", ctr->base.cra_driver_name,
mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_ctr;
- memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (mac->base.cra_priority +
ctr->base.cra_priority) / 2;
@@ -567,7 +571,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
char mac_name[CRYPTO_MAX_ALG_NAME];
- char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@@ -581,35 +584,24 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
cipher_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
- CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
- mac_name);
+ return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
}
static int crypto_ccm_base_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
const char *ctr_name;
- const char *cipher_name;
- char full_name[CRYPTO_MAX_ALG_NAME];
+ const char *mac_name;
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
return PTR_ERR(ctr_name);
- cipher_name = crypto_attr_alg_name(tb[2]);
- if (IS_ERR(cipher_name))
- return PTR_ERR(cipher_name);
-
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
- ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
+ mac_name = crypto_attr_alg_name(tb[2]);
+ if (IS_ERR(mac_name))
+ return PTR_ERR(mac_name);
- return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
- cipher_name);
+ return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
}
static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1014,7 +1006,7 @@ static void __exit crypto_ccm_module_exit(void)
ARRAY_SIZE(crypto_ccm_tmpls));
}
-module_init(crypto_ccm_module_init);
+subsys_initcall(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);
MODULE_LICENSE("GPL");
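The reworked checks above operate purely on algorithm names: for an skcipher named "ctr(aes)" the inner cipher name starts at offset 4, and for a hash named "cbcmac(aes)" at offset 7, so comparing the two tails confirms both templates wrap the same block cipher. A minimal standalone sketch of that comparison (ordinary userspace C, not kernel code):

    #include <stdio.h>
    #include <string.h>

    /* Mimic the ccm_base name check: both tails point at "aes)" for
     * "ctr(aes)" and "cbcmac(aes)", so strcmp() of the tails suffices.
     */
    static int same_underlying_cipher(const char *ctr, const char *mac)
    {
        if (strncmp(ctr, "ctr(", 4) != 0 || strncmp(mac, "cbcmac(", 7) != 0)
            return 0;
        return strcmp(ctr + 4, mac + 7) == 0;
    }

    int main(void)
    {
        printf("%d\n", same_underlying_cipher("ctr(aes)", "cbcmac(aes)"));      /* 1 */
        printf("%d\n", same_underlying_cipher("ctr(aes)", "cbcmac(des3_ede)")); /* 0 */
        return 0;
    }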
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 03ac847f6d6a..7b68fbb61732 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -243,7 +243,7 @@ static void __exit crypto_cfb_module_exit(void)
crypto_unregister_template(&crypto_cfb_tmpl);
}
-module_init(crypto_cfb_module_init);
+subsys_initcall(crypto_cfb_module_init);
module_exit(crypto_cfb_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index ed2e12e26dd8..e38a2d61819a 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -645,8 +645,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha_name,
- poly_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,%s)", name, chacha->base.cra_name,
+ poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto out_drop_chacha;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s,%s)", name, chacha->base.cra_driver_name,
@@ -725,7 +725,7 @@ static void __exit chacha20poly1305_module_exit(void)
ARRAY_SIZE(rfc7539_tmpls));
}
-module_init(chacha20poly1305_module_init);
+subsys_initcall(chacha20poly1305_module_init);
module_exit(chacha20poly1305_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
index 35b583101f4f..d2ec04997832 100644
--- a/crypto/chacha_generic.c
+++ b/crypto/chacha_generic.c
@@ -22,18 +22,16 @@ static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
/* aligned to potentially speed up crypto_xor() */
u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
- if (dst != src)
- memcpy(dst, src, bytes);
-
while (bytes >= CHACHA_BLOCK_SIZE) {
chacha_block(state, stream, nrounds);
- crypto_xor(dst, stream, CHACHA_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
bytes -= CHACHA_BLOCK_SIZE;
dst += CHACHA_BLOCK_SIZE;
+ src += CHACHA_BLOCK_SIZE;
}
if (bytes) {
chacha_block(state, stream, nrounds);
- crypto_xor(dst, stream, bytes);
+ crypto_xor_cpy(dst, src, stream, bytes);
}
}
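crypto_xor_cpy() fuses the old memcpy() plus in-place crypto_xor() pair into a single pass that writes dst = src ^ keystream, so the plaintext is read only once. A small standalone sketch of the semantics relied on here (plain C, not the kernel implementation):

    #include <stdio.h>

    /* Sketch of crypto_xor_cpy() semantics: dst = src1 ^ src2 in one pass. */
    static void xor_cpy(unsigned char *dst, const unsigned char *src1,
                        const unsigned char *src2, unsigned int len)
    {
        unsigned int i;

        for (i = 0; i < len; i++)
            dst[i] = src1[i] ^ src2[i];
    }

    int main(void)
    {
        const unsigned char msg[4] = { 0xde, 0xad, 0xbe, 0xef };
        const unsigned char ks[4]  = { 0xff, 0x00, 0xff, 0x00 };
        unsigned char dst[4];

        xor_cpy(dst, msg, ks, 4);
        printf("%02x %02x %02x %02x\n", dst[0], dst[1], dst[2], dst[3]);
        return 0;                      /* prints: 21 ad 41 ef */
    }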
@@ -52,7 +50,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
unsigned int nbytes = walk.nbytes;
if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
+ nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
nbytes, ctx->nrounds);
@@ -203,7 +201,7 @@ static void __exit chacha_generic_mod_fini(void)
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
-module_init(chacha_generic_mod_init);
+subsys_initcall(chacha_generic_mod_init);
module_exit(chacha_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 16301f52858c..c60b6c011ec6 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -313,7 +313,7 @@ static void __exit crypto_cmac_module_exit(void)
crypto_unregister_template(&crypto_cmac_tmpl);
}
-module_init(crypto_cmac_module_init);
+subsys_initcall(crypto_cmac_module_init);
module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index 00facd27bcc2..9e97912280bd 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -146,7 +146,7 @@ static void __exit crc32_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(crc32_mod_init);
+subsys_initcall(crc32_mod_init);
module_exit(crc32_mod_fini);
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 7283066ecc98..ad26f15d4c7b 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -165,7 +165,7 @@ static void __exit crc32c_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(crc32c_mod_init);
+subsys_initcall(crc32c_mod_init);
module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 8e94e29dc6fc..d90c0070710e 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
return 0;
}
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
- u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
- *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+ *(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- return __chksum_finup(&ctx->crc, data, len, out);
+ return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(&ctx->crc, data, length, out);
+ return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {
@@ -115,7 +112,7 @@ static void __exit crct10dif_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(crct10dif_mod_init);
+subsys_initcall(crct10dif_mod_init);
module_exit(crct10dif_mod_fini);
MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 5640e5db7bdb..b3bb99390ae7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -65,15 +65,6 @@ struct aead_instance_ctx {
struct cryptd_queue *queue;
};
-struct cryptd_blkcipher_ctx {
- atomic_t refcnt;
- struct crypto_blkcipher *child;
-};
-
-struct cryptd_blkcipher_request_ctx {
- crypto_completion_t complete;
-};
-
struct cryptd_skcipher_ctx {
atomic_t refcnt;
struct crypto_sync_skcipher *child;
@@ -216,129 +207,6 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}
-static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
- const u8 *key, unsigned int keylen)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
- struct crypto_blkcipher *child = ctx->child;
- int err;
-
- crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_blkcipher_setkey(child, key, keylen);
- crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
- struct crypto_blkcipher *child,
- int err,
- int (*crypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int len))
-{
- struct cryptd_blkcipher_request_ctx *rctx;
- struct cryptd_blkcipher_ctx *ctx;
- struct crypto_ablkcipher *tfm;
- struct blkcipher_desc desc;
- int refcnt;
-
- rctx = ablkcipher_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- desc.tfm = child;
- desc.info = req->info;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypt(&desc, req->dst, req->src, req->nbytes);
-
- req->base.complete = rctx->complete;
-
-out:
- tfm = crypto_ablkcipher_reqtfm(req);
- ctx = crypto_ablkcipher_ctx(tfm);
- refcnt = atomic_read(&ctx->refcnt);
-
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-
- if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
- crypto_free_ablkcipher(tfm);
-}
-
-static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
- struct crypto_blkcipher *child = ctx->child;
-
- cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
- crypto_blkcipher_crt(child)->encrypt);
-}
-
-static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
- struct crypto_blkcipher *child = ctx->child;
-
- cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
- crypto_blkcipher_crt(child)->decrypt);
-}
-
-static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
- crypto_completion_t compl)
-{
- struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cryptd_queue *queue;
-
- queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
- rctx->complete = req->base.complete;
- req->base.complete = compl;
-
- return cryptd_enqueue_request(queue, &req->base);
-}
-
-static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
-{
- return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
-}
-
-static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
-{
- return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
-}
-
-static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_spawn *spawn = &ictx->spawn;
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_blkcipher *cipher;
-
- cipher = crypto_spawn_blkcipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
- tfm->crt_ablkcipher.reqsize =
- sizeof(struct cryptd_blkcipher_request_ctx);
- return 0;
-}
-
-static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_blkcipher(ctx->child);
-}
-
static int cryptd_init_instance(struct crypto_instance *inst,
struct crypto_alg *alg)
{
@@ -382,67 +250,6 @@ out_free_inst:
goto out;
}
-static int cryptd_create_blkcipher(struct crypto_template *tmpl,
- struct rtattr **tb,
- struct cryptd_queue *queue)
-{
- struct cryptd_instance_ctx *ctx;
- struct crypto_instance *inst;
- struct crypto_alg *alg;
- u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
- u32 mask = CRYPTO_ALG_TYPE_MASK;
- int err;
-
- cryptd_check_internal(tb, &type, &mask);
-
- alg = crypto_get_attr_alg(tb, type, mask);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
-
- inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- ctx = crypto_instance_ctx(inst);
- ctx->queue = queue;
-
- err = crypto_init_spawn(&ctx->spawn, alg, inst,
- CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
- if (err)
- goto out_free_inst;
-
- type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.cra_flags = type;
- inst->alg.cra_type = &crypto_ablkcipher_type;
-
- inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
- inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
- inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
-
- inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
-
- inst->alg.cra_init = cryptd_blkcipher_init_tfm;
- inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
-
- inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
- inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
- inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
-
- err = crypto_register_instance(tmpl, inst);
- if (err) {
- crypto_drop_spawn(&ctx->spawn);
-out_free_inst:
- kfree(inst);
- }
-
-out_put_alg:
- crypto_mod_put(alg);
- return err;
-}
-
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
@@ -738,7 +545,6 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
goto out;
desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_init(desc);
@@ -830,7 +636,6 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
goto out;
desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = shash_ahash_digest(req, desc);
@@ -859,7 +664,6 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in)
struct shash_desc *desc = cryptd_shash_desc(req);
desc->tfm = ctx->child;
- desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
}
@@ -1118,10 +922,6 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
- if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return cryptd_create_blkcipher(tmpl, tb, &queue);
-
return cryptd_create_skcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
return cryptd_create_hash(tmpl, tb, &queue);
@@ -1160,58 +960,6 @@ static struct crypto_template cryptd_tmpl = {
.module = THIS_MODULE,
};
-struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
- u32 type, u32 mask)
-{
- char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct cryptd_blkcipher_ctx *ctx;
- struct crypto_tfm *tfm;
-
- if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- type = crypto_skcipher_type(type);
- mask &= ~CRYPTO_ALG_TYPE_MASK;
- mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
- tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_tfm(tfm);
- return ERR_PTR(-EINVAL);
- }
-
- ctx = crypto_tfm_ctx(tfm);
- atomic_set(&ctx->refcnt, 1);
-
- return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
-}
-EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
-
-struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
- return ctx->child;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
-
-bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
- return atomic_read(&ctx->refcnt) - 1;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
-
-void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
- if (atomic_dec_and_test(&ctx->refcnt))
- crypto_free_ablkcipher(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
-
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
u32 type, u32 mask)
{
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 01630a9c7e01..9320d4eaa4a8 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -220,7 +220,7 @@ static void __exit crypto_null_mod_fini(void)
crypto_unregister_skcipher(&skcipher_null);
}
-module_init(crypto_null_mod_init);
+subsys_initcall(crypto_null_mod_init);
module_exit(crypto_null_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/ctr.c b/crypto/ctr.c
index ec8f8b67473a..52cdf2c5605f 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -384,7 +384,7 @@ static void __exit crypto_ctr_module_exit(void)
ARRAY_SIZE(crypto_ctr_tmpls));
}
-module_init(crypto_ctr_module_init);
+subsys_initcall(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/cts.c b/crypto/cts.c
index 4e28d83ae37d..6b6087dbb62a 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -152,12 +152,14 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = req->cryptlen;
- int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
unsigned int offset;
skcipher_request_set_tfm(subreq, ctx->child);
- if (cbc_blocks <= 0) {
+ if (nbytes < bsize)
+ return -EINVAL;
+
+ if (nbytes == bsize) {
skcipher_request_set_callback(subreq, req->base.flags,
req->base.complete,
req->base.data);
@@ -166,7 +168,7 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
return crypto_skcipher_encrypt(subreq);
}
- offset = cbc_blocks * bsize;
+ offset = rounddown(nbytes - 1, bsize);
rctx->offset = offset;
skcipher_request_set_callback(subreq, req->base.flags,
@@ -244,13 +246,15 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = req->cryptlen;
- int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
unsigned int offset;
u8 *space;
skcipher_request_set_tfm(subreq, ctx->child);
- if (cbc_blocks <= 0) {
+ if (nbytes < bsize)
+ return -EINVAL;
+
+ if (nbytes == bsize) {
skcipher_request_set_callback(subreq, req->base.flags,
req->base.complete,
req->base.data);
@@ -264,10 +268,10 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
space = crypto_cts_reqctx_space(req);
- offset = cbc_blocks * bsize;
+ offset = rounddown(nbytes - 1, bsize);
rctx->offset = offset;
- if (cbc_blocks <= 1)
+ if (offset <= bsize)
memcpy(space, req->iv, bsize);
else
scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
@@ -419,7 +423,7 @@ static void __exit crypto_cts_module_exit(void)
crypto_unregister_template(&crypto_cts_tmpl);
}
-module_init(crypto_cts_module_init);
+subsys_initcall(crypto_cts_module_init);
module_exit(crypto_cts_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
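The new offset arithmetic is easiest to see with numbers. Assuming the usual kernel rounddown(x, y) == x - (x % y), with bsize = 16 the expression rounddown(nbytes - 1, bsize) lands on the start of the final ciphertext-stealing step: two full blocks (nbytes = 32) give offset 16, while one extra byte (nbytes = 33) pushes it to 32, leaving a 1-byte tail. A quick standalone check:

    #include <stdio.h>

    /* rounddown() as assumed above: round x down to a multiple of y. */
    #define ROUNDDOWN(x, y) ((x) - ((x) % (y)))

    int main(void)
    {
        const unsigned int bsize = 16;
        const unsigned int sizes[] = { 17, 32, 33, 48 };
        unsigned int i;

        for (i = 0; i < 4; i++)           /* offsets: 16, 16, 32, 32 */
            printf("nbytes=%2u offset=%2u\n",
                   sizes[i], ROUNDDOWN(sizes[i] - 1, bsize));
        return 0;
    }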
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 94ec3b36a8e8..aab089cde1bf 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -334,7 +334,7 @@ static void __exit deflate_mod_fini(void)
crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
}
-module_init(deflate_mod_init);
+subsys_initcall(deflate_mod_init);
module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 1e6621665dd9..d7a88b4fa611 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -862,14 +862,11 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
unsigned int keylen)
{
- const u32 *K = (const u32 *)key;
+ int err;
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ err = __des3_verify_key(flags, key);
+ if (unlikely(err))
+ return err;
des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
@@ -993,7 +990,7 @@ static void __exit des_generic_mod_fini(void)
crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
}
-module_init(des_generic_mod_init);
+subsys_initcall(des_generic_mod_init);
module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/dh.c b/crypto/dh.c
index 09a44de4209d..ce77fb4ee8b3 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -236,7 +236,7 @@ static void dh_exit(void)
crypto_unregister_kpp(&dh);
}
-module_init(dh_init);
+subsys_initcall(dh_init);
module_exit(dh_exit);
MODULE_ALIAS_CRYPTO("dh");
MODULE_LICENSE("GPL");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index bc52d9562611..2a5b16bb000c 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1587,7 +1587,6 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
}
sdesc->shash.tfm = tfm;
- sdesc->shash.flags = 0;
drbg->priv_data = sdesc;
return crypto_shash_alignmask(tfm);
@@ -2039,7 +2038,7 @@ static void __exit drbg_exit(void)
crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
-module_init(drbg_init);
+subsys_initcall(drbg_init);
module_exit(drbg_exit);
#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 0732715c8d91..de839129d151 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -101,7 +101,7 @@ static void __exit crypto_ecb_module_exit(void)
crypto_unregister_template(&crypto_ecb_tmpl);
}
-module_init(crypto_ecb_module_init);
+subsys_initcall(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecc.c b/crypto/ecc.c
index ed1237115066..dfe114bc0c4a 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2013, Kenneth MacKay
- * All rights reserved.
+ * Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved.
+ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -24,12 +24,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/fips.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
+#include <asm/unaligned.h>
+#include <linux/ratelimit.h>
#include "ecc.h"
#include "ecc_curve_defs.h"
@@ -112,7 +115,7 @@ static void vli_clear(u64 *vli, unsigned int ndigits)
}
/* Returns true if vli == 0, false otherwise. */
-static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
+bool vli_is_zero(const u64 *vli, unsigned int ndigits)
{
int i;
@@ -123,6 +126,7 @@ static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
return true;
}
+EXPORT_SYMBOL(vli_is_zero);
/* Returns nonzero if bit bit of vli is set. */
static u64 vli_test_bit(const u64 *vli, unsigned int bit)
@@ -130,6 +134,11 @@ static u64 vli_test_bit(const u64 *vli, unsigned int bit)
return (vli[bit / 64] & ((u64)1 << (bit % 64)));
}
+static bool vli_is_negative(const u64 *vli, unsigned int ndigits)
+{
+ return vli_test_bit(vli, ndigits * 64 - 1);
+}
+
/* Counts the number of 64-bit "digits" in vli. */
static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
{
@@ -161,6 +170,27 @@ static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
return ((num_digits - 1) * 64 + i);
}
+/* Set dest from unaligned bit string src. */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
+{
+ int i;
+ const u64 *from = src;
+
+ for (i = 0; i < ndigits; i++)
+ dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
+}
+EXPORT_SYMBOL(vli_from_be64);
+
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
+{
+ int i;
+ const u64 *from = src;
+
+ for (i = 0; i < ndigits; i++)
+ dest[i] = get_unaligned_le64(&from[i]);
+}
+EXPORT_SYMBOL(vli_from_le64);
+
/* Sets dest = src. */
static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
{
@@ -171,7 +201,7 @@ static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
}
/* Returns sign of left - right. */
-static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
{
int i;
@@ -184,6 +214,7 @@ static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
return 0;
}
+EXPORT_SYMBOL(vli_cmp);
/* Computes result = in << c, returning carry. Can modify in place
* (if result == in). 0 < shift < 64.
@@ -239,8 +270,30 @@ static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
return carry;
}
+/* Computes result = left + right, returning carry. Can modify in place. */
+static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
+ unsigned int ndigits)
+{
+ u64 carry = right;
+ int i;
+
+ for (i = 0; i < ndigits; i++) {
+ u64 sum;
+
+ sum = left[i] + carry;
+ if (sum != left[i])
+ carry = (sum < left[i]);
+ else
+ carry = !!carry;
+
+ result[i] = sum;
+ }
+
+ return carry;
+}
+
/* Computes result = left - right, returning borrow. Can modify in place. */
-static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
unsigned int ndigits)
{
u64 borrow = 0;
@@ -258,9 +311,37 @@ static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
return borrow;
}
+EXPORT_SYMBOL(vli_sub);
+
+/* Computes result = left - right, returning borrow. Can modify in place. */
+static u64 vli_usub(u64 *result, const u64 *left, u64 right,
+ unsigned int ndigits)
+{
+ u64 borrow = right;
+ int i;
+
+ for (i = 0; i < ndigits; i++) {
+ u64 diff;
+
+ diff = left[i] - borrow;
+ if (diff != left[i])
+ borrow = (diff > left[i]);
+
+ result[i] = diff;
+ }
+
+ return borrow;
+}
static uint128_t mul_64_64(u64 left, u64 right)
{
+ uint128_t result;
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+ unsigned __int128 m = (unsigned __int128)left * right;
+
+ result.m_low = m;
+ result.m_high = m >> 64;
+#else
u64 a0 = left & 0xffffffffull;
u64 a1 = left >> 32;
u64 b0 = right & 0xffffffffull;
@@ -269,7 +350,6 @@ static uint128_t mul_64_64(u64 left, u64 right)
u64 m1 = a0 * b1;
u64 m2 = a1 * b0;
u64 m3 = a1 * b1;
- uint128_t result;
m2 += (m0 >> 32);
m2 += m1;
@@ -280,7 +360,7 @@ static uint128_t mul_64_64(u64 left, u64 right)
result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
result.m_high = m3 + (m2 >> 32);
-
+#endif
return result;
}
@@ -330,6 +410,28 @@ static void vli_mult(u64 *result, const u64 *left, const u64 *right,
result[ndigits * 2 - 1] = r01.m_low;
}
+/* Compute product = left * right, for a small right value. */
+static void vli_umult(u64 *result, const u64 *left, u32 right,
+ unsigned int ndigits)
+{
+ uint128_t r01 = { 0 };
+ unsigned int k;
+
+ for (k = 0; k < ndigits; k++) {
+ uint128_t product;
+
+ product = mul_64_64(left[k], right);
+ r01 = add_128_128(r01, product);
+ /* no carry */
+ result[k] = r01.m_low;
+ r01.m_low = r01.m_high;
+ r01.m_high = 0;
+ }
+ result[k] = r01.m_low;
+ for (++k; k < ndigits * 2; k++)
+ result[k] = 0;
+}
+
static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
{
uint128_t r01 = { 0, 0 };
@@ -402,6 +504,170 @@ static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
vli_add(result, result, mod, ndigits);
}
+/*
+ * Computes result = product % mod
+ * for special form moduli: p = 2^k-c, for small c (note the minus sign)
+ *
+ * References:
+ * R. Crandall, C. Pomerance. Prime Numbers: A Computational Perspective.
+ * 9 Fast Algorithms for Large-Integer Arithmetic. 9.2.3 Moduli of special form
+ * Algorithm 9.2.13 (Fast mod operation for special-form moduli).
+ */
+static void vli_mmod_special(u64 *result, const u64 *product,
+ const u64 *mod, unsigned int ndigits)
+{
+ u64 c = -mod[0];
+ u64 t[ECC_MAX_DIGITS * 2];
+ u64 r[ECC_MAX_DIGITS * 2];
+
+ vli_set(r, product, ndigits * 2);
+ while (!vli_is_zero(r + ndigits, ndigits)) {
+ vli_umult(t, r + ndigits, c, ndigits);
+ vli_clear(r + ndigits, ndigits);
+ vli_add(r, r, t, ndigits * 2);
+ }
+ vli_set(t, mod, ndigits);
+ vli_clear(t + ndigits, ndigits);
+ while (vli_cmp(r, t, ndigits * 2) >= 0)
+ vli_sub(r, r, t, ndigits * 2);
+ vli_set(result, r, ndigits);
+}
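The loop above exploits the modulus shape: for p = 2^k - c we have 2^k == c (mod p), so a value split as H * 2^k + L folds to H * c + L, shrinking each round until the high half is zero; the trailing subtractions then bring the result under p. A toy version with 8-bit digits instead of 64-bit ones:

    #include <stdio.h>

    /* Toy vli_mmod_special(): fold mod p = 2^8 - 5 using 2^8 == 5 (mod p). */
    int main(void)
    {
        const unsigned int p = 251, c = 5;   /* p = 2^8 - c */
        unsigned int x = 60000;

        while (x >> 8)                       /* fold high digits down */
            x = (x >> 8) * c + (x & 0xff);
        while (x >= p)                       /* final conditional subtractions */
            x -= p;
        printf("%u\n", x);                   /* prints 11 == 60000 mod 251 */
        return 0;
    }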
+
+/*
+ * Computes result = product % mod
+ * for special form moduli: p = 2^{k-1}+c, for small c (note the plus sign)
+ * where k - 1 falls one bit short of a qword boundary (such as 255).
+ *
+ * References (loosely based on):
+ * A. Menezes, P. van Oorschot, S. Vanstone. Handbook of Applied Cryptography.
+ * 14.3.4 Reduction methods for moduli of special form. Algorithm 14.47.
+ * URL: http://cacr.uwaterloo.ca/hac/about/chap14.pdf
+ *
+ * H. Cohen, G. Frey, R. Avanzi, C. Doche, T. Lange, K. Nguyen, F. Vercauteren.
+ * Handbook of Elliptic and Hyperelliptic Curve Cryptography.
+ * Algorithm 10.25 Fast reduction for special form moduli
+ */
+static void vli_mmod_special2(u64 *result, const u64 *product,
+ const u64 *mod, unsigned int ndigits)
+{
+ u64 c2 = mod[0] * 2;
+ u64 q[ECC_MAX_DIGITS];
+ u64 r[ECC_MAX_DIGITS * 2];
+ u64 m[ECC_MAX_DIGITS * 2]; /* expanded mod */
+ int carry; /* last bit that doesn't fit into q */
+ int i;
+
+ vli_set(m, mod, ndigits);
+ vli_clear(m + ndigits, ndigits);
+
+ vli_set(r, product, ndigits);
+ /* q and carry are top bits */
+ vli_set(q, product + ndigits, ndigits);
+ vli_clear(r + ndigits, ndigits);
+ carry = vli_is_negative(r, ndigits);
+ if (carry)
+ r[ndigits - 1] &= (1ull << 63) - 1;
+ for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
+ u64 qc[ECC_MAX_DIGITS * 2];
+
+ vli_umult(qc, q, c2, ndigits);
+ if (carry)
+ vli_uadd(qc, qc, mod[0], ndigits * 2);
+ vli_set(q, qc + ndigits, ndigits);
+ vli_clear(qc + ndigits, ndigits);
+ carry = vli_is_negative(qc, ndigits);
+ if (carry)
+ qc[ndigits - 1] &= (1ull << 63) - 1;
+ if (i & 1)
+ vli_sub(r, r, qc, ndigits * 2);
+ else
+ vli_add(r, r, qc, ndigits * 2);
+ }
+ while (vli_is_negative(r, ndigits * 2))
+ vli_add(r, r, m, ndigits * 2);
+ while (vli_cmp(r, m, ndigits * 2) >= 0)
+ vli_sub(r, r, m, ndigits * 2);
+
+ vli_set(result, r, ndigits);
+}
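The alternating vli_sub()/vli_add() also follows from the modulus shape: with p = 2^{k-1} + c we have 2^{k-1} == -c and 2^k == -2c (mod p), hence the precomputed c2 = mod[0] * 2. A value split as H * 2^k + carry * 2^{k-1} + L therefore folds to L - (2c * H + c * carry), and folding the new high part flips the sign again, which is exactly the (i & 1) alternation; the two trailing loops then pull r back into [0, p).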
+
+/*
+ * Computes result = product % mod, where product is 2N words long.
+ * Reference: Ken MacKay's micro-ecc.
+ * Currently only designed to work for curve_p or curve_n.
+ */
+static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
+ unsigned int ndigits)
+{
+ u64 mod_m[2 * ECC_MAX_DIGITS];
+ u64 tmp[2 * ECC_MAX_DIGITS];
+ u64 *v[2] = { tmp, product };
+ u64 carry = 0;
+ unsigned int i;
+ /* Shift mod so its highest set bit is at the maximum position. */
+ int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
+ int word_shift = shift / 64;
+ int bit_shift = shift % 64;
+
+ vli_clear(mod_m, word_shift);
+ if (bit_shift > 0) {
+ for (i = 0; i < ndigits; ++i) {
+ mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
+ carry = mod[i] >> (64 - bit_shift);
+ }
+ } else
+ vli_set(mod_m + word_shift, mod, ndigits);
+
+ for (i = 1; shift >= 0; --shift) {
+ u64 borrow = 0;
+ unsigned int j;
+
+ for (j = 0; j < ndigits * 2; ++j) {
+ u64 diff = v[i][j] - mod_m[j] - borrow;
+
+ if (diff != v[i][j])
+ borrow = (diff > v[i][j]);
+ v[1 - i][j] = diff;
+ }
+ i = !(i ^ borrow); /* Swap the index if there was no borrow */
+ vli_rshift1(mod_m, ndigits);
+ mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
+ vli_rshift1(mod_m + ndigits, ndigits);
+ }
+ vli_set(result, v[i], ndigits);
+}
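This is binary long division on multi-digit values: the modulus is shifted left until its top bit lines up with the product's, then at each step it is conditionally subtracted and shifted right one bit, with the two buffers in v[] standing in for an explicit "did it fit" test. A toy single-word version of the same idea:

    #include <stdio.h>

    /* Toy vli_mmod_slow(): shift-and-subtract remainder computation. */
    int main(void)
    {
        unsigned long long x = 1000003, m = 97, shifted;
        int shift = 0;

        while ((m << (shift + 1)) <= x)      /* align top bits */
            shift++;
        for (shifted = m << shift; shift >= 0; shifted >>= 1, shift--)
            if (x >= shifted)
                x -= shifted;
        printf("%llu\n", x);                 /* prints 30 == 1000003 mod 97 */
        return 0;
    }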
+
+/* Computes result = product % mod using Barrett's reduction with precomputed
+ * value mu appended to the mod after ndigits; mu = floor(2^{2w} / mod) has
+ * length ndigits + 1, and mu * (2^w - 1) must not overflow the ndigits
+ * boundary.
+ *
+ * Reference:
+ * R. Brent, P. Zimmermann. Modern Computer Arithmetic. 2010.
+ * 2.4.1 Barrett's algorithm. Algorithm 2.5.
+ */
+static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
+ unsigned int ndigits)
+{
+ u64 q[ECC_MAX_DIGITS * 2];
+ u64 r[ECC_MAX_DIGITS * 2];
+ const u64 *mu = mod + ndigits;
+
+ vli_mult(q, product + ndigits, mu, ndigits);
+ if (mu[ndigits])
+ vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
+ vli_mult(r, mod, q + ndigits, ndigits);
+ vli_sub(r, product, r, ndigits * 2);
+ while (!vli_is_zero(r + ndigits, ndigits) ||
+ vli_cmp(r, mod, ndigits) != -1) {
+ u64 carry;
+
+ carry = vli_sub(r, r, mod, ndigits);
+ vli_usub(r + ndigits, r + ndigits, carry, ndigits);
+ }
+ vli_set(result, r, ndigits);
+}
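Barrett's method estimates the quotient from the product's high half and the precomputed mu = floor(2^{2w} / mod), then fixes the small estimation error with at most a couple of subtractions. A toy version with 8-bit digits (the kernel code does the same with 64-bit digits and mu stored after the modulus):

    #include <stdio.h>

    /* Toy vli_mmod_barrett(): q = ((x >> 8) * mu) >> 8 estimates x / m. */
    int main(void)
    {
        const unsigned int m = 151;
        const unsigned int mu = 65536 / m;   /* floor(2^16 / m) = 434 */
        unsigned int x = 40000;              /* must be < 2^16 */
        unsigned int q = ((x >> 8) * mu) >> 8;
        unsigned int r = x - q * m;

        while (r >= m)                       /* at most a few corrections */
            r -= m;
        printf("%u\n", r);                   /* prints 136 == 40000 mod 151 */
        return 0;
    }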
+
/* Computes p_result = p_product % curve_p.
* See algorithm 5 and 6 from
* http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
@@ -509,14 +775,33 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
}
}
-/* Computes result = product % curve_prime
- * from http://www.nsa.gov/ia/_files/nist-routines.pdf
-*/
+/* Computes result = product % curve_prime for different curve_primes.
+ *
+ * Note that curve_primes are distinguished just by a heuristic check,
+ * not by a complete conformance check.
+ */
static bool vli_mmod_fast(u64 *result, u64 *product,
const u64 *curve_prime, unsigned int ndigits)
{
u64 tmp[2 * ECC_MAX_DIGITS];
+ /* Currently, both NIST primes have -1 in lowest qword. */
+ if (curve_prime[0] != -1ull) {
+ /* Try to handle pseudo-Mersenne primes. */
+ if (curve_prime[ndigits - 1] == -1ull) {
+ vli_mmod_special(result, product, curve_prime,
+ ndigits);
+ return true;
+ } else if (curve_prime[ndigits - 1] == 1ull << 63 &&
+ curve_prime[ndigits - 2] == 0) {
+ vli_mmod_special2(result, product, curve_prime,
+ ndigits);
+ return true;
+ }
+ vli_mmod_barrett(result, product, curve_prime, ndigits);
+ return true;
+ }
+
switch (ndigits) {
case 3:
vli_mmod_fast_192(result, product, curve_prime, tmp);
@@ -525,13 +810,26 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
vli_mmod_fast_256(result, product, curve_prime, tmp);
break;
default:
- pr_err("unsupports digits size!\n");
+ pr_err_ratelimited("ecc: unsupported digits size!\n");
return false;
}
return true;
}
+/* Computes result = (left * right) % mod.
+ * Assumes that mod is a sufficiently large curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits)
+{
+ u64 product[ECC_MAX_DIGITS * 2];
+
+ vli_mult(product, left, right, ndigits);
+ vli_mmod_slow(result, product, mod, ndigits);
+}
+EXPORT_SYMBOL(vli_mod_mult_slow);
+
/* Computes result = (left * right) % curve_prime. */
static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
const u64 *curve_prime, unsigned int ndigits)
@@ -557,7 +855,7 @@ static void vli_mod_square_fast(u64 *result, const u64 *left,
* See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
* https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
*/
-static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
unsigned int ndigits)
{
u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
@@ -630,6 +928,7 @@ static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
vli_set(result, u, ndigits);
}
+EXPORT_SYMBOL(vli_mod_inv);
/* ------ Point operations ------ */
@@ -903,6 +1202,85 @@ static void ecc_point_mult(struct ecc_point *result,
vli_set(result->y, ry[0], ndigits);
}
+/* Computes R = P + Q mod p */
+static void ecc_point_add(const struct ecc_point *result,
+ const struct ecc_point *p, const struct ecc_point *q,
+ const struct ecc_curve *curve)
+{
+ u64 z[ECC_MAX_DIGITS];
+ u64 px[ECC_MAX_DIGITS];
+ u64 py[ECC_MAX_DIGITS];
+ unsigned int ndigits = curve->g.ndigits;
+
+ vli_set(result->x, q->x, ndigits);
+ vli_set(result->y, q->y, ndigits);
+ vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
+ vli_set(px, p->x, ndigits);
+ vli_set(py, p->y, ndigits);
+ xycz_add(px, py, result->x, result->y, curve->p, ndigits);
+ vli_mod_inv(z, z, curve->p, ndigits);
+ apply_z(result->x, result->y, z, curve->p, ndigits);
+}
+
+/* Computes R = u1P + u2Q mod p using Shamir's trick.
+ * Based on: Kenneth MacKay's micro-ecc (2014).
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+ const u64 *u1, const struct ecc_point *p,
+ const u64 *u2, const struct ecc_point *q,
+ const struct ecc_curve *curve)
+{
+ u64 z[ECC_MAX_DIGITS];
+ u64 sump[2][ECC_MAX_DIGITS];
+ u64 *rx = result->x;
+ u64 *ry = result->y;
+ unsigned int ndigits = curve->g.ndigits;
+ unsigned int num_bits;
+ struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
+ const struct ecc_point *points[4];
+ const struct ecc_point *point;
+ unsigned int idx;
+ int i;
+
+ ecc_point_add(&sum, p, q, curve);
+ points[0] = NULL;
+ points[1] = p;
+ points[2] = q;
+ points[3] = &sum;
+
+ num_bits = max(vli_num_bits(u1, ndigits),
+ vli_num_bits(u2, ndigits));
+ i = num_bits - 1;
+ idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+ point = points[idx];
+
+ vli_set(rx, point->x, ndigits);
+ vli_set(ry, point->y, ndigits);
+ vli_clear(z + 1, ndigits - 1);
+ z[0] = 1;
+
+ for (--i; i >= 0; i--) {
+ ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
+ idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+ point = points[idx];
+ if (point) {
+ u64 tx[ECC_MAX_DIGITS];
+ u64 ty[ECC_MAX_DIGITS];
+ u64 tz[ECC_MAX_DIGITS];
+
+ vli_set(tx, point->x, ndigits);
+ vli_set(ty, point->y, ndigits);
+ apply_z(tx, ty, z, curve->p, ndigits);
+ vli_mod_sub(tz, rx, tx, curve->p, ndigits);
+ xycz_add(tx, ty, rx, ry, curve->p, ndigits);
+ vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
+ }
+ }
+ vli_mod_inv(z, z, curve->p, ndigits);
+ apply_z(rx, ry, z, curve->p, ndigits);
+}
+EXPORT_SYMBOL(ecc_point_mult_shamir);
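Shamir's trick evaluates u1*P + u2*Q in a single double-and-add pass over the joint bits of the two scalars, picking 0, P, Q or P+Q at each step, which costs roughly one scalar multiplication instead of two. An integer analogue of the same control flow (standalone C; * and + stand in for point doubling and addition):

    #include <stdio.h>

    /* Integer analogue of ecc_point_mult_shamir(): u1*p + u2*q in one pass. */
    static unsigned long long shamir(unsigned long long u1, unsigned long long p,
                                     unsigned long long u2, unsigned long long q)
    {
        const unsigned long long table[4] = { 0, p, q, p + q };
        unsigned long long acc = 0;
        int i;

        for (i = 63; i >= 0; i--) {
            unsigned int idx = ((u1 >> i) & 1) | (((u2 >> i) & 1) << 1);

            acc = acc * 2;        /* "point doubling" */
            acc += table[idx];    /* "point addition" */
        }
        return acc;
    }

    int main(void)
    {
        printf("%llu\n", shamir(5, 7, 9, 11));   /* prints 134 == 5*7 + 9*11 */
        return 0;
    }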
+
static inline void ecc_swap_digits(const u64 *in, u64 *out,
unsigned int ndigits)
{
@@ -948,6 +1326,7 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
return __ecc_is_key_valid(curve, private_key, ndigits);
}
+EXPORT_SYMBOL(ecc_is_key_valid);
/*
* ECC private keys are generated using the method of extra random bits,
@@ -1000,6 +1379,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
return 0;
}
+EXPORT_SYMBOL(ecc_gen_privkey);
int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, u64 *public_key)
@@ -1036,13 +1416,17 @@ err_free_point:
out:
return ret;
}
+EXPORT_SYMBOL(ecc_make_pub_key);
/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
-static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
- struct ecc_point *pk)
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk)
{
u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
+ if (WARN_ON(pk->ndigits != curve->g.ndigits))
+ return -EINVAL;
+
/* Check 1: Verify key is not the zero point. */
if (ecc_point_is_zero(pk))
return -EINVAL;
@@ -1064,8 +1448,8 @@ static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
return -EINVAL;
return 0;
-
}
+EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
@@ -1121,3 +1505,6 @@ err_alloc_product:
out:
return ret;
}
+EXPORT_SYMBOL(crypto_ecdh_shared_secret);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/ecc.h b/crypto/ecc.h
index f75a86baa3bd..ab0eb70b9c09 100644
--- a/crypto/ecc.h
+++ b/crypto/ecc.h
@@ -26,13 +26,51 @@
#ifndef _CRYPTO_ECC_H
#define _CRYPTO_ECC_H
+/* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3
#define ECC_CURVE_NIST_P256_DIGITS 4
-#define ECC_MAX_DIGITS ECC_CURVE_NIST_P256_DIGITS
+#define ECC_MAX_DIGITS (512 / 64)
#define ECC_DIGITS_TO_BYTES_SHIFT 3
/**
+ * struct ecc_point - elliptic curve point in affine coordinates
+ *
+ * @x: X coordinate in vli form.
+ * @y: Y coordinate in vli form.
+ * @ndigits: Length of vlis in u64 qwords.
+ */
+struct ecc_point {
+ u64 *x;
+ u64 *y;
+ u8 ndigits;
+};
+
+#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+
+/**
+ * struct ecc_curve - definition of elliptic curve
+ *
+ * @name: Short name of the curve.
+ * @g: Generator point of the curve.
+ * @p: Prime number; if Barrett's reduction is used for this curve,
+ * the pre-calculated value 'mu' is appended to @p after ndigits.
+ * Use of Barrett's reduction is heuristically determined in
+ * vli_mmod_fast().
+ * @n: Order of the curve group.
+ * @a: Curve parameter a.
+ * @b: Curve parameter b.
+ */
+struct ecc_curve {
+ char *name;
+ struct ecc_point g;
+ u64 *p;
+ u64 *n;
+ u64 *a;
+ u64 *b;
+};
+
+/**
* ecc_is_key_valid() - Validate a given ECDH private key
*
* @curve_id: id representing the curve to use
@@ -91,4 +129,117 @@ int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
u64 *secret);
+
+/**
+ * ecc_is_pubkey_valid_partial() - Partial public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate the public key according to SP800-56A section 5.6.2.3.4 ECC Partial
+ * Public-Key Validation Routine.
+ *
+ * Note: There is no check that the public key is in the correct elliptic curve
+ * subgroup.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * vli_is_zero() - Determine whether vli is zero
+ *
+ * @vli: vli to check.
+ * @ndigits: length of the @vli
+ */
+bool vli_is_zero(const u64 *vli, unsigned int ndigits);
+
+/**
+ * vli_cmp() - compare left and right vlis
+ *
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of both vlis
+ *
+ * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * 0 if @left == @right, 1 if @left > @right.
+ */
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
+
+/**
+ * vli_sub() - Subtracts right from left
+ *
+ * @result: where to write result
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of all vlis
+ *
+ * Note: can modify in-place.
+ *
+ * Return: borrow bit.
+ */
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits);
+
+/**
+ * vli_from_be64() - Load vli from big-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 BE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_from_le64() - Load vli from little-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 LE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_mod_inv() - Modular inversion
+ *
+ * @result: where to write vli number
+ * @input: vli value to operate on
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ */
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+ unsigned int ndigits);
+
+/**
+ * vli_mod_mult_slow() - Modular multiplication
+ *
+ * @result: where to write result value
+ * @left: vli number to multiply with @right
+ * @right: vli number to multiply with @left
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ *
+ * Note: Assumes that mod is a sufficiently large curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits);
+
+/**
+ * ecc_point_mult_shamir() - Add two points multiplied by scalars
+ *
+ * @result: resulting point
+ * @x: scalar to multiply with @p
+ * @p: point to multiply with @x
+ * @y: scalar to multiply with @q
+ * @q: point to multiply with @y
+ * @curve: curve
+ *
+ * Returns result = x * p + y * q over the curve.
+ * This works faster than two multiplications and addition.
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+ const u64 *x, const struct ecc_point *p,
+ const u64 *y, const struct ecc_point *q,
+ const struct ecc_curve *curve);
#endif
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index 336ab1805639..69be6c7d228f 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -2,21 +2,6 @@
#ifndef _CRYTO_ECC_CURVE_DEFS_H
#define _CRYTO_ECC_CURVE_DEFS_H
-struct ecc_point {
- u64 *x;
- u64 *y;
- u8 ndigits;
-};
-
-struct ecc_curve {
- char *name;
- struct ecc_point g;
- u64 *p;
- u64 *n;
- u64 *a;
- u64 *b;
-};
-
/* NIST P-192: a = p - 3 */
static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
0x188DA80EB03090F6ull };
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index bf6300175b9c..890092bd8989 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -166,7 +166,7 @@ static void ecdh_exit(void)
crypto_unregister_kpp(&ecdh);
}
-module_init(ecdh_init);
+subsys_initcall(ecdh_init);
module_exit(ecdh_exit);
MODULE_ALIAS_CRYPTO("ecdh");
MODULE_LICENSE("GPL");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 77e607fdbfb7..e71d1bc8d850 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -174,7 +174,7 @@ static void __exit echainiv_module_exit(void)
crypto_unregister_template(&echainiv_tmpl);
}
-module_init(echainiv_module_init);
+subsys_initcall(echainiv_module_init);
module_exit(echainiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
new file mode 100644
index 000000000000..887ec21aee49
--- /dev/null
+++ b/crypto/ecrdsa.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Elliptic Curve (Russian) Digital Signature Algorithm for Cryptographic API
+ *
+ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * References:
+ * GOST 34.10-2018, GOST R 34.10-2012, RFC 7091, ISO/IEC 14888-3:2018.
+ *
+ * Historical references:
+ * GOST R 34.10-2001, RFC 4357, ISO/IEC 14888-3:2006/Amd 1:2010.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/streebog.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <linux/oid_registry.h>
+#include "ecrdsa_params.asn1.h"
+#include "ecrdsa_pub_key.asn1.h"
+#include "ecc.h"
+#include "ecrdsa_defs.h"
+
+#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
+#define ECRDSA_MAX_DIGITS (512 / 64)
+
+struct ecrdsa_ctx {
+ enum OID algo_oid; /* overall public key oid */
+ enum OID curve_oid; /* parameter */
+ enum OID digest_oid; /* parameter */
+ const struct ecc_curve *curve; /* curve from oid */
+ unsigned int digest_len; /* parameter (bytes) */
+ const char *digest; /* digest name from oid */
+ unsigned int key_len; /* @key length (bytes) */
+ const char *key; /* raw public key */
+ struct ecc_point pub_key;
+ u64 _pubp[2][ECRDSA_MAX_DIGITS]; /* point storage for @pub_key */
+};
+
+static const struct ecc_curve *get_curve_by_oid(enum OID oid)
+{
+ switch (oid) {
+ case OID_gostCPSignA:
+ case OID_gostTC26Sign256B:
+ return &gost_cp256a;
+ case OID_gostCPSignB:
+ case OID_gostTC26Sign256C:
+ return &gost_cp256b;
+ case OID_gostCPSignC:
+ case OID_gostTC26Sign256D:
+ return &gost_cp256c;
+ case OID_gostTC26Sign512A:
+ return &gost_tc512a;
+ case OID_gostTC26Sign512B:
+ return &gost_tc512b;
+ /* The following two aren't implemented: */
+ case OID_gostTC26Sign256A:
+ case OID_gostTC26Sign512C:
+ default:
+ return NULL;
+ }
+}
+
+static int ecrdsa_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ unsigned char sig[ECRDSA_MAX_SIG_SIZE];
+ unsigned char digest[STREEBOG512_DIGEST_SIZE];
+ unsigned int ndigits = req->dst_len / sizeof(u64);
+ u64 r[ECRDSA_MAX_DIGITS]; /* witness (r) */
+ u64 _r[ECRDSA_MAX_DIGITS]; /* -r */
+ u64 s[ECRDSA_MAX_DIGITS]; /* second part of sig (s) */
+ u64 e[ECRDSA_MAX_DIGITS]; /* h \mod q */
+ u64 *v = e; /* e^{-1} \mod q */
+ u64 z1[ECRDSA_MAX_DIGITS];
+ u64 *z2 = _r;
+ struct ecc_point cc = ECC_POINT_INIT(s, e, ndigits); /* reuse s, e */
+
+ /*
+ * Digest value, digest algorithm, and curve (modulus) should have the
+ * same length (256 or 512 bits); the public key and the signature
+ * should be twice as long.
+ */
+ if (!ctx->curve ||
+ !ctx->digest ||
+ !req->src ||
+ !ctx->pub_key.x ||
+ req->dst_len != ctx->digest_len ||
+ req->dst_len != ctx->curve->g.ndigits * sizeof(u64) ||
+ ctx->pub_key.ndigits != ctx->curve->g.ndigits ||
+ req->dst_len * 2 != req->src_len ||
+ WARN_ON(req->src_len > sizeof(sig)) ||
+ WARN_ON(req->dst_len > sizeof(digest)))
+ return -EBADMSG;
+
+ sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len),
+ sig, req->src_len);
+ sg_pcopy_to_buffer(req->src,
+ sg_nents_for_len(req->src,
+ req->src_len + req->dst_len),
+ digest, req->dst_len, req->src_len);
+
+ vli_from_be64(s, sig, ndigits);
+ vli_from_be64(r, sig + ndigits * sizeof(u64), ndigits);
+
+ /* Step 1: verify that 0 < r < q, 0 < s < q */
+ if (vli_is_zero(r, ndigits) ||
+ vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
+ vli_is_zero(s, ndigits) ||
+ vli_cmp(s, ctx->curve->n, ndigits) == 1)
+ return -EKEYREJECTED;
+
+ /* Step 2: calculate hash (h) of the message (passed as input) */
+ /* Step 3: calculate e = h \mod q */
+ vli_from_le64(e, digest, ndigits);
+ if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
+ vli_sub(e, e, ctx->curve->n, ndigits);
+ if (vli_is_zero(e, ndigits))
+ e[0] = 1;
+
+ /* Step 4: calculate v = e^{-1} \mod q */
+ vli_mod_inv(v, e, ctx->curve->n, ndigits);
+
+ /* Step 5: calculate z_1 = sv \mod q, z_2 = -rv \mod q */
+ vli_mod_mult_slow(z1, s, v, ctx->curve->n, ndigits);
+ vli_sub(_r, ctx->curve->n, r, ndigits);
+ vli_mod_mult_slow(z2, _r, v, ctx->curve->n, ndigits);
+
+ /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
+ ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
+ ctx->curve);
+ if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
+ vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
+
+ /* Step 7: if R == r signature is valid */
+ if (!vli_cmp(cc.x, r, ndigits))
+ return 0;
+ else
+ return -EKEYREJECTED;
+}
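Written out, steps 1-7 are the GOST R 34.10-2012 verification equations over the subgroup of order q generated by P, with public key Q = dP:

    e  = h mod q            (e := 1 if the result is 0)
    v  = e^-1 mod q
    z1 = s*v mod q,  z2 = -r*v mod q
    C  = z1*P + z2*Q,  R = C.x mod q,  accept iff R == r

For a signature honestly produced as C' = kP, r = C'.x mod q, s = (r*d + k*e) mod q, substituting gives z1*P + z2*Q = v*(s - r*d)*P = v*k*e*P = kP = C', so R recomputes r.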
+
+int ecrdsa_param_curve(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecrdsa_ctx *ctx = context;
+
+ ctx->curve_oid = look_up_OID(value, vlen);
+ if (!ctx->curve_oid)
+ return -EINVAL;
+ ctx->curve = get_curve_by_oid(ctx->curve_oid);
+ return 0;
+}
+
+/* Optional. If present should match expected digest algo OID. */
+int ecrdsa_param_digest(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecrdsa_ctx *ctx = context;
+ int digest_oid = look_up_OID(value, vlen);
+
+ if (digest_oid != ctx->digest_oid)
+ return -EINVAL;
+ return 0;
+}
+
+int ecrdsa_parse_pub_key(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecrdsa_ctx *ctx = context;
+
+ ctx->key = value;
+ ctx->key_len = vlen;
+ return 0;
+}
+
+static u8 *ecrdsa_unpack_u32(u32 *dst, void *src)
+{
+ memcpy(dst, src, sizeof(u32));
+ return src + sizeof(u32);
+}
+
+/* Parse BER encoded subjectPublicKey. */
+static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ unsigned int ndigits;
+ u32 algo, paramlen;
+ u8 *params;
+ int err;
+
+ err = asn1_ber_decoder(&ecrdsa_pub_key_decoder, ctx, key, keylen);
+ if (err < 0)
+ return err;
+
+ /* The key parameters are appended to the key after keylen. */
+ params = ecrdsa_unpack_u32(&paramlen,
+ ecrdsa_unpack_u32(&algo, (u8 *)key + keylen));
+
+ if (algo == OID_gost2012PKey256) {
+ ctx->digest = "streebog256";
+ ctx->digest_oid = OID_gost2012Digest256;
+ ctx->digest_len = 256 / 8;
+ } else if (algo == OID_gost2012PKey512) {
+ ctx->digest = "streebog512";
+ ctx->digest_oid = OID_gost2012Digest512;
+ ctx->digest_len = 512 / 8;
+ } else
+ return -ENOPKG;
+ ctx->algo_oid = algo;
+
+ /* Parse SubjectPublicKeyInfo.AlgorithmIdentifier.parameters. */
+ err = asn1_ber_decoder(&ecrdsa_params_decoder, ctx, params, paramlen);
+ if (err < 0)
+ return err;
+ /*
+ * The digest size of the algo (stored in digest_len) and the
+ * curve size must match each other.
+ */
+ if (!ctx->curve ||
+ ctx->curve->g.ndigits * sizeof(u64) != ctx->digest_len)
+ return -ENOPKG;
+ /*
+ * Key is two 256- or 512-bit coordinates which should match
+ * curve size.
+ */
+ if ((ctx->key_len != (2 * 256 / 8) &&
+ ctx->key_len != (2 * 512 / 8)) ||
+ ctx->key_len != ctx->curve->g.ndigits * sizeof(u64) * 2)
+ return -ENOPKG;
+
+ ndigits = ctx->key_len / sizeof(u64) / 2;
+ ctx->pub_key = ECC_POINT_INIT(ctx->_pubp[0], ctx->_pubp[1], ndigits);
+ vli_from_le64(ctx->pub_key.x, ctx->key, ndigits);
+ vli_from_le64(ctx->pub_key.y, ctx->key + ndigits * sizeof(u64),
+ ndigits);
+
+ if (ecc_is_pubkey_valid_partial(ctx->curve, &ctx->pub_key))
+ return -EKEYREJECTED;
+
+ return 0;
+}
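The two ecrdsa_unpack_u32() calls rely on the buffer layout implied by the x509 parser changes earlier in this patch; judging from the unpack order, the BER subjectPublicKey is followed in memory by the algorithm OID, the parameter length, and the saved AlgorithmIdentifier parameters, roughly:

    [ keylen bytes: subjectPublicKey ][ u32: algo OID ][ u32: paramlen ][ paramlen bytes: params ]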
+
+static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ /*
+ * Verify doesn't need any output, so it's just informational
+ * for keyctl to determine the key bit size.
+ */
+ return ctx->pub_key.ndigits * sizeof(u64);
+}
+
+static void ecrdsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+}
+
+static struct akcipher_alg ecrdsa_alg = {
+ .verify = ecrdsa_verify,
+ .set_pub_key = ecrdsa_set_pub_key,
+ .max_size = ecrdsa_max_size,
+ .exit = ecrdsa_exit_tfm,
+ .base = {
+ .cra_name = "ecrdsa",
+ .cra_driver_name = "ecrdsa-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecrdsa_ctx),
+ },
+};
+
+static int __init ecrdsa_mod_init(void)
+{
+ return crypto_register_akcipher(&ecrdsa_alg);
+}
+
+static void __exit ecrdsa_mod_fini(void)
+{
+ crypto_unregister_akcipher(&ecrdsa_alg);
+}
+
+module_init(ecrdsa_mod_init);
+module_exit(ecrdsa_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
+MODULE_DESCRIPTION("EC-RDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecrdsa-generic");
diff --git a/crypto/ecrdsa_defs.h b/crypto/ecrdsa_defs.h
new file mode 100644
index 000000000000..170baf039007
--- /dev/null
+++ b/crypto/ecrdsa_defs.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Definitions of EC-RDSA Curve Parameters
+ *
+ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_ECRDSA_DEFS_H
+#define _CRYPTO_ECRDSA_DEFS_H
+
+#include "ecc.h"
+
+#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
+#define ECRDSA_MAX_DIGITS (512 / 64)
+
+/*
+ * EC-RDSA uses its own set of curves.
+ *
+ * The cp256{a,b,c} curves were first defined for GOST R 34.10-2001 in
+ * RFC 4357 (as the 256-bit {A,B,C}-ParamSets), were inherited by
+ * GOST R 34.10-2012, and are proposed for use in R 50.1.114-2016 and
+ * RFC 7836 as the 256-bit curves.
+ */
+/* OID_gostCPSignA 1.2.643.2.2.35.1 */
+static u64 cp256a_g_x[] = {
+ 0x0000000000000001ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 cp256a_g_y[] = {
+ 0x22ACC99C9E9F1E14ull, 0x35294F2DDF23E3B1ull,
+ 0x27DF505A453F2B76ull, 0x8D91E471E0989CDAull, };
+static u64 cp256a_p[] = { /* p = 2^256 - 617 */
+ 0xFFFFFFFFFFFFFD97ull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 cp256a_n[] = {
+ 0x45841B09B761B893ull, 0x6C611070995AD100ull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 cp256a_a[] = { /* a = p - 3 */
+ 0xFFFFFFFFFFFFFD94ull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 cp256a_b[] = {
+ 0x00000000000000a6ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull };
+
+static struct ecc_curve gost_cp256a = {
+ .name = "cp256a",
+ .g = {
+ .x = cp256a_g_x,
+ .y = cp256a_g_y,
+ .ndigits = 256 / 64,
+ },
+ .p = cp256a_p,
+ .n = cp256a_n,
+ .a = cp256a_a,
+ .b = cp256a_b
+};
+
+/* OID_gostCPSignB 1.2.643.2.2.35.2 */
+static u64 cp256b_g_x[] = {
+ 0x0000000000000001ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 cp256b_g_y[] = {
+ 0x744BF8D717717EFCull, 0xC545C9858D03ECFBull,
+ 0xB83D1C3EB2C070E5ull, 0x3FA8124359F96680ull, };
+static u64 cp256b_p[] = { /* p = 2^255 + 3225 */
+ 0x0000000000000C99ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 cp256b_n[] = {
+ 0xE497161BCC8A198Full, 0x5F700CFFF1A624E5ull,
+ 0x0000000000000001ull, 0x8000000000000000ull, };
+static u64 cp256b_a[] = { /* a = p - 3 */
+ 0x0000000000000C96ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 cp256b_b[] = {
+ 0x2F49D4CE7E1BBC8Bull, 0xE979259373FF2B18ull,
+ 0x66A7D3C25C3DF80Aull, 0x3E1AF419A269A5F8ull, };
+
+static struct ecc_curve gost_cp256b = {
+ .name = "cp256b",
+ .g = {
+ .x = cp256b_g_x,
+ .y = cp256b_g_y,
+ .ndigits = 256 / 64,
+ },
+ .p = cp256b_p,
+ .n = cp256b_n,
+ .a = cp256b_a,
+ .b = cp256b_b
+};
+
+/* OID_gostCPSignC 1.2.643.2.2.35.3 */
+static u64 cp256c_g_x[] = {
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 cp256c_g_y[] = {
+ 0x366E550DFDB3BB67ull, 0x4D4DC440D4641A8Full,
+ 0x3CBF3783CD08C0EEull, 0x41ECE55743711A8Cull, };
+static u64 cp256c_p[] = {
+ 0x7998F7B9022D759Bull, 0xCF846E86789051D3ull,
+ 0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull,
+ /* pre-computed value for Barrett's reduction */
+ 0xedc283cdd217b5a2ull, 0xbac48fc06398ae59ull,
+ 0x405384d55f9f3b73ull, 0xa51f176161f1d734ull,
+ 0x0000000000000001ull, };
+static u64 cp256c_n[] = {
+ 0xF02F3A6598980BB9ull, 0x582CA3511EDDFB74ull,
+ 0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull, };
+static u64 cp256c_a[] = { /* a = p - 3 */
+ 0x7998F7B9022D7598ull, 0xCF846E86789051D3ull,
+ 0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull, };
+static u64 cp256c_b[] = {
+ 0x000000000000805aull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+
+static struct ecc_curve gost_cp256c = {
+ .name = "cp256c",
+ .g = {
+ .x = cp256c_g_x,
+ .y = cp256c_g_y,
+ .ndigits = 256 / 64,
+ },
+ .p = cp256c_p,
+ .n = cp256c_n,
+ .a = cp256c_a,
+ .b = cp256c_b
+};
+
+/*
+ * The tc512{a,b} curves were first recommended in 2013 and later
+ * standardized in R 50.1.114-2016 and RFC 7836 for use with
+ * GOST R 34.10-2012 (as the TC26 512-bit ParamSet{A,B}).
+ */
+/* OID_gostTC26Sign512A 1.2.643.7.1.2.1.2.1 */
+static u64 tc512a_g_x[] = {
+ 0x0000000000000003ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 tc512a_g_y[] = {
+ 0x89A589CB5215F2A4ull, 0x8028FE5FC235F5B8ull,
+ 0x3D75E6A50E3A41E9ull, 0xDF1626BE4FD036E9ull,
+ 0x778064FDCBEFA921ull, 0xCE5E1C93ACF1ABC1ull,
+ 0xA61B8816E25450E6ull, 0x7503CFE87A836AE3ull, };
+static u64 tc512a_p[] = { /* p = 2^512 - 569 */
+ 0xFFFFFFFFFFFFFDC7ull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
+static u64 tc512a_n[] = {
+ 0xCACDB1411F10B275ull, 0x9B4B38ABFAD2B85Dull,
+ 0x6FF22B8D4E056060ull, 0x27E69532F48D8911ull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
+static u64 tc512a_a[] = { /* a = p - 3 */
+ 0xFFFFFFFFFFFFFDC4ull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
+static u64 tc512a_b[] = {
+ 0x503190785A71C760ull, 0x862EF9D4EBEE4761ull,
+ 0x4CB4574010DA90DDull, 0xEE3CB090F30D2761ull,
+ 0x79BD081CFD0B6265ull, 0x34B82574761CB0E8ull,
+ 0xC1BD0B2B6667F1DAull, 0xE8C2505DEDFC86DDull, };
+
+static struct ecc_curve gost_tc512a = {
+ .name = "tc512a",
+ .g = {
+ .x = tc512a_g_x,
+ .y = tc512a_g_y,
+ .ndigits = 512 / 64,
+ },
+ .p = tc512a_p,
+ .n = tc512a_n,
+ .a = tc512a_a,
+ .b = tc512a_b
+};
+
+/* OID_gostTC26Sign512B 1.2.643.7.1.2.1.2.2 */
+static u64 tc512b_g_x[] = {
+ 0x0000000000000002ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 tc512b_g_y[] = {
+ 0x7E21340780FE41BDull, 0x28041055F94CEEECull,
+ 0x152CBCAAF8C03988ull, 0xDCB228FD1EDF4A39ull,
+ 0xBE6DD9E6C8EC7335ull, 0x3C123B697578C213ull,
+ 0x2C071E3647A8940Full, 0x1A8F7EDA389B094Cull, };
+static u64 tc512b_p[] = { /* p = 2^511 + 111 */
+ 0x000000000000006Full, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 tc512b_n[] = {
+ 0xC6346C54374F25BDull, 0x8B996712101BEA0Eull,
+ 0xACFDB77BD9D40CFAull, 0x49A1EC142565A545ull,
+ 0x0000000000000001ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 tc512b_a[] = { /* a = p - 3 */
+ 0x000000000000006Cull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 tc512b_b[] = {
+ 0xFB8CCBC7C5140116ull, 0x50F78BEE1FA3106Eull,
+ 0x7F8B276FAD1AB69Cull, 0x3E965D2DB1416D21ull,
+ 0xBF85DC806C4B289Full, 0xB97C7D614AF138BCull,
+ 0x7E3E06CF6F5E2517ull, 0x687D1B459DC84145ull, };
+
+static struct ecc_curve gost_tc512b = {
+ .name = "tc512b",
+ .g = {
+ .x = tc512b_g_x,
+ .y = tc512b_g_y,
+ .ndigits = 512 / 64,
+ },
+ .p = tc512b_p,
+ .n = tc512b_n,
+ .a = tc512b_a,
+ .b = tc512b_b
+};
+
+#endif
diff --git a/crypto/ecrdsa_params.asn1 b/crypto/ecrdsa_params.asn1
new file mode 100644
index 000000000000..aba99c3763cf
--- /dev/null
+++ b/crypto/ecrdsa_params.asn1
@@ -0,0 +1,4 @@
+EcrdsaParams ::= SEQUENCE {
+ curve OBJECT IDENTIFIER ({ ecrdsa_param_curve }),
+ digest OBJECT IDENTIFIER OPTIONAL ({ ecrdsa_param_digest })
+}
diff --git a/crypto/ecrdsa_pub_key.asn1 b/crypto/ecrdsa_pub_key.asn1
new file mode 100644
index 000000000000..048cb646bce4
--- /dev/null
+++ b/crypto/ecrdsa_pub_key.asn1
@@ -0,0 +1 @@
+EcrdsaPubKey ::= OCTET STRING ({ ecrdsa_parse_pub_key })
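The two .asn1 files above are compiled by the kernel's ASN.1 compiler into decoder bytecode (ecrdsa_params_decoder and ecrdsa_pub_key_decoder); each ({ ... }) action names the C callback invoked with that element's contents. A minimal sketch of the resulting call, matching the decoder invocation in ecrdsa_set_pub_key():

	/* hands the OCTET STRING body to ecrdsa_parse_pub_key(ctx, ...) */
	err = asn1_ber_decoder(&ecrdsa_pub_key_decoder, ctx, key, keylen);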
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 77286ea28865..4e8704405a3b 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -414,7 +414,7 @@ static void __exit fcrypt_mod_fini(void)
crypto_unregister_alg(&fcrypt_alg);
}
-module_init(fcrypt_mod_init);
+subsys_initcall(fcrypt_mod_init);
module_exit(fcrypt_mod_fini);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/fips.c b/crypto/fips.c
index 9d627c1cf8bc..9dfed122d6da 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -74,5 +74,5 @@ static void __exit fips_exit(void)
crypto_proc_fips_exit();
}
-module_init(fips_init);
+subsys_initcall(fips_init);
module_exit(fips_exit);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e1a11f529d25..33f45a980967 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
static int crypto_gcm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
- const char *full_name,
const char *ctr_name,
const char *ghash_name)
{
@@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
goto err_free_inst;
err = -EINVAL;
- if (ghash->digestsize != 16)
+ if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
+ ghash->digestsize != 16)
goto err_drop_ghash;
crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
@@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
- /* We only support 16-byte blocks. */
+ /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
- if (crypto_skcipher_alg_ivsize(ctr) != 16)
+ if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+ crypto_skcipher_alg_ivsize(ctr) != 16 ||
+ ctr->base.cra_blocksize != 1)
goto out_put_ctr;
- /* Not a stream cipher? */
- if (ctr->base.cra_blocksize != 1)
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
- err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"gcm_base(%s,%s)", ctr->base.cra_driver_name,
ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
- memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
inst->alg.base.cra_flags = (ghash->base.cra_flags |
ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (ghash->base.cra_priority +
@@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
{
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
- char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
- CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- return crypto_gcm_create_common(tmpl, tb, full_name,
- ctr_name, "ghash");
+ return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
}
static int crypto_gcm_base_create(struct crypto_template *tmpl,
@@ -732,7 +726,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
{
const char *ctr_name;
const char *ghash_name;
- char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
@@ -742,12 +735,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
if (IS_ERR(ghash_name))
return PTR_ERR(ghash_name);
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
- ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- return crypto_gcm_create_common(tmpl, tb, full_name,
- ctr_name, ghash_name);
+ return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
}
static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1258,7 +1246,7 @@ static void __exit crypto_gcm_module_exit(void)
ARRAY_SIZE(crypto_gcm_tmpls));
}
-module_init(crypto_gcm_module_init);
+subsys_initcall(crypto_gcm_module_init);
module_exit(crypto_gcm_module_exit);
MODULE_LICENSE("GPL");
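With the full_name parameter gone, gcm(X) instances derive cra_name from the resolved CTR algorithm instead of the user-supplied string, and the added strcmp()/strncmp() checks reject lookalike algorithms. A worked example of the derivation, assuming cipher_name is "aes": ctr_name becomes "ctr(aes)", and

	snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		 "gcm(%s", ctr->base.cra_name + 4);

skips the leading "ctr(" and reuses its closing parenthesis to produce "gcm(aes)".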
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index d9f192b953b2..e6307935413c 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -149,7 +149,7 @@ static void __exit ghash_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
-module_init(ghash_mod_init);
+subsys_initcall(ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index e74730224f0a..a68c1266121f 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -57,8 +57,6 @@ static int hmac_setkey(struct crypto_shash *parent,
unsigned int i;
shash->tfm = hash;
- shash->flags = crypto_shash_get_flags(parent)
- & CRYPTO_TFM_REQ_MAY_SLEEP;
if (keylen > bs) {
int err;
@@ -91,8 +89,6 @@ static int hmac_export(struct shash_desc *pdesc, void *out)
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
return crypto_shash_export(desc, out);
}
@@ -102,7 +98,6 @@ static int hmac_import(struct shash_desc *pdesc, const void *in)
struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
desc->tfm = ctx->hash;
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_import(desc, in);
}
@@ -117,8 +112,6 @@ static int hmac_update(struct shash_desc *pdesc,
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
return crypto_shash_update(desc, data, nbytes);
}
@@ -130,8 +123,6 @@ static int hmac_final(struct shash_desc *pdesc, u8 *out)
char *opad = crypto_shash_ctx_aligned(parent) + ss;
struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
return crypto_shash_final(desc, out) ?:
crypto_shash_import(desc, opad) ?:
crypto_shash_finup(desc, out, ds, out);
@@ -147,8 +138,6 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
char *opad = crypto_shash_ctx_aligned(parent) + ss;
struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
return crypto_shash_finup(desc, data, nbytes, out) ?:
crypto_shash_import(desc, opad) ?:
crypto_shash_finup(desc, out, ds, out);
@@ -268,7 +257,7 @@ static void __exit hmac_module_exit(void)
crypto_unregister_template(&hmac_tmpl);
}
-module_init(hmac_module_init);
+subsys_initcall(hmac_module_init);
module_exit(hmac_module_exit);
MODULE_LICENSE("GPL");
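All the deleted assignments plumbed request flags into shash_desc::flags, which this series removes; shash algorithms never sleep, so the flag had no effect. A minimal usage sketch after the change, where only ->tfm needs to be set:

	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;		/* no desc->flags anymore */
	err = crypto_shash_digest(desc, data, len, out);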
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 787dccca3715..6ea1a270b8dc 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -198,7 +198,7 @@ static void __exit jent_mod_exit(void)
crypto_unregister_rng(&jent_alg);
}
-module_init(jent_mod_init);
+subsys_initcall(jent_mod_init);
module_exit(jent_mod_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index a5cfe610d8f4..a155c88105ea 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -310,7 +310,7 @@ static void __exit crypto_kw_exit(void)
crypto_unregister_template(&crypto_kw_tmpl);
}
-module_init(crypto_kw_init);
+subsys_initcall(crypto_kw_init);
module_exit(crypto_kw_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 873eb5ded6d7..b50aa8a3ab4c 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -875,7 +875,7 @@ static void __exit khazad_mod_fini(void)
}
-module_init(khazad_mod_init);
+subsys_initcall(khazad_mod_init);
module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 0430ccd08728..fa302f3f161e 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -162,8 +162,10 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
}
err = skcipher_walk_virt(&w, req, false);
- iv = (__be32 *)w.iv;
+ if (err)
+ return err;
+ iv = (__be32 *)w.iv;
counter[0] = be32_to_cpu(iv[3]);
counter[1] = be32_to_cpu(iv[2]);
counter[2] = be32_to_cpu(iv[1]);
@@ -212,8 +214,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
- if (!err)
+ if (!err) {
+ struct rctx *rctx = skcipher_request_ctx(req);
+
+ rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = xor_tweak_post(req);
+ }
skcipher_request_complete(req, err);
}
@@ -431,7 +437,7 @@ static void __exit crypto_module_exit(void)
crypto_unregister_template(&crypto_tmpl);
}
-module_init(crypto_module_init);
+subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/lz4.c b/crypto/lz4.c
index c160dfdbf2e0..1e35134d0a98 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -164,7 +164,7 @@ static void __exit lz4_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(lz4_mod_init);
+subsys_initcall(lz4_mod_init);
module_exit(lz4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 583b5e013d7a..4a220b628fe7 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -165,7 +165,7 @@ static void __exit lz4hc_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(lz4hc_mod_init);
+subsys_initcall(lz4hc_mod_init);
module_exit(lz4hc_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index ea9c75b1db49..4c82bf18440f 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -167,7 +167,7 @@ static void __exit lzorle_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(lzorle_mod_init);
+subsys_initcall(lzorle_mod_init);
module_exit(lzorle_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 218567d717d6..4a6ac8f247d0 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -167,7 +167,7 @@ static void __exit lzo_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(lzo_mod_init);
+subsys_initcall(lzo_mod_init);
module_exit(lzo_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md4.c b/crypto/md4.c
index 9965ec40d9f9..9a1a228a0c69 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -232,7 +232,7 @@ static void __exit md4_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(md4_mod_init);
+subsys_initcall(md4_mod_init);
module_exit(md4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md5.c b/crypto/md5.c
index 94dd78144ba3..221c2c0932f8 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -244,7 +244,7 @@ static void __exit md5_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(md5_mod_init);
+subsys_initcall(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 46195e0d0f4d..538ae7933795 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -178,7 +178,7 @@ static void __exit michael_mic_exit(void)
}
-module_init(michael_mic_init);
+subsys_initcall(michael_mic_init);
module_exit(michael_mic_exit);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 0747732d5b78..f8734c6576af 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -532,7 +532,7 @@ static void __exit crypto_morus1280_module_exit(void)
crypto_unregister_aead(&crypto_morus1280_alg);
}
-module_init(crypto_morus1280_module_init);
+subsys_initcall(crypto_morus1280_module_init);
module_exit(crypto_morus1280_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 1617a1eb8be1..ae5aa9482cb4 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -523,7 +523,7 @@ static void __exit crypto_morus640_module_exit(void)
crypto_unregister_aead(&crypto_morus640_alg);
}
-module_init(crypto_morus640_module_init);
+subsys_initcall(crypto_morus640_module_init);
module_exit(crypto_morus640_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index ec831a5594d8..9ab4e07cde4d 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -244,7 +244,7 @@ static void __exit nhpoly1305_mod_exit(void)
crypto_unregister_shash(&nhpoly1305_alg);
}
-module_init(nhpoly1305_mod_init);
+subsys_initcall(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
diff --git a/crypto/ofb.c b/crypto/ofb.c
index 34b6e1f426f7..133ff4c7f2c6 100644
--- a/crypto/ofb.c
+++ b/crypto/ofb.c
@@ -95,7 +95,7 @@ static void __exit crypto_ofb_module_exit(void)
crypto_unregister_template(&crypto_ofb_tmpl);
}
-module_init(crypto_ofb_module_init);
+subsys_initcall(crypto_ofb_module_init);
module_exit(crypto_ofb_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 2fa03fc576fe..31b3ce948474 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -191,7 +191,7 @@ static void __exit crypto_pcbc_module_exit(void)
crypto_unregister_template(&crypto_pcbc_tmpl);
}
-module_init(crypto_pcbc_module_init);
+subsys_initcall(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index d47cfc47b1b1..0e9ce329fd47 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -512,7 +512,7 @@ static void __exit pcrypt_exit(void)
crypto_unregister_template(&pcrypt_tmpl);
}
-module_init(pcrypt_init);
+subsys_initcall(pcrypt_init);
module_exit(pcrypt_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 2a06874204e8..adc40298c749 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -318,7 +318,7 @@ static void __exit poly1305_mod_exit(void)
crypto_unregister_shash(&poly1305_alg);
}
-module_init(poly1305_mod_init);
+subsys_initcall(poly1305_mod_init);
module_exit(poly1305_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 5f4472256e27..faf4252c4b85 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -318,7 +318,7 @@ static void __exit rmd128_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(rmd128_mod_init);
+subsys_initcall(rmd128_mod_init);
module_exit(rmd128_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 737645344d1c..b33309916d4f 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -362,7 +362,7 @@ static void __exit rmd160_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(rmd160_mod_init);
+subsys_initcall(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 0e9d30676a01..2a643250c9a5 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -337,7 +337,7 @@ static void __exit rmd256_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(rmd256_mod_init);
+subsys_initcall(rmd256_mod_init);
module_exit(rmd256_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 3ae1df5bb48c..2f062574fc8c 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -386,7 +386,7 @@ static void __exit rmd320_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(rmd320_mod_init);
+subsys_initcall(rmd320_mod_init);
module_exit(rmd320_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 0a6680ca8cb6..29c336068dc0 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -429,7 +429,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
req->dst, ctx->key_size - 1, req->dst_len);
- err = crypto_akcipher_sign(&req_ctx->child_req);
+ err = crypto_akcipher_decrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
@@ -488,14 +488,21 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
err = 0;
- if (req->dst_len < dst_len - pos)
- err = -EOVERFLOW;
- req->dst_len = dst_len - pos;
-
- if (!err)
- sg_copy_from_buffer(req->dst,
- sg_nents_for_len(req->dst, req->dst_len),
- out_buf + pos, req->dst_len);
+ if (req->dst_len != dst_len - pos) {
+ err = -EKEYREJECTED;
+ req->dst_len = dst_len - pos;
+ goto done;
+ }
+ /* Extract appended digest. */
+ sg_pcopy_to_buffer(req->src,
+ sg_nents_for_len(req->src,
+ req->src_len + req->dst_len),
+ req_ctx->out_buf + ctx->key_size,
+ req->dst_len, ctx->key_size);
+ /* Do the actual verification step. */
+ if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
+ req->dst_len) != 0)
+ err = -EKEYREJECTED;
done:
kzfree(req_ctx->out_buf);
@@ -532,10 +539,12 @@ static int pkcs1pad_verify(struct akcipher_request *req)
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
int err;
- if (!ctx->key_size || req->src_len < ctx->key_size)
+ if (WARN_ON(req->dst) ||
+ WARN_ON(!req->dst_len) ||
+ !ctx->key_size || req->src_len < ctx->key_size)
return -EINVAL;
- req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
+ req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
if (!req_ctx->out_buf)
return -ENOMEM;
@@ -551,7 +560,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
req_ctx->out_sg, req->src_len,
ctx->key_size);
- err = crypto_akcipher_verify(&req_ctx->child_req);
+ err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_verify_complete(req, err);
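The reworked verify path changes the caller convention: the signature and the expected digest are both passed in req->src (concatenated, with lengths src_len and dst_len), req->dst stays NULL, and the implementation compares the digest it recovers via the public-key operation against the appended one, returning -EKEYREJECTED on mismatch. A sketch of a conforming caller (buffer names illustrative):

	struct scatterlist src[2];

	sg_init_table(src, 2);
	sg_set_buf(&src[0], sig, sig_len);
	sg_set_buf(&src[1], digest, digest_len);
	akcipher_request_set_crypt(req, src, NULL, sig_len, digest_len);
	err = crypto_akcipher_verify(req);	/* 0 or -EKEYREJECTED */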
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 4167980c243d..dcbb03431778 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -50,34 +50,6 @@ static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c)
return mpi_powm(m, c, key->d, key->n);
}
-/*
- * RSASP1 function [RFC3447 sec 5.2.1]
- * s = m^d mod n
- */
-static int _rsa_sign(const struct rsa_mpi_key *key, MPI s, MPI m)
-{
- /* (1) Validate 0 <= m < n */
- if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
- return -EINVAL;
-
- /* (2) s = m^d mod n */
- return mpi_powm(s, m, key->d, key->n);
-}
-
-/*
- * RSAVP1 function [RFC3447 sec 5.2.2]
- * m = s^e mod n;
- */
-static int _rsa_verify(const struct rsa_mpi_key *key, MPI m, MPI s)
-{
- /* (1) Validate 0 <= s < n */
- if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0)
- return -EINVAL;
-
- /* (2) m = s^e mod n */
- return mpi_powm(m, s, key->e, key->n);
-}
-
static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
{
return akcipher_tfm_ctx(tfm);
@@ -160,85 +132,6 @@ err_free_m:
return ret;
}
-static int rsa_sign(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
- MPI m, s = mpi_alloc(0);
- int ret = 0;
- int sign;
-
- if (!s)
- return -ENOMEM;
-
- if (unlikely(!pkey->n || !pkey->d)) {
- ret = -EINVAL;
- goto err_free_s;
- }
-
- ret = -ENOMEM;
- m = mpi_read_raw_from_sgl(req->src, req->src_len);
- if (!m)
- goto err_free_s;
-
- ret = _rsa_sign(pkey, s, m);
- if (ret)
- goto err_free_m;
-
- ret = mpi_write_to_sgl(s, req->dst, req->dst_len, &sign);
- if (ret)
- goto err_free_m;
-
- if (sign < 0)
- ret = -EBADMSG;
-
-err_free_m:
- mpi_free(m);
-err_free_s:
- mpi_free(s);
- return ret;
-}
-
-static int rsa_verify(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
- MPI s, m = mpi_alloc(0);
- int ret = 0;
- int sign;
-
- if (!m)
- return -ENOMEM;
-
- if (unlikely(!pkey->n || !pkey->e)) {
- ret = -EINVAL;
- goto err_free_m;
- }
-
- s = mpi_read_raw_from_sgl(req->src, req->src_len);
- if (!s) {
- ret = -ENOMEM;
- goto err_free_m;
- }
-
- ret = _rsa_verify(pkey, m, s);
- if (ret)
- goto err_free_s;
-
- ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
- if (ret)
- goto err_free_s;
-
- if (sign < 0)
- ret = -EBADMSG;
-
-err_free_s:
- mpi_free(s);
-err_free_m:
- mpi_free(m);
- return ret;
-}
-
static void rsa_free_mpi_key(struct rsa_mpi_key *key)
{
mpi_free(key->d);
@@ -353,8 +246,6 @@ static void rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg rsa = {
.encrypt = rsa_enc,
.decrypt = rsa_dec,
- .sign = rsa_sign,
- .verify = rsa_verify,
.set_priv_key = rsa_set_priv_key,
.set_pub_key = rsa_set_pub_key,
.max_size = rsa_max_size,
@@ -391,7 +282,7 @@ static void rsa_exit(void)
crypto_unregister_akcipher(&rsa);
}
-module_init(rsa_init);
+subsys_initcall(rsa_init);
module_exit(rsa_exit);
MODULE_ALIAS_CRYPTO("rsa");
MODULE_LICENSE("GPL");
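Dropping rsa_sign()/rsa_verify() loses nothing: the RSASP1 and RSAVP1 primitives removed above are the same modular exponentiations as RSADP and RSAEP,

	sign:	s = m^d mod n	(identical to decrypt: m = c^d mod n)
	verify:	m = s^e mod n	(identical to encrypt: c = m^e mod n)

which is why rsa-pkcs1pad now calls crypto_akcipher_decrypt() and crypto_akcipher_encrypt() for its sign and verify steps.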
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 00fce32ae17a..c81a44404086 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -86,18 +86,17 @@ static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
{
__le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
- if (dst != src)
- memcpy(dst, src, bytes);
-
while (bytes >= SALSA20_BLOCK_SIZE) {
salsa20_block(state, stream);
- crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, (const u8 *)stream,
+ SALSA20_BLOCK_SIZE);
bytes -= SALSA20_BLOCK_SIZE;
dst += SALSA20_BLOCK_SIZE;
+ src += SALSA20_BLOCK_SIZE;
}
if (bytes) {
salsa20_block(state, stream);
- crypto_xor(dst, (const u8 *)stream, bytes);
+ crypto_xor_cpy(dst, src, (const u8 *)stream, bytes);
}
}
@@ -161,7 +160,7 @@ static int salsa20_crypt(struct skcipher_request *req)
err = skcipher_walk_virt(&walk, req, false);
- salsa20_init(state, ctx, walk.iv);
+ salsa20_init(state, ctx, req->iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
@@ -204,7 +203,7 @@ static void __exit salsa20_generic_mod_fini(void)
crypto_unregister_skcipher(&alg);
}
-module_init(salsa20_generic_mod_init);
+subsys_initcall(salsa20_generic_mod_init);
module_exit(salsa20_generic_mod_fini);
MODULE_LICENSE("GPL");
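Two fixes meet in this file. First, crypto_xor_cpy(dst, src1, src2, n) computes dst[i] = src1[i] ^ src2[i] in a single pass, making the up-front copy redundant:

	/* old */ memcpy(dst, src, n); crypto_xor(dst, keystream, n);
	/* new */ crypto_xor_cpy(dst, src, keystream, n);

Second, salsa20_init() now takes the IV from req->iv rather than walk.iv, since skcipher_walk_virt() is called first and does not guarantee walk.iv has been set on all paths (a zero-length request, for instance).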
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 6f8305f8c300..712b4c2ea021 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -29,9 +29,17 @@
#include <crypto/internal/scompress.h>
#include "internal.h"
+struct scomp_scratch {
+ spinlock_t lock;
+ void *src;
+ void *dst;
+};
+
+static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
+ .lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
+};
+
static const struct crypto_type crypto_scomp_type;
-static void * __percpu *scomp_src_scratches;
-static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
@@ -62,76 +70,53 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
seq_puts(m, "type : scomp\n");
}
-static void crypto_scomp_free_scratches(void * __percpu *scratches)
+static void crypto_scomp_free_scratches(void)
{
+ struct scomp_scratch *scratch;
int i;
- if (!scratches)
- return;
-
- for_each_possible_cpu(i)
- vfree(*per_cpu_ptr(scratches, i));
+ for_each_possible_cpu(i) {
+ scratch = per_cpu_ptr(&scomp_scratch, i);
- free_percpu(scratches);
+ vfree(scratch->src);
+ vfree(scratch->dst);
+ scratch->src = NULL;
+ scratch->dst = NULL;
+ }
}
-static void * __percpu *crypto_scomp_alloc_scratches(void)
+static int crypto_scomp_alloc_scratches(void)
{
- void * __percpu *scratches;
+ struct scomp_scratch *scratch;
int i;
- scratches = alloc_percpu(void *);
- if (!scratches)
- return NULL;
-
for_each_possible_cpu(i) {
- void *scratch;
-
- scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!scratch)
- goto error;
- *per_cpu_ptr(scratches, i) = scratch;
- }
-
- return scratches;
-
-error:
- crypto_scomp_free_scratches(scratches);
- return NULL;
-}
+ void *mem;
-static void crypto_scomp_free_all_scratches(void)
-{
- if (!--scomp_scratch_users) {
- crypto_scomp_free_scratches(scomp_src_scratches);
- crypto_scomp_free_scratches(scomp_dst_scratches);
- scomp_src_scratches = NULL;
- scomp_dst_scratches = NULL;
- }
-}
+ scratch = per_cpu_ptr(&scomp_scratch, i);
-static int crypto_scomp_alloc_all_scratches(void)
-{
- if (!scomp_scratch_users++) {
- scomp_src_scratches = crypto_scomp_alloc_scratches();
- if (!scomp_src_scratches)
- return -ENOMEM;
- scomp_dst_scratches = crypto_scomp_alloc_scratches();
- if (!scomp_dst_scratches) {
- crypto_scomp_free_scratches(scomp_src_scratches);
- scomp_src_scratches = NULL;
- return -ENOMEM;
- }
+ mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+ if (!mem)
+ goto error;
+ scratch->src = mem;
+ mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+ if (!mem)
+ goto error;
+ scratch->dst = mem;
}
return 0;
+error:
+ crypto_scomp_free_scratches();
+ return -ENOMEM;
}
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
- int ret;
+ int ret = 0;
mutex_lock(&scomp_lock);
- ret = crypto_scomp_alloc_all_scratches();
+ if (!scomp_scratch_users++)
+ ret = crypto_scomp_alloc_scratches();
mutex_unlock(&scomp_lock);
return ret;
@@ -143,42 +128,41 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
void **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req);
- const int cpu = get_cpu();
- u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
- u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+ struct scomp_scratch *scratch;
int ret;
- if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
- ret = -EINVAL;
- goto out;
- }
+ if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+ return -EINVAL;
- if (req->dst && !req->dlen) {
- ret = -EINVAL;
- goto out;
- }
+ if (req->dst && !req->dlen)
+ return -EINVAL;
if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
req->dlen = SCOMP_SCRATCH_SIZE;
- scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
+ scratch = raw_cpu_ptr(&scomp_scratch);
+ spin_lock(&scratch->lock);
+
+ scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
if (dir)
- ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
- scratch_dst, &req->dlen, *ctx);
+ ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
+ scratch->dst, &req->dlen, *ctx);
else
- ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
- scratch_dst, &req->dlen, *ctx);
+ ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
+ scratch->dst, &req->dlen, *ctx);
if (!ret) {
if (!req->dst) {
req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
- if (!req->dst)
+ if (!req->dst) {
+ ret = -ENOMEM;
goto out;
+ }
}
- scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
+ scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
1);
}
out:
- put_cpu();
+ spin_unlock(&scratch->lock);
return ret;
}
@@ -199,7 +183,8 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
crypto_free_scomp(*ctx);
mutex_lock(&scomp_lock);
- crypto_scomp_free_all_scratches();
+ if (!--scomp_scratch_users)
+ crypto_scomp_free_scratches();
mutex_unlock(&scomp_lock);
}
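The scratch-buffer rework replaces two alloc_percpu'd pointer tables with one statically defined per-CPU struct, and get_cpu()/put_cpu() with a per-scratch spinlock. A minimal sketch of the pattern:

	struct scomp_scratch {
		spinlock_t lock;
		void *src;
		void *dst;
	};

	static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
		.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
	};

	struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);

	spin_lock(&scratch->lock);
	/* ... use scratch->src and scratch->dst ... */
	spin_unlock(&scratch->lock);

raw_cpu_ptr() without disabling preemption is fine here because correctness comes from the lock, not from CPU affinity: even if the task migrates after picking a scratch, it still holds that particular scratch's lock for the duration.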
diff --git a/crypto/seed.c b/crypto/seed.c
index c6ba8438be43..a75ac50fa4fd 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -470,7 +470,7 @@ static void __exit seed_fini(void)
crypto_unregister_alg(&seed_alg);
}
-module_init(seed_init);
+subsys_initcall(seed_init);
module_exit(seed_fini);
MODULE_DESCRIPTION("SEED Cipher Algorithm");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index ed1b0e9f2436..3f2fad615d26 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -211,7 +211,7 @@ static void __exit seqiv_module_exit(void)
crypto_unregister_template(&seqiv_tmpl);
}
-module_init(seqiv_module_init);
+subsys_initcall(seqiv_module_init);
module_exit(seqiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 7c3382facc82..ec4ec89ad108 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -664,7 +664,7 @@ static void __exit serpent_mod_fini(void)
crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs));
}
-module_init(serpent_mod_init);
+subsys_initcall(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 2af64ef81f40..1b806d4584b2 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -92,7 +92,7 @@ static void __exit sha1_generic_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(sha1_generic_mod_init);
+subsys_initcall(sha1_generic_mod_init);
module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 1e5ba6649e8d..5844e9a469e8 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -301,7 +301,7 @@ static void __exit sha256_generic_mod_fini(void)
crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
}
-module_init(sha256_generic_mod_init);
+subsys_initcall(sha256_generic_mod_init);
module_exit(sha256_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7ed98367d4fb..60fd2be609d8 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -294,7 +294,7 @@ static void __exit sha3_generic_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-module_init(sha3_generic_mod_init);
+subsys_initcall(sha3_generic_mod_init);
module_exit(sha3_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 4097cd555eb6..0193ecb8ae10 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -223,7 +223,7 @@ static void __exit sha512_generic_mod_fini(void)
crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
}
-module_init(sha512_generic_mod_init);
+subsys_initcall(sha512_generic_mod_init);
module_exit(sha512_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/shash.c b/crypto/shash.c
index 15b369c4745f..e55c1f558bc3 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -238,7 +238,6 @@ static int shash_async_init(struct ahash_request *req)
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
- desc->flags = req->base.flags;
return crypto_shash_init(desc);
}
@@ -293,7 +292,6 @@ static int shash_async_finup(struct ahash_request *req)
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
- desc->flags = req->base.flags;
return shash_ahash_finup(req, desc);
}
@@ -307,14 +305,13 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
if (nbytes &&
(sg = req->src, offset = sg->offset,
- nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
+ nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
data = kmap_atomic(sg_page(sg));
err = crypto_shash_digest(desc, data + offset, nbytes,
req->result);
kunmap_atomic(data);
- crypto_yield(desc->flags);
} else
err = crypto_shash_init(desc) ?:
shash_ahash_finup(req, desc);
@@ -329,7 +326,6 @@ static int shash_async_digest(struct ahash_request *req)
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
- desc->flags = req->base.flags;
return shash_ahash_digest(req, desc);
}
@@ -345,7 +341,6 @@ static int shash_async_import(struct ahash_request *req, const void *in)
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
- desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
}
diff --git a/crypto/simd.c b/crypto/simd.c
index 78e8d037ae2b..3e3b1d1a6b1f 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
* Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2019 Google LLC
*
* Based on aesni-intel_glue.c by:
* Copyright (C) 2008, Intel Corp.
@@ -20,10 +21,26 @@
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Shared crypto SIMD helpers. These functions dynamically create and register
+ * an skcipher or AEAD algorithm that wraps another, internal algorithm. The
+ * wrapper ensures that the internal algorithm is only executed in a context
+ * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
+ * If SIMD is already usable, the wrapper directly calls the internal algorithm.
+ * Otherwise it defers execution to a workqueue via cryptd.
*
+ * This is an alternative to the internal algorithm implementing a fallback for
+ * the !may_use_simd() case itself.
+ *
+ * Note that the wrapper algorithm is asynchronous, i.e. it has the
+ * CRYPTO_ALG_ASYNC flag set. Therefore it won't be found by users who
+ * explicitly allocate a synchronous algorithm.
*/
#include <crypto/cryptd.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
@@ -31,6 +48,8 @@
#include <linux/preempt.h>
#include <asm/simd.h>
+/* skcipher support */
+
struct simd_skcipher_alg {
const char *ialg_name;
struct skcipher_alg alg;
@@ -66,7 +85,7 @@ static int simd_skcipher_encrypt(struct skcipher_request *req)
subreq = skcipher_request_ctx(req);
*subreq = *req;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
@@ -87,7 +106,7 @@ static int simd_skcipher_decrypt(struct skcipher_request *req)
subreq = skcipher_request_ctx(req);
*subreq = *req;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
@@ -272,4 +291,254 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
}
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
+/* AEAD support */
+
+struct simd_aead_alg {
+ const char *ialg_name;
+ struct aead_alg alg;
+};
+
+struct simd_aead_ctx {
+ struct cryptd_aead *cryptd_tfm;
+};
+
+static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_aead *child = &ctx->cryptd_tfm->base;
+ int err;
+
+ crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(child, key, key_len);
+ crypto_aead_set_flags(tfm, crypto_aead_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_aead *child = &ctx->cryptd_tfm->base;
+
+ return crypto_aead_setauthsize(child, authsize);
+}
+
+static int simd_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_request *subreq;
+ struct crypto_aead *child;
+
+ subreq = aead_request_ctx(req);
+ *subreq = *req;
+
+ if (!crypto_simd_usable() ||
+ (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
+ child = &ctx->cryptd_tfm->base;
+ else
+ child = cryptd_aead_child(ctx->cryptd_tfm);
+
+ aead_request_set_tfm(subreq, child);
+
+ return crypto_aead_encrypt(subreq);
+}
+
+static int simd_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_request *subreq;
+ struct crypto_aead *child;
+
+ subreq = aead_request_ctx(req);
+ *subreq = *req;
+
+ if (!crypto_simd_usable() ||
+ (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
+ child = &ctx->cryptd_tfm->base;
+ else
+ child = cryptd_aead_child(ctx->cryptd_tfm);
+
+ aead_request_set_tfm(subreq, child);
+
+ return crypto_aead_decrypt(subreq);
+}
+
+static void simd_aead_exit(struct crypto_aead *tfm)
+{
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ cryptd_free_aead(ctx->cryptd_tfm);
+}
+
+static int simd_aead_init(struct crypto_aead *tfm)
+{
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cryptd_aead *cryptd_tfm;
+ struct simd_aead_alg *salg;
+ struct aead_alg *alg;
+ unsigned reqsize;
+
+ alg = crypto_aead_alg(tfm);
+ salg = container_of(alg, struct simd_aead_alg, alg);
+
+ cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+
+ ctx->cryptd_tfm = cryptd_tfm;
+
+ reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
+ reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
+ reqsize += sizeof(struct aead_request);
+
+ crypto_aead_set_reqsize(tfm, reqsize);
+
+ return 0;
+}
+
+struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+ const char *drvname,
+ const char *basename)
+{
+ struct simd_aead_alg *salg;
+ struct crypto_aead *tfm;
+ struct aead_alg *ialg;
+ struct aead_alg *alg;
+ int err;
+
+ tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ ialg = crypto_aead_alg(tfm);
+
+ salg = kzalloc(sizeof(*salg), GFP_KERNEL);
+ if (!salg) {
+ salg = ERR_PTR(-ENOMEM);
+ goto out_put_tfm;
+ }
+
+ salg->ialg_name = basename;
+ alg = &salg->alg;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto out_free_salg;
+
+ if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ drvname) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_salg;
+
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_priority = ialg->base.cra_priority;
+ alg->base.cra_blocksize = ialg->base.cra_blocksize;
+ alg->base.cra_alignmask = ialg->base.cra_alignmask;
+ alg->base.cra_module = ialg->base.cra_module;
+ alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);
+
+ alg->ivsize = ialg->ivsize;
+ alg->maxauthsize = ialg->maxauthsize;
+ alg->chunksize = ialg->chunksize;
+
+ alg->init = simd_aead_init;
+ alg->exit = simd_aead_exit;
+
+ alg->setkey = simd_aead_setkey;
+ alg->setauthsize = simd_aead_setauthsize;
+ alg->encrypt = simd_aead_encrypt;
+ alg->decrypt = simd_aead_decrypt;
+
+ err = crypto_register_aead(alg);
+ if (err)
+ goto out_free_salg;
+
+out_put_tfm:
+ crypto_free_aead(tfm);
+ return salg;
+
+out_free_salg:
+ kfree(salg);
+ salg = ERR_PTR(err);
+ goto out_put_tfm;
+}
+EXPORT_SYMBOL_GPL(simd_aead_create_compat);
+
+struct simd_aead_alg *simd_aead_create(const char *algname,
+ const char *basename)
+{
+ char drvname[CRYPTO_MAX_ALG_NAME];
+
+ if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
+ CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ return simd_aead_create_compat(algname, drvname, basename);
+}
+EXPORT_SYMBOL_GPL(simd_aead_create);
+
+void simd_aead_free(struct simd_aead_alg *salg)
+{
+ crypto_unregister_aead(&salg->alg);
+ kfree(salg);
+}
+EXPORT_SYMBOL_GPL(simd_aead_free);
+
+int simd_register_aeads_compat(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs)
+{
+ int err;
+ int i;
+ const char *algname;
+ const char *drvname;
+ const char *basename;
+ struct simd_aead_alg *simd;
+
+ err = crypto_register_aeads(algs, count);
+ if (err)
+ return err;
+
+ for (i = 0; i < count; i++) {
+ WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
+ WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
+ algname = algs[i].base.cra_name + 2;
+ drvname = algs[i].base.cra_driver_name + 2;
+ basename = algs[i].base.cra_driver_name;
+ simd = simd_aead_create_compat(algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto err_unregister;
+ simd_algs[i] = simd;
+ }
+ return 0;
+
+err_unregister:
+ simd_unregister_aeads(algs, count, simd_algs);
+ return err;
+}
+EXPORT_SYMBOL_GPL(simd_register_aeads_compat);
+
+void simd_unregister_aeads(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs)
+{
+ int i;
+
+ crypto_unregister_aeads(algs, count);
+
+ for (i = 0; i < count; i++) {
+ if (simd_algs[i]) {
+ simd_aead_free(simd_algs[i]);
+ simd_algs[i] = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(simd_unregister_aeads);
+
MODULE_LICENSE("GPL");
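A sketch of how an arch driver would use the new AEAD helpers, mirroring the existing skcipher ones (all names below are illustrative, not an existing driver's):

	static struct aead_alg my_aeads[] = {
		/* entries with cra_name "__gcm(aes)", cra_driver_name
		 * "__my-gcm-aes", and CRYPTO_ALG_INTERNAL set */
	};
	static struct simd_aead_alg *my_simd_aeads[ARRAY_SIZE(my_aeads)];

	static int __init my_init(void)
	{
		return simd_register_aeads_compat(my_aeads,
						  ARRAY_SIZE(my_aeads),
						  my_simd_aeads);
	}

	static void __exit my_exit(void)
	{
		simd_unregister_aeads(my_aeads, ARRAY_SIZE(my_aeads),
				      my_simd_aeads);
	}

The "__" prefixes are mandatory: simd_register_aeads_compat() strips them to form the public algorithm and driver names, and wraps the internal driver name as the basename.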
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index bcf13d95f54a..2e66f312e2c4 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -131,8 +131,13 @@ unmap_src:
memcpy(walk->dst.virt.addr, walk->page, n);
skcipher_unmap_dst(walk);
} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
- if (WARN_ON(err)) {
- /* unexpected case; didn't process all bytes */
+ if (err) {
+ /*
+ * Didn't process all bytes. Either the algorithm is
+ * broken, or this was the last step and it turned out
+ * the message wasn't evenly divisible into blocks but
+ * the algorithm requires it.
+ */
err = -EINVAL;
goto finish;
}
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index c0cf87ae7ef6..e227bcada2a2 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -199,7 +199,7 @@ static void __exit sm3_generic_mod_fini(void)
crypto_unregister_shash(&sm3_alg);
}
-module_init(sm3_generic_mod_init);
+subsys_initcall(sm3_generic_mod_init);
module_exit(sm3_generic_mod_fini);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index c18eebfd5edd..71ffb343709a 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -237,7 +237,7 @@ static void __exit sm4_fini(void)
crypto_unregister_alg(&sm4_alg);
}
-module_init(sm4_init);
+subsys_initcall(sm4_init);
module_exit(sm4_fini);
MODULE_DESCRIPTION("SM4 Cipher Algorithm");
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index 5a2eafed9c29..63663c3bab7e 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -996,7 +996,7 @@ static void streebog_add512(const struct streebog_uint512 *x,
static void streebog_g(struct streebog_uint512 *h,
const struct streebog_uint512 *N,
- const u8 *m)
+ const struct streebog_uint512 *m)
{
struct streebog_uint512 Ki, data;
unsigned int i;
@@ -1005,7 +1005,7 @@ static void streebog_g(struct streebog_uint512 *h,
/* Starting E() */
Ki = data;
- streebog_xlps(&Ki, (const struct streebog_uint512 *)&m[0], &data);
+ streebog_xlps(&Ki, m, &data);
for (i = 0; i < 11; i++)
streebog_round(i, &Ki, &data);
@@ -1015,16 +1015,19 @@ static void streebog_g(struct streebog_uint512 *h,
/* E() done */
streebog_xor(&data, h, &data);
- streebog_xor(&data, (const struct streebog_uint512 *)&m[0], h);
+ streebog_xor(&data, m, h);
}
static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
{
- streebog_g(&ctx->h, &ctx->N, data);
+ struct streebog_uint512 m;
+
+ memcpy(&m, data, sizeof(m));
+
+ streebog_g(&ctx->h, &ctx->N, &m);
streebog_add512(&ctx->N, &buffer512, &ctx->N);
- streebog_add512(&ctx->Sigma, (const struct streebog_uint512 *)data,
- &ctx->Sigma);
+ streebog_add512(&ctx->Sigma, &m, &ctx->Sigma);
}
static void streebog_stage3(struct streebog_state *ctx)
@@ -1034,13 +1037,11 @@ static void streebog_stage3(struct streebog_state *ctx)
buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
streebog_pad(ctx);
- streebog_g(&ctx->h, &ctx->N, (const u8 *)&ctx->buffer);
+ streebog_g(&ctx->h, &ctx->N, &ctx->m);
streebog_add512(&ctx->N, &buf, &ctx->N);
- streebog_add512(&ctx->Sigma,
- (const struct streebog_uint512 *)&ctx->buffer[0],
- &ctx->Sigma);
- streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->N);
- streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->Sigma);
+ streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma);
+ streebog_g(&ctx->h, &buffer0, &ctx->N);
+ streebog_g(&ctx->h, &buffer0, &ctx->Sigma);
memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
}
@@ -1127,7 +1128,7 @@ static void __exit streebog_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-module_init(streebog_mod_init);
+subsys_initcall(streebog_mod_init);
module_exit(streebog_mod_fini);
MODULE_LICENSE("GPL");
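The streebog_g()/streebog_stage2() change is an alignment fix: casting an arbitrary u8 pointer to struct streebog_uint512 * and reading it as u64 words is unsafe on strict-alignment architectures. Copying through memcpy() into a properly typed object avoids that; the idiom in miniature:

	struct streebog_uint512 m;

	memcpy(&m, data, sizeof(m));		/* 'data' may be unaligned */
	streebog_g(&ctx->h, &ctx->N, &m);	/* now safely aligned */

(The &ctx->m used in streebog_stage3() presumably comes from a matching header change, not shown in this hunk, that overlays the context buffer with a streebog_uint512.)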
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ea2d5007ff5..798253f05203 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -3053,7 +3053,7 @@ err_free_tv:
*/
static void __exit tcrypt_mod_fini(void) { }
-module_init(tcrypt_mod_init);
+subsys_initcall(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
module_param(alg, charp, 0);
diff --git a/crypto/tea.c b/crypto/tea.c
index b70b441c7d1e..786b589e1399 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -274,7 +274,7 @@ MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
-module_init(tea_mod_init);
+subsys_initcall(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 8386038d67c7..c9e67c2bd725 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -37,6 +37,7 @@
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/acompress.h>
+#include <crypto/internal/simd.h>
#include "internal.h"
@@ -44,6 +45,9 @@ static bool notests;
module_param(notests, bool, 0644);
MODULE_PARM_DESC(notests, "disable crypto self-tests");
+static bool panic_on_fail;
+module_param(panic_on_fail, bool, 0444);
+
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
static bool noextratests;
module_param(noextratests, bool, 0644);
@@ -52,6 +56,9 @@ MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
static unsigned int fuzz_iterations = 100;
module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
+
+DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
+EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
#endif
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
@@ -121,6 +128,7 @@ struct kpp_test_suite {
struct alg_test_desc {
const char *alg;
+ const char *generic_driver;
int (*test)(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask);
int fips_allowed; /* set if alg is allowed in fips mode */
@@ -230,12 +238,14 @@ enum finalization_type {
* @offset
* @flush_type: for hashes, whether an update() should be done now vs.
* continuing to accumulate data
+ * @nosimd: if doing the pending update(), do it with SIMD disabled?
*/
struct test_sg_division {
unsigned int proportion_of_total;
unsigned int offset;
bool offset_relative_to_alignmask;
enum flush_type flush_type;
+ bool nosimd;
};
/**
@@ -255,6 +265,7 @@ struct test_sg_division {
* @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
* the @iv_offset
* @finalization_type: what finalization function to use for hashes
+ * @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
*/
struct testvec_config {
const char *name;
@@ -265,6 +276,7 @@ struct testvec_config {
unsigned int iv_offset;
bool iv_offset_relative_to_alignmask;
enum finalization_type finalization_type;
+ bool nosimd;
};
#define TESTVEC_CONFIG_NAMELEN 192
@@ -416,8 +428,11 @@ static unsigned int count_test_sg_divisions(const struct test_sg_division *divs)
return ndivs;
}
+#define SGDIVS_HAVE_FLUSHES BIT(0)
+#define SGDIVS_HAVE_NOSIMD BIT(1)
+
static bool valid_sg_divisions(const struct test_sg_division *divs,
- unsigned int count, bool *any_flushes_ret)
+ unsigned int count, int *flags_ret)
{
unsigned int total = 0;
unsigned int i;
@@ -428,7 +443,9 @@ static bool valid_sg_divisions(const struct test_sg_division *divs,
return false;
total += divs[i].proportion_of_total;
if (divs[i].flush_type != FLUSH_TYPE_NONE)
- *any_flushes_ret = true;
+ *flags_ret |= SGDIVS_HAVE_FLUSHES;
+ if (divs[i].nosimd)
+ *flags_ret |= SGDIVS_HAVE_NOSIMD;
}
return total == TEST_SG_TOTAL &&
memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
@@ -441,19 +458,18 @@ static bool valid_sg_divisions(const struct test_sg_division *divs,
*/
static bool valid_testvec_config(const struct testvec_config *cfg)
{
- bool any_flushes = false;
+ int flags = 0;
if (cfg->name == NULL)
return false;
if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
- &any_flushes))
+ &flags))
return false;
if (cfg->dst_divs[0].proportion_of_total) {
if (!valid_sg_divisions(cfg->dst_divs,
- ARRAY_SIZE(cfg->dst_divs),
- &any_flushes))
+ ARRAY_SIZE(cfg->dst_divs), &flags))
return false;
} else {
if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs)))
@@ -466,7 +482,12 @@ static bool valid_testvec_config(const struct testvec_config *cfg)
MAX_ALGAPI_ALIGNMASK + 1)
return false;
- if (any_flushes && cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
+ if ((flags & (SGDIVS_HAVE_FLUSHES | SGDIVS_HAVE_NOSIMD)) &&
+ cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
+ return false;
+
+ if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) &&
+ (cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP))
return false;
return true;
@@ -725,15 +746,101 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+
+/* Generate a random length in range [0, max_len], but prefer smaller values */
+static unsigned int generate_random_length(unsigned int max_len)
+{
+ unsigned int len = prandom_u32() % (max_len + 1);
+
+ switch (prandom_u32() % 4) {
+ case 0:
+ return len % 64;
+ case 1:
+ return len % 256;
+ case 2:
+ return len % 1024;
+ default:
+ return len;
+ }
+}
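
The switch above biases the fuzzer toward short inputs: with probability 1/4 each, the uniform draw is reduced modulo 64, 256, or 1024, and with probability 1/4 it is kept as-is. A minimal userspace sketch of the same distribution, assuming rand() stands in for prandom_u32() and biased_random_length() is a hypothetical name:

    #include <stdlib.h>

    /* Sketch: random length in [0, max_len], biased toward small values */
    static unsigned int biased_random_length(unsigned int max_len)
    {
            unsigned int len = rand() % (max_len + 1u);

            switch (rand() % 4) {
            case 0:
                    return len % 64;   /* ~1/4 of draws land in [0, 63] */
            case 1:
                    return len % 256;  /* ~1/4 land in [0, 255] */
            case 2:
                    return len % 1024; /* ~1/4 land in [0, 1023] */
            default:
                    return len;        /* ~1/4 stay uniform in [0, max_len] */
            }
    }
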
+
+/* Sometimes make some random changes to the given data buffer */
+static void mutate_buffer(u8 *buf, size_t count)
+{
+ size_t num_flips;
+ size_t i;
+ size_t pos;
+
+ /* Sometimes flip some bits */
+ if (prandom_u32() % 4 == 0) {
+ num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count * 8);
+ for (i = 0; i < num_flips; i++) {
+ pos = prandom_u32() % (count * 8);
+ buf[pos / 8] ^= 1 << (pos % 8);
+ }
+ }
+
+ /* Sometimes flip some bytes */
+ if (prandom_u32() % 4 == 0) {
+ num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count);
+ for (i = 0; i < num_flips; i++)
+ buf[prandom_u32() % count] ^= 0xff;
+ }
+}
+
+/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
+static void generate_random_bytes(u8 *buf, size_t count)
+{
+ u8 b;
+ u8 increment;
+ size_t i;
+
+ if (count == 0)
+ return;
+
+ switch (prandom_u32() % 8) { /* Choose a generation strategy */
+ case 0:
+ case 1:
+ /* All the same byte, plus optional mutations */
+ switch (prandom_u32() % 4) {
+ case 0:
+ b = 0x00;
+ break;
+ case 1:
+ b = 0xff;
+ break;
+ default:
+ b = (u8)prandom_u32();
+ break;
+ }
+ memset(buf, b, count);
+ mutate_buffer(buf, count);
+ break;
+ case 2:
+ /* Ascending or descending bytes, plus optional mutations */
+ increment = (u8)prandom_u32();
+ b = (u8)prandom_u32();
+ for (i = 0; i < count; i++, b += increment)
+ buf[i] = b;
+ mutate_buffer(buf, count);
+ break;
+ default:
+ /* Fully random bytes */
+ for (i = 0; i < count; i++)
+ buf[i] = (u8)prandom_u32();
+ }
+}
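
The strategy switch gives constant-byte buffers a 2-in-8 chance and an arithmetic byte ramp a 1-in-8 chance (both then passed through mutate_buffer()), with the remaining 5-in-8 producing fully random bytes. As a worked example of the ramp case, assuming a start byte b = 0x10 and increment = 3:

    u8 buf[5];
    u8 b = 0x10;
    size_t i;

    for (i = 0; i < 5; i++, b += 3)
            buf[i] = b;     /* buf = 10 13 16 19 1c (hex); wraps mod 256 */
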
+
static char *generate_random_sgl_divisions(struct test_sg_division *divs,
size_t max_divs, char *p, char *end,
- bool gen_flushes)
+ bool gen_flushes, u32 req_flags)
{
struct test_sg_division *div = divs;
unsigned int remaining = TEST_SG_TOTAL;
do {
unsigned int this_len;
+ const char *flushtype_str;
if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
this_len = remaining;
@@ -762,11 +869,31 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
}
}
+ if (div->flush_type != FLUSH_TYPE_NONE &&
+ !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+ prandom_u32() % 2 == 0)
+ div->nosimd = true;
+
+ switch (div->flush_type) {
+ case FLUSH_TYPE_FLUSH:
+ if (div->nosimd)
+ flushtype_str = "<flush,nosimd>";
+ else
+ flushtype_str = "<flush>";
+ break;
+ case FLUSH_TYPE_REIMPORT:
+ if (div->nosimd)
+ flushtype_str = "<reimport,nosimd>";
+ else
+ flushtype_str = "<reimport>";
+ break;
+ default:
+ flushtype_str = "";
+ break;
+ }
+
BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */
- p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s",
- div->flush_type == FLUSH_TYPE_NONE ? "" :
- div->flush_type == FLUSH_TYPE_FLUSH ?
- "<flush> " : "<reimport> ",
+ p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", flushtype_str,
this_len / 100, this_len % 100,
div->offset_relative_to_alignmask ?
"alignmask" : "",
@@ -816,18 +943,26 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
+ if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+ prandom_u32() % 2 == 0) {
+ cfg->nosimd = true;
+ p += scnprintf(p, end - p, " nosimd");
+ }
+
p += scnprintf(p, end - p, " src_divs=[");
p = generate_random_sgl_divisions(cfg->src_divs,
ARRAY_SIZE(cfg->src_divs), p, end,
(cfg->finalization_type !=
- FINALIZATION_TYPE_DIGEST));
+ FINALIZATION_TYPE_DIGEST),
+ cfg->req_flags);
p += scnprintf(p, end - p, "]");
if (!cfg->inplace && prandom_u32() % 2 == 0) {
p += scnprintf(p, end - p, " dst_divs=[");
p = generate_random_sgl_divisions(cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
- p, end, false);
+ p, end, false,
+ cfg->req_flags);
p += scnprintf(p, end - p, "]");
}
@@ -838,21 +973,100 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
WARN_ON_ONCE(!valid_testvec_config(cfg));
}
-#endif /* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
+static void crypto_disable_simd_for_test(void)
+{
+ preempt_disable();
+ __this_cpu_write(crypto_simd_disabled_for_test, true);
+}
+
+static void crypto_reenable_simd_for_test(void)
+{
+ __this_cpu_write(crypto_simd_disabled_for_test, false);
+ preempt_enable();
+}
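
These helpers only flip a per-CPU flag under preempt_disable(); they take effect because SIMD-accelerated implementations are expected to consult the flag before taking their fast path. A sketch of such a check (the in-tree helper for this purpose is crypto_simd_usable(); the body below illustrates the idea rather than quoting it verbatim):

    #include <asm/simd.h>

    static bool example_simd_usable(void)
    {
            return may_use_simd() &&
                   !this_cpu_read(crypto_simd_disabled_for_test);
    }
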
+
+/*
+ * Given an algorithm name, build the name of the generic implementation of that
+ * algorithm, assuming the usual naming convention. Specifically, this appends
+ * "-generic" to every part of the name that is not a template name. Examples:
+ *
+ * aes => aes-generic
+ * cbc(aes) => cbc(aes-generic)
+ * cts(cbc(aes)) => cts(cbc(aes-generic))
+ * rfc7539(chacha20,poly1305) => rfc7539(chacha20-generic,poly1305-generic)
+ *
+ * Return: 0 on success, or -ENAMETOOLONG if the generic name would be too long
+ */
+static int build_generic_driver_name(const char *algname,
+ char driver_name[CRYPTO_MAX_ALG_NAME])
+{
+ const char *in = algname;
+ char *out = driver_name;
+ size_t len = strlen(algname);
+
+ if (len >= CRYPTO_MAX_ALG_NAME)
+ goto too_long;
+ do {
+ const char *in_saved = in;
+
+ while (*in && *in != '(' && *in != ')' && *in != ',')
+ *out++ = *in++;
+ if (*in != '(' && in > in_saved) {
+ len += 8;
+ if (len >= CRYPTO_MAX_ALG_NAME)
+ goto too_long;
+ memcpy(out, "-generic", 8);
+ out += 8;
+ }
+ } while ((*out++ = *in++) != '\0');
+ return 0;
+
+too_long:
+ pr_err("alg: generic driver name for \"%s\" would be too long\n",
+ algname);
+ return -ENAMETOOLONG;
+}
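
Tracing "cts(cbc(aes))" through the loop: "cts" is copied and is followed by '(' (a template name, so nothing is appended), likewise "cbc"; "aes" is followed by ')', so "-generic" is appended (the len += 8 accounts for its eight characters). A hypothetical caller:

    char generic[CRYPTO_MAX_ALG_NAME];

    if (build_generic_driver_name("cts(cbc(aes))", generic) == 0)
            pr_debug("generic impl: %s\n", generic); /* "cts(cbc(aes-generic))" */
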
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static void crypto_disable_simd_for_test(void)
+{
+}
+
+static void crypto_reenable_simd_for_test(void)
+{
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
+static int do_ahash_op(int (*op)(struct ahash_request *req),
+ struct ahash_request *req,
+ struct crypto_wait *wait, bool nosimd)
+{
+ int err;
+
+ if (nosimd)
+ crypto_disable_simd_for_test();
+
+ err = op(req);
+
+ if (nosimd)
+ crypto_reenable_simd_for_test();
+
+ return crypto_wait_req(err, wait);
+}
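
Note the ordering: SIMD is disabled only around the submission op(req), which runs under preempt_disable(); the wait happens after SIMD is re-enabled, since crypto_wait_req() may sleep. This is also why nosimd requires !CRYPTO_TFM_REQ_MAY_SLEEP: the request must not sleep while preemption is off. Call sites then reduce to one-liners, e.g.:

    /* was: err = crypto_wait_req(crypto_ahash_digest(req), &wait); */
    err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
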
static int check_nonfinal_hash_op(const char *op, int err,
u8 *result, unsigned int digestsize,
- const char *driver, unsigned int vec_num,
+ const char *driver, const char *vec_name,
const struct testvec_config *cfg)
{
if (err) {
- pr_err("alg: hash: %s %s() failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, op, err, vec_num, cfg->name);
+ pr_err("alg: hash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
+ driver, op, err, vec_name, cfg->name);
return err;
}
if (!testmgr_is_poison(result, digestsize)) {
- pr_err("alg: hash: %s %s() used result buffer on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: hash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return -EINVAL;
}
return 0;
@@ -860,7 +1074,7 @@ static int check_nonfinal_hash_op(const char *op, int err,
static int test_hash_vec_cfg(const char *driver,
const struct hash_testvec *vec,
- unsigned int vec_num,
+ const char *vec_name,
const struct testvec_config *cfg,
struct ahash_request *req,
struct test_sglist *tsgl,
@@ -885,11 +1099,18 @@ static int test_hash_vec_cfg(const char *driver,
if (vec->ksize) {
err = crypto_ahash_setkey(tfm, vec->key, vec->ksize);
if (err) {
- pr_err("alg: hash: %s setkey failed with err %d on test vector %u; flags=%#x\n",
- driver, err, vec_num,
+ if (err == vec->setkey_error)
+ return 0;
+ pr_err("alg: hash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+ driver, vec_name, vec->setkey_error, err,
crypto_ahash_get_flags(tfm));
return err;
}
+ if (vec->setkey_error) {
+ pr_err("alg: hash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+ driver, vec_name, vec->setkey_error);
+ return -EINVAL;
+ }
}
/* Build the scatterlist for the source data */
@@ -899,8 +1120,8 @@ static int test_hash_vec_cfg(const char *driver,
err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
&input, divs);
if (err) {
- pr_err("alg: hash: %s: error preparing scatterlist for test vector %u, cfg=\"%s\"\n",
- driver, vec_num, cfg->name);
+ pr_err("alg: hash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
+ driver, vec_name, cfg->name);
return err;
}
@@ -909,17 +1130,26 @@ static int test_hash_vec_cfg(const char *driver,
testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
- if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST) {
+ if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
+ vec->digest_error) {
/* Just using digest() */
ahash_request_set_callback(req, req_flags, crypto_req_done,
&wait);
ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
- err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
if (err) {
- pr_err("alg: hash: %s digest() failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, err, vec_num, cfg->name);
+ if (err == vec->digest_error)
+ return 0;
+ pr_err("alg: hash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+ driver, vec_name, vec->digest_error, err,
+ cfg->name);
return err;
}
+ if (vec->digest_error) {
+ pr_err("alg: hash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+ driver, vec_name, vec->digest_error, cfg->name);
+ return -EINVAL;
+ }
goto result_ready;
}
@@ -927,9 +1157,9 @@ static int test_hash_vec_cfg(const char *driver,
ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
ahash_request_set_crypt(req, NULL, result, 0);
- err = crypto_wait_req(crypto_ahash_init(req), &wait);
+ err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
err = check_nonfinal_hash_op("init", err, result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
@@ -943,10 +1173,11 @@ static int test_hash_vec_cfg(const char *driver,
crypto_req_done, &wait);
ahash_request_set_crypt(req, pending_sgl, result,
pending_len);
- err = crypto_wait_req(crypto_ahash_update(req), &wait);
+ err = do_ahash_op(crypto_ahash_update, req, &wait,
+ divs[i]->nosimd);
err = check_nonfinal_hash_op("update", err,
result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
pending_sgl = NULL;
@@ -959,13 +1190,13 @@ static int test_hash_vec_cfg(const char *driver,
err = crypto_ahash_export(req, hashstate);
err = check_nonfinal_hash_op("export", err,
result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
if (!testmgr_is_poison(hashstate + statesize,
TESTMGR_POISON_LEN)) {
- pr_err("alg: hash: %s export() overran state buffer on test vector %u, cfg=\"%s\"\n",
- driver, vec_num, cfg->name);
+ pr_err("alg: hash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
+ driver, vec_name, cfg->name);
return -EOVERFLOW;
}
@@ -973,7 +1204,7 @@ static int test_hash_vec_cfg(const char *driver,
err = crypto_ahash_import(req, hashstate);
err = check_nonfinal_hash_op("import", err,
result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
}
@@ -986,23 +1217,23 @@ static int test_hash_vec_cfg(const char *driver,
ahash_request_set_crypt(req, pending_sgl, result, pending_len);
if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
/* finish with update() and final() */
- err = crypto_wait_req(crypto_ahash_update(req), &wait);
+ err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
err = check_nonfinal_hash_op("update", err, result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
- err = crypto_wait_req(crypto_ahash_final(req), &wait);
+ err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd);
if (err) {
- pr_err("alg: hash: %s final() failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, err, vec_num, cfg->name);
+ pr_err("alg: hash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
+ driver, err, vec_name, cfg->name);
return err;
}
} else {
/* finish with finup() */
- err = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd);
if (err) {
- pr_err("alg: hash: %s finup() failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, err, vec_num, cfg->name);
+ pr_err("alg: hash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
+ driver, err, vec_name, cfg->name);
return err;
}
}
@@ -1010,13 +1241,13 @@ static int test_hash_vec_cfg(const char *driver,
result_ready:
/* Check that the algorithm produced the correct digest */
if (memcmp(result, vec->digest, digestsize) != 0) {
- pr_err("alg: hash: %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
- driver, vec_num, cfg->name);
+ pr_err("alg: hash: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+ driver, vec_name, cfg->name);
return -EINVAL;
}
if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
- pr_err("alg: hash: %s overran result buffer on test vector %u, cfg=\"%s\"\n",
- driver, vec_num, cfg->name);
+ pr_err("alg: hash: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
+ driver, vec_name, cfg->name);
return -EOVERFLOW;
}
@@ -1027,11 +1258,14 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
unsigned int vec_num, struct ahash_request *req,
struct test_sglist *tsgl, u8 *hashstate)
{
+ char vec_name[16];
unsigned int i;
int err;
+ sprintf(vec_name, "%u", vec_num);
+
for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
- err = test_hash_vec_cfg(driver, vec, vec_num,
+ err = test_hash_vec_cfg(driver, vec, vec_name,
&default_hash_testvec_configs[i],
req, tsgl, hashstate);
if (err)
@@ -1046,7 +1280,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
sizeof(cfgname));
- err = test_hash_vec_cfg(driver, vec, vec_num, &cfg,
+ err = test_hash_vec_cfg(driver, vec, vec_name, &cfg,
req, tsgl, hashstate);
if (err)
return err;
@@ -1056,9 +1290,168 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
return 0;
}
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * Generate a hash test vector from the given implementation.
+ * Assumes the buffers in 'vec' were already allocated.
+ */
+static void generate_random_hash_testvec(struct crypto_shash *tfm,
+ struct hash_testvec *vec,
+ unsigned int maxkeysize,
+ unsigned int maxdatasize,
+ char *name, size_t max_namelen)
+{
+ SHASH_DESC_ON_STACK(desc, tfm);
+
+ /* Data */
+ vec->psize = generate_random_length(maxdatasize);
+ generate_random_bytes((u8 *)vec->plaintext, vec->psize);
+
+ /*
+ * Key: length in range [1, maxkeysize], but usually choose maxkeysize.
+ * If the algorithm is unkeyed, then maxkeysize == 0 and ksize is set to 0.
+ */
+ vec->setkey_error = 0;
+ vec->ksize = 0;
+ if (maxkeysize) {
+ vec->ksize = maxkeysize;
+ if (prandom_u32() % 4 == 0)
+ vec->ksize = 1 + (prandom_u32() % maxkeysize);
+ generate_random_bytes((u8 *)vec->key, vec->ksize);
+
+ vec->setkey_error = crypto_shash_setkey(tfm, vec->key,
+ vec->ksize);
+ /* If the key couldn't be set, no need to continue to digest. */
+ if (vec->setkey_error)
+ goto done;
+ }
+
+ /* Digest */
+ desc->tfm = tfm;
+ vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
+ vec->psize, (u8 *)vec->digest);
+done:
+ snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
+ vec->psize, vec->ksize);
+}
+
+/*
+ * Test the hash algorithm represented by @req against the corresponding generic
+ * implementation, if one is available.
+ */
+static int test_hash_vs_generic_impl(const char *driver,
+ const char *generic_driver,
+ unsigned int maxkeysize,
+ struct ahash_request *req,
+ struct test_sglist *tsgl,
+ u8 *hashstate)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ const unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ const unsigned int blocksize = crypto_ahash_blocksize(tfm);
+ const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
+ char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ struct crypto_shash *generic_tfm = NULL;
+ unsigned int i;
+ struct hash_testvec vec = { 0 };
+ char vec_name[64];
+ struct testvec_config cfg;
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
+ int err;
+
+ if (noextratests)
+ return 0;
+
+ if (!generic_driver) { /* Use default naming convention? */
+ err = build_generic_driver_name(algname, _generic_driver);
+ if (err)
+ return err;
+ generic_driver = _generic_driver;
+ }
+
+ if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
+ return 0;
+
+ generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
+ if (IS_ERR(generic_tfm)) {
+ err = PTR_ERR(generic_tfm);
+ if (err == -ENOENT) {
+ pr_warn("alg: hash: skipping comparison tests for %s because %s is unavailable\n",
+ driver, generic_driver);
+ return 0;
+ }
+ pr_err("alg: hash: error allocating %s (generic impl of %s): %d\n",
+ generic_driver, algname, err);
+ return err;
+ }
+
+ /* Check the algorithm properties for consistency. */
+
+ if (digestsize != crypto_shash_digestsize(generic_tfm)) {
+ pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, digestsize,
+ crypto_shash_digestsize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (blocksize != crypto_shash_blocksize(generic_tfm)) {
+ pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, blocksize, crypto_shash_blocksize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Now generate test vectors using the generic implementation, and test
+ * the other implementation against them.
+ */
+
+ vec.key = kmalloc(maxkeysize, GFP_KERNEL);
+ vec.plaintext = kmalloc(maxdatasize, GFP_KERNEL);
+ vec.digest = kmalloc(digestsize, GFP_KERNEL);
+ if (!vec.key || !vec.plaintext || !vec.digest) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < fuzz_iterations * 8; i++) {
+ generate_random_hash_testvec(generic_tfm, &vec,
+ maxkeysize, maxdatasize,
+ vec_name, sizeof(vec_name));
+ generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+
+ err = test_hash_vec_cfg(driver, &vec, vec_name, &cfg,
+ req, tsgl, hashstate);
+ if (err)
+ goto out;
+ cond_resched();
+ }
+ err = 0;
+out:
+ kfree(vec.key);
+ kfree(vec.plaintext);
+ kfree(vec.digest);
+ crypto_free_shash(generic_tfm);
+ return err;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static int test_hash_vs_generic_impl(const char *driver,
+ const char *generic_driver,
+ unsigned int maxkeysize,
+ struct ahash_request *req,
+ struct test_sglist *tsgl,
+ u8 *hashstate)
+{
+ return 0;
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
static int __alg_test_hash(const struct hash_testvec *vecs,
unsigned int num_vecs, const char *driver,
- u32 type, u32 mask)
+ u32 type, u32 mask,
+ const char *generic_driver, unsigned int maxkeysize)
{
struct crypto_ahash *tfm;
struct ahash_request *req = NULL;
@@ -1106,7 +1499,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
if (err)
goto out;
}
- err = 0;
+ err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req,
+ tsgl, hashstate);
out:
kfree(hashstate);
if (tsgl) {
@@ -1124,6 +1518,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
const struct hash_testvec *template = desc->suite.hash.vecs;
unsigned int tcount = desc->suite.hash.count;
unsigned int nr_unkeyed, nr_keyed;
+ unsigned int maxkeysize = 0;
int err;
/*
@@ -1142,23 +1537,27 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
"unkeyed ones must come first\n", desc->alg);
return -EINVAL;
}
+ maxkeysize = max_t(unsigned int, maxkeysize,
+ template[nr_unkeyed + nr_keyed].ksize);
}
err = 0;
if (nr_unkeyed) {
- err = __alg_test_hash(template, nr_unkeyed, driver, type, mask);
+ err = __alg_test_hash(template, nr_unkeyed, driver, type, mask,
+ desc->generic_driver, maxkeysize);
template += nr_unkeyed;
}
if (!err && nr_keyed)
- err = __alg_test_hash(template, nr_keyed, driver, type, mask);
+ err = __alg_test_hash(template, nr_keyed, driver, type, mask,
+ desc->generic_driver, maxkeysize);
return err;
}
static int test_aead_vec_cfg(const char *driver, int enc,
const struct aead_testvec *vec,
- unsigned int vec_num,
+ const char *vec_name,
const struct testvec_config *cfg,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
@@ -1175,6 +1574,7 @@ static int test_aead_vec_cfg(const char *driver, int enc,
cfg->iv_offset +
(cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
struct kvec input[2];
+ int expected_error;
int err;
/* Set the key */
@@ -1183,26 +1583,33 @@ static int test_aead_vec_cfg(const char *driver, int enc,
else
crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = crypto_aead_setkey(tfm, vec->key, vec->klen);
- if (err) {
- if (vec->fail) /* expectedly failed to set key? */
- return 0;
- pr_err("alg: aead: %s setkey failed with err %d on test vector %u; flags=%#x\n",
- driver, err, vec_num, crypto_aead_get_flags(tfm));
+ if (err && err != vec->setkey_error) {
+ pr_err("alg: aead: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+ driver, vec_name, vec->setkey_error, err,
+ crypto_aead_get_flags(tfm));
return err;
}
- if (vec->fail) {
- pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %u\n",
- driver, vec_num);
+ if (!err && vec->setkey_error) {
+ pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+ driver, vec_name, vec->setkey_error);
return -EINVAL;
}
/* Set the authentication tag size */
err = crypto_aead_setauthsize(tfm, authsize);
- if (err) {
- pr_err("alg: aead: %s setauthsize failed with err %d on test vector %u\n",
- driver, err, vec_num);
+ if (err && err != vec->setauthsize_error) {
+ pr_err("alg: aead: %s setauthsize failed on test vector %s; expected_error=%d, actual_error=%d\n",
+ driver, vec_name, vec->setauthsize_error, err);
return err;
}
+ if (!err && vec->setauthsize_error) {
+ pr_err("alg: aead: %s setauthsize unexpectedly succeeded on test vector %s; expected_error=%d\n",
+ driver, vec_name, vec->setauthsize_error);
+ return -EINVAL;
+ }
+
+ if (vec->setkey_error || vec->setauthsize_error)
+ return 0;
/* The IV must be copied to a buffer, as the algorithm may modify it */
if (WARN_ON(ivsize > MAX_IVLEN))
@@ -1224,8 +1631,8 @@ static int test_aead_vec_cfg(const char *driver, int enc,
vec->plen),
input, 2);
if (err) {
- pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
@@ -1235,23 +1642,12 @@ static int test_aead_vec_cfg(const char *driver, int enc,
aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
enc ? vec->plen : vec->clen, iv);
aead_request_set_ad(req, vec->alen);
- err = crypto_wait_req(enc ? crypto_aead_encrypt(req) :
- crypto_aead_decrypt(req), &wait);
-
- aead_request_set_tfm(req, tfm); /* TODO: get rid of this */
-
- if (err) {
- if (err == -EBADMSG && vec->novrfy)
- return 0;
- pr_err("alg: aead: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, op, err, vec_num, cfg->name);
- return err;
- }
- if (vec->novrfy) {
- pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
- return -EINVAL;
- }
+ if (cfg->nosimd)
+ crypto_disable_simd_for_test();
+ err = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ if (cfg->nosimd)
+ crypto_reenable_simd_for_test();
+ err = crypto_wait_req(err, &wait);
/* Check that the algorithm didn't overwrite things it shouldn't have */
if (req->cryptlen != (enc ? vec->plen : vec->clen) ||
@@ -1263,8 +1659,8 @@ static int test_aead_vec_cfg(const char *driver, int enc,
req->base.complete != crypto_req_done ||
req->base.flags != req_flags ||
req->base.data != &wait) {
- pr_err("alg: aead: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
if (req->cryptlen != (enc ? vec->plen : vec->clen))
pr_err("alg: aead: changed 'req->cryptlen'\n");
if (req->assoclen != vec->alen)
@@ -1286,14 +1682,29 @@ static int test_aead_vec_cfg(const char *driver, int enc,
return -EINVAL;
}
if (is_test_sglist_corrupted(&tsgls->src)) {
- pr_err("alg: aead: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return -EINVAL;
}
if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
is_test_sglist_corrupted(&tsgls->dst)) {
- pr_err("alg: aead: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
+ return -EINVAL;
+ }
+
+ /* Check for success or failure */
+ expected_error = vec->novrfy ? -EBADMSG : vec->crypt_error;
+ if (err) {
+ if (err == expected_error)
+ return 0;
+ pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+ driver, op, vec_name, expected_error, err, cfg->name);
+ return err;
+ }
+ if (expected_error) {
+ pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+ driver, op, vec_name, expected_error, cfg->name);
return -EINVAL;
}
@@ -1302,13 +1713,13 @@ static int test_aead_vec_cfg(const char *driver, int enc,
enc ? vec->clen : vec->plen,
vec->alen, enc || !cfg->inplace);
if (err == -EOVERFLOW) {
- pr_err("alg: aead: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
if (err) {
- pr_err("alg: aead: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
@@ -1320,14 +1731,17 @@ static int test_aead_vec(const char *driver, int enc,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
+ char vec_name[16];
unsigned int i;
int err;
if (enc && vec->novrfy)
return 0;
+ sprintf(vec_name, "%u", vec_num);
+
for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
- err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+ err = test_aead_vec_cfg(driver, enc, vec, vec_name,
&default_cipher_testvec_configs[i],
req, tsgls);
if (err)
@@ -1342,7 +1756,7 @@ static int test_aead_vec(const char *driver, int enc,
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
sizeof(cfgname));
- err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+ err = test_aead_vec_cfg(driver, enc, vec, vec_name,
&cfg, req, tsgls);
if (err)
return err;
@@ -1352,6 +1766,226 @@ static int test_aead_vec(const char *driver, int enc,
return 0;
}
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * Generate an AEAD test vector from the given implementation.
+ * Assumes the buffers in 'vec' were already allocated.
+ */
+static void generate_random_aead_testvec(struct aead_request *req,
+ struct aead_testvec *vec,
+ unsigned int maxkeysize,
+ unsigned int maxdatasize,
+ char *name, size_t max_namelen)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ const unsigned int ivsize = crypto_aead_ivsize(tfm);
+ unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
+ unsigned int authsize;
+ unsigned int total_len;
+ int i;
+ struct scatterlist src[2], dst;
+ u8 iv[MAX_IVLEN];
+ DECLARE_CRYPTO_WAIT(wait);
+
+ /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+ vec->klen = maxkeysize;
+ if (prandom_u32() % 4 == 0)
+ vec->klen = prandom_u32() % (maxkeysize + 1);
+ generate_random_bytes((u8 *)vec->key, vec->klen);
+ vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
+
+ /* IV */
+ generate_random_bytes((u8 *)vec->iv, ivsize);
+
+ /* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
+ authsize = maxauthsize;
+ if (prandom_u32() % 4 == 0)
+ authsize = prandom_u32() % (maxauthsize + 1);
+ if (WARN_ON(authsize > maxdatasize))
+ authsize = maxdatasize;
+ maxdatasize -= authsize;
+ vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
+
+ /* Plaintext and associated data */
+ total_len = generate_random_length(maxdatasize);
+ if (prandom_u32() % 4 == 0)
+ vec->alen = 0;
+ else
+ vec->alen = generate_random_length(total_len);
+ vec->plen = total_len - vec->alen;
+ generate_random_bytes((u8 *)vec->assoc, vec->alen);
+ generate_random_bytes((u8 *)vec->ptext, vec->plen);
+
+ vec->clen = vec->plen + authsize;
+
+ /*
+ * If the key or authentication tag size couldn't be set, no need to
+ * continue to encrypt.
+ */
+ if (vec->setkey_error || vec->setauthsize_error)
+ goto done;
+
+ /* Ciphertext */
+ sg_init_table(src, 2);
+ i = 0;
+ if (vec->alen)
+ sg_set_buf(&src[i++], vec->assoc, vec->alen);
+ if (vec->plen)
+ sg_set_buf(&src[i++], vec->ptext, vec->plen);
+ sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
+ memcpy(iv, vec->iv, ivsize);
+ aead_request_set_callback(req, 0, crypto_req_done, &wait);
+ aead_request_set_crypt(req, src, &dst, vec->plen, iv);
+ aead_request_set_ad(req, vec->alen);
+ vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req), &wait);
+ if (vec->crypt_error == 0)
+ memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen);
+done:
+ snprintf(name, max_namelen,
+ "\"random: alen=%u plen=%u authsize=%u klen=%u\"",
+ vec->alen, vec->plen, authsize, vec->klen);
+}
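
The dst scatterlist above spans vec->alen + vec->clen bytes of vec->ctext because AEAD encryption writes the associated data back out ahead of the ciphertext; the memmove() then drops that copy so vec->ctext holds only ciphertext plus tag. A comment sketch of the buffer:

    /*
     * vec->ctext after encrypt: [ assoc (alen) | ciphertext+tag (clen) ]
     * after the memmove():      [ ciphertext+tag (clen) | stale bytes  ]
     */
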
+
+/*
+ * Test the AEAD algorithm represented by @req against the corresponding generic
+ * implementation, if one is available.
+ */
+static int test_aead_vs_generic_impl(const char *driver,
+ const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ const unsigned int ivsize = crypto_aead_ivsize(tfm);
+ const unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
+ const unsigned int blocksize = crypto_aead_blocksize(tfm);
+ const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ const char *algname = crypto_aead_alg(tfm)->base.cra_name;
+ const char *generic_driver = test_desc->generic_driver;
+ char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ struct crypto_aead *generic_tfm = NULL;
+ struct aead_request *generic_req = NULL;
+ unsigned int maxkeysize;
+ unsigned int i;
+ struct aead_testvec vec = { 0 };
+ char vec_name[64];
+ struct testvec_config cfg;
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
+ int err;
+
+ if (noextratests)
+ return 0;
+
+ if (!generic_driver) { /* Use default naming convention? */
+ err = build_generic_driver_name(algname, _generic_driver);
+ if (err)
+ return err;
+ generic_driver = _generic_driver;
+ }
+
+ if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
+ return 0;
+
+ generic_tfm = crypto_alloc_aead(generic_driver, 0, 0);
+ if (IS_ERR(generic_tfm)) {
+ err = PTR_ERR(generic_tfm);
+ if (err == -ENOENT) {
+ pr_warn("alg: aead: skipping comparison tests for %s because %s is unavailable\n",
+ driver, generic_driver);
+ return 0;
+ }
+ pr_err("alg: aead: error allocating %s (generic impl of %s): %d\n",
+ generic_driver, algname, err);
+ return err;
+ }
+
+ generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL);
+ if (!generic_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Check the algorithm properties for consistency. */
+
+ if (maxauthsize != crypto_aead_alg(generic_tfm)->maxauthsize) {
+ pr_err("alg: aead: maxauthsize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, maxauthsize,
+ crypto_aead_alg(generic_tfm)->maxauthsize);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ivsize != crypto_aead_ivsize(generic_tfm)) {
+ pr_err("alg: aead: ivsize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, ivsize, crypto_aead_ivsize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (blocksize != crypto_aead_blocksize(generic_tfm)) {
+ pr_err("alg: aead: blocksize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, blocksize, crypto_aead_blocksize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Now generate test vectors using the generic implementation, and test
+ * the other implementation against them.
+ */
+
+ maxkeysize = 0;
+ for (i = 0; i < test_desc->suite.aead.count; i++)
+ maxkeysize = max_t(unsigned int, maxkeysize,
+ test_desc->suite.aead.vecs[i].klen);
+
+ vec.key = kmalloc(maxkeysize, GFP_KERNEL);
+ vec.iv = kmalloc(ivsize, GFP_KERNEL);
+ vec.assoc = kmalloc(maxdatasize, GFP_KERNEL);
+ vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
+ vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
+ if (!vec.key || !vec.iv || !vec.assoc || !vec.ptext || !vec.ctext) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < fuzz_iterations * 8; i++) {
+ generate_random_aead_testvec(generic_req, &vec,
+ maxkeysize, maxdatasize,
+ vec_name, sizeof(vec_name));
+ generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+
+ err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, &cfg,
+ req, tsgls);
+ if (err)
+ goto out;
+ err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, &cfg,
+ req, tsgls);
+ if (err)
+ goto out;
+ cond_resched();
+ }
+ err = 0;
+out:
+ kfree(vec.key);
+ kfree(vec.iv);
+ kfree(vec.assoc);
+ kfree(vec.ptext);
+ kfree(vec.ctext);
+ crypto_free_aead(generic_tfm);
+ aead_request_free(generic_req);
+ return err;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static int test_aead_vs_generic_impl(const char *driver,
+ const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ return 0;
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
static int test_aead(const char *driver, int enc,
const struct aead_test_suite *suite,
struct aead_request *req,
@@ -1411,6 +2045,10 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
goto out;
err = test_aead(driver, DECRYPT, suite, req, tsgls);
+ if (err)
+ goto out;
+
+ err = test_aead_vs_generic_impl(driver, desc, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
aead_request_free(req);
@@ -1462,13 +2100,20 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
ret = crypto_cipher_setkey(tfm, template[i].key,
template[i].klen);
- if (template[i].fail == !ret) {
- printk(KERN_ERR "alg: cipher: setkey failed "
- "on test %d for %s: flags=%x\n", j,
- algo, crypto_cipher_get_flags(tfm));
+ if (ret) {
+ if (ret == template[i].setkey_error)
+ continue;
+ pr_err("alg: cipher: %s setkey failed on test vector %u; expected_error=%d, actual_error=%d, flags=%#x\n",
+ algo, j, template[i].setkey_error, ret,
+ crypto_cipher_get_flags(tfm));
goto out;
- } else if (ret)
- continue;
+ }
+ if (template[i].setkey_error) {
+ pr_err("alg: cipher: %s setkey unexpectedly succeeded on test vector %u; expected_error=%d\n",
+ algo, j, template[i].setkey_error);
+ ret = -EINVAL;
+ goto out;
+ }
for (k = 0; k < template[i].len;
k += crypto_cipher_blocksize(tfm)) {
@@ -1500,7 +2145,7 @@ out_nobuf:
static int test_skcipher_vec_cfg(const char *driver, int enc,
const struct cipher_testvec *vec,
- unsigned int vec_num,
+ const char *vec_name,
const struct testvec_config *cfg,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
@@ -1526,15 +2171,16 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
if (err) {
- if (vec->fail) /* expectedly failed to set key? */
+ if (err == vec->setkey_error)
return 0;
- pr_err("alg: skcipher: %s setkey failed with err %d on test vector %u; flags=%#x\n",
- driver, err, vec_num, crypto_skcipher_get_flags(tfm));
+ pr_err("alg: skcipher: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+ driver, vec_name, vec->setkey_error, err,
+ crypto_skcipher_get_flags(tfm));
return err;
}
- if (vec->fail) {
- pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %u\n",
- driver, vec_num);
+ if (vec->setkey_error) {
+ pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+ driver, vec_name, vec->setkey_error);
return -EINVAL;
}
@@ -1550,8 +2196,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
memset(iv, 0, ivsize);
} else {
if (vec->generates_iv) {
- pr_err("alg: skcipher: %s has ivsize=0 but test vector %u generates IV!\n",
- driver, vec_num);
+ pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
+ driver, vec_name);
return -EINVAL;
}
iv = NULL;
@@ -1563,8 +2209,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
err = build_cipher_test_sglists(tsgls, cfg, alignmask,
vec->len, vec->len, &input, 1);
if (err) {
- pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
@@ -1573,13 +2219,12 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait);
skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
vec->len, iv);
- err = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req), &wait);
- if (err) {
- pr_err("alg: skcipher: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, op, err, vec_num, cfg->name);
- return err;
- }
+ if (cfg->nosimd)
+ crypto_disable_simd_for_test();
+ err = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
+ if (cfg->nosimd)
+ crypto_reenable_simd_for_test();
+ err = crypto_wait_req(err, &wait);
/* Check that the algorithm didn't overwrite things it shouldn't have */
if (req->cryptlen != vec->len ||
@@ -1590,8 +2235,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
req->base.complete != crypto_req_done ||
req->base.flags != req_flags ||
req->base.data != &wait) {
- pr_err("alg: skcipher: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
if (req->cryptlen != vec->len)
pr_err("alg: skcipher: changed 'req->cryptlen'\n");
if (req->iv != iv)
@@ -1611,14 +2256,28 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
return -EINVAL;
}
if (is_test_sglist_corrupted(&tsgls->src)) {
- pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return -EINVAL;
}
if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
is_test_sglist_corrupted(&tsgls->dst)) {
- pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
+ return -EINVAL;
+ }
+
+ /* Check for success or failure */
+ if (err) {
+ if (err == vec->crypt_error)
+ return 0;
+ pr_err("alg: skcipher: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+ driver, op, vec_name, vec->crypt_error, err, cfg->name);
+ return err;
+ }
+ if (vec->crypt_error) {
+ pr_err("alg: skcipher: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+ driver, op, vec_name, vec->crypt_error, cfg->name);
return -EINVAL;
}
@@ -1626,20 +2285,20 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
vec->len, 0, true);
if (err == -EOVERFLOW) {
- pr_err("alg: skcipher: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
if (err) {
- pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
/* If applicable, check that the algorithm generated the correct IV */
if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) {
- pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
hexdump(iv, ivsize);
return -EINVAL;
}
@@ -1653,14 +2312,17 @@ static int test_skcipher_vec(const char *driver, int enc,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
+ char vec_name[16];
unsigned int i;
int err;
if (fips_enabled && vec->fips_skip)
return 0;
+ sprintf(vec_name, "%u", vec_num);
+
for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
- err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+ err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
&default_cipher_testvec_configs[i],
req, tsgls);
if (err)
@@ -1675,7 +2337,7 @@ static int test_skcipher_vec(const char *driver, int enc,
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
sizeof(cfgname));
- err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+ err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
&cfg, req, tsgls);
if (err)
return err;
@@ -1685,6 +2347,186 @@ static int test_skcipher_vec(const char *driver, int enc,
return 0;
}
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * Generate a symmetric cipher test vector from the given implementation.
+ * Assumes the buffers in 'vec' were already allocated.
+ */
+static void generate_random_cipher_testvec(struct skcipher_request *req,
+ struct cipher_testvec *vec,
+ unsigned int maxdatasize,
+ char *name, size_t max_namelen)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const unsigned int maxkeysize = tfm->keysize;
+ const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct scatterlist src, dst;
+ u8 iv[MAX_IVLEN];
+ DECLARE_CRYPTO_WAIT(wait);
+
+ /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+ vec->klen = maxkeysize;
+ if (prandom_u32() % 4 == 0)
+ vec->klen = prandom_u32() % (maxkeysize + 1);
+ generate_random_bytes((u8 *)vec->key, vec->klen);
+ vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
+
+ /* IV */
+ generate_random_bytes((u8 *)vec->iv, ivsize);
+
+ /* Plaintext */
+ vec->len = generate_random_length(maxdatasize);
+ generate_random_bytes((u8 *)vec->ptext, vec->len);
+
+ /* If the key couldn't be set, no need to continue to encrypt. */
+ if (vec->setkey_error)
+ goto done;
+
+ /* Ciphertext */
+ sg_init_one(&src, vec->ptext, vec->len);
+ sg_init_one(&dst, vec->ctext, vec->len);
+ memcpy(iv, vec->iv, ivsize);
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+ skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
+ vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+done:
+ snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
+ vec->len, vec->klen);
+}
+
+/*
+ * Test the skcipher algorithm represented by @req against the corresponding
+ * generic implementation, if one is available.
+ */
+static int test_skcipher_vs_generic_impl(const char *driver,
+ const char *generic_driver,
+ struct skcipher_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ const unsigned int blocksize = crypto_skcipher_blocksize(tfm);
+ const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
+ char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ struct crypto_skcipher *generic_tfm = NULL;
+ struct skcipher_request *generic_req = NULL;
+ unsigned int i;
+ struct cipher_testvec vec = { 0 };
+ char vec_name[64];
+ struct testvec_config cfg;
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
+ int err;
+
+ if (noextratests)
+ return 0;
+
+ /* Keywrap isn't supported here yet as it handles its IV differently. */
+ if (strncmp(algname, "kw(", 3) == 0)
+ return 0;
+
+ if (!generic_driver) { /* Use default naming convention? */
+ err = build_generic_driver_name(algname, _generic_driver);
+ if (err)
+ return err;
+ generic_driver = _generic_driver;
+ }
+
+ if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
+ return 0;
+
+ generic_tfm = crypto_alloc_skcipher(generic_driver, 0, 0);
+ if (IS_ERR(generic_tfm)) {
+ err = PTR_ERR(generic_tfm);
+ if (err == -ENOENT) {
+ pr_warn("alg: skcipher: skipping comparison tests for %s because %s is unavailable\n",
+ driver, generic_driver);
+ return 0;
+ }
+ pr_err("alg: skcipher: error allocating %s (generic impl of %s): %d\n",
+ generic_driver, algname, err);
+ return err;
+ }
+
+ generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL);
+ if (!generic_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Check the algorithm properties for consistency. */
+
+ if (tfm->keysize != generic_tfm->keysize) {
+ pr_err("alg: skcipher: max keysize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, tfm->keysize, generic_tfm->keysize);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ivsize != crypto_skcipher_ivsize(generic_tfm)) {
+ pr_err("alg: skcipher: ivsize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, ivsize, crypto_skcipher_ivsize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (blocksize != crypto_skcipher_blocksize(generic_tfm)) {
+ pr_err("alg: skcipher: blocksize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, blocksize,
+ crypto_skcipher_blocksize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Now generate test vectors using the generic implementation, and test
+ * the other implementation against them.
+ */
+
+ vec.key = kmalloc(tfm->keysize, GFP_KERNEL);
+ vec.iv = kmalloc(ivsize, GFP_KERNEL);
+ vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
+ vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
+ if (!vec.key || !vec.iv || !vec.ptext || !vec.ctext) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < fuzz_iterations * 8; i++) {
+ generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
+ vec_name, sizeof(vec_name));
+ generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+
+ err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name,
+ &cfg, req, tsgls);
+ if (err)
+ goto out;
+ err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name,
+ &cfg, req, tsgls);
+ if (err)
+ goto out;
+ cond_resched();
+ }
+ err = 0;
+out:
+ kfree(vec.key);
+ kfree(vec.iv);
+ kfree(vec.ptext);
+ kfree(vec.ctext);
+ crypto_free_skcipher(generic_tfm);
+ skcipher_request_free(generic_req);
+ return err;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static int test_skcipher_vs_generic_impl(const char *driver,
+ const char *generic_driver,
+ struct skcipher_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ return 0;
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
static int test_skcipher(const char *driver, int enc,
const struct cipher_test_suite *suite,
struct skcipher_request *req,
@@ -1744,6 +2586,11 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
goto out;
err = test_skcipher(driver, DECRYPT, suite, req, tsgls);
+ if (err)
+ goto out;
+
+ err = test_skcipher_vs_generic_impl(driver, desc->generic_driver, req,
+ tsgls);
out:
free_cipher_test_sglists(tsgls);
skcipher_request_free(req);
@@ -2179,7 +3026,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
u32 *ctx = (u32 *)shash_desc_ctx(shash);
shash->tfm = tfm;
- shash->flags = 0;
*ctx = 420553207;
err = crypto_shash_final(shash, (u8 *)&val);
@@ -2493,6 +3339,12 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
return err;
}
+static u8 *test_pack_u32(u8 *dst, u32 val)
+{
+ memcpy(dst, &val, sizeof(val));
+ return dst + sizeof(val);
+}
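
test_pack_u32() exists so the two u32 fields can be appended without alignment assumptions. The key blob built below concatenates, in order, the raw key, the algorithm identifier, the parameter length, and the parameters themselves; as a layout sketch (field widths not to scale):

    /*
     * | key (key_len) | algo (u32) | param_len (u32) | params (param_len) |
     */
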
+
static int test_akcipher_one(struct crypto_akcipher *tfm,
const struct akcipher_testvec *vecs)
{
@@ -2503,10 +3355,11 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
struct crypto_wait wait;
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
- struct scatterlist src, dst, src_tab[2];
+ struct scatterlist src, dst, src_tab[3];
const char *m, *c;
unsigned int m_size, c_size;
const char *op;
+ u8 *key, *ptr;
if (testmgr_alloc_buf(xbuf))
return err;
@@ -2517,22 +3370,29 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
crypto_init_wait(&wait);
+ key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len,
+ GFP_KERNEL);
+ if (!key)
+ goto free_xbuf;
+ memcpy(key, vecs->key, vecs->key_len);
+ ptr = key + vecs->key_len;
+ ptr = test_pack_u32(ptr, vecs->algo);
+ ptr = test_pack_u32(ptr, vecs->param_len);
+ memcpy(ptr, vecs->params, vecs->param_len);
+
if (vecs->public_key_vec)
- err = crypto_akcipher_set_pub_key(tfm, vecs->key,
- vecs->key_len);
+ err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len);
else
- err = crypto_akcipher_set_priv_key(tfm, vecs->key,
- vecs->key_len);
+ err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len);
if (err)
goto free_req;
- err = -ENOMEM;
- out_len_max = crypto_akcipher_maxsize(tfm);
-
/*
* First run the tests that do not require a private key, such as
* encrypt or verify.
*/
+ err = -ENOMEM;
+ out_len_max = crypto_akcipher_maxsize(tfm);
outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
if (!outbuf_enc)
goto free_req;
@@ -2558,12 +3418,20 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
goto free_all;
memcpy(xbuf[0], m, m_size);
- sg_init_table(src_tab, 2);
+ sg_init_table(src_tab, 3);
sg_set_buf(&src_tab[0], xbuf[0], 8);
sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
- sg_init_one(&dst, outbuf_enc, out_len_max);
- akcipher_request_set_crypt(req, src_tab, &dst, m_size,
- out_len_max);
+ if (vecs->siggen_sigver_test) {
+ if (WARN_ON(c_size > PAGE_SIZE))
+ goto free_all;
+ memcpy(xbuf[1], c, c_size);
+ sg_set_buf(&src_tab[2], xbuf[1], c_size);
+ akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);
+ } else {
+ sg_init_one(&dst, outbuf_enc, out_len_max);
+ akcipher_request_set_crypt(req, src_tab, &dst, m_size,
+ out_len_max);
+ }
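+	/*
+	 * Summary of the request built above for sign/verify vectors:
+	 * src_tab[0] and src_tab[1] split the message across a page
+	 * boundary (8 bytes + the rest), src_tab[2] carries the expected
+	 * signature, and dst is NULL since verify() produces no output;
+	 * src_len = m_size and dst_len = c_size.
+	 */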
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
@@ -2576,18 +3444,21 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
goto free_all;
}
- if (req->dst_len != c_size) {
- pr_err("alg: akcipher: %s test failed. Invalid output len\n",
- op);
- err = -EINVAL;
- goto free_all;
- }
- /* verify that encrypted message is equal to expected */
- if (memcmp(c, outbuf_enc, c_size)) {
- pr_err("alg: akcipher: %s test failed. Invalid output\n", op);
- hexdump(outbuf_enc, c_size);
- err = -EINVAL;
- goto free_all;
+ if (!vecs->siggen_sigver_test) {
+ if (req->dst_len != c_size) {
+ pr_err("alg: akcipher: %s test failed. Invalid output len\n",
+ op);
+ err = -EINVAL;
+ goto free_all;
+ }
+ /* verify that encrypted message is equal to expected */
+ if (memcmp(c, outbuf_enc, c_size) != 0) {
+ pr_err("alg: akcipher: %s test failed. Invalid output\n",
+ op);
+ hexdump(outbuf_enc, c_size);
+ err = -EINVAL;
+ goto free_all;
+ }
}
/*
@@ -2642,6 +3513,7 @@ free_all:
kfree(outbuf_enc);
free_req:
akcipher_request_free(req);
+ kfree(key);
free_xbuf:
testmgr_free_buf(xbuf);
return err;
@@ -2699,12 +3571,14 @@ static int alg_test_null(const struct alg_test_desc *desc,
static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "adiantum(xchacha12,aes)",
+ .generic_driver = "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha12_aes_tv_template)
},
}, {
.alg = "adiantum(xchacha20,aes)",
+ .generic_driver = "adiantum(xchacha20-generic,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha20_aes_tv_template)
@@ -2921,6 +3795,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ /* Same as cbc(sm4) except the key is stored in
+ * hardware secure memory which we reference by index
+ */
+ .alg = "cbc(psm4)",
+ .test = alg_test_null,
+ }, {
.alg = "cbc(serpent)",
.test = alg_test_skcipher,
.suite = {
@@ -2947,6 +3827,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "ccm(aes)",
+ .generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -3055,6 +3936,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+
+ /* Same as ctr(sm4) except the key is stored in
+ * hardware secure memory which we reference by index
+ */
+ .alg = "ctr(psm4)",
+ .test = alg_test_null,
+ }, {
.alg = "ctr(serpent)",
.test = alg_test_skcipher,
.suite = {
@@ -3080,6 +3968,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(cts_mode_tv_template)
}
}, {
+ /* Same as cts(cbc(aes)) except the key is stored in
+ * hardware secure memory which we reference by index
+ */
+ .alg = "cts(cbc(paes))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
.alg = "deflate",
.test = alg_test_comp,
.fips_allowed = 1,
@@ -3358,7 +4253,14 @@ static const struct alg_test_desc alg_test_descs[] = {
.kpp = __VECS(ecdh_tv_template)
}
}, {
+ .alg = "ecrdsa",
+ .test = alg_test_akcipher,
+ .suite = {
+ .akcipher = __VECS(ecrdsa_tv_template)
+ }
+ }, {
.alg = "gcm(aes)",
+ .generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -3477,30 +4379,35 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "lrw(aes)",
+ .generic_driver = "lrw(ecb(aes-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_lrw_tv_template)
}
}, {
.alg = "lrw(camellia)",
+ .generic_driver = "lrw(ecb(camellia-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(camellia_lrw_tv_template)
}
}, {
.alg = "lrw(cast6)",
+ .generic_driver = "lrw(ecb(cast6-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast6_lrw_tv_template)
}
}, {
.alg = "lrw(serpent)",
+ .generic_driver = "lrw(ecb(serpent-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(serpent_lrw_tv_template)
}
}, {
.alg = "lrw(twofish)",
+ .generic_driver = "lrw(ecb(twofish-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tf_lrw_tv_template)
@@ -3625,6 +4532,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4106(gcm(aes))",
+ .generic_driver = "rfc4106(gcm_base(ctr(aes-generic),ghash-generic))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -3632,6 +4540,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4309(ccm(aes))",
+ .generic_driver = "rfc4309(ccm_base(ctr(aes-generic),cbcmac(aes-generic)))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -3639,6 +4548,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4543(gcm(aes))",
+ .generic_driver = "rfc4543(gcm_base(ctr(aes-generic),ghash-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(aes_gcm_rfc4543_tv_template)
@@ -3835,6 +4745,7 @@ static const struct alg_test_desc alg_test_descs[] = {
},
}, {
.alg = "xts(aes)",
+ .generic_driver = "xts(ecb(aes-generic))",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
@@ -3842,12 +4753,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "xts(camellia)",
+ .generic_driver = "xts(ecb(camellia-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(camellia_xts_tv_template)
}
}, {
.alg = "xts(cast6)",
+ .generic_driver = "xts(ecb(cast6-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast6_xts_tv_template)
@@ -3861,12 +4774,14 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "xts(serpent)",
+ .generic_driver = "xts(ecb(serpent-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(serpent_xts_tv_template)
}
}, {
.alg = "xts(twofish)",
+ .generic_driver = "xts(ecb(twofish-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tf_xts_tv_template)
@@ -4020,8 +4935,9 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
type, mask);
test_done:
- if (fips_enabled && rc)
- panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
+ if (rc && (fips_enabled || panic_on_fail))
+ panic("alg: self-tests for %s (%s) failed in %s mode!\n",
+ driver, alg, fips_enabled ? "fips" : "panic_on_fail");
if (fips_enabled && !rc)
pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index f267633cf13a..b6daae1f6a1d 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -25,6 +25,8 @@
#ifndef _CRYPTO_TESTMGR_H
#define _CRYPTO_TESTMGR_H
+#include <linux/oid_registry.h>
+
#define MAX_IVLEN 32
/*
@@ -34,6 +36,8 @@
* @digest: Pointer to expected digest
* @psize: Length of source data in bytes
* @ksize: Length of @key in bytes (0 if no key)
+ * @setkey_error: Expected error from setkey()
+ * @digest_error: Expected error from digest()
*/
struct hash_testvec {
const char *key;
@@ -41,6 +45,8 @@ struct hash_testvec {
const char *digest;
unsigned short psize;
unsigned short ksize;
+ int setkey_error;
+ int digest_error;
};
/*
@@ -52,12 +58,13 @@ struct hash_testvec {
* @ptext: Pointer to plaintext
* @ctext: Pointer to ciphertext
* @len: Length of @ptext and @ctext in bytes
- * @fail: If set to one, the test need to fail
* @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* ( e.g. test needs to fail due to a weak key )
* @fips_skip: Skip the test vector in FIPS mode
* @generates_iv: Encryption should ignore the given IV, and output @iv_out.
* Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)").
+ * @setkey_error: Expected error from setkey()
+ * @crypt_error: Expected error from encrypt() and decrypt()
*/
struct cipher_testvec {
const char *key;
@@ -65,12 +72,13 @@ struct cipher_testvec {
const char *iv_out;
const char *ptext;
const char *ctext;
- bool fail;
unsigned char wk; /* weak key flag */
- unsigned char klen;
+ unsigned short klen;
unsigned short len;
bool fips_skip;
bool generates_iv;
+ int setkey_error;
+ int crypt_error;
};
/*
@@ -82,7 +90,6 @@ struct cipher_testvec {
* @ctext: Pointer to the full authenticated ciphertext. For AEADs that
* produce a separate "ciphertext" and "authentication tag", these
* two parts are concatenated: ciphertext || tag.
- * @fail: setkey() failure expected?
* @novrfy: Decryption verification failure expected?
* @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* (e.g. setkey() needs to fail due to a weak key)
@@ -90,6 +97,9 @@ struct cipher_testvec {
* @plen: Length of @ptext in bytes
* @alen: Length of @assoc in bytes
* @clen: Length of @ctext in bytes
+ * @setkey_error: Expected error from setkey()
+ * @setauthsize_error: Expected error from setauthsize()
+ * @crypt_error: Expected error from encrypt() and decrypt()
*/
struct aead_testvec {
const char *key;
@@ -97,13 +107,15 @@ struct aead_testvec {
const char *ptext;
const char *assoc;
const char *ctext;
- bool fail;
unsigned char novrfy;
unsigned char wk;
unsigned char klen;
unsigned short plen;
unsigned short clen;
unsigned short alen;
+ int setkey_error;
+ int setauthsize_error;
+ int crypt_error;
};
struct cprng_testvec {
@@ -135,13 +147,16 @@ struct drbg_testvec {
struct akcipher_testvec {
const unsigned char *key;
+ const unsigned char *params;
const unsigned char *m;
const unsigned char *c;
unsigned int key_len;
+ unsigned int param_len;
unsigned int m_size;
unsigned int c_size;
bool public_key_vec;
bool siggen_sigver_test;
+ enum OID algo;
};
struct kpp_testvec {
@@ -551,6 +566,160 @@ static const struct akcipher_testvec rsa_tv_template[] = {
};
/*
+ * EC-RDSA test vectors are generated by gost-engine.
+ */
+static const struct akcipher_testvec ecrdsa_tv_template[] = {
+ {
+ .key =
+ "\x04\x40\xd5\xa7\x77\xf9\x26\x2f\x8c\xbd\xcc\xe3\x1f\x01\x94\x05"
+ "\x3d\x2f\xec\xb5\x00\x34\xf5\x51\x6d\x3b\x90\x4b\x23\x28\x6f\x1d"
+ "\xc8\x36\x61\x60\x36\xec\xbb\xb4\x0b\x95\x4e\x54\x4f\x15\x21\x05"
+ "\xd8\x52\x66\x44\x31\x7e\x5d\xc5\xd1\x26\x00\x5f\x60\xd8\xf0\xc7"
+ "\x27\xfc",
+ .key_len = 66,
+ .params = /* OID_gostCPSignA */
+ "\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x01\x06\x08\x2a\x85\x03"
+ "\x07\x01\x01\x02\x02",
+ .param_len = 21,
+ .c =
+ "\x41\x32\x09\x73\xa4\xc1\x38\xd6\x63\x7d\x8b\xf7\x50\x3f\xda\x9f"
+ "\x68\x48\xc1\x50\xe3\x42\x3a\x9b\x2b\x28\x12\x2a\xa7\xc2\x75\x31"
+ "\x65\x77\x8c\x3c\x9e\x0d\x56\xb2\xf9\xdc\x04\x33\x3e\xb0\x9e\xf9"
+ "\x74\x4e\x59\xb3\x83\xf2\x91\x27\xda\x5e\xc7\x33\xc0\xc1\x8f\x41",
+ .c_size = 64,
+ .algo = OID_gost2012PKey256,
+ .m =
+ "\x75\x1b\x9b\x40\x25\xb9\x96\xd2\x9b\x00\x41\xb3\x58\xbf\x23\x14"
+ "\x79\xd2\x76\x64\xa3\xbd\x66\x10\x79\x05\x5a\x06\x42\xec\xb9\xc9",
+ .m_size = 32,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key =
+ "\x04\x40\x66\x6f\xd6\xb7\x06\xd0\xf5\xa5\x6f\x69\x5c\xa5\x13\x45"
+ "\x14\xdd\xcb\x12\x9c\x1b\xf5\x28\x64\x7a\x49\x48\x29\x14\x66\x42"
+ "\xb8\x1b\x5c\xf9\x56\x6d\x08\x3b\xce\xbb\x62\x2f\xc2\x3c\xc5\x49"
+ "\x93\x27\x70\x20\xcc\x79\xeb\xdc\x76\x8e\x48\x6e\x04\x96\xc3\x29"
+ "\xa0\x73",
+ .key_len = 66,
+ .params = /* OID_gostCPSignB */
+ "\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x02\x06\x08\x2a\x85\x03"
+ "\x07\x01\x01\x02\x02",
+ .param_len = 21,
+ .c =
+ "\x45\x6d\x4a\x03\x1d\x5c\x0b\x17\x79\xe7\x19\xdb\xbf\x81\x9f\x82"
+ "\xae\x06\xda\xf5\x47\x00\x05\x80\xc3\x16\x06\x9a\x8e\x7c\xb2\x8e"
+ "\x7f\x74\xaa\xec\x6b\x7b\x7f\x8b\xc6\x0b\x10\x42\x4e\x91\x2c\xdf"
+ "\x7b\x8b\x15\xf4\x9e\x59\x0f\xc7\xa4\x68\x2e\xce\x89\xdf\x84\xe9",
+ .c_size = 64,
+ .algo = OID_gost2012PKey256,
+ .m =
+ "\xd0\x54\x00\x27\x6a\xeb\xce\x6c\xf5\xf6\xfb\x57\x18\x18\x21\x13"
+ "\x11\x23\x4a\x70\x43\x52\x7a\x68\x11\x65\x45\x37\xbb\x25\xb7\x40",
+ .m_size = 32,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key =
+ "\x04\x40\x05\x91\xa9\x7d\xcb\x87\xdc\x98\xa1\xbf\xff\xdd\x20\x61"
+ "\xaa\x58\x3b\x2d\x8e\x9c\x41\x9d\x4f\xc6\x23\x17\xf9\xca\x60\x65"
+ "\xbc\x97\x97\xf6\x6b\x24\xe8\xac\xb1\xa7\x61\x29\x3c\x71\xdc\xad"
+ "\xcb\x20\xbe\x96\xe8\xf4\x44\x2e\x49\xd5\x2c\xb9\xc9\x3b\x9c\xaa"
+ "\xba\x15",
+ .key_len = 66,
+ .params = /* OID_gostCPSignC */
+ "\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x03\x06\x08\x2a\x85\x03"
+ "\x07\x01\x01\x02\x02",
+ .param_len = 21,
+ .c =
+ "\x3b\x2e\x2e\x74\x74\x47\xda\xea\x93\x90\x6a\xe2\xf5\xf5\xe6\x46"
+ "\x11\xfc\xab\xdc\x52\xbc\x58\xdb\x45\x44\x12\x4a\xf7\xd0\xab\xc9"
+ "\x73\xba\x64\xab\x0d\xac\x4e\x72\x10\xa8\x04\xf6\x1e\xe0\x48\x6a"
+ "\xcd\xe8\xe3\x78\x73\x77\x82\x24\x8d\xf1\xd3\xeb\x4c\x25\x7e\xc0",
+ .c_size = 64,
+ .algo = OID_gost2012PKey256,
+ .m =
+ "\x52\x33\xf4\x3f\x7b\x5d\xcf\x20\xee\xe4\x5c\xab\x0b\x3f\x14\xd6"
+ "\x9f\x16\xc6\x1c\xb1\x3f\x84\x41\x69\xec\x34\xfd\xf1\xf9\xa3\x39",
+ .m_size = 32,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key =
+ "\x04\x81\x80\x85\x46\x8f\x16\xf8\x7a\x7e\x4a\xc3\x81\x9e\xf1\x6e"
+ "\x94\x1e\x5d\x02\x87\xea\xfa\xa0\x0a\x17\x70\x49\x64\xad\x95\x68"
+ "\x60\x0a\xf0\x57\x29\x41\x79\x30\x3c\x61\x69\xf2\xa6\x94\x87\x17"
+ "\x54\xfa\x97\x2c\xe6\x1e\x0a\xbb\x55\x10\x57\xbe\xf7\xc1\x77\x2b"
+ "\x11\x74\x0a\x50\x37\x14\x10\x2a\x45\xfc\x7a\xae\x1c\x4c\xce\x08"
+ "\x05\xb7\xa4\x50\xc8\x3d\x39\x3d\xdc\x5c\x8f\x96\x6c\xe7\xfc\x21"
+ "\xc3\x2d\x1e\x9f\x11\xb3\xec\x22\x18\x8a\x8c\x08\x6b\x8b\xed\xf5"
+ "\xc5\x47\x3c\x7e\x73\x59\x44\x1e\x77\x83\x84\x52\x9e\x3b\x7d\xff"
+ "\x9d\x86\x1a",
+ .key_len = 131,
+ .params = /* OID_gostTC26Sign512A */
+ "\x30\x0b\x06\x09\x2a\x85\x03\x07\x01\x02\x01\x02\x01",
+ .param_len = 13,
+ .c =
+ "\x92\x81\x74\x5f\x95\x48\x38\x87\xd9\x8f\x5e\xc8\x8a\xbb\x01\x4e"
+ "\xb0\x75\x3c\x2f\xc7\x5a\x08\x4c\x68\xab\x75\x01\x32\x75\x75\xb5"
+ "\x37\xe0\x74\x6d\x94\x84\x31\x2a\x6b\xf4\xf7\xb7\xa7\x39\x7b\x46"
+ "\x07\xf0\x98\xbd\x33\x18\xa1\x72\xb2\x6d\x54\xe3\xde\x91\xc2\x2e"
+ "\x4f\x6a\xf8\xb7\xec\xa8\x83\xc9\x8f\xd9\xce\x7c\x45\x06\x02\xf4"
+ "\x4f\x21\xb5\x24\x3d\xb4\xb5\xd8\x58\x42\xbe\x2d\x29\xae\x93\xc0"
+ "\x13\x41\x96\x35\x08\x69\xe8\x36\xc7\xd1\x83\x81\xd7\xca\xfb\xc0"
+ "\xd2\xb7\x78\x32\x3e\x30\x1a\x1e\xce\xdc\x34\x35\xc6\xad\x68\x24",
+ .c_size = 128,
+ .algo = OID_gost2012PKey512,
+ .m =
+ "\x1f\x70\xb5\xe9\x55\x12\xd6\x88\xcc\x55\xb9\x0c\x7f\xc4\x94\xf2"
+ "\x04\x77\x41\x12\x02\xd6\xf1\x1f\x83\x56\xe9\xd6\x5a\x6a\x72\xb9"
+ "\x6e\x8e\x24\x2a\x84\xf1\xba\x67\xe8\xbf\xff\xc1\xd3\xde\xfb\xc6"
+ "\xa8\xf6\x80\x01\xb9\x27\xac\xd8\x45\x96\x66\xa1\xee\x48\x08\x3f",
+ .m_size = 64,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key =
+ "\x04\x81\x80\x28\xf3\x2b\x92\x04\x32\xea\x66\x20\xde\xa0\x2f\x74"
+ "\xbf\x2d\xf7\xb5\x30\x76\xb1\xc8\xee\x38\x9f\xea\xe5\xad\xc6\xa3"
+ "\x28\x1e\x51\x3d\x67\xa3\x41\xcc\x6b\x81\xe2\xe2\x9e\x82\xf3\x78"
+ "\x56\xd7\x2e\xb2\xb5\xbe\xb4\x50\x21\x05\xe5\x29\x82\xef\x15\x1b"
+ "\xc0\xd7\x30\xd6\x2f\x96\xe8\xff\x99\x4c\x25\xcf\x9a\xfc\x54\x30"
+ "\xce\xdf\x59\xe9\xc6\x45\xce\xe4\x22\xe8\x01\xd5\xcd\x2f\xaa\x78"
+ "\x99\xc6\x04\x1e\x6f\x4c\x25\x6a\x76\xad\xff\x48\xf3\xb3\xb4\xd6"
+ "\x14\x5c\x2c\x0e\xea\xa2\x4b\xb9\x7e\x89\x77\x02\x3a\x29\xc8\x16"
+ "\x8e\x78\x48",
+ .key_len = 131,
+ .params = /* OID_gostTC26Sign512B */
+ "\x30\x0b\x06\x09\x2a\x85\x03\x07\x01\x02\x01\x02\x02",
+ .param_len = 13,
+ .c =
+ "\x0a\xed\xb6\x27\xea\xa7\xa6\x7e\x2f\xc1\x02\x21\x74\xce\x27\xd2"
+ "\xee\x8a\x92\x4d\xa9\x43\x2d\xa4\x5b\xdc\x23\x02\xfc\x3a\xf3\xb2"
+ "\x10\x93\x0b\x40\x1b\x75\x95\x3e\x39\x41\x37\xb9\xab\x51\x09\xeb"
+ "\xf1\xb9\x49\x58\xec\x58\xc7\xf9\x2e\xb9\xc9\x40\xf2\x00\x39\x7e"
+ "\x3f\xde\x72\xe3\x85\x67\x06\xbe\xd8\xb8\xc1\x81\x1e\xe3\x0a\xfe"
+ "\xce\xd3\x77\x92\x56\x8c\x58\xf9\x37\x60\x2d\xe6\x8b\x66\xa3\xdd"
+ "\xd2\xf0\xf8\xda\x1b\x20\xbc\x9c\xec\x29\x5d\xd1\x8f\xcc\x37\xd1"
+ "\x3b\x8d\xb7\xc1\xe0\xb8\x3b\xef\x14\x1b\x87\xbc\xc1\x03\x9a\x93",
+ .c_size = 128,
+ .algo = OID_gost2012PKey512,
+ .m =
+ "\x11\x24\x21\x27\xf2\x42\x9f\xce\x5a\xf9\x01\x70\xe0\x07\x2b\x57"
+ "\xfb\x7d\x77\x5e\x74\x66\xe6\xa5\x40\x4c\x1a\x85\x18\xff\xd0\x63"
+ "\xe0\x39\xd3\xd6\xe5\x17\xf8\xc3\x4b\xc6\x1c\x33\x1a\xca\xa6\x66"
+ "\x6d\xf4\xd2\x45\xc2\x83\xa0\x42\x95\x05\x9d\x89\x8e\x0a\xca\xcc",
+ .m_size = 64,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+};
+
+/*
* PKCS#1 RSA test vectors. Obtained from CAVS testing.
*/
static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
@@ -5634,7 +5803,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
.psize = 80,
.digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
- },
+ }, { /* Regression test for overflow in AVX2 implementation */
+ .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff",
+ .psize = 300,
+ .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
+ "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
+ }
};
/* NHPoly1305 test vectors from https://github.com/google/adiantum */
@@ -7042,7 +7253,7 @@ static const struct cipher_testvec des_tv_template[] = {
"\xb4\x99\x26\xf7\x1f\xe1\xd4\x90",
.len = 24,
}, { /* Weak key */
- .fail = true,
+ .setkey_error = -EINVAL,
.wk = 1,
.key = "\x01\x01\x01\x01\x01\x01\x01\x01",
.klen = 8,
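
With the boolean @fail flag gone, a vector that must be rejected records the exact errno expected from setkey(), as the DES hunk above shows. A minimal sketch of such an entry (the array name is illustrative; the key is the well-known all-0x01 DES weak key):

static const struct cipher_testvec example_weak_key_tv[] = {
	{ /* setkey() must reject this weak key with -EINVAL */
		.key = "\x01\x01\x01\x01\x01\x01\x01\x01",
		.klen = 8,
		.wk = 1,	/* needs CRYPTO_TFM_REQ_FORBID_WEAK_KEYS */
		.setkey_error = -EINVAL,
	},
};
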
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index f8e1d9f9938f..40020f8adc46 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -677,7 +677,7 @@ MODULE_ALIAS_CRYPTO("tgr192");
MODULE_ALIAS_CRYPTO("tgr160");
MODULE_ALIAS_CRYPTO("tgr128");
-module_init(tgr192_mod_init);
+subsys_initcall(tgr192_mod_init);
module_exit(tgr192_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 07e62433fbfb..dbac6e233285 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -205,7 +205,7 @@ static void __exit twofish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-module_init(twofish_mod_init);
+subsys_initcall(twofish_mod_init);
module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 5f436dfdfc61..f50a85060b39 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -690,7 +690,7 @@ static void __exit vmac_module_exit(void)
crypto_unregister_template(&vmac64_tmpl);
}
-module_init(vmac_module_init);
+subsys_initcall(vmac_module_init);
module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 149e577fb772..1b8e502d999f 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1168,7 +1168,7 @@ MODULE_ALIAS_CRYPTO("wp512");
MODULE_ALIAS_CRYPTO("wp384");
MODULE_ALIAS_CRYPTO("wp256");
-module_init(wp512_mod_init);
+subsys_initcall(wp512_mod_init);
module_exit(wp512_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index c055f57fab11..94ca694ef091 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -282,7 +282,7 @@ static void __exit crypto_xcbc_module_exit(void)
crypto_unregister_template(&crypto_xcbc_tmpl);
}
-module_init(crypto_xcbc_module_init);
+subsys_initcall(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/xts.c b/crypto/xts.c
index 847f54f76789..33cf726df4ac 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -137,8 +137,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
- if (!err)
+ if (!err) {
+ struct rctx *rctx = skcipher_request_ctx(req);
+
+ rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = xor_tweak_post(req);
+ }
skcipher_request_complete(req, err);
}
@@ -359,7 +363,7 @@ static void __exit crypto_module_exit(void)
crypto_unregister_template(&crypto_tmpl);
}
-module_init(crypto_module_init);
+subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 9a76b3ed8b8b..2c04055e407f 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -257,7 +257,7 @@ static void __exit zstd_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(zstd_mod_init);
+subsys_initcall(zstd_mod_init);
module_exit(zstd_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/drivers/Makefile b/drivers/Makefile
index bb15b9d0e793..c61cde554340 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -56,7 +56,7 @@ obj-y += tty/
obj-y += char/
# iommu/ comes before gpu as gpu are using iommu controllers
-obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+obj-y += iommu/
# gpu/ comes after char for AGP vs DRM startup and after iommu
obj-y += gpu/
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index b58850389094..f92033661239 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -97,22 +97,22 @@ static ssize_t acpi_table_aml_read(struct config_item *cfg,
CONFIGFS_BIN_ATTR(acpi_table_, aml, NULL, MAX_ACPI_TABLE_SIZE);
-struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
+static struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
&acpi_table_attr_aml,
NULL,
};
-ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (!h)
return -EINVAL;
- return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->signature);
+ return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->signature);
}
-ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -122,7 +122,7 @@ ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
return sprintf(str, "%d\n", h->length);
}
-ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -132,7 +132,7 @@ ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
return sprintf(str, "%d\n", h->revision);
}
-ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -142,7 +142,7 @@ ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
return sprintf(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id);
}
-ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -152,7 +152,7 @@ ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id);
}
-ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -162,18 +162,19 @@ ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
return sprintf(str, "%d\n", h->oem_revision);
}
-ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg,
+ char *str)
{
struct acpi_table_header *h = get_header(cfg);
if (!h)
return -EINVAL;
- return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->asl_compiler_id);
+ return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->asl_compiler_id);
}
-ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
- char *str)
+static ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
+ char *str)
{
struct acpi_table_header *h = get_header(cfg);
@@ -192,7 +193,7 @@ CONFIGFS_ATTR_RO(acpi_table_, oem_revision);
CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_id);
CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_revision);
-struct configfs_attribute *acpi_table_attrs[] = {
+static struct configfs_attribute *acpi_table_attrs[] = {
&acpi_table_attr_signature,
&acpi_table_attr_length,
&acpi_table_attr_revision,
@@ -232,7 +233,7 @@ static void acpi_table_drop_item(struct config_group *group,
acpi_tb_unload_table(table->index);
}
-struct configfs_group_operations acpi_table_group_ops = {
+static struct configfs_group_operations acpi_table_group_ops = {
.make_item = acpi_table_make_item,
.drop_item = acpi_table_drop_item,
};
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 4a434c23a196..d18246a2a65e 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -390,7 +390,7 @@ again:
return size > 0 ? size : ret;
}
-static int acpi_aml_thread(void *unsed)
+static int acpi_aml_thread(void *unused)
{
acpi_osd_exec_callback function = NULL;
void *context;
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
index 2cd9f738812b..43f1b99c86ca 100644
--- a/drivers/acpi/acpi_lpat.c
+++ b/drivers/acpi/acpi_lpat.c
@@ -22,7 +22,7 @@
* LPAT conversion table
*
* @lpat_table: the temperature_raw mapping table structure
- * @raw: the raw value, used as a key to get the temerature from the
+ * @raw: the raw value, used as a key to get the temperature from the
* above mapping table
*
* A positive converted temperature value will be returned on success,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 5f94c35d165f..cf768608437e 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -18,7 +18,7 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/clk-lpss.h>
+#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -1142,8 +1142,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
.thaw_noirq = acpi_subsys_thaw_noirq,
.poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_lpss_suspend_late,
- .poweroff_noirq = acpi_subsys_suspend_noirq,
- .restore_noirq = acpi_subsys_resume_noirq,
+ .poweroff_noirq = acpi_lpss_suspend_noirq,
+ .restore_noirq = acpi_lpss_resume_noirq,
.restore_early = acpi_lpss_resume_early,
#endif
.runtime_suspend = acpi_lpss_runtime_suspend,
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index a2dfbf6b004e..13d513b81589 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
* expected_return_btypes - Allowed type(s) for the return value
*/
struct acpi_name_info {
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
u16 argument_list;
u8 expected_btypes;
};
@@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
converted_object);
struct acpi_simple_repair_info {
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
u32 unexpected_btypes;
u32 package_index;
acpi_object_converter object_converter;
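
The ACPI_NAME_SIZE to ACPI_NAMESEG_SIZE rename recurs through the ACPICA hunks below; both constants describe the same fixed four-byte ACPI name segment. A small self-contained illustration of why buffers pair the constant with one extra terminator byte (the macro value mirrors the ACPICA header):

#include <linux/string.h>

#define EXAMPLE_NAMESEG_SIZE 4	/* same value as ACPI_NAMESEG_SIZE */

static void nameseg_example(void)
{
	char name[EXAMPLE_NAMESEG_SIZE + 1];	/* +1 for the NUL */

	memcpy(name, "_SB_", EXAMPLE_NAMESEG_SIZE);
	name[EXAMPLE_NAMESEG_SIZE] = '\0';	/* printable as a C string */
}
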
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index bb43305cb215..4027eaab18a4 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -453,7 +453,7 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags)
/* Dump a _PLD buffer if present */
- if (ACPI_COMPARE_NAME
+ if (ACPI_COMPARE_NAMESEG
((ACPI_CAST_PTR
(struct acpi_namespace_node,
acpi_gbl_db_method_info.method)->name.ascii),
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 004d34d9369b..63fe30e86807 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -354,7 +354,7 @@ acpi_status acpi_db_find_name_in_namespace(char *name_arg)
char acpi_name[5] = "____";
char *acpi_name_ptr = acpi_name;
- if (strlen(name_arg) > ACPI_NAME_SIZE) {
+ if (strlen(name_arg) > ACPI_NAMESEG_SIZE) {
acpi_os_printf("Name must be no longer than 4 characters\n");
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a4a24ffe5fae..4ebd23700bbc 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -200,7 +200,7 @@ acpi_ds_initialize_objects(u32 table_index,
/* DSDT is always the first AML table */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) {
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"\nInitializing Namespace objects:\n"));
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index c92d2f6ebe01..b04f982e59fa 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -292,7 +292,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
acpi_status status;
u32 gpe_number;
u8 temp_gpe_number;
- char name[ACPI_NAME_SIZE + 1];
+ char name[ACPI_NAMESEG_SIZE + 1];
u8 type;
ACPI_FUNCTION_TRACE(ev_match_gpe_method);
@@ -310,7 +310,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
* 1) Extract the method name and null terminate it
*/
ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
- name[ACPI_NAME_SIZE] = 0;
+ name[ACPI_NAMESEG_SIZE] = 0;
/* 2) Name must begin with an underscore */
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index bd68d66e89f0..6b76be5212a4 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -53,10 +53,10 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
/* Special case for root */
- size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+ size_needed = 1 + (ACPI_NAMESEG_SIZE * num_name_segs) + 2 + 1;
} else {
size_needed =
- prefix_count + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+ prefix_count + (ACPI_NAMESEG_SIZE * num_name_segs) + 2 + 1;
}
/*
@@ -141,7 +141,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
}
for (index = 0;
- (index < ACPI_NAME_SIZE)
+ (index < ACPI_NAMESEG_SIZE)
&& (acpi_ut_valid_name_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
}
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 75192b958544..7b855603f81a 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -683,7 +683,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Point to next name segment and make this node current */
- path += ACPI_NAME_SIZE;
+ path += ACPI_NAMESEG_SIZE;
current_node = this_node;
}
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 5470213b8e64..6eb63db72249 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -74,6 +74,10 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
ACPI_FUNCTION_NAME(ns_delete_node);
+ if (!node) {
+ return_VOID;
+ }
+
/* Detach an object if there is one */
acpi_ns_detach_object(node);
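
The NULL check added to acpi_ns_delete_node() above follows a common defensive idiom: a delete helper that tolerates NULL lets error and teardown paths run unconditionally. A generic sketch (types and names are illustrative):

struct example_node {
	void *object;
};

static void example_delete_node(struct example_node *node)
{
	if (!node)
		return;		/* same guard as the hunk above */

	/* detach node->object here, then free the node itself */
}
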
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 15070bd0c28a..1b12c172e115 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -70,7 +70,7 @@ void acpi_ns_print_pathname(u32 num_segments, const char *pathname)
acpi_os_printf("?");
}
- pathname += ACPI_NAME_SIZE;
+ pathname += ACPI_NAMESEG_SIZE;
num_segments--;
if (num_segments) {
acpi_os_printf(".");
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 19fb8dda870f..53e5d00d3a5e 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -478,7 +478,7 @@ acpi_ns_find_ini_methods(acpi_handle obj_handle,
/* We are only looking for methods named _INI */
- if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) {
+ if (!ACPI_COMPARE_NAMESEG(node->name.ascii, METHOD_NAME__INI)) {
return (AE_OK);
}
@@ -641,7 +641,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
* Note: We know there is an _INI within this subtree, but it may not be
* under this particular device, it may be lower in the branch.
*/
- if (!ACPI_COMPARE_NAME(device_node->name.ascii, "_SB_") ||
+ if (!ACPI_COMPARE_NAMESEG(device_node->name.ascii, "_SB_") ||
device_node->parent != acpi_gbl_root_node) {
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_METHOD, device_node,
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 289c15bb8c6a..370bbc867745 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -108,8 +108,8 @@ acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer)
/* Just copy the ACPI name from the Node and zero terminate it */
node_name = acpi_ut_get_node_name(node);
- ACPI_MOVE_NAME(buffer->pointer, node_name);
- ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
+ ACPI_COPY_NAMESEG(buffer->pointer, node_name);
+ ((char *)buffer->pointer)[ACPI_NAMESEG_SIZE] = 0;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%4.4s\n", (char *)buffer->pointer));
return_ACPI_STATUS(AE_OK);
@@ -198,7 +198,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing)
{
u32 length = 0, i;
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
u8 do_no_trailing;
char c, *left, *right;
struct acpi_namespace_node *next_node;
@@ -446,7 +446,7 @@ static void acpi_ns_normalize_pathname(char *original_path)
/* Do one nameseg at a time */
- for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
+ for (i = 0; (i < ACPI_NAMESEG_SIZE) && *input_path; i++) {
if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
*new_path = *input_path;
new_path++;
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 8638f43cfc3d..79d86da1c892 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
}
}
+ if (obj_desc->common.type == ACPI_TYPE_REGION) {
+ acpi_ut_remove_address_range(obj_desc->region.space_id, node);
+ }
+
/* Clear the Node entry in all cases */
node->object = NULL;
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index c0b4f7bedfab..f16cf5e4742c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -203,7 +203,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
/* Found OSDT table, enable the namespace override feature */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) &&
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) &&
pass_number == ACPI_IMODE_LOAD_PASS1) {
walk_state->namespace_override = TRUE;
}
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 0aacfa48e20d..be86fea8e4d4 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -316,7 +316,7 @@ static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
this_name = acpi_object_repair_info;
while (this_name->object_converter) {
- if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) {
+ if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) {
/* Check if we can actually repair this name/type combination */
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index d5804a6d1d65..8d776256b213 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
return_object_ptr);
typedef struct acpi_repair_info {
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
acpi_repair_function repair_function;
} acpi_repair_info;
@@ -188,7 +188,7 @@ static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct
this_name = acpi_ns_repairable_names;
while (this_name->repair_function) {
- if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) {
+ if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) {
return (this_name);
}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index e5cef1edf49f..6bc90d46db5c 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -178,7 +178,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
}
}
- info->length = (ACPI_NAME_SIZE * info->num_segments) +
+ info->length = (ACPI_NAMESEG_SIZE * info->num_segments) +
4 + info->num_carats;
info->next_external_char = next_external_char;
@@ -249,7 +249,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Build the name (minus path separators) */
for (; num_segments; num_segments--) {
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (ACPI_IS_PATH_SEPARATOR(*external_name) ||
(*external_name == 0)) {
@@ -274,7 +274,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Move on the next segment */
external_name++;
- result += ACPI_NAME_SIZE;
+ result += ACPI_NAMESEG_SIZE;
}
/* Terminate the string */
@@ -489,12 +489,12 @@ acpi_ns_externalize_name(u32 internal_name_length,
/* Copy and validate the 4-char name segment */
- ACPI_MOVE_NAME(&(*converted_name)[j],
- &internal_name[names_index]);
+ ACPI_COPY_NAMESEG(&(*converted_name)[j],
+ &internal_name[names_index]);
acpi_ut_repair_name(&(*converted_name)[j]);
- j += ACPI_NAME_SIZE;
- names_index += ACPI_NAME_SIZE;
+ j += ACPI_NAMESEG_SIZE;
+ names_index += ACPI_NAMESEG_SIZE;
}
}
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index de2d3135d6a9..55b4a5b3331f 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -495,8 +495,8 @@ acpi_status acpi_install_method(u8 *buffer)
/* Table must be a DSDT or SSDT */
- if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) &&
- !ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+ if (!ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) &&
+ !ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT)) {
return (AE_BAD_HEADER);
}
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 9d9d442cd999..e62c7897fdf1 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -150,21 +150,21 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
/* Two name segments */
- end += 1 + (2 * ACPI_NAME_SIZE);
+ end += 1 + (2 * ACPI_NAMESEG_SIZE);
break;
case AML_MULTI_NAME_PREFIX:
/* Multiple name segments, 4 chars each, count in next byte */
- end += 2 + (*(end + 1) * ACPI_NAME_SIZE);
+ end += 2 + (*(end + 1) * ACPI_NAMESEG_SIZE);
break;
default:
/* Single name segment */
- end += ACPI_NAME_SIZE;
+ end += ACPI_NAMESEG_SIZE;
break;
}
@@ -522,7 +522,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
ACPI_MOVE_32_TO_32(&name, parser_state->aml);
acpi_ps_set_name(field, name);
- parser_state->aml += ACPI_NAME_SIZE;
+ parser_state->aml += ACPI_NAMESEG_SIZE;
ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 1d6f136e4068..c62be3d91712 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -603,10 +603,10 @@ acpi_walk_resources(acpi_handle device_handle,
/* Parameter validation */
if (!device_handle || !user_function || !name ||
- (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__DMA))) {
+ (!ACPI_COMPARE_NAMESEG(name, METHOD_NAME__CRS) &&
+ !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__PRS) &&
+ !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__AEI) &&
+ !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__DMA))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 0cecd0039acf..933f81316ad2 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -480,7 +480,8 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
/* If a particular signature is expected (DSDT/FACS), it must match */
- if (signature && !ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
+ if (signature &&
+ !ACPI_COMPARE_NAMESEG(&table_desc->signature, signature)) {
ACPI_BIOS_ERROR((AE_INFO,
"Invalid signature 0x%X for ACPI table, expected [%s]",
table_desc->signature.integer, signature));
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 951bd8e1c50a..b2abb40023a6 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -56,7 +56,7 @@ acpi_tb_find_table(char *signature,
/* Normalize the input strings */
memset(&header, 0, sizeof(struct acpi_table_header));
- ACPI_MOVE_NAME(header.signature, signature);
+ ACPI_COPY_NAMESEG(header.signature, signature);
strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
@@ -65,7 +65,7 @@ acpi_tb_find_table(char *signature,
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (memcmp(&(acpi_gbl_root_table_list.tables[i].signature),
- header.signature, ACPI_NAME_SIZE)) {
+ header.signature, ACPI_NAMESEG_SIZE)) {
/* Not the requested table */
@@ -94,14 +94,14 @@ acpi_tb_find_table(char *signature,
if (!memcmp
(acpi_gbl_root_table_list.tables[i].pointer->signature,
- header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
- ||
- !memcmp
- (acpi_gbl_root_table_list.
- tables[i].pointer->
- oem_id,
- header.oem_id,
- ACPI_OEM_ID_SIZE))
+ header.signature, ACPI_NAMESEG_SIZE) && (!oem_id[0]
+ ||
+ !memcmp
+ (acpi_gbl_root_table_list.
+ tables[i].
+ pointer->oem_id,
+ header.oem_id,
+ ACPI_OEM_ID_SIZE))
&& (!oem_table_id[0]
|| !memcmp(acpi_gbl_root_table_list.tables[i].pointer->
oem_table_id, header.oem_table_id,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index be6642bf6366..ef1ffd36ab3f 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -120,7 +120,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
*/
if (!reload &&
acpi_gbl_disable_ssdt_table_install &&
- ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
+ ACPI_COMPARE_NAMESEG(&new_table_desc.signature, ACPI_SIG_SSDT)) {
ACPI_INFO(("Ignoring installation of %4.4s at %8.8X%8.8X",
new_table_desc.signature.ascii,
ACPI_FORMAT_UINT64(address)));
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 9b5df95d881b..4764f849cb78 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -69,10 +69,10 @@ acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
memcpy(out_header, header, sizeof(struct acpi_table_header));
- acpi_tb_fix_string(out_header->signature, ACPI_NAME_SIZE);
+ acpi_tb_fix_string(out_header->signature, ACPI_NAMESEG_SIZE);
acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(out_header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
- acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAME_SIZE);
+ acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAMESEG_SIZE);
}
/*******************************************************************************
@@ -94,7 +94,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
- if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
+ if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -158,8 +158,8 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
* They are the odd tables, have no standard ACPI header and no checksum
*/
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) {
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_S3PT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_FACS)) {
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 2469e01310e2..c5f0b8ec70cc 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -332,9 +332,9 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
&table_index);
if (ACPI_SUCCESS(status) &&
- ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
- tables[table_index].signature,
- ACPI_SIG_FADT)) {
+ ACPI_COMPARE_NAMESEG(&acpi_gbl_root_table_list.
+ tables[table_index].signature,
+ ACPI_SIG_FADT)) {
acpi_gbl_fadt_index = table_index;
acpi_tb_parse_fadt();
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 36592888f0e7..1640685bf4ae 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -230,7 +230,7 @@ acpi_get_table_header(char *signature,
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
- if (!ACPI_COMPARE_NAME
+ if (!ACPI_COMPARE_NAMESEG
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
continue;
@@ -323,7 +323,7 @@ acpi_get_table(char *signature,
i++) {
table_desc = &acpi_gbl_root_table_list.tables[i];
- if (!ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
+ if (!ACPI_COMPARE_NAMESEG(&table_desc->signature, signature)) {
continue;
}
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 1a2592cc3245..4f30f06a6f78 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -118,7 +118,7 @@ acpi_status acpi_tb_load_namespace(void)
table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index];
if (!acpi_gbl_root_table_list.current_table_count ||
- !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_DSDT) ||
+ !ACPI_COMPARE_NAMESEG(table->signature.ascii, ACPI_SIG_DSDT) ||
ACPI_FAILURE(acpi_tb_validate_table(table))) {
status = AE_NO_ACPI_TABLES;
goto unlock_and_exit;
@@ -170,11 +170,12 @@ acpi_status acpi_tb_load_namespace(void)
table = &acpi_gbl_root_table_list.tables[i];
if (!table->address ||
- (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
- && !ACPI_COMPARE_NAME(table->signature.ascii,
- ACPI_SIG_PSDT)
- && !ACPI_COMPARE_NAME(table->signature.ascii,
- ACPI_SIG_OSDT))
+ (!ACPI_COMPARE_NAMESEG
+ (table->signature.ascii, ACPI_SIG_SSDT)
+ && !ACPI_COMPARE_NAMESEG(table->signature.ascii,
+ ACPI_SIG_PSDT)
+ && !ACPI_COMPARE_NAMESEG(table->signature.ascii,
+ ACPI_SIG_OSDT))
|| ACPI_FAILURE(acpi_tb_validate_table(table))) {
continue;
}
@@ -364,7 +365,7 @@ acpi_status acpi_unload_parent_table(acpi_handle object)
* only these types can contain AML and thus are the only types
* that can create namespace objects.
*/
- if (ACPI_COMPARE_NAME
+ if (ACPI_COMPARE_NAMESEG
(acpi_gbl_root_table_list.tables[i].signature.ascii,
ACPI_SIG_DSDT)) {
status = AE_TYPE;
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 79d7426fd7bf..f6cd7d4f698b 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -30,7 +30,7 @@ u8 acpi_ut_valid_nameseg(char *name)
/* Validate each character in the signature */
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (!acpi_ut_valid_name_char(name[i], i)) {
return (FALSE);
}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index ad9f77eb554f..65beaa237669 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -78,7 +78,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"IPMI", /* 0x07 */
"GeneralPurposeIo", /* 0x08 */
"GenericSerialBus", /* 0x09 */
- "PCC" /* 0x0A */
+ "PlatformCommChannel" /* 0x0A */
};
const char *acpi_ut_get_region_name(u8 space_id)
@@ -239,7 +239,7 @@ const char *acpi_ut_get_node_name(void *object)
{
struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
- /* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
+ /* Must return a string of exactly 4 characters == ACPI_NAMESEG_SIZE */
if (!object) {
return ("NULL");
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index afaadc73196b..8638efacdbf4 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -59,10 +59,10 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
/* These are the only tables that contain executable AML */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) ||
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_PSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) ||
ACPI_IS_OEM_SIG(table->signature)) {
return (TRUE);
}
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index a9f08f43c685..1b0f68f5ed8c 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -84,7 +84,7 @@ const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name)
this_name = acpi_gbl_predefined_methods;
while (this_name->info.name[0]) {
- if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+ if (ACPI_COMPARE_NAMESEG(name, this_name->info.name)) {
return (this_name);
}
@@ -201,7 +201,7 @@ const union acpi_predefined_info *acpi_ut_match_resource_name(char *name)
this_name = acpi_gbl_resource_names;
while (this_name->info.name[0]) {
- if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+ if (ACPI_COMPARE_NAMESEG(name, this_name->info.name)) {
return (this_name);
}
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 5bef0b059406..c39b5483045d 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -141,15 +141,15 @@ void acpi_ut_repair_name(char *name)
* Special case for the root node. This can happen if we get an
* error during the execution of module-level code.
*/
- if (ACPI_COMPARE_NAME(name, ACPI_ROOT_PATHNAME)) {
+ if (ACPI_COMPARE_NAMESEG(name, ACPI_ROOT_PATHNAME)) {
return;
}
- ACPI_MOVE_NAME(&original_name, name);
+ ACPI_COPY_NAMESEG(&original_name, name);
/* Check each character in the name */
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (acpi_ut_valid_name_char(name[i], i)) {
continue;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e48894e002ba..adbf7cbedf80 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -356,7 +356,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
- node->type == ACPI_IORT_NODE_SMMU_V3) {
+ node->type == ACPI_IORT_NODE_SMMU_V3 ||
+ node->type == ACPI_IORT_NODE_PMCG) {
*id_out = map->output_base;
return parent;
}
@@ -394,6 +395,8 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
}
return smmu->id_mapping_index;
+ case ACPI_IORT_NODE_PMCG:
+ return 0;
default:
return -EINVAL;
}
@@ -1218,32 +1221,47 @@ static void __init arm_smmu_v3_init_resources(struct resource *res,
}
}
-static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
+static void __init arm_smmu_v3_dma_configure(struct device *dev,
+ struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
+ enum dev_dma_attr attr;
/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
- return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
+ attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+
+ /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ /* Configure DMA for the page table walker */
+ acpi_dma_configure(dev, attr);
}
#if defined(CONFIG_ACPI_NUMA)
/*
* set numa proximity domain for smmuv3 device
*/
-static void __init arm_smmu_v3_set_proximity(struct device *dev,
+static int __init arm_smmu_v3_set_proximity(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
- set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
+ int node = acpi_map_pxm_to_node(smmu->pxm);
+
+ if (node != NUMA_NO_NODE && !node_online(node))
+ return -EINVAL;
+
+ set_dev_node(dev, node);
pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
smmu->base_address,
smmu->pxm);
}
+ return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
@@ -1301,30 +1319,96 @@ static void __init arm_smmu_init_resources(struct resource *res,
}
}
-static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
+static void __init arm_smmu_dma_configure(struct device *dev,
+ struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
+ enum dev_dma_attr attr;
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
- return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
+ attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+
+ /* We expect the dma masks to be equivalent for SMMU set-ups */
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ /* Configure DMA for the page table walker */
+ acpi_dma_configure(dev, attr);
+}
+
+static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
+{
+ struct acpi_iort_pmcg *pmcg;
+
+ /* Retrieve PMCG specific data */
+ pmcg = (struct acpi_iort_pmcg *)node->node_data;
+
+ /*
+ * There are always 2 memory resources.
+ * If the overflow_gsiv is present then add that for a total of 3.
+ */
+ return pmcg->overflow_gsiv ? 3 : 2;
+}
+
+static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_pmcg *pmcg;
+
+ /* Retrieve PMCG specific data */
+ pmcg = (struct acpi_iort_pmcg *)node->node_data;
+
+ res[0].start = pmcg->page0_base_address;
+ res[0].end = pmcg->page0_base_address + SZ_4K - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = pmcg->page1_base_address;
+ res[1].end = pmcg->page1_base_address + SZ_4K - 1;
+ res[1].flags = IORESOURCE_MEM;
+
+ if (pmcg->overflow_gsiv)
+ acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
+ ACPI_EDGE_SENSITIVE, &res[2]);
+}
+
+static struct acpi_platform_list pmcg_plat_info[] __initdata = {
+ /* HiSilicon Hip08 Platform */
+ {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
+ { }
+};
+
+static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
+{
+ u32 model;
+ int idx;
+
+ idx = acpi_match_platform_list(pmcg_plat_info);
+ if (idx >= 0)
+ model = pmcg_plat_info[idx].data;
+ else
+ model = IORT_SMMU_V3_PMCG_GENERIC;
+
+ return platform_device_add_data(pdev, &model, sizeof(model));
}
struct iort_dev_config {
const char *name;
int (*dev_init)(struct acpi_iort_node *node);
- bool (*dev_is_coherent)(struct acpi_iort_node *node);
+ void (*dev_dma_configure)(struct device *dev,
+ struct acpi_iort_node *node);
int (*dev_count_resources)(struct acpi_iort_node *node);
void (*dev_init_resources)(struct resource *res,
struct acpi_iort_node *node);
- void (*dev_set_proximity)(struct device *dev,
+ int (*dev_set_proximity)(struct device *dev,
struct acpi_iort_node *node);
+ int (*dev_add_platdata)(struct platform_device *pdev);
};
static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
- .dev_is_coherent = arm_smmu_v3_is_coherent,
+ .dev_dma_configure = arm_smmu_v3_dma_configure,
.dev_count_resources = arm_smmu_v3_count_resources,
.dev_init_resources = arm_smmu_v3_init_resources,
.dev_set_proximity = arm_smmu_v3_set_proximity,
@@ -1332,9 +1416,16 @@ static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
.name = "arm-smmu",
- .dev_is_coherent = arm_smmu_is_coherent,
+ .dev_dma_configure = arm_smmu_dma_configure,
.dev_count_resources = arm_smmu_count_resources,
- .dev_init_resources = arm_smmu_init_resources
+ .dev_init_resources = arm_smmu_init_resources,
+};
+
+static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
+ .name = "arm-smmu-v3-pmcg",
+ .dev_count_resources = arm_smmu_v3_pmcg_count_resources,
+ .dev_init_resources = arm_smmu_v3_pmcg_init_resources,
+ .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};
static __init const struct iort_dev_config *iort_get_dev_cfg(
@@ -1345,6 +1436,8 @@ static __init const struct iort_dev_config *iort_get_dev_cfg(
return &iort_arm_smmu_v3_cfg;
case ACPI_IORT_NODE_SMMU:
return &iort_arm_smmu_cfg;
+ case ACPI_IORT_NODE_PMCG:
+ return &iort_arm_smmu_v3_pmcg_cfg;
default:
return NULL;
}
@@ -1362,15 +1455,17 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
struct fwnode_handle *fwnode;
struct platform_device *pdev;
struct resource *r;
- enum dev_dma_attr attr;
int ret, count;
pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
- if (ops->dev_set_proximity)
- ops->dev_set_proximity(&pdev->dev, node);
+ if (ops->dev_set_proximity) {
+ ret = ops->dev_set_proximity(&pdev->dev, node);
+ if (ret)
+ goto dev_put;
+ }
count = ops->dev_count_resources(node);
@@ -1393,19 +1488,19 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
goto dev_put;
/*
- * Add a copy of IORT node pointer to platform_data to
- * be used to retrieve IORT data information.
+ * Platform devices based on PMCG nodes use platform_data to
+ * pass the hardware model info to the driver. For others, add
+ * a copy of the IORT node pointer to platform_data to be used
+ * to retrieve IORT data information.
*/
- ret = platform_device_add_data(pdev, &node, sizeof(node));
+ if (ops->dev_add_platdata)
+ ret = ops->dev_add_platdata(pdev);
+ else
+ ret = platform_device_add_data(pdev, &node, sizeof(node));
+
if (ret)
goto dev_put;
- /*
- * We expect the dma masks to be equivalent for
- * all SMMUs set-ups
- */
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-
fwnode = iort_get_fwnode(node);
if (!fwnode) {
@@ -1415,11 +1510,8 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
pdev->dev.fwnode = fwnode;
- attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
- DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
-
- /* Configure DMA for the page table walker */
- acpi_dma_configure(&pdev->dev, attr);
+ if (ops->dev_dma_configure)
+ ops->dev_dma_configure(&pdev->dev, node);
iort_set_device_domain(&pdev->dev, node);
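
With the PMCG configuration above, the resulting platform device carries two fixed 4K register pages, an optional overflow interrupt, and a u32 model value in platform_data instead of an IORT node pointer. A hedged sketch of how a driver probe might consume that layout (driver and variable names are illustrative):

#include <linux/platform_device.h>

static int example_pmcg_probe(struct platform_device *pdev)
{
	struct resource *page0, *page1;
	u32 *model = dev_get_platdata(&pdev->dev);
	int irq;

	page0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	page1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!page0 || !page1 || !model)
		return -ENODEV;

	/* The IRQ resource exists only when the IORT node supplied an
	 * overflow GSIV; a negative return here just means "absent". */
	irq = platform_get_irq(pdev, 0);

	return 0;
}
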
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 6ecbbabf1233..eec263c9019e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
- /* Initialize debug output. Linux does not use ACPICA defaults */
- acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
-
#ifdef CONFIG_X86
/*
* If the machine falls into the DMI check table,
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index a19ff3977ac4..623998a8d722 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -456,8 +456,11 @@ static int acpi_button_resume(struct device *dev)
struct acpi_button *button = acpi_driver_data(device);
button->suspended = false;
- if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users)
+ if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) {
+ button->last_state = !!acpi_lid_evaluate_state(device);
+ button->last_time = ktime_get();
acpi_lid_initialize_state(device);
+ }
return 0;
}
#endif
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1b207fca1420..653642a4cbdd 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -81,9 +81,9 @@ struct cppc_pcc_data {
int refcount;
};
-/* Array to represent the PCC channel per subspace id */
+/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
-/* The cpu_pcc_subspace_idx containsper CPU subspace id */
+/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
/*
@@ -436,7 +436,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
return -ENOMEM;
/*
- * Now that we have _PSD data from all CPUs, lets setup P-state
+ * Now that we have _PSD data from all CPUs, let's set up P-state
* domain info.
*/
for_each_possible_cpu(i) {
@@ -588,7 +588,7 @@ static int register_pcc_channel(int pcc_ss_idx)
return -ENOMEM;
}
- /* Set flag so that we dont come here for each CPU. */
+ /* Set flag so that we don't come here for each CPU. */
pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
@@ -613,7 +613,7 @@ bool __weak cpc_ffh_supported(void)
*
* Check and allocate the cppc_pcc_data memory.
* In some processor configurations it is possible that same subspace
- * is shared between multiple CPU's. This is seen especially in CPU's
+ * is shared between multiple CPUs. This is seen especially in CPUs
* with hardware multi-threading support.
*
* Return: 0 for success, errno for failure
@@ -711,7 +711,7 @@ static bool is_cppc_supported(int revision, int num_ent)
/**
* acpi_cppc_processor_probe - Search for per CPU _CPC objects.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
*
* Return: 0 for success or negative value for err.
*/
@@ -728,7 +728,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
acpi_status status;
int ret = -EFAULT;
- /* Parse the ACPI _CPC table for this cpu. */
+ /* Parse the ACPI _CPC table for this CPU. */
status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status)) {
@@ -840,7 +840,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (ret)
goto out_free;
- /* Register PCC channel once for all PCC subspace id. */
+ /* Register PCC channel once for each PCC subspace ID. */
if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
ret = register_pcc_channel(pcc_subspace_id);
if (ret)
@@ -860,7 +860,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- /* Plug PSD data into this CPUs CPC descriptor. */
+ /* Plug PSD data into this CPU's CPC descriptor. */
per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
@@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
/**
* acpi_cppc_processor_exit - Cleanup CPC structs.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
*
* Return: Void
*/
@@ -931,7 +931,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
/**
* cpc_read_ffh() - Read FFH register
- * @cpunum: cpu number to read
+ * @cpunum: CPU number to read
* @reg: cppc register information
* @val: place holder for return value
*
@@ -946,7 +946,7 @@ int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
/**
* cpc_write_ffh() - Write FFH register
- * @cpunum: cpu number to write
+ * @cpunum: CPU number to write
* @reg: cppc register information
* @val: value to write
*
@@ -1093,7 +1093,7 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
/**
- * cppc_get_perf_caps - Get a CPUs performance capabilities.
+ * cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
*
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
cpc_read(cpunum, nominal_reg, &nom);
perf_caps->nominal_perf = nom;
- cpc_read(cpunum, guaranteed_reg, &guaranteed);
- perf_caps->guaranteed_perf = guaranteed;
+ if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
+ IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
+ perf_caps->guaranteed_perf = 0;
+ } else {
+ cpc_read(cpunum, guaranteed_reg, &guaranteed);
+ perf_caps->guaranteed_perf = guaranteed;
+ }
cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
perf_caps->lowest_nonlinear_perf = min_nonlinear;
@@ -1178,7 +1183,7 @@ out_err:
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
- * cppc_get_perf_ctrs - Read a CPUs performance feedback counters.
+ * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
* @cpunum: CPU from which to read counters.
* @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
*
@@ -1205,7 +1210,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
/*
- * If refernce perf register is not supported then we should
+ * If reference perf register is not supported then we should
* use the nominal perf value
*/
if (!CPC_SUPPORTED(ref_perf_reg))
@@ -1258,7 +1263,7 @@ out_err:
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
/**
- * cppc_set_perf - Set a CPUs performance controls.
+ * cppc_set_perf - Set a CPU's performance controls.
* @cpu: CPU for which to set performance controls.
* @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
*
@@ -1339,7 +1344,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* executing the Phase-II.
* 2. Some other CPU has beaten this CPU to successfully execute the
* write_trylock and has already acquired the write_lock. We know for a
- * fact it(other CPU acquiring the write_lock) couldn't have happened
+ * fact it (other CPU acquiring the write_lock) couldn't have happened
* before this CPU's Phase-I as we held the read_lock.
* 3. Some other CPU executing pcc CMD_READ has stolen the
* down_write, in which case, send_pcc_cmd will check for pending
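
The three-case analysis above is easier to follow next to the shape of the locking it describes. A minimal sketch, assuming a plain rwsem; stage_write() and ring_doorbell() are illustrative placeholders, not the driver's API:

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(demo_pcc_lock);

	static void stage_write(void) { /* fill the shared comm region */ }
	static void ring_doorbell(void) { /* issue one PCC command */ }

	static void two_phase_write(void)
	{
		/* Phase-I: any number of CPUs stage writes under the read lock */
		down_read(&demo_pcc_lock);
		stage_write();
		up_read(&demo_pcc_lock);

		/*
		 * Phase-II: whichever CPU wins the trylock issues a single
		 * command covering every staged write; losers simply rely on
		 * the winner, exactly as cases 1-3 above explain.
		 */
		if (down_write_trylock(&demo_pcc_lock)) {
			ring_doorbell();
			up_write(&demo_pcc_lock);
		}
	}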
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 1aa0d014dc34..b859d75eaf9f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -728,6 +728,9 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev,
goto out;
}
+ acpi_handle_debug(adev->handle, "GPE%2X enabled for wakeup\n",
+ (unsigned int)wakeup->gpe_number);
+
inc:
wakeup->enable_count++;
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 545e91420cde..8940054d6250 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
const union acpi_object *of_compatible, *obj;
+ acpi_status status;
int len, count;
int i, nval;
char *c;
- acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+ status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
/* DT strings are all in lower case */
for (c = buf.pointer; *c != '\0'; c++)
*c = tolower(*c);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index e1c242568341..0c081390930a 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -31,8 +31,7 @@ static ssize_t name##_show(struct device *dev,\
struct device_attribute *attr,\
char *buf)\
{\
- struct platform_device *pdev = to_platform_device(dev);\
- struct acpi_device *acpi_dev = platform_get_drvdata(pdev);\
+ struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
unsigned long long val;\
acpi_status status;\
\
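
The simplification works because platform_get_drvdata() is only a thin wrapper; paraphrasing its definition from include/linux/platform_device.h:

	static inline void *platform_get_drvdata(const struct platform_device *pdev)
	{
		return dev_get_drvdata(&pdev->dev);
	}

A show() callback already receives the struct device, so it can skip the round-trip through the platform device entirely.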
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 5a127f3f2d5c..47f21599f2ab 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -131,8 +131,8 @@ int acpi_bus_generate_netlink_event(const char *device_class,
event = nla_data(attr);
memset(event, 0, sizeof(struct acpi_genl_event));
- strcpy(event->device_class, device_class);
- strcpy(event->bus_id, bus_id);
+ strscpy(event->device_class, device_class, sizeof(event->device_class));
+ strscpy(event->bus_id, bus_id, sizeof(event->bus_id));
event->type = type;
event->data = data;
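
Unlike strcpy(), strscpy() is bounded by the destination, always NUL-terminates, and reports truncation. A standalone sketch of that contract (the source string is made up):

	#include <linux/string.h>
	#include <linux/types.h>

	static void strscpy_demo(void)
	{
		char dst[8];
		ssize_t n;

		n = strscpy(dst, "device_class_too_long", sizeof(dst));
		/* dst holds "device_" (7 chars + NUL); n == -E2BIG */
		(void)n;
	}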
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index e18ade5d74e9..f1ed0befe303 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -55,6 +55,10 @@ static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
+static bool force_labels;
+module_param(force_labels, bool, 0444);
+MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");
+
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);
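
A usage note on the new parameter, inferred from the 0444 permission above rather than stated in the patch: the opt-in is load-time only, e.g.

	modprobe nfit force_labels=1

or booting with nfit.force_labels=1; the value is then visible read-only under /sys/module/nfit/parameters/force_labels.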
@@ -415,7 +419,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
if (call_pkg) {
int i;
- if (nfit_mem->family != call_pkg->nd_family)
+ if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
return -ENOTTY;
for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
@@ -424,6 +428,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
return call_pkg->nd_command;
}
+ /* In the !call_pkg case, bus commands == bus functions */
+ if (!nfit_mem)
+ return cmd;
+
/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
return cmd;
@@ -454,17 +462,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (cmd_rc)
*cmd_rc = -EINVAL;
+ if (cmd == ND_CMD_CALL)
+ call_pkg = buf;
+ func = cmd_to_func(nfit_mem, cmd, call_pkg);
+ if (func < 0)
+ return func;
+
if (nvdimm) {
struct acpi_device *adev = nfit_mem->adev;
if (!adev)
return -ENOTTY;
- if (cmd == ND_CMD_CALL)
- call_pkg = buf;
- func = cmd_to_func(nfit_mem, cmd, call_pkg);
- if (func < 0)
- return func;
dimm_name = nvdimm_name(nvdimm);
cmd_name = nvdimm_cmd_name(cmd);
cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -475,12 +484,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
} else {
struct acpi_device *adev = to_acpi_dev(acpi_desc);
- func = cmd;
cmd_name = nvdimm_bus_cmd_name(cmd);
cmd_mask = nd_desc->cmd_mask;
- dsm_mask = cmd_mask;
- if (cmd == ND_CMD_CALL)
- dsm_mask = nd_desc->bus_dsm_mask;
+ dsm_mask = nd_desc->bus_dsm_mask;
desc = nd_cmd_bus_desc(cmd);
guid = to_nfit_uuid(NFIT_DEV_BUS);
handle = adev->handle;
@@ -554,6 +560,19 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return -EINVAL;
}
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
+ dimm_name, cmd_name, out_obj->type);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
+ cmd_name, out_obj->buffer.length);
+ print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
+ out_obj->buffer.pointer,
+ min_t(u32, 128, out_obj->buffer.length), true);
+
if (call_pkg) {
call_pkg->nd_fw_size = out_obj->buffer.length;
memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -572,19 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return 0;
}
- if (out_obj->package.type != ACPI_TYPE_BUFFER) {
- dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
- dimm_name, cmd_name, out_obj->type);
- rc = -EINVAL;
- goto out;
- }
-
- dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
- cmd_name, out_obj->buffer.length);
- print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
- out_obj->buffer.pointer,
- min_t(u32, 128, out_obj->buffer.length), true);
-
for (i = 0, offset = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
(u32 *) out_obj->buffer.pointer,
@@ -1317,19 +1323,30 @@ static ssize_t scrub_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm_bus_descriptor *nd_desc;
+ struct acpi_nfit_desc *acpi_desc;
ssize_t rc = -ENXIO;
+ bool busy;
device_lock(dev);
nd_desc = dev_get_drvdata(dev);
- if (nd_desc) {
- struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ if (!nd_desc) {
+ device_unlock(dev);
+ return rc;
+ }
+ acpi_desc = to_acpi_desc(nd_desc);
- mutex_lock(&acpi_desc->init_mutex);
- rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
- acpi_desc->scrub_busy
- && !acpi_desc->cancel ? "+\n" : "\n");
- mutex_unlock(&acpi_desc->init_mutex);
+ mutex_lock(&acpi_desc->init_mutex);
+ busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
+ && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
+ rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
+ /* Allow an admin to poll the busy state at a higher rate */
+ if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
+ &acpi_desc->scrub_flags)) {
+ acpi_desc->scrub_tmo = 1;
+ mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
}
+
+ mutex_unlock(&acpi_desc->init_mutex);
device_unlock(dev);
return rc;
}
@@ -1759,14 +1776,14 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
{
+ struct device *dev = &nfit_mem->adev->dev;
struct nd_intel_smart smart = { 0 };
union acpi_object in_buf = {
- .type = ACPI_TYPE_BUFFER,
- .buffer.pointer = (char *) &smart,
- .buffer.length = sizeof(smart),
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 0,
};
union acpi_object in_obj = {
- .type = ACPI_TYPE_PACKAGE,
+ .package.type = ACPI_TYPE_PACKAGE,
.package.count = 1,
.package.elements = &in_buf,
};
@@ -1781,8 +1798,15 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
return;
out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
- if (!out_obj)
+ if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
+ || out_obj->buffer.length < sizeof(smart)) {
+ dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
+ dev_name(dev));
+ ACPI_FREE(out_obj);
return;
+ }
+ memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
+ ACPI_FREE(out_obj);
if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
if (smart.shutdown_state)
@@ -1793,7 +1817,6 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
nfit_mem->dirty_shutdown = smart.shutdown_count;
}
- ACPI_FREE(out_obj);
}
static void populate_shutdown_status(struct nfit_mem *nfit_mem)
@@ -1861,9 +1884,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dev_set_drvdata(&adev_dimm->dev, nfit_mem);
/*
- * Until standardization materializes we need to consider 4
- * different command sets. Note, that checking for function0 (bit0)
- * tells us if any commands are reachable through this GUID.
+ * There are 4 "legacy" NVDIMM command sets
+ * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
+ * an EFI working group was established to constrain this
+ * proliferation. The nfit driver probes for the supported command
+ * set by GUID. Note, if you're a platform developer looking to add
+ * a new command set to this probe, consider using an existing set,
+ * or otherwise seek approval to publish the command set at
+ * http://www.uefi.org/RFIC_LIST.
+ *
+ * Note, that checking for function0 (bit0) tells us if any commands
+ * are reachable through this GUID.
*/
for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
@@ -1886,6 +1917,8 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dsm_mask &= ~(1 << 8);
} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
dsm_mask = 0xffffffff;
+ } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
+ dsm_mask = 0x1f;
} else {
dev_dbg(dev, "unknown dimm command family\n");
nfit_mem->family = -1;
@@ -1915,18 +1948,32 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
| 1 << ND_CMD_SET_CONFIG_DATA;
if (family == NVDIMM_FAMILY_INTEL
&& (dsm_mask & label_mask) == label_mask)
- return 0;
+ /* skip _LS{I,R,W} enabling */;
+ else {
+ if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
+ && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
+ dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
+ set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
+ }
- if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
- && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
- dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
- set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
- }
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+ && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
+ dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
+ set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
+ }
- if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
- && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
- dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
- set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
+ /*
+ * Quirk read-only label configurations to preserve
+ * access to label-less namespaces by default.
+ */
+ if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
+ && !force_labels) {
+ dev_dbg(dev, "%s: No _LSW, disable labels\n",
+ dev_name(&adev_dimm->dev));
+ clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
+ } else
+ dev_dbg(dev, "%s: Force enable labels\n",
+ dev_name(&adev_dimm->dev));
}
populate_shutdown_status(nfit_mem);
@@ -2027,6 +2074,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
}
+ /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
+ if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
+ set_bit(NDD_NOBLK, &flags);
+
if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
@@ -2050,7 +2101,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
continue;
- dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
+ dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
nvdimm_name(nvdimm),
mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
@@ -2641,7 +2692,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc,
if (rc < 0)
return rc;
- return cmd_rc;
+ if (cmd_rc < 0)
+ return cmd_rc;
+ set_bit(ARS_VALID, &acpi_desc->scrub_flags);
+ return 0;
}
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
@@ -2651,11 +2705,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc)
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
- memset(&ars_start, 0, sizeof(ars_start));
- ars_start.address = ars_status->restart_address;
- ars_start.length = ars_status->restart_length;
- ars_start.type = ars_status->type;
- ars_start.flags = acpi_desc->ars_start_flags;
+ ars_start = (struct nd_cmd_ars_start) {
+ .address = ars_status->restart_address,
+ .length = ars_status->restart_length,
+ .type = ars_status->type,
+ };
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
sizeof(ars_start), &cmd_rc);
if (rc < 0)
@@ -2734,6 +2788,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
*/
if (ars_status->out_length < 44)
return 0;
+
+ /*
+ * Ignore potentially stale results that are only refreshed
+ * after a start-ARS event.
+ */
+ if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
+ dev_dbg(acpi_desc->dev, "skip %d stale records\n",
+ ars_status->num_records);
+ return 0;
+ }
+
for (i = 0; i < ars_status->num_records; i++) {
/* only process full records */
if (ars_status->out_length
@@ -2891,11 +2956,15 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
ndr_desc->res = &res;
ndr_desc->provider_data = nfit_spa;
ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
- if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
+ if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
ndr_desc->numa_node = acpi_map_pxm_to_online_node(
spa->proximity_domain);
- else
+ ndr_desc->target_node = acpi_map_pxm_to_node(
+ spa->proximity_domain);
+ } else {
ndr_desc->numa_node = NUMA_NO_NODE;
+ ndr_desc->target_node = NUMA_NO_NODE;
+ }
/*
* Persistence domain bits are hierarchical, if
@@ -3004,14 +3073,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
{
int rc;
- if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
+ if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
return acpi_nfit_register_region(acpi_desc, nfit_spa);
set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
- set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
+ if (!no_init_ars)
+ set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
switch (acpi_nfit_query_poison(acpi_desc)) {
case 0:
+ case -ENOSPC:
case -EAGAIN:
rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
/* shouldn't happen, try again later */
@@ -3036,7 +3107,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
break;
case -EBUSY:
case -ENOMEM:
- case -ENOSPC:
/*
* BIOS was using ARS, wait for it to complete (or
* resources to become available) and then perform our
@@ -3071,7 +3141,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
lockdep_assert_held(&acpi_desc->init_mutex);
- if (acpi_desc->cancel)
+ if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
return 0;
if (query_rc == -EBUSY) {
@@ -3145,7 +3215,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
{
lockdep_assert_held(&acpi_desc->init_mutex);
- acpi_desc->scrub_busy = 1;
+ set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
/* note this should only be set from within the workqueue */
if (tmo)
acpi_desc->scrub_tmo = tmo;
@@ -3161,7 +3231,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
{
lockdep_assert_held(&acpi_desc->init_mutex);
- acpi_desc->scrub_busy = 0;
+ clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
acpi_desc->scrub_count++;
if (acpi_desc->scrub_count_state)
sysfs_notify_dirent(acpi_desc->scrub_count_state);
@@ -3182,6 +3252,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
else
notify_ars_done(acpi_desc);
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
+ clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
mutex_unlock(&acpi_desc->init_mutex);
}
@@ -3216,6 +3287,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
struct nfit_spa *nfit_spa;
int rc;
+ set_bit(ARS_VALID, &acpi_desc->scrub_flags);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
@@ -3450,7 +3522,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
struct nfit_spa *nfit_spa;
mutex_lock(&acpi_desc->init_mutex);
- if (acpi_desc->cancel) {
+ if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
mutex_unlock(&acpi_desc->init_mutex);
return 0;
}
@@ -3529,7 +3601,7 @@ void acpi_nfit_shutdown(void *data)
mutex_unlock(&acpi_desc_lock);
mutex_lock(&acpi_desc->init_mutex);
- acpi_desc->cancel = 1;
+ set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
cancel_delayed_work_sync(&acpi_desc->dwork);
mutex_unlock(&acpi_desc->init_mutex);
@@ -3729,6 +3801,7 @@ static __init int nfit_init(void)
guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
+ guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
nfit_wq = create_singlethread_workqueue("nfit");
if (!nfit_wq)
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index f70de71f79d6..cddd0fcf622c 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
if (!test_bit(cmd, &nfit_mem->dsm_mask))
return -ENOTTY;
- if (old_data)
- memcpy(nd_cmd.cmd.old_pass, old_data->data,
- sizeof(nd_cmd.cmd.old_pass));
+ memcpy(nd_cmd.cmd.old_pass, old_data->data,
+ sizeof(nd_cmd.cmd.old_pass));
memcpy(nd_cmd.cmd.new_pass, new_data->data,
sizeof(nd_cmd.cmd.new_pass));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
/* flush all cache before we erase DIMM */
nvdimm_invalidate_cache();
- if (nkey)
- memcpy(nd_cmd.cmd.passphrase, nkey->data,
- sizeof(nd_cmd.cmd.passphrase));
+ memcpy(nd_cmd.cmd.passphrase, nkey->data,
+ sizeof(nd_cmd.cmd.passphrase));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
if (rc < 0)
return rc;
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 33691aecfcee..2f8cf2a11e3b 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -34,11 +34,14 @@
/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */
#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
+/* http://www.uefi.org/RFIC_LIST (see "Virtual NVDIMM 0x1901") */
+#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80"
+
#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
-#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT
+#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
#define NVDIMM_STANDARD_CMDMASK \
(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
@@ -94,6 +97,7 @@ enum nfit_uuids {
NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
+ NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
NFIT_SPA_VOLATILE,
NFIT_SPA_PM,
NFIT_SPA_DCR,
@@ -210,6 +214,13 @@ struct nfit_mem {
int family;
};
+enum scrub_flags {
+ ARS_BUSY,
+ ARS_CANCEL,
+ ARS_VALID,
+ ARS_POLL,
+};
+
struct acpi_nfit_desc {
struct nvdimm_bus_descriptor nd_desc;
struct acpi_table_header acpi_header;
@@ -223,7 +234,6 @@ struct acpi_nfit_desc {
struct list_head idts;
struct nvdimm_bus *nvdimm_bus;
struct device *dev;
- u8 ars_start_flags;
struct nd_cmd_ars_status *ars_status;
struct nfit_spa *scrub_spa;
struct delayed_work dwork;
@@ -232,8 +242,7 @@ struct acpi_nfit_desc {
unsigned int max_ars;
unsigned int scrub_count;
unsigned int scrub_mode;
- unsigned int scrub_busy:1;
- unsigned int cancel:1;
+ unsigned long scrub_flags;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
unsigned long bus_nfit_cmd_force_en;
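
The conversion from two :1 bitfields to a single unsigned long (see the acpi_nfit_desc hunk above) buys atomicity: the bitops API performs an atomic read-modify-write per flag, which adjacent bitfields sharing one word cannot. A minimal sketch using the enum just added:

	#include <linux/bitops.h>

	static unsigned long demo_scrub_flags;

	static void demo_flags_usage(void)
	{
		set_bit(ARS_BUSY, &demo_scrub_flags);	/* atomic set */
		if (test_and_clear_bit(ARS_VALID, &demo_scrub_flags)) {
			/* consume the one-shot "results are fresh" token */
		}
		clear_bit(ARS_BUSY, &demo_scrub_flags);
	}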
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 7bbbf8256a41..867f6e3f2b4f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -84,6 +84,7 @@ int acpi_map_pxm_to_node(int pxm)
return node;
}
+EXPORT_SYMBOL(acpi_map_pxm_to_node);
/**
* acpi_map_pxm_to_online_node - Map proximity ID to online node
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 665e93ca0b40..87db3e124725 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -535,12 +535,12 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
/*
* Try to execute _DSW first.
*
- * Three agruments are needed for the _DSW object:
+ * Three arguments are needed for the _DSW object:
* Argument 0: enable/disable the wake capabilities
* Argument 1: target system state
* Argument 2: target device state
* When _DSW object is called to disable the wake capabilities, maybe
- * the first argument is filled. The values of the other two agruments
+ * the first argument is filled. The values of the other two arguments
* are meaningless.
*/
in_arg[0].type = ACPI_TYPE_INTEGER;
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index ad31c50de3be..b72e6afaa8fb 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -164,7 +164,7 @@ static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *t
}
/**
- * acpi_count_levels() - Given a PPTT table, and a cpu node, count the caches
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
*
@@ -209,6 +209,9 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *cpu_node;
u32 proc_sz;
+ if (table_hdr->revision > 1)
+ return (node->flags & ACPI_PPTT_ACPI_LEAF_NODE);
+
table_end = (unsigned long)table_hdr + table_hdr->length;
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
@@ -232,7 +235,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
/**
* acpi_find_processor_node() - Given a PPTT table find the requested processor
* @table_hdr: Pointer to the head of the PPTT table
- * @acpi_cpu_id: cpu we are searching for
+ * @acpi_cpu_id: CPU we are searching for
*
* Find the subtable entry describing the provided processor.
* This is done by iterating the PPTT table looking for processor nodes
@@ -453,21 +456,21 @@ static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_ta
static void acpi_pptt_warn_missing(void)
{
- pr_warn_once("No PPTT table found, cpu and cache topology may be inaccurate\n");
+ pr_warn_once("No PPTT table found, CPU and cache topology may be inaccurate\n");
}
/**
* topology_get_acpi_cpu_tag() - Find a unique topology value for a feature
* @table: Pointer to the head of the PPTT table
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
* @level: A level that terminates the search
* @flag: A flag which terminates the search
*
- * Get a unique value given a cpu, and a topology level, that can be
+ * Get a unique value given a CPU, and a topology level, that can be
* matched to determine which cpus share common topological features
* at that level.
*
- * Return: Unique value, or -ENOENT if unable to locate cpu
+ * Return: Unique value, or -ENOENT if unable to locate CPU
*/
static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
unsigned int cpu, int level, int flag)
@@ -507,7 +510,7 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
return -ENOENT;
}
retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
- pr_debug("Topology Setup ACPI cpu %d, level %d ret = %d\n",
+ pr_debug("Topology Setup ACPI CPU %d, level %d ret = %d\n",
cpu, level, retval);
acpi_put_table(table);
@@ -516,9 +519,9 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
/**
* acpi_find_last_cache_level() - Determines the number of cache levels for a PE
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
*
- * Given a logical cpu number, returns the number of levels of cache represented
+ * Given a logical CPU number, returns the number of levels of cache represented
* in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0
* indicating we didn't find any cache levels.
*
@@ -531,7 +534,7 @@ int acpi_find_last_cache_level(unsigned int cpu)
int number_of_levels = 0;
acpi_status status;
- pr_debug("Cache Setup find last level cpu=%d\n", cpu);
+ pr_debug("Cache Setup find last level CPU=%d\n", cpu);
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
@@ -548,14 +551,14 @@ int acpi_find_last_cache_level(unsigned int cpu)
/**
* cache_setup_acpi() - Override CPU cache topology with data from the PPTT
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
*
* Updates the global cache info provided by cpu_get_cacheinfo()
* when there are valid properties in the acpi_pptt_cache nodes. A
* successful parse may not result in any updates if none of the
- * cache levels have any valid flags set. Futher, a unique value is
+ * cache levels have any valid flags set. Further, a unique value is
* associated with each known CPU cache entry. This unique value
- * can be used to determine whether caches are shared between cpus.
+ * can be used to determine whether caches are shared between CPUs.
*
* Return: -ENOENT on failure to find table, or 0 on success
*/
@@ -564,7 +567,7 @@ int cache_setup_acpi(unsigned int cpu)
struct acpi_table_header *table;
acpi_status status;
- pr_debug("Cache Setup ACPI cpu %d\n", cpu);
+ pr_debug("Cache Setup ACPI CPU %d\n", cpu);
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
if (ACPI_FAILURE(status)) {
@@ -579,8 +582,8 @@ int cache_setup_acpi(unsigned int cpu)
}
/**
- * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
+ * @cpu: Kernel logical CPU number
* @level: The topological level for which we would like a unique ID
*
* Determine a topology unique ID for each thread/core/cluster/mc_grouping
@@ -593,7 +596,7 @@ int cache_setup_acpi(unsigned int cpu)
* other levels beyond this use a generated value to uniquely identify
* a topological feature.
*
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
* Otherwise returns a value which represents a unique topological feature.
*/
int find_acpi_cpu_topology(unsigned int cpu, int level)
@@ -603,12 +606,12 @@ int find_acpi_cpu_topology(unsigned int cpu, int level)
/**
* find_acpi_cpu_cache_topology() - Determine a unique cache topology value
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
* @level: The cache level for which we would like a unique ID
*
* Determine a unique ID for each unified cache in the system
*
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
* Otherwise returns a value which represents a unique topological feature.
*/
int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
@@ -640,17 +643,17 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
/**
- * find_acpi_cpu_topology_package() - Determine a unique cpu package value
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology_package() - Determine a unique CPU package value
+ * @cpu: Kernel logical CPU number
*
- * Determine a topology unique package ID for the given cpu.
+ * Determine a topology unique package ID for the given CPU.
* This ID can then be used to group peers, which will have matching ids.
*
* The search terminates when either a level is found with the PHYSICAL_PACKAGE
* flag set or we reach a root node.
*
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
- * Otherwise returns a value which represents the package for this cpu.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
+ * Otherwise returns a value which represents the package for this CPU.
*/
int find_acpi_cpu_topology_package(unsigned int cpu)
{
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a303fd0e108c..c73d3a62799a 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -181,7 +181,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
acpi_processor_ppc_ost(pr->handle, 0);
}
if (ret >= 0)
- cpufreq_update_policy(pr->id);
+ cpufreq_update_limits(pr->id);
}
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 77abe0ec4043..9d460a859be0 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -44,6 +44,7 @@ static const guid_t prp_guids[] = {
0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
};
+/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
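
For readers checking the new comment against the initializer: GUID_INIT() takes one 32-bit group, two 16-bit groups, then the final eight bytes individually, so the fields line up with the dashed string as follows:

	/*
	 * dbb8e3e6 - 5886 - 4ba6 - 8795 - 1319f52a966b
	 *    a        b      c    d0,d1  d2..d7
	 * GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
	 */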
@@ -1031,6 +1032,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
const struct acpi_data_node *data = to_acpi_data_node(fwnode);
struct acpi_data_node *dn;
+ /*
+ * We can have a combination of device and data nodes, e.g. with
+ * hierarchical _DSD properties. Make sure the adev pointer is
+ * restored before going through data nodes, otherwise we will
+ * be looking for data_nodes below the last device found instead
+ * of the common fwnode shared by device_nodes and data_nodes.
+ */
+ adev = to_acpi_device_node(fwnode);
if (adev)
head = &adev->data.subnodes;
else if (data)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5efd4219f112..b845dc3e0ba9 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -763,18 +763,16 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
}
EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
-static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
- struct acpi_device_wakeup *wakeup)
+static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
{
+ acpi_handle handle = dev->handle;
+ struct acpi_device_wakeup *wakeup = &dev->wakeup;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
union acpi_object *element = NULL;
acpi_status status;
int err = -ENODATA;
- if (!wakeup)
- return -EINVAL;
-
INIT_LIST_HEAD(&wakeup->resources);
/* _PRW */
@@ -848,9 +846,9 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
static bool acpi_wakeup_gpe_init(struct acpi_device *device)
{
static const struct acpi_device_id button_device_ids[] = {
- {"PNP0C0C", 0},
- {"PNP0C0D", 0},
- {"PNP0C0E", 0},
+ {"PNP0C0C", 0}, /* Power button */
+ {"PNP0C0D", 0}, /* Lid */
+ {"PNP0C0E", 0}, /* Sleep button */
{"", 0},
};
struct acpi_device_wakeup *wakeup = &device->wakeup;
@@ -883,8 +881,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
if (!acpi_has_method(device->handle, "_PRW"))
return;
- err = acpi_bus_extract_wakeup_device_power_package(device->handle,
- &device->wakeup);
+ err = acpi_bus_extract_wakeup_device_power_package(device);
if (err) {
dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
return;
@@ -895,7 +892,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
/*
* Call _PSW/_DSW object to disable its ability to wake the sleeping
* system for the ACPI device with the _PRW object.
- * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
+ * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
* So it is necessary to call _DSW object first. Only when it is not
* present will the _PSW object used.
*/
@@ -1545,6 +1542,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
*/
static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
{"BSG1160", },
+ {"BSG2150", },
{"INT33FE", },
{"INT3515", },
{}
@@ -2259,7 +2257,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
mutex_lock(&acpi_probe_mutex);
for (ape = ap_head; nr; ape++, nr--) {
- if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
+ if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
acpi_table_parse_madt(ape->type, acpi_match_madt, 0);
count += acpi_probe_count;
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index c336784d0bcb..b34d05e365b7 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(qdf2400_e44_present);
/*
* Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
- * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
+ * Detect them by examining the OEM fields in the SPCR header, similar to PCI
* quirk detection in pci_mcfg.c.
*/
static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 41324f0b1bee..75948a3f1a20 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -327,9 +327,9 @@ static struct kobject *hotplug_kobj;
struct acpi_table_attr {
struct bin_attribute attr;
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
int instance;
- char filename[ACPI_NAME_SIZE+ACPI_INST_SIZE];
+ char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
struct list_head node;
};
@@ -368,10 +368,10 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
char instance_str[ACPI_INST_SIZE];
sysfs_attr_init(&table_attr->attr.attr);
- ACPI_MOVE_NAME(table_attr->name, table_header->signature);
+ ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
list_for_each_entry(attr, &acpi_table_attr_list, node) {
- if (ACPI_COMPARE_NAME(table_attr->name, attr->name))
+ if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
if (table_attr->instance < attr->instance)
table_attr->instance = attr->instance;
}
@@ -382,8 +382,8 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
return -ERANGE;
}
- ACPI_MOVE_NAME(table_attr->filename, table_header->signature);
- table_attr->filename[ACPI_NAME_SIZE] = '\0';
+ ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
+ table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
if (table_attr->instance > 1 || (table_attr->instance == 1 &&
!acpi_get_table
(table_header->signature, 2, &header))) {
@@ -484,7 +484,7 @@ static int acpi_table_data_init(struct acpi_table_header *th)
int i;
for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
- if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) {
+ if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
if (!data_attr)
return -ENOMEM;
@@ -648,26 +648,29 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device,
}
}
-static int get_status(u32 index, acpi_event_status *status,
+static int get_status(u32 index, acpi_event_status *ret,
acpi_handle *handle)
{
- int result;
+ acpi_status status;
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
return -EINVAL;
if (index < num_gpes) {
- result = acpi_get_gpe_device(index, handle);
- if (result) {
+ status = acpi_get_gpe_device(index, handle);
+ if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
"Invalid GPE 0x%x", index));
- return result;
+ return -ENXIO;
}
- result = acpi_get_gpe_status(*handle, index, status);
- } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
- result = acpi_get_event_status(index - num_gpes, status);
+ status = acpi_get_gpe_status(*handle, index, ret);
+ } else {
+ status = acpi_get_event_status(index - num_gpes, ret);
+ }
+ if (ACPI_FAILURE(status))
+ return -EIO;
- return result;
+ return 0;
}
static ssize_t counter_show(struct kobject *kobj,
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8fccbe49612a..d7bf936b1646 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -240,8 +240,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
* On success returns sum of all matching entries for all proc handlers.
* Otherwise, -ENODEV or -EINVAL is returned.
*/
-static int __init
-acpi_parse_entries_array(char *id, unsigned long table_size,
+static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
struct acpi_table_header *table_header,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
@@ -314,8 +313,7 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
return errs ? -EINVAL : count;
}
-int __init
-acpi_table_parse_entries_array(char *id,
+int __init acpi_table_parse_entries_array(char *id,
unsigned long table_size,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
@@ -346,8 +344,7 @@ acpi_table_parse_entries_array(char *id,
return count;
}
-int __init
-acpi_table_parse_entries(char *id,
+int __init acpi_table_parse_entries(char *id,
unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
@@ -362,8 +359,7 @@ acpi_table_parse_entries(char *id,
max_entries);
}
-int __init
-acpi_table_parse_madt(enum acpi_madt_type id,
+int __init acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler, unsigned int max_entries)
{
return acpi_table_parse_entries(ACPI_SIG_MADT,
@@ -670,8 +666,8 @@ static void __init acpi_table_initrd_scan(void)
table_length = table->length;
/* Skip RSDT/XSDT which should only be used for override */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_RSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_XSDT)) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
goto next_table;
}
@@ -725,8 +721,7 @@ static void *amlcode __attribute__ ((weakref("AmlCode")));
static void *dsdt_amlcode __attribute__ ((weakref("dsdt_aml_code")));
#endif
-acpi_status
-acpi_os_table_override(struct acpi_table_header *existing_table,
+acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
struct acpi_table_header **new_table)
{
if (!existing_table || !new_table)
@@ -788,7 +783,6 @@ static int __init acpi_parse_apic_instance(char *str)
return 0;
}
-
early_param("acpi_apic_instance", acpi_parse_apic_instance);
static int __init acpi_force_table_verification_setup(char *s)
@@ -797,7 +791,6 @@ static int __init acpi_force_table_verification_setup(char *s)
return 0;
}
-
early_param("acpi_force_table_verification", acpi_force_table_verification_setup);
static int __init acpi_force_32bit_fadt_addr(char *s)
@@ -807,5 +800,4 @@ static int __init acpi_force_32bit_fadt_addr(char *s)
return 0;
}
-
early_param("acpi_force_32bit_fadt_addr", acpi_force_32bit_fadt_addr);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 78db97687f26..89363b245489 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -739,6 +739,7 @@ EXPORT_SYMBOL(acpi_dev_found);
struct acpi_dev_match_info {
const char *dev_name;
+ struct acpi_device *adev;
struct acpi_device_id hid[2];
const char *uid;
s64 hrv;
@@ -759,6 +760,7 @@ static int acpi_dev_match_cb(struct device *dev, void *data)
return 0;
match->dev_name = acpi_dev_name(adev);
+ match->adev = adev;
if (match->hrv == -1)
return 1;
@@ -800,23 +802,26 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+ put_device(dev);
return !!dev;
}
EXPORT_SYMBOL(acpi_dev_present);
/**
- * acpi_dev_get_first_match_name - Return name of first match of ACPI device
+ * acpi_dev_get_first_match_dev - Return the first matching ACPI device
* @hid: Hardware ID of the device.
* @uid: Unique ID of the device, pass NULL to not check _UID
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
- * Return device name if a matching device was present
+ * Return the first matching ACPI device if one was present
* at the moment of invocation, or NULL otherwise.
*
+ * The caller is responsible for calling put_device() on the returned device.
+ *
* See additional information in acpi_dev_present() as well.
*/
-const char *
-acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
struct acpi_dev_match_info match = {};
struct device *dev;
@@ -826,9 +831,9 @@ acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
- return dev ? match.dev_name : NULL;
+ return dev ? match.adev : NULL;
}
-EXPORT_SYMBOL(acpi_dev_get_first_match_name);
+EXPORT_SYMBOL(acpi_dev_get_first_match_dev);
/*
* acpi_backlight= handling, this is done here rather then in video_detect.c
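
A minimal caller sketch for the new helper; "ABCD0000" is a made-up HID, and the put_device() balances the reference the lookup takes:

	static void match_demo(void)
	{
		struct acpi_device *adev;

		adev = acpi_dev_get_first_match_dev("ABCD0000", NULL, -1);
		if (!adev)
			return;
		/* ... use adev ... */
		put_device(&adev->dev);
	}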
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 43587ac680e4..31014c7d3793 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -112,7 +112,7 @@ static int video_detect_force_none(const struct dmi_system_id *d)
static const struct dmi_system_id video_detect_dmi_table[] = {
/* On Samsung X360, the BIOS will set a flag (VDRV) if generic
* ACPI backlight device is used. This flag will definitively break
- * the backlight interface (even the vendor interface) untill next
+ * the backlight interface (even the vendor interface) until next
* reboot. It's why we should prevent video.ko from being used here
* and we can't rely on a later call to acpi_video_unregister().
*/
@@ -141,6 +141,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Sony VPCEH3U1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
+ },
+ },
/*
* These models have a working acpi_video backlight control, and using
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 41b706403ef7..b4dae624b9af 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -26,19 +26,36 @@
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
-static const struct amba_id *
-amba_lookup(const struct amba_id *table, struct amba_device *dev)
+/* Called on a periphid match for a class 0x9 CoreSight device. */
+static int
+amba_cs_uci_id_match(const struct amba_id *table, struct amba_device *dev)
{
int ret = 0;
+ struct amba_cs_uci_id *uci;
+
+ uci = table->data;
+ /* no table data or zero mask - return match on periphid */
+ if (!uci || (uci->devarch_mask == 0))
+ return 1;
+
+ /* test against read devtype and masked devarch value */
+ ret = (dev->uci.devtype == uci->devtype) &&
+ ((dev->uci.devarch & uci->devarch_mask) == uci->devarch);
+ return ret;
+}
+
+static const struct amba_id *
+amba_lookup(const struct amba_id *table, struct amba_device *dev)
+{
while (table->mask) {
- ret = (dev->periphid & table->mask) == table->id;
- if (ret)
- break;
+ if (((dev->periphid & table->mask) == table->id) &&
+ ((dev->cid != CORESIGHT_CID) ||
+ (amba_cs_uci_id_match(table, dev))))
+ return table;
table++;
}
-
- return ret ? table : NULL;
+ return NULL;
}
static int amba_match(struct device *dev, struct device_driver *drv)
@@ -399,10 +416,22 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) <<
(i * 8);
+ if (cid == CORESIGHT_CID) {
+ /* set the base to the start of the last 4k block */
+ void __iomem *csbase = tmp + size - 4096;
+
+ dev->uci.devarch =
+ readl(csbase + UCI_REG_DEVARCH_OFFSET);
+ dev->uci.devtype =
+ readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
+ }
+
amba_put_disable_pclk(dev);
- if (cid == AMBA_CID || cid == CORESIGHT_CID)
+ if (cid == AMBA_CID || cid == CORESIGHT_CID) {
dev->periphid = pid;
+ dev->cid = cid;
+ }
if (!dev->periphid)
ret = -ENODEV;
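
How a driver opts in to the extra CoreSight filtering: every amba_id entry still matches on periphid/mask, and for a class 0x9 device the optional amba_cs_uci_id hung off ->data adds the devtype/devarch test. A hypothetical table, all values made up:

	static const struct amba_cs_uci_id demo_uci = {
		.devtype	= 0x13,
		.devarch	= 0x47702a13,
		.devarch_mask	= 0xfff0ffff,
	};

	static const struct amba_id demo_ids[] = {
		{ .id = 0x000bb95d, .mask = 0x000fffff, .data = (void *)&demo_uci },
		{ 0, 0 },
	};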
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8685882da64c..4b9c7ca492e6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
offset, read_size);
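
The added offset test guards an unsigned underflow: with size_t arithmetic, buffer->data_size - offset wraps to a huge value when offset is past the end, so min_t() would return sizeof(*object) and the old read_size check alone would pass. A standalone model of the failure, with made-up sizes:

	#include <stdio.h>

	int main(void)
	{
		size_t data_size = 64, offset = 100, object = 24;
		size_t wrapped = data_size - offset;	/* wraps to 2^64 - 36 */
		size_t read_size = object < wrapped ? object : wrapped;

		printf("%zu\n", read_size);	/* 24: bad offset slips through */
		return 0;
	}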
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6389467670a0..195f120c4e8c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+ mm = alloc->vma_vm_mm;
+ if (!mmget_not_zero(mm))
+ goto err_mmget;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
- if (vma) {
- if (!mmget_not_zero(alloc->vma_vm_mm))
- goto err_mmget;
- mm = alloc->vma_vm_mm;
- if (!down_read_trylock(&mm->mmap_sem))
- goto err_down_write_mmap_sem_failed;
- }
list_lru_isolate(lru, item);
spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
zap_page_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
-
- up_read(&mm->mmap_sem);
- mmput(mm);
}
+ up_write(&mm->mmap_sem);
+ mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 21d1ce20e1a9..c10ee2391031 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -778,7 +778,7 @@ static int ata_ioc32(struct ata_port *ap)
}
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
- int cmd, void __user *arg)
+ unsigned int cmd, void __user *arg)
{
unsigned long val;
int rc = -EINVAL;
@@ -829,7 +829,8 @@ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
}
EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
-int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
+int ata_scsi_ioctl(struct scsi_device *scsidev, unsigned int cmd,
+ void __user *arg)
{
return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
scsidev, cmd, arg);
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index b3ed8f9953a8..173e6f2dd9af 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
/* Per the spec, only slot type and drawer type ODD can be supported */
static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
{
- char buf[16];
+ char *buf;
unsigned int ret;
- struct rm_feature_desc *desc = (void *)(buf + 8);
+ struct rm_feature_desc *desc;
struct ata_taskfile tf;
static const char cdb[] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
- 0, sizeof(buf),
+ 0, 16,
0, 0, 0,
};
+ buf = kzalloc(16, GFP_KERNEL);
+ if (!buf)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+ desc = (void *)(buf + 8);
+
ata_tf_init(dev, &tf);
tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
tf.protocol = ATAPI_PROT_PIO;
- tf.lbam = sizeof(buf);
+ tf.lbam = 16;
ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
- buf, sizeof(buf), 0);
- if (ret)
+ buf, 16, 0);
+ if (ret) {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
- if (be16_to_cpu(desc->feature_code) != 3)
+ if (be16_to_cpu(desc->feature_code) != 3) {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
- if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+ if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+ kfree(buf);
return ODD_MECH_TYPE_SLOT;
- else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+ } else if (desc->mech_type == 1 && desc->load == 0 &&
+ desc->eject == 1) {
+ kfree(buf);
return ODD_MECH_TYPE_DRAWER;
- else
+ } else {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
}
/* Test if ODD is zero power ready by sense code */
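
The rationale for the kzalloc() conversion, which the hunk itself does not spell out: the buffer is a DMA target via ata_exec_internal(), and DMA to an on-stack buffer is unsafe, notably once stacks are vmalloc'ed with CONFIG_VMAP_STACK. The general shape, with do_transfer() as a placeholder:

	#include <linux/slab.h>

	static int do_transfer(void *buf, size_t len);	/* placeholder */

	static int dma_read_demo(void)
	{
		char *buf = kzalloc(16, GFP_KERNEL);
		int ret;

		if (!buf)
			return -ENOMEM;
		ret = do_transfer(buf, 16);	/* heap memory is DMA-safe */
		kfree(buf);
		return ret;
	}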
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 11e1663bdc4d..b2c06da4f62e 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1646,7 +1646,7 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
}
if (status & ISR_TBRQ_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Data tramsitted!\n");
+ fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n");
process_txdone_queue (dev, &dev->tx_relq);
}
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 57410f9c5d44..c52c738e554a 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -164,9 +164,7 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
-endif # AUXDISPLAY
-
-menuconfig PANEL
+menuconfig PARPORT_PANEL
tristate "Parallel port LCD/Keypad Panel support"
depends on PARPORT
select CHARLCD
@@ -178,7 +176,7 @@ menuconfig PANEL
compiled as a module, or linked into the kernel and started at boot.
If you don't understand what all this is about, say N.
-if PANEL
+if PARPORT_PANEL
config PANEL_PARPORT
int "Default parallel port number (0=LPT1)"
@@ -419,8 +417,11 @@ config PANEL_LCD_PIN_BL
Default for the 'BL' pin in custom profile is '0' (uncontrolled).
+endif # PARPORT_PANEL
+
config PANEL_CHANGE_MESSAGE
bool "Change LCD initialization message ?"
+ depends on CHARLCD
default "n"
---help---
This allows you to replace the boot message indicating the kernel version
@@ -444,7 +445,34 @@ config PANEL_BOOT_MESSAGE
An empty message will only clear the display at driver init time. Any other
printf()-formatted message is valid with newline and escape codes.
-endif # PANEL
+choice
+ prompt "Backlight initial state"
+ default CHARLCD_BL_FLASH
+
+ config CHARLCD_BL_OFF
+ bool "Off"
+ help
+ Backlight is initially turned off
+
+ config CHARLCD_BL_ON
+ bool "On"
+ help
+ Backlight is initially turned on
+
+ config CHARLCD_BL_FLASH
+ bool "Flash"
+ help
+ Backlight is flashed briefly on init
+
+endchoice
+
+endif # AUXDISPLAY
+
+config PANEL
+ tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)"
+ depends on PARPORT
+ select AUXDISPLAY
+ select PARPORT_PANEL
config CHARLCD
tristate "Character LCD core support" if COMPILE_TEST
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 7ac6776ca3f6..cf54b5efb07e 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
obj-$(CONFIG_HD44780) += hd44780.o
obj-$(CONFIG_HT16K33) += ht16k33.o
-obj-$(CONFIG_PANEL) += panel.o
+obj-$(CONFIG_PARPORT_PANEL) += panel.o
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 60e0b772673f..92745efefb54 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -91,7 +91,7 @@ struct charlcd_priv {
unsigned long long drvdata[0];
};
-#define to_priv(p) container_of(p, struct charlcd_priv, lcd)
+#define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd)
/* Device single-open policy control */
static atomic_t charlcd_available = ATOMIC_INIT(1);
@@ -105,7 +105,7 @@ static void long_sleep(int ms)
/* turn the backlight on or off */
static void charlcd_backlight(struct charlcd *lcd, int on)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
if (!lcd->ops->backlight)
return;
@@ -134,7 +134,7 @@ static void charlcd_bl_off(struct work_struct *work)
/* turn the backlight on for a little while */
void charlcd_poke(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
if (!lcd->ops->backlight)
return;
@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(charlcd_poke);
static void charlcd_gotoxy(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
unsigned int addr;
/*
@@ -170,7 +170,7 @@ static void charlcd_gotoxy(struct charlcd *lcd)
static void charlcd_home(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
priv->addr.x = 0;
priv->addr.y = 0;
@@ -179,7 +179,7 @@ static void charlcd_home(struct charlcd *lcd)
static void charlcd_print(struct charlcd *lcd, char c)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
if (priv->addr.x < lcd->bwidth) {
if (lcd->char_conv)
@@ -211,7 +211,7 @@ static void charlcd_clear_fast(struct charlcd *lcd)
/* clears the display and resets X/Y */
static void charlcd_clear_display(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR);
priv->addr.x = 0;
@@ -223,7 +223,7 @@ static void charlcd_clear_display(struct charlcd *lcd)
static int charlcd_init_display(struct charlcd *lcd)
{
void (*write_cmd_raw)(struct charlcd *lcd, int cmd);
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
u8 init;
if (lcd->ifwidth != 4 && lcd->ifwidth != 8)
@@ -369,7 +369,7 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
static inline int handle_lcd_special_code(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
/* LCD special codes */
@@ -580,7 +580,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
static void charlcd_write_char(struct charlcd *lcd, char c)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
/* first, we'll test if we're in escape mode */
if ((c != '\n') && priv->esc_seq.len >= 0) {
@@ -705,7 +705,7 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
static int charlcd_open(struct inode *inode, struct file *file)
{
- struct charlcd_priv *priv = to_priv(the_charlcd);
+ struct charlcd_priv *priv = charlcd_to_priv(the_charlcd);
int ret;
ret = -EBUSY;
@@ -763,10 +763,24 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
}
}
+#ifdef CONFIG_PANEL_BOOT_MESSAGE
+#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
+#else
+#define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n"
+#endif
+
+#ifdef CONFIG_CHARLCD_BL_ON
+#define LCD_INIT_BL "\x1b[L+"
+#elif defined(CONFIG_CHARLCD_BL_FLASH)
+#define LCD_INIT_BL "\x1b[L*"
+#else
+#define LCD_INIT_BL "\x1b[L-"
+#endif
+
/* initialize the LCD driver */
static int charlcd_init(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
int ret;
if (lcd->ops->backlight) {
@@ -784,13 +798,8 @@ static int charlcd_init(struct charlcd *lcd)
return ret;
/* display a short message */
-#ifdef CONFIG_PANEL_CHANGE_MESSAGE
-#ifdef CONFIG_PANEL_BOOT_MESSAGE
- charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
-#endif
-#else
- charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n");
-#endif
+ charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT);
+
/* clear the display on the next device opening */
priv->must_clear = true;
charlcd_home(lcd);
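
The two new macros replace the nested #ifdef maze with one string concatenation. A userspace sketch of how the selection collapses to a single literal at compile time; the CONFIG_ defines below are stand-ins for what Kconfig would generate, and the version string is a placeholder for UTS_RELEASE:

#include <stdio.h>

/* stand-in for Kconfig output; exactly one backlight option is set */
#define CONFIG_CHARLCD_BL_FLASH 1

#ifdef CONFIG_PANEL_BOOT_MESSAGE
#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
#else
#define LCD_INIT_TEXT "Linux-x.y\n"	/* kernel uses UTS_RELEASE here */
#endif

#if defined(CONFIG_CHARLCD_BL_ON)
#define LCD_INIT_BL "\x1b[L+"		/* backlight on */
#elif defined(CONFIG_CHARLCD_BL_FLASH)
#define LCD_INIT_BL "\x1b[L*"		/* flash briefly */
#else
#define LCD_INIT_BL "\x1b[L-"		/* backlight off */
#endif

int main(void)
{
	/* adjacent string literals concatenate at compile time */
	fputs("\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT, stdout);
	return 0;
}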
@@ -818,6 +827,12 @@ struct charlcd *charlcd_alloc(unsigned int drvdata_size)
}
EXPORT_SYMBOL_GPL(charlcd_alloc);
+void charlcd_free(struct charlcd *lcd)
+{
+ kfree(charlcd_to_priv(lcd));
+}
+EXPORT_SYMBOL_GPL(charlcd_free);
+
static int panel_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
@@ -866,7 +881,7 @@ EXPORT_SYMBOL_GPL(charlcd_register);
int charlcd_unregister(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
unregister_reboot_notifier(&panel_notifier);
charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 9ad93ea42fdc..ab15b64707ad 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -271,7 +271,7 @@ static int hd44780_probe(struct platform_device *pdev)
return 0;
fail:
- kfree(lcd);
+ charlcd_free(lcd);
return ret;
}
@@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev)
struct charlcd *lcd = platform_get_drvdata(pdev);
charlcd_unregister(lcd);
+
+ charlcd_free(lcd);
return 0;
}
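
With charlcd_free() exported, users such as hd44780 pair it with charlcd_alloc() on every exit path instead of calling kfree() on the embedded object. A hedged probe sketch under that assumption (header location and error handling abbreviated; demo_probe is a placeholder name):

#include <linux/platform_device.h>
#include <misc/charlcd.h>	/* header location at this point in the tree */

static int demo_probe(struct platform_device *pdev)
{
	struct charlcd *lcd;
	int ret;

	lcd = charlcd_alloc(0);		/* allocates the enclosing charlcd_priv */
	if (!lcd)
		return -ENOMEM;

	ret = charlcd_register(lcd);
	if (ret) {
		charlcd_free(lcd);	/* not kfree(): priv is what was allocated */
		return ret;
	}

	platform_set_drvdata(pdev, lcd);
	return 0;
}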
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 21b9b2f2470a..e06de63497cf 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1620,7 +1620,7 @@ err_lcd_unreg:
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
- kfree(lcd.charlcd);
+ charlcd_free(lcd.charlcd);
lcd.charlcd = NULL;
parport_unregister_device(pprt);
pprt = NULL;
@@ -1647,7 +1647,7 @@ static void panel_detach(struct parport *port)
if (lcd.enabled) {
charlcd_unregister(lcd.charlcd);
lcd.initialized = false;
- kfree(lcd.charlcd);
+ charlcd_free(lcd.charlcd);
lcd.charlcd = NULL;
}
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 3e63a900b330..059700ea3521 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -191,83 +191,6 @@ config DMA_FENCE_TRACE
lockup related problems for dma-buffers shared across multiple
devices.
-config DMA_CMA
- bool "DMA Contiguous Memory Allocator"
- depends on HAVE_DMA_CONTIGUOUS && CMA
- help
- This enables the Contiguous Memory Allocator which allows drivers
- to allocate big physically-contiguous blocks of memory for use with
- hardware components that do not support I/O map nor scatter-gather.
-
- You can disable CMA by specifying "cma=0" on the kernel's command
- line.
-
- For more information see <include/linux/dma-contiguous.h>.
- If unsure, say "n".
-
-if DMA_CMA
-comment "Default contiguous memory area size:"
-
-config CMA_SIZE_MBYTES
- int "Size in Mega Bytes"
- depends on !CMA_SIZE_SEL_PERCENTAGE
- default 0 if X86
- default 16
- help
- Defines the size (in MiB) of the default memory area for Contiguous
- Memory Allocator. If the size of 0 is selected, CMA is disabled by
- default, but it can be enabled by passing cma=size[MG] to the kernel.
-
-
-config CMA_SIZE_PERCENTAGE
- int "Percentage of total memory"
- depends on !CMA_SIZE_SEL_MBYTES
- default 0 if X86
- default 10
- help
- Defines the size of the default memory area for Contiguous Memory
- Allocator as a percentage of the total memory in the system.
- If 0 percent is selected, CMA is disabled by default, but it can be
- enabled by passing cma=size[MG] to the kernel.
-
-choice
- prompt "Selected region size"
- default CMA_SIZE_SEL_MBYTES
-
-config CMA_SIZE_SEL_MBYTES
- bool "Use mega bytes value only"
-
-config CMA_SIZE_SEL_PERCENTAGE
- bool "Use percentage value only"
-
-config CMA_SIZE_SEL_MIN
- bool "Use lower value (minimum)"
-
-config CMA_SIZE_SEL_MAX
- bool "Use higher value (maximum)"
-
-endchoice
-
-config CMA_ALIGNMENT
- int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
- range 4 12
- default 8
- help
- DMA mapping framework by default aligns all buffers to the smallest
- PAGE_SIZE order which is greater than or equal to the requested buffer
- size. This works well for buffers up to a few hundreds kilobytes, but
- for larger buffers it just a memory waste. With this parameter you can
- specify the maximum PAGE_SIZE order for contiguous buffers. Larger
- buffers will be aligned only to this specified order. The order is
- expressed as a power of two multiplied by the PAGE_SIZE.
-
- For example, if your system defaults to 4KiB pages, the order value
- of 8 means that the buffers will be aligned up to 1MiB only.
-
- If unsure, leave the default value "8".
-
-endif
-
config GENERIC_ARCH_TOPOLOGY
bool
help
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 048cbf7d5233..e49028a60429 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -88,6 +88,7 @@ unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
}
+EXPORT_SYMBOL_GPL(memory_block_size_bytes);
static unsigned long get_memory_block_size(void)
{
@@ -505,7 +506,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
ret = lock_device_hotplug_sysfs();
if (ret)
- goto out;
+ return ret;
nid = memory_add_physaddr_to_nid(phys_addr);
ret = __add_memory(nid, phys_addr,
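
The probe_store() fix above is the classic don't-unlock-what-you-never-locked pattern: when the lock attempt itself fails, jumping to a common "out:" label would release a lock that was never taken. A minimal sketch (do_hotplug_work is a placeholder):

static ssize_t demo_store(void)
{
	int ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;	/* was "goto out": unlocked a lock never taken */

	ret = do_hotplug_work();

	unlock_device_hotplug();
	return ret;
}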
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 2c334c01fc43..3d899e8abd58 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,6 +6,8 @@
* This file is released under the GPLv2.
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
@@ -20,6 +22,7 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
+#include <linux/cpu.h>
#include "power.h"
@@ -126,6 +129,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -389,11 +393,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
if (unlikely(!genpd->set_performance_state))
return -EINVAL;
- if (unlikely(!dev->power.subsys_data ||
- !dev->power.subsys_data->domain_data)) {
- WARN_ON(1);
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
return -EINVAL;
- }
genpd_lock(genpd);
@@ -457,19 +459,19 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
time_start = ktime_get();
ret = genpd->power_off(genpd);
- if (ret == -EBUSY)
+ if (ret)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
- return ret;
+ return 0;
genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "off", elapsed_ns);
- return ret;
+ return 0;
}
/**
@@ -1394,8 +1396,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#endif /* CONFIG_PM_SLEEP */
-static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
- struct gpd_timing_data *td)
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
int ret;
@@ -1410,9 +1411,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
goto err_put;
}
- if (td)
- gpd_data->td = *td;
-
gpd_data->base.dev = dev;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
@@ -1452,8 +1450,57 @@ static void genpd_free_dev_data(struct device *dev,
dev_pm_put_subsys_data(dev);
}
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
+ int cpu, bool set, unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return;
+
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ struct generic_pm_domain *master = link->master;
+
+ genpd_lock_nested(master, depth + 1);
+ genpd_update_cpumask(master, cpu, set, depth + 1);
+ genpd_unlock(master);
+ }
+
+ if (set)
+ cpumask_set_cpu(cpu, genpd->cpus);
+ else
+ cpumask_clear_cpu(cpu, genpd->cpus);
+}
+
+static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, true, 0);
+}
+
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, false, 0);
+}
+
+static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
+{
+ int cpu;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (get_cpu_device(cpu) == dev)
+ return cpu;
+ }
+
+ return -1;
+}
+
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
- struct gpd_timing_data *td)
+ struct device *base_dev)
{
struct generic_pm_domain_data *gpd_data;
int ret;
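
genpd_update_cpumask() walks the hierarchy bottom-up: every master is updated before the domain itself, under nested locking. A runnable userspace analogue of that propagation, with a NULL-terminated parent array standing in for the kernel's slave_links list and a plain unsigned long standing in for a cpumask:

#include <stdbool.h>
#include <stdio.h>

struct domain {
	const char *name;
	unsigned long cpus;		/* toy stand-in for a cpumask */
	struct domain *masters[4];	/* NULL-terminated parent list */
};

static void update_cpumask(struct domain *d, int cpu, bool set)
{
	/* propagate to all masters first, as genpd_update_cpumask() does
	 * (the kernel additionally takes nested domain locks here) */
	for (int i = 0; d->masters[i]; i++)
		update_cpumask(d->masters[i], cpu, set);

	if (set)
		d->cpus |= 1UL << cpu;
	else
		d->cpus &= ~(1UL << cpu);
}

int main(void)
{
	struct domain pkg = { "pkg", 0, { NULL } };
	struct domain cluster = { "cluster", 0, { &pkg, NULL } };

	update_cpumask(&cluster, 2, true);
	printf("cluster=%#lx pkg=%#lx\n", cluster.cpus, pkg.cpus); /* 0x4 0x4 */
	return 0;
}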
@@ -1463,16 +1510,19 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- gpd_data = genpd_alloc_dev_data(dev, td);
+ gpd_data = genpd_alloc_dev_data(dev);
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
- genpd_lock(genpd);
+ gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
if (ret)
goto out;
+ genpd_lock(genpd);
+
+ genpd_set_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, &genpd->domain);
genpd->device_count++;
@@ -1480,9 +1530,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- out:
genpd_unlock(genpd);
-
+ out:
if (ret)
genpd_free_dev_data(dev, gpd_data);
else
@@ -1501,7 +1550,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
int ret;
mutex_lock(&gpd_list_lock);
- ret = genpd_add_device(genpd, dev, NULL);
+ ret = genpd_add_device(genpd, dev, dev);
mutex_unlock(&gpd_list_lock);
return ret;
@@ -1531,15 +1580,16 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
genpd->device_count--;
genpd->max_off_time_changed = true;
- if (genpd->detach_dev)
- genpd->detach_dev(genpd, dev);
-
+ genpd_clear_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, NULL);
list_del_init(&pdd->list_node);
genpd_unlock(genpd);
+ if (genpd->detach_dev)
+ genpd->detach_dev(genpd, dev);
+
genpd_free_dev_data(dev, gpd_data);
return 0;
@@ -1657,8 +1707,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
- pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
- subdomain->name);
+ pr_warn("%s: unable to remove subdomain %s\n",
+ genpd->name, subdomain->name);
ret = -EBUSY;
goto out;
}
@@ -1685,6 +1735,12 @@ out:
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+static void genpd_free_default_power_state(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ kfree(states);
+}
+
static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
struct genpd_power_state *state;
@@ -1695,7 +1751,7 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
genpd->states = state;
genpd->state_count = 1;
- genpd->free = state;
+ genpd->free_states = genpd_free_default_power_state;
return 0;
}
@@ -1761,13 +1817,20 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
return -EINVAL;
+ if (genpd_is_cpu_domain(genpd) &&
+ !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
/* Use only one "off" state if there were no states declared */
if (genpd->state_count == 0) {
ret = genpd_set_default_power_state(genpd);
- if (ret)
+ if (ret) {
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
return ret;
- } else if (!gov) {
- pr_warn("%s : no governor for states\n", genpd->name);
+ }
+ } else if (!gov && genpd->state_count > 1) {
+ pr_warn("%s: no governor for states\n", genpd->name);
}
device_initialize(&genpd->dev);
@@ -1811,7 +1874,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
list_del(&genpd->gpd_list_node);
genpd_unlock(genpd);
cancel_work_sync(&genpd->power_off_work);
- kfree(genpd->free);
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ if (genpd->free_states)
+ genpd->free_states(genpd->states, genpd->state_count);
+
pr_debug("%s: removed %s\n", __func__, genpd->name);
return 0;
@@ -2189,7 +2256,7 @@ int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
goto out;
}
- ret = genpd_add_device(genpd, dev, NULL);
+ ret = genpd_add_device(genpd, dev, dev);
out:
mutex_unlock(&gpd_list_lock);
@@ -2273,6 +2340,7 @@ EXPORT_SYMBOL_GPL(of_genpd_remove_last);
static void genpd_release_dev(struct device *dev)
{
+ of_node_put(dev->of_node);
kfree(dev);
}
@@ -2334,14 +2402,14 @@ static void genpd_dev_pm_sync(struct device *dev)
genpd_queue_power_off_work(pd);
}
-static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
+static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
unsigned int index, bool power_on)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
int ret;
- ret = of_parse_phandle_with_args(np, "power-domains",
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", index, &pd_args);
if (ret < 0)
return ret;
@@ -2353,12 +2421,12 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return driver_deferred_probe_check_state(dev);
+ return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
- ret = genpd_add_device(pd, dev, NULL);
+ ret = genpd_add_device(pd, dev, base_dev);
mutex_unlock(&gpd_list_lock);
if (ret < 0) {
@@ -2409,7 +2477,7 @@ int genpd_dev_pm_attach(struct device *dev)
"#power-domain-cells") != 1)
return 0;
- return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
+ return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
@@ -2439,10 +2507,10 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
if (!dev->of_node)
return NULL;
- /* Deal only with devices using multiple PM domains. */
+ /* Verify that the index is within a valid range. */
num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
- if (num_domains < 2 || index >= num_domains)
+ if (index >= num_domains)
return NULL;
/* Allocate and register device on the genpd bus. */
@@ -2453,15 +2521,16 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
virt_dev->bus = &genpd_bus_type;
virt_dev->release = genpd_release_dev;
+ virt_dev->of_node = of_node_get(dev->of_node);
ret = device_register(virt_dev);
if (ret) {
- kfree(virt_dev);
+ put_device(virt_dev);
return ERR_PTR(ret);
}
/* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
+ ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
if (ret < 1) {
device_unregister(virt_dev);
return ret ? ERR_PTR(ret) : NULL;
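
The switch from kfree() to put_device() follows the driver-core rule that once device_register() has been called, the refcount owns the allocation, so cleanup must go through the ->release() callback. A hedged sketch of the rule as applied here (function names illustrative):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>

static void demo_release(struct device *dev)
{
	of_node_put(dev->of_node);	/* mirrors genpd_release_dev() */
	kfree(dev);
}

static struct device *demo_create(struct device_node *np)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->release = demo_release;
	dev->of_node = of_node_get(np);

	ret = device_register(dev);
	if (ret) {
		put_device(dev);	/* not kfree(): release() runs via refcount */
		return ERR_PTR(ret);
	}
	return dev;
}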
@@ -2514,7 +2583,7 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
&entry_latency);
if (err) {
pr_debug(" * %pOF missing entry-latency-us property\n",
- state_node);
+ state_node);
return -EINVAL;
}
@@ -2522,7 +2591,7 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
&exit_latency);
if (err) {
pr_debug(" * %pOF missing exit-latency-us property\n",
- state_node);
+ state_node);
return -EINVAL;
}
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 99896fbf18e4..7912bc957244 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -10,6 +10,9 @@
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
static int dev_update_qos_constraint(struct device *dev, void *data)
{
@@ -128,7 +131,6 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
off_on_time_ns = genpd->states[state].power_off_latency_ns +
genpd->states[state].power_on_latency_ns;
-
min_off_time_ns = -1;
/*
* Check if subdomains can be off for enough time.
@@ -211,8 +213,10 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct gpd_link *link;
- if (!genpd->max_off_time_changed)
+ if (!genpd->max_off_time_changed) {
+ genpd->state_idx = genpd->cached_power_down_state_idx;
return genpd->cached_power_down_ok;
+ }
/*
* We have to invalidate the cached results for the masters, so
@@ -237,6 +241,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
genpd->state_idx--;
}
+ genpd->cached_power_down_state_idx = genpd->state_idx;
return genpd->cached_power_down_ok;
}
@@ -245,6 +250,65 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
return false;
}
+#ifdef CONFIG_CPU_IDLE
+static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct cpuidle_device *dev;
+ ktime_t domain_wakeup, next_hrtimer;
+ s64 idle_duration_ns;
+ int cpu, i;
+
+ /* Validate dev PM QoS constraints. */
+ if (!default_power_down_ok(pd))
+ return false;
+
+ if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+ return true;
+
+ /*
+ * Find the next wakeup for any of the online CPUs within the PM domain
+ * and its subdomains. Note, we only need the genpd->cpus, as it already
+ * contains a mask of all CPUs from subdomains.
+ */
+ domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
+ for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (dev) {
+ next_hrtimer = READ_ONCE(dev->next_hrtimer);
+ if (ktime_before(next_hrtimer, domain_wakeup))
+ domain_wakeup = next_hrtimer;
+ }
+ }
+
+	/* The minimum idle duration is from now until the next wakeup. */
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+ if (idle_duration_ns <= 0)
+ return false;
+
+ /*
+	 * Find the deepest idle state whose residency requirement is satisfied,
+	 * while also taking into account the state's power-off latency.
+ * Start at the state picked by the dev PM QoS constraint validation.
+ */
+ i = genpd->state_idx;
+ do {
+ if (idle_duration_ns >= (genpd->states[i].residency_ns +
+ genpd->states[i].power_off_latency_ns)) {
+ genpd->state_idx = i;
+ return true;
+ }
+ } while (--i >= 0);
+
+ return false;
+}
+
+struct dev_power_governor pm_domain_cpu_gov = {
+ .suspend_ok = default_suspend_ok,
+ .power_down_ok = cpu_power_down_ok,
+};
+#endif
+
struct dev_power_governor simple_qos_governor = {
.suspend_ok = default_suspend_ok,
.power_down_ok = default_power_down_ok,
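
A worked example of the selection loop in cpu_power_down_ok(): starting from the state already validated against dev PM QoS, it walks toward shallower states until one fits within the expected idle time. A runnable toy with made-up numbers:

#include <stdio.h>

struct state { long long residency_ns, power_off_latency_ns; };

static int pick_state(const struct state *s, int deepest, long long idle_ns)
{
	for (int i = deepest; i >= 0; i--)
		if (idle_ns >= s[i].residency_ns + s[i].power_off_latency_ns)
			return i;
	return -1;	/* no state fits: not worth powering off */
}

int main(void)
{
	struct state states[] = {
		{ 100000, 20000 },	/* state 0: shallow */
		{ 2000000, 500000 },	/* state 1: deep */
	};

	/* 1.5 ms until the earliest CPU wakeup: deep needs 2.5 ms, shallow fits */
	printf("picked state %d\n", pick_state(states, 1, 1500000));
	return 0;
}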
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c88f56b9ae5b..10528a7747bf 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -17,6 +17,8 @@
* subsystem list maintains.
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
@@ -128,7 +130,7 @@ void device_pm_add(struct device *dev)
if (device_pm_not_required(dev))
return;
- pr_debug("PM: Adding info for %s:%s\n",
+ pr_debug("Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
device_pm_check_callbacks(dev);
mutex_lock(&dpm_list_mtx);
@@ -149,7 +151,7 @@ void device_pm_remove(struct device *dev)
if (device_pm_not_required(dev))
return;
- pr_debug("PM: Removing info for %s:%s\n",
+ pr_debug("Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
@@ -168,7 +170,7 @@ void device_pm_remove(struct device *dev)
*/
void device_pm_move_before(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s before %s:%s\n",
+ pr_debug("Moving %s:%s before %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert before devb. */
@@ -182,7 +184,7 @@ void device_pm_move_before(struct device *deva, struct device *devb)
*/
void device_pm_move_after(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s after %s:%s\n",
+ pr_debug("Moving %s:%s after %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert after devb. */
@@ -195,7 +197,7 @@ void device_pm_move_after(struct device *deva, struct device *devb)
*/
void device_pm_move_last(struct device *dev)
{
- pr_debug("PM: Moving %s:%s to end of list\n",
+ pr_debug("Moving %s:%s to end of list\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
list_move_tail(&dev->power.entry, &dpm_list);
}
@@ -418,8 +420,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
- printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
+ pr_err("Device %s failed to %s%s: error %d\n",
+ dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -476,7 +478,7 @@ struct dpm_watchdog {
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that the PM watchdog depends on.
*
* Called when a driver has timed out suspending or resuming.
* There's not much we can do here to recover so panic() to
@@ -704,6 +706,19 @@ static bool is_async(struct device *dev)
&& !pm_trace_is_enabled();
}
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(func, dev);
+ return true;
+ }
+
+ return false;
+}
+
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = (struct device *)data;
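
dpm_async_fn() folds the reinit-completion/get-device/schedule boilerplate that each suspend and resume phase previously open-coded. A hedged sketch of the calling pattern it enables (handler and phase names below are placeholders):

static int device_do_phase(struct device *dev)
{
	/* true means the async handler was scheduled and now owns the
	 * device reference taken inside dpm_async_fn() */
	if (dpm_async_fn(dev, async_phase_handler))
		return 0;

	/* otherwise run the synchronous variant inline */
	return __device_phase(dev, pm_transition, false);
}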
@@ -730,13 +745,8 @@ void dpm_noirq_resume_devices(pm_message_t state)
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
- list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_resume_noirq, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+ dpm_async_fn(dev, async_resume_noirq);
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
@@ -887,13 +897,8 @@ void dpm_resume_early(pm_message_t state)
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
- list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_resume_early, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+ dpm_async_fn(dev, async_resume_early);
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
@@ -1051,13 +1056,8 @@ void dpm_resume(pm_message_t state)
pm_transition = state;
async_error = 0;
- list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_resume, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+ dpm_async_fn(dev, async_resume);
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
@@ -1371,13 +1371,9 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
static int device_suspend_noirq(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_suspend_noirq, dev);
+ if (dpm_async_fn(dev, async_suspend_noirq))
return 0;
- }
+
return __device_suspend_noirq(dev, pm_transition, false);
}
@@ -1574,13 +1570,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
static int device_suspend_late(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_suspend_late, dev);
+ if (dpm_async_fn(dev, async_suspend_late))
return 0;
- }
return __device_suspend_late(dev, pm_transition, false);
}
@@ -1745,6 +1736,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ /* Avoid direct_complete to let wakeup_path propagate. */
+ if (device_may_wakeup(dev) || dev->power.wakeup_path)
+ dev->power.direct_complete = false;
+
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
@@ -1840,13 +1835,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
static int device_suspend(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_suspend, dev);
+ if (dpm_async_fn(dev, async_suspend))
return 0;
- }
return __device_suspend(dev, pm_transition, false);
}
@@ -2022,8 +2012,7 @@ int dpm_prepare(pm_message_t state)
error = 0;
continue;
}
- printk(KERN_INFO "PM: Device %s not prepared "
- "for power transition: code %d\n",
+ pr_info("Device %s not prepared for power transition: code %d\n",
dev_name(dev), error);
put_device(dev);
break;
@@ -2062,14 +2051,14 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
if (ret)
- printk(KERN_ERR "%s(): %pS returns %d\n", function, fn, ret);
+ pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
* device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
* @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
*/
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c511def48b48..ec33fbdb919b 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -21,6 +21,7 @@ static inline void pm_runtime_early_init(struct device *dev)
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
+extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3382542b39b7..f80e402ef778 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -22,7 +22,7 @@
* per-device constraint data struct.
*
* Note about the per-device constraint data struct allocation:
- * . The per-device constraints data struct ptr is tored into the device
+ * . The per-device constraints data struct ptr is stored into the device
* dev_pm_info.
* . To minimize the data usage by the per-device constraints, the data struct
* is only allocated at the first call to dev_pm_qos_add_request.
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index a80dbf08a99c..977db40378b0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -64,7 +64,7 @@ static int rpm_suspend(struct device *dev, int rpmflags);
* runtime_status field is updated, to account the time in the old state
* correctly.
*/
-void update_pm_runtime_accounting(struct device *dev)
+static void update_pm_runtime_accounting(struct device *dev)
{
u64 now, last, delta;
@@ -98,7 +98,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
dev->power.runtime_status = status;
}
-u64 pm_runtime_suspended_time(struct device *dev)
+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
u64 time;
unsigned long flags;
@@ -106,12 +106,22 @@ u64 pm_runtime_suspended_time(struct device *dev)
spin_lock_irqsave(&dev->power.lock, flags);
update_pm_runtime_accounting(dev);
- time = dev->power.suspended_time;
+ time = suspended ? dev->power.suspended_time : dev->power.active_time;
spin_unlock_irqrestore(&dev->power.lock, flags);
return time;
}
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, true);
+}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
/**
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index c6bf76124184..1226e441ddfe 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -125,13 +125,9 @@ static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
- u64 tmp;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- tmp = dev->power.active_time;
+ u64 tmp = pm_runtime_active_time(dev);
do_div(tmp, NSEC_PER_MSEC);
ret = sprintf(buf, "%llu\n", tmp);
- spin_unlock_irq(&dev->power.lock);
return ret;
}
@@ -141,13 +137,9 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
- u64 tmp;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- tmp = dev->power.suspended_time;
+ u64 tmp = pm_runtime_suspended_time(dev);
do_div(tmp, NSEC_PER_MSEC);
ret = sprintf(buf, "%llu\n", tmp);
- spin_unlock_irq(&dev->power.lock);
return ret;
}
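
With the spinlock and accounting update folded into the accessors, readers of the runtime counters only convert units. A short usage sketch mirroring the sysfs code above, assuming dev is a struct device pointer:

u64 ms;

ms = pm_runtime_active_time(dev);	/* nanoseconds, internally locked */
do_div(ms, NSEC_PER_MSEC);		/* sysfs reports milliseconds */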
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index b11f47a1e819..2bd9d2c744ca 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -7,6 +7,8 @@
* devices may be working.
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/pm-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index f1fee72ed970..23c243a4c675 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -6,6 +6,8 @@
* This file is released under the GPLv2.
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
@@ -106,23 +108,6 @@ struct wakeup_source *wakeup_source_create(const char *name)
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
-/**
- * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
- * @ws: Wakeup source to prepare for destruction.
- *
- * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
- * be run in parallel with this function for the same wakeup source object.
- */
-void wakeup_source_drop(struct wakeup_source *ws)
-{
- if (!ws)
- return;
-
- del_timer_sync(&ws->timer);
- __pm_relax(ws);
-}
-EXPORT_SYMBOL_GPL(wakeup_source_drop);
-
/*
* Record wakeup_source statistics being deleted into a dummy wakeup_source.
*/
@@ -162,7 +147,7 @@ void wakeup_source_destroy(struct wakeup_source *ws)
if (!ws)
return;
- wakeup_source_drop(ws);
+ __pm_relax(ws);
wakeup_source_record(ws);
kfree_const(ws->name);
kfree(ws);
@@ -205,6 +190,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
list_del_rcu(&ws->entry);
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
+
+ del_timer_sync(&ws->timer);
+ /*
+ * Clear timer.function to make wakeup_source_not_registered() treat
+ * this wakeup source as not registered.
+ */
+ ws->timer.function = NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -812,7 +804,7 @@ void pm_print_active_wakeup_sources(void)
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
- pr_debug("active wakeup source: %s\n", ws->name);
+ pm_pr_dbg("active wakeup source: %s\n", ws->name);
active = 1;
} else if (!active &&
(!last_activity_ws ||
@@ -823,7 +815,7 @@ void pm_print_active_wakeup_sources(void)
}
if (!active && last_activity_ws)
- pr_debug("last active wakeup source: %s\n",
+ pm_pr_dbg("last active wakeup source: %s\n",
last_activity_ws->name);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -853,7 +845,7 @@ bool pm_wakeup_pending(void)
raw_spin_unlock_irqrestore(&events_lock, flags);
if (ret) {
- pr_debug("PM: Wakeup pending, aborting suspend\n");
+ pm_pr_dbg("Wakeup pending, aborting suspend\n");
pm_print_active_wakeup_sources();
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 8b91ab380d14..348b37e64944 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -984,6 +984,81 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id,
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
/**
+ * fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers
+ * @fwnode: parent fwnode_handle containing the graph
+ * @port: identifier of the port node
+ * @endpoint: identifier of the endpoint node under the port node
+ * @flags: fwnode lookup flags
+ *
+ * Return the fwnode handle of the local endpoint corresponding the port and
+ * endpoint IDs or NULL if not found.
+ *
+ * If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint
+ * has not been found, look for the closest endpoint ID greater than the
+ * specified one and return the endpoint that corresponds to it, if present.
+ *
+ * Do not return endpoints that belong to disabled devices, unless
+ * FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
+ *
+ * The returned endpoint needs to be released by calling fwnode_handle_put() on
+ * it when it is not needed any more.
+ */
+struct fwnode_handle *
+fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
+ u32 port, u32 endpoint, unsigned long flags)
+{
+ struct fwnode_handle *ep = NULL, *best_ep = NULL;
+ unsigned int best_ep_id = 0;
+ bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT;
+ bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED);
+
+ while ((ep = fwnode_graph_get_next_endpoint(fwnode, ep))) {
+ struct fwnode_endpoint fwnode_ep = { 0 };
+ int ret;
+
+ if (enabled_only) {
+ struct fwnode_handle *dev_node;
+ bool available;
+
+ dev_node = fwnode_graph_get_remote_port_parent(ep);
+ available = fwnode_device_is_available(dev_node);
+ fwnode_handle_put(dev_node);
+ if (!available)
+ continue;
+ }
+
+ ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep);
+ if (ret < 0)
+ continue;
+
+ if (fwnode_ep.port != port)
+ continue;
+
+ if (fwnode_ep.id == endpoint)
+ return ep;
+
+ if (!endpoint_next)
+ continue;
+
+ /*
+ * If the endpoint that has just been found is not the first
+ * matching one and the ID of the one found previously is closer
+ * to the requested endpoint ID, skip it.
+ */
+ if (fwnode_ep.id < endpoint ||
+ (best_ep && best_ep_id < fwnode_ep.id))
+ continue;
+
+ fwnode_handle_put(best_ep);
+ best_ep = fwnode_handle_get(ep);
+ best_ep_id = fwnode_ep.id;
+ }
+
+ return best_ep;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
+
+/**
* fwnode_graph_parse_endpoint - parse common endpoint node properties
* @fwnode: pointer to endpoint fwnode_handle
* @endpoint: pointer to the fwnode endpoint data structure
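
A hedged usage sketch of the new lookup, as it might appear in a driver probe routine: request port 0, endpoint 0, and accept the next higher endpoint ID if 0 itself is absent.

struct fwnode_handle *ep;

ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
				     FWNODE_GRAPH_ENDPOINT_NEXT);
if (!ep)
	return -ENODEV;

/* ... parse the endpoint ... */

fwnode_handle_put(ep);	/* the lookup returned a counted reference */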
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index a98fced9bff8..3d80c4b43f72 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Register map access API internal header
*
* Copyright 2011 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _REGMAP_INTERNAL_H
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index bc6cd88b8cc6..b7e4b2464102 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API - flat caching support
- *
- * Copyright 2012 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - flat caching support
+//
+// Copyright 2012 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/seq_file.h>
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 4ff311374c4a..fc14e8b9344f 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API - LZO caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - LZO caching support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/lzo.h>
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 9cbb4b0cd01b..cfa29dc89bbf 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API - rbtree caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - rbtree caching support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/debugfs.h>
#include <linux/device.h>
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 773560348337..a93cafd7be4f 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/bsearch.h>
#include <linux/device.h>
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index c03ebfd4c731..b9f76bdf74a9 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -1,20 +1,8 @@
-/*
- * Register map access API - AC'97 support
- *
- * Copyright 2013 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - AC'97 support
+//
+// Copyright 2013 Linaro Ltd. All rights reserved.
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 19eb454f26c3..263f82516ff4 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API - debugfs
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - debugfs
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/slab.h>
#include <linux/mutex.h>
@@ -195,6 +191,28 @@ static inline void regmap_calc_tot_len(struct regmap *map,
}
}
+static int regmap_next_readable_reg(struct regmap *map, int reg)
+{
+ struct regmap_debugfs_off_cache *c;
+ int ret = -EINVAL;
+
+ if (regmap_printable(map, reg + map->reg_stride)) {
+ ret = reg + map->reg_stride;
+ } else {
+ mutex_lock(&map->cache_lock);
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ if (reg > c->max_reg)
+ continue;
+ if (reg < c->base_reg) {
+ ret = c->base_reg;
+ break;
+ }
+ }
+ mutex_unlock(&map->cache_lock);
+ }
+ return ret;
+}
+
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
unsigned int to, char __user *user_buf,
size_t count, loff_t *ppos)
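
regmap_next_readable_reg() lets the dump loop jump over holes using the sorted debugfs offset cache instead of probing every stride. A runnable toy of the same skip-ahead over sorted readable ranges (the kernel version first tries reg + stride via regmap_printable(), then consults the cache under a mutex):

#include <stdio.h>

struct blk { int base, max; };	/* sorted, non-overlapping readable ranges */

static int next_reg(const struct blk *b, int nblk, int reg, int stride)
{
	reg += stride;
	for (int i = 0; i < nblk; i++) {
		if (reg > b[i].max)
			continue;		/* block entirely behind us */
		return reg >= b[i].base ? reg : b[i].base;
	}
	return -1;	/* no readable register left */
}

int main(void)
{
	struct blk blocks[] = { { 0, 4 }, { 16, 20 } };

	/* from register 4 with stride 1: 5..15 are holes, jump to 16 */
	printf("%d\n", next_reg(blocks, 2, 4, 1));
	return 0;
}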
@@ -218,12 +236,8 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
/* Work out which register we're starting at */
start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
- for (i = start_reg; i <= to; i += map->reg_stride) {
- if (!regmap_readable(map, i) && !regmap_cached(map, i))
- continue;
-
- if (regmap_precious(map, i))
- continue;
+ for (i = start_reg; i >= 0 && i <= to;
+ i = regmap_next_readable_reg(map, i)) {
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 056acde5e7d3..ac9b31c57967 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API - I2C support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - I2C support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/regmap.h>
#include <linux/i2c.h>
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5059748afd4c..c9dc70ceca5f 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -1,14 +1,10 @@
-/*
- * regmap based irq_chip
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// regmap based irq_chip
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/export.h>
@@ -761,9 +757,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (chip->num_type_reg && !chip->type_in_mask) {
for (i = 0; i < chip->num_type_reg; ++i) {
- if (!d->type_buf_def[i])
- continue;
-
reg = chip->type_base +
(i * map->reg_stride * d->type_reg_stride);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 8741fb5f8f54..af967d8f975e 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -1,20 +1,8 @@
-/*
- * Register map access API - MMIO support
- *
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - MMIO support
+//
+// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index c7150dd264d5..c1894e93c378 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API - SPI support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPI support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 0bfb8ed244d5..cdf12d2aa3a1 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -1,22 +1,13 @@
-/*
- * Register map access API - SPMI support
- *
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * Based on regmap-i2c.c:
- * Copyright 2011 Wolfson Microelectronics plc
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPMI support
+//
+// Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+//
+// Based on regmap-i2c.c:
+// Copyright 2011 Wolfson Microelectronics plc
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
#include <linux/regmap.h>
#include <linux/spmi.h>
#include <linux/module.h>
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index e6c64b0be5b2..3a7d30b8c3ac 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,13 +1,9 @@
-/*
- * Register map access API - W1 (1-Wire) support
- *
- * Copyright (c) 2017 Radioavionica Corporation
- * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - W1 (1-Wire) support
+//
+// Copyright (c) 2017 Radioavionica Corporation
+// Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
#include <linux/regmap.h>
#include <linux/module.h>
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4f822e087def..f1025452bb39 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/slab.h>
@@ -1493,11 +1489,10 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
WARN_ON(!map->bus);
/* Check for unwritable registers before we start */
- if (map->writeable_reg)
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!map->writeable_reg(map->dev,
- reg + regmap_get_offset(map, i)))
- return -EINVAL;
+ for (i = 0; i < val_len / map->format.val_bytes; i++)
+ if (!regmap_writeable(map,
+ reg + regmap_get_offset(map, i)))
+ return -EINVAL;
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 1fad9291f6aa..7fc5a18e02ad 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -472,7 +472,7 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-struct fwnode_handle *
+static struct fwnode_handle *
software_node_get_parent(const struct fwnode_handle *fwnode)
{
struct software_node *swnode = to_software_node(fwnode);
@@ -481,7 +481,7 @@ software_node_get_parent(const struct fwnode_handle *fwnode)
NULL;
}
-struct fwnode_handle *
+static struct fwnode_handle *
software_node_get_next_child(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 3e5fd97a3b4d..c850bdb6c178 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5443,7 +5443,6 @@ static int drbd_do_auth(struct drbd_connection *connection)
rcu_read_unlock();
desc->tfm = connection->cram_hmac_tfm;
- desc->flags = 0;
rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 268ef0c5d4ab..6781bcf3ec26 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -304,7 +304,6 @@ void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req,
void *src;
desc->tfm = tfm;
- desc->flags = 0;
crypto_shash_init(desc);
@@ -332,7 +331,6 @@ void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
struct bvec_iter iter;
desc->tfm = tfm;
- desc->flags = 0;
crypto_shash_init(desc);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e6edd568214..bf1c61cab8eb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
return -EBADF;
l = f->f_mapping->host->i_bdev->bd_disk->private_data;
- if (l->lo_state == Lo_unbound) {
+ if (l->lo_state != Lo_bound) {
return -EINVAL;
}
f = l->lo_backing_file;
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 417a9f15c116..d7ac09c092f2 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1748,6 +1748,11 @@ static int __init null_init(void)
return -EINVAL;
}
+ if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+ pr_err("null_blk: invalid home_node value\n");
+ g_home_node = NUMA_NO_NODE;
+ }
+
if (g_queue_mode == NULL_Q_RQ) {
pr_err("null_blk: legacy IO path no longer available\n");
return -EINVAL;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 96670eefaeb2..6d415b20fb70 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -314,6 +314,7 @@ static void pcd_init_units(void)
disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
1, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
+ put_disk(disk);
disk->queue = NULL;
continue;
}
@@ -749,8 +750,14 @@ static int pcd_detect(void)
return 0;
printk("%s: No CD-ROM drive found\n", name);
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+ if (!cd->disk)
+ continue;
+ blk_cleanup_queue(cd->disk->queue);
+ cd->disk->queue = NULL;
+ blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
+ }
pi_unregister_driver(par_drv);
return -1;
}
@@ -1006,8 +1013,14 @@ static int __init pcd_init(void)
pcd_probe_capabilities();
if (register_blkdev(major, name)) {
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+ if (!cd->disk)
+ continue;
+
+ blk_cleanup_queue(cd->disk->queue);
+ blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
+ }
return -EBUSY;
}
@@ -1028,6 +1041,9 @@ static void __exit pcd_exit(void)
int unit;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+ if (!cd->disk)
+ continue;
+
if (cd->present) {
del_gendisk(cd->disk);
pi_release(cd->pi);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index e92e7a8eeeb2..35e6e271b219 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -761,8 +761,14 @@ static int pf_detect(void)
return 0;
printk("%s: No ATAPI disk detected\n", name);
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+ if (!pf->disk)
+ continue;
+ blk_cleanup_queue(pf->disk->queue);
+ pf->disk->queue = NULL;
+ blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
+ }
pi_unregister_driver(par_drv);
return -1;
}
@@ -1025,8 +1031,13 @@ static int __init pf_init(void)
pf_busy = 0;
if (register_blkdev(major, name)) {
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+ if (!pf->disk)
+ continue;
+ blk_cleanup_queue(pf->disk->queue);
+ blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
+ }
return -EBUSY;
}
@@ -1047,13 +1058,18 @@ static void __exit pf_exit(void)
int unit;
unregister_blkdev(major, name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->present)
+ if (!pf->disk)
continue;
- del_gendisk(pf->disk);
+
+ if (pf->present)
+ del_gendisk(pf->disk);
+
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
- pi_release(pf->pi);
+
+ if (pf->present)
+ pi_release(pf->pi);
}
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 74088d8dbaf3..2210c1b9491b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -115,12 +115,14 @@ static int atomic_dec_return_safe(atomic_t *v)
#define RBD_FEATURE_LAYERING (1ULL<<0)
#define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
+#define RBD_FEATURE_DEEP_FLATTEN (1ULL<<5)
#define RBD_FEATURE_DATA_POOL (1ULL<<7)
#define RBD_FEATURE_OPERATIONS (1ULL<<8)
#define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
RBD_FEATURE_STRIPINGV2 | \
RBD_FEATURE_EXCLUSIVE_LOCK | \
+ RBD_FEATURE_DEEP_FLATTEN | \
RBD_FEATURE_DATA_POOL | \
RBD_FEATURE_OPERATIONS)
@@ -214,28 +216,40 @@ enum obj_operation_type {
OBJ_OP_READ = 1,
OBJ_OP_WRITE,
OBJ_OP_DISCARD,
+ OBJ_OP_ZEROOUT,
};
/*
* Writes go through the following state machine to deal with
* layering:
*
- * need copyup
- * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
- * | ^ |
- * v \------------------------------/
- * done
- * ^
- * |
- * RBD_OBJ_WRITE_FLAT
+ * . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
+ * . | .
+ * . v .
+ * . RBD_OBJ_WRITE_READ_FROM_PARENT. . . .
+ * . | . .
+ * . v v (deep-copyup .
+ * (image . RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC . not needed) .
+ * flattened) v | . .
+ * . v . .
+ * . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . . (copyup .
+ * | not needed) v
+ * v .
+ * done . . . . . . . . . . . . . . . . . .
+ * ^
+ * |
+ * RBD_OBJ_WRITE_FLAT
*
* Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
- * there is a parent or not.
+ * assert_exists guard is needed or not (in some cases it's not needed
+ * even if there is a parent).
*/
enum rbd_obj_write_state {
RBD_OBJ_WRITE_FLAT = 1,
RBD_OBJ_WRITE_GUARD,
- RBD_OBJ_WRITE_COPYUP,
+ RBD_OBJ_WRITE_READ_FROM_PARENT,
+ RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC,
+ RBD_OBJ_WRITE_COPYUP_OPS,
};
struct rbd_obj_request {
@@ -291,7 +305,6 @@ struct rbd_img_request {
int result; /* first nonzero obj_request result */
struct list_head object_extents; /* obj_req.ex structs */
- u32 obj_request_count;
u32 pending_count;
struct kref kref;
@@ -421,6 +434,10 @@ static DEFINE_IDA(rbd_dev_id_ida);
static struct workqueue_struct *rbd_wq;
+static struct ceph_snap_context rbd_empty_snapc = {
+ .nref = REFCOUNT_INIT(1),
+};
+
/*
* single-major requires >= 0.75 version of userspace rbd utility.
*/
@@ -732,6 +749,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
*/
enum {
Opt_queue_depth,
+ Opt_alloc_size,
Opt_lock_timeout,
Opt_last_int,
/* int args above */
@@ -748,6 +766,7 @@ enum {
static match_table_t rbd_opts_tokens = {
{Opt_queue_depth, "queue_depth=%d"},
+ {Opt_alloc_size, "alloc_size=%d"},
{Opt_lock_timeout, "lock_timeout=%d"},
/* int args above */
{Opt_pool_ns, "_pool_ns=%s"},
@@ -764,6 +783,7 @@ static match_table_t rbd_opts_tokens = {
struct rbd_options {
int queue_depth;
+ int alloc_size;
unsigned long lock_timeout;
bool read_only;
bool lock_on_read;
@@ -772,6 +792,7 @@ struct rbd_options {
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
+#define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
#define RBD_READ_ONLY_DEFAULT false
#define RBD_LOCK_ON_READ_DEFAULT false
@@ -811,6 +832,17 @@ static int parse_rbd_opts_token(char *c, void *private)
}
pctx->opts->queue_depth = intval;
break;
+ case Opt_alloc_size:
+ if (intval < SECTOR_SIZE) {
+ pr_err("alloc_size out of range\n");
+ return -EINVAL;
+ }
+ if (!is_power_of_2(intval)) {
+ pr_err("alloc_size must be a power of 2\n");
+ return -EINVAL;
+ }
+ pctx->opts->alloc_size = intval;
+ break;
case Opt_lock_timeout:
/* 0 is "wait forever" (i.e. infinite timeout) */
if (intval < 0 || intval > INT_MAX / 1000) {
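
The Opt_alloc_size hunk above bounds the new rbd option: at least one sector and a power of two, so that the discard path introduced later in this patch can align offsets with plain mask arithmetic. A hypothetical standalone version of the check:

#include <linux/blkdev.h>
#include <linux/log2.h>

/* Validate a proposed alloc_size; mirrors the parse_rbd_opts_token()
 * checks above, as a free-standing sketch.
 */
static int example_check_alloc_size(int val)
{
	if (val < SECTOR_SIZE)
		return -EINVAL;	/* smaller than a sector is meaningless */
	if (!is_power_of_2(val))
		return -EINVAL;	/* round_up()/round_down() need a pow2 */
	return 0;
}
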
@@ -857,6 +889,8 @@ static char* obj_op_name(enum obj_operation_type op_type)
return "write";
case OBJ_OP_DISCARD:
return "discard";
+ case OBJ_OP_ZEROOUT:
+ return "zeroout";
default:
return "???";
}
@@ -890,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
kref_put(&rbdc->kref, rbd_client_release);
}
-static int wait_for_latest_osdmap(struct ceph_client *client)
-{
- u64 newest_epoch;
- int ret;
-
- ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
- if (ret)
- return ret;
-
- if (client->osdc.osdmap->epoch >= newest_epoch)
- return 0;
-
- ceph_osdc_maybe_request_map(&client->osdc);
- return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
- client->options->mount_timeout);
-}
-
/*
* Get a ceph client with specific addr and configuration, if one does
* not exist create it. Either way, ceph_opts is consumed by this
@@ -926,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
* Using an existing client. Make sure ->pg_pools is up to
* date before we look up the pool id in do_rbd_add().
*/
- ret = wait_for_latest_osdmap(rbdc->client);
+ ret = ceph_wait_for_latest_osdmap(rbdc->client,
+ rbdc->client->options->mount_timeout);
if (ret) {
rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
rbd_put_client(rbdc);
@@ -1344,7 +1362,6 @@ static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
/* Image request now owns object's original reference */
obj_request->img_request = img_request;
- img_request->obj_request_count++;
img_request->pending_count++;
dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}
@@ -1354,8 +1371,6 @@ static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
{
dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
list_del(&obj_request->ex.oe_item);
- rbd_assert(img_request->obj_request_count > 0);
- img_request->obj_request_count--;
rbd_assert(obj_request->img_request == img_request);
rbd_obj_request_put(obj_request);
}
@@ -1409,6 +1424,19 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
rbd_dev->layout.object_size;
}
+/*
+ * Must be called after rbd_obj_calc_img_extents().
+ */
+static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
+{
+ if (!obj_req->num_img_extents ||
+ (rbd_obj_is_entire(obj_req) &&
+ !obj_req->img_request->snapc->num_snaps))
+ return false;
+
+ return true;
+}
+
static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
return ceph_file_extents_bytes(obj_req->img_extents,
@@ -1422,6 +1450,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req)
return false;
case OBJ_OP_WRITE:
case OBJ_OP_DISCARD:
+ case OBJ_OP_ZEROOUT:
return true;
default:
BUG();
@@ -1470,18 +1499,16 @@ static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
}
static struct ceph_osd_request *
-rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
+__rbd_osd_req_create(struct rbd_obj_request *obj_req,
+ struct ceph_snap_context *snapc, unsigned int num_ops)
{
- struct rbd_img_request *img_req = obj_req->img_request;
- struct rbd_device *rbd_dev = img_req->rbd_dev;
+ struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_osd_request *req;
const char *name_format = rbd_dev->image_format == 1 ?
RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
- req = ceph_osdc_alloc_request(osdc,
- (rbd_img_is_write(img_req) ? img_req->snapc : NULL),
- num_ops, false, GFP_NOIO);
+ req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
if (!req)
return NULL;
@@ -1506,6 +1533,13 @@ err_req:
return NULL;
}
+static struct ceph_osd_request *
+rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
+{
+ return __rbd_osd_req_create(obj_req, obj_req->img_request->snapc,
+ num_ops);
+}
+
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
ceph_osdc_put_request(osd_req);
@@ -1671,7 +1705,6 @@ static void rbd_img_request_destroy(struct kref *kref)
for_each_obj_request_safe(img_request, obj_request, next_obj_request)
rbd_img_obj_request_del(img_request, obj_request);
- rbd_assert(img_request->obj_request_count == 0);
if (img_request_layered_test(img_request)) {
img_request_layered_clear(img_request);
@@ -1754,7 +1787,7 @@ static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
{
- obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
+ obj_req->osd_req = __rbd_osd_req_create(obj_req, NULL, 1);
if (!obj_req->osd_req)
return -ENOMEM;
@@ -1790,6 +1823,11 @@ static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
return 0;
}
+static int count_write_ops(struct rbd_obj_request *obj_req)
+{
+ return 2; /* setallochint + write/writefull */
+}
+
static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
unsigned int which)
{
@@ -1816,6 +1854,7 @@ static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
{
unsigned int num_osd_ops, which = 0;
+ bool need_guard;
int ret;
/* reverse map the entire object onto the parent */
@@ -1823,47 +1862,112 @@ static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
if (ret)
return ret;
- if (obj_req->num_img_extents) {
- obj_req->write_state = RBD_OBJ_WRITE_GUARD;
- num_osd_ops = 3; /* stat + setallochint + write/writefull */
- } else {
- obj_req->write_state = RBD_OBJ_WRITE_FLAT;
- num_osd_ops = 2; /* setallochint + write/writefull */
- }
+ need_guard = rbd_obj_copyup_enabled(obj_req);
+ num_osd_ops = need_guard + count_write_ops(obj_req);
obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
if (!obj_req->osd_req)
return -ENOMEM;
- if (obj_req->num_img_extents) {
+ if (need_guard) {
ret = __rbd_obj_setup_stat(obj_req, which++);
if (ret)
return ret;
+
+ obj_req->write_state = RBD_OBJ_WRITE_GUARD;
+ } else {
+ obj_req->write_state = RBD_OBJ_WRITE_FLAT;
}
__rbd_obj_setup_write(obj_req, which);
return 0;
}
-static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
+static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
+{
+ return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
+ CEPH_OSD_OP_ZERO;
+}
+
+static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
+{
+ struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
+ u64 off = obj_req->ex.oe_off;
+ u64 next_off = obj_req->ex.oe_off + obj_req->ex.oe_len;
+ int ret;
+
+ /*
+ * Align the range to alloc_size boundary and punt on discards
+ * that are too small to free up any space.
+ *
+ * alloc_size == object_size && is_tail() is a special case for
+ * filestore with filestore_punch_hole = false, needed to allow
+ * truncate (in addition to delete).
+ */
+ if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
+ !rbd_obj_is_tail(obj_req)) {
+ off = round_up(off, rbd_dev->opts->alloc_size);
+ next_off = round_down(next_off, rbd_dev->opts->alloc_size);
+ if (off >= next_off)
+ return 1;
+ }
+
+ /* reverse map the entire object onto the parent */
+ ret = rbd_obj_calc_img_extents(obj_req, true);
+ if (ret)
+ return ret;
+
+ obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
+ if (!obj_req->osd_req)
+ return -ENOMEM;
+
+ if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
+ osd_req_op_init(obj_req->osd_req, 0, CEPH_OSD_OP_DELETE, 0);
+ } else {
+ dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
+ obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
+ off, next_off - off);
+ osd_req_op_extent_init(obj_req->osd_req, 0,
+ truncate_or_zero_opcode(obj_req),
+ off, next_off - off, 0, 0);
+ }
+
+ obj_req->write_state = RBD_OBJ_WRITE_FLAT;
+ rbd_osd_req_format_write(obj_req);
+ return 0;
+}
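
rbd_obj_setup_discard() above returns 1 when the aligned range collapses, and __rbd_img_fill_request() (later in this patch) completes such objects immediately. The alignment step itself, as a hypothetical sketch (alloc_size is assumed to be a power of two, which the option parser enforces):

#include <linux/kernel.h>

/* Shrink [*off, *end) inward to alloc_size boundaries. Returns false
 * when the aligned range is empty, i.e. the discard frees no space.
 */
static bool example_align_discard(u64 *off, u64 *end, u32 alloc_size)
{
	*off = round_up(*off, alloc_size);
	*end = round_down(*end, alloc_size);
	return *off < *end;
}
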
+
+static int count_zeroout_ops(struct rbd_obj_request *obj_req)
+{
+ int num_osd_ops;
+
+ if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
+ !rbd_obj_copyup_enabled(obj_req))
+ num_osd_ops = 2; /* create + truncate */
+ else
+ num_osd_ops = 1; /* delete/truncate/zero */
+
+ return num_osd_ops;
+}
+
+static void __rbd_obj_setup_zeroout(struct rbd_obj_request *obj_req,
unsigned int which)
{
u16 opcode;
if (rbd_obj_is_entire(obj_req)) {
if (obj_req->num_img_extents) {
- osd_req_op_init(obj_req->osd_req, which++,
- CEPH_OSD_OP_CREATE, 0);
+ if (!rbd_obj_copyup_enabled(obj_req))
+ osd_req_op_init(obj_req->osd_req, which++,
+ CEPH_OSD_OP_CREATE, 0);
opcode = CEPH_OSD_OP_TRUNCATE;
} else {
osd_req_op_init(obj_req->osd_req, which++,
CEPH_OSD_OP_DELETE, 0);
opcode = 0;
}
- } else if (rbd_obj_is_tail(obj_req)) {
- opcode = CEPH_OSD_OP_TRUNCATE;
} else {
- opcode = CEPH_OSD_OP_ZERO;
+ opcode = truncate_or_zero_opcode(obj_req);
}
if (opcode)
@@ -1875,9 +1979,10 @@ static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
rbd_osd_req_format_write(obj_req);
}
-static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
+static int rbd_obj_setup_zeroout(struct rbd_obj_request *obj_req)
{
unsigned int num_osd_ops, which = 0;
+ bool need_guard;
int ret;
/* reverse map the entire object onto the parent */
@@ -1885,33 +1990,24 @@ static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
if (ret)
return ret;
- if (rbd_obj_is_entire(obj_req)) {
- obj_req->write_state = RBD_OBJ_WRITE_FLAT;
- if (obj_req->num_img_extents)
- num_osd_ops = 2; /* create + truncate */
- else
- num_osd_ops = 1; /* delete */
- } else {
- if (obj_req->num_img_extents) {
- obj_req->write_state = RBD_OBJ_WRITE_GUARD;
- num_osd_ops = 2; /* stat + truncate/zero */
- } else {
- obj_req->write_state = RBD_OBJ_WRITE_FLAT;
- num_osd_ops = 1; /* truncate/zero */
- }
- }
+ need_guard = rbd_obj_copyup_enabled(obj_req);
+ num_osd_ops = need_guard + count_zeroout_ops(obj_req);
obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
if (!obj_req->osd_req)
return -ENOMEM;
- if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) {
+ if (need_guard) {
ret = __rbd_obj_setup_stat(obj_req, which++);
if (ret)
return ret;
+
+ obj_req->write_state = RBD_OBJ_WRITE_GUARD;
+ } else {
+ obj_req->write_state = RBD_OBJ_WRITE_FLAT;
}
- __rbd_obj_setup_discard(obj_req, which);
+ __rbd_obj_setup_zeroout(obj_req, which);
return 0;
}
@@ -1922,10 +2018,10 @@ static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
*/
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
- struct rbd_obj_request *obj_req;
+ struct rbd_obj_request *obj_req, *next_obj_req;
int ret;
- for_each_obj_request(img_req, obj_req) {
+ for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
switch (img_req->op_type) {
case OBJ_OP_READ:
ret = rbd_obj_setup_read(obj_req);
@@ -1936,11 +2032,20 @@ static int __rbd_img_fill_request(struct rbd_img_request *img_req)
case OBJ_OP_DISCARD:
ret = rbd_obj_setup_discard(obj_req);
break;
+ case OBJ_OP_ZEROOUT:
+ ret = rbd_obj_setup_zeroout(obj_req);
+ break;
default:
rbd_assert(0);
}
- if (ret)
+ if (ret < 0)
return ret;
+ if (ret > 0) {
+ img_req->xferred += obj_req->ex.oe_len;
+ img_req->pending_count--;
+ rbd_img_obj_request_del(img_req, obj_req);
+ continue;
+ }
ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
if (ret)
@@ -2356,21 +2461,19 @@ static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
return true;
}
-static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
+#define MODS_ONLY U32_MAX
+
+static int rbd_obj_issue_copyup_empty_snapc(struct rbd_obj_request *obj_req,
+ u32 bytes)
{
- unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;
int ret;
dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
+ rbd_assert(bytes > 0 && bytes != MODS_ONLY);
rbd_osd_req_destroy(obj_req->osd_req);
- /*
- * Create a copyup request with the same number of OSD ops as
- * the original request. The original request was stat + op(s),
- * the new copyup request will be copyup + the same op(s).
- */
- obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
+ obj_req->osd_req = __rbd_osd_req_create(obj_req, &rbd_empty_snapc, 1);
if (!obj_req->osd_req)
return -ENOMEM;
@@ -2378,27 +2481,65 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
if (ret)
return ret;
- /*
- * Only send non-zero copyup data to save some I/O and network
- * bandwidth -- zero copyup data is equivalent to the object not
- * existing.
- */
- if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
- dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
- bytes = 0;
- }
osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
obj_req->copyup_bvecs,
obj_req->copyup_bvec_count,
bytes);
+ rbd_osd_req_format_write(obj_req);
- switch (obj_req->img_request->op_type) {
+ ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
+ if (ret)
+ return ret;
+
+ rbd_obj_request_submit(obj_req);
+ return 0;
+}
+
+static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
+{
+ struct rbd_img_request *img_req = obj_req->img_request;
+ unsigned int num_osd_ops = (bytes != MODS_ONLY);
+ unsigned int which = 0;
+ int ret;
+
+ dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
+ rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT ||
+ obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_CALL);
+ rbd_osd_req_destroy(obj_req->osd_req);
+
+ switch (img_req->op_type) {
case OBJ_OP_WRITE:
- __rbd_obj_setup_write(obj_req, 1);
+ num_osd_ops += count_write_ops(obj_req);
break;
- case OBJ_OP_DISCARD:
- rbd_assert(!rbd_obj_is_entire(obj_req));
- __rbd_obj_setup_discard(obj_req, 1);
+ case OBJ_OP_ZEROOUT:
+ num_osd_ops += count_zeroout_ops(obj_req);
+ break;
+ default:
+ rbd_assert(0);
+ }
+
+ obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
+ if (!obj_req->osd_req)
+ return -ENOMEM;
+
+ if (bytes != MODS_ONLY) {
+ ret = osd_req_op_cls_init(obj_req->osd_req, which, "rbd",
+ "copyup");
+ if (ret)
+ return ret;
+
+ osd_req_op_cls_request_data_bvecs(obj_req->osd_req, which++,
+ obj_req->copyup_bvecs,
+ obj_req->copyup_bvec_count,
+ bytes);
+ }
+
+ switch (img_req->op_type) {
+ case OBJ_OP_WRITE:
+ __rbd_obj_setup_write(obj_req, which);
+ break;
+ case OBJ_OP_ZEROOUT:
+ __rbd_obj_setup_zeroout(obj_req, which);
break;
default:
rbd_assert(0);
@@ -2412,6 +2553,33 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
return 0;
}
+static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
+{
+ /*
+ * Only send non-zero copyup data to save some I/O and network
+ * bandwidth -- zero copyup data is equivalent to the object not
+ * existing.
+ */
+ if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
+ dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
+ bytes = 0;
+ }
+
+ if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
+ /*
+ * Send a copyup request with an empty snapshot context to
+ * deep-copyup the object through all existing snapshots.
+ * A second request with the current snapshot context will be
+ * sent for the actual modification.
+ */
+ obj_req->write_state = RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC;
+ return rbd_obj_issue_copyup_empty_snapc(obj_req, bytes);
+ }
+
+ obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS;
+ return rbd_obj_issue_copyup_ops(obj_req, bytes);
+}
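
rbd_obj_issue_copyup() above retains the older optimization: all-zero copyup data is equivalent to the object not existing, so it is never sent. is_zero_bvecs() walks bio_vecs; a flat-buffer analogue of the same test would look like this (hypothetical, not from this patch):

#include <linux/string.h>

/* True if the buffer contains only zero bytes; memchr_inv() returns
 * the first byte differing from the given pattern, or NULL.
 */
static bool example_buf_is_zero(const void *buf, size_t len)
{
	return !memchr_inv(buf, 0, len);
}
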
+
static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
u32 i;
@@ -2451,22 +2619,19 @@ static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
if (!obj_req->num_img_extents) {
/*
* The overlap has become 0 (most likely because the
- * image has been flattened). Use rbd_obj_issue_copyup()
- * to re-submit the original write request -- the copyup
- * operation itself will be a no-op, since someone must
- * have populated the child object while we weren't
- * looking. Move to WRITE_FLAT state as we'll be done
- * with the operation once the null copyup completes.
+ * image has been flattened). Re-submit the original write
+ * request -- pass MODS_ONLY since the copyup isn't needed
+ * anymore.
*/
- obj_req->write_state = RBD_OBJ_WRITE_FLAT;
- return rbd_obj_issue_copyup(obj_req, 0);
+ obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS;
+ return rbd_obj_issue_copyup_ops(obj_req, MODS_ONLY);
}
ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
if (ret)
return ret;
- obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
+ obj_req->write_state = RBD_OBJ_WRITE_READ_FROM_PARENT;
return rbd_obj_read_from_parent(obj_req);
}
@@ -2474,7 +2639,6 @@ static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
{
int ret;
-again:
switch (obj_req->write_state) {
case RBD_OBJ_WRITE_GUARD:
rbd_assert(!obj_req->xferred);
@@ -2493,6 +2657,7 @@ again:
}
/* fall through */
case RBD_OBJ_WRITE_FLAT:
+ case RBD_OBJ_WRITE_COPYUP_OPS:
if (!obj_req->result)
/*
* There is no such thing as a successful short
@@ -2500,15 +2665,26 @@ again:
*/
obj_req->xferred = obj_req->ex.oe_len;
return true;
- case RBD_OBJ_WRITE_COPYUP:
- obj_req->write_state = RBD_OBJ_WRITE_GUARD;
+ case RBD_OBJ_WRITE_READ_FROM_PARENT:
if (obj_req->result)
- goto again;
+ return true;
rbd_assert(obj_req->xferred);
ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
if (ret) {
obj_req->result = ret;
+ obj_req->xferred = 0;
+ return true;
+ }
+ return false;
+ case RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC:
+ if (obj_req->result)
+ return true;
+
+ obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS;
+ ret = rbd_obj_issue_copyup_ops(obj_req, MODS_ONLY);
+ if (ret) {
+ obj_req->result = ret;
return true;
}
return false;
@@ -2528,6 +2704,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
case OBJ_OP_WRITE:
return rbd_obj_handle_write(obj_req);
case OBJ_OP_DISCARD:
+ case OBJ_OP_ZEROOUT:
if (rbd_obj_handle_write(obj_req)) {
/*
* Hide -ENOENT from delete/truncate/zero -- discarding
@@ -3640,9 +3817,11 @@ static void rbd_queue_workfn(struct work_struct *work)
switch (req_op(rq)) {
case REQ_OP_DISCARD:
- case REQ_OP_WRITE_ZEROES:
op_type = OBJ_OP_DISCARD;
break;
+ case REQ_OP_WRITE_ZEROES:
+ op_type = OBJ_OP_ZEROOUT;
+ break;
case REQ_OP_WRITE:
op_type = OBJ_OP_WRITE;
break;
@@ -3722,12 +3901,12 @@ static void rbd_queue_workfn(struct work_struct *work)
img_request->rq = rq;
snapc = NULL; /* img_request consumes a ref */
- if (op_type == OBJ_OP_DISCARD)
+ if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
result = rbd_img_fill_nodata(img_request, offset, length);
else
result = rbd_img_fill_from_bio(img_request, offset, length,
rq->bio);
- if (result)
+ if (result || !img_request->pending_count)
goto err_img_request;
rbd_img_request_submit(img_request);
@@ -4008,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.max_sectors = queue_max_hw_sectors(q);
blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, UINT_MAX);
- blk_queue_io_min(q, objset_bytes);
- blk_queue_io_opt(q, objset_bytes);
+ blk_queue_io_min(q, rbd_dev->opts->alloc_size);
+ blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
if (rbd_dev->opts->trim) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- q->limits.discard_granularity = objset_bytes;
+ q->limits.discard_granularity = rbd_dev->opts->alloc_size;
blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
}
@@ -5388,6 +5567,7 @@ static int rbd_add_parse_args(const char *buf,
pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+ pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
@@ -5795,14 +5975,6 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
ret = rbd_dev_v2_parent_info(rbd_dev);
if (ret)
goto err_out_probe;
-
- /*
- * Need to warn users if this image is the one being
- * mapped and has a parent.
- */
- if (!depth && rbd_dev->parent_spec)
- rbd_warn(rbd_dev,
- "WARNING: kernel layering is EXPERIMENTAL!");
}
ret = rbd_dev_probe_parent(rbd_dev, depth);
@@ -5885,6 +6057,12 @@ static ssize_t do_rbd_add(struct bus_type *bus,
if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
rbd_dev->opts->read_only = true;
+ if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
+ rbd_warn(rbd_dev, "alloc_size adjusted to %u",
+ rbd_dev->layout.object_size);
+ rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
+ }
+
rc = rbd_dev_device_setup(rbd_dev);
if (rc)
goto err_out_image_probe;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b16a887bbd02..2a7ca4a1e6f7 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
+ num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
@@ -723,7 +725,7 @@ static int virtblk_probe(struct virtio_device *vdev)
struct request_queue *q;
int err, index;
- u32 v, blk_size, sg_elems, opt_io_size;
+ u32 v, blk_size, max_size, sg_elems, opt_io_size;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
@@ -826,14 +828,16 @@ static int virtblk_probe(struct virtio_device *vdev)
/* No real sector limit. */
blk_queue_max_hw_sectors(q, -1U);
+ max_size = virtio_max_dma_size(vdev);
+
/* Host can optionally specify maximum segment size and number of
* segments. */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
struct virtio_blk_config, size_max, &v);
if (!err)
- blk_queue_max_segment_size(q, v);
- else
- blk_queue_max_segment_size(q, -1U);
+ max_size = min(max_size, v);
+
+ blk_queue_max_segment_size(q, max_size);
/* Host can optionally specify the block size of the device */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
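
The virtio_blk hunks above clamp two negotiated limits: the queue count can never usefully exceed nr_cpu_ids, and the segment size becomes the minimum of the platform DMA limit (virtio_max_dma_size()) and any VIRTIO_BLK_F_SIZE_MAX cap the device advertises. The latter combination, as a hypothetical sketch:

#include <linux/kernel.h>
#include <linux/types.h>

/* Combine upper bounds on the segment size: start from the DMA limit
 * and shrink it by the device cap, if one was negotiated.
 */
static u32 example_segment_limit(u32 dma_max, bool has_size_max,
				 u32 size_max)
{
	u32 max_size = dma_max;

	if (has_size_max)
		max_size = min(max_size, size_max);

	return max_size;
}
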
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index a4bc74e72c39..24896ffb04ed 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -926,7 +926,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
int err, i, j;
struct xen_blkif *blkif = ring->blkif;
struct xenbus_device *dev = blkif->be->dev;
- unsigned int ring_page_order, nr_grefs, evtchn;
+ unsigned int nr_grefs, evtchn;
err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
&evtchn);
@@ -936,43 +936,42 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
return err;
}
- err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
- &ring_page_order);
- if (err != 1) {
- err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
+ nr_grefs = blkif->nr_ring_pages;
+
+ if (unlikely(!nr_grefs)) {
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nr_grefs; i++) {
+ char ring_ref_name[RINGREF_NAME_LEN];
+
+ snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+ err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
+ "%u", &ring_ref[i]);
+
if (err != 1) {
+ if (nr_grefs == 1)
+ break;
+
err = -EINVAL;
- xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
+ xenbus_dev_fatal(dev, err, "reading %s/%s",
+ dir, ring_ref_name);
return err;
}
- nr_grefs = 1;
- } else {
- unsigned int i;
+ }
- if (ring_page_order > xen_blkif_max_ring_order) {
+ if (err != 1) {
+ WARN_ON(nr_grefs != 1);
+
+ err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
+ &ring_ref[0]);
+ if (err != 1) {
err = -EINVAL;
- xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
- dir, ring_page_order,
- xen_blkif_max_ring_order);
+ xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
return err;
}
-
- nr_grefs = 1 << ring_page_order;
- for (i = 0; i < nr_grefs; i++) {
- char ring_ref_name[RINGREF_NAME_LEN];
-
- snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
- err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
- "%u", &ring_ref[i]);
- if (err != 1) {
- err = -EINVAL;
- xenbus_dev_fatal(dev, err, "reading %s/%s",
- dir, ring_ref_name);
- return err;
- }
- }
}
- blkif->nr_ring_pages = nr_grefs;
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -1023,6 +1022,7 @@ fail:
static int connect_ring(struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
+ struct xen_blkif *blkif = be->blkif;
unsigned int pers_grants;
char protocol[64] = "";
int err, i;
@@ -1030,28 +1030,29 @@ static int connect_ring(struct backend_info *be)
size_t xspathsize;
const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
unsigned int requested_num_queues = 0;
+ unsigned int ring_page_order;
pr_debug("%s %s\n", __func__, dev->otherend);
- be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
+ blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
"%63s", protocol);
if (err <= 0)
strcpy(protocol, "unspecified, assuming default");
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
- be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
- be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+ blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
- be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+ blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
else {
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
0);
- be->blkif->vbd.feature_gnt_persistent = pers_grants;
- be->blkif->vbd.overflow_max_grants = 0;
+ blkif->vbd.feature_gnt_persistent = pers_grants;
+ blkif->vbd.overflow_max_grants = 0;
/*
* Read the number of hardware queues from frontend.
@@ -1067,16 +1068,30 @@ static int connect_ring(struct backend_info *be)
requested_num_queues, xenblk_max_queues);
return -ENOSYS;
}
- be->blkif->nr_rings = requested_num_queues;
- if (xen_blkif_alloc_rings(be->blkif))
+ blkif->nr_rings = requested_num_queues;
+ if (xen_blkif_alloc_rings(blkif))
return -ENOMEM;
pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
- be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
+ blkif->nr_rings, blkif->blk_protocol, protocol,
pers_grants ? "persistent grants" : "");
- if (be->blkif->nr_rings == 1)
- return read_per_ring_refs(&be->blkif->rings[0], dev->otherend);
+ ring_page_order = xenbus_read_unsigned(dev->otherend,
+ "ring-page-order", 0);
+
+ if (ring_page_order > xen_blkif_max_ring_order) {
+ err = -EINVAL;
+ xenbus_dev_fatal(dev, err,
+ "requested ring page order %d exceed max:%d",
+ ring_page_order,
+ xen_blkif_max_ring_order);
+ return err;
+ }
+
+ blkif->nr_ring_pages = 1 << ring_page_order;
+
+ if (blkif->nr_rings == 1)
+ return read_per_ring_refs(&blkif->rings[0], dev->otherend);
else {
xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
xspath = kmalloc(xspathsize, GFP_KERNEL);
@@ -1085,10 +1100,10 @@ static int connect_ring(struct backend_info *be)
return -ENOMEM;
}
- for (i = 0; i < be->blkif->nr_rings; i++) {
+ for (i = 0; i < blkif->nr_rings; i++) {
memset(xspath, 0, xspathsize);
snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
- err = read_per_ring_refs(&be->blkif->rings[i], xspath);
+ err = read_per_ring_refs(&blkif->rings[i], xspath);
if (err) {
kfree(xspath);
return err;
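
The xen-blkback rework reads ring-page-order once in connect_ring(), validates it against xen_blkif_max_ring_order, and sets blkif->nr_ring_pages before any per-ring references are read, instead of re-parsing it inside read_per_ring_refs(). That step in isolation, as a hypothetical sketch:

#include <linux/errno.h>
#include <xen/xenbus.h>

/* Derive the ring page count from the frontend's ring-page-order key,
 * defaulting to order 0 (a single page) when the key is absent.
 */
static int example_nr_ring_pages(struct xenbus_device *dev,
				 unsigned int max_order,
				 unsigned int *nr_pages)
{
	unsigned int order = xenbus_read_unsigned(dev->otherend,
						  "ring-page-order", 0);

	if (order > max_order)
		return -EINVAL;

	*nr_pages = 1 << order;
	return 0;
}
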
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 87ccef4bd69e..32a21b8d1d85 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
return 0;
err_read:
+ /* prevent double queue cleanup */
+ ace->gd->queue = NULL;
put_disk(ace->gd);
err_alloc_disk:
blk_cleanup_queue(ace->queue);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 04ca65912638..d58a359a6622 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -41,7 +41,7 @@ static DEFINE_IDR(zram_index_idr);
static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
-static const char *default_compressor = "lzo";
+static const char *default_compressor = "lzo-rle";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
int index;
- char mode_buf[8];
- ssize_t sz;
- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
- if (sz <= 0)
- return -EINVAL;
-
- /* ignore trailing new line */
- if (mode_buf[sz - 1] == '\n')
- mode_buf[sz - 1] = 0x00;
-
- if (strcmp(mode_buf, "all"))
+ if (!sysfs_streq(buf, "all"))
return -EINVAL;
down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
struct bio bio;
struct bio_vec bio_vec;
struct page *page;
- ssize_t ret, sz;
- char mode_buf[8];
- int mode = -1;
+ ssize_t ret;
+ int mode;
unsigned long blk_idx = 0;
- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
- if (sz <= 0)
- return -EINVAL;
-
- /* ignore trailing newline */
- if (mode_buf[sz - 1] == '\n')
- mode_buf[sz - 1] = 0x00;
-
- if (!strcmp(mode_buf, "idle"))
+ if (sysfs_streq(buf, "idle"))
mode = IDLE_WRITEBACK;
- else if (!strcmp(mode_buf, "huge"))
+ else if (sysfs_streq(buf, "huge"))
mode = HUGE_WRITEBACK;
-
- if (mode == -1)
+ else
return -EINVAL;
down_read(&zram->init_lock);
@@ -794,18 +774,18 @@ struct zram_work {
struct zram *zram;
unsigned long entry;
struct bio *bio;
+ struct bio_vec bvec;
};
#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
- struct bio_vec bvec;
struct zram_work *zw = container_of(work, struct zram_work, work);
struct zram *zram = zw->zram;
unsigned long entry = zw->entry;
struct bio *bio = zw->bio;
- read_from_bdev_async(zram, &bvec, entry, bio);
+ read_from_bdev_async(zram, &zw->bvec, entry, bio);
}
/*
@@ -818,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
{
struct zram_work work;
+ work.bvec = *bvec;
work.zram = zram;
work.entry = entry;
work.bio = bio;
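
Two independent zram fixes above: sysfs_streq() replaces the hand-rolled strscpy()-plus-newline-stripping in the store handlers, and the bio_vec used by the PAGE_SIZE != 4096 sync read moves into struct zram_work so it survives until the worker runs instead of living on the submitter's stack. A hypothetical store handler using the first pattern:

#include <linux/device.h>
#include <linux/string.h>

/* sysfs_streq() matches the attribute value with or without the
 * trailing newline that shells append, so no copy or strip is needed.
 */
static ssize_t example_mode_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	int mode;

	if (sysfs_streq(buf, "idle"))
		mode = 0;
	else if (sysfs_streq(buf, "huge"))
		mode = 1;
	else
		return -EINVAL;

	dev_dbg(dev, "selected mode %d\n", mode);
	return len;
}
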
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index ded198328f21..7db48ae65cd2 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
return 0;
}
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
0, "OOB Wake-on-BT", data);
if (ret) {
@@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
}
data->oob_wake_irq = irq;
- disable_irq(irq);
bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
return 0;
}
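
The btusb hunk closes a startup race: flagging the IRQ with IRQ_NOAUTOEN before devm_request_irq() keeps it masked from the moment it is requested, whereas the old request-then-disable_irq() sequence left a window in which the handler could fire before the driver was ready. The pattern as a hypothetical helper (the caller later arms it with enable_irq()):

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Request an IRQ that must start disabled; IRQ_NOAUTOEN prevents the
 * core from unmasking it during devm_request_irq().
 */
static int example_request_wake_irq(struct device *dev, int irq,
				    irq_handler_t handler, void *data)
{
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	return devm_request_irq(dev, irq, handler, 0, "example-wake", data);
}
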
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72866a004f07..466ebd84ad17 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config XILINX_HWICAP
config R3964
tristate "Siemens R3964 line discipline"
- depends on TTY
+ depends on TTY && BROKEN
---help---
This driver allows synchronous communication with devices using the
Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b65ff6962899..e9b6ac61fb7f 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
priv->rng.read = omap_rng_do_read;
priv->rng.init = omap_rng_init;
priv->rng.cleanup = omap_rng_cleanup;
+ priv->rng.quality = 900;
priv->rng.priv = (unsigned long)priv;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 042860d97b15..0ef5b6a3f560 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -161,6 +161,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
#endif
priv->rng.read = stm32_rng_read,
priv->rng.priv = (unsigned long) dev;
+ priv->rng.quality = 900;
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
@@ -169,6 +170,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
return devm_hwrng_register(dev, &priv->rng);
}
+static int stm32_rng_remove(struct platform_device *ofdev)
+{
+ pm_runtime_disable(&ofdev->dev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int stm32_rng_runtime_suspend(struct device *dev)
{
@@ -210,6 +218,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
+ .remove = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
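
Both RNG hunks above set rng.quality = 900, an estimate of entropy per 1024 bits read, which allows the hwrng core to credit these devices toward the kernel entropy pool; the stm32 driver additionally gains a remove() callback that balances its pm_runtime state. A hypothetical registration sketch showing where quality sits (example_rng_read only returns placeholder data):

#include <linux/hw_random.h>
#include <linux/string.h>

static int example_rng_read(struct hwrng *rng, void *buf, size_t max,
			    bool wait)
{
	memset(buf, 0, max);	/* a real driver reads hardware here */
	return max;
}

static struct hwrng example_rng = {
	.name	 = "example-rng",
	.read	 = example_rng_read,
	.quality = 900,		/* entropy per 1024 bits of output */
};
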
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index ff0b199be472..f2411468f33f 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -66,7 +66,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
return;
}
- memset(&p, 0, sizeof(p));
p.addr = base_addr;
p.space = space;
p.regspacing = offset;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e8ba67834746..00bf4b17edbf 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -214,6 +214,9 @@ struct ipmi_user {
/* Does this interface receive IPMI events? */
bool gets_events;
+
+ /* Free must run in process context for RCU cleanup. */
+ struct work_struct remove_work;
};
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
@@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
return rv;
}
+static void free_user_work(struct work_struct *work)
+{
+ struct ipmi_user *user = container_of(work, struct ipmi_user,
+ remove_work);
+
+ cleanup_srcu_struct(&user->release_barrier);
+ kfree(user);
+}
+
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
@@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int if_num,
goto out_kfree;
found:
+ INIT_WORK(&new_user->remove_work, free_user_work);
+
rv = init_srcu_struct(&new_user->release_barrier);
if (rv)
goto out_kfree;
@@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
static void free_user(struct kref *ref)
{
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
- cleanup_srcu_struct(&user->release_barrier);
- kfree(user);
+
+ /* SRCU cleanup must happen in task context. */
+ schedule_work(&user->remove_work);
}
static void _ipmi_destroy_user(struct ipmi_user *user)
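
The ipmi_msghandler fix cures a sleep-in-atomic bug: the last kref_put() may happen in non-process context, but cleanup_srcu_struct() must sleep, so free_user() now only schedules a work item that performs the real teardown. The shape of that pattern, with hypothetical names (INIT_WORK() is assumed to run at object creation, as the found: hunk above does):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>

struct example_obj {
	struct kref refcount;
	struct srcu_struct barrier;
	struct work_struct remove_work;
};

/* Runs in process context, where sleeping is allowed. */
static void example_free_work(struct work_struct *work)
{
	struct example_obj *obj =
		container_of(work, struct example_obj, remove_work);

	cleanup_srcu_struct(&obj->barrier);
	kfree(obj);
}

/* kref release callback; may be invoked from atomic context, so it
 * defers the sleeping cleanup instead of doing it inline.
 */
static void example_release(struct kref *ref)
{
	struct example_obj *obj =
		container_of(ref, struct example_obj, refcount);

	schedule_work(&obj->remove_work);
}
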
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
index 01946cad3d13..682221eebd66 100644
--- a/drivers/char/ipmi/ipmi_si_hardcode.c
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -118,6 +118,8 @@ void __init ipmi_hardcode_init(void)
char *str;
char *si_type[SI_MAX_PARMS];
+ memset(si_type, 0, sizeof(si_type));
+
/* Parse out the si_type string into its components. */
str = si_type_str;
if (*str != '\0') {
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 58c84784ba25..bfdff9271be0 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -74,7 +74,7 @@ static const char* tcpa_pc_event_id_strings[] = {
/* returns pointer to start of pos. entry of tcg log */
static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos)
{
- loff_t i;
+ loff_t i = 0;
struct tpm_chip *chip = m->private;
struct tpm_bios_log *log = &chip->log;
void *addr = log->bios_event_log;
@@ -83,38 +83,29 @@ static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos)
u32 converted_event_size;
u32 converted_event_type;
-
/* read over *pos measurements */
- for (i = 0; i < *pos; i++) {
+ do {
event = addr;
+ /* check if current entry is valid */
+ if (addr + sizeof(struct tcpa_event) > limit)
+ return NULL;
+
converted_event_size =
do_endian_conversion(event->event_size);
converted_event_type =
do_endian_conversion(event->event_type);
- if ((addr + sizeof(struct tcpa_event)) < limit) {
- if ((converted_event_type == 0) &&
- (converted_event_size == 0))
- return NULL;
- addr += (sizeof(struct tcpa_event) +
- converted_event_size);
- }
- }
-
- /* now check if current entry is valid */
- if ((addr + sizeof(struct tcpa_event)) >= limit)
- return NULL;
-
- event = addr;
+ if (((converted_event_type == 0) && (converted_event_size == 0))
+ || ((addr + sizeof(struct tcpa_event) + converted_event_size)
+ > limit))
+ return NULL;
- converted_event_size = do_endian_conversion(event->event_size);
- converted_event_type = do_endian_conversion(event->event_type);
+ if (i++ == *pos)
+ break;
- if (((converted_event_type == 0) && (converted_event_size == 0))
- || ((addr + sizeof(struct tcpa_event) + converted_event_size)
- >= limit))
- return NULL;
+ addr += (sizeof(struct tcpa_event) + converted_event_size);
+ } while (1);
return addr;
}
@@ -134,7 +125,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
v += sizeof(struct tcpa_event) + converted_event_size;
/* now check if current entry is valid */
- if ((v + sizeof(struct tcpa_event)) >= limit)
+ if ((v + sizeof(struct tcpa_event)) > limit)
return NULL;
event = v;
@@ -143,7 +134,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
converted_event_type = do_endian_conversion(event->event_type);
if (((converted_event_type == 0) && (converted_event_size == 0)) ||
- ((v + sizeof(struct tcpa_event) + converted_event_size) >= limit))
+ ((v + sizeof(struct tcpa_event) + converted_event_size) > limit))
return NULL;
(*pos)++;
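
The tpm1 event-log rework folds the separate seek and validate passes into one loop and fixes an off-by-one: an entry that ends exactly at the log limit is valid, so the bounds tests now use > instead of >=. A hypothetical walker over a simplified entry layout (size == 0 terminates the log here) showing the same checks:

#include <linux/types.h>

struct example_entry {
	u32 size;	/* payload length in bytes */
	u8 data[];
};

/* Return the n-th entry, or NULL if the log ends or an entry would
 * overrun the limit; "== limit" is an exact fit and therefore valid.
 */
static void *example_nth_entry(void *addr, void *limit, loff_t n)
{
	loff_t i = 0;

	do {
		struct example_entry *e = addr;

		if (addr + sizeof(*e) > limit)
			return NULL;
		if (!e->size ||
		    addr + sizeof(*e) + e->size > limit)
			return NULL;
		if (i++ == n)
			break;
		addr += sizeof(*e) + e->size;
	} while (1);

	return addr;
}
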
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index 1b8fa9de2cac..f824563fc28d 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -37,10 +37,10 @@
*
* Returns size of the event. If it is an invalid event, returns 0.
*/
-static int calc_tpm2_event_size(struct tcg_pcr_event2 *event,
- struct tcg_pcr_event *event_header)
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+ struct tcg_pcr_event *event_header)
{
- struct tcg_efi_specid_event *efispecid;
+ struct tcg_efi_specid_event_head *efispecid;
struct tcg_event_field *event_field;
void *marker;
void *marker_start;
@@ -55,7 +55,7 @@ static int calc_tpm2_event_size(struct tcg_pcr_event2 *event,
marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type)
+ sizeof(event->count);
- efispecid = (struct tcg_efi_specid_event *)event_header->event;
+ efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
/* Check if event is malformed. */
if (event->count > efispecid->num_algs)
@@ -95,7 +95,7 @@ static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos)
void *addr = log->bios_event_log;
void *limit = log->bios_event_log_end;
struct tcg_pcr_event *event_header;
- struct tcg_pcr_event2 *event;
+ struct tcg_pcr_event2_head *event;
size_t size;
int i;
@@ -136,7 +136,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
loff_t *pos)
{
struct tcg_pcr_event *event_header;
- struct tcg_pcr_event2 *event;
+ struct tcg_pcr_event2_head *event;
struct tpm_chip *chip = m->private;
struct tpm_bios_log *log = &chip->log;
void *limit = log->bios_event_log_end;
@@ -180,7 +180,7 @@ static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v)
struct tpm_chip *chip = m->private;
struct tpm_bios_log *log = &chip->log;
struct tcg_pcr_event *event_header = log->bios_event_log;
- struct tcg_pcr_event2 *event = v;
+ struct tcg_pcr_event2_head *event = v;
void *temp_ptr;
size_t size;
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index be5d1abd3e8e..8390c5b54c3b 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -33,7 +33,7 @@
struct st33zp24_i2c_phy {
struct i2c_client *client;
- u8 buf[TPM_BUFSIZE + 1];
+ u8 buf[ST33ZP24_BUFSIZE + 1];
int io_lpcpd;
};
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index d7909ab287a8..ff019a1e3c68 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -63,7 +63,7 @@
* some latency byte before the answer is available (max 15).
* We have 2048 + 1024 + 15.
*/
-#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\
+#define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\
MAX_SPI_LATENCY)
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 64dc560859f2..13dc614b7ebc 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -436,7 +436,7 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
goto out_err;
}
- return len;
+ return 0;
out_err:
st33zp24_cancel(chip);
release_locality(chip);
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
index 6f4a4198af6a..20da0a84988d 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.h
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -18,8 +18,8 @@
#ifndef __LOCAL_ST33ZP24_H__
#define __LOCAL_ST33ZP24_H__
-#define TPM_WRITE_DIRECTION 0x80
-#define TPM_BUFSIZE 2048
+#define TPM_WRITE_DIRECTION 0x80
+#define ST33ZP24_BUFSIZE 2048
struct st33zp24_dev {
struct tpm_chip *chip;
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 32db84683c40..8804c9e916fd 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -37,6 +37,103 @@ struct class *tpm_class;
struct class *tpmrm_class;
dev_t tpm_devt;
+static int tpm_request_locality(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (!chip->ops->request_locality)
+ return 0;
+
+ rc = chip->ops->request_locality(chip, 0);
+ if (rc < 0)
+ return rc;
+
+ chip->locality = rc;
+ return 0;
+}
+
+static void tpm_relinquish_locality(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (!chip->ops->relinquish_locality)
+ return;
+
+ rc = chip->ops->relinquish_locality(chip, chip->locality);
+ if (rc)
+		dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
+
+ chip->locality = -1;
+}
+
+static int tpm_cmd_ready(struct tpm_chip *chip)
+{
+ if (!chip->ops->cmd_ready)
+ return 0;
+
+ return chip->ops->cmd_ready(chip);
+}
+
+static int tpm_go_idle(struct tpm_chip *chip)
+{
+ if (!chip->ops->go_idle)
+ return 0;
+
+ return chip->ops->go_idle(chip);
+}
+
+/**
+ * tpm_chip_start() - power on the TPM
+ * @chip: a TPM chip to use
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ */
+int tpm_chip_start(struct tpm_chip *chip)
+{
+ int ret;
+
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, true);
+
+ if (chip->locality == -1) {
+ ret = tpm_request_locality(chip);
+ if (ret) {
+			if (chip->ops->clk_enable)
+				chip->ops->clk_enable(chip, false);
+ return ret;
+ }
+ }
+
+ ret = tpm_cmd_ready(chip);
+ if (ret) {
+ tpm_relinquish_locality(chip);
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, false);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_start);
+
+/**
+ * tpm_chip_stop() - power off the TPM
+ * @chip: a TPM chip to use
+ */
+void tpm_chip_stop(struct tpm_chip *chip)
+{
+ tpm_go_idle(chip);
+ tpm_relinquish_locality(chip);
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, false);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_stop);
+
/**
* tpm_try_get_ops() - Get a ref to the tpm_chip
* @chip: Chip to ref
@@ -56,10 +153,17 @@ int tpm_try_get_ops(struct tpm_chip *chip)
down_read(&chip->ops_sem);
if (!chip->ops)
+ goto out_ops;
+
+ mutex_lock(&chip->tpm_mutex);
+ rc = tpm_chip_start(chip);
+ if (rc)
goto out_lock;
return 0;
out_lock:
+ mutex_unlock(&chip->tpm_mutex);
+out_ops:
up_read(&chip->ops_sem);
put_device(&chip->dev);
return rc;
@@ -75,6 +179,8 @@ EXPORT_SYMBOL_GPL(tpm_try_get_ops);
*/
void tpm_put_ops(struct tpm_chip *chip)
{
+ tpm_chip_stop(chip);
+ mutex_unlock(&chip->tpm_mutex);
up_read(&chip->ops_sem);
put_device(&chip->dev);
}
@@ -160,6 +266,7 @@ static void tpm_dev_release(struct device *dev)
kfree(chip->log.bios_event_log);
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
+ kfree(chip->allocated_banks);
kfree(chip);
}
@@ -189,7 +296,10 @@ static int tpm_class_shutdown(struct device *dev)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
down_write(&chip->ops_sem);
- tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ if (!tpm_chip_start(chip)) {
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_stop(chip);
+ }
chip->ops = NULL;
up_write(&chip->ops_sem);
}
@@ -368,8 +478,12 @@ static void tpm_del_char_device(struct tpm_chip *chip)
/* Make the driver uncallable. */
down_write(&chip->ops_sem);
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ if (!tpm_chip_start(chip)) {
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_stop(chip);
+ }
+ }
chip->ops = NULL;
up_write(&chip->ops_sem);
}
@@ -451,7 +565,11 @@ int tpm_chip_register(struct tpm_chip *chip)
{
int rc;
+ rc = tpm_chip_start(chip);
+ if (rc)
+ return rc;
rc = tpm_auto_startup(chip);
+ tpm_chip_stop(chip);
if (rc)
return rc;
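
tpm_chip_start() above is an acquire-with-rollback sequence (clock, then locality, then cmd_ready), undone in reverse by tpm_chip_stop(); tpm_try_get_ops()/tpm_put_ops() now bracket every operation with this pair plus the chip mutex. A self-contained sketch of the shape, where every example_* name is a hypothetical stub:

struct example_dev {
	bool clk_on;
	int locality;	/* -1 when not held */
};

static void example_clk_enable(struct example_dev *d, bool on)
{
	d->clk_on = on;
}

static int example_request_locality(struct example_dev *d)
{
	d->locality = 0;
	return 0;	/* a real driver may fail here */
}

static void example_relinquish_locality(struct example_dev *d)
{
	d->locality = -1;
}

static int example_cmd_ready(struct example_dev *d)
{
	return 0;	/* a real driver may fail here */
}

/* Acquire resources in order; on failure, release what was already
 * taken, in reverse, so the device is never left half powered-on.
 */
static int example_start(struct example_dev *d)
{
	int ret;

	example_clk_enable(d, true);

	ret = example_request_locality(d);
	if (ret)
		goto err_clk;

	ret = example_cmd_ready(d);
	if (ret)
		goto err_locality;

	return 0;

err_locality:
	example_relinquish_locality(d);
err_clk:
	example_clk_enable(d, false);
	return ret;
}
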
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 5eecad233ea1..817ae09a369e 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -27,7 +27,38 @@
static struct workqueue_struct *tpm_dev_wq;
static DEFINE_MUTEX(tpm_dev_wq_lock);
-static void tpm_async_work(struct work_struct *work)
+static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ u8 *buf, size_t bufsiz)
+{
+ struct tpm_header *header = (void *)buf;
+ ssize_t ret, len;
+
+ ret = tpm2_prepare_space(chip, space, buf, bufsiz);
+ /* If the command is not implemented by the TPM, synthesize a
+ * response with a TPM2_RC_COMMAND_CODE return for user-space.
+ */
+ if (ret == -EOPNOTSUPP) {
+ header->length = cpu_to_be32(sizeof(*header));
+ header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+ TSS2_RESMGR_TPM_RC_LAYER);
+ ret = sizeof(*header);
+ }
+ if (ret)
+ goto out_rc;
+
+ len = tpm_transmit(chip, buf, bufsiz);
+ if (len < 0)
+ ret = len;
+
+ if (!ret)
+ ret = tpm2_commit_space(chip, space, buf, &len);
+
+out_rc:
+ return ret ? ret : len;
+}
+
+static void tpm_dev_async_work(struct work_struct *work)
{
struct file_priv *priv =
container_of(work, struct file_priv, async_work);
@@ -35,9 +66,8 @@ static void tpm_async_work(struct work_struct *work)
mutex_lock(&priv->buffer_mutex);
priv->command_enqueued = false;
- ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
- sizeof(priv->data_buffer), 0);
-
+ ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);
if (ret > 0) {
priv->response_length = ret;
@@ -80,7 +110,7 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
mutex_init(&priv->buffer_mutex);
timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
INIT_WORK(&priv->timeout_work, tpm_timeout_work);
- INIT_WORK(&priv->async_work, tpm_async_work);
+ INIT_WORK(&priv->async_work, tpm_dev_async_work);
init_waitqueue_head(&priv->async_wait);
file->private_data = priv;
}
@@ -183,8 +213,8 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
return size;
}
- ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
- sizeof(priv->data_buffer), 0);
+ ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);
if (ret > 0) {
@@ -203,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
__poll_t mask = 0;
poll_wait(file, &priv->async_wait, wait);
+ mutex_lock(&priv->buffer_mutex);
- if (!priv->response_read || priv->response_length)
+ /*
+ * The response_length indicates whether there is still a response
+ * (or part of it) to be consumed. Partial reads decrease it
+ * by the number of bytes read, and a write resets it to zero.
+ */
+ if (priv->response_length)
mask = EPOLLIN | EPOLLRDNORM;
else
mask = EPOLLOUT | EPOLLWRNORM;
+ mutex_unlock(&priv->buffer_mutex);
return mask;
}
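
tpm_dev_transmit() above synthesizes a TPM2_RC_COMMAND_CODE response directly in the caller's buffer when tpm2_prepare_space() reports -EOPNOTSUPP, so user space always sees a well-formed reply. A hypothetical sketch of building such an error-only response (the header mirror below is for illustration only; the real struct tpm_header lives in drivers/char/tpm/tpm.h):

#include <linux/kernel.h>
#include <linux/types.h>

struct example_tpm_header {
	__be16 tag;
	__be32 length;
	__be32 return_code;
} __packed;

/* Overwrite the command buffer with a header-only error response and
 * return its length, mimicking the -EOPNOTSUPP branch above.
 */
static ssize_t example_synthesize_rc(u8 *buf, u16 tag, u32 rc)
{
	struct example_tpm_header *h = (void *)buf;

	h->tag = cpu_to_be16(tag);
	h->length = cpu_to_be32(sizeof(*h));
	h->return_code = cpu_to_be32(rc);
	return sizeof(*h);
}
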
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index d9439f9abe78..ae1030c9b086 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -62,137 +62,22 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
-static int tpm_validate_command(struct tpm_chip *chip,
- struct tpm_space *space,
- const u8 *cmd,
- size_t len)
+static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
{
- const struct tpm_input_header *header = (const void *)cmd;
- int i;
- u32 cc;
- u32 attrs;
- unsigned int nr_handles;
-
- if (len < TPM_HEADER_SIZE)
- return -EINVAL;
-
- if (!space)
- return 0;
-
- if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) {
- cc = be32_to_cpu(header->ordinal);
-
- i = tpm2_find_cc(chip, cc);
- if (i < 0) {
- dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
- cc);
- return -EOPNOTSUPP;
- }
-
- attrs = chip->cc_attrs_tbl[i];
- nr_handles =
- 4 * ((attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0));
- if (len < TPM_HEADER_SIZE + 4 * nr_handles)
- goto err_len;
- }
-
- return 0;
-err_len:
- dev_dbg(&chip->dev,
- "%s: insufficient command length %zu", __func__, len);
- return -EINVAL;
-}
-
-static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
-{
- int rc;
-
- if (flags & TPM_TRANSMIT_NESTED)
- return 0;
-
- if (!chip->ops->request_locality)
- return 0;
-
- rc = chip->ops->request_locality(chip, 0);
- if (rc < 0)
- return rc;
-
- chip->locality = rc;
-
- return 0;
-}
-
-static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
-{
- int rc;
-
- if (flags & TPM_TRANSMIT_NESTED)
- return;
-
- if (!chip->ops->relinquish_locality)
- return;
-
- rc = chip->ops->relinquish_locality(chip, chip->locality);
- if (rc)
- dev_err(&chip->dev, "%s: : error %d\n", __func__, rc);
-
- chip->locality = -1;
-}
-
-static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
-{
- if (flags & TPM_TRANSMIT_NESTED)
- return 0;
-
- if (!chip->ops->cmd_ready)
- return 0;
-
- return chip->ops->cmd_ready(chip);
-}
-
-static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
-{
- if (flags & TPM_TRANSMIT_NESTED)
- return 0;
-
- if (!chip->ops->go_idle)
- return 0;
-
- return chip->ops->go_idle(chip);
-}
-
-static ssize_t tpm_try_transmit(struct tpm_chip *chip,
- struct tpm_space *space,
- u8 *buf, size_t bufsiz,
- unsigned int flags)
-{
- struct tpm_output_header *header = (void *)buf;
+ struct tpm_header *header = buf;
int rc;
ssize_t len = 0;
u32 count, ordinal;
unsigned long stop;
- bool need_locality;
- rc = tpm_validate_command(chip, space, buf, bufsiz);
- if (rc == -EINVAL)
- return rc;
- /*
- * If the command is not implemented by the TPM, synthesize a
- * response with a TPM2_RC_COMMAND_CODE return for user-space.
- */
- if (rc == -EOPNOTSUPP) {
- header->length = cpu_to_be32(sizeof(*header));
- header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
- header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
- TSS2_RESMGR_TPM_RC_LAYER);
- return sizeof(*header);
- }
+ if (bufsiz < TPM_HEADER_SIZE)
+ return -EINVAL;
if (bufsiz > TPM_BUFSIZE)
bufsiz = TPM_BUFSIZE;
- count = be32_to_cpu(*((__be32 *) (buf + 2)));
- ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+ count = be32_to_cpu(header->length);
+ ordinal = be32_to_cpu(header->ordinal);
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
@@ -201,37 +86,21 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
return -E2BIG;
}
- if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
- mutex_lock(&chip->tpm_mutex);
-
- if (chip->ops->clk_enable != NULL)
- chip->ops->clk_enable(chip, true);
-
- /* Store the decision as chip->locality will be changed. */
- need_locality = chip->locality == -1;
-
- if (need_locality) {
- rc = tpm_request_locality(chip, flags);
- if (rc < 0) {
- need_locality = false;
- goto out_locality;
- }
- }
-
- rc = tpm_cmd_ready(chip, flags);
- if (rc)
- goto out_locality;
-
- rc = tpm2_prepare_space(chip, space, ordinal, buf);
- if (rc)
- goto out;
-
rc = chip->ops->send(chip, buf, count);
if (rc < 0) {
if (rc != -EPIPE)
dev_err(&chip->dev,
- "%s: tpm_send: error %d\n", __func__, rc);
- goto out;
+ "%s: send(): error %d\n", __func__, rc);
+ return rc;
+ }
+
+ /* A sanity check: send() should just return zero on success, i.e.
+ * not the command length.
+ */
+ if (rc > 0) {
+ dev_warn(&chip->dev,
+ "%s: send(): invalid value %d\n", __func__, rc);
+ rc = 0;
}
if (chip->flags & TPM_CHIP_FLAG_IRQ)
@@ -246,8 +115,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
if (chip->ops->req_canceled(chip, status)) {
dev_err(&chip->dev, "Operation Canceled\n");
- rc = -ECANCELED;
- goto out;
+ return -ECANCELED;
}
tpm_msleep(TPM_TIMEOUT_POLL);
@@ -256,77 +124,45 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
chip->ops->cancel(chip);
dev_err(&chip->dev, "Operation Timed out\n");
- rc = -ETIME;
- goto out;
+ return -ETIME;
out_recv:
len = chip->ops->recv(chip, buf, bufsiz);
if (len < 0) {
rc = len;
- dev_err(&chip->dev,
- "tpm_transmit: tpm_recv: error %d\n", rc);
- goto out;
- } else if (len < TPM_HEADER_SIZE) {
- rc = -EFAULT;
- goto out;
- }
-
- if (len != be32_to_cpu(header->length)) {
+ dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc);
+ } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
rc = -EFAULT;
- goto out;
- }
- rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
- if (rc)
- dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
-
-out:
- /* may fail but do not override previous error value in rc */
- tpm_go_idle(chip, flags);
-
-out_locality:
- if (need_locality)
- tpm_relinquish_locality(chip, flags);
-
- if (chip->ops->clk_enable != NULL)
- chip->ops->clk_enable(chip, false);
-
- if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
- mutex_unlock(&chip->tpm_mutex);
return rc ? rc : len;
}
/**
* tpm_transmit - Internal kernel interface to transmit TPM commands.
+ * @chip: a TPM chip to use
+ * @buf: a TPM command buffer
+ * @bufsiz: length of the TPM command buffer
*
- * @chip: TPM chip to use
- * @space: tpm space
- * @buf: TPM command buffer
- * @bufsiz: length of the TPM command buffer
- * @flags: tpm transmit flags - bitmap
+ * A wrapper around tpm_try_transmit() that handles TPM2_RC_RETRY returns from
+ * the TPM and retransmits the command after a delay up to a maximum wait of
+ * TPM2_DURATION_LONG.
*
- * A wrapper around tpm_try_transmit that handles TPM2_RC_RETRY
- * returns from the TPM and retransmits the command after a delay up
- * to a maximum wait of TPM2_DURATION_LONG.
- *
- * Note: TPM1 never returns TPM2_RC_RETRY so the retry logic is TPM2
- * only
+ * Note that TPM 1.x never returns TPM2_RC_RETRY so the retry logic is TPM 2.0
+ * only.
*
* Return:
- * the length of the return when the operation is successful.
- * A negative number for system errors (errno).
+ * * The response length - OK
+ * * -errno - A system error
*/
-ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
- u8 *buf, size_t bufsiz, unsigned int flags)
+ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz)
{
- struct tpm_output_header *header = (struct tpm_output_header *)buf;
+ struct tpm_header *header = (struct tpm_header *)buf;
/* space for header and handles */
u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)];
unsigned int delay_msec = TPM2_DURATION_SHORT;
u32 rc = 0;
ssize_t ret;
- const size_t save_size = min(space ? sizeof(save) : TPM_HEADER_SIZE,
- bufsiz);
+ const size_t save_size = min(sizeof(save), bufsiz);
/* the command code is where the return code will be */
u32 cc = be32_to_cpu(header->return_code);
@@ -338,7 +174,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
memcpy(save, buf, save_size);
for (;;) {
- ret = tpm_try_transmit(chip, space, buf, bufsiz, flags);
+ ret = tpm_try_transmit(chip, buf, bufsiz);
if (ret < 0)
break;
rc = be32_to_cpu(header->return_code);
@@ -365,39 +201,33 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
}
return ret;
}
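
For reference, a minimal sketch of a hypothetical caller under the trimmed-down signature; the command is marshalled into the buffer beforehand, the response overwrites it in place, and the TPM2_RC_RETRY handling stays internal to the call:

        /* Hypothetical caller: "page" must leave room for the response,
         * hence PAGE_SIZE. A negative return is a system error; otherwise
         * the response header is valid and carries the TPM return code.
         */
        static int example_transmit_raw(struct tpm_chip *chip, u8 *page)
        {
                struct tpm_header *header = (struct tpm_header *)page;
                ssize_t len;

                len = tpm_transmit(chip, page, PAGE_SIZE);
                if (len < 0)
                        return len;

                return be32_to_cpu(header->return_code);
        }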
+
/**
* tpm_transmit_cmd - send a tpm command to the device
- * The function extracts tpm out header return code
- *
- * @chip: TPM chip to use
- * @space: tpm space
- * @buf: TPM command buffer
- * @bufsiz: length of the buffer
- * @min_rsp_body_length: minimum expected length of response body
- * @flags: tpm transmit flags - bitmap
- * @desc: command description used in the error message
+ * @chip: a TPM chip to use
+ * @buf: a TPM command buffer
+ * @min_rsp_body_length: minimum expected length of response body
+ * @desc: command description used in the error message
*
* Return:
- * 0 when the operation is successful.
- * A negative number for system errors (errno).
- * A positive number for a TPM error.
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
*/
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
- void *buf, size_t bufsiz,
- size_t min_rsp_body_length, unsigned int flags,
- const char *desc)
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
+ size_t min_rsp_body_length, const char *desc)
{
- const struct tpm_output_header *header = buf;
+ const struct tpm_header *header = (struct tpm_header *)buf->data;
int err;
ssize_t len;
- len = tpm_transmit(chip, space, buf, bufsiz, flags);
+ len = tpm_transmit(chip, buf->data, PAGE_SIZE);
if (len < 0)
return len;
err = be32_to_cpu(header->return_code);
if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
- && desc)
+ && err != TPM2_RC_TESTING && desc)
dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
if (err)
@@ -451,11 +281,12 @@ EXPORT_SYMBOL_GPL(tpm_is_tpm2);
* tpm_pcr_read - read a PCR value from SHA1 bank
* @chip: a &struct tpm_chip instance, %NULL for the default chip
* @pcr_idx: the PCR to be retrieved
- * @res_buf: the value of the PCR
+ * @digest: the PCR bank and buffer the current PCR value is written to
*
* Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf)
+int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest)
{
int rc;
@@ -464,9 +295,9 @@ int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
- rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
+ rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL);
else
- rc = tpm1_pcr_read(chip, pcr_idx, res_buf);
+ rc = tpm1_pcr_read(chip, pcr_idx, digest->digest);
tpm_put_ops(chip);
return rc;
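
A hedged sketch of a caller adapted to the new signature (struct tpm_digest and TPM_ALG_SHA1 are introduced elsewhere in this series):

        /* Hypothetical caller: read PCR 0 from the SHA-1 bank. */
        static int example_read_pcr0(struct tpm_chip *chip, u8 *out)
        {
                struct tpm_digest d = { .alg_id = TPM_ALG_SHA1 };
                int rc;

                rc = tpm_pcr_read(chip, 0, &d);
                if (rc)
                        return rc;

                memcpy(out, d.digest, SHA1_DIGEST_SIZE); /* 20 bytes */
                return 0;
        }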
@@ -477,41 +308,34 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
* tpm_pcr_extend - extend a PCR value in SHA1 bank.
* @chip: a &struct tpm_chip instance, %NULL for the default chip
* @pcr_idx: the PCR to be retrieved
- * @hash: the hash value used to extend the PCR value
+ * @digests: array of tpm_digest structures used to extend PCRs
*
- * Note: with TPM 2.0 extends also those banks with a known digest size to the
- * cryto subsystem in order to prevent malicious use of those PCR banks. In the
- * future we should dynamically determine digest sizes.
+ * Note: callers must pass a digest for every allocated PCR bank, in the same
+ * order as the banks in chip->allocated_banks.
*
* Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash)
+int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests)
{
int rc;
- struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)];
- u32 count = 0;
int i;
chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- memset(digest_list, 0, sizeof(digest_list));
-
- for (i = 0; i < ARRAY_SIZE(chip->active_banks) &&
- chip->active_banks[i] != TPM2_ALG_ERROR; i++) {
- digest_list[i].alg_id = chip->active_banks[i];
- memcpy(digest_list[i].digest, hash, TPM_DIGEST_SIZE);
- count++;
- }
+ for (i = 0; i < chip->nr_allocated_banks; i++)
+ if (digests[i].alg_id != chip->allocated_banks[i].alg_id)
+ return -EINVAL;
- rc = tpm2_pcr_extend(chip, pcr_idx, count, digest_list);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ rc = tpm2_pcr_extend(chip, pcr_idx, digests);
tpm_put_ops(chip);
return rc;
}
- rc = tpm1_pcr_extend(chip, pcr_idx, hash,
+ rc = tpm1_pcr_extend(chip, pcr_idx, digests[0].digest,
"attempting extend a PCR value");
tpm_put_ops(chip);
return rc;
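
Per the note above, callers now supply one entry per allocated bank; a hedged sketch of the expected adaptation (the allocation pattern is an assumption, not code from this patch):

        /* Hypothetical caller: extend a PCR in every allocated bank with
         * the same SHA-1 sized input; larger digests keep a zeroed tail
         * because kcalloc() zeroes the array.
         */
        static int example_extend(struct tpm_chip *chip, u32 pcr_idx,
                                  const u8 *hash)
        {
                struct tpm_digest *digests;
                int rc, i;

                digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
                                  GFP_KERNEL);
                if (!digests)
                        return -ENOMEM;

                for (i = 0; i < chip->nr_allocated_banks; i++) {
                        digests[i].alg_id = chip->allocated_banks[i].alg_id;
                        memcpy(digests[i].digest, hash, TPM_DIGEST_SIZE);
                }

                rc = tpm_pcr_extend(chip, pcr_idx, digests);
                kfree(digests);
                return rc;
        }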
@@ -528,14 +352,21 @@ EXPORT_SYMBOL_GPL(tpm_pcr_extend);
*/
int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
{
+ struct tpm_buf buf;
int rc;
chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
- rc = tpm_transmit_cmd(chip, NULL, cmd, buflen, 0, 0,
- "attempting to a send a command");
+ if (buflen > PAGE_SIZE) { /* the tpm_buf payload is a single page */
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_buf_init(&buf, 0, 0);
+ if (rc)
+ goto out;
+
+ memcpy(buf.data, cmd, buflen);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to send a command");
+ tpm_buf_destroy(&buf);
+out:
tpm_put_ops(chip);
return rc;
}
@@ -571,10 +402,14 @@ int tpm_pm_suspend(struct device *dev)
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
return 0;
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_shutdown(chip, TPM2_SU_STATE);
- else
- rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+ if (!tpm_chip_start(chip)) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_shutdown(chip, TPM2_SU_STATE);
+ else
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
+ tpm_chip_stop(chip);
+ }
return rc;
}
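
The suspend path now brackets its commands with tpm_chip_start()/tpm_chip_stop(), which, per the rest of this series, take over the locality, clock and cmd_ready/go_idle handling deleted from tpm_try_transmit() earlier in this patch. A minimal sketch of the pattern, with a stand-in operation:

        /* Sketch: internal command sequences run between start and stop. */
        static int example_internal_op(struct tpm_chip *chip)
        {
                u8 rnd[8];
                int rc;

                rc = tpm_chip_start(chip);
                if (rc)
                        return rc;

                rc = tpm2_get_random(chip, rnd, sizeof(rnd));

                tpm_chip_stop(chip);
                return rc < 0 ? rc : 0;
        }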
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index b88e08ec2c59..533a260d744e 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -39,7 +39,6 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
{
struct tpm_buf tpm_buf;
struct tpm_readpubek_out *out;
- ssize_t rc;
int i;
char *str = buf;
struct tpm_chip *chip = to_tpm_chip(dev);
@@ -47,19 +46,17 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
memset(&anti_replay, 0, sizeof(anti_replay));
- rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
- if (rc)
- return rc;
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK))
+ goto out_ops;
tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
- rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
- READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
- "attempting to read the PUBEK");
- if (rc) {
- tpm_buf_destroy(&tpm_buf);
- return 0;
- }
+ if (tpm_transmit_cmd(chip, &tpm_buf, READ_PUBEK_RESULT_MIN_BODY_SIZE,
+ "attempting to read the PUBEK"))
+ goto out_buf;
out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
str +=
@@ -90,9 +87,11 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
str += sprintf(str, "\n");
}
- rc = str - buf;
+out_buf:
tpm_buf_destroy(&tpm_buf);
- return rc;
+out_ops:
+ tpm_put_ops(chip);
+ return str - buf;
}
static DEVICE_ATTR_RO(pubek);
@@ -101,27 +100,32 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
{
cap_t cap;
u8 digest[TPM_DIGEST_SIZE];
- ssize_t rc;
u32 i, j, num_pcrs;
char *str = buf;
struct tpm_chip *chip = to_tpm_chip(dev);
- rc = tpm1_getcap(chip, TPM_CAP_PROP_PCR, &cap,
- "attempting to determine the number of PCRS",
- sizeof(cap.num_pcrs));
- if (rc)
+ if (tpm_try_get_ops(chip))
return 0;
+ if (tpm1_getcap(chip, TPM_CAP_PROP_PCR, &cap,
+ "attempting to determine the number of PCRS",
+ sizeof(cap.num_pcrs))) {
+ tpm_put_ops(chip);
+ return 0;
+ }
+
num_pcrs = be32_to_cpu(cap.num_pcrs);
for (i = 0; i < num_pcrs; i++) {
- rc = tpm1_pcr_read(chip, i, digest);
- if (rc)
+ if (tpm1_pcr_read(chip, i, digest)) {
+ str = buf;
break;
+ }
str += sprintf(str, "PCR-%02d: ", i);
for (j = 0; j < TPM_DIGEST_SIZE; j++)
str += sprintf(str, "%02X ", digest[j]);
str += sprintf(str, "\n");
}
+ tpm_put_ops(chip);
return str - buf;
}
static DEVICE_ATTR_RO(pcrs);
@@ -129,16 +133,21 @@ static DEVICE_ATTR_RO(pcrs);
static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
cap_t cap;
- ssize_t rc;
- rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
- "attempting to determine the permanent enabled state",
- sizeof(cap.perm_flags));
- if (rc)
+ if (tpm_try_get_ops(chip))
return 0;
+ if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent enabled state",
+ sizeof(cap.perm_flags)))
+ goto out_ops;
+
rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
+out_ops:
+ tpm_put_ops(chip);
return rc;
}
static DEVICE_ATTR_RO(enabled);
@@ -146,16 +155,21 @@ static DEVICE_ATTR_RO(enabled);
static ssize_t active_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
cap_t cap;
- ssize_t rc;
- rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
- "attempting to determine the permanent active state",
- sizeof(cap.perm_flags));
- if (rc)
+ if (tpm_try_get_ops(chip))
return 0;
+ if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent active state",
+ sizeof(cap.perm_flags)))
+ goto out_ops;
+
rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
+out_ops:
+ tpm_put_ops(chip);
return rc;
}
static DEVICE_ATTR_RO(active);
@@ -163,16 +177,21 @@ static DEVICE_ATTR_RO(active);
static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
cap_t cap;
- ssize_t rc;
- rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
- "attempting to determine the owner state",
- sizeof(cap.owned));
- if (rc)
+ if (tpm_try_get_ops(chip))
return 0;
+ if (tpm1_getcap(chip, TPM_CAP_PROP_OWNER, &cap,
+ "attempting to determine the owner state",
+ sizeof(cap.owned)))
+ goto out_ops;
+
rc = sprintf(buf, "%d\n", cap.owned);
+out_ops:
+ tpm_put_ops(chip);
return rc;
}
static DEVICE_ATTR_RO(owned);
@@ -180,16 +199,21 @@ static DEVICE_ATTR_RO(owned);
static ssize_t temp_deactivated_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
cap_t cap;
- ssize_t rc;
- rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
- "attempting to determine the temporary state",
- sizeof(cap.stclear_flags));
- if (rc)
+ if (tpm_try_get_ops(chip))
return 0;
+ if (tpm1_getcap(chip, TPM_CAP_FLAG_VOL, &cap,
+ "attempting to determine the temporary state",
+ sizeof(cap.stclear_flags)))
+ goto out_ops;
+
rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+out_ops:
+ tpm_put_ops(chip);
return rc;
}
static DEVICE_ATTR_RO(temp_deactivated);
@@ -198,15 +222,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
- cap_t cap;
- ssize_t rc;
+ ssize_t rc = 0;
char *str = buf;
+ cap_t cap;
- rc = tpm1_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
- "attempting to determine the manufacturer",
- sizeof(cap.manufacturer_id));
- if (rc)
+ if (tpm_try_get_ops(chip))
return 0;
+
+ if (tpm1_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
+ "attempting to determine the manufacturer",
+ sizeof(cap.manufacturer_id)))
+ goto out_ops;
+
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
@@ -223,11 +250,10 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
cap.tpm_version_1_2.revMinor);
} else {
/* Otherwise just use TPM_STRUCT_VER */
- rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
- "attempting to determine the 1.1 version",
- sizeof(cap.tpm_version));
- if (rc)
- return 0;
+ if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.tpm_version)))
+ goto out_ops;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
cap.tpm_version.Major,
@@ -235,8 +261,10 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
cap.tpm_version.revMajor,
cap.tpm_version.revMinor);
}
-
- return str - buf;
+ rc = str - buf;
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
}
static DEVICE_ATTR_RO(caps);
@@ -244,10 +272,12 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct tpm_chip *chip = to_tpm_chip(dev);
- if (chip == NULL)
+
+ if (tpm_try_get_ops(chip))
return 0;
chip->ops->cancel(chip);
+ tpm_put_ops(chip);
return count;
}
static DEVICE_ATTR_WO(cancel);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index f27d1f38a93d..2cce072f25b5 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -25,30 +25,22 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/fs.h>
-#include <linux/hw_random.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/tpm.h>
-#include <linux/acpi.h>
-#include <linux/cdev.h>
#include <linux/highmem.h>
#include <linux/tpm_eventlog.h>
-#include <crypto/hash_info.h>
#ifdef CONFIG_X86
#include <asm/intel-family.h>
#endif
-enum tpm_const {
- TPM_MINOR = 224, /* officially assigned */
- TPM_BUFSIZE = 4096,
- TPM_NUM_DEVICES = 65536,
- TPM_RETRY = 50, /* 5 seconds */
- TPM_NUM_EVENT_LOG_FILES = 3,
-};
+#define TPM_MINOR 224 /* officially assigned */
+#define TPM_BUFSIZE 4096
+#define TPM_NUM_DEVICES 65536
+#define TPM_RETRY 50
enum tpm_timeout {
TPM_TIMEOUT = 5, /* msecs */
@@ -65,16 +57,6 @@ enum tpm_addr {
TPM_ADDR = 0x4E,
};
-/* Indexes the duration array */
-enum tpm_duration {
- TPM_SHORT = 0,
- TPM_MEDIUM = 1,
- TPM_LONG = 2,
- TPM_LONG_LONG = 3,
- TPM_UNDEFINED,
- TPM_NUM_DURATIONS = TPM_UNDEFINED,
-};
-
#define TPM_WARN_RETRY 0x800
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
@@ -122,17 +104,6 @@ enum tpm2_return_codes {
TPM2_RC_RETRY = 0x0922,
};
-enum tpm2_algorithms {
- TPM2_ALG_ERROR = 0x0000,
- TPM2_ALG_SHA1 = 0x0004,
- TPM2_ALG_KEYEDHASH = 0x0008,
- TPM2_ALG_SHA256 = 0x000B,
- TPM2_ALG_SHA384 = 0x000C,
- TPM2_ALG_SHA512 = 0x000D,
- TPM2_ALG_NULL = 0x0010,
- TPM2_ALG_SM3_256 = 0x0012,
-};
-
enum tpm2_command_codes {
TPM2_CC_FIRST = 0x011F,
TPM2_CC_HIERARCHY_CONTROL = 0x0121,
@@ -190,15 +161,6 @@ enum tpm2_cc_attrs {
#define TPM_VID_WINBOND 0x1050
#define TPM_VID_STM 0x104A
-#define TPM_PPI_VERSION_LEN 3
-
-struct tpm_space {
- u32 context_tbl[3];
- u8 *context_buf;
- u32 session_tbl[3];
- u8 *session_buf;
-};
-
enum tpm_chip_flags {
TPM_CHIP_FLAG_TPM2 = BIT(1),
TPM_CHIP_FLAG_IRQ = BIT(2),
@@ -207,82 +169,15 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
};
-struct tpm_bios_log {
- void *bios_event_log;
- void *bios_event_log_end;
-};
-
-struct tpm_chip_seqops {
- struct tpm_chip *chip;
- const struct seq_operations *seqops;
-};
-
-struct tpm_chip {
- struct device dev;
- struct device devs;
- struct cdev cdev;
- struct cdev cdevs;
-
- /* A driver callback under ops cannot be run unless ops_sem is held
- * (sometimes implicitly, eg for the sysfs code). ops becomes null
- * when the driver is unregistered, see tpm_try_get_ops.
- */
- struct rw_semaphore ops_sem;
- const struct tpm_class_ops *ops;
-
- struct tpm_bios_log log;
- struct tpm_chip_seqops bin_log_seqops;
- struct tpm_chip_seqops ascii_log_seqops;
-
- unsigned int flags;
-
- int dev_num; /* /dev/tpm# */
- unsigned long is_open; /* only one allowed */
-
- char hwrng_name[64];
- struct hwrng hwrng;
-
- struct mutex tpm_mutex; /* tpm is processing */
-
- unsigned long timeout_a; /* jiffies */
- unsigned long timeout_b; /* jiffies */
- unsigned long timeout_c; /* jiffies */
- unsigned long timeout_d; /* jiffies */
- bool timeout_adjusted;
- unsigned long duration[TPM_NUM_DURATIONS]; /* jiffies */
- bool duration_adjusted;
-
- struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
-
- const struct attribute_group *groups[3];
- unsigned int groups_cnt;
-
- u16 active_banks[7];
-#ifdef CONFIG_ACPI
- acpi_handle acpi_dev_handle;
- char ppi_version[TPM_PPI_VERSION_LEN + 1];
-#endif /* CONFIG_ACPI */
-
- struct tpm_space work_space;
- u32 nr_commands;
- u32 *cc_attrs_tbl;
-
- /* active locality */
- int locality;
-};
-
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
-struct tpm_input_header {
- __be16 tag;
- __be32 length;
- __be32 ordinal;
-} __packed;
-
-struct tpm_output_header {
- __be16 tag;
- __be32 length;
- __be32 return_code;
+struct tpm_header {
+ __be16 tag;
+ __be32 length;
+ union {
+ __be32 ordinal;
+ __be32 return_code;
+ };
} __packed;
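
The unified header makes explicit that bytes 6..9 hold the ordinal in a command and the return code in a response; a small illustrative helper:

        /* Illustration: one 10-byte header serves both directions. */
        static u32 example_response_rc(const u8 *rsp)
        {
                const struct tpm_header *h = (const struct tpm_header *)rsp;

                BUILD_BUG_ON(sizeof(struct tpm_header) != TPM_HEADER_SIZE);
                return be32_to_cpu(h->return_code);
        }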
#define TPM_TAG_RQU_COMMAND 193
@@ -401,8 +296,8 @@ struct tpm_buf {
static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
{
- struct tpm_input_header *head;
- head = (struct tpm_input_header *)buf->data;
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
head->tag = cpu_to_be16(tag);
head->length = cpu_to_be32(sizeof(*head));
head->ordinal = cpu_to_be32(ordinal);
@@ -428,14 +323,14 @@ static inline void tpm_buf_destroy(struct tpm_buf *buf)
static inline u32 tpm_buf_length(struct tpm_buf *buf)
{
- struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+ struct tpm_header *head = (struct tpm_header *)buf->data;
return be32_to_cpu(head->length);
}
static inline u16 tpm_buf_tag(struct tpm_buf *buf)
{
- struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+ struct tpm_header *head = (struct tpm_header *)buf->data;
return be16_to_cpu(head->tag);
}
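
These accessors all reinterpret buf->data through the unified header; a short sketch of the build-then-inspect pattern they support (values are hypothetical):

        /* Sketch: the header length field tracks appends automatically. */
        static void example_buf_pattern(struct tpm_buf *buf)
        {
                /* after tpm_buf_init(buf, TPM2_ST_NO_SESSIONS, TPM2_CC_PCR_READ) */
                WARN_ON(tpm_buf_tag(buf) != TPM2_ST_NO_SESSIONS);
                WARN_ON(tpm_buf_length(buf) != TPM_HEADER_SIZE);

                tpm_buf_append_u32(buf, 1);
                WARN_ON(tpm_buf_length(buf) != TPM_HEADER_SIZE + 4);
        }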
@@ -444,7 +339,7 @@ static inline void tpm_buf_append(struct tpm_buf *buf,
const unsigned char *new_data,
unsigned int new_len)
{
- struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+ struct tpm_header *head = (struct tpm_header *)buf->data;
u32 len = tpm_buf_length(buf);
/* Return silently if overflow has already happened. */
@@ -487,25 +382,9 @@ extern const struct file_operations tpm_fops;
extern const struct file_operations tpmrm_fops;
extern struct idr dev_nums_idr;
-/**
- * enum tpm_transmit_flags - flags for tpm_transmit()
- *
- * @TPM_TRANSMIT_UNLOCKED: do not lock the chip
- * @TPM_TRANSMIT_NESTED: discard setup steps (power management,
- * locality) including locking (i.e. implicit
- * UNLOCKED)
- */
-enum tpm_transmit_flags {
- TPM_TRANSMIT_UNLOCKED = BIT(0),
- TPM_TRANSMIT_NESTED = BIT(1),
-};
-
-ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
- u8 *buf, size_t bufsiz, unsigned int flags);
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
- void *buf, size_t bufsiz,
- size_t min_rsp_body_length, unsigned int flags,
- const char *desc);
+ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz);
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
+ size_t min_rsp_body_length, const char *desc);
int tpm_get_timeouts(struct tpm_chip *);
int tpm_auto_startup(struct tpm_chip *chip);
@@ -530,6 +409,8 @@ static inline void tpm_msleep(unsigned int delay_msec)
delay_msec * 1000);
};
+int tpm_chip_start(struct tpm_chip *chip);
+void tpm_chip_stop(struct tpm_chip *chip);
struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
@@ -558,12 +439,12 @@ static inline u32 tpm2_rc_value(u32 rc)
}
int tpm2_get_timeouts(struct tpm_chip *chip);
-int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
-int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, u32 count,
- struct tpm2_digest *digests);
+int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest, u16 *digest_size_ptr);
+int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests);
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
-void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
- unsigned int flags);
+void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
int tpm2_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
@@ -580,10 +461,11 @@ int tpm2_probe(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
int tpm2_init_space(struct tpm_space *space);
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
-int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
- u8 *cmd);
-int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
- u32 cc, u8 *buf, size_t *bufsiz);
+void tpm2_flush_space(struct tpm_chip *chip);
+int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
+ size_t cmdsiz);
+int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
+ size_t *bufsiz);
int tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index 6f306338953b..85dcf2654d11 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -334,11 +334,8 @@ static int tpm1_startup(struct tpm_chip *chip)
tpm_buf_append_u16(&buf, TPM_ST_CLEAR);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
- "attempting to start the TPM");
-
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM");
tpm_buf_destroy(&buf);
-
return rc;
}
@@ -380,8 +377,7 @@ int tpm1_get_timeouts(struct tpm_chip *chip)
* of misreporting.
*/
if (chip->ops->update_timeouts)
- chip->timeout_adjusted =
- chip->ops->update_timeouts(chip, timeout_eff);
+ chip->ops->update_timeouts(chip, timeout_eff);
if (!chip->timeout_adjusted) {
/* Restore default if chip reported 0 */
@@ -462,9 +458,7 @@ int tpm1_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash,
tpm_buf_append_u32(&buf, pcr_idx);
tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
- TPM_DIGEST_SIZE, 0, log_msg);
-
+ rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE, log_msg);
tpm_buf_destroy(&buf);
return rc;
}
@@ -494,11 +488,9 @@ ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
tpm_buf_append_u32(&buf, 4);
tpm_buf_append_u32(&buf, subcap_id);
}
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
- min_cap_length, 0, desc);
+ rc = tpm_transmit_cmd(chip, &buf, min_cap_length, desc);
if (!rc)
*cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4];
-
tpm_buf_destroy(&buf);
return rc;
}
@@ -537,8 +529,7 @@ int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
do {
tpm_buf_append_u32(&buf, num_bytes);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
- sizeof(out->rng_data_len), 0,
+ rc = tpm_transmit_cmd(chip, &buf, sizeof(out->rng_data_len),
"attempting get random");
if (rc)
goto out;
@@ -583,8 +574,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf)
tpm_buf_append_u32(&buf, pcr_idx);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
- TPM_DIGEST_SIZE, 0,
+ rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE,
"attempting to read a pcr value");
if (rc)
goto out;
@@ -618,11 +608,8 @@ static int tpm1_continue_selftest(struct tpm_chip *chip)
if (rc)
return rc;
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
- 0, 0, "continue selftest");
-
+ rc = tpm_transmit_cmd(chip, &buf, 0, "continue selftest");
tpm_buf_destroy(&buf);
-
return rc;
}
@@ -709,6 +696,18 @@ int tpm1_auto_startup(struct tpm_chip *chip)
goto out;
}
+ chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
+ GFP_KERNEL);
+ if (!chip->allocated_banks) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
+ chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
+ chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
+ chip->nr_allocated_banks = 1;
+
return rc;
out:
if (rc > 0)
@@ -747,9 +746,7 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
return rc;
/* now do the actual savestate */
for (try = 0; try < TPM_RETRY; try++) {
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
- 0, 0, NULL);
-
+ rc = tpm_transmit_cmd(chip, &buf, 0, NULL);
/*
* If the TPM indicates that it is too busy to respond to
* this command then retry before giving up. It can take
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index a6bec13afa69..e74c5b7b64bf 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -33,11 +33,11 @@ struct tpm2_hash {
};
static struct tpm2_hash tpm2_hash_map[] = {
- {HASH_ALGO_SHA1, TPM2_ALG_SHA1},
- {HASH_ALGO_SHA256, TPM2_ALG_SHA256},
- {HASH_ALGO_SHA384, TPM2_ALG_SHA384},
- {HASH_ALGO_SHA512, TPM2_ALG_SHA512},
- {HASH_ALGO_SM3_256, TPM2_ALG_SM3_256},
+ {HASH_ALGO_SHA1, TPM_ALG_SHA1},
+ {HASH_ALGO_SHA256, TPM_ALG_SHA256},
+ {HASH_ALGO_SHA384, TPM_ALG_SHA384},
+ {HASH_ALGO_SHA512, TPM_ALG_SHA512},
+ {HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
};
int tpm2_get_timeouts(struct tpm_chip *chip)
@@ -171,20 +171,36 @@ struct tpm2_pcr_read_out {
* tpm2_pcr_read() - read a PCR value
* @chip: TPM chip to use.
* @pcr_idx: index of the PCR to read.
- * @res_buf: buffer to store the resulting hash.
+ * @digest: PCR bank and buffer the current PCR value is written to.
+ * @digest_size_ptr: pointer to variable that stores the digest size.
*
* Return: Same as with tpm_transmit_cmd.
*/
-int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf)
+int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest, u16 *digest_size_ptr)
{
+ int i;
int rc;
struct tpm_buf buf;
struct tpm2_pcr_read_out *out;
u8 pcr_select[TPM2_PCR_SELECT_MIN] = {0};
+ u16 digest_size;
+ u16 expected_digest_size = 0;
if (pcr_idx >= TPM2_PLATFORM_PCR)
return -EINVAL;
+ if (!digest_size_ptr) {
+ for (i = 0; i < chip->nr_allocated_banks &&
+ chip->allocated_banks[i].alg_id != digest->alg_id; i++)
+ ;
+
+ if (i == chip->nr_allocated_banks)
+ return -EINVAL;
+
+ expected_digest_size = chip->allocated_banks[i].digest_size;
+ }
+
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_PCR_READ);
if (rc)
return rc;
@@ -192,18 +208,28 @@ int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf)
pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
tpm_buf_append_u32(&buf, 1);
- tpm_buf_append_u16(&buf, TPM2_ALG_SHA1);
+ tpm_buf_append_u16(&buf, digest->alg_id);
tpm_buf_append_u8(&buf, TPM2_PCR_SELECT_MIN);
tpm_buf_append(&buf, (const unsigned char *)pcr_select,
sizeof(pcr_select));
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
- res_buf ? "attempting to read a pcr value" : NULL);
- if (rc == 0 && res_buf) {
- out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE];
- memcpy(res_buf, out->digest, SHA1_DIGEST_SIZE);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to read a pcr value");
+ if (rc)
+ goto out;
+
+ out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE];
+ digest_size = be16_to_cpu(out->digest_size);
+ if (digest_size > sizeof(digest->digest) ||
+ (!digest_size_ptr && digest_size != expected_digest_size)) {
+ rc = -EINVAL;
+ goto out;
}
+ if (digest_size_ptr)
+ *digest_size_ptr = digest_size;
+
+ memcpy(digest->digest, out->digest, digest_size);
+out:
tpm_buf_destroy(&buf);
return rc;
}
@@ -220,22 +246,17 @@ struct tpm2_null_auth_area {
*
* @chip: TPM chip to use.
* @pcr_idx: index of the PCR.
- * @count: number of digests passed.
* @digests: list of pcr banks and corresponding digest values to extend.
*
* Return: Same as with tpm_transmit_cmd.
*/
-int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, u32 count,
- struct tpm2_digest *digests)
+int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests)
{
struct tpm_buf buf;
struct tpm2_null_auth_area auth_area;
int rc;
int i;
- int j;
-
- if (count > ARRAY_SIZE(chip->active_banks))
- return -EINVAL;
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
if (rc)
@@ -251,21 +272,15 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, u32 count,
tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area));
tpm_buf_append(&buf, (const unsigned char *)&auth_area,
sizeof(auth_area));
- tpm_buf_append_u32(&buf, count);
-
- for (i = 0; i < count; i++) {
- for (j = 0; j < ARRAY_SIZE(tpm2_hash_map); j++) {
- if (digests[i].alg_id != tpm2_hash_map[j].tpm_id)
- continue;
- tpm_buf_append_u16(&buf, digests[i].alg_id);
- tpm_buf_append(&buf, (const unsigned char
- *)&digests[i].digest,
- hash_digest_size[tpm2_hash_map[j].crypto_id]);
- }
+ tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
+
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ tpm_buf_append_u16(&buf, digests[i].alg_id);
+ tpm_buf_append(&buf, (const unsigned char *)&digests[i].digest,
+ chip->allocated_banks[i].digest_size);
}
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
- "attempting extend a PCR value");
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
tpm_buf_destroy(&buf);
@@ -309,10 +324,10 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
do {
tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
tpm_buf_append_u16(&buf, num_bytes);
- err = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
+ err = tpm_transmit_cmd(chip, &buf,
offsetof(struct tpm2_get_random_out,
buffer),
- 0, "attempting get random");
+ "attempting get random");
if (err)
goto out;
@@ -341,14 +356,11 @@ out:
}
/**
- * tpm2_flush_context_cmd() - execute a TPM2_FlushContext command
+ * tpm2_flush_context() - execute a TPM2_FlushContext command
* @chip: TPM chip to use
* @handle: context handle
- * @flags: tpm transmit flags - bitmap
- *
*/
-void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
- unsigned int flags)
+void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
{
struct tpm_buf buf;
int rc;
@@ -362,9 +374,7 @@ void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
tpm_buf_append_u32(&buf, handle);
- (void) tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, flags,
- "flushing context");
-
+ tpm_transmit_cmd(chip, &buf, 0, "flushing context");
tpm_buf_destroy(&buf);
}
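
The conversions in this file all follow the same shape; for reference, a hedged sketch of a full build-transmit-inspect sequence under the new convention (it mirrors tpm2_get_tpm_pt() below; response parsing is deliberately omitted):

        /* Sketch: no space, bufsiz or flags arguments any more; the
         * tpm_buf carries everything tpm_transmit_cmd() needs.
         */
        static int example_get_capability(struct tpm_chip *chip, u32 property_id)
        {
                struct tpm_buf buf;
                int rc;

                rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS,
                                  TPM2_CC_GET_CAPABILITY);
                if (rc)
                        return rc;

                tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
                tpm_buf_append_u32(&buf, property_id);
                tpm_buf_append_u32(&buf, 1);

                rc = tpm_transmit_cmd(chip, &buf, 0, "example get capability");
                /* on success the payload starts at buf.data[TPM_HEADER_SIZE] */
                tpm_buf_destroy(&buf);
                return rc;
        }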
@@ -449,7 +459,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
/* public */
tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
- tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
+ tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
tpm_buf_append_u16(&buf, hash);
/* policy */
@@ -464,7 +474,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
}
/* public parameters */
- tpm_buf_append_u16(&buf, TPM2_ALG_NULL);
+ tpm_buf_append_u16(&buf, TPM_ALG_NULL);
tpm_buf_append_u16(&buf, 0);
/* outside info */
@@ -478,8 +488,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
goto out;
}
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 4, 0,
- "sealing data");
+ rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data");
if (rc)
goto out;
@@ -516,7 +525,6 @@ out:
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
* @blob_handle: returned blob handle
- * @flags: tpm transmit flags
*
* Return: 0 on success.
* -E2BIG on wrong payload size.
@@ -526,7 +534,7 @@ out:
static int tpm2_load_cmd(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options,
- u32 *blob_handle, unsigned int flags)
+ u32 *blob_handle)
{
struct tpm_buf buf;
unsigned int private_len;
@@ -561,8 +569,7 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
goto out;
}
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 4, flags,
- "loading blob");
+ rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob");
if (!rc)
*blob_handle = be32_to_cpup(
(__be32 *) &buf.data[TPM_HEADER_SIZE]);
@@ -583,7 +590,6 @@ out:
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
* @blob_handle: blob handle
- * @flags: tpm_transmit_cmd flags
*
* Return: 0 on success
* -EPERM on tpm error status
@@ -592,7 +598,7 @@ out:
static int tpm2_unseal_cmd(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options,
- u32 blob_handle, unsigned int flags)
+ u32 blob_handle)
{
struct tpm_buf buf;
u16 data_len;
@@ -612,8 +618,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
options->blobauth /* hmac */,
TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 6, flags,
- "unsealing");
+ rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
if (rc > 0)
rc = -EPERM;
@@ -657,17 +662,12 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
u32 blob_handle;
int rc;
- mutex_lock(&chip->tpm_mutex);
- rc = tpm2_load_cmd(chip, payload, options, &blob_handle,
- TPM_TRANSMIT_UNLOCKED);
+ rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
if (rc)
- goto out;
+ return rc;
- rc = tpm2_unseal_cmd(chip, payload, options, blob_handle,
- TPM_TRANSMIT_UNLOCKED);
- tpm2_flush_context_cmd(chip, blob_handle, TPM_TRANSMIT_UNLOCKED);
-out:
- mutex_unlock(&chip->tpm_mutex);
+ rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
+ tpm2_flush_context(chip, blob_handle);
return rc;
}
@@ -703,7 +703,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
tpm_buf_append_u32(&buf, property_id);
tpm_buf_append_u32(&buf, 1);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+ rc = tpm_transmit_cmd(chip, &buf, 0, NULL);
if (!rc) {
out = (struct tpm2_get_cap_out *)
&buf.data[TPM_HEADER_SIZE];
@@ -733,8 +733,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
if (rc)
return;
tpm_buf_append_u16(&buf, shutdown_type);
- tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
- "stopping the TPM");
+ tpm_transmit_cmd(chip, &buf, 0, "stopping the TPM");
tpm_buf_destroy(&buf);
}
@@ -763,7 +762,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
return rc;
tpm_buf_append_u8(&buf, full);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+ rc = tpm_transmit_cmd(chip, &buf, 0,
"attempting the self test");
tpm_buf_destroy(&buf);
@@ -790,7 +789,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
*/
int tpm2_probe(struct tpm_chip *chip)
{
- struct tpm_output_header *out;
+ struct tpm_header *out;
struct tpm_buf buf;
int rc;
@@ -800,10 +799,10 @@ int tpm2_probe(struct tpm_chip *chip)
tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
tpm_buf_append_u32(&buf, TPM_PT_TOTAL_COMMANDS);
tpm_buf_append_u32(&buf, 1);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+ rc = tpm_transmit_cmd(chip, &buf, 0, NULL);
/* We ignore TPM return codes on purpose. */
if (rc >= 0) {
- out = (struct tpm_output_header *)buf.data;
+ out = (struct tpm_header *)buf.data;
if (be16_to_cpu(out->tag) == TPM2_ST_NO_SESSIONS)
chip->flags |= TPM_CHIP_FLAG_TPM2;
}
@@ -812,6 +811,30 @@ int tpm2_probe(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm2_probe);
+static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index)
+{
+ struct tpm_bank_info *bank = chip->allocated_banks + bank_index;
+ struct tpm_digest digest = { .alg_id = bank->alg_id };
+ int i;
+
+ /*
+ * Look the algorithm up in tpm2_hash_map first: this yields the
+ * crypto subsystem identifiers and avoids an unnecessary PCR read.
+ * Only algorithms unknown to the map fall back to probing the TPM.
+ */
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
+ enum hash_algo crypto_algo = tpm2_hash_map[i].crypto_id;
+
+ if (bank->alg_id != tpm2_hash_map[i].tpm_id)
+ continue;
+
+ bank->digest_size = hash_digest_size[crypto_algo];
+ bank->crypto_id = crypto_algo;
+ return 0;
+ }
+
+ return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size);
+}
+
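tpm2_init_bank_info() relies on the digest_size_ptr escape hatch added to tpm2_pcr_read() above; the same mechanism in isolation, as a hedged sketch:

        /* Sketch: probe an unknown bank's digest size with a throwaway
         * PCR read. A non-NULL size pointer skips the allocated-bank
         * check and reports whatever digest size the TPM returned.
         */
        static int example_probe_digest_size(struct tpm_chip *chip, u16 alg_id,
                                             u16 *size)
        {
                struct tpm_digest digest = { .alg_id = alg_id };

                return tpm2_pcr_read(chip, 0, &digest, size);
        }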
struct tpm2_pcr_selection {
__be16 hash_alg;
u8 size_of_select;
@@ -825,8 +848,10 @@ static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
void *marker;
void *end;
void *pcr_select_offset;
- unsigned int count;
u32 sizeof_pcr_selection;
+ u32 nr_possible_banks;
+ u32 nr_alloc_banks = 0;
+ u16 hash_alg;
u32 rsp_len;
int rc;
int i = 0;
@@ -839,16 +864,18 @@ static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
tpm_buf_append_u32(&buf, 0);
tpm_buf_append_u32(&buf, 1);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 9, 0,
- "get tpm pcr allocation");
+ rc = tpm_transmit_cmd(chip, &buf, 9, "get tpm pcr allocation");
if (rc)
goto out;
- count = be32_to_cpup(
+ nr_possible_banks = be32_to_cpup(
(__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
- if (count > ARRAY_SIZE(chip->active_banks)) {
- rc = -ENODEV;
+ chip->allocated_banks = kcalloc(nr_possible_banks,
+ sizeof(*chip->allocated_banks),
+ GFP_KERNEL);
+ if (!chip->allocated_banks) {
+ rc = -ENOMEM;
goto out;
}
@@ -857,7 +884,7 @@ static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
rsp_len = be32_to_cpup((__be32 *)&buf.data[2]);
end = &buf.data[rsp_len];
- for (i = 0; i < count; i++) {
+ for (i = 0; i < nr_possible_banks; i++) {
pcr_select_offset = marker +
offsetof(struct tpm2_pcr_selection, size_of_select);
if (pcr_select_offset >= end) {
@@ -866,17 +893,28 @@ static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
}
memcpy(&pcr_selection, marker, sizeof(pcr_selection));
- chip->active_banks[i] = be16_to_cpu(pcr_selection.hash_alg);
+ hash_alg = be16_to_cpu(pcr_selection.hash_alg);
+
+ pcr_select_offset = memchr_inv(pcr_selection.pcr_select, 0,
+ pcr_selection.size_of_select);
+ if (pcr_select_offset) {
+ chip->allocated_banks[nr_alloc_banks].alg_id = hash_alg;
+
+ rc = tpm2_init_bank_info(chip, nr_alloc_banks);
+ if (rc < 0)
+ break;
+
+ nr_alloc_banks++;
+ }
+
sizeof_pcr_selection = sizeof(pcr_selection.hash_alg) +
sizeof(pcr_selection.size_of_select) +
pcr_selection.size_of_select;
marker = marker + sizeof_pcr_selection;
}
+ chip->nr_allocated_banks = nr_alloc_banks;
out:
- if (i < ARRAY_SIZE(chip->active_banks))
- chip->active_banks[i] = TPM2_ALG_ERROR;
-
tpm_buf_destroy(&buf);
return rc;
@@ -911,8 +949,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
tpm_buf_append_u32(&buf, TPM2_CC_FIRST);
tpm_buf_append_u32(&buf, nr_commands);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
- 9 + 4 * nr_commands, 0, NULL);
+ rc = tpm_transmit_cmd(chip, &buf, 9 + 4 * nr_commands, NULL);
if (rc) {
tpm_buf_destroy(&buf);
goto out;
@@ -969,8 +1006,7 @@ static int tpm2_startup(struct tpm_chip *chip)
return rc;
tpm_buf_append_u16(&buf, TPM2_SU_CLEAR);
- rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
- "attempting to start the TPM");
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM");
tpm_buf_destroy(&buf);
return rc;
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index dcdfde3c253e..4a2773c3374f 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -38,8 +38,7 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
if (space->session_tbl[i])
- tpm2_flush_context_cmd(chip, space->session_tbl[i],
- TPM_TRANSMIT_NESTED);
+ tpm2_flush_context(chip, space->session_tbl[i]);
}
}
@@ -61,7 +60,10 @@ int tpm2_init_space(struct tpm_space *space)
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
{
mutex_lock(&chip->tpm_mutex);
- tpm2_flush_sessions(chip, space);
+ if (!tpm_chip_start(chip)) {
+ tpm2_flush_sessions(chip, space);
+ tpm_chip_stop(chip);
+ }
mutex_unlock(&chip->tpm_mutex);
kfree(space->context_buf);
kfree(space->session_buf);
@@ -83,8 +85,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
body_size = sizeof(*ctx) + be16_to_cpu(ctx->blob_size);
tpm_buf_append(&tbuf, &buf[*offset], body_size);
- rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
- TPM_TRANSMIT_NESTED, NULL);
+ rc = tpm_transmit_cmd(chip, &tbuf, 4, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -132,8 +133,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
tpm_buf_append_u32(&tbuf, handle);
- rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
- TPM_TRANSMIT_NESTED, NULL);
+ rc = tpm_transmit_cmd(chip, &tbuf, 0, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -162,15 +162,14 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
return 0;
}
-static void tpm2_flush_space(struct tpm_chip *chip)
+void tpm2_flush_space(struct tpm_chip *chip)
{
struct tpm_space *space = &chip->work_space;
int i;
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
- tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_NESTED);
+ tpm2_flush_context(chip, space->context_tbl[i]);
tpm2_flush_sessions(chip, space);
}
@@ -264,14 +263,54 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
return 0;
}
-int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
- u8 *cmd)
+static int tpm_find_and_validate_cc(struct tpm_chip *chip,
+ struct tpm_space *space,
+ const void *cmd, size_t len)
+{
+ const struct tpm_header *header = (const void *)cmd;
+ int i;
+ u32 cc;
+ u32 attrs;
+ unsigned int nr_handles;
+
+ if (len < TPM_HEADER_SIZE || !chip->nr_commands)
+ return -EINVAL;
+
+ cc = be32_to_cpu(header->ordinal);
+
+ i = tpm2_find_cc(chip, cc);
+ if (i < 0) {
+ dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
+ cc);
+ return -EOPNOTSUPP;
+ }
+
+ attrs = chip->cc_attrs_tbl[i];
+ nr_handles =
+ 4 * ((attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0));
+ if (len < TPM_HEADER_SIZE + 4 * nr_handles)
+ goto err_len;
+
+ return cc;
+err_len:
+ dev_dbg(&chip->dev, "%s: insufficient command length %zu", __func__,
+ len);
+ return -EINVAL;
+}
+
+int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
+ size_t cmdsiz)
{
int rc;
+ int cc;
if (!space)
return 0;
+ cc = tpm_find_and_validate_cc(chip, space, cmd, cmdsiz);
+ if (cc < 0)
+ return cc;
+
memcpy(&chip->work_space.context_tbl, &space->context_tbl,
sizeof(space->context_tbl));
memcpy(&chip->work_space.session_tbl, &space->session_tbl,
@@ -291,6 +330,7 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
return rc;
}
+ chip->last_cc = cc;
return 0;
}
@@ -334,7 +374,7 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
size_t len)
{
struct tpm_space *space = &chip->work_space;
- struct tpm_output_header *header = (void *)rsp;
+ struct tpm_header *header = (struct tpm_header *)rsp;
u32 phandle;
u32 phandle_type;
u32 vhandle;
@@ -377,7 +417,7 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
return 0;
out_no_slots:
- tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_NESTED);
+ tpm2_flush_context(chip, phandle);
dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
phandle);
return -ENOMEM;
@@ -394,7 +434,7 @@ static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp,
size_t len)
{
struct tpm_space *space = &chip->work_space;
- struct tpm_output_header *header = (void *)rsp;
+ struct tpm_header *header = (struct tpm_header *)rsp;
struct tpm2_cap_handles *data;
u32 phandle;
u32 phandle_type;
@@ -464,8 +504,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
} else if (rc)
return rc;
- tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_NESTED);
+ tpm2_flush_context(chip, space->context_tbl[i]);
space->context_tbl[i] = ~0;
}
@@ -490,30 +529,30 @@ static int tpm2_save_space(struct tpm_chip *chip)
}
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
- u32 cc, u8 *buf, size_t *bufsiz)
+ void *buf, size_t *bufsiz)
{
- struct tpm_output_header *header = (void *)buf;
+ struct tpm_header *header = buf;
int rc;
if (!space)
return 0;
- rc = tpm2_map_response_header(chip, cc, buf, *bufsiz);
+ rc = tpm2_map_response_header(chip, chip->last_cc, buf, *bufsiz);
if (rc) {
tpm2_flush_space(chip);
- return rc;
+ goto out;
}
- rc = tpm2_map_response_body(chip, cc, buf, *bufsiz);
+ rc = tpm2_map_response_body(chip, chip->last_cc, buf, *bufsiz);
if (rc) {
tpm2_flush_space(chip);
- return rc;
+ goto out;
}
rc = tpm2_save_space(chip);
if (rc) {
tpm2_flush_space(chip);
- return rc;
+ goto out;
}
*bufsiz = be32_to_cpu(header->length);
@@ -526,4 +565,7 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
return 0;
+out:
+ dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
+ return rc;
}
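
With command validation folded into tpm2_prepare_space() and the command code cached in chip->last_cc, the caller-side pairing reduces to roughly the following (a paraphrase of the intended resource-manager flow, not code from this hunk):

        /* Paraphrased sketch of how prepare/transmit/commit now chain. */
        static ssize_t example_space_transmit(struct tpm_chip *chip,
                                              struct tpm_space *space,
                                              u8 *buf, size_t bufsiz)
        {
                size_t len;
                ssize_t ret;

                ret = tpm2_prepare_space(chip, space, buf, bufsiz);
                if (ret)
                        return ret;

                ret = tpm_transmit(chip, buf, bufsiz);
                if (ret < 0)
                        return ret;

                len = ret;
                ret = tpm2_commit_space(chip, space, buf, &len);
                return ret ? ret : len;
        }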
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 66a14526aaf4..a290b30a0c35 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -105,7 +105,7 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
iowrite8(buf[i], priv->iobase);
}
- return count;
+ return 0;
}
static void tpm_atml_cancel(struct tpm_chip *chip)
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 36952ef98f90..763fc7e6c005 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -287,19 +287,29 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
unsigned int expected;
- /* sanity check */
- if (count < 6)
+ /* A sanity check that the upper layer wants to get at least the header
+ * as that is the minimum size for any TPM response.
+ */
+ if (count < TPM_HEADER_SIZE)
return -EIO;
+ /* If this bit is set then, according to the spec, the TPM is in an
+ * unrecoverable condition.
+ */
if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
return -EIO;
- memcpy_fromio(buf, priv->rsp, 6);
- expected = be32_to_cpup((__be32 *) &buf[2]);
- if (expected > count || expected < 6)
+ /* Read the first 8 bytes in order to get the length of the response.
+ * We read exactly a quad word in order to make sure that the remaining
+ * reads will be aligned.
+ */
+ memcpy_fromio(buf, priv->rsp, 8);
+
+ expected = be32_to_cpup((__be32 *)&buf[2]);
+ if (expected > count || expected < TPM_HEADER_SIZE)
return -EIO;
- memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
+ memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
return expected;
}
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 95ce2e9ccdc6..8a7e80923091 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -46,7 +46,7 @@ struct priv_data {
/* This is the amount we read on the first try. 25 was chosen to fit a
* fair number of read responses in the buffer so a 2nd retry can be
* avoided in small message cases. */
- u8 buffer[sizeof(struct tpm_output_header) + 25];
+ u8 buffer[sizeof(struct tpm_header) + 25];
};
static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
@@ -65,15 +65,22 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
dev_dbg(&chip->dev,
"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
(int)min_t(size_t, 64, len), buf, len, status);
- return status;
+
+ if (status < 0)
+ return status;
+
+ /* The upper layer does not support incomplete sends. */
+ if (status != len)
+ return -E2BIG;
+
+ return 0;
}
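
The same contract change repeats in every driver touched below: send() now returns 0 on success instead of the byte count. A driver-side template, with example_hw_write() standing in for the actual bus write:

        /* Template for the new send() contract: 0 on success, -errno on
         * failure, never the number of bytes written.
         */
        static int example_send(struct tpm_chip *chip, u8 *buf, size_t len)
        {
                int n = example_hw_write(buf, len); /* hypothetical helper */

                if (n < 0)
                        return n;
                if (n != len)
                        return -E2BIG; /* incomplete sends are unsupported */
                return 0;
        }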
static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct priv_data *priv = dev_get_drvdata(&chip->dev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
- struct tpm_output_header *hdr =
- (struct tpm_output_header *)priv->buffer;
+ struct tpm_header *hdr = (struct tpm_header *)priv->buffer;
u32 expected_len;
int rc;
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 9086edc9066b..3b4e9672ff6c 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -26,8 +26,7 @@
#include <linux/wait.h>
#include "tpm.h"
-/* max. buffer size supported by our TPM */
-#define TPM_BUFSIZE 1260
+#define TPM_I2C_INFINEON_BUFSIZE 1260
/* max. number of iterations after I2C NAK */
#define MAX_COUNT 3
@@ -63,11 +62,13 @@ enum i2c_chip_type {
UNKNOWN,
};
-/* Structure to store I2C TPM specific stuff */
struct tpm_inf_dev {
struct i2c_client *client;
int locality;
- u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
+ /* In addition to the data itself, the buffer must fit the 7-bit I2C
+ * address and the direction bit.
+ */
+ u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1];
struct tpm_chip *chip;
enum i2c_chip_type chip_type;
unsigned int adapterlimit;
@@ -219,7 +220,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
.buf = tpm_dev.buf
};
- if (len > TPM_BUFSIZE)
+ if (len > TPM_I2C_INFINEON_BUFSIZE)
return -EINVAL;
if (!tpm_dev.client->adapter->algo->master_xfer)
@@ -527,8 +528,8 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
u8 retries = 0;
u8 sts = TPM_STS_GO;
- if (len > TPM_BUFSIZE)
- return -E2BIG; /* command is too long for our tpm, sorry */
+ if (len > TPM_I2C_INFINEON_BUFSIZE)
+ return -E2BIG;
if (request_locality(chip, 0) < 0)
return -EBUSY;
@@ -587,7 +588,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* go and do it */
iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
- return len;
+ return 0;
out_err:
tpm_tis_i2c_ready(chip);
/* The TPM needs some time to clean up here,
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 217f7f1cbde8..315a3b4548f7 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -35,14 +35,12 @@
#include "tpm.h"
/* I2C interface offsets */
-#define TPM_STS 0x00
-#define TPM_BURST_COUNT 0x01
-#define TPM_DATA_FIFO_W 0x20
-#define TPM_DATA_FIFO_R 0x40
-#define TPM_VID_DID_RID 0x60
-/* TPM command header size */
-#define TPM_HEADER_SIZE 10
-#define TPM_RETRY 5
+#define TPM_STS 0x00
+#define TPM_BURST_COUNT 0x01
+#define TPM_DATA_FIFO_W 0x20
+#define TPM_DATA_FIFO_R 0x40
+#define TPM_VID_DID_RID 0x60
+#define TPM_I2C_RETRIES 5
/*
* I2C bus device maximum buffer size w/o counting I2C address or command
* i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
@@ -292,7 +290,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
dev_err(dev, "%s() count < header size\n", __func__);
return -EIO;
}
- for (retries = 0; retries < TPM_RETRY; retries++) {
+ for (retries = 0; retries < TPM_I2C_RETRIES; retries++) {
if (retries > 0) {
/* if this is not the first trial, set responseRetry */
i2c_nuvoton_write_status(client,
@@ -467,7 +465,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
}
dev_dbg(dev, "%s() -> %zd\n", __func__, len);
- return len;
+ return 0;
}
static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 07b5a487d0c8..757ca45b39b8 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -139,14 +139,14 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
/**
- * tpm_ibmvtpm_send - Send tpm request
- *
+ * tpm_ibmvtpm_send() - Send a TPM command
* @chip: tpm chip struct
* @buf: buffer contains data to send
* @count: size of buffer
*
* Return:
- * Number of bytes sent or < 0 on error.
+ * 0 on success,
+ * -errno on error
*/
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
@@ -192,7 +192,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
rc = 0;
ibmvtpm->tpm_processing_cmd = false;
} else
- rc = count;
+ rc = 0;
spin_unlock(&ibmvtpm->rtce_lock);
return rc;
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index d8f10047fbba..97f6d4fe0aee 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -354,7 +354,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
for (i = 0; i < count; i++) {
wait_and_send(chip, buf[i]);
}
- return count;
+ return 0;
}
static void tpm_inf_cancel(struct tpm_chip *chip)
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 5d6cce74cd3f..9bee3c5eb4bf 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -226,7 +226,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
}
outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
- return count;
+ return 0;
}
static void tpm_nsc_cancel(struct tpm_chip *chip)
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 86dd8521feef..75e7a856177c 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -20,7 +20,8 @@
#include <linux/acpi.h>
#include "tpm.h"
-#define TPM_PPI_REVISION_ID 1
+#define TPM_PPI_REVISION_ID_1 1
+#define TPM_PPI_REVISION_ID_2 2
#define TPM_PPI_FN_VERSION 1
#define TPM_PPI_FN_SUBREQ 2
#define TPM_PPI_FN_GETREQ 3
@@ -28,7 +29,7 @@
#define TPM_PPI_FN_GETRSP 5
#define TPM_PPI_FN_SUBREQ2 7
#define TPM_PPI_FN_GETOPR 8
-#define PPI_TPM_REQ_MAX 22
+#define PPI_TPM_REQ_MAX 101 /* PPI 1.3 for TPM 2 */
#define PPI_VS_REQ_START 128
#define PPI_VS_REQ_END 255
@@ -36,14 +37,18 @@ static const guid_t tpm_ppi_guid =
GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
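+/* In PPI 1.3, operation 23 (SetPCRBanks in the TCG PPI specification) is
+ * currently the only request that carries an extra integer parameter.
+ */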
+static bool tpm_ppi_req_has_parameter(u64 req)
+{
+ return req == 23;
+}
+
static inline union acpi_object *
tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
- union acpi_object *argv4)
+ union acpi_object *argv4, u64 rev)
{
BUG_ON(!ppi_handle);
return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid,
- TPM_PPI_REVISION_ID,
- func, argv4, type);
+ rev, func, argv4, type);
}
static ssize_t tpm_show_ppi_version(struct device *dev,
@@ -60,9 +65,14 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
ssize_t size = -EINVAL;
union acpi_object *obj;
struct tpm_chip *chip = to_tpm_chip(dev);
+ u64 rev = TPM_PPI_REVISION_ID_2;
+ u64 req;
+
+ if (strcmp(chip->ppi_version, "1.2") < 0)
+ rev = TPM_PPI_REVISION_ID_1;
obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETREQ,
- ACPI_TYPE_PACKAGE, NULL);
+ ACPI_TYPE_PACKAGE, NULL, rev);
if (!obj)
return -ENXIO;
@@ -72,7 +82,23 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
* error. The second is pending TPM operation requested by the OS, 0
* means none and >0 means operation value.
*/
- if (obj->package.count == 2 &&
+ if (obj->package.count == 3 &&
+ obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[1].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[2].type == ACPI_TYPE_INTEGER) {
+ if (obj->package.elements[0].integer.value)
+ size = -EFAULT;
+ else {
+ req = obj->package.elements[1].integer.value;
+ if (tpm_ppi_req_has_parameter(req))
+ size = scnprintf(buf, PAGE_SIZE,
+ "%llu %llu\n", req,
+ obj->package.elements[2].integer.value);
+ else
+ size = scnprintf(buf, PAGE_SIZE,
+ "%llu\n", req);
+ }
+ } else if (obj->package.count == 2 &&
obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
obj->package.elements[1].type == ACPI_TYPE_INTEGER) {
if (obj->package.elements[0].integer.value)
@@ -94,9 +120,10 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
u32 req;
u64 ret;
int func = TPM_PPI_FN_SUBREQ;
- union acpi_object *obj, tmp;
- union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
+ union acpi_object *obj, tmp[2];
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(2, tmp);
struct tpm_chip *chip = to_tpm_chip(dev);
+ u64 rev = TPM_PPI_REVISION_ID_1;
/*
* the function to submit TPM operation request to pre-os environment
@@ -104,7 +131,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
* version 1.1
*/
if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
- TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_SUBREQ2))
+ TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_SUBREQ2))
func = TPM_PPI_FN_SUBREQ2;
/*
@@ -113,20 +140,29 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
* string/package type. For PPI version 1.0 and 1.1, use buffer type
* for compatibility, and use package type since 1.2 according to spec.
*/
- if (strcmp(chip->ppi_version, "1.2") < 0) {
+ if (strcmp(chip->ppi_version, "1.3") == 0) {
+ if (sscanf(buf, "%llu %llu", &tmp[0].integer.value,
+ &tmp[1].integer.value) != 2)
+ goto ppi12;
+ rev = TPM_PPI_REVISION_ID_2;
+ tmp[0].type = ACPI_TYPE_INTEGER;
+ tmp[1].type = ACPI_TYPE_INTEGER;
+ } else if (strcmp(chip->ppi_version, "1.2") < 0) {
if (sscanf(buf, "%d", &req) != 1)
return -EINVAL;
argv4.type = ACPI_TYPE_BUFFER;
argv4.buffer.length = sizeof(req);
argv4.buffer.pointer = (u8 *)&req;
} else {
- tmp.type = ACPI_TYPE_INTEGER;
- if (sscanf(buf, "%llu", &tmp.integer.value) != 1)
+ppi12:
+ argv4.package.count = 1;
+ tmp[0].type = ACPI_TYPE_INTEGER;
+ if (sscanf(buf, "%llu", &tmp[0].integer.value) != 1)
return -EINVAL;
}
obj = tpm_eval_dsm(chip->acpi_dev_handle, func, ACPI_TYPE_INTEGER,
- &argv4);
+ &argv4, rev);
if (!obj) {
return -ENXIO;
} else {
@@ -170,7 +206,7 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
if (strcmp(chip->ppi_version, "1.2") < 0)
obj = &tmp;
obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETACT,
- ACPI_TYPE_INTEGER, obj);
+ ACPI_TYPE_INTEGER, obj, TPM_PPI_REVISION_ID_1);
if (!obj) {
return -ENXIO;
} else {
@@ -196,7 +232,7 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
struct tpm_chip *chip = to_tpm_chip(dev);
obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETRSP,
- ACPI_TYPE_PACKAGE, NULL);
+ ACPI_TYPE_PACKAGE, NULL, TPM_PPI_REVISION_ID_1);
if (!obj)
return -ENXIO;
@@ -264,7 +300,7 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
"User not required",
};
- if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID,
+ if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1,
1 << TPM_PPI_FN_GETOPR))
return -EPERM;
@@ -272,7 +308,8 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
for (i = start; i <= end; i++) {
tmp.integer.value = i;
obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
- ACPI_TYPE_INTEGER, &argv);
+ ACPI_TYPE_INTEGER, &argv,
+ TPM_PPI_REVISION_ID_1);
if (!obj) {
return -ENOMEM;
} else {
@@ -338,12 +375,13 @@ void tpm_add_ppi(struct tpm_chip *chip)
return;
if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
- TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION))
+ TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_VERSION))
return;
/* Cache PPI version string. */
obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid,
- TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
+ TPM_PPI_REVISION_ID_1,
+ TPM_PPI_FN_VERSION,
NULL, ACPI_TYPE_STRING);
if (obj) {
strlcpy(chip->ppi_version, obj->string.pointer,
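
With the hunks above, a PPI 1.3 platform can expose requests that carry an
argument (tpm_ppi_req_has_parameter() flags operation 23 as the only such
request), and the sysfs request attribute accepts "<operation> <argument>"
accordingly. A hedged userspace sketch; the sysfs path and the argument value
are only illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* assumes the chip is tpm0; adjust the path as needed */
            const char *path = "/sys/class/tpm/tpm0/ppi/request";
            const char *req = "23 1";       /* "<operation> <argument>" */
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, req, strlen(req)) < 0) {
                    perror("write");
                    close(fd);
                    return 1;
            }
            return close(fd);
    }
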
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index bf7e49cfa643..b9f64684c3fb 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -481,7 +481,7 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
goto out_err;
}
}
- return len;
+ return 0;
out_err:
tpm_tis_ready(chip);
return rc;
@@ -521,35 +521,38 @@ static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
(TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
};
-static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
+static void tpm_tis_update_timeouts(struct tpm_chip *chip,
unsigned long *timeout_cap)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int i, rc;
u32 did_vid;
+ chip->timeout_adjusted = false;
+
if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, true);
rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
- if (rc < 0)
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed to read did_vid: %d\n",
+ __func__, rc);
goto out;
+ }
for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
if (vendor_timeout_overrides[i].did_vid != did_vid)
continue;
memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
sizeof(vendor_timeout_overrides[i].timeout_us));
- rc = true;
+ chip->timeout_adjusted = true;
}
- rc = false;
-
out:
if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, false);
- return rc;
+ return;
}
/*
@@ -913,7 +916,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
intmask &= ~TPM_GLOBAL_INT_ENABLE;
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ rc = tpm_chip_start(chip);
+ if (rc)
+ goto out_err;
rc = tpm2_probe(chip);
+ tpm_chip_stop(chip);
if (rc)
goto out_err;
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 87a0ce47f201..d74f3de74ae6 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -303,9 +303,9 @@ out:
static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
u8 *buf, size_t count)
{
- struct tpm_input_header *hdr = (struct tpm_input_header *)buf;
+ struct tpm_header *hdr = (struct tpm_header *)buf;
- if (count < sizeof(struct tpm_input_header))
+ if (count < sizeof(struct tpm_header))
return 0;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
@@ -335,7 +335,6 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
- int rc = 0;
if (count > sizeof(proxy_dev->buffer)) {
dev_err(&chip->dev,
@@ -366,7 +365,7 @@ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
wake_up_interruptible(&proxy_dev->wq);
- return rc;
+ return 0;
}
static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
@@ -402,7 +401,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
{
struct tpm_buf buf;
int rc;
- const struct tpm_output_header *header;
+ const struct tpm_header *header;
struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
@@ -417,9 +416,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
proxy_dev->state |= STATE_DRIVER_COMMAND;
- rc = tpm_transmit_cmd(chip, NULL, buf.data, tpm_buf_length(&buf), 0,
- TPM_TRANSMIT_NESTED,
- "attempting to set locality");
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to set locality");
proxy_dev->state &= ~STATE_DRIVER_COMMAND;
@@ -428,7 +425,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
goto out;
}
- header = (const struct tpm_output_header *)buf.data;
+ header = (const struct tpm_header *)buf.data;
rc = be32_to_cpu(header->return_code);
if (rc)
locality = -1;
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index b150f87f38f5..4e2d00cb0d81 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -163,7 +163,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
wmb();
notify_remote_via_evtchn(priv->evtchn);
- ordinal = be32_to_cpu(((struct tpm_input_header*)buf)->ordinal);
+ ordinal = be32_to_cpu(((struct tpm_header *)buf)->ordinal);
duration = tpm_calc_ordinal_duration(chip, ordinal);
if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
@@ -173,7 +173,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
return -ETIME;
}
- return count;
+ return 0;
}
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index d2f0bb5ba47e..e705aab9e38b 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -290,6 +290,12 @@ config COMMON_CLK_BD718XX
This driver supports ROHM BD71837 and ROHM BD71847
PMICs clock gates.
+config COMMON_CLK_FIXED_MMIO
+ bool "Clock driver for Memory Mapped Fixed values"
+ depends on COMMON_CLK && OF
+ help
+	  Support for fixed-rate clocks whose frequency is read once
+	  from a memory-mapped IO register.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 8a9440a97500..1db133652f0c 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
+obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
@@ -78,7 +79,7 @@ obj-$(CONFIG_ARCH_K3) += keystone/
obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
obj-y += mediatek/
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/
+obj-$(CONFIG_ARCH_MESON) += meson/
obj-$(CONFIG_MACH_PIC32) += microchip/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig
index 04f0a6355726..5b45ca35757e 100644
--- a/drivers/clk/actions/Kconfig
+++ b/drivers/clk/actions/Kconfig
@@ -9,6 +9,11 @@ if CLK_ACTIONS
# SoC Drivers
+config CLK_OWL_S500
+ bool "Support for the Actions Semi OWL S500 clocks"
+ depends on ARCH_ACTIONS || COMPILE_TEST
+ default ARCH_ACTIONS
+
config CLK_OWL_S700
bool "Support for the Actions Semi OWL S700 clocks"
depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile
index ccfdf9781cef..a2588e55c790 100644
--- a/drivers/clk/actions/Makefile
+++ b/drivers/clk/actions/Makefile
@@ -10,5 +10,6 @@ clk-owl-y += owl-pll.o
clk-owl-y += owl-reset.o
# SoC support
+obj-$(CONFIG_CLK_OWL_S500) += owl-s500.o
obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o
obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
index 058e06d7099f..02437bdedf4d 100644
--- a/drivers/clk/actions/owl-pll.c
+++ b/drivers/clk/actions/owl-pll.c
@@ -179,7 +179,7 @@ static int owl_pll_set_rate(struct clk_hw *hw, unsigned long rate,
regmap_write(common->regmap, pll_hw->reg, reg);
- udelay(PLL_STABILITY_WAIT_US);
+ udelay(pll_hw->delay);
return 0;
}
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h
index 0aae30abd5dc..6fb0d45bb088 100644
--- a/drivers/clk/actions/owl-pll.h
+++ b/drivers/clk/actions/owl-pll.h
@@ -13,6 +13,8 @@
#include "owl-common.h"
+#define OWL_PLL_DEF_DELAY 50
+
/* last entry should have rate = 0 */
struct clk_pll_table {
unsigned int val;
@@ -27,6 +29,7 @@ struct owl_pll_hw {
u8 width;
u8 min_mul;
u8 max_mul;
+ u8 delay;
const struct clk_pll_table *table;
};
@@ -36,7 +39,7 @@ struct owl_pll {
};
#define OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
- _width, _min_mul, _max_mul, _table) \
+ _width, _min_mul, _max_mul, _delay, _table) \
{ \
.reg = _reg, \
.bfreq = _bfreq, \
@@ -45,6 +48,7 @@ struct owl_pll {
.width = _width, \
.min_mul = _min_mul, \
.max_mul = _max_mul, \
+ .delay = _delay, \
.table = _table, \
}
@@ -52,8 +56,8 @@ struct owl_pll {
_shift, _width, _min_mul, _max_mul, _table, _flags) \
struct owl_pll _struct = { \
.pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
- _width, _min_mul, \
- _max_mul, _table), \
+ _width, _min_mul, _max_mul, \
+ OWL_PLL_DEF_DELAY, _table), \
.common = { \
.regmap = NULL, \
.hw.init = CLK_HW_INIT(_name, \
@@ -67,8 +71,23 @@ struct owl_pll {
_shift, _width, _min_mul, _max_mul, _table, _flags) \
struct owl_pll _struct = { \
.pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
- _width, _min_mul, \
- _max_mul, _table), \
+ _width, _min_mul, _max_mul, \
+ OWL_PLL_DEF_DELAY, _table), \
+ .common = { \
+ .regmap = NULL, \
+ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \
+ &owl_pll_ops, \
+ _flags), \
+ }, \
+ }
+
+#define OWL_PLL_NO_PARENT_DELAY(_struct, _name, _reg, _bfreq, _bit_idx, \
+ _shift, _width, _min_mul, _max_mul, _delay, _table, \
+ _flags) \
+ struct owl_pll _struct = { \
+ .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \
+ _width, _min_mul, _max_mul, \
+ _delay, _table), \
.common = { \
.regmap = NULL, \
.hw.init = CLK_HW_INIT_NO_PARENT(_name, \
@@ -78,7 +97,6 @@ struct owl_pll {
}
#define mul_mask(m) ((1 << ((m)->width)) - 1)
-#define PLL_STABILITY_WAIT_US (50)
static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw)
{
diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
new file mode 100644
index 000000000000..e2007ac4d235
--- /dev/null
+++ b/drivers/clk/actions/owl-s500.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Actions Semi Owl S500 SoC clock driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * Copyright (c) 2018 LSI-TEC - Caninos Loucos
+ * Author: Edgar Bernardi Righi <edgar.righi@lsitec.org.br>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "owl-common.h"
+#include "owl-composite.h"
+#include "owl-divider.h"
+#include "owl-factor.h"
+#include "owl-fixed-factor.h"
+#include "owl-gate.h"
+#include "owl-mux.h"
+#include "owl-pll.h"
+
+#include <dt-bindings/clock/actions,s500-cmu.h>
+
+#define CMU_COREPLL (0x0000)
+#define CMU_DEVPLL (0x0004)
+#define CMU_DDRPLL (0x0008)
+#define CMU_NANDPLL (0x000C)
+#define CMU_DISPLAYPLL (0x0010)
+#define CMU_AUDIOPLL (0x0014)
+#define CMU_TVOUTPLL (0x0018)
+#define CMU_BUSCLK (0x001C)
+#define CMU_SENSORCLK (0x0020)
+#define CMU_LCDCLK (0x0024)
+#define CMU_DSICLK (0x0028)
+#define CMU_CSICLK (0x002C)
+#define CMU_DECLK (0x0030)
+#define CMU_BISPCLK (0x0034)
+#define CMU_BUSCLK1 (0x0038)
+#define CMU_VDECLK (0x0040)
+#define CMU_VCECLK (0x0044)
+#define CMU_NANDCCLK (0x004C)
+#define CMU_SD0CLK (0x0050)
+#define CMU_SD1CLK (0x0054)
+#define CMU_SD2CLK (0x0058)
+#define CMU_UART0CLK (0x005C)
+#define CMU_UART1CLK (0x0060)
+#define CMU_UART2CLK (0x0064)
+#define CMU_PWM4CLK (0x0068)
+#define CMU_PWM5CLK (0x006C)
+#define CMU_PWM0CLK (0x0070)
+#define CMU_PWM1CLK (0x0074)
+#define CMU_PWM2CLK (0x0078)
+#define CMU_PWM3CLK (0x007C)
+#define CMU_USBPLL (0x0080)
+#define CMU_ETHERNETPLL (0x0084)
+#define CMU_CVBSPLL (0x0088)
+#define CMU_LENSCLK (0x008C)
+#define CMU_GPU3DCLK (0x0090)
+#define CMU_CORECTL (0x009C)
+#define CMU_DEVCLKEN0 (0x00A0)
+#define CMU_DEVCLKEN1 (0x00A4)
+#define CMU_DEVRST0 (0x00A8)
+#define CMU_DEVRST1 (0x00AC)
+#define CMU_UART3CLK (0x00B0)
+#define CMU_UART4CLK (0x00B4)
+#define CMU_UART5CLK (0x00B8)
+#define CMU_UART6CLK (0x00BC)
+#define CMU_SSCLK (0x00C0)
+#define CMU_DIGITALDEBUG (0x00D0)
+#define CMU_ANALOGDEBUG (0x00D4)
+#define CMU_COREPLLDEBUG (0x00D8)
+#define CMU_DEVPLLDEBUG (0x00DC)
+#define CMU_DDRPLLDEBUG (0x00E0)
+#define CMU_NANDPLLDEBUG (0x00E4)
+#define CMU_DISPLAYPLLDEBUG (0x00E8)
+#define CMU_TVOUTPLLDEBUG (0x00EC)
+#define CMU_DEEPCOLORPLLDEBUG (0x00F4)
+#define CMU_AUDIOPLL_ETHPLLDEBUG (0x00F8)
+#define CMU_CVBSPLLDEBUG (0x00FC)
+
+#define OWL_S500_COREPLL_DELAY (150)
+#define OWL_S500_DDRPLL_DELAY (63)
+#define OWL_S500_DEVPLL_DELAY (28)
+#define OWL_S500_NANDPLL_DELAY (44)
+#define OWL_S500_DISPLAYPLL_DELAY (57)
+#define OWL_S500_ETHERNETPLL_DELAY (25)
+#define OWL_S500_AUDIOPLL_DELAY (100)
+
+static const struct clk_pll_table clk_audio_pll_table[] = {
+ { 0, 45158400 }, { 1, 49152000 },
+ { 0, 0 },
+};
+
+/* pll clocks */
+static OWL_PLL_NO_PARENT_DELAY(ethernet_pll_clk, "ethernet_pll_clk", CMU_ETHERNETPLL, 500000000, 0, 0, 0, 0, 0, OWL_S500_ETHERNETPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(core_pll_clk, "core_pll_clk", CMU_COREPLL, 12000000, 9, 0, 8, 4, 134, OWL_S500_COREPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(ddr_pll_clk, "ddr_pll_clk", CMU_DDRPLL, 12000000, 8, 0, 8, 1, 67, OWL_S500_DDRPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(nand_pll_clk, "nand_pll_clk", CMU_NANDPLL, 6000000, 8, 0, 7, 2, 86, OWL_S500_NANDPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(display_pll_clk, "display_pll_clk", CMU_DISPLAYPLL, 6000000, 8, 0, 8, 2, 126, OWL_S500_DISPLAYPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(dev_pll_clk, "dev_pll_clk", CMU_DEVPLL, 6000000, 8, 0, 7, 8, 126, OWL_S500_DEVPLL_DELAY, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT_DELAY(audio_pll_clk, "audio_pll_clk", CMU_AUDIOPLL, 0, 4, 0, 1, 0, 0, OWL_S500_AUDIOPLL_DELAY, clk_audio_pll_table, CLK_IGNORE_UNUSED);
+
+static const char * const dev_clk_mux_p[] = { "hosc", "dev_pll_clk" };
+static const char * const bisp_clk_mux_p[] = { "display_pll_clk", "dev_clk" };
+static const char * const sensor_clk_mux_p[] = { "hosc", "bisp_clk" };
+static const char * const sd_clk_mux_p[] = { "dev_clk", "nand_pll_clk" };
+static const char * const pwm_clk_mux_p[] = { "losc", "hosc" };
+static const char * const ahbprediv_clk_mux_p[] = { "dev_clk", "display_pll_clk", "nand_pll_clk", "ddr_pll_clk" };
+static const char * const uart_clk_mux_p[] = { "hosc", "dev_pll_clk" };
+static const char * const de_clk_mux_p[] = { "display_pll_clk", "dev_clk" };
+static const char * const i2s_clk_mux_p[] = { "audio_pll_clk" };
+static const char * const hde_clk_mux_p[] = { "dev_clk", "display_pll_clk", "nand_pll_clk", "ddr_pll_clk" };
+static const char * const nand_clk_mux_p[] = { "nand_pll_clk", "display_pll_clk", "dev_clk", "ddr_pll_clk" };
+
+static struct clk_factor_table sd_factor_table[] = {
+ /* bit0 ~ 4 */
+ { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
+ { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
+ { 8, 1, 9 }, { 9, 1, 10 }, { 10, 1, 11 }, { 11, 1, 12 },
+ { 12, 1, 13 }, { 13, 1, 14 }, { 14, 1, 15 }, { 15, 1, 16 },
+ { 16, 1, 17 }, { 17, 1, 18 }, { 18, 1, 19 }, { 19, 1, 20 },
+ { 20, 1, 21 }, { 21, 1, 22 }, { 22, 1, 23 }, { 23, 1, 24 },
+ { 24, 1, 25 }, { 25, 1, 26 }, { 26, 1, 27 }, { 27, 1, 28 },
+ { 28, 1, 29 }, { 29, 1, 30 }, { 30, 1, 31 }, { 31, 1, 32 },
+
+ /* bit8: /128 */
+ { 256, 1, 1 * 128 }, { 257, 1, 2 * 128 }, { 258, 1, 3 * 128 }, { 259, 1, 4 * 128 },
+ { 260, 1, 5 * 128 }, { 261, 1, 6 * 128 }, { 262, 1, 7 * 128 }, { 263, 1, 8 * 128 },
+ { 264, 1, 9 * 128 }, { 265, 1, 10 * 128 }, { 266, 1, 11 * 128 }, { 267, 1, 12 * 128 },
+ { 268, 1, 13 * 128 }, { 269, 1, 14 * 128 }, { 270, 1, 15 * 128 }, { 271, 1, 16 * 128 },
+ { 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 },
+ { 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 },
+ { 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 },
+ { 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 },
+ { 0, 0, 0 },
+};
+
+static struct clk_factor_table bisp_factor_table[] = {
+ { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
+ { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
+ { 0, 0, 0 },
+};
+
+static struct clk_factor_table ahb_factor_table[] = {
+ { 1, 1, 2 }, { 2, 1, 3 },
+ { 0, 0, 0 },
+};
+
+static struct clk_div_table rmii_ref_div_table[] = {
+ { 0, 4 }, { 1, 10 },
+ { 0, 0 },
+};
+
+static struct clk_div_table i2s_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
+ { 8, 24 },
+ { 0, 0 },
+};
+
+static struct clk_div_table nand_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 6 },
+ { 4, 8 }, { 5, 10 }, { 6, 12 }, { 7, 14 },
+ { 8, 16 }, { 9, 18 }, { 10, 20 }, { 11, 22 },
+ { 0, 0 },
+};
+
+/* mux clock */
+static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
+static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
+
+/* gate clocks */
+static OWL_GATE(spi0_clk, "spi0_clk", "ahb_clk", CMU_DEVCLKEN1, 10, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(spi1_clk, "spi1_clk", "ahb_clk", CMU_DEVCLKEN1, 11, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(spi2_clk, "spi2_clk", "ahb_clk", CMU_DEVCLKEN1, 12, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(spi3_clk, "spi3_clk", "ahb_clk", CMU_DEVCLKEN1, 13, 0, CLK_IGNORE_UNUSED);
+static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
+static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
+
+/* divider clocks */
+static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
+
+/* factor clocks */
+static OWL_FACTOR(ahb_clk, "ahb_clk", "h_clk", CMU_BUSCLK1, 2, 2, ahb_factor_table, 0, 0);
+static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 3, bisp_factor_table, 0, 0);
+static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 3, bisp_factor_table, 0, 0);
+
+/* composite clocks */
+static OWL_COMP_FACTOR(vce_clk, "vce_clk", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VCECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 26, 0),
+ OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(vde_clk, "vde_clk", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 25, 0),
+ OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(bisp_clk, "bisp_clk", bisp_clk_mux_p,
+ OWL_MUX_HW(CMU_BISPCLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_FACTOR_HW(CMU_BISPCLK, 0, 3, 0, bisp_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
+ OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_FACTOR_HW(CMU_SENSORCLK, 0, 3, 0, bisp_factor_table),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_FACTOR(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
+ OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_FACTOR_HW(CMU_SENSORCLK, 8, 3, 0, bisp_factor_table),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_FACTOR(sd0_clk, "sd0_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD0CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 5, 0),
+ OWL_FACTOR_HW(CMU_SD0CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(sd1_clk, "sd1_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD1CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 6, 0),
+ OWL_FACTOR_HW(CMU_SD1CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(sd2_clk, "sd2_clk", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD2CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 7, 0),
+ OWL_FACTOR_HW(CMU_SD2CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_DIV(pwm0_clk, "pwm0_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM0CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 23, 0),
+ OWL_DIVIDER_HW(CMU_PWM0CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm1_clk, "pwm1_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM1CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 24, 0),
+ OWL_DIVIDER_HW(CMU_PWM1CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm2_clk, "pwm2_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM2CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 25, 0),
+ OWL_DIVIDER_HW(CMU_PWM2CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm3_clk, "pwm3_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM3CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 26, 0),
+ OWL_DIVIDER_HW(CMU_PWM3CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm4_clk, "pwm4_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM4CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 11, 0),
+ OWL_DIVIDER_HW(CMU_PWM4CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(pwm5_clk, "pwm5_clk", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM5CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 0, 0),
+ OWL_DIVIDER_HW(CMU_PWM5CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_PASS(de_clk, "de_clk", de_clk_mux_p,
+ OWL_MUX_HW(CMU_DECLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 8, 0),
+ 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c0_clk, "i2c0_clk", "ethernet_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 14, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c1_clk, "i2c1_clk", "ethernet_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 15, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c2_clk, "i2c2_clk", "ethernet_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 30, 0),
+ 1, 5, 0);
+
+static OWL_COMP_FIXED_FACTOR(i2c3_clk, "i2c3_clk", "ethernet_pll_clk",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 31, 0),
+ 1, 5, 0);
+
+static OWL_COMP_DIV(uart0_clk, "uart0_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART0CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 6, 0),
+		    OWL_DIVIDER_HW(CMU_UART0CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART1CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 7, 0),
+ OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart2_clk, "uart2_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART2CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
+		    OWL_DIVIDER_HW(CMU_UART2CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart3_clk, "uart3_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART3CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
+		    OWL_DIVIDER_HW(CMU_UART3CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart4_clk, "uart4_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART4CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
+		    OWL_DIVIDER_HW(CMU_UART4CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart5_clk, "uart5_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART5CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
+		    OWL_DIVIDER_HW(CMU_UART5CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(uart6_clk, "uart6_clk", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART6CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
+		    OWL_DIVIDER_HW(CMU_UART6CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(i2srx_clk, "i2srx_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 21, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 20, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_DIV(i2stx_clk, "i2stx_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 20, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 16, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_DIV(hdmia_clk, "hdmia_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 22, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 24, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_DIV(spdif_clk, "spdif_clk", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 23, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 28, 4, 0, i2s_div_table),
+ 0);
+
+static OWL_COMP_DIV(nand_clk, "nand_clk", nand_clk_mux_p,
+ OWL_MUX_HW(CMU_NANDCCLK, 8, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 4, 0),
+ OWL_DIVIDER_HW(CMU_NANDCCLK, 0, 3, 0, nand_div_table),
+ CLK_SET_RATE_PARENT);
+
+static OWL_COMP_DIV(ecc_clk, "ecc_clk", nand_clk_mux_p,
+ OWL_MUX_HW(CMU_NANDCCLK, 8, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 4, 0),
+ OWL_DIVIDER_HW(CMU_NANDCCLK, 4, 3, 0, nand_div_table),
+ CLK_SET_RATE_PARENT);
+
+static struct owl_clk_common *s500_clks[] = {
+ &ethernet_pll_clk.common,
+ &core_pll_clk.common,
+ &ddr_pll_clk.common,
+ &dev_pll_clk.common,
+ &nand_pll_clk.common,
+ &audio_pll_clk.common,
+ &display_pll_clk.common,
+ &dev_clk.common,
+ &timer_clk.common,
+ &i2c0_clk.common,
+ &i2c1_clk.common,
+ &i2c2_clk.common,
+ &i2c3_clk.common,
+ &uart0_clk.common,
+ &uart1_clk.common,
+ &uart2_clk.common,
+ &uart3_clk.common,
+ &uart4_clk.common,
+ &uart5_clk.common,
+ &uart6_clk.common,
+ &pwm0_clk.common,
+ &pwm1_clk.common,
+ &pwm2_clk.common,
+ &pwm3_clk.common,
+ &pwm4_clk.common,
+ &pwm5_clk.common,
+ &sensor0_clk.common,
+ &sensor1_clk.common,
+ &sd0_clk.common,
+ &sd1_clk.common,
+ &sd2_clk.common,
+ &bisp_clk.common,
+ &ahb_clk.common,
+ &ahbprediv_clk.common,
+ &h_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &spi2_clk.common,
+ &spi3_clk.common,
+ &rmii_ref_clk.common,
+ &de_clk.common,
+ &de1_clk.common,
+ &de2_clk.common,
+ &i2srx_clk.common,
+ &i2stx_clk.common,
+ &hdmia_clk.common,
+ &hdmi_clk.common,
+ &vce_clk.common,
+ &vde_clk.common,
+ &spdif_clk.common,
+ &nand_clk.common,
+ &ecc_clk.common,
+};
+
+static struct clk_hw_onecell_data s500_hw_clks = {
+ .hws = {
+ [CLK_ETHERNET_PLL] = &ethernet_pll_clk.common.hw,
+ [CLK_CORE_PLL] = &core_pll_clk.common.hw,
+ [CLK_DDR_PLL] = &ddr_pll_clk.common.hw,
+ [CLK_NAND_PLL] = &nand_pll_clk.common.hw,
+ [CLK_DISPLAY_PLL] = &display_pll_clk.common.hw,
+ [CLK_DEV_PLL] = &dev_pll_clk.common.hw,
+ [CLK_AUDIO_PLL] = &audio_pll_clk.common.hw,
+ [CLK_TIMER] = &timer_clk.common.hw,
+ [CLK_DEV] = &dev_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_DE1] = &de1_clk.common.hw,
+ [CLK_DE2] = &de2_clk.common.hw,
+ [CLK_I2C0] = &i2c0_clk.common.hw,
+ [CLK_I2C1] = &i2c1_clk.common.hw,
+ [CLK_I2C2] = &i2c2_clk.common.hw,
+ [CLK_I2C3] = &i2c3_clk.common.hw,
+ [CLK_I2SRX] = &i2srx_clk.common.hw,
+ [CLK_I2STX] = &i2stx_clk.common.hw,
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART1] = &uart1_clk.common.hw,
+ [CLK_UART2] = &uart2_clk.common.hw,
+ [CLK_UART3] = &uart3_clk.common.hw,
+ [CLK_UART4] = &uart4_clk.common.hw,
+ [CLK_UART5] = &uart5_clk.common.hw,
+ [CLK_UART6] = &uart6_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_PWM4] = &pwm4_clk.common.hw,
+ [CLK_PWM5] = &pwm5_clk.common.hw,
+ [CLK_SENSOR0] = &sensor0_clk.common.hw,
+ [CLK_SENSOR1] = &sensor1_clk.common.hw,
+ [CLK_SD0] = &sd0_clk.common.hw,
+ [CLK_SD1] = &sd1_clk.common.hw,
+ [CLK_SD2] = &sd2_clk.common.hw,
+ [CLK_BISP] = &bisp_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_SPI3] = &spi3_clk.common.hw,
+ [CLK_AHB] = &ahb_clk.common.hw,
+ [CLK_H] = &h_clk.common.hw,
+ [CLK_AHBPREDIV] = &ahbprediv_clk.common.hw,
+ [CLK_RMII_REF] = &rmii_ref_clk.common.hw,
+ [CLK_HDMI_AUDIO] = &hdmia_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_VDE] = &vde_clk.common.hw,
+ [CLK_VCE] = &vce_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_ECC] = &ecc_clk.common.hw,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static struct owl_clk_desc s500_clk_desc = {
+ .clks = s500_clks,
+ .num_clks = ARRAY_SIZE(s500_clks),
+
+ .hw_clks = &s500_hw_clks,
+};
+
+static int s500_clk_probe(struct platform_device *pdev)
+{
+ struct owl_clk_desc *desc;
+
+ desc = &s500_clk_desc;
+ owl_clk_regmap_init(pdev, desc);
+
+ return owl_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static const struct of_device_id s500_clk_of_match[] = {
+ { .compatible = "actions,s500-cmu", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver s500_clk_driver = {
+ .probe = s500_clk_probe,
+ .driver = {
+ .name = "s500-cmu",
+ .of_match_table = s500_clk_of_match,
+ },
+};
+
+static int __init s500_clk_init(void)
+{
+ return platform_driver_register(&s500_clk_driver);
+}
+core_initcall(s500_clk_init);
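
For readers of the sd_factor_table above: bits [4:0] of the register value
select a 1/(n+1) divider and bit 8 switches in an extra /128 stage, which is
why the second half of the table starts at val 256. An illustrative decoder
only, not part of the driver:

    static unsigned long s500_sd_div_from_val(unsigned int val)
    {
            unsigned long div = (val & 0x1f) + 1;   /* bits [4:0]: 1/(n+1) */

            if (val & (1 << 8))                     /* bit 8: extra /128 */
                    div *= 128;

            return div;
    }
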
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
index 36d77146a3bd..3cc4a82f4e9f 100644
--- a/drivers/clk/at91/clk-audio-pll.c
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -340,7 +340,12 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
rate, *parent_rate);
- for (div = 1; div <= AUDIO_PLL_QDPMC_MAX; div++) {
+ if (!rate)
+ return 0;
+
+ best_parent_rate = clk_round_rate(pclk->clk, 1);
+ div = max(best_parent_rate / rate, 1UL);
+ for (; div <= AUDIO_PLL_QDPMC_MAX; div++) {
best_parent_rate = clk_round_rate(pclk->clk, rate * div);
tmp_rate = best_parent_rate / div;
tmp_diff = abs(rate - tmp_rate);
@@ -350,6 +355,8 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = tmp_rate;
best_diff = tmp_diff;
tmp_qd = div;
+ if (!best_diff)
+ break; /* got exact match */
}
}
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 5bc68b9c5498..f8edbb65eda3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -20,8 +20,7 @@
#define PROG_ID_MAX 7
#define PROG_STATUS_MASK(id) (1 << ((id) + 8))
-#define PROG_PRES_MASK 0x7
-#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK)
+#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & layout->pres_mask)
#define PROG_MAX_RM9200_CSS 3
struct clk_programmable {
@@ -37,20 +36,29 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_programmable *prog = to_clk_programmable(hw);
+ const struct clk_programmable_layout *layout = prog->layout;
unsigned int pckr;
+ unsigned long rate;
regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
- return parent_rate >> PROG_PRES(prog->layout, pckr);
+ if (layout->is_pres_direct)
+ rate = parent_rate / (PROG_PRES(layout, pckr) + 1);
+ else
+ rate = parent_rate >> PROG_PRES(layout, pckr);
+
+ return rate;
}
static int clk_programmable_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ const struct clk_programmable_layout *layout = prog->layout;
struct clk_hw *parent;
long best_rate = -EINVAL;
unsigned long parent_rate;
- unsigned long tmp_rate;
+ unsigned long tmp_rate = 0;
int shift;
int i;
@@ -60,10 +68,18 @@ static int clk_programmable_determine_rate(struct clk_hw *hw,
continue;
parent_rate = clk_hw_get_rate(parent);
- for (shift = 0; shift < PROG_PRES_MASK; shift++) {
- tmp_rate = parent_rate >> shift;
- if (tmp_rate <= req->rate)
- break;
+ if (layout->is_pres_direct) {
+ for (shift = 0; shift <= layout->pres_mask; shift++) {
+ tmp_rate = parent_rate / (shift + 1);
+ if (tmp_rate <= req->rate)
+ break;
+ }
+ } else {
+ for (shift = 0; shift < layout->pres_mask; shift++) {
+ tmp_rate = parent_rate >> shift;
+ if (tmp_rate <= req->rate)
+ break;
+ }
}
if (tmp_rate > req->rate)
@@ -132,24 +148,28 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
unsigned long div = parent_rate / rate;
- unsigned int pckr;
int shift = 0;
- regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
-
if (!div)
return -EINVAL;
- shift = fls(div) - 1;
+ if (layout->is_pres_direct) {
+ shift = div - 1;
- if (div != (1 << shift))
- return -EINVAL;
+ if (shift > layout->pres_mask)
+ return -EINVAL;
+ } else {
+ shift = fls(div) - 1;
- if (shift >= PROG_PRES_MASK)
- return -EINVAL;
+ if (div != (1 << shift))
+ return -EINVAL;
+
+ if (shift >= layout->pres_mask)
+ return -EINVAL;
+ }
regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
- PROG_PRES_MASK << layout->pres_shift,
+ layout->pres_mask << layout->pres_shift,
shift << layout->pres_shift);
return 0;
@@ -205,19 +225,25 @@ at91_clk_register_programmable(struct regmap *regmap,
}
const struct clk_programmable_layout at91rm9200_programmable_layout = {
+ .pres_mask = 0x7,
.pres_shift = 2,
.css_mask = 0x3,
.have_slck_mck = 0,
+ .is_pres_direct = 0,
};
const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+ .pres_mask = 0x7,
.pres_shift = 2,
.css_mask = 0x3,
.have_slck_mck = 1,
+ .is_pres_direct = 0,
};
const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+ .pres_mask = 0x7,
.pres_shift = 4,
.css_mask = 0x7,
.have_slck_mck = 0,
+ .is_pres_direct = 0,
};
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 672a79bda88c..a0e5ce9c9b9e 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -71,9 +71,11 @@ struct clk_pll_characteristics {
};
struct clk_programmable_layout {
+ u8 pres_mask;
u8 pres_shift;
u8 css_mask;
u8 have_slck_mck;
+ u8 is_pres_direct;
};
extern const struct clk_programmable_layout at91rm9200_programmable_layout;
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index cd0ef7274fdb..81943fac4537 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -125,6 +125,14 @@ static const struct {
.pll = true },
};
+static const struct clk_programmable_layout sama5d2_programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 4,
+ .css_mask = 0x7,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
static void __init sama5d2_pmc_setup(struct device_node *np)
{
struct clk_range range = CLK_RANGE(0, 0);
@@ -241,14 +249,15 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
parent_names[4] = "masterck";
+ parent_names[5] = "audiopll_pmcck";
for (i = 0; i < 3; i++) {
char name[6];
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name,
- parent_names, 5, i,
- &at91sam9x5_programmable_layout);
+ parent_names, 6, i,
+ &sama5d2_programmable_layout);
if (IS_ERR(hw))
goto err_free;
}
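
The new is_pres_direct flag distinguishes two prescaler encodings: the older
layouts treat the PRES field as a power-of-two shift, while sama5d2 treats it
as a linear divider. As a standalone sketch, pres being the raw field read
from PCKR:

    static unsigned long prog_calc_rate(unsigned long parent_rate,
                                        unsigned int pres, bool is_pres_direct)
    {
            if (is_pres_direct)
                    return parent_rate / (pres + 1);    /* sama5d2 */

            return parent_rate >> pres;                 /* rm9200/9g45/9x5 */
    }
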
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index 2c04396402ab..c36c47bdba02 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -44,21 +44,21 @@ struct clps711x_clk {
struct clk_hw_onecell_data clk_data;
};
-static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
- u32 fref)
+static void __init clps711x_clk_init_dt(struct device_node *np)
{
- u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi;
+ u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi, fref = 0;
struct clps711x_clk *clps711x_clk;
- unsigned i;
+ void __iomem *base;
+
+ WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
- if (!base)
- return ERR_PTR(-ENOMEM);
+ base = of_iomap(np, 0);
+ BUG_ON(!base);
clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws,
CLPS711X_CLK_MAX),
GFP_KERNEL);
- if (!clps711x_clk)
- return ERR_PTR(-ENOMEM);
+ BUG_ON(!clps711x_clk);
spin_lock_init(&clps711x_clk->lock);
@@ -137,52 +137,13 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] =
clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64);
- for (i = 0; i < CLPS711X_CLK_MAX; i++)
- if (IS_ERR(clps711x_clk->clk_data.hws[i]))
+ for (tmp = 0; tmp < CLPS711X_CLK_MAX; tmp++)
+ if (IS_ERR(clps711x_clk->clk_data.hws[tmp]))
pr_err("clk %i: register failed with %ld\n",
- i, PTR_ERR(clps711x_clk->clk_data.hws[i]));
-
- return clps711x_clk;
-}
-
-void __init clps711x_clk_init(void __iomem *base)
-{
- struct clps711x_clk *clps711x_clk;
-
- clps711x_clk = _clps711x_clk_init(base, 73728000);
-
- BUG_ON(IS_ERR(clps711x_clk));
-
- /* Clocksource */
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1],
- NULL, "clps711x-timer.0");
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2],
- NULL, "clps711x-timer.1");
-
- /* Drivers */
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM],
- NULL, "clps711x-pwm");
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
- NULL, "clps711x-uart.0");
- clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
- NULL, "clps711x-uart.1");
-}
-
-#ifdef CONFIG_OF
-static void __init clps711x_clk_init_dt(struct device_node *np)
-{
- void __iomem *base = of_iomap(np, 0);
- struct clps711x_clk *clps711x_clk;
- u32 fref = 0;
-
- WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
-
- clps711x_clk = _clps711x_clk_init(base, fref);
- BUG_ON(IS_ERR(clps711x_clk));
+ tmp, PTR_ERR(clps711x_clk->clk_data.hws[tmp]));
clps711x_clk->clk_data.num = CLPS711X_CLK_MAX;
of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
&clps711x_clk->clk_data);
}
CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt);
-#endif
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index c9a86156ced8..daa1fc8fba53 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -29,6 +29,17 @@ struct clk *devm_clk_get(struct device *dev, const char *id)
}
EXPORT_SYMBOL(devm_clk_get);
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+ struct clk *clk = devm_clk_get(dev, id);
+
+ if (clk == ERR_PTR(-ENOENT))
+ return NULL;
+
+ return clk;
+}
+EXPORT_SYMBOL(devm_clk_get_optional);
+
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
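
devm_clk_get_optional() lets a driver treat a missing clock as genuinely
optional: -ENOENT is folded into a NULL clk, while real lookup errors still
propagate. A minimal usage sketch; the device and con_id names are
hypothetical:

    static int foo_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            clk = devm_clk_get_optional(&pdev->dev, "bus");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);    /* a real error, not a missing clock */

            /* clk is NULL when absent; clk_prepare_enable(NULL) is a no-op */
            return clk_prepare_enable(clk);
    }
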
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
new file mode 100644
index 000000000000..d1a97d971183
--- /dev/null
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Memory Mapped IO Fixed clock driver
+ *
+ * Copyright (C) 2018 Cadence Design Systems, Inc.
+ *
+ * Authors:
+ * Jan Kotas <jank@cadence.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
+{
+ struct clk_hw *clk;
+ const char *clk_name = node->name;
+ void __iomem *base;
+ u32 freq;
+ int ret;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%pOFn: failed to map address\n", node);
+ return ERR_PTR(-EIO);
+ }
+
+ freq = readl(base);
+ iounmap(base);
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL, 0, freq);
+ if (IS_ERR(clk)) {
+ pr_err("%pOFn: failed to register fixed rate clock\n", node);
+ return clk;
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, clk);
+ if (ret) {
+ pr_err("%pOFn: failed to add clock provider\n", node);
+ clk_hw_unregister(clk);
+ clk = ERR_PTR(ret);
+ }
+
+ return clk;
+}
+
+static void __init of_fixed_mmio_clk_setup(struct device_node *node)
+{
+ fixed_mmio_clk_setup(node);
+}
+CLK_OF_DECLARE(fixed_mmio_clk, "fixed-mmio-clock", of_fixed_mmio_clk_setup);
+
+/*
+ * This probe is not executed when of_fixed_mmio_clk_setup() has already
+ * handled the node via CLK_OF_DECLARE.
+ */
+static int of_fixed_mmio_clk_probe(struct platform_device *pdev)
+{
+ struct clk_hw *clk;
+
+ clk = fixed_mmio_clk_setup(pdev->dev.of_node);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ platform_set_drvdata(pdev, clk);
+
+ return 0;
+}
+
+static int of_fixed_mmio_clk_remove(struct platform_device *pdev)
+{
+ struct clk_hw *clk = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+ clk_hw_unregister_fixed_rate(clk);
+
+ return 0;
+}
+
+static const struct of_device_id of_fixed_mmio_clk_ids[] = {
+ { .compatible = "fixed-mmio-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_fixed_mmio_clk_ids);
+
+static struct platform_driver of_fixed_mmio_clk_driver = {
+ .driver = {
+ .name = "of_fixed_mmio_clk",
+ .of_match_table = of_fixed_mmio_clk_ids,
+ },
+ .probe = of_fixed_mmio_clk_probe,
+ .remove = of_fixed_mmio_clk_remove,
+};
+module_platform_driver(of_fixed_mmio_clk_driver);
+
+MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
+MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 545dceec0bbf..fdfe2e423d15 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -79,7 +79,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long m, n;
u64 ret;
- if (!rate || rate >= *parent_rate)
+ if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
return *parent_rate;
if (fd->approximation)
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 25eed3e0251f..c2f07f0d077c 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -58,6 +58,35 @@ const struct clk_ops clk_gpio_gate_ops = {
};
EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
+static int clk_sleeping_gpio_gate_prepare(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ gpiod_set_value_cansleep(clk->gpiod, 1);
+
+ return 0;
+}
+
+static void clk_sleeping_gpio_gate_unprepare(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ gpiod_set_value_cansleep(clk->gpiod, 0);
+}
+
+static int clk_sleeping_gpio_gate_is_prepared(struct clk_hw *hw)
+{
+ struct clk_gpio *clk = to_clk_gpio(hw);
+
+ return gpiod_get_value_cansleep(clk->gpiod);
+}
+
+static const struct clk_ops clk_sleeping_gpio_gate_ops = {
+ .prepare = clk_sleeping_gpio_gate_prepare,
+ .unprepare = clk_sleeping_gpio_gate_unprepare,
+ .is_prepared = clk_sleeping_gpio_gate_is_prepared,
+};
+
/**
* DOC: basic clock multiplexer which can be controlled with a gpio output
* Traits of this clock:
@@ -144,10 +173,16 @@ struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
const char *parent_name, struct gpio_desc *gpiod,
unsigned long flags)
{
+ const struct clk_ops *ops;
+
+ if (gpiod_cansleep(gpiod))
+ ops = &clk_sleeping_gpio_gate_ops;
+ else
+ ops = &clk_gpio_gate_ops;
+
return clk_register_gpio(dev, name,
(parent_name ? &parent_name : NULL),
- (parent_name ? 1 : 0), gpiod, flags,
- &clk_gpio_gate_ops);
+ (parent_name ? 1 : 0), gpiod, flags, ops);
}
EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
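
With this change, gates backed by GPIOs that may sleep (an I2C expander, for
instance) are driven through prepare/unprepare instead of enable/disable, and
the right ops are picked automatically at registration. A usage sketch; the
device, clock and GPIO names are hypothetical:

    static int baz_probe(struct platform_device *pdev)
    {
            struct gpio_desc *gpiod;
            struct clk_hw *hw;

            /* a descriptor from an I2C expander reports gpiod_cansleep() */
            gpiod = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
            if (IS_ERR(gpiod))
                    return PTR_ERR(gpiod);

            hw = clk_hw_register_gpio_gate(&pdev->dev, "codec-mclk", "osc24m",
                                           gpiod, 0);
            return PTR_ERR_OR_ZERO(hw);
    }
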
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 727ed8e1bb72..8e4581004695 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -293,6 +293,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
/* Map system registers */
srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
hb_clk->reg = of_iomap(srnp, 0);
+ of_node_put(srnp);
BUG_ON(!hb_clk->reg);
hb_clk->reg += reg;
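
The leak being fixed here (and again in clk-qoriq below): of_find_compatible_node()
returns the node with an elevated refcount, which must be dropped once the
caller has extracted what it needs; the iomapping remains valid after the put.
A sketch with a made-up compatible string:

    static void __iomem *map_sregs(void)
    {
            struct device_node *np;
            void __iomem *base = NULL;

            np = of_find_compatible_node(NULL, NULL, "vendor,sregs");
            if (np) {
                    base = of_iomap(np, 0);
                    of_node_put(np);        /* mapping outlives the reference */
            }

            return base;
    }
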
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 22c937644c93..3727d5472450 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -235,8 +235,9 @@ static int max77686_clk_probe(struct platform_device *pdev)
return ret;
}
- ret = clk_hw_register_clkdev(&max_clk_data->hw,
- max_clk_data->clk_idata.name, NULL);
+ ret = devm_clk_hw_register_clkdev(dev, &max_clk_data->hw,
+ max_clk_data->clk_idata.name,
+ NULL);
if (ret < 0) {
dev_err(dev, "Failed to clkdev register: %d\n", ret);
return ret;
@@ -244,8 +245,8 @@ static int max77686_clk_probe(struct platform_device *pdev)
}
if (parent->of_node) {
- ret = of_clk_add_hw_provider(parent->of_node, of_clk_max77686_get,
- drv_data);
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_max77686_get,
+ drv_data);
if (ret < 0) {
dev_err(dev, "Failed to register OF clock provider: %d\n",
@@ -261,27 +262,11 @@ static int max77686_clk_probe(struct platform_device *pdev)
1 << MAX77802_CLOCK_LOW_JITTER_SHIFT);
if (ret < 0) {
dev_err(dev, "Failed to config low-jitter: %d\n", ret);
- goto remove_of_clk_provider;
+ return ret;
}
}
return 0;
-
-remove_of_clk_provider:
- if (parent->of_node)
- of_clk_del_provider(parent->of_node);
-
- return ret;
-}
-
-static int max77686_clk_remove(struct platform_device *pdev)
-{
- struct device *parent = pdev->dev.parent;
-
- if (parent->of_node)
- of_clk_del_provider(parent->of_node);
-
- return 0;
}
static const struct platform_device_id max77686_clk_id[] = {
@@ -297,7 +282,6 @@ static struct platform_driver max77686_clk_driver = {
.name = "max77686-clk",
},
.probe = max77686_clk_probe,
- .remove = max77686_clk_remove,
.id_table = max77686_clk_id,
};
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 5baa9e051110..1212a9be7e80 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1148,8 +1148,8 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
pll->div[i].clk = clk;
ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
if (ret != 0)
- pr_err("%s: %s: register to lookup table failed %ld\n",
- __func__, pll->div[i].name, PTR_ERR(clk));
+ pr_err("%s: %s: register to lookup table failed %d\n",
+ __func__, pll->div[i].name, ret);
}
}
@@ -1389,6 +1389,7 @@ static void __init clockgen_init(struct device_node *np)
pr_err("%s: Couldn't map %pOF regs\n", __func__,
guts);
}
+ of_node_put(guts);
}
}
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 6a31f7f434ce..a0ae8dc16909 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -121,7 +121,7 @@ static const char * const cpu_src[] = {
};
static const char * const axi_src[] = {
- "ck_hsi", "ck_hse", "pll2_p", "pll3_p"
+ "ck_hsi", "ck_hse", "pll2_p"
};
static const char * const per_src[] = {
@@ -225,19 +225,19 @@ static const char * const usart6_src[] = {
};
static const char * const fdcan_src[] = {
- "ck_hse", "pll3_q", "pll4_q"
+ "ck_hse", "pll3_q", "pll4_q", "pll4_r"
};
static const char * const sai_src[] = {
- "pll4_q", "pll3_q", "i2s_ckin", "ck_per"
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
};
static const char * const sai2_src[] = {
- "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb"
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r"
};
static const char * const adc12_src[] = {
- "pll4_q", "ck_per"
+ "pll4_r", "ck_per", "pll3_q"
};
static const char * const dsi_src[] = {
@@ -269,7 +269,7 @@ static const struct clk_div_table axi_div_table[] = {
static const struct clk_div_table mcu_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
{ 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 },
- { 8, 512 }, { 9, 512 }, { 10, 512}, { 11, 512 },
+ { 8, 256 }, { 9, 512 }, { 10, 512}, { 11, 512 },
{ 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 },
{ 0 },
};
@@ -1286,10 +1286,11 @@ _clk_stm32_register_composite(struct device *dev,
MGATE_MP1(_id, _name, _parent, _flags, _mgate)
#define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
- COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE | _flags,\
- _MGATE_MP1(_mgate),\
- _MMUX(_mmux),\
- _NO_DIV)
+ COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\
+ CLK_SET_RATE_NO_REPARENT | _flags,\
+ _MGATE_MP1(_mgate),\
+ _MMUX(_mmux),\
+ _NO_DIV)
enum {
G_SAI1,
@@ -1655,12 +1656,14 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
static const struct clock_config stm32mp1_clock_cfg[] = {
/* Oscillator divider */
- DIV(NO_ID, "clk-hsi-div", "clk-hsi", 0, RCC_HSICFGR, 0, 2,
- CLK_DIVIDER_READ_ONLY),
+ DIV(NO_ID, "clk-hsi-div", "clk-hsi", CLK_DIVIDER_POWER_OF_TWO,
+ RCC_HSICFGR, 0, 2, CLK_DIVIDER_READ_ONLY),
/* External / Internal Oscillators */
GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
- GATE_MP1(CK_CSI, "ck_csi", "clk-csi", 0, RCC_OCENSETR, 4, 0),
+ /* ck_csi is used by IO compensation and should be critical */
+ GATE_MP1(CK_CSI, "ck_csi", "clk-csi", CLK_IS_CRITICAL,
+ RCC_OCENSETR, 4, 0),
GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
@@ -1952,14 +1955,14 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
- COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE,
+ COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_NO_REPARENT,
_NO_GATE,
_MMUX(M_ETHCK),
- _DIV(RCC_ETHCKSELR, 4, 4, CLK_DIVIDER_ALLOW_ZERO, NULL)),
+ _DIV(RCC_ETHCKSELR, 4, 4, 0, NULL)),
/* RTC clock */
- DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7,
- CLK_DIVIDER_ALLOW_ZERO),
+ DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 6, 0),
COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE |
CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index ea846f77750b..0cad5748bf0e 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
return pdmclk->enabled;
}
+static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
+ unsigned int reg)
+{
+ const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
+ int ret;
+
+ ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
+ if (ret < 0)
+ return ret;
+
+ ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
+ * Cold Temperature". This affects cold boot and deeper idle states it
+ * seems. The workaround consists of resetting HPPLL and LPPLL.
+ */
+static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
+{
+ int ret;
+
+ ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
+ if (ret)
+ return ret;
+
+ ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int twl6040_pdmclk_prepare(struct clk_hw *hw)
{
struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
@@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw)
int ret;
ret = twl6040_power(pdmclk->twl6040, 1);
- if (!ret)
- pdmclk->enabled = 1;
+ if (ret)
+ return ret;
+
+ ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
+ if (ret)
+ goto out_err;
+
+ pdmclk->enabled = 1;
+
+ return 0;
+
+out_err:
+ dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
+ twl6040_power(pdmclk->twl6040, 0);
return ret;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d2477a5058ac..96053a96fe2f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -57,6 +57,7 @@ struct clk_core {
struct clk_core *new_child;
unsigned long flags;
bool orphan;
+ bool rpm_enabled;
unsigned int enable_count;
unsigned int prepare_count;
unsigned int protect_count;
@@ -81,6 +82,7 @@ struct clk_core {
struct clk {
struct clk_core *core;
+ struct device *dev;
const char *dev_id;
const char *con_id;
unsigned long min_rate;
@@ -92,9 +94,9 @@ struct clk {
/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
- int ret = 0;
+ int ret;
- if (!core->dev)
+ if (!core->rpm_enabled)
return 0;
ret = pm_runtime_get_sync(core->dev);
@@ -103,7 +105,7 @@ static int clk_pm_runtime_get(struct clk_core *core)
static void clk_pm_runtime_put(struct clk_core *core)
{
- if (!core->dev)
+ if (!core->rpm_enabled)
return;
pm_runtime_put_sync(core->dev);
@@ -223,7 +225,7 @@ static bool clk_core_is_enabled(struct clk_core *core)
* taking enable spinlock, but the below check is needed if one tries
* to call it from other places.
*/
- if (core->dev) {
+ if (core->rpm_enabled) {
pm_runtime_get_noresume(core->dev);
if (!pm_runtime_active(core->dev)) {
ret = false;
@@ -233,7 +235,7 @@ static bool clk_core_is_enabled(struct clk_core *core)
ret = core->ops->is_enabled(core->hw);
done:
- if (core->dev)
+ if (core->rpm_enabled)
pm_runtime_put(core->dev);
return ret;
@@ -394,16 +396,19 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
{
return clk_core_is_prepared(hw->core);
}
+EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
return clk_core_rate_is_protected(hw->core);
}
+EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
}
+EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
bool __clk_is_enabled(struct clk *clk)
{
@@ -3209,43 +3214,106 @@ unlock:
return ret;
}
-struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+/**
+ * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
+ * @core: clk to add consumer to
+ * @clk: consumer to link to a clk
+ */
+static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
+{
+ clk_prepare_lock();
+ hlist_add_head(&clk->clks_node, &core->clks);
+ clk_prepare_unlock();
+}
+
+/**
+ * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
+ * @clk: consumer to unlink
+ */
+static void clk_core_unlink_consumer(struct clk *clk)
+{
+ lockdep_assert_held(&prepare_lock);
+ hlist_del(&clk->clks_node);
+}
+
+/**
+ * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
+ * @core: clk to allocate a consumer for
+ * @dev_id: string describing device name
+ * @con_id: connection ID string on device
+ *
+ * Returns: clk consumer left unlinked from the consumer list
+ */
+static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
const char *con_id)
{
struct clk *clk;
- /* This is to allow this function to be chained to others */
- if (IS_ERR_OR_NULL(hw))
- return ERR_CAST(hw);
-
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk)
return ERR_PTR(-ENOMEM);
- clk->core = hw->core;
+ clk->core = core;
clk->dev_id = dev_id;
clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
clk->max_rate = ULONG_MAX;
- clk_prepare_lock();
- hlist_add_head(&clk->clks_node, &hw->core->clks);
- clk_prepare_unlock();
-
return clk;
}
-/* keep in sync with __clk_put */
-void __clk_free_clk(struct clk *clk)
+/**
+ * free_clk - Free a clk consumer
+ * @clk: clk consumer to free
+ *
+ * Note, this assumes the clk has been unlinked from the clk_core consumer
+ * list.
+ */
+static void free_clk(struct clk *clk)
{
- clk_prepare_lock();
- hlist_del(&clk->clks_node);
- clk_prepare_unlock();
-
kfree_const(clk->con_id);
kfree(clk);
}
/**
+ * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given
+ * a clk_hw
+ * @dev: clk consumer device
+ * @hw: clk_hw associated with the clk being consumed
+ * @dev_id: string describing device name
+ * @con_id: connection ID string on device
+ *
+ * This is the main function used to create a clk pointer for use by clk
+ * consumers. It connects a consumer to the clk_core and clk_hw structures
+ * used by the framework and clk provider respectively.
+ */
+struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
+ const char *dev_id, const char *con_id)
+{
+ struct clk *clk;
+ struct clk_core *core;
+
+ /* This is to allow this function to be chained to others */
+ if (IS_ERR_OR_NULL(hw))
+ return ERR_CAST(hw);
+
+ core = hw->core;
+ clk = alloc_clk(core, dev_id, con_id);
+ if (IS_ERR(clk))
+ return clk;
+ clk->dev = dev;
+
+ if (!try_module_get(core->owner)) {
+ free_clk(clk);
+ return ERR_PTR(-ENOENT);
+ }
+
+ kref_get(&core->ref);
+ clk_core_link_consumer(core, clk);
+
+ return clk;
+}
+
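+/*
+ * Illustrative sketch (not part of this patch): lookup paths such as
+ * clk_get() can now chain straight off a clk_hw, e.g.
+ *
+ *	struct clk *clk = clk_hw_create_clk(dev, hw, dev_id, con_id);
+ *	if (IS_ERR(clk))
+ *		return clk;
+ *
+ * and an error hw simply propagates out, which is what permits the
+ * chaining mentioned in the IS_ERR_OR_NULL() check above.
+ */
+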
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
@@ -3280,7 +3348,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
core->ops = hw->init->ops;
if (dev && pm_runtime_enabled(dev))
- core->dev = dev;
+ core->rpm_enabled = true;
+ core->dev = dev;
if (dev && dev->driver)
core->owner = dev->driver->owner;
core->hw = hw;
@@ -3320,17 +3389,27 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
INIT_HLIST_HEAD(&core->clks);
- hw->clk = __clk_create_clk(hw, NULL, NULL);
+ /*
+ * Don't call clk_hw_create_clk() here because that would pin the
+ * provider module to itself and prevent it from ever being removed.
+ */
+ hw->clk = alloc_clk(core, NULL, NULL);
if (IS_ERR(hw->clk)) {
ret = PTR_ERR(hw->clk);
goto fail_parents;
}
+ clk_core_link_consumer(hw->core, hw->clk);
+
ret = __clk_core_init(core);
if (!ret)
return hw->clk;
- __clk_free_clk(hw->clk);
+ clk_prepare_lock();
+ clk_core_unlink_consumer(hw->clk);
+ clk_prepare_unlock();
+
+ free_clk(hw->clk);
hw->clk = NULL;
fail_parents:
@@ -3601,20 +3680,7 @@ EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
/*
* clkdev helpers
*/
-int __clk_get(struct clk *clk)
-{
- struct clk_core *core = !clk ? NULL : clk->core;
-
- if (core) {
- if (!try_module_get(core->owner))
- return 0;
- kref_get(&core->ref);
- }
- return 1;
-}
-
-/* keep in sync with __clk_free_clk */
void __clk_put(struct clk *clk)
{
struct module *owner;
@@ -3648,8 +3714,7 @@ void __clk_put(struct clk *clk)
module_put(owner);
- kfree_const(clk->con_id);
- kfree(clk);
+ free_clk(clk);
}
/*** clk rate change notifiers ***/
@@ -4006,6 +4071,49 @@ void devm_of_clk_del_provider(struct device *dev)
}
EXPORT_SYMBOL(devm_of_clk_del_provider);
+/*
+ * Beware the return values when np is valid, but no clock provider is found.
+ * If name == NULL, the function returns -ENOENT.
+ * If name != NULL, the function returns -EINVAL. This is because
+ * of_parse_phandle_with_args() is called even if of_property_match_string()
+ * returns an error.
+ */
+static int of_parse_clkspec(const struct device_node *np, int index,
+ const char *name, struct of_phandle_args *out_args)
+{
+ int ret = -ENOENT;
+
+ /* Walk up the tree of devices looking for a clock property that matches */
+ while (np) {
+ /*
+ * For named clocks, first look up the name in the
+ * "clock-names" property. If it cannot be found, then index
+ * will be an error code and of_parse_phandle_with_args() will
+ * return -EINVAL.
+ */
+ if (name)
+ index = of_property_match_string(np, "clock-names", name);
+ ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
+ index, out_args);
+ if (!ret)
+ break;
+ if (name && index >= 0)
+ break;
+
+ /*
+ * No matching clock found on this node. If the parent node
+ * has a "clock-ranges" property, then we can try one of its
+ * clocks.
+ */
+ np = np->parent;
+ if (np && !of_get_property(np, "clock-ranges", NULL))
+ break;
+ index = 0;
+ }
+
+ return ret;
+}
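+
+/*
+ * Illustrative example (hypothetical consumer node, not part of this
+ * patch):
+ *
+ *	serial@1000 {
+ *		clocks = <&ccm 10>, <&ccm 11>;
+ *		clock-names = "ipg", "per";
+ *	};
+ *
+ * of_parse_clkspec(np, 0, "per", &args) resolves index 1 via
+ * "clock-names" and fills @args from the second "clocks" phandle; the
+ * index passed in is ignored whenever a name is given.
+ */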
+
static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
struct of_phandle_args *clkspec)
@@ -4021,36 +4129,26 @@ __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
return __clk_get_hw(clk);
}
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
- const char *dev_id, const char *con_id)
+static struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
struct of_clk_provider *provider;
- struct clk *clk = ERR_PTR(-EPROBE_DEFER);
- struct clk_hw *hw;
+ struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
if (!clkspec)
return ERR_PTR(-EINVAL);
- /* Check if we have such a provider in our array */
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
hw = __of_clk_get_hw_from_provider(provider, clkspec);
- clk = __clk_create_clk(hw, dev_id, con_id);
- }
-
- if (!IS_ERR(clk)) {
- if (!__clk_get(clk)) {
- __clk_free_clk(clk);
- clk = ERR_PTR(-ENOENT);
- }
-
- break;
+ if (!IS_ERR(hw))
+ break;
}
}
mutex_unlock(&of_clk_mutex);
- return clk;
+ return hw;
}
/**
@@ -4063,10 +4161,62 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
*/
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
- return __of_clk_get_from_provider(clkspec, NULL, __func__);
+ struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
+
+ return clk_hw_create_clk(NULL, hw, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
+struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
+ const char *con_id)
+{
+ int ret;
+ struct clk_hw *hw;
+ struct of_phandle_args clkspec;
+
+ ret = of_parse_clkspec(np, index, con_id, &clkspec);
+ if (ret)
+ return ERR_PTR(ret);
+
+ hw = of_clk_get_hw_from_clkspec(&clkspec);
+ of_node_put(clkspec.np);
+
+ return hw;
+}
+
+static struct clk *__of_clk_get(struct device_node *np,
+ int index, const char *dev_id,
+ const char *con_id)
+{
+ struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
+
+ return clk_hw_create_clk(NULL, hw, dev_id, con_id);
+}
+
+struct clk *of_clk_get(struct device_node *np, int index)
+{
+ return __of_clk_get(np, index, np->full_name, NULL);
+}
+EXPORT_SYMBOL(of_clk_get);
+
+/**
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
+ * @np: pointer to clock consumer node
+ * @name: name of consumer's clock input, or NULL for the first clock reference
+ *
+ * This function parses the clocks and clock-names properties,
+ * and uses them to look up the struct clk from the registered list of clock
+ * providers.
+ */
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+{
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ return __of_clk_get(np, 0, np->full_name, name);
+}
+EXPORT_SYMBOL(of_clk_get_by_name);
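+
+/*
+ * Note (illustrative, not part of this patch): with @name == NULL the
+ * call degenerates to of_clk_get(np, 0), i.e. the first "clocks"
+ * phandle is returned.
+ */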
+
/**
* of_clk_get_parent_count() - Count the number of clocks a device node has
* @np: device node to count
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index b02f5e604e69..553f531cc232 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -5,31 +5,36 @@
*/
struct clk_hw;
+struct device;
+struct of_phandle_args;
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
- const char *dev_id, const char *con_id);
+struct clk_hw *of_clk_get_hw(struct device_node *np,
+ int index, const char *con_id);
+#else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
+static inline struct clk_hw *of_clk_get_hw(struct device_node *np,
+ int index, const char *con_id)
+{
+ return ERR_PTR(-ENOENT);
+}
#endif
#ifdef CONFIG_COMMON_CLK
-struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
- const char *con_id);
-void __clk_free_clk(struct clk *clk);
-int __clk_get(struct clk *clk);
+struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
+ const char *dev_id, const char *con_id);
void __clk_put(struct clk *clk);
#else
/* All these casts to avoid ifdefs in clkdev... */
static inline struct clk *
-__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id)
+clk_hw_create_clk(struct device *dev, struct clk_hw *hw, const char *dev_id,
+ const char *con_id)
{
return (struct clk *)hw;
}
-static inline void __clk_free_clk(struct clk *clk) { }
static struct clk_hw *__clk_get_hw(struct clk *clk)
{
return (struct clk_hw *)clk;
}
-static inline int __clk_get(struct clk *clk) { return 1; }
static inline void __clk_put(struct clk *clk) { }
#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 9ab3db8b3988..6e787cc9e5b9 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -27,99 +27,6 @@
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
-static struct clk *__of_clk_get(struct device_node *np, int index,
- const char *dev_id, const char *con_id)
-{
- struct of_phandle_args clkspec;
- struct clk *clk;
- int rc;
-
- rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
- &clkspec);
- if (rc)
- return ERR_PTR(rc);
-
- clk = __of_clk_get_from_provider(&clkspec, dev_id, con_id);
- of_node_put(clkspec.np);
-
- return clk;
-}
-
-struct clk *of_clk_get(struct device_node *np, int index)
-{
- return __of_clk_get(np, index, np->full_name, NULL);
-}
-EXPORT_SYMBOL(of_clk_get);
-
-static struct clk *__of_clk_get_by_name(struct device_node *np,
- const char *dev_id,
- const char *name)
-{
- struct clk *clk = ERR_PTR(-ENOENT);
-
- /* Walk up the tree of devices looking for a clock that matches */
- while (np) {
- int index = 0;
-
- /*
- * For named clocks, first look up the name in the
- * "clock-names" property. If it cannot be found, then
- * index will be an error code, and of_clk_get() will fail.
- */
- if (name)
- index = of_property_match_string(np, "clock-names", name);
- clk = __of_clk_get(np, index, dev_id, name);
- if (!IS_ERR(clk)) {
- break;
- } else if (name && index >= 0) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- pr_err("ERROR: could not get clock %pOF:%s(%i)\n",
- np, name ? name : "", index);
- return clk;
- }
-
- /*
- * No matching clock found on this node. If the parent node
- * has a "clock-ranges" property, then we can try one of its
- * clocks.
- */
- np = np->parent;
- if (np && !of_get_property(np, "clock-ranges", NULL))
- break;
- }
-
- return clk;
-}
-
-/**
- * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
- * @np: pointer to clock consumer node
- * @name: name of consumer's clock input, or NULL for the first clock reference
- *
- * This function parses the clocks and clock-names properties,
- * and uses them to look up the struct clk from the registered list of clock
- * providers.
- */
-struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
-{
- if (!np)
- return ERR_PTR(-ENOENT);
-
- return __of_clk_get_by_name(np, np->full_name, name);
-}
-EXPORT_SYMBOL(of_clk_get_by_name);
-
-#else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */
-
-static struct clk *__of_clk_get_by_name(struct device_node *np,
- const char *dev_id,
- const char *name)
-{
- return ERR_PTR(-ENOENT);
-}
-#endif
-
/*
* Find the correct struct clk for the device and connection ID.
* We do slightly fuzzy matching here:
@@ -139,6 +46,8 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
if (con_id)
best_possible += 1;
+ lockdep_assert_held(&clocks_mutex);
+
list_for_each_entry(p, &clocks, node) {
match = 0;
if (p->dev_id) {
@@ -163,7 +72,8 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
return cl;
}
-struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+static struct clk *__clk_get_sys(struct device *dev, const char *dev_id,
+ const char *con_id)
{
struct clk_lookup *cl;
struct clk *clk = NULL;
@@ -174,35 +84,33 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id)
if (!cl)
goto out;
- clk = __clk_create_clk(cl->clk_hw, dev_id, con_id);
+ clk = clk_hw_create_clk(dev, cl->clk_hw, dev_id, con_id);
if (IS_ERR(clk))
- goto out;
-
- if (!__clk_get(clk)) {
- __clk_free_clk(clk);
cl = NULL;
- goto out;
- }
-
out:
mutex_unlock(&clocks_mutex);
return cl ? clk : ERR_PTR(-ENOENT);
}
+
+struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+{
+ return __clk_get_sys(NULL, dev_id, con_id);
+}
EXPORT_SYMBOL(clk_get_sys);
struct clk *clk_get(struct device *dev, const char *con_id)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
- struct clk *clk;
+ struct clk_hw *hw;
if (dev && dev->of_node) {
- clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
- if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
- return clk;
+ hw = of_clk_get_hw(dev->of_node, 0, con_id);
+ if (!IS_ERR(hw) || PTR_ERR(hw) == -EPROBE_DEFER)
+ return clk_hw_create_clk(dev, hw, dev_id, con_id);
}
- return clk_get_sys(dev_id, con_id);
+ return __clk_get_sys(dev, dev_id, con_id);
}
EXPORT_SYMBOL(clk_get);
@@ -401,6 +309,23 @@ static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
return cl;
}
+static int do_clk_register_clkdev(struct clk_hw *hw,
+ struct clk_lookup **cl, const char *con_id, const char *dev_id)
+{
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ /*
+ * Since dev_id can be NULL, and NULL is handled specially, we must
+ * pass it as either a NULL format string, or with "%s".
+ */
+ if (dev_id)
+ *cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
+ else
+ *cl = __clk_register_clkdev(hw, con_id, NULL);
+
+ return *cl ? 0 : -ENOMEM;
+}
+
/**
* clk_register_clkdev - register one clock lookup for a struct clk
* @clk: struct clk to associate with all clk_lookups
@@ -423,17 +348,8 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
if (IS_ERR(clk))
return PTR_ERR(clk);
- /*
- * Since dev_id can be NULL, and NULL is handled specially, we must
- * pass it as either a NULL format string, or with "%s".
- */
- if (dev_id)
- cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, "%s",
- dev_id);
- else
- cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, NULL);
-
- return cl ? 0 : -ENOMEM;
+ return do_clk_register_clkdev(__clk_get_hw(clk), &cl, con_id,
+ dev_id);
}
EXPORT_SYMBOL(clk_register_clkdev);
@@ -456,18 +372,78 @@ int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
{
struct clk_lookup *cl;
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ return do_clk_register_clkdev(hw, &cl, con_id, dev_id);
+}
+EXPORT_SYMBOL(clk_hw_register_clkdev);
- /*
- * Since dev_id can be NULL, and NULL is handled specially, we must
- * pass it as either a NULL format string, or with "%s".
- */
- if (dev_id)
- cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
- else
- cl = __clk_register_clkdev(hw, con_id, NULL);
+static void devm_clkdev_release(struct device *dev, void *res)
+{
+ clkdev_drop(*(struct clk_lookup **)res);
+}
- return cl ? 0 : -ENOMEM;
+static int devm_clk_match_clkdev(struct device *dev, void *res, void *data)
+{
+ struct clk_lookup **l = res;
+
+ return *l == data;
}
-EXPORT_SYMBOL(clk_hw_register_clkdev);
+
+/**
+ * devm_clk_release_clkdev - Resource managed clkdev lookup release
+ * @dev: device this lookup is bound to
+ * @con_id: connection ID string on device
+ * @dev_id: format string describing device name
+ *
+ * Drop the clkdev lookup created with devm_clk_hw_register_clkdev.
+ * Normally this function will not need to be called and the resource
+ * management code will ensure that the resource is freed.
+ */
+void devm_clk_release_clkdev(struct device *dev, const char *con_id,
+ const char *dev_id)
+{
+ struct clk_lookup *cl;
+ int rval;
+
+ mutex_lock(&clocks_mutex);
+ cl = clk_find(dev_id, con_id);
+ mutex_unlock(&clocks_mutex);
+
+ WARN_ON(!cl);
+ rval = devres_release(dev, devm_clkdev_release,
+ devm_clk_match_clkdev, cl);
+ WARN_ON(rval);
+}
+EXPORT_SYMBOL(devm_clk_release_clkdev);
+
+/**
+ * devm_clk_hw_register_clkdev - managed clk lookup registration for clk_hw
+ * @dev: device this lookup is bound to
+ * @hw: struct clk_hw to associate with all clk_lookups
+ * @con_id: connection ID string on device
+ * @dev_id: format string describing device name
+ *
+ * con_id or dev_id may be NULL as a wildcard, just as in the rest of
+ * clkdev.
+ *
+ * To make things easier for mass registration, we detect error clk_hws
+ * from a previous clk_hw_register_*() call, and return the error code for
+ * those. This is to permit this function to be called immediately
+ * after clk_hw_register_*().
+ */
+int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
+ const char *con_id, const char *dev_id)
+{
+ int rval = -ENOMEM;
+ struct clk_lookup **cl;
+
+ cl = devres_alloc(devm_clkdev_release, sizeof(*cl), GFP_KERNEL);
+ if (cl) {
+ rval = do_clk_register_clkdev(hw, cl, con_id, dev_id);
+ if (!rval)
+ devres_add(dev, cl);
+ else
+ devres_free(cl);
+ }
+ return rval;
+}
+EXPORT_SYMBOL(devm_clk_hw_register_clkdev);
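+
+/*
+ * Illustrative usage sketch (hypothetical driver, not part of this
+ * patch), chained directly off a clk_hw_register_*() call:
+ *
+ *	hw = clk_hw_register_fixed_rate(dev, "mclk", NULL, 0, 24000000);
+ *	ret = devm_clk_hw_register_clkdev(dev, hw, NULL, dev_name(dev));
+ *	if (ret)
+ *		return ret;	// also reports an error hw from above
+ */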
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 4aae31a23449..0eaf41848280 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -8,6 +8,12 @@ config MXC_CLK_SCU
bool
depends on IMX_SCU
+config CLK_IMX8MM
+ bool "IMX8MM CCM Clock Driver"
+ depends on ARCH_MXC && ARM64
+ help
+	  Build the driver for the i.MX8MM CCM (Clock Control Module).
+
config CLK_IMX8MQ
bool "IMX8MQ CCM Clock Driver"
depends on ARCH_MXC && ARM64
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 73119fbfa547..0d5180fbe988 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -18,12 +18,14 @@ obj-$(CONFIG_MXC_CLK) += \
clk-pllv2.o \
clk-pllv3.o \
clk-pllv4.o \
- clk-sccg-pll.o
+ clk-sccg-pll.o \
+ clk-pll14xx.o
obj-$(CONFIG_MXC_CLK_SCU) += \
clk-scu.o \
clk-lpcg-scu.o
+obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o
obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 527ade1d6933..574fac1a169f 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -123,7 +123,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
};
struct clk *imx8m_clk_composite_flags(const char *name,
- const char **parent_names,
+ const char * const *parent_names,
int num_parents, void __iomem *reg,
unsigned long flags)
{
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index fc8e782d817b..e91c826bce70 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -428,6 +428,7 @@ static void __init mx51_clocks_init(struct device_node *np)
clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
+ clk[IMX5_CLK_SCC2_IPG_GATE] = imx_clk_gate2("scc2_gate", "ipg", MXC_CCM_CCGR1, 30);
clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2_flags("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6, CLK_IS_CRITICAL);
clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2_flags("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8, CLK_IS_CRITICAL);
clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2_flags("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10, CLK_IS_CRITICAL);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 716eac3136b4..708e7c5590dd 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -471,6 +471,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
anatop_base = base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
/* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 18527a335ace..91558b09bf9e 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -151,6 +151,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 06c105d580a4..cfbd8d4edb85 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -404,6 +404,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 4e18f629f823..ce306631e844 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -48,8 +48,8 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
struct clk_hw **clks;
void __iomem *base;
- clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
- IMX7ULP_CLK_SCG1_END, GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SCG1_END),
+ GFP_KERNEL);
if (!clk_data)
return;
@@ -136,8 +136,8 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
struct clk_hw **clks;
void __iomem *base;
- clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
- IMX7ULP_CLK_PCC2_END, GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
+ GFP_KERNEL);
if (!clk_data)
return;
@@ -183,8 +183,8 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
struct clk_hw **clks;
void __iomem *base;
- clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
- IMX7ULP_CLK_PCC3_END, GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
+ GFP_KERNEL);
if (!clk_data)
return;
@@ -228,8 +228,8 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
struct clk_hw **clks;
void __iomem *base;
- clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
- IMX7ULP_CLK_SMC1_END, GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SMC1_END),
+ GFP_KERNEL);
if (!clk_data)
return;
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
new file mode 100644
index 000000000000..1ef8438e3d6d
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017-2018 NXP.
+ */
+
+#include <dt-bindings/clock/imx8mm-clock.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+static u32 share_count_sai1;
+static u32 share_count_sai2;
+static u32 share_count_sai3;
+static u32 share_count_sai4;
+static u32 share_count_sai5;
+static u32 share_count_sai6;
+static u32 share_count_dcss;
+static u32 share_count_pdm;
+static u32 share_count_nand;
+
+#define PLL_1416X_RATE(_rate, _m, _p, _s) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ }
+
+#define PLL_1443X_RATE(_rate, _m, _p, _s, _k) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .kdiv = (_k), \
+ }
+
+static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = {
+ PLL_1416X_RATE(1800000000U, 225, 3, 0),
+ PLL_1416X_RATE(1600000000U, 200, 3, 0),
+ PLL_1416X_RATE(1200000000U, 300, 3, 1),
+ PLL_1416X_RATE(1000000000U, 250, 3, 1),
+ PLL_1416X_RATE(800000000U, 200, 3, 1),
+ PLL_1416X_RATE(750000000U, 250, 2, 2),
+ PLL_1416X_RATE(700000000U, 350, 3, 2),
+ PLL_1416X_RATE(600000000U, 300, 3, 2),
+};
+
+static const struct imx_pll14xx_rate_table imx8mm_audiopll_tbl[] = {
+ PLL_1443X_RATE(786432000U, 655, 5, 2, 23593),
+ PLL_1443X_RATE(722534400U, 301, 5, 1, 3670),
+};
+
+static const struct imx_pll14xx_rate_table imx8mm_videopll_tbl[] = {
+ PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+ PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+};
+
+static const struct imx_pll14xx_rate_table imx8mm_drampll_tbl[] = {
+ PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+};
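+
+/*
+ * Sanity check for the tables above, assuming the usual 14xx relation
+ * rate = parent * (mdiv + kdiv / 65536) / (pdiv * 2^sdiv), where kdiv
+ * applies only to the 1443x type, with a 24 MHz reference:
+ *
+ *	1416x: 24 MHz * 225 / (3 * 2^0) = 1800 MHz
+ *	1443x: 24 MHz * (655 + 23593/65536) / (5 * 2^2) ~= 786.432 MHz
+ */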
+
+static struct imx_pll14xx_clk imx8mm_audio_pll __initdata = {
+ .type = PLL_1443X,
+ .rate_table = imx8mm_audiopll_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_audiopll_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_video_pll __initdata = {
+ .type = PLL_1443X,
+ .rate_table = imx8mm_videopll_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_videopll_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_dram_pll __initdata = {
+ .type = PLL_1443X,
+ .rate_table = imx8mm_drampll_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_drampll_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_arm_pll __initdata = {
+ .type = PLL_1416X,
+ .rate_table = imx8mm_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_gpu_pll __initdata = {
+ .type = PLL_1416X,
+ .rate_table = imx8mm_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_vpu_pll __initdata = {
+ .type = PLL_1416X,
+ .rate_table = imx8mm_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
+};
+
+static struct imx_pll14xx_clk imx8mm_sys_pll __initdata = {
+ .type = PLL_1416X,
+ .rate_table = imx8mm_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
+};
+
+static const char *pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+static const char *video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
+static const char *dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
+static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
+static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+static const char *sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
+static const char *sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
+static const char *sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
+
+/* CCM ROOT */
+static const char *imx8mm_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m",
+ "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out", };
+
+static const char *imx8mm_m4_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "sys_pll1_266m",
+ "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
+
+static const char *imx8mm_vpu_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m",
+ "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "vpu_pll_out", };
+
+static const char *imx8mm_gpu3d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_gpu2d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m", "sys_pll2_250m",
+ "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "sys_pll1_100m",};
+
+static const char *imx8mm_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_250m",
+ "sys_pll2_200m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
+
+static const char *imx8mm_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_200m",
+ "sys_pll1_133m", "sys_pll3_out", "sys_pll2_250m", "audio_pll1_out", };
+
+static const char *imx8mm_vpu_bus_sels[] = {"osc_24m", "sys_pll1_800m", "vpu_pll_out", "audio_pll2_out",
+ "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_200m", "sys_pll1_100m", };
+
+static const char *imx8mm_disp_axi_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "clk_ext4", };
+
+static const char *imx8mm_disp_apb_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", };
+
+static const char *imx8mm_disp_rtrm_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll2_200m", "sys_pll2_1000m",
+ "audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mm_usb_bus_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_100m",
+ "sys_pll2_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", };
+
+static const char *imx8mm_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_500m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_noc_apb_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll3_out", "sys_pll2_333m", "sys_pll2_200m",
+ "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", };
+
+static const char *imx8mm_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m", "sys_pll1_400m",
+ "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", };
+
+static const char *imx8mm_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", };
+
+static const char *imx8mm_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m", "sys_pll2_500m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll1_out", "sys_pll1_266m", };
+
+static const char *imx8mm_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
+
+static const char *imx8mm_vpu_g1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", };
+
+static const char *imx8mm_vpu_g2_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", };
+
+static const char *imx8mm_disp_dtrc_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", };
+
+static const char *imx8mm_disp_dc8000_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", };
+
+static const char *imx8mm_pcie1_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m", "sys_pll1_266m",
+ "sys_pll1_800m", "sys_pll2_500m", "sys_pll2_333m", "sys_pll3_out", };
+
+static const char *imx8mm_pcie1_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4", "sys_pll1_400m", };
+
+static const char *imx8mm_pcie1_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out",
+ "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m", };
+
+static const char *imx8mm_dc_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
+
+static const char *imx8mm_lcdif_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
+
+static const char *imx8mm_sai1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext1", "clk_ext2", };
+
+static const char *imx8mm_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mm_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", };
+
+static const char *imx8mm_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext1", "clk_ext2", };
+
+static const char *imx8mm_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mm_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", };
+
+static const char *imx8mm_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mm_spdif2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", };
+
+static const char *imx8mm_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m", "sys_pll2_100m",
+ "sys_pll1_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", };
+
+static const char *imx8mm_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4", "video_pll1_out", };
+
+static const char *imx8mm_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m", "sys_pll2_200m",
+ "sys_pll2_500m", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mm_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out", "sys_pll1_400m",
+ "audio_pll2_out", "sys_pll3_out", "sys_pll2_250m", "video_pll1_out", };
+
+static const char *imx8mm_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+ "audio_pll2_out", "sys_pll1_266m", "sys_pll3_out", "sys_pll1_100m", };
+
+static const char *imx8mm_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+ "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };
+
+static const char *imx8mm_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+ "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };
+
+static const char *imx8mm_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
+
+static const char *imx8mm_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
+
+static const char *imx8mm_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
+
+static const char *imx8mm_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", };
+
+static const char *imx8mm_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
+ "sys_pll3_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
+
+static const char *imx8mm_uart2_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
+ "sys_pll3_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_uart3_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
+ "sys_pll3_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
+
+static const char *imx8mm_uart4_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m",
+ "sys_pll3_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_usb_core_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m",
+ "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_usb_phy_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m",
+ "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
+
+static const char *imx8mm_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
+
+static const char *imx8mm_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
+ "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out", };
+
+static const char *imx8mm_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
+ "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out", };
+
+static const char *imx8mm_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
+				"sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", };
+
+static const char *imx8mm_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m",
+ "sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", };
+
+static const char *imx8mm_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", "sys_pll1_40m",
+ "video_pll1_out", "sys_pll1_800m", "audio_pll1_out", "clk_ext1" };
+
+static const char *imx8mm_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m", "vpu_pll_out",
+ "sys_pll2_125m", "sys_pll3_out", "sys_pll1_80m", "sys_pll2_166m", };
+
+static const char *imx8mm_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out", "sys_pll3_out", "sys_pll2_200m",
+ "sys_pll1_266m", "sys_pll2_500m", "sys_pll1_100m", };
+
+static const char *imx8mm_dsi_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_dsi_phy_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+				"sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };
+
+static const char *imx8mm_csi1_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_csi1_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_csi1_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_csi2_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_csi2_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mm_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_pcie2_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m", "sys_pll1_266m",
+ "sys_pll1_800m", "sys_pll2_500m", "sys_pll2_333m", "sys_pll3_out", };
+
+static const char *imx8mm_pcie2_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m", "clk_ext1",
+ "clk_ext2", "clk_ext3", "clk_ext4", "sys_pll1_400m", };
+
+static const char *imx8mm_pcie2_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out",
+ "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m", };
+
+static const char *imx8mm_ecspi3_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m",
+ "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", };
+
+static const char *imx8mm_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
+				"audio_pll2_out", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", };
+
+static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+
+static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
+				"vpu_pll", "sys_pll1_80m", };
+
+static struct clk *clks[IMX8MM_CLK_END];
+static struct clk_onecell_data clk_data;
+
+static struct clk ** const uart_clks[] __initconst = {
+ &clks[IMX8MM_CLK_UART1_ROOT],
+ &clks[IMX8MM_CLK_UART2_ROOT],
+ &clks[IMX8MM_CLK_UART3_ROOT],
+ &clks[IMX8MM_CLK_UART4_ROOT],
+ NULL
+};
+
+static int __init imx8mm_clocks_init(struct device_node *ccm_node)
+{
+ struct device_node *np;
+ void __iomem *base;
+ int ret;
+
+ clks[IMX8MM_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+ clks[IMX8MM_CLK_24M] = of_clk_get_by_name(ccm_node, "osc_24m");
+ clks[IMX8MM_CLK_32K] = of_clk_get_by_name(ccm_node, "osc_32k");
+ clks[IMX8MM_CLK_EXT1] = of_clk_get_by_name(ccm_node, "clk_ext1");
+ clks[IMX8MM_CLK_EXT2] = of_clk_get_by_name(ccm_node, "clk_ext2");
+ clks[IMX8MM_CLK_EXT3] = of_clk_get_by_name(ccm_node, "clk_ext3");
+ clks[IMX8MM_CLK_EXT4] = of_clk_get_by_name(ccm_node, "clk_ext4");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+	base = of_iomap(np, 0);
+	of_node_put(np);
+	if (WARN_ON(!base))
+		return -ENOMEM;
+
+ clks[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_SYS_PLL1_REF_SEL] = imx_clk_mux("sys_pll1_ref_sel", base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+ clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mm_audio_pll);
+ clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mm_audio_pll);
+ clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mm_video_pll);
+ clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mm_dram_pll);
+ clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mm_gpu_pll);
+ clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mm_vpu_pll);
+ clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mm_arm_pll);
+ clks[IMX8MM_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mm_sys_pll);
+ clks[IMX8MM_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mm_sys_pll);
+ clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mm_sys_pll);
+
+ /* PLL bypass out */
+ clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+
+ /* unbypass all the plls */
+ clk_set_parent(clks[IMX8MM_AUDIO_PLL1_BYPASS], clks[IMX8MM_AUDIO_PLL1]);
+ clk_set_parent(clks[IMX8MM_AUDIO_PLL2_BYPASS], clks[IMX8MM_AUDIO_PLL2]);
+ clk_set_parent(clks[IMX8MM_VIDEO_PLL1_BYPASS], clks[IMX8MM_VIDEO_PLL1]);
+ clk_set_parent(clks[IMX8MM_DRAM_PLL_BYPASS], clks[IMX8MM_DRAM_PLL]);
+ clk_set_parent(clks[IMX8MM_GPU_PLL_BYPASS], clks[IMX8MM_GPU_PLL]);
+ clk_set_parent(clks[IMX8MM_VPU_PLL_BYPASS], clks[IMX8MM_VPU_PLL]);
+ clk_set_parent(clks[IMX8MM_ARM_PLL_BYPASS], clks[IMX8MM_ARM_PLL]);
+ clk_set_parent(clks[IMX8MM_SYS_PLL1_BYPASS], clks[IMX8MM_SYS_PLL1]);
+ clk_set_parent(clks[IMX8MM_SYS_PLL2_BYPASS], clks[IMX8MM_SYS_PLL2]);
+ clk_set_parent(clks[IMX8MM_SYS_PLL3_BYPASS], clks[IMX8MM_SYS_PLL3]);
+
+ /* PLL out gate */
+ clks[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+ clks[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+ clks[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
+ clks[IMX8MM_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+ clks[IMX8MM_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 13);
+ clks[IMX8MM_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 13);
+ clks[IMX8MM_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 13);
+ clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 13);
+ clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 13);
+ clks[IMX8MM_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 13);
+
+ /* SYS PLL fixed output */
+ clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
+ clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
+ clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
+ clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
+ clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
+ clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
+ clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
+ clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ clks[IMX8MM_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+
+ clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
+ clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
+ clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
+ clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
+ clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
+ clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
+ clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
+ clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
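+
+	/*
+	 * The fixed factors above assume sys_pll1 locks at its nominal
+	 * 800 MHz and sys_pll2 at 1000 MHz, matching the names: e.g.
+	 * 800 MHz / 20 = 40 MHz for "sys_pll1_40m" and
+	 * 1000 MHz / 3 ~= 333 MHz for "sys_pll2_333m".
+	 */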
+
+ np = ccm_node;
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return -ENOMEM;
+
+ /* Core Slice */
+ clks[IMX8MM_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
+ clks[IMX8MM_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
+ clks[IMX8MM_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
+ clks[IMX8MM_CLK_GPU3D_SRC] = imx_clk_mux2("gpu3d_src", base + 0x8180, 24, 3, imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
+ clks[IMX8MM_CLK_GPU2D_SRC] = imx_clk_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels, ARRAY_SIZE(imx8mm_gpu2d_sels));
+ clks[IMX8MM_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
+ clks[IMX8MM_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
+ clks[IMX8MM_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
+ clks[IMX8MM_CLK_GPU3D_CG] = imx_clk_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
+ clks[IMX8MM_CLK_GPU2D_CG] = imx_clk_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
+ clks[IMX8MM_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ clks[IMX8MM_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
+ clks[IMX8MM_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
+ clks[IMX8MM_CLK_GPU3D_DIV] = imx_clk_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
+ clks[IMX8MM_CLK_GPU2D_DIV] = imx_clk_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
+
+ /* BUS */
+ clks[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
+ clks[IMX8MM_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
+ clks[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+ clks[IMX8MM_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
+ clks[IMX8MM_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
+ clks[IMX8MM_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
+ clks[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
+ clks[IMX8MM_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
+ clks[IMX8MM_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
+ clks[IMX8MM_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
+ clks[IMX8MM_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
+ clks[IMX8MM_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
+
+ /* AHB */
+ clks[IMX8MM_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
+ clks[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
+
+ /* IPG */
+ clks[IMX8MM_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+ clks[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+
+ /* IP */
+ clks[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
+ clks[IMX8MM_CLK_DRAM_APB] = imx8m_clk_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
+ clks[IMX8MM_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
+ clks[IMX8MM_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180);
+ clks[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200);
+ clks[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280);
+ clks[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300);
+ clks[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
+ clks[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
+ clks[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
+ clks[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
+ clks[IMX8MM_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
+ clks[IMX8MM_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
+ clks[IMX8MM_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
+ clks[IMX8MM_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mm_sai4_sels, base + 0xa700);
+ clks[IMX8MM_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mm_sai5_sels, base + 0xa780);
+ clks[IMX8MM_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mm_sai6_sels, base + 0xa800);
+ clks[IMX8MM_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880);
+ clks[IMX8MM_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900);
+ clks[IMX8MM_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980);
+ clks[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00);
+ clks[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80);
+ clks[IMX8MM_CLK_NAND] = imx8m_clk_composite("nand", imx8mm_nand_sels, base + 0xab00);
+ clks[IMX8MM_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mm_qspi_sels, base + 0xab80);
+ clks[IMX8MM_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00);
+ clks[IMX8MM_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80);
+ clks[IMX8MM_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00);
+ clks[IMX8MM_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80);
+ clks[IMX8MM_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00);
+ clks[IMX8MM_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80);
+ clks[IMX8MM_CLK_UART1] = imx8m_clk_composite("uart1", imx8mm_uart1_sels, base + 0xaf00);
+ clks[IMX8MM_CLK_UART2] = imx8m_clk_composite("uart2", imx8mm_uart2_sels, base + 0xaf80);
+ clks[IMX8MM_CLK_UART3] = imx8m_clk_composite("uart3", imx8mm_uart3_sels, base + 0xb000);
+ clks[IMX8MM_CLK_UART4] = imx8m_clk_composite("uart4", imx8mm_uart4_sels, base + 0xb080);
+ clks[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100);
+ clks[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180);
+ clks[IMX8MM_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280);
+ clks[IMX8MM_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300);
+ clks[IMX8MM_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380);
+ clks[IMX8MM_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400);
+ clks[IMX8MM_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480);
+ clks[IMX8MM_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500);
+ clks[IMX8MM_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580);
+ clks[IMX8MM_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
+ clks[IMX8MM_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
+ clks[IMX8MM_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
+ clks[IMX8MM_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
+ clks[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
+ clks[IMX8MM_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
+ clks[IMX8MM_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80);
+ clks[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00);
+ clks[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80);
+ clks[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00);
+ clks[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80);
+ clks[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00);
+ clks[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80);
+ clks[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000);
+ clks[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080);
+ clks[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100);
+ clks[IMX8MM_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180);
+ clks[IMX8MM_CLK_PDM] = imx8m_clk_composite("pdm", imx8mm_pdm_sels, base + 0xc200);
+ clks[IMX8MM_CLK_VPU_H1] = imx8m_clk_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280);
+
+ /* CCGR */
+ clks[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+ clks[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+ clks[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+ clks[IMX8MM_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+ clks[IMX8MM_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+ clks[IMX8MM_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+ clks[IMX8MM_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+ clks[IMX8MM_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+ clks[IMX8MM_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+ clks[IMX8MM_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+ clks[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+ clks[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
+ clks[IMX8MM_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+ clks[IMX8MM_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+ clks[IMX8MM_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+ clks[IMX8MM_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+ clks[IMX8MM_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
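+	/*
+	 * The *_shared2 gates below pair a peripheral root clock with its
+	 * bus/ipg companion on a single CCGR bit; the shared share_count
+	 * keeps the gate enabled until every user has been disabled.
+	 */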
+ clks[IMX8MM_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+ clks[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+ clks[IMX8MM_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
+ clks[IMX8MM_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
+ clks[IMX8MM_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+ clks[IMX8MM_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
+ clks[IMX8MM_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+ clks[IMX8MM_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
+ clks[IMX8MM_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
+ clks[IMX8MM_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
+ clks[IMX8MM_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+ clks[IMX8MM_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+ clks[IMX8MM_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+ clks[IMX8MM_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+ clks[IMX8MM_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+ clks[IMX8MM_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+ clks[IMX8MM_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+ clks[IMX8MM_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+ clks[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
+ clks[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
+ clks[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+ clks[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+ clks[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+ clks[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+ clks[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+ clks[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0);
+ clks[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
+ clks[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0);
+ clks[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0);
+ clks[IMX8MM_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
+ clks[IMX8MM_CLK_PDM_IPG] = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
+ clks[IMX8MM_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MM_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MM_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
+ clks[IMX8MM_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+ clks[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0);
+ clks[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+ clks[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+ clks[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
+ clks[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
+ clks[IMX8MM_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
+
+ clks[IMX8MM_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc_24m", 1, 8);
+
+ clks[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+ clks[IMX8MM_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
+
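+	/*
+	 * Virtual "arm" clock: imx_clk_cpu() (see clk-cpu.c) parks the A53
+	 * mux on the 24 MHz step clock while the ARM PLL is re-locked
+	 * during a rate change, then switches back.
+	 */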
+ clks[IMX8MM_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
+ clks[IMX8MM_CLK_A53_DIV],
+ clks[IMX8MM_CLK_A53_SRC],
+ clks[IMX8MM_ARM_PLL_OUT],
+ clks[IMX8MM_CLK_24M]);
+
+ imx_check_clocks(clks, ARRAY_SIZE(clks));
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ if (ret < 0) {
+ pr_err("failed to register clks for i.MX8MM\n");
+		return ret;
+ }
+
+ imx_register_uart_clocks(uart_clks);
+
+ return 0;
+}
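+/*
+ * CLK_OF_DECLARE_DRIVER runs the init at early OF clock setup time while
+ * still allowing the node to be bound by a platform driver later on.
+ */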
+CLK_OF_DECLARE_DRIVER(imx8mm, "fsl,imx8mm-ccm", imx8mm_clocks_init);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 26b57f43ccc3..a9b3888aef0c 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -26,246 +26,246 @@ static u32 share_count_nand;
static struct clk *clks[IMX8MQ_CLK_END];
-static const char *pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
-static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
-static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
-static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
-static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
-static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
-static const char *video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
-
-static const char *sys1_pll1_out_sels[] = {"sys1_pll1", "sys1_pll1_ref_sel", };
-static const char *sys2_pll1_out_sels[] = {"sys2_pll1", "sys1_pll1_ref_sel", };
-static const char *sys3_pll1_out_sels[] = {"sys3_pll1", "sys3_pll1_ref_sel", };
-static const char *dram_pll1_out_sels[] = {"dram_pll1", "dram_pll1_ref_sel", };
-
-static const char *sys1_pll2_out_sels[] = {"sys1_pll2_div", "sys1_pll1_ref_sel", };
-static const char *sys2_pll2_out_sels[] = {"sys2_pll2_div", "sys2_pll1_ref_sel", };
-static const char *sys3_pll2_out_sels[] = {"sys3_pll2_div", "sys2_pll1_ref_sel", };
-static const char *dram_pll2_out_sels[] = {"dram_pll2_div", "dram_pll1_ref_sel", };
+static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
+static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
+static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
+
+static const char * const sys1_pll_out_sels[] = {"sys1_pll1_ref_sel", };
+static const char * const sys2_pll_out_sels[] = {"sys1_pll1_ref_sel", "sys2_pll1_ref_sel", };
+static const char * const sys3_pll_out_sels[] = {"sys3_pll1_ref_sel", "sys2_pll1_ref_sel", };
+static const char * const dram_pll_out_sels[] = {"dram_pll1_ref_sel", };
/* CCM ROOT */
-static const char *imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
+static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
"sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll2_out", };
-static const char *imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
+static const char * const imx8mq_arm_m4_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_250m", "sys1_pll_266m",
+ "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", };
+
+static const char * const imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
"sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "vpu_pll_out", };
-static const char *imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
+static const char * const imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
"sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
+static const char * const imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
"sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m",
+static const char * const imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m",
"sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "sys1_pll_100m",};
-static const char *imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m",
+static const char * const imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m",
"sys2_pll_200m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", };
-static const char *imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m",
+static const char * const imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m",
"sys1_pll_133m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll1_out", };
-static const char *imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", };
+static const char * const imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", };
-static const char *imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", };
+static const char * const imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", };
-static const char *imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out",
+static const char * const imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out",
"sys1_pll_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", };
-static const char *imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m",
+static const char * const imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m",
"audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", };
-static const char *imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m",
+static const char * const imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m",
"sys2_pll_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", };
-static const char *imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
+static const char * const imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
"audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
+static const char * const imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
"audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m",
+static const char * const imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m",
"audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m",
+static const char * const imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m",
"sys1_pll_800m", "audio_pll1_out", "video_pll1_out", };
-static const char *imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m",
+static const char * const imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m",
"sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", };
-static const char *imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m",
+static const char * const imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m",
"sys2_pll_166m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", };
-static const char *imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out"};
-static const char *imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m",
+static const char * const imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m",
"sys2_pll_250m", "sys1_pll_400m", "audio_pll1_out", "sys1_pll_266m", };
-static const char *imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+static const char * const imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
"sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
-static const char *imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
+static const char * const imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
-static const char *imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
+static const char * const imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
-static const char *imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
+static const char * const imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
-static const char *imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
+static const char * const imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
-static const char *imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
+static const char * const imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
"sys1_pll_800m", "sys2_pll_500m", "sys2_pll_250m", "sys3_pll2_out", };
-static const char *imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2",
+static const char * const imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4", };
-static const char *imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out",
+static const char * const imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out",
"sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", };
-static const char *imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
+static const char * const imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
-static const char *imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
+static const char * const imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
-static const char *imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
+static const char * const imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
-static const char *imx8mq_sai2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
+static const char * const imx8mq_sai2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
-static const char *imx8mq_sai3_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
+static const char * const imx8mq_sai3_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
-static const char *imx8mq_sai4_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
+static const char * const imx8mq_sai4_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
-static const char *imx8mq_sai5_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
+static const char * const imx8mq_sai5_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
-static const char *imx8mq_sai6_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
+static const char * const imx8mq_sai6_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
-static const char *imx8mq_spdif1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
+static const char * const imx8mq_spdif1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
-static const char *imx8mq_spdif2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
+static const char * const imx8mq_spdif2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
-static const char *imx8mq_enet_ref_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m",
+static const char * const imx8mq_enet_ref_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m",
"sys1_pll_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", };
-static const char *imx8mq_enet_timer_sels[] = {"osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
+static const char * const imx8mq_enet_timer_sels[] = {"osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4", "video_pll1_out", };
-static const char *imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m",
+static const char * const imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m",
"audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
-static const char *imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m",
+static const char * const imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m",
"audio_pll2_out", "sys3_pll2_out", "sys2_pll_250m", "video_pll1_out", };
-static const char *imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
"audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
-static const char *imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
"audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
-static const char *imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
"audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
-static const char *imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
-static const char *imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+static const char * const imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
-static const char *imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+static const char * const imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
-static const char *imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+static const char * const imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
-static const char *imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+static const char * const imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
"sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
-static const char *imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+static const char * const imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
"sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+static const char * const imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
"sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
-static const char *imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+static const char * const imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
"sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
+static const char * const imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
"sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
+static const char * const imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
"sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+static const char * const imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
"sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
-static const char *imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+static const char * const imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
"sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
-static const char *imx8mq_pwm1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+static const char * const imx8mq_pwm1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
"sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", };
-static const char *imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+static const char * const imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
"sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", };
-static const char *imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+static const char * const imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
"sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", };
-static const char *imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+static const char * const imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
"sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", };
-static const char *imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m",
+static const char * const imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m",
"sys1_pll_80m", "audio_pll1_out", "clk_ext1", };
-static const char *imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out",
+static const char * const imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out",
"sys2_pll_125m", "sys3_pll2_out", "sys1_pll_80m", "sys2_pll_166m", };
-static const char *imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m",
+static const char * const imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m",
"sys1_pll_266m", "sys2_pll_500m", "sys1_pll_100m", };
-static const char *imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
"sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+static const char * const imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
+static const char * const imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
+static const char * const imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
"sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+static const char * const imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
+static const char * const imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
+static const char * const imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
"sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
-static const char *imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+static const char * const imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
"sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
-static const char *imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
+static const char * const imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
"sys1_pll_800m", "sys2_pll_500m", "sys2_pll_333m", "sys3_pll2_out", };
-static const char *imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1",
+static const char * const imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1",
"clk_ext2", "clk_ext3", "clk_ext4", "sys1_pll_400m", };
-static const char *imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out",
+static const char * const imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out",
"sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", };
-static const char *imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+static const char * const imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
"sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
-static const char *imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+static const char * const imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
-static const char *imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m", "audio_pll1_out",
- "video_pll1_out", "ckil", };
+static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "osc_27m", "sys1_pll_200m",
+ "audio_pll2_out", "sys2_pll_500m", "vpu_pll_out", "sys1_pll_80m", };
+static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m",
+ "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", "ckil", };
static struct clk_onecell_data clk_data;
@@ -308,10 +308,6 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
- clks[IMX8MQ_SYS1_PLL1_REF_DIV] = imx_clk_divider("sys1_pll1_ref_div", "sys1_pll1_ref_sel", base + 0x38, 25, 3);
- clks[IMX8MQ_SYS2_PLL1_REF_DIV] = imx_clk_divider("sys2_pll1_ref_div", "sys2_pll1_ref_sel", base + 0x44, 25, 3);
- clks[IMX8MQ_SYS3_PLL1_REF_DIV] = imx_clk_divider("sys3_pll1_ref_div", "sys3_pll1_ref_sel", base + 0x50, 25, 3);
- clks[IMX8MQ_DRAM_PLL1_REF_DIV] = imx_clk_divider("dram_pll1_ref_div", "dram_pll1_ref_sel", base + 0x68, 25, 3);
clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
@@ -319,43 +315,15 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
- clks[IMX8MQ_SYS1_PLL1] = imx_clk_sccg_pll("sys1_pll1", "sys1_pll1_ref_div", base + 0x30, SCCG_PLL1);
- clks[IMX8MQ_SYS2_PLL1] = imx_clk_sccg_pll("sys2_pll1", "sys2_pll1_ref_div", base + 0x3c, SCCG_PLL1);
- clks[IMX8MQ_SYS3_PLL1] = imx_clk_sccg_pll("sys3_pll1", "sys3_pll1_ref_div", base + 0x48, SCCG_PLL1);
- clks[IMX8MQ_DRAM_PLL1] = imx_clk_sccg_pll("dram_pll1", "dram_pll1_ref_div", base + 0x60, SCCG_PLL1);
-
- clks[IMX8MQ_SYS1_PLL2] = imx_clk_sccg_pll("sys1_pll2", "sys1_pll1_out_div", base + 0x30, SCCG_PLL2);
- clks[IMX8MQ_SYS2_PLL2] = imx_clk_sccg_pll("sys2_pll2", "sys2_pll1_out_div", base + 0x3c, SCCG_PLL2);
- clks[IMX8MQ_SYS3_PLL2] = imx_clk_sccg_pll("sys3_pll2", "sys3_pll1_out_div", base + 0x48, SCCG_PLL2);
- clks[IMX8MQ_DRAM_PLL2] = imx_clk_sccg_pll("dram_pll2", "dram_pll1_out_div", base + 0x60, SCCG_PLL2);
-
- /* PLL divs */
- clks[IMX8MQ_SYS1_PLL1_OUT_DIV] = imx_clk_divider("sys1_pll1_out_div", "sys1_pll1_out", base + 0x38, 19, 6);
- clks[IMX8MQ_SYS2_PLL1_OUT_DIV] = imx_clk_divider("sys2_pll1_out_div", "sys2_pll1_out", base + 0x44, 19, 6);
- clks[IMX8MQ_SYS3_PLL1_OUT_DIV] = imx_clk_divider("sys3_pll1_out_div", "sys3_pll1_out", base + 0x50, 19, 6);
- clks[IMX8MQ_DRAM_PLL1_OUT_DIV] = imx_clk_divider("dram_pll1_out_div", "dram_pll1_out", base + 0x68, 19, 6);
- clks[IMX8MQ_SYS1_PLL2_DIV] = imx_clk_divider("sys1_pll2_div", "sys1_pll2", base + 0x38, 1, 6);
- clks[IMX8MQ_SYS2_PLL2_DIV] = imx_clk_divider("sys2_pll2_div", "sys2_pll2", base + 0x44, 1, 6);
- clks[IMX8MQ_SYS3_PLL2_DIV] = imx_clk_divider("sys3_pll2_div", "sys3_pll2", base + 0x50, 1, 6);
- clks[IMX8MQ_DRAM_PLL2_DIV] = imx_clk_divider("dram_pll2_div", "dram_pll2", base + 0x68, 1, 6);
/* PLL bypass out */
- clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels));
+ clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
- clks[IMX8MQ_SYS1_PLL1_OUT] = imx_clk_mux("sys1_pll1_out", base + 0x30, 5, 1, sys1_pll1_out_sels, ARRAY_SIZE(sys1_pll1_out_sels));
- clks[IMX8MQ_SYS2_PLL1_OUT] = imx_clk_mux("sys2_pll1_out", base + 0x3c, 5, 1, sys2_pll1_out_sels, ARRAY_SIZE(sys2_pll1_out_sels));
- clks[IMX8MQ_SYS3_PLL1_OUT] = imx_clk_mux("sys3_pll1_out", base + 0x48, 5, 1, sys3_pll1_out_sels, ARRAY_SIZE(sys3_pll1_out_sels));
- clks[IMX8MQ_DRAM_PLL1_OUT] = imx_clk_mux("dram_pll1_out", base + 0x60, 5, 1, dram_pll1_out_sels, ARRAY_SIZE(dram_pll1_out_sels));
- clks[IMX8MQ_SYS1_PLL2_OUT] = imx_clk_mux("sys1_pll2_out", base + 0x30, 4, 1, sys1_pll2_out_sels, ARRAY_SIZE(sys1_pll2_out_sels));
- clks[IMX8MQ_SYS2_PLL2_OUT] = imx_clk_mux("sys2_pll2_out", base + 0x3c, 4, 1, sys2_pll2_out_sels, ARRAY_SIZE(sys2_pll2_out_sels));
- clks[IMX8MQ_SYS3_PLL2_OUT] = imx_clk_mux("sys3_pll2_out", base + 0x48, 4, 1, sys3_pll2_out_sels, ARRAY_SIZE(sys3_pll2_out_sels));
- clks[IMX8MQ_DRAM_PLL2_OUT] = imx_clk_mux("dram_pll2_out", base + 0x60, 4, 1, dram_pll2_out_sels, ARRAY_SIZE(dram_pll2_out_sels));
-
/* PLL OUT GATE */
clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
@@ -363,11 +331,11 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
- clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_gate("sys1_pll_out", "sys1_pll2_out", base + 0x30, 9);
- clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_gate("sys2_pll_out", "sys2_pll2_out", base + 0x3c, 9);
- clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_gate("sys3_pll_out", "sys3_pll2_out", base + 0x48, 9);
- clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll2_out", base + 0x60, 9);
+ clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_sccg_pll("sys1_pll_out", sys1_pll_out_sels, ARRAY_SIZE(sys1_pll_out_sels), 0, 0, 0, base + 0x30, CLK_IS_CRITICAL);
+ clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_sccg_pll("sys2_pll_out", sys2_pll_out_sels, ARRAY_SIZE(sys2_pll_out_sels), 0, 0, 1, base + 0x3c, CLK_IS_CRITICAL);
+ clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 1, base + 0x48, CLK_IS_CRITICAL);
+ clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_sccg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL);
/* SYS PLL fixed output */
clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20);
clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10);
@@ -396,15 +364,19 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
/* CORE */
clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
+ clks[IMX8MQ_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels));
+
clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
+ clks[IMX8MQ_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ clks[IMX8MQ_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
@@ -479,6 +451,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
+ clks[IMX8MQ_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mq_clko1_sels, base + 0xba00);
clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
@@ -500,6 +473,11 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+ clks[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+ clks[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+ clks[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+ clks[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+ clks[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
@@ -558,6 +536,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8);
clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
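+	/* As on i.MX8MM above, but with sys1_pll_800m as the step clock. */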
+ clks[IMX8MQ_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
+ clks[IMX8MQ_CLK_A53_DIV],
+ clks[IMX8MQ_CLK_A53_SRC],
+ clks[IMX8MQ_ARM_PLL_OUT],
+ clks[IMX8MQ_SYS1_PLL_800M]);
+
for (i = 0; i < IMX8MQ_CLK_END; i++)
if (IS_ERR(clks[i]))
pr_err("i.MX8mq clk %u register failed with %ld\n",
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index 83e2ef96d81d..5e2903efc488 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -138,6 +138,7 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id imx8qxp_match[] = {
+ { .compatible = "fsl,scu-clk", },
{ .compatible = "fsl,imx8qxp-clk", },
{ /* sentinel */ }
};
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
new file mode 100644
index 000000000000..113d71042199
--- /dev/null
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017-2018 NXP.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+#define GNRL_CTL 0x0
+#define DIV_CTL 0x4
+#define LOCK_STATUS BIT(31)
+#define LOCK_SEL_MASK BIT(29)
+#define CLKE_MASK BIT(11)
+#define RST_MASK BIT(9)
+#define BYPASS_MASK BIT(4)
+#define MDIV_SHIFT 12
+#define MDIV_MASK GENMASK(21, 12)
+#define PDIV_SHIFT 4
+#define PDIV_MASK GENMASK(9, 4)
+#define SDIV_SHIFT 0
+#define SDIV_MASK GENMASK(2, 0)
+#define KDIV_SHIFT 0
+#define KDIV_MASK GENMASK(15, 0)
+
+#define LOCK_TIMEOUT_US 10000
+
+struct clk_pll14xx {
+ struct clk_hw hw;
+ void __iomem *base;
+ enum imx_pll14xx_type type;
+ const struct imx_pll14xx_rate_table *rate_table;
+ int rate_count;
+};
+
+#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)
+
+static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
+ struct clk_pll14xx *pll, unsigned long rate)
+{
+ const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ for (i = 0; i < pll->rate_count; i++)
+ if (rate == rate_table[i].rate)
+ return &rate_table[i];
+
+ return NULL;
+}
+
+static long clk_pll14xx_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
+ int i;
+
+	/* Assuming rate_table is sorted in descending order */
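+	/*
+	 * e.g. with a hypothetical table {1000 MHz, 800 MHz, 600 MHz}, a
+	 * 900 MHz request rounds down to 800 MHz and anything below
+	 * 600 MHz returns the 600 MHz minimum.
+	 */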
+ for (i = 0; i < pll->rate_count; i++)
+ if (rate >= rate_table[i].rate)
+ return rate_table[i].rate;
+
+ /* return minimum supported value */
+ return rate_table[i - 1].rate;
+}
+
+static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+	u32 mdiv, pdiv, sdiv, pll_div;
+	u64 fvco = parent_rate;
+
+	pll_div = readl_relaxed(pll->base + 4);
+ mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
+ pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
+ sdiv = (pll_div & SDIV_MASK) >> SDIV_SHIFT;
+
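+	/*
+	 * fvco = fin * m / (p * 2^s), e.g. with hypothetical settings
+	 * fin = 24 MHz, m = 250, p = 3, s = 1:
+	 * 24 MHz * 250 / (3 << 1) = 1000 MHz.
+	 */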
+ fvco *= mdiv;
+ do_div(fvco, pdiv << sdiv);
+
+ return fvco;
+}
+
+static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+	u32 mdiv, pdiv, sdiv, pll_div_ctl0, pll_div_ctl1;
+	short int kdiv;
+	u64 fvco = parent_rate;
+
+	pll_div_ctl0 = readl_relaxed(pll->base + 4);
+	pll_div_ctl1 = readl_relaxed(pll->base + 8);
+ mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
+ pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
+ sdiv = (pll_div_ctl0 & SDIV_MASK) >> SDIV_SHIFT;
+ kdiv = pll_div_ctl1 & KDIV_MASK;
+
+ /* fvco = (m * 65536 + k) * Fin / (p * 65536) */
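+	/*
+	 * k is a signed 16-bit fraction of m, e.g. with hypothetical
+	 * settings fin = 24 MHz, m = 250, k = 32768, p = 3, s = 1:
+	 * (250 * 65536 + 32768) * 24 MHz / (3 * 65536 << 1) = 1002 MHz.
+	 */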
+ fvco *= (mdiv * 65536 + kdiv);
+ pdiv *= 65536;
+
+ do_div(fvco, pdiv << sdiv);
+
+ return fvco;
+}
+
+static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate,
+ u32 pll_div)
+{
+ u32 old_mdiv, old_pdiv;
+
+	old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
+	old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
+
+ return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
+}
+
+static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate,
+ u32 pll_div_ctl0, u32 pll_div_ctl1)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+	old_mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
+	old_pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
+	old_kdiv = (pll_div_ctl1 & KDIV_MASK) >> KDIV_SHIFT;
+
+ return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
+ rate->kdiv != old_kdiv;
+}
+
+static inline bool clk_pll1443x_mp_change(const struct imx_pll14xx_rate_table *rate,
+ u32 pll_div_ctl0, u32 pll_div_ctl1)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+	old_mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
+	old_pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
+	old_kdiv = (pll_div_ctl1 & KDIV_MASK) >> KDIV_SHIFT;
+
+ return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
+ rate->kdiv != old_kdiv;
+}
+
+static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
+{
+ u32 val;
+
+	return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
+			LOCK_TIMEOUT_US);
+}
+
+static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ const struct imx_pll14xx_rate_table *rate;
+ u32 tmp, div_val;
+ int ret;
+
+ rate = imx_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ tmp = readl_relaxed(pll->base + 4);
+
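+	/*
+	 * If only the post-scaler (sdiv) changes, the VCO keeps its lock,
+	 * so update sdiv in place and skip the reset/re-lock sequence.
+	 */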
+ if (!clk_pll1416x_mp_change(rate, tmp)) {
+		tmp &= ~SDIV_MASK;
+ tmp |= rate->sdiv << SDIV_SHIFT;
+ writel_relaxed(tmp, pll->base + 4);
+
+ return 0;
+ }
+
+ /* Bypass clock and set lock to pll output lock */
+ tmp = readl_relaxed(pll->base);
+ tmp |= LOCK_SEL_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ /* Enable RST */
+ tmp &= ~RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ (rate->sdiv << SDIV_SHIFT);
+ writel_relaxed(div_val, pll->base + 0x4);
+
+	/*
+	 * According to the spec, t3 - t2 needs to be greater than both
+	 * 1us and 1/FREF. FREF is FIN / prediv and prediv can be as
+	 * large as 63, so 3us is a safe choice.
+	 */
+ udelay(3);
+
+ /* Disable RST */
+ tmp |= RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
+	/* Wait for lock */
+ ret = clk_pll14xx_wait_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Bypass */
+ tmp &= ~BYPASS_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ return 0;
+}
+
+static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ const struct imx_pll14xx_rate_table *rate;
+ u32 tmp, div_val;
+ int ret;
+
+ rate = imx_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ tmp = readl_relaxed(pll->base + 4);
+ div_val = readl_relaxed(pll->base + 8);
+
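+	/* Same sdiv-only fast path as the 1416x variant above. */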
+ if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) {
+		tmp &= ~SDIV_MASK;
+ tmp |= rate->sdiv << SDIV_SHIFT;
+ writel_relaxed(tmp, pll->base + 4);
+
+ return 0;
+ }
+
+ /* Enable RST */
+ tmp = readl_relaxed(pll->base);
+ tmp &= ~RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ (rate->sdiv << SDIV_SHIFT);
+ writel_relaxed(div_val, pll->base + 0x4);
+ writel_relaxed(rate->kdiv << KDIV_SHIFT, pll->base + 0x8);
+
+	/*
+	 * According to the spec, t3 - t2 needs to be greater than both
+	 * 1us and 1/FREF. FREF is FIN / prediv and prediv can be as
+	 * large as 63, so 3us is a safe choice.
+	 */
+ udelay(3);
+
+ /* Disable RST */
+ tmp |= RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
+	/* Wait for lock */
+ ret = clk_pll14xx_wait_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Clear bypass so the PLL output is used */
+ tmp &= ~BYPASS_MASK;
+ writel_relaxed(tmp, pll->base);
+
+ return 0;
+}
+
+static int clk_pll14xx_prepare(struct clk_hw *hw)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 val;
+
+ /*
+ * Setting RESETB from 0 to 1 releases the PLL from reset;
+ * it resumes normal operation after the lock time
+ */
+ val = readl_relaxed(pll->base + GNRL_CTL);
+ val |= RST_MASK;
+ writel_relaxed(val, pll->base + GNRL_CTL);
+
+ return clk_pll14xx_wait_lock(pll);
+}
+
+static int clk_pll14xx_is_prepared(struct clk_hw *hw)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base + GNRL_CTL);
+
+ return (val & RST_MASK) ? 1 : 0;
+}
+
+static void clk_pll14xx_unprepare(struct clk_hw *hw)
+{
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 val;
+
+ /*
+ * Setting RST to 0 enables power down mode and resets
+ * every digital block
+ */
+ val = readl_relaxed(pll->base + GNRL_CTL);
+ val &= ~RST_MASK;
+ writel_relaxed(val, pll->base + GNRL_CTL);
+}
+
+static const struct clk_ops clk_pll1416x_ops = {
+ .prepare = clk_pll14xx_prepare,
+ .unprepare = clk_pll14xx_unprepare,
+ .is_prepared = clk_pll14xx_is_prepared,
+ .recalc_rate = clk_pll1416x_recalc_rate,
+ .round_rate = clk_pll14xx_round_rate,
+ .set_rate = clk_pll1416x_set_rate,
+};
+
+static const struct clk_ops clk_pll1416x_min_ops = {
+ .recalc_rate = clk_pll1416x_recalc_rate,
+};
+
+static const struct clk_ops clk_pll1443x_ops = {
+ .prepare = clk_pll14xx_prepare,
+ .unprepare = clk_pll14xx_unprepare,
+ .is_prepared = clk_pll14xx_is_prepared,
+ .recalc_rate = clk_pll1443x_recalc_rate,
+ .round_rate = clk_pll14xx_round_rate,
+ .set_rate = clk_pll1443x_set_rate,
+};
+
+struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk)
+{
+ struct clk_pll14xx *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = pll_clk->flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ switch (pll_clk->type) {
+ case PLL_1416X:
+ if (!pll_clk->rate_table)
+ init.ops = &clk_pll1416x_min_ops;
+ else
+ init.ops = &clk_pll1416x_ops;
+ break;
+ case PLL_1443X:
+ init.ops = &clk_pll1443x_ops;
+ break;
+ default:
+ pr_err("%s: Unknown pll type for pll clk %s\n",
+ __func__, name);
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pll->base = base;
+ pll->hw.init = &init;
+ pll->type = pll_clk->type;
+ pll->rate_table = pll_clk->rate_table;
+ pll->rate_count = pll_clk->rate_count;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll %s %lu\n",
+ __func__, name, PTR_ERR(clk));
+ kfree(pll);
+ }
+
+ return clk;
+}
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
index ee7752bace89..9dfd03a95557 100644
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ b/drivers/clk/imx/clk-sccg-pll.c
@@ -25,87 +25,292 @@
#define PLL_DIVF2_MASK GENMASK(12, 7)
#define PLL_DIVR1_MASK GENMASK(27, 25)
#define PLL_DIVR2_MASK GENMASK(24, 19)
+#define PLL_DIVQ_MASK GENMASK(6, 1)
#define PLL_REF_MASK GENMASK(2, 0)
#define PLL_LOCK_MASK BIT(31)
#define PLL_PD_MASK BIT(7)
-#define OSC_25M 25000000
-#define OSC_27M 27000000
+/* These are the specification limits for the SSCG PLL */
+#define PLL_REF_MIN_FREQ 25000000UL
+#define PLL_REF_MAX_FREQ 235000000UL
-#define PLL_SCCG_LOCK_TIMEOUT 70
+#define PLL_STAGE1_MIN_FREQ 1600000000UL
+#define PLL_STAGE1_MAX_FREQ 2400000000UL
+
+#define PLL_STAGE1_REF_MIN_FREQ 25000000UL
+#define PLL_STAGE1_REF_MAX_FREQ 54000000UL
+
+#define PLL_STAGE2_MIN_FREQ 1200000000UL
+#define PLL_STAGE2_MAX_FREQ 2400000000UL
+
+#define PLL_STAGE2_REF_MIN_FREQ 54000000UL
+#define PLL_STAGE2_REF_MAX_FREQ 75000000UL
+
+#define PLL_OUT_MIN_FREQ 20000000UL
+#define PLL_OUT_MAX_FREQ 1200000000UL
+
+#define PLL_DIVR1_MAX 7
+#define PLL_DIVR2_MAX 63
+#define PLL_DIVF1_MAX 63
+#define PLL_DIVF2_MAX 63
+#define PLL_DIVQ_MAX 63
+
+#define PLL_BYPASS_NONE 0x0
+#define PLL_BYPASS1 0x2
+#define PLL_BYPASS2 0x1
+
+#define SSCG_PLL_BYPASS1_MASK BIT(5)
+#define SSCG_PLL_BYPASS2_MASK BIT(4)
+#define SSCG_PLL_BYPASS_MASK GENMASK(5, 4)
+
+#define PLL_SCCG_LOCK_TIMEOUT 70
+
+struct clk_sccg_pll_setup {
+ int divr1, divf1;
+ int divr2, divf2;
+ int divq;
+ int bypass;
+
+ uint64_t vco1;
+ uint64_t vco2;
+ uint64_t fout;
+ uint64_t ref;
+ uint64_t ref_div1;
+ uint64_t ref_div2;
+ uint64_t fout_request;
+ int fout_error;
+};
struct clk_sccg_pll {
struct clk_hw hw;
- void __iomem *base;
+ const struct clk_ops ops;
+
+ void __iomem *base;
+
+ struct clk_sccg_pll_setup setup;
+
+ u8 parent;
+ u8 bypass1;
+ u8 bypass2;
};
#define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw)
-static int clk_pll_wait_lock(struct clk_sccg_pll *pll)
+static int clk_sccg_pll_wait_lock(struct clk_sccg_pll *pll)
{
u32 val;
- return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK, 0,
- PLL_SCCG_LOCK_TIMEOUT);
+ val = readl_relaxed(pll->base + PLL_CFG0);
+
+ /* don't wait for lock if all plls are bypassed */
+ if (!(val & SSCG_PLL_BYPASS2_MASK))
+ return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK,
+ 0, PLL_SCCG_LOCK_TIMEOUT);
+
+ return 0;
}
-static int clk_pll1_is_prepared(struct clk_hw *hw)
+static int clk_sccg_pll2_check_match(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- u32 val;
+ int new_diff = temp_setup->fout - temp_setup->fout_request;
+ int diff = temp_setup->fout_error;
- val = readl_relaxed(pll->base + PLL_CFG0);
- return (val & PLL_PD_MASK) ? 0 : 1;
+ if (abs(diff) > abs(new_diff)) {
+ temp_setup->fout_error = new_diff;
+ memcpy(setup, temp_setup, sizeof(struct clk_sccg_pll_setup));
+
+ if (temp_setup->fout_request == temp_setup->fout)
+ return 0;
+ }
+ return -1;
}
-static unsigned long clk_pll1_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static int clk_sccg_divq_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- u32 val, divf;
+ int ret = -EINVAL;
+
+ for (temp_setup->divq = 0; temp_setup->divq <= PLL_DIVQ_MAX;
+ temp_setup->divq++) {
+ temp_setup->vco2 = temp_setup->vco1;
+ do_div(temp_setup->vco2, temp_setup->divr2 + 1);
+ temp_setup->vco2 *= 2;
+ temp_setup->vco2 *= temp_setup->divf2 + 1;
+ if (temp_setup->vco2 >= PLL_STAGE2_MIN_FREQ &&
+ temp_setup->vco2 <= PLL_STAGE2_MAX_FREQ) {
+ temp_setup->fout = temp_setup->vco2;
+ do_div(temp_setup->fout, 2 * (temp_setup->divq + 1));
+
+ ret = clk_sccg_pll2_check_match(setup, temp_setup);
+ if (!ret) {
+ temp_setup->bypass = PLL_BYPASS1;
+ return ret;
+ }
+ }
+ }
- val = readl_relaxed(pll->base + PLL_CFG2);
- divf = FIELD_GET(PLL_DIVF1_MASK, val);
+ return ret;
+}
+
+static int clk_sccg_divf2_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
+{
+ int ret = -EINVAL;
+
+ for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX;
+ temp_setup->divf2++) {
+ ret = clk_sccg_divq_lookup(setup, temp_setup);
+ if (!ret)
+ return ret;
+ }
- return parent_rate * 2 * (divf + 1);
+ return ret;
}
-static long clk_pll1_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_sccg_divr2_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
{
- unsigned long parent_rate = *prate;
- u32 div;
+ int ret = -EINVAL;
+
+ for (temp_setup->divr2 = 0; temp_setup->divr2 <= PLL_DIVR2_MAX;
+ temp_setup->divr2++) {
+ temp_setup->ref_div2 = temp_setup->vco1;
+ do_div(temp_setup->ref_div2, temp_setup->divr2 + 1);
+ if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ &&
+ temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) {
+ ret = clk_sccg_divf2_lookup(setup, temp_setup);
+ if (!ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int clk_sccg_pll2_find_setup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup,
+ uint64_t ref)
+{
+ int ret = -EINVAL;
- if (!parent_rate)
- return 0;
+ if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
+ return ret;
- div = rate / (parent_rate * 2);
+ temp_setup->vco1 = ref;
- return parent_rate * div * 2;
+ ret = clk_sccg_divr2_lookup(setup, temp_setup);
+ return ret;
}
-static int clk_pll1_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int clk_sccg_divf1_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- u32 val;
- u32 divf;
+ int ret = -EINVAL;
- if (!parent_rate)
- return -EINVAL;
+ for (temp_setup->divf1 = 0; temp_setup->divf1 <= PLL_DIVF1_MAX;
+ temp_setup->divf1++) {
+ uint64_t vco1 = temp_setup->ref;
- divf = rate / (parent_rate * 2);
+ do_div(vco1, temp_setup->divr1 + 1);
+ vco1 *= 2;
+ vco1 *= temp_setup->divf1 + 1;
- val = readl_relaxed(pll->base + PLL_CFG2);
- val &= ~PLL_DIVF1_MASK;
- val |= FIELD_PREP(PLL_DIVF1_MASK, divf - 1);
- writel_relaxed(val, pll->base + PLL_CFG2);
+ ret = clk_sccg_pll2_find_setup(setup, temp_setup, vco1);
+ if (!ret) {
+ temp_setup->bypass = PLL_BYPASS_NONE;
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int clk_sccg_divr1_lookup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup)
+{
+ int ret = -EINVAL;
+
+ for (temp_setup->divr1 = 0; temp_setup->divr1 <= PLL_DIVR1_MAX;
+ temp_setup->divr1++) {
+ temp_setup->ref_div1 = temp_setup->ref;
+ do_div(temp_setup->ref_div1, temp_setup->divr1 + 1);
+ if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ &&
+ temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) {
+ ret = clk_sccg_divf1_lookup(setup, temp_setup);
+ if (!ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int clk_sccg_pll1_find_setup(struct clk_sccg_pll_setup *setup,
+ struct clk_sccg_pll_setup *temp_setup,
+ uint64_t ref)
+{
+ int ret = -EINVAL;
+
+ if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
+ return ret;
+
+ temp_setup->ref = ref;
+
+ ret = clk_sccg_divr1_lookup(setup, temp_setup);
+
+ return ret;
+}
+
+static int clk_sccg_pll_find_setup(struct clk_sccg_pll_setup *setup,
+ uint64_t prate,
+ uint64_t rate, int try_bypass)
+{
+ struct clk_sccg_pll_setup temp_setup;
+ int ret = -EINVAL;
+
+ memset(&temp_setup, 0, sizeof(struct clk_sccg_pll_setup));
+ memset(setup, 0, sizeof(struct clk_sccg_pll_setup));
+
+ temp_setup.fout_error = PLL_OUT_MAX_FREQ;
+ temp_setup.fout_request = rate;
+
+ switch (try_bypass) {
- return clk_pll_wait_lock(pll);
+ case PLL_BYPASS2:
+ if (prate == rate) {
+ setup->bypass = PLL_BYPASS2;
+ setup->fout = rate;
+ ret = 0;
+ }
+ break;
+
+ case PLL_BYPASS1:
+ ret = clk_sccg_pll2_find_setup(setup, &temp_setup, prate);
+ break;
+
+ case PLL_BYPASS_NONE:
+ ret = clk_sccg_pll1_find_setup(setup, &temp_setup, prate);
+ break;
+ }
+
+ return ret;
+}
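+
+/*
+ * Note: the setup search above is a nested brute-force walk over the
+ * divider fields (divr1 -> divf1 -> divr2 -> divf2 -> divq). Any
+ * combination whose intermediate reference or VCO frequency falls
+ * outside the PLL_STAGE*_MIN/MAX limits is pruned, and the candidate
+ * with the smallest |fout - fout_request| error is kept.
+ */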
+
+static int clk_sccg_pll_is_prepared(struct clk_hw *hw)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+
+ u32 val = readl_relaxed(pll->base + PLL_CFG0);
+
+ return (val & PLL_PD_MASK) ? 0 : 1;
}
-static int clk_pll1_prepare(struct clk_hw *hw)
+static int clk_sccg_pll_prepare(struct clk_hw *hw)
{
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
u32 val;
@@ -114,10 +319,10 @@ static int clk_pll1_prepare(struct clk_hw *hw)
val &= ~PLL_PD_MASK;
writel_relaxed(val, pll->base + PLL_CFG0);
- return clk_pll_wait_lock(pll);
+ return clk_sccg_pll_wait_lock(pll);
}
-static void clk_pll1_unprepare(struct clk_hw *hw)
+static void clk_sccg_pll_unprepare(struct clk_hw *hw)
{
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
u32 val;
@@ -125,121 +330,208 @@ static void clk_pll1_unprepare(struct clk_hw *hw)
val = readl_relaxed(pll->base + PLL_CFG0);
val |= PLL_PD_MASK;
writel_relaxed(val, pll->base + PLL_CFG0);
-
}
-static unsigned long clk_pll2_recalc_rate(struct clk_hw *hw,
+static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- u32 val, ref, divr1, divf1, divr2, divf2;
+ u32 val, divr1, divf1, divr2, divf2, divq;
u64 temp64;
- val = readl_relaxed(pll->base + PLL_CFG0);
- switch (FIELD_GET(PLL_REF_MASK, val)) {
- case 0:
- ref = OSC_25M;
- break;
- case 1:
- ref = OSC_27M;
- break;
- default:
- ref = OSC_25M;
- break;
- }
-
val = readl_relaxed(pll->base + PLL_CFG2);
divr1 = FIELD_GET(PLL_DIVR1_MASK, val);
divr2 = FIELD_GET(PLL_DIVR2_MASK, val);
divf1 = FIELD_GET(PLL_DIVF1_MASK, val);
divf2 = FIELD_GET(PLL_DIVF2_MASK, val);
-
- temp64 = ref * 2;
- temp64 *= (divf1 + 1) * (divf2 + 1);
-
- do_div(temp64, (divr1 + 1) * (divr2 + 1));
+ divq = FIELD_GET(PLL_DIVQ_MASK, val);
+
+ temp64 = parent_rate;
+
+ val = clk_readl(pll->base + PLL_CFG0);
+ if (val & SSCG_PLL_BYPASS2_MASK) {
+ temp64 = parent_rate;
+ } else if (val & SSCG_PLL_BYPASS1_MASK) {
+ temp64 *= divf2;
+ do_div(temp64, (divr2 + 1) * (divq + 1));
+ } else {
+ temp64 *= 2;
+ temp64 *= (divf1 + 1) * (divf2 + 1);
+ do_div(temp64, (divr1 + 1) * (divr2 + 1) * (divq + 1));
+ }
return temp64;
}
-static long clk_pll2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
- u32 div;
- unsigned long parent_rate = *prate;
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sccg_pll_setup *setup = &pll->setup;
+ u32 val;
- if (!parent_rate)
- return 0;
+ /* set bypass here too since the parent might be the same */
+ val = clk_readl(pll->base + PLL_CFG0);
+ val &= ~SSCG_PLL_BYPASS_MASK;
+ val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
+ clk_writel(val, pll->base + PLL_CFG0);
- div = rate / parent_rate;
+ val = readl_relaxed(pll->base + PLL_CFG2);
+ val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
+ val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK);
+ val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1);
+ val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2);
+ val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1);
+ val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2);
+ val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
+ writel_relaxed(val, pll->base + PLL_CFG2);
- return parent_rate * div;
+ return clk_sccg_pll_wait_lock(pll);
}
-static int clk_pll2_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static u8 clk_sccg_pll_get_parent(struct clk_hw *hw)
{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
u32 val;
- u32 divf;
+ u8 ret = pll->parent;
+
+ val = clk_readl(pll->base + PLL_CFG0);
+ if (val & SSCG_PLL_BYPASS2_MASK)
+ ret = pll->bypass2;
+ else if (val & SSCG_PLL_BYPASS1_MASK)
+ ret = pll->bypass1;
+ return ret;
+}
+
+static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
+{
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ u32 val;
- if (!parent_rate)
- return -EINVAL;
+ val = clk_readl(pll->base + PLL_CFG0);
+ val &= ~SSCG_PLL_BYPASS_MASK;
+ val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
+ clk_writel(val, pll->base + PLL_CFG0);
- divf = rate / parent_rate;
+ return clk_sccg_pll_wait_lock(pll);
+}
- val = readl_relaxed(pll->base + PLL_CFG2);
- val &= ~PLL_DIVF2_MASK;
- val |= FIELD_PREP(PLL_DIVF2_MASK, divf - 1);
- writel_relaxed(val, pll->base + PLL_CFG2);
+static int __clk_sccg_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req,
+ uint64_t min,
+ uint64_t max,
+ uint64_t rate,
+ int bypass)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sccg_pll_setup *setup = &pll->setup;
+ struct clk_hw *parent_hw = NULL;
+ int bypass_parent_index;
+ int ret = -EINVAL;
+
+ req->max_rate = max;
+ req->min_rate = min;
+
+ switch (bypass) {
+ case PLL_BYPASS2:
+ bypass_parent_index = pll->bypass2;
+ break;
+ case PLL_BYPASS1:
+ bypass_parent_index = pll->bypass1;
+ break;
+ default:
+ bypass_parent_index = pll->parent;
+ break;
+ }
+
+ parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index);
+ ret = __clk_determine_rate(parent_hw, req);
+ if (!ret) {
+ ret = clk_sccg_pll_find_setup(setup, req->rate,
+ rate, bypass);
+ }
+
+ req->best_parent_hw = parent_hw;
+ req->best_parent_rate = req->rate;
+ req->rate = setup->fout;
- return clk_pll_wait_lock(pll);
+ return ret;
}
-static const struct clk_ops clk_sccg_pll1_ops = {
- .is_prepared = clk_pll1_is_prepared,
- .recalc_rate = clk_pll1_recalc_rate,
- .round_rate = clk_pll1_round_rate,
- .set_rate = clk_pll1_set_rate,
-};
+static int clk_sccg_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sccg_pll_setup *setup = &pll->setup;
+ uint64_t rate = req->rate;
+ uint64_t min = req->min_rate;
+ uint64_t max = req->max_rate;
+ int ret = -EINVAL;
+
+ if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
+ return ret;
+
+ ret = __clk_sccg_pll_determine_rate(hw, req, req->rate, req->rate,
+ rate, PLL_BYPASS2);
+ if (!ret)
+ return ret;
+
+ ret = __clk_sccg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
+ PLL_STAGE1_REF_MAX_FREQ, rate,
+ PLL_BYPASS1);
+ if (!ret)
+ return ret;
+
+ ret = __clk_sccg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
+ PLL_REF_MAX_FREQ, rate,
+ PLL_BYPASS_NONE);
+ if (!ret)
+ return ret;
+
+ if (setup->fout >= min && setup->fout <= max)
+ ret = 0;
+
+ return ret;
+}
-static const struct clk_ops clk_sccg_pll2_ops = {
- .prepare = clk_pll1_prepare,
- .unprepare = clk_pll1_unprepare,
- .recalc_rate = clk_pll2_recalc_rate,
- .round_rate = clk_pll2_round_rate,
- .set_rate = clk_pll2_set_rate,
+static const struct clk_ops clk_sccg_pll_ops = {
+ .prepare = clk_sccg_pll_prepare,
+ .unprepare = clk_sccg_pll_unprepare,
+ .is_prepared = clk_sccg_pll_is_prepared,
+ .recalc_rate = clk_sccg_pll_recalc_rate,
+ .set_rate = clk_sccg_pll_set_rate,
+ .set_parent = clk_sccg_pll_set_parent,
+ .get_parent = clk_sccg_pll_get_parent,
+ .determine_rate = clk_sccg_pll_determine_rate,
};
struct clk *imx_clk_sccg_pll(const char *name,
- const char *parent_name,
+ const char * const *parent_names,
+ u8 num_parents,
+ u8 parent, u8 bypass1, u8 bypass2,
void __iomem *base,
- enum imx_sccg_pll_type pll_type)
+ unsigned long flags)
{
struct clk_sccg_pll *pll;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
- switch (pll_type) {
- case SCCG_PLL1:
- init.ops = &clk_sccg_pll1_ops;
- break;
- case SCCG_PLL2:
- init.ops = &clk_sccg_pll2_ops;
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
+ pll->parent = parent;
+ pll->bypass1 = bypass1;
+ pll->bypass2 = bypass2;
+
init.name = name;
- init.flags = 0;
- init.parent_names = &parent_name;
- init.num_parents = 1;
+ init.ops = &clk_sccg_pll_ops;
+
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
pll->base = base;
pll->hw.init = &init;
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index 7ccf7edfe11c..fbef740704d0 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -4,12 +4,17 @@
* Dong Aisheng <aisheng.dong@nxp.com>
*/
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/arm-smccc.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "clk-scu.h"
+#define IMX_SIP_CPUFREQ 0xC2000001
+#define IMX_SIP_SET_CPUFREQ 0x00
+
static struct imx_sc_ipc *ccm_ipc_handle;
/*
@@ -66,6 +71,41 @@ struct imx_sc_msg_get_clock_rate {
};
/*
+ * struct imx_sc_msg_get_clock_parent - clock get parent protocol
+ * @hdr: SCU protocol header
+ * @req: get parent request protocol
+ * @resp: get parent response protocol
+ *
+ * This structure describes the SCU RPC protocol for getting a clock's parent
+ */
+struct imx_sc_msg_get_clock_parent {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct req_get_clock_parent {
+ __le16 resource;
+ u8 clk;
+ } __packed req;
+ struct resp_get_clock_parent {
+ u8 parent;
+ } resp;
+ } data;
+};
+
+/*
+ * struct imx_sc_msg_set_clock_parent - clock set parent protocol
+ * @hdr: SCU protocol header
+ * @req: set parent request protocol
+ *
+ * This structure describes the SCU RPC protocol for setting a clock's parent
+ */
+struct imx_sc_msg_set_clock_parent {
+ struct imx_sc_rpc_msg hdr;
+ __le16 resource;
+ u8 clk;
+ u8 parent;
+} __packed;
+
+/*
* struct imx_sc_msg_req_clock_enable - clock gate protocol
* @hdr: SCU protocol header
* @resource: clock resource to gate
@@ -145,6 +185,25 @@ static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
return rate;
}
+static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+ struct arm_smccc_res res;
+ unsigned long cluster_id;
+
+ if (clk->rsrc_id == IMX_SC_R_A35)
+ cluster_id = 0;
+ else
+ return -EINVAL;
+
+ /* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
+ arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
+ cluster_id, rate, 0, 0, 0, 0, &res);
+
+ return 0;
+}
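+
+/*
+ * Note: IMX_SIP_CPUFREQ is a vendor-specific SiP (Silicon Provider)
+ * SMC function id; arm_smccc_smc() traps to the ARM Trusted Firmware
+ * running at EL3, which performs the actual CPU PLL reprogramming on
+ * behalf of Linux.
+ */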
+
/*
* clk_scu_set_rate - Set rate for a SCU clock
* @hw: clock to change rate for
@@ -173,6 +232,49 @@ static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}
+static u8 clk_scu_get_parent(struct clk_hw *hw)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+ struct imx_sc_msg_get_clock_parent msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
+ hdr->size = 2;
+
+ msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
+ msg.data.req.clk = clk->clk_type;
+
+ ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("%s: failed to get clock parent %d\n",
+ clk_hw_get_name(hw), ret);
+ return 0;
+ }
+
+ return msg.data.resp.parent;
+}
+
+static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+ struct imx_sc_msg_set_clock_parent msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
+ hdr->size = 2;
+
+ msg.resource = cpu_to_le16(clk->rsrc_id);
+ msg.clk = clk->clk_type;
+ msg.parent = index;
+
+ return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
+}
+
static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
u8 clk, bool enable, bool autog)
{
@@ -228,11 +330,22 @@ static const struct clk_ops clk_scu_ops = {
.recalc_rate = clk_scu_recalc_rate,
.round_rate = clk_scu_round_rate,
.set_rate = clk_scu_set_rate,
+ .get_parent = clk_scu_get_parent,
+ .set_parent = clk_scu_set_parent,
+ .prepare = clk_scu_prepare,
+ .unprepare = clk_scu_unprepare,
+};
+
+static const struct clk_ops clk_scu_cpu_ops = {
+ .recalc_rate = clk_scu_recalc_rate,
+ .round_rate = clk_scu_round_rate,
+ .set_rate = clk_scu_atf_set_cpu_rate,
.prepare = clk_scu_prepare,
.unprepare = clk_scu_unprepare,
};
-struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type)
+struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type)
{
struct clk_init_data init;
struct clk_scu *clk;
@@ -248,7 +361,13 @@ struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type)
init.name = name;
- init.ops = &clk_scu_ops;
- init.num_parents = 0;
+ if (rsrc_id == IMX_SC_R_A35)
+ init.ops = &clk_scu_cpu_ops;
+ else
+ init.ops = &clk_scu_ops;
+ init.parent_names = parents;
+ init.num_parents = num_parents;
+
/*
* Note on MX8, the clocks are tightly coupled with power domain
* that once the power domain is off, the clock status may be
diff --git a/drivers/clk/imx/clk-scu.h b/drivers/clk/imx/clk-scu.h
index 52c1746ec988..2bcfaf06a458 100644
--- a/drivers/clk/imx/clk-scu.h
+++ b/drivers/clk/imx/clk-scu.h
@@ -10,7 +10,21 @@
#include <linux/firmware/imx/sci.h>
int imx_clk_scu_init(void);
-struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type);
+
+struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type);
+
+static inline struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id,
+ u8 clk_type)
+{
+ return __imx_clk_scu(name, NULL, 0, rsrc_id, clk_type);
+}
+
+static inline struct clk_hw *imx_clk_scu2(const char *name, const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type)
+{
+ return __imx_clk_scu(name, parents, num_parents, rsrc_id, clk_type);
+}
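+
+/*
+ * Illustrative usage sketch (the clock names, parent list and resource
+ * ids below are made-up examples, not taken from a real clock tree):
+ *
+ *	static const char * const enet0_sels[] = { "pll0", "pll1", };
+ *
+ *	imx_clk_scu("uart0_clk", IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER);
+ *	imx_clk_scu2("enet0_clk", enet0_sels, ARRAY_SIZE(enet0_sels),
+ *		     IMX_SC_R_ENET_0, IMX_SC_PM_CLK_PER);
+ */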
struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg,
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 6dae54325a91..a334667c450a 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -203,6 +203,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
anatop_base = of_iomap(np, 0);
BUG_ON(!anatop_base);
+ of_node_put(np);
np = ccm_node;
ccm_base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 028312de21b8..5748ec8673e4 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -27,6 +27,30 @@ enum imx_sccg_pll_type {
SCCG_PLL2,
};
+enum imx_pll14xx_type {
+ PLL_1416X,
+ PLL_1443X,
+};
+
+/* NOTE: Rate table should be kept sorted in descending order. */
+struct imx_pll14xx_rate_table {
+ unsigned int rate;
+ unsigned int pdiv;
+ unsigned int mdiv;
+ unsigned int sdiv;
+ unsigned int kdiv;
+};
+
+struct imx_pll14xx_clk {
+ enum imx_pll14xx_type type;
+ const struct imx_pll14xx_rate_table *rate_table;
+ int rate_count;
+ int flags;
+};
+
+struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
+
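+/*
+ * Illustrative sketch of how a 1443x PLL could be described and
+ * registered (the divider values below are made up for the example,
+ * not datasheet settings):
+ *
+ *	static const struct imx_pll14xx_rate_table audio_pll_tbl[] = {
+ *		{ .rate = 786432000U, .mdiv = 655, .pdiv = 5, .sdiv = 2,
+ *		  .kdiv = 23593, },
+ *	};
+ *
+ *	static const struct imx_pll14xx_clk audio_pll = {
+ *		.type = PLL_1443X,
+ *		.rate_table = audio_pll_tbl,
+ *		.rate_count = ARRAY_SIZE(audio_pll_tbl),
+ *	};
+ *
+ *	clk = imx_clk_pll14xx("audio_pll1", "osc_24m", base, &audio_pll);
+ */
+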
struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
const char *parent, void __iomem *base);
@@ -36,9 +60,12 @@ struct clk *imx_clk_pllv2(const char *name, const char *parent,
struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
void __iomem *base);
-struct clk *imx_clk_sccg_pll(const char *name, const char *parent_name,
- void __iomem *base,
- enum imx_sccg_pll_type pll_type);
+struct clk *imx_clk_sccg_pll(const char *name,
+ const char * const *parent_names,
+ u8 num_parents,
+ u8 parent, u8 bypass1, u8 bypass2,
+ void __iomem *base,
+ unsigned long flags);
enum imx_pllv3_type {
IMX_PLLV3_GENERIC,
@@ -329,7 +356,8 @@ static inline struct clk *imx_clk_mux_flags(const char *name,
}
static inline struct clk *imx_clk_mux2_flags(const char *name,
- void __iomem *reg, u8 shift, u8 width, const char **parents,
+ void __iomem *reg, u8 shift, u8 width,
+ const char * const *parents,
int num_parents, unsigned long flags)
{
return clk_register_mux(NULL, name, parents, num_parents,
@@ -354,7 +382,7 @@ struct clk *imx_clk_cpu(const char *name, const char *parent_name,
struct clk *step);
struct clk *imx8m_clk_composite_flags(const char *name,
- const char **parent_names,
+ const char * const *parent_names,
int num_parents, void __iomem *reg,
unsigned long flags);
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 5ef7d9ba2195..510b685212d3 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -83,7 +83,7 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
const struct ingenic_cgu_clk_info *clk_info;
const struct ingenic_cgu_pll_info *pll_info;
unsigned m, n, od_enc, od;
- bool bypass, enable;
+ bool bypass;
unsigned long flags;
u32 ctl;
@@ -103,7 +103,6 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
od_enc &= GENMASK(pll_info->od_bits - 1, 0);
bypass = !pll_info->no_bypass_bit &&
!!(ctl & BIT(pll_info->bypass_bit));
- enable = !!(ctl & BIT(pll_info->enable_bit));
if (bypass)
return parent_rate;
@@ -426,16 +425,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info;
- long rate = *parent_rate;
+ unsigned int div = 1;
clk_info = &cgu->clock_info[ingenic_clk->idx];
if (clk_info->type & CGU_CLK_DIV)
- rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
+ div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
else if (clk_info->type & CGU_CLK_FIXDIV)
- rate /= clk_info->fixdiv.div;
+ div = clk_info->fixdiv.div;
- return rate;
+ return DIV_ROUND_UP(*parent_rate, div);
}
static int
@@ -455,7 +454,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
if (clk_info->type & CGU_CLK_DIV) {
div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
- rate = parent_rate / div;
+ rate = DIV_ROUND_UP(parent_rate, div);
if (rate != req_rate)
return -EINVAL;
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 502bcbb61b04..e12716d8ce3c 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -80,7 +80,7 @@ struct ingenic_cgu_mux_info {
* @reg: offset of the divider control register within the CGU
* @shift: number of bits to left shift the divide value by (ie. the index of
* the lowest bit of the divide value within its control register)
- * @div: number of bits to divide the divider value by (i.e. if the
+ * @div: number to divide the divider value by (i.e. if the
* effective divider value is the value written to the register
* multiplied by some constant)
* @bits: the size of the divide value in bits
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 4479c102e899..b86edd328249 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -165,7 +165,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 29, 1 },
.div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 },
- .gate = { CGU_REG_SCR, 6 },
+ .gate = { CGU_REG_SCR, 6, true },
},
/* Gate-only clocks */
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 934bf0e45e26..85daf826619a 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -157,7 +157,8 @@ struct clk *mtk_clk_register_gate(
int clr_ofs,
int sta_ofs,
u8 bit,
- const struct clk_ops *ops)
+ const struct clk_ops *ops,
+ unsigned long flags)
{
struct mtk_clk_gate *cg;
struct clk *clk;
@@ -168,7 +169,7 @@ struct clk *mtk_clk_register_gate(
return ERR_PTR(-ENOMEM);
init.name = name;
- init.flags = CLK_SET_RATE_PARENT;
+ init.flags = flags | CLK_SET_RATE_PARENT;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
init.ops = ops;
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 72ef89b3ad7b..9f766dfe1d57 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -47,6 +47,7 @@ struct clk *mtk_clk_register_gate(
int clr_ofs,
int sta_ofs,
u8 bit,
- const struct clk_ops *ops);
+ const struct clk_ops *ops,
+ unsigned long flags);
#endif /* __DRV_CLK_GATE_H */
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index ab6ab07f53e6..905a2316f6a7 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -535,8 +535,8 @@ static const struct mtk_composite top_muxes[] = {
0x0080, 8, 2, 15),
MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents,
0x0080, 16, 3, 23),
- MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents,
- 0x0080, 24, 2, 31),
+ MUX_GATE_FLAGS_2(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents,
+ 0x0080, 24, 2, 31, 0, CLK_MUX_ROUND_CLOSEST),
MUX_GATE(CLK_TOP_TVE_SEL, "tve_sel", tve_parents,
0x0090, 0, 3, 7),
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 991d4093726e..b09cb3d99f66 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -223,6 +223,8 @@ static const struct mtk_fixed_factor top_divs[] = {
4),
FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1_ck", 1,
3),
+ FACTOR(CLK_TOP_APLL2_D3, "apll2_d3", "apll2_ck", 1,
+ 3),
};
static const char * const axi_parents[] = {
@@ -594,7 +596,8 @@ static const char * const a1sys_hp_parents[] = {
"apll1_ck",
"apll1_d2",
"apll1_d4",
- "apll1_d8"
+ "apll1_d8",
+ "apll1_d3"
};
static const char * const a2sys_hp_parents[] = {
@@ -602,7 +605,8 @@ static const char * const a2sys_hp_parents[] = {
"apll2_ck",
"apll2_d2",
"apll2_d4",
- "apll2_d8"
+ "apll2_d8",
+ "apll2_d3"
};
static const char * const asm_l_parents[] = {
@@ -1463,7 +1467,6 @@ static struct platform_driver clk_mt2712_drv = {
.probe = clk_mt2712_probe,
.driver = {
.name = "clk-mt2712",
- .owner = THIS_MODULE,
.of_match_table = of_match_clk_mt2712,
},
};
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index 5702bc974ed9..c2b46b184b9a 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -324,6 +324,10 @@ static const char * const anc_md32_parents[] = {
"univpll_d5",
};
+/*
+ * Clock mux ddrphycfg is needed by the DRAM controller. We mark it as
+ * critical as otherwise the system will hang after boot.
+ */
static const struct mtk_composite top_muxes[] = {
MUX(CLK_TOP_MUX_ULPOSC_AXI_CK_MUX_PRE, "ulposc_axi_ck_mux_pre",
ulposc_axi_ck_mux_pre_parents, 0x0040, 3, 1),
@@ -331,8 +335,8 @@ static const struct mtk_composite top_muxes[] = {
ulposc_axi_ck_mux_parents, 0x0040, 2, 1),
MUX(CLK_TOP_MUX_AXI, "axi_sel", axi_parents,
0x0040, 0, 2),
- MUX(CLK_TOP_MUX_DDRPHYCFG, "ddrphycfg_sel", ddrphycfg_parents,
- 0x0040, 16, 2),
+ MUX_FLAGS(CLK_TOP_MUX_DDRPHYCFG, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x0040, 16, 2, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
MUX(CLK_TOP_MUX_MM, "mm_sel", mm_parents,
0x0040, 24, 2),
MUX_GATE(CLK_TOP_MUX_PWM, "pwm_sel", pwm_parents, 0x0050, 0, 3, 7),
@@ -424,33 +428,45 @@ static const struct mtk_gate_regs infra2_cg_regs = {
.sta_ofs = 0x00b0,
};
-#define GATE_ICG0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
+#define GATE_ICG0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
}
-#define GATE_ICG1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
+#define GATE_ICG1(_id, _name, _parent, _shift) \
+ GATE_ICG1_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ .flags = _flags, \
}
-#define GATE_ICG2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
+#define GATE_ICG2(_id, _name, _parent, _shift) \
+ GATE_ICG2_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ .flags = _flags, \
}
+/*
+ * Clock gates dramc and dramc_b are needed by the DRAM controller.
+ * We mark them as critical as otherwise the system will hang after boot.
+ */
static const struct mtk_gate infra_clks[] = {
GATE_ICG0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "ulposc", 0),
GATE_ICG0(CLK_INFRA_PMIC_AP, "infra_pmic_ap", "pmicspi_sel", 1),
@@ -505,7 +521,8 @@ static const struct mtk_gate infra_clks[] = {
GATE_ICG1(CLK_INFRA_CCIF_AP, "infra_ccif_ap", "axi_sel", 23),
GATE_ICG1(CLK_INFRA_AUDIO, "infra_audio", "axi_sel", 25),
GATE_ICG1(CLK_INFRA_CCIF_MD, "infra_ccif_md", "axi_sel", 26),
- GATE_ICG1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", "clk26m", 31),
+ GATE_ICG1_FLAGS(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m",
+ "clk26m", 31, CLK_IS_CRITICAL),
GATE_ICG2(CLK_INFRA_I2C4, "infra_i2c4", "axi_sel", 0),
GATE_ICG2(CLK_INFRA_I2C_APPM, "infra_i2c_appm", "axi_sel", 1),
GATE_ICG2(CLK_INFRA_I2C_GPUPM, "infra_i2c_gpupm", "axi_sel", 2),
@@ -516,7 +533,8 @@ static const struct mtk_gate infra_clks[] = {
GATE_ICG2(CLK_INFRA_I2C5, "infra_i2c5", "axi_sel", 7),
GATE_ICG2(CLK_INFRA_SYS_CIRQ, "infra_sys_cirq", "axi_sel", 8),
GATE_ICG2(CLK_INFRA_SPI1, "infra_spi1", "spi_sel", 10),
- GATE_ICG2(CLK_INFRA_DRAMC_B_F26M, "infra_dramc_b_f26m", "clk26m", 11),
+ GATE_ICG2_FLAGS(CLK_INFRA_DRAMC_B_F26M, "infra_dramc_b_f26m",
+ "clk26m", 11, CLK_IS_CRITICAL),
GATE_ICG2(CLK_INFRA_ANC_MD32, "infra_anc_md32", "anc_md32_sel", 12),
GATE_ICG2(CLK_INFRA_ANC_MD32_32K, "infra_anc_md32_32k", "clk26m", 13),
GATE_ICG2(CLK_INFRA_DVFS_SPM1, "infra_dvfs_spm1", "axi_sel", 15),
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 96c292c3e440..deedeb3ea33b 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -533,7 +533,7 @@ static const char * const ca53_parents[] __initconst = {
"univpll"
};
-static const char * const ca57_parents[] __initconst = {
+static const char * const ca72_parents[] __initconst = {
"clk26m",
"armca15pll",
"mainpll",
@@ -542,7 +542,7 @@ static const char * const ca57_parents[] __initconst = {
static const struct mtk_composite cpu_muxes[] __initconst = {
MUX(CLK_INFRA_CA53SEL, "infra_ca53_sel", ca53_parents, 0x0000, 0, 2),
- MUX(CLK_INFRA_CA57SEL, "infra_ca57_sel", ca57_parents, 0x0000, 2, 2),
+ MUX(CLK_INFRA_CA72SEL, "infra_ca72_sel", ca72_parents, 0x0000, 2, 2),
};
static const struct mtk_composite top_muxes[] __initconst = {
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 9c0ae4278a94..5531dd2e496d 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -130,7 +130,7 @@ int mtk_clk_register_gates(struct device_node *node,
gate->regs->set_ofs,
gate->regs->clr_ofs,
gate->regs->sta_ofs,
- gate->shift, gate->ops);
+ gate->shift, gate->ops, gate->flags);
if (IS_ERR(clk)) {
pr_err("Failed to register clk %s: %ld\n",
@@ -167,7 +167,7 @@ struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
mux->mask = BIT(mc->mux_width) - 1;
mux->shift = mc->mux_shift;
mux->lock = lock;
-
+ mux->flags = mc->mux_flags;
mux_hw = &mux->hw;
mux_ops = &clk_mux_ops;
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index f83c2bbb677e..fb27b5bf30d9 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -81,15 +81,13 @@ struct mtk_composite {
signed char divider_shift;
signed char divider_width;
+ u8 mux_flags;
+
signed char num_parents;
};
-/*
- * In case the rate change propagation to parent clocks is undesirable,
- * this macro allows to specify the clock flags manually.
- */
-#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
- _gate, _flags) { \
+#define MUX_GATE_FLAGS_2(_id, _name, _parents, _reg, _shift, \
+ _width, _gate, _flags, _muxflags) { \
.id = _id, \
.name = _name, \
.mux_reg = _reg, \
@@ -101,9 +99,19 @@ struct mtk_composite {
.parent_names = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
.flags = _flags, \
+ .mux_flags = _muxflags, \
}
/*
+ * In case the rate change propagation to parent clocks is undesirable,
+ * this macro allows to specify the clock flags manually.
+ */
+#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
+ _gate, _flags) \
+ MUX_GATE_FLAGS_2(_id, _name, _parents, _reg, \
+ _shift, _width, _gate, _flags, 0)
+
+/*
* Unless necessary, all MUX_GATE clocks propagate rate changes to their
* parent clock by default.
*/
@@ -111,7 +119,11 @@ struct mtk_composite {
MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \
_gate, CLK_SET_RATE_PARENT)
-#define MUX(_id, _name, _parents, _reg, _shift, _width) { \
+#define MUX(_id, _name, _parents, _reg, _shift, _width) \
+ MUX_FLAGS(_id, _name, _parents, _reg, \
+ _shift, _width, CLK_SET_RATE_PARENT)
+
+#define MUX_FLAGS(_id, _name, _parents, _reg, _shift, _width, _flags) { \
.id = _id, \
.name = _name, \
.mux_reg = _reg, \
@@ -121,7 +133,7 @@ struct mtk_composite {
.divider_shift = -1, \
.parent_names = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
- .flags = CLK_SET_RATE_PARENT, \
+ .flags = _flags, \
}
#define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, \
@@ -158,6 +170,7 @@ struct mtk_gate {
const struct mtk_gate_regs *regs;
int shift;
const struct clk_ops *ops;
+ unsigned long flags;
};
int mtk_clk_register_gates(struct device_node *node,
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index efaa70f682b4..3858747f5438 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -1,27 +1,52 @@
-config COMMON_CLK_AMLOGIC
- bool
- depends on ARCH_MESON || COMPILE_TEST
- select COMMON_CLK_REGMAP_MESON
+config COMMON_CLK_MESON_INPUT
+ tristate
-config COMMON_CLK_AMLOGIC_AUDIO
- bool
- depends on ARCH_MESON || COMPILE_TEST
- select COMMON_CLK_AMLOGIC
+config COMMON_CLK_MESON_REGMAP
+ tristate
+ select REGMAP
-config COMMON_CLK_MESON_AO
- bool
- depends on OF
- depends on ARCH_MESON || COMPILE_TEST
- select COMMON_CLK_REGMAP_MESON
+config COMMON_CLK_MESON_DUALDIV
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_MPLL
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_PHASE
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_PLL
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_SCLK_DIV
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_VID_PLL_DIV
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+
+config COMMON_CLK_MESON_AO_CLKC
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_INPUT
select RESET_CONTROLLER
-config COMMON_CLK_REGMAP_MESON
- bool
- select REGMAP
+config COMMON_CLK_MESON_EE_CLKC
+ tristate
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_INPUT
config COMMON_CLK_MESON8B
bool
- select COMMON_CLK_AMLOGIC
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_MPLL
+ select COMMON_CLK_MESON_PLL
+ select MFD_SYSCON
select RESET_CONTROLLER
help
Support for the clock controller on AmLogic S802 (Meson8),
@@ -30,8 +55,14 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
bool
- select COMMON_CLK_AMLOGIC
- select COMMON_CLK_MESON_AO
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_DUALDIV
+ select COMMON_CLK_MESON_VID_PLL_DIV
+ select COMMON_CLK_MESON_MPLL
+ select COMMON_CLK_MESON_PLL
+ select COMMON_CLK_MESON_AO_CLKC
+ select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -39,8 +70,13 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
bool
- select COMMON_CLK_AMLOGIC
- select COMMON_CLK_MESON_AO
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_DUALDIV
+ select COMMON_CLK_MESON_MPLL
+ select COMMON_CLK_MESON_PLL
+ select COMMON_CLK_MESON_AO_CLKC
+ select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
@@ -48,9 +84,26 @@ config COMMON_CLK_AXG
config COMMON_CLK_AXG_AUDIO
tristate "Meson AXG Audio Clock Controller Driver"
- depends on COMMON_CLK_AXG
- select COMMON_CLK_AMLOGIC_AUDIO
- select MFD_SYSCON
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_INPUT
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_PHASE
+ select COMMON_CLK_MESON_SCLK_DIV
+ select REGMAP_MMIO
help
Support for the audio clock controller on AmLogic A113D devices,
aka axg. Say Y if you want the audio subsystem to work.
+
+config COMMON_CLK_G12A
+ bool
+ depends on ARCH_MESON
+ select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_DUALDIV
+ select COMMON_CLK_MESON_MPLL
+ select COMMON_CLK_MESON_PLL
+ select COMMON_CLK_MESON_AO_CLKC
+ select COMMON_CLK_MESON_EE_CLKC
+ select MFD_SYSCON
+ help
+ Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
+ devices, aka g12a. Say Y if you want peripherals to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index a849aa809825..021fc290e749 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -1,13 +1,20 @@
-#
-# Makefile for Meson specific clk
-#
+# Amlogic clock drivers
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o vid-pll-div.o
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-input.o
-obj-$(CONFIG_COMMON_CLK_AMLOGIC_AUDIO) += clk-triphase.o sclk-div.o
-obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o
+obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o
+obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o
+obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
+obj-$(CONFIG_COMMON_CLK_MESON_INPUT) += clk-input.o
+obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
+obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
+obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
+obj-$(CONFIG_COMMON_CLK_MESON_REGMAP) += clk-regmap.o
+obj-$(CONFIG_COMMON_CLK_MESON_SCLK_DIV) += sclk-div.o
+obj-$(CONFIG_COMMON_CLK_MESON_VID_PLL_DIV) += vid-pll-div.o
+
+# Amlogic Clock controllers
+
+obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
+obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
+obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
+obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
-obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
-obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
-obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
-obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index 29e088542387..0086f31288eb 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -12,10 +12,27 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
-#include "clk-regmap.h"
#include "meson-aoclk.h"
#include "axg-aoclk.h"
+#include "clk-regmap.h"
+#include "clk-dualdiv.h"
+
+#define IN_PREFIX "ao-in-"
+
+/*
+ * AO Configuration Clock register offsets.
+ * Register offsets from the data sheet must be multiplied by 4.
+ */
+#define AO_RTI_PWR_CNTL_REG1 0x0C
+#define AO_RTI_PWR_CNTL_REG0 0x10
+#define AO_RTI_GEN_CNTL_REG0 0x40
+#define AO_OSCIN_CNTL 0x58
+#define AO_CRT_CLK_CNTL1 0x68
+#define AO_SAR_CLK 0x90
+#define AO_RTC_ALT_CLK_CNTL0 0x94
+#define AO_RTC_ALT_CLK_CNTL1 0x98
+
#define AXG_AO_GATE(_name, _bit) \
static struct clk_regmap axg_aoclk_##_name = { \
.data = &(struct clk_regmap_gate_data) { \
@@ -25,7 +42,7 @@ static struct clk_regmap axg_aoclk_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "axg_ao_" #_name, \
.ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ "clk81" }, \
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
.num_parents = 1, \
.flags = CLK_IGNORE_UNUSED, \
}, \
@@ -39,17 +56,141 @@ AXG_AO_GATE(uart2, 5);
AXG_AO_GATE(ir_blaster, 6);
AXG_AO_GATE(saradc, 7);
+static struct clk_regmap axg_aoclk_cts_oscin = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .bit_idx = 14,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_oscin",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap axg_aoclk_32k_pre = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_32k_pre",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_oscin" },
+ .num_parents = 1,
+ },
+};
+
+static const struct meson_clk_dualdiv_param axg_32k_div_table[] = {
+ {
+ .dual = 1,
+ .n1 = 733,
+ .m1 = 8,
+ .n2 = 732,
+ .m2 = 11,
+ }, {}
+};
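+
+/*
+ * With a 24MHz xtal feeding this divider, the parameters above
+ * approximate the 32.768kHz RTC clock; see the rate computation in
+ * clk-dualdiv.c.
+ */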
+
+static struct clk_regmap axg_aoclk_32k_div = {
+ .data = &(struct meson_clk_dualdiv_data){
+ .n1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 0,
+ .width = 12,
+ },
+ .n2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 12,
+ .width = 12,
+ },
+ .m1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .m2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 12,
+ .width = 12,
+ },
+ .dual = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .table = axg_32k_div_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_32k_div",
+ .ops = &meson_clk_dualdiv_ops,
+ .parent_names = (const char *[]){ "axg_ao_32k_pre" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap axg_aoclk_32k_sel = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTC_ALT_CLK_CNTL1,
+ .mask = 0x1,
+ .shift = 24,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_32k_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "axg_ao_32k_div",
+ "axg_ao_32k_pre" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_aoclk_32k = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_32k",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "axg_ao_32k_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x1,
+ .shift = 10,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axg_ao_cts_rtc_oscin",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "axg_ao_32k",
+ IN_PREFIX "ext_32k-0" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap axg_aoclk_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
.shift = 8,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "axg_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "clk81", "ao_alt_xtal"},
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
+ "axg_ao_cts_rtc_oscin"},
.num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -62,7 +203,8 @@ static struct clk_regmap axg_aoclk_saradc_mux = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "xtal", "axg_ao_clk81" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "axg_ao_clk81" },
.num_parents = 2,
},
};
@@ -106,17 +248,23 @@ static const unsigned int axg_aoclk_reset[] = {
};
static struct clk_regmap *axg_aoclk_regmap[] = {
- [CLKID_AO_REMOTE] = &axg_aoclk_remote,
- [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master,
- [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave,
- [CLKID_AO_UART1] = &axg_aoclk_uart1,
- [CLKID_AO_UART2] = &axg_aoclk_uart2,
- [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster,
- [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc,
- [CLKID_AO_CLK81] = &axg_aoclk_clk81,
- [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux,
- [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div,
- [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate,
+ &axg_aoclk_remote,
+ &axg_aoclk_i2c_master,
+ &axg_aoclk_i2c_slave,
+ &axg_aoclk_uart1,
+ &axg_aoclk_uart2,
+ &axg_aoclk_ir_blaster,
+ &axg_aoclk_saradc,
+ &axg_aoclk_cts_oscin,
+ &axg_aoclk_32k_pre,
+ &axg_aoclk_32k_div,
+ &axg_aoclk_32k_sel,
+ &axg_aoclk_32k,
+ &axg_aoclk_cts_rtc_oscin,
+ &axg_aoclk_clk81,
+ &axg_aoclk_saradc_mux,
+ &axg_aoclk_saradc_div,
+ &axg_aoclk_saradc_gate,
};
static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
@@ -132,10 +280,22 @@ static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
[CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw,
[CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw,
[CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &axg_aoclk_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &axg_aoclk_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &axg_aoclk_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &axg_aoclk_32k_sel.hw,
+ [CLKID_AO_32K] = &axg_aoclk_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw,
},
.num = NR_CLKS,
};
+static const struct meson_aoclk_input axg_aoclk_inputs[] = {
+ { .name = "xtal", .required = true },
+ { .name = "mpeg-clk", .required = true },
+ { .name = "ext-32k-0", .required = false },
+};
+
static const struct meson_aoclk_data axg_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(axg_aoclk_reset),
@@ -143,6 +303,9 @@ static const struct meson_aoclk_data axg_aoclkc_data = {
.num_clks = ARRAY_SIZE(axg_aoclk_regmap),
.clks = axg_aoclk_regmap,
.hw_data = &axg_aoclk_onecell_data,
+ .inputs = axg_aoclk_inputs,
+ .num_inputs = ARRAY_SIZE(axg_aoclk_inputs),
+ .input_prefix = IN_PREFIX,
};
static const struct of_device_id axg_aoclkc_match_table[] = {
diff --git a/drivers/clk/meson/axg-aoclk.h b/drivers/clk/meson/axg-aoclk.h
index 91384d8dd844..3cc27e85170f 100644
--- a/drivers/clk/meson/axg-aoclk.h
+++ b/drivers/clk/meson/axg-aoclk.h
@@ -10,18 +10,7 @@
#ifndef __AXG_AOCLKC_H
#define __AXG_AOCLKC_H
-#define NR_CLKS 11
-/* AO Configuration Clock registers offsets
- * Register offsets from the data sheet must be multiplied by 4.
- */
-#define AO_RTI_PWR_CNTL_REG1 0x0C
-#define AO_RTI_PWR_CNTL_REG0 0x10
-#define AO_RTI_GEN_CNTL_REG0 0x40
-#define AO_OSCIN_CNTL 0x58
-#define AO_CRT_CLK_CNTL1 0x68
-#define AO_SAR_CLK 0x90
-#define AO_RTC_ALT_CLK_CNTL0 0x94
-#define AO_RTC_ALT_CLK_CNTL1 0x98
+#define NR_CLKS 17
#include <dt-bindings/clock/axg-aoclkc.h>
#include <dt-bindings/reset/axg-aoclkc.h>
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 8ac3a2295473..7ab200b6c3bf 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -14,8 +14,11 @@
#include <linux/reset.h>
#include <linux/slab.h>
-#include "clkc-audio.h"
#include "axg-audio.h"
+#include "clk-input.h"
+#include "clk-regmap.h"
+#include "clk-phase.h"
+#include "sclk-div.h"
#define AXG_MST_IN_COUNT 8
#define AXG_SLV_SCLK_COUNT 10
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 792735d7e46e..7a8ef80e5f2c 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -9,16 +9,17 @@
* Author: Qiufang Dai <qiufang.dai@amlogic.com>
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include "clkc.h"
+#include "clk-input.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-mpll.h"
#include "axg.h"
+#include "meson-eeclk.h"
static DEFINE_SPINLOCK(meson_clk_lock);
@@ -58,7 +59,7 @@ static struct clk_regmap axg_fixed_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -113,7 +114,7 @@ static struct clk_regmap axg_sys_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -214,7 +215,7 @@ static struct clk_regmap axg_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -283,7 +284,7 @@ static struct clk_regmap axg_hifi_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "hifi_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -701,7 +702,7 @@ static struct clk_regmap axg_pcie_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -803,7 +804,7 @@ static struct clk_regmap axg_pcie_cml_en1 = {
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
- "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
+ IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
"fclk_div3", "fclk_div5"
};
@@ -852,7 +853,7 @@ static struct clk_regmap axg_clk81 = {
};
static const char * const axg_sd_emmc_clk0_parent_names[] = {
- "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
+ IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
/*
* Following these parent clocks, we should also have had mpll2, mpll3
@@ -957,7 +958,7 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
9, 10, 11, 13, 14, };
static const char * const gen_clk_parent_names[] = {
- "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
+ IN_PREFIX "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
};
@@ -1255,46 +1256,20 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_pcie_pll_od,
};
+static const struct meson_eeclkc_data axg_clkc_data = {
+ .regmap_clks = axg_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
+ .hw_onecell_data = &axg_hw_onecell_data,
+};
+
static const struct of_device_id clkc_match_table[] = {
- { .compatible = "amlogic,axg-clkc" },
+ { .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
{}
};
-static int axg_clkc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *map;
- int ret, i;
-
- /* Get the hhi system controller node if available */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
- if (IS_ERR(map)) {
- dev_err(dev, "failed to get HHI regmap\n");
- return PTR_ERR(map);
- }
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(axg_clk_regmaps); i++)
- axg_clk_regmaps[i]->map = map;
-
- for (i = 0; i < axg_hw_onecell_data.num; i++) {
- /* array might be sparse */
- if (!axg_hw_onecell_data.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, axg_hw_onecell_data.hws[i]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- &axg_hw_onecell_data);
-}
-
static struct platform_driver axg_driver = {
- .probe = axg_clkc_probe,
+ .probe = meson_eeclkc_probe,
.driver = {
.name = "axg-clkc",
.of_match_table = clkc_match_table,
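
The bespoke probe removed above is replaced by the shared meson_eeclkc_probe(). That helper is not part of this hunk; the following sketch, reconstructed from the removed axg_clkc_probe() and the meson_eeclkc_data fields, shows roughly what it must do (the real implementation in meson-eeclk.c may differ):

static int meson_eeclkc_probe(struct platform_device *pdev)
{
	const struct meson_eeclkc_data *data;
	struct device *dev = &pdev->dev;
	struct regmap *map;
	int ret, i;

	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	/* Get the hhi system controller regmap from the parent node */
	map = syscon_node_to_regmap(of_get_parent(dev->of_node));
	if (IS_ERR(map)) {
		dev_err(dev, "failed to get HHI regmap\n");
		return PTR_ERR(map);
	}

	/* Populate the regmap of every regmap backed clock */
	for (i = 0; i < data->regmap_clk_num; i++)
		data->regmap_clks[i]->map = map;

	/* Register all the clock hws, skipping sparse entries */
	for (i = 0; i < data->hw_onecell_data->num; i++) {
		if (!data->hw_onecell_data->hws[i])
			continue;

		ret = devm_clk_hw_register(dev, data->hw_onecell_data->hws[i]);
		if (ret)
			return ret;
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   data->hw_onecell_data);
}
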
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
new file mode 100644
index 000000000000..c5ca23a5e3e8
--- /dev/null
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+/*
+ * The AO domain embeds a dual divider to generate a more precise
+ * 32.768 kHz clock for low-power suspend mode and CEC.
+ * ______ ______
+ * | | | |
+ * | Div1 |-| Cnt1 |
+ * /|______| |______|\
+ * -| ______ ______ X--> Out
+ * \| | | |/
+ * | Div2 |-| Cnt2 |
+ * |______| |______|
+ *
+ * The divider can run in single or dual mode. In dual mode, a counter
+ * on each path sets how many cycles are spent on its divider before
+ * switching to the other one.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+
+#include "clk-regmap.h"
+#include "clk-dualdiv.h"
+
+static inline struct meson_clk_dualdiv_data *
+meson_clk_dualdiv_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_dualdiv_data *)clk->data;
+}
+
+static unsigned long
+__dualdiv_param_to_rate(unsigned long parent_rate,
+ const struct meson_clk_dualdiv_param *p)
+{
+ if (!p->dual)
+ return DIV_ROUND_CLOSEST(parent_rate, p->n1);
+
+ return DIV_ROUND_CLOSEST(parent_rate * (p->m1 + p->m2),
+ p->n1 * p->m1 + p->n2 * p->m2);
+}
+
+static unsigned long meson_clk_dualdiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
+ struct meson_clk_dualdiv_param setting;
+
+ setting.dual = meson_parm_read(clk->map, &dualdiv->dual);
+ setting.n1 = meson_parm_read(clk->map, &dualdiv->n1) + 1;
+ setting.m1 = meson_parm_read(clk->map, &dualdiv->m1) + 1;
+ setting.n2 = meson_parm_read(clk->map, &dualdiv->n2) + 1;
+ setting.m2 = meson_parm_read(clk->map, &dualdiv->m2) + 1;
+
+ return __dualdiv_param_to_rate(parent_rate, &setting);
+}
+
+static const struct meson_clk_dualdiv_param *
+__dualdiv_get_setting(unsigned long rate, unsigned long parent_rate,
+ struct meson_clk_dualdiv_data *dualdiv)
+{
+ const struct meson_clk_dualdiv_param *table = dualdiv->table;
+ unsigned long best = 0, now = 0;
+ unsigned int i, best_i = 0;
+
+ if (!table)
+ return NULL;
+
+ for (i = 0; table[i].n1; i++) {
+ now = __dualdiv_param_to_rate(parent_rate, &table[i]);
+
+ /* If we get an exact match, don't bother any further */
+ if (now == rate) {
+ return &table[i];
+ } else if (abs(now - rate) < abs(best - rate)) {
+ best = now;
+ best_i = i;
+ }
+ }
+
+ return (struct meson_clk_dualdiv_param *)&table[best_i];
+}
+
+static long meson_clk_dualdiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
+ const struct meson_clk_dualdiv_param *setting =
+ __dualdiv_get_setting(rate, *parent_rate, dualdiv);
+
+ if (!setting)
+ return meson_clk_dualdiv_recalc_rate(hw, *parent_rate);
+
+ return __dualdiv_param_to_rate(*parent_rate, setting);
+}
+
+static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk);
+ const struct meson_clk_dualdiv_param *setting =
+ __dualdiv_get_setting(rate, parent_rate, dualdiv);
+
+ if (!setting)
+ return -EINVAL;
+
+ meson_parm_write(clk->map, &dualdiv->dual, setting->dual);
+ meson_parm_write(clk->map, &dualdiv->n1, setting->n1 - 1);
+ meson_parm_write(clk->map, &dualdiv->m1, setting->m1 - 1);
+ meson_parm_write(clk->map, &dualdiv->n2, setting->n2 - 1);
+ meson_parm_write(clk->map, &dualdiv->m2, setting->m2 - 1);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_dualdiv_ops = {
+ .recalc_rate = meson_clk_dualdiv_recalc_rate,
+ .round_rate = meson_clk_dualdiv_round_rate,
+ .set_rate = meson_clk_dualdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ops);
+
+const struct clk_ops meson_clk_dualdiv_ro_ops = {
+ .recalc_rate = meson_clk_dualdiv_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops);
+
+MODULE_DESCRIPTION("Amlogic dual divider driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
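
To see why the dual mode buys precision, take the table entry used later in this series for the g12a 32k path (n1 = 733, m1 = 8, n2 = 732, m2 = 11) with a 24 MHz crystal feeding the divider:

	rate = parent * (m1 + m2) / (n1 * m1 + n2 * m2)
	     = 24000000 * (8 + 11) / (733 * 8 + 732 * 11)
	     = 456000000 / 13916
	     ~ 32768.04 Hz

A single divider could only produce 24 MHz / 732 ~ 32786.9 Hz or 24 MHz / 733 ~ 32742.2 Hz, so alternating 8 cycles on one divider and 11 on the other lands within about 0.04 Hz of the 32.768 kHz target.
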
diff --git a/drivers/clk/meson/clk-dualdiv.h b/drivers/clk/meson/clk-dualdiv.h
new file mode 100644
index 000000000000..4aa939018012
--- /dev/null
+++ b/drivers/clk/meson/clk-dualdiv.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_DUALDIV_H
+#define __MESON_CLK_DUALDIV_H
+
+#include <linux/clk-provider.h>
+#include "parm.h"
+
+struct meson_clk_dualdiv_param {
+ unsigned int n1;
+ unsigned int n2;
+ unsigned int m1;
+ unsigned int m2;
+ unsigned int dual;
+};
+
+struct meson_clk_dualdiv_data {
+ struct parm n1;
+ struct parm n2;
+ struct parm m1;
+ struct parm m2;
+ struct parm dual;
+ const struct meson_clk_dualdiv_param *table;
+};
+
+extern const struct clk_ops meson_clk_dualdiv_ops;
+extern const struct clk_ops meson_clk_dualdiv_ro_ops;
+
+#endif /* __MESON_CLK_DUALDIV_H */
diff --git a/drivers/clk/meson/clk-input.c b/drivers/clk/meson/clk-input.c
index 06b3e3bb6a66..086226e9dba6 100644
--- a/drivers/clk/meson/clk-input.c
+++ b/drivers/clk/meson/clk-input.c
@@ -7,7 +7,8 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
-#include "clkc.h"
+#include <linux/module.h>
+#include "clk-input.h"
static const struct clk_ops meson_clk_no_ops = {};
@@ -42,3 +43,7 @@ struct clk_hw *meson_clk_hw_register_input(struct device *dev,
return ret ? ERR_PTR(ret) : hw;
}
EXPORT_SYMBOL_GPL(meson_clk_hw_register_input);
+
+MODULE_DESCRIPTION("Amlogic clock input helper");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-input.h b/drivers/clk/meson/clk-input.h
new file mode 100644
index 000000000000..4a541b9685a6
--- /dev/null
+++ b/drivers/clk/meson/clk-input.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_INPUT_H
+#define __MESON_CLK_INPUT_H
+
+#include <linux/clk-provider.h>
+
+struct device;
+
+struct clk_hw *meson_clk_hw_register_input(struct device *dev,
+ const char *of_name,
+ const char *clk_name,
+ unsigned long flags);
+
+#endif /* __MESON_CLK_INPUT_H */
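
For context, a controller typically calls this helper once per DT input before registering its regmap clocks, exposing the input under an internal, prefixed name. A hypothetical sketch (the "foo-in-" prefix and the function are made up for illustration):

static int foo_register_xtal(struct device *dev)
{
	struct clk_hw *hw;

	/* Proxy the "xtal" clock from DT as "foo-in-xtal" */
	hw = meson_clk_hw_register_input(dev, "xtal", "foo-in-xtal", 0);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	return 0;
}

Regmap clocks can then list "foo-in-xtal" in their parent_names, which is how the IN_PREFIX "xtal" parents used throughout this series resolve.
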
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 650f75cc15a9..f76850d99e59 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -12,7 +12,11 @@
*/
#include <linux/clk-provider.h>
-#include "clkc.h"
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "clk-regmap.h"
+#include "clk-mpll.h"
#define SDM_DEN 16384
#define N2_MIN 4
@@ -138,9 +142,15 @@ const struct clk_ops meson_clk_mpll_ro_ops = {
.recalc_rate = mpll_recalc_rate,
.round_rate = mpll_round_rate,
};
+EXPORT_SYMBOL_GPL(meson_clk_mpll_ro_ops);
const struct clk_ops meson_clk_mpll_ops = {
.recalc_rate = mpll_recalc_rate,
.round_rate = mpll_round_rate,
.set_rate = mpll_set_rate,
};
+EXPORT_SYMBOL_GPL(meson_clk_mpll_ops);
+
+MODULE_DESCRIPTION("Amlogic MPLL driver");
+MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>");
+MODULE_LICENSE("GPL v2");
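
The rate relation implemented by mpll_recalc_rate() is not visible in this hunk; assuming the usual Meson MPLL arithmetic with the SDM_DEN = 16384 denominator defined above, it is:

	rate = parent_rate * SDM_DEN / (SDM_DEN * n2 + sdm)

For example, with a 2 GHz input on the MPLL pre-divider (which assumes a 4 GHz fixed_pll_dco, typical for these SoCs but not stated in this patch), n2 = 4 and sdm = 0 give 500 MHz, while sdm = 8192 pulls that down to 2 GHz * 16384 / 73728 ~ 444.44 MHz.
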
diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h
new file mode 100644
index 000000000000..cf79340006dd
--- /dev/null
+++ b/drivers/clk/meson/clk-mpll.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_MPLL_H
+#define __MESON_CLK_MPLL_H
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "parm.h"
+
+struct meson_clk_mpll_data {
+ struct parm sdm;
+ struct parm sdm_en;
+ struct parm n2;
+ struct parm ssen;
+ struct parm misc;
+ spinlock_t *lock;
+ u8 flags;
+};
+
+#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
+
+extern const struct clk_ops meson_clk_mpll_ro_ops;
+extern const struct clk_ops meson_clk_mpll_ops;
+
+#endif /* __MESON_CLK_MPLL_H */
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index cba43748ce3d..80c3ada193a4 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -5,7 +5,10 @@
*/
#include <linux/clk-provider.h>
-#include "clkc.h"
+#include <linux/module.h>
+
+#include "clk-regmap.h"
+#include "clk-phase.h"
#define phase_step(_width) (360 / (1 << (_width)))
@@ -15,13 +18,12 @@ meson_clk_phase_data(struct clk_regmap *clk)
return (struct meson_clk_phase_data *)clk->data;
}
-int meson_clk_degrees_from_val(unsigned int val, unsigned int width)
+static int meson_clk_degrees_from_val(unsigned int val, unsigned int width)
{
return phase_step(width) * val;
}
-EXPORT_SYMBOL_GPL(meson_clk_degrees_from_val);
-unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
+static unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
{
unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width));
@@ -31,7 +33,6 @@ unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
*/
return val % (1 << width);
}
-EXPORT_SYMBOL_GPL(meson_clk_degrees_to_val);
static int meson_clk_phase_get_phase(struct clk_hw *hw)
{
@@ -61,3 +62,67 @@ const struct clk_ops meson_clk_phase_ops = {
.set_phase = meson_clk_phase_set_phase,
};
EXPORT_SYMBOL_GPL(meson_clk_phase_ops);
+
+/*
+ * This is a special clock for the audio controller.
+ * The phase of mst_sclk clock output can be controlled independently
+ * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2).
+ * Controlling these 3 phases as one makes things simpler and
+ * gives the same clock view to all the elements on the i2s bus.
+ * If necessary, the phase can still be controlled in the tdm block,
+ * which makes these independent controls redundant.
+ */
+static inline struct meson_clk_triphase_data *
+meson_clk_triphase_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_triphase_data *)clk->data;
+}
+
+static void meson_clk_triphase_sync(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ /* Get phase 0 and sync it to phase 1 and 2 */
+ val = meson_parm_read(clk->map, &tph->ph0);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+}
+
+static int meson_clk_triphase_get_phase(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+	/* Phases are in sync, reading phase 0 is enough */
+ val = meson_parm_read(clk->map, &tph->ph0);
+
+ return meson_clk_degrees_from_val(val, tph->ph0.width);
+}
+
+static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
+ meson_parm_write(clk->map, &tph->ph0, val);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_triphase_ops = {
+ .init = meson_clk_triphase_sync,
+ .get_phase = meson_clk_triphase_get_phase,
+ .set_phase = meson_clk_triphase_set_phase,
+};
+EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
+
+MODULE_DESCRIPTION("Amlogic phase driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
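
The conversion helpers are straightforward: phase_step(width) = 360 / 2^width, so a 2-bit phase field moves in 90-degree steps. For instance:

	meson_clk_degrees_to_val(270, 2) = DIV_ROUND_CLOSEST(270, 90) % 4 = 3
	meson_clk_degrees_to_val(360, 2) = 4 % 4 = 0
	meson_clk_degrees_from_val(3, 2) = 90 * 3 = 270

The modulo in meson_clk_degrees_to_val() is what makes a 360-degree request wrap back to the equivalent 0-degree setting.
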
diff --git a/drivers/clk/meson/clk-phase.h b/drivers/clk/meson/clk-phase.h
new file mode 100644
index 000000000000..5579f9ced142
--- /dev/null
+++ b/drivers/clk/meson/clk-phase.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_PHASE_H
+#define __MESON_CLK_PHASE_H
+
+#include <linux/clk-provider.h>
+#include "parm.h"
+
+struct meson_clk_phase_data {
+ struct parm ph;
+};
+
+struct meson_clk_triphase_data {
+ struct parm ph0;
+ struct parm ph1;
+ struct parm ph2;
+};
+
+extern const struct clk_ops meson_clk_phase_ops;
+extern const struct clk_ops meson_clk_triphase_ops;
+
+#endif /* __MESON_CLK_PHASE_H */
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index afffc1547e20..7a14ac9b2fec 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -32,11 +32,10 @@
#include <linux/io.h>
#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/string.h>
+#include <linux/rational.h>
-#include "clkc.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
static inline struct meson_clk_pll_data *
meson_clk_pll_data(struct clk_regmap *clk)
@@ -44,12 +43,21 @@ meson_clk_pll_data(struct clk_regmap *clk)
return (struct meson_clk_pll_data *)clk->data;
}
+static int __pll_round_closest_mult(struct meson_clk_pll_data *pll)
+{
+ if ((pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) &&
+ !MESON_PARM_APPLICABLE(&pll->frac))
+ return 1;
+
+ return 0;
+}
+
static unsigned long __pll_params_to_rate(unsigned long parent_rate,
- const struct pll_params_table *pllt,
- u16 frac,
+ unsigned int m, unsigned int n,
+ unsigned int frac,
struct meson_clk_pll_data *pll)
{
- u64 rate = (u64)parent_rate * pllt->m;
+ u64 rate = (u64)parent_rate * m;
if (frac && MESON_PARM_APPLICABLE(&pll->frac)) {
u64 frac_rate = (u64)parent_rate * frac;
@@ -58,7 +66,7 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate,
(1 << pll->frac.width));
}
- return DIV_ROUND_UP_ULL(rate, pllt->n);
+ return DIV_ROUND_UP_ULL(rate, n);
}
static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
@@ -66,35 +74,39 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- struct pll_params_table pllt;
- u16 frac;
+ unsigned int m, n, frac;
- pllt.n = meson_parm_read(clk->map, &pll->n);
- pllt.m = meson_parm_read(clk->map, &pll->m);
+ n = meson_parm_read(clk->map, &pll->n);
+ m = meson_parm_read(clk->map, &pll->m);
frac = MESON_PARM_APPLICABLE(&pll->frac) ?
meson_parm_read(clk->map, &pll->frac) :
0;
- return __pll_params_to_rate(parent_rate, &pllt, frac, pll);
+ return __pll_params_to_rate(parent_rate, m, n, frac, pll);
}
-static u16 __pll_params_with_frac(unsigned long rate,
- unsigned long parent_rate,
- const struct pll_params_table *pllt,
- struct meson_clk_pll_data *pll)
+static unsigned int __pll_params_with_frac(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int m,
+ unsigned int n,
+ struct meson_clk_pll_data *pll)
{
- u16 frac_max = (1 << pll->frac.width);
- u64 val = (u64)rate * pllt->n;
+ unsigned int frac_max = (1 << pll->frac.width);
+ u64 val = (u64)rate * n;
+
+ /* Bail out if we are already over the requested rate */
+ if (rate < parent_rate * m / n)
+ return 0;
if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST)
val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate);
else
val = div_u64(val * frac_max, parent_rate);
- val -= pllt->m * frac_max;
+ val -= m * frac_max;
- return min((u16)val, (u16)(frac_max - 1));
+ return min((unsigned int)val, (frac_max - 1));
}
static bool meson_clk_pll_is_better(unsigned long rate,
@@ -102,45 +114,123 @@ static bool meson_clk_pll_is_better(unsigned long rate,
unsigned long now,
struct meson_clk_pll_data *pll)
{
- if (!(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) ||
- MESON_PARM_APPLICABLE(&pll->frac)) {
- /* Round down */
- if (now < rate && best < now)
- return true;
- } else {
+ if (__pll_round_closest_mult(pll)) {
/* Round Closest */
if (abs(now - rate) < abs(best - rate))
return true;
+ } else {
+ /* Round down */
+ if (now <= rate && best < now)
+ return true;
}
return false;
}
-static const struct pll_params_table *
-meson_clk_get_pll_settings(unsigned long rate,
- unsigned long parent_rate,
- struct meson_clk_pll_data *pll)
+static int meson_clk_get_pll_table_index(unsigned int index,
+ unsigned int *m,
+ unsigned int *n,
+ struct meson_clk_pll_data *pll)
{
- const struct pll_params_table *table = pll->table;
- unsigned long best = 0, now = 0;
- unsigned int i, best_i = 0;
+ if (!pll->table[index].n)
+ return -EINVAL;
+
+ *m = pll->table[index].m;
+ *n = pll->table[index].n;
+
+ return 0;
+}
+
+static unsigned int meson_clk_get_pll_range_m(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int n,
+ struct meson_clk_pll_data *pll)
+{
+ u64 val = (u64)rate * n;
- if (!table)
- return NULL;
+ if (__pll_round_closest_mult(pll))
+ return DIV_ROUND_CLOSEST_ULL(val, parent_rate);
- for (i = 0; table[i].n; i++) {
- now = __pll_params_to_rate(parent_rate, &table[i], 0, pll);
+ return div_u64(val, parent_rate);
+}
+
+static int meson_clk_get_pll_range_index(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int index,
+ unsigned int *m,
+ unsigned int *n,
+ struct meson_clk_pll_data *pll)
+{
+ *n = index + 1;
- /* If we get an exact match, don't bother any further */
- if (now == rate) {
- return &table[i];
- } else if (meson_clk_pll_is_better(rate, best, now, pll)) {
+ /* Check the predivider range */
+ if (*n >= (1 << pll->n.width))
+ return -EINVAL;
+
+ if (*n == 1) {
+		/* Get the boundaries out of the way */
+ if (rate <= pll->range->min * parent_rate) {
+ *m = pll->range->min;
+ return -ENODATA;
+ } else if (rate >= pll->range->max * parent_rate) {
+ *m = pll->range->max;
+ return -ENODATA;
+ }
+ }
+
+ *m = meson_clk_get_pll_range_m(rate, parent_rate, *n, pll);
+
+	/* The pre-divider gives a multiplier that is too big - stop */
+ if (*m >= (1 << pll->m.width))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int meson_clk_get_pll_get_index(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int index,
+ unsigned int *m,
+ unsigned int *n,
+ struct meson_clk_pll_data *pll)
+{
+ if (pll->range)
+ return meson_clk_get_pll_range_index(rate, parent_rate,
+ index, m, n, pll);
+ else if (pll->table)
+ return meson_clk_get_pll_table_index(index, m, n, pll);
+
+ return -EINVAL;
+}
+
+static int meson_clk_get_pll_settings(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int *best_m,
+ unsigned int *best_n,
+ struct meson_clk_pll_data *pll)
+{
+ unsigned long best = 0, now = 0;
+ unsigned int i, m, n;
+ int ret;
+
+ for (i = 0, ret = 0; !ret; i++) {
+ ret = meson_clk_get_pll_get_index(rate, parent_rate,
+ i, &m, &n, pll);
+ if (ret == -EINVAL)
+ break;
+
+ now = __pll_params_to_rate(parent_rate, m, n, 0, pll);
+ if (meson_clk_pll_is_better(rate, best, now, pll)) {
best = now;
- best_i = i;
+ *best_m = m;
+ *best_n = n;
+
+ if (now == rate)
+ break;
}
}
- return (struct pll_params_table *)&table[best_i];
+ return best ? 0 : -EINVAL;
}
static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -148,15 +238,15 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- const struct pll_params_table *pllt =
- meson_clk_get_pll_settings(rate, *parent_rate, pll);
+ unsigned int m, n, frac;
unsigned long round;
- u16 frac;
+ int ret;
- if (!pllt)
+ ret = meson_clk_get_pll_settings(rate, *parent_rate, &m, &n, pll);
+ if (ret)
return meson_clk_pll_recalc_rate(hw, *parent_rate);
- round = __pll_params_to_rate(*parent_rate, pllt, 0, pll);
+ round = __pll_params_to_rate(*parent_rate, m, n, 0, pll);
if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round)
return round;
@@ -165,9 +255,9 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
* The rate provided by the setting is not an exact match, let's
* try to improve the result using the fractional parameter
*/
- frac = __pll_params_with_frac(rate, *parent_rate, pllt, pll);
+ frac = __pll_params_with_frac(rate, *parent_rate, m, n, pll);
- return __pll_params_to_rate(*parent_rate, pllt, frac, pll);
+ return __pll_params_to_rate(*parent_rate, m, n, frac, pll);
}
static int meson_clk_pll_wait_lock(struct clk_hw *hw)
@@ -254,30 +344,27 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- const struct pll_params_table *pllt;
- unsigned int enabled;
+ unsigned int enabled, m, n, frac = 0, ret;
unsigned long old_rate;
- u16 frac = 0;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
old_rate = rate;
- pllt = meson_clk_get_pll_settings(rate, parent_rate, pll);
- if (!pllt)
- return -EINVAL;
+ ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
+ if (ret)
+ return ret;
enabled = meson_parm_read(clk->map, &pll->en);
if (enabled)
meson_clk_pll_disable(hw);
- meson_parm_write(clk->map, &pll->n, pllt->n);
- meson_parm_write(clk->map, &pll->m, pllt->m);
-
+ meson_parm_write(clk->map, &pll->n, n);
+ meson_parm_write(clk->map, &pll->m, m);
if (MESON_PARM_APPLICABLE(&pll->frac)) {
- frac = __pll_params_with_frac(rate, parent_rate, pllt, pll);
+ frac = __pll_params_with_frac(rate, parent_rate, m, n, pll);
meson_parm_write(clk->map, &pll->frac, frac);
}
@@ -309,8 +396,15 @@ const struct clk_ops meson_clk_pll_ops = {
.enable = meson_clk_pll_enable,
.disable = meson_clk_pll_disable
};
+EXPORT_SYMBOL_GPL(meson_clk_pll_ops);
const struct clk_ops meson_clk_pll_ro_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
.is_enabled = meson_clk_pll_is_enabled,
};
+EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops);
+
+MODULE_DESCRIPTION("Amlogic PLL driver");
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
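
Putting __pll_params_to_rate() in numbers: the produced rate is parent * (m + frac / 2^frac.width) / n, rounded up. With a 24 MHz xtal, m = 62, n = 1 and a 17-bit fractional field:

	rate = 24 MHz * 62 + 24 MHz * 65536 / 131072
	     = 1488 MHz + 12 MHz
	     = 1500 MHz

A half-scale frac adds exactly half a multiplier step, which is why __pll_params_with_frac() can refine a table or range match that round_rate() found to be inexact.
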
diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h
new file mode 100644
index 000000000000..55af2e285b1b
--- /dev/null
+++ b/drivers/clk/meson/clk-pll.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_PLL_H
+#define __MESON_CLK_PLL_H
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include "parm.h"
+
+struct pll_params_table {
+ unsigned int m;
+ unsigned int n;
+};
+
+struct pll_mult_range {
+ unsigned int min;
+ unsigned int max;
+};
+
+#define PLL_PARAMS(_m, _n) \
+ { \
+ .m = (_m), \
+ .n = (_n), \
+ }
+
+#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
+
+struct meson_clk_pll_data {
+ struct parm en;
+ struct parm m;
+ struct parm n;
+ struct parm frac;
+ struct parm l;
+ struct parm rst;
+ const struct reg_sequence *init_regs;
+ unsigned int init_count;
+ const struct pll_params_table *table;
+ const struct pll_mult_range *range;
+ u8 flags;
+};
+
+extern const struct clk_ops meson_clk_pll_ro_ops;
+extern const struct clk_ops meson_clk_pll_ops;
+
+#endif /* __MESON_CLK_PLL_H */
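
A hedged illustration of the two parameter sources a PLL can now declare; meson_clk_get_pll_settings() walks a table entry by entry but derives m arithmetically for each n when a range is given. The foo_ names are invented, and the 55..255 bounds mirror the g12a gp0 range later in this series:

static const struct pll_params_table foo_pll_params_table[] = {
	PLL_PARAMS(62, 1),	/* 24 MHz xtal -> 1488 MHz DCO */
	PLL_PARAMS(67, 1),	/* 24 MHz xtal -> 1608 MHz DCO */
	{ /* sentinel */ },
};

static const struct pll_mult_range foo_pll_mult_range = {
	.min = 55,		/* 24 MHz xtal -> 1320 MHz DCO */
	.max = 255,		/* 24 MHz xtal -> 6120 MHz DCO */
};

Only one of .table or .range needs to be set in meson_clk_pll_data; meson_clk_get_pll_get_index() checks .range first, then .table, and returns -EINVAL when neither is set.
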
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index c515f67322a3..dcd1757cc5df 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -4,6 +4,7 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
+#include <linux/module.h>
#include "clk-regmap.h"
static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable)
@@ -180,3 +181,7 @@ const struct clk_ops clk_regmap_mux_ro_ops = {
.get_parent = clk_regmap_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops);
+
+MODULE_DESCRIPTION("Amlogic regmap backed clock driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index e9c5728d40eb..1dd0abe3ba91 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -111,4 +111,24 @@ clk_get_regmap_mux_data(struct clk_regmap *clk)
extern const struct clk_ops clk_regmap_mux_ops;
extern const struct clk_ops clk_regmap_mux_ro_ops;
+#define __MESON_GATE(_name, _reg, _bit, _ops) \
+struct clk_regmap _name = { \
+ .data = &(struct clk_regmap_gate_data){ \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = _ops, \
+ .parent_names = (const char *[]){ "clk81" }, \
+ .num_parents = 1, \
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
+ }, \
+}
+
+#define MESON_GATE(_name, _reg, _bit) \
+ __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ops)
+
+#define MESON_GATE_RO(_name, _reg, _bit) \
+ __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ro_ops)
#endif /* __CLK_REGMAP_H */
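
Since the gate macros moved here from clkc.h, a hypothetical MESON_GATE(foo_ahb, HHI_GCLK_MPEG0, 1); now expands, through __MESON_GATE, to the following clk81-fed gate (the name and register are illustrative only):

struct clk_regmap foo_ahb = {
	.data = &(struct clk_regmap_gate_data){
		.offset = (HHI_GCLK_MPEG0),
		.bit_idx = (1),
	},
	.hw.init = &(struct clk_init_data) {
		.name = "foo_ahb",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "clk81" },
		.num_parents = 1,
		.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
	},
};

MESON_GATE_RO() produces the same structure with &clk_regmap_gate_ro_ops, for gates the driver must not touch.
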
diff --git a/drivers/clk/meson/clk-triphase.c b/drivers/clk/meson/clk-triphase.c
deleted file mode 100644
index 4a59936251e5..000000000000
--- a/drivers/clk/meson/clk-triphase.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/*
- * Copyright (c) 2018 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#include <linux/clk-provider.h>
-#include "clkc-audio.h"
-
-/*
- * This is a special clock for the audio controller.
- * The phase of mst_sclk clock output can be controlled independently
- * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2).
- * Controlling these 3 phases as just one makes things simpler and
- * give the same clock view to all the element on the i2s bus.
- * If necessary, we can still control the phase in the tdm block
- * which makes these independent control redundant.
- */
-static inline struct meson_clk_triphase_data *
-meson_clk_triphase_data(struct clk_regmap *clk)
-{
- return (struct meson_clk_triphase_data *)clk->data;
-}
-
-static void meson_clk_triphase_sync(struct clk_hw *hw)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
- unsigned int val;
-
- /* Get phase 0 and sync it to phase 1 and 2 */
- val = meson_parm_read(clk->map, &tph->ph0);
- meson_parm_write(clk->map, &tph->ph1, val);
- meson_parm_write(clk->map, &tph->ph2, val);
-}
-
-static int meson_clk_triphase_get_phase(struct clk_hw *hw)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
- unsigned int val;
-
- /* Phase are in sync, reading phase 0 is enough */
- val = meson_parm_read(clk->map, &tph->ph0);
-
- return meson_clk_degrees_from_val(val, tph->ph0.width);
-}
-
-static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
- unsigned int val;
-
- val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
- meson_parm_write(clk->map, &tph->ph0, val);
- meson_parm_write(clk->map, &tph->ph1, val);
- meson_parm_write(clk->map, &tph->ph2, val);
-
- return 0;
-}
-
-const struct clk_ops meson_clk_triphase_ops = {
- .init = meson_clk_triphase_sync,
- .get_phase = meson_clk_triphase_get_phase,
- .set_phase = meson_clk_triphase_set_phase,
-};
-EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
deleted file mode 100644
index 6183b22c4bf2..000000000000
--- a/drivers/clk/meson/clkc.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2015 Endless Mobile, Inc.
- * Author: Carlo Caione <carlo@endlessm.com>
- */
-
-#ifndef __CLKC_H
-#define __CLKC_H
-
-#include <linux/clk-provider.h>
-#include "clk-regmap.h"
-
-#define PMASK(width) GENMASK(width - 1, 0)
-#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
-#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
-
-#define PARM_GET(width, shift, reg) \
- (((reg) & SETPMASK(width, shift)) >> (shift))
-#define PARM_SET(width, shift, reg, val) \
- (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))
-
-#define MESON_PARM_APPLICABLE(p) (!!((p)->width))
-
-struct parm {
- u16 reg_off;
- u8 shift;
- u8 width;
-};
-
-static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
-{
- unsigned int val;
-
- regmap_read(map, p->reg_off, &val);
- return PARM_GET(p->width, p->shift, val);
-}
-
-static inline void meson_parm_write(struct regmap *map, struct parm *p,
- unsigned int val)
-{
- regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
- val << p->shift);
-}
-
-
-struct pll_params_table {
- u16 m;
- u16 n;
-};
-
-#define PLL_PARAMS(_m, _n) \
- { \
- .m = (_m), \
- .n = (_n), \
- }
-
-#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
-
-struct meson_clk_pll_data {
- struct parm en;
- struct parm m;
- struct parm n;
- struct parm frac;
- struct parm l;
- struct parm rst;
- const struct reg_sequence *init_regs;
- unsigned int init_count;
- const struct pll_params_table *table;
- u8 flags;
-};
-
-#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
-
-struct meson_clk_mpll_data {
- struct parm sdm;
- struct parm sdm_en;
- struct parm n2;
- struct parm ssen;
- struct parm misc;
- spinlock_t *lock;
- u8 flags;
-};
-
-#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
-
-struct meson_clk_phase_data {
- struct parm ph;
-};
-
-int meson_clk_degrees_from_val(unsigned int val, unsigned int width);
-unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width);
-
-struct meson_vid_pll_div_data {
- struct parm val;
- struct parm sel;
-};
-
-#define MESON_GATE(_name, _reg, _bit) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ "clk81" }, \
- .num_parents = 1, \
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
- }, \
-};
-
-/* clk_ops */
-extern const struct clk_ops meson_clk_pll_ro_ops;
-extern const struct clk_ops meson_clk_pll_ops;
-extern const struct clk_ops meson_clk_cpu_ops;
-extern const struct clk_ops meson_clk_mpll_ro_ops;
-extern const struct clk_ops meson_clk_mpll_ops;
-extern const struct clk_ops meson_clk_phase_ops;
-extern const struct clk_ops meson_vid_pll_div_ro_ops;
-
-struct clk_hw *meson_clk_hw_register_input(struct device *dev,
- const char *of_name,
- const char *clk_name,
- unsigned long flags);
-
-#endif /* __CLKC_H */
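
The parm accessors deleted here live on in parm.h, which every new header in this series includes. Their bit twiddling is easiest to check with numbers: for a field with shift = 10 and width = 5, as used by several PLL n fields in this series,

	SETPMASK(5, 10) = GENMASK(14, 10) = 0x7c00
	PARM_GET(5, 10, 0x00001c00) = (0x1c00 & 0x7c00) >> 10 = 7

so meson_parm_read() returns 7 for a register value of 0x1c00, and meson_parm_write() only updates bits 14..10, leaving the rest of the register intact.
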
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
new file mode 100644
index 000000000000..1994e735396b
--- /dev/null
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson-G12A AO Clock Controller Driver
+ *
+ * Copyright (c) 2016 Baylibre SAS.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * Copyright (c) 2019 Baylibre SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/mfd/syscon.h>
+#include "meson-aoclk.h"
+#include "g12a-aoclk.h"
+
+#include "clk-regmap.h"
+#include "clk-dualdiv.h"
+
+#define IN_PREFIX "ao-in-"
+
+/*
+ * AO Configuration Clock register offsets
+ * Register offsets from the data sheet must be multiplied by 4.
+ */
+#define AO_RTI_STATUS_REG3 0x0C
+#define AO_RTI_PWR_CNTL_REG0 0x10
+#define AO_RTI_GEN_CNTL_REG0 0x40
+#define AO_CLK_GATE0 0x4c
+#define AO_CLK_GATE0_SP 0x50
+#define AO_OSCIN_CNTL 0x58
+#define AO_CEC_CLK_CNTL_REG0 0x74
+#define AO_CEC_CLK_CNTL_REG1 0x78
+#define AO_SAR_CLK 0x90
+#define AO_RTC_ALT_CLK_CNTL0 0x94
+#define AO_RTC_ALT_CLK_CNTL1 0x98
+
+/*
+ * Like every other peripheral clock gate in Amlogic clock drivers,
+ * we are using CLK_IGNORE_UNUSED here, so we keep the state set by
+ * the bootloader. The goal is to remove this flag at some point, but
+ * actually removing it will require extensive testing to be done safely.
+ */
+#define AXG_AO_GATE(_name, _reg, _bit) \
+static struct clk_regmap g12a_aoclk_##_name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "g12a_ao_" #_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
+ .num_parents = 1, \
+ .flags = CLK_IGNORE_UNUSED, \
+ }, \
+}
+
+AXG_AO_GATE(ahb, AO_CLK_GATE0, 0);
+AXG_AO_GATE(ir_in, AO_CLK_GATE0, 1);
+AXG_AO_GATE(i2c_m0, AO_CLK_GATE0, 2);
+AXG_AO_GATE(i2c_s0, AO_CLK_GATE0, 3);
+AXG_AO_GATE(uart, AO_CLK_GATE0, 4);
+AXG_AO_GATE(prod_i2c, AO_CLK_GATE0, 5);
+AXG_AO_GATE(uart2, AO_CLK_GATE0, 6);
+AXG_AO_GATE(ir_out, AO_CLK_GATE0, 7);
+AXG_AO_GATE(saradc, AO_CLK_GATE0, 8);
+AXG_AO_GATE(mailbox, AO_CLK_GATE0_SP, 0);
+AXG_AO_GATE(m3, AO_CLK_GATE0_SP, 1);
+AXG_AO_GATE(ahb_sram, AO_CLK_GATE0_SP, 2);
+AXG_AO_GATE(rti, AO_CLK_GATE0_SP, 3);
+AXG_AO_GATE(m4_fclk, AO_CLK_GATE0_SP, 4);
+AXG_AO_GATE(m4_hclk, AO_CLK_GATE0_SP, 5);
+
+static struct clk_regmap g12a_aoclk_cts_oscin = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .bit_idx = 14,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_oscin",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static const struct meson_clk_dualdiv_param g12a_32k_div_table[] = {
+ {
+ .dual = 1,
+ .n1 = 733,
+ .m1 = 8,
+ .n2 = 732,
+ .m2 = 11,
+ }, {}
+};
+
+/* 32k_by_oscin clock */
+
+static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_32k_by_oscin_pre",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_oscin" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
+ .data = &(struct meson_clk_dualdiv_data){
+ .n1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 0,
+ .width = 12,
+ },
+ .n2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 12,
+ .width = 12,
+ },
+ .m1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .m2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 12,
+ .width = 12,
+ },
+ .dual = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .table = g12a_32k_div_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_32k_by_oscin_div",
+ .ops = &meson_clk_dualdiv_ops,
+ .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_pre" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTC_ALT_CLK_CNTL1,
+ .mask = 0x1,
+ .shift = 24,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_32k_by_oscin_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_div",
+ "g12a_ao_32k_by_oscin_pre" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_32k_by_oscin = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_32k_by_oscin",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* cec clock */
+
+static struct clk_regmap g12a_aoclk_cec_pre = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_CEC_CLK_CNTL_REG0,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cec_pre",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_oscin" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_cec_div = {
+ .data = &(struct meson_clk_dualdiv_data){
+ .n1 = {
+ .reg_off = AO_CEC_CLK_CNTL_REG0,
+ .shift = 0,
+ .width = 12,
+ },
+ .n2 = {
+ .reg_off = AO_CEC_CLK_CNTL_REG0,
+ .shift = 12,
+ .width = 12,
+ },
+ .m1 = {
+ .reg_off = AO_CEC_CLK_CNTL_REG1,
+ .shift = 0,
+ .width = 12,
+ },
+ .m2 = {
+ .reg_off = AO_CEC_CLK_CNTL_REG1,
+ .shift = 12,
+ .width = 12,
+ },
+ .dual = {
+ .reg_off = AO_CEC_CLK_CNTL_REG0,
+ .shift = 28,
+ .width = 1,
+ },
+ .table = g12a_32k_div_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cec_div",
+ .ops = &meson_clk_dualdiv_ops,
+ .parent_names = (const char *[]){ "g12a_ao_cec_pre" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_cec_sel = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_CEC_CLK_CNTL_REG1,
+ .mask = 0x1,
+ .shift = 24,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cec_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "g12a_ao_cec_div",
+ "g12a_ao_cec_pre" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_cec = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_CEC_CLK_CNTL_REG0,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cec",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "g12a_ao_cec_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x1,
+ .shift = 10,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_cts_rtc_oscin",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin",
+						  IN_PREFIX "ext-32k-0" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_clk81 = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x1,
+ .shift = 8,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_clk81",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
+ "g12a_ao_cts_rtc_oscin"},
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_saradc_mux = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_SAR_CLK,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_saradc_mux",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "g12a_ao_clk81" },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_saradc_div = {
+ .data = &(struct clk_regmap_div_data) {
+ .offset = AO_SAR_CLK,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_saradc_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "g12a_ao_saradc_mux" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_aoclk_saradc_gate = {
+ .data = &(struct clk_regmap_gate_data) {
+ .offset = AO_SAR_CLK,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "g12a_ao_saradc_gate",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "g12a_ao_saradc_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const unsigned int g12a_aoclk_reset[] = {
+ [RESET_AO_IR_IN] = 16,
+ [RESET_AO_UART] = 17,
+ [RESET_AO_I2C_M] = 18,
+ [RESET_AO_I2C_S] = 19,
+ [RESET_AO_SAR_ADC] = 20,
+ [RESET_AO_UART2] = 22,
+ [RESET_AO_IR_OUT] = 23,
+};
+
+static struct clk_regmap *g12a_aoclk_regmap[] = {
+ &g12a_aoclk_ahb,
+ &g12a_aoclk_ir_in,
+ &g12a_aoclk_i2c_m0,
+ &g12a_aoclk_i2c_s0,
+ &g12a_aoclk_uart,
+ &g12a_aoclk_prod_i2c,
+ &g12a_aoclk_uart2,
+ &g12a_aoclk_ir_out,
+ &g12a_aoclk_saradc,
+ &g12a_aoclk_mailbox,
+ &g12a_aoclk_m3,
+ &g12a_aoclk_ahb_sram,
+ &g12a_aoclk_rti,
+ &g12a_aoclk_m4_fclk,
+ &g12a_aoclk_m4_hclk,
+ &g12a_aoclk_cts_oscin,
+ &g12a_aoclk_32k_by_oscin_pre,
+ &g12a_aoclk_32k_by_oscin_div,
+ &g12a_aoclk_32k_by_oscin_sel,
+ &g12a_aoclk_32k_by_oscin,
+ &g12a_aoclk_cec_pre,
+ &g12a_aoclk_cec_div,
+ &g12a_aoclk_cec_sel,
+ &g12a_aoclk_cec,
+ &g12a_aoclk_cts_rtc_oscin,
+ &g12a_aoclk_clk81,
+ &g12a_aoclk_saradc_mux,
+ &g12a_aoclk_saradc_div,
+ &g12a_aoclk_saradc_gate,
+};
+
+static const struct clk_hw_onecell_data g12a_aoclk_onecell_data = {
+ .hws = {
+ [CLKID_AO_AHB] = &g12a_aoclk_ahb.hw,
+ [CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw,
+ [CLKID_AO_I2C_M0] = &g12a_aoclk_i2c_m0.hw,
+ [CLKID_AO_I2C_S0] = &g12a_aoclk_i2c_s0.hw,
+ [CLKID_AO_UART] = &g12a_aoclk_uart.hw,
+ [CLKID_AO_PROD_I2C] = &g12a_aoclk_prod_i2c.hw,
+ [CLKID_AO_UART2] = &g12a_aoclk_uart2.hw,
+ [CLKID_AO_IR_OUT] = &g12a_aoclk_ir_out.hw,
+ [CLKID_AO_SAR_ADC] = &g12a_aoclk_saradc.hw,
+ [CLKID_AO_MAILBOX] = &g12a_aoclk_mailbox.hw,
+ [CLKID_AO_M3] = &g12a_aoclk_m3.hw,
+ [CLKID_AO_AHB_SRAM] = &g12a_aoclk_ahb_sram.hw,
+ [CLKID_AO_RTI] = &g12a_aoclk_rti.hw,
+ [CLKID_AO_M4_FCLK] = &g12a_aoclk_m4_fclk.hw,
+ [CLKID_AO_M4_HCLK] = &g12a_aoclk_m4_hclk.hw,
+ [CLKID_AO_CLK81] = &g12a_aoclk_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &g12a_aoclk_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &g12a_aoclk_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &g12a_aoclk_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &g12a_aoclk_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &g12a_aoclk_32k_by_oscin_pre.hw,
+ [CLKID_AO_32K_DIV] = &g12a_aoclk_32k_by_oscin_div.hw,
+ [CLKID_AO_32K_SEL] = &g12a_aoclk_32k_by_oscin_sel.hw,
+ [CLKID_AO_32K] = &g12a_aoclk_32k_by_oscin.hw,
+ [CLKID_AO_CEC_PRE] = &g12a_aoclk_cec_pre.hw,
+ [CLKID_AO_CEC_DIV] = &g12a_aoclk_cec_div.hw,
+ [CLKID_AO_CEC_SEL] = &g12a_aoclk_cec_sel.hw,
+ [CLKID_AO_CEC] = &g12a_aoclk_cec.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &g12a_aoclk_cts_rtc_oscin.hw,
+ },
+ .num = NR_CLKS,
+};
+
+static const struct meson_aoclk_input g12a_aoclk_inputs[] = {
+ { .name = "xtal", .required = true },
+ { .name = "mpeg-clk", .required = true },
+ { .name = "ext-32k-0", .required = false },
+};
+
+static const struct meson_aoclk_data g12a_aoclkc_data = {
+ .reset_reg = AO_RTI_GEN_CNTL_REG0,
+ .num_reset = ARRAY_SIZE(g12a_aoclk_reset),
+ .reset = g12a_aoclk_reset,
+ .num_clks = ARRAY_SIZE(g12a_aoclk_regmap),
+ .clks = g12a_aoclk_regmap,
+ .hw_data = &g12a_aoclk_onecell_data,
+ .inputs = g12a_aoclk_inputs,
+ .num_inputs = ARRAY_SIZE(g12a_aoclk_inputs),
+ .input_prefix = IN_PREFIX,
+};
+
+static const struct of_device_id g12a_aoclkc_match_table[] = {
+ {
+ .compatible = "amlogic,meson-g12a-aoclkc",
+ .data = &g12a_aoclkc_data,
+ },
+ { }
+};
+
+static struct platform_driver g12a_aoclkc_driver = {
+ .probe = meson_aoclkc_probe,
+ .driver = {
+ .name = "g12a-aoclkc",
+ .of_match_table = g12a_aoclkc_match_table,
+ },
+};
+
+builtin_platform_driver(g12a_aoclkc_driver);
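
meson_aoclkc_probe() itself is outside this diff; given the inputs[] table and input_prefix above, it presumably registers one pass-through clock per input so that parents such as IN_PREFIX "xtal" resolve. A rough sketch of that loop (details of the real helper in meson-aoclk.c may differ):

	for (i = 0; i < data->num_inputs; i++) {
		const struct meson_aoclk_input *in = &data->inputs[i];
		char *str = kasprintf(GFP_KERNEL, "%s%s",
				      data->input_prefix, in->name);

		if (!str)
			return -ENOMEM;

		hw = meson_clk_hw_register_input(dev, in->name, str, 0);
		kfree(str);

		/* Missing optional inputs, like "ext-32k-0", are skipped */
		if (IS_ERR(hw) && in->required)
			return PTR_ERR(hw);
	}
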
diff --git a/drivers/clk/meson/g12a-aoclk.h b/drivers/clk/meson/g12a-aoclk.h
new file mode 100644
index 000000000000..04b0d5506641
--- /dev/null
+++ b/drivers/clk/meson/g12a-aoclk.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __G12A_AOCLKC_H
+#define __G12A_AOCLKC_H
+
+/*
+ * CLKID index values
+ *
+ * These indices are entirely contrived and do not map onto the hardware.
+ * It has now been decided to expose everything by default in the DT header:
+ * include/dt-bindings/clock/g12a-aoclkc.h. Only the clock ids we don't want
+ * to expose, such as the internal muxes and dividers of composite clocks,
+ * will remain defined here.
+ */
+#define CLKID_AO_SAR_ADC_SEL 16
+#define CLKID_AO_SAR_ADC_DIV 17
+#define CLKID_AO_CTS_OSCIN 19
+#define CLKID_AO_32K_PRE 20
+#define CLKID_AO_32K_DIV 21
+#define CLKID_AO_32K_SEL 22
+#define CLKID_AO_CEC_PRE 24
+#define CLKID_AO_CEC_DIV 25
+#define CLKID_AO_CEC_SEL 26
+
+#define NR_CLKS 29
+
+#include <dt-bindings/clock/g12a-aoclkc.h>
+#include <dt-bindings/reset/g12a-aoclkc.h>
+
+#endif /* __G12A_AOCLKC_H */
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
new file mode 100644
index 000000000000..f7b11e1eeebe
--- /dev/null
+++ b/drivers/clk/meson/g12a.c
@@ -0,0 +1,2359 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson-G12A Clock Controller Driver
+ *
+ * Copyright (c) 2016 Baylibre SAS.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-input.h"
+#include "clk-mpll.h"
+#include "clk-pll.h"
+#include "clk-regmap.h"
+#include "vid-pll-div.h"
+#include "meson-eeclk.h"
+#include "g12a.h"
+
+static DEFINE_SPINLOCK(meson_clk_lock);
+
+static struct clk_regmap g12a_fixed_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_FIX_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_FIX_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fixed_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_FIX_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ /*
+ * This clock won't ever change at runtime so
+ * CLK_SET_RATE_PARENT is not required
+ */
+ },
+};
+
+/*
+ * Internal sys pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_sys_init_regs[] = {
+ { .reg = HHI_SYS_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_SYS_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_SYS_PLL_CNTL3, .def = 0x48681c00 },
+ { .reg = HHI_SYS_PLL_CNTL4, .def = 0x88770290 },
+ { .reg = HHI_SYS_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_SYS_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_sys_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .l = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_SYS_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .init_regs = g12a_sys_init_regs,
+ .init_count = ARRAY_SIZE(g12a_sys_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_sys_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "sys_pll_dco" },
+ .num_parents = 1,
+ },
+};
+
+static const struct pll_mult_range g12a_gp0_pll_mult_range = {
+ .min = 55,
+ .max = 255,
+};
+
+/*
+ * Internal gp0 pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_gp0_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_gp0_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_gp0_init_regs,
+ .init_count = ARRAY_SIZE(g12a_gp0_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_gp0_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP0_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gp0_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/*
+ * Internal hifi pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_hifi_init_regs[] = {
+ { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
+ { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
+ { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_hifi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HIFI_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_hifi_init_regs,
+ .init_count = ARRAY_SIZE(g12a_hifi_init_regs),
+ .flags = CLK_MESON_PLL_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_hifi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HIFI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "hifi_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL1,
+ .shift = 0,
+ .width = 16,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 30,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ /*
+		 * The display driver directly handles the hdmi pll registers
+		 * at the moment; we need NOCACHE to keep our view of the
+		 * clock as accurate as possible.
+ */
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od2 = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 18,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od2",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_od" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 20,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_od2" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div3_div = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 20,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div3_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div4_div = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 21,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div4_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 22,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div7_div = {
+ .mult = 1,
+ .div = 7,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div7_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div2p5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div2p5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2p5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_mpll_50m_div = {
+ .mult = 1,
+ .div = 80,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_50m_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll_50m = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_FIX_PLL_CNTL3,
+ .mask = 0x1,
+ .shift = 5,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_50m",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "mpll_50m_div" },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_fixed_factor g12a_mpll_prediv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL1,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL1,
+ .shift = 30,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL1,
+ .shift = 20,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL1,
+ .shift = 29,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL1,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mpll1_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL3,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL3,
+ .shift = 30,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL3,
+ .shift = 20,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL3,
+ .shift = 29,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL3,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mpll2_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL5,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL5,
+ .shift = 30,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL5,
+ .shift = 20,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL5,
+ .shift = 29,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll2_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL5,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mpll3_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 30,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 20,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 29,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll3_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL7,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll3_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const char * const clk81_parent_names[] = {
+ IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
+ "fclk_div3", "fclk_div5"
+};
+
+static struct clk_regmap g12a_mpeg_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .table = mux_table_clk81,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpeg_clk_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = clk81_parent_names,
+ .num_parents = ARRAY_SIZE(clk81_parent_names),
+ },
+};
+
+static struct clk_regmap g12a_mpeg_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpeg_clk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mpeg_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_clk81 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "clk81",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpeg_clk_div" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+ },
+};
+
+static const char * const g12a_sd_emmc_clk0_parent_names[] = {
+ IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
+
+ /*
+ * Following these parent clocks, we should also have had mpll2, mpll3
+ * and gp0_pll, but those clocks are too precious to be used here. All
+ * the necessary rates for MMC and NAND operation can be achieved using
+ * g12a_ee_core or fclk_div clocks.
+ */
+};
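+
+/*
+ * Rough sanity check of the claim above, assuming the usual G12A
+ * rates (2GHz fixed_pll, hence 1GHz fclk_div2, and a 24MHz xtal):
+ * the 7-bit dividers below give e.g. 1GHz / 5 = 200MHz for eMMC
+ * HS200, while the ~400kHz identification clock is 24MHz / 60
+ * taken from the xtal input.
+ */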
+
+/* SDIO clock */
+static struct clk_regmap g12a_sd_emmc_a_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_a_clk0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_sd_emmc_clk0_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_a_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_a_clk0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_a_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sd_emmc_a_clk0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* SDcard clock */
+static struct clk_regmap g12a_sd_emmc_b_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_b_clk0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_sd_emmc_clk0_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_b_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_b_clk0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_b_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sd_emmc_b_clk0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* EMMC/NAND clock */
+static struct clk_regmap g12a_sd_emmc_c_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_c_clk0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_sd_emmc_clk0_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_c_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_c_clk0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_sd_emmc_c_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sd_emmc_c_clk0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* VPU Clock */
+
+static const char * const g12a_vpu_parent_names[] = {
+ "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
+ "mpll1", "vid_pll", "hifi_pll", "gp0_pll",
+};
+
+static struct clk_regmap g12a_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vpu_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vpu_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vpu_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vpu_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vpu_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vpu_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vpu_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vpu_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vpu_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vpu_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vpu = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bit 31 selects from 2 possible parents:
+ * vpu_0 or vpu_1
+ */
+ .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+/* VAPB Clock */
+
+static const char * const g12a_vapb_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
+ "mpll1", "vid_pll", "mpll2", "fclk_div2p5",
+};
+
+static struct clk_regmap g12a_vapb_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vapb_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vapb_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vapb_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vapb_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vapb_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vapb_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vapb_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vapb_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bit 31 selects from 2 possible parents:
+ * vapb_0 or vapb_1
+ */
+ .parent_names = (const char *[]){ "vapb_0", "vapb_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_vapb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vapb_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* Video Clocks */
+
+static struct clk_regmap g12a_vid_pll_div = {
+ .data = &(struct meson_vid_pll_div_data){
+ .val = {
+ .reg_off = HHI_VID_PLL_CLK_DIV,
+ .shift = 0,
+ .width = 15,
+ },
+ .sel = {
+ .reg_off = HHI_VID_PLL_CLK_DIV,
+ .shift = 16,
+ .width = 2,
+ },
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vid_pll_div",
+ .ops = &meson_vid_pll_div_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const char * const g12a_vid_pll_parent_names[] = { "vid_pll_div",
+ "hdmi_pll" };
+
+static struct clk_regmap g12a_vid_pll_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_PLL_CLK_DIV,
+ .mask = 0x1,
+ .shift = 18,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bit 18 selects from 2 possible parents:
+ * vid_pll_div or hdmi_pll
+ */
+ .parent_names = g12a_vid_pll_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vid_pll = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_PLL_CLK_DIV,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vid_pll",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vid_pll_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static const char * const g12a_vclk_parent_names[] = {
+ "vid_pll", "gp0_pll", "hifi_pll", "mpll1", "fclk_div3", "fclk_div4",
+ "fclk_div5", "fclk_div7"
+};
+
+static struct clk_regmap g12a_vclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vclk_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vclk_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vclk_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vclk_input" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vclk2_input" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_vclk2_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div2_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div4_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div6_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div12_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk2_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div2_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk2_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div4_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk2_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div6_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_vclk2_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div12_en" },
+ .num_parents = 1,
+ },
+};
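+
+/*
+ * Note on the pattern above: each fixed-factor divider is paired with
+ * its *_en gate on the same counter, so e.g. "vclk_div6" only yields
+ * vclk / 6 once "vclk_div6_en" has been enabled.
+ */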
+
+static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const char * const g12a_cts_parent_names[] = {
+ "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
+ "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
+ "vclk2_div6", "vclk2_div12"
+};
+
+static struct clk_regmap g12a_cts_enci_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 28,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_enci_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_cts_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_cts_encp_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 20,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encp_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_cts_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_cts_vdac_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 28,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_vdac_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_cts_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+/* TOFIX: add support for cts_tcon */
+static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const char * const g12a_cts_hdmi_tx_parent_names[] = {
+ "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
+ "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
+ "vclk2_div6", "vclk2_div12"
+};
+
+static struct clk_regmap g12a_hdmi_tx_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 16,
+ .table = mux_table_hdmi_tx_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_tx_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_cts_hdmi_tx_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_cts_enci = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_enci",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_enci_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_cts_encp = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_encp",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_encp_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_cts_vdac = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_vdac",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_vdac_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_tx = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 5,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "hdmi_tx",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "hdmi_tx_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* HDMI Clocks */
+
+static const char * const g12a_hdmi_parent_names[] = {
+ IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
+};
+
+static struct clk_regmap g12a_hdmi_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_hdmi_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "hdmi_sel" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "hdmi",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "hdmi_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/*
+ * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
+ * muxed by a glitch-free switch.
+ */
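+
+/*
+ * A glitch-free rate change with such a scheme is typically done by
+ * programming the branch that is NOT currently feeding the IP, then
+ * flipping the safe mux. Sketch only, with hypothetical helpers:
+ *
+ *	if (active_branch == mali_0) {
+ *		configure(mali_1_sel, mali_1_div);	// idle branch
+ *		clk_prepare_enable(mali_1);
+ *		reparent(mali, mali_1);			// flip bit 31
+ *		clk_disable_unprepare(mali_0);
+ *	}
+ */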
+
+static const char * const g12a_mali_0_1_parent_names[] = {
+ IN_PREFIX "xtal", "gp0_pll", "hihi_pll", "fclk_div2p5",
+ "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7"
+};
+
+static struct clk_regmap g12a_mali_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_mali_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_mali_0_1_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mali_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mali_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_mali_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_mali_0_1_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mali_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mali_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mali_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const char * const g12a_mali_parent_names[] = {
+ "mali_0", "mali_1"
+};
+
+static struct clk_regmap g12a_mali = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_mali_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_mali_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+/* Everything Else (EE) domain gates */
+static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);
+static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1);
+static MESON_GATE(g12a_audio_locker, HHI_GCLK_MPEG0, 2);
+static MESON_GATE(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
+static MESON_GATE(g12a_eth_phy, HHI_GCLK_MPEG0, 4);
+static MESON_GATE(g12a_isa, HHI_GCLK_MPEG0, 5);
+static MESON_GATE(g12a_pl301, HHI_GCLK_MPEG0, 6);
+static MESON_GATE(g12a_periphs, HHI_GCLK_MPEG0, 7);
+static MESON_GATE(g12a_spicc_0, HHI_GCLK_MPEG0, 8);
+static MESON_GATE(g12a_i2c, HHI_GCLK_MPEG0, 9);
+static MESON_GATE(g12a_sana, HHI_GCLK_MPEG0, 10);
+static MESON_GATE(g12a_sd, HHI_GCLK_MPEG0, 11);
+static MESON_GATE(g12a_rng0, HHI_GCLK_MPEG0, 12);
+static MESON_GATE(g12a_uart0, HHI_GCLK_MPEG0, 13);
+static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
+static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
+static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
+static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
+static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
+static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
+static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
+static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
+
+static MESON_GATE(g12a_audio, HHI_GCLK_MPEG1, 0);
+static MESON_GATE(g12a_eth_core, HHI_GCLK_MPEG1, 3);
+static MESON_GATE(g12a_demux, HHI_GCLK_MPEG1, 4);
+static MESON_GATE(g12a_audio_ififo, HHI_GCLK_MPEG1, 11);
+static MESON_GATE(g12a_adc, HHI_GCLK_MPEG1, 13);
+static MESON_GATE(g12a_uart1, HHI_GCLK_MPEG1, 16);
+static MESON_GATE(g12a_g2d, HHI_GCLK_MPEG1, 20);
+static MESON_GATE(g12a_reset, HHI_GCLK_MPEG1, 23);
+static MESON_GATE(g12a_pcie_comb, HHI_GCLK_MPEG1, 24);
+static MESON_GATE(g12a_parser, HHI_GCLK_MPEG1, 25);
+static MESON_GATE(g12a_usb_general, HHI_GCLK_MPEG1, 26);
+static MESON_GATE(g12a_pcie_phy, HHI_GCLK_MPEG1, 27);
+static MESON_GATE(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29);
+
+static MESON_GATE(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1);
+static MESON_GATE(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
+static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3);
+static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4);
+static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6);
+static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
+static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11);
+static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15);
+static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25);
+static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30);
+
+static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1);
+static MESON_GATE(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2);
+static MESON_GATE(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3);
+static MESON_GATE(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4);
+static MESON_GATE(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5);
+static MESON_GATE(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6);
+static MESON_GATE(g12a_vclk2_other, HHI_GCLK_OTHER, 7);
+static MESON_GATE(g12a_vclk2_enci, HHI_GCLK_OTHER, 8);
+static MESON_GATE(g12a_vclk2_encp, HHI_GCLK_OTHER, 9);
+static MESON_GATE(g12a_dac_clk, HHI_GCLK_OTHER, 10);
+static MESON_GATE(g12a_aoclk_gate, HHI_GCLK_OTHER, 14);
+static MESON_GATE(g12a_iec958_gate, HHI_GCLK_OTHER, 16);
+static MESON_GATE(g12a_enc480p, HHI_GCLK_OTHER, 20);
+static MESON_GATE(g12a_rng1, HHI_GCLK_OTHER, 21);
+static MESON_GATE(g12a_vclk2_enct, HHI_GCLK_OTHER, 22);
+static MESON_GATE(g12a_vclk2_encl, HHI_GCLK_OTHER, 23);
+static MESON_GATE(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24);
+static MESON_GATE(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25);
+static MESON_GATE(g12a_vclk2_other1, HHI_GCLK_OTHER, 26);
+
+static MESON_GATE_RO(g12a_dma, HHI_GCLK_OTHER2, 0);
+static MESON_GATE_RO(g12a_efuse, HHI_GCLK_OTHER2, 1);
+static MESON_GATE_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2);
+static MESON_GATE_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3);
+static MESON_GATE_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4);
+
+/* Array of all clocks provided by this provider */
+static struct clk_hw_onecell_data g12a_hw_onecell_data = {
+ .hws = {
+ [CLKID_SYS_PLL] = &g12a_sys_pll.hw,
+ [CLKID_FIXED_PLL] = &g12a_fixed_pll.hw,
+ [CLKID_FCLK_DIV2] = &g12a_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &g12a_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &g12a_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &g12a_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
+ [CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
+ [CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
+ [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_CLK81] = &g12a_clk81.hw,
+ [CLKID_MPLL0] = &g12a_mpll0.hw,
+ [CLKID_MPLL1] = &g12a_mpll1.hw,
+ [CLKID_MPLL2] = &g12a_mpll2.hw,
+ [CLKID_MPLL3] = &g12a_mpll3.hw,
+ [CLKID_DDR] = &g12a_ddr.hw,
+ [CLKID_DOS] = &g12a_dos.hw,
+ [CLKID_AUDIO_LOCKER] = &g12a_audio_locker.hw,
+ [CLKID_MIPI_DSI_HOST] = &g12a_mipi_dsi_host.hw,
+ [CLKID_ETH_PHY] = &g12a_eth_phy.hw,
+ [CLKID_ISA] = &g12a_isa.hw,
+ [CLKID_PL301] = &g12a_pl301.hw,
+ [CLKID_PERIPHS] = &g12a_periphs.hw,
+ [CLKID_SPICC0] = &g12a_spicc_0.hw,
+ [CLKID_I2C] = &g12a_i2c.hw,
+ [CLKID_SANA] = &g12a_sana.hw,
+ [CLKID_SD] = &g12a_sd.hw,
+ [CLKID_RNG0] = &g12a_rng0.hw,
+ [CLKID_UART0] = &g12a_uart0.hw,
+ [CLKID_SPICC1] = &g12a_spicc_1.hw,
+ [CLKID_HIU_IFACE] = &g12a_hiu_reg.hw,
+ [CLKID_MIPI_DSI_PHY] = &g12a_mipi_dsi_phy.hw,
+ [CLKID_ASSIST_MISC] = &g12a_assist_misc.hw,
+ [CLKID_SD_EMMC_A] = &g12a_emmc_a.hw,
+ [CLKID_SD_EMMC_B] = &g12a_emmc_b.hw,
+ [CLKID_SD_EMMC_C] = &g12a_emmc_c.hw,
+ [CLKID_AUDIO_CODEC] = &g12a_audio_codec.hw,
+ [CLKID_AUDIO] = &g12a_audio.hw,
+ [CLKID_ETH] = &g12a_eth_core.hw,
+ [CLKID_DEMUX] = &g12a_demux.hw,
+ [CLKID_AUDIO_IFIFO] = &g12a_audio_ififo.hw,
+ [CLKID_ADC] = &g12a_adc.hw,
+ [CLKID_UART1] = &g12a_uart1.hw,
+ [CLKID_G2D] = &g12a_g2d.hw,
+ [CLKID_RESET] = &g12a_reset.hw,
+ [CLKID_PCIE_COMB] = &g12a_pcie_comb.hw,
+ [CLKID_PARSER] = &g12a_parser.hw,
+ [CLKID_USB] = &g12a_usb_general.hw,
+ [CLKID_PCIE_PHY] = &g12a_pcie_phy.hw,
+ [CLKID_AHB_ARB0] = &g12a_ahb_arb0.hw,
+ [CLKID_AHB_DATA_BUS] = &g12a_ahb_data_bus.hw,
+ [CLKID_AHB_CTRL_BUS] = &g12a_ahb_ctrl_bus.hw,
+ [CLKID_HTX_HDCP22] = &g12a_htx_hdcp22.hw,
+ [CLKID_HTX_PCLK] = &g12a_htx_pclk.hw,
+ [CLKID_BT656] = &g12a_bt656.hw,
+ [CLKID_USB1_DDR_BRIDGE] = &g12a_usb1_to_ddr.hw,
+ [CLKID_MMC_PCLK] = &g12a_mmc_pclk.hw,
+ [CLKID_UART2] = &g12a_uart2.hw,
+ [CLKID_VPU_INTR] = &g12a_vpu_intr.hw,
+ [CLKID_GIC] = &g12a_gic.hw,
+ [CLKID_SD_EMMC_A_CLK0_SEL] = &g12a_sd_emmc_a_clk0_sel.hw,
+ [CLKID_SD_EMMC_A_CLK0_DIV] = &g12a_sd_emmc_a_clk0_div.hw,
+ [CLKID_SD_EMMC_A_CLK0] = &g12a_sd_emmc_a_clk0.hw,
+ [CLKID_SD_EMMC_B_CLK0_SEL] = &g12a_sd_emmc_b_clk0_sel.hw,
+ [CLKID_SD_EMMC_B_CLK0_DIV] = &g12a_sd_emmc_b_clk0_div.hw,
+ [CLKID_SD_EMMC_B_CLK0] = &g12a_sd_emmc_b_clk0.hw,
+ [CLKID_SD_EMMC_C_CLK0_SEL] = &g12a_sd_emmc_c_clk0_sel.hw,
+ [CLKID_SD_EMMC_C_CLK0_DIV] = &g12a_sd_emmc_c_clk0_div.hw,
+ [CLKID_SD_EMMC_C_CLK0] = &g12a_sd_emmc_c_clk0.hw,
+ [CLKID_MPLL0_DIV] = &g12a_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &g12a_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &g12a_mpll2_div.hw,
+ [CLKID_MPLL3_DIV] = &g12a_mpll3_div.hw,
+ [CLKID_FCLK_DIV2_DIV] = &g12a_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &g12a_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &g12a_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &g12a_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &g12a_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV2P5_DIV] = &g12a_fclk_div2p5_div.hw,
+ [CLKID_HIFI_PLL] = &g12a_hifi_pll.hw,
+ [CLKID_VCLK2_VENCI0] = &g12a_vclk2_venci0.hw,
+ [CLKID_VCLK2_VENCI1] = &g12a_vclk2_venci1.hw,
+ [CLKID_VCLK2_VENCP0] = &g12a_vclk2_vencp0.hw,
+ [CLKID_VCLK2_VENCP1] = &g12a_vclk2_vencp1.hw,
+ [CLKID_VCLK2_VENCT0] = &g12a_vclk2_venct0.hw,
+ [CLKID_VCLK2_VENCT1] = &g12a_vclk2_venct1.hw,
+ [CLKID_VCLK2_OTHER] = &g12a_vclk2_other.hw,
+ [CLKID_VCLK2_ENCI] = &g12a_vclk2_enci.hw,
+ [CLKID_VCLK2_ENCP] = &g12a_vclk2_encp.hw,
+ [CLKID_DAC_CLK] = &g12a_dac_clk.hw,
+ [CLKID_AOCLK] = &g12a_aoclk_gate.hw,
+ [CLKID_IEC958] = &g12a_iec958_gate.hw,
+ [CLKID_ENC480P] = &g12a_enc480p.hw,
+ [CLKID_RNG1] = &g12a_rng1.hw,
+ [CLKID_VCLK2_ENCT] = &g12a_vclk2_enct.hw,
+ [CLKID_VCLK2_ENCL] = &g12a_vclk2_encl.hw,
+ [CLKID_VCLK2_VENCLMMC] = &g12a_vclk2_venclmmc.hw,
+ [CLKID_VCLK2_VENCL] = &g12a_vclk2_vencl.hw,
+ [CLKID_VCLK2_OTHER1] = &g12a_vclk2_other1.hw,
+ [CLKID_FIXED_PLL_DCO] = &g12a_fixed_pll_dco.hw,
+ [CLKID_SYS_PLL_DCO] = &g12a_sys_pll_dco.hw,
+ [CLKID_GP0_PLL_DCO] = &g12a_gp0_pll_dco.hw,
+ [CLKID_HIFI_PLL_DCO] = &g12a_hifi_pll_dco.hw,
+ [CLKID_DMA] = &g12a_dma.hw,
+ [CLKID_EFUSE] = &g12a_efuse.hw,
+ [CLKID_ROM_BOOT] = &g12a_rom_boot.hw,
+ [CLKID_RESET_SEC] = &g12a_reset_sec.hw,
+ [CLKID_SEC_AHB_APB3] = &g12a_sec_ahb_apb3.hw,
+ [CLKID_MPLL_PREDIV] = &g12a_mpll_prediv.hw,
+ [CLKID_VPU_0_SEL] = &g12a_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &g12a_vpu_0_div.hw,
+ [CLKID_VPU_0] = &g12a_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &g12a_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &g12a_vpu_1_div.hw,
+ [CLKID_VPU_1] = &g12a_vpu_1.hw,
+ [CLKID_VPU] = &g12a_vpu.hw,
+ [CLKID_VAPB_0_SEL] = &g12a_vapb_0_sel.hw,
+ [CLKID_VAPB_0_DIV] = &g12a_vapb_0_div.hw,
+ [CLKID_VAPB_0] = &g12a_vapb_0.hw,
+ [CLKID_VAPB_1_SEL] = &g12a_vapb_1_sel.hw,
+ [CLKID_VAPB_1_DIV] = &g12a_vapb_1_div.hw,
+ [CLKID_VAPB_1] = &g12a_vapb_1.hw,
+ [CLKID_VAPB_SEL] = &g12a_vapb_sel.hw,
+ [CLKID_VAPB] = &g12a_vapb.hw,
+ [CLKID_HDMI_PLL_DCO] = &g12a_hdmi_pll_dco.hw,
+ [CLKID_HDMI_PLL_OD] = &g12a_hdmi_pll_od.hw,
+ [CLKID_HDMI_PLL_OD2] = &g12a_hdmi_pll_od2.hw,
+ [CLKID_HDMI_PLL] = &g12a_hdmi_pll.hw,
+ [CLKID_VID_PLL_DIV] = &g12a_vid_pll_div.hw,
+ [CLKID_VID_PLL_SEL] = &g12a_vid_pll_sel.hw,
+ [CLKID_VID_PLL] = &g12a_vid_pll.hw,
+ [CLKID_VCLK_SEL] = &g12a_vclk_sel.hw,
+ [CLKID_VCLK2_SEL] = &g12a_vclk2_sel.hw,
+ [CLKID_VCLK_INPUT] = &g12a_vclk_input.hw,
+ [CLKID_VCLK2_INPUT] = &g12a_vclk2_input.hw,
+ [CLKID_VCLK_DIV] = &g12a_vclk_div.hw,
+ [CLKID_VCLK2_DIV] = &g12a_vclk2_div.hw,
+ [CLKID_VCLK] = &g12a_vclk.hw,
+ [CLKID_VCLK2] = &g12a_vclk2.hw,
+ [CLKID_VCLK_DIV1] = &g12a_vclk_div1.hw,
+ [CLKID_VCLK_DIV2_EN] = &g12a_vclk_div2_en.hw,
+ [CLKID_VCLK_DIV4_EN] = &g12a_vclk_div4_en.hw,
+ [CLKID_VCLK_DIV6_EN] = &g12a_vclk_div6_en.hw,
+ [CLKID_VCLK_DIV12_EN] = &g12a_vclk_div12_en.hw,
+ [CLKID_VCLK2_DIV1] = &g12a_vclk2_div1.hw,
+ [CLKID_VCLK2_DIV2_EN] = &g12a_vclk2_div2_en.hw,
+ [CLKID_VCLK2_DIV4_EN] = &g12a_vclk2_div4_en.hw,
+ [CLKID_VCLK2_DIV6_EN] = &g12a_vclk2_div6_en.hw,
+ [CLKID_VCLK2_DIV12_EN] = &g12a_vclk2_div12_en.hw,
+ [CLKID_VCLK_DIV2] = &g12a_vclk_div2.hw,
+ [CLKID_VCLK_DIV4] = &g12a_vclk_div4.hw,
+ [CLKID_VCLK_DIV6] = &g12a_vclk_div6.hw,
+ [CLKID_VCLK_DIV12] = &g12a_vclk_div12.hw,
+ [CLKID_VCLK2_DIV2] = &g12a_vclk2_div2.hw,
+ [CLKID_VCLK2_DIV4] = &g12a_vclk2_div4.hw,
+ [CLKID_VCLK2_DIV6] = &g12a_vclk2_div6.hw,
+ [CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw,
+ [CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw,
+ [CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw,
+ [CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw,
+ [CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw,
+ [CLKID_CTS_ENCI] = &g12a_cts_enci.hw,
+ [CLKID_CTS_ENCP] = &g12a_cts_encp.hw,
+ [CLKID_CTS_VDAC] = &g12a_cts_vdac.hw,
+ [CLKID_HDMI_TX] = &g12a_hdmi_tx.hw,
+ [CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw,
+ [CLKID_HDMI_DIV] = &g12a_hdmi_div.hw,
+ [CLKID_HDMI] = &g12a_hdmi.hw,
+ [CLKID_MALI_0_SEL] = &g12a_mali_0_sel.hw,
+ [CLKID_MALI_0_DIV] = &g12a_mali_0_div.hw,
+ [CLKID_MALI_0] = &g12a_mali_0.hw,
+ [CLKID_MALI_1_SEL] = &g12a_mali_1_sel.hw,
+ [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw,
+ [CLKID_MALI_1] = &g12a_mali_1.hw,
+ [CLKID_MALI] = &g12a_mali.hw,
+ [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
+ [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
+ [NR_CLKS] = NULL,
+ },
+ .num = NR_CLKS,
+};
+
+/* Convenience table to populate regmap in .probe */
+static struct clk_regmap *const g12a_clk_regmaps[] = {
+ &g12a_clk81,
+ &g12a_dos,
+ &g12a_ddr,
+ &g12a_audio_locker,
+ &g12a_mipi_dsi_host,
+ &g12a_eth_phy,
+ &g12a_isa,
+ &g12a_pl301,
+ &g12a_periphs,
+ &g12a_spicc_0,
+ &g12a_i2c,
+ &g12a_sana,
+ &g12a_sd,
+ &g12a_rng0,
+ &g12a_uart0,
+ &g12a_spicc_1,
+ &g12a_hiu_reg,
+ &g12a_mipi_dsi_phy,
+ &g12a_assist_misc,
+ &g12a_emmc_a,
+ &g12a_emmc_b,
+ &g12a_emmc_c,
+ &g12a_audio_codec,
+ &g12a_audio,
+ &g12a_eth_core,
+ &g12a_demux,
+ &g12a_audio_ififo,
+ &g12a_adc,
+ &g12a_uart1,
+ &g12a_g2d,
+ &g12a_reset,
+ &g12a_pcie_comb,
+ &g12a_parser,
+ &g12a_usb_general,
+ &g12a_pcie_phy,
+ &g12a_ahb_arb0,
+ &g12a_ahb_data_bus,
+ &g12a_ahb_ctrl_bus,
+ &g12a_htx_hdcp22,
+ &g12a_htx_pclk,
+ &g12a_bt656,
+ &g12a_usb1_to_ddr,
+ &g12a_mmc_pclk,
+ &g12a_uart2,
+ &g12a_vpu_intr,
+ &g12a_gic,
+ &g12a_sd_emmc_a_clk0,
+ &g12a_sd_emmc_b_clk0,
+ &g12a_sd_emmc_c_clk0,
+ &g12a_mpeg_clk_div,
+ &g12a_sd_emmc_a_clk0_div,
+ &g12a_sd_emmc_b_clk0_div,
+ &g12a_sd_emmc_c_clk0_div,
+ &g12a_mpeg_clk_sel,
+ &g12a_sd_emmc_a_clk0_sel,
+ &g12a_sd_emmc_b_clk0_sel,
+ &g12a_sd_emmc_c_clk0_sel,
+ &g12a_mpll0,
+ &g12a_mpll1,
+ &g12a_mpll2,
+ &g12a_mpll3,
+ &g12a_mpll0_div,
+ &g12a_mpll1_div,
+ &g12a_mpll2_div,
+ &g12a_mpll3_div,
+ &g12a_fixed_pll,
+ &g12a_sys_pll,
+ &g12a_gp0_pll,
+ &g12a_hifi_pll,
+ &g12a_vclk2_venci0,
+ &g12a_vclk2_venci1,
+ &g12a_vclk2_vencp0,
+ &g12a_vclk2_vencp1,
+ &g12a_vclk2_venct0,
+ &g12a_vclk2_venct1,
+ &g12a_vclk2_other,
+ &g12a_vclk2_enci,
+ &g12a_vclk2_encp,
+ &g12a_dac_clk,
+ &g12a_aoclk_gate,
+ &g12a_iec958_gate,
+ &g12a_enc480p,
+ &g12a_rng1,
+ &g12a_vclk2_enct,
+ &g12a_vclk2_encl,
+ &g12a_vclk2_venclmmc,
+ &g12a_vclk2_vencl,
+ &g12a_vclk2_other1,
+ &g12a_fixed_pll_dco,
+ &g12a_sys_pll_dco,
+ &g12a_gp0_pll_dco,
+ &g12a_hifi_pll_dco,
+ &g12a_fclk_div2,
+ &g12a_fclk_div3,
+ &g12a_fclk_div4,
+ &g12a_fclk_div5,
+ &g12a_fclk_div7,
+ &g12a_fclk_div2p5,
+ &g12a_dma,
+ &g12a_efuse,
+ &g12a_rom_boot,
+ &g12a_reset_sec,
+ &g12a_sec_ahb_apb3,
+ &g12a_vpu_0_sel,
+ &g12a_vpu_0_div,
+ &g12a_vpu_0,
+ &g12a_vpu_1_sel,
+ &g12a_vpu_1_div,
+ &g12a_vpu_1,
+ &g12a_vpu,
+ &g12a_vapb_0_sel,
+ &g12a_vapb_0_div,
+ &g12a_vapb_0,
+ &g12a_vapb_1_sel,
+ &g12a_vapb_1_div,
+ &g12a_vapb_1,
+ &g12a_vapb_sel,
+ &g12a_vapb,
+ &g12a_hdmi_pll_dco,
+ &g12a_hdmi_pll_od,
+ &g12a_hdmi_pll_od2,
+ &g12a_hdmi_pll,
+ &g12a_vid_pll_div,
+ &g12a_vid_pll_sel,
+ &g12a_vid_pll,
+ &g12a_vclk_sel,
+ &g12a_vclk2_sel,
+ &g12a_vclk_input,
+ &g12a_vclk2_input,
+ &g12a_vclk_div,
+ &g12a_vclk2_div,
+ &g12a_vclk,
+ &g12a_vclk2,
+ &g12a_vclk_div1,
+ &g12a_vclk_div2_en,
+ &g12a_vclk_div4_en,
+ &g12a_vclk_div6_en,
+ &g12a_vclk_div12_en,
+ &g12a_vclk2_div1,
+ &g12a_vclk2_div2_en,
+ &g12a_vclk2_div4_en,
+ &g12a_vclk2_div6_en,
+ &g12a_vclk2_div12_en,
+ &g12a_cts_enci_sel,
+ &g12a_cts_encp_sel,
+ &g12a_cts_vdac_sel,
+ &g12a_hdmi_tx_sel,
+ &g12a_cts_enci,
+ &g12a_cts_encp,
+ &g12a_cts_vdac,
+ &g12a_hdmi_tx,
+ &g12a_hdmi_sel,
+ &g12a_hdmi_div,
+ &g12a_hdmi,
+ &g12a_mali_0_sel,
+ &g12a_mali_0_div,
+ &g12a_mali_0,
+ &g12a_mali_1_sel,
+ &g12a_mali_1_div,
+ &g12a_mali_1,
+ &g12a_mali,
+ &g12a_mpll_50m,
+};
+
+static const struct meson_eeclkc_data g12a_clkc_data = {
+ .regmap_clks = g12a_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+ .hw_onecell_data = &g12a_hw_onecell_data
+};
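+
+/*
+ * For reference, meson_eeclkc_probe() consumes this data roughly as
+ * follows (simplified sketch, not verbatim from meson-eeclk.c):
+ *
+ *	for (i = 0; i < data->regmap_clk_num; i++)
+ *		data->regmap_clks[i]->map = map;
+ *
+ * before registering each hw of hw_onecell_data with the framework.
+ */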
+
+static const struct of_device_id clkc_match_table[] = {
+ { .compatible = "amlogic,g12a-clkc", .data = &g12a_clkc_data },
+ {}
+};
+
+static struct platform_driver g12a_driver = {
+ .probe = meson_eeclkc_probe,
+ .driver = {
+ .name = "g12a-clkc",
+ .of_match_table = clkc_match_table,
+ },
+};
+
+builtin_platform_driver(g12a_driver);
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
new file mode 100644
index 000000000000..f399dfe1401c
--- /dev/null
+++ b/drivers/clk/meson/g12a.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2016 Amlogic, Inc.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ *
+ */
+#ifndef __G12A_H
+#define __G12A_H
+
+/*
+ * Clock controller register offsets
+ *
+ * Register offsets from the data sheet must be multiplied by 4 before
+ * adding them to the base address to get the right value.
+ */
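+/*
+ * For example (offsets taken from the list below, not re-checked
+ * against the data sheet): HHI_GP0_PLL_CNTL0 is at data sheet
+ * offset 0x10, and 0x10 << 2 gives the 0x040 used here.
+ */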
+#define HHI_MIPI_CNTL0 0x000
+#define HHI_MIPI_CNTL1 0x004
+#define HHI_MIPI_CNTL2 0x008
+#define HHI_MIPI_STS 0x00C
+#define HHI_GP0_PLL_CNTL0 0x040
+#define HHI_GP0_PLL_CNTL1 0x044
+#define HHI_GP0_PLL_CNTL2 0x048
+#define HHI_GP0_PLL_CNTL3 0x04C
+#define HHI_GP0_PLL_CNTL4 0x050
+#define HHI_GP0_PLL_CNTL5 0x054
+#define HHI_GP0_PLL_CNTL6 0x058
+#define HHI_GP0_PLL_STS 0x05C
+#define HHI_PCIE_PLL_CNTL0 0x098
+#define HHI_PCIE_PLL_CNTL1 0x09C
+#define HHI_PCIE_PLL_CNTL2 0x0A0
+#define HHI_PCIE_PLL_CNTL3 0x0A4
+#define HHI_PCIE_PLL_CNTL4 0x0A8
+#define HHI_PCIE_PLL_CNTL5 0x0AC
+#define HHI_PCIE_PLL_STS 0x0B8
+#define HHI_HIFI_PLL_CNTL0 0x0D8
+#define HHI_HIFI_PLL_CNTL1 0x0DC
+#define HHI_HIFI_PLL_CNTL2 0x0E0
+#define HHI_HIFI_PLL_CNTL3 0x0E4
+#define HHI_HIFI_PLL_CNTL4 0x0E8
+#define HHI_HIFI_PLL_CNTL5 0x0EC
+#define HHI_HIFI_PLL_CNTL6 0x0F0
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12C
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_OTHER2 0x154
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_TS_CLK_CNTL 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1A0
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLKC_CNTL 0x1b4
+#define HHI_VPU_CLK_CNTL 0x1BC
+#define HHI_HDMI_CLK_CNTL 0x1CC
+#define HHI_VDEC_CLK_CNTL 0x1E0
+#define HHI_VDEC2_CLK_CNTL 0x1E4
+#define HHI_VDEC3_CLK_CNTL 0x1E8
+#define HHI_VDEC4_CLK_CNTL 0x1EC
+#define HHI_HDCP22_CLK_CNTL 0x1F0
+#define HHI_VAPBCLK_CNTL 0x1F4
+#define HHI_VPU_CLKB_CNTL 0x20C
+#define HHI_GEN_CLK_CNTL 0x228
+#define HHI_VDIN_MEAS_CLK_CNTL 0x250
+#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
+#define HHI_NAND_CLK_CNTL 0x25C
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+#define HHI_MPLL_CNTL0 0x278
+#define HHI_MPLL_CNTL1 0x27C
+#define HHI_MPLL_CNTL2 0x280
+#define HHI_MPLL_CNTL3 0x284
+#define HHI_MPLL_CNTL4 0x288
+#define HHI_MPLL_CNTL5 0x28c
+#define HHI_MPLL_CNTL6 0x290
+#define HHI_MPLL_CNTL7 0x294
+#define HHI_MPLL_CNTL8 0x298
+#define HHI_FIX_PLL_CNTL0 0x2A0
+#define HHI_FIX_PLL_CNTL1 0x2A4
+#define HHI_FIX_PLL_CNTL3 0x2AC
+#define HHI_SYS_PLL_CNTL0 0x2f4
+#define HHI_SYS_PLL_CNTL1 0x2f8
+#define HHI_SYS_PLL_CNTL2 0x2fc
+#define HHI_SYS_PLL_CNTL3 0x300
+#define HHI_SYS_PLL_CNTL4 0x304
+#define HHI_SYS_PLL_CNTL5 0x308
+#define HHI_SYS_PLL_CNTL6 0x30c
+#define HHI_HDMI_PLL_CNTL0 0x320
+#define HHI_HDMI_PLL_CNTL1 0x324
+#define HHI_HDMI_PLL_CNTL2 0x328
+#define HHI_HDMI_PLL_CNTL3 0x32c
+#define HHI_HDMI_PLL_CNTL4 0x330
+#define HHI_HDMI_PLL_CNTL5 0x334
+#define HHI_HDMI_PLL_CNTL6 0x338
+#define HHI_SPICC_CLK_CNTL 0x3dc
+
+/*
+ * CLKID index values
+ *
+ * These indices are entirely contrived and do not map onto the hardware.
+ * It has now been decided to expose everything by default in the DT header:
+ * include/dt-bindings/clock/g12a-clkc.h. Only the clock IDs we don't want
+ * to expose, such as the internal muxes and dividers of composite clocks,
+ * will remain defined here.
+ */
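+/*
+ * A consumer DT node references the exposed IDs through that binding
+ * header, e.g. (illustrative snippet):
+ *
+ *	clocks = <&clkc CLKID_SD_EMMC_B_CLK0>;
+ */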
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
+#define CLKID_SD_EMMC_A_CLK0_SEL 63
+#define CLKID_SD_EMMC_A_CLK0_DIV 64
+#define CLKID_SD_EMMC_B_CLK0_SEL 65
+#define CLKID_SD_EMMC_B_CLK0_DIV 66
+#define CLKID_SD_EMMC_C_CLK0_SEL 67
+#define CLKID_SD_EMMC_C_CLK0_DIV 68
+#define CLKID_MPLL0_DIV 69
+#define CLKID_MPLL1_DIV 70
+#define CLKID_MPLL2_DIV 71
+#define CLKID_MPLL3_DIV 72
+#define CLKID_MPLL_PREDIV 73
+#define CLKID_FCLK_DIV2_DIV 75
+#define CLKID_FCLK_DIV3_DIV 76
+#define CLKID_FCLK_DIV4_DIV 77
+#define CLKID_FCLK_DIV5_DIV 78
+#define CLKID_FCLK_DIV7_DIV 79
+#define CLKID_FCLK_DIV2P5_DIV 100
+#define CLKID_FIXED_PLL_DCO 101
+#define CLKID_SYS_PLL_DCO 102
+#define CLKID_GP0_PLL_DCO 103
+#define CLKID_HIFI_PLL_DCO 104
+#define CLKID_VPU_0_DIV 111
+#define CLKID_VPU_1_DIV 114
+#define CLKID_VAPB_0_DIV 118
+#define CLKID_VAPB_1_DIV 121
+#define CLKID_HDMI_PLL_DCO 125
+#define CLKID_HDMI_PLL_OD 126
+#define CLKID_HDMI_PLL_OD2 127
+#define CLKID_VID_PLL_SEL 130
+#define CLKID_VID_PLL_DIV 131
+#define CLKID_VCLK_SEL 132
+#define CLKID_VCLK2_SEL 133
+#define CLKID_VCLK_INPUT 134
+#define CLKID_VCLK2_INPUT 135
+#define CLKID_VCLK_DIV 136
+#define CLKID_VCLK2_DIV 137
+#define CLKID_VCLK_DIV2_EN 140
+#define CLKID_VCLK_DIV4_EN 141
+#define CLKID_VCLK_DIV6_EN 142
+#define CLKID_VCLK_DIV12_EN 143
+#define CLKID_VCLK2_DIV2_EN 144
+#define CLKID_VCLK2_DIV4_EN 145
+#define CLKID_VCLK2_DIV6_EN 146
+#define CLKID_VCLK2_DIV12_EN 147
+#define CLKID_CTS_ENCI_SEL 158
+#define CLKID_CTS_ENCP_SEL 159
+#define CLKID_CTS_VDAC_SEL 160
+#define CLKID_HDMI_TX_SEL 161
+#define CLKID_HDMI_SEL 166
+#define CLKID_HDMI_DIV 167
+#define CLKID_MALI_0_DIV 170
+#define CLKID_MALI_1_DIV 173
+#define CLKID_MPLL_5OM_DIV 176
+
+#define NR_CLKS 178
+
+/* include the CLKIDs that have been made part of the DT binding */
+#include <dt-bindings/clock/g12a-clkc.h>
+
+#endif /* __G12A_H */
diff --git a/drivers/clk/meson/gxbb-aoclk-32k.c b/drivers/clk/meson/gxbb-aoclk-32k.c
deleted file mode 100644
index 680467141a1d..000000000000
--- a/drivers/clk/meson/gxbb-aoclk-32k.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 2017 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/bitfield.h>
-#include <linux/regmap.h>
-#include "gxbb-aoclk.h"
-
-/*
- * The AO Domain embeds a dual/divider to generate a more precise
- * 32,768KHz clock for low-power suspend mode and CEC.
- * ______ ______
- * | | | |
- * ______ | Div1 |-| Cnt1 | ______
- * | | /|______| |______|\ | |
- * Xtal-->| Gate |---| ______ ______ X-X--| Gate |-->
- * |______| | \| | | |/ | |______|
- * | | Div2 |-| Cnt2 | |
- * | |______| |______| |
- * |_______________________|
- *
- * The dividing can be switched to single or dual, with a counter
- * for each divider to set when the switching is done.
- * The entire dividing mechanism can be also bypassed.
- */
-
-#define CLK_CNTL0_N1_MASK GENMASK(11, 0)
-#define CLK_CNTL0_N2_MASK GENMASK(23, 12)
-#define CLK_CNTL0_DUALDIV_EN BIT(28)
-#define CLK_CNTL0_OUT_GATE_EN BIT(30)
-#define CLK_CNTL0_IN_GATE_EN BIT(31)
-
-#define CLK_CNTL1_M1_MASK GENMASK(11, 0)
-#define CLK_CNTL1_M2_MASK GENMASK(23, 12)
-#define CLK_CNTL1_BYPASS_EN BIT(24)
-#define CLK_CNTL1_SELECT_OSC BIT(27)
-
-#define PWR_CNTL_ALT_32K_SEL GENMASK(13, 10)
-
-struct cec_32k_freq_table {
- unsigned long parent_rate;
- unsigned long target_rate;
- bool dualdiv;
- unsigned int n1;
- unsigned int n2;
- unsigned int m1;
- unsigned int m2;
-};
-
-static const struct cec_32k_freq_table aoclk_cec_32k_table[] = {
- [0] = {
- .parent_rate = 24000000,
- .target_rate = 32768,
- .dualdiv = true,
- .n1 = 733,
- .n2 = 732,
- .m1 = 8,
- .m2 = 11,
- },
-};
-
-/*
- * If CLK_CNTL0_DUALDIV_EN == 0
- * - will use N1 divider only
- * If CLK_CNTL0_DUALDIV_EN == 1
- * - hold M1 cycles of N1 divider then changes to N2
- * - hold M2 cycles of N2 divider then changes to N1
- * Then we can get more accurate division.
- */
-static unsigned long aoclk_cec_32k_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
- unsigned long n1;
- u32 reg0, reg1;
-
- regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, &reg0);
- regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, &reg1);
-
- if (reg1 & CLK_CNTL1_BYPASS_EN)
- return parent_rate;
-
- if (reg0 & CLK_CNTL0_DUALDIV_EN) {
- unsigned long n2, m1, m2, f1, f2, p1, p2;
-
- n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
- n2 = FIELD_GET(CLK_CNTL0_N2_MASK, reg0) + 1;
-
- m1 = FIELD_GET(CLK_CNTL1_M1_MASK, reg1) + 1;
- m2 = FIELD_GET(CLK_CNTL1_M2_MASK, reg1) + 1;
-
- f1 = DIV_ROUND_CLOSEST(parent_rate, n1);
- f2 = DIV_ROUND_CLOSEST(parent_rate, n2);
-
- p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2));
- p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2));
-
- return DIV_ROUND_UP(100000000, p1 + p2);
- }
-
- n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
-
- return DIV_ROUND_CLOSEST(parent_rate, n1);
-}
-
-static const struct cec_32k_freq_table *find_cec_32k_freq(unsigned long rate,
- unsigned long prate)
-{
- int i;
-
- for (i = 0 ; i < ARRAY_SIZE(aoclk_cec_32k_table) ; ++i)
- if (aoclk_cec_32k_table[i].parent_rate == prate &&
- aoclk_cec_32k_table[i].target_rate == rate)
- return &aoclk_cec_32k_table[i];
-
- return NULL;
-}
-
-static long aoclk_cec_32k_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
- *prate);
-
- /* If invalid return first one */
- if (!freq)
- return aoclk_cec_32k_table[0].target_rate;
-
- return freq->target_rate;
-}
-
-/*
- * From the Amlogic init procedure, the IN and OUT gates needs to be handled
- * in the init procedure to avoid any glitches.
- */
-
-static int aoclk_cec_32k_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
- parent_rate);
- struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
- u32 reg = 0;
-
- if (!freq)
- return -EINVAL;
-
- /* Disable clock */
- regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
- CLK_CNTL0_IN_GATE_EN | CLK_CNTL0_OUT_GATE_EN, 0);
-
- reg = FIELD_PREP(CLK_CNTL0_N1_MASK, freq->n1 - 1);
- if (freq->dualdiv)
- reg |= CLK_CNTL0_DUALDIV_EN |
- FIELD_PREP(CLK_CNTL0_N2_MASK, freq->n2 - 1);
-
- regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, reg);
-
- reg = FIELD_PREP(CLK_CNTL1_M1_MASK, freq->m1 - 1);
- if (freq->dualdiv)
- reg |= FIELD_PREP(CLK_CNTL1_M2_MASK, freq->m2 - 1);
-
- regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, reg);
-
- /* Enable clock */
- regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
- CLK_CNTL0_IN_GATE_EN, CLK_CNTL0_IN_GATE_EN);
-
- udelay(200);
-
- regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
- CLK_CNTL0_OUT_GATE_EN, CLK_CNTL0_OUT_GATE_EN);
-
- regmap_update_bits(cec_32k->regmap, AO_CRT_CLK_CNTL1,
- CLK_CNTL1_SELECT_OSC, CLK_CNTL1_SELECT_OSC);
-
- /* Select 32k from XTAL */
- regmap_update_bits(cec_32k->regmap,
- AO_RTI_PWR_CNTL_REG0,
- PWR_CNTL_ALT_32K_SEL,
- FIELD_PREP(PWR_CNTL_ALT_32K_SEL, 4));
-
- return 0;
-}
-
-const struct clk_ops meson_aoclk_cec_32k_ops = {
- .recalc_rate = aoclk_cec_32k_recalc_rate,
- .round_rate = aoclk_cec_32k_round_rate,
- .set_rate = aoclk_cec_32k_set_rate,
-};
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 42ed61d3c3fb..449f6ac189d8 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -5,10 +5,23 @@
*/
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
-#include "clk-regmap.h"
#include "meson-aoclk.h"
#include "gxbb-aoclk.h"
+#include "clk-regmap.h"
+#include "clk-dualdiv.h"
+
+#define IN_PREFIX "ao-in-"
+
+/* AO Configuration Clock registers offsets */
+#define AO_RTI_PWR_CNTL_REG1 0x0c
+#define AO_RTI_PWR_CNTL_REG0 0x10
+#define AO_RTI_GEN_CNTL_REG0 0x40
+#define AO_OSCIN_CNTL 0x58
+#define AO_CRT_CLK_CNTL1 0x68
+#define AO_RTC_ALT_CLK_CNTL0 0x94
+#define AO_RTC_ALT_CLK_CNTL1 0x98
+
#define GXBB_AO_GATE(_name, _bit) \
static struct clk_regmap _name##_ao = { \
.data = &(struct clk_regmap_gate_data) { \
@@ -18,7 +31,7 @@ static struct clk_regmap _name##_ao = { \
.hw.init = &(struct clk_init_data) { \
.name = #_name "_ao", \
.ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ "clk81" }, \
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
.num_parents = 1, \
.flags = CLK_IGNORE_UNUSED, \
}, \
@@ -31,13 +44,174 @@ GXBB_AO_GATE(uart1, 3);
GXBB_AO_GATE(uart2, 5);
GXBB_AO_GATE(ir_blaster, 6);
-static struct aoclk_cec_32k cec_32k_ao = {
- .hw.init = &(struct clk_init_data) {
- .name = "cec_32k_ao",
- .ops = &meson_aoclk_cec_32k_ops,
- .parent_names = (const char *[]){ "xtal" },
+static struct clk_regmap ao_cts_oscin = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .bit_idx = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_cts_oscin",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap ao_32k_pre = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_32k_pre",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "ao_cts_oscin" },
+ .num_parents = 1,
+ },
+};
+
+static const struct meson_clk_dualdiv_param gxbb_32k_div_table[] = {
+ {
+ .dual = 1,
+ .n1 = 733,
+ .m1 = 8,
+ .n2 = 732,
+ .m2 = 11,
+ }, {}
+};
+
+static struct clk_regmap ao_32k_div = {
+ .data = &(struct meson_clk_dualdiv_data){
+ .n1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 0,
+ .width = 12,
+ },
+ .n2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 12,
+ .width = 12,
+ },
+ .m1 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .m2 = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL1,
+ .shift = 12,
+ .width = 12,
+ },
+ .dual = {
+ .reg_off = AO_RTC_ALT_CLK_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .table = gxbb_32k_div_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_32k_div",
+ .ops = &meson_clk_dualdiv_ops,
+ .parent_names = (const char *[]){ "ao_32k_pre" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap ao_32k_sel = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTC_ALT_CLK_CNTL1,
+ .mask = 0x1,
+ .shift = 24,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_32k_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "ao_32k_div",
+ "ao_32k_pre" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap ao_32k = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AO_RTC_ALT_CLK_CNTL0,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_32k",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "ao_32k_sel" },
.num_parents = 1,
- .flags = CLK_IGNORE_UNUSED,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap ao_cts_rtc_oscin = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x7,
+ .shift = 10,
+ .table = (u32[]){ 1, 2, 3, 4 },
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_cts_rtc_oscin",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "ext-32k-0",
+ IN_PREFIX "ext-32k-1",
+ IN_PREFIX "ext-32k-2",
+ "ao_32k" },
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap ao_clk81 = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_RTI_PWR_CNTL_REG0,
+ .mask = 0x1,
+ .shift = 0,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_clk81",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
+ "ao_cts_rtc_oscin" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap ao_cts_cec = {
+ .data = &(struct clk_regmap_mux_data) {
+ .offset = AO_CRT_CLK_CNTL1,
+ .mask = 0x1,
+ .shift = 27,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ao_cts_cec",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * FIXME: The 'fixme' parent obviously does not exist.
+ *
+ * At the moment, CCF won't call get_parent() if num_parents
+ * is 1, and it does not allow NULL as a parent name either.
+ *
+ * On this particular mux, only the input #1 parent is known,
+ * but on boot the unknown input #0 is selected, so it is
+ * critical that .get_parent() gets called on it.
+ *
+ * Until CCF is fixed, adding this fake parent that will never
+ * be registered should work around the problem.
+ */
+ .parent_names = (const char *[]){ "fixme",
+ "ao_cts_rtc_oscin" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -50,13 +224,21 @@ static const unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_regmap *gxbb_aoclk_gate[] = {
- [CLKID_AO_REMOTE] = &remote_ao,
- [CLKID_AO_I2C_MASTER] = &i2c_master_ao,
- [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao,
- [CLKID_AO_UART1] = &uart1_ao,
- [CLKID_AO_UART2] = &uart2_ao,
- [CLKID_AO_IR_BLASTER] = &ir_blaster_ao,
+static struct clk_regmap *gxbb_aoclk[] = {
+ &remote_ao,
+ &i2c_master_ao,
+ &i2c_slave_ao,
+ &uart1_ao,
+ &uart2_ao,
+ &ir_blaster_ao,
+ &ao_cts_oscin,
+ &ao_32k_pre,
+ &ao_32k_div,
+ &ao_32k_sel,
+ &ao_32k,
+ &ao_cts_rtc_oscin,
+ &ao_clk81,
+ &ao_cts_cec,
};
static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
@@ -67,52 +249,38 @@ static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
[CLKID_AO_UART1] = &uart1_ao.hw,
[CLKID_AO_UART2] = &uart2_ao.hw,
[CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
- [CLKID_AO_CEC_32K] = &cec_32k_ao.hw,
+ [CLKID_AO_CEC_32K] = &ao_cts_cec.hw,
+ [CLKID_AO_CTS_OSCIN] = &ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &ao_32k_sel.hw,
+ [CLKID_AO_32K] = &ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &ao_cts_rtc_oscin.hw,
+ [CLKID_AO_CLK81] = &ao_clk81.hw,
},
.num = NR_CLKS,
};
-static int gxbb_register_cec_ao_32k(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- int ret;
-
- regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
- if (IS_ERR(regmap)) {
- dev_err(dev, "failed to get regmap\n");
- return PTR_ERR(regmap);
- }
-
- /* Specific clocks */
- cec_32k_ao.regmap = regmap;
- ret = devm_clk_hw_register(dev, &cec_32k_ao.hw);
- if (ret) {
- dev_err(&pdev->dev, "clk cec_32k_ao register failed.\n");
- return ret;
- }
-
- return 0;
-}
+static const struct meson_aoclk_input gxbb_aoclk_inputs[] = {
+ { .name = "xtal", .required = true, },
+ { .name = "mpeg-clk", .required = true, },
+ {. name = "ext-32k-0", .required = false, },
+ {. name = "ext-32k-1", .required = false, },
+ {. name = "ext-32k-2", .required = false, },
+};
static const struct meson_aoclk_data gxbb_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
.reset = gxbb_aoclk_reset,
- .num_clks = ARRAY_SIZE(gxbb_aoclk_gate),
- .clks = gxbb_aoclk_gate,
+ .num_clks = ARRAY_SIZE(gxbb_aoclk),
+ .clks = gxbb_aoclk,
.hw_data = &gxbb_aoclk_onecell_data,
+ .inputs = gxbb_aoclk_inputs,
+ .num_inputs = ARRAY_SIZE(gxbb_aoclk_inputs),
+ .input_prefix = IN_PREFIX,
};
-static int gxbb_aoclkc_probe(struct platform_device *pdev)
-{
- int ret = gxbb_register_cec_ao_32k(pdev);
- if (ret)
- return ret;
-
- return meson_aoclkc_probe(pdev);
-}
-
static const struct of_device_id gxbb_aoclkc_match_table[] = {
{
.compatible = "amlogic,meson-gx-aoclkc",
@@ -122,7 +290,7 @@ static const struct of_device_id gxbb_aoclkc_match_table[] = {
};
static struct platform_driver gxbb_aoclkc_driver = {
- .probe = gxbb_aoclkc_probe,
+ .probe = meson_aoclkc_probe,
.driver = {
.name = "gxbb-aoclkc",
.of_match_table = gxbb_aoclkc_match_table,
diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h
index c514493d989a..1db16f9b37d4 100644
--- a/drivers/clk/meson/gxbb-aoclk.h
+++ b/drivers/clk/meson/gxbb-aoclk.h
@@ -7,25 +7,7 @@
#ifndef __GXBB_AOCLKC_H
#define __GXBB_AOCLKC_H
-#define NR_CLKS 7
-
-/* AO Configuration Clock registers offsets */
-#define AO_RTI_PWR_CNTL_REG1 0x0c
-#define AO_RTI_PWR_CNTL_REG0 0x10
-#define AO_RTI_GEN_CNTL_REG0 0x40
-#define AO_OSCIN_CNTL 0x58
-#define AO_CRT_CLK_CNTL1 0x68
-#define AO_RTC_ALT_CLK_CNTL0 0x94
-#define AO_RTC_ALT_CLK_CNTL1 0x98
-
-struct aoclk_cec_32k {
- struct clk_hw hw;
- struct regmap *regmap;
-};
-
-#define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw)
-
-extern const struct clk_ops meson_aoclk_cec_32k_ops;
+#define NR_CLKS 14
#include <dt-bindings/clock/gxbb-aoclkc.h>
#include <dt-bindings/reset/gxbb-aoclkc.h>
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 65f2599e5243..29ffb4fde714 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -4,17 +4,20 @@
* Michael Turquette <mturquette@baylibre.com>
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include "clkc.h"
#include "gxbb.h"
+#include "clk-input.h"
#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-mpll.h"
+#include "meson-eeclk.h"
+#include "vid-pll-div.h"
+
+#define IN_PREFIX "ee-in-"
static DEFINE_SPINLOCK(meson_clk_lock);
@@ -118,7 +121,7 @@ static struct clk_regmap gxbb_fixed_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -148,7 +151,7 @@ static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_pre_mult",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -241,7 +244,7 @@ static struct clk_regmap gxl_hdmi_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
/*
* Display directly handle hdmi pll registers ATM, we need
@@ -378,7 +381,7 @@ static struct clk_regmap gxbb_sys_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -439,7 +442,7 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -491,7 +494,7 @@ static struct clk_regmap gxl_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
.num_parents = 1,
},
};
@@ -789,7 +792,7 @@ static struct clk_regmap gxbb_mpll2 = {
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
- "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
+ IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
"fclk_div3", "fclk_div5"
};
@@ -852,7 +855,7 @@ static struct clk_regmap gxbb_sar_adc_clk_sel = {
.name = "sar_adc_clk_sel",
.ops = &clk_regmap_mux_ops,
/* NOTE: The datasheet doesn't list the parents for bit 10 */
- .parent_names = (const char *[]){ "xtal", "clk81", },
+ .parent_names = (const char *[]){ IN_PREFIX "xtal", "clk81", },
.num_parents = 2,
},
};
@@ -891,7 +894,7 @@ static struct clk_regmap gxbb_sar_adc_clk = {
*/
static const char * const gxbb_mali_0_1_parent_names[] = {
- "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
+ IN_PREFIX "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
"fclk_div4", "fclk_div3", "fclk_div5"
};
@@ -1153,7 +1156,7 @@ static struct clk_regmap gxbb_32k_clk = {
};
static const char * const gxbb_32k_clk_parent_names[] = {
- "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
+ IN_PREFIX "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
};
static struct clk_regmap gxbb_32k_clk_sel = {
@@ -1172,7 +1175,7 @@ static struct clk_regmap gxbb_32k_clk_sel = {
};
static const char * const gxbb_sd_emmc_clk0_parent_names[] = {
- "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
+ IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
/*
* Following these parent clocks, we should also have had mpll2, mpll3
@@ -2138,7 +2141,7 @@ static struct clk_regmap gxbb_hdmi_tx = {
/* HDMI Clocks */
static const char * const gxbb_hdmi_parent_names[] = {
- "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
+ IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
};
static struct clk_regmap gxbb_hdmi_sel = {
@@ -2213,6 +2216,7 @@ static struct clk_regmap gxbb_vdec_1_div = {
.offset = HHI_VDEC_CLK_CNTL,
.shift = 0,
.width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "vdec_1_div",
@@ -2258,6 +2262,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = {
.offset = HHI_VDEC2_CLK_CNTL,
.shift = 16,
.width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_div",
@@ -2285,7 +2290,7 @@ static struct clk_regmap gxbb_vdec_hevc = {
static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
9, 10, 11, 13, 14, };
static const char * const gen_clk_parent_names[] = {
- "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
+ IN_PREFIX "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
};
@@ -2854,6 +2859,192 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
};
static struct clk_regmap *const gxbb_clk_regmaps[] = {
+ &gxbb_clk81,
+ &gxbb_ddr,
+ &gxbb_dos,
+ &gxbb_isa,
+ &gxbb_pl301,
+ &gxbb_periphs,
+ &gxbb_spicc,
+ &gxbb_i2c,
+ &gxbb_sar_adc,
+ &gxbb_smart_card,
+ &gxbb_rng0,
+ &gxbb_uart0,
+ &gxbb_sdhc,
+ &gxbb_stream,
+ &gxbb_async_fifo,
+ &gxbb_sdio,
+ &gxbb_abuf,
+ &gxbb_hiu_iface,
+ &gxbb_assist_misc,
+ &gxbb_spi,
+ &gxbb_i2s_spdif,
+ &gxbb_eth,
+ &gxbb_demux,
+ &gxbb_aiu_glue,
+ &gxbb_iec958,
+ &gxbb_i2s_out,
+ &gxbb_amclk,
+ &gxbb_aififo2,
+ &gxbb_mixer,
+ &gxbb_mixer_iface,
+ &gxbb_adc,
+ &gxbb_blkmv,
+ &gxbb_aiu,
+ &gxbb_uart1,
+ &gxbb_g2d,
+ &gxbb_usb0,
+ &gxbb_usb1,
+ &gxbb_reset,
+ &gxbb_nand,
+ &gxbb_dos_parser,
+ &gxbb_usb,
+ &gxbb_vdin1,
+ &gxbb_ahb_arb0,
+ &gxbb_efuse,
+ &gxbb_boot_rom,
+ &gxbb_ahb_data_bus,
+ &gxbb_ahb_ctrl_bus,
+ &gxbb_hdmi_intr_sync,
+ &gxbb_hdmi_pclk,
+ &gxbb_usb1_ddr_bridge,
+ &gxbb_usb0_ddr_bridge,
+ &gxbb_mmc_pclk,
+ &gxbb_dvin,
+ &gxbb_uart2,
+ &gxbb_sana,
+ &gxbb_vpu_intr,
+ &gxbb_sec_ahb_ahb3_bridge,
+ &gxbb_clk81_a53,
+ &gxbb_vclk2_venci0,
+ &gxbb_vclk2_venci1,
+ &gxbb_vclk2_vencp0,
+ &gxbb_vclk2_vencp1,
+ &gxbb_gclk_venci_int0,
+ &gxbb_gclk_vencp_int,
+ &gxbb_dac_clk,
+ &gxbb_aoclk_gate,
+ &gxbb_iec958_gate,
+ &gxbb_enc480p,
+ &gxbb_rng1,
+ &gxbb_gclk_venci_int1,
+ &gxbb_vclk2_venclmcc,
+ &gxbb_vclk2_vencl,
+ &gxbb_vclk_other,
+ &gxbb_edp,
+ &gxbb_ao_media_cpu,
+ &gxbb_ao_ahb_sram,
+ &gxbb_ao_ahb_bus,
+ &gxbb_ao_iface,
+ &gxbb_ao_i2c,
+ &gxbb_emmc_a,
+ &gxbb_emmc_b,
+ &gxbb_emmc_c,
+ &gxbb_sar_adc_clk,
+ &gxbb_mali_0,
+ &gxbb_mali_1,
+ &gxbb_cts_amclk,
+ &gxbb_cts_mclk_i958,
+ &gxbb_32k_clk,
+ &gxbb_sd_emmc_a_clk0,
+ &gxbb_sd_emmc_b_clk0,
+ &gxbb_sd_emmc_c_clk0,
+ &gxbb_vpu_0,
+ &gxbb_vpu_1,
+ &gxbb_vapb_0,
+ &gxbb_vapb_1,
+ &gxbb_vapb,
+ &gxbb_mpeg_clk_div,
+ &gxbb_sar_adc_clk_div,
+ &gxbb_mali_0_div,
+ &gxbb_mali_1_div,
+ &gxbb_cts_mclk_i958_div,
+ &gxbb_32k_clk_div,
+ &gxbb_sd_emmc_a_clk0_div,
+ &gxbb_sd_emmc_b_clk0_div,
+ &gxbb_sd_emmc_c_clk0_div,
+ &gxbb_vpu_0_div,
+ &gxbb_vpu_1_div,
+ &gxbb_vapb_0_div,
+ &gxbb_vapb_1_div,
+ &gxbb_mpeg_clk_sel,
+ &gxbb_sar_adc_clk_sel,
+ &gxbb_mali_0_sel,
+ &gxbb_mali_1_sel,
+ &gxbb_mali,
+ &gxbb_cts_amclk_sel,
+ &gxbb_cts_mclk_i958_sel,
+ &gxbb_cts_i958,
+ &gxbb_32k_clk_sel,
+ &gxbb_sd_emmc_a_clk0_sel,
+ &gxbb_sd_emmc_b_clk0_sel,
+ &gxbb_sd_emmc_c_clk0_sel,
+ &gxbb_vpu_0_sel,
+ &gxbb_vpu_1_sel,
+ &gxbb_vpu,
+ &gxbb_vapb_0_sel,
+ &gxbb_vapb_1_sel,
+ &gxbb_vapb_sel,
+ &gxbb_mpll0,
+ &gxbb_mpll1,
+ &gxbb_mpll2,
+ &gxbb_mpll0_div,
+ &gxbb_mpll1_div,
+ &gxbb_mpll2_div,
+ &gxbb_cts_amclk_div,
+ &gxbb_fixed_pll,
+ &gxbb_sys_pll,
+ &gxbb_mpll_prediv,
+ &gxbb_fclk_div2,
+ &gxbb_fclk_div3,
+ &gxbb_fclk_div4,
+ &gxbb_fclk_div5,
+ &gxbb_fclk_div7,
+ &gxbb_vdec_1_sel,
+ &gxbb_vdec_1_div,
+ &gxbb_vdec_1,
+ &gxbb_vdec_hevc_sel,
+ &gxbb_vdec_hevc_div,
+ &gxbb_vdec_hevc,
+ &gxbb_gen_clk_sel,
+ &gxbb_gen_clk_div,
+ &gxbb_gen_clk,
+ &gxbb_fixed_pll_dco,
+ &gxbb_sys_pll_dco,
+ &gxbb_gp0_pll,
+ &gxbb_vid_pll,
+ &gxbb_vid_pll_sel,
+ &gxbb_vid_pll_div,
+ &gxbb_vclk,
+ &gxbb_vclk_sel,
+ &gxbb_vclk_div,
+ &gxbb_vclk_input,
+ &gxbb_vclk_div1,
+ &gxbb_vclk_div2_en,
+ &gxbb_vclk_div4_en,
+ &gxbb_vclk_div6_en,
+ &gxbb_vclk_div12_en,
+ &gxbb_vclk2,
+ &gxbb_vclk2_sel,
+ &gxbb_vclk2_div,
+ &gxbb_vclk2_input,
+ &gxbb_vclk2_div1,
+ &gxbb_vclk2_div2_en,
+ &gxbb_vclk2_div4_en,
+ &gxbb_vclk2_div6_en,
+ &gxbb_vclk2_div12_en,
+ &gxbb_cts_enci,
+ &gxbb_cts_enci_sel,
+ &gxbb_cts_encp,
+ &gxbb_cts_encp_sel,
+ &gxbb_cts_vdac,
+ &gxbb_cts_vdac_sel,
+ &gxbb_hdmi_tx,
+ &gxbb_hdmi_tx_sel,
+ &gxbb_hdmi_sel,
+ &gxbb_hdmi_div,
+ &gxbb_hdmi,
&gxbb_gp0_pll_dco,
&gxbb_hdmi_pll,
&gxbb_hdmi_pll_od,
@@ -2862,14 +3053,6 @@ static struct clk_regmap *const gxbb_clk_regmaps[] = {
};
static struct clk_regmap *const gxl_clk_regmaps[] = {
- &gxl_gp0_pll_dco,
- &gxl_hdmi_pll,
- &gxl_hdmi_pll_od,
- &gxl_hdmi_pll_od2,
- &gxl_hdmi_pll_dco,
-};
-
-static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_clk81,
&gxbb_ddr,
&gxbb_dos,
@@ -3056,23 +3239,22 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_hdmi_sel,
&gxbb_hdmi_div,
&gxbb_hdmi,
+ &gxl_gp0_pll_dco,
+ &gxl_hdmi_pll,
+ &gxl_hdmi_pll_od,
+ &gxl_hdmi_pll_od2,
+ &gxl_hdmi_pll_dco,
};
-struct clkc_data {
- struct clk_regmap *const *regmap_clks;
- unsigned int regmap_clks_count;
- struct clk_hw_onecell_data *hw_onecell_data;
-};
-
-static const struct clkc_data gxbb_clkc_data = {
+static const struct meson_eeclkc_data gxbb_clkc_data = {
.regmap_clks = gxbb_clk_regmaps,
- .regmap_clks_count = ARRAY_SIZE(gxbb_clk_regmaps),
+ .regmap_clk_num = ARRAY_SIZE(gxbb_clk_regmaps),
.hw_onecell_data = &gxbb_hw_onecell_data,
};
-static const struct clkc_data gxl_clkc_data = {
+static const struct meson_eeclkc_data gxl_clkc_data = {
.regmap_clks = gxl_clk_regmaps,
- .regmap_clks_count = ARRAY_SIZE(gxl_clk_regmaps),
+ .regmap_clk_num = ARRAY_SIZE(gxl_clk_regmaps),
.hw_onecell_data = &gxl_hw_onecell_data,
};
@@ -3082,52 +3264,8 @@ static const struct of_device_id clkc_match_table[] = {
{},
};
-static int gxbb_clkc_probe(struct platform_device *pdev)
-{
- const struct clkc_data *clkc_data;
- struct regmap *map;
- int ret, i;
- struct device *dev = &pdev->dev;
-
- clkc_data = of_device_get_match_data(dev);
- if (!clkc_data)
- return -EINVAL;
-
- /* Get the hhi system controller node if available */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
- if (IS_ERR(map)) {
- dev_err(dev, "failed to get HHI regmap\n");
- return PTR_ERR(map);
- }
-
- /* Populate regmap for the common regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(gx_clk_regmaps); i++)
- gx_clk_regmaps[i]->map = map;
-
- /* Populate regmap for soc specific clocks */
- for (i = 0; i < clkc_data->regmap_clks_count; i++)
- clkc_data->regmap_clks[i]->map = map;
-
- /* Register all clks */
- for (i = 0; i < clkc_data->hw_onecell_data->num; i++) {
- /* array might be sparse */
- if (!clkc_data->hw_onecell_data->hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev,
- clkc_data->hw_onecell_data->hws[i]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- clkc_data->hw_onecell_data);
-}
-
static struct platform_driver gxbb_driver = {
- .probe = gxbb_clkc_probe,
+ .probe = meson_eeclkc_probe,
.driver = {
.name = "gxbb-clkc",
.of_match_table = clkc_match_table,
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index f965845917e3..b67951909e04 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -14,9 +14,11 @@
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include "clk-regmap.h"
+#include <linux/slab.h>
#include "meson-aoclk.h"
+#include "clk-input.h"
+
static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
@@ -31,6 +33,37 @@ static const struct reset_control_ops meson_aoclk_reset_ops = {
.reset = meson_aoclk_do_reset,
};
+static int meson_aoclkc_register_inputs(struct device *dev,
+ struct meson_aoclk_data *data)
+{
+ struct clk_hw *hw;
+ char *str;
+ int i;
+
+ for (i = 0; i < data->num_inputs; i++) {
+ const struct meson_aoclk_input *in = &data->inputs[i];
+
+ str = kasprintf(GFP_KERNEL, "%s%s", data->input_prefix,
+ in->name);
+ if (!str)
+ return -ENOMEM;
+
+ hw = meson_clk_hw_register_input(dev, in->name, str, 0);
+ kfree(str);
+
+ if (IS_ERR(hw)) {
+ if (!in->required && PTR_ERR(hw) == -ENOENT)
+ continue;
+ else if (PTR_ERR(hw) != -EPROBE_DEFER)
+ dev_err(dev, "failed to register input %s\n",
+ in->name);
+ return PTR_ERR(hw);
+ }
+ }
+
+ return 0;
+}
+
int meson_aoclkc_probe(struct platform_device *pdev)
{
struct meson_aoclk_reset_controller *rstc;
@@ -53,6 +86,10 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
+ ret = meson_aoclkc_register_inputs(dev, data);
+ if (ret)
+ return ret;
+
/* Reset Controller */
rstc->data = data;
rstc->regmap = regmap;
@@ -65,15 +102,20 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return ret;
}
- /*
- * Populate regmap and register all clks
- */
- for (clkid = 0; clkid < data->num_clks; clkid++) {
+ /* Populate regmap */
+ for (clkid = 0; clkid < data->num_clks; clkid++)
data->clks[clkid]->map = regmap;
+ /* Register all clks */
+ for (clkid = 0; clkid < data->hw_data->num; clkid++) {
+ if (!data->hw_data->hws[clkid])
+ continue;
+
ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
- if (ret)
+ if (ret) {
+ dev_err(dev, "Clock registration failed\n");
return ret;
+ }
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index ab2819e88922..999cde3868f7 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -11,16 +11,27 @@
#ifndef __MESON_AOCLK_H__
#define __MESON_AOCLK_H__
+#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/reset-controller.h>
+
#include "clk-regmap.h"
+struct meson_aoclk_input {
+ const char *name;
+ bool required;
+};
+
struct meson_aoclk_data {
const unsigned int reset_reg;
const int num_reset;
const unsigned int *reset;
- int num_clks;
+ const int num_clks;
struct clk_regmap **clks;
+ const int num_inputs;
+ const struct meson_aoclk_input *inputs;
+ const char *input_prefix;
const struct clk_hw_onecell_data *hw_data;
};
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
new file mode 100644
index 000000000000..37a34c9c3885
--- /dev/null
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "clk-input.h"
+#include "clk-regmap.h"
+#include "meson-eeclk.h"
+
+int meson_eeclkc_probe(struct platform_device *pdev)
+{
+ const struct meson_eeclkc_data *data;
+ struct device *dev = &pdev->dev;
+ struct clk_hw *input;
+ struct regmap *map;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ /* Get the hhi system controller node */
+ map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "failed to get HHI regmap\n");
+ return PTR_ERR(map);
+ }
+
+ input = meson_clk_hw_register_input(dev, "xtal", IN_PREFIX "xtal", 0);
+ if (IS_ERR(input)) {
+ ret = PTR_ERR(input);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get input clock");
+ return ret;
+ }
+
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < data->regmap_clk_num; i++)
+ data->regmap_clks[i]->map = map;
+
+ for (i = 0; i < data->hw_onecell_data->num; i++) {
+ /* array might be sparse */
+ if (!data->hw_onecell_data->hws[i])
+ continue;
+
+ ret = devm_clk_hw_register(dev, data->hw_onecell_data->hws[i]);
+ if (ret) {
+ dev_err(dev, "Clock registration failed\n");
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ data->hw_onecell_data);
+}
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
new file mode 100644
index 000000000000..1b809b1419fe
--- /dev/null
+++ b/drivers/clk/meson/meson-eeclk.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLKC_H
+#define __MESON_CLKC_H
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+#define IN_PREFIX "ee-in-"
+
+struct platform_device;
+
+struct meson_eeclkc_data {
+ struct clk_regmap *const *regmap_clks;
+ unsigned int regmap_clk_num;
+ struct clk_hw_onecell_data *hw_onecell_data;
+};
+
+int meson_eeclkc_probe(struct platform_device *pdev);
+
+#endif /* __MESON_CLKC_H */
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 950d0e548c75..576ad42252d0 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -16,9 +16,10 @@
#include <linux/slab.h>
#include <linux/regmap.h>
-#include "clkc.h"
#include "meson8b.h"
#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-mpll.h"
static DEFINE_SPINLOCK(meson_clk_lock);
@@ -803,16 +804,16 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
},
};
-static u32 mux_table_abp[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_abp_clk_sel = {
+static u32 mux_table_apb[] = { 1, 2, 3, 4, 5, 6, 7 };
+static struct clk_regmap meson8b_apb_clk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 3,
- .table = mux_table_abp,
+ .table = mux_table_apb,
},
.hw.init = &(struct clk_init_data){
- .name = "abp_clk_sel",
+ .name = "apb_clk_sel",
.ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "cpu_clk_div2",
"cpu_clk_div3",
@@ -825,16 +826,16 @@ static struct clk_regmap meson8b_abp_clk_sel = {
},
};
-static struct clk_regmap meson8b_abp_clk_gate = {
+static struct clk_regmap meson8b_apb_clk_gate = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 16,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "abp_clk_dis",
+ .name = "apb_clk_dis",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "abp_clk_sel" },
+ .parent_names = (const char *[]){ "apb_clk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1573,6 +1574,135 @@ static struct clk_regmap meson8b_hdmi_sys = {
},
};
+/*
+ * The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
+ * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
+ * has mali_0 and no glitch-free mux.
+ */
+static const char * const meson8b_mali_0_1_parent_names[] = {
+ "xtal", "mpll2", "mpll1", "fclk_div7", "fclk_div4", "fclk_div3",
+ "fclk_div5"
+};
+
+static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+
+static struct clk_regmap meson8b_mali_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ .table = meson8b_mali_0_1_mux_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_mali_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
+ /*
+ * Don't propagate rate changes up because the only changeable
+ * parents are mpll1 and mpll2 but we need those for audio and
+ * RGMII (Ethernet). We don't want to change the audio or
+ * Ethernet clocks when setting the GPU frequency.
+ */
+ .flags = 0,
+ },
+};
+
+static struct clk_regmap meson8b_mali_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mali_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_mali_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mali_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_mali_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ .table = meson8b_mali_0_1_mux_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_mali_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
+ /*
+ * Don't propagate rate changes up because the only changeable
+ * parents are mpll1 and mpll2 but we need those for audio and
+ * RGMII (Ethernet). We don't want to change the audio or
+ * Ethernet clocks when setting the GPU frequency.
+ */
+ .flags = 0,
+ },
+};
+
+static struct clk_regmap meson8b_mali_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "mali_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_mali_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mali_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_mali = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mali",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "mali_0", "mali_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
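How a consumer is expected to drive this tree (a hypothetical sketch, not part of this patch): the GPU driver grabs the top-level "mali" clock, and CLK_SET_RATE_PARENT lets the request walk down through the active gate to its divider, while .flags = 0 on the two muxes keeps it away from mpll1/mpll2:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* hypothetical consumer; the clock lookup and rate are illustrative */
static int example_set_gpu_rate(struct device *dev, unsigned long hz)
{
	struct clk *mali = devm_clk_get(dev, NULL);

	if (IS_ERR(mali))
		return PTR_ERR(mali);

	/* resolves through mali -> mali_{0,1} -> mali_{0,1}_div */
	return clk_set_rate(mali, hz);
}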
+
/* Everything Else (EE) domain gates */
static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
@@ -1659,6 +1789,188 @@ static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1);
static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2);
static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
+static struct clk_hw_onecell_data meson8_hw_onecell_data = {
+ .hws = {
+ [CLKID_XTAL] = &meson8b_xtal.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_DDR] = &meson8b_ddr.hw,
+ [CLKID_DOS] = &meson8b_dos.hw,
+ [CLKID_ISA] = &meson8b_isa.hw,
+ [CLKID_PL301] = &meson8b_pl301.hw,
+ [CLKID_PERIPHS] = &meson8b_periphs.hw,
+ [CLKID_SPICC] = &meson8b_spicc.hw,
+ [CLKID_I2C] = &meson8b_i2c.hw,
+ [CLKID_SAR_ADC] = &meson8b_sar_adc.hw,
+ [CLKID_SMART_CARD] = &meson8b_smart_card.hw,
+ [CLKID_RNG0] = &meson8b_rng0.hw,
+ [CLKID_UART0] = &meson8b_uart0.hw,
+ [CLKID_SDHC] = &meson8b_sdhc.hw,
+ [CLKID_STREAM] = &meson8b_stream.hw,
+ [CLKID_ASYNC_FIFO] = &meson8b_async_fifo.hw,
+ [CLKID_SDIO] = &meson8b_sdio.hw,
+ [CLKID_ABUF] = &meson8b_abuf.hw,
+ [CLKID_HIU_IFACE] = &meson8b_hiu_iface.hw,
+ [CLKID_ASSIST_MISC] = &meson8b_assist_misc.hw,
+ [CLKID_SPI] = &meson8b_spi.hw,
+ [CLKID_I2S_SPDIF] = &meson8b_i2s_spdif.hw,
+ [CLKID_ETH] = &meson8b_eth.hw,
+ [CLKID_DEMUX] = &meson8b_demux.hw,
+ [CLKID_AIU_GLUE] = &meson8b_aiu_glue.hw,
+ [CLKID_IEC958] = &meson8b_iec958.hw,
+ [CLKID_I2S_OUT] = &meson8b_i2s_out.hw,
+ [CLKID_AMCLK] = &meson8b_amclk.hw,
+ [CLKID_AIFIFO2] = &meson8b_aififo2.hw,
+ [CLKID_MIXER] = &meson8b_mixer.hw,
+ [CLKID_MIXER_IFACE] = &meson8b_mixer_iface.hw,
+ [CLKID_ADC] = &meson8b_adc.hw,
+ [CLKID_BLKMV] = &meson8b_blkmv.hw,
+ [CLKID_AIU] = &meson8b_aiu.hw,
+ [CLKID_UART1] = &meson8b_uart1.hw,
+ [CLKID_G2D] = &meson8b_g2d.hw,
+ [CLKID_USB0] = &meson8b_usb0.hw,
+ [CLKID_USB1] = &meson8b_usb1.hw,
+ [CLKID_RESET] = &meson8b_reset.hw,
+ [CLKID_NAND] = &meson8b_nand.hw,
+ [CLKID_DOS_PARSER] = &meson8b_dos_parser.hw,
+ [CLKID_USB] = &meson8b_usb.hw,
+ [CLKID_VDIN1] = &meson8b_vdin1.hw,
+ [CLKID_AHB_ARB0] = &meson8b_ahb_arb0.hw,
+ [CLKID_EFUSE] = &meson8b_efuse.hw,
+ [CLKID_BOOT_ROM] = &meson8b_boot_rom.hw,
+ [CLKID_AHB_DATA_BUS] = &meson8b_ahb_data_bus.hw,
+ [CLKID_AHB_CTRL_BUS] = &meson8b_ahb_ctrl_bus.hw,
+ [CLKID_HDMI_INTR_SYNC] = &meson8b_hdmi_intr_sync.hw,
+ [CLKID_HDMI_PCLK] = &meson8b_hdmi_pclk.hw,
+ [CLKID_USB1_DDR_BRIDGE] = &meson8b_usb1_ddr_bridge.hw,
+ [CLKID_USB0_DDR_BRIDGE] = &meson8b_usb0_ddr_bridge.hw,
+ [CLKID_MMC_PCLK] = &meson8b_mmc_pclk.hw,
+ [CLKID_DVIN] = &meson8b_dvin.hw,
+ [CLKID_UART2] = &meson8b_uart2.hw,
+ [CLKID_SANA] = &meson8b_sana.hw,
+ [CLKID_VPU_INTR] = &meson8b_vpu_intr.hw,
+ [CLKID_SEC_AHB_AHB3_BRIDGE] = &meson8b_sec_ahb_ahb3_bridge.hw,
+ [CLKID_CLK81_A9] = &meson8b_clk81_a9.hw,
+ [CLKID_VCLK2_VENCI0] = &meson8b_vclk2_venci0.hw,
+ [CLKID_VCLK2_VENCI1] = &meson8b_vclk2_venci1.hw,
+ [CLKID_VCLK2_VENCP0] = &meson8b_vclk2_vencp0.hw,
+ [CLKID_VCLK2_VENCP1] = &meson8b_vclk2_vencp1.hw,
+ [CLKID_GCLK_VENCI_INT] = &meson8b_gclk_venci_int.hw,
+ [CLKID_GCLK_VENCP_INT] = &meson8b_gclk_vencp_int.hw,
+ [CLKID_DAC_CLK] = &meson8b_dac_clk.hw,
+ [CLKID_AOCLK_GATE] = &meson8b_aoclk_gate.hw,
+ [CLKID_IEC958_GATE] = &meson8b_iec958_gate.hw,
+ [CLKID_ENC480P] = &meson8b_enc480p.hw,
+ [CLKID_RNG1] = &meson8b_rng1.hw,
+ [CLKID_GCLK_VENCL_INT] = &meson8b_gclk_vencl_int.hw,
+ [CLKID_VCLK2_VENCLMCC] = &meson8b_vclk2_venclmcc.hw,
+ [CLKID_VCLK2_VENCL] = &meson8b_vclk2_vencl.hw,
+ [CLKID_VCLK2_OTHER] = &meson8b_vclk2_other.hw,
+ [CLKID_EDP] = &meson8b_edp.hw,
+ [CLKID_AO_MEDIA_CPU] = &meson8b_ao_media_cpu.hw,
+ [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw,
+ [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw,
+ [CLKID_AO_IFACE] = &meson8b_ao_iface.hw,
+ [CLKID_MPLL0] = &meson8b_mpll0.hw,
+ [CLKID_MPLL1] = &meson8b_mpll1.hw,
+ [CLKID_MPLL2] = &meson8b_mpll2.hw,
+ [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw,
+ [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw,
+ [CLKID_CPU_IN_DIV2] = &meson8b_cpu_in_div2.hw,
+ [CLKID_CPU_IN_DIV3] = &meson8b_cpu_in_div3.hw,
+ [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw,
+ [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw,
+ [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
+ [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
+ [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
+ [CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
+ [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
+ [CLKID_CPU_CLK_DIV2] = &meson8b_cpu_clk_div2.hw,
+ [CLKID_CPU_CLK_DIV3] = &meson8b_cpu_clk_div3.hw,
+ [CLKID_CPU_CLK_DIV4] = &meson8b_cpu_clk_div4.hw,
+ [CLKID_CPU_CLK_DIV5] = &meson8b_cpu_clk_div5.hw,
+ [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
+ [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
+ [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
+ [CLKID_APB] = &meson8b_apb_clk_gate.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
+ [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
+ [CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
+ [CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
+ [CLKID_VID_PLL_IN_EN] = &meson8b_vid_pll_in_en.hw,
+ [CLKID_VID_PLL_PRE_DIV] = &meson8b_vid_pll_pre_div.hw,
+ [CLKID_VID_PLL_POST_DIV] = &meson8b_vid_pll_post_div.hw,
+ [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw,
+ [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
+ [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
+ [CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
+ [CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
+ [CLKID_CTS_ENCP] = &meson8b_cts_encp.hw,
+ [CLKID_CTS_ENCI_SEL] = &meson8b_cts_enci_sel.hw,
+ [CLKID_CTS_ENCI] = &meson8b_cts_enci.hw,
+ [CLKID_HDMI_TX_PIXEL_SEL] = &meson8b_hdmi_tx_pixel_sel.hw,
+ [CLKID_HDMI_TX_PIXEL] = &meson8b_hdmi_tx_pixel.hw,
+ [CLKID_CTS_ENCL_SEL] = &meson8b_cts_encl_sel.hw,
+ [CLKID_CTS_ENCL] = &meson8b_cts_encl.hw,
+ [CLKID_CTS_VDAC0_SEL] = &meson8b_cts_vdac0_sel.hw,
+ [CLKID_CTS_VDAC0] = &meson8b_cts_vdac0.hw,
+ [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
+ [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
+ [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
+ [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
+ [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
+ [CLKID_MALI] = &meson8b_mali_0.hw,
+ [CLK_NR_CLKS] = NULL,
+ },
+ .num = CLK_NR_CLKS,
+};
+
static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
.hws = {
[CLKID_XTAL] = &meson8b_xtal.hw,
@@ -1781,8 +2093,8 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_ABP_SEL] = &meson8b_abp_clk_sel.hw,
- [CLKID_ABP] = &meson8b_abp_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
+ [CLKID_APB] = &meson8b_apb_clk_gate.hw,
[CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
[CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
[CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
@@ -1833,6 +2145,13 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
[CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
[CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
+ [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
+ [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
+ [CLKID_MALI_0] = &meson8b_mali_0.hw,
+ [CLKID_MALI_1_SEL] = &meson8b_mali_1_sel.hw,
+ [CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw,
+ [CLKID_MALI_1] = &meson8b_mali_1.hw,
+ [CLKID_MALI] = &meson8b_mali.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -1943,8 +2262,8 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_fixed_pll_dco,
&meson8b_hdmi_pll_dco,
&meson8b_sys_pll_dco,
- &meson8b_abp_clk_sel,
- &meson8b_abp_clk_gate,
+ &meson8b_apb_clk_sel,
+ &meson8b_apb_clk_gate,
&meson8b_periph_clk_sel,
&meson8b_periph_clk_gate,
&meson8b_axi_clk_sel,
@@ -1988,6 +2307,13 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_hdmi_sys_sel,
&meson8b_hdmi_sys_div,
&meson8b_hdmi_sys,
+ &meson8b_mali_0_sel,
+ &meson8b_mali_0_div,
+ &meson8b_mali_0,
+ &meson8b_mali_1_sel,
+ &meson8b_mali_1_div,
+ &meson8b_mali_1,
+ &meson8b_mali,
};
static const struct meson8b_clk_reset_line {
@@ -2132,7 +2458,6 @@ static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
static struct meson8b_nb_data meson8b_cpu_nb_data = {
.nb.notifier_call = meson8b_cpu_clk_notifier_cb,
- .onecell_data = &meson8b_hw_onecell_data,
};
static const struct regmap_config clkc_regmap_config = {
@@ -2141,7 +2466,8 @@ static const struct regmap_config clkc_regmap_config = {
.reg_stride = 4,
};
-static void __init meson8b_clkc_init(struct device_node *np)
+static void __init meson8b_clkc_init_common(struct device_node *np,
+ struct clk_hw_onecell_data *clk_hw_onecell_data)
{
struct meson8b_clk_reset *rstc;
const char *notifier_clk_name;
@@ -2192,14 +2518,16 @@ static void __init meson8b_clkc_init(struct device_node *np)
*/
for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
/* array might be sparse */
- if (!meson8b_hw_onecell_data.hws[i])
+ if (!clk_hw_onecell_data->hws[i])
continue;
- ret = clk_hw_register(NULL, meson8b_hw_onecell_data.hws[i]);
+ ret = clk_hw_register(NULL, clk_hw_onecell_data->hws[i]);
if (ret)
return;
}
+ meson8b_cpu_nb_data.onecell_data = clk_hw_onecell_data;
+
/*
* FIXME we shouldn't program the muxes in notifier handlers. The
* tricky programming sequence will be handled by the forthcoming
@@ -2215,13 +2543,23 @@ static void __init meson8b_clkc_init(struct device_node *np)
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &meson8b_hw_onecell_data);
+ clk_hw_onecell_data);
if (ret)
pr_err("%s: failed to register clock provider\n", __func__);
}
+static void __init meson8_clkc_init(struct device_node *np)
+{
+ return meson8b_clkc_init_common(np, &meson8_hw_onecell_data);
+}
+
+static void __init meson8b_clkc_init(struct device_node *np)
+{
+ return meson8b_clkc_init_common(np, &meson8b_hw_onecell_data);
+}
+
CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc",
- meson8b_clkc_init);
+ meson8_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc",
meson8b_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc",
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 87fba739af81..b8c58faeae52 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -33,6 +33,7 @@
#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
+#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
@@ -91,7 +92,7 @@
#define CLKID_CPU_CLK_DIV6 120
#define CLKID_CPU_CLK_DIV7 121
#define CLKID_CPU_CLK_DIV8 122
-#define CLKID_ABP_SEL 123
+#define CLKID_APB_SEL 123
#define CLKID_PERIPH_SEL 125
#define CLKID_AXI_SEL 127
#define CLKID_L2_DRAM_SEL 129
@@ -139,8 +140,14 @@
#define CLKID_HDMI_SYS_SEL 172
#define CLKID_HDMI_SYS_DIV 173
#define CLKID_HDMI_SYS 174
+#define CLKID_MALI_0_SEL 175
+#define CLKID_MALI_0_DIV 176
+#define CLKID_MALI_0 177
+#define CLKID_MALI_1_SEL 178
+#define CLKID_MALI_1_DIV 179
+#define CLKID_MALI_1 180
-#define CLK_NR_CLKS 175
+#define CLK_NR_CLKS 181
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/meson/parm.h b/drivers/clk/meson/parm.h
new file mode 100644
index 000000000000..3c9ef1b505ce
--- /dev/null
+++ b/drivers/clk/meson/parm.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ */
+
+#ifndef __MESON_PARM_H
+#define __MESON_PARM_H
+
+#include <linux/bits.h>
+#include <linux/regmap.h>
+
+#define PMASK(width) GENMASK(width - 1, 0)
+#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
+#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
+
+#define PARM_GET(width, shift, reg) \
+ (((reg) & SETPMASK(width, shift)) >> (shift))
+#define PARM_SET(width, shift, reg, val) \
+ (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))
+
+#define MESON_PARM_APPLICABLE(p) (!!((p)->width))
+
+struct parm {
+ u16 reg_off;
+ u8 shift;
+ u8 width;
+};
+
+static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
+{
+ unsigned int val;
+
+ regmap_read(map, p->reg_off, &val);
+ return PARM_GET(p->width, p->shift, val);
+}
+
+static inline void meson_parm_write(struct regmap *map, struct parm *p,
+ unsigned int val)
+{
+ regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
+ val << p->shift);
+}
+
+#endif /* __MESON_PARM_H */
+
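A quick host-side check of the PARM_GET/PARM_SET arithmetic (a standalone sketch; GENMASK is re-implemented here since linux/bits.h is kernel-only):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define SETPMASK(width, shift)	GENMASK(shift + width - 1, shift)
#define CLRPMASK(width, shift)	(~SETPMASK(width, shift))
#define PARM_GET(width, shift, reg) \
	(((reg) & SETPMASK(width, shift)) >> (shift))
#define PARM_SET(width, shift, reg, val) \
	(((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))

int main(void)
{
	uint32_t reg = 0x00abc123;

	/* 12-bit field at bit 12: (0x00abc123 & 0x00fff000) >> 12 */
	printf("get: 0x%x\n", (unsigned)PARM_GET(12, 12, reg));	/* 0xabc */

	/* write 0x5de into that field, leaving the other bits untouched */
	printf("set: 0x%08x\n", (unsigned)PARM_SET(12, 12, reg, 0x5de));	/* 0x005de123 */
	return 0;
}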
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index bc64019b8eeb..3acf03780221 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -16,7 +16,11 @@
* duty_cycle = (1 + hi) / (1 + val)
*/
-#include "clkc-audio.h"
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+
+#include "clk-regmap.h"
+#include "sclk-div.h"
static inline struct meson_sclk_div_data *
meson_sclk_div_data(struct clk_regmap *clk)
@@ -241,3 +245,7 @@ const struct clk_ops meson_sclk_div_ops = {
.init = sclk_div_init,
};
EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
+
+MODULE_DESCRIPTION("Amlogic Sample divider driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
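Worked case for the duty-cycle relation quoted at the top of this file: with val = 9 one output period spans 1 + 9 = 10 input cycles, and hi = 4 keeps the output high for 1 + 4 = 5 of them, so duty_cycle = (1 + 4) / (1 + 9) = 50% (an illustrative pairing of values, not taken from a datasheet).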
diff --git a/drivers/clk/meson/clkc-audio.h b/drivers/clk/meson/sclk-div.h
index 0a7c157ebf81..b64b2a32005f 100644
--- a/drivers/clk/meson/clkc-audio.h
+++ b/drivers/clk/meson/sclk-div.h
@@ -4,16 +4,11 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
-#ifndef __MESON_CLKC_AUDIO_H
-#define __MESON_CLKC_AUDIO_H
+#ifndef __MESON_SCLK_DIV_H
+#define __MESON_SCLK_DIV_H
-#include "clkc.h"
-
-struct meson_clk_triphase_data {
- struct parm ph0;
- struct parm ph1;
- struct parm ph2;
-};
+#include <linux/clk-provider.h>
+#include "parm.h"
struct meson_sclk_div_data {
struct parm div;
@@ -22,7 +17,6 @@ struct meson_sclk_div_data {
struct clk_duty cached_duty;
};
-extern const struct clk_ops meson_clk_triphase_ops;
extern const struct clk_ops meson_sclk_div_ops;
-#endif /* __MESON_CLKC_AUDIO_H */
+#endif /* __MESON_SCLK_DIV_H */
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index 88af0e282ea0..daff235bc763 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -5,7 +5,10 @@
*/
#include <linux/clk-provider.h>
-#include "clkc.h"
+#include <linux/module.h>
+
+#include "clk-regmap.h"
+#include "vid-pll-div.h"
static inline struct meson_vid_pll_div_data *
meson_vid_pll_div_data(struct clk_regmap *clk)
@@ -79,8 +82,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
div = _get_table_val(meson_parm_read(clk->map, &pll_div->val),
meson_parm_read(clk->map, &pll_div->sel));
if (!div || !div->divider) {
- pr_info("%s: Invalid config value for vid_pll_div\n", __func__);
- return parent_rate;
+ pr_debug("%s: Invalid config value for vid_pll_div\n", __func__);
+ return 0;
}
return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider);
@@ -89,3 +92,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
const struct clk_ops meson_vid_pll_div_ro_ops = {
.recalc_rate = meson_vid_pll_div_recalc_rate,
};
+EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops);
+
+MODULE_DESCRIPTION("Amlogic video pll divider driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/vid-pll-div.h b/drivers/clk/meson/vid-pll-div.h
new file mode 100644
index 000000000000..c0128e33ccf9
--- /dev/null
+++ b/drivers/clk/meson/vid-pll-div.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_VID_PLL_DIV_H
+#define __MESON_VID_PLL_DIV_H
+
+#include <linux/clk-provider.h>
+#include "parm.h"
+
+struct meson_vid_pll_div_data {
+ struct parm val;
+ struct parm sel;
+};
+
+extern const struct clk_ops meson_vid_pll_div_ro_ops;
+
+#endif /* __MESON_VID_PLL_DIV_H */
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index d083b860f083..a60a1be937ad 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -229,9 +229,10 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
- {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
- {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
+ {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x09, 0x09, 0x0, 0, &disp1_lock},
{MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
{MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
{MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 7dedfaa6e152..5c6bbee396b3 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -175,8 +175,10 @@ static void __init a370_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &a370_coreclks);
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, a370_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index e8f03293ec83..fa1568279c23 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -226,7 +226,9 @@ static void __init axp_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &axp_coreclks);
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, axp_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index e0dd99f36bf4..0bd09d33f9cf 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -188,10 +188,14 @@ static void __init dove_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &dove_coreclks);
- if (ddnp)
+ if (ddnp) {
dove_divider_clk_init(ddnp);
+ of_node_put(ddnp);
+ }
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, dove_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 6f784167bda4..35af3aa18f1c 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -331,6 +331,8 @@ static void __init kirkwood_clk_init(struct device_node *np)
if (cgnp) {
mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc);
+
+ of_node_put(cgnp);
}
}
CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c
index 0a74cf7a7725..1c8ab4f834ba 100644
--- a/drivers/clk/mvebu/mv98dx3236.c
+++ b/drivers/clk/mvebu/mv98dx3236.c
@@ -172,7 +172,9 @@ static void __init mv98dx3236_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &mv98dx3236_core_clocks);
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init);
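
The five mvebu hunks above all fix the same leak: of_find_compatible_node(), used earlier in these init functions to look up the gating (or divider) node, returns that node with an elevated refcount, which must be dropped with of_node_put() once setup is done. A minimal sketch of the pattern, with a made-up compatible string:

    #include <linux/of.h>

    static void __init example_clk_init(struct device_node *np)
    {
            struct device_node *cgnp;

            cgnp = of_find_compatible_node(NULL, NULL,
                                           "vendor,example-gating-clock");
            if (cgnp) {
                    /* ... register the gates parsed from cgnp ... */
                    of_node_put(cgnp);      /* drop the lookup's reference */
            }
    }
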
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index e5eca8a1abe4..c25b57c3cbc8 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -71,7 +71,6 @@ struct src_sel {
* @freq_tbl: frequency table
* @clkr: regmap clock handle
* @lock: register lock
- *
*/
struct clk_rcg {
u32 ns_reg;
@@ -107,7 +106,6 @@ extern const struct clk_ops clk_rcg_lcc_ops;
* @freq_tbl: frequency table
* @clkr: regmap clock handle
* @lock: register lock
- *
*/
struct clk_dyn_rcg {
u32 ns_reg[2];
@@ -140,7 +138,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @parent_map: map from software's parent index to hardware's src_sel field
* @freq_tbl: frequency table
* @clkr: regmap clock handle
- *
+ * @cfg_off: additional offset of the CFG/M/N/D registers from cmd_rcgr
*/
struct clk_rcg2 {
u32 cmd_rcgr;
@@ -150,6 +148,7 @@ struct clk_rcg2 {
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
struct clk_regmap clkr;
+ u8 cfg_off;
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 6e3bd195d012..8c02bffe50df 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -41,6 +41,11 @@
#define N_REG 0xc
#define D_REG 0x10
+#define RCG_CFG_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
+#define RCG_M_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
+#define RCG_N_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
+#define RCG_D_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
+
/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL 8
#define SE_CMD_DFSR_OFFSET 0x14
@@ -74,7 +79,7 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw)
u32 cfg;
int i, ret;
- ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
if (ret)
goto err;
@@ -123,7 +128,7 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
int ret;
u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
- ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
CFG_SRC_SEL_MASK, cfg);
if (ret)
return ret;
@@ -162,13 +167,13 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
if (rcg->mnd_width) {
mask = BIT(rcg->mnd_width) - 1;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
+ regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
m &= mask;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
+ regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
n = ~n;
n &= mask;
n += m;
@@ -263,17 +268,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
if (rcg->mnd_width && f->n) {
mask = BIT(rcg->mnd_width) - 1;
ret = regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + M_REG, mask, f->m);
+ RCG_M_OFFSET(rcg), mask, f->m);
if (ret)
return ret;
ret = regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
+ RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
if (ret)
return ret;
ret = regmap_update_bits(rcg->clkr.regmap,
- rcg->cmd_rcgr + D_REG, mask, ~f->n);
+ RCG_D_OFFSET(rcg), mask, ~f->n);
if (ret)
return ret;
}
@@ -284,8 +289,7 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
-
- return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
mask, cfg);
}
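
The new cfg_off field and the RCG_*_OFFSET() helpers above exist for RCGs whose CFG/M/N/D registers sit at a non-standard distance from the command register; the qcs404 blsp1_uart3 hunk further down is the first user. A sketch of how the offset folds into an access, assuming those macros and CFG_REG = 0x4 as defined in this file:

    /*
     * For a clock with .cmd_rcgr = 0x4014 and .cfg_off = 0x20 (the
     * qcs404 case below), this reads 0x4014 + 0x20 + 0x4 = 0x4038.
     */
    static int example_read_cfg(struct clk_rcg2 *rcg, u32 *cfg)
    {
            return regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
    }
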
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 9f4fc7773fb2..c3fd632af119 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -18,6 +18,31 @@
#define CLK_RPMH_ARC_EN_OFFSET 0
#define CLK_RPMH_VRM_EN_OFFSET 4
+#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
+#define BCM_TCS_CMD_VALID_SHIFT 29
+#define BCM_TCS_CMD_VOTE_MASK 0x3fff
+#define BCM_TCS_CMD_VOTE_SHIFT 0
+
+#define BCM_TCS_CMD(valid, vote) \
+ (BCM_TCS_CMD_COMMIT_MASK | \
+ ((valid) << BCM_TCS_CMD_VALID_SHIFT) | \
+ ((vote & BCM_TCS_CMD_VOTE_MASK) \
+ << BCM_TCS_CMD_VOTE_SHIFT))
+
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert Hz value to an RPMh msg
+ * @width: multiplier used to convert Hz value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved to pad the struct
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
/**
* struct clk_rpmh - individual rpmh clock data structure
* @hw: handle between common and hardware-specific interfaces
@@ -29,6 +54,7 @@
* @aggr_state: rpmh clock aggregated state
* @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh
* @valid_state_mask: mask to determine the state of the rpmh clock
+ * @unit: divisor used to convert a Hz rate to an RPMh msg (from kHz aux data)
* @dev: device to which it is attached
* @peer: pointer to the clock rpmh sibling
*/
@@ -42,6 +68,7 @@ struct clk_rpmh {
u32 aggr_state;
u32 last_sent_aggr_state;
u32 valid_state_mask;
+ u32 unit;
struct device *dev;
struct clk_rpmh *peer;
};
@@ -98,6 +125,17 @@ static DEFINE_MUTEX(rpmh_clk_lock);
__DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
CLK_RPMH_VRM_EN_OFFSET, 1, _div)
+#define DEFINE_CLK_RPMH_BCM(_platform, _name, _res_name) \
+ static struct clk_rpmh _platform##_##_name = { \
+ .res_name = _res_name, \
+ .valid_state_mask = BIT(RPMH_ACTIVE_ONLY_STATE), \
+ .div = 1, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpmh_bcm_ops, \
+ .name = #_name, \
+ }, \
+ }
+
static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw)
{
return container_of(_hw, struct clk_rpmh, hw);
@@ -210,6 +248,96 @@ static const struct clk_ops clk_rpmh_ops = {
.recalc_rate = clk_rpmh_recalc_rate,
};
+static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
+{
+ struct tcs_cmd cmd = { 0 };
+ u32 cmd_state;
+ int ret;
+
+ mutex_lock(&rpmh_clk_lock);
+
+ cmd_state = 0;
+ if (enable) {
+ cmd_state = 1;
+ if (c->aggr_state)
+ cmd_state = c->aggr_state;
+ }
+
+ if (c->last_sent_aggr_state == cmd_state) {
+ mutex_unlock(&rpmh_clk_lock);
+ return 0;
+ }
+
+ cmd.addr = c->res_addr;
+ cmd.data = BCM_TCS_CMD(enable, cmd_state);
+
+ ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
+ if (ret) {
+ dev_err(c->dev, "set active state of %s failed: (%d)\n",
+ c->res_name, ret);
+ mutex_unlock(&rpmh_clk_lock);
+ return ret;
+ }
+
+ c->last_sent_aggr_state = cmd_state;
+
+ mutex_unlock(&rpmh_clk_lock);
+
+ return 0;
+}
+
+static int clk_rpmh_bcm_prepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ return clk_rpmh_bcm_send_cmd(c, true);
+}
+
+static void clk_rpmh_bcm_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ clk_rpmh_bcm_send_cmd(c, false);
+}
+
+static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ c->aggr_state = rate / c->unit;
+ /*
+ * Since any non-zero value sent to hw would result in enabling the
+ * clock, only send the value if the clock has already been prepared.
+ */
+ if (clk_hw_is_prepared(hw))
+ clk_rpmh_bcm_send_cmd(c, true);
+
+ return 0;
+}
+
+static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ return c->aggr_state * c->unit;
+}
+
+static const struct clk_ops clk_rpmh_bcm_ops = {
+ .prepare = clk_rpmh_bcm_prepare,
+ .unprepare = clk_rpmh_bcm_unprepare,
+ .set_rate = clk_rpmh_bcm_set_rate,
+ .round_rate = clk_rpmh_round_rate,
+ .recalc_rate = clk_rpmh_bcm_recalc_rate,
+};
+
/* Resource name must match resource id present in cmd-db. */
DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
@@ -217,6 +345,7 @@ DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0");
static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
@@ -231,6 +360,7 @@ static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
[RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
[RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
@@ -267,6 +397,8 @@ static int clk_rpmh_probe(struct platform_device *pdev)
for (i = 0; i < desc->num_clks; i++) {
u32 res_addr;
+ size_t aux_data_len;
+ const struct bcm_db *data;
rpmh_clk = to_clk_rpmh(hw_clks[i]);
res_addr = cmd_db_read_addr(rpmh_clk->res_name);
@@ -275,6 +407,20 @@ static int clk_rpmh_probe(struct platform_device *pdev)
rpmh_clk->res_name);
return -ENODEV;
}
+
+ data = cmd_db_read_aux_data(rpmh_clk->res_name, &aux_data_len);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ dev_err(&pdev->dev,
+ "error reading RPMh aux data for %s (%d)\n",
+ rpmh_clk->res_name, ret);
+ return ret;
+ }
+
+ /* Convert unit from kHz to Hz */
+ if (aux_data_len == sizeof(*data))
+ rpmh_clk->unit = le32_to_cpu(data->unit) * 1000ULL;
+
rpmh_clk->res_addr += res_addr;
rpmh_clk->dev = &pdev->dev;
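
The probe hunk above scales the command-db unit, reported in kHz, up to Hz so that clk_rpmh_bcm_set_rate() can divide a Hz rate directly. A worked example with hypothetical numbers:

    /*
     * If cmd-db reports data->unit == 1000 (kHz), the driver stores
     * unit = 1000 * 1000 = 1000000 Hz. A later clk_set_rate() of
     * 19200000 Hz then votes 19200000 / 1000000 = 19, and the TCS
     * payload becomes:
     *
     *   BCM_TCS_CMD(1, 19) = 0x40000000        (commit)
     *                      | (1 << 29)         (valid)
     *                      | (19 & 0x3fff)     (vote)
     *                      = 0x60000013
     */
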
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index d3aadaeb2903..22dd42ad9223 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -655,10 +655,73 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
.num_clks = ARRAY_SIZE(qcs404_clks),
};
+/* msm8998 */
+DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, div_clk1, div_clk1_a, 0xb);
+DEFINE_CLK_SMD_RPM(msm8998, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk1, ln_bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk2, ln_bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, ln_bb_clk3_pin, ln_bb_clk3_a_pin,
+ 3);
+DEFINE_CLK_SMD_RPM(msm8998, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+ QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8998, aggre1_noc_clk, aggre1_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8998, aggre2_noc_clk, aggre2_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 2);
+DEFINE_CLK_SMD_RPM_QDSS(msm8998, qdss_clk, qdss_a_clk,
+ QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
+static struct clk_smd_rpm *msm8998_clks[] = {
+ [RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &msm8998_cnoc_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8998_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8998_ce1_a_clk,
+ [RPM_SMD_DIV_CLK1] = &msm8998_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &msm8998_div_clk1_a,
+ [RPM_SMD_IPA_CLK] = &msm8998_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8998_ipa_a_clk,
+ [RPM_SMD_LN_BB_CLK1] = &msm8998_ln_bb_clk1,
+ [RPM_SMD_LN_BB_CLK1_A] = &msm8998_ln_bb_clk1_a,
+ [RPM_SMD_LN_BB_CLK2] = &msm8998_ln_bb_clk2,
+ [RPM_SMD_LN_BB_CLK2_A] = &msm8998_ln_bb_clk2_a,
+ [RPM_SMD_LN_BB_CLK3_PIN] = &msm8998_ln_bb_clk3_pin,
+ [RPM_SMD_LN_BB_CLK3_A_PIN] = &msm8998_ln_bb_clk3_a_pin,
+ [RPM_SMD_MMAXI_CLK] = &msm8998_mmssnoc_axi_rpm_clk,
+ [RPM_SMD_MMAXI_A_CLK] = &msm8998_mmssnoc_axi_rpm_a_clk,
+ [RPM_SMD_AGGR1_NOC_CLK] = &msm8998_aggre1_noc_clk,
+ [RPM_SMD_AGGR1_NOC_A_CLK] = &msm8998_aggre1_noc_a_clk,
+ [RPM_SMD_AGGR2_NOC_CLK] = &msm8998_aggre2_noc_clk,
+ [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8998_aggre2_noc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8998_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8998_qdss_a_clk,
+ [RPM_SMD_RF_CLK1] = &msm8998_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8998_rf_clk1_a,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8998_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8998_rf_clk2_a_pin,
+ [RPM_SMD_RF_CLK3] = &msm8998_rf_clk3,
+ [RPM_SMD_RF_CLK3_A] = &msm8998_rf_clk3_a,
+ [RPM_SMD_RF_CLK3_PIN] = &msm8998_rf_clk3_pin,
+ [RPM_SMD_RF_CLK3_A_PIN] = &msm8998_rf_clk3_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
+ .clks = msm8998_clks,
+ .num_clks = ARRAY_SIZE(msm8998_clks),
+};
+
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
+ { .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
{ .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
{ }
};
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 0a48ed56833b..a6b2f86112d8 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -231,6 +231,8 @@ int qcom_cc_really_probe(struct platform_device *pdev,
struct gdsc_desc *scd;
size_t num_clks = desc->num_clks;
struct clk_regmap **rclks = desc->clks;
+ size_t num_clk_hws = desc->num_clk_hws;
+ struct clk_hw **clk_hws = desc->clk_hws;
cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
if (!cc)
@@ -269,6 +271,12 @@ int qcom_cc_really_probe(struct platform_device *pdev,
qcom_cc_drop_protected(dev, cc);
+ for (i = 0; i < num_clk_hws; i++) {
+ ret = devm_clk_hw_register(dev, clk_hws[i]);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < num_clks; i++) {
if (!rclks[i])
continue;
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 4aa33ee70bae..1e2a8bdac55a 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -27,6 +27,8 @@ struct qcom_cc_desc {
size_t num_resets;
struct gdsc **gdscs;
size_t num_gdscs;
+ struct clk_hw **clk_hws;
+ size_t num_clk_hws;
};
/**
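
With the two new descriptor fields, a provider hands its bare struct clk_hw clocks (fixed factors, XO stubs) to qcom_cc_really_probe() and drops its open-coded devm_clk_hw_register() loop, which is exactly what every hunk below does. A sketch of a descriptor using the fields, modelled loosely on the gcc-msm8998 xo further down:

    static struct clk_fixed_factor example_xo = {
            .mult = 1,
            .div = 1,
            .hw.init = &(struct clk_init_data){
                    .name = "xo",
                    .parent_names = (const char *[]){ "xo_board" },
                    .num_parents = 1,
                    .ops = &clk_fixed_factor_ops,
            },
    };

    static struct clk_hw *example_hws[] = {
            &example_xo.hw,
    };

    static const struct qcom_cc_desc example_desc = {
            /* .config, .clks, .num_clks, ... as before */
            .clk_hws = example_hws,
            .num_clk_hws = ARRAY_SIZE(example_hws),
    };
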
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 505c6263141d..0e32892b438c 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -4715,18 +4715,12 @@ static const struct qcom_cc_desc gcc_ipq8074_desc = {
.num_clks = ARRAY_SIZE(gcc_ipq8074_clks),
.resets = gcc_ipq8074_resets,
.num_resets = ARRAY_SIZE(gcc_ipq8074_resets),
+ .clk_hws = gcc_ipq8074_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_ipq8074_hws),
};
static int gcc_ipq8074_probe(struct platform_device *pdev)
{
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(gcc_ipq8074_hws); i++) {
- ret = devm_clk_hw_register(&pdev->dev, gcc_ipq8074_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_probe(pdev, &gcc_ipq8074_desc);
}
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index 849046fbed6d..8c6d93144b9c 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -1702,6 +1702,8 @@ static const struct qcom_cc_desc gcc_mdm9615_desc = {
.num_clks = ARRAY_SIZE(gcc_mdm9615_clks),
.resets = gcc_mdm9615_resets,
.num_resets = ARRAY_SIZE(gcc_mdm9615_resets),
+ .clk_hws = gcc_mdm9615_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_mdm9615_hws),
};
static const struct of_device_id gcc_mdm9615_match_table[] = {
@@ -1712,21 +1714,12 @@ MODULE_DEVICE_TABLE(of, gcc_mdm9615_match_table);
static int gcc_mdm9615_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct regmap *regmap;
- int ret;
- int i;
regmap = qcom_cc_map(pdev, &gcc_mdm9615_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- for (i = 0; i < ARRAY_SIZE(gcc_mdm9615_hws); i++) {
- ret = devm_clk_hw_register(dev, gcc_mdm9615_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &gcc_mdm9615_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9d136172c27c..4632b9272b7f 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -3656,6 +3656,8 @@ static const struct qcom_cc_desc gcc_msm8996_desc = {
.num_resets = ARRAY_SIZE(gcc_msm8996_resets),
.gdscs = gcc_msm8996_gdscs,
.num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs),
+ .clk_hws = gcc_msm8996_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_msm8996_hws),
};
static const struct of_device_id gcc_msm8996_match_table[] = {
@@ -3666,8 +3668,6 @@ MODULE_DEVICE_TABLE(of, gcc_msm8996_match_table);
static int gcc_msm8996_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- int i, ret;
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &gcc_msm8996_desc);
@@ -3680,12 +3680,6 @@ static int gcc_msm8996_probe(struct platform_device *pdev)
*/
regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
- for (i = 0; i < ARRAY_SIZE(gcc_msm8996_hws); i++) {
- ret = devm_clk_hw_register(dev, gcc_msm8996_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 1b779396e04f..c240fba794c7 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1112,6 +1112,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
{ }
@@ -1189,6 +1190,7 @@ static struct clk_branch gcc_aggre1_ufs_axi_clk = {
"ufs_axi_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1206,6 +1208,7 @@ static struct clk_branch gcc_aggre1_usb3_axi_clk = {
"usb30_master_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1288,6 +1291,7 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
"blsp1_qup1_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1305,6 +1309,7 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
"blsp1_qup1_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1322,6 +1327,7 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
"blsp1_qup2_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1339,6 +1345,7 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
"blsp1_qup2_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1356,6 +1363,7 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
"blsp1_qup3_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1373,6 +1381,7 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
"blsp1_qup3_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1390,6 +1399,7 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
"blsp1_qup4_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1407,6 +1417,7 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
"blsp1_qup4_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1424,6 +1435,7 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
"blsp1_qup5_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1441,6 +1453,7 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
"blsp1_qup5_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1458,6 +1471,7 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
"blsp1_qup6_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1475,6 +1489,7 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
"blsp1_qup6_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1505,6 +1520,7 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
"blsp1_uart1_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1522,6 +1538,7 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
"blsp1_uart2_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1539,6 +1556,7 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
"blsp1_uart3_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1569,6 +1587,7 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
"blsp2_qup1_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1586,6 +1605,7 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
"blsp2_qup1_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1603,6 +1623,7 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
"blsp2_qup2_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1620,6 +1641,7 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
"blsp2_qup2_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1637,6 +1659,7 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
"blsp2_qup3_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1654,6 +1677,7 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
"blsp2_qup3_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1671,6 +1695,7 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
"blsp2_qup4_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1688,6 +1713,7 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
"blsp2_qup4_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1705,6 +1731,7 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
"blsp2_qup5_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1722,6 +1749,7 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
"blsp2_qup5_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1739,6 +1767,7 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
"blsp2_qup6_i2c_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1756,6 +1785,7 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
"blsp2_qup6_spi_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1786,6 +1816,7 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
"blsp2_uart1_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1803,6 +1834,7 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
"blsp2_uart2_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1820,6 +1852,7 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = {
"blsp2_uart3_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1837,6 +1870,7 @@ static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
"usb30_master_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1854,6 +1888,7 @@ static struct clk_branch gcc_gp1_clk = {
"gp1_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1871,6 +1906,7 @@ static struct clk_branch gcc_gp2_clk = {
"gp2_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1888,6 +1924,7 @@ static struct clk_branch gcc_gp3_clk = {
"gp3_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1957,6 +1994,7 @@ static struct clk_branch gcc_hmss_ahb_clk = {
"hmss_ahb_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1987,6 +2025,7 @@ static struct clk_branch gcc_hmss_rbcpr_clk = {
"hmss_rbcpr_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2088,6 +2127,7 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
"pcie_aux_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2157,6 +2197,7 @@ static struct clk_branch gcc_pcie_phy_aux_clk = {
"pcie_aux_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2174,6 +2215,7 @@ static struct clk_branch gcc_pdm2_clk = {
"pdm2_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2243,6 +2285,7 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
"sdcc2_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2273,6 +2316,7 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
"sdcc4_apps_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2316,6 +2360,7 @@ static struct clk_branch gcc_tsif_ref_clk = {
"tsif_ref_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2346,6 +2391,7 @@ static struct clk_branch gcc_ufs_axi_clk = {
"ufs_axi_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2441,6 +2487,7 @@ static struct clk_branch gcc_usb30_master_clk = {
"usb30_master_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2458,6 +2505,7 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
"usb30_mock_utmi_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2488,6 +2536,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
"usb3_phy_aux_clk_src",
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2495,7 +2544,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
static struct clk_branch gcc_usb3_phy_pipe_clk = {
.halt_reg = 0x50004,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x50004,
.enable_mask = BIT(0),
@@ -2910,6 +2959,10 @@ static const struct regmap_config gcc_msm8998_regmap_config = {
.fast_io = true,
};
+static struct clk_hw *gcc_msm8998_hws[] = {
+ &xo.hw,
+};
+
static const struct qcom_cc_desc gcc_msm8998_desc = {
.config = &gcc_msm8998_regmap_config,
.clks = gcc_msm8998_clocks,
@@ -2918,6 +2971,8 @@ static const struct qcom_cc_desc gcc_msm8998_desc = {
.num_resets = ARRAY_SIZE(gcc_msm8998_resets),
.gdscs = gcc_msm8998_gdscs,
.num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs),
+ .clk_hws = gcc_msm8998_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_msm8998_hws),
};
static int gcc_msm8998_probe(struct platform_device *pdev)
@@ -2937,10 +2992,6 @@ static int gcc_msm8998_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_clk_hw_register(&pdev->dev, &xo.hw);
- if (ret)
- return ret;
-
return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap);
}
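
The branch clocks above all gain CLK_SET_RATE_PARENT, so a rate request on a leaf branch now propagates to its RCG parent instead of being limited to gating at whatever rate that parent last had. Roughly what a consumer-side call achieves after this patch (consumer names hypothetical):

    struct clk *apps;
    int ret;

    apps = devm_clk_get(dev, "apps");       /* e.g. an SDHCI consumer */
    if (IS_ERR(apps))
            return PTR_ERR(apps);

    /*
     * Walks up gcc_sdcc2_apps_clk -> sdcc2_apps_clk_src via
     * CLK_SET_RATE_PARENT, letting the RCG pick the nearest freq_tbl
     * entry; previously the branch could only gate and the requested
     * rate was silently ignored.
     */
    ret = clk_set_rate(apps, 200000000);
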
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 64da032bb9ed..5a62f64ada93 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -678,6 +678,7 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
.cmd_rcgr = 0x4014,
.mnd_width = 16,
.hid_width = 5,
+ .cfg_off = 0x20,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
@@ -2692,6 +2693,8 @@ static const struct qcom_cc_desc gcc_qcs404_desc = {
.num_clks = ARRAY_SIZE(gcc_qcs404_clocks),
.resets = gcc_qcs404_resets,
.num_resets = ARRAY_SIZE(gcc_qcs404_resets),
+ .clk_hws = gcc_qcs404_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_qcs404_hws),
};
static const struct of_device_id gcc_qcs404_match_table[] = {
@@ -2703,7 +2706,6 @@ MODULE_DEVICE_TABLE(of, gcc_qcs404_match_table);
static int gcc_qcs404_probe(struct platform_device *pdev)
{
struct regmap *regmap;
- int ret, i;
regmap = qcom_cc_map(pdev, &gcc_qcs404_desc);
if (IS_ERR(regmap))
@@ -2711,12 +2713,6 @@ static int gcc_qcs404_probe(struct platform_device *pdev)
clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config);
- for (i = 0; i < ARRAY_SIZE(gcc_qcs404_hws); i++) {
- ret = devm_clk_hw_register(&pdev->dev, gcc_qcs404_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index ba239ea4c842..8827db23066f 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -2420,6 +2420,8 @@ static const struct qcom_cc_desc gcc_sdm660_desc = {
.num_resets = ARRAY_SIZE(gcc_sdm660_resets),
.gdscs = gcc_sdm660_gdscs,
.num_gdscs = ARRAY_SIZE(gcc_sdm660_gdscs),
+ .clk_hws = gcc_sdm660_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_sdm660_hws),
};
static const struct of_device_id gcc_sdm660_match_table[] = {
@@ -2431,7 +2433,7 @@ MODULE_DEVICE_TABLE(of, gcc_sdm660_match_table);
static int gcc_sdm660_probe(struct platform_device *pdev)
{
- int i, ret;
+ int ret;
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &gcc_sdm660_desc);
@@ -2446,13 +2448,6 @@ static int gcc_sdm660_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Register the hws */
- for (i = 0; i < ARRAY_SIZE(gcc_sdm660_hws); i++) {
- ret = devm_clk_hw_register(&pdev->dev, gcc_sdm660_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 58fa5c247af1..7131dcf9b060 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1703,6 +1703,9 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
+ .parent_names = (const char *[]){ "pcie_0_pipe_clk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1802,6 +1805,8 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
.enable_mask = BIT(30),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
+ .parent_names = (const char *[]){ "pcie_1_pipe_clk" },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 7d4ee109435c..7235510eac94 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -3347,6 +3347,8 @@ static const struct qcom_cc_desc mmcc_msm8996_desc = {
.num_resets = ARRAY_SIZE(mmcc_msm8996_resets),
.gdscs = mmcc_msm8996_gdscs,
.num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs),
+ .clk_hws = mmcc_msm8996_hws,
+ .num_clk_hws = ARRAY_SIZE(mmcc_msm8996_hws),
};
static const struct of_device_id mmcc_msm8996_match_table[] = {
@@ -3357,8 +3359,6 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8996_match_table);
static int mmcc_msm8996_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- int i, ret;
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &mmcc_msm8996_desc);
@@ -3370,12 +3370,6 @@ static int mmcc_msm8996_probe(struct platform_device *pdev)
/* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */
regmap_update_bits(regmap, 0x5054, BIT(15), 0);
- for (i = 0; i < ARRAY_SIZE(mmcc_msm8996_hws); i++) {
- ret = devm_clk_hw_register(dev, mmcc_msm8996_hws[i]);
- if (ret)
- return ret;
- }
-
return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap);
}
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 10e852518870..4d92b27a6153 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -21,7 +21,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R8A774A1_CLK_OSC,
+ LAST_DT_CORE_CLK = R8A774A1_CLK_CANFD,
/* External Input Clocks */
CLK_EXTAL,
@@ -102,6 +102,7 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_DIV6P1("canfd", R8A774A1_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014),
DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
@@ -191,6 +192,7 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4),
DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4),
DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A774A1_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4),
DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 10b96895d452..34e274f2a273 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -22,7 +22,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R8A774C0_CLK_CPEX,
+ LAST_DT_CORE_CLK = R8A774C0_CLK_CANFD,
/* External Input Clocks */
CLK_EXTAL,
@@ -33,6 +33,7 @@ enum clk_ids {
CLK_PLL1,
CLK_PLL3,
CLK_PLL0D4,
+ CLK_PLL0D6,
CLK_PLL0D8,
CLK_PLL0D20,
CLK_PLL0D24,
@@ -61,6 +62,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100),
DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1),
+ DEF_FIXED(".pll0d6", CLK_PLL0D6, CLK_PLL0, 6, 1),
DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1),
DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1),
DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1),
@@ -112,6 +114,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
+ DEF_DIV6P1("canfd", R8A774C0_CLK_CANFD, CLK_PLL0D6, 0x244),
DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c),
DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014),
@@ -119,6 +122,11 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A774C0_CLK_S0D6C),
+ DEF_MOD("tmu3", 122, R8A774C0_CLK_S3D2C),
+ DEF_MOD("tmu2", 123, R8A774C0_CLK_S3D2C),
+ DEF_MOD("tmu1", 124, R8A774C0_CLK_S3D2C),
+ DEF_MOD("tmu0", 125, R8A774C0_CLK_CP),
DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C),
DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C),
DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C),
@@ -172,8 +180,8 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4),
DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4),
DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0),
- DEF_MOD("du1", 723, R8A774C0_CLK_S2D1),
- DEF_MOD("du0", 724, R8A774C0_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A774C0_CLK_S1D1),
+ DEF_MOD("du0", 724, R8A774C0_CLK_S1D1),
DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1),
DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2),
@@ -187,6 +195,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4),
DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4),
DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A774C0_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4),
DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index 25a3083b6764..f9e07fcc0d96 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -41,6 +41,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_OCO,
/* Module Clocks */
@@ -65,8 +66,14 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
DEF_RATE(".oco", CLK_OCO, 32768),
+ DEF_BASE("rpc", R8A77980_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A77980_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A77980_CLK_RPC),
+
/* Core Clock Outputs */
DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
@@ -164,6 +171,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
DEF_MOD("gpio1", 911, R8A77980_CLK_CP),
DEF_MOD("gpio0", 912, R8A77980_CLK_CP),
DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2),
+ DEF_MOD("rpc-if", 917, R8A77980_CLK_RPC),
DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6),
DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6),
DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2),
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index be2ccbd6d623..9a8071a8114d 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -30,6 +30,21 @@
#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */
+static spinlock_t cpg_lock;
+
+static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&cpg_lock, flags);
+ val = readl(reg);
+ val &= ~clear;
+ val |= set;
+ writel(val, reg);
+ spin_unlock_irqrestore(&cpg_lock, flags);
+}
+
struct cpg_simple_notifier {
struct notifier_block nb;
void __iomem *reg;
@@ -118,7 +133,6 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
struct cpg_z_clk *zclk = to_z_clk(hw);
unsigned int mult;
unsigned int i;
- u32 val, kick;
/* Factor of 2 is for fixed divider */
mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate);
@@ -127,17 +141,14 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- val = readl(zclk->reg) & ~zclk->mask;
- val |= ((32 - mult) << __ffs(zclk->mask)) & zclk->mask;
- writel(val, zclk->reg);
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ ((32 - mult) << __ffs(zclk->mask)) & zclk->mask);
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
* clock change completion.
*/
- kick = readl(zclk->kick_reg);
- kick |= CPG_FRQCRB_KICK;
- writel(kick, zclk->kick_reg);
+ cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
/*
* Note: There is no HW information about the worst case latency.
@@ -266,12 +277,10 @@ static const struct sd_div_table cpg_sd_div_table[] = {
static int cpg_sd_clock_enable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- u32 val = readl(clock->csn.reg);
-
- val &= ~(CPG_SD_STP_MASK);
- val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK;
- writel(val, clock->csn.reg);
+ cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
+ clock->div_table[clock->cur_div_idx].val &
+ CPG_SD_STP_MASK);
return 0;
}
@@ -280,7 +289,7 @@ static void cpg_sd_clock_disable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- writel(readl(clock->csn.reg) | CPG_SD_STP_MASK, clock->csn.reg);
+ cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
}
static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
@@ -327,7 +336,6 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct sd_clock *clock = to_sd_clock(hw);
unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
- u32 val;
unsigned int i;
for (i = 0; i < clock->div_num; i++)
@@ -339,10 +347,9 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
clock->cur_div_idx = i;
- val = readl(clock->csn.reg);
- val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- writel(val, clock->csn.reg);
+ cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
+ clock->div_table[i].val &
+ (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
return 0;
}
@@ -415,6 +422,92 @@ free_clock:
return clk;
}
+struct rpc_clock {
+ struct clk_divider div;
+ struct clk_gate gate;
+ /*
+ * One notifier covers both RPC and RPCD2 clocks as they are both
+ * controlled by the same RPCCKCR register...
+ */
+ struct cpg_simple_notifier csn;
+};
+
+static const struct clk_div_table cpg_rpcsrc_div_table[] = {
+ { 2, 5 }, { 3, 6 }, { 0, 0 },
+};
+
+static const struct clk_div_table cpg_rpc_div_table[] = {
+ { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
+};
+
+static struct clk * __init cpg_rpc_clk_register(const char *name,
+ void __iomem *base, const char *parent_name,
+ struct raw_notifier_head *notifiers)
+{
+ struct rpc_clock *rpc;
+ struct clk *clk;
+
+ rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
+ if (!rpc)
+ return ERR_PTR(-ENOMEM);
+
+ rpc->div.reg = base + CPG_RPCCKCR;
+ rpc->div.width = 3;
+ rpc->div.table = cpg_rpc_div_table;
+ rpc->div.lock = &cpg_lock;
+
+ rpc->gate.reg = base + CPG_RPCCKCR;
+ rpc->gate.bit_idx = 8;
+ rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
+ rpc->gate.lock = &cpg_lock;
+
+ rpc->csn.reg = base + CPG_RPCCKCR;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &rpc->div.hw, &clk_divider_ops,
+ &rpc->gate.hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk)) {
+ kfree(rpc);
+ return clk;
+ }
+
+ cpg_simple_notifier_register(notifiers, &rpc->csn);
+ return clk;
+}
+
+struct rpcd2_clock {
+ struct clk_fixed_factor fixed;
+ struct clk_gate gate;
+};
+
+static struct clk * __init cpg_rpcd2_clk_register(const char *name,
+ void __iomem *base,
+ const char *parent_name)
+{
+ struct rpcd2_clock *rpcd2;
+ struct clk *clk;
+
+ rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
+ if (!rpcd2)
+ return ERR_PTR(-ENOMEM);
+
+ rpcd2->fixed.mult = 1;
+ rpcd2->fixed.div = 2;
+
+ rpcd2->gate.reg = base + CPG_RPCCKCR;
+ rpcd2->gate.bit_idx = 9;
+ rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
+ rpcd2->gate.lock = &cpg_lock;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &rpcd2->fixed.hw, &clk_fixed_factor_ops,
+ &rpcd2->gate.hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk))
+ kfree(rpcd2);
+
+ return clk;
+}
+
static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
@@ -593,6 +686,21 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
}
break;
+ case CLK_TYPE_GEN3_RPCSRC:
+ return clk_register_divider_table(NULL, core->name,
+ __clk_get_name(parent), 0,
+ base + CPG_RPCCKCR, 3, 2, 0,
+ cpg_rpcsrc_div_table,
+ &cpg_lock);
+
+ case CLK_TYPE_GEN3_RPC:
+ return cpg_rpc_clk_register(core->name, base,
+ __clk_get_name(parent), notifiers);
+
+ case CLK_TYPE_GEN3_RPCD2:
+ return cpg_rpcd2_clk_register(core->name, base,
+ __clk_get_name(parent));
+
default:
return ERR_PTR(-EINVAL);
}
@@ -613,5 +721,8 @@ int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
if (attr)
cpg_quirks = (uintptr_t)attr->data;
pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks);
+
+ spin_lock_init(&cpg_lock);
+
return 0;
}
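
The new cpg_lock closes a window where the Z-clock, SD-clock, and the new RPC divider/gate paths could interleave read-modify-write sequences on shared CPG registers; the composite RPC clocks reuse the very same lock through their .lock fields, so framework-driven updates also serialize against cpg_reg_modify(). The idiom, restated generically:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    /* Same shape as cpg_reg_modify(): one lock for every RMW on the bank. */
    static void example_reg_modify(void __iomem *reg, u32 clear, u32 set)
    {
            unsigned long flags;
            u32 val;

            spin_lock_irqsave(&example_lock, flags);
            val = readl(reg);
            val = (val & ~clear) | set;
            writel(val, reg);
            spin_unlock_irqrestore(&example_lock, flags);
    }
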
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index f4fb6cf16688..eac1b057455a 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -23,6 +23,9 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_Z2,
CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
+ CLK_TYPE_GEN3_RPCSRC,
+ CLK_TYPE_GEN3_RPC,
+ CLK_TYPE_GEN3_RPCD2,
/* SoC specific definitions start here */
CLK_TYPE_GEN3_SOC_BASE,
@@ -57,6 +60,7 @@ struct rcar_gen3_cpg_pll_config {
u8 osc_prediv;
};
+#define CPG_RPCCKCR 0x238
#define CPG_RCKCR 0x240
struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 7ea20341e870..5ecf28854876 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -586,12 +586,12 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
COMPOSITE(0, "dclk_lcdc0_src", mux_pll_src_cpll_gpll_p, 0,
RK2928_CLKSEL_CON(27), 0, 1, MFLAGS, 8, 8, DFLAGS,
RK2928_CLKGATE_CON(3), 1, GFLAGS),
- MUX(DCLK_LCDC0, "dclk_lcdc0", mux_rk3066_lcdc0_p, 0,
+ MUX(DCLK_LCDC0, "dclk_lcdc0", mux_rk3066_lcdc0_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(27), 4, 1, MFLAGS),
COMPOSITE(0, "dclk_lcdc1_src", mux_pll_src_cpll_gpll_p, 0,
RK2928_CLKSEL_CON(28), 0, 1, MFLAGS, 8, 8, DFLAGS,
RK2928_CLKGATE_CON(3), 2, GFLAGS),
- MUX(DCLK_LCDC1, "dclk_lcdc1", mux_rk3066_lcdc1_p, 0,
+ MUX(DCLK_LCDC1, "dclk_lcdc1", mux_rk3066_lcdc1_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(28), 4, 1, MFLAGS),
COMPOSITE_NOMUX(0, "cif1_pre", "cif_src", 0,
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index faa94adb2a37..65ab5c2f48b0 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -78,17 +78,17 @@ static struct rockchip_pll_rate_table rk3328_pll_rates[] = {
static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = {
/* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
- RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217),
+ RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134218),
/* vco = 1016064000 */
- RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088),
+ RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671089),
/* vco = 983040000 */
- RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088),
+ RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671089),
/* vco = 983040000 */
- RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088),
+ RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671089),
/* vco = 860156000 */
- RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894),
+ RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797895),
/* vco = 903168000 */
- RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329),
+ RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066330),
/* vco = 819200000 */
{ /* sentinel */ },
};
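
Each fractional entry above moves up by one because the frac value was previously truncated rather than rounded to nearest. Working through the first entry under the usual RK3036-style PLL formula (24 MHz parent assumed):

    /*
     * rate = 24 MHz / refdiv * (fbdiv + frac / 2^24) / (postdiv1 * postdiv2)
     *
     * For 1016064000 Hz with refdiv = 3, fbdiv = 127, postdiv1 = postdiv2 = 1:
     *
     *   frac = (1016064000 / 8000000 - 127) * 2^24
     *        = 0.008 * 16777216 = 134217.728
     *
     * Truncating to 134217 yields ~1016063999.7 Hz, while rounding to
     * 134218 yields ~1016064000.1 Hz -- closer to the target, hence the
     * +1 on every entry.
     */
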
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 59d4d46667ce..54066e6508d3 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1028,6 +1028,7 @@ static unsigned long __init exynos4_get_xom(void)
xom = readl(chipid_base + 8);
iounmap(chipid_base);
+ of_node_put(np);
}
return xom;
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 93306283d764..8ae44b5db4c2 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent,
{
struct of_phandle_args genpdspec = { .np = pd_node };
struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
- pdev = platform_device_alloc(info->pd_name, -1);
pdev->dev.parent = parent;
- pdev->driver_override = "exynos5-subcmu";
platform_set_drvdata(pdev, (void *)info);
of_genpd_add_device(&genpdspec, &pdev->dev);
- platform_device_add(pdev);
+ ret = platform_device_add(pdev);
+ if (ret)
+ platform_device_put(pdev);
- return 0;
+ return ret;
}
static int __init exynos5_clk_probe(struct platform_device *pdev)
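
Besides switching from driver_override matching to a real device name the driver can bind to, the hunk above adds the two error paths the old code skipped: a NULL check on the allocation and a platform_device_put() when platform_device_add() fails. The canonical sequence, sketched with placeholder names:

    #include <linux/platform_device.h>

    static int example_register_child(struct device *parent, void *data)
    {
            struct platform_device *pdev;
            int ret;

            pdev = platform_device_alloc("example-subdev", PLATFORM_DEVID_AUTO);
            if (!pdev)
                    return -ENOMEM;

            pdev->dev.parent = parent;
            platform_set_drvdata(pdev, data);

            ret = platform_device_add(pdev);
            if (ret)
                    platform_device_put(pdev);      /* drops the alloc ref */

            return ret;
    }
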
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 751e2c4fb65b..dae1c96de933 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -559,7 +559,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
/* ENABLE_ACLK_TOP */
GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400",
ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0),
- GATE(CLK_ACLK_IMEM_SSX_266, "aclk_imem_ssx_266",
+ GATE(CLK_ACLK_IMEM_SSSX_266, "aclk_imem_sssx_266",
"div_aclk_imem_sssx_266", ENABLE_ACLK_TOP,
29, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400",
@@ -568,10 +568,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400",
ENABLE_ACLK_TOP, 25,
CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
- GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_266",
+ GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_200",
ENABLE_ACLK_TOP, 24,
CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
- GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_200",
+ GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_266",
ENABLE_ACLK_TOP, 23,
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b",
@@ -5467,6 +5467,35 @@ static const struct samsung_cmu_info cam1_cmu_info __initconst = {
.clk_name = "aclk_cam1_400",
};
+/*
+ * Register offset definitions for CMU_IMEM
+ */
+#define ENABLE_ACLK_IMEM_SLIMSSS 0x080c
+#define ENABLE_PCLK_IMEM_SLIMSSS 0x0908
+
+static const unsigned long imem_clk_regs[] __initconst = {
+ ENABLE_ACLK_IMEM_SLIMSSS,
+ ENABLE_PCLK_IMEM_SLIMSSS,
+};
+
+static const struct samsung_gate_clock imem_gate_clks[] __initconst = {
+ /* ENABLE_ACLK_IMEM_SLIMSSS */
+ GATE(CLK_ACLK_SLIMSSS, "aclk_slimsss", "aclk_imem_sssx_266",
+ ENABLE_ACLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* ENABLE_PCLK_IMEM_SLIMSSS */
+ GATE(CLK_PCLK_SLIMSSS, "pclk_slimsss", "aclk_imem_200",
+ ENABLE_PCLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info imem_cmu_info __initconst = {
+ .gate_clks = imem_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(imem_gate_clks),
+ .nr_clk_ids = IMEM_NR_CLK,
+ .clk_regs = imem_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(imem_clk_regs),
+ .clk_name = "aclk_imem_200",
+};
struct exynos5433_cmu_data {
struct samsung_clk_reg_dump *clk_save;
@@ -5655,6 +5684,9 @@ static const struct of_device_id exynos5433_cmu_of_match[] = {
.compatible = "samsung,exynos5433-cmu-mscl",
.data = &mscl_cmu_info,
}, {
+ .compatible = "samsung,exynos5433-cmu-imem",
+ .data = &imem_cmu_info,
+ }, {
},
};
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 884067e4f1a1..f38f0e24e3b6 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -389,7 +389,7 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
ARRAY_SIZE(s3c2450_gates));
samsung_clk_register_alias(ctx, s3c2450_aliases,
ARRAY_SIZE(s3c2450_aliases));
- /* fall through, as s3c2450 extends the s3c2416 clocks */
+ /* fall through - as s3c2450 extends the s3c2416 clocks */
case S3C2416:
samsung_clk_register_div(ctx, s3c2416_dividers,
ARRAY_SIZE(s3c2416_dividers));
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index c3f309d7100d..9cfaca5fbcdb 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -26,7 +26,7 @@ struct samsung_clk_provider {
void __iomem *reg_base;
struct device *dev;
spinlock_t lock;
- /* clk_data must be the last entry due to variable lenght 'hws' array */
+ /* clk_data must be the last entry due to variable length 'hws' array */
struct clk_hw_onecell_data clk_data;
};
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index aa7a6e6a15b6..73e03328d5c5 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -176,8 +176,7 @@ static struct clk_ops gateclk_ops = {
.set_parent = socfpga_clk_set_parent,
};
-static void __init __socfpga_gate_init(struct device_node *node,
- const struct clk_ops *ops)
+void __init socfpga_gate_init(struct device_node *node)
{
u32 clk_gate[2];
u32 div_reg[3];
@@ -188,12 +187,17 @@ static void __init __socfpga_gate_init(struct device_node *node,
const char *clk_name = node->name;
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
+ struct clk_ops *ops;
int rc;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (WARN_ON(!socfpga_clk))
return;
+ ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL);
+ if (WARN_ON(!ops))
+ return;
+
rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
if (rc)
clk_gate[0] = 0;
@@ -202,8 +206,8 @@ static void __init __socfpga_gate_init(struct device_node *node,
socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
socfpga_clk->hw.bit_idx = clk_gate[1];
- gateclk_ops.enable = clk_gate_ops.enable;
- gateclk_ops.disable = clk_gate_ops.disable;
+ ops->enable = clk_gate_ops.enable;
+ ops->disable = clk_gate_ops.disable;
}
rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
@@ -234,6 +238,11 @@ static void __init __socfpga_gate_init(struct device_node *node,
init.flags = 0;
init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
+ if (init.num_parents < 2) {
+ ops->get_parent = NULL;
+ ops->set_parent = NULL;
+ }
+
init.parent_names = parent_name;
socfpga_clk->hw.hw.init = &init;
@@ -246,8 +255,3 @@ static void __init __socfpga_gate_init(struct device_node *node,
if (WARN_ON(rc))
return;
}
-
-void __init socfpga_gate_init(struct device_node *node)
-{
- __socfpga_gate_init(node, &gateclk_ops);
-}
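
The underlying bug was a single file-scope gateclk_ops shared by every gate clock: patching .enable/.disable, and now NULLing the parent ops for single-parent clocks, mutated it for all instances at once. Duplicating the template per clock keeps such tweaks private. A compressed sketch of the pattern, names hypothetical:

    struct clk_ops *ops;

    ops = kmemdup(&example_template_ops, sizeof(example_template_ops),
                  GFP_KERNEL);
    if (WARN_ON(!ops))
            return;

    if (init.num_parents < 2) {
            /* a single-parent clock has no business exposing mux ops */
            ops->get_parent = NULL;
            ops->set_parent = NULL;
    }
    init.ops = ops;
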
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 35fabe1a32c3..269467e8e07e 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -95,6 +95,7 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0);
+ of_node_put(clkmgr_np);
BUG_ON(!clk_mgr_a10_base_addr);
pll_clk->hw.reg = clk_mgr_a10_base_addr + reg;
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index c7f463172e4b..b4b44e9b5901 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -100,6 +100,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
+ of_node_put(clkmgr_np);
BUG_ON(!clk_mgr_base_addr);
pll_clk->hw.reg = clk_mgr_base_addr + reg;
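
Both PLL fixes apply the same refcounting rule: of_find_compatible_node()
returns the node with an elevated reference count, and once of_iomap() has
created the mapping the node itself is no longer needed. The distilled
pattern:

	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
	base = of_iomap(np, 0);	/* the mapping does not pin the node */
	of_node_put(np);	/* balance of_find_compatible_node() */
	BUG_ON(!base);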
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index a4fa2945f230..4b5f8f4e4ab8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -144,7 +144,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
8, 4, /* N */
4, 2, /* K */
0, 4, /* M */
- BIT(31), /* gate */
+ BIT(31) | BIT(23) | BIT(22), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 9b49adb20d07..cbcdf664f336 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -167,7 +167,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
- u32 n_mask, k_mask, m_mask, p_mask;
+ u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
struct _ccu_nkmp _nkmp;
unsigned long flags;
u32 reg;
@@ -186,10 +186,24 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
- n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
- k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
- m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
- p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
+ /*
+ * If the width is 0, the GENMASK() macro may not generate the
+ * expected mask (0), because shifts equal to or greater than the
+ * width of the left operand are undefined behaviour in the C
+ * standard. This is avoided by explicitly checking that the width
+ * is non-zero.
+ */
+ if (nkmp->n.width)
+ n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
+ nkmp->n.shift);
+ if (nkmp->k.width)
+ k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
+ nkmp->k.shift);
+ if (nkmp->m.width)
+ m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
+ nkmp->m.shift);
+ if (nkmp->p.width)
+ p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
+ nkmp->p.shift);
spin_lock_irqsave(nkmp->common.lock, flags);
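
For reference, the undefined behaviour is easy to reproduce in plain C: a
zero-width field at shift 0 would evaluate GENMASK(-1, 0), which right-
shifts a value by its full width. A small userspace sketch of the guard,
using a 32-bit flavour of the kernel macro:

	#include <stdio.h>

	#define GENMASK(h, l) \
		(((~0U) - (1U << (l)) + 1) & (~0U >> (31 - (h))))

	static unsigned int field_mask(unsigned int width, unsigned int shift)
	{
		/* Mirror the fix: width == 0 must short-circuit, or
		 * GENMASK(shift - 1, shift) is evaluated; for shift == 0
		 * that is GENMASK(-1, 0), i.e. ~0U >> 32 - undefined. */
		if (!width)
			return 0;
		return GENMASK(width + shift - 1, shift);
	}

	int main(void)
	{
		printf("mask(4, 8) = 0x%x\n", field_mask(4, 8)); /* 0xf00 */
		printf("mask(0, 0) = 0x%x\n", field_mask(0, 0)); /* 0x0 */
		return 0;
	}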
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 0400e5b1d627..1fc71baae13b 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1293,8 +1293,8 @@ static int attr_enable_set(void *data, u64 val)
return val ? dfll_enable(td) : dfll_disable(td);
}
-DEFINE_SIMPLE_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set,
+ "%llu\n");
static int attr_lock_get(void *data, u64 *val)
{
@@ -1310,8 +1310,7 @@ static int attr_lock_set(void *data, u64 val)
return val ? dfll_lock(td) : dfll_unlock(td);
}
-DEFINE_SIMPLE_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set, "%llu\n");
static int attr_rate_get(void *data, u64 *val)
{
@@ -1328,7 +1327,7 @@ static int attr_rate_set(void *data, u64 val)
return dfll_request_rate(td, val);
}
-DEFINE_SIMPLE_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n");
static int attr_registers_show(struct seq_file *s, void *data)
{
@@ -1379,10 +1378,11 @@ static void dfll_debug_init(struct tegra_dfll *td)
root = debugfs_create_dir("tegra_dfll_fcpu", NULL);
td->debugfs_dir = root;
- debugfs_create_file("enable", S_IRUGO | S_IWUSR, root, td, &enable_fops);
- debugfs_create_file("lock", S_IRUGO, root, td, &lock_fops);
- debugfs_create_file("rate", S_IRUGO, root, td, &rate_fops);
- debugfs_create_file("registers", S_IRUGO, root, td, &attr_registers_fops);
+ debugfs_create_file_unsafe("enable", 0644, root, td,
+ &enable_fops);
+ debugfs_create_file_unsafe("lock", 0444, root, td, &lock_fops);
+ debugfs_create_file_unsafe("rate", 0444, root, td, &rate_fops);
+ debugfs_create_file("registers", 0444, root, td, &attr_registers_fops);
}
#else
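
DEFINE_DEBUGFS_ATTRIBUTE() generates read/write helpers that take
debugfs_file_get()/debugfs_file_put() themselves, so the files can be
created with debugfs_create_file_unsafe() and skip the proxy fops that
debugfs_create_file() would otherwise wrap around them. A minimal sketch
with hypothetical names:

	#include <linux/debugfs.h>

	static u32 foo_val;

	static int foo_get(void *data, u64 *val)
	{
		*val = *(u32 *)data;
		return 0;
	}

	static int foo_set(void *data, u64 val)
	{
		*(u32 *)data = val;
		return 0;
	}

	DEFINE_DEBUGFS_ATTRIBUTE(foo_fops, foo_get, foo_set, "%llu\n");

	/* in probe/init; NULL parent places the file in the debugfs root */
	debugfs_create_file_unsafe("foo", 0644, NULL, &foo_val, &foo_fops);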
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 688e403333b9..0c210984765a 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -614,7 +614,7 @@ static int ti_adpll_init_clkout(struct ti_adpll_data *d,
init.name = child_name;
init.ops = ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
co->hw.init = &init;
parent_names[0] = __clk_get_name(clk0);
parent_names[1] = __clk_get_name(clk1);
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 222f68bc3f2a..015a657d3382 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -165,7 +165,7 @@ static void __init omap_clk_register_apll(void *user,
ad->clk_bypass = __clk_get_hw(clk);
- clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
kfree(clk_hw->hw.init->parent_names);
@@ -402,7 +402,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
if (ret)
goto cleanup;
- clk = clk_register(NULL, &clk_hw->hw);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
kfree(init);
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index 7bb9afbe4058..1cae226759dd 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -35,7 +35,44 @@ struct clk_ti_autoidle {
#define AUTOIDLE_LOW 0x1
static LIST_HEAD(autoidle_clks);
-static LIST_HEAD(clk_hw_omap_clocks);
+
+/*
+ * We have some non-atomic read/write operations behind it, so let's
+ * take one lock for handling autoidle of all clocks.
+ */
+static DEFINE_SPINLOCK(autoidle_spinlock);
+
+static int _omap2_clk_deny_idle(struct clk_hw_omap *clk)
+{
+ if (clk->ops && clk->ops->deny_idle) {
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&autoidle_spinlock, irqflags);
+ clk->autoidle_count++;
+ if (clk->autoidle_count == 1)
+ clk->ops->deny_idle(clk);
+
+ spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
+ }
+ return 0;
+}
+
+static int _omap2_clk_allow_idle(struct clk_hw_omap *clk)
+{
+ if (clk->ops && clk->ops->allow_idle) {
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&autoidle_spinlock, irqflags);
+ clk->autoidle_count--;
+ if (clk->autoidle_count == 0)
+ clk->ops->allow_idle(clk);
+
+ spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
+ }
+ return 0;
+}
/**
* omap2_clk_deny_idle - disable autoidle on an OMAP clock
@@ -45,12 +82,15 @@ static LIST_HEAD(clk_hw_omap_clocks);
*/
int omap2_clk_deny_idle(struct clk *clk)
{
- struct clk_hw_omap *c;
+ struct clk_hw *hw = __clk_get_hw(clk);
- c = to_clk_hw_omap(__clk_get_hw(clk));
- if (c->ops && c->ops->deny_idle)
- c->ops->deny_idle(c);
- return 0;
+ if (omap2_clk_is_hw_omap(hw)) {
+ struct clk_hw_omap *c = to_clk_hw_omap(hw);
+
+ return _omap2_clk_deny_idle(c);
+ }
+
+ return -EINVAL;
}
/**
@@ -61,12 +101,15 @@ int omap2_clk_deny_idle(struct clk *clk)
*/
int omap2_clk_allow_idle(struct clk *clk)
{
- struct clk_hw_omap *c;
+ struct clk_hw *hw = __clk_get_hw(clk);
- c = to_clk_hw_omap(__clk_get_hw(clk));
- if (c->ops && c->ops->allow_idle)
- c->ops->allow_idle(c);
- return 0;
+ if (omap2_clk_is_hw_omap(hw)) {
+ struct clk_hw_omap *c = to_clk_hw_omap(hw);
+
+ return _omap2_clk_allow_idle(c);
+ }
+
+ return -EINVAL;
}
static void _allow_autoidle(struct clk_ti_autoidle *clk)
@@ -168,26 +211,6 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node)
}
/**
- * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
- * @hw: struct clk_hw * to initialize
- *
- * Add an OMAP clock @clk to the internal list of OMAP clocks. Used
- * temporarily for autoidle handling, until this support can be
- * integrated into the common clock framework code in some way. No
- * return value.
- */
-void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
-{
- struct clk_hw_omap *c;
-
- if (clk_hw_get_flags(hw) & CLK_IS_BASIC)
- return;
-
- c = to_clk_hw_omap(hw);
- list_add(&c->node, &clk_hw_omap_clocks);
-}
-
-/**
* omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
* support it
*
@@ -198,11 +221,11 @@ void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
*/
int omap2_clk_enable_autoidle_all(void)
{
- struct clk_hw_omap *c;
+ int ret;
- list_for_each_entry(c, &clk_hw_omap_clocks, node)
- if (c->ops && c->ops->allow_idle)
- c->ops->allow_idle(c);
+ ret = omap2_clk_for_each(_omap2_clk_allow_idle);
+ if (ret)
+ return ret;
_clk_generic_allow_autoidle_all();
@@ -220,11 +243,11 @@ int omap2_clk_enable_autoidle_all(void)
*/
int omap2_clk_disable_autoidle_all(void)
{
- struct clk_hw_omap *c;
+ int ret;
- list_for_each_entry(c, &clk_hw_omap_clocks, node)
- if (c->ops && c->ops->deny_idle)
- c->ops->deny_idle(c);
+ ret = omap2_clk_for_each(_omap2_clk_deny_idle);
+ if (ret)
+ return ret;
_clk_generic_deny_autoidle_all();
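
The new helpers turn deny/allow into a counting scheme: the hardware
deny_idle callback runs only on the 0 -> 1 transition and allow_idle only
on 1 -> 0, so nested users compose correctly. A hypothetical usage sketch
(ick stands in for some OMAP interface clock):

	omap2_clk_deny_idle(ick);	/* count 0 -> 1: ->deny_idle() runs */
	omap2_clk_deny_idle(ick);	/* count 1 -> 2: no hardware access */
	/* ... register access window ... */
	omap2_clk_allow_idle(ick);	/* count 2 -> 1: still denied */
	omap2_clk_allow_idle(ick);	/* count 1 -> 0: ->allow_idle() runs */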
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index d0cd58534781..ba17cc5bd04b 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -31,6 +31,7 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
+static LIST_HEAD(clk_hw_omap_clocks);
struct ti_clk_ll_ops *ti_clk_ll_ops;
static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];
@@ -191,9 +192,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
clkdev_add(&c->lk);
} else {
if (num_args && !has_clkctrl_data) {
- if (of_find_compatible_node(NULL, NULL,
- "ti,clkctrl")) {
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "ti,clkctrl");
+ if (np) {
has_clkctrl_data = true;
+ of_node_put(np);
} else {
clkctrl_nodes_missing = true;
@@ -351,6 +356,9 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
struct clk_iomap *io;
io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES);
+ if (!io)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*io));
io->mem = mem;
@@ -517,3 +525,74 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
return clk;
}
+
+/**
+ * ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
+ * @dev: device for this clock
+ * @hw: hardware clock handle
+ * @con: connection ID for this clock
+ *
+ * Registers a clk_hw_omap clock to the clock framework, adds a clock
+ * alias for it, and adds the clock to the list of clk_hw_omap clocks.
+ * Returns a handle to the registered clock on success, or an ERR_PTR
+ * value on failure.
+ */
+struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
+ const char *con)
+{
+ struct clk *clk;
+ struct clk_hw_omap *oclk;
+
+ clk = ti_clk_register(dev, hw, con);
+ if (IS_ERR(clk))
+ return clk;
+
+ oclk = to_clk_hw_omap(hw);
+
+ list_add(&oclk->node, &clk_hw_omap_clocks);
+
+ return clk;
+}
+
+/**
+ * omap2_clk_for_each - call function for each registered clk_hw_omap
+ * @fn: pointer to a callback function
+ *
+ * Call @fn for each registered clk_hw_omap, passing @hw to each
+ * function. @fn must return 0 for success or any other value for
+ * failure. If @fn returns non-zero, the iteration across clocks
+ * will stop and the non-zero return value will be passed to the
+ * caller of omap2_clk_for_each().
+ */
+int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw))
+{
+ int ret = 0;
+ struct clk_hw_omap *hw;
+
+ list_for_each_entry(hw, &clk_hw_omap_clocks, node) {
+ ret = (*fn)(hw);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * omap2_clk_is_hw_omap - check if the provided clk_hw is an OMAP clock
+ * @hw: clk_hw to check if it is an omap clock or not
+ *
+ * Checks whether the provided clk_hw is an OMAP clock. Returns true if
+ * it is, false otherwise.
+ */
+bool omap2_clk_is_hw_omap(struct clk_hw *hw)
+{
+ struct clk_hw_omap *oclk;
+
+ list_for_each_entry(oclk, &clk_hw_omap_clocks, node) {
+ if (&oclk->hw == hw)
+ return true;
+ }
+
+ return false;
+}
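
A hypothetical consumer of the new iterator, showing the callback contract
(return 0 to continue, non-zero to stop and propagate the value):

	static int dump_one(struct clk_hw_omap *hw)
	{
		pr_info("omap clk: %s\n", clk_hw_get_name(&hw->hw));
		return 0;
	}

	static void dump_omap_clocks(void)
	{
		omap2_clk_for_each(dump_one);
	}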
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 40630eb950fc..639f515e08f0 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -276,7 +276,7 @@ _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
init.parent_names = parents;
init.num_parents = num_parents;
init.ops = ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
clk = ti_clk_register(NULL, clk_hw, init.name);
if (IS_ERR_OR_NULL(clk)) {
@@ -530,7 +530,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
* Create default clkdm name, replace _cm from end of parent
* node name with _clkdm
*/
- provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
+ provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
} else {
provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
if (!provider->clkdm_name) {
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 9f312a219510..1c0fac59d809 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -203,6 +203,8 @@ typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
const char *con);
+struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
+ const char *con);
int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
void ti_clk_add_aliases(void);
@@ -221,7 +223,6 @@ int ti_clk_retry_init(struct device_node *node, void *user,
ti_of_clk_init_cb_t func);
int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
-void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw);
int of_ti_clk_autoidle_setup(struct device_node *node);
void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
@@ -301,6 +302,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
unsigned long *parent_rate);
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
+int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
+bool omap2_clk_is_hw_omap(struct clk_hw *hw);
extern struct ti_clk_ll_ops *ti_clk_ll_ops;
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 07a805125e98..423a99b9f10c 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -143,7 +143,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
continue;
}
clk_hw = __clk_get_hw(clk);
- if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) {
+ if (!omap2_clk_is_hw_omap(clk_hw)) {
pr_warn("can't setup clkdm for basic clk %s\n",
__clk_get_name(clk));
continue;
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 0241450f3eb3..4786e0ebc2e8 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -336,7 +336,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
init.name = name;
init.ops = &ti_clk_divider_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 6c3329bc116f..659dadb23279 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -192,10 +192,9 @@ static void __init _register_dpll(void *user,
dd->clk_bypass = __clk_get_hw(clk);
/* register the clock */
- clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
if (!IS_ERR(clk)) {
- omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
of_clk_add_provider(node, of_clk_src_simple_get, clk);
kfree(clk_hw->hw.init->parent_names);
kfree(clk_hw->hw.init);
@@ -265,14 +264,12 @@ static void _register_dpll_x2(struct device_node *node,
#endif
/* register the clock */
- clk = ti_clk_register(NULL, &clk_hw->hw, name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk))
kfree(clk_hw);
- } else {
- omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
+ else
of_clk_add_provider(node, of_clk_src_simple_get, clk);
- }
}
#endif
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 44b6b6403753..3dde6c8c3354 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -731,7 +731,7 @@ static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
do {
do {
hw = clk_hw_get_parent(hw);
- } while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC));
+ } while (hw && !omap2_clk_is_hw_omap(hw));
if (!hw)
break;
pclk = to_clk_hw_omap(hw);
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 1c78fff5513c..504c0e91cdc7 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -123,7 +123,7 @@ static struct clk *_register_gate(struct device *dev, const char *name,
init.flags = flags;
- clk = ti_clk_register(NULL, &clk_hw->hw, name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
if (IS_ERR(clk))
kfree(clk_hw);
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 87e00c2ee957..83e34429d3b1 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -57,12 +57,10 @@ static struct clk *_register_interface(struct device *dev, const char *name,
init.num_parents = 1;
init.parent_names = &parent_name;
- clk = ti_clk_register(NULL, &clk_hw->hw, name);
+ clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
if (IS_ERR(clk))
kfree(clk_hw);
- else
- omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
return clk;
}
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 883bdde94d04..b7f9a4f068bf 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -143,7 +143,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
init.name = name;
init.ops = &ti_clk_mux_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
index ec11f55594ad..5d2d42b7e182 100644
--- a/drivers/clk/uniphier/clk-uniphier-cpugear.c
+++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
@@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
return ret;
ret = regmap_write_bits(gear->regmap,
- gear->regbase + UNIPHIER_CLK_CPUGEAR_SET,
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
UNIPHIER_CLK_CPUGEAR_UPD_BIT,
UNIPHIER_CLK_CPUGEAR_UPD_BIT);
if (ret)
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
index 6b40eb89ae19..68bd3abaef2c 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpt.c
@@ -13,7 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/platform_data/clk-lpss.h>
+#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_device.h>
static int lpt_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index d977193842df..19174835693b 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
};
static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
- void __iomem *base,
+ const struct pmc_clk_data *pmc_data,
const char **parent_names,
int num_parents)
{
@@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
init.num_parents = num_parents;
pclk->hw.init = &init;
- pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
+ pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
spin_lock_init(&pclk->lock);
+ /*
+ * On some systems, the pmc_plt_clocks already enabled by the
+ * firmware are being marked as critical to avoid them being
+ * gated by the clock framework.
+ */
+ if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
+ init.flags |= CLK_IS_CRITICAL;
+
ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
if (ret) {
pclk = ERR_PTR(ret);
@@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
return PTR_ERR(parent_names);
for (i = 0; i < PMC_CLK_NUM; i++) {
- data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
+ data->clks[i] = plt_clk_register(pdev, i, pmc_data,
parent_names, data->nparents);
if (IS_ERR(data->clks[i])) {
err = PTR_ERR(data->clks[i]);
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
index 3a0996f2d556..25d4b97aff9b 100644
--- a/drivers/clk/x86/clk-st.c
+++ b/drivers/clk/x86/clk-st.c
@@ -52,7 +52,8 @@ static int st_clk_probe(struct platform_device *pdev)
0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
CLK_GATE_SET_TO_DISABLE, NULL);
- clk_hw_register_clkdev(hws[ST_CLK_GATE], "oscout1", NULL);
+ devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE], "oscout1",
+ NULL);
return 0;
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 171502a356aa..4b3d143f0f8a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -145,6 +145,7 @@ config VT8500_TIMER
config NPCM7XX_TIMER
bool "NPCM7xx timer driver" if COMPILE_TEST
depends on HAS_IOMEM
+ select TIMER_OF
select CLKSRC_MMIO
help
Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index a8b20b65bd4b..b2a951a798e2 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*/
-#define pr_fmt(fmt) "arm_arch_timer: " fmt
+#define pr_fmt(fmt) "arch_timer: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
@@ -33,9 +33,6 @@
#include <clocksource/arm_arch_timer.h>
-#undef pr_fmt
-#define pr_fmt(fmt) "arch_timer: " fmt
-
#define CNTTIDR 0x08
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
@@ -152,6 +149,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
return val;
}
+static u64 arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct_stable();
+}
+
+static u64 arch_counter_get_cntpct(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static u64 arch_counter_get_cntvct_stable(void)
+{
+ return __arch_counter_get_cntvct_stable();
+}
+
+static u64 arch_counter_get_cntvct(void)
+{
+ return __arch_counter_get_cntvct();
+}
+
/*
* Default to cp15 based access because arm64 uses this function for
* sched_clock() before DT is probed and the cp15 method is guaranteed
@@ -319,13 +336,6 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
}
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-static u64 notrace arm64_1188873_read_cntvct_el0(void)
-{
- return read_sysreg(cntvct_el0);
-}
-#endif
-
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
* The low bits of the counter registers are indeterminate while bit 10 or
@@ -372,8 +382,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
-DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
-EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
+static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
struct clock_event_device *clk)
@@ -457,14 +466,6 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
- {
- .match_type = ate_match_local_cap_id,
- .id = (void *)ARM64_WORKAROUND_1188873,
- .desc = "ARM erratum 1188873",
- .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
- },
-#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
{
.match_type = ate_match_dt,
@@ -552,11 +553,8 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
- /*
- * Use the locked version, as we're called from the CPU
- * hotplug framework. Otherwise, we end-up in deadlock-land.
- */
- static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
+ if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
+ atomic_set(&timer_unstable_counter_workaround_in_use, 1);
/*
* Don't use the vdso fastpath if errata require using the
@@ -573,7 +571,7 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
void *arg)
{
- const struct arch_timer_erratum_workaround *wa;
+ const struct arch_timer_erratum_workaround *wa, *__wa;
ate_match_fn_t match_fn = NULL;
bool local = false;
@@ -597,53 +595,32 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type t
if (!wa)
return;
- if (needs_unstable_timer_counter_workaround()) {
- const struct arch_timer_erratum_workaround *__wa;
- __wa = __this_cpu_read(timer_unstable_counter_workaround);
- if (__wa && wa != __wa)
- pr_warn("Can't enable workaround for %s (clashes with %s\n)",
- wa->desc, __wa->desc);
+ __wa = __this_cpu_read(timer_unstable_counter_workaround);
+ if (__wa && wa != __wa)
+ pr_warn("Can't enable workaround for %s (clashes with %s\n)",
+ wa->desc, __wa->desc);
- if (__wa)
- return;
- }
+ if (__wa)
+ return;
arch_timer_enable_workaround(wa, local);
pr_info("Enabling %s workaround for %s\n",
local ? "local" : "global", wa->desc);
}
-#define erratum_handler(fn, r, ...) \
-({ \
- bool __val; \
- if (needs_unstable_timer_counter_workaround()) { \
- const struct arch_timer_erratum_workaround *__wa; \
- __wa = __this_cpu_read(timer_unstable_counter_workaround); \
- if (__wa && __wa->fn) { \
- r = __wa->fn(__VA_ARGS__); \
- __val = true; \
- } else { \
- __val = false; \
- } \
- } else { \
- __val = false; \
- } \
- __val; \
-})
-
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
- const struct arch_timer_erratum_workaround *wa;
+ return has_erratum_handler(read_cntvct_el0);
+}
- wa = __this_cpu_read(timer_unstable_counter_workaround);
- return wa && wa->read_cntvct_el0;
+static bool arch_timer_counter_has_wa(void)
+{
+ return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a) do { } while(0)
-#define erratum_set_next_event_tval_virt(...) ({BUG(); 0;})
-#define erratum_set_next_event_tval_phys(...) ({BUG(); 0;})
-#define erratum_handler(fn, r, ...) ({false;})
#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
+#define arch_timer_counter_has_wa() ({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline irqreturn_t timer_handler(const int access,
@@ -736,11 +713,6 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
static int arch_timer_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_virt, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
@@ -748,11 +720,6 @@ static int arch_timer_set_next_event_virt(unsigned long evt,
static int arch_timer_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_phys, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
@@ -777,6 +744,10 @@ static void __arch_timer_setup(unsigned type,
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_TIMER_TYPE_CP15) {
+ typeof(clk->set_next_event) sne;
+
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+
if (arch_timer_c3stop)
clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
@@ -787,20 +758,20 @@ static void __arch_timer_setup(unsigned type,
case ARCH_TIMER_VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- clk->set_next_event = arch_timer_set_next_event_virt;
+ sne = erratum_handler(set_next_event_virt);
break;
case ARCH_TIMER_PHYS_SECURE_PPI:
case ARCH_TIMER_PHYS_NONSECURE_PPI:
case ARCH_TIMER_HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- clk->set_next_event = arch_timer_set_next_event_phys;
+ sne = erratum_handler(set_next_event_phys);
break;
default:
BUG();
}
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+ clk->set_next_event = sne;
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
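
The erratum_handler() used here now lives on the arch side and returns a
function pointer instead of invoking the workaround inline; conceptually
(a sketch, not the exact header text) it reduces to:

	#define erratum_handler(fn)					\
	({								\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->fn) ? __wa->fn : arch_timer_##fn;	\
	})

so __arch_timer_setup() resolves set_next_event once, and the hot path
pays no per-call erratum check.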
@@ -833,7 +804,11 @@ static void arch_timer_evtstrm_enable(int divider)
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
| ARCH_TIMER_VIRT_EVT_EN;
arch_timer_set_cntkctl(cntkctl);
+#ifdef CONFIG_ARM64
+ cpu_set_named_feature(EVTSTRM);
+#else
elf_hwcap |= HWCAP_EVTSTRM;
+#endif
#ifdef CONFIG_COMPAT
compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
@@ -998,12 +973,22 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
+ u64 (*rd)(void);
+
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
- arch_timer_read_counter = arch_counter_get_cntvct;
- else
- arch_timer_read_counter = arch_counter_get_cntpct;
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntvct_stable;
+ else
+ rd = arch_counter_get_cntvct;
+ } else {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntpct_stable;
+ else
+ rd = arch_counter_get_cntpct;
+ }
+ arch_timer_read_counter = rd;
clocksource_counter.archdata.vdso_direct = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
@@ -1055,7 +1040,11 @@ static int arch_timer_cpu_pm_notify(struct notifier_block *self,
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
+#ifdef CONFIG_ARM64
+ if (cpu_have_named_feature(EVTSTRM))
+#else
if (elf_hwcap & HWCAP_EVTSTRM)
+#endif
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;
@@ -1261,6 +1250,13 @@ static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
return ARCH_TIMER_PHYS_SECURE_PPI;
}
+static void __init arch_timer_populate_kvm_info(void)
+{
+ arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
+ if (is_kernel_in_hyp_mode())
+ arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
+}
+
static int __init arch_timer_of_init(struct device_node *np)
{
int i, ret;
@@ -1275,7 +1271,7 @@ static int __init arch_timer_of_init(struct device_node *np)
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
- arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
+ arch_timer_populate_kvm_info();
rate = arch_timer_get_cntfrq();
arch_timer_of_configure_rate(rate, np);
@@ -1605,7 +1601,7 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
- arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
+ arch_timer_populate_kvm_info();
/*
* When probing via ACPI, we have no mechanism to override the sysreg
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index a8dd80576c95..857f8c086274 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -31,16 +31,9 @@ static u64 notrace clps711x_sched_clock_read(void)
return ~readw(tcd);
}
-static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
+static void __init clps711x_clksrc_init(struct clk *clock, void __iomem *base)
{
- unsigned long rate;
-
- if (!base)
- return -ENOMEM;
- if (IS_ERR(clock))
- return PTR_ERR(clock);
-
- rate = clk_get_rate(clock);
+ unsigned long rate = clk_get_rate(clock);
tcd = base;
@@ -48,8 +41,6 @@ static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
clocksource_mmio_readw_down);
sched_clock_register(clps711x_sched_clock_read, 16, rate);
-
- return 0;
}
static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
@@ -67,13 +58,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
struct clock_event_device *clkevt;
unsigned long rate;
- if (!irq)
- return -EINVAL;
- if (!base)
- return -ENOMEM;
- if (IS_ERR(clock))
- return PTR_ERR(clock);
-
clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
if (!clkevt)
return -ENOMEM;
@@ -93,31 +77,29 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
"clps711x-timer", clkevt);
}
-void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
- unsigned int irq)
-{
- struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
- struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
-
- BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
- BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
-}
-
-#ifdef CONFIG_TIMER_OF
static int __init clps711x_timer_init(struct device_node *np)
{
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
void __iomem *base = of_iomap(np, 0);
+ if (!base)
+ return -ENOMEM;
+ if (!irq)
+ return -EINVAL;
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
- return _clps711x_clksrc_init(clock, base);
+ clps711x_clksrc_init(clock, base);
+ break;
case CLPS711X_CLKSRC_CLOCKEVENT:
return _clps711x_clkevt_init(clock, base, irq);
default:
return -EINVAL;
}
+
+ return 0;
}
TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
-#endif
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 54f8a331b53a..37671a5d4ed9 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -67,7 +67,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-struct irqaction gic_compare_irqaction = {
+static struct irqaction gic_compare_irqaction = {
.handler = gic_compare_interrupt,
.percpu_dev_id = &gic_clockevent_device,
.flags = IRQF_PERCPU | IRQF_TIMER,
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 43f4d5c4d6fa..f987027ca566 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -71,7 +71,7 @@ static u64 tc_get_cycles32(struct clocksource *cs)
return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}
-void tc_clksrc_suspend(struct clocksource *cs)
+static void tc_clksrc_suspend(struct clocksource *cs)
{
int i;
@@ -86,7 +86,7 @@ void tc_clksrc_suspend(struct clocksource *cs)
bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}
-void tc_clksrc_resume(struct clocksource *cs)
+static void tc_clksrc_resume(struct clocksource *cs)
{
int i;
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
index eed6feff8b5f..30c6f4ce672b 100644
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -296,4 +296,4 @@ err_alloc:
TIMER_OF_DECLARE(ox810se_rps,
"oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
TIMER_OF_DECLARE(ox820_rps,
- "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);
+ "oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index e8163693e936..5e6038fbf115 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void)
static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
.name = "riscv_clocksource",
.rating = 300,
- .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
+ .mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.read = riscv_clocksource_rdtime,
};
@@ -120,8 +120,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
return error;
}
- sched_clock_register(riscv_sched_clock,
- BITS_PER_LONG, riscv_timebase);
+ sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
"clockevents/riscv/timer:starting",
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index c364027638e1..ee8ec5a8cb16 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
return 0;
}
-/* Optimized set_load which removes costly spin wait in timer_start */
-int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
- unsigned int load)
-{
- u32 l;
-
- if (unlikely(!timer))
- return -EINVAL;
-
- omap_dm_timer_enable(timer);
-
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- if (autoreload) {
- l |= OMAP_TIMER_CTRL_AR;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
- } else {
- l &= ~OMAP_TIMER_CTRL_AR;
- }
- l |= OMAP_TIMER_CTRL_ST;
-
- __omap_dm_timer_load_start(timer, l, load, timer->posted);
-
- /* Save the context */
- timer->context.tclr = l;
- timer->context.tldr = load;
- timer->context.tcrr = load;
- return 0;
-}
static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
unsigned int match)
{
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index ed5e42461094..ad48fd52cb53 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -250,6 +250,7 @@ void proc_coredump_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
+ struct task_struct *parent;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
@@ -262,8 +263,14 @@ void proc_coredump_connector(struct task_struct *task)
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
ev->event_data.coredump.process_tgid = task->tgid;
- ev->event_data.coredump.parent_pid = task->real_parent->pid;
- ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
+
+ rcu_read_lock();
+ if (pid_alive(task)) {
+ parent = rcu_dereference(task->real_parent);
+ ev->event_data.coredump.parent_pid = parent->pid;
+ ev->event_data.coredump.parent_tgid = parent->tgid;
+ }
+ rcu_read_unlock();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
@@ -276,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
+ struct task_struct *parent;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
@@ -290,8 +298,14 @@ void proc_exit_connector(struct task_struct *task)
ev->event_data.exit.process_tgid = task->tgid;
ev->event_data.exit.exit_code = task->exit_code;
ev->event_data.exit.exit_signal = task->exit_signal;
- ev->event_data.exit.parent_pid = task->real_parent->pid;
- ev->event_data.exit.parent_tgid = task->real_parent->tgid;
+
+ rcu_read_lock();
+ if (pid_alive(task)) {
+ parent = rcu_dereference(task->real_parent);
+ ev->event_data.exit.parent_pid = parent->pid;
+ ev->event_data.exit.parent_tgid = parent->tgid;
+ }
+ rcu_read_unlock();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
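
The rule applied in both hunks: task->real_parent is only stable under
rcu_read_lock() and only meaningful while the task is still alive, so the
parent fields are filled in conditionally. The distilled pattern, with a
hypothetical ppid destination:

	pid_t ppid = 0;

	rcu_read_lock();
	if (pid_alive(task)) {
		struct task_struct *parent = rcu_dereference(task->real_parent);

		ppid = parent->pid;	/* safe: task not yet released */
	}
	rcu_read_unlock();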
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index b22e6bba71f1..4d2b33a30292 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -26,10 +26,6 @@ config CPU_FREQ_GOV_COMMON
select IRQ_WORK
bool
-config CPU_FREQ_BOOST_SW
- bool
- depends on THERMAL
-
config CPU_FREQ_STAT
bool "CPU frequency transition statistics"
help
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index c72258a44ba4..73bb2aafb1a8 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -366,7 +366,7 @@ static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *dat
val = drv_read(data, mask);
- pr_debug("get_cur_val = %u\n", val);
+ pr_debug("%s = %u\n", __func__, val);
return val;
}
@@ -378,7 +378,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
unsigned int freq;
unsigned int cached_freq;
- pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+ pr_debug("%s (%d)\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
if (unlikely(!policy))
@@ -458,8 +458,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
if (acpi_pstate_strict) {
if (!check_freqs(policy, mask,
policy->freq_table[index].frequency)) {
- pr_debug("acpi_cpufreq_target failed (%d)\n",
- policy->cpu);
+ pr_debug("%s (%d)\n", __func__, policy->cpu);
result = -EAGAIN;
}
}
@@ -573,7 +572,7 @@ static int cpufreq_boost_down_prep(unsigned int cpu)
static int __init acpi_cpufreq_early_init(void)
{
unsigned int i;
- pr_debug("acpi_cpufreq_early_init\n");
+ pr_debug("%s\n", __func__);
acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
if (!acpi_perf_data) {
@@ -657,7 +656,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
static int blacklisted;
#endif
- pr_debug("acpi_cpufreq_cpu_init\n");
+ pr_debug("%s\n", __func__);
#ifdef CONFIG_SMP
if (blacklisted)
@@ -856,7 +855,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
- pr_debug("acpi_cpufreq_cpu_exit\n");
+ pr_debug("%s\n", __func__);
policy->fast_switch_possible = false;
policy->driver_data = NULL;
@@ -881,7 +880,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
- pr_debug("acpi_cpufreq_resume\n");
+ pr_debug("%s\n", __func__);
data->resume = 1;
@@ -954,7 +953,7 @@ static int __init acpi_cpufreq_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
- pr_debug("acpi_cpufreq_init\n");
+ pr_debug("%s\n", __func__);
ret = acpi_cpufreq_early_init();
if (ret)
@@ -991,7 +990,7 @@ static int __init acpi_cpufreq_init(void)
static void __exit acpi_cpufreq_exit(void)
{
- pr_debug("acpi_cpufreq_exit\n");
+ pr_debug("%s\n", __func__);
acpi_cpufreq_boost_exit();
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 4ac7c3cf34be..6927a8c0e748 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -124,7 +124,7 @@ static int __init amd_freq_sensitivity_init(void)
PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
if (!pcidev) {
- if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
return -ENODEV;
}
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 75491fc841a6..0df16eb1eb3c 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -359,11 +359,11 @@ static int __init armada37xx_cpufreq_driver_init(void)
struct armada_37xx_dvfs *dvfs;
struct platform_device *pdev;
unsigned long freq;
- unsigned int cur_frequency;
+ unsigned int cur_frequency, base_frequency;
struct regmap *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
- struct clk *clk;
+ struct clk *clk, *parent;
nb_pm_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@@ -399,6 +399,22 @@ static int __init armada37xx_cpufreq_driver_init(void)
return PTR_ERR(clk);
}
+ parent = clk_get_parent(clk);
+ if (IS_ERR(parent)) {
+ dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
+ clk_put(clk);
+ return PTR_ERR(parent);
+ }
+
+ /* Get parent CPU frequency */
+ base_frequency = clk_get_rate(parent);
+
+ if (!base_frequency) {
+ dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
+ clk_put(clk);
+ return -EINVAL;
+ }
+
/* Get nominal (current) CPU frequency */
cur_frequency = clk_get_rate(clk);
if (!cur_frequency) {
@@ -431,7 +447,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
- freq = cur_frequency / dvfs->divider[load_lvl];
+ freq = base_frequency / dvfs->divider[load_lvl];
ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
if (ret)
goto remove_opp;
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index b3f4bd647e9b..988ebc326bdb 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -132,6 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
of_node_put(node);
return -ENODEV;
}
+ of_node_put(node);
nb_cpus = num_possible_cpus();
freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d87ebc2d2e68..db779b650fce 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -34,11 +34,6 @@
static LIST_HEAD(cpufreq_policy_list);
-static inline bool policy_is_inactive(struct cpufreq_policy *policy)
-{
- return cpumask_empty(policy->cpus);
-}
-
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active) \
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -206,17 +201,15 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
- * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
- *
- * @cpu: cpu to find policy for.
+ * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
+ * @cpu: CPU to find the policy for.
*
- * This returns policy for 'cpu', returns NULL if it doesn't exist.
- * It also increments the kobject reference count to mark it busy and so would
- * require a corresponding call to cpufreq_cpu_put() to decrement it back.
- * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
- * freed as that depends on the kobj count.
+ * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
+ * the kobject reference counter of that policy. Return a valid policy on
+ * success or NULL on failure.
*
- * Return: A valid policy on success, otherwise NULL on failure.
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_put() to balance its kobject reference counter properly.
*/
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
@@ -243,12 +236,8 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/**
- * cpufreq_cpu_put: Decrements the usage count of a policy
- *
- * @policy: policy earlier returned by cpufreq_cpu_get().
- *
- * This decrements the kobject reference count incremented earlier by calling
- * cpufreq_cpu_get().
+ * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
+ * @policy: cpufreq policy returned by cpufreq_cpu_get().
*/
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
@@ -256,6 +245,51 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
+/**
+ * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
+ * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
+ */
+void cpufreq_cpu_release(struct cpufreq_policy *policy)
+{
+ if (WARN_ON(!policy))
+ return;
+
+ lockdep_assert_held(&policy->rwsem);
+
+ up_write(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+}
+
+/**
+ * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
+ * if the policy returned by it is not NULL, acquire its rwsem for writing.
+ * Return the policy if it is active or release it and return NULL otherwise.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_release() in order to release its rwsem and balance its usage
+ * counter properly.
+ */
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return NULL;
+
+ down_write(&policy->rwsem);
+
+ if (policy_is_inactive(policy)) {
+ cpufreq_cpu_release(policy);
+ return NULL;
+ }
+
+ return policy;
+}
+
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -675,9 +709,6 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
return ret;
}
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy);
-
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
@@ -863,11 +894,9 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
unsigned int limit;
int ret;
- if (cpufreq_driver->bios_limit) {
- ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
- if (!ret)
- return sprintf(buf, "%u\n", limit);
- }
+ ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
+ if (!ret)
+ return sprintf(buf, "%u\n", limit);
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
@@ -1104,6 +1133,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+ kobject_put(&policy->kobj);
goto err_free_real_cpus;
}
@@ -1556,7 +1586,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
unsigned int ret_freq = 0;
- if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
+ if (unlikely(policy_is_inactive(policy)))
return ret_freq;
ret_freq = cpufreq_driver->get(policy->cpu);
@@ -1594,7 +1624,8 @@ unsigned int cpufreq_get(unsigned int cpu)
if (policy) {
down_read(&policy->rwsem);
- ret_freq = __cpufreq_get(policy);
+ if (cpufreq_driver->get)
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
@@ -2235,8 +2266,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
*
* The cpuinfo part of @policy is not updated by this function.
*/
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
int ret;
@@ -2343,17 +2374,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
struct cpufreq_policy new_policy;
if (!policy)
return;
- down_write(&policy->rwsem);
-
- if (policy_is_inactive(policy))
- goto unlock;
-
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
@@ -2370,12 +2396,26 @@ void cpufreq_update_policy(unsigned int cpu)
cpufreq_set_policy(policy, &new_policy);
unlock:
- up_write(&policy->rwsem);
-
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
+/**
+ * cpufreq_update_limits - Update policy limits for a given CPU.
+ * @cpu: CPU to update the policy limits for.
+ *
+ * Invoke the driver's ->update_limits callback if present or call
+ * cpufreq_update_policy() for @cpu.
+ */
+void cpufreq_update_limits(unsigned int cpu)
+{
+ if (cpufreq_driver->update_limits)
+ cpufreq_driver->update_limits(cpu);
+ else
+ cpufreq_update_policy(cpu);
+}
+EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+
/*********************************************************************
* BOOST *
*********************************************************************/
@@ -2432,7 +2472,7 @@ int cpufreq_boost_trigger_state(int state)
static bool cpufreq_boost_supported(void)
{
- return likely(cpufreq_driver) && cpufreq_driver->set_boost;
+ return cpufreq_driver->set_boost;
}
static int create_boost_sysfs_file(void)
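
A hypothetical caller of the new acquire/release pair; it subsumes the
get + down_write + inactive-check boilerplate that cpufreq_update_policy()
used to open-code:

	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;		/* no policy for this CPU, or inactive */

	/* policy->rwsem is held for writing here; safe to adjust limits */

	cpufreq_cpu_release(policy);	/* unlock + drop kobject reference */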
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ffa9adeaba31..9d1d9bf02710 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
/* Failure, so roll back. */
pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
+ kobject_put(&dbs_data->attr_set.kobj);
+
policy->governor_data = NULL;
if (!have_governor_per_policy())
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index e2db5581489a..08b192eb22c6 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-static DEFINE_SPINLOCK(cpufreq_stats_lock);
struct cpufreq_stats {
unsigned int total_trans;
@@ -23,6 +22,7 @@ struct cpufreq_stats {
unsigned int state_num;
unsigned int last_index;
u64 *time_in_state;
+ spinlock_t lock;
unsigned int *freq_table;
unsigned int *trans_table;
};
@@ -39,12 +39,12 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
{
unsigned int count = stats->max_state;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock(&stats->lock);
memset(stats->time_in_state, 0, count * sizeof(u64));
memset(stats->trans_table, 0, count * count * sizeof(int));
stats->last_time = get_jiffies_64();
stats->total_trans = 0;
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock(&stats->lock);
}
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
@@ -62,9 +62,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
if (policy->fast_switch_enabled)
return 0;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock(&stats->lock);
cpufreq_stats_update(stats);
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock(&stats->lock);
for (i = 0; i < stats->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -211,6 +211,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
stats->state_num = i;
stats->last_time = get_jiffies_64();
stats->last_index = freq_table_get_index(stats, policy->cur);
+ spin_lock_init(&stats->lock);
policy->stats = stats;
ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
@@ -242,11 +243,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
if (old_index == -1 || new_index == -1 || old_index == new_index)
return;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock(&stats->lock);
cpufreq_stats_update(stats);
stats->last_index = new_index;
stats->trans_table[old_index * stats->max_state + new_index]++;
stats->total_trans++;
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock(&stats->lock);
}
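
Moving from one global spinlock to a per-policy one is the standard
"lock lives next to the data it protects" conversion; a minimal sketch
with hypothetical names:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct foo_stats {
		spinlock_t lock;	/* protects the counters below */
		u64 count;
	};

	static struct foo_stats *foo_stats_alloc(void)
	{
		struct foo_stats *st = kzalloc(sizeof(*st), GFP_KERNEL);

		if (st)
			spin_lock_init(&st->lock);	/* before first use */
		return st;
	}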
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 3a8cc99e6815..e7be0af3199f 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -290,9 +290,6 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
struct freq_attr *cpufreq_generic_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
-#ifdef CONFIG_CPU_FREQ_BOOST_SW
- &cpufreq_freq_attr_scaling_boost_freqs,
-#endif
NULL,
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index a4ff09f91c8f..3e17560b1efe 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -388,11 +388,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
ret = imx6ul_opp_check_speed_grading(cpu_dev);
if (ret) {
if (ret == -EPROBE_DEFER)
- return ret;
+ goto put_node;
dev_err(cpu_dev, "failed to read ocotp: %d\n",
ret);
- return ret;
+ goto put_node;
}
} else {
imx6q_opp_check_speed_grading(cpu_dev);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 002f5169d4eb..34b54df41aaa 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct vid_data {
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
+ * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -187,6 +188,7 @@ struct vid_data {
struct global_params {
bool no_turbo;
bool turbo_disabled;
+ bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -385,7 +387,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
if (ret)
return ret;
- return cppc_perf.guaranteed_perf;
+ if (cppc_perf.guaranteed_perf)
+ return cppc_perf.guaranteed_perf;
+
+ return cppc_perf.nominal_perf;
}
#else /* CONFIG_ACPI_CPPC_LIB */
@@ -522,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
u64 epb;
int ret;
- if (!static_cpu_has(X86_FEATURE_EPB))
+ if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -536,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
s16 epp;
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
* When hwp_req_data is 0, it means that the caller didn't read
* MSR_HWP_REQUEST, so we need to read it and extract EPP.
@@ -561,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
u64 epb;
int ret;
- if (!static_cpu_has(X86_FEATURE_EPB))
+ if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -609,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
if (epp < 0)
return epp;
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
if (epp == HWP_EPP_PERFORMANCE)
return 1;
if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -618,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
return 3;
else
return 4;
- } else if (static_cpu_has(X86_FEATURE_EPB)) {
+ } else if (boot_cpu_has(X86_FEATURE_EPB)) {
/*
* Range:
* 0x00-0x03 : Performance
@@ -646,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
mutex_lock(&intel_pstate_limits_lock);
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
u64 value;
ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -821,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
epp = cpu_data->epp_powersave;
}
update_epp:
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
} else {
@@ -846,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
value |= HWP_MIN_PERF(min_perf);
/* Set EPP/EPB to min */
- if (static_cpu_has(X86_FEATURE_HWP_EPP))
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
else
intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -894,6 +899,48 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpufreq_policy new_policy;
+ struct cpudata *cpudata;
+
+ if (!policy)
+ return;
+
+ cpudata = all_cpu_data[cpu];
+ policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+ new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+ cpufreq_set_policy(policy, &new_policy);
+
+ cpufreq_cpu_release(policy);
+}
+
+static void intel_pstate_update_limits(unsigned int cpu)
+{
+ mutex_lock(&intel_pstate_driver_lock);
+
+ update_turbo_state();
+ /*
+ * If turbo has been turned on or off globally, policy limits for
+ * all CPUs need to be updated to reflect that.
+ */
+ if (global.turbo_disabled_mf != global.turbo_disabled) {
+ global.turbo_disabled_mf = global.turbo_disabled;
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(cpu);
+ } else {
+ cpufreq_update_policy(cpu);
+ }
+
+ mutex_unlock(&intel_pstate_driver_lock);
+}
+
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
@@ -1194,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupts, as we don't process them */
- if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -1762,7 +1809,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
/* Start over if the CPU may have been idle. */
if (delta_ns > TICK_NSEC) {
cpu->iowait_boost = ONE_EIGHTH_FP;
- } else if (cpu->iowait_boost) {
+ } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
cpu->iowait_boost <<= 1;
if (cpu->iowait_boost > int_tofp(1))
cpu->iowait_boost = int_tofp(1);
@@ -2135,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
update_turbo_state();
+ global.turbo_disabled_mf = global.turbo_disabled;
policy->cpuinfo.max_freq = global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2179,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
+ .update_limits = intel_pstate_update_limits,
.name = "intel_pstate",
};
@@ -2313,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
.init = intel_cpufreq_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_cpufreq_stop_cpu,
+ .update_limits = intel_pstate_update_limits,
.name = "intel_cpufreq",
};
@@ -2593,6 +2643,9 @@ static int __init intel_pstate_init(void)
const struct x86_cpu_id *id;
int rc;
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
if (no_load)
return -ENODEV;
@@ -2608,7 +2661,7 @@ static int __init intel_pstate_init(void)
} else {
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id) {
- pr_info("CPU ID not supported\n");
+ pr_info("CPU model not supported\n");
return -ENODEV;
}
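
Several intel_pstate hunks above (and the powernow-k8 one below) switch static_cpu_has() to boot_cpu_has(). static_cpu_has() is implemented with runtime code patching (alternatives) and is only justified on genuinely hot paths; init, probe and sysfs paths should use the plain flag test. A sketch of the distinction, assuming <asm/cpufeature.h>; the two helpers are hypothetical:

    #include <asm/cpufeature.h>

    static void setup_epb(void);            /* hypothetical slow-path work */
    static void fast_path(void);            /* hypothetical hot-path work */

    static void feature_sketch(void)
    {
            /* Slow path (probe/sysfs): a plain bit test is enough. */
            if (boot_cpu_has(X86_FEATURE_EPB))
                    setup_epb();

            /*
             * Hot path: static_cpu_has() patches the branch at boot, so
             * the check costs nothing once the kernel is running.
             */
            if (static_cpu_has(X86_FEATURE_XMM2))
                    fast_path();
    }
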
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index c2dd43f3f5d8..8d63a6dc8383 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
if (IS_ERR(priv.cpu_clk)) {
dev_err(priv.dev, "Unable to get cpuclk\n");
- return PTR_ERR(priv.cpu_clk);
+ err = PTR_ERR(priv.cpu_clk);
+ goto out_node;
}
err = clk_prepare_enable(priv.cpu_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare cpuclk\n");
- return err;
+ goto out_node;
}
kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
goto out_ddr;
}
- of_node_put(np);
- np = NULL;
-
err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
- if (!err)
- return 0;
+ if (err) {
+ dev_err(priv.dev, "Failed to register cpufreq driver\n");
+ goto out_powersave;
+ }
- dev_err(priv.dev, "Failed to register cpufreq driver\n");
+ of_node_put(np);
+ return 0;
+out_powersave:
clk_disable_unprepare(priv.powersave_clk);
out_ddr:
clk_disable_unprepare(priv.ddr_clk);
out_cpu:
clk_disable_unprepare(priv.cpu_clk);
+out_node:
of_node_put(np);
return err;
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index d9df89392b84..a94355723ef8 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -210,7 +210,7 @@ static int __init maple_cpufreq_init(void)
*/
valp = of_get_property(cpunode, "clock-frequency", NULL);
if (!valp)
- return -ENODEV;
+ goto bail_noprops;
max_freq = (*valp)/1000;
maple_cpu_freqs[0].frequency = max_freq;
maple_cpu_freqs[1].frequency = max_freq/2;
@@ -231,10 +231,6 @@ static int __init maple_cpufreq_init(void)
rc = cpufreq_register_driver(&maple_cpufreq_driver);
- of_node_put(cpunode);
-
- return rc;
-
bail_noprops:
of_node_put(cpunode);
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 75dfbd2a58ea..c7710c149de8 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu = of_get_cpu_node(policy->cpu, NULL);
+ of_node_put(cpu);
if (!cpu)
goto out;
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 52f0d91d30c1..9b4ce2eb8222 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
if (!voltage_gpio){
pr_err("missing cpu-vcore-select gpio\n");
return 1;
@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
pvr = mfspr(SPRN_PVR);
has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index fb77b39a4ce3..3c12e03fa343 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1178,7 +1178,7 @@ static int powernowk8_init(void)
unsigned int i, supported_cpus = 0;
int ret;
- if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+ if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
__request_acpi_cpufreq();
return -ENODEV;
}
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 41a0f0be3f9f..8414c3a4ea08 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
!cbe_get_cpu_mic_tm_regs(policy->cpu)) {
pr_info("invalid CBE regs pointers for cpufreq\n");
+ of_node_put(cpu);
return -EINVAL;
}
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 46254e583982..74e0e0c20c46 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -143,7 +143,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return ret;
}
-static void __init pxa_cpufreq_init_voltages(void)
+static void pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
@@ -159,7 +159,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return 0;
}
-static void __init pxa_cpufreq_init_voltages(void) { }
+static void pxa_cpufreq_init_voltages(void) { }
#endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 4295e5476264..71b640c8c1a5 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -280,10 +280,12 @@ static const struct of_device_id node_matches[] __initconst = {
{ .compatible = "fsl,ls1012a-clockgen", },
{ .compatible = "fsl,ls1021a-clockgen", },
+ { .compatible = "fsl,ls1028a-clockgen", },
{ .compatible = "fsl,ls1043a-clockgen", },
{ .compatible = "fsl,ls1046a-clockgen", },
{ .compatible = "fsl,ls1088a-clockgen", },
{ .compatible = "fsl,ls2080a-clockgen", },
+ { .compatible = "fsl,lx2160a-clockgen", },
{ .compatible = "fsl,p4080-clockgen", },
{ .compatible = "fsl,qoriq-clockgen-1.0", },
{ .compatible = "fsl,qoriq-clockgen-2.0", },
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 3f49427766b8..2b51e0718c9f 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
clk_put(priv->clk);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
- kfree(priv);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ kfree(priv);
return 0;
}
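
The scpi exit hunk is an ordering fix: the old code called kfree(priv) and then dereferenced priv->cpu_dev in dev_pm_opp_remove_all_dynamic(), a use-after-free. A minimal sketch of the rule, with illustrative names; release_resources() is hypothetical:

    #include <linux/slab.h>

    struct priv {
            struct device *cpu_dev;
    };

    static void release_resources(struct device *dev);  /* hypothetical */

    static void teardown(struct priv *p)
    {
            release_resources(p->cpu_dev);  /* dereferences p: must run first */
            kfree(p);                       /* p must not be touched after this */
    }
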
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index a1fb735685db..e086b2dd4072 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -412,7 +412,7 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
}
/**
- * centrino_setpolicy - set a new CPUFreq policy
+ * centrino_target - set a new CPUFreq policy
* @policy: new policy
* @index: index of target frequency
*
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index 0171a6e190d7..f7199a35cbb6 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -84,7 +84,7 @@ static struct cpuidle_driver exynos_idle_driver = {
[1] = {
.enter = exynos_enter_lowpower,
.exit_latency = 300,
- .target_residency = 100000,
+ .target_residency = 10000,
.name = "C1",
.desc = "ARM power down",
},
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7f108309e871..0f4b7c45df3e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -328,9 +328,23 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int index)
{
+ int ret = 0;
+
+ /*
+ * Store the next hrtimer, which becomes either the next tick or the
+ * next timer event, whichever expires first. Additionally, to make
+ * this data useful for consumers outside cpuidle, we rely on the
+ * governor's ->select() callback having decided whether or not to
+ * stop the tick.
+ */
+ WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());
+
if (cpuidle_state_is_coupled(drv, index))
- return cpuidle_enter_state_coupled(dev, drv, index);
- return cpuidle_enter_state(dev, drv, index);
+ ret = cpuidle_enter_state_coupled(dev, drv, index);
+ else
+ ret = cpuidle_enter_state(dev, drv, index);
+
+ WRITE_ONCE(dev->next_hrtimer, 0);
+ return ret;
}
/**
@@ -511,6 +525,7 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
dev->last_residency = 0;
+ dev->next_hrtimer = 0;
}
/**
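
With the hunks above, cpuidle_enter() publishes the earliest hrtimer expiry for the whole idle period and clears it again on exit; WRITE_ONCE() makes the stores safe against lockless readers using READ_ONCE(). A minimal sketch of the publish/clear bracket, with illustrative names; enter_idle() is a hypothetical blocking section:

    #include <linux/compiler.h>
    #include <linux/types.h>

    static u64 next_event;          /* read concurrently via READ_ONCE() */

    static int enter_idle(void);    /* hypothetical */

    static int idle_sketch(u64 expiry)
    {
            int ret;

            WRITE_ONCE(next_event, expiry); /* publish before blocking */
            ret = enter_idle();
            WRITE_ONCE(next_event, 0);      /* 0 means "nothing cached" */
            return ret;
    }
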
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index bb93e5cf6a4a..9fddf828a76f 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -89,6 +89,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
mutex_lock(&cpuidle_lock);
if (__cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
+ list_add_tail(&gov->governor_list, &cpuidle_governors);
if (!cpuidle_curr_governor ||
!strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) ||
(cpuidle_curr_governor->rating < gov->rating &&
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 61316fc51548..5951604e7d5c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -186,7 +186,7 @@ static unsigned int get_typical_interval(struct menu_device *data,
unsigned int min, max, thresh, avg;
uint64_t sum, variance;
- thresh = UINT_MAX; /* Discard outliers above this value */
+ thresh = INT_MAX; /* Discard outliers above this value */
again:
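
The menu hunk lowers the initial outlier threshold from UINT_MAX to INT_MAX: samples above the threshold are discarded, so every sample fed into the variance computation is bounded by 2^31. For values in [0, INT_MAX] the sum of squared deviations over the governor's eight intervals is at most 8 * (INT_MAX/2)^2, roughly 2^63, which fits in a 64-bit accumulator; UINT_MAX-sized samples could overflow it. A standalone sketch of the arithmetic, with illustrative names:

    #include <stdint.h>

    static uint64_t variance_of(const uint32_t *sample, unsigned int n)
    {
            uint64_t sum = 0, var = 0;
            uint32_t avg;
            unsigned int i;

            for (i = 0; i < n; i++)
                    sum += sample[i];       /* each sample <= INT_MAX */
            avg = sum / n;
            for (i = 0; i < n; i++) {
                    int64_t d = (int64_t)sample[i] - avg;
                    var += (uint64_t)(d * d); /* |d| < 2^31, so d*d < 2^62 */
            }
            return var / n;
    }
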
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0be55fcc19ba..177b7713bd2d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -404,15 +404,6 @@ config CRYPTO_DEV_SAHARA
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
-config CRYPTO_DEV_MXC_SCC
- tristate "Support for Freescale Security Controller (SCC)"
- depends on ARCH_MXC && OF
- select CRYPTO_BLKCIPHER
- select CRYPTO_DES
- help
- This option enables support for the Security Controller (SCC)
- found in Freescale i.MX25 chips.
-
config CRYPTO_DEV_EXYNOS_RNG
tristate "EXYNOS HW pseudo random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 8e7e225d2446..a23a7197fcd7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4092c2aad8e2..307f5cfa9ba4 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
/* Setup SA */
sa = ctx->sa_in;
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
- SA_SAVE_IV : SA_NOT_SAVE_IV),
- SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
+ SA_NOT_SAVE_IV : SA_SAVE_IV),
+ SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
+ SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ /*
+ * SA_OPCODE_ENCRYPT has the same value as SA_OPCODE_DECRYPT;
+ * it's the DIR_(IN|OUT)BOUND that matters.
+ */
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
return 0;
}
@@ -258,10 +264,10 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
* overflow.
*/
if (counter + nblks < counter) {
- struct skcipher_request *subreq = skcipher_request_ctx(req);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher.cipher);
int ret;
- skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher.cipher);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -283,14 +289,14 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
{
int rc;
- crypto_skcipher_clear_flags(ctx->sw_cipher.cipher,
+ crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->sw_cipher.cipher,
+ crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
+ rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
crypto_skcipher_set_flags(cipher,
- crypto_skcipher_get_flags(ctx->sw_cipher.cipher) &
+ crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
CRYPTO_TFM_RES_MASK);
return rc;
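
The crypto4xx hunks above convert the software fallback to the crypto_sync_skcipher API, which guarantees a synchronous transform and lets the request live on the stack via SYNC_SKCIPHER_REQUEST_ON_STACK(). A minimal sketch of the pattern, assuming <crypto/skcipher.h>; the "cbc(aes)" algorithm name and the key/iv/scatterlist parameters are illustrative:

    #include <crypto/skcipher.h>

    static int sync_fallback_encrypt(struct scatterlist *src,
                                     struct scatterlist *dst,
                                     unsigned int len, u8 *iv,
                                     const u8 *key, unsigned int keylen)
    {
            struct crypto_sync_skcipher *tfm;
            int err;

            tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_sync_skcipher_setkey(tfm, key, keylen);
            if (!err) {
                    SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); /* no kmalloc */

                    skcipher_request_set_sync_tfm(req, tfm);
                    skcipher_request_set_callback(req, 0, NULL, NULL);
                    skcipher_request_set_crypt(req, src, dst, len, iv);
                    err = crypto_skcipher_encrypt(req);
                    skcipher_request_zero(req); /* wipe on-stack state */
            }

            crypto_free_sync_skcipher(tfm);
            return err;
    }
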
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 06574a884715..3934c2523762 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -539,7 +539,7 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
req = skcipher_request_cast(pd_uinfo->async_req);
- if (pd_uinfo->using_sd) {
+ if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
req->cryptlen, req->dst);
} else {
@@ -593,7 +593,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
u32 icv[AES_BLOCK_SIZE];
int err = 0;
- if (pd_uinfo->using_sd) {
+ if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
dst);
@@ -714,7 +714,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
size_t offset_to_sr_ptr;
u32 gd_idx = 0;
int tmp;
- bool is_busy;
+ bool is_busy, force_sd;
+
+ /*
+ * There's a very subtle/disguised "bug" in the hardware that
+ * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
+ * of the hardware spec:
+ * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
+ * operation modes for >>> "Block ciphers" <<<.
+ *
+ * To work around this issue and stop the hardware from causing
+ * "overran dst buffer" errors on ciphertexts that are not a
+ * multiple of 16 (AES_BLOCK_SIZE), we force the driver to use
+ * the scatter buffers.
+ */
+ force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
+ || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
+ && (datalen % AES_BLOCK_SIZE);
/* figure how many gd are needed */
tmp = sg_nents_for_len(src, assoclen + datalen);
@@ -732,7 +748,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
/* figure how many sd are needed */
- if (sg_is_last(dst)) {
+ if (sg_is_last(dst) && force_sd == false) {
num_sd = 0;
} else {
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -807,9 +823,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
pd->sa_len = sa_len;
pd_uinfo = &dev->pdr_uinfo[pd_entry];
- pd_uinfo->async_req = req;
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
+ pd_uinfo->dest_va = dst;
+ pd_uinfo->async_req = req;
if (iv_len)
memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
@@ -828,7 +845,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
/* get first gd we are going to use */
gd_idx = fst_gd;
pd_uinfo->first_gd = fst_gd;
- pd_uinfo->num_gd = num_gd;
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
pd->src = gd_dma;
/* enable gather */
@@ -865,17 +881,13 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
* Indicate that the gather array is not used
*/
pd_uinfo->first_gd = 0xffffffff;
- pd_uinfo->num_gd = 0;
}
- if (sg_is_last(dst)) {
+ if (!num_sd) {
/*
* we know the application gave us dst as one contiguous piece
* of memory, so there is no need to use the scatter ring.
*/
- pd_uinfo->using_sd = 0;
pd_uinfo->first_sd = 0xffffffff;
- pd_uinfo->num_sd = 0;
- pd_uinfo->dest_va = dst;
sa->sa_command_0.bf.scatter = 0;
pd->dest = (u32)dma_map_page(dev->core_dev->device,
sg_page(dst), dst->offset,
@@ -888,10 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
u32 sd_idx = fst_sd;
nbytes = datalen;
sa->sa_command_0.bf.scatter = 1;
- pd_uinfo->using_sd = 1;
- pd_uinfo->dest_va = dst;
pd_uinfo->first_sd = fst_sd;
- pd_uinfo->num_sd = num_sd;
sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
pd->dest = sd_dma;
/* setup scatter descriptor */
@@ -954,15 +963,10 @@ static int crypto4xx_sk_init(struct crypto_skcipher *sk)
if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
ctx->sw_cipher.cipher =
- crypto_alloc_skcipher(alg->base.cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC);
+ crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher.cipher))
return PTR_ERR(ctx->sw_cipher.cipher);
-
- crypto_skcipher_set_reqsize(sk,
- sizeof(struct skcipher_request) + 32 +
- crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
}
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
@@ -981,7 +985,7 @@ static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
crypto4xx_common_exit(ctx);
if (ctx->sw_cipher.cipher)
- crypto_free_skcipher(ctx->sw_cipher.cipher);
+ crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 18df695ca6b1..c624f8cd3d2e 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -64,7 +64,6 @@ union shadow_sa_buf {
struct pd_uinfo {
struct crypto4xx_device *dev;
u32 state;
- u32 using_sd;
u32 first_gd; /* first gather descriptor
used by this packet */
u32 num_gd; /* number of gather descriptors
@@ -131,7 +130,7 @@ struct crypto4xx_ctx {
__le32 iv_nonce;
u32 sa_len;
union {
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
struct crypto_aead *aead;
} sw_cipher;
};
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 65bf1a299562..fa76620281e8 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -800,20 +800,14 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- const char *alg_name;
-
- alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
+ u32 flags;
+ int err;
- /*
- * HW bug in cfb 3-keys mode.
- */
- if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb")
- && (keylen != 2*DES_KEY_SIZE)) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ flags = crypto_ablkcipher_get_flags(tfm);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(tfm, flags);
+ return err;
}
memcpy(ctx->key, key, keylen);
@@ -1060,7 +1054,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
.cra_u.ablkcipher = {
- .min_keysize = 2 * DES_KEY_SIZE,
+ .min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
.setkey = atmel_tdes_setkey,
.encrypt = atmel_tdes_ecb_encrypt,
@@ -1079,7 +1073,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
.cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
+ .min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1088,86 +1082,6 @@ static struct crypto_alg tdes_algs[] = {
}
},
{
- .cra_name = "cfb(des3_ede)",
- .cra_driver_name = "atmel-cfb-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb_encrypt,
- .decrypt = atmel_tdes_cfb_decrypt,
- }
-},
-{
- .cra_name = "cfb8(des3_ede)",
- .cra_driver_name = "atmel-cfb8-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB8_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb8_encrypt,
- .decrypt = atmel_tdes_cfb8_decrypt,
- }
-},
-{
- .cra_name = "cfb16(des3_ede)",
- .cra_driver_name = "atmel-cfb16-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB16_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x1,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb16_encrypt,
- .decrypt = atmel_tdes_cfb16_decrypt,
- }
-},
-{
- .cra_name = "cfb32(des3_ede)",
- .cra_driver_name = "atmel-cfb32-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb32_encrypt,
- .decrypt = atmel_tdes_cfb32_decrypt,
- }
-},
-{
.cra_name = "ofb(des3_ede)",
.cra_driver_name = "atmel-ofb-tdes",
.cra_priority = 100,
@@ -1179,7 +1093,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
.cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
+ .min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_tdes_setkey,
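
The atmel-tdes hunks above (and the bcm and caam ones below) replace open-coded 3DES key checks with the shared __des3_verify_key() helper, which rejects keys whose DES subkeys repeat; accordingly, the algorithms now advertise min_keysize = 3*DES_KEY_SIZE. A minimal sketch of the helper in a setkey path, assuming <crypto/des.h>; the hardware-programming step is elided:

    #include <linux/crypto.h>
    #include <crypto/des.h>

    static int tdes_setkey_sketch(struct crypto_ablkcipher *tfm,
                                  const u8 *key, unsigned int keylen)
    {
            u32 flags = crypto_ablkcipher_get_flags(tfm);
            int err = __des3_verify_key(&flags, key);

            if (unlikely(err)) {
                    /* the helper updates flags, e.g. to mark a weak key */
                    crypto_ablkcipher_set_flags(tfm, flags);
                    return err;
            }
            /* ... program the verified 3*DES_KEY_SIZE key into hardware ... */
            return 0;
    }
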
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 57e5dca3253f..d2fb72811442 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2247,8 +2247,6 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
hdesc->tfm = tfm_ctx->child_hash;
- hdesc->flags = crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
tfm_ctx->hmac_key_length = blocksize;
ret = crypto_shash_digest(hdesc, key, keylen,
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 28f592f7e1b7..25f8d3913ceb 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1840,13 +1840,14 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
if (keylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)key;
- u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ u32 flags;
+ int ret;
- if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+ flags = crypto_ablkcipher_get_flags(cipher);
+ ret = __des3_verify_key(&flags, key);
+ if (unlikely(ret)) {
crypto_ablkcipher_set_flags(cipher, flags);
- return -EINVAL;
+ return ret;
}
ctx->cipher_type = CIPHER_TYPE_3DES;
@@ -2139,7 +2140,6 @@ static int ahash_init(struct ahash_request *req)
goto err_hash;
}
ctx->shash->tfm = hash;
- ctx->shash->flags = 0;
/* Set the key using data we already have from setkey */
if (ctx->authkeylen > 0) {
@@ -2885,13 +2885,13 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
break;
case CIPHER_ALG_3DES:
if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)keys.enckey;
- u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ u32 flags;
- if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+ flags = crypto_aead_get_flags(cipher);
+ ret = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(ret)) {
crypto_aead_set_flags(cipher, flags);
- return -EINVAL;
+ return ret;
}
ctx->cipher_type = CIPHER_TYPE_3DES;
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index dbb5c03dde49..2baf6d7f2c1d 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -22,9 +22,6 @@
#include "spum.h"
#include "cipher.h"
-/* This array is based on the hash algo type supported in spu.h */
-char *tag_to_hash_idx[] = { "none", "md5", "sha1", "sha224", "sha256" };
-
char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
"sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index d8cda5fb75ad..91ec56399d84 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -242,7 +242,6 @@ int do_shash(unsigned char *name, unsigned char *result,
goto do_shash_err;
}
sdesc->shash.tfm = hash;
- sdesc->shash.flags = 0x0;
if (key_len > 0) {
rc = crypto_shash_setkey(hash, key, key_len);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 9eac5099098e..3e23d4b2cce2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -638,6 +638,39 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
@@ -2457,7 +2490,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2479,7 +2512,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2502,7 +2535,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2525,7 +2558,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2548,7 +2581,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2571,7 +2604,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2594,7 +2627,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2617,7 +2650,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2640,7 +2673,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2663,7 +2696,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2686,7 +2719,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2709,7 +2742,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -3455,13 +3488,12 @@ static int __init caam_algapi_init(void)
{
struct device_node *dev_node;
struct platform_device *pdev;
- struct device *ctrldev;
struct caam_drv_private *priv;
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
- bool registered = false;
+ bool registered = false, gcm_support;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -3476,16 +3508,17 @@ static int __init caam_algapi_init(void)
return -ENODEV;
}
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
+ priv = dev_get_drvdata(&pdev->dev);
of_node_put(dev_node);
/*
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv)
- return -ENODEV;
+ if (!priv) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/*
@@ -3493,7 +3526,7 @@ static int __init caam_algapi_init(void)
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
if (priv->era < 10) {
- u32 cha_vid, cha_inst;
+ u32 cha_vid, cha_inst, aes_rn;
cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
@@ -3508,6 +3541,10 @@ static int __init caam_algapi_init(void)
CHA_ID_LS_ARC4_SHIFT;
ccha_inst = 0;
ptha_inst = 0;
+
+ aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
+ CHA_ID_LS_AES_MASK;
+ gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
} else {
u32 aesa, mdha;
@@ -3523,6 +3560,8 @@ static int __init caam_algapi_init(void)
ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
+
+ gcm_support = aesa & CHA_VER_MISC_AES_GCM;
}
/* If MD is present, limit digest size based on LP256 */
@@ -3595,11 +3634,9 @@ static int __init caam_algapi_init(void)
if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
continue;
- /*
- * Check support for AES algorithms not available
- * on LP devices.
- */
- if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+ /* Skip GCM algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
+ alg_aai == OP_ALG_AAI_GCM && !gcm_support)
continue;
/*
@@ -3626,6 +3663,8 @@ static int __init caam_algapi_init(void)
if (registered)
pr_info("caam algorithms registered in /proc/crypto\n");
+out_put_dev:
+ put_device(&pdev->dev);
return err;
}
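
The caam_algapi_init() hunks above add an out_put_dev label so the device reference taken by of_find_device_by_node() is dropped on every exit path, success and failure alike. A minimal sketch of the refcount balance, assuming <linux/of_platform.h>; do_register() is hypothetical:

    #include <linux/of_platform.h>

    static int do_register(void);       /* hypothetical */

    static int init_sketch(struct device_node *dev_node)
    {
            struct platform_device *pdev;
            int err;

            pdev = of_find_device_by_node(dev_node); /* takes a device ref */
            if (!pdev)
                    return -ENODEV;

            err = do_register();

            put_device(&pdev->dev);     /* drop the ref on every path */
            return err;
    }
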
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index a15ce9213310..70af211d2d01 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -292,6 +292,39 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -667,6 +700,13 @@ badkey:
return -EINVAL;
}
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return unlikely(des3_verify_key(skcipher, key)) ?:
+ skcipher_setkey(skcipher, key, keylen);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
@@ -1382,7 +1422,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des3_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1798,7 +1838,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1820,7 +1860,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1843,7 +1883,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1866,7 +1906,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1889,7 +1929,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1912,7 +1952,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1935,7 +1975,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1958,7 +1998,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1981,7 +2021,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2004,7 +2044,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2027,7 +2067,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2050,7 +2090,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2492,12 +2532,15 @@ static int __init caam_qi_algapi_init(void)
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv || !priv->qi_present)
- return -ENODEV;
+ if (!priv || !priv->qi_present) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
if (caam_dpaa2) {
dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_put_dev;
}
/*
@@ -2610,6 +2653,8 @@ static int __init caam_qi_algapi_init(void)
if (registered)
dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+out_put_dev:
+ put_device(ctrldev);
return err;
}
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index c2c1abc68f81..33a4df6b81de 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -323,6 +323,39 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
@@ -938,6 +971,13 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return unlikely(des3_verify_key(skcipher, key)) ?:
+ skcipher_setkey(skcipher, key, keylen);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
@@ -1484,7 +1524,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des3_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1916,7 +1956,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1938,7 +1978,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1961,7 +2001,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1984,7 +2024,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2007,7 +2047,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2030,7 +2070,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2053,7 +2093,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2076,7 +2116,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2099,7 +2139,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2122,7 +2162,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2145,7 +2185,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2168,7 +2208,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2854,6 +2894,7 @@ struct caam_hash_state {
struct caam_request caam_req;
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
+ int ctx_dma_len;
u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
int buflen_0;
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -2927,6 +2968,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
struct caam_hash_state *state, int ctx_len,
struct dpaa2_sg_entry *qm_sg, u32 flag)
{
+ state->ctx_dma_len = ctx_len;
state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
if (dma_mapping_error(dev, state->ctx_dma)) {
dev_err(dev, "unable to map ctx\n");
@@ -3018,13 +3060,13 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
}
/* Digest the key if it is too large */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+ u32 digestsize)
{
struct caam_request *req_ctx;
u32 *desc;
struct split_key_sh_result result;
- dma_addr_t src_dma, dst_dma;
+ dma_addr_t key_dma;
struct caam_flc *flc;
dma_addr_t flc_dma;
int ret = -ENOMEM;
@@ -3041,17 +3083,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
if (!flc)
goto err_flc;
- src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ctx->dev, src_dma)) {
- dev_err(ctx->dev, "unable to map key input memory\n");
- goto err_src_dma;
- }
- dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, dst_dma)) {
- dev_err(ctx->dev, "unable to map key output memory\n");
- goto err_dst_dma;
+ key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(ctx->dev, key_dma)) {
+ dev_err(ctx->dev, "unable to map key memory\n");
+ goto err_key_dma;
}
desc = flc->sh_desc;
@@ -3076,14 +3111,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(in_fle, src_dma);
+ dpaa2_fl_set_addr(in_fle, key_dma);
dpaa2_fl_set_len(in_fle, *keylen);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, dst_dma);
+ dpaa2_fl_set_addr(out_fle, key_dma);
dpaa2_fl_set_len(out_fle, digestsize);
print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
@@ -3103,17 +3138,15 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
wait_for_completion(&result.completion);
ret = result.err;
print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
DMA_TO_DEVICE);
err_flc_dma:
- dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
-err_dst_dma:
- dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
-err_src_dma:
+ dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
+err_key_dma:
kfree(flc);
err_flc:
kfree(req_ctx);
@@ -3135,12 +3168,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
if (keylen > blocksize) {
- hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
- GFP_KERNEL | GFP_DMA);
+ hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!hashed_key)
return -ENOMEM;
- ret = hash_digest_key(ctx, key, &keylen, hashed_key,
- digestsize);
+ ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
if (ret)
goto bad_free_key;
key = hashed_key;
@@ -3165,14 +3196,12 @@ bad_free_key:
}
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
- struct ahash_request *req, int dst_len)
+ struct ahash_request *req)
{
struct caam_hash_state *state = ahash_request_ctx(req);
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
- if (edesc->dst_dma)
- dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
if (edesc->qm_sg_bytes)
dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
@@ -3187,18 +3216,15 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
- struct ahash_request *req, int dst_len,
- u32 flag)
+ struct ahash_request *req, u32 flag)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
if (state->ctx_dma) {
- dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
state->ctx_dma = 0;
}
- ahash_unmap(dev, edesc, req, dst_len);
+ ahash_unmap(dev, edesc, req);
}
static void ahash_done(void *cbk_ctx, u32 status)
@@ -3219,16 +3245,13 @@ static void ahash_done(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+ memcpy(req->result, state->caam_ctx, digestsize);
qi_cache_free(edesc);
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
req->base.complete(&req->base, ecode);
}
@@ -3250,7 +3273,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
switch_buf(state);
qi_cache_free(edesc);
@@ -3283,16 +3306,13 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+ memcpy(req->result, state->caam_ctx, digestsize);
qi_cache_free(edesc);
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
req->base.complete(&req->base, ecode);
}
@@ -3314,7 +3334,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
switch_buf(state);
qi_cache_free(edesc);
@@ -3452,7 +3472,7 @@ static int ahash_update_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3484,7 +3504,7 @@ static int ahash_final_ctx(struct ahash_request *req)
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -3503,22 +3523,13 @@ static int ahash_final_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
- ret = -ENOMEM;
- goto unmap_ctx;
- }
-
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3533,7 +3544,7 @@ static int ahash_final_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3586,7 +3597,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -3605,22 +3616,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
- ret = -ENOMEM;
- goto unmap_ctx;
- }
-
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3635,7 +3637,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3704,18 +3706,19 @@ static int ahash_digest(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
goto unmap;
}
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_len(in_fle, req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -3729,7 +3732,7 @@ static int ahash_digest(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3755,27 +3758,39 @@ static int ahash_final_no_ctx(struct ahash_request *req)
if (!edesc)
return ret;
- state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(ctx->dev, state->buf_dma)) {
- dev_err(ctx->dev, "unable to map src\n");
- goto unmap;
+ if (buflen) {
+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+ dev_err(ctx->dev, "unable to map src\n");
+ goto unmap;
+ }
}
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
goto unmap;
}
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
- dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(in_fle, state->buf_dma);
- dpaa2_fl_set_len(in_fle, buflen);
+ /*
+ * The crypto engine requires the input entry to be present when
+ * a "frame list" FD is used.
+ * Since the engine does not support FMT=2'b11 (unused entry type),
+ * leaving in_fle zeroized (except for the "Final" flag) is the best
+ * option.
+ */
+ if (buflen) {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
+ dpaa2_fl_set_len(in_fle, buflen);
+ }
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -3790,7 +3805,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3870,6 +3885,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
+ state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -3918,7 +3934,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3983,11 +3999,12 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
ret = -ENOMEM;
goto unmap;
}
@@ -3998,7 +4015,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -4013,7 +4030,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return -ENOMEM;
}
@@ -4100,6 +4117,7 @@ static int ahash_update_first(struct ahash_request *req)
scatterwalk_map_and_copy(next_buf, req->src, to_hash,
*next_buflen, 0);
+ state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -4143,7 +4161,7 @@ static int ahash_update_first(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -4162,6 +4180,7 @@ static int ahash_init(struct ahash_request *req)
state->final = ahash_final_no_ctx;
state->ctx_dma = 0;
+ state->ctx_dma_len = 0;
state->current_buf = 0;
state->buf_dma = 0;
state->buflen_0 = 0;
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 20890780fb82..be5085451053 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -162,14 +162,12 @@ struct skcipher_edesc {
/*
* ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: I/O virtual address of req->result
* @qm_sg_dma: I/O virtual address of h/w link table
* @src_nents: number of segments in input scatterlist
* @qm_sg_bytes: length of dma mapped qm_sg space
* @sgt: pointer to h/w link table
*/
struct ahash_edesc {
- dma_addr_t dst_dma;
dma_addr_t qm_sg_dma;
int src_nents;
int qm_sg_bytes;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index d7483e4d0ce2..7205d9f4029e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- if (mapped_nents) {
+ if (mapped_nents)
sg_to_sec4_sg_last(req->src, mapped_nents,
edesc->sec4_sg + sec4_sg_src_index,
0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
- } else {
+ else
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
- }
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1993,7 +1992,6 @@ static int __init caam_algapi_hash_init(void)
{
struct device_node *dev_node;
struct platform_device *pdev;
- struct device *ctrldev;
int i = 0, err = 0;
struct caam_drv_private *priv;
unsigned int md_limit = SHA512_DIGEST_SIZE;
@@ -2012,16 +2010,17 @@ static int __init caam_algapi_hash_init(void)
return -ENODEV;
}
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
+ priv = dev_get_drvdata(&pdev->dev);
of_node_put(dev_node);
/*
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv)
- return -ENODEV;
+ if (!priv) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/*
* Register crypto algorithms the device supports. First, identify
@@ -2043,8 +2042,10 @@ static int __init caam_algapi_hash_init(void)
* Skip registration of any hashing algorithms if MD block
* is not present.
*/
- if (!md_inst)
- return -ENODEV;
+ if (!md_inst) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/* Limit digest size based on LP256 */
if (md_vid == CHA_VER_VID_MD_LP256)
@@ -2101,6 +2102,8 @@ static int __init caam_algapi_hash_init(void)
list_add_tail(&t_alg->entry, &hash_list);
}
+out_put_dev:
+ put_device(&pdev->dev);
return err;
}
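This and the following two init functions fix the same leak: of_find_device_by_node() takes a reference on the returned device, and every exit path, success or failure, must drop it. A condensed sketch of the corrected shape, using a hypothetical compatible string:

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int __init example_init(void)
{
	struct device_node *np;
	struct platform_device *pdev;
	void *priv;
	int err = 0;

	np = of_find_compatible_node(NULL, NULL, "vendor,example"); /* hypothetical */
	if (!np)
		return -ENODEV;

	pdev = of_find_device_by_node(np);	/* takes a device reference */
	of_node_put(np);
	if (!pdev)
		return -ENODEV;

	priv = dev_get_drvdata(&pdev->dev);
	if (!priv) {
		err = -ENODEV;
		goto out_put_dev;	/* every failure path drops the ref */
	}

	/* ... register algorithms ... */

out_put_dev:
	put_device(&pdev->dev);	/* balances of_find_device_by_node() */
	return err;
}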
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 77ab28a2811a..fe24485274e1 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -994,8 +994,6 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg caam_rsa = {
.encrypt = caam_rsa_enc,
.decrypt = caam_rsa_dec,
- .sign = caam_rsa_dec,
- .verify = caam_rsa_enc,
.set_pub_key = caam_rsa_set_pub_key,
.set_priv_key = caam_rsa_set_priv_key,
.max_size = caam_rsa_max_size,
@@ -1042,8 +1040,10 @@ static int __init caam_pkc_init(void)
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv)
- return -ENODEV;
+ if (!priv) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/* Determine public key hardware accelerator presence. */
if (priv->era < 10)
@@ -1053,8 +1053,10 @@ static int __init caam_pkc_init(void)
pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
/* Do not register algorithms if PKHA is not present. */
- if (!pk_inst)
- return -ENODEV;
+ if (!pk_inst) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
err = crypto_register_akcipher(&caam_rsa);
if (err)
@@ -1063,6 +1065,8 @@ static int __init caam_pkc_init(void)
else
dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+out_put_dev:
+ put_device(ctrldev);
return err;
}
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index a387c8d49a62..95eb5402c59f 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -308,7 +308,6 @@ static int __init caam_rng_init(void)
struct device *dev;
struct device_node *dev_node;
struct platform_device *pdev;
- struct device *ctrldev;
struct caam_drv_private *priv;
u32 rng_inst;
int err;
@@ -326,16 +325,17 @@ static int __init caam_rng_init(void)
return -ENODEV;
}
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
+ priv = dev_get_drvdata(&pdev->dev);
of_node_put(dev_node);
/*
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
- if (!priv)
- return -ENODEV;
+ if (!priv) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
@@ -344,13 +344,16 @@ static int __init caam_rng_init(void)
else
rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
- if (!rng_inst)
- return -ENODEV;
+ if (!rng_inst) {
+ err = -ENODEV;
+ goto out_put_dev;
+ }
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
pr_err("Job Ring Device allocation for transform failed\n");
- return PTR_ERR(dev);
+ err = PTR_ERR(dev);
+ goto out_put_dev;
}
rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
@@ -361,6 +364,7 @@ static int __init caam_rng_init(void)
if (err)
goto free_rng_ctx;
+ put_device(&pdev->dev);
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
@@ -368,6 +372,8 @@ free_rng_ctx:
kfree(rng_ctx);
free_caam_alloc:
caam_jr_free(dev);
+out_put_dev:
+ put_device(&pdev->dev);
return err;
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 858bdc9ab4a3..e2ba3d202da5 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -468,6 +468,24 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
return caam_get_era_from_hw(ctrl);
}
+/*
+ * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6DQ)
+ * have an issue wherein AXI bus transactions may not occur in the correct
+ * order. This isn't a problem running single descriptors, but can be if
+ * running multiple concurrent descriptors. Reworking the driver to throttle
+ * to single requests is impractical, thus the workaround is to limit the AXI
+ * pipeline to a depth of 1 (from its default of 4) to preclude this situation
+ * from occurring.
+ */
+static void handle_imx6_err005766(u32 *mcr)
+{
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6dl") ||
+ of_machine_is_compatible("fsl,imx6qp"))
+ clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
+ 1 << MCFGR_AXIPIPE_SHIFT);
+}
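clrsetbits_32() here is the driver's masked read-modify-write helper; the call above is equivalent to the following open-coded sequence (using the driver's rd_reg32()/wr_reg32() accessors, shown only to spell out the semantics):

u32 v = rd_reg32(mcr);             /* read the current MCFGR value */
v &= ~MCFGR_AXIPIPE_MASK;          /* clear the AXI pipeline depth field */
v |= 1 << MCFGR_AXIPIPE_SHIFT;     /* request a depth of 1 */
wr_reg32(mcr, v);                  /* write the value back */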
+
static const struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
@@ -640,6 +658,8 @@ static int caam_probe(struct platform_device *pdev)
(sizeof(dma_addr_t) == sizeof(u64) ?
MCFGR_LONG_PTR : 0));
+ handle_imx6_err005766(&ctrl->mcr);
+
/*
* Read the Compile Time parameters and SCFGR to determine
* if Virtualization is enabled for this platform
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 21a70fd32f5d..a4129a35a330 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -138,7 +138,7 @@ static const struct {
{ 0x46, "Annotation length exceeds offset (reuse mode)"},
{ 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
{ 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
- { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
+ { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
{ 0x51, "Unsupported IF reuse mode"},
{ 0x52, "Unsupported FL use mode"},
{ 0x53, "Unsupported RJD use mode"},
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5869ad58d497..3392615dc91b 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -49,13 +49,11 @@ struct caam_drv_private_jr {
atomic_t tfm_count ____cacheline_aligned;
/* Job ring info */
- int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
- int inp_ring_write_index; /* Input index "tail" */
+ u32 inpring_avail; /* Number of free entries in input ring */
int head; /* entinfo (s/w ring) head index */
dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
- spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
struct jr_outentry *outring; /* Base of output ring, DMA-safe */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d50085a03597..044a69b526f7 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -170,13 +170,13 @@ static void caam_jr_dequeue(unsigned long devarg)
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
void *userarg;
+ u32 outring_used = 0;
- while (rd_reg32(&jrp->rregs->outring_used)) {
+ while (outring_used ||
+ (outring_used = rd_reg32(&jrp->rregs->outring_used))) {
head = READ_ONCE(jrp->head);
- spin_lock(&jrp->outlock);
-
sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;
@@ -199,7 +199,7 @@ static void caam_jr_dequeue(unsigned long devarg)
/* mark completed, avoid matching on a recycled desc addr */
jrp->entinfo[sw_idx].desc_addr_dma = 0;
- /* Stash callback params for use outside of lock */
+ /* Stash callback params */
usercall = jrp->entinfo[sw_idx].callbk;
userarg = jrp->entinfo[sw_idx].cbkarg;
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
@@ -213,7 +213,7 @@ static void caam_jr_dequeue(unsigned long devarg)
mb();
/* set done */
- wr_reg32(&jrp->rregs->outring_rmvd, 1);
+ wr_reg32_relaxed(&jrp->rregs->outring_rmvd, 1);
jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
(JOBR_DEPTH - 1);
@@ -232,10 +232,9 @@ static void caam_jr_dequeue(unsigned long devarg)
jrp->tail = tail;
}
- spin_unlock(&jrp->outlock);
-
/* Finally, execute user's callback */
usercall(dev, userdesc, userstatus, userarg);
+ outring_used--;
}
/* reenable / unmask IRQs */
@@ -345,7 +344,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head = jrp->head;
tail = READ_ONCE(jrp->tail);
- if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+ if (!jrp->inpring_avail ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
@@ -359,7 +358,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
- jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
+ jrp->inpring[head] = cpu_to_caam_dma(desc_dma);
/*
* Guarantee that the descriptor's DMA address has been written to
@@ -368,18 +367,22 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
*/
smp_wmb();
- jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
- (JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
/*
* Ensure that all job information has been written before
- * notifying CAAM that a new job was added to the input ring.
+ * notifying CAAM that a new job was added to the input ring.
+ * No explicit wmb() is needed here: wr_reg32() uses the
+ * iowrite32() API to do the register write, and iowrite32()
+ * issues a memory barrier before the write operation.
*/
- wmb();
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+ jrp->inpring_avail--;
+ if (!jrp->inpring_avail)
+ jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);
+
spin_unlock_bh(&jrp->inplock);
return 0;
@@ -431,7 +434,6 @@ static int caam_jr_init(struct device *dev)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
- jrp->inp_ring_write_index = 0;
jrp->out_ring_read_index = 0;
jrp->head = 0;
jrp->tail = 0;
@@ -441,10 +443,9 @@ static int caam_jr_init(struct device *dev)
wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
- jrp->ringsize = JOBR_DEPTH;
+ jrp->inpring_avail = JOBR_DEPTH;
spin_lock_init(&jrp->inplock);
- spin_lock_init(&jrp->outlock);
/* Select interrupt coalescing parameters */
clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
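The jr.c changes above all serve one idea: avoid an MMIO register read per processed job by caching the hardware occupancy counters (inpring_avail, outring_used) in software and re-reading them only when the cached value reaches zero. A sketch of the enqueue side under that scheme, with have_jobs()/enqueue_one() as hypothetical placeholders for the caller's work:

u32 avail = rd_reg32(&jrp->rregs->inpring_avail);  /* one-time snapshot */

while (have_jobs()) {                   /* hypothetical producer loop */
	if (!avail) {
		/* cache exhausted: refresh from hardware */
		avail = rd_reg32(&jrp->rregs->inpring_avail);
		if (!avail)
			break;          /* input ring genuinely full */
	}
	enqueue_one();                  /* hypothetical: write one descriptor */
	avail--;
}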
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 7cb8b1755e57..9f08f84cca59 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
/* Create a new req FQ in parked state */
new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
drv_ctx->context_a, 0);
- if (IS_ERR_OR_NULL(new_fq)) {
+ if (IS_ERR(new_fq)) {
dev_err(qidev, "FQ allocation for shdesc update failed\n");
return PTR_ERR(new_fq);
}
@@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
/* Attach request FQ */
drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
QMAN_INITFQ_FLAG_SCHED);
- if (IS_ERR_OR_NULL(drv_ctx->req_fq)) {
+ if (IS_ERR(drv_ctx->req_fq)) {
dev_err(qidev, "create_caam_req_fq failed\n");
dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
kfree(drv_ctx);
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 3cd0822ea819..c1fa1ec701d9 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -96,6 +96,14 @@ cpu_to_caam(16)
cpu_to_caam(32)
cpu_to_caam(64)
+static inline void wr_reg32_relaxed(void __iomem *reg, u32 data)
+{
+ if (caam_little_end)
+ writel_relaxed(data, reg);
+ else
+ writel_relaxed(cpu_to_be32(data), reg);
+}
+
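wr_reg32_relaxed() mirrors wr_reg32() but maps to writel_relaxed(), which skips the barrier that writel()/iowrite32() would issue before the store. The dequeue path above can use it for outring_rmvd because all reads of the completed descriptor have already happened by that point. A side-by-side sketch of the intent:

/* ordered: barrier before the store, as the enqueue doorbell needs */
wr_reg32(&jrp->rregs->inpring_jobadd, 1);

/* relaxed: no implicit barrier; safe once every read of the completed
 * output-ring entry has already been performed */
wr_reg32_relaxed(&jrp->rregs->outring_rmvd, 1);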
static inline void wr_reg32(void __iomem *reg, u32 data)
{
if (caam_little_end)
@@ -253,6 +261,9 @@ struct version_regs {
#define CHA_VER_VID_SHIFT 24
#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
+/* CHA Miscellaneous Information - AESA_MISC specific */
+#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)
+
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 600336d169a9..9810ad8ac519 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -10,7 +10,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
-#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <crypto/des.h>
#include <crypto/xts.h>
@@ -327,27 +326,36 @@ static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
+ u32 flags = crypto_ablkcipher_get_flags(cipher);
+ int err;
+
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
return cvm_setkey(cipher, key, keylen, DES3_CBC);
}
static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
+ u32 flags = crypto_ablkcipher_get_flags(cipher);
+ int err;
+
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
return cvm_setkey(cipher, key, keylen, DES3_ECB);
}
static int cvm_enc_dec_init(struct crypto_tfm *tfm)
{
- struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx) +
- sizeof(struct ablkcipher_request);
- /* Additional memory for ablkcipher_request is
- * allocated since the cryptd daemon uses
- * this memory for request_ctx information
- */
-
+ tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
return 0;
}
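The two setkey hunks above, like several later ones in the ccp and ccree drivers, converge on one idiom: call the shared __des3_verify_key() helper, write any flag it sets (e.g. CRYPTO_TFM_RES_WEAK_KEY) back into the tfm, and only then program the key. A minimal sketch, with do_hw_setkey() as a hypothetical stand-in for the driver-specific part:

#include <linux/crypto.h>
#include <crypto/des.h>

static int example_des3_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key, unsigned int keylen)
{
	u32 flags = crypto_ablkcipher_get_flags(cipher);
	int err;

	/* reject weak/degenerate 3DES keys via the common helper */
	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		/* propagate result flags (e.g. WEAK_KEY) to the tfm */
		crypto_ablkcipher_set_flags(cipher, flags);
		return err;
	}

	return do_hw_setkey(cipher, key, keylen);	/* hypothetical */
}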
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 2ca431ed1db8..88a0166f5477 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -641,7 +641,7 @@ static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}
-void cptvf_device_init(struct cpt_vf *cptvf)
+static void cptvf_device_init(struct cpt_vf *cptvf)
{
u64 base_addr = 0;
diff --git a/drivers/crypto/cavium/cpt/cptvf_mbox.c b/drivers/crypto/cavium/cpt/cptvf_mbox.c
index d5ec3b8a9e61..4f438eceb506 100644
--- a/drivers/crypto/cavium/cpt/cptvf_mbox.c
+++ b/drivers/crypto/cavium/cpt/cptvf_mbox.c
@@ -17,23 +17,6 @@ static void cptvf_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
mbx->data);
}
-/* ACKs PF's mailbox message
- */
-void cptvf_mbox_send_ack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
-{
- mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
- cptvf_send_msg_to_pf(cptvf, mbx);
-}
-
-/* NACKs PF's mailbox message that VF is not able to
- * complete the action
- */
-void cptvf_mbox_send_nack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
-{
- mbx->msg = CPT_MBOX_MSG_TYPE_NACK;
- cptvf_send_msg_to_pf(cptvf, mbx);
-}
-
/* Interrupt handler to handle mailbox messages from VFs */
void cptvf_handle_mbox_intr(struct cpt_vf *cptvf)
{
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index ca549c5dc08e..f16f61504241 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -223,7 +223,7 @@ scatter_gather_clean:
return ret;
}
-int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
+static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
u32 qno)
{
struct pci_dev *pdev = cptvf->pdev;
@@ -270,7 +270,7 @@ int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
return ret;
}
-void do_request_cleanup(struct cpt_vf *cptvf,
+static void do_request_cleanup(struct cpt_vf *cptvf,
struct cpt_info_buffer *info)
{
int i;
@@ -316,7 +316,7 @@ void do_request_cleanup(struct cpt_vf *cptvf,
kzfree(info);
}
-void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
+static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
struct pci_dev *pdev = cptvf->pdev;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 4f43eacd2557..e4841eb2a09f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -18,26 +18,6 @@
#define GCM_AES_SALT_SIZE 4
-/**
- * struct nitrox_crypt_params - Params to set nitrox crypto request.
- * @cryptlen: Encryption/Decryption data length
- * @authlen: Assoc data length + Cryptlen
- * @srclen: Input buffer length
- * @dstlen: Output buffer length
- * @iv: IV data
- * @ivsize: IV data length
- * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
- */
-struct nitrox_crypt_params {
- unsigned int cryptlen;
- unsigned int authlen;
- unsigned int srclen;
- unsigned int dstlen;
- u8 *iv;
- int ivsize;
- u8 ctrl_arg;
-};
-
union gph_p3 {
struct {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -94,36 +74,40 @@ static int nitrox_aead_setauthsize(struct crypto_aead *aead,
return 0;
}
-static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize,
+static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq,
+ struct scatterlist *src, char *iv, int ivsize,
int buflen)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- int nents = sg_nents_for_len(areq->src, buflen) + 1;
+ int nents = sg_nents_for_len(src, buflen);
int ret;
if (nents < 0)
return nents;
+ /* IV entry */
+ nents += 1;
/* Allocate buffer to hold IV and input scatterlist array */
ret = alloc_src_req_buf(nkreq, nents, ivsize);
if (ret)
return ret;
nitrox_creq_copy_iv(nkreq->src, iv, ivsize);
- nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen);
+ nitrox_creq_set_src_sg(nkreq, nents, ivsize, src, buflen);
return 0;
}
-static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
+static int alloc_dst_sglist(struct nitrox_kcrypt_request *nkreq,
+ struct scatterlist *dst, int ivsize, int buflen)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- int nents = sg_nents_for_len(areq->dst, buflen) + 3;
+ int nents = sg_nents_for_len(dst, buflen);
int ret;
if (nents < 0)
return nents;
+ /* IV, ORH, COMPLETION entries */
+ nents += 3;
/* Allocate buffer to hold ORH, COMPLETION and output scatterlist
* array
*/
@@ -133,61 +117,54 @@ static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
nitrox_creq_set_orh(nkreq);
nitrox_creq_set_comp(nkreq);
- nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen);
+ nitrox_creq_set_dst_sg(nkreq, nents, ivsize, dst, buflen);
return 0;
}
-static void free_src_sglist(struct aead_request *areq)
+static void free_src_sglist(struct nitrox_kcrypt_request *nkreq)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-
kfree(nkreq->src);
}
-static void free_dst_sglist(struct aead_request *areq)
+static void free_dst_sglist(struct nitrox_kcrypt_request *nkreq)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-
kfree(nkreq->dst);
}
-static int nitrox_set_creq(struct aead_request *areq,
- struct nitrox_crypt_params *params)
+static int nitrox_set_creq(struct nitrox_aead_rctx *rctx)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- struct se_crypto_request *creq = &nkreq->creq;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct se_crypto_request *creq = &rctx->nkreq.creq;
union gph_p3 param3;
- struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
int ret;
- creq->flags = areq->base.flags;
- creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
+ creq->flags = rctx->flags;
+ creq->gfp = (rctx->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
creq->ctrl.value = 0;
creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
- creq->ctrl.s.arg = params->ctrl_arg;
+ creq->ctrl.s.arg = rctx->ctrl_arg;
- creq->gph.param0 = cpu_to_be16(params->cryptlen);
- creq->gph.param1 = cpu_to_be16(params->authlen);
- creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen);
+ creq->gph.param0 = cpu_to_be16(rctx->cryptlen);
+ creq->gph.param1 = cpu_to_be16(rctx->cryptlen + rctx->assoclen);
+ creq->gph.param2 = cpu_to_be16(rctx->ivsize + rctx->assoclen);
param3.iv_offset = 0;
- param3.auth_offset = params->ivsize;
+ param3.auth_offset = rctx->ivsize;
creq->gph.param3 = cpu_to_be16(param3.param);
- creq->ctx_handle = nctx->u.ctx_handle;
+ creq->ctx_handle = rctx->ctx_handle;
creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
- ret = alloc_src_sglist(areq, params->iv, params->ivsize,
- params->srclen);
+ ret = alloc_src_sglist(&rctx->nkreq, rctx->src, rctx->iv, rctx->ivsize,
+ rctx->srclen);
if (ret)
return ret;
- ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen);
+ ret = alloc_dst_sglist(&rctx->nkreq, rctx->dst, rctx->ivsize,
+ rctx->dstlen);
if (ret) {
- free_src_sglist(areq);
+ free_src_sglist(&rctx->nkreq);
return ret;
}
@@ -197,9 +174,10 @@ static int nitrox_set_creq(struct aead_request *areq,
static void nitrox_aead_callback(void *arg, int err)
{
struct aead_request *areq = arg;
+ struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
- free_src_sglist(areq);
- free_dst_sglist(areq);
+ free_src_sglist(&rctx->nkreq);
+ free_dst_sglist(&rctx->nkreq);
if (err) {
pr_err_ratelimited("request failed status 0x%0x\n", err);
err = -EINVAL;
@@ -212,23 +190,25 @@ static int nitrox_aes_gcm_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- struct se_crypto_request *creq = &nkreq->creq;
+ struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &rctx->nkreq.creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
- struct nitrox_crypt_params params;
int ret;
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
- memset(&params, 0, sizeof(params));
- params.cryptlen = areq->cryptlen;
- params.authlen = areq->assoclen + params.cryptlen;
- params.srclen = params.authlen;
- params.dstlen = params.srclen + aead->authsize;
- params.iv = &areq->iv[GCM_AES_SALT_SIZE];
- params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
- params.ctrl_arg = ENCRYPT;
- ret = nitrox_set_creq(areq, &params);
+ rctx->cryptlen = areq->cryptlen;
+ rctx->assoclen = areq->assoclen;
+ rctx->srclen = areq->assoclen + areq->cryptlen;
+ rctx->dstlen = rctx->srclen + aead->authsize;
+ rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
+ rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ rctx->flags = areq->base.flags;
+ rctx->ctx_handle = nctx->u.ctx_handle;
+ rctx->src = areq->src;
+ rctx->dst = areq->dst;
+ rctx->ctrl_arg = ENCRYPT;
+ ret = nitrox_set_creq(rctx);
if (ret)
return ret;
@@ -241,23 +221,25 @@ static int nitrox_aes_gcm_dec(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- struct se_crypto_request *creq = &nkreq->creq;
+ struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &rctx->nkreq.creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
- struct nitrox_crypt_params params;
int ret;
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
- memset(&params, 0, sizeof(params));
- params.cryptlen = areq->cryptlen - aead->authsize;
- params.authlen = areq->assoclen + params.cryptlen;
- params.srclen = areq->cryptlen + areq->assoclen;
- params.dstlen = params.srclen - aead->authsize;
- params.iv = &areq->iv[GCM_AES_SALT_SIZE];
- params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
- params.ctrl_arg = DECRYPT;
- ret = nitrox_set_creq(areq, &params);
+ rctx->cryptlen = areq->cryptlen - aead->authsize;
+ rctx->assoclen = areq->assoclen;
+ rctx->srclen = areq->cryptlen + areq->assoclen;
+ rctx->dstlen = rctx->srclen - aead->authsize;
+ rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
+ rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ rctx->flags = areq->base.flags;
+ rctx->ctx_handle = nctx->u.ctx_handle;
+ rctx->src = areq->src;
+ rctx->dst = areq->dst;
+ rctx->ctrl_arg = DECRYPT;
+ ret = nitrox_set_creq(rctx);
if (ret)
return ret;
@@ -290,7 +272,7 @@ static int nitrox_aead_init(struct crypto_aead *aead)
return 0;
}
-static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+static int nitrox_gcm_common_init(struct crypto_aead *aead)
{
int ret;
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
@@ -308,8 +290,20 @@ static int nitrox_aes_gcm_init(struct crypto_aead *aead)
flags->w0.auth_input_type = 1;
flags->f = be64_to_cpu(flags->f);
- crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
- sizeof(struct nitrox_kcrypt_request));
+ return 0;
+}
+
+static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+{
+ int ret;
+
+ ret = nitrox_gcm_common_init(aead);
+ if (ret)
+ return ret;
+
+ crypto_aead_set_reqsize(aead,
+ sizeof(struct aead_request) +
+ sizeof(struct nitrox_aead_rctx));
return 0;
}
@@ -332,6 +326,166 @@ static void nitrox_aead_exit(struct crypto_aead *aead)
nctx->ndev = NULL;
}
+static int nitrox_rfc4106_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+ int ret;
+
+ if (keylen < GCM_AES_SALT_SIZE)
+ return -EINVAL;
+
+ keylen -= GCM_AES_SALT_SIZE;
+ ret = nitrox_aes_gcm_setkey(aead, key, keylen);
+ if (ret)
+ return ret;
+
+ memcpy(fctx->crypto.iv, key + keylen, GCM_AES_SALT_SIZE);
+ return 0;
+}
+
+static int nitrox_rfc4106_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return nitrox_aead_setauthsize(aead, authsize);
+}
+
+static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
+{
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+ unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+ struct scatterlist *sg;
+
+ if (areq->assoclen != 16 && areq->assoclen != 20)
+ return -EINVAL;
+
+ scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);
+ sg_init_table(rctx->src, 3);
+ sg_set_buf(rctx->src, rctx->assoc, assoclen);
+ sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
+ if (sg != rctx->src + 1)
+ sg_chain(rctx->src, 2, sg);
+
+ if (areq->src != areq->dst) {
+ sg_init_table(rctx->dst, 3);
+ sg_set_buf(rctx->dst, rctx->assoc, assoclen);
+ sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
+ if (sg != rctx->dst + 1)
+ sg_chain(rctx->dst, 2, sg);
+ }
+
+ aead_rctx->src = rctx->src;
+ aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;
+
+ return 0;
+}
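The helper above strips the IV-bearing part of the AAD by copying the remaining AAD into a local buffer and splicing that buffer ahead of the original scatterlist, fast-forwarded past the full assoclen. A self-contained sketch of that splice, with illustrative buffers and lengths assumed to be validated by the caller:

#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

static void splice_assoc(struct scatterlist table[3], u8 *assoc,
			 unsigned int assoclen, struct scatterlist *orig,
			 unsigned int skip)
{
	struct scatterlist *sg;

	sg_init_table(table, 3);
	sg_set_buf(table, assoc, assoclen);		/* entry 0: AAD copy */
	sg = scatterwalk_ffwd(table + 1, orig, skip);	/* skip AAD+IV in orig */
	if (sg != table + 1)
		sg_chain(table, 2, sg);			/* chain to remainder */
}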
+
+static void nitrox_rfc4106_callback(void *arg, int err)
+{
+ struct aead_request *areq = arg;
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_kcrypt_request *nkreq = &rctx->base.nkreq;
+
+ free_src_sglist(nkreq);
+ free_dst_sglist(nkreq);
+ if (err) {
+ pr_err_ratelimited("request failed status 0x%0x\n", err);
+ err = -EINVAL;
+ }
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int nitrox_rfc4106_enc(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+ struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
+ int ret;
+
+ aead_rctx->cryptlen = areq->cryptlen;
+ aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+ aead_rctx->srclen = aead_rctx->assoclen + aead_rctx->cryptlen;
+ aead_rctx->dstlen = aead_rctx->srclen + aead->authsize;
+ aead_rctx->iv = areq->iv;
+ aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
+ aead_rctx->flags = areq->base.flags;
+ aead_rctx->ctx_handle = nctx->u.ctx_handle;
+ aead_rctx->ctrl_arg = ENCRYPT;
+
+ ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
+ if (ret)
+ return ret;
+
+ ret = nitrox_set_creq(aead_rctx);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq,
+ nitrox_rfc4106_callback, areq);
+}
+
+static int nitrox_rfc4106_dec(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+ struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
+ int ret;
+
+ aead_rctx->cryptlen = areq->cryptlen - aead->authsize;
+ aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+ aead_rctx->srclen =
+ areq->cryptlen - GCM_RFC4106_IV_SIZE + areq->assoclen;
+ aead_rctx->dstlen = aead_rctx->srclen - aead->authsize;
+ aead_rctx->iv = areq->iv;
+ aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
+ aead_rctx->flags = areq->base.flags;
+ aead_rctx->ctx_handle = nctx->u.ctx_handle;
+ aead_rctx->ctrl_arg = DECRYPT;
+
+ ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
+ if (ret)
+ return ret;
+
+ ret = nitrox_set_creq(aead_rctx);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq,
+ nitrox_rfc4106_callback, areq);
+}
+
+static int nitrox_rfc4106_init(struct crypto_aead *aead)
+{
+ int ret;
+
+ ret = nitrox_gcm_common_init(aead);
+ if (ret)
+ return ret;
+
+ crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
+ sizeof(struct nitrox_rfc4106_rctx));
+
+ return 0;
+}
+
static struct aead_alg nitrox_aeads[] = { {
.base = {
.cra_name = "gcm(aes)",
@@ -351,6 +505,25 @@ static struct aead_alg nitrox_aeads[] = { {
.exit = nitrox_aead_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
+}, {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "n5_rfc4106",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .setkey = nitrox_rfc4106_setkey,
+ .setauthsize = nitrox_rfc4106_setauthsize,
+ .encrypt = nitrox_rfc4106_enc,
+ .decrypt = nitrox_rfc4106_dec,
+ .init = nitrox_rfc4106_init,
+ .exit = nitrox_aead_exit,
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
} };
int nitrox_register_aeads(void)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index c08d9f33a3b1..3f0df60267a9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -437,6 +437,45 @@ void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
}
+static const char *get_core_option(u8 se_cores, u8 ae_cores)
+{
+ const char *option = "";
+
+ if (ae_cores == AE_MAX_CORES) {
+ switch (se_cores) {
+ case SE_MAX_CORES:
+ option = "60";
+ break;
+ case 40:
+ option = "60s";
+ break;
+ }
+ } else if (ae_cores == (AE_MAX_CORES / 2)) {
+ option = "30";
+ } else {
+ option = "60i";
+ }
+
+ return option;
+}
+
+static const char *get_feature_option(u8 zip_cores, int core_freq)
+{
+ if (zip_cores == 0)
+ return "";
+ else if (zip_cores < ZIP_MAX_CORES)
+ return "-C15";
+
+ if (core_freq >= 850)
+ return "-C45";
+ else if (core_freq >= 750)
+ return "-C35";
+ else if (core_freq >= 550)
+ return "-C25";
+
+ return "";
+}
+
void nitrox_get_hwinfo(struct nitrox_device *ndev)
{
union emu_fuse_map emu_fuse;
@@ -469,24 +508,14 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev)
ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
}
- /* determine the partname CNN55<cores>-<freq><pincount>-<rev>*/
- if (ndev->hw.ae_cores == AE_MAX_CORES) {
- switch (ndev->hw.se_cores) {
- case SE_MAX_CORES:
- i = snprintf(name, sizeof(name), "CNN5560");
- break;
- case 40:
- i = snprintf(name, sizeof(name), "CNN5560s");
- break;
- }
- } else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
- i = snprintf(name, sizeof(name), "CNN5530");
- } else {
- i = snprintf(name, sizeof(name), "CNN5560i");
- }
-
- snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
- ndev->hw.freq, ndev->hw.revision_id);
+ /* determine the partname
+ * CNN55<core option>-<freq><pincount>-<feature option>-<rev>
+ */
+ snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
+ get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
+ ndev->hw.freq,
+ get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
+ ndev->hw.revision_id);
/* copy partname */
strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index 76c0f0be7233..efdbd0fc3e3b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -212,6 +212,50 @@ struct nitrox_kcrypt_request {
};
/**
+ * struct nitrox_aead_rctx - AEAD request context
+ * @nkreq: Base request context
+ * @cryptlen: Encryption/Decryption data length
+ * @assoclen: AAD length
+ * @srclen: Input buffer length
+ * @dstlen: Output buffer length
+ * @iv: IV data
+ * @ivsize: IV data length
+ * @flags: AEAD req flags
+ * @ctx_handle: Device context handle
+ * @src: Source sglist
+ * @dst: Destination sglist
+ * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
+ */
+struct nitrox_aead_rctx {
+ struct nitrox_kcrypt_request nkreq;
+ unsigned int cryptlen;
+ unsigned int assoclen;
+ unsigned int srclen;
+ unsigned int dstlen;
+ u8 *iv;
+ int ivsize;
+ u32 flags;
+ u64 ctx_handle;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ u8 ctrl_arg;
+};
+
+/**
+ * struct nitrox_rfc4106_rctx - rfc4106 cipher request context
+ * @base: AEAD request context
+ * @src: Source sglist
+ * @dst: Destination sglist
+ * @assoc: AAD
+ */
+struct nitrox_rfc4106_rctx {
+ struct nitrox_aead_rctx base;
+ struct scatterlist src[3];
+ struct scatterlist dst[3];
+ u8 assoc[20];
+};
+
+/**
* struct pkt_instr_hdr - Packet Instruction Header
* @g: Gather used
* When [G] is set and [GSZ] != 0, the instruction is
@@ -512,7 +556,7 @@ static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg,
struct scatterlist *sg = to_sg;
unsigned int sglen;
- for (; buflen; buflen -= sglen) {
+ for (; buflen && from_sg; buflen -= sglen) {
sglen = from_sg->length;
if (sglen > buflen)
sglen = buflen;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4c97478d44bd..5826c2c98a50 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -303,8 +303,6 @@ static void post_se_instr(struct nitrox_softreq *sr,
/* Ring doorbell with count 1 */
writeq(1, cmdq->dbell_csr_addr);
- /* orders the doorbell rings */
- mmiowb();
cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
@@ -599,8 +597,6 @@ void pkt_slc_resp_tasklet(unsigned long data)
* MSI-X interrupt generates if Completion count > Threshold
*/
writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
- /* order the writes */
- mmiowb();
if (atomic_read(&cmdq->backlog_count))
schedule_work(&cmdq->backlog_qflush);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index d4935d6cefdd..7e4a5e69085e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -257,12 +257,8 @@ static int nitrox_aes_decrypt(struct skcipher_request *skreq)
static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- if (keylen != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return nitrox_skcipher_setkey(cipher, 0, key, keylen);
+ return unlikely(des3_verify_key(cipher, key)) ?:
+ nitrox_skcipher_setkey(cipher, 0, key, keylen);
}
static int nitrox_3des_encrypt(struct skcipher_request *skreq)
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index b92b6e7e100f..4985bc812b0e 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -69,7 +69,7 @@ static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
zip_ops->csum = 1; /* Adler checksum desired */
}
-int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
+static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
@@ -107,7 +107,7 @@ err_comp_input:
return -ENOMEM;
}
-void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
+static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
@@ -119,7 +119,7 @@ void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
}
-int zip_compress(const u8 *src, unsigned int slen,
+static int zip_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
@@ -155,7 +155,7 @@ int zip_compress(const u8 *src, unsigned int slen,
return ret;
}
-int zip_decompress(const u8 *src, unsigned int slen,
+static int zip_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index c2ff551d215b..91482ffcac59 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -43,24 +43,11 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
struct ccp_crypto_ablkcipher_alg *alg =
ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
u32 *flags = &tfm->base.crt_flags;
+ int err;
-
- /* From des_generic.c:
- *
- * RFC2451:
- * If the first two or last two independent 64-bit keys are
- * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
- * same as DES. Implementers MUST reject keys that exhibit this
- * property.
- */
- const u32 *K = (const u32 *)key;
-
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ err = __des3_verify_key(flags, key);
+ if (unlikely(err))
+ return err;
/* It's not clear that there is any support for a keysize of 112.
* If needed, the caller should make K1 == K3
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 05850dfd7940..a2570c0c8cdc 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -37,10 +37,9 @@ static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
if (buf[nskip])
break;
*kplen = sz - nskip;
- *kpbuf = kzalloc(*kplen, GFP_KERNEL);
+ *kpbuf = kmemdup(buf + nskip, *kplen, GFP_KERNEL);
if (!*kpbuf)
return -ENOMEM;
- memcpy(*kpbuf, buf + nskip, *kplen);
return 0;
}
@@ -214,8 +213,6 @@ static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg ccp_rsa_defaults = {
.encrypt = ccp_rsa_encrypt,
.decrypt = ccp_rsa_decrypt,
- .sign = ccp_rsa_decrypt,
- .verify = ccp_rsa_encrypt,
.set_pub_key = ccp_rsa_setpubkey,
.set_priv_key = ccp_rsa_setprivkey,
.max_size = ccp_rsa_maxsize,
@@ -248,7 +245,8 @@ static struct ccp_rsa_def rsa_algs[] = {
}
};
-int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
+static int ccp_register_rsa_alg(struct list_head *head,
+ const struct ccp_rsa_def *def)
{
struct ccp_crypto_akcipher_alg *ccp_alg;
struct akcipher_alg *alg;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 10a61cd54fce..3e10573f589e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,8 +293,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
sdesc->tfm = shash;
- sdesc->flags = crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index fadf859a14b8..656838433f2f 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -583,6 +583,69 @@ e_free:
return ret;
}
+static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_get_id2 input;
+ struct sev_data_get_id *data;
+ void *id_blob = NULL;
+ int ret;
+
+ /* SEV GET_ID is available from SEV API v0.16 and up */
+ if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ /* Check if we have write access to the userspace buffer */
+ if (input.address &&
+ input.length &&
+ !access_ok(input.address, input.length))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (input.address && input.length) {
+ id_blob = kmalloc(input.length, GFP_KERNEL);
+ if (!id_blob) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ data->address = __psp_pa(id_blob);
+ data->len = input.length;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+
+ /*
+ * Firmware will return the length of the ID value (either the minimum
+ * required length or the actual length written); return it to the user.
+ */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ if (id_blob) {
+ if (copy_to_user((void __user *)input.address,
+ id_blob, data->len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+e_free:
+ kfree(id_blob);
+ kfree(data);
+
+ return ret;
+}
+
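Typical userspace usage of the new command is a two-pass query: the first call passes length == 0, which is expected to fail with a firmware length error while still reporting the required size; the second call supplies a buffer of that size. A sketch, assuming the uapi definitions this patch pairs with (SEV_GET_ID2, struct sev_user_data_get_id2) and the existing /dev/sev SEV_ISSUE_CMD ioctl:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

int main(void)
{
	struct sev_user_data_get_id2 id2 = { 0 };
	struct sev_issue_cmd cmd = {
		.cmd  = SEV_GET_ID2,
		.data = (uint64_t)(uintptr_t)&id2,
	};
	int fd = open("/dev/sev", O_RDWR);

	if (fd < 0)
		return 1;

	/* pass 1: length == 0, so firmware just reports the needed size */
	ioctl(fd, SEV_ISSUE_CMD, &cmd);
	if (!id2.length)
		return 1;

	id2.address = (uint64_t)(uintptr_t)malloc(id2.length);
	if (!id2.address)
		return 1;

	/* pass 2: the kernel copies the ID blob into our buffer */
	if (ioctl(fd, SEV_ISSUE_CMD, &cmd) == 0)
		printf("got %u-byte platform ID\n", id2.length);
	return 0;
}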
static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
{
struct sev_data_get_id *data;
@@ -761,8 +824,12 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
ret = sev_ioctl_do_pdh_export(&input);
break;
case SEV_GET_ID:
+ pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
ret = sev_ioctl_do_get_id(&input);
break;
+ case SEV_GET_ID2:
+ ret = sev_ioctl_do_get_id2(&input);
+ break;
default:
ret = -EINVAL;
goto out;
@@ -997,7 +1064,7 @@ void psp_pci_init(void)
rc = sev_platform_init(&error);
if (rc) {
dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
- goto err;
+ return;
}
dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index bdc27970f95f..145e50bdbf16 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2012-2019 ARM Limited (or its affiliates).
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index a3527c00b29a..7aa4cbe19a86 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,12 +23,8 @@
#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
-#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
-/* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01
-
struct cc_aead_handle {
cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
@@ -220,6 +216,10 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ /* BACKLOG notification */
+ if (err == -EINPROGRESS)
+ goto done;
+
cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
/* This function prepares the user key so it can be passed to the hmac
* processing (copy to internal buffer or hash in case of key longer than block
*/
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
unsigned int hashmode;
unsigned int idx = 0;
int rc = 0;
+ u8 *key = NULL;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
if (keylen != 0) {
+
+ key = kmemdup(authkey, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(key);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+ kzfree(key);
+
return rc;
}
@@ -650,6 +659,39 @@ setkey_error:
return rc;
}
+static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = cc_aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -731,7 +773,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
- areq->assoclen, NS_BIT);
+ areq_ctx->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
areq_ctx->cryptlen > 0)
@@ -1080,9 +1122,11 @@ static void cc_proc_header_desc(struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
+
/* Hash associated data */
- if (req->assoclen > 0)
+ if (areq_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
@@ -1159,9 +1203,9 @@ static void cc_mlli_to_sram(struct aead_request *req,
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
- !req_ctx->is_single_pass) {
+ !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1310,7 +1354,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- unsigned int assoclen = req->assoclen;
+ unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@@ -1469,7 +1513,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
idx++;
/* process assoc data */
- if (req->assoclen > 0) {
+ if (req_ctx->assoclen > 0) {
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
@@ -1561,7 +1605,7 @@ static int config_ccm_adata(struct aead_request *req)
* NIST Special Publication 800-38C
*/
*b0 |= (8 * ((m - 2) / 2));
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@@ -1572,7 +1616,7 @@ static int config_ccm_adata(struct aead_request *req)
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
- req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
@@ -1604,7 +1648,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= CCM_BLOCK_IV_SIZE;
+ areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@@ -1812,7 +1856,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
@@ -1836,8 +1880,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
- __func__, cryptlen, req->assoclen, ctx->authsize);
+ dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1853,7 +1897,7 @@ static int config_gcm_context(struct aead_request *req)
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;
- temp64 = cpu_to_be64(req->assoclen * 8);
+ temp64 = cpu_to_be64(req_ctx->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1863,8 +1907,8 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
- cryptlen) * 8);
+ temp64 = cpu_to_be64((req_ctx->assoclen +
+ GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1884,7 +1928,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+ areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@@ -1909,7 +1953,7 @@ static int cc_proc_aead(struct aead_request *req,
/* Check data length according to mode */
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ req->cryptlen, areq_ctx->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2058,8 +2102,11 @@ static int cc_aead_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2087,8 +2134,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2106,8 +2156,11 @@ static int cc_aead_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2133,8 +2186,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2250,8 +2306,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2273,11 +2332,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2305,8 +2367,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2328,11 +2393,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2372,7 +2440,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
@@ -2412,7 +2480,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 5edf3b351fa4..e51724b96c56 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_aead.h
* ARM CryptoCell AEAD Crypto API
@@ -67,6 +67,7 @@ struct aead_req_ctx {
u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /*store iv for generated IV flow*/
u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+ u32 assoclen; /* internal assoclen */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 0ee1c52da0a4..c81ad33f9115 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- u32 skip = req->assoclen + req->cryptlen;
+ u32 skip = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->is_gcm4543)
skip += crypto_aead_ivsize(tfm);
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
*/
static unsigned int cc_get_sgl_nents(struct device *dev,
struct scatterlist *sg_list,
- unsigned int nbytes, u32 *lbytes,
- bool *is_chained)
+ unsigned int nbytes, u32 *lbytes)
{
unsigned int nents = 0;
while (nbytes && sg_list) {
- if (sg_list->length) {
- nents++;
- /* get the number of bytes in the last entry */
- *lbytes = nbytes;
- nbytes -= (sg_list->length > nbytes) ?
- nbytes : sg_list->length;
- sg_list = sg_next(sg_list);
- } else {
- sg_list = (struct scatterlist *)sg_page(sg_list);
- if (is_chained)
- *is_chained = true;
- }
+ nents++;
+ /* get the number of bytes in the last entry */
+ *lbytes = nbytes;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
}
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
@@ -140,9 +133,9 @@ void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
- u32 nents, lbytes;
+ u32 nents;
- nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ nents = sg_nents_for_len(sg, end);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == CC_SG_TO_BUF));
}
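
cc_copy_sg_portion() now uses sg_nents_for_len(), which, unlike the old open-coded walk, fails explicitly when the list covers fewer bytes than requested. The return value is an int that should be checked before being used as an nents count; a minimal sketch of the idiom (helper name is illustrative):

#include <linux/scatterlist.h>

static int copy_sg_head(struct scatterlist *sg, void *buf, size_t len)
{
	int nents = sg_nents_for_len(sg, len);

	if (nents < 0)	/* -EINVAL: list covers fewer than len bytes */
		return nents;

	/* copy the first 'len' bytes of the list into 'buf' */
	sg_copy_buffer(sg, nents, buf, len, 0, true);
	return 0;
}
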
@@ -314,40 +307,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
sgl_data->num_of_buffers++;
}
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
-{
- u32 i, j;
- struct scatterlist *l_sg = sg;
-
- for (i = 0; i < nents; i++) {
- if (!l_sg)
- break;
- if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
- dev_err(dev, "dma_map_page() sg buffer failed\n");
- goto err;
- }
- l_sg = sg_next(l_sg);
- }
- return nents;
-
-err:
- /* Restore mapped parts */
- for (j = 0; j < i; j++) {
- if (!sg)
- break;
- dma_unmap_sg(dev, sg, 1, direction);
- sg = sg_next(sg);
- }
- return 0;
-}
-
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction, u32 *nents,
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
- bool is_chained = false;
-
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -361,35 +324,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
- &is_chained);
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
*nents, max_sg_nents);
return -ENOMEM;
}
- if (!is_chained) {
- /* In case of mmu the number of mapped nents might
- * be changed from the original sgl nents
- */
- *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (*mapped_nents == 0) {
- *nents = 0;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
- } else {
- /*In this case the driver maps entry by entry so it
- * must have the same nents before and after map
- */
- *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
- direction);
- if (*mapped_nents != *nents) {
- *nents = *mapped_nents;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
+ /* In case of an IOMMU, the number of mapped nents might
+ * differ from the original sgl nents
+ */
+ *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+ if (*mapped_nents == 0) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
}
}
@@ -457,7 +406,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
}
/* Release pool */
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -499,7 +448,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
@@ -568,11 +517,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- u32 dummy;
- bool chained;
- u32 size_to_unmap = 0;
if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -612,6 +557,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if (areq_ctx->gen_ctx.iv_dma_addr) {
dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, DMA_BIDIRECTIONAL);
+ kzfree(areq_ctx->gen_ctx.iv);
}
/* Release pool */
@@ -628,23 +574,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
- req->assoclen, req->cryptlen);
- size_to_unmap = req->assoclen + req->cryptlen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
- size_to_unmap += areq_ctx->req_authsize;
- if (areq_ctx->is_gcm4543)
- size_to_unmap += crypto_aead_ivsize(tfm);
+ areq_ctx->assoclen, req->cryptlen);
- dma_unmap_sg(dev, req->src,
- cc_get_sgl_nents(dev, req->src, size_to_unmap,
- &dummy, &chained),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst,
- cc_get_sgl_nents(dev, req->dst, size_to_unmap,
- &dummy, &chained),
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -658,55 +594,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
}
}
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
- unsigned int sgl_nents, unsigned int authsize,
- u32 last_entry_data_size,
- bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size)
{
- unsigned int icv_max_size = 0;
- unsigned int icv_required_size = authsize > last_entry_data_size ?
- (authsize - last_entry_data_size) :
- authsize;
- unsigned int nents;
- unsigned int i;
-
- if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
- *is_icv_fragmented = false;
- return 0;
- }
-
- for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
- if (!sgl)
- break;
- sgl = sg_next(sgl);
- }
-
- if (sgl)
- icv_max_size = sgl->length;
-
- if (last_entry_data_size > authsize) {
- /* ICV attached to data in last entry (not fragmented!) */
- nents = 0;
- *is_icv_fragmented = false;
- } else if (last_entry_data_size == authsize) {
- /* ICV placed in whole last entry (not fragmented!) */
- nents = 1;
- *is_icv_fragmented = false;
- } else if (icv_max_size > icv_required_size) {
- nents = 1;
- *is_icv_fragmented = true;
- } else if (icv_max_size == icv_required_size) {
- nents = 2;
- *is_icv_fragmented = true;
- } else {
- dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
- MAX_ICV_NENTS_SUPPORTED);
- nents = -1; /*unsupported*/
- }
- dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
- (*is_icv_fragmented ? "true" : "false"), nents);
-
- return nents;
+ return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
@@ -717,19 +608,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
+ gfp_t flags = cc_gfp_flags(&req->base);
int rc = 0;
if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
+ areq_ctx->gen_ctx.iv = NULL;
goto chain_iv_exit;
}
- areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
- hw_iv_size,
- DMA_BIDIRECTIONAL);
+ areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+ if (!areq_ctx->gen_ctx.iv)
+ return -ENOMEM;
+
+ areq_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
+ kzfree(areq_ctx->gen_ctx.iv);
+ areq_ctx->gen_ctx.iv = NULL;
rc = -ENOMEM;
goto chain_iv_exit;
}
@@ -760,11 +659,9 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = 0;
- u32 mapped_nents = 0;
- struct scatterlist *current_sg = req->src;
+ int mapped_nents = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int sg_index = 0;
- u32 size_of_assoc = req->assoclen;
+ unsigned int size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
@@ -775,7 +672,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- if (req->assoclen == 0) {
+ if (areq_ctx->assoclen == 0) {
areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
@@ -785,26 +682,10 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- //iterate over the sgl to see how many entries are for associated data
- //it is assumed that if we reach here , the sgl is already mapped
- sg_index = current_sg->length;
- //the first entry in the scatter list contains all the associated data
- if (sg_index > size_of_assoc) {
- mapped_nents++;
- } else {
- while (sg_index <= size_of_assoc) {
- current_sg = sg_next(current_sg);
- /* if have reached the end of the sgl, then this is
- * unexpected
- */
- if (!current_sg) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
- sg_index += current_sg->length;
- mapped_nents++;
- }
- }
+ mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+ if (mapped_nents < 0)
+ return mapped_nents;
+
if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -835,7 +716,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
- req->assoclen, 0, is_last,
+ areq_ctx->assoclen, 0, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
@@ -850,39 +731,32 @@ static void cc_prepare_aead_data_dlli(struct aead_request *req,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
+ struct scatterlist *sg;
+ ssize_t offset;
areq_ctx->is_icv_fragmented = false;
- if (req->src == req->dst) {
- /*INPLACE*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
- /*NON-INPLACE and DECRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
+
+ if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ sg = areq_ctx->src_sgl;
+ offset = *src_last_bytes - authsize;
} else {
- /*NON-INPLACE and ENCRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
+ sg = areq_ctx->dst_sgl;
+ offset = *dst_last_bytes - authsize;
}
+
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+ areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- u32 *src_last_bytes, u32 *dst_last_bytes,
- bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
- int rc = 0, icv_nents;
struct device *dev = drvdata_to_dev(drvdata);
struct scatterlist *sg;
@@ -893,14 +767,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
if (areq_ctx->is_icv_fragmented) {
/* Backup happens only when ICV is fragmented, ICV
@@ -942,16 +811,11 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->dst_offset, is_last_table,
&areq_ctx->dst.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
-
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
+
/* Backup happens only when ICV is fragmented, ICV
* verification is made by CPU compare in order to simplify
* MAC verification upon request completion
*/
@@ -979,14 +843,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
- areq_ctx->dst.nents,
- authsize, *dst_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+ *dst_last_bytes);
if (!areq_ctx->is_icv_fragmented) {
sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -1000,9 +859,6 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
}
}
-
-prepare_data_mlli_exit:
- return rc;
}
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -1019,12 +875,12 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
/* non-inplace mode */
- unsigned int size_for_map = req->assoclen + req->cryptlen;
+ unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
- bool chained = false;
bool is_gcm4543 = areq_ctx->is_gcm4543;
- u32 size_to_skip = req->assoclen;
+ u32 size_to_skip = areq_ctx->assoclen;
+ struct scatterlist *sgl;
if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
@@ -1043,19 +899,17 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
- &src_last_bytes, &chained);
+ &src_last_bytes);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
+ src_mapped_nents--;
offset -= areq_ctx->src_sgl->length;
- areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->src_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
+ sgl = sg_next(areq_ctx->src_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->src_sgl = sgl;
sg_index += areq_ctx->src_sgl->length;
- src_mapped_nents--;
}
if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1068,7 +922,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_offset = offset;
if (req->src != req->dst) {
- size_for_map = req->assoclen + req->cryptlen;
+ size_for_map = areq_ctx->assoclen + req->cryptlen;
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
if (is_gcm4543)
@@ -1083,21 +937,19 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
- &dst_last_bytes, &chained);
+ &dst_last_bytes);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
//check where the data starts
while (sg_index <= size_to_skip) {
+ dst_mapped_nents--;
offset -= areq_ctx->dst_sgl->length;
- areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->dst_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
+ sgl = sg_next(areq_ctx->dst_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->dst_sgl = sgl;
sg_index += areq_ctx->dst_sgl->length;
- dst_mapped_nents--;
}
if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1110,9 +962,9 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
dst_mapped_nents > 1 ||
do_chain) {
areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
- rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
- &src_last_bytes,
- &dst_last_bytes, is_last_table);
+ cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes, &dst_last_bytes,
+ is_last_table);
} else {
areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1234,7 +1086,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->ccm_iv0_dma_addr = dma_addr;
rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
- &sg_data, req->assoclen);
+ &sg_data, areq_ctx->assoclen);
if (rc)
goto aead_map_failure;
}
@@ -1285,7 +1137,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
- size_to_map = req->cryptlen + req->assoclen;
+ size_to_map = req->cryptlen + areq_ctx->assoclen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_map += authsize;
@@ -1483,8 +1335,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
- areq_ctx->in_nents =
- cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+ areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 3ec4b4db5247..a726016bdbc1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_buffer_mgr.h
* Buffer Manager
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d9c17078517b..5b58226ea24d 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -34,6 +34,18 @@ struct cc_hw_key_info {
enum cc_hw_crypto_key key2_slot;
};
+struct cc_cpp_key_info {
+ u8 slot;
+ enum cc_cpp_alg alg;
+};
+
+enum cc_key_type {
+ CC_UNPROTECTED_KEY, /* User key */
+ CC_HW_PROTECTED_KEY, /* HW (FDE) key */
+ CC_POLICY_PROTECTED_KEY, /* CPP key */
+ CC_INVALID_PROTECTED_KEY /* Invalid key */
+};
+
struct cc_cipher_ctx {
struct cc_drvdata *drvdata;
int keylen;
@@ -41,19 +53,22 @@ struct cc_cipher_ctx {
int cipher_mode;
int flow_mode;
unsigned int flags;
- bool hw_key;
+ enum cc_key_type key_type;
struct cc_user_key_info user;
- struct cc_hw_key_info hw;
+ union {
+ struct cc_hw_key_info hw;
+ struct cc_cpp_key_info cpp;
+ };
struct crypto_shash *shash_tfm;
};
static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
-static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- return ctx_p->hw_key;
+ return ctx_p->key_type;
}
static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
@@ -232,7 +247,7 @@ struct tdes_keys {
u8 key3[DES_KEY_SIZE];
};
-static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
+static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
{
switch (slot_num) {
case 0:
@@ -247,6 +262,22 @@ static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
return END_OF_KEYS;
}
+static u8 cc_slot_to_cpp_key(u8 slot_num)
+{
+ return (slot_num - CC_FIRST_CPP_KEY_SLOT);
+}
+
+static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num)
+{
+ if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT)
+ return CC_HW_PROTECTED_KEY;
+ else if (slot_num >= CC_FIRST_CPP_KEY_SLOT &&
+ slot_num <= CC_LAST_CPP_KEY_SLOT)
+ return CC_POLICY_PROTECTED_KEY;
+ else
+ return CC_INVALID_PROTECTED_KEY;
+}
+
static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
unsigned int keylen)
{
@@ -261,18 +292,13 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
- /* This check the size of the hardware key token */
+ /* This check the size of the protected key token */
if (keylen != sizeof(hki)) {
- dev_err(dev, "Unsupported HW key size %d.\n", keylen);
+ dev_err(dev, "Unsupported protected key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- if (ctx_p->flow_mode != S_DIN_to_AES) {
- dev_err(dev, "HW key not supported for non-AES flows\n");
- return -EINVAL;
- }
-
memcpy(&hki, key, keylen);
/* The real key len for crypto op is the size of the HW key
@@ -286,31 +312,70 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
return -EINVAL;
}
- ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
- if (ctx_p->hw.key1_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
- return -EINVAL;
- }
+ ctx_p->keylen = keylen;
- if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
- if (hki.hw_key1 == hki.hw_key2) {
- dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
- hki.hw_key1, hki.hw_key2);
+ switch (cc_slot_to_key_type(hki.hw_key1)) {
+ case CC_HW_PROTECTED_KEY:
+ if (ctx_p->flow_mode == S_DIN_to_SM4) {
+ dev_err(dev, "Only AES HW protected keys are supported\n");
return -EINVAL;
}
- ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
- if (ctx_p->hw.key2_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key2 number (%d)\n",
- hki.hw_key2);
+
+ ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki.hw_key1);
return -EINVAL;
}
- }
- ctx_p->keylen = keylen;
- ctx_p->hw_key = true;
- dev_dbg(dev, "cc_is_hw_key ret 0");
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki.hw_key1 == hki.hw_key2) {
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki.hw_key1, hki.hw_key2);
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki.hw_key2);
+ return -EINVAL;
+ }
+ }
+
+ ctx_p->key_type = CC_HW_PROTECTED_KEY;
+ dev_dbg(dev, "HW protected key %d/%d set\n.",
+ ctx_p->hw.key1_slot, ctx_p->hw.key2_slot);
+ break;
+
+ case CC_POLICY_PROTECTED_KEY:
+ if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) {
+ dev_err(dev, "CPP keys not supported in this hardware revision.\n");
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode != DRV_CIPHER_CBC &&
+ ctx_p->cipher_mode != DRV_CIPHER_CTR) {
+ dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n");
+ return -EINVAL;
+ }
+
+ ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1);
+ if (ctx_p->flow_mode == S_DIN_to_AES)
+ ctx_p->cpp.alg = CC_CPP_AES;
+ else /* Must be SM4 due to sethkey registration */
+ ctx_p->cpp.alg = CC_CPP_SM4;
+ ctx_p->key_type = CC_POLICY_PROTECTED_KEY;
+ dev_dbg(dev, "policy protected key alg: %d slot: %d.\n",
+ ctx_p->cpp.alg, ctx_p->cpp.slot);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1);
+ return -EINVAL;
+ }
return 0;
}
@@ -321,7 +386,6 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- u32 tmp[DES3_EDE_EXPKEY_WORDS];
struct cc_crypto_alg *cc_alg =
container_of(tfm->__crt_alg, struct cc_crypto_alg,
skcipher_alg.base);
@@ -339,7 +403,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
return -EINVAL;
}
- ctx_p->hw_key = false;
+ ctx_p->key_type = CC_UNPROTECTED_KEY;
/*
* Verify DES weak keys
@@ -347,6 +411,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
* HW does the expansion on its own.
*/
if (ctx_p->flow_mode == S_DIN_to_DES) {
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
if (keylen == DES3_EDE_KEY_SIZE &&
__des3_ede_setkey(tmp, &tfm->crt_flags, key,
DES3_EDE_KEY_SIZE)) {
@@ -399,7 +464,77 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
return 0;
}
-static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ return S_AES_to_DOUT;
+ case S_DIN_to_DES:
+ return S_DES_to_DOUT;
+ case S_DIN_to_SM4:
+ return S_SM4_to_DOUT;
+ default:
+ return ctx_p->flow_mode;
+ }
+}
+
+static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = cc_out_setup_mode(ctx_p);
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
+ return;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Read next IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ } else {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
+ }
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
struct cc_hw_desc desc[],
@@ -423,11 +558,13 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
du_size = cc_alg->data_unit;
switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
case DRV_CIPHER_CBC:
case DRV_CIPHER_CBC_CTS:
case DRV_CIPHER_CTR:
case DRV_CIPHER_OFB:
- /* Load cipher state */
+ /* Load IV */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
NS_BIT);
@@ -441,57 +578,15 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
}
(*seq_size)++;
- /*FALLTHROUGH*/
- case DRV_CIPHER_ECB:
- /* Load key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (flow_mode == S_DIN_to_AES) {
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key1_slot);
- } else {
- set_din_type(&desc[*seq_size], DMA_DLLI,
- key_dma_addr, ((key_len == 24) ?
- AES_MAX_KEY_SIZE :
- key_len), NS_BIT);
- }
- set_key_size_aes(&desc[*seq_size], key_len);
- } else {
- /*des*/
- set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
- key_len, NS_BIT);
- set_key_size_des(&desc[*seq_size], key_len);
- }
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
- /* Load AES key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key1_slot);
- } else {
- set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
- (key_len / 2), NS_BIT);
- }
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
-
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key2_slot);
} else {
@@ -505,7 +600,7 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
(*seq_size)++;
- /* Set state */
+ /* Load IV */
hw_desc_init(&desc[*seq_size]);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -521,48 +616,113 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
}
}
-static void cc_setup_cipher_data(struct crypto_tfm *tfm,
- struct cipher_req_ctx *req_ctx,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- void *areq, struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
{
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- unsigned int flow_mode = ctx_p->flow_mode;
-
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
- flow_mode = DIN_AES_DOUT;
- break;
+ return DIN_AES_DOUT;
case S_DIN_to_DES:
- flow_mode = DIN_DES_DOUT;
- break;
+ return DIN_DES_DOUT;
case S_DIN_to_SM4:
- flow_mode = DIN_SM4_DOUT;
- break;
+ return DIN_SM4_DOUT;
default:
- dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
- return;
+ return ctx_p->flow_mode;
}
- /* Process */
- if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
- dev_dbg(dev, " data params addr %pad length 0x%X\n",
- &sg_dma_address(src), nbytes);
- dev_dbg(dev, " data params addr %pad length 0x%X\n",
- &sg_dma_address(dst), nbytes);
+}
+
+static void cc_setup_key_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ unsigned int din_size;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ case DRV_CIPHER_ECB:
+ /* Load key */
hw_desc_init(&desc[*seq_size]);
- set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
- nbytes, NS_BIT);
- set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
- nbytes, NS_BIT, (!areq ? 0 : 1));
- if (areq)
- set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
+ /* We use the AES key size coding for all CPP algs */
+ set_key_size_aes(&desc[*seq_size], key_len);
+ set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot);
+ flow_mode = cc_out_flow_mode(ctx_p);
+ } else {
+ if (flow_mode == S_DIN_to_AES) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ /* CC_UNPROTECTED_KEY
+ * Invalid keys are filtered out in
+ * sethkey()
+ */
+ din_size = (key_len == 24) ?
+ AES_MAX_KEY_SIZE : key_len;
+
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, din_size,
+ NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], key_len);
+ } else {
+ /*des*/
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, key_len, NS_BIT);
+ set_key_size_des(&desc[*seq_size], key_len);
+ }
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ }
set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
- } else {
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* Load AES key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ (key_len / 2), NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, void *areq,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
/* bypass */
dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
&req_ctx->mlli_params.mlli_dma_addr,
@@ -577,7 +737,38 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
req_ctx->mlli_params.mlli_len);
set_flow_mode(&desc[*seq_size], BYPASS);
(*seq_size)++;
+ }
+}
+
+static void cc_setup_flow_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ unsigned int flow_mode = cc_out_flow_mode(ctx_p);
+ bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
+ ctx_p->cipher_mode == DRV_CIPHER_ECB);
+ /* Process */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(src), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(dst), nbytes);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+ nbytes, NS_BIT);
+ set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+ nbytes, NS_BIT, (!last_desc ? 0 : 1));
+ if (last_desc)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ } else {
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_MLLI,
ctx_p->drvdata->mlli_sram_addr,
@@ -589,7 +780,7 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
set_dout_mlli(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
} else {
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
@@ -600,9 +791,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
(LLI_ENTRY_BYTE_SIZE *
req_ctx->in_mlli_nents)),
req_ctx->out_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
}
- if (areq)
+ if (last_desc)
set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
set_flow_mode(&desc[*seq_size], flow_mode);
@@ -610,38 +801,6 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
}
}
-/*
- * Update a CTR-AES 128 bit counter
- */
-static void cc_update_ctr(u8 *ctr, unsigned int increment)
-{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
- IS_ALIGNED((unsigned long)ctr, 8)) {
-
- __be64 *high_be = (__be64 *)ctr;
- __be64 *low_be = high_be + 1;
- u64 orig_low = __be64_to_cpu(*low_be);
- u64 new_low = orig_low + (u64)increment;
-
- *low_be = __cpu_to_be64(new_low);
-
- if (new_low < orig_low)
- *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
- } else {
- u8 *pos = (ctr + AES_BLOCK_SIZE);
- u8 val;
- unsigned int size;
-
- for (; increment; increment--)
- for (size = AES_BLOCK_SIZE; size; size--) {
- val = *--pos + 1;
- *pos = val;
- if (val)
- break;
- }
- }
-}
-
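
The deleted cc_update_ctr() becomes unnecessary because the read-IV descriptor added by this patch makes the hardware write the next counter/IV back into the request context. What the software fallback computed was a 128-bit big-endian increment; the aligned fast path, as a standalone sketch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Advance a 16-byte big-endian CTR counter by 'inc' blocks.
 * Assumes 'ctr' is 8-byte aligned (the removed helper also had a
 * byte-wise path for unaligned buffers).
 */
static void ctr128_add(u8 ctr[16], u64 inc)
{
	__be64 *hi = (__be64 *)ctr;
	__be64 *lo = hi + 1;
	u64 old_lo = be64_to_cpu(*lo);
	u64 new_lo = old_lo + inc;

	*lo = cpu_to_be64(new_lo);
	if (new_lo < old_lo)		/* carry into the high half */
		*hi = cpu_to_be64(be64_to_cpu(*hi) + 1);
}
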
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
@@ -649,44 +808,15 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- unsigned int len;
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
-
- switch (ctx_p->cipher_mode) {
- case DRV_CIPHER_CBC:
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req->iv, req->dst, len,
- ivsize, 0);
- }
- break;
-
- case DRV_CIPHER_CTR:
- /* Compute the counter of the last block */
- len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
- cc_update_ctr((u8 *)req->iv, len);
- break;
-
- default:
- break;
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ memcpy(req->iv, req_ctx->iv, ivsize);
+ kzfree(req_ctx->iv);
}
- kzfree(req_ctx->iv);
-
skcipher_request_complete(req, err);
}
@@ -741,6 +871,13 @@ static int cc_cipher_process(struct skcipher_request *req,
cc_req.user_cb = (void *)cc_cipher_complete;
cc_req.user_arg = (void *)req;
+ /* Setup CPP operation details */
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
+ cc_req.cpp.is_cpp = true;
+ cc_req.cpp.alg = ctx_p->cpp.alg;
+ cc_req.cpp.slot = ctx_p->cpp.slot;
+ }
+
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
@@ -755,11 +892,16 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_2: Create sequence */
- /* Setup processing */
- cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup IV and XEX key used */
+ cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup MLLI line, if needed */
+ cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
+ /* Setup key */
+ cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
/* Data processing */
- cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
- &seq_len);
+ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
+ /* Read next IV */
+ cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
/* STAT_PHASE_3: Lock HW and push sequence */
@@ -774,7 +916,6 @@ static int cc_cipher_process(struct skcipher_request *req,
exit_process:
if (rc != -EINPROGRESS && rc != -EBUSY) {
- kzfree(req_ctx->backup_info);
kzfree(req_ctx->iv);
}
@@ -792,31 +933,10 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
static int cc_cipher_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- gfp_t flags = cc_gfp_flags(&req->base);
- unsigned int len;
memset(req_ctx, 0, sizeof(*req_ctx));
- if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
- (req->cryptlen >= ivsize)) {
-
- /* Allocate and save the last IV sized bytes of the source,
- * which will be lost in case of in-place decryption.
- */
- req_ctx->backup_info = kzalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
-
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
- ivsize, 0);
- }
-
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
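
The backup_info dance removed above existed to satisfy the skcipher API contract: on return, req->iv must hold the state needed to continue the stream (the last ciphertext block for CBC, the advanced counter for CTR). With the hardware now writing that state back via cc_setup_readiv_desc(), a caller-side view of the contract looks like this; a hedged sketch assuming a synchronous tfm, with an illustrative helper name:

#include <crypto/skcipher.h>

/* Encrypt two fragments as one CBC stream; len1 must be a multiple
 * of the block size, and 'iv' is updated by each call per the
 * skcipher API contract.
 */
static int cbc_encrypt_two_parts(struct skcipher_request *req,
				 struct scatterlist *src1,
				 struct scatterlist *dst1, unsigned int len1,
				 struct scatterlist *src2,
				 struct scatterlist *dst2, unsigned int len2,
				 u8 *iv)
{
	int err;

	skcipher_request_set_crypt(req, src1, dst1, len1, iv);
	err = crypto_skcipher_encrypt(req);	/* iv := last C block */
	if (err)
		return err;

	skcipher_request_set_crypt(req, src2, dst2, len2, iv);
	return crypto_skcipher_encrypt(req);	/* continues the chain */
}
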
@@ -838,6 +958,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts512(paes)",
@@ -856,6 +977,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts4096(paes)",
@@ -874,6 +996,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv(paes)",
@@ -891,6 +1014,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv512(paes)",
@@ -909,6 +1033,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv4096(paes)",
@@ -927,6 +1052,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker(paes)",
@@ -944,6 +1070,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker512(paes)",
@@ -962,6 +1089,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker4096(paes)",
@@ -980,6 +1108,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ecb(paes)",
@@ -997,6 +1126,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cbc(paes)",
@@ -1014,6 +1144,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ofb(paes)",
@@ -1031,6 +1162,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cts(cbc(paes))",
@@ -1048,6 +1180,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ctr(paes)",
@@ -1065,6 +1198,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts(aes)",
@@ -1429,6 +1563,42 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_713,
.std_body = CC_STD_OSCCA,
},
+ {
+ .name = "cbc(psm4)",
+ .driver_name = "cbc-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
+ {
+ .name = "ctr(psm4)",
+ .driver_name = "ctr-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
};
static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
@@ -1504,7 +1674,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
ARRAY_SIZE(skcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
- !(drvdata->std_bodies & skcipher_algs[alg].std_body))
+ !(drvdata->std_bodies & skcipher_algs[alg].std_body) ||
+ (drvdata->sec_disabled && skcipher_algs[alg].sec_func))
continue;
dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 4dbc0a1e6d5c..da3a38707fae 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_cipher.h
* ARM CryptoCell Cipher Crypto API
@@ -20,7 +20,6 @@ struct cipher_req_ctx {
u32 in_mlli_nents;
u32 out_nents;
u32 out_mlli_nents;
- u8 *backup_info; /*store iv for generated IV flow*/
u8 *iv;
struct mlli_params mlli_params;
};
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index c8dac273c563..ccf960a0d989 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_CRYPTO_CTX_H_
#define _CC_CRYPTO_CTX_H_
@@ -55,6 +55,14 @@
#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+#define CC_CPP_NUM_SLOTS 8
+#define CC_CPP_NUM_ALGS 2
+
+enum cc_cpp_alg {
+ CC_CPP_SM4 = 1,
+ CC_CPP_AES = 0
+};
+
enum drv_engine_type {
DRV_ENGINE_NULL = 0,
DRV_ENGINE_AES = 1,
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5fa05a7bcf36..566999738698 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/debugfs.h>
@@ -25,9 +25,24 @@ struct cc_debugfs_ctx {
*/
static struct dentry *cc_debugfs_dir;
-static struct debugfs_reg32 debug_regs[] = {
+static struct debugfs_reg32 ver_sig_regs[] = {
{ .name = "SIGNATURE" }, /* Must be 0th */
{ .name = "VERSION" }, /* Must be 1st */
+};
+
+static struct debugfs_reg32 pid_cid_regs[] = {
+ CC_DEBUG_REG(PERIPHERAL_ID_0),
+ CC_DEBUG_REG(PERIPHERAL_ID_1),
+ CC_DEBUG_REG(PERIPHERAL_ID_2),
+ CC_DEBUG_REG(PERIPHERAL_ID_3),
+ CC_DEBUG_REG(PERIPHERAL_ID_4),
+ CC_DEBUG_REG(COMPONENT_ID_0),
+ CC_DEBUG_REG(COMPONENT_ID_1),
+ CC_DEBUG_REG(COMPONENT_ID_2),
+ CC_DEBUG_REG(COMPONENT_ID_3),
+};
+
+static struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),
@@ -53,10 +68,7 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
struct cc_debugfs_ctx *ctx;
- struct debugfs_regset32 *regset;
-
- debug_regs[0].offset = drvdata->sig_offset;
- debug_regs[1].offset = drvdata->ver_offset;
+ struct debugfs_regset32 *regset, *verset;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -75,8 +87,26 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
debugfs_create_regset32("regs", 0400, ctx->dir, regset);
debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
- drvdata->debugfs = ctx;
+ verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
+ /* Failing here is not important enough to fail the module load */
+ if (!verset)
+ goto out;
+
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ ver_sig_regs[0].offset = drvdata->sig_offset;
+ ver_sig_regs[1].offset = drvdata->ver_offset;
+ verset->regs = ver_sig_regs;
+ verset->nregs = ARRAY_SIZE(ver_sig_regs);
+ } else {
+ verset->regs = pid_cid_regs;
+ verset->nregs = ARRAY_SIZE(pid_cid_regs);
+ }
+ verset->base = drvdata->cc_base;
+ debugfs_create_regset32("version", 0400, ctx->dir, verset);
+
+out:
+ drvdata->debugfs = ctx;
return 0;
}
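The debugfs rework above keeps one "regs" set for the debug registers and adds a "version" set whose members depend on the hardware revision. A reduced sketch of the debugfs_create_regset32() idiom it uses, with example offsets only (all demo_* names are hypothetical):

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/kernel.h>

static struct debugfs_reg32 demo_regs[] = {
	{ .name = "SIGNATURE", .offset = 0x0A24 },	/* example offsets */
	{ .name = "VERSION",   .offset = 0x0A28 },
};

static void demo_create_version(struct device *dev, void __iomem *base,
				struct dentry *dir)
{
	struct debugfs_regset32 *set;

	set = devm_kzalloc(dev, sizeof(*set), GFP_KERNEL);
	if (!set)
		return;		/* debugfs is best effort, as in the hunk */

	set->regs = demo_regs;
	set->nregs = ARRAY_SIZE(demo_regs);
	set->base = base;
	debugfs_create_regset32("version", 0400, dir, set);
}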
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 01cbd9a95659..664ff402e0e9 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_DEBUGFS_H__
#define __CC_DEBUGFS_H__
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 3bcc6c76e090..86ac7b443355 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -30,27 +30,47 @@
bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
-
bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
+static bool cc_sec_disable;
+module_param_named(sec_disable, cc_sec_disable, bool, 0600);
+MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
+
struct cc_hw_data {
char *name;
enum cc_hw_rev rev;
u32 sig;
+ u32 cidr_0123;
+ u32 pidr_0124;
int std_bodies;
};
+#define CC_NUM_IDRS 4
+
+/* Note: PIDR3 holds CMOD/Rev, so it is ignored for HW identification purposes */
+static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
+ CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
+ CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
+};
+
+static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
+ CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
+ CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
+};
+
/* Hardware revisions defs. */
/* The 703 is an OSCCA-only variant of the 713 */
static const struct cc_hw_data cc703_hw = {
- .name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA
+ .name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
};
static const struct cc_hw_data cc713_hw = {
- .name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL
+ .name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc712_hw = {
@@ -78,6 +98,20 @@ static const struct of_device_id arm_ccree_dev_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
+static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
+{
+ int i;
+ union {
+ u8 regs[CC_NUM_IDRS];
+ __le32 val;
+ } idr;
+
+ for (i = 0; i < CC_NUM_IDRS; ++i)
+ idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);
+
+ return le32_to_cpu(idr.val);
+}
+
void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
char prefix[64];
@@ -114,12 +148,12 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr;
/* Completion interrupt - most probable */
- if (irr & CC_COMP_IRQ_MASK) {
- /* Mask AXI completion interrupt - will be unmasked in
- * Deferred service handler
+ if (irr & drvdata->comp_mask) {
+ /* Mask all completion interrupts - will be unmasked in
+ * deferred service handler
*/
- cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
- irr &= ~CC_COMP_IRQ_MASK;
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
+ irr &= ~drvdata->comp_mask;
complete_request(drvdata);
}
#ifdef CONFIG_CRYPTO_FIPS
@@ -159,11 +193,14 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
unsigned int val, cache_params;
struct device *dev = drvdata_to_dev(drvdata);
- /* Unmask all AXI interrupt sources AXI_CFG1 register */
- val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
- cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
- dev_dbg(dev, "AXIM_CFG=0x%08X\n",
- cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ /* Unmask all AXI interrupt sources in the AXI_CFG1 register. */
+ /* AXI interrupt configuration is obsolete starting at cc7x3. */
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ }
/* Clear all pending interrupts */
val = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -171,7 +208,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
/* Unmask relevant interrupt cause */
- val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
+ val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;
if (drvdata->hw_rev >= CC_HW_REV_712)
val |= CC_GPR0_IRQ_MASK;
@@ -201,7 +238,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
- u32 signature_val;
+ u32 val, hw_rev_pidr, sig_cidr;
u64 dma_mask;
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
@@ -231,6 +268,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
}
+ new_drvdata->comp_mask = CC_COMP_IRQ_MASK;
+
platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;
@@ -311,22 +350,57 @@ static int init_cc_resources(struct platform_device *plat_dev)
return rc;
}
+ new_drvdata->sec_disabled = cc_sec_disable;
+
if (hw_rev->rev <= CC_HW_REV_712) {
/* Verify correct mapping */
- signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
- if (signature_val != hw_rev->sig) {
+ val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+ if (val != hw_rev->sig) {
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, hw_rev->sig);
+ val, hw_rev->sig);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ sig_cidr = val;
+ hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
+ } else {
+ /* Verify correct mapping */
+ val = cc_read_idr(new_drvdata, pidr_0124_offsets);
+ if (val != hw_rev->pidr_0124) {
+ dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
+ val, hw_rev->pidr_0124);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ hw_rev_pidr = val;
+
+ val = cc_read_idr(new_drvdata, cidr_0123_offsets);
+ if (val != hw_rev->cidr_0123) {
+ dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
+ val, hw_rev->cidr_0123);
rc = -EINVAL;
goto post_clk_err;
}
- dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
+ sig_cidr = val;
+
+ /* Check security disable state */
+ val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
+ val &= CC_SECURITY_DISABLED_MASK;
+ new_drvdata->sec_disabled |= !!val;
+
+ if (!new_drvdata->sec_disabled) {
+ new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
+ if (new_drvdata->std_bodies & CC_STD_NIST)
+ new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
+ }
}
+ if (new_drvdata->sec_disabled)
+ dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
+
/* Display HW versions */
- dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
- hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
- DRV_MODULE_VERSION);
+ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
+ hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
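cc_read_idr() above reassembles a single identification word from four registers that each carry eight significant bits, using a union so no shifting is needed and the result is byte-order safe. The same pattern in isolation, assuming a readl()-based accessor (demo_read_idr() is hypothetical):

#include <linux/io.h>
#include <linux/kernel.h>

static u32 demo_read_idr(void __iomem *base, const u32 offsets[4])
{
	union {
		u8 regs[4];
		__le32 val;
	} idr;
	int i;

	for (i = 0; i < 4; i++)
		idr.regs[i] = readl(base + offsets[i]); /* keeps the low byte */

	return le32_to_cpu(idr.val);
}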
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 33dbf3e6d15d..b76181335c08 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_driver.h
* ARM CryptoCell Linux Crypto Driver
@@ -65,10 +65,32 @@ enum cc_std_body {
#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
+
#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+#define CC_CPP_AES_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))
+
+#define CC_CPP_SM4_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))
+
/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
@@ -81,7 +103,6 @@ enum cc_std_body {
#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080
-#define MAX_ICV_NENTS_SUPPORTED 2
/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
@@ -90,6 +111,12 @@ enum cc_std_body {
* field in the HW descriptor. The DMA engine +8 that value.
*/
+struct cc_cpp_req {
+ bool is_cpp;
+ enum cc_cpp_alg alg;
+ u8 slot;
+};
+
#define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
void (*user_cb)(struct device *dev, void *req, int err);
@@ -104,6 +131,7 @@ struct cc_crypto_req {
/* The generated IV size required, 8/16 B allowed. */
unsigned int ivgen_size;
struct completion seq_compl; /* request completion */
+ struct cc_cpp_req cpp;
};
/**
@@ -136,6 +164,8 @@ struct cc_drvdata {
u32 sig_offset;
u32 ver_offset;
int std_bodies;
+ bool sec_disabled;
+ u32 comp_mask;
};
struct cc_crypto_alg {
@@ -162,12 +192,14 @@ struct cc_alg_template {
int auth_mode;
u32 min_hw_rev;
enum cc_std_body std_body;
+ bool sec_func;
unsigned int data_unit;
struct cc_drvdata *drvdata;
};
struct async_gen_req_ctx {
dma_addr_t iv_dma_addr;
+ u8 *iv;
enum drv_crypto_direction op_type;
};
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index b4d0a6d983e0..5ad3ffb7acaa 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/fips.h>
@@ -49,8 +49,6 @@ void cc_fips_fini(struct cc_drvdata *drvdata)
/* Kill tasklet */
tasklet_kill(&fips_h->tasklet);
-
- kfree(fips_h);
drvdata->fips_handle = NULL;
}
@@ -72,20 +70,28 @@ static inline void tee_fips_error(struct device *dev)
dev_err(dev, "TEE reported error!\n");
}
+/*
+ * This function checks whether a CryptoCell TEE FIPS error occurred
+ * and, in such a case, triggers a system error.
+ */
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
+{
+ struct device *dev = drvdata_to_dev(p_drvdata);
+
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error(dev);
+}
+
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
- struct device *dev = drvdata_to_dev(drvdata);
- u32 irq, state, val;
+ u32 irq, val;
irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
if (irq) {
- state = cc_ioread(drvdata, CC_REG(GPR_HOST));
-
- if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(drvdata);
}
/* after verifying that there is nothing to do,
@@ -104,7 +110,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
if (p_drvdata->hw_rev < CC_HW_REV_712)
return 0;
- fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+ fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL);
if (!fips_h)
return -ENOMEM;
@@ -113,8 +119,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
dev_dbg(dev, "Initializing fips tasklet\n");
tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
- if (!cc_get_tee_fips_status(p_drvdata))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(p_drvdata);
return 0;
}
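The kzalloc()/kfree() to devm_kzalloc() conversion here (mirrored in cc_ivgen.c and cc_sram_mgr.c below) ties the handle's lifetime to the device, so the fini path only has to undo non-devm state such as the tasklet. A minimal sketch of the resulting shape (demo_* names hypothetical):

#include <linux/device.h>
#include <linux/interrupt.h>

struct demo_fips_handle {
	struct tasklet_struct tasklet;
};

static struct demo_fips_handle *demo_fips_init(struct device *dev)
{
	/* Freed automatically on driver unbind; fini needs no kfree(). */
	return devm_kzalloc(dev, sizeof(struct demo_fips_handle), GFP_KERNEL);
}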
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
index 645e096a7a82..fc33eeb4d566 100644
--- a/drivers/crypto/ccree/cc_fips.h
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_FIPS_H__
#define __CC_FIPS_H__
@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
void cc_fips_fini(struct cc_drvdata *drvdata);
void fips_handler(struct cc_drvdata *drvdata);
void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
#else /* CONFIG_CRYPTO_FIPS */
@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
bool ok) {}
static inline void fips_handler(struct cc_drvdata *drvdata) {}
+static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
#endif /* CONFIG_CRYPTO_FIPS */
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 2c4ddc8fb76b..a6abe4e3bb0e 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -69,6 +69,7 @@ struct cc_hash_alg {
struct hash_key_req_ctx {
u32 keylen;
dma_addr_t key_dma_addr;
+ u8 *key;
};
/* hash per-session context */
@@ -280,9 +281,13 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static void cc_digest_complete(struct device *dev, void *cc_req, int err)
@@ -295,10 +300,14 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static void cc_hash_complete(struct device *dev, void *cc_req, int err)
@@ -311,10 +320,14 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
@@ -730,13 +743,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
+ ctx->key_params.key = NULL;
if (keylen) {
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
- key, keylen);
+ ctx->key_params.key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -887,6 +907,9 @@ out:
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}
+
+ kzfree(ctx->key_params.key);
+
return rc;
}
@@ -913,11 +936,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen;
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -969,6 +997,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+ kzfree(ctx->key_params.key);
+
return rc;
}
@@ -1621,7 +1651,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA224,
@@ -1648,7 +1678,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA384,
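The setkey changes above fix a genuine DMA bug: the key buffer passed in by the caller may live on the stack, which must never be handed to dma_map_single(). The pattern is to kmemdup() the key into kmalloc'd memory, map that copy, and kzfree() it once the request is done. A reduced sketch (demo_map_key() is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int demo_map_key(struct device *dev, const u8 *key,
			unsigned int keylen, u8 **shadow, dma_addr_t *dma)
{
	*shadow = kmemdup(key, keylen, GFP_KERNEL); /* heap copy is DMA-safe */
	if (!*shadow)
		return -ENOMEM;

	*dma = dma_map_single(dev, *shadow, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kzfree(*shadow);	/* wipe and free the key copy */
		*shadow = NULL;
		return -ENOMEM;
	}
	return 0;
}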
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 2e5bf8b0bbb6..0d6dc61484d7 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_hash.h
* ARM CryptoCell Hash Crypto API
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index 616b2e1c41ba..d0764147573f 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#ifndef __CC_HOST_H__
#define __CC_HOST_H__
@@ -7,33 +7,102 @@
// --------------------------------------
// BLOCK: HOST_P
// --------------------------------------
+
+
+/* IRR */
#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT 0x3UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT 0x4UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT 0x5UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT 0x6UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT 0x7UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT 0x9UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT 0xCUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT 0xDUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT 0xEUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT 0xFUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT 0x10UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT 0x11UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT 0x12UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT 0x14UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
-#define CC_HOST_IMR_REG_OFFSET 0xA04UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+
+/* IMR */
+#define CC_HOST_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT 0x3UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT 0x4UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT 0x5UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT 0x6UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT 0x7UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT 0x9UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT 0xAUL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT 0xCUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT 0xDUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT 0xEUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT 0xFUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT 0x10UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT 0x11UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT 0x12UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT 0x14UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+
+/* ICR */
#define CC_HOST_ICR_REG_OFFSET 0xA08UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
@@ -45,6 +114,9 @@
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
@@ -132,6 +204,49 @@
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
// --------------------------------------
+// BLOCK: ID_REGISTERS
+// --------------------------------------
+#define CC_PERIPHERAL_ID_4_REG_OFFSET 0x0FD0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SIZE 0x4UL
+#define CC_PIDRESERVED0_REG_OFFSET 0x0FD4UL
+#define CC_PIDRESERVED1_REG_OFFSET 0x0FD8UL
+#define CC_PIDRESERVED2_REG_OFFSET 0x0FDCUL
+#define CC_PERIPHERAL_ID_0_REG_OFFSET 0x0FE0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_PERIPHERAL_ID_1_REG_OFFSET 0x0FE4UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_2_REG_OFFSET 0x0FE8UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SIZE 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SHIFT 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SIZE 0x1UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REG_OFFSET 0x0FECUL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_0_REG_OFFSET 0x0FF0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_1_REG_OFFSET 0x0FF4UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SHIFT 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_2_REG_OFFSET 0x0FF8UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_3_REG_OFFSET 0x0FFCUL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SIZE 0x8UL
+// --------------------------------------
// BLOCK: HOST_SRAM
// --------------------------------------
#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 7a9b90db7db7..9f4db9956e91 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_HW_QUEUE_DEFS_H__
#define __CC_HW_QUEUE_DEFS_H__
@@ -28,11 +28,13 @@
GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
#define WORD0_VALUE CC_GENMASK(0, VALUE)
+#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
+#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
#define WORD2_VALUE CC_GENMASK(2, VALUE)
#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
@@ -176,6 +178,15 @@ enum cc_hw_crypto_key {
END_OF_KEYS = S32_MAX,
};
+#define CC_NUM_HW_KEY_SLOTS 4
+#define CC_FIRST_HW_KEY_SLOT 0
+#define CC_LAST_HW_KEY_SLOT (CC_FIRST_HW_KEY_SLOT + CC_NUM_HW_KEY_SLOTS - 1)
+
+#define CC_NUM_CPP_KEY_SLOTS 8
+#define CC_FIRST_CPP_KEY_SLOT 16
+#define CC_LAST_CPP_KEY_SLOT (CC_FIRST_CPP_KEY_SLOT + \
+ CC_NUM_CPP_KEY_SLOTS - 1)
+
enum cc_hw_aes_key_size {
AES_128_KEY = 0,
AES_192_KEY = 1,
@@ -189,6 +200,9 @@ enum cc_hash_cipher_pad {
HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
};
+#define CC_CPP_DIN_ADDR 0xFF00FF00UL
+#define CC_CPP_DIN_SIZE 0xFF00FFUL
+
/*****************************/
/* Descriptor packing macros */
/*****************************/
@@ -249,6 +263,25 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
}
/*
+ * Set up the special CPP descriptor
+ *
+ * @pdesc: pointer to the HW descriptor struct
+ * @slot: CPP key slot number
+ */
+static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
+{
+ pdesc->word[0] |= CC_CPP_DIN_ADDR;
+
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DIN_SIZE);
+ pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
+
+ pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
+}
+
+/*
* Set the DIN field of a HW descriptor to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
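set_cpp_crypto_key() packs the descriptor words with FIELD_PREP() over GENMASK()/BIT()-derived masks, the same idiom used throughout this header. The mechanism in isolation, with example field positions only (the DEMO_* layout is hypothetical):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_WORD1_DIN_SIZE	GENMASK(25, 0)	/* example positions only */
#define DEMO_WORD1_LOCK_QUEUE	BIT(28)

static void demo_pack_word1(u32 *word1, u32 din_size)
{
	*word1 |= FIELD_PREP(DEMO_WORD1_DIN_SIZE, din_size);
	*word1 |= FIELD_PREP(DEMO_WORD1_LOCK_QUEUE, 1);
}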
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
index 769458323394..99dc69383e20 100644
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <crypto/ctr.h>
#include "cc_driver.h"
@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
}
ivgen_ctx->pool = NULL_SRAM_ADDR;
-
- /* release "this" context */
- kfree(ivgen_ctx);
}
/*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
int rc;
/* Allocate "this" context */
- ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+ ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
if (!ivgen_ctx)
return -ENOMEM;
+ drvdata->ivgen_handle = ivgen_ctx;
+
/* Allocate pool's header for initial enc. key/IV */
ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
&ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
goto out;
}
- drvdata->ivgen_handle = ivgen_ctx;
-
return cc_init_iv_sram(drvdata);
out:
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
index b6ac16903dda..a9f5e8bba4f1 100644
--- a/drivers/crypto/ccree/cc_ivgen.h
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_IVGEN_H__
#define __CC_IVGEN_H__
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
index 8d7262a35156..582bae450596 100644
--- a/drivers/crypto/ccree/cc_kernel_regs.h
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_CRYS_KERNEL_H__
#define __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
index 64b15ac9f1d3..f891ab813f41 100644
--- a/drivers/crypto/ccree/cc_lli_defs.h
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_LLI_DEFS_H_
#define _CC_LLI_DEFS_H_
@@ -14,7 +14,7 @@
#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
-#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 8
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6ff7e75ad90e..2dad9c9543c6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -11,6 +11,7 @@
#include "cc_ivgen.h"
#include "cc_hash.h"
#include "cc_pm.h"
+#include "cc_fips.h"
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
@@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = cc_suspend_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
return rc;
}
fini_cc_regs(drvdata);
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
return 0;
}
@@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
+ /* Enable the device source clock */
rc = cc_clk_on(drvdata);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = init_cc_regs(drvdata, false);
if (rc) {
dev_err(dev, "init_cc_regs (%x)\n", rc);
return rc;
}
+ /* Check whether a TEE FIPS error occurred during power down */
+ cc_tee_handle_fips_error(drvdata);
rc = cc_resume_req_queue(drvdata);
if (rc) {
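The reordering above means suspend arms HOST_POWER_DOWN_EN only after the request queue is quiesced and the registers saved, while resume clears it only once the clock is back on, then rechecks the TEE FIPS status before restarting the queue. A schematic of the resulting ordering; every demo_* helper and the register offset are hypothetical:

struct demo_dev;
int demo_suspend_queue(struct demo_dev *cc);
int demo_resume_queue(struct demo_dev *cc);
void demo_save_regs(struct demo_dev *cc);
int demo_restore_regs(struct demo_dev *cc);
int demo_clk_on(struct demo_dev *cc);
void demo_clk_off(struct demo_dev *cc);
void demo_iowrite(struct demo_dev *cc, unsigned int reg, unsigned int val);
void demo_check_tee_fips(struct demo_dev *cc);

#define DEMO_POWER_DOWN_EN	0x01C	/* example register offset */

static int demo_pm_suspend(struct demo_dev *cc)
{
	int rc = demo_suspend_queue(cc); /* stop accepting work first */

	if (rc)
		return rc;
	demo_save_regs(cc);		/* registers are still powered here */
	demo_iowrite(cc, DEMO_POWER_DOWN_EN, 1); /* arm power-down last */
	demo_clk_off(cc);
	return 0;
}

static int demo_pm_resume(struct demo_dev *cc)
{
	int rc = demo_clk_on(cc);	/* clock on before any register I/O */

	if (rc)
		return rc;
	demo_iowrite(cc, DEMO_POWER_DOWN_EN, 0);
	rc = demo_restore_regs(cc);
	if (rc)
		return rc;
	demo_check_tee_fips(cc);	/* an error may have fired while down */
	return demo_resume_queue(cc);
}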
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 907a6db4d6c0..6190cdba5dad 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_pm.h
*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 83a8aaae61c7..0bc6ccb0b899 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
+#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
@@ -52,11 +53,38 @@ struct cc_bl_item {
bool notif;
};
+static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
+};
+
static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif
+static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
+{
+ alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
+ slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
+
+ return cc_cpp_int_masks[alg][slot];
+}
+
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -336,10 +364,12 @@ static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
struct cc_bl_item *bli)
{
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
spin_lock_bh(&mgr->bl_lock);
list_add_tail(&bli->list, &mgr->backlog);
++mgr->bl_len;
+ dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
spin_unlock_bh(&mgr->bl_lock);
tasklet_schedule(&mgr->comptask);
}
@@ -349,7 +379,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
struct cc_bl_item *bli;
struct cc_crypto_req *creq;
- struct crypto_async_request *req;
+ void *req;
bool ivgen;
unsigned int total_len;
struct device *dev = drvdata_to_dev(drvdata);
@@ -359,17 +389,20 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
while (mgr->bl_len) {
bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
spin_unlock(&mgr->bl_lock);
+
creq = &bli->creq;
- req = (struct crypto_async_request *)creq->user_arg;
+ req = creq->user_arg;
/*
* Notify the request we're moving out of the backlog
* but only if we haven't done so already.
*/
if (!bli->notif) {
- req->complete(req, -EINPROGRESS);
+ creq->user_cb(dev, req, -EINPROGRESS);
bli->notif = true;
}
@@ -579,6 +612,8 @@ static void proc_completions(struct cc_drvdata *drvdata)
drvdata->request_mgr_handle;
unsigned int *tail = &request_mgr_handle->req_queue_tail;
unsigned int *head = &request_mgr_handle->req_queue_head;
+ int rc;
+ u32 mask;
while (request_mgr_handle->axi_completed) {
request_mgr_handle->axi_completed--;
@@ -596,8 +631,22 @@ static void proc_completions(struct cc_drvdata *drvdata)
cc_req = &request_mgr_handle->req_queue[*tail];
+ if (cc_req->cpp.is_cpp) {
+ dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
+ cc_req->cpp.slot, cc_req->cpp.alg);
+ mask = cc_cpp_int_mask(cc_req->cpp.alg,
+ cc_req->cpp.slot);
+ rc = (drvdata->irq & mask ? -EPERM : 0);
+ dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
+ drvdata->irq, rc);
+ } else {
+ dev_dbg(dev, "None CPP request completion\n");
+ rc = 0;
+ }
+
if (cc_req->user_cb)
- cc_req->user_cb(dev, cc_req->user_arg, 0);
+ cc_req->user_cb(dev, cc_req->user_arg, rc);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,47 +667,50 @@ static void comp_handler(unsigned long devarg)
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
-
+ struct device *dev = drvdata_to_dev(drvdata);
u32 irq;
- irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+ dev_dbg(dev, "Completion handler called!\n");
+ irq = (drvdata->irq & drvdata->comp_mask);
- if (irq & CC_COMP_IRQ_MASK) {
- /* To avoid the interrupt from firing as we unmask it,
- * we clear it now
- */
- cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+ /* To prevent the interrupt from firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
- /* Avoid race with above clear: Test completion counter
- * once more
- */
- request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
-
- while (request_mgr_handle->axi_completed) {
- do {
- proc_completions(drvdata);
- /* At this point (after proc_completions()),
- * request_mgr_handle->axi_completed is 0.
- */
- request_mgr_handle->axi_completed =
- cc_axi_comp_count(drvdata);
- } while (request_mgr_handle->axi_completed > 0);
+ /* Avoid race with above clear: Test completion counter once more */
- cc_iowrite(drvdata, CC_REG(HOST_ICR),
- CC_COMP_IRQ_MASK);
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
+
+ dev_dbg(dev, "AXI completion after updated: %d\n",
+ request_mgr_handle->axi_completed);
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
+ irq = (drvdata->irq & drvdata->comp_mask);
+ proc_completions(drvdata);
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
- }
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
+
/* after verifying that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
- cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
cc_proc_backlog(drvdata);
+ dev_dbg(dev, "Comp. handler done.\n");
}
/*
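cc_cpp_int_mask() above clamps both table indices with array_index_nospec() so a mispredicted bounds check cannot be used to speculatively read beyond cc_cpp_int_masks[][] (Spectre v1). The canonical idiom pairs the clamp with an explicit range check; a minimal sketch (demo_* names hypothetical):

#include <linux/nospec.h>
#include <linux/types.h>

#define DEMO_NUM_SLOTS 8

static const u32 demo_masks[DEMO_NUM_SLOTS] = { 1, 2, 4, 8, 16, 32, 64, 128 };

static u32 demo_lookup(unsigned int slot)
{
	if (slot >= DEMO_NUM_SLOTS)
		return 0;
	/* Clamp the index to 0..DEMO_NUM_SLOTS-1 even under speculation. */
	slot = array_index_nospec(slot, DEMO_NUM_SLOTS);
	return demo_masks[slot];
}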
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index 573cb97af085..f46cf766fe4d 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_request_mgr.h
* Request Manager
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index c8c276f6dee9..62c885e6e791 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include "cc_driver.h"
#include "cc_sram_mgr.h"
@@ -19,8 +19,7 @@ struct cc_sram_ctx {
*/
void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
{
- /* Free "this" context */
- kfree(drvdata->sram_mgr_handle);
+ /* Nothing needed */
}
/**
@@ -48,7 +47,7 @@ int cc_sram_mgr_init(struct cc_drvdata *drvdata)
}
/* Allocate "this" context */
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
index d48649fb3323..1d14de9ee8c3 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.h
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_SRAM_MGR_H__
#define __CC_SRAM_MGR_H__
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 8d8cf80b9294..8a76fce22943 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2130,7 +2130,6 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
shash->tfm = hmacctx->base_hash;
- shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
@@ -3517,7 +3516,6 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
SHASH_DESC_ON_STACK(shash, base_hash);
shash->tfm = base_hash;
- shash->flags = crypto_shash_get_flags(base_hash);
bs = crypto_shash_blocksize(base_hash);
align = KEYCTX_ALIGN_PAD(max_authsize);
o_ptr = actx->h_iopad + param.result_size + align;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index dad212cabe63..d656be0a142b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1976,6 +1976,29 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
+static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+ struct hifn_device *dev = ctx->dev;
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
+ dev->flags &= ~HIFN_FLAG_OLD_KEY;
+
+ memcpy(ctx->key, key, len);
+ ctx->keysize = len;
+
+ return 0;
+}
+
static int hifn_handle_req(struct ablkcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -2240,7 +2263,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_cfb,
.decrypt = hifn_decrypt_3des_cfb,
},
@@ -2250,7 +2273,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_ofb,
.decrypt = hifn_decrypt_3des_ofb,
},
@@ -2261,7 +2284,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_cbc,
.decrypt = hifn_decrypt_3des_cbc,
},
@@ -2271,7 +2294,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_ecb,
.decrypt = hifn_decrypt_3des_ecb,
},
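This hifn conversion (and the hisilicon, safexcel, ixp4xx and marvell ones that follow) routes 3DES setkey through the crypto core's shared key check, which rejects keys with repeated DES subkeys when the tfm forbids weak keys, instead of each driver open-coding the test. A reduced sketch of the wrapper pattern, assuming a driver-specific demo_setkey() backend:

#include <crypto/des.h>
#include <linux/crypto.h>

int demo_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int len);	/* assumed driver backend */

static int demo_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int len)
{
	u32 flags = crypto_ablkcipher_get_flags(tfm);
	int err = __des3_verify_key(&flags, key);

	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(tfm, flags); /* report the cause */
		return err;
	}
	return demo_setkey(tfm, key, len);
}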
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index adc0cd8ae97b..02768af0dccd 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -365,20 +365,16 @@ static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
- if (keylen != DES_KEY_SIZE * 3)
- return -EINVAL;
-
- return sec_alg_skcipher_setkey(tfm, key, keylen,
+ return unlikely(des3_verify_key(tfm, key)) ?:
+ sec_alg_skcipher_setkey(tfm, key, keylen,
SEC_C_3DES_ECB_192_3KEY);
}
static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
- if (keylen != DES3_EDE_KEY_SIZE)
- return -EINVAL;
-
- return sec_alg_skcipher_setkey(tfm, key, keylen,
+ return unlikely(des3_verify_key(tfm, key)) ?:
+ sec_alg_skcipher_setkey(tfm, key, keylen,
SEC_C_3DES_CBC_192_3KEY);
}
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 7ef30a98cb24..de4be10b172f 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1039,13 +1039,12 @@ static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ int err;
- if (len != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ err = des3_verify_key(ctfm, key);
+ if (unlikely(err))
+ return err;
/* if context exits and key changed, need to invalidate it */
if (ctx->base.ctxr_dma) {
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 5c4659b04d70..9bbde2f26cac 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -758,14 +758,6 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
- } else if (cipher_cfg & MOD_3DES) {
- const u32 *K = (const u32 *)key;
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
- {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
- return -EINVAL;
- }
} else {
u32 tmp[DES_EXPKEY_WORDS];
if (des_ekey(tmp, key) == 0) {
@@ -859,6 +851,19 @@ out:
return ret;
}
+static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ u32 flags = crypto_ablkcipher_get_flags(tfm);
+ int err;
+
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err))
+ crypto_ablkcipher_set_flags(tfm, flags);
+
+ return ablk_setkey(tfm, key, key_len);
+}
+
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
@@ -1175,6 +1180,43 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+ u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.authkeylen > sizeof(ctx->authkey))
+ goto badkey;
+
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(tfm);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err))
+ goto badkey;
+
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ ctx->authkey_len = keys.authkeylen;
+ ctx->enckey_len = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+ crypto_aead_set_flags(tfm, flags);
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
static int aead_encrypt(struct aead_request *req)
{
return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
@@ -1220,6 +1262,7 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablk_des3_setkey,
}
}
},
@@ -1232,6 +1275,7 @@ static struct ixp_alg ixp4xx_algos[] = {
.cra_u = { .ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = ablk_des3_setkey,
}
}
},
@@ -1313,6 +1357,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = des3_aead_setkey,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1337,6 +1382,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = des3_aead_setkey,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1443,7 +1489,7 @@ static int __init ixp_module_init(void)
/* authenc */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC;
- cra->setkey = aead_setkey;
+ cra->setkey = cra->setkey ?: aead_setkey;
cra->setauthsize = aead_setauthsize;
cra->encrypt = aead_encrypt;
cra->decrypt = aead_decrypt;
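The registration tweak above, cra->setkey = cra->setkey ?: aead_setkey;, uses the GNU ?: extension: it keeps a template-provided setkey when one is set and falls back to the generic handler otherwise, evaluating the left operand only once. The operator in isolation (demo_* names hypothetical):

#include <linux/types.h>

typedef int (*demo_setkey_fn)(void *tfm, const u8 *key, unsigned int len);

static demo_setkey_fn demo_pick_setkey(demo_setkey_fn specific,
				       demo_setkey_fn generic)
{
	/* GNU "a ?: b" is "a ? a : b" with a evaluated only once. */
	return specific ?: generic;
}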
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index fb279b3a1ca1..2fd936b19c6d 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -299,13 +299,12 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
- if (len != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ err = des3_verify_key(cipher, key);
+ if (unlikely(err))
+ return err;
memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 99ff54cc8a15..fd456dd703bf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -135,11 +135,10 @@ static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
- unsigned int index, padlen;
+ unsigned int padlen;
buf[0] = 0x80;
/* Pad out to 56 mod 64 */
- index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
padlen = mv_cesa_ahash_pad_len(creq);
memset(buf + 1, 0, padlen - 1);
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 5f4f845adbb8..a0806ba40c68 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -365,7 +365,6 @@ static int mtk_sha_finish_hmac(struct ahash_request *req)
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
- shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
return crypto_shash_init(shash) ?:
crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
@@ -810,8 +809,6 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
- shash->flags = crypto_shash_get_flags(bctx->shash) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
deleted file mode 100644
index 519086730791..000000000000
--- a/drivers/crypto/mxc-scc.c
+++ /dev/null
@@ -1,767 +0,0 @@
-/*
- * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
- *
- * The driver is based on information gathered from
- * drivers/mxc/security/mxc_scc.c which can be found in
- * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/clk.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-
-/* Secure Memory (SCM) registers */
-#define SCC_SCM_RED_START 0x0000
-#define SCC_SCM_BLACK_START 0x0004
-#define SCC_SCM_LENGTH 0x0008
-#define SCC_SCM_CTRL 0x000C
-#define SCC_SCM_STATUS 0x0010
-#define SCC_SCM_ERROR_STATUS 0x0014
-#define SCC_SCM_INTR_CTRL 0x0018
-#define SCC_SCM_CFG 0x001C
-#define SCC_SCM_INIT_VECTOR_0 0x0020
-#define SCC_SCM_INIT_VECTOR_1 0x0024
-#define SCC_SCM_RED_MEMORY 0x0400
-#define SCC_SCM_BLACK_MEMORY 0x0800
-
-/* Security Monitor (SMN) Registers */
-#define SCC_SMN_STATUS 0x1000
-#define SCC_SMN_COMMAND 0x1004
-#define SCC_SMN_SEQ_START 0x1008
-#define SCC_SMN_SEQ_END 0x100C
-#define SCC_SMN_SEQ_CHECK 0x1010
-#define SCC_SMN_BIT_COUNT 0x1014
-#define SCC_SMN_BITBANK_INC_SIZE 0x1018
-#define SCC_SMN_BITBANK_DECREMENT 0x101C
-#define SCC_SMN_COMPARE_SIZE 0x1020
-#define SCC_SMN_PLAINTEXT_CHECK 0x1024
-#define SCC_SMN_CIPHERTEXT_CHECK 0x1028
-#define SCC_SMN_TIMER_IV 0x102C
-#define SCC_SMN_TIMER_CONTROL 0x1030
-#define SCC_SMN_DEBUG_DETECT_STAT 0x1034
-#define SCC_SMN_TIMER 0x1038
-
-#define SCC_SCM_CTRL_START_CIPHER BIT(2)
-#define SCC_SCM_CTRL_CBC_MODE BIT(1)
-#define SCC_SCM_CTRL_DECRYPT_MODE BIT(0)
-
-#define SCC_SCM_STATUS_LEN_ERR BIT(12)
-#define SCC_SCM_STATUS_SMN_UNBLOCKED BIT(11)
-#define SCC_SCM_STATUS_CIPHERING_DONE BIT(10)
-#define SCC_SCM_STATUS_ZEROIZING_DONE BIT(9)
-#define SCC_SCM_STATUS_INTR_STATUS BIT(8)
-#define SCC_SCM_STATUS_SEC_KEY BIT(7)
-#define SCC_SCM_STATUS_INTERNAL_ERR BIT(6)
-#define SCC_SCM_STATUS_BAD_SEC_KEY BIT(5)
-#define SCC_SCM_STATUS_ZEROIZE_FAIL BIT(4)
-#define SCC_SCM_STATUS_SMN_BLOCKED BIT(3)
-#define SCC_SCM_STATUS_CIPHERING BIT(2)
-#define SCC_SCM_STATUS_ZEROIZING BIT(1)
-#define SCC_SCM_STATUS_BUSY BIT(0)
-
-#define SCC_SMN_STATUS_STATE_MASK 0x0000001F
-#define SCC_SMN_STATE_START 0x0
-/* The SMN is zeroizing its RAM during reset */
-#define SCC_SMN_STATE_ZEROIZE_RAM 0x5
-/* SMN has passed internal checks */
-#define SCC_SMN_STATE_HEALTH_CHECK 0x6
-/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
-#define SCC_SMN_STATE_FAIL 0x9
-/* SCC is in secure state. SCM is using secret key. */
-#define SCC_SMN_STATE_SECURE 0xA
-/* SCC is not secure. SCM is using default key. */
-#define SCC_SMN_STATE_NON_SECURE 0xC
-
-#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM BIT(2)
-#define SCC_SCM_INTR_CTRL_CLR_INTR BIT(1)
-#define SCC_SCM_INTR_CTRL_MASK_INTR BIT(0)
-
-/* Size, in blocks, of Red memory. */
-#define SCC_SCM_CFG_BLACK_SIZE_MASK 0x07fe0000
-#define SCC_SCM_CFG_BLACK_SIZE_SHIFT 17
-/* Size, in blocks, of Black memory. */
-#define SCC_SCM_CFG_RED_SIZE_MASK 0x0001ff80
-#define SCC_SCM_CFG_RED_SIZE_SHIFT 7
-/* Number of bytes per block. */
-#define SCC_SCM_CFG_BLOCK_SIZE_MASK 0x0000007f
-
-#define SCC_SMN_COMMAND_TAMPER_LOCK BIT(4)
-#define SCC_SMN_COMMAND_CLR_INTR BIT(3)
-#define SCC_SMN_COMMAND_CLR_BIT_BANK BIT(2)
-#define SCC_SMN_COMMAND_EN_INTR BIT(1)
-#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM BIT(0)
-
-#define SCC_KEY_SLOTS 20
-#define SCC_MAX_KEY_SIZE 32
-#define SCC_KEY_SLOT_SIZE 32
-
-#define SCC_CRC_CCITT_START 0xFFFF
-
-/*
- * Offset into each RAM of the base of the area which is not
- * used for Stored Keys.
- */
-#define SCC_NON_RESERVED_OFFSET (SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
-
-/* Fixed padding for appending to plaintext to fill out a block */
-static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
-
-enum mxc_scc_state {
- SCC_STATE_OK,
- SCC_STATE_UNIMPLEMENTED,
- SCC_STATE_FAILED
-};
-
-struct mxc_scc {
- struct device *dev;
- void __iomem *base;
- struct clk *clk;
- bool hw_busy;
- spinlock_t lock;
- struct crypto_queue queue;
- struct crypto_async_request *req;
- int block_size_bytes;
- int black_ram_size_blocks;
- int memory_size_bytes;
- int bytes_remaining;
-
- void __iomem *red_memory;
- void __iomem *black_memory;
-};
-
-struct mxc_scc_ctx {
- struct mxc_scc *scc;
- struct scatterlist *sg_src;
- size_t src_nents;
- struct scatterlist *sg_dst;
- size_t dst_nents;
- unsigned int offset;
- unsigned int size;
- unsigned int ctrl;
-};
-
-struct mxc_scc_crypto_tmpl {
- struct mxc_scc *scc;
- struct crypto_alg alg;
-};
-
-static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
- struct crypto_async_request *req)
-{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mxc_scc *scc = ctx->scc;
- size_t len;
- void __iomem *from;
-
- if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
- from = scc->red_memory;
- else
- from = scc->black_memory;
-
- dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
- ctx->dst_nents * 8);
- len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
- from, ctx->size, ctx->offset);
- if (!len) {
- dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
- return -EINVAL;
- }
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "red memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->red_memory, ctx->size, 1);
- print_hex_dump(KERN_ERR,
- "black memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->black_memory, ctx->size, 1);
-#endif
-
- ctx->offset += len;
-
- if (ctx->offset < ablkreq->nbytes)
- return -EINPROGRESS;
-
- return 0;
-}
-
-static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
- struct mxc_scc_ctx *ctx)
-{
- struct mxc_scc *scc = ctx->scc;
- int nents;
-
- nents = sg_nents_for_len(req->src, req->nbytes);
- if (nents < 0) {
- dev_err(scc->dev, "Invalid number of src SC");
- return nents;
- }
- ctx->src_nents = nents;
-
- nents = sg_nents_for_len(req->dst, req->nbytes);
- if (nents < 0) {
- dev_err(scc->dev, "Invalid number of dst SC");
- return nents;
- }
- ctx->dst_nents = nents;
-
- ctx->size = 0;
- ctx->offset = 0;
-
- return 0;
-}
-
-static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
- struct mxc_scc_ctx *ctx,
- int result)
-{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mxc_scc *scc = ctx->scc;
-
- scc->req = NULL;
- scc->bytes_remaining = scc->memory_size_bytes;
-
- if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
- memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
- scc->block_size_bytes);
-
- req->complete(req, result);
- scc->hw_busy = false;
-
- return 0;
-}
-
-static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
- struct ablkcipher_request *req)
-{
- u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
- size_t len = min_t(size_t, req->nbytes - ctx->offset,
- ctx->scc->bytes_remaining);
- unsigned int padding_byte_count = 0;
- struct mxc_scc *scc = ctx->scc;
- void __iomem *to;
-
- if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
- to = scc->black_memory;
- else
- to = scc->red_memory;
-
- if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
- memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
- scc->block_size_bytes);
-
- len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
- to, len, ctx->offset);
- if (!len) {
- dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
- return -EINVAL;
- }
-
- ctx->size = len;
-
-#ifdef DEBUG
- dev_dbg(scc->dev, "copied %d bytes to 0x%p\n", len, to);
- print_hex_dump(KERN_ERR,
- "init vector0@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
- 1);
- print_hex_dump(KERN_ERR,
- "red memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->red_memory, ctx->size, 1);
- print_hex_dump(KERN_ERR,
- "black memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->black_memory, ctx->size, 1);
-#endif
-
- scc->bytes_remaining -= len;
-
- padding_byte_count = len % scc->block_size_bytes;
-
- if (padding_byte_count) {
- memcpy(padding_buffer, scc_block_padding, padding_byte_count);
- memcpy(to + len, padding_buffer, padding_byte_count);
- ctx->size += padding_byte_count;
- }
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "data to encrypt@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- to, ctx->size, 1);
-#endif
-
- return 0;
-}
-
-static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
- struct crypto_async_request *req)
-{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mxc_scc *scc = ctx->scc;
- int err;
-
- dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
- ablkreq->nbytes, ablkreq->src, ablkreq->dst);
-
- writel(0, scc->base + SCC_SCM_ERROR_STATUS);
-
- err = mxc_scc_put_data(ctx, ablkreq);
- if (err) {
- mxc_scc_ablkcipher_req_complete(req, ctx, err);
- return;
- }
-
- dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
- readl(scc->base + SCC_SCM_RED_START),
- readl(scc->base + SCC_SCM_BLACK_START));
-
- /* clear interrupt control registers */
- writel(SCC_SCM_INTR_CTRL_CLR_INTR,
- scc->base + SCC_SCM_INTR_CTRL);
-
- writel((ctx->size / ctx->scc->block_size_bytes) - 1,
- scc->base + SCC_SCM_LENGTH);
-
- dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
- ctx->size / ctx->scc->block_size_bytes,
- (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
- scc->red_memory);
-
- writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
-}
-
-static irqreturn_t mxc_scc_int(int irq, void *priv)
-{
- struct crypto_async_request *req;
- struct mxc_scc_ctx *ctx;
- struct mxc_scc *scc = priv;
- int status;
- int ret;
-
- status = readl(scc->base + SCC_SCM_STATUS);
-
- /* clear interrupt control registers */
- writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
-
- if (status & SCC_SCM_STATUS_BUSY)
- return IRQ_NONE;
-
- req = scc->req;
- if (req) {
- ctx = crypto_tfm_ctx(req->tfm);
- ret = mxc_scc_get_data(ctx, req);
- if (ret != -EINPROGRESS)
- mxc_scc_ablkcipher_req_complete(req, ctx, ret);
- else
- mxc_scc_ablkcipher_next(ctx, req);
- }
-
- return IRQ_HANDLED;
-}
-
-static int mxc_scc_cra_init(struct crypto_tfm *tfm)
-{
- struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct mxc_scc_crypto_tmpl *algt;
-
- algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
-
- ctx->scc = algt->scc;
- return 0;
-}
-
-static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
-{
- struct crypto_async_request *req, *backlog;
-
- if (ctx->scc->hw_busy)
- return;
-
- spin_lock_bh(&ctx->scc->lock);
- backlog = crypto_get_backlog(&ctx->scc->queue);
- req = crypto_dequeue_request(&ctx->scc->queue);
- ctx->scc->req = req;
- ctx->scc->hw_busy = true;
- spin_unlock_bh(&ctx->scc->lock);
-
- if (!req)
- return;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- mxc_scc_ablkcipher_next(ctx, req);
-}
-
-static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
- struct crypto_async_request *req)
-{
- int ret;
-
- spin_lock_bh(&ctx->scc->lock);
- ret = crypto_enqueue_request(&ctx->scc->queue, req);
- spin_unlock_bh(&ctx->scc->lock);
-
- if (ret != -EINPROGRESS)
- return ret;
-
- mxc_scc_dequeue_req_unlocked(ctx);
-
- return -EINPROGRESS;
-}
-
-static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
- struct ablkcipher_request *req)
-{
- int err;
-
- err = mxc_scc_ablkcipher_req_init(req, ctx);
- if (err)
- return err;
-
- return mxc_scc_queue_req(ctx, &req->base);
-}
-
-static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
- ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
- ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
- ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
- ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static void mxc_scc_hw_init(struct mxc_scc *scc)
-{
- int offset;
-
- offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
-
- /* Fill the RED_START register */
- writel(offset, scc->base + SCC_SCM_RED_START);
-
- /* Fill the BLACK_START register */
- writel(offset, scc->base + SCC_SCM_BLACK_START);
-
- scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
- SCC_NON_RESERVED_OFFSET;
-
- scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
- SCC_NON_RESERVED_OFFSET;
-
- scc->bytes_remaining = scc->memory_size_bytes;
-}
-
-static int mxc_scc_get_config(struct mxc_scc *scc)
-{
- int config;
-
- config = readl(scc->base + SCC_SCM_CFG);
-
- scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
-
- scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;
-
- scc->memory_size_bytes = (scc->block_size_bytes *
- scc->black_ram_size_blocks) -
- SCC_NON_RESERVED_OFFSET;
-
- return 0;
-}
-
-static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
-{
- enum mxc_scc_state state;
- int status;
-
- status = readl(scc->base + SCC_SMN_STATUS) &
- SCC_SMN_STATUS_STATE_MASK;
-
- /* If in Health Check, try to bringup to secure state */
- if (status & SCC_SMN_STATE_HEALTH_CHECK) {
- /*
- * Write a simple algorithm to the Algorithm Sequence
- * Checker (ASC)
- */
- writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
- writel(0x5555, scc->base + SCC_SMN_SEQ_END);
- writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
-
- status = readl(scc->base + SCC_SMN_STATUS) &
- SCC_SMN_STATUS_STATE_MASK;
- }
-
- switch (status) {
- case SCC_SMN_STATE_NON_SECURE:
- case SCC_SMN_STATE_SECURE:
- state = SCC_STATE_OK;
- break;
- case SCC_SMN_STATE_FAIL:
- state = SCC_STATE_FAILED;
- break;
- default:
- state = SCC_STATE_UNIMPLEMENTED;
- break;
- }
-
- return state;
-}
-
-static struct mxc_scc_crypto_tmpl scc_ecb_des = {
- .alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-scc",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mxc_scc_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mxc_scc_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .encrypt = mxc_scc_ecb_des_encrypt,
- .decrypt = mxc_scc_ecb_des_decrypt,
- }
- }
-};
-
-static struct mxc_scc_crypto_tmpl scc_cbc_des = {
- .alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-scc",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mxc_scc_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mxc_scc_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .encrypt = mxc_scc_cbc_des_encrypt,
- .decrypt = mxc_scc_cbc_des_decrypt,
- }
- }
-};
-
-static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
- &scc_ecb_des,
- &scc_cbc_des,
-};
-
-static int mxc_scc_crypto_register(struct mxc_scc *scc)
-{
- int i;
- int err = 0;
-
- for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
- scc_crypto_algs[i]->scc = scc;
- err = crypto_register_alg(&scc_crypto_algs[i]->alg);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- while (--i >= 0)
- crypto_unregister_alg(&scc_crypto_algs[i]->alg);
-
- return err;
-}
-
-static void mxc_scc_crypto_unregister(void)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
- crypto_unregister_alg(&scc_crypto_algs[i]->alg);
-}
-
-static int mxc_scc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct mxc_scc *scc;
- enum mxc_scc_state state;
- int irq;
- int ret;
- int i;
-
- scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
- if (!scc)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scc->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(scc->base))
- return PTR_ERR(scc->base);
-
- scc->clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(scc->clk)) {
- dev_err(dev, "Could not get ipg clock\n");
- return PTR_ERR(scc->clk);
- }
-
- ret = clk_prepare_enable(scc->clk);
- if (ret)
- return ret;
-
- /* clear error status register */
- writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
-
- /* clear interrupt control registers */
- writel(SCC_SCM_INTR_CTRL_CLR_INTR |
- SCC_SCM_INTR_CTRL_MASK_INTR,
- scc->base + SCC_SCM_INTR_CTRL);
-
- writel(SCC_SMN_COMMAND_CLR_INTR |
- SCC_SMN_COMMAND_EN_INTR,
- scc->base + SCC_SMN_COMMAND);
-
- scc->dev = dev;
- platform_set_drvdata(pdev, scc);
-
- ret = mxc_scc_get_config(scc);
- if (ret)
- goto err_out;
-
- state = mxc_scc_get_state(scc);
-
- if (state != SCC_STATE_OK) {
- dev_err(dev, "SCC in unusable state %d\n", state);
- ret = -EINVAL;
- goto err_out;
- }
-
- mxc_scc_hw_init(scc);
-
- spin_lock_init(&scc->lock);
- /* FIXME: calculate queue from RAM slots */
- crypto_init_queue(&scc->queue, 50);
-
- for (i = 0; i < 2; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- dev_err(dev, "failed to get irq resource: %d\n", irq);
- ret = irq;
- goto err_out;
- }
-
- ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
- IRQF_ONESHOT, dev_name(dev), scc);
- if (ret)
- goto err_out;
- }
-
- ret = mxc_scc_crypto_register(scc);
- if (ret) {
- dev_err(dev, "could not register algorithms");
- goto err_out;
- }
-
- dev_info(dev, "registered successfully.\n");
-
- return 0;
-
-err_out:
- clk_disable_unprepare(scc->clk);
-
- return ret;
-}
-
-static int mxc_scc_remove(struct platform_device *pdev)
-{
- struct mxc_scc *scc = platform_get_drvdata(pdev);
-
- mxc_scc_crypto_unregister();
-
- clk_disable_unprepare(scc->clk);
-
- return 0;
-}
-
-static const struct of_device_id mxc_scc_dt_ids[] = {
- { .compatible = "fsl,imx25-scc", .data = NULL, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
-
-static struct platform_driver mxc_scc_driver = {
- .probe = mxc_scc_probe,
- .remove = mxc_scc_remove,
- .driver = {
- .name = "mxc-scc",
- .of_match_table = mxc_scc_dt_ids,
- },
-};
-
-module_platform_driver(mxc_scc_driver);
-MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
-MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a2105cf33abb..b4429891e368 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -471,7 +471,7 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
wake_up_process(sdcp->thread[actx->chan]);
- return -EINPROGRESS;
+ return ret;
}
static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
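Both mxs-dcp return-value hunks replace a hard-coded -EINPROGRESS with whatever crypto_enqueue_request() actually returned, so a request placed on the backlog correctly reports -EBUSY to a caller that set CRYPTO_TFM_REQ_MAY_BACKLOG. The corrected shape, as a sketch with names from the driver and the surrounding details elided:

spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
spin_unlock(&sdcp->lock[actx->chan]);

wake_up_process(sdcp->thread[actx->chan]);

/* -EINPROGRESS normally, -EBUSY if the request went on the backlog */
return ret;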
@@ -700,11 +700,7 @@ static int dcp_chan_thread_sha(void *data)
struct crypto_async_request *backlog;
struct crypto_async_request *arq;
-
- struct dcp_sha_req_ctx *rctx;
-
- struct ahash_request *req;
- int ret, fini;
+ int ret;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -725,11 +721,7 @@ static int dcp_chan_thread_sha(void *data)
backlog->complete(backlog, -EINPROGRESS);
if (arq) {
- req = ahash_request_cast(arq);
- rctx = ahash_request_ctx(req);
-
ret = dcp_sha_req_to_buf(arq);
- fini = rctx->fini;
arq->complete(arq, ret);
}
}
@@ -797,7 +789,7 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);
- return -EINPROGRESS;
+ return ret;
}
static int dcp_sha_update(struct ahash_request *req)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 9450c41211b2..0d5d3d8eb680 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -469,8 +469,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
return err;
shash->tfm = child_shash;
- shash->flags = crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
bs = crypto_shash_blocksize(child_shash);
ds = crypto_shash_digestsize(child_shash);
@@ -788,13 +786,18 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
ctx->enc_type = n2alg->enc_type;
- if (keylen != (3 * DES_KEY_SIZE)) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
ctx->key_len = keylen;
memcpy(ctx->key.des3, key, keylen);
return 0;
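This n2 hunk is one of several conversions in this series to the same two-step shape (omap-des, picoxcell, qce, rockchip, stm32-cryp and talitos below all repeat it): read the tfm flags, let __des3_verify_key() vet the key -- on rejection it may set CRYPTO_TFM_RES_WEAK_KEY in the flags word -- then write the flags back so the caller can see why setkey failed. The recurring fragment, sketched for the ablkcipher variants:

u32 flags = crypto_ablkcipher_get_flags(cipher);
int err = __des3_verify_key(&flags, key);

if (unlikely(err)) {
	/* propagate CRYPTO_TFM_RES_WEAK_KEY etc. back to the tfm */
	crypto_ablkcipher_set_flags(cipher, flags);
	return err;
}

/* key accepted: store it exactly as the driver did before */
memcpy(ctx->key.des3, key, DES3_EDE_KEY_SIZE);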
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 66869976cfa2..57932848361b 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -296,7 +296,7 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
struct nx842_workmem *workmem;
struct nx842_scatterlist slin, slout;
struct nx_csbcpb *csbcpb;
- int ret = 0, max_sync_size;
+ int ret = 0;
unsigned long inbuf, outbuf;
struct vio_pfo_op op = {
.done = NULL,
@@ -319,7 +319,6 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
rcu_read_unlock();
return -ENODEV;
}
- max_sync_size = local_devdata->max_sync_size;
dev = local_devdata->dev;
/* Init scatterlist */
@@ -427,7 +426,7 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
struct nx842_workmem *workmem;
struct nx842_scatterlist slin, slout;
struct nx_csbcpb *csbcpb;
- int ret = 0, max_sync_size;
+ int ret = 0;
unsigned long inbuf, outbuf;
struct vio_pfo_op op = {
.done = NULL,
@@ -451,7 +450,6 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
rcu_read_unlock();
return -ENODEV;
}
- max_sync_size = local_devdata->max_sync_size;
dev = local_devdata->dev;
workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index d94e25df503b..f06565df2a12 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -353,7 +353,7 @@ static int decompress(struct nx842_crypto_ctx *ctx,
unsigned int adj_slen = slen;
u8 *src = p->in, *dst = p->out;
u16 padding = be16_to_cpu(g->padding);
- int ret, spadding = 0, dpadding = 0;
+ int ret, spadding = 0;
ktime_t timeout;
if (!slen || !required_len)
@@ -413,7 +413,6 @@ usesw:
spadding = 0;
dst = p->out;
dlen = p->oremain;
- dpadding = 0;
if (dlen < required_len) { /* have ignore bytes */
dst = ctx->dbounce;
dlen = BOUNCE_BUFFER_SIZE;
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index ad3358e74f5c..8f5820b78a83 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -105,8 +105,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -134,8 +133,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -279,8 +277,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
@@ -361,8 +358,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index a6764af83c6d..e06f0431dee5 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -162,8 +162,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
@@ -243,8 +242,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 92956bc6e45e..0293b17903d0 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -166,8 +166,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
@@ -249,8 +248,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 1ba2633e90d6..3d82d18ff810 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -656,9 +656,6 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE))
- return -EINVAL;
-
pr_debug("enter, keylen: %d\n", keylen);
/* Do we need to test against weak key? */
@@ -678,6 +675,28 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
+static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 flags;
+ int err;
+
+ pr_debug("enter, keylen: %d\n", keylen);
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT);
@@ -788,7 +807,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.cra_u.ablkcipher = {
.min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
- .setkey = omap_des_setkey,
+ .setkey = omap_des3_setkey,
.encrypt = omap_des_ecb_encrypt,
.decrypt = omap_des_ecb_decrypt,
}
@@ -811,7 +830,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des_setkey,
+ .setkey = omap_des3_setkey,
.encrypt = omap_des_cbc_encrypt,
.decrypt = omap_des_cbc_decrypt,
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 0641185bd82f..51b20abac464 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1055,7 +1055,6 @@ static int omap_sham_finish_hmac(struct ahash_request *req)
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
- shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
return crypto_shash_init(shash) ?:
crypto_shash_update(shash, bctx->opad, bs) ?:
@@ -1226,7 +1225,6 @@ static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_digest(shash, data, len, out);
}
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 21e5cae0a1e0..e641481a3cd9 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -39,7 +39,6 @@ static int padlock_sha_init(struct shash_desc *desc)
struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
dctx->fallback.tfm = ctx->fallback;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_init(&dctx->fallback);
}
@@ -48,7 +47,6 @@ static int padlock_sha_update(struct shash_desc *desc,
{
struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_update(&dctx->fallback, data, length);
}
@@ -65,7 +63,6 @@ static int padlock_sha_import(struct shash_desc *desc, const void *in)
struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
dctx->fallback.tfm = ctx->fallback;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_import(&dctx->fallback, in);
}
@@ -91,7 +88,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
unsigned int leftover;
int err;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_export(&dctx->fallback, &state);
if (err)
goto out;
@@ -153,7 +149,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
unsigned int leftover;
int err;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_export(&dctx->fallback, &state);
if (err)
goto out;
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 1b3acdeffede..05b89e703903 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -753,11 +753,6 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
- if (len > DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
if (unlikely(!des_ekey(tmp, key)) &&
(crypto_ablkcipher_get_flags(cipher) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
@@ -772,6 +767,30 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
}
/*
+ * Set the 3DES key for a block cipher transform. This also performs weak key
+ * checking if the transform has requested it.
+ */
+static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+ return 0;
+}
+
+/*
* Set the key for an AES block cipher. Some key lengths are not supported in
* hardware so this must also check whether a fallback is needed.
*/
@@ -1196,7 +1215,7 @@ static const struct dev_pm_ops spacc_pm_ops = {
static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
- return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
+ return dev ? dev_get_drvdata(dev) : NULL;
}
static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
@@ -1353,7 +1372,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
- .setkey = spacc_des_setkey,
+ .setkey = spacc_des3_setkey,
.encrypt = spacc_ablk_encrypt,
.decrypt = spacc_ablk_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1380,7 +1399,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
- .setkey = spacc_des_setkey,
+ .setkey = spacc_des3_setkey,
.encrypt = spacc_ablk_encrypt,
.decrypt = spacc_ablk_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 975c75198f56..c8d401646902 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -164,7 +164,6 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
memset(ctx->ipad, 0, block_size);
memset(ctx->opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
- shash->flags = 0x0;
if (auth_keylen > block_size) {
int ret = crypto_shash_digest(shash, auth_key,
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index c9f324730d71..692a7aaee749 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -1300,8 +1300,6 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg rsa = {
.encrypt = qat_rsa_enc,
.decrypt = qat_rsa_dec,
- .sign = qat_rsa_dec,
- .verify = qat_rsa_enc,
.set_pub_key = qat_rsa_setpubkey,
.set_priv_key = qat_rsa_setprivkey,
.max_size = qat_rsa_max_size,
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 154b6baa124e..8d3493855a70 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -198,6 +198,25 @@ weakkey:
return -EINVAL;
}
+static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+ unsigned int keylen)
+{
+ struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(ablk);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(ablk, flags);
+ return err;
+ }
+
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ return 0;
+}
+
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
struct crypto_tfm *tfm =
@@ -363,7 +382,8 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
alg->cra_ablkcipher.ivsize = def->ivsize;
alg->cra_ablkcipher.min_keysize = def->min_keysize;
alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
+ alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ?
+ qce_des3_setkey : qce_ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 02dac6ae7e53..313759521a0f 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -46,24 +46,36 @@ static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
+static int rk_des_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
- if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (!des_ekey(tmp, key) &&
+ (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
- if (keylen == DES_KEY_SIZE) {
- if (!des_ekey(tmp, key) &&
- (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+ return 0;
+}
+
+static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
}
ctx->keylen = keylen;
@@ -250,9 +262,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
dev->sg_src->offset + dev->sg_src->length - ivsize;
- /* store the iv that need to be updated in chain mode */
- if (ctx->mode & RK_CRYPTO_DEC)
+ /* Store the iv that need to be updated in chain mode.
+ * And update the IV buffer to contain the next IV for decryption mode.
+ */
+ if (ctx->mode & RK_CRYPTO_DEC) {
memcpy(ctx->iv, src_last_blk, ivsize);
+ sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
+ ivsize, dev->total - ivsize);
+ }
err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
if (!err)
@@ -288,13 +305,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
struct ablkcipher_request *req =
ablkcipher_request_cast(dev->async_req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u32 ivsize = crypto_ablkcipher_ivsize(tfm);
- if (ivsize == DES_BLOCK_SIZE)
- memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
- ivsize);
- else if (ivsize == AES_BLOCK_SIZE)
- memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+ /* Update the IV buffer to contain the next IV for encryption mode. */
+ if (!(ctx->mode & RK_CRYPTO_DEC)) {
+ if (dev->aligned) {
+ memcpy(req->info, sg_virt(dev->sg_dst) +
+ dev->sg_dst->length - ivsize, ivsize);
+ } else {
+ memcpy(req->info, dev->addr_vir +
+ dev->count - ivsize, ivsize);
+ }
+ }
}
static void rk_update_iv(struct rk_crypto_info *dev)
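The two rk3288 hunks above encode the CBC chaining rule: the next IV is always the last ciphertext block, which for decryption is the tail of the source (so it must be captured before an in-place operation overwrites it) and for encryption is the tail of the destination. As a compact sketch, with hypothetical variable names:

if (decrypt) {
	/* save it up front: in-place decryption will clobber src */
	sg_pcopy_to_buffer(src_sg, src_nents, next_iv, ivsize,
			   total_len - ivsize);
} else {
	/* after the hardware ran: tail of the ciphertext output */
	sg_pcopy_to_buffer(dst_sg, dst_nents, next_iv, ivsize,
			   total_len - ivsize);
}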
@@ -457,7 +480,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
- .setkey = rk_tdes_setkey,
+ .setkey = rk_des_setkey,
.encrypt = rk_des_ecb_encrypt,
.decrypt = rk_des_ecb_decrypt,
}
@@ -483,7 +506,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- .setkey = rk_tdes_setkey,
+ .setkey = rk_des_setkey,
.encrypt = rk_des_cbc_encrypt,
.decrypt = rk_des_cbc_decrypt,
}
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index f4e625cf53ca..9ef25230c199 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -241,7 +241,7 @@
struct samsung_aes_variant {
unsigned int aes_offset;
unsigned int hash_offset;
- const char *clk_names[];
+ const char *clk_names[2];
};
struct s5p_aes_reqctx {
@@ -1534,7 +1534,6 @@ static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_digest(shash, data, len, out);
}
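The clk_names change in the first s5p-sss hunk above turns a flexible array member into a fixed two-entry array. Statically initialising a flexible array member is a GNU extension with surprising sizeof behaviour (the trailing storage is not counted in the struct size), so giving the array its real size keeps the statically defined variant tables self-describing. A sketch of the fixed shape -- the offsets and clock names here are illustrative, not the driver's actual values:

struct samsung_aes_variant {
	unsigned int aes_offset;
	unsigned int hash_offset;
	const char *clk_names[2];	/* sized: sizeof now covers it */
};

static const struct samsung_aes_variant example_variant = {
	.aes_offset	= 0x200,
	.hash_offset	= 0x400,
	.clk_names	= { "aes", "hash" },
};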
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8c32a3059b4a..fd11162a915e 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -354,7 +354,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
u8 state;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
state = SAHARA_STATUS_GET_STATE(status);
@@ -406,7 +406,7 @@ static void sahara_dump_descriptors(struct sahara_dev *dev)
{
int i;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
@@ -427,7 +427,7 @@ static void sahara_dump_links(struct sahara_dev *dev)
{
int i;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
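The sahara hunks swap IS_ENABLED() for __is_defined() because IS_ENABLED() is meant for CONFIG_* symbols: it is true when either FOO or FOO_MODULE is defined, which is the wrong question for a bare compile-time macro like DEBUG. __is_defined(DEBUG) tests exactly the one macro. The underlying one-or-zero trick from <linux/kconfig.h> can be reproduced in user space; this sketch assumes DEBUG, when present, is defined to 1 (as -DDEBUG does):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)

int main(void)
{
	/* prints 1 when built with -DDEBUG, 0 otherwise */
	printf("%d\n", __is_defined(DEBUG));
	return 0;
}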
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 63aa78c0b12b..4491e2197d9f 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -24,6 +24,7 @@ config CRYPTO_DEV_STM32_CRYP
depends on ARCH_STM32
select CRYPTO_HASH
select CRYPTO_ENGINE
+ select CRYPTO_DES
help
This enables support for the CRYP (AES/DES/TDES) hw accelerator which
can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 23b0b7bd64c7..cddcc97875b2 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -137,7 +137,6 @@ struct stm32_cryp {
struct crypto_engine *engine;
- struct mutex lock; /* protects req / areq */
struct ablkcipher_request *req;
struct aead_request *areq;
@@ -394,6 +393,23 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
}
}
+static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
+{
+ struct ablkcipher_request *req = cryp->req;
+ u32 *tmp = req->info;
+
+ if (!tmp)
+ return;
+
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
+
+ if (is_aes(cryp)) {
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
+ }
+}
+
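stm32_cryp_get_iv() copies the IP's IV registers back into req->info once a non-AEAD request finishes; after CBC/CTR processing those registers hold the IV for the next chunk, and the crypto API contract is that req->info is updated in place. From the caller's side the contract looks like this sketch (simplified: asynchronous completion is ignored):

/* one logical CBC stream split over two requests; the driver's
 * copy-back is what keeps `iv` current between them */
ablkcipher_request_set_crypt(req, src1, dst1, len1, iv);
err = crypto_ablkcipher_encrypt(req);

ablkcipher_request_set_crypt(req, src2, dst2, len2, iv);
err = crypto_ablkcipher_encrypt(req);	/* continues the stream */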
static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
{
unsigned int i;
@@ -623,6 +639,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
/* Phase 4 : output tag */
err = stm32_cryp_read_auth_tag(cryp);
+ if (!err && (!(is_gcm(cryp) || is_ccm(cryp))))
+ stm32_cryp_get_iv(cryp);
+
if (cryp->sgs_copied) {
void *buf_in, *buf_out;
int pages, len;
@@ -645,18 +664,13 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);
- if (is_gcm(cryp) || is_ccm(cryp)) {
+ if (is_gcm(cryp) || is_ccm(cryp))
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
- cryp->areq = NULL;
- } else {
+ else
crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
err);
- cryp->req = NULL;
- }
memset(cryp->ctx->key, 0, cryp->ctx->keylen);
-
- mutex_unlock(&cryp->lock);
}
static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
@@ -753,19 +767,35 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ u32 tmp[DES_EXPKEY_WORDS];
+
if (keylen != DES_KEY_SIZE)
return -EINVAL;
- else
- return stm32_cryp_setkey(tfm, key, keylen);
+
+ if ((crypto_ablkcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
+ unlikely(!des_ekey(tmp, key))) {
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ return stm32_cryp_setkey(tfm, key, keylen);
}
static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
- if (keylen != (3 * DES_KEY_SIZE))
- return -EINVAL;
- else
- return stm32_cryp_setkey(tfm, key, keylen);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(tfm);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(tfm, flags);
+ return err;
+ }
+
+ return stm32_cryp_setkey(tfm, key, keylen);
}
static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -917,8 +947,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!cryp)
return -ENODEV;
- mutex_lock(&cryp->lock);
-
rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
@@ -930,6 +958,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (req) {
cryp->req = req;
+ cryp->areq = NULL;
cryp->total_in = req->nbytes;
cryp->total_out = cryp->total_in;
} else {
@@ -955,6 +984,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
* <---------- total_out ----------------->
*/
cryp->areq = areq;
+ cryp->req = NULL;
cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
cryp->total_in = areq->assoclen + areq->cryptlen;
if (is_encrypt(cryp))
@@ -976,19 +1006,19 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (cryp->in_sg_len < 0) {
dev_err(cryp->dev, "Cannot get in_sg_len\n");
ret = cryp->in_sg_len;
- goto out;
+ return ret;
}
cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
if (cryp->out_sg_len < 0) {
dev_err(cryp->dev, "Cannot get out_sg_len\n");
ret = cryp->out_sg_len;
- goto out;
+ return ret;
}
ret = stm32_cryp_copy_sgs(cryp);
if (ret)
- goto out;
+ return ret;
scatterwalk_start(&cryp->in_walk, cryp->in_sg);
scatterwalk_start(&cryp->out_walk, cryp->out_sg);
@@ -1000,10 +1030,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
}
ret = stm32_cryp_hw_init(cryp);
-out:
- if (ret)
- mutex_unlock(&cryp->lock);
-
return ret;
}
@@ -1943,8 +1969,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
cryp->dev = dev;
- mutex_init(&cryp->lock);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cryp->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cryp->regs))
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4a6cc8a3045d..bfc49e67124b 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -181,8 +181,6 @@ struct stm32_hash_dev {
u32 dma_mode;
u32 dma_maxburst;
- spinlock_t lock; /* lock to protect queue */
-
struct ahash_request *req;
struct crypto_engine *engine;
@@ -977,7 +975,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
pm_runtime_get_sync(hdev->dev);
- while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
+ while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();
rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
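The export path now waits for HASH_SR_BUSY to clear instead of polling a data-input-ready bit that is not guaranteed to assert at that point. The open-coded cpu_relax() loop is unbounded; an equivalent bounded form, sketched with the iopoll helper and assuming hdev->io_base is the register base the driver's accessors use:

#include <linux/iopoll.h>

u32 sr;
int err;

/* poll every 10 us, give up after 10 ms */
err = readl_poll_timeout(hdev->io_base + HASH_SR, sr,
			 !(sr & HASH_SR_BUSY), 10, 10000);
if (err)
	dev_err(hdev->dev, "hash core stuck busy\n");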
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 54fd714d53ca..b060a0810934 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -41,11 +41,6 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
if (!areq->cryptlen)
return 0;
- if (!areq->iv) {
- dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
- return -EINVAL;
- }
-
if (!areq->src || !areq->dst) {
dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
return -EINVAL;
@@ -134,6 +129,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
struct scatterlist *out_sg = areq->dst;
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun4i_ss_alg_template *algt;
u32 mode = ctx->mode;
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
u32 rx_cnt = SS_RX_DEFAULT;
@@ -153,20 +150,20 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int obo = 0; /* offset in bufo*/
unsigned int obl = 0; /* length of data in bufo */
unsigned long flags;
+ bool need_fallback;
if (!areq->cryptlen)
return 0;
- if (!areq->iv) {
- dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
- return -EINVAL;
- }
-
if (!areq->src || !areq->dst) {
dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
return -EINVAL;
}
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+ if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
+ need_fallback = true;
+
/*
* if we have only SGs with size multiple of 4,
* we can use the SS optimized function
@@ -182,9 +179,24 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
out_sg = sg_next(out_sg);
}
- if (no_chunk == 1)
+ if (no_chunk == 1 && !need_fallback)
return sun4i_ss_opti_poll(areq);
+ if (need_fallback) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL,
+ NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (ctx->mode & SS_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+ }
+
spin_lock_irqsave(&ss->slock, flags);
for (i = 0; i < op->keylen; i += 4)
@@ -458,6 +470,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct sun4i_ss_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
memset(op, 0, sizeof(struct sun4i_tfm_ctx));
@@ -468,9 +481,22 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct sun4i_cipher_req_ctx));
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
return 0;
}
+void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ crypto_free_sync_skcipher(op->fallback_tfm);
+}
+
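Allocating the fallback by cra_name with CRYPTO_ALG_NEED_FALLBACK as the mask is the standard trick for this pattern: the mask excludes any candidate that itself carries the NEED_FALLBACK flag, so the driver can never recursively select its own (now NEED_FALLBACK-flagged) algorithm. The tfm-lifetime half of the pattern, sketched:

/* init: pick a software implementation of the same algorithm */
op->fallback_tfm = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
					      0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm))
	return PTR_ERR(op->fallback_tfm);

/* exit: must be freed, or the fallback's module refcount leaks */
crypto_free_sync_skcipher(op->fallback_tfm);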
/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
@@ -495,7 +521,11 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
op->keylen = keylen;
memcpy(op->key, key, keylen);
- return 0;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the DES key, prepare the mode to be used */
@@ -525,7 +555,11 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keylen = keylen;
memcpy(op->key, key, keylen);
- return 0;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the 3DES key, prepare the mode to be used */
@@ -533,14 +567,18 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun4i_ss_ctx *ss = op->ss;
+ int err;
+
+ err = des3_verify_key(tfm, key);
+ if (unlikely(err))
+ return err;
- if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
- dev_err(ss->dev, "Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
op->keylen = keylen;
memcpy(op->key, key, keylen);
- return 0;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 89adf9e0fed2..05b3d3c32f6d 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -92,11 +92,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -107,17 +108,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.decrypt = sun4i_ss_ecb_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -134,11 +135,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -154,11 +156,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -175,11 +178,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -190,16 +194,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.decrypt = sun4i_ss_ecb_des3_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index a4b5ff2b72f8..f6936bb3b7be 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
}
} else {
/* Since we have the flag final, we can go up to modulo 4 */
- end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+ if (areq->nbytes < 4)
+ end = 0;
+ else
+ end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
}
/* TODO if SGlen % 4 and !op->len then DMA */
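The guard added in the sun4i-ss-hash hunk prevents an unsigned wrap: `end` is the number of bytes to push to the hardware this pass, rounded down to a whole word after accounting for the op->len bytes already buffered. When areq->nbytes is smaller than a word the subtraction can go negative, and in unsigned arithmetic that becomes a huge length. A worked example (values chosen to show the wrap; whether op->len can exceed the remainder in practice depends on the driver's buffering):

#include <stdio.h>

int main(void)
{
	unsigned int nbytes = 1, oplen = 5;
	unsigned int end = ((nbytes + oplen) / 4) * 4 - oplen;

	printf("end = %u\n", end);	/* 4294967295, not -1 */
	return 0;
}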
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index f3ac90692ac6..8c4ec9e93565 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -161,6 +161,7 @@ struct sun4i_tfm_ctx {
u32 keylen;
u32 keymode;
struct sun4i_ss_ctx *ss;
+ struct crypto_sync_skcipher *fallback_tfm;
};
struct sun4i_cipher_req_ctx {
@@ -203,6 +204,7 @@ int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq);
int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq);
int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
+void sun4i_ss_cipher_exit(struct crypto_tfm *tfm);
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index de78b54bcfb1..1d429fc073d1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -913,6 +913,54 @@ badkey:
return -EINVAL;
}
+static int aead_des3_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
+ goto badkey;
+
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(authenc);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(authenc, flags);
+ goto out;
+ }
+
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
+
+ ctx->keylen = keys.authkeylen + keys.enckeylen;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
+ ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
+ DMA_TO_DEVICE);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
/*
* talitos_edesc - s/w-extended descriptor
* @src_nents: number of segments in input scatterlist
@@ -1527,12 +1575,22 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct device *dev = ctx->dev;
- u32 tmp[DES_EXPKEY_WORDS];
- if (keylen > TALITOS_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+ memcpy(&ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 tmp[DES_EXPKEY_WORDS];
if (unlikely(crypto_ablkcipher_get_flags(cipher) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
@@ -1541,15 +1599,23 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return -EINVAL;
}
- if (ctx->keylen)
- dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+ return ablkcipher_setkey(cipher, key, keylen);
+}
- memcpy(&ctx->key, key, keylen);
- ctx->keylen = keylen;
+static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 flags;
+ int err;
- ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
- return 0;
+ return ablkcipher_setkey(cipher, key, keylen);
}
static void common_nonsnoop_unmap(struct device *dev,
@@ -2313,6 +2379,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2336,6 +2403,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2399,6 +2467,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2422,6 +2491,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2485,6 +2555,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2508,6 +2579,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2550,6 +2622,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2592,6 +2665,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2654,6 +2728,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2676,6 +2751,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2748,6 +2824,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
+ .setkey = ablkcipher_des_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2764,6 +2841,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
+ .setkey = ablkcipher_des_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2781,6 +2859,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablkcipher_des3_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2798,6 +2877,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablkcipher_des3_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3144,7 +3224,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_init = talitos_cra_init;
alg->cra_exit = talitos_cra_exit;
alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = ablkcipher_setkey;
+ alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
+ ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
break;
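Annotation: the bare `?:` is the GNU "Elvis" extension — `a ?: b` yields a when a is non-zero, else b. Spelled out in plain C, the new default assignment is equivalent to:

    if (!alg->cra_ablkcipher.setkey)
            alg->cra_ablkcipher.setkey = ablkcipher_setkey;

so templates such as the DES entries above can pre-set a key-checking wrapper while everything else keeps the generic setkey.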
@@ -3152,7 +3233,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg = &t_alg->algt.alg.aead.base;
alg->cra_exit = talitos_cra_exit;
t_alg->algt.alg.aead.init = talitos_cra_init_aead;
- t_alg->algt.alg.aead.setkey = aead_setkey;
+ t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
+ aead_setkey;
t_alg->algt.alg.aead.encrypt = aead_encrypt;
t_alg->algt.alg.aead.decrypt = aead_decrypt;
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
index b497ae3dde07..dc8fff29c4b3 100644
--- a/drivers/crypto/ux500/cryp/Makefile
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -3,11 +3,7 @@
# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
# * License terms: GNU General Public License (GPL) version 2 */
-ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_cryp_core.o := -DDEBUG
-CFLAGS_cryp.o := -DDEBUG
-CFLAGS_cryp_irq.o := -DDEBUG
-endif
+ccflags-$(CONFIG_CRYPTO_DEV_UX500_DEBUG) += -DDEBUG
obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
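Annotation: the kbuild `ccflags-$(CONFIG_...)` idiom applies the flag to every object built in the directory, so this one line subsumes the three per-object CFLAGS_ variables it replaces.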
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 3235611928f2..7a93cba0877f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1019,37 +1019,16 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- u32 *flags = &cipher->base.crt_flags;
- const u32 *K = (const u32 *)key;
- u32 tmp[DES3_EDE_EXPKEY_WORDS];
- int i, ret;
+ u32 flags;
+ int err;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- if (keylen != DES3_EDE_KEY_SIZE) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
- __func__);
- return -EINVAL;
- }
- /* Checking key interdependency for weak key detection. */
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
- __func__);
- return -EINVAL;
- }
- for (i = 0; i < 3; i++) {
- ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
- if (unlikely(ret == 0) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
- __func__);
- return -EINVAL;
- }
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
}
memcpy(ctx->key, key, keylen);
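Annotation: __des3_verify_key() centralizes the checks the deleted lines above open-coded. A sketch of its effective behavior, reconstructed from that removed ux500 code (see the crypto/des helpers for the authoritative implementation):

    static int des3_verify_key_sketch(u32 *flags, const u8 *key)
    {
            const u32 *K = (const u32 *)key;

            /* K1 == K2 or K2 == K3 degrades 3DES to single DES. */
            if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
                         !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
                (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
                    *flags |= CRYPTO_TFM_RES_WEAK_KEY;
                    return -EINVAL;
            }
            return 0;
    }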
@@ -1219,57 +1198,6 @@ static struct cryp_algo_template cryp_algs[] = {
{
.algomode = CRYP_ALGO_DES_ECB,
.crypto = {
- .cra_name = "des",
- .cra_driver_name = "des-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt
- }
- }
- }
-
- },
- {
- .algomode = CRYP_ALGO_TDES_ECB,
- .crypto = {
- .cra_name = "des3_ede",
- .cra_driver_name = "des3_ede-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt
- }
- }
- }
- },
- {
- .algomode = CRYP_ALGO_DES_ECB,
- .crypto = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-ux500",
.cra_priority = 300,
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index d7316f7a3a69..603a62081994 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include "aesp8-ppc.h"
@@ -78,20 +79,21 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
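Annotation: rationale for switching `+=` to `|=` here and in the sibling vmx files (illustrative return values assumed):

    /*
     * aes_p8_set_encrypt_key() and friends return non-zero on failure,
     * while crypto_cipher_setkey() returns e.g. -EINVAL (-22).  Adding
     * the codes produces meaningless sums and can in principle cancel
     * back to 0; OR-ing only records "something failed", which the
     * caller then normalizes to -EINVAL.
     */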
static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
crypto_cipher_encrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
@@ -108,7 +110,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
crypto_cipher_decrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
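Annotation: crypto_simd_usable() from <crypto/internal/simd.h> replaces the in_interrupt() heuristic; on powerpc it reduces to a may_use_simd()-style check, so the software fallback is taken whenever vector state genuinely cannot be touched, not merely in hard-IRQ context. The full dispatch pattern, completed from the visible context (the VSX branch body is an assumption based on the setkey path above):

    if (!crypto_simd_usable()) {
            /* VSX is off limits here: delegate to the fallback cipher. */
            crypto_cipher_encrypt_one(ctx->fallback, dst, src);
    } else {
            preempt_disable();
            pagefault_disable();
            enable_kernel_vsx();
            aes_p8_encrypt(src, dst, &ctx->enc_key);
            disable_kernel_vsx();
            pagefault_enable();
            preempt_enable();
    }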
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index c5c5ff82b52e..a1a9a6f0d42c 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
@@ -81,13 +82,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
@@ -99,7 +101,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
struct p8_aes_cbc_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
@@ -138,7 +140,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
struct p8_aes_cbc_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 8a2fe092cb8e..192a53512f5e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
@@ -83,8 +84,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
@@ -118,7 +120,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
struct p8_aes_ctr_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index ecd64e5cc5bb..00d412d811ae 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/skcipher.h>
@@ -86,14 +87,15 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
- ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+ ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
@@ -108,7 +110,7 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
struct p8_aes_xts_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index d6a9f63d65ba..de78282b8f44 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1854,7 +1854,7 @@ Lctr32_enc8x_three:
stvx_u $out1,$x10,$out
stvx_u $out2,$x20,$out
addi $out,$out,0x30
- b Lcbc_dec8x_done
+ b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_two:
@@ -1866,7 +1866,7 @@ Lctr32_enc8x_two:
stvx_u $out0,$x00,$out
stvx_u $out1,$x10,$out
addi $out,$out,0x20
- b Lcbc_dec8x_done
+ b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_one:
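Annotation: both fixed branches sit in the CTR32 encrypt epilogue; jumping to Lcbc_dec8x_done was evidently copied from the CBC decrypt path and would have executed the wrong epilogue for the two- and three-block tails.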
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index dd8b8716467a..b5a6883bb09e 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -23,16 +23,15 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/ghash.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/b128ops.h>
-#define IN_INTERRUPT in_interrupt()
-
void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
@@ -102,7 +101,6 @@ static int p8_ghash_init(struct shash_desc *desc)
dctx->bytes = 0;
memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
dctx->fallback_desc.tfm = ctx->fallback;
- dctx->fallback_desc.flags = desc->flags;
return crypto_shash_init(&dctx->fallback_desc);
}
@@ -131,7 +129,7 @@ static int p8_ghash_update(struct shash_desc *desc,
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
+ if (!crypto_simd_usable()) {
return crypto_shash_update(&dctx->fallback_desc, src,
srclen);
} else {
@@ -182,7 +180,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
+ if (!crypto_simd_usable()) {
return crypto_shash_final(&dctx->fallback_desc, out);
} else {
if (dctx->bytes) {
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 31a98dc6f849..a9f519830615 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -41,7 +41,7 @@ static struct crypto_alg *algs[] = {
NULL,
};
-int __init p8_init(void)
+static int __init p8_init(void)
{
int ret = 0;
struct crypto_alg **alg_it;
@@ -67,7 +67,7 @@ int __init p8_init(void)
return ret;
}
-void __exit p8_exit(void)
+static void __exit p8_exit(void)
{
struct crypto_alg **alg_it;
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index e0700bf4893a..5ef624fe3934 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -23,12 +23,38 @@ config DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
+ depends on m # until we can kill DEV_DAX_PMEM_COMPAT
default DEV_DAX
help
Support raw access to persistent memory. Note that this
driver consumes memory ranges allocated and exported by the
libnvdimm sub-system.
- Say Y if unsure
+ Say M if unsure
+
+config DEV_DAX_KMEM
+ tristate "KMEM DAX: volatile-use of persistent memory"
+ default DEV_DAX
+ depends on DEV_DAX
+ depends on MEMORY_HOTPLUG # for add_memory() and friends
+ help
+ Support access to persistent memory as if it were RAM. This
+ allows easier use of persistent memory by unmodified
+ applications.
+
+ To use this feature, a DAX device must be unbound from the
+ device_dax driver (PMEM DAX) and bound to this kmem driver
+ on each boot.
+
+ Say N if unsure.
+
+config DEV_DAX_PMEM_COMPAT
+ tristate "PMEM DAX: support the deprecated /sys/class/dax interface"
+ depends on DEV_DAX_PMEM
+ default DEV_DAX_PMEM
+ help
+ Older versions of the libdaxctl library expect to find all
+ device-dax instances under /sys/class/dax. If libdaxctl in
+ your distribution is older than v58, say M; otherwise say N.
endif
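Annotation: with this split, device-dax instances default to the new dax bus and are rebound from userspace through the bus's new_id/remove_id attributes (implemented in drivers/dax/bus.c below), along the lines of echo dax0.0 > /sys/bus/dax/drivers/device_dax/unbind followed by echo dax0.0 > /sys/bus/dax/drivers/kmem/new_id; the exact paths are assumed from the bus and module names, and daxctl is the supported tooling. The unusual 'depends on m' above forces DEV_DAX_PMEM to be module-only until the compat shim can be dropped.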
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 574286fac87c..81f7d54dadfb 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
-obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
+obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
dax-y := super.o
-dax_pmem-y := pmem.o
+dax-y += bus.o
device_dax-y := device.o
+
+obj-y += pmem/
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
new file mode 100644
index 000000000000..2109cfe80219
--- /dev/null
+++ b/drivers/dax/bus.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
+#include <linux/memremap.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/dax.h>
+#include "dax-private.h"
+#include "bus.h"
+
+static struct class *dax_class;
+
+static DEFINE_MUTEX(dax_bus_lock);
+
+#define DAX_NAME_LEN 30
+struct dax_id {
+ struct list_head list;
+ char dev_name[DAX_NAME_LEN];
+};
+
+static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ /*
+ * We only ever expect to handle device-dax instances, i.e. the
+ * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
+ */
+ return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
+}
+
+static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct dax_device_driver, drv);
+}
+
+static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
+ const char *dev_name)
+{
+ struct dax_id *dax_id;
+
+ lockdep_assert_held(&dax_bus_lock);
+
+ list_for_each_entry(dax_id, &dax_drv->ids, list)
+ if (sysfs_streq(dax_id->dev_name, dev_name))
+ return dax_id;
+ return NULL;
+}
+
+static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
+{
+ int match;
+
+ mutex_lock(&dax_bus_lock);
+ match = !!__dax_match_id(dax_drv, dev_name(dev));
+ mutex_unlock(&dax_bus_lock);
+
+ return match;
+}
+
+enum id_action {
+ ID_REMOVE,
+ ID_ADD,
+};
+
+static ssize_t do_id_store(struct device_driver *drv, const char *buf,
+ size_t count, enum id_action action)
+{
+ struct dax_device_driver *dax_drv = to_dax_drv(drv);
+ unsigned int region_id, id;
+ char devname[DAX_NAME_LEN];
+ struct dax_id *dax_id;
+ ssize_t rc = count;
+ int fields;
+
+ fields = sscanf(buf, "dax%d.%d", &region_id, &id);
+ if (fields != 2)
+ return -EINVAL;
+ sprintf(devname, "dax%d.%d", region_id, id);
+ if (!sysfs_streq(buf, devname))
+ return -EINVAL;
+
+ mutex_lock(&dax_bus_lock);
+ dax_id = __dax_match_id(dax_drv, buf);
+ if (!dax_id) {
+ if (action == ID_ADD) {
+ dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
+ if (dax_id) {
+ strncpy(dax_id->dev_name, buf, DAX_NAME_LEN);
+ list_add(&dax_id->list, &dax_drv->ids);
+ } else
+ rc = -ENOMEM;
+ } else
+ /* nothing to remove */;
+ } else if (action == ID_REMOVE) {
+ list_del(&dax_id->list);
+ kfree(dax_id);
+ } else
+ /* dax_id already added */;
+ mutex_unlock(&dax_bus_lock);
+
+ if (rc < 0)
+ return rc;
+ if (action == ID_ADD)
+ rc = driver_attach(drv);
+ if (rc)
+ return rc;
+ return count;
+}
+
+static ssize_t new_id_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ return do_id_store(drv, buf, count, ID_ADD);
+}
+static DRIVER_ATTR_WO(new_id);
+
+static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ return do_id_store(drv, buf, count, ID_REMOVE);
+}
+static DRIVER_ATTR_WO(remove_id);
+
+static struct attribute *dax_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ &driver_attr_remove_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(dax_drv);
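Annotation: do_id_store() only accepts a string that round-trips through the canonical 'dax%d.%d' format. Illustrative inputs:

    /*
     * "dax0.1"   -> sscanf() == 2, devname == "dax0.1", accepted
     *               (sysfs_streq() tolerates echo's trailing newline)
     * "dax0"     -> sscanf() == 1, rejected with -EINVAL
     * "dax0.1x"  -> devname "dax0.1" fails sysfs_streq(), -EINVAL
     */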
+
+static int dax_bus_match(struct device *dev, struct device_driver *drv);
+
+static struct bus_type dax_bus_type = {
+ .name = "dax",
+ .uevent = dax_bus_uevent,
+ .match = dax_bus_match,
+ .drv_groups = dax_drv_groups,
+};
+
+static int dax_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct dax_device_driver *dax_drv = to_dax_drv(drv);
+
+ /*
+ * All but the 'device-dax' driver, which has 'match_always'
+ * set, require an exact id match.
+ */
+ if (dax_drv->match_always)
+ return 1;
+
+ return dax_match_id(dax_drv, dev);
+}
+
+/*
+ * Rely on the fact that drvdata is set before the attributes are
+ * registered, and that the attributes are unregistered before drvdata
+ * is cleared to assume that drvdata is always valid.
+ */
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dax_region->id);
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t region_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%llu\n", (unsigned long long)
+ resource_size(&dax_region->res));
+}
+static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
+ region_size_show, NULL);
+
+static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", dax_region->align);
+}
+static DEVICE_ATTR_RO(align);
+
+static struct attribute *dax_region_attributes[] = {
+ &dev_attr_region_size.attr,
+ &dev_attr_align.attr,
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dax_region_attribute_group = {
+ .name = "dax_region",
+ .attrs = dax_region_attributes,
+};
+
+static const struct attribute_group *dax_region_attribute_groups[] = {
+ &dax_region_attribute_group,
+ NULL,
+};
+
+static void dax_region_free(struct kref *kref)
+{
+ struct dax_region *dax_region;
+
+ dax_region = container_of(kref, struct dax_region, kref);
+ kfree(dax_region);
+}
+
+void dax_region_put(struct dax_region *dax_region)
+{
+ kref_put(&dax_region->kref, dax_region_free);
+}
+EXPORT_SYMBOL_GPL(dax_region_put);
+
+static void dax_region_unregister(void *region)
+{
+ struct dax_region *dax_region = region;
+
+ sysfs_remove_groups(&dax_region->dev->kobj,
+ dax_region_attribute_groups);
+ dax_region_put(dax_region);
+}
+
+struct dax_region *alloc_dax_region(struct device *parent, int region_id,
+ struct resource *res, int target_node, unsigned int align,
+ unsigned long pfn_flags)
+{
+ struct dax_region *dax_region;
+
+ /*
+ * The DAX core assumes that it can store its private data in
+ * parent->driver_data. This WARN is a reminder / safeguard for
+ * developers of device-dax drivers.
+ */
+ if (dev_get_drvdata(parent)) {
+ dev_WARN(parent, "dax core failed to setup private data\n");
+ return NULL;
+ }
+
+ if (!IS_ALIGNED(res->start, align)
+ || !IS_ALIGNED(resource_size(res), align))
+ return NULL;
+
+ dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
+ if (!dax_region)
+ return NULL;
+
+ dev_set_drvdata(parent, dax_region);
+ memcpy(&dax_region->res, res, sizeof(*res));
+ dax_region->pfn_flags = pfn_flags;
+ kref_init(&dax_region->kref);
+ dax_region->id = region_id;
+ dax_region->align = align;
+ dax_region->dev = parent;
+ dax_region->target_node = target_node;
+ if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
+ kfree(dax_region);
+ return NULL;
+ }
+
+ kref_get(&dax_region->kref);
+ if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
+ return NULL;
+ return dax_region;
+}
+EXPORT_SYMBOL_GPL(alloc_dax_region);
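Annotation: a sketch of the expected call sequence for a region provider, pieced together from the pmem probe code elsewhere in this patch (res, pgmap, region_id, id, target_node and align are assumed to come from the caller's context):

    struct dax_region *dax_region;
    struct dev_dax *dev_dax;

    dax_region = alloc_dax_region(dev, region_id, &res, target_node,
                                  align, PFN_DEV | PFN_MAP);
    if (!dax_region)
            return -ENOMEM;

    dev_dax = devm_create_dev_dax(dax_region, id, &pgmap);

    /* child dev_dax instances now own the lifetime of the region */
    dax_region_put(dax_region);

    return PTR_ERR_OR_ZERO(dev_dax);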
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ unsigned long long size = resource_size(&dev_dax->region->res);
+
+ return sprintf(buf, "%llu\n", size);
+}
+static DEVICE_ATTR_RO(size);
+
+static int dev_dax_target_node(struct dev_dax *dev_dax)
+{
+ struct dax_region *dax_region = dev_dax->region;
+
+ return dax_region->target_node;
+}
+
+static ssize_t target_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
+}
+static DEVICE_ATTR_RO(target_node);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ /*
+ * We only ever expect to handle device-dax instances, i.e. the
+ * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
+ */
+ return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
+ return 0;
+ return a->mode;
+}
+
+static struct attribute *dev_dax_attributes[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_size.attr,
+ &dev_attr_target_node.attr,
+ NULL,
+};
+
+static const struct attribute_group dev_dax_attribute_group = {
+ .attrs = dev_dax_attributes,
+ .is_visible = dev_dax_visible,
+};
+
+static const struct attribute_group *dax_attribute_groups[] = {
+ &dev_dax_attribute_group,
+ NULL,
+};
+
+void kill_dev_dax(struct dev_dax *dev_dax)
+{
+ struct dax_device *dax_dev = dev_dax->dax_dev;
+ struct inode *inode = dax_inode(dax_dev);
+
+ kill_dax(dax_dev);
+ unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+}
+EXPORT_SYMBOL_GPL(kill_dev_dax);
+
+static void dev_dax_release(struct device *dev)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ struct dax_device *dax_dev = dev_dax->dax_dev;
+
+ dax_region_put(dax_region);
+ put_dax(dax_dev);
+ kfree(dev_dax);
+}
+
+static void unregister_dev_dax(void *dev)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ kill_dev_dax(dev_dax);
+ device_del(dev);
+ put_device(dev);
+}
+
+struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
+ struct dev_pagemap *pgmap, enum dev_dax_subsys subsys)
+{
+ struct device *parent = dax_region->dev;
+ struct dax_device *dax_dev;
+ struct dev_dax *dev_dax;
+ struct inode *inode;
+ struct device *dev;
+ int rc = -ENOMEM;
+
+ if (id < 0)
+ return ERR_PTR(-EINVAL);
+
+ dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
+ if (!dev_dax)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&dev_dax->pgmap, pgmap, sizeof(*pgmap));
+
+ /*
+ * No 'host' or dax_operations since there is no access to this
+ * device outside of mmap of the resulting character device.
+ */
+ dax_dev = alloc_dax(dev_dax, NULL, NULL);
+ if (!dax_dev)
+ goto err;
+
+ /* a device_dax instance is dead while the driver is not attached */
+ kill_dax(dax_dev);
+
+ /* from here on we're committed to teardown via dax_dev_release() */
+ dev = &dev_dax->dev;
+ device_initialize(dev);
+
+ dev_dax->dax_dev = dax_dev;
+ dev_dax->region = dax_region;
+ dev_dax->target_node = dax_region->target_node;
+ kref_get(&dax_region->kref);
+
+ inode = dax_inode(dax_dev);
+ dev->devt = inode->i_rdev;
+ if (subsys == DEV_DAX_BUS)
+ dev->bus = &dax_bus_type;
+ else
+ dev->class = dax_class;
+ dev->parent = parent;
+ dev->groups = dax_attribute_groups;
+ dev->release = dev_dax_release;
+ dev_set_name(dev, "dax%d.%d", dax_region->id, id);
+
+ rc = device_add(dev);
+ if (rc) {
+ kill_dev_dax(dev_dax);
+ put_device(dev);
+ return ERR_PTR(rc);
+ }
+
+ rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return dev_dax;
+
+ err:
+ kfree(dev_dax);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(__devm_create_dev_dax);
+
+static int match_always_count;
+
+int __dax_driver_register(struct dax_device_driver *dax_drv,
+ struct module *module, const char *mod_name)
+{
+ struct device_driver *drv = &dax_drv->drv;
+ int rc = 0;
+
+ INIT_LIST_HEAD(&dax_drv->ids);
+ drv->owner = module;
+ drv->name = mod_name;
+ drv->mod_name = mod_name;
+ drv->bus = &dax_bus_type;
+
+ /* there can only be one default driver */
+ mutex_lock(&dax_bus_lock);
+ match_always_count += dax_drv->match_always;
+ if (match_always_count > 1) {
+ match_always_count--;
+ WARN_ON(1);
+ rc = -EINVAL;
+ }
+ mutex_unlock(&dax_bus_lock);
+ if (rc)
+ return rc;
+ return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(__dax_driver_register);
+
+void dax_driver_unregister(struct dax_device_driver *dax_drv)
+{
+ struct device_driver *drv = &dax_drv->drv;
+ struct dax_id *dax_id, *_id;
+
+ mutex_lock(&dax_bus_lock);
+ match_always_count -= dax_drv->match_always;
+ list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
+ list_del(&dax_id->list);
+ kfree(dax_id);
+ }
+ mutex_unlock(&dax_bus_lock);
+ driver_unregister(drv);
+}
+EXPORT_SYMBOL_GPL(dax_driver_unregister);
+
+int __init dax_bus_init(void)
+{
+ int rc;
+
+ if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) {
+ dax_class = class_create(THIS_MODULE, "dax");
+ if (IS_ERR(dax_class))
+ return PTR_ERR(dax_class);
+ }
+
+ rc = bus_register(&dax_bus_type);
+ if (rc)
+ class_destroy(dax_class);
+ return rc;
+}
+
+void __exit dax_bus_exit(void)
+{
+ bus_unregister(&dax_bus_type);
+ class_destroy(dax_class);
+}
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
new file mode 100644
index 000000000000..8619e3299943
--- /dev/null
+++ b/drivers/dax/bus.h
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
+#ifndef __DAX_BUS_H__
+#define __DAX_BUS_H__
+#include <linux/device.h>
+
+struct dev_dax;
+struct resource;
+struct dax_device;
+struct dax_region;
+void dax_region_put(struct dax_region *dax_region);
+struct dax_region *alloc_dax_region(struct device *parent, int region_id,
+ struct resource *res, int target_node, unsigned int align,
+ unsigned long flags);
+
+enum dev_dax_subsys {
+ DEV_DAX_BUS,
+ DEV_DAX_CLASS,
+};
+
+struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
+ struct dev_pagemap *pgmap, enum dev_dax_subsys subsys);
+
+static inline struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
+ int id, struct dev_pagemap *pgmap)
+{
+ return __devm_create_dev_dax(dax_region, id, pgmap, DEV_DAX_BUS);
+}
+
+/* to be deleted when DEV_DAX_CLASS is removed */
+struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys);
+
+struct dax_device_driver {
+ struct device_driver drv;
+ struct list_head ids;
+ int match_always;
+};
+
+int __dax_driver_register(struct dax_device_driver *dax_drv,
+ struct module *module, const char *mod_name);
+#define dax_driver_register(driver) \
+ __dax_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+void dax_driver_unregister(struct dax_device_driver *dax_drv);
+void kill_dev_dax(struct dev_dax *dev_dax);
+
+#if IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)
+int dev_dax_probe(struct device *dev);
+#endif
+
+/*
+ * While run_dax() is potentially a generic operation that could be
+ * defined in include/linux/dax.h, we don't want to grow any users
+ * outside of drivers/dax/
+ */
+void run_dax(struct dax_device *dax_dev);
+
+#define MODULE_ALIAS_DAX_DEVICE(type) \
+ MODULE_ALIAS("dax:t" __stringify(type) "*")
+#define DAX_DEVICE_MODALIAS_FMT "dax:t%d"
+
+#endif /* __DAX_BUS_H__ */
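Annotation: a minimal consumer of this header, modeled on the kmem driver added later in the patch (my_probe and my_remove are hypothetical placeholders):

    static struct dax_device_driver my_dax_driver = {
            .drv = {
                    .probe = my_probe,      /* int (*)(struct device *) */
                    .remove = my_remove,
            },
            /* .match_always is reserved for the one default driver */
    };

    static int __init my_init(void)
    {
            return dax_driver_register(&my_dax_driver);
    }
    module_init(my_init);
    MODULE_ALIAS_DAX_DEVICE(0);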
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index b6fc4f04636d..a45612148ca0 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -16,10 +16,17 @@
#include <linux/device.h>
#include <linux/cdev.h>
+/* private routines between core files */
+struct dax_device;
+struct dax_device *inode_dax(struct inode *inode);
+struct inode *dax_inode(struct dax_device *dax_dev);
+int dax_bus_init(void);
+void dax_bus_exit(void);
+
/**
* struct dax_region - mapping infrastructure for dax devices
* @id: kernel-wide unique region for a memory range
- * @base: linear address corresponding to @res
+ * @target_node: effective numa node if this memory range is onlined
* @kref: to pin while other agents have a need to do lookups
* @dev: parent device backing this region
* @align: allocation and mapping alignment for child dax devices
@@ -28,8 +35,7 @@
*/
struct dax_region {
int id;
- struct ida ida;
- void *base;
+ int target_node;
struct kref kref;
struct device *dev;
unsigned int align;
@@ -38,20 +44,28 @@ struct dax_region {
};
/**
- * struct dev_dax - instance data for a subdivision of a dax region
+ * struct dev_dax - instance data for a subdivision of a dax region, and
+ * data while the device is activated in the driver.
* @region - parent region
* @dax_dev - core dax functionality
+ * @target_node: effective numa node if dev_dax memory range is onlined
* @dev - device core
- * @id - child id in the region
- * @num_resources - number of physical address extents in this device
- * @res - array of physical address ranges
+ * @pgmap - pgmap for memmap setup / lifetime (driver owned)
+ * @ref: pgmap reference count (driver owned)
+ * @cmp: @ref final put completion (driver owned)
*/
struct dev_dax {
struct dax_region *region;
struct dax_device *dax_dev;
+ int target_node;
struct device dev;
- int id;
- int num_resources;
- struct resource res[0];
+ struct dev_pagemap pgmap;
+ struct percpu_ref ref;
+ struct completion cmp;
};
+
+static inline struct dev_dax *to_dev_dax(struct device *dev)
+{
+ return container_of(dev, struct dev_dax, dev);
+}
#endif
diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h
deleted file mode 100644
index f9e5feea742c..000000000000
--- a/drivers/dax/dax.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#ifndef __DAX_H__
-#define __DAX_H__
-struct dax_device;
-struct dax_device *inode_dax(struct inode *inode);
-struct inode *dax_inode(struct dax_device *dax_dev);
-#endif /* __DAX_H__ */
diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h
deleted file mode 100644
index 688b051750bd..000000000000
--- a/drivers/dax/device-dax.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#ifndef __DEVICE_DAX_H__
-#define __DEVICE_DAX_H__
-struct device;
-struct dev_dax;
-struct resource;
-struct dax_region;
-void dax_region_put(struct dax_region *dax_region);
-struct dax_region *alloc_dax_region(struct device *parent,
- int region_id, struct resource *res, unsigned int align,
- void *addr, unsigned long flags);
-struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
- int id, struct resource *res, int count);
-#endif /* __DEVICE_DAX_H__ */
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 948806e57cee..e428468ab661 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -1,15 +1,6 @@
-/*
- * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
+#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -21,161 +12,39 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
-#include "dax.h"
+#include "bus.h"
-static struct class *dax_class;
-
-/*
- * Rely on the fact that drvdata is set before the attributes are
- * registered, and that the attributes are unregistered before drvdata
- * is cleared to assume that drvdata is always valid.
- */
-static ssize_t id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dax_region *dax_region = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", dax_region->id);
-}
-static DEVICE_ATTR_RO(id);
-
-static ssize_t region_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dax_region *dax_region = dev_get_drvdata(dev);
-
- return sprintf(buf, "%llu\n", (unsigned long long)
- resource_size(&dax_region->res));
-}
-static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
- region_size_show, NULL);
-
-static ssize_t align_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dax_region *dax_region = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", dax_region->align);
-}
-static DEVICE_ATTR_RO(align);
-
-static struct attribute *dax_region_attributes[] = {
- &dev_attr_region_size.attr,
- &dev_attr_align.attr,
- &dev_attr_id.attr,
- NULL,
-};
-
-static const struct attribute_group dax_region_attribute_group = {
- .name = "dax_region",
- .attrs = dax_region_attributes,
-};
-
-static const struct attribute_group *dax_region_attribute_groups[] = {
- &dax_region_attribute_group,
- NULL,
-};
-
-static void dax_region_free(struct kref *kref)
-{
- struct dax_region *dax_region;
-
- dax_region = container_of(kref, struct dax_region, kref);
- kfree(dax_region);
-}
-
-void dax_region_put(struct dax_region *dax_region)
+static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref)
{
- kref_put(&dax_region->kref, dax_region_free);
+ return container_of(ref, struct dev_dax, ref);
}
-EXPORT_SYMBOL_GPL(dax_region_put);
-static void dax_region_unregister(void *region)
+static void dev_dax_percpu_release(struct percpu_ref *ref)
{
- struct dax_region *dax_region = region;
+ struct dev_dax *dev_dax = ref_to_dev_dax(ref);
- sysfs_remove_groups(&dax_region->dev->kobj,
- dax_region_attribute_groups);
- dax_region_put(dax_region);
+ dev_dbg(&dev_dax->dev, "%s\n", __func__);
+ complete(&dev_dax->cmp);
}
-struct dax_region *alloc_dax_region(struct device *parent, int region_id,
- struct resource *res, unsigned int align, void *addr,
- unsigned long pfn_flags)
+static void dev_dax_percpu_exit(void *data)
{
- struct dax_region *dax_region;
-
- /*
- * The DAX core assumes that it can store its private data in
- * parent->driver_data. This WARN is a reminder / safeguard for
- * developers of device-dax drivers.
- */
- if (dev_get_drvdata(parent)) {
- dev_WARN(parent, "dax core failed to setup private data\n");
- return NULL;
- }
-
- if (!IS_ALIGNED(res->start, align)
- || !IS_ALIGNED(resource_size(res), align))
- return NULL;
-
- dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
- if (!dax_region)
- return NULL;
-
- dev_set_drvdata(parent, dax_region);
- memcpy(&dax_region->res, res, sizeof(*res));
- dax_region->pfn_flags = pfn_flags;
- kref_init(&dax_region->kref);
- dax_region->id = region_id;
- ida_init(&dax_region->ida);
- dax_region->align = align;
- dax_region->dev = parent;
- dax_region->base = addr;
- if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
- kfree(dax_region);
- return NULL;
- }
+ struct percpu_ref *ref = data;
+ struct dev_dax *dev_dax = ref_to_dev_dax(ref);
- kref_get(&dax_region->kref);
- if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
- return NULL;
- return dax_region;
+ dev_dbg(&dev_dax->dev, "%s\n", __func__);
+ wait_for_completion(&dev_dax->cmp);
+ percpu_ref_exit(ref);
}
-EXPORT_SYMBOL_GPL(alloc_dax_region);
-static struct dev_dax *to_dev_dax(struct device *dev)
+static void dev_dax_percpu_kill(struct percpu_ref *data)
{
- return container_of(dev, struct dev_dax, dev);
-}
-
-static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dev_dax *dev_dax = to_dev_dax(dev);
- unsigned long long size = 0;
- int i;
+ struct percpu_ref *ref = data;
+ struct dev_dax *dev_dax = ref_to_dev_dax(ref);
- for (i = 0; i < dev_dax->num_resources; i++)
- size += resource_size(&dev_dax->res[i]);
-
- return sprintf(buf, "%llu\n", size);
+ dev_dbg(&dev_dax->dev, "%s\n", __func__);
+ percpu_ref_kill(ref);
}
-static DEVICE_ATTR_RO(size);
-
-static struct attribute *dev_dax_attributes[] = {
- &dev_attr_size.attr,
- NULL,
-};
-
-static const struct attribute_group dev_dax_attribute_group = {
- .attrs = dev_dax_attributes,
-};
-
-static const struct attribute_group *dax_attribute_groups[] = {
- &dev_dax_attribute_group,
- NULL,
-};
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
const char *func)
@@ -226,21 +95,11 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
{
- struct resource *res;
- /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */
- phys_addr_t uninitialized_var(phys);
- int i;
-
- for (i = 0; i < dev_dax->num_resources; i++) {
- res = &dev_dax->res[i];
- phys = pgoff * PAGE_SIZE + res->start;
- if (phys >= res->start && phys <= res->end)
- break;
- pgoff -= PHYS_PFN(resource_size(res));
- }
+ struct resource *res = &dev_dax->region->res;
+ phys_addr_t phys;
- if (i < dev_dax->num_resources) {
- res = &dev_dax->res[i];
+ phys = pgoff * PAGE_SIZE + res->start;
+ if (phys >= res->start && phys <= res->end) {
if (phys + size - 1 <= res->end)
return phys;
}
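Annotation: with a single region resource the translation collapses to one add-and-check. Illustrative numbers:

    /*
     * res = [0x100000000-0x17fffffff], PAGE_SIZE = 4K, pgoff = 0x10:
     *   phys = 0x10 * 0x1000 + 0x100000000 = 0x100010000
     * The request is honored only if phys + size - 1 <= res->end.
     */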
@@ -576,152 +435,100 @@ static const struct file_operations dax_fops = {
.mmap_supported_flags = MAP_SYNC,
};
-static void dev_dax_release(struct device *dev)
+static void dev_dax_cdev_del(void *cdev)
{
- struct dev_dax *dev_dax = to_dev_dax(dev);
- struct dax_region *dax_region = dev_dax->region;
- struct dax_device *dax_dev = dev_dax->dax_dev;
-
- if (dev_dax->id >= 0)
- ida_simple_remove(&dax_region->ida, dev_dax->id);
- dax_region_put(dax_region);
- put_dax(dax_dev);
- kfree(dev_dax);
+ cdev_del(cdev);
}
-static void kill_dev_dax(struct dev_dax *dev_dax)
+static void dev_dax_kill(void *dev_dax)
{
- struct dax_device *dax_dev = dev_dax->dax_dev;
- struct inode *inode = dax_inode(dax_dev);
-
- kill_dax(dax_dev);
- unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+ kill_dev_dax(dev_dax);
}
-static void unregister_dev_dax(void *dev)
+int dev_dax_probe(struct device *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_device *dax_dev = dev_dax->dax_dev;
- struct inode *inode = dax_inode(dax_dev);
- struct cdev *cdev = inode->i_cdev;
-
- dev_dbg(dev, "trace\n");
-
- kill_dev_dax(dev_dax);
- cdev_device_del(cdev, dev);
- put_device(dev);
-}
-
-struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
- int id, struct resource *res, int count)
-{
- struct device *parent = dax_region->dev;
- struct dax_device *dax_dev;
- struct dev_dax *dev_dax;
+ struct resource *res = &dev_dax->region->res;
struct inode *inode;
- struct device *dev;
struct cdev *cdev;
- int rc, i;
-
- if (!count)
- return ERR_PTR(-EINVAL);
-
- dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL);
- if (!dev_dax)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < count; i++) {
- if (!IS_ALIGNED(res[i].start, dax_region->align)
- || !IS_ALIGNED(resource_size(&res[i]),
- dax_region->align)) {
- rc = -EINVAL;
- break;
- }
- dev_dax->res[i].start = res[i].start;
- dev_dax->res[i].end = res[i].end;
+ void *addr;
+ int rc;
+
+ /* 1:1 map region resource range to device-dax instance range */
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev))) {
+ dev_warn(dev, "could not reserve region %pR\n", res);
+ return -EBUSY;
}
- if (i < count)
- goto err_id;
+ init_completion(&dev_dax->cmp);
+ rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
- if (id < 0) {
- id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
- dev_dax->id = id;
- if (id < 0) {
- rc = id;
- goto err_id;
- }
- } else {
- /* region provider owns @id lifetime */
- dev_dax->id = -1;
- }
+ rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
+ if (rc)
+ return rc;
- /*
- * No 'host' or dax_operations since there is no access to this
- * device outside of mmap of the resulting character device.
- */
- dax_dev = alloc_dax(dev_dax, NULL, NULL);
- if (!dax_dev) {
- rc = -ENOMEM;
- goto err_dax;
+ dev_dax->pgmap.ref = &dev_dax->ref;
+ dev_dax->pgmap.kill = dev_dax_percpu_kill;
+ addr = devm_memremap_pages(dev, &dev_dax->pgmap);
+ if (IS_ERR(addr)) {
+ devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
+ percpu_ref_exit(&dev_dax->ref);
+ return PTR_ERR(addr);
}
- /* from here on we're committed to teardown via dax_dev_release() */
- dev = &dev_dax->dev;
- device_initialize(dev);
-
inode = dax_inode(dax_dev);
cdev = inode->i_cdev;
cdev_init(cdev, &dax_fops);
- cdev->owner = parent->driver->owner;
-
- dev_dax->num_resources = count;
- dev_dax->dax_dev = dax_dev;
- dev_dax->region = dax_region;
- kref_get(&dax_region->kref);
-
- dev->devt = inode->i_rdev;
- dev->class = dax_class;
- dev->parent = parent;
- dev->groups = dax_attribute_groups;
- dev->release = dev_dax_release;
- dev_set_name(dev, "dax%d.%d", dax_region->id, id);
-
- rc = cdev_device_add(cdev, dev);
- if (rc) {
- kill_dev_dax(dev_dax);
- put_device(dev);
- return ERR_PTR(rc);
- }
-
- rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
+ if (dev->class) {
+ /* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
+ cdev->owner = dev->parent->driver->owner;
+ } else
+ cdev->owner = dev->driver->owner;
+ cdev_set_parent(cdev, &dev->kobj);
+ rc = cdev_add(cdev, dev->devt, 1);
if (rc)
- return ERR_PTR(rc);
+ return rc;
- return dev_dax;
+ rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
+ if (rc)
+ return rc;
- err_dax:
- if (dev_dax->id >= 0)
- ida_simple_remove(&dax_region->ida, dev_dax->id);
- err_id:
- kfree(dev_dax);
+ run_dax(dax_dev);
+ return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
+}
+EXPORT_SYMBOL_GPL(dev_dax_probe);
- return ERR_PTR(rc);
+static int dev_dax_remove(struct device *dev)
+{
+ /* all probe actions are unwound by devm */
+ return 0;
}
-EXPORT_SYMBOL_GPL(devm_create_dev_dax);
+
+static struct dax_device_driver device_dax_driver = {
+ .drv = {
+ .probe = dev_dax_probe,
+ .remove = dev_dax_remove,
+ },
+ .match_always = 1,
+};
static int __init dax_init(void)
{
- dax_class = class_create(THIS_MODULE, "dax");
- return PTR_ERR_OR_ZERO(dax_class);
+ return dax_driver_register(&device_dax_driver);
}
static void __exit dax_exit(void)
{
- class_destroy(dax_class);
+ dax_driver_unregister(&device_dax_driver);
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
-subsys_initcall(dax_init);
+module_init(dax_init);
module_exit(dax_exit);
+MODULE_ALIAS_DAX_DEVICE(0);
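Annotation: because every probe step is registered with devm, unbind unwinds them in reverse registration order. A sketch of the resulting teardown sequence, inferred from dev_dax_probe() above:

    /*
     * 1. dev_dax_kill()        kill_dax() + unmap_mapping_range()
     * 2. dev_dax_cdev_del()    cdev_del()
     * 3. devm_memremap_pages   teardown fires pgmap->kill, i.e.
     *                          dev_dax_percpu_kill(), and waits for
     *                          outstanding page references to drain
     * 4. dev_dax_percpu_exit() wait_for_completion(&dev_dax->cmp),
     *                          signalled by dev_dax_percpu_release(),
     *                          then percpu_ref_exit()
     * 5. devm releases the requested mem region
     */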
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
new file mode 100644
index 000000000000..a02318c6d28a
--- /dev/null
+++ b/drivers/dax/kmem.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-2019 Intel Corporation. All rights reserved. */
+#include <linux/memremap.h>
+#include <linux/pagemap.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pfn_t.h>
+#include <linux/slab.h>
+#include <linux/dax.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include "dax-private.h"
+#include "bus.h"
+
+int dev_dax_kmem_probe(struct device *dev)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct resource *res = &dev_dax->region->res;
+ resource_size_t kmem_start;
+ resource_size_t kmem_size;
+ resource_size_t kmem_end;
+ struct resource *new_res;
+ int numa_node;
+ int rc;
+
+ /*
+ * Ensure good NUMA information for the persistent memory.
+ * Without this check, there is a risk that slow memory
+ * could be mixed in a node with faster memory, causing
+ * unavoidable performance issues.
+ */
+ numa_node = dev_dax->target_node;
+ if (numa_node < 0) {
+ dev_warn(dev, "rejecting DAX region %pR with invalid node: %d\n",
+ res, numa_node);
+ return -EINVAL;
+ }
+
+ /* Hotplug starting at the beginning of the next block: */
+ kmem_start = ALIGN(res->start, memory_block_size_bytes());
+
+ kmem_size = resource_size(res);
+ /* Adjust the size down to compensate for moving up kmem_start: */
+ kmem_size -= kmem_start - res->start;
+ /* Align the size down to cover only complete blocks: */
+ kmem_size &= ~(memory_block_size_bytes() - 1);
+ kmem_end = kmem_start + kmem_size;
+
+ /* Region is permanently reserved. Hot-remove not yet implemented. */
+ new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev));
+ if (!new_res) {
+ dev_warn(dev, "could not reserve region [%pa-%pa]\n",
+ &kmem_start, &kmem_end);
+ return -EBUSY;
+ }
+
+ /*
+ * Set flags appropriate for System RAM. Leave ..._BUSY clear
+ * so that add_memory() can add a child resource. Do not
+ * inherit flags from the parent since it may set new flags
+ * unknown to us that will break add_memory() below.
+ */
+ new_res->flags = IORESOURCE_SYSTEM_RAM;
+ new_res->name = dev_name(dev);
+
+ rc = add_memory(numa_node, new_res->start, resource_size(new_res));
+ if (rc)
+ return rc;
+
+ return 0;
+}
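Annotation: worked example of the alignment above, assuming memory_block_size_bytes() == 128M:

    /*
     * res        = [0x104000000-0x13fffffff]  (960M, start unaligned)
     * kmem_start = ALIGN(0x104000000, 128M)  = 0x108000000
     * kmem_size  = 960M - 64M = 896M          (7 whole 128M blocks)
     * kmem_end   = 0x140000000
     * Only the 896M covered by complete blocks is hot-added.
     */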
+
+static int dev_dax_kmem_remove(struct device *dev)
+{
+ /*
+ * Purposely leak the request_mem_region() for the device-dax
+ * range and return '0' to ->remove() attempts. The removal of
+ * the device from the driver always succeeds, but the region
+ * is permanently pinned as reserved by the unreleased
+ * request_mem_region().
+ */
+ return 0;
+}
+
+static struct dax_device_driver device_dax_kmem_driver = {
+ .drv = {
+ .probe = dev_dax_kmem_probe,
+ .remove = dev_dax_kmem_remove,
+ },
+};
+
+static int __init dax_kmem_init(void)
+{
+ return dax_driver_register(&device_dax_kmem_driver);
+}
+
+static void __exit dax_kmem_exit(void)
+{
+ dax_driver_unregister(&device_dax_kmem_driver);
+}
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+module_init(dax_kmem_init);
+module_exit(dax_kmem_exit);
+MODULE_ALIAS_DAX_DEVICE(0);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
deleted file mode 100644
index 2c1f459c0c63..000000000000
--- a/drivers/dax/pmem.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#include <linux/percpu-refcount.h>
-#include <linux/memremap.h>
-#include <linux/module.h>
-#include <linux/pfn_t.h>
-#include "../nvdimm/pfn.h"
-#include "../nvdimm/nd.h"
-#include "device-dax.h"
-
-struct dax_pmem {
- struct device *dev;
- struct percpu_ref ref;
- struct dev_pagemap pgmap;
- struct completion cmp;
-};
-
-static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
-{
- return container_of(ref, struct dax_pmem, ref);
-}
-
-static void dax_pmem_percpu_release(struct percpu_ref *ref)
-{
- struct dax_pmem *dax_pmem = to_dax_pmem(ref);
-
- dev_dbg(dax_pmem->dev, "trace\n");
- complete(&dax_pmem->cmp);
-}
-
-static void dax_pmem_percpu_exit(void *data)
-{
- struct percpu_ref *ref = data;
- struct dax_pmem *dax_pmem = to_dax_pmem(ref);
-
- dev_dbg(dax_pmem->dev, "trace\n");
- wait_for_completion(&dax_pmem->cmp);
- percpu_ref_exit(ref);
-}
-
-static void dax_pmem_percpu_kill(struct percpu_ref *ref)
-{
- struct dax_pmem *dax_pmem = to_dax_pmem(ref);
-
- dev_dbg(dax_pmem->dev, "trace\n");
- percpu_ref_kill(ref);
-}
-
-static int dax_pmem_probe(struct device *dev)
-{
- void *addr;
- struct resource res;
- int rc, id, region_id;
- struct nd_pfn_sb *pfn_sb;
- struct dev_dax *dev_dax;
- struct dax_pmem *dax_pmem;
- struct nd_namespace_io *nsio;
- struct dax_region *dax_region;
- struct nd_namespace_common *ndns;
- struct nd_dax *nd_dax = to_nd_dax(dev);
- struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
-
- ndns = nvdimm_namespace_common_probe(dev);
- if (IS_ERR(ndns))
- return PTR_ERR(ndns);
- nsio = to_nd_namespace_io(&ndns->dev);
-
- dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
- if (!dax_pmem)
- return -ENOMEM;
-
- /* parse the 'pfn' info block via ->rw_bytes */
- rc = devm_nsio_enable(dev, nsio);
- if (rc)
- return rc;
- rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap);
- if (rc)
- return rc;
- devm_nsio_disable(dev, nsio);
-
- pfn_sb = nd_pfn->pfn_sb;
-
- if (!devm_request_mem_region(dev, nsio->res.start,
- resource_size(&nsio->res),
- dev_name(&ndns->dev))) {
- dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
- return -EBUSY;
- }
-
- dax_pmem->dev = dev;
- init_completion(&dax_pmem->cmp);
- rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
- GFP_KERNEL);
- if (rc)
- return rc;
-
- rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
- if (rc) {
- percpu_ref_exit(&dax_pmem->ref);
- return rc;
- }
-
- dax_pmem->pgmap.ref = &dax_pmem->ref;
- dax_pmem->pgmap.kill = dax_pmem_percpu_kill;
- addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
-
- /* adjust the dax_region resource to the start of data */
- memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
- res.start += le64_to_cpu(pfn_sb->dataoff);
-
- rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
- if (rc != 2)
- return -EINVAL;
-
- dax_region = alloc_dax_region(dev, region_id, &res,
- le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
- if (!dax_region)
- return -ENOMEM;
-
- /* TODO: support for subdividing a dax region... */
- dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);
-
- /* child dev_dax instances now own the lifetime of the dax_region */
- dax_region_put(dax_region);
-
- return PTR_ERR_OR_ZERO(dev_dax);
-}
-
-static struct nd_device_driver dax_pmem_driver = {
- .probe = dax_pmem_probe,
- .drv = {
- .name = "dax_pmem",
- },
- .type = ND_DRIVER_DAX_PMEM,
-};
-
-module_nd_driver(dax_pmem_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile
new file mode 100644
index 000000000000..e2e79bd3fdcf
--- /dev/null
+++ b/drivers/dax/pmem/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
+obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
+obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o
+
+dax_pmem-y := pmem.o
+dax_pmem_core-y := core.o
+dax_pmem_compat-y := compat.o
diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c
new file mode 100644
index 000000000000..d7b15e6f30c5
--- /dev/null
+++ b/drivers/dax/pmem/compat.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
+#include <linux/percpu-refcount.h>
+#include <linux/memremap.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
+#include <linux/nd.h>
+#include "../bus.h"
+
+/* we need the private definitions to implement compat support */
+#include "../dax-private.h"
+
+static int dax_pmem_compat_probe(struct device *dev)
+{
+ struct dev_dax *dev_dax = __dax_pmem_probe(dev, DEV_DAX_CLASS);
+ int rc;
+
+ if (IS_ERR(dev_dax))
+ return PTR_ERR(dev_dax);
+
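+ /*
+ * Scope the devm allocations made by dev_dax_probe() to a devres
+ * group so dax_pmem_compat_release() can unwind them at remove time.
+ */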
+ if (!devres_open_group(&dev_dax->dev, dev_dax, GFP_KERNEL))
+ return -ENOMEM;
+
+ device_lock(&dev_dax->dev);
+ rc = dev_dax_probe(&dev_dax->dev);
+ device_unlock(&dev_dax->dev);
+
+ devres_close_group(&dev_dax->dev, dev_dax);
+ if (rc)
+ devres_release_group(&dev_dax->dev, dev_dax);
+
+ return rc;
+}
+
+static int dax_pmem_compat_release(struct device *dev, void *data)
+{
+ device_lock(dev);
+ devres_release_group(dev, to_dev_dax(dev));
+ device_unlock(dev);
+
+ return 0;
+}
+
+static int dax_pmem_compat_remove(struct device *dev)
+{
+ device_for_each_child(dev, NULL, dax_pmem_compat_release);
+ return 0;
+}
+
+static struct nd_device_driver dax_pmem_compat_driver = {
+ .probe = dax_pmem_compat_probe,
+ .remove = dax_pmem_compat_remove,
+ .drv = {
+ .name = "dax_pmem_compat",
+ },
+ .type = ND_DRIVER_DAX_PMEM,
+};
+
+static int __init dax_pmem_compat_init(void)
+{
+ return nd_driver_register(&dax_pmem_compat_driver);
+}
+module_init(dax_pmem_compat_init);
+
+static void __exit dax_pmem_compat_exit(void)
+{
+ driver_unregister(&dax_pmem_compat_driver.drv);
+}
+module_exit(dax_pmem_compat_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
new file mode 100644
index 000000000000..f71019ce0647
--- /dev/null
+++ b/drivers/dax/pmem/core.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
+#include <linux/memremap.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
+#include "../../nvdimm/pfn.h"
+#include "../../nvdimm/nd.h"
+#include "../bus.h"
+
+struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
+{
+ struct resource res;
+ int rc, id, region_id;
+ resource_size_t offset;
+ struct nd_pfn_sb *pfn_sb;
+ struct dev_dax *dev_dax;
+ struct nd_namespace_io *nsio;
+ struct dax_region *dax_region;
+ struct dev_pagemap pgmap = { 0 };
+ struct nd_namespace_common *ndns;
+ struct nd_dax *nd_dax = to_nd_dax(dev);
+ struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+
+ ndns = nvdimm_namespace_common_probe(dev);
+ if (IS_ERR(ndns))
+ return ERR_CAST(ndns);
+ nsio = to_nd_namespace_io(&ndns->dev);
+
+ /* parse the 'pfn' info block via ->rw_bytes */
+ rc = devm_nsio_enable(dev, nsio);
+ if (rc)
+ return ERR_PTR(rc);
+ rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
+ if (rc)
+ return ERR_PTR(rc);
+ devm_nsio_disable(dev, nsio);
+
+ /* reserve the metadata area, device-dax will reserve the data */
+ pfn_sb = nd_pfn->pfn_sb;
+ offset = le64_to_cpu(pfn_sb->dataoff);
+ if (!devm_request_mem_region(dev, nsio->res.start, offset,
+ dev_name(&ndns->dev))) {
+ dev_warn(dev, "could not reserve metadata\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
+ if (rc != 2)
+ return ERR_PTR(-EINVAL);
+
+ /* adjust the dax_region resource to the start of data */
+ memcpy(&res, &pgmap.res, sizeof(res));
+ res.start += offset;
+ dax_region = alloc_dax_region(dev, region_id, &res,
+ nd_region->target_node, le32_to_cpu(pfn_sb->align),
+ PFN_DEV|PFN_MAP);
+ if (!dax_region)
+ return ERR_PTR(-ENOMEM);
+
+ dev_dax = __devm_create_dev_dax(dax_region, id, &pgmap, subsys);
+
+ /* child dev_dax instances now own the lifetime of the dax_region */
+ dax_region_put(dax_region);
+
+ return dev_dax;
+}
+EXPORT_SYMBOL_GPL(__dax_pmem_probe);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c
new file mode 100644
index 000000000000..0ae4238a0ef8
--- /dev/null
+++ b/drivers/dax/pmem/pmem.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
+#include <linux/percpu-refcount.h>
+#include <linux/memremap.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
+#include <linux/nd.h>
+#include "../bus.h"
+
+static int dax_pmem_probe(struct device *dev)
+{
+ return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev, DEV_DAX_BUS));
+}
+
+static struct nd_device_driver dax_pmem_driver = {
+ .probe = dax_pmem_probe,
+ .drv = {
+ .name = "dax_pmem",
+ },
+ .type = ND_DRIVER_DAX_PMEM,
+};
+
+static int __init dax_pmem_init(void)
+{
+ return nd_driver_register(&dax_pmem_driver);
+}
+module_init(dax_pmem_init);
+
+static void __exit dax_pmem_exit(void)
+{
+ driver_unregister(&dax_pmem_driver.drv);
+}
+module_exit(dax_pmem_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+#if !IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)
+/* For compat builds, don't load this module by default */
+MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
+#endif
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 6e928f37d084..0a339b85133e 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -22,6 +22,7 @@
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
+#include "dax-private.h"
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
@@ -86,12 +87,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
struct dax_device *dax_dev;
bool dax_enabled = false;
+ pgoff_t pgoff, pgoff_end;
struct request_queue *q;
- pgoff_t pgoff;
- int err, id;
- pfn_t pfn;
- long len;
char buf[BDEVNAME_SIZE];
+ void *kaddr, *end_kaddr;
+ pfn_t pfn, end_pfn;
+ sector_t last_page;
+ long len, len2;
+ int err, id;
if (blocksize != PAGE_SIZE) {
pr_debug("%s: error: unsupported blocksize for dax\n",
@@ -113,6 +116,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
return false;
}
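+ /*
+ * Convert the index of the device's last page to a 512-byte sector
+ * (PAGE_SIZE / 512 == 8 with 4K pages) so the end of the device is
+ * checked for dax support as well as the start.
+ */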
+ last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
+ err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
+ if (err) {
+ pr_debug("%s: error: unaligned partition for dax\n",
+ bdevname(bdev, buf));
+ return false;
+ }
+
dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
if (!dax_dev) {
pr_debug("%s: error: device does not support dax\n",
@@ -121,14 +132,15 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
}
id = dax_read_lock();
- len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
+ len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+ len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
dax_read_unlock(id);
put_dax(dax_dev);
- if (len < 1) {
+ if (len < 1 || len2 < 1) {
pr_debug("%s: error: dax access failed (%ld)\n",
- bdevname(bdev, buf), len);
+ bdevname(bdev, buf), len < 1 ? len : len2);
return false;
}
@@ -143,13 +155,20 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
*/
WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
dax_enabled = true;
- } else if (pfn_t_devmap(pfn)) {
- struct dev_pagemap *pgmap;
+ } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
+ struct dev_pagemap *pgmap, *end_pgmap;
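+ /*
+ * Only claim dax support when both ends of the device share one
+ * fsdax pgmap and the returned kernel addresses translate back to
+ * the reported pfns.
+ */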
pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
- if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
+ end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
+ if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
+ && pfn_t_to_page(pfn)->pgmap == pgmap
+ && pfn_t_to_page(end_pfn)->pgmap == pgmap
+ && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
+ && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
dax_enabled = true;
put_dev_pagemap(pgmap);
+ put_dev_pagemap(end_pgmap);
+
}
if (!dax_enabled) {
@@ -365,11 +384,15 @@ void kill_dax(struct dax_device *dax_dev)
spin_lock(&dax_host_lock);
hlist_del_init(&dax_dev->list);
spin_unlock(&dax_host_lock);
-
- dax_dev->private = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);
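+/* Mark the dax_device as alive again; the counterpart to kill_dax() */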
+void run_dax(struct dax_device *dax_dev)
+{
+ set_bit(DAXDEV_ALIVE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(run_dax);
+
static struct inode *dax_alloc_inode(struct super_block *sb)
{
struct dax_device *dax_dev;
@@ -584,6 +607,8 @@ EXPORT_SYMBOL_GPL(dax_inode);
void *dax_get_private(struct dax_device *dax_dev)
{
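+ /* Never leak driver data from a dax_device that has been killed */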
+ if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
+ return NULL;
return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);
@@ -597,7 +622,7 @@ static void init_once(void *_dax_dev)
inode_init_once(inode);
}
-static int __dax_fs_init(void)
+static int dax_fs_init(void)
{
int rc;
@@ -629,35 +654,45 @@ static int __dax_fs_init(void)
return rc;
}
-static void __dax_fs_exit(void)
+static void dax_fs_exit(void)
{
kern_unmount(dax_mnt);
unregister_filesystem(&dax_fs_type);
kmem_cache_destroy(dax_cache);
}
-static int __init dax_fs_init(void)
+static int __init dax_core_init(void)
{
int rc;
- rc = __dax_fs_init();
+ rc = dax_fs_init();
if (rc)
return rc;
rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
if (rc)
- __dax_fs_exit();
- return rc;
+ goto err_chrdev;
+
+ rc = dax_bus_init();
+ if (rc)
+ goto err_bus;
+ return 0;
+
+err_bus:
+ unregister_chrdev_region(dax_devt, MINORMASK+1);
+err_chrdev:
+ dax_fs_exit();
+ return 0;
}
-static void __exit dax_fs_exit(void)
+static void __exit dax_core_exit(void)
{
unregister_chrdev_region(dax_devt, MINORMASK+1);
ida_destroy(&dax_minor_ida);
- __dax_fs_exit();
+ dax_fs_exit();
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
-subsys_initcall(dax_fs_init);
-module_exit(dax_fs_exit);
+subsys_initcall(dax_core_init);
+module_exit(dax_core_exit);
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index d67242d87744..87e93406d7cd 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -240,7 +240,7 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
}
list_for_each_entry(edev, &devfreq_event_list, node) {
- if (!strcmp(edev->desc->name, node->name))
+ if (of_node_name_eq(node, edev->desc->name))
goto out;
}
edev = NULL;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 0ae3de76833b..6b6991f0e873 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -29,6 +29,9 @@
#include <linux/of.h>
#include "governor.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/devfreq.h>
+
static struct class *devfreq_class;
/*
@@ -228,7 +231,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
* if is not found. This can happen when both drivers (the governor driver
* and the driver that call devfreq_add_device) are built as modules.
* devfreq_list_lock should be held by the caller. Returns the matched
- * governor's pointer.
+ * governor's pointer or an error pointer.
*/
static struct devfreq_governor *try_then_request_governor(const char *name)
{
@@ -254,7 +257,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
/* Restore previous state before return */
mutex_lock(&devfreq_list_lock);
if (err)
- return NULL;
+ return ERR_PTR(err);
governor = find_devfreq_governor(name);
}
@@ -394,6 +397,8 @@ static void devfreq_monitor(struct work_struct *work)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
mutex_unlock(&devfreq->lock);
+
+ trace_devfreq_monitor(devfreq);
}
/**
@@ -528,7 +533,7 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
mutex_lock(&devfreq->lock);
if (!devfreq->stop_polling)
queue_delayed_work(devfreq_wq, &devfreq->work,
- msecs_to_jiffies(devfreq->profile->polling_ms));
+ msecs_to_jiffies(devfreq->profile->polling_ms));
}
out:
mutex_unlock(&devfreq->lock);
@@ -537,7 +542,7 @@ EXPORT_SYMBOL(devfreq_interval_update);
/**
* devfreq_notifier_call() - Notify that the device frequency requirements
- * has been changed out of devfreq framework.
+ * have been changed outside of the devfreq framework.
* @nb: the notifier_block (supposed to be devfreq->nb)
* @type: not used
* @devp: not used
@@ -651,7 +656,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_unlock(&devfreq->lock);
err = set_freq_table(devfreq);
if (err < 0)
- goto err_out;
+ goto err_dev;
mutex_lock(&devfreq->lock);
}
@@ -683,16 +688,27 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_out;
}
- devfreq->trans_table =
- devm_kzalloc(&devfreq->dev,
- array3_size(sizeof(unsigned int),
- devfreq->profile->max_state,
- devfreq->profile->max_state),
- GFP_KERNEL);
+ devfreq->trans_table = devm_kzalloc(&devfreq->dev,
+ array3_size(sizeof(unsigned int),
+ devfreq->profile->max_state,
+ devfreq->profile->max_state),
+ GFP_KERNEL);
+ if (!devfreq->trans_table) {
+ mutex_unlock(&devfreq->lock);
+ err = -ENOMEM;
+ goto err_devfreq;
+ }
+
devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
- devfreq->profile->max_state,
- sizeof(unsigned long),
- GFP_KERNEL);
+ devfreq->profile->max_state,
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!devfreq->time_in_state) {
+ mutex_unlock(&devfreq->lock);
+ err = -ENOMEM;
+ goto err_devfreq;
+ }
+
devfreq->last_stat_updated = jiffies;
srcu_init_notifier_head(&devfreq->transition_notifier_list);
@@ -726,7 +742,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
err_init:
mutex_unlock(&devfreq_list_lock);
-
+err_devfreq:
devfreq_remove_device(devfreq);
devfreq = NULL;
err_dev:
@@ -1113,7 +1129,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
struct devfreq *df = to_devfreq(dev);
int ret;
char str_governor[DEVFREQ_NAME_LEN + 1];
- struct devfreq_governor *governor;
+ const struct devfreq_governor *governor, *prev_governor;
ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
if (ret != 1)
@@ -1142,12 +1158,24 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
goto out;
}
}
+ prev_governor = df->governor;
df->governor = governor;
strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
- if (ret)
+ if (ret) {
dev_warn(dev, "%s: Governor %s not started(%d)\n",
__func__, df->governor->name, ret);
+ df->governor = prev_governor;
+ strncpy(df->governor_name, prev_governor->name,
+ DEVFREQ_NAME_LEN);
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+ if (ret) {
+ dev_err(dev,
+ "%s: reverting to Governor %s failed (%d)\n",
+ __func__, df->governor_name, ret);
+ df->governor = NULL;
+ }
+ }
out:
mutex_unlock(&devfreq_list_lock);
@@ -1172,7 +1200,7 @@ static ssize_t available_governors_show(struct device *d,
*/
if (df->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
- "%s ", df->governor_name);
+ "%s ", df->governor_name);
/*
* The devfreq device shows the registered governor except for
* immutable governors such as the passive governor.
@@ -1485,8 +1513,8 @@ EXPORT_SYMBOL(devfreq_recommended_opp);
/**
* devfreq_register_opp_notifier() - Helper function to get devfreq notified
- * for any changes in the OPP availability
- * changes
+ * for any changes in the OPP availability
+ * changes
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
@@ -1498,8 +1526,8 @@ EXPORT_SYMBOL(devfreq_register_opp_notifier);
/**
* devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
- * notified for any changes in the OPP
- * availability changes anymore.
+ * notified for any changes in the OPP
+ * availability changes anymore.
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*
@@ -1518,8 +1546,8 @@ static void devm_devfreq_opp_release(struct device *dev, void *res)
}
/**
- * devm_ devfreq_register_opp_notifier()
- * - Resource-managed devfreq_register_opp_notifier()
+ * devm_devfreq_register_opp_notifier() - Resource-managed
+ * devfreq_register_opp_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
@@ -1547,8 +1575,8 @@ int devm_devfreq_register_opp_notifier(struct device *dev,
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
/**
- * devm_devfreq_unregister_opp_notifier()
- * - Resource-managed devfreq_unregister_opp_notifier()
+ * devm_devfreq_unregister_opp_notifier() - Resource-managed
+ * devfreq_unregister_opp_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
@@ -1567,8 +1595,8 @@ EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
* @list: DEVFREQ_TRANSITION_NOTIFIER.
*/
int devfreq_register_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct notifier_block *nb,
+ unsigned int list)
{
int ret = 0;
@@ -1674,9 +1702,9 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier);
* @list: DEVFREQ_TRANSITION_NOTIFIER.
*/
void devm_devfreq_unregister_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
devm_devfreq_dev_match, devfreq));
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index c61de0bdf053..c2ea94957501 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -529,7 +529,7 @@ static int of_get_devfreq_events(struct device_node *np,
if (!ppmu_events[i].name)
continue;
- if (!of_node_cmp(node->name, ppmu_events[i].name))
+ if (of_node_name_eq(node, ppmu_events[i].name))
break;
}
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 22b113363ffc..a436ec4901bb 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -26,6 +26,8 @@
#include <linux/list.h>
#include <linux/of.h>
+#include <soc/rockchip/rk3399_grf.h>
+
#define RK3399_DMC_NUM_CH 2
/* DDRMON_CTRL */
@@ -43,18 +45,6 @@
#define DDRMON_CH1_COUNT_NUM 0x3c
#define DDRMON_CH1_DFI_ACCESS_NUM 0x40
-/* pmu grf */
-#define PMUGRF_OS_REG2 0x308
-#define DDRTYPE_SHIFT 13
-#define DDRTYPE_MASK 7
-
-enum {
- DDR3 = 3,
- LPDDR3 = 6,
- LPDDR4 = 7,
- UNUSED = 0xFF
-};
-
struct dmc_usage {
u32 access;
u32 total;
@@ -83,16 +73,17 @@ static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
u32 ddr_type;
/* get ddr type */
- regmap_read(info->regmap_pmu, PMUGRF_OS_REG2, &val);
- ddr_type = (val >> DDRTYPE_SHIFT) & DDRTYPE_MASK;
+ regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+ ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
+ RK3399_PMUGRF_DDRTYPE_MASK;
/* clear DDRMON_CTRL setting */
writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL);
/* set ddr type to dfi */
- if (ddr_type == LPDDR3)
+ if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3)
writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL);
- else if (ddr_type == LPDDR4)
+ else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4)
writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL);
/* enable count, use software mode */
@@ -211,7 +202,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
if (IS_ERR(data->clk)) {
dev_err(dev, "Cannot get the clk dmc_clk\n");
return PTR_ERR(data->clk);
- };
+ }
/* try to find the optional reference to the pmu syscon */
node = of_parse_phandle(np, "rockchip,pmu", 0);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index c25658b26598..486cc5b422f1 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -514,6 +514,13 @@ err:
return ret;
}
+static void exynos_bus_shutdown(struct platform_device *pdev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(&pdev->dev);
+
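+ /* Stop devfreq monitoring before the bus is shut down */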
+ devfreq_suspend_device(bus->devfreq);
+}
+
#ifdef CONFIG_PM_SLEEP
static int exynos_bus_resume(struct device *dev)
{
@@ -556,6 +563,7 @@ MODULE_DEVICE_TABLE(of, exynos_bus_of_match);
static struct platform_driver exynos_bus_platdrv = {
.probe = exynos_bus_probe,
+ .shutdown = exynos_bus_shutdown,
.driver = {
.name = "exynos-bus",
.pm = &exynos_bus_pm,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index e795ad2b3f6b..567c034d0301 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -18,14 +18,17 @@
#include <linux/devfreq.h>
#include <linux/devfreq-event.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/rwsem.h>
#include <linux/suspend.h>
+#include <soc/rockchip/rk3399_grf.h>
#include <soc/rockchip/rockchip_sip.h>
struct dram_timing {
@@ -69,8 +72,11 @@ struct rk3399_dmcfreq {
struct mutex lock;
struct dram_timing timing;
struct regulator *vdd_center;
+ struct regmap *regmap_pmu;
unsigned long rate, target_rate;
unsigned long volt, target_volt;
+ unsigned int odt_dis_freq;
+ int odt_pd_arg0, odt_pd_arg1;
};
static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
@@ -80,6 +86,8 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
struct dev_pm_opp *opp;
unsigned long old_clk_rate = dmcfreq->rate;
unsigned long target_volt, target_rate;
+ struct arm_smccc_res res;
+ bool odt_enable = false;
int err;
opp = devfreq_recommended_opp(dev, freq, flags);
@@ -95,6 +103,19 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
mutex_lock(&dmcfreq->lock);
+ if (target_rate >= dmcfreq->odt_dis_freq)
+ odt_enable = true;
+
+ /*
+ * This makes an SMC call to the TF-A to set the DDR PD (power-down)
+ * timings and to enable or disable the ODT (on-die termination)
+ * resistors.
+ */
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
+ dmcfreq->odt_pd_arg1,
+ ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
+ odt_enable, 0, 0, 0, &res);
+
/*
* If frequency scaling from low to high, adjust voltage first.
* If frequency scaling from high to low, adjust frequency first.
@@ -294,11 +315,13 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
{
struct arm_smccc_res res;
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node, *node;
struct rk3399_dmcfreq *data;
int ret, index, size;
uint32_t *timing;
struct dev_pm_opp *opp;
+ u32 ddr_type;
+ u32 val;
data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
if (!data)
@@ -322,7 +345,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
dev_err(dev, "Cannot get the clk dmc_clk\n");
return PTR_ERR(data->dmc_clk);
- };
+ }
data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
if (IS_ERR(data->edev))
@@ -354,11 +377,57 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
}
}
+ node = of_parse_phandle(np, "rockchip,pmu", 0);
+ if (node) {
+ data->regmap_pmu = syscon_node_to_regmap(node);
+ if (IS_ERR(data->regmap_pmu))
+ return PTR_ERR(data->regmap_pmu);
+ }
+
+ regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+ ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
+ RK3399_PMUGRF_DDRTYPE_MASK;
+
+ switch (ddr_type) {
+ case RK3399_PMUGRF_DDRTYPE_DDR3:
+ data->odt_dis_freq = data->timing.ddr3_odt_dis_freq;
+ break;
+ case RK3399_PMUGRF_DDRTYPE_LPDDR3:
+ data->odt_dis_freq = data->timing.lpddr3_odt_dis_freq;
+ break;
+ case RK3399_PMUGRF_DDRTYPE_LPDDR4:
+ data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
+ break;
+ default:
+ return -EINVAL;
+ }
+
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
ROCKCHIP_SIP_CONFIG_DRAM_INIT,
0, 0, 0, 0, &res);
/*
+ * In TF-A there is a platform SIP call to set the PD (power-down)
+ * timings and to enable or disable the ODT (on-die termination).
+ * This call needs three arguments as follows:
+ *
+ * arg0:
+ * bit[0-7] : sr_idle
+ * bit[8-15] : sr_mc_gate_idle
+ * bit[16-31] : standby idle
+ * arg1:
+ * bit[0-11] : pd_idle
+ * bit[16-27] : srpd_lite_idle
+ * arg2:
+ * bit[0] : odt enable
+ */
+ data->odt_pd_arg0 = (data->timing.sr_idle & 0xff) |
+ ((data->timing.sr_mc_gate_idle & 0xff) << 8) |
+ ((data->timing.standby_idle & 0xffff) << 16);
+ data->odt_pd_arg1 = (data->timing.pd_idle & 0xfff) |
+ ((data->timing.srpd_lite_idle & 0xfff) << 16);
+
+ /*
* We add a devfreq driver to our parent since it has a device tree node
* with operating points.
*/
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index c59d2eee5d30..c89ba7b834ff 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -573,10 +573,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
static int tegra_governor_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
- struct tegra_devfreq *tegra;
- int ret = 0;
-
- tegra = dev_get_drvdata(devfreq->dev.parent);
+ struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
switch (event) {
case DEVFREQ_GOV_START:
@@ -600,7 +597,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
break;
}
- return ret;
+ return 0;
}
static struct devfreq_governor tegra_devfreq_governor = {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d2286c7f7222..0b1dfb5bf2d9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -218,6 +218,20 @@ config FSL_EDMA
multiplexing capability for DMA request sources (slot).
This module can be found on Freescale Vybrid and LS-1 SoCs.
+config FSL_QDMA
+ tristate "NXP Layerscape qDMA engine support"
+ depends on ARM || ARM64
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Support the NXP Layerscape qDMA engine with command queue and legacy mode.
+ Channel virtualization is supported through enqueuing of DMA jobs to,
+ or dequeuing DMA jobs from, different work queues.
+ This module can be found on NXP Layerscape SoCs.
+ The qdma driver only works on SoCs with a DPAA hardware block.
+
config FSL_RAID
tristate "Freescale RAID engine Support"
depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 09571a81353d..6126e1c3a875 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 01d936c9fe89..a0a9cd76c1d4 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -134,7 +134,6 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
struct at_desc *ret = NULL;
unsigned long flags;
unsigned int i = 0;
- LIST_HEAD(tmp_list);
spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
@@ -1387,8 +1386,6 @@ static int atc_pause(struct dma_chan *chan)
int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
- LIST_HEAD(list);
-
dev_vdbg(chan2dev(chan), "%s\n", __func__);
spin_lock_irqsave(&atchan->lock, flags);
@@ -1408,8 +1405,6 @@ static int atc_resume(struct dma_chan *chan)
int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
- LIST_HEAD(list);
-
dev_vdbg(chan2dev(chan), "%s\n", __func__);
if (!atc_chan_is_paused(atchan))
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index ae10f5614f95..54093ffd0aef 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -2,9 +2,6 @@
/*
* BCM2835 DMA engine support
*
- * This driver only supports cyclic DMA transfers
- * as needed for the I2S module.
- *
* Author: Florian Meier <florian.meier@koalo.de>
* Copyright 2013
*
@@ -42,7 +39,6 @@
struct bcm2835_dmadev {
struct dma_device ddev;
- spinlock_t lock;
void __iomem *base;
struct device_dma_parameters dma_parms;
};
@@ -64,7 +60,6 @@ struct bcm2835_cb_entry {
struct bcm2835_chan {
struct virt_dma_chan vc;
- struct list_head node;
struct dma_slave_config cfg;
unsigned int dreq;
@@ -312,8 +307,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
return NULL;
/* allocate and setup the descriptor. */
- d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
- gfp);
+ d = kzalloc(struct_size(d, cb_list, frames), gfp);
if (!d)
return NULL;
@@ -406,7 +400,7 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
}
}
-static int bcm2835_dma_abort(struct bcm2835_chan *c)
+static void bcm2835_dma_abort(struct bcm2835_chan *c)
{
void __iomem *chan_base = c->chan_base;
long int timeout = 10000;
@@ -416,7 +410,7 @@ static int bcm2835_dma_abort(struct bcm2835_chan *c)
* (The ACTIVE flag in the CS register is not a reliable indicator.)
*/
if (!readl(chan_base + BCM2835_DMA_ADDR))
- return 0;
+ return;
/* Write 0 to the active bit - Pause the DMA */
writel(0, chan_base + BCM2835_DMA_CS);
@@ -432,7 +426,6 @@ static int bcm2835_dma_abort(struct bcm2835_chan *c)
"failed to complete outstanding writes\n");
writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
- return 0;
}
static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
@@ -504,8 +497,12 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
+ /*
+ * Control blocks are 256 bits in length and must start at a 256-bit
+ * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1).
+ */
c->cb_pool = dma_pool_create(dev_name(dev), dev,
- sizeof(struct bcm2835_dma_cb), 0, 0);
+ sizeof(struct bcm2835_dma_cb), 32, 0);
if (!c->cb_pool) {
dev_err(dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
@@ -674,7 +671,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
d = bcm2835_dma_create_cb_chain(chan, direction, false,
info, extra,
frames, src, dst, 0, 0,
- GFP_KERNEL);
+ GFP_NOWAIT);
if (!d)
return NULL;
@@ -774,17 +771,11 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
- struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&c->vc.lock, flags);
- /* Prevent this channel being scheduled */
- spin_lock(&d->lock);
- list_del_init(&c->node);
- spin_unlock(&d->lock);
-
/* stop DMA activity */
if (c->desc) {
vchan_terminate_vdesc(&c->desc->vd);
@@ -817,7 +808,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
c->vc.desc_free = bcm2835_dma_desc_free;
vchan_init(&c->vc, &d->ddev);
- INIT_LIST_HEAD(&c->node);
c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
c->ch = chan_id;
@@ -920,7 +910,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
- spin_lock_init(&od->lock);
platform_set_drvdata(pdev, od);
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 15b2453d2647..ffc0adc2f6ce 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -367,8 +367,7 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
struct axi_dmac_desc *desc;
unsigned int i;
- desc = kzalloc(sizeof(struct axi_dmac_desc) +
- sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+ desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index a8b6225faa12..9ce0a386225b 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -838,9 +838,8 @@ static int jz4780_dma_probe(struct platform_device *pdev)
if (!soc_data)
return -EINVAL;
- jzdma = devm_kzalloc(dev, sizeof(*jzdma)
- + sizeof(*jzdma->chan) * soc_data->nb_channels,
- GFP_KERNEL);
+ jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
+ soc_data->nb_channels), GFP_KERNEL);
if (!jzdma)
return -ENOMEM;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 6511928b4cdf..b96814a7dceb 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -200,15 +200,20 @@ struct dmatest_done {
wait_queue_head_t *wait;
};
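+/*
+ * struct dmatest_data - per-direction test buffers
+ * @raw:     buffers as allocated
+ * @aligned: @raw adjusted to the alignment restriction
+ * @cnt:     number of buffers
+ * @off:     transfer offset within an aligned buffer
+ */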
+struct dmatest_data {
+ u8 **raw;
+ u8 **aligned;
+ unsigned int cnt;
+ unsigned int off;
+};
+
struct dmatest_thread {
struct list_head node;
struct dmatest_info *info;
struct task_struct *task;
struct dma_chan *chan;
- u8 **srcs;
- u8 **usrcs;
- u8 **dsts;
- u8 **udsts;
+ struct dmatest_data src;
+ struct dmatest_data dst;
enum dma_transaction_type type;
wait_queue_head_t done_wait;
struct dmatest_done test_done;
@@ -481,6 +486,53 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
}
+static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
+{
+ unsigned int i;
+
+ for (i = 0; i < cnt; i++)
+ kfree(d->raw[i]);
+
+ kfree(d->aligned);
+ kfree(d->raw);
+}
+
+static void dmatest_free_test_data(struct dmatest_data *d)
+{
+ __dmatest_free_test_data(d, d->cnt);
+}
+
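+/*
+ * Allocate @cnt buffers of @buf_size bytes plus alignment slack and
+ * record the aligned aliases; on failure free whatever was allocated.
+ */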
+static int dmatest_alloc_test_data(struct dmatest_data *d,
+ unsigned int buf_size, u8 align)
+{
+ unsigned int i = 0;
+
+ d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!d->raw)
+ return -ENOMEM;
+
+ d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!d->aligned)
+ goto err;
+
+ for (i = 0; i < d->cnt; i++) {
+ d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+ if (!d->raw[i])
+ goto err;
+
+ /* align to alignment restriction */
+ if (align)
+ d->aligned[i] = PTR_ALIGN(d->raw[i], align);
+ else
+ d->aligned[i] = d->raw[i];
+ }
+
+ return 0;
+err:
+ __dmatest_free_test_data(d, i);
+ return -ENOMEM;
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -511,8 +563,9 @@ static int dmatest_func(void *data)
enum dma_ctrl_flags flags;
u8 *pq_coefs = NULL;
int ret;
- int src_cnt;
- int dst_cnt;
+ unsigned int buf_size;
+ struct dmatest_data *src;
+ struct dmatest_data *dst;
int i;
ktime_t ktime, start, diff;
ktime_t filltime = 0;
@@ -535,25 +588,27 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
+ src = &thread->src;
+ dst = &thread->dst;
if (thread->type == DMA_MEMCPY) {
align = params->alignment < 0 ? dev->copy_align :
params->alignment;
- src_cnt = dst_cnt = 1;
+ src->cnt = dst->cnt = 1;
} else if (thread->type == DMA_MEMSET) {
align = params->alignment < 0 ? dev->fill_align :
params->alignment;
- src_cnt = dst_cnt = 1;
+ src->cnt = dst->cnt = 1;
is_memset = true;
} else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
- dst_cnt = 1;
+ src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
+ dst->cnt = 1;
align = params->alignment < 0 ? dev->xor_align :
params->alignment;
} else if (thread->type == DMA_PQ) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
- dst_cnt = 2;
+ src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
+ dst->cnt = 2;
align = params->alignment < 0 ? dev->pq_align :
params->alignment;
@@ -561,75 +616,38 @@ static int dmatest_func(void *data)
if (!pq_coefs)
goto err_thread_type;
- for (i = 0; i < src_cnt; i++)
+ for (i = 0; i < src->cnt; i++)
pq_coefs[i] = 1;
} else
goto err_thread_type;
/* Check if buffer count fits into map count variable (u8) */
- if ((src_cnt + dst_cnt) >= 255) {
+ if ((src->cnt + dst->cnt) >= 255) {
pr_err("too many buffers (%d of 255 supported)\n",
- src_cnt + dst_cnt);
+ src->cnt + dst->cnt);
goto err_free_coefs;
}
- if (1 << align > params->buf_size) {
+ buf_size = params->buf_size;
+ if (1 << align > buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
- params->buf_size, 1 << align);
+ buf_size, 1 << align);
goto err_free_coefs;
}
- thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->srcs)
+ if (dmatest_alloc_test_data(src, buf_size, align) < 0)
goto err_free_coefs;
- thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->usrcs)
- goto err_usrcs;
-
- for (i = 0; i < src_cnt; i++) {
- thread->usrcs[i] = kmalloc(params->buf_size + align,
- GFP_KERNEL);
- if (!thread->usrcs[i])
- goto err_srcbuf;
-
- /* align srcs to alignment restriction */
- if (align)
- thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
- else
- thread->srcs[i] = thread->usrcs[i];
- }
- thread->srcs[i] = NULL;
-
- thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->dsts)
- goto err_dsts;
-
- thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
- if (!thread->udsts)
- goto err_udsts;
-
- for (i = 0; i < dst_cnt; i++) {
- thread->udsts[i] = kmalloc(params->buf_size + align,
- GFP_KERNEL);
- if (!thread->udsts[i])
- goto err_dstbuf;
-
- /* align dsts to alignment restriction */
- if (align)
- thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
- else
- thread->dsts[i] = thread->udsts[i];
- }
- thread->dsts[i] = NULL;
+ if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
+ goto err_src;
set_user_nice(current, 10);
- srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
if (!srcs)
- goto err_dstbuf;
+ goto err_dst;
- dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
if (!dma_pq)
goto err_srcs_array;
@@ -644,21 +662,21 @@ static int dmatest_func(void *data)
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *um;
dma_addr_t *dsts;
- unsigned int src_off, dst_off, len;
+ unsigned int len;
total_tests++;
if (params->transfer_size) {
- if (params->transfer_size >= params->buf_size) {
+ if (params->transfer_size >= buf_size) {
pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
- params->transfer_size, params->buf_size);
+ params->transfer_size, buf_size);
break;
}
len = params->transfer_size;
} else if (params->norandom) {
- len = params->buf_size;
+ len = buf_size;
} else {
- len = dmatest_random() % params->buf_size + 1;
+ len = dmatest_random() % buf_size + 1;
}
/* Do not alter transfer size explicitly defined by user */
@@ -670,57 +688,57 @@ static int dmatest_func(void *data)
total_len += len;
if (params->norandom) {
- src_off = 0;
- dst_off = 0;
+ src->off = 0;
+ dst->off = 0;
} else {
- src_off = dmatest_random() % (params->buf_size - len + 1);
- dst_off = dmatest_random() % (params->buf_size - len + 1);
+ src->off = dmatest_random() % (buf_size - len + 1);
+ dst->off = dmatest_random() % (buf_size - len + 1);
- src_off = (src_off >> align) << align;
- dst_off = (dst_off >> align) << align;
+ src->off = (src->off >> align) << align;
+ dst->off = (dst->off >> align) << align;
}
if (!params->noverify) {
start = ktime_get();
- dmatest_init_srcs(thread->srcs, src_off, len,
- params->buf_size, is_memset);
- dmatest_init_dsts(thread->dsts, dst_off, len,
- params->buf_size, is_memset);
+ dmatest_init_srcs(src->aligned, src->off, len,
+ buf_size, is_memset);
+ dmatest_init_dsts(dst->aligned, dst->off, len,
+ buf_size, is_memset);
diff = ktime_sub(ktime_get(), start);
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
+ um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
result("unmap data NULL", total_tests,
- src_off, dst_off, len, ret);
+ src->off, dst->off, len, ret);
continue;
}
- um->len = params->buf_size;
- for (i = 0; i < src_cnt; i++) {
- void *buf = thread->srcs[i];
+ um->len = buf_size;
+ for (i = 0; i < src->cnt; i++) {
+ void *buf = src->aligned[i];
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
- srcs[i] = um->addr[i] + src_off;
+ srcs[i] = um->addr[i] + src->off;
ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
result("src mapping error", total_tests,
- src_off, dst_off, len, ret);
+ src->off, dst->off, len, ret);
goto error_unmap_continue;
}
um->to_cnt++;
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
- dsts = &um->addr[src_cnt];
- for (i = 0; i < dst_cnt; i++) {
- void *buf = thread->dsts[i];
+ dsts = &um->addr[src->cnt];
+ for (i = 0; i < dst->cnt; i++) {
+ void *buf = dst->aligned[i];
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
@@ -729,7 +747,7 @@ static int dmatest_func(void *data)
ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
result("dst mapping error", total_tests,
- src_off, dst_off, len, ret);
+ src->off, dst->off, len, ret);
goto error_unmap_continue;
}
um->bidi_cnt++;
@@ -737,29 +755,29 @@ static int dmatest_func(void *data)
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
- dsts[0] + dst_off,
+ dsts[0] + dst->off,
srcs[0], len, flags);
else if (thread->type == DMA_MEMSET)
tx = dev->device_prep_dma_memset(chan,
- dsts[0] + dst_off,
- *(thread->srcs[0] + src_off),
+ dsts[0] + dst->off,
+ *(src->aligned[0] + src->off),
len, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
- dsts[0] + dst_off,
- srcs, src_cnt,
+ dsts[0] + dst->off,
+ srcs, src->cnt,
len, flags);
else if (thread->type == DMA_PQ) {
- for (i = 0; i < dst_cnt; i++)
- dma_pq[i] = dsts[i] + dst_off;
+ for (i = 0; i < dst->cnt; i++)
+ dma_pq[i] = dsts[i] + dst->off;
tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
- src_cnt, pq_coefs,
+ src->cnt, pq_coefs,
len, flags);
}
if (!tx) {
- result("prep error", total_tests, src_off,
- dst_off, len, ret);
+ result("prep error", total_tests, src->off,
+ dst->off, len, ret);
msleep(100);
goto error_unmap_continue;
}
@@ -770,8 +788,8 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
- result("submit error", total_tests, src_off,
- dst_off, len, ret);
+ result("submit error", total_tests, src->off,
+ dst->off, len, ret);
msleep(100);
goto error_unmap_continue;
}
@@ -783,58 +801,58 @@ static int dmatest_func(void *data)
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (!done->done) {
- result("test timed out", total_tests, src_off, dst_off,
+ result("test timed out", total_tests, src->off, dst->off,
len, 0);
goto error_unmap_continue;
} else if (status != DMA_COMPLETE) {
result(status == DMA_ERROR ?
"completion error status" :
- "completion busy status", total_tests, src_off,
- dst_off, len, ret);
+ "completion busy status", total_tests, src->off,
+ dst->off, len, ret);
goto error_unmap_continue;
}
dmaengine_unmap_put(um);
if (params->noverify) {
- verbose_result("test passed", total_tests, src_off,
- dst_off, len, 0);
+ verbose_result("test passed", total_tests, src->off,
+ dst->off, len, 0);
continue;
}
start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
- error_count = dmatest_verify(thread->srcs, 0, src_off,
+ error_count = dmatest_verify(src->aligned, 0, src->off,
0, PATTERN_SRC, true, is_memset);
- error_count += dmatest_verify(thread->srcs, src_off,
- src_off + len, src_off,
+ error_count += dmatest_verify(src->aligned, src->off,
+ src->off + len, src->off,
PATTERN_SRC | PATTERN_COPY, true, is_memset);
- error_count += dmatest_verify(thread->srcs, src_off + len,
- params->buf_size, src_off + len,
+ error_count += dmatest_verify(src->aligned, src->off + len,
+ buf_size, src->off + len,
PATTERN_SRC, true, is_memset);
pr_debug("%s: verifying dest buffer...\n", current->comm);
- error_count += dmatest_verify(thread->dsts, 0, dst_off,
+ error_count += dmatest_verify(dst->aligned, 0, dst->off,
0, PATTERN_DST, false, is_memset);
- error_count += dmatest_verify(thread->dsts, dst_off,
- dst_off + len, src_off,
+ error_count += dmatest_verify(dst->aligned, dst->off,
+ dst->off + len, src->off,
PATTERN_SRC | PATTERN_COPY, false, is_memset);
- error_count += dmatest_verify(thread->dsts, dst_off + len,
- params->buf_size, dst_off + len,
+ error_count += dmatest_verify(dst->aligned, dst->off + len,
+ buf_size, dst->off + len,
PATTERN_DST, false, is_memset);
diff = ktime_sub(ktime_get(), start);
comparetime = ktime_add(comparetime, diff);
if (error_count) {
- result("data error", total_tests, src_off, dst_off,
+ result("data error", total_tests, src->off, dst->off,
len, error_count);
failed_tests++;
} else {
- verbose_result("test passed", total_tests, src_off,
- dst_off, len, 0);
+ verbose_result("test passed", total_tests, src->off,
+ dst->off, len, 0);
}
continue;
@@ -852,19 +870,10 @@ error_unmap_continue:
kfree(dma_pq);
err_srcs_array:
kfree(srcs);
-err_dstbuf:
- for (i = 0; thread->udsts[i]; i++)
- kfree(thread->udsts[i]);
- kfree(thread->udsts);
-err_udsts:
- kfree(thread->dsts);
-err_dsts:
-err_srcbuf:
- for (i = 0; thread->usrcs[i]; i++)
- kfree(thread->usrcs[i]);
- kfree(thread->usrcs);
-err_usrcs:
- kfree(thread->srcs);
+err_dst:
+ dmatest_free_test_data(dst);
+err_src:
+ dmatest_free_test_data(src);
err_free_coefs:
kfree(pq_coefs);
err_thread_type:
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index f8888dc0b8dc..18b6014cf9b4 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -75,7 +75,7 @@ struct __packed axi_dma_lli {
__le32 sstat;
__le32 dstat;
__le32 status_lo;
- __le32 ststus_hi;
+ __le32 status_hi;
__le32 reserved_lo;
__le32 reserved_hi;
};
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 04b9728c1d26..e5162690de8f 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
#
# DMA engine configuration for dw
#
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
index 2b949c2e4504..63ed895c09aa 100644
--- a/drivers/dma/dw/Makefile
+++ b/drivers/dma/dw/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o
-dw_dmac_core-objs := core.o
+dw_dmac_core-objs := core.o dw.o idma32.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
dw_dmac-objs := platform.o
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index dc053e62f894..21cb2a58dbd2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Core driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007-2008 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
* Copyright (C) 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
@@ -37,27 +34,6 @@
* support descriptor writeback.
*/
-#define DWC_DEFAULT_CTLLO(_chan) ({ \
- struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
- struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
- bool _is_slave = is_slave_direction(_dwc->direction); \
- u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
- DW_DMA_MSIZE_16; \
- u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
- DW_DMA_MSIZE_16; \
- u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \
- _dwc->dws.p_master : _dwc->dws.m_master; \
- u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \
- _dwc->dws.p_master : _dwc->dws.m_master; \
- \
- (DWC_CTLL_DST_MSIZE(_dmsize) \
- | DWC_CTLL_SRC_MSIZE(_smsize) \
- | DWC_CTLL_LLP_D_EN \
- | DWC_CTLL_LLP_S_EN \
- | DWC_CTLL_DMS(_dms) \
- | DWC_CTLL_SMS(_sms)); \
- })
-
/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS \
BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
@@ -138,44 +114,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
dwc->descs_allocated--;
}
-static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc)
-{
- u32 cfghi = 0;
- u32 cfglo = 0;
-
- /* Set default burst alignment */
- cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
-
- /* Low 4 bits of the request lines */
- cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
- cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
-
- /* Request line extension (2 bits) */
- cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
- cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
-
- channel_writel(dwc, CFG_LO, cfglo);
- channel_writel(dwc, CFG_HI, cfghi);
-}
-
-static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- u32 cfghi = DWC_CFGH_FIFO_MODE;
- u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
- bool hs_polarity = dwc->dws.hs_polarity;
-
- cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
- cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl);
-
- /* Set polarity of handshake interface */
- cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
-
- channel_writel(dwc, CFG_LO, cfglo);
- channel_writel(dwc, CFG_HI, cfghi);
-}
-
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
@@ -183,10 +121,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
return;
- if (dw->pdata->is_idma32)
- dwc_initialize_chan_idma32(dwc);
- else
- dwc_initialize_chan_dw(dwc);
+ dw->initialize_chan(dwc);
/* Enable interrupts */
channel_set_bit(dw, MASK.XFER, dwc->mask);
@@ -215,37 +150,6 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
cpu_relax();
}
-static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes,
- unsigned int width, size_t *len)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- u32 block;
-
- /* Always in bytes for iDMA 32-bit */
- if (dw->pdata->is_idma32)
- width = 0;
-
- if ((bytes >> width) > dwc->block_size) {
- block = dwc->block_size;
- *len = block << width;
- } else {
- block = bytes >> width;
- *len = bytes;
- }
-
- return block;
-}
-
-static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-
- if (dw->pdata->is_idma32)
- return IDMA32C_CTLH_BLOCK_TS(block);
-
- return DWC_CTLH_BLOCK_TS(block) << width;
-}
-
/*----------------------------------------------------------------------*/
/* Perform single block transfer */
@@ -391,10 +295,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
u32 ctlhi = channel_readl(dwc, CTL_HI);
u32 ctllo = channel_readl(dwc, CTL_LO);
- return block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
+ return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -651,7 +556,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
unsigned int src_width;
unsigned int dst_width;
unsigned int data_width = dw->pdata->data_width[m_master];
- u32 ctllo;
+ u32 ctllo, ctlhi;
u8 lms = DWC_LLP_LMS(m_master);
dev_vdbg(chan2dev(chan),
@@ -667,7 +572,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
src_width = dst_width = __ffs(data_width | src | dest | len);
- ctllo = DWC_DEFAULT_CTLLO(chan)
+ ctllo = dw->prepare_ctllo(dwc)
| DWC_CTLL_DST_WIDTH(dst_width)
| DWC_CTLL_SRC_WIDTH(src_width)
| DWC_CTLL_DST_INC
@@ -680,10 +585,12 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (!desc)
goto err_desc_get;
+ ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);
+
lli_write(desc, sar, src + offset);
lli_write(desc, dar, dest + offset);
lli_write(desc, ctllo, ctllo);
- lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count));
+ lli_write(desc, ctlhi, ctlhi);
desc->len = xfer_count;
if (!first) {
@@ -721,7 +628,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
struct dw_desc *prev;
struct dw_desc *first;
- u32 ctllo;
+ u32 ctllo, ctlhi;
u8 m_master = dwc->dws.m_master;
u8 lms = DWC_LLP_LMS(m_master);
dma_addr_t reg;
@@ -745,10 +652,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
case DMA_MEM_TO_DEV:
reg_width = __ffs(sconfig->dst_addr_width);
reg = sconfig->dst_addr;
- ctllo = (DWC_DEFAULT_CTLLO(chan)
+ ctllo = dw->prepare_ctllo(dwc)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
- | DWC_CTLL_SRC_INC);
+ | DWC_CTLL_SRC_INC;
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
DWC_CTLL_FC(DW_DMA_FC_D_M2P);
@@ -768,9 +675,11 @@ slave_sg_todev_fill_desc:
if (!desc)
goto err_desc_get;
+ ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);
+
lli_write(desc, sar, mem);
lli_write(desc, dar, reg);
- lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen));
+ lli_write(desc, ctlhi, ctlhi);
lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
desc->len = dlen;
@@ -793,10 +702,10 @@ slave_sg_todev_fill_desc:
case DMA_DEV_TO_MEM:
reg_width = __ffs(sconfig->src_addr_width);
reg = sconfig->src_addr;
- ctllo = (DWC_DEFAULT_CTLLO(chan)
+ ctllo = dw->prepare_ctllo(dwc)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
- | DWC_CTLL_SRC_FIX);
+ | DWC_CTLL_SRC_FIX;
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
@@ -814,9 +723,11 @@ slave_sg_fromdev_fill_desc:
if (!desc)
goto err_desc_get;
+ ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);
+
lli_write(desc, sar, reg);
lli_write(desc, dar, mem);
- lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen));
+ lli_write(desc, ctlhi, ctlhi);
mem_width = __ffs(data_width | mem | dlen);
lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
desc->len = dlen;
@@ -876,22 +787,12 @@ EXPORT_SYMBOL_GPL(dw_dma_filter);
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dma_slave_config *sc = &dwc->dma_sconfig;
struct dw_dma *dw = to_dw_dma(chan->device);
- /*
- * Fix sconfig's burst size according to dw_dmac. We need to convert
- * them as:
- * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
- *
- * NOTE: burst size 2 is not supported by DesignWare controller.
- * iDMA 32-bit supports it.
- */
- u32 s = dw->pdata->is_idma32 ? 1 : 2;
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
- sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
- sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
return 0;
}
@@ -900,16 +801,9 @@ static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
unsigned int count = 20; /* timeout iterations */
- u32 cfglo;
- cfglo = channel_readl(dwc, CFG_LO);
- if (dw->pdata->is_idma32) {
- if (drain)
- cfglo |= IDMA32C_CFGL_CH_DRAIN;
- else
- cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
- }
- channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ dw->suspend_chan(dwc, drain);
+
while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
udelay(2);
@@ -928,11 +822,11 @@ static int dwc_pause(struct dma_chan *chan)
return 0;
}
-static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
{
- u32 cfglo = channel_readl(dwc, CFG_LO);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+ dw->resume_chan(dwc, drain);
clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}
@@ -945,7 +839,7 @@ static int dwc_resume(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
- dwc_chan_resume(dwc);
+ dwc_chan_resume(dwc, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -968,7 +862,7 @@ static int dwc_terminate_all(struct dma_chan *chan)
dwc_chan_disable(dw, dwc);
- dwc_chan_resume(dwc);
+ dwc_chan_resume(dwc, true);
/* active_list entries will end up before queued entries */
list_splice_init(&dwc->queue, &list);
@@ -1058,33 +952,7 @@ static void dwc_issue_pending(struct dma_chan *chan)
/*----------------------------------------------------------------------*/
-/*
- * Program FIFO size of channels.
- *
- * By default full FIFO (512 bytes) is assigned to channel 0. Here we
- * slice FIFO on equal parts between channels.
- */
-static void idma32_fifo_partition(struct dw_dma *dw)
-{
- u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
- IDMA32C_FP_UPDATE;
- u64 fifo_partition = 0;
-
- if (!dw->pdata->is_idma32)
- return;
-
- /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
- fifo_partition |= value << 0;
-
- /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
- fifo_partition |= value << 32;
-
- /* Program FIFO Partition registers - 64 bytes per channel */
- idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
- idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
-}
-
-static void dw_dma_off(struct dw_dma *dw)
+void do_dw_dma_off(struct dw_dma *dw)
{
unsigned int i;
@@ -1103,7 +971,7 @@ static void dw_dma_off(struct dw_dma *dw)
clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}
-static void dw_dma_on(struct dw_dma *dw)
+void do_dw_dma_on(struct dw_dma *dw)
{
dma_writel(dw, CFG, DW_CFG_DMA_EN);
}
@@ -1139,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
/* Enable controller here if needed */
if (!dw->in_use)
- dw_dma_on(dw);
+ do_dw_dma_on(dw);
dw->in_use |= dwc->mask;
return 0;
@@ -1150,7 +1018,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
unsigned long flags;
- LIST_HEAD(list);
dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
dwc->descs_allocated);
@@ -1177,30 +1044,25 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* Disable controller in case it was a last user */
dw->in_use &= ~dwc->mask;
if (!dw->in_use)
- dw_dma_off(dw);
+ do_dw_dma_off(dw);
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
-int dw_dma_probe(struct dw_dma_chip *chip)
+int do_dma_probe(struct dw_dma_chip *chip)
{
+ struct dw_dma *dw = chip->dw;
struct dw_dma_platform_data *pdata;
- struct dw_dma *dw;
bool autocfg = false;
unsigned int dw_params;
unsigned int i;
int err;
- dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
- if (!dw)
- return -ENOMEM;
-
dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
if (!dw->pdata)
return -ENOMEM;
dw->regs = chip->regs;
- chip->dw = dw;
pm_runtime_get_sync(chip->dev);
@@ -1227,8 +1089,6 @@ int dw_dma_probe(struct dw_dma_chip *chip)
pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
/* Fill platform data with the default values */
- pdata->is_private = true;
- pdata->is_memcpy = true;
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
@@ -1252,15 +1112,10 @@ int dw_dma_probe(struct dw_dma_chip *chip)
dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
/* Force dma off, just in case */
- dw_dma_off(dw);
-
- idma32_fifo_partition(dw);
+ dw->disable(dw);
/* Device and instance ID for IRQ and DMA pool */
- if (pdata->is_idma32)
- snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id);
- else
- snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id);
+ dw->set_device_name(dw, chip->id);
/* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
@@ -1340,10 +1195,8 @@ int dw_dma_probe(struct dw_dma_chip *chip)
/* Set capabilities */
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
- if (pdata->is_private)
- dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
- if (pdata->is_memcpy)
- dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
+ dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
dw->dma.dev = chip->dev;
dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
@@ -1384,16 +1237,15 @@ err_pdata:
pm_runtime_put_sync_suspend(chip->dev);
return err;
}
-EXPORT_SYMBOL_GPL(dw_dma_probe);
-int dw_dma_remove(struct dw_dma_chip *chip)
+int do_dma_remove(struct dw_dma_chip *chip)
{
struct dw_dma *dw = chip->dw;
struct dw_dma_chan *dwc, *_dwc;
pm_runtime_get_sync(chip->dev);
- dw_dma_off(dw);
+ do_dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
free_irq(chip->irq, dw);
@@ -1408,27 +1260,24 @@ int dw_dma_remove(struct dw_dma_chip *chip)
pm_runtime_put_sync_suspend(chip->dev);
return 0;
}
-EXPORT_SYMBOL_GPL(dw_dma_remove);
-int dw_dma_disable(struct dw_dma_chip *chip)
+int do_dw_dma_disable(struct dw_dma_chip *chip)
{
struct dw_dma *dw = chip->dw;
- dw_dma_off(dw);
+ dw->disable(dw);
return 0;
}
-EXPORT_SYMBOL_GPL(dw_dma_disable);
+EXPORT_SYMBOL_GPL(do_dw_dma_disable);
-int dw_dma_enable(struct dw_dma_chip *chip)
+int do_dw_dma_enable(struct dw_dma_chip *chip)
{
struct dw_dma *dw = chip->dw;
- idma32_fifo_partition(dw);
-
- dw_dma_on(dw);
+ dw->enable(dw);
return 0;
}
-EXPORT_SYMBOL_GPL(dw_dma_enable);
+EXPORT_SYMBOL_GPL(do_dw_dma_enable);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
new file mode 100644
index 000000000000..7a085b3c1854
--- /dev/null
+++ b/drivers/dma/dw/dw.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2007-2008 Atmel Corporation
+// Copyright (C) 2010-2011 ST Microelectronics
+// Copyright (C) 2013,2018 Intel Corporation
+
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "internal.h"
+
+static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+ bool hs_polarity = dwc->dws.hs_polarity;
+
+ cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
+ cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl);
+
+ /* Set polarity of handshake interface */
+ cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void dw_dma_suspend_chan(struct dw_dma_chan *dwc, bool drain)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+}
+
+static void dw_dma_resume_chan(struct dw_dma_chan *dwc, bool drain)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+}
+
+static u32 dw_dma_bytes2block(struct dw_dma_chan *dwc,
+ size_t bytes, unsigned int width, size_t *len)
+{
+ u32 block;
+
+ if ((bytes >> width) > dwc->block_size) {
+ block = dwc->block_size;
+ *len = dwc->block_size << width;
+ } else {
+ block = bytes >> width;
+ *len = bytes;
+ }
+
+ return block;
+}
+
+static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
+{
+ return DWC_CTLH_BLOCK_TS(block) << width;
+}
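
For reference (annotation, not part of the patch): the dw variant's bytes2block()/block2bytes() pair treats BLOCK_TS as a count of (1 << width)-byte items, capped at the channel's block_size. A minimal standalone sketch of that arithmetic, with illustrative numbers:

/*
 * Standalone model of dw_dma_bytes2block() (illustrative values,
 * not part of the patch). BLOCK_TS counts (1 << width)-byte items.
 */
#include <stdio.h>

static unsigned int bytes2block(size_t bytes, unsigned int width,
				size_t block_size, size_t *len)
{
	unsigned int block;

	if ((bytes >> width) > block_size) {
		block = block_size;
		*len = block_size << width;
	} else {
		block = bytes >> width;
		*len = bytes;
	}
	return block;
}

int main(void)
{
	size_t len;
	/* 10000-byte copy, 32-bit items (width = 2), 2047-item limit */
	unsigned int block = bytes2block(10000, 2, 2047, &len);

	/*
	 * Prints "block=2047 len=8188"; the remaining 1812 bytes go
	 * into the next hardware descriptor, exactly as the loop in
	 * dwc_prep_dma_memcpy() does with dw->bytes2block().
	 */
	printf("block=%u len=%zu\n", block, len);
	return 0;
}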
+
+static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc)
+{
+ struct dma_slave_config *sconfig = &dwc->dma_sconfig;
+ bool is_slave = is_slave_direction(dwc->direction);
+ u8 smsize = is_slave ? sconfig->src_maxburst : DW_DMA_MSIZE_16;
+ u8 dmsize = is_slave ? sconfig->dst_maxburst : DW_DMA_MSIZE_16;
+ u8 p_master = dwc->dws.p_master;
+ u8 m_master = dwc->dws.m_master;
+ u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master;
+ u8 sms = (dwc->direction == DMA_DEV_TO_MEM) ? p_master : m_master;
+
+ return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
+ DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) |
+ DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms);
+}
+
+static void dw_dma_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
+{
+ /*
+ * Fix burst size according to dw_dmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ */
+ *maxburst = *maxburst > 1 ? fls(*maxburst) - 2 : 0;
+}
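
The encode_maxburst() callbacks reduce to a small fls()-based mapping; a standalone check of both encodings (annotation only, with the semantics taken from the comments in this patch):

/*
 * Standalone check of the two encode_maxburst() flavours
 * (not part of the patch).
 */
#include <stdio.h>

static unsigned int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* mirrors kernel fls() */
}

int main(void)
{
	static const unsigned int bursts[] = { 1, 2, 4, 8, 16 };
	unsigned int i;

	for (i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++) {
		unsigned int b = bursts[i];
		/* dw: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3 (2 unsupported) */
		unsigned int dw = b > 1 ? fls32(b) - 2 : 0;
		/* idma32: 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4 */
		unsigned int idma32 = b > 1 ? fls32(b) - 1 : 0;

		printf("maxburst %2u -> dw %u, idma32 %u\n", b, dw, idma32);
	}
	return 0;
}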
+
+static void dw_dma_set_device_name(struct dw_dma *dw, int id)
+{
+ snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", id);
+}
+
+static void dw_dma_disable(struct dw_dma *dw)
+{
+ do_dw_dma_off(dw);
+}
+
+static void dw_dma_enable(struct dw_dma *dw)
+{
+ do_dw_dma_on(dw);
+}
+
+int dw_dma_probe(struct dw_dma_chip *chip)
+{
+ struct dw_dma *dw;
+
+ dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ /* Channel operations */
+ dw->initialize_chan = dw_dma_initialize_chan;
+ dw->suspend_chan = dw_dma_suspend_chan;
+ dw->resume_chan = dw_dma_resume_chan;
+ dw->prepare_ctllo = dw_dma_prepare_ctllo;
+ dw->encode_maxburst = dw_dma_encode_maxburst;
+ dw->bytes2block = dw_dma_bytes2block;
+ dw->block2bytes = dw_dma_block2bytes;
+
+ /* Device operations */
+ dw->set_device_name = dw_dma_set_device_name;
+ dw->disable = dw_dma_disable;
+ dw->enable = dw_dma_enable;
+
+ chip->dw = dw;
+ return do_dma_probe(chip);
+}
+EXPORT_SYMBOL_GPL(dw_dma_probe);
+
+int dw_dma_remove(struct dw_dma_chip *chip)
+{
+ return do_dma_remove(chip);
+}
+EXPORT_SYMBOL_GPL(dw_dma_remove);
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c
new file mode 100644
index 000000000000..f00657308811
--- /dev/null
+++ b/drivers/dma/dw/idma32.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013,2018 Intel Corporation
+
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "internal.h"
+
+static void idma32_initialize_chan(struct dw_dma_chan *dwc)
+{
+ u32 cfghi = 0;
+ u32 cfglo = 0;
+
+ /* Set default burst alignment */
+ cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
+
+ /* Low 4 bits of the request lines */
+ cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
+ cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
+
+ /* Request line extension (2 bits) */
+ cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
+ cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+}
+
+static void idma32_suspend_chan(struct dw_dma_chan *dwc, bool drain)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ if (drain)
+ cfglo |= IDMA32C_CFGL_CH_DRAIN;
+
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+}
+
+static void idma32_resume_chan(struct dw_dma_chan *dwc, bool drain)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ if (drain)
+ cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
+
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+}
+
+static u32 idma32_bytes2block(struct dw_dma_chan *dwc,
+ size_t bytes, unsigned int width, size_t *len)
+{
+ u32 block;
+
+ if (bytes > dwc->block_size) {
+ block = dwc->block_size;
+ *len = dwc->block_size;
+ } else {
+ block = bytes;
+ *len = bytes;
+ }
+
+ return block;
+}
+
+static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
+{
+ return IDMA32C_CTLH_BLOCK_TS(block);
+}
+
+static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
+{
+ struct dma_slave_config *sconfig = &dwc->dma_sconfig;
+ bool is_slave = is_slave_direction(dwc->direction);
+ u8 smsize = is_slave ? sconfig->src_maxburst : IDMA32_MSIZE_8;
+ u8 dmsize = is_slave ? sconfig->dst_maxburst : IDMA32_MSIZE_8;
+
+ return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
+ DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
+}
+
+static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
+{
+ *maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0;
+}
+
+static void idma32_set_device_name(struct dw_dma *dw, int id)
+{
+ snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id);
+}
+
+/*
+ * Program the FIFO size of the channels.
+ *
+ * By default the full FIFO (512 bytes) is assigned to channel 0. Here
+ * we slice the FIFO into equal parts between the channels.
+ */
+static void idma32_fifo_partition(struct dw_dma *dw)
+{
+ u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
+ IDMA32C_FP_UPDATE;
+ u64 fifo_partition = 0;
+
+ /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
+ fifo_partition |= value << 0;
+
+ /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
+ fifo_partition |= value << 32;
+
+ /* Program FIFO Partition registers - 64 bytes per channel */
+ idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
+ idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
+}
+
+static void idma32_disable(struct dw_dma *dw)
+{
+ do_dw_dma_off(dw);
+ idma32_fifo_partition(dw);
+}
+
+static void idma32_enable(struct dw_dma *dw)
+{
+ idma32_fifo_partition(dw);
+ do_dw_dma_on(dw);
+}
+
+int idma32_dma_probe(struct dw_dma_chip *chip)
+{
+ struct dw_dma *dw;
+
+ dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ /* Channel operations */
+ dw->initialize_chan = idma32_initialize_chan;
+ dw->suspend_chan = idma32_suspend_chan;
+ dw->resume_chan = idma32_resume_chan;
+ dw->prepare_ctllo = idma32_prepare_ctllo;
+ dw->encode_maxburst = idma32_encode_maxburst;
+ dw->bytes2block = idma32_bytes2block;
+ dw->block2bytes = idma32_block2bytes;
+
+ /* Device operations */
+ dw->set_device_name = idma32_set_device_name;
+ dw->disable = idma32_disable;
+ dw->enable = idma32_enable;
+
+ chip->dw = dw;
+ return do_dma_probe(chip);
+}
+EXPORT_SYMBOL_GPL(idma32_dma_probe);
+
+int idma32_dma_remove(struct dw_dma_chip *chip)
+{
+ return do_dma_remove(chip);
+}
+EXPORT_SYMBOL_GPL(idma32_dma_remove);
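
With the per-controller callbacks gathered into struct dw_dma, adding a further flavour is a matter of filling the same two tables. A purely hypothetical sketch of that pattern: the foo_* names are invented, and each foo_* callback is assumed to be defined earlier in the same variant file, in the shape shown by dw.c and idma32.c above.

/*
 * Hypothetical sketch only: none of the foo_* symbols exist in the
 * tree. A real variant would define its own callbacks, since the
 * generic dw_dma_* helpers are static in dw.c.
 */
int foo_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma *dw;

	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	/* Channel operations */
	dw->initialize_chan = foo_initialize_chan;
	dw->suspend_chan = foo_suspend_chan;
	dw->resume_chan = foo_resume_chan;
	dw->prepare_ctllo = foo_prepare_ctllo;
	dw->encode_maxburst = foo_encode_maxburst;
	dw->bytes2block = foo_bytes2block;
	dw->block2bytes = foo_block2bytes;

	/* Device operations */
	dw->set_device_name = foo_set_device_name;
	dw->disable = foo_disable;
	dw->enable = foo_enable;

	chip->dw = dw;
	return do_dma_probe(chip);
}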
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 41439732ff6b..1dd7a4e6dd23 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DMA_DW_INTERNAL_H
@@ -15,8 +12,14 @@
#include "regs.h"
-int dw_dma_disable(struct dw_dma_chip *chip);
-int dw_dma_enable(struct dw_dma_chip *chip);
+int do_dma_probe(struct dw_dma_chip *chip);
+int do_dma_remove(struct dw_dma_chip *chip);
+
+void do_dw_dma_on(struct dw_dma *dw);
+void do_dw_dma_off(struct dw_dma *dw);
+
+int do_dw_dma_disable(struct dw_dma_chip *chip);
+int do_dw_dma_enable(struct dw_dma_chip *chip);
extern bool dw_dma_filter(struct dma_chan *chan, void *param);
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 7778ed705a1a..e79a75db0852 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2013 Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -15,21 +12,33 @@
#include "internal.h"
-static struct dw_dma_platform_data mrfld_pdata = {
+struct dw_dma_pci_data {
+ const struct dw_dma_platform_data *pdata;
+ int (*probe)(struct dw_dma_chip *chip);
+};
+
+static const struct dw_dma_pci_data dw_pci_data = {
+ .probe = dw_dma_probe,
+};
+
+static const struct dw_dma_platform_data idma32_pdata = {
.nr_channels = 8,
- .is_private = true,
- .is_memcpy = true,
- .is_idma32 = true,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 131071,
.nr_masters = 1,
.data_width = {4},
+ .multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
+};
+
+static const struct dw_dma_pci_data idma32_pci_data = {
+ .pdata = &idma32_pdata,
+ .probe = idma32_dma_probe,
};
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
- const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
+ const struct dw_dma_pci_data *data = (void *)pid->driver_data;
struct dw_dma_chip *chip;
int ret;
@@ -62,9 +71,9 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
chip->id = pdev->devfn;
chip->regs = pcim_iomap_table(pdev)[0];
chip->irq = pdev->irq;
- chip->pdata = pdata;
+ chip->pdata = data->pdata;
- ret = dw_dma_probe(chip);
+ ret = data->probe(chip);
if (ret)
return ret;
@@ -90,7 +99,7 @@ static int dw_pci_suspend_late(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
- return dw_dma_disable(chip);
+ return do_dw_dma_disable(chip);
};
static int dw_pci_resume_early(struct device *dev)
@@ -98,7 +107,7 @@ static int dw_pci_resume_early(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
- return dw_dma_enable(chip);
+ return do_dw_dma_enable(chip);
};
#endif /* CONFIG_PM_SLEEP */
@@ -109,24 +118,24 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
static const struct pci_device_id dw_pci_id_table[] = {
/* Medfield (GPDMA) */
- { PCI_VDEVICE(INTEL, 0x0827) },
+ { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_data },
/* BayTrail */
- { PCI_VDEVICE(INTEL, 0x0f06) },
- { PCI_VDEVICE(INTEL, 0x0f40) },
+ { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_data },
- /* Merrifield iDMA 32-bit (GPDMA) */
- { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&mrfld_pdata },
+ /* Merrifield */
+ { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_pci_data },
/* Braswell */
- { PCI_VDEVICE(INTEL, 0x2286) },
- { PCI_VDEVICE(INTEL, 0x22c0) },
+ { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
/* Haswell */
- { PCI_VDEVICE(INTEL, 0x9c60) },
+ { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
/* Broadwell */
- { PCI_VDEVICE(INTEL, 0x9ce0) },
+ { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_pci_data },
{ }
};
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 31ff8113c3de..382dfd9e9600 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Platform driver for the Synopsys DesignWare DMA Controller
*
@@ -6,10 +7,6 @@
* Copyright (C) 2013 Intel Corporation
*
* Some parts of this driver are derived from the original dw_dmac.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -128,15 +125,6 @@ dw_dma_parse_dt(struct platform_device *pdev)
pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
- if (of_property_read_bool(np, "is_private"))
- pdata->is_private = true;
-
- /*
- * All known devices, which use DT for configuration, support
- * memory-to-memory transfers. So enable it by default.
- */
- pdata->is_memcpy = true;
-
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
@@ -264,7 +252,7 @@ static void dw_shutdown(struct platform_device *pdev)
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
/*
- * We have to call dw_dma_disable() to stop any ongoing transfer. On
+ * We have to call do_dw_dma_disable() to stop any ongoing transfer. On
* some platforms we can't do that since DMA device is powered off.
* Moreover we have no possibility to check if the platform is affected
* or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
@@ -273,7 +261,7 @@ static void dw_shutdown(struct platform_device *pdev)
* used by the driver.
*/
pm_runtime_get_sync(chip->dev);
- dw_dma_disable(chip);
+ do_dw_dma_disable(chip);
pm_runtime_put_sync_suspend(chip->dev);
clk_disable_unprepare(chip->clk);
@@ -303,7 +291,7 @@ static int dw_suspend_late(struct device *dev)
{
struct dw_dma_chip *chip = dev_get_drvdata(dev);
- dw_dma_disable(chip);
+ do_dw_dma_disable(chip);
clk_disable_unprepare(chip->clk);
return 0;
@@ -318,7 +306,7 @@ static int dw_resume_early(struct device *dev)
if (ret)
return ret;
- return dw_dma_enable(chip);
+ return do_dw_dma_enable(chip);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 646c9c960c07..3fce66ecee7a 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare AHB DMA Controller
*
* Copyright (C) 2005-2007 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
* Copyright (C) 2016 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
@@ -222,6 +219,16 @@ enum dw_dma_msize {
/* iDMA 32-bit support */
+/* bursts size */
+enum idma32_msize {
+ IDMA32_MSIZE_1,
+ IDMA32_MSIZE_2,
+ IDMA32_MSIZE_4,
+ IDMA32_MSIZE_8,
+ IDMA32_MSIZE_16,
+ IDMA32_MSIZE_32,
+};
+
/* Bitfields in CTL_HI */
#define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0)
#define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK)
@@ -312,6 +319,21 @@ struct dw_dma {
u8 all_chan_mask;
u8 in_use;
+ /* Channel operations */
+ void (*initialize_chan)(struct dw_dma_chan *dwc);
+ void (*suspend_chan)(struct dw_dma_chan *dwc, bool drain);
+ void (*resume_chan)(struct dw_dma_chan *dwc, bool drain);
+ u32 (*prepare_ctllo)(struct dw_dma_chan *dwc);
+ void (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
+ u32 (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
+ unsigned int width, size_t *len);
+ size_t (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);
+
+ /* Device operations */
+ void (*set_device_name)(struct dw_dma *dw, int id);
+ void (*disable)(struct dw_dma *dw);
+ void (*enable)(struct dw_dma *dw);
+
/* platform data */
struct dw_dma_platform_data *pdata;
};
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 8876c4c1bb2c..680b2a00a953 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -6,6 +6,7 @@
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include "fsl-edma-common.h"
@@ -173,12 +174,62 @@ int fsl_edma_resume(struct dma_chan *chan)
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);
+static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
+{
+ if (fsl_chan->dma_dir != DMA_NONE)
+ dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
+ fsl_chan->dma_dev_addr,
+ fsl_chan->dma_dev_size,
+ fsl_chan->dma_dir, 0);
+ fsl_chan->dma_dir = DMA_NONE;
+}
+
+static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
+ enum dma_transfer_direction dir)
+{
+ struct device *dev = fsl_chan->vchan.chan.device->dev;
+ enum dma_data_direction dma_dir;
+ phys_addr_t addr = 0;
+ u32 size = 0;
+
+ switch (dir) {
+ case DMA_MEM_TO_DEV:
+ dma_dir = DMA_FROM_DEVICE;
+ addr = fsl_chan->cfg.dst_addr;
+ size = fsl_chan->cfg.dst_maxburst;
+ break;
+ case DMA_DEV_TO_MEM:
+ dma_dir = DMA_TO_DEVICE;
+ addr = fsl_chan->cfg.src_addr;
+ size = fsl_chan->cfg.src_maxburst;
+ break;
+ default:
+ dma_dir = DMA_NONE;
+ break;
+ }
+
+ /* Already mapped for this config? */
+ if (fsl_chan->dma_dir == dma_dir)
+ return true;
+
+ fsl_edma_unprep_slave_dma(fsl_chan);
+
+ fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
+ if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
+ return false;
+ fsl_chan->dma_dev_size = size;
+ fsl_chan->dma_dir = dma_dir;
+
+ return true;
+}
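
fsl_edma_prep_slave_dma() above caches one dma_map_resource() mapping of the device FIFO per direction and reuses it until the slave config changes. A minimal hedged sketch of the same idiom, with placeholder names:

/*
 * Sketch of the dma_map_resource() idiom used above (not part of
 * the patch): map a device's MMIO FIFO once, reuse the dma_addr_t
 * for every descriptor, and unmap when the slave config changes or
 * the channel is freed. All names here are illustrative.
 */
#include <linux/dma-mapping.h>

static dma_addr_t example_map_fifo(struct device *dev,
				   phys_addr_t fifo_phys, size_t size,
				   enum dma_data_direction dir)
{
	dma_addr_t handle = dma_map_resource(dev, fifo_phys, size, dir, 0);

	if (dma_mapping_error(dev, handle))
		return DMA_MAPPING_ERROR;

	/* ... program descriptors with 'handle' ... */
	/* Later: dma_unmap_resource(dev, handle, size, dir, 0); */
	return handle;
}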
+
int fsl_edma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
+ fsl_edma_unprep_slave_dma(fsl_chan);
return 0;
}
@@ -339,9 +390,7 @@ static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_desc *fsl_desc;
int i;
- fsl_desc = kzalloc(sizeof(*fsl_desc) +
- sizeof(struct fsl_edma_sw_tcd) *
- sg_len, GFP_NOWAIT);
+ fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
if (!fsl_desc)
return NULL;
@@ -378,6 +427,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
if (!is_slave_direction(direction))
return NULL;
+ if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
+ return NULL;
+
sg_len = buf_len / period_len;
fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
if (!fsl_desc)
@@ -409,11 +461,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
if (direction == DMA_MEM_TO_DEV) {
src_addr = dma_buf_next;
- dst_addr = fsl_chan->cfg.dst_addr;
+ dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
} else {
- src_addr = fsl_chan->cfg.src_addr;
+ src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
@@ -444,6 +496,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
if (!is_slave_direction(direction))
return NULL;
+ if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
+ return NULL;
+
fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
if (!fsl_desc)
return NULL;
@@ -468,11 +523,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
- dst_addr = fsl_chan->cfg.dst_addr;
+ dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
} else {
- src_addr = fsl_chan->cfg.src_addr;
+ src_addr = fsl_chan->dma_dev_addr;
dst_addr = sg_dma_address(sg);
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
@@ -555,6 +610,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_edma_chan_mux(fsl_chan, 0, false);
fsl_chan->edesc = NULL;
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ fsl_edma_unprep_slave_dma(fsl_chan);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 8917e8865959..b435d8e1e3a1 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -6,6 +6,7 @@
#ifndef _FSL_EDMA_COMMON_H_
#define _FSL_EDMA_COMMON_H_
+#include <linux/dma-direction.h>
#include "virt-dma.h"
#define EDMA_CR_EDBG BIT(1)
@@ -120,6 +121,9 @@ struct fsl_edma_chan {
struct dma_slave_config cfg;
u32 attr;
struct dma_pool *tcd_pool;
+ dma_addr_t dma_dev_addr;
+ u32 dma_dev_size;
+ enum dma_data_direction dma_dir;
};
struct fsl_edma_desc {
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 34d70112fcc9..75e8a7ba3a22 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -254,6 +254,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->pm_state = RUNNING;
fsl_chan->slave_id = 0;
fsl_chan->idle = true;
+ fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
new file mode 100644
index 000000000000..aa1d0ae3d207
--- /dev/null
+++ b/drivers/dma/fsl-qdma.c
@@ -0,0 +1,1259 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2014-2015 Freescale
+// Copyright 2018 NXP
+
+/*
+ * Driver for NXP Layerscape Queue Direct Memory Access Controller
+ *
+ * Author:
+ * Wen He <wen.he_1@nxp.com>
+ * Jiaheng Fan <jiaheng.fan@nxp.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_dma.h>
+#include <linux/dma-mapping.h>
+
+#include "virt-dma.h"
+#include "fsldma.h"
+
+/* Register related definition */
+#define FSL_QDMA_DMR 0x0
+#define FSL_QDMA_DSR 0x4
+#define FSL_QDMA_DEIER 0xe00
+#define FSL_QDMA_DEDR 0xe04
+#define FSL_QDMA_DECFDW0R 0xe10
+#define FSL_QDMA_DECFDW1R 0xe14
+#define FSL_QDMA_DECFDW2R 0xe18
+#define FSL_QDMA_DECFDW3R 0xe1c
+#define FSL_QDMA_DECFQIDR 0xe30
+#define FSL_QDMA_DECBR 0xe34
+
+#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
+#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
+#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
+#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
+#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
+#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
+#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
+#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
+
+#define FSL_QDMA_SQDPAR 0x80c
+#define FSL_QDMA_SQEPAR 0x814
+#define FSL_QDMA_BSQMR 0x800
+#define FSL_QDMA_BSQSR 0x804
+#define FSL_QDMA_BSQICR 0x828
+#define FSL_QDMA_CQMR 0xa00
+#define FSL_QDMA_CQDSCR1 0xa08
+#define FSL_QDMA_CQDSCR2 0xa0c
+#define FSL_QDMA_CQIER 0xa10
+#define FSL_QDMA_CQEDR 0xa14
+#define FSL_QDMA_SQCCMR 0xa20
+
+/* Register bit fields and masks */
+#define FSL_QDMA_CQIDR_SQT BIT(15)
+#define QDMA_CCDF_FORMAT BIT(29)
+#define QDMA_CCDF_SER BIT(30)
+#define QDMA_SG_FIN BIT(30)
+#define QDMA_SG_LEN_MASK GENMASK(29, 0)
+#define QDMA_CCDF_MASK GENMASK(28, 20)
+
+#define FSL_QDMA_DEDR_CLEAR GENMASK(31, 0)
+#define FSL_QDMA_BCQIDR_CLEAR GENMASK(31, 0)
+#define FSL_QDMA_DEIER_CLEAR GENMASK(31, 0)
+
+#define FSL_QDMA_BCQIER_CQTIE BIT(15)
+#define FSL_QDMA_BCQIER_CQPEIE BIT(23)
+#define FSL_QDMA_BSQICR_ICEN BIT(31)
+
+#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
+#define FSL_QDMA_CQIER_MEIE BIT(31)
+#define FSL_QDMA_CQIER_TEIE BIT(0)
+#define FSL_QDMA_SQCCMR_ENTER_WM BIT(21)
+
+#define FSL_QDMA_BCQMR_EN BIT(31)
+#define FSL_QDMA_BCQMR_EI BIT(30)
+#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
+#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
+
+#define FSL_QDMA_BCQSR_QF BIT(16)
+#define FSL_QDMA_BCQSR_XOFF BIT(0)
+
+#define FSL_QDMA_BSQMR_EN BIT(31)
+#define FSL_QDMA_BSQMR_DI BIT(30)
+#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
+
+#define FSL_QDMA_BSQSR_QE BIT(17)
+
+#define FSL_QDMA_DMR_DQD BIT(30)
+#define FSL_QDMA_DSR_DB BIT(31)
+
+/* Size related definition */
+#define FSL_QDMA_QUEUE_MAX 8
+#define FSL_QDMA_COMMAND_BUFFER_SIZE 64
+#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
+#define FSL_QDMA_QUEUE_NUM_MAX 8
+
+/* Field definition for CMD */
+#define FSL_QDMA_CMD_RWTTYPE 0x4
+#define FSL_QDMA_CMD_LWC 0x2
+#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
+#define FSL_QDMA_CMD_NS_OFFSET 27
+#define FSL_QDMA_CMD_DQOS_OFFSET 24
+#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
+#define FSL_QDMA_CMD_DSEN_OFFSET 19
+#define FSL_QDMA_CMD_LWC_OFFSET 16
+
+/* Field definition for Descriptor offset */
+#define QDMA_CCDF_STATUS 20
+#define QDMA_CCDF_OFFSET 20
+
+/* Field definition for safe loop count */
+#define FSL_QDMA_HALT_COUNT 1500
+#define FSL_QDMA_MAX_SIZE 16385
+#define FSL_QDMA_COMP_TIMEOUT 1000
+#define FSL_COMMAND_QUEUE_OVERFLOW 10
+
+#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
+ (((fsl_qdma_engine)->block_offset) * (x))
+
+/**
+ * struct fsl_qdma_format - This struct describes the compound descriptor
+ * format used by qDMA.
+ * @status: Command status and enqueue status notification.
+ * @cfg: Frame offset and frame format.
+ * @addr_lo: Lower 32 bits of the 40-bit memory address of
+ * the compound descriptor.
+ * @addr_hi: Upper 8 bits of the same 40-bit memory address.
+ * @__reserved1: Reserved field.
+ * @cfg8b_w1: Compound descriptor command queue origin produced
+ * by qDMA and dynamic debug field.
+ * @data: Pointer to the 40-bit memory address, describing the
+ * DMA source and destination information.
+ */
+struct fsl_qdma_format {
+ __le32 status;
+ __le32 cfg;
+ union {
+ struct {
+ __le32 addr_lo;
+ u8 addr_hi;
+ u8 __reserved1[2];
+ u8 cfg8b_w1;
+ } __packed;
+ __le64 data;
+ };
+} __packed;
+
+/* Previously handled qDMA status notification, tracked per CPU */
+struct fsl_pre_status {
+ u64 addr;
+ u8 queue;
+};
+
+static DEFINE_PER_CPU(struct fsl_pre_status, pre);
+
+struct fsl_qdma_chan {
+ struct virt_dma_chan vchan;
+ struct virt_dma_desc vdesc;
+ enum dma_status status;
+ struct fsl_qdma_engine *qdma;
+ struct fsl_qdma_queue *queue;
+};
+
+struct fsl_qdma_queue {
+ struct fsl_qdma_format *virt_head;
+ struct fsl_qdma_format *virt_tail;
+ struct list_head comp_used;
+ struct list_head comp_free;
+ struct dma_pool *comp_pool;
+ struct dma_pool *desc_pool;
+ spinlock_t queue_lock;
+ dma_addr_t bus_addr;
+ u32 n_cq;
+ u32 id;
+ struct fsl_qdma_format *cq;
+ void __iomem *block_base;
+};
+
+struct fsl_qdma_comp {
+ dma_addr_t bus_addr;
+ dma_addr_t desc_bus_addr;
+ struct fsl_qdma_format *virt_addr;
+ struct fsl_qdma_format *desc_virt_addr;
+ struct fsl_qdma_chan *qchan;
+ struct virt_dma_desc vdesc;
+ struct list_head list;
+};
+
+struct fsl_qdma_engine {
+ struct dma_device dma_dev;
+ void __iomem *ctrl_base;
+ void __iomem *status_base;
+ void __iomem *block_base;
+ u32 n_chans;
+ u32 n_queues;
+ struct mutex fsl_qdma_mutex;
+ int error_irq;
+ int *queue_irq;
+ u32 feature;
+ struct fsl_qdma_queue *queue;
+ struct fsl_qdma_queue **status;
+ struct fsl_qdma_chan *chans;
+ int block_number;
+ int block_offset;
+ int irq_base;
+ int desc_allocated;
+};
+
+static inline u64
+qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
+{
+ return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
+}
+
+static inline void
+qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+{
+ ccdf->addr_hi = upper_32_bits(addr);
+ ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
+}
+
+static inline u8
+qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
+{
+ return ccdf->cfg8b_w1 & U8_MAX;
+}
+
+static inline int
+qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
+{
+ return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
+}
+
+static inline void
+qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
+{
+ ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT | offset);
+}
+
+static inline int
+qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
+{
+ return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
+}
+
+static inline void
+qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
+{
+ ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
+}
+
+static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+{
+ csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
+}
+
+static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
+{
+ csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+}
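
qdma_desc_addr_set64() and qdma_ccdf_addr_get64() split a 40-bit bus address into a 32-bit addr_lo and an 8-bit addr_hi, with the get side masking the little-endian data word down to 40 bits. A standalone round-trip check of that packing, for illustration only:

/* Standalone check of the 40-bit address packing (not part of the patch) */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x12345678abULL;	 /* any 40-bit address */
	uint32_t addr_lo = (uint32_t)addr;	 /* low 32 bits */
	uint8_t addr_hi = (uint8_t)(addr >> 32); /* high 8 bits */
	uint64_t back = ((uint64_t)addr_hi << 32) | addr_lo;

	/* The driver's get side masks with U64_MAX >> 24 for the
	 * same 40-bit result. */
	printf("round trip %s\n", back == addr ? "ok" : "broken");
	return 0;
}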
+
+static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
+{
+ return FSL_DMA_IN(qdma, addr, 32);
+}
+
+static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
+ void __iomem *addr)
+{
+ FSL_DMA_OUT(qdma, addr, val, 32);
+}
+
+static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fsl_qdma_chan, vchan.chan);
+}
+
+static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct fsl_qdma_comp, vdesc);
+}
+
+static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+ struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+ struct fsl_qdma_comp *comp_temp, *_comp_temp;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+
+ if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
+ return;
+
+ list_for_each_entry_safe(comp_temp, _comp_temp,
+ &fsl_queue->comp_used, list) {
+ dma_pool_free(fsl_queue->comp_pool,
+ comp_temp->virt_addr,
+ comp_temp->bus_addr);
+ dma_pool_free(fsl_queue->desc_pool,
+ comp_temp->desc_virt_addr,
+ comp_temp->desc_bus_addr);
+ list_del(&comp_temp->list);
+ kfree(comp_temp);
+ }
+
+ list_for_each_entry_safe(comp_temp, _comp_temp,
+ &fsl_queue->comp_free, list) {
+ dma_pool_free(fsl_queue->comp_pool,
+ comp_temp->virt_addr,
+ comp_temp->bus_addr);
+ dma_pool_free(fsl_queue->desc_pool,
+ comp_temp->desc_virt_addr,
+ comp_temp->desc_bus_addr);
+ list_del(&comp_temp->list);
+ kfree(comp_temp);
+ }
+
+ dma_pool_destroy(fsl_queue->comp_pool);
+ dma_pool_destroy(fsl_queue->desc_pool);
+
+ fsl_qdma->desc_allocated--;
+ fsl_queue->comp_pool = NULL;
+ fsl_queue->desc_pool = NULL;
+}
+
+static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
+ dma_addr_t dst, dma_addr_t src, u32 len)
+{
+ struct fsl_qdma_format *sdf, *ddf;
+ struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
+
+ ccdf = fsl_comp->virt_addr;
+ csgf_desc = fsl_comp->virt_addr + 1;
+ csgf_src = fsl_comp->virt_addr + 2;
+ csgf_dest = fsl_comp->virt_addr + 3;
+ sdf = fsl_comp->desc_virt_addr;
+ ddf = fsl_comp->desc_virt_addr + 1;
+
+ memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
+ memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
+ /* Head Command Descriptor(Frame Descriptor) */
+ qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
+ qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
+ qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
+ /* Status notification is enqueued to status queue. */
+ /* Compound Command Descriptor(Frame List Table) */
+ qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
+ /* The length must be 32 for a Compound S/G Descriptor */
+ qdma_csgf_set_len(csgf_desc, 32);
+ qdma_desc_addr_set64(csgf_src, src);
+ qdma_csgf_set_len(csgf_src, len);
+ qdma_desc_addr_set64(csgf_dest, dst);
+ qdma_csgf_set_len(csgf_dest, len);
+ /* This entry is the last entry. */
+ qdma_csgf_set_f(csgf_dest, len);
+ /* Descriptor Buffer */
+ sdf->data =
+ cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
+ FSL_QDMA_CMD_RWTTYPE_OFFSET);
+ ddf->data =
+ cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
+ FSL_QDMA_CMD_RWTTYPE_OFFSET);
+ ddf->data |=
+ cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+}
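
For readability (annotation, not part of the patch), the buffers filled by fsl_qdma_comp_fill_memcpy() lay out as follows; each fsl_qdma_format entry is 16 bytes, which is why the frame descriptor points at bus_addr + 16:

/*
 * The 64-byte command buffer (FSL_QDMA_COMMAND_BUFFER_SIZE) holds
 * exactly four 16-byte fsl_qdma_format entries:
 *
 *   virt_addr + 0: ccdf      - frame descriptor
 *   virt_addr + 1: csgf_desc - frame list entry -> SD/DD buffer
 *   virt_addr + 2: csgf_src  - source entry (len = transfer length)
 *   virt_addr + 3: csgf_dest - destination entry, FIN bit set
 *
 * The 32-byte descriptor buffer holds the remaining two entries:
 *
 *   desc_virt_addr + 0: sdf - source descriptor (RWTTYPE)
 *   desc_virt_addr + 1: ddf - destination descriptor (RWTTYPE | LWC)
 */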
+
+/*
+ * Pre-request full command descriptor for enqueue.
+ */
+static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
+{
+ int i;
+ struct fsl_qdma_comp *comp_temp, *_comp_temp;
+
+ for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLOW; i++) {
+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+ if (!comp_temp)
+ goto err_alloc;
+ comp_temp->virt_addr =
+ dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
+ &comp_temp->bus_addr);
+ if (!comp_temp->virt_addr)
+ goto err_dma_alloc;
+
+ comp_temp->desc_virt_addr =
+ dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
+ &comp_temp->desc_bus_addr);
+ if (!comp_temp->desc_virt_addr)
+ goto err_desc_dma_alloc;
+
+ list_add_tail(&comp_temp->list, &queue->comp_free);
+ }
+
+ return 0;
+
+err_desc_dma_alloc:
+ dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
+ comp_temp->bus_addr);
+
+err_dma_alloc:
+ kfree(comp_temp);
+
+err_alloc:
+ list_for_each_entry_safe(comp_temp, _comp_temp,
+ &queue->comp_free, list) {
+ if (comp_temp->virt_addr)
+ dma_pool_free(queue->comp_pool,
+ comp_temp->virt_addr,
+ comp_temp->bus_addr);
+ if (comp_temp->desc_virt_addr)
+ dma_pool_free(queue->desc_pool,
+ comp_temp->desc_virt_addr,
+ comp_temp->desc_bus_addr);
+
+ list_del(&comp_temp->list);
+ kfree(comp_temp);
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct fsl_qdma_comp
+*fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+ unsigned long flags;
+ struct fsl_qdma_comp *comp_temp;
+ int timeout = FSL_QDMA_COMP_TIMEOUT;
+ struct fsl_qdma_queue *queue = fsl_chan->queue;
+
+ while (timeout--) {
+ spin_lock_irqsave(&queue->queue_lock, flags);
+ if (!list_empty(&queue->comp_free)) {
+ comp_temp = list_first_entry(&queue->comp_free,
+ struct fsl_qdma_comp,
+ list);
+ list_del(&comp_temp->list);
+
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+ comp_temp->qchan = fsl_chan;
+ return comp_temp;
+ }
+ spin_unlock_irqrestore(&queue->queue_lock, flags);
+ udelay(1);
+ }
+
+ return NULL;
+}
+
+static struct fsl_qdma_queue
+*fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
+ struct fsl_qdma_engine *fsl_qdma)
+{
+ int ret, len, i, j;
+ int queue_num, block_number;
+ unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
+ struct fsl_qdma_queue *queue_head, *queue_temp;
+
+ queue_num = fsl_qdma->n_queues;
+ block_number = fsl_qdma->block_number;
+
+ if (queue_num > FSL_QDMA_QUEUE_MAX)
+ queue_num = FSL_QDMA_QUEUE_MAX;
+ len = sizeof(*queue_head) * queue_num * block_number;
+ queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!queue_head)
+ return NULL;
+
+ ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
+ queue_size, queue_num);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get queue-sizes.\n");
+ return NULL;
+ }
+ for (j = 0; j < block_number; j++) {
+ for (i = 0; i < queue_num; i++) {
+ if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
+ queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+ dev_err(&pdev->dev,
+ "Get wrong queue-sizes.\n");
+ return NULL;
+ }
+ queue_temp = queue_head + i + (j * queue_num);
+
+ queue_temp->cq =
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ queue_size[i],
+ &queue_temp->bus_addr,
+ GFP_KERNEL);
+ if (!queue_temp->cq)
+ return NULL;
+ queue_temp->block_base = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+ queue_temp->n_cq = queue_size[i];
+ queue_temp->id = i;
+ queue_temp->virt_head = queue_temp->cq;
+ queue_temp->virt_tail = queue_temp->cq;
+ /*
+ * List for queue command buffer
+ */
+ INIT_LIST_HEAD(&queue_temp->comp_used);
+ spin_lock_init(&queue_temp->queue_lock);
+ }
+ }
+ return queue_head;
+}
+
+static struct fsl_qdma_queue
+*fsl_qdma_prep_status_queue(struct platform_device *pdev)
+{
+ int ret;
+ unsigned int status_size;
+ struct fsl_qdma_queue *status_head;
+ struct device_node *np = pdev->dev.of_node;
+
+ ret = of_property_read_u32(np, "status-sizes", &status_size);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get status-sizes.\n");
+ return NULL;
+ }
+ if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
+ status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+ dev_err(&pdev->dev, "Get wrong status_size.\n");
+ return NULL;
+ }
+ status_head = devm_kzalloc(&pdev->dev,
+ sizeof(*status_head), GFP_KERNEL);
+ if (!status_head)
+ return NULL;
+
+ /*
+ * Buffer for queue command
+ */
+ status_head->cq = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ status_size,
+ &status_head->bus_addr,
+ GFP_KERNEL);
+ if (!status_head->cq) {
+ devm_kfree(&pdev->dev, status_head);
+ return NULL;
+ }
+ status_head->n_cq = status_size;
+ status_head->virt_head = status_head->cq;
+ status_head->virt_tail = status_head->cq;
+ status_head->comp_pool = NULL;
+
+ return status_head;
+}
+
+static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
+{
+ u32 reg;
+ int i, j, count = FSL_QDMA_HALT_COUNT;
+ void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
+
+ /* Disable the command queue and wait for idle state. */
+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+ reg |= FSL_QDMA_DMR_DQD;
+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+ for (j = 0; j < fsl_qdma->block_number; j++) {
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+ for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
+ }
+ while (1) {
+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
+ if (!(reg & FSL_QDMA_DSR_DB))
+ break;
+ if (count-- < 0)
+ return -EBUSY;
+ udelay(100);
+ }
+
+ for (j = 0; j < fsl_qdma->block_number; j++) {
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+
+ /* Disable status queue. */
+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
+
+ /*
+ * Clear the command queue interrupt detect register for
+ * all queues.
+ */
+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
+ block + FSL_QDMA_BCQIDR(0));
+ }
+
+ return 0;
+}
+
+static int
+fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
+ void *block,
+ int id)
+{
+ bool duplicate;
+ u32 reg, i, count;
+ struct fsl_qdma_queue *temp_queue;
+ struct fsl_qdma_format *status_addr;
+ struct fsl_qdma_comp *fsl_comp = NULL;
+ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+ struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
+
+ count = FSL_QDMA_MAX_SIZE;
+
+ while (count--) {
+ duplicate = 0;
+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
+ if (reg & FSL_QDMA_BSQSR_QE)
+ return 0;
+
+ status_addr = fsl_status->virt_head;
+
+ if (qdma_ccdf_get_queue(status_addr) ==
+ __this_cpu_read(pre.queue) &&
+ qdma_ccdf_addr_get64(status_addr) ==
+ __this_cpu_read(pre.addr))
+ duplicate = 1;
+ i = qdma_ccdf_get_queue(status_addr) +
+ id * fsl_qdma->n_queues;
+ __this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
+ __this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
+ temp_queue = fsl_queue + i;
+
+ spin_lock(&temp_queue->queue_lock);
+ if (list_empty(&temp_queue->comp_used)) {
+ if (!duplicate) {
+ spin_unlock(&temp_queue->queue_lock);
+ return -EAGAIN;
+ }
+ } else {
+ fsl_comp = list_first_entry(&temp_queue->comp_used,
+ struct fsl_qdma_comp, list);
+ if (fsl_comp->bus_addr + 16 !=
+ __this_cpu_read(pre.addr)) {
+ if (!duplicate) {
+ spin_unlock(&temp_queue->queue_lock);
+ return -EAGAIN;
+ }
+ }
+ }
+
+ if (duplicate) {
+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+ reg |= FSL_QDMA_BSQMR_DI;
+ qdma_desc_addr_set64(status_addr, 0x0);
+ fsl_status->virt_head++;
+ if (fsl_status->virt_head == fsl_status->cq
+ + fsl_status->n_cq)
+ fsl_status->virt_head = fsl_status->cq;
+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+ spin_unlock(&temp_queue->queue_lock);
+ continue;
+ }
+ list_del(&fsl_comp->list);
+
+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+ reg |= FSL_QDMA_BSQMR_DI;
+ qdma_desc_addr_set64(status_addr, 0x0);
+ fsl_status->virt_head++;
+ if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
+ fsl_status->virt_head = fsl_status->cq;
+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+ spin_unlock(&temp_queue->queue_lock);
+
+ spin_lock(&fsl_comp->qchan->vchan.lock);
+ vchan_cookie_complete(&fsl_comp->vdesc);
+ fsl_comp->qchan->status = DMA_COMPLETE;
+ spin_unlock(&fsl_comp->qchan->vchan.lock);
+ }
+
+ return 0;
+}
+
+static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
+{
+ unsigned int intr;
+ struct fsl_qdma_engine *fsl_qdma = dev_id;
+ void __iomem *status = fsl_qdma->status_base;
+
+ intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
+
+ if (intr) {
+ dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
+ return IRQ_NONE;
+ }
+
+ qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
+{
+ int id;
+ unsigned int intr, reg;
+ struct fsl_qdma_engine *fsl_qdma = dev_id;
+ void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
+
+ id = irq - fsl_qdma->irq_base;
+ if (id < 0 || id >= fsl_qdma->block_number) {
+ dev_err(fsl_qdma->dma_dev.dev,
+ "irq %d is wrong, irq_base is %d\n",
+ irq, fsl_qdma->irq_base);
+ return IRQ_NONE;
+ }
+
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
+
+ intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
+
+ if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
+ intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
+
+ if (intr != 0) {
+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+ reg |= FSL_QDMA_DMR_DQD;
+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
+ dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
+ }
+
+ /* Clear all detected events and interrupts. */
+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
+ block + FSL_QDMA_BCQIDR(0));
+
+ return IRQ_HANDLED;
+}
+
+static int
+fsl_qdma_irq_init(struct platform_device *pdev,
+ struct fsl_qdma_engine *fsl_qdma)
+{
+ int i;
+ int cpu;
+ int ret;
+ char irq_name[20];
+
+ fsl_qdma->error_irq =
+ platform_get_irq_byname(pdev, "qdma-error");
+ if (fsl_qdma->error_irq < 0) {
+ dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
+ return fsl_qdma->error_irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
+ fsl_qdma_error_handler, 0,
+ "qDMA error", fsl_qdma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
+ return ret;
+ }
+
+ for (i = 0; i < fsl_qdma->block_number; i++) {
+ sprintf(irq_name, "qdma-queue%d", i);
+ fsl_qdma->queue_irq[i] =
+ platform_get_irq_byname(pdev, irq_name);
+
+ if (fsl_qdma->queue_irq[i] < 0) {
+ dev_err(&pdev->dev,
+ "Can't get qdma queue %d irq.\n", i);
+ return fsl_qdma->queue_irq[i];
+ }
+
+ ret = devm_request_irq(&pdev->dev,
+ fsl_qdma->queue_irq[i],
+ fsl_qdma_queue_handler,
+ 0,
+ "qDMA queue",
+ fsl_qdma);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register qDMA queue IRQ.\n");
+ return ret;
+ }
+
+ cpu = i % num_online_cpus();
+ ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
+ get_cpu_mask(cpu));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't set cpu %d affinity to IRQ %d.\n",
+ cpu,
+ fsl_qdma->queue_irq[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void fsl_qdma_irq_exit(struct platform_device *pdev,
+ struct fsl_qdma_engine *fsl_qdma)
+{
+ int i;
+
+ devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
+ for (i = 0; i < fsl_qdma->block_number; i++)
+ devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
+}
+
+static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
+{
+ u32 reg;
+ int i, j, ret;
+ struct fsl_qdma_queue *temp;
+ void __iomem *status = fsl_qdma->status_base;
+ void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
+ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+
+ /* Try to halt the qDMA engine first. */
+ ret = fsl_qdma_halt(fsl_qdma);
+ if (ret) {
+ dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
+ return ret;
+ }
+
+ for (i = 0; i < fsl_qdma->block_number; i++) {
+ /*
+ * Clear the command queue interrupt detect register for
+ * all queues.
+ */
+
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
+ block + FSL_QDMA_BCQIDR(0));
+ }
+
+ for (j = 0; j < fsl_qdma->block_number; j++) {
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+ for (i = 0; i < fsl_qdma->n_queues; i++) {
+ temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+ /*
+ * Initialize Command Queue registers to
+ * point to the first
+ * command descriptor in memory.
+ * Dequeue Pointer Address Registers
+ * Enqueue Pointer Address Registers
+ */
+
+ qdma_writel(fsl_qdma, temp->bus_addr,
+ block + FSL_QDMA_BCQDPA_SADDR(i));
+ qdma_writel(fsl_qdma, temp->bus_addr,
+ block + FSL_QDMA_BCQEPA_SADDR(i));
+
+ /* Initialize the queue mode. */
+ reg = FSL_QDMA_BCQMR_EN;
+ reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
+ reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
+ }
+
+ /*
+ * Workaround for erratum: ERR010812.
+ * We must enable XOFF to avoid enqueue rejections.
+ * Setting SQCCMR ENTER_WM to 0x20.
+ */
+
+ qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
+ block + FSL_QDMA_SQCCMR);
+
+ /*
+ * Initialize status queue registers to point to the first
+ * command descriptor in memory.
+ * Dequeue Pointer Address Registers
+ * Enqueue Pointer Address Registers
+ */
+
+ qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
+ block + FSL_QDMA_SQEPAR);
+ qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
+ block + FSL_QDMA_SQDPAR);
+ /* Initialize status queue interrupt. */
+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
+ block + FSL_QDMA_BCQIER(0));
+ qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
+ FSL_QDMA_BSQICR_ICST(5) | 0x8000,
+ block + FSL_QDMA_BSQICR);
+ qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
+ FSL_QDMA_CQIER_TEIE,
+ block + FSL_QDMA_CQIER);
+
+ /* Initialize the status queue mode. */
+ reg = FSL_QDMA_BSQMR_EN;
+ reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2
+ (fsl_qdma->status[j]->n_cq) - 6);
+
+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+ }
+
+ /* Initialize controller interrupt register. */
+ qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
+ qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);
+
+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+ reg &= ~FSL_QDMA_DMR_DQD;
+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+
+ return 0;
+}
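
The queue mode fields programmed above are log2-encoded; a worked example of the arithmetic (annotation, not part of the patch):

/*
 * Example of the log2 encoding, e.g. for a 64-entry command queue:
 *
 *   ilog2(64) = 6  ->  CQ_SIZE = 6 - 6 = 0,  CD_THLD = 6 - 4 = 2
 *
 * and for the 16384-entry maximum:
 *
 *   ilog2(16384) = 14  ->  CQ_SIZE = 14 - 6 = 8
 *
 * which is consistent with FSL_QDMA_CIRCULAR_DESC_SIZE_MIN being 64,
 * the smallest size for which the CQ_SIZE field does not go negative.
 */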
+
+static struct dma_async_tx_descriptor *
+fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct fsl_qdma_comp *fsl_comp;
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+
+ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
+
+ if (!fsl_comp)
+ return NULL;
+
+ fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
+}
+
+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+ u32 reg;
+ struct virt_dma_desc *vdesc;
+ struct fsl_qdma_comp *fsl_comp;
+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+ void __iomem *block = fsl_queue->block_base;
+
+ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
+ if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
+ return;
+ vdesc = vchan_next_desc(&fsl_chan->vchan);
+ if (!vdesc)
+ return;
+ list_del(&vdesc->node);
+ fsl_comp = to_fsl_qdma_comp(vdesc);
+
+ memcpy(fsl_queue->virt_head++,
+ fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
+ if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+ fsl_queue->virt_head = fsl_queue->cq;
+
+ list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
+ barrier();
+ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
+ reg |= FSL_QDMA_BCQMR_EI;
+ qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+ fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ unsigned long flags;
+ struct fsl_qdma_comp *fsl_comp;
+ struct fsl_qdma_queue *fsl_queue;
+
+ fsl_comp = to_fsl_qdma_comp(vdesc);
+ fsl_queue = fsl_comp->qchan->queue;
+
+ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+ list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
+ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+static void fsl_qdma_issue_pending(struct dma_chan *chan)
+{
+ unsigned long flags;
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+
+ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+ spin_lock(&fsl_chan->vchan.lock);
+ if (vchan_issue_pending(&fsl_chan->vchan))
+ fsl_qdma_enqueue_desc(fsl_chan);
+ spin_unlock(&fsl_chan->vchan.lock);
+ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+static void fsl_qdma_synchronize(struct dma_chan *chan)
+{
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+
+ vchan_synchronize(&fsl_chan->vchan);
+}
+
+static int fsl_qdma_terminate_all(struct dma_chan *chan)
+{
+ LIST_HEAD(head);
+ unsigned long flags;
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ return 0;
+}
+
+static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ int ret;
+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+ struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+
+ if (fsl_queue->comp_pool && fsl_queue->desc_pool)
+ return fsl_qdma->desc_allocated;
+
+ INIT_LIST_HEAD(&fsl_queue->comp_free);
+
+ /* The DMA pool for the queue command buffer */
+ fsl_queue->comp_pool =
+ dma_pool_create("comp_pool",
+ chan->device->dev,
+ FSL_QDMA_COMMAND_BUFFER_SIZE,
+ 64, 0);
+ if (!fsl_queue->comp_pool)
+ return -ENOMEM;
+
+ /* The DMA pool for the descriptor (SD/DD) buffer */
+ fsl_queue->desc_pool =
+ dma_pool_create("desc_pool",
+ chan->device->dev,
+ FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
+ 32, 0);
+ if (!fsl_queue->desc_pool)
+ goto err_desc_pool;
+
+ ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
+ if (ret) {
+ dev_err(chan->device->dev,
+ "failed to alloc dma buffer for S/G descriptor\n");
+ goto err_mem;
+ }
+
+ fsl_qdma->desc_allocated++;
+ return fsl_qdma->desc_allocated;
+
+err_mem:
+ dma_pool_destroy(fsl_queue->desc_pool);
+err_desc_pool:
+ dma_pool_destroy(fsl_queue->comp_pool);
+ return -ENOMEM;
+}
+
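+/*
+ * Illustrative only, not driver code: buffers drawn from the pools above
+ * pair a CPU pointer with a bus address, and must be returned with both:
+ *
+ *	dma_addr_t bus;
+ *	void *cpu = dma_pool_alloc(pool, GFP_KERNEL, &bus);
+ *	if (cpu)
+ *		dma_pool_free(pool, cpu, bus);
+ */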
+static int fsl_qdma_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ int blk_num, blk_off;
+ u32 len, chans, queues;
+ struct resource *res;
+ struct fsl_qdma_chan *fsl_chan;
+ struct fsl_qdma_engine *fsl_qdma;
+ struct device_node *np = pdev->dev.of_node;
+
+ ret = of_property_read_u32(np, "dma-channels", &chans);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get dma-channels.\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "block-offset", &blk_off);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get block-offset.\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "block-number", &blk_num);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get block-number.\n");
+ return ret;
+ }
+
+ blk_num = min_t(int, blk_num, num_online_cpus());
+
+ len = sizeof(*fsl_qdma);
+ fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_qdma)
+ return -ENOMEM;
+
+ len = sizeof(*fsl_chan) * chans;
+ fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_qdma->chans)
+ return -ENOMEM;
+
+ len = sizeof(struct fsl_qdma_queue *) * blk_num;
+ fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_qdma->status)
+ return -ENOMEM;
+
+ len = sizeof(int) * blk_num;
+ fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!fsl_qdma->queue_irq)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get queues.\n");
+ return ret;
+ }
+
+ fsl_qdma->desc_allocated = 0;
+ fsl_qdma->n_chans = chans;
+ fsl_qdma->n_queues = queues;
+ fsl_qdma->block_number = blk_num;
+ fsl_qdma->block_offset = blk_off;
+
+ mutex_init(&fsl_qdma->fsl_qdma_mutex);
+
+ for (i = 0; i < fsl_qdma->block_number; i++) {
+ fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
+ if (!fsl_qdma->status[i])
+ return -ENOMEM;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_qdma->ctrl_base))
+ return PTR_ERR(fsl_qdma->ctrl_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_qdma->status_base))
+ return PTR_ERR(fsl_qdma->status_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fsl_qdma->block_base))
+ return PTR_ERR(fsl_qdma->block_base);
+ fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
+ if (!fsl_qdma->queue)
+ return -ENOMEM;
+
+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+ if (ret)
+ return ret;
+
+ fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
+ fsl_qdma->feature = of_property_read_bool(np, "big-endian");
+ INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
+
+ for (i = 0; i < fsl_qdma->n_chans; i++) {
+ struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+ fsl_chan->qdma = fsl_qdma;
+ fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
+ fsl_qdma->block_number);
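+ /* e.g. 2 blocks x 4 queues: chans 0..7 -> queues 0..7, chan 8 wraps to 0 */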
+ fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
+ vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
+ }
+
+ dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
+
+ fsl_qdma->dma_dev.dev = &pdev->dev;
+ fsl_qdma->dma_dev.device_free_chan_resources =
+ fsl_qdma_free_chan_resources;
+ fsl_qdma->dma_dev.device_alloc_chan_resources =
+ fsl_qdma_alloc_chan_resources;
+ fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
+ fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
+ fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
+ fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
+ fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
+
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+
+ platform_set_drvdata(pdev, fsl_qdma);
+
+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register NXP Layerscape qDMA engine.\n");
+ return ret;
+ }
+
+ ret = fsl_qdma_reg_init(fsl_qdma);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct fsl_qdma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&chan->vchan.chan.device_node);
+ tasklet_kill(&chan->vchan.task);
+ }
+}
+
+static int fsl_qdma_remove(struct platform_device *pdev)
+{
+ int i;
+ struct fsl_qdma_queue *status;
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+
+ fsl_qdma_irq_exit(pdev, fsl_qdma);
+ fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
+ of_dma_controller_free(np);
+ dma_async_device_unregister(&fsl_qdma->dma_dev);
+
+ for (i = 0; i < fsl_qdma->block_number; i++) {
+ status = fsl_qdma->status[i];
+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+ status->n_cq, status->cq, status->bus_addr);
+ }
+ return 0;
+}
+
+static const struct of_device_id fsl_qdma_dt_ids[] = {
+ { .compatible = "fsl,ls1021a-qdma", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
+
+static struct platform_driver fsl_qdma_driver = {
+ .driver = {
+ .name = "fsl-qdma",
+ .of_match_table = fsl_qdma_dt_ids,
+ },
+ .probe = fsl_qdma_probe,
+ .remove = fsl_qdma_remove,
+};
+
+module_platform_driver(fsl_qdma_driver);
+
+MODULE_ALIAS("platform:fsl-qdma");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
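The driver registers a plain DMA_MEMCPY capability, so clients drive it
through the generic dmaengine API. A hedged client-side sketch (error
handling trimmed; all of it is illustrative rather than taken from this
patch):

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* wait for completion, then dma_release_channel(chan) */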
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 9d360a3fbae3..1e38e6b94006 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -53,42 +53,42 @@ static const char msg_ld_oom[] = "No free memory for link descriptor";
static void set_sr(struct fsldma_chan *chan, u32 val)
{
- DMA_OUT(chan, &chan->regs->sr, val, 32);
+ FSL_DMA_OUT(chan, &chan->regs->sr, val, 32);
}
static u32 get_sr(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->sr, 32);
+ return FSL_DMA_IN(chan, &chan->regs->sr, 32);
}
static void set_mr(struct fsldma_chan *chan, u32 val)
{
- DMA_OUT(chan, &chan->regs->mr, val, 32);
+ FSL_DMA_OUT(chan, &chan->regs->mr, val, 32);
}
static u32 get_mr(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->mr, 32);
+ return FSL_DMA_IN(chan, &chan->regs->mr, 32);
}
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
- DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+ FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}
static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+ return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}
static void set_bcr(struct fsldma_chan *chan, u32 val)
{
- DMA_OUT(chan, &chan->regs->bcr, val, 32);
+ FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32);
}
static u32 get_bcr(struct fsldma_chan *chan)
{
- return DMA_IN(chan, &chan->regs->bcr, 32);
+ return FSL_DMA_IN(chan, &chan->regs->bcr, 32);
}
/*
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 4787d485dd76..a9b12f82b5c3 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -196,39 +196,67 @@ struct fsldma_chan {
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
-#ifndef __powerpc64__
-static u64 in_be64(const u64 __iomem *addr)
+#ifdef CONFIG_PPC
+#define fsl_ioread32(p) in_le32(p)
+#define fsl_ioread32be(p) in_be32(p)
+#define fsl_iowrite32(v, p) out_le32(p, v)
+#define fsl_iowrite32be(v, p) out_be32(p, v)
+
+#ifdef __powerpc64__
+#define fsl_ioread64(p) in_le64(p)
+#define fsl_ioread64be(p) in_be64(p)
+#define fsl_iowrite64(v, p) out_le64(p, v)
+#define fsl_iowrite64be(v, p) out_be64(p, v)
+#else
+static u64 fsl_ioread64(const u64 __iomem *addr)
{
- return ((u64)in_be32((u32 __iomem *)addr) << 32) |
- (in_be32((u32 __iomem *)addr + 1));
+ /* little-endian doubleword: the low word lives at the lower address */
+ u32 val_lo = in_le32((u32 __iomem *)addr);
+ u32 val_hi = in_le32((u32 __iomem *)addr + 1);
+
+ return ((u64)val_hi << 32) | val_lo;
}
-static void out_be64(u64 __iomem *addr, u64 val)
+static void fsl_iowrite64(u64 val, u64 __iomem *addr)
{
- out_be32((u32 __iomem *)addr, val >> 32);
- out_be32((u32 __iomem *)addr + 1, (u32)val);
+ out_le32((u32 __iomem *)addr + 1, val >> 32);
+ out_le32((u32 __iomem *)addr, (u32)val);
}
-/* There is no asm instructions for 64 bits reverse loads and stores */
-static u64 in_le64(const u64 __iomem *addr)
+static u64 fsl_ioread64be(const u64 __iomem *addr)
{
- return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
- (in_le32((u32 __iomem *)addr));
+ /* big-endian doubleword: the high word lives at the lower address */
+ u32 val_hi = in_be32((u32 __iomem *)addr);
+ u32 val_lo = in_be32((u32 __iomem *)addr + 1);
+
+ return ((u64)val_hi << 32) | val_lo;
}
-static void out_le64(u64 __iomem *addr, u64 val)
+static void fsl_iowrite64be(u64 val, u64 __iomem *addr)
{
- out_le32((u32 __iomem *)addr + 1, val >> 32);
- out_le32((u32 __iomem *)addr, (u32)val);
+ out_be32((u32 __iomem *)addr, val >> 32);
+ out_be32((u32 __iomem *)addr + 1, (u32)val);
}
#endif
+#endif
-#define DMA_IN(fsl_chan, addr, width) \
- (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
- in_be##width(addr) : in_le##width(addr))
-#define DMA_OUT(fsl_chan, addr, val, width) \
- (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
- out_be##width(addr, val) : out_le##width(addr, val))
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
+#define fsl_ioread32(p) ioread32(p)
+#define fsl_ioread32be(p) ioread32be(p)
+#define fsl_iowrite32(v, p) iowrite32(v, p)
+#define fsl_iowrite32be(v, p) iowrite32be(v, p)
+#define fsl_ioread64(p) ioread64(p)
+#define fsl_ioread64be(p) ioread64be(p)
+#define fsl_iowrite64(v, p) iowrite64(v, p)
+#define fsl_iowrite64be(v, p) iowrite64be(v, p)
+#endif
+
+#define FSL_DMA_IN(fsl_dma, addr, width) \
+ (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \
+ fsl_ioread##width##be(addr) : fsl_ioread##width(addr))
+
+#define FSL_DMA_OUT(fsl_dma, addr, val, width) \
+ (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \
+ fsl_iowrite##width##be(val, addr) : \
+ fsl_iowrite##width(val, addr))
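+/*
+ * Example expansion (illustrative): FSL_DMA_IN(chan, &chan->regs->sr, 32)
+ * becomes fsl_ioread32be(&chan->regs->sr) on big-endian channels and
+ * fsl_ioread32(&chan->regs->sr) otherwise.  The ## pasting is resolved
+ * before the ternary is evaluated, so both accessors must exist for every
+ * width the driver uses.
+ */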
#define DMA_TO_CPU(fsl_chan, d, width) \
(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 4a09af3cd546..00a089e24150 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -278,14 +278,14 @@ static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
/*
* imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
*/
-static inline int imxdma_sg_next(struct imxdma_desc *d)
+static inline void imxdma_sg_next(struct imxdma_desc *d)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct scatterlist *sg = d->sg;
- unsigned long now;
+ size_t now;
- now = min(d->len, sg_dma_len(sg));
+ now = min_t(size_t, d->len, sg_dma_len(sg));
if (d->len != IMX_DMA_LENGTH_LOOP)
d->len -= now;
@@ -303,8 +303,6 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
-
- return now;
}
static void imxdma_enable_hw(struct imxdma_desc *d)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 86708fb9bda1..5f3c1378b90e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -377,6 +377,7 @@ struct sdma_channel {
unsigned long watermark_level;
u32 shp_addr, per_addr;
enum dma_status status;
+ bool context_loaded;
struct imx_dma_data data;
struct work_struct terminate_worker;
};
@@ -440,6 +441,8 @@ struct sdma_engine {
unsigned int irq;
dma_addr_t bd0_phys;
struct sdma_buffer_descriptor *bd0;
+ /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
+ bool clk_ratio;
};
static int sdma_config_write(struct dma_chan *chan,
@@ -662,8 +665,11 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
/* Set bits of CONFIG register with dynamic context switching */
- if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
- writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+ reg = readl(sdma->regs + SDMA_H_CONFIG);
+ if ((reg & SDMA_H_CONFIG_CSM) == 0) {
+ reg |= SDMA_H_CONFIG_CSM;
+ writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
+ }
return ret;
}
@@ -677,7 +683,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
int ret;
unsigned long flags;
- buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
+ buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
if (!buf_virt) {
return -ENOMEM;
}
@@ -696,7 +702,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
- dma_free_coherent(NULL, size, buf_virt, buf_phys);
+ dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
return ret;
}
@@ -970,6 +976,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
int ret;
unsigned long flags;
+ if (sdmac->context_loaded)
+ return 0;
+
if (sdmac->direction == DMA_DEV_TO_MEM)
load_address = sdmac->pc_from_device;
else if (sdmac->direction == DMA_DEV_TO_DEV)
@@ -1012,6 +1021,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
+ sdmac->context_loaded = true;
+
return ret;
}
@@ -1051,6 +1062,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
+ sdmac->context_loaded = false;
}
static int sdma_disable_channel_async(struct dma_chan *chan)
@@ -1182,8 +1194,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
{
int ret = -EBUSY;
- sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
- GFP_NOWAIT);
+ sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
+ GFP_NOWAIT);
if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
@@ -1205,8 +1217,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
int ret = 0;
- desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
- GFP_NOWAIT);
+ desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
+ &desc->bd_phys, GFP_NOWAIT);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
@@ -1219,7 +1231,8 @@ static void sdma_free_bd(struct sdma_desc *desc)
{
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
- dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
+ dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
+ desc->bd_phys);
}
static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1839,10 +1852,13 @@ static int sdma_init(struct sdma_engine *sdma)
if (ret)
goto disable_clk_ipg;
+ if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
+ sdma->clk_ratio = 1;
+
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
- sdma->channel_control = dma_alloc_coherent(NULL,
+ sdma->channel_control = dma_alloc_coherent(sdma->dev,
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
sizeof(struct sdma_context_data),
&ccb_phys, GFP_KERNEL);
@@ -1879,8 +1895,10 @@ static int sdma_init(struct sdma_engine *sdma)
writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
/* Set bits of CONFIG register but with static context switching */
- /* FIXME: Check whether to set ACR bit depending on clock ratios */
- writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
+ if (sdma->clk_ratio)
+ writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
+ else
+ writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
@@ -1903,11 +1921,16 @@ disable_clk_ipg:
static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
struct imx_dma_data *data = fn_param;
if (!imx_dma_is_general_purpose(chan))
return false;
+ /* return false if it's not the right device */
+ if (sdma->dev->of_node != data->of_node)
+ return false;
+
sdmac->data = *data;
chan->private = &sdmac->data;
@@ -1935,6 +1958,7 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
* be set to sdmac->event_id1.
*/
data.dma_request2 = 0;
+ data.of_node = ofdma->of_node;
return dma_request_channel(mask, sdma_filter_fn, &data);
}
@@ -2097,6 +2121,7 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+ sdma->dma_device.copy_align = 2;
dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
platform_set_drvdata(pdev, sdma);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 23fb2fa04000..f373a139e0c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -372,6 +372,7 @@ struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
struct ioat_ring_ent **ring;
int total_descs = 1 << order;
int i, chunks;
@@ -437,6 +438,17 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
}
ring[i]->hw->next = ring[0]->txd.phys;
+ /* setup descriptor pre-fetching for v3.4 */
+ if (ioat_dma->cap & IOAT_CAP_DPS) {
+ u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
+
+ if (chunks == 1)
+ drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
+
+ writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
+ }
+
return ring;
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 1ab42ec2b7ff..aaafd0e882b5 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -27,7 +27,7 @@
#include "registers.h"
#include "hw.h"
-#define IOAT_DMA_VERSION "4.00"
+#define IOAT_DMA_VERSION "5.00"
#define IOAT_DMA_DCA_ANY_CPU ~0
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index abcc51b343ce..781c94de8e81 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -66,11 +66,14 @@
#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
+#define PCI_DEVICE_ID_INTEL_IOAT_ICX 0x0b00
+
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
#define IOAT_VER_3_2 0x32 /* Version 3.2 */
#define IOAT_VER_3_3 0x33 /* Version 3.3 */
+#define IOAT_VER_3_4 0x34 /* Version 3.4 */
int system_has_dca_enabled(struct pci_dev *pdev);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2d810dfcdc48..d41dc9a9ff68 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -119,6 +119,9 @@ static const struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+ /* I/OAT v3.4 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
@@ -135,10 +138,10 @@ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-int ioat_pending_level = 4;
+int ioat_pending_level = 7;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
- "high-water mark for pushing ioat descriptors (default: 4)");
+ "high-water mark for pushing ioat descriptors (default: 7)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
@@ -635,6 +638,11 @@ static void ioat_free_chan_resources(struct dma_chan *c)
ioat_stop(ioat_chan);
ioat_reset_hw(ioat_chan);
+ /* Put LTR to idle */
+ if (ioat_dma->version >= IOAT_VER_3_4)
+ writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+ ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+
spin_lock_bh(&ioat_chan->cleanup_lock);
spin_lock_bh(&ioat_chan->prep_lock);
descs = ioat_ring_space(ioat_chan);
@@ -724,6 +732,28 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
+ /* Setting up LTR values for 3.4 or later */
+ if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) {
+ u32 lat_val;
+
+ lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL |
+ IOAT_CHAN_LTR_ACTIVE_SNLATSCALE |
+ IOAT_CHAN_LTR_ACTIVE_SNREQMNT;
+ writel(lat_val, ioat_chan->reg_base +
+ IOAT_CHAN_LTR_ACTIVE_OFFSET);
+
+ lat_val = IOAT_CHAN_LTR_IDLE_SNVAL |
+ IOAT_CHAN_LTR_IDLE_SNLATSCALE |
+ IOAT_CHAN_LTR_IDLE_SNREQMNT;
+ writel(lat_val, ioat_chan->reg_base +
+ IOAT_CHAN_LTR_IDLE_OFFSET);
+
+ /* Select the active LTR value */
+ writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE,
+ ioat_chan->reg_base +
+ IOAT_CHAN_LTR_SWSEL_OFFSET);
+ }
+
ioat_start_null_desc(ioat_chan);
/* check that we got off the ground */
@@ -1185,6 +1215,10 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
if (err)
return err;
+ if (ioat_dma->cap & IOAT_CAP_DPS)
+ writeb(ioat_pending_level + 1,
+ ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
+
return 0;
}
@@ -1350,6 +1384,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, device);
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ if (device->version >= IOAT_VER_3_4)
+ ioat_dca_enabled = 0;
if (device->version >= IOAT_VER_3_0) {
if (is_skx_ioat(pdev))
device->version = IOAT_VER_3_2;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 2f3bbc88ff2a..99c1c24d465d 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -84,6 +84,9 @@
#define IOAT_CAP_PQ 0x00000200
#define IOAT_CAP_DWBES 0x00002000
#define IOAT_CAP_RAID16SS 0x00020000
+#define IOAT_CAP_DPS 0x00800000
+
+#define IOAT_PREFETCH_LIMIT_OFFSET 0x4C /* CHWPREFLMT */
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
@@ -243,4 +246,25 @@
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
+#define IOAT_CHAN_DRSCTL_OFFSET 0xB6
+#define IOAT_CHAN_DRSZ_4KB 0x0000
+#define IOAT_CHAN_DRSZ_8KB 0x0001
+#define IOAT_CHAN_DRSZ_2MB 0x0009
+#define IOAT_CHAN_DRS_EN 0x0100
+#define IOAT_CHAN_DRS_AUTOWRAP 0x0200
+
+#define IOAT_CHAN_LTR_SWSEL_OFFSET 0xBC
+#define IOAT_CHAN_LTR_SWSEL_ACTIVE 0x0
+#define IOAT_CHAN_LTR_SWSEL_IDLE 0x1
+
+#define IOAT_CHAN_LTR_ACTIVE_OFFSET 0xC0
+#define IOAT_CHAN_LTR_ACTIVE_SNVAL 0x0000 /* 0 us */
+#define IOAT_CHAN_LTR_ACTIVE_SNLATSCALE 0x0800 /* 1us scale */
+#define IOAT_CHAN_LTR_ACTIVE_SNREQMNT 0x8000 /* snoop req enable */
+
+#define IOAT_CHAN_LTR_IDLE_OFFSET 0xC4
+#define IOAT_CHAN_LTR_IDLE_SNVAL 0x0258 /* 600 us */
+#define IOAT_CHAN_LTR_IDLE_SNLATSCALE 0x0800 /* 1us scale */
+#define IOAT_CHAN_LTR_IDLE_SNREQMNT 0x8000 /* snoop req enable */
+
#endif /* _IOAT_REGISTERS_H_ */
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index fdec2b6cfbb0..5737d92eaeeb 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -52,8 +52,6 @@
#define CX_SRC 0x814
#define CX_DST 0x818
#define CX_CFG 0x81c
-#define AXI_CFG 0x820
-#define AXI_CFG_DEFAULT 0x201201
#define CX_LLI_CHAIN_EN 0x2
#define CX_CFG_EN 0x1
@@ -113,9 +111,18 @@ struct k3_dma_dev {
struct dma_pool *pool;
u32 dma_channels;
u32 dma_requests;
+ u32 dma_channel_mask;
unsigned int irq;
};
+
+#define K3_FLAG_NOCLK BIT(1)
+
+struct k3dma_soc_data {
+ unsigned long flags;
+};
+
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
static int k3_dma_config_write(struct dma_chan *chan,
@@ -161,7 +168,6 @@ static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
writel_relaxed(hw->count, phy->base + CX_CNT0);
writel_relaxed(hw->saddr, phy->base + CX_SRC);
writel_relaxed(hw->daddr, phy->base + CX_DST);
- writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
writel_relaxed(hw->config, phy->base + CX_CFG);
}
@@ -314,6 +320,9 @@ static void k3_dma_tasklet(unsigned long arg)
/* check new channel request in d->chan_pending */
spin_lock_irq(&d->lock);
for (pch = 0; pch < d->dma_channels; pch++) {
+ if (!(d->dma_channel_mask & (1 << pch)))
+ continue;
+
p = &d->phy[pch];
if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
@@ -331,6 +340,9 @@ static void k3_dma_tasklet(unsigned long arg)
spin_unlock_irq(&d->lock);
for (pch = 0; pch < d->dma_channels; pch++) {
+ if (!(d->dma_channel_mask & (1 << pch)))
+ continue;
+
if (pch_alloc & (1 << pch)) {
p = &d->phy[pch];
c = p->vchan;
@@ -790,8 +802,21 @@ static int k3_dma_transfer_resume(struct dma_chan *chan)
return 0;
}
+static const struct k3dma_soc_data k3_v1_dma_data = {
+ .flags = 0,
+};
+
+static const struct k3dma_soc_data asp_v1_dma_data = {
+ .flags = K3_FLAG_NOCLK,
+};
+
static const struct of_device_id k3_pdma_dt_ids[] = {
- { .compatible = "hisilicon,k3-dma-1.0", },
+ { .compatible = "hisilicon,k3-dma-1.0",
+ .data = &k3_v1_dma_data
+ },
+ { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
+ .data = &asp_v1_dma_data
+ },
{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
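Per-SoC behaviour now rides on the .data pointer of each OF match entry, and
probe resolves it with device_get_match_data(). A minimal sketch of the
lookup pattern under the same table layout (the use() call is illustrative):

	const struct k3dma_soc_data *soc = device_get_match_data(dev);

	if (!soc)		/* matched an entry without .data */
		return -EINVAL;
	use(soc->flags);	/* e.g. test K3_FLAG_NOCLK before clk_get */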
@@ -810,6 +835,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
static int k3_dma_probe(struct platform_device *op)
{
+ const struct k3dma_soc_data *soc_data;
struct k3_dma_dev *d;
const struct of_device_id *of_id;
struct resource *iores;
@@ -823,6 +849,10 @@ static int k3_dma_probe(struct platform_device *op)
if (!d)
return -ENOMEM;
+ soc_data = device_get_match_data(&op->dev);
+ if (!soc_data)
+ return -EINVAL;
+
d->base = devm_ioremap_resource(&op->dev, iores);
if (IS_ERR(d->base))
return PTR_ERR(d->base);
@@ -833,12 +863,21 @@ static int k3_dma_probe(struct platform_device *op)
"dma-channels", &d->dma_channels);
of_property_read_u32((&op->dev)->of_node,
"dma-requests", &d->dma_requests);
+ ret = of_property_read_u32((&op->dev)->of_node,
+ "dma-channel-mask", &d->dma_channel_mask);
+ if (ret) {
+ dev_warn(&op->dev,
+ "dma-channel-mask doesn't exist, considering all as available.\n");
+ d->dma_channel_mask = (u32)~0UL;
+ }
}
- d->clk = devm_clk_get(&op->dev, NULL);
- if (IS_ERR(d->clk)) {
- dev_err(&op->dev, "no dma clk\n");
- return PTR_ERR(d->clk);
+ if (!(soc_data->flags & K3_FLAG_NOCLK)) {
+ d->clk = devm_clk_get(&op->dev, NULL);
+ if (IS_ERR(d->clk)) {
+ dev_err(&op->dev, "no dma clk\n");
+ return PTR_ERR(d->clk);
+ }
}
irq = platform_get_irq(op, 0);
@@ -862,8 +901,12 @@ static int k3_dma_probe(struct platform_device *op)
return -ENOMEM;
for (i = 0; i < d->dma_channels; i++) {
- struct k3_dma_phy *p = &d->phy[i];
+ struct k3_dma_phy *p;
+
+ if (!(d->dma_channel_mask & BIT(i)))
+ continue;
+ p = &d->phy[i];
p->idx = i;
p->base = d->base + i * 0x40;
}
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index 5de1b07eddff..7de54b2fafdb 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -214,6 +214,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
mcf_chan->edma = mcf_edma;
mcf_chan->slave_id = i;
mcf_chan->idle = true;
+ mcf_chan->dma_dir = DMA_NONE;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
iowrite32(0x0, &regs->tcd[i].csr);
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 131f3974740d..814853842e29 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
#else
- mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
+ mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
#endif
/* setup the length */
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7f595355fb79..65af2e7fcb2c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1059,6 +1059,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->op_in_desc = XOR_MODE_IN_DESC;
dma_dev = &mv_chan->dmadev;
+ dma_dev->dev = &pdev->dev;
mv_chan->xordev = xordev;
/*
@@ -1091,7 +1092,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
- dma_dev->dev = &pdev->dev;
/* set prep routines based on capability */
if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
@@ -1153,7 +1153,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
- dma_async_device_register(dma_dev);
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_free_irq;
+
return mv_chan;
err_free_irq:
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index afd8f27bda96..538b6e0e17bb 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -972,7 +972,6 @@ static void pch_dma_remove(struct pci_dev *pdev)
}
/* PCI Device ID of DMA device */
-#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cff1b143fff5..eec79fdf27a5 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2267,7 +2267,6 @@ static int pl330_terminate_all(struct dma_chan *chan)
struct dma_pl330_desc *desc;
unsigned long flags;
struct pl330_dmac *pl330 = pch->dmac;
- LIST_HEAD(list);
bool power_down = false;
pm_runtime_get_sync(pl330->ddma.dev);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 1617715aa6e0..cb860cb53c27 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -636,8 +636,8 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
/* allocate enough room to accommodate the number of entries */
- async_desc = kzalloc(sizeof(*async_desc) +
- (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+ async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
+ GFP_NOWAIT);
if (!async_desc)
goto err_out;
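struct_size() folds the sizeof(*p) + n * sizeof(elem) arithmetic into one
overflow-checked expression that saturates at SIZE_MAX instead of wrapping.
A minimal sketch with a hypothetical trailing-array struct:

	#include <linux/overflow.h>

	struct demo {				/* illustrative only */
		int n;
		struct bam_desc_hw desc[];	/* flexible array member */
	};

	struct demo *d;

	d = kzalloc(struct_size(d, desc, num), GFP_NOWAIT);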
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 43d4b00b8138..411f91fde734 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan)
desc = &mdesc->desc;
last_cookie = desc->cookie;
+ llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+
spin_lock_irqsave(&mchan->lock, irqflags);
+ if (llstat == DMA_COMPLETE) {
+ mchan->last_success = last_cookie;
+ result.result = DMA_TRANS_NOERROR;
+ } else {
+ result.result = DMA_TRANS_ABORTED;
+ }
+
dma_cookie_complete(desc);
spin_unlock_irqrestore(&mchan->lock, irqflags);
- llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
dmaengine_desc_get_callback(desc, &cb);
dma_run_dependencies(desc);
spin_lock_irqsave(&mchan->lock, irqflags);
list_move(&mdesc->node, &mchan->free);
-
- if (llstat == DMA_COMPLETE) {
- mchan->last_success = last_cookie;
- result.result = DMA_TRANS_NOERROR;
- } else
- result.result = DMA_TRANS_ABORTED;
-
spin_unlock_irqrestore(&mchan->lock, irqflags);
dmaengine_desc_callback_invoke(&cb, &result);
@@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
if (!mdesc)
return NULL;
+ mdesc->desc.flags = flags;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
src, dest, len, flags,
HIDMA_TRE_MEMCPY);
@@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
if (!mdesc)
return NULL;
+ mdesc->desc.flags = flags;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
value, dest, len, flags,
HIDMA_TRE_MEMSET);
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index d64edeb6771a..681de12f4c67 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -423,9 +423,8 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
- platform_driver_register(&hidma_mgmt_driver);
+ return platform_driver_register(&hidma_mgmt_driver);
- return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 784d5f1a473b..3fae23768b47 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -705,7 +705,6 @@ static int sa11x0_dma_device_pause(struct dma_chan *chan)
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
struct sa11x0_dma_phy *p;
- LIST_HEAD(head);
unsigned long flags;
dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
@@ -732,7 +731,6 @@ static int sa11x0_dma_device_resume(struct dma_chan *chan)
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
struct sa11x0_dma_phy *p;
- LIST_HEAD(head);
unsigned long flags;
dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b4f25698169..e2a5398f89b5 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
enum dma_status status;
unsigned int residue = 0;
unsigned int dptr = 0;
+ unsigned int chcrb;
+ unsigned int tcrb;
+ unsigned int i;
if (!desc)
return 0;
@@ -1330,14 +1333,31 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
}
/*
+ * We need to read two registers.
+ * Make sure the control register does not skip to the next chunk
+ * while reading the counter.
+ * Trying three times should be enough: an initial read, a retry, and
+ * one more retry for the paranoid.
+ */
+ for (i = 0; i < 3; i++) {
+ chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+ RCAR_DMACHCRB_DPTR_MASK;
+ tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+ /* Still the same? */
+ if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+ RCAR_DMACHCRB_DPTR_MASK))
+ break;
+ }
+ WARN_ONCE(i >= 3, "residue might not be continuous!");
+
+ /*
* In descriptor mode the descriptor running pointer is not maintained
* by the interrupt handler, find the running descriptor from the
* descriptor pointer field in the CHCRB register. In non-descriptor
* mode just use the running descriptor pointer.
*/
if (desc->hwdescs.use) {
- dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
- RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+ dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
if (dptr == 0)
dptr = desc->nchunks;
dptr--;
@@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
}
/* Add the residue for the current chunk. */
- residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
+ residue += tcrb << desc->xfer_shift;
return residue;
}
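Register pairs that can move between two MMIO reads are captured with a
bounded verify-and-retry loop; the shape of the pattern in isolation (a
hedged sketch — read_reg(), REG_A/REG_B and PTR_MASK are illustrative):

	unsigned int a, b, tries = 3;

	do {
		a = read_reg(REG_A) & PTR_MASK;	/* the racy pointer */
		b = read_reg(REG_B);		/* the paired counter */
	} while (a != (read_reg(REG_A) & PTR_MASK) && --tries);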
@@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
enum dma_status status;
unsigned long flags;
unsigned int residue;
+ bool cyclic;
status = dma_cookie_status(chan, cookie, txstate);
if (status == DMA_COMPLETE || !txstate)
@@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&rchan->lock, flags);
residue = rcar_dmac_chan_get_residue(rchan, cookie);
+ cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
spin_unlock_irqrestore(&rchan->lock, flags);
/* if there's no residue, the cookie is complete */
- if (!residue)
+ if (!residue && !cyclic)
return DMA_COMPLETE;
dma_set_residue(txstate, residue);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 7f7184c3cf95..59403f6d008a 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -694,6 +694,8 @@ static int usb_dmac_runtime_resume(struct device *dev)
#endif /* CONFIG_PM */
static const struct dev_pm_ops usb_dmac_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
NULL)
};
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index e2f016700fcc..48431e2da987 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -580,15 +580,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
- struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
- int ret;
-
- ret = pm_runtime_get_sync(chan->device->dev);
- if (ret < 0)
- return ret;
-
- schan->dev_id = SPRD_DMA_SOFTWARE_UID;
- return 0;
+ return pm_runtime_get_sync(chan->device->dev);
}
static void sprd_dma_free_chan_resources(struct dma_chan *chan)
@@ -1021,13 +1013,10 @@ static void sprd_dma_free_desc(struct virt_dma_desc *vd)
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
- struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
- u32 req = *(u32 *)param;
+ u32 slave_id = *(u32 *)param;
- if (req < sdev->total_chns)
- return req == schan->chn_num + 1;
- else
- return false;
+ schan->dev_id = slave_id;
+ return true;
}
static int sprd_dma_probe(struct platform_device *pdev)
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index 07c20aa2e955..bc7a1de3f29b 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -243,8 +243,7 @@ static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
struct st_fdma_desc *fdesc;
int i;
- fdesc = kzalloc(sizeof(*fdesc) +
- sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
+ fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT);
if (!fdesc)
return NULL;
@@ -294,8 +293,6 @@ static void st_fdma_free_chan_res(struct dma_chan *chan)
struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
unsigned long flags;
- LIST_HEAD(head);
-
dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
__func__, fchan->vchan.chan.chan_id);
@@ -626,7 +623,6 @@ static void st_fdma_issue_pending(struct dma_chan *chan)
static int st_fdma_pause(struct dma_chan *chan)
{
unsigned long flags;
- LIST_HEAD(head);
struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
int ch_id = fchan->vchan.chan.chan_id;
unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 4903a408fc14..ba239b529fa9 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -641,12 +642,13 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
struct stm32_dma_chan *chan = devid;
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
- u32 status, scr;
+ u32 status, scr, sfcr;
spin_lock(&chan->vchan.lock);
status = stm32_dma_irq_status(chan);
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+ sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
if (status & STM32_DMA_TCI) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -661,10 +663,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
if (status & STM32_DMA_FEI) {
stm32_dma_irq_clear(chan, STM32_DMA_FEI);
status &= ~STM32_DMA_FEI;
- if (!(scr & STM32_DMA_SCR_EN))
- dev_err(chan2dev(chan), "FIFO Error\n");
- else
- dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+ if (sfcr & STM32_DMA_SFCR_FEIE) {
+ if (!(scr & STM32_DMA_SCR_EN))
+ dev_err(chan2dev(chan), "FIFO Error\n");
+ else
+ dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+ }
}
if (status) {
stm32_dma_irq_clear(chan, status);
@@ -1112,15 +1116,14 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
int ret;
chan->config_init = false;
- ret = clk_prepare_enable(dmadev->clk);
- if (ret < 0) {
- dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+
+ ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ if (ret < 0)
return ret;
- }
ret = stm32_dma_disable_chan(chan);
if (ret < 0)
- clk_disable_unprepare(dmadev->clk);
+ pm_runtime_put(dmadev->ddev.dev);
return ret;
}
@@ -1140,7 +1143,7 @@ static void stm32_dma_free_chan_resources(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
- clk_disable_unprepare(dmadev->clk);
+ pm_runtime_put(dmadev->ddev.dev);
vchan_free_chan_resources(to_virt_chan(c));
}
@@ -1240,6 +1243,12 @@ static int stm32_dma_probe(struct platform_device *pdev)
return PTR_ERR(dmadev->clk);
}
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+ return ret;
+ }
+
dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
"st,mem2mem");
@@ -1289,7 +1298,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret)
- return ret;
+ goto clk_free;
for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
chan = &dmadev->chan[i];
@@ -1321,20 +1330,58 @@ static int stm32_dma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dmadev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
+
dev_info(&pdev->dev, "STM32 DMA driver registered\n");
return 0;
err_unregister:
dma_async_device_unregister(dd);
+clk_free:
+ clk_disable_unprepare(dmadev->clk);
return ret;
}
+#ifdef CONFIG_PM
+static int stm32_dma_runtime_suspend(struct device *dev)
+{
+ struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dmadev->clk);
+
+ return 0;
+}
+
+static int stm32_dma_runtime_resume(struct device *dev)
+{
+ struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_dma_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
+ stm32_dma_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_dma_driver = {
.driver = {
.name = "stm32-dma",
.of_match_table = stm32_dma_of_match,
+ .pm = &stm32_dma_pm_ops,
},
};
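Channel allocation now takes a runtime PM reference instead of managing the
clock directly; the clock is toggled only in the runtime callbacks. A hedged
sketch of the get/put balance (not taken verbatim from this patch):

	ret = pm_runtime_get_sync(dev);		/* resumes, enabling the clock */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the taken usage count */
		return ret;
	}
	/* ... registers are safe to touch here ... */
	pm_runtime_put(dev);			/* may suspend, gating the clock */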
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index b922db90939a..a67119199c45 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -79,8 +80,7 @@ static void stm32_dmamux_free(struct device *dev, void *route_data)
stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
clear_bit(mux->chan_id, dmamux->dma_inuse);
- if (!IS_ERR(dmamux->clk))
- clk_disable(dmamux->clk);
+ pm_runtime_put_sync(dev);
spin_unlock_irqrestore(&dmamux->lock, flags);
@@ -146,13 +146,10 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
/* Set dma request */
spin_lock_irqsave(&dmamux->lock, flags);
- if (!IS_ERR(dmamux->clk)) {
- ret = clk_enable(dmamux->clk);
- if (ret < 0) {
- spin_unlock_irqrestore(&dmamux->lock, flags);
- dev_err(&pdev->dev, "clk_prep_enable issue: %d\n", ret);
- goto error;
- }
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ goto error;
}
spin_unlock_irqrestore(&dmamux->lock, flags);
@@ -254,6 +251,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
stm32_dmamux->dmamux_requests);
}
+ pm_runtime_get_noresume(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iomem = devm_ioremap_resource(&pdev->dev, res);
@@ -282,6 +280,8 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;
platform_set_drvdata(pdev, stm32_dmamux);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
if (!IS_ERR(stm32_dmamux->clk)) {
ret = clk_prepare_enable(stm32_dmamux->clk);
@@ -291,17 +291,52 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
}
+ pm_runtime_get_noresume(&pdev->dev);
+
/* Reset the dmamux */
for (i = 0; i < stm32_dmamux->dma_requests; i++)
stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);
- if (!IS_ERR(stm32_dmamux->clk))
- clk_disable(stm32_dmamux->clk);
+ pm_runtime_put(&pdev->dev);
return of_dma_router_register(node, stm32_dmamux_route_allocate,
&stm32_dmamux->dmarouter);
}
+#ifdef CONFIG_PM
+static int stm32_dmamux_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(stm32_dmamux->clk);
+
+ return 0;
+}
+
+static int stm32_dmamux_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(stm32_dmamux->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_dmamux_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
+ stm32_dmamux_runtime_resume, NULL)
+};
+
static const struct of_device_id stm32_dmamux_match[] = {
{ .compatible = "st,stm32h7-dmamux" },
{},
@@ -312,6 +347,7 @@ static struct platform_driver stm32_dmamux_driver = {
.driver = {
.name = "stm32-dmamux",
.of_match_table = stm32_dmamux_match,
+ .pm = &stm32_dmamux_pm_ops,
},
};
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 390e4cae0e1a..ac0301b69593 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -37,6 +37,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -1456,15 +1457,13 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
return -ENOMEM;
}
- ret = clk_prepare_enable(dmadev->clk);
- if (ret < 0) {
- dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+ ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ if (ret < 0)
return ret;
- }
ret = stm32_mdma_disable_chan(chan);
if (ret < 0)
- clk_disable_unprepare(dmadev->clk);
+ pm_runtime_put(dmadev->ddev.dev);
return ret;
}
@@ -1484,7 +1483,7 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
- clk_disable_unprepare(dmadev->clk);
+ pm_runtime_put(dmadev->ddev.dev);
vchan_free_chan_resources(to_virt_chan(c));
dmam_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
@@ -1597,6 +1596,12 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return ret;
}
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+ return ret;
+ }
+
dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
if (!IS_ERR(dmadev->rst)) {
reset_control_assert(dmadev->rst);
@@ -1668,6 +1673,10 @@ static int stm32_mdma_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, dmadev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
dev_info(&pdev->dev, "STM32 MDMA driver registered\n");
@@ -1677,11 +1686,42 @@ err_unregister:
return ret;
}
+#ifdef CONFIG_PM
+static int stm32_mdma_runtime_suspend(struct device *dev)
+{
+ struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dmadev->clk);
+
+ return 0;
+}
+
+static int stm32_mdma_runtime_resume(struct device *dev)
+{
+ struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_mdma_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
+ stm32_mdma_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_mdma_driver = {
.probe = stm32_mdma_probe,
.driver = {
.name = "stm32-mdma",
.of_match_table = stm32_mdma_of_match,
+ .pm = &stm32_mdma_pm_ops,
},
};
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 9a558e30c461..cf462b1abc0b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -38,6 +38,9 @@
#include "dmaengine.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/tegra_apb_dma.h>
+
#define TEGRA_APBDMA_GENERAL 0x0
#define TEGRA_APBDMA_GENERAL_ENABLE BIT(31)
@@ -146,7 +149,7 @@ struct tegra_dma_channel_regs {
};
/*
- * tegra_dma_sg_req: Dma request details to configure hardware. This
+ * tegra_dma_sg_req: DMA request details to configure hardware. This
* contains the details for one transfer to configure DMA hw.
* The client's request for data transfer can be broken into multiple
* sub-transfer as per requester details and hw support.
@@ -155,7 +158,7 @@ struct tegra_dma_channel_regs {
*/
struct tegra_dma_sg_req {
struct tegra_dma_channel_regs ch_regs;
- int req_len;
+ unsigned int req_len;
bool configured;
bool last_sg;
struct list_head node;
@@ -169,8 +172,8 @@ struct tegra_dma_sg_req {
*/
struct tegra_dma_desc {
struct dma_async_tx_descriptor txd;
- int bytes_requested;
- int bytes_transferred;
+ unsigned int bytes_requested;
+ unsigned int bytes_transferred;
enum dma_status dma_status;
struct list_head node;
struct list_head tx_list;
@@ -186,7 +189,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
struct dma_chan dma_chan;
- char name[30];
+ char name[12];
bool config_init;
int id;
int irq;
@@ -574,7 +577,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
struct tegra_dma_sg_req *hsgreq = NULL;
if (list_empty(&tdc->pending_sg_req)) {
- dev_err(tdc2dev(tdc), "Dma is running without req\n");
+ dev_err(tdc2dev(tdc), "DMA is running without req\n");
tegra_dma_stop(tdc);
return false;
}
@@ -587,7 +590,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
if (!hsgreq->configured) {
tegra_dma_stop(tdc);
- dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
+ dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
tegra_dma_abort_all(tdc);
return false;
}
@@ -636,7 +639,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
dma_desc = sgreq->dma_desc;
- dma_desc->bytes_transferred += sgreq->req_len;
+ /* If we DMA for long enough, the transfer count will wrap */
+ dma_desc->bytes_transferred =
+ (dma_desc->bytes_transferred + sgreq->req_len) %
+ dma_desc->bytes_requested;
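+ /* e.g. bytes_requested = 4096, req_len = 1024: 1024, 2048, 3072, 0, ... */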
/* Callback needs to be called */
if (!dma_desc->cb_count)
@@ -669,6 +675,8 @@ static void tegra_dma_tasklet(unsigned long data)
dmaengine_desc_get_callback(&dma_desc->txd, &cb);
cb_count = dma_desc->cb_count;
dma_desc->cb_count = 0;
+ trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
+ cb.callback);
spin_unlock_irqrestore(&tdc->lock, flags);
while (cb_count--)
dmaengine_desc_callback_invoke(&cb, NULL);
@@ -685,6 +693,7 @@ static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
spin_lock_irqsave(&tdc->lock, flags);
+ trace_tegra_dma_isr(&tdc->dma_chan, irq);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
@@ -843,6 +852,7 @@ found:
dma_set_residue(txstate, residual);
}
+ trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
spin_unlock_irqrestore(&tdc->lock, flags);
return ret;
}
@@ -919,7 +929,7 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
return 0;
default:
- dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
+ dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
return -EINVAL;
}
return -EINVAL;
@@ -952,7 +962,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
enum dma_slave_buswidth slave_bw;
if (!tdc->config_init) {
- dev_err(tdc2dev(tdc), "dma channel is not configured\n");
+ dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
return NULL;
}
if (sg_len < 1) {
@@ -985,7 +995,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
dma_desc = tegra_dma_desc_get(tdc);
if (!dma_desc) {
- dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
+ dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
return NULL;
}
INIT_LIST_HEAD(&dma_desc->tx_list);
@@ -1005,14 +1015,14 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
if ((len & 3) || (mem & 3) ||
(len > tdc->tdma->chip_data->max_dma_count)) {
dev_err(tdc2dev(tdc),
- "Dma length/memory address is not supported\n");
+ "DMA length/memory address is not supported\n");
tegra_dma_desc_put(tdc, dma_desc);
return NULL;
}
sg_req = tegra_dma_sg_req_get(tdc);
if (!sg_req) {
- dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+ dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
tegra_dma_desc_put(tdc, dma_desc);
return NULL;
}
@@ -1087,7 +1097,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
* terminating the DMA.
*/
if (tdc->busy) {
- dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
+ dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
return NULL;
}
@@ -1144,7 +1154,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
while (remain_len) {
sg_req = tegra_dma_sg_req_get(tdc);
if (!sg_req) {
- dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+ dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
tegra_dma_desc_put(tdc, dma_desc);
return NULL;
}
@@ -1319,8 +1329,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
return -ENODEV;
}
- tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
- sizeof(struct tegra_dma_channel), GFP_KERNEL);
+ tdma = devm_kzalloc(&pdev->dev,
+ struct_size(tdma, channels, cdata->nr_channels),
+ GFP_KERNEL);
if (!tdma)
return -ENOMEM;
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b26256f23d67..5ec0dd97b397 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -678,8 +678,9 @@ static int tegra_adma_probe(struct platform_device *pdev)
return -ENODEV;
}
- tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
- sizeof(struct tegra_adma_chan), GFP_KERNEL);
+ tdma = devm_kzalloc(&pdev->dev,
+ struct_size(tdma, channels, cdata->nr_channels),
+ GFP_KERNEL);
if (!tdma)
return -ENOMEM;
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index fc0f9c8766a8..afbb1c95b721 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -643,8 +643,8 @@ static int td_probe(struct platform_device *pdev)
DRIVER_NAME))
return -EBUSY;
- td = kzalloc(sizeof(struct timb_dma) +
- sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
+ td = kzalloc(struct_size(td, channels, pdata->nr_channels),
+ GFP_KERNEL);
if (!td) {
err = -ENOMEM;
goto err_release_region;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index eb45af71d3a3..e8d0881b64d8 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -327,7 +327,6 @@ static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
channel_writel(dc, SAIR, 0);
channel_writel(dc, DAIR, 0);
channel_writel(dc, CCR, 0);
- mmiowb();
}
/* Called with dc->lock held and bh disabled */
@@ -954,7 +953,6 @@ static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
dma_sync_single_for_device(chan2parent(&dc->chan),
prev->txd.phys, ddev->descsize,
DMA_TO_DEVICE);
- mmiowb();
if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
channel_read_CHAR(dc) == prev->txd.phys)
/* Restart chain DMA */
@@ -1080,7 +1078,6 @@ static void txx9dmac_free_chan_resources(struct dma_chan *chan)
static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
dma_writel(ddev, MCR, 0);
- mmiowb();
}
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index cb20b411493e..c43c1a154604 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -86,6 +86,7 @@
#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
+#define XILINX_DMA_DMASR_SG_MASK BIT(3)
#define XILINX_DMA_DMASR_IDLE BIT(1)
#define XILINX_DMA_DMASR_HALTED BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
@@ -161,7 +162,9 @@
#define XILINX_DMA_REG_BTT 0x28
/* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
@@ -412,7 +415,6 @@ struct xilinx_dma_config {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
- * @has_sg: Specifies whether Scatter-Gather is present or not
* @mcdma: Specifies whether Multi-Channel is present or not
* @flush_on_fsync: Flush on frame sync
* @ext_addr: Indicates 64 bit addressing is supported by dma device
@@ -425,13 +427,13 @@ struct xilinx_dma_config {
* @rxs_clk: DMA s2mm stream clock
* @nr_channels: Number of channels DMA device supports
* @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
*/
struct xilinx_dma_device {
void __iomem *regs;
struct device *dev;
struct dma_device common;
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
- bool has_sg;
bool mcdma;
u32 flush_on_fsync;
bool ext_addr;
@@ -444,6 +446,7 @@ struct xilinx_dma_device {
struct clk *rxs_clk;
u32 nr_channels;
u32 chan_id;
+ u32 max_buffer_len;
};
/* Macros */
@@ -960,6 +963,34 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
}
/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+ int size, int done)
+{
+ size_t copy;
+
+ copy = min_t(size_t, size - done,
+ chan->xdev->max_buffer_len);
+
+ if ((copy + done < size) &&
+ chan->xdev->common.copy_align) {
+ /*
+ * If this is not the last descriptor, make sure
+ * the next one will be properly aligned
+ */
+ copy = rounddown(copy,
+ (1 << chan->xdev->common.copy_align));
+ }
+ return copy;
+}
+
+/**
* xilinx_dma_tx_status - Get DMA transaction status
* @dchan: DMA channel
* @cookie: Transaction identifier
@@ -992,7 +1023,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
list_for_each_entry(segment, &desc->segments, node) {
hw = &segment->hw;
residue += (hw->control - hw->status) &
- XILINX_DMA_MAX_TRANS_LEN;
+ chan->xdev->max_buffer_len;
}
}
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1070,7 +1101,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
struct xilinx_vdma_config *config = &chan->config;
struct xilinx_dma_tx_descriptor *desc, *tail_desc;
u32 reg, j;
- struct xilinx_vdma_tx_segment *tail_segment;
+ struct xilinx_vdma_tx_segment *segment, *last = NULL;
+ int i = 0;
/* This function was invoked with lock held */
if (chan->err)
@@ -1087,17 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
tail_desc = list_last_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_vdma_tx_segment, node);
-
- /*
- * If hardware is idle, then all descriptors on the running lists are
- * done, start new transfers
- */
- if (chan->has_sg)
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- desc->async_tx.phys);
-
/* Configure the hardware using info in the config structure */
if (chan->has_vflip) {
reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1114,15 +1135,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
else
reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
- /*
- * With SG, start with circular mode, so that BDs can be fetched.
- * In direct register mode, if not parking, enable circular mode
- */
- if (chan->has_sg || !config->park)
- reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+ /* If not parking, enable circular mode */
if (config->park)
reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+ else
+ reg |= XILINX_DMA_DMACR_CIRC_EN;
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
@@ -1144,48 +1161,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
return;
/* Start the transfer */
- if (chan->has_sg) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
- } else {
- struct xilinx_vdma_tx_segment *segment, *last = NULL;
- int i = 0;
-
- if (chan->desc_submitcount < chan->num_frms)
- i = chan->desc_submitcount;
-
- list_for_each_entry(segment, &desc->segments, node) {
- if (chan->ext_addr)
- vdma_desc_write_64(chan,
- XILINX_VDMA_REG_START_ADDRESS_64(i++),
- segment->hw.buf_addr,
- segment->hw.buf_addr_msb);
- else
- vdma_desc_write(chan,
+ if (chan->desc_submitcount < chan->num_frms)
+ i = chan->desc_submitcount;
+
+ list_for_each_entry(segment, &desc->segments, node) {
+ if (chan->ext_addr)
+ vdma_desc_write_64(chan,
+ XILINX_VDMA_REG_START_ADDRESS_64(i++),
+ segment->hw.buf_addr,
+ segment->hw.buf_addr_msb);
+ else
+ vdma_desc_write(chan,
XILINX_VDMA_REG_START_ADDRESS(i++),
segment->hw.buf_addr);
- last = segment;
- }
+ last = segment;
+ }
- if (!last)
- return;
+ if (!last)
+ return;
- /* HW expects these parameters to be same for one transaction */
- vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
- vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
- last->hw.stride);
- vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+ /* HW expects these parameters to be the same for one transaction */
+ vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+ vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+ last->hw.stride);
+ vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
- chan->desc_submitcount++;
- chan->desc_pendingcount--;
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
- if (chan->desc_submitcount == chan->num_frms)
- chan->desc_submitcount = 0;
- }
+ chan->desc_submitcount++;
+ chan->desc_pendingcount--;
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->active_list);
+ if (chan->desc_submitcount == chan->num_frms)
+ chan->desc_submitcount = 0;
chan->idle = false;
}
@@ -1254,7 +1261,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & XILINX_DMA_MAX_TRANS_LEN);
+ hw->control & chan->xdev->max_buffer_len);
}
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1357,7 +1364,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & XILINX_DMA_MAX_TRANS_LEN);
+ hw->control & chan->xdev->max_buffer_len);
}
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1718,7 +1725,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct xilinx_cdma_tx_segment *segment;
struct xilinx_cdma_desc_hw *hw;
- if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+ if (!len || len > chan->xdev->max_buffer_len)
return NULL;
desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1808,8 +1815,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
* Calculate the maximum number of bytes to transfer,
* making sure it is less than the hw limit
*/
- copy = min_t(size_t, sg_dma_len(sg) - sg_used,
- XILINX_DMA_MAX_TRANS_LEN);
+ copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+ sg_used);
hw = &segment->hw;
/* Fill in the descriptor */
@@ -1913,8 +1920,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
* Calculate the maximum number of bytes to transfer,
* making sure it is less than the hw limit
*/
- copy = min_t(size_t, period_len - sg_used,
- XILINX_DMA_MAX_TRANS_LEN);
+ copy = xilinx_dma_calc_copysize(chan, period_len,
+ sg_used);
hw = &segment->hw;
xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
period_len * i);
@@ -2389,7 +2396,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->dev = xdev->dev;
chan->xdev = xdev;
- chan->has_sg = xdev->has_sg;
chan->desc_pendingcount = 0x0;
chan->ext_addr = xdev->ext_addr;
/* This variable ensures that descriptors are not
@@ -2489,6 +2495,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->stop_transfer = xilinx_dma_stop_transfer;
}
+ /* check if SG is enabled (only for AXIDMA and CDMA) */
+ if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
+ if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+ XILINX_DMA_DMASR_SG_MASK)
+ chan->has_sg = true;
+ dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
+ chan->has_sg ? "enabled" : "disabled");
+ }
+
/* Initialize the tasklet */
tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
(unsigned long)chan);
@@ -2596,7 +2611,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
struct resource *io;
- u32 num_frames, addr_width;
+ u32 num_frames, addr_width, len_width;
int i, err;
/* Allocate and initialize the DMA engine structure */
@@ -2627,9 +2642,24 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return PTR_ERR(xdev->regs);
/* Retrieve the DMA engine properties from the device tree */
- xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+ if (!of_property_read_u32(node, "xlnx,sg-length-width",
+ &len_width)) {
+ if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+ len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+ dev_warn(xdev->dev,
+ "invalid xlnx,sg-length-width property value. Using default width\n");
+ } else {
+ if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+ dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
+ xdev->max_buffer_len =
+ GENMASK(len_width - 1, 0);
+ }
+ }
+ }
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
err = of_property_read_u32(node, "xlnx,num-fstores",
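
The new xilinx_dma_calc_copysize() above replaces the fixed XILINX_DMA_MAX_TRANS_LEN cap: each segment is clipped to the per-device max_buffer_len (derived from the optional xlnx,sg-length-width property), and every segment except the last is rounded down to the DMA alignment so the following segment starts aligned. A stand-alone sketch of the same calculation, with calc_copysize(), max_len and align_shift as hypothetical names; for a power-of-two alignment, rounddown() is equivalent to masking off the low bits:

    #include <stddef.h>

    static size_t calc_copysize(size_t size, size_t done,
                                size_t max_len, unsigned int align_shift)
    {
        /* Clip the segment to the engine's maximum transfer length. */
        size_t copy = size - done;

        if (copy > max_len)
            copy = max_len;

        /* Not the last segment: keep the next one aligned. */
        if (done + copy < size && align_shift)
            copy &= ~(((size_t)1 << align_shift) - 1);

        return copy;
    }
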
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 1bcf9aea0cdf..8816f74a22b4 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -9,6 +9,7 @@
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/edac.h>
+#include <linux/firmware/intel/stratix10-smc.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -1361,8 +1362,19 @@ static const struct edac_device_prv_data a10_l2ecc_data = {
#ifdef CONFIG_EDAC_ALTERA_ETHERNET
+static int __init socfpga_init_ethernet_ecc(struct altr_edac_device_dev *dev)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-eth-mac-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(dev);
+}
+
static const struct edac_device_prv_data a10_enetecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_ethernet_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1374,21 +1386,25 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.inject_fops = &altr_edac_a10_device_inject2_fops,
};
-static int __init socfpga_init_ethernet_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-eth-mac-ecc");
-}
-
-early_initcall(socfpga_init_ethernet_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
/********************** NAND Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_NAND
+static int __init socfpga_init_nand_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-nand-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_nandecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_nand_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1400,21 +1416,25 @@ static const struct edac_device_prv_data a10_nandecc_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_nand_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-nand-ecc");
-}
-
-early_initcall(socfpga_init_nand_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_NAND */
/********************** DMA Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_DMA
+static int __init socfpga_init_dma_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-dma-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_dmaecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_dma_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1426,21 +1446,25 @@ static const struct edac_device_prv_data a10_dmaecc_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_dma_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-dma-ecc");
-}
-
-early_initcall(socfpga_init_dma_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_DMA */
/********************** USB Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_USB
+static int __init socfpga_init_usb_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-usb-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_usbecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_usb_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1452,21 +1476,25 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.inject_fops = &altr_edac_a10_device_inject2_fops,
};
-static int __init socfpga_init_usb_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-usb-ecc");
-}
-
-early_initcall(socfpga_init_usb_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_USB */
/********************** QSPI Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_QSPI
+static int __init socfpga_init_qspi_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-qspi-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_qspiecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_qspi_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1478,13 +1506,6 @@ static const struct edac_device_prv_data a10_qspiecc_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_qspi_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-qspi-ecc");
-}
-
-early_initcall(socfpga_init_qspi_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_QSPI */
/********************* SDMMC Device Functions **********************/
@@ -1593,6 +1614,35 @@ err_release_group_1:
return rc;
}
+static int __init socfpga_init_sdmmc_ecc(struct altr_edac_device_dev *device)
+{
+ int rc = -ENODEV;
+ struct device_node *child;
+
+ child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
+ if (!child)
+ return -ENODEV;
+
+ if (!of_device_is_available(child))
+ goto exit;
+
+ if (validate_parent_available(child))
+ goto exit;
+
+ /* Init portB */
+ rc = altr_init_a10_ecc_block(child, ALTR_A10_SDMMC_IRQ_MASK,
+ a10_sdmmceccb_data.ecc_enable_mask, 1);
+ if (rc)
+ goto exit;
+
+ /* Setup portB */
+ return altr_portb_setup(device);
+
+exit:
+ of_node_put(child);
+ return rc;
+}
+
static irqreturn_t altr_edac_a10_ecc_irq_portb(int irq, void *dev_id)
{
struct altr_edac_device_dev *ad = dev_id;
@@ -1617,7 +1667,7 @@ static irqreturn_t altr_edac_a10_ecc_irq_portb(int irq, void *dev_id)
}
static const struct edac_device_prv_data a10_sdmmcecca_data = {
- .setup = altr_portb_setup,
+ .setup = socfpga_init_sdmmc_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1630,7 +1680,7 @@ static const struct edac_device_prv_data a10_sdmmcecca_data = {
};
static const struct edac_device_prv_data a10_sdmmceccb_data = {
- .setup = altr_portb_setup,
+ .setup = socfpga_init_sdmmc_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENB,
.ue_clear_mask = ALTR_A10_ECC_DERRPENB,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1642,35 +1692,6 @@ static const struct edac_device_prv_data a10_sdmmceccb_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_sdmmc_ecc(void)
-{
- int rc = -ENODEV;
- struct device_node *child;
-
- if (!socfpga_is_a10() && !socfpga_is_s10())
- return -ENODEV;
-
- child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
- if (!child) {
- edac_printk(KERN_WARNING, EDAC_DEVICE, "SDMMC node not found\n");
- return -ENODEV;
- }
-
- if (!of_device_is_available(child))
- goto exit;
-
- if (validate_parent_available(child))
- goto exit;
-
- rc = altr_init_a10_ecc_block(child, ALTR_A10_SDMMC_IRQ_MASK,
- a10_sdmmcecca_data.ecc_enable_mask, 1);
-exit:
- of_node_put(child);
- return rc;
-}
-
-early_initcall(socfpga_init_sdmmc_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_SDMMC */
/********************* Arria10 EDAC Device Functions *************************/
@@ -1762,28 +1783,24 @@ static ssize_t altr_edac_a10_device_trig2(struct file *file,
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
writel(priv->ue_set_mask, set_addr);
} else {
- /* Setup write of 0 to first 4 bytes */
- writel(0x0, drvdata->base + ECC_BLK_WDATA0_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
- /* Setup write of 4 bytes */
+ /* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
/* Setup Address to 0 */
- writel(0x0, drvdata->base + ECC_BLK_ADDRESS_OFST);
- /* Setup accctrl to write & data override */
- writel(ECC_WRITE_DOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
- /* Kick it. */
- writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
- /* Setup accctrl to read & ecc override */
- writel(ECC_READ_EOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ writel(0, drvdata->base + ECC_BLK_ADDRESS_OFST);
+ /* Setup accctrl to read & ecc & data override */
+ writel(ECC_READ_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
/* Kick it. */
writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
/* Setup write for single bit change */
- writel(0x1, drvdata->base + ECC_BLK_WDATA0_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA0_OFST) ^ 0x1,
+ drvdata->base + ECC_BLK_WDATA0_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA1_OFST),
+ drvdata->base + ECC_BLK_WDATA1_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA2_OFST),
+ drvdata->base + ECC_BLK_WDATA2_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA3_OFST),
+ drvdata->base + ECC_BLK_WDATA3_OFST);
+
/* Copy Read ECC to Write ECC */
writel(readl(drvdata->base + ECC_BLK_RECC0_OFST),
drvdata->base + ECC_BLK_WECC0_OFST);
@@ -1930,6 +1947,15 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
goto err_release_group1;
}
+#ifdef CONFIG_ARCH_STRATIX10
+ /* Use the DT IRQ index to identify the SError origin instead of mapping an IRQ */
+ rc = of_property_read_u32_index(np, "interrupts", 0, &altdev->db_irq);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to parse DB IRQ index\n");
+ goto err_release_group1;
+ }
+#else
altdev->db_irq = irq_of_parse_and_map(np, 1);
if (!altdev->db_irq) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating DBIRQ\n");
@@ -1943,6 +1969,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
goto err_release_group1;
}
+#endif
rc = edac_device_add_device(dci);
if (rc) {
@@ -2005,6 +2032,10 @@ static const struct irq_domain_ops a10_eccmgr_ic_ops = {
/************** Stratix 10 EDAC Double Bit Error Handler ************/
#define to_a10edac(p, m) container_of(p, struct altr_arria10_edac, m)
+#ifdef CONFIG_ARCH_STRATIX10
+/* panic routine issues reboot on non-zero panic_timeout */
+extern int panic_timeout;
+
/*
* The double bit error is handled through SError which is fatal. This is
* called as a panic notifier to printout ECC error info as part of the panic.
@@ -2018,17 +2049,37 @@ static int s10_edac_dberr_handler(struct notifier_block *this,
regmap_read(edac->ecc_mgr_map, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
&dberror);
regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST, dberror);
- if (dberror & S10_DDR0_IRQ_MASK) {
- regmap_read(edac->ecc_mgr_map, A10_DERRADDR_OFST, &err_addr);
- regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
- err_addr);
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC: [Uncorrectable errors @ 0x%08X]\n\n",
- err_addr);
+ if (dberror & S10_DBE_IRQ_MASK) {
+ struct list_head *position;
+ struct altr_edac_device_dev *ed;
+ struct arm_smccc_res result;
+
+ /* Find the matching DBE in the list of devices */
+ list_for_each(position, &edac->a10_ecc_devices) {
+ ed = list_entry(position, struct altr_edac_device_dev,
+ next);
+ if (!(BIT(ed->db_irq) & dberror))
+ continue;
+
+ writel(ALTR_A10_ECC_DERRPENA,
+ ed->base + ALTR_A10_ECC_INTSTAT_OFST);
+ err_addr = readl(ed->base + ALTR_S10_DERR_ADDRA_OFST);
+ regmap_write(edac->ecc_mgr_map,
+ S10_SYSMGR_UE_ADDR_OFST, err_addr);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "EDAC: [Fatal DBE on %s @ 0x%08X]\n",
+ ed->edac_dev_name, err_addr);
+ break;
+ }
+ /* Notify the System through SMC. Reboot delay = 1 second */
+ panic_timeout = 1;
+ arm_smccc_smc(INTEL_SIP_SMC_ECC_DBE, dberror, 0, 0, 0, 0,
+ 0, 0, &result);
}
return NOTIFY_DONE;
}
+#endif
/****************** Arria 10 EDAC Probe Function *********************/
static int altr_edac_a10_probe(struct platform_device *pdev)
@@ -2098,16 +2149,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_irq_handler,
edac);
- if (socfpga_is_a10()) {
- edac->db_irq = platform_get_irq(pdev, 1);
- if (edac->db_irq < 0) {
- dev_err(&pdev->dev, "No DBERR IRQ resource\n");
- return edac->db_irq;
- }
- irq_set_chained_handler_and_data(edac->db_irq,
- altr_edac_a10_irq_handler,
- edac);
- } else {
+#ifdef CONFIG_ARCH_STRATIX10
+ {
int dberror, err_addr;
edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
@@ -2130,6 +2173,15 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
S10_SYSMGR_UE_ADDR_OFST, 0);
}
}
+#else
+ edac->db_irq = platform_get_irq(pdev, 1);
+ if (edac->db_irq < 0) {
+ dev_err(&pdev->dev, "No DBERR IRQ resource\n");
+ return edac->db_irq;
+ }
+ irq_set_chained_handler_and_data(edac->db_irq,
+ altr_edac_a10_irq_handler, edac);
+#endif
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
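
The rewritten trigger sequence in altr_edac_a10_device_trig2() above no longer writes a fixed zero pattern; it reads the word back with ECC and data override, flips a single data bit, and writes the original check bits back so exactly one correctable error is planted. Below is a condensed sketch of that read-modify-write, assuming an ioremapped register base and showing only the first data/ECC word (the driver also copies RDATA1..3 and RECC1); demo_inject_single_bit_error() is a hypothetical name:

    #include <linux/io.h>

    static void demo_inject_single_bit_error(void __iomem *base)
    {
        u32 data;

        /* Read with ECC + data override so the check bits are latched. */
        writel(ECC_READ_EDOVR, base + ECC_BLK_ACCCTRL_OFST);
        writel(ECC_XACT_KICK, base + ECC_BLK_STARTACC_OFST);

        /* Flip one data bit but write back the original check bits. */
        data = readl(base + ECC_BLK_RDATA0_OFST);
        writel(data ^ 0x1, base + ECC_BLK_WDATA0_OFST);
        writel(readl(base + ECC_BLK_RECC0_OFST),
               base + ECC_BLK_WECC0_OFST);
    }
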
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index f8664bac9fa8..55654cc4bcdf 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -289,6 +289,7 @@ struct altr_sdram_mc_data {
#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000
/************* Stratix10 Defines **************/
+#define ALTR_S10_DERR_ADDRA_OFST 0x2C
/* Stratix10 ECC Manager Defines */
#define S10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
@@ -299,6 +300,7 @@ struct altr_sdram_mc_data {
#define S10_SYSMGR_UE_ADDR_OFST 0x224
#define S10_DDR0_IRQ_MASK BIT(16)
+#define S10_DBE_IRQ_MASK 0x3FE
/* Define ECC Block Offsets for peripherals */
#define ECC_BLK_ADDRESS_OFST 0x40
@@ -319,7 +321,7 @@ struct altr_sdram_mc_data {
#define ECC_BLK_STARTACC_OFST 0x7C
#define ECC_XACT_KICK 0x10000
-#define ECC_WORD_WRITE 0xF
+#define ECC_WORD_WRITE 0xFF
#define ECC_WRITE_DOVR 0x101
#define ECC_WRITE_EDOVR 0x103
#define ECC_READ_EOVR 0x2
@@ -370,69 +372,4 @@ struct altr_arria10_edac {
struct notifier_block panic_notifier;
};
-/*
- * Functions specified by ARM SMC Calling convention:
- *
- * FAST call executes atomic operations, returns when the requested operation
- * has completed.
- * STD call starts a operation which can be preempted by a non-secure
- * interrupt. The call can return before the requested operation has
- * completed.
- *
- * a0..a7 is used as register names in the descriptions below, on arm32
- * that translates to r0..r7 and on arm64 to w0..w7.
- */
-
-#define INTEL_SIP_SMC_STD_CALL_VAL(func_num) \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_SIP, (func_num))
-
-#define INTEL_SIP_SMC_FAST_CALL_VAL(func_num) \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_SIP, (func_num))
-
-#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
-#define INTEL_SIP_SMC_STATUS_OK 0x0
-#define INTEL_SIP_SMC_REG_ERROR 0x5
-
-/*
- * Request INTEL_SIP_SMC_REG_READ
- *
- * Read a protected register using SMCCC
- *
- * Call register usage:
- * a0: INTEL_SIP_SMC_REG_READ.
- * a1: register address.
- * a2-7: not used.
- *
- * Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_REG_ERROR, or
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION
- * a1: Value in the register
- * a2-3: not used.
- */
-#define INTEL_SIP_SMC_FUNCID_REG_READ 7
-#define INTEL_SIP_SMC_REG_READ \
- INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ)
-
-/*
- * Request INTEL_SIP_SMC_REG_WRITE
- *
- * Write a protected register using SMCCC
- *
- * Call register usage:
- * a0: INTEL_SIP_SMC_REG_WRITE.
- * a1: register address
- * a2: value to program into register.
- * a3-7: not used.
- *
- * Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_REG_ERROR, or
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION
- * a1-3: not used.
- */
-#define INTEL_SIP_SMC_FUNCID_REG_WRITE 8
-#define INTEL_SIP_SMC_REG_WRITE \
- INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
-
#endif /* #ifndef _ALTERA_EDAC_H */
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 6ea98575a402..e2a99466faaa 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -18,6 +18,9 @@ static struct msr __percpu *msrs;
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
+/* Number of Unified Memory Controllers */
+static u8 num_umcs;
+
/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
@@ -449,6 +452,9 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
#define for_each_chip_select_mask(i, dct, pvt) \
for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+#define for_each_umc(i) \
+ for (i = 0; i < num_umcs; i++)
+
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -722,7 +728,7 @@ static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
if (pvt->umc) {
u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
continue;
@@ -781,6 +787,22 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
(dclr & BIT(15)) ? "yes" : "no");
}
+/*
+ * The Address Mask should be a contiguous set of bits in the non-interleaved
+ * case. So to check for CS interleaving, find the most- and least-significant
+ * bits of the mask, generate a contiguous bitmask, and compare the two.
+ */
+static bool f17_cs_interleaved(struct amd64_pvt *pvt, u8 ctrl, int cs)
+{
+ u32 mask = pvt->csels[ctrl].csmasks[cs >> 1];
+ u32 msb = fls(mask) - 1, lsb = ffs(mask) - 1;
+ u32 test_mask = GENMASK(msb, lsb);
+
+ edac_dbg(1, "mask=0x%08x test_mask=0x%08x\n", mask, test_mask);
+
+ return mask ^ test_mask;
+}
+
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, cs0, cs1;
@@ -797,8 +819,19 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
size1 = 0;
cs1 = dimm * 2 + 1;
- if (csrow_enabled(cs1, ctrl, pvt))
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
+ if (csrow_enabled(cs1, ctrl, pvt)) {
+ /*
+ * CS interleaving is only supported if both CSes have
+ * the same amount of memory. Because they are
+ * interleaved, it will look like both CSes have the
+ * full amount of memory. Save the size for both as
+ * half the amount we found on CS0, if interleaved.
+ */
+ if (f17_cs_interleaved(pvt, ctrl, cs1))
+ size1 = size0 = (size0 >> 1);
+ else
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
+ }
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
@@ -811,7 +844,7 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
struct amd64_umc *umc;
u32 i, tmp, umc_base;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
@@ -894,8 +927,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- amd64_info("using %s syndromes.\n",
- ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
+ amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
/*
@@ -1388,7 +1420,7 @@ static int f17_early_channel_count(struct amd64_pvt *pvt)
int i, channels = 0;
/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
- for (i = 0; i < NUM_UMCS; i++)
+ for_each_umc(i)
channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
amd64_info("MCT channel count: %d\n", channels);
@@ -2211,6 +2243,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
+ [F17_M30H_CPUS] = {
+ .ctl_name = "F17h_M30h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -2464,18 +2505,14 @@ static inline void decode_bus_error(int node_id, struct mce *m)
* To find the UMC channel represented by this bank we need to match on its
* instance_id. The instance_id of a bank is held in the lower 32 bits of its
* IPID.
+ *
+ * Currently, we can derive the channel number by looking at the 6th nibble in
+ * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
+ * number.
*/
-static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
+static int find_umc_channel(struct mce *m)
{
- u32 umc_instance_id[] = {0x50f00, 0x150f00};
- u32 instance_id = m->ipid & GENMASK(31, 0);
- int i, channel = -1;
-
- for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
- if (umc_instance_id[i] == instance_id)
- channel = i;
-
- return channel;
+ return (m->ipid & GENMASK(31, 0)) >> 20;
}
static void decode_umc_error(int node_id, struct mce *m)
@@ -2497,11 +2534,7 @@ static void decode_umc_error(int node_id, struct mce *m)
if (m->status & MCI_STATUS_DEFERRED)
ecc_type = 3;
- err.channel = find_umc_channel(pvt, m);
- if (err.channel < 0) {
- err.err_code = ERR_CHANNEL;
- goto log_error;
- }
+ err.channel = find_umc_channel(m);
if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
err.err_code = ERR_NORM_ADDR;
@@ -2603,19 +2636,19 @@ static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
if (pvt->umc) {
u8 i;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
/* Check enabled channels only: */
- if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
- (pvt->umc[i].ecc_ctrl & BIT(7))) {
- pvt->ecc_sym_sz = 8;
- break;
+ if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
+ if (pvt->umc[i].ecc_ctrl & BIT(9)) {
+ pvt->ecc_sym_sz = 16;
+ return;
+ } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
+ pvt->ecc_sym_sz = 8;
+ return;
+ }
}
}
-
- return;
- }
-
- if (pvt->fam >= 0x10) {
+ } else if (pvt->fam >= 0x10) {
u32 tmp;
amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
@@ -2639,7 +2672,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
u32 i, umc_base;
/* Read registers from each UMC */
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
@@ -3052,7 +3085,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
if (boot_cpu_data.x86 >= 0x17) {
u8 umc_en_mask = 0, ecc_en_mask = 0;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
u32 base = get_umc_base(i);
/* Only check enabled UMCs. */
@@ -3105,7 +3138,7 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
u8 i, ecc_en = 1, cpk_en = 1;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
@@ -3203,6 +3236,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F17_M10H_CPUS];
pvt->ops = &family_types[F17_M10H_CPUS].ops;
break;
+ } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
+ fam_type = &family_types[F17_M30H_CPUS];
+ pvt->ops = &family_types[F17_M30H_CPUS].ops;
+ break;
}
/* fall through */
case 0x18:
@@ -3236,6 +3273,22 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
+/* Set the number of Unified Memory Controllers in the system. */
+static void compute_num_umcs(void)
+{
+ u8 model = boot_cpu_data.x86_model;
+
+ if (boot_cpu_data.x86 < 0x17)
+ return;
+
+ if (model >= 0x30 && model <= 0x3f)
+ num_umcs = 8;
+ else
+ num_umcs = 2;
+
+ edac_dbg(1, "Number of UMCs: %x", num_umcs);
+}
+
static int init_one_instance(unsigned int nid)
{
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
@@ -3260,7 +3313,7 @@ static int init_one_instance(unsigned int nid)
goto err_free;
if (pvt->fam >= 0x17) {
- pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
+ pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc) {
ret = -ENOMEM;
goto err_free;
@@ -3299,8 +3352,14 @@ static int init_one_instance(unsigned int nid)
* Always allocate two channels since we can have setups with DIMMs on
* only one channel. Also, this simplifies handling later for the price
* of a couple of KBs tops.
+ *
+ * On Fam17h+, the number of controllers may be greater than two. So set
+ * the size equal to the maximum number of UMCs.
*/
- layers[1].size = 2;
+ if (pvt->fam >= 0x17)
+ layers[1].size = num_umcs;
+ else
+ layers[1].size = 2;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
@@ -3481,6 +3540,8 @@ static int __init amd64_edac_init(void)
if (!msrs)
goto err_free;
+ compute_num_umcs();
+
for (i = 0; i < amd_nb_num(); i++) {
err = probe_one_instance(i);
if (err) {
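
The f17_cs_interleaved() helper added above relies on a non-interleaved chip-select mask being one contiguous run of set bits: rebuild a contiguous mask from the highest and lowest set bits, then XOR it with the original; any difference means interleave bits were punched out of the middle. A stand-alone sketch with the hypothetical name mask_is_interleaved(), valid only for a non-zero mask:

    #include <linux/bitops.h>    /* fls(), ffs() */
    #include <linux/bits.h>      /* GENMASK() */

    static bool mask_is_interleaved(u32 mask)
    {
        u32 msb = fls(mask) - 1;    /* highest set bit */
        u32 lsb = ffs(mask) - 1;    /* lowest set bit */

        /* A contiguous mask rebuilds to itself; anything else differs. */
        return mask != GENMASK(msb, lsb);
    }

For example, 0x00ff0 rebuilds to itself (not interleaved), while 0x00f0f rebuilds to 0x00fff and is flagged as interleaved.
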
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 4242f8e39c18..8f66472f7adc 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -117,6 +117,8 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
/*
* Function 1 - Address Map
@@ -272,8 +274,6 @@
#define UMC_SDP_INIT BIT(31)
-#define NUM_UMCS 2
-
enum amd_families {
K8_CPUS = 0,
F10_CPUS,
@@ -284,6 +284,7 @@ enum amd_families {
F16_M30H_CPUS,
F17_CPUS,
F17_M10H_CPUS,
+ F17_M30H_CPUS,
NUM_FAMILIES,
};
@@ -363,7 +364,7 @@ struct amd64_pvt {
u32 dct_sel_hi; /* DRAM Controller Select High */
u32 online_spare; /* On-Line spare Reg */
- /* x4 or x8 syndromes in use */
+ /* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;
/* place to store error injection parameters prior to issue */
@@ -396,8 +397,8 @@ struct err_info {
static inline u32 get_umc_base(u8 channel)
{
- /* ch0: 0x50000, ch1: 0x150000 */
- return 0x50000 + (!!channel << 20);
+ /* chY: 0xY50000 */
+ return 0x50000 + (channel << 20);
}
static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
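
The simplified get_umc_base() above and the new find_umc_channel() in amd64_edac.c are inverses of each other: channel Y owns register base 0xY50000, and the channel number is recovered from bits [23:20] of the low IPID word. A sketch under hypothetical names (demo_umc_base(), demo_umc_channel()):

    /* chY maps to register base 0xY50000. */
    static inline u32 demo_umc_base(u8 channel)
    {
        return 0x50000 + ((u32)channel << 20);
    }

    /* The channel is the nibble at bits [23:20], e.g. 0x350000 -> 3. */
    static inline u8 demo_umc_channel(u32 ipid_lo)
    {
        return ipid_lo >> 20;
    }
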
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index c334fb7c63df..6f06aec4877c 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -181,6 +181,54 @@ static struct notifier_block i10nm_mce_dec = {
.priority = MCE_PRIO_EDAC,
};
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature.
+ * Exercise the address decode logic by writing an address to
+ * /sys/kernel/debug/edac/i10nm_test/addr.
+ */
+static struct dentry *i10nm_test;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct mce m;
+
+ pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
+
+ memset(&m, 0, sizeof(m));
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x90;
+ /* One corrected error */
+ m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
+ m.addr = val;
+ skx_mce_check_error(NULL, 0, &m);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_i10nm_debug(void)
+{
+ i10nm_test = edac_debugfs_create_dir("i10nm_test");
+ if (!i10nm_test)
+ return;
+
+ if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
+ NULL, &fops_u64_wo)) {
+ debugfs_remove(i10nm_test);
+ i10nm_test = NULL;
+ }
+}
+
+static void teardown_i10nm_debug(void)
+{
+ debugfs_remove_recursive(i10nm_test);
+}
+#else
+static inline void setup_i10nm_debug(void) {}
+static inline void teardown_i10nm_debug(void) {}
+#endif /*CONFIG_EDAC_DEBUG*/
+
static int __init i10nm_init(void)
{
u8 mc = 0, src_id = 0, node_id = 0;
@@ -249,7 +297,7 @@ static int __init i10nm_init(void)
opstate_init();
mce_register_decode_chain(&i10nm_mce_dec);
- setup_skx_debug("i10nm_test");
+ setup_i10nm_debug();
i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
@@ -262,7 +310,7 @@ fail:
static void __exit i10nm_exit(void)
{
edac_dbg(2, "\n");
- teardown_skx_debug();
+ teardown_i10nm_debug();
mce_unregister_decode_chain(&i10nm_mce_dec);
skx_adxl_put();
skx_remove();
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 0a1814dad6cf..bb0202ad7a13 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1004,7 +1004,7 @@ static inline void amd_decode_err_code(u16 ec)
/*
* Filter out unwanted MCE signatures here.
*/
-static bool amd_filter_mce(struct mce *m)
+static bool ignore_mce(struct mce *m)
{
/*
* NB GART TLB error reporting is disabled by default.
@@ -1038,7 +1038,7 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
unsigned int fam = x86_family(m->cpuid);
int ecc;
- if (amd_filter_mce(m))
+ if (ignore_mce(m))
return NOTIFY_STOP;
pr_emerg(HW_ERR "%s\n", decode_error_status(m));
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index adae4c848ca1..a5c8fa3a249a 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -540,6 +540,54 @@ static struct notifier_block skx_mce_dec = {
.priority = MCE_PRIO_EDAC,
};
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature.
+ * Exercise the address decode logic by writing an address to
+ * /sys/kernel/debug/edac/skx_test/addr.
+ */
+static struct dentry *skx_test;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct mce m;
+
+ pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
+
+ memset(&m, 0, sizeof(m));
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x90;
+ /* One corrected error */
+ m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
+ m.addr = val;
+ skx_mce_check_error(NULL, 0, &m);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_skx_debug(void)
+{
+ skx_test = edac_debugfs_create_dir("skx_test");
+ if (!skx_test)
+ return;
+
+ if (!edac_debugfs_create_file("addr", 0200, skx_test,
+ NULL, &fops_u64_wo)) {
+ debugfs_remove(skx_test);
+ skx_test = NULL;
+ }
+}
+
+static void teardown_skx_debug(void)
+{
+ debugfs_remove_recursive(skx_test);
+}
+#else
+static inline void setup_skx_debug(void) {}
+static inline void teardown_skx_debug(void) {}
+#endif /*CONFIG_EDAC_DEBUG*/
+
/*
* skx_init:
* make sure we are running on the correct cpu model
@@ -619,7 +667,7 @@ static int __init skx_init(void)
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- setup_skx_debug("skx_test");
+ setup_skx_debug();
mce_register_decode_chain(&skx_mce_dec);
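
With the debugfs hook moved out of skx_common.c (see the skx_common.c hunk below), skx_base.c and i10nm_base.c each carry a private copy of the same pattern: a write-only debugfs attribute whose setter feeds a fake MCE into the decode path. A generic sketch of that pattern follows; the demo_* names are hypothetical, and DEFINE_SIMPLE_ATTRIBUTE() with a NULL getter plus mode 0200 yields a write-only file:

    #include <linux/debugfs.h>

    static struct dentry *demo_dir;

    static int demo_addr_set(void *data, u64 val)
    {
        pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
        /* A real driver would build a struct mce and decode it here. */
        return 0;
    }
    DEFINE_SIMPLE_ATTRIBUTE(demo_addr_fops, NULL, demo_addr_set, "%llu\n");

    static void demo_debugfs_init(void)
    {
        demo_dir = debugfs_create_dir("demo_test", NULL);
        debugfs_create_file("addr", 0200, demo_dir, NULL, &demo_addr_fops);
    }

Writing an address, e.g. `echo 0x12345678 > /sys/kernel/debug/demo_test/addr`, then exercises the decoder.
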
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 0e96e7b5b0a7..b0dddcfa9baa 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -1,7 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Common codes for both the skx_edac driver and Intel 10nm server EDAC driver.
- * Originally split out from the skx_edac driver.
+ *
+ * Shared code between the skx_edac and i10nm_edac drivers. Originally
+ * split out from the skx_edac driver.
+ *
+ * This file is linked into both the skx_edac and i10nm_edac drivers. To
+ * avoid link errors, it must behave like a pure library and must not
+ * carry symbols or defines that would conflict when it is linked into a
+ * module and into a built-in object at the same time (for example,
+ * __this_module symbol references when the file is linked into a
+ * built-in object).
*
* Copyright (c) 2018, Intel Corporation.
*/
@@ -644,48 +652,3 @@ void skx_remove(void)
kfree(d);
}
}
-
-#ifdef CONFIG_EDAC_DEBUG
-/*
- * Debug feature.
- * Exercise the address decode logic by writing an address to
- * /sys/kernel/debug/edac/dirname/addr.
- */
-static struct dentry *skx_test;
-
-static int debugfs_u64_set(void *data, u64 val)
-{
- struct mce m;
-
- pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
-
- memset(&m, 0, sizeof(m));
- /* ADDRV + MemRd + Unknown channel */
- m.status = MCI_STATUS_ADDRV + 0x90;
- /* One corrected error */
- m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
- m.addr = val;
- skx_mce_check_error(NULL, 0, &m);
-
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
-
-void setup_skx_debug(const char *dirname)
-{
- skx_test = edac_debugfs_create_dir(dirname);
- if (!skx_test)
- return;
-
- if (!edac_debugfs_create_file("addr", 0200, skx_test,
- NULL, &fops_u64_wo)) {
- debugfs_remove(skx_test);
- skx_test = NULL;
- }
-}
-
-void teardown_skx_debug(void)
-{
- debugfs_remove_recursive(skx_test);
-}
-#endif /*CONFIG_EDAC_DEBUG*/
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index d25374e34d4f..d18fa98669af 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -141,12 +141,4 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
void skx_remove(void);
-#ifdef CONFIG_EDAC_DEBUG
-void setup_skx_debug(const char *dirname);
-void teardown_skx_debug(void);
-#else
-static inline void setup_skx_debug(const char *dirname) {}
-static inline void teardown_skx_debug(void) {}
-#endif /*CONFIG_EDAC_DEBUG*/
-
#endif /* _SKX_COMM_EDAC_H */
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 8e17149655f0..540e8cd16ee6 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -116,7 +116,7 @@ config EXTCON_PALMAS
config EXTCON_PTN5150
tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
- depends on I2C && GPIOLIB || COMPILE_TEST
+ depends on I2C && (GPIOLIB || COMPILE_TEST)
select REGMAP_I2C
help
Say Y here to enable support for USB peripheral and USB host
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index a983708b77a6..50f9402fb325 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -333,7 +333,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
struct axp288_extcon_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
- const char *name;
+ struct acpi_device *adev;
int ret, i, pirq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -357,9 +357,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = acpi_dev_get_first_match_name("INT3496", NULL, -1);
- if (name) {
- info->id_extcon = extcon_get_extcon_dev(name);
+ adev = acpi_dev_get_first_match_dev("INT3496", NULL, -1);
+ if (adev) {
+ info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
+ put_device(&adev->dev);
if (!info->id_extcon)
return -EPROBE_DEFER;
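
The extcon-axp288 hunk above switches from acpi_dev_get_first_match_name(), which leaked a reference, to acpi_dev_get_first_match_dev(), which hands back a reference-counted struct acpi_device that the caller must drop. A minimal sketch of the lookup/use/put sequence; demo_find_int3496() is a hypothetical name:

    #include <linux/acpi.h>

    static int demo_find_int3496(void)
    {
        struct acpi_device *adev;

        adev = acpi_dev_get_first_match_dev("INT3496", NULL, -1);
        if (!adev)
            return -ENODEV;

        pr_info("found %s\n", acpi_dev_name(adev)); /* use while held */
        put_device(&adev->dev); /* drop the reference the lookup took */
        return 0;
    }
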
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 45c048751f3b..7183ab34269e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2939,7 +2939,6 @@ static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
- mmiowb();
ohci->mc_channels = channels;
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index cac16c4b0df3..7b655f6156fb 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,20 +5,6 @@
menu "Firmware Drivers"
-config ARM_PSCI_FW
- bool
-
-config ARM_PSCI_CHECKER
- bool "ARM PSCI checker"
- depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
- help
- Run the PSCI checker during startup. This checks that hotplug and
- suspend operations work correctly when using PSCI.
-
- The torture tests may interfere with the PSCI checker by turning CPUs
- on and off through hotplug, so for now torture tests and PSCI checker
- are mutually exclusive.
-
config ARM_SCMI_PROTOCOL
bool "ARM System Control and Management Interface (SCMI) Message Protocol"
depends on ARM || ARM64 || COMPILE_TEST
@@ -270,6 +256,7 @@ config TI_SCI_PROTOCOL
config HAVE_ARM_SMCCC
bool
+source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 80feb635120f..9a3909a22682 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,8 +2,6 @@
#
# Makefile for the linux kernel.
#
-obj-$(CONFIG_ARM_PSCI_FW) += psci.o
-obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
@@ -25,6 +23,7 @@ CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQU
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
+obj-y += psci/
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index e6376f985ef7..9cd70d1a5622 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -165,6 +165,7 @@ static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
return err;
}
+NOKPROBE_SYMBOL(invoke_sdei_fn);
static struct sdei_event *sdei_event_find(u32 event_num)
{
@@ -879,6 +880,7 @@ static void sdei_smccc_smc(unsigned long function_id,
{
arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
+NOKPROBE_SYMBOL(sdei_smccc_smc);
static void sdei_smccc_hvc(unsigned long function_id,
unsigned long arg0, unsigned long arg1,
@@ -887,6 +889,7 @@ static void sdei_smccc_hvc(unsigned long function_id,
{
arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
+NOKPROBE_SYMBOL(sdei_smccc_hvc);
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
sdei_event_callback *critical_cb)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 099d83e4e910..fae2d5c43314 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -416,11 +416,8 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
nr++;
}
-void __init dmi_memdev_walk(void)
+static void __init dmi_memdev_walk(void)
{
- if (!dmi_available)
- return;
-
if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
if (dmi_memdev)
@@ -614,7 +611,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
return 1;
}
-void __init dmi_scan_machine(void)
+static void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
char buf[32];
@@ -769,15 +766,20 @@ static int __init dmi_init(void)
subsys_initcall(dmi_init);
/**
- * dmi_set_dump_stack_arch_desc - set arch description for dump_stack()
+ * dmi_setup - scan and setup DMI system information
*
- * Invoke dump_stack_set_arch_desc() with DMI system information so that
- * DMI identifiers are printed out on task dumps. Arch boot code should
- * call this function after dmi_scan_machine() if it wants to print out DMI
- * identifiers on task dumps.
+ * Scan the DMI system information. This sets up DMI identifiers
+ * (dmi_system_id) for printing on task dumps and prepares DIMM entry
+ * information (dmi_memdev_info) from the SMBIOS table for use when
+ * reporting memory errors.
*/
-void __init dmi_set_dump_stack_arch_desc(void)
+void __init dmi_setup(void)
{
+ dmi_scan_machine();
+ if (!dmi_available)
+ return;
+
+ dmi_memdev_walk();
dump_stack_set_arch_desc("%s", dmi_ids_string);
}
@@ -841,7 +843,7 @@ static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
* returns non zero or we hit the end. Callback function is called for
* each successful match. Returns the number of matches.
*
- * dmi_scan_machine must be called before this function is called.
+ * dmi_setup must be called before this function is called.
*/
int dmi_check_system(const struct dmi_system_id *list)
{
@@ -871,7 +873,7 @@ EXPORT_SYMBOL(dmi_check_system);
* Walk the blacklist table until the first match is found. Return the
* pointer to the matching entry or NULL if there's no match.
*
- * dmi_scan_machine must be called before this function is called.
+ * dmi_setup must be called before this function is called.
*/
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
{
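
With scanning folded into dmi_setup(), consumers of the DMI tables are unchanged: they still declare a sentinel-terminated match table and call dmi_check_system() or dmi_first_match() after arch code has run dmi_setup(). A usage sketch with hypothetical demo_* names and made-up match strings:

    #include <linux/dmi.h>

    static const struct dmi_system_id demo_quirks[] = {
        {
            .ident = "Example Board",
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
            },
        },
        { }    /* sentinel */
    };

    static void demo_apply_quirks(void)
    {
        /* Returns the number of entries that matched this machine. */
        if (dmi_check_system(demo_quirks))
            pr_info("quirky board detected\n");
    }
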
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index f99995666f86..e2ac5fa5531b 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -42,10 +42,10 @@ static struct ptdump_info efi_ptdump_info = {
static int __init ptdump_init(void)
{
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
- return 0;
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
- return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
+ return 0;
}
device_initcall(ptdump_init);
@@ -162,13 +162,11 @@ void efi_virtmap_unload(void)
static int __init arm_dmi_init(void)
{
/*
- * On arm64/ARM, DMI depends on UEFI, and dmi_scan_machine() needs to
+ * On arm64/ARM, DMI depends on UEFI, and dmi_setup() needs to
* be called early because dmi_id_init(), which is an arch_initcall
* itself, depends on dmi_scan_machine() having been called already.
*/
- dmi_scan_machine();
- if (dmi_available)
- dmi_set_dump_stack_arch_desc();
+ dmi_setup();
return 0;
}
core_initcall(arm_dmi_init);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index b0103e16fc1b..0460c7581220 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -16,9 +16,9 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
- $(DISABLE_STACKLEAK_PLUGIN)
-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM64) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ -fpie $(DISABLE_STACKLEAK_PLUGIN)
+cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
@@ -71,7 +71,6 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y)
lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y))
-STUBCOPY_RM-y := -R *ksymtab* -R *kcrctab*
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
@@ -86,12 +85,13 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
# this time, use objcopy and leave all sections in place.
#
quiet_cmd_stubcopy = STUBCPY $@
- cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
- then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
- then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
- rm -f $@; /bin/false); \
- else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \
- else /bin/false; fi
+ cmd_stubcopy = \
+ $(STRIP) --strip-debug -o $@ $<; \
+ if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); then \
+ echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \
+ /bin/false; \
+ fi; \
+ $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
#
# ARM discards the .data section because it disallows r/w data in the
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 698745c249e8..6fa2df383f22 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -89,11 +89,24 @@ exit: \
efi_rts_work.status; \
})
+#ifndef arch_efi_save_flags
+#define arch_efi_save_flags(state_flags) local_save_flags(state_flags)
+#define arch_efi_restore_flags(state_flags) local_irq_restore(state_flags)
+#endif
+
+unsigned long efi_call_virt_save_flags(void)
+{
+ unsigned long flags;
+
+ arch_efi_save_flags(flags);
+ return flags;
+}
+
void efi_call_virt_check_flags(unsigned long flags, const char *call)
{
unsigned long cur_flags, mismatch;
- local_save_flags(cur_flags);
+ cur_flags = efi_call_virt_save_flags();
mismatch = flags ^ cur_flags;
if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
@@ -102,7 +115,7 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
flags, cur_flags, call);
- local_irq_restore(flags);
+ arch_efi_restore_flags(flags);
}
/*
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c51462f5aa1e..a5dc0629f225 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -425,7 +425,7 @@ static ssize_t ibft_attr_show_acpitbl(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ACPITBL_SIGNATURE:
- str += sprintf_string(str, ACPI_NAME_SIZE,
+ str += sprintf_string(str, ACPI_NAMESEG_SIZE,
entry->header->header.signature);
break;
case ISCSI_BOOT_ACPITBL_OEM_ID:
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index ec4fd253a4e9..d168c87c7d30 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -333,7 +333,7 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = memblock_alloc_nopanic(sizeof(struct firmware_map_entry),
+ entry = memblock_alloc(sizeof(struct firmware_map_entry),
SMP_CACHE_BYTES);
if (WARN_ON(!entry))
return -ENOMEM;
diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig
new file mode 100644
index 000000000000..26a3b32bf7ab
--- /dev/null
+++ b/drivers/firmware/psci/Kconfig
@@ -0,0 +1,13 @@
+config ARM_PSCI_FW
+ bool
+
+config ARM_PSCI_CHECKER
+ bool "ARM PSCI checker"
+ depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
+ help
+ Run the PSCI checker during startup. This checks that hotplug and
+ suspend operations work correctly when using PSCI.
+
+ The torture tests may interfere with the PSCI checker by turning CPUs
+ on and off through hotplug, so for now torture tests and PSCI checker
+ are mutually exclusive.
diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile
new file mode 100644
index 000000000000..1956b882470f
--- /dev/null
+++ b/drivers/firmware/psci/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_ARM_PSCI_FW) += psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci/psci.c
index c80ec1d03274..fe090ef43d28 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -88,6 +88,7 @@ static u32 psci_function_id[PSCI_FN_MAX];
PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
static u32 psci_cpu_suspend_feature;
+static bool psci_system_reset2_supported;
static inline bool psci_has_ext_power_state(void)
{
@@ -95,6 +96,11 @@ static inline bool psci_has_ext_power_state(void)
PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
}
+static inline bool psci_has_osi_support(void)
+{
+ return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
+}
+
static inline bool psci_power_state_loses_context(u32 state)
{
const u32 mask = psci_has_ext_power_state() ?
@@ -253,7 +259,17 @@ static int get_set_conduit_method(struct device_node *np)
static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
{
- invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+ if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
+ psci_system_reset2_supported) {
+ /*
+ * reset_type[31] = 0 (architectural)
+ * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
+ * cookie = 0 (ignored by the implementation)
+ */
+ invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
+ } else {
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+ }
}
static void psci_sys_poweroff(void)
@@ -270,9 +286,26 @@ static int __init psci_features(u32 psci_func_id)
#ifdef CONFIG_CPU_IDLE
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+ int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
+
+ if (err) {
+ pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
+ return err;
+ }
+
+ if (!psci_power_state_is_valid(*state)) {
+ pr_warn("Invalid PSCI power state %#x\n", *state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
{
- int i, ret, count = 0;
+ int i, ret = 0, count = 0;
u32 *psci_states;
struct device_node *state_node;
@@ -291,29 +324,16 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
return -ENOMEM;
for (i = 0; i < count; i++) {
- u32 state;
-
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
+ of_node_put(state_node);
- ret = of_property_read_u32(state_node,
- "arm,psci-suspend-param",
- &state);
- if (ret) {
- pr_warn(" * %pOF missing arm,psci-suspend-param property\n",
- state_node);
- of_node_put(state_node);
+ if (ret)
goto free_mem;
- }
- of_node_put(state_node);
- pr_debug("psci-power-state %#x index %d\n", state, i);
- if (!psci_power_state_is_valid(state)) {
- pr_warn("Invalid PSCI power state %#x\n", state);
- ret = -EINVAL;
- goto free_mem;
- }
- psci_states[i] = state;
+ pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
}
+
/* Idle states parsed correctly, initialize per-cpu pointer */
per_cpu(psci_power_state, cpu) = psci_states;
return 0;
@@ -451,6 +471,16 @@ static const struct platform_suspend_ops psci_suspend_ops = {
.enter = psci_system_suspend_enter,
};
+static void __init psci_init_system_reset2(void)
+{
+ int ret;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
+
+ if (ret != PSCI_RET_NOT_SUPPORTED)
+ psci_system_reset2_supported = true;
+}
+
static void __init psci_init_system_suspend(void)
{
int ret;
@@ -588,6 +618,7 @@ static int __init psci_probe(void)
psci_init_smccc();
psci_init_cpu_suspend();
psci_init_system_suspend();
+ psci_init_system_reset2();
}
return 0;
@@ -605,9 +636,9 @@ static int __init psci_0_2_init(struct device_node *np)
int err;
err = get_set_conduit_method(np);
-
if (err)
- goto out_put_node;
+ return err;
+
/*
* Starting with v0.2, the PSCI specification introduced a call
* (PSCI_VERSION) that allows probing the firmware version, so
@@ -615,11 +646,7 @@ static int __init psci_0_2_init(struct device_node *np)
* can be carried out according to the specific version reported
* by firmware
*/
- err = psci_probe();
-
-out_put_node:
- of_node_put(np);
- return err;
+ return psci_probe();
}
/*
@@ -631,9 +658,8 @@ static int __init psci_0_1_init(struct device_node *np)
int err;
err = get_set_conduit_method(np);
-
if (err)
- goto out_put_node;
+ return err;
pr_info("Using PSCI v0.1 Function IDs from DT\n");
@@ -657,15 +683,27 @@ static int __init psci_0_1_init(struct device_node *np)
psci_ops.migrate = psci_migrate;
}
-out_put_node:
- of_node_put(np);
- return err;
+ return 0;
+}
+
+static int __init psci_1_0_init(struct device_node *np)
+{
+ int err;
+
+ err = psci_0_2_init(np);
+ if (err)
+ return err;
+
+ if (psci_has_osi_support())
+ pr_info("OSI mode supported.\n");
+
+ return 0;
}
static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
- { .compatible = "arm,psci-1.0", .data = psci_0_2_init},
+ { .compatible = "arm,psci-1.0", .data = psci_1_0_init},
{},
};
@@ -674,6 +712,7 @@ int __init psci_dt_init(void)
struct device_node *np;
const struct of_device_id *matched_np;
psci_initcall_t init_fn;
+ int ret;
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
@@ -681,7 +720,10 @@ int __init psci_dt_init(void)
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
- return init_fn(np);
+ ret = init_fn(np);
+
+ of_node_put(np);
+ return ret;
}
#ifdef CONFIG_ACPI
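A minimal sketch of the warm-reset call, restating the argument encoding spelled out in the psci_sys_reset() comment above (nothing beyond the hunk is assumed):

u32 reset_type = 0;  /* bit 31 = 0: architectural; bits 30:0 = 0: SYSTEM_WARM_RESET */
u64 cookie = 0;      /* ignored by the implementation */
invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), reset_type, cookie, 0);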
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 346943657962..346943657962 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 91b90c0cea73..12acdac85820 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
if (err < 0)
goto out;
- if (err & BIT(pos))
- err = -EACCES;
+ if (value & BIT(pos)) {
+ err = -EPERM;
+ goto out;
+ }
err = 0;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 854bce4fb9e7..217507002dbc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->offset_timer =
devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+ if (!gpio->offset_timer)
+ return -ENOMEM;
return aspeed_gpio_setup_irqs(gpio, pdev);
}
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index f0223cee9774..77092268ee95 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 0ecd2369c2ca..a09d2f9ebacc 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
mutex_init(&exar_gpio->lock);
index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+ if (index < 0)
+ goto err_destroy;
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
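The added check follows the usual ida_simple_get() convention: a non-negative ID on success, a negative errno on failure. A minimal sketch (the error handling differs from the driver's goto, purely for brevity):

int index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
if (index < 0)
        return index;   /* e.g. -ENOMEM; never format a name from a negative ID */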
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 7c659fdaa6d5..3302125e5265 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -377,10 +377,20 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
}
}
-static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
{
- const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
- return dev_name ? dev_name : "pinctrl-merrifield";
+ struct acpi_device *adev;
+ const char *name;
+
+ adev = acpi_dev_get_first_match_dev("INTC1002", NULL, -1);
+ if (adev) {
+ name = devm_kstrdup(priv->dev, acpi_dev_name(adev), GFP_KERNEL);
+ acpi_dev_put(adev);
+ } else {
+ name = "pinctrl-merrifield";
+ }
+
+ return name;
}
static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -441,7 +451,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
}
- pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name(priv);
for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
range = &mrfld_gpio_ranges[i];
retval = gpiochip_add_pin_range(&priv->chip,
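A sketch of the get/put discipline the rewrite above follows, presumably the motivation for dropping acpi_dev_get_first_match_name(): the new helper returns a referenced struct acpi_device, so the name is copied into devm-managed memory before the reference is dropped (the dev pointer here is illustrative):

const char *name = NULL;
struct acpi_device *adev = acpi_dev_get_first_match_dev("INTC1002", NULL, -1);
if (adev) {
        name = devm_kstrdup(dev, acpi_dev_name(adev), GFP_KERNEL);
        acpi_dev_put(adev);     /* safe: the copied name outlives adev */
}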
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 51c7d1b84c2e..0c076dce9e17 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -31,8 +31,6 @@
#define IOH_IRQ_BASE 0
-#define PCI_VENDOR_ID_ROHM 0x10DB
-
struct ioh_reg_comn {
u32 ien;
u32 istatus;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 154d959e8993..b6a4efce7c92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
struct gpio_mockup_chip *chip;
struct seq_file *sfile;
struct gpio_chip *gc;
+ int val, cnt;
char buf[3];
- int val, rv;
if (*ppos != 0)
return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
gc = &chip->gc;
val = gpio_mockup_get(gc, priv->offset);
- snprintf(buf, sizeof(buf), "%d\n", val);
+ cnt = snprintf(buf, sizeof(buf), "%d\n", val);
- rv = copy_to_user(usr_buf, buf, sizeof(buf));
- if (rv)
- return rv;
-
- return sizeof(buf) - 1;
+ return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
}
static ssize_t gpio_mockup_debugfs_write(struct file *file,
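A note on the rewrite above: copy_to_user() returns the number of bytes it failed to copy, not an errno, so the old code returned a bogus positive value on partial failure and ignored both size and *ppos. simple_read_from_buffer() bundles the correct semantics; a sketch of the convention, using the names from the hunk:

/* Copies at most min(size, cnt - *ppos) bytes from buf to usr_buf,
 * advances *ppos, and returns the byte count or a negative errno. */
ssize_t n = simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);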
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index ee79e5f88b5a..1d99293096f2 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -437,7 +437,6 @@ static int __maybe_unused pch_gpio_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume);
-#define PCI_VENDOR_ID_ROHM 0x10DB
static const struct pci_device_id pch_gpio_pcidev_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8b9c3ab70f6e..6a3ec575a404 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* to determine if the flags should have inverted semantics.
*/
if (IS_ENABLED(CONFIG_SPI_MASTER) &&
- of_property_read_bool(np, "cs-gpios")) {
+ of_property_read_bool(np, "cs-gpios") &&
+ !strcmp(propname, "cs-gpios")) {
struct device_node *child;
u32 cs;
int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
* conflict and the "spi-cs-high" flag will
* take precedence.
*/
- if (of_property_read_bool(np, "spi-cs-high")) {
+ if (of_property_read_bool(child, "spi-cs-high")) {
if (*flags & OF_GPIO_ACTIVE_LOW) {
pr_warn("%s GPIO handle specifies active low - ignored\n",
- of_node_full_name(np));
+ of_node_full_name(child));
*flags &= ~OF_GPIO_ACTIVE_LOW;
}
} else {
if (!(*flags & OF_GPIO_ACTIVE_LOW))
pr_info("%s enforce active low on chipselect handle\n",
- of_node_full_name(np));
+ of_node_full_name(child));
*flags |= OF_GPIO_ACTIVE_LOW;
}
break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_node_get(chip->of_node);
- return of_gpiochip_scan_gpios(chip);
+ status = of_gpiochip_scan_gpios(chip);
+ if (status) {
+ of_node_put(chip->of_node);
+ gpiochip_remove_pin_ranges(chip);
+ }
+
+ return status;
}
void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 144af0733581..bca3e7740ef6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1379,7 +1379,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
status = gpiochip_add_irqchip(chip, lock_key, request_key);
if (status)
- goto err_remove_chip;
+ goto err_free_gpiochip_mask;
status = of_gpiochip_add(chip);
if (status)
@@ -1387,7 +1387,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
status = gpiochip_init_valid_mask(chip);
if (status)
- goto err_remove_chip;
+ goto err_remove_of_chip;
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
@@ -1415,14 +1415,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (gpiolib_initialized) {
status = gpiochip_setup_dev(gdev);
if (status)
- goto err_remove_chip;
+ goto err_remove_acpi_chip;
}
return 0;
-err_remove_chip:
+err_remove_acpi_chip:
acpi_gpiochip_remove(chip);
+err_remove_of_chip:
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
+err_remove_chip:
+ gpiochip_irqchip_remove(chip);
+err_free_gpiochip_mask:
gpiochip_free_valid_mask(chip);
err_remove_irqchip_mask:
gpiochip_irqchip_free_valid_mask(chip);
@@ -2776,7 +2780,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
}
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+ return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -2813,7 +2817,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = gpio_set_config(chip, gpio, packed);
+ rc = chip->set_config(chip, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
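The relabelled gotos above restore the usual kernel unwind idiom: each failure jumps to a label that tears down only what has been set up so far, in reverse order of acquisition. A generic sketch, with all helper names illustrative:

static int probe_sketch(void)
{
        int ret;

        ret = setup_a();        /* hypothetical setup steps */
        if (ret)
                return ret;
        ret = setup_b();
        if (ret)
                goto undo_a;
        ret = setup_c();
        if (ret)
                goto undo_b;
        return 0;

undo_b:
        teardown_b();           /* undo only what succeeded, newest first */
undo_a:
        teardown_a();
        return ret;
}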
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4f8fb4ecde34..79fb302fb954 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
/* No need to recover an evicted BO */
if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+ shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
continue;
@@ -3173,11 +3174,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
break;
if (fence) {
- r = dma_fence_wait_timeout(fence, false, tmo);
+ tmo = dma_fence_wait_timeout(fence, false, tmo);
dma_fence_put(fence);
fence = next;
- if (r <= 0)
+ if (tmo == 0) {
+ r = -ETIMEDOUT;
break;
+ } else if (tmo < 0) {
+ r = tmo;
+ break;
+ }
} else {
fence = next;
}
@@ -3188,8 +3194,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
tmo = dma_fence_wait_timeout(fence, false, tmo);
dma_fence_put(fence);
- if (r <= 0 || tmo <= 0) {
- DRM_ERROR("recover vram bo from shadow failed\n");
+ if (r < 0 || tmo <= 0) {
+ DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
return -EIO;
}
@@ -3625,6 +3631,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
struct pci_dev *pdev = adev->pdev;
enum pci_bus_speed cur_speed;
enum pcie_link_width cur_width;
+ u32 ret = 1;
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3639,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
while (pdev) {
cur_speed = pcie_get_speed_cap(pdev);
cur_width = pcie_get_width_cap(pdev);
+ ret = pcie_bandwidth_available(adev->pdev, NULL,
+ NULL, &cur_width);
+ if (!ret)
+ cur_width = PCIE_LNK_WIDTH_RESRV;
if (cur_speed != PCI_SPEED_UNKNOWN) {
if (*speed == PCI_SPEED_UNKNOWN)
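The fix above hinges on dma_fence_wait_timeout()'s return convention: the remaining timeout in jiffies on success, 0 on timeout, or a negative errno. A sketch of the handling, using the names from the hunk:

tmo = dma_fence_wait_timeout(fence, false, tmo);
if (tmo == 0)
        r = -ETIMEDOUT;         /* fence never signalled in time */
else if (tmo < 0)
        r = tmo;                /* e.g. -ERESTARTSYS */
/* tmo > 0: signalled; the remainder carries into the next wait */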
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 0b8ef2d27d6b..fe393a46f881 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -35,6 +35,7 @@
#include "amdgpu_trace.h"
#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000)
/*
* IB
@@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
* cost waiting for it coming back under RUNTIME only
*/
tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ } else if (adev->gmc.xgmi.hive_id) {
+ tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
}
for (i = 0; i < adev->num_rings; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ead851413c0a..16fcb56c232b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_vm_bo_base *bo_base, *tmp;
int r = 0;
+ vm->bulk_moveable &= list_empty(&vm->evicted);
+
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -947,10 +949,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (r)
return r;
- r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
- if (r)
- goto error_free_pt;
-
if (vm->use_cpu_for_update) {
r = amdgpu_bo_kmap(pt, NULL);
if (r)
@@ -963,6 +961,10 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+
+ r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
+ if (r)
+ goto error_free_pt;
}
return 0;
@@ -3033,13 +3035,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_unreserve;
+ amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
+
r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
if (r)
goto error_unreserve;
- amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
amdgpu_bo_unreserve(vm->root.base.bo);
if (pasid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5533f6e4f4a4..a11db2b1a63f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -220,6 +220,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};
@@ -2404,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- adev->gfx.rlc.funcs->reset(adev);
-
gfx_v9_0_init_pg(adev);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 600259b4e291..2fe8397241ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
}
ring->vm_inv_eng = inv_eng - 1;
- change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
+ vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index d0d966d6080a..1696644ec022 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index c63de945c021..0487e3a4e9e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -500,9 +500,7 @@ static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
uint32_t reg;
- reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
- WREG32_SOC15(NBIO, 0, mmPCIE_INDEX2, reg);
- reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
+ reg = RREG32_PCIE(smnMP1_FIRMWARE_FLAGS | 0x03b00000);
return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 99ebcf29dcb0..ed89a101f73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -461,7 +461,6 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
- case CHIP_VEGA20:
soc15_asic_get_baco_capability(adev, &baco_reset);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 8be9677c0c07..cf9a49f49d3a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x9876, &carrizo_device_info }, /* Carrizo */
{ 0x9877, &carrizo_device_info }, /* Carrizo */
{ 0x15DD, &raven_device_info }, /* Raven */
+ { 0x15D8, &raven_device_info }, /* Raven */
#endif
{ 0x67A0, &hawaii_device_info }, /* Hawaii */
{ 0x67A1, &hawaii_device_info }, /* Hawaii */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 47243165a082..ae90a99909ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
- uint64_t addr;
- struct cik_mqd *m;
- int retval;
-
- retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
- mqd_mem_obj);
-
- if (retval != 0)
- return -ENOMEM;
-
- m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
- addr = (*mqd_mem_obj)->gpu_addr;
-
- memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
-
- m->header = 0xC0310800;
- m->compute_pipelinestat_enable = 1;
- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
-
- m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
- PRELOAD_REQ;
- m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
- QUANTUM_DURATION(10);
-
- m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
- m->cp_mqd_base_addr_lo = lower_32_bits(addr);
- m->cp_mqd_base_addr_hi = upper_32_bits(addr);
-
- m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
-
- /*
- * Pipe Priority
- * Identifies the pipe relative priority when this queue is connected
- * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
- * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
- * 0 = CS_LOW (typically below GFX)
- * 1 = CS_MEDIUM (typically between HP3D and GFX
- * 2 = CS_HIGH (typically above HP3D)
- */
- m->cp_hqd_pipe_priority = 1;
- m->cp_hqd_queue_priority = 15;
-
- *mqd = m;
- if (gart_addr)
- *gart_addr = addr;
- retval = mm->update_mqd(mm, m, q);
-
- return retval;
+ return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
}
static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2f26581b93ff..3082b55b1e77 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -886,6 +886,7 @@ static void emulated_link_detect(struct dc_link *link)
return;
}
+ /* dc_sink_create returns a new reference */
link->local_sink = sink;
edid_status = dm_helpers_read_local_edid(
@@ -952,6 +953,8 @@ static int dm_resume(void *handle)
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
aconnector->fake_enable = false;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
@@ -1061,6 +1064,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
sink = aconnector->dc_link->local_sink;
+ if (sink)
+ dc_sink_retain(sink);
/*
* Edid mgmt connector gets first update only in mode_valid hook and then
@@ -1085,21 +1090,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
* to it anymore after disconnect, so on next crtc to connector
* reshuffle by UMD we will get into unwanted dc_sink release
*/
- if (aconnector->dc_sink != aconnector->dc_em_sink)
- dc_sink_release(aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
}
aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
amdgpu_dm_update_freesync_caps(connector,
aconnector->edid);
} else {
amdgpu_dm_update_freesync_caps(connector, NULL);
- if (!aconnector->dc_sink)
+ if (!aconnector->dc_sink) {
aconnector->dc_sink = aconnector->dc_em_sink;
- else if (aconnector->dc_sink != aconnector->dc_em_sink)
dc_sink_retain(aconnector->dc_sink);
+ }
}
mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
return;
}
@@ -1107,8 +1115,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
* TODO: temporary guard to look for proper fix
* if this sink is MST sink, we should not do anything
*/
- if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_sink_release(sink);
return;
+ }
if (aconnector->dc_sink == sink) {
/*
@@ -1117,6 +1127,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
*/
DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
aconnector->connector_id);
+ if (sink)
+ dc_sink_release(sink);
return;
}
@@ -1138,6 +1150,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
amdgpu_dm_update_freesync_caps(connector, NULL);
aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->edid = NULL;
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
@@ -1158,11 +1171,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
amdgpu_dm_update_freesync_caps(connector, NULL);
drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
+ dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
}
mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
}
static void handle_hpd_irq(void *param)
@@ -2977,6 +2994,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
return stream;
} else {
sink = aconnector->dc_sink;
+ dc_sink_retain(sink);
}
stream = dc_create_stream_for_sink(sink);
@@ -3042,8 +3060,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream, sink);
finish:
- if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
- dc_sink_release(sink);
+ dc_sink_release(sink);
return stream;
}
@@ -3301,6 +3318,14 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
dm->backlight_dev = NULL;
}
#endif
+
+ if (aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_em_sink);
+ aconnector->dc_em_sink = NULL;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -3398,10 +3423,12 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
(edid->extensions + 1) * EDID_LENGTH,
&init_params);
- if (aconnector->base.force == DRM_FORCE_ON)
+ if (aconnector->base.force == DRM_FORCE_ON) {
aconnector->dc_sink = aconnector->dc_link->local_sink ?
aconnector->dc_link->local_sink :
aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
}
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
@@ -4506,6 +4533,7 @@ static void handle_cursor_update(struct drm_plane *plane,
amdgpu_crtc->cursor_width = plane->state->crtc_w;
amdgpu_crtc->cursor_height = plane->state->crtc_h;
+ memset(&attributes, 0, sizeof(attributes));
attributes.address.high_part = upper_32_bits(address);
attributes.address.low_part = lower_32_bits(address);
attributes.width = plane->state->crtc_w;
@@ -5402,9 +5430,11 @@ static void get_freesync_config_for_crtc(
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(new_con_state->base.connector);
struct drm_display_mode *mode = &new_crtc_state->base.mode;
+ int vrefresh = drm_mode_vrefresh(mode);
new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
- aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+ vrefresh >= aconnector->min_vfreq &&
+ vrefresh <= aconnector->max_vfreq;
if (new_crtc_state->vrr_supported) {
new_crtc_state->stream->ignore_msa_timing_param = true;
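The dc_sink hunks above all impose one refcounting rule (assuming the usual kref semantics of dc_sink_retain()/dc_sink_release()): every stored or borrowed pointer holds its own reference, dropped on every exit path. A condensed sketch:

struct dc_sink *sink = aconnector->dc_link->local_sink;
if (sink)
        dc_sink_retain(sink);   /* the local borrow takes a reference */
/* ... inspect or store sink; a stored copy retains again ... */
if (sink)
        dc_sink_release(sink);  /* and every return path drops the local ref */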
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f51d52eb52e6..c4ea3a91f17a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -191,6 +191,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
&init_params);
dc_sink->priv = aconnector;
+ /* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
if (aconnector->dc_sink)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 12d1842079ae..eb62d10bb65c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1348,12 +1348,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
bool res;
- kernel_fpu_begin();
-
/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+ kernel_fpu_begin();
+
if (res)
res = verify_clock_values(&fclks);
@@ -1372,9 +1372,13 @@ void dcn_bw_update_from_pplib(struct dc *dc)
} else
BREAK_TO_DEBUGGER();
+ kernel_fpu_end();
+
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+ kernel_fpu_begin();
+
if (res)
res = verify_clock_values(&dcfclks);
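The reshuffle above reflects a constraint worth spelling out: kernel_fpu_begin() opens a no-sleep region (preemption is disabled while the FPU is borrowed), so calls that may block -- the dm_pp_* queries here -- must sit outside it, with the floating-point math bracketed as tightly as possible. A sketch of the resulting shape:

res = dm_pp_get_clock_levels_by_type_with_voltage(ctx,
                DM_PP_CLOCK_TYPE_FCLK, &fclks); /* may sleep: keep outside */
kernel_fpu_begin();
/* ... floating-point bandwidth math only ... */
kernel_fpu_end();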
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c68fbd55db3c..a6cda201c964 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
return UPDATE_TYPE_FULL;
}
+ if (u->surface->force_full_update) {
+ update_flags->bits.full_update = 1;
+ return UPDATE_TYPE_FULL;
+ }
+
type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type);
@@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
}
dc_resource_state_copy_construct(state, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+ new_pipe->plane_state->force_full_update = true;
+ }
}
@@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc->current_state = context;
dc_release_state(old);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
}
/*let's use current_state to update watermark etc*/
if (update_type >= UPDATE_TYPE_FULL)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7f5a947ad31d..ea18e9c2d8ce 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -794,6 +794,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
sink->converter_disable_audio = converter_disable_audio;
+ /* dc_sink_create returns a new reference */
link->local_sink = sink;
edid_status = dm_helpers_read_local_edid(
@@ -2037,6 +2038,9 @@ static enum dc_status enable_link(
break;
}
+ if (status == DC_OK)
+ pipe_ctx->stream->link->link_status.link_active = true;
+
return status;
}
@@ -2060,6 +2064,14 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
dp_disable_link_phy_mst(link, signal);
} else
link->link_enc->funcs->disable_output(link->link_enc, signal);
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ /* For MST, disable the link only when no stream still uses it */
+ if (link->mst_stream_alloc_table.stream_count <= 0)
+ link->link_status.link_active = false;
+ } else {
+ link->link_status.link_active = false;
+ }
}
static bool dp_active_dongle_validate_timing(
@@ -2623,8 +2635,6 @@ void core_link_enable_stream(
}
}
- stream->link->link_status.link_active = true;
-
core_dc->hwss.enable_audio_stream(pipe_ctx);
/* turn off otg test pattern if enable */
@@ -2650,17 +2660,21 @@ void core_link_enable_stream(
void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ dal_ddc_service_write_scdc_data(
+ stream->link->ddc, 0,
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
-
- pipe_ctx->stream->link->link_status.link_active = false;
}
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1a7fd6aa77eb..0515095574e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -503,6 +503,9 @@ struct dc_plane_state {
struct dc_plane_status status;
struct dc_context *ctx;
+ /* HACK: Workaround for forcing full reprogramming under some conditions */
+ bool force_full_update;
+
/* private to dc_surface.c */
enum dc_irq_source irq_source;
struct kref refcount;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 4febf4ef7240..4fe3664fb495 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -190,6 +190,12 @@ static void submit_channel_request(
1,
0);
}
+
+ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+
/* set the delay and the number of bytes to write */
/* The length include
@@ -242,9 +248,6 @@ static void submit_channel_request(
}
}
- REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
- REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
- 10, aux110->timeout_period/10);
REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index d27f22c05e4b..e28ed6a00ff4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -71,11 +71,11 @@ enum { /* This is the timeout as defined in DP 1.2a,
* at most within ~240usec. That means,
* increasing this timeout will not affect normal operation,
* and we'll timeout after
- * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
* This timeout is especially important for
- * resume from S3 and CTS.
+ * converters, resume from S3, and CTS.
*/
- SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
};
struct dce_aux {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 683829466a44..0ba68d41b9c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position(
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
- //account for cases where we see negative offset relative to overlay plane
- if (src_x_offset < 0 && src_y_offset < 0) {
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, 0,
- CURSOR_Y_POSITION, 0);
- x_hotspot -= src_x_offset;
- y_hotspot -= src_y_offset;
- } else if (src_x_offset < 0) {
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, 0,
- CURSOR_Y_POSITION, pos->y);
- x_hotspot -= src_x_offset;
- } else if (src_y_offset < 0) {
- REG_SET_2(CURSOR_POSITION, 0,
+ REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, 0);
- y_hotspot -= src_y_offset;
- } else {
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
- }
+ CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
CURSOR_HOT_SPOT_X, x_hotspot,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 94a84bc57c7a..bfd27f10879e 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -724,7 +724,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
static void build_vrr_infopacket_v2(enum signal_type signal,
const struct mod_vrr_params *vrr,
- const enum color_transfer_func *app_tf,
+ enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
unsigned int payload_size = 0;
@@ -732,8 +732,7 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
build_vrr_infopacket_data(vrr, infopacket);
- if (app_tf != NULL)
- build_vrr_infopacket_fs2_data(*app_tf, infopacket);
+ build_vrr_infopacket_fs2_data(app_tf, infopacket);
build_vrr_infopacket_checksum(&payload_size, infopacket);
@@ -757,7 +756,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
enum vrr_packet_type packet_type,
- const enum color_transfer_func *app_tf,
+ enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
/* SPD info packet for FreeSync
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 4222e403b151..dcef85994c45 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -145,7 +145,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
enum vrr_packet_type packet_type,
- const enum color_transfer_func *app_tf,
+ enum color_transfer_func app_tf,
struct dc_info_packet *infopacket);
void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ce177d7f04cb..6bf48934fdc4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -277,8 +277,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
if (!skip_display_settings)
phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
- if ((hwmgr->request_dpm_level != hwmgr->dpm_level) &&
- !phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
+ if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
hwmgr->dpm_level = hwmgr->request_dpm_level;
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 4588bddf8b33..615cf2c09e54 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -489,15 +489,16 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
}
int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
- uint8_t id, uint32_t *frequency)
+ uint8_t clk_id, uint8_t syspll_id,
+ uint32_t *frequency)
{
struct amdgpu_device *adev = hwmgr->adev;
struct atom_get_smu_clock_info_parameters_v3_1 parameters;
struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
uint32_t ix;
- parameters.clk_id = id;
- parameters.syspll_id = 0;
+ parameters.clk_id = clk_id;
+ parameters.syspll_id = syspll_id;
parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
parameters.dfsdid = 0;
@@ -530,20 +531,23 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
boot_values->ulSocClk = 0;
boot_values->ulDCEFClk = 0;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulSocClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulDCEFClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulEClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulVClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulDClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL1_0_FCLK_ID, SMU11_SYSPLL1_2_ID, &frequency))
+ boot_values->ulFClk = frequency;
}
static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
@@ -563,19 +567,19 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
boot_values->ulSocClk = 0;
boot_values->ulDCEFClk = 0;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, 0, &frequency))
boot_values->ulSocClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, 0, &frequency))
boot_values->ulDCEFClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, 0, &frequency))
boot_values->ulEClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, 0, &frequency))
boot_values->ulVClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, 0, &frequency))
boot_values->ulDClk = frequency;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe9e8ceef50e..b7e2651b570b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -139,6 +139,7 @@ struct pp_atomfwctrl_bios_boot_up_values {
uint32_t ulEClk;
uint32_t ulVClk;
uint32_t ulDClk;
+ uint32_t ulFClk;
uint16_t usVddc;
uint16_t usVddci;
uint16_t usMvddc;
@@ -236,7 +237,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_smc_dpm_parameters *param);
int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
- uint8_t id, uint32_t *frequency);
+ uint8_t clk_id, uint8_t syspll_id,
+ uint32_t *frequency);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 48187acac59e..83d3d935f3ac 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3491,14 +3491,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_94, 0);
+ ixSMU_PM_STATUS_95, 0);
for (i = 0; i < 10; i++) {
- mdelay(1);
+ msleep(500);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
tmp = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_94);
+ ixSMU_PM_STATUS_95);
if (tmp != 0)
break;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 5479125ff4f6..5c4f701939ea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2575,10 +2575,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
- SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
+ SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk);
pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
- SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
+ SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk);
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
@@ -4407,9 +4407,9 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
features_to_disable =
- (features_enabled ^ new_ppfeature_masks) & features_enabled;
+ features_enabled & ~new_ppfeature_masks;
features_to_enable =
- (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
+ ~features_enabled & new_ppfeature_masks;
pr_debug("features_to_disable 0x%llx\n", features_to_disable);
pr_debug("features_to_enable 0x%llx\n", features_to_enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 6c8e78611c03..bdb48e94eff6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2009,9 +2009,9 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
features_to_disable =
- (features_enabled ^ new_ppfeature_masks) & features_enabled;
+ features_enabled & ~new_ppfeature_masks;
features_to_enable =
- (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
+ ~features_enabled & new_ppfeature_masks;
pr_debug("features_to_disable 0x%llx\n", features_to_disable);
pr_debug("features_to_enable 0x%llx\n", features_to_enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index aad79affb081..23b5b94a4939 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
* MP0CLK DS
*/
data->registry_data.disallowed_features = 0xE0041C00;
+ /* ECC feature should be disabled on old SMUs */
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ hwmgr->smu_version = smum_get_argument(hwmgr);
+ if (hwmgr->smu_version < 0x282100)
+ data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
data->registry_data.od_state_in_dc_support = 0;
data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+ data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
data->smu_features[i].smu_feature_bitmap =
@@ -463,9 +470,9 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
{
dpm_state->soft_min_level = 0x0;
- dpm_state->soft_max_level = 0xffff;
+ dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_state->hard_min_level = 0x0;
- dpm_state->hard_max_level = 0xffff;
+ dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
}
static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
@@ -711,8 +718,10 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get fclk dpm levels!",
return ret);
- } else
- dpm_table->count = 0;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
+ }
vega20_init_dpm_state(&(dpm_table->dpm_state));
/* save a copy of the default DPM table */
@@ -754,6 +763,7 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.eclock = boot_up_values.ulEClk;
data->vbios_boot_state.vclock = boot_up_values.ulVClk;
data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+ data->vbios_boot_state.fclock = boot_up_values.ulFClk;
data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -780,6 +790,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
int ret;
@@ -816,6 +828,10 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
+ data->pcie_parameters_override = 1;
+ data->pcie_gen_level1 = pcie_gen;
+ data->pcie_width_level1 = pcie_width;
+
return 0;
}
@@ -979,6 +995,8 @@ static int vega20_od8_set_feature_capabilities(
}
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
+ data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
@@ -2314,32 +2332,8 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
- struct vega20_hwmgr *data =
- (struct vega20_hwmgr *)(hwmgr->backend);
- uint32_t soft_min_level, soft_max_level;
int ret = 0;
- soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->dpm_table.gfx_table.dpm_state.soft_min_level =
- data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
- data->dpm_table.gfx_table.dpm_state.soft_max_level =
- data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
-
- soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
- soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
- data->dpm_table.mem_table.dpm_state.soft_min_level =
- data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
- data->dpm_table.mem_table.dpm_state.soft_max_level =
- data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
-
- soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
- soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
- data->dpm_table.soc_table.dpm_state.soft_min_level =
- data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
- data->dpm_table.soc_table.dpm_state.soft_max_level =
- data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
-
ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
@@ -2641,9 +2635,8 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
- "[GetSclks]: gfxclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
+ return -1;
count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
clocks->num_levels = count;
@@ -2670,9 +2663,8 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
- "[GetMclks]: uclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_UCLK].enabled)
+ return -1;
count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
clocks->num_levels = data->mclk_latency_table.count = count;
@@ -2696,9 +2688,8 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled,
- "[GetDcfclocks]: dcefclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
+ return -1;
count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
clocks->num_levels = count;
@@ -2719,9 +2710,8 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled,
- "[GetSocclks]: socclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
+ return -1;
count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
clocks->num_levels = count;
@@ -2799,7 +2789,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
data->od8_settings.od8_settings_array;
OverDriveTable_t *od_table =
&(data->smc_state_table.overdrive_table);
- struct pp_clock_levels_with_latency clocks;
int32_t input_index, input_clk, input_vol, i;
int od8_id;
int ret;
@@ -2858,11 +2847,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
return -EOPNOTSUPP;
}
- ret = vega20_get_memclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get memory clk levels failed!",
- return ret);
-
for (i = 0; i < size; i += 2) {
if (i + 2 > size) {
pr_info("invalid number of input parameters %d\n",
@@ -2879,11 +2863,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
+ if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
pr_info("clock freq %d is not within allowed range [%d - %d]\n",
input_clk,
- clocks.data[0].clocks_in_khz / 1000,
+ od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
return -EINVAL;
}
@@ -3043,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
"FCLK_DS",
"MP1CLK_DS",
"MP0CLK_DS",
- "XGMI"};
+ "XGMI",
+ "ECC"};
static const char *output_title[] = {
"FEATURES",
"BITMASK",
@@ -3088,9 +3073,9 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
features_to_disable =
- (features_enabled ^ new_ppfeature_masks) & features_enabled;
+ features_enabled & ~new_ppfeature_masks;
features_to_enable =
- (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
+ ~features_enabled & new_ppfeature_masks;
pr_debug("features_to_disable 0x%llx\n", features_to_disable);
pr_debug("features_to_enable 0x%llx\n", features_to_enable);
@@ -3128,7 +3113,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
&(data->dpm_table.fclk_table);
int i, now, size = 0;
int ret = 0;
- uint32_t gen_speed, lane_width;
+ uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
switch (type) {
case PP_SCLK:
@@ -3137,10 +3122,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get current gfx clk Failed!",
return ret);
- ret = vega20_get_sclks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get gfx clk levels Failed!",
- return ret);
+ if (vega20_get_sclks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3154,10 +3140,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get current mclk freq Failed!",
return ret);
- ret = vega20_get_memclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get memory clk levels Failed!",
- return ret);
+ if (vega20_get_memclocks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3171,10 +3158,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get current socclk freq Failed!",
return ret);
- ret = vega20_get_socclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get soc clk levels Failed!",
- return ret);
+ if (vega20_get_socclocks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3200,10 +3188,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get current dcefclk freq Failed!",
return ret);
- ret = vega20_get_dcefclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get dcefclk levels Failed!",
- return ret);
+ if (vega20_get_dcefclocks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3212,28 +3201,36 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case PP_PCIE:
- gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
- lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
- for (i = 0; i < NUM_LINK_LEVELS; i++)
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ if (i == 1 && data->pcie_parameters_override) {
+ gen_speed = data->pcie_gen_level1;
+ lane_width = data->pcie_width_level1;
+ } else {
+ gen_speed = pptable->PcieGenSpeed[i];
+ lane_width = pptable->PcieLaneCount[i];
+ }
size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
- (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
- (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
- (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
- (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
- (pptable->PcieLaneCount[i] == 1) ? "x1" :
- (pptable->PcieLaneCount[i] == 2) ? "x2" :
- (pptable->PcieLaneCount[i] == 3) ? "x4" :
- (pptable->PcieLaneCount[i] == 4) ? "x8" :
- (pptable->PcieLaneCount[i] == 5) ? "x12" :
- (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
+ (gen_speed == 0) ? "2.5GT/s," :
+ (gen_speed == 1) ? "5.0GT/s," :
+ (gen_speed == 2) ? "8.0GT/s," :
+ (gen_speed == 3) ? "16.0GT/s," : "",
+ (lane_width == 1) ? "x1" :
+ (lane_width == 2) ? "x2" :
+ (lane_width == 3) ? "x4" :
+ (lane_width == 4) ? "x8" :
+ (lane_width == 5) ? "x12" :
+ (lane_width == 6) ? "x16" : "",
pptable->LclkFreq[i],
- (gen_speed == pptable->PcieGenSpeed[i]) &&
- (lane_width == pptable->PcieLaneCount[i]) ?
+ (current_gen_speed == gen_speed) &&
+ (current_lane_width == lane_width) ?
"*" : "");
+ }
break;
case OD_SCLK:
@@ -3288,13 +3285,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
}
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
- ret = vega20_get_memclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Fail to get memory clk levels!",
- return ret);
-
size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
- clocks.data[0].clocks_in_khz / 1000,
+ od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
}
@@ -3356,6 +3348,31 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
return ret;
}
+static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+ struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_FCLK].enabled) {
+ PP_ASSERT_WITH_CODE(dpm_table->count > 0,
+ "[SetFclkToHightestDpmLevel] Dpm table has no entry!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
+ "[SetFclkToHightestDpmLevel] Dpm table has too many entries!",
+ return -EINVAL);
+
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)),
+ "[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
+ return ret);
+ }
+
+ return ret;
+}
+
static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
@@ -3366,8 +3383,10 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
&data->dpm_table.mem_table);
+ if (ret)
+ return ret;
- return ret;
+ return vega20_set_fclk_to_highest_dpm_level(hwmgr);
}
static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
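(Aside: the helper added above ends by sending PPSMC_MSG_SetSoftMinByFreq with the clock ID and the frequency packed into one 32-bit message parameter. A minimal standalone sketch of that encoding, assuming a 16/16 split; the PPCLK_FCLK value below is invented for illustration:)

#include <stdint.h>
#include <stdio.h>

#define PPCLK_FCLK 4U /* invented clock-ID value */

int main(void)
{
	uint32_t freq = 1011; /* example soft-min level, MHz */
	uint32_t param = (PPCLK_FCLK << 16) | freq;

	/* the receiver can recover both fields from the single word */
	printf("param=0x%08x clk=%u freq=%u\n", (unsigned)param,
	       (unsigned)(param >> 16), (unsigned)(param & 0xffff));
	return 0;
}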
@@ -3451,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
struct vega20_single_dpm_table *dpm_table;
bool vblank_too_short = false;
bool disable_mclk_switching;
+ bool disable_fclk_switching;
uint32_t i, latency;
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3461,9 +3481,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
/* gfxclk */
dpm_table = &(data->dpm_table.gfx_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
@@ -3485,9 +3505,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
/* memclk */
dpm_table = &(data->dpm_table.mem_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
@@ -3526,12 +3546,28 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
if (hwmgr->display_config->nb_pstate_switch_disable)
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ if ((disable_mclk_switching &&
+ (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+ hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+ disable_fclk_switching = true;
+ else
+ disable_fclk_switching = false;
+
+ /* fclk */
+ dpm_table = &(data->dpm_table.fclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
+ if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
/* vclk */
dpm_table = &(data->dpm_table.vclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
@@ -3548,9 +3584,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
/* dclk */
dpm_table = &(data->dpm_table.dclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
@@ -3567,9 +3603,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
/* socclk */
dpm_table = &(data->dpm_table.soc_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
@@ -3586,9 +3622,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
/* eclk */
dpm_table = &(data->dpm_table.eclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 37f5f5e657da..ac2a3118a0ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -42,6 +42,8 @@
#define AVFS_CURVE 0
#define OD8_HOTCURVE_TEMPERATURE 85
+#define VG20_CLOCK_MAX_DEFAULT 0xFFFF
+
typedef uint32_t PP_Clock;
enum {
@@ -78,6 +80,7 @@ enum {
GNLD_DS_MP1CLK,
GNLD_DS_MP0CLK,
GNLD_XGMI,
+ GNLD_ECC,
GNLD_FEATURES_MAX
};
@@ -219,6 +222,7 @@ struct vega20_vbios_boot_state {
uint32_t eclock;
uint32_t dclock;
uint32_t vclock;
+ uint32_t fclock;
};
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
@@ -523,6 +527,10 @@ struct vega20_hwmgr {
unsigned long metrics_time;
SmuMetrics_t metrics_table;
+
+ bool pcie_parameters_override;
+ uint32_t pcie_gen_level1;
+ uint32_t pcie_width_level1;
};
#define VEGA20_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
index 97f8a1a970c3..7a7f15d0c53a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
@@ -32,6 +32,8 @@
#include "cgs_common.h"
#include "vega20_pptable.h"
+#define VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE 105
+
static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
enum phm_platform_caps cap)
{
@@ -798,6 +800,17 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
return 0;
}
+static int override_powerplay_table_fantargettemperature(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ PPTable_t *ppsmc_pptable = (PPTable_t *)(pptable_information->smc_pptable);
+
+ ppsmc_pptable->FanTargetTemperature = VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE;
+
+ return 0;
+}
+
#define VEGA20_ENGINECLOCK_HARDMAX 198000
static int init_powerplay_table_information(
struct pp_hwmgr *hwmgr,
@@ -887,6 +900,10 @@ static int init_powerplay_table_information(
result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
+ if (result)
+ return result;
+
+ result = override_powerplay_table_fantargettemperature(hwmgr);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 63d5cf691549..195c4ae67058 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -99,7 +99,7 @@
#define FEATURE_DS_MP1CLK_BIT 30
#define FEATURE_DS_MP0CLK_BIT 31
#define FEATURE_XGMI_BIT 32
-#define FEATURE_SPARE_33_BIT 33
+#define FEATURE_ECC_BIT 33
#define FEATURE_SPARE_34_BIT 34
#define FEATURE_SPARE_35_BIT 35
#define FEATURE_SPARE_36_BIT 36
@@ -165,7 +165,8 @@
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT )
#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT )
-#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT )
+#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT )
+#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT )
#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
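(Aside: the switch to 1ULL matters because FEATURE_XGMI_BIT (32) and FEATURE_ECC_BIT (33) lie beyond the width of a plain int, so `1 << bit` is undefined behaviour there and cannot produce the intended 64-bit mask. A minimal standalone C sketch:)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int bit = 33; /* FEATURE_ECC_BIT */

	uint64_t mask = 1ULL << bit;   /* 0x0000000200000000 */
	/* uint64_t bad = 1 << bit; */ /* undefined: shift >= width of int */

	printf("mask=0x%016llx\n", (unsigned long long)mask);
	return 0;
}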
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 52abca065764..2d4cfe14f72e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -2330,6 +2330,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
case DRAM_LOG_BUFF_SIZE:
return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
+ break;
case SMU_Discrete_DpmTable:
switch (member) {
case UvdBootLevel:
@@ -2339,6 +2340,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
case LowSclkInterruptThreshold:
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
+ break;
}
pr_warn("can't get the offset of type %x member %x\n", type, member);
return 0;
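(Aside: the two added break statements close an implicit fall-through between the outer cases — when the inner switch matched nothing, control fell into the next case's table and could return an offset from the wrong structure. A minimal standalone sketch of the bug class, with invented names and values:)

#include <stdio.h>

static int lookup(int type, int member)
{
	switch (type) {
	case 0:
		switch (member) {
		case 1: return 100;
		}
		break; /* the added break: stops the fall into case 1 */
	case 1:
		switch (member) {
		case 2: return 200;
		}
		break;
	}
	return -1; /* "can't get the offset" path */
}

int main(void)
{
	/* -1 with the break in place; without it, this wrongly returns 200 */
	printf("%d\n", lookup(0, 2));
	return 0;
}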
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
index 079fc8e8f709..742b3dc1f6cb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -40,10 +40,8 @@ bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t mp1_fw_flags;
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
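(Aside: RREG32_PCIE() collapses the explicit index/data register pair — write the address to PCIE_INDEX2, read the value back from PCIE_DATA2 — into one call. A standalone sketch of that two-step pattern with the registers simulated in plain C; all names and values below are invented:)

#include <stdint.h>
#include <stdio.h>

static uint32_t backing[16] = { [5] = 0x00000001 }; /* fake MMIO space */
static uint32_t pcie_index;                         /* fake index register */

static void write_index(uint32_t addr) { pcie_index = addr; }
static uint32_t read_data(void) { return backing[pcie_index & 0xf]; }

/* one-call helper in the spirit of RREG32_PCIE() */
static uint32_t rreg32_pcie(uint32_t addr)
{
	write_index(addr);
	return read_data();
}

int main(void)
{
	write_index(5);                                  /* old, open-coded */
	printf("open-coded: 0x%08x\n", (unsigned)read_data());
	printf("helper:     0x%08x\n", (unsigned)rreg32_pcie(5)); /* new */
	return 0;
}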
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index b7ff7d4d6f44..ba00744c3413 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -49,10 +49,8 @@ static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t mp1_fw_flags;
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index a63e5f0dae56..ab7968c8f6a2 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1037,6 +1037,35 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
}
EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write);
+/* Filter out invalid setups to avoid configuring SCDC and scrambling */
+static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
+{
+ struct drm_display_info *display = &hdmi->connector.display_info;
+
+ /* Completely disable SCDC support for older controllers */
+ if (hdmi->version < 0x200a)
+ return false;
+
+ /* Disable if no DDC bus */
+ if (!hdmi->ddc)
+ return false;
+
+ /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
+ if (!display->hdmi.scdc.supported ||
+ !display->hdmi.scdc.scrambling.supported)
+ return false;
+
+ /*
+ * Disable if the display only supports low TMDS rates and
+ * scrambling for low rates is not supported either
+ */
+ if (!display->hdmi.scdc.scrambling.low_rates &&
+ display->max_tmds_clock <= 340000)
+ return false;
+
+ return true;
+}
+
/*
* HDMI2.0 Specifies the following procedure for High TMDS Bit Rates:
* - The Source shall suspend transmission of the TMDS clock and data
@@ -1055,7 +1084,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi)
unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
/* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
- if (hdmi->connector.display_info.hdmi.scdc.supported) {
+ if (dw_hdmi_support_scdc(hdmi)) {
if (mtmdsclock > HDMI14_MAX_TMDSCLK)
drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1);
else
@@ -1579,8 +1608,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
/* Set up HDMI_FC_INVIDCONF */
inv_val = (hdmi->hdmi_data.hdcp_enable ||
- vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
- hdmi_info->scdc.scrambling.low_rates ?
+ (dw_hdmi_support_scdc(hdmi) &&
+ (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
+ hdmi_info->scdc.scrambling.low_rates)) ?
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
@@ -1646,7 +1676,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
}
/* Scrambling Control */
- if (hdmi_info->scdc.supported) {
+ if (dw_hdmi_support_scdc(hdmi)) {
if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
hdmi_info->scdc.scrambling.low_rates) {
/*
@@ -1658,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
* Source Devices compliant shall set the
* Source Version = 1.
*/
- drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION,
+ drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION,
&bytes);
- drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION,
+ drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION,
min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
/* Enabled Scrambling in the Sink */
- drm_scdc_set_scrambling(&hdmi->i2c->adap, 1);
+ drm_scdc_set_scrambling(hdmi->ddc, 1);
/*
* To activate the scrambler feature, you must ensure
@@ -1680,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
HDMI_MC_SWRSTZ);
- drm_scdc_set_scrambling(&hdmi->i2c->adap, 0);
+ drm_scdc_set_scrambling(hdmi->ddc, 0);
}
}
@@ -1774,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
* iteration for others.
* The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
* the workaround with a single iteration.
+ * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
+ * been identified as needing the workaround with a single iteration.
*/
switch (hdmi->version) {
@@ -1782,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
break;
case 0x131a:
case 0x132a:
+ case 0x200a:
case 0x201a:
+ case 0x211a:
case 0x212a:
count = 1;
break;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 540a77a2ade9..fbb76332cc9f 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->atomic_disable(crtc, old_crtc_state);
else if (funcs->disable)
funcs->disable(crtc);
- else
+ else if (funcs->dpms)
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (!(dev->irq_enabled && dev->num_crtcs))
@@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
if (new_crtc_state->enable) {
DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
-
if (funcs->atomic_enable)
funcs->atomic_enable(crtc, old_crtc_state);
- else
+ else if (funcs->commit)
funcs->commit(crtc);
}
}
@@ -3039,9 +3038,31 @@ commit:
return 0;
}
-static int __drm_atomic_helper_disable_all(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx,
- bool clean_old_fbs)
+/**
+ * drm_atomic_helper_disable_all - disable all currently active outputs
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Loops through all connectors, finding those that aren't turned off and then
+ * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
+ * that they are connected to.
+ *
+ * This is used for example in suspend/resume to disable all currently active
+ * functions when suspending. If you just want to shut down everything at e.g.
+ * driver unload, look at drm_atomic_helper_shutdown().
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
+ * drm_atomic_helper_shutdown().
+ */
+int drm_atomic_helper_disable_all(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_connector_state *conn_state;
@@ -3099,35 +3120,6 @@ free:
drm_atomic_state_put(state);
return ret;
}
-
-/**
- * drm_atomic_helper_disable_all - disable all currently active outputs
- * @dev: DRM device
- * @ctx: lock acquisition context
- *
- * Loops through all connectors, finding those that aren't turned off and then
- * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
- * that they are connected to.
- *
- * This is used for example in suspend/resume to disable all currently active
- * functions when suspending. If you just want to shut down everything at e.g.
- * driver unload, look at drm_atomic_helper_shutdown().
- *
- * Note that if callers haven't already acquired all modeset locks this might
- * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- *
- * See also:
- * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
- * drm_atomic_helper_shutdown().
- */
-int drm_atomic_helper_disable_all(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx)
-{
- return __drm_atomic_helper_disable_all(dev, ctx, false);
-}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
/**
@@ -3148,7 +3140,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
- ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
+ ret = drm_atomic_helper_disable_all(dev, &ctx);
if (ret)
DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 381581b01d48..05bbc2b622fc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
-
- mutex_lock(&drm_global_mutex);
- if (dev->open_count == 0)
- drm_dev_put(dev);
- mutex_unlock(&drm_global_mutex);
+ drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_dev_unplug);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0e9349ff2d16..af2ab640cadb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
best_depth = fmt->depth;
}
}
- if (sizes.surface_depth != best_depth) {
+ if (sizes.surface_depth != best_depth && best_depth) {
DRM_INFO("requested bpp %d, scaled depth down to %d",
sizes.surface_bpp, best_depth);
sizes.surface_depth = best_depth;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 83a5bbca6e7e..7caa3c7ed978 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
drm_close_helper(filp);
- if (!--dev->open_count) {
+ if (!--dev->open_count)
drm_lastclose(dev);
- if (drm_dev_is_unplugged(dev))
- drm_put_dev(dev);
- }
+
mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 67b1fca39aa6..0e3043e08c69 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
m32.size = map.size;
m32.type = map.type;
m32.flags = map.flags;
- m32.handle = ptr_to_compat(map.handle);
+ m32.handle = ptr_to_compat((void __user *)map.handle);
m32.mtrr = map.mtrr;
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
@@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
m32.offset = map.offset;
m32.mtrr = map.mtrr;
- m32.handle = ptr_to_compat(map.handle);
+ m32.handle = ptr_to_compat((void __user *)map.handle);
if (map.handle != compat_ptr(m32.handle))
pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
map.handle, m32.type, m32.offset);
@@ -526,7 +526,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
if (err)
return err;
- req32.handle = ptr_to_compat(req.handle);
+ req32.handle = ptr_to_compat((void __user *)req.handle);
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2b4f373736c7..8b4cd31ce7bd 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -106,25 +106,19 @@
static noinline void save_stack(struct drm_mm_node *node)
{
unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = STACKDEPTH,
- .skip = 1
- };
+ unsigned int n;
- save_stack_trace(&trace);
- if (trace.nr_entries != 0 &&
- trace.entries[trace.nr_entries-1] == ULONG_MAX)
- trace.nr_entries--;
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
/* May be called under spinlock, so avoid sleeping */
- node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+ node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}
static void show_leaks(struct drm_mm *mm)
{
struct drm_mm_node *node;
- unsigned long entries[STACKDEPTH];
+ unsigned long *entries;
+ unsigned int nr_entries;
char *buf;
buf = kmalloc(BUFSZ, GFP_KERNEL);
@@ -132,19 +126,14 @@ static void show_leaks(struct drm_mm *mm)
return;
list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = STACKDEPTH
- };
-
if (!node->stack) {
DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
node->start, node->size);
continue;
}
- depot_fetch_stack(node->stack, &trace);
- snprint_stack_trace(buf, BUFSZ, &trace, 0);
+ nr_entries = stack_depot_fetch(node->stack, &entries);
+ stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
node->start, node->size, buf);
}
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 041a77e400d4..21df44b78df3 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -2,7 +2,6 @@
config DRM_ETNAVIV
tristate "ETNAVIV (DRM support for Vivante GPU IP cores)"
depends on DRM
- depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
depends on MMU
select SHMEM
select SYNC_FILE
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index acb68c698363..4d5d1a77eb2a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -15,8 +15,6 @@ struct etnaviv_perfmon_request;
struct etnaviv_cmdbuf {
/* suballocator this cmdbuf is allocated from */
struct etnaviv_cmdbuf_suballoc *suballoc;
- /* user context key, must be unique between all active users */
- struct etnaviv_file_private *ctx;
/* cmdbuf properties */
int suballoc_offset;
void *vaddr;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 3fbb4855396c..33854c94cb85 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -215,7 +215,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mutex_lock(&obj->lock);
pages = etnaviv_gem_get_pages(obj);
mutex_unlock(&obj->lock);
- if (pages) {
+ if (!IS_ERR(pages)) {
int j;
iter.hdr->data[0] = bomap - bomap_start;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 76079c2291f8..f0abb744ef95 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -95,6 +95,7 @@ struct etnaviv_gem_submit_bo {
struct etnaviv_gem_submit {
struct drm_sched_job sched_job;
struct kref refcount;
+ struct etnaviv_file_private *ctx;
struct etnaviv_gpu *gpu;
struct dma_fence *out_fence, *in_fence;
int out_fence_id;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 0566171f8df2..f21529e635e3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -15,7 +15,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
- return NULL;
+ return ERR_PTR(-EINVAL);
return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 30875f8f2933..b2fe3446bfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -506,7 +506,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto err_submit_objects;
- submit->cmdbuf.ctx = file->driver_priv;
+ submit->ctx = file->driver_priv;
submit->exec_state = args->exec_state;
submit->flags = args->flags;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f1c88d8ad5ba..f794e04be9e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -320,8 +320,8 @@ etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
domain = &etnaviv_domain->base;
domain->dev = gpu->dev;
- domain->base = 0;
- domain->size = (u64)SZ_1G * 4;
+ domain->base = SZ_4K;
+ domain->size = (u64)SZ_1G * 4 - SZ_4K;
domain->ops = &etnaviv_iommuv2_ops;
ret = etnaviv_iommuv2_init(etnaviv_domain);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 9980d81a26e3..4227a4006c34 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -113,7 +113,7 @@ static const struct etnaviv_pm_domain doms_3d[] = {
.name = "PE",
.profile_read = VIVS_MC_PROFILE_PE_READ,
.profile_config = VIVS_MC_PROFILE_CONFIG0,
- .nr_signals = 5,
+ .nr_signals = 4,
.signal = (const struct etnaviv_pm_signal[]) {
{
"PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
@@ -435,7 +435,7 @@ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
dom = meta->domains + signal->domain;
- if (signal->iter > dom->nr_signals)
+ if (signal->iter >= dom->nr_signals)
return -EINVAL;
sig = &dom->signal[signal->iter];
@@ -461,7 +461,7 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
dom = meta->domains + r->domain;
- if (r->signal > dom->nr_signals)
+ if (r->signal >= dom->nr_signals)
return -EINVAL;
return 0;
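(Aside: both hunks above fix the same off-by-one — with nr_signals entries, valid indices run 0..nr_signals-1, so the guard must be '>='. A minimal standalone sketch:)

#include <stdio.h>

#define NR_SIGNALS 4
static const char *signals[NR_SIGNALS] = { "A", "B", "C", "D" };

static const char *get_signal(unsigned int iter)
{
	if (iter >= NR_SIGNALS) /* 'iter > NR_SIGNALS' lets iter == 4 through */
		return NULL;
	return signals[iter];
}

int main(void)
{
	printf("%s\n", get_signal(3)); /* last valid entry */
	printf("%s\n", get_signal(4) ? get_signal(4) : "(rejected)");
	return 0;
}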
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 67ae26602024..6d24fea1766b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -153,7 +153,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
mutex_lock(&submit->gpu->fence_lock);
ret = drm_sched_job_init(&submit->sched_job, sched_entity,
- submit->cmdbuf.ctx);
+ submit->ctx);
if (ret)
goto out_unlock;
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 0573eab0e190..f35e4ab55b27 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -20,6 +20,7 @@
#include "regs-vp.h"
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
@@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
}
-static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+static bool mixer_is_synced(struct mixer_context *ctx)
{
- /* block update on vsync */
- mixer_reg_writemask(ctx, MXR_STATUS, enable ?
- MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+ u32 base, shadow;
+ if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+ ctx->mxr_ver == MXR_VER_128_0_0_184)
+ return !(mixer_reg_read(ctx, MXR_CFG) &
+ MXR_CFG_LAYER_UPDATE_COUNT_MASK);
+
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
+ vp_reg_read(ctx, VP_SHADOW_UPDATE))
+ return false;
+
+ base = mixer_reg_read(ctx, MXR_CFG);
+ shadow = mixer_reg_read(ctx, MXR_CFG_S);
+ if (base != shadow)
+ return false;
+
+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
+ if (base != shadow)
+ return false;
+
+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
+ if (base != shadow)
+ return false;
+
+ return true;
+}
+
+static int mixer_wait_for_sync(struct mixer_context *ctx)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), 100000);
+
+ while (!mixer_is_synced(ctx)) {
+ usleep_range(1000, 2000);
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static void mixer_disable_sync(struct mixer_context *ctx)
+{
+ mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
+}
+
+static void mixer_enable_sync(struct mixer_context *ctx)
+{
+ if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+ ctx->mxr_ver == MXR_VER_128_0_0_184)
+ mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+ mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
- vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
- VP_SHADOW_UPDATE_ENABLE : 0);
+ vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
}
static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
@@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
spin_lock_irqsave(&ctx->reg_slock, flags);
- vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
/* interlace or progressive scan mode */
val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_regs_dump(ctx);
}
-static void mixer_layer_update(struct mixer_context *ctx)
-{
- mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
-}
-
static void mixer_graph_buffer(struct mixer_context *ctx,
struct exynos_drm_plane *plane)
{
@@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
mixer_cfg_layer(ctx, win, priority, true);
mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
- /* layer update mandatory for mixer 16.0.33.0 */
- if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
- ctx->mxr_ver == MXR_VER_128_0_0_184)
- mixer_layer_update(ctx);
-
spin_unlock_irqrestore(&ctx->reg_slock, flags);
mixer_regs_dump(ctx);
@@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
struct mixer_context *ctx = arg;
- u32 val, base, shadow;
+ u32 val;
spin_lock(&ctx->reg_slock);
@@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
val &= ~MXR_INT_STATUS_VSYNC;
/* interlace scan need to check shadow register */
- if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
- if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
- vp_reg_read(ctx, VP_SHADOW_UPDATE))
- goto out;
-
- base = mixer_reg_read(ctx, MXR_CFG);
- shadow = mixer_reg_read(ctx, MXR_CFG_S);
- if (base != shadow)
- goto out;
-
- base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
- shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
- if (base != shadow)
- goto out;
-
- base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
- shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
- if (base != shadow)
- goto out;
- }
+ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
+ && !mixer_is_synced(ctx))
+ goto out;
drm_crtc_handle_vblank(&ctx->crtc->base);
}
@@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
{
- struct mixer_context *mixer_ctx = crtc->ctx;
+ struct mixer_context *ctx = crtc->ctx;
- if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+ if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
- mixer_vsync_set_update(mixer_ctx, false);
+ if (mixer_wait_for_sync(ctx))
+ dev_err(ctx->dev, "timeout waiting for VSYNC\n");
+ mixer_disable_sync(ctx);
}
static void mixer_update_plane(struct exynos_drm_crtc *crtc,
@@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- mixer_vsync_set_update(mixer_ctx, true);
+ mixer_enable_sync(mixer_ctx);
exynos_crtc_handle_event(crtc);
}
@@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
exynos_drm_pipe_clk_enable(crtc, true);
- mixer_vsync_set_update(ctx, false);
+ mixer_disable_sync(ctx);
mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
@@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
mixer_commit(ctx);
- mixer_vsync_set_update(ctx, true);
+ mixer_enable_sync(ctx);
set_bit(MXR_BIT_POWERED, &ctx->flags);
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 35b4ec3f7618..3592d04c33b2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
}
if (index_mode) {
- if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+ if (guest_gma >= I915_GTT_PAGE_SIZE) {
ret = -EFAULT;
goto err;
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 035479e273be..e3f9caa7839f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
/**
* intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
* @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
*
* This function is used to trigger hotplug interrupt for vGPU
*
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 3e7e2b80c857..69a9a1b2ea4a 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_vgpu_primary_plane_format p;
struct intel_vgpu_cursor_plane_format c;
- int ret;
+ int ret, tile_height = 1;
if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -228,19 +228,19 @@ static int vgpu_get_plane_info(struct drm_device *dev,
break;
case PLANE_CTL_TILED_X:
info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+ tile_height = 8;
break;
case PLANE_CTL_TILED_Y:
info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+ tile_height = 32;
break;
case PLANE_CTL_TILED_YF:
info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+ tile_height = 32;
break;
default:
gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
}
-
- info->size = (((p.stride * p.height * p.bpp) / 8) +
- (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
if (ret)
@@ -262,14 +262,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->x_hot = UINT_MAX;
info->y_hot = UINT_MAX;
}
-
- info->size = (((info->stride * c.height * c.bpp) / 8)
- + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
gvt_vgpu_err("invalid plane id:%d\n", plane_id);
return -EINVAL;
}
+ info->size = (info->stride * roundup(info->height, tile_height)
+ + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (info->size == 0) {
gvt_vgpu_err("fb size is zero\n");
return -EINVAL;
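(Aside: the unified size computation above rounds the height up to whole tile rows before converting the byte count to pages, instead of sizing each plane type separately. A minimal standalone sketch with invented numbers:)

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long stride = 4096; /* bytes per scanline (invented) */
	unsigned long height = 1080; /* scanlines (invented) */
	unsigned long tile_h = 32;   /* rows per Y-tile */

	unsigned long pages = (stride * ROUNDUP(height, tile_h)
			       + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("%lu pages\n", pages); /* 1088 tile-rounded rows -> 1088 pages */
	return 0;
}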
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c7103dd2d8d5..9814773882ec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
- struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_vgpu_ppgtt_spt *spt, *spn;
struct radix_tree_iter iter;
- void **slot;
+ LIST_HEAD(all_spt);
+ void __rcu **slot;
+ rcu_read_lock();
radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
spt = radix_tree_deref_slot(slot);
- ppgtt_free_spt(spt);
+ list_move(&spt->post_shadow_list, &all_spt);
}
+ rcu_read_unlock();
+
+ list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
+ ppgtt_free_spt(spt);
}
static int ppgtt_handle_guest_write_page_table_bytes(
@@ -1882,7 +1888,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
}
list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+ mutex_lock(&gvt->gtt.ppgtt_mm_lock);
list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+ mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
return mm;
}
@@ -1942,7 +1952,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
*/
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
- atomic_dec(&mm->pincount);
+ atomic_dec_if_positive(&mm->pincount);
}
/**
@@ -1967,9 +1977,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
if (ret)
return ret;
+ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
list_move_tail(&mm->ppgtt_mm.lru_list,
&mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
}
return 0;
@@ -1980,6 +1991,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
struct intel_vgpu_mm *mm;
struct list_head *pos, *n;
+ mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
@@ -1987,9 +2000,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
continue;
list_del_init(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
invalidate_ppgtt_mm(mm);
return 1;
}
+ mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
return 0;
}
@@ -2659,6 +2674,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
}
}
INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+ mutex_init(&gvt->gtt.ppgtt_mm_lock);
return 0;
}
@@ -2699,7 +2715,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
if (mm->type == INTEL_GVT_MM_PPGTT) {
+ mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
list_del_init(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
if (mm->ppgtt_mm.shadowed)
invalidate_ppgtt_mm(mm);
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d8cb04cc946d..edb610dc5d86 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
+ struct mutex ppgtt_mm_lock;
struct list_head ppgtt_mm_lru_list_head;
struct page *scratch_page;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d5fcc447d22f..a68addf95c23 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
void *buf, unsigned long count, bool is_write)
{
- void *aperture_va;
+ void __iomem *aperture_va;
if (!intel_vgpu_in_aperture(vgpu, off) ||
!intel_vgpu_in_aperture(vgpu, off + count)) {
@@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return -EIO;
if (is_write)
- memcpy(aperture_va + offset_in_page(off), buf, count);
+ memcpy_toio(aperture_va + offset_in_page(off), buf, count);
else
- memcpy(buf, aperture_va + offset_in_page(off), count);
+ memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
io_mapping_unmap(aperture_va);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7d84cfb9051a..7902fb162d09 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+ {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb8f936fdaa..05b953793316 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
int i = 0;
if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
- return -1;
+ return -EINVAL;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow)
return 0;
- ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
- if (ret < 0) {
- gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
- return ret;
- }
-
/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
+ ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+ if (ret < 0) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ goto err_req;
+ }
+
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
ret = prepare_workload(workload);
out:
+ if (ret) {
+ /* We might still need to add a request with
+ * a clean ctx to retire it properly.
+ */
+ rq = fetch_and_zero(&workload->req);
+ i915_request_put(rq);
+ }
+
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
goto out;
}
- if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+ if (!scheduler->current_vgpu->active ||
+ list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
goto out;
/*
@@ -1474,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_runtime_pm_put_unchecked(dev_priv);
}
- if (ret && (vgpu_is_vm_unhealthy(ret))) {
- enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ if (ret) {
+ if (vgpu_is_vm_unhealthy(ret))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
intel_vgpu_destroy_workload(workload);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 215b6ff8aa73..db7bb5bd5add 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -163,17 +163,25 @@ int i915_active_ref(struct i915_active *ref,
struct i915_request *rq)
{
struct i915_active_request *active;
+ int err = 0;
+
+ /* Prevent reaping in case we malloc/wait while building the tree */
+ i915_active_acquire(ref);
active = active_instance(ref, timeline);
- if (IS_ERR(active))
- return PTR_ERR(active);
+ if (IS_ERR(active)) {
+ err = PTR_ERR(active);
+ goto out;
+ }
if (!i915_active_request_isset(active))
ref->count++;
__i915_active_request_set(active, rq);
GEM_BUG_ON(!ref->count);
- return 0;
+out:
+ i915_active_release(ref);
+ return err;
}
bool i915_active_acquire(struct i915_active *ref)
@@ -223,19 +231,25 @@ int i915_request_await_active_request(struct i915_request *rq,
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
struct active_node *it, *n;
- int ret;
+ int err = 0;
- ret = i915_request_await_active_request(rq, &ref->last);
- if (ret)
- return ret;
+ /* await allocates and so we need to avoid hitting the shrinker */
+ if (i915_active_acquire(ref))
+ goto out; /* was idle */
+
+ err = i915_request_await_active_request(rq, &ref->last);
+ if (err)
+ goto out;
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- ret = i915_request_await_active_request(rq, &it->base);
- if (ret)
- return ret;
+ err = i915_request_await_active_request(rq, &it->base);
+ if (err)
+ goto out;
}
- return 0;
+out:
+ i915_active_release(ref);
+ return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bd890c04fe4..f6f6e5b78e97 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
&ctx);
if (ret) {
- ret = -EINTR;
+ if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+ try_again = true;
+ continue;
+ }
break;
}
crtc = connector->state->crtc;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6630212f2faf..9df65d386d11 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -757,39 +757,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
return ret;
}
-#if !defined(CONFIG_VGA_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
- return 0;
-}
-#elif !defined(CONFIG_DUMMY_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
- return -ENODEV;
-}
-#else
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
- int ret = 0;
-
- DRM_INFO("Replacing VGA console driver\n");
-
- console_lock();
- if (con_is_bound(&vga_con))
- ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
- if (ret == 0) {
- ret = do_unregister_con_driver(&vga_con);
-
- /* Ignore "already unregistered". */
- if (ret == -ENODEV)
- ret = 0;
- }
- console_unlock();
-
- return ret;
-}
-#endif
-
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
/*
@@ -1420,7 +1387,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
goto err_ggtt;
}
- ret = i915_kick_out_vgacon(dev_priv);
+ ret = vga_remove_vgacon(pdev);
if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n");
goto err_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9adc7bb9e69c..a67a63b5aa84 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
- INTEL_DEVID(dev_priv) == 0x87C0)
+ INTEL_DEVID(dev_priv) == 0x87C0 || \
+ INTEL_DEVID(dev_priv) == 0x87CA)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6728ea5c71d4..8558e81fdc2a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1688,7 +1688,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp,
if (vma->vm_file != filp)
return false;
- return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+ return vma->vm_start == addr &&
+ (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
/**
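(Aside: the PAGE_ALIGN() in the hunk above accounts for mmap() rounding the requested length up to a page boundary, so the VMA being matched spans PAGE_ALIGN(size) bytes rather than size bytes. A minimal standalone sketch, with PAGE_SIZE hard-coded for illustration:)

#include <stdio.h>

#define PAGE_SIZE 4096UL /* hard-coded for illustration */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long size = 6000;                 /* requested mmap length */
	unsigned long vma_span = PAGE_ALIGN(size); /* what the VMA covers */

	printf("requested %lu, vma spans %lu\n", size, vma_span); /* 8192 */
	printf("naive compare: %s\n", vma_span == size ? "match" : "mismatch");
	return 0;
}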
@@ -1733,8 +1734,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* pages from.
*/
if (!obj->base.filp) {
- i915_gem_object_put(obj);
- return -ENXIO;
+ addr = -ENXIO;
+ goto err;
+ }
+
+ if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+ addr = -EINVAL;
+ goto err;
}
addr = vm_mmap(obj->base.filp, 0, args->size,
@@ -1748,8 +1754,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct vm_area_struct *vma;
if (down_write_killable(&mm->mmap_sem)) {
- i915_gem_object_put(obj);
- return -EINTR;
+ addr = -EINTR;
+ goto err;
}
vma = find_vma(mm, addr);
if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
@@ -1767,12 +1773,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
i915_gem_object_put(obj);
args->addr_ptr = (u64)addr;
-
return 0;
err:
i915_gem_object_put(obj);
-
return addr;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 02adcaf6ebea..16f80a448820 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1667,6 +1667,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
len)) {
end_user:
user_access_end();
+end:
kvfree(relocs);
err = -EFAULT;
goto err;
@@ -1686,7 +1687,7 @@ end_user:
* relocations were valid.
*/
if (!user_access_begin(urelocs, size))
- goto end_user;
+ goto end;
for (copied = 0; copied < nreloc; copied++)
unsafe_put_user(-1,
@@ -2695,7 +2696,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
* when we did the "copy_from_user()" above.
*/
if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
- goto end_user;
+ goto end;
for (i = 0; i < args->buffer_count; i++) {
if (!(exec2_list[i].offset & UPDATE))
@@ -2709,6 +2710,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
}
end_user:
user_access_end();
+end:;
}
args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
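
The label split above exists because user_access_end() may only run after a successful user_access_begin(); jumping to end_user: from a failed begin would unbalance the pair. A toy sketch of the resulting control flow, with hypothetical stand-ins for the kernel primitives:

#include <stdio.h>

/* Hypothetical stand-ins so the control flow compiles in userspace;
 * the real primitives toggle SMAP/PAN around user accesses. */
static int fake_user_access_begin(int fail) { return !fail; }
static void fake_user_access_end(void) { puts("user_access_end()"); }

static void write_back(int begin_fails)
{
        if (!fake_user_access_begin(begin_fails))
                goto end;               /* begin failed: skip the paired end */
        puts("unsafe_put_user() loop"); /* this is the "end_user:" path */
        fake_user_access_end();
end:
        return;
}

int main(void)
{
        write_back(0); /* begin succeeds: loop runs, the end call is made */
        write_back(1); /* begin fails: neither runs */
        return 0;
}
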
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9a65341fec09..aa6791255252 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1721,7 +1721,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
i915_error_generate_code(error, engines));
if (engines) {
/* Just show the first executing process, more is confusing */
- i = ffs(engines);
+ i = __ffs(engines);
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 638a586469f9..047855dd8c6b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
#define GEN11_EU_DISABLE _MMIO(0x9134)
#define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
_TRANS_DDI_FUNC_CTL2_A)
#define PORT_SYNC_MODE_ENABLE (1 << 4)
-#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
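
The PORT_SYNC_MODE_MASTER_SELECT fix is a one-character typo with large consequences: `<` makes the macro a comparison that always evaluates to 0 or 1, while `<<` places the value in the register field. A two-line demonstration:

#include <stdio.h>

/* The buggy and fixed forms of the field macro, side by side. */
#define MASTER_SELECT_BUGGY(x) ((x) < 0)   /* comparison: always 0 or 1 */
#define MASTER_SELECT_FIXED(x) ((x) << 0)  /* shift: places the value   */

int main(void)
{
        printf("buggy(5) = 0x%x\n", MASTER_SELECT_BUGGY(5)); /* 0x0 */
        printf("fixed(5) = 0x%x\n", MASTER_SELECT_FIXED(5)); /* 0x5 */
        return 0;
}
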
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d01683167c77..8bc042551692 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -223,8 +223,14 @@ out:
return &p->requests[idx];
}
+struct sched_cache {
+ struct list_head *priolist;
+};
+
static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+sched_lock_engine(const struct i915_sched_node *node,
+ struct intel_engine_cs *locked,
+ struct sched_cache *cache)
{
struct intel_engine_cs *engine = node_to_request(node)->engine;
@@ -232,6 +238,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
if (engine != locked) {
spin_unlock(&locked->timeline.lock);
+ memset(cache, 0, sizeof(*cache));
spin_lock(&engine->timeline.lock);
}
@@ -253,11 +260,11 @@ static bool inflight(const struct i915_request *rq,
static void __i915_schedule(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
- struct list_head *uninitialized_var(pl);
- struct intel_engine_cs *engine, *last;
+ struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
const int prio = attr->priority;
+ struct sched_cache cache;
LIST_HEAD(dfs);
/* Needed in order to use the temporary link inside i915_dependency */
@@ -328,7 +335,7 @@ static void __i915_schedule(struct i915_request *rq,
__list_del_entry(&stack.dfs_link);
}
- last = NULL;
+ memset(&cache, 0, sizeof(cache));
engine = rq->engine;
spin_lock_irq(&engine->timeline.lock);
@@ -338,7 +345,7 @@ static void __i915_schedule(struct i915_request *rq,
INIT_LIST_HEAD(&dep->dfs_link);
- engine = sched_lock_engine(node, engine);
+ engine = sched_lock_engine(node, engine, &cache);
lockdep_assert_held(&engine->timeline.lock);
/* Recheck after acquiring the engine->timeline.lock */
@@ -347,11 +354,11 @@ static void __i915_schedule(struct i915_request *rq,
node->attr.priority = prio;
if (!list_empty(&node->link)) {
- if (last != engine) {
- pl = i915_sched_lookup_priolist(engine, prio);
- last = engine;
- }
- list_move_tail(&node->link, pl);
+ if (!cache.priolist)
+ cache.priolist =
+ i915_sched_lookup_priolist(engine,
+ prio);
+ list_move_tail(&node->link, cache.priolist);
} else {
/*
* If the request is not in the priolist queue because
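
The sched_cache pattern above amortizes the priolist lookup: the cached pointer is only valid while the current engine's timeline lock is held, so sched_lock_engine() zeroes the cache whenever the lock moves to another engine. A toy sketch of that invariant (types and the lookup are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct sched_cache { void *priolist; };

static void *lookup_priolist(int engine, int prio)
{
        static int dummy;
        printf("lookup on engine %d prio %d\n", engine, prio);
        return &dummy;
}

int main(void)
{
        struct sched_cache cache;
        int engine = 0, prio = 2;

        memset(&cache, 0, sizeof(cache));
        for (int node = 0; node < 3; node++) {
                int node_engine = (node == 2) ? 1 : 0;

                if (node_engine != engine) { /* lock moved: cache is stale */
                        memset(&cache, 0, sizeof(cache));
                        engine = node_engine;
                }
                if (!cache.priolist)         /* at most one lookup per lock */
                        cache.priolist = lookup_priolist(engine, prio);
                /* list_move_tail(&node->link, cache.priolist) goes here */
        }
        return 0;
}
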
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b713bed20c38..41b5bcb803cb 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -36,11 +36,8 @@
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
- unsigned long entries[12];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = ARRAY_SIZE(entries),
- };
+ unsigned long *entries;
+ unsigned int nr_entries;
char buf[512];
if (!vma->node.stack) {
@@ -49,8 +46,8 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
return;
}
- depot_fetch_stack(vma->node.stack, &trace);
- snprint_stack_trace(buf, sizeof(buf), &trace, 0);
+ nr_entries = stack_depot_fetch(vma->node.stack, &entries);
+ stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
vma->node.start, vma->node.size, reason, buf);
}
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 73a7bee24a66..641e0778fa9c 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
}
}
+static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+ struct intel_dsi *intel_dsi)
+{
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ WARN_ON(intel_dsi->io_wakeref[port]);
+ intel_dsi->io_wakeref[port] =
+ intel_display_power_get(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO);
+ }
+}
+
static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
}
- for_each_dsi_port(port, intel_dsi->ports) {
- intel_dsi->io_wakeref[port] =
- intel_display_power_get(dev_priv,
- port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO);
- }
+ get_dsi_io_power_domains(dev_priv, intel_dsi);
}
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
@@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
}
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
POSTING_READ(DPCLKA_CFGCR0_ICL);
mutex_unlock(&dev_priv->dpll_lock);
@@ -1117,7 +1132,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
DRM_ERROR("DDI port:%c buffer not idle\n",
port_name(port));
}
- gen11_dsi_ungate_clocks(encoder);
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -1218,20 +1233,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
return 0;
}
-static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u64 domains = 0;
- enum port port;
-
- for_each_dsi_port(port, intel_dsi->ports)
- if (port == PORT_A)
- domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
- else
- domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
-
- return domains;
+ get_dsi_io_power_domains(to_i915(encoder->base.dev),
+ enc_to_intel_dsi(&encoder->base));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b508d8a735e0..4364f42cac6b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1673,6 +1673,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
info->supports_dvi = (port != PORT_A && port != PORT_E);
info->supports_hdmi = info->supports_dvi;
info->supports_dp = (port != PORT_E);
+ info->supports_edp = (port == PORT_A);
}
}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index cacaa1d04d17..09ed90c0ba00 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -106,16 +106,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
&rq->fence.flags));
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
- /*
- * We may race with direct invocation of
- * dma_fence_signal(), e.g. i915_request_retire(),
- * in which case we can skip processing it ourselves.
- */
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags))
- continue;
/*
* Queue for execution after dropping the signaling
@@ -123,6 +113,14 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
* more signalers to the same context or engine.
*/
i915_request_get(rq);
+
+ /*
+ * We may race with direct invocation of
+ * dma_fence_signal(), e.g. i915_request_retire(),
+ * so we need to acquire our reference to the request
+ * before we cancel the breadcrumb.
+ */
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
list_add_tail(&rq->signal_link, &signal);
}
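
The reordering above is a claim-before-publish fix: once I915_FENCE_FLAG_SIGNAL is cleared, a concurrent retirer may consider the request finished with and drop the last reference, so the reference must be taken first. A compact illustration of the required ordering with C11 atomics (a sketch of the idea, not the driver's real locking):

#include <stdatomic.h>
#include <stdio.h>

struct req { atomic_int refcount; atomic_int signal_flag; };

static void process(struct req *rq)
{
        atomic_fetch_add(&rq->refcount, 1); /* 1: take our reference      */
        atomic_store(&rq->signal_flag, 0);  /* 2: only then drop the flag */
}

int main(void)
{
        struct req rq;

        atomic_init(&rq.refcount, 1);
        atomic_init(&rq.signal_flag, 1);
        process(&rq);
        printf("refs=%d flag=%d\n", atomic_load(&rq.refcount),
               atomic_load(&rq.signal_flag));
        return 0;
}
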
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ca705546a0ab..98cea1f4b3bf 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2075,12 +2075,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
intel_aux_power_domain(dig_port);
}
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
- u64 domains;
/*
* TODO: Add support for MST encoders. Atm, the following should never
@@ -2088,10 +2087,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
* hook.
*/
if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
- return 0;
+ return;
dig_port = enc_to_dig_port(&encoder->base);
- domains = BIT_ULL(dig_port->ddi_io_power_domain);
+ intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -2099,15 +2098,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
- domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
/*
* VDSC power is needed when DSC is enabled
*/
if (crtc_state->dsc_params.compression_enable)
- domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
-
- return domains;
+ intel_display_power_get(dev_priv,
+ intel_dsc_power_domain(crtc_state));
}
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2825,10 +2824,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
return;
}
/*
- * DSI ports should have their DDI clock ungated when disabled
- * and gated when enabled.
+ * For DSI we keep the DDI clocks gated
+ * except during the enable/disable sequence.
*/
- ddi_clk_needed = !encoder->base.crtc;
+ ddi_clk_needed = false;
}
val = I915_READ(DPCLKA_CFGCR0_ICL);
@@ -3568,6 +3567,13 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
{
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ intel_hdcp_enable(to_intel_connector(conn_state->connector));
+ else if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ intel_hdcp_disable(to_intel_connector(conn_state->connector));
}
static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
@@ -3856,14 +3862,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
else
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+ if (ret)
+ return ret;
- if (IS_GEN9_LP(dev_priv) && ret)
+ if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
- return ret;
+ return 0;
}
@@ -3962,12 +3970,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
goto out;
ret = drm_atomic_commit(state);
- if (ret)
- goto out;
-
- return 0;
-
- out:
+out:
drm_atomic_state_put(state);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ccb616351bba..421aac80a838 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15986,8 +15986,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
for_each_intel_encoder(&dev_priv->drm, encoder) {
- u64 get_domains;
- enum intel_display_power_domain domain;
struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
@@ -16001,9 +15999,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
continue;
crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
- get_domains = encoder->get_power_domains(encoder, crtc_state);
- for_each_power_domain(domain, get_domains)
- intel_display_power_get(dev_priv, domain);
+ encoder->get_power_domains(encoder, crtc_state);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cf709835fb9a..48da4a969a0a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1859,42 +1859,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- const struct link_config_limits *limits)
-{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int bpp, clock, lane_count;
- int mode_rate, link_clock, link_avail;
-
- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- bpp);
-
- for (lane_count = limits->min_lane_count;
- lane_count <= limits->max_lane_count;
- lane_count <<= 1) {
- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
- lane_count);
-
- if (mode_rate <= link_avail) {
- pipe_config->lane_count = lane_count;
- pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = link_clock;
-
- return 0;
- }
- }
- }
- }
-
- return -EINVAL;
-}
-
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
@@ -1922,6 +1886,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
int pipe_bpp;
int ret;
+ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, pipe_config);
+
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
@@ -2031,15 +1998,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = 6 * 3;
limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
+ if (intel_dp_is_edp(intel_dp)) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The eDP 1.3 and earlier panels
- * are generally designed to support only a single clock and
- * lane configuration, and typically these values correspond to
- * the native resolution of the panel. With eDP 1.4 rate select
- * and DSC, this is decreasingly the case, and we need to be
- * able to select less than maximum link config.
+ * advertises being capable of. The panels are generally
+ * designed to support only a single clock and lane
+ * configuration, and typically these values correspond to the
+ * native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -2053,22 +2018,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->common_rates[limits.max_clock],
limits.max_bpp, adjusted_mode->crtc_clock);
- if (intel_dp_is_edp(intel_dp))
- /*
- * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
- * section A.1: "It is recommended that the minimum number of
- * lanes be used, using the minimum link rate allowed for that
- * lane configuration."
- *
- * Note that we use the max clock and lane count for eDP 1.3 and
- * earlier, and fast vs. wide is irrelevant.
- */
- ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
- &limits);
- else
- /* Optimize for slow and wide. */
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
- &limits);
+ /*
+ * Optimize for slow and wide. This is the place to add an
+ * alternative optimization policy.
+ */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2165,9 +2119,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
- pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
- intel_dp_supports_fec(intel_dp, pipe_config);
-
ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
if (ret < 0)
return ret;
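
With the eDP fast-and-narrow path removed, everything funnels through the slow-and-wide search: maximize bpp first, then prefer the lowest link clock, spending lanes as needed. A toy first-fit search in that spirit (all rates and limits here are made up; the real driver walks its DPCD-derived tables in intel_dp_compute_link_config_wide()):

#include <stdio.h>

int main(void)
{
        const int clocks[] = { 162000, 270000, 540000 }; /* kHz, ascending */
        const int lanes[]  = { 1, 2, 4 };
        const int mode_clock = 148500;                   /* kHz */

        for (int bpp = 30; bpp >= 18; bpp -= 6) {
                long long mode_rate = (long long)mode_clock * bpp / 8;

                for (int c = 0; c < 3; c++)
                        for (int l = 0; l < 3; l++) {
                                /* toy capacity model */
                                long long link_avail =
                                        (long long)clocks[c] * lanes[l];

                                if (mode_rate <= link_avail) {
                                        printf("bpp=%d clock=%d lanes=%d\n",
                                               bpp, clocks[c], lanes[l]);
                                        return 0;
                                }
                        }
        }
        puts("no config fits");
        return 0;
}
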
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 15db41394b9e..d5660ac1b0d6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -270,10 +270,12 @@ struct intel_encoder {
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_state *pipe_config);
- /* Returns a mask of power domains that need to be referenced as part
- * of the hardware state readout code. */
- u64 (*get_power_domains)(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state);
+ /*
+ * Acquires the power domains needed for an active encoder during
+ * hardware state readout.
+ */
+ void (*get_power_domains)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index e8f694b57b8a..376ffe842e26 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -338,8 +338,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
+ unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
- unsigned long conn_configured, conn_seq;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
@@ -357,9 +357,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
- conn_seq = GENMASK(count - 1, 0);
+ mask = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
+ conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -372,8 +373,7 @@ retry:
if (conn_configured & BIT(i))
continue;
- /* First pass, only consider tiled connectors */
- if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
+ if (conn_seq == 0 && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@@ -477,10 +477,8 @@ retry:
conn_configured |= BIT(i);
}
- if (conn_configured != conn_seq) { /* repeat until no more are found */
- conn_seq = conn_configured;
+ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
- }
/*
* If the BIOS didn't enable everything it could, fall back to have the
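
The rewritten loop replaces the sentinel comparison with two explicit conditions: the first pass (conn_seq == 0) only admits tiled connectors, and the retry continues only while bits remain unset and the previous pass made progress. A toy version of the same control flow:

#include <stdio.h>

int main(void)
{
        unsigned long configured = 0, seq;
        unsigned long mask = (1ul << 3) - 1; /* three connectors */
        int tiled[3] = { 0, 1, 0 };

retry:
        seq = configured; /* snapshot of the previous pass */
        for (int i = 0; i < 3; i++) {
                if (configured & (1ul << i))
                        continue;
                if (seq == 0 && !tiled[i])
                        continue;        /* first pass: tiled only */
                configured |= 1ul << i;
                printf("configured connector %d\n", i);
        }
        if ((configured & mask) != mask && configured != seq)
                goto retry;              /* unset bits left and progress made */
        return 0;
}
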
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f125a62eba8c..a46bffe2b288 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -182,7 +182,6 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(VIDEO_DIP_CTL, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
@@ -190,7 +189,6 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VIDEO_DIP_DATA, 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -237,7 +235,6 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
@@ -245,7 +242,6 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -298,7 +294,6 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
@@ -306,7 +301,6 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -352,7 +346,6 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
@@ -360,7 +353,6 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -406,7 +398,6 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), *data);
@@ -416,7 +407,6 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
for (; i < data_size; i += 4)
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), 0);
- mmiowb();
val |= hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index a017a4232c0f..20c4434474e3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -60,31 +60,20 @@
static noinline depot_stack_handle_t __save_depot_stack(void)
{
unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = ARRAY_SIZE(entries),
- .skip = 1,
- };
+ unsigned int n;
- save_stack_trace(&trace);
- if (trace.nr_entries &&
- trace.entries[trace.nr_entries - 1] == ULONG_MAX)
- trace.nr_entries--;
-
- return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+ return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}
static void __print_depot_stack(depot_stack_handle_t stack,
char *buf, int sz, int indent)
{
- unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = ARRAY_SIZE(entries),
- };
+ unsigned long *entries;
+ unsigned int nr_entries;
- depot_fetch_stack(stack, &trace);
- snprint_stack_trace(buf, sz, &trace, indent);
+ nr_entries = stack_depot_fetch(stack, &entries);
+ stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
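
Both this file and i915_vma.c above migrate from the old struct stack_trace API to the flat stacktrace/stackdepot helpers. A kernel-context sketch (not a standalone program) of the new calls, with the signatures these hunks rely on:

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

static depot_stack_handle_t save_current_stack(void)
{
        unsigned long entries[16];
        unsigned int n;

        /* skip one frame so the saver itself is not recorded */
        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
        return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void print_saved_stack(depot_stack_handle_t handle, char *buf, int sz)
{
        unsigned long *entries;
        unsigned int nr_entries;

        /* the depot owns the entries; no local array is needed */
        nr_entries = stack_depot_fetch(handle, &entries);
        stack_trace_snprint(buf, sz, entries, nr_entries, 0);
}
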
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index d00d0bb07784..7eb58a9d1319 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -710,47 +710,45 @@ __sseu_prepare(struct drm_i915_private *i915,
unsigned int flags,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
- struct igt_spinner **spin_out)
+ struct igt_spinner **spin)
{
- int ret = 0;
-
- if (flags & (TEST_BUSY | TEST_RESET)) {
- struct igt_spinner *spin;
- struct i915_request *rq;
+ struct i915_request *rq;
+ int ret;
- spin = kzalloc(sizeof(*spin), GFP_KERNEL);
- if (!spin) {
- ret = -ENOMEM;
- goto out;
- }
+ *spin = NULL;
+ if (!(flags & (TEST_BUSY | TEST_RESET)))
+ return 0;
- ret = igt_spinner_init(spin, i915);
- if (ret)
- return ret;
+ *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
+ if (!*spin)
+ return -ENOMEM;
- rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- igt_spinner_fini(spin);
- kfree(spin);
- goto out;
- }
+ ret = igt_spinner_init(*spin, i915);
+ if (ret)
+ goto err_free;
- i915_request_add(rq);
+ rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto err_fini;
+ }
- if (!igt_wait_for_spinner(spin, rq)) {
- pr_err("%s: Spinner failed to start!\n", name);
- igt_spinner_end(spin);
- igt_spinner_fini(spin);
- kfree(spin);
- ret = -ETIMEDOUT;
- goto out;
- }
+ i915_request_add(rq);
- *spin_out = spin;
+ if (!igt_wait_for_spinner(*spin, rq)) {
+ pr_err("%s: Spinner failed to start!\n", name);
+ ret = -ETIMEDOUT;
+ goto err_end;
}
-out:
+ return 0;
+
+err_end:
+ igt_spinner_end(*spin);
+err_fini:
+ igt_spinner_fini(*spin);
+err_free:
+ kfree(fetch_and_zero(spin));
return ret;
}
@@ -897,22 +895,23 @@ __sseu_test(struct drm_i915_private *i915,
ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
if (ret)
- goto out;
+ goto out_context;
ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
if (ret)
- goto out;
+ goto out_spin;
ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
hweight32(sseu.slice_mask), spin);
-out:
+out_spin:
if (spin) {
igt_spinner_end(spin);
igt_spinner_fini(spin);
kfree(spin);
}
+out_context:
kernel_context_close(kctx);
return ret;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce7176f63..b9b0ea4e2404 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
struct i915_gem_context *ctx;
ctx = live_context(i915, file);
- if (!ctx)
+ if (IS_ERR(ctx))
break;
/* We will need some GGTT space for the rq's context */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 6403728fe778..31c93c3ccd00 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->sb_lock);
}
+static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
+
+ tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+ switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+ case PIPEMISC_DITHER_6_BPC:
+ return 18;
+ case PIPEMISC_DITHER_8_BPC:
+ return 24;
+ case PIPEMISC_DITHER_10_BPC:
+ return 30;
+ case PIPEMISC_DITHER_12_BPC:
+ return 36;
+ default:
+ MISSING_CASE(tmp);
+ return 0;
+ }
+}
+
static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1071,6 +1093,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
+ pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
/* Enable Frame time stamp based scanline reporting */
adjusted_mode->private_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
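
bdw_get_pipemisc_bpp() encodes a simple relationship: pipe bpp is bits per pixel across three channels, i.e. three times the dither bits-per-channel field. A quick check of the table:

#include <stdio.h>

static int bpc_to_bpp(int bpc) { return bpc * 3; }

int main(void)
{
        /* 6 -> 18, 8 -> 24, 10 -> 30, 12 -> 36, matching the switch above */
        for (int bpc = 6; bpc <= 12; bpc += 2)
                printf("%2d bpc -> %2d bpp\n", bpc, bpc_to_bpp(bpc));
        return 0;
}
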
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index ec3602ebbc1c..54011df8c2e8 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
if (disable_partial)
ipu_plane_disable(ipu_crtc->plane[1], true);
if (disable_full)
- ipu_plane_disable(ipu_crtc->plane[0], false);
+ ipu_plane_disable(ipu_crtc->plane[0], true);
}
static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 22e68a100e7b..5d333138f913 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock)
static unsigned int mt2701_calculate_factor(int clock)
{
if (clock <= 64000)
- return 16;
- else if (clock <= 128000)
- return 8;
- else if (clock <= 256000)
return 4;
- else
+ else if (clock <= 128000)
return 2;
+ else
+ return 1;
}
static const struct mtk_dpi_conf mt8173_conf = {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf59ea9bccfd..57ce4708ef1b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -15,6 +15,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
@@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = {
.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.gem_prime_mmap = mtk_drm_gem_mmap_buf,
+ .gem_prime_vmap = mtk_drm_gem_prime_vmap,
+ .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
.fops = &mtk_drm_fops,
.name = DRIVER_NAME,
@@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev)
if (ret < 0)
goto err_deinit;
+ ret = drm_fbdev_generic_setup(drm, 32);
+ if (ret)
+ DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+
return 0;
err_deinit:
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 259b7b0de1d2..38483e9ee071 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -241,3 +241,49 @@ err_gem_free:
kfree(mtk_gem);
return ERR_PTR(ret);
}
+
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct sg_table *sgt;
+ struct sg_page_iter iter;
+ unsigned int npages;
+ unsigned int i = 0;
+
+ if (mtk_gem->kvaddr)
+ return mtk_gem->kvaddr;
+
+ sgt = mtk_gem_prime_get_sg_table(obj);
+ if (IS_ERR(sgt))
+ return NULL;
+
+ npages = obj->size >> PAGE_SHIFT;
+ mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+ if (!mtk_gem->pages)
+ goto out;
+
+ for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+ mtk_gem->pages[i++] = sg_page_iter_page(&iter);
+ if (i > npages)
+ break;
+ }
+ mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+
+out:
+ kfree((void *)sgt);
+
+ return mtk_gem->kvaddr;
+}
+
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+
+ if (!mtk_gem->pages)
+ return;
+
+ vunmap(vaddr);
+ mtk_gem->kvaddr = 0;
+ kfree((void *)mtk_gem->pages);
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
index 534639b43a1c..c047a7ef294f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -37,6 +37,7 @@ struct mtk_drm_gem_obj {
dma_addr_t dma_addr;
unsigned long dma_attrs;
struct sg_table *sg;
+ struct page **pages;
};
#define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base)
@@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 915cc84621ae..e04e6c293d39 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
if (IS_ERR(regmap))
ret = PTR_ERR(regmap);
if (ret) {
- ret = PTR_ERR(regmap);
dev_err(dev,
"Failed to get system configuration registers: %d\n",
ret);
@@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(remote);
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+ of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
index 4ef9c57ffd44..5223498502c4 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
@@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = {
.owner = THIS_MODULE,
};
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
- hdmi_phy->pll_rate = rate;
- if (rate <= 74250000)
- *parent_rate = rate;
- else
- *parent_rate = rate / 2;
-
- return rate;
-}
-
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
- return hdmi_phy->pll_rate;
-}
-
void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
u32 bits)
{
@@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
return NULL;
}
-static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy,
- const struct clk_ops **ops)
+static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
+ struct clk_init_data *clk_init)
{
- if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops)
- *ops = hdmi_phy->conf->hdmi_phy_clk_ops;
- else
- dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n");
+ clk_init->flags = hdmi_phy->conf->flags;
+ clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
}
static int mtk_hdmi_phy_probe(struct platform_device *pdev)
@@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
struct clk_init_data clk_init = {
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
- .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
};
struct phy *phy;
@@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
hdmi_phy->dev = dev;
hdmi_phy->conf =
(struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
- mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops);
+ mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
hdmi_phy->pll_hw.init = &clk_init;
hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
if (IS_ERR(hdmi_phy->pll)) {
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
index f39b1fc66612..2d8b3182470d 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
@@ -21,6 +21,7 @@ struct mtk_hdmi_phy;
struct mtk_hdmi_phy_conf {
bool tz_disabled;
+ unsigned long flags;
const struct clk_ops *hdmi_phy_clk_ops;
void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
u32 val, u32 mask);
struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate);
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate);
extern struct platform_driver mtk_hdmi_phy_driver;
extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
index fcc42dc6ea7f..d3cc4022e988 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
@@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
usleep_range(80, 100);
}
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate <= 64000000)
pos_div = 3;
- else if (rate <= 12800000)
- pos_div = 1;
+ else if (rate <= 128000000)
+ pos_div = 2;
else
pos_div = 1;
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
RG_HTPLL_IC_MASK);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
@@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ unsigned long out_rate, val;
+
+ val = (readl(hdmi_phy->regs + HDMI_CON6)
+ & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+ switch (val) {
+ case 0x00:
+ out_rate = parent_rate;
+ break;
+ case 0x01:
+ out_rate = parent_rate / 2;
+ break;
+ default:
+ out_rate = parent_rate / 4;
+ break;
+ }
+
+ val = (readl(hdmi_phy->regs + HDMI_CON6)
+ & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+ out_rate *= (val + 1) * 2;
+ val = (readl(hdmi_phy->regs + HDMI_CON2)
+ & RG_HDMITX_TX_POSDIV_MASK);
+ out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+ if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+ out_rate /= 5;
+
+ return out_rate;
+}
+
static const struct clk_ops mtk_hdmi_phy_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
@@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
.tz_disabled = true,
+ .flags = CLK_SET_RATE_GATE,
.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index ed5916b27658..47f8a2951682 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
usleep_range(100, 150);
}
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ hdmi_phy->pll_rate = rate;
+ if (rate <= 74250000)
+ *parent_rate = rate;
+ else
+ *parent_rate = rate / 2;
+
+ return rate;
+}
+
static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ return hdmi_phy->pll_rate;
+}
+
static const struct clk_ops mtk_hdmi_phy_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
@@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
}
struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2281ed3eb774..8a4ebcb6405c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
ret = drm_dev_register(drm, 0);
if (ret)
- goto free_drm;
+ goto uninstall_irq;
drm_fbdev_generic_setup(drm, 32);
return 0;
+uninstall_irq:
+ drm_irq_uninstall(drm);
free_drm:
drm_dev_put(drm);
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
static void meson_drv_unbind(struct device *dev)
{
- struct drm_device *drm = dev_get_drvdata(dev);
- struct meson_drm *priv = drm->dev_private;
+ struct meson_drm *priv = dev_get_drvdata(dev);
+ struct drm_device *drm = priv->drm;
if (priv->canvas) {
meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
}
drm_dev_unregister(drm);
+ drm_irq_uninstall(drm);
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index e28814f4ea6c..563953ec6ad0 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
/* Reject the mode if it exceeds the sink's max TMDS clock */
- if (mode->clock > connector->display_info.max_tmds_clock)
+ if (connector->display_info.max_tmds_clock &&
+ mode->clock > connector->display_info.max_tmds_clock)
return MODE_BAD;
/* Check against non-VIC supported modes */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 88a52f6b39fe..7dfbbbc1beea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
}
ret = pm_runtime_get_sync(drm->dev);
- if (IS_ERR_VALUE(ret) && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES)
return ret;
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
pm_runtime_put_autosuspend(drm->dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 8be7a83ced9b..40c47d6a7d78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -100,12 +100,10 @@ static void
nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
{
struct nouveau_dmem_chunk *chunk;
- struct nouveau_drm *drm;
unsigned long idx;
chunk = (void *)hmm_devmem_page_get_drvdata(page);
idx = page_to_pfn(page) - chunk->pfn_first;
- drm = chunk->drm;
/*
* FIXME:
@@ -261,7 +259,7 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
.finalize_and_map = nouveau_dmem_fault_finalize_and_map,
};
-static int
+static vm_fault_t
nouveau_dmem_fault(struct hmm_devmem *devmem,
struct vm_area_struct *vma,
unsigned long addr,
@@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
/* FIXME handle pin failure */
WARN_ON(ret);
}
- list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
- /* FIXME handle pin failure */
- WARN_ON(ret);
- }
mutex_unlock(&drm->dmem->mutex);
}
@@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
nouveau_bo_unpin(chunk->bo);
}
- list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
- nouveau_bo_unpin(chunk->bo);
- }
mutex_unlock(&drm->dmem->mutex);
}
@@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
*/
drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
device, size);
- if (drm->dmem->devmem == NULL) {
+ if (IS_ERR(drm->dmem->devmem)) {
kfree(drm->dmem);
drm->dmem = NULL;
return;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index 340383150fb9..ebf9c96d43ee 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
hdmi4_core_disable(core);
return 0;
}
@@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
if (err)
return err;
+ /*
+ * Initialize the CEC clock divider: CEC needs a 2 MHz clock, hence
+ * set the divider to 24 to get 48/24 = 2 MHz
+ */
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
/* Clear TX FIFO */
if (!hdmi_cec_clear_tx_fifo(adap)) {
pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
- return -EIO;
+ err = -EIO;
+ goto err_disable_clk;
}
/* Clear RX FIFO */
if (!hdmi_cec_clear_rx_fifo(adap)) {
pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
- return -EIO;
+ err = -EIO;
+ goto err_disable_clk;
}
/* Clear CEC interrupts */
@@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
}
return 0;
+
+err_disable_clk:
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
+ hdmi4_core_disable(core);
+
+ return err;
}
static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
@@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
return ret;
core->wp = wp;
- /*
- * Initialize CEC clock divider: CEC needs 2MHz clock hence
- * set the devider to 24 to get 48/24=2MHz clock
- */
- REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+ /* Disable clock initially, hdmi_cec_adap_enable() manages it */
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
ret = cec_register_adapter(core->adap, &pdev->dev);
if (ret < 0) {
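
The divider value in the hunk above is plain arithmetic: CEC needs a 2 MHz functional clock and the source runs at 48 MHz, so the divider field is programmed to 0x18 (24), giving 48 / 24 = 2 MHz. In miniature:

#include <stdio.h>

int main(void)
{
        unsigned int src_khz = 48000, div = 0x18; /* 24 */

        printf("cec clk = %u kHz\n", src_khz / div); /* 2000 kHz = 2 MHz */
        return 0;
}
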
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 813ba42f2753..e384b95ad857 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
else
acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
/*
- * The I2S input word length is twice the lenght given in the IEC-60958
+ * The I2S input word length is twice the length given in the IEC-60958
* status word. If the word size is greater than
* 20 bits, increment by one.
*/
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index bb81e310eb6d..f33e349c4ec5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -79,6 +79,10 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto free_dev;
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
+ if (ret)
+ goto disable_pci;
+
ret = qxl_device_init(qdev, &qxl_driver, pdev);
if (ret)
goto disable_pci;
@@ -94,7 +98,6 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto modeset_cleanup;
- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
drm_fbdev_generic_setup(&qdev->ddev, 32);
return 0;
@@ -252,10 +255,14 @@ static struct drm_driver qxl_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = qxl_gem_prime_pin,
.gem_prime_unpin = qxl_gem_prime_unpin,
+ .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 8b448eca1cd9..114653b471c6 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -42,6 +42,18 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
qxl_bo_unpin(bo);
}
+struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *qxl_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c7d4c6073ea5..0d4ade9d4722 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
clk_disable(vop->hclk);
}
+static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+{
+ if (win->phy->scl && win->phy->scl->ext) {
+ VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+ VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+ VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+ VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+ }
+
+ VOP_WIN_SET(vop, win, enable, 0);
+}
+
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
struct vop_win *vop_win = &vop->win[i];
const struct vop_win_data *win = vop_win->data;
- VOP_WIN_SET(vop, win, enable, 0);
+ vop_win_disable(vop, win);
}
spin_unlock(&vop->reg_lock);
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
- VOP_WIN_SET(vop, win, enable, 0);
+ vop_win_disable(vop, win);
spin_unlock(&vop->reg_lock);
}
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
int channel = i * 2 + 1;
VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
- VOP_WIN_SET(vop, win, enable, 0);
+ vop_win_disable(vop, win);
VOP_WIN_SET(vop, win, gate, 1);
}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 19fc601c9eeb..a1bec2779e76 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
EXPORT_SYMBOL(drm_sched_increase_karma);
/**
- * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ * drm_sched_stop - stop the scheduler
*
* @sched: scheduler instance
- * @bad: bad scheduler job
*
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 3ebd9f5e2719..29258b404e54 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -16,6 +16,7 @@
#include <linux/of_reserved_mem.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev)
ret = -ENOMEM;
goto free_drm;
}
+
+ dev_set_drvdata(dev, drm);
drm->dev_private = drv;
INIT_LIST_HEAD(&drv->frontend_list);
INIT_LIST_HEAD(&drv->engine_list);
@@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
+
+ component_unbind_all(dev, NULL);
of_reserved_mem_device_release(dev);
+
drm_dev_put(drm);
}
@@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev)
static int sun4i_drv_remove(struct platform_device *pdev)
{
+ component_master_del(&pdev->dev, &sun4i_drv_master_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index dc47720c99ba..39d8509d96a0 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -48,8 +48,13 @@ static enum drm_mode_status
sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
- /* This is max for HDMI 2.0b (4K@60Hz) */
- if (mode->clock > 594000)
+ /*
+ * The controller supports a maximum of 594 MHz, which corresponds
+ * to 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
+ * 340 MHz, scrambling has to be enabled. Because scrambling is not
+ * yet implemented, just limit to 340 MHz for now.
+ */
+ if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
return MODE_OK;
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index fc36e0c10a37..b1e7c76e9c17 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
err_unregister_gates:
for (i = 0; i < CLK_NUM; i++)
- if (clk_data->hws[i])
+ if (!IS_ERR_OR_NULL(clk_data->hws[i]))
clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
err_assert_reset:
@@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
of_clk_del_provider(dev->of_node);
for (i = 0; i < CLK_NUM; i++)
- clk_hw_unregister_gate(clk_data->hws[i]);
+ if (clk_data->hws[i])
+ clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
reset_control_assert(tcon_top->rst);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 47c55974756d..d23c4bfde790 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
hdmi->dvi = !tegra_output_is_hdmi(output);
if (!hdmi->dvi) {
- err = tegra_hdmi_setup_audio(hdmi);
- if (err < 0)
- hdmi->dvi = true;
+ /*
+ * Make sure that the audio format has been configured before
+ * enabling audio, otherwise we may try to divide by zero.
+ */
+ if (hdmi->format.sample_rate > 0) {
+ err = tegra_hdmi_setup_audio(hdmi);
+ if (err < 0)
+ hdmi->dvi = true;
+ }
}
if (hdmi->config->has_hda)
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index ba9b3cfb8c3d..b3436c2aed68 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc;
u32 value;
/* rien ne va plus (no more bets) */
if (!old_state || !old_state->crtc)
return;
+ dc = to_tegra_dc(old_state->crtc);
+
/*
* XXX Legacy helpers seem to sometimes call ->atomic_disable() even
* on planes that are already disabled. Make sure we fallback to the
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 39bfed9623de..982ce37ecde1 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
if (vic->booted)
return 0;
+#ifdef CONFIG_IOMMU_API
if (vic->config->supports_sid) {
struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
vic_writel(vic, value, VIC_THI_STREAMID1);
}
}
+#endif
/* setup clockgating registers */
vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3f56647cdb35..1a01669b159a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
* ttm_global_mutex - protecting the global BO state
*/
DEFINE_MUTEX(ttm_global_mutex);
-struct ttm_bo_global ttm_bo_glob = {
- .use_count = 0
-};
+unsigned ttm_bo_glob_use_count;
+struct ttm_bo_global ttm_bo_glob;
static struct attribute ttm_bo_count = {
.name = "bo_count",
@@ -876,8 +875,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
reservation_object_add_shared_fence(bo->resv, fence);
ret = reservation_object_reserve_shared(bo->resv, 1);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ dma_fence_put(fence);
return ret;
+ }
dma_fence_put(bo->moving);
bo->moving = fence;
@@ -1529,12 +1530,13 @@ static void ttm_bo_global_release(void)
struct ttm_bo_global *glob = &ttm_bo_glob;
mutex_lock(&ttm_global_mutex);
- if (--glob->use_count > 0)
+ if (--ttm_bo_glob_use_count > 0)
goto out;
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
ttm_mem_global_release(&ttm_mem_glob);
+ memset(glob, 0, sizeof(*glob));
out:
mutex_unlock(&ttm_global_mutex);
}
@@ -1546,7 +1548,7 @@ static int ttm_bo_global_init(void)
unsigned i;
mutex_lock(&ttm_global_mutex);
- if (++glob->use_count > 1)
+ if (++ttm_bo_glob_use_count > 1)
goto out;
ret = ttm_mem_global_init(&ttm_mem_glob);
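Editor's note: the ttm_bo.c hunks move the use count out of the global structure, which is now zeroed on final release so a later re-init starts clean. A minimal sketch of the underlying pattern, with placeholders standing in for the real init and teardown:

```c
#include <linux/mutex.h>
#include <linux/string.h>

static DEFINE_MUTEX(glob_mutex);
static unsigned int glob_use_count;
static struct { int initialized; } glob;	/* stand-in for the shared state */

static int glob_get(void)
{
	mutex_lock(&glob_mutex);
	if (++glob_use_count == 1)
		glob.initialized = 1;		/* first user: initialize */
	mutex_unlock(&glob_mutex);
	return 0;
}

static void glob_put(void)
{
	mutex_lock(&glob_mutex);
	if (--glob_use_count == 0)
		memset(&glob, 0, sizeof(glob));	/* last user: tear down and zero */
	mutex_unlock(&glob_mutex);
}
```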
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index f1567c353b54..9a0909decb36 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -461,8 +461,8 @@ out_no_zone:
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
- unsigned int i;
struct ttm_mem_zone *zone;
+ unsigned int i;
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
@@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
- }
+ }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
+ memset(glob, 0, sizeof(*glob));
}
static void ttm_check_swapping(struct ttm_mem_global *glob)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index f841accc2c00..627f8dc91d0e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (!(flags & TTM_PAGE_FLAG_DMA32)) {
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- if (p++ != pages[i + j])
+ if (!(flags & TTM_PAGE_FLAG_DMA32) &&
+ (npages - i) >= HPAGE_PMD_NR) {
+ for (j = 1; j < HPAGE_PMD_NR; ++j)
+ if (++p != pages[i + j])
break;
if (j == HPAGE_PMD_NR)
@@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
unsigned max_size, n2free;
spin_lock_irqsave(&huge->lock, irq_flags);
- while (i < npages) {
+ while ((npages - i) >= HPAGE_PMD_NR) {
struct page *p = pages[i];
unsigned j;
if (!p)
break;
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- if (p++ != pages[i + j])
+ for (j = 1; j < HPAGE_PMD_NR; ++j)
+ if (++p != pages[i + j])
break;
if (j != HPAGE_PMD_NR)
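Editor's note: the ttm_put_pages() hunks fix two issues in the huge-page run detection: the scan could read past the end of the array when fewer than HPAGE_PMD_NR entries remained, and the old `p++ != pages[i + j]` compared the wrong pair at j = 0. A self-contained sketch of the corrected check (RUN_LEN stands in for HPAGE_PMD_NR; i is assumed to be below npages):

```c
#include <linux/mm_types.h>
#include <linux/types.h>

#define RUN_LEN 512	/* stands in for HPAGE_PMD_NR */

/* Return true if pages[i..i + RUN_LEN - 1] exist and are physically
 * contiguous; never reads past npages entries. */
static bool pages_form_run(struct page **pages, unsigned int npages, unsigned int i)
{
	struct page *p = pages[i];
	unsigned int j;

	if (npages - i < RUN_LEN)		/* not enough entries left */
		return false;
	for (j = 1; j < RUN_LEN; ++j)
		if (++p != pages[i + j])	/* consecutive struct pages */
			return false;
	return true;
}
```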
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 66885c24590f..c1bd5e3d9e4a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -18,18 +18,19 @@
#include "udl_connector.h"
#include "udl_drv.h"
-static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
- u8 *buff)
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
{
int ret, i;
u8 *read_buff;
+ struct udl_device *udl = data;
read_buff = kmalloc(2, GFP_KERNEL);
if (!read_buff)
- return false;
+ return -1;
- for (i = 0; i < EDID_LENGTH; i++) {
- int bval = (i + block_idx * EDID_LENGTH) << 8;
+ for (i = 0; i < len; i++) {
+ int bval = (i + block * EDID_LENGTH) << 8;
ret = usb_control_msg(udl->udev,
usb_rcvctrlpipe(udl->udev, 0),
(0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
kfree(read_buff);
- return false;
+ return -1;
}
- buff[i] = read_buff[1];
+ buf[i] = read_buff[1];
}
kfree(read_buff);
- return true;
-}
-
-static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
- int *result_buff_size)
-{
- int i, extensions;
- u8 *block_buff = NULL, *buff_ptr;
-
- block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (block_buff == NULL)
- return false;
-
- if (udl_get_edid_block(udl, 0, block_buff) &&
- memchr_inv(block_buff, 0, EDID_LENGTH)) {
- extensions = ((struct edid *)block_buff)->extensions;
- if (extensions > 0) {
- /* we have to read all extensions one by one */
- *result_buff_size = EDID_LENGTH * (extensions + 1);
- *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
- buff_ptr = *result_buff;
- if (buff_ptr == NULL) {
- kfree(block_buff);
- return false;
- }
- memcpy(buff_ptr, block_buff, EDID_LENGTH);
- kfree(block_buff);
- buff_ptr += EDID_LENGTH;
- for (i = 1; i < extensions; ++i) {
- if (udl_get_edid_block(udl, i, buff_ptr)) {
- buff_ptr += EDID_LENGTH;
- } else {
- kfree(*result_buff);
- *result_buff = NULL;
- return false;
- }
- }
- return true;
- }
- /* we have only base edid block */
- *result_buff = block_buff;
- *result_buff_size = EDID_LENGTH;
- return true;
- }
-
- kfree(block_buff);
-
- return false;
+ return 0;
}
static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- u8 *edid_buff = NULL;
- int edid_buff_size = 0;
struct udl_device *udl = connector->dev->dev_private;
struct udl_drm_connector *udl_connector =
container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
udl_connector->edid = NULL;
}
-
- if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
+ udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+ if (!udl_connector->edid)
return connector_status_disconnected;
- udl_connector->edid = (struct edid *)edid_buff;
-
return connector_status_connected;
}
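Editor's note: the udl rewrite above drops the hand-rolled extension-block loop in favor of drm_do_get_edid(), which only needs a block-reader callback and handles allocation, validation, and extension iteration itself. A hedged sketch of that contract (my_read_block and my_priv are hypothetical):

```c
#include <drm/drm_edid.h>

/* The callback copies 'len' bytes of EDID block number 'block' into
 * 'buf' and returns 0 on success; drm_do_get_edid() iterates over the
 * base block and all extension blocks itself. */
static int my_read_block(void *data, u8 *buf, unsigned int block, size_t len)
{
	/* transport-specific read goes here */
	return 0;
}

/* usage: edid = drm_do_get_edid(connector, my_read_block, my_priv); */
```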
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 22cd2d13e272..ff47f890e6ad 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -52,6 +52,7 @@ static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = udl_driver_load,
.unload = udl_driver_unload,
+ .release = udl_driver_release,
/* gem hooks */
.gem_free_object_unlocked = udl_gem_free_object,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index e9e9b1ff678e..4ae67d882eae 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
int udl_driver_load(struct drm_device *dev, unsigned long flags);
void udl_driver_unload(struct drm_device *dev);
+void udl_driver_release(struct drm_device *dev);
int udl_fbdev_init(struct drm_device *dev);
void udl_fbdev_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index d5a23295dd80..bb7b58407039 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
- drm_gem_object_put(&gobj->base);
+ drm_gem_object_put_unlocked(&gobj->base);
unlock:
mutex_unlock(&udl->gem_lock);
return ret;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 9086d0d1b880..1f8ef34ade24 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -379,6 +379,12 @@ void udl_driver_unload(struct drm_device *dev)
udl_free_urb_list(dev);
udl_fbdev_cleanup(dev);
- udl_modeset_cleanup(dev);
kfree(udl);
}
+
+void udl_driver_release(struct drm_device *dev)
+{
+ udl_modeset_cleanup(dev);
+ drm_dev_fini(dev);
+ kfree(dev);
+}
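Editor's note on the unload/release split above: .release only runs once the last drm_device reference is dropped, which may be well after .unload (unplug), so cleanup that must outlive open file handles belongs there. A hedged sketch (my_release is hypothetical; drm_dev_fini() was the API of this era):

```c
#include <drm/drmP.h>
#include <linux/slab.h>

/* Hedged sketch of a final-teardown .release callback. */
static void my_release(struct drm_device *dev)
{
	/* cleanup that must outlive open file handles goes here */
	drm_dev_fini(dev);	/* tear down drm_device internals */
	kfree(dev);		/* the driver allocated dev itself */
}
```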
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 730008d3da76..1baa10e94484 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -1042,7 +1042,7 @@ static void
vc4_crtc_reset(struct drm_crtc *crtc)
{
if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ vc4_crtc_destroy_state(crtc, crtc->state);
crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
if (crtc->state)
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5930facd6d2d..11a8f99ba18c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
ret = drm_gem_handle_create(file, &obj->base, handle);
drm_gem_object_put_unlocked(&obj->base);
if (ret)
- goto err;
+ return ERR_PTR(ret);
return &obj->base;
-
-err:
- __vgem_gem_destroy(obj);
- return ERR_PTR(ret);
}
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
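Editor's note: the vgem fix above (and the vkms one below) removes an error path that destroyed the object a second time. A sketch of the reference flow, assuming the object starts with one reference held by the caller:

```c
#include <drm/drm_gem.h>
#include <linux/err.h>

/* drm_gem_handle_create() takes its own reference on success, so after
 * dropping ours a failed call leaves zero references and the GEM free
 * callback destroys the object -- an explicit destroy here would be a
 * double free. */
static struct drm_gem_object *create_with_handle(struct drm_file *file,
						 struct drm_gem_object *obj,
						 u32 *handle)
{
	int ret = drm_gem_handle_create(file, obj, handle);

	drm_gem_object_put_unlocked(obj);	/* drop our local reference */
	if (ret)
		return ERR_PTR(ret);		/* object already freed above */
	return obj;
}
```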
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index b996ac1d4fcc..af92964b6889 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -205,10 +205,14 @@ static struct drm_driver driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = virtgpu_gem_prime_pin,
.gem_prime_unpin = virtgpu_gem_prime_unpin,
+ .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3238fdf58eb4..d577cb76f5ad 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -354,6 +354,10 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index c59ec34c80a5..eb51a78e1199 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -39,6 +39,18 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
WARN_ONCE(1, "not implemented");
}
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ return ERR_PTR(-ENODEV);
+}
+
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 138b0bb325cf..69048e73377d 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
ret = drm_gem_handle_create(file, &obj->gem, handle);
drm_gem_object_put_unlocked(&obj->gem);
- if (ret) {
- drm_gem_object_release(&obj->gem);
- kfree(obj);
+ if (ret)
return ERR_PTR(ret);
- }
return &obj->gem;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6165fe2c4504..1bfa353d995c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -546,29 +546,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
}
/**
- * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
- * taking place.
- * @dev: Pointer to the struct drm_device.
- *
- * Return: true if iommu present, false otherwise.
- */
-static bool vmw_assume_iommu(struct drm_device *dev)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev->dev);
-
- return !dma_is_direct(ops) && ops &&
- ops->map_page != dma_direct_map_page;
-}
-
-/**
* vmw_dma_select_mode - Determine how DMA mappings should be set up for this
* system.
*
* @dev_priv: Pointer to a struct vmw_private
*
- * This functions tries to determine the IOMMU setup and what actions
- * need to be taken by the driver to make system pages visible to the
- * device.
+ * This function tries to determine what actions need to be taken by the
+ * driver to make system pages visible to the device.
* If this function decides that DMA is not possible, it returns -EINVAL.
* The driver may then try to disable features of the device that require
* DMA.
@@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
static const char *names[vmw_dma_map_max] = {
[vmw_dma_phys] = "Using physical TTM page addresses.",
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
- [vmw_dma_map_populate] = "Keeping DMA mappings.",
+ [vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
if (vmw_force_coherent)
dev_priv->map_mode = vmw_dma_alloc_coherent;
- else if (vmw_assume_iommu(dev_priv->dev))
- dev_priv->map_mode = vmw_dma_map_populate;
- else if (!vmw_force_iommu)
- dev_priv->map_mode = vmw_dma_phys;
- else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
- dev_priv->map_mode = vmw_dma_alloc_coherent;
+ else if (vmw_restrict_iommu)
+ dev_priv->map_mode = vmw_dma_map_bind;
else
dev_priv->map_mode = vmw_dma_map_populate;
- if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
- dev_priv->map_mode = vmw_dma_map_bind;
-
/* No TTM coherent page pool? FIXME: Ask TTM instead! */
if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
(dev_priv->map_mode == vmw_dma_alloc_coherent))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b913a56f3426..2a9112515f46 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info)
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
- struct drm_display_mode *old_mode;
struct drm_display_mode *mode;
int ret;
- old_mode = par->set_mode;
mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
if (!mode) {
DRM_ERROR("Could not create new fb mode.\n");
@@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info)
mode->vdisplay = var->yres;
vmw_guess_mode_timing(mode);
- if (old_mode && drm_mode_equal(old_mode, mode)) {
- drm_mode_destroy(vmw_priv->dev, mode);
- mode = old_mode;
- old_mode = NULL;
- } else if (!vmw_kms_validate_mode_vram(vmw_priv,
+ if (!vmw_kms_validate_mode_vram(vmw_priv,
mode->hdisplay *
DIV_ROUND_UP(var->bits_per_pixel, 8),
mode->vdisplay)) {
@@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info)
schedule_delayed_work(&par->local_work, 0);
out_unlock:
- if (old_mode)
- drm_mode_destroy(vmw_priv->dev, old_mode);
+ if (par->set_mode)
+ drm_mode_destroy(vmw_priv->dev, par->set_mode);
par->set_mode = mode;
mutex_unlock(&par->bo_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b93c558dd86e..7da752ca1c34 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0)
- return id;
+ return (id != -ENOMEM ? 0 : id);
spin_lock(&gman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 31786b200afc..a3357ff7540d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -311,7 +311,13 @@ static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
- return sg_page_iter_dma_address(&viter->iter);
+ /*
+ * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and
+ * needs revision. See
+ * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/
+ */
+ return sg_page_iter_dma_address(
+ container_of(&viter->iter, struct sg_dma_page_iter, base));
}
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 27101c04a827..0c0eb43abf65 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -115,8 +115,12 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
static void host1x_channel_set_streamid(struct host1x_channel *channel)
{
#if HOST1X_HW >= 6
+ u32 sid = 0x7f;
+#ifdef CONFIG_IOMMU_API
struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
- u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
+ if (spec)
+ sid = spec->ids[0] & 0xffff;
+#endif
host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
#endif
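Editor's note: both the vic.c and channel_hw.c hunks guard IOMMU fwspec lookups so the code still builds, and falls back to a default stream ID, when CONFIG_IOMMU_API is off. A condensed sketch of the pattern:

```c
#include <linux/iommu.h>

/* Fall back to a default stream ID when the IOMMU API (and thus
 * dev_iommu_fwspec_get()) is compiled out or no fwspec is attached. */
static u32 lookup_stream_id(struct device *dev)
{
	u32 sid = 0x7f;	/* default/bypass stream ID */
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(dev);

	if (spec)
		sid = spec->ids[0] & 0xffff;
#endif
	return sid;
}
```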
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 163fadb8a33a..d047a6867c59 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -277,9 +277,10 @@ void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
-void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
+void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride,
+ u32 pixelformat)
{
- u32 ilo, sly;
+ u32 ilo, sly, sluv;
if (stride < 0) {
stride = -stride;
@@ -290,9 +291,30 @@ void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
sly = (stride * 2) - 1;
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ sluv = stride / 2 - 1;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ sluv = stride - 1;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ sluv = stride - 1;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ sluv = stride * 2 - 1;
+ break;
+ default:
+ sluv = 0;
+ break;
+ }
+
ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo);
ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly);
+ if (sluv)
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, sluv);
};
EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
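Editor's note: the interlaced-scan change derives a per-format chroma line stride alongside the doubled luma stride; the stored values appear to follow a stride-minus-one field encoding (an inference from the existing `sly = (stride * 2) - 1`). A standalone sketch of the mapping:

```c
#include <linux/videodev2.h>

/* Chroma line stride for the interlaced case, mirroring the switch
 * above; a return of 0 means "do not program the chroma stride". */
static u32 chroma_line_stride(u32 pixelformat, int stride)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:	/* planar 4:2:0 */
	case V4L2_PIX_FMT_YVU420:
		return stride / 2 - 1;
	case V4L2_PIX_FMT_NV12:		/* 4:2:0, interleaved CbCr */
	case V4L2_PIX_FMT_YUV422P:	/* planar 4:2:2 */
		return stride - 1;
	case V4L2_PIX_FMT_NV16:		/* 4:2:2, interleaved CbCr */
		return stride * 2 - 1;
	default:
		return 0;
	}
}
```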
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index aa0e30a2ba18..d1e575571a8d 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -325,12 +325,21 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
return 0;
}
+/* translate alternate field mode based on given standard */
+static inline enum v4l2_field
+ipu_csi_translate_field(enum v4l2_field field, v4l2_std_id std)
+{
+ return (field != V4L2_FIELD_ALTERNATE) ? field :
+ ((std & V4L2_STD_525_60) ?
+ V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB);
+}
+
/*
* Fill a CSI bus config struct from mbus_config and mbus_framefmt.
*/
static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
- struct v4l2_mbus_config *mbus_cfg,
- struct v4l2_mbus_framefmt *mbus_fmt)
+ const struct v4l2_mbus_config *mbus_cfg,
+ const struct v4l2_mbus_framefmt *mbus_fmt)
{
int ret;
@@ -374,22 +383,76 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
return 0;
}
+static int
+ipu_csi_set_bt_interlaced_codes(struct ipu_csi *csi,
+ const struct v4l2_mbus_framefmt *infmt,
+ const struct v4l2_mbus_framefmt *outfmt,
+ v4l2_std_id std)
+{
+ enum v4l2_field infield, outfield;
+ bool swap_fields;
+
+ /* get translated field type of input and output */
+ infield = ipu_csi_translate_field(infmt->field, std);
+ outfield = ipu_csi_translate_field(outfmt->field, std);
+
+ /*
+ * Write the H-V-F codes the CSI will match against the
+ * incoming data for start/end of active and blanking
+ * field intervals. If input and output field types are
+ * sequential but not the same (one is SEQ_BT and the other
+ * is SEQ_TB), swap the F-bit so that the CSI will capture
+ * field 1 lines before field 0 lines.
+ */
+ swap_fields = (V4L2_FIELD_IS_SEQUENTIAL(infield) &&
+ V4L2_FIELD_IS_SEQUENTIAL(outfield) &&
+ infield != outfield);
+
+ if (!swap_fields) {
+ /*
+ * Field0BlankEnd = 110, Field0BlankStart = 010
+ * Field0ActiveEnd = 100, Field0ActiveStart = 000
+ * Field1BlankEnd = 111, Field1BlankStart = 011
+ * Field1ActiveEnd = 101, Field1ActiveStart = 001
+ */
+ ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
+ } else {
+ dev_dbg(csi->ipu->dev, "capture field swap\n");
+
+ /* same as above but with F-bit inverted */
+ ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
+ }
+
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+
+ return 0;
+}
+
+
int ipu_csi_init_interface(struct ipu_csi *csi,
- struct v4l2_mbus_config *mbus_cfg,
- struct v4l2_mbus_framefmt *mbus_fmt)
+ const struct v4l2_mbus_config *mbus_cfg,
+ const struct v4l2_mbus_framefmt *infmt,
+ const struct v4l2_mbus_framefmt *outfmt)
{
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 width, height, data = 0;
+ v4l2_std_id std;
int ret;
- ret = fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+ ret = fill_csi_bus_cfg(&cfg, mbus_cfg, infmt);
if (ret < 0)
return ret;
/* set default sensor frame width and height */
- width = mbus_fmt->width;
- height = mbus_fmt->height;
+ width = infmt->width;
+ height = infmt->height;
+ if (infmt->field == V4L2_FIELD_ALTERNATE)
+ height *= 2;
/* Set the CSI_SENS_CONF register remaining fields */
data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
@@ -416,42 +479,22 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
break;
case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
- if (mbus_fmt->width == 720 && mbus_fmt->height == 576) {
- /*
- * PAL case
- *
- * Field0BlankEnd = 0x6, Field0BlankStart = 0x2,
- * Field0ActiveEnd = 0x4, Field0ActiveStart = 0
- * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
- * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
- */
- height = 625; /* framelines for PAL */
-
- ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
- CSI_CCIR_CODE_1);
- ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
- ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
- } else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) {
- /*
- * NTSC case
- *
- * Field0BlankEnd = 0x7, Field0BlankStart = 0x3,
- * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1
- * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
- * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
- */
- height = 525; /* framelines for NTSC */
-
- ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
- CSI_CCIR_CODE_1);
- ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
- ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+ if (width == 720 && height == 480) {
+ std = V4L2_STD_NTSC;
+ height = 525;
+ } else if (width == 720 && height == 576) {
+ std = V4L2_STD_PAL;
+ height = 625;
} else {
dev_err(csi->ipu->dev,
- "Unsupported CCIR656 interlaced video mode\n");
- spin_unlock_irqrestore(&csi->lock, flags);
- return -EINVAL;
+ "Unsupported interlaced video mode\n");
+ ret = -EINVAL;
+ goto out_unlock;
}
+
+ ret = ipu_csi_set_bt_interlaced_codes(csi, infmt, outfmt, std);
+ if (ret)
+ goto out_unlock;
break;
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
@@ -476,9 +519,10 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
+out_unlock:
spin_unlock_irqrestore(&csi->lock, flags);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
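Editor's note: the CSI rework keys the BT.656 F-bit programming off translated field types. The small helper added above resolves V4L2_FIELD_ALTERNATE via the video standard; a self-contained equivalent:

```c
#include <linux/videodev2.h>

/* V4L2_FIELD_ALTERNATE has no inherent order, so resolve it from the
 * standard: 525/60 (NTSC-like) is bottom-field-first, otherwise
 * top-field-first. */
static enum v4l2_field translate_field(enum v4l2_field field, v4l2_std_id std)
{
	if (field != V4L2_FIELD_ALTERNATE)
		return field;
	return (std & V4L2_STD_525_60) ? V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB;
}
```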
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index 9b2b3fa479c4..5e44ff1f2085 100644
--- a/drivers/gpu/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
DP_COM_CONF_CSC_DEF_BOTH);
} else {
- if (flow->foreground.in_cs == flow->out_cs)
+ if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+ flow->foreground.in_cs == flow->out_cs)
/*
* foreground identical to output, apply color
* conversion on background
@@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
struct ipu_dp_priv *priv = flow->priv;
u32 reg, csc;
+ dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+
if (!dp->foreground)
return;
@@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
reg = readl(flow->base + DP_COM_CONF);
csc = reg & DP_COM_CONF_CSC_DEF_MASK;
- if (csc == DP_COM_CONF_CSC_DEF_FG)
- reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+ if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+ reg |= DP_COM_CONF_CSC_DEF_BG;
reg &= ~DP_COM_CONF_FG_EN;
writel(reg, flow->base + DP_COM_CONF);
@@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
mutex_init(&priv->mutex);
for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+ priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+ priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
priv->flow[i].foreground.foreground = true;
priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
priv->flow[i].priv = priv;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index dc8e039bfab5..f2f3ef8af271 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -48,6 +48,8 @@
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/screen_info.h>
+#include <linux/vt.h>
+#include <linux/console.h>
#include <linux/uaccess.h>
@@ -168,6 +170,53 @@ void vga_set_default_device(struct pci_dev *pdev)
vga_default = pci_dev_get(pdev);
}
+/**
+ * vga_remove_vgacon - deactivate VGA console
+ * @pdev: PCI device
+ *
+ * Unbind and unregister vgacon in case pdev is the default VGA
+ * device. Can be called by GPU drivers on initialization to make
+ * sure VGA register access done by vgacon will not disturb the
+ * device.
+ */
+#if !defined(CONFIG_VGA_CONSOLE)
+int vga_remove_vgacon(struct pci_dev *pdev)
+{
+ return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+int vga_remove_vgacon(struct pci_dev *pdev)
+{
+ return -ENODEV;
+}
+#else
+int vga_remove_vgacon(struct pci_dev *pdev)
+{
+ int ret = 0;
+
+ if (pdev != vga_default)
+ return 0;
+ vgaarb_info(&pdev->dev, "deactivate vga console\n");
+
+ console_lock();
+ if (con_is_bound(&vga_con))
+ ret = do_take_over_console(&dummy_con, 0,
+ MAX_NR_CONSOLES - 1, 1);
+ if (ret == 0) {
+ ret = do_unregister_con_driver(&vga_con);
+
+ /* Ignore "already unregistered". */
+ if (ret == -ENODEV)
+ ret = 0;
+ }
+ console_unlock();
+
+ return ret;
+}
+#endif
+EXPORT_SYMBOL(vga_remove_vgacon);
+
static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
{
if (vgadev->irq_set_state)
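Editor's note: a hedged usage sketch for the new export above. A GPU driver for the primary VGA device can call vga_remove_vgacon() early in probe so vgacon stops touching registers the driver is about to program (my_gpu_probe is hypothetical):

```c
#include <linux/pci.h>
#include <linux/vgaarb.h>

/* vga_remove_vgacon() is a no-op unless pdev is the default VGA device. */
static int my_gpu_probe(struct pci_dev *pdev)
{
	int ret = vga_remove_vgacon(pdev);

	if (ret)
		return ret;
	/* safe to program VGA registers from here on */
	return 0;
}
```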
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6ca8d322b487..c3c390ca3690 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -150,6 +150,7 @@ config HID_ASUS
tristate "Asus"
depends on LEDS_CLASS
depends on ASUS_WMI || ASUS_WMI=n
+ select POWER_SUPPLY
---help---
Support for Asus notebook built-in keyboard and touchpad via i2c, and
the Asus Republic of Gamers laptop keyboard special keys.
@@ -230,6 +231,16 @@ config HID_COUGAR
Supported devices:
- Cougar 500k Gaming Keyboard
+config HID_MACALLY
+ tristate "Macally devices"
+ depends on HID
+ help
+ Support for Macally devices that are not fully compliant with the
+ HID standard.
+
+ Supported devices:
+ - Macally ikey keyboard
+
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
depends on HID && SND
@@ -510,6 +521,7 @@ config HID_LOGITECH
config HID_LOGITECH_DJ
tristate "Logitech Unifying receivers full support"
+ depends on USB_HID
depends on HIDRAW
depends on HID_LOGITECH
select HID_LOGITECH_HIDPP
@@ -1002,6 +1014,22 @@ config HID_UDRAW_PS3
Say Y here if you want to use the THQ uDraw gaming tablet for
the PS3.
+config HID_U2FZERO
+ tristate "U2F Zero LED and RNG support"
+ depends on USB_HID
+ depends on LEDS_CLASS
+ depends on HW_RANDOM
+ help
+ Support for the LED of the U2F Zero device.
+
+ U2F Zero supports custom commands for blinking the LED
+ and getting data from the internal hardware RNG.
+ The internal hardware can be used to feed the entropy pool.
+
+ U2F Zero only supports blinking its LED, so this driver doesn't
+ allow setting the brightness to anything but 1, which will
+ trigger a single blink and immediately reset back to 0.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 170163b41303..cc5d827c9164 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
+obj-$(CONFIG_HID_MACALLY) += hid-macally.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MALTRON) += hid-maltron.o
obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
@@ -109,6 +110,7 @@ obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
+obj-$(CONFIG_HID_U2FZERO) += hid-u2fzero.o
hid-uclogic-objs := hid-uclogic-core.o \
hid-uclogic-rdesc.o \
hid-uclogic-params.o
@@ -134,3 +136,4 @@ obj-$(CONFIG_USB_KBD) += usbhid/
obj-$(CONFIG_I2C_HID) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
+obj-$(CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9993b692598f..92387fc0bf18 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -30,6 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
+#include <linux/async.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@@ -218,13 +219,14 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
* Add a usage to the temporary parser table.
*/
-static int hid_add_usage(struct hid_parser *parser, unsigned usage)
+static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
if (parser->local.usage_index >= HID_MAX_USAGES) {
hid_err(parser->device, "usage index exceeded\n");
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
+ parser->local.usage_size[parser->local.usage_index] = size;
parser->local.collection_index[parser->local.usage_index] =
parser->collection_stack_ptr ?
parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
@@ -486,10 +488,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
- return hid_add_usage(parser, data);
+ return hid_add_usage(parser, data, item->size);
case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
@@ -498,9 +497,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
parser->local.usage_minimum = data;
return 0;
@@ -511,9 +507,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
count = data - parser->local.usage_minimum;
if (count + parser->local.usage_index >= HID_MAX_USAGES) {
/*
@@ -533,7 +526,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
for (n = parser->local.usage_minimum; n <= data; n++)
- if (hid_add_usage(parser, n)) {
+ if (hid_add_usage(parser, n, item->size)) {
dbg_hid("hid_add_usage failed\n");
return -1;
}
@@ -548,6 +541,22 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
/*
+ * Concatenate Usage Pages into Usages where relevant:
+ * As per specification, 6.2.2.8: "When the parser encounters a main item it
+ * concatenates the last declared Usage Page with a Usage to form a complete
+ * usage value."
+ */
+
+static void hid_concatenate_usage_page(struct hid_parser *parser)
+{
+ int i;
+
+ for (i = 0; i < parser->local.usage_index; i++)
+ if (parser->local.usage_size[i] <= 2)
+ parser->local.usage[i] += parser->global.usage_page << 16;
+}
+
+/*
* Process a main item.
*/
@@ -556,6 +565,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int ret;
+ hid_concatenate_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
@@ -765,6 +776,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int i;
+ hid_concatenate_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
@@ -1301,10 +1314,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
unsigned offset, unsigned n)
{
- if (n > 32) {
- hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
+ if (n > 256) {
+ hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
n, current->comm);
- n = 32;
+ n = 256;
}
return __extract(report, offset, n);
@@ -1624,7 +1637,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
* Implement a generic .request() callback, using .raw_request()
* DO NOT USE in hid drivers directly, but through hid_hw_request instead.
*/
-void __hid_request(struct hid_device *hid, struct hid_report *report,
+int __hid_request(struct hid_device *hid, struct hid_report *report,
int reqtype)
{
char *buf;
@@ -1633,7 +1646,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
- return;
+ return -ENOMEM;
len = hid_report_len(report);
@@ -1650,8 +1663,11 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
if (reqtype == HID_REQ_GET_REPORT)
hid_input_report(hid, report->type, buf, ret, 0);
+ ret = 0;
+
out:
kfree(buf);
+ return ret;
}
EXPORT_SYMBOL_GPL(__hid_request);
@@ -2349,6 +2365,15 @@ int hid_add_device(struct hid_device *hdev)
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
+ /*
+ * Try loading the module for the device before the add, so that we do
+ * not first have hid-generic binding only to have it replaced
+ * immediately afterwards with a specialized driver.
+ */
+ if (!current_is_async())
+ request_module("hid:b%04Xg%04Xv%08Xp%08X", hdev->bus,
+ hdev->group, hdev->vendor, hdev->product);
+
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)
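Editor's note: the hid-core.c changes defer Usage Page concatenation until a Main item is seen, per HID 1.11 section 6.2.2.8, by recording the size each usage was declared with. A simplified sketch of the deferred pass (struct local_usages is a stand-in for the parser's local table):

```c
#include <linux/types.h>

struct local_usages {
	unsigned int usage[8];	/* collected usage values */
	u8 usage_size[8];	/* byte size of the item that declared each */
	int usage_index;	/* number of collected usages */
};

/* Called when a Main item arrives: fold the current Usage Page into
 * every usage declared with 2 bytes or fewer; 4-byte usages already
 * carry their page in the upper 16 bits. */
static void concatenate_usage_page(struct local_usages *local,
				   unsigned int usage_page)
{
	int i;

	for (i = 0; i < local->usage_index; i++)
		if (local->usage_size[i] <= 2)
			local->usage[i] += usage_page << 16;
}
```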
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ac9fda1b5a72..1384e57182af 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
seq_printf(f, "\n\n");
/* dump parsed data and input mappings */
+ if (down_interruptible(&hdev->driver_input_lock))
+ return 0;
+
hid_dump_device(hdev, f);
seq_printf(f, "\n");
hid_dump_input_mapping(hdev, f);
+ up(&hdev->driver_input_lock);
+
return 0;
}
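Editor's note: the hid-debug fix takes driver_input_lock so the rdesc dump cannot race a concurrent re-parse. A sketch of the pattern; returning 0 on interruption just yields an empty read:

```c
#include <linux/semaphore.h>

/* Take the same lock the input path uses before dumping shared parser
 * state; down_interruptible() lets a signal abort the wait. */
static int dump_locked(struct semaphore *driver_input_lock)
{
	if (down_interruptible(driver_input_lock))
		return 0;	/* interrupted: show nothing rather than race */
	/* walk and print the protected state here */
	up(driver_input_lock);
	return 0;
}
```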
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6d93f4ad037..1c8c72093b5a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -323,6 +323,7 @@
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
#define USB_DEVICE_ID_CYGNAL_CP2112 0xea90
+#define USB_DEVICE_ID_U2F_ZERO 0x8acf
#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
@@ -762,8 +763,12 @@
#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
+#define USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER 0xc51b
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING 0xc539
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
@@ -1034,6 +1039,7 @@
#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
#define USB_VENDOR_ID_SOLID_YEAR 0x060b
+#define USB_DEVICE_ID_MACALLY_IKEY_KEYBOARD 0x0001
#define USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD 0x500a
#define USB_DEVICE_ID_COUGAR_700K_GAMING_KEYBOARD 0x700a
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b10b1922c5bd..46c6efea1404 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -680,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
+ if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */
+ switch (usage->hid & 0xf) {
+ case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+ default: goto ignore;
+ }
+ break;
+ }
+
/*
* Some lazy vendors declare 255 usages for System Control,
* leading to the creation of ABS_X|Y axis and too many others.
@@ -902,7 +910,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x06a: map_key_clear(KEY_GREEN); break;
case 0x06b: map_key_clear(KEY_BLUE); break;
case 0x06c: map_key_clear(KEY_YELLOW); break;
- case 0x06d: map_key_clear(KEY_ZOOM); break;
+ case 0x06d: map_key_clear(KEY_ASPECT_RATIO); break;
case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break;
case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break;
@@ -911,6 +919,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break;
case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break;
+ case 0x079: map_key_clear(KEY_KBDILLUMUP); break;
+ case 0x07a: map_key_clear(KEY_KBDILLUMDOWN); break;
+ case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE); break;
+
case 0x082: map_key_clear(KEY_VIDEO_NEXT); break;
case 0x083: map_key_clear(KEY_LAST); break;
case 0x084: map_key_clear(KEY_ENTER); break;
@@ -998,6 +1010,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x1b8: map_key_clear(KEY_VIDEO); break;
case 0x1bc: map_key_clear(KEY_MESSENGER); break;
case 0x1bd: map_key_clear(KEY_INFO); break;
+ case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
case 0x201: map_key_clear(KEY_NEW); break;
case 0x202: map_key_clear(KEY_OPEN); break;
case 0x203: map_key_clear(KEY_CLOSE); break;
@@ -1021,6 +1034,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x22d: map_key_clear(KEY_ZOOMIN); break;
case 0x22e: map_key_clear(KEY_ZOOMOUT); break;
case 0x22f: map_key_clear(KEY_ZOOMRESET); break;
+ case 0x232: map_key_clear(KEY_FULL_SCREEN); break;
case 0x233: map_key_clear(KEY_SCROLLUP); break;
case 0x234: map_key_clear(KEY_SCROLLDOWN); break;
case 0x238: /* AC Pan */
@@ -1044,6 +1058,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
+ case 0x29f: map_key_clear(KEY_SCALE); break;
+
default: map_key_clear(KEY_UNKNOWN);
}
break;
@@ -1541,52 +1557,71 @@ static void hidinput_close(struct input_dev *dev)
hid_hw_close(hid);
}
-static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+static bool __hidinput_change_resolution_multipliers(struct hid_device *hid,
+ struct hid_report *report, bool use_logical_max)
{
- struct hid_report_enum *rep_enum;
- struct hid_report *rep;
struct hid_usage *usage;
+ bool update_needed = false;
int i, j;
- rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
- list_for_each_entry(rep, &rep_enum->report_list, list) {
- bool update_needed = false;
+ if (report->maxfield == 0)
+ return false;
- if (rep->maxfield == 0)
- continue;
+ /*
+ * If we have more than one feature within this report we
+ * need to fill in the bits from the others before we can
+ * overwrite the ones for the Resolution Multiplier.
+ */
+ if (report->maxfield > 1) {
+ hid_hw_request(hid, report, HID_REQ_GET_REPORT);
+ hid_hw_wait(hid);
+ }
- /*
- * If we have more than one feature within this report we
- * need to fill in the bits from the others before we can
- * overwrite the ones for the Resolution Multiplier.
+ for (i = 0; i < report->maxfield; i++) {
+ __s32 value = use_logical_max ?
+ report->field[i]->logical_maximum :
+ report->field[i]->logical_minimum;
+
+ /* There is no good reason for a Resolution
+ * Multiplier to have a count other than 1.
+ * Ignore that case.
*/
- if (rep->maxfield > 1) {
- hid_hw_request(hid, rep, HID_REQ_GET_REPORT);
- hid_hw_wait(hid);
- }
+ if (report->field[i]->report_count != 1)
+ continue;
- for (i = 0; i < rep->maxfield; i++) {
- __s32 logical_max = rep->field[i]->logical_maximum;
+ for (j = 0; j < report->field[i]->maxusage; j++) {
+ usage = &report->field[i]->usage[j];
- /* There is no good reason for a Resolution
- * Multiplier to have a count other than 1.
- * Ignore that case.
- */
- if (rep->field[i]->report_count != 1)
+ if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
continue;
- for (j = 0; j < rep->field[i]->maxusage; j++) {
- usage = &rep->field[i]->usage[j];
+ report->field[i]->value[j] = value;
+ update_needed = true;
+ }
+ }
- if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
- continue;
+ return update_needed;
+}
- *rep->field[i]->value = logical_max;
- update_needed = true;
+static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+{
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ int ret;
+
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ bool update_needed = __hidinput_change_resolution_multipliers(hid,
+ rep, true);
+
+ if (update_needed) {
+ ret = __hid_request(hid, rep, HID_REQ_SET_REPORT);
+ if (ret) {
+ __hidinput_change_resolution_multipliers(hid,
+ rep, false);
+ return;
}
}
- if (update_needed)
- hid_hw_request(hid, rep, HID_REQ_SET_REPORT);
}
/* refresh our structs */
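Editor's note: the refactor above splits the multiplier write into a reusable pass so a failed SET_REPORT can be undone: program the logical maxima first (high-resolution scrolling), and on error rewrite the cached values to the logical minima so kernel state matches the device. A sketch, illustrative only since the two-pass helper is static to hid-input.c (the prototype below restates it for the example):

```c
#include <linux/hid.h>

bool __hidinput_change_resolution_multipliers(struct hid_device *hid,
					      struct hid_report *report,
					      bool use_logical_max);

/* Try max; on a failed SET_REPORT, roll the cached values back to min. */
static void set_multipliers(struct hid_device *hid, struct hid_report *rep)
{
	if (__hidinput_change_resolution_multipliers(hid, rep, true) &&
	    __hid_request(hid, rep, HID_REQ_SET_REPORT))
		__hidinput_change_resolution_multipliers(hid, rep, false);
}
```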
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 5d419a95b6c2..36d725fdb199 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -876,8 +876,6 @@ static const struct hid_device_id lg_devices[] = {
.driver_data = LG_RDESC | LG_WIRELESS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER),
.driver_data = LG_RDESC | LG_WIRELESS },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2),
- .driver_data = LG_RDESC | LG_WIRELESS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER),
.driver_data = LG_BAD_RELATIVE_KEYS },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 826fa1e1c8d9..b1e894618eed 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -25,13 +25,14 @@
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
-#include <linux/usb.h>
#include <linux/kfifo.h>
+#include <linux/delay.h>
+#include <linux/usb.h> /* For to_usb_interface for kvm extra intf check */
#include <asm/unaligned.h>
#include "hid-ids.h"
#define DJ_MAX_PAIRED_DEVICES 6
-#define DJ_MAX_NUMBER_NOTIFICATIONS 8
+#define DJ_MAX_NUMBER_NOTIFS 8
#define DJ_RECEIVER_INDEX 0
#define DJ_DEVICE_INDEX_MIN 1
#define DJ_DEVICE_INDEX_MAX 6
@@ -74,7 +75,6 @@
/* Device Un-Paired Notification */
#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
-
/* Connection Status Notification */
#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
#define CONNECTION_STATUS_PARAM_STATUS 0x00
@@ -94,12 +94,43 @@
#define REPORT_TYPE_LEDS 0x0E
/* RF Report types bitfield */
-#define STD_KEYBOARD 0x00000002
-#define STD_MOUSE 0x00000004
-#define MULTIMEDIA 0x00000008
-#define POWER_KEYS 0x00000010
-#define MEDIA_CENTER 0x00000100
-#define KBD_LEDS 0x00004000
+#define STD_KEYBOARD BIT(1)
+#define STD_MOUSE BIT(2)
+#define MULTIMEDIA BIT(3)
+#define POWER_KEYS BIT(4)
+#define MEDIA_CENTER BIT(8)
+#define KBD_LEDS BIT(14)
+/* Fake (bitnr > NUMBER_OF_HID_REPORTS) bit to track HID++ capability */
+#define HIDPP BIT_ULL(63)
+
+/* HID++ Device Connected Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_CONNECTED 0x41
+#define HIDPP_PARAM_PROTO_TYPE 0x00
+#define HIDPP_PARAM_DEVICE_INFO 0x01
+#define HIDPP_PARAM_EQUAD_LSB 0x02
+#define HIDPP_PARAM_EQUAD_MSB 0x03
+#define HIDPP_PARAM_27MHZ_DEVID 0x03
+#define HIDPP_DEVICE_TYPE_MASK GENMASK(3, 0)
+#define HIDPP_LINK_STATUS_MASK BIT(6)
+#define HIDPP_MANUFACTURER_MASK BIT(7)
+
+#define HIDPP_DEVICE_TYPE_KEYBOARD 1
+#define HIDPP_DEVICE_TYPE_MOUSE 2
+
+#define HIDPP_SET_REGISTER 0x80
+#define HIDPP_GET_LONG_REGISTER 0x83
+#define HIDPP_REG_CONNECTION_STATE 0x02
+#define HIDPP_REG_PAIRING_INFORMATION 0xB5
+#define HIDPP_PAIRING_INFORMATION 0x20
+#define HIDPP_FAKE_DEVICE_ARRIVAL 0x02
+
+enum recvr_type {
+ recvr_type_dj,
+ recvr_type_hidpp,
+ recvr_type_gaming_hidpp,
+ recvr_type_27mhz,
+ recvr_type_bluetooth,
+};
struct dj_report {
u8 report_id;
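Editor's note: the mask rewrite in the hunk above swaps literal hex values for BIT()/GENMASK(), which keep the bit positions explicit. For example:

```c
#include <linux/bits.h>

#define EXAMPLE_KBD_LEDS	BIT(14)		/* == 0x00004000 */
#define EXAMPLE_TYPE_MASK	GENMASK(3, 0)	/* == 0x0000000f */
```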
@@ -108,23 +139,51 @@ struct dj_report {
u8 report_params[DJREPORT_SHORT_LENGTH - 3];
};
+struct hidpp_event {
+ u8 report_id;
+ u8 device_index;
+ u8 sub_id;
+ u8 params[HIDPP_REPORT_LONG_LENGTH - 3U];
+} __packed;
+
struct dj_receiver_dev {
- struct hid_device *hdev;
+ struct hid_device *mouse;
+ struct hid_device *keyboard;
+ struct hid_device *hidpp;
struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
DJ_DEVICE_INDEX_MIN];
+ struct list_head list;
+ struct kref kref;
struct work_struct work;
struct kfifo notif_fifo;
+ unsigned long last_query; /* in jiffies */
+ bool ready;
+ enum recvr_type type;
+ unsigned int unnumbered_application;
spinlock_t lock;
- bool querying_devices;
};
struct dj_device {
struct hid_device *hdev;
struct dj_receiver_dev *dj_receiver_dev;
- u32 reports_supported;
+ u64 reports_supported;
u8 device_index;
};
+#define WORKITEM_TYPE_EMPTY 0
+#define WORKITEM_TYPE_PAIRED 1
+#define WORKITEM_TYPE_UNPAIRED 2
+#define WORKITEM_TYPE_UNKNOWN 255
+
+struct dj_workitem {
+ u8 type; /* WORKITEM_TYPE_* */
+ u8 device_index;
+ u8 device_type;
+ u8 quad_id_msb;
+ u8 quad_id_lsb;
+ u64 reports_supported;
+};
+
/* Keyboard descriptor (1) */
static const char kbd_descriptor[] = {
0x05, 0x01, /* USAGE_PAGE (generic Desktop) */
@@ -200,6 +259,131 @@ static const char mse_descriptor[] = {
0xC0, /* END_COLLECTION */
};
+/* Mouse descriptor (2) for 27 MHz receiver, only 8 buttons */
+static const char mse_27mhz_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x08, /* USAGE_MAX (8) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */
+ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */
+ 0x75, 0x0C, /* REPORT_SIZE (12) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Mouse descriptor (2) for Bluetooth receiver, low-res hwheel, 12 buttons */
+static const char mse_bluetooth_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x08, /* USAGE_MAX (8) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */
+ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */
+ 0x75, 0x0C, /* REPORT_SIZE (12) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x15, 0xF9, /* LOGICAL_MIN (-7) */
+ 0x25, 0x07, /* LOGICAL_MAX (7) */
+ 0x75, 0x04, /* REPORT_SIZE (4) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x09, /* USAGE_MIN (9) */
+ 0x29, 0x0C, /* USAGE_MAX (12) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x95, 0x04, /* REPORT_COUNT (4) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Gaming Mouse descriptor (2) */
+static const char mse_high_res_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x10, /* USAGE_MAX (16) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x10, /* REPORT_COUNT (16) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0x80, /* LOGICAL_MIN (-32767) */
+ 0x26, 0xFF, 0x7F, /* LOGICAL_MAX (32767) */
+ 0x75, 0x10, /* REPORT_SIZE (16) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
/* Consumer Control descriptor (3) */
static const char consumer_descriptor[] = {
0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
@@ -308,7 +492,7 @@ static const char hidpp_descriptor[] = {
/* Make sure all descriptors are present here */
#define MAX_RDESC_SIZE \
(sizeof(kbd_descriptor) + \
- sizeof(mse_descriptor) + \
+ sizeof(mse_bluetooth_descriptor) + \
sizeof(consumer_descriptor) + \
sizeof(syscontrol_descriptor) + \
sizeof(media_descriptor) + \
@@ -341,51 +525,160 @@ static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
static struct hid_ll_driver logi_dj_ll_driver;
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
+static void delayedwork_callback(struct work_struct *work);
+
+static LIST_HEAD(dj_hdev_list);
+static DEFINE_MUTEX(dj_hdev_list_lock);
+
+/*
+ * dj/HID++ receivers are really a single logical entity, but for BIOS/Windows
+ * compatibility they have multiple USB interfaces. On HID++ receivers we need
+ * to listen for input reports on both interfaces. The functions below are used
+ * to create a single struct dj_receiver_dev for all interfaces belonging to
+ * a single USB-device / receiver.
+ */
+static struct dj_receiver_dev *dj_find_receiver_dev(struct hid_device *hdev,
+ enum recvr_type type)
+{
+ struct dj_receiver_dev *djrcv_dev;
+ char sep;
+
+ /*
+ * The bluetooth receiver contains a built-in hub and has separate
+ * USB-devices for the keyboard and mouse interfaces.
+ */
+ sep = (type == recvr_type_bluetooth) ? '.' : '/';
+
+ /* Try to find an already-probed interface from the same device */
+ list_for_each_entry(djrcv_dev, &dj_hdev_list, list) {
+ if (djrcv_dev->mouse &&
+ hid_compare_device_paths(hdev, djrcv_dev->mouse, sep)) {
+ kref_get(&djrcv_dev->kref);
+ return djrcv_dev;
+ }
+ if (djrcv_dev->keyboard &&
+ hid_compare_device_paths(hdev, djrcv_dev->keyboard, sep)) {
+ kref_get(&djrcv_dev->kref);
+ return djrcv_dev;
+ }
+ if (djrcv_dev->hidpp &&
+ hid_compare_device_paths(hdev, djrcv_dev->hidpp, sep)) {
+ kref_get(&djrcv_dev->kref);
+ return djrcv_dev;
+ }
+ }
+
+ return NULL;
+}
+
+static void dj_release_receiver_dev(struct kref *kref)
+{
+ struct dj_receiver_dev *djrcv_dev = container_of(kref, struct dj_receiver_dev, kref);
+
+ list_del(&djrcv_dev->list);
+ kfifo_free(&djrcv_dev->notif_fifo);
+ kfree(djrcv_dev);
+}
+
+static void dj_put_receiver_dev(struct hid_device *hdev)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+
+ mutex_lock(&dj_hdev_list_lock);
+
+ if (djrcv_dev->mouse == hdev)
+ djrcv_dev->mouse = NULL;
+ if (djrcv_dev->keyboard == hdev)
+ djrcv_dev->keyboard = NULL;
+ if (djrcv_dev->hidpp == hdev)
+ djrcv_dev->hidpp = NULL;
+
+ kref_put(&djrcv_dev->kref, dj_release_receiver_dev);
+
+ mutex_unlock(&dj_hdev_list_lock);
+}
+
+static struct dj_receiver_dev *dj_get_receiver_dev(struct hid_device *hdev,
+ enum recvr_type type,
+ unsigned int application,
+ bool is_hidpp)
+{
+ struct dj_receiver_dev *djrcv_dev;
+
+ mutex_lock(&dj_hdev_list_lock);
+
+ djrcv_dev = dj_find_receiver_dev(hdev, type);
+ if (!djrcv_dev) {
+ djrcv_dev = kzalloc(sizeof(*djrcv_dev), GFP_KERNEL);
+ if (!djrcv_dev)
+ goto out;
+
+ INIT_WORK(&djrcv_dev->work, delayedwork_callback);
+ spin_lock_init(&djrcv_dev->lock);
+ if (kfifo_alloc(&djrcv_dev->notif_fifo,
+ DJ_MAX_NUMBER_NOTIFS * sizeof(struct dj_workitem),
+ GFP_KERNEL)) {
+ kfree(djrcv_dev);
+ djrcv_dev = NULL;
+ goto out;
+ }
+ kref_init(&djrcv_dev->kref);
+ list_add_tail(&djrcv_dev->list, &dj_hdev_list);
+ djrcv_dev->last_query = jiffies;
+ djrcv_dev->type = type;
+ }
+
+ if (application == HID_GD_KEYBOARD)
+ djrcv_dev->keyboard = hdev;
+ if (application == HID_GD_MOUSE)
+ djrcv_dev->mouse = hdev;
+ if (is_hidpp)
+ djrcv_dev->hidpp = hdev;
+
+ hid_set_drvdata(hdev, djrcv_dev);
+out:
+ mutex_unlock(&dj_hdev_list_lock);
+ return djrcv_dev;
+}
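The reference counting these helpers implement, sketched as a timeline (assumed two-interface receiver; probe and remove below follow this pattern):

/*
 * probe(intf 0)  -> dj_get_receiver_dev()  allocates,             kref = 1
 * probe(intf 1)  -> dj_get_receiver_dev()  finds intf 0's struct, kref = 2
 * remove(intf 0) -> dj_put_receiver_dev()  clears its slot,       kref = 1
 * remove(intf 1) -> dj_put_receiver_dev()  kref = 0, struct freed
 */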
static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
- struct dj_report *dj_report)
+ struct dj_workitem *workitem)
{
/* Called in delayed work context */
struct dj_device *dj_dev;
unsigned long flags;
spin_lock_irqsave(&djrcv_dev->lock, flags);
- dj_dev = djrcv_dev->paired_dj_devices[dj_report->device_index];
- djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ dj_dev = djrcv_dev->paired_dj_devices[workitem->device_index];
+ djrcv_dev->paired_dj_devices[workitem->device_index] = NULL;
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
if (dj_dev != NULL) {
hid_destroy_device(dj_dev->hdev);
kfree(dj_dev);
} else {
- dev_err(&djrcv_dev->hdev->dev, "%s: can't destroy a NULL device\n",
+ hid_err(djrcv_dev->hidpp, "%s: can't destroy a NULL device\n",
__func__);
}
}
static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
- struct dj_report *dj_report)
+ struct dj_workitem *workitem)
{
/* Called in delayed work context */
- struct hid_device *djrcv_hdev = djrcv_dev->hdev;
- struct usb_interface *intf = to_usb_interface(djrcv_hdev->dev.parent);
- struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct hid_device *djrcv_hdev = djrcv_dev->hidpp;
struct hid_device *dj_hiddev;
struct dj_device *dj_dev;
+ u8 device_index = workitem->device_index;
+ unsigned long flags;
/* Device index goes from 1 to 6; we need 3 bytes to store the
* colon, the index, and a null terminator
*/
unsigned char tmpstr[3];
- if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
- SPFUNCTION_DEVICE_LIST_EMPTY) {
- dbg_hid("%s: device list is empty\n", __func__);
- djrcv_dev->querying_devices = false;
- return;
- }
-
- if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
+ /* We are the only one ever adding a device, no need to lock */
+ if (djrcv_dev->paired_dj_devices[device_index]) {
/* The device is already known. No need to reallocate it. */
dbg_hid("%s: device is already known\n", __func__);
return;
@@ -393,8 +686,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
dj_hiddev = hid_allocate_device();
if (IS_ERR(dj_hiddev)) {
- dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
- __func__);
+ hid_err(djrcv_hdev, "%s: hid_allocate_dev failed\n", __func__);
return;
}
@@ -402,48 +694,67 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
dj_hiddev->dev.parent = &djrcv_hdev->dev;
dj_hiddev->bus = BUS_USB;
- dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor);
- dj_hiddev->product =
- (dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB]
- << 8) |
- dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB];
- snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
- "Logitech Unifying Device. Wireless PID:%04x",
- dj_hiddev->product);
-
- dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE;
-
- usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys));
- snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index);
+ dj_hiddev->vendor = djrcv_hdev->vendor;
+ dj_hiddev->product = (workitem->quad_id_msb << 8) |
+ workitem->quad_id_lsb;
+ if (workitem->device_type) {
+ const char *type_str = "Device";
+
+ switch (workitem->device_type) {
+ case 0x01: type_str = "Keyboard"; break;
+ case 0x02: type_str = "Mouse"; break;
+ case 0x03: type_str = "Numpad"; break;
+ case 0x04: type_str = "Presenter"; break;
+ case 0x07: type_str = "Remote Control"; break;
+ case 0x08: type_str = "Trackball"; break;
+ case 0x09: type_str = "Touchpad"; break;
+ }
+ snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+ "Logitech Wireless %s PID:%04x",
+ type_str, dj_hiddev->product);
+ } else {
+ snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+ "Logitech Unifying Device. Wireless PID:%04x",
+ dj_hiddev->product);
+ }
+
+ if (djrcv_dev->type == recvr_type_27mhz)
+ dj_hiddev->group = HID_GROUP_LOGITECH_27MHZ_DEVICE;
+ else
+ dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE;
+
+ memcpy(dj_hiddev->phys, djrcv_hdev->phys, sizeof(djrcv_hdev->phys));
+ snprintf(tmpstr, sizeof(tmpstr), ":%d", device_index);
strlcat(dj_hiddev->phys, tmpstr, sizeof(dj_hiddev->phys));
dj_dev = kzalloc(sizeof(struct dj_device), GFP_KERNEL);
if (!dj_dev) {
- dev_err(&djrcv_hdev->dev, "%s: failed allocating dj_device\n",
- __func__);
+ hid_err(djrcv_hdev, "%s: failed allocating dj_dev\n", __func__);
goto dj_device_allocate_fail;
}
- dj_dev->reports_supported = get_unaligned_le32(
- dj_report->report_params + DEVICE_PAIRED_RF_REPORT_TYPE);
+ dj_dev->reports_supported = workitem->reports_supported;
dj_dev->hdev = dj_hiddev;
dj_dev->dj_receiver_dev = djrcv_dev;
- dj_dev->device_index = dj_report->device_index;
+ dj_dev->device_index = device_index;
dj_hiddev->driver_data = dj_dev;
- djrcv_dev->paired_dj_devices[dj_report->device_index] = dj_dev;
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->paired_dj_devices[device_index] = dj_dev;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
if (hid_add_device(dj_hiddev)) {
- dev_err(&djrcv_hdev->dev, "%s: failed adding dj_device\n",
- __func__);
+ hid_err(djrcv_hdev, "%s: failed adding dj_device\n", __func__);
goto hid_add_device_fail;
}
return;
hid_add_device_fail:
- djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->paired_dj_devices[device_index] = NULL;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
kfree(dj_dev);
dj_device_allocate_fail:
hid_destroy_device(dj_hiddev);
@@ -454,7 +765,7 @@ static void delayedwork_callback(struct work_struct *work)
struct dj_receiver_dev *djrcv_dev =
container_of(work, struct dj_receiver_dev, work);
- struct dj_report dj_report;
+ struct dj_workitem workitem;
unsigned long flags;
int count;
int retval;
@@ -463,69 +774,231 @@ static void delayedwork_callback(struct work_struct *work)
spin_lock_irqsave(&djrcv_dev->lock, flags);
- count = kfifo_out(&djrcv_dev->notif_fifo, &dj_report,
- sizeof(struct dj_report));
-
- if (count != sizeof(struct dj_report)) {
- dev_err(&djrcv_dev->hdev->dev, "%s: workitem triggered without "
- "notifications available\n", __func__);
+ /*
+ * Since we attach to multiple interfaces, we may get scheduled before
+ * we are bound to the HID++ interface, catch this.
+ */
+ if (!djrcv_dev->ready) {
+ pr_warn("%s: delayedwork queued before hidpp interface was enumerated\n",
+ __func__);
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
return;
}
- if (!kfifo_is_empty(&djrcv_dev->notif_fifo)) {
- if (schedule_work(&djrcv_dev->work) == 0) {
- dbg_hid("%s: did not schedule the work item, was "
- "already queued\n", __func__);
- }
+ count = kfifo_out(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+
+ if (count != sizeof(workitem)) {
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ return;
}
+ if (!kfifo_is_empty(&djrcv_dev->notif_fifo))
+ schedule_work(&djrcv_dev->work);
+
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
- switch (dj_report.report_type) {
- case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
- logi_dj_recv_add_djhid_device(djrcv_dev, &dj_report);
+ switch (workitem.type) {
+ case WORKITEM_TYPE_PAIRED:
+ logi_dj_recv_add_djhid_device(djrcv_dev, &workitem);
break;
- case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
- logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
+ case WORKITEM_TYPE_UNPAIRED:
+ logi_dj_recv_destroy_djhid_device(djrcv_dev, &workitem);
break;
- default:
- /* A normal report (i. e. not belonging to a pair/unpair notification)
- * arriving here, means that the report arrived but we did not have a
- * paired dj_device associated to the report's device_index, this
- * means that the original "device paired" notification corresponding
- * to this dj_device never arrived to this driver. The reason is that
- * hid-core discards all packets coming from a device while probe() is
- * executing. */
- if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
- /* ok, we don't know the device, just re-ask the
- * receiver for the list of connected devices. */
+ case WORKITEM_TYPE_UNKNOWN:
retval = logi_dj_recv_query_paired_devices(djrcv_dev);
- if (!retval) {
- /* everything went fine, so just leave */
- break;
- }
- dev_err(&djrcv_dev->hdev->dev,
- "%s:logi_dj_recv_query_paired_devices "
- "error:%d\n", __func__, retval);
+ if (retval) {
+ hid_err(djrcv_dev->hidpp, "%s: logi_dj_recv_query_paired_devices error: %d\n",
+ __func__, retval);
}
- dbg_hid("%s: unexpected report type\n", __func__);
+ break;
+ case WORKITEM_TYPE_EMPTY:
+ dbg_hid("%s: device list is empty\n", __func__);
+ break;
}
}
+/*
+ * Sometimes we receive reports for which we do not have a paired dj_device
+ * associated with the device_index or report-type to forward the report to.
+ * This means that the original "device paired" notification corresponding
+ * to the dj_device never reached this driver. Possible reasons for this are:
+ * 1) hid-core discards all packets coming from a device during probe().
+ * 2) if the receiver is plugged into a KVM switch then the pairing reports
+ * are only forwarded to it if the focus is on this PC.
+ * This function deals with this by re-asking the receiver for the list of
+ * connected devices in the delayed work callback.
+ * This function MUST be called with djrcv->lock held.
+ */
+static void logi_dj_recv_queue_unknown_work(struct dj_receiver_dev *djrcv_dev)
+{
+ struct dj_workitem workitem = { .type = WORKITEM_TYPE_UNKNOWN };
+
+ /* Rate limit queries done because of unhandled reports to 2/sec */
+ if (time_before(jiffies, djrcv_dev->last_query + HZ / 2))
+ return;
+
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ schedule_work(&djrcv_dev->work);
+}
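On the arithmetic: HZ jiffies elapse per second, so last_query + HZ / 2 is half a second after the previous query, which yields the 2/sec limit, and time_before() uses signed subtraction so jiffies wrap-around is handled. A standalone sketch of the same pattern with a hypothetical tick source (the driver itself refreshes last_query in logi_dj_recv_query_paired_devices()):

#include <stdbool.h>

#define HZ 250	/* assumed tick rate for this sketch */

static unsigned long last_query;

/* allow at most 2 queries per second, wrap-safe like time_before() */
static bool query_allowed(unsigned long now_ticks)
{
	if ((long)(now_ticks - (last_query + HZ / 2)) < 0)
		return false;
	last_query = now_ticks;
	return true;
}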
+
static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
/* We are called from atomic context (tasklet && djrcv->lock held) */
+ struct dj_workitem workitem = {
+ .device_index = dj_report->device_index,
+ };
+
+ switch (dj_report->report_type) {
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ workitem.type = WORKITEM_TYPE_PAIRED;
+ if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
+ SPFUNCTION_DEVICE_LIST_EMPTY) {
+ workitem.type = WORKITEM_TYPE_EMPTY;
+ break;
+ }
+ /* fall-through */
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+ workitem.quad_id_msb =
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB];
+ workitem.quad_id_lsb =
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB];
+ workitem.reports_supported = get_unaligned_le32(
+ dj_report->report_params +
+ DEVICE_PAIRED_RF_REPORT_TYPE);
+ workitem.reports_supported |= HIDPP;
+ if (dj_report->report_type == REPORT_TYPE_NOTIF_DEVICE_UNPAIRED)
+ workitem.type = WORKITEM_TYPE_UNPAIRED;
+ break;
+ default:
+ logi_dj_recv_queue_unknown_work(djrcv_dev);
+ return;
+ }
- kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ schedule_work(&djrcv_dev->work);
+}
- if (schedule_work(&djrcv_dev->work) == 0) {
- dbg_hid("%s: did not schedule the work item, was already "
- "queued\n", __func__);
+static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
+ struct dj_workitem *workitem)
+{
+ workitem->type = WORKITEM_TYPE_PAIRED;
+ workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
+ HIDPP_DEVICE_TYPE_MASK;
+ workitem->quad_id_msb = hidpp_report->params[HIDPP_PARAM_EQUAD_MSB];
+ workitem->quad_id_lsb = hidpp_report->params[HIDPP_PARAM_EQUAD_LSB];
+ switch (workitem->device_type) {
+ case REPORT_TYPE_KEYBOARD:
+ workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA |
+ POWER_KEYS | MEDIA_CENTER |
+ HIDPP;
+ break;
+ case REPORT_TYPE_MOUSE:
+ workitem->reports_supported |= STD_MOUSE | HIDPP;
+ break;
+ }
+}
+
+static void logi_hidpp_dev_conn_notif_27mhz(struct hid_device *hdev,
+ struct hidpp_event *hidpp_report,
+ struct dj_workitem *workitem)
+{
+ workitem->type = WORKITEM_TYPE_PAIRED;
+ workitem->quad_id_lsb = hidpp_report->params[HIDPP_PARAM_27MHZ_DEVID];
+ switch (hidpp_report->device_index) {
+ case 1: /* Index 1 is always a mouse */
+ case 2: /* Index 2 is always a mouse */
+ workitem->device_type = HIDPP_DEVICE_TYPE_MOUSE;
+ workitem->reports_supported |= STD_MOUSE | HIDPP;
+ break;
+ case 3: /* Index 3 is always the keyboard */
+ case 4: /* Index 4 is used for an optional separate numpad */
+ workitem->device_type = HIDPP_DEVICE_TYPE_KEYBOARD;
+ workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA |
+ POWER_KEYS | HIDPP;
+ break;
+ default:
+ hid_warn(hdev, "%s: unexpected device-index %d", __func__,
+ hidpp_report->device_index);
}
}
+static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
+ struct hidpp_event *hidpp_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ const char *device_type = "UNKNOWN";
+ struct dj_workitem workitem = {
+ .type = WORKITEM_TYPE_EMPTY,
+ .device_index = hidpp_report->device_index,
+ };
+
+ switch (hidpp_report->params[HIDPP_PARAM_PROTO_TYPE]) {
+ case 0x01:
+ device_type = "Bluetooth";
+ /* Bluetooth connect packet contents are the same as (e)QUAD */
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ if (!(hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
+ HIDPP_MANUFACTURER_MASK)) {
+ hid_info(hdev, "Non Logitech device connected on slot %d\n",
+ hidpp_report->device_index);
+ workitem.reports_supported &= ~HIDPP;
+ }
+ break;
+ case 0x02:
+ device_type = "27 Mhz";
+ logi_hidpp_dev_conn_notif_27mhz(hdev, hidpp_report, &workitem);
+ break;
+ case 0x03:
+ device_type = "QUAD or eQUAD";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x04:
+ device_type = "eQUAD step 4 DJ";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x05:
+ device_type = "DFU Lite";
+ break;
+ case 0x06:
+ device_type = "eQUAD step 4 Lite";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x07:
+ device_type = "eQUAD step 4 Gaming";
+ break;
+ case 0x08:
+ device_type = "eQUAD step 4 for gamepads";
+ break;
+ case 0x0a:
+ device_type = "eQUAD nano Lite";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x0c:
+ device_type = "eQUAD Lightspeed";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ workitem.reports_supported |= STD_KEYBOARD;
+ break;
+ }
+
+ if (workitem.type == WORKITEM_TYPE_EMPTY) {
+ hid_warn(hdev,
+ "unusable device of type %s (0x%02x) connected on slot %d",
+ device_type,
+ hidpp_report->params[HIDPP_PARAM_PROTO_TYPE],
+ hidpp_report->device_index);
+ return;
+ }
+
+ hid_info(hdev, "device of type %s (0x%02x) connected on slot %d",
+ device_type, hidpp_report->params[HIDPP_PARAM_PROTO_TYPE],
+ hidpp_report->device_index);
+
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ schedule_work(&djrcv_dev->work);
+}
+
static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
@@ -552,8 +1025,8 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
}
}
-static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
- struct dj_report *dj_report)
+static void logi_dj_recv_forward_dj(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
{
/* We are called from atomic context (tasklet && djrcv->lock held) */
struct dj_device *dj_device;
@@ -573,18 +1046,48 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
}
}
-static void logi_dj_recv_forward_hidpp(struct dj_device *dj_dev, u8 *data,
- int size)
+static void logi_dj_recv_forward_report(struct dj_device *dj_dev, u8 *data,
+ int size)
{
/* We are called from atomic context (tasklet && djrcv->lock held) */
if (hid_input_report(dj_dev->hdev, HID_INPUT_REPORT, data, size, 1))
dbg_hid("hid_input_report error\n");
}
+static void logi_dj_recv_forward_input_report(struct hid_device *hdev,
+ u8 *data, int size)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_device *dj_dev;
+ unsigned long flags;
+ u8 report = data[0];
+ int i;
+
+ if (report > REPORT_TYPE_RFREPORT_LAST) {
+ hid_err(hdev, "Unexpected input report number %d\n", report);
+ return;
+ }
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+ dj_dev = djrcv_dev->paired_dj_devices[i];
+ if (dj_dev && (dj_dev->reports_supported & BIT(report))) {
+ logi_dj_recv_forward_report(dj_dev, data, size);
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ return;
+ }
+ }
+
+ logi_dj_recv_queue_unknown_work(djrcv_dev);
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ dbg_hid("No dj-devs handling input report number %d\n", report);
+}
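The routing rule here, spelled out: report number N (the first byte of a numbered input report) is forwarded to the first paired dj_device whose reports_supported mask has BIT(N) set.

/*
 * Example (sketch): a report starting with REPORT_TYPE_KEYBOARD lands on
 * the dj_device that advertised STD_KEYBOARD, because the
 * reports_supported bits line up with the DJ report numbers.
 */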
+
static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
- struct hid_device *hdev = djrcv_dev->hdev;
+ struct hid_device *hdev = djrcv_dev->hidpp;
struct hid_report *report;
struct hid_report_enum *output_report_enum;
u8 *data = (u8 *)(&dj_report->device_index);
@@ -594,7 +1097,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
if (!report) {
- dev_err(&hdev->dev, "%s: unable to find dj report\n", __func__);
+ hid_err(hdev, "%s: unable to find dj report\n", __func__);
return -ENODEV;
}
@@ -606,14 +1109,40 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
return 0;
}
+static int logi_dj_recv_query_hidpp_devices(struct dj_receiver_dev *djrcv_dev)
+{
+ const u8 template[] = {REPORT_ID_HIDPP_SHORT,
+ HIDPP_RECEIVER_INDEX,
+ HIDPP_SET_REGISTER,
+ HIDPP_REG_CONNECTION_STATE,
+ HIDPP_FAKE_DEVICE_ARRIVAL,
+ 0x00, 0x00};
+ u8 *hidpp_report;
+ int retval;
+
+ hidpp_report = kmemdup(template, sizeof(template), GFP_KERNEL);
+ if (!hidpp_report)
+ return -ENOMEM;
+
+ retval = hid_hw_raw_request(djrcv_dev->hidpp,
+ REPORT_ID_HIDPP_SHORT,
+ hidpp_report, sizeof(template),
+ HID_OUTPUT_REPORT,
+ HID_REQ_SET_REPORT);
+
+ kfree(hidpp_report);
+ return (retval < 0) ? retval : 0;
+}
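The template above is a HID++ 1.0 short register write, annotated byte by byte (a sketch assuming the driver's constants; only the two fixed framing bytes are restated numerically):

/*
 * byte 0: REPORT_ID_HIDPP_SHORT      (0x10, 7-byte HID++ report)
 * byte 1: HIDPP_RECEIVER_INDEX       (0xff, addressed to the receiver)
 * byte 2: HIDPP_SET_REGISTER         (sub-id: short register write)
 * byte 3: HIDPP_REG_CONNECTION_STATE (register address)
 * byte 4: HIDPP_FAKE_DEVICE_ARRIVAL  (replay "connect" notifications for
 *                                     every currently paired device)
 * bytes 5-6: unused parameters, zero
 */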
+
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
{
struct dj_report *dj_report;
int retval;
- /* no need to protect djrcv_dev->querying_devices */
- if (djrcv_dev->querying_devices)
- return 0;
+ djrcv_dev->last_query = jiffies;
+
+ if (djrcv_dev->type != recvr_type_dj)
+ return logi_dj_recv_query_hidpp_devices(djrcv_dev);
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
@@ -630,27 +1159,33 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
unsigned timeout)
{
- struct hid_device *hdev = djrcv_dev->hdev;
+ struct hid_device *hdev = djrcv_dev->hidpp;
struct dj_report *dj_report;
u8 *buf;
- int retval;
+ int retval = 0;
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
- dj_report->report_id = REPORT_ID_DJ_SHORT;
- dj_report->device_index = 0xFF;
- dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
- dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
- dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
- retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
- /*
- * Ugly sleep to work around a USB 3.0 bug when the receiver is still
- * processing the "switch-to-dj" command while we send an other command.
- * 50 msec should gives enough time to the receiver to be ready.
- */
- msleep(50);
+ if (djrcv_dev->type == recvr_type_dj) {
+ dj_report->report_id = REPORT_ID_DJ_SHORT;
+ dj_report->device_index = 0xFF;
+ dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+ dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+ dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] =
+ (u8)timeout;
+
+ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+
+ /*
+ * Ugly sleep to work around a USB 3.0 bug when the receiver is
+ * still processing the "switch-to-dj" command while we send
+ * another command.
+ * 50 msec should give the receiver enough time to be ready.
+ */
+ msleep(50);
+ }
/*
* Magical bits to set up hidpp notifications when the dj devices
@@ -682,22 +1217,28 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
static int logi_dj_ll_open(struct hid_device *hid)
{
- dbg_hid("%s:%s\n", __func__, hid->phys);
+ dbg_hid("%s: %s\n", __func__, hid->phys);
return 0;
}
static void logi_dj_ll_close(struct hid_device *hid)
{
- dbg_hid("%s:%s\n", __func__, hid->phys);
+ dbg_hid("%s: %s\n", __func__, hid->phys);
}
/*
* Register 0xB5 is "pairing information". It is solely intended for the
* receiver, so do not overwrite the device index.
*/
-static u8 unifying_pairing_query[] = {0x10, 0xff, 0x83, 0xb5};
-static u8 unifying_pairing_answer[] = {0x11, 0xff, 0x83, 0xb5};
+static u8 unifying_pairing_query[] = { REPORT_ID_HIDPP_SHORT,
+ HIDPP_RECEIVER_INDEX,
+ HIDPP_GET_LONG_REGISTER,
+ HIDPP_REG_PAIRING_INFORMATION };
+static u8 unifying_pairing_answer[] = { REPORT_ID_HIDPP_LONG,
+ HIDPP_RECEIVER_INDEX,
+ HIDPP_GET_LONG_REGISTER,
+ HIDPP_REG_PAIRING_INFORMATION };
static int logi_dj_ll_raw_request(struct hid_device *hid,
unsigned char reportnum, __u8 *buf,
@@ -721,13 +1262,23 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
buf[4] = (buf[4] & 0xf0) | (djdev->device_index - 1);
else
buf[1] = djdev->device_index;
- return hid_hw_raw_request(djrcv_dev->hdev, reportnum, buf,
+ return hid_hw_raw_request(djrcv_dev->hidpp, reportnum, buf,
count, report_type, reqtype);
}
if (buf[0] != REPORT_TYPE_LEDS)
return -EINVAL;
+ if (djrcv_dev->type != recvr_type_dj && count >= 2) {
+ if (!djrcv_dev->keyboard) {
+ hid_warn(hid, "Received REPORT_TYPE_LEDS request before the keyboard interface was enumerated\n");
+ return 0;
+ }
+ /* usbhid overrides the report ID and ignores the first byte */
+ return hid_hw_raw_request(djrcv_dev->keyboard, 0, buf, count,
+ report_type, reqtype);
+ }
+
out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC);
if (!out_buf)
return -ENOMEM;
@@ -739,7 +1290,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
out_buf[1] = djdev->device_index;
memcpy(out_buf + 2, buf, count);
- ret = hid_hw_raw_request(djrcv_dev->hdev, out_buf[0], out_buf,
+ ret = hid_hw_raw_request(djrcv_dev->hidpp, out_buf[0], out_buf,
DJREPORT_SHORT_LENGTH, report_type, reqtype);
kfree(out_buf);
@@ -769,41 +1320,55 @@ static int logi_dj_ll_parse(struct hid_device *hid)
return -ENOMEM;
if (djdev->reports_supported & STD_KEYBOARD) {
- dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
+ dbg_hid("%s: sending a kbd descriptor, reports_supported: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
}
if (djdev->reports_supported & STD_MOUSE) {
- dbg_hid("%s: sending a mouse descriptor, reports_supported: "
- "%x\n", __func__, djdev->reports_supported);
- rdcat(rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor));
+ dbg_hid("%s: sending a mouse descriptor, reports_supported: %llx\n",
+ __func__, djdev->reports_supported);
+ if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp)
+ rdcat(rdesc, &rsize, mse_high_res_descriptor,
+ sizeof(mse_high_res_descriptor));
+ else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
+ rdcat(rdesc, &rsize, mse_27mhz_descriptor,
+ sizeof(mse_27mhz_descriptor));
+ else if (djdev->dj_receiver_dev->type == recvr_type_bluetooth)
+ rdcat(rdesc, &rsize, mse_bluetooth_descriptor,
+ sizeof(mse_bluetooth_descriptor));
+ else
+ rdcat(rdesc, &rsize, mse_descriptor,
+ sizeof(mse_descriptor));
}
if (djdev->reports_supported & MULTIMEDIA) {
- dbg_hid("%s: sending a multimedia report descriptor: %x\n",
+ dbg_hid("%s: sending a multimedia report descriptor: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor));
}
if (djdev->reports_supported & POWER_KEYS) {
- dbg_hid("%s: sending a power keys report descriptor: %x\n",
+ dbg_hid("%s: sending a power keys report descriptor: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor));
}
if (djdev->reports_supported & MEDIA_CENTER) {
- dbg_hid("%s: sending a media center report descriptor: %x\n",
+ dbg_hid("%s: sending a media center report descriptor: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, media_descriptor, sizeof(media_descriptor));
}
if (djdev->reports_supported & KBD_LEDS) {
- dbg_hid("%s: need to send kbd leds report descriptor: %x\n",
+ dbg_hid("%s: need to send kbd leds report descriptor: %llx\n",
__func__, djdev->reports_supported);
}
- rdcat(rdesc, &rsize, hidpp_descriptor, sizeof(hidpp_descriptor));
+ if (djdev->reports_supported & HIDPP) {
+ rdcat(rdesc, &rsize, hidpp_descriptor,
+ sizeof(hidpp_descriptor));
+ }
retval = hid_parse_report(hid, rdesc, rsize);
kfree(rdesc);
@@ -866,7 +1431,7 @@ static int logi_dj_dj_event(struct hid_device *hdev,
* so ignore those reports too.
*/
if (dj_report->device_index != DJ_RECEIVER_INDEX)
- dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+ hid_err(hdev, "%s: invalid device index:%d\n",
__func__, dj_report->device_index);
return false;
}
@@ -893,7 +1458,7 @@ static int logi_dj_dj_event(struct hid_device *hdev,
}
break;
default:
- logi_dj_recv_forward_report(djrcv_dev, dj_report);
+ logi_dj_recv_forward_dj(djrcv_dev, dj_report);
}
out:
@@ -907,9 +1472,10 @@ static int logi_dj_hidpp_event(struct hid_device *hdev,
int size)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
- struct dj_report *dj_report = (struct dj_report *) data;
+ struct hidpp_event *hidpp_report = (struct hidpp_event *) data;
+ struct dj_device *dj_dev;
unsigned long flags;
- u8 device_index = dj_report->device_index;
+ u8 device_index = hidpp_report->device_index;
if (device_index == HIDPP_RECEIVER_INDEX) {
/* special case where the device wants to know its unifying
@@ -937,21 +1503,42 @@ static int logi_dj_hidpp_event(struct hid_device *hdev,
* This driver can ignore safely the receiver notifications,
* so ignore those reports too.
*/
- dev_err(&hdev->dev, "%s: invalid device index:%d\n",
- __func__, dj_report->device_index);
+ hid_err(hdev, "%s: invalid device index:%d\n", __func__,
+ hidpp_report->device_index);
return false;
}
spin_lock_irqsave(&djrcv_dev->lock, flags);
- if (!djrcv_dev->paired_dj_devices[device_index])
- /* received an event for an unknown device, bail out */
- goto out;
+ dj_dev = djrcv_dev->paired_dj_devices[device_index];
+
+ /*
+ * With 27 MHz receivers, we do not get an explicit unpair event, so
+ * remove the old device if the user has paired a *different* device.
+ */
+ if (djrcv_dev->type == recvr_type_27mhz && dj_dev &&
+ hidpp_report->sub_id == REPORT_TYPE_NOTIF_DEVICE_CONNECTED &&
+ hidpp_report->params[HIDPP_PARAM_PROTO_TYPE] == 0x02 &&
+ hidpp_report->params[HIDPP_PARAM_27MHZ_DEVID] !=
+ dj_dev->hdev->product) {
+ struct dj_workitem workitem = {
+ .device_index = hidpp_report->device_index,
+ .type = WORKITEM_TYPE_UNPAIRED,
+ };
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ /* logi_hidpp_recv_queue_notif will queue the work */
+ dj_dev = NULL;
+ }
- logi_dj_recv_forward_hidpp(djrcv_dev->paired_dj_devices[device_index],
- data, size);
+ if (dj_dev) {
+ logi_dj_recv_forward_report(dj_dev, data, size);
+ } else {
+ if (hidpp_report->sub_id == REPORT_TYPE_NOTIF_DEVICE_CONNECTED)
+ logi_hidpp_recv_queue_notif(hdev, hidpp_report);
+ else
+ logi_dj_recv_queue_unknown_work(djrcv_dev);
+ }
-out:
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
return false;
@@ -961,112 +1548,176 @@ static int logi_dj_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data,
int size)
{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
dbg_hid("%s, size:%d\n", __func__, size);
+ if (!djrcv_dev)
+ return 0;
+
+ if (!hdev->report_enum[HID_INPUT_REPORT].numbered) {
+
+ if (djrcv_dev->unnumbered_application == HID_GD_KEYBOARD) {
+ /*
+ * For the keyboard, we can reuse the same report by
+ * using the second byte, which is constant in the USB
+ * HID report descriptor.
+ */
+ data[1] = data[0];
+ data[0] = REPORT_TYPE_KEYBOARD;
+
+ logi_dj_recv_forward_input_report(hdev, data, size);
+
+ /* restore previous state */
+ data[0] = data[1];
+ data[1] = 0;
+ }
+ /* The 27 MHz mouse-only receiver sends unnumbered mouse data */
+ if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
+ size == 6) {
+ u8 mouse_report[7];
+
+ /* Prepend report id */
+ mouse_report[0] = REPORT_TYPE_MOUSE;
+ memcpy(mouse_report + 1, data, 6);
+ logi_dj_recv_forward_input_report(hdev, mouse_report, 7);
+ }
+
+ return false;
+ }
+
switch (data[0]) {
case REPORT_ID_DJ_SHORT:
if (size != DJREPORT_SHORT_LENGTH) {
- dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+ hid_err(hdev, "Short DJ report bad size (%d)", size);
+ return false;
+ }
+ return logi_dj_dj_event(hdev, report, data, size);
+ case REPORT_ID_DJ_LONG:
+ if (size != DJREPORT_LONG_LENGTH) {
+ hid_err(hdev, "Long DJ report bad size (%d)", size);
return false;
}
return logi_dj_dj_event(hdev, report, data, size);
case REPORT_ID_HIDPP_SHORT:
if (size != HIDPP_REPORT_SHORT_LENGTH) {
- dev_err(&hdev->dev,
- "Short HID++ report of bad size (%d)", size);
+ hid_err(hdev, "Short HID++ report bad size (%d)", size);
return false;
}
return logi_dj_hidpp_event(hdev, report, data, size);
case REPORT_ID_HIDPP_LONG:
if (size != HIDPP_REPORT_LONG_LENGTH) {
- dev_err(&hdev->dev,
- "Long HID++ report of bad size (%d)", size);
+ hid_err(hdev, "Long HID++ report bad size (%d)", size);
return false;
}
return logi_dj_hidpp_event(hdev, report, data, size);
}
+ logi_dj_recv_forward_input_report(hdev, data, size);
+
return false;
}
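A worked example of the in-place keyboard shuffle above, assuming a standard 8-byte boot-style report (modifiers, constant 0x00, six key codes) and REPORT_TYPE_KEYBOARD == 0x01:

/*
 * before:   02 00 04 00 00 00 00 00  (LeftShift held, 'a' pressed)
 * shuffled: 01 02 04 00 00 00 00 00  (report id, modifiers, key codes)
 * restored: 02 00 04 00 00 00 00 00
 *
 * The trick is safe only because byte 1 is constant in the descriptor,
 * so overwriting it with the old byte 0 loses no information.
 */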
static int logi_dj_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
struct dj_receiver_dev *djrcv_dev;
+ struct usb_interface *intf;
+ unsigned int no_dj_interfaces = 0;
+ bool has_hidpp = false;
+ unsigned long flags;
int retval;
- dbg_hid("%s called for ifnum %d\n", __func__,
- intf->cur_altsetting->desc.bInterfaceNumber);
+ /*
+ * Call usbhid to fetch the HID descriptors of the current interface,
+ * then call hid-core to parse the fetched descriptors.
+ */
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "%s: parse failed\n", __func__);
+ return retval;
+ }
- /* Ignore interfaces 0 and 1, they will not carry any data, dont create
- * any hid_device for them */
- if (intf->cur_altsetting->desc.bInterfaceNumber !=
- LOGITECH_DJ_INTERFACE_NUMBER) {
- dbg_hid("%s: ignoring ifnum %d\n", __func__,
- intf->cur_altsetting->desc.bInterfaceNumber);
+ /*
+ * Some KVMs add an extra interface for e.g. mouse emulation. If we
+ * treat these as logitech-dj interfaces, input events arriving through
+ * the extra interface are not reported correctly.
+ * To avoid this, we treat these as generic-hid devices.
+ */
+ switch (id->driver_data) {
+ case recvr_type_dj: no_dj_interfaces = 3; break;
+ case recvr_type_hidpp: no_dj_interfaces = 2; break;
+ case recvr_type_gaming_hidpp: no_dj_interfaces = 3; break;
+ case recvr_type_27mhz: no_dj_interfaces = 2; break;
+ case recvr_type_bluetooth: no_dj_interfaces = 2; break;
+ }
+ if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ intf = to_usb_interface(hdev->dev.parent);
+ if (intf && intf->altsetting->desc.bInterfaceNumber >=
+ no_dj_interfaces) {
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ }
+ }
+
+ rep_enum = &hdev->report_enum[HID_INPUT_REPORT];
+
+ /* no input reports, bail out */
+ if (list_empty(&rep_enum->report_list))
return -ENODEV;
+
+ /*
+ * Check for the HID++ application.
+ * Note: we should theoretically check for HID++ and DJ
+ * collections, but this will do.
+ */
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ if (rep->application == 0xff000001)
+ has_hidpp = true;
}
- /* Treat interface 2 */
+ /*
+ * Ignore interfaces without a DJ/HID++ collection: they will not
+ * carry any data, so don't create a hid_device for them.
+ */
+ if (!has_hidpp && id->driver_data == recvr_type_dj)
+ return -ENODEV;
- djrcv_dev = kzalloc(sizeof(struct dj_receiver_dev), GFP_KERNEL);
+ /* get the current application attached to the node */
+ rep = list_first_entry(&rep_enum->report_list, struct hid_report, list);
+ djrcv_dev = dj_get_receiver_dev(hdev, id->driver_data,
+ rep->application, has_hidpp);
if (!djrcv_dev) {
- dev_err(&hdev->dev,
- "%s:failed allocating dj_receiver_dev\n", __func__);
+ hid_err(hdev, "%s: dj_get_receiver_dev failed\n", __func__);
return -ENOMEM;
}
- djrcv_dev->hdev = hdev;
- INIT_WORK(&djrcv_dev->work, delayedwork_callback);
- spin_lock_init(&djrcv_dev->lock);
- if (kfifo_alloc(&djrcv_dev->notif_fifo,
- DJ_MAX_NUMBER_NOTIFICATIONS * sizeof(struct dj_report),
- GFP_KERNEL)) {
- dev_err(&hdev->dev,
- "%s:failed allocating notif_fifo\n", __func__);
- kfree(djrcv_dev);
- return -ENOMEM;
- }
- hid_set_drvdata(hdev, djrcv_dev);
- /* Call to usbhid to fetch the HID descriptors of interface 2 and
- * subsequently call to the hid/hid-core to parse the fetched
- * descriptors, this will in turn create the hidraw and hiddev nodes
- * for interface 2 of the receiver */
- retval = hid_parse(hdev);
- if (retval) {
- dev_err(&hdev->dev,
- "%s:parse of interface 2 failed\n", __func__);
- goto hid_parse_fail;
- }
-
- if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
- 0, DJREPORT_SHORT_LENGTH - 1)) {
- retval = -ENODEV;
- goto hid_parse_fail;
- }
+ if (!rep_enum->numbered)
+ djrcv_dev->unnumbered_application = rep->application;
/* Starts the usb device and connects to upper interfaces hiddev and
* hidraw */
- retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ retval = hid_hw_start(hdev, HID_CONNECT_HIDRAW|HID_CONNECT_HIDDEV);
if (retval) {
- dev_err(&hdev->dev,
- "%s:hid_hw_start returned error\n", __func__);
+ hid_err(hdev, "%s: hid_hw_start returned error\n", __func__);
goto hid_hw_start_fail;
}
- retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
- if (retval < 0) {
- dev_err(&hdev->dev,
- "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
- __func__, retval);
- goto switch_to_dj_mode_fail;
+ if (has_hidpp) {
+ retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+ if (retval < 0) {
+ hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ __func__, retval);
+ goto switch_to_dj_mode_fail;
+ }
}
/* This is enabling the polling urb on the IN endpoint */
retval = hid_hw_open(hdev);
if (retval < 0) {
- dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
+ hid_err(hdev, "%s: hid_hw_open returned error:%d\n",
__func__, retval);
goto llopen_failed;
}
@@ -1074,11 +1725,16 @@ static int logi_dj_probe(struct hid_device *hdev,
/* Allow incoming packets to arrive: */
hid_device_io_start(hdev);
- retval = logi_dj_recv_query_paired_devices(djrcv_dev);
- if (retval < 0) {
- dev_err(&hdev->dev, "%s:logi_dj_recv_query_paired_devices "
- "error:%d\n", __func__, retval);
- goto logi_dj_recv_query_paired_devices_failed;
+ if (has_hidpp) {
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->ready = true;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+ if (retval < 0) {
+ hid_err(hdev, "%s: logi_dj_recv_query_paired_devices error:%d\n",
+ __func__, retval);
+ goto logi_dj_recv_query_paired_devices_failed;
+ }
}
return retval;
@@ -1091,12 +1747,8 @@ switch_to_dj_mode_fail:
hid_hw_stop(hdev);
hid_hw_start_fail:
-hid_parse_fail:
- kfifo_free(&djrcv_dev->notif_fifo);
- kfree(djrcv_dev);
- hid_set_drvdata(hdev, NULL);
+ dj_put_receiver_dev(hdev);
return retval;
-
}
#ifdef CONFIG_PM
@@ -1105,10 +1757,12 @@ static int logi_dj_reset_resume(struct hid_device *hdev)
int retval;
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ if (!djrcv_dev || djrcv_dev->hidpp != hdev)
+ return 0;
+
retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
if (retval < 0) {
- dev_err(&hdev->dev,
- "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n",
__func__, retval);
}
@@ -1120,39 +1774,83 @@ static void logi_dj_remove(struct hid_device *hdev)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
struct dj_device *dj_dev;
+ unsigned long flags;
int i;
dbg_hid("%s\n", __func__);
+ if (!djrcv_dev)
+ return hid_hw_stop(hdev);
+
+ /*
+ * This ensures that if the work gets requeued from another
+ * interface of the same receiver it will be a no-op.
+ */
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->ready = false;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
cancel_work_sync(&djrcv_dev->work);
hid_hw_close(hdev);
hid_hw_stop(hdev);
- /* I suppose that at this point the only context that can access
- * the djrecv_data is this thread as the work item is guaranteed to
- * have finished and no more raw_event callbacks should arrive after
- * the remove callback was triggered so no locks are put around the
- * code below */
+ /*
+ * For proper operation we need access to all interfaces, so we destroy
+ * the paired devices when we're unbound from any interface.
+ *
+ * Note we may still be bound to other interfaces, sharing the same
+ * djrcv_dev, so we need locking here.
+ */
for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
dj_dev = djrcv_dev->paired_dj_devices[i];
+ djrcv_dev->paired_dj_devices[i] = NULL;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
if (dj_dev != NULL) {
hid_destroy_device(dj_dev->hdev);
kfree(dj_dev);
- djrcv_dev->paired_dj_devices[i] = NULL;
}
}
- kfifo_free(&djrcv_dev->notif_fifo);
- kfree(djrcv_dev);
- hid_set_drvdata(hdev, NULL);
+ dj_put_receiver_dev(hdev);
}
static const struct hid_device_id logi_dj_receivers[] = {
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER),
+ .driver_data = recvr_type_dj},
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
+ .driver_data = recvr_type_dj},
+ { /* Logitech Nano (non DJ) receiver */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
+ .driver_data = recvr_type_hidpp},
+ { /* Logitech Nano (non DJ) receiver */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
+ .driver_data = recvr_type_hidpp},
+ { /* Logitech gaming receiver (0xc539) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING),
+ .driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_S510_RECEIVER_2),
+ .driver_data = recvr_type_27mhz},
+ { /* Logitech 27 MHz HID++ 1.0 mouse-only receiver (0xc51b) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER),
+ .driver_data = recvr_type_27mhz},
+ { /* Logitech MX5000 HID++ / bluetooth receiver keyboard intf. */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc70e),
+ .driver_data = recvr_type_bluetooth},
+ { /* Logitech MX5000 HID++ / bluetooth receiver mouse intf. */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc70a),
+ .driver_data = recvr_type_bluetooth},
{}
};
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 15ed6177a7a3..72fc9c0566db 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -51,7 +51,11 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_REPORT_SHORT_LENGTH 7
#define HIDPP_REPORT_LONG_LENGTH 20
-#define HIDPP_REPORT_VERY_LONG_LENGTH 64
+#define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64
+
+#define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03
+#define HIDPP_SUB_ID_ROLLER 0x05
+#define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06
#define HIDPP_QUIRK_CLASS_WTP BIT(0)
#define HIDPP_QUIRK_CLASS_M560 BIT(1)
@@ -68,6 +72,13 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
+#define HIDPP_QUIRK_HIDPP_WHEELS BIT(29)
+#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(30)
+#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(31)
+
+/* These are just aliases for now */
+#define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
+#define HIDPP_QUIRK_KBD_ZOOM_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
/* Convenience constant to check for any high-res support. */
#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
@@ -106,13 +117,13 @@ MODULE_PARM_DESC(disable_tap_to_click,
struct fap {
u8 feature_index;
u8 funcindex_clientid;
- u8 params[HIDPP_REPORT_VERY_LONG_LENGTH - 4U];
+ u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U];
};
struct rap {
u8 sub_id;
u8 reg_address;
- u8 params[HIDPP_REPORT_VERY_LONG_LENGTH - 4U];
+ u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U];
};
struct hidpp_report {
@@ -149,7 +160,6 @@ struct hidpp_battery {
* @last_time: last event time, used to reset remainder after inactivity
*/
struct hidpp_scroll_counter {
- struct input_dev *dev;
int wheel_multiplier;
int remainder;
int direction;
@@ -158,10 +168,12 @@ struct hidpp_scroll_counter {
struct hidpp_device {
struct hid_device *hid_dev;
+ struct input_dev *input;
struct mutex send_mutex;
void *send_receive_buf;
char *name; /* will never be NULL and should not be freed */
wait_queue_head_t wait;
+ int very_long_report_length;
bool answer_available;
u8 protocol_major;
u8 protocol_minor;
@@ -206,8 +218,6 @@ static int __hidpp_send_report(struct hid_device *hdev,
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
int fields_count, ret;
- hidpp = hid_get_drvdata(hdev);
-
switch (hidpp_report->report_id) {
case REPORT_ID_HIDPP_SHORT:
fields_count = HIDPP_REPORT_SHORT_LENGTH;
@@ -216,7 +226,7 @@ static int __hidpp_send_report(struct hid_device *hdev,
fields_count = HIDPP_REPORT_LONG_LENGTH;
break;
case REPORT_ID_HIDPP_VERY_LONG:
- fields_count = HIDPP_REPORT_VERY_LONG_LENGTH;
+ fields_count = hidpp->very_long_report_length;
break;
default:
return -ENODEV;
@@ -342,7 +352,7 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
max_count = HIDPP_REPORT_LONG_LENGTH - 4;
break;
case REPORT_ID_HIDPP_VERY_LONG:
- max_count = HIDPP_REPORT_VERY_LONG_LENGTH - 4;
+ max_count = hidpp_dev->very_long_report_length - 4;
break;
default:
return -EINVAL;
@@ -434,14 +444,15 @@ static void hidpp_prefix_name(char **name, int name_length)
* emit low-resolution scroll events when appropriate for
* backwards-compatibility with userspace input libraries.
*/
-static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *counter,
+static void hidpp_scroll_counter_handle_scroll(struct input_dev *input_dev,
+ struct hidpp_scroll_counter *counter,
int hi_res_value)
{
int low_res_value, remainder, direction;
unsigned long long now, previous;
hi_res_value = hi_res_value * 120/counter->wheel_multiplier;
- input_report_rel(counter->dev, REL_WHEEL_HI_RES, hi_res_value);
+ input_report_rel(input_dev, REL_WHEEL_HI_RES, hi_res_value);
remainder = counter->remainder;
direction = hi_res_value > 0 ? 1 : -1;
@@ -475,7 +486,7 @@ static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *coun
low_res_value = remainder / 120;
if (low_res_value == 0)
low_res_value = (hi_res_value > 0 ? 1 : -1);
- input_report_rel(counter->dev, REL_WHEEL, low_res_value);
+ input_report_rel(input_dev, REL_WHEEL, low_res_value);
remainder -= low_res_value * 120;
}
counter->remainder = remainder;
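Worked numbers for the normalization above, where the multiplier scales raw units so one wheel detent always equals 120 hi-res units:

/*
 * Example (sketch): with wheel_multiplier = 8, a raw event of +2 becomes
 * 2 * 120 / 8 = 30 REL_WHEEL_HI_RES units. After four such events the
 * remainder reaches 120, one legacy REL_WHEEL click is emitted, and 120
 * is subtracted from the remainder.
 */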
@@ -491,14 +502,16 @@ static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *coun
#define HIDPP_GET_LONG_REGISTER 0x83
/**
- * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register.
+ * hidpp10_set_register - Modify a HID++ 1.0 register.
* @hidpp_dev: the device to set the register on.
* @register_address: the address of the register to modify.
* @byte: the byte of the register to modify. Should be less than 3.
+ * @mask: mask of the bits to modify.
+ * @value: new value for the bits in mask.
* Return: 0 if successful, otherwise a negative error code.
*/
-static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
- u8 register_address, u8 byte, u8 bit)
+static int hidpp10_set_register(struct hidpp_device *hidpp_dev,
+ u8 register_address, u8 byte, u8 mask, u8 value)
{
struct hidpp_report response;
int ret;
@@ -514,7 +527,8 @@ static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
memcpy(params, response.rap.params, 3);
- params[byte] |= BIT(bit);
+ params[byte] &= ~mask;
+ params[byte] |= value & mask;
return hidpp_send_rap_command_sync(hidpp_dev,
REPORT_ID_HIDPP_SHORT,
@@ -523,20 +537,28 @@ static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
params, 3, &response);
}
-
-#define HIDPP_REG_GENERAL 0x00
+#define HIDPP_REG_ENABLE_REPORTS 0x00
+#define HIDPP_ENABLE_CONSUMER_REPORT BIT(0)
+#define HIDPP_ENABLE_WHEEL_REPORT BIT(2)
+#define HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT BIT(3)
+#define HIDPP_ENABLE_BAT_REPORT BIT(4)
+#define HIDPP_ENABLE_HWHEEL_REPORT BIT(5)
static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
{
- return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
+ return hidpp10_set_register(hidpp_dev, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_BAT_REPORT, HIDPP_ENABLE_BAT_REPORT);
}
#define HIDPP_REG_FEATURES 0x01
+#define HIDPP_ENABLE_SPECIAL_BUTTON_FUNC BIT(1)
+#define HIDPP_ENABLE_FAST_SCROLL BIT(6)
/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
{
- return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
+ return hidpp10_set_register(hidpp_dev, HIDPP_REG_FEATURES, 0,
+ HIDPP_ENABLE_FAST_SCROLL, HIDPP_ENABLE_FAST_SCROLL);
}
#define HIDPP_REG_BATTERY_STATUS 0x07
@@ -741,6 +763,9 @@ static char *hidpp_unifying_get_name(struct hidpp_device *hidpp_dev)
if (2 + len > sizeof(response.rap.params))
return NULL;
+ if (len < 4) /* logitech devices are usually at least Xddd */
+ return NULL;
+
name = kzalloc(len + 1, GFP_KERNEL);
if (!name)
return NULL;
@@ -836,18 +861,21 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
{
+ const u8 ping_byte = 0x5a;
+ u8 ping_data[3] = { 0, 0, ping_byte };
struct hidpp_report response;
int ret;
- ret = hidpp_send_fap_command_sync(hidpp,
+ ret = hidpp_send_rap_command_sync(hidpp,
+ REPORT_ID_HIDPP_SHORT,
HIDPP_PAGE_ROOT_IDX,
CMD_ROOT_GET_PROTOCOL_VERSION,
- NULL, 0, &response);
+ ping_data, sizeof(ping_data), &response);
if (ret == HIDPP_ERROR_INVALID_SUBID) {
hidpp->protocol_major = 1;
hidpp->protocol_minor = 0;
- return 0;
+ goto print_version;
}
/* the device might not be connected */
@@ -862,21 +890,19 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
if (ret)
return ret;
- hidpp->protocol_major = response.fap.params[0];
- hidpp->protocol_minor = response.fap.params[1];
-
- return ret;
-}
+ if (response.rap.params[2] != ping_byte) {
+ hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n",
+ __func__, response.rap.params[2], ping_byte);
+ return -EPROTO;
+ }
-static bool hidpp_is_connected(struct hidpp_device *hidpp)
-{
- int ret;
+ hidpp->protocol_major = response.rap.params[0];
+ hidpp->protocol_minor = response.rap.params[1];
- ret = hidpp_root_get_protocol_version(hidpp);
- if (!ret)
- hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
- return ret == 0;
+print_version:
+ hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+ return 0;
}
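The exchange this implements, sketched; byte values other than the framing bytes and the 0x5a ping are left symbolic (fn stands for the funcindex/sw-id byte):

/*
 * host    -> { 0x10, 0xff, 0x00, fn, 0x00, 0x00, 0x5a }  root feature,
 *            getProtocolVersion, ping byte in the last parameter
 * HID++ 2 <- params { major, minor, 0x5a }                ping echoed back
 * HID++ 1 <- ERR_INVALID_SUBID error report               -> assume 1.0
 */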
/* -------------------------------------------------------------------------- */
@@ -932,7 +958,7 @@ static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp,
switch (response.report_id) {
case REPORT_ID_HIDPP_VERY_LONG:
- count = HIDPP_REPORT_VERY_LONG_LENGTH - 4;
+ count = hidpp->very_long_report_length - 4;
break;
case REPORT_ID_HIDPP_LONG:
count = HIDPP_REPORT_LONG_LENGTH - 4;
@@ -1012,7 +1038,11 @@ static int hidpp_map_battery_level(int capacity)
{
if (capacity < 11)
return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
- else if (capacity < 31)
+ /*
+ * The spec says this should be < 31 but some devices report 30
+ * with brand new batteries and Windows reports 30 as "Good".
+ */
+ else if (capacity < 30)
return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
else if (capacity < 81)
return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
@@ -2111,6 +2141,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
kfree(data);
return -ENOMEM;
}
+ data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+ if (!data->wq) {
+ kfree(data->effect_ids);
+ kfree(data);
+ return -ENOMEM;
+ }
+
data->hidpp = hidpp;
data->feature_index = feature_index;
data->version = version;
@@ -2155,7 +2192,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
/* ignore boost value at response.fap.params[2] */
/* init the hardware command queue */
- data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
atomic_set(&data->workqueue_size, 0);
/* initialize with zero autocenter to get wheel in usable state */
@@ -2205,7 +2241,6 @@ static int hidpp_ff_deinit(struct hid_device *hid)
#define WTP_MANUAL_RESOLUTION 39
struct wtp_data {
- struct input_dev *input;
u16 x_size, y_size;
u8 finger_count;
u8 mt_feature_index;
@@ -2223,7 +2258,7 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
static void wtp_populate_input(struct hidpp_device *hidpp,
- struct input_dev *input_dev, bool origin_is_hid_core)
+ struct input_dev *input_dev)
{
struct wtp_data *wd = hidpp->private_data;
@@ -2249,31 +2284,30 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
input_mt_init_slots(input_dev, wd->maxcontacts, INPUT_MT_POINTER |
INPUT_MT_DROP_UNUSED);
-
- wd->input = input_dev;
}
-static void wtp_touch_event(struct wtp_data *wd,
+static void wtp_touch_event(struct hidpp_device *hidpp,
struct hidpp_touchpad_raw_xy_finger *touch_report)
{
+ struct wtp_data *wd = hidpp->private_data;
int slot;
if (!touch_report->finger_id || touch_report->contact_type)
/* no actual data */
return;
- slot = input_mt_get_slot_by_key(wd->input, touch_report->finger_id);
+ slot = input_mt_get_slot_by_key(hidpp->input, touch_report->finger_id);
- input_mt_slot(wd->input, slot);
- input_mt_report_slot_state(wd->input, MT_TOOL_FINGER,
+ input_mt_slot(hidpp->input, slot);
+ input_mt_report_slot_state(hidpp->input, MT_TOOL_FINGER,
touch_report->contact_status);
if (touch_report->contact_status) {
- input_event(wd->input, EV_ABS, ABS_MT_POSITION_X,
+ input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_X,
touch_report->x);
- input_event(wd->input, EV_ABS, ABS_MT_POSITION_Y,
+ input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_Y,
wd->flip_y ? wd->y_size - touch_report->y :
touch_report->y);
- input_event(wd->input, EV_ABS, ABS_MT_PRESSURE,
+ input_event(hidpp->input, EV_ABS, ABS_MT_PRESSURE,
touch_report->area);
}
}
@@ -2281,19 +2315,18 @@ static void wtp_touch_event(struct wtp_data *wd,
static void wtp_send_raw_xy_event(struct hidpp_device *hidpp,
struct hidpp_touchpad_raw_xy *raw)
{
- struct wtp_data *wd = hidpp->private_data;
int i;
for (i = 0; i < 2; i++)
- wtp_touch_event(wd, &(raw->fingers[i]));
+ wtp_touch_event(hidpp, &(raw->fingers[i]));
if (raw->end_of_frame &&
!(hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS))
- input_event(wd->input, EV_KEY, BTN_LEFT, raw->button);
+ input_event(hidpp->input, EV_KEY, BTN_LEFT, raw->button);
if (raw->end_of_frame || raw->finger_count <= 2) {
- input_mt_sync_frame(wd->input);
- input_sync(wd->input);
+ input_mt_sync_frame(hidpp->input);
+ input_sync(hidpp->input);
}
}
@@ -2343,7 +2376,7 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
struct hidpp_report *report = (struct hidpp_report *)data;
struct hidpp_touchpad_raw_xy raw;
- if (!wd || !wd->input)
+ if (!wd || !hidpp->input)
return 1;
switch (data[0]) {
@@ -2354,11 +2387,11 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
return 1;
}
if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
- input_event(wd->input, EV_KEY, BTN_LEFT,
+ input_event(hidpp->input, EV_KEY, BTN_LEFT,
!!(data[1] & 0x01));
- input_event(wd->input, EV_KEY, BTN_RIGHT,
+ input_event(hidpp->input, EV_KEY, BTN_RIGHT,
!!(data[1] & 0x02));
- input_sync(wd->input);
+ input_sync(hidpp->input);
return 0;
} else {
if (size < 21)
@@ -2476,10 +2509,6 @@ static int wtp_connect(struct hid_device *hdev, bool connected)
static const u8 m560_config_parameter[] = {0x00, 0xaf, 0x03};
-struct m560_private_data {
- struct input_dev *input;
-};
-
/* how buttons are mapped in the report */
#define M560_MOUSE_BTN_LEFT 0x01
#define M560_MOUSE_BTN_RIGHT 0x02
@@ -2507,28 +2536,12 @@ static int m560_send_config_command(struct hid_device *hdev, bool connected)
);
}
-static int m560_allocate(struct hid_device *hdev)
-{
- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct m560_private_data *d;
-
- d = devm_kzalloc(&hdev->dev, sizeof(struct m560_private_data),
- GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- hidpp->private_data = d;
-
- return 0;
-};
-
static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct m560_private_data *mydata = hidpp->private_data;
/* sanity check */
- if (!mydata || !mydata->input) {
+ if (!hidpp->input) {
hid_err(hdev, "error in parameter\n");
return -EINVAL;
}
@@ -2555,24 +2568,24 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
switch (data[5]) {
case 0xaf:
- input_report_key(mydata->input, BTN_MIDDLE, 1);
+ input_report_key(hidpp->input, BTN_MIDDLE, 1);
break;
case 0xb0:
- input_report_key(mydata->input, BTN_FORWARD, 1);
+ input_report_key(hidpp->input, BTN_FORWARD, 1);
break;
case 0xae:
- input_report_key(mydata->input, BTN_BACK, 1);
+ input_report_key(hidpp->input, BTN_BACK, 1);
break;
case 0x00:
- input_report_key(mydata->input, BTN_BACK, 0);
- input_report_key(mydata->input, BTN_FORWARD, 0);
- input_report_key(mydata->input, BTN_MIDDLE, 0);
+ input_report_key(hidpp->input, BTN_BACK, 0);
+ input_report_key(hidpp->input, BTN_FORWARD, 0);
+ input_report_key(hidpp->input, BTN_MIDDLE, 0);
break;
default:
hid_err(hdev, "error in report\n");
return 0;
}
- input_sync(mydata->input);
+ input_sync(hidpp->input);
} else if (data[0] == 0x02) {
/*
@@ -2586,58 +2599,55 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
int v;
- input_report_key(mydata->input, BTN_LEFT,
+ input_report_key(hidpp->input, BTN_LEFT,
!!(data[1] & M560_MOUSE_BTN_LEFT));
- input_report_key(mydata->input, BTN_RIGHT,
+ input_report_key(hidpp->input, BTN_RIGHT,
!!(data[1] & M560_MOUSE_BTN_RIGHT));
if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) {
- input_report_rel(mydata->input, REL_HWHEEL, -1);
- input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+ input_report_rel(hidpp->input, REL_HWHEEL, -1);
+ input_report_rel(hidpp->input, REL_HWHEEL_HI_RES,
-120);
} else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) {
- input_report_rel(mydata->input, REL_HWHEEL, 1);
- input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+ input_report_rel(hidpp->input, REL_HWHEEL, 1);
+ input_report_rel(hidpp->input, REL_HWHEEL_HI_RES,
120);
}
v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12);
- input_report_rel(mydata->input, REL_X, v);
+ input_report_rel(hidpp->input, REL_X, v);
v = hid_snto32(hid_field_extract(hdev, data+3, 12, 12), 12);
- input_report_rel(mydata->input, REL_Y, v);
+ input_report_rel(hidpp->input, REL_Y, v);
v = hid_snto32(data[6], 8);
- hidpp_scroll_counter_handle_scroll(
- &hidpp->vertical_wheel_counter, v);
+ if (v != 0)
+ hidpp_scroll_counter_handle_scroll(hidpp->input,
+ &hidpp->vertical_wheel_counter, v);
- input_sync(mydata->input);
+ input_sync(hidpp->input);
}
return 1;
}
static void m560_populate_input(struct hidpp_device *hidpp,
- struct input_dev *input_dev, bool origin_is_hid_core)
+ struct input_dev *input_dev)
{
- struct m560_private_data *mydata = hidpp->private_data;
-
- mydata->input = input_dev;
-
- __set_bit(EV_KEY, mydata->input->evbit);
- __set_bit(BTN_MIDDLE, mydata->input->keybit);
- __set_bit(BTN_RIGHT, mydata->input->keybit);
- __set_bit(BTN_LEFT, mydata->input->keybit);
- __set_bit(BTN_BACK, mydata->input->keybit);
- __set_bit(BTN_FORWARD, mydata->input->keybit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_BACK, input_dev->keybit);
+ __set_bit(BTN_FORWARD, input_dev->keybit);
- __set_bit(EV_REL, mydata->input->evbit);
- __set_bit(REL_X, mydata->input->relbit);
- __set_bit(REL_Y, mydata->input->relbit);
- __set_bit(REL_WHEEL, mydata->input->relbit);
- __set_bit(REL_HWHEEL, mydata->input->relbit);
- __set_bit(REL_WHEEL_HI_RES, mydata->input->relbit);
- __set_bit(REL_HWHEEL_HI_RES, mydata->input->relbit);
+ __set_bit(EV_REL, input_dev->evbit);
+ __set_bit(REL_X, input_dev->relbit);
+ __set_bit(REL_Y, input_dev->relbit);
+ __set_bit(REL_WHEEL, input_dev->relbit);
+ __set_bit(REL_HWHEEL, input_dev->relbit);
+ __set_bit(REL_WHEEL_HI_RES, input_dev->relbit);
+ __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit);
}
static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -2740,6 +2750,175 @@ static int g920_get_config(struct hidpp_device *hidpp)
}
/* -------------------------------------------------------------------------- */
+/* HID++1.0 devices which use HID++ reports for their wheels */
+/* -------------------------------------------------------------------------- */
+static int hidpp10_wheel_connect(struct hidpp_device *hidpp)
+{
+ return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT,
+ HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT);
+}
+
+static int hidpp10_wheel_raw_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ s8 value, hvalue;
+
+ if (!hidpp->input)
+ return -EINVAL;
+
+ if (size < 7)
+ return 0;
+
+ if (data[0] != REPORT_ID_HIDPP_SHORT || data[2] != HIDPP_SUB_ID_ROLLER)
+ return 0;
+
+ value = data[3];
+ hvalue = data[4];
+
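+	/*
+	 * The kernel's high-resolution scroll convention is 120 units per
+	 * wheel detent, so the whole-detent deltas reported by these
+	 * devices are scaled by 120 for the *_HI_RES axes below.
+	 */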
+ input_report_rel(hidpp->input, REL_WHEEL, value);
+ input_report_rel(hidpp->input, REL_WHEEL_HI_RES, value * 120);
+ input_report_rel(hidpp->input, REL_HWHEEL, hvalue);
+ input_report_rel(hidpp->input, REL_HWHEEL_HI_RES, hvalue * 120);
+ input_sync(hidpp->input);
+
+ return 1;
+}
+
+static void hidpp10_wheel_populate_input(struct hidpp_device *hidpp,
+ struct input_dev *input_dev)
+{
+ __set_bit(EV_REL, input_dev->evbit);
+ __set_bit(REL_WHEEL, input_dev->relbit);
+ __set_bit(REL_WHEEL_HI_RES, input_dev->relbit);
+ __set_bit(REL_HWHEEL, input_dev->relbit);
+ __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit);
+}
+
+/* -------------------------------------------------------------------------- */
+/* HID++1.0 mice which use HID++ reports for extra mouse buttons */
+/* -------------------------------------------------------------------------- */
+static int hidpp10_extra_mouse_buttons_connect(struct hidpp_device *hidpp)
+{
+ return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT,
+ HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT);
+}
+
+static int hidpp10_extra_mouse_buttons_raw_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ int i;
+
+ if (!hidpp->input)
+ return -EINVAL;
+
+ if (size < 7)
+ return 0;
+
+ if (data[0] != REPORT_ID_HIDPP_SHORT ||
+ data[2] != HIDPP_SUB_ID_MOUSE_EXTRA_BTNS)
+ return 0;
+
+ /*
+ * Buttons are either delivered through the regular mouse report *or*
+	 * through the extra buttons report. At least for button 6, how it is
+ * delivered differs per receiver firmware version. Even receivers with
+ * the same usb-id show different behavior, so we handle both cases.
+ */
+ for (i = 0; i < 8; i++)
+ input_report_key(hidpp->input, BTN_MOUSE + i,
+ (data[3] & (1 << i)));
+
+ /* Some mice report events on button 9+, use BTN_MISC */
+ for (i = 0; i < 8; i++)
+ input_report_key(hidpp->input, BTN_MISC + i,
+ (data[4] & (1 << i)));
+
+ input_sync(hidpp->input);
+ return 1;
+}
+
+static void hidpp10_extra_mouse_buttons_populate_input(
+ struct hidpp_device *hidpp, struct input_dev *input_dev)
+{
+ /* BTN_MOUSE - BTN_MOUSE+7 are set already by the descriptor */
+ __set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_2, input_dev->keybit);
+ __set_bit(BTN_3, input_dev->keybit);
+ __set_bit(BTN_4, input_dev->keybit);
+ __set_bit(BTN_5, input_dev->keybit);
+ __set_bit(BTN_6, input_dev->keybit);
+ __set_bit(BTN_7, input_dev->keybit);
+}
+
+/* -------------------------------------------------------------------------- */
+/* HID++1.0 kbds which only report 0x10xx consumer usages through sub-id 0x03 */
+/* -------------------------------------------------------------------------- */
+
+/* Find the consumer-page input report desc and change Maximums to 0x107f */
+static u8 *hidpp10_consumer_keys_report_fixup(struct hidpp_device *hidpp,
+ u8 *_rdesc, unsigned int *rsize)
+{
+	/* Note: 0-terminated so we can use strnstr to search for this. */
+ const char consumer_rdesc_start[] = {
+ 0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
+ 0x09, 0x01, /* USAGE (Consumer Control) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x03, /* REPORT_ID = 3 */
+ 0x75, 0x10, /* REPORT_SIZE (16) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x15, 0x01, /* LOGICAL_MIN (1) */
+ 0x26, 0x00 /* LOGICAL_MAX (... */
+ };
+ char *consumer_rdesc, *rdesc = (char *)_rdesc;
+ unsigned int size;
+
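+	/*
+	 * Bytes 15/16 are the little-endian payload of the 16-bit
+	 * LOGICAL_MAXIMUM item in the template above; bytes 20/21 are
+	 * assumed to be the payload of the USAGE_MAXIMUM item that
+	 * follows it in these descriptors. Both are raised to 0x107f.
+	 */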
+	consumer_rdesc = strnstr(rdesc, consumer_rdesc_start, *rsize);
+	if (consumer_rdesc) {
+		size = *rsize - (consumer_rdesc - rdesc);
+		if (size >= 25) {
+			consumer_rdesc[15] = 0x7f;
+			consumer_rdesc[16] = 0x10;
+			consumer_rdesc[20] = 0x7f;
+			consumer_rdesc[21] = 0x10;
+		}
+	}
+ return _rdesc;
+}
+
+static int hidpp10_consumer_keys_connect(struct hidpp_device *hidpp)
+{
+ return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_CONSUMER_REPORT,
+ HIDPP_ENABLE_CONSUMER_REPORT);
+}
+
+static int hidpp10_consumer_keys_raw_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ u8 consumer_report[5];
+
+ if (size < 7)
+ return 0;
+
+ if (data[0] != REPORT_ID_HIDPP_SHORT ||
+ data[2] != HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS)
+ return 0;
+
+ /*
+	 * Build a normal consumer report (3) out of the data; this detour
+ * is necessary to get some keyboards to report their 0x10xx usages.
+ */
+ consumer_report[0] = 0x03;
+ memcpy(&consumer_report[1], &data[3], 4);
+ /* We are called from atomic context */
+ hid_report_raw_event(hidpp->hid_dev, HID_INPUT_REPORT,
+ consumer_report, 5, 1);
+
+ return 1;
+}
+
+/* -------------------------------------------------------------------------- */
/* High-resolution scroll wheels */
/* -------------------------------------------------------------------------- */
@@ -2774,12 +2953,31 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
+static u8 *hidpp_report_fixup(struct hid_device *hdev, u8 *rdesc,
+ unsigned int *rsize)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+
+ if (!hidpp)
+ return rdesc;
+
+ /* For 27 MHz keyboards the quirk gets set after hid_parse. */
+ if (hdev->group == HID_GROUP_LOGITECH_27MHZ_DEVICE ||
+ (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS))
+ rdesc = hidpp10_consumer_keys_report_fixup(hidpp, rdesc, rsize);
+
+ return rdesc;
+}
+
static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ if (!hidpp)
+ return 0;
+
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
return wtp_input_mapping(hdev, hi, field, usage, bit, max);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560 &&
@@ -2795,6 +2993,9 @@ static int hidpp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ if (!hidpp)
+ return 0;
+
/* Ensure that Logitech G920 is not given a default fuzz/flat value */
if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
if (usage->type == EV_ABS && (usage->code == ABS_X ||
@@ -2809,15 +3010,20 @@ static int hidpp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static void hidpp_populate_input(struct hidpp_device *hidpp,
- struct input_dev *input, bool origin_is_hid_core)
+ struct input_dev *input)
{
+ hidpp->input = input;
+
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
- wtp_populate_input(hidpp, input, origin_is_hid_core);
+ wtp_populate_input(hidpp, input);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
- m560_populate_input(hidpp, input, origin_is_hid_core);
+ m560_populate_input(hidpp, input);
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
- hidpp->vertical_wheel_counter.dev = input;
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS)
+ hidpp10_wheel_populate_input(hidpp, input);
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS)
+ hidpp10_extra_mouse_buttons_populate_input(hidpp, input);
}
static int hidpp_input_configured(struct hid_device *hdev,
@@ -2826,7 +3032,10 @@ static int hidpp_input_configured(struct hid_device *hdev,
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
struct input_dev *input = hidinput->input;
- hidpp_populate_input(hidpp, input, true);
+ if (!hidpp)
+ return 0;
+
+ hidpp_populate_input(hidpp, input);
return 0;
}
@@ -2886,6 +3095,24 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
return ret;
}
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) {
+ ret = hidpp10_wheel_raw_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) {
+ ret = hidpp10_extra_mouse_buttons_raw_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) {
+ ret = hidpp10_consumer_keys_raw_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ }
+
return 0;
}
@@ -2895,10 +3122,13 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
int ret = 0;
+ if (!hidpp)
+ return 0;
+
/* Generic HID++ processing. */
switch (data[0]) {
case REPORT_ID_HIDPP_VERY_LONG:
- if (size != HIDPP_REPORT_VERY_LONG_LENGTH) {
+ if (size != hidpp->very_long_report_length) {
hid_err(hdev, "received hid++ report of bad size (%d)",
size);
return 1;
@@ -2943,17 +3173,22 @@ static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
* restriction imposed in hidpp_usages.
*/
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct hidpp_scroll_counter *counter = &hidpp->vertical_wheel_counter;
+ struct hidpp_scroll_counter *counter;
+
+ if (!hidpp)
+ return 0;
+
+ counter = &hidpp->vertical_wheel_counter;
/* A scroll event may occur before the multiplier has been retrieved or
* the input device set, or high-res scroll enabling may fail. In such
* cases we must return early (falling back to default behaviour) to
* avoid a crash in hidpp_scroll_counter_handle_scroll.
*/
if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
- || counter->dev == NULL || counter->wheel_multiplier == 0)
+ || hidpp->input == NULL || counter->wheel_multiplier == 0)
return 0;
- hidpp_scroll_counter_handle_scroll(counter, value);
+ hidpp_scroll_counter_handle_scroll(hidpp->input, counter, value);
return 1;
}
@@ -3125,32 +3360,45 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
return;
}
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) {
+ ret = hidpp10_wheel_connect(hidpp);
+ if (ret)
+ return;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) {
+ ret = hidpp10_extra_mouse_buttons_connect(hidpp);
+ if (ret)
+ return;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) {
+ ret = hidpp10_consumer_keys_connect(hidpp);
+ if (ret)
+ return;
+ }
+
/* the device is already connected, we can ask for its name and
* protocol */
if (!hidpp->protocol_major) {
- ret = !hidpp_is_connected(hidpp);
+ ret = hidpp_root_get_protocol_version(hidpp);
if (ret) {
hid_err(hdev, "Can not get the protocol version.\n");
return;
}
- hid_info(hdev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
}
if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) {
name = hidpp_get_device_name(hidpp);
- if (!name) {
- hid_err(hdev,
- "unable to retrieve the name of the device");
- return;
- }
-
- devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name);
- kfree(name);
- if (!devm_name)
- return;
+ if (name) {
+ devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "%s", name);
+ kfree(name);
+ if (!devm_name)
+ return;
- hidpp->name = devm_name;
+ hidpp->name = devm_name;
+ }
}
hidpp_initialize_battery(hidpp);
@@ -3181,7 +3429,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
return;
}
- hidpp_populate_input(hidpp, input, false);
+ hidpp_populate_input(hidpp, input);
ret = input_register_device(input);
if (ret)
@@ -3201,6 +3449,60 @@ static const struct attribute_group ps_attribute_group = {
.attrs = sysfs_attrs
};
+static int hidpp_get_report_length(struct hid_device *hdev, int id)
+{
+ struct hid_report_enum *re;
+ struct hid_report *report;
+
+ re = &(hdev->report_enum[HID_OUTPUT_REPORT]);
+ report = re->report_id_hash[id];
+ if (!report)
+ return 0;
+
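+	/* one byte per field value (u8 for HID++ reports) plus the report ID byte */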
+ return report->field[0]->report_count + 1;
+}
+
+static bool hidpp_validate_report(struct hid_device *hdev, int id,
+ int expected_length, bool optional)
+{
+ int report_length;
+
+ if (id >= HID_MAX_IDS || id < 0) {
+ hid_err(hdev, "invalid HID report id %u\n", id);
+ return false;
+ }
+
+ report_length = hidpp_get_report_length(hdev, id);
+ if (!report_length)
+ return optional;
+
+ if (report_length < expected_length) {
+ hid_warn(hdev, "not enough values in hidpp report %d\n", id);
+ return false;
+ }
+
+ return true;
+}
+
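+/*
+ * A HID++ device must at least provide the short report; the long
+ * report is treated as optional here.
+ */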
+static bool hidpp_validate_device(struct hid_device *hdev)
+{
+ return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT,
+ HIDPP_REPORT_SHORT_LENGTH, false) &&
+ hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG,
+ HIDPP_REPORT_LONG_LENGTH, true);
+}
+
+static bool hidpp_application_equals(struct hid_device *hdev,
+ unsigned int application)
+{
+ struct list_head *report_list;
+ struct hid_report *report;
+
+ report_list = &hdev->report_enum[HID_INPUT_REPORT].report_list;
+ report = list_first_entry_or_null(report_list, struct hid_report, list);
+ return report && report->application == application;
+}
+
static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct hidpp_device *hidpp;
@@ -3208,20 +3510,48 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
bool connected;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
- hidpp = devm_kzalloc(&hdev->dev, sizeof(struct hidpp_device),
- GFP_KERNEL);
+ /* report_fixup needs drvdata to be set before we call hid_parse */
+ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
if (!hidpp)
return -ENOMEM;
hidpp->hid_dev = hdev;
hidpp->name = hdev->name;
+ hidpp->quirks = id->driver_data;
hid_set_drvdata(hdev, hidpp);
- hidpp->quirks = id->driver_data;
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "%s:parse failed\n", __func__);
+ return ret;
+ }
+
+ /*
+ * Make sure the device is HID++ capable, otherwise treat as generic HID
+ */
+ if (!hidpp_validate_device(hdev)) {
+ hid_set_drvdata(hdev, NULL);
+ devm_kfree(&hdev->dev, hidpp);
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ }
+
+ hidpp->very_long_report_length =
+ hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG);
+ if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
+ hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH;
if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE)
hidpp->quirks |= HIDPP_QUIRK_UNIFYING;
+ if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE &&
+ hidpp_application_equals(hdev, HID_GD_MOUSE))
+ hidpp->quirks |= HIDPP_QUIRK_HIDPP_WHEELS |
+ HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS;
+
+ if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE &&
+ hidpp_application_equals(hdev, HID_GD_KEYBOARD))
+ hidpp->quirks |= HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS;
+
if (disable_raw_mode) {
hidpp->quirks &= ~HIDPP_QUIRK_CLASS_WTP;
hidpp->quirks &= ~HIDPP_QUIRK_NO_HIDINPUT;
@@ -3230,15 +3560,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
ret = wtp_allocate(hdev, id);
if (ret)
- goto allocate_fail;
- } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) {
- ret = m560_allocate(hdev);
- if (ret)
- goto allocate_fail;
+ return ret;
} else if (hidpp->quirks & HIDPP_QUIRK_CLASS_K400) {
ret = k400_allocate(hdev);
if (ret)
- goto allocate_fail;
+ return ret;
}
INIT_WORK(&hidpp->work, delayed_work_cb);
@@ -3251,93 +3577,79 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hid_warn(hdev, "Cannot allocate sysfs group for %s\n",
hdev->name);
- ret = hid_parse(hdev);
+ /*
+ * Plain USB connections need to actually call start and open
+ * on the transport driver to allow incoming data.
+ */
+ ret = hid_hw_start(hdev, 0);
if (ret) {
- hid_err(hdev, "%s:parse failed\n", __func__);
- goto hid_parse_fail;
+ hid_err(hdev, "hw start failed\n");
+ goto hid_hw_start_fail;
}
- if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
- connect_mask &= ~HID_CONNECT_HIDINPUT;
-
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- goto hid_hw_start_fail;
- }
- ret = hid_hw_open(hdev);
- if (ret < 0) {
- dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
- __func__, ret);
- hid_hw_stop(hdev);
- goto hid_hw_start_fail;
- }
+ ret = hid_hw_open(hdev);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
+ __func__, ret);
+ hid_hw_stop(hdev);
+ goto hid_hw_open_fail;
}
-
/* Allow incoming packets */
hid_device_io_start(hdev);
if (hidpp->quirks & HIDPP_QUIRK_UNIFYING)
hidpp_unifying_init(hidpp);
- connected = hidpp_is_connected(hidpp);
+ connected = hidpp_root_get_protocol_version(hidpp) == 0;
atomic_set(&hidpp->connected, connected);
if (!(hidpp->quirks & HIDPP_QUIRK_UNIFYING)) {
if (!connected) {
ret = -ENODEV;
hid_err(hdev, "Device not connected");
- goto hid_hw_open_failed;
+ goto hid_hw_init_fail;
}
- hid_info(hdev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
-
hidpp_overwrite_name(hdev);
}
if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
ret = wtp_get_config(hidpp);
if (ret)
- goto hid_hw_open_failed;
+ goto hid_hw_init_fail;
} else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
ret = g920_get_config(hidpp);
if (ret)
- goto hid_hw_open_failed;
+ goto hid_hw_init_fail;
}
- /* Block incoming packets */
- hid_device_io_stop(hdev);
+ hidpp_connect_event(hidpp);
- if (!(hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
- goto hid_hw_start_fail;
- }
- }
+ /* Reset the HID node state */
+ hid_device_io_stop(hdev);
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
- /* Allow incoming packets */
- hid_device_io_start(hdev);
+ if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
+ connect_mask &= ~HID_CONNECT_HIDINPUT;
- hidpp_connect_event(hidpp);
+ /* Now export the actual inputs and hidraw nodes to the world */
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+ goto hid_hw_start_fail;
+ }
return ret;
-hid_hw_open_failed:
- hid_device_io_stop(hdev);
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- hid_hw_close(hdev);
- hid_hw_stop(hdev);
- }
+hid_hw_init_fail:
+ hid_hw_close(hdev);
+hid_hw_open_fail:
+ hid_hw_stop(hdev);
hid_hw_start_fail:
-hid_parse_fail:
sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
cancel_work_sync(&hidpp->work);
mutex_destroy(&hidpp->send_mutex);
-allocate_fail:
- hid_set_drvdata(hdev, NULL);
return ret;
}
@@ -3345,12 +3657,14 @@ static void hidpp_remove(struct hid_device *hdev)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ if (!hidpp)
+ return hid_hw_stop(hdev);
+
sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)
hidpp_ff_deinit(hdev);
- hid_hw_close(hdev);
- }
+
hid_hw_stop(hdev);
cancel_work_sync(&hidpp->work);
mutex_destroy(&hidpp->send_mutex);
@@ -3360,6 +3674,10 @@ static void hidpp_remove(struct hid_device *hdev)
HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
USB_VENDOR_ID_LOGITECH, (product))
+#define L27MHZ_DEVICE(product) \
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_27MHZ_DEVICE, \
+ USB_VENDOR_ID_LOGITECH, (product))
+
static const struct hid_device_id hidpp_devices[] = {
{ /* wireless touchpad */
LDJ_DEVICE(0x4011),
@@ -3411,11 +3729,37 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Solar Keyboard Logitech K750 */
LDJ_DEVICE(0x4002),
.driver_data = HIDPP_QUIRK_CLASS_K750 },
+ { /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
+ LDJ_DEVICE(0xb305),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ LDJ_DEVICE(HID_ANY_ID) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
+ { /* Keyboard LX501 (Y-RR53) */
+ L27MHZ_DEVICE(0x0049),
+ .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
+ { /* Keyboard MX3000 (Y-RAM74) */
+ L27MHZ_DEVICE(0x0057),
+ .driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL },
+ { /* Keyboard MX3200 (Y-RAV80) */
+ L27MHZ_DEVICE(0x005c),
+ .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
+
+ { L27MHZ_DEVICE(HID_ANY_ID) },
+
+ { /* Logitech G403 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
+ { /* Logitech G700 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
+ { /* Logitech G900 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) },
+ { /* Logitech G920 Wheel over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
+
+ { /* MX5000 keyboard over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{}
};
@@ -3429,6 +3773,7 @@ static const struct hid_usage_id hidpp_usages[] = {
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
+ .report_fixup = hidpp_report_fixup,
.probe = hidpp_probe,
.remove = hidpp_remove,
.raw_event = hidpp_raw_event,
diff --git a/drivers/hid/hid-macally.c b/drivers/hid/hid-macally.c
new file mode 100644
index 000000000000..9a4fc7dffb14
--- /dev/null
+++ b/drivers/hid/hid-macally.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for quirky Macally devices
+ *
+ * Copyright (c) 2019 Alex Henrie <alexhenrie24@gmail.com>
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Alex Henrie <alexhenrie24@gmail.com>");
+MODULE_DESCRIPTION("Macally devices");
+MODULE_LICENSE("GPL");
+
+/*
+ * The Macally ikey keyboard says that its logical and usage maximums are both
+ * 101, but the power key is 102 and the equals key is 103
+ */
+static __u8 *macally_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
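+	/* 0x65 == 101; raise both maximums to 0x67 == 103 */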
+ if (*rsize >= 60 && rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+ hid_info(hdev,
+ "fixing up Macally ikey keyboard report descriptor\n");
+ rdesc[53] = rdesc[59] = 0x67;
+ }
+ return rdesc;
+}
+
+static struct hid_device_id macally_id_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
+ USB_DEVICE_ID_MACALLY_IKEY_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, macally_id_table);
+
+static struct hid_driver macally_driver = {
+ .name = "macally",
+ .id_table = macally_id_table,
+ .report_fixup = macally_report_fixup,
+};
+
+module_hid_driver(macally_driver);
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index c1b29a9eb41a..482c24f0e078 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -28,6 +28,7 @@
#include <linux/completion.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/string.h>
#include "hid-picolcd.h"
@@ -275,27 +276,20 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
{
struct picolcd_data *data = dev_get_drvdata(dev);
struct hid_report *report = NULL;
- size_t cnt = count;
int timeout = data->opmode_delay;
unsigned long flags;
- if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) {
+ if (sysfs_streq(buf, "lcd")) {
if (data->status & PICOLCD_BOOTLOADER)
report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev);
- buf += 3;
- cnt -= 3;
- } else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) {
+ } else if (sysfs_streq(buf, "bootloader")) {
if (!(data->status & PICOLCD_BOOTLOADER))
report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev);
- buf += 10;
- cnt -= 10;
- }
- if (!report || report->maxfield != 1)
+ } else {
return -EINVAL;
+ }
- while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
- cnt--;
- if (cnt != 0)
+ if (!report || report->maxfield != 1)
return -EINVAL;
spin_lock_irqsave(&data->lock, flags);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 953908f2267c..fea7f7ff5ab1 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -432,7 +432,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_LOGITECH)
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
@@ -464,13 +463,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
#endif
#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
@@ -715,7 +709,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +848,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ }
};
-/**
+/*
* hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
*
* There are composite devices for which we want to ignore only a certain
@@ -996,6 +989,10 @@ bool hid_ignore(struct hid_device *hdev)
if (hdev->product == 0x0401 &&
strncmp(hdev->name, "ELAN0800", 8) != 0)
return true;
+ /* Same with product id 0x0400 */
+ if (hdev->product == 0x0400 &&
+ strncmp(hdev->name, "QTEC0001", 8) != 0)
+ return true;
break;
}
@@ -1042,7 +1039,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
}
if (bl_entry != NULL)
- dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
bl_entry->driver_data, bl_entry->vendor,
bl_entry->product);
@@ -1209,7 +1206,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
quirks |= bl_entry->driver_data;
if (quirks)
- dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
quirks, hdev->vendor, hdev->product);
return quirks;
}
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index bb012bc032e0..462e653a7bbb 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -157,8 +157,7 @@ static int usage_id_cmp(const void *p1, const void *p2)
static ssize_t enable_sensor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", sensor_inst->enable);
}
@@ -237,8 +236,7 @@ static ssize_t enable_sensor_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
int value;
int ret = -EINVAL;
@@ -283,8 +281,7 @@ static const struct attribute_group enable_sensor_attr_group = {
static ssize_t show_value(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
struct hid_sensor_hub_attribute_info *attribute;
int index, usage, field_index;
char name[HID_CUSTOM_NAME_LENGTH];
@@ -392,8 +389,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
static ssize_t store_value(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
int index, field_index, usage;
char name[HID_CUSTOM_NAME_LENGTH];
int value;
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8141cadfca0e..8dae0f9b819e 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
static int steam_register(struct steam_device *steam)
{
int ret;
+ bool client_opened;
/*
* This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
* Unlikely, but getting the serial could fail, and it is not so
* important, so make up a serial number and go on.
*/
+ mutex_lock(&steam->mutex);
if (steam_get_serial(steam) < 0)
strlcpy(steam->serial_no, "XXXXXXXXXX",
sizeof(steam->serial_no));
+ mutex_unlock(&steam->mutex);
hid_info(steam->hdev, "Steam Controller '%s' connected",
steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
}
mutex_lock(&steam->mutex);
- if (!steam->client_opened) {
+ client_opened = steam->client_opened;
+ if (!client_opened)
steam_set_lizard_mode(steam, lizard_mode);
+ mutex_unlock(&steam->mutex);
+
+ if (!client_opened)
ret = steam_input_register(steam);
- } else {
+ else
ret = 0;
- }
- mutex_unlock(&steam->mutex);
return ret;
}
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
{
struct steam_device *steam = hdev->driver_data;
+ unsigned long flags;
+ bool connected;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ connected = steam->connected;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
mutex_lock(&steam->mutex);
steam->client_opened = false;
+ if (connected)
+ steam_set_lizard_mode(steam, lizard_mode);
mutex_unlock(&steam->mutex);
- if (steam->connected) {
- steam_set_lizard_mode(steam, lizard_mode);
+ if (connected)
steam_input_register(steam);
- }
}
static int steam_client_ll_raw_request(struct hid_device *hdev,
diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
new file mode 100644
index 000000000000..95e0807878c7
--- /dev/null
+++ b/drivers/hid/hid-u2fzero.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * U2F Zero LED and RNG driver
+ *
+ * Copyright 2018 Andrej Shadura <andrew@shadura.me>
+ * Loosely based on drivers/hid/hid-led.c
+ * and drivers/usb/misc/chaoskey.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/hid.h>
+#include <linux/hidraw.h>
+#include <linux/hw_random.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+#define DRIVER_SHORT "u2fzero"
+
+#define HID_REPORT_SIZE 64
+
+/* We only use broadcast (CID-less) messages */
+#define CID_BROADCAST 0xffffffff
+
+struct u2f_hid_msg {
+ u32 cid;
+ union {
+ struct {
+ u8 cmd;
+ u8 bcnth;
+ u8 bcntl;
+ u8 data[HID_REPORT_SIZE - 7];
+ } init;
+ struct {
+ u8 seq;
+ u8 data[HID_REPORT_SIZE - 5];
+ } cont;
+ };
+} __packed;
+
+struct u2f_hid_report {
+ u8 report_type;
+ struct u2f_hid_msg msg;
+} __packed;
+
+#define U2F_HID_MSG_LEN(f) (size_t)(((f).init.bcnth << 8) + (f).init.bcntl)
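+/*
+ * Example: bcnth = 0x01 and bcntl = 0x20 encode a payload length of
+ * (0x01 << 8) + 0x20 = 288 bytes.
+ */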
+
+/* Custom extensions to the U2FHID protocol */
+#define U2F_CUSTOM_GET_RNG 0x21
+#define U2F_CUSTOM_WINK 0x24
+
+struct u2fzero_device {
+ struct hid_device *hdev;
+ struct urb *urb; /* URB for the RNG data */
+ struct led_classdev ldev; /* Embedded struct for led */
+ struct hwrng hwrng; /* Embedded struct for hwrng */
+ char *led_name;
+ char *rng_name;
+ u8 *buf_out;
+ u8 *buf_in;
+ struct mutex lock;
+ bool present;
+};
+
+static int u2fzero_send(struct u2fzero_device *dev, struct u2f_hid_report *req)
+{
+ int ret;
+
+ mutex_lock(&dev->lock);
+
+ memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report));
+
+ ret = hid_hw_output_report(dev->hdev, dev->buf_out,
+ sizeof(struct u2f_hid_msg));
+
+ mutex_unlock(&dev->lock);
+
+ if (ret < 0)
+ return ret;
+
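+	/* a short write means the report was not sent in full */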
+ return ret == sizeof(struct u2f_hid_msg) ? 0 : -EMSGSIZE;
+}
+
+struct u2fzero_transfer_context {
+ struct completion done;
+ int status;
+};
+
+static void u2fzero_read_callback(struct urb *urb)
+{
+ struct u2fzero_transfer_context *ctx = urb->context;
+
+ ctx->status = urb->status;
+ complete(&ctx->done);
+}
+
+static int u2fzero_recv(struct u2fzero_device *dev,
+ struct u2f_hid_report *req,
+ struct u2f_hid_msg *resp)
+{
+ int ret;
+ struct hid_device *hdev = dev->hdev;
+ struct u2fzero_transfer_context ctx;
+
+ mutex_lock(&dev->lock);
+
+ memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report));
+
+ dev->urb->context = &ctx;
+ init_completion(&ctx.done);
+
+ ret = usb_submit_urb(dev->urb, GFP_NOIO);
+ if (unlikely(ret)) {
+ hid_err(hdev, "usb_submit_urb failed: %d", ret);
+ goto err;
+ }
+
+ ret = hid_hw_output_report(dev->hdev, dev->buf_out,
+ sizeof(struct u2f_hid_msg));
+
+ if (ret < 0) {
+ hid_err(hdev, "hid_hw_output_report failed: %d", ret);
+ goto err;
+ }
+
+	ret = wait_for_completion_timeout(
+		&ctx.done, msecs_to_jiffies(USB_CTRL_SET_TIMEOUT));
+	/* wait_for_completion_timeout() returns 0 on timeout, never < 0 */
+	if (!ret) {
+		usb_kill_urb(dev->urb);
+		hid_err(hdev, "urb submission timed out");
+		ret = -ETIMEDOUT;
+	} else {
+ ret = dev->urb->actual_length;
+ memcpy(resp, dev->buf_in, ret);
+ }
+
+err:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static int u2fzero_blink(struct led_classdev *ldev)
+{
+ struct u2fzero_device *dev = container_of(ldev,
+ struct u2fzero_device, ldev);
+ struct u2f_hid_report req = {
+ .report_type = 0,
+ .msg.cid = CID_BROADCAST,
+ .msg.init = {
+ .cmd = U2F_CUSTOM_WINK,
+ .bcnth = 0,
+ .bcntl = 0,
+ .data = {0},
+ }
+ };
+ return u2fzero_send(dev, &req);
+}
+
+static int u2fzero_brightness_set(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ ldev->brightness = LED_OFF;
+ if (brightness)
+ return u2fzero_blink(ldev);
+ else
+ return 0;
+}
+
+static int u2fzero_rng_read(struct hwrng *rng, void *data,
+ size_t max, bool wait)
+{
+ struct u2fzero_device *dev = container_of(rng,
+ struct u2fzero_device, hwrng);
+ struct u2f_hid_report req = {
+ .report_type = 0,
+ .msg.cid = CID_BROADCAST,
+ .msg.init = {
+ .cmd = U2F_CUSTOM_GET_RNG,
+ .bcnth = 0,
+ .bcntl = 0,
+ .data = {0},
+ }
+ };
+ struct u2f_hid_msg resp;
+ int ret;
+ size_t actual_length;
+
+ if (!dev->present) {
+ hid_dbg(dev->hdev, "device not present");
+ return 0;
+ }
+
+ ret = u2fzero_recv(dev, &req, &resp);
+ if (ret < 0)
+ return 0;
+
+ /* only take the minimum amount of data it is safe to take */
+ actual_length = min3((size_t)ret - offsetof(struct u2f_hid_msg,
+ init.data), U2F_HID_MSG_LEN(resp), max);
+
+ memcpy(data, resp.init.data, actual_length);
+
+ return actual_length;
+}
+
+static int u2fzero_init_led(struct u2fzero_device *dev,
+ unsigned int minor)
+{
+ dev->led_name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
+ "%s%u", DRIVER_SHORT, minor);
+ if (dev->led_name == NULL)
+ return -ENOMEM;
+
+ dev->ldev.name = dev->led_name;
+ dev->ldev.max_brightness = LED_ON;
+ dev->ldev.flags = LED_HW_PLUGGABLE;
+ dev->ldev.brightness_set_blocking = u2fzero_brightness_set;
+
+ return devm_led_classdev_register(&dev->hdev->dev, &dev->ldev);
+}
+
+static int u2fzero_init_hwrng(struct u2fzero_device *dev,
+ unsigned int minor)
+{
+ dev->rng_name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
+ "%s-rng%u", DRIVER_SHORT, minor);
+ if (dev->rng_name == NULL)
+ return -ENOMEM;
+
+ dev->hwrng.name = dev->rng_name;
+ dev->hwrng.read = u2fzero_rng_read;
+ dev->hwrng.quality = 1;
+
+ return devm_hwrng_register(&dev->hdev->dev, &dev->hwrng);
+}
+
+static int u2fzero_fill_in_urb(struct u2fzero_device *dev)
+{
+ struct hid_device *hdev = dev->hdev;
+ struct usb_device *udev;
+ struct usbhid_device *usbhid = hdev->driver_data;
+ unsigned int pipe_in;
+ struct usb_host_endpoint *ep;
+
+ if (dev->hdev->bus != BUS_USB)
+ return -EINVAL;
+
+ udev = hid_to_usb_dev(hdev);
+
+ if (!usbhid->urbout || !usbhid->urbin)
+ return -ENODEV;
+
+ ep = usb_pipe_endpoint(udev, usbhid->urbin->pipe);
+ if (!ep)
+ return -ENODEV;
+
+ dev->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->urb)
+ return -ENOMEM;
+
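+	/*
+	 * Reuse the device/endpoint encoding of the existing IN pipe,
+	 * forcing the pipe type (bits 31:30) to interrupt.
+	 */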
+ pipe_in = (usbhid->urbin->pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
+
+ usb_fill_int_urb(dev->urb,
+ udev,
+ pipe_in,
+ dev->buf_in,
+ HID_REPORT_SIZE,
+ u2fzero_read_callback,
+ NULL,
+ ep->desc.bInterval);
+
+ return 0;
+}
+
+static int u2fzero_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct u2fzero_device *dev;
+ unsigned int minor;
+ int ret;
+
+ if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+ return -EINVAL;
+
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL)
+ return -ENOMEM;
+
+ dev->buf_out = devm_kmalloc(&hdev->dev,
+ sizeof(struct u2f_hid_report), GFP_KERNEL);
+ if (dev->buf_out == NULL)
+ return -ENOMEM;
+
+ dev->buf_in = devm_kmalloc(&hdev->dev,
+ sizeof(struct u2f_hid_msg), GFP_KERNEL);
+ if (dev->buf_in == NULL)
+ return -ENOMEM;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ dev->hdev = hdev;
+ hid_set_drvdata(hdev, dev);
+ mutex_init(&dev->lock);
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret)
+ return ret;
+
+	ret = u2fzero_fill_in_urb(dev);
+	if (ret) {
+		hid_hw_stop(hdev);
+		return ret;
+	}
+
+ dev->present = true;
+
+ minor = ((struct hidraw *) hdev->hidraw)->minor;
+
+ ret = u2fzero_init_led(dev, minor);
+ if (ret) {
+ hid_hw_stop(hdev);
+ return ret;
+ }
+
+ hid_info(hdev, "U2F Zero LED initialised\n");
+
+ ret = u2fzero_init_hwrng(dev, minor);
+ if (ret) {
+ hid_hw_stop(hdev);
+ return ret;
+ }
+
+ hid_info(hdev, "U2F Zero RNG initialised\n");
+
+ return 0;
+}
+
+static void u2fzero_remove(struct hid_device *hdev)
+{
+ struct u2fzero_device *dev = hid_get_drvdata(hdev);
+
+ mutex_lock(&dev->lock);
+ dev->present = false;
+ mutex_unlock(&dev->lock);
+
+ hid_hw_stop(hdev);
+ usb_poison_urb(dev->urb);
+ usb_free_urb(dev->urb);
+}
+
+static const struct hid_device_id u2fzero_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL,
+ USB_DEVICE_ID_U2F_ZERO) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, u2fzero_table);
+
+static struct hid_driver u2fzero_driver = {
+ .name = "hid-" DRIVER_SHORT,
+ .probe = u2fzero_probe,
+ .remove = u2fzero_remove,
+ .id_table = u2fzero_table,
+};
+
+module_hid_driver(u2fzero_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrej Shadura <andrew@shadura.me>");
+MODULE_DESCRIPTION("U2F Zero LED and RNG driver");
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 7710d9f957da..0187c9f8fc22 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
goto cleanup;
}
rc = usb_string(udev, 201, ver_ptr, ver_len);
- if (ver_ptr == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
if (rc == -EPIPE) {
*ver_ptr = '\0';
} else if (rc < 0) {
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
index 519e4c8b53c4..786adbc97ac5 100644
--- a/drivers/hid/intel-ish-hid/Kconfig
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -14,4 +14,19 @@ config INTEL_ISH_HID
Broxton and Kaby Lake.
Say Y here if you want to support Intel ISH. If unsure, say N.
+
+config INTEL_ISH_FIRMWARE_DOWNLOADER
+ tristate "Host Firmware Load feature for Intel ISH"
+ depends on INTEL_ISH_HID
+ depends on X86
+ help
+ The Integrated Sensor Hub (ISH) enables the kernel to offload
+ sensor polling and algorithm processing to a dedicated low power
+ processor in the chipset.
+
+ The Host Firmware Load feature adds support to load the ISH
+ firmware from host file system at boot.
+
+ Say M here if you want to support Host Firmware Loading feature
+ for Intel ISH. If unsure, say N.
endmenu
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
index 825b70af672f..2de97e4b7740 100644
--- a/drivers/hid/intel-ish-hid/Makefile
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -20,4 +20,7 @@ obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp-hid.o
intel-ishtp-hid-objs := ishtp-hid.o
intel-ishtp-hid-objs += ishtp-hid-client.o
+obj-$(CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ishtp-loader.o
+intel-ishtp-loader-objs += ishtp-fw-loader.o
+
ccflags-y += -Idrivers/hid/intel-ish-hid/ishtp
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 08a8327dfd22..523c0cbd44a4 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -31,6 +31,7 @@
#define CNL_H_DEVICE_ID 0xA37C
#define ICL_MOBILE_DEVICE_ID 0x34FC
#define SPT_H_DEVICE_ID 0xA135
+#define CML_LP_DEVICE_ID 0x02FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index a6e1ee744f4d..ac0a179daf23 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -40,6 +40,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
new file mode 100644
index 000000000000..22ba21457035
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -0,0 +1,1085 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISH-TP client driver for ISH firmware loading
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/intel-ish-client-if.h>
+#include <linux/property.h>
+#include <asm/cacheflush.h>
+
+/* Number of times we attempt to load the firmware before giving up */
+#define MAX_LOAD_ATTEMPTS 3
+
+/* ISH TX/RX ring buffer pool size */
+#define LOADER_CL_RX_RING_SIZE 1
+#define LOADER_CL_TX_RING_SIZE 1
+
+/*
+ * ISH Shim firmware loader reserves 4 Kb buffer in SRAM. The buffer is
+ * used to temporarily hold the data transferred from host to Shim
+ * firmware loader. Reason for the odd size of 3968 bytes? Each IPC
+ * transfer is 128 bytes (= 4 bytes header + 124 bytes payload). So the
+ * 4 Kb buffer can hold maximum of 32 IPC transfers, which means we can
+ * have a max payload of 3968 bytes (= 32 x 124 payload).
+ */
+#define LOADER_SHIM_IPC_BUF_SIZE 3968
+
+/**
+ * enum ish_loader_commands - ISH loader host commands.
+ * @LOADER_CMD_XFER_QUERY: Query the Shim firmware loader for
+ *	capabilities
+ * @LOADER_CMD_XFER_FRAGMENT: Transfer one firmware image fragment at a
+ *	time. The command may be executed multiple times until the
+ *	entire firmware image is downloaded to SRAM.
+ * @LOADER_CMD_START: Start executing the main firmware.
+ */
+enum ish_loader_commands {
+ LOADER_CMD_XFER_QUERY = 0,
+ LOADER_CMD_XFER_FRAGMENT,
+ LOADER_CMD_START,
+};
+
+/* Command bit mask */
+#define CMD_MASK GENMASK(6, 0)
+#define IS_RESPONSE BIT(7)
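+/*
+ * Example: the firmware answers LOADER_CMD_XFER_QUERY (0x00) with a
+ * command byte of 0x80, i.e. the same command code with the response
+ * bit set.
+ */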
+
+/*
+ * ISH firmware max delay for one transmit failure is 1 Hz,
+ * and firmware will retry 2 times, so 3 Hz is used for timeout.
+ */
+#define ISHTP_SEND_TIMEOUT (3 * HZ)
+
+/*
+ * Loader transfer modes:
+ *
+ * LOADER_XFER_MODE_ISHTP mode uses the existing ISH-TP mechanism to
+ * transfer data. This may use IPC or DMA if supported in firmware.
+ * The buffer size is limited to 4 Kb by the IPC/ISH-TP protocol for
+ * both IPC & DMA (legacy).
+ *
+ * LOADER_XFER_MODE_DIRECT_DMA - firmware loading is a bit different
+ * from the sensor data streaming. Here we download a large (300+ Kb)
+ * image directly to ISH SRAM memory. There is little benefit to
+ * DMA'ing a 300 Kb image under a 4 Kb chunk limit. Hence, we
+ * introduce this "direct dma" mode, where we do not use ISH-TP for
+ * DMA, but instead manage the DMA directly in the kernel driver and
+ * the Shim firmware loader (allocate a buffer, break it into chunks
+ * and transfer). This overcomes the 4 Kb limit and optimizes the
+ * data flow path in the firmware.
+ */
+#define LOADER_XFER_MODE_DIRECT_DMA BIT(0)
+#define LOADER_XFER_MODE_ISHTP BIT(1)
+
+/* ISH Transport Loader client unique GUID */
+static const guid_t loader_ishtp_guid =
+ GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
+ 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc);
+
+#define FILENAME_SIZE 256
+
+/*
+ * The firmware loading latency will be minimum if we can DMA the
+ * entire ISH firmware image in one go. This requires that we allocate
+ * a large DMA buffer in kernel, which could be problematic on some
+ * platforms. So here we limit the DMA buffer size via a module_param.
+ * We default to 4 pages, but a user can set it to a higher limit if
+ * that is appropriate for their platform.
+ */
+static int dma_buf_size_limit = 4 * PAGE_SIZE;
+
+/**
+ * struct loader_msg_hdr - Header for ISH Loader commands.
+ * @command: LOADER_CMD* commands. Bit 7 is the response.
+ * @status: Command response status. Non 0, is error
+ * condition.
+ *
+ * This structure is used as header for every command/data sent/received
+ * between Host driver and ISH Shim firmware loader.
+ */
+struct loader_msg_hdr {
+ u8 command;
+ u8 reserved[2];
+ u8 status;
+} __packed;
+
+struct loader_xfer_query {
+ struct loader_msg_hdr hdr;
+ u32 image_size;
+} __packed;
+
+struct ish_fw_version {
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+} __packed;
+
+union loader_version {
+ u32 value;
+ struct {
+ u8 major;
+ u8 minor;
+ u8 hotfix;
+ u8 build;
+ };
+} __packed;
+
+struct loader_capability {
+ u32 max_fw_image_size;
+ u32 xfer_mode;
+ u32 max_dma_buf_size; /* only for dma mode, multiples of cacheline */
+} __packed;
+
+struct shim_fw_info {
+ struct ish_fw_version ish_fw_version;
+ u32 protocol_version;
+ union loader_version ldr_version;
+ struct loader_capability ldr_capability;
+} __packed;
+
+struct loader_xfer_query_response {
+ struct loader_msg_hdr hdr;
+ struct shim_fw_info fw_info;
+} __packed;
+
+struct loader_xfer_fragment {
+ struct loader_msg_hdr hdr;
+ u32 xfer_mode;
+ u32 offset;
+ u32 size;
+ u32 is_last;
+} __packed;
+
+struct loader_xfer_ipc_fragment {
+ struct loader_xfer_fragment fragment;
+ u8 data[] ____cacheline_aligned; /* variable length payload here */
+} __packed;
+
+struct loader_xfer_dma_fragment {
+ struct loader_xfer_fragment fragment;
+ u64 ddr_phys_addr;
+} __packed;
+
+struct loader_start {
+ struct loader_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct response_info - Encapsulate firmware response related
+ * information passed between loader_cl_send() and the
+ * process_recv() callback.
+ * @data: Copy the data received from firmware here.
+ * @max_size: Max size allocated for the @data buffer. If the
+ *	received data exceeds this value, we log an error.
+ * @size: Actual size of data received from firmware.
+ * @error: Returns 0 for success, negative error code for a
+ *	failure in function process_recv().
+ * @received: Set to true on receiving a valid firmware
+ *	response to the host command
+ * @wait_queue: Wait queue for Host firmware loading where the
+ *	client sends a message to ISH firmware and waits
+ *	for the response
+ */
+struct response_info {
+ void *data;
+ size_t max_size;
+ size_t size;
+ int error;
+ bool received;
+ wait_queue_head_t wait_queue;
+};
+
+/**
+ * struct ishtp_cl_data - Encapsulate per ISH-TP Client Data.
+ * @work_ishtp_reset: Work queue for reset handling.
+ * @work_fw_load: Work queue for host firmware loading.
+ * @flag_retry: Flag indicating that host firmware loading should
+ *	be retried.
+ * @retry_count: Counts the number of retries.
+ *
+ * This structure is used to store data per client.
+ */
+struct ishtp_cl_data {
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+
+ /*
+ * Used for passing firmware response information between
+ * loader_cl_send() and process_recv() callback.
+ */
+ struct response_info response;
+
+ struct work_struct work_ishtp_reset;
+ struct work_struct work_fw_load;
+
+ /*
+	 * In certain failure scenarios, it makes sense to reset the ISH
+ * subsystem and retry Host firmware loading (e.g. bad message
+ * packet, ENOMEM, etc.). On the other hand, failures due to
+ * protocol mismatch, etc., are not recoverable. We do not
+ * retry them.
+ *
+ * If set, the flag indicates that we should re-try the
+ * particular failure.
+ */
+ bool flag_retry;
+ int retry_count;
+};
+
+#define IPC_FRAGMENT_DATA_PREAMBLE \
+ offsetof(struct loader_xfer_ipc_fragment, data)
+
+#define cl_data_to_dev(client_data) ishtp_device((client_data)->cl_device)
+
+/**
+ * get_firmware_variant() - Gets the filename of the firmware image to
+ *	be loaded based on the platform variant.
+ * @client_data: Client data instance.
+ * @filename: Returns the firmware filename.
+ *
+ * Queries the firmware-name device property string.
+ *
+ * Return: The non-negative length of the generated filename on
+ * success, negative error code on failure.
+ */
+static int get_firmware_variant(struct ishtp_cl_data *client_data,
+ char *filename)
+{
+ int rv;
+ const char *val;
+ struct device *devc = ishtp_get_pci_device(client_data->cl_device);
+
+ rv = device_property_read_string(devc, "firmware-name", &val);
+ if (rv < 0) {
+ dev_err(devc,
+ "Error: ISH firmware-name device property required\n");
+ return rv;
+ }
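+	/* e.g. a firmware-name of "ish.bin" (illustrative) yields "intel/ish.bin" */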
+ return snprintf(filename, FILENAME_SIZE, "intel/%s", val);
+}
+
+/**
+ * loader_cl_send() - Send message from host to firmware
+ * @client_data: Client data instance
+ * @out_msg: Message buffer to be sent to firmware
+ * @out_size: Size of the outgoing message
+ * @in_msg: Message buffer where the incoming data is copied. This
+ *	buffer is allocated by the caller
+ * @in_size: Max size of the incoming message
+ *
+ * Return: Number of bytes copied in the in_msg on success, negative
+ * error code on failure.
+ */
+static int loader_cl_send(struct ishtp_cl_data *client_data,
+ u8 *out_msg, size_t out_size,
+ u8 *in_msg, size_t in_size)
+{
+ int rv;
+ struct loader_msg_hdr *out_hdr = (struct loader_msg_hdr *)out_msg;
+ struct ishtp_cl *loader_ishtp_cl = client_data->loader_ishtp_cl;
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "%s: command=%02lx is_response=%u status=%02x\n",
+ __func__,
+ out_hdr->command & CMD_MASK,
+ out_hdr->command & IS_RESPONSE ? 1 : 0,
+ out_hdr->status);
+
+	/* Set up the incoming buffer & size */
+ client_data->response.data = in_msg;
+ client_data->response.max_size = in_size;
+ client_data->response.error = 0;
+ client_data->response.received = false;
+
+ rv = ishtp_cl_send(loader_ishtp_cl, out_msg, out_size);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data),
+ "ishtp_cl_send error %d\n", rv);
+ return rv;
+ }
+
+ wait_event_interruptible_timeout(client_data->response.wait_queue,
+ client_data->response.received,
+ ISHTP_SEND_TIMEOUT);
+ if (!client_data->response.received) {
+ dev_err(cl_data_to_dev(client_data),
+ "Timed out for response to command=%02lx",
+ out_hdr->command & CMD_MASK);
+ return -ETIMEDOUT;
+ }
+
+ if (client_data->response.error < 0)
+ return client_data->response.error;
+
+ return client_data->response.size;
+}
+
+/**
+ * process_recv() - Receive and parse incoming packet
+ * @loader_ishtp_cl: Client instance to get stats
+ * @rb_in_proc: ISH received message buffer
+ *
+ * Parse the incoming packet. If it is a response packet, it updates
+ * the received flag and wakes up the caller waiting for the response.
+ */
+static void process_recv(struct ishtp_cl *loader_ishtp_cl,
+ struct ishtp_cl_rb *rb_in_proc)
+{
+ struct loader_msg_hdr *hdr;
+ size_t data_len = rb_in_proc->buf_idx;
+ struct ishtp_cl_data *client_data =
+ ishtp_get_client_data(loader_ishtp_cl);
+
+ /* Sanity check */
+ if (!client_data->response.data) {
+ dev_err(cl_data_to_dev(client_data),
+ "Receiving buffer is null. Should be allocated by calling function\n");
+ client_data->response.error = -EINVAL;
+ goto end;
+ }
+
+ if (client_data->response.received) {
+ dev_err(cl_data_to_dev(client_data),
+ "Previous firmware message not yet processed\n");
+ client_data->response.error = -EINVAL;
+ goto end;
+ }
+ /*
+ * All firmware messages have a header. Check buffer size
+ * before accessing elements inside.
+ */
+ if (!rb_in_proc->buffer.data) {
+ dev_warn(cl_data_to_dev(client_data),
+ "rb_in_proc->buffer.data returned null");
+ client_data->response.error = -EBADMSG;
+ goto end;
+ }
+
+ if (data_len < sizeof(struct loader_msg_hdr)) {
+ dev_err(cl_data_to_dev(client_data),
+ "data size %zu is less than header %zu\n",
+ data_len, sizeof(struct loader_msg_hdr));
+ client_data->response.error = -EMSGSIZE;
+ goto end;
+ }
+
+ hdr = (struct loader_msg_hdr *)rb_in_proc->buffer.data;
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "%s: command=%02lx is_response=%u status=%02x\n",
+ __func__,
+ hdr->command & CMD_MASK,
+ hdr->command & IS_RESPONSE ? 1 : 0,
+ hdr->status);
+
+ if (((hdr->command & CMD_MASK) != LOADER_CMD_XFER_QUERY) &&
+ ((hdr->command & CMD_MASK) != LOADER_CMD_XFER_FRAGMENT) &&
+ ((hdr->command & CMD_MASK) != LOADER_CMD_START)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Invalid command=%02lx\n",
+ hdr->command & CMD_MASK);
+ client_data->response.error = -EPROTO;
+ goto end;
+ }
+
+ if (data_len > client_data->response.max_size) {
+ dev_err(cl_data_to_dev(client_data),
+ "Received buffer size %zu is larger than allocated buffer %zu\n",
+ data_len, client_data->response.max_size);
+ client_data->response.error = -EMSGSIZE;
+ goto end;
+ }
+
+ /* We expect only "response" messages from firmware */
+ if (!(hdr->command & IS_RESPONSE)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Invalid response to command\n");
+ client_data->response.error = -EIO;
+ goto end;
+ }
+
+ if (hdr->status) {
+ dev_err(cl_data_to_dev(client_data),
+ "Loader returned status %d\n",
+ hdr->status);
+ client_data->response.error = -EIO;
+ goto end;
+ }
+
+ /* Update the actual received buffer size */
+ client_data->response.size = data_len;
+
+ /*
+ * Copy the buffer received in firmware response for the
+ * calling thread.
+ */
+ memcpy(client_data->response.data,
+ rb_in_proc->buffer.data, data_len);
+
+ /* Set flag before waking up the caller */
+ client_data->response.received = true;
+
+end:
+ /* Free the buffer */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
+
+ /* Wake the calling thread */
+ wake_up_interruptible(&client_data->response.wait_queue);
+}
+
+/**
+ * loader_cl_event_cb() - bus driver callback for incoming message
+ * @cl_device: Pointer to the ishtp client device for which this
+ *	message is targeted
+ *
+ * Remove the packet from the list and process the message by calling
+ * process_recv().
+ */
+static void loader_cl_event_cb(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_rb *rb_in_proc;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ while ((rb_in_proc = ishtp_cl_rx_get_rb(loader_ishtp_cl)) != NULL) {
+ /* Process the data packet from firmware */
+ process_recv(loader_ishtp_cl, rb_in_proc);
+ }
+}
+
+/**
+ * ish_query_loader_prop() - Query ISH Shim firmware loader
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ * @fw_info: Loader firmware properties
+ *
+ * This function queries the ISH Shim firmware loader for capabilities.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
+ const struct firmware *fw,
+ struct shim_fw_info *fw_info)
+{
+ int rv;
+ struct loader_xfer_query ldr_xfer_query;
+ struct loader_xfer_query_response ldr_xfer_query_resp;
+
+ memset(&ldr_xfer_query, 0, sizeof(ldr_xfer_query));
+ ldr_xfer_query.hdr.command = LOADER_CMD_XFER_QUERY;
+ ldr_xfer_query.image_size = fw->size;
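+ /* The query carries the image size; the response returns loader capabilities */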
+ rv = loader_cl_send(client_data,
+ (u8 *)&ldr_xfer_query,
+ sizeof(ldr_xfer_query),
+ (u8 *)&ldr_xfer_query_resp,
+ sizeof(ldr_xfer_query_resp));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ return rv;
+ }
+
+ /* On success, the return value is the received buffer size */
+ if (rv != sizeof(struct loader_xfer_query_response)) {
+ dev_err(cl_data_to_dev(client_data),
+ "data size %d is not equal to size of loader_xfer_query_response %zu\n",
+ rv, sizeof(struct loader_xfer_query_response));
+ client_data->flag_retry = true;
+ return -EMSGSIZE;
+ }
+
+ /* Save fw_info for use outside this function */
+ *fw_info = ldr_xfer_query_resp.fw_info;
+
+ /* Loader firmware properties */
+ dev_dbg(cl_data_to_dev(client_data),
+ "ish_fw_version: major=%d minor=%d hotfix=%d build=%d protocol_version=0x%x loader_version=%d\n",
+ fw_info->ish_fw_version.major,
+ fw_info->ish_fw_version.minor,
+ fw_info->ish_fw_version.hotfix,
+ fw_info->ish_fw_version.build,
+ fw_info->protocol_version,
+ fw_info->ldr_version.value);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "loader_capability: max_fw_image_size=0x%x xfer_mode=%d max_dma_buf_size=0x%x dma_buf_size_limit=0x%x\n",
+ fw_info->ldr_capability.max_fw_image_size,
+ fw_info->ldr_capability.xfer_mode,
+ fw_info->ldr_capability.max_dma_buf_size,
+ dma_buf_size_limit);
+
+ /* Sanity checks */
+ if (fw_info->ldr_capability.max_fw_image_size < fw->size) {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH firmware size %zu is greater than Shim firmware loader max supported %d\n",
+ fw->size,
+ fw_info->ldr_capability.max_fw_image_size);
+ return -ENOSPC;
+ }
+
+ /* For DMA, the buffer size should be a multiple of the cacheline size */
+ if ((fw_info->ldr_capability.xfer_mode & LOADER_XFER_MODE_DIRECT_DMA) &&
+ (fw_info->ldr_capability.max_dma_buf_size % L1_CACHE_BYTES)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Shim firmware loader buffer size %d should be multiple of cacheline\n",
+ fw_info->ldr_capability.max_dma_buf_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_fw_xfer_ishtp() - Loads ISH firmware using the ISH-TP interface
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ *
+ * This function uses ISH-TP to transfer ISH firmware from host to
+ * ISH SRAM. Lower layers may use IPC or DMA depending on firmware
+ * support.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_xfer_ishtp(struct ishtp_cl_data *client_data,
+ const struct firmware *fw)
+{
+ int rv;
+ u32 fragment_offset, fragment_size, payload_max_size;
+ struct loader_xfer_ipc_fragment *ldr_xfer_ipc_frag;
+ struct loader_msg_hdr ldr_xfer_ipc_ack;
+
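+ /* Usable payload per fragment: the IPC buffer minus the fragment preamble */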
+ payload_max_size =
+ LOADER_SHIM_IPC_BUF_SIZE - IPC_FRAGMENT_DATA_PREAMBLE;
+
+ ldr_xfer_ipc_frag = kzalloc(LOADER_SHIM_IPC_BUF_SIZE, GFP_KERNEL);
+ if (!ldr_xfer_ipc_frag) {
+ client_data->flag_retry = true;
+ return -ENOMEM;
+ }
+
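+ /* The header fields below are fixed for every fragment; offset/size vary */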
+ ldr_xfer_ipc_frag->fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
+ ldr_xfer_ipc_frag->fragment.xfer_mode = LOADER_XFER_MODE_ISHTP;
+
+ /* Break the firmware image into fragments and send as ISH-TP payload */
+ fragment_offset = 0;
+ while (fragment_offset < fw->size) {
+ if (fragment_offset + payload_max_size < fw->size) {
+ fragment_size = payload_max_size;
+ ldr_xfer_ipc_frag->fragment.is_last = 0;
+ } else {
+ fragment_size = fw->size - fragment_offset;
+ ldr_xfer_ipc_frag->fragment.is_last = 1;
+ }
+
+ ldr_xfer_ipc_frag->fragment.offset = fragment_offset;
+ ldr_xfer_ipc_frag->fragment.size = fragment_size;
+ memcpy(ldr_xfer_ipc_frag->data,
+ &fw->data[fragment_offset],
+ fragment_size);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "xfer_mode=ipc offset=0x%08x size=0x%08x is_last=%d\n",
+ ldr_xfer_ipc_frag->fragment.offset,
+ ldr_xfer_ipc_frag->fragment.size,
+ ldr_xfer_ipc_frag->fragment.is_last);
+
+ rv = loader_cl_send(client_data,
+ (u8 *)ldr_xfer_ipc_frag,
+ IPC_FRAGMENT_DATA_PREAMBLE + fragment_size,
+ (u8 *)&ldr_xfer_ipc_ack,
+ sizeof(ldr_xfer_ipc_ack));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ goto end_err_resp_buf_release;
+ }
+
+ fragment_offset += fragment_size;
+ }
+
+ kfree(ldr_xfer_ipc_frag);
+ return 0;
+
+end_err_resp_buf_release:
+ /* Error path: free the fragment buffer */
+ kfree(ldr_xfer_ipc_frag);
+ return rv;
+}
+
+/**
+ * ish_fw_xfer_direct_dma() - Loads ISH firmware using direct DMA
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ * @fw_info: Loader firmware properties
+ *
+ * Host firmware load is a unique case where we need to download a
+ * large firmware image (200+ KB). This function implements direct DMA
+ * transfer between the kernel and ISH firmware, which overcomes the
+ * ISH-TP 4 KB payload limit and allows us to DMA directly to ISH UMA
+ * at a location of choice.
+ * This function depends on corresponding support in ISH firmware.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
+ const struct firmware *fw,
+ const struct shim_fw_info fw_info)
+{
+ int rv;
+ void *dma_buf;
+ dma_addr_t dma_buf_phy;
+ u32 fragment_offset, fragment_size, payload_max_size;
+ struct loader_msg_hdr ldr_xfer_dma_frag_ack;
+ struct loader_xfer_dma_fragment ldr_xfer_dma_frag;
+ struct device *devc = ishtp_get_pci_device(client_data->cl_device);
+ u32 shim_fw_buf_size =
+ fw_info.ldr_capability.max_dma_buf_size;
+
+ /*
+ * payload_max_size should be set to minimum of
+ * (1) Size of firmware to be loaded,
+ * (2) Max DMA buffer size supported by Shim firmware,
+ * (3) DMA buffer size limit set by the module parameter dma_buf_size_limit.
+ */
+ payload_max_size = min3(fw->size,
+ (size_t)shim_fw_buf_size,
+ (size_t)dma_buf_size_limit);
+
+ /*
+ * The buffer size should be a multiple of the cacheline size;
+ * if it is not, round down to the previous cacheline boundary.
+ */
+ payload_max_size &= ~(L1_CACHE_BYTES - 1);
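+ /* e.g. with 64-byte cachelines, 0x12345 rounds down to 0x12340 */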
+
+ dma_buf = kmalloc(payload_max_size, GFP_KERNEL | GFP_DMA32);
+ if (!dma_buf) {
+ client_data->flag_retry = true;
+ return -ENOMEM;
+ }
+
+ dma_buf_phy = dma_map_single(devc, dma_buf, payload_max_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(devc, dma_buf_phy)) {
+ dev_err(cl_data_to_dev(client_data), "DMA map failed\n");
+ client_data->flag_retry = true;
+ rv = -ENOMEM;
+ goto end_err_dma_buf_release;
+ }
+
+ ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
+ ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA;
+ ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy;
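+ /* Firmware pulls each fragment from this host physical address via DMA */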
+
+ /* Send the firmware image in chunks of payload_max_size */
+ fragment_offset = 0;
+ while (fragment_offset < fw->size) {
+ if (fragment_offset + payload_max_size < fw->size) {
+ fragment_size = payload_max_size;
+ ldr_xfer_dma_frag.fragment.is_last = 0;
+ } else {
+ fragment_size = fw->size - fragment_offset;
+ ldr_xfer_dma_frag.fragment.is_last = 1;
+ }
+
+ ldr_xfer_dma_frag.fragment.offset = fragment_offset;
+ ldr_xfer_dma_frag.fragment.size = fragment_size;
+ memcpy(dma_buf, &fw->data[fragment_offset], fragment_size);
+
+ dma_sync_single_for_device(devc, dma_buf_phy,
+ payload_max_size,
+ DMA_TO_DEVICE);
+
+ /*
+ * Flush the cache here because dma_sync_single_for_device()
+ * does not do so on x86.
+ */
+ clflush_cache_range(dma_buf, payload_max_size);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "xfer_mode=dma offset=0x%08x size=0x%x is_last=%d ddr_phys_addr=0x%016llx\n",
+ ldr_xfer_dma_frag.fragment.offset,
+ ldr_xfer_dma_frag.fragment.size,
+ ldr_xfer_dma_frag.fragment.is_last,
+ ldr_xfer_dma_frag.ddr_phys_addr);
+
+ rv = loader_cl_send(client_data,
+ (u8 *)&ldr_xfer_dma_frag,
+ sizeof(ldr_xfer_dma_frag),
+ (u8 *)&ldr_xfer_dma_frag_ack,
+ sizeof(ldr_xfer_dma_frag_ack));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ goto end_err_resp_buf_release;
+ }
+
+ fragment_offset += fragment_size;
+ }
+
+ dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
+ kfree(dma_buf);
+ return 0;
+
+end_err_resp_buf_release:
+ /* Error path: unmap and free the DMA buffer */
+ dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
+end_err_dma_buf_release:
+ kfree(dma_buf);
+ return rv;
+}
+
+/**
+ * ish_fw_start() - Start executing ISH main firmware
+ * @client_data: client data instance
+ *
+ * This function sends a message to the Shim firmware loader to start
+ * execution of the ISH main firmware.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_start(struct ishtp_cl_data *client_data)
+{
+ struct loader_start ldr_start;
+ struct loader_msg_hdr ldr_start_ack;
+
+ memset(&ldr_start, 0, sizeof(ldr_start));
+ ldr_start.hdr.command = LOADER_CMD_START;
+ return loader_cl_send(client_data,
+ (u8 *)&ldr_start,
+ sizeof(ldr_start),
+ (u8 *)&ldr_start_ack,
+ sizeof(ldr_start_ack));
+}
+
+/**
+ * load_fw_from_host() - Loads ISH firmware from host
+ * @client_data: Client data instance
+ *
+ * This function loads the ISH firmware to ISH SRAM and starts execution.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int load_fw_from_host(struct ishtp_cl_data *client_data)
+{
+ int rv;
+ u32 xfer_mode;
+ char *filename;
+ const struct firmware *fw;
+ struct shim_fw_info fw_info;
+ struct ishtp_cl *loader_ishtp_cl = client_data->loader_ishtp_cl;
+
+ client_data->flag_retry = false;
+
+ filename = kzalloc(FILENAME_SIZE, GFP_KERNEL);
+ if (!filename) {
+ client_data->flag_retry = true;
+ rv = -ENOMEM;
+ goto end_error;
+ }
+
+ /* Get filename of the ISH firmware to be loaded */
+ rv = get_firmware_variant(client_data, filename);
+ if (rv < 0)
+ goto end_err_filename_buf_release;
+
+ rv = request_firmware(&fw, filename, cl_data_to_dev(client_data));
+ if (rv < 0)
+ goto end_err_filename_buf_release;
+
+ /* Step 1: Query Shim firmware loader properties */
+
+ rv = ish_query_loader_prop(client_data, fw, &fw_info);
+ if (rv < 0)
+ goto end_err_fw_release;
+
+ /* Step 2: Send the main firmware image to be loaded, to ISH SRAM */
+
+ xfer_mode = fw_info.ldr_capability.xfer_mode;
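+ /* Prefer direct DMA when the loader supports it; fall back to ISH-TP */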
+ if (xfer_mode & LOADER_XFER_MODE_DIRECT_DMA) {
+ rv = ish_fw_xfer_direct_dma(client_data, fw, fw_info);
+ } else if (xfer_mode & LOADER_XFER_MODE_ISHTP) {
+ rv = ish_fw_xfer_ishtp(client_data, fw);
+ } else {
+ dev_err(cl_data_to_dev(client_data),
+ "No transfer mode selected in firmware\n");
+ rv = -EINVAL;
+ }
+ if (rv < 0)
+ goto end_err_fw_release;
+
+ /* Step 3: Start ISH main firmware execution */
+
+ rv = ish_fw_start(client_data);
+ if (rv < 0)
+ goto end_err_fw_release;
+
+ /* Log while filename is still valid, then free resources */
+ dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n",
+ filename);
+ release_firmware(fw);
+ kfree(filename);
+ return 0;
+
+end_err_fw_release:
+ release_firmware(fw);
+end_err_filename_buf_release:
+ kfree(filename);
+end_error:
+ /* Keep a count of retries; give up after MAX_LOAD_ATTEMPTS attempts */
+ if (client_data->flag_retry &&
+ client_data->retry_count++ < MAX_LOAD_ATTEMPTS) {
+ dev_warn(cl_data_to_dev(client_data),
+ "ISH host firmware load failed %d. Resetting ISH, and trying again..\n",
+ rv);
+ ish_hw_reset(ishtp_get_ishtp_device(loader_ishtp_cl));
+ } else {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH host firmware load failed %d\n", rv);
+ }
+ return rv;
+}
+
+static void load_fw_from_host_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data;
+
+ client_data = container_of(work, struct ishtp_cl_data,
+ work_fw_load);
+ load_fw_from_host(client_data);
+}
+
+/**
+ * loader_init() - Init function for ISH-TP client
+ * @loader_ishtp_cl: ISH-TP client instance
+ * @reset: true if called for init after reset
+ *
+ * Return: 0 for success, negative error code for failure
+ */
+static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset)
+{
+ int rv;
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_cl_data *client_data =
+ ishtp_get_client_data(loader_ishtp_cl);
+
+ dev_dbg(cl_data_to_dev(client_data), "reset flag: %d\n", reset);
+
+ rv = ishtp_cl_link(loader_ishtp_cl);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data), "ishtp_cl_link failed\n");
+ return rv;
+ }
+
+ /* Connect to firmware client */
+ ishtp_set_tx_ring_size(loader_ishtp_cl, LOADER_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(loader_ishtp_cl, LOADER_CL_RX_RING_SIZE);
+
+ fw_client =
+ ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl),
+ &loader_ishtp_guid);
+ if (!fw_client) {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH client uuid not found\n");
+ rv = -ENOENT;
+ goto err_cl_unlink;
+ }
+
+ ishtp_cl_set_fw_client_id(loader_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+ ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_CONNECTING);
+
+ rv = ishtp_cl_connect(loader_ishtp_cl);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data), "Client connect fail\n");
+ goto err_cl_unlink;
+ }
+
+ dev_dbg(cl_data_to_dev(client_data), "Client connected\n");
+
+ ishtp_register_event_cb(client_data->cl_device, loader_cl_event_cb);
+
+ return 0;
+
+err_cl_unlink:
+ ishtp_cl_unlink(loader_ishtp_cl);
+ return rv;
+}
+
+static void loader_deinit(struct ishtp_cl *loader_ishtp_cl)
+{
+ ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(loader_ishtp_cl);
+ ishtp_cl_unlink(loader_ishtp_cl);
+ ishtp_cl_flush_queues(loader_ishtp_cl);
+
+ /* Disband and free all Tx and Rx client-level rings */
+ ishtp_cl_free(loader_ishtp_cl);
+}
+
+static void reset_handler(struct work_struct *work)
+{
+ int rv;
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+
+ client_data = container_of(work, struct ishtp_cl_data,
+ work_ishtp_reset);
+
+ loader_ishtp_cl = client_data->loader_ishtp_cl;
+ cl_device = client_data->cl_device;
+
+ /* Unlink, flush queues & start again */
+ ishtp_cl_unlink(loader_ishtp_cl);
+ ishtp_cl_flush_queues(loader_ishtp_cl);
+ ishtp_cl_free(loader_ishtp_cl);
+
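+ /* Allocate a fresh client instance and redo the connection from scratch */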
+ loader_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!loader_ishtp_cl)
+ return;
+
+ ishtp_set_drvdata(cl_device, loader_ishtp_cl);
+ ishtp_set_client_data(loader_ishtp_cl, client_data);
+ client_data->loader_ishtp_cl = loader_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ rv = loader_init(loader_ishtp_cl, 1);
+ if (rv < 0) {
+ dev_err(ishtp_device(cl_device), "Reset Failed\n");
+ return;
+ }
+
+ /* ISH firmware loading from host */
+ load_fw_from_host(client_data);
+}
+
+/**
+ * loader_ishtp_cl_probe() - ISH-TP client driver probe
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device create on ISH-TP bus
+ *
+ * Return: 0 for success, negative error code for failure
+ */
+static int loader_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_data *client_data;
+ int rv;
+
+ client_data = devm_kzalloc(ishtp_device(cl_device),
+ sizeof(*client_data),
+ GFP_KERNEL);
+ if (!client_data)
+ return -ENOMEM;
+
+ loader_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!loader_ishtp_cl)
+ return -ENOMEM;
+
+ ishtp_set_drvdata(cl_device, loader_ishtp_cl);
+ ishtp_set_client_data(loader_ishtp_cl, client_data);
+ client_data->loader_ishtp_cl = loader_ishtp_cl;
+ client_data->cl_device = cl_device;
+
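+ /* Senders sleep on this wait queue until process_recv() posts a response */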
+ init_waitqueue_head(&client_data->response.wait_queue);
+
+ INIT_WORK(&client_data->work_ishtp_reset,
+ reset_handler);
+ INIT_WORK(&client_data->work_fw_load,
+ load_fw_from_host_handler);
+
+ rv = loader_init(loader_ishtp_cl, 0);
+ if (rv < 0) {
+ ishtp_cl_free(loader_ishtp_cl);
+ return rv;
+ }
+ ishtp_get_device(cl_device);
+
+ client_data->retry_count = 0;
+
+ /* ISH firmware loading from host */
+ schedule_work(&client_data->work_fw_load);
+
+ return 0;
+}
+
+/**
+ * loader_ishtp_cl_remove() - ISH-TP client driver remove
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device remove on ISH-TP bus
+ *
+ * Return: 0
+ */
+static int loader_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ client_data = ishtp_get_client_data(loader_ishtp_cl);
+
+ /*
+ * The sequence of the following two cancel_work_sync() calls is
+ * important: work_fw_load can in turn schedule work_ishtp_reset,
+ * so cancel work_fw_load first and then work_ishtp_reset.
+ */
+ cancel_work_sync(&client_data->work_fw_load);
+ cancel_work_sync(&client_data->work_ishtp_reset);
+ loader_deinit(loader_ishtp_cl);
+ ishtp_put_device(cl_device);
+
+ return 0;
+}
+
+/**
+ * loader_ishtp_cl_reset() - ISH-TP client driver reset
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device reset on ISH-TP bus
+ *
+ * Return: 0
+ */
+static int loader_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ client_data = ishtp_get_client_data(loader_ishtp_cl);
+
+ schedule_work(&client_data->work_ishtp_reset);
+
+ return 0;
+}
+
+static struct ishtp_cl_driver loader_ishtp_cl_driver = {
+ .name = "ish-loader",
+ .guid = &loader_ishtp_guid,
+ .probe = loader_ishtp_cl_probe,
+ .remove = loader_ishtp_cl_remove,
+ .reset = loader_ishtp_cl_reset,
+};
+
+static int __init ish_loader_init(void)
+{
+ return ishtp_cl_driver_register(&loader_ishtp_cl_driver, THIS_MODULE);
+}
+
+static void __exit ish_loader_exit(void)
+{
+ ishtp_cl_driver_unregister(&loader_ishtp_cl_driver);
+}
+
+late_initcall(ish_loader_init);
+module_exit(ish_loader_exit);
+
+module_param(dma_buf_size_limit, int, 0644);
+MODULE_PARM_DESC(dma_buf_size_limit, "Limit the DMA buf size to this value in bytes");
+
+MODULE_DESCRIPTION("ISH ISH-TP Host firmware Loader Client Driver");
+MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 30fe0c5e6fad..56777a43e69c 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -15,15 +15,16 @@
#include <linux/module.h>
#include <linux/hid.h>
+#include <linux/intel-ish-client-if.h>
#include <linux/sched.h>
-#include "ishtp/ishtp-dev.h"
-#include "ishtp/client.h"
#include "ishtp-hid.h"
/* Rx ring buffer pool size */
#define HID_CL_RX_RING_SIZE 32
#define HID_CL_TX_RING_SIZE 16
+#define cl_data_to_dev(client_data) ishtp_device(client_data->cl_device)
+
/**
* report_bad_packets() - Report bad packets
* @hid_ishtp_cl: Client instance to get stats
@@ -37,9 +38,9 @@ static void report_bad_packet(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
size_t cur_pos, size_t payload_len)
{
struct hostif_msg *recv_msg = recv_buf;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
- dev_err(&client_data->cl_device->dev, "[hid-ish]: BAD packet %02X\n"
+ dev_err(cl_data_to_dev(client_data), "[hid-ish]: BAD packet %02X\n"
"total_bad=%u cur_pos=%u\n"
"[%02X %02X %02X %02X]\n"
"payload_len=%u\n"
@@ -69,13 +70,15 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
unsigned char *payload;
struct device_info *dev_info;
int i, j;
- size_t payload_len, total_len, cur_pos;
+ size_t payload_len, total_len, cur_pos, raw_len;
int report_type;
struct report_list *reports_list;
char *reports;
size_t report_len;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int curr_hid_dev = client_data->cur_hid_dev;
+ struct ishtp_hid_data *hid_data = NULL;
+ struct hid_device *hid = NULL;
payload = recv_buf + sizeof(struct hostif_msg_hdr);
total_len = data_len;
@@ -83,12 +86,12 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
do {
if (cur_pos + sizeof(struct hostif_msg) > total_len) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: error, received %u which is less than data header %u\n",
(unsigned int)data_len,
(unsigned int)sizeof(struct hostif_msg_hdr));
++client_data->bad_recv_cnt;
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
@@ -101,7 +104,7 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
++client_data->bad_recv_cnt;
report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
@@ -116,18 +119,18 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
report_bad_packet(hid_ishtp_cl, recv_msg,
cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
client_data->hid_dev_count = (unsigned int)*payload;
if (!client_data->hid_devices)
client_data->hid_devices = devm_kcalloc(
- &client_data->cl_device->dev,
+ cl_data_to_dev(client_data),
client_data->hid_dev_count,
sizeof(struct device_info),
GFP_KERNEL);
if (!client_data->hid_devices) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"Mem alloc failed for hid device info\n");
wake_up_interruptible(&client_data->init_wait);
break;
@@ -135,7 +138,7 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
for (i = 0; i < client_data->hid_dev_count; ++i) {
if (1 + sizeof(struct device_info) * i >=
payload_len) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: [ENUM_DEVICES]: content size %zu is bigger than payload_len %zu\n",
1 + sizeof(struct device_info)
* i, payload_len);
@@ -165,12 +168,12 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
report_bad_packet(hid_ishtp_cl, recv_msg,
cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
if (!client_data->hid_descr[curr_hid_dev])
client_data->hid_descr[curr_hid_dev] =
- devm_kmalloc(&client_data->cl_device->dev,
+ devm_kmalloc(cl_data_to_dev(client_data),
payload_len, GFP_KERNEL);
if (client_data->hid_descr[curr_hid_dev]) {
memcpy(client_data->hid_descr[curr_hid_dev],
@@ -190,12 +193,12 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
report_bad_packet(hid_ishtp_cl, recv_msg,
cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
if (!client_data->report_descr[curr_hid_dev])
client_data->report_descr[curr_hid_dev] =
- devm_kmalloc(&client_data->cl_device->dev,
+ devm_kmalloc(cl_data_to_dev(client_data),
payload_len, GFP_KERNEL);
if (client_data->report_descr[curr_hid_dev]) {
memcpy(client_data->report_descr[curr_hid_dev],
@@ -219,18 +222,31 @@ do_get_report:
/* Get index of device that matches this id */
for (i = 0; i < client_data->num_hid_devices; ++i) {
if (recv_msg->hdr.device_id ==
- client_data->hid_devices[i].dev_id)
- if (client_data->hid_sensor_hubs[i]) {
- hid_input_report(
- client_data->hid_sensor_hubs[
- i],
- report_type, payload,
- payload_len, 0);
- ishtp_hid_wakeup(
- client_data->hid_sensor_hubs[
- i]);
+ client_data->hid_devices[i].dev_id) {
+ hid = client_data->hid_sensor_hubs[i];
+ if (!hid)
break;
+
+ hid_data = hid->driver_data;
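+ /* A raw GET request copies the payload straight to the caller's buffer */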
+ if (hid_data->raw_get_req) {
+ raw_len =
+ (hid_data->raw_buf_size <
+ payload_len) ?
+ hid_data->raw_buf_size :
+ payload_len;
+
+ memcpy(hid_data->raw_buf,
+ payload, raw_len);
+ } else {
+ hid_input_report
+ (hid, report_type,
+ payload, payload_len,
+ 0);
}
+
+ ishtp_hid_wakeup(hid);
+ break;
+ }
}
break;
@@ -295,7 +311,7 @@ do_get_report:
++client_data->bad_recv_cnt;
report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
@@ -475,7 +491,7 @@ int ishtp_hid_link_ready_wait(struct ishtp_cl_data *client_data)
static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
{
struct hostif_msg msg;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int retry_count;
int rv;
@@ -501,18 +517,18 @@ static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
sizeof(struct hostif_msg));
}
if (!client_data->enum_devices_done) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: timed out waiting for enum_devices\n");
return -ETIMEDOUT;
}
if (!client_data->hid_devices) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: failed to allocate HID dev structures\n");
return -ENOMEM;
}
client_data->num_hid_devices = client_data->hid_dev_count;
- dev_info(&hid_ishtp_cl->device->dev,
+ dev_info(ishtp_device(client_data->cl_device),
"[hid-ish]: enum_devices_done OK, num_hid_devices=%d\n",
client_data->num_hid_devices);
@@ -531,7 +547,7 @@ static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
static int ishtp_get_hid_descriptor(struct ishtp_cl *hid_ishtp_cl, int index)
{
struct hostif_msg msg;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int rv;
/* Get HID descriptor */
@@ -549,13 +565,13 @@ static int ishtp_get_hid_descriptor(struct ishtp_cl *hid_ishtp_cl, int index)
client_data->hid_descr_done,
3 * HZ);
if (!client_data->hid_descr_done) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: timed out for hid_descr_done\n");
return -EIO;
}
if (!client_data->hid_descr[index]) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: allocation HID desc fail\n");
return -ENOMEM;
}
@@ -578,7 +594,7 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
int index)
{
struct hostif_msg msg;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int rv;
/* Get report descriptor */
@@ -596,12 +612,12 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
client_data->report_descr_done,
3 * HZ);
if (!client_data->report_descr_done) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: timed out for report descr\n");
return -EIO;
}
if (!client_data->report_descr[index]) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: failed to alloc report descr\n");
return -ENOMEM;
}
@@ -626,42 +642,42 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
{
struct ishtp_device *dev;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
struct ishtp_fw_client *fw_client;
int i;
int rv;
- dev_dbg(&client_data->cl_device->dev, "%s\n", __func__);
+ dev_dbg(cl_data_to_dev(client_data), "%s\n", __func__);
hid_ishtp_trace(client_data, "%s reset flag: %d\n", __func__, reset);
- rv = ishtp_cl_link(hid_ishtp_cl, ISHTP_HOST_CLIENT_ID_ANY);
+ rv = ishtp_cl_link(hid_ishtp_cl);
if (rv) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"ishtp_cl_link failed\n");
return -ENOMEM;
}
client_data->init_done = 0;
- dev = hid_ishtp_cl->dev;
+ dev = ishtp_get_ishtp_device(hid_ishtp_cl);
/* Connect to FW client */
- hid_ishtp_cl->rx_ring_size = HID_CL_RX_RING_SIZE;
- hid_ishtp_cl->tx_ring_size = HID_CL_TX_RING_SIZE;
+ ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE);
fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid);
if (!fw_client) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"ish client uuid not found\n");
return -ENOENT;
}
-
- hid_ishtp_cl->fw_client_id = fw_client->client_id;
- hid_ishtp_cl->state = ISHTP_CL_CONNECTING;
+ ishtp_cl_set_fw_client_id(hid_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_CONNECTING);
rv = ishtp_cl_connect(hid_ishtp_cl);
if (rv) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"client connect fail\n");
goto err_cl_unlink;
}
@@ -669,7 +685,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
hid_ishtp_trace(client_data, "%s client connected\n", __func__);
/* Register read callback */
- ishtp_register_event_cb(hid_ishtp_cl->device, ish_cl_event_cb);
+ ishtp_register_event_cb(client_data->cl_device, ish_cl_event_cb);
rv = ishtp_enum_enum_devices(hid_ishtp_cl);
if (rv)
@@ -692,7 +708,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
if (!reset) {
rv = ishtp_hid_probe(i, client_data);
if (rv) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: HID probe for #%u failed: %d\n",
i, rv);
goto err_cl_disconnect;
@@ -707,7 +723,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
return 0;
err_cl_disconnect:
- hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
ishtp_cl_disconnect(hid_ishtp_cl);
err_cl_unlink:
ishtp_cl_unlink(hid_ishtp_cl);
@@ -744,16 +760,16 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- dev_dbg(&cl_device->dev, "%s\n", __func__);
+ dev_dbg(ishtp_device(client_data->cl_device), "%s\n", __func__);
hid_ishtp_cl_deinit(hid_ishtp_cl);
- hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device);
if (!hid_ishtp_cl)
return;
ishtp_set_drvdata(cl_device, hid_ishtp_cl);
- hid_ishtp_cl->client_data = client_data;
+ ishtp_set_client_data(hid_ishtp_cl, client_data);
client_data->hid_ishtp_cl = hid_ishtp_cl;
client_data->num_hid_devices = 0;
@@ -762,15 +778,17 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
rv = hid_ishtp_cl_init(hid_ishtp_cl, 1);
if (!rv)
break;
- dev_err(&client_data->cl_device->dev, "Retry reset init\n");
+ dev_err(cl_data_to_dev(client_data), "Retry reset init\n");
}
if (rv) {
- dev_err(&client_data->cl_device->dev, "Reset Failed\n");
+ dev_err(cl_data_to_dev(client_data), "Reset Failed\n");
hid_ishtp_trace(client_data, "%s Failed hid_ishtp_cl %p\n",
__func__, hid_ishtp_cl);
}
}
+void (*hid_print_trace)(void *unused, const char *format, ...);
+
/**
* hid_ishtp_cl_probe() - ISHTP client driver probe
* @cl_device: ISHTP client device instance
@@ -788,21 +806,18 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
if (!cl_device)
return -ENODEV;
- if (!guid_equal(&hid_ishtp_guid,
- &cl_device->fw_client->props.protocol_name))
- return -ENODEV;
-
- client_data = devm_kzalloc(&cl_device->dev, sizeof(*client_data),
+ client_data = devm_kzalloc(ishtp_device(cl_device),
+ sizeof(*client_data),
GFP_KERNEL);
if (!client_data)
return -ENOMEM;
- hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device);
if (!hid_ishtp_cl)
return -ENOMEM;
ishtp_set_drvdata(cl_device, hid_ishtp_cl);
- hid_ishtp_cl->client_data = client_data;
+ ishtp_set_client_data(hid_ishtp_cl, client_data);
client_data->hid_ishtp_cl = hid_ishtp_cl;
client_data->cl_device = cl_device;
@@ -811,6 +826,8 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+ hid_print_trace = ishtp_trace_callback(cl_device);
+
rv = hid_ishtp_cl_init(hid_ishtp_cl, 0);
if (rv) {
ishtp_cl_free(hid_ishtp_cl);
@@ -832,13 +849,13 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- dev_dbg(&cl_device->dev, "%s\n", __func__);
- hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
ishtp_cl_disconnect(hid_ishtp_cl);
ishtp_put_device(cl_device);
ishtp_hid_remove(client_data);
@@ -862,7 +879,7 @@ static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
@@ -872,8 +889,6 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
return 0;
}
-#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
-
/**
* hid_ishtp_cl_suspend() - ISHTP client driver suspend
* @device: device instance
@@ -884,9 +899,9 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_suspend(struct device *device)
{
- struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
@@ -905,9 +920,9 @@ static int hid_ishtp_cl_suspend(struct device *device)
*/
static int hid_ishtp_cl_resume(struct device *device)
{
- struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
@@ -922,6 +937,7 @@ static const struct dev_pm_ops hid_ishtp_pm_ops = {
static struct ishtp_cl_driver hid_ishtp_cl_driver = {
.name = "ish-hid",
+ .guid = &hid_ishtp_guid,
.probe = hid_ishtp_cl_probe,
.remove = hid_ishtp_cl_remove,
.reset = hid_ishtp_cl_reset,
@@ -933,7 +949,7 @@ static int __init ish_hid_init(void)
int rv;
/* Register ISHTP client device driver with ISHTP Bus */
- rv = ishtp_cl_driver_register(&hid_ishtp_cl_driver);
+ rv = ishtp_cl_driver_register(&hid_ishtp_cl_driver, THIS_MODULE);
return rv;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index bc4c536f3c0d..62c03561adaa 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -14,8 +14,8 @@
*/
#include <linux/hid.h>
+#include <linux/intel-ish-client-if.h>
#include <uapi/linux/input.h>
-#include "ishtp/client.h"
#include "ishtp-hid.h"
/**
@@ -59,10 +59,46 @@ static void ishtp_hid_close(struct hid_device *hid)
{
}
-static int ishtp_raw_request(struct hid_device *hdev, unsigned char reportnum,
- __u8 *buf, size_t len, unsigned char rtype, int reqtype)
+static int ishtp_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
{
- return 0;
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ char *ishtp_buf = NULL;
+ size_t ishtp_buf_len;
+ unsigned int header_size = sizeof(struct hostif_msg);
+
+ if (rtype == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ hid_data->request_done = false;
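+ /* GET uses the caller's buffer directly; SET wraps the report in a hostif message */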
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ hid_data->raw_buf = buf;
+ hid_data->raw_buf_size = len;
+ hid_data->raw_get_req = true;
+
+ hid_ishtp_get_report(hid, reportnum, rtype);
+ break;
+ case HID_REQ_SET_REPORT:
+ /*
+ * Spare 7 bytes for 64b accesses through
+ * get/put_unaligned_le64()
+ */
+ ishtp_buf_len = len + header_size;
+ ishtp_buf = kzalloc(ishtp_buf_len + 7, GFP_KERNEL);
+ if (!ishtp_buf)
+ return -ENOMEM;
+
+ memcpy(ishtp_buf + header_size, buf, len);
+ hid_ishtp_set_feature(hid, ishtp_buf, ishtp_buf_len, reportnum);
+ kfree(ishtp_buf);
+ break;
+ }
+
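+ /* Block until the firmware response arrives (or the wait times out) */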
+ hid_hw_wait(hid);
+
+ return len;
}
/**
@@ -87,6 +123,7 @@ static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,
hid_data->request_done = false;
switch (reqtype) {
case HID_REQ_GET_REPORT:
+ hid_data->raw_get_req = false;
hid_ishtp_get_report(hid, rep->id, rep->type);
break;
case HID_REQ_SET_REPORT:
@@ -116,7 +153,6 @@ static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,
static int ishtp_wait_for_response(struct hid_device *hid)
{
struct ishtp_hid_data *hid_data = hid->driver_data;
- struct ishtp_cl_data *client_data = hid_data->client_data;
int rv;
hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
@@ -204,7 +240,8 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
hid->ll_driver = &ishtp_hid_ll_driver;
hid->bus = BUS_INTEL_ISHTP;
- hid->dev.parent = &client_data->cl_device->dev;
+ hid->dev.parent = ishtp_device(client_data->cl_device);
+
hid->version = le16_to_cpu(ISH_HID_VERSION);
hid->vendor = le16_to_cpu(client_data->hid_devices[cur_hid_dev].vid);
hid->product = le16_to_cpu(client_data->hid_devices[cur_hid_dev].pid);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 1cd07a441cd4..e27d3d6c1920 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -24,9 +24,9 @@
#define IS_RESPONSE 0x80
/* Used to dump to Linux trace buffer, if enabled */
-#define hid_ishtp_trace(client, ...) \
- client->cl_device->ishtp_dev->print_log(\
- client->cl_device->ishtp_dev, __VA_ARGS__)
+extern void (*hid_print_trace)(void *unused, const char *format, ...);
+#define hid_ishtp_trace(client, ...) \
+ (hid_print_trace)(NULL, __VA_ARGS__)
/* ISH Transport protocol (ISHTP in short) GUID */
static const guid_t hid_ishtp_guid =
@@ -159,6 +159,9 @@ struct ishtp_cl_data {
* @client_data: Link to the client instance
* @hid_wait: Completion waitq
*
+ * @raw_get_req: Flag indicating raw get request ongoing
+ * @raw_buf: raw request buffer filled on receiving get report
+ * @raw_buf_size: raw request buffer size
* Used to tie hid hid->driver data to driver client instance
*/
struct ishtp_hid_data {
@@ -166,6 +169,11 @@ struct ishtp_hid_data {
bool request_done;
struct ishtp_cl_data *client_data;
wait_queue_head_t hid_wait;
+
+ /* raw request */
+ bool raw_get_req;
+ u8 *raw_buf;
+ size_t raw_buf_size;
};
/* Interface functions between HID LL driver and ISH TP client */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index d5f4b6438d86..fb8ca12955b4 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -171,6 +171,19 @@ struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
EXPORT_SYMBOL(ishtp_fw_cl_get_client);
/**
+ * ishtp_get_fw_client_id() - Get fw client id
+ * @fw_client: firmware client instance
+ *
+ * This interface is used to get the firmware client id.
+ *
+ * Return: firmware client id.
+ */
+int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client)
+{
+ return fw_client->client_id;
+}
+EXPORT_SYMBOL(ishtp_get_fw_client_id);
+
+/**
* ishtp_fw_cl_by_id() - return index to fw_clients for client_id
* @dev: the ishtp device structure
* @client_id: fw client id to search
@@ -220,6 +233,26 @@ static int ishtp_cl_device_probe(struct device *dev)
}
/**
+ * ishtp_cl_bus_match() - Bus match() callback
+ * @dev: the device structure
+ * @drv: the driver structure
+ *
+ * This is a bus match callback, called when a new ishtp_cl_device is
+ * registered during ishtp bus client enumeration. Use the guid_t in
+ * drv and dev to decide whether they match or not.
+ *
+ * Return: 1 if dev & drv match, 0 otherwise.
+ */
+static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
+
+ return guid_equal(driver->guid,
+ &device->fw_client->props.protocol_name);
+}
+
+/**
* ishtp_cl_device_remove() - Bus remove() callback
* @dev: the device structure
*
@@ -372,6 +405,7 @@ static struct bus_type ishtp_cl_bus_type = {
.name = "ishtp",
.dev_groups = ishtp_cl_dev_groups,
.probe = ishtp_cl_device_probe,
+ .match = ishtp_cl_bus_match,
.remove = ishtp_cl_device_remove,
.pm = &ishtp_cl_bus_dev_pm_ops,
.uevent = ishtp_cl_uevent,
@@ -445,6 +479,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
}
ishtp_device_ready = true;
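+ /* Stash the device pointer so PM callbacks can use dev_get_drvdata() */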
+ dev_set_drvdata(&device->dev, device);
return device;
}
@@ -464,7 +499,7 @@ static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
}
/**
- * __ishtp_cl_driver_register() - Client driver register
+ * ishtp_cl_driver_register() - Client driver register
* @driver: the client driver instance
* @owner: Owner of this driver module
*
@@ -473,8 +508,8 @@ static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
*
* Return: Return value of driver_register or -ENODEV if not ready
*/
-int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
- struct module *owner)
+int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner)
{
int err;
@@ -491,7 +526,7 @@ int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
return 0;
}
-EXPORT_SYMBOL(__ishtp_cl_driver_register);
+EXPORT_SYMBOL(ishtp_cl_driver_register);
/**
* ishtp_cl_driver_unregister() - Client driver unregister
@@ -807,6 +842,59 @@ int ishtp_use_dma_transfer(void)
}
/**
+ * ishtp_device() - Return device pointer
+ * @device: ISH-TP client device instance
+ *
+ * This interface is used to return device pointer from ishtp_cl_device
+ * instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_device(struct ishtp_cl_device *device)
+{
+ return &device->dev;
+}
+EXPORT_SYMBOL(ishtp_device);
+
+/**
+ * ishtp_get_pci_device() - Return PCI device dev pointer
+ * @device: ISH-TP client device instance
+ *
+ * This interface is used to return the PCI device pointer
+ * from an ishtp_cl_device instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_get_pci_device(struct ishtp_cl_device *device)
+{
+ return device->ishtp_dev->devc;
+}
+EXPORT_SYMBOL(ishtp_get_pci_device);
+
+/**
+ * ishtp_trace_callback() - Return trace callback
+ * @cl_device: ISH-TP client device instance
+ *
+ * This interface is used to return trace callback function pointer.
+ *
+ * Return: void *.
+ */
+void *ishtp_trace_callback(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->ishtp_dev->print_log;
+}
+EXPORT_SYMBOL(ishtp_trace_callback);
+
+/**
+ * ish_hw_reset() - Call HW reset IPC callback
+ * @dev: ISHTP device instance
+ *
+ * This interface is used to reset HW in case of error.
+ *
+ * Return: value from IPC hw_reset callback
+ */
+int ish_hw_reset(struct ishtp_device *dev)
+{
+ return dev->ops->hw_reset(dev);
+}
+EXPORT_SYMBOL(ish_hw_reset);
+
+/**
* ishtp_bus_register() - Function to register bus
*
* This register ishtp bus
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
index 4cf7ad586c37..93d516f5a853 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.h
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/intel-ish-client-if.h>
struct ishtp_cl;
struct ishtp_cl_device;
@@ -52,25 +53,6 @@ struct ishtp_cl_device {
void (*event_cb)(struct ishtp_cl_device *device);
};
-/**
- * struct ishtp_cl_device - ISHTP device handle
- * @driver: driver instance on a bus
- * @name: Name of the device for probe
- * @probe: driver callback for device probe
- * @remove: driver callback on device removal
- *
- * Client drivers defines to get probed/removed for ISHTP client device.
- */
-struct ishtp_cl_driver {
- struct device_driver driver;
- const char *name;
- int (*probe)(struct ishtp_cl_device *dev);
- int (*remove)(struct ishtp_cl_device *dev);
- int (*reset)(struct ishtp_cl_device *dev);
- const struct dev_pm_ops *pm;
-};
-
-
int ishtp_bus_new_client(struct ishtp_device *dev);
void ishtp_remove_all_clients(struct ishtp_device *dev);
int ishtp_cl_device_bind(struct ishtp_cl *cl);
@@ -98,22 +80,5 @@ void ishtp_recv(struct ishtp_device *dev);
void ishtp_reset_handler(struct ishtp_device *dev);
void ishtp_reset_compl_handler(struct ishtp_device *dev);
-void ishtp_put_device(struct ishtp_cl_device *);
-void ishtp_get_device(struct ishtp_cl_device *);
-
-void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
-void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
-
-int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
- struct module *owner);
-#define ishtp_cl_driver_register(driver) \
- __ishtp_cl_driver_register(driver, THIS_MODULE)
-void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
-
-int ishtp_register_event_cb(struct ishtp_cl_device *device,
- void (*read_cb)(struct ishtp_cl_device *));
int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const guid_t *cuuid);
-struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
- const guid_t *uuid);
-
#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index faeccdb1475b..b7ac5e3b1e82 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -126,7 +126,7 @@ static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
*
* Return: The allocated client instance or NULL on failure
*/
-struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *cl;
@@ -134,7 +134,7 @@ struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
if (!cl)
return NULL;
- ishtp_cl_init(cl, dev);
+ ishtp_cl_init(cl, cl_device->ishtp_dev);
return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);
@@ -168,9 +168,6 @@ EXPORT_SYMBOL(ishtp_cl_free);
/**
* ishtp_cl_link() - Reserve a host id and link the client instance
* @cl: client device instance
- * @id: host client id to use. It can be ISHTP_HOST_CLIENT_ID_ANY if any
- * id from the available can be used
- *
*
* This allocates a single bit in the hostmap. This function will make sure
* that not many client sessions are opened at the same time. Once allocated
@@ -179,11 +176,11 @@ EXPORT_SYMBOL(ishtp_cl_free);
*
* Return: 0 or error code on failure
*/
-int ishtp_cl_link(struct ishtp_cl *cl, int id)
+int ishtp_cl_link(struct ishtp_cl *cl)
{
struct ishtp_device *dev;
- unsigned long flags, flags_cl;
- int ret = 0;
+ unsigned long flags, flags_cl;
+ int id, ret = 0;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
@@ -197,10 +194,7 @@ int ishtp_cl_link(struct ishtp_cl *cl, int id)
goto unlock_dev;
}
- /* If Id is not assigned get one*/
- if (id == ISHTP_HOST_CLIENT_ID_ANY)
- id = find_first_zero_bit(dev->host_clients_map,
- ISHTP_CLIENTS_MAX);
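+ /* Host client ids are now always auto-allocated from the bitmap */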
+ id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);
if (id >= ISHTP_CLIENTS_MAX) {
spin_unlock_irqrestore(&dev->device_lock, flags);
@@ -1069,3 +1063,45 @@ void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
eoi:
return;
}
+
+void *ishtp_get_client_data(struct ishtp_cl *cl)
+{
+ return cl->client_data;
+}
+EXPORT_SYMBOL(ishtp_get_client_data);
+
+void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
+{
+ cl->client_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_client_data);
+
+struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
+{
+ return cl->dev;
+}
+EXPORT_SYMBOL(ishtp_get_ishtp_device);
+
+void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->tx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_tx_ring_size);
+
+void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->rx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_rx_ring_size);
+
+void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
+{
+ cl->state = state;
+}
+EXPORT_SYMBOL(ishtp_set_connection_state);
+
+void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
+{
+ cl->fw_client_id = fw_client_id;
+}
+EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index e0df3eb611e6..6ed00947d6bc 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -19,15 +19,6 @@
#include <linux/types.h>
#include "ishtp-dev.h"
-/* Client state */
-enum cl_state {
- ISHTP_CL_INITIALIZING = 0,
- ISHTP_CL_CONNECTING,
- ISHTP_CL_CONNECTED,
- ISHTP_CL_DISCONNECTING,
- ISHTP_CL_DISCONNECTED
-};
-
/* Tx and Rx ring size */
#define CL_DEF_RX_RING_SIZE 2
#define CL_DEF_TX_RING_SIZE 2
@@ -169,19 +160,4 @@ static inline bool ishtp_cl_cmp_id(const struct ishtp_cl *cl1,
(cl1->fw_client_id == cl2->fw_client_id);
}
-/* exported functions from ISHTP under client management scope */
-struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev);
-void ishtp_cl_free(struct ishtp_cl *cl);
-int ishtp_cl_link(struct ishtp_cl *cl, int id);
-void ishtp_cl_unlink(struct ishtp_cl *cl);
-int ishtp_cl_disconnect(struct ishtp_cl *cl);
-int ishtp_cl_connect(struct ishtp_cl *cl);
-int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
-int ishtp_cl_flush_queues(struct ishtp_cl *cl);
-
-/* exported functions from ISHTP client buffer management scope */
-int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
-bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
-struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
-
#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index e54ce1ef27dd..3cfef084b9fc 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -79,32 +79,6 @@ struct ishtp_fw_client {
uint8_t client_id;
};
-/**
- * struct ishtp_msg_data - ISHTP message data struct
- * @size: Size of data in the *data
- * @data: Pointer to data
- */
-struct ishtp_msg_data {
- uint32_t size;
- unsigned char *data;
-};
-
-/*
- * struct ishtp_cl_rb - request block structure
- * @list: Link to list members
- * @cl: ISHTP client instance
- * @buffer: message header
- * @buf_idx: Index into buffer
- * @read_time: unused at this time
- */
-struct ishtp_cl_rb {
- struct list_head list;
- struct ishtp_cl *cl;
- struct ishtp_msg_data buffer;
- unsigned long buf_idx;
- unsigned long read_time;
-};
-
/*
* Control info for IPC messages ISHTP/IPC sending FIFO -
* list with inline data buffer
@@ -264,11 +238,6 @@ static inline int ish_ipc_reset(struct ishtp_device *dev)
return dev->ops->ipc_reset(dev);
}
-static inline int ish_hw_reset(struct ishtp_device *dev)
-{
- return dev->ops->hw_reset(dev);
-}
-
/* Exported function */
void ishtp_device_init(struct ishtp_device *dev);
int ishtp_start(struct ishtp_device *dev);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6f929bfa9fcd..1915a18b537b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -17,7 +17,7 @@ menuconfig HWMON
To find out which specific driver(s) you need, use the
sensors-detect script from the lm_sensors package. Read
- <file:Documentation/hwmon/userspace-tools> for details.
+ <file:Documentation/hwmon/userspace-tools.rst> for details.
This support can also be built as a module. If so, the module
will be called hwmon.
@@ -59,7 +59,7 @@ config SENSORS_ABITUGURU
chip can be found on Abit uGuru featuring motherboards (most modern
Abit motherboards from before end 2005). For more info and a list
of which motherboards have which revision see
- Documentation/hwmon/abituguru
+ Documentation/hwmon/abituguru.rst
This driver can also be built as a module. If so, the module
will be called abituguru.
@@ -73,7 +73,7 @@ config SENSORS_ABITUGURU3
and their settings is supported. The third revision of the Abit
uGuru chip can be found on recent Abit motherboards (since end
2005). For more info and a list of which motherboards have which
- revision see Documentation/hwmon/abituguru3
+ revision see Documentation/hwmon/abituguru3.rst
This driver can also be built as a module. If so, the module
will be called abituguru3.
@@ -643,7 +643,7 @@ config SENSORS_CORETEMP
help
If you say yes here you get support for the temperature
sensor inside your CPU. Most of the family 6 CPUs
- are supported. Check Documentation/hwmon/coretemp for details.
+ are supported. Check Documentation/hwmon/coretemp.rst for details.
config SENSORS_IT87
tristate "ITE IT87xx and compatibles"
@@ -705,6 +705,16 @@ config SENSORS_LINEAGE
This driver can also be built as a module. If so, the module
will be called lineage-pem.
+config SENSORS_LOCHNAGAR
+ tristate "Lochnagar Hardware Monitor"
+ depends on MFD_LOCHNAGAR
+ help
+ If you say yes here you get support for the Lochnagar 2 temperature,
+ voltage and current sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called lochnagar-hwmon.
+
config SENSORS_LTC2945
tristate "Linear Technology LTC2945"
depends on I2C
@@ -898,6 +908,7 @@ config SENSORS_MAX6642
config SENSORS_MAX6650
tristate "Maxim MAX6650 sensor chip"
depends on I2C
+ depends on THERMAL || THERMAL=n
help
If you say yes here you get support for the MAX6650 / MAX6651
sensor chips.
@@ -1759,6 +1770,7 @@ config SENSORS_VT8231
config SENSORS_W83773G
tristate "Nuvoton W83773G"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the Nuvoton W83773G hardware
monitoring chip.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index f5c7b442e69e..8db472ea04f0 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
+obj-$(CONFIG_SENSORS_LOCHNAGAR) += lochnagar-hwmon.o
obj-$(CONFIG_SENSORS_LM63) += lm63.o
obj-$(CONFIG_SENSORS_LM70) += lm70.o
obj-$(CONFIG_SENSORS_LM73) += lm73.o
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index f13806d731fa..b176da8e92a7 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -215,7 +215,7 @@ static const struct i2c_device_id ad7414_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ad7414_id);
-static const struct of_device_id ad7414_of_match[] = {
+static const struct of_device_id __maybe_unused ad7414_of_match[] = {
{ .compatible = "ad,ad7414" },
{ },
};
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index ca794bf904de..e640be442dae 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -521,7 +521,7 @@ static const struct i2c_device_id adc128_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adc128_id);
-static const struct of_device_id adc128_of_match[] = {
+static const struct of_device_id __maybe_unused adc128_of_match[] = {
{ .compatible = "ti,adc128d818" },
{ },
};
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 1e4dad36f5ef..ec437cc9e739 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -174,7 +174,7 @@ static struct adm1025_data *adm1025_update_device(struct device *dev)
*/
static ssize_t
-show_in(struct device *dev, struct device_attribute *attr, char *buf)
+in_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -183,7 +183,7 @@ show_in(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_in_min(struct device *dev, struct device_attribute *attr, char *buf)
+in_min_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -192,7 +192,7 @@ show_in_min(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_in_max(struct device *dev, struct device_attribute *attr, char *buf)
+in_max_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -201,7 +201,7 @@ show_in_max(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_temp(struct device *dev, struct device_attribute *attr, char *buf)
+temp_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -209,7 +209,7 @@ show_temp(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_temp_min(struct device *dev, struct device_attribute *attr, char *buf)
+temp_min_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -217,15 +217,15 @@ show_temp_min(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_temp_max(struct device *dev, struct device_attribute *attr, char *buf)
+temp_max_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[index]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -245,8 +245,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -266,22 +266,28 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_in(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IWUSR | S_IRUGO, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IWUSR | S_IRUGO, \
- show_in_max, set_in_max, offset)
-set_in(0);
-set_in(1);
-set_in(2);
-set_in(3);
-set_in(4);
-set_in(5);
-
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -301,8 +307,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -322,15 +329,12 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_temp(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \
- show_temp_max, set_temp_max, offset - 1)
-set_temp(1);
-set_temp(2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
static ssize_t
alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -341,21 +345,21 @@ alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(alarms);
static ssize_t
-show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, alarm, 14);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
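The show_*/set_* to *_show/*_store renames are what make these conversions
work: SENSOR_DEVICE_ATTR_RO() and _RW() paste "_show" and "_store" onto their
second argument. Paraphrased from include/linux/hwmon-sysfs.h (see the header
for the exact definitions):

    #define SENSOR_DEVICE_ATTR_RO(_name, _func, _index) \
        SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)
    #define SENSOR_DEVICE_ATTR_RW(_name, _func, _index) \
        SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index)

    /* hence the old and new spellings declare the same attribute: */
    static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, alarm_show, NULL, 0);
    /* ...is equivalent to (not both at once)... */
    static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);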
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index e43f09a07cd0..d34a68a11036 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -477,24 +477,24 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
return data;
}
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in[nr]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_min[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -513,16 +513,16 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_max[nr]));
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -542,48 +542,72 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define in_reg(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in, \
- NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-
-in_reg(0);
-in_reg(1);
-in_reg(2);
-in_reg(3);
-in_reg(4);
-in_reg(5);
-in_reg(6);
-in_reg(7);
-in_reg(8);
-in_reg(9);
-in_reg(10);
-in_reg(11);
-in_reg(12);
-in_reg(13);
-in_reg(14);
-in_reg(15);
-
-static ssize_t show_in16(struct device *dev, struct device_attribute *attr,
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, in, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
+static SENSOR_DEVICE_ATTR_RO(in9_input, in, 9);
+static SENSOR_DEVICE_ATTR_RW(in9_min, in_min, 9);
+static SENSOR_DEVICE_ATTR_RW(in9_max, in_max, 9);
+static SENSOR_DEVICE_ATTR_RO(in10_input, in, 10);
+static SENSOR_DEVICE_ATTR_RW(in10_min, in_min, 10);
+static SENSOR_DEVICE_ATTR_RW(in10_max, in_max, 10);
+static SENSOR_DEVICE_ATTR_RO(in11_input, in, 11);
+static SENSOR_DEVICE_ATTR_RW(in11_min, in_min, 11);
+static SENSOR_DEVICE_ATTR_RW(in11_max, in_max, 11);
+static SENSOR_DEVICE_ATTR_RO(in12_input, in, 12);
+static SENSOR_DEVICE_ATTR_RW(in12_min, in_min, 12);
+static SENSOR_DEVICE_ATTR_RW(in12_max, in_max, 12);
+static SENSOR_DEVICE_ATTR_RO(in13_input, in, 13);
+static SENSOR_DEVICE_ATTR_RW(in13_min, in_min, 13);
+static SENSOR_DEVICE_ATTR_RW(in13_max, in_max, 13);
+static SENSOR_DEVICE_ATTR_RO(in14_input, in, 14);
+static SENSOR_DEVICE_ATTR_RW(in14_min, in_min, 14);
+static SENSOR_DEVICE_ATTR_RW(in14_max, in_max, 14);
+static SENSOR_DEVICE_ATTR_RO(in15_input, in, 15);
+static SENSOR_DEVICE_ATTR_RW(in15_min, in_min, 15);
+static SENSOR_DEVICE_ATTR_RW(in15_max, in_max, 15);
+
+static ssize_t in16_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in[16]) -
NEG12_OFFSET);
}
-static ssize_t show_in16_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in16_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in_min[16])
- NEG12_OFFSET);
}
-static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in16_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -603,15 +627,16 @@ static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_in16_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in16_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in_max[16])
- NEG12_OFFSET);
}
-static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in16_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -632,17 +657,14 @@ static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_in16, NULL, 16);
-static SENSOR_DEVICE_ATTR(in16_min, S_IRUGO | S_IWUSR, show_in16_min,
- set_in16_min, 16);
-static SENSOR_DEVICE_ATTR(in16_max, S_IRUGO | S_IWUSR, show_in16_max,
- set_in16_max, 16);
-
+static SENSOR_DEVICE_ATTR_RO(in16_input, in16, 16);
+static SENSOR_DEVICE_ATTR_RW(in16_min, in16_min, 16);
+static SENSOR_DEVICE_ATTR_RW(in16_max, in16_max, 16);
/* Now add fan read/write functions */
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -650,8 +672,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
data->fan_div[nr]));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -659,8 +681,9 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -681,20 +704,22 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-#define fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan, NULL, \
- offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1);
-
-fan_offset(1);
-fan_offset(2);
-fan_offset(3);
-fan_offset(4);
-fan_offset(5);
-fan_offset(6);
-fan_offset(7);
-fan_offset(8);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
+static SENSOR_DEVICE_ATTR_RW(fan4_min, fan_min, 3);
+static SENSOR_DEVICE_ATTR_RO(fan5_input, fan, 4);
+static SENSOR_DEVICE_ATTR_RW(fan5_min, fan_min, 4);
+static SENSOR_DEVICE_ATTR_RO(fan6_input, fan, 5);
+static SENSOR_DEVICE_ATTR_RW(fan6_min, fan_min, 5);
+static SENSOR_DEVICE_ATTR_RO(fan7_input, fan, 6);
+static SENSOR_DEVICE_ATTR_RW(fan7_min, fan_min, 6);
+static SENSOR_DEVICE_ATTR_RO(fan8_input, fan, 7);
+static SENSOR_DEVICE_ATTR_RW(fan8_min, fan_min, 7);
/* Adjust fan_min to account for new fan divisor */
static void fixup_fan_min(struct device *dev, int fan, int old_div)
@@ -715,16 +740,17 @@ static void fixup_fan_min(struct device *dev, int fan, int old_div)
}
/* Now add fan_div read/write functions */
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", data->fan_div[nr]);
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -765,38 +791,35 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-#define fan_offset_div(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-fan_offset_div(1);
-fan_offset_div(2);
-fan_offset_div(3);
-fan_offset_div(4);
-fan_offset_div(5);
-fan_offset_div(6);
-fan_offset_div(7);
-fan_offset_div(8);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
+static SENSOR_DEVICE_ATTR_RW(fan4_div, fan_div, 3);
+static SENSOR_DEVICE_ATTR_RW(fan5_div, fan_div, 4);
+static SENSOR_DEVICE_ATTR_RW(fan6_div, fan_div, 5);
+static SENSOR_DEVICE_ATTR_RW(fan7_div, fan_div, 6);
+static SENSOR_DEVICE_ATTR_RW(fan8_div, fan_div, 7);
/* Temps */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -816,16 +839,17 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -846,30 +870,27 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1);
-
-
-temp_reg(1);
-temp_reg(2);
-temp_reg(3);
-
-static ssize_t show_temp_offset(struct device *dev,
- struct device_attribute *attr, char *buf)
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+
+static ssize_t temp_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr]));
}
-static ssize_t set_temp_offset(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t temp_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -890,16 +911,13 @@ static ssize_t set_temp_offset(struct device *dev,
return count;
}
-#define temp_offset_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_offset, S_IRUGO | S_IWUSR, \
- show_temp_offset, set_temp_offset, offset - 1);
-
-temp_offset_reg(1);
-temp_offset_reg(2);
-temp_offset_reg(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
-static ssize_t show_temp_auto_point1_temp_hyst(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_point1_temp_hyst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -907,8 +925,9 @@ static ssize_t show_temp_auto_point1_temp_hyst(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(
ADM1026_FAN_ACTIVATION_TEMP_HYST + data->temp_tmin[nr]));
}
-static ssize_t show_temp_auto_point2_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_point2_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -916,16 +935,18 @@ static ssize_t show_temp_auto_point2_temp(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr] +
ADM1026_FAN_CONTROL_TEMP_RANGE));
}
-static ssize_t show_temp_auto_point1_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_point1_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr]));
}
-static ssize_t set_temp_auto_point1_temp(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_point1_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -946,18 +967,18 @@ static ssize_t set_temp_auto_point1_temp(struct device *dev,
return count;
}
-#define temp_auto_point(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_point1_temp, \
- S_IRUGO | S_IWUSR, show_temp_auto_point1_temp, \
- set_temp_auto_point1_temp, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_point1_temp_hyst, S_IRUGO,\
- show_temp_auto_point1_temp_hyst, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_point2_temp, S_IRUGO, \
- show_temp_auto_point2_temp, NULL, offset - 1);
-
-temp_auto_point(1);
-temp_auto_point(2);
-temp_auto_point(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_point1_temp, temp_auto_point1_temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_auto_point1_temp_hyst,
+ temp_auto_point1_temp_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_auto_point2_temp, temp_auto_point2_temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_point1_temp, temp_auto_point1_temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_auto_point1_temp_hyst,
+ temp_auto_point1_temp_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_auto_point2_temp, temp_auto_point2_temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_point1_temp, temp_auto_point1_temp, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_auto_point1_temp_hyst,
+ temp_auto_point1_temp_hyst, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_auto_point2_temp, temp_auto_point2_temp, 2);
static ssize_t show_temp_crit_enable(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -988,24 +1009,24 @@ static ssize_t set_temp_crit_enable(struct device *dev,
return count;
}
-#define temp_crit_enable(offset) \
-static DEVICE_ATTR(temp##offset##_crit_enable, S_IRUGO | S_IWUSR, \
- show_temp_crit_enable, set_temp_crit_enable);
+static DEVICE_ATTR(temp1_crit_enable, 0644, show_temp_crit_enable,
+ set_temp_crit_enable);
+static DEVICE_ATTR(temp2_crit_enable, 0644, show_temp_crit_enable,
+ set_temp_crit_enable);
+static DEVICE_ATTR(temp3_crit_enable, 0644, show_temp_crit_enable,
+ set_temp_crit_enable);
-temp_crit_enable(1);
-temp_crit_enable(2);
-temp_crit_enable(3);
-
-static ssize_t show_temp_crit(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_crit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr]));
}
-static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_crit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -1026,13 +1047,9 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
return count;
}
-#define temp_crit_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO | S_IWUSR, \
- show_temp_crit, set_temp_crit, offset - 1);
-
-temp_crit_reg(1);
-temp_crit_reg(2);
-temp_crit_reg(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp_crit, 2);
static ssize_t analog_out_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1110,7 +1127,7 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
@@ -1118,34 +1135,34 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%ld\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in11_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in12_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in13_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(in14_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(in15_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(in16_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 16);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 17);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 18);
-static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 19);
-static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 20);
-static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 21);
-static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_alarm, NULL, 22);
-static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_alarm, NULL, 23);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 24);
-static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 25);
-static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 26);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in9_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in11_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in12_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in13_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(in14_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(in15_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(in16_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 13);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 16);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 17);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 18);
+static SENSOR_DEVICE_ATTR_RO(fan4_alarm, alarm, 19);
+static SENSOR_DEVICE_ATTR_RO(fan5_alarm, alarm, 20);
+static SENSOR_DEVICE_ATTR_RO(fan6_alarm, alarm, 21);
+static SENSOR_DEVICE_ATTR_RO(fan7_alarm, alarm, 22);
+static SENSOR_DEVICE_ATTR_RO(fan8_alarm, alarm, 23);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 24);
+static SENSOR_DEVICE_ATTR_RO(in10_alarm, alarm, 25);
+static SENSOR_DEVICE_ATTR_RO(in8_alarm, alarm, 26);
static ssize_t alarm_mask_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1188,7 +1205,6 @@ static ssize_t alarm_mask_store(struct device *dev,
static DEVICE_ATTR_RW(alarm_mask);
-
static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1371,23 +1387,23 @@ static ssize_t pwm1_enable_store(struct device *dev,
/* enable PWM fan control */
static DEVICE_ATTR_RW(pwm1);
-static DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
-static DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
+static DEVICE_ATTR(pwm2, 0644, pwm1_show, pwm1_store);
+static DEVICE_ATTR(pwm3, 0644, pwm1_show, pwm1_store);
static DEVICE_ATTR_RW(pwm1_enable);
-static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+static DEVICE_ATTR(pwm2_enable, 0644, pwm1_enable_show,
pwm1_enable_store);
-static DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+static DEVICE_ATTR(pwm3_enable, 0644, pwm1_enable_show,
pwm1_enable_store);
static DEVICE_ATTR_RW(temp1_auto_point1_pwm);
-static DEVICE_ATTR(temp2_auto_point1_pwm, S_IRUGO | S_IWUSR,
- temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
-static DEVICE_ATTR(temp3_auto_point1_pwm, S_IRUGO | S_IWUSR,
- temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
+static DEVICE_ATTR(temp2_auto_point1_pwm, 0644,
+ temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
+static DEVICE_ATTR(temp3_auto_point1_pwm, 0644,
+ temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
static DEVICE_ATTR_RO(temp1_auto_point2_pwm);
-static DEVICE_ATTR(temp2_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+static DEVICE_ATTR(temp2_auto_point2_pwm, 0444, temp1_auto_point2_pwm_show,
NULL);
-static DEVICE_ATTR(temp3_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+static DEVICE_ATTR(temp3_auto_point2_pwm, 0444, temp1_auto_point2_pwm_show,
NULL);
static struct attribute *adm1026_attributes[] = {
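The bare octal modes substituted in this hunk are identical to the symbolic
forms they replace; checkpatch.pl simply prefers the octal spelling. For
reference:

    /* S_IRUGO           == 0444   r--r--r-- */
    /* S_IRUGO | S_IWUSR == 0644   rw-r--r-- */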
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index e561279aea21..388060ff85e7 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -166,7 +166,7 @@ static struct adm1029_data *adm1029_update_device(struct device *dev)
*/
static ssize_t
-show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
@@ -175,7 +175,7 @@ show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_fan(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
@@ -193,7 +193,7 @@ show_fan(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
@@ -203,8 +203,9 @@ show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index]));
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adm1029_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -254,26 +255,26 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *devattr,
}
/* Access rights on sysfs. */
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, 0444, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, 0444, show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
-static SENSOR_DEVICE_ATTR(temp1_max, 0444, show_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_max, 0444, show_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp3_max, 0444, show_temp, NULL, 5);
+static SENSOR_DEVICE_ATTR_RO(temp1_max, temp, 3);
+static SENSOR_DEVICE_ATTR_RO(temp2_max, temp, 4);
+static SENSOR_DEVICE_ATTR_RO(temp3_max, temp, 5);
-static SENSOR_DEVICE_ATTR(temp1_min, 0444, show_temp, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp2_min, 0444, show_temp, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp3_min, 0444, show_temp, NULL, 8);
+static SENSOR_DEVICE_ATTR_RO(temp1_min, temp, 6);
+static SENSOR_DEVICE_ATTR_RO(temp2_min, temp, 7);
+static SENSOR_DEVICE_ATTR_RO(temp3_min, temp, 8);
-static SENSOR_DEVICE_ATTR(fan1_input, 0444, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, 0444, show_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
-static SENSOR_DEVICE_ATTR(fan1_min, 0444, show_fan, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan2_min, 0444, show_fan, NULL, 3);
+static SENSOR_DEVICE_ATTR_RO(fan1_min, fan, 2);
+static SENSOR_DEVICE_ATTR_RO(fan2_min, fan, 3);
-static SENSOR_DEVICE_ATTR(fan1_div, 0644, show_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, 0644, show_fan_div, set_fan_div, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static struct attribute *adm1029_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
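Each SENSOR_DEVICE_ATTR_*() line expands to a struct sensor_device_attribute
named sensor_dev_attr_<name>, so attribute arrays like adm1029_attrs[] keep
referencing the same identifiers after the conversion. A minimal sketch of how
such a group is assembled (the demo_* names are hypothetical):

    static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);

    static struct attribute *demo_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        NULL
    };
    ATTRIBUTE_GROUPS(demo);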
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index bcf508269fd6..6b46d8eaa775 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -331,7 +331,7 @@ get_fan_auto_nearest(struct adm1031_data *data, int chan, u8 val, u8 reg)
return -EINVAL;
}
-static ssize_t show_fan_auto_channel(struct device *dev,
+static ssize_t fan_auto_channel_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -340,8 +340,8 @@ static ssize_t show_fan_auto_channel(struct device *dev,
}
static ssize_t
-set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+fan_auto_channel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -392,13 +392,11 @@ set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(auto_fan1_channel, S_IRUGO | S_IWUSR,
- show_fan_auto_channel, set_fan_auto_channel, 0);
-static SENSOR_DEVICE_ATTR(auto_fan2_channel, S_IRUGO | S_IWUSR,
- show_fan_auto_channel, set_fan_auto_channel, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_fan1_channel, fan_auto_channel, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_fan2_channel, fan_auto_channel, 1);
/* Auto Temps */
-static ssize_t show_auto_temp_off(struct device *dev,
+static ssize_t auto_temp_off_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -406,7 +404,7 @@ static ssize_t show_auto_temp_off(struct device *dev,
return sprintf(buf, "%d\n",
AUTO_TEMP_OFF_FROM_REG(data->auto_temp[nr]));
}
-static ssize_t show_auto_temp_min(struct device *dev,
+static ssize_t auto_temp_min_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -415,8 +413,8 @@ static ssize_t show_auto_temp_min(struct device *dev,
AUTO_TEMP_MIN_FROM_REG(data->auto_temp[nr]));
}
static ssize_t
-set_auto_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+auto_temp_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -436,7 +434,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_auto_temp_max(struct device *dev,
+static ssize_t auto_temp_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -445,8 +443,8 @@ static ssize_t show_auto_temp_max(struct device *dev,
AUTO_TEMP_MAX_FROM_REG(data->auto_temp[nr]));
}
static ssize_t
-set_auto_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+auto_temp_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -468,28 +466,26 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define auto_temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(auto_temp##offset##_off, S_IRUGO, \
- show_auto_temp_off, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(auto_temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_auto_temp_min, set_auto_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(auto_temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_auto_temp_max, set_auto_temp_max, offset - 1)
-
-auto_temp_reg(1);
-auto_temp_reg(2);
-auto_temp_reg(3);
+static SENSOR_DEVICE_ATTR_RO(auto_temp1_off, auto_temp_off, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_temp1_min, auto_temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_temp1_max, auto_temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(auto_temp2_off, auto_temp_off, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_temp2_min, auto_temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_temp2_max, auto_temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(auto_temp3_off, auto_temp_off, 2);
+static SENSOR_DEVICE_ATTR_RW(auto_temp3_min, auto_temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(auto_temp3_max, auto_temp_max, 2);
/* pwm */
-static ssize_t show_pwm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[nr]));
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -517,12 +513,10 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 1);
-static SENSOR_DEVICE_ATTR(auto_fan1_min_pwm, S_IRUGO | S_IWUSR,
- show_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(auto_fan2_min_pwm, S_IRUGO | S_IWUSR,
- show_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_fan1_min_pwm, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_fan2_min_pwm, pwm, 1);
/* Fans */
@@ -572,9 +566,8 @@ static int trust_fan_readings(struct adm1031_data *data, int chan)
return res;
}
-
-static ssize_t show_fan(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
@@ -585,15 +578,15 @@ static ssize_t show_fan(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", FAN_DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
@@ -601,8 +594,9 @@ static ssize_t show_fan_min(struct device *dev,
FAN_FROM_REG(data->fan_min[nr],
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -625,8 +619,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -673,21 +668,16 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-#define fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1)
-
-fan_offset(1);
-fan_offset(2);
-
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Temps */
-static ssize_t show_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
@@ -697,7 +687,7 @@ static ssize_t show_temp(struct device *dev,
(((data->ext_temp[nr] >> ((nr - 1) * 3)) & 7));
return sprintf(buf, "%d\n", TEMP_FROM_REG_EXT(data->temp[nr], ext));
}
-static ssize_t show_temp_offset(struct device *dev,
+static ssize_t temp_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -705,30 +695,30 @@ static ssize_t show_temp_offset(struct device *dev,
return sprintf(buf, "%d\n",
TEMP_OFFSET_FROM_REG(data->temp_offset[nr]));
}
-static ssize_t show_temp_min(struct device *dev,
+static ssize_t temp_min_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t show_temp_max(struct device *dev,
+static ssize_t temp_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t show_temp_crit(struct device *dev,
+static ssize_t temp_crit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr]));
}
-static ssize_t set_temp_offset(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t temp_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -748,8 +738,9 @@ static ssize_t set_temp_offset(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -769,8 +760,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -790,8 +782,9 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_crit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -812,21 +805,21 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
return count;
}
-#define temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_offset, S_IRUGO | S_IWUSR, \
- show_temp_offset, set_temp_offset, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO | S_IWUSR, \
- show_temp_crit, set_temp_crit, offset - 1)
-
-temp_reg(1);
-temp_reg(2);
-temp_reg(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp_crit, 2);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -838,29 +831,29 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", (data->alarm >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_fault, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(fan2_fault, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(temp3_min_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 13);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 14);
/* Update Interval */
static const unsigned int update_intervals[] = {
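All of the shared callbacks in this patch recover their per-file index through
to_sensor_dev_attr(), a container_of() wrapper. Roughly, paraphrased from
include/linux/hwmon-sysfs.h:

    struct sensor_device_attribute {
        struct device_attribute dev_attr;
        int index;
    };
    #define to_sensor_dev_attr(_dev_attr) \
        container_of(_dev_attr, struct sensor_device_attribute, dev_attr)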
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 255413fdbde9..000b20f1db71 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -269,16 +269,16 @@ static ssize_t temp1_input_show(struct device *dev,
return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
}
-static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t max_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%d\n", data->temp_max[attr->index] * 1000);
}
-static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t max_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -299,14 +299,12 @@ static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
}
static DEVICE_ATTR_RO(temp1_input);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
- show_max, set_max, 0);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
- show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, max, 1);
/* voltage */
-static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -314,8 +312,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
attr->index));
}
-static ssize_t show_in_min(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t in_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -323,8 +321,8 @@ static ssize_t show_in_min(struct device *dev,
attr->index));
}
-static ssize_t show_in_max(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t in_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -332,9 +330,9 @@ static ssize_t show_in_max(struct device *dev,
attr->index));
}
-static ssize_t set_in_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -354,9 +352,9 @@ static ssize_t set_in_min(struct device *dev,
return count;
}
-static ssize_t set_in_max(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -376,24 +374,28 @@ static ssize_t set_in_max(struct device *dev,
return count;
}
-#define vin(nr) \
-static SENSOR_DEVICE_ATTR(in##nr##_input, S_IRUGO, \
- show_in, NULL, nr); \
-static SENSOR_DEVICE_ATTR(in##nr##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, nr); \
-static SENSOR_DEVICE_ATTR(in##nr##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, nr);
-
-vin(0);
-vin(1);
-vin(2);
-vin(3);
-vin(4);
-vin(5);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
/* fans */
-static ssize_t show_fan(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -401,8 +403,8 @@ static ssize_t show_fan(struct device *dev,
1 << data->fan_div[attr->index]));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -410,8 +412,8 @@ static ssize_t show_fan_min(struct device *dev,
1 << data->fan_div[attr->index]));
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t fan_div_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -429,9 +431,9 @@ static ssize_t show_fan_div(struct device *dev,
* - otherwise: select fan clock divider to suit fan speed low limit,
* measurement code may adjust registers to ensure fan speed reading
*/
-static ssize_t set_fan_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -489,16 +491,12 @@ static ssize_t set_fan_min(struct device *dev,
return count;
}
-#define fan(nr) \
-static SENSOR_DEVICE_ATTR(fan##nr##_input, S_IRUGO, \
- show_fan, NULL, nr - 1); \
-static SENSOR_DEVICE_ATTR(fan##nr##_div, S_IRUGO, \
- show_fan_div, NULL, nr - 1); \
-static SENSOR_DEVICE_ATTR(fan##nr##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, nr - 1);
-
-fan(1);
-fan(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan2_div, fan_div, 1);
/* alarms */
static ssize_t alarms_show(struct device *dev,
@@ -509,22 +507,22 @@ static ssize_t alarms_show(struct device *dev,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
/* vid */
static ssize_t cpu0_vid_show(struct device *dev,
@@ -564,9 +562,8 @@ static ssize_t aout_output_store(struct device *dev,
}
static DEVICE_ATTR_RW(aout_output);
-static ssize_t chassis_clear(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t alarm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm9240_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -583,8 +580,7 @@ static ssize_t chassis_clear(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_alarm,
- chassis_clear, 12);
+static SENSOR_DEVICE_ATTR_RW(intrusion0_alarm, alarm, 12);
static struct attribute *adm9240_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
@@ -632,7 +628,6 @@ static struct attribute *adm9240_attrs[] = {
ATTRIBUTE_GROUPS(adm9240);
-
/*** sensor chip detect and driver install ***/
/* Return 0 if detection is successful, -ENODEV otherwise */
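
For reference, the renames above (show_max()/set_max() becoming max_show()/max_store(), and likewise throughout this series) follow from how the SENSOR_DEVICE_ATTR_RO/_RW helpers build callback names from their second argument. A sketch of the macros, paraphrased from include/linux/hwmon-sysfs.h; exact definitions may differ by kernel version:

#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index)	\
	SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)

#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index)	\
	SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, \
			   _index)

/*
 * So SENSOR_DEVICE_ATTR_RW(temp1_max, max, 0) expects callbacks named
 * max_show() and max_store(), and the explicit S_IRUGO/S_IWUSR mode
 * arguments are folded into the fixed 0444/0644 permissions.
 */
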
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index c21b0529adb2..412ab7015d75 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -307,7 +307,7 @@ static const struct i2c_device_id ads1015_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
-static const struct of_device_id ads1015_of_match[] = {
+static const struct of_device_id __maybe_unused ads1015_of_match[] = {
{
.compatible = "ti,ads1015",
.data = (void *)ads1015
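
The __maybe_unused annotations added to the of_device_id tables in this series pair with of_match_ptr(): when CONFIG_OF is disabled the macro evaluates to NULL, leaving the table unreferenced and provoking -Wunused-const-variable without the annotation. A sketch of the mechanism, assuming the driver registers the table through of_match_ptr() as is typical; of_match_ptr() paraphrased from include/linux/of.h:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL	/* table becomes unreferenced */
#endif

static struct i2c_driver ads1015_driver = {
	.driver = {
		.name = "ads1015",
		.of_match_table = of_match_ptr(ads1015_of_match),
	},
	/* .probe and other callbacks omitted in this sketch */
};
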
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 12c56d3783ed..03d6e782777a 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -8,7 +8,7 @@
*
* ADS7830 support, by Guillaume Roguez <guillaume.roguez@savoirfairelinux.com>
*
- * For further information, see the Documentation/hwmon/ads7828 file.
+ * For further information, see the Documentation/hwmon/ads7828.rst file.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -200,7 +200,7 @@ static const struct i2c_device_id ads7828_device_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, ads7828_device_ids);
-static const struct of_device_id ads7828_of_match[] = {
+static const struct of_device_id __maybe_unused ads7828_of_match[] = {
{
.compatible = "ti,ads7828",
.data = (void *)ads7828
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index b939f8a115ba..44a827b031cb 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -639,40 +639,22 @@ static int adt7411_init_device(struct adt7411_data *data)
return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val);
}
-static const u32 adt7411_in_config[] = {
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info adt7411_in = {
- .type = hwmon_in,
- .config = adt7411_in_config,
-};
-
-static const u32 adt7411_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
- HWMON_T_MAX | HWMON_T_MAX_ALARM,
- HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
- HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info adt7411_temp = {
- .type = hwmon_temp,
- .config = adt7411_temp_config,
-};
-
static const struct hwmon_channel_info *adt7411_info[] = {
- &adt7411_in,
- &adt7411_temp,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_FAULT),
NULL
};
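
HWMON_CHANNEL_INFO() folds each removed config array and hwmon_channel_info pair into a single compound literal, including the 0 terminator that the open-coded arrays carried explicitly. A sketch of the macro, paraphrased from include/linux/hwmon.h:

#define HWMON_CHANNEL_INFO(stype, ...)		\
	(&(struct hwmon_channel_info) {		\
		.type = hwmon_##stype,		\
		.config = (u32 []) {		\
			__VA_ARGS__, 0		\
		}				\
	})
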
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 0dbb8df74e44..7caec127df86 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -168,7 +168,7 @@ static const struct i2c_device_id adt7475_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adt7475_id);
-static const struct of_device_id adt7475_of_match[] = {
+static const struct of_device_id __maybe_unused adt7475_of_match[] = {
{
.compatible = "adi,adt7473",
.data = (void *)adt7473
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 73c681162653..623736d2a7c1 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -96,17 +96,23 @@ superio_select(int base, int ld)
outb(ld, base + 1);
}
-static inline void
+static inline int
superio_enter(int base)
{
+ if (!request_muxed_region(base, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, base);
outb(0x87, base);
+
+ return 0;
}
static inline void
superio_exit(int base)
{
outb(0xaa, base);
+ release_region(base, 2);
}
/*
@@ -1561,7 +1567,7 @@ exit:
static int __init f71805f_find(int sioaddr, unsigned short *address,
struct f71805f_sio_data *sio_data)
{
- int err = -ENODEV;
+ int err;
u16 devid;
static const char * const names[] = {
@@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address,
"F71872F/FG or F71806F/FG",
};
- superio_enter(sioaddr);
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
+ err = -ENODEV;
devid = superio_inw(sioaddr, SIO_REG_MANID);
if (devid != SIO_FINTEK_ID)
goto exit;
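
request_muxed_region() claims the Super-I/O index/data port pair as IORESOURCE_MUXED, so drivers sharing the ports serialize instead of clashing; it may sleep until the region is free and returns NULL if it cannot be claimed. A hypothetical caller mirroring the f71805f_find() flow above (example_sio_read_devid() is illustrative, not part of the driver):

static int example_sio_read_devid(int sioaddr, u16 *devid)
{
	int err;

	err = superio_enter(sioaddr);	/* may sleep on contention */
	if (err)
		return err;		/* -EBUSY: ports unavailable */

	*devid = superio_inw(sioaddr, SIO_REG_MANID);

	superio_exit(sioaddr);		/* outb(0xaa) + release_region() */
	return 0;
}
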
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index d167fcfec765..2bf9599b34a1 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -252,7 +252,7 @@ static const struct i2c_device_id hih6130_id[] = {
};
MODULE_DEVICE_TABLE(i2c, hih6130_id);
-static const struct of_device_id hih6130_of_match[] = {
+static const struct of_device_id __maybe_unused hih6130_of_match[] = {
{ .compatible = "honeywell,hih6130", },
{ }
};
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c22dc1e07911..cd91510a5387 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -324,6 +324,11 @@ static const char * const hwmon_chip_attrs[] = {
[hwmon_chip_power_reset_history] = "power_reset_history",
[hwmon_chip_update_interval] = "update_interval",
[hwmon_chip_alarms] = "alarms",
+ [hwmon_chip_samples] = "samples",
+ [hwmon_chip_curr_samples] = "curr_samples",
+ [hwmon_chip_in_samples] = "in_samples",
+ [hwmon_chip_power_samples] = "power_samples",
+ [hwmon_chip_temp_samples] = "temp_samples",
};
static const char * const hwmon_temp_attr_templates[] = {
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 5c3c08449de7..1770423f7a80 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -92,6 +92,9 @@ static int iio_hwmon_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < st->num_channels; i++) {
+ const char *prefix;
+ int n;
+
a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
if (a == NULL)
return -ENOMEM;
@@ -103,28 +106,28 @@ static int iio_hwmon_probe(struct platform_device *pdev)
switch (type) {
case IIO_VOLTAGE:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "in%d_input",
- in_i++);
+ n = in_i++;
+ prefix = "in";
break;
case IIO_TEMP:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "temp%d_input",
- temp_i++);
+ n = temp_i++;
+ prefix = "temp";
break;
case IIO_CURRENT:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "curr%d_input",
- curr_i++);
+ n = curr_i++;
+ prefix = "curr";
break;
case IIO_HUMIDITYRELATIVE:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "humidity%d_input",
- humidity_i++);
+ n = humidity_i++;
+ prefix = "humidity";
break;
default:
return -EINVAL;
}
+
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s%d_input",
+ prefix, n);
if (a->dev_attr.attr.name == NULL)
return -ENOMEM;
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index e3854463db84..6a4ec2f2ddb2 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -587,7 +587,7 @@ static const struct i2c_device_id ina209_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ina209_id);
-static const struct of_device_id ina209_of_match[] = {
+static const struct of_device_id __maybe_unused ina209_of_match[] = {
{ .compatible = "ti,ina209" },
{ },
};
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 290379c49be9..42df51f8cdf2 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -507,7 +507,7 @@ static const struct i2c_device_id ina2xx_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
-static const struct of_device_id ina2xx_of_match[] = {
+static const struct of_device_id __maybe_unused ina2xx_of_match[] = {
{
.compatible = "ti,ina219",
.data = (void *)ina219
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 3626b87a5fd2..e0637fed9585 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/util_macros.h>
#define INA3221_DRIVER_NAME "ina3221"
@@ -51,6 +52,9 @@
#define INA3221_CONFIG_VBUS_CT_SHIFT 6
#define INA3221_CONFIG_VBUS_CT_MASK GENMASK(8, 6)
#define INA3221_CONFIG_VBUS_CT(x) (((x) & GENMASK(8, 6)) >> 6)
+#define INA3221_CONFIG_AVG_SHIFT 9
+#define INA3221_CONFIG_AVG_MASK GENMASK(11, 9)
+#define INA3221_CONFIG_AVG(x) (((x) & GENMASK(11, 9)) >> 9)
#define INA3221_CONFIG_CHs_EN_MASK GENMASK(14, 12)
#define INA3221_CONFIG_CHx_EN(x) BIT(14 - (x))
@@ -135,17 +139,42 @@ static const u16 ina3221_conv_time[] = {
140, 204, 332, 588, 1100, 2116, 4156, 8244,
};
-static inline int ina3221_wait_for_data(struct ina3221_data *ina)
+/* Lookup table for number of samples used in averaging mode */
+static const int ina3221_avg_samples[] = {
+ 1, 4, 16, 64, 128, 256, 512, 1024,
+};
+
+/* Converting update_interval in msec to conversion time in usec */
+static inline u32 ina3221_interval_ms_to_conv_time(u16 config, int interval)
+{
+ u32 channels = hweight16(config & INA3221_CONFIG_CHs_EN_MASK);
+ u32 samples_idx = INA3221_CONFIG_AVG(config);
+ u32 samples = ina3221_avg_samples[samples_idx];
+
+ /* Bisect the result to Bus and Shunt conversion times */
+ return DIV_ROUND_CLOSEST(interval * 1000 / 2, channels * samples);
+}
+
+/* Converting CONFIG register value to update_interval in usec */
+static inline u32 ina3221_reg_to_interval_us(u16 config)
{
- u32 channels = hweight16(ina->reg_config & INA3221_CONFIG_CHs_EN_MASK);
- u32 vbus_ct_idx = INA3221_CONFIG_VBUS_CT(ina->reg_config);
- u32 vsh_ct_idx = INA3221_CONFIG_VSH_CT(ina->reg_config);
+ u32 channels = hweight16(config & INA3221_CONFIG_CHs_EN_MASK);
+ u32 vbus_ct_idx = INA3221_CONFIG_VBUS_CT(config);
+ u32 vsh_ct_idx = INA3221_CONFIG_VSH_CT(config);
+ u32 samples_idx = INA3221_CONFIG_AVG(config);
+ u32 samples = ina3221_avg_samples[samples_idx];
u32 vbus_ct = ina3221_conv_time[vbus_ct_idx];
u32 vsh_ct = ina3221_conv_time[vsh_ct_idx];
- u32 wait, cvrf;
/* Calculate total conversion time */
- wait = channels * (vbus_ct + vsh_ct);
+ return channels * (vbus_ct + vsh_ct) * samples;
+}
+
+static inline int ina3221_wait_for_data(struct ina3221_data *ina)
+{
+ u32 wait, cvrf;
+
+ wait = ina3221_reg_to_interval_us(ina->reg_config);
/* Polling the CVRF bit to make sure read data is ready */
return regmap_field_read_poll_timeout(ina->fields[F_CVRF],
@@ -176,6 +205,26 @@ static const u8 ina3221_in_reg[] = {
INA3221_SHUNT3,
};
+static int ina3221_read_chip(struct device *dev, u32 attr, long *val)
+{
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ int regval;
+
+ switch (attr) {
+ case hwmon_chip_samples:
+ regval = INA3221_CONFIG_AVG(ina->reg_config);
+ *val = ina3221_avg_samples[regval];
+ return 0;
+ case hwmon_chip_update_interval:
+ /* Return in msec */
+ *val = ina3221_reg_to_interval_us(ina->reg_config);
+ *val = DIV_ROUND_CLOSEST(*val, 1000);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
{
const bool is_shunt = channel > INA3221_CHANNEL3;
@@ -279,6 +328,48 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
}
}
+static int ina3221_write_chip(struct device *dev, u32 attr, long val)
+{
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ int ret, idx;
+ u32 tmp;
+
+ switch (attr) {
+ case hwmon_chip_samples:
+ idx = find_closest(val, ina3221_avg_samples,
+ ARRAY_SIZE(ina3221_avg_samples));
+
+ tmp = (ina->reg_config & ~INA3221_CONFIG_AVG_MASK) |
+ (idx << INA3221_CONFIG_AVG_SHIFT);
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, tmp);
+ if (ret)
+ return ret;
+
+ /* Update reg_config accordingly */
+ ina->reg_config = tmp;
+ return 0;
+ case hwmon_chip_update_interval:
+ tmp = ina3221_interval_ms_to_conv_time(ina->reg_config, val);
+ idx = find_closest(tmp, ina3221_conv_time,
+ ARRAY_SIZE(ina3221_conv_time));
+
+ /* Update Bus and Shunt voltage conversion times */
+ tmp = INA3221_CONFIG_VBUS_CT_MASK | INA3221_CONFIG_VSH_CT_MASK;
+ tmp = (ina->reg_config & ~tmp) |
+ (idx << INA3221_CONFIG_VBUS_CT_SHIFT) |
+ (idx << INA3221_CONFIG_VSH_CT_SHIFT);
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, tmp);
+ if (ret)
+ return ret;
+
+ /* Update reg_config accordingly */
+ ina->reg_config = tmp;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int ina3221_write_curr(struct device *dev, u32 attr,
int channel, long val)
{
@@ -309,6 +400,7 @@ static int ina3221_write_enable(struct device *dev, int channel, bool enable)
struct ina3221_data *ina = dev_get_drvdata(dev);
u16 config, mask = INA3221_CONFIG_CHx_EN(channel);
u16 config_old = ina->reg_config & mask;
+ u32 tmp;
int ret;
config = enable ? mask : 0;
@@ -327,14 +419,13 @@ static int ina3221_write_enable(struct device *dev, int channel, bool enable)
}
/* Enable or disable the channel */
- ret = regmap_update_bits(ina->regmap, INA3221_CONFIG, mask, config);
+ tmp = (ina->reg_config & ~mask) | (config & mask);
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, tmp);
if (ret)
goto fail;
/* Cache the latest config register value */
- ret = regmap_read(ina->regmap, INA3221_CONFIG, &ina->reg_config);
- if (ret)
- goto fail;
+ ina->reg_config = tmp;
/* For disabling routine, decrease refcount or suspend() at last */
if (!enable)
@@ -361,6 +452,9 @@ static int ina3221_read(struct device *dev, enum hwmon_sensor_types type,
mutex_lock(&ina->lock);
switch (type) {
+ case hwmon_chip:
+ ret = ina3221_read_chip(dev, attr, val);
+ break;
case hwmon_in:
/* 0-align channel ID */
ret = ina3221_read_in(dev, attr, channel - 1, val);
@@ -387,6 +481,9 @@ static int ina3221_write(struct device *dev, enum hwmon_sensor_types type,
mutex_lock(&ina->lock);
switch (type) {
+ case hwmon_chip:
+ ret = ina3221_write_chip(dev, attr, val);
+ break;
case hwmon_in:
/* 0-align channel ID */
ret = ina3221_write_enable(dev, channel - 1, val);
@@ -423,6 +520,14 @@ static umode_t ina3221_is_visible(const void *drvdata,
const struct ina3221_input *input = NULL;
switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_samples:
+ case hwmon_chip_update_interval:
+ return 0644;
+ default:
+ return 0;
+ }
case hwmon_in:
/* Ignore in0_ */
if (channel == 0)
@@ -458,44 +563,29 @@ static umode_t ina3221_is_visible(const void *drvdata,
}
}
-static const u32 ina3221_in_config[] = {
- /* 0: dummy, skipped in is_visible */
- HWMON_I_INPUT,
- /* 1-3: input voltage Channels */
- HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
- HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
- HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
- /* 4-6: shunt voltage Channels */
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ina3221_in = {
- .type = hwmon_in,
- .config = ina3221_in_config,
-};
-
#define INA3221_HWMON_CURR_CONFIG (HWMON_C_INPUT | \
HWMON_C_CRIT | HWMON_C_CRIT_ALARM | \
HWMON_C_MAX | HWMON_C_MAX_ALARM)
-static const u32 ina3221_curr_config[] = {
- INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG,
- 0
-};
-
-static const struct hwmon_channel_info ina3221_curr = {
- .type = hwmon_curr,
- .config = ina3221_curr_config,
-};
-
static const struct hwmon_channel_info *ina3221_info[] = {
- &ina3221_in,
- &ina3221_curr,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_SAMPLES,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(in,
+ /* 0: dummy, skipped in is_visible */
+ HWMON_I_INPUT,
+ /* 1-3: input voltage Channels */
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ /* 4-6: shunt voltage Channels */
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(curr,
+ INA3221_HWMON_CURR_CONFIG,
+ INA3221_HWMON_CURR_CONFIG,
+ INA3221_HWMON_CURR_CONFIG),
NULL
};
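
The conversion helpers above make update_interval a function of the enabled channels, the conversion times, and the new averaging sample count. A worked example with hypothetical settings (all three channels enabled, 1100 us bus and shunt conversion times, 4-sample averaging):

/*
 * ina3221_reg_to_interval_us():
 *	3 channels * (1100 + 1100) us * 4 samples = 26400 us
 * ina3221_read_chip(hwmon_chip_update_interval):
 *	DIV_ROUND_CLOSEST(26400, 1000) = 26 msec
 *
 * Writing 26 back through ina3221_interval_ms_to_conv_time() splits the
 * per-channel budget between bus and shunt conversions:
 *	DIV_ROUND_CLOSEST(26 * 1000 / 2, 3 * 4) = 1083 us
 * and find_closest() maps 1083 us back to the 1100 us table entry, so
 * the round trip is stable.
 */
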
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 4fa482ae0eb5..6b57a6d4e626 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -451,20 +451,12 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
-static const u32 jc42_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | HWMON_T_CRIT |
- HWMON_T_MAX_HYST | HWMON_T_CRIT_HYST |
- HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info jc42_temp = {
- .type = hwmon_temp,
- .config = jc42_temp_config,
-};
-
static const struct hwmon_channel_info *jc42_info[] = {
- &jc42_temp,
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM),
NULL
};
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index 2d40a2e771d7..7d5947595b45 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -94,7 +94,6 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct jz4740_hwmon *hwmon;
struct device *hwmon_dev;
- struct resource *mem;
hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
@@ -109,8 +108,7 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
return hwmon->irq;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hwmon->base = devm_ioremap_resource(&pdev->dev, mem);
+ hwmon->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hwmon->base))
return PTR_ERR(hwmon->base);
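
devm_platform_ioremap_resource() is a convenience wrapper that combines the two removed calls. Its body, paraphrased from drivers/base/platform.c:

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
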
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index eac54b9cdeec..8848fbe5fd16 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1153,7 +1153,7 @@ static const struct i2c_device_id lm63_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
-static const struct of_device_id lm63_of_match[] = {
+static const struct of_device_id __maybe_unused lm63_of_match[] = {
{
.compatible = "national,lm63",
.data = (void *)lm63
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 447af07450f1..423a382420b9 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -59,6 +59,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
tmp175,
tmp275,
tmp75,
+ tmp75b,
tmp75c,
};
@@ -194,35 +195,11 @@ static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
-/*-----------------------------------------------------------------------*/
-
-/* device probe and removal */
-
-/* chip configuration */
-
-static const u32 lm75_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm75_chip = {
- .type = hwmon_chip,
- .config = lm75_chip_config,
-};
-
-static const u32 lm75_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
- 0
-};
-
-static const struct hwmon_channel_info lm75_temp = {
- .type = hwmon_temp,
- .config = lm75_temp_config,
-};
-
static const struct hwmon_channel_info *lm75_info[] = {
- &lm75_chip,
- &lm75_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
NULL
};
@@ -378,6 +355,11 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->resolution = 12;
data->sample_time = MSEC_PER_SEC / 2;
break;
+	case tmp75b: /* not one-shot mode, conversion rate 37 Hz */
+ clr_mask |= 1 << 15 | 0x3 << 13;
+ data->resolution = 12;
+ data->sample_time = MSEC_PER_SEC / 37;
+ break;
case tmp75c:
clr_mask |= 1 << 5; /* not one-shot mode */
data->resolution = 12;
@@ -438,12 +420,13 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tmp175", tmp175, },
{ "tmp275", tmp275, },
{ "tmp75", tmp75, },
+ { "tmp75b", tmp75b, },
{ "tmp75c", tmp75c, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm75_ids);
-static const struct of_device_id lm75_of_match[] = {
+static const struct of_device_id __maybe_unused lm75_of_match[] = {
{
.compatible = "adi,adt75",
.data = (void *)adt75
@@ -537,6 +520,10 @@ static const struct of_device_id lm75_of_match[] = {
.data = (void *)tmp75
},
{
+ .compatible = "ti,tmp75b",
+ .data = (void *)tmp75b
+ },
+ {
.compatible = "ti,tmp75c",
.data = (void *)tmp75c
},
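
For the new tmp75b case above, the cleared configuration bits and the derived sample time work out as below; the bit positions are as implied by the code, so check the TMP75B datasheet before relying on them:

/*
 * clr_mask |= 1 << 15 | 0x3 << 13;
 *	bit 15     : one-shot mode, cleared for continuous conversion
 *	bits 14:13 : conversion rate field, cleared for the fastest
 *		     rate (about 37 Hz)
 *
 * data->sample_time = MSEC_PER_SEC / 37 = 27 msec per conversion.
 */
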
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 0cb7ff613b80..eb95947673de 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -73,7 +73,6 @@ enum chips { lm78, lm79 };
#define LM78_REG_CHIPID 0x49
#define LM78_REG_I2C_ADDR 0x48
-
/*
* Conversions. Rounding and limit checking is only done on the TO_REG
* variants.
@@ -147,15 +146,13 @@ struct lm78_data {
u16 alarms; /* Register encoding, combined */
};
-
static int lm78_read_value(struct lm78_data *data, u8 reg);
static int lm78_write_value(struct lm78_data *data, u8 reg, u8 value);
static struct lm78_data *lm78_update_device(struct device *dev);
static void lm78_init_device(struct lm78_data *data);
-
/* 7 Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *da,
+static ssize_t in_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -163,7 +160,7 @@ static ssize_t show_in(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[attr->index]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+static ssize_t in_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -171,7 +168,7 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[attr->index]));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+static ssize_t in_max_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -179,8 +176,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[attr->index]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -199,8 +196,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -219,21 +216,27 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0);
-show_in_offset(1);
-show_in_offset(2);
-show_in_offset(3);
-show_in_offset(4);
-show_in_offset(5);
-show_in_offset(6);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
/* Temperature */
static ssize_t temp1_input_show(struct device *dev,
@@ -300,7 +303,7 @@ static DEVICE_ATTR_RW(temp1_max);
static DEVICE_ATTR_RW(temp1_max_hyst);
/* 3 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -310,7 +313,7 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -320,8 +323,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -340,7 +343,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -354,8 +357,8 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
* least surprise; the user doesn't expect the fan minimum to change just
* because the divisor changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -413,22 +416,17 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
-show_fan_offset(3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
/* Fan 3 divisor is locked in H/W */
-static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
- show_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
- show_fan_div, set_fan_div, 1);
-static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_div, fan_div, 2);
/* VID */
static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *da,
@@ -448,24 +446,24 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *da,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct lm78_data *data = lm78_update_device(dev);
int nr = to_sensor_dev_attr(da)->index;
return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
static struct attribute *lm78_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index a95d48316f06..80db367b4c54 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -165,7 +165,6 @@ static inline u16 FAN_TO_REG(unsigned long val)
#define PWM_TO_REG(val) clamp_val(val, 0, 255)
#define PWM_FROM_REG(val) (val)
-
/*
* ZONEs have the following parameters:
* Limit (low) temp, 1. degC
@@ -563,24 +562,25 @@ static struct lm85_data *lm85_update_device(struct device *dev)
}
/* 4 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr]));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -599,16 +599,14 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1)
-
-show_fan_offset(1);
-show_fan_offset(2);
-show_fan_offset(3);
-show_fan_offset(4);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
+static SENSOR_DEVICE_ATTR_RW(fan4_min, fan_min, 3);
/* vid, vrm, alarms */
@@ -667,44 +665,44 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 18);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 16);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 17);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 18);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 16);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 17);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(fan4_alarm, alarm, 13);
/* pwm */
-static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[nr]));
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -723,8 +721,8 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
- *attr, char *buf)
+static ssize_t pwm_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -745,8 +743,9 @@ static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", enable);
}
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t pwm_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -788,8 +787,8 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
return count;
}
-static ssize_t show_pwm_freq(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -804,8 +803,9 @@ static ssize_t show_pwm_freq(struct device *dev,
return sprintf(buf, "%d\n", freq);
}
-static ssize_t set_pwm_freq(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_freq_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -841,22 +841,20 @@ static ssize_t set_pwm_freq(struct device *dev,
return count;
}
-#define show_pwm_reg(offset) \
-static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
- show_pwm, set_pwm, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
- show_pwm_enable, set_pwm_enable, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_freq, S_IRUGO | S_IWUSR, \
- show_pwm_freq, set_pwm_freq, offset - 1)
-
-show_pwm_reg(1);
-show_pwm_reg(2);
-show_pwm_reg(3);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
/* Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -864,16 +862,16 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
data->in_ext[nr]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_min[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -892,16 +890,16 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_max[nr]));
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -920,27 +918,35 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_in_reg(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset)
-
-show_in_reg(0);
-show_in_reg(1);
-show_in_reg(2);
-show_in_reg(3);
-show_in_reg(4);
-show_in_reg(5);
-show_in_reg(6);
-show_in_reg(7);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
/* Temps */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -948,16 +954,17 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
data->temp_ext[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -979,16 +986,17 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1010,31 +1018,30 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1);
-
-show_temp_reg(1);
-show_temp_reg(2);
-show_temp_reg(3);
-
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
/* Automatic PWM control */
-static ssize_t show_pwm_auto_channels(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_channels_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", ZONE_FROM_REG(data->autofan[nr].config));
}
-static ssize_t set_pwm_auto_channels(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_auto_channels_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1055,16 +1062,17 @@ static ssize_t set_pwm_auto_channels(struct device *dev,
return count;
}
-static ssize_t show_pwm_auto_pwm_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_pwm_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->autofan[nr].min_pwm));
}
-static ssize_t set_pwm_auto_pwm_min(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_auto_pwm_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1084,16 +1092,18 @@ static ssize_t set_pwm_auto_pwm_min(struct device *dev,
return count;
}
-static ssize_t show_pwm_auto_pwm_minctl(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_pwm_minctl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", data->autofan[nr].min_off);
}
-static ssize_t set_pwm_auto_pwm_minctl(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_auto_pwm_minctl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1117,25 +1127,21 @@ static ssize_t set_pwm_auto_pwm_minctl(struct device *dev,
return count;
}
-#define pwm_auto(offset) \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_channels, \
- S_IRUGO | S_IWUSR, show_pwm_auto_channels, \
- set_pwm_auto_channels, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_pwm_min, \
- S_IRUGO | S_IWUSR, show_pwm_auto_pwm_min, \
- set_pwm_auto_pwm_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_pwm_minctl, \
- S_IRUGO | S_IWUSR, show_pwm_auto_pwm_minctl, \
- set_pwm_auto_pwm_minctl, offset - 1)
-
-pwm_auto(1);
-pwm_auto(2);
-pwm_auto(3);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_channels, pwm_auto_channels, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_pwm_min, pwm_auto_pwm_min, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_pwm_minctl, pwm_auto_pwm_minctl, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_channels, pwm_auto_channels, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_pwm_min, pwm_auto_pwm_min, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_pwm_minctl, pwm_auto_pwm_minctl, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_auto_channels, pwm_auto_channels, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_auto_pwm_min, pwm_auto_pwm_min, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_auto_pwm_minctl, pwm_auto_pwm_minctl, 2);
/* Temperature settings for automatic PWM control */
-static ssize_t show_temp_auto_temp_off(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_off_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -1143,8 +1149,9 @@ static ssize_t show_temp_auto_temp_off(struct device *dev,
HYST_FROM_REG(data->zone[nr].hyst));
}
-static ssize_t set_temp_auto_temp_off(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1172,16 +1179,18 @@ static ssize_t set_temp_auto_temp_off(struct device *dev,
return count;
}
-static ssize_t show_temp_auto_temp_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit));
}
-static ssize_t set_temp_auto_temp_min(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1210,8 +1219,9 @@ static ssize_t set_temp_auto_temp_min(struct device *dev,
return count;
}
-static ssize_t show_temp_auto_temp_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -1219,8 +1229,9 @@ static ssize_t show_temp_auto_temp_max(struct device *dev,
RANGE_FROM_REG(data->zone[nr].range));
}
-static ssize_t set_temp_auto_temp_max(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_max_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1245,16 +1256,18 @@ static ssize_t set_temp_auto_temp_max(struct device *dev,
return count;
}
-static ssize_t show_temp_auto_temp_crit(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_crit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].critical));
}
-static ssize_t set_temp_auto_temp_crit(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_crit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1274,23 +1287,18 @@ static ssize_t set_temp_auto_temp_crit(struct device *dev,
return count;
}
-#define temp_auto(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_off, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_off, \
- set_temp_auto_temp_off, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_min, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_min, \
- set_temp_auto_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_max, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_max, \
- set_temp_auto_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_crit, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_crit, \
- set_temp_auto_temp_crit, offset - 1);
-
-temp_auto(1);
-temp_auto(2);
-temp_auto(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_off, temp_auto_temp_off, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_min, temp_auto_temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_max, temp_auto_temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_crit, temp_auto_temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_off, temp_auto_temp_off, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_min, temp_auto_temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_max, temp_auto_temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_crit, temp_auto_temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_off, temp_auto_temp_off, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_min, temp_auto_temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_max, temp_auto_temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_crit, temp_auto_temp_crit, 2);
static struct attribute *lm85_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -1642,7 +1650,7 @@ static const struct i2c_device_id lm85_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
-static const struct of_device_id lm85_of_match[] = {
+static const struct of_device_id __maybe_unused lm85_of_match[] = {
{
.compatible = "adi,adm1027",
.data = (void *)adm1027
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index b48d30760388..644e61eae574 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -276,8 +276,8 @@ static struct lm87_data *lm87_update_device(struct device *dev)
* Sysfs stuff
*/
-static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -286,8 +286,8 @@ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
data->in_scale[nr]));
}
-static ssize_t show_in_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -296,8 +296,8 @@ static ssize_t show_in_min(struct device *dev,
data->in_scale[nr]));
}
-static ssize_t show_in_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -306,8 +306,8 @@ static ssize_t show_in_max(struct device *dev,
data->in_scale[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -327,8 +327,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -348,23 +348,32 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_in(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in_input, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset)
-set_in(0);
-set_in(1);
-set_in(2);
-set_in(3);
-set_in(4);
-set_in(5);
-set_in(6);
-set_in(7);
-
-static ssize_t show_temp_input(struct device *dev,
+static SENSOR_DEVICE_ATTR_RO(in0_input, in_input, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+
+static ssize_t temp_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -373,7 +382,7 @@ static ssize_t show_temp_input(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_low(struct device *dev,
+static ssize_t temp_low_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -383,7 +392,7 @@ static ssize_t show_temp_low(struct device *dev,
TEMP_FROM_REG(data->temp_low[nr]));
}
-static ssize_t show_temp_high(struct device *dev,
+static ssize_t temp_high_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -393,8 +402,9 @@ static ssize_t show_temp_high(struct device *dev,
TEMP_FROM_REG(data->temp_high[nr]));
}
-static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_low_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -413,8 +423,9 @@ static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_high_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -433,16 +444,15 @@ static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_temp(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp_input, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_high, set_temp_high, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_low, set_temp_low, offset - 1)
-set_temp(1);
-set_temp(2);
-set_temp(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_low, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_high, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_low, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_high, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp_input, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_low, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_high, 2);
static ssize_t temp1_crit_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -460,9 +470,9 @@ static ssize_t temp2_crit_show(struct device *dev,
static DEVICE_ATTR_RO(temp1_crit);
static DEVICE_ATTR_RO(temp2_crit);
-static DEVICE_ATTR(temp3_crit, S_IRUGO, temp2_crit_show, NULL);
+static DEVICE_ATTR(temp3_crit, 0444, temp2_crit_show, NULL);
-static ssize_t show_fan_input(struct device *dev,
+static ssize_t fan_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -472,8 +482,8 @@ static ssize_t show_fan_input(struct device *dev,
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -482,8 +492,8 @@ static ssize_t show_fan_min(struct device *dev,
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -492,8 +502,9 @@ static ssize_t show_fan_div(struct device *dev,
FAN_DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -519,8 +530,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
* of least surprise; the user doesn't expect the fan minimum to change just
* because the divider changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -575,15 +587,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_fan(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan_input, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1)
-set_fan(1);
-set_fan(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -653,28 +662,28 @@ static ssize_t aout_output_store(struct device *dev,
}
static DEVICE_ATTR_RW(aout_output);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 15);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 15);
/*
* Real code
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 480d70a51778..b99eda01696e 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -236,7 +236,7 @@ static const struct i2c_device_id lm90_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
-static const struct of_device_id lm90_of_match[] = {
+static const struct of_device_id __maybe_unused lm90_of_match[] = {
{
.compatible = "adi,adm1032",
.data = (void *)adm1032
@@ -1720,16 +1720,6 @@ static void lm90_regulator_disable(void *regulator)
regulator_disable(regulator);
}
-static const u32 lm90_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS,
- 0
-};
-
-static const struct hwmon_channel_info lm90_chip_info = {
- .type = hwmon_chip,
- .config = lm90_chip_config,
-};
-
static const struct hwmon_ops lm90_ops = {
.is_visible = lm90_is_visible,
@@ -1792,7 +1782,8 @@ static int lm90_probe(struct i2c_client *client,
data->chip.ops = &lm90_ops;
data->chip.info = data->info;
- data->info[0] = &lm90_chip_info;
+ data->info[0] = HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS);
data->info[1] = &data->temp_info;
info = &data->temp_info;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 3ff188937158..6c5215e6d448 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -418,33 +418,15 @@ static void lm95241_init_client(struct i2c_client *client,
data->model);
}
-static const u32 lm95241_chip_config[] = {
- HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm95241_chip = {
- .type = hwmon_chip,
- .config = lm95241_chip_config,
-};
-
-static const u32 lm95241_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_TYPE |
- HWMON_T_FAULT,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_TYPE |
- HWMON_T_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info lm95241_temp = {
- .type = hwmon_temp,
- .config = lm95241_temp_config,
-};
-
static const struct hwmon_channel_info *lm95241_info[] = {
- &lm95241_chip,
- &lm95241_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_TYPE | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_TYPE | HWMON_T_FAULT),
NULL
};
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index e4cac3a04536..8d08ca8bbdf8 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -92,19 +92,6 @@ static const unsigned short normal_i2c[] = {
#define LM95235_REVISION 0xB1
#define LM95245_REVISION 0xB3
-static const u8 lm95245_reg_address[] = {
- LM95245_REG_R_LOCAL_TEMPH_S,
- LM95245_REG_R_LOCAL_TEMPL_S,
- LM95245_REG_R_REMOTE_TEMPH_S,
- LM95245_REG_R_REMOTE_TEMPL_S,
- LM95245_REG_R_REMOTE_TEMPH_U,
- LM95245_REG_R_REMOTE_TEMPL_U,
- LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT,
- LM95245_REG_RW_REMOTE_TCRIT_LIMIT,
- LM95245_REG_RW_COMMON_HYSTERESIS,
- LM95245_REG_R_STATUS1,
-};
-
/* Client data (each client gets its own) */
struct lm95245_data {
struct regmap *regmap;
@@ -545,32 +532,16 @@ static const struct regmap_config lm95245_regmap_config = {
.use_single_write = true,
};
-static const u32 lm95245_chip_config[] = {
- HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm95245_chip = {
- .type = hwmon_chip,
- .config = lm95245_chip_config,
-};
-
-static const u32 lm95245_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_CRIT_ALARM,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_CRIT |
- HWMON_T_CRIT_HYST | HWMON_T_FAULT | HWMON_T_MAX_ALARM |
- HWMON_T_CRIT_ALARM | HWMON_T_TYPE | HWMON_T_OFFSET,
- 0
-};
-
-static const struct hwmon_channel_info lm95245_temp = {
- .type = hwmon_temp,
- .config = lm95245_temp_config,
-};
-
static const struct hwmon_channel_info *lm95245_info[] = {
- &lm95245_chip,
- &lm95245_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_CRIT_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_FAULT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_TYPE | HWMON_T_OFFSET),
NULL
};
@@ -623,7 +594,7 @@ static const struct i2c_device_id lm95245_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm95245_id);
-static const struct of_device_id lm95245_of_match[] = {
+static const struct of_device_id __maybe_unused lm95245_of_match[] = {
{ .compatible = "national,lm95235" },
{ .compatible = "national,lm95245" },
{ },
diff --git a/drivers/hwmon/lochnagar-hwmon.c b/drivers/hwmon/lochnagar-hwmon.c
new file mode 100644
index 000000000000..8b19adf2eeb0
--- /dev/null
+++ b/drivers/hwmon/lochnagar-hwmon.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lochnagar hardware monitoring features
+ *
+ * Copyright (c) 2016-2019 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Lucas Tanure <tanureal@opensource.cirrus.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/math64.h>
+#include <linux/mfd/lochnagar.h>
+#include <linux/mfd/lochnagar2_regs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define LN2_MAX_NSAMPLE 1023
+#define LN2_SAMPLE_US 1670
+
+#define LN2_CURR_UNITS 1000
+#define LN2_VOLT_UNITS 1000
+#define LN2_TEMP_UNITS 1000
+#define LN2_PWR_UNITS 1000000
+
+static const char * const lochnagar_chan_names[] = {
+ "DBVDD1",
+ "1V8 DSP",
+ "1V8 CDC",
+ "VDDCORE DSP",
+ "AVDD 1V8",
+ "SYSVDD",
+ "VDDCORE CDC",
+ "MICVDD",
+};
+
+struct lochnagar_hwmon {
+ struct regmap *regmap;
+
+ long power_nsamples[ARRAY_SIZE(lochnagar_chan_names)];
+
+ /* Lock to ensure only a single sensor is read at a time */
+ struct mutex sensor_lock;
+};
+
+enum lochnagar_measure_mode {
+ LN2_CURR = 0,
+ LN2_VOLT,
+ LN2_TEMP,
+};
+
+/**
+ * float_to_long - Convert an IEEE-754 reading from the hardware to an integer
+ *
+ * @data: Value read from the hardware
+ * @precision: Units to multiply up to, e.g. 1000 = milli, 1000000 = micro
+ *
+ * Return: Converted integer reading
+ *
+ * Depending on the measurement type the hardware returns an IEEE-754
+ * floating point value in either volts, amps or celsius. This function
+ * converts that into an integer in a smaller unit such as micro-amps
+ * or milli-celsius. The hardware does not return NaN, so consideration of
+ * that is not required.
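+ *
+ * For example, a raw reading of 0x3F800000 (1.0) with a precision of 1000
+ * is converted to 1000.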
+ */
+static long float_to_long(u32 data, u32 precision)
+{
+ u64 man = data & 0x007FFFFF;
+ int exp = ((data & 0x7F800000) >> 23) - 127 - 23;
+ bool negative = data & 0x80000000;
+ long result;
+
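+ /*
+  * Restore the implicit leading 1 of the normalized mantissa and scale
+  * to the requested units.
+  */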
+ man = (man + (1 << 23)) * precision;
+
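+ /*
+  * Saturate at LONG_MAX if the result would overflow a long; negative
+  * exponents shift down with rounding to nearest.
+  */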
+ if (fls64(man) + exp > (int)sizeof(long) * 8 - 1)
+ result = LONG_MAX;
+ else if (exp < 0)
+ result = (man + (1ull << (-exp - 1))) >> -exp;
+ else
+ result = man << exp;
+
+ return negative ? -result : result;
+}
+
+static int do_measurement(struct regmap *regmap, int chan,
+ enum lochnagar_measure_mode mode, int nsamples)
+{
+ unsigned int val;
+ int ret;
+
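+ /* Convert the channel index into the one-hot channel select mask. */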
+ chan = 1 << (chan + LOCHNAGAR2_IMON_MEASURED_CHANNELS_SHIFT);
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL1,
+ LOCHNAGAR2_IMON_ENA_MASK | chan | mode);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL2, nsamples);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL3,
+ LOCHNAGAR2_IMON_CONFIGURE_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(regmap, LOCHNAGAR2_IMON_CTRL3, val,
+ val & LOCHNAGAR2_IMON_DONE_MASK,
+ 1000, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL3,
+ LOCHNAGAR2_IMON_MEASURE_MASK);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The actual measurement time is ~1.67 ms per sample. Approximate this
+ * with a 1.5 ms per sample msleep() and then poll for completion for up
+ * to ~0.17 ms * 1023 (LN2_MAX_NSAMPLE). For smaller values of nsamples,
+ * the poll will normally complete on the first iteration due to other
+ * latency in the system.
+ */
+ msleep((nsamples * 3) / 2);
+
+ ret = regmap_read_poll_timeout(regmap, LOCHNAGAR2_IMON_CTRL3, val,
+ val & LOCHNAGAR2_IMON_DONE_MASK,
+ 5000, 200000);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(regmap, LOCHNAGAR2_IMON_CTRL3, 0);
+}
+
+static int request_data(struct regmap *regmap, int chan, u32 *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL4,
+ LOCHNAGAR2_IMON_DATA_REQ_MASK |
+ chan << LOCHNAGAR2_IMON_CH_SEL_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(regmap, LOCHNAGAR2_IMON_CTRL4, val,
+ val & LOCHNAGAR2_IMON_DATA_RDY_MASK,
+ 1000, 10000);
+ if (ret < 0)
+ return ret;
+
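+ /* The 32-bit result is read back as two 16-bit registers, high word first. */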
+ ret = regmap_read(regmap, LOCHNAGAR2_IMON_DATA1, &val);
+ if (ret < 0)
+ return ret;
+
+ *data = val << 16;
+
+ ret = regmap_read(regmap, LOCHNAGAR2_IMON_DATA2, &val);
+ if (ret < 0)
+ return ret;
+
+ *data |= val;
+
+ return regmap_write(regmap, LOCHNAGAR2_IMON_CTRL4, 0);
+}
+
+static int read_sensor(struct device *dev, int chan,
+ enum lochnagar_measure_mode mode, int nsamples,
+ unsigned int precision, long *val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+ struct regmap *regmap = priv->regmap;
+ u32 data;
+ int ret;
+
+ mutex_lock(&priv->sensor_lock);
+
+ ret = do_measurement(regmap, chan, mode, nsamples);
+ if (ret < 0) {
+ dev_err(dev, "Failed to perform measurement: %d\n", ret);
+ goto error;
+ }
+
+ ret = request_data(regmap, chan, &data);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read measurement: %d\n", ret);
+ goto error;
+ }
+
+ *val = float_to_long(data, precision);
+
+error:
+ mutex_unlock(&priv->sensor_lock);
+
+ return ret;
+}
+
+static int read_power(struct device *dev, int chan, long *val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+ int nsamples = priv->power_nsamples[chan];
+ u64 power;
+ int ret;
+
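+ /*
+  * The SYSVDD rail has no voltage measurement channel, so assume a
+  * fixed 5V supply; for all other rails measure the voltage first.
+  */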
+ if (!strcmp("SYSVDD", lochnagar_chan_names[chan])) {
+ power = 5 * LN2_PWR_UNITS;
+ } else {
+ ret = read_sensor(dev, chan, LN2_VOLT, 1, LN2_PWR_UNITS, val);
+ if (ret < 0)
+ return ret;
+
+ power = abs(*val);
+ }
+
+ ret = read_sensor(dev, chan, LN2_CURR, nsamples, LN2_PWR_UNITS, val);
+ if (ret < 0)
+ return ret;
+
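+ /*
+  * Multiply the voltage in uV by the current in uA, then scale the
+  * product back down by LN2_PWR_UNITS to report uW, saturating at
+  * LONG_MAX on overflow.
+  */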
+ power *= abs(*val);
+ power = DIV_ROUND_CLOSEST_ULL(power, LN2_PWR_UNITS);
+
+ if (power > LONG_MAX)
+ *val = LONG_MAX;
+ else
+ *val = power;
+
+ return 0;
+}
+
+static umode_t lochnagar_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int chan)
+{
+ switch (type) {
+ case hwmon_in:
+ if (!strcmp("SYSVDD", lochnagar_chan_names[chan]))
+ return 0;
+ break;
+ case hwmon_power:
+ if (attr == hwmon_power_average_interval)
+ return 0644;
+ break;
+ default:
+ break;
+ }
+
+ return 0444;
+}
+
+static int lochnagar_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int chan, long *val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+ int interval;
+
+ switch (type) {
+ case hwmon_in:
+ return read_sensor(dev, chan, LN2_VOLT, 1, LN2_VOLT_UNITS, val);
+ case hwmon_curr:
+ return read_sensor(dev, chan, LN2_CURR, 1, LN2_CURR_UNITS, val);
+ case hwmon_temp:
+ return read_sensor(dev, chan, LN2_TEMP, 1, LN2_TEMP_UNITS, val);
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_average:
+ return read_power(dev, chan, val);
+ case hwmon_power_average_interval:
+ interval = priv->power_nsamples[chan] * LN2_SAMPLE_US;
+ *val = DIV_ROUND_CLOSEST(interval, 1000);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lochnagar_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int chan, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ case hwmon_curr:
+ case hwmon_power:
+ *str = lochnagar_chan_names[chan];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lochnagar_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int chan, long val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+
+ if (type != hwmon_power || attr != hwmon_power_average_interval)
+ return -EOPNOTSUPP;
+
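+ /*
+  * Convert the requested averaging interval in milliseconds into a
+  * sample count, clamped to the range the hardware supports.
+  */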
+ val = clamp_t(long, val, 1, (LN2_MAX_NSAMPLE * LN2_SAMPLE_US) / 1000);
+ val = DIV_ROUND_CLOSEST(val * 1000, LN2_SAMPLE_US);
+
+ priv->power_nsamples[chan] = val;
+
+ return 0;
+}
+
+static const struct hwmon_ops lochnagar_ops = {
+ .is_visible = lochnagar_is_visible,
+ .read = lochnagar_read,
+ .read_string = lochnagar_read_string,
+ .write = lochnagar_write,
+};
+
+static const struct hwmon_channel_info *lochnagar_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power, HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL),
+ NULL
+};
+
+static const struct hwmon_chip_info lochnagar_chip_info = {
+ .ops = &lochnagar_ops,
+ .info = lochnagar_info,
+};
+
+static const struct of_device_id lochnagar_of_match[] = {
+ { .compatible = "cirrus,lochnagar2-hwmon" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lochnagar_of_match);
+
+static int lochnagar_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *hwmon_dev;
+ struct lochnagar_hwmon *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->sensor_lock);
+
+ priv->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!priv->regmap) {
+ dev_err(dev, "No register map found\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->power_nsamples); i++)
+ priv->power_nsamples[i] = 96;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "Lochnagar", priv,
+ &lochnagar_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver lochnagar_hwmon_driver = {
+ .driver = {
+ .name = "lochnagar-hwmon",
+ .of_match_table = lochnagar_of_match,
+ },
+ .probe = lochnagar_hwmon_probe,
+};
+module_platform_driver(lochnagar_hwmon_driver);
+
+MODULE_AUTHOR("Lucas Tanure <tanureal@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Lochnagar hardware monitoring features");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index 76c6fda76d95..2b5cd37e6dc3 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -208,7 +208,7 @@ static const struct i2c_device_id ltc4151_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ltc4151_id);
-static const struct of_device_id ltc4151_match[] = {
+static const struct of_device_id __maybe_unused ltc4151_match[] = {
{ .compatible = "lltc,ltc4151" },
{},
};
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 34d0653ca607..26542635de9b 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -390,57 +390,30 @@ static umode_t ltc4245_is_visible(const void *_data,
}
}
-static const u32 ltc4245_in_config[] = {
- HWMON_I_INPUT, /* dummy, skipped in is_visible */
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ltc4245_in = {
- .type = hwmon_in,
- .config = ltc4245_in_config,
-};
-
-static const u32 ltc4245_curr_config[] = {
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info ltc4245_curr = {
- .type = hwmon_curr,
- .config = ltc4245_curr_config,
-};
-
-static const u32 ltc4245_power_config[] = {
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ltc4245_power = {
- .type = hwmon_power,
- .config = ltc4245_power_config,
-};
-
static const struct hwmon_channel_info *ltc4245_info[] = {
- &ltc4245_in,
- &ltc4245_curr,
- &ltc4245_power,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT),
NULL
};
diff --git a/drivers/hwmon/ltq-cputemp.c b/drivers/hwmon/ltq-cputemp.c
index 1d33f94594c1..570791f0e024 100644
--- a/drivers/hwmon/ltq-cputemp.c
+++ b/drivers/hwmon/ltq-cputemp.c
@@ -77,29 +77,11 @@ static umode_t ltq_is_visible(const void *_data, enum hwmon_sensor_types type,
}
}
-static const u32 ltq_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info ltq_chip = {
- .type = hwmon_chip,
- .config = ltq_chip_config,
-};
-
-static const u32 ltq_temp_config[] = {
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ltq_temp = {
- .type = hwmon_temp,
- .config = ltq_temp_config,
-};
-
static const struct hwmon_channel_info *ltq_info[] = {
- &ltq_chip,
- &ltq_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT),
NULL
};
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index 3d9e210beedf..dd6a35219a18 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * For further information, see the Documentation/hwmon/max197 file.
+ * For further information, see the Documentation/hwmon/max197.rst file.
*/
#include <linux/kernel.h>
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 722bcbb9865a..0b0b04d36931 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -400,45 +400,27 @@ static umode_t max31790_is_visible(const void *data,
}
}
-static const u32 max31790_fan_config[] = {
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info max31790_fan = {
- .type = hwmon_fan,
- .config = max31790_fan_config,
-};
-
-static const u32 max31790_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
-};
-
-static const struct hwmon_channel_info max31790_pwm = {
- .type = hwmon_pwm,
- .config = max31790_pwm_config,
-};
-
static const struct hwmon_channel_info *max31790_info[] = {
- &max31790_fan,
- &max31790_pwm,
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
NULL
};
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
index 35555f0eefb9..1810a753f1a3 100644
--- a/drivers/hwmon/max6621.c
+++ b/drivers/hwmon/max6621.c
@@ -458,37 +458,19 @@ static const struct regmap_config max6621_regmap_config = {
.num_reg_defaults = ARRAY_SIZE(max6621_regmap_default),
};
-static u32 max6621_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info max6621_chip = {
- .type = hwmon_chip,
- .config = max6621_chip_config,
-};
-
-static const u32 max6621_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0
-};
-
-static const struct hwmon_channel_info max6621_temp = {
- .type = hwmon_temp,
- .config = max6621_temp_config,
-};
-
static const struct hwmon_channel_info *max6621_info[] = {
- &max6621_chip,
- &max6621_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL
};
@@ -570,7 +552,7 @@ static const struct i2c_device_id max6621_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max6621_id);
-static const struct of_device_id max6621_of_match[] = {
+static const struct of_device_id __maybe_unused max6621_of_match[] = {
{ .compatible = "maxim,max6621" },
{ }
};
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 61135a2d0cff..939953240827 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -40,6 +40,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/of_device.h>
+#include <linux/thermal.h>
/*
* Insmod parameters
@@ -113,6 +114,7 @@ module_param(clock, int, 0444);
struct max6650_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
+ struct thermal_cooling_device *cooling_dev;
struct mutex update_lock;
int nr_fans;
char valid; /* zero until following fields are valid */
@@ -125,6 +127,7 @@ struct max6650_data {
u8 count;
u8 dac;
u8 alarm;
+ unsigned long cooling_dev_state;
};
static const u8 tach_reg[] = {
@@ -134,7 +137,7 @@ static const u8 tach_reg[] = {
MAX6650_REG_TACH3,
};
-static const struct of_device_id max6650_dt_match[] = {
+static const struct of_device_id __maybe_unused max6650_dt_match[] = {
{
.compatible = "maxim,max6650",
.data = (void *)1
@@ -694,6 +697,63 @@ static int max6650_init_client(struct max6650_data *data,
return 0;
}
+#if IS_ENABLED(CONFIG_THERMAL)
+
+static int max6650_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 255;
+
+ return 0;
+}
+
+static int max6650_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct max6650_data *data = cdev->devdata;
+
+ *state = data->cooling_dev_state;
+
+ return 0;
+}
+
+static int max6650_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct max6650_data *data = cdev->devdata;
+ struct i2c_client *client = data->client;
+ int err;
+
+ state = clamp_val(state, 0, 255);
+
+ mutex_lock(&data->update_lock);
+
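+ /*
+  * A higher cooling state means a faster fan. The DAC value scales down
+  * from full scale (180 for 12V fans, 76 for 5V fans) to 0, since a
+  * lower DAC output drives the fan harder.
+  */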
+ if (data->config & MAX6650_CFG_V12)
+ data->dac = 180 - (180 * state)/255;
+ else
+ data->dac = 76 - (76 * state)/255;
+
+ err = i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);
+
+ if (!err) {
+ max6650_set_operating_mode(data, state ?
+ MAX6650_CFG_MODE_OPEN_LOOP :
+ MAX6650_CFG_MODE_OFF);
+ data->cooling_dev_state = state;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return err < 0 ? err : 0;
+}
+
+static const struct thermal_cooling_device_ops max6650_cooling_ops = {
+ .get_max_state = max6650_get_max_state,
+ .get_cur_state = max6650_get_cur_state,
+ .set_cur_state = max6650_set_cur_state,
+};
+#endif
+
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -709,6 +769,7 @@ static int max6650_probe(struct i2c_client *client,
return -ENOMEM;
data->client = client;
+ i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->nr_fans = of_id ? (int)(uintptr_t)of_id->data : id->driver_data;
@@ -727,7 +788,31 @@ static int max6650_probe(struct i2c_client *client,
hwmon_dev = devm_hwmon_device_register_with_groups(dev,
client->name, data,
data->groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ err = PTR_ERR_OR_ZERO(hwmon_dev);
+ if (err)
+ return err;
+
+#if IS_ENABLED(CONFIG_THERMAL)
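+ /*
+  * Registering the cooling device is best effort; on failure only a
+  * warning is printed and the hwmon interface keeps working.
+  */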
+ data->cooling_dev =
+ thermal_of_cooling_device_register(client->dev.of_node,
+ client->name, data,
+ &max6650_cooling_ops);
+ if (IS_ERR(data->cooling_dev))
+ dev_warn(&client->dev,
+ "thermal cooling device register failed: %ld\n",
+ PTR_ERR(data->cooling_dev));
+#endif
+ return 0;
+}
+
+static int max6650_remove(struct i2c_client *client)
+{
+ struct max6650_data *data = i2c_get_clientdata(client);
+
+ if (!IS_ERR(data->cooling_dev))
+ thermal_cooling_device_unregister(data->cooling_dev);
+
+ return 0;
}
static const struct i2c_device_id max6650_id[] = {
@@ -743,6 +828,7 @@ static struct i2c_driver max6650_driver = {
.of_match_table = of_match_ptr(max6650_dt_match),
},
.probe = max6650_probe,
+ .remove = max6650_remove,
.id_table = max6650_id,
};
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index da43f7ae3de1..328793eaee3c 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -650,7 +650,7 @@ static const struct i2c_device_id max6697_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max6697_id);
-static const struct of_device_id max6697_of_match[] = {
+static const struct of_device_id __maybe_unused max6697_of_match[] = {
{
.compatible = "maxim,max6581",
.data = (void *)max6581
diff --git a/drivers/hwmon/menf21bmc_hwmon.c b/drivers/hwmon/menf21bmc_hwmon.c
index c29a4c3c6b9e..f540f938fcd9 100644
--- a/drivers/hwmon/menf21bmc_hwmon.c
+++ b/drivers/hwmon/menf21bmc_hwmon.c
@@ -101,7 +101,7 @@ static int menf21bmc_hwmon_get_volt_limits(struct menf21bmc_hwmon *drv_data)
}
static ssize_t
-show_label(struct device *dev, struct device_attribute *devattr, char *buf)
+label_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -109,7 +109,7 @@ show_label(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_in(struct device *dev, struct device_attribute *devattr, char *buf)
+in_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct menf21bmc_hwmon *drv_data = menf21bmc_hwmon_update(dev);
@@ -121,7 +121,7 @@ show_in(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_min(struct device *dev, struct device_attribute *devattr, char *buf)
+min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct menf21bmc_hwmon *drv_data = dev_get_drvdata(dev);
@@ -130,7 +130,7 @@ show_min(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_max(struct device *dev, struct device_attribute *devattr, char *buf)
+max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct menf21bmc_hwmon *drv_data = dev_get_drvdata(dev);
@@ -138,21 +138,26 @@ show_max(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%d\n", drv_data->in_max[attr->index]);
}
-#define create_voltage_sysfs(idx) \
-static SENSOR_DEVICE_ATTR(in##idx##_input, S_IRUGO, \
- show_in, NULL, idx); \
-static SENSOR_DEVICE_ATTR(in##idx##_min, S_IRUGO, \
- show_min, NULL, idx); \
-static SENSOR_DEVICE_ATTR(in##idx##_max, S_IRUGO, \
- show_max, NULL, idx); \
-static SENSOR_DEVICE_ATTR(in##idx##_label, S_IRUGO, \
- show_label, NULL, idx);
-
-create_voltage_sysfs(0);
-create_voltage_sysfs(1);
-create_voltage_sysfs(2);
-create_voltage_sysfs(3);
-create_voltage_sysfs(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RO(in0_min, min, 0);
+static SENSOR_DEVICE_ATTR_RO(in0_max, max, 0);
+static SENSOR_DEVICE_ATTR_RO(in0_label, label, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RO(in1_min, min, 1);
+static SENSOR_DEVICE_ATTR_RO(in1_max, max, 1);
+static SENSOR_DEVICE_ATTR_RO(in1_label, label, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RO(in2_min, min, 2);
+static SENSOR_DEVICE_ATTR_RO(in2_max, max, 2);
+static SENSOR_DEVICE_ATTR_RO(in2_label, label, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RO(in3_min, min, 3);
+static SENSOR_DEVICE_ATTR_RO(in3_max, max, 3);
+static SENSOR_DEVICE_ATTR_RO(in3_label, label, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RO(in4_min, min, 4);
+static SENSOR_DEVICE_ATTR_RO(in4_max, max, 4);
+static SENSOR_DEVICE_ATTR_RO(in4_label, label, 4);
static struct attribute *menf21bmc_hwmon_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index db8c6de0b6a0..f816d2ae1e58 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -27,7 +27,9 @@
#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2)
#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */
#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
-#define MLXREG_FAN_TACHO_DIVIDER_DEF 1132
+#define MLXREG_FAN_TACHO_DIV_MIN 283
+#define MLXREG_FAN_TACHO_DIV_DEF (MLXREG_FAN_TACHO_DIV_MIN * 4)
+#define MLXREG_FAN_TACHO_DIV_SCALE_MAX 64
/*
* FAN datasheet defines the formula for RPM calculations as RPM = 15/t-high.
* The logic in a programmable device measures the time t-high by sampling the
@@ -227,40 +229,22 @@ mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
-static const u32 mlxreg_fan_hwmon_fan_config[] = {
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info mlxreg_fan_hwmon_fan = {
- .type = hwmon_fan,
- .config = mlxreg_fan_hwmon_fan_config,
-};
-
-static const u32 mlxreg_fan_hwmon_pwm_config[] = {
- HWMON_PWM_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info mlxreg_fan_hwmon_pwm = {
- .type = hwmon_pwm,
- .config = mlxreg_fan_hwmon_pwm_config,
-};
-
static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
- &mlxreg_fan_hwmon_fan,
- &mlxreg_fan_hwmon_pwm,
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT),
NULL
};
@@ -360,15 +344,57 @@ static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
.set_cur_state = mlxreg_fan_set_cur_state,
};
+static int mlxreg_fan_connect_verify(struct mlxreg_fan *fan,
+ struct mlxreg_core_data *data)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, data->capability, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query capability register 0x%08x\n",
+ data->capability);
+ return err;
+ }
+
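+ /* A set capability bit indicates the tachometer is actually connected. */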
+ return !!(regval & data->bit);
+}
+
+static int mlxreg_fan_speed_divider_get(struct mlxreg_fan *fan,
+ struct mlxreg_core_data *data)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, data->capability, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query capability register 0x%08x\n",
+ data->capability);
+ return err;
+ }
+
+ /*
+ * Set the divider value according to the capability register, in case it
+ * contains a valid value. Otherwise use the default value. The purpose of
+ * this validation is to protect against the old hardware, in which
+ * this register can return zero.
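+ * For example, a capability value of 4 gives 4 * MLXREG_FAN_TACHO_DIV_MIN,
+ * which is the default divider of 1132.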
+ */
+ if (regval > 0 && regval <= MLXREG_FAN_TACHO_DIV_SCALE_MAX)
+ fan->divider = regval * MLXREG_FAN_TACHO_DIV_MIN;
+
+ return 0;
+}
+
static int mlxreg_fan_config(struct mlxreg_fan *fan,
struct mlxreg_core_platform_data *pdata)
{
struct mlxreg_core_data *data = pdata->data;
bool configured = false;
int tacho_num = 0, i;
+ int err;
fan->samples = MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF;
- fan->divider = MLXREG_FAN_TACHO_DIVIDER_DEF;
+ fan->divider = MLXREG_FAN_TACHO_DIV_DEF;
for (i = 0; i < pdata->counter; i++, data++) {
if (strnstr(data->label, "tacho", sizeof(data->label))) {
if (tacho_num == MLXREG_FAN_MAX_TACHO) {
@@ -376,6 +402,17 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
data->label);
return -EINVAL;
}
+
+ if (data->capability) {
+ err = mlxreg_fan_connect_verify(fan, data);
+ if (err < 0)
+ return err;
+ else if (!err) {
+ tacho_num++;
+ continue;
+ }
+ }
+
fan->tacho[tacho_num].reg = data->reg;
fan->tacho[tacho_num].mask = data->mask;
fan->tacho[tacho_num++].connected = true;
@@ -394,13 +431,21 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
return -EINVAL;
}
/* Validate that conf parameters are not zeros. */
- if (!data->mask || !data->bit) {
+ if (!data->mask && !data->bit && !data->capability) {
dev_err(fan->dev, "invalid conf entry params: %s\n",
data->label);
return -EINVAL;
}
- fan->samples = data->mask;
- fan->divider = data->bit;
+ if (data->capability) {
+ err = mlxreg_fan_speed_divider_get(fan, data);
+ if (err)
+ return err;
+ } else {
+ if (data->mask)
+ fan->samples = data->mask;
+ if (data->bit)
+ fan->divider = data->bit;
+ }
configured = true;
} else {
dev_err(fan->dev, "invalid label: %s\n", data->label);
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 82c7de7b4639..04516789b070 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -400,89 +400,53 @@ static int nct7904_detect(struct i2c_client *client,
return 0;
}
-static const u32 nct7904_in_config[] = {
- HWMON_I_INPUT, /* dummy, skipped in is_visible */
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_in = {
- .type = hwmon_in,
- .config = nct7904_in_config,
-};
-
-static const u32 nct7904_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_fan = {
- .type = hwmon_fan,
- .config = nct7904_fan_config,
-};
-
-static const u32 nct7904_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_pwm = {
- .type = hwmon_pwm,
- .config = nct7904_pwm_config,
-};
-
-static const u32 nct7904_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_temp = {
- .type = hwmon_temp,
- .config = nct7904_temp_config,
-};
-
static const struct hwmon_channel_info *nct7904_info[] = {
- &nct7904_in,
- &nct7904_fan,
- &nct7904_pwm,
- &nct7904_temp,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT, /* dummy, skipped in is_visible */
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT),
NULL
};
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index b3b907bdfb63..1dc0cd452498 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -629,51 +629,33 @@ static umode_t npcm7xx_is_visible(const void *data,
}
}
-static const u32 npcm7xx_pwm_config[] = {
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info npcm7xx_pwm = {
- .type = hwmon_pwm,
- .config = npcm7xx_pwm_config,
-};
-
-static const u32 npcm7xx_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info npcm7xx_fan = {
- .type = hwmon_fan,
- .config = npcm7xx_fan_config,
-};
-
static const struct hwmon_channel_info *npcm7xx_info[] = {
- &npcm7xx_pwm,
- &npcm7xx_fan,
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT),
NULL
};
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e4f9f7ce92fa..fd47c36a52bc 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -629,29 +629,9 @@ static umode_t ntc_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
-static const u32 ntc_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info ntc_chip = {
- .type = hwmon_chip,
- .config = ntc_chip_config,
-};
-
-static const u32 ntc_temp_config[] = {
- HWMON_T_INPUT, HWMON_T_TYPE,
- 0
-};
-
-static const struct hwmon_channel_info ntc_temp = {
- .type = hwmon_temp,
- .config = ntc_temp_config,
-};
-
static const struct hwmon_channel_info *ntc_info[] = {
- &ntc_chip,
- &ntc_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_TYPE),
NULL
};
diff --git a/drivers/hwmon/occ/Kconfig b/drivers/hwmon/occ/Kconfig
index 66686628fb53..1658634a053e 100644
--- a/drivers/hwmon/occ/Kconfig
+++ b/drivers/hwmon/occ/Kconfig
@@ -5,11 +5,14 @@
config SENSORS_OCC_P8_I2C
tristate "POWER8 OCC through I2C"
depends on I2C
+ depends on ARM || ARM64 || COMPILE_TEST
select SENSORS_OCC
help
This option enables support for monitoring sensors provided by the
- On-Chip Controller (OCC) on a POWER8 processor. Communications with
- the OCC are established through I2C bus.
+ On-Chip Controller (OCC) on a POWER8 processor. However, this driver
+ can only run on a baseboard management controller (BMC) connected to
+ the P8, not the POWER processor itself. Communications with the OCC are
+ established through the I2C bus.
This driver can also be built as a module. If so, the module will be
called occ-p8-hwmon.
@@ -17,15 +20,17 @@ config SENSORS_OCC_P8_I2C
config SENSORS_OCC_P9_SBE
tristate "POWER9 OCC through SBE"
depends on FSI_OCC
+ depends on ARM || ARM64 || COMPILE_TEST
select SENSORS_OCC
help
This option enables support for monitoring sensors provided by the
- On-Chip Controller (OCC) on a POWER9 processor. Communications with
- the OCC are established through SBE fifo on an FSI bus.
+ On-Chip Controller (OCC) on a POWER9 processor. However, this driver
+ can only run on a baseboard management controller (BMC) connected to
+ the P9, not the POWER processor itself. Communications with the OCC are
+ established through the SBE FIFO on an FSI bus.
This driver can also be built as a module. If so, the module will be
called occ-p9-hwmon.
config SENSORS_OCC
- bool "POWER On-Chip Controller"
- depends on SENSORS_OCC_P8_I2C || SENSORS_OCC_P9_SBE
+ tristate
diff --git a/drivers/hwmon/occ/Makefile b/drivers/hwmon/occ/Makefile
index 3fec12ddc7e7..493588d5a9d3 100644
--- a/drivers/hwmon/occ/Makefile
+++ b/drivers/hwmon/occ/Makefile
@@ -1,5 +1,7 @@
-occ-p8-hwmon-objs := common.o sysfs.o p8_i2c.o
-occ-p9-hwmon-objs := common.o sysfs.o p9_sbe.o
+occ-hwmon-common-objs := common.o sysfs.o
+occ-p8-hwmon-objs := p8_i2c.o
+occ-p9-hwmon-objs := p9_sbe.o
+obj-$(CONFIG_SENSORS_OCC) += occ-hwmon-common.o
obj-$(CONFIG_SENSORS_OCC_P8_I2C) += occ-p8-hwmon.o
obj-$(CONFIG_SENSORS_OCC_P9_SBE) += occ-p9-hwmon.o
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index b91a80abf724..13a6290c8d25 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -2,11 +2,13 @@
// Copyright IBM Corp 2019
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/math64.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <asm/unaligned.h>
@@ -139,6 +141,7 @@ static int occ_poll(struct occ *occ)
/* mutex should already be locked if necessary */
rc = occ->send_cmd(occ, cmd);
if (rc) {
+ occ->last_error = rc;
if (occ->error_count++ > OCC_ERROR_COUNT_THRESHOLD)
occ->error = rc;
@@ -147,6 +150,7 @@ static int occ_poll(struct occ *occ)
/* clear error since communication was successful */
occ->error_count = 0;
+ occ->last_error = 0;
occ->error = 0;
/* check for safe state */
@@ -208,6 +212,8 @@ int occ_update_response(struct occ *occ)
if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
rc = occ_poll(occ);
occ->last_update = jiffies;
+ } else {
+ rc = occ->last_error;
}
mutex_unlock(&occ->lock);
@@ -890,6 +896,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
s++;
}
}
+
+ s = (sensors->power.num_sensors * 4) + 1;
} else {
for (i = 0; i < sensors->power.num_sensors; ++i) {
s = i + 1;
@@ -918,11 +926,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
show_power, NULL, 3, i);
attr++;
}
- }
- if (sensors->caps.num_sensors >= 1) {
s = sensors->power.num_sensors + 1;
+ }
+ if (sensors->caps.num_sensors >= 1) {
snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
0, 0);
@@ -1097,3 +1105,8 @@ int occ_setup(struct occ *occ, const char *name)
return rc;
}
+EXPORT_SYMBOL_GPL(occ_setup);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("Common OCC hwmon code");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index ed2cf4245295..fc13f3c73c47 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -106,7 +106,8 @@ struct occ {
struct attribute_group group;
const struct attribute_group *groups[2];
- int error; /* latest transfer error */
+ int error; /* final transfer error after retry */
+ int last_error; /* latest transfer error */
unsigned int error_count; /* number of xfr errors observed */
unsigned long last_safe; /* time OCC entered "safe" state */
diff --git a/drivers/hwmon/occ/sysfs.c b/drivers/hwmon/occ/sysfs.c
index fe3d15e416e7..c73be0747e66 100644
--- a/drivers/hwmon/occ/sysfs.c
+++ b/drivers/hwmon/occ/sysfs.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
@@ -42,16 +43,16 @@ static ssize_t occ_sysfs_show(struct device *dev,
val = !!(header->status & OCC_STAT_ACTIVE);
break;
case 2:
- val = !!(header->status & OCC_EXT_STAT_DVFS_OT);
+ val = !!(header->ext_status & OCC_EXT_STAT_DVFS_OT);
break;
case 3:
- val = !!(header->status & OCC_EXT_STAT_DVFS_POWER);
+ val = !!(header->ext_status & OCC_EXT_STAT_DVFS_POWER);
break;
case 4:
- val = !!(header->status & OCC_EXT_STAT_MEM_THROTTLE);
+ val = !!(header->ext_status & OCC_EXT_STAT_MEM_THROTTLE);
break;
case 5:
- val = !!(header->status & OCC_EXT_STAT_QUICK_DROP);
+ val = !!(header->ext_status & OCC_EXT_STAT_QUICK_DROP);
break;
case 6:
val = header->occ_state;
@@ -62,9 +63,6 @@ static ssize_t occ_sysfs_show(struct device *dev,
else
val = 1;
break;
- case 8:
- val = occ->error;
- break;
default:
return -EINVAL;
}
@@ -72,6 +70,16 @@ static ssize_t occ_sysfs_show(struct device *dev,
return snprintf(buf, PAGE_SIZE - 1, "%d\n", val);
}
+static ssize_t occ_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct occ *occ = dev_get_drvdata(dev);
+
+ occ_update_response(occ);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", occ->error);
+}
+
static SENSOR_DEVICE_ATTR(occ_master, 0444, occ_sysfs_show, NULL, 0);
static SENSOR_DEVICE_ATTR(occ_active, 0444, occ_sysfs_show, NULL, 1);
static SENSOR_DEVICE_ATTR(occ_dvfs_overtemp, 0444, occ_sysfs_show, NULL, 2);
@@ -80,7 +88,7 @@ static SENSOR_DEVICE_ATTR(occ_mem_throttle, 0444, occ_sysfs_show, NULL, 4);
static SENSOR_DEVICE_ATTR(occ_quick_pwr_drop, 0444, occ_sysfs_show, NULL, 5);
static SENSOR_DEVICE_ATTR(occ_state, 0444, occ_sysfs_show, NULL, 6);
static SENSOR_DEVICE_ATTR(occs_present, 0444, occ_sysfs_show, NULL, 7);
-static SENSOR_DEVICE_ATTR(occ_error, 0444, occ_sysfs_show, NULL, 8);
+static DEVICE_ATTR_RO(occ_error);
static struct attribute *occ_attributes[] = {
&sensor_dev_attr_occ_master.dev_attr.attr,
@@ -91,7 +99,7 @@ static struct attribute *occ_attributes[] = {
&sensor_dev_attr_occ_quick_pwr_drop.dev_attr.attr,
&sensor_dev_attr_occ_state.dev_attr.attr,
&sensor_dev_attr_occs_present.dev_attr.attr,
- &sensor_dev_attr_occ_error.dev_attr.attr,
+ &dev_attr_occ_error.attr,
NULL
};
@@ -155,7 +163,7 @@ void occ_sysfs_poll_done(struct occ *occ)
}
if (occ->error && occ->error != occ->prev_error) {
- name = sensor_dev_attr_occ_error.dev_attr.attr.name;
+ name = dev_attr_occ_error.attr.name;
sysfs_notify(&occ->bus_dev->kobj, NULL, name);
}
@@ -177,3 +185,4 @@ void occ_shutdown(struct occ *occ)
{
sysfs_remove_group(&occ->bus_dev->kobj, &occ_sysfs);
}
+EXPORT_SYMBOL_GPL(occ_shutdown);
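The occ_error conversion works because DEVICE_ATTR_RO(occ_error) generates a struct device_attribute named dev_attr_occ_error whose show callback is resolved by name to the occ_error_show() function added above. Roughly, from <linux/device.h> and <linux/sysfs.h>, shown for context only:

	#define DEVICE_ATTR_RO(_name) \
		struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

	#define __ATTR_RO(_name) {					\
		.attr	= { .name = __stringify(_name), .mode = 0444 },	\
		.show	= _name##_show,					\
	}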
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index d1a3f2040c00..58eee8fa3e6d 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
#define LD_IN 1
#define LD_TEMP 1
+static inline int superio_enter(int sioaddr)
+{
+ if (!request_muxed_region(sioaddr, 2, DRVNAME))
+ return -EBUSY;
+ return 0;
+}
+
static inline void superio_outb(int sioaddr, int reg, int val)
{
outb(reg, sioaddr);
@@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr)
{
outb(0x02, sioaddr);
outb(0x02, sioaddr + 1);
+ release_region(sioaddr, 2);
}
/*
@@ -1195,7 +1203,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
{
u16 val;
u8 cfg, cfg_b;
- int i, err = 0;
+ int i, err;
+
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
/* Identify device */
val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID);
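The Super-I/O index/data port pair is shared by several drivers and platform code, so superio_enter() now reserves it with request_muxed_region(). Unlike a plain request_region(), a muxed request sleeps until a competing muxed holder releases the ports and fails only when a non-muxed owner has claimed them, which is what the -EBUSY path above reports. The helper is a thin wrapper, roughly (from <linux/ioport.h>):

	#define request_muxed_region(start, n, name)			\
		__request_region(&ioport_resource, (start), (n), (name), \
				 IORESOURCE_MUXED)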
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 629cb45f8557..7edab7e30eaf 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -54,6 +54,24 @@ config SENSORS_IR35221
This driver can also be built as a module. If so, the module will
be called ir35521.
+config SENSORS_IR38064
+ tristate "Infineon IR38064"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+ IR38064.
+
+ This driver can also be built as a module. If so, the module will
+ be called ir38064.
+
+config SENSORS_ISL68137
+ tristate "Intersil ISL68137"
+ help
+ If you say yes here you get hardware monitoring support for Intersil
+ ISL68137.
+
+ This driver can also be built as a module. If so, the module will
+ be called isl68137.
+
config SENSORS_LM25066
tristate "National Semiconductor LM25066 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index ea0e39518c21..2219b9300316 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
+obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
+obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
new file mode 100644
index 000000000000..1820f5077f66
--- /dev/null
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Infineon IR38064
+ *
+ * Copyright (c) 2017 Google Inc
+ *
+ * VOUT_MODE is not supported by the device. The driver fakes VOUT linear16
+ * mode with exponent value -8 as direct mode with m=256/b=0/R=0.
+ *
+ * The device supports VOUT_PEAK, IOUT_PEAK, and TEMPERATURE_PEAK, however
+ * this driver does not currently support them.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info ir38064_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .m[PSC_VOLTAGE_OUT] = 256,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT,
+};
+
+static int ir38064_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &ir38064_info);
+}
+
+static const struct i2c_device_id ir38064_id[] = {
+ {"ir38064", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ir38064_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ir38064_driver = {
+ .driver = {
+ .name = "ir38064",
+ },
+ .probe = ir38064_probe,
+ .remove = pmbus_do_remove,
+ .id_table = ir38064_id,
+};
+
+module_i2c_driver(ir38064_driver);
+
+MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon IR38064");
+MODULE_LICENSE("GPL");
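To see why the m=256/b=0/R=0 coefficients reproduce linear16 with exponent -8: PMBus direct mode decodes X = (Y * 10^-R - b) / m, which here reduces to X = Y / 256, exactly the linear16 value X = Y * 2^-8. A worked example, with an illustrative raw reading:

	/* raw READ_VOUT = 307:
	 *	linear16, exponent -8:  307 * 2^-8 = 1.199 V
	 *	direct, m=256/b=0/R=0:  307 / 256  = 1.199 V
	 */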
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
new file mode 100644
index 000000000000..515596c92fe1
--- /dev/null
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Intersil ISL68137
+ *
+ * Copyright (c) 2017 Google Inc
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "pmbus.h"
+
+#define ISL68137_VOUT_AVS 0x30
+
+static ssize_t isl68137_avs_enable_show_page(struct i2c_client *client,
+ int page,
+ char *buf)
+{
+ int val = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
+
+ return sprintf(buf, "%d\n",
+ (val & ISL68137_VOUT_AVS) == ISL68137_VOUT_AVS ? 1 : 0);
+}
+
+static ssize_t isl68137_avs_enable_store_page(struct i2c_client *client,
+ int page,
+ const char *buf, size_t count)
+{
+ int rc, op_val;
+ bool result;
+
+ rc = kstrtobool(buf, &result);
+ if (rc)
+ return rc;
+
+ op_val = result ? ISL68137_VOUT_AVS : 0;
+
+ /*
+ * Writes to the VOUT setpoint over AVSBus persist after the VRM is
+ * switched to PMBus control. Switching back to AVSBus control
+ * restores this persisted setpoint rather than re-initializing to
+ * the PMBus VOUT_COMMAND value. The workaround is to write
+ * VOUT_COMMAND over PMBus before re-enabling AVS control.
+ */
+ if (op_val == ISL68137_VOUT_AVS) {
+ rc = pmbus_read_word_data(client, page, PMBUS_VOUT_COMMAND);
+ if (rc < 0)
+ return rc;
+
+ rc = pmbus_write_word_data(client, page, PMBUS_VOUT_COMMAND,
+ rc);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = pmbus_update_byte_data(client, page, PMBUS_OPERATION,
+ ISL68137_VOUT_AVS, op_val);
+
+ return (rc < 0) ? rc : count;
+}
+
+static ssize_t isl68137_avs_enable_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return isl68137_avs_enable_show_page(client, attr->index, buf);
+}
+
+static ssize_t isl68137_avs_enable_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return isl68137_avs_enable_store_page(client, attr->index, buf, count);
+}
+
+static SENSOR_DEVICE_ATTR_RW(avs0_enable, isl68137_avs_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(avs1_enable, isl68137_avs_enable, 1);
+
+static struct attribute *enable_attrs[] = {
+ &sensor_dev_attr_avs0_enable.dev_attr.attr,
+ &sensor_dev_attr_avs1_enable.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group enable_group = {
+ .attrs = enable_attrs,
+};
+
+static const struct attribute_group *attribute_groups[] = {
+ &enable_group,
+ NULL,
+};
+
+static struct pmbus_driver_info isl68137_info = {
+ .pages = 2,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_IN] = 1,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 2,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 1,
+ .m[PSC_POWER] = 1,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = 0,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2
+ | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
+ .groups = attribute_groups,
+};
+
+static int isl68137_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &isl68137_info);
+}
+
+static const struct i2c_device_id isl68137_id[] = {
+ {"isl68137", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, isl68137_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver isl68137_driver = {
+ .driver = {
+ .name = "isl68137",
+ },
+ .probe = isl68137_probe,
+ .remove = pmbus_do_remove,
+ .id_table = isl68137_id,
+};
+
+module_i2c_driver(isl68137_driver);
+
+MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
+MODULE_DESCRIPTION("PMBus driver for Intersil ISL68137");
+MODULE_LICENSE("GPL");
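In the coefficient table above, m=1 and b=0 throughout, so each R exponent simply sets the unit scale of the raw word under the direct-mode decode X = (Y * 10^-R - b) / m. Illustrative raw values, not taken from the datasheet:

	/* R=3 (vin/vout):   raw 1200 -> 1200 * 10^-3 = 1.2  (volts)
	 * R=2 (iin):        raw  250 ->  250 * 10^-2 = 2.5  (amps)
	 * R=1 (iout):       raw  125 ->  125 * 10^-1 = 12.5 (amps)
	 * R=0 (power/temp): raw word is used as-is
	 */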
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 53db78753a0d..715d4ab57516 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -26,6 +26,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/log2.h>
#include "pmbus.h"
enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
@@ -39,12 +40,15 @@ enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_CLEAR_PIN_PEAK 0xd6
#define LM25066_DEVICE_SETUP 0xd9
#define LM25066_READ_AVG_VIN 0xdc
+#define LM25066_SAMPLES_FOR_AVG 0xdb
#define LM25066_READ_AVG_VOUT 0xdd
#define LM25066_READ_AVG_IIN 0xde
#define LM25066_READ_AVG_PIN 0xdf
#define LM25066_DEV_SETUP_CL BIT(4) /* Current limit */
+#define LM25066_SAMPLES_FOR_AVG_MAX 4096
+
/* LM25056 only */
#define LM25056_VAUX_OV_WARN_LIMIT 0xe3
@@ -284,6 +288,12 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = 0;
break;
+ case PMBUS_VIRT_SAMPLES:
+ ret = pmbus_read_byte_data(client, 0, LM25066_SAMPLES_FOR_AVG);
+ if (ret < 0)
+ break;
+ ret = 1 << ret;
+ break;
default:
ret = -ENODATA;
break;
@@ -398,6 +408,11 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = pmbus_write_byte(client, 0, LM25066_CLEAR_PIN_PEAK);
break;
+ case PMBUS_VIRT_SAMPLES:
+ word = clamp_val(word, 1, LM25066_SAMPLES_FOR_AVG_MAX);
+ ret = pmbus_write_byte_data(client, 0, LM25066_SAMPLES_FOR_AVG,
+ ilog2(word));
+ break;
default:
ret = -ENODATA;
break;
@@ -438,7 +453,7 @@ static int lm25066_probe(struct i2c_client *client,
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VMON
| PMBUS_HAVE_PIN | PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT
- | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_SAMPLES;
if (data->id == lm25056) {
info->func[0] |= PMBUS_HAVE_STATUS_VMON;
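Because LM25066_SAMPLES_FOR_AVG stores an exponent, writes round down to a power of two. A worked example, with an illustrative requested count:

	/* write: word = clamp_val(100, 1, 4096) = 100; ilog2(100) = 6
	 *        is written to the register.
	 * read:  register value 6 -> 1 << 6 = 64, the effective
	 *        power-of-two sample count reported back to userspace.
	 */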
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 1d24397d36ec..59f85658313c 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -217,6 +217,20 @@ enum pmbus_regs {
PMBUS_VIRT_PWM_ENABLE_2,
PMBUS_VIRT_PWM_ENABLE_3,
PMBUS_VIRT_PWM_ENABLE_4,
+
+ /* Samples for average
+ *
+ * Drivers wanting to expose the number of samples used for averaged
+ * values should implement support in their {read,write}_word_data
+ * callbacks: either for PMBUS_VIRT_SAMPLES, if one setting applies to
+ * all types of measurements, or for any number of the specific
+ * PMBUS_VIRT_*_SAMPLES registers to allow individual control.
+ */
+ PMBUS_VIRT_SAMPLES,
+ PMBUS_VIRT_IN_SAMPLES,
+ PMBUS_VIRT_CURR_SAMPLES,
+ PMBUS_VIRT_POWER_SAMPLES,
+ PMBUS_VIRT_TEMP_SAMPLES,
};
/*
@@ -371,6 +385,7 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_VMON BIT(19)
#define PMBUS_HAVE_PWM12 BIT(20)
#define PMBUS_HAVE_PWM34 BIT(21)
+#define PMBUS_HAVE_SAMPLES BIT(22)
#define PMBUS_PAGE_VIRTUAL BIT(31)
@@ -417,6 +432,9 @@ struct pmbus_driver_info {
/* Regulator functionality, if supported by this chip driver. */
int num_regulators;
const struct regulator_desc *reg_desc;
+
+ /* custom attributes */
+ const struct attribute_group **groups;
};
/* Regulator ops */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 2e2b5851139c..32a74b8be6bd 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -103,7 +103,7 @@ struct pmbus_data {
int max_attributes;
int num_attributes;
struct attribute_group group;
- const struct attribute_group *groups[2];
+ const struct attribute_group **groups;
struct dentry *debugfs; /* debugfs device directory */
struct pmbus_sensor *sensors;
@@ -1073,7 +1073,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
name, seq, type);
boolean->s1 = s1;
boolean->s2 = s2;
- pmbus_attr_init(a, boolean->name, S_IRUGO, pmbus_show_boolean, NULL,
+ pmbus_attr_init(a, boolean->name, 0444, pmbus_show_boolean, NULL,
(reg << 16) | mask);
return pmbus_add_attribute(data, &a->dev_attr.attr);
@@ -1107,7 +1107,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
sensor->update = update;
sensor->convert = convert;
pmbus_dev_attr_init(a, sensor->name,
- readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
+ readonly ? 0444 : 0644,
pmbus_show_sensor, pmbus_set_sensor);
if (pmbus_add_attribute(data, &a->attr))
@@ -1139,7 +1139,7 @@ static int pmbus_add_label(struct pmbus_data *data,
snprintf(label->label, sizeof(label->label), "%s%d", lstring,
index);
- pmbus_dev_attr_init(a, label->name, S_IRUGO, pmbus_show_label, NULL);
+ pmbus_dev_attr_init(a, label->name, 0444, pmbus_show_label, NULL);
return pmbus_add_attribute(data, &a->attr);
}
@@ -1901,6 +1901,112 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
return 0;
}
+struct pmbus_samples_attr {
+ int reg;
+ char *name;
+};
+
+struct pmbus_samples_reg {
+ int page;
+ struct pmbus_samples_attr *attr;
+ struct device_attribute dev_attr;
+};
+
+static struct pmbus_samples_attr pmbus_samples_registers[] = {
+ {
+ .reg = PMBUS_VIRT_SAMPLES,
+ .name = "samples",
+ }, {
+ .reg = PMBUS_VIRT_IN_SAMPLES,
+ .name = "in_samples",
+ }, {
+ .reg = PMBUS_VIRT_CURR_SAMPLES,
+ .name = "curr_samples",
+ }, {
+ .reg = PMBUS_VIRT_POWER_SAMPLES,
+ .name = "power_samples",
+ }, {
+ .reg = PMBUS_VIRT_TEMP_SAMPLES,
+ .name = "temp_samples",
+ }
+};
+
+#define to_samples_reg(x) container_of(x, struct pmbus_samples_reg, dev_attr)
+
+static ssize_t pmbus_show_samples(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int val;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_samples_reg *reg = to_samples_reg(devattr);
+
+ val = _pmbus_read_word_data(client, reg->page, reg->attr->reg);
+ if (val < 0)
+ return val;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t pmbus_set_samples(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int ret;
+ long val;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_samples_reg *reg = to_samples_reg(devattr);
+
+ if (kstrtol(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ ret = _pmbus_write_word_data(client, reg->page, reg->attr->reg, val);
+
+ return ret ? : count;
+}
+
+static int pmbus_add_samples_attr(struct pmbus_data *data, int page,
+ struct pmbus_samples_attr *attr)
+{
+ struct pmbus_samples_reg *reg;
+
+ reg = devm_kzalloc(data->dev, sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ reg->attr = attr;
+ reg->page = page;
+
+ pmbus_dev_attr_init(&reg->dev_attr, attr->name, 0644,
+ pmbus_show_samples, pmbus_set_samples);
+
+ return pmbus_add_attribute(data, &reg->dev_attr.attr);
+}
+
+static int pmbus_add_samples_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int s;
+
+ if (!(info->func[0] & PMBUS_HAVE_SAMPLES))
+ return 0;
+
+ for (s = 0; s < ARRAY_SIZE(pmbus_samples_registers); s++) {
+ struct pmbus_samples_attr *attr;
+ int ret;
+
+ attr = &pmbus_samples_registers[s];
+ if (!pmbus_check_word_register(client, 0, attr->reg))
+ continue;
+
+ ret = pmbus_add_samples_attr(data, 0, attr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int pmbus_find_attributes(struct i2c_client *client,
struct pmbus_data *data)
{
@@ -1932,6 +2038,10 @@ static int pmbus_find_attributes(struct i2c_client *client,
/* Fans */
ret = pmbus_add_fan_attributes(client, data);
+ if (ret)
+ return ret;
+
+ ret = pmbus_add_samples_attributes(client, data);
return ret;
}
@@ -2305,6 +2415,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
struct device *dev = &client->dev;
const struct pmbus_platform_data *pdata = dev_get_platdata(dev);
struct pmbus_data *data;
+ size_t groups_num = 0;
int ret;
if (!info)
@@ -2319,6 +2430,15 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
if (!data)
return -ENOMEM;
+ if (info->groups)
+ while (info->groups[groups_num])
+ groups_num++;
+
+ data->groups = devm_kcalloc(dev, groups_num + 2, sizeof(void *),
+ GFP_KERNEL);
+ if (!data->groups)
+ return -ENOMEM;
+
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->dev = dev;
@@ -2346,6 +2466,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
}
data->groups[0] = &data->group;
+ memcpy(data->groups + 1, info->groups, sizeof(void *) * groups_num);
data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
data, data->groups);
if (IS_ERR(data->hwmon_dev)) {
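For reference, the devm_kcalloc(dev, groups_num + 2, ...) sizing leaves exactly one slot for the core attribute group and one for the terminator. For a driver supplying N custom groups, the array handed to hwmon_device_register_with_groups() ends up as:

	/* data->groups[0]      = &data->group;         core PMBus attrs
	 * data->groups[1..N]   = info->groups[0..N-1]; memcpy() above
	 * data->groups[N + 1]  = NULL;                 kcalloc zero fill
	 */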
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 2bc352c5357f..3fd5105ee9ae 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -97,7 +97,7 @@ static const struct i2c_device_id tps53679_id[] = {
MODULE_DEVICE_TABLE(i2c, tps53679_id);
-static const struct of_device_id tps53679_of_match[] = {
+static const struct of_device_id __maybe_unused tps53679_of_match[] = {
{.compatible = "ti,tps53679"},
{}
};
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ae93885fccd8..40a3e3d0e661 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -151,7 +151,7 @@ static const struct i2c_device_id ucd9000_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ucd9000_id);
-static const struct of_device_id ucd9000_of_match[] = {
+static const struct of_device_id __maybe_unused ucd9000_of_match[] = {
{
.compatible = "ti,ucd9000",
.data = (void *)ucd9000
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index 3ed94585837a..fec7656700db 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -47,7 +47,7 @@ static const struct i2c_device_id ucd9200_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ucd9200_id);
-static const struct of_device_id ucd9200_of_match[] = {
+static const struct of_device_id __maybe_unused ucd9200_of_match[] = {
{
.compatible = "ti,cd9200",
.data = (void *)ucd9200
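The __maybe_unused annotations on these of_device_id tables silence unused-variable warnings in builds without CONFIG_OF, where the drivers' of_match_ptr() references compile to NULL and leave the tables otherwise unreferenced. The macro, roughly (from <linux/of.h>):

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif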
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 167221c7628a..eead8afe6447 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -18,6 +18,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -26,6 +27,7 @@
#include <linux/regulator/consumer.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
+#include <linux/timer.h>
#define MAX_PWM 255
@@ -33,6 +35,14 @@ struct pwm_fan_ctx {
struct mutex lock;
struct pwm_device *pwm;
struct regulator *reg_en;
+
+ int irq;
+ atomic_t pulses;
+ unsigned int rpm;
+ u8 pulses_per_revolution;
+ ktime_t sample_start;
+ struct timer_list rpm_timer;
+
unsigned int pwm_value;
unsigned int pwm_fan_state;
unsigned int pwm_fan_max_state;
@@ -40,6 +50,32 @@ struct pwm_fan_ctx {
struct thermal_cooling_device *cdev;
};
+/* This handler assumes a self-resetting, edge-triggered interrupt. */
+static irqreturn_t pulse_handler(int irq, void *dev_id)
+{
+ struct pwm_fan_ctx *ctx = dev_id;
+
+ atomic_inc(&ctx->pulses);
+
+ return IRQ_HANDLED;
+}
+
+static void sample_timer(struct timer_list *t)
+{
+ struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer);
+ int pulses;
+ u64 tmp;
+
+ pulses = atomic_read(&ctx->pulses);
+ atomic_sub(pulses, &ctx->pulses);
+ tmp = (u64)pulses * 60 * 1000;
+ do_div(tmp, ctx->pulses_per_revolution *
+        ktime_ms_delta(ktime_get(), ctx->sample_start));
+ ctx->rpm = tmp;
+
+ ctx->sample_start = ktime_get();
+ mod_timer(&ctx->rpm_timer, jiffies + HZ);
+}
+
static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
{
unsigned long period;
@@ -100,15 +136,45 @@ static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", ctx->pwm_value);
}
+static ssize_t rpm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ctx->rpm);
+}
static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, rpm, 0);
static struct attribute *pwm_fan_attrs[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
NULL,
};
-ATTRIBUTE_GROUPS(pwm_fan);
+static umode_t pwm_fan_attrs_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+
+ /* Hide fan_input in case no interrupt is available */
+ if (n == 1 && ctx->irq <= 0)
+ return 0;
+
+ return a->mode;
+}
+
+static const struct attribute_group pwm_fan_group = {
+ .attrs = pwm_fan_attrs,
+ .is_visible = pwm_fan_attrs_visible,
+};
+
+static const struct attribute_group *pwm_fan_groups[] = {
+ &pwm_fan_group,
+ NULL,
+};
/* thermal cooling device callbacks */
static int pwm_fan_get_max_state(struct thermal_cooling_device *cdev,
@@ -214,6 +280,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
struct device *hwmon;
int ret;
struct pwm_state state = { };
+ u32 ppr = 2;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -233,6 +300,10 @@ static int pwm_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq == -EPROBE_DEFER)
+ return ctx->irq;
+
ctx->reg_en = devm_regulator_get_optional(&pdev->dev, "fan");
if (IS_ERR(ctx->reg_en)) {
if (PTR_ERR(ctx->reg_en) != -ENODEV)
@@ -257,21 +328,44 @@ static int pwm_fan_probe(struct platform_device *pdev)
ret = pwm_apply_state(ctx->pwm, &state);
if (ret) {
- dev_err(&pdev->dev, "Failed to configure PWM\n");
+ dev_err(&pdev->dev, "Failed to configure PWM: %d\n", ret);
goto err_reg_disable;
}
+ timer_setup(&ctx->rpm_timer, sample_timer, 0);
+
+ of_property_read_u32(pdev->dev.of_node, "pulses-per-revolution", &ppr);
+ ctx->pulses_per_revolution = ppr;
+ if (!ctx->pulses_per_revolution) {
+ dev_err(&pdev->dev, "pulses-per-revolution can't be zero.\n");
+ ret = -EINVAL;
+ goto err_pwm_disable;
+ }
+
+ if (ctx->irq > 0) {
+ ret = devm_request_irq(&pdev->dev, ctx->irq, pulse_handler, 0,
+ pdev->name, ctx);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to request interrupt: %d\n", ret);
+ goto err_pwm_disable;
+ }
+ ctx->sample_start = ktime_get();
+ mod_timer(&ctx->rpm_timer, jiffies + HZ);
+ }
+
hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, "pwmfan",
ctx, pwm_fan_groups);
if (IS_ERR(hwmon)) {
- dev_err(&pdev->dev, "Failed to register hwmon device\n");
ret = PTR_ERR(hwmon);
- goto err_pwm_disable;
+ dev_err(&pdev->dev,
+ "Failed to register hwmon device: %d\n", ret);
+ goto err_del_timer;
}
ret = pwm_fan_of_get_cooling_data(&pdev->dev, ctx);
if (ret)
- return ret;
+ goto err_del_timer;
ctx->pwm_fan_state = ctx->pwm_fan_max_state;
if (IS_ENABLED(CONFIG_THERMAL)) {
@@ -279,10 +373,11 @@ static int pwm_fan_probe(struct platform_device *pdev)
"pwm-fan", ctx,
&pwm_fan_cooling_ops);
if (IS_ERR(cdev)) {
- dev_err(&pdev->dev,
- "Failed to register pwm-fan as cooling device");
ret = PTR_ERR(cdev);
- goto err_pwm_disable;
+ dev_err(&pdev->dev,
+ "Failed to register pwm-fan as cooling device: %d\n",
+ ret);
+ goto err_del_timer;
}
ctx->cdev = cdev;
thermal_cdev_update(cdev);
@@ -290,6 +385,9 @@ static int pwm_fan_probe(struct platform_device *pdev)
return 0;
+err_del_timer:
+ del_timer_sync(&ctx->rpm_timer);
+
err_pwm_disable:
state.enabled = false;
pwm_apply_state(ctx->pwm, &state);
@@ -306,6 +404,8 @@ static int pwm_fan_remove(struct platform_device *pdev)
struct pwm_fan_ctx *ctx = platform_get_drvdata(pdev);
thermal_cooling_device_unregister(ctx->cdev);
+ del_timer_sync(&ctx->rpm_timer);
+
if (ctx->pwm_value)
pwm_disable(ctx->pwm);
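For reference, sample_timer() derives fan speed as rpm = pulses * 60000 / (pulses_per_revolution * delta_ms), where delta_ms is the measured sampling window. A worked example, with illustrative counts:

	/* 40 pulses over a 1000 ms window, fan emitting 2 pulses
	 * per revolution:
	 *	rpm = 40 * 60 * 1000 / (2 * 1000) = 1200
	 */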
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index 0d0457245e7d..efe4bb1ff221 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -86,18 +86,9 @@ static umode_t rpi_is_visible(const void *_data, enum hwmon_sensor_types type,
return 0444;
}
-static const u32 rpi_in_config[] = {
- HWMON_I_LCRIT_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info rpi_in = {
- .type = hwmon_in,
- .config = rpi_in_config,
-};
-
static const struct hwmon_channel_info *rpi_info[] = {
- &rpi_in,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_LCRIT_ALARM),
NULL
};
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 0c4710d35d16..0d65aa5985e2 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -98,7 +98,7 @@ static int s3c_hwmon_read_ch(struct device *dev,
static ssize_t s3c_hwmon_show_raw(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct s3c_hwmon *adc = platform_get_drvdata(to_platform_device(dev));
+ struct s3c_hwmon *adc = dev_get_drvdata(dev);
struct sensor_device_attribute *sa = to_sensor_dev_attr(attr);
int ret;
@@ -164,7 +164,7 @@ static ssize_t s3c_hwmon_ch_show(struct device *dev,
char *buf)
{
struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
- struct s3c_hwmon *hwmon = platform_get_drvdata(to_platform_device(dev));
+ struct s3c_hwmon *hwmon = dev_get_drvdata(dev);
struct s3c_hwmon_pdata *pdata = dev_get_platdata(dev);
struct s3c_hwmon_chcfg *cfg;
int ret;
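The s3c-hwmon change is behavior-neutral: platform_get_drvdata() is a thin wrapper around dev_get_drvdata(), so converting dev to a platform device and back was a redundant round trip. From <linux/platform_device.h>, shown for context only:

	static inline void *platform_get_drvdata(const struct platform_device *pdev)
	{
		return dev_get_drvdata(&pdev->dev);
	}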
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 39b41e35c2bf..7f4a63959730 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -10,7 +10,7 @@
*
* Copyright (c) 2007 Wouter Horre
*
- * For further information, see the Documentation/hwmon/sht15 file.
+ * For further information, see the Documentation/hwmon/sht15.rst file.
*/
#include <linux/interrupt.h>
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 6d789aab54c9..44451b913292 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -67,7 +67,6 @@
#include <linux/acpi.h>
#include <linux/io.h>
-
/*
* If force_addr is set to anything different from 0, we forcibly enable
* the device at the given address.
@@ -222,7 +221,7 @@ static struct platform_driver sis5595_driver = {
};
/* 4 Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *da,
+static ssize_t in_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -231,7 +230,7 @@ static ssize_t show_in(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+static ssize_t in_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -240,7 +239,7 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr]));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+static ssize_t in_max_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -249,8 +248,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -269,8 +268,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -289,19 +288,21 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0);
-show_in_offset(1);
-show_in_offset(2);
-show_in_offset(3);
-show_in_offset(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
/* Temperature */
static ssize_t temp1_input_show(struct device *dev,
@@ -368,7 +369,7 @@ static DEVICE_ATTR_RW(temp1_max);
static DEVICE_ATTR_RW(temp1_max_hyst);
/* 2 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -378,7 +379,7 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -388,8 +389,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -408,7 +409,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -423,8 +424,8 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
* least surprise; the user doesn't expect the fan minimum to change just
* because the divisor changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -480,16 +481,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -500,21 +497,21 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
int nr = to_sensor_dev_attr(da)->index;
return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 15);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 15);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -673,7 +670,6 @@ static int sis5595_remove(struct platform_device *pdev)
return 0;
}
-
/* ISA access must be locked explicitly. */
static int sis5595_read_value(struct sis5595_data *data, u8 reg)
{
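The attribute conversions in this and the following drivers rely on a naming convention: SENSOR_DEVICE_ATTR_RO(in0_input, in, 0) resolves its show callback to in_show(), and the _RW variant additionally wires in_store(), which is why the show_*/set_* handlers are renamed throughout. Roughly, from <linux/hwmon-sysfs.h>:

	#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index)		\
		SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)

	#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index)		\
		SENSOR_DEVICE_ATTR(_name, 0644, _func##_show,		\
				   _func##_store, _index)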
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index c0775084dde0..60e193f2e970 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -72,14 +72,19 @@ static inline void superio_select(int ld)
superio_outb(0x07, ld);
}
-static inline void superio_enter(void)
+static inline int superio_enter(void)
{
+ if (!request_muxed_region(REG, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x55, REG);
+ return 0;
}
static inline void superio_exit(void)
{
outb(0xAA, REG);
+ release_region(REG, 2);
}
#define SUPERIO_REG_DEVID 0x20
@@ -300,8 +305,12 @@ static int __init smsc47b397_find(void)
u8 id, rev;
char *name;
unsigned short addr;
+ int err;
+
+ err = superio_enter();
+ if (err)
+ return err;
- superio_enter();
id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
switch (id) {
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index c7b6a425e2c0..5f92eab24c62 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -73,16 +73,21 @@ superio_inb(int reg)
/* logical device for fans is 0x0A */
#define superio_select() superio_outb(0x07, 0x0A)
-static inline void
+static inline int
superio_enter(void)
{
+ if (!request_muxed_region(REG, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x55, REG);
+ return 0;
}
static inline void
superio_exit(void)
{
outb(0xAA, REG);
+ release_region(REG, 2);
}
#define SUPERIO_REG_ACT 0x30
@@ -202,8 +207,8 @@ static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
return data;
}
-static ssize_t get_fan(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
@@ -221,8 +226,8 @@ static ssize_t get_fan(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", rpm);
}
-static ssize_t get_fan_min(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
@@ -232,32 +237,32 @@ static ssize_t get_fan_min(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", rpm);
}
-static ssize_t get_fan_div(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_div_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index]));
}
-static ssize_t get_fan_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
int bitnr = to_sensor_dev_attr(devattr)->index;
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static ssize_t get_pwm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[attr->index]));
}
-static ssize_t get_pwm_en(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t pwm_en_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
@@ -271,8 +276,9 @@ static ssize_t alarms_show(struct device *dev,
return sprintf(buf, "%d\n", data->alarms);
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -307,8 +313,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute
* of least surprise; the user doesn't expect the fan minimum to change just
* because the divider changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -370,8 +377,8 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute
return count;
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -396,8 +403,9 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute
return count;
}
-static ssize_t set_pwm_en(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t pwm_en_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -422,23 +430,24 @@ static ssize_t set_pwm_en(struct device *dev, struct device_attribute
return count;
}
-#define fan_present(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, get_fan, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- get_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- get_fan_div, set_fan_div, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_alarm, S_IRUGO, get_fan_alarm, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
- get_pwm, set_pwm, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
- get_pwm_en, set_pwm_en, offset - 1)
-
-fan_present(1);
-fan_present(2);
-fan_present(3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, fan_alarm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_en, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, fan_alarm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_en, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, fan_alarm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_en, 2);
static DEVICE_ATTR_RO(alarms);
@@ -531,8 +540,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
{
u8 val;
unsigned short addr;
+ int err;
+
+ err = superio_enter();
+ if (err)
+ return err;
- superio_enter();
val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
/*
@@ -608,13 +621,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
{
if ((sio_data->activate & 0x01) == 0) {
- superio_enter();
- superio_select();
-
- pr_info("Disabling device\n");
- superio_outb(SUPERIO_REG_ACT, sio_data->activate);
-
- superio_exit();
+ if (!superio_enter()) {
+ superio_select();
+ pr_info("Disabling device\n");
+ superio_outb(SUPERIO_REG_ACT, sio_data->activate);
+ superio_exit();
+ } else {
+ pr_warn("Failed to disable device\n");
+ }
}
}
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 6989408033ec..e5d9222b22f1 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -179,8 +179,8 @@ static struct smsc47m192_data *smsc47m192_update_device(struct device *dev)
}
/* Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -188,8 +188,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr], nr));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -197,8 +197,8 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr], nr));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -206,8 +206,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr], nr));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -228,8 +228,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -250,26 +250,34 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0)
-show_in_offset(1)
-show_in_offset(2)
-show_in_offset(3)
-show_in_offset(4)
-show_in_offset(5)
-show_in_offset(6)
-show_in_offset(7)
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
/* Temperatures */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -277,8 +285,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -286,8 +294,8 @@ static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -295,8 +303,9 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -317,8 +326,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -339,8 +349,8 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_offset(struct device *dev, struct device_attribute
- *attr, char *buf)
+static ssize_t temp_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -348,8 +358,9 @@ static ssize_t show_temp_offset(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr]));
}
-static ssize_t set_temp_offset(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t temp_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -385,19 +396,18 @@ static ssize_t set_temp_offset(struct device *dev, struct device_attribute
return count;
}
-#define show_temp_index(index) \
-static SENSOR_DEVICE_ATTR(temp##index##_input, S_IRUGO, \
- show_temp, NULL, index-1); \
-static SENSOR_DEVICE_ATTR(temp##index##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, index-1); \
-static SENSOR_DEVICE_ATTR(temp##index##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, index-1); \
-static SENSOR_DEVICE_ATTR(temp##index##_offset, S_IRUGO | S_IWUSR, \
- show_temp_offset, set_temp_offset, index-1);
-
-show_temp_index(1)
-show_temp_index(2)
-show_temp_index(3)
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
/* VID */
static ssize_t cpu0_vid_show(struct device *dev,
@@ -434,8 +444,8 @@ static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vrm);
/* Alarms */
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -443,19 +453,19 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms & nr) ? 1 : 0);
}
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0x0010);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0x0020);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0x0040);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 0x4000);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 0x8000);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0x0001);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0x0002);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 0x0004);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 0x0008);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 0x0100);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 0x0200);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 0x0400);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 0x0800);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 0x0010);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 0x0020);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 0x0040);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 0x4000);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 0x8000);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0x0001);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 0x0002);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 0x0004);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 0x0008);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 0x0100);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 0x0200);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 0x0400);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 0x0800);
static struct attribute *smsc47m192_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
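The smsc47m192 hunks above set the pattern for every driver in this section: the show_*/set_* callbacks are renamed to the <prefix>_show/<prefix>_store convention so that SENSOR_DEVICE_ATTR_RO() and SENSOR_DEVICE_ATTR_RW() can derive both callbacks and the permissions by token pasting, while the sysfs file names themselves stay unchanged. A rough sketch of what the helpers expand to; the authoritative definitions live in <linux/hwmon-sysfs.h>:

/*
 * Sketch only, not the verbatim header. SENSOR_DEVICE_ATTR() emits a
 * struct sensor_device_attribute named sensor_dev_attr_<_name>.
 */
#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index)			\
	SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)

#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index)			\
	SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store,	\
			   _index)

Hence "static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);" requires functions named in_min_show() and in_min_store() to exist, which is exactly what the renames provide; it also replaces the open-coded S_IRUGO | S_IWUSR mode flags with the fixed octal 0444/0644 modes that checkpatch prefers.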
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 90b60297f2f7..f670796b609a 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -85,7 +85,7 @@ static const struct i2c_device_id stts751_id[] = {
{ }
};
-static const struct of_device_id stts751_of_match[] = {
+static const struct of_device_id __maybe_unused stts751_of_match[] = {
{ .compatible = "stts751" },
{ },
};
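The stts751 one-liner is the other recurring change: OF match tables gain __maybe_unused because drivers typically reference them only through of_match_ptr(), which evaluates to NULL when CONFIG_OF is disabled, leaving the table unreferenced and tripping -Wunused-const-variable on such builds. A hypothetical fragment showing the pairing (the demo_* names are placeholders, not from this patch):

#include <linux/i2c.h>
#include <linux/of.h>

static const struct of_device_id __maybe_unused demo_of_match[] = {
	{ .compatible = "vendor,demo-sensor" },
	{ }
};

static struct i2c_driver demo_driver = {
	.driver = {
		.name		= "demo",
		/* NULL when CONFIG_OF=n; the table is then unused */
		.of_match_table	= of_match_ptr(demo_of_match),
	},
};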
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 6a0ee903127e..ae9942331cae 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -128,16 +128,16 @@ static struct thmc50_data *thmc50_update_device(struct device *dev)
return data;
}
-static ssize_t show_analog_out(struct device *dev,
+static ssize_t analog_out_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->analog_out);
}
-static ssize_t set_analog_out(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t analog_out_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct thmc50_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -166,14 +166,14 @@ static ssize_t set_analog_out(struct device *dev,
}
/* There is only one PWM mode = DC */
-static ssize_t show_pwm_mode(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t pwm_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "0\n");
}
/* Temperatures */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -181,16 +181,17 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", data->temp_input[nr] * 1000);
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->temp_min[nr] * 1000);
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = dev_get_drvdata(dev);
@@ -210,16 +211,17 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->temp_max[nr] * 1000);
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = dev_get_drvdata(dev);
@@ -239,16 +241,15 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_critical(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t temp_critical_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->temp_critical[nr] * 1000);
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
@@ -257,29 +258,27 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms >> index) & 1);
}
-#define temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO, \
- show_temp_critical, NULL, offset - 1);
-
-temp_reg(1);
-temp_reg(2);
-temp_reg(3);
-
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
-
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_analog_out,
- set_analog_out, 0);
-static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_critical, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit, temp_critical, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit, temp_critical, 2);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
+
+static SENSOR_DEVICE_ATTR_RW(pwm1, analog_out, 0);
+static SENSOR_DEVICE_ATTR_RO(pwm1_mode, pwm_mode, 0);
static struct attribute *thmc50_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
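Note that attribute arrays such as thmc50_attributes[] need no corresponding change: SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0) still emits a variable named sensor_dev_attr_temp1_input, and the channel number rides in the wrapper's .index field until the callback recovers it. A condensed sketch of that round trip (demo_* names are hypothetical):

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

/* one callback serves every channel */
static ssize_t demo_temp_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;	/* 0, 1, ... */

	return sprintf(buf, "%d\n", nr * 1000);		/* placeholder */
}

static SENSOR_DEVICE_ATTR_RO(temp1_input, demo_temp, 0);
static SENSOR_DEVICE_ATTR_RO(temp2_input, demo_temp, 1);

static struct attribute *demo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);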
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 35523d315f25..f4ee55615dea 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -150,29 +150,11 @@ static umode_t tmp102_is_visible(const void *data, enum hwmon_sensor_types type,
}
}
-static u32 tmp102_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info tmp102_chip = {
- .type = hwmon_chip,
- .config = tmp102_chip_config,
-};
-
-static u32 tmp102_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
- 0
-};
-
-static const struct hwmon_channel_info tmp102_temp = {
- .type = hwmon_temp,
- .config = tmp102_temp_config,
-};
-
static const struct hwmon_channel_info *tmp102_info[] = {
- &tmp102_chip,
- &tmp102_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
NULL
};
@@ -321,7 +303,7 @@ static const struct i2c_device_id tmp102_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp102_id);
-static const struct of_device_id tmp102_of_match[] = {
+static const struct of_device_id __maybe_unused tmp102_of_match[] = {
{ .compatible = "ti,tmp102" },
{ },
};
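The tmp102 hunk shows a third theme: the boilerplate pairs of a zero-terminated u32 config array plus a named hwmon_channel_info are folded into HWMON_CHANNEL_INFO(), which builds both as anonymous compound literals. Roughly what one entry expands to (sketch; see <linux/hwmon.h> for the real macro):

/*
 * HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST)
 * is approximately:
 */
&(struct hwmon_channel_info) {
	.type = hwmon_temp,
	.config = (u32 []) {
		HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
		0	/* zero-terminated, like the old named arrays */
	},
}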
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index bda0fdc1eb53..a91726d33da8 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -170,7 +170,7 @@ static const struct i2c_device_id tmp103_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp103_id);
-static const struct of_device_id tmp103_of_match[] = {
+static const struct of_device_id __maybe_unused tmp103_of_match[] = {
{ .compatible = "ti,tmp103" },
{ },
};
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index 429bfeae4ca8..2447af704424 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -281,30 +281,13 @@ static umode_t tmp108_is_visible(const void *data, enum hwmon_sensor_types type,
}
}
-static u32 tmp108_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info tmp108_chip = {
- .type = hwmon_chip,
- .config = tmp108_chip_config,
-};
-
-static u32 tmp108_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MIN_HYST
- | HWMON_T_MAX_HYST | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info tmp108_temp = {
- .type = hwmon_temp,
- .config = tmp108_temp_config,
-};
-
static const struct hwmon_channel_info *tmp108_info[] = {
- &tmp108_chip,
- &tmp108_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM),
NULL
};
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 2732a71f3b39..5e63010dd3a0 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -70,7 +70,7 @@ static const struct i2c_device_id tmp421_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp421_id);
-static const struct of_device_id tmp421_of_match[] = {
+static const struct of_device_id __maybe_unused tmp421_of_match[] = {
{
.compatible = "ti,tmp421",
.data = (void *)2
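tmp421's table additionally carries per-compatible data (a channel count cast to void *), which is unaffected by the __maybe_unused annotation and is recovered at probe time. A hedged sketch of the retrieval side, assuming the usual of_device_get_match_data() helper (demo_probe is hypothetical, not this driver's probe):

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/of_device.h>

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	/* e.g. the "(void *)2" channel count from the table above */
	unsigned long channels =
		(unsigned long)of_device_get_match_data(&client->dev);

	if (!channels)		/* matched via i2c_device_id instead */
		channels = id->driver_data;

	dev_info(&client->dev, "%lu channels\n", channels);
	return 0;
}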
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 81f35e3a06b8..259725dec37e 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -47,7 +47,6 @@
#include <linux/acpi.h>
#include <linux/io.h>
-
/*
* If force_addr is set to anything different from 0, we forcibly enable
* the device at the given address.
@@ -355,32 +354,32 @@ static void via686a_init_device(struct via686a_data *data);
/* following are the sysfs callback functions */
/* 7 voltage sensors */
-static ssize_t show_in(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t in_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in[nr], nr));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t in_min_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_min[nr], nr));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t in_max_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_max[nr], nr));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t in_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -398,8 +397,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t in_max_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -417,44 +416,48 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-show_in_offset(0);
-show_in_offset(1);
-show_in_offset(2);
-show_in_offset(3);
-show_in_offset(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
/* 3 temperatures */
-static ssize_t show_temp(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t temp_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG10(data->temp[nr]));
}
-static ssize_t show_temp_over(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t temp_over_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_over[nr]));
}
-static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t temp_hyst_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_hyst[nr]));
}
-static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t temp_over_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -472,8 +475,9 @@ static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t temp_hyst_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -491,29 +495,28 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-#define show_temp_offset(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_over, set_temp_over, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \
- show_temp_hyst, set_temp_hyst, offset - 1);
-show_temp_offset(1);
-show_temp_offset(2);
-show_temp_offset(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_over, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_over, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_over, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_hyst, 2);
/* 2 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -521,15 +524,15 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
FAN_FROM_REG(data->fan_min[nr],
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -546,8 +549,8 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t fan_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -568,16 +571,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -589,23 +588,23 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct via686a_data *data = via686a_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
@@ -676,7 +675,6 @@ static struct platform_driver via686a_driver = {
.remove = via686a_remove,
};
-
/* This is called when the module is loaded */
static int via686a_probe(struct platform_device *pdev)
{
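via686a mixes both attribute families: per-channel files use SENSOR_DEVICE_ATTR_*, while single-instance files such as alarms use plain DEVICE_ATTR_RO()/DEVICE_ATTR_RW(), which follow the same <name>_show/<name>_store naming convention but emit dev_attr_<name> and carry no index. A minimal hypothetical sketch:

#include <linux/device.h>

static ssize_t demo_flag_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 1);		/* placeholder value */
}
static DEVICE_ATTR_RO(demo_flag);		/* emits dev_attr_demo_flag */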
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 3a6bfa51cb94..95d5e8ec8b7f 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn)
outb(ldn, sio_cip + 1);
}
-static inline void superio_enter(int sio_cip)
+static inline int superio_enter(int sio_cip)
{
+ if (!request_muxed_region(sio_cip, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, sio_cip);
outb(0x87, sio_cip);
+
+ return 0;
}
static inline void superio_exit(int sio_cip)
{
outb(0xaa, sio_cip);
+ release_region(sio_cip, 2);
}
/* ---------------------------------------------------------------------
@@ -1282,11 +1288,14 @@ EXIT:
static int __init vt1211_find(int sio_cip, unsigned short *address)
{
- int err = -ENODEV;
+ int err;
int devid;
- superio_enter(sio_cip);
+ err = superio_enter(sio_cip);
+ if (err)
+ return err;
+ err = -ENODEV;
devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
if (devid != SIO_VT1211_ID)
goto EXIT;
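The vt1211 rework is the section's other functional change, as opposed to the mechanical renames: the Super-I/O configuration port pair is shared hardware, so entering the config space now goes through request_muxed_region(), which waits until the region is free, and superio_enter() becomes fallible; every successful enter must be balanced by a release_region() in superio_exit(). A self-contained sketch of the pairing (demo_* names are hypothetical):

#include <linux/io.h>
#include <linux/ioport.h>

#define DEMO_DRVNAME	"demo_sio"

static int demo_superio_enter(int sio_cip)
{
	/* serialize against other users of the shared config ports */
	if (!request_muxed_region(sio_cip, 2, DEMO_DRVNAME))
		return -EBUSY;

	outb(0x87, sio_cip);	/* magic unlock sequence, sent twice */
	outb(0x87, sio_cip);

	return 0;
}

static void demo_superio_exit(int sio_cip)
{
	outb(0xaa, sio_cip);	/* re-lock the config space */
	release_region(sio_cip, 2);
}

The same conversion, with the caller checking the new return value, appears again for w83627hf further down.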
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 367b5eb53fb6..e2f1a80367e2 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -192,8 +192,8 @@ static inline void vt8231_write_value(struct vt8231_data *data, u8 reg,
}
/* following are the sysfs callback functions */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -202,8 +202,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", ((data->in[nr] - 3) * 10000) / 958);
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -212,8 +212,8 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", ((data->in_min[nr] - 3) * 10000) / 958);
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -222,8 +222,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", (((data->in_max[nr] - 3) * 10000) / 958));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -242,8 +242,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -330,19 +330,21 @@ static ssize_t in5_max_store(struct device *dev,
return count;
}
-#define define_voltage_sysfs(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset)
-
-define_voltage_sysfs(0);
-define_voltage_sysfs(1);
-define_voltage_sysfs(2);
-define_voltage_sysfs(3);
-define_voltage_sysfs(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
static DEVICE_ATTR_RO(in5_input);
static DEVICE_ATTR_RW(in5_min);
@@ -407,8 +409,8 @@ static ssize_t temp1_max_hyst_store(struct device *dev,
return count;
}
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -416,8 +418,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -425,8 +427,8 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_max[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -434,8 +436,9 @@ static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_min[nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -453,8 +456,9 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -477,27 +481,30 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
* Note that these map the Linux temperature sensor numbering (1-6) to the VIA
* temperature sensor numbering (0-5)
*/
-#define define_temperature_sysfs(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1)
static DEVICE_ATTR_RO(temp1_input);
static DEVICE_ATTR_RW(temp1_max);
static DEVICE_ATTR_RW(temp1_max_hyst);
-define_temperature_sysfs(2);
-define_temperature_sysfs(3);
-define_temperature_sysfs(4);
-define_temperature_sysfs(5);
-define_temperature_sysfs(6);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3);
+static SENSOR_DEVICE_ATTR_RW(temp4_max, temp_max, 3);
+static SENSOR_DEVICE_ATTR_RW(temp4_max_hyst, temp_min, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_input, temp, 4);
+static SENSOR_DEVICE_ATTR_RW(temp5_max, temp_max, 4);
+static SENSOR_DEVICE_ATTR_RW(temp5_max_hyst, temp_min, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_input, temp, 5);
+static SENSOR_DEVICE_ATTR_RW(temp6_max, temp_max, 5);
+static SENSOR_DEVICE_ATTR_RW(temp6_max_hyst, temp_min, 5);
/* Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -506,8 +513,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -516,8 +523,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -525,8 +532,9 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -545,8 +553,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct vt8231_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
@@ -593,17 +602,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-
-#define define_fan_sysfs(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1)
-
-define_fan_sysfs(1);
-define_fan_sysfs(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -614,27 +618,27 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct vt8231_data *data = vt8231_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp6_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp4_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp5_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp6_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
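One subtlety across these alarm conversions: the drivers encode the channel in .index two different ways. smsc47m192 stores a ready-made register mask (0x0010, 0x4000, ...) and tests it directly, while via686a, vt8231 and w83627hf store a bit number and shift. A hypothetical side-by-side sketch of the two callback styles (demo_* names invented):

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

static u16 demo_alarms;	/* would be read back from the chip */

/* .index is a mask: SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 0x0010) */
static ssize_t demo_mask_alarm_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	int mask = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%u\n", (demo_alarms & mask) ? 1 : 0);
}

/* .index is a bit number: SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4) */
static ssize_t demo_bit_alarm_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	int bitnr = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%u\n", (demo_alarms >> bitnr) & 1);
}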
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 8ac89d0781cc..7ca53a28c305 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -130,17 +130,23 @@ superio_select(struct w83627hf_sio_data *sio, int ld)
outb(ld, sio->sioaddr + 1);
}
-static inline void
+static inline int
superio_enter(struct w83627hf_sio_data *sio)
{
+ if (!request_muxed_region(sio->sioaddr, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, sio->sioaddr);
outb(0x87, sio->sioaddr);
+
+ return 0;
}
static inline void
superio_exit(struct w83627hf_sio_data *sio)
{
outb(0xAA, sio->sioaddr);
+ release_region(sio->sioaddr, 2);
}
#define W627_DEVID 0x52
@@ -396,7 +402,6 @@ struct w83627hf_data {
#endif
};
-
static int w83627hf_probe(struct platform_device *pdev);
static int w83627hf_remove(struct platform_device *pdev);
@@ -482,28 +487,28 @@ static struct platform_driver w83627hf_driver = {
};
static ssize_t
-show_in_input(struct device *dev, struct device_attribute *devattr, char *buf)
+in_input_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr]));
}
static ssize_t
-show_in_min(struct device *dev, struct device_attribute *devattr, char *buf)
+in_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr]));
}
static ssize_t
-show_in_max(struct device *dev, struct device_attribute *devattr, char *buf)
+in_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr]));
}
static ssize_t
-store_in_min(struct device *dev, struct device_attribute *devattr,
+in_min_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -522,7 +527,7 @@ store_in_min(struct device *dev, struct device_attribute *devattr,
return count;
}
static ssize_t
-store_in_max(struct device *dev, struct device_attribute *devattr,
+in_max_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -540,22 +545,31 @@ store_in_max(struct device *dev, struct device_attribute *devattr,
mutex_unlock(&data->update_lock);
return count;
}
-#define sysfs_vin_decl(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in_input, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO|S_IWUSR, \
- show_in_min, store_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO|S_IWUSR, \
- show_in_max, store_in_max, offset);
-
-sysfs_vin_decl(1);
-sysfs_vin_decl(2);
-sysfs_vin_decl(3);
-sysfs_vin_decl(4);
-sysfs_vin_decl(5);
-sysfs_vin_decl(6);
-sysfs_vin_decl(7);
-sysfs_vin_decl(8);
+
+static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, in_input, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
/* use a different set of functions for in0 */
static ssize_t show_in_0(struct w83627hf_data *data, char *buf, u8 reg)
@@ -661,7 +675,8 @@ static DEVICE_ATTR_RW(in0_min);
static DEVICE_ATTR_RW(in0_max);
static ssize_t
-show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_input_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -669,7 +684,7 @@ show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf)
(long)DIV_FROM_REG(data->fan_div[nr])));
}
static ssize_t
-show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -677,7 +692,7 @@ show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf)
(long)DIV_FROM_REG(data->fan_div[nr])));
}
static ssize_t
-store_fan_min(struct device *dev, struct device_attribute *devattr,
+fan_min_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -697,18 +712,16 @@ store_fan_min(struct device *dev, struct device_attribute *devattr,
mutex_unlock(&data->update_lock);
return count;
}
-#define sysfs_fan_decl(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan_input, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, store_fan_min, offset - 1);
-sysfs_fan_decl(1);
-sysfs_fan_decl(2);
-sysfs_fan_decl(3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_input, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
static ssize_t
-show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -719,8 +732,7 @@ show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_temp_max(struct device *dev, struct device_attribute *devattr,
- char *buf)
+temp_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -731,7 +743,7 @@ show_temp_max(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-show_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
+temp_max_hyst_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -743,7 +755,7 @@ show_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-store_temp_max(struct device *dev, struct device_attribute *devattr,
+temp_max_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -765,7 +777,7 @@ store_temp_max(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-store_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
+temp_max_hyst_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -786,17 +798,15 @@ store_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
return count;
}
-#define sysfs_temp_decl(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO|S_IWUSR, \
- show_temp_max, store_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO|S_IWUSR, \
- show_temp_max_hyst, store_temp_max_hyst, offset - 1);
-
-sysfs_temp_decl(1);
-sysfs_temp_decl(2);
-sysfs_temp_decl(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_max_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_max_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_max_hyst, 2);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -841,27 +851,27 @@ alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(alarms);
static ssize_t
-show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16);
-static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 16);
+static SENSOR_DEVICE_ATTR_RO(in8_alarm, alarm, 17);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 13);
static ssize_t
beep_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -902,7 +912,7 @@ beep_mask_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(beep_mask);
static ssize_t
-show_beep(struct device *dev, struct device_attribute *attr, char *buf)
+beep_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -910,8 +920,8 @@ show_beep(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-store_beep(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+beep_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct w83627hf_data *data = dev_get_drvdata(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -959,41 +969,25 @@ store_beep(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 0);
-static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 1);
-static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 2);
-static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 3);
-static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 8);
-static SENSOR_DEVICE_ATTR(in5_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 9);
-static SENSOR_DEVICE_ATTR(in6_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 10);
-static SENSOR_DEVICE_ATTR(in7_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 16);
-static SENSOR_DEVICE_ATTR(in8_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 17);
-static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 6);
-static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 7);
-static SENSOR_DEVICE_ATTR(fan3_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 11);
-static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 4);
-static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 5);
-static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 13);
-static SENSOR_DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 15);
+static SENSOR_DEVICE_ATTR_RW(in0_beep, beep, 0);
+static SENSOR_DEVICE_ATTR_RW(in1_beep, beep, 1);
+static SENSOR_DEVICE_ATTR_RW(in2_beep, beep, 2);
+static SENSOR_DEVICE_ATTR_RW(in3_beep, beep, 3);
+static SENSOR_DEVICE_ATTR_RW(in4_beep, beep, 8);
+static SENSOR_DEVICE_ATTR_RW(in5_beep, beep, 9);
+static SENSOR_DEVICE_ATTR_RW(in6_beep, beep, 10);
+static SENSOR_DEVICE_ATTR_RW(in7_beep, beep, 16);
+static SENSOR_DEVICE_ATTR_RW(in8_beep, beep, 17);
+static SENSOR_DEVICE_ATTR_RW(fan1_beep, beep, 6);
+static SENSOR_DEVICE_ATTR_RW(fan2_beep, beep, 7);
+static SENSOR_DEVICE_ATTR_RW(fan3_beep, beep, 11);
+static SENSOR_DEVICE_ATTR_RW(temp1_beep, beep, 4);
+static SENSOR_DEVICE_ATTR_RW(temp2_beep, beep, 5);
+static SENSOR_DEVICE_ATTR_RW(temp3_beep, beep, 13);
+static SENSOR_DEVICE_ATTR_RW(beep_enable, beep, 15);
static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1007,7 +1001,7 @@ show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
* because the divisor changed.
*/
static ssize_t
-store_fan_div(struct device *dev, struct device_attribute *devattr,
+fan_div_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1047,15 +1041,12 @@ store_fan_div(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO|S_IWUSR,
- show_fan_div, store_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO|S_IWUSR,
- show_fan_div, store_fan_div, 1);
-static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO|S_IWUSR,
- show_fan_div, store_fan_div, 2);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
static ssize_t
-show_pwm(struct device *dev, struct device_attribute *devattr, char *buf)
+pwm_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1063,7 +1054,7 @@ show_pwm(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-store_pwm(struct device *dev, struct device_attribute *devattr,
+pwm_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1096,12 +1087,13 @@ store_pwm(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 1);
-static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
static ssize_t
-show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf)
+pwm_enable_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1109,8 +1101,8 @@ show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-store_pwm_enable(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+pwm_enable_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = dev_get_drvdata(dev);
@@ -1134,15 +1126,12 @@ store_pwm_enable(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 1);
-static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
static ssize_t
-show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf)
+pwm_freq_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1155,7 +1144,7 @@ show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-store_pwm_freq(struct device *dev, struct device_attribute *devattr,
+pwm_freq_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1186,15 +1175,12 @@ store_pwm_freq(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO|S_IWUSR,
- show_pwm_freq, store_pwm_freq, 0);
-static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO|S_IWUSR,
- show_pwm_freq, store_pwm_freq, 1);
-static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO|S_IWUSR,
- show_pwm_freq, store_pwm_freq, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
static ssize_t
-show_temp_type(struct device *dev, struct device_attribute *devattr,
+temp_type_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1203,7 +1189,7 @@ show_temp_type(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-store_temp_type(struct device *dev, struct device_attribute *devattr,
+temp_type_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1258,13 +1244,9 @@ store_temp_type(struct device *dev, struct device_attribute *devattr,
return count;
}
-#define sysfs_temp_type(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO | S_IWUSR, \
- show_temp_type, store_temp_type, offset - 1);
-
-sysfs_temp_type(1);
-sysfs_temp_type(2);
-sysfs_temp_type(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_type, temp_type, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_type, temp_type, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_type, temp_type, 2);
static ssize_t
name_show(struct device *dev, struct device_attribute *devattr, char *buf)
@@ -1278,7 +1260,7 @@ static DEVICE_ATTR_RO(name);
static int __init w83627hf_find(int sioaddr, unsigned short *addr,
struct w83627hf_sio_data *sio_data)
{
- int err = -ENODEV;
+ int err;
u16 val;
static __initconst char *const names[] = {
@@ -1290,7 +1272,11 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
};
sio_data->sioaddr = sioaddr;
- superio_enter(sio_data);
+ err = superio_enter(sio_data);
+ if (err)
+ return err;
+
+ err = -ENODEV;
val = force_id ? force_id : superio_inb(sio_data, DEVID);
switch (val) {
case W627_DEVID:
@@ -1595,7 +1581,6 @@ static int w83627hf_remove(struct platform_device *pdev)
return 0;
}
-
/* Registers 0x50-0x5f are banked */
static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg)
{
@@ -1644,9 +1629,21 @@ static int w83627thf_read_gpio5(struct platform_device *pdev)
struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff, sel;
- superio_enter(sio_data);
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
superio_select(sio_data, W83627HF_LD_GPIO5);
+ res = 0xff;
+
/* Make sure these GPIO pins are enabled */
if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) {
dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n");
@@ -1677,7 +1674,17 @@ static int w83687thf_read_vid(struct platform_device *pdev)
struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff;
- superio_enter(sio_data);
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
superio_select(sio_data, W83627HF_LD_HWM);
/* Make sure these GPIO pins are enabled */
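The attribute conversions above depend on the naming contract of the SENSOR_DEVICE_ATTR_RO()/_RW() helpers from <linux/hwmon-sysfs.h>: the macro derives the callback names from its second argument, which is why show_beep/store_beep had to become beep_show/beep_store first. A minimal sketch of the pattern; the "demo" attribute is hypothetical and not part of this patch:

/* Illustrative sketch only; "demo" is a made-up attribute name. */
static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	/* The per-attribute index passed to the macro is recovered here. */
	return sprintf(buf, "%d\n", to_sensor_dev_attr(attr)->index);
}

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return count;
}

/* Creates a 0644 "demo1" attribute bound to demo_show()/demo_store(). */
static SENSOR_DEVICE_ATTR_RW(demo1, demo, 0);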
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
index e858093ac806..d4105321e462 100644
--- a/drivers/hwmon/w83773g.c
+++ b/drivers/hwmon/w83773g.c
@@ -44,7 +44,7 @@ static const struct i2c_device_id w83773_id[] = {
MODULE_DEVICE_TABLE(i2c, w83773_id);
-static const struct of_device_id w83773_of_match[] = {
+static const struct of_device_id __maybe_unused w83773_of_match[] = {
{
.compatible = "nuvoton,w83773g"
},
@@ -237,31 +237,13 @@ static umode_t w83773_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
-static const u32 w83773_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info w83773_chip = {
- .type = hwmon_chip,
- .config = w83773_chip_config,
-};
-
-static const u32 w83773_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
- HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
- 0
-};
-
-static const struct hwmon_channel_info w83773_temp = {
- .type = hwmon_temp,
- .config = w83773_temp_config,
-};
-
static const struct hwmon_channel_info *w83773_info[] = {
- &w83773_chip,
- &w83773_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET),
NULL
};
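The two dropped static arrays are not lost: HWMON_CHANNEL_INFO() builds the per-type config array, its zero terminator, and the wrapping struct hwmon_channel_info in a single compound literal. Its definition in <linux/hwmon.h> is essentially:

#define HWMON_CHANNEL_INFO(stype, ...)		\
	(&(struct hwmon_channel_info) {		\
		.type = hwmon_##stype,		\
		.config = (u32 []) {		\
			__VA_ARGS__, 0		\
		}				\
	})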
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 9a63e87ea5f3..be302ec5f66b 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -871,7 +871,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
}
pm_runtime_put(&adev->dev);
- dev_info(dev, "%s initialized\n", (char *)id->data);
+ dev_info(dev, "%s initialized\n", (char *)coresight_get_uci_data(id));
if (boot_enable) {
coresight_enable(drvdata->csdev);
drvdata->boot_enable = true;
@@ -915,36 +915,18 @@ static const struct dev_pm_ops etm_dev_pm_ops = {
};
static const struct amba_id etm_ids[] = {
- { /* ETM 3.3 */
- .id = 0x000bb921,
- .mask = 0x000fffff,
- .data = "ETM 3.3",
- },
- { /* ETM 3.5 - Cortex-A5 */
- .id = 0x000bb955,
- .mask = 0x000fffff,
- .data = "ETM 3.5",
- },
- { /* ETM 3.5 */
- .id = 0x000bb956,
- .mask = 0x000fffff,
- .data = "ETM 3.5",
- },
- { /* PTM 1.0 */
- .id = 0x000bb950,
- .mask = 0x000fffff,
- .data = "PTM 1.0",
- },
- { /* PTM 1.1 */
- .id = 0x000bb95f,
- .mask = 0x000fffff,
- .data = "PTM 1.1",
- },
- { /* PTM 1.1 Qualcomm */
- .id = 0x000b006f,
- .mask = 0x000fffff,
- .data = "PTM 1.1",
- },
+ /* ETM 3.3 */
+ CS_AMBA_ID_DATA(0x000bb921, "ETM 3.3"),
+ /* ETM 3.5 - Cortex-A5 */
+ CS_AMBA_ID_DATA(0x000bb955, "ETM 3.5"),
+ /* ETM 3.5 */
+ CS_AMBA_ID_DATA(0x000bb956, "ETM 3.5"),
+ /* PTM 1.0 */
+ CS_AMBA_ID_DATA(0x000bb950, "PTM 1.0"),
+ /* PTM 1.1 */
+ CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
+ /* PTM 1.1 Qualcomm */
+ CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index fe76b176974a..08ce37c9475d 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1068,18 +1068,21 @@ err_arch_supported:
return ret;
}
-#define ETM4x_AMBA_ID(pid) \
- { \
- .id = pid, \
- .mask = 0x000fffff, \
+static struct amba_cs_uci_id uci_id_etm4[] = {
+ {
+ /* ETMv4 UCI data */
+ .devarch = 0x47704a13,
+ .devarch_mask = 0xfff0ffff,
+ .devtype = 0x00000013,
}
+};
static const struct amba_id etm4_ids[] = {
- ETM4x_AMBA_ID(0x000bb95d), /* Cortex-A53 */
- ETM4x_AMBA_ID(0x000bb95e), /* Cortex-A57 */
- ETM4x_AMBA_ID(0x000bb95a), /* Cortex-A72 */
- ETM4x_AMBA_ID(0x000bb959), /* Cortex-A73 */
- ETM4x_AMBA_ID(0x000bb9da), /* Cortex-A35 */
+ CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
+ CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
+ CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
+ CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
+ CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4), /* Cortex-A35 */
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index b936c6d7e13f..e0684d06e9ee 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -6,6 +6,7 @@
#ifndef _CORESIGHT_PRIV_H
#define _CORESIGHT_PRIV_H
+#include <linux/amba/bus.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/coresight.h>
@@ -160,4 +161,43 @@ static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
#endif
+/*
+ * Macros and inline functions to handle CoreSight UCI data and driver
+ * private data in AMBA ID table entries, and extract data values.
+ */
+
+/* coresight AMBA ID, no UCI, no driver data: id table entry */
+#define CS_AMBA_ID(pid) \
+ { \
+ .id = pid, \
+ .mask = 0x000fffff, \
+ }
+
+/* coresight AMBA ID, UCI with driver data only: id table entry. */
+#define CS_AMBA_ID_DATA(pid, dval) \
+ { \
+ .id = pid, \
+ .mask = 0x000fffff, \
+ .data = (void *)&(struct amba_cs_uci_id) \
+ { \
+ .data = (void *)dval, \
+ } \
+ }
+
+/* coresight AMBA ID, full UCI structure: id table entry. */
+#define CS_AMBA_UCI_ID(pid, uci_ptr) \
+ { \
+ .id = pid, \
+ .mask = 0x000fffff, \
+ .data = uci_ptr \
+ }
+
+/* extract the data value from a UCI structure given amba_id pointer. */
+static inline void *coresight_get_uci_data(const struct amba_id *id)
+{
+ if (id->data)
+ return ((struct amba_cs_uci_id *)(id->data))->data;
+ return NULL;
+}
+
#endif
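A probe routine gets the stashed value back through coresight_get_uci_data(), as the etm3x and stm hunks in this series do. A minimal sketch; "demo" is a hypothetical driver, not one touched by this patch:

static int demo_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* NULL when the table entry carries no UCI driver data. */
	const char *name = coresight_get_uci_data(id);

	if (name)
		dev_info(&adev->dev, "%s initialized\n", name);
	return 0;
}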
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index f07825df5c7a..9f8a844ed7aa 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -870,7 +870,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_put(&adev->dev);
- dev_info(dev, "%s initialized\n", (char *)id->data);
+ dev_info(dev, "%s initialized\n", (char *)coresight_get_uci_data(id));
return 0;
stm_unregister:
@@ -905,16 +905,8 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
};
static const struct amba_id stm_ids[] = {
- {
- .id = 0x000bb962,
- .mask = 0x000fffff,
- .data = "STM32",
- },
- {
- .id = 0x000bb963,
- .mask = 0x000fffff,
- .data = "STM500",
- },
+ CS_AMBA_ID_DATA(0x000bb962, "STM32"),
+ CS_AMBA_ID_DATA(0x000bb963, "STM500"),
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index ea249f0bcd73..2a02da3d630f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -443,7 +443,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etr_cs_ops;
- ret = tmc_etr_setup_caps(drvdata, devid, id->data);
+ ret = tmc_etr_setup_caps(drvdata, devid,
+ coresight_get_uci_data(id));
if (ret)
goto out;
break;
@@ -475,26 +476,13 @@ out:
}
static const struct amba_id tmc_ids[] = {
- {
- .id = 0x000bb961,
- .mask = 0x000fffff,
- },
- {
- /* Coresight SoC 600 TMC-ETR/ETS */
- .id = 0x000bb9e8,
- .mask = 0x000fffff,
- .data = (void *)(unsigned long)CORESIGHT_SOC_600_ETR_CAPS,
- },
- {
- /* Coresight SoC 600 TMC-ETB */
- .id = 0x000bb9e9,
- .mask = 0x000fffff,
- },
- {
- /* Coresight SoC 600 TMC-ETF */
- .id = 0x000bb9ea,
- .mask = 0x000fffff,
- },
+ CS_AMBA_ID(0x000bb961),
+ /* Coresight SoC 600 TMC-ETR/ETS */
+ CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
+ /* Coresight SoC 600 TMC-ETB */
+ CS_AMBA_ID(0x000bb9e9),
+ /* Coresight SoC 600 TMC-ETF */
+ CS_AMBA_ID(0x000bb9ea),
{ 0, 0},
};
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f2c681971201..f8979abb9a19 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
Cannon Lake (PCH)
Cedar Fork (PCH)
Ice Lake (PCH)
+ Comet Lake (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index bb8e3f149979..d464799e40a3 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -426,8 +426,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
pm_runtime_get_sync(dev->dev);
- if (dev->suspended) {
- dev_err(dev->dev, "Error %s call while suspended\n", __func__);
+ if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
ret = -ESHUTDOWN;
goto done_nolock;
}
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index ead5e7de3e4d..416f89b8f881 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -86,7 +86,6 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
struct i2c_timings *t = &dev->timings;
u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0;
- dev->adapter.nr = -1;
dev->tx_fifo_depth = 32;
dev->rx_fifo_depth = 32;
@@ -219,7 +218,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
dev->mode = DW_IC_SLAVE;
}
-static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
+static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev)
{
u32 param, tx_fifo_depth, rx_fifo_depth;
@@ -233,7 +232,6 @@ static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
if (!dev->tx_fifo_depth) {
dev->tx_fifo_depth = tx_fifo_depth;
dev->rx_fifo_depth = rx_fifo_depth;
- dev->adapter.nr = id;
} else if (tx_fifo_depth >= 2) {
dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
tx_fifo_depth);
@@ -358,13 +356,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000);
}
- dw_i2c_set_fifo_size(dev, pdev->id);
+ dw_i2c_set_fifo_size(dev);
adap = &dev->adapter;
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
+ adap->nr = -1;
dev_pm_set_driver_flags(&pdev->dev,
DPM_FLAG_SMART_PREPARE |
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 835d54ac2971..231675b10376 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -177,7 +177,6 @@ static wait_queue_head_t pch_event;
static DEFINE_MUTEX(pch_mutex);
/* Definition for ML7213 by LAPIS Semiconductor */
-#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_I2C 0x802D
#define PCI_DEVICE_ID_ML7223_I2C 0x8010
#define PCI_DEVICE_ID_ML7831_I2C 0x8817
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c91e145ef5a5..679c6c41f64b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -71,6 +71,7 @@
* Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
* Cedar Fork (PCH) 0x18df 32 hard yes yes yes
* Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
+ * Comet Lake (PCH) 0x02a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -240,6 +241,7 @@
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
struct i801_mux_config {
char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
{ 0, }
};
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
+ case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 42fed40198a0..fd70b110e8f4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -515,9 +515,9 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
{
struct clk_notifier_data *ndata = data;
- struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk,
+ struct imx_i2c_struct *i2c_imx = container_of(nb,
struct imx_i2c_struct,
- clk);
+ clk_change_nb);
if (action & POST_RATE_CHANGE)
i2c_imx_set_clk(i2c_imx, ndata->new_rate);
@@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
/* Init DMA config if supported */
ret = i2c_imx_dma_request(i2c_imx, phy_addr);
if (ret < 0)
- goto clk_notifier_unregister;
+ goto del_adapter;
dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
return 0; /* Return OK */
+del_adapter:
+ i2c_del_adapter(&i2c_imx->adapter);
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
rpm_disable:
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 660de1ee68ed..684d651612b3 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -503,7 +503,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON);
- dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 0);
+ dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
if (!dma_rd_buf)
return -ENOMEM;
@@ -526,7 +526,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON);
- dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0);
+ dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
if (!dma_wr_buf)
return -ENOMEM;
@@ -549,7 +549,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG);
writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON);
- dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0);
+ dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
if (!dma_wr_buf)
return -ENOMEM;
@@ -561,7 +561,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
return -ENOMEM;
}
- dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 0);
+ dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 1);
if (!dma_rd_buf) {
dma_unmap_single(i2c->dev, wpaddr,
msgs->len, DMA_TO_DEVICE);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index dd52a068b140..a7578f6da979 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -363,9 +363,6 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE
? priv->dma_rx : priv->dma_tx;
- /* Disable DMA Master Received/Transmitted */
- rcar_i2c_write(priv, ICDMAER, 0);
-
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);
@@ -375,6 +372,9 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
priv->flags |= ID_P_NO_RXDMA;
priv->dma_direction = DMA_NONE;
+
+ /* Disable DMA Master Received/Transmitted, must be last! */
+ rcar_i2c_write(priv, ICDMAER, 0);
}
static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv)
@@ -611,6 +611,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
return true;
}
+/*
+ * This driver has a lock-free design because there are IP cores (at least
+ * R-Car Gen2) which have an inherent race condition in their hardware design.
+ * There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after
+ * the interrupt was generated, otherwise an unwanted repeated message gets
+ * generated. It turned out that taking a spinlock at the beginning of the ISR
+ * was already causing repeated messages. Thus, this driver was converted to
+ * the now lockless behaviour. Please keep this in mind when hacking the driver.
+ */
static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
{
struct rcar_i2c_priv *priv = ptr;
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 1e6805b5cef2..a57aa4fe51a4 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -478,7 +478,7 @@ static int sis630_setup(struct pci_dev *sis630_dev)
if (!request_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
sis630_driver.name)) {
dev_err(&sis630_dev->dev,
- "I/O Region 0x%04hx-0x%04hx for SMBus already in use.\n",
+ "I/O Region 0x%04x-0x%04x for SMBus already in use.\n",
smbus_base + SMB_STS,
smbus_base + SMB_STS + SIS630_SMB_IOREGION - 1);
retval = -EBUSY;
@@ -528,7 +528,7 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
sis630_adapter.dev.parent = &dev->dev;
snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
- "SMBus SIS630 adapter at %04hx", smbus_base + SMB_STS);
+ "SMBus SIS630 adapter at %04x", smbus_base + SMB_STS);
return i2c_add_adapter(&sis630_adapter);
}
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 13e1213561d4..4284fc991cfd 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -432,7 +432,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0);
dnf_delay = setup->dnf * i2cclk;
- sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min -
+ sdadel_min = i2c_specs[setup->speed].hddat_min + setup->fall_time -
af_delay_min - (setup->dnf + 3) * i2cclk;
sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time -
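The sign fix follows from the bus timing budget: the minimum SDA delay must cover the worst-case data hold time plus the SCL fall time, minus the latencies the analog and digital filters already guarantee. Restated as a comment (an interpretation of the hunk, with times in ns and i2cclk being one I2C kernel clock period):

/*
 * sdadel_min = hddat_min + fall_time
 *              - af_delay_min - (dnf + 3) * i2cclk
 *
 * The old code subtracted hddat_min, crediting the required hold time
 * against the budget instead of charging it.
 */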
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index d18b0941b71a..f14d4b3fab44 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -597,6 +597,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
i2c->adapter = synquacer_i2c_ops;
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.dev.parent = &pdev->dev;
+ i2c->adapter.dev.of_node = pdev->dev.of_node;
+ ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev));
i2c->adapter.nr = pdev->id;
init_completion(&i2c->completion);
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index cb6c5cb0df0b..688aa3b5f3ac 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- int i = 0, scl = 1, ret;
+ int i = 0, scl = 1, ret = 0;
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
@@ -327,6 +327,8 @@ static int i2c_device_probe(struct device *dev)
if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
dev_dbg(dev, "Using Host Notify IRQ\n");
+ /* Keep adapter active when Host Notify is required */
+ pm_runtime_get_sync(&client->adapter->dev);
irq = i2c_smbus_host_notify_to_irq(client);
} else if (dev->of_node) {
irq = of_irq_get_byname(dev->of_node, "irq");
@@ -431,6 +433,8 @@ static int i2c_device_remove(struct device *dev)
device_init_wakeup(&client->dev, false);
client->irq = client->init_irq;
+ if (client->flags & I2C_CLIENT_HOST_NOTIFY)
+ pm_runtime_put(&client->adapter->dev);
return status;
}
@@ -2258,7 +2262,8 @@ EXPORT_SYMBOL(i2c_put_adapter);
/**
* i2c_get_dma_safe_msg_buf() - get a DMA safe buffer for the given i2c_msg
* @msg: the message to be checked
- * @threshold: the minimum number of bytes for which using DMA makes sense
+ * @threshold: the minimum number of bytes for which using DMA makes sense.
+ * Should at least be 1.
*
* Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO.
* Or a valid pointer to be used with DMA. After use, release it by
@@ -2268,7 +2273,11 @@ EXPORT_SYMBOL(i2c_put_adapter);
*/
u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold)
{
- if (msg->len < threshold)
+ /* also skip 0-length msgs for bogus thresholds of 0 */
+ if (!threshold)
+ pr_debug("DMA buffer for addr=0x%02x with length 0 is bogus\n",
+ msg->addr);
+ if (msg->len < threshold || msg->len == 0)
return NULL;
if (msg->flags & I2C_M_DMA_SAFE)
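With the stricter semantics, callers pass a threshold of at least 1 and treat a NULL return as the cue to fall back to PIO. A hedged usage sketch; the threshold of 8 is an arbitrary example, not a value from this patch:

u8 *dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);

if (dma_buf) {
	/* ... run the transfer via DMA ... */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, true); /* true: data was transferred */
} else {
	/* PIO path: use msg->buf directly */
}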
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 2dc628d4f1ae..5f4bd52121fe 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -385,8 +385,9 @@ static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
return;
ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
- *ptr &= ~(I3C_ADDR_SLOT_STATUS_MASK << (bitpos % BITS_PER_LONG));
- *ptr |= status << (bitpos % BITS_PER_LONG);
+ *ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
+ (bitpos % BITS_PER_LONG));
+ *ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
}
static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
@@ -1980,7 +1981,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
{
struct i3c_dev_boardinfo *boardinfo;
struct device *dev = &master->dev;
- struct i3c_device_info info = { };
enum i3c_addr_slot_status addrstatus;
u32 init_dyn_addr = 0;
@@ -2012,8 +2012,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
- if ((info.pid & GENMASK_ULL(63, 48)) ||
- I3C_PID_RND_LOWER_32BITS(info.pid))
+ if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+ I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
return -EINVAL;
boardinfo->init_dyn_addr = init_dyn_addr;
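The new casts in i3c_bus_set_addr_slot_status() matter because two status bits are packed per address: on a 64-bit build, address 16 already lands on bit position 32, beyond what a plain int shift may legally reach. A standalone illustration of the failure mode, assuming the in-tree two-bits-per-slot packing:

unsigned long slot = 0;
int bitpos = 16 * 2;	/* address 16 -> bit position 32 */

slot |= (unsigned long)0x3 << (bitpos % BITS_PER_LONG);	/* correct */
/* slot |= 0x3 << 32;  -- int-width shift: undefined behaviour */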
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 59279224e07f..1d83c97431c7 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master)
static void dw_i3c_master_disable(struct dw_i3c_master *master)
{
- writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE,
+ writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
master->regs + DEVICE_CTRL);
}
@@ -841,11 +841,6 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
return -ENOTSUPP;
for (i = 0; i < i3c_nxfers; i++) {
- if (i3c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
- return -ENOTSUPP;
- }
-
- for (i = 0; i < i3c_nxfers; i++) {
if (i3c_xfers[i].rnw)
nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
else
@@ -974,11 +969,6 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
return -ENOTSUPP;
for (i = 0; i < i2c_nxfers; i++) {
- if (i2c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
- return -ENOTSUPP;
- }
-
- for (i = 0; i < i2c_nxfers; i++) {
if (i2c_xfers[i].flags & I2C_M_RD)
nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
else
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 901b8833847f..19fcd0756f46 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -320,9 +320,9 @@ config BLK_DEV_OFFBOARD
config BLK_DEV_GENERIC
tristate "Generic PCI IDE Chipset Support"
select BLK_DEV_IDEPCI
- help
- This option provides generic support for various PCI IDE Chipsets
- which otherwise might not be supported.
+ help
+ This option provides generic support for various PCI IDE Chipsets
+ which otherwise might not be supported.
config BLK_DEV_OPTI621
tristate "OPTi 82C621 chipset enhanced support"
@@ -516,7 +516,7 @@ config BLK_DEV_IT8213
tristate "IT8213 IDE support"
select BLK_DEV_IDEDMA_PCI
help
- This driver adds support for the ITE 8213 IDE controller.
+ This driver adds support for the ITE 8213 IDE controller.
config BLK_DEV_IT821X
tristate "IT821X IDE support"
@@ -671,20 +671,20 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
hard disk and hdc for CD-ROM.
config BLK_DEV_IDE_AU1XXX
- bool "IDE for AMD Alchemy Au1200"
- depends on MIPS_ALCHEMY
- select IDE_XFER_MODE
+ bool "IDE for AMD Alchemy Au1200"
+ depends on MIPS_ALCHEMY
+ select IDE_XFER_MODE
choice
- prompt "IDE Mode for AMD Alchemy Au1200"
- default BLK_DEV_IDE_AU1XXX_PIO_DBDMA
- depends on BLK_DEV_IDE_AU1XXX
+ prompt "IDE Mode for AMD Alchemy Au1200"
+ default BLK_DEV_IDE_AU1XXX_PIO_DBDMA
+ depends on BLK_DEV_IDE_AU1XXX
config BLK_DEV_IDE_AU1XXX_PIO_DBDMA
- bool "PIO+DbDMA IDE for AMD Alchemy Au1200"
+ bool "PIO+DbDMA IDE for AMD Alchemy Au1200"
config BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
- bool "MDMA2+DbDMA IDE for AMD Alchemy Au1200"
- depends on BLK_DEV_IDE_AU1XXX
+ bool "MDMA2+DbDMA IDE for AMD Alchemy Au1200"
+ depends on BLK_DEV_IDE_AU1XXX
endchoice
config BLK_DEV_IDE_TX4938
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 4d565b0c5a6e..0a3f9bcc8b04 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -574,7 +574,7 @@ static u8 hpt3xx_udma_filter(ide_drive_t *drive)
if (!HPT370_ALLOW_ATA100_5 ||
check_in_drive_list(drive, bad_ata100_5))
return ATA_UDMA4;
- /* else: fall through */
+ /* fall through */
case HPT372 :
case HPT372A:
case HPT372N:
@@ -601,7 +601,7 @@ static u8 hpt3xx_mdma_filter(ide_drive_t *drive)
case HPT374 :
if (ata_id_is_sata(drive->id))
return 0x00;
- /* else: fall through */
+ /* fall through */
default:
return 0x07;
}
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 780d33ccc5d8..1ea2f9e82bf8 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -427,7 +427,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
* (maintains previous driver behaviour)
*/
break;
- /* else: fall through */
+ /* fall through */
case CAPACITY_CURRENT:
/* Normal Zip/LS-120 disks */
if (memcmp(cap_desc, &floppy->cap_desc, 8))
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 67d4a7d4acc8..88d132edc4e3 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -156,7 +156,6 @@ static u16 tx4939ide_check_error_ints(ide_hwif_t *hwif)
u16 sysctl = tx4939ide_readw(base, TX4939IDE_Sys_Ctl);
tx4939ide_writew(sysctl | 0x4000, base, TX4939IDE_Sys_Ctl);
- mmiowb();
/* wait 12GBUSCLK (typ. 60ns @ GBUS200MHz, max 270ns) */
ndelay(270);
tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
@@ -396,7 +395,6 @@ static void tx4939ide_init_hwif(ide_hwif_t *hwif)
/* Soft Reset */
tx4939ide_writew(0x8000, base, TX4939IDE_Sys_Ctl);
- mmiowb();
/* at least 20 GBUSCLK (typ. 100ns @ GBUS200MHz, max 450ns) */
ndelay(450);
tx4939ide_writew(0x0000, base, TX4939IDE_Sys_Ctl);
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 7096e577b23f..50f3ff386bea 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
mutex_lock(&data->mutex);
ret = kxcjk1013_set_mode(data, OPERATION);
+ if (ret == 0)
+ ret = kxcjk1013_set_range(data, data->range);
mutex_unlock(&data->mutex);
return ret;
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index ff5f2da2e1b1..54d9978b2740 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
if (sigma_delta->info->has_registers) {
data[0] = reg << sigma_delta->info->addr_shift;
data[0] |= sigma_delta->info->read_mask;
+ data[0] |= sigma_delta->comm;
spi_message_add_tail(&t[0], &m);
}
spi_message_add_tail(&t[1], &m);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 75d2f73582a3..596841a3c4db 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
ret = wait_event_interruptible_timeout(st->wq_data_avail,
st->done,
msecs_to_jiffies(1000));
- if (ret == 0)
- ret = -ETIMEDOUT;
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
-
- *val = st->last_value;
+ /* Disable interrupts, whether or not the ADC conversion
+ * succeeded
+ */
at91_adc_writel(st, AT91_ADC_CHDR,
AT91_ADC_CH(chan->channel));
at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
- st->last_value = 0;
- st->done = false;
+ if (ret > 0) {
+ /* a valid conversion took place */
+ *val = st->last_value;
+ st->last_value = 0;
+ st->done = false;
+ ret = IIO_VAL_INT;
+ } else if (ret == 0) {
+ /* conversion timeout */
+ dev_err(&idev->dev, "ADC Channel %d timeout.\n",
+ chan->channel);
+ ret = -ETIMEDOUT;
+ }
+
mutex_unlock(&st->lock);
- return IIO_VAL_INT;
+ return ret;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_mv;
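The rework keys on the three-way return convention of wait_event_interruptible_timeout(), which the old code collapsed into a single error path:

/*
 * wait_event_interruptible_timeout() returns:
 *   > 0  - condition became true (remaining jiffies)
 *   == 0 - timed out, condition still false
 *   < 0  - interrupted by a signal (-ERESTARTSYS)
 */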
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index b13c61539d46..6401ca7a9a20 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1292,6 +1292,7 @@ static int xadc_probe(struct platform_device *pdev)
err_free_irq:
free_irq(xadc->irq, indio_dev);
+ cancel_delayed_work_sync(&xadc->zynq_unmask_work);
err_clk_disable_unprepare:
clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
@@ -1321,8 +1322,8 @@ static int xadc_remove(struct platform_device *pdev)
iio_triggered_buffer_cleanup(indio_dev);
}
free_irq(xadc->irq, indio_dev);
+ cancel_delayed_work_sync(&xadc->zynq_unmask_work);
clk_disable_unprepare(xadc->clk);
- cancel_delayed_work(&xadc->zynq_unmask_work);
kfree(xadc->data);
kfree(indio_dev->channels);
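Switching to the _sync variant, and calling it before clk_disable_unprepare(), closes a race with an already-running work item:

/*
 * cancel_delayed_work() only removes a pending work item; a handler
 * that is already executing keeps running and may still touch the
 * hardware. cancel_delayed_work_sync() additionally waits for a
 * running handler to finish, so it must come before the clock is
 * taken away.
 */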
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index d5d146e9e372..92c684d2b67e 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -64,6 +64,7 @@ config IAQCORE
config PMS7003
tristate "Plantower PMS7003 particulate matter sensor"
depends on SERIAL_DEV_BUS
+ select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Plantower PMS7003 particulate
matter sensor.
@@ -71,6 +72,19 @@ config PMS7003
To compile this driver as a module, choose M here: the module will
be called pms7003.
+config SENSIRION_SGP30
+ tristate "Sensirion SGPxx gas sensors"
+ depends on I2C
+ select CRC8
+ help
+ Say Y here to build I2C interface support for the following
+ Sensirion SGP gas sensors:
+ * SGP30 gas sensor
+ * SGPC3 low power gas sensor
+
+ To compile this driver as module, choose M here: the
+ module will be called sgp30.
+
config SPS30
tristate "SPS30 particulate matter sensor"
depends on I2C
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index 0ae89b87e2d6..4edc5d21cb9f 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -2,11 +2,9 @@
#ifndef BME680_H_
#define BME680_H_
-#define BME680_REG_CHIP_I2C_ID 0xD0
-#define BME680_REG_CHIP_SPI_ID 0x50
+#define BME680_REG_CHIP_ID 0xD0
#define BME680_CHIP_ID_VAL 0x61
-#define BME680_REG_SOFT_RESET_I2C 0xE0
-#define BME680_REG_SOFT_RESET_SPI 0x60
+#define BME680_REG_SOFT_RESET 0xE0
#define BME680_CMD_SOFTRESET 0xB6
#define BME680_REG_STATUS 0x73
#define BME680_SPI_MEM_PAGE_BIT BIT(4)
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 70c1fe4366f4..ccde4c65ff93 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -63,9 +63,23 @@ struct bme680_data {
s32 t_fine;
};
+static const struct regmap_range bme680_volatile_ranges[] = {
+ regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
+ regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
+ regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
+};
+
+static const struct regmap_access_table bme680_volatile_table = {
+ .yes_ranges = bme680_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges),
+};
+
const struct regmap_config bme680_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0xef,
+ .volatile_table = &bme680_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL(bme680_regmap_config);
@@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
s64 var1, var2, var3;
s16 calc_temp;
+ /* If the calibration is invalid, attempt to reload it */
+ if (!calib->par_t2)
+ bme680_read_calib(data, calib);
+
var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
var2 = (var1 * calib->par_t2) >> 11;
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
@@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data)
return ret;
}
-static int bme680_read_temp(struct bme680_data *data,
- int *val, int *val2)
+static int bme680_read_temp(struct bme680_data *data, int *val)
{
struct device *dev = regmap_get_device(data->regmap);
int ret;
@@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data,
* compensate_press/compensate_humid to get compensated
* pressure/humidity readings.
*/
- if (val && val2) {
- *val = comp_temp;
- *val2 = 100;
- return IIO_VAL_FRACTIONAL;
+ if (val) {
+ *val = comp_temp * 10; /* Centidegrees to millidegrees */
+ return IIO_VAL_INT;
}
return ret;
@@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data,
s32 adc_press;
/* Read and compensate temperature to get a reading of t_fine */
- ret = bme680_read_temp(data, NULL, NULL);
+ ret = bme680_read_temp(data, NULL);
if (ret < 0)
return ret;
@@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data,
u32 comp_humidity;
/* Read and compensate temperature to get a reading of t_fine */
- ret = bme680_read_temp(data, NULL, NULL);
+ ret = bme680_read_temp(data, NULL);
if (ret < 0)
return ret;
@@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_TEMP:
- return bme680_read_temp(data, val, val2);
+ return bme680_read_temp(data, val);
case IIO_PRESSURE:
return bme680_read_press(data, val, val2);
case IIO_HUMIDITYRELATIVE:
@@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
{
struct iio_dev *indio_dev;
struct bme680_data *data;
+ unsigned int val;
int ret;
+ ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
+ BME680_CMD_SOFTRESET);
+ if (ret < 0) {
+ dev_err(dev, "Failed to reset chip\n");
+ return ret;
+ }
+
+ ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
+ if (ret < 0) {
+ dev_err(dev, "Error reading chip ID\n");
+ return ret;
+ }
+
+ if (val != BME680_CHIP_ID_VAL) {
+ dev_err(dev, "Wrong chip ID, got %x expected %x\n",
+ val, BME680_CHIP_ID_VAL);
+ return -ENODEV;
+ }
+
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
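Two behavioural points in the core changes are easy to miss: bme680_read_temp() now reports IIO_VAL_INT in millidegrees (the compensation code works in centidegrees, hence the * 10), and the soft-reset plus chip-ID handshake moved into the core so the I2C and SPI front-ends below can drop their duplicated copies. A worked unit check:

/*
 * compensate_temp() result: 2150  (centidegrees, i.e. 21.50 degC)
 * reported value:           2150 * 10 = 21500 millidegrees
 * sysfs reader sees:        21.5 degC
 */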
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index b2f805b6b36a..de9c9e3d23ea 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
{
struct regmap *regmap;
const char *name = NULL;
- unsigned int val;
- int ret;
regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
if (IS_ERR(regmap)) {
@@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
return PTR_ERR(regmap);
}
- ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
- BME680_CMD_SOFTRESET);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to reset chip\n");
- return ret;
- }
-
- ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
- if (ret < 0) {
- dev_err(&client->dev, "Error reading I2C chip ID\n");
- return ret;
- }
-
- if (val != BME680_CHIP_ID_VAL) {
- dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
- val, BME680_CHIP_ID_VAL);
- return -ENODEV;
- }
-
if (id)
name = id->name;
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index d0b7bdd3f066..3b838068a7e4 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -12,28 +12,93 @@
#include "bme680.h"
+struct bme680_spi_bus_context {
+ struct spi_device *spi;
+ u8 current_page;
+};
+
+/*
+ * In SPI mode there are only 7 address bits, a "page" register determines
+ * which part of the 8-bit range is active. This function looks at the address
+ * and writes the page selection bit if needed.
+ */
+static int bme680_regmap_spi_select_page(
+ struct bme680_spi_bus_context *ctx, u8 reg)
+{
+ struct spi_device *spi = ctx->spi;
+ int ret;
+ u8 buf[2];
+ u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
+
+ if (page == ctx->current_page)
+ return 0;
+
+ /*
+ * Data sheet claims we're only allowed to change bit 4, so we must do
+ * a read-modify-write on each and every page select
+ */
+ buf[0] = BME680_REG_STATUS;
+ ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to set page %u\n", page);
+ return ret;
+ }
+
+ buf[0] = BME680_REG_STATUS;
+ if (page)
+ buf[1] |= BME680_SPI_MEM_PAGE_BIT;
+ else
+ buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;
+
+ ret = spi_write(spi, buf, 2);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to set page %u\n", page);
+ return ret;
+ }
+
+ ctx->current_page = page;
+
+ return 0;
+}
+
static int bme680_regmap_spi_write(void *context, const void *data,
size_t count)
{
- struct spi_device *spi = context;
+ struct bme680_spi_bus_context *ctx = context;
+ struct spi_device *spi = ctx->spi;
+ int ret;
u8 buf[2];
memcpy(buf, data, 2);
+
+ ret = bme680_regmap_spi_select_page(ctx, buf[0]);
+ if (ret)
+ return ret;
+
/*
* The SPI register address (= full register address without bit 7)
* and the write command (bit7 = RW = '0')
*/
buf[0] &= ~0x80;
- return spi_write_then_read(spi, buf, 2, NULL, 0);
+ return spi_write(spi, buf, 2);
}
static int bme680_regmap_spi_read(void *context, const void *reg,
size_t reg_size, void *val, size_t val_size)
{
- struct spi_device *spi = context;
+ struct bme680_spi_bus_context *ctx = context;
+ struct spi_device *spi = ctx->spi;
+ int ret;
+ u8 addr = *(const u8 *)reg;
+
+ ret = bme680_regmap_spi_select_page(ctx, addr);
+ if (ret)
+ return ret;
- return spi_write_then_read(spi, reg, reg_size, val, val_size);
+ addr |= 0x80; /* bit7 = RW = '1' */
+
+ return spi_write_then_read(spi, &addr, 1, val, val_size);
}
static struct regmap_bus bme680_regmap_bus = {
@@ -46,8 +111,8 @@ static struct regmap_bus bme680_regmap_bus = {
static int bme680_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
+ struct bme680_spi_bus_context *bus_context;
struct regmap *regmap;
- unsigned int val;
int ret;
spi->bits_per_word = 8;
@@ -57,45 +122,21 @@ static int bme680_spi_probe(struct spi_device *spi)
return ret;
}
+ bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
+ if (!bus_context)
+ return -ENOMEM;
+
+ bus_context->spi = spi;
+ bus_context->current_page = 0xff; /* Undefined on warm boot */
+
regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
- &spi->dev, &bme680_regmap_config);
+ bus_context, &bme680_regmap_config);
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Failed to register spi regmap %d\n",
(int)PTR_ERR(regmap));
return PTR_ERR(regmap);
}
- ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
- BME680_CMD_SOFTRESET);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to reset chip\n");
- return ret;
- }
-
- /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
- ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
- if (ret < 0) {
- dev_err(&spi->dev, "Error reading SPI chip ID\n");
- return ret;
- }
-
- if (val != BME680_CHIP_ID_VAL) {
- dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
- val, BME680_CHIP_ID_VAL);
- return -ENODEV;
- }
- /*
- * select Page 1 of spi_mem_page to enable access to
- * to registers from address 0x00 to 0x7F.
- */
- ret = regmap_write_bits(regmap, BME680_REG_STATUS,
- BME680_SPI_MEM_PAGE_BIT,
- BME680_SPI_MEM_PAGE_1_VAL);
- if (ret < 0) {
- dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
- return ret;
- }
-
return bme680_core_probe(&spi->dev, regmap, id->name);
}
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 89cb0066a6e0..8d76afb87d87 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
* Do not use IIO_DEGREE_TO_RAD to avoid precision
* loss. Round to the nearest integer.
*/
- *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
- *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
- ret = IIO_VAL_FRACTIONAL;
+ *val = 0;
+ *val2 = div_s64(val64 * 3141592653ULL,
+ 180 << (CROS_EC_SENSOR_BITS - 1));
+ ret = IIO_VAL_INT_PLUS_NANO;
break;
case MOTIONSENSE_TYPE_MAG:
/*
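The angular-velocity rescale swaps the fractional degree-based representation for nanoradians, avoiding the precision loss the in-code comment warns about. Sanity-checking the new scale, assuming CROS_EC_SENSOR_BITS == 16 as in this driver:

/*
 * val2 = raw * 3141592653 / (180 << 15)
 * for raw == 1:  3141592653 / 5898240 ~= 532 nanoradians per LSB,
 * returned as IIO_VAL_INT_PLUS_NANO with a zero integer part.
 */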
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 6d71fd905e29..c701a45469f6 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
inoutbuf[0] = 0x60; /* write EEPROM */
inoutbuf[0] |= data->ref_mode << 3;
+ inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
inoutbuf[1] = data->dac_value >> 4;
inoutbuf[2] = (data->dac_value & 0xf) << 4;
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 63ca31628a93..92c07ab826eb 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
return bmg160_get_filter(data, val);
case IIO_CHAN_INFO_SCALE:
- *val = 0;
switch (chan->type) {
case IIO_TEMP:
- *val2 = 500000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = 500;
+ return IIO_VAL_INT;
case IIO_ANGL_VEL:
{
int i;
@@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
if (bmg160_scale_table[i].dps_range ==
data->dps_range) {
+ *val = 0;
*val2 = bmg160_scale_table[i].scale;
return IIO_VAL_INT_PLUS_MICRO;
}
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 77fac81a3adc..5ddebede31a6 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -29,7 +29,8 @@
#include "mpu3050.h"
-#define MPU3050_CHIP_ID 0x69
+#define MPU3050_CHIP_ID 0x68
+#define MPU3050_CHIP_ID_MASK 0x7E
/*
* Register map: anything suffixed *_H is a big-endian high byte and always
@@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev,
goto err_power_down;
}
- if (val != MPU3050_CHIP_ID) {
- dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+ if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
+ dev_err(dev, "unsupported chip id %02x\n",
+ (u8)(val & MPU3050_CHIP_ID_MASK));
ret = -ENODEV;
goto err_power_down;
}
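Masking with 0x7E keeps ID bits 6..1 only, so parts reporting the base ID with bit 0 and/or bit 7 set are accepted too. Which variants do so in practice is an assumption here, not something this hunk states; arithmetically:

/*
 * 0x68 & 0x7e == 0x68
 * 0x69 & 0x7e == 0x68
 * 0xe8 & 0x7e == 0x68
 * 0xe9 & 0x7e == 0x68
 */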
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index cd5bfe39591b..dadd921a4a30 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
const unsigned long *mask;
unsigned long *trialmask;
- trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
- sizeof(*trialmask),
- GFP_KERNEL);
+ trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(*trialmask), GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 4700fd5d8c90..9c4d92115504 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1743,10 +1743,10 @@ EXPORT_SYMBOL(__iio_device_register);
**/
void iio_device_unregister(struct iio_dev *indio_dev)
{
- mutex_lock(&indio_dev->info_exist_lock);
-
cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
+ mutex_lock(&indio_dev->info_exist_lock);
+
iio_device_unregister_debugfs(indio_dev);
iio_disable_all_buffers(indio_dev);
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 0a3ec7c726ec..a1fb840de45d 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -89,6 +89,7 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS
This allows the user to config the default GID type that the CM
uses for each device, when initiating new connections.
+if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
@@ -101,6 +102,12 @@ source "drivers/infiniband/hw/ocrdma/Kconfig"
source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
source "drivers/infiniband/hw/usnic/Kconfig"
source "drivers/infiniband/hw/hns/Kconfig"
+source "drivers/infiniband/hw/bnxt_re/Kconfig"
+source "drivers/infiniband/hw/hfi1/Kconfig"
+source "drivers/infiniband/hw/qedr/Kconfig"
+source "drivers/infiniband/sw/rdmavt/Kconfig"
+source "drivers/infiniband/sw/rxe/Kconfig"
+endif
source "drivers/infiniband/ulp/ipoib/Kconfig"
@@ -111,13 +118,5 @@ source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
source "drivers/infiniband/ulp/opa_vnic/Kconfig"
-source "drivers/infiniband/sw/rdmavt/Kconfig"
-source "drivers/infiniband/sw/rxe/Kconfig"
-
-source "drivers/infiniband/hw/hfi1/Kconfig"
-
-source "drivers/infiniband/hw/qedr/Kconfig"
-
-source "drivers/infiniband/hw/bnxt_re/Kconfig"
endif # INFINIBAND
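The seemingly tautological guard "if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS" leans on Kconfig tristate arithmetic and only bites when the symbol is m, which keeps built-in HW drivers from referencing symbols that now live in the ib_uverbs module. Sketched as a truth table (an interpretation, not text from the patch):

/*
 * INFINIBAND_USER_ACCESS = y  ->  y || !y = y  (drivers may be y or m)
 * INFINIBAND_USER_ACCESS = m  ->  m || !m = m  (drivers capped at m)
 * INFINIBAND_USER_ACCESS = n  ->  n || !n = y  (drivers may be y or m)
 */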
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 69dee36e0e89..313f2349b518 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -15,8 +15,6 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
nldev.o restrack.o
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
-ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
-ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
ib_cm-y := cm.o
@@ -39,3 +37,5 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
uverbs_std_types_mr.o uverbs_std_types_counters.o \
uverbs_uapi.o uverbs_std_types_device.o
+ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
+ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 7b04590f307f..43c67e5f43c6 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -185,7 +185,7 @@ EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
{
- return device->cache.ports[port - rdma_start_port(device)].gid;
+ return device->port_data[port].cache.gid;
}
static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
@@ -547,21 +547,19 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
unsigned long mask;
int ret;
- if (ib_dev->ops.get_netdev) {
- idev = ib_dev->ops.get_netdev(ib_dev, port);
- if (idev && attr->ndev != idev) {
- union ib_gid default_gid;
+ idev = ib_device_get_netdev(ib_dev, port);
+ if (idev && attr->ndev != idev) {
+ union ib_gid default_gid;
- /* Adding default GIDs in not permitted */
- make_default_gid(idev, &default_gid);
- if (!memcmp(gid, &default_gid, sizeof(*gid))) {
- dev_put(idev);
- return -EPERM;
- }
- }
- if (idev)
+ /* Adding default GIDs is not permitted */
+ make_default_gid(idev, &default_gid);
+ if (!memcmp(gid, &default_gid, sizeof(*gid))) {
dev_put(idev);
+ return -EPERM;
+ }
}
+ if (idev)
+ dev_put(idev);
mask = GID_ATTR_FIND_MASK_GID |
GID_ATTR_FIND_MASK_GID_TYPE |
@@ -765,7 +763,7 @@ err_free_table:
return NULL;
}
-static void release_gid_table(struct ib_device *device, u8 port,
+static void release_gid_table(struct ib_device *device,
struct ib_gid_table *table)
{
bool leak = false;
@@ -863,31 +861,27 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
static void gid_table_release_one(struct ib_device *ib_dev)
{
- struct ib_gid_table *table;
- u8 port;
+ unsigned int p;
- for (port = 0; port < ib_dev->phys_port_cnt; port++) {
- table = ib_dev->cache.ports[port].gid;
- release_gid_table(ib_dev, port, table);
- ib_dev->cache.ports[port].gid = NULL;
+ rdma_for_each_port (ib_dev, p) {
+ release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
+ ib_dev->port_data[p].cache.gid = NULL;
}
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
- u8 port;
struct ib_gid_table *table;
+ unsigned int rdma_port;
- for (port = 0; port < ib_dev->phys_port_cnt; port++) {
- u8 rdma_port = port + rdma_start_port(ib_dev);
-
- table = alloc_gid_table(
- ib_dev->port_immutable[rdma_port].gid_tbl_len);
+ rdma_for_each_port (ib_dev, rdma_port) {
+ table = alloc_gid_table(
+ ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
if (!table)
goto rollback_table_setup;
gid_table_reserve_default(ib_dev, rdma_port, table);
- ib_dev->cache.ports[port].gid = table;
+ ib_dev->port_data[rdma_port].cache.gid = table;
}
return 0;
@@ -898,14 +892,11 @@ rollback_table_setup:
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
- struct ib_gid_table *table;
- u8 port;
+ unsigned int p;
- for (port = 0; port < ib_dev->phys_port_cnt; port++) {
- table = ib_dev->cache.ports[port].gid;
- cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
- table);
- }
+ rdma_for_each_port (ib_dev, p)
+ cleanup_gid_table_port(ib_dev, p,
+ ib_dev->port_data[p].cache.gid);
}
static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -983,17 +974,17 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
unsigned long mask = GID_ATTR_FIND_MASK_GID |
GID_ATTR_FIND_MASK_GID_TYPE;
struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
- u8 p;
+ unsigned int p;
if (ndev)
mask |= GID_ATTR_FIND_MASK_NETDEV;
- for (p = 0; p < device->phys_port_cnt; p++) {
+ rdma_for_each_port(device, p) {
struct ib_gid_table *table;
unsigned long flags;
int index;
- table = device->cache.ports[p].gid;
+ table = device->port_data[p].cache.gid;
read_lock_irqsave(&table->rwlock, flags);
index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
if (index >= 0) {
@@ -1025,7 +1016,7 @@ int ib_get_cached_pkey(struct ib_device *device,
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+ cache = device->port_data[port_num].cache.pkey;
if (index < 0 || index >= cache->table_len)
ret = -EINVAL;
@@ -1043,14 +1034,12 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
u64 *sn_pfx)
{
unsigned long flags;
- int p;
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- p = port_num - rdma_start_port(device);
read_lock_irqsave(&device->cache.lock, flags);
- *sn_pfx = device->cache.ports[p].subnet_prefix;
+ *sn_pfx = device->port_data[port_num].cache.subnet_prefix;
read_unlock_irqrestore(&device->cache.lock, flags);
return 0;
@@ -1073,7 +1062,7 @@ int ib_find_cached_pkey(struct ib_device *device,
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+ cache = device->port_data[port_num].cache.pkey;
*index = -1;
@@ -1113,7 +1102,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+ cache = device->port_data[port_num].cache.pkey;
*index = -1;
@@ -1141,7 +1130,7 @@ int ib_get_cached_lmc(struct ib_device *device,
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
- *lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
+ *lmc = device->port_data[port_num].cache.lmc;
read_unlock_irqrestore(&device->cache.lock, flags);
return ret;
@@ -1159,8 +1148,7 @@ int ib_get_cached_port_state(struct ib_device *device,
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
- *port_state = device->cache.ports[port_num
- - rdma_start_port(device)].port_state;
+ *port_state = device->port_data[port_num].cache.port_state;
read_unlock_irqrestore(&device->cache.lock, flags);
return ret;
@@ -1361,16 +1349,13 @@ static void ib_cache_update(struct ib_device *device,
write_lock_irq(&device->cache.lock);
- old_pkey_cache = device->cache.ports[port -
- rdma_start_port(device)].pkey;
+ old_pkey_cache = device->port_data[port].cache.pkey;
- device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
- device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
- device->cache.ports[port - rdma_start_port(device)].port_state =
- tprops->state;
+ device->port_data[port].cache.pkey = pkey_cache;
+ device->port_data[port].cache.lmc = tprops->lmc;
+ device->port_data[port].cache.port_state = tprops->state;
- device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
- tprops->subnet_prefix;
+ device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
write_unlock_irq(&device->cache.lock);
if (enforce_security)
@@ -1428,27 +1413,17 @@ static void ib_cache_event(struct ib_event_handler *handler,
int ib_cache_setup_one(struct ib_device *device)
{
- int p;
+ unsigned int p;
int err;
rwlock_init(&device->cache.lock);
- device->cache.ports =
- kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
- sizeof(*device->cache.ports),
- GFP_KERNEL);
- if (!device->cache.ports)
- return -ENOMEM;
-
err = gid_table_setup_one(device);
- if (err) {
- kfree(device->cache.ports);
- device->cache.ports = NULL;
+ if (err)
return err;
- }
- for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
- ib_cache_update(device, p + rdma_start_port(device), true);
+ rdma_for_each_port (device, p)
+ ib_cache_update(device, p, true);
INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
device, ib_cache_event);
@@ -1458,7 +1433,7 @@ int ib_cache_setup_one(struct ib_device *device)
void ib_cache_release_one(struct ib_device *device)
{
- int p;
+ unsigned int p;
/*
* The release function frees all the cache elements.
@@ -1466,11 +1441,10 @@ void ib_cache_release_one(struct ib_device *device)
* all the device's resources when the cache could no
* longer be accessed.
*/
- for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
- kfree(device->cache.ports[p].pkey);
+ rdma_for_each_port (device, p)
+ kfree(device->port_data[p].cache.pkey);
gid_table_release_one(device);
- kfree(device->cache.ports);
}
void ib_cache_cleanup_one(struct ib_device *device)
diff --git a/drivers/infiniband/core/cgroup.c b/drivers/infiniband/core/cgroup.c
index 126ac5f99db7..388fd04e5f63 100644
--- a/drivers/infiniband/core/cgroup.c
+++ b/drivers/infiniband/core/cgroup.c
@@ -21,12 +21,11 @@
* Register with the rdma cgroup. Should be called before
* exposing rdma device to user space applications to avoid
* resource accounting leak.
- * Returns 0 on success or otherwise failure code.
*/
-int ib_device_register_rdmacg(struct ib_device *device)
+void ib_device_register_rdmacg(struct ib_device *device)
{
device->cg_device.name = device->name;
- return rdmacg_register_device(&device->cg_device);
+ rdmacg_register_device(&device->cg_device);
}
/**
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 37980c7564c0..b9416a6fca36 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -4052,8 +4052,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
atomic_long_inc(&port->counter_group[CM_RECV].
counter[attr_id - CM_ATTR_ID_OFFSET]);
- work = kmalloc(sizeof(*work) + sizeof(struct sa_path_rec) * paths,
- GFP_KERNEL);
+ work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
if (!work) {
ib_free_recv_mad(mad_recv_wc);
return;
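
Note: the struct_size() conversion above deserves a word. For a structure ending in a flexible array member, struct_size(work, path, paths) computes sizeof(*work) + paths * sizeof(work->path[0]) and saturates to SIZE_MAX on overflow, so kmalloc() fails cleanly instead of returning a too-small buffer. A minimal sketch with an assumed struct shape (cm_work's real layout is not shown in this hunk):

	struct work_sketch {
		struct list_head list;		/* illustrative fields */
		int num_paths;
		struct sa_path_rec path[];	/* flexible array member */
	};

	struct work_sketch *work;

	/* Replaces: kmalloc(sizeof(*work) +
	 *                   sizeof(struct sa_path_rec) * paths, GFP_KERNEL) */
	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
	if (!work)
		return -ENOMEM;
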
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 84f077b2b90a..68c997be2429 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -659,7 +659,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
- u8 port;
+ unsigned int port;
if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
@@ -673,8 +673,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
mutex_lock(&lock);
list_for_each_entry(cma_dev, &dev_list, list) {
- for (port = rdma_start_port(cma_dev->device);
- port <= rdma_end_port(cma_dev->device); port++) {
+ rdma_for_each_port (cma_dev->device, port) {
gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid;
gid_type = cma_dev->default_gid_type[port - 1];
@@ -888,6 +887,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
id_priv->id.ps = ps;
id_priv->id.qp_type = qp_type;
id_priv->tos_set = false;
+ id_priv->timeout_set = false;
id_priv->gid_type = IB_GID_TYPE_IB;
spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex);
@@ -1130,6 +1130,9 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
} else
ret = -ENOSYS;
+ if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
+ qp_attr->timeout = id_priv->timeout;
+
return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
@@ -2410,6 +2413,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
return PTR_ERR(id);
id->tos = id_priv->tos;
+ id->tos_set = id_priv->tos_set;
id_priv->cm_id.iw = id;
memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
@@ -2462,6 +2466,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
atomic_inc(&id_priv->refcount);
dev_id_priv->internal_id = 1;
dev_id_priv->afonly = id_priv->afonly;
+ dev_id_priv->tos_set = id_priv->tos_set;
+ dev_id_priv->tos = id_priv->tos;
ret = rdma_listen(id, id_priv->backlog);
if (ret)
@@ -2490,6 +2496,34 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos)
}
EXPORT_SYMBOL(rdma_set_service_type);
+/**
+ * rdma_set_ack_timeout() - Set the ack timeout of QP associated
+ * with a connection identifier.
+ * @id: Communication identifier for the connection whose QP is configured.
+ * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
+ *
+ * This function should be called before rdma_connect() on the active side,
+ * and before rdma_accept() on the passive side. It is applicable to the
+ * primary path only. The timeout affects only the local side of the QP; it
+ * is not negotiated with the remote side, and zero disables the timer.
+ *
+ * Return: 0 for success
+ */
+int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
+{
+ struct rdma_id_private *id_priv;
+
+ if (id->qp_type != IB_QPT_RC)
+ return -EINVAL;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ id_priv->timeout = timeout;
+ id_priv->timeout_set = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_set_ack_timeout);
+
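
Note: a hedged usage sketch for rdma_set_ack_timeout(): the caller picks the exponent so that 4.096 usec * 2^timeout covers the expected round trip. cm_id, conn_param and the error label are illustrative, not part of this patch:

	/* ~1 s local ack timeout: 4.096 usec * 2^18 ~= 1.07 s */
	ret = rdma_set_ack_timeout(cm_id, 18);
	if (ret)
		goto err;	/* cm_id->qp_type was not IB_QPT_RC */

	ret = rdma_connect(cm_id, &conn_param);
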
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
void *context)
{
@@ -2966,13 +3000,22 @@ static void addr_handler(int status, struct sockaddr *src_addr,
{
struct rdma_id_private *id_priv = context;
struct rdma_cm_event event = {};
+ struct sockaddr *addr;
+ struct sockaddr_storage old_addr;
mutex_lock(&id_priv->handler_mutex);
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
RDMA_CM_ADDR_RESOLVED))
goto out;
- memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
+ /*
+ * Store the previous src address, so that if we fail to acquire
+ * matching rdma device, old address can be restored back, which helps
+ * to cancel the cma listen operation correctly.
+ */
+ addr = cma_src_addr(id_priv);
+ memcpy(&old_addr, addr, rdma_addr_size(addr));
+ memcpy(addr, src_addr, rdma_addr_size(src_addr));
if (!status && !id_priv->cma_dev) {
status = cma_acquire_dev_by_src_ip(id_priv);
if (status)
@@ -2983,6 +3026,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
}
if (status) {
+ memcpy(addr, &old_addr,
+ rdma_addr_size((struct sockaddr *)&old_addr));
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
RDMA_CM_ADDR_BOUND))
goto out;
@@ -3798,6 +3843,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
return PTR_ERR(cm_id);
cm_id->tos = id_priv->tos;
+ cm_id->tos_set = id_priv->tos_set;
id_priv->cm_id.iw = cm_id;
memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
@@ -4501,7 +4547,7 @@ static void cma_add_one(struct ib_device *device)
if (!cma_dev->default_roce_tos)
goto free_gid_type;
- for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
+ rdma_for_each_port (device, i) {
supported_gids = roce_gid_type_mask_support(device, i);
WARN_ON(!supported_gids);
if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
@@ -4605,85 +4651,6 @@ static void cma_remove_one(struct ib_device *device, void *client_data)
kfree(cma_dev);
}
-static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
-{
- struct nlmsghdr *nlh;
- struct rdma_cm_id_stats *id_stats;
- struct rdma_id_private *id_priv;
- struct rdma_cm_id *id = NULL;
- struct cma_device *cma_dev;
- int i_dev = 0, i_id = 0;
-
- /*
- * We export all of the IDs as a sequence of messages. Each
- * ID gets its own netlink message.
- */
- mutex_lock(&lock);
-
- list_for_each_entry(cma_dev, &dev_list, list) {
- if (i_dev < cb->args[0]) {
- i_dev++;
- continue;
- }
-
- i_id = 0;
- list_for_each_entry(id_priv, &cma_dev->id_list, list) {
- if (i_id < cb->args[1]) {
- i_id++;
- continue;
- }
-
- id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
- sizeof *id_stats, RDMA_NL_RDMA_CM,
- RDMA_NL_RDMA_CM_ID_STATS,
- NLM_F_MULTI);
- if (!id_stats)
- goto out;
-
- memset(id_stats, 0, sizeof *id_stats);
- id = &id_priv->id;
- id_stats->node_type = id->route.addr.dev_addr.dev_type;
- id_stats->port_num = id->port_num;
- id_stats->bound_dev_if =
- id->route.addr.dev_addr.bound_dev_if;
-
- if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
- cma_src_addr(id_priv),
- RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
- goto out;
- if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_dst_addr(id_priv)),
- cma_dst_addr(id_priv),
- RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
- goto out;
-
- id_stats->pid = task_pid_vnr(id_priv->res.task);
- id_stats->port_space = id->ps;
- id_stats->cm_state = id_priv->state;
- id_stats->qp_num = id_priv->qp_num;
- id_stats->qp_type = id->qp_type;
-
- i_id++;
- nlmsg_end(skb, nlh);
- }
-
- cb->args[1] = 0;
- i_dev++;
- }
-
-out:
- mutex_unlock(&lock);
- cb->args[0] = i_dev;
- cb->args[1] = i_id;
-
- return skb->len;
-}
-
-static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
- [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
-};
-
static int cma_init_net(struct net *net)
{
struct cma_pernet *pernet = cma_pernet(net);
@@ -4732,7 +4699,6 @@ static int __init cma_init(void)
if (ret)
goto err;
- rdma_nl_register(RDMA_NL_RDMA_CM, cma_cb_table);
cma_configfs_init();
return 0;
@@ -4748,7 +4714,6 @@ err_wq:
static void __exit cma_cleanup(void)
{
cma_configfs_exit();
- rdma_nl_unregister(RDMA_NL_RDMA_CM);
ib_unregister_client(&cma_client);
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
@@ -4756,7 +4721,5 @@ static void __exit cma_cleanup(void)
destroy_workqueue(cma_wq);
}
-MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_RDMA_CM, 1);
-
module_init(cma_init);
module_exit(cma_cleanup);
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index cf47c69436a7..ca7307277518 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -84,9 +84,11 @@ struct rdma_id_private {
u32 options;
u8 srq;
u8 tos;
- bool tos_set;
+ u8 tos_set:1;
+ u8 timeout_set:1;
u8 reuseaddr;
u8 afonly;
+ u8 timeout;
enum ib_gid_type gid_type;
/*
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 616734313f0c..08c690249594 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -54,9 +54,9 @@ struct pkey_index_qp_list {
struct list_head qp_list;
};
-int ib_device_register_sysfs(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *));
+extern const struct attribute_group ib_dev_attr_group;
+
+int ib_device_register_sysfs(struct ib_device *device);
void ib_device_unregister_sysfs(struct ib_device *device);
int ib_device_rename(struct ib_device *ibdev, const char *name);
@@ -66,6 +66,9 @@ typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+ unsigned int port);
+
void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_filter filter,
void *filter_cookie,
@@ -117,7 +120,7 @@ void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);
#ifdef CONFIG_CGROUP_RDMA
-int ib_device_register_rdmacg(struct ib_device *device);
+void ib_device_register_rdmacg(struct ib_device *device);
void ib_device_unregister_rdmacg(struct ib_device *device);
int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
@@ -128,21 +131,26 @@ void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
struct ib_device *device,
enum rdmacg_resource_type resource_index);
#else
-static inline int ib_device_register_rdmacg(struct ib_device *device)
-{ return 0; }
+static inline void ib_device_register_rdmacg(struct ib_device *device)
+{
+}
static inline void ib_device_unregister_rdmacg(struct ib_device *device)
-{ }
+{
+}
static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
struct ib_device *device,
enum rdmacg_resource_type resource_index)
-{ return 0; }
+{
+ return 0;
+}
static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
struct ib_device *device,
enum rdmacg_resource_type resource_index)
-{ }
+{
+}
#endif
static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
@@ -178,7 +186,7 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
u64 *sn_pfx);
#ifdef CONFIG_SECURITY_INFINIBAND
-void ib_security_destroy_port_pkey_list(struct ib_device *device);
+void ib_security_release_port_pkey_list(struct ib_device *device);
void ib_security_cache_change(struct ib_device *device,
u8 port_num,
@@ -199,8 +207,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
enum ib_qp_type qp_type);
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
+void ib_mad_agent_security_change(void);
#else
-static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
+static inline void ib_security_release_port_pkey_list(struct ib_device *device)
{
}
@@ -264,6 +273,10 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
{
return 0;
}
+
+static inline void ib_mad_agent_security_change(void)
+{
+}
#endif
struct ib_device *ib_device_get_by_index(u32 ifindex);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 238ec42778ef..7421ec4883fb 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -37,54 +37,111 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
+#include <linux/hashtable.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
+#include "restrack.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");
-struct ib_client_data {
- struct list_head list;
- struct ib_client *client;
- void * data;
- /* The device or client is going down. Do not call client or device
- * callbacks other than remove(). */
- bool going_down;
-};
-
struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
-/* The device_list and client_list contain devices and clients after their
- * registration has completed, and the devices and clients are removed
- * during unregistration. */
-static LIST_HEAD(device_list);
-static LIST_HEAD(client_list);
+/*
+ * Each of the three rwsem locks (devices, clients, client_data) protects the
+ * xarray of the same name. Specifically it allows the caller to assert that
+ * the MARK will/will not be changing under the lock, and for devices and
+ * clients, that the value in the xarray is still a valid pointer. Change of
+ * the MARK is linked to the object state, so holding the lock and testing the
+ * MARK also asserts that the contained object is in a certain state.
+ *
+ * This is used to build a two-stage register/unregister flow where objects
+ * can remain in the xarray even while their registration or unregistration
+ * is still in progress.
+ *
+ * The xarray itself provides additional locking, and restartable iteration,
+ * which is also relied on.
+ *
+ * Locks should not be nested, with the exception of client_data, which is
+ * allowed to nest under the read side of the other two locks.
+ *
+ * The devices_rwsem also protects the device name list, any change or
+ * assignment of device name must also hold the write side to guarantee unique
+ * names.
+ */
/*
- * device_mutex and lists_rwsem protect access to both device_list and
- * client_list. device_mutex protects writer access by device and client
- * registration / de-registration. lists_rwsem protects reader access to
- * these lists. Iterators of these lists must lock it for read, while updates
- * to the lists must be done with a write lock. A special case is when the
- * device_mutex is locked. In this case locking the lists for read access is
- * not necessary as the device_mutex implies it.
+ * devices contains devices that have had their names assigned. The
+ * devices may not be registered. Users that care about the registration
+ * status need to call ib_device_try_get() on the device to ensure it is
+ * registered, and keep it registered, for the required duration.
*
- * lists_rwsem also protects access to the client data list.
*/
-static DEFINE_MUTEX(device_mutex);
-static DECLARE_RWSEM(lists_rwsem);
+static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
+static DECLARE_RWSEM(devices_rwsem);
+#define DEVICE_REGISTERED XA_MARK_1
+static LIST_HEAD(client_list);
+#define CLIENT_REGISTERED XA_MARK_1
+static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
+static DECLARE_RWSEM(clients_rwsem);
+
+/*
+ * If client_data is registered then the corresponding client must also still
+ * be registered.
+ */
+#define CLIENT_DATA_REGISTERED XA_MARK_1
+/*
+ * xa_for_each() will not iterate over NULL values stored in an allocating
+ * xarray (XA_FLAGS_ALLOC), so we need our own iterator to see every value
+ * stored in the array. This does the same thing as xa_for_each() except
+ * that it also returns NULL-valued entries. Simplified to work only on
+ * simple xarrays.
+ */
+static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
+ xa_mark_t filter)
+{
+ XA_STATE(xas, xa, *indexp);
+ void *entry;
+
+ rcu_read_lock();
+ do {
+ entry = xas_find_marked(&xas, ULONG_MAX, filter);
+ if (xa_is_zero(entry))
+ break;
+ } while (xas_retry(&xas, entry));
+ rcu_read_unlock();
+
+ if (entry) {
+ *indexp = xas.xa_index;
+ if (xa_is_zero(entry))
+ return NULL;
+ return entry;
+ }
+ return XA_ERROR(-ENOENT);
+}
+#define xan_for_each_marked(xa, index, entry, filter) \
+ for (index = 0, entry = xan_find_marked(xa, &(index), filter); \
+ !xa_is_err(entry); \
+ (index)++, entry = xan_find_marked(xa, &(index), filter))
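
Note: to make the iterator's contract concrete, a hypothetical walk over one device's client data; unlike xa_for_each(), allocated slots whose stored value is still NULL are visited too ('device' is assumed to be in scope):

	void *client_data;
	unsigned long index;

	xan_for_each_marked (&device->client_data, index, client_data,
			     CLIENT_DATA_REGISTERED) {
		if (!client_data)
			continue;	/* registered, data not yet set */
		/* ... use client_data ... */
	}
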
+
+/* RCU hash table mapping netdevice pointers to struct ib_port_data */
+static DEFINE_SPINLOCK(ndev_hash_lock);
+static DECLARE_HASHTABLE(ndev_hash, 5);
+
+static void free_netdevs(struct ib_device *ib_dev);
+static void ib_unregister_work(struct work_struct *work);
+static void __ib_unregister_device(struct ib_device *device);
static int ib_security_change(struct notifier_block *nb, unsigned long event,
void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
@@ -94,6 +151,12 @@ static struct notifier_block ibdev_lsm_nb = {
.notifier_call = ib_security_change,
};
+/* Pointer to the RCU head at the start of the ib_port_data array */
+struct ib_port_data_rcu {
+ struct rcu_head rcu_head;
+ struct ib_port_data pdata[];
+};
+
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
@@ -121,30 +184,18 @@ static int ib_device_check_mandatory(struct ib_device *device)
};
int i;
+ device->kverbs_provider = true;
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
if (!*(void **) ((void *) &device->ops +
mandatory_table[i].offset)) {
- dev_warn(&device->dev,
- "Device is missing mandatory function %s\n",
- mandatory_table[i].name);
- return -EINVAL;
+ device->kverbs_provider = false;
+ break;
}
}
return 0;
}
-static struct ib_device *__ib_device_get_by_index(u32 index)
-{
- struct ib_device *device;
-
- list_for_each_entry(device, &device_list, core_list)
- if (device->index == index)
- return device;
-
- return NULL;
-}
-
/*
* Caller must perform ib_device_put() to return the device reference count
* when ib_device_get_by_index() returns valid device pointer.
@@ -153,13 +204,13 @@ struct ib_device *ib_device_get_by_index(u32 index)
{
struct ib_device *device;
- down_read(&lists_rwsem);
- device = __ib_device_get_by_index(index);
+ down_read(&devices_rwsem);
+ device = xa_load(&devices, index);
if (device) {
if (!ib_device_try_get(device))
device = NULL;
}
- up_read(&lists_rwsem);
+ up_read(&devices_rwsem);
return device;
}
@@ -180,28 +231,56 @@ EXPORT_SYMBOL(ib_device_put);
static struct ib_device *__ib_device_get_by_name(const char *name)
{
struct ib_device *device;
+ unsigned long index;
- list_for_each_entry(device, &device_list, core_list)
+ xa_for_each (&devices, index, device)
if (!strcmp(name, dev_name(&device->dev)))
return device;
return NULL;
}
-int ib_device_rename(struct ib_device *ibdev, const char *name)
+/**
+ * ib_device_get_by_name - Find an IB device by name
+ * @name: The name to look for
+ * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
+ *
+ * Find and hold an ib_device by its name. The caller must call
+ * ib_device_put() on the returned pointer.
+ */
+struct ib_device *ib_device_get_by_name(const char *name,
+ enum rdma_driver_id driver_id)
{
struct ib_device *device;
- int ret = 0;
- if (!strcmp(name, dev_name(&ibdev->dev)))
- return ret;
+ down_read(&devices_rwsem);
+ device = __ib_device_get_by_name(name);
+ if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
+ device->driver_id != driver_id)
+ device = NULL;
- mutex_lock(&device_mutex);
- list_for_each_entry(device, &device_list, core_list) {
- if (!strcmp(name, dev_name(&device->dev))) {
- ret = -EEXIST;
- goto out;
- }
+ if (device) {
+ if (!ib_device_try_get(device))
+ device = NULL;
+ }
+ up_read(&devices_rwsem);
+ return device;
+}
+EXPORT_SYMBOL(ib_device_get_by_name);
+
+int ib_device_rename(struct ib_device *ibdev, const char *name)
+{
+ int ret;
+
+ down_write(&devices_rwsem);
+ if (!strcmp(name, dev_name(&ibdev->dev))) {
+ ret = 0;
+ goto out;
+ }
+
+ if (__ib_device_get_by_name(name)) {
+ ret = -EEXIST;
+ goto out;
}
ret = device_rename(&ibdev->dev, name);
@@ -209,53 +288,60 @@ int ib_device_rename(struct ib_device *ibdev, const char *name)
goto out;
strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
out:
- mutex_unlock(&device_mutex);
+ up_write(&devices_rwsem);
return ret;
}
static int alloc_name(struct ib_device *ibdev, const char *name)
{
- unsigned long *inuse;
struct ib_device *device;
+ unsigned long index;
+ struct ida inuse;
+ int rc;
int i;
- inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
- if (!inuse)
- return -ENOMEM;
-
- list_for_each_entry(device, &device_list, core_list) {
+ lockdep_assert_held_exclusive(&devices_rwsem);
+ ida_init(&inuse);
+ xa_for_each (&devices, index, device) {
char buf[IB_DEVICE_NAME_MAX];
if (sscanf(dev_name(&device->dev), name, &i) != 1)
continue;
- if (i < 0 || i >= PAGE_SIZE * 8)
+ if (i < 0 || i >= INT_MAX)
continue;
snprintf(buf, sizeof buf, name, i);
- if (!strcmp(buf, dev_name(&device->dev)))
- set_bit(i, inuse);
+ if (strcmp(buf, dev_name(&device->dev)) != 0)
+ continue;
+
+ rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
+ if (rc < 0)
+ goto out;
}
- i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
- free_page((unsigned long) inuse);
+ rc = ida_alloc(&inuse, GFP_KERNEL);
+ if (rc < 0)
+ goto out;
- return dev_set_name(&ibdev->dev, name, i);
+ rc = dev_set_name(&ibdev->dev, name, rc);
+out:
+ ida_destroy(&inuse);
+ return rc;
}
static void ib_device_release(struct device *device)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
- WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
- if (dev->reg_state == IB_DEV_UNREGISTERED) {
- /*
- * In IB_DEV_UNINITIALIZED state, cache or port table
- * is not even created. Free cache and port table only when
- * device reaches UNREGISTERED state.
- */
- ib_cache_release_one(dev);
- kfree(dev->port_immutable);
- }
- kfree(dev);
+ free_netdevs(dev);
+ WARN_ON(refcount_read(&dev->refcount));
+ ib_cache_release_one(dev);
+ ib_security_release_port_pkey_list(dev);
+ xa_destroy(&dev->client_data);
+ if (dev->port_data)
+ kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
+ pdata[0]),
+ rcu_head);
+ kfree_rcu(dev, rcu_head);
}
static int ib_device_uevent(struct device *device,
@@ -278,7 +364,7 @@ static struct class ib_class = {
};
/**
- * ib_alloc_device - allocate an IB device struct
+ * _ib_alloc_device - allocate an IB device struct
* @size:size of structure to allocate
*
* Low-level drivers should use ib_alloc_device() to allocate &struct
@@ -287,7 +373,7 @@ static struct class ib_class = {
* ib_dealloc_device() must be used to free structures allocated with
* ib_alloc_device().
*/
-struct ib_device *ib_alloc_device(size_t size)
+struct ib_device *_ib_alloc_device(size_t size)
{
struct ib_device *device;
@@ -298,23 +384,32 @@ struct ib_device *ib_alloc_device(size_t size)
if (!device)
return NULL;
- rdma_restrack_init(&device->res);
+ if (rdma_restrack_init(device)) {
+ kfree(device);
+ return NULL;
+ }
device->dev.class = &ib_class;
+ device->groups[0] = &ib_dev_attr_group;
+ device->dev.groups = device->groups;
device_initialize(&device->dev);
- dev_set_drvdata(&device->dev, device);
-
INIT_LIST_HEAD(&device->event_handler_list);
spin_lock_init(&device->event_handler_lock);
- rwlock_init(&device->client_data_lock);
- INIT_LIST_HEAD(&device->client_data_list);
+ mutex_init(&device->unregistration_lock);
+ /*
+ * client_data needs to use XA_FLAGS_ALLOC so that our mark is not
+ * destroyed if the user stores NULL in the client data.
+ */
+ xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
+ init_rwsem(&device->client_data_rwsem);
INIT_LIST_HEAD(&device->port_list);
init_completion(&device->unreg_completion);
+ INIT_WORK(&device->unregistration_work, ib_unregister_work);
return device;
}
-EXPORT_SYMBOL(ib_alloc_device);
+EXPORT_SYMBOL(_ib_alloc_device);
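
Note: the rename to _ib_alloc_device() implies a type-safe wrapper macro in the public header that hands drivers back their own containing structure. A minimal sketch of that container-of pattern, assuming the embedded struct ib_device is the first member (the exact upstream macro may differ):

	#define ib_alloc_device_sketch(drv_struct, member)		   \
		container_of(_ib_alloc_device(sizeof(struct drv_struct)), \
			     struct drv_struct, member)

	struct my_dev {			/* hypothetical driver structure */
		struct ib_device ibdev;	/* must be first for this sketch */
		int private_state;
	};

	struct my_dev *mdev = ib_alloc_device_sketch(my_dev, ibdev);
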
/**
* ib_dealloc_device - free an IB device struct
@@ -324,32 +419,153 @@ EXPORT_SYMBOL(ib_alloc_device);
*/
void ib_dealloc_device(struct ib_device *device)
{
- WARN_ON(!list_empty(&device->client_data_list));
- WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
- device->reg_state != IB_DEV_UNINITIALIZED);
- rdma_restrack_clean(&device->res);
+ if (device->ops.dealloc_driver)
+ device->ops.dealloc_driver(device);
+
+ /*
+ * ib_unregister_driver() requires all devices to remain in the xarray
+ * while their ops are callable. The last op we call is dealloc_driver
+ * above. This is needed to create a fence on op callbacks prior to
+ * allowing the driver module to unload.
+ */
+ down_write(&devices_rwsem);
+ if (xa_load(&devices, device->index) == device)
+ xa_erase(&devices, device->index);
+ up_write(&devices_rwsem);
+
+ /* Expedite releasing netdev references */
+ free_netdevs(device);
+
+ WARN_ON(!xa_empty(&device->client_data));
+ WARN_ON(refcount_read(&device->refcount));
+ rdma_restrack_clean(device);
+ /* Balances with device_initialize */
put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
-static int add_client_context(struct ib_device *device, struct ib_client *client)
+/*
+ * add_client_context() and remove_client_context() must be safe against
+ * parallel calls on the same device - registration/unregistration of both the
+ * device and client can be occurring in parallel.
+ *
+ * The routines need to be a fence: no caller may return until the add
+ * or remove has fully completed.
+ */
+static int add_client_context(struct ib_device *device,
+ struct ib_client *client)
{
- struct ib_client_data *context;
+ int ret = 0;
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
+ if (!device->kverbs_provider && !client->no_kverbs_req)
+ return 0;
+
+ down_write(&device->client_data_rwsem);
+ /*
+ * Another caller to add_client_context got here first and has already
+ * completely initialized context.
+ */
+ if (xa_get_mark(&device->client_data, client->client_id,
+ CLIENT_DATA_REGISTERED))
+ goto out;
+
+ ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
+ GFP_KERNEL));
+ if (ret)
+ goto out;
+ downgrade_write(&device->client_data_rwsem);
+ if (client->add)
+ client->add(device);
+
+ /* Readers shall not see a client until add has been completed */
+ xa_set_mark(&device->client_data, client->client_id,
+ CLIENT_DATA_REGISTERED);
+ up_read(&device->client_data_rwsem);
+ return 0;
+
+out:
+ up_write(&device->client_data_rwsem);
+ return ret;
+}
- context->client = client;
- context->data = NULL;
- context->going_down = false;
+static void remove_client_context(struct ib_device *device,
+ unsigned int client_id)
+{
+ struct ib_client *client;
+ void *client_data;
- down_write(&lists_rwsem);
- write_lock_irq(&device->client_data_lock);
- list_add(&context->list, &device->client_data_list);
- write_unlock_irq(&device->client_data_lock);
- up_write(&lists_rwsem);
+ down_write(&device->client_data_rwsem);
+ if (!xa_get_mark(&device->client_data, client_id,
+ CLIENT_DATA_REGISTERED)) {
+ up_write(&device->client_data_rwsem);
+ return;
+ }
+ client_data = xa_load(&device->client_data, client_id);
+ xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
+ client = xa_load(&clients, client_id);
+ downgrade_write(&device->client_data_rwsem);
+ /*
+ * Notice we cannot be holding any exclusive locks when calling the
+ * remove callback as the remove callback can recurse back into any
+ * public functions in this module and thus try for any locks those
+ * functions take.
+ *
+ * For this reason clients and drivers should not call the
+ * unregistration functions while holding any locks.
+ *
+ * It is tempting to drop the client_data_rwsem too, but this is
+ * required to ensure that unregister_client does not return until all
+ * clients are completely unregistered, which is required to avoid
+ * module unloading races.
+ */
+ if (client->remove)
+ client->remove(device, client_data);
+
+ xa_erase(&device->client_data, client_id);
+ up_read(&device->client_data_rwsem);
+}
+
+static int alloc_port_data(struct ib_device *device)
+{
+ struct ib_port_data_rcu *pdata_rcu;
+ unsigned int port;
+
+ if (device->port_data)
+ return 0;
+
+ /* This can only be called once the physical port range is defined */
+ if (WARN_ON(!device->phys_port_cnt))
+ return -EINVAL;
+
+ /*
+ * device->port_data is indexed directly by the port number to make
+ * access to this data as efficient as possible.
+ *
+ * Therefore port_data is declared as a 1-based array with potential
+ * empty slots at the beginning.
+ */
+ pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
+ rdma_end_port(device) + 1),
+ GFP_KERNEL);
+ if (!pdata_rcu)
+ return -ENOMEM;
+ /*
+ * The rcu_head is put in front of the port data array and the stored
+ * pointer is adjusted since we never need to see that member until
+ * kfree_rcu.
+ */
+ device->port_data = pdata_rcu->pdata;
+
+ rdma_for_each_port (device, port) {
+ struct ib_port_data *pdata = &device->port_data[port];
+
+ pdata->ib_dev = device;
+ spin_lock_init(&pdata->pkey_list_lock);
+ INIT_LIST_HEAD(&pdata->pkey_list);
+ spin_lock_init(&pdata->netdev_lock);
+ INIT_HLIST_NODE(&pdata->ndev_hash_link);
+ }
return 0;
}
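
Note: because port_data is a 1-based array with potentially empty leading slots, lookups should validate the port number before indexing; a short hypothetical helper:

	static struct ib_port_data *get_pdata(struct ib_device *dev,
					      unsigned int port)
	{
		if (!rdma_is_port_valid(dev, port))
			return NULL;
		return &dev->port_data[port];
	}
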
@@ -359,29 +575,20 @@ static int verify_immutable(const struct ib_device *dev, u8 port)
rdma_max_mad_size(dev, port) != 0);
}
-static int read_port_immutable(struct ib_device *device)
+static int setup_port_data(struct ib_device *device)
{
+ unsigned int port;
int ret;
- u8 start_port = rdma_start_port(device);
- u8 end_port = rdma_end_port(device);
- u8 port;
- /**
- * device->port_immutable is indexed directly by the port number to make
- * access to this data as efficient as possible.
- *
- * Therefore port_immutable is declared as a 1 based array with
- * potential empty slots at the beginning.
- */
- device->port_immutable = kcalloc(end_port + 1,
- sizeof(*device->port_immutable),
- GFP_KERNEL);
- if (!device->port_immutable)
- return -ENOMEM;
+ ret = alloc_port_data(device);
+ if (ret)
+ return ret;
+
+ rdma_for_each_port (device, port) {
+ struct ib_port_data *pdata = &device->port_data[port];
- for (port = start_port; port <= end_port; ++port) {
- ret = device->ops.get_port_immutable(
- device, port, &device->port_immutable[port]);
+ ret = device->ops.get_port_immutable(device, port,
+ &pdata->immutable);
if (ret)
return ret;
@@ -400,39 +607,16 @@ void ib_get_device_fw_str(struct ib_device *dev, char *str)
}
EXPORT_SYMBOL(ib_get_device_fw_str);
-static int setup_port_pkey_list(struct ib_device *device)
-{
- int i;
-
- /**
- * device->port_pkey_list is indexed directly by the port number,
- * Therefore it is declared as a 1 based array with potential empty
- * slots at the beginning.
- */
- device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
- sizeof(*device->port_pkey_list),
- GFP_KERNEL);
-
- if (!device->port_pkey_list)
- return -ENOMEM;
-
- for (i = 0; i < (rdma_end_port(device) + 1); i++) {
- spin_lock_init(&device->port_pkey_list[i].list_lock);
- INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
- }
-
- return 0;
-}
-
static void ib_policy_change_task(struct work_struct *work)
{
struct ib_device *dev;
+ unsigned long index;
- down_read(&lists_rwsem);
- list_for_each_entry(dev, &device_list, core_list) {
- int i;
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
+ unsigned int i;
- for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
+ rdma_for_each_port (dev, i) {
u64 sp;
int ret = ib_get_cached_subnet_prefix(dev,
i,
@@ -445,7 +629,7 @@ static void ib_policy_change_task(struct work_struct *work)
ib_security_cache_change(dev, i, sp);
}
}
- up_read(&lists_rwsem);
+ up_read(&devices_rwsem);
}
static int ib_security_change(struct notifier_block *nb, unsigned long event,
@@ -455,32 +639,43 @@ static int ib_security_change(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
schedule_work(&ib_policy_change_work);
+ ib_mad_agent_security_change();
return NOTIFY_OK;
}
-/**
- * __dev_new_index - allocate an device index
- *
- * Returns a suitable unique value for a new device interface
- * number. It assumes that there are less than 2^32-1 ib devices
- * will be present in the system.
+/*
+ * Assign the unique string device name and the unique device index. This is
+ * undone by ib_dealloc_device.
*/
-static u32 __dev_new_index(void)
+static int assign_name(struct ib_device *device, const char *name)
{
- /*
- * The device index to allow stable naming.
- * Similar to struct net -> ifindex.
- */
- static u32 index;
+ static u32 last_id;
+ int ret;
- for (;;) {
- if (!(++index))
- index = 1;
+ down_write(&devices_rwsem);
+ /* Assign a unique name to the device */
+ if (strchr(name, '%'))
+ ret = alloc_name(device, name);
+ else
+ ret = dev_set_name(&device->dev, name);
+ if (ret)
+ goto out;
- if (!__ib_device_get_by_index(index))
- return index;
+ if (__ib_device_get_by_name(dev_name(&device->dev))) {
+ ret = -ENFILE;
+ goto out;
}
+ strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
+
+ ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
+ &last_id, GFP_KERNEL);
+ if (ret > 0)
+ ret = 0;
+
+out:
+ up_write(&devices_rwsem);
+ return ret;
}
static void setup_dma_device(struct ib_device *device)
@@ -518,27 +713,25 @@ static void setup_dma_device(struct ib_device *device)
}
}
-static void cleanup_device(struct ib_device *device)
-{
- ib_cache_cleanup_one(device);
- ib_cache_release_one(device);
- kfree(device->port_pkey_list);
- kfree(device->port_immutable);
-}
-
+/*
+ * setup_device() allocates memory and sets up data that requires calling the
+ * device ops; this is the only reason these actions are not done during
+ * ib_alloc_device(). It is undone by ib_dealloc_device().
+ */
static int setup_device(struct ib_device *device)
{
struct ib_udata uhw = {.outlen = 0, .inlen = 0};
int ret;
+ setup_dma_device(device);
+
ret = ib_device_check_mandatory(device);
if (ret)
return ret;
- ret = read_port_immutable(device);
+ ret = setup_port_data(device);
if (ret) {
- dev_warn(&device->dev,
- "Couldn't create per port immutable data\n");
+ dev_warn(&device->dev, "Couldn't create per-port data\n");
return ret;
}
@@ -547,27 +740,76 @@ static int setup_device(struct ib_device *device)
if (ret) {
dev_warn(&device->dev,
"Couldn't query the device attributes\n");
- goto port_cleanup;
+ return ret;
}
- ret = setup_port_pkey_list(device);
- if (ret) {
- dev_warn(&device->dev, "Couldn't create per port_pkey_list\n");
- goto port_cleanup;
+ return 0;
+}
+
+static void disable_device(struct ib_device *device)
+{
+ struct ib_client *client;
+
+ WARN_ON(!refcount_read(&device->refcount));
+
+ down_write(&devices_rwsem);
+ xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
+ up_write(&devices_rwsem);
+
+ down_read(&clients_rwsem);
+ list_for_each_entry_reverse(client, &client_list, list)
+ remove_client_context(device, client->client_id);
+ up_read(&clients_rwsem);
+
+ /* Pairs with refcount_set in enable_device */
+ ib_device_put(device);
+ wait_for_completion(&device->unreg_completion);
+
+ /* Expedite removing unregistered pointers from the hash table */
+ free_netdevs(device);
+}
+
+/*
+ * An enabled device is visible to all clients and to all the public facing
+ * APIs that return a device pointer. This always returns with a new get, even
+ * if it fails.
+ */
+static int enable_device_and_get(struct ib_device *device)
+{
+ struct ib_client *client;
+ unsigned long index;
+ int ret = 0;
+
+ /*
+ * One ref belongs to the xa and the other belongs to this
+ * thread. This is needed to guard against parallel unregistration.
+ */
+ refcount_set(&device->refcount, 2);
+ down_write(&devices_rwsem);
+ xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
+
+ /*
+ * By using downgrade_write() we ensure that no other thread can clear
+ * DEVICE_REGISTERED while we are completing the client setup.
+ */
+ downgrade_write(&devices_rwsem);
+
+ if (device->ops.enable_driver) {
+ ret = device->ops.enable_driver(device);
+ if (ret)
+ goto out;
}
- ret = ib_cache_setup_one(device);
- if (ret) {
- dev_warn(&device->dev,
- "Couldn't set up InfiniBand P_Key/GID cache\n");
- goto pkey_cleanup;
+ down_read(&clients_rwsem);
+ xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
+ ret = add_client_context(device, client);
+ if (ret)
+ break;
}
- return 0;
+ up_read(&clients_rwsem);
-pkey_cleanup:
- kfree(device->port_pkey_list);
-port_cleanup:
- kfree(device->port_immutable);
+out:
+ up_read(&devices_rwsem);
return ret;
}
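
Note: enable_device_and_get() and add_client_context() both rely on the rwsem downgrade idiom; a standalone sketch of it, with placeholder publish/notify steps:

	static DECLARE_RWSEM(example_rwsem);

	static void publish_then_notify(void)
	{
		down_write(&example_rwsem);
		/* publish state readers must not observe half-done */
		downgrade_write(&example_rwsem);	/* now held for read */
		/* run callbacks: new readers may enter, but writers cannot
		 * retract the just-published state until up_read() */
		up_read(&example_rwsem);
	}
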
@@ -579,133 +821,254 @@ port_cleanup:
* devices with the IB core. All registered clients will receive a
* callback for each device that is added. @device must be allocated
* with ib_alloc_device().
+ *
+ * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
+ * asynchronously then the device pointer may become freed as soon as this
+ * function returns.
*/
-int ib_register_device(struct ib_device *device, const char *name,
- int (*port_callback)(struct ib_device *, u8,
- struct kobject *))
+int ib_register_device(struct ib_device *device, const char *name)
{
int ret;
- struct ib_client *client;
- setup_dma_device(device);
-
- mutex_lock(&device_mutex);
-
- if (strchr(name, '%')) {
- ret = alloc_name(device, name);
- if (ret)
- goto out;
- } else {
- ret = dev_set_name(&device->dev, name);
- if (ret)
- goto out;
- }
- if (__ib_device_get_by_name(dev_name(&device->dev))) {
- ret = -ENFILE;
- goto out;
- }
- strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
+ ret = assign_name(device, name);
+ if (ret)
+ return ret;
ret = setup_device(device);
if (ret)
- goto out;
-
- device->index = __dev_new_index();
+ return ret;
- ret = ib_device_register_rdmacg(device);
+ ret = ib_cache_setup_one(device);
if (ret) {
dev_warn(&device->dev,
- "Couldn't register device with rdma cgroup\n");
- goto dev_cleanup;
+ "Couldn't set up InfiniBand P_Key/GID cache\n");
+ return ret;
}
- ret = ib_device_register_sysfs(device, port_callback);
+ ib_device_register_rdmacg(device);
+
+ ret = device_add(&device->dev);
+ if (ret)
+ goto cg_cleanup;
+
+ ret = ib_device_register_sysfs(device);
if (ret) {
dev_warn(&device->dev,
"Couldn't register device with driver model\n");
- goto cg_cleanup;
+ goto dev_cleanup;
}
- refcount_set(&device->refcount, 1);
- device->reg_state = IB_DEV_REGISTERED;
+ ret = enable_device_and_get(device);
+ if (ret) {
+ void (*dealloc_fn)(struct ib_device *);
- list_for_each_entry(client, &client_list, list)
- if (!add_client_context(device, client) && client->add)
- client->add(device);
+ /*
+ * If we hit this error flow then we don't want to
+ * automatically dealloc the device since the caller is
+ * expected to call ib_dealloc_device() after
+ * ib_register_device() fails. This is tricky due to the
+ * possibility of a parallel unregistration along with this
+ * error flow. Since we have a refcount here we know any
+ * parallel flow is stopped in disable_device and will see the
+ * NULL pointers, causing responsibility for
+ * ib_dealloc_device() to revert back to this thread.
+ */
+ dealloc_fn = device->ops.dealloc_driver;
+ device->ops.dealloc_driver = NULL;
+ ib_device_put(device);
+ __ib_unregister_device(device);
+ device->ops.dealloc_driver = dealloc_fn;
+ return ret;
+ }
+ ib_device_put(device);
- down_write(&lists_rwsem);
- list_add_tail(&device->core_list, &device_list);
- up_write(&lists_rwsem);
- mutex_unlock(&device_mutex);
return 0;
+dev_cleanup:
+ device_del(&device->dev);
cg_cleanup:
ib_device_unregister_rdmacg(device);
-dev_cleanup:
- cleanup_device(device);
-out:
- mutex_unlock(&device_mutex);
+ ib_cache_cleanup_one(device);
return ret;
}
EXPORT_SYMBOL(ib_register_device);
+/* Callers must hold a get on the device. */
+static void __ib_unregister_device(struct ib_device *ib_dev)
+{
+ /*
+ * We have a registration lock so that all the calls to unregister are
+ * fully fenced; once any unregister returns, the device is truly
+ * unregistered even if multiple callers are unregistering it at the
+ * same time. This also interacts with the registration flow and
+ * provides sane semantics if register and unregister are racing.
+ */
+ mutex_lock(&ib_dev->unregistration_lock);
+ if (!refcount_read(&ib_dev->refcount))
+ goto out;
+
+ disable_device(ib_dev);
+ ib_device_unregister_sysfs(ib_dev);
+ device_del(&ib_dev->dev);
+ ib_device_unregister_rdmacg(ib_dev);
+ ib_cache_cleanup_one(ib_dev);
+
+ /*
+ * Drivers using the new flow may not call ib_dealloc_device except
+ * in error unwind prior to registration success.
+ */
+ if (ib_dev->ops.dealloc_driver) {
+ WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
+ ib_dealloc_device(ib_dev);
+ }
+out:
+ mutex_unlock(&ib_dev->unregistration_lock);
+}
+
/**
* ib_unregister_device - Unregister an IB device
- * @device:Device to unregister
+ * @device: The device to unregister
*
* Unregister an IB device. All clients will receive a remove callback.
+ *
+ * Callers should call this routine only once, and protect against races with
+ * registration. Typically it should only be called as part of a remove
+ * callback in an implementation of driver core's struct device_driver and
+ * related.
+ *
+ * If ops.dealloc_driver is used then ib_dev will be freed upon return from
+ * this function.
*/
-void ib_unregister_device(struct ib_device *device)
+void ib_unregister_device(struct ib_device *ib_dev)
{
- struct ib_client_data *context, *tmp;
- unsigned long flags;
+ get_device(&ib_dev->dev);
+ __ib_unregister_device(ib_dev);
+ put_device(&ib_dev->dev);
+}
+EXPORT_SYMBOL(ib_unregister_device);
- /*
- * Wait for all netlink command callers to finish working on the
- * device.
- */
- ib_device_put(device);
- wait_for_completion(&device->unreg_completion);
+/**
+ * ib_unregister_device_and_put - Unregister a device while holding a 'get'
+ * @ib_dev: The device to unregister
+ *
+ * This is the same as ib_unregister_device(), except it includes an internal
+ * ib_device_put() that should match a 'get' obtained by the caller.
+ *
+ * It is safe to call this routine concurrently from multiple threads while
+ * holding the 'get'. When the function returns the device is fully
+ * unregistered.
+ *
+ * Drivers using this flow MUST use the dealloc_driver callback to clean up
+ * their resources associated with the device and dealloc it.
+ */
+void ib_unregister_device_and_put(struct ib_device *ib_dev)
+{
+ WARN_ON(!ib_dev->ops.dealloc_driver);
+ get_device(&ib_dev->dev);
+ ib_device_put(ib_dev);
+ __ib_unregister_device(ib_dev);
+ put_device(&ib_dev->dev);
+}
+EXPORT_SYMBOL(ib_unregister_device_and_put);
- mutex_lock(&device_mutex);
+/**
+ * ib_unregister_driver - Unregister all IB devices for a driver
+ * @driver_id: The driver to unregister
+ *
+ * This implements a fence for device unregistration. It only returns once all
+ * devices associated with the driver_id have fully completed their
+ * unregistration and returned from ib_unregister_device*().
+ *
+ * If devices are not yet unregistered, it goes ahead and starts unregistering
+ * them.
+ *
+ * This does not block creation of new devices with the given driver_id; that
+ * is the responsibility of the caller.
+ */
+void ib_unregister_driver(enum rdma_driver_id driver_id)
+{
+ struct ib_device *ib_dev;
+ unsigned long index;
+
+ down_read(&devices_rwsem);
+ xa_for_each (&devices, index, ib_dev) {
+ if (ib_dev->driver_id != driver_id)
+ continue;
+
+ get_device(&ib_dev->dev);
+ up_read(&devices_rwsem);
- down_write(&lists_rwsem);
- list_del(&device->core_list);
- write_lock_irq(&device->client_data_lock);
- list_for_each_entry(context, &device->client_data_list, list)
- context->going_down = true;
- write_unlock_irq(&device->client_data_lock);
- downgrade_write(&lists_rwsem);
+ WARN_ON(!ib_dev->ops.dealloc_driver);
+ __ib_unregister_device(ib_dev);
- list_for_each_entry(context, &device->client_data_list, list) {
- if (context->client->remove)
- context->client->remove(device, context->data);
+ put_device(&ib_dev->dev);
+ down_read(&devices_rwsem);
}
- up_read(&lists_rwsem);
+ up_read(&devices_rwsem);
+}
+EXPORT_SYMBOL(ib_unregister_driver);
- ib_device_unregister_sysfs(device);
- ib_device_unregister_rdmacg(device);
+static void ib_unregister_work(struct work_struct *work)
+{
+ struct ib_device *ib_dev =
+ container_of(work, struct ib_device, unregistration_work);
- mutex_unlock(&device_mutex);
+ __ib_unregister_device(ib_dev);
+ put_device(&ib_dev->dev);
+}
- ib_cache_cleanup_one(device);
+/**
+ * ib_unregister_device_queued - Unregister a device using a work queue
+ * @ib_dev: The device to unregister
+ *
+ * This schedules an asynchronous unregistration using a WQ for the device. A
+ * driver should use this to avoid holding locks while doing unregistration,
+ * such as holding the RTNL lock.
+ *
+ * Drivers using this API must use ib_unregister_driver before module unload
+ * to ensure that all scheduled unregistrations have completed.
+ */
+void ib_unregister_device_queued(struct ib_device *ib_dev)
+{
+ WARN_ON(!refcount_read(&ib_dev->refcount));
+ WARN_ON(!ib_dev->ops.dealloc_driver);
+ get_device(&ib_dev->dev);
+ if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
+ put_device(&ib_dev->dev);
+}
+EXPORT_SYMBOL(ib_unregister_device_queued);
- ib_security_destroy_port_pkey_list(device);
- kfree(device->port_pkey_list);
+static int assign_client_id(struct ib_client *client)
+{
+ int ret;
- down_write(&lists_rwsem);
- write_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list,
- list) {
- list_del(&context->list);
- kfree(context);
+ down_write(&clients_rwsem);
+ /*
+ * The add/remove callbacks must be called in FIFO/LIFO order. To
+ * achieve this we assign client_ids so they are sorted in
+ * registration order, and retain a linked list we can reverse iterate
+ * to get the LIFO order. The extra linked list can go away if xarray
+ * learns to reverse iterate.
+ */
+ if (list_empty(&client_list)) {
+ client->client_id = 0;
+ } else {
+ struct ib_client *last;
+
+ last = list_last_entry(&client_list, struct ib_client, list);
+ client->client_id = last->client_id + 1;
}
- write_unlock_irqrestore(&device->client_data_lock, flags);
- up_write(&lists_rwsem);
+ ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
+ if (ret)
+ goto out;
- device->reg_state = IB_DEV_UNREGISTERED;
+ xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
+ list_add_tail(&client->list, &client_list);
+
+out:
+ up_write(&clients_rwsem);
+ return ret;
}
-EXPORT_SYMBOL(ib_unregister_device);
/**
* ib_register_client - Register an IB client
@@ -723,19 +1086,23 @@ EXPORT_SYMBOL(ib_unregister_device);
int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
+ unsigned long index;
+ int ret;
- mutex_lock(&device_mutex);
-
- list_for_each_entry(device, &device_list, core_list)
- if (!add_client_context(device, client) && client->add)
- client->add(device);
-
- down_write(&lists_rwsem);
- list_add_tail(&client->list, &client_list);
- up_write(&lists_rwsem);
-
- mutex_unlock(&device_mutex);
+ ret = assign_client_id(client);
+ if (ret)
+ return ret;
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
+ ret = add_client_context(device, client);
+ if (ret) {
+ up_read(&devices_rwsem);
+ ib_unregister_client(client);
+ return ret;
+ }
+ }
+ up_read(&devices_rwsem);
return 0;
}
EXPORT_SYMBOL(ib_register_client);
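
Note: for context, a minimal hypothetical client showing the add/remove contract these changes fence (all names invented for illustration):

	static struct ib_client my_client;

	static void my_client_add(struct ib_device *device)
	{
		void *st = kzalloc(16, GFP_KERNEL);	/* arbitrary state */

		if (st)
			ib_set_client_data(device, &my_client, st);
	}

	static void my_client_remove(struct ib_device *device, void *data)
	{
		kfree(data);
	}

	static struct ib_client my_client = {
		.name	= "my_client",
		.add	= my_client_add,
		.remove	= my_client_remove,
	};

	/* at module init: ret = ib_register_client(&my_client); */
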
@@ -747,108 +1114,56 @@ EXPORT_SYMBOL(ib_register_client);
* Upper level users use ib_unregister_client() to remove their client
* registration. When ib_unregister_client() is called, the client
* will receive a remove callback for each IB device still registered.
+ *
+ * This is a full fence, once it returns no client callbacks will be called,
+ * or are running in another thread.
*/
void ib_unregister_client(struct ib_client *client)
{
- struct ib_client_data *context;
struct ib_device *device;
+ unsigned long index;
- mutex_lock(&device_mutex);
+ down_write(&clients_rwsem);
+ xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
+ up_write(&clients_rwsem);
+ /*
+ * Every device still known must be serialized to make sure we are
+ * done with the client callbacks before we return.
+ */
+ down_read(&devices_rwsem);
+ xa_for_each (&devices, index, device)
+ remove_client_context(device, client->client_id);
+ up_read(&devices_rwsem);
- down_write(&lists_rwsem);
+ down_write(&clients_rwsem);
list_del(&client->list);
- up_write(&lists_rwsem);
-
- list_for_each_entry(device, &device_list, core_list) {
- struct ib_client_data *found_context = NULL;
-
- down_write(&lists_rwsem);
- write_lock_irq(&device->client_data_lock);
- list_for_each_entry(context, &device->client_data_list, list)
- if (context->client == client) {
- context->going_down = true;
- found_context = context;
- break;
- }
- write_unlock_irq(&device->client_data_lock);
- up_write(&lists_rwsem);
-
- if (client->remove)
- client->remove(device, found_context ?
- found_context->data : NULL);
-
- if (!found_context) {
- dev_warn(&device->dev,
- "No client context found for %s\n",
- client->name);
- continue;
- }
-
- down_write(&lists_rwsem);
- write_lock_irq(&device->client_data_lock);
- list_del(&found_context->list);
- write_unlock_irq(&device->client_data_lock);
- up_write(&lists_rwsem);
- kfree(found_context);
- }
-
- mutex_unlock(&device_mutex);
+ xa_erase(&clients, client->client_id);
+ up_write(&clients_rwsem);
}
EXPORT_SYMBOL(ib_unregister_client);
/**
- * ib_get_client_data - Get IB client context
- * @device:Device to get context for
- * @client:Client to get context for
- *
- * ib_get_client_data() returns client context set with
- * ib_set_client_data().
- */
-void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
-{
- struct ib_client_data *context;
- void *ret = NULL;
- unsigned long flags;
-
- read_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry(context, &device->client_data_list, list)
- if (context->client == client) {
- ret = context->data;
- break;
- }
- read_unlock_irqrestore(&device->client_data_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(ib_get_client_data);
-
-/**
* ib_set_client_data - Set IB client context
* @device:Device to set context for
* @client:Client to set context for
* @data:Context to set
*
- * ib_set_client_data() sets client context that can be retrieved with
- * ib_get_client_data().
+ * ib_set_client_data() sets client context data that can be retrieved with
+ * ib_get_client_data(). This can only be called while the client is
+ * registered to the device, once the ib_client remove() callback returns this
+ * cannot be called.
*/
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void *data)
{
- struct ib_client_data *context;
- unsigned long flags;
-
- write_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry(context, &device->client_data_list, list)
- if (context->client == client) {
- context->data = data;
- goto out;
- }
+ void *rc;
- dev_warn(&device->dev, "No client context found for %s\n",
- client->name);
+ if (WARN_ON(IS_ERR(data)))
+ data = NULL;
-out:
- write_unlock_irqrestore(&device->client_data_lock, flags);
+ rc = xa_store(&device->client_data, client->client_id, data,
+ GFP_KERNEL);
+ WARN_ON(xa_is_err(rc));
}
EXPORT_SYMBOL(ib_set_client_data);
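A sketch of the usage window described above, assuming the conventional pattern of setting the data from the client's add() callback; my_state and my_client are illustrative:

static void my_add(struct ib_device *dev)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return;
	ib_set_client_data(dev, &my_client, st);
}

/* elsewhere, while the client is still registered to dev */
struct my_state *st = ib_get_client_data(dev, &my_client);

(ib_get_client_data() itself is deleted from this file above; since the updated kernel-doc still refers to it, its replacement presumably lives outside this hunk.)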
@@ -947,6 +1262,185 @@ int ib_query_port(struct ib_device *device,
}
EXPORT_SYMBOL(ib_query_port);
+static void add_ndev_hash(struct ib_port_data *pdata)
+{
+ unsigned long flags;
+
+ might_sleep();
+
+ spin_lock_irqsave(&ndev_hash_lock, flags);
+ if (hash_hashed(&pdata->ndev_hash_link)) {
+ hash_del_rcu(&pdata->ndev_hash_link);
+ spin_unlock_irqrestore(&ndev_hash_lock, flags);
+ /*
+ * We cannot do hash_add_rcu after a hash_del_rcu until the
+ * grace period elapses
+ */
+ synchronize_rcu();
+ spin_lock_irqsave(&ndev_hash_lock, flags);
+ }
+ if (pdata->netdev)
+ hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
+ (uintptr_t)pdata->netdev);
+ spin_unlock_irqrestore(&ndev_hash_lock, flags);
+}
+
+/**
+ * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
+ * @ib_dev: Device to modify
+ * @ndev: net_device to affiliate, may be NULL
+ * @port: IB port the net_device is connected to
+ *
+ * Drivers should use this to link the ib_device to a netdev so the netdev
+ * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
+ * affiliated with any port.
+ *
+ * The caller must ensure that the given ndev is not unregistered or
+ * unregistering, and that either the ib_device is unregistered or
+ * ib_device_set_netdev() is called with NULL when the ndev sends a
+ * NETDEV_UNREGISTER event.
+ */
+int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
+ unsigned int port)
+{
+ struct net_device *old_ndev;
+ struct ib_port_data *pdata;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Drivers wish to call this before ib_register_device(), so we have to
+ * set up the port data early.
+ */
+ ret = alloc_port_data(ib_dev);
+ if (ret)
+ return ret;
+
+ if (!rdma_is_port_valid(ib_dev, port))
+ return -EINVAL;
+
+ pdata = &ib_dev->port_data[port];
+ spin_lock_irqsave(&pdata->netdev_lock, flags);
+ old_ndev = rcu_dereference_protected(
+ pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
+ if (old_ndev == ndev) {
+ spin_unlock_irqrestore(&pdata->netdev_lock, flags);
+ return 0;
+ }
+
+ if (ndev)
+ dev_hold(ndev);
+ rcu_assign_pointer(pdata->netdev, ndev);
+ spin_unlock_irqrestore(&pdata->netdev_lock, flags);
+
+ add_ndev_hash(pdata);
+ if (old_ndev)
+ dev_put(old_ndev);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_device_set_netdev);
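A hedged sketch of the caller contract spelled out above, as a driver's netdev notifier might implement it; only ib_device_set_netdev() is from this patch, while my_ibdev, my_ndev, and the port number 1 are placeholders:

static int my_netdev_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER && ndev == my_ndev)
		/* drop the affiliation before the netdev goes away */
		ib_device_set_netdev(my_ibdev, NULL, 1);
	return NOTIFY_DONE;
}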
+
+static void free_netdevs(struct ib_device *ib_dev)
+{
+ unsigned long flags;
+ unsigned int port;
+
+ rdma_for_each_port (ib_dev, port) {
+ struct ib_port_data *pdata = &ib_dev->port_data[port];
+ struct net_device *ndev;
+
+ spin_lock_irqsave(&pdata->netdev_lock, flags);
+ ndev = rcu_dereference_protected(
+ pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
+ if (ndev) {
+ spin_lock(&ndev_hash_lock);
+ hash_del_rcu(&pdata->ndev_hash_link);
+ spin_unlock(&ndev_hash_lock);
+
+ /*
+ * If this is the last dev_put there is still a
+ * synchronize_rcu before the netdev is kfreed, so we
+ * can continue to rely on unlocked pointer
+ * comparisons after the put
+ */
+ rcu_assign_pointer(pdata->netdev, NULL);
+ dev_put(ndev);
+ }
+ spin_unlock_irqrestore(&pdata->netdev_lock, flags);
+ }
+}
+
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+ unsigned int port)
+{
+ struct ib_port_data *pdata;
+ struct net_device *res;
+
+ if (!rdma_is_port_valid(ib_dev, port))
+ return NULL;
+
+ pdata = &ib_dev->port_data[port];
+
+ /*
+ * New drivers should use ib_device_set_netdev(), not the legacy
+ * get_netdev().
+ */
+ if (ib_dev->ops.get_netdev)
+ res = ib_dev->ops.get_netdev(ib_dev, port);
+ else {
+ spin_lock(&pdata->netdev_lock);
+ res = rcu_dereference_protected(
+ pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
+ if (res)
+ dev_hold(res);
+ spin_unlock(&pdata->netdev_lock);
+ }
+
+ /*
+ * If we are starting to unregister, expedite things by preventing
+ * the propagation of an unregistering netdev.
+ */
+ if (res && res->reg_state != NETREG_REGISTERED) {
+ dev_put(res);
+ return NULL;
+ }
+
+ return res;
+}
+
+/**
+ * ib_device_get_by_netdev - Find an IB device associated with a netdev
+ * @ndev: netdev to locate
+ * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
+ *
+ * Find and hold an ib_device that is associated with a netdev via
+ * ib_device_set_netdev(). The caller must call ib_device_put() on the
+ * returned pointer.
+ */
+struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
+ enum rdma_driver_id driver_id)
+{
+ struct ib_device *res = NULL;
+ struct ib_port_data *cur;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
+ (uintptr_t)ndev) {
+ if (rcu_access_pointer(cur->netdev) == ndev &&
+ (driver_id == RDMA_DRIVER_UNKNOWN ||
+ cur->ib_dev->driver_id == driver_id) &&
+ ib_device_try_get(cur->ib_dev)) {
+ res = cur->ib_dev;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return res;
+}
+EXPORT_SYMBOL(ib_device_get_by_netdev);
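A usage sketch for the lookup; the ib_device_put() pairs with the hold taken by ib_device_try_get() inside:

	struct ib_device *ibdev;

	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
	if (ibdev) {
		/* ... use the device ... */
		ib_device_put(ibdev);
	}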
+
/**
* ib_enum_roce_netdev - enumerate all RoCE ports
* @ib_dev : IB device we want to query
@@ -965,21 +1459,12 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_callback cb,
void *cookie)
{
- u8 port;
+ unsigned int port;
- for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
- port++)
+ rdma_for_each_port (ib_dev, port)
if (rdma_protocol_roce(ib_dev, port)) {
- struct net_device *idev = NULL;
-
- if (ib_dev->ops.get_netdev)
- idev = ib_dev->ops.get_netdev(ib_dev, port);
-
- if (idev &&
- idev->reg_state >= NETREG_UNREGISTERED) {
- dev_put(idev);
- idev = NULL;
- }
+ struct net_device *idev =
+ ib_device_get_netdev(ib_dev, port);
if (filter(ib_dev, port, idev, filter_cookie))
cb(ib_dev, port, idev, cookie);
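The rdma_for_each_port() iterator used in this hunk, and in several later ones, replaces the open-coded rdma_start_port()/rdma_end_port() loop; its general shape is:

	unsigned int port;

	rdma_for_each_port(ib_dev, port) {
		if (!rdma_protocol_roce(ib_dev, port))
			continue;
		/* per-port work */
	}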
@@ -1006,11 +1491,12 @@ void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
void *cookie)
{
struct ib_device *dev;
+ unsigned long index;
- down_read(&lists_rwsem);
- list_for_each_entry(dev, &device_list, core_list)
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
- up_read(&lists_rwsem);
+ up_read(&devices_rwsem);
}
/**
@@ -1022,19 +1508,19 @@ void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
struct netlink_callback *cb)
{
+ unsigned long index;
struct ib_device *dev;
unsigned int idx = 0;
int ret = 0;
- down_read(&lists_rwsem);
- list_for_each_entry(dev, &device_list, core_list) {
+ down_read(&devices_rwsem);
+ xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
ret = nldev_cb(dev, skb, cb, idx);
if (ret)
break;
idx++;
}
-
- up_read(&lists_rwsem);
+ up_read(&devices_rwsem);
return ret;
}
@@ -1121,13 +1607,15 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
u8 *port_num, u16 *index)
{
union ib_gid tmp_gid;
- int ret, port, i;
+ unsigned int port;
+ int ret, i;
- for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
+ rdma_for_each_port (device, port) {
if (!rdma_protocol_ib(device, port))
continue;
- for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
+ for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
+ ++i) {
ret = rdma_query_gid(device, port, i, &tmp_gid);
if (ret)
return ret;
@@ -1159,7 +1647,8 @@ int ib_find_pkey(struct ib_device *device,
u16 tmp_pkey;
int partial_ix = -1;
- for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
+ for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
+ ++i) {
ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
if (ret)
return ret;
@@ -1192,6 +1681,7 @@ EXPORT_SYMBOL(ib_find_pkey);
* @gid: A GID that the net_dev uses to communicate.
* @addr: Contains the IP address that the request specified as its
* destination.
+ *
*/
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
u8 port,
@@ -1200,29 +1690,30 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
const struct sockaddr *addr)
{
struct net_device *net_dev = NULL;
- struct ib_client_data *context;
+ unsigned long index;
+ void *client_data;
if (!rdma_protocol_ib(dev, port))
return NULL;
- down_read(&lists_rwsem);
-
- list_for_each_entry(context, &dev->client_data_list, list) {
- struct ib_client *client = context->client;
+ /*
+ * Holding the read side guarantees that the client will not become
+ * unregistered while we are calling get_net_dev_by_params()
+ */
+ down_read(&dev->client_data_rwsem);
+ xan_for_each_marked (&dev->client_data, index, client_data,
+ CLIENT_DATA_REGISTERED) {
+ struct ib_client *client = xa_load(&clients, index);
- if (context->going_down)
+ if (!client || !client->get_net_dev_by_params)
continue;
- if (client->get_net_dev_by_params) {
- net_dev = client->get_net_dev_by_params(dev, port, pkey,
- gid, addr,
- context->data);
- if (net_dev)
- break;
- }
+ net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
+ addr, client_data);
+ if (net_dev)
+ break;
}
-
- up_read(&lists_rwsem);
+ up_read(&dev->client_data_rwsem);
return net_dev;
}
@@ -1238,6 +1729,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
(ptr)->name = ops->name; \
} while (0)
+#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
+
SET_DEVICE_OP(dev_ops, add_gid);
SET_DEVICE_OP(dev_ops, advise_mr);
SET_DEVICE_OP(dev_ops, alloc_dm);
@@ -1261,6 +1754,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_srq);
SET_DEVICE_OP(dev_ops, create_wq);
SET_DEVICE_OP(dev_ops, dealloc_dm);
+ SET_DEVICE_OP(dev_ops, dealloc_driver);
SET_DEVICE_OP(dev_ops, dealloc_fmr);
SET_DEVICE_OP(dev_ops, dealloc_mw);
SET_DEVICE_OP(dev_ops, dealloc_pd);
@@ -1281,6 +1775,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, disassociate_ucontext);
SET_DEVICE_OP(dev_ops, drain_rq);
SET_DEVICE_OP(dev_ops, drain_sq);
+ SET_DEVICE_OP(dev_ops, enable_driver);
+ SET_DEVICE_OP(dev_ops, fill_res_entry);
SET_DEVICE_OP(dev_ops, get_dev_fw_str);
SET_DEVICE_OP(dev_ops, get_dma_mr);
SET_DEVICE_OP(dev_ops, get_hw_stats);
@@ -1290,6 +1786,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_vector_affinity);
SET_DEVICE_OP(dev_ops, get_vf_config);
SET_DEVICE_OP(dev_ops, get_vf_stats);
+ SET_DEVICE_OP(dev_ops, init_port);
SET_DEVICE_OP(dev_ops, map_mr_sg);
SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
@@ -1325,6 +1822,9 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, unmap_fmr);
+
+ SET_OBJ_SIZE(dev_ops, ib_pd);
+ SET_OBJ_SIZE(dev_ops, ib_ucontext);
}
EXPORT_SYMBOL(ib_set_device_ops);
@@ -1443,6 +1943,9 @@ static void __exit ib_core_cleanup(void)
destroy_workqueue(ib_comp_wq);
/* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq);
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(!xa_empty(&clients));
+ WARN_ON(!xa_empty(&devices));
}
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 476abc74178e..732637c913d9 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -87,7 +87,8 @@ static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
- [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
+ [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb},
+ [RDMA_NL_IWPM_HELLO] = {.dump = iwpm_hello_cb}
};
static struct workqueue_struct *iwcm_wq;
@@ -504,7 +505,7 @@ static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
const char *devname = dev_name(&cm_id->device->dev);
const char *ifname = cm_id->device->iwcm->ifname;
- struct iwpm_dev_data pm_reg_msg;
+ struct iwpm_dev_data pm_reg_msg = {};
struct iwpm_sa_data pm_msg;
int status;
@@ -515,8 +516,8 @@ static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
cm_id->m_local_addr = cm_id->local_addr;
cm_id->m_remote_addr = cm_id->remote_addr;
- strncpy(pm_reg_msg.dev_name, devname, sizeof(pm_reg_msg.dev_name));
- strncpy(pm_reg_msg.if_name, ifname, sizeof(pm_reg_msg.if_name));
+ strcpy(pm_reg_msg.dev_name, devname);
+ strcpy(pm_reg_msg.if_name, ifname);
if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
!iwpm_valid_pid())
@@ -525,6 +526,8 @@ static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
cm_id->mapped = true;
pm_msg.loc_addr = cm_id->local_addr;
pm_msg.rem_addr = cm_id->remote_addr;
+ pm_msg.flags = (cm_id->device->iwcm->driver_flags & IW_F_NO_PORT_MAP) ?
+ IWPM_FLAGS_NO_PORT_MAP : 0;
if (active)
status = iwpm_add_and_query_mapping(&pm_msg,
RDMA_NL_IWCM);
@@ -543,7 +546,7 @@ static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
return iwpm_create_mapinfo(&cm_id->local_addr,
&cm_id->m_local_addr,
- RDMA_NL_IWCM);
+ RDMA_NL_IWCM, pm_msg.flags);
}
/*
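For context on the pm_msg.flags plumbing added above: a driver that handles connections on the real TCP port opts out of port mapping by setting IW_F_NO_PORT_MAP in the iw_cm_verbs it registers, which iw_cm_map() translates into IWPM_FLAGS_NO_PORT_MAP. A sketch with the ops elided (the flag and field come from this series; the rest is illustrative):

static struct iw_cm_verbs my_iwcm_verbs = {
	/* .connect, .accept, ... elided */
	.driver_flags = IW_F_NO_PORT_MAP,
};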
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 8861c052155a..2452b0ddcf0d 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -34,18 +34,25 @@
#include "iwpm_util.h"
static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser";
-static int iwpm_ulib_version = 3;
+u16 iwpm_ulib_version = IWPM_UABI_VERSION_MIN;
static int iwpm_user_pid = IWPM_PID_UNDEFINED;
static atomic_t echo_nlmsg_seq;
+/**
+ * iwpm_valid_pid - Check if the userspace iwarp port mapper pid is valid
+ *
+ * Returns true if the pid is greater than zero, otherwise returns false
+ */
int iwpm_valid_pid(void)
{
return iwpm_user_pid > 0;
}
-/*
- * iwpm_register_pid - Send a netlink query to user space
- * for the iwarp port mapper pid
+/**
+ * iwpm_register_pid - Send a netlink query to userspace
+ * to get the iwarp port mapper pid
+ * @pm_msg: Contains driver info to send to the userspace port mapper
+ * @nl_client: The index of the netlink client
*
* nlmsg attributes:
* [IWPM_NLA_REG_PID_SEQ]
@@ -124,12 +131,19 @@ pid_query_error:
return ret;
}
-/*
- * iwpm_add_mapping - Send a netlink add mapping message
- * to the port mapper
+/**
+ * iwpm_add_mapping - Send a netlink add mapping request to
+ * the userspace port mapper
+ * @pm_msg: Contains the local ip/tcp address info to send
+ * @nl_client: The index of the netlink client
+ *
* nlmsg attributes:
* [IWPM_NLA_MANAGE_MAPPING_SEQ]
* [IWPM_NLA_MANAGE_ADDR]
+ * [IWPM_NLA_MANAGE_FLAGS]
+ *
+ * If the request is successful, the pm_msg stores
+ * the port mapper response (mapped address info)
*/
int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
{
@@ -173,6 +187,18 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
if (ret)
goto add_mapping_error;
+ /* If flags are required and we're not V4, then return a quiet error */
+ if (pm_msg->flags && iwpm_ulib_version == IWPM_UABI_VERSION_MIN) {
+ ret = -EINVAL;
+ goto add_mapping_error_nowarn;
+ }
+ if (iwpm_ulib_version > IWPM_UABI_VERSION_MIN) {
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &pm_msg->flags,
+ IWPM_NLA_MANAGE_FLAGS);
+ if (ret)
+ goto add_mapping_error;
+ }
+
nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
@@ -187,6 +213,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
return ret;
add_mapping_error:
pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+add_mapping_error_nowarn:
if (skb)
dev_kfree_skb(skb);
if (nlmsg_request)
@@ -194,13 +221,17 @@ add_mapping_error:
return ret;
}
-/*
- * iwpm_add_and_query_mapping - Send a netlink add and query
- * mapping message to the port mapper
+/**
+ * iwpm_add_and_query_mapping - Send a netlink add and query mapping
+ * request to the userspace port mapper
+ * @pm_msg: Contains the local ip/tcp address info to send
+ * @nl_client: The index of the netlink client
+ *
* nlmsg attributes:
* [IWPM_NLA_QUERY_MAPPING_SEQ]
* [IWPM_NLA_QUERY_LOCAL_ADDR]
* [IWPM_NLA_QUERY_REMOTE_ADDR]
+ * [IWPM_NLA_QUERY_FLAGS]
*/
int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
{
@@ -251,6 +282,18 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
if (ret)
goto query_mapping_error;
+ /* If flags are required and we're not V4, then return a quiet error */
+ if (pm_msg->flags && iwpm_ulib_version == IWPM_UABI_VERSION_MIN) {
+ ret = -EINVAL;
+ goto query_mapping_error_nowarn;
+ }
+ if (iwpm_ulib_version > IWPM_UABI_VERSION_MIN) {
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32), &pm_msg->flags,
+ IWPM_NLA_QUERY_FLAGS);
+ if (ret)
+ goto query_mapping_error;
+ }
+
nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
@@ -264,6 +307,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
return ret;
query_mapping_error:
pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
+query_mapping_error_nowarn:
if (skb)
dev_kfree_skb(skb);
if (nlmsg_request)
@@ -271,9 +315,13 @@ query_mapping_error:
return ret;
}
-/*
- * iwpm_remove_mapping - Send a netlink remove mapping message
- * to the port mapper
+/**
+ * iwpm_remove_mapping - Send a netlink remove mapping request
+ * to the userspace port mapper
+ *
+ * @local_addr: Local ip/tcp address to remove
+ * @nl_client: The index of the netlink client
+ *
* nlmsg attributes:
* [IWPM_NLA_MANAGE_MAPPING_SEQ]
* [IWPM_NLA_MANAGE_ADDR]
@@ -344,9 +392,14 @@ static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = {
[IWPM_NLA_RREG_PID_ERR] = { .type = NLA_U16 }
};
-/*
- * iwpm_register_pid_cb - Process a port mapper response to
- * iwpm_register_pid()
+/**
+ * iwpm_register_pid_cb - Process the port mapper response to
+ * iwpm_register_pid query
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * If successful, the function receives the userspace port mapper pid
+ * which is used in future communication with the port mapper
*/
int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -379,7 +432,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
/* check device name, ulib name and version */
if (strcmp(pm_msg->dev_name, dev_name) ||
strcmp(iwpm_ulib_name, iwpm_name) ||
- iwpm_version != iwpm_ulib_version) {
+ iwpm_version < IWPM_UABI_VERSION_MIN) {
pr_info("%s: Incorrect info (dev = %s name = %s version = %d)\n",
__func__, dev_name, iwpm_name, iwpm_version);
@@ -387,6 +440,10 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
goto register_pid_response_exit;
}
iwpm_user_pid = cb->nlh->nlmsg_pid;
+ iwpm_ulib_version = iwpm_version;
+ if (iwpm_ulib_version < IWPM_UABI_VERSION)
+ pr_warn_once("%s: Down level iwpmd/pid %u. Continuing...\n",
+ __func__, iwpm_user_pid);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
__func__, iwpm_user_pid);
@@ -403,15 +460,19 @@ register_pid_response_exit:
/* netlink attribute policy for the received response to add mapping request */
static const struct nla_policy resp_add_policy[IWPM_NLA_RMANAGE_MAPPING_MAX] = {
- [IWPM_NLA_MANAGE_MAPPING_SEQ] = { .type = NLA_U32 },
- [IWPM_NLA_MANAGE_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_MANAGE_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_RMANAGE_MAPPING_ERR] = { .type = NLA_U16 }
+ [IWPM_NLA_RMANAGE_MAPPING_SEQ] = { .type = NLA_U32 },
+ [IWPM_NLA_RMANAGE_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RMANAGE_MAPPED_LOC_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RMANAGE_MAPPING_ERR] = { .type = NLA_U16 }
};
-/*
- * iwpm_add_mapping_cb - Process a port mapper response to
- * iwpm_add_mapping()
+/**
+ * iwpm_add_mapping_cb - Process the port mapper response to
+ * iwpm_add_mapping request
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
*/
int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -430,7 +491,7 @@ int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
- msg_seq = nla_get_u32(nltb[IWPM_NLA_MANAGE_MAPPING_SEQ]);
+ msg_seq = nla_get_u32(nltb[IWPM_NLA_RMANAGE_MAPPING_SEQ]);
nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
if (!nlmsg_request) {
pr_info("%s: Could not find a matching request (seq = %u)\n",
@@ -439,9 +500,9 @@ int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
}
pm_msg = nlmsg_request->req_buffer;
local_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_MANAGE_ADDR]);
+ nla_data(nltb[IWPM_NLA_RMANAGE_ADDR]);
mapped_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_MANAGE_MAPPED_LOC_ADDR]);
+ nla_data(nltb[IWPM_NLA_RMANAGE_MAPPED_LOC_ADDR]);
if (iwpm_compare_sockaddr(local_sockaddr, &pm_msg->loc_addr)) {
nlmsg_request->err_code = IWPM_USER_LIB_INFO_ERR;
@@ -472,17 +533,23 @@ add_mapping_response_exit:
/* netlink attribute policy for the response to add and query mapping request
* and response with remote address info */
static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
- [IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 },
- [IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_QUERY_REMOTE_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_RQUERY_MAPPED_LOC_ADDR] = { .len = sizeof(struct sockaddr_storage) },
- [IWPM_NLA_RQUERY_MAPPED_REM_ADDR] = { .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_MAPPING_SEQ] = { .type = NLA_U32 },
+ [IWPM_NLA_RQUERY_LOCAL_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_REMOTE_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_MAPPED_LOC_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
+ [IWPM_NLA_RQUERY_MAPPED_REM_ADDR] = {
+ .len = sizeof(struct sockaddr_storage) },
[IWPM_NLA_RQUERY_MAPPING_ERR] = { .type = NLA_U16 }
};
-/*
- * iwpm_add_and_query_mapping_cb - Process a port mapper response to
- * iwpm_add_and_query_mapping()
+/**
+ * iwpm_add_and_query_mapping_cb - Process the port mapper response to
+ * iwpm_add_and_query_mapping request
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
*/
int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
struct netlink_callback *cb)
@@ -502,7 +569,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
return -EINVAL;
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
- msg_seq = nla_get_u32(nltb[IWPM_NLA_QUERY_MAPPING_SEQ]);
+ msg_seq = nla_get_u32(nltb[IWPM_NLA_RQUERY_MAPPING_SEQ]);
nlmsg_request = iwpm_find_nlmsg_request(msg_seq);
if (!nlmsg_request) {
pr_info("%s: Could not find a matching request (seq = %u)\n",
@@ -511,9 +578,9 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
}
pm_msg = nlmsg_request->req_buffer;
local_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+ nla_data(nltb[IWPM_NLA_RQUERY_LOCAL_ADDR]);
remote_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+ nla_data(nltb[IWPM_NLA_RQUERY_REMOTE_ADDR]);
mapped_loc_sockaddr = (struct sockaddr_storage *)
nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
mapped_rem_sockaddr = (struct sockaddr_storage *)
@@ -560,9 +627,13 @@ query_mapping_response_exit:
return 0;
}
-/*
- * iwpm_remote_info_cb - Process a port mapper message, containing
- * the remote connecting peer address info
+/**
+ * iwpm_remote_info_cb - Process remote connecting peer address info, which
+ * the port mapper has received from the connecting peer
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Stores the IPv4/IPv6 address info in a hash table
*/
int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -588,9 +659,9 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
local_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+ nla_data(nltb[IWPM_NLA_RQUERY_LOCAL_ADDR]);
remote_sockaddr = (struct sockaddr_storage *)
- nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+ nla_data(nltb[IWPM_NLA_RQUERY_REMOTE_ADDR]);
mapped_loc_sockaddr = (struct sockaddr_storage *)
nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
mapped_rem_sockaddr = (struct sockaddr_storage *)
@@ -635,8 +706,14 @@ static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
[IWPM_NLA_MAPINFO_ULIB_VER] = { .type = NLA_U16 }
};
-/*
- * iwpm_mapping_info_cb - Process a port mapper request for mapping info
+/**
+ * iwpm_mapping_info_cb - Process a notification that the userspace
+ * port mapper daemon has started
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Using the received port mapper pid, send all the local mapping
+ * info records to the userspace port mapper
*/
int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -655,7 +732,7 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
iwpm_name = (char *)nla_data(nltb[IWPM_NLA_MAPINFO_ULIB_NAME]);
iwpm_version = nla_get_u16(nltb[IWPM_NLA_MAPINFO_ULIB_VER]);
if (strcmp(iwpm_ulib_name, iwpm_name) ||
- iwpm_version != iwpm_ulib_version) {
+ iwpm_version < IWPM_UABI_VERSION_MIN) {
pr_info("%s: Invalid port mapper name = %s version = %d\n",
__func__, iwpm_name, iwpm_version);
return ret;
@@ -669,6 +746,11 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
iwpm_user_pid = cb->nlh->nlmsg_pid;
+
+ if (iwpm_ulib_version < IWPM_UABI_VERSION)
+ pr_warn_once("%s: Down level iwpmd/pid %u. Continuing...\n",
+ __func__, iwpm_user_pid);
+
if (!iwpm_mapinfo_available())
return 0;
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
@@ -684,9 +766,11 @@ static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = {
[IWPM_NLA_MAPINFO_ACK_NUM] = { .type = NLA_U32 }
};
-/*
- * iwpm_ack_mapping_info_cb - Process a port mapper ack for
- * the provided mapping info records
+/**
+ * iwpm_ack_mapping_info_cb - Process the port mapper ack for
+ * the provided local mapping info records
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
*/
int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -712,8 +796,11 @@ static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = {
[IWPM_NLA_ERR_CODE] = { .type = NLA_U16 },
};
-/*
- * iwpm_mapping_error_cb - Process a port mapper error message
+/**
+ * iwpm_mapping_error_cb - Process port mapper notification for error
+ *
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
*/
int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -748,3 +835,46 @@ int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
up(&nlmsg_request->sem);
return 0;
}
+
+/* netlink attribute policy for the received hello request */
+static const struct nla_policy hello_policy[IWPM_NLA_HELLO_MAX] = {
+ [IWPM_NLA_HELLO_ABI_VERSION] = { .type = NLA_U16 }
+};
+
+/**
+ * iwpm_hello_cb - Process a hello message from iwpmd
+ *
+ * @skb: The socket buffer
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Using the received port mapper pid, send the kernel's abi_version
+ * after adjusting it to support the iwpmd version.
+ */
+int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nlattr *nltb[IWPM_NLA_HELLO_MAX];
+ const char *msg_type = "Hello request";
+ u8 nl_client;
+ u16 abi_version;
+ int ret = -EINVAL;
+
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_HELLO_MAX, hello_policy, nltb,
+ msg_type)) {
+ pr_info("%s: Unable to parse nlmsg\n", __func__);
+ return ret;
+ }
+ abi_version = nla_get_u16(nltb[IWPM_NLA_HELLO_ABI_VERSION]);
+ nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+ if (!iwpm_valid_client(nl_client)) {
+ pr_info("%s: Invalid port mapper client = %d\n",
+ __func__, nl_client);
+ return ret;
+ }
+ iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ iwpm_ulib_version = min_t(u16, IWPM_UABI_VERSION, abi_version);
+ pr_debug("Using ABI version %u\n", iwpm_ulib_version);
+ iwpm_user_pid = cb->nlh->nlmsg_pid;
+ ret = iwpm_send_hello(nl_client, iwpm_user_pid, iwpm_ulib_version);
+ return ret;
+}
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index cdb63f3f4de7..a5d2a20ee697 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -51,6 +51,12 @@ static DEFINE_SPINLOCK(iwpm_reminfo_lock);
static DEFINE_MUTEX(iwpm_admin_lock);
static struct iwpm_admin_data iwpm_admin;
+/**
+ * iwpm_init - Allocate resources for the iwarp port mapper
+ * @nl_client: The index of the netlink client
+ *
+ * Should be called when network interface goes up.
+ */
int iwpm_init(u8 nl_client)
{
int ret = 0;
@@ -87,6 +93,12 @@ init_exit:
static void free_hash_bucket(void);
static void free_reminfo_bucket(void);
+/**
+ * iwpm_exit - Deallocate resources for the iwarp port mapper
+ * @nl_client: The index of the netlink client
+ *
+ * Should be called when network interface goes down.
+ */
int iwpm_exit(u8 nl_client)
{
@@ -112,9 +124,17 @@ int iwpm_exit(u8 nl_client)
static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
struct sockaddr_storage *);
+/**
+ * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
+ * info in a hash table
+ * @local_sockaddr: Local ip/tcp address
+ * @mapped_sockaddr: Mapped local ip/tcp address
+ * @nl_client: The index of the netlink client
+ * @map_flags: IWPM mapping flags
+ */
int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
struct sockaddr_storage *mapped_sockaddr,
- u8 nl_client)
+ u8 nl_client, u32 map_flags)
{
struct hlist_head *hash_bucket_head = NULL;
struct iwpm_mapping_info *map_info;
@@ -132,6 +152,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
sizeof(struct sockaddr_storage));
map_info->nl_client = nl_client;
+ map_info->map_flags = map_flags;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
if (iwpm_hash_bucket) {
@@ -150,6 +171,15 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
return ret;
}
+/**
+ * iwpm_remove_mapinfo - Remove local and mapped IPv4/IPv6 address
+ * info from the hash table
+ * @local_sockaddr: Local ip/tcp address
+ * @mapped_local_addr: Mapped local ip/tcp address
+ *
+ * Returns an error code if the mapping info is not found in the hash table,
+ * otherwise returns 0
+ */
int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
struct sockaddr_storage *mapped_local_addr)
{
@@ -250,6 +280,17 @@ void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
}
+/**
+ * iwpm_get_remote_info - Get the remote connecting peer address info
+ *
+ * @mapped_loc_addr: Mapped local address of the listening peer
+ * @mapped_rem_addr: Mapped remote address of the connecting peer
+ * @remote_addr: To store the remote address of the connecting peer
+ * @nl_client: The index of the netlink client
+ *
+ * The remote address info is retrieved and provided to the client through
+ * @remote_addr. After that, the entry is removed from the hash table
+ */
int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
struct sockaddr_storage *mapped_rem_addr,
struct sockaddr_storage *remote_addr,
@@ -686,6 +727,14 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
if (ret)
goto send_mapping_info_unlock;
+ if (iwpm_ulib_version > IWPM_UABI_VERSION_MIN) {
+ ret = ibnl_put_attr(skb, nlh, sizeof(u32),
+ &map_info->map_flags,
+ IWPM_NLA_MAPINFO_FLAGS);
+ if (ret)
+ goto send_mapping_info_unlock;
+ }
+
nlmsg_end(skb, nlh);
iwpm_print_sockaddr(&map_info->local_sockaddr,
@@ -754,3 +803,38 @@ int iwpm_mapinfo_available(void)
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
return full_bucket;
}
+
+int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ const char *err_str = "";
+ int ret = -EINVAL;
+
+ skb = iwpm_create_nlmsg(RDMA_NL_IWPM_HELLO, &nlh, nl_client);
+ if (!skb) {
+ err_str = "Unable to create a nlmsg";
+ goto hello_num_error;
+ }
+ nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
+ err_str = "Unable to put attribute of abi_version into nlmsg";
+ ret = ibnl_put_attr(skb, nlh, sizeof(u16), &abi_version,
+ IWPM_NLA_HELLO_ABI_VERSION);
+ if (ret)
+ goto hello_num_error;
+ nlmsg_end(skb, nlh);
+
+ ret = rdma_nl_unicast(skb, iwpm_pid);
+ if (ret) {
+ skb = NULL;
+ err_str = "Unable to send a nlmsg";
+ goto hello_num_error;
+ }
+ pr_debug("%s: Sent hello abi_version = %u\n", __func__, abi_version);
+ return 0;
+hello_num_error:
+ pr_info("%s: %s\n", __func__, err_str);
+ if (skb)
+ dev_kfree_skb(skb);
+ return ret;
+}
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index af1fc14a0d3d..7e2bcc72f66c 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -78,6 +78,7 @@ struct iwpm_mapping_info {
struct sockaddr_storage local_sockaddr;
struct sockaddr_storage mapped_sockaddr;
u8 nl_client;
+ u32 map_flags;
};
struct iwpm_remote_info {
@@ -266,4 +267,15 @@ int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
* @msg: Message to print
*/
void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg);
+
+/**
+ * iwpm_send_hello - Send hello response to iwpmd
+ *
+ * @nl_client: The index of the netlink client
+ * @abi_version: The kernel's abi_version
+ *
+ * Returns 0 on success or a negative error code
+ */
+int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version);
+extern u16 iwpm_ulib_version;
#endif
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 7870823bac47..e742a6a2c138 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3326,9 +3326,9 @@ error:
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
- int i;
+ unsigned int i;
- for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
+ rdma_for_each_port (device, i) {
if (!rdma_cap_ib_mad(device, i))
continue;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 724f5a62e82f..eecfc0b377c9 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -56,7 +56,6 @@ EXPORT_SYMBOL(rdma_nl_chk_listeners);
static bool is_nl_msg_valid(unsigned int type, unsigned int op)
{
static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = {
- [RDMA_NL_RDMA_CM] = RDMA_NL_RDMA_CM_NUM_OPS,
[RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS,
[RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS,
[RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS,
@@ -181,8 +180,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
/* FIXME: Convert IWCM to properly handle doit callbacks */
- if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
- index == RDMA_NL_IWCM) {
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
struct netlink_dump_control c = {
.dump = cb_table[op].dump,
};
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 3c97a8b6bf1e..11ed58d3fce5 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -33,12 +33,14 @@
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
+#include <linux/mutex.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"
#include "cma_priv.h"
+#include "restrack.h"
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
@@ -107,6 +109,13 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
[RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -262,9 +271,7 @@ static int fill_port_info(struct sk_buff *msg,
if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
return -EMSGSIZE;
- if (device->ops.get_netdev)
- netdev = device->ops.get_netdev(device, port);
-
+ netdev = ib_device_get_netdev(device, port);
if (netdev && net_eq(dev_net(netdev), net)) {
ret = nla_put_u32(msg,
RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
@@ -314,7 +321,6 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
[RDMA_RESTRACK_CTX] = "ctx",
};
- struct rdma_restrack_root *res = &device->res;
struct nlattr *table_attr;
int ret, i, curr;
@@ -328,7 +334,8 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
if (!names[i])
continue;
- curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
+ curr = rdma_restrack_count(device, i,
+ task_active_pid_ns(current));
ret = fill_res_info_entry(msg, names[i], curr);
if (ret)
goto err;
@@ -361,13 +368,20 @@ static int fill_res_name_pid(struct sk_buff *msg,
return 0;
}
-static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
+static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (!dev->ops.fill_res_entry)
+ return false;
+ return dev->ops.fill_res_entry(msg, res);
+}
+
+static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_qp *qp = container_of(res, struct ib_qp, res);
- struct rdma_restrack_root *resroot = &qp->device->res;
+ struct ib_device *dev = qp->device;
struct ib_qp_init_attr qp_init_attr;
- struct nlattr *entry_attr;
struct ib_qp_attr qp_attr;
int ret;
@@ -376,11 +390,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
return ret;
if (port && port != qp_attr.port_num)
- return 0;
-
- entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
- if (!entry_attr)
- goto out;
+ return -EAGAIN;
/* In create_qp() port is not set yet */
if (qp_attr.port_num &&
@@ -412,38 +422,32 @@ static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
goto err;
+ if (!rdma_is_kernel_res(res) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
+ goto err;
+
if (fill_res_name_pid(msg, res))
goto err;
- if (resroot->fill_res_entry(msg, res))
+ if (fill_res_entry(dev, msg, res))
goto err;
- nla_nest_end(msg, entry_attr);
return 0;
-err:
- nla_nest_cancel(msg, entry_attr);
-out:
- return -EMSGSIZE;
+err: return -EMSGSIZE;
}
-static int fill_res_cm_id_entry(struct sk_buff *msg,
- struct netlink_callback *cb,
+static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
struct rdma_id_private *id_priv =
container_of(res, struct rdma_id_private, res);
- struct rdma_restrack_root *resroot = &id_priv->id.device->res;
+ struct ib_device *dev = id_priv->id.device;
struct rdma_cm_id *cm_id = &id_priv->id;
- struct nlattr *entry_attr;
if (port && port != cm_id->port_num)
return 0;
- entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY);
- if (!entry_attr)
- goto out;
-
if (cm_id->port_num &&
nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
goto err;
@@ -472,31 +476,25 @@ static int fill_res_cm_id_entry(struct sk_buff *msg,
&cm_id->route.addr.dst_addr))
goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
+ goto err;
+
if (fill_res_name_pid(msg, res))
goto err;
- if (resroot->fill_res_entry(msg, res))
+ if (fill_res_entry(dev, msg, res))
goto err;
- nla_nest_end(msg, entry_attr);
return 0;
-err:
- nla_nest_cancel(msg, entry_attr);
-out:
- return -EMSGSIZE;
+err: return -EMSGSIZE;
}
-static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
+static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_cq *cq = container_of(res, struct ib_cq, res);
- struct rdma_restrack_root *resroot = &cq->device->res;
- struct nlattr *entry_attr;
-
- entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
- if (!entry_attr)
- goto out;
+ struct ib_device *dev = cq->device;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
goto err;
@@ -509,33 +507,31 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
+ goto err;
+ if (!rdma_is_kernel_res(res) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
+ cq->uobject->context->res.id))
+ goto err;
+
if (fill_res_name_pid(msg, res))
goto err;
- if (resroot->fill_res_entry(msg, res))
+ if (fill_res_entry(dev, msg, res))
goto err;
- nla_nest_end(msg, entry_attr);
return 0;
-err:
- nla_nest_cancel(msg, entry_attr);
-out:
- return -EMSGSIZE;
+err: return -EMSGSIZE;
}
-static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
+static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_mr *mr = container_of(res, struct ib_mr, res);
- struct rdma_restrack_root *resroot = &mr->pd->device->res;
- struct nlattr *entry_attr;
-
- entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
- if (!entry_attr)
- goto out;
+ struct ib_device *dev = mr->pd->device;
- if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
+ if (has_cap_net_admin) {
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
goto err;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
@@ -546,33 +542,31 @@ static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
RDMA_NLDEV_ATTR_PAD))
goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
+ goto err;
+
+ if (!rdma_is_kernel_res(res) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
+ goto err;
+
if (fill_res_name_pid(msg, res))
goto err;
- if (resroot->fill_res_entry(msg, res))
+ if (fill_res_entry(dev, msg, res))
goto err;
- nla_nest_end(msg, entry_attr);
return 0;
-err:
- nla_nest_cancel(msg, entry_attr);
-out:
- return -EMSGSIZE;
+err: return -EMSGSIZE;
}
-static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
+static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_pd *pd = container_of(res, struct ib_pd, res);
- struct rdma_restrack_root *resroot = &pd->device->res;
- struct nlattr *entry_attr;
+ struct ib_device *dev = pd->device;
- entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
- if (!entry_attr)
- goto out;
-
- if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
+ if (has_cap_net_admin) {
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
pd->local_dma_lkey))
goto err;
@@ -585,19 +579,23 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
+ goto err;
+
+ if (!rdma_is_kernel_res(res) &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
+ pd->uobject->context->res.id))
+ goto err;
+
if (fill_res_name_pid(msg, res))
goto err;
- if (resroot->fill_res_entry(msg, res))
+ if (fill_res_entry(dev, msg, res))
goto err;
- nla_nest_end(msg, entry_attr);
return 0;
-err:
- nla_nest_cancel(msg, entry_attr);
-out:
- return -EMSGSIZE;
+err: return -EMSGSIZE;
}
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -777,7 +775,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
u32 idx = 0;
u32 ifindex;
int err;
- u32 p;
+ unsigned int p;
err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
nldev_policy, NULL);
@@ -789,7 +787,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
if (!device)
return -EINVAL;
- for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
+ rdma_for_each_port (device, p) {
/*
* The dumpit function returns all information from specific
* index. This specific index is taken from the netlink
@@ -905,10 +903,17 @@ static int nldev_res_get_dumpit(struct sk_buff *skb,
}
struct nldev_fill_res_entry {
- int (*fill_res_func)(struct sk_buff *msg, struct netlink_callback *cb,
+ int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, u32 port);
enum rdma_nldev_attr nldev_attr;
enum rdma_nldev_command nldev_cmd;
+ u8 flags;
+ u32 entry;
+ u32 id;
+};
+
+enum nldev_res_flags {
+ NLDEV_PER_DEV = 1 << 0,
};
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
@@ -916,29 +921,136 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.fill_res_func = fill_res_qp_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
+ .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_LQPN,
},
[RDMA_RESTRACK_CM_ID] = {
.fill_res_func = fill_res_cm_id_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
+ .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_CM_IDN,
},
[RDMA_RESTRACK_CQ] = {
.fill_res_func = fill_res_cq_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_CQN,
},
[RDMA_RESTRACK_MR] = {
.fill_res_func = fill_res_mr_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_MRN,
},
[RDMA_RESTRACK_PD] = {
.fill_res_func = fill_res_pd_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
+ .flags = NLDEV_PER_DEV,
+ .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
+ .id = RDMA_NLDEV_ATTR_RES_PDN,
},
};
+static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res)
+{
+ /*
+ * 1. Kernel resources are visible only in the init namespace
+ * 2. Otherwise, present only resources visible in the caller's namespace
+ */
+ if (rdma_is_kernel_res(res))
+ return task_active_pid_ns(current) == &init_pid_ns;
+ return task_active_pid_ns(current) == task_active_pid_ns(res->task);
+}
+
+static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ enum rdma_restrack_type res_type)
+{
+ const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct rdma_restrack_entry *res;
+ struct ib_device *device;
+ u32 index, id, port = 0;
+ bool has_cap_net_admin;
+ struct sk_buff *msg;
+ int ret;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if ((port && fe->flags & NLDEV_PER_DEV) ||
+ (!port && ~fe->flags & NLDEV_PER_DEV)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ id = nla_get_u32(tb[fe->id]);
+ res = rdma_restrack_get_byid(device, res_type, id);
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ goto err;
+ }
+
+ if (!is_visible_in_pid_ns(res)) {
+ ret = -ENOENT;
+ goto err_get;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
+ 0, 0);
+
+ if (fill_nldev_handle(msg, device)) {
+ ret = -EMSGSIZE;
+ goto err_free;
+ }
+
+ has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
+ ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
+ rdma_restrack_put(res);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err_get:
+ rdma_restrack_put(res);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
static int res_get_common_dumpit(struct sk_buff *skb,
struct netlink_callback *cb,
enum rdma_restrack_type res_type)
@@ -946,11 +1058,15 @@ static int res_get_common_dumpit(struct sk_buff *skb,
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
struct rdma_restrack_entry *res;
+ struct rdma_restrack_root *rt;
int err, ret = 0, idx = 0;
struct nlattr *table_attr;
+ struct nlattr *entry_attr;
struct ib_device *device;
int start = cb->args[0];
+ bool has_cap_net_admin;
struct nlmsghdr *nlh;
+ unsigned long id;
u32 index, port = 0;
bool filled = false;
@@ -998,55 +1114,51 @@ static int res_get_common_dumpit(struct sk_buff *skb,
goto err;
}
- down_read(&device->res.rwsem);
- hash_for_each_possible(device->res.hash, res, node, res_type) {
- if (idx < start)
- goto next;
+ has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);
- if ((rdma_is_kernel_res(res) &&
- task_active_pid_ns(current) != &init_pid_ns) ||
- (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
- task_active_pid_ns(res->task)))
- /*
- * 1. Kern resources should be visible in init
- * namspace only
- * 2. Present only resources visible in the current
- * namespace
- */
- goto next;
+ rt = &device->res[res_type];
+ xa_lock(&rt->xa);
+ /*
+ * FIXME: if skipping ahead turns out to be common, this loop should
+ * use xas_for_each & xas_pause to optimize; we can have a lot of
+ * objects.
+ */
+ xa_for_each(&rt->xa, id, res) {
+ if (!is_visible_in_pid_ns(res))
+ continue;
- if (!rdma_restrack_get(res))
- /*
- * Resource is under release now, but we are not
- * relesing lock now, so it will be released in
- * our next pass, once we will get ->next pointer.
- */
+ if (idx < start || !rdma_restrack_get(res))
goto next;
+ xa_unlock(&rt->xa);
+
filled = true;
- up_read(&device->res.rwsem);
- ret = fe->fill_res_func(skb, cb, res, port);
- down_read(&device->res.rwsem);
- /*
- * Return resource back, but it won't be released till
- * the &device->res.rwsem will be released for write.
- */
+ entry_attr = nla_nest_start(skb, fe->entry);
+ if (!entry_attr) {
+ ret = -EMSGSIZE;
+ rdma_restrack_put(res);
+ goto msg_full;
+ }
+
+ ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
rdma_restrack_put(res);
- if (ret == -EMSGSIZE)
- /*
- * There is a chance to optimize here.
- * It can be done by using list_prepare_entry
- * and list_for_each_entry_continue afterwards.
- */
- break;
- if (ret)
+ if (ret) {
+ nla_nest_cancel(skb, entry_attr);
+ if (ret == -EMSGSIZE)
+ goto msg_full;
+ if (ret == -EAGAIN)
+ goto again;
goto res_err;
+ }
+ nla_nest_end(skb, entry_attr);
+again: xa_lock(&rt->xa);
next: idx++;
}
- up_read(&device->res.rwsem);
+ xa_unlock(&rt->xa);
+msg_full:
nla_nest_end(skb, table_attr);
nlmsg_end(skb, nlh);
cb->args[0] = idx;
@@ -1063,7 +1175,6 @@ next: idx++;
res_err:
nla_nest_cancel(skb, table_attr);
- up_read(&device->res.rwsem);
err:
nlmsg_cancel(skb, nlh);
@@ -1073,34 +1184,132 @@ err_index:
return ret;
}
-static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
+#define RES_GET_FUNCS(name, type) \
+ static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
+ struct netlink_callback *cb) \
+ { \
+ return res_get_common_dumpit(skb, cb, type); \
+ } \
+ static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
+ struct nlmsghdr *nlh, \
+ struct netlink_ext_ack *extack) \
+ { \
+ return res_get_common_doit(skb, nlh, extack, type); \
+ }
+
+RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
+RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
+RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
+RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
+RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
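Expanded by hand for one instance, RES_GET_FUNCS(qp, RDMA_RESTRACK_QP) produces exactly this pair of trampolines:

static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
}

static int nldev_res_get_qp_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	return res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_QP);
}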
+
+static LIST_HEAD(link_ops);
+static DECLARE_RWSEM(link_ops_rwsem);
+
+static const struct rdma_link_ops *link_ops_get(const char *type)
{
- return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
+ const struct rdma_link_ops *ops;
+
+ list_for_each_entry(ops, &link_ops, list) {
+ if (!strcmp(ops->type, type))
+ goto out;
+ }
+ ops = NULL;
+out:
+ return ops;
}
-static int nldev_res_get_cm_id_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
+void rdma_link_register(struct rdma_link_ops *ops)
{
- return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CM_ID);
+ down_write(&link_ops_rwsem);
+ if (WARN_ON_ONCE(link_ops_get(ops->type)))
+ goto out;
+ list_add(&ops->list, &link_ops);
+out:
+ up_write(&link_ops_rwsem);
}
+EXPORT_SYMBOL(rdma_link_register);
-static int nldev_res_get_cq_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
+void rdma_link_unregister(struct rdma_link_ops *ops)
{
- return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CQ);
+ down_write(&link_ops_rwsem);
+ list_del(&ops->list);
+ up_write(&link_ops_rwsem);
}
+EXPORT_SYMBOL(rdma_link_unregister);
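A sketch of a link provider plugging in (rdma_link_ops and the register/unregister calls are from this patch; the "rxe" type string, the newlink body, and the module alias are illustrative, though the alias must match the request_module("rdma-link-%s", type) pattern in nldev_newlink() below):

static int my_newlink(const char *ibdev_name, struct net_device *ndev)
{
	/* create and register an ib_device named ibdev_name, bound to ndev */
	return 0;
}

static struct rdma_link_ops my_link_ops = {
	.type	 = "rxe",
	.newlink = my_newlink,
};

rdma_link_register(&my_link_ops);	/* module init */
rdma_link_unregister(&my_link_ops);	/* module exit */

MODULE_ALIAS("rdma-link-rxe");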
-static int nldev_res_get_mr_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
+static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
- return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR);
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ char ibdev_name[IB_DEVICE_NAME_MAX];
+ const struct rdma_link_ops *ops;
+ char ndev_name[IFNAMSIZ];
+ struct net_device *ndev;
+ char type[IFNAMSIZ];
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
+ !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
+ return -EINVAL;
+
+ nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ sizeof(ibdev_name));
+ if (strchr(ibdev_name, '%'))
+ return -EINVAL;
+
+ nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
+ nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
+ sizeof(ndev_name));
+
+ ndev = dev_get_by_name(&init_net, ndev_name);
+ if (!ndev)
+ return -ENODEV;
+
+ down_read(&link_ops_rwsem);
+ ops = link_ops_get(type);
+#ifdef CONFIG_MODULES
+ if (!ops) {
+ up_read(&link_ops_rwsem);
+ request_module("rdma-link-%s", type);
+ down_read(&link_ops_rwsem);
+ ops = link_ops_get(type);
+ }
+#endif
+ err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
+ up_read(&link_ops_rwsem);
+ dev_put(ndev);
+
+ return err;
}
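From userspace, these two handlers are reached through the iproute2 rdma tool; to the best of my knowledge the invocations look like "rdma link add rxe0 type rxe netdev eth0" and "rdma link delete rxe0" (device and netdev names illustrative).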
-static int nldev_res_get_pd_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
+static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
- return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_PD);
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ u32 index;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
+ ib_device_put(device);
+ return -EINVAL;
+ }
+
+ ib_unregister_device_and_put(device);
+ return 0;
}
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
@@ -1112,6 +1321,14 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_set_doit,
.flags = RDMA_NL_ADMIN_PERM,
},
+ [RDMA_NLDEV_CMD_NEWLINK] = {
+ .doit = nldev_newlink,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
+ [RDMA_NLDEV_CMD_DELLINK] = {
+ .doit = nldev_dellink,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
[RDMA_NLDEV_CMD_PORT_GET] = {
.doit = nldev_port_get_doit,
.dump = nldev_port_get_dumpit,
@@ -1121,28 +1338,23 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.dump = nldev_res_get_dumpit,
},
[RDMA_NLDEV_CMD_RES_QP_GET] = {
+ .doit = nldev_res_get_qp_doit,
.dump = nldev_res_get_qp_dumpit,
- /*
- * .doit is not implemented yet for two reasons:
- * 1. It is not needed yet.
- * 2. There is a need to provide identifier, while it is easy
- * for the QPs (device index + port index + LQPN), it is not
- * the case for the rest of resources (PD and CQ). Because it
- * is better to provide similar interface for all resources,
- * let's wait till we will have other resources implemented
- * too.
- */
},
[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
+ .doit = nldev_res_get_cm_id_doit,
.dump = nldev_res_get_cm_id_dumpit,
},
[RDMA_NLDEV_CMD_RES_CQ_GET] = {
+ .doit = nldev_res_get_cq_doit,
.dump = nldev_res_get_cq_dumpit,
},
[RDMA_NLDEV_CMD_RES_MR_GET] = {
+ .doit = nldev_res_get_mr_doit,
.dump = nldev_res_get_mr_dumpit,
},
[RDMA_NLDEV_CMD_RES_PD_GET] = {
+ .doit = nldev_res_get_pd_doit,
.dump = nldev_res_get_pd_dumpit,
},
};
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 6c4747e61d2b..778375ff664e 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -438,6 +438,38 @@ free:
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
}
+struct ib_uobject *_uobj_get_read(enum uverbs_default_objects type,
+ u32 object_id,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj;
+
+ uobj = rdma_lookup_get_uobject(uobj_get_type(attrs, type), attrs->ufile,
+ object_id, UVERBS_LOOKUP_READ);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ attrs->context = uobj->context;
+
+ return uobj;
+}
+
+struct ib_uobject *_uobj_get_write(enum uverbs_default_objects type,
+ u32 object_id,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj;
+
+ uobj = rdma_lookup_get_uobject(uobj_get_type(attrs, type), attrs->ufile,
+ object_id, UVERBS_LOOKUP_WRITE);
+
+ if (IS_ERR(uobj))
+ return uobj;
+
+ attrs->context = uobj->context;
+
+ return uobj;
+}
static struct ib_uobject *
alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
@@ -801,6 +833,7 @@ void uverbs_close_fd(struct file *f)
/* Pairs with filp->private_data in alloc_begin_fd_uobject */
uverbs_uobject_put(uobj);
}
+EXPORT_SYMBOL(uverbs_close_fd);
/*
* Drop the ucontext off the ufile and completely disconnect it from the
@@ -811,7 +844,6 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
{
struct ib_ucontext *ucontext = ufile->ucontext;
struct ib_device *ib_dev = ucontext->device;
- int ret;
/*
* If we are closing the FD then the user mmap VMAs must have
@@ -829,12 +861,8 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
rdma_restrack_del(&ucontext->res);
- /*
- * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
- * the error return.
- */
- ret = ib_dev->ops.dealloc_ucontext(ucontext);
- WARN_ON(ret);
+ ib_dev->ops.dealloc_ucontext(ucontext);
+ kfree(ucontext);
ufile->ucontext = NULL;
}
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 46a5c553c624..3b5ff2f7b5f8 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -11,17 +11,29 @@
#include <linux/pid_namespace.h>
#include "cma_priv.h"
+#include "restrack.h"
-static int fill_res_noop(struct sk_buff *msg,
- struct rdma_restrack_entry *entry)
+/**
+ * rdma_restrack_init() - initialize and allocate resource tracking
+ * @dev: IB device
+ *
+ * Return: 0 on success
+ */
+int rdma_restrack_init(struct ib_device *dev)
{
- return 0;
-}
+ struct rdma_restrack_root *rt;
+ int i;
-void rdma_restrack_init(struct rdma_restrack_root *res)
-{
- init_rwsem(&res->rwsem);
- res->fill_res_entry = fill_res_noop;
+ dev->res = kcalloc(RDMA_RESTRACK_MAX, sizeof(*rt), GFP_KERNEL);
+ if (!dev->res)
+ return -ENOMEM;
+
+ rt = dev->res;
+
+ for (i = 0; i < RDMA_RESTRACK_MAX; i++)
+ xa_init_flags(&rt[i].xa, XA_FLAGS_ALLOC);
+
+ return 0;
}
static const char *type2str(enum rdma_restrack_type type)
@@ -38,55 +50,79 @@ static const char *type2str(enum rdma_restrack_type type)
return names[type];
};
-void rdma_restrack_clean(struct rdma_restrack_root *res)
+/**
+ * rdma_restrack_clean() - clean resource tracking
+ * @dev: IB device
+ */
+void rdma_restrack_clean(struct ib_device *dev)
{
+ struct rdma_restrack_root *rt = dev->res;
struct rdma_restrack_entry *e;
char buf[TASK_COMM_LEN];
- struct ib_device *dev;
+ bool found = false;
const char *owner;
- int bkt;
-
- if (hash_empty(res->hash))
- return;
-
- dev = container_of(res, struct ib_device, res);
- pr_err("restrack: %s", CUT_HERE);
- dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
- hash_for_each(res->hash, bkt, e, node) {
- if (rdma_is_kernel_res(e)) {
- owner = e->kern_name;
- } else {
- /*
- * There is no need to call get_task_struct here,
- * because we can be here only if there are more
- * get_task_struct() call than put_task_struct().
- */
- get_task_comm(buf, e->task);
- owner = buf;
+ int i;
+
+ for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
+ struct xarray *xa = &dev->res[i].xa;
+
+ if (!xa_empty(xa)) {
+ unsigned long index;
+
+ if (!found) {
+ pr_err("restrack: %s", CUT_HERE);
+ dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
+ }
+ xa_for_each(xa, index, e) {
+ if (rdma_is_kernel_res(e)) {
+ owner = e->kern_name;
+ } else {
+ /*
+ * There is no need to call get_task_struct here,
+ * because we can be here only if there are more
+ * get_task_struct() calls than put_task_struct() calls.
+ */
+ get_task_comm(buf, e->task);
+ owner = buf;
+ }
+
+ pr_err("restrack: %s %s object allocated by %s is not freed\n",
+ rdma_is_kernel_res(e) ? "Kernel" :
+ "User",
+ type2str(e->type), owner);
+ }
+ found = true;
}
-
- pr_err("restrack: %s %s object allocated by %s is not freed\n",
- rdma_is_kernel_res(e) ? "Kernel" : "User",
- type2str(e->type), owner);
+ xa_destroy(xa);
}
- pr_err("restrack: %s", CUT_HERE);
+ if (found)
+ pr_err("restrack: %s", CUT_HERE);
+
+ kfree(rt);
}
-int rdma_restrack_count(struct rdma_restrack_root *res,
- enum rdma_restrack_type type,
+/**
+ * rdma_restrack_count() - return the current usage of a specific object type
+ * @dev: IB device
+ * @type: type of object to count
+ * @ns: PID namespace
+ */
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
struct pid_namespace *ns)
{
+ struct rdma_restrack_root *rt = &dev->res[type];
struct rdma_restrack_entry *e;
+ XA_STATE(xas, &rt->xa, 0);
u32 cnt = 0;
- down_read(&res->rwsem);
- hash_for_each_possible(res->hash, e, node, type) {
+ xa_lock(&rt->xa);
+ xas_for_each(&xas, e, U32_MAX) {
if (ns == &init_pid_ns ||
(!rdma_is_kernel_res(e) &&
ns == task_active_pid_ns(e->task)))
cnt++;
}
- up_read(&res->rwsem);
+ xa_unlock(&rt->xa);
return cnt;
}
EXPORT_SYMBOL(rdma_restrack_count);
@@ -157,28 +193,29 @@ EXPORT_SYMBOL(rdma_restrack_set_task);
static void rdma_restrack_add(struct rdma_restrack_entry *res)
{
struct ib_device *dev = res_to_dev(res);
+ struct rdma_restrack_root *rt;
+ int ret;
if (!dev)
return;
- if (res->type != RDMA_RESTRACK_CM_ID || rdma_is_kernel_res(res))
- res->task = NULL;
-
- if (!rdma_is_kernel_res(res)) {
- if (!res->task)
- rdma_restrack_set_task(res, NULL);
- res->kern_name = NULL;
- } else {
- set_kern_name(res);
- }
+ rt = &dev->res[res->type];
kref_init(&res->kref);
init_completion(&res->comp);
- res->valid = true;
+ if (res->type != RDMA_RESTRACK_QP)
+ ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
+ &rt->next_id, GFP_KERNEL);
+ else {
+ /* Special case to ensure that LQPN points to the right QP */
+ struct ib_qp *qp = container_of(res, struct ib_qp, res);
+
+ ret = xa_insert(&rt->xa, qp->qp_num, res, GFP_KERNEL);
+ res->id = ret ? 0 : qp->qp_num;
+ }
- down_write(&dev->res.rwsem);
- hash_add(dev->res.hash, &res->node, res->type);
- up_write(&dev->res.rwsem);
+ if (!ret)
+ res->valid = true;
}
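The two insertion paths above differ on purpose: most resource types get a kernel-chosen ID via cyclic allocation, while QPs are inserted at their LQPN so that a later lookup by LQPN finds the right object. A standalone sketch of the same XArray pattern (names are illustrative, not part of the patch):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(res_xa);	/* initialized with XA_FLAGS_ALLOC */
static u32 res_next_id;

static int track_generic(void *entry, u32 *id)
{
	/* Kernel picks the next free 32-bit ID, resuming after res_next_id. */
	return xa_alloc_cyclic(&res_xa, id, entry, xa_limit_32b,
			       &res_next_id, GFP_KERNEL);
}

static int track_by_lqpn(void *entry, u32 lqpn)
{
	/* Caller-chosen index; returns -EBUSY if the slot is occupied. */
	return xa_insert(&res_xa, lqpn, entry, GFP_KERNEL);
}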
/**
@@ -187,6 +224,8 @@ static void rdma_restrack_add(struct rdma_restrack_entry *res)
*/
void rdma_restrack_kadd(struct rdma_restrack_entry *res)
{
+ res->task = NULL;
+ set_kern_name(res);
res->user = false;
rdma_restrack_add(res);
}
@@ -198,6 +237,13 @@ EXPORT_SYMBOL(rdma_restrack_kadd);
*/
void rdma_restrack_uadd(struct rdma_restrack_entry *res)
{
+ if (res->type != RDMA_RESTRACK_CM_ID)
+ res->task = NULL;
+
+ if (!res->task)
+ rdma_restrack_set_task(res, NULL);
+ res->kern_name = NULL;
+
res->user = true;
rdma_restrack_add(res);
}
@@ -209,6 +255,31 @@ int __must_check rdma_restrack_get(struct rdma_restrack_entry *res)
}
EXPORT_SYMBOL(rdma_restrack_get);
+/**
+ * rdma_restrack_get_byid() - translate from ID to restrack object
+ * @dev: IB device
+ * @type: resource track type
+ * @id: ID to look up
+ *
+ * Return: Pointer to the restrack entry, or ERR_PTR(-ENOENT) if not found.
+ */
+struct rdma_restrack_entry *
+rdma_restrack_get_byid(struct ib_device *dev,
+ enum rdma_restrack_type type, u32 id)
+{
+ struct rdma_restrack_root *rt = &dev->res[type];
+ struct rdma_restrack_entry *res;
+
+ xa_lock(&rt->xa);
+ res = xa_load(&rt->xa, id);
+ if (!res || !rdma_restrack_get(res))
+ res = ERR_PTR(-ENOENT);
+ xa_unlock(&rt->xa);
+
+ return res;
+}
+EXPORT_SYMBOL(rdma_restrack_get_byid);
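A caller (for instance one of the new nldev .doit handlers) is expected to translate the user-supplied ID, use the object, then drop the reference; a sketch under that assumption:

	struct rdma_restrack_entry *res;
	struct ib_cq *cq;

	res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_CQ, id);
	if (IS_ERR(res))
		return PTR_ERR(res);

	cq = container_of(res, struct ib_cq, res);
	/* ... fill the netlink reply from cq ... */
	rdma_restrack_put(res);	/* drop the reference taken by _get_byid() */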
+
static void restrack_release(struct kref *kref)
{
struct rdma_restrack_entry *res;
@@ -225,23 +296,25 @@ EXPORT_SYMBOL(rdma_restrack_put);
void rdma_restrack_del(struct rdma_restrack_entry *res)
{
+ struct rdma_restrack_entry *old;
+ struct rdma_restrack_root *rt;
struct ib_device *dev;
if (!res->valid)
goto out;
dev = res_to_dev(res);
- if (!dev)
+ if (WARN_ON(!dev))
return;
- rdma_restrack_put(res);
-
- wait_for_completion(&res->comp);
+ rt = &dev->res[res->type];
- down_write(&dev->res.rwsem);
- hash_del(&res->node);
+ old = xa_erase(&rt->xa, res->id);
+ WARN_ON(old != res);
res->valid = false;
- up_write(&dev->res.rwsem);
+
+ rdma_restrack_put(res);
+ wait_for_completion(&res->comp);
out:
if (res->task) {
diff --git a/drivers/infiniband/core/restrack.h b/drivers/infiniband/core/restrack.h
new file mode 100644
index 000000000000..09a1fbdf578e
--- /dev/null
+++ b/drivers/infiniband/core/restrack.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_CORE_RESTRACK_H_
+#define _RDMA_CORE_RESTRACK_H_
+
+#include <linux/mutex.h>
+
+/**
+ * struct rdma_restrack_root - per-device resource tracking
+ * management entity
+ */
+struct rdma_restrack_root {
+ /**
+ * @xa: XArray holding this type's restrack entries.
+ */
+ struct xarray xa;
+ /**
+ * @next_id: Next ID to support cyclic allocation
+ */
+ u32 next_id;
+};
+
+int rdma_restrack_init(struct ib_device *dev);
+void rdma_restrack_clean(struct ib_device *dev);
+#endif /* _RDMA_CORE_RESTRACK_H_ */
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index d22c4a2ebac6..89a5be3a2f97 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -179,7 +179,6 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
struct scatterlist *sg, u32 sg_cnt, u32 offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
- struct ib_device *dev = qp->pd->device;
u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
qp->max_read_sge;
struct ib_sge *sge;
@@ -209,8 +208,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
rdma_wr->wr.sg_list = sge;
for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
- sge->addr = ib_sg_dma_address(dev, sg) + offset;
- sge->length = ib_sg_dma_len(dev, sg) - offset;
+ sge->addr = sg_dma_address(sg) + offset;
+ sge->length = sg_dma_len(sg) - offset;
sge->lkey = qp->pd->local_dma_lkey;
total_len += sge->length;
@@ -236,14 +235,13 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
enum dma_data_direction dir)
{
- struct ib_device *dev = qp->pd->device;
struct ib_rdma_wr *rdma_wr = &ctx->single.wr;
ctx->nr_ops = 1;
ctx->single.sge.lkey = qp->pd->local_dma_lkey;
- ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
- ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;
+ ctx->single.sge.addr = sg_dma_address(sg) + offset;
+ ctx->single.sge.length = sg_dma_len(sg) - offset;
memset(rdma_wr, 0, sizeof(*rdma_wr));
if (dir == DMA_TO_DEVICE)
@@ -294,7 +292,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
* Skip to the S/G entry that sg_offset falls into:
*/
for (;;) {
- u32 len = ib_sg_dma_len(dev, sg);
+ u32 len = sg_dma_len(sg);
if (sg_offset < len)
break;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 97e6d7b69abf..7925e45ea88a 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -2342,9 +2342,7 @@ static void ib_sa_add_one(struct ib_device *device)
s = rdma_start_port(device);
e = rdma_end_port(device);
- sa_dev = kzalloc(sizeof *sa_dev +
- (e - s + 1) * sizeof (struct ib_sa_port),
- GFP_KERNEL);
+ sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
if (!sa_dev)
return;
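struct_size() from <linux/overflow.h> sizes a structure with a trailing array while saturating instead of wrapping; roughly, and assuming the trailing port[] member implied by the old code:

/* struct_size(sa_dev, port, n) behaves like: */
size_t sz = sizeof(*sa_dev) + n * sizeof(sa_dev->port[0]);
/* ...except that overflow saturates to SIZE_MAX, so the following
 * kzalloc() fails cleanly rather than under-allocating.
 */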
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 1efadbccf394..1ab423b19f77 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -39,22 +39,25 @@
#include "core_priv.h"
#include "mad_priv.h"
+static LIST_HEAD(mad_agent_list);
+/* Lock to protect mad_agent_list */
+static DEFINE_SPINLOCK(mad_agent_list_lock);
+
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
struct pkey_index_qp_list *pkey = NULL;
struct pkey_index_qp_list *tmp_pkey;
struct ib_device *dev = pp->sec->dev;
- spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
- list_for_each_entry(tmp_pkey,
- &dev->port_pkey_list[pp->port_num].pkey_list,
- pkey_index_list) {
+ spin_lock(&dev->port_data[pp->port_num].pkey_list_lock);
+ list_for_each_entry (tmp_pkey, &dev->port_data[pp->port_num].pkey_list,
+ pkey_index_list) {
if (tmp_pkey->pkey_index == pp->pkey_index) {
pkey = tmp_pkey;
break;
}
}
- spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
+ spin_unlock(&dev->port_data[pp->port_num].pkey_list_lock);
return pkey;
}
@@ -259,12 +262,12 @@ static int port_pkey_list_insert(struct ib_port_pkey *pp)
if (!pkey)
return -ENOMEM;
- spin_lock(&dev->port_pkey_list[port_num].list_lock);
+ spin_lock(&dev->port_data[port_num].pkey_list_lock);
/* Check for the PKey again. A racing process may
* have created it.
*/
list_for_each_entry(tmp_pkey,
- &dev->port_pkey_list[port_num].pkey_list,
+ &dev->port_data[port_num].pkey_list,
pkey_index_list) {
if (tmp_pkey->pkey_index == pp->pkey_index) {
kfree(pkey);
@@ -279,9 +282,9 @@ static int port_pkey_list_insert(struct ib_port_pkey *pp)
spin_lock_init(&pkey->qp_list_lock);
INIT_LIST_HEAD(&pkey->qp_list);
list_add(&pkey->pkey_index_list,
- &dev->port_pkey_list[port_num].pkey_list);
+ &dev->port_data[port_num].pkey_list);
}
- spin_unlock(&dev->port_pkey_list[port_num].list_lock);
+ spin_unlock(&dev->port_data[port_num].pkey_list_lock);
}
spin_lock(&pkey->qp_list_lock);
@@ -418,12 +421,15 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
- u8 i = rdma_start_port(dev);
+ unsigned int i;
bool is_ib = false;
int ret;
- while (i <= rdma_end_port(dev) && !is_ib)
+ rdma_for_each_port (dev, i) {
- is_ib = rdma_protocol_ib(dev, i++);
+ is_ib = rdma_protocol_ib(dev, i);
+ if (is_ib)
+ break;
+ }
/* If this isn't an IB device don't create the security context */
if (!is_ib)
@@ -544,9 +550,8 @@ void ib_security_cache_change(struct ib_device *device,
{
struct pkey_index_qp_list *pkey;
- list_for_each_entry(pkey,
- &device->port_pkey_list[port_num].pkey_list,
- pkey_index_list) {
+ list_for_each_entry (pkey, &device->port_data[port_num].pkey_list,
+ pkey_index_list) {
check_pkey_qps(pkey,
device,
port_num,
@@ -554,21 +559,19 @@ void ib_security_cache_change(struct ib_device *device,
}
}
-void ib_security_destroy_port_pkey_list(struct ib_device *device)
+void ib_security_release_port_pkey_list(struct ib_device *device)
{
struct pkey_index_qp_list *pkey, *tmp_pkey;
- int i;
+ unsigned int i;
- for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
- spin_lock(&device->port_pkey_list[i].list_lock);
+ rdma_for_each_port (device, i) {
list_for_each_entry_safe(pkey,
tmp_pkey,
- &device->port_pkey_list[i].pkey_list,
+ &device->port_data[i].pkey_list,
pkey_index_list) {
list_del(&pkey->pkey_index_list);
kfree(pkey);
}
- spin_unlock(&device->port_pkey_list[i].list_lock);
}
}
@@ -676,19 +679,18 @@ static int ib_security_pkey_access(struct ib_device *dev,
return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
-static int ib_mad_agent_security_change(struct notifier_block *nb,
- unsigned long event,
- void *data)
+void ib_mad_agent_security_change(void)
{
- struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);
-
- if (event != LSM_POLICY_CHANGE)
- return NOTIFY_DONE;
-
- ag->smp_allowed = !security_ib_endport_manage_subnet(
- ag->security, dev_name(&ag->device->dev), ag->port_num);
-
- return NOTIFY_OK;
+ struct ib_mad_agent *ag;
+
+ spin_lock(&mad_agent_list_lock);
+ list_for_each_entry(ag,
+ &mad_agent_list,
+ mad_agent_sec_list)
+ WRITE_ONCE(ag->smp_allowed,
+ !security_ib_endport_manage_subnet(ag->security,
+ dev_name(&ag->device->dev), ag->port_num));
+ spin_unlock(&mad_agent_list_lock);
}
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
@@ -699,6 +701,8 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
if (!rdma_protocol_ib(agent->device, agent->port_num))
return 0;
+ INIT_LIST_HEAD(&agent->mad_agent_sec_list);
+
ret = security_ib_alloc_security(&agent->security);
if (ret)
return ret;
@@ -706,20 +710,22 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
if (qp_type != IB_QPT_SMI)
return 0;
+ spin_lock(&mad_agent_list_lock);
ret = security_ib_endport_manage_subnet(agent->security,
dev_name(&agent->device->dev),
agent->port_num);
if (ret)
- return ret;
+ goto free_security;
- agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
- ret = register_lsm_notifier(&agent->lsm_nb);
- if (ret)
- return ret;
-
- agent->smp_allowed = true;
- agent->lsm_nb_reg = true;
+ WRITE_ONCE(agent->smp_allowed, true);
+ list_add(&agent->mad_agent_sec_list, &mad_agent_list);
+ spin_unlock(&mad_agent_list_lock);
return 0;
+
+free_security:
+ spin_unlock(&mad_agent_list_lock);
+ security_ib_free_security(agent->security);
+ return ret;
}
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
@@ -727,9 +733,13 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
if (!rdma_protocol_ib(agent->device, agent->port_num))
return;
+ if (agent->qp->qp_type == IB_QPT_SMI) {
+ spin_lock(&mad_agent_list_lock);
+ list_del(&agent->mad_agent_sec_list);
+ spin_unlock(&mad_agent_list_lock);
+ }
+
security_ib_free_security(agent->security);
- if (agent->lsm_nb_reg)
- unregister_lsm_notifier(&agent->lsm_nb);
}
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
@@ -738,7 +748,7 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
return 0;
if (map->agent.qp->qp_type == IB_QPT_SMI) {
- if (!map->agent.smp_allowed)
+ if (!READ_ONCE(map->agent.smp_allowed))
return -EACCES;
return 0;
}
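smp_allowed is now updated by the policy-change path while being read locklessly here, so the WRITE_ONCE()/READ_ONCE() pair is what guarantees a single untorn store and a fresh load. The pattern in isolation (a generic sketch, not patch code):

static bool allowed_flag;

static void policy_writer(bool v)
{
	WRITE_ONCE(allowed_flag, v);	/* single plain store, never torn */
}

static int lockless_reader(void)
{
	return READ_ONCE(allowed_flag) ? 0 : -EACCES; /* reloads every call */
}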
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 80f68eb0ba5c..9b6a065bdfa5 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1015,9 +1015,7 @@ err_free_stats:
return;
}
-static int add_port(struct ib_device *device, int port_num,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *))
+static int add_port(struct ib_device *device, int port_num)
{
struct ib_port *p;
struct ib_port_attr attr;
@@ -1113,8 +1111,8 @@ static int add_port(struct ib_device *device, int port_num,
if (ret)
goto err_free_pkey;
- if (port_callback) {
- ret = port_callback(device, port_num, &p->kobj);
+ if (device->ops.init_port) {
+ ret = device->ops.init_port(device, port_num, &p->kobj);
if (ret)
goto err_remove_pkey;
}
@@ -1189,7 +1187,7 @@ err_put:
static ssize_t node_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct ib_device *dev = rdma_device_to_ibdev(device);
switch (dev->node_type) {
case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
@@ -1206,7 +1204,7 @@ static DEVICE_ATTR_RO(node_type);
static ssize_t sys_image_guid_show(struct device *device,
struct device_attribute *dev_attr, char *buf)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct ib_device *dev = rdma_device_to_ibdev(device);
return sprintf(buf, "%04x:%04x:%04x:%04x\n",
be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[0]),
@@ -1219,7 +1217,7 @@ static DEVICE_ATTR_RO(sys_image_guid);
static ssize_t node_guid_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct ib_device *dev = rdma_device_to_ibdev(device);
return sprintf(buf, "%04x:%04x:%04x:%04x\n",
be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
@@ -1232,7 +1230,7 @@ static DEVICE_ATTR_RO(node_guid);
static ssize_t node_desc_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct ib_device *dev = rdma_device_to_ibdev(device);
return sprintf(buf, "%.64s\n", dev->node_desc);
}
@@ -1241,7 +1239,7 @@ static ssize_t node_desc_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct ib_device *dev = rdma_device_to_ibdev(device);
struct ib_device_modify desc = {};
int ret;
@@ -1260,7 +1258,7 @@ static DEVICE_ATTR_RW(node_desc);
static ssize_t fw_ver_show(struct device *device, struct device_attribute *attr,
char *buf)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
+ struct ib_device *dev = rdma_device_to_ibdev(device);
ib_get_device_fw_str(dev, buf);
strlcat(buf, "\n", IB_FW_VERSION_NAME_MAX);
@@ -1277,21 +1275,21 @@ static struct attribute *ib_dev_attrs[] = {
NULL,
};
-static const struct attribute_group dev_attr_group = {
+const struct attribute_group ib_dev_attr_group = {
.attrs = ib_dev_attrs,
};
-static void free_port_list_attributes(struct ib_device *device)
+static void ib_free_port_attrs(struct ib_device *device)
{
struct kobject *p, *t;
list_for_each_entry_safe(p, t, &device->port_list, entry) {
struct ib_port *port = container_of(p, struct ib_port, kobj);
+
list_del(&p->entry);
- if (port->hw_stats) {
- kfree(port->hw_stats);
+ if (port->hw_stats_ag)
free_hsag(&port->kobj, port->hw_stats_ag);
- }
+ kfree(port->hw_stats);
if (port->pma_table)
sysfs_remove_group(p, port->pma_table);
@@ -1308,62 +1306,47 @@ static void free_port_list_attributes(struct ib_device *device)
kobject_put(device->ports_kobj);
}
-int ib_device_register_sysfs(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *))
+static int ib_setup_port_attrs(struct ib_device *device)
{
- struct device *class_dev = &device->dev;
+ unsigned int port;
int ret;
- int i;
-
- device->groups[0] = &dev_attr_group;
- class_dev->groups = device->groups;
- ret = device_add(class_dev);
- if (ret)
- goto err;
-
- device->ports_kobj = kobject_create_and_add("ports", &class_dev->kobj);
- if (!device->ports_kobj) {
- ret = -ENOMEM;
- goto err_put;
- }
+ device->ports_kobj = kobject_create_and_add("ports", &device->dev.kobj);
+ if (!device->ports_kobj)
+ return -ENOMEM;
- if (rdma_cap_ib_switch(device)) {
- ret = add_port(device, 0, port_callback);
+ rdma_for_each_port (device, port) {
+ ret = add_port(device, port);
if (ret)
goto err_put;
- } else {
- for (i = 1; i <= device->phys_port_cnt; ++i) {
- ret = add_port(device, i, port_callback);
- if (ret)
- goto err_put;
- }
}
- if (device->ops.alloc_hw_stats)
- setup_hw_stats(device, NULL, 0);
-
return 0;
err_put:
- free_port_list_attributes(device);
- device_del(class_dev);
-err:
+ ib_free_port_attrs(device);
return ret;
}
-void ib_device_unregister_sysfs(struct ib_device *device)
+int ib_device_register_sysfs(struct ib_device *device)
{
- /* Hold device until ib_dealloc_device() */
- get_device(&device->dev);
+ int ret;
+
+ ret = ib_setup_port_attrs(device);
+ if (ret)
+ return ret;
+
+ if (device->ops.alloc_hw_stats)
+ setup_hw_stats(device, NULL, 0);
- free_port_list_attributes(device);
+ return 0;
+}
- if (device->hw_stats) {
- kfree(device->hw_stats);
+void ib_device_unregister_sysfs(struct ib_device *device)
+{
+ if (device->hw_stats_ag)
free_hsag(&device->dev.kobj, device->hw_stats_ag);
- }
+ kfree(device->hw_stats);
- device_unregister(&device->dev);
+ ib_free_port_attrs(device);
}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 01d68ed46c1b..7468b26b8a01 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1236,6 +1236,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
}
ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
break;
+ case RDMA_OPTION_ID_ACK_TIMEOUT:
+ if (optlen != sizeof(u8)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
+ break;
default:
ret = -ENOSYS;
}
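From userspace this option is reachable through librdmacm's rdma_set_option(); a plausible call, assuming a library new enough to expose RDMA_OPTION_ID_ACK_TIMEOUT and an already-created cm_id:

#include <rdma/rdma_cma.h>

uint8_t timeout = 14;	/* IB encoding: 4.096 us * 2^timeout */

if (rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_ACK_TIMEOUT,
		    &timeout, sizeof(timeout)))
	perror("rdma_set_option");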
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c6144df47ea4..fe5551562dbc 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -72,15 +72,16 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
* If access flags indicate ODP memory, avoid pinning. Instead, stores
* the mm for future page fault handling in conjunction with MMU notifiers.
*
- * @context: userspace context to pin memory for
+ * @udata: userspace context to pin memory for
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
* @dmasync: flush in-flight DMA when the memory region is written
*/
-struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
size_t size, int access, int dmasync)
{
+ struct ib_ucontext *context;
struct ib_umem *umem;
struct page **page_list;
struct vm_area_struct **vma_list;
@@ -95,6 +96,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
struct scatterlist *sg, *sg_list_start;
unsigned int gup_flags = FOLL_WRITE;
+ if (!udata)
+ return ERR_PTR(-EIO);
+
+ context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
+ ->context;
+ if (!context)
+ return ERR_PTR(-EIO);
+
if (dmasync)
dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -160,15 +169,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- down_write(&mm->mmap_sem);
- if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
- (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) {
- up_write(&mm->mmap_sem);
+ new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+ atomic64_sub(npages, &mm->pinned_vm);
ret = -ENOMEM;
goto out;
}
- mm->pinned_vm = new_pinned;
- up_write(&mm->mmap_sem);
cur_base = addr & PAGE_MASK;
@@ -228,9 +234,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem_release:
__ib_umem_release(context->device, umem, 0);
vma:
- down_write(&mm->mmap_sem);
- mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&mm->mmap_sem);
+ atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
if (vma_list)
free_page((unsigned long) vma_list);
@@ -253,25 +257,12 @@ static void __ib_umem_release_tail(struct ib_umem *umem)
kfree(umem);
}
-static void ib_umem_release_defer(struct work_struct *work)
-{
- struct ib_umem *umem = container_of(work, struct ib_umem, work);
-
- down_write(&umem->owning_mm->mmap_sem);
- umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&umem->owning_mm->mmap_sem);
-
- __ib_umem_release_tail(umem);
-}
-
/**
* ib_umem_release - release memory pinned with ib_umem_get
* @umem: umem struct to release
*/
void ib_umem_release(struct ib_umem *umem)
{
- struct ib_ucontext *context = umem->context;
-
if (umem->is_odp) {
ib_umem_odp_release(to_ib_umem_odp(umem));
__ib_umem_release_tail(umem);
@@ -280,26 +271,7 @@ void ib_umem_release(struct ib_umem *umem)
__ib_umem_release(umem->context->device, umem, 1);
- /*
- * We may be called with the mm's mmap_sem already held. This
- * can happen when a userspace munmap() is the call that drops
- * the last reference to our file and calls our release
- * method. If there are memory regions to destroy, we'll end
- * up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting a workqueue.
- */
- if (context->closing) {
- if (!down_write_trylock(&umem->owning_mm->mmap_sem)) {
- INIT_WORK(&umem->work, ib_umem_release_defer);
- queue_work(ib_wq, &umem->work);
- return;
- }
- } else {
- down_write(&umem->owning_mm->mmap_sem);
- }
- umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&umem->owning_mm->mmap_sem);
-
+ atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
__ib_umem_release_tail(umem);
}
EXPORT_SYMBOL(ib_umem_release);
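The accounting switch above is the classic optimistic atomic pattern: add first, check the limit on the returned sum, and subtract to back out on failure. A transient overshoot is visible to concurrent pinners, but that only makes them fail slightly early, an acceptable trade for dropping mmap_sem. Distilled (sketch):

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm); /* undo speculative add */
		return -ENOMEM;
	}
	/* every later error path must atomic64_sub() the same npages */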
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index acb882f279cb..e6ec79ad9cc8 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -40,6 +40,7 @@
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree_generic.h>
+#include <linux/pagemap.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
@@ -299,7 +300,7 @@ static void free_per_mm(struct rcu_head *rcu)
kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
}
-void put_per_mm(struct ib_umem_odp *umem_odp)
+static void put_per_mm(struct ib_umem_odp *umem_odp)
{
struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
struct ib_ucontext *ctx = umem_odp->umem.context;
@@ -332,9 +333,10 @@ void put_per_mm(struct ib_umem_odp *umem_odp)
mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
}
-struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root,
unsigned long addr, size_t size)
{
+ struct ib_ucontext_per_mm *per_mm = root->per_mm;
struct ib_ucontext *ctx = per_mm->context;
struct ib_umem_odp *odp_data;
struct ib_umem *umem;
@@ -349,7 +351,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
- umem->writable = 1;
+ umem->writable = root->umem.writable;
umem->is_odp = 1;
odp_data->per_mm = per_mm;
umem->owning_mm = per_mm->mm;
@@ -617,7 +619,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
* mmget_not_zero will fail in this case.
*/
owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
- if (WARN_ON(!mmget_not_zero(umem_odp->umem.owning_mm))) {
+ if (!owning_process || !mmget_not_zero(owning_mm)) {
ret = -EINVAL;
goto out_put_task;
}
@@ -684,9 +686,14 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
mutex_unlock(&umem_odp->umem_mutex);
if (ret < 0) {
- /* Release left over pages when handling errors. */
- for (++j; j < npages; ++j)
- put_page(local_page_list[j]);
+ /*
+ * Release pages, remembering that the first page
+ * to hit an error was already released by
+ * ib_umem_odp_map_dma_single_page().
+ */
+ if (npages - (j + 1) > 0)
+ release_pages(&local_page_list[j+1],
+ npages - (j + 1));
break;
}
}
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index de8d31ab8945..02b7947ab215 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -957,19 +957,22 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
{
struct ib_umad_port *port;
struct ib_umad_file *file;
- int ret = -ENXIO;
+ int ret = 0;
port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
mutex_lock(&port->file_mutex);
- if (!port->ib_dev)
+ if (!port->ib_dev) {
+ ret = -ENXIO;
goto out;
+ }
- ret = -ENOMEM;
- file = kzalloc(sizeof *file, GFP_KERNEL);
- if (!file)
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
+ if (!file) {
+ ret = -ENOMEM;
goto out;
+ }
mutex_init(&file->mutex);
spin_lock_init(&file->send_lock);
@@ -982,14 +985,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
list_add_tail(&file->port_list, &port->file_list);
- ret = nonseekable_open(inode, filp);
- if (ret) {
- list_del(&file->port_list);
- kfree(file);
- goto out;
- }
-
- ib_umad_dev_get(port->umad_dev);
+ nonseekable_open(inode, filp);
out:
mutex_unlock(&port->file_mutex);
return ret;
@@ -998,7 +994,6 @@ out:
static int ib_umad_close(struct inode *inode, struct file *filp)
{
struct ib_umad_file *file = filp->private_data;
- struct ib_umad_device *dev = file->port->umad_dev;
struct ib_umad_packet *packet, *tmp;
int already_dead;
int i;
@@ -1027,7 +1022,6 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
mutex_unlock(&file->port->file_mutex);
kfree(file);
- ib_umad_dev_put(dev);
return 0;
}
@@ -1073,17 +1067,9 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
filp->private_data = port;
- ret = nonseekable_open(inode, filp);
- if (ret)
- goto err_clr_sm_cap;
-
- ib_umad_dev_get(port->umad_dev);
+ nonseekable_open(inode, filp);
return 0;
-err_clr_sm_cap:
- swap(props.set_port_cap_mask, props.clr_port_cap_mask);
- ib_modify_port(port->ib_dev, port->port_num, 0, &props);
-
err_up_sem:
up(&port->sm_sem);
@@ -1106,7 +1092,6 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
up(&port->sm_sem);
- ib_umad_dev_put(port->umad_dev);
return ret;
}
@@ -1283,10 +1268,12 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
mutex_unlock(&port->file_mutex);
cdev_device_del(&port->sm_cdev, &port->sm_dev);
- put_device(&port->sm_dev);
cdev_device_del(&port->cdev, &port->dev);
- put_device(&port->dev);
ida_free(&umad_ida, port->dev_num);
+
+ /* balances device_initialize() */
+ put_device(&port->sm_dev);
+ put_device(&port->dev);
}
static void ib_umad_add_one(struct ib_device *device)
@@ -1329,21 +1316,24 @@ err:
ib_umad_kill_port(&umad_dev->ports[i - s]);
}
free:
+ /* balances kref_init() */
ib_umad_dev_put(umad_dev);
}
static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{
struct ib_umad_device *umad_dev = client_data;
- int i;
+ unsigned int i;
if (!umad_dev)
return;
- for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
- if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
- ib_umad_kill_port(&umad_dev->ports[i]);
+ rdma_for_each_port (device, i) {
+ if (rdma_cap_ib_mad(device, i))
+ ib_umad_kill_port(
+ &umad_dev->ports[i - rdma_start_port(device)]);
}
+ /* balances kref_init() */
ib_umad_dev_put(umad_dev);
}
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index ea0bc6885517..32cc8fe7902f 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -160,6 +160,7 @@ struct ib_uverbs_file {
struct mutex umap_lock;
struct list_head umaps;
+ struct page *disassociate_page;
struct idr idr;
/* spinlock protects write access to idr */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3317300ab036..062a86c04123 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -224,12 +224,13 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
if (ret)
goto err;
- ucontext = ib_dev->ops.alloc_ucontext(ib_dev, &attrs->driver_udata);
- if (IS_ERR(ucontext)) {
- ret = PTR_ERR(ucontext);
+ ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
+ if (!ucontext) {
+ ret = -ENOMEM;
goto err_alloc;
}
+ ucontext->res.type = RDMA_RESTRACK_CTX;
ucontext->device = ib_dev;
ucontext->cg_obj = cg_obj;
/* ufile is required when some objects are released */
@@ -238,15 +239,8 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
ucontext->closing = false;
ucontext->cleanup_retryable = false;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
mutex_init(&ucontext->per_mm_list_lock);
INIT_LIST_HEAD(&ucontext->per_mm_list);
- if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
- ucontext->invalidate_range = NULL;
-
-#endif
-
- resp.num_comp_vectors = file->device->num_comp_vectors;
ret = get_unused_fd_flags(O_CLOEXEC);
if (ret < 0)
@@ -259,15 +253,22 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
goto err_fd;
}
+ resp.num_comp_vectors = file->device->num_comp_vectors;
+
ret = uverbs_response(attrs, &resp, sizeof(resp));
if (ret)
goto err_file;
- fd_install(resp.async_fd, filp);
+ ret = ib_dev->ops.alloc_ucontext(ucontext, &attrs->driver_udata);
+ if (ret)
+ goto err_file;
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
+ ucontext->invalidate_range = NULL;
- ucontext->res.type = RDMA_RESTRACK_CTX;
rdma_restrack_uadd(&ucontext->res);
+ fd_install(resp.async_fd, filp);
+
/*
* Make sure that ib_uverbs_get_ucontext() sees the pointer update
* only after all writes to setup the ucontext have completed
@@ -286,7 +287,7 @@ err_fd:
put_unused_fd(resp.async_fd);
err_free:
- ib_dev->ops.dealloc_ucontext(ucontext);
+ kfree(ucontext);
err_alloc:
ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
@@ -410,9 +411,9 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- pd = ib_dev->ops.alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
- if (IS_ERR(pd)) {
- ret = PTR_ERR(pd);
+ pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
+ if (!pd) {
+ ret = -ENOMEM;
goto err;
}
@@ -420,11 +421,15 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
pd->uobject = uobj;
pd->__internal_mr = NULL;
atomic_set(&pd->usecnt, 0);
+ pd->res.type = RDMA_RESTRACK_PD;
+
+ ret = ib_dev->ops.alloc_pd(pd, uobj->context, &attrs->driver_udata);
+ if (ret)
+ goto err_alloc;
uobj->object = pd;
memset(&resp, 0, sizeof resp);
resp.pd_handle = uobj->id;
- pd->res.type = RDMA_RESTRACK_PD;
rdma_restrack_uadd(&pd->res);
ret = uverbs_response(attrs, &resp, sizeof(resp));
@@ -435,7 +440,9 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
err_copy:
ib_dealloc_pd(pd);
-
+ pd = NULL;
+err_alloc:
+ kfree(pd);
err:
uobj_alloc_abort(uobj);
return ret;
@@ -822,14 +829,13 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
cmd.length, cmd.hca_va,
cmd.access_flags, pd,
&attrs->driver_udata);
- if (!ret) {
- if (cmd.flags & IB_MR_REREG_PD) {
- atomic_inc(&pd->usecnt);
- mr->pd = pd;
- atomic_dec(&old_pd->usecnt);
- }
- } else {
+ if (ret)
goto put_uobj_pd;
+
+ if (cmd.flags & IB_MR_REREG_PD) {
+ atomic_inc(&pd->usecnt);
+ mr->pd = pd;
+ atomic_dec(&old_pd->usecnt);
}
memset(&resp, 0, sizeof(resp));
@@ -884,6 +890,11 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
goto err_free;
}
+ if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+
mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
if (IS_ERR(mw)) {
ret = PTR_ERR(mw);
@@ -1184,12 +1195,11 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
ret = -EFAULT;
goto out_put;
}
+ ret = 0;
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
- ret = 0;
-
out_put:
uobj_put_obj_read(cq);
return ret;
@@ -2632,7 +2642,7 @@ void flow_resources_add(struct ib_uflow_resources *uflow_res,
}
EXPORT_SYMBOL(flow_resources_add);
-static int kern_spec_to_ib_spec_action(const struct uverbs_attr_bundle *attrs,
+static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec,
struct ib_uflow_resources *uflow_res)
@@ -3618,7 +3628,6 @@ static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
copy_query_dev_fields(ucontext, &resp.base, &attr);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
resp.odp_caps.general_caps = attr.odp_caps.general_caps;
resp.odp_caps.per_transport_caps.rc_odp_caps =
attr.odp_caps.per_transport_caps.rc_odp_caps;
@@ -3626,7 +3635,7 @@ static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
attr.odp_caps.per_transport_caps.uc_odp_caps;
resp.odp_caps.per_transport_caps.ud_odp_caps =
attr.odp_caps.per_transport_caps.ud_odp_caps;
-#endif
+ resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps;
resp.timestamp_mask = attr.timestamp_mask;
resp.hca_core_clock = attr.hca_core_clock;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 0ca04d224015..e1379949e663 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -213,6 +213,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
ret = PTR_ERR(attr->uobjects[i]);
break;
}
+ pbundle->bundle.context = attr->uobjects[i]->context;
}
attr->len = i;
@@ -330,6 +331,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
uattr->data_s64);
if (IS_ERR(o_attr->uobject))
return PTR_ERR(o_attr->uobject);
+ pbundle->bundle.context = o_attr->uobject->context;
__set_bit(attr_bkey, pbundle->uobj_finalize);
if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
@@ -592,6 +594,7 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
pbundle->method_elm = method_elm;
pbundle->method_key = attrs_iter.index;
pbundle->bundle.ufile = ufile;
+ pbundle->bundle.context = NULL; /* only valid if bundle has uobject */
pbundle->radix = &uapi->radix;
pbundle->radix_slots = slot;
pbundle->radix_slots_len = radix_tree_chunk_size(&attrs_iter);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f366838b7ff..c489f545baae 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
kref_put(&file->async_file->ref,
ib_uverbs_release_async_event_file);
put_device(&file->device->dev);
+
+ if (file->disassociate_page)
+ __free_pages(file->disassociate_page, 0);
kfree(file);
}
@@ -695,6 +698,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
bundle.ufile = file;
+ bundle.context = NULL; /* only valid if bundle has uobject */
if (!method_elm->is_ex) {
size_t in_len = hdr.in_words * 4 - sizeof(hdr);
size_t out_len = hdr.out_words * 4;
@@ -876,9 +880,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
kfree(priv);
}
+/*
+ * Once zap_vma_ptes() has been called, touches to the VMA will come here and
+ * we return a dummy writable zero page for all the pfns.
+ */
+static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
+{
+ struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
+ struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
+ vm_fault_t ret = 0;
+
+ if (!priv)
+ return VM_FAULT_SIGBUS;
+
+ /* Read only pages can just use the system zero page. */
+ if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
+ vmf->page = ZERO_PAGE(vmf->address);
+ get_page(vmf->page);
+ return 0;
+ }
+
+ mutex_lock(&ufile->umap_lock);
+ if (!ufile->disassociate_page)
+ ufile->disassociate_page =
+ alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
+
+ if (ufile->disassociate_page) {
+ /*
+ * This VMA is forced to always be shared, so we don't have
+ * to worry about COW.
+ */
+ vmf->page = ufile->disassociate_page;
+ get_page(vmf->page);
+ } else {
+ ret = VM_FAULT_SIGBUS;
+ }
+ mutex_unlock(&ufile->umap_lock);
+
+ return ret;
+}
+
static const struct vm_operations_struct rdma_umap_ops = {
.open = rdma_umap_open,
.close = rdma_umap_close,
+ .fault = rdma_umap_fault,
};
static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
@@ -888,6 +933,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
struct ib_uverbs_file *ufile = ucontext->ufile;
struct rdma_umap_priv *priv;
+ if (!(vma->vm_flags & VM_SHARED))
+ return ERR_PTR(-EINVAL);
+
if (vma->vm_end - vma->vm_start != size)
return ERR_PTR(-EINVAL);
@@ -991,7 +1039,9 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
* at a time to get the lock ordering right. Typically there
* will only be one mm, so no big deal.
*/
- down_write(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
+ if (!mmget_still_valid(mm))
+ goto skip_mm;
mutex_lock(&ufile->umap_lock);
list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
list) {
@@ -1003,10 +1053,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
zap_vma_ptes(vma, vma->vm_start,
vma->vm_end - vma->vm_start);
- vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
}
mutex_unlock(&ufile->umap_lock);
- up_write(&mm->mmap_sem);
+ skip_mm:
+ up_read(&mm->mmap_sem);
mmput(mm);
}
}
@@ -1135,6 +1185,7 @@ static const struct file_operations uverbs_mmap_fops = {
static struct ib_client uverbs_client = {
.name = "uverbs",
+ .no_kverbs_req = true,
.add = ib_uverbs_add_one,
.remove = ib_uverbs_remove_one
};
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index cbc72312eb41..f224cb727224 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -188,7 +188,7 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
if (ret)
return ret;
- ib_dealloc_pd((struct ib_pd *)uobject->object);
+ ib_dealloc_pd(pd);
return 0;
}
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 9ae08e4b78a3..7a987acf0c0b 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -188,13 +188,18 @@ static int uapi_merge_obj_tree(struct uverbs_api *uapi,
obj_elm->type_attrs = obj->type_attrs;
obj_elm->type_class = obj->type_attrs->type_class;
/*
- * Today drivers are only permitted to use idr_class
- * types. They cannot use FD types because we currently have
- * no way to revoke the fops pointer after device
- * disassociation.
+ * Today drivers are only permitted to use idr_class and
+ * fd_class types. We can revoke the IDR types during
+ * disassociation, and the FD types require the driver to use
+ * struct file_operations.owner to prevent the driver module
+ * code from unloading while the file is open. This provides
+ * enough safety that uverbs_close_fd() will continue to work.
+ * Drivers using FD are responsible for handling disassociation of
+ * the device on their own.
*/
if (WARN_ON(is_driver &&
- obj->type_attrs->type_class != &uverbs_idr_class))
+ obj->type_attrs->type_class != &uverbs_idr_class &&
+ obj->type_attrs->type_class != &uverbs_fd_class))
return -EINVAL;
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index ac011836bb54..5a5e83f5f0fc 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -254,10 +254,11 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
{
struct ib_pd *pd;
int mr_access_flags = 0;
+ int ret;
- pd = device->ops.alloc_pd(device, NULL, NULL);
- if (IS_ERR(pd))
- return pd;
+ pd = rdma_zalloc_drv_obj(device, ib_pd);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
pd->device = device;
pd->uobject = NULL;
@@ -265,6 +266,16 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
atomic_set(&pd->usecnt, 0);
pd->flags = flags;
+ pd->res.type = RDMA_RESTRACK_PD;
+ rdma_restrack_set_task(&pd->res, caller);
+
+ ret = device->ops.alloc_pd(pd, NULL, NULL);
+ if (ret) {
+ kfree(pd);
+ return ERR_PTR(ret);
+ }
+ rdma_restrack_kadd(&pd->res);
+
if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
pd->local_dma_lkey = device->local_dma_lkey;
else
@@ -275,10 +286,6 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
}
- pd->res.type = RDMA_RESTRACK_PD;
- rdma_restrack_set_task(&pd->res, caller);
- rdma_restrack_kadd(&pd->res);
-
if (mr_access_flags) {
struct ib_mr *mr;
@@ -329,10 +336,8 @@ void ib_dealloc_pd(struct ib_pd *pd)
WARN_ON(atomic_read(&pd->usecnt));
rdma_restrack_del(&pd->res);
- /* Making delalloc_pd a void return is a WIP, no driver should return
- an error here. */
- ret = pd->device->ops.dealloc_pd(pd);
- WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
+ pd->device->ops.dealloc_pd(pd);
+ kfree(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
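With the core now allocating the PD through rdma_zalloc_drv_obj(), a driver's ops.alloc_pd initializes a pre-zeroed ib_pd and returns int, and ops.dealloc_pd returns void; the bnxt_re hunks later in this patch follow exactly this shape. A skeletal driver conversion, with all mydrv names hypothetical (drivers also advertise their PD size via INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd) in their ib_device_ops):

static int mydrv_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			  struct ib_udata *udata)
{
	struct mydrv_pd *pd = container_of(ibpd, struct mydrv_pd, ibpd);

	/* Program the HW object; *pd was zero-allocated by the core. */
	return mydrv_hw_alloc_pd(pd);	/* hypothetical HW helper */
}

static void mydrv_dealloc_pd(struct ib_pd *ibpd)
{
	struct mydrv_pd *pd = container_of(ibpd, struct mydrv_pd, ibpd);

	mydrv_hw_free_pd(pd);	/* hypothetical; no error return anymore */
}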
@@ -1106,8 +1111,8 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
}
EXPORT_SYMBOL(ib_open_qp);
-static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
- struct ib_qp_init_attr *qp_init_attr)
+static struct ib_qp *create_xrc_qp(struct ib_qp *qp,
+ struct ib_qp_init_attr *qp_init_attr)
{
struct ib_qp *real_qp = qp;
@@ -1122,10 +1127,10 @@ static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
qp_init_attr->qp_context);
- if (!IS_ERR(qp))
- __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
- else
- real_qp->device->ops.destroy_qp(real_qp);
+ if (IS_ERR(qp))
+ return qp;
+
+ __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
return qp;
}
@@ -1156,10 +1161,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
return qp;
ret = ib_create_qp_security(qp, device);
- if (ret) {
- ib_destroy_qp(qp);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err;
qp->real_qp = qp;
qp->qp_type = qp_init_attr->qp_type;
@@ -1172,8 +1175,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
INIT_LIST_HEAD(&qp->sig_mrs);
qp->port = 0;
- if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
- return ib_create_xrc_qp(qp, qp_init_attr);
+ if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
+ struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr);
+
+ if (IS_ERR(xrc_qp)) {
+ ret = PTR_ERR(xrc_qp);
+ goto err;
+ }
+ return xrc_qp;
+ }
qp->event_handler = qp_init_attr->event_handler;
qp->qp_context = qp_init_attr->qp_context;
@@ -1200,11 +1210,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs) {
ret = rdma_rw_init_mrs(qp, qp_init_attr);
- if (ret) {
- pr_err("failed to init MR pool ret= %d\n", ret);
- ib_destroy_qp(qp);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err;
}
/*
@@ -1217,6 +1224,11 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
device->attrs.max_sge_rd);
return qp;
+
+err:
+ ib_destroy_qp(qp);
+ return ERR_PTR(ret);
+
}
EXPORT_SYMBOL(ib_create_qp);
@@ -1711,10 +1723,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
return -EINVAL;
- if (!dev->ops.get_netdev)
- return -EOPNOTSUPP;
-
- netdev = dev->ops.get_netdev(dev, port_num);
+ netdev = ib_device_get_netdev(dev, port_num);
if (!netdev)
return -ENODEV;
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index 19982a4a9bba..d25439c305f7 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -1,5 +1,6 @@
config INFINIBAND_BNXT_RE
tristate "Broadcom Netxtreme HCA support"
+ depends on 64BIT
depends on ETHERNET && NETDEVICES && PCI && INET && DCB
select NET_VENDOR_BROADCOM
select BNXT
diff --git a/drivers/infiniband/hw/bnxt_re/Makefile b/drivers/infiniband/hw/bnxt_re/Makefile
index 6e3bc25cc140..ee9bb1be61ea 100644
--- a/drivers/infiniband/hw/bnxt_re/Makefile
+++ b/drivers/infiniband/hw/bnxt_re/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/net/ethernet/broadcom/bnxt
+ccflags-y := -I $(srctree)/drivers/net/ethernet/broadcom/bnxt
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re.o
bnxt_re-y := main.o ib_verbs.o \
qplib_res.o qplib_rcfw.o \
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 31baa8939a4f..e55a1666c0cd 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -124,6 +124,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
+ struct bnxt_qplib_chip_ctx chip_ctx;
struct bnxt_en_dev *en_dev;
struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
int num_msix;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 1e2515e2eb62..071b2fc38b0b 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -48,6 +48,7 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
#include "bnxt_ulp.h"
@@ -563,41 +564,29 @@ fail:
}
/* Protection Domains */
-int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
+void bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
- int rc;
bnxt_re_destroy_fence_mr(pd);
- if (pd->qplib_pd.id) {
- rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
- &rdev->qplib_res.pd_tbl,
- &pd->qplib_pd);
- if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
- }
-
- kfree(pd);
- return 0;
+ if (pd->qplib_pd.id)
+ bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd);
}
-struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *ucontext,
- struct ib_udata *udata)
+int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = ibpd->device;
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_re_ucontext *ucntx = container_of(ucontext,
struct bnxt_re_ucontext,
ib_uctx);
- struct bnxt_re_pd *pd;
+ struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
int rc;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
pd->rdev = rdev;
if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
@@ -637,13 +626,12 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
if (bnxt_re_create_fence_mr(pd))
dev_warn(rdev_to_dev(rdev),
"Failed to create Fence-MR\n");
- return &pd->ib_pd;
+ return 0;
dbfail:
- (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
- &pd->qplib_pd);
+ bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd);
fail:
- kfree(pd);
- return ERR_PTR(rc);
+ return rc;
}
/* Address Handles */
@@ -663,17 +651,36 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
return 0;
}
+static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
+{
+ u8 nw_type;
+
+ switch (ntype) {
+ case RDMA_NETWORK_IPV4:
+ nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
+ break;
+ default:
+ nw_type = CMDQ_CREATE_AH_TYPE_V1;
+ break;
+ }
+ return nw_type;
+}
+
struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
struct rdma_ah_attr *ah_attr,
u32 flags,
struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
struct bnxt_re_dev *rdev = pd->rdev;
+ const struct ib_gid_attr *sgid_attr;
struct bnxt_re_ah *ah;
- const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
- int rc;
u8 nw_type;
+ int rc;
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
@@ -700,28 +707,11 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
ah->qplib_ah.flow_label = grh->flow_label;
ah->qplib_ah.hop_limit = grh->hop_limit;
ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
- if (udata &&
- !rdma_is_multicast_addr((struct in6_addr *)
- grh->dgid.raw) &&
- !rdma_link_local_addr((struct in6_addr *)
- grh->dgid.raw)) {
- const struct ib_gid_attr *sgid_attr;
- sgid_attr = grh->sgid_attr;
- /* Get network header type for this GID */
- nw_type = rdma_gid_attr_network_type(sgid_attr);
- switch (nw_type) {
- case RDMA_NETWORK_IPV4:
- ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
- break;
- case RDMA_NETWORK_IPV6:
- ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
- break;
- default:
- ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
- break;
- }
- }
+ sgid_attr = grh->sgid_attr;
+ /* Get network header type for this GID */
+ nw_type = rdma_gid_attr_network_type(sgid_attr);
+ ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
@@ -733,12 +723,11 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
/* Write AVID to shared page. */
if (udata) {
- struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
- struct bnxt_re_ucontext *uctx;
+ struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct bnxt_re_ucontext, ib_uctx);
unsigned long flag;
u32 *wrptr;
- uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
spin_lock_irqsave(&uctx->sh_lock, flag);
wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
*wrptr = ah->qplib_ah.id;
@@ -804,8 +793,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
- int rc;
unsigned int flags;
+ int rc;
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
@@ -814,9 +803,12 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
return rc;
}
- flags = bnxt_re_lock_cqs(qp);
- bnxt_qplib_clean_qp(&qp->qplib_qp);
- bnxt_re_unlock_cqs(qp, flags);
+ if (rdma_is_kernel_res(&qp->ib_qp.res)) {
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
+
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
@@ -882,21 +874,23 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
struct bnxt_re_qp_req ureq;
struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
struct ib_umem *umem;
- int bytes = 0;
- struct ib_ucontext *context = pd->ib_pd.uobject->context;
- struct bnxt_re_ucontext *cntx = container_of(context,
- struct bnxt_re_ucontext,
- ib_uctx);
+ int bytes = 0, psn_sz;
+ struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
+ udata, struct bnxt_re_ucontext, ib_uctx);
+
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return -EFAULT;
bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
/* Consider mapping PSN search memory only for RC QPs. */
- if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
- bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
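+	/* 57500 series uses the larger sq_psn_search_ext entries */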
+ if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
+ psn_sz = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
+ bytes += (qplib_qp->sq.max_wqe * psn_sz);
+ }
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(context, ureq.qpsva, bytes,
- IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -908,7 +902,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (!qp->qplib_qp.srq) {
bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(context, ureq.qprva, bytes,
+ umem = ib_umem_get(udata, ureq.qprva, bytes,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem))
goto rqfail;
@@ -1066,12 +1060,17 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.pd = &pd->qplib_pd;
qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
+
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
+ bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
+ qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
if (qp->qplib_qp.type == IB_QPT_MAX) {
dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
qp->qplib_qp.type);
rc = -EINVAL;
goto fail;
}
+
qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
IB_SIGNAL_ALL_WR) ? true : false);
@@ -1132,7 +1131,8 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
- if (qp_init_attr->qp_type == IB_QPT_GSI) {
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
+ !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
/* Allocate 1 more than what's provided */
entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
@@ -1361,17 +1361,15 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
struct ib_umem *umem;
int bytes = 0;
- struct ib_ucontext *context = pd->ib_pd.uobject->context;
- struct bnxt_re_ucontext *cntx = container_of(context,
- struct bnxt_re_ucontext,
- ib_uctx);
+ struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
+ udata, struct bnxt_re_ucontext, ib_uctx);
+
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return -EFAULT;
bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(context, ureq.srqva, bytes,
- IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -1646,6 +1644,9 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
__from_ib_access_flags(qp_attr->qp_access_flags);
/* LOCAL_WRITE access must be set to allow RC receive */
qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
+		/* Temporary: grant remote write/read access unconditionally */
+ qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
+ qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
}
if (qp_attr_mask & IB_QP_PKEY_INDEX) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
@@ -2093,7 +2094,8 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
static int is_ud_qp(struct bnxt_re_qp *qp)
{
- return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
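+	/* On gen P5 devices the GSI QP is built like a UD QP */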
+ return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
+ qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
}
static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
@@ -2397,7 +2399,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
- if (ib_qp->qp_type == IB_QPT_GSI) {
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
payload_sz);
if (rc)
@@ -2527,7 +2529,8 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
wqe.wr_id = wr->wr_id;
wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
- if (ib_qp->qp_type == IB_QPT_GSI)
+ if (ib_qp->qp_type == IB_QPT_GSI &&
+ qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
payload_sz);
if (!rc)
@@ -2622,7 +2625,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
goto fail;
}
- cq->umem = ib_umem_get(context, req.cq_va,
+ cq->umem = ib_umem_get(udata, req.cq_va,
entries * sizeof(struct cq_base),
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(cq->umem)) {
@@ -3122,19 +3125,33 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
}
}
-static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
+static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
+ struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
+ u8 nw_type;
+
wc->opcode = IB_WC_RECV;
wc->status = __rc_to_ib_wc_status(cqe->status);
- if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
+ if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
wc->wc_flags |= IB_WC_WITH_IMM;
- if (cqe->flags & CQ_RES_RC_FLAGS_INV)
- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
- (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
- wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+	/* Report Rx metadata (GRH, SMAC, VLAN, nw type) only on Thor's GSI QP */
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
+ wc->wc_flags |= IB_WC_GRH;
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->wc_flags |= IB_WC_WITH_SMAC;
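+		/* Low 12 bits of cfa_meta carry the VLAN ID */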
+ if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
+ wc->vlan_id = (cqe->cfa_meta & 0xFFF);
+ if (wc->vlan_id < 0x1000)
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
+ nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
+ CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
+ wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ }
}
static int send_phantom_wqe(struct bnxt_re_qp *qp)
@@ -3226,7 +3243,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
switch (cqe->opcode) {
case CQ_BASE_CQE_TYPE_REQ:
- if (qp->qplib_qp.id ==
+ if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
qp->rdev->qp1_sqp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
@@ -3261,7 +3278,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
bnxt_re_process_res_rc_wc(wc, cqe);
break;
case CQ_BASE_CQE_TYPE_RES_UD:
- if (qp->qplib_qp.id ==
+ if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
qp->rdev->qp1_sqp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
@@ -3274,7 +3291,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
break;
}
}
- bnxt_re_process_res_ud_wc(wc, cqe);
+ bnxt_re_process_res_ud_wc(qp, wc, cqe);
break;
default:
dev_err(rdev_to_dev(cq->rdev),
@@ -3301,10 +3318,10 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
spin_lock_irqsave(&cq->cq_lock, flags);
/* Trigger on the very next completion */
if (ib_cqn_flags & IB_CQ_NEXT_COMP)
- type = DBR_DBR_TYPE_CQ_ARMALL;
+ type = DBC_DBC_TYPE_CQ_ARMALL;
/* Trigger on the next solicited completion */
else if (ib_cqn_flags & IB_CQ_SOLICITED)
- type = DBR_DBR_TYPE_CQ_ARMSE;
+ type = DBC_DBC_TYPE_CQ_ARMSE;
/* Poll to see if there are missed events */
if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
@@ -3537,19 +3554,14 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
u64 *pbl_tbl = pbl_tbl_orig;
u64 paddr;
u64 page_mask = (1ULL << page_shift) - 1;
- int i, pages;
- struct scatterlist *sg;
- int entry;
-
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- pages = sg_dma_len(sg) >> PAGE_SHIFT;
- for (i = 0; i < pages; i++) {
- paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
- if (pbl_tbl == pbl_tbl_orig)
- *pbl_tbl++ = paddr & ~page_mask;
- else if ((paddr & page_mask) == 0)
- *pbl_tbl++ = paddr;
- }
+ struct sg_dma_page_iter sg_iter;
+
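+	/* Align the first PBL entry down to the page boundary; record only
+	 * page-aligned addresses after that.
+	 */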
+ for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ paddr = sg_page_iter_dma_address(&sg_iter);
+ if (pbl_tbl == pbl_tbl_orig)
+ *pbl_tbl++ = paddr & ~page_mask;
+ else if ((paddr & page_mask) == 0)
+ *pbl_tbl++ = paddr;
}
return pbl_tbl - pbl_tbl_orig;
}
@@ -3589,8 +3601,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
- umem = ib_umem_get(ib_pd->uobject->context, start, length,
- mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
@@ -3613,7 +3624,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
goto free_umem;
}
- page_shift = umem->page_shift;
+ page_shift = PAGE_SHIFT;
if (!bnxt_re_page_size_ok(page_shift)) {
dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
@@ -3660,13 +3671,15 @@ free_mr:
return ERR_PTR(rc);
}
-struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{
+ struct ib_device *ibdev = ctx->device;
+ struct bnxt_re_ucontext *uctx =
+ container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_re_uctx_resp resp;
- struct bnxt_re_ucontext *uctx;
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_uctx_resp resp;
+ u32 chip_met_rev_num = 0;
int rc;
dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
@@ -3675,13 +3688,9 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
BNXT_RE_ABI_VERSION);
- return ERR_PTR(-EPERM);
+ return -EPERM;
}
- uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
- if (!uctx)
- return ERR_PTR(-ENOMEM);
-
uctx->rdev = rdev;
uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
@@ -3691,37 +3700,45 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
}
spin_lock_init(&uctx->sh_lock);
- resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/
+ resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
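+	/* Pack chip number, revision and metal into chip_id0 for user space */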
+ chip_met_rev_num = rdev->chip_ctx.chip_num;
+ chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_rev & 0xFF) <<
+ BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
+ chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_metal & 0xFF) <<
+ BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
+ resp.chip_id0 = chip_met_rev_num;
+ /* Future extension of chip info */
+ resp.chip_id1 = 0;
+	/* Temporary: use idr_alloc instead */
+ resp.dev_id = rdev->en_dev->pdev->devfn;
resp.max_qp = rdev->qplib_ctx.qpc_count;
resp.pg_size = PAGE_SIZE;
resp.cqe_sz = sizeof(struct cq_base);
resp.max_cqd = dev_attr->max_cq_wqes;
resp.rsvd = 0;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to copy user context");
rc = -EFAULT;
goto cfail;
}
- return &uctx->ib_uctx;
+ return 0;
cfail:
free_page((unsigned long)uctx->shpg);
uctx->shpg = NULL;
fail:
- kfree(uctx);
- return ERR_PTR(rc);
+ return rc;
}
-int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
struct bnxt_re_ucontext,
ib_uctx);
struct bnxt_re_dev *rdev = uctx->rdev;
- int rc = 0;
if (uctx->shpg)
free_page((unsigned long)uctx->shpg);
@@ -3730,17 +3747,10 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
/* Free DPI only if this is the first PD allocated by the
* application and mark the context dpi as NULL
*/
- rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
- &rdev->qplib_res.dpi_tbl,
- &uctx->dpi);
- if (rc)
- dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
- /* Don't fail, continue*/
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &rdev->qplib_res.dpi_tbl, &uctx->dpi);
uctx->dpi.dbr = NULL;
}
-
- kfree(uctx);
- return 0;
}
/* Helper function to mmap the virtual memory from user app */
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index c4af72604b4f..e45465ed4eee 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -56,8 +56,8 @@ struct bnxt_re_fence_data {
};
struct bnxt_re_pd {
+ struct ib_pd ib_pd;
struct bnxt_re_dev *rdev;
- struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd;
struct bnxt_re_fence_data fence;
};
@@ -135,8 +135,8 @@ struct bnxt_re_mw {
};
struct bnxt_re_ucontext {
+ struct ib_ucontext ib_uctx;
struct bnxt_re_dev *rdev;
- struct ib_ucontext ib_uctx;
struct bnxt_qplib_dpi dpi;
void *shpg;
spinlock_t sh_lock; /* protect shpg */
@@ -163,10 +163,9 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
-struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int bnxt_re_dealloc_pd(struct ib_pd *pd);
+int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata);
+void bnxt_re_dealloc_pd(struct ib_pd *pd);
struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
u32 flags,
@@ -216,9 +215,8 @@ int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
-struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata);
-int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index e7a997f2a537..2bd24ac45ee4 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -80,6 +80,29 @@ static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
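+/* Cache chip identity from the L2 driver so qplib can detect gen P5 parts */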
+static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
+{
+ rdev->rcfw.res = NULL;
+ rdev->qplib_res.cctx = NULL;
+}
+
+static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev;
+ struct bnxt *bp;
+
+ en_dev = rdev->en_dev;
+ bp = netdev_priv(en_dev->net);
+
+ rdev->chip_ctx.chip_num = bp->chip_num;
+	/* remaining members are filled in as support is added */
+
+ rdev->qplib_res.cctx = &rdev->chip_ctx;
+ rdev->rcfw.res = &rdev->qplib_res;
+
+ return 0;
+}
+
/* SR-IOV helper functions */
static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
@@ -278,6 +301,7 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
&bnxt_re_ulp_ops, rdev);
+ rdev->qplib_res.pdev = rdev->en_dev->pdev;
return rc;
}
@@ -345,7 +369,8 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
fw_msg->timeout = timeout;
}
-static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
+ u16 fw_ring_id, int type)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ring_free_input req = {0};
@@ -359,7 +384,7 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+ req.ring_type = type;
req.ring_id = cpu_to_le16(fw_ring_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
@@ -396,7 +421,7 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
/* Association of ring index with doorbell index and MSIX number */
req.logical_id = cpu_to_le16(map_index);
req.length = cpu_to_le32(ring_mask + 1);
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+ req.ring_type = type;
req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
@@ -538,7 +563,8 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+ struct bnxt_re_dev *rdev =
+ rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
@@ -547,7 +573,8 @@ static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+ struct bnxt_re_dev *rdev =
+ rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}
@@ -610,6 +637,8 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.query_srq = bnxt_re_query_srq,
.reg_user_mr = bnxt_re_reg_user_mr,
.req_notify_cq = bnxt_re_req_notify_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};
static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
@@ -662,7 +691,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
ibdev->driver_id = RDMA_DRIVER_BNXT_RE;
ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
- return ib_register_device(ibdev, "bnxt_re%d", NULL);
+ return ib_register_device(ibdev, "bnxt_re%d");
}
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
@@ -686,7 +715,7 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
struct bnxt_re_dev *rdev;
/* Allocate bnxt_re_dev instance here */
- rdev = (struct bnxt_re_dev *)ib_alloc_device(sizeof(*rdev));
+ rdev = ib_alloc_device(bnxt_re_dev, ibdev);
if (!rdev) {
dev_err(NULL, "%s: bnxt_re_dev allocation failure!",
ROCE_DRV_MODULE_NAME);
@@ -858,6 +887,12 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
return 0;
}
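+/* Gen P5 devices use a fixed NQ doorbell offset; legacy chips take it
+ * from the MSI-X vector entry.
+ */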
+static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
+{
+ return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ 0x10000 : rdev->msix_entries[indx].db_offset;
+}
+
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
int i;
@@ -871,18 +906,18 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
- int rc = 0, i;
int num_vec_enabled = 0;
+ int rc = 0, i;
+ u32 db_offt;
bnxt_qplib_init_res(&rdev->qplib_res);
for (i = 1; i < rdev->num_msix ; i++) {
+ db_offt = bnxt_re_get_nqdb_offset(rdev, i);
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
i - 1, rdev->msix_entries[i].vector,
- rdev->msix_entries[i].db_offset,
- &bnxt_re_cqn_handler,
+ db_offt, &bnxt_re_cqn_handler,
&bnxt_re_srqn_handler);
-
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to enable NQ with rc = 0x%x", rc);
@@ -894,16 +929,18 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
fail:
for (i = num_vec_enabled; i >= 0; i--)
bnxt_qplib_disable_nq(&rdev->nq[i]);
-
return rc;
}
static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
+ u8 type;
int i;
for (i = 0; i < rdev->num_msix - 1; i++) {
- bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
+ type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
+ rdev->nq[i].res = NULL;
bnxt_qplib_free_nq(&rdev->nq[i]);
}
}
@@ -925,8 +962,11 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
- int rc = 0, i;
int num_vec_created = 0;
+ dma_addr_t *pg_map;
+ int rc = 0, i;
+ int pages;
+ u8 type;
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
@@ -947,6 +987,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
goto dealloc_res;
for (i = 0; i < rdev->num_msix - 1; i++) {
+ rdev->nq[i].res = &rdev->qplib_res;
rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
BNXT_RE_MAX_SRQC_COUNT + 2;
rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
@@ -955,13 +996,13 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
i, rc);
goto free_nq;
}
- rc = bnxt_re_net_ring_alloc
- (rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
- rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count,
- HWRM_RING_ALLOC_CMPL,
- BNXT_QPLIB_NQE_MAX_CNT - 1,
- rdev->msix_entries[i + 1].ring_idx,
- &rdev->nq[i].ring_id);
+ type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr;
+ pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count;
+ rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
+ BNXT_QPLIB_NQE_MAX_CNT - 1,
+ rdev->msix_entries[i + 1].ring_idx,
+ &rdev->nq[i].ring_id);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to allocate NQ fw id with rc = 0x%x",
@@ -974,7 +1015,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
return 0;
free_nq:
for (i = num_vec_created; i >= 0; i--) {
- bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
+ type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
bnxt_qplib_free_nq(&rdev->nq[i]);
}
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@@ -1228,6 +1270,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{
+ u8 type;
int rc;
if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
@@ -1251,7 +1294,8 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
+ type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
@@ -1260,6 +1304,8 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
dev_warn(rdev_to_dev(rdev),
"Failed to free MSI-X vectors: %#x", rc);
}
+
+ bnxt_re_destroy_chip_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
rc = bnxt_re_unregister_netdev(rdev);
if (rc)
@@ -1280,9 +1326,12 @@ static void bnxt_re_worker(struct work_struct *work)
static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
- int rc;
-
+ dma_addr_t *pg_map;
+ u32 db_offt, ridx;
+ int pages, vid;
bool locked;
+ u8 type;
+ int rc;
/* Acquire rtnl lock through out this function */
rtnl_lock();
@@ -1297,6 +1346,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ rc = bnxt_re_setup_chip_ctx(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to get chip context\n");
+ return -EINVAL;
+ }
+
/* Check whether VF or PF */
bnxt_re_get_sriov_func_type(rdev);
@@ -1320,21 +1375,22 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
goto fail;
}
- rc = bnxt_re_net_ring_alloc
- (rdev, rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr,
- rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count,
- HWRM_RING_ALLOC_CMPL, BNXT_QPLIB_CREQE_MAX_CNT - 1,
- rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx,
- &rdev->rcfw.creq_ring_id);
+ type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
+ pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
+ ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
+ BNXT_QPLIB_CREQE_MAX_CNT - 1,
+ ridx, &rdev->rcfw.creq_ring_id);
if (rc) {
pr_err("Failed to allocate CREQ: %#x\n", rc);
goto free_rcfw;
}
- rc = bnxt_qplib_enable_rcfw_channel
- (rdev->en_dev->pdev, &rdev->rcfw,
- rdev->msix_entries[BNXT_RE_AEQ_IDX].vector,
- rdev->msix_entries[BNXT_RE_AEQ_IDX].db_offset,
- rdev->is_virtfn, &bnxt_re_aeq_handler);
+ db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
+ vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
+ rc = bnxt_qplib_enable_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
+ vid, db_offt, rdev->is_virtfn,
+ &bnxt_re_aeq_handler);
if (rc) {
pr_err("Failed to enable RCFW channel: %#x\n", rc);
goto free_ring;
@@ -1347,7 +1403,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
if (!rdev->is_virtfn)
bnxt_re_set_resource_limits(rdev);
- rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0);
+ rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
+ bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
if (rc) {
pr_err("Failed to allocate QPLIB context: %#x\n", rc);
goto disable_rcfw;
@@ -1418,7 +1475,8 @@ free_ctx:
disable_rcfw:
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
+ type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
free_rcfw:
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index b98b054148cd..71c34d5b0ac0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -44,6 +44,7 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
+#include <linux/if_ether.h>
#include "roce_hsi.h"
@@ -244,6 +245,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
u16 type;
int budget = nq->budget;
uintptr_t q_handle;
+ bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
/* Service the NQ until empty */
raw_cons = hwq->cons;
@@ -290,7 +292,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
<< 32;
bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
- DBR_DBR_TYPE_SRQ_ARMENA);
+ DBC_DBC_TYPE_SRQ_ARMENA);
if (!nq->srqn_handler(nq,
(struct bnxt_qplib_srq *)q_handle,
nqsrqe->event))
@@ -312,7 +314,9 @@ static void bnxt_qplib_service_nq(unsigned long data)
}
if (hwq->cons != raw_cons) {
hwq->cons = raw_cons;
- NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
+ bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons,
+ hwq->max_elements, nq->ring_id,
+ gen_p5);
}
}
@@ -336,9 +340,11 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
+ bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
tasklet_disable(&nq->worker);
/* Mask h/w interrupt */
- NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+ bnxt_qplib_ring_nq_db(nq->bar_reg_iomem, nq->hwq.cons,
+ nq->hwq.max_elements, nq->ring_id, gen_p5);
/* Sync with last running IRQ handler */
synchronize_irq(nq->vector);
if (kill)
@@ -373,6 +379,7 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
int msix_vector, bool need_init)
{
+ bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
int rc;
if (nq->requested)
@@ -399,7 +406,8 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
nq->vector, nq_indx);
}
nq->requested = true;
- NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+ bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, nq->hwq.cons,
+ nq->hwq.max_elements, nq->ring_id, gen_p5);
return rc;
}
@@ -433,7 +441,8 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
rc = -ENOMEM;
goto fail;
}
- nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
+ /* Unconditionally map 8 bytes to support 57500 series */
+ nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8);
if (!nq->bar_reg_iomem) {
rc = -ENOMEM;
goto fail;
@@ -462,15 +471,17 @@ void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
{
+ u8 hwq_type;
+
nq->pdev = pdev;
if (!nq->hwq.max_elements ||
nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
-
+ hwq_type = bnxt_qplib_get_hwq_type(nq->res);
if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
&nq->hwq.max_elements,
BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_L2_CMPL))
+ PAGE_SIZE, hwq_type))
return -ENOMEM;
nq->budget = 8;
@@ -481,21 +492,19 @@ int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
{
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- struct dbr_dbr db_msg = { 0 };
void __iomem *db;
- u32 sw_prod = 0;
+ u32 sw_prod;
+ u64 val = 0;
/* Ring DB */
- sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
- HWQ_CMP(srq_hwq->prod, srq_hwq);
- db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
- DBR_DBR_INDEX_MASK);
- db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
- DBR_DBR_XID_MASK) | arm_type);
- db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
- srq->dbr_base : srq->dpi->dbr;
- wmb(); /* barrier before db ring */
- __iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
+ sw_prod = (arm_type == DBC_DBC_TYPE_SRQ_ARM) ?
+ srq->threshold : HWQ_CMP(srq_hwq->prod, srq_hwq);
+ db = (arm_type == DBC_DBC_TYPE_SRQ_ARMENA) ? srq->dbr_base :
+ srq->dpi->dbr;
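+	/* dbc_dbc format: type and xid in bits 63:32, index in bits 23:0 */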
+ val = ((srq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
+ val <<= 32;
+ val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
+ writeq(val, db);
}
int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
@@ -590,7 +599,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
srq->id = le32_to_cpu(resp.xid);
srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
if (srq->threshold)
- bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
+ bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARMENA);
srq->arm_req = false;
return 0;
@@ -614,7 +623,7 @@ int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
srq_hwq->max_elements - sw_cons + sw_prod;
if (count > srq->threshold) {
srq->arm_req = false;
- bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
} else {
/* Deferred arming */
srq->arm_req = true;
@@ -702,10 +711,10 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
srq_hwq->max_elements - sw_cons + sw_prod;
spin_unlock(&srq_hwq->lock);
/* Ring DB */
- bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
+ bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ);
if (srq->arm_req == true && count > srq->threshold) {
srq->arm_req = false;
- bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
}
done:
return rc;
@@ -853,18 +862,19 @@ exit:
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
- struct cmdq_create_qp req;
- struct creq_create_qp_resp resp;
- struct bnxt_qplib_pbl *pbl;
- struct sq_psn_search **psn_search_ptr;
unsigned long int psn_search, poff = 0;
+ struct sq_psn_search **psn_search_ptr;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
+ int i, rc, req_size, psn_sz = 0;
+ struct sq_send **hw_sq_send_ptr;
+ struct creq_create_qp_resp resp;
struct bnxt_qplib_hwq *xrrq;
- int i, rc, req_size, psn_sz;
u16 cmd_flags = 0, max_ssge;
- u32 sw_prod, qp_flags = 0;
+ struct cmdq_create_qp req;
+ struct bnxt_qplib_pbl *pbl;
+ u32 qp_flags = 0;
+ u16 max_rsge;
RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -874,8 +884,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.qp_handle = cpu_to_le64(qp->qp_handle);
/* SQ */
- psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
- sizeof(struct sq_psn_search) : 0;
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
+ psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
+ }
sq->hwq.max_elements = sq->max_wqe;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
sq->nmap, &sq->hwq.max_elements,
@@ -905,10 +918,16 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
poff = (psn_search & ~PAGE_MASK) /
BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
}
- for (i = 0; i < sq->hwq.max_elements; i++)
+ for (i = 0; i < sq->hwq.max_elements; i++) {
sq->swq[i].psn_search =
&psn_search_ptr[get_psne_pg(i + poff)]
[get_psne_idx(i + poff)];
+			/* psn_ext is used only on gen P5 chips */
+ sq->swq[i].psn_ext =
+ (struct sq_psn_search_ext *)
+ &psn_search_ptr[get_psne_pg(i + poff)]
+ [get_psne_idx(i + poff)];
+ }
}
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@@ -929,14 +948,6 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
- /* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
- hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
- [get_sqe_idx(sw_prod)];
- hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
- }
-
if (qp->scq)
req.scq_cid = cpu_to_le32(qp->scq->id);
@@ -1007,8 +1018,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.sq_fwo_sq_sge = cpu_to_le16(
((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
<< CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
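+	/* Gen P5 devices support at most 6 RQ SGEs */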
+ max_rsge = bnxt_qplib_is_chip_gen_p5(res->cctx) ? 6 : rq->max_sge;
req.rq_fwo_rq_sge = cpu_to_le16(
- ((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
+ ((max_rsge & CMDQ_CREATE_QP_RQ_SGE_MASK)
<< CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
/* ORRQ and IRRQ */
if (psn_sz) {
@@ -1053,6 +1065,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
+ qp->cctx = res->cctx;
INIT_LIST_HEAD(&qp->sq_flush);
INIT_LIST_HEAD(&qp->rq_flush);
rcfw->qp_tbl[qp->id].qp_id = qp->id;
@@ -1494,19 +1507,16 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *sq = &qp->sq;
- struct dbr_dbr db_msg = { 0 };
u32 sw_prod;
+ u64 val = 0;
+ val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
+ DBC_DBC_TYPE_SQ);
+ val <<= 32;
sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
-
- db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
- DBR_DBR_INDEX_MASK);
- db_msg.type_xid =
- cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
- DBR_DBR_TYPE_SQ);
+ val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
/* Flush all the WQE writes to HW */
- wmb();
- __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
+ writeq(val, qp->dpi->dbr);
}
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
@@ -1617,7 +1627,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
((offsetof(typeof(*sqe), data) + 15) >> 4);
sqe->inv_key_or_imm_data = cpu_to_le32(
wqe->send.inv_key);
- if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
+ if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
+ qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
sqe->q_key = cpu_to_le32(wqe->send.q_key);
sqe->dst_qp = cpu_to_le32(
wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
@@ -1741,14 +1752,26 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
swq->next_psn = sq->psn & BTH_PSN_MASK;
if (swq->psn_search) {
- swq->psn_search->opcode_start_psn = cpu_to_le32(
- ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
- SQ_PSN_SEARCH_START_PSN_MASK) |
- ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
- SQ_PSN_SEARCH_OPCODE_MASK));
- swq->psn_search->flags_next_psn = cpu_to_le32(
- ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
- SQ_PSN_SEARCH_NEXT_PSN_MASK));
+ u32 opcd_spsn;
+ u32 flg_npsn;
+
+ opcd_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
+ SQ_PSN_SEARCH_START_PSN_MASK);
+ opcd_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
+ SQ_PSN_SEARCH_OPCODE_MASK);
+ flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_PSN_SEARCH_NEXT_PSN_MASK);
+ if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
+ swq->psn_ext->opcode_start_psn =
+ cpu_to_le32(opcd_spsn);
+ swq->psn_ext->flags_next_psn =
+ cpu_to_le32(flg_npsn);
+ } else {
+ swq->psn_search->opcode_start_psn =
+ cpu_to_le32(opcd_spsn);
+ swq->psn_search->flags_next_psn =
+ cpu_to_le32(flg_npsn);
+ }
}
queue_err:
if (sch_handler) {
@@ -1785,19 +1808,16 @@ done:
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *rq = &qp->rq;
- struct dbr_dbr db_msg = { 0 };
u32 sw_prod;
+ u64 val = 0;
+ val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
+ DBC_DBC_TYPE_RQ);
+ val <<= 32;
sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
- db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
- DBR_DBR_INDEX_MASK);
- db_msg.type_xid =
- cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
- DBR_DBR_TYPE_RQ);
-
+ val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
/* Flush the writes to HW Rx WQE before the ringing Rx DB */
- wmb();
- __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
+ writeq(val, qp->dpi->dbr);
}
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
@@ -1881,32 +1901,28 @@ done:
/* Spinlock must be held */
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
{
- struct dbr_dbr db_msg = { 0 };
+ u64 val = 0;
- db_msg.type_xid =
- cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
- DBR_DBR_TYPE_CQ_ARMENA);
+ val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
+ DBC_DBC_TYPE_CQ_ARMENA;
+ val <<= 32;
/* Flush memory writes before enabling the CQ */
- wmb();
- __iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
+ writeq(val, cq->dbr_base);
}
static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
- struct dbr_dbr db_msg = { 0 };
u32 sw_cons;
+ u64 val = 0;
/* Ring DB */
+ val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
+ val <<= 32;
sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
- db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
- DBR_DBR_INDEX_MASK);
- db_msg.type_xid =
- cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
- arm_type);
+ val |= (sw_cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
/* flush memory writes before arming the CQ */
- wmb();
- __iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
+ writeq(val, cq->dpi->dbr);
}
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
@@ -2053,6 +2069,7 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
opcode = CQ_BASE_CQE_TYPE_RES_RC;
break;
case CMDQ_CREATE_QP_TYPE_UD:
+ case CMDQ_CREATE_QP_TYPE_GSI:
opcode = CQ_BASE_CQE_TYPE_RES_UD;
break;
}
@@ -2125,7 +2142,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
sq->send_phantom = true;
/* TODO: Only ARM if the previous SQE is ARMALL */
- bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
+ bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
rc = -EAGAIN;
goto out;
@@ -2410,12 +2427,14 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
}
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
- cqe->length = le32_to_cpu(hwcqe->length);
+ cqe->length = (u32)le16_to_cpu(hwcqe->length);
+ cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
cqe->flags = le16_to_cpu(hwcqe->flags);
cqe->status = hwcqe->status;
cqe->qp_handle = (u64)(unsigned long)qp;
- memcpy(cqe->smac, hwcqe->src_mac, 6);
+	/* FIXME: endianness fix needed for smac */
+ memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
@@ -2794,7 +2813,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
}
if (cq->hwq.cons != raw_cons) {
cq->hwq.cons = raw_cons;
- bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
+ bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ);
}
exit:
return num_cqes - budget;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 72352ca80ace..3f618b5f1f06 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -106,6 +106,7 @@ struct bnxt_qplib_swq {
u32 start_psn;
u32 next_psn;
struct sq_psn_search *psn_search;
+ struct sq_psn_search_ext *psn_ext;
};
struct bnxt_qplib_swqe {
@@ -254,6 +255,7 @@ struct bnxt_qplib_q {
struct bnxt_qplib_qp {
struct bnxt_qplib_pd *pd;
struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_chip_ctx *cctx;
u64 qp_handle;
#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
u32 id;
@@ -347,6 +349,7 @@ struct bnxt_qplib_cqe {
u8 type;
u8 opcode;
u32 length;
+ u16 cfa_meta;
u64 wr_id;
union {
__be32 immdata;
@@ -432,13 +435,47 @@ struct bnxt_qplib_cq {
#define NQ_DB_CP_FLAGS (NQ_DB_KEY_CP | \
NQ_DB_IDX_VALID | \
NQ_DB_IRQ_DIS)
-#define NQ_DB_REARM(db, raw_cons, cp_bit) \
- writel(NQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
-#define NQ_DB(db, raw_cons, cp_bit) \
- writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
+
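+/* Gen P5 devices ring the NQ via the 64-bit DBC doorbell; legacy chips
+ * keep the 32-bit completion-ring write.  The _rearm variant re-enables
+ * the NQ interrupt while the plain variant leaves it masked.
+ */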
+static inline void bnxt_qplib_ring_nq_db64(void __iomem *db, u32 index,
+ u32 xid, bool arm)
+{
+ u64 val;
+
+ val = xid & DBC_DBC_XID_MASK;
+ val |= DBC_DBC_PATH_ROCE;
+ val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
+ val <<= 32;
+ val |= index & DBC_DBC_INDEX_MASK;
+ writeq(val, db);
+}
+
+static inline void bnxt_qplib_ring_nq_db_rearm(void __iomem *db, u32 raw_cons,
+ u32 max_elements, u32 xid,
+ bool gen_p5)
+{
+ u32 index = raw_cons & (max_elements - 1);
+
+ if (gen_p5)
+ bnxt_qplib_ring_nq_db64(db, index, xid, true);
+ else
+ writel(NQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK), db);
+}
+
+static inline void bnxt_qplib_ring_nq_db(void __iomem *db, u32 raw_cons,
+ u32 max_elements, u32 xid,
+ bool gen_p5)
+{
+ u32 index = raw_cons & (max_elements - 1);
+
+ if (gen_p5)
+ bnxt_qplib_ring_nq_db64(db, index, xid, false);
+ else
+ writel(NQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK), db);
+}
struct bnxt_qplib_nq {
struct pci_dev *pdev;
+ struct bnxt_qplib_res *res;
int vector;
cpumask_t mask;
@@ -448,7 +485,7 @@ struct bnxt_qplib_nq {
struct bnxt_qplib_hwq hwq;
u16 bar_reg;
- u16 bar_reg_off;
+ u32 bar_reg_off;
u16 ring_id;
void __iomem *bar_reg_iomem;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 19551aa43850..c6461e957078 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -359,11 +359,12 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
static void bnxt_qplib_service_creq(unsigned long data)
{
struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
+ bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
struct bnxt_qplib_hwq *creq = &rcfw->creq;
+ u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
struct creq_base *creqe, **creq_ptr;
u32 sw_cons, raw_cons;
unsigned long flags;
- u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
/* Service the CREQ until budget is over */
spin_lock_irqsave(&creq->lock, flags);
@@ -407,8 +408,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
if (creq->cons != raw_cons) {
creq->cons = raw_cons;
- CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
- creq->max_elements);
+ bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
+ raw_cons, creq->max_elements,
+ rcfw->creq_ring_id, gen_p5);
}
spin_unlock_irqrestore(&creq->lock, flags);
}
@@ -480,11 +482,13 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
RCFW_DBR_BASE_PAGE_SHIFT);
/*
- * VFs need not setup the HW context area, PF
+	 * Gen P5 devices don't require this allocation,
+	 * as the L2 driver sets up the same for RoCE too.
+	 * Also, VFs need not set up the HW context area; the PF
* shall setup this area for VF. Skipping the
* HW programming
*/
- if (is_virtfn)
+ if (is_virtfn || bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
goto skip_ctx_setup;
level = ctx->qpc_tbl.level;
@@ -560,12 +564,15 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_ctx *ctx,
int qp_tbl_sz)
{
+ u8 hwq_type;
+
rcfw->pdev = pdev;
rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
+ hwq_type = bnxt_qplib_get_hwq_type(rcfw->res);
if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
&rcfw->creq.max_elements,
- BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
- HWQ_TYPE_L2_CMPL)) {
+ BNXT_QPLIB_CREQE_UNITS,
+ 0, PAGE_SIZE, hwq_type)) {
dev_err(&rcfw->pdev->dev,
"HW channel CREQ allocation failed\n");
goto fail;
@@ -607,10 +614,13 @@ fail:
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
+ bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
+
tasklet_disable(&rcfw->worker);
/* Mask h/w interrupts */
- CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
- rcfw->creq.max_elements);
+ bnxt_qplib_ring_creq_db(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+ rcfw->creq.max_elements, rcfw->creq_ring_id,
+ gen_p5);
/* Sync with last running IRQ-handler */
synchronize_irq(rcfw->vector);
if (kill)
@@ -647,6 +657,7 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
bool need_init)
{
+ bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
int rc;
if (rcfw->requested)
@@ -663,8 +674,9 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
if (rc)
return rc;
rcfw->requested = true;
- CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
- rcfw->creq.max_elements);
+ bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
+ rcfw->creq.cons, rcfw->creq.max_elements,
+ rcfw->creq_ring_id, gen_p5);
return 0;
}
@@ -684,8 +696,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
/* General */
rcfw->seq_num = 0;
set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
- bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth *
- sizeof(unsigned long));
+ bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
if (!rcfw->cmdq_bitmap)
return -ENOMEM;
@@ -718,8 +729,9 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
dev_err(&rcfw->pdev->dev,
"CREQ BAR region %d resc start is 0!\n",
rcfw->creq_bar_reg);
+ /* Unconditionally map 8 bytes to support 57500 series */
rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
- 4);
+ 8);
if (!rcfw->creq_bar_reg_iomem) {
dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
rcfw->creq_bar_reg);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index be0ef0e8c53e..2138533bb642 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -157,10 +157,46 @@ static inline u32 get_creq_idx(u32 val)
#define CREQ_DB_CP_FLAGS (CREQ_DB_KEY_CP | \
CREQ_DB_IDX_VALID | \
CREQ_DB_IRQ_DIS)
-#define CREQ_DB_REARM(db, raw_cons, cp_bit) \
- writel(CREQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
-#define CREQ_DB(db, raw_cons, cp_bit) \
- writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
+
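+/* The CREQ uses the NQ doorbell type on gen P5 devices */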
+static inline void bnxt_qplib_ring_creq_db64(void __iomem *db, u32 index,
+ u32 xid, bool arm)
+{
+ u64 val = 0;
+
+ val = xid & DBC_DBC_XID_MASK;
+ val |= DBC_DBC_PATH_ROCE;
+ val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
+ val <<= 32;
+ val |= index & DBC_DBC_INDEX_MASK;
+
+ writeq(val, db);
+}
+
+static inline void bnxt_qplib_ring_creq_db_rearm(void __iomem *db, u32 raw_cons,
+ u32 max_elements, u32 xid,
+ bool gen_p5)
+{
+ u32 index = raw_cons & (max_elements - 1);
+
+ if (gen_p5)
+ bnxt_qplib_ring_creq_db64(db, index, xid, true);
+ else
+ writel(CREQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK),
+ db);
+}
+
+static inline void bnxt_qplib_ring_creq_db(void __iomem *db, u32 raw_cons,
+ u32 max_elements, u32 xid,
+ bool gen_p5)
+{
+ u32 index = raw_cons & (max_elements - 1);
+
+	if (gen_p5)
+		/* plain ring without re-arm: leave the CREQ interrupt masked */
+		bnxt_qplib_ring_creq_db64(db, index, xid, false);
+ else
+ writel(CREQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK),
+ db);
+}
#define CREQ_ENTRY_POLL_BUDGET 0x100
@@ -187,6 +223,7 @@ struct bnxt_qplib_qp_node {
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
+ struct bnxt_qplib_res *res;
int vector;
struct tasklet_struct worker;
bool requested;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 57d4951679cb..0bc24f934829 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -85,7 +85,7 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
struct scatterlist *sghead, u32 pages, u32 pg_size)
{
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
bool is_umem = false;
int i;
@@ -116,13 +116,11 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
} else {
i = 0;
is_umem = true;
- for_each_sg(sghead, sg, pages, i) {
- pbl->pg_map_arr[i] = sg_dma_address(sg);
- pbl->pg_arr[i] = sg_virt(sg);
- if (!pbl->pg_arr[i])
- goto fail;
-
+ for_each_sg_dma_page (sghead, &sg_iter, pages, 0) {
+ pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+ pbl->pg_arr[i] = NULL;
pbl->pg_count++;
+ i++;
}
}
@@ -330,13 +328,13 @@ void bnxt_qplib_free_ctx(struct pci_dev *pdev,
*/
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
struct bnxt_qplib_ctx *ctx,
- bool virt_fn)
+ bool virt_fn, bool is_p5)
{
int i, j, k, rc = 0;
int fnz_idx = -1;
__le64 **pbl_ptr;
- if (virt_fn)
+ if (virt_fn || is_p5)
goto stats_alloc;
/* QPC Tables */
@@ -762,7 +760,11 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
{
memset(stats, 0, sizeof(*stats));
stats->fw_id = -1;
- stats->size = sizeof(struct ctx_hw_stats);
+	/* 128-byte aligned context memory is required only for 57500.
+	 * Making this unconditional does no harm on earlier
+	 * generations.
+	 */
+ stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
&stats->dma_map, GFP_KERNEL);
if (!stats->dma) {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 1e80aa7bbcce..32cebd0f1436 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -180,12 +180,20 @@ struct bnxt_qplib_ctx {
u64 hwrm_intf_ver;
};
+struct bnxt_qplib_chip_ctx {
+ u16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+};
+
+#define CHIP_NUM_57500 0x1750
+
struct bnxt_qplib_res {
struct pci_dev *pdev;
+ struct bnxt_qplib_chip_ctx *cctx;
struct net_device *netdev;
struct bnxt_qplib_rcfw *rcfw;
-
struct bnxt_qplib_pd_tbl pd_tbl;
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_pkey_tbl pkey_tbl;
@@ -193,6 +201,24 @@ struct bnxt_qplib_res {
bool prio;
};
+static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return (cctx->chip_num == CHIP_NUM_57500);
+}
+
+static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
+{
+ return bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+ HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
+}
+
+static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return bnxt_qplib_is_chip_gen_p5(cctx) ?
+ RING_ALLOC_REQ_RING_TYPE_NQ :
+ RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
+}
+
#define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member)
@@ -226,5 +252,5 @@ void bnxt_qplib_free_ctx(struct pci_dev *pdev,
struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
struct bnxt_qplib_ctx *ctx,
- bool virt_fn);
+ bool virt_fn, bool is_p5);
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index efa0f2949dc7..e9c53e406404 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -119,7 +119,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
* reporting the max number
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
- attr->max_qp_sges = sb->max_sge;
+ attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx) ?
+ 6 : sb->max_sge;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
attr->max_cq_sges = attr->max_qp_sges;
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 8a9ead419ac2..e4b09e7c2175 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -49,11 +49,11 @@ struct cmpl_doorbell {
#define CMPL_DOORBELL_IDX_SFT 0
#define CMPL_DOORBELL_RESERVED_MASK 0x3000000UL
#define CMPL_DOORBELL_RESERVED_SFT 24
- #define CMPL_DOORBELL_IDX_VALID 0x4000000UL
+ #define CMPL_DOORBELL_IDX_VALID 0x4000000UL
#define CMPL_DOORBELL_MASK 0x8000000UL
#define CMPL_DOORBELL_KEY_MASK 0xf0000000UL
#define CMPL_DOORBELL_KEY_SFT 28
- #define CMPL_DOORBELL_KEY_CMPL (0x2UL << 28)
+ #define CMPL_DOORBELL_KEY_CMPL (0x2UL << 28)
};
/* Status Door Bell Format (4 bytes) */
@@ -71,46 +71,56 @@ struct status_doorbell {
/* RoCE Host Structures */
/* Doorbell Structures */
-/* 64b Doorbell Format (8 bytes) */
-struct dbr_dbr {
- __le32 index;
- #define DBR_DBR_INDEX_MASK 0xfffffUL
- #define DBR_DBR_INDEX_SFT 0
- #define DBR_DBR_RESERVED12_MASK 0xfff00000UL
- #define DBR_DBR_RESERVED12_SFT 20
- __le32 type_xid;
- #define DBR_DBR_XID_MASK 0xfffffUL
- #define DBR_DBR_XID_SFT 0
- #define DBR_DBR_RESERVED8_MASK 0xff00000UL
- #define DBR_DBR_RESERVED8_SFT 20
- #define DBR_DBR_TYPE_MASK 0xf0000000UL
- #define DBR_DBR_TYPE_SFT 28
- #define DBR_DBR_TYPE_SQ (0x0UL << 28)
- #define DBR_DBR_TYPE_RQ (0x1UL << 28)
- #define DBR_DBR_TYPE_SRQ (0x2UL << 28)
- #define DBR_DBR_TYPE_SRQ_ARM (0x3UL << 28)
- #define DBR_DBR_TYPE_CQ (0x4UL << 28)
- #define DBR_DBR_TYPE_CQ_ARMSE (0x5UL << 28)
- #define DBR_DBR_TYPE_CQ_ARMALL (0x6UL << 28)
- #define DBR_DBR_TYPE_CQ_ARMENA (0x7UL << 28)
- #define DBR_DBR_TYPE_SRQ_ARMENA (0x8UL << 28)
- #define DBR_DBR_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
- #define DBR_DBR_TYPE_NULL (0xfUL << 28)
-};
-
-/* 32b Doorbell Format (4 bytes) */
-struct dbr_dbr32 {
- __le32 type_abs_incr_xid;
- #define DBR_DBR32_XID_MASK 0xfffffUL
- #define DBR_DBR32_XID_SFT 0
- #define DBR_DBR32_RESERVED4_MASK 0xf00000UL
- #define DBR_DBR32_RESERVED4_SFT 20
- #define DBR_DBR32_INCR_MASK 0xf000000UL
- #define DBR_DBR32_INCR_SFT 24
- #define DBR_DBR32_ABS 0x10000000UL
- #define DBR_DBR32_TYPE_MASK 0xe0000000UL
- #define DBR_DBR32_TYPE_SFT 29
- #define DBR_DBR32_TYPE_SQ (0x0UL << 29)
+/* dbc_dbc (size:64b/8B) */
+struct dbc_dbc {
+ __le32 index;
+ #define DBC_DBC_INDEX_MASK 0xffffffUL
+ #define DBC_DBC_INDEX_SFT 0
+ __le32 type_path_xid;
+ #define DBC_DBC_XID_MASK 0xfffffUL
+ #define DBC_DBC_XID_SFT 0
+ #define DBC_DBC_PATH_MASK 0x3000000UL
+ #define DBC_DBC_PATH_SFT 24
+ #define DBC_DBC_PATH_ROCE (0x0UL << 24)
+ #define DBC_DBC_PATH_L2 (0x1UL << 24)
+ #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
+ #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
+ #define DBC_DBC_DEBUG_TRACE 0x8000000UL
+ #define DBC_DBC_TYPE_MASK 0xf0000000UL
+ #define DBC_DBC_TYPE_SFT 28
+ #define DBC_DBC_TYPE_SQ (0x0UL << 28)
+ #define DBC_DBC_TYPE_RQ (0x1UL << 28)
+ #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
+ #define DBC_DBC_TYPE_CQ (0x4UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
+ #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
+ #define DBC_DBC_TYPE_NQ (0xaUL << 28)
+ #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
+ #define DBC_DBC_TYPE_NULL (0xfUL << 28)
+ #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
+};
+
+/* dbc_dbc32 (size:32b/4B) */
+struct dbc_dbc32 {
+ __le32 type_abs_incr_xid;
+ #define DBC_DBC32_XID_MASK 0xfffffUL
+ #define DBC_DBC32_XID_SFT 0
+ #define DBC_DBC32_PATH_MASK 0xc00000UL
+ #define DBC_DBC32_PATH_SFT 22
+ #define DBC_DBC32_PATH_ROCE (0x0UL << 22)
+ #define DBC_DBC32_PATH_L2 (0x1UL << 22)
+ #define DBC_DBC32_PATH_LAST DBC_DBC32_PATH_L2
+ #define DBC_DBC32_INCR_MASK 0xf000000UL
+ #define DBC_DBC32_INCR_SFT 24
+ #define DBC_DBC32_ABS 0x10000000UL
+ #define DBC_DBC32_TYPE_MASK 0xe0000000UL
+ #define DBC_DBC32_TYPE_SFT 29
+ #define DBC_DBC32_TYPE_SQ (0x0UL << 29)
+ #define DBC_DBC32_TYPE_LAST DBC_DBC32_TYPE_SQ
};
/* SQ WQE Structures */
@@ -149,7 +159,24 @@ struct sq_psn_search {
#define SQ_PSN_SEARCH_NEXT_PSN_MASK 0xffffffUL
#define SQ_PSN_SEARCH_NEXT_PSN_SFT 0
#define SQ_PSN_SEARCH_FLAGS_MASK 0xff000000UL
- #define SQ_PSN_SEARCH_FLAGS_SFT 24
+ #define SQ_PSN_SEARCH_FLAGS_SFT 24
+};
+
+/* sq_psn_search_ext (size:128b/16B) */
+struct sq_psn_search_ext {
+ __le32 opcode_start_psn;
+ #define SQ_PSN_SEARCH_EXT_START_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_EXT_START_PSN_SFT 0
+ #define SQ_PSN_SEARCH_EXT_OPCODE_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_EXT_OPCODE_SFT 24
+ __le32 flags_next_psn;
+ #define SQ_PSN_SEARCH_EXT_NEXT_PSN_MASK 0xffffffUL
+ #define SQ_PSN_SEARCH_EXT_NEXT_PSN_SFT 0
+ #define SQ_PSN_SEARCH_EXT_FLAGS_MASK 0xff000000UL
+ #define SQ_PSN_SEARCH_EXT_FLAGS_SFT 24
+ __le16 start_slot_idx;
+ __le16 reserved16;
+ __le32 reserved32;
};
/* Send SQ WQE (40 bytes) */
@@ -505,22 +532,24 @@ struct cq_res_rc {
/* Responder UD CQE (32 bytes) */
struct cq_res_ud {
- __le32 length;
+ __le16 length;
#define CQ_RES_UD_LENGTH_MASK 0x3fffUL
#define CQ_RES_UD_LENGTH_SFT 0
- #define CQ_RES_UD_RESERVED18_MASK 0xffffc000UL
- #define CQ_RES_UD_RESERVED18_SFT 14
+ __le16 cfa_metadata;
+ #define CQ_RES_UD_CFA_METADATA_VID_MASK 0xfffUL
+ #define CQ_RES_UD_CFA_METADATA_VID_SFT 0
+ #define CQ_RES_UD_CFA_METADATA_DE 0x1000UL
+ #define CQ_RES_UD_CFA_METADATA_PRI_MASK 0xe000UL
+ #define CQ_RES_UD_CFA_METADATA_PRI_SFT 13
__le32 imm_data;
__le64 qp_handle;
__le16 src_mac[3];
__le16 src_qp_low;
u8 cqe_type_toggle;
- #define CQ_RES_UD_TOGGLE 0x1UL
- #define CQ_RES_UD_CQE_TYPE_MASK 0x1eUL
- #define CQ_RES_UD_CQE_TYPE_SFT 1
+ #define CQ_RES_UD_TOGGLE 0x1UL
+ #define CQ_RES_UD_CQE_TYPE_MASK 0x1eUL
+ #define CQ_RES_UD_CQE_TYPE_SFT 1
#define CQ_RES_UD_CQE_TYPE_RES_UD (0x2UL << 1)
- #define CQ_RES_UD_RESERVED3_MASK 0xe0UL
- #define CQ_RES_UD_RESERVED3_SFT 5
u8 status;
#define CQ_RES_UD_STATUS_OK 0x0UL
#define CQ_RES_UD_STATUS_LOCAL_ACCESS_ERROR 0x1UL
@@ -536,18 +565,30 @@ struct cq_res_ud {
#define CQ_RES_UD_FLAGS_SRQ_SRQ (0x1UL << 0)
#define CQ_RES_UD_FLAGS_SRQ_LAST CQ_RES_UD_FLAGS_SRQ_SRQ
#define CQ_RES_UD_FLAGS_IMM 0x2UL
- #define CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK 0xcUL
- #define CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT 2
- #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V1 (0x0UL << 2)
- #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 2)
- #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 2)
+ #define CQ_RES_UD_FLAGS_UNUSED_MASK 0xcUL
+ #define CQ_RES_UD_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK 0x30UL
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT 4
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4)
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4)
+ #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4)
#define CQ_RES_UD_FLAGS_ROCE_IP_VER_LAST \
CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6
+ #define CQ_RES_UD_FLAGS_META_FORMAT_MASK 0x3c0UL
+ #define CQ_RES_UD_FLAGS_META_FORMAT_SFT 6
+ #define CQ_RES_UD_FLAGS_META_FORMAT_NONE (0x0UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_VLAN (0x1UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6)
+ #define CQ_RES_UD_FLAGS_META_FORMAT_LAST \
+ CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET
+ #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_MASK 0xc00UL
+ #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_SFT 10
+
__le32 src_qp_high_srq_or_rq_wr_id;
#define CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL
#define CQ_RES_UD_SRQ_OR_RQ_WR_ID_SFT 0
- #define CQ_RES_UD_RESERVED4_MASK 0xf00000UL
- #define CQ_RES_UD_RESERVED4_SFT 20
#define CQ_RES_UD_SRC_QP_HIGH_MASK 0xff000000UL
#define CQ_RES_UD_SRC_QP_HIGH_SFT 24
};
@@ -983,6 +1024,7 @@ struct cmdq_create_qp {
#define CMDQ_CREATE_QP_TYPE_RC 0x2UL
#define CMDQ_CREATE_QP_TYPE_UD 0x4UL
#define CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE 0x6UL
+ #define CMDQ_CREATE_QP_TYPE_GSI 0x7UL
u8 sq_pg_size_sq_lvl;
#define CMDQ_CREATE_QP_SQ_LVL_MASK 0xfUL
#define CMDQ_CREATE_QP_SQ_LVL_SFT 0
@@ -2719,6 +2761,8 @@ struct creq_query_func_resp_sb {
__le16 max_srq;
__le32 max_gid;
__le32 tqm_alloc_reqs[12];
+ __le32 max_dpi;
+ __le32 reserved_32;
};
/* Set resources command response (16 bytes) */
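The new dbc_dbc layout above widens the doorbell index to 24 bits and adds PATH and DEBUG_TRACE fields for the gen-P5 chips. As a minimal illustration of how such a 64-bit doorbell value packs together (a userspace sketch — the helper name and sample values are not from this patch, and the little-endian store the hardware expects is omitted):

#include <stdint.h>
#include <stdio.h>

#define DBC_DBC_INDEX_MASK	0xffffffUL
#define DBC_DBC_XID_MASK	0xfffffUL
#define DBC_DBC_PATH_ROCE	(0x0UL << 24)
#define DBC_DBC_TYPE_SQ		(0x0UL << 28)

/* index fills the low 32 bits, type/path/xid the high 32 bits */
static uint64_t dbc_dbc_value(uint32_t prod_idx, uint32_t xid, uint32_t type)
{
	uint32_t index = prod_idx & DBC_DBC_INDEX_MASK;
	uint32_t type_path_xid = type | DBC_DBC_PATH_ROCE |
				 (xid & DBC_DBC_XID_MASK);

	return ((uint64_t)type_path_xid << 32) | index;
}

int main(void)
{
	printf("db=%#llx\n",
	       (unsigned long long)dbc_dbc_value(5, 0x12, DBC_DBC_TYPE_SQ));
	return 0;
}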
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
index 66fe0917aba0..34bb86a6ae3a 100644
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ b/drivers/infiniband/hw/cxgb3/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb3
obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 591de319c178..fb03bc492ef7 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -146,7 +146,7 @@ static void open_rnic_dev(struct t3cdev *tdev)
pr_debug("%s t3cdev %p\n", __func__, tdev);
pr_info_once("Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION);
- rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
+ rnicp = ib_alloc_device(iwch_dev, ibdev);
if (!rnicp) {
pr_err("Cannot allocate ib device\n");
return;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b34b1a1bd94b..4accf7b3dcf2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -53,6 +53,7 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
#include "cxio_hal.h"
#include "iwch.h"
@@ -61,7 +62,7 @@
#include <rdma/cxgb3-abi.h>
#include "common.h"
-static int iwch_dealloc_ucontext(struct ib_ucontext *context)
+static void iwch_dealloc_ucontext(struct ib_ucontext *context)
{
struct iwch_dev *rhp = to_iwch_dev(context->device);
struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
@@ -71,24 +72,20 @@ static int iwch_dealloc_ucontext(struct ib_ucontext *context)
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
- kfree(ucontext);
- return 0;
}
-static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
+ struct ib_udata *udata)
{
- struct iwch_ucontext *context;
+ struct ib_device *ibdev = ucontext->device;
+ struct iwch_ucontext *context = to_iwch_ucontext(ucontext);
struct iwch_dev *rhp = to_iwch_dev(ibdev);
pr_debug("%s ibdev %p\n", __func__, ibdev);
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
cxio_init_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
- return &context->ibucontext;
+ return 0;
}
static int iwch_destroy_cq(struct ib_cq *ib_cq)
@@ -370,7 +367,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return ret;
}
-static int iwch_deallocate_pd(struct ib_pd *pd)
+static void iwch_deallocate_pd(struct ib_pd *pd)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
@@ -379,15 +376,13 @@ static int iwch_deallocate_pd(struct ib_pd *pd)
rhp = php->rhp;
pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
- kfree(php);
- return 0;
}
-static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct iwch_pd *php;
+ struct iwch_pd *php = to_iwch_pd(pd);
+ struct ib_device *ibdev = pd->device;
u32 pdid;
struct iwch_dev *rhp;
@@ -395,12 +390,8 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
rhp = (struct iwch_dev *) ibdev;
pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
if (!pdid)
- return ERR_PTR(-EINVAL);
- php = kzalloc(sizeof(*php), GFP_KERNEL);
- if (!php) {
- cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
- return ERR_PTR(-ENOMEM);
- }
+ return -EINVAL;
+
php->pdid = pdid;
php->rhp = rhp;
if (context) {
@@ -408,11 +399,11 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
iwch_deallocate_pd(&php->ibpd);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
}
pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
- return &php->ibpd;
+ return 0;
}
static int iwch_dereg_mr(struct ib_mr *ib_mr)
@@ -522,14 +513,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
{
__be64 *pages;
- int shift, n, len;
- int i, k, entry;
+ int shift, n, i;
int err = 0;
struct iwch_dev *rhp;
struct iwch_pd *php;
struct iwch_mr *mhp;
struct iwch_reg_user_mr_resp uresp;
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
pr_debug("%s ib_pd %p\n", __func__, pd);
php = to_iwch_pd(pd);
@@ -540,14 +530,14 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ mhp->umem = ib_umem_get(udata, start, length, acc, 0);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
kfree(mhp);
return ERR_PTR(err);
}
- shift = mhp->umem->page_shift;
+ shift = PAGE_SHIFT;
n = mhp->umem->nmap;
@@ -563,19 +553,15 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
i = n = 0;
- for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
- len = sg_dma_len(sg) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = cpu_to_be64(sg_dma_address(sg) +
- (k << shift));
- if (i == PAGE_SIZE / sizeof *pages) {
- err = iwch_write_pbl(mhp, pages, i, n);
- if (err)
- goto pbl_done;
- n += i;
- i = 0;
- }
- }
+ for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
+ pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
+ if (i == PAGE_SIZE / sizeof *pages) {
+ err = iwch_write_pbl(mhp, pages, i, n);
+ if (err)
+ goto pbl_done;
+ n += i;
+ i = 0;
+ }
}
if (i)
@@ -836,7 +822,8 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
* Kernel users need more wq space for fastreg WRs which can take
* 2 WR fragments.
*/
- ucontext = udata ? to_iwch_ucontext(pd->uobject->context) : NULL;
+ ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
+ ibucontext);
if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
wqsize = roundup_pow_of_two(rqsize +
roundup_pow_of_two(attrs->cap.max_send_wr * 2));
@@ -1130,8 +1117,9 @@ static int iwch_query_port(struct ib_device *ibdev,
static ssize_t hw_rev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
- ibdev.dev);
+ struct iwch_dev *iwch_dev =
+ rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
+
pr_debug("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
@@ -1140,8 +1128,8 @@ static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
- ibdev.dev);
+ struct iwch_dev *iwch_dev =
+ rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
struct ethtool_drvinfo info;
struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
@@ -1154,8 +1142,9 @@ static DEVICE_ATTR_RO(hca_type);
static ssize_t board_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
- ibdev.dev);
+ struct iwch_dev *iwch_dev =
+ rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
+
pr_debug("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
iwch_dev->rdev.rnic_info.pdev->device);
@@ -1348,6 +1337,8 @@ static const struct ib_device_ops iwch_dev_ops = {
.reg_user_mr = iwch_reg_user_mr,
.req_notify_cq = iwch_arm_cq,
.resize_cq = iwch_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
};
int iwch_register_device(struct iwch_dev *dev)
@@ -1391,7 +1382,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
- dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
+ dev->ibdev.iwcm = kzalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
return -ENOMEM;
@@ -1409,7 +1400,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.driver_id = RDMA_DRIVER_CXGB3;
rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
- ret = ib_register_device(&dev->ibdev, "cxgb3_%d", NULL);
+ ret = ib_register_device(&dev->ibdev, "cxgb3_%d");
if (ret)
kfree(dev->ibdev.iwcm);
return ret;
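The reg_user_mr conversion above replaces the nested for_each_sg() page-splitting loop with for_each_sg_dma_page(), which yields one PAGE_SIZE-sized DMA address per iteration regardless of how the scatterlist coalesced the pages. Reduced to the bare idiom (a sketch only — the helper name is illustrative, and the caller is assumed to have sized the pages array for the region's full page count):

/* Collect one DMA address per PAGE_SIZE chunk of a umem. */
static void fill_pbl_from_umem(struct ib_umem *umem, __be64 *pages)
{
	struct sg_dma_page_iter sg_iter;
	int i = 0;

	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0)
		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
}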
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index 9edd92023e18..31a87d90a40b 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -1,5 +1,5 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
-ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 8221813219e5..4d232bdf9e97 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -655,7 +655,33 @@ static int send_halfclose(struct c4iw_ep *ep)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-static int send_abort(struct c4iw_ep *ep)
+static void read_tcb(struct c4iw_ep *ep)
+{
+ struct sk_buff *skb;
+ struct cpl_get_tcb *req;
+ int wrlen = roundup(sizeof(*req), 16);
+
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (WARN_ON(!skb))
+ return;
+
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+ req = (struct cpl_get_tcb *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
+ req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));
+
+ /*
+ * keep a ref on the ep so the tcb is not unlocked before this
+ * cpl completes. The ref is released in read_tcb_rpl().
+ */
+ c4iw_get_ep(&ep->com);
+ if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
+ c4iw_put_ep(&ep->com);
+}
+
+static int send_abort_req(struct c4iw_ep *ep)
{
u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
@@ -670,6 +696,17 @@ static int send_abort(struct c4iw_ep *ep)
return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
+static int send_abort(struct c4iw_ep *ep)
+{
+ if (!ep->com.qp || !ep->com.qp->srq) {
+ send_abort_req(ep);
+ return 0;
+ }
+ set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
+ read_tcb(ep);
+ return 0;
+}
+
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req = NULL;
@@ -1851,14 +1888,11 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
-static void complete_cached_srq_buffers(struct c4iw_ep *ep,
- __be32 srqidx_status)
+static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
{
enum chip_type adapter_type;
- u32 srqidx;
adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(srqidx_status));
/*
* If this TCB had a srq buffer cached, then we must complete
@@ -1876,6 +1910,7 @@ static void complete_cached_srq_buffers(struct c4iw_ep *ep,
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
+ u32 srqidx;
struct c4iw_ep *ep;
struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
int release = 0;
@@ -1887,7 +1922,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
- complete_cached_srq_buffers(ep, rpl->srqidx_status);
+ if (ep->com.qp && ep->com.qp->srq) {
+ srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status));
+ complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
+ }
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
@@ -1903,8 +1941,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);
- if (release)
+ if (release) {
+ close_complete_upcall(ep, -ECONNRESET);
release_ep_resources(ep);
+ }
c4iw_put_ep(&ep->com);
return 0;
}
@@ -2072,7 +2112,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
} else {
pdev = get_real_dev(n->dev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, pdev, 0);
+ n, pdev, rt_tos2priority(tos));
if (!ep->l2t)
goto out;
ep->mtu = dst_mtu(dst);
@@ -2161,7 +2201,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
laddr6->sin6_addr.s6_addr,
raddr6->sin6_addr.s6_addr,
laddr6->sin6_port,
- raddr6->sin6_port, 0,
+ raddr6->sin6_port,
+ ep->com.cm_id->tos,
raddr6->sin6_scope_id);
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
@@ -2476,7 +2517,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
u16 peer_mss = ntohs(req->tcpopt.mss);
int iptype;
unsigned short hdrs;
- u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ u8 tos;
parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!parent_ep) {
@@ -2490,6 +2531,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
+ if (parent_ep->com.cm_id->tos_set)
+ tos = parent_ep->com.cm_id->tos;
+ else
+ tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+
cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
&iptype, local_ip, peer_ip, &local_port, &peer_port);
@@ -2509,7 +2555,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(peer_port), peer_mss);
dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
local_ip, peer_ip, local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ tos,
((struct sockaddr_in6 *)
&parent_ep->com.local_addr)->sin6_scope_id);
}
@@ -2740,6 +2786,21 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
+{
+ complete_cached_srq_buffers(ep, ep->srqe_idx);
+ if (ep->com.cm_id && ep->com.qp) {
+ struct c4iw_qp_attributes attrs;
+
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ peer_abort_upcall(ep);
+ release_ep_resources(ep);
+ c4iw_put_ep(&ep->com);
+}
+
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss6 *req = cplhdr(skb);
@@ -2750,6 +2811,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
int release = 0;
unsigned int tid = GET_TID(req);
u8 status;
+ u32 srqidx;
u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
@@ -2769,8 +2831,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
goto deref_ep;
}
- complete_cached_srq_buffers(ep, req->srqidx_status);
-
pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
ep->com.state);
set_bit(PEER_ABORT, &ep->com.history);
@@ -2819,6 +2879,23 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
stop_ep_timer(ep);
/*FALLTHROUGH*/
case FPDU_MODE:
+ if (ep->com.qp && ep->com.qp->srq) {
+ srqidx = ABORT_RSS_SRQIDX_G(
+ be32_to_cpu(req->srqidx_status));
+ if (srqidx) {
+				complete_cached_srq_buffers(ep, srqidx);
+ } else {
+ /* Hold ep ref until finish_peer_abort() */
+ c4iw_get_ep(&ep->com);
+ __state_set(&ep->com, ABORTING);
+ set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags);
+ read_tcb(ep);
+ break;
+ }
+ }
+
if (ep->com.cm_id && ep->com.qp) {
attrs.next_state = C4IW_QP_STATE_ERROR;
ret = c4iw_modify_qp(ep->com.qp->rhp,
@@ -2942,15 +3019,18 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
- if (ep && ep->com.qp) {
- pr_warn("TERM received tid %u qpid %u\n",
- tid, ep->com.qp->wq.sq.qid);
- attrs.next_state = C4IW_QP_STATE_TERMINATE;
- c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ if (ep) {
+ if (ep->com.qp) {
+ pr_warn("TERM received tid %u qpid %u\n", tid,
+ ep->com.qp->wq.sq.qid);
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+
+ c4iw_put_ep(&ep->com);
} else
pr_warn("TERM received tid %u no ep/qp\n", tid);
- c4iw_put_ep(&ep->com);
return 0;
}
@@ -3318,7 +3398,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
laddr6->sin6_addr.s6_addr,
raddr6->sin6_addr.s6_addr,
laddr6->sin6_port,
- raddr6->sin6_port, 0,
+ raddr6->sin6_port, cm_id->tos,
raddr6->sin6_scope_id);
}
if (!ep->dst) {
@@ -3606,7 +3686,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (close) {
if (abrupt) {
set_bit(EP_DISC_ABORT, &ep->com.history);
- close_complete_upcall(ep, -ECONNRESET);
ret = send_abort(ep);
} else {
set_bit(EP_DISC_CLOSE, &ep->com.history);
@@ -3717,6 +3796,80 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
return;
}
+static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word)
+{
+ u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]);
+ u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]);
+ u64 t;
+ u32 shift = 32;
+
+ t = (thi << shift) | (tlo >> shift);
+
+ return t;
+}
+
+static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift)
+{
+ u32 v;
+ u64 t = be64_to_cpu(tcb[(31 - word) / 2]);
+
+ if (word & 0x1)
+ shift += 32;
+ v = (t >> shift) & mask;
+ return v;
+}
+
+static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_get_tcb_rpl *rpl = cplhdr(skb);
+ __be64 *tcb = (__be64 *)(rpl + 1);
+ unsigned int tid = GET_TID(rpl);
+ struct c4iw_ep *ep;
+ u64 t_flags_64;
+ u32 rx_pdu_out;
+
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+ /* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to
+ * determine if there's a rx PDU feedback event pending.
+ *
+ * If that bit is set, it means we'll need to re-read the TCB's
+ * rq_start value. The final value is the one present in a TCB
+ * with the TF_RX_PDU_OUT bit cleared.
+ */
+
+ t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W);
+ rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S;
+
+ c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
+ c4iw_put_ep(&ep->com); /* from read_tcb() */
+
+ /* If TF_RX_PDU_OUT bit is set, re-read the TCB */
+ if (rx_pdu_out) {
+ if (++ep->rx_pdu_out_cnt >= 2) {
+ WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n");
+ goto cleanup;
+ }
+ read_tcb(ep);
+ return 0;
+ }
+
+	ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
+					  TCB_RQ_START_S);
+cleanup:
+ pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
+
+ if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
+ finish_peer_abort(dev, ep);
+ else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
+ send_abort_req(ep);
+ else
+ WARN_ONCE(1, "unexpected state!");
+
+ return 0;
+}
+
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_fw6_msg *rpl = cplhdr(skb);
@@ -4037,6 +4190,7 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
[CPL_FW4_ACK] = fw4_ack,
+ [CPL_GET_TCB_RPL] = read_tcb_rpl,
[CPL_FW6_MSG] = deferred_fw6_msg,
[CPL_RX_PKT] = rx_pkt,
[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
@@ -4268,6 +4422,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
+ [CPL_GET_TCB_RPL] = sched,
[CPL_FW6_MSG] = fw6_msg,
[CPL_RX_PKT] = sched
};
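The t4_tcb_get_field64() helper added above copes with the CPL_GET_TCB_RPL payload carrying the 32 TCB words as big-endian 64-bit quantities stored highest-word-first. A compilable userspace sketch of the same extraction, testing the TF_RX_PDU_OUT bit the way read_tcb_rpl() does (the TCB_T_FLAGS_W value here is a stand-in for the t4_tcb.h definition):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define TCB_T_FLAGS_W		1	/* stand-in for the t4_tcb.h value */
#define TF_RX_PDU_OUT_S		49
#define TF_RX_PDU_OUT_V(x)	((uint64_t)(x) << TF_RX_PDU_OUT_S)

/* Mirrors t4_tcb_get_field64(): highest word first, hence (31 - word). */
static uint64_t tcb_get_field64(const uint64_t *tcb_be, uint16_t word)
{
	uint64_t tlo = be64toh(tcb_be[(31 - word) / 2]);
	uint64_t thi = be64toh(tcb_be[(31 - word) / 2 - 1]);

	return (thi << 32) | (tlo >> 32);
}

int main(void)
{
	uint64_t tcb[16] = {0};

	/* plant TF_RX_PDU_OUT in t_flags for the demo */
	tcb[(31 - TCB_T_FLAGS_W) / 2 - 1] = htobe64(TF_RX_PDU_OUT_V(1) >> 32);
	printf("rx_pdu_out=%d\n",
	       !!(tcb_get_field64(tcb, TCB_T_FLAGS_W) & TF_RX_PDU_OUT_V(1)));
	return 0;
}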
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index d499cd61c0e8..c79cf63fb0bb 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -720,11 +720,8 @@ static const struct file_operations ep_debugfs_fops = {
.read = debugfs_read,
};
-static int setup_debugfs(struct c4iw_dev *devp)
+static void setup_debugfs(struct c4iw_dev *devp)
{
- if (!devp->debugfs_root)
- return -1;
-
debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
(void *)devp, &qp_debugfs_fops, 4096);
@@ -740,7 +737,6 @@ static int setup_debugfs(struct c4iw_dev *devp)
if (c4iw_wr_log)
debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
(void *)devp, &wr_log_debugfs_fops, 4096);
- return 0;
}
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
@@ -981,7 +977,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
pr_info("%s: On-Chip Queues not supported on this device\n",
pci_name(infop->pdev));
- devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
+ devp = ib_alloc_device(c4iw_dev, ibdev);
if (!devp) {
pr_err("Cannot allocate ib device\n");
return ERR_PTR(-ENOMEM);
@@ -1564,8 +1560,6 @@ static int __init c4iw_init_module(void)
return err;
c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
- if (!c4iw_debugfs_root)
- pr_warn("could not create debugfs entry, continuing\n");
reg_workq = create_singlethread_workqueue("Register_iWARP_device");
if (!reg_workq) {
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index f0fceadd0d12..5a5da41faef6 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -589,7 +589,6 @@ struct c4iw_ucontext {
u32 key;
spinlock_t mmap_lock;
struct list_head mmaps;
- struct kref kref;
bool is_32b_cqe;
};
@@ -598,18 +597,6 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
return container_of(c, struct c4iw_ucontext, ibucontext);
}
-void _c4iw_free_ucontext(struct kref *kref);
-
-static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
-{
- kref_put(&ucontext->kref, _c4iw_free_ucontext);
-}
-
-static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
-{
- kref_get(&ucontext->kref);
-}
-
struct c4iw_mm_entry {
struct list_head entry;
u64 addr;
@@ -982,6 +969,9 @@ struct c4iw_ep {
int rcv_win;
u32 snd_wscale;
struct c4iw_ep_stats stats;
+ u32 srqe_idx;
+ u32 rx_pdu_out_cnt;
+ struct sk_buff *peer_abort_skb;
};
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7b76e6f81aeb..5baa31ab6366 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -502,10 +502,9 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
{
__be64 *pages;
- int shift, n, len;
- int i, k, entry;
+ int shift, n, i;
int err = -ENOMEM;
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
@@ -537,11 +536,11 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ mhp->umem = ib_umem_get(udata, start, length, acc, 0);
if (IS_ERR(mhp->umem))
goto err_free_skb;
- shift = mhp->umem->page_shift;
+ shift = PAGE_SHIFT;
n = mhp->umem->nmap;
err = alloc_pbl(mhp, n);
@@ -556,21 +555,16 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
i = n = 0;
- for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
- len = sg_dma_len(sg) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = cpu_to_be64(sg_dma_address(sg) +
- (k << shift));
- if (i == PAGE_SIZE / sizeof *pages) {
- err = write_pbl(&mhp->rhp->rdev,
- pages,
- mhp->attr.pbl_addr + (n << 3), i,
- mhp->wr_waitp);
- if (err)
- goto pbl_done;
- n += i;
- i = 0;
- }
+ for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
+ pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
+ if (i == PAGE_SIZE / sizeof(*pages)) {
+ err = write_pbl(&mhp->rhp->rdev, pages,
+ mhp->attr.pbl_addr + (n << 3), i,
+ mhp->wr_waitp);
+ if (err)
+ goto pbl_done;
+ n += i;
+ i = 0;
}
}
@@ -684,8 +678,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
mhp->wr_waitp);
kfree_skb(mhp->dereg_skb);
c4iw_put_wr_wait(mhp->wr_waitp);
- kfree(mhp);
pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
+ kfree(mhp);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 586b0c37481f..507c54572cc9 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -58,51 +58,34 @@ static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
-void _c4iw_free_ucontext(struct kref *kref)
+static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
- struct c4iw_ucontext *ucontext;
+ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
struct c4iw_dev *rhp;
struct c4iw_mm_entry *mm, *tmp;
- ucontext = container_of(kref, struct c4iw_ucontext, kref);
+ pr_debug("context %p\n", context);
rhp = to_c4iw_dev(ucontext->ibucontext.device);
- pr_debug("ucontext %p\n", ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
- kfree(ucontext);
}
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
+ struct ib_udata *udata)
{
- struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
-
- pr_debug("context %p\n", context);
- c4iw_put_ucontext(ucontext);
- return 0;
-}
-
-static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct c4iw_ucontext *context;
+ struct ib_device *ibdev = ucontext->device;
+ struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
struct c4iw_alloc_ucontext_resp uresp;
int ret = 0;
struct c4iw_mm_entry *mm = NULL;
pr_debug("ibdev %p\n", ibdev);
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- ret = -ENOMEM;
- goto err;
- }
-
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
- kref_init(&context->kref);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
@@ -111,7 +94,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
mm = kmalloc(sizeof(*mm), GFP_KERNEL);
if (!mm) {
ret = -ENOMEM;
- goto err_free;
+ goto err;
}
uresp.status_page_size = PAGE_SIZE;
@@ -131,13 +114,11 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
mm->len = PAGE_SIZE;
insert_mmap(context, mm);
}
- return &context->ibucontext;
+ return 0;
err_mm:
kfree(mm);
-err_free:
- kfree(context);
err:
- return ERR_PTR(ret);
+ return ret;
}
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
@@ -209,7 +190,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return ret;
}
-static int c4iw_deallocate_pd(struct ib_pd *pd)
+static void c4iw_deallocate_pd(struct ib_pd *pd)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
@@ -221,15 +202,13 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur--;
mutex_unlock(&rhp->rdev.stats.lock);
- kfree(php);
- return 0;
}
-static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct c4iw_pd *php;
+ struct c4iw_pd *php = to_c4iw_pd(pd);
+ struct ib_device *ibdev = pd->device;
u32 pdid;
struct c4iw_dev *rhp;
@@ -237,12 +216,8 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
rhp = (struct c4iw_dev *) ibdev;
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
if (!pdid)
- return ERR_PTR(-EINVAL);
- php = kzalloc(sizeof(*php), GFP_KERNEL);
- if (!php) {
- c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
- return ERR_PTR(-ENOMEM);
- }
+ return -EINVAL;
+
php->pdid = pdid;
php->rhp = rhp;
if (context) {
@@ -250,7 +225,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
c4iw_deallocate_pd(&php->ibpd);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
}
mutex_lock(&rhp->rdev.stats.lock);
@@ -259,7 +234,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
mutex_unlock(&rhp->rdev.stats.lock);
pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
- return &php->ibpd;
+ return 0;
}
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
@@ -376,8 +351,9 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
static ssize_t hw_rev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
- ibdev.dev);
+ struct c4iw_dev *c4iw_dev =
+ rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
+
pr_debug("dev 0x%p\n", dev);
return sprintf(buf, "%d\n",
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
@@ -387,8 +363,8 @@ static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
- ibdev.dev);
+ struct c4iw_dev *c4iw_dev =
+ rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
struct ethtool_drvinfo info;
struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
@@ -401,8 +377,9 @@ static DEVICE_ATTR_RO(hca_type);
static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
- ibdev.dev);
+ struct c4iw_dev *c4iw_dev =
+ rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
+
pr_debug("dev 0x%p\n", dev);
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
c4iw_dev->rdev.lldi.pdev->device);
@@ -547,6 +524,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
.destroy_cq = c4iw_destroy_cq,
.destroy_qp = c4iw_destroy_qp,
.destroy_srq = c4iw_destroy_srq,
+ .fill_res_entry = fill_res_entry,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = c4iw_get_dma_mr,
.get_hw_stats = c4iw_get_mib,
@@ -567,6 +545,8 @@ static const struct ib_device_ops c4iw_dev_ops = {
.query_qp = c4iw_ib_query_qp,
.reg_user_mr = c4iw_reg_user_mr,
.req_notify_cq = c4iw_arm_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};
void c4iw_register_device(struct work_struct *work)
@@ -613,7 +593,7 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
- dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
+ dev->ibdev.iwcm = kzalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm) {
ret = -ENOMEM;
goto err_dealloc_ctx;
@@ -627,14 +607,13 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
- dev->ibdev.res.fill_res_entry = fill_res_entry;
memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
sizeof(dev->ibdev.iwcm->ifname));
rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_CXGB4;
ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
- ret = ib_register_device(&dev->ibdev, "cxgb4_%d", NULL);
+ ret = ib_register_device(&dev->ibdev, "cxgb4_%d");
if (ret)
goto err_kfree_iwcm;
return;
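The ucontext and PD conversions in this file follow the rdma core's move to core-allocated driver objects: the driver registers its structure size via INIT_RDMA_OBJ_SIZE(), the core allocates and frees the memory, and the alloc/dealloc callbacks shrink to plain initializers. A sketch of the registration pattern, trimmed to the relevant fields:

/* With INIT_RDMA_OBJ_SIZE() the core allocates one c4iw_ucontext per
 * ib_ucontext, so the callbacks only set up and tear down state. */
static const struct ib_device_ops example_dev_ops = {
	.alloc_ucontext = c4iw_alloc_ucontext,	   /* int (*)(ucontext, udata) */
	.dealloc_ucontext = c4iw_dealloc_ucontext, /* returns void; no kfree */
	INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};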
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 504cf525508f..d3a82839f5ea 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -31,6 +31,7 @@
*/
#include <linux/module.h>
+#include <rdma/uverbs_ioctl.h>
#include "iw_cxgb4.h"
@@ -632,7 +633,10 @@ static void build_rdma_write_cmpl(struct t4_sq *sq,
wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
- wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
+ if (wr->next->opcode == IB_WR_SEND)
+ wcwr->stag_inv = 0;
+ else
+ wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
wcwr->r2 = 0;
wcwr->r3 = 0;
@@ -726,7 +730,10 @@ static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
/* SEND_WITH_INV swsqe */
swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
- swsqe->opcode = FW_RI_SEND_WITH_INV;
+ if (wr->next->opcode == IB_WR_SEND)
+ swsqe->opcode = FW_RI_SEND;
+ else
+ swsqe->opcode = FW_RI_SEND_WITH_INV;
swsqe->idx = qhp->wq.sq.pidx;
swsqe->complete = 0;
swsqe->signaled = send_signaled;
@@ -897,8 +904,6 @@ static void free_qp_work(struct work_struct *work)
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
- if (ucontext)
- c4iw_put_ucontext(ucontext);
c4iw_put_wr_wait(qhp->wr_waitp);
kfree(qhp);
}
@@ -1133,9 +1138,9 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
/*
* Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is
* the response for small NVMe-oF READ requests. If the chain is
- * exactly a WRITE->SEND_WITH_INV and the sgl depths and lengths
- * meet the requirements of the fw_ri_write_cmpl_wr work request,
- * then build and post the write_cmpl WR. If any of the tests
+ * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths
+ * and lengths meet the requirements of the fw_ri_write_cmpl_wr work
+ * request, then build and post the write_cmpl WR. If any of the tests
* below are not true, then we continue on with the traditional WRITE
* and SEND WRs.
*/
@@ -1145,7 +1150,8 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
wr && wr->next && !wr->next->next &&
wr->opcode == IB_WR_RDMA_WRITE &&
wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
- wr->next->opcode == IB_WR_SEND_WITH_INV &&
+ (wr->next->opcode == IB_WR_SEND ||
+ wr->next->opcode == IB_WR_SEND_WITH_INV) &&
wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
wr->next->num_sge == 1 && num_wrs >= 2) {
post_write_cmpl(qhp, wr);
@@ -2129,7 +2135,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_cq *rchp;
struct c4iw_create_qp_resp uresp;
unsigned int sqsize, rqsize = 0;
- struct c4iw_ucontext *ucontext;
+ struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct c4iw_ucontext, ibucontext);
int ret;
struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
@@ -2163,8 +2170,6 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (sqsize < 8)
sqsize = 8;
- ucontext = udata ? to_c4iw_ucontext(pd->uobject->context) : NULL;
-
qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
if (!qhp)
return ERR_PTR(-ENOMEM);
@@ -2331,7 +2336,6 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
insert_mmap(ucontext, ma_sync_key_mm);
}
- c4iw_get_ucontext(ucontext);
qhp->ucontext = ucontext;
}
if (!attrs->srq) {
@@ -2589,7 +2593,7 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
/* build fw_ri_res_wr */
wr_len = sizeof(*res_wr) + sizeof(*res);
- skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb)
goto err_free_queue;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
@@ -2711,7 +2715,8 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
rqsize = attrs->attr.max_wr + 1;
rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
- ucontext = udata ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+ ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
+ ibucontext);
srq = kzalloc(sizeof(*srq), GFP_KERNEL);
if (!srq)
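The write-completion fastpath changed earlier in this file now also accepts a trailing plain SEND, in which case there is no rkey to invalidate and stag_inv stays zero. The selection boils down to the predicate below (types simplified for illustration; not part of the patch):

#include <stdint.h>

enum wr_opcode { WR_RDMA_WRITE, WR_SEND, WR_SEND_WITH_INV };

struct wr {
	enum wr_opcode opcode;
	uint32_t invalidate_rkey;
	struct wr *next;
};

/* Mirrors the stag_inv choice in build_rdma_write_cmpl(): a plain SEND
 * carries no rkey, so nothing is invalidated. */
static uint32_t pick_stag_inv(const struct wr *write_wr)
{
	const struct wr *send = write_wr->next;

	return send->opcode == WR_SEND ? 0 : send->invalidate_rkey;
}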
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index fff6d48d262f..b170817b2741 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -35,6 +35,7 @@
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
+#include "t4_tcb.h"
#include "t4fw_ri_api.h"
#define T4_MAX_NUM_PD 65536
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 3ce9dc8c3463..4044a8c8dbf4 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -24,6 +24,7 @@ hfi1-y := \
mad.o \
mmu_rb.o \
msix.o \
+ opfn.o \
pcie.o \
pio.o \
pio_copy.o \
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index b443642eac02..addefae16c9c 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -4253,6 +4253,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
access_sw_kmem_wait),
+[C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
+ hfi1_access_sw_tid_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
@@ -5222,6 +5224,17 @@ int is_bx(struct hfi1_devdata *dd)
return (chip_rev_minor & 0xF0) == 0x10;
}
+/* return true if kernel urg is disabled for rcd */
+bool is_urg_masked(struct hfi1_ctxtdata *rcd)
+{
+ u64 mask;
+ u32 is = IS_RCVURGENT_START + rcd->ctxt;
+ u8 bit = is % 64;
+
+ mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
+ return !(mask & BIT_ULL(bit));
+}
+
/*
* Append string s to buffer buf. Arguments curp and len are the current
* position and remaining length, respectively.
@@ -8352,7 +8365,6 @@ static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
struct hfi1_devdata *dd = rcd->dd;
u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
- mmiowb(); /* make sure everything before is written */
write_csr(dd, addr, rcd->imask);
/* force the above write on the chip and get a value back */
(void)read_csr(dd, addr);
@@ -11790,12 +11802,10 @@ void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
}
- mmiowb();
reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
<< RCV_HDR_HEAD_HEAD_SHIFT);
write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
- mmiowb();
}
u32 hdrqempty(struct hfi1_ctxtdata *rcd)
@@ -13219,7 +13229,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int total_contexts;
int ret;
unsigned ngroups;
- int qos_rmt_count;
+ int rmt_count;
int user_rmt_reduced;
u32 n_usr_ctxts;
u32 send_contexts = chip_send_contexts(dd);
@@ -13281,10 +13291,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
n_usr_ctxts = rcv_contexts - total_contexts;
}
- /* each user context requires an entry in the RMT */
- qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
- if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
- user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+ /*
+ * The RMT entries are currently allocated as shown below:
+ * 1. QOS (0 to 128 entries);
+ * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+ * 3. VNIC (num_vnic_contexts).
+ * It should be noted that PSM FECN oversubscribes num_vnic_contexts
+ * entries of RMT because both VNIC and PSM could allocate any receive
+ * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+ * and PSM FECN must reserve an RMT entry for each possible PSM receive
+ * context.
+ */
+ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+ if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+ user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
dd_dev_err(dd,
"RMT size is reducing the number of user receive contexts from %u to %d\n",
n_usr_ctxts,
@@ -14272,9 +14292,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
u64 reg;
int i, idx, regoff, regidx;
u8 offset;
+ u32 total_cnt;
/* there needs to be enough room in the map table */
- if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+ total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+ if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
return;
}
@@ -14328,7 +14350,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
/* add rule 1 */
add_rsm_rule(dd, RSM_INS_FECN, &rrd);
- rmt->used += dd->num_user_contexts;
+ rmt->used += total_cnt;
}
/* Initialize RSM for VNIC */
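The comment added in set_up_context_variables() describes a fixed RMT budget split between QOS, PSM FECN and VNIC. The clamp on user contexts amounts to the arithmetic below (a standalone sketch; the counts are illustrative and NUM_MAP_ENTRIES stands in for the chip's RSM map size):

#include <stdio.h>

#define NUM_MAP_ENTRIES 256	/* illustrative RMT size */

int main(void)
{
	int qos_entries = 128, num_vnic = 8, n_usr_ctxts = 160;
	/* FECN-for-PSM may need one entry per dynamically allocated
	 * context (user + vnic), and VNIC needs its own entries too. */
	int rmt_count = qos_entries + 2 * num_vnic;

	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES)
		n_usr_ctxts = NUM_MAP_ENTRIES - rmt_count;
	printf("user contexts after clamp: %d\n", n_usr_ctxts);
	return 0;
}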
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 6b9c8f12dff8..6c27c1c6a868 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -1,7 +1,7 @@
#ifndef _CHIP_H
#define _CHIP_H
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -804,6 +804,7 @@ void clear_linkup_counters(struct hfi1_devdata *dd);
u32 hdrqempty(struct hfi1_ctxtdata *rcd);
int is_ax(struct hfi1_devdata *dd);
int is_bx(struct hfi1_devdata *dd);
+bool is_urg_masked(struct hfi1_ctxtdata *rcd);
u32 read_physical_state(struct hfi1_devdata *dd);
u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate);
const char *opa_lstate_name(u32 lstate);
@@ -926,6 +927,7 @@ enum {
C_SW_PIO_WAIT,
C_SW_PIO_DRAIN,
C_SW_KMEM_WAIT,
+ C_SW_TID_WAIT,
C_SW_SEND_SCHED,
C_SDMA_DESC_FETCHED_CNT,
C_SDMA_INT_CNT,
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index 40d3cfb58bd1..7310a5dba420 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -340,6 +340,10 @@ struct diag_pkt {
#define HFI1_PSM_IOC_BASE_SEQ 0x0
+/* Number of BTH.PSN bits used for sequence number in expected rcvs */
+#define HFI1_KDETH_BTH_SEQ_SHIFT 11
+#define HFI1_KDETH_BTH_SEQ_MASK (BIT(HFI1_KDETH_BTH_SEQ_SHIFT) - 1)
+
static inline __u64 rhf_to_cpu(const __le32 *rbuf)
{
return __le64_to_cpu(*((__le64 *)rbuf));
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 0a557795563c..427ba0ce74a5 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -1167,6 +1167,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
char link[10];
struct hfi1_devdata *dd = dd_from_dev(ibd);
struct hfi1_pportdata *ppd;
+ struct dentry *root;
int unit = dd->unit;
int i, j;
@@ -1174,31 +1175,29 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
return;
snprintf(name, sizeof(name), "%s_%d", class_name(), unit);
snprintf(link, sizeof(link), "%d", unit);
- ibd->hfi1_ibdev_dbg = debugfs_create_dir(name, hfi1_dbg_root);
- if (!ibd->hfi1_ibdev_dbg) {
- pr_warn("create of %s failed\n", name);
- return;
- }
+ root = debugfs_create_dir(name, hfi1_dbg_root);
+ ibd->hfi1_ibdev_dbg = root;
+
ibd->hfi1_ibdev_link =
debugfs_create_symlink(link, hfi1_dbg_root, name);
- if (!ibd->hfi1_ibdev_link) {
- pr_warn("create of %s symlink failed\n", name);
- return;
- }
- DEBUGFS_SEQ_FILE_CREATE(opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(tx_opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(ctx_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(rcds, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(pios, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(sdma_cpu_list, ibd->hfi1_ibdev_dbg, ibd);
+
+ debugfs_create_file("opcode_stats", 0444, root, ibd,
+ &_opcode_stats_file_ops);
+ debugfs_create_file("tx_opcode_stats", 0444, root, ibd,
+ &_tx_opcode_stats_file_ops);
+ debugfs_create_file("ctx_stats", 0444, root, ibd, &_ctx_stats_file_ops);
+ debugfs_create_file("qp_stats", 0444, root, ibd, &_qp_stats_file_ops);
+ debugfs_create_file("sdes", 0444, root, ibd, &_sdes_file_ops);
+ debugfs_create_file("rcds", 0444, root, ibd, &_rcds_file_ops);
+ debugfs_create_file("pios", 0444, root, ibd, &_pios_file_ops);
+ debugfs_create_file("sdma_cpu_list", 0444, root, ibd,
+ &_sdma_cpu_list_file_ops);
+
/* dev counter files */
for (i = 0; i < ARRAY_SIZE(cntr_ops); i++)
- DEBUGFS_FILE_CREATE(cntr_ops[i].name,
- ibd->hfi1_ibdev_dbg,
- dd,
- &cntr_ops[i].ops, S_IRUGO);
+ debugfs_create_file(cntr_ops[i].name, 0444, root, dd,
+ &cntr_ops[i].ops);
+
/* per port files */
for (ppd = dd->pport, j = 0; j < dd->num_pports; j++, ppd++)
for (i = 0; i < ARRAY_SIZE(port_cntr_ops); i++) {
@@ -1206,12 +1205,11 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
sizeof(name),
port_cntr_ops[i].name,
j + 1);
- DEBUGFS_FILE_CREATE(name,
- ibd->hfi1_ibdev_dbg,
- ppd,
- &port_cntr_ops[i].ops,
+ debugfs_create_file(name,
!port_cntr_ops[i].ops.write ?
- S_IRUGO : S_IRUGO | S_IWUSR);
+ S_IRUGO :
+ S_IRUGO | S_IWUSR,
+ root, ppd, &port_cntr_ops[i].ops);
}
hfi1_fault_init_debugfs(ibd);
@@ -1341,10 +1339,10 @@ DEBUGFS_FILE_OPS(driver_stats);
void hfi1_dbg_init(void)
{
hfi1_dbg_root = debugfs_create_dir(DRIVER_NAME, NULL);
- if (!hfi1_dbg_root)
- pr_warn("init of debugfs failed\n");
- DEBUGFS_SEQ_FILE_CREATE(driver_stats_names, hfi1_dbg_root, NULL);
- DEBUGFS_SEQ_FILE_CREATE(driver_stats, hfi1_dbg_root, NULL);
+ debugfs_create_file("driver_stats_names", 0444, hfi1_dbg_root, NULL,
+ &_driver_stats_names_file_ops);
+ debugfs_create_file("driver_stats", 0444, hfi1_dbg_root, NULL,
+ &_driver_stats_file_ops);
}
void hfi1_dbg_exit(void)
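These hunks drop all return-value checking around debugfs_create_*(), following the debugfs convention that creation failures must not change driver behavior. The resulting pattern, sketched with illustrative names (a fragment, not from the patch):

/* debugfs creation results are ignored on purpose; a failed dentry
 * only means the debug files are absent. */
static void example_debugfs_init(struct dentry **rootp, void *data,
				 const struct file_operations *fops)
{
	struct dentry *root = debugfs_create_dir("example", NULL);

	*rootp = root;
	debugfs_create_file("stats", 0444, root, data, fops);
}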
diff --git a/drivers/infiniband/hw/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
index d5d824459fcc..57e582caa5eb 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.h
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
@@ -49,16 +49,6 @@
struct hfi1_ibdev;
-#define DEBUGFS_FILE_CREATE(name, parent, data, ops, mode) \
-do { \
- struct dentry *ent; \
- const char *__name = name; \
- ent = debugfs_create_file(__name, mode, parent, \
- data, ops); \
- if (!ent) \
- pr_warn("create of %s failed\n", __name); \
-} while (0)
-
#define DEBUGFS_SEQ_FILE_OPS(name) \
static const struct seq_operations _##name##_seq_ops = { \
.start = _##name##_seq_start, \
@@ -89,8 +79,6 @@ static const struct file_operations _##name##_file_ops = { \
.release = seq_release \
}
-#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
- DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, 0444)
ssize_t hfi1_seq_read(struct file *file, char __user *buf, size_t size,
loff_t *ppos);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index a8ad70730203..2a9d2912f5db 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1575,25 +1575,32 @@ drop:
return -EINVAL;
}
-void handle_eflags(struct hfi1_packet *packet)
+static void show_eflags_errs(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
u32 rte = rhf_rcv_type_err(packet->rhf);
+ dd_dev_err(rcd->dd,
+ "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
+ rcd->ctxt, packet->rhf,
+ packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
+ packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
+ packet->rhf & RHF_DC_ERR ? "dc " : "",
+ packet->rhf & RHF_TID_ERR ? "tid " : "",
+ packet->rhf & RHF_LEN_ERR ? "len " : "",
+ packet->rhf & RHF_ECC_ERR ? "ecc " : "",
+ packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
+ packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
+ rte);
+}
+
+void handle_eflags(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
rcv_hdrerr(rcd, rcd->ppd, packet);
if (rhf_err_flags(packet->rhf))
- dd_dev_err(rcd->dd,
- "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
- rcd->ctxt, packet->rhf,
- packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
- packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
- packet->rhf & RHF_DC_ERR ? "dc " : "",
- packet->rhf & RHF_TID_ERR ? "tid " : "",
- packet->rhf & RHF_LEN_ERR ? "len " : "",
- packet->rhf & RHF_ECC_ERR ? "ecc " : "",
- packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
- packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
- rte);
+ show_eflags_errs(packet);
}
/*
@@ -1699,11 +1706,14 @@ static int kdeth_process_expected(struct hfi1_packet *packet)
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
return RHF_RCV_CONTINUE;
- if (unlikely(rhf_err_flags(packet->rhf)))
- handle_eflags(packet);
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ struct hfi1_ctxtdata *rcd = packet->rcd;
- dd_dev_err(packet->rcd->dd,
- "Unhandled expected packet received. Dropping.\n");
+ if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
+ return RHF_RCV_CONTINUE;
+ }
+
+ hfi1_kdeth_expected_rcv(packet);
return RHF_RCV_CONTINUE;
}
@@ -1712,11 +1722,17 @@ static int kdeth_process_eager(struct hfi1_packet *packet)
hfi1_setup_9B_packet(packet);
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
return RHF_RCV_CONTINUE;
- if (unlikely(rhf_err_flags(packet->rhf)))
- handle_eflags(packet);
- dd_dev_err(packet->rcd->dd,
- "Unhandled eager packet received. Dropping.\n");
+ trace_hfi1_rcvhdr(packet);
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ show_eflags_errs(packet);
+ if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
+ return RHF_RCV_CONTINUE;
+ }
+
+ hfi1_kdeth_eager_rcv(packet);
return RHF_RCV_CONTINUE;
}
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index e2290f32c8d9..3fd3315d0fb0 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -250,6 +250,7 @@ void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
{
struct dentry *parent = ibd->hfi1_ibdev_dbg;
+ struct dentry *fault_dir;
ibd->fault = kzalloc(sizeof(*ibd->fault), GFP_KERNEL);
if (!ibd->fault)
@@ -269,45 +270,31 @@ int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
bitmap_zero(ibd->fault->opcodes,
sizeof(ibd->fault->opcodes) * BITS_PER_BYTE);
- ibd->fault->dir =
- fault_create_debugfs_attr("fault", parent,
- &ibd->fault->attr);
- if (IS_ERR(ibd->fault->dir)) {
+ fault_dir =
+ fault_create_debugfs_attr("fault", parent, &ibd->fault->attr);
+ if (IS_ERR(fault_dir)) {
kfree(ibd->fault);
ibd->fault = NULL;
return -ENOENT;
}
-
- DEBUGFS_SEQ_FILE_CREATE(fault_stats, ibd->fault->dir, ibd);
- if (!debugfs_create_bool("enable", 0600, ibd->fault->dir,
- &ibd->fault->enable))
- goto fail;
- if (!debugfs_create_bool("suppress_err", 0600,
- ibd->fault->dir,
- &ibd->fault->suppress_err))
- goto fail;
- if (!debugfs_create_bool("opcode_mode", 0600, ibd->fault->dir,
- &ibd->fault->opcode))
- goto fail;
- if (!debugfs_create_file("opcodes", 0600, ibd->fault->dir,
- ibd->fault, &__fault_opcodes_fops))
- goto fail;
- if (!debugfs_create_u64("skip_pkts", 0600,
- ibd->fault->dir,
- &ibd->fault->fault_skip))
- goto fail;
- if (!debugfs_create_u64("skip_usec", 0600,
- ibd->fault->dir,
- &ibd->fault->fault_skip_usec))
- goto fail;
- if (!debugfs_create_u8("direction", 0600, ibd->fault->dir,
- &ibd->fault->direction))
- goto fail;
+ ibd->fault->dir = fault_dir;
+
+ debugfs_create_file("fault_stats", 0444, fault_dir, ibd,
+ &_fault_stats_file_ops);
+ debugfs_create_bool("enable", 0600, fault_dir, &ibd->fault->enable);
+ debugfs_create_bool("suppress_err", 0600, fault_dir,
+ &ibd->fault->suppress_err);
+ debugfs_create_bool("opcode_mode", 0600, fault_dir,
+ &ibd->fault->opcode);
+ debugfs_create_file("opcodes", 0600, fault_dir, ibd->fault,
+ &__fault_opcodes_fops);
+ debugfs_create_u64("skip_pkts", 0600, fault_dir,
+ &ibd->fault->fault_skip);
+ debugfs_create_u64("skip_usec", 0600, fault_dir,
+ &ibd->fault->fault_skip_usec);
+ debugfs_create_u8("direction", 0600, fault_dir, &ibd->fault->direction);
return 0;
-fail:
- hfi1_fault_exit_debugfs(ibd);
- return -ENOMEM;
}
bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 6db2276f5c13..048b5d73ba39 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -73,6 +73,7 @@
#include "chip_registers.h"
#include "common.h"
+#include "opfn.h"
#include "verbs.h"
#include "pio.h"
#include "chip.h"
@@ -98,6 +99,8 @@
#define NEIGHBOR_TYPE_HFI 0
#define NEIGHBOR_TYPE_SWITCH 1
+#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
+
extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
@@ -195,6 +198,14 @@ struct exp_tid_set {
};
typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
+
+struct tid_queue {
+ struct list_head queue_head;
+ /* queue head for QP TID resource waiters */
+ u32 enqueue; /* count of tid enqueues */
+ u32 dequeue; /* count of tid dequeues */
+};
+
struct hfi1_ctxtdata {
/* rcvhdrq base, needs mmap before useful */
void *rcvhdrq;
@@ -288,6 +299,12 @@ struct hfi1_ctxtdata {
/* PSM Specific fields */
/* lock protecting all Expected TID data */
struct mutex exp_mutex;
+ /* lock protecting all Expected TID data of kernel contexts */
+ spinlock_t exp_lock;
+ /* Queue for QP's waiting for HW TID flows */
+ struct tid_queue flow_queue;
+ /* Queue for QP's waiting for HW receive array entries */
+ struct tid_queue rarr_queue;
/* when waiting for rcv or pioavail */
wait_queue_head_t wait;
/* uuid from PSM */
@@ -320,6 +337,9 @@ struct hfi1_ctxtdata {
*/
u8 subctxt_cnt;
+ /* Bit mask to track free TID RDMA HW flows */
+ unsigned long flow_mask;
+ struct tid_flow_state flows[RXE_NUM_TID_FLOWS];
};
/**
@@ -1435,7 +1455,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
-void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
+int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
u16 ctxt);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
@@ -2100,7 +2120,7 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK |
#endif
HFI1_PKT_USER_SC_INTEGRITY;
- else
+ else if (ctxt_type != SC_KERNEL)
base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
/* turn on send-side job key checks if !A0 */
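
Aside on the tid_queue added in the hfi.h hunk above: the structure pairs a list head with free-running enqueue/dequeue counters, so the queue depth is simply enqueue - dequeue and unsigned wraparound is harmless. A minimal userspace C sketch of that counter idea follows; all names are illustrative stand-ins (not driver code), plain unsigned int stands in for the driver's u32, and the kernel list_head is omitted.

#include <assert.h>
#include <stdio.h>

struct toy_tid_queue {
	unsigned int enqueue;	/* incremented on every add */
	unsigned int dequeue;	/* incremented on every remove */
};

static unsigned int toy_enqueue(struct toy_tid_queue *q)
{
	return ++q->enqueue;	/* the returned count can serve as a ticket */
}

static void toy_dequeue(struct toy_tid_queue *q)
{
	assert(q->enqueue != q->dequeue);	/* queue must not be empty */
	q->dequeue++;
}

int main(void)
{
	struct toy_tid_queue q = { 0 };
	unsigned int ticket = toy_enqueue(&q);

	toy_enqueue(&q);
	toy_dequeue(&q);
	/* depth stays correct across wraparound thanks to unsigned math */
	printf("depth=%u first-ticket=%u\n", q.enqueue - q.dequeue, ticket);
	return 0;
}
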
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 441b06e2a154..faaaac8fbc55 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -73,7 +73,6 @@
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
-#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
/*
* min buffers we want to have per context, after driver
*/
@@ -216,12 +215,12 @@ static void hfi1_rcd_free(struct kref *kref)
struct hfi1_ctxtdata *rcd =
container_of(kref, struct hfi1_ctxtdata, kref);
- hfi1_free_ctxtdata(rcd->dd, rcd);
-
spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
rcd->dd->rcd[rcd->ctxt] = NULL;
spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
+ hfi1_free_ctxtdata(rcd->dd, rcd);
+
kfree(rcd);
}
@@ -244,10 +243,13 @@ int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
* @rcd: pointer to an initialized rcd data structure
*
* Use this to get a reference after the init.
+ *
+ * Return: reflects kref_get_unless_zero(), which returns non-zero on
+ * increment, otherwise 0.
*/
-void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
+int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
- kref_get(&rcd->kref);
+ return kref_get_unless_zero(&rcd->kref);
}
/**
@@ -327,7 +329,8 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
spin_lock_irqsave(&dd->uctxt_lock, flags);
if (dd->rcd[ctxt]) {
rcd = dd->rcd[ctxt];
- hfi1_rcd_get(rcd);
+ if (!hfi1_rcd_get(rcd))
+ rcd = NULL;
}
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
@@ -372,6 +375,9 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
mutex_init(&rcd->exp_mutex);
+ spin_lock_init(&rcd->exp_lock);
+ INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
+ INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);
hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
@@ -474,6 +480,9 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
GFP_KERNEL, numa);
if (!rcd->opstats)
goto bail;
+
+ /* Initialize TID flow generations for the context */
+ hfi1_kern_init_ctxt_generations(rcd);
}
*context = rcd;
@@ -773,6 +782,8 @@ static void enable_chip(struct hfi1_devdata *dd)
rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
+ if (HFI1_CAP_IS_KSET(TID_RDMA))
+ rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
hfi1_rcvctrl(dd, rcvmask, rcd);
sc_enable(rcd->sc);
hfi1_rcd_put(rcd);
@@ -928,6 +939,8 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
lastfail = hfi1_create_rcvhdrq(dd, rcd);
if (!lastfail)
lastfail = hfi1_setup_eagerbufs(rcd);
+ if (!lastfail)
+ lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
if (lastfail) {
dd_dev_err(dd,
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
@@ -1498,6 +1511,13 @@ static int __init hfi1_mod_init(void)
/* sanitize link CRC options */
link_crc_mask &= SUPPORTED_CRCS;
+ ret = opfn_init();
+ if (ret < 0) {
+ pr_err("Failed to allocate opfn_wq");
+ goto bail_dev;
+ }
+
+ hfi1_compute_tid_rdma_flow_wt();
/*
* These must be called before the driver is registered with
* the PCI subsystem.
@@ -1528,6 +1548,7 @@ module_init(hfi1_mod_init);
static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
+ opfn_exit();
node_affinity_destroy_all();
hfi1_dbg_exit();
@@ -1582,7 +1603,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
if (rcd) {
- hfi1_clear_tids(rcd);
+ hfi1_free_ctxt_rcv_groups(rcd);
hfi1_free_ctxt(rcd);
}
}
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
index 582f1ba136ff..adb4a1ba921b 100644
--- a/drivers/infiniband/hw/hfi1/iowait.c
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -6,6 +6,9 @@
#include "iowait.h"
#include "trace_iowait.h"
+/* 1 priority == 16 starve_cnt */
+#define IOWAIT_PRIORITY_STARVE_SHIFT 4
+
void iowait_set_flag(struct iowait *wait, u32 flag)
{
trace_hfi1_iowait_set(wait, flag);
@@ -44,7 +47,8 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
uint seq,
bool pkts_sent),
void (*wakeup)(struct iowait *wait, int reason),
- void (*sdma_drained)(struct iowait *wait))
+ void (*sdma_drained)(struct iowait *wait),
+ void (*init_priority)(struct iowait *wait))
{
int i;
@@ -58,6 +62,7 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
wait->sleep = sleep;
wait->wakeup = wakeup;
wait->sdma_drained = sdma_drained;
+ wait->init_priority = init_priority;
wait->flags = 0;
for (i = 0; i < IOWAIT_SES; i++) {
wait->wait[i].iow = wait;
@@ -92,3 +97,30 @@ int iowait_set_work_flag(struct iowait_work *w)
iowait_set_flag(w->iow, IOWAIT_PENDING_TID);
return IOWAIT_TID_SE;
}
+
+/**
+ * iowait_priority_update_top - update the top priority entry
+ * @w: the iowait struct
+ * @top: a pointer to the top priority entry
+ * @idx: the index of the current iowait in an array
+ * @top_idx: the array index for the iowait entry that has the top priority
+ *
+ * This function is called to compare the priority of a given
+ * iowait with the given top priority entry. The top index will
+ * be returned.
+ */
+uint iowait_priority_update_top(struct iowait *w,
+ struct iowait *top,
+ uint idx, uint top_idx)
+{
+ u8 cnt, tcnt;
+
+ /* Convert priority into starve_cnt and compare the total. */
+ cnt = (w->priority << IOWAIT_PRIORITY_STARVE_SHIFT) + w->starved_cnt;
+ tcnt = (top->priority << IOWAIT_PRIORITY_STARVE_SHIFT) +
+ top->starved_cnt;
+ if (cnt > tcnt)
+ return idx;
+ else
+ return top_idx;
+}
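
Aside on iowait_priority_update_top() above: the comparison folds priority into the starvation count at a 1:16 ratio (the shift by IOWAIT_PRIORITY_STARVE_SHIFT), so a single priority bump outweighs up to 15 accumulated starve counts. Below is a standalone userspace C sketch of that selection rule applied across an array, mirroring how sc_piobufavail() threads top_idx through its loop; it uses unsigned int arithmetic and so ignores the u8 truncation the driver's version would see.

#include <stdio.h>

#define PRIORITY_STARVE_SHIFT 4	/* 1 priority == 16 starve_cnt */

struct toy_wait {
	unsigned char priority;
	unsigned char starved_cnt;
};

static unsigned int combined(const struct toy_wait *w)
{
	return ((unsigned int)w->priority << PRIORITY_STARVE_SHIFT) +
	       w->starved_cnt;
}

int main(void)
{
	/* one high-priority entry beats a merely starved one */
	struct toy_wait waits[] = {
		{ .priority = 0, .starved_cnt = 10 },
		{ .priority = 1, .starved_cnt = 0 },	/* combined = 16 */
		{ .priority = 0, .starved_cnt = 15 },
	};
	unsigned int i, top = 0;

	for (i = 1; i < sizeof(waits) / sizeof(waits[0]); i++)
		if (combined(&waits[i]) > combined(&waits[top]))
			top = i;	/* same rule as the driver helper */
	printf("top index = %u\n", top);	/* prints 1 */
	return 0;
}
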
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 23a58ac0d47c..07847cb72169 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -100,6 +100,7 @@ struct iowait_work {
* @sleep: no space callback
* @wakeup: space callback wakeup
* @sdma_drained: sdma count drained
+ * @init_priority: callback to manipulate priority
* @lock: lock protected head of wait queue
* @iowork: workqueue overhead
* @wait_dma: wait for sdma_busy == 0
@@ -109,7 +110,7 @@ struct iowait_work {
* @tx_limit: limit for overflow queuing
* @tx_count: number of tx entries in the tx_head'ed list
* @flags: wait flags (one per QP)
- * @wait: SE array
+ * @wait: SE array for multiple legs
*
* This is to be embedded in user's state structure
* (QP or PQ).
@@ -120,10 +121,13 @@ struct iowait_work {
* are callbacks for the ULP to implement
* what ever queuing/dequeuing of
* the embedded iowait and its containing struct
- * when a resource shortage like SDMA ring space is seen.
+ * when a resource shortage like SDMA ring space
+ * or PIO credit space is seen.
*
* Both potentially have locks held
- * so sleeping is not allowed.
+ * so sleeping is not allowed; submitting txreqs
+ * directly from the wakeup call is likewise not
+ * supported because of lock conflicts.
*
* The wait_dma member along with the iow
*
@@ -143,6 +147,7 @@ struct iowait {
);
void (*wakeup)(struct iowait *wait, int reason);
void (*sdma_drained)(struct iowait *wait);
+ void (*init_priority)(struct iowait *wait);
seqlock_t *lock;
wait_queue_head_t wait_dma;
wait_queue_head_t wait_pio;
@@ -152,6 +157,7 @@ struct iowait {
u32 tx_limit;
u32 tx_count;
u8 starved_cnt;
+ u8 priority;
unsigned long flags;
struct iowait_work wait[IOWAIT_SES];
};
@@ -171,7 +177,8 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
uint seq,
bool pkts_sent),
void (*wakeup)(struct iowait *wait, int reason),
- void (*sdma_drained)(struct iowait *wait));
+ void (*sdma_drained)(struct iowait *wait),
+ void (*init_priority)(struct iowait *wait));
/**
* iowait_schedule() - schedule the default send engine work
@@ -186,6 +193,18 @@ static inline bool iowait_schedule(struct iowait *wait,
}
/**
+ * iowait_tid_schedule - schedule the tid SE
+ * @wait: the iowait structure
+ * @wq: the work queue
+ * @cpu: the cpu
+ */
+static inline bool iowait_tid_schedule(struct iowait *wait,
+ struct workqueue_struct *wq, int cpu)
+{
+ return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_TID_SE].iowork);
+}
+
+/**
* iowait_sdma_drain() - wait for DMAs to drain
*
* @wait: iowait structure
@@ -327,6 +346,8 @@ static inline u16 iowait_get_desc(struct iowait_work *w)
tx = list_first_entry(&w->tx_head, struct sdma_txreq,
list);
num_desc = tx->num_desc;
+ if (tx->flags & SDMA_TXREQ_F_VIP)
+ w->iow->priority++;
}
return num_desc;
}
@@ -340,6 +361,37 @@ static inline u32 iowait_get_all_desc(struct iowait *w)
return num_desc;
}
+static inline void iowait_update_priority(struct iowait_work *w)
+{
+ struct sdma_txreq *tx = NULL;
+
+ if (!list_empty(&w->tx_head)) {
+ tx = list_first_entry(&w->tx_head, struct sdma_txreq,
+ list);
+ if (tx->flags & SDMA_TXREQ_F_VIP)
+ w->iow->priority++;
+ }
+}
+
+static inline void iowait_update_all_priority(struct iowait *w)
+{
+ iowait_update_priority(&w->wait[IOWAIT_IB_SE]);
+ iowait_update_priority(&w->wait[IOWAIT_TID_SE]);
+}
+
+static inline void iowait_init_priority(struct iowait *w)
+{
+ w->priority = 0;
+ if (w->init_priority)
+ w->init_priority(w);
+}
+
+static inline void iowait_get_priority(struct iowait *w)
+{
+ iowait_init_priority(w);
+ iowait_update_all_priority(w);
+}
+
/**
* iowait_queue - Put the iowait on a wait queue
* @pkts_sent: have some packets been sent before queuing?
@@ -356,14 +408,18 @@ static inline void iowait_queue(bool pkts_sent, struct iowait *w,
/*
* To play fair, insert the iowait at the tail of the wait queue if it
* has already sent some packets; Otherwise, put it at the head.
+ * However, if it has priority packets to send, also put it at the
+ * head.
*/
- if (pkts_sent) {
- list_add_tail(&w->list, wait_head);
+ if (pkts_sent)
w->starved_cnt = 0;
- } else {
- list_add(&w->list, wait_head);
+ else
w->starved_cnt++;
- }
+
+ if (w->priority > 0 || !pkts_sent)
+ list_add(&w->list, wait_head);
+ else
+ list_add_tail(&w->list, wait_head);
}
/**
@@ -380,27 +436,10 @@ static inline void iowait_starve_clear(bool pkts_sent, struct iowait *w)
w->starved_cnt = 0;
}
-/**
- * iowait_starve_find_max - Find the maximum of the starve count
- * @w: the iowait struct
- * @max: a variable containing the max starve count
- * @idx: the index of the current iowait in an array
- * @max_idx: a variable containing the array index for the
- * iowait entry that has the max starve count
- *
- * This function is called to compare the starve count of a
- * given iowait with the given max starve count. The max starve
- * count and the index will be updated if the iowait's start
- * count is larger.
- */
-static inline void iowait_starve_find_max(struct iowait *w, u8 *max,
- uint idx, uint *max_idx)
-{
- if (w->starved_cnt > *max) {
- *max = w->starved_cnt;
- *max_idx = idx;
- }
-}
+/* Update the top priority index */
+uint iowait_priority_update_top(struct iowait *w,
+ struct iowait *top,
+ uint idx, uint top_idx);
/**
* iowait_packet_queued() - determine if a packet is queued
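
Aside on the reworked iowait_queue() above: an entry goes to the head of the wait list if it carries priority traffic or has not yet sent packets (i.e. it is starving), and to the tail otherwise. The userspace C sketch below shows only that head-vs-tail placement; a bounded array stands in for the kernel list, and the starve-count bookkeeping is left out.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define QLEN 8

static char queue[QLEN][8];
static int count;

static void queue_entry(bool pkts_sent, int priority, const char *name)
{
	bool at_head = priority > 0 || !pkts_sent;

	if (at_head) {		/* starved or priority: jump the line */
		memmove(&queue[1], &queue[0], count * sizeof(queue[0]));
		strcpy(queue[0], name);
	} else {		/* played fair already: go to the tail */
		strcpy(queue[count], name);
	}
	count++;
}

int main(void)
{
	queue_entry(true, 0, "A");	/* sent packets -> tail */
	queue_entry(true, 1, "B");	/* priority -> head */
	queue_entry(false, 0, "C");	/* starved -> head */
	for (int i = 0; i < count; i++)
		printf("%s ", queue[i]);	/* prints: C B A */
	printf("\n");
	return 0;
}
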
diff --git a/drivers/infiniband/hw/hfi1/opfn.c b/drivers/infiniband/hw/hfi1/opfn.c
new file mode 100644
index 000000000000..370a5a8eaa71
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/opfn.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#include "hfi.h"
+#include "trace.h"
+#include "qp.h"
+#include "opfn.h"
+
+#define IB_BTHE_E BIT(IB_BTHE_E_SHIFT)
+
+#define OPFN_CODE(code) BIT((code) - 1)
+#define OPFN_MASK(code) OPFN_CODE(STL_VERBS_EXTD_##code)
+
+struct hfi1_opfn_type {
+ bool (*request)(struct rvt_qp *qp, u64 *data);
+ bool (*response)(struct rvt_qp *qp, u64 *data);
+ bool (*reply)(struct rvt_qp *qp, u64 data);
+ void (*error)(struct rvt_qp *qp);
+};
+
+static struct hfi1_opfn_type hfi1_opfn_handlers[STL_VERBS_EXTD_MAX] = {
+ [STL_VERBS_EXTD_TID_RDMA] = {
+ .request = tid_rdma_conn_req,
+ .response = tid_rdma_conn_resp,
+ .reply = tid_rdma_conn_reply,
+ .error = tid_rdma_conn_error,
+ },
+};
+
+static struct workqueue_struct *opfn_wq;
+
+static void opfn_schedule_conn_request(struct rvt_qp *qp);
+
+static bool hfi1_opfn_extended(u32 bth1)
+{
+ return !!(bth1 & IB_BTHE_E);
+}
+
+static void opfn_conn_request(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_atomic_wr wr;
+ u16 mask, capcode;
+ struct hfi1_opfn_type *extd;
+ u64 data;
+ unsigned long flags;
+ int ret = 0;
+
+ trace_hfi1_opfn_state_conn_request(qp);
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ /*
+ * Exit if the extended bit is not set, or if nothing is requested, or
+ * if we have completed all requests, or if a previous request is in
+ * progress
+ */
+ if (!priv->opfn.extended || !priv->opfn.requested ||
+ priv->opfn.requested == priv->opfn.completed || priv->opfn.curr)
+ goto done;
+
+ mask = priv->opfn.requested & ~priv->opfn.completed;
+ capcode = ilog2(mask & ~(mask - 1)) + 1;
+ if (capcode >= STL_VERBS_EXTD_MAX) {
+ priv->opfn.completed |= OPFN_CODE(capcode);
+ goto done;
+ }
+
+ extd = &hfi1_opfn_handlers[capcode];
+ if (!extd || !extd->request || !extd->request(qp, &data)) {
+ /*
+ * Either there is no handler for this capability or the request
+ * packet could not be generated. Either way, mark it as done so
+ * we don't keep attempting to complete it.
+ */
+ priv->opfn.completed |= OPFN_CODE(capcode);
+ goto done;
+ }
+
+ trace_hfi1_opfn_data_conn_request(qp, capcode, data);
+ data = (data & ~0xf) | capcode;
+
+ memset(&wr, 0, sizeof(wr));
+ wr.wr.opcode = IB_WR_OPFN;
+ wr.remote_addr = HFI1_VERBS_E_ATOMIC_VADDR;
+ wr.compare_add = data;
+
+ priv->opfn.curr = capcode; /* A new request is now in progress */
+ /* Drop opfn.lock before calling ib_post_send() */
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+
+ ret = ib_post_send(&qp->ibqp, &wr.wr, NULL);
+ if (ret)
+ goto err;
+ trace_hfi1_opfn_state_conn_request(qp);
+ return;
+err:
+ trace_hfi1_msg_opfn_conn_request(qp, "ib_ost_send failed: ret = ",
+ (u64)ret);
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ /*
+ * In case of an unexpected error return from ib_post_send
+ * clear opfn.curr and reschedule to try again
+ */
+ priv->opfn.curr = STL_VERBS_EXTD_NONE;
+ opfn_schedule_conn_request(qp);
+done:
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_send_conn_request(struct work_struct *work)
+{
+ struct hfi1_opfn_data *od;
+ struct hfi1_qp_priv *qpriv;
+
+ od = container_of(work, struct hfi1_opfn_data, opfn_work);
+ qpriv = container_of(od, struct hfi1_qp_priv, opfn);
+
+ opfn_conn_request(qpriv->owner);
+}
+
+/*
+ * When QP s_lock is held in the caller, the OPFN request must be scheduled
+ * to a different workqueue to avoid double locking the QP s_lock in the
+ * call to ib_post_send() in opfn_conn_request().
+ */
+static void opfn_schedule_conn_request(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ trace_hfi1_opfn_state_sched_conn_request(qp);
+ queue_work(opfn_wq, &priv->opfn.opfn_work);
+}
+
+void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_atomic_eth *ateth)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ u64 data = be64_to_cpu(ateth->compare_data);
+ struct hfi1_opfn_type *extd;
+ u8 capcode;
+ unsigned long flags;
+
+ trace_hfi1_opfn_state_conn_response(qp);
+ capcode = data & 0xf;
+ trace_hfi1_opfn_data_conn_response(qp, capcode, data);
+ if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
+ return;
+
+ extd = &hfi1_opfn_handlers[capcode];
+
+ if (!extd || !extd->response) {
+ e->atomic_data = capcode;
+ return;
+ }
+
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ if (priv->opfn.completed & OPFN_CODE(capcode)) {
+ /*
+ * We are receiving a request for a feature that has already
+ * been negotiated. This may mean that the other side has reset.
+ */
+ priv->opfn.completed &= ~OPFN_CODE(capcode);
+ if (extd->error)
+ extd->error(qp);
+ }
+
+ if (extd->response(qp, &data))
+ priv->opfn.completed |= OPFN_CODE(capcode);
+ e->atomic_data = (data & ~0xf) | capcode;
+ trace_hfi1_opfn_state_conn_response(qp);
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_conn_reply(struct rvt_qp *qp, u64 data)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_opfn_type *extd;
+ u8 capcode;
+ unsigned long flags;
+
+ trace_hfi1_opfn_state_conn_reply(qp);
+ capcode = data & 0xf;
+ trace_hfi1_opfn_data_conn_reply(qp, capcode, data);
+ if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
+ return;
+
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ /*
+ * Either there is no previous request or the reply is not for the
+ * current request
+ */
+ if (!priv->opfn.curr || capcode != priv->opfn.curr)
+ goto done;
+
+ extd = &hfi1_opfn_handlers[capcode];
+
+ if (!extd || !extd->reply)
+ goto clear;
+
+ if (extd->reply(qp, data))
+ priv->opfn.completed |= OPFN_CODE(capcode);
+clear:
+ /*
+ * Clear opfn.curr to indicate that the previous request is no longer in
+ * progress
+ */
+ priv->opfn.curr = STL_VERBS_EXTD_NONE;
+ trace_hfi1_opfn_state_conn_reply(qp);
+done:
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_conn_error(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_opfn_type *extd = NULL;
+ unsigned long flags;
+ u16 capcode;
+
+ trace_hfi1_opfn_state_conn_error(qp);
+ trace_hfi1_msg_opfn_conn_error(qp, "error. qp state ", (u64)qp->state);
+ /*
+ * The QP has gone into the Error state. We have to invalidate all
+ * negotiated features, including the one in progress (if any). The RC
+ * QP handling will clean the WQE for the connection request.
+ */
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ while (priv->opfn.completed) {
+ capcode = priv->opfn.completed & ~(priv->opfn.completed - 1);
+ extd = &hfi1_opfn_handlers[ilog2(capcode) + 1];
+ if (extd->error)
+ extd->error(qp);
+ priv->opfn.completed &= ~OPFN_CODE(capcode);
+ }
+ priv->opfn.extended = 0;
+ priv->opfn.requested = 0;
+ priv->opfn.curr = STL_VERBS_EXTD_NONE;
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask)
+{
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct hfi1_qp_priv *priv = qp->priv;
+ unsigned long flags;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ priv->s_retry = attr->retry_cnt;
+
+ spin_lock_irqsave(&priv->opfn.lock, flags);
+ if (ibqp->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
+ struct tid_rdma_params *local = &priv->tid_rdma.local;
+
+ if (attr_mask & IB_QP_TIMEOUT)
+ priv->tid_retry_timeout_jiffies = qp->timeout_jiffies;
+ if (qp->pmtu == enum_to_mtu(OPA_MTU_4096) ||
+ qp->pmtu == enum_to_mtu(OPA_MTU_8192)) {
+ tid_rdma_opfn_init(qp, local);
+ /*
+ * We only want to set the OPFN requested bit when the
+ * QP transitions to RTS.
+ */
+ if (attr_mask & IB_QP_STATE &&
+ attr->qp_state == IB_QPS_RTS) {
+ priv->opfn.requested |= OPFN_MASK(TID_RDMA);
+ /*
+ * If the QP is transitioning to RTS and the
+ * opfn.completed for TID RDMA has already been
+ * set, the QP is being moved *back* into RTS.
+ * We can now renegotiate the TID RDMA
+ * parameters.
+ */
+ if (priv->opfn.completed &
+ OPFN_MASK(TID_RDMA)) {
+ priv->opfn.completed &=
+ ~OPFN_MASK(TID_RDMA);
+ /*
+ * Since the opfn.completed bit was
+ * already set, it is safe to assume
+ * that the opfn.extended is also set.
+ */
+ opfn_schedule_conn_request(qp);
+ }
+ }
+ } else {
+ memset(local, 0, sizeof(*local));
+ }
+ }
+ spin_unlock_irqrestore(&priv->opfn.lock, flags);
+}
+
+void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (!priv->opfn.extended && hfi1_opfn_extended(bth1) &&
+ HFI1_CAP_IS_KSET(OPFN)) {
+ priv->opfn.extended = 1;
+ if (qp->state == IB_QPS_RTS)
+ opfn_conn_request(qp);
+ }
+}
+
+int opfn_init(void)
+{
+ opfn_wq = alloc_workqueue("hfi_opfn",
+ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM,
+ HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES);
+ if (!opfn_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void opfn_exit(void)
+{
+ if (opfn_wq) {
+ destroy_workqueue(opfn_wq);
+ opfn_wq = NULL;
+ }
+}
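
Aside on opfn_conn_request() above: pending capabilities live in a bitmask where OPFN_CODE(code) == BIT(code - 1), and the next capability to negotiate is the lowest set bit of requested & ~completed. Below is a small userspace C re-creation of that arithmetic; toy_ilog2() stands in for the kernel's ilog2(), and the mask is assumed non-zero (the driver checks for that before computing it).

#include <stdint.h>
#include <stdio.h>

#define OPFN_CODE(code) (1u << ((code) - 1))

static unsigned int toy_ilog2(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint16_t requested = OPFN_CODE(1) | OPFN_CODE(3);
	uint16_t completed = OPFN_CODE(1);
	uint16_t mask = requested & ~completed;
	/* mask & ~(mask - 1) isolates the lowest set bit; ilog2() turns it
	 * back into a bit index, and +1 undoes the BIT(code - 1) encoding.
	 */
	unsigned int capcode = toy_ilog2(mask & ~(mask - 1)) + 1;

	printf("next capcode = %u\n", capcode);	/* prints 3 */
	return 0;
}
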
diff --git a/drivers/infiniband/hw/hfi1/opfn.h b/drivers/infiniband/hw/hfi1/opfn.h
new file mode 100644
index 000000000000..5f2011cabc25
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/opfn.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#ifndef _HFI1_OPFN_H
+#define _HFI1_OPFN_H
+
+/**
+ * DOC: Omni-Path Feature Negotiation (OPFN)
+ *
+ * OPFN is a discovery protocol for Intel Omni-Path fabric that
+ * allows two RC QPs to negotiate a common feature that both QPs
+ * can support. Currently, the only OPA feature that OPFN
+ * supports is TID RDMA.
+ *
+ * Architecture
+ *
+ * OPFN involves the communication between two QPs on the HFI
+ * level on an Omni-Path fabric, and ULPs have no knowledge of
+ * OPFN at all.
+ *
+ * Implementation
+ *
+ * OPFN extends the existing IB RC protocol with the following
+ * changes:
+ * -- Uses Bit 24 (reserved) of DWORD 1 of Base Transport
+ * Header (BTH1) to indicate that the RC QP supports OPFN;
+ * -- Uses a combination of RC COMPARE_SWAP opcode (0x13) and
+ * the address U64_MAX (0xFFFFFFFFFFFFFFFF) as an OPFN
+ * request; the 64-bit data carried with the request/response
+ * contains the parameters for negotiation and will be
+ * defined in the tid_rdma.c file;
+ * -- Defines IB_WR_RESERVED3 as IB_WR_OPFN.
+ *
+ * The OPFN communication will be triggered when an RC QP
+ * receives a request with Bit 24 of BTH1 set. The responder QP
+ * will then post send an OPFN request with its local
+ * parameters, which will be sent to the requester QP once all
+ * existing requests on the responder QP side have been sent.
+ * Once the requester QP receives the OPFN request, it will
+ * keep a copy of the responder QP's parameters, and return a
+ * response packet with its own local parameters. The responder
+ * QP receives the response packet and keeps a copy of the requester
+ * QP's parameters. After this exchange, each side has the parameters
+ * for both sides and therefore can select the right parameters
+ * for future transactions.
+ */
+
+/* STL Verbs Extended */
+#define IB_BTHE_E_SHIFT 24
+#define HFI1_VERBS_E_ATOMIC_VADDR U64_MAX
+
+struct ib_atomic_eth;
+
+enum hfi1_opfn_codes {
+ STL_VERBS_EXTD_NONE = 0,
+ STL_VERBS_EXTD_TID_RDMA,
+ STL_VERBS_EXTD_MAX
+};
+
+struct hfi1_opfn_data {
+ u8 extended;
+ u16 requested;
+ u16 completed;
+ enum hfi1_opfn_codes curr;
+ /* serialize opfn function calls */
+ spinlock_t lock;
+ struct work_struct opfn_work;
+};
+
+/* WR opcode for OPFN */
+#define IB_WR_OPFN IB_WR_RESERVED3
+
+void opfn_send_conn_request(struct work_struct *work);
+void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_atomic_eth *ateth);
+void opfn_conn_reply(struct rvt_qp *qp, u64 data);
+void opfn_conn_error(struct rvt_qp *qp);
+void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask);
+void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1);
+int opfn_init(void);
+void opfn_exit(void);
+
+#endif /* _HFI1_OPFN_H */
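
Aside on the OPFN wire format described in the DOC block above: the negotiation rides in the 64-bit compare_data of a COMPARE_SWAP, with the capability code in the low nibble and the capability-specific parameters in the remaining bits (opfn.c masks with ~0xf when encoding and with 0xf when decoding). A hedged userspace C sketch of that layout follows; the parameter value is invented purely for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define OPFN_CAPCODE_MASK 0xfULL

static uint64_t opfn_encode(uint64_t params, uint8_t capcode)
{
	/* mirrors the driver's: data = (data & ~0xf) | capcode */
	return (params & ~OPFN_CAPCODE_MASK) | (capcode & OPFN_CAPCODE_MASK);
}

int main(void)
{
	uint64_t data = opfn_encode(0xabcd0ULL, 1 /* TID_RDMA */);

	/* mirrors the driver's: capcode = data & 0xf */
	printf("capcode=%" PRIu64 " params=0x%" PRIx64 "\n",
	       data & OPFN_CAPCODE_MASK, data & ~OPFN_CAPCODE_MASK);
	return 0;
}
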
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 04126d7e318d..16ba9d52e1b9 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1578,7 +1578,6 @@ void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
sc_del_credit_return_intr(sc);
trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
if (needint) {
- mmiowb();
sc_return_credits(sc);
}
}
@@ -1599,8 +1598,7 @@ static void sc_piobufavail(struct send_context *sc)
struct rvt_qp *qp;
struct hfi1_qp_priv *priv;
unsigned long flags;
- uint i, n = 0, max_idx = 0;
- u8 max_starved_cnt = 0;
+ uint i, n = 0, top_idx = 0;
if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
dd->send_contexts[sc->sw_index].type != SC_VL15)
@@ -1619,11 +1617,18 @@ static void sc_piobufavail(struct send_context *sc)
if (n == ARRAY_SIZE(qps))
break;
wait = list_first_entry(list, struct iowait, list);
+ iowait_get_priority(wait);
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
- iowait_starve_find_max(wait, &max_starved_cnt, n, &max_idx);
+ if (n) {
+ priv = qps[top_idx]->priv;
+ top_idx = iowait_priority_update_top(wait,
+ &priv->s_iowait,
+ n, top_idx);
+ }
+
/* refcount held until actual wake up */
qps[n++] = qp;
}
@@ -1638,12 +1643,12 @@ static void sc_piobufavail(struct send_context *sc)
}
write_sequnlock_irqrestore(&sc->waitlock, flags);
- /* Wake up the most starved one first */
+ /* Wake up the top-priority one first */
if (n)
- hfi1_qp_wakeup(qps[max_idx],
+ hfi1_qp_wakeup(qps[top_idx],
RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
for (i = 0; i < n; i++)
- if (i != max_idx)
+ if (i != top_idx)
hfi1_qp_wakeup(qps[i],
RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
}
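
Aside on the sc_piobufavail() change above: once top_idx is known, the wake-up is deliberately two-pass, waking the top-priority QP first and then sweeping the rest while skipping the winner. A trivial userspace C sketch of that ordering (the QP numbers are made up):

#include <stdio.h>

static void wake(int qp)
{
	printf("wake qp %d\n", qp);
}

int main(void)
{
	int qps[] = { 10, 11, 12, 13 };
	int n = 4, top_idx = 2, i;

	if (n)
		wake(qps[top_idx]);	/* top-priority QP first */
	for (i = 0; i < n; i++)
		if (i != top_idx)
			wake(qps[i]);	/* then the rest, in order */
	return 0;
}
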
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 5344e8993b28..eba300330a02 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -132,6 +132,18 @@ const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
.qpt_support = BIT(IB_QPT_RC),
},
+[IB_WR_OPFN] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_USE_RESERVE,
+},
+
+[IB_WR_TID_RDMA_WRITE] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_IGN_RNR_CNT,
+},
+
};
static void flush_list_head(struct list_head *l)
@@ -285,6 +297,8 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
qp_set_16b(qp);
}
+
+ opfn_qp_init(qp, attr, attr_mask);
}
/**
@@ -311,6 +325,8 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
+ hfi1_setup_tid_rdma_wqe(qp, wqe);
+ /* fall through */
case IB_QPT_UC:
if (wqe->length > 0x80000000U)
return -EINVAL;
@@ -422,6 +438,11 @@ static void hfi1_qp_schedule(struct rvt_qp *qp)
if (ret)
iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
}
+ if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_TID)) {
+ ret = hfi1_schedule_tid_send(qp);
+ if (ret)
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+ }
}
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
@@ -441,8 +462,27 @@ void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
{
- if (iowait_set_work_flag(wait) == IOWAIT_IB_SE)
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (iowait_set_work_flag(wait) == IOWAIT_IB_SE) {
qp->s_flags &= ~RVT_S_BUSY;
+ /*
+ * If we are sending a first-leg packet from the second leg,
+ * we need to clear the busy flag from priv->s_flags to
+ * avoid a race condition when the qp wakes up before
+ * the call to hfi1_verbs_send() returns to the second
+ * leg. In that case, the second leg will terminate without
+ * being re-scheduled, resulting in failure to send TID RDMA
+ * WRITE DATA and TID RDMA ACK packets.
+ */
+ if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
+ priv->s_flags &= ~(HFI1_S_TID_BUSY_SET |
+ RVT_S_BUSY);
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+ }
+ } else {
+ priv->s_flags &= ~RVT_S_BUSY;
+ }
}
static int iowait_sleep(
@@ -479,6 +519,7 @@ static int iowait_sleep(
ibp->rvp.n_dmawait++;
qp->s_flags |= RVT_S_WAIT_DMA_DESC;
+ iowait_get_priority(&priv->s_iowait);
iowait_queue(pkts_sent, &priv->s_iowait,
&sde->dmawait);
priv->s_iowait.lock = &sde->waitlock;
@@ -528,6 +569,17 @@ static void iowait_sdma_drained(struct iowait *wait)
spin_unlock_irqrestore(&qp->s_lock, flags);
}
+static void hfi1_init_priority(struct iowait *w)
+{
+ struct rvt_qp *qp = iowait_to_qp(w);
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (qp->s_flags & RVT_S_ACK_PENDING)
+ w->priority++;
+ if (priv->s_flags & RVT_S_ACK_PENDING)
+ w->priority++;
+}
+
/**
* qp_to_sdma_engine - map a qp to a send engine
* @qp: the QP
@@ -685,10 +737,11 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
&priv->s_iowait,
1,
_hfi1_do_send,
- NULL,
+ _hfi1_do_tid_send,
iowait_sleep,
iowait_wakeup,
- iowait_sdma_drained);
+ iowait_sdma_drained,
+ hfi1_init_priority);
return priv;
}
@@ -696,6 +749,7 @@ void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
+ hfi1_qp_priv_tid_free(rdi, qp);
kfree(priv->s_ahg);
kfree(priv);
}
@@ -729,6 +783,7 @@ void flush_qp_waiters(struct rvt_qp *qp)
{
lockdep_assert_held(&qp->s_lock);
flush_iowait(qp);
+ hfi1_tid_rdma_flush_wait(qp);
}
void stop_send_queue(struct rvt_qp *qp)
@@ -736,12 +791,16 @@ void stop_send_queue(struct rvt_qp *qp)
struct hfi1_qp_priv *priv = qp->priv;
iowait_cancel_work(&priv->s_iowait);
+ if (cancel_work_sync(&priv->tid_rdma.trigger_work))
+ rvt_put_qp(qp);
}
void quiesce_qp(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
+ hfi1_del_tid_reap_timer(qp);
+ hfi1_del_tid_retry_timer(qp);
iowait_sdma_drain(&priv->s_iowait);
qp_pio_drain(qp);
flush_tx_list(qp);
@@ -749,8 +808,13 @@ void quiesce_qp(struct rvt_qp *qp)
void notify_qp_reset(struct rvt_qp *qp)
{
+ hfi1_qp_kern_exp_rcv_clear_all(qp);
qp->r_adefered = 0;
clear_ahg(qp);
+
+ /* Clear any OPFN state */
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ opfn_conn_error(qp);
}
/*
@@ -832,8 +896,11 @@ void notify_error_qp(struct rvt_qp *qp)
if (lock) {
write_seqlock(lock);
if (!list_empty(&priv->s_iowait.list) &&
- !(qp->s_flags & RVT_S_BUSY)) {
- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+ !(qp->s_flags & RVT_S_BUSY) &&
+ !(priv->s_flags & RVT_S_BUSY)) {
+ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
@@ -841,7 +908,8 @@ void notify_error_qp(struct rvt_qp *qp)
write_sequnlock(lock);
}
- if (!(qp->s_flags & RVT_S_BUSY)) {
+ if (!(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) {
+ qp->s_hdrwords = 0;
if (qp->s_rdma_mr) {
rvt_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 7adb6dff6813..b670321365d3 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -63,11 +63,17 @@ extern const struct rvt_operation_params hfi1_post_parms[];
* HFI1_S_AHG_VALID - ahg header valid on chip
* HFI1_S_AHG_CLEAR - have send engine clear ahg state
* HFI1_S_WAIT_PIO_DRAIN - qp waiting for PIOs to drain
+ * HFI1_S_WAIT_TID_SPACE - a QP is waiting for TID resources
+ * HFI1_S_WAIT_TID_RESP - waiting for a TID RDMA WRITE response
+ * HFI1_S_WAIT_HALT - halt the first leg send engine
* HFI1_S_MIN_BIT_MASK - the lowest bit that can be used by hfi1
*/
#define HFI1_S_AHG_VALID 0x80000000
#define HFI1_S_AHG_CLEAR 0x40000000
#define HFI1_S_WAIT_PIO_DRAIN 0x20000000
+#define HFI1_S_WAIT_TID_SPACE 0x10000000
+#define HFI1_S_WAIT_TID_RESP 0x08000000
+#define HFI1_S_WAIT_HALT 0x04000000
#define HFI1_S_MIN_BIT_MASK 0x01000000
/*
@@ -76,6 +82,7 @@ extern const struct rvt_operation_params hfi1_post_parms[];
#define HFI1_S_ANY_WAIT_IO (RVT_S_ANY_WAIT_IO | HFI1_S_WAIT_PIO_DRAIN)
#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
+#define HFI1_S_ANY_TID_WAIT_SEND (RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA)
/*
* Send if not busy or waiting for I/O and either
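
Aside on the s_flags layout extended above: hfi1 reserves the bits at and above HFI1_S_MIN_BIT_MASK for its private flags, so the three new TID bits must stay within that range and must not overlap one another. Below is a compile-time C sketch of those invariants, with the values copied from the hunk; the asserts are illustrative, not something the driver itself carries.

#define HFI1_S_AHG_VALID	0x80000000
#define HFI1_S_AHG_CLEAR	0x40000000
#define HFI1_S_WAIT_PIO_DRAIN	0x20000000
#define HFI1_S_WAIT_TID_SPACE	0x10000000
#define HFI1_S_WAIT_TID_RESP	0x08000000
#define HFI1_S_WAIT_HALT	0x04000000
#define HFI1_S_MIN_BIT_MASK	0x01000000

_Static_assert(HFI1_S_WAIT_HALT >= HFI1_S_MIN_BIT_MASK,
	       "new TID flags must stay in hfi1's reserved bit range");
_Static_assert((HFI1_S_WAIT_TID_SPACE & HFI1_S_WAIT_TID_RESP) == 0,
	       "flag bits must not overlap");

int main(void)
{
	return 0;	/* compiling at all proves the invariants */
}
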
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index be603f35d7e4..5991211d72bd 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -51,24 +51,48 @@
#include "hfi.h"
#include "qp.h"
+#include "rc.h"
#include "verbs_txreq.h"
#include "trace.h"
-/* cut down ridiculously long IB macro names */
-#define OP(x) RC_OP(x)
-
-static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
- u32 psn, u32 pmtu)
+struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
+ u8 *prev_ack, bool *scheduled)
+ __must_hold(&qp->s_lock)
{
- u32 len;
-
- len = delta_psn(psn, wqe->psn) * pmtu;
- ss->sge = wqe->sg_list[0];
- ss->sg_list = wqe->sg_list + 1;
- ss->num_sge = wqe->wr.num_sge;
- ss->total_len = wqe->length;
- rvt_skip_sge(ss, len, false);
- return wqe->length - len;
+ struct rvt_ack_entry *e = NULL;
+ u8 i, p;
+ bool s = true;
+
+ for (i = qp->r_head_ack_queue; ; i = p) {
+ if (i == qp->s_tail_ack_queue)
+ s = false;
+ if (i)
+ p = i - 1;
+ else
+ p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
+ if (p == qp->r_head_ack_queue) {
+ e = NULL;
+ break;
+ }
+ e = &qp->s_ack_queue[p];
+ if (!e->opcode) {
+ e = NULL;
+ break;
+ }
+ if (cmp_psn(psn, e->psn) >= 0) {
+ if (p == qp->s_tail_ack_queue &&
+ cmp_psn(psn, e->lpsn) <= 0)
+ s = false;
+ break;
+ }
+ }
+ if (prev)
+ *prev = p;
+ if (prev_ack)
+ *prev_ack = i;
+ if (scheduled)
+ *scheduled = s;
+ return e;
}
/**
@@ -87,20 +111,25 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
struct hfi1_pkt_state *ps)
{
struct rvt_ack_entry *e;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
+ u32 hwords, hdrlen;
+ u32 len = 0;
+ u32 bth0 = 0, bth2 = 0;
+ u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
int middle = 0;
u32 pmtu = qp->pmtu;
- struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ bool last_pkt;
+ u32 delta;
+ u8 next = qp->s_tail_ack_queue;
+ struct tid_rdma_request *req;
+ trace_hfi1_rsp_make_rc_ack(qp, 0);
lockdep_assert_held(&qp->s_lock);
/* Don't send an ACK if we aren't supposed to. */
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto bail;
- if (priv->hdr_type == HFI1_PKT_TYPE_9B)
+ if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
else
@@ -122,8 +151,18 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
* response has been sent instead of only being
* constructed.
*/
- if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
- qp->s_tail_ack_queue = 0;
+ if (++next > rvt_size_atomic(&dev->rdi))
+ next = 0;
+ /*
+ * Only advance the s_acked_ack_queue pointer if there
+ * have been no TID RDMA requests.
+ */
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->opcode != TID_OP(WRITE_REQ) &&
+ qp->s_acked_ack_queue == qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue = next;
+ qp->s_tail_ack_queue = next;
+ trace_hfi1_rsp_make_rc_ack(qp, e->psn);
/* FALLTHROUGH */
case OP(SEND_ONLY):
case OP(ACKNOWLEDGE):
@@ -135,6 +174,12 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
}
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ /* Check for tid write fence */
+ if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
+ hfi1_tid_rdma_ack_interlock(qp, e)) {
+ iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
+ goto bail;
+ }
if (e->opcode == OP(RDMA_READ_REQUEST)) {
/*
* If an RDMA read response is being resent and
@@ -144,6 +189,10 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
*/
len = e->rdma_sge.sge_length;
if (len && !e->rdma_sge.mr) {
+ if (qp->s_acked_ack_queue ==
+ qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue =
+ qp->r_head_ack_queue;
qp->s_tail_ack_queue = qp->r_head_ack_queue;
goto bail;
}
@@ -165,6 +214,45 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
hwords++;
qp->s_ack_rdma_psn = e->psn;
bth2 = mask_psn(qp->s_ack_rdma_psn++);
+ } else if (e->opcode == TID_OP(WRITE_REQ)) {
+ /*
+ * If a TID RDMA WRITE RESP is being resent, we have to
+ * wait for the actual request. All requests that are to
+ * be resent will have their state set to
+ * TID_REQUEST_RESEND. When the new request arrives, the
+ * state will be changed to TID_REQUEST_RESEND_ACTIVE.
+ */
+ req = ack_to_tid_req(e);
+ if (req->state == TID_REQUEST_RESEND ||
+ req->state == TID_REQUEST_INIT_RESEND)
+ goto bail;
+ qp->s_ack_state = TID_OP(WRITE_RESP);
+ qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
+ goto write_resp;
+ } else if (e->opcode == TID_OP(READ_REQ)) {
+ /*
+ * If a TID RDMA read response is being resent and
+ * we haven't seen the duplicate request yet,
+ * then stop sending the remaining responses the
+ * responder has seen until the requester re-sends it.
+ */
+ len = e->rdma_sge.sge_length;
+ if (len && !e->rdma_sge.mr) {
+ if (qp->s_acked_ack_queue ==
+ qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue =
+ qp->r_head_ack_queue;
+ qp->s_tail_ack_queue = qp->r_head_ack_queue;
+ goto bail;
+ }
+ /* Copy SGE state in case we need to resend */
+ ps->s_txreq->mr = e->rdma_sge.mr;
+ if (ps->s_txreq->mr)
+ rvt_get_mr(ps->s_txreq->mr);
+ qp->s_ack_rdma_sge.sge = e->rdma_sge;
+ qp->s_ack_rdma_sge.num_sge = 1;
+ qp->s_ack_state = TID_OP(READ_RESP);
+ goto read_resp;
} else {
/* COMPARE_SWAP or FETCH_ADD */
ps->s_txreq->ss = NULL;
@@ -176,6 +264,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
bth2 = mask_psn(e->psn);
e->sent = 1;
}
+ trace_hfi1_tid_write_rsp_make_rc_ack(qp);
bth0 = qp->s_ack_state << 24;
break;
@@ -202,6 +291,83 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
bth2 = mask_psn(qp->s_ack_rdma_psn++);
break;
+ case TID_OP(WRITE_RESP):
+write_resp:
+ /*
+ * 1. Check if RVT_S_ACK_PENDING is set. If yes,
+ * goto normal.
+ * 2. Attempt to allocate TID resources.
+ * 3. Remove RVT_S_RESP_PENDING flags from s_flags
+ * 4. If resources not available:
+ * 4.1 Set RVT_S_WAIT_TID_SPACE
+ * 4.2 Queue QP on RCD TID queue
+ * 4.3 Put QP on iowait list.
+ * 4.4 Build IB RNR NAK with appropriate timeout value
+ * 4.5 Return indication progress made.
+ * 5. If resources are available:
+ * 5.1 Program HW flow CSRs
+ * 5.2 Build TID RDMA WRITE RESP packet
+ * 5.3 If more resources needed, do 2.1 - 2.3.
+ * 5.4 Wake up next QP on RCD TID queue.
+ * 5.5 Return indication progress made.
+ */
+
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ req = ack_to_tid_req(e);
+
+ /*
+ * Send scheduled RNR NAKs. RNR NAKs need to be sent at
+ * segment boundaries, not at request boundaries. Don't change
+ * s_ack_state because we are still in the middle of a request.
+ */
+ if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
+ qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
+ req->cur_seg == req->alloc_seg) {
+ qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
+ goto normal_no_state;
+ }
+
+ bth2 = mask_psn(qp->s_ack_rdma_psn);
+ hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
+ bth2, &len,
+ &ps->s_txreq->ss);
+ if (!hdrlen)
+ return 0;
+
+ hwords += hdrlen;
+ bth0 = qp->s_ack_state << 24;
+ qp->s_ack_rdma_psn++;
+ trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+ if (req->cur_seg != req->total_segs)
+ break;
+
+ e->sent = 1;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case TID_OP(READ_RESP):
+read_resp:
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ ps->s_txreq->ss = &qp->s_ack_rdma_sge;
+ delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
+ &bth1, &bth2, &len,
+ &last_pkt);
+ if (delta == 0)
+ goto error_qp;
+ hwords += delta;
+ if (last_pkt) {
+ e->sent = 1;
+ /*
+ * Increment qp->s_tail_ack_queue through s_ack_state
+ * transition.
+ */
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ }
+ break;
+ case TID_OP(READ_REQ):
+ goto bail;
+
default:
normal:
/*
@@ -211,8 +377,7 @@ normal:
* (see above).
*/
qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~RVT_S_ACK_PENDING;
- ps->s_txreq->ss = NULL;
+normal_no_state:
if (qp->s_nak_state)
ohdr->u.aeth =
cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
@@ -224,14 +389,24 @@ normal:
len = 0;
bth0 = OP(ACKNOWLEDGE) << 24;
bth2 = mask_psn(qp->s_ack_psn);
+ qp->s_flags &= ~RVT_S_ACK_PENDING;
+ ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
+ ps->s_txreq->ss = NULL;
}
qp->s_rdma_ack_cnt++;
- ps->s_txreq->sde = priv->s_sde;
+ ps->s_txreq->sde = qpriv->s_sde;
ps->s_txreq->s_cur_size = len;
ps->s_txreq->hdr_dwords = hwords;
- hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
+ hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
return 1;
-
+error_qp:
+ spin_unlock_irqrestore(&qp->s_lock, ps->flags);
+ spin_lock_irqsave(&qp->r_lock, ps->flags);
+ spin_lock(&qp->s_lock);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_lock, ps->flags);
+ spin_lock_irqsave(&qp->s_lock, ps->flags);
bail:
qp->s_ack_state = OP(ACKNOWLEDGE);
/*
@@ -258,17 +433,23 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_other_headers *ohdr;
- struct rvt_sge_state *ss;
+ struct rvt_sge_state *ss = NULL;
struct rvt_swqe *wqe;
- u32 hwords;
- u32 len;
- u32 bth0 = 0;
- u32 bth2;
+ struct hfi1_swqe_priv *wpriv;
+ struct tid_rdma_request *req = NULL;
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ u32 hwords = 5;
+ u32 len = 0;
+ u32 bth0 = 0, bth2 = 0;
+ u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
u32 pmtu = qp->pmtu;
char newreq;
int middle = 0;
int delta;
+ struct tid_rdma_flow *flow = NULL;
+ struct tid_rdma_params *remote;
+ trace_hfi1_sender_make_rc_req(qp);
lockdep_assert_held(&qp->s_lock);
ps->s_txreq = get_txreq(ps->dev, qp);
if (!ps->s_txreq)
@@ -309,13 +490,13 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
clear_ahg(qp);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
+ hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
goto done_free_tx;
}
- if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
+ if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
goto bail;
if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
@@ -329,6 +510,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
/* Send a request. */
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
+check_s_state:
switch (qp->s_state) {
default:
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
@@ -350,9 +532,13 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
/*
* If a fence is requested, wait for previous
* RDMA read and atomic operations to finish.
+ * However, there is no need to guard against
+ * TID RDMA READ after TID RDMA READ.
*/
if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
- qp->s_num_rd_atomic) {
+ qp->s_num_rd_atomic &&
+ (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
+ priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
qp->s_flags |= RVT_S_WAIT_FENCE;
goto bail;
}
@@ -397,6 +583,15 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
len = wqe->length;
ss = &qp->s_sge;
bth2 = mask_psn(qp->s_psn);
+
+ /*
+ * Interlock between various IB requests and TID RDMA
+ * if necessary.
+ */
+ if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
+ hfi1_tid_rdma_wqe_interlock(qp, wqe))
+ goto bail;
+
switch (wqe->wr.opcode) {
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
@@ -473,21 +668,126 @@ no_flow_control:
qp->s_cur = 0;
break;
+ case IB_WR_TID_RDMA_WRITE:
+ if (newreq) {
+ /*
+ * Limit the number of TID RDMA WRITE requests.
+ */
+ if (atomic_read(&priv->n_tid_requests) >=
+ HFI1_TID_RDMA_WRITE_CNT)
+ goto bail;
+
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ }
+
+ hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr,
+ &bth1, &bth2,
+ &len);
+ ss = NULL;
+ if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
+ priv->s_tid_cur = qp->s_cur;
+ if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
+ priv->s_tid_tail = qp->s_cur;
+ priv->s_state = TID_OP(WRITE_RESP);
+ }
+ } else if (priv->s_tid_cur == priv->s_tid_head) {
+ struct rvt_swqe *__w;
+ struct tid_rdma_request *__r;
+
+ __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
+ __r = wqe_to_tid_req(__w);
+
+ /*
+ * The s_tid_cur pointer is advanced to s_cur if
+ * any of the following conditions about the WQE
+ * to which s_tid_cur currently points are
+ * satisfied:
+ * 1. The request is not a TID RDMA WRITE
+ * request,
+ * 2. The request is in the INACTIVE or
+ * COMPLETE states (TID RDMA READ requests
+ * stay at INACTIVE and TID RDMA WRITE
+ * transition to COMPLETE when done),
+ * 3. The request is in the ACTIVE or SYNC
+ * state and the number of completed
+ * segments is equal to the total segment
+ * count.
+ * (If ACTIVE, the request is waiting for
+ * ACKs. If SYNC, the request has not
+ * received any responses because it's
+ * waiting on a sync point.)
+ */
+ if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
+ __r->state == TID_REQUEST_INACTIVE ||
+ __r->state == TID_REQUEST_COMPLETE ||
+ ((__r->state == TID_REQUEST_ACTIVE ||
+ __r->state == TID_REQUEST_SYNC) &&
+ __r->comp_seg == __r->total_segs)) {
+ if (priv->s_tid_tail ==
+ priv->s_tid_cur &&
+ priv->s_state ==
+ TID_OP(WRITE_DATA_LAST)) {
+ priv->s_tid_tail = qp->s_cur;
+ priv->s_state =
+ TID_OP(WRITE_RESP);
+ }
+ priv->s_tid_cur = qp->s_cur;
+ }
+ /*
+ * A corner case: when the last TID RDMA WRITE
+ * request was completed, s_tid_head,
+ * s_tid_cur, and s_tid_tail all point to the
+ * same location. Other requests are posted and
+ * s_cur wraps around to the same location,
+ * where a new TID RDMA WRITE is posted. In
+ * this case, none of the indices need to be
+ * updated. However, priv->s_state should be.
+ */
+ if (priv->s_tid_tail == qp->s_cur &&
+ priv->s_state == TID_OP(WRITE_DATA_LAST))
+ priv->s_state = TID_OP(WRITE_RESP);
+ }
+ req = wqe_to_tid_req(wqe);
+ if (newreq) {
+ priv->s_tid_head = qp->s_cur;
+ priv->pending_tid_w_resp += req->total_segs;
+ atomic_inc(&priv->n_tid_requests);
+ atomic_dec(&priv->n_requests);
+ } else {
+ req->state = TID_REQUEST_RESEND;
+ req->comp_seg = delta_psn(bth2, wqe->psn);
+ /*
+ * Pull back any segments since we are going
+ * to re-receive them.
+ */
+ req->setup_head = req->clear_tail;
+ priv->pending_tid_w_resp +=
+ delta_psn(wqe->lpsn, bth2) + 1;
+ }
+
+ trace_hfi1_tid_write_sender_make_req(qp, newreq);
+ trace_hfi1_tid_req_make_req_write(qp, newreq,
+ wqe->wr.opcode,
+ wqe->psn, wqe->lpsn,
+ req);
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
case IB_WR_RDMA_READ:
/*
* Don't allow more operations to be started
* than the QP limits allow.
*/
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
+ goto bail;
}
+ qp->s_num_rd_atomic++;
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
put_ib_reth_vaddr(
wqe->rdma_wr.remote_addr,
&ohdr->u.rc.reth);
@@ -503,23 +803,99 @@ no_flow_control:
qp->s_cur = 0;
break;
+ case IB_WR_TID_RDMA_READ:
+ trace_hfi1_tid_read_sender_make_req(qp, newreq);
+ wpriv = wqe->priv;
+ req = wqe_to_tid_req(wqe);
+ trace_hfi1_tid_req_make_req_read(qp, newreq,
+ wqe->wr.opcode,
+ wqe->psn, wqe->lpsn,
+ req);
+ delta = cmp_psn(qp->s_psn, wqe->psn);
+
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow. We could get here under
+ * three conditions; (1) It's a new request; (2) We are
+ * sending the second or later segment of a request,
+ * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
+ * when the last segment of a previous request is
+ * received just before this; (3) We are re-sending a
+ * request.
+ */
+ if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
+ goto bail;
+ }
+ if (newreq) {
+ struct tid_rdma_flow *flow =
+ &req->flows[req->setup_head];
+
+ /*
+ * Set up s_sge as it is needed for TID
+ * allocation. However, if the pages have been
+ * walked and mapped, skip it. An earlier try
+ * has failed to allocate the TID entries.
+ */
+ if (!flow->npagesets) {
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ qp->s_len = wqe->length;
+ req->isge = 0;
+ req->clear_tail = req->setup_head;
+ req->flow_idx = req->setup_head;
+ req->state = TID_REQUEST_ACTIVE;
+ }
+ } else if (delta == 0) {
+ /* Re-send a request */
+ req->cur_seg = 0;
+ req->comp_seg = 0;
+ req->ack_pending = 0;
+ req->flow_idx = req->clear_tail;
+ req->state = TID_REQUEST_RESEND;
+ }
+ req->s_next_psn = qp->s_psn;
+ /* Read one segment at a time */
+ len = min_t(u32, req->seg_len,
+ wqe->length - req->seg_len * req->cur_seg);
+ delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr,
+ &bth1, &bth2,
+ &len);
+ if (delta <= 0) {
+ /* Wait for TID space */
+ goto bail;
+ }
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ hwords += delta;
+ ss = &wpriv->ss;
+ /* Check if this is the last segment */
+ if (req->cur_seg >= req->total_segs &&
+ ++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
/*
* Don't allow more operations to be started
* than the QP limits allow.
*/
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
+ goto bail;
}
- if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ qp->s_num_rd_atomic++;
+
+ /* FALLTHROUGH */
+ case IB_WR_OPFN:
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_OPFN) {
qp->s_state = OP(COMPARE_SWAP);
put_ib_ateth_swap(wqe->atomic_wr.swap,
&ohdr->u.atomic_eth);
@@ -546,18 +922,23 @@ no_flow_control:
default:
goto bail;
}
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
- qp->s_len = wqe->length;
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ qp->s_len = wqe->length;
+ }
if (newreq) {
qp->s_tail++;
if (qp->s_tail >= qp->s_size)
qp->s_tail = 0;
}
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
qp->s_psn = wqe->lpsn + 1;
+ else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
+ qp->s_psn = req->s_next_psn;
else
qp->s_psn++;
break;
@@ -674,10 +1055,137 @@ no_flow_control:
if (qp->s_cur == qp->s_size)
qp->s_cur = 0;
break;
+
+ case TID_OP(WRITE_RESP):
+ /*
+ * This value for s_state is used for restarting a TID RDMA
+ * WRITE request. See the comment in OP(RDMA_READ_RESPONSE_MIDDLE)
+ * for more.
+ */
+ req = wqe_to_tid_req(wqe);
+ req->state = TID_REQUEST_RESEND;
+ rcu_read_lock();
+ remote = rcu_dereference(priv->tid_rdma.remote);
+ req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
+ len = wqe->length - (req->comp_seg * remote->max_len);
+ rcu_read_unlock();
+
+ bth2 = mask_psn(qp->s_psn);
+ hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
+ &bth2, &len);
+ qp->s_psn = wqe->lpsn + 1;
+ ss = NULL;
+ qp->s_state = TID_OP(WRITE_REQ);
+ priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
+ priv->s_tid_cur = qp->s_cur;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn, req);
+ break;
+
+ case TID_OP(READ_RESP):
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
+ goto bail;
+ /* This is used to restart a TID read request */
+ req = wqe_to_tid_req(wqe);
+ wpriv = wqe->priv;
+ /*
+ * Back down. The field qp->s_psn has been set to the psn with
+ * which the request should be restarted. It's OK to use division
+ * as this is on the retry path.
+ */
+ req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
+
+ /*
+ * The following function needs to be redefined to return the
+ * status to make sure that we find the flow. At the same
+ * time, we can use the req->state change to check whether the
+ * call succeeds.
+ */
+ req->state = TID_REQUEST_RESEND;
+ hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
+ if (req->state != TID_REQUEST_ACTIVE) {
+ /*
+ * Failed to find the flow. Release all allocated tid
+ * resources.
+ */
+ hfi1_kern_exp_rcv_clear_all(req);
+ hfi1_kern_clear_hw_flow(priv->rcd, qp);
+
+ hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
+ goto bail;
+ }
+ req->state = TID_REQUEST_RESEND;
+ len = min_t(u32, req->seg_len,
+ wqe->length - req->seg_len * req->cur_seg);
+ flow = &req->flows[req->flow_idx];
+ len -= flow->sent;
+ req->s_next_psn = flow->flow_state.ib_lpsn + 1;
+ delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1,
+ &bth2, &len);
+ if (delta <= 0) {
+ /* Wait for TID space */
+ goto bail;
+ }
+ hwords += delta;
+ ss = &wpriv->ss;
+ /* Check if this is the last segment */
+ if (req->cur_seg >= req->total_segs &&
+ ++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ qp->s_psn = req->s_next_psn;
+ trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn, req);
+ break;
+ case TID_OP(READ_REQ):
+ req = wqe_to_tid_req(wqe);
+ delta = cmp_psn(qp->s_psn, wqe->psn);
+ /*
+ * If the current WR is not TID RDMA READ, or this is the start
+ * of a new request, we need to change the qp->s_state so that
+ * the request can be set up properly.
+ */
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
+ qp->s_cur == qp->s_tail) {
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ if (delta == 0 || qp->s_cur == qp->s_tail)
+ goto check_s_state;
+ else
+ goto bail;
+ }
+
+ /* Rate limiting */
+ if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
+ goto bail;
+ }
+
+ wpriv = wqe->priv;
+ /* Read one segment at a time */
+ len = min_t(u32, req->seg_len,
+ wqe->length - req->seg_len * req->cur_seg);
+ delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1,
+ &bth2, &len);
+ if (delta <= 0) {
+ /* Wait for TID space */
+ goto bail;
+ }
+ hwords += delta;
+ ss = &wpriv->ss;
+ /* Check if this is the last segment */
+ if (req->cur_seg >= req->total_segs &&
+ ++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ qp->s_psn = req->s_next_psn;
+ trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn, req);
+ break;
}
qp->s_sending_hpsn = bth2;
delta = delta_psn(bth2, wqe->psn);
- if (delta && delta % HFI1_PSN_CREDIT == 0)
+ if (delta && delta % HFI1_PSN_CREDIT == 0 &&
+ wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
bth2 |= IB_BTH_REQ_ACK;
if (qp->s_flags & RVT_S_SEND_ONE) {
qp->s_flags &= ~RVT_S_SEND_ONE;
@@ -693,6 +1201,7 @@ no_flow_control:
qp,
ohdr,
bth0 | (qp->s_state << 24),
+ bth1,
bth2,
middle,
ps);
@@ -709,6 +1218,12 @@ bail:
bail_no_tx:
ps->s_txreq = NULL;
qp->s_flags &= ~RVT_S_BUSY;
+ /*
+ * If we didn't get a txreq, the QP will be woken up later to try
+ * again. Set the flags to indicate which work item to wake
+ * up.
+ */
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
return 0;
}
@@ -796,6 +1311,11 @@ static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
if (qp->s_mig_state == IB_MIG_MIGRATED)
bth0 |= IB_BTH_MIG_REQ;
bth1 = (!!is_fecn) << IB_BECN_SHIFT;
+ /*
+ * Inline ACKs go out without the use of the Verbs send engine, so
+ * we need to set the STL Verbs Extended bit here
+ */
+ bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT;
hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
}
@@ -936,6 +1456,43 @@ void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
}
/**
+ * update_num_rd_atomic - update the qp->s_num_rd_atomic
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ * @wqe: the wqe
+ *
+ * This is called from reset_psn() to update qp->s_num_rd_atomic
+ * for the current wqe.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
+ struct rvt_swqe *wqe)
+{
+ u32 opcode = wqe->wr.opcode;
+
+ if (opcode == IB_WR_RDMA_READ ||
+ opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ qp->s_num_rd_atomic++;
+ } else if (opcode == IB_WR_TID_RDMA_READ) {
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (cmp_psn(psn, wqe->lpsn) <= 0) {
+ u32 cur_seg;
+
+ cur_seg = (psn - wqe->psn) / priv->pkts_ps;
+ req->ack_pending = cur_seg - req->comp_seg;
+ priv->pending_tid_r_segs += req->ack_pending;
+ qp->s_num_rd_atomic += req->ack_pending;
+ } else {
+ priv->pending_tid_r_segs += req->total_segs;
+ qp->s_num_rd_atomic += req->total_segs;
+ }
+ }
+}
+
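
The cur_seg arithmetic above recovers how many full segments precede the restart PSN. A minimal standalone sketch of that division, with hypothetical PSN values not taken from this patch:

    #include <assert.h>

    int main(void)
    {
            unsigned int wqe_psn = 100; /* first PSN of the request */
            unsigned int psn = 117;     /* PSN the request restarts from */
            unsigned int pkts_ps = 8;   /* packets per segment (priv->pkts_ps) */

            /* Same division as update_num_rd_atomic(): segments completed
             * before the restart point.
             */
            unsigned int cur_seg = (psn - wqe_psn) / pkts_ps;

            assert(cur_seg == 2); /* PSNs 100..115 cover two full segments */
            return 0;
    }
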
+/**
* reset_psn - reset the QP state to send starting from PSN
* @qp: the QP
* @psn: the packet sequence number to restart at
@@ -949,9 +1506,13 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
u32 n = qp->s_acked;
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
u32 opcode;
+ struct hfi1_qp_priv *priv = qp->priv;
lockdep_assert_held(&qp->s_lock);
qp->s_cur = n;
+ priv->pending_tid_r_segs = 0;
+ priv->pending_tid_w_resp = 0;
+ qp->s_num_rd_atomic = 0;
/*
* If we are starting the request from the beginning,
@@ -961,9 +1522,9 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
qp->s_state = OP(SEND_LAST);
goto done;
}
+ update_num_rd_atomic(qp, psn, wqe);
/* Find the work request opcode corresponding to the given PSN. */
- opcode = wqe->wr.opcode;
for (;;) {
int diff;
@@ -973,8 +1534,11 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
break;
wqe = rvt_get_swqe_ptr(qp, n);
diff = cmp_psn(psn, wqe->psn);
- if (diff < 0)
+ if (diff < 0) {
+			/* Point wqe back to the previous one */
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
break;
+ }
qp->s_cur = n;
/*
* If we are starting the request from the beginning,
@@ -984,8 +1548,10 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
qp->s_state = OP(SEND_LAST);
goto done;
}
- opcode = wqe->wr.opcode;
+
+ update_num_rd_atomic(qp, psn, wqe);
}
+ opcode = wqe->wr.opcode;
/*
* Set the state to restart in the middle of a request.
@@ -1003,10 +1569,18 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
break;
+ case IB_WR_TID_RDMA_WRITE:
+ qp->s_state = TID_OP(WRITE_RESP);
+ break;
+
case IB_WR_RDMA_READ:
qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
break;
+ case IB_WR_TID_RDMA_READ:
+ qp->s_state = TID_OP(READ_RESP);
+ break;
+
default:
/*
	 * This case shouldn't happen since it's only
@@ -1015,6 +1589,7 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
qp->s_state = OP(SEND_LAST);
}
done:
+ priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
qp->s_psn = psn;
/*
* Set RVT_S_WAIT_PSN as rc_complete() may start the timer
@@ -1025,6 +1600,7 @@ done:
(cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
qp->s_flags |= RVT_S_WAIT_PSN;
qp->s_flags &= ~HFI1_S_AHG_VALID;
+ trace_hfi1_sender_reset_psn(qp);
}
/*
@@ -1033,18 +1609,47 @@ done:
*/
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
struct hfi1_ibport *ibp;
lockdep_assert_held(&qp->r_lock);
lockdep_assert_held(&qp->s_lock);
+ trace_hfi1_sender_restart_rc(qp);
if (qp->s_retry == 0) {
if (qp->s_mig_state == IB_MIG_ARMED) {
hfi1_migrate_qp(qp);
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
- rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ /*
+ * We need special handling for the OPFN request WQEs as
+ * they are not allowed to generate real user errors
+ */
+ if (wqe->wr.opcode == IB_WR_OPFN) {
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ /*
+ * Call opfn_conn_reply() with capcode and
+ * remaining data as 0 to close out the
+ * current request
+ */
+ opfn_conn_reply(qp, priv->opfn.curr);
+ wqe = do_rc_completion(qp, wqe, ibp);
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
+ } else {
+ trace_hfi1_tid_write_sender_restart_rc(qp, 0);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ struct tid_rdma_request *req;
+
+ req = wqe_to_tid_req(wqe);
+ hfi1_kern_exp_rcv_clear_all(req);
+ hfi1_kern_clear_hw_flow(priv->rcd, qp);
+ }
+
+ hfi1_trdma_send_complete(qp, wqe,
+ IB_WC_RETRY_EXC_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
return;
} else { /* need to handle delayed completion */
return;
@@ -1054,14 +1659,15 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
}
ibp = to_iport(qp->ibqp.device, qp->port_num);
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_READ)
ibp->rvp.n_rc_resends++;
else
ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
- RVT_S_WAIT_ACK);
+ RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
if (wait)
qp->s_flags |= RVT_S_SEND_ONE;
reset_psn(qp, psn);
@@ -1069,7 +1675,8 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
/*
* Set qp->s_sending_psn to the next PSN after the given one.
- * This would be psn+1 except when RDMA reads are present.
+ * This would be psn+1 except when RDMA reads or TID RDMA ops
+ * are present.
*/
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
@@ -1081,7 +1688,9 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
for (;;) {
wqe = rvt_get_swqe_ptr(qp, n);
if (cmp_psn(psn, wqe->lpsn) <= 0) {
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
qp->s_sending_psn = wqe->lpsn + 1;
else
qp->s_sending_psn = psn + 1;
@@ -1104,8 +1713,9 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
struct rvt_swqe *wqe;
struct ib_header *hdr = NULL;
struct hfi1_16b_header *hdr_16b = NULL;
- u32 opcode;
+ u32 opcode, head, tail;
u32 psn;
+ struct tid_rdma_request *req;
lockdep_assert_held(&qp->s_lock);
if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
@@ -1130,25 +1740,85 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
}
opcode = ib_bth_get_opcode(ohdr);
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
+ opcode == TID_OP(READ_RESP) ||
+ opcode == TID_OP(WRITE_RESP)) {
WARN_ON(!qp->s_rdma_ack_cnt);
qp->s_rdma_ack_cnt--;
return;
}
psn = ib_bth_get_psn(ohdr);
- reset_sending_psn(qp, psn);
+ /*
+ * Don't attempt to reset the sending PSN for packets in the
+ * KDETH PSN space since the PSN does not match anything.
+ */
+ if (opcode != TID_OP(WRITE_DATA) &&
+ opcode != TID_OP(WRITE_DATA_LAST) &&
+ opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC))
+ reset_sending_psn(qp, psn);
+
+ /* Handle TID RDMA WRITE packets differently */
+ if (opcode >= TID_OP(WRITE_REQ) &&
+ opcode <= TID_OP(WRITE_DATA_LAST)) {
+ head = priv->s_tid_head;
+ tail = priv->s_tid_cur;
+ /*
+		 * s_tid_cur is set to s_tid_head in the case where
+ * a new TID RDMA request is being started and all
+ * previous ones have been completed.
+ * Therefore, we need to do a secondary check in order
+ * to properly determine whether we should start the
+ * RC timer.
+ */
+ wqe = rvt_get_swqe_ptr(qp, tail);
+ req = wqe_to_tid_req(wqe);
+ if (head == tail && req->comp_seg < req->total_segs) {
+ if (tail == 0)
+ tail = qp->s_size - 1;
+ else
+ tail -= 1;
+ }
+ } else {
+ head = qp->s_tail;
+ tail = qp->s_acked;
+ }
/*
* Start timer after a packet requesting an ACK has been sent and
* there are still requests that haven't been acked.
*/
- if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
+ if ((psn & IB_BTH_REQ_ACK) && tail != head &&
+ opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) &&
+ opcode != TID_OP(RESYNC) &&
!(qp->s_flags &
- (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
- (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- rvt_add_retry_timer(qp);
+ (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
+ (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ if (opcode == TID_OP(READ_REQ))
+ rvt_add_retry_timer_ext(qp, priv->timeout_shift);
+ else
+ rvt_add_retry_timer(qp);
+ }
+
+ /* Start TID RDMA ACK timer */
+ if ((opcode == TID_OP(WRITE_DATA) ||
+ opcode == TID_OP(WRITE_DATA_LAST) ||
+ opcode == TID_OP(RESYNC)) &&
+ (psn & IB_BTH_REQ_ACK) &&
+ !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
+ (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ /*
+ * The TID RDMA ACK packet could be received before this
+ * function is called. Therefore, add the timer only if TID
+ * RDMA ACK packets are actually pending.
+ */
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ req = wqe_to_tid_req(wqe);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ req->ack_seg < req->cur_seg)
+ hfi1_add_tid_retry_timer(qp);
+ }
while (qp->s_last != qp->s_acked) {
u32 s_last;
@@ -1157,6 +1827,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
+ trdma_clean_swqe(qp, wqe);
rvt_qp_wqe_unreserve(qp, wqe);
s_last = qp->s_last;
trace_hfi1_qp_send_completion(qp, wqe, s_last);
@@ -1195,20 +1866,24 @@ static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
* This is similar to hfi1_send_complete but has to check to be sure
* that the SGEs are not being referenced if the SWQE is being resent.
*/
-static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
- struct rvt_swqe *wqe,
- struct hfi1_ibport *ibp)
+struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ struct rvt_swqe *wqe,
+ struct hfi1_ibport *ibp)
{
+ struct hfi1_qp_priv *priv = qp->priv;
+
lockdep_assert_held(&qp->s_lock);
/*
* Don't decrement refcount and don't generate a
* completion if the SWQE is being resent until the send
* is finished.
*/
+ trace_hfi1_rc_completion(qp, wqe->lpsn);
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
u32 s_last;
+ trdma_clean_swqe(qp, wqe);
rvt_put_swqe(wqe);
rvt_qp_wqe_unreserve(qp, wqe);
s_last = qp->s_last;
@@ -1243,7 +1918,16 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
}
qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, wqe->lpsn);
+ /*
+ * Don't update the last PSN if the request being completed is
+ * a TID RDMA WRITE request.
+	 * Completion of TID RDMA WRITE requests is done by the
+ * TID RDMA ACKs and as such could be for a request that has
+ * already been ACKed as far as the IB state machine is
+ * concerned.
+ */
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
+ update_last_psn(qp, wqe->lpsn);
/*
* If we are completing a request which is in the process of
@@ -1266,9 +1950,61 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
qp->s_draining = 0;
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
}
+ if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
+ priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
+ hfi1_schedule_send(qp);
+ }
return wqe;
}
+static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
+{
+ /* Retry this request. */
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
+ hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= RVT_R_RSP_SEND;
+ rvt_get_qp(qp);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ }
+}
+
+/**
+ * update_qp_retry_state - Update qp retry state.
+ * @qp: the QP
+ * @psn: the packet sequence number of the TID RDMA WRITE RESP.
+ * @spsn: The start psn for the given TID RDMA WRITE swqe.
+ * @lpsn: The last psn for the given TID RDMA WRITE swqe.
+ *
+ * This function is called to update the qp retry state upon
+ * receiving a TID WRITE RESP after the qp is scheduled to retry
+ * a request.
+ */
+static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
+ u32 lpsn)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ qp->s_psn = psn + 1;
+ /*
+ * If this is the first TID RDMA WRITE RESP packet for the current
+ * request, change the s_state so that the retry will be processed
+ * correctly. Similarly, if this is the last TID RDMA WRITE RESP
+ * packet, change the s_state and advance the s_cur.
+ */
+ if (cmp_psn(psn, lpsn) >= 0) {
+ qp->s_cur = qpriv->s_tid_cur + 1;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ qp->s_state = TID_OP(WRITE_REQ);
+ } else if (!cmp_psn(psn, spsn)) {
+ qp->s_cur = qpriv->s_tid_cur;
+ qp->s_state = TID_OP(WRITE_RESP);
+ }
+}
+
/**
* do_rc_ack - process an incoming RC ACK
* @qp: the QP the ACK came in on
@@ -1280,15 +2016,17 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
* May be called at interrupt level, with the QP s_lock held.
* Returns 1 if OK, 0 if current operation should be aborted (NAK).
*/
-static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
- u64 val, struct hfi1_ctxtdata *rcd)
+int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
+ u64 val, struct hfi1_ctxtdata *rcd)
{
struct hfi1_ibport *ibp;
enum ib_wc_status status;
+ struct hfi1_qp_priv *qpriv = qp->priv;
struct rvt_swqe *wqe;
int ret = 0;
u32 ack_psn;
int diff;
+ struct rvt_dev_info *rdi;
lockdep_assert_held(&qp->s_lock);
/*
@@ -1331,20 +2069,14 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
*/
if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
(opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
+ (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
+ (opcode != TID_OP(READ_RESP) || diff != 0)) ||
((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
- (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
- /* Retry this request. */
- if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
- qp->r_flags |= RVT_R_RDMAR_SEQ;
- hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_SEND;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait,
- &rcd->qp_wait_list);
- }
- }
+ (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) ||
+ (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ (delta_psn(psn, qp->s_last_psn) != 1))) {
+ set_restart_qp(qp, rcd);
/*
* No need to process the ACK/NAK since we are
* restarting an earlier request.
@@ -1356,6 +2088,9 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
u64 *vaddr = wqe->sg_list[0].vaddr;
*vaddr = val;
}
+ if (wqe->wr.opcode == IB_WR_OPFN)
+ opfn_conn_reply(qp, val);
+
if (qp->s_num_rd_atomic &&
(wqe->wr.opcode == IB_WR_RDMA_READ ||
wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
@@ -1373,26 +2108,85 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
hfi1_schedule_send(qp);
}
}
+
+ /*
+ * TID RDMA WRITE requests will be completed by the TID RDMA
+ * ACK packet handler (see tid_rdma.c).
+ */
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
+ break;
+
wqe = do_rc_completion(qp, wqe, ibp);
if (qp->s_acked == qp->s_tail)
break;
}
+ trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
+ trace_hfi1_sender_do_rc_ack(qp);
switch (aeth >> IB_AETH_NAK_SHIFT) {
case 0: /* ACK */
this_cpu_inc(*ibp->rvp.rc_acks);
- if (qp->s_acked != qp->s_tail) {
- /*
- * We are expecting more ACKs so
- * mod the retry timer.
- */
- rvt_mod_retry_timer(qp);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ if (wqe_to_tid_req(wqe)->ack_pending)
+ rvt_mod_retry_timer_ext(qp,
+ qpriv->timeout_shift);
+ else
+ rvt_stop_rc_timers(qp);
+ } else if (qp->s_acked != qp->s_tail) {
+ struct rvt_swqe *__w = NULL;
+
+ if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
+ __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
+
/*
- * We can stop re-sending the earlier packets and
- * continue with the next packet the receiver wants.
+ * Stop timers if we've received all of the TID RDMA
+			 * WRITE responses.
*/
- if (cmp_psn(qp->s_psn, psn) <= 0)
- reset_psn(qp, psn + 1);
+ if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ opcode == TID_OP(WRITE_RESP)) {
+ /*
+ * Normally, the loop above would correctly
+ * process all WQEs from s_acked onward and
+ * either complete them or check for correct
+ * PSN sequencing.
+ * However, for TID RDMA, due to pipelining,
+ * the response may not be for the request at
+				 * s_acked so the above loop would just be
+ * skipped. This does not allow for checking
+ * the PSN sequencing. It has to be done
+ * separately.
+ */
+ if (cmp_psn(psn, qp->s_last_psn + 1)) {
+ set_restart_qp(qp, rcd);
+ goto bail_stop;
+ }
+ /*
+ * If the psn is being resent, stop the
+ * resending.
+ */
+ if (qp->s_cur != qp->s_tail &&
+ cmp_psn(qp->s_psn, psn) <= 0)
+ update_qp_retry_state(qp, psn,
+ __w->psn,
+ __w->lpsn);
+ else if (--qpriv->pending_tid_w_resp)
+ rvt_mod_retry_timer(qp);
+ else
+ rvt_stop_rc_timers(qp);
+ } else {
+ /*
+ * We are expecting more ACKs so
+ * mod the retry timer.
+ */
+ rvt_mod_retry_timer(qp);
+ /*
+ * We can stop re-sending the earlier packets
+ * and continue with the next packet the
+ * receiver wants.
+ */
+ if (cmp_psn(qp->s_psn, psn) <= 0)
+ reset_psn(qp, psn + 1);
+ }
} else {
/* No more acks - kill all timers */
rvt_stop_rc_timers(qp);
@@ -1408,6 +2202,15 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
rvt_get_credit(qp, aeth);
qp->s_rnr_retry = qp->s_rnr_retry_cnt;
qp->s_retry = qp->s_retry_cnt;
+ /*
+ * If the current request is a TID RDMA WRITE request and the
+ * response is not a TID RDMA WRITE RESP packet, s_last_psn
+ * can't be advanced.
+ */
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ opcode != TID_OP(WRITE_RESP) &&
+ cmp_psn(psn, wqe->psn) >= 0)
+ return 1;
update_last_psn(qp, psn);
return 1;
@@ -1417,20 +2220,31 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
goto bail_stop;
if (qp->s_flags & RVT_S_WAIT_RNR)
goto bail_stop;
- if (qp->s_rnr_retry == 0) {
+ rdi = ib_to_rvt(qp->ibqp.device);
+ if (qp->s_rnr_retry == 0 &&
+ !((rdi->post_parms[wqe->wr.opcode].flags &
+ RVT_OPERATION_IGN_RNR_CNT) &&
+ qp->s_rnr_retry_cnt == 0)) {
status = IB_WC_RNR_RETRY_EXC_ERR;
goto class_b;
}
- if (qp->s_rnr_retry_cnt < 7)
+ if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
qp->s_rnr_retry--;
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
+ /*
+ * The last valid PSN is the previous PSN. For TID RDMA WRITE
+ * request, s_last_psn should be incremented only when a TID
+ * RDMA WRITE RESP is received to avoid skipping lost TID RDMA
+ * WRITE RESP packets.
+ */
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
+ reset_psn(qp, qp->s_last_psn + 1);
+ } else {
+ update_last_psn(qp, psn - 1);
+ reset_psn(qp, psn);
+ }
ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
-
- reset_psn(qp, psn);
-
qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
rvt_stop_rc_timers(qp);
rvt_add_rnr_timer(qp, aeth);
@@ -1470,7 +2284,10 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
- rvt_send_complete(qp, wqe, status);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
+ hfi1_kern_read_tid_flow_free(qp);
+
+ hfi1_trdma_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1511,6 +2328,8 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
while (cmp_psn(psn, wqe->lpsn) > 0) {
if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
break;
@@ -1717,16 +2536,6 @@ bail:
return;
}
-static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
- struct rvt_qp *qp)
-{
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
-}
-
static inline void rc_cancel_ack(struct rvt_qp *qp)
{
qp->r_adefered = 0;
@@ -1759,8 +2568,9 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
struct rvt_ack_entry *e;
unsigned long flags;
- u8 i, prev;
- int old_req;
+ u8 prev;
+ u8 mra; /* most recent ACK */
+ bool old_req;
trace_hfi1_rcv_error(qp, psn);
if (diff > 0) {
@@ -1806,29 +2616,8 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
spin_lock_irqsave(&qp->s_lock, flags);
- for (i = qp->r_head_ack_queue; ; i = prev) {
- if (i == qp->s_tail_ack_queue)
- old_req = 0;
- if (i)
- prev = i - 1;
- else
- prev = HFI1_MAX_RDMA_ATOMIC;
- if (prev == qp->r_head_ack_queue) {
- e = NULL;
- break;
- }
- e = &qp->s_ack_queue[prev];
- if (!e->opcode) {
- e = NULL;
- break;
- }
- if (cmp_psn(psn, e->psn) >= 0) {
- if (prev == qp->s_tail_ack_queue &&
- cmp_psn(psn, e->lpsn) <= 0)
- old_req = 0;
- break;
- }
- }
+ e = find_prev_entry(qp, psn, &prev, &mra, &old_req);
+
switch (opcode) {
case OP(RDMA_READ_REQUEST): {
struct ib_reth *reth;
@@ -1875,6 +2664,8 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
e->psn = psn;
if (old_req)
goto unlock_done;
+ if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue = prev;
qp->s_tail_ack_queue = prev;
break;
}
@@ -1888,6 +2679,8 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
*/
if (!e || e->opcode != (u8)opcode || old_req)
goto unlock_done;
+ if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
+ qp->s_acked_ack_queue = prev;
qp->s_tail_ack_queue = prev;
break;
}
@@ -1903,7 +2696,7 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
* Resend the most recent ACK if this request is
* after all the previous RDMA reads and atomics.
*/
- if (i == qp->r_head_ack_queue) {
+ if (mra == qp->r_head_ack_queue) {
spin_unlock_irqrestore(&qp->s_lock, flags);
qp->r_nak_state = 0;
qp->r_ack_psn = qp->r_psn - 1;
@@ -1914,7 +2707,9 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
* Resend the RDMA read or atomic op which
* ACKs this duplicate request.
*/
- qp->s_tail_ack_queue = i;
+ if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
+ qp->s_acked_ack_queue = mra;
+ qp->s_tail_ack_queue = mra;
break;
}
qp->s_ack_state = OP(ACKNOWLEDGE);
@@ -1931,17 +2726,6 @@ send_ack:
return 0;
}
-static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
-{
- unsigned next;
-
- next = n + 1;
- if (next > HFI1_MAX_RDMA_ATOMIC)
- next = 0;
- qp->s_tail_ack_queue = next;
- qp->s_ack_state = OP(ACKNOWLEDGE);
-}
-
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
u32 lqpn, u32 rqpn, u8 svc_type)
{
@@ -2039,6 +2823,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
void *data = packet->payload;
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
struct ib_other_headers *ohdr = packet->ohdr;
u32 opcode = packet->opcode;
@@ -2061,6 +2846,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
return;
fecn = process_ecn(qp, packet);
+ opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));
/*
* Process responses (ACKs) before anything else. Note that the
@@ -2292,17 +3078,17 @@ send_last:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
goto nack_inv;
next = qp->r_head_ack_queue + 1;
- /* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
- if (next > HFI1_MAX_RDMA_ATOMIC)
+ /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
next = 0;
spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (unlikely(next == qp->s_acked_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
goto nack_inv_unlck;
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
@@ -2343,6 +3129,7 @@ send_last:
qp->r_state = opcode;
qp->r_nak_state = 0;
qp->r_head_ack_queue = next;
+ qpriv->r_tid_alloc = qp->r_head_ack_queue;
/* Schedule the send engine. */
qp->s_flags |= RVT_S_RESP_PENDING;
@@ -2356,32 +3143,38 @@ send_last:
case OP(COMPARE_SWAP):
case OP(FETCH_ADD): {
- struct ib_atomic_eth *ateth;
+ struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
+ u64 vaddr = get_ib_ateth_vaddr(ateth);
+ bool opfn = opcode == OP(COMPARE_SWAP) &&
+ vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
struct rvt_ack_entry *e;
- u64 vaddr;
atomic64_t *maddr;
u64 sdata;
u32 rkey;
u8 next;
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ !opfn))
goto nack_inv;
next = qp->r_head_ack_queue + 1;
- if (next > HFI1_MAX_RDMA_ATOMIC)
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
next = 0;
spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (unlikely(next == qp->s_acked_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
goto nack_inv_unlck;
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
- ateth = &ohdr->u.atomic_eth;
- vaddr = get_ib_ateth_vaddr(ateth);
+ /* Process OPFN special virtual address */
+ if (opfn) {
+ opfn_conn_response(qp, e, ateth);
+ goto ack;
+ }
if (unlikely(vaddr & (sizeof(u64) - 1)))
goto nack_inv_unlck;
rkey = be32_to_cpu(ateth->rkey);
@@ -2400,6 +3193,7 @@ send_last:
sdata);
rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
+ack:
e->opcode = opcode;
e->sent = 0;
e->psn = psn;
@@ -2409,6 +3203,7 @@ send_last:
qp->r_state = opcode;
qp->r_nak_state = 0;
qp->r_head_ack_queue = next;
+ qpriv->r_tid_alloc = qp->r_head_ack_queue;
/* Schedule the send engine. */
qp->s_flags |= RVT_S_RESP_PENDING;
diff --git a/drivers/infiniband/hw/hfi1/rc.h b/drivers/infiniband/hw/hfi1/rc.h
new file mode 100644
index 000000000000..8e0935b9bf2a
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/rc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+
+#ifndef HFI1_RC_H
+#define HFI1_RC_H
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_RC_##x
+
+static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
+{
+ unsigned int next;
+
+ next = n + 1;
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ next = 0;
+ qp->s_tail_ack_queue = next;
+ qp->s_acked_ack_queue = next;
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+}
+
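
update_ack_queue() above advances an ack-queue index around a ring whose slot count is rvt_size_atomic() + 1. A standalone sketch of the wraparound, with an assumed queue size of 5 slots:

    #include <assert.h>

    #define ACK_QUEUE_SLOTS 5 /* rvt_size_atomic() + 1 in the driver */

    /* Advance an ack-queue index with wraparound, as update_ack_queue()
     * does for both s_tail_ack_queue and s_acked_ack_queue.
     */
    static unsigned int next_slot(unsigned int n)
    {
            unsigned int next = n + 1;

            if (next > ACK_QUEUE_SLOTS - 1)
                    next = 0;
            return next;
    }

    int main(void)
    {
            assert(next_slot(3) == 4);
            assert(next_slot(4) == 0); /* wraps past the last slot */
            return 0;
    }
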
+static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
+ struct rvt_qp *qp)
+{
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= RVT_R_RSP_NAK;
+ rvt_get_qp(qp);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
+
+static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
+ u32 psn, u32 pmtu)
+{
+ u32 len;
+
+ len = delta_psn(psn, wqe->psn) * pmtu;
+ return rvt_restart_sge(ss, wqe, len);
+}
+
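
restart_sge() converts the PSN delta into a byte offset at path-MTU granularity before handing off to rvt_restart_sge(). A worked example of that offset arithmetic, with hypothetical values:

    #include <assert.h>

    int main(void)
    {
            unsigned int wqe_psn = 200; /* first PSN of the WQE */
            unsigned int psn = 205;     /* PSN to restart from */
            unsigned int pmtu = 4096;   /* path MTU in bytes */

            /* delta_psn(psn, wqe->psn) * pmtu: byte offset of the restart */
            unsigned int len = (psn - wqe_psn) * pmtu;

            assert(len == 20480); /* five full packets already on the wire */
            return 0;
    }
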
+struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
+ u8 *prev_ack, bool *scheduled);
+int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
+ struct hfi1_ctxtdata *rcd);
+struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct hfi1_ibport *ibp);
+
+#endif /* HFI1_RC_H */
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 7fb317c711df..124a3ec1e15c 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -250,7 +250,6 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
struct ib_other_headers *ohdr,
u32 bth0, u32 bth1, u32 bth2)
{
- bth1 |= qp->remote_qpn;
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(bth1);
ohdr->bth[2] = cpu_to_be32(bth2);
@@ -272,13 +271,13 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
*/
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
+ u32 bth0, u32 bth1, u32 bth2,
+ int middle,
struct hfi1_pkt_state *ps)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibport *ibp = ps->ibp;
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 bth1 = 0;
u32 slid;
u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
u8 l4 = OPA_16B_L4_IB_LOCAL;
@@ -360,12 +359,12 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
*/
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
+ u32 bth0, u32 bth1, u32 bth2,
+ int middle,
struct hfi1_pkt_state *ps)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibport *ibp = ps->ibp;
- u32 bth1 = 0;
u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
u16 lrh0 = HFI1_LRH_BTH;
u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
@@ -415,7 +414,7 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
+ u32 bth0, u32 bth1, u32 bth2, int middle,
struct hfi1_pkt_state *ps);
/* We support only two types - 9B and 16B for now */
@@ -425,7 +424,7 @@ static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
};
void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
+ u32 bth0, u32 bth1, u32 bth2, int middle,
struct hfi1_pkt_state *ps)
{
struct hfi1_qp_priv *priv = qp->priv;
@@ -446,18 +445,21 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
priv->s_ahg->ahgidx = 0;
/* Make the appropriate header */
- hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth2, middle, ps);
+ hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
+ ps);
}
/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */
/**
- * schedule_send_yield - test for a yield required for QP send engine
+ * hfi1_schedule_send_yield - test for a yield required for QP
+ * send engine
* @timeout: Final time for timeout slice for jiffies
* @qp: a pointer to QP
* @ps: a pointer to a structure with commonly used lookup values for
* the send engine progress
+ * @tid: true if it is the tid leg
*
* This routine checks if the time slice for the QP has expired
* for RC QPs, if so an additional work entry is queued. At this
@@ -465,8 +467,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
* returns true if a yield is required, otherwise, false
* is returned.
*/
-static bool schedule_send_yield(struct rvt_qp *qp,
- struct hfi1_pkt_state *ps)
+bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ bool tid)
{
ps->pkts_sent = true;
@@ -474,8 +476,24 @@ static bool schedule_send_yield(struct rvt_qp *qp,
if (!ps->in_thread ||
workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
spin_lock_irqsave(&qp->s_lock, ps->flags);
- qp->s_flags &= ~RVT_S_BUSY;
- hfi1_schedule_send(qp);
+ if (!tid) {
+ qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_schedule_send(qp);
+ } else {
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (priv->s_flags &
+ HFI1_S_TID_BUSY_SET) {
+ qp->s_flags &= ~RVT_S_BUSY;
+ priv->s_flags &=
+ ~(HFI1_S_TID_BUSY_SET |
+ RVT_S_BUSY);
+ } else {
+ priv->s_flags &= ~RVT_S_BUSY;
+ }
+ hfi1_schedule_tid_send(qp);
+ }
+
spin_unlock_irqrestore(&qp->s_lock, ps->flags);
this_cpu_inc(*ps->ppd->dd->send_schedule);
trace_hfi1_rc_expired_time_slice(qp, true);
@@ -576,6 +594,8 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
do {
/* Check for a constructed packet to be sent. */
if (ps.s_txreq) {
+ if (priv->s_flags & HFI1_S_TID_BUSY_SET)
+ qp->s_flags |= RVT_S_BUSY;
spin_unlock_irqrestore(&qp->s_lock, ps.flags);
/*
* If the packet cannot be sent now, return and
@@ -585,7 +605,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
return;
/* allow other tasks to run */
- if (schedule_send_yield(qp, &ps))
+ if (hfi1_schedule_send_yield(qp, &ps, false))
return;
spin_lock_irqsave(&qp->s_lock, ps.flags);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 96897a91fb0a..b0110728f541 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1747,10 +1747,9 @@ retry:
*/
static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
{
- struct iowait *wait, *nw;
+ struct iowait *wait, *nw, *twait;
struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
- uint i, n = 0, seq, max_idx = 0;
- u8 max_starved_cnt = 0;
+ uint i, n = 0, seq, tidx = 0;
#ifdef CONFIG_SDMA_VERBOSITY
dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
@@ -1775,13 +1774,20 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
continue;
if (n == ARRAY_SIZE(waits))
break;
+ iowait_init_priority(wait);
num_desc = iowait_get_all_desc(wait);
if (num_desc > avail)
break;
avail -= num_desc;
- /* Find the most starved wait memeber */
- iowait_starve_find_max(wait, &max_starved_cnt,
- n, &max_idx);
+			/* Find the top-priority wait member */
+ if (n) {
+ twait = waits[tidx];
+ tidx =
+ iowait_priority_update_top(wait,
+ twait,
+ n,
+ tidx);
+ }
list_del_init(&wait->list);
waits[n++] = wait;
}
@@ -1790,12 +1796,12 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
}
} while (read_seqretry(&sde->waitlock, seq));
- /* Schedule the most starved one first */
+ /* Schedule the top-priority entry first */
if (n)
- waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON);
+ waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);
for (i = 0; i < n; i++)
- if (i != max_idx)
+ if (i != tidx)
waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}
diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
index bf7d777d756e..514a4784566b 100644
--- a/drivers/infiniband/hw/hfi1/sdma_txreq.h
+++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
@@ -91,6 +91,7 @@ struct sdma_desc {
#define SDMA_TXREQ_F_URGENT 0x0001
#define SDMA_TXREQ_F_AHG_COPY 0x0002
#define SDMA_TXREQ_F_USE_AHG 0x0004
+#define SDMA_TXREQ_F_VIP 0x0010
struct sdma_txreq;
typedef void (*callback_t)(struct sdma_txreq *, int);
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 2be513d4c9da..90f62c4bddba 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -498,7 +498,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
@@ -508,7 +508,7 @@ static ssize_t board_id_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
struct hfi1_devdata *dd = dd_from_dev(dev);
int ret;
@@ -524,7 +524,7 @@ static ssize_t boardversion_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
struct hfi1_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
@@ -536,7 +536,7 @@ static ssize_t nctxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
struct hfi1_devdata *dd = dd_from_dev(dev);
/*
@@ -555,7 +555,7 @@ static ssize_t nfreectxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
struct hfi1_devdata *dd = dd_from_dev(dev);
/* Return the number of free user ports (contexts) available. */
@@ -567,7 +567,7 @@ static ssize_t serial_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
struct hfi1_devdata *dd = dd_from_dev(dev);
return scnprintf(buf, PAGE_SIZE, "%s", dd->serial);
@@ -579,7 +579,7 @@ static ssize_t chip_reset_store(struct device *device,
size_t count)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
struct hfi1_devdata *dd = dd_from_dev(dev);
int ret;
@@ -609,7 +609,7 @@ static ssize_t tempsense_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct hfi1_ibdev, rdi.ibdev);
struct hfi1_devdata *dd = dd_from_dev(dev);
struct hfi1_temp temp;
int ret;
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index da1ecb68a928..43cbce7a19ea 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -5,8 +5,282 @@
*/
#include "hfi.h"
+#include "qp.h"
+#include "rc.h"
#include "verbs.h"
#include "tid_rdma.h"
+#include "exp_rcv.h"
+#include "trace.h"
+
+/**
+ * DOC: TID RDMA READ protocol
+ *
+ * This is an end-to-end protocol at the hfi1 level between two nodes that
+ * improves performance by avoiding data copy on the requester side. It
+ * converts a qualified RDMA READ request into a TID RDMA READ request on
+ * the requester side and thereafter handles the request and response
+ * differently. To be qualified, the RDMA READ request should meet the
+ * following:
+ * -- The total data length should be greater than 256K;
+ * -- The total data length should be a multiple of 4K page size;
+ * -- Each local scatter-gather entry should be 4K page aligned;
+ * -- Each local scatter-gather entry should be a multiple of 4K page size;
+ */
+
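
A minimal sketch of the qualification rules listed above, written as standalone C; the helper name and sge struct are hypothetical, since the actual conversion check is not part of this hunk:

    #include <stdbool.h>

    #define SZ_4K   4096UL
    #define SZ_256K (256UL * 1024)

    struct sge { /* hypothetical local scatter-gather entry */
            unsigned long addr;
            unsigned long len;
    };

    /* Hypothetical check mirroring the four rules above. */
    static bool qualifies_for_tid_read(const struct sge *sgl, int num_sge,
                                       unsigned long total_len)
    {
            int i;

            if (total_len <= SZ_256K || total_len % SZ_4K)
                    return false;
            for (i = 0; i < num_sge; i++)
                    if ((sgl[i].addr % SZ_4K) || (sgl[i].len % SZ_4K))
                            return false;
            return true;
    }
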
+#define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK BIT_ULL(32)
+#define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK BIT_ULL(33)
+#define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK BIT_ULL(34)
+#define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK BIT_ULL(35)
+#define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK BIT_ULL(37)
+#define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK BIT_ULL(38)
+
+/* Maximum number of packets within a flow generation. */
+#define MAX_TID_FLOW_PSN BIT(HFI1_KDETH_BTH_SEQ_SHIFT)
+
+#define GENERATION_MASK 0xFFFFF
+
+static u32 mask_generation(u32 a)
+{
+ return a & GENERATION_MASK;
+}
+
+/* Reserved generation value to set to unused flows for kernel contexts */
+#define KERN_GENERATION_RESERVED mask_generation(U32_MAX)
+
+/*
+ * J_KEY for kernel contexts when TID RDMA is used.
+ * See generate_jkey() in hfi.h for more information.
+ */
+#define TID_RDMA_JKEY 32
+#define HFI1_KERNEL_MIN_JKEY HFI1_ADMIN_JKEY_RANGE
+#define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1)
+
+/* Maximum number of segments in flight per QP request. */
+#define TID_RDMA_MAX_READ_SEGS_PER_REQ 6
+#define TID_RDMA_MAX_WRITE_SEGS_PER_REQ 4
+#define MAX_REQ max_t(u16, TID_RDMA_MAX_READ_SEGS_PER_REQ, \
+ TID_RDMA_MAX_WRITE_SEGS_PER_REQ)
+#define MAX_FLOWS roundup_pow_of_two(MAX_REQ + 1)
+
+#define MAX_EXPECTED_PAGES (MAX_EXPECTED_BUFFER / PAGE_SIZE)
+
+#define TID_RDMA_DESTQP_FLOW_SHIFT 11
+#define TID_RDMA_DESTQP_FLOW_MASK 0x1f
+
+#define TID_FLOW_SW_PSN BIT(0)
+
+#define TID_OPFN_QP_CTXT_MASK 0xff
+#define TID_OPFN_QP_CTXT_SHIFT 56
+#define TID_OPFN_QP_KDETH_MASK 0xff
+#define TID_OPFN_QP_KDETH_SHIFT 48
+#define TID_OPFN_MAX_LEN_MASK 0x7ff
+#define TID_OPFN_MAX_LEN_SHIFT 37
+#define TID_OPFN_TIMEOUT_MASK 0x1f
+#define TID_OPFN_TIMEOUT_SHIFT 32
+#define TID_OPFN_RESERVED_MASK 0x3f
+#define TID_OPFN_RESERVED_SHIFT 26
+#define TID_OPFN_URG_MASK 0x1
+#define TID_OPFN_URG_SHIFT 25
+#define TID_OPFN_VER_MASK 0x7
+#define TID_OPFN_VER_SHIFT 22
+#define TID_OPFN_JKEY_MASK 0x3f
+#define TID_OPFN_JKEY_SHIFT 16
+#define TID_OPFN_MAX_READ_MASK 0x3f
+#define TID_OPFN_MAX_READ_SHIFT 10
+#define TID_OPFN_MAX_WRITE_MASK 0x3f
+#define TID_OPFN_MAX_WRITE_SHIFT 4
+
+/*
+ * OPFN TID layout
+ *
+ * 63 47 31 15
+ * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC
+ * 3210987654321098 7654321098765432 1098765432109876 5432109876543210
+ * N - the context Number
+ * K - the Kdeth_qp
+ * M - Max_len
+ * T - Timeout
+ * D - reserveD
+ * V - version
+ * U - Urg capable
+ * J - Jkey
+ * R - max_Read
+ * W - max_Write
+ * C - Capcode
+ */
+
+static u32 tid_rdma_flow_wt;
+
+static void tid_rdma_trigger_resume(struct work_struct *work);
+static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
+static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
+ gfp_t gfp);
+static void hfi1_init_trdma_req(struct rvt_qp *qp,
+ struct tid_rdma_request *req);
+static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
+static void hfi1_tid_timeout(struct timer_list *t);
+static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
+static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
+static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
+static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
+static void hfi1_tid_retry_timeout(struct timer_list *t);
+static int make_tid_rdma_ack(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ struct hfi1_pkt_state *ps);
+static void hfi1_do_tid_send(struct rvt_qp *qp);
+
+static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
+{
+ return
+ (((u64)p->qp & TID_OPFN_QP_CTXT_MASK) <<
+ TID_OPFN_QP_CTXT_SHIFT) |
+ ((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) <<
+ TID_OPFN_QP_KDETH_SHIFT) |
+ (((u64)((p->max_len >> PAGE_SHIFT) - 1) &
+ TID_OPFN_MAX_LEN_MASK) << TID_OPFN_MAX_LEN_SHIFT) |
+ (((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) <<
+ TID_OPFN_TIMEOUT_SHIFT) |
+ (((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) |
+ (((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) |
+ (((u64)p->max_read & TID_OPFN_MAX_READ_MASK) <<
+ TID_OPFN_MAX_READ_SHIFT) |
+ (((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) <<
+ TID_OPFN_MAX_WRITE_SHIFT);
+}
+
+static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data)
+{
+ p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) &
+ TID_OPFN_MAX_LEN_MASK) + 1) << PAGE_SHIFT;
+ p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK;
+ p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) &
+ TID_OPFN_MAX_WRITE_MASK;
+ p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) &
+ TID_OPFN_MAX_READ_MASK;
+ p->qp =
+ ((((data >> TID_OPFN_QP_KDETH_SHIFT) & TID_OPFN_QP_KDETH_MASK)
+ << 16) |
+ ((data >> TID_OPFN_QP_CTXT_SHIFT) & TID_OPFN_QP_CTXT_MASK));
+ p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK;
+ p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK;
+}
+
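
The pair above packs the negotiated parameters into the 64-bit OPFN payload laid out in the diagram. A standalone round-trip sketch over a subset of the fields, using the same shift/mask values but a simplified stand-in for struct tid_rdma_params:

    #include <assert.h>
    #include <stdint.h>

    /* Simplified stand-in for struct tid_rdma_params; only a subset of
     * the fields from the layout above is exercised here.
     */
    struct params { uint32_t jkey, timeout, urg; };

    #define JKEY_MASK     0x3fULL
    #define JKEY_SHIFT    16
    #define TIMEOUT_MASK  0x1fULL
    #define TIMEOUT_SHIFT 32
    #define URG_MASK      0x1ULL
    #define URG_SHIFT     25

    static uint64_t encode(const struct params *p)
    {
            return ((p->jkey & JKEY_MASK) << JKEY_SHIFT) |
                   (((uint64_t)p->timeout & TIMEOUT_MASK) << TIMEOUT_SHIFT) |
                   ((p->urg & URG_MASK) << URG_SHIFT);
    }

    int main(void)
    {
            struct params in = { .jkey = 32, .timeout = 14, .urg = 1 };
            uint64_t data = encode(&in);

            /* Decoding reverses the shifts, as tid_rdma_opfn_decode() does. */
            assert(((data >> JKEY_SHIFT) & JKEY_MASK) == in.jkey);
            assert(((data >> TIMEOUT_SHIFT) & TIMEOUT_MASK) == in.timeout);
            assert(((data >> URG_SHIFT) & URG_MASK) == in.urg);
            return 0;
    }
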
+void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
+ p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
+ p->jkey = priv->rcd->jkey;
+ p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
+ p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ;
+ p->timeout = qp->timeout;
+ p->urg = is_urg_masked(priv->rcd);
+}
+
+bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ *data = tid_rdma_opfn_encode(&priv->tid_rdma.local);
+ return true;
+}
+
+bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct tid_rdma_params *remote, *old;
+ bool ret = true;
+
+ old = rcu_dereference_protected(priv->tid_rdma.remote,
+ lockdep_is_held(&priv->opfn.lock));
+ data &= ~0xfULL;
+ /*
+ * If data passed in is zero, return true so as not to continue the
+ * negotiation process
+ */
+ if (!data || !HFI1_CAP_IS_KSET(TID_RDMA))
+ goto null;
+ /*
+ * If kzalloc fails, return false. This will result in:
+ * * at the requester a new OPFN request being generated to retry
+ * the negotiation
+ * * at the responder, 0 being returned to the requester so as to
+ * disable TID RDMA at both the requester and the responder
+ */
+ remote = kzalloc(sizeof(*remote), GFP_ATOMIC);
+ if (!remote) {
+ ret = false;
+ goto null;
+ }
+
+ tid_rdma_opfn_decode(remote, data);
+ priv->tid_timer_timeout_jiffies =
+ usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) /
+ 1000UL) << 3) * 7);
+ trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local);
+ trace_hfi1_opfn_param(qp, 1, remote);
+ rcu_assign_pointer(priv->tid_rdma.remote, remote);
+ /*
+ * A TID RDMA READ request's segment size is not equal to
+ * remote->max_len only when the request's data length is smaller
+ * than remote->max_len. In that case, there will be only one segment.
+ * Therefore, when priv->pkts_ps is used to calculate req->cur_seg
+ * during retry, it will lead to req->cur_seg = 0, which is exactly
+ * what is expected.
+ */
+ priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len);
+ priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1;
+ goto free;
+null:
+ RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
+ priv->timeout_shift = 0;
+free:
+ if (old)
+ kfree_rcu(old, rcu_head);
+ return ret;
+}
+
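
The tid_timer_timeout_jiffies computation above follows the IB convention of 4.096 us * 2^timeout for the local ACK timeout, scaled by 8 * 7. A worked example of the arithmetic for timeout == 14 (usecs_to_jiffies() left out, as it is kernel-internal):

    #include <assert.h>

    int main(void)
    {
            unsigned long timeout = 14; /* IB local ACK timeout exponent */

            /* 4096 ns * 2^timeout, converted to us, then * 8 * 7,
             * exactly as tid_rdma_conn_reply() computes it.
             */
            unsigned long usecs =
                    (((4096UL * (1UL << timeout)) / 1000UL) << 3) * 7;

            assert(usecs == 3758048UL); /* ~3.76 s for timeout == 14 */
            return 0;
    }
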
+bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data)
+{
+ bool ret;
+
+ ret = tid_rdma_conn_reply(qp, *data);
+ *data = 0;
+ /*
+	 * If tid_rdma_conn_reply() returns an error, set *data to 0 to indicate
+ * TID RDMA could not be enabled. This will result in TID RDMA being
+ * disabled at the requester too.
+ */
+ if (ret)
+ (void)tid_rdma_conn_req(qp, data);
+ return ret;
+}
+
+void tid_rdma_conn_error(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct tid_rdma_params *old;
+
+ old = rcu_dereference_protected(priv->tid_rdma.remote,
+ lockdep_is_held(&priv->opfn.lock));
+ RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
+ if (old)
+ kfree_rcu(old, rcu_head);
+}
+
+/* This is called at context initialization time */
+int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
+{
+ if (reinit)
+ return 0;
+
+ BUILD_BUG_ON(TID_RDMA_JKEY < HFI1_KERNEL_MIN_JKEY);
+ BUILD_BUG_ON(TID_RDMA_JKEY > HFI1_KERNEL_MAX_JKEY);
+ rcd->jkey = TID_RDMA_JKEY;
+ hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
+ return hfi1_alloc_ctxt_rcv_groups(rcd);
+}
/**
* qp_to_rcd - determine the receive context used by a qp
@@ -41,8 +315,5137 @@ int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
struct ib_qp_init_attr *init_attr)
{
struct hfi1_qp_priv *qpriv = qp->priv;
+ int i, ret;
qpriv->rcd = qp_to_rcd(rdi, qp);
+ spin_lock_init(&qpriv->opfn.lock);
+ INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request);
+ INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume);
+ qpriv->flow_state.psn = 0;
+ qpriv->flow_state.index = RXE_NUM_TID_FLOWS;
+ qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS;
+ qpriv->flow_state.generation = KERN_GENERATION_RESERVED;
+ qpriv->s_state = TID_OP(WRITE_RESP);
+ qpriv->s_tid_cur = HFI1_QP_WQE_INVALID;
+ qpriv->s_tid_head = HFI1_QP_WQE_INVALID;
+ qpriv->s_tid_tail = HFI1_QP_WQE_INVALID;
+ qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
+ qpriv->r_tid_head = HFI1_QP_WQE_INVALID;
+ qpriv->r_tid_tail = HFI1_QP_WQE_INVALID;
+ qpriv->r_tid_ack = HFI1_QP_WQE_INVALID;
+ qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
+ atomic_set(&qpriv->n_requests, 0);
+ atomic_set(&qpriv->n_tid_requests, 0);
+ timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
+ timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0);
+ INIT_LIST_HEAD(&qpriv->tid_wait);
+
+ if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
+ struct hfi1_devdata *dd = qpriv->rcd->dd;
+
+ qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES *
+ sizeof(*qpriv->pages),
+ GFP_KERNEL, dd->node);
+ if (!qpriv->pages)
+ return -ENOMEM;
+ for (i = 0; i < qp->s_size; i++) {
+ struct hfi1_swqe_priv *priv;
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
+
+ priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
+ dd->node);
+ if (!priv)
+ return -ENOMEM;
+
+ hfi1_init_trdma_req(qp, &priv->tid_req);
+ priv->tid_req.e.swqe = wqe;
+ wqe->priv = priv;
+ }
+ for (i = 0; i < rvt_max_atomic(rdi); i++) {
+ struct hfi1_ack_priv *priv;
+
+ priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
+ dd->node);
+ if (!priv)
+ return -ENOMEM;
+
+ hfi1_init_trdma_req(qp, &priv->tid_req);
+ priv->tid_req.e.ack = &qp->s_ack_queue[i];
+
+ ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+ qp->s_ack_queue[i].priv = priv;
+ }
+ }
+
+ return 0;
+}
+
+void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct rvt_swqe *wqe;
+ u32 i;
+
+ if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
+ for (i = 0; i < qp->s_size; i++) {
+ wqe = rvt_get_swqe_ptr(qp, i);
+ kfree(wqe->priv);
+ wqe->priv = NULL;
+ }
+ for (i = 0; i < rvt_max_atomic(rdi); i++) {
+ struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv;
+
+ if (priv)
+ hfi1_kern_exp_rcv_free_flows(&priv->tid_req);
+ kfree(priv);
+ qp->s_ack_queue[i].priv = NULL;
+ }
+ cancel_work_sync(&qpriv->opfn.opfn_work);
+ kfree(qpriv->pages);
+ qpriv->pages = NULL;
+ }
+}
+
+/* Flow and tid waiter functions */
+/**
+ * DOC: lock ordering
+ *
+ * There are two locks involved with the queuing
+ * routines: the qp s_lock and the exp_lock.
+ *
+ * Since the tid space allocation is called from
+ * the send engine, the qp s_lock is already held.
+ *
+ * The allocation routines will get the exp_lock.
+ *
+ * The first_qp() call is provided to allow the head of
+ * the rcd wait queue to be fetched under the exp_lock and
+ * followed by a drop of the exp_lock.
+ *
+ * Any qp in the wait list will have the qp reference count held
+ * to hold the qp in memory.
+ */
+
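
A sketch of the lock ordering described above, using pthread mutexes as stand-ins for the kernel spinlocks; all names here are hypothetical. The point is that the head of the wait queue is fetched and referenced under exp_lock, and only woken after the lock is dropped:

    #include <pthread.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for the driver's structures. */
    struct qp { int refcount; };
    struct rcd {
            pthread_mutex_t exp_lock;
            struct qp *wait_head; /* head of the tid wait queue */
    };

    static void wake_first_waiter(struct rcd *rcd)
    {
            struct qp *qp;

            pthread_mutex_lock(&rcd->exp_lock);
            qp = rcd->wait_head;        /* first_qp() in the driver */
            if (qp)
                    qp->refcount++;     /* rvt_get_qp(): pin across unlock */
            pthread_mutex_unlock(&rcd->exp_lock);

            if (qp) {
                    /* Safe to schedule the qp here without exp_lock held;
                     * the reference taken above keeps it alive.
                     */
                    qp->refcount--;     /* rvt_put_qp() when done */
            }
    }
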
+/*
+ * return head of rcd wait list
+ *
+ * Must hold the exp_lock.
+ *
+ * Get a reference to the QP to hold the QP in memory.
+ *
+ * The caller must release the reference when the local
+ * pointer is no longer being used.
+ */
+static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
+ struct tid_queue *queue)
+ __must_hold(&rcd->exp_lock)
+{
+ struct hfi1_qp_priv *priv;
+
+ lockdep_assert_held(&rcd->exp_lock);
+ priv = list_first_entry_or_null(&queue->queue_head,
+ struct hfi1_qp_priv,
+ tid_wait);
+ if (!priv)
+ return NULL;
+ rvt_get_qp(priv->owner);
+ return priv->owner;
+}
+
+/**
+ * kernel_tid_waiters - determine rcd wait
+ * @rcd: the receive context
+ * @qp: the head of the qp being processed
+ *
+ * This routine will return false IFF
+ * the list is NULL or the head of the
+ * list is the indicated qp.
+ *
+ * Must hold the qp s_lock and the exp_lock.
+ *
+ * Return:
+ * false if either of the conditions below is satisfied:
+ * 1. The list is empty or
+ * 2. The indicated qp is at the head of the list and the
+ * HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
+ * true is returned otherwise.
+ */
+static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
+ struct tid_queue *queue, struct rvt_qp *qp)
+ __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
+{
+ struct rvt_qp *fqp;
+ bool ret = true;
+
+ lockdep_assert_held(&qp->s_lock);
+ lockdep_assert_held(&rcd->exp_lock);
+ fqp = first_qp(rcd, queue);
+ if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE)))
+ ret = false;
+ rvt_put_qp(fqp);
+ return ret;
+}
+
+/**
+ * dequeue_tid_waiter - dequeue the qp from the list
+ * @qp: the qp to remove from the wait list
+ *
+ * This routine removes the indicated qp from the
+ * wait list if it is there.
+ *
+ * This should be done after the hardware flow and
+ * tid array resources have been allocated.
+ *
+ * Must hold the qp s_lock and the rcd exp_lock.
+ *
+ * It relies on the s_lock to protect the s_flags
+ * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag.
+ */
+static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
+ struct tid_queue *queue, struct rvt_qp *qp)
+ __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ lockdep_assert_held(&rcd->exp_lock);
+ if (list_empty(&priv->tid_wait))
+ return;
+ list_del_init(&priv->tid_wait);
+ qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
+ queue->dequeue++;
+ rvt_put_qp(qp);
+}
+
+/**
+ * queue_qp_for_tid_wait - suspend QP on tid space
+ * @rcd: the receive context
+ * @qp: the qp
+ *
+ * The qp is inserted at the tail of the rcd
+ * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set.
+ *
+ * Must hold the qp s_lock and the exp_lock.
+ */
+static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
+ struct tid_queue *queue, struct rvt_qp *qp)
+ __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ lockdep_assert_held(&rcd->exp_lock);
+ if (list_empty(&priv->tid_wait)) {
+ qp->s_flags |= HFI1_S_WAIT_TID_SPACE;
+ list_add_tail(&priv->tid_wait, &queue->queue_head);
+ priv->tid_enqueue = ++queue->enqueue;
+ rcd->dd->verbs_dev.n_tidwait++;
+ trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE);
+ rvt_get_qp(qp);
+ }
+}
+
+/**
+ * __trigger_tid_waiter - trigger tid waiter
+ * @qp: the qp
+ *
+ * This is a private entrance to schedule the qp
+ * assuming the caller is holding the qp->s_lock.
+ */
+static void __trigger_tid_waiter(struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ lockdep_assert_held(&qp->s_lock);
+ if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE))
+ return;
+ trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE);
+ hfi1_schedule_send(qp);
+}
+
+/**
+ * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
+ * @qp: the qp
+ *
+ * Trigger a schedule for a waiting qp in a deadlock-safe
+ * manner. The qp reference is held prior
+ * to this call via first_qp().
+ *
+ * If the qp trigger was already scheduled (!rval)
+ * the reference is dropped; otherwise, the resume
+ * or the destroy cancel will dispatch the reference.
+ */
+static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ bool rval;
+
+ if (!qp)
+ return;
+
+ priv = qp->priv;
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ dd = dd_from_ibdev(qp->ibqp.device);
+
+ rval = queue_work_on(priv->s_sde ?
+ priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->node)),
+ ppd->hfi1_wq,
+ &priv->tid_rdma.trigger_work);
+ if (!rval)
+ rvt_put_qp(qp);
+}
+
+/**
+ * tid_rdma_trigger_resume - field a trigger work request
+ * @work: the work item
+ *
+ * Complete the off qp trigger processing by directly
+ * calling the progress routine.
+ */
+static void tid_rdma_trigger_resume(struct work_struct *work)
+{
+ struct tid_rdma_qp_params *tr;
+ struct hfi1_qp_priv *priv;
+ struct rvt_qp *qp;
+
+ tr = container_of(work, struct tid_rdma_qp_params, trigger_work);
+ priv = container_of(tr, struct hfi1_qp_priv, tid_rdma);
+ qp = priv->owner;
+ spin_lock_irq(&qp->s_lock);
+ if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) {
+ spin_unlock_irq(&qp->s_lock);
+ hfi1_do_send(priv->owner, true);
+ } else {
+ spin_unlock_irq(&qp->s_lock);
+ }
+ rvt_put_qp(qp);
+}
+
+/**
+ * _tid_rdma_flush_wait - unwind any tid space wait
+ *
+ * This is called when resetting a qp to
+ * allow a destroy or reset to get rid
+ * of any tid space linkage and reference counts.
+ */
+static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv;
+
+ if (!qp)
+ return;
+ lockdep_assert_held(&qp->s_lock);
+ priv = qp->priv;
+ qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
+ spin_lock(&priv->rcd->exp_lock);
+ if (!list_empty(&priv->tid_wait)) {
+ list_del_init(&priv->tid_wait);
+ qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
+ queue->dequeue++;
+ rvt_put_qp(qp);
+ }
+ spin_unlock(&priv->rcd->exp_lock);
+}
+
+void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
+ _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
+}
+
+/* Flow functions */
+/**
+ * kern_reserve_flow - allocate a hardware flow
+ * @rcd - the context to use for allocation
+ * @last - the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
+ * signify "don't care".
+ *
+ * Use a bit mask based allocation to reserve a hardware
+ * flow for use in receiving KDETH data packets. If a preferred flow is
+ * specified the function will attempt to reserve that flow again, if
+ * available.
+ *
+ * The exp_lock must be held.
+ *
+ * Return:
+ * On success: a value between 0 and RXE_NUM_TID_FLOWS - 1
+ * On failure: -EAGAIN
+ */
+static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
+ __must_hold(&rcd->exp_lock)
+{
+ int nr;
+
+ /* Attempt to reserve the preferred flow index */
+ if (last >= 0 && last < RXE_NUM_TID_FLOWS &&
+ !test_and_set_bit(last, &rcd->flow_mask))
+ return last;
+
+ nr = ffz(rcd->flow_mask);
+ BUILD_BUG_ON(RXE_NUM_TID_FLOWS >=
+ (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
+ if (nr > (RXE_NUM_TID_FLOWS - 1))
+ return -EAGAIN;
+ set_bit(nr, &rcd->flow_mask);
+ return nr;
+}
+
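+/*
+ * kern_set_hw_flow - program one entry of the receive context's TID flow
+ * table. Writing KERN_GENERATION_RESERVED leaves header suppression
+ * disabled, which is how an unused flow is parked.
+ */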
+static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
+ u32 flow_idx)
+{
+ u64 reg;
+
+ reg = ((u64)generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
+ RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK |
+ RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK |
+ RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK |
+ RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK |
+ RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK;
+
+ if (generation != KERN_GENERATION_RESERVED)
+ reg |= RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK;
+
+ write_uctxt_csr(rcd->dd, rcd->ctxt,
+ RCV_TID_FLOW_TABLE + 8 * flow_idx, reg);
+}
+
+static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
+ __must_hold(&rcd->exp_lock)
+{
+ u32 generation = rcd->flows[flow_idx].generation;
+
+ kern_set_hw_flow(rcd, generation, flow_idx);
+ return generation;
+}
+
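+/*
+ * kern_flow_generation_next - advance a flow generation, skipping the
+ * reserved value. KERN_GENERATION_RESERVED is what a parked flow is
+ * programmed with (see kern_clear_hw_flow() below), so it must never be
+ * handed out as a live generation.
+ */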
+static u32 kern_flow_generation_next(u32 gen)
+{
+ u32 generation = mask_generation(gen + 1);
+
+ if (generation == KERN_GENERATION_RESERVED)
+ generation = mask_generation(generation + 1);
+ return generation;
+}
+
+static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
+ __must_hold(&rcd->exp_lock)
+{
+ rcd->flows[flow_idx].generation =
+ kern_flow_generation_next(rcd->flows[flow_idx].generation);
+ kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
+}
+
+int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
+ struct tid_flow_state *fs = &qpriv->flow_state;
+ struct rvt_qp *fqp;
+ unsigned long flags;
+ int ret = 0;
+
+ /* The QP already has an allocated flow */
+ if (fs->index != RXE_NUM_TID_FLOWS)
+ return ret;
+
+ spin_lock_irqsave(&rcd->exp_lock, flags);
+ if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
+ goto queue;
+
+ ret = kern_reserve_flow(rcd, fs->last_index);
+ if (ret < 0)
+ goto queue;
+ fs->index = ret;
+ fs->last_index = fs->index;
+
+ /* Generation received in a RESYNC overrides default flow generation */
+ if (fs->generation != KERN_GENERATION_RESERVED)
+ rcd->flows[fs->index].generation = fs->generation;
+ fs->generation = kern_setup_hw_flow(rcd, fs->index);
+ fs->psn = 0;
+ fs->flags = 0;
+ dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
+ /* get head before dropping lock */
+ fqp = first_qp(rcd, &rcd->flow_queue);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+
+ tid_rdma_schedule_tid_wakeup(fqp);
+ return 0;
+queue:
+ queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+ return -EAGAIN;
+}
+
+void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
+ struct tid_flow_state *fs = &qpriv->flow_state;
+ struct rvt_qp *fqp;
+ unsigned long flags;
+
+ if (fs->index >= RXE_NUM_TID_FLOWS)
+ return;
+ spin_lock_irqsave(&rcd->exp_lock, flags);
+ kern_clear_hw_flow(rcd, fs->index);
+ clear_bit(fs->index, &rcd->flow_mask);
+ fs->index = RXE_NUM_TID_FLOWS;
+ fs->psn = 0;
+ fs->generation = KERN_GENERATION_RESERVED;
+
+ /* get head before dropping lock */
+ fqp = first_qp(rcd, &rcd->flow_queue);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+
+ if (fqp == qp) {
+ __trigger_tid_waiter(fqp);
+ rvt_put_qp(fqp);
+ } else {
+ tid_rdma_schedule_tid_wakeup(fqp);
+ }
+}
+
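+/*
+ * hfi1_kern_init_ctxt_generations - seed every flow of a receive context
+ * with a random starting generation and park the hardware entries with
+ * the reserved generation until the flows are allocated.
+ */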
+void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
+{
+ int i;
+
+ for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
+ rcd->flows[i].generation = mask_generation(prandom_u32());
+ kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
+ }
+}
+
+/* TID allocation functions */
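+/*
+ * trdma_pset_order - return the hardware size encoding for a pageset.
+ * Pageset counts are powers of two, so a set of 2^n pages encodes as
+ * n + 1 (1 page -> 1, 2 pages -> 2, 4 pages -> 3, ...).
+ */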
+static u8 trdma_pset_order(struct tid_rdma_pageset *s)
+{
+ u8 count = s->count;
+
+ return ilog2(count) + 1;
+}
+
+/**
+ * tid_rdma_find_phys_blocks_4k - get groups based on mr info
+ * @flow: the flow to find pages for
+ * @pages: pointer to an array of page structs
+ * @npages: number of pages
+ * @list: page set array to return
+ *
+ * This routine returns the number of groups associated with
+ * the current sge information. This implementation is based
+ * on the expected receive find_phys_blocks() adjusted to
+ * use the MR information vs. the pfn.
+ *
+ * Return:
+ * the number of RcvArray entries
+ */
+static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
+ struct page **pages,
+ u32 npages,
+ struct tid_rdma_pageset *list)
+{
+ u32 pagecount, pageidx, setcount = 0, i;
+ void *vaddr, *this_vaddr;
+
+ if (!npages)
+ return 0;
+
+ /*
+ * Look for sets of physically contiguous pages in the user buffer.
+ * This will allow us to optimize Expected RcvArray entry usage by
+ * using the bigger supported sizes.
+ */
+ vaddr = page_address(pages[0]);
+ trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
+ for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
+ this_vaddr = i < npages ? page_address(pages[i]) : NULL;
+ trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
+ this_vaddr);
+ /*
+ * If the vaddr's are not sequential, pages are not physically
+ * contiguous.
+ */
+ if (this_vaddr != (vaddr + PAGE_SIZE)) {
+ /*
+ * At this point we have to loop over the set of
+ * physically contiguous pages and break them down
+ * into sizes supported by the HW.
+ * There are two main constraints:
+ * 1. The max buffer size is MAX_EXPECTED_BUFFER.
+ * If the total set size is bigger than that
+ * program only a MAX_EXPECTED_BUFFER chunk.
+ * 2. The buffer size has to be a power of two. If
+ * it is not, round down to the closest power of
+ * 2 and program that size. For example, a 13-page
+ * contiguous run (when under MAX_EXPECTED_BUFFER)
+ * is programmed as 8-, 4- and 1-page buffers.
+ */
+ while (pagecount) {
+ int maxpages = pagecount;
+ u32 bufsize = pagecount * PAGE_SIZE;
+
+ if (bufsize > MAX_EXPECTED_BUFFER)
+ maxpages =
+ MAX_EXPECTED_BUFFER >>
+ PAGE_SHIFT;
+ else if (!is_power_of_2(bufsize))
+ maxpages =
+ rounddown_pow_of_two(bufsize) >>
+ PAGE_SHIFT;
+
+ list[setcount].idx = pageidx;
+ list[setcount].count = maxpages;
+ trace_hfi1_tid_pageset(flow->req->qp, setcount,
+ list[setcount].idx,
+ list[setcount].count);
+ pagecount -= maxpages;
+ pageidx += maxpages;
+ setcount++;
+ }
+ pageidx = i;
+ pagecount = 1;
+ vaddr = this_vaddr;
+ } else {
+ vaddr += PAGE_SIZE;
+ pagecount++;
+ }
+ }
+ /* ensure we always return an even number of sets */
+ if (setcount & 1)
+ list[setcount++].count = 0;
+ return setcount;
+}
+
+/**
+ * tid_flush_pages - dump out pages into pagesets
+ * @list: list of pagesets
+ * @idx: pointer to current page index
+ * @pages: number of pages to dump
+ * @sets: current number of pagesets
+ *
+ * This routine flushes out accumulated pages.
+ *
+ * To ensure an even number of sets the
+ * code may add a filler.
+ *
+ * This can happen when pages is not
+ * a power of 2 or pages is a power of 2
+ * less than the maximum pages.
+ *
+ * For example, 13 pages (assuming they fit under the
+ * MAX_EXPECTED_PAGES cap) flush as sets of 8, 4 and 1
+ * pages plus a zero-count filler.
+ *
+ * Return:
+ * The new number of sets
+ */
+static u32 tid_flush_pages(struct tid_rdma_pageset *list,
+ u32 *idx, u32 pages, u32 sets)
+{
+ while (pages) {
+ u32 maxpages = pages;
+
+ if (maxpages > MAX_EXPECTED_PAGES)
+ maxpages = MAX_EXPECTED_PAGES;
+ else if (!is_power_of_2(maxpages))
+ maxpages = rounddown_pow_of_two(maxpages);
+ list[sets].idx = *idx;
+ list[sets++].count = maxpages;
+ *idx += maxpages;
+ pages -= maxpages;
+ }
+ /* might need a filler */
+ if (sets & 1)
+ list[sets++].count = 0;
+ return sets;
+}
+
+/**
+ * tid_rdma_find_phys_blocks_8k - get groups based on mr info
+ * @flow: the flow to find pages for
+ * @pages: pointer to an array of page structs
+ * @npages: number of pages
+ * @list: page set array to return
+ *
+ * This routine parses an array of pages to compute pagesets
+ * in an 8k compatible way.
+ *
+ * Pages are tested two at a time: i and i + 1 for contiguity,
+ * then i - 1 and i for contiguity.
+ *
+ * If either test fails, any accumulated pages are flushed and
+ * v0, v1 are emitted as separate PAGE_SIZE pagesets.
+ *
+ * Otherwise, the current 8k is totaled for a future flush.
+ *
+ * Return:
+ * The number of pagesets
+ * list set with the returned number of pagesets
+ */
+static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
+ struct page **pages,
+ u32 npages,
+ struct tid_rdma_pageset *list)
+{
+ u32 idx, sets = 0, i;
+ u32 pagecnt = 0;
+ void *v0, *v1, *vm1;
+
+ if (!npages)
+ return 0;
+ for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) {
+ /* get a new v0 */
+ v0 = page_address(pages[i]);
+ trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
+ v1 = i + 1 < npages ?
+ page_address(pages[i + 1]) : NULL;
+ trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
+ /* compare i, i + 1 vaddr */
+ if (v1 != (v0 + PAGE_SIZE)) {
+ /* flush out pages */
+ sets = tid_flush_pages(list, &idx, pagecnt, sets);
+ /* output v0,v1 as two pagesets */
+ list[sets].idx = idx++;
+ list[sets++].count = 1;
+ if (v1) {
+ list[sets].count = 1;
+ list[sets++].idx = idx++;
+ } else {
+ list[sets++].count = 0;
+ }
+ vm1 = NULL;
+ pagecnt = 0;
+ continue;
+ }
+ /* i,i+1 consecutive, look at i-1,i */
+ if (vm1 && v0 != (vm1 + PAGE_SIZE)) {
+ /* flush out pages */
+ sets = tid_flush_pages(list, &idx, pagecnt, sets);
+ pagecnt = 0;
+ }
+ /* pages will always be a multiple of 8k */
+ pagecnt += 2;
+ /* save i-1 */
+ vm1 = v1;
+ /* move to next pair */
+ }
+ /* dump residual pages at end */
+ sets = tid_flush_pages(list, &idx, npages - idx, sets);
+ /* by design, the number of sets cannot be odd */
+ WARN_ON(sets & 1);
+ return sets;
+}
+
+/*
+ * Find pages for one segment of a sge array represented by @ss. The function
+ * does not check the sge; the sge must have been checked for alignment with a
+ * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
+ * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge
+ * copy maintained in @ss->sge, the original sge is not modified.
+ *
+ * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not
+ * releasing the MR reference count at the same time. Otherwise, we'll "leak"
+ * references to the MR. This difference requires that we keep track of progress
+ * into the sg_list. This is done by the cur_seg cursor in the tid_rdma_request
+ * structure.
+ */
+static u32 kern_find_pages(struct tid_rdma_flow *flow,
+ struct page **pages,
+ struct rvt_sge_state *ss, bool *last)
+{
+ struct tid_rdma_request *req = flow->req;
+ struct rvt_sge *sge = &ss->sge;
+ u32 length = flow->req->seg_len;
+ u32 len = PAGE_SIZE;
+ u32 i = 0;
+
+ while (length && req->isge < ss->num_sge) {
+ pages[i++] = virt_to_page(sge->vaddr);
+
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (!sge->sge_length) {
+ if (++req->isge < ss->num_sge)
+ *sge = ss->sg_list[req->isge - 1];
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= RVT_SEGSZ) {
+ ++sge->m;
+ sge->n = 0;
+ }
+ sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+
+ flow->length = flow->req->seg_len - length;
+ *last = req->isge == ss->num_sge ? false : true;
+ return i;
+}
+
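+/*
+ * dma_unmap_flow - undo the per-pageset DMA mappings created by
+ * dma_map_flow(). Filler pagesets (count == 0) were never mapped and are
+ * skipped.
+ */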
+static void dma_unmap_flow(struct tid_rdma_flow *flow)
+{
+ struct hfi1_devdata *dd;
+ int i;
+ struct tid_rdma_pageset *pset;
+
+ dd = flow->req->rcd->dd;
+ for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
+ i++, pset++) {
+ if (pset->count && pset->addr) {
+ dma_unmap_page(&dd->pcidev->dev,
+ pset->addr,
+ PAGE_SIZE * pset->count,
+ DMA_FROM_DEVICE);
+ pset->mapped = 0;
+ }
+ }
+}
+
+static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
+{
+ int i;
+ struct hfi1_devdata *dd = flow->req->rcd->dd;
+ struct tid_rdma_pageset *pset;
+
+ for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
+ i++, pset++) {
+ if (pset->count) {
+ pset->addr = dma_map_page(&dd->pcidev->dev,
+ pages[pset->idx],
+ 0,
+ PAGE_SIZE * pset->count,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) {
+ dma_unmap_flow(flow);
+ return -ENOMEM;
+ }
+ pset->mapped = 1;
+ }
+ }
+ return 0;
+}
+
+static inline bool dma_mapped(struct tid_rdma_flow *flow)
+{
+ return !!flow->pagesets[0].mapped;
+}
+
+/*
+ * Get pages pointers and identify contiguous physical memory chunks for a
+ * segment. All segments are of length flow->req->seg_len.
+ */
+static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
+ struct page **pages,
+ struct rvt_sge_state *ss, bool *last)
+{
+ u8 npages;
+
+ /* Reuse previously computed pagesets, if any */
+ if (flow->npagesets) {
+ trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
+ flow);
+ if (!dma_mapped(flow))
+ return dma_map_flow(flow, pages);
+ return 0;
+ }
+
+ npages = kern_find_pages(flow, pages, ss, last);
+
+ if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
+ flow->npagesets =
+ tid_rdma_find_phys_blocks_4k(flow, pages, npages,
+ flow->pagesets);
+ else
+ flow->npagesets =
+ tid_rdma_find_phys_blocks_8k(flow, pages, npages,
+ flow->pagesets);
+
+ return dma_map_flow(flow, pages);
+}
+
+static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
+ struct hfi1_ctxtdata *rcd, char *s,
+ struct tid_group *grp, u8 cnt)
+{
+ struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];
+
+ WARN_ON_ONCE(flow->tnode_cnt >=
+ (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT));
+ if (WARN_ON_ONCE(cnt & 1))
+ dd_dev_err(rcd->dd,
+ "unexpected odd allocation cnt %u map 0x%x used %u",
+ cnt, grp->map, grp->used);
+
+ node->grp = grp;
+ node->map = grp->map;
+ node->cnt = cnt;
+ trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
+ grp->base, grp->map, grp->used, cnt);
+}
+
+/*
+ * Try to allocate pageset_count TID's from TID groups for a context
+ *
+ * This function allocates TID's without moving groups between lists or
+ * modifying grp->map. This is done as follows, being cognizant of the lists
+ * between which the TID groups will move:
+ * 1. First allocate complete groups of 8 TID's since this is more efficient,
+ * these groups will move from group->full without affecting used
+ * 2. If more TID's are needed allocate from used (will move from used->full or
+ * stay in used)
+ * 3. If we still don't have the required number of TID's go back and look again
+ * at a complete group (will move from group->used)
+ */
+static int kern_alloc_tids(struct tid_rdma_flow *flow)
+{
+ struct hfi1_ctxtdata *rcd = flow->req->rcd;
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 ngroups, pageidx = 0;
+ struct tid_group *group = NULL, *used;
+ u8 use;
+
+ flow->tnode_cnt = 0;
+ ngroups = flow->npagesets / dd->rcv_entries.group_size;
+ if (!ngroups)
+ goto used_list;
+
+ /* First look at complete groups */
+ list_for_each_entry(group, &rcd->tid_group_list.list, list) {
+ kern_add_tid_node(flow, rcd, "complete groups", group,
+ group->size);
+
+ pageidx += group->size;
+ if (!--ngroups)
+ break;
+ }
+
+ if (pageidx >= flow->npagesets)
+ goto ok;
+
+used_list:
+ /* Now look at partially used groups */
+ list_for_each_entry(used, &rcd->tid_used_list.list, list) {
+ use = min_t(u32, flow->npagesets - pageidx,
+ used->size - used->used);
+ kern_add_tid_node(flow, rcd, "used groups", used, use);
+
+ pageidx += use;
+ if (pageidx >= flow->npagesets)
+ goto ok;
+ }
+
+ /*
+ * Look again at a complete group, continuing from where we left off.
+ * However, if we are at the head, we have reached the end of the
+ * complete groups list from the first loop above
+ */
+ if (group && &group->list == &rcd->tid_group_list.list)
+ goto bail_eagain;
+ group = list_prepare_entry(group, &rcd->tid_group_list.list,
+ list);
+ if (list_is_last(&group->list, &rcd->tid_group_list.list))
+ goto bail_eagain;
+ group = list_next_entry(group, list);
+ use = min_t(u32, flow->npagesets - pageidx, group->size);
+ kern_add_tid_node(flow, rcd, "complete continue", group, use);
+ pageidx += use;
+ if (pageidx >= flow->npagesets)
+ goto ok;
+bail_eagain:
+ trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
+ (u64)flow->npagesets);
+ return -EAGAIN;
+ok:
+ return 0;
+}
+
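+/*
+ * kern_program_rcv_group - program the rcvarray entries for one TID node.
+ * Each allocated entry in the group is written with its pageset and the
+ * result is packed into flow->tid_entry[] -- one entry per rcvarray slot,
+ * or one per even/odd pair when two consecutive slots are used (tidctrl
+ * 0x3).
+ */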
+static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
+ u32 *pset_idx)
+{
+ struct hfi1_ctxtdata *rcd = flow->req->rcd;
+ struct hfi1_devdata *dd = rcd->dd;
+ struct kern_tid_node *node = &flow->tnode[grp_num];
+ struct tid_group *grp = node->grp;
+ struct tid_rdma_pageset *pset;
+ u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
+ u32 rcventry, npages = 0, pair = 0, tidctrl;
+ u8 i, cnt = 0;
+
+ for (i = 0; i < grp->size; i++) {
+ rcventry = grp->base + i;
+
+ if (node->map & BIT(i) || cnt >= node->cnt) {
+ rcv_array_wc_fill(dd, rcventry);
+ continue;
+ }
+ pset = &flow->pagesets[(*pset_idx)++];
+ if (pset->count) {
+ hfi1_put_tid(dd, rcventry, PT_EXPECTED,
+ pset->addr, trdma_pset_order(pset));
+ } else {
+ hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
+ }
+ npages += pset->count;
+
+ rcventry -= rcd->expected_base;
+ tidctrl = pair ? 0x3 : rcventry & 0x1 ? 0x2 : 0x1;
+ /*
+ * A single TID entry will be used for a rcvarray pair (with
+ * tidctrl 0x3) if ALL of these are true: (a) the bit pos is
+ * even, (b) the group map shows the current and the next bits
+ * as free, indicating two consecutive rcvarray entries are
+ * available, and (c) we actually need 2 more entries.
+ */
+ pair = !(i & 0x1) && !((node->map >> i) & 0x3) &&
+ node->cnt >= cnt + 2;
+ if (!pair) {
+ if (!pset->count)
+ tidctrl = 0x1;
+ flow->tid_entry[flow->tidcnt++] =
+ EXP_TID_SET(IDX, rcventry >> 1) |
+ EXP_TID_SET(CTRL, tidctrl) |
+ EXP_TID_SET(LEN, npages);
+ trace_hfi1_tid_entry_alloc(/* entry */
+ flow->req->qp, flow->tidcnt - 1,
+ flow->tid_entry[flow->tidcnt - 1]);
+
+ /* Efficient DIV_ROUND_UP(npages, pmtu_pg) */
+ flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
+ npages = 0;
+ }
+
+ if (grp->used == grp->size - 1)
+ tid_group_move(grp, &rcd->tid_used_list,
+ &rcd->tid_full_list);
+ else if (!grp->used)
+ tid_group_move(grp, &rcd->tid_group_list,
+ &rcd->tid_used_list);
+
+ grp->used++;
+ grp->map |= BIT(i);
+ cnt++;
+ }
+}
+
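+/*
+ * kern_unprogram_rcv_group - the inverse of kern_program_rcv_group().
+ * Invalidates the rcvarray entries held by this node and moves the group
+ * back toward the free list as entries are released.
+ */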
+static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
+{
+ struct hfi1_ctxtdata *rcd = flow->req->rcd;
+ struct hfi1_devdata *dd = rcd->dd;
+ struct kern_tid_node *node = &flow->tnode[grp_num];
+ struct tid_group *grp = node->grp;
+ u32 rcventry;
+ u8 i, cnt = 0;
+
+ for (i = 0; i < grp->size; i++) {
+ rcventry = grp->base + i;
+
+ if (node->map & BIT(i) || cnt >= node->cnt) {
+ rcv_array_wc_fill(dd, rcventry);
+ continue;
+ }
+
+ hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
+
+ grp->used--;
+ grp->map &= ~BIT(i);
+ cnt++;
+
+ if (grp->used == grp->size - 1)
+ tid_group_move(grp, &rcd->tid_full_list,
+ &rcd->tid_used_list);
+ else if (!grp->used)
+ tid_group_move(grp, &rcd->tid_used_list,
+ &rcd->tid_group_list);
+ }
+ if (WARN_ON_ONCE(cnt & 1)) {
+ struct hfi1_ctxtdata *rcd = flow->req->rcd;
+ struct hfi1_devdata *dd = rcd->dd;
+
+ dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u",
+ cnt, grp->map, grp->used);
+ }
+}
+
+static void kern_program_rcvarray(struct tid_rdma_flow *flow)
+{
+ u32 pset_idx = 0;
+ int i;
+
+ flow->npkts = 0;
+ flow->tidcnt = 0;
+ for (i = 0; i < flow->tnode_cnt; i++)
+ kern_program_rcv_group(flow, i, &pset_idx);
+ trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
+}
+
+/**
+ * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a
+ * TID RDMA request
+ *
+ * @req: TID RDMA request for which the segment/flow is being set up
+ * @ss: sge state, maintains state across successive segments of a sge
+ * @last: set to true after the last sge segment has been processed
+ *
+ * This function
+ * (1) finds a free flow entry in the flow circular buffer
+ * (2) finds pages and contiguous physical chunks constituting one segment
+ * of an sge
+ * (3) allocates TID group entries for those chunks
+ * (4) programs rcvarray entries in the hardware corresponding to those
+ * TID's
+ * (5) computes a tidarray with formatted TID entries which can be sent
+ * to the sender
+ * (6) Reserves and programs HW flows.
+ * (7) It also manages queuing the QP when TID/flow resources are not
+ * available.
+ *
+ * @req points to struct tid_rdma_request of which the segments are a part. The
+ * function uses qp, rcd and seg_len members of @req. In the absence of errors,
+ * req->flow_idx is the index of the flow which has been prepared in this
+ * invocation. With flow = &req->flows[req->flow_idx],
+ * flow->tid_entry contains the TID array which the sender can use for TID RDMA
+ * sends and flow->npkts contains number of packets required to send the
+ * segment.
+ *
+ * hfi1_check_sge_align should be called prior to calling this function and if
+ * it signals error TID RDMA cannot be used for this sge and this function
+ * should not be called.
+ *
+ * For the queuing, caller must hold the flow->req->qp s_lock from the send
+ * engine and the function will acquire the exp_lock.
+ *
+ * Return:
+ * The function returns -EAGAIN if sufficient number of TID/flow resources to
+ * map the segment could not be allocated. In this case the function should be
+ * called again with previous arguments to retry the TID allocation. There are
+ * no other error returns. The function returns 0 on success.
+ */
+int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
+ struct rvt_sge_state *ss, bool *last)
+ __must_hold(&req->qp->s_lock)
+{
+ struct tid_rdma_flow *flow = &req->flows[req->setup_head];
+ struct hfi1_ctxtdata *rcd = req->rcd;
+ struct hfi1_qp_priv *qpriv = req->qp->priv;
+ unsigned long flags;
+ struct rvt_qp *fqp;
+ u16 clear_tail = req->clear_tail;
+
+ lockdep_assert_held(&req->qp->s_lock);
+ /*
+ * We return error if either (a) we don't have space in the flow
+ * circular buffer, or (b) we already have max entries in the buffer.
+ * Max entries depend on the type of request we are processing and the
+ * negotiated TID RDMA parameters.
+ */
+ if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
+ CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >=
+ req->n_flows)
+ return -EINVAL;
+
+ /*
+ * Get pages and identify contiguous physical memory chunks for the
+ * segment. If we cannot determine a DMA address mapping, we will
+ * treat it just as if we had run out of space above.
+ */
+ if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) {
+ hfi1_wait_kmem(flow->req->qp);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&rcd->exp_lock, flags);
+ if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
+ goto queue;
+
+ /*
+ * At this point we know the number of pagesets and hence the number of
+ * TID's to map the segment. Allocate the TID's from the TID groups. If
+ * we cannot allocate the required number we exit and try again later
+ */
+ if (kern_alloc_tids(flow))
+ goto queue;
+ /*
+ * Finally program the TID entries with the pagesets, compute the
+ * tidarray and enable the HW flow
+ */
+ kern_program_rcvarray(flow);
+
+ /*
+ * Setup the flow state with relevant information.
+ * This information is used for tracking the sequence of data packets
+ * for the segment.
+ * The flow is set up here as this is the most accurate time and place
+ * to do so. Doing it at a later time runs the risk of the flow data in
+ * qpriv getting out of sync.
+ */
+ memset(&flow->flow_state, 0x0, sizeof(flow->flow_state));
+ flow->idx = qpriv->flow_state.index;
+ flow->flow_state.generation = qpriv->flow_state.generation;
+ flow->flow_state.spsn = qpriv->flow_state.psn;
+ flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1;
+ flow->flow_state.r_next_psn =
+ full_flow_psn(flow, flow->flow_state.spsn);
+ qpriv->flow_state.psn += flow->npkts;
+
+ dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
+ /* get head before dropping lock */
+ fqp = first_qp(rcd, &rcd->rarr_queue);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+ tid_rdma_schedule_tid_wakeup(fqp);
+
+ req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
+ return 0;
+queue:
+ queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+ return -EAGAIN;
+}
+
+static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow)
+{
+ flow->npagesets = 0;
+}
+
+/*
+ * This function is called after one segment has been successfully sent to
+ * release the flow and TID HW/SW resources for that segment. The segments for a
+ * TID RDMA request are set up and cleared in FIFO order, which is managed
+ * using a circular buffer.
+ */
+int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
+ __must_hold(&req->qp->s_lock)
+{
+ struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
+ struct hfi1_ctxtdata *rcd = req->rcd;
+ unsigned long flags;
+ int i;
+ struct rvt_qp *fqp;
+
+ lockdep_assert_held(&req->qp->s_lock);
+ /* Exit if we have nothing in the flow circular buffer */
+ if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
+ return -EINVAL;
+
+ spin_lock_irqsave(&rcd->exp_lock, flags);
+
+ for (i = 0; i < flow->tnode_cnt; i++)
+ kern_unprogram_rcv_group(flow, i);
+ /* To prevent double unprogramming */
+ flow->tnode_cnt = 0;
+ /* get head before dropping lock */
+ fqp = first_qp(rcd, &rcd->rarr_queue);
+ spin_unlock_irqrestore(&rcd->exp_lock, flags);
+
+ dma_unmap_flow(flow);
+
+ hfi1_tid_rdma_reset_flow(flow);
+ req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1);
+
+ if (fqp == req->qp) {
+ __trigger_tid_waiter(fqp);
+ rvt_put_qp(fqp);
+ } else {
+ tid_rdma_schedule_tid_wakeup(fqp);
+ }
+
+ return 0;
+}
+
+/*
+ * This function is called to release all the tid entries for
+ * a request.
+ */
+void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
+ __must_hold(&req->qp->s_lock)
+{
+ /* Use memory barrier for proper ordering */
+ while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) {
+ if (hfi1_kern_exp_rcv_clear(req))
+ break;
+ }
+}
+
+/**
+ * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
+ * @req: the tid rdma request to be cleaned
+ */
+static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
+{
+ kfree(req->flows);
+ req->flows = NULL;
+}
+
+/**
+ * __trdma_clean_swqe - clean up for large sized QPs
+ * @qp: the queue pair
+ * @wqe: the send wqe
+ */
+void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ struct hfi1_swqe_priv *p = wqe->priv;
+
+ hfi1_kern_exp_rcv_free_flows(&p->tid_req);
+}
+
+/*
+ * This can be called at QP create time or in the data path.
+ */
+static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
+ gfp_t gfp)
+{
+ struct tid_rdma_flow *flows;
+ int i;
+
+ if (likely(req->flows))
+ return 0;
+ flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp,
+ req->rcd->numa_id);
+ if (!flows)
+ return -ENOMEM;
+ /* mini init */
+ for (i = 0; i < MAX_FLOWS; i++) {
+ flows[i].req = req;
+ flows[i].npagesets = 0;
+ flows[i].pagesets[0].mapped = 0;
+ }
+ req->flows = flows;
+ return 0;
+}
+
+static void hfi1_init_trdma_req(struct rvt_qp *qp,
+ struct tid_rdma_request *req)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ /*
+ * Initialize various TID RDMA request variables.
+ * These variables are "static", which is why they
+ * can be pre-initialized here before the WRs have
+ * even been submitted.
+ * However, non-NULL values for these variables do not
+ * imply that this WQE has been enabled for TID RDMA.
+ * Drivers should check the WQE's opcode to determine
+ * if a request is a TID RDMA one or not.
+ */
+ req->qp = qp;
+ req->rcd = qpriv->rcd;
+}
+
+u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ return dd->verbs_dev.n_tidwait;
+}
+
+static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
+ u32 psn, u16 *fidx)
+{
+ u16 head, tail;
+ struct tid_rdma_flow *flow;
+
+ head = req->setup_head;
+ tail = req->clear_tail;
+ for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
+ tail = CIRC_NEXT(tail, MAX_FLOWS)) {
+ flow = &req->flows[tail];
+ if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 &&
+ cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) {
+ if (fidx)
+ *fidx = tail;
+ return flow;
+ }
+ }
+ return NULL;
+}
+
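+/*
+ * __find_flow_ranged - locate the flow whose flow PSN range contains @psn
+ * within the [tail, head) window of the flow circular buffer. Returns the
+ * flow, with its index in @fidx if requested, or NULL if none matches.
+ */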
+static struct tid_rdma_flow *
+__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
+ u32 psn, u16 *fidx)
+{
+ for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
+ tail = CIRC_NEXT(tail, MAX_FLOWS)) {
+ struct tid_rdma_flow *flow = &req->flows[tail];
+ u32 spsn, lpsn;
+
+ spsn = full_flow_psn(flow, flow->flow_state.spsn);
+ lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+
+ if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
+ if (fidx)
+ *fidx = tail;
+ return flow;
+ }
+ }
+ return NULL;
+}
+
+static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
+ u32 psn, u16 *fidx)
+{
+ return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
+ fidx);
+}
+
+/* TID RDMA READ functions */
+u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u32 *len)
+{
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
+ struct rvt_qp *qp = req->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_swqe_priv *wpriv = wqe->priv;
+ struct tid_rdma_read_req *rreq = &ohdr->u.tid_rdma.r_req;
+ struct tid_rdma_params *remote;
+ u32 req_len = 0;
+ void *req_addr = NULL;
+
+ /* This is the IB psn used to send the request */
+ *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
+ trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
+
+ /* TID Entries for TID RDMA READ payload */
+ req_addr = &flow->tid_entry[flow->tid_idx];
+ req_len = sizeof(*flow->tid_entry) *
+ (flow->tidcnt - flow->tid_idx);
+
+ memset(&ohdr->u.tid_rdma.r_req, 0, sizeof(ohdr->u.tid_rdma.r_req));
+ wpriv->ss.sge.vaddr = req_addr;
+ wpriv->ss.sge.sge_length = req_len;
+ wpriv->ss.sge.length = wpriv->ss.sge.sge_length;
+ /*
+ * We can safely zero these out. Since the first SGE covers the
+ * entire packet, nothing else should even look at the MR.
+ */
+ wpriv->ss.sge.mr = NULL;
+ wpriv->ss.sge.m = 0;
+ wpriv->ss.sge.n = 0;
+
+ wpriv->ss.sg_list = NULL;
+ wpriv->ss.total_len = wpriv->ss.sge.sge_length;
+ wpriv->ss.num_sge = 1;
+
+ /* Construct the TID RDMA READ REQ packet header */
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+
+ KDETH_RESET(rreq->kdeth0, KVER, 0x1);
+ KDETH_RESET(rreq->kdeth1, JKEY, remote->jkey);
+ rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr +
+ req->cur_seg * req->seg_len + flow->sent);
+ rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey);
+ rreq->reth.length = cpu_to_be32(*len);
+ rreq->tid_flow_psn =
+ cpu_to_be32((flow->flow_state.generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT) |
+ ((flow->flow_state.spsn + flow->pkt) &
+ HFI1_KDETH_BTH_SEQ_MASK));
+ rreq->tid_flow_qp =
+ cpu_to_be32(qpriv->tid_rdma.local.qp |
+ ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
+ TID_RDMA_DESTQP_FLOW_SHIFT) |
+ qpriv->rcd->ctxt);
+ rreq->verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 &= ~RVT_QPN_MASK;
+ *bth1 |= remote->qp;
+ *bth2 |= IB_BTH_REQ_ACK;
+ rcu_read_unlock();
+
+ /* We are done with this segment */
+ flow->sent += *len;
+ req->cur_seg++;
+ qp->s_state = TID_OP(READ_REQ);
+ req->ack_pending++;
+ req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1);
+ qpriv->pending_tid_r_segs++;
+ qp->s_num_rd_atomic++;
+
+ /* Set the TID RDMA READ request payload size */
+ *len = req_len;
+
+ return sizeof(ohdr->u.tid_rdma.r_req) / sizeof(u32);
+}
+
+/*
+ * @len: contains the data length to read upon entry and the read request
+ * payload length upon exit.
+ */
+u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u32 *len)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow = NULL;
+ u32 hdwords = 0;
+ bool last;
+ bool retry = true;
+ u32 npkts = rvt_div_round_up_mtu(qp, *len);
+
+ trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ /*
+ * Check sync conditions. Make sure that there are no pending
+ * segments before freeing the flow.
+ */
+sync_check:
+ if (req->state == TID_REQUEST_SYNC) {
+ if (qpriv->pending_tid_r_segs)
+ goto done;
+
+ hfi1_kern_clear_hw_flow(req->rcd, qp);
+ req->state = TID_REQUEST_ACTIVE;
+ }
+
+ /*
+ * If the request for this segment is resent, the tid resources should
+ * have been allocated before. In this case, req->flow_idx should
+ * fall behind req->setup_head.
+ */
+ if (req->flow_idx == req->setup_head) {
+ retry = false;
+ if (req->state == TID_REQUEST_RESEND) {
+ /*
+ * This is the first new segment for a request whose
+ * earlier segments have been re-sent. We need to
+ * set up the sge pointer correctly.
+ */
+ restart_sge(&qp->s_sge, wqe, req->s_next_psn,
+ qp->pmtu);
+ req->isge = 0;
+ req->state = TID_REQUEST_ACTIVE;
+ }
+
+ /*
+ * Check sync. The last PSN of each generation is reserved for
+ * RESYNC.
+ */
+ if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) {
+ req->state = TID_REQUEST_SYNC;
+ goto sync_check;
+ }
+
+ /* Allocate the flow if not yet */
+ if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
+ goto done;
+
+ /*
+ * The following call will advance req->setup_head after
+ * allocating the tid entries.
+ */
+ if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
+ req->state = TID_REQUEST_QUEUED;
+
+ /*
+ * We don't have resources for this segment. The QP has
+ * already been queued.
+ */
+ goto done;
+ }
+ }
+
+ /* req->flow_idx should only be one slot behind req->setup_head */
+ flow = &req->flows[req->flow_idx];
+ flow->pkt = 0;
+ flow->tid_idx = 0;
+ flow->sent = 0;
+ if (!retry) {
+ /* Set the first and last IB PSN for the flow in use. */
+ flow->flow_state.ib_spsn = req->s_next_psn;
+ flow->flow_state.ib_lpsn =
+ flow->flow_state.ib_spsn + flow->npkts - 1;
+ }
+
+ /* Calculate the next segment start psn. */
+ req->s_next_psn += flow->npkts;
+
+ /* Build the packet header */
+ hdwords = hfi1_build_tid_rdma_read_packet(wqe, ohdr, bth1, bth2, len);
+done:
+ return hdwords;
+}
+
+/*
+ * Validate and accept the TID RDMA READ request parameters.
+ * Return 0 if the request is accepted successfully;
+ * Return 1 otherwise.
+ */
+static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
+ struct rvt_ack_entry *e,
+ struct hfi1_packet *packet,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 psn, u64 vaddr, u32 len)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ u32 flow_psn, i, tidlen = 0, pktlen, tlen;
+
+ req = ack_to_tid_req(e);
+
+ /* Validate the payload first */
+ flow = &req->flows[req->setup_head];
+
+ /* payload length = packet length - (header length + ICRC length) */
+ pktlen = packet->tlen - (packet->hlen + 4);
+ if (pktlen > sizeof(flow->tid_entry))
+ return 1;
+ memcpy(flow->tid_entry, packet->ebuf, pktlen);
+ flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
+
+ /*
+ * Walk the TID_ENTRY list to make sure we have enough space for a
+ * complete segment. Also calculate the number of required packets.
+ */
+ flow->npkts = rvt_div_round_up_mtu(qp, len);
+ for (i = 0; i < flow->tidcnt; i++) {
+ trace_hfi1_tid_entry_rcv_read_req(qp, i,
+ flow->tid_entry[i]);
+ tlen = EXP_TID_GET(flow->tid_entry[i], LEN);
+ if (!tlen)
+ return 1;
+
+ /*
+ * For a tid pair (tidctrl == 3), the buffer size of the pair
+ * should be the sum of the buffer size described by each
+ * tid entry. However, only the first entry needs to be
+ * specified in the request (see WFR HAS Section 8.5.7.1).
+ */
+ tidlen += tlen;
+ }
+ if (tidlen * PAGE_SIZE < len)
+ return 1;
+
+ /* Empty the flow array */
+ req->clear_tail = req->setup_head;
+ flow->pkt = 0;
+ flow->tid_idx = 0;
+ flow->tid_offset = 0;
+ flow->sent = 0;
+ flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp);
+ flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
+ TID_RDMA_DESTQP_FLOW_MASK;
+ flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_psn));
+ flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
+ flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
+ flow->length = len;
+
+ flow->flow_state.lpsn = flow->flow_state.spsn +
+ flow->npkts - 1;
+ flow->flow_state.ib_spsn = psn;
+ flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1;
+
+ trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
+ /* Set the initial flow index to the current flow. */
+ req->flow_idx = req->setup_head;
+
+ /* advance circular buffer head */
+ req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
+
+ /*
+ * Compute last PSN for request.
+ */
+ e->opcode = (bth0 >> 24) & 0xff;
+ e->psn = psn;
+ e->lpsn = psn + flow->npkts - 1;
+ e->sent = 0;
+
+ req->n_flows = qpriv->tid_rdma.local.max_read;
+ req->state = TID_REQUEST_ACTIVE;
+ req->cur_seg = 0;
+ req->comp_seg = 0;
+ req->ack_seg = 0;
+ req->isge = 0;
+ req->seg_len = qpriv->tid_rdma.local.max_len;
+ req->total_len = len;
+ req->total_segs = 1;
+ req->r_flow_psn = e->psn;
+
+ trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ return 0;
+}
+
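+/*
+ * tid_rdma_rcv_error - handle an out-of-sequence TID RDMA request. A
+ * positive @diff is a plain sequence error and is NAK'ed; otherwise the
+ * packet is a duplicate and the ack queue is rewound so the matching
+ * request can be re-processed.
+ */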
+static int tid_rdma_rcv_error(struct hfi1_packet *packet,
+ struct ib_other_headers *ohdr,
+ struct rvt_qp *qp, u32 psn, int diff)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct rvt_ack_entry *e;
+ struct tid_rdma_request *req;
+ unsigned long flags;
+ u8 prev;
+ bool old_req;
+
+ trace_hfi1_rsp_tid_rcv_error(qp, psn);
+ trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff);
+ if (diff > 0) {
+ /* sequence error */
+ if (!qp->r_nak_state) {
+ ibp->rvp.n_rc_seqnak++;
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ rc_defered_ack(rcd, qp);
+ }
+ goto done;
+ }
+
+ ibp->rvp.n_rc_dupreq++;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ e = find_prev_entry(qp, psn, &prev, NULL, &old_req);
+ if (!e || (e->opcode != TID_OP(READ_REQ) &&
+ e->opcode != TID_OP(WRITE_REQ)))
+ goto unlock;
+
+ req = ack_to_tid_req(e);
+ req->r_flow_psn = psn;
+ trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
+ if (e->opcode == TID_OP(READ_REQ)) {
+ struct ib_reth *reth;
+ u32 offset;
+ u32 len;
+ u32 rkey;
+ u64 vaddr;
+ int ok;
+ u32 bth0;
+
+ reth = &ohdr->u.tid_rdma.r_req.reth;
+ /*
+ * The requester always restarts from the start of the original
+ * request.
+ */
+ offset = delta_psn(psn, e->psn) * qp->pmtu;
+ len = be32_to_cpu(reth->length);
+ if (psn != e->psn || len != req->total_len)
+ goto unlock;
+
+ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+
+ rkey = be32_to_cpu(reth->rkey);
+ vaddr = get_ib_reth_vaddr(reth);
+
+ qp->r_len = len;
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto unlock;
+
+ /*
+ * If all the response packets for the current request have
+ * been sent out and this request is complete (old_req ==
+ * false), the TID flow may be unusable (req->clear_tail has
+ * been advanced). However, when an earlier request is
+ * received, this request will no longer be complete
+ * (qp->s_tail_ack_queue is moved back, see below).
+ * Consequently, we need to update the TID flow info every
+ * time a duplicate request is received.
+ */
+ bth0 = be32_to_cpu(ohdr->bth[0]);
+ if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn,
+ vaddr, len))
+ goto unlock;
+
+ /*
+ * True if the request is already scheduled (between
+ * qp->s_tail_ack_queue and qp->r_head_ack_queue).
+ */
+ if (old_req)
+ goto unlock;
+ } else {
+ struct flow_state *fstate;
+ bool schedule = false;
+ u8 i;
+
+ if (req->state == TID_REQUEST_RESEND) {
+ req->state = TID_REQUEST_RESEND_ACTIVE;
+ } else if (req->state == TID_REQUEST_INIT_RESEND) {
+ req->state = TID_REQUEST_INIT;
+ schedule = true;
+ }
+
+ /*
+ * True if the request is already scheduled (between
+ * qp->s_tail_ack_queue and qp->r_head_ack_queue).
+ * Also, don't change requests, which are at the SYNC
+ * point and haven't generated any responses yet.
+ * There is nothing to retransmit for them yet.
+ */
+ if (old_req || req->state == TID_REQUEST_INIT ||
+ (req->state == TID_REQUEST_SYNC && !req->cur_seg)) {
+ for (i = prev + 1; ; i++) {
+ if (i > rvt_size_atomic(&dev->rdi))
+ i = 0;
+ if (i == qp->r_head_ack_queue)
+ break;
+ e = &qp->s_ack_queue[i];
+ req = ack_to_tid_req(e);
+ if (e->opcode == TID_OP(WRITE_REQ) &&
+ req->state == TID_REQUEST_INIT)
+ req->state = TID_REQUEST_INIT_RESEND;
+ }
+ /*
+ * If the state of the request has been changed,
+ * the first leg needs to get scheduled in order to
+ * pick up the change. Otherwise, normal response
+ * processing should take care of it.
+ */
+ if (!schedule)
+ goto unlock;
+ }
+
+ /*
+ * If there is no more allocated segment, just schedule the qp
+ * without changing any state.
+ */
+ if (req->clear_tail == req->setup_head)
+ goto schedule;
+ /*
+ * If this request has sent responses for segments, which have
+ * not received data yet (flow_idx != clear_tail), the flow_idx
+ * pointer needs to be adjusted so the same responses can be
+ * re-sent.
+ */
+ if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) {
+ fstate = &req->flows[req->clear_tail].flow_state;
+ qpriv->pending_tid_w_segs -=
+ CIRC_CNT(req->flow_idx, req->clear_tail,
+ MAX_FLOWS);
+ req->flow_idx =
+ CIRC_ADD(req->clear_tail,
+ delta_psn(psn, fstate->resp_ib_psn),
+ MAX_FLOWS);
+ qpriv->pending_tid_w_segs +=
+ delta_psn(psn, fstate->resp_ib_psn);
+ /*
+ * When flow_idx == setup_head, we've gotten a duplicate
+ * request for a segment, which has not been allocated
+ * yet. In that case, don't adjust this request.
+ * However, we still want to go through the loop below
+ * to adjust all subsequent requests.
+ */
+ if (CIRC_CNT(req->setup_head, req->flow_idx,
+ MAX_FLOWS)) {
+ req->cur_seg = delta_psn(psn, e->psn);
+ req->state = TID_REQUEST_RESEND_ACTIVE;
+ }
+ }
+
+ for (i = prev + 1; ; i++) {
+ /*
+ * Look at everything up to and including
+ * s_tail_ack_queue
+ */
+ if (i > rvt_size_atomic(&dev->rdi))
+ i = 0;
+ if (i == qp->r_head_ack_queue)
+ break;
+ e = &qp->s_ack_queue[i];
+ req = ack_to_tid_req(e);
+ trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+ if (e->opcode != TID_OP(WRITE_REQ) ||
+ req->cur_seg == req->comp_seg ||
+ req->state == TID_REQUEST_INIT ||
+ req->state == TID_REQUEST_INIT_RESEND) {
+ if (req->state == TID_REQUEST_INIT)
+ req->state = TID_REQUEST_INIT_RESEND;
+ continue;
+ }
+ qpriv->pending_tid_w_segs -=
+ CIRC_CNT(req->flow_idx,
+ req->clear_tail,
+ MAX_FLOWS);
+ req->flow_idx = req->clear_tail;
+ req->state = TID_REQUEST_RESEND;
+ req->cur_seg = req->comp_seg;
+ }
+ qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
+ }
+ /* Re-process old requests. */
+ if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
+ qp->s_acked_ack_queue = prev;
+ qp->s_tail_ack_queue = prev;
+ /*
+ * Since the qp->s_tail_ack_queue is modified, the
+ * qp->s_ack_state must be changed to re-initialize
+ * qp->s_ack_rdma_sge; otherwise, we will end up in the
+ * wrong memory region.
+ */
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+schedule:
+ /*
+ * It's possible to receive a retry psn that is earlier than an RNR NAK
+ * psn. In this case, the RNR NAK state should be cleared.
+ */
+ if (qpriv->rnr_nak_state) {
+ qp->s_nak_state = 0;
+ qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
+ qp->r_psn = e->lpsn + 1;
+ hfi1_tid_write_alloc_resources(qp, true);
+ }
+
+ qp->r_state = e->opcode;
+ qp->r_nak_state = 0;
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ hfi1_schedule_send(qp);
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return 1;
+}
+
+void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
+{
+ /* HANDLER FOR TID RDMA READ REQUEST packet (Responder side) */
+
+ /*
+ * 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ
+ * (see hfi1_rc_rcv())
+ * 2. Put TID RDMA READ REQ into the response queue (s_ack_queue)
+ * - Setup struct tid_rdma_req with request info
+ * - Initialize struct tid_rdma_flow info;
+ * - Copy TID entries;
+ * 3. Set the qp->s_ack_state.
+ * 4. Set RVT_S_RESP_PENDING in s_flags.
+ * 5. Kick the send engine (hfi1_schedule_send())
+ */
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_ack_entry *e;
+ unsigned long flags;
+ struct ib_reth *reth;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ u32 bth0, psn, len, rkey;
+ bool is_fecn;
+ u8 next;
+ u64 vaddr;
+ int diff;
+ u8 nack_state = IB_NAK_INVALID_REQUEST;
+
+ bth0 = be32_to_cpu(ohdr->bth[0]);
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+ is_fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ trace_hfi1_rsp_rcv_tid_read_req(qp, psn);
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
+ rvt_comm_est(qp);
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto nack_inv;
+
+ reth = &ohdr->u.tid_rdma.r_req.reth;
+ vaddr = be64_to_cpu(reth->vaddr);
+ len = be32_to_cpu(reth->length);
+ /* The length needs to be a multiple of PAGE_SIZE */
+ if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len)
+ goto nack_inv;
+
+ diff = delta_psn(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ if (tid_rdma_rcv_error(packet, ohdr, qp, psn, diff))
+ return;
+ goto send_ack;
+ }
+
+ /* We've verified the request, insert it into the ack queue. */
+ next = qp->r_head_ack_queue + 1;
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent) {
+ nack_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
+ goto nack_inv_unlock;
+ }
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+
+ rkey = be32_to_cpu(reth->rkey);
+ qp->r_len = len;
+
+ if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_READ)))
+ goto nack_acc;
+
+ /* Accept the request parameters */
+ if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
+ len))
+ goto nack_inv_unlock;
+
+ qp->r_state = e->opcode;
+ qp->r_nak_state = 0;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ qp->r_msn++;
+ qp->r_psn += e->lpsn - e->psn + 1;
+
+ qp->r_head_ack_queue = next;
+
+ /*
+ * For all requests other than TID WRITE which are added to the ack
+ * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to
+ * do this because of interlocks between these and TID WRITE
+ * requests. The same change has also been made in hfi1_rc_rcv().
+ */
+ qpriv->r_tid_alloc = qp->r_head_ack_queue;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (is_fecn)
+ goto send_ack;
+ return;
+
+nack_inv_unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = nack_state;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ rc_defered_ack(rcd, qp);
+ return;
+nack_acc:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+send_ack:
+ hfi1_send_rc_ack(packet, is_fecn);
+}
+
+u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u32 *bth0,
+ u32 *bth1, u32 *bth2, u32 *len, bool *last)
+{
+ struct hfi1_ack_priv *epriv = e->priv;
+ struct tid_rdma_request *req = &epriv->tid_req;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
+ u32 tidentry = flow->tid_entry[flow->tid_idx];
+ u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
+ struct tid_rdma_read_resp *resp = &ohdr->u.tid_rdma.r_rsp;
+ u32 next_offset, om = KDETH_OM_LARGE;
+ bool last_pkt;
+ u32 hdwords = 0;
+ struct tid_rdma_params *remote;
+
+ *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
+ flow->sent += *len;
+ next_offset = flow->tid_offset + *len;
+ last_pkt = (flow->sent >= flow->length);
+
+ trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
+ trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ if (!remote) {
+ rcu_read_unlock();
+ goto done;
+ }
+ KDETH_RESET(resp->kdeth0, KVER, 0x1);
+ KDETH_SET(resp->kdeth0, SH, !last_pkt);
+ KDETH_SET(resp->kdeth0, INTR, !!(!last_pkt && remote->urg));
+ KDETH_SET(resp->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
+ KDETH_SET(resp->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
+ KDETH_SET(resp->kdeth0, OM, om == KDETH_OM_LARGE);
+ KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om);
+ KDETH_RESET(resp->kdeth1, JKEY, remote->jkey);
+ resp->verbs_qp = cpu_to_be32(qp->remote_qpn);
+ rcu_read_unlock();
+
+ resp->aeth = rvt_compute_aeth(qp);
+ resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn +
+ flow->pkt));
+
+ *bth0 = TID_OP(READ_RESP) << 24;
+ *bth1 = flow->tid_qpn;
+ *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
+ HFI1_KDETH_BTH_SEQ_MASK) |
+ (flow->flow_state.generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT));
+ *last = last_pkt;
+ if (last_pkt)
+ /* Advance to next flow */
+ req->clear_tail = (req->clear_tail + 1) &
+ (MAX_FLOWS - 1);
+
+ if (next_offset >= tidlen) {
+ flow->tid_offset = 0;
+ flow->tid_idx++;
+ } else {
+ flow->tid_offset = next_offset;
+ }
+
+ hdwords = sizeof(ohdr->u.tid_rdma.r_rsp) / sizeof(u32);
+
+done:
+ return hdwords;
+}
+
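+/*
+ * find_tid_request - walk the posted WQEs from s_acked through s_cur and
+ * return the TID RDMA request whose PSN range contains @psn, provided the
+ * WQE opcode matches @opcode; NULL otherwise.
+ */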
+static inline struct tid_rdma_request *
+find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
+ __must_hold(&qp->s_lock)
+{
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req = NULL;
+ u32 i, end;
+
+ end = qp->s_cur + 1;
+ if (end == qp->s_size)
+ end = 0;
+ for (i = qp->s_acked; i != end;) {
+ wqe = rvt_get_swqe_ptr(qp, i);
+ if (cmp_psn(psn, wqe->psn) >= 0 &&
+ cmp_psn(psn, wqe->lpsn) <= 0) {
+ if (wqe->wr.opcode == opcode)
+ req = wqe_to_tid_req(wqe);
+ break;
+ }
+ if (++i == qp->s_size)
+ i = 0;
+ }
+
+ return req;
+}
+
+void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
+{
+ /* HANDLER FOR TID RDMA READ RESPONSE packet (Requestor side) */
+
+ /*
+ * 1. Find matching SWQE
+ * 2. Check that the entire segment has been read.
+ * 3. Remove HFI1_S_WAIT_TID_RESP from s_flags.
+ * 4. Free the TID flow resources.
+ * 5. Kick the send engine (hfi1_schedule_send())
+ */
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ u32 opcode, aeth;
+ bool is_fecn;
+ unsigned long flags;
+ u32 kpsn, ipsn;
+
+ trace_hfi1_sender_rcv_tid_read_resp(qp);
+ is_fecn = process_ecn(qp, packet);
+ kpsn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth);
+ opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
+ req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
+ if (unlikely(!req))
+ goto ack_op_err;
+
+ flow = &req->flows[req->clear_tail];
+ /* When header suppression is disabled */
+ if (cmp_psn(ipsn, flow->flow_state.ib_lpsn))
+ goto ack_done;
+ req->ack_pending--;
+ priv->pending_tid_r_segs--;
+ qp->s_num_rd_atomic--;
+ if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
+ !qp->s_num_rd_atomic) {
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE |
+ RVT_S_WAIT_ACK);
+ hfi1_schedule_send(qp);
+ }
+ if (qp->s_flags & RVT_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK);
+ hfi1_schedule_send(qp);
+ }
+
+ trace_hfi1_ack(qp, ipsn);
+ trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
+ req->e.swqe->psn, req->e.swqe->lpsn,
+ req);
+ trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
+
+ /* Release the tid resources */
+ hfi1_kern_exp_rcv_clear(req);
+
+ if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd))
+ goto ack_done;
+
+ /* If not done yet, build next read request */
+ if (++req->comp_seg >= req->total_segs) {
+ priv->tid_r_comp++;
+ req->state = TID_REQUEST_COMPLETE;
+ }
+
+ /*
+ * Clear the hw flow under two conditions:
+ * 1. This request is a sync point and it is complete;
+ * 2. Current request is completed and there are no more requests.
+ */
+ if ((req->state == TID_REQUEST_SYNC &&
+ req->comp_seg == req->cur_seg) ||
+ priv->tid_r_comp == priv->tid_r_reqs) {
+ hfi1_kern_clear_hw_flow(priv->rcd, qp);
+ if (req->state == TID_REQUEST_SYNC)
+ req->state = TID_REQUEST_ACTIVE;
+ }
+
+ hfi1_schedule_send(qp);
+ goto ack_done;
+
+ack_op_err:
+ /*
+ * The test indicates that the send engine has finished its cleanup
+ * after sending the request and it's now safe to put the QP into error
+ * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail
+ * == qp->s_head), it would be unsafe to complete the wqe pointed to by
+ * qp->s_acked here. Putting the qp into error state will safely flush
+ * all remaining requests.
+ */
+ if (qp->s_last == qp->s_acked)
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+
+ack_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (is_fecn)
+ hfi1_send_rc_ack(packet, is_fecn);
+}
+
+void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ u32 n = qp->s_acked;
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ /* Free any TID entries */
+ while (n != qp->s_tail) {
+ wqe = rvt_get_swqe_ptr(qp, n);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ req = wqe_to_tid_req(wqe);
+ hfi1_kern_exp_rcv_clear_all(req);
+ }
+
+ if (++n == qp->s_size)
+ n = 0;
+ }
+ /* Free flow */
+ hfi1_kern_clear_hw_flow(priv->rcd, qp);
+}
+
+static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
+ struct hfi1_packet *packet, u8 rcv_type,
+ u8 opcode)
+{
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ u32 ipsn;
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_ack_entry *e;
+ struct tid_rdma_request *req;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ u32 i;
+
+ if (rcv_type >= RHF_RCV_TYPE_IB)
+ goto done;
+
+ spin_lock(&qp->s_lock);
+
+ /*
+ * We've run out of space in the eager buffer.
+ * Eagerly received KDETH packets which require space in the
+ * eager buffer (packets that have a payload) are TID RDMA WRITE
+ * response packets. In this case, we have to re-transmit the
+ * TID RDMA WRITE request.
+ */
+ if (rcv_type == RHF_RCV_TYPE_EAGER) {
+ hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
+ hfi1_schedule_send(qp);
+ goto done_unlock;
+ }
+
+ /*
+ * For TID READ response, error out QP after freeing the tid
+ * resources.
+ */
+ if (opcode == TID_OP(READ_RESP)) {
+ ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
+ if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
+ cmp_psn(ipsn, qp->s_psn) < 0) {
+ hfi1_kern_read_tid_flow_free(qp);
+ spin_unlock(&qp->s_lock);
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ goto done;
+ }
+ goto done_unlock;
+ }
+
+ /*
+ * Error out the qp for TID RDMA WRITE
+ */
+ hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
+ for (i = 0; i < rvt_max_atomic(rdi); i++) {
+ e = &qp->s_ack_queue[i];
+ if (e->opcode == TID_OP(WRITE_REQ)) {
+ req = ack_to_tid_req(e);
+ hfi1_kern_exp_rcv_clear_all(req);
+ }
+ }
+ spin_unlock(&qp->s_lock);
+ rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
+ goto done;
+
+done_unlock:
+ spin_unlock(&qp->s_lock);
+done:
+ return true;
+}
+
+static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd,
+ struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+
+ /* Start from the right segment */
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
+ req = wqe_to_tid_req(wqe);
+ flow = &req->flows[req->clear_tail];
+ hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= RVT_R_RSP_SEND;
+ rvt_get_qp(qp);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
+
+/*
+ * Handle the KDETH eflags for TID RDMA READ response.
+ *
+ * Return false if the last packet for a segment has been received and it is
+ * time to process the response normally; otherwise, return true (the packet
+ * has been handled here and must not be processed further).
+ *
+ * The caller must hold the packet->qp->r_lock and the rcu_read_lock.
+ */
+static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ struct hfi1_packet *packet, u8 rcv_type,
+ u8 rte, u32 psn, u32 ibpsn)
+ __must_hold(&packet->qp->r_lock) __must_hold(RCU)
+{
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_ibport *ibp;
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ u32 ack_psn;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *priv = qp->priv;
+ bool ret = true;
+ int diff = 0;
+ u32 fpsn;
+
+ lockdep_assert_held(&qp->r_lock);
+ /* If the psn is out of valid range, drop the packet */
+ if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
+ cmp_psn(ibpsn, qp->s_psn) > 0)
+ return ret;
+
+ spin_lock(&qp->s_lock);
+ /*
+ * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+ * requests and implicitly NAK RDMA read and atomic requests issued
+ * before the NAK'ed request.
+ */
+ ack_psn = ibpsn - 1;
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+
+ /* Complete WQEs that the PSN finishes. */
+ while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) {
+ /*
+ * If this request is an RDMA read or atomic, and the NAK is
+ * for a later operation, this NAK NAKs the RDMA read or
+ * atomic.
+ */
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ /* Retry this request. */
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ restart_tid_rdma_read_req(rcd, qp,
+ wqe);
+ } else {
+ hfi1_restart_rc(qp, qp->s_last_psn + 1,
+ 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= RVT_R_RSP_SEND;
+ rvt_get_qp(qp);
+ list_add_tail(/* wait */
+ &qp->rspwait,
+ &rcd->qp_wait_list);
+ }
+ }
+ }
+ /*
+ * No need to process the NAK since we are
+ * restarting an earlier request.
+ */
+ break;
+ }
+
+ wqe = do_rc_completion(qp, wqe, ibp);
+ if (qp->s_acked == qp->s_tail)
+ break;
+ }
+
+ /* Handle the eflags for the request */
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
+ goto s_unlock;
+
+ req = wqe_to_tid_req(wqe);
+ switch (rcv_type) {
+ case RHF_RCV_TYPE_EXPECTED:
+ switch (rte) {
+ case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
+ /*
+ * On the first occurrence of a Flow Sequence error,
+ * the flag TID_FLOW_SW_PSN is set.
+ *
+ * After that, the flow is *not* reprogrammed and the
+ * protocol falls back to SW PSN checking. This is done
+ * to prevent continuous Flow Sequence errors for any
+ * packets that could be still in the fabric.
+ */
+ flow = find_flow(req, psn, NULL);
+ if (!flow) {
+ /*
+ * We can't find the IB PSN matching the
+ * received KDETH PSN. The only thing we can
+ * do at this point is report the error to
+ * the QP.
+ */
+ hfi1_kern_read_tid_flow_free(qp);
+ spin_unlock(&qp->s_lock);
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ return ret;
+ }
+ if (priv->flow_state.flags & TID_FLOW_SW_PSN) {
+ diff = cmp_psn(psn,
+ priv->flow_state.r_next_psn);
+ if (diff > 0) {
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
+ restart_tid_rdma_read_req(rcd,
+ qp,
+ wqe);
+
+ /* Drop the packet. */
+ goto s_unlock;
+ } else if (diff < 0) {
+ /*
+ * If a response packet for a restarted
+ * request has come back, reset the
+ * restart flag.
+ */
+ if (qp->r_flags & RVT_R_RDMAR_SEQ)
+ qp->r_flags &=
+ ~RVT_R_RDMAR_SEQ;
+
+ /* Drop the packet. */
+ goto s_unlock;
+ }
+
+ /*
+ * If SW PSN verification is successful and
+ * this is the last packet in the segment, tell
+ * the caller to process it as a normal packet.
+ */
+ fpsn = full_flow_psn(flow,
+ flow->flow_state.lpsn);
+ if (cmp_psn(fpsn, psn) == 0) {
+ ret = false;
+ if (qp->r_flags & RVT_R_RDMAR_SEQ)
+ qp->r_flags &=
+ ~RVT_R_RDMAR_SEQ;
+ }
+ priv->flow_state.r_next_psn++;
+ } else {
+ u64 reg;
+ u32 last_psn;
+
+ /*
+ * The only sane way to get the amount of
+ * progress is to read the HW flow state.
+ */
+ reg = read_uctxt_csr(dd, rcd->ctxt,
+ RCV_TID_FLOW_TABLE +
+ (8 * flow->idx));
+ last_psn = mask_psn(reg);
+
+ priv->flow_state.r_next_psn = last_psn;
+ priv->flow_state.flags |= TID_FLOW_SW_PSN;
+ /*
+ * If no request has been restarted yet,
+ * restart the current one.
+ */
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
+ restart_tid_rdma_read_req(rcd, qp,
+ wqe);
+ }
+
+ break;
+
+ case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
+ /*
+ * Since the TID flow is able to ride through
+ * generation mismatch, drop this stale packet.
+ */
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case RHF_RCV_TYPE_ERROR:
+ switch (rte) {
+ case RHF_RTE_ERROR_OP_CODE_ERR:
+ case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
+ case RHF_RTE_ERROR_KHDR_HCRC_ERR:
+ case RHF_RTE_ERROR_KHDR_KVER_ERR:
+ case RHF_RTE_ERROR_CONTEXT_ERR:
+ case RHF_RTE_ERROR_KHDR_TID_ERR:
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+s_unlock:
+ spin_unlock(&qp->s_lock);
+ return ret;
+}
+
+bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ struct hfi1_pportdata *ppd,
+ struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ struct hfi1_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ u8 rcv_type = rhf_rcv_type(packet->rhf);
+ u8 rte = rhf_rcv_type_err(packet->rhf);
+ struct ib_header *hdr = packet->hdr;
+ struct ib_other_headers *ohdr = NULL;
+ int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ u16 lid = be16_to_cpu(hdr->lrh[1]);
+ u8 opcode;
+ u32 qp_num, psn, ibpsn;
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *qpriv;
+ unsigned long flags;
+ bool ret = true;
+ struct rvt_ack_entry *e;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+
+ trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
+ packet->rhf);
+ if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
+ return ret;
+
+ packet->ohdr = &hdr->u.oth;
+ ohdr = packet->ohdr;
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_qp) &
+ RVT_QPN_MASK;
+ if (lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto drop;
+
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!qp)
+ goto rcu_unlock;
+
+ packet->qp = qp;
+
+ /* Check for valid receive state. */
+ spin_lock_irqsave(&qp->r_lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
+ goto r_unlock;
+ }
+
+ if (packet->rhf & RHF_TID_ERR) {
+ /* For TIDERR and RC QPs preemptively schedule a NAK */
+ u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+
+ /* Sanity check packet */
+ if (tlen < 24)
+ goto r_unlock;
+
+ /*
+ * Check for GRH. We should never get packets with GRH in this
+ * path.
+ */
+ if (lnh == HFI1_LRH_GRH)
+ goto r_unlock;
+
+ if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
+ goto r_unlock;
+ }
+
+ /* handle TID RDMA READ */
+ if (opcode == TID_OP(READ_RESP)) {
+ ibpsn = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn);
+ ibpsn = mask_psn(ibpsn);
+ ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn,
+ ibpsn);
+ goto r_unlock;
+ }
+
+ /*
+ * qp->s_tail_ack_queue points to the rvt_ack_entry currently being
+ * processed. These are completed sequentially, so we can be sure that
+ * the pointer will not change until the entire request has completed.
+ */
+ spin_lock(&qp->s_lock);
+ qpriv = qp->priv;
+ e = &qp->s_ack_queue[qpriv->r_tid_tail];
+ req = ack_to_tid_req(e);
+ flow = &req->flows[req->clear_tail];
+ trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
+ trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
+ trace_hfi1_tid_write_rsp_handle_kdeth_eflags(qp);
+ trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+ trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
+
+ switch (rcv_type) {
+ case RHF_RCV_TYPE_EXPECTED:
+ switch (rte) {
+ case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
+ if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) {
+ u64 reg;
+
+ qpriv->s_flags |= HFI1_R_TID_SW_PSN;
+ /*
+ * The only sane way to get the amount of
+ * progress is to read the HW flow state.
+ */
+ reg = read_uctxt_csr(dd, rcd->ctxt,
+ RCV_TID_FLOW_TABLE +
+ (8 * flow->idx));
+ flow->flow_state.r_next_psn = mask_psn(reg);
+ qpriv->r_next_psn_kdeth =
+ flow->flow_state.r_next_psn;
+ goto nak_psn;
+ } else {
+ /*
+ * If the received PSN does not match the next
+ * expected PSN, NAK the packet.
+ * However, only do that if we know that a
+ * NAK has already been sent. Otherwise, this
+ * mismatch could be due to packets that were
+ * already in flight.
+ */
+ if (psn != flow->flow_state.r_next_psn) {
+ psn = flow->flow_state.r_next_psn;
+ goto nak_psn;
+ }
+
+ qpriv->s_nak_state = 0;
+ /*
+ * If SW PSN verification is successful and this
+ * is the last packet in the segment, tell the
+ * caller to process it as a normal packet.
+ */
+ if (psn == full_flow_psn(flow,
+ flow->flow_state.lpsn))
+ ret = false;
+ qpriv->r_next_psn_kdeth =
+ ++flow->flow_state.r_next_psn;
+ }
+ break;
+
+ case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
+ goto nak_psn;
+
+ default:
+ break;
+ }
+ break;
+
+ case RHF_RCV_TYPE_ERROR:
+ switch (rte) {
+ case RHF_RTE_ERROR_OP_CODE_ERR:
+ case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
+ case RHF_RTE_ERROR_KHDR_HCRC_ERR:
+ case RHF_RTE_ERROR_KHDR_KVER_ERR:
+ case RHF_RTE_ERROR_CONTEXT_ERR:
+ case RHF_RTE_ERROR_KHDR_TID_ERR:
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+unlock:
+ spin_unlock(&qp->s_lock);
+r_unlock:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+rcu_unlock:
+ rcu_read_unlock();
+drop:
+ return ret;
+nak_psn:
+ ibp->rvp.n_rc_seqnak++;
+ if (!qpriv->s_nak_state) {
+ qpriv->s_nak_state = IB_NAK_PSN_ERROR;
+ /* We are NAK'ing the next expected PSN */
+ qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
+ qpriv->s_flags |= RVT_S_ACK_PENDING;
+ if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
+ qpriv->r_tid_ack = qpriv->r_tid_tail;
+ hfi1_schedule_tid_send(qp);
+ }
+ goto unlock;
+}
+
+/*
+ * "Rewind" the TID request information.
+ * This means that we reset the state back to ACTIVE,
+ * find the proper flow, set the flow index to that flow,
+ * and reset the flow information.
+ */
+void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ u32 *bth2)
+{
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ int diff, delta_pkts;
+ u32 tididx = 0, i;
+ u16 fidx;
+
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ *bth2 = mask_psn(qp->s_psn);
+ flow = find_flow_ib(req, *bth2, &fidx);
+ if (!flow) {
+ trace_hfi1_msg_tid_restart_req(/* msg */
+ qp, "!!!!!! Could not find flow to restart: bth2 ",
+ (u64)*bth2);
+ trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn,
+ req);
+ return;
+ }
+ } else {
+ fidx = req->acked_tail;
+ flow = &req->flows[fidx];
+ *bth2 = mask_psn(req->r_ack_psn);
+ }
+
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
+ delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn);
+ else
+ delta_pkts = delta_psn(*bth2,
+ full_flow_psn(flow,
+ flow->flow_state.spsn));
+
+ trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
+ diff = delta_pkts + flow->resync_npkts;
+
+ flow->sent = 0;
+ flow->pkt = 0;
+ flow->tid_idx = 0;
+ flow->tid_offset = 0;
+ if (diff) {
+ for (tididx = 0; tididx < flow->tidcnt; tididx++) {
+ u32 tidentry = flow->tid_entry[tididx], tidlen,
+ tidnpkts, npkts;
+
+ flow->tid_offset = 0;
+ tidlen = EXP_TID_GET(tidentry, LEN) * PAGE_SIZE;
+ tidnpkts = rvt_div_round_up_mtu(qp, tidlen);
+ npkts = min_t(u32, diff, tidnpkts);
+ flow->pkt += npkts;
+ flow->sent += (npkts == tidnpkts ? tidlen :
+ npkts * qp->pmtu);
+ flow->tid_offset += npkts * qp->pmtu;
+ diff -= npkts;
+ if (!diff)
+ break;
+ }
+ }
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
+ rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
+ flow->sent, 0);
+ /*
+ * Packet PSN is based on flow_state.spsn + flow->pkt. However,
+ * during a RESYNC, the generation is incremented and the
+ * sequence is reset to 0. Since we've adjusted the npkts in the
+ * flow and the SGE has been sufficiently advanced, we have to
+ * adjust flow->pkt in order to calculate the correct PSN.
+ */
+ flow->pkt -= flow->resync_npkts;
+ }
+
+ if (flow->tid_offset ==
+ EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) {
+ tididx++;
+ flow->tid_offset = 0;
+ }
+ flow->tid_idx = tididx;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
+ /* Move flow_idx to correct index */
+ req->flow_idx = fidx;
+ else
+ req->clear_tail = fidx;
+
+ trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
+ trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ req->state = TID_REQUEST_ACTIVE;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
+ /* Reset all the flows that we are going to resend */
+ fidx = CIRC_NEXT(fidx, MAX_FLOWS);
+ i = qpriv->s_tid_tail;
+ do {
+ for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS);
+ fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
+ req->flows[fidx].sent = 0;
+ req->flows[fidx].pkt = 0;
+ req->flows[fidx].tid_idx = 0;
+ req->flows[fidx].tid_offset = 0;
+ req->flows[fidx].resync_npkts = 0;
+ }
+ if (i == qpriv->s_tid_cur)
+ break;
+ do {
+ /* Advance to the next WQE slot, wrapping at the queue end */
+ if (++i == qp->s_size)
+ i = 0;
+ wqe = rvt_get_swqe_ptr(qp, i);
+ } while (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE);
+ req = wqe_to_tid_req(wqe);
+ req->cur_seg = req->ack_seg;
+ fidx = req->acked_tail;
+ /* Pull req->clear_tail back */
+ req->clear_tail = fidx;
+ } while (1);
+ }
+}
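+
+/*
+ * Worked example for the rewind loop above (illustrative values, assuming
+ * PAGE_SIZE == 4096 and pmtu == 4096): skipping diff == 5 packets over TID
+ * entries of 2 and 4 pages consumes the first entry fully (flow->sent ==
+ * 8192) and 3 packets of the second, leaving flow->pkt == 5,
+ * flow->sent == 20480, tid_idx == 1 and tid_offset == 12288.
+ */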
+
+void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp)
+{
+ int i, ret;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_flow_state *fs;
+
+ if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA))
+ return;
+
+ /*
+ * First, clear the flow to help prevent any delayed packets from
+ * being delivered.
+ */
+ fs = &qpriv->flow_state;
+ if (fs->index != RXE_NUM_TID_FLOWS)
+ hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
+
+ for (i = qp->s_acked; i != qp->s_head;) {
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
+
+ if (++i == qp->s_size)
+ i = 0;
+ /* Free only locally allocated TID entries */
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
+ continue;
+ do {
+ struct hfi1_swqe_priv *priv = wqe->priv;
+
+ ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
+ } while (!ret);
+ }
+ for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) {
+ struct rvt_ack_entry *e = &qp->s_ack_queue[i];
+
+ if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device)))
+ i = 0;
+ /* Free only locally allocated TID entries */
+ if (e->opcode != TID_OP(WRITE_REQ))
+ continue;
+ do {
+ struct hfi1_ack_priv *priv = e->priv;
+
+ ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
+ } while (!ret);
+ }
+}
+
+bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ struct rvt_swqe *prev;
+ struct hfi1_qp_priv *priv = qp->priv;
+ u32 s_prev;
+ struct tid_rdma_request *req;
+
+ s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1;
+ prev = rvt_get_swqe_ptr(qp, s_prev);
+
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_RDMA_WRITE:
+ switch (prev->wr.opcode) {
+ case IB_WR_TID_RDMA_WRITE:
+ req = wqe_to_tid_req(prev);
+ if (req->ack_seg != req->total_segs)
+ goto interlock;
+ default:
+ break;
+ }
+ break;
+ case IB_WR_RDMA_READ:
+ if (prev->wr.opcode != IB_WR_TID_RDMA_WRITE)
+ break;
+ /* fall through */
+ case IB_WR_TID_RDMA_READ:
+ switch (prev->wr.opcode) {
+ case IB_WR_RDMA_READ:
+ if (qp->s_acked != qp->s_cur)
+ goto interlock;
+ break;
+ case IB_WR_TID_RDMA_WRITE:
+ req = wqe_to_tid_req(prev);
+ if (req->ack_seg != req->total_segs)
+ goto interlock;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ return false;
+
+interlock:
+ priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK;
+ return true;
+}
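+
+/*
+ * Worked example (illustrative values): if the previous WQE is a
+ * TID RDMA WRITE with total_segs == 4 but only ack_seg == 2 segments
+ * acknowledged, a following SEND takes the interlock path above:
+ * HFI1_S_TID_WAIT_INTERLCK is set and the SEND is held back until the
+ * remaining segments have been acked.
+ */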
+
+/* Does @sge meet the alignment requirements for tid rdma? */
+static inline bool hfi1_check_sge_align(struct rvt_qp *qp,
+ struct rvt_sge *sge, int num_sge)
+{
+ int i;
+
+ for (i = 0; i < num_sge; i++, sge++) {
+ trace_hfi1_sge_check_align(qp, i, sge);
+ if ((u64)sge->vaddr & ~PAGE_MASK ||
+ sge->sge_length & ~PAGE_MASK)
+ return false;
+ }
+ return true;
+}
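+
+/*
+ * Worked example (assuming 4 KiB pages): an SGE with vaddr 0x10000 and
+ * sge_length 8192 passes the check above; vaddr 0x10004 or sge_length
+ * 8000 would fail, since both the start address and the length must be
+ * page multiples for TID RDMA.
+ */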
+
+void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
+ struct hfi1_swqe_priv *priv = wqe->priv;
+ struct tid_rdma_params *remote;
+ enum ib_wr_opcode new_opcode;
+ bool do_tid_rdma = false;
+ struct hfi1_pportdata *ppd = qpriv->rcd->ppd;
+
+ if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) ==
+ ppd->lid)
+ return;
+ if (qpriv->hdr_type != HFI1_PKT_TYPE_9B)
+ return;
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ /*
+ * If TID RDMA is disabled by the negotiation, don't
+ * use it.
+ */
+ if (!remote)
+ goto exit;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_READ) {
+ if (hfi1_check_sge_align(qp, &wqe->sg_list[0],
+ wqe->wr.num_sge)) {
+ new_opcode = IB_WR_TID_RDMA_READ;
+ do_tid_rdma = true;
+ }
+ } else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
+ /*
+ * TID RDMA is enabled for this RDMA WRITE request iff:
+ * 1. The remote address is page-aligned,
+ * 2. The length is larger than the minimum segment size,
+ * 3. The length is a multiple of the page size.
+ */
+ if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
+ !(wqe->length & ~PAGE_MASK)) {
+ new_opcode = IB_WR_TID_RDMA_WRITE;
+ do_tid_rdma = true;
+ }
+ }
+
+ if (do_tid_rdma) {
+ if (hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_ATOMIC))
+ goto exit;
+ wqe->wr.opcode = new_opcode;
+ priv->tid_req.seg_len =
+ min_t(u32, remote->max_len, wqe->length);
+ priv->tid_req.total_segs =
+ DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len);
+ /* Compute the last PSN of the request */
+ wqe->lpsn = wqe->psn;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
+ priv->tid_req.n_flows = remote->max_read;
+ qpriv->tid_r_reqs++;
+ wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
+ } else {
+ wqe->lpsn += priv->tid_req.total_segs - 1;
+ atomic_inc(&qpriv->n_requests);
+ }
+
+ priv->tid_req.cur_seg = 0;
+ priv->tid_req.comp_seg = 0;
+ priv->tid_req.ack_seg = 0;
+ priv->tid_req.state = TID_REQUEST_INACTIVE;
+ /*
+ * Reset acked_tail.
+ * TID RDMA READ does not have ACKs, so it does not
+ * update the pointer. We have to reset it so TID RDMA
+ * WRITE does not get confused.
+ */
+ priv->tid_req.acked_tail = priv->tid_req.setup_head;
+ trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn,
+ &priv->tid_req);
+ }
+exit:
+ rcu_read_unlock();
+}
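+
+/*
+ * Worked example (illustrative values, assuming remote->max_len ==
+ * 256 KiB): a 1 MiB RDMA WRITE converted above gets seg_len == 262144
+ * and total_segs == 4, so lpsn == psn + 3 (one PSN per segment). The
+ * same 1 MiB as a TID RDMA READ with a 4 KiB pmtu instead gets
+ * lpsn == psn + 255 (one PSN per packet).
+ */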
+
+/* TID RDMA WRITE functions */
+
+u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_params *remote;
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ /*
+ * Set the number of flows to be used based on the negotiated
+ * parameters.
+ */
+ req->n_flows = remote->max_write;
+ req->state = TID_REQUEST_ACTIVE;
+
+ KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth0, KVER, 0x1);
+ KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth1, JKEY, remote->jkey);
+ ohdr->u.tid_rdma.w_req.reth.vaddr =
+ cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len));
+ ohdr->u.tid_rdma.w_req.reth.rkey =
+ cpu_to_be32(wqe->rdma_wr.rkey);
+ ohdr->u.tid_rdma.w_req.reth.length = cpu_to_be32(*len);
+ ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 &= ~RVT_QPN_MASK;
+ *bth1 |= remote->qp;
+ qp->s_state = TID_OP(WRITE_REQ);
+ qp->s_flags |= HFI1_S_WAIT_TID_RESP;
+ *bth2 |= IB_BTH_REQ_ACK;
+ *len = 0;
+
+ rcu_read_unlock();
+ return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
+}
+
+void hfi1_compute_tid_rdma_flow_wt(void)
+{
+ /*
+ * Heuristic for computing the RNR timeout when waiting on the flow
+ * queue. Rather than a computationally expensive exact estimate of when
+ * a flow will be available, we assume that if a QP is at position N in
+ * the flow queue it has to wait approximately (N + 1) * (number of
+ * segments between two sync points), assuming PMTU of 4K. The rationale
+ * for this is that flows are released and recycled at each sync point.
+ */
+ tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
+ TID_RDMA_MAX_SEGMENT_SIZE;
+}
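+
+/*
+ * Worked example (assuming MAX_TID_FLOW_PSN == 2048, a 4 KiB MTU for
+ * OPA_MTU_4096, and TID_RDMA_MAX_SEGMENT_SIZE == 256 KiB):
+ * tid_rdma_flow_wt = 2048 * 4096 / 262144 = 32, i.e. each position in
+ * the flow queue is weighted as roughly 32 segments of waiting.
+ */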
+
+static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
+ struct tid_queue *queue)
+{
+ return qpriv->tid_enqueue - queue->dequeue;
+}
+
+/*
+ * @qp: points to rvt_qp context.
+ * @to_seg: desired RNR timeout in segments.
+ * Return: index of the next highest timeout in the ib_hfi1_rnr_table[]
+ */
+static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ u64 timeout;
+ u32 bytes_per_us;
+ u8 i;
+
+ bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8;
+ timeout = (to_seg * TID_RDMA_MAX_SEGMENT_SIZE) / bytes_per_us;
+ /*
+ * Find the next value in the RNR table at or above the required
+ * timeout. This gives the responder some padding.
+ */
+ for (i = 1; i <= IB_AETH_CREDIT_MASK; i++)
+ if (rvt_rnr_tbl_to_usec(i) >= timeout)
+ return i;
+ return 0;
+}
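+
+/*
+ * Worked example (assuming TID_RDMA_MAX_SEGMENT_SIZE == 256 KiB and an
+ * egress rate of 100 Gb/s, i.e. bytes_per_us == 12500): waiting for
+ * to_seg == 1 segment gives timeout == 262144 / 12500 ~= 20 us, and the
+ * loop above returns the first RNR table index whose value is >= that.
+ */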
+
+/**
+ * hfi1_tid_write_alloc_resources - central place for resource allocation
+ * at the TID write responder; called from the write_req and write_data
+ * interrupt handlers as well as the send thread when a queued QP is
+ * scheduled for resource allocation.
+ *
+ * Iterates over (a) segments of a request and then (b) queued requests
+ * themselves to allocate resources for up to local->max_write
+ * segments across multiple requests. Allocation stops when we
+ * hit a sync point and resumes after the data packets at the
+ * sync point have been received.
+ *
+ * Resource allocation and the sending of responses are decoupled. The
+ * request/segment pairs being allocated and sent are tracked as follows.
+ * Resources are allocated for:
+ * [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
+ * The send thread sends:
+ * [request: qp->s_tail_ack_queue, segment: req->cur_seg]
+ */
+static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
+{
+ struct tid_rdma_request *req;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_ctxtdata *rcd = qpriv->rcd;
+ struct tid_rdma_params *local = &qpriv->tid_rdma.local;
+ struct rvt_ack_entry *e;
+ u32 npkts, to_seg;
+ bool last;
+ int ret = 0;
+
+ lockdep_assert_held(&qp->s_lock);
+
+ while (1) {
+ trace_hfi1_rsp_tid_write_alloc_res(qp, 0);
+ trace_hfi1_tid_write_rsp_alloc_res(qp);
+ /*
+ * Don't allocate more segments if a RNR NAK has already been
+ * scheduled to avoid messing up qp->r_psn: the RNR NAK will
+ * be sent only when all allocated segments have been sent.
+ * However, if more segments are allocated before that, TID RDMA
+ * WRITE RESP packets will be sent out for these new segments
+ * before the RNR NAK packet. When the requester receives the
+ * RNR NAK packet, it will restart with qp->s_last_psn + 1,
+ * which does not match qp->r_psn and will be dropped.
+ * Consequently, the requester will exhaust its retries and
+ * put the qp into error state.
+ */
+ if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND)
+ break;
+
+ /* No requests left to process */
+ if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
+ /* If all data has been received, clear the flow */
+ if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
+ !qpriv->alloc_w_segs)
+ hfi1_kern_clear_hw_flow(rcd, qp);
+ break;
+ }
+
+ e = &qp->s_ack_queue[qpriv->r_tid_alloc];
+ if (e->opcode != TID_OP(WRITE_REQ))
+ goto next_req;
+ req = ack_to_tid_req(e);
+ trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+ /* Finished allocating for all segments of this request */
+ if (req->alloc_seg >= req->total_segs)
+ goto next_req;
+
+ /* Can allocate only a maximum of local->max_write for a QP */
+ if (qpriv->alloc_w_segs >= local->max_write)
+ break;
+
+ /* Don't allocate at a sync point with data packets pending */
+ if (qpriv->sync_pt && qpriv->alloc_w_segs)
+ break;
+
+ /* All data received at the sync point, continue */
+ if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
+ hfi1_kern_clear_hw_flow(rcd, qp);
+ qpriv->sync_pt = false;
+ if (qpriv->s_flags & HFI1_R_TID_SW_PSN)
+ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+ }
+
+ /* Allocate flow if we don't have one */
+ if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
+ ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
+ if (ret) {
+ to_seg = tid_rdma_flow_wt *
+ position_in_queue(qpriv,
+ &rcd->flow_queue);
+ break;
+ }
+ }
+
+ npkts = rvt_div_round_up_mtu(qp, req->seg_len);
+
+ /*
+ * We are at a sync point if we run out of KDETH PSN space.
+ * Last PSN of every generation is reserved for RESYNC.
+ */
+ if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) {
+ qpriv->sync_pt = true;
+ break;
+ }
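+ /*
+ * Worked example (assuming 2048 KDETH PSNs per generation, so
+ * PSN 2047 is the reserved RESYNC PSN): with a 4 KiB MTU and a
+ * 256 KiB segment, npkts == 64 and the sync point is declared
+ * once flow_state.psn reaches 1984, since 1984 + 64 > 2047.
+ */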
+
+ /*
+ * If overtaking req->acked_tail, send an RNR NAK. Because the
+ * QP is not queued in this case, and the issue can only be
+ * caused by a delay in scheduling the second leg which we
+ * cannot estimate, we use a rather arbitrary RNR timeout of
+ * (MAX_FLOWS / 2) segments.
+ */
+ if (!CIRC_SPACE(req->setup_head, req->acked_tail,
+ MAX_FLOWS)) {
+ ret = -EAGAIN;
+ to_seg = MAX_FLOWS >> 1;
+ qpriv->s_flags |= RVT_S_ACK_PENDING;
+ hfi1_schedule_tid_send(qp);
+ break;
+ }
+
+ /* Try to allocate rcv array / TID entries */
+ ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last);
+ if (ret == -EAGAIN)
+ to_seg = position_in_queue(qpriv, &rcd->rarr_queue);
+ if (ret)
+ break;
+
+ qpriv->alloc_w_segs++;
+ req->alloc_seg++;
+ continue;
+next_req:
+ /* Begin processing the next request */
+ if (++qpriv->r_tid_alloc >
+ rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ qpriv->r_tid_alloc = 0;
+ }
+
+ /*
+ * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation
+ * has failed, (b) we are called from the rcv handler interrupt context,
+ * and (c) an RNR NAK has not already been scheduled.
+ */
+ if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state)
+ goto send_rnr_nak;
+
+ return;
+
+send_rnr_nak:
+ lockdep_assert_held(&qp->r_lock);
+
+ /* Set r_nak_state to prevent unrelated events from generating NAK's */
+ qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK;
+
+ /* Pull back r_psn to the segment being RNR NAK'd */
+ qp->r_psn = e->psn + req->alloc_seg;
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Pull back r_head_ack_queue to the ack entry following the request
+ * being RNR NAK'd. This allows resources to be allocated to the request
+ * if the queued QP is scheduled.
+ */
+ qp->r_head_ack_queue = qpriv->r_tid_alloc + 1;
+ if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ qp->r_head_ack_queue = 0;
+ qpriv->r_tid_head = qp->r_head_ack_queue;
+ /*
+ * These send side fields are used in make_rc_ack(). They are set in
+ * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock
+ * for consistency
+ */
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+ /*
+ * Clear the ACK PENDING flag to prevent unwanted ACK because we
+ * have modified qp->s_ack_psn here.
+ */
+ qp->s_flags &= ~(RVT_S_ACK_PENDING);
+
+ trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn);
+ /*
+ * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK
+ * has actually been sent. qp->s_flags RVT_S_ACK_PENDING bit cannot be
+ * used for this because qp->s_lock is dropped before calling
+ * hfi1_send_rc_ack() leading to inconsistency between the receive
+ * interrupt handlers and the send thread in make_rc_ack()
+ */
+ qpriv->rnr_nak_state = TID_RNR_NAK_SEND;
+
+ /*
+ * Schedule RNR NAK to be sent. RNR NAK's are scheduled from the receive
+ * interrupt handlers but will be sent from the send engine behind any
+ * previous responses that may have been scheduled
+ */
+ rc_defered_ack(rcd, qp);
+}
+
+void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
+{
+ /* HANDLER FOR TID RDMA WRITE REQUEST packet (Responder side) */
+
+ /*
+ * 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST
+ * (see hfi1_rc_rcv())
+ * - Don't allow 0-length requests.
+ * 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue)
+ * - Setup struct tid_rdma_req with request info
+ * - Prepare struct tid_rdma_flow array?
+ * 3. Set the qp->s_ack_state as state diagram in design doc.
+ * 4. Set RVT_S_RESP_PENDING in s_flags.
+ * 5. Kick the send engine (hfi1_schedule_send())
+ */
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_ack_entry *e;
+ unsigned long flags;
+ struct ib_reth *reth;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_request *req;
+ u32 bth0, psn, len, rkey, num_segs;
+ bool is_fecn;
+ u8 next;
+ u64 vaddr;
+ int diff;
+
+ bth0 = be32_to_cpu(ohdr->bth[0]);
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+ is_fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ trace_hfi1_rsp_rcv_tid_write_req(qp, psn);
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
+ rvt_comm_est(qp);
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto nack_inv;
+
+ reth = &ohdr->u.tid_rdma.w_req.reth;
+ vaddr = be64_to_cpu(reth->vaddr);
+ len = be32_to_cpu(reth->length);
+
+ num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len);
+ diff = delta_psn(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ if (tid_rdma_rcv_error(packet, ohdr, qp, psn, diff))
+ return;
+ goto send_ack;
+ }
+
+ /*
+ * The resent request which was previously RNR NAK'd is inserted at the
+ * location of the original request, which is one entry behind
+ * r_head_ack_queue
+ */
+ if (qpriv->rnr_nak_state)
+ qp->r_head_ack_queue = qp->r_head_ack_queue ?
+ qp->r_head_ack_queue - 1 :
+ rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
+
+ /* We've verified the request, insert it into the ack queue. */
+ next = qp->r_head_ack_queue + 1;
+ if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (unlikely(next == qp->s_acked_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlock;
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ req = ack_to_tid_req(e);
+
+ /* Bring previously RNR NAK'd request back to life */
+ if (qpriv->rnr_nak_state) {
+ qp->r_nak_state = 0;
+ qp->s_nak_state = 0;
+ qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
+ qp->r_psn = e->lpsn + 1;
+ req->state = TID_REQUEST_INIT;
+ goto update_head;
+ }
+
+ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+
+ /* The length needs to be in multiples of PAGE_SIZE */
+ if (!len || len & ~PAGE_MASK)
+ goto nack_inv_unlock;
+
+ rkey = be32_to_cpu(reth->rkey);
+ qp->r_len = len;
+
+ if (e->opcode == TID_OP(WRITE_REQ) &&
+ (req->setup_head != req->clear_tail ||
+ req->clear_tail != req->acked_tail))
+ goto nack_inv_unlock;
+
+ if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_WRITE)))
+ goto nack_acc;
+
+ qp->r_psn += num_segs - 1;
+
+ e->opcode = (bth0 >> 24) & 0xff;
+ e->psn = psn;
+ e->lpsn = qp->r_psn;
+ e->sent = 0;
+
+ req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
+ req->state = TID_REQUEST_INIT;
+ req->cur_seg = 0;
+ req->comp_seg = 0;
+ req->ack_seg = 0;
+ req->alloc_seg = 0;
+ req->isge = 0;
+ req->seg_len = qpriv->tid_rdma.local.max_len;
+ req->total_len = len;
+ req->total_segs = num_segs;
+ req->r_flow_psn = e->psn;
+ req->ss.sge = e->rdma_sge;
+ req->ss.num_sge = 1;
+
+ req->flow_idx = req->setup_head;
+ req->clear_tail = req->setup_head;
+ req->acked_tail = req->setup_head;
+
+ qp->r_state = e->opcode;
+ qp->r_nak_state = 0;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ qp->r_msn++;
+ qp->r_psn++;
+
+ trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+
+ if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) {
+ qpriv->r_tid_tail = qp->r_head_ack_queue;
+ } else if (qpriv->r_tid_tail == qpriv->r_tid_head) {
+ struct tid_rdma_request *ptr;
+
+ e = &qp->s_ack_queue[qpriv->r_tid_tail];
+ ptr = ack_to_tid_req(e);
+
+ if (e->opcode != TID_OP(WRITE_REQ) ||
+ ptr->comp_seg == ptr->total_segs) {
+ if (qpriv->r_tid_tail == qpriv->r_tid_ack)
+ qpriv->r_tid_ack = qp->r_head_ack_queue;
+ qpriv->r_tid_tail = qp->r_head_ack_queue;
+ }
+ }
+update_head:
+ qp->r_head_ack_queue = next;
+ qpriv->r_tid_head = qp->r_head_ack_queue;
+
+ hfi1_tid_write_alloc_resources(qp, true);
+ trace_hfi1_tid_write_rsp_rcv_req(qp);
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (is_fecn)
+ goto send_ack;
+ return;
+
+nack_inv_unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ rc_defered_ack(rcd, qp);
+ return;
+nack_acc:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+send_ack:
+ hfi1_send_rc_ack(packet, is_fecn);
+}
+
+u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 bth2, u32 *len,
+ struct rvt_sge_state **ss)
+{
+ struct hfi1_ack_priv *epriv = e->priv;
+ struct tid_rdma_request *req = &epriv->tid_req;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_flow *flow = NULL;
+ u32 resp_len = 0, hdwords = 0;
+ void *resp_addr = NULL;
+ struct tid_rdma_params *remote;
+
+ trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ trace_hfi1_tid_write_rsp_build_resp(qp);
+ trace_hfi1_rsp_build_tid_write_resp(qp, bth2);
+ flow = &req->flows[req->flow_idx];
+ switch (req->state) {
+ default:
+ /*
+ * Try to allocate resources here in case QP was queued and was
+ * later scheduled when resources became available
+ */
+ hfi1_tid_write_alloc_resources(qp, false);
+
+ /* We've already sent everything which is ready */
+ if (req->cur_seg >= req->alloc_seg)
+ goto done;
+
+ /*
+ * Resources can be assigned but responses cannot be sent in the
+ * rnr_nak state until the resent request is received.
+ */
+ if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT)
+ goto done;
+
+ req->state = TID_REQUEST_ACTIVE;
+ trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
+ req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
+ hfi1_add_tid_reap_timer(qp);
+ break;
+
+ case TID_REQUEST_RESEND_ACTIVE:
+ case TID_REQUEST_RESEND:
+ trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
+ req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
+ if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS))
+ req->state = TID_REQUEST_ACTIVE;
+
+ hfi1_mod_tid_reap_timer(qp);
+ break;
+ }
+ flow->flow_state.resp_ib_psn = bth2;
+ resp_addr = (void *)flow->tid_entry;
+ resp_len = sizeof(*flow->tid_entry) * flow->tidcnt;
+ req->cur_seg++;
+
+ memset(&ohdr->u.tid_rdma.w_rsp, 0, sizeof(ohdr->u.tid_rdma.w_rsp));
+ epriv->ss.sge.vaddr = resp_addr;
+ epriv->ss.sge.sge_length = resp_len;
+ epriv->ss.sge.length = epriv->ss.sge.sge_length;
+ /*
+ * We can safely zero these out. Since the first SGE covers the
+ * entire packet, nothing else should even look at the MR.
+ */
+ epriv->ss.sge.mr = NULL;
+ epriv->ss.sge.m = 0;
+ epriv->ss.sge.n = 0;
+
+ epriv->ss.sg_list = NULL;
+ epriv->ss.total_len = epriv->ss.sge.sge_length;
+ epriv->ss.num_sge = 1;
+
+ *ss = &epriv->ss;
+ *len = epriv->ss.total_len;
+
+ /* Construct the TID RDMA WRITE RESP packet header */
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+
+ KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth0, KVER, 0x1);
+ KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth1, JKEY, remote->jkey);
+ ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp);
+ ohdr->u.tid_rdma.w_rsp.tid_flow_psn =
+ cpu_to_be32((flow->flow_state.generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT) |
+ (flow->flow_state.spsn &
+ HFI1_KDETH_BTH_SEQ_MASK));
+ ohdr->u.tid_rdma.w_rsp.tid_flow_qp =
+ cpu_to_be32(qpriv->tid_rdma.local.qp |
+ ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
+ TID_RDMA_DESTQP_FLOW_SHIFT) |
+ qpriv->rcd->ctxt);
+ ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 = remote->qp;
+ rcu_read_unlock();
+ hdwords = sizeof(ohdr->u.tid_rdma.w_rsp) / sizeof(u32);
+ qpriv->pending_tid_w_segs++;
+done:
+ return hdwords;
+}
+
+static void hfi1_add_tid_reap_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) {
+ qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
+ qpriv->s_tid_timer.expires = jiffies +
+ qpriv->tid_timer_timeout_jiffies;
+ add_timer(&qpriv->s_tid_timer);
+ }
+}
+
+static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ lockdep_assert_held(&qp->s_lock);
+ qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
+ mod_timer(&qpriv->s_tid_timer, jiffies +
+ qpriv->tid_timer_timeout_jiffies);
+}
+
+static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ int rval = 0;
+
+ lockdep_assert_held(&qp->s_lock);
+ if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
+ rval = del_timer(&qpriv->s_tid_timer);
+ qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
+ }
+ return rval;
+}
+
+void hfi1_del_tid_reap_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+
+ del_timer_sync(&qpriv->s_tid_timer);
+ qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
+}
+
+static void hfi1_tid_timeout(struct timer_list *t)
+{
+ struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer);
+ struct rvt_qp *qp = qpriv->owner;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ unsigned long flags;
+ u32 i;
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+ spin_lock(&qp->s_lock);
+ if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
+ dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n",
+ qp->ibqp.qp_num, __func__, __LINE__);
+ trace_hfi1_msg_tid_timeout(/* msg */
+ qp, "resource timeout = ",
+ (u64)qpriv->tid_timer_timeout_jiffies);
+ hfi1_stop_tid_reap_timer(qp);
+ /*
+ * Go through the entire ack queue and clear any outstanding
+ * HW flow and RcvArray resources.
+ */
+ hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
+ for (i = 0; i < rvt_max_atomic(rdi); i++) {
+ struct tid_rdma_request *req =
+ ack_to_tid_req(&qp->s_ack_queue[i]);
+
+ hfi1_kern_exp_rcv_clear_all(req);
+ }
+ spin_unlock(&qp->s_lock);
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_FATAL;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR);
+ goto unlock_r_lock;
+ }
+ spin_unlock(&qp->s_lock);
+unlock_r_lock:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+}
+
+void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
+{
+ /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requestor side) */
+
+ /*
+ * 1. Find matching SWQE
+ * 2. Check that TIDENTRY array has enough space for a complete
+ * segment. If not, put QP in error state.
+ * 3. Save response data in struct tid_rdma_req and struct tid_rdma_flow
+ * 4. Remove HFI1_S_WAIT_TID_RESP from s_flags.
+ * 5. Set qp->s_state
+ * 6. Kick the send engine (hfi1_schedule_send())
+ */
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ enum ib_wc_status status;
+ u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen;
+ bool is_fecn;
+ unsigned long flags;
+
+ is_fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth);
+ opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Ignore invalid responses */
+ if (cmp_psn(psn, qp->s_next_psn) >= 0)
+ goto ack_done;
+
+ /* Ignore duplicate responses. */
+ if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0))
+ goto ack_done;
+
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_done;
+
+ /*
+ * If we are waiting for a particular packet sequence number
+ * due to a request being resent, check for it. Otherwise,
+ * ensure that we haven't missed anything.
+ */
+ if (qp->r_flags & RVT_R_RDMAR_SEQ) {
+ if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
+ goto ack_done;
+ qp->r_flags &= ~RVT_R_RDMAR_SEQ;
+ }
+
+ wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
+ if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE))
+ goto ack_op_err;
+
+ req = wqe_to_tid_req(wqe);
+ /*
+ * If we've lost ACKs and our acked_tail pointer is too far
+ * behind, don't overwrite segments. Just drop the packet and
+ * let the reliability protocol take care of it.
+ */
+ if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
+ goto ack_done;
+
+ /*
+ * The call to do_rc_ack() should be last in the chain of
+ * packet checks because it will end up updating the QP state.
+ * Therefore, anything that would prevent the packet from
+ * being accepted as a successful response should be prior
+ * to it.
+ */
+ if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
+ goto ack_done;
+
+ trace_hfi1_ack(qp, psn);
+
+ flow = &req->flows[req->setup_head];
+ flow->pkt = 0;
+ flow->tid_idx = 0;
+ flow->tid_offset = 0;
+ flow->sent = 0;
+ flow->resync_npkts = 0;
+ flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp);
+ flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
+ TID_RDMA_DESTQP_FLOW_MASK;
+ flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_psn));
+ flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
+ flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
+ flow->flow_state.resp_ib_psn = psn;
+ flow->length = min_t(u32, req->seg_len,
+ (wqe->length - (req->comp_seg * req->seg_len)));
+
+ flow->npkts = rvt_div_round_up_mtu(qp, flow->length);
+ flow->flow_state.lpsn = flow->flow_state.spsn +
+ flow->npkts - 1;
+ /* payload length = packet length - (header length + ICRC length) */
+ pktlen = packet->tlen - (packet->hlen + 4);
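+ /*
+ * Worked example (assuming 4-byte TID entries): a response
+ * carrying 16 TID entries arrives with tlen == hlen + 64 + 4,
+ * so pktlen recovers the 64 payload bytes and the division
+ * below yields tidcnt == 16.
+ */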
+ if (pktlen > sizeof(flow->tid_entry)) {
+ status = IB_WC_LOC_LEN_ERR;
+ goto ack_err;
+ }
+ memcpy(flow->tid_entry, packet->ebuf, pktlen);
+ flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
+ trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
+
+ req->comp_seg++;
+ trace_hfi1_tid_write_sender_rcv_resp(qp, 0);
+ /*
+ * Walk the TID_ENTRY list to make sure we have enough space for a
+ * complete segment.
+ */
+ for (i = 0; i < flow->tidcnt; i++) {
+ trace_hfi1_tid_entry_rcv_write_resp(/* entry */
+ qp, i, flow->tid_entry[i]);
+ if (!EXP_TID_GET(flow->tid_entry[i], LEN)) {
+ status = IB_WC_LOC_LEN_ERR;
+ goto ack_err;
+ }
+ tidlen += EXP_TID_GET(flow->tid_entry[i], LEN);
+ }
+ if (tidlen * PAGE_SIZE < flow->length) {
+ status = IB_WC_LOC_LEN_ERR;
+ goto ack_err;
+ }
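+ /*
+ * Worked example (assuming 4 KiB pages): three TID entries of
+ * 4, 2 and 2 pages give tidlen == 8 pages == 32768 bytes, which
+ * covers a flow->length of up to 32768; a longer segment would
+ * have failed above with IB_WC_LOC_LEN_ERR.
+ */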
+
+ trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ /*
+ * If this is the first response for this request, set the initial
+ * flow index to the current flow.
+ */
+ if (!cmp_psn(psn, wqe->psn)) {
+ req->r_last_acked = mask_psn(wqe->psn - 1);
+ /* Set acked flow index to head index */
+ req->acked_tail = req->setup_head;
+ }
+
+ /* advance circular buffer head */
+ req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
+ req->state = TID_REQUEST_ACTIVE;
+
+ /*
+ * If all responses for this TID RDMA WRITE request have been received,
+ * advance the pointer to the next one.
+ * Since TID RDMA requests could be mixed in with regular IB requests,
+ * they might not appear sequentially in the queue. Therefore, the
+ * next request needs to be "found".
+ */
+ if (qpriv->s_tid_cur != qpriv->s_tid_head &&
+ req->comp_seg == req->total_segs) {
+ for (i = qpriv->s_tid_cur + 1; ; i++) {
+ if (i == qp->s_size)
+ i = 0;
+ wqe = rvt_get_swqe_ptr(qp, i);
+ if (i == qpriv->s_tid_head)
+ break;
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
+ break;
+ }
+ qpriv->s_tid_cur = i;
+ }
+ qp->s_flags &= ~HFI1_S_WAIT_TID_RESP;
+
+ hfi1_schedule_tid_send(qp);
+ goto ack_done;
+
+ack_op_err:
+ status = IB_WC_LOC_QP_OP_ERR;
+ack_err:
+ rvt_error_qp(qp, status);
+ack_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (is_fecn)
+ hfi1_send_rc_ack(packet, is_fecn);
+}
+
+bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len)
+{
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
+ struct tid_rdma_params *remote;
+ struct rvt_qp *qp = req->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ u32 tidentry = flow->tid_entry[flow->tid_idx];
+ u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
+ struct tid_rdma_write_data *wd = &ohdr->u.tid_rdma.w_data;
+ u32 next_offset, om = KDETH_OM_LARGE;
+ bool last_pkt;
+
+ if (!tidlen) {
+ hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR);
+ rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR);
+ }
+
+ *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
+ flow->sent += *len;
+ next_offset = flow->tid_offset + *len;
+ last_pkt = (flow->tid_idx == (flow->tidcnt - 1) &&
+ next_offset >= tidlen) || (flow->sent >= flow->length);
+ trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry);
+ trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ KDETH_RESET(wd->kdeth0, KVER, 0x1);
+ KDETH_SET(wd->kdeth0, SH, !last_pkt);
+ KDETH_SET(wd->kdeth0, INTR, !!(!last_pkt && remote->urg));
+ KDETH_SET(wd->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
+ KDETH_SET(wd->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
+ KDETH_SET(wd->kdeth0, OM, om == KDETH_OM_LARGE);
+ KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om);
+ KDETH_RESET(wd->kdeth1, JKEY, remote->jkey);
+ wd->verbs_qp = cpu_to_be32(qp->remote_qpn);
+ rcu_read_unlock();
+
+ *bth1 = flow->tid_qpn;
+ *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
+ HFI1_KDETH_BTH_SEQ_MASK) |
+ (flow->flow_state.generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT));
+ if (last_pkt) {
+ /* PSNs are zero-based, so +1 to count number of packets */
+ if (flow->flow_state.lpsn + 1 +
+ rvt_div_round_up_mtu(qp, req->seg_len) >
+ MAX_TID_FLOW_PSN)
+ req->state = TID_REQUEST_SYNC;
+ *bth2 |= IB_BTH_REQ_ACK;
+ }
+
+ if (next_offset >= tidlen) {
+ flow->tid_offset = 0;
+ flow->tid_idx++;
+ } else {
+ flow->tid_offset = next_offset;
+ }
+ return last_pkt;
+}
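+
+/*
+ * Worked example for the *bth2 construction above (assuming
+ * HFI1_KDETH_BTH_SEQ_MASK == 0x7ff and HFI1_KDETH_BTH_SEQ_SHIFT == 11):
+ * with generation == 3, spsn == 10 and this being packet pkt == 2 of the
+ * flow, *bth2 == (3 << 11) | 12 == 0x180c before any IB_BTH_REQ_ACK bit
+ * is set for the last packet.
+ */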
+
+void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
+{
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ctxtdata *rcd = priv->rcd;
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_ack_entry *e;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ unsigned long flags;
+ u32 psn, next;
+ u8 opcode;
+
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+
+ /*
+ * All error handling should be done by now. If we are here, the packet
+ * is either good or has been accepted by the error handler.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ e = &qp->s_ack_queue[priv->r_tid_tail];
+ req = ack_to_tid_req(e);
+ flow = &req->flows[req->clear_tail];
+ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
+ if (cmp_psn(psn, flow->flow_state.r_next_psn))
+ goto send_nak;
+ flow->flow_state.r_next_psn++;
+ goto exit;
+ }
+ flow->flow_state.r_next_psn = mask_psn(psn + 1);
+ hfi1_kern_exp_rcv_clear(req);
+ priv->alloc_w_segs--;
+ rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
+ req->comp_seg++;
+ priv->s_nak_state = 0;
+
+ /*
+ * Release the flow if one of the following conditions has been met:
+ * - The request has reached a sync point AND all outstanding
+ * segments have been completed, or
+ * - The entire request is complete and there are no more requests
+ * (of any kind) in the queue.
+ */
+ trace_hfi1_rsp_rcv_tid_write_data(qp, psn);
+ trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ trace_hfi1_tid_write_rsp_rcv_data(qp);
+ if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+ priv->r_tid_ack = priv->r_tid_tail;
+
+ if (opcode == TID_OP(WRITE_DATA_LAST)) {
+ for (next = priv->r_tid_tail + 1; ; next++) {
+ if (next > rvt_size_atomic(&dev->rdi))
+ next = 0;
+ if (next == priv->r_tid_head)
+ break;
+ e = &qp->s_ack_queue[next];
+ if (e->opcode == TID_OP(WRITE_REQ))
+ break;
+ }
+ priv->r_tid_tail = next;
+ if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi))
+ qp->s_acked_ack_queue = 0;
+ }
+
+ hfi1_tid_write_alloc_resources(qp, true);
+
+ /*
+ * If we need to generate more responses, schedule the
+ * send engine.
+ */
+ if (req->cur_seg < req->total_segs ||
+ qp->s_tail_ack_queue != qp->r_head_ack_queue) {
+ qp->s_flags |= RVT_S_RESP_PENDING;
+ hfi1_schedule_send(qp);
+ }
+
+ priv->pending_tid_w_segs--;
+ if (priv->s_flags & HFI1_R_TID_RSC_TIMER) {
+ if (priv->pending_tid_w_segs)
+ hfi1_mod_tid_reap_timer(req->qp);
+ else
+ hfi1_stop_tid_reap_timer(req->qp);
+ }
+
+done:
+ priv->s_flags |= RVT_S_ACK_PENDING;
+ hfi1_schedule_tid_send(qp);
+exit:
+ priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+
+send_nak:
+ if (!priv->s_nak_state) {
+ priv->s_nak_state = IB_NAK_PSN_ERROR;
+ priv->s_nak_psn = flow->flow_state.r_next_psn;
+ priv->s_flags |= RVT_S_ACK_PENDING;
+ if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+ priv->r_tid_ack = priv->r_tid_tail;
+ hfi1_schedule_tid_send(qp);
+ }
+ goto done;
+}
+
+static bool hfi1_tid_rdma_is_resync_psn(u32 psn)
+{
+ return (bool)((psn & HFI1_KDETH_BTH_SEQ_MASK) ==
+ HFI1_KDETH_BTH_SEQ_MASK);
+}
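+
+/*
+ * Worked example (assuming HFI1_KDETH_BTH_SEQ_MASK == 0x7ff): PSN
+ * 0x37ff (generation 6, sequence 0x7ff) is a RESYNC PSN because its
+ * sequence bits are all ones; 0x37fe is not. Only the last PSN of a
+ * generation qualifies.
+ */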
+
+u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u16 iflow,
+ u32 *bth1, u32 *bth2)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_flow_state *fs = &qpriv->flow_state;
+ struct tid_rdma_request *req = ack_to_tid_req(e);
+ struct tid_rdma_flow *flow = &req->flows[iflow];
+ struct tid_rdma_params *remote;
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
+ ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 = remote->qp;
+ rcu_read_unlock();
+
+ if (qpriv->resync) {
+ *bth2 = mask_psn((fs->generation <<
+ HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
+ ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
+ } else if (qpriv->s_nak_state) {
+ *bth2 = mask_psn(qpriv->s_nak_psn);
+ ohdr->u.tid_rdma.ack.aeth =
+ cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
+ (qpriv->s_nak_state <<
+ IB_AETH_CREDIT_SHIFT));
+ } else {
+ *bth2 = full_flow_psn(flow, flow->flow_state.lpsn);
+ ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
+ }
+ KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
+ ohdr->u.tid_rdma.ack.tid_flow_qp =
+ cpu_to_be32(qpriv->tid_rdma.local.qp |
+ ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
+ TID_RDMA_DESTQP_FLOW_SHIFT) |
+ qpriv->rcd->ctxt);
+
+ ohdr->u.tid_rdma.ack.tid_flow_psn = 0;
+ ohdr->u.tid_rdma.ack.verbs_psn =
+ cpu_to_be32(flow->flow_state.resp_ib_psn);
+
+ if (qpriv->resync) {
+ /*
+ * If the PSN before the current expected KDETH PSN is the
+ * RESYNC PSN, then we never received a good TID RDMA WRITE
+ * DATA packet after a previous RESYNC.
+ * In this case, the next expected KDETH PSN stays the same.
+ */
+ if (hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1)) {
+ ohdr->u.tid_rdma.ack.tid_flow_psn =
+ cpu_to_be32(qpriv->r_next_psn_kdeth_save);
+ } else {
+ /*
+ * Because the KDETH PSNs jump during a RESYNC, it's
+ * not possible to infer (or compute) the previous value
+ * of r_next_psn_kdeth in the case of back-to-back
+ * RESYNC packets. Therefore, we save it.
+ */
+ qpriv->r_next_psn_kdeth_save =
+ qpriv->r_next_psn_kdeth - 1;
+ ohdr->u.tid_rdma.ack.tid_flow_psn =
+ cpu_to_be32(qpriv->r_next_psn_kdeth_save);
+ qpriv->r_next_psn_kdeth = mask_psn(*bth2 + 1);
+ }
+ qpriv->resync = false;
+ }
+
+ return sizeof(ohdr->u.tid_rdma.ack) / sizeof(u32);
+}
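+
+/*
+ * Worked example for the resync branch above (assuming
+ * HFI1_KDETH_BTH_SEQ_SHIFT == 11): with fs->generation == 7,
+ * *bth2 == (7 << 11) - 1 == 0x37ff, whose sequence bits are all ones --
+ * exactly the pattern hfi1_tid_rdma_is_resync_psn() tests for.
+ */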
+
+void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
+{
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ u32 aeth, psn, req_psn, ack_psn, fspsn, resync_psn, ack_kpsn;
+ bool is_fecn;
+ unsigned long flags;
+ u16 fidx;
+
+ trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0);
+ is_fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ aeth = be32_to_cpu(ohdr->u.tid_rdma.ack.aeth);
+ req_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.verbs_psn));
+ resync_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.tid_flow_psn));
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn);
+
+ /* If we are waiting for an ACK to RESYNC, drop any other packets */
+ if ((qp->s_flags & HFI1_S_WAIT_HALT) &&
+ cmp_psn(psn, qpriv->s_resync_psn))
+ goto ack_op_err;
+
+ ack_psn = req_psn;
+ if (hfi1_tid_rdma_is_resync_psn(psn))
+ ack_kpsn = resync_psn;
+ else
+ ack_kpsn = psn;
+ if (aeth >> 29) {
+ ack_psn--;
+ ack_kpsn--;
+ }
+
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
+ goto ack_op_err;
+
+ req = wqe_to_tid_req(wqe);
+ trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ flow = &req->flows[req->acked_tail];
+ trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
+
+ /* Drop stale ACK/NAK */
+ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
+ goto ack_op_err;
+
+ while (cmp_psn(ack_kpsn,
+ full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 &&
+ req->ack_seg < req->cur_seg) {
+ req->ack_seg++;
+ /* advance acked segment pointer */
+ req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS);
+ req->r_last_acked = flow->flow_state.resp_ib_psn;
+ trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ if (req->ack_seg == req->total_segs) {
+ req->state = TID_REQUEST_COMPLETE;
+ wqe = do_rc_completion(qp, wqe,
+ to_iport(qp->ibqp.device,
+ qp->port_num));
+ trace_hfi1_sender_rcv_tid_ack(qp);
+ atomic_dec(&qpriv->n_tid_requests);
+ if (qp->s_acked == qp->s_tail)
+ break;
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
+ break;
+ req = wqe_to_tid_req(wqe);
+ }
+ flow = &req->flows[req->acked_tail];
+ trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
+ }
+
+ trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ switch (aeth >> 29) {
+ case 0: /* ACK */
+ if (qpriv->s_flags & RVT_S_WAIT_ACK)
+ qpriv->s_flags &= ~RVT_S_WAIT_ACK;
+ if (!hfi1_tid_rdma_is_resync_psn(psn)) {
+ /* Check if there is any pending TID ACK */
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+ req->ack_seg < req->cur_seg)
+ hfi1_mod_tid_retry_timer(qp);
+ else
+ hfi1_stop_tid_retry_timer(qp);
+ hfi1_schedule_send(qp);
+ } else {
+ u32 spsn, fpsn, last_acked, generation;
+ struct tid_rdma_request *rptr;
+
+ /* ACK(RESYNC) */
+ hfi1_stop_tid_retry_timer(qp);
+ /* Allow new requests (see hfi1_make_tid_rdma_pkt) */
+ qp->s_flags &= ~HFI1_S_WAIT_HALT;
+ /*
+ * Clear the RVT_S_SEND_ONE flag in case the TID RDMA
+ * ACK is received after the TID retry timer has fired
+ * again. In this case, do not send any more TID
+ * RESYNC requests or wait for any more TID ACK packets.
+ */
+ qpriv->s_flags &= ~RVT_S_SEND_ONE;
+ hfi1_schedule_send(qp);
+
+ if ((qp->s_acked == qpriv->s_tid_tail &&
+ req->ack_seg == req->total_segs) ||
+ qp->s_acked == qp->s_tail) {
+ qpriv->s_state = TID_OP(WRITE_DATA_LAST);
+ goto done;
+ }
+
+ if (req->ack_seg == req->comp_seg) {
+ qpriv->s_state = TID_OP(WRITE_DATA);
+ goto done;
+ }
+
+ /*
+ * The PSN to start with is the next PSN after the
+ * RESYNC PSN.
+ */
+ psn = mask_psn(psn + 1);
+ generation = psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
+ spsn = 0;
+
+ /*
+ * Update to the correct WQE when we get an ACK(RESYNC)
+ * in the middle of a request.
+ */
+ if (delta_psn(ack_psn, wqe->lpsn))
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ req = wqe_to_tid_req(wqe);
+ flow = &req->flows[req->acked_tail];
+ /*
+ * RESYNC re-numbers the PSN ranges of all remaining
+ * segments. Also, PSNs start from 0 in the middle of a
+ * segment, so the first segment's size is less than the
+ * default number of packets. flow->resync_npkts is used
+ * to track the number of packets from the start of the
+ * real segment to the point where the PSN restarts at 0
+ * after the RESYNC, in order to later correctly rewind
+ * the SGE.
+ */
+ fpsn = full_flow_psn(flow, flow->flow_state.spsn);
+ req->r_ack_psn = psn;
+ flow->resync_npkts +=
+ delta_psn(mask_psn(resync_psn + 1), fpsn);
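+ /*
+ * Illustrative example (hypothetical numbers): if the real segment
+ * starts at full flow PSN 0x100 (fpsn) and the RESYNC PSN is 0x104,
+ * then delta_psn(0x105, 0x100) adds 5 to resync_npkts -- the packets
+ * already sent under the old numbering that the SGE rewind must skip.
+ */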
+ /*
+ * Renumber all packet sequence number ranges
+ * based on the new generation.
+ */
+ last_acked = qp->s_acked;
+ rptr = req;
+ while (1) {
+ /* start from last acked segment */
+ for (fidx = rptr->acked_tail;
+ CIRC_CNT(rptr->setup_head, fidx,
+ MAX_FLOWS);
+ fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
+ u32 lpsn;
+ u32 gen;
+
+ flow = &rptr->flows[fidx];
+ gen = flow->flow_state.generation;
+ if (WARN_ON(gen == generation &&
+ flow->flow_state.spsn !=
+ spsn))
+ continue;
+ lpsn = flow->flow_state.lpsn;
+ lpsn = full_flow_psn(flow, lpsn);
+ flow->npkts =
+ delta_psn(lpsn,
+ mask_psn(resync_psn)
+ );
+ flow->flow_state.generation =
+ generation;
+ flow->flow_state.spsn = spsn;
+ flow->flow_state.lpsn =
+ flow->flow_state.spsn +
+ flow->npkts - 1;
+ flow->pkt = 0;
+ spsn += flow->npkts;
+ resync_psn += flow->npkts;
+ trace_hfi1_tid_flow_rcv_tid_ack(qp,
+ fidx,
+ flow);
+ }
+ if (++last_acked == qpriv->s_tid_cur + 1)
+ break;
+ if (last_acked == qp->s_size)
+ last_acked = 0;
+ wqe = rvt_get_swqe_ptr(qp, last_acked);
+ rptr = wqe_to_tid_req(wqe);
+ }
+ req->cur_seg = req->ack_seg;
+ qpriv->s_tid_tail = qp->s_acked;
+ qpriv->s_state = TID_OP(WRITE_REQ);
+ hfi1_schedule_tid_send(qp);
+ }
+done:
+ qpriv->s_retry = qp->s_retry_cnt;
+ break;
+
+ case 3: /* NAK */
+ hfi1_stop_tid_retry_timer(qp);
+ switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
+ IB_AETH_CREDIT_MASK) {
+ case 0: /* PSN sequence error */
+ flow = &req->flows[req->acked_tail];
+ fspsn = full_flow_psn(flow, flow->flow_state.spsn);
+ trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
+ flow);
+ req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+ req->cur_seg = req->ack_seg;
+ qpriv->s_tid_tail = qp->s_acked;
+ qpriv->s_state = TID_OP(WRITE_REQ);
+ qpriv->s_retry = qp->s_retry_cnt;
+ hfi1_schedule_tid_send(qp);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ack_op_err:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ lockdep_assert_held(&qp->s_lock);
+ if (!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) {
+ priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
+ priv->s_tid_retry_timer.expires = jiffies +
+ priv->tid_retry_timeout_jiffies + rdi->busy_jiffies;
+ add_timer(&priv->s_tid_retry_timer);
+ }
+}
+
+static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ lockdep_assert_held(&qp->s_lock);
+ priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
+ mod_timer(&priv->s_tid_retry_timer, jiffies +
+ priv->tid_retry_timeout_jiffies + rdi->busy_jiffies);
+}
+
+static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ int rval = 0;
+
+ lockdep_assert_held(&qp->s_lock);
+ if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
+ rval = del_timer(&priv->s_tid_retry_timer);
+ priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
+ }
+ return rval;
+}
+
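+/*
+ * Informal note: this variant uses del_timer_sync(), which waits for a
+ * running timeout handler to finish. Since hfi1_tid_retry_timeout()
+ * takes qp->s_lock, the caller must not hold the s_lock here, whereas
+ * hfi1_stop_tid_retry_timer() above uses del_timer() under the s_lock.
+ */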
+void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ del_timer_sync(&priv->s_tid_retry_timer);
+ priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
+}
+
+static void hfi1_tid_retry_timeout(struct timer_list *t)
+{
+ struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer);
+ struct rvt_qp *qp = priv->owner;
+ struct rvt_swqe *wqe;
+ unsigned long flags;
+ struct tid_rdma_request *req;
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+ spin_lock(&qp->s_lock);
+ trace_hfi1_tid_write_sender_retry_timeout(qp, 0);
+ if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
+ hfi1_stop_tid_retry_timer(qp);
+ if (!priv->s_retry) {
+ trace_hfi1_msg_tid_retry_timeout(/* msg */
+ qp,
+ "Exhausted retries. Tid retry timeout = ",
+ (u64)priv->tid_retry_timeout_jiffies);
+
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ } else {
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+ req = wqe_to_tid_req(wqe);
+ trace_hfi1_tid_req_tid_retry_timeout(/* req */
+ qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
+
+ priv->s_flags &= ~RVT_S_WAIT_ACK;
+ /* Only send one packet (the RESYNC) */
+ priv->s_flags |= RVT_S_SEND_ONE;
+ /*
+ * No additional request shall be made by this QP until
+ * the RESYNC has completed.
+ */
+ qp->s_flags |= HFI1_S_WAIT_HALT;
+ priv->s_state = TID_OP(RESYNC);
+ priv->s_retry--;
+ hfi1_schedule_tid_send(qp);
+ }
+ }
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+}
+
+u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u16 fidx)
+{
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct tid_rdma_params *remote;
+ struct tid_rdma_request *req = wqe_to_tid_req(wqe);
+ struct tid_rdma_flow *flow = &req->flows[fidx];
+ u32 generation;
+
+ rcu_read_lock();
+ remote = rcu_dereference(qpriv->tid_rdma.remote);
+ KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
+ ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
+ *bth1 = remote->qp;
+ rcu_read_unlock();
+
+ generation = kern_flow_generation_next(flow->flow_state.generation);
+ *bth2 = mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
+ qpriv->s_resync_psn = *bth2;
+ *bth2 |= IB_BTH_REQ_ACK;
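+ /*
+ * Informal worked example: with the generation field starting at bit
+ * HFI1_KDETH_BTH_SEQ_SHIFT, (generation << SHIFT) - 1 is the last PSN
+ * of the previous generation, so the receiver's psn + 1 (see
+ * hfi1_rc_rcv_tid_rdma_resync()) lands on the first PSN of the new
+ * generation.
+ */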
+ KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
+
+ return sizeof(ohdr->u.tid_rdma.resync) / sizeof(u32);
+}
+
+void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
+{
+ struct ib_other_headers *ohdr = packet->ohdr;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_ctxtdata *rcd = qpriv->rcd;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct rvt_ack_entry *e;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+ struct tid_flow_state *fs = &qpriv->flow_state;
+ u32 psn, generation, idx, gen_next;
+ bool is_fecn;
+ unsigned long flags;
+
+ is_fecn = process_ecn(qp, packet);
+ psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+
+ generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT;
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ gen_next = (fs->generation == KERN_GENERATION_RESERVED) ?
+ generation : kern_flow_generation_next(fs->generation);
+ /*
+ * RESYNC packet contains the "next" generation and can only be
+ * from the current or previous generations
+ */
+ if (generation != mask_generation(gen_next - 1) &&
+ generation != gen_next)
+ goto bail;
+ /* Already processing a resync */
+ if (qpriv->resync)
+ goto bail;
+
+ spin_lock(&rcd->exp_lock);
+ if (fs->index >= RXE_NUM_TID_FLOWS) {
+ /*
+ * If we don't have a flow, save the generation so it can be
+ * applied when a new flow is allocated
+ */
+ fs->generation = generation;
+ } else {
+ /* Reprogram the QP flow with new generation */
+ rcd->flows[fs->index].generation = generation;
+ fs->generation = kern_setup_hw_flow(rcd, fs->index);
+ }
+ fs->psn = 0;
+ /*
+ * Disable SW PSN checking since a RESYNC is equivalent to a
+ * sync point and the flow has/will be reprogrammed
+ */
+ qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
+ trace_hfi1_tid_write_rsp_rcv_resync(qp);
+
+ /*
+ * Reset all TID flow information with the new generation.
+ * This is done for all requests and segments after the
+ * last received segment
+ */
+ for (idx = qpriv->r_tid_tail; ; idx++) {
+ u16 flow_idx;
+
+ if (idx > rvt_size_atomic(&dev->rdi))
+ idx = 0;
+ e = &qp->s_ack_queue[idx];
+ if (e->opcode == TID_OP(WRITE_REQ)) {
+ req = ack_to_tid_req(e);
+ trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn,
+ e->lpsn, req);
+
+ /* start from last unacked segment */
+ for (flow_idx = req->clear_tail;
+ CIRC_CNT(req->setup_head, flow_idx,
+ MAX_FLOWS);
+ flow_idx = CIRC_NEXT(flow_idx, MAX_FLOWS)) {
+ u32 lpsn;
+ u32 next;
+
+ flow = &req->flows[flow_idx];
+ lpsn = full_flow_psn(flow,
+ flow->flow_state.lpsn);
+ next = flow->flow_state.r_next_psn;
+ flow->npkts = delta_psn(lpsn, next - 1);
+ flow->flow_state.generation = fs->generation;
+ flow->flow_state.spsn = fs->psn;
+ flow->flow_state.lpsn =
+ flow->flow_state.spsn + flow->npkts - 1;
+ flow->flow_state.r_next_psn =
+ full_flow_psn(flow,
+ flow->flow_state.spsn);
+ fs->psn += flow->npkts;
+ trace_hfi1_tid_flow_rcv_resync(qp, flow_idx,
+ flow);
+ }
+ }
+ if (idx == qp->s_tail_ack_queue)
+ break;
+ }
+
+ spin_unlock(&rcd->exp_lock);
+ qpriv->resync = true;
+ /* RESYNC request always gets a TID RDMA ACK. */
+ qpriv->s_nak_state = 0;
+ qpriv->s_flags |= RVT_S_ACK_PENDING;
+ hfi1_schedule_tid_send(qp);
+bail:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * Call this function when the last TID RDMA WRITE DATA packet for a request
+ * is built.
+ */
+static void update_tid_tail(struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ u32 i;
+ struct rvt_swqe *wqe;
+
+ lockdep_assert_held(&qp->s_lock);
+ /* Can't move beyond s_tid_cur */
+ if (priv->s_tid_tail == priv->s_tid_cur)
+ return;
+ for (i = priv->s_tid_tail + 1; ; i++) {
+ if (i == qp->s_size)
+ i = 0;
+
+ if (i == priv->s_tid_cur)
+ break;
+ wqe = rvt_get_swqe_ptr(qp, i);
+ if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
+ break;
+ }
+ priv->s_tid_tail = i;
+ priv->s_state = TID_OP(WRITE_RESP);
+}
+
+int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
+ __must_hold(&qp->s_lock)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct rvt_swqe *wqe;
+ u32 bth1 = 0, bth2 = 0, hwords = 5, len, middle = 0;
+ struct ib_other_headers *ohdr;
+ struct rvt_sge_state *ss = &qp->s_sge;
+ struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ struct tid_rdma_request *req = ack_to_tid_req(e);
+ bool last = false;
+ u8 opcode = TID_OP(WRITE_DATA);
+
+ lockdep_assert_held(&qp->s_lock);
+ trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
+ /*
+ * Prioritize the sending of the requests and responses over the
+ * sending of the TID RDMA data packets.
+ */
+ if (((atomic_read(&priv->n_tid_requests) < HFI1_TID_RDMA_WRITE_CNT) &&
+ atomic_read(&priv->n_requests) &&
+ !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK |
+ HFI1_S_ANY_WAIT_IO))) ||
+ (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
+ !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) {
+ struct iowait_work *iowork;
+
+ iowork = iowait_get_ib_work(&priv->s_iowait);
+ ps->s_txreq = get_waiting_verbs_txreq(iowork);
+ if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) {
+ priv->s_flags |= HFI1_S_TID_BUSY_SET;
+ return 1;
+ }
+ }
+
+ ps->s_txreq = get_txreq(ps->dev, qp);
+ if (!ps->s_txreq)
+ goto bail_no_tx;
+
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
+
+ if ((priv->s_flags & RVT_S_ACK_PENDING) &&
+ make_tid_rdma_ack(qp, ohdr, ps))
+ return 1;
+
+ /*
+ * Bail out if we can't send data.
+ * Note that this check must be done after the call to
+ * make_tid_rdma_ack() because the responding QP could be in
+ * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+ */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+ goto bail;
+
+ if (priv->s_flags & RVT_S_WAIT_ACK)
+ goto bail;
+
+ /* Check whether there is anything to do. */
+ if (priv->s_tid_tail == HFI1_QP_WQE_INVALID)
+ goto bail;
+ wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
+ req = wqe_to_tid_req(wqe);
+ trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn,
+ wqe->lpsn, req);
+ switch (priv->s_state) {
+ case TID_OP(WRITE_REQ):
+ case TID_OP(WRITE_RESP):
+ priv->tid_ss.sge = wqe->sg_list[0];
+ priv->tid_ss.sg_list = wqe->sg_list + 1;
+ priv->tid_ss.num_sge = wqe->wr.num_sge;
+ priv->tid_ss.total_len = wqe->length;
+
+ if (priv->s_state == TID_OP(WRITE_REQ))
+ hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
+ priv->s_state = TID_OP(WRITE_DATA);
+ /* fall through */
+
+ case TID_OP(WRITE_DATA):
+ /*
+ * 1. Check whether TID RDMA WRITE RESP available.
+ * 2. If no:
+ * 2.1 If there are more segments and no TID RDMA WRITE RESP,
+ * set HFI1_S_WAIT_TID_RESP
+ * 2.2 Return indicating no progress made.
+ * 3. If yes:
+ * 3.1 Build TID RDMA WRITE DATA packet.
+ * 3.2 If last packet in segment:
+ * 3.2.1 Change KDETH header bits
+ * 3.2.2 Advance RESP pointers.
+ * 3.3 Return indicating progress made.
+ */
+ trace_hfi1_sender_make_tid_pkt(qp);
+ trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
+ wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
+ req = wqe_to_tid_req(wqe);
+ len = wqe->length;
+
+ if (!req->comp_seg || req->cur_seg == req->comp_seg)
+ goto bail;
+
+ trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode,
+ wqe->psn, wqe->lpsn, req);
+ last = hfi1_build_tid_rdma_packet(wqe, ohdr, &bth1, &bth2,
+ &len);
+
+ if (last) {
+ /* move pointer to next flow */
+ req->clear_tail = CIRC_NEXT(req->clear_tail,
+ MAX_FLOWS);
+ if (++req->cur_seg < req->total_segs) {
+ if (!CIRC_CNT(req->setup_head, req->clear_tail,
+ MAX_FLOWS))
+ qp->s_flags |= HFI1_S_WAIT_TID_RESP;
+ } else {
+ priv->s_state = TID_OP(WRITE_DATA_LAST);
+ opcode = TID_OP(WRITE_DATA_LAST);
+
+ /* Advance the s_tid_tail now */
+ update_tid_tail(qp);
+ }
+ }
+ hwords += sizeof(ohdr->u.tid_rdma.w_data) / sizeof(u32);
+ ss = &priv->tid_ss;
+ break;
+
+ case TID_OP(RESYNC):
+ trace_hfi1_sender_make_tid_pkt(qp);
+ /* Use generation from the most recently received response */
+ wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
+ req = wqe_to_tid_req(wqe);
+ /* If no responses for this WQE look at the previous one */
+ if (!req->comp_seg) {
+ wqe = rvt_get_swqe_ptr(qp,
+ (!priv->s_tid_cur ? qp->s_size :
+ priv->s_tid_cur) - 1);
+ req = wqe_to_tid_req(wqe);
+ }
+ hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1,
+ &bth2,
+ CIRC_PREV(req->setup_head,
+ MAX_FLOWS));
+ ss = NULL;
+ len = 0;
+ opcode = TID_OP(RESYNC);
+ break;
+
+ default:
+ goto bail;
+ }
+ if (priv->s_flags & RVT_S_SEND_ONE) {
+ priv->s_flags &= ~RVT_S_SEND_ONE;
+ priv->s_flags |= RVT_S_WAIT_ACK;
+ bth2 |= IB_BTH_REQ_ACK;
+ }
+ qp->s_len -= len;
+ ps->s_txreq->hdr_dwords = hwords;
+ ps->s_txreq->sde = priv->s_sde;
+ ps->s_txreq->ss = ss;
+ ps->s_txreq->s_cur_size = len;
+ hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
+ middle, ps);
+ return 1;
+bail:
+ hfi1_put_txreq(ps->s_txreq);
+bail_no_tx:
+ ps->s_txreq = NULL;
+ priv->s_flags &= ~RVT_S_BUSY;
+ /*
+ * If we didn't get a txreq, the QP will be woken up later to try
+ * again. Set the flags to indicate which work item to wake up.
+ * (A better algorithm should be found to do this and generalize the
+ * sleep/wakeup flags.)
+ */
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
return 0;
}
+
+static int make_tid_rdma_ack(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ struct hfi1_pkt_state *ps)
+{
+ struct rvt_ack_entry *e;
+ struct hfi1_qp_priv *qpriv = qp->priv;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ u32 hwords, next;
+ u32 len = 0;
+ u32 bth1 = 0, bth2 = 0;
+ int middle = 0;
+ u16 flow;
+ struct tid_rdma_request *req, *nreq;
+
+ trace_hfi1_tid_write_rsp_make_tid_ack(qp);
+ /* Don't send an ACK if we aren't supposed to. */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
+ goto bail;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+
+ e = &qp->s_ack_queue[qpriv->r_tid_ack];
+ req = ack_to_tid_req(e);
+ /*
+ * In the RESYNC case, we are exactly one segment past the
+ * previously sent ack or at the previously sent NAK. So to send
+ * the resync ack, we go back one segment (which might be part of
+ * the previous request) and let the do-while loop execute again.
+ * The advantage of executing the do-while loop is that any data
+ * received after the previous ack is automatically acked in the
+ * RESYNC ack. It turns out that for the do-while loop we only need
+ * to pull back qpriv->r_tid_ack, not the segment
+ * indices/counters. The scheme works even if the previous request
+ * was not a TID WRITE request.
+ */
+ if (qpriv->resync) {
+ if (!req->ack_seg || req->ack_seg == req->total_segs)
+ qpriv->r_tid_ack = !qpriv->r_tid_ack ?
+ rvt_size_atomic(&dev->rdi) :
+ qpriv->r_tid_ack - 1;
+ e = &qp->s_ack_queue[qpriv->r_tid_ack];
+ req = ack_to_tid_req(e);
+ }
+
+ trace_hfi1_rsp_make_tid_ack(qp, e->psn);
+ trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ /*
+ * If we've sent all the ACKs that we can, we are done
+ * until we get more segments...
+ */
+ if (!qpriv->s_nak_state && !qpriv->resync &&
+ req->ack_seg == req->comp_seg)
+ goto bail;
+
+ do {
+ /*
+ * To deal with coalesced ACKs, the acked_tail pointer
+ * into the flow array is used. The distance between it
+ * and the clear_tail is the number of flows that are
+ * being ACK'ed.
+ */
+ req->ack_seg +=
+ /* Get up-to-date value */
+ CIRC_CNT(req->clear_tail, req->acked_tail,
+ MAX_FLOWS);
+ /* Advance acked index */
+ req->acked_tail = req->clear_tail;
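+ /*
+ * Illustrative example (hypothetical indices): with acked_tail == 2
+ * and clear_tail == 5, CIRC_CNT(5, 2, MAX_FLOWS) == 3, i.e. three
+ * segments are coalesced into this single ACK.
+ */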
+
+ /*
+ * req->clear_tail points to the segment currently being
+ * received. So, when sending an ACK, the previous
+ * segment is being ACK'ed.
+ */
+ flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
+ if (req->ack_seg != req->total_segs)
+ break;
+ req->state = TID_REQUEST_COMPLETE;
+
+ next = qpriv->r_tid_ack + 1;
+ if (next > rvt_size_atomic(&dev->rdi))
+ next = 0;
+ qpriv->r_tid_ack = next;
+ if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ))
+ break;
+ nreq = ack_to_tid_req(&qp->s_ack_queue[next]);
+ if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)
+ break;
+
+ /* Move to the next ack entry now */
+ e = &qp->s_ack_queue[qpriv->r_tid_ack];
+ req = ack_to_tid_req(e);
+ } while (1);
+
+ /*
+ * At this point qpriv->r_tid_ack == qpriv->r_tid_tail but e and
+ * req could be pointing at the previous ack queue entry
+ */
+ if (qpriv->s_nak_state ||
+ (qpriv->resync &&
+ !hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1) &&
+ (cmp_psn(qpriv->r_next_psn_kdeth - 1,
+ full_flow_psn(&req->flows[flow],
+ req->flows[flow].flow_state.lpsn)) > 0))) {
+ /*
+ * A NAK will implicitly acknowledge all previous TID RDMA
+ * requests. Therefore, we NAK with the req->acked_tail
+ * segment for the request at qpriv->r_tid_ack (same at
+ * this point as the req->clear_tail segment for the
+ * qpriv->r_tid_tail request)
+ */
+ e = &qp->s_ack_queue[qpriv->r_tid_ack];
+ req = ack_to_tid_req(e);
+ flow = req->acked_tail;
+ } else if (req->ack_seg == req->total_segs &&
+ qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK)
+ qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
+
+ trace_hfi1_tid_write_rsp_make_tid_ack(qp);
+ trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
+ req);
+ hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
+ &bth2);
+ len = 0;
+ qpriv->s_flags &= ~RVT_S_ACK_PENDING;
+ ps->s_txreq->hdr_dwords = hwords;
+ ps->s_txreq->sde = qpriv->s_sde;
+ ps->s_txreq->s_cur_size = len;
+ ps->s_txreq->ss = NULL;
+ hfi1_make_ruc_header(qp, ohdr, (TID_OP(ACK) << 24), bth1, bth2, middle,
+ ps);
+ ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
+ return 1;
+bail:
+ /*
+ * Ensure s_rdma_ack_cnt changes are committed prior to clearing
+ * RVT_S_ACK_PENDING
+ */
+ smp_wmb();
+ qpriv->s_flags &= ~RVT_S_ACK_PENDING;
+ return 0;
+}
+
+static int hfi1_send_tid_ok(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ return !(priv->s_flags & RVT_S_BUSY ||
+ qp->s_flags & HFI1_S_ANY_WAIT_IO) &&
+ (verbs_txreq_queued(iowait_get_tid_work(&priv->s_iowait)) ||
+ (priv->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND));
+}
+
+void _hfi1_do_tid_send(struct work_struct *work)
+{
+ struct iowait_work *w = container_of(work, struct iowait_work, iowork);
+ struct rvt_qp *qp = iowait_to_qp(w->iow);
+
+ hfi1_do_tid_send(qp);
+}
+
+static void hfi1_do_tid_send(struct rvt_qp *qp)
+{
+ struct hfi1_pkt_state ps;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ ps.dev = to_idev(qp->ibqp.device);
+ ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ps.ppd = ppd_from_ibp(ps.ibp);
+ ps.wait = iowait_get_tid_work(&priv->s_iowait);
+ ps.in_thread = false;
+ ps.timeout_int = qp->timeout_jiffies / 8;
+
+ trace_hfi1_rc_do_tid_send(qp, false);
+ spin_lock_irqsave(&qp->s_lock, ps.flags);
+
+ /* Return if we are already busy processing a work request. */
+ if (!hfi1_send_tid_ok(qp)) {
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
+ return;
+ }
+
+ priv->s_flags |= RVT_S_BUSY;
+
+ ps.timeout = jiffies + ps.timeout_int;
+ ps.cpu = priv->s_sde ? priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(ps.ppd->dd->node));
+ ps.pkts_sent = false;
+
+ /* ensure a pre-built packet is handled */
+ ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
+ do {
+ /* Check for a constructed packet to be sent. */
+ if (ps.s_txreq) {
+ if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
+ qp->s_flags |= RVT_S_BUSY;
+ ps.wait = iowait_get_ib_work(&priv->s_iowait);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
+
+ /*
+ * If the packet cannot be sent now, return and
+ * the send tasklet will be woken up later.
+ */
+ if (hfi1_verbs_send(qp, &ps))
+ return;
+
+ /* allow other tasks to run */
+ if (hfi1_schedule_send_yield(qp, &ps, true))
+ return;
+
+ spin_lock_irqsave(&qp->s_lock, ps.flags);
+ if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
+ qp->s_flags &= ~RVT_S_BUSY;
+ priv->s_flags &= ~HFI1_S_TID_BUSY_SET;
+ ps.wait = iowait_get_tid_work(&priv->s_iowait);
+ if (iowait_flag_set(&priv->s_iowait,
+ IOWAIT_PENDING_IB))
+ hfi1_schedule_send(qp);
+ }
+ }
+ } while (hfi1_make_tid_rdma_pkt(qp, &ps));
+ iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
+}
+
+static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+
+ return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
+ priv->s_sde ?
+ priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->node)));
+}
+
+/**
+ * hfi1_schedule_tid_send - schedule progress on TID RDMA state machine
+ * @qp: the QP
+ *
+ * This schedules qp progress on the TID RDMA state machine. Caller
+ * should hold the s_lock.
+ * Unlike hfi1_schedule_send(), this cannot use hfi1_send_ok() because
+ * the two state machines can step on each other with respect to the
+ * RVT_S_BUSY flag.
+ * Therefore, a modified test is used.
+ * Return: true if the second leg is scheduled;
+ * false if the second leg is not scheduled.
+ */
+bool hfi1_schedule_tid_send(struct rvt_qp *qp)
+{
+ lockdep_assert_held(&qp->s_lock);
+ if (hfi1_send_tid_ok(qp)) {
+ /*
+ * The following call returns true if the qp is not on the
+ * queue and false if the qp is already on the queue before
+ * this call. Either way, the qp will be on the queue when the
+ * call returns.
+ */
+ _hfi1_schedule_tid_send(qp);
+ return true;
+ }
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
+ IOWAIT_PENDING_TID);
+ return false;
+}
+
+bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
+{
+ struct rvt_ack_entry *prev;
+ struct tid_rdma_request *req;
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct hfi1_qp_priv *priv = qp->priv;
+ u32 s_prev;
+
+ s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) :
+ (qp->s_tail_ack_queue - 1);
+ prev = &qp->s_ack_queue[s_prev];
+
+ if ((e->opcode == TID_OP(READ_REQ) ||
+ e->opcode == OP(RDMA_READ_REQUEST)) &&
+ prev->opcode == TID_OP(WRITE_REQ)) {
+ req = ack_to_tid_req(prev);
+ if (req->ack_seg != req->total_segs) {
+ priv->s_flags |= HFI1_R_TID_WAIT_INTERLCK;
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.h b/drivers/infiniband/hw/hfi1/tid_rdma.h
index 6fcd3adcdcc3..53ab24ef4f02 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.h
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.h
@@ -6,8 +6,317 @@
#ifndef HFI1_TID_RDMA_H
#define HFI1_TID_RDMA_H
+#include <linux/circ_buf.h>
+#include "common.h"
+
+/* Add a convenience helper */
+#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
+#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
+#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
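+/*
+ * Usage sketch (assuming a power-of-two size, e.g. 16): CIRC_NEXT(15, 16)
+ * wraps to 0 and CIRC_PREV(0, 16) wraps to 15, since the mask (size - 1)
+ * reduces the result modulo size. The helpers are only correct for
+ * power-of-two sizes (MAX_FLOWS is assumed to be one).
+ */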
+
+#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
+#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
+#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
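+/*
+ * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
+ * TID_RDMA_MAX_PAGES = BIT(18) >> 12 = 64 pages per 256 KiB segment.
+ */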
+
+/*
+ * Bit definitions for priv->s_flags.
+ * These bit flags overload the bit flags defined for the QP's s_flags.
+ * Because these bit fields are used only for the QP priv s_flags,
+ * there are no collisions.
+ *
+ * HFI1_S_TID_WAIT_INTERLCK - QP is waiting for requester interlock
+ * HFI1_R_TID_WAIT_INTERLCK - QP is waiting for responder interlock
+ */
+#define HFI1_S_TID_BUSY_SET BIT(0)
+/* BIT(1) reserved for RVT_S_BUSY. */
+#define HFI1_R_TID_RSC_TIMER BIT(2)
+/* BIT(3) reserved for RVT_S_RESP_PENDING. */
+/* BIT(4) reserved for RVT_S_ACK_PENDING. */
+#define HFI1_S_TID_WAIT_INTERLCK BIT(5)
+#define HFI1_R_TID_WAIT_INTERLCK BIT(6)
+/* BIT(7) - BIT(15) reserved for RVT_S_WAIT_*. */
+/* BIT(16) reserved for RVT_S_SEND_ONE */
+#define HFI1_S_TID_RETRY_TIMER BIT(17)
+/* BIT(18) reserved for RVT_S_ECN. */
+#define HFI1_R_TID_SW_PSN BIT(19)
+/* BIT(26) reserved for HFI1_S_WAIT_HALT */
+/* BIT(27) reserved for HFI1_S_WAIT_TID_RESP */
+/* BIT(28) reserved for HFI1_S_WAIT_TID_SPACE */
+
+/*
+ * Unlike regular IB RDMA VERBS, which do not require an entry
+ * in the s_ack_queue, TID RDMA WRITE requests do because they
+ * generate responses.
+ * Therefore, the s_ack_queue needs to be extended by a certain
+ * amount. The key point is that the queue needs to be extended
+ * without letting the "user" know, so the user doesn't end up
+ * using these extra entries.
+ */
+#define HFI1_TID_RDMA_WRITE_CNT 8
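+/*
+ * Illustrative sizing sketch (assumed, see the s_ack_queue allocation):
+ * if the user negotiated N responder resources, the queue is allocated
+ * with N + HFI1_TID_RDMA_WRITE_CNT entries while the advertised limit
+ * stays N, reserving the extra entries for TID RDMA WRITE responses.
+ */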
+
+struct tid_rdma_params {
+ struct rcu_head rcu_head;
+ u32 qp;
+ u32 max_len;
+ u16 jkey;
+ u8 max_read;
+ u8 max_write;
+ u8 timeout;
+ u8 urg;
+ u8 version;
+};
+
+struct tid_rdma_qp_params {
+ struct work_struct trigger_work;
+ struct tid_rdma_params local;
+ struct tid_rdma_params __rcu *remote;
+};
+
+/* Track state for each hardware flow */
+struct tid_flow_state {
+ u32 generation;
+ u32 psn;
+ u32 r_next_psn; /* next PSN to be received (in TID space) */
+ u8 index;
+ u8 last_index;
+ u8 flags;
+};
+
+enum tid_rdma_req_state {
+ TID_REQUEST_INACTIVE = 0,
+ TID_REQUEST_INIT,
+ TID_REQUEST_INIT_RESEND,
+ TID_REQUEST_ACTIVE,
+ TID_REQUEST_RESEND,
+ TID_REQUEST_RESEND_ACTIVE,
+ TID_REQUEST_QUEUED,
+ TID_REQUEST_SYNC,
+ TID_REQUEST_RNR_NAK,
+ TID_REQUEST_COMPLETE,
+};
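+/*
+ * Informal lifecycle sketch (not exhaustive): a request typically moves
+ * from INIT to ACTIVE while its segments are in flight and to COMPLETE
+ * once all segments are acked; the RESEND, SYNC and RNR_NAK states
+ * cover the retry and resynchronization paths.
+ */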
+
+struct tid_rdma_request {
+ struct rvt_qp *qp;
+ struct hfi1_ctxtdata *rcd;
+ union {
+ struct rvt_swqe *swqe;
+ struct rvt_ack_entry *ack;
+ } e;
+
+ struct tid_rdma_flow *flows; /* array of tid flows */
+ struct rvt_sge_state ss; /* SGE state for TID RDMA requests */
+ u16 n_flows; /* size of the flow buffer window */
+ u16 setup_head; /* flow index we are setting up */
+ u16 clear_tail; /* flow index we are clearing */
+ u16 flow_idx; /* flow index most recently set up */
+ u16 acked_tail;
+
+ u32 seg_len;
+ u32 total_len;
+ u32 r_ack_psn; /* next expected ack PSN */
+ u32 r_flow_psn; /* IB PSN of next segment start */
+ u32 r_last_acked; /* IB PSN of last ACK'ed packet */
+ u32 s_next_psn; /* IB PSN of next segment start for read */
+
+ u32 total_segs; /* segments required to complete a request */
+ u32 cur_seg; /* index of current segment */
+ u32 comp_seg; /* index of last completed segment */
+ u32 ack_seg; /* index of last ack'ed segment */
+ u32 alloc_seg; /* index of next segment to be allocated */
+ u32 isge; /* index of "current" sge */
+ u32 ack_pending; /* num acks pending for this request */
+
+ enum tid_rdma_req_state state;
+};
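+/*
+ * Informal invariant for the circular flow indices above (modulo
+ * MAX_FLOWS): acked_tail trails clear_tail, which trails flow_idx,
+ * which trails setup_head -- flows are set up, transmitted, cleared
+ * and finally acked in that order.
+ */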
+
+/*
+ * When header suppression is used, PSNs associated with a "flow" are
+ * relevant (and not the PSNs maintained by verbs). Track per-flow
+ * PSNs here for a TID RDMA segment.
+ */
+struct flow_state {
+ u32 flags;
+ u32 resp_ib_psn; /* The IB PSN of the response for this flow */
+ u32 generation; /* generation of flow */
+ u32 spsn; /* starting PSN in TID space */
+ u32 lpsn; /* last PSN in TID space */
+ u32 r_next_psn; /* next PSN to be received (in TID space) */
+
+ /* For tid rdma read */
+ u32 ib_spsn; /* starting PSN in Verbs space */
+ u32 ib_lpsn; /* last PSN in Verbs space */
+};
+
+struct tid_rdma_pageset {
+ dma_addr_t addr : 48; /* Only needed for the first page */
+ u8 idx: 8;
+ u8 count : 7;
+ u8 mapped: 1;
+};
+
+/**
+ * kern_tid_node - used for managing TIDs in TID groups
+ *
+ * @grp: the rcd-relative tid_group this node refers to
+ * @map: grp->map captured prior to programming this TID group in HW
+ * @cnt: Only @cnt of available group entries are actually programmed
+ */
+struct kern_tid_node {
+ struct tid_group *grp;
+ u8 map;
+ u8 cnt;
+};
+
+/* Overall info for a TID RDMA segment */
+struct tid_rdma_flow {
+ /*
+ * While a TID RDMA segment is being transferred, it uses a QP number
+ * from the "KDETH section of QP numbers" (which is different from the
+ * QP number that originated the request). Bits 11-15 of these QP
+ * numbers identify the "TID flow" for the segment.
+ */
+ struct flow_state flow_state;
+ struct tid_rdma_request *req;
+ u32 tid_qpn;
+ u32 tid_offset;
+ u32 length;
+ u32 sent;
+ u8 tnode_cnt;
+ u8 tidcnt;
+ u8 tid_idx;
+ u8 idx;
+ u8 npagesets;
+ u8 npkts;
+ u8 pkt;
+ u8 resync_npkts;
+ struct kern_tid_node tnode[TID_RDMA_MAX_PAGES];
+ struct tid_rdma_pageset pagesets[TID_RDMA_MAX_PAGES];
+ u32 tid_entry[TID_RDMA_MAX_PAGES];
+};
+
+enum tid_rnr_nak_state {
+ TID_RNR_NAK_INIT = 0,
+ TID_RNR_NAK_SEND,
+ TID_RNR_NAK_SENT,
+};
+
+bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
+bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
+bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
+void tid_rdma_conn_error(struct rvt_qp *qp);
+void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);
+
+int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
+int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
+ struct rvt_sge_state *ss, bool *last);
+int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req);
+void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req);
+void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+
+/**
+ * trdma_clean_swqe - clean flows for swqe if large send queue
+ * @qp: the qp
+ * @wqe: the send wqe
+ */
+static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ if (!wqe->priv)
+ return;
+ __trdma_clean_swqe(qp, wqe);
+}
+
+void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp);
+
int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
struct ib_qp_init_attr *init_attr);
+void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
-#endif /* HFI1_TID_RDMA_H */
+void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp);
+
+int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
+void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
+void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);
+
+struct cntr_entry;
+u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data);
+
+u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len);
+u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u32 *len);
+void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet);
+u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u32 *bth0,
+ u32 *bth1, u32 *bth2, u32 *len, bool *last);
+void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet);
+bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ struct hfi1_pportdata *ppd,
+ struct hfi1_packet *packet);
+void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ u32 *bth2);
+void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp);
+bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
+
+void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
+ struct rvt_swqe *wqe)
+{
+ if (wqe->priv &&
+ (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
+ wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
+ setup_tid_rdma_wqe(qp, wqe);
+}
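+/*
+ * Usage sketch (illustrative): with the 256 KiB threshold above, a large
+ * IB_WR_RDMA_READ or IB_WR_RDMA_WRITE posted on a QP with TID RDMA state
+ * (wqe->priv set) is converted by setup_tid_rdma_wqe(), while smaller
+ * transfers remain ordinary RDMA READ/WRITE requests.
+ */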
+
+u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len);
+
+void hfi1_compute_tid_rdma_flow_wt(void);
+
+void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
+
+u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 bth2, u32 *len,
+ struct rvt_sge_state **ss);
+void hfi1_del_tid_reap_timer(struct rvt_qp *qp);
+
+void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet);
+
+bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u32 *bth1, u32 *bth2, u32 *len);
+
+void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet);
+
+u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
+ struct ib_other_headers *ohdr, u16 iflow,
+ u32 *bth1, u32 *bth2);
+
+void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet);
+
+void hfi1_add_tid_retry_timer(struct rvt_qp *qp);
+void hfi1_del_tid_retry_timer(struct rvt_qp *qp);
+
+u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+ u32 *bth2, u16 fidx);
+
+void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet);
+
+struct hfi1_pkt_state;
+int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
+
+void _hfi1_do_tid_send(struct work_struct *work);
+
+bool hfi1_schedule_tid_send(struct rvt_qp *qp);
+
+bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e);
+
+#endif /* HFI1_TID_RDMA_H */
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 7c8aed0ffc07..9a3d236bcc88 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -46,6 +46,7 @@
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
+#include "exp_rcv.h"
static u8 __get_ib_hdr_len(struct ib_header *hdr)
{
@@ -128,6 +129,15 @@ const char *hfi1_trace_get_packet_l2_str(u8 l2)
#define IETH_PRN "ieth rkey:0x%.8x"
#define ATOMICACKETH_PRN "origdata:%llx"
#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
+#define TID_RDMA_KDETH "kdeth0 0x%x kdeth1 0x%x"
+#define TID_RDMA_KDETH_DATA "kdeth0 0x%x: kver %u sh %u intr %u tidctrl %u tid %x offset %x kdeth1 0x%x: jkey %x"
+#define TID_READ_REQ_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
+#define TID_READ_RSP_PRN "verbs_qp 0x%x"
+#define TID_WRITE_REQ_PRN "original_qp 0x%x"
+#define TID_WRITE_RSP_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
+#define TID_WRITE_DATA_PRN "verbs_qp 0x%x"
+#define TID_ACK_PRN "tid_flow_psn 0x%x verbs_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
+#define TID_RESYNC_PRN "verbs_qp 0x%x"
#define OP(transport, op) IB_OPCODE_## transport ## _ ## op
@@ -322,6 +332,99 @@ const char *parse_everbs_hdrs(
parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
be32_to_cpu(eh->aeth) & IB_MSN_MASK);
break;
+ case OP(TID_RDMA, WRITE_REQ):
+ trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
+ TID_WRITE_REQ_PRN,
+ le32_to_cpu(eh->tid_rdma.w_req.kdeth0),
+ le32_to_cpu(eh->tid_rdma.w_req.kdeth1),
+ ib_u64_get(&eh->tid_rdma.w_req.reth.vaddr),
+ be32_to_cpu(eh->tid_rdma.w_req.reth.rkey),
+ be32_to_cpu(eh->tid_rdma.w_req.reth.length),
+ be32_to_cpu(eh->tid_rdma.w_req.verbs_qp));
+ break;
+ case OP(TID_RDMA, WRITE_RESP):
+ trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
+ TID_WRITE_RSP_PRN,
+ le32_to_cpu(eh->tid_rdma.w_rsp.kdeth0),
+ le32_to_cpu(eh->tid_rdma.w_rsp.kdeth1),
+ be32_to_cpu(eh->tid_rdma.w_rsp.aeth) >> 24,
+ parse_syndrome(/* aeth */
+ be32_to_cpu(eh->tid_rdma.w_rsp.aeth)
+ >> 24),
+ (be32_to_cpu(eh->tid_rdma.w_rsp.aeth) &
+ IB_MSN_MASK),
+ be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_psn),
+ be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_qp),
+ be32_to_cpu(eh->tid_rdma.w_rsp.verbs_qp));
+ break;
+ case OP(TID_RDMA, WRITE_DATA_LAST):
+ case OP(TID_RDMA, WRITE_DATA):
+ trace_seq_printf(p, TID_RDMA_KDETH_DATA " " TID_WRITE_DATA_PRN,
+ le32_to_cpu(eh->tid_rdma.w_data.kdeth0),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, KVER),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, SH),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, INTR),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, TIDCTRL),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, TID),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth0, OFFSET),
+ le32_to_cpu(eh->tid_rdma.w_data.kdeth1),
+ KDETH_GET(eh->tid_rdma.w_data.kdeth1, JKEY),
+ be32_to_cpu(eh->tid_rdma.w_data.verbs_qp));
+ break;
+ case OP(TID_RDMA, READ_REQ):
+ trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
+ TID_READ_REQ_PRN,
+ le32_to_cpu(eh->tid_rdma.r_req.kdeth0),
+ le32_to_cpu(eh->tid_rdma.r_req.kdeth1),
+ ib_u64_get(&eh->tid_rdma.r_req.reth.vaddr),
+ be32_to_cpu(eh->tid_rdma.r_req.reth.rkey),
+ be32_to_cpu(eh->tid_rdma.r_req.reth.length),
+ be32_to_cpu(eh->tid_rdma.r_req.tid_flow_psn),
+ be32_to_cpu(eh->tid_rdma.r_req.tid_flow_qp),
+ be32_to_cpu(eh->tid_rdma.r_req.verbs_qp));
+ break;
+ case OP(TID_RDMA, READ_RESP):
+ trace_seq_printf(p, TID_RDMA_KDETH_DATA " " AETH_PRN " "
+ TID_READ_RSP_PRN,
+ le32_to_cpu(eh->tid_rdma.r_rsp.kdeth0),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, KVER),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, SH),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, INTR),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TIDCTRL),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TID),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, OFFSET),
+ le32_to_cpu(eh->tid_rdma.r_rsp.kdeth1),
+ KDETH_GET(eh->tid_rdma.r_rsp.kdeth1, JKEY),
+ be32_to_cpu(eh->tid_rdma.r_rsp.aeth) >> 24,
+ parse_syndrome(/* aeth */
+ be32_to_cpu(eh->tid_rdma.r_rsp.aeth)
+ >> 24),
+ (be32_to_cpu(eh->tid_rdma.r_rsp.aeth) &
+ IB_MSN_MASK),
+ be32_to_cpu(eh->tid_rdma.r_rsp.verbs_qp));
+ break;
+ case OP(TID_RDMA, ACK):
+ trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
+ TID_ACK_PRN,
+ le32_to_cpu(eh->tid_rdma.ack.kdeth0),
+ le32_to_cpu(eh->tid_rdma.ack.kdeth1),
+ be32_to_cpu(eh->tid_rdma.ack.aeth) >> 24,
+ parse_syndrome(/* aeth */
+ be32_to_cpu(eh->tid_rdma.ack.aeth)
+ >> 24),
+ (be32_to_cpu(eh->tid_rdma.ack.aeth) &
+ IB_MSN_MASK),
+ be32_to_cpu(eh->tid_rdma.ack.tid_flow_psn),
+ be32_to_cpu(eh->tid_rdma.ack.verbs_psn),
+ be32_to_cpu(eh->tid_rdma.ack.tid_flow_qp),
+ be32_to_cpu(eh->tid_rdma.ack.verbs_qp));
+ break;
+ case OP(TID_RDMA, RESYNC):
+ trace_seq_printf(p, TID_RDMA_KDETH " " TID_RESYNC_PRN,
+ le32_to_cpu(eh->tid_rdma.resync.kdeth0),
+ le32_to_cpu(eh->tid_rdma.resync.kdeth1),
+ be32_to_cpu(eh->tid_rdma.resync.verbs_qp));
+ break;
/* aeth + atomicacketh */
case OP(RC, ATOMIC_ACKNOWLEDGE):
trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
@@ -394,6 +497,21 @@ const char *print_u32_array(
return ret;
}
+u8 hfi1_trace_get_tid_ctrl(u32 ent)
+{
+ return EXP_TID_GET(ent, CTRL);
+}
+
+u16 hfi1_trace_get_tid_len(u32 ent)
+{
+ return EXP_TID_GET(ent, LEN);
+}
+
+u16 hfi1_trace_get_tid_idx(u32 ent)
+{
+ return EXP_TID_GET(ent, IDX);
+}
+
__hfi1_trace_fn(AFFINITY);
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 84458f1325e1..1ce551864118 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -63,3 +63,4 @@ __print_symbolic(etype, \
#include "trace_tx.h"
#include "trace_mmu.h"
#include "trace_iowait.h"
+#include "trace_tid.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
index 1dc2c28fc96e..d1372cc66de6 100644
--- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -79,6 +79,14 @@ __print_symbolic(opcode, \
ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
ib_opcode_name(RC_COMPARE_SWAP), \
ib_opcode_name(RC_FETCH_ADD), \
+ ib_opcode_name(TID_RDMA_WRITE_REQ), \
+ ib_opcode_name(TID_RDMA_WRITE_RESP), \
+ ib_opcode_name(TID_RDMA_WRITE_DATA), \
+ ib_opcode_name(TID_RDMA_WRITE_DATA_LAST), \
+ ib_opcode_name(TID_RDMA_READ_REQ), \
+ ib_opcode_name(TID_RDMA_READ_RESP), \
+ ib_opcode_name(TID_RDMA_RESYNC), \
+ ib_opcode_name(TID_RDMA_ACK), \
ib_opcode_name(UC_SEND_FIRST), \
ib_opcode_name(UC_SEND_MIDDLE), \
ib_opcode_name(UC_SEND_LAST), \
diff --git a/drivers/infiniband/hw/hfi1/trace_rc.h b/drivers/infiniband/hw/hfi1/trace_rc.h
index 8ce476570462..1ebca37862e0 100644
--- a/drivers/infiniband/hw/hfi1/trace_rc.h
+++ b/drivers/infiniband/hw/hfi1/trace_rc.h
@@ -109,6 +109,54 @@ DEFINE_EVENT(hfi1_rc_template, hfi1_rcv_error,
TP_ARGS(qp, psn)
);
+DEFINE_EVENT(/* event */
+ hfi1_rc_template, hfi1_rc_completion,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DECLARE_EVENT_CLASS(/* rc_ack */
+ hfi1_rc_ack_template,
+ TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
+ struct rvt_swqe *wqe),
+ TP_ARGS(qp, aeth, psn, wqe),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, aeth)
+ __field(u32, psn)
+ __field(u8, opcode)
+ __field(u32, spsn)
+ __field(u32, lpsn)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->aeth = aeth;
+ __entry->psn = psn;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->spsn = wqe->psn;
+ __entry->lpsn = wqe->lpsn;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x aeth 0x%x psn 0x%x opcode 0x%x spsn 0x%x lpsn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->aeth,
+ __entry->psn,
+ __entry->opcode,
+ __entry->spsn,
+ __entry->lpsn
+ )
+);
+
+DEFINE_EVENT(/* do_rc_ack */
+ hfi1_rc_ack_template, hfi1_rc_ack_do,
+ TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
+ struct rvt_swqe *wqe),
+ TP_ARGS(qp, aeth, psn, wqe)
+);
+
#endif /* __HFI1_TRACE_RC_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 7eceb57e0415..3cec960e9674 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -128,111 +128,6 @@ TRACE_EVENT(hfi1_receive_interrupt,
)
);
-DECLARE_EVENT_CLASS(
- hfi1_exp_tid_reg_unreg,
- TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr,
- u32 npages, unsigned long va, unsigned long pa,
- dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
- TP_STRUCT__entry(
- __field(unsigned int, ctxt)
- __field(u16, subctxt)
- __field(u32, rarr)
- __field(u32, npages)
- __field(unsigned long, va)
- __field(unsigned long, pa)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->va = va;
- __entry->pa = pa;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->pa,
- __entry->va,
- __entry->dma
- )
- );
-
-DEFINE_EVENT(
- hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg,
- TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
- unsigned long va, unsigned long pa, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma));
-
-DEFINE_EVENT(
- hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg,
- TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
- unsigned long va, unsigned long pa, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma));
-
-TRACE_EVENT(
- hfi1_put_tid,
- TP_PROTO(struct hfi1_devdata *dd,
- u32 index, u32 type, unsigned long pa, u16 order),
- TP_ARGS(dd, index, type, pa, order),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(unsigned long, pa);
- __field(u32, index);
- __field(u32, type);
- __field(u16, order);
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->pa = pa;
- __entry->index = index;
- __entry->type = type;
- __entry->order = order;
- ),
- TP_printk("[%s] type %s pa %lx index %u order %u",
- __get_str(dev),
- show_tidtype(__entry->type),
- __entry->pa,
- __entry->index,
- __entry->order
- )
-);
-
-TRACE_EVENT(hfi1_exp_tid_inval,
- TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
- u32 npages, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
- TP_STRUCT__entry(
- __field(unsigned int, ctxt)
- __field(u16, subctxt)
- __field(unsigned long, va)
- __field(u32, rarr)
- __field(u32, npages)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->va = va;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->va,
- __entry->dma
- )
- );
-
TRACE_EVENT(hfi1_mmu_invalidate,
TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
unsigned long start, unsigned long end),
diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h
new file mode 100644
index 000000000000..548dfc45a407
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_tid.h
@@ -0,0 +1,1610 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#if !defined(__HFI1_TRACE_TID_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_TID_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#define tidtype_name(type) { PT_##type, #type }
+#define show_tidtype(type) \
+__print_symbolic(type, \
+ tidtype_name(EXPECTED), \
+ tidtype_name(EAGER), \
+ tidtype_name(INVALID))
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_tid
+
+u8 hfi1_trace_get_tid_ctrl(u32 ent);
+u16 hfi1_trace_get_tid_len(u32 ent);
+u16 hfi1_trace_get_tid_idx(u32 ent);
+
+#define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \
+ "max write %u, max length %u, jkey 0x%x timeout %u " \
+ "urg %u"
+
+#define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \
+ "generation 0x%x fpsn 0x%x-%x r_next_psn 0x%x " \
+ "ib_psn 0x%x-%x npagesets %u tnode_cnt %u " \
+ "tidcnt %u tid_idx %u tid_offset %u length %u sent %u"
+
+#define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \
+ "used %u cnt %u"
+
+#define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \
+ "r_psn 0x%x r_state 0x%x r_flags 0x%x " \
+ "r_head_ack_queue %u s_tail_ack_queue %u " \
+ "s_acked_ack_queue %u s_ack_state 0x%x " \
+ "s_nak_state 0x%x s_flags 0x%x ps_flags 0x%x " \
+ "iow_flags 0x%lx"
+
+#define SENDER_INFO_PRN "[%s] qpn 0x%x state 0x%x s_cur %u s_tail %u " \
+ "s_head %u s_acked %u s_last %u s_psn 0x%x " \
+ "s_last_psn 0x%x s_flags 0x%x ps_flags 0x%x " \
+ "iow_flags 0x%lx s_state 0x%x s_num_rd %u s_retry %u"
+
+#define TID_READ_SENDER_PRN "[%s] qpn 0x%x newreq %u tid_r_reqs %u " \
+ "tid_r_comp %u pending_tid_r_segs %u " \
+ "s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \
+ "s_state 0x%x hw_flow_index %u generation 0x%x " \
+ "fpsn 0x%x flow_flags 0x%x"
+
+#define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \
+ "cur_seg %u comp_seg %u ack_seg %u alloc_seg %u " \
+ "total_segs %u setup_head %u clear_tail %u flow_idx %u " \
+ "acked_tail %u state %u r_ack_psn 0x%x r_flow_psn 0x%x " \
+ "r_last_ackd 0x%x s_next_psn 0x%x"
+
+#define RCV_ERR_PRN "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \
+ "s_acked_ack_queue %u s_tail_ack_queue %u " \
+ "r_head_ack_queue %u opcode 0x%x psn 0x%x r_psn 0x%x " \
+ " diff %d"
+
+#define TID_WRITE_RSPDR_PRN "[%s] qpn 0x%x r_tid_head %u r_tid_tail %u " \
+ "r_tid_ack %u r_tid_alloc %u alloc_w_segs %u " \
+ "pending_tid_w_segs %u sync_pt %s " \
+ "ps_nak_psn 0x%x ps_nak_state 0x%x " \
+ "prnr_nak_state 0x%x hw_flow_index %u generation "\
+ "0x%x fpsn 0x%x flow_flags 0x%x resync %s" \
+ "r_next_psn_kdeth 0x%x"
+
+#define TID_WRITE_SENDER_PRN "[%s] qpn 0x%x newreq %u s_tid_cur %u " \
+ "s_tid_tail %u s_tid_head %u " \
+ "pending_tid_w_resp %u n_requests %u " \
+ "n_tid_requests %u s_flags 0x%x ps_flags 0x%x "\
+ "iow_flags 0x%lx s_state 0x%x s_retry %u"
+
+#define KDETH_EFLAGS_ERR_PRN "[%s] qpn 0x%x TID ERR: RcvType 0x%x " \
+ "RcvTypeError 0x%x PSN 0x%x"
+
+DECLARE_EVENT_CLASS(/* class */
+ hfi1_exp_tid_reg_unreg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
+ TP_STRUCT__entry(/* entry */
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(unsigned long, va)
+ __field(unsigned long, pa)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->pa,
+ __entry->va,
+ __entry->dma
+ )
+);
+
+DEFINE_EVENT(/* exp_tid_unreg */
+ hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
+);
+
+DEFINE_EVENT(/* exp_tid_reg */
+ hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
+);
+
+TRACE_EVENT(/* put_tid */
+ hfi1_put_tid,
+ TP_PROTO(struct hfi1_devdata *dd,
+ u32 index, u32 type, unsigned long pa, u16 order),
+ TP_ARGS(dd, index, type, pa, order),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd)
+ __field(unsigned long, pa);
+ __field(u32, index);
+ __field(u32, type);
+ __field(u16, order);
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd);
+ __entry->pa = pa;
+ __entry->index = index;
+ __entry->type = type;
+ __entry->order = order;
+ ),
+ TP_printk("[%s] type %s pa %lx index %u order %u",
+ __get_str(dev),
+ show_tidtype(__entry->type),
+ __entry->pa,
+ __entry->index,
+ __entry->order
+ )
+);
+
+TRACE_EVENT(/* exp_tid_inval */
+ hfi1_exp_tid_inval,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
+ u32 npages, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
+ TP_STRUCT__entry(/* entry */
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(unsigned long, va)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->va = va;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->va,
+ __entry->dma
+ )
+);
+
+DECLARE_EVENT_CLASS(/* opfn_state */
+ hfi1_opfn_state_template,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u16, requested)
+ __field(u16, completed)
+ __field(u8, curr)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->requested = priv->opfn.requested;
+ __entry->completed = priv->opfn.completed;
+ __entry->curr = priv->opfn.curr;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x requested 0x%x completed 0x%x curr 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->requested,
+ __entry->completed,
+ __entry->curr
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_conn_request,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_sched_conn_request,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_conn_response,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_conn_reply,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_state_template, hfi1_opfn_state_conn_error,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DECLARE_EVENT_CLASS(/* opfn_data */
+ hfi1_opfn_data_template,
+ TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
+ TP_ARGS(qp, capcode, data),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, state)
+ __field(u8, capcode)
+ __field(u64, data)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->state = qp->state;
+ __entry->capcode = capcode;
+ __entry->data = data;
+ ),
+ TP_printk(/* printk */
+ "[%s] qpn 0x%x (state 0x%x) Capcode %u data 0x%llx",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->state,
+ __entry->capcode,
+ __entry->data
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_data_template, hfi1_opfn_data_conn_request,
+ TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
+ TP_ARGS(qp, capcode, data)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_data_template, hfi1_opfn_data_conn_response,
+ TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
+ TP_ARGS(qp, capcode, data)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_data_template, hfi1_opfn_data_conn_reply,
+ TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
+ TP_ARGS(qp, capcode, data)
+);
+
+DECLARE_EVENT_CLASS(/* opfn_param */
+ hfi1_opfn_param_template,
+ TP_PROTO(struct rvt_qp *qp, char remote,
+ struct tid_rdma_params *param),
+ TP_ARGS(qp, remote, param),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, remote)
+ __field(u32, param_qp)
+ __field(u32, max_len)
+ __field(u16, jkey)
+ __field(u8, max_read)
+ __field(u8, max_write)
+ __field(u8, timeout)
+ __field(u8, urg)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->remote = remote;
+ __entry->param_qp = param->qp;
+ __entry->max_len = param->max_len;
+ __entry->jkey = param->jkey;
+ __entry->max_read = param->max_read;
+ __entry->max_write = param->max_write;
+ __entry->timeout = param->timeout;
+ __entry->urg = param->urg;
+ ),
+ TP_printk(/* print */
+ OPFN_PARAM_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->remote ? "remote" : "local",
+ __entry->param_qp,
+ __entry->max_read,
+ __entry->max_write,
+ __entry->max_len,
+ __entry->jkey,
+ __entry->timeout,
+ __entry->urg
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_opfn_param_template, hfi1_opfn_param,
+ TP_PROTO(struct rvt_qp *qp, char remote,
+ struct tid_rdma_params *param),
+ TP_ARGS(qp, remote, param)
+);
+
+DECLARE_EVENT_CLASS(/* msg */
+ hfi1_msg_template,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more),
+ TP_STRUCT__entry(/* entry */
+ __field(u32, qpn)
+ __string(msg, msg)
+ __field(u64, more)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->qpn = qp ? qp->ibqp.qp_num : 0;
+ __assign_str(msg, msg);
+ __entry->more = more;
+ ),
+ TP_printk(/* print */
+ "qpn 0x%x %s 0x%llx",
+ __entry->qpn,
+ __get_str(msg),
+ __entry->more
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_opfn_conn_request,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_opfn_conn_error,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_alloc_tids,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_tid_restart_req,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_tid_timeout,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_msg_template, hfi1_msg_tid_retry_timeout,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
+ TP_ARGS(qp, msg, more)
+);
+
+DECLARE_EVENT_CLASS(/* tid_flow_page */
+ hfi1_tid_flow_page_template,
+ TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
+ char mtu8k, char v1, void *vaddr),
+ TP_ARGS(qp, flow, index, mtu8k, v1, vaddr),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, mtu8k)
+ __field(char, v1)
+ __field(u32, index)
+ __field(u64, page)
+ __field(u64, vaddr)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->mtu8k = mtu8k;
+ __entry->v1 = v1;
+ __entry->index = index;
+ __entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL;
+ __entry->vaddr = (u64)vaddr;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x page[%u]: page 0x%llx %s 0x%llx",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->page,
+ __entry->mtu8k ? (__entry->v1 ? "v1" : "v0") : "vaddr",
+ __entry->vaddr
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_page_template, hfi1_tid_flow_page,
+ TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
+ char mtu8k, char v1, void *vaddr),
+ TP_ARGS(qp, flow, index, mtu8k, v1, vaddr)
+);
+
+DECLARE_EVENT_CLASS(/* tid_pageset */
+ hfi1_tid_pageset_template,
+ TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
+ TP_ARGS(qp, index, idx, count),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, index)
+ __field(u16, idx)
+ __field(u16, count)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->index = index;
+ __entry->idx = idx;
+ __entry->count = count;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x list[%u]: idx %u count %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->idx,
+ __entry->count
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_pageset_template, hfi1_tid_pageset,
+ TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
+ TP_ARGS(qp, index, idx, count)
+);
+
+DECLARE_EVENT_CLASS(/* tid_flow */
+ hfi1_tid_flow_template,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(int, index)
+ __field(int, idx)
+ __field(u32, resp_ib_psn)
+ __field(u32, generation)
+ __field(u32, fspsn)
+ __field(u32, flpsn)
+ __field(u32, r_next_psn)
+ __field(u32, ib_spsn)
+ __field(u32, ib_lpsn)
+ __field(u32, npagesets)
+ __field(u32, tnode_cnt)
+ __field(u32, tidcnt)
+ __field(u32, tid_idx)
+ __field(u32, tid_offset)
+ __field(u32, length)
+ __field(u32, sent)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->index = index;
+ __entry->idx = flow->idx;
+ __entry->resp_ib_psn = flow->flow_state.resp_ib_psn;
+ __entry->generation = flow->flow_state.generation;
+ __entry->fspsn = full_flow_psn(flow,
+ flow->flow_state.spsn);
+ __entry->flpsn = full_flow_psn(flow,
+ flow->flow_state.lpsn);
+ __entry->r_next_psn = flow->flow_state.r_next_psn;
+ __entry->ib_spsn = flow->flow_state.ib_spsn;
+ __entry->ib_lpsn = flow->flow_state.ib_lpsn;
+ __entry->npagesets = flow->npagesets;
+ __entry->tnode_cnt = flow->tnode_cnt;
+ __entry->tidcnt = flow->tidcnt;
+ __entry->tid_idx = flow->tid_idx;
+ __entry->tid_offset = flow->tid_offset;
+ __entry->length = flow->length;
+ __entry->sent = flow->sent;
+ ),
+ TP_printk(/* print */
+ TID_FLOW_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->idx,
+ __entry->resp_ib_psn,
+ __entry->generation,
+ __entry->fspsn,
+ __entry->flpsn,
+ __entry->r_next_psn,
+ __entry->ib_spsn,
+ __entry->ib_lpsn,
+ __entry->npagesets,
+ __entry->tnode_cnt,
+ __entry->tidcnt,
+ __entry->tid_idx,
+ __entry->tid_offset,
+ __entry->length,
+ __entry->sent
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_alloc,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_build_read_pkt,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_build_read_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_req,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_restart_req,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_build_write_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_write_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_build_write_data,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_rcv_resync,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_flow_template, hfi1_tid_flow_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
+ TP_ARGS(qp, index, flow)
+);
+
+DECLARE_EVENT_CLASS(/* tid_node */
+ hfi1_tid_node_template,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
+ u8 map, u8 used, u8 cnt),
+ TP_ARGS(qp, msg, index, base, map, used, cnt),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __string(msg, msg)
+ __field(u32, index)
+ __field(u32, base)
+ __field(u8, map)
+ __field(u8, used)
+ __field(u8, cnt)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __assign_str(msg, msg);
+ __entry->index = index;
+ __entry->base = base;
+ __entry->map = map;
+ __entry->used = used;
+ __entry->cnt = cnt;
+ ),
+ TP_printk(/* print */
+ TID_NODE_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __get_str(msg),
+ __entry->index,
+ __entry->base,
+ __entry->map,
+ __entry->used,
+ __entry->cnt
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_node_template, hfi1_tid_node_add,
+ TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
+ u8 map, u8 used, u8 cnt),
+ TP_ARGS(qp, msg, index, base, map, used, cnt)
+);
+
+DECLARE_EVENT_CLASS(/* tid_entry */
+ hfi1_tid_entry_template,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
+ TP_ARGS(qp, index, ent),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(int, index)
+ __field(u8, ctrl)
+ __field(u16, idx)
+ __field(u16, len)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->index = index;
+ __entry->ctrl = hfi1_trace_get_tid_ctrl(ent);
+ __entry->idx = hfi1_trace_get_tid_idx(ent);
+ __entry->len = hfi1_trace_get_tid_len(ent);
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x TID entry %d: idx %u len %u ctrl 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->idx,
+ __entry->len,
+ __entry->ctrl
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_alloc,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
+ TP_ARGS(qp, index, ent)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_build_read_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
+ TP_ARGS(qp, index, ent)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_rcv_read_req,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
+ TP_ARGS(qp, index, ent)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_rcv_write_resp,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
+ TP_ARGS(qp, index, ent)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_entry_template, hfi1_tid_entry_build_write_data,
+ TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
+ TP_ARGS(qp, index, ent)
+);
+
+DECLARE_EVENT_CLASS(/* rsp_info */
+ hfi1_responder_info_template,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u8, state)
+ __field(u8, s_state)
+ __field(u32, psn)
+ __field(u32, r_psn)
+ __field(u8, r_state)
+ __field(u8, r_flags)
+ __field(u8, r_head_ack_queue)
+ __field(u8, s_tail_ack_queue)
+ __field(u8, s_acked_ack_queue)
+ __field(u8, s_ack_state)
+ __field(u8, s_nak_state)
+ __field(u8, r_nak_state)
+ __field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->state = qp->state;
+ __entry->s_state = qp->s_state;
+ __entry->psn = psn;
+ __entry->r_psn = qp->r_psn;
+ __entry->r_state = qp->r_state;
+ __entry->r_flags = qp->r_flags;
+ __entry->r_head_ack_queue = qp->r_head_ack_queue;
+ __entry->s_tail_ack_queue = qp->s_tail_ack_queue;
+ __entry->s_acked_ack_queue = qp->s_acked_ack_queue;
+ __entry->s_ack_state = qp->s_ack_state;
+ __entry->s_nak_state = qp->s_nak_state;
+ __entry->s_flags = qp->s_flags;
+ __entry->ps_flags = priv->s_flags;
+ __entry->iow_flags = priv->s_iowait.flags;
+ ),
+ TP_printk(/* print */
+ RSP_INFO_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->state,
+ __entry->s_state,
+ __entry->psn,
+ __entry->r_psn,
+ __entry->r_state,
+ __entry->r_flags,
+ __entry->r_head_ack_queue,
+ __entry->s_tail_ack_queue,
+ __entry->s_acked_ack_queue,
+ __entry->s_ack_state,
+ __entry->s_nak_state,
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_make_rc_ack,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_rcv_tid_read_req,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_tid_rcv_error,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_tid_write_alloc_res,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_req,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_build_tid_write_resp,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_data,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_make_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_responder_info_template, hfi1_rsp_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DECLARE_EVENT_CLASS(/* sender_info */
+ hfi1_sender_info_template,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u8, state)
+ __field(u32, s_cur)
+ __field(u32, s_tail)
+ __field(u32, s_head)
+ __field(u32, s_acked)
+ __field(u32, s_last)
+ __field(u32, s_psn)
+ __field(u32, s_last_psn)
+ __field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
+ __field(u8, s_state)
+ __field(u8, s_num_rd)
+ __field(u8, s_retry)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->state = qp->state;
+ __entry->s_cur = qp->s_cur;
+ __entry->s_tail = qp->s_tail;
+ __entry->s_head = qp->s_head;
+ __entry->s_acked = qp->s_acked;
+ __entry->s_last = qp->s_last;
+ __entry->s_psn = qp->s_psn;
+ __entry->s_last_psn = qp->s_last_psn;
+ __entry->s_flags = qp->s_flags;
+ __entry->ps_flags = ((struct hfi1_qp_priv *)qp->priv)->s_flags;
+ __entry->iow_flags =
+ ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
+ __entry->s_state = qp->s_state;
+ __entry->s_num_rd = qp->s_num_rd_atomic;
+ __entry->s_retry = qp->s_retry;
+ ),
+ TP_printk(/* print */
+ SENDER_INFO_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->state,
+ __entry->s_cur,
+ __entry->s_tail,
+ __entry->s_head,
+ __entry->s_acked,
+ __entry->s_last,
+ __entry->s_psn,
+ __entry->s_last_psn,
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags,
+ __entry->s_state,
+ __entry->s_num_rd,
+ __entry->s_retry
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_make_rc_req,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_reset_psn,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_restart_rc,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_do_rc_ack,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_rcv_tid_read_resp,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sender_info_template, hfi1_sender_make_tid_pkt,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DECLARE_EVENT_CLASS(/* tid_read_sender */
+ hfi1_tid_read_sender_template,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, newreq)
+ __field(u32, tid_r_reqs)
+ __field(u32, tid_r_comp)
+ __field(u32, pending_tid_r_segs)
+ __field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
+ __field(u8, s_state)
+ __field(u32, hw_flow_index)
+ __field(u32, generation)
+ __field(u32, fpsn)
+ __field(u32, flow_flags)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->newreq = newreq;
+ __entry->tid_r_reqs = priv->tid_r_reqs;
+ __entry->tid_r_comp = priv->tid_r_comp;
+ __entry->pending_tid_r_segs = priv->pending_tid_r_segs;
+ __entry->s_flags = qp->s_flags;
+ __entry->ps_flags = priv->s_flags;
+ __entry->iow_flags = priv->s_iowait.flags;
+ __entry->s_state = priv->s_state;
+ __entry->hw_flow_index = priv->flow_state.index;
+ __entry->generation = priv->flow_state.generation;
+ __entry->fpsn = priv->flow_state.psn;
+ __entry->flow_flags = priv->flow_state.flags;
+ ),
+ TP_printk(/* print */
+ TID_READ_SENDER_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->newreq,
+ __entry->tid_r_reqs,
+ __entry->tid_r_comp,
+ __entry->pending_tid_r_segs,
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags,
+ __entry->s_state,
+ __entry->hw_flow_index,
+ __entry->generation,
+ __entry->fpsn,
+ __entry->flow_flags
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_read_sender_template, hfi1_tid_read_sender_make_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DECLARE_EVENT_CLASS(/* tid_rdma_request */
+ hfi1_tid_rdma_request_template,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, newreq)
+ __field(u8, opcode)
+ __field(u32, psn)
+ __field(u32, lpsn)
+ __field(u32, cur_seg)
+ __field(u32, comp_seg)
+ __field(u32, ack_seg)
+ __field(u32, alloc_seg)
+ __field(u32, total_segs)
+ __field(u16, setup_head)
+ __field(u16, clear_tail)
+ __field(u16, flow_idx)
+ __field(u16, acked_tail)
+ __field(u32, state)
+ __field(u32, r_ack_psn)
+ __field(u32, r_flow_psn)
+ __field(u32, r_last_acked)
+ __field(u32, s_next_psn)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->newreq = newreq;
+ __entry->opcode = opcode;
+ __entry->psn = psn;
+ __entry->lpsn = lpsn;
+ __entry->cur_seg = req->cur_seg;
+ __entry->comp_seg = req->comp_seg;
+ __entry->ack_seg = req->ack_seg;
+ __entry->alloc_seg = req->alloc_seg;
+ __entry->total_segs = req->total_segs;
+ __entry->setup_head = req->setup_head;
+ __entry->clear_tail = req->clear_tail;
+ __entry->flow_idx = req->flow_idx;
+ __entry->acked_tail = req->acked_tail;
+ __entry->state = req->state;
+ __entry->r_ack_psn = req->r_ack_psn;
+ __entry->r_flow_psn = req->r_flow_psn;
+ __entry->r_last_acked = req->r_last_acked;
+ __entry->s_next_psn = req->s_next_psn;
+ ),
+ TP_printk(/* print */
+ TID_REQ_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->newreq,
+ __entry->opcode,
+ __entry->psn,
+ __entry->lpsn,
+ __entry->cur_seg,
+ __entry->comp_seg,
+ __entry->ack_seg,
+ __entry->alloc_seg,
+ __entry->total_segs,
+ __entry->setup_head,
+ __entry->clear_tail,
+ __entry->flow_idx,
+ __entry->acked_tail,
+ __entry->state,
+ __entry->r_ack_psn,
+ __entry->r_flow_psn,
+ __entry->r_last_acked,
+ __entry->s_next_psn
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_read,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_build_read_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_resp,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_err,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_restart_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_setup_tid_wqe,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_write_alloc_res,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_build_write_resp,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_resp,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_data,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_tid_retry_timeout,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_resync,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_pkt,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_write,
+ TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
+ struct tid_rdma_request *req),
+ TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
+);
+
+DECLARE_EVENT_CLASS(/* rc_rcv_err */
+ hfi1_rc_rcv_err_template,
+ TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
+ TP_ARGS(qp, opcode, psn, diff),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, s_flags)
+ __field(u8, state)
+ __field(u8, s_acked_ack_queue)
+ __field(u8, s_tail_ack_queue)
+ __field(u8, r_head_ack_queue)
+ __field(u32, opcode)
+ __field(u32, psn)
+ __field(u32, r_psn)
+ __field(int, diff)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ __entry->state = qp->state;
+ __entry->s_acked_ack_queue = qp->s_acked_ack_queue;
+ __entry->s_tail_ack_queue = qp->s_tail_ack_queue;
+ __entry->r_head_ack_queue = qp->r_head_ack_queue;
+ __entry->opcode = opcode;
+ __entry->psn = psn;
+ __entry->r_psn = qp->r_psn;
+ __entry->diff = diff;
+ ),
+ TP_printk(/* print */
+ RCV_ERR_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->s_flags,
+ __entry->state,
+ __entry->s_acked_ack_queue,
+ __entry->s_tail_ack_queue,
+ __entry->r_head_ack_queue,
+ __entry->opcode,
+ __entry->psn,
+ __entry->r_psn,
+ __entry->diff
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_rc_rcv_err_template, hfi1_tid_rdma_rcv_err,
+ TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
+ TP_ARGS(qp, opcode, psn, diff)
+);
+
+DECLARE_EVENT_CLASS(/* sge */
+ hfi1_sge_template,
+ TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
+ TP_ARGS(qp, index, sge),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(int, index)
+ __field(u64, vaddr)
+ __field(u32, sge_length)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->index = index;
+ __entry->vaddr = (u64)sge->vaddr;
+ __entry->sge_length = sge->sge_length;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x sge %d: vaddr 0x%llx sge_length %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->index,
+ __entry->vaddr,
+ __entry->sge_length
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_sge_template, hfi1_sge_check_align,
+ TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
+ TP_ARGS(qp, index, sge)
+);
+
+DECLARE_EVENT_CLASS(/* tid_write_rsp */
+ hfi1_tid_write_rsp_template,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, r_tid_head)
+ __field(u32, r_tid_tail)
+ __field(u32, r_tid_ack)
+ __field(u32, r_tid_alloc)
+ __field(u32, alloc_w_segs)
+ __field(u32, pending_tid_w_segs)
+ __field(bool, sync_pt)
+ __field(u32, ps_nak_psn)
+ __field(u8, ps_nak_state)
+ __field(u8, prnr_nak_state)
+ __field(u32, hw_flow_index)
+ __field(u32, generation)
+ __field(u32, fpsn)
+ __field(u32, flow_flags)
+ __field(bool, resync)
+ __field(u32, r_next_psn_kdeth)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->r_tid_head = priv->r_tid_head;
+ __entry->r_tid_tail = priv->r_tid_tail;
+ __entry->r_tid_ack = priv->r_tid_ack;
+ __entry->r_tid_alloc = priv->r_tid_alloc;
+ __entry->alloc_w_segs = priv->alloc_w_segs;
+ __entry->pending_tid_w_segs = priv->pending_tid_w_segs;
+ __entry->sync_pt = priv->sync_pt;
+ __entry->ps_nak_psn = priv->s_nak_psn;
+ __entry->ps_nak_state = priv->s_nak_state;
+ __entry->prnr_nak_state = priv->rnr_nak_state;
+ __entry->hw_flow_index = priv->flow_state.index;
+ __entry->generation = priv->flow_state.generation;
+ __entry->fpsn = priv->flow_state.psn;
+ __entry->flow_flags = priv->flow_state.flags;
+ __entry->resync = priv->resync;
+ __entry->r_next_psn_kdeth = priv->r_next_psn_kdeth;
+ ),
+ TP_printk(/* print */
+ TID_WRITE_RSPDR_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->r_tid_head,
+ __entry->r_tid_tail,
+ __entry->r_tid_ack,
+ __entry->r_tid_alloc,
+ __entry->alloc_w_segs,
+ __entry->pending_tid_w_segs,
+ __entry->sync_pt ? "yes" : "no",
+ __entry->ps_nak_psn,
+ __entry->ps_nak_state,
+ __entry->prnr_nak_state,
+ __entry->hw_flow_index,
+ __entry->generation,
+ __entry->fpsn,
+ __entry->flow_flags,
+ __entry->resync ? "yes" : "no",
+ __entry->r_next_psn_kdeth
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_alloc_res,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_req,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_build_resp,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_data,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_resync,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_tid_ack,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_handle_kdeth_eflags,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_rc_ack,
+ TP_PROTO(struct rvt_qp *qp),
+ TP_ARGS(qp)
+);
+
+DECLARE_EVENT_CLASS(/* tid_write_sender */
+ hfi1_tid_write_sender_template,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(char, newreq)
+ __field(u32, s_tid_cur)
+ __field(u32, s_tid_tail)
+ __field(u32, s_tid_head)
+ __field(u32, pending_tid_w_resp)
+ __field(u32, n_requests)
+ __field(u32, n_tid_requests)
+ __field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
+ __field(u8, s_state)
+ __field(u8, s_retry)
+ ),
+ TP_fast_assign(/* assign */
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->newreq = newreq;
+ __entry->s_tid_cur = priv->s_tid_cur;
+ __entry->s_tid_tail = priv->s_tid_tail;
+ __entry->s_tid_head = priv->s_tid_head;
+ __entry->pending_tid_w_resp = priv->pending_tid_w_resp;
+ __entry->n_requests = atomic_read(&priv->n_requests);
+ __entry->n_tid_requests = atomic_read(&priv->n_tid_requests);
+ __entry->s_flags = qp->s_flags;
+ __entry->ps_flags = priv->s_flags;
+ __entry->iow_flags = priv->s_iowait.flags;
+ __entry->s_state = priv->s_state;
+ __entry->s_retry = priv->s_retry;
+ ),
+ TP_printk(/* print */
+ TID_WRITE_SENDER_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->newreq,
+ __entry->s_tid_cur,
+ __entry->s_tid_tail,
+ __entry->s_tid_head,
+ __entry->pending_tid_w_resp,
+ __entry->n_requests,
+ __entry->n_tid_requests,
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags,
+ __entry->s_state,
+ __entry->s_retry
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_resp,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_retry_timeout,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_tid_pkt,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_req,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_write_sender_template, hfi1_tid_write_sender_restart_rc,
+ TP_PROTO(struct rvt_qp *qp, char newreq),
+ TP_ARGS(qp, newreq)
+);
+
+DECLARE_EVENT_CLASS(/* tid_ack */
+ hfi1_tid_ack_template,
+ TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
+ u32 req_psn, u32 resync_psn),
+ TP_ARGS(qp, aeth, psn, req_psn, resync_psn),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, aeth)
+ __field(u32, psn)
+ __field(u32, req_psn)
+ __field(u32, resync_psn)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->aeth = aeth;
+ __entry->psn = psn;
+ __entry->req_psn = req_psn;
+ __entry->resync_psn = resync_psn;
+ ),
+ TP_printk(/* print */
+ "[%s] qpn 0x%x aeth 0x%x psn 0x%x req_psn 0x%x resync_psn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->aeth,
+ __entry->psn,
+ __entry->req_psn,
+ __entry->resync_psn
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_tid_ack_template, hfi1_rcv_tid_ack,
+ TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
+ u32 req_psn, u32 resync_psn),
+ TP_ARGS(qp, aeth, psn, req_psn, resync_psn)
+);
+
+DECLARE_EVENT_CLASS(/* kdeth_eflags_error */
+ hfi1_kdeth_eflags_error_template,
+ TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
+ TP_ARGS(qp, rcv_type, rte, psn),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u8, rcv_type)
+ __field(u8, rte)
+ __field(u32, psn)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->rcv_type = rcv_type;
+ __entry->rte = rte;
+ __entry->psn = psn;
+ ),
+ TP_printk(/* print */
+ KDETH_EFLAGS_ERR_PRN,
+ __get_str(dev),
+ __entry->qpn,
+ __entry->rcv_type,
+ __entry->rte,
+ __entry->psn
+ )
+);
+
+DEFINE_EVENT(/* event */
+ hfi1_kdeth_eflags_error_template, hfi1_eflags_err_write,
+ TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
+ TP_ARGS(qp, rcv_type, rte, psn)
+);
+
+#endif /* __HFI1_TRACE_TID_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_tid
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index c57af3b31fe1..09eb0c9ada00 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -114,19 +114,27 @@ DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
__field(u32, qpn)
__field(u32, flags)
__field(u32, s_flags)
+ __field(u32, ps_flags)
+ __field(unsigned long, iow_flags)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
__entry->flags = flags;
__entry->qpn = qp->ibqp.qp_num;
__entry->s_flags = qp->s_flags;
+ __entry->ps_flags =
+ ((struct hfi1_qp_priv *)qp->priv)->s_flags;
+ __entry->iow_flags =
+ ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
),
TP_printk(
- "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
+ "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
__get_str(dev),
__entry->qpn,
__entry->flags,
- __entry->s_flags
+ __entry->s_flags,
+ __entry->ps_flags,
+ __entry->iow_flags
)
);
@@ -838,6 +846,12 @@ DEFINE_EVENT(
TP_ARGS(qp, flag)
);
+DEFINE_EVENT(/* event */
+ hfi1_do_send_template, hfi1_rc_do_tid_send,
+ TP_PROTO(struct rvt_qp *qp, bool flag),
+ TP_ARGS(qp, flag)
+);
+
DEFINE_EVENT(
hfi1_do_send_template, hfi1_rc_expired_time_slice,
TP_PROTO(struct rvt_qp *qp, bool flag),
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 6ba47037c424..4ed4fcfabd6c 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -271,7 +271,8 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
ps->s_txreq->ss = &qp->s_sge;
ps->s_txreq->s_cur_size = len;
hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- mask_psn(qp->s_psn++), middle, ps);
+ qp->remote_qpn, mask_psn(qp->s_psn++),
+ middle, ps);
return 1;
done_free_tx:
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index bf96067876c9..f88ad425664a 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -222,31 +222,11 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
ssge.num_sge = swqe->wr.num_sge;
sge = &ssge.sge;
while (length) {
- u32 len = sge->length;
+ u32 len = rvt_get_sge_length(sge, length);
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
WARN_ON_ONCE(len == 0);
rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ssge.num_sge)
- *sge = *ssge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
+ rvt_update_sge(&ssge, len, false);
length -= len;
}
rvt_put_ss(&qp->r_sge);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index e383cc01a2bf..43b105de1d54 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -48,7 +48,6 @@
*/
#include "hfi.h"
-
#include "exp_rcv.h"
struct tid_pageset {
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index e341e6dcc388..24b592c6522e 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -91,9 +91,7 @@ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
/* Convert to number of pages */
size = DIV_ROUND_UP(size, PAGE_SIZE);
- down_read(&mm->mmap_sem);
- pinned = mm->pinned_vm;
- up_read(&mm->mmap_sem);
+ pinned = atomic64_read(&mm->pinned_vm);
/* First, check the absolute limit against all pinned pages. */
if (pinned + npages >= ulimit && !can_lock)
@@ -111,9 +109,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
if (ret < 0)
return ret;
- down_write(&mm->mmap_sem);
- mm->pinned_vm += ret;
- up_write(&mm->mmap_sem);
+ atomic64_add(ret, &mm->pinned_vm);
return ret;
}
@@ -130,8 +126,6 @@ void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
}
if (mm) { /* during close after signal, mm can be NULL */
- down_write(&mm->mmap_sem);
- mm->pinned_vm -= npages;
- up_write(&mm->mmap_sem);
+ atomic64_sub(npages, &mm->pinned_vm);
}
}
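The three user_pages.c hunks above make one conversion: pinned-page accounting moves from a plain counter guarded by mmap_sem to an atomic64_t, so it no longer serializes against address-space operations. A standalone sketch of the before/after pattern (field and function names illustrative):

#include <linux/atomic.h>

static atomic64_t pinned_vm = ATOMIC64_INIT(0);

static void account_pin(long npages)
{
	/* was: down_write(&mm->mmap_sem); mm->pinned_vm += npages; up_write(&mm->mmap_sem); */
	atomic64_add(npages, &pinned_vm);
}

static u64 current_pinned(void)
{
	/* was: down_read(&mm->mmap_sem); read mm->pinned_vm; up_read(&mm->mmap_sem); */
	return atomic64_read(&pinned_vm);
}

The sampled value can change immediately after atomic64_read() returns, but that matches the old behaviour: the lock was dropped before the value was used, so the limit check was always advisory.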
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index e5e7fad09f32..8bfbc6d7ea34 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -144,8 +144,10 @@ static int defer_packet_queue(
*/
xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
write_seqlock(&sde->waitlock);
- if (list_empty(&pq->busy.list))
+ if (list_empty(&pq->busy.list)) {
+ iowait_get_priority(&pq->busy);
iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
+ }
write_sequnlock(&sde->waitlock);
return -EBUSY;
eagain:
@@ -191,7 +193,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
pq->mm = fd->mm;
iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
- activate_packet_queue, NULL);
+ activate_packet_queue, NULL, NULL);
pq->reqidx = 0;
pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
@@ -1126,7 +1128,8 @@ static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
0xffffffull),
psn = val & mask;
if (expct)
- psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
+ psn = (psn & ~HFI1_KDETH_BTH_SEQ_MASK) |
+ ((psn + frags) & HFI1_KDETH_BTH_SEQ_MASK);
else
psn = psn + frags;
return psn & mask;
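For expected (TID) packets only the low KDETH sequence bits may advance; the bits above them carry the flow generation and must not see a carry. A worked sketch, assuming HFI1_KDETH_BTH_SEQ_MASK is an 11-bit mask (0x7ff):

#include <stdint.h>

/* assumes an 11-bit KDETH sequence field: mask 0x7ff */
static uint32_t kdeth_advance(uint32_t psn, uint32_t frags)
{
	uint32_t seq = (psn + frags) & 0x7ff;	/* advance sequence only */
	return (psn & ~0x7ffu) | seq;		/* keep generation bits */
}

kdeth_advance(0x1237fe, 3) returns 0x123001: the sequence wraps from 0x7fe to 0x001 while the generation bits stay at 0x123. A plain add would yield 0x123801 and carry into the generation field, which is why the expected path masks instead of adding across the whole PSN; the non-expected path keeps the plain add because it has no generation field to protect.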
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index ec582d86025f..55a56b3d7f83 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -161,10 +161,12 @@ MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the
*/
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_TID_RDMA_WRITE] = IB_WC_RDMA_WRITE,
[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
[IB_WR_SEND] = IB_WC_SEND,
[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_TID_RDMA_READ] = IB_WC_RDMA_READ,
[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
@@ -200,6 +202,14 @@ const u8 hdr_len_by_opcode[256] = {
[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
+ [IB_OPCODE_TID_RDMA_READ_REQ] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_READ_RESP] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_WRITE_REQ] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_WRITE_RESP] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_WRITE_DATA] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_ACK] = 12 + 8 + 36,
+ [IB_OPCODE_TID_RDMA_RESYNC] = 12 + 8 + 36,
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
[IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
@@ -243,6 +253,17 @@ static const opcode_handler opcode_handler_tbl[256] = {
[IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv,
[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv,
+
+ /* TID RDMA has separate handlers for different opcodes. */
+ [IB_OPCODE_TID_RDMA_WRITE_REQ] = &hfi1_rc_rcv_tid_rdma_write_req,
+ [IB_OPCODE_TID_RDMA_WRITE_RESP] = &hfi1_rc_rcv_tid_rdma_write_resp,
+ [IB_OPCODE_TID_RDMA_WRITE_DATA] = &hfi1_rc_rcv_tid_rdma_write_data,
+ [IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = &hfi1_rc_rcv_tid_rdma_write_data,
+ [IB_OPCODE_TID_RDMA_READ_REQ] = &hfi1_rc_rcv_tid_rdma_read_req,
+ [IB_OPCODE_TID_RDMA_READ_RESP] = &hfi1_rc_rcv_tid_rdma_read_resp,
+ [IB_OPCODE_TID_RDMA_RESYNC] = &hfi1_rc_rcv_tid_rdma_resync,
+ [IB_OPCODE_TID_RDMA_ACK] = &hfi1_rc_rcv_tid_rdma_ack,
+
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
[IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
@@ -308,7 +329,7 @@ static inline opcode_handler qp_ok(struct hfi1_packet *packet)
static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
{
#ifdef CONFIG_FAULT_INJECTION
- if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP)
+ if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP) {
/*
* In order to drop non-IB traffic we
* set PbcInsertHrc to NONE (0x2).
@@ -319,8 +340,9 @@ static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
* packet will not be delivered to the
* correct context.
*/
+ pbc &= ~PBC_INSERT_HCRC_SMASK;
pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
- else
+ } else {
/*
* In order to drop regular verbs
* traffic we set the PbcTestEbp
@@ -330,10 +352,129 @@ static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
* triggered and will be dropped.
*/
pbc |= PBC_TEST_EBP;
+ }
#endif
return pbc;
}
+static opcode_handler tid_qp_ok(int opcode, struct hfi1_packet *packet)
+{
+ if (packet->qp->ibqp.qp_type != IB_QPT_RC ||
+ !(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
+ return NULL;
+ if ((opcode & RVT_OPCODE_QP_MASK) == IB_OPCODE_TID_RDMA)
+ return opcode_handler_tbl[opcode];
+ return NULL;
+}
+
+void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct ib_header *hdr = packet->hdr;
+ u32 tlen = packet->tlen;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
+ opcode_handler opcode_handler;
+ unsigned long flags;
+ u32 qp_num;
+ int lnh;
+ u8 opcode;
+
+ /* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
+ if (unlikely(tlen < 15 * sizeof(u32)))
+ goto drop;
+
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh != HFI1_LRH_BTH)
+ goto drop;
+
+ packet->ohdr = &hdr->u.oth;
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+
+ opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
+ inc_opstats(tlen, &rcd->opstats->stats[opcode]);
+
+ /* verbs_qp can be picked up from any tid_rdma header struct */
+ qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_req.verbs_qp) &
+ RVT_QPN_MASK;
+
+ rcu_read_lock();
+ packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!packet->qp)
+ goto drop_rcu;
+ spin_lock_irqsave(&packet->qp->r_lock, flags);
+ opcode_handler = tid_qp_ok(opcode, packet);
+ if (likely(opcode_handler))
+ opcode_handler(packet);
+ else
+ goto drop_unlock;
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+ rcu_read_unlock();
+
+ return;
+drop_unlock:
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+drop_rcu:
+ rcu_read_unlock();
+drop:
+ ibp->rvp.n_pkt_drops++;
+}
+
+void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct ib_header *hdr = packet->hdr;
+ u32 tlen = packet->tlen;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
+ opcode_handler opcode_handler;
+ unsigned long flags;
+ u32 qp_num;
+ int lnh;
+ u8 opcode;
+
+ /* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
+ if (unlikely(tlen < 15 * sizeof(u32)))
+ goto drop;
+
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh != HFI1_LRH_BTH)
+ goto drop;
+
+ packet->ohdr = &hdr->u.oth;
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+
+ opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
+ inc_opstats(tlen, &rcd->opstats->stats[opcode]);
+
+ /* verbs_qp can be picked up from any tid_rdma header struct */
+ qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_rsp.verbs_qp) &
+ RVT_QPN_MASK;
+
+ rcu_read_lock();
+ packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!packet->qp)
+ goto drop_rcu;
+ spin_lock_irqsave(&packet->qp->r_lock, flags);
+ opcode_handler = tid_qp_ok(opcode, packet);
+ if (likely(opcode_handler))
+ opcode_handler(packet);
+ else
+ goto drop_unlock;
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+ rcu_read_unlock();
+
+ return;
+drop_unlock:
+ spin_unlock_irqrestore(&packet->qp->r_lock, flags);
+drop_rcu:
+ rcu_read_unlock();
+drop:
+ ibp->rvp.n_pkt_drops++;
+}
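hfi1_kdeth_eager_rcv() and hfi1_kdeth_expected_rcv() above are line-for-line identical except for which tid_rdma header union member supplies the verbs QP number: r_req.verbs_qp on the eager path, r_rsp.verbs_qp on the expected path. A hypothetical factoring, shown only to make the shared skeleton explicit; the patch keeps two open-coded copies, presumably to avoid extra indirection on the hot receive path:

/* Hypothetical helper; not part of the patch. */
static void hfi1_kdeth_rcv(struct hfi1_packet *packet, __be32 verbs_qp)
{
	u32 qp_num = be32_to_cpu(verbs_qp) & RVT_QPN_MASK;

	/* ... length/LRH checks, QP lookup under RCU, r_lock,
	 * tid_qp_ok() dispatch, shared drop paths ...
	 */
}

/* eager:    hfi1_kdeth_rcv(packet, packet->ohdr->u.tid_rdma.r_req.verbs_qp); */
/* expected: hfi1_kdeth_rcv(packet, packet->ohdr->u.tid_rdma.r_rsp.verbs_qp); */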
+
static int hfi1_do_pkey_check(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
@@ -504,11 +645,28 @@ static void verbs_sdma_complete(
hfi1_put_txreq(tx);
}
+void hfi1_wait_kmem(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct ib_device *ibdev = ibqp->device;
+ struct hfi1_ibdev *dev = to_idev(ibdev);
+
+ if (list_empty(&priv->s_iowait.list)) {
+ if (list_empty(&dev->memwait))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ qp->s_flags |= RVT_S_WAIT_KMEM;
+ list_add_tail(&priv->s_iowait.list, &dev->memwait);
+ priv->s_iowait.lock = &dev->iowait_lock;
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
+ rvt_get_qp(qp);
+ }
+}
+
static int wait_kmem(struct hfi1_ibdev *dev,
struct rvt_qp *qp,
struct hfi1_pkt_state *ps)
{
- struct hfi1_qp_priv *priv = qp->priv;
unsigned long flags;
int ret = 0;
@@ -517,15 +675,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
write_seqlock(&dev->iowait_lock);
list_add_tail(&ps->s_txreq->txreq.list,
&ps->wait->tx_head);
- if (list_empty(&priv->s_iowait.list)) {
- if (list_empty(&dev->memwait))
- mod_timer(&dev->mem_timer, jiffies + 1);
- qp->s_flags |= RVT_S_WAIT_KMEM;
- list_add_tail(&priv->s_iowait.list, &dev->memwait);
- priv->s_iowait.lock = &dev->iowait_lock;
- trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
- rvt_get_qp(qp);
- }
+ hfi1_wait_kmem(qp);
write_sequnlock(&dev->iowait_lock);
hfi1_qp_unbusy(qp, ps->wait);
ret = -EBUSY;
@@ -553,11 +703,7 @@ static noinline int build_verbs_ulp_payload(
int ret = 0;
while (length) {
- len = ss->sge.length;
- if (len > length)
- len = length;
- if (len > ss->sge.sge_length)
- len = ss->sge.sge_length;
+ len = rvt_get_sge_length(&ss->sge, length);
WARN_ON_ONCE(len == 0);
ret = sdma_txadd_kvaddr(
sde->dd,
@@ -678,6 +824,15 @@ bail_txadd:
return ret;
}
+static u64 update_hcrc(u8 opcode, u64 pbc)
+{
+ if ((opcode & IB_OPCODE_TID_RDMA) == IB_OPCODE_TID_RDMA) {
+ pbc &= ~PBC_INSERT_HCRC_SMASK;
+ pbc |= (u64)PBC_IHCRC_LKDETH << PBC_INSERT_HCRC_SHIFT;
+ }
+ return pbc;
+}
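update_hcrc() applies the standard read-modify-write idiom for a multi-bit PBC field: clear the entire field through its shifted mask (SMASK), then OR in the new encoding at the field's shift. Clearing first matters; the hfi1_fault_tx() hunk earlier in this patch adds the same pbc &= ~PBC_INSERT_HCRC_SMASK; line because OR-ing a new encoding over a stale one can blend into a third, unintended value. A generic sketch of the idiom (names follow the driver's SMASK/SHIFT convention; the field position is illustrative):

#define DEMO_FIELD_SHIFT	4
#define DEMO_FIELD_SMASK	(0xfULL << DEMO_FIELD_SHIFT)

static u64 demo_set_field(u64 word, u64 val)
{
	word &= ~DEMO_FIELD_SMASK;	/* clear the old encoding */
	word |= (val << DEMO_FIELD_SHIFT) & DEMO_FIELD_SMASK;	/* insert the new one */
	return word;
}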
+
int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
@@ -723,6 +878,9 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
qp->srate_mbps,
vl,
plen);
+
+ /* Update HCRC based on packet opcode */
+ pbc = update_hcrc(ps->opcode, pbc);
}
tx->wqe = qp->s_wqe;
ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
@@ -787,6 +945,7 @@ static int pio_wait(struct rvt_qp *qp,
dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN);
qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
+ iowait_get_priority(&priv->s_iowait);
iowait_queue(ps->pkts_sent, &priv->s_iowait,
&sc->piowait);
priv->s_iowait.lock = &sc->waitlock;
@@ -871,6 +1030,9 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
+
+ /* Update HCRC based on packet opcode */
+ pbc = update_hcrc(ps->opcode, pbc);
}
if (cb)
iowait_pio_inc(&priv->s_iowait);
@@ -914,12 +1076,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (ss) {
while (len) {
void *addr = ss->sge.vaddr;
- u32 slen = ss->sge.length;
+ u32 slen = rvt_get_sge_length(&ss->sge, len);
- if (slen > len)
- slen = len;
- if (slen > ss->sge.sge_length)
- slen = ss->sge.sge_length;
rvt_update_sge(ss, slen, false);
seg_pio_copy_mid(pbuf, addr, slen);
len -= slen;
@@ -1188,7 +1346,9 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
rdi->dparms.props.max_mr_size = U64_MAX;
rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
rdi->dparms.props.max_qp = hfi1_max_qps;
- rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
+ rdi->dparms.props.max_qp_wr =
+ (hfi1_max_qp_wrs >= HFI1_QP_WQE_INVALID ?
+ HFI1_QP_WQE_INVALID - 1 : hfi1_max_qp_wrs);
rdi->dparms.props.max_send_sge = hfi1_max_sges;
rdi->dparms.props.max_recv_sge = hfi1_max_sges;
rdi->dparms.props.max_sge_rd = hfi1_max_sges;
@@ -1622,6 +1782,7 @@ static const struct ib_device_ops hfi1_dev_ops = {
.alloc_rdma_netdev = hfi1_vnic_alloc_rn,
.get_dev_fw_str = hfi1_get_dev_fw_str,
.get_hw_stats = get_hw_stats,
+ .init_port = hfi1_create_port_files,
.modify_device = modify_device,
/* keep process mad in the driver */
.process_mad = hfi1_process_mad,
@@ -1679,7 +1840,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
/*
* Fill in rvt info object.
*/
- dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
@@ -1743,6 +1903,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;
+ dd->verbs_dev.rdi.dparms.reserved_operations = 1;
+ dd->verbs_dev.rdi.dparms.extra_rdma_atomic = HFI1_TID_RDMA_WRITE_CNT;
/* post send table */
dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 1ad0b14bdb3c..62ace0b2d17a 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -72,6 +72,7 @@ struct hfi1_packet;
#include "iowait.h"
#include "tid_rdma.h"
+#include "opfn.h"
#define HFI1_MAX_RDMA_ATOMIC 16
@@ -158,10 +159,68 @@ struct hfi1_qp_priv {
struct sdma_engine *s_sde; /* current sde */
struct send_context *s_sendcontext; /* current sendcontext */
struct hfi1_ctxtdata *rcd; /* QP's receive context */
+ struct page **pages; /* for TID page scan */
+ u32 tid_enqueue; /* saved when tid waited */
u8 s_sc; /* SC[0..4] for next packet */
struct iowait s_iowait;
+ struct timer_list s_tid_timer; /* for timing tid wait */
+ struct timer_list s_tid_retry_timer; /* for timing tid ack */
+ struct list_head tid_wait; /* for queueing tid space */
+ struct hfi1_opfn_data opfn;
+ struct tid_flow_state flow_state;
+ struct tid_rdma_qp_params tid_rdma;
struct rvt_qp *owner;
u8 hdr_type; /* 9B or 16B */
+ struct rvt_sge_state tid_ss; /* SGE state pointer for 2nd leg */
+ atomic_t n_requests; /* # of TID RDMA requests in the */
+ /* queue */
+ atomic_t n_tid_requests; /* # of sent TID RDMA requests */
+ unsigned long tid_timer_timeout_jiffies;
+ unsigned long tid_retry_timeout_jiffies;
+
+ /* variables for the TID RDMA SE state machine */
+ u8 s_state;
+ u8 s_retry;
+ u8 rnr_nak_state; /* RNR NAK state */
+ u8 s_nak_state;
+ u32 s_nak_psn;
+ u32 s_flags;
+ u32 s_tid_cur;
+ u32 s_tid_head;
+ u32 s_tid_tail;
+ u32 r_tid_head; /* Most recently added TID RDMA request */
+ u32 r_tid_tail; /* the last completed TID RDMA request */
+ u32 r_tid_ack; /* the TID RDMA request to be ACK'ed */
+ u32 r_tid_alloc; /* Request for which we are allocating resources */
+ u32 pending_tid_w_segs; /* Num of pending tid write segments */
+ u32 pending_tid_w_resp; /* Num of pending tid write responses */
+ u32 alloc_w_segs; /* Number of segments for which write */
+ /* resources have been allocated for this QP */
+
+ /* For TID RDMA READ */
+ u32 tid_r_reqs; /* Num of tid reads requested */
+ u32 tid_r_comp; /* Num of tid reads completed */
+ u32 pending_tid_r_segs; /* Num of pending tid read segments */
+ u16 pkts_ps; /* packets per segment */
+ u8 timeout_shift; /* account for number of packets per segment */
+
+ u32 r_next_psn_kdeth;
+ u32 r_next_psn_kdeth_save;
+ u32 s_resync_psn;
+ u8 sync_pt; /* Set when QP reaches sync point */
+ u8 resync;
+};
+
+#define HFI1_QP_WQE_INVALID ((u32)-1)
+
+struct hfi1_swqe_priv {
+ struct tid_rdma_request tid_req;
+ struct rvt_sge_state ss; /* Used for TID RDMA READ Request */
+};
+
+struct hfi1_ack_priv {
+ struct rvt_sge_state ss; /* used for TID WRITE RESP */
+ struct tid_rdma_request tid_req;
};
/*
@@ -225,6 +284,7 @@ struct hfi1_ibdev {
struct kmem_cache *verbs_txreq_cache;
u64 n_txwait;
u64 n_kmem_wait;
+ u64 n_tidwait;
/* protect iowait lists */
seqlock_t iowait_lock ____cacheline_aligned_in_smp;
@@ -312,6 +372,31 @@ static inline u32 delta_psn(u32 a, u32 b)
return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT;
}
+static inline struct tid_rdma_request *wqe_to_tid_req(struct rvt_swqe *wqe)
+{
+ return &((struct hfi1_swqe_priv *)wqe->priv)->tid_req;
+}
+
+static inline struct tid_rdma_request *ack_to_tid_req(struct rvt_ack_entry *e)
+{
+ return &((struct hfi1_ack_priv *)e->priv)->tid_req;
+}
+
+/*
+ * Compose the full flow PSN for a TID RDMA flow: the flow's generation
+ * number occupies the bits above the KDETH BTH sequence field, and the
+ * low bits carry the per-flow packet sequence number.
+ */
+static inline u32 __full_flow_psn(struct flow_state *state, u32 psn)
+{
+ return mask_psn((state->generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
+ (psn & HFI1_KDETH_BTH_SEQ_MASK));
+}
+
+static inline u32 full_flow_psn(struct tid_rdma_flow *flow, u32 psn)
+{
+ return __full_flow_psn(&flow->flow_state, psn);
+}
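__full_flow_psn() packs the flow generation above the KDETH sequence bits and masks the result back to PSN width. Under the same 11-bit assumption used earlier (HFI1_KDETH_BTH_SEQ_SHIFT == 11, mask 0x7ff), a worked sketch:

u32 generation = 0x25;		/* flow_state.generation */
u32 seq = psn & 0x7ff;		/* say 0x0f3 */
u32 full = mask_psn((generation << 11) | seq);
				/* 0x12800 | 0x0f3 = 0x128f3 */

Because the generation is bumped whenever a hardware flow is reallocated, the same sequence number in different generations yields distinct full PSNs, which is what lets the receiver recognize packets from a stale incarnation of a flow.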
+
struct verbs_txreq;
void hfi1_put_txreq(struct verbs_txreq *tx);
@@ -356,9 +441,12 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
const struct ib_global_route *grh, u32 hwords, u32 nwords);
void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
+ u32 bth0, u32 bth1, u32 bth2, int middle,
struct hfi1_pkt_state *ps);
+bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ bool tid);
+
void _hfi1_do_send(struct work_struct *work);
void hfi1_do_send_from_rvt(struct rvt_qp *qp);
@@ -377,6 +465,10 @@ int hfi1_register_ib_device(struct hfi1_devdata *);
void hfi1_unregister_ib_device(struct hfi1_devdata *);
+void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet);
+
+void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet);
+
void hfi1_ib_rcv(struct hfi1_packet *packet);
void hfi1_16B_rcv(struct hfi1_packet *packet);
@@ -394,6 +486,16 @@ static inline bool opa_bth_is_migration(struct ib_other_headers *ohdr)
return ohdr->bth[1] & cpu_to_be32(OPA_BTH_MIG_REQ);
}
+void hfi1_wait_kmem(struct rvt_qp *qp);
+
+static inline void hfi1_trdma_send_complete(struct rvt_qp *qp,
+ struct rvt_swqe *wqe,
+ enum ib_wc_status status)
+{
+ trdma_clean_swqe(qp, wqe);
+ rvt_send_complete(qp, wqe, status);
+}
+
extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];
extern const u8 hdr_len_by_opcode[];
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 2a77af26a231..b002e96eb335 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -94,6 +94,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
tx->txreq.num_desc = 0;
/* Set the header type */
tx->phdr.hdr.hdr_type = priv->hdr_type;
+ tx->txreq.flags = 0;
return tx;
}
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
index 1f81c480e028..af1b1ffcb38e 100644
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -240,8 +240,10 @@ static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
}
vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
- if (list_empty(&vnic_sdma->wait.list))
+ if (list_empty(&vnic_sdma->wait.list)) {
+ iowait_get_priority(wait->iow);
iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ }
write_sequnlock(&sde->waitlock);
return -EBUSY;
}
@@ -281,7 +283,7 @@ void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
hfi1_vnic_sdma_sleep,
- hfi1_vnic_sdma_wakeup, NULL);
+ hfi1_vnic_sdma_wakeup, NULL, NULL);
vnic_sdma->sde = &vinfo->dd->per_sdma[i];
vnic_sdma->dd = vinfo->dd;
vnic_sdma->vinfo = vinfo;
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 21c2100b2ea9..fddb5fdf92de 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,7 +1,6 @@
config INFINIBAND_HNS
tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
- depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 004c88b32e13..e2a7f1488f76 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -2,7 +2,7 @@
# Makefile for the Hisilicon RoCE drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index a0ba19d4a10e..2acf946d02e5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -176,17 +176,33 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout)
{
- if (hr_dev->is_reset)
- return 0;
+ int ret;
+
+ if (hr_dev->hw->rst_prc_mbox) {
+ ret = hr_dev->hw->rst_prc_mbox(hr_dev);
+ if (ret == CMD_RST_PRC_SUCCESS)
+ return 0;
+ else if (ret == CMD_RST_PRC_EBUSY)
+ return -EBUSY;
+ }
if (hr_dev->cmd.use_events)
- return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- timeout);
+ ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
else
- return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op,
- timeout);
+ ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
+
+ if (ret == CMD_RST_PRC_EBUSY)
+ return -EBUSY;
+
+ if (ret && (hr_dev->hw->rst_prc_mbox &&
+ hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
+ return 0;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
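
The mailbox path (and the cmq path later in this series) now brackets the actual transfer with reset-state checks: a command is treated as already satisfied once a reset has been processed, deferred with -EBUSY while one is in flight, and a failure is re-classified after the fact in case a reset raced with the transfer. A self-contained sketch of that bracketing, with stubs standing in for rst_prc_mbox() and the real transfer:

#include <errno.h>
#include <stdio.h>

enum rst_prc { RST_PRC_OTHERS, RST_PRC_SUCCESS, RST_PRC_EBUSY };

/* Stubs standing in for hw->rst_prc_mbox() and the real transfer. */
static enum rst_prc reset_state(void) { return RST_PRC_OTHERS; }
static int do_transfer(void) { return 0; }

/* Mirrors the check/execute/re-check flow of hns_roce_cmd_mbox(). */
static int guarded_cmd(void)
{
	enum rst_prc st = reset_state();	/* pre-check */
	int ret;

	if (st == RST_PRC_SUCCESS)
		return 0;			/* reset done: drop the command */
	if (st == RST_PRC_EBUSY)
		return -EBUSY;

	ret = do_transfer();			/* the real mailbox work */

	/* post-check: a reset may have raced with the transfer */
	if (ret && reset_state() == RST_PRC_SUCCESS)
		return 0;

	return ret;
}

int main(void)
{
	printf("%d\n", guarded_cmd());
	return 0;
}
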
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 927701df5eff..059fd1da493e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -75,6 +75,10 @@ enum {
HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
+ /* CQC TIMER commands */
+ HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 = 0x23,
+ HNS_ROCE_CMD_READ_CQC_TIMER_BT0 = 0x27,
+
/* MPT commands */
HNS_ROCE_CMD_QUERY_MPT = 0x62,
@@ -89,6 +93,10 @@ enum {
HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+ /* QPC TIMER commands */
+ HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 = 0x33,
+ HNS_ROCE_CMD_READ_QPC_TIMER_BT0 = 0x37,
+
/* EQC commands */
HNS_ROCE_CMD_CREATE_AEQC = 0x80,
HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
@@ -98,6 +106,10 @@ enum {
HNS_ROCE_CMD_MODIFY_CEQC = 0x91,
HNS_ROCE_CMD_QUERY_CEQC = 0x92,
HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
+
+ /* SCC CTX BT commands */
+ HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4,
+ HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5,
};
enum {
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 3a485f50fede..1dfe5627006c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -215,7 +215,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
EXPORT_SYMBOL_GPL(hns_roce_free_cq);
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
- struct ib_ucontext *context,
+ struct ib_udata *udata,
struct hns_roce_cq_buf *buf,
struct ib_umem **umem, u64 buf_addr, int cqe)
{
@@ -223,7 +223,7 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
u32 page_shift;
u32 npages;
- *umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
+ *umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -347,7 +347,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
}
/* Get user space address, write it into mtt table */
- ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
+ ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
&hr_cq->umem, ucmd.buf_addr,
cq_entries);
if (ret) {
@@ -358,7 +358,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp))) {
ret = hns_roce_db_map_user(to_hr_ucontext(context),
- ucmd.db_addr, &hr_cq->db);
+ udata, ucmd.db_addr,
+ &hr_cq->db);
if (ret) {
dev_err(dev, "cq record doorbell map failed!\n");
goto err_mtt;
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index e2f93c1ce86a..0c6c1fe87705 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -8,7 +8,8 @@
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
-int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
+int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db)
{
struct hns_roce_user_db_page *page;
@@ -28,8 +29,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
refcount_set(&page->refcount, 1);
page->user_virt = (virt & PAGE_MASK);
- page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 509e467843f6..9ee86daf1700 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -202,6 +202,7 @@ enum {
HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
HNS_ROCE_CAP_FLAG_MW = BIT(7),
HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
+ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
};
@@ -216,6 +217,32 @@ enum {
HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};
+enum hns_roce_reset_stage {
+ HNS_ROCE_STATE_NON_RST,
+ HNS_ROCE_STATE_RST_BEF_DOWN,
+ HNS_ROCE_STATE_RST_DOWN,
+ HNS_ROCE_STATE_RST_UNINIT,
+ HNS_ROCE_STATE_RST_INIT,
+ HNS_ROCE_STATE_RST_INITED,
+};
+
+enum hns_roce_instance_state {
+ HNS_ROCE_STATE_NON_INIT,
+ HNS_ROCE_STATE_INIT,
+ HNS_ROCE_STATE_INITED,
+ HNS_ROCE_STATE_UNINIT,
+};
+
+enum {
+ HNS_ROCE_RST_DIRECT_RETURN = 0,
+};
+
+enum {
+ CMD_RST_PRC_OTHERS,
+ CMD_RST_PRC_SUCCESS,
+ CMD_RST_PRC_EBUSY,
+};
+
#define HNS_ROCE_CMD_SUCCESS 1
#define HNS_ROCE_PORT_DOWN 0
@@ -482,6 +509,8 @@ struct hns_roce_qp_table {
struct hns_roce_hem_table qp_table;
struct hns_roce_hem_table irrl_table;
struct hns_roce_hem_table trrl_table;
+ struct hns_roce_hem_table sccc_table;
+ struct mutex scc_mutex;
};
struct hns_roce_cq_table {
@@ -729,6 +758,8 @@ struct hns_roce_caps {
u32 max_extend_sg;
int num_qps; /* 256k */
int reserved_qps;
+ int num_qpc_timer;
+ int num_cqc_timer;
u32 max_srq_sg;
int num_srqs;
u32 max_wqes; /* 16k */
@@ -768,6 +799,9 @@ struct hns_roce_caps {
int irrl_entry_sz;
int trrl_entry_sz;
int cqc_entry_sz;
+ int sccc_entry_sz;
+ int qpc_timer_entry_sz;
+ int cqc_timer_entry_sz;
int srqc_entry_sz;
int idx_entry_sz;
u32 pbl_ba_pg_sz;
@@ -777,9 +811,12 @@ struct hns_roce_caps {
int ceqe_depth;
enum ib_mtu max_mtu;
u32 qpc_bt_num;
+ u32 qpc_timer_bt_num;
u32 srqc_bt_num;
u32 cqc_bt_num;
+ u32 cqc_timer_bt_num;
u32 mpt_bt_num;
+ u32 sccc_bt_num;
u32 qpc_ba_pg_sz;
u32 qpc_buf_pg_sz;
u32 qpc_hop_num;
@@ -795,6 +832,15 @@ struct hns_roce_caps {
u32 mtt_ba_pg_sz;
u32 mtt_buf_pg_sz;
u32 mtt_hop_num;
+ u32 sccc_ba_pg_sz;
+ u32 sccc_buf_pg_sz;
+ u32 sccc_hop_num;
+ u32 qpc_timer_ba_pg_sz;
+ u32 qpc_timer_buf_pg_sz;
+ u32 qpc_timer_hop_num;
+ u32 cqc_timer_ba_pg_sz;
+ u32 cqc_timer_buf_pg_sz;
+ u32 cqc_timer_hop_num;
u32 cqe_ba_pg_sz;
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
@@ -834,6 +880,7 @@ struct hns_roce_hw {
u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event);
int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
+ int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
@@ -861,6 +908,8 @@ struct hns_roce_hw {
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
int (*destroy_qp)(struct ib_qp *ibqp);
+ int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp);
int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
@@ -898,6 +947,8 @@ struct hns_roce_dev {
spinlock_t bt_cmd_lock;
bool active;
bool is_reset;
+ bool dis_db;
+ unsigned long reset_cnt;
struct hns_roce_ib_iboe iboe;
struct list_head pgdir_list;
@@ -922,6 +973,8 @@ struct hns_roce_dev {
struct hns_roce_srq_table srq_table;
struct hns_roce_qp_table qp_table;
struct hns_roce_eq_table eq_table;
+ struct hns_roce_hem_table qpc_timer_table;
+ struct hns_roce_hem_table cqc_timer_table;
int cmd_mod;
int loop_idc;
@@ -1061,10 +1114,9 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
-struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int hns_roce_dealloc_pd(struct ib_pd *pd);
+int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata);
+void hns_roce_dealloc_pd(struct ib_pd *pd);
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -1133,7 +1185,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
-int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
+int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
struct hns_roce_db *db);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 4cdbcafa5915..8e29dbb5b5fb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -45,6 +45,9 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
(hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
(hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
(hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
+ (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
+ (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
+ (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
(hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
(hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
(hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
@@ -125,6 +128,30 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
mhop->hop_num = hr_dev->caps.cqc_hop_num;
break;
+ case HEM_TYPE_SCCC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
+ mhop->hop_num = hr_dev->caps.sccc_hop_num;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
+ mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
+ break;
case HEM_TYPE_SRQC:
mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ PAGE_SHIFT);
@@ -175,7 +202,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
return 0;
/*
- * QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
+ * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
* MTT/CQE alloc hem for bt pages.
*/
bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
@@ -486,7 +513,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
}
/*
- * alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
+ * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
* alloc bt space chunk for MTT/CQE.
*/
size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
@@ -593,6 +620,7 @@ out:
mutex_unlock(&table->mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(hns_roce_table_get);
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
@@ -658,7 +686,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
}
/*
- * free buffer space chunk for QPC/MTPT/CQC/SRQC.
+ * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
* free bt space chunk for MTT/CQE.
*/
hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
@@ -735,6 +763,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
mutex_unlock(&table->mutex);
}
+EXPORT_SYMBOL_GPL(hns_roce_table_put);
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
@@ -763,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
dma_offset = offset = idx_offset * table->obj_size;
} else {
+ u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
/* mtt mhop */
i = mhop.l0_idx;
@@ -774,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
hem_idx = i;
hem = table->hem[hem_idx];
- dma_offset = offset = (obj & (table->num_obj - 1)) *
- table->obj_size % mhop.bt_chunk_size;
+ dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
+ mhop.bt_chunk_size;
if (mhop.hop_num == 2)
dma_offset = offset = 0;
}
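
The mhop branch now derives the offset from a fixed segment granularity rather than table->obj_size: each base address is 8 bytes and a segment holds 8 of them, so seg_size is 64. A quick worked example of the new offset computation, with an assumed 4 KiB BT chunk:

#include <stdio.h>

int main(void)
{
	unsigned int obj = 100;       /* example object index */
	unsigned int seg_size = 64;   /* 8-byte BA x 8 BAs per segment */
	unsigned int bt_chunk = 4096; /* assumed BT chunk size */

	/* (100 * 64) % 4096 = 6400 % 4096 = 2304 */
	printf("dma_offset = %u\n", obj * seg_size % bt_chunk);
	return 0;
}
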
@@ -904,6 +935,30 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
num_bt_l0 = hr_dev->caps.cqc_bt_num;
hop_num = hr_dev->caps.cqc_hop_num;
break;
+ case HEM_TYPE_SCCC:
+ buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.sccc_bt_num;
+ hop_num = hr_dev->caps.sccc_hop_num;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
+ hop_num = hr_dev->caps.qpc_timer_hop_num;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
+ hop_num = hr_dev->caps.cqc_timer_hop_num;
+ break;
case HEM_TYPE_SRQC:
buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ PAGE_SHIFT);
@@ -1081,6 +1136,15 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->srq_table.table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
+ if (hr_dev->caps.qpc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qpc_timer_table);
+ if (hr_dev->caps.cqc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->cqc_timer_table);
+ if (hr_dev->caps.sccc_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table);
if (hr_dev->caps.trrl_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.trrl_table);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index a650278c6fbd..d9d668992e49 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -44,6 +44,9 @@ enum {
HEM_TYPE_MTPT,
HEM_TYPE_CQC,
HEM_TYPE_SRQC,
+ HEM_TYPE_SCCC,
+ HEM_TYPE_QPC_TIMER,
+ HEM_TYPE_CQC_TIMER,
/* UNMAP HEM */
HEM_TYPE_MTT,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index b74c742b000c..c8555f7704d8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -711,13 +711,14 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
struct ib_qp_attr attr = { 0 };
struct hns_roce_v1_priv *priv;
struct hns_roce_qp *hr_qp;
+ struct ib_device *ibdev;
struct ib_cq *cq;
struct ib_pd *pd;
union ib_gid dgid;
u64 subnet_prefix;
int attr_mask = 0;
+ int ret = -ENOMEM;
int i, j;
- int ret;
u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
u8 phy_port;
u8 port = 0;
@@ -742,12 +743,16 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
free_mr->mr_free_cq->ib_cq.cq_context = NULL;
atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
- pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
- if (IS_ERR(pd)) {
- dev_err(dev, "Create pd for reserved loop qp failed!");
- ret = -ENOMEM;
+ ibdev = &hr_dev->ib_dev;
+ pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
+ if (!pd)
+ goto alloc_mem_failed;
+
+ pd->device = ibdev;
+ ret = hns_roce_alloc_pd(pd, NULL, NULL);
+ if (ret)
goto alloc_pd_failed;
- }
+
free_mr->mr_free_pd = to_hr_pd(pd);
free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
free_mr->mr_free_pd->ibpd.uobject = NULL;
@@ -854,10 +859,12 @@ create_lp_qp_failed:
dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
}
- if (hns_roce_dealloc_pd(pd))
- dev_err(dev, "Destroy pd for create_lp_qp failed!\n");
+ hns_roce_dealloc_pd(pd);
alloc_pd_failed:
+ kfree(pd);
+
+alloc_mem_failed:
if (hns_roce_ib_destroy_cq(cq))
dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
@@ -891,9 +898,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
if (ret)
dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
- ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
- if (ret)
- dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
+ hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
}
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
@@ -1745,8 +1750,6 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
writel(val, hcr + 5);
- mmiowb();
-
return 0;
}
@@ -1866,9 +1869,8 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
struct hns_roce_v1_mpt_entry *mpt_entry;
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
u64 *pages;
- int entry;
int i;
/* MPT filled into mailbox buf */
@@ -1923,8 +1925,8 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
return -ENOMEM;
i = 0;
- for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
- pages[i] = ((u64)sg_dma_address(sg)) >> 12;
+ for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
+ pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;
/* Directly record to MTPT table firstly 7 entry */
if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
@@ -5002,7 +5004,7 @@ static int hns_roce_probe(struct platform_device *pdev)
struct hns_roce_dev *hr_dev;
struct device *dev = &pdev->dev;
- hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+ hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
if (!hr_dev)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 543fa1504cd3..1c54390e1c85 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -587,7 +587,7 @@ out:
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
V2_DB_PARAMETER_SL_S, qp->sl);
- hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
+ hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
qp->sq_next_wqe = ind;
qp->next_sge = sge_ind;
@@ -712,6 +712,113 @@ out:
return ret;
}
+static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
+ unsigned long instance_stage,
+ unsigned long reset_stage)
+{
+ /* Once a hardware reset has completed, stop sending mailbox, cmq and
+ * doorbell operations to the hardware. If we are currently inside
+ * .init_instance(), or at the HNAE3_INIT_CLIENT stage of the soft
+ * reset, exit with an error so that the HNAE3_INIT_CLIENT path can
+ * roll back its work (e.g. notifying the hardware to free resources)
+ * and tell the NIC driver to reschedule the soft reset.
+ */
+ hr_dev->is_reset = true;
+ hr_dev->dis_db = true;
+
+ if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
+ instance_stage == HNS_ROCE_STATE_INIT)
+ return CMD_RST_PRC_EBUSY;
+
+ return CMD_RST_PRC_SUCCESS;
+}
+
+static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
+ unsigned long instance_stage,
+ unsigned long reset_stage)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ /* While a hardware reset is in progress, stop sending mailbox, cmq
+ * and doorbell operations to the hardware. If we are currently inside
+ * .init_instance(), or at the HNAE3_INIT_CLIENT stage of the soft
+ * reset, exit with an error so that the HNAE3_INIT_CLIENT path can
+ * roll back its work (e.g. notifying the hardware to free resources)
+ * and tell the NIC driver to reschedule the soft reset.
+ */
+ hr_dev->dis_db = true;
+ if (!ops->get_hw_reset_stat(handle))
+ hr_dev->is_reset = true;
+
+ if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
+ instance_stage == HNS_ROCE_STATE_INIT)
+ return CMD_RST_PRC_EBUSY;
+
+ return CMD_RST_PRC_SUCCESS;
+}
+
+static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ /* When software reset is detected at .init_instance() function, we
+ * should stop sending mailbox&cmq&doorbell to hardware, and exit
+ * with error.
+ */
+ hr_dev->dis_db = true;
+ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
+ hr_dev->is_reset = true;
+
+ return CMD_RST_PRC_EBUSY;
+}
+
+static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long instance_stage; /* the current instance stage */
+ unsigned long reset_stage; /* the current reset stage */
+ unsigned long reset_cnt;
+ bool sw_resetting;
+ bool hw_resetting;
+
+ if (hr_dev->is_reset)
+ return CMD_RST_PRC_SUCCESS;
+
+ /* Get reset information from the NIC driver (or the RoCE driver
+ * itself); the variables reported by the NIC driver mean the
+ * following:
+ * reset_cnt -- The count value of completed hardware reset.
+ * hw_resetting -- Whether hardware device is resetting now.
+ * sw_resetting -- Whether NIC's software reset process is running now.
+ */
+ instance_stage = handle->rinfo.instance_state;
+ reset_stage = handle->rinfo.reset_state;
+ reset_cnt = ops->ae_dev_reset_cnt(handle);
+ hw_resetting = ops->get_hw_reset_stat(handle);
+ sw_resetting = ops->ae_dev_resetting(handle);
+
+ if (reset_cnt != hr_dev->reset_cnt)
+ return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
+ reset_stage);
+ else if (hw_resetting)
+ return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
+ reset_stage);
+ else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
+ return hns_roce_v2_cmd_sw_resetting(hr_dev);
+
+ return 0;
+}
+
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
int ntu = ring->next_to_use;
@@ -892,8 +999,8 @@ static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
return clean;
}
-static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmq_desc *desc, int num)
+static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
{
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
@@ -905,9 +1012,6 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
int ret = 0;
int ntc;
- if (hr_dev->is_reset)
- return 0;
-
spin_lock_bh(&csq->lock);
if (num > hns_roce_cmq_space(csq)) {
@@ -982,6 +1086,30 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
return ret;
}
+int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+{
+ int retval;
+ int ret;
+
+ ret = hns_roce_v2_rst_process_cmd(hr_dev);
+ if (ret == CMD_RST_PRC_SUCCESS)
+ return 0;
+ if (ret == CMD_RST_PRC_EBUSY)
+ return ret;
+
+ ret = __hns_roce_cmq_send(hr_dev, desc, num);
+ if (ret) {
+ retval = hns_roce_v2_rst_process_cmd(hr_dev);
+ if (retval == CMD_RST_PRC_SUCCESS)
+ return 0;
+ else if (retval == CMD_RST_PRC_EBUSY)
+ return retval;
+ }
+
+ return ret;
+}
+
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
struct hns_roce_query_version *resp;
@@ -1078,6 +1206,44 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
PF_RES_DATA_3_PF_SL_NUM_M,
PF_RES_DATA_3_PF_SL_NUM_S);
+ hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
+ PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
+ PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
+
+ return 0;
+}
+
+static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_pf_timer_res_a *req_a;
+ struct hns_roce_cmq_desc desc[2];
+ int ret, i;
+
+ for (i = 0; i < 2; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i],
+ HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
+ true);
+
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ }
+
+ ret = hns_roce_cmq_send(hr_dev, desc, 2);
+ if (ret)
+ return ret;
+
+ req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
+
+ hr_dev->caps.qpc_timer_bt_num =
+ roce_get_field(req_a->qpc_timer_bt_idx_num,
+ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
+ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
+ hr_dev->caps.cqc_timer_bt_num =
+ roce_get_field(req_a->cqc_timer_bt_idx_num,
+ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
+ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
return 0;
}
@@ -1193,6 +1359,14 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
VF_RES_B_DATA_3_VF_SL_NUM_M,
VF_RES_B_DATA_3_VF_SL_NUM_S,
HNS_ROCE_VF_SL_NUM);
+
+ roce_set_field(req_b->vf_sccc_idx_num,
+ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
+ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
+ roce_set_field(req_b->vf_sccc_idx_num,
+ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
+ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
+ HNS_ROCE_VF_SCCC_BT_NUM);
}
}
@@ -1205,6 +1379,7 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
+ u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
struct hns_roce_cfg_bt_attr *req;
struct hns_roce_cmq_desc desc;
@@ -1252,6 +1427,20 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
+ roce_set_field(req->vf_sccc_cfg,
+ CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
+ CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
+ hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
+ roce_set_field(req->vf_sccc_cfg,
+ CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
+ CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
+ hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
+ roce_set_field(req->vf_sccc_cfg,
+ CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
+ CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
+ sccc_hop_num ==
+ HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
+
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
@@ -1289,6 +1478,16 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
+ if (hr_dev->pci_dev->revision == 0x21) {
+ ret = hns_roce_query_pf_timer_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Query pf timer resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
ret = hns_roce_alloc_vf_resource(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
@@ -1313,6 +1512,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
+ caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
@@ -1366,7 +1566,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->mpt_ba_pg_sz = 0;
caps->mpt_buf_pg_sz = 0;
caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->pbl_ba_pg_sz = 0;
+ caps->pbl_ba_pg_sz = 2;
caps->pbl_buf_pg_sz = 0;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->mtt_ba_pg_sz = 0;
@@ -1408,9 +1608,27 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
- if (hr_dev->pci_dev->revision == 0x21)
+ if (hr_dev->pci_dev->revision == 0x21) {
caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
- HNS_ROCE_CAP_FLAG_SRQ;
+ HNS_ROCE_CAP_FLAG_SRQ |
+ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+
+ caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
+ caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
+ caps->qpc_timer_ba_pg_sz = 0;
+ caps->qpc_timer_buf_pg_sz = 0;
+ caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
+ caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
+ caps->cqc_timer_ba_pg_sz = 0;
+ caps->cqc_timer_buf_pg_sz = 0;
+ caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+
+ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
+ caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
+ }
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
@@ -1611,7 +1829,8 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
- int ret;
+ int qpc_count, cqc_count;
+ int ret, i;
/* TSQ includes SQ doorbell and ack doorbell */
ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
@@ -1626,8 +1845,40 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
goto err_tpq_init_failed;
}
+ /* Alloc memory for QPC Timer buffer space chunk */
+ for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
+ qpc_count++) {
+ ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
+ qpc_count);
+ if (ret) {
+ dev_err(hr_dev->dev, "QPC Timer get failed\n");
+ goto err_qpc_timer_failed;
+ }
+ }
+
+ /* Alloc memory for CQC Timer buffer space chunk */
+ for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
+ cqc_count++) {
+ ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
+ cqc_count);
+ if (ret) {
+ dev_err(hr_dev->dev, "CQC Timer get failed\n");
+ goto err_cqc_timer_failed;
+ }
+ }
+
return 0;
+err_cqc_timer_failed:
+ for (i = 0; i < cqc_count; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
+
+err_qpc_timer_failed:
+ for (i = 0; i < qpc_count; i++)
+ hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
+
+ hns_roce_free_link_table(hr_dev, &priv->tpq);
+
err_tpq_init_failed:
hns_roce_free_link_table(hr_dev, &priv->tsq);
@@ -1735,6 +1986,9 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
status = hns_roce_v2_cmd_complete(hr_dev);
if (status != 0x1) {
+ if (status == CMD_RST_PRC_EBUSY)
+ return status;
+
dev_err(dev, "mailbox status 0x%x!\n", status);
return -EBUSY;
}
@@ -1831,12 +2085,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
struct hns_roce_mr *mr)
{
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
u64 page_addr;
u64 *pages;
- int i, j;
- int len;
- int entry;
+ int i;
mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
@@ -1849,17 +2101,14 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
return -ENOMEM;
i = 0;
- for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- for (j = 0; j < len; ++j) {
- page_addr = sg_dma_address(sg) +
- (j << mr->umem->page_shift);
- pages[i] = page_addr >> 6;
- /* Record the first 2 entry directly to MTPT table */
- if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
- goto found;
- i++;
- }
+ for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
+ page_addr = sg_page_iter_dma_address(&sg_iter);
+ pages[i] = page_addr >> 6;
+
+ /* Record the first 2 entries directly to MTPT table */
+ if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+ goto found;
+ i++;
}
found:
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
@@ -1941,6 +2190,9 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
int ret = 0;
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
+
if (flags & IB_MR_REREG_PD) {
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, pdn);
@@ -2245,6 +2497,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
enum ib_cq_notify_flags flags)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
u32 doorbell[2];
@@ -2270,7 +2523,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
notification_flag);
- hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+ hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
return 0;
}
@@ -2663,17 +2916,33 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
break;
+ case HEM_TYPE_SCCC:
+ op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
+ break;
default:
dev_warn(dev, "Table %d not to be written by mailbox!\n",
table->type);
return 0;
}
+
+ if (table->type == HEM_TYPE_SCCC && step_idx)
+ return 0;
+
op += step_idx;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ if (table->type == HEM_TYPE_SCCC)
+ obj = mhop.l0_idx;
+
if (check_whether_last_step(hop_num, step_idx)) {
hem = table->hem[hem_idx];
for (hns_roce_hem_first(hem, &iter);
@@ -2722,6 +2991,10 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
case HEM_TYPE_CQC:
op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
break;
+ case HEM_TYPE_SCCC:
+ case HEM_TYPE_QPC_TIMER:
+ case HEM_TYPE_CQC_TIMER:
+ break;
case HEM_TYPE_SRQC:
op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
break;
@@ -2730,6 +3003,12 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
table->type);
return 0;
}
+
+ if (table->type == HEM_TYPE_SCCC ||
+ table->type == HEM_TYPE_QPC_TIMER ||
+ table->type == HEM_TYPE_CQC_TIMER)
+ return 0;
+
op += step_idx;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -3686,10 +3965,16 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_212_LSN_S, 0);
if (attr_mask & IB_QP_TIMEOUT) {
- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
- V2_QPC_BYTE_28_AT_S, attr->timeout);
- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
- V2_QPC_BYTE_28_AT_S, 0);
+ if (attr->timeout < 31) {
+ roce_set_field(context->byte_28_at_fl,
+ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+ attr->timeout);
+ roce_set_field(qpc_mask->byte_28_at_fl,
+ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+ 0);
+ } else {
+ dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
+ }
}
roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
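
The QPC local ACK timeout field is five bits wide, but the driver now programs only 0..30 and leaves the context untouched (with a warning) for larger values. A caller-side sketch of keeping a requested timeout inside that window; clamp_at() is purely illustrative and not part of this patch:

#include <stdio.h>

/* Illustrative clamp to the 0..30 window hns_roce_v2 accepts. */
static unsigned int clamp_at(unsigned int timeout)
{
	return timeout > 30 ? 30 : timeout;
}

int main(void)
{
	printf("%u %u\n", clamp_at(14), clamp_at(31)); /* prints: 14 30 */
	return 0;
}
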
@@ -3742,7 +4027,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
struct device *dev = hr_dev->dev;
int ret = -EINVAL;
- context = kcalloc(2, sizeof(*context), GFP_KERNEL);
+ context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
if (!context)
return -ENOMEM;
@@ -3789,13 +4074,16 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
- roce_set_field(context->byte_84_rq_ci_pi,
+
+ if (!ibqp->srq) {
+ roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
hr_qp->rq.head);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+ }
}
if (attr_mask & IB_QP_AV) {
@@ -4224,6 +4512,59 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
return 0;
}
+static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_sccc_clr_done *resp;
+ struct hns_roce_sccc_clr *clr;
+ struct hns_roce_cmq_desc desc;
+ int ret, i;
+
+ mutex_lock(&hr_dev->qp_table.scc_mutex);
+
+ /* set scc ctx clear done flag */
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
+ goto out;
+ }
+
+ /* clear scc context */
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
+ clr = (struct hns_roce_sccc_clr *)desc.data;
+ clr->qpn = cpu_to_le32(hr_qp->qpn);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
+ goto out;
+ }
+
+ /* query scc context clear is done or not */
+ resp = (struct hns_roce_sccc_clr_done *)desc.data;
+ for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc,
+ HNS_ROCE_OPC_QUERY_SCCC, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
+ goto out;
+ }
+
+ if (resp->clr_done)
+ goto out;
+
+ msleep(20);
+ }
+
+ dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
+ ret = -ETIMEDOUT;
+
+out:
+ mutex_unlock(&hr_dev->qp_table.scc_mutex);
+ return ret;
+}
+
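
The SCC-context clear is a three-step handshake under scc_mutex: arm the done flag, issue the clear, then poll the done flag a bounded number of times with a 20 ms sleep between queries. A self-contained sketch of the same bounded-poll idiom; poll_done() is a stand-in for the HNS_ROCE_OPC_QUERY_SCCC command:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define CLR_DONE_RETRIES 5	/* mirrors HNS_ROCE_CMQ_SCC_CLR_DONE_CNT */

/* Stub for the query command: pretend the hardware reports
 * completion on the fourth query. */
static bool poll_done(int attempt) { return attempt == 3; }

static int wait_clr_done(void)
{
	int i;

	for (i = 0; i <= CLR_DONE_RETRIES; i++) {
		if (poll_done(i))
			return 0;		/* hardware finished the clear */
		usleep(20 * 1000);		/* 20 ms, like msleep(20) */
	}

	return -ETIMEDOUT;			/* retry budget exhausted */
}

int main(void)
{
	printf("%d\n", wait_clr_done());
	return 0;
}
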
static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
@@ -4281,7 +4622,8 @@ static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
if (hr_qp->ibqp.uobject) {
if (hr_qp->sdb_en == 1) {
hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ if (hr_qp->rdb_en == 1)
+ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
return;
@@ -4319,64 +4661,19 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
dev_warn(dev, "Send queue drained.\n");
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- dev_err(dev, "Local work queue catastrophic error.\n");
+ dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
+ qpn, irq_work->sub_type);
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
- switch (irq_work->sub_type) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_err(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_err(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_err(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_err(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_err(dev, "QP %d, WQE shift error.\n", qpn);
- break;
- default:
- dev_err(dev, "Unhandled sub_event type %d.\n",
- irq_work->sub_type);
- break;
- }
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_err(dev, "Invalid request local work queue error.\n");
+ dev_err(dev, "Invalid request local work queue 0x%x error.\n",
+ qpn);
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- dev_err(dev, "Local access violation work queue error.\n");
+ dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
+ qpn, irq_work->sub_type);
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
- switch (irq_work->sub_type) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_err(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_err(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_err(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_err(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_err(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_err(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- dev_err(dev, "Unhandled sub_event type %d.\n",
- irq_work->sub_type);
- break;
- }
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
dev_warn(dev, "SRQ limit reach.\n");
@@ -4427,6 +4724,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
u32 doorbell[2];
doorbell[0] = 0;
@@ -4453,7 +4751,7 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
HNS_ROCE_V2_EQ_DB_PARA_S,
(eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
- hns_roce_write64_k(doorbell, eq->doorbell);
+ hns_roce_write64(hr_dev, doorbell, eq->doorbell);
}
static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
@@ -4568,7 +4866,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
event_type, eq->eqn, eq->cons_index);
break;
- };
+ }
eq->event_type = event_type;
eq->sub_type = sub_type;
@@ -4692,11 +4990,22 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
+ struct pci_dev *pdev = hr_dev->pci_dev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops = ae_dev->ops;
+
dev_err(dev, "AEQ overflow!\n");
roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+ /* Set reset level for reset_event() */
+ if (ops->set_default_reset_request)
+ ops->set_default_reset_request(ae_dev,
+ HNAE3_FUNC_RESET);
+ if (ops->reset_event)
+ ops->reset_event(pdev, NULL);
+
roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
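
On an AEQ overflow the abnormal-interrupt handler now escalates to a function-level reset through the hnae3 ops rather than only logging. A stubbed sketch of that escalation pattern; the struct and level constant below are illustrative stand-ins for the hnae3 interface:

#include <stdio.h>

struct reset_ops {
	void (*set_default_reset_request)(int level);
	void (*reset_event)(void);
};

static void set_req(int level) { printf("request reset level %d\n", level); }
static void fire(void) { printf("reset event scheduled\n"); }

/* Escalate a fatal condition (here: AEQ overflow) to a function-level
 * reset, guarding each hook since a backend may not implement it. */
static void escalate(const struct reset_ops *ops)
{
	if (ops->set_default_reset_request)
		ops->set_default_reset_request(1 /* stands in for HNAE3_FUNC_RESET */);
	if (ops->reset_event)
		ops->reset_event();
}

int main(void)
{
	struct reset_ops ops = { set_req, fire };

	escalate(&ops);
	return 0;
}
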
@@ -5599,7 +5908,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
return 0;
}
-int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
@@ -5664,6 +5973,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_v2_db srq_db;
@@ -5725,7 +6035,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
srq_db.parameter = srq->head;
- hns_roce_write64_k((__le32 *)&srq_db, srq->db_reg_l);
+ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
}
@@ -5758,6 +6068,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.hw_exit = hns_roce_v2_exit,
.post_mbox = hns_roce_v2_post_mbox,
.chk_mbox = hns_roce_v2_chk_mbox,
+ .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
.set_gid = hns_roce_v2_set_gid,
.set_mac = hns_roce_v2_set_mac,
.write_mtpt = hns_roce_v2_write_mtpt,
@@ -5770,6 +6081,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.modify_qp = hns_roce_v2_modify_qp,
.query_qp = hns_roce_v2_query_qp,
.destroy_qp = hns_roce_v2_destroy_qp,
+ .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
.modify_cq = hns_roce_v2_modify_cq,
.post_send = hns_roce_v2_post_send,
.post_recv = hns_roce_v2_post_recv,
@@ -5800,6 +6112,7 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
const struct pci_device_id *id;
int i;
@@ -5830,15 +6143,18 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->cmd_mod = 1;
hr_dev->loop_idc = 0;
+ hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
+ priv->handle = handle;
+
return 0;
}
-static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
struct hns_roce_dev *hr_dev;
int ret;
- hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+ hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
if (!hr_dev)
return -ENOMEM;
@@ -5850,7 +6166,6 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
hr_dev->pci_dev = handle->pdev;
hr_dev->dev = &handle->pdev->dev;
- handle->priv = hr_dev;
ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
if (ret) {
@@ -5864,6 +6179,8 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_get_cfg;
}
+ handle->priv = hr_dev;
+
return 0;
error_failed_get_cfg:
@@ -5875,7 +6192,7 @@ error_failed_kzalloc:
return ret;
}
-static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
bool reset)
{
struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
@@ -5883,24 +6200,79 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
if (!hr_dev)
return;
+ handle->priv = NULL;
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
}
+static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
+
+ if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
+ goto reset_chk_err;
+ }
+
+ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
+ dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
+ if (ops->ae_dev_resetting(handle) ||
+ ops->get_hw_reset_stat(handle))
+ goto reset_chk_err;
+ else
+ return ret;
+ }
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
+
+ return 0;
+
+reset_chk_err:
+ dev_err(dev, "Device is busy in resetting state.\n"
+ "please retry later.\n");
+
+ return -EBUSY;
+}
+
+static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ bool reset)
+{
+ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
+ return;
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
+
+ __hns_roce_hw_v2_uninit_instance(handle, reset);
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
+}
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
- struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct hns_roce_dev *hr_dev;
struct ib_event event;
- if (!hr_dev) {
- dev_err(&handle->pdev->dev,
- "Input parameter handle->priv is NULL!\n");
- return -EINVAL;
+ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
+ set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
+ return 0;
}
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
+ clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
+
+ hr_dev = (struct hns_roce_dev *)handle->priv;
+ if (!hr_dev)
+ return 0;
+
hr_dev->active = false;
- hr_dev->is_reset = true;
+ hr_dev->dis_db = true;
event.event = IB_EVENT_DEVICE_FATAL;
event.device = &hr_dev->ib_dev;
@@ -5912,17 +6284,29 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
+ struct device *dev = &handle->pdev->dev;
int ret;
- ret = hns_roce_hw_v2_init_instance(handle);
+ if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
+ &handle->rinfo.state)) {
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
+ return 0;
+ }
+
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
+
+ dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
+ ret = __hns_roce_hw_v2_init_instance(handle);
if (ret) {
/* when reset notify type is HNAE3_INIT_CLIENT In reset notify
* callback function, RoCE Engine reinitialize. If RoCE reinit
* failed, we should inform NIC driver.
*/
handle->priv = NULL;
- dev_err(&handle->pdev->dev,
- "In reset process RoCE reinit failed %d.\n", ret);
+ dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
+ } else {
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
+ dev_info(dev, "Reset done, RoCE client reinit finished.\n");
}
return ret;
@@ -5930,8 +6314,14 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
+ if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
+ return 0;
+
+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
+ dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
msleep(100);
- hns_roce_hw_v2_uninit_instance(handle, false);
+ __hns_roce_hw_v2_uninit_instance(handle, false);
+
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index b72d0443c835..f1f1b75812f9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -36,6 +36,7 @@
#include <linux/bitops.h>
#define HNS_ROCE_VF_QPC_BT_NUM 256
+#define HNS_ROCE_VF_SCCC_BT_NUM 64
#define HNS_ROCE_VF_SRQC_BT_NUM 64
#define HNS_ROCE_VF_CQC_BT_NUM 64
#define HNS_ROCE_VF_MPT_BT_NUM 64
@@ -44,12 +45,14 @@
#define HNS_ROCE_VF_SGID_NUM 32
#define HNS_ROCE_VF_SL_NUM 8
-#define HNS_ROCE_V2_MAX_QP_NUM 0x2000
+#define HNS_ROCE_V2_MAX_QP_NUM 0x100000
+#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ 0x100000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
-#define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
+#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
+#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
@@ -64,7 +67,7 @@
#define HNS_ROCE_V2_COMP_VEC_NUM 63
#define HNS_ROCE_V2_AEQE_VEC_NUM 1
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
-#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
+#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
@@ -83,6 +86,9 @@
#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
+#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
+#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
+#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
@@ -90,7 +96,10 @@
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
+#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
+
#define HNS_ROCE_CONTEXT_HOP_NUM 1
+#define HNS_ROCE_SCCC_HOP_NUM 1
#define HNS_ROCE_MTT_HOP_NUM 1
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_SRQWQE_HOP_NUM 1
@@ -120,6 +129,8 @@
#define HNS_ROCE_CMQ_EN_B 16
#define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)
+#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5
+
#define check_whether_last_step(hop_num, step_idx) \
((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
(step_idx == 1 && hop_num == 1) || \
@@ -224,11 +235,15 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
+ HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
HNS_ROCE_OPC_POST_MB = 0x8504,
HNS_ROCE_OPC_QUERY_MB_ST = 0x8505,
HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
+ HNS_ROCE_OPC_CLR_SCCC = 0x8509,
+ HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
+ HNS_ROCE_OPC_RESET_SCCC = 0x850b,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
};
@@ -1300,7 +1315,8 @@ struct hns_roce_pf_res_b {
__le32 smac_idx_num;
__le32 sgid_idx_num;
__le32 qid_idx_sl_num;
- __le32 rsv[2];
+ __le32 sccc_bt_idx_num;
+ __le32 rsv;
};
#define PF_RES_DATA_1_PF_SMAC_IDX_S 0
@@ -1321,6 +1337,31 @@ struct hns_roce_pf_res_b {
#define PF_RES_DATA_3_PF_SL_NUM_S 16
#define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
+#define PF_RES_DATA_4_PF_SCCC_BT_IDX_S 0
+#define PF_RES_DATA_4_PF_SCCC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
+#define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
+
+struct hns_roce_pf_timer_res_a {
+ __le32 rsv0;
+ __le32 qpc_timer_bt_idx_num;
+ __le32 cqc_timer_bt_idx_num;
+ __le32 rsv[3];
+};
+
+#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0
+#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0)
+
+#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16
+#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16)
+
+#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0
+#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0)
+
+#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16
+#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16)
+
struct hns_roce_vf_res_a {
__le32 vf_id;
__le32 vf_qpc_bt_idx_num;
@@ -1365,7 +1406,8 @@ struct hns_roce_vf_res_b {
__le32 vf_smac_idx_num;
__le32 vf_sgid_idx_num;
__le32 vf_qid_idx_sl_num;
- __le32 rsv[2];
+ __le32 vf_sccc_idx_num;
+ __le32 rsv1;
};
#define VF_RES_B_DATA_0_VF_ID_S 0
@@ -1389,6 +1431,12 @@ struct hns_roce_vf_res_b {
#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
+#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0
+#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
+#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
+
struct hns_roce_vf_switch {
__le32 rocee_sel;
__le32 fun_id;
@@ -1424,7 +1472,8 @@ struct hns_roce_cfg_bt_attr {
__le32 vf_srqc_cfg;
__le32 vf_cqc_cfg;
__le32 vf_mpt_cfg;
- __le32 rsv[2];
+ __le32 vf_sccc_cfg;
+ __le32 rsv;
};
#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
@@ -1463,6 +1512,15 @@ struct hns_roce_cfg_bt_attr {
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
+#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M GENMASK(9, 8)
+
struct hns_roce_cfg_sgid_tb {
__le32 table_idx_rsv;
__le32 vf_sgid_l;
@@ -1546,6 +1604,7 @@ struct hns_roce_link_table_entry {
#define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
struct hns_roce_v2_priv {
+ struct hnae3_handle *handle;
struct hns_roce_v2_cmq cmq;
struct hns_roce_link_table tsq;
struct hns_roce_link_table tpq;
@@ -1730,4 +1789,25 @@ struct hns_roce_wqe_atomic_seg {
__le64 cmp_data;
};
+struct hns_roce_sccc_clr {
+ __le32 qpn;
+ __le32 rsv[5];
+};
+
+struct hns_roce_sccc_clr_done {
+ __le32 clr_done;
+ __le32 rsv[5];
+};
+
+static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
+ void __iomem *dest)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
+ hns_roce_write64_k(val, dest);
+}
+
#endif
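
The hns_roce_write64() inline added above is the key piece of this header change: every 64-bit doorbell write now consults both the driver's dis_db flag and the hnae3 handle's hardware reset state, so doorbells are silently dropped while the device is resetting rather than written to a dead BAR. A minimal caller sketch (hedged: ring_db and the offset parameter are illustrative names, not taken from this patch):

static void ring_db(struct hns_roce_dev *hr_dev, __le32 db[2],
                    unsigned long offset)
{
        /* Skipped entirely while dis_db is set or the hardware reports
         * a reset in progress; otherwise a plain 64-bit MMIO write. */
        hns_roce_write64(hr_dev, db, hr_dev->reg_base + offset);
}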
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c79054ba9495..c929125da84b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -226,6 +226,11 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_srq_sge = hr_dev->caps.max_srq_sges;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
+ }
+
return 0;
}
@@ -330,23 +335,19 @@ static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
return 0;
}
-static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
- struct ib_udata *udata)
+static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
{
int ret = 0;
- struct hns_roce_ucontext *context;
+ struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
struct hns_roce_ib_alloc_ucontext_resp resp = {};
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
if (!hr_dev->active)
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
resp.qp_tab_size = hr_dev->caps.num_qps;
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
ret = hns_roce_uar_alloc(hr_dev, &context->uar);
if (ret)
goto error_fail_uar_alloc;
@@ -360,25 +361,20 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
if (ret)
goto error_fail_copy_to_udata;
- return &context->ibucontext;
+ return 0;
error_fail_copy_to_udata:
hns_roce_uar_free(hr_dev, &context->uar);
error_fail_uar_alloc:
- kfree(context);
-
- return ERR_PTR(ret);
+ return ret;
}
-static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
- kfree(context);
-
- return 0;
}
static int hns_roce_mmap(struct ib_ucontext *context,
@@ -472,6 +468,8 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.query_pkey = hns_roce_query_pkey,
.query_port = hns_roce_query_port,
.reg_user_mr = hns_roce_reg_user_mr,
+ INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};
static const struct ib_device_ops hns_roce_dev_mr_ops = {
@@ -564,7 +562,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->driver_id = RDMA_DRIVER_HNS;
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
- ret = ib_register_device(ib_dev, "hns_%d", NULL);
+ ret = ib_register_device(ib_dev, "hns_%d");
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
return ret;
@@ -702,8 +700,62 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
}
}
+ if (hr_dev->caps.sccc_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table,
+ HEM_TYPE_SCCC,
+ hr_dev->caps.sccc_entry_sz,
+ hr_dev->caps.num_qps, 1);
+ if (ret) {
+ dev_err(dev,
+ "Failed to init SCC context memory, aborting.\n");
+ goto err_unmap_idx;
+ }
+ }
+
+ if (hr_dev->caps.qpc_timer_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev,
+ &hr_dev->qpc_timer_table,
+ HEM_TYPE_QPC_TIMER,
+ hr_dev->caps.qpc_timer_entry_sz,
+ hr_dev->caps.num_qpc_timer, 1);
+ if (ret) {
+ dev_err(dev,
+ "Failed to init QPC timer memory, aborting.\n");
+ goto err_unmap_ctx;
+ }
+ }
+
+ if (hr_dev->caps.cqc_timer_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev,
+ &hr_dev->cqc_timer_table,
+ HEM_TYPE_CQC_TIMER,
+ hr_dev->caps.cqc_timer_entry_sz,
+ hr_dev->caps.num_cqc_timer, 1);
+ if (ret) {
+ dev_err(dev,
+ "Failed to init CQC timer memory, aborting.\n");
+ goto err_unmap_qpc_timer;
+ }
+ }
+
return 0;
+err_unmap_qpc_timer:
+ if (hr_dev->caps.qpc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qpc_timer_table);
+
+err_unmap_ctx:
+ if (hr_dev->caps.sccc_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table);
+
+err_unmap_idx:
+ if (hr_dev->caps.num_idx_segs)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->mr_table.mtt_idx_table);
+
err_unmap_srqwqe:
if (hr_dev->caps.num_srqwqe_segs)
hns_roce_cleanup_hem_table(hr_dev,
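
The error ladder grafted into hns_roce_init_hem() above follows the standard kernel goto-unwind idiom: each newly optional table init gains a label placed so that a failure unwinds exactly the steps that succeeded before it, in reverse order, with every cleanup guarded by the same capability test as its init. A generic sketch of the shape (all names hypothetical):

struct demo_dev;
int  init_a(struct demo_dev *d);  void cleanup_a(struct demo_dev *d);
int  init_b(struct demo_dev *d);  void cleanup_b(struct demo_dev *d);
int  init_c(struct demo_dev *d);

int demo_init(struct demo_dev *d)
{
        int ret;

        ret = init_a(d);
        if (ret)
                return ret;

        ret = init_b(d);
        if (ret)
                goto err_a;     /* undo only what preceded b */

        ret = init_c(d);
        if (ret)
                goto err_b;

        return 0;

err_b:
        cleanup_b(d);
err_a:
        cleanup_a(d);
        return ret;
}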
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index ee5991bd4171..08be0e4eabcd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table;
dma_addr_t dma_handle;
__le64 *mtts;
- u32 s = start_index * sizeof(u64);
u32 bt_page_size;
u32 i;
@@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
return -EINVAL;
mtts = hns_roce_table_find(hr_dev, table,
- mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+ mtt->first_seg +
+ start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
&dma_handle);
if (!mtts)
return -ENOMEM;
@@ -976,12 +976,11 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
struct device *dev = hr_dev->dev;
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
unsigned int order;
- int i, k, entry;
int npage = 0;
int ret = 0;
- int len;
+ int i;
u64 page_addr;
u64 *pages;
u32 bt_page_size;
@@ -1014,29 +1013,25 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
i = n = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- for (k = 0; k < len; ++k) {
- page_addr =
- sg_dma_address(sg) + (k << umem->page_shift);
- if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
- if (page_addr & ((1 << mtt->page_shift) - 1)) {
- dev_err(dev, "page_addr 0x%llx is not page_shift %d alignment!\n",
- page_addr, mtt->page_shift);
- ret = -EINVAL;
- goto out;
- }
- pages[i++] = page_addr;
- }
- npage++;
- if (i == bt_page_size / sizeof(u64)) {
- ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
- pages);
- if (ret)
- goto out;
- n += i;
- i = 0;
+ for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ page_addr = sg_page_iter_dma_address(&sg_iter);
+ if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
+ if (page_addr & ((1 << mtt->page_shift) - 1)) {
+ dev_err(dev,
+ "page_addr 0x%llx is not aligned to page_shift %d!\n",
+ page_addr, mtt->page_shift);
+ ret = -EINVAL;
+ goto out;
}
+ pages[i++] = page_addr;
+ }
+ npage++;
+ if (i == bt_page_size / sizeof(u64)) {
+ ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
+ if (ret)
+ goto out;
+ n += i;
+ i = 0;
}
}
@@ -1052,10 +1047,8 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr,
struct ib_umem *umem)
{
- struct scatterlist *sg;
- int i = 0, j = 0, k;
- int entry;
- int len;
+ struct sg_dma_page_iter sg_iter;
+ int i = 0, j = 0;
u64 page_addr;
u32 pbl_bt_sz;
@@ -1063,27 +1056,22 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
return 0;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- for (k = 0; k < len; ++k) {
- page_addr = sg_dma_address(sg) +
- (k << umem->page_shift);
-
- if (!hr_dev->caps.pbl_hop_num) {
- mr->pbl_buf[i++] = page_addr >> 12;
- } else if (hr_dev->caps.pbl_hop_num == 1) {
- mr->pbl_buf[i++] = page_addr;
- } else {
- if (hr_dev->caps.pbl_hop_num == 2)
- mr->pbl_bt_l1[i][j] = page_addr;
- else if (hr_dev->caps.pbl_hop_num == 3)
- mr->pbl_bt_l2[i][j] = page_addr;
-
- j++;
- if (j >= (pbl_bt_sz / 8)) {
- i++;
- j = 0;
- }
+ for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ page_addr = sg_page_iter_dma_address(&sg_iter);
+ if (!hr_dev->caps.pbl_hop_num) {
+ mr->pbl_buf[i++] = page_addr >> 12;
+ } else if (hr_dev->caps.pbl_hop_num == 1) {
+ mr->pbl_buf[i++] = page_addr;
+ } else {
+ if (hr_dev->caps.pbl_hop_num == 2)
+ mr->pbl_bt_l1[i][j] = page_addr;
+ else if (hr_dev->caps.pbl_hop_num == 3)
+ mr->pbl_bt_l2[i][j] = page_addr;
+
+ j++;
+ if (j >= (pbl_bt_sz / 8)) {
+ i++;
+ j = 0;
}
}
}
@@ -1110,8 +1098,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
goto err_free;
@@ -1220,8 +1207,8 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
}
ib_umem_release(mr->umem);
- mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
- mr_access_flags, 0);
+ mr->umem =
+ ib_umem_get(udata, start, length, mr_access_flags, 0);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
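
Both loops rewritten in this file drop the nested for_each_sg() walk with manual page_shift arithmetic in favor of the sg_dma_page_iter API, which visits the DMA-mapped region in fixed PAGE_SIZE steps no matter how large the individual scatterlist entries are. The essentials of the new iteration (hedged sketch, demo names hypothetical):

static int demo_collect_dma_pages(struct scatterlist *sgl, unsigned int nmap,
                                  u64 *pages, int max)
{
        struct sg_dma_page_iter iter;
        int i = 0;

        /* One pass per PAGE_SIZE chunk of the mapping; this replaces the
         * old sg_dma_len() / (k << page_shift) bookkeeping seen above. */
        for_each_sg_dma_page(sgl, &iter, nmap, 0) {
                if (i == max)
                        return -ENOSPC;
                pages[i++] = sg_page_iter_dma_address(&iter);
        }

        return i;
}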
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index e11c149da04d..b9b97c5e97e6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -57,24 +57,19 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
}
-struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
+ struct ib_device *ib_dev = ibpd->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = hr_dev->dev;
- struct hns_roce_pd *pd;
+ struct hns_roce_pd *pd = to_hr_pd(ibpd);
int ret;
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
if (ret) {
- kfree(pd);
dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
- return ERR_PTR(ret);
+ return ret;
}
if (context) {
@@ -83,21 +78,17 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
- kfree(pd);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
}
- return &pd->ibpd;
+ return 0;
}
EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
-int hns_roce_dealloc_pd(struct ib_pd *pd)
+void hns_roce_dealloc_pd(struct ib_pd *pd)
{
hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
- kfree(to_hr_pd(pd));
-
- return 0;
}
EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);
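
This file shows the central theme of the series: PD lifetime moves into the uverbs core. hns_roce_alloc_pd() no longer kmallocs and returns a pointer; it receives a core-allocated ib_pd (sized via the INIT_RDMA_OBJ_SIZE() entry registered in hns_roce_main.c), initializes it, and returns an errno, while hns_roce_dealloc_pd() loses both its kfree() and its return value. The callback shape, hedged (demo names hypothetical):

struct demo_pd {
        struct ib_pd ibpd;      /* embedded; its offset and the container
                                 * size are declared to the core */
        unsigned long pdn;
};

static int demo_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
                         struct ib_udata *udata)
{
        struct demo_pd *pd = container_of(ibpd, struct demo_pd, ibpd);

        /* Initialize only: the core allocated *pd and will free it after
         * dealloc_pd, which is why the driver-side kfree() calls go away. */
        pd->pdn = 0;
        return 0;
}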
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 54031c5b53fa..60cf9f03e941 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -35,6 +35,7 @@
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
@@ -209,13 +210,23 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
}
}
+ if (hr_dev->caps.sccc_entry_sz) {
+ /* Alloc memory for SCC CTX */
+ ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
+ hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "SCC CTX table get failed\n");
+ goto err_put_trrl;
+ }
+ }
+
spin_lock_irq(&qp_table->lock);
ret = radix_tree_insert(&hr_dev->qp_table_tree,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock);
if (ret) {
dev_err(dev, "QPC radix_tree_insert failed\n");
- goto err_put_trrl;
+ goto err_put_sccc;
}
atomic_set(&hr_qp->refcount, 1);
@@ -223,6 +234,11 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
return 0;
+err_put_sccc:
+ if (hr_dev->caps.sccc_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->sccc_table,
+ hr_qp->qpn);
+
err_put_trrl:
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
@@ -517,7 +533,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
- if (attr->qp_type == IB_QPT_XRC_TGT)
+ if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
return 0;
return 1;
@@ -526,7 +542,8 @@ static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
if (attr->qp_type == IB_QPT_XRC_INI ||
- attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
+ attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
+ !attr->cap.max_recv_wr)
return 0;
return 1;
@@ -541,6 +558,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_qp ucmd;
struct hns_roce_ib_create_qp_resp resp = {};
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct hns_roce_ucontext, ibucontext);
unsigned long qpn = 0;
int ret = 0;
u32 page_shift;
@@ -612,9 +631,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_rq_sge_list;
}
- hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
- ucmd.buf_addr, hr_qp->buff_size, 0,
- 0);
+ hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
+ hr_qp->buff_size, 0, 0);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
@@ -622,19 +640,19 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
+ page_shift = PAGE_SHIFT;
if (hr_dev->caps.mtt_buf_pg_sz) {
npages = (ib_umem_page_count(hr_qp->umem) +
(1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.mtt_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ (1 << hr_dev->caps.mtt_buf_pg_sz);
+ page_shift += hr_dev->caps.mtt_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages,
page_shift,
&hr_qp->mtt);
} else {
ret = hns_roce_mtt_init(hr_dev,
- ib_umem_page_count(hr_qp->umem),
- hr_qp->umem->page_shift,
- &hr_qp->mtt);
+ ib_umem_page_count(hr_qp->umem),
+ page_shift, &hr_qp->mtt);
}
if (ret) {
dev_err(dev, "hns_roce_mtt_init error for create qp\n");
@@ -652,9 +670,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
(udata->inlen >= sizeof(ucmd)) &&
(udata->outlen >= sizeof(resp)) &&
hns_roce_qp_has_sq(init_attr)) {
- ret = hns_roce_db_map_user(
- to_hr_ucontext(ib_pd->uobject->context),
- ucmd.sdb_addr, &hr_qp->sdb);
+ ret = hns_roce_db_map_user(uctx, udata, ucmd.sdb_addr,
+ &hr_qp->sdb);
if (ret) {
dev_err(dev, "sq record doorbell map failed!\n");
goto err_mtt;
@@ -668,13 +685,16 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp)) &&
hns_roce_qp_has_rq(init_attr)) {
- ret = hns_roce_db_map_user(
- to_hr_ucontext(ib_pd->uobject->context),
- ucmd.db_addr, &hr_qp->rdb);
+ ret = hns_roce_db_map_user(uctx, udata, ucmd.db_addr,
+ &hr_qp->rdb);
if (ret) {
dev_err(dev, "rq record doorbell map failed!\n");
goto err_sq_dbmap;
}
+
+ /* indicate kernel supports rq record db */
+ resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
+ hr_qp->rdb_en = 1;
}
} else {
if (init_attr->create_flags &
@@ -741,10 +761,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_mtt;
}
- hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
- GFP_KERNEL);
- hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
- GFP_KERNEL);
+ hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
+ GFP_KERNEL);
+ hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
+ GFP_KERNEL);
if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
ret = -ENOMEM;
goto err_wrid;
@@ -783,17 +803,19 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
else
hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
- if (udata && (udata->outlen >= sizeof(resp)) &&
- (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
-
- /* indicate kernel supports rq record db */
- resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
- ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (udata) {
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
if (ret)
goto err_qp;
+ }
- hr_qp->rdb_en = 1;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
+ ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
+ if (ret)
+ goto err_qp;
}
+
hr_qp->event = hns_roce_ib_qp_event;
return 0;
@@ -814,9 +836,7 @@ err_wrid:
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp)) &&
hns_roce_qp_has_rq(init_attr))
- hns_roce_db_unmap_user(
- to_hr_ucontext(ib_pd->uobject->context),
- &hr_qp->rdb);
+ hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
} else {
kfree(hr_qp->sq.wrid);
kfree(hr_qp->rq.wrid);
@@ -828,9 +848,7 @@ err_sq_dbmap:
(udata->inlen >= sizeof(ucmd)) &&
(udata->outlen >= sizeof(resp)) &&
hns_roce_qp_has_sq(init_attr))
- hns_roce_db_unmap_user(
- to_hr_ucontext(ib_pd->uobject->context),
- &hr_qp->sdb);
+ hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
@@ -969,7 +987,9 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
(attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
if (hr_qp->sdb_en == 1) {
hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+
+ if (hr_qp->rdb_en == 1)
+ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
dev_warn(dev, "flush cqe is not supported in userspace!\n");
goto out;
@@ -1133,6 +1153,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
int reserved_from_bot;
int ret;
+ mutex_init(&qp_table->scc_mutex);
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
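
An easy-to-miss behavioral fix in this file: the create-QP response copy now uses min(udata->outlen, sizeof(resp)), so a userspace library built against an older, shorter response struct still gets a successful (truncated) copy, and the copy happens for every user QP rather than only when record doorbells are supported. The general pattern (hedged sketch, demo names hypothetical):

struct demo_resp {
        __u32 cap_flags;
        __u32 reserved;
};

static int demo_reply(struct ib_udata *udata, struct demo_resp *resp)
{
        /* Copy no more than this user's ABI can hold. */
        return ib_copy_to_udata(udata, resp,
                                min(udata->outlen, sizeof(*resp)));
}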
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 12deacf442cf..a8ee2f6da967 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -78,9 +78,9 @@ static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
- struct hns_roce_mtt *hr_mtt, u64 db_rec_addr,
- struct hns_roce_srq *srq)
+static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
+ u16 xrcd, struct hns_roce_mtt *hr_mtt,
+ u64 db_rec_addr, struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct hns_roce_cmd_mailbox *mailbox;
@@ -155,7 +155,8 @@ err_out:
return ret;
}
-void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
@@ -253,8 +254,8 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
goto err_srq;
}
- srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- srq_buf_size, 0, 0);
+ srq->umem =
+ ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
@@ -281,8 +282,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
goto err_srq_mtt;
/* config index queue BA */
- srq->idx_que.umem = ib_umem_get(pd->uobject->context,
- ucmd.que_addr,
+ srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
srq->idx_que.buf_size, 0, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev,
diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile
index 5a8a7a3f28ae..8942f8229945 100644
--- a/drivers/infiniband/hw/i40iw/Makefile
+++ b/drivers/infiniband/hw/i40iw/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/net/ethernet/intel/i40e
+ccflags-y := -I $(srctree)/drivers/net/ethernet/intel/i40e
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index f27be3e7830b..d474aad62a81 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -211,7 +211,7 @@ enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
struct i40iw_sc_vsi;
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
-#define i40iw_mmiowb() mmiowb()
+#define i40iw_mmiowb() do { } while (0)
void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
#endif /* _I40IW_OSDEP_H_ */
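
The i40iw_mmiowb() change tracks the tree-wide retirement of mmiowb(): the ordering it provided is now supplied by the spinlock release itself on the architectures that needed it (the mlx4 hunk further below drops an explicit mmiowb() for the same reason), so the wrapper collapses to a no-op. The do { } while (0) spelling is the usual way to keep an empty macro statement-shaped (hedged sketch, demo names hypothetical):

#define demo_noop() do { } while (0)

static void demo_use(int cond)
{
        if (cond)
                demo_noop();    /* expands to one statement, so this
                                 * unbraced if/else parses correctly */
        else
                return;
}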
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 59e978141ad4..337410f40860 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
rcu_read_lock();
in = __in_dev_get_rcu(upper_dev);
- local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
+ if (!in->ifa_list)
+ local_ipaddr = 0;
+ else
+ local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
rcu_read_unlock();
} else {
local_ipaddr = ntohl(ifa->ifa_address);
@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
case NETDEV_UP:
/* Fall through */
case NETDEV_CHANGEADDR:
+
+ /* Skip when there is no local address to update in the ARP cache */
+ if (!local_ipaddr)
+ break;
+
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
&local_ipaddr,
@@ -601,7 +611,6 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
if (!atomic_dec_and_test(&iwpd->usecount))
return;
i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
- kfree(iwpd);
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 0b675b0742c2..a8352e3ca23d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -45,6 +45,7 @@
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
#include "i40iw.h"
/**
@@ -120,78 +121,55 @@ static int i40iw_query_port(struct ib_device *ibdev,
/**
* i40iw_alloc_ucontext - Allocate the user context data structure
- * @ibdev: device pointer from stack
+ * @uctx: Uverbs context pointer from stack
* @udata: user data
*
* This keeps track of all objects associated with a particular
* user-mode client.
*/
-static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = uctx->device;
struct i40iw_device *iwdev = to_iwdev(ibdev);
struct i40iw_alloc_ucontext_req req;
- struct i40iw_alloc_ucontext_resp uresp;
- struct i40iw_ucontext *ucontext;
+ struct i40iw_alloc_ucontext_resp uresp = {};
+ struct i40iw_ucontext *ucontext = to_ucontext(uctx);
if (ib_copy_from_udata(&req, udata, sizeof(req)))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- memset(&uresp, 0, sizeof(uresp));
uresp.max_qps = iwdev->max_qp;
uresp.max_pds = iwdev->max_pd;
uresp.wq_size = iwdev->max_qp_wr * 2;
uresp.kernel_ver = req.userspace_ver;
- ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
- if (!ucontext)
- return ERR_PTR(-ENOMEM);
-
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- kfree(ucontext);
- return ERR_PTR(-EFAULT);
- }
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
+ return -EFAULT;
INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
spin_lock_init(&ucontext->cq_reg_mem_list_lock);
INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
spin_lock_init(&ucontext->qp_reg_mem_list_lock);
- return &ucontext->ibucontext;
+ return 0;
}
/**
* i40iw_dealloc_ucontext - deallocate the user context data structure
* @context: user context created during alloc
*/
-static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
+static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
{
- struct i40iw_ucontext *ucontext = to_ucontext(context);
- unsigned long flags;
-
- spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
- if (!list_empty(&ucontext->cq_reg_mem_list)) {
- spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
- return -EBUSY;
- }
- spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- if (!list_empty(&ucontext->qp_reg_mem_list)) {
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
- return -EBUSY;
- }
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-
- kfree(ucontext);
- return 0;
+ return;
}
/**
@@ -312,16 +290,15 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
/**
* i40iw_alloc_pd - allocate protection domain
- * @ibdev: device pointer from stack
+ * @pd: PD pointer
* @context: user context created during alloc
* @udata: user data
*/
-static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct i40iw_pd *iwpd;
- struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_pd *iwpd = to_iwpd(pd);
+ struct i40iw_device *iwdev = to_iwdev(pd->device);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_alloc_pd_resp uresp;
struct i40iw_sc_pd *sc_pd;
@@ -330,19 +307,13 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
int err;
if (iwdev->closing)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
iwdev->max_pd, &pd_id, &iwdev->next_pd);
if (err) {
i40iw_pr_err("alloc resource failed\n");
- return ERR_PTR(err);
- }
-
- iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
- if (!iwpd) {
- err = -ENOMEM;
- goto free_res;
+ return err;
}
sc_pd = &iwpd->sc_pd;
@@ -361,25 +332,23 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
}
i40iw_add_pdusecount(iwpd);
- return &iwpd->ibpd;
+ return 0;
+
error:
- kfree(iwpd);
-free_res:
i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
- return ERR_PTR(err);
+ return err;
}
/**
* i40iw_dealloc_pd - deallocate pd
* @ibpd: ptr of pd to be deallocated
*/
-static int i40iw_dealloc_pd(struct ib_pd *ibpd)
+static void i40iw_dealloc_pd(struct ib_pd *ibpd)
{
struct i40iw_pd *iwpd = to_iwpd(ibpd);
struct i40iw_device *iwdev = to_iwdev(ibpd->device);
i40iw_rem_pdusecount(iwpd, iwdev);
- return 0;
}
/**
@@ -565,7 +534,8 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
struct i40iw_device *iwdev = to_iwdev(ibpd->device);
struct i40iw_cqp *iwcqp = &iwdev->cqp;
struct i40iw_qp *iwqp;
- struct i40iw_ucontext *ucontext;
+ struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct i40iw_ucontext, ibucontext);
struct i40iw_create_qp_req req;
struct i40iw_create_qp_resp uresp;
u32 qp_num = 0;
@@ -674,7 +644,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
}
iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
iwqp->user_mode = 1;
- ucontext = to_ucontext(ibpd->uobject->context);
if (req.user_wqe_buffers) {
struct i40iw_pbl *iwpbl;
@@ -1369,32 +1338,29 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
{
struct ib_umem *region = iwmr->region;
struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
- int chunk_pages, entry, i;
struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
struct i40iw_pble_info *pinfo;
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
u64 pg_addr = 0;
u32 idx = 0;
+ bool first_pg = true;
pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
- for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
- chunk_pages = sg_dma_len(sg) >> region->page_shift;
- if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
- !iwpbl->qp_mr.sq_page)
- iwpbl->qp_mr.sq_page = sg_page(sg);
- for (i = 0; i < chunk_pages; i++) {
- pg_addr = sg_dma_address(sg) +
- (i << region->page_shift);
-
- if ((entry + i) == 0)
- *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
- else if (!(pg_addr & ~iwmr->page_msk))
- *pbl = cpu_to_le64(pg_addr);
- else
- continue;
- pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
- }
+ if (iwmr->type == IW_MEMREG_TYPE_QP)
+ iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
+
+ for_each_sg_dma_page (region->sg_head.sgl, &sg_iter, region->nmap, 0) {
+ pg_addr = sg_page_iter_dma_address(&sg_iter);
+ if (first_pg)
+ *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
+ else if (!(pg_addr & ~iwmr->page_msk))
+ *pbl = cpu_to_le64(pg_addr);
+ else
+ continue;
+
+ first_pg = false;
+ pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
}
}
@@ -1831,7 +1797,8 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
{
struct i40iw_pd *iwpd = to_iwpd(pd);
struct i40iw_device *iwdev = to_iwdev(pd->device);
- struct i40iw_ucontext *ucontext;
+ struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct i40iw_ucontext, ibucontext);
struct i40iw_pble_alloc *palloc;
struct i40iw_pbl *iwpbl;
struct i40iw_mr *iwmr;
@@ -1852,7 +1819,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ region = ib_umem_get(udata, start, length, acc, 0);
if (IS_ERR(region))
return (struct ib_mr *)region;
@@ -1872,7 +1839,6 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
iwmr->region = region;
iwmr->ibmr.pd = pd;
iwmr->ibmr.device = pd->device;
- ucontext = to_ucontext(pd->uobject->context);
iwmr->page_size = PAGE_SIZE;
iwmr->page_msk = PAGE_MASK;
@@ -2139,9 +2105,8 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
static ssize_t hw_rev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct i40iw_ib_device *iwibdev = container_of(dev,
- struct i40iw_ib_device,
- ibdev.dev);
+ struct i40iw_ib_device *iwibdev =
+ rdma_device_to_drv_device(dev, struct i40iw_ib_device, ibdev);
u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
return sprintf(buf, "%x\n", hw_rev);
@@ -2751,6 +2716,8 @@ static const struct ib_device_ops i40iw_dev_ops = {
.query_qp = i40iw_query_qp,
.reg_user_mr = i40iw_reg_user_mr,
.req_notify_cq = i40iw_req_notify_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),
};
/**
@@ -2763,7 +2730,7 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
struct net_device *netdev = iwdev->netdev;
struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
- iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
+ iwibdev = ib_alloc_device(i40iw_ib_device, ibdev);
if (!iwibdev) {
i40iw_pr_err("iwdev == NULL\n");
return NULL;
@@ -2868,7 +2835,7 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
iwibdev = iwdev->iwibdev;
rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
iwibdev->ibdev.driver_id = RDMA_DRIVER_I40IW;
- ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", NULL);
+ ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
if (ret)
goto error;
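
Throughout this file, dereferences of pd->uobject->context give way to rdma_udata_to_drv_context(), which recovers the driver's ucontext from the udata itself and yields NULL for kernel-side callers that pass no udata. Roughly how such a lookup behaves, as a simplified illustration rather than the real <rdma/uverbs_ioctl.h> macro (demo names hypothetical; demo_udata_to_ucontext() stands in for the core's internal lookup):

struct demo_ucontext {
        struct ib_ucontext ibucontext;
        /* driver-private state ... */
};

struct ib_ucontext *demo_udata_to_ucontext(struct ib_udata *udata);

static struct demo_ucontext *demo_get_uctx(struct ib_udata *udata)
{
        struct ib_ucontext *uctx;

        if (!udata)
                return NULL;    /* kernel consumers: no user context */

        uctx = demo_udata_to_ucontext(udata);
        return container_of(uctx, struct demo_ucontext, ibucontext);
}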
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index 4e9936731867..fc01deac1d3c 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,7 +1,6 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
- depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
select NET_VENDOR_MELLANOX
select MLX4_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 782499abcd98..2a0b59a4b6eb 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
unsigned long flags;
for (i = 0 ; i < dev->num_ports; i++) {
- cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
det = &sriov->alias_guid.ports_guid[i];
+ cancel_delayed_work_sync(&det->alias_guid_work);
spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
while (!list_empty(&det->cb_list)) {
cb_ctx = list_entry(det->cb_list.next,
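
The alias_GUID fix swaps cancel_delayed_work() for cancel_delayed_work_sync(): the plain cancel only removes a pending item and returns immediately, so a handler already running could still touch the per-port state being torn down; the _sync variant waits for any in-flight execution to finish first. The teardown pattern it enforces (hedged sketch, demo names hypothetical):

struct demo_port {
        struct delayed_work dwork;
        void *state;            /* also touched by the work handler */
};

static void demo_port_teardown(struct demo_port *p)
{
        /* Unlike cancel_delayed_work(), this waits for a running
         * handler, so freeing p->state below cannot race with it. */
        cancel_delayed_work_sync(&p->dwork);
        kfree(p->state);
}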
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index fedaf8260105..8c79a480f2b7 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -39,7 +39,7 @@
#include "mlx4_ib.h"
-#define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ)
+#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ)
struct id_map_entry {
struct rb_node node;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 43512347b4f0..03ac72339dd2 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -134,16 +134,16 @@ static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
-static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
- struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
- u64 buf_addr, int cqe)
+static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
+ struct mlx4_ib_cq_buf *buf,
+ struct ib_umem **umem, u64 buf_addr, int cqe)
{
int err;
int cqe_size = dev->dev->caps.cqe_size;
int shift;
int n;
- *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
+ *umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -190,7 +190,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
return ERR_PTR(-EINVAL);
- cq = kmalloc(sizeof *cq, GFP_KERNEL);
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq)
return ERR_PTR(-ENOMEM);
@@ -213,14 +213,13 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
}
buf_addr = (void *)(unsigned long)ucmd.buf_addr;
-
- err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
+ err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
ucmd.buf_addr, entries);
if (err)
goto err_cq;
- err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
- &cq->db);
+ err = mlx4_ib_db_map_user(to_mucontext(context), udata,
+ ucmd.db_addr, &cq->db);
if (err)
goto err_mtt;
@@ -336,7 +335,7 @@ static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq
if (!cq->resize_buf)
return -ENOMEM;
- err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
+ err = mlx4_ib_get_cq_umem(dev, udata, &cq->resize_buf->buf,
&cq->resize_umem, ucmd.buf_addr, entries);
if (err) {
kfree(cq->resize_buf);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index c51740986367..3aab71b29ce8 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -41,7 +41,8 @@ struct mlx4_ib_user_db_page {
int refcnt;
};
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
+int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context,
+ struct ib_udata *udata, unsigned long virt,
struct mlx4_db *db)
{
struct mlx4_ib_user_db_page *page;
@@ -61,8 +62,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1f15ec3e2b83..733f7bbd5901 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1076,17 +1076,18 @@ out:
return err;
}
-static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = uctx->device;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- struct mlx4_ib_ucontext *context;
+ struct mlx4_ib_ucontext *context = to_mucontext(uctx);
struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
struct mlx4_ib_alloc_ucontext_resp resp;
int err;
if (!dev->ib_active)
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
resp_v3.qp_tab_size = dev->dev->caps.num_qps;
@@ -1100,15 +1101,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
resp.cqe_size = dev->dev->caps.cqe_size;
}
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
- if (err) {
- kfree(context);
- return ERR_PTR(err);
- }
+ if (err)
+ return err;
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -1123,21 +1118,17 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
if (err) {
mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
- kfree(context);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
- return &context->ibucontext;
+ return err;
}
-static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
- kfree(context);
-
- return 0;
}
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
@@ -1186,38 +1177,27 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
}
}
-static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct mlx4_ib_pd *pd;
+ struct mlx4_ib_pd *pd = to_mpd(ibpd);
+ struct ib_device *ibdev = ibpd->device;
int err;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
- if (err) {
- kfree(pd);
- return ERR_PTR(err);
- }
+ if (err)
+ return err;
- if (context)
- if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
- mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
- kfree(pd);
- return ERR_PTR(-EFAULT);
- }
- return &pd->ibpd;
+ if (context && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+ mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
+ return -EFAULT;
+ }
+ return 0;
}
-static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
+static void mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
- kfree(pd);
-
- return 0;
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
@@ -2043,7 +2023,7 @@ static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
- container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);
@@ -2052,7 +2032,7 @@ static ssize_t hw_rev_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
- container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
return sprintf(buf, "%x\n", dev->dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -2061,7 +2041,8 @@ static ssize_t board_id_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
- container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
+
return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
dev->dev->board_id);
}
@@ -2579,6 +2560,8 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
.req_notify_cq = mlx4_ib_arm_cq,
.rereg_user_mr = mlx4_ib_rereg_user_mr,
.resize_cq = mlx4_ib_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
};
static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
@@ -2634,7 +2617,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (num_ports == 0)
return NULL;
- ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
+ ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
if (!ibdev) {
dev_err(&dev->persist->pdev->dev,
"Device struct alloc failed\n");
@@ -2856,7 +2839,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4;
- if (ib_register_device(&ibdev->ib_dev, "mlx4_%d", NULL))
+ if (ib_register_device(&ibdev->ib_dev, "mlx4_%d"))
goto err_diag_counters;
if (mlx4_ib_mad_init(ibdev))
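
Two core-API cleanups show up in this file: ib_alloc_device() is now a type-safe macro taking the driver struct and the name of its embedded ib_device member, so the sizeof-plus-cast call disappears, and ib_register_device() drops its unused third parameter. The allocation side, sketched (demo names hypothetical):

struct demo_ib_dev {
        struct ib_device ib_dev;        /* embedded; offset known to core */
        int priv_state;
};

static struct demo_ib_dev *demo_add(void)
{
        /* Allocates the full container and returns it already typed;
         * no (struct demo_ib_dev *) cast is needed any more. */
        return ib_alloc_device(demo_ib_dev, ib_dev);
}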
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e491f3eda6e7..60dc1347c5ab 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -722,7 +722,8 @@ static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
+int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context,
+ struct ib_udata *udata, unsigned long virt,
struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index c7c85c22e4e3..395379a480cb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -367,7 +367,7 @@ end:
return block_shift;
}
-static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
+static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
u64 length, u64 virt_addr,
int access_flags)
{
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
up_read(&current->mm->mmap_sem);
}
- return ib_umem_get(context, start, length, access_flags, 0);
+ return ib_umem_get(udata, start, length, access_flags, 0);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -415,8 +415,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
- virt_addr, access_flags);
+ mr->umem =
+ mlx4_get_umem_mr(udata, start, length, virt_addr, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -505,9 +505,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
- mmr->umem =
- mlx4_get_umem_mr(mr->uobject->context, start, length,
- virt_addr, mr_access_flags);
+ mmr->umem = mlx4_get_umem_mr(udata, start, length, virt_addr,
+ mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 971e9a9ebdaf..9426936460f8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -41,6 +41,7 @@
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
+#include <rdma/uverbs_ioctl.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>
@@ -52,7 +53,8 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
-static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state);
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
+ struct ib_udata *udata);
enum {
MLX4_IB_ACK_REQ_FREQ = 8,
@@ -863,6 +865,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
int err;
struct mlx4_ib_sqp *sqp = NULL;
struct mlx4_ib_qp *qp;
+ struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
struct mlx4_ib_cq *mcq;
unsigned long flags;
@@ -1015,9 +1019,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
}
- qp->umem = ib_umem_get(pd->uobject->context,
- (src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
- ucmd.wq.buf_addr, qp->buf_size, 0, 0);
+ qp->umem =
+ ib_umem_get(udata,
+ (src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
+ ucmd.wq.buf_addr,
+ qp->buf_size, 0, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -1035,9 +1041,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_mtt;
if (qp_has_rq(init_attr)) {
- err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+ err = mlx4_ib_db_map_user(
+ context, udata,
(src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
- ucmd.wq.db_addr, &qp->db);
+ ucmd.wq.db_addr,
+ &qp->db);
if (err)
goto err_mtt;
}
@@ -1108,8 +1116,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
}
} else if (src == MLX4_IB_RWQ_SRC) {
- err = mlx4_ib_alloc_wqn(to_mucontext(pd->uobject->context), qp,
- range_size, &qpn);
+ err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
if (err)
goto err_wrid;
} else {
@@ -1180,8 +1187,7 @@ err_qpn:
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qpn, 1);
else if (src == MLX4_IB_RWQ_SRC)
- mlx4_ib_release_wqn(to_mucontext(pd->uobject->context),
- qp, 0);
+ mlx4_ib_release_wqn(context, qp, 0);
else
mlx4_qp_release_range(dev->dev, qpn, 1);
}
@@ -1191,7 +1197,7 @@ err_proxy:
err_wrid:
if (udata) {
if (qp_has_rq(init_attr))
- mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
+ mlx4_ib_db_unmap_user(context, &qp->db);
} else {
kvfree(qp->sq.wrid);
kvfree(qp->rq.wrid);
@@ -1938,7 +1944,8 @@ static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
* Go over all RSS QP's childes (WQs) and apply their HW state according to
* their logic state if the RSS QP is the first RSS QP associated for the WQ.
*/
-static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
+static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num,
+ struct ib_udata *udata)
{
int err = 0;
int i;
@@ -1962,7 +1969,7 @@ static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
}
wq->port = port_num;
if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
- err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY);
+ err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata);
if (err) {
mutex_unlock(&wq->mutex);
break;
@@ -1984,7 +1991,8 @@ static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
if ((wq->rss_usecnt == 1) &&
(ibwq->state == IB_WQS_RDY))
- if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
+ if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET,
+ udata))
pr_warn("failed to reverse WQN=0x%06x\n",
ibwq->wq_num);
wq->rss_usecnt--;
@@ -1996,7 +2004,8 @@ static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
return err;
}
-static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl)
+static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl,
+ struct ib_udata *udata)
{
int i;
@@ -2007,7 +2016,7 @@ static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl)
mutex_lock(&wq->mutex);
if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
- if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
+ if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata))
pr_warn("failed to reverse WQN=%x\n",
ibwq->wq_num);
wq->rss_usecnt--;
@@ -2039,9 +2048,10 @@ static void fill_qp_rss_context(struct mlx4_qp_context *context,
static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
const struct ib_qp_attr *attr, int attr_mask,
- enum ib_qp_state cur_state, enum ib_qp_state new_state)
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ struct ib_udata *udata)
{
- struct ib_uobject *ibuobject;
struct ib_srq *ibsrq;
const struct ib_gid_attr *gid_attr = NULL;
struct ib_rwq_ind_table *rwq_ind_tbl;
@@ -2050,6 +2060,8 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
struct mlx4_ib_qp *qp;
struct mlx4_ib_pd *pd;
struct mlx4_ib_cq *send_cq, *recv_cq;
+ struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
struct mlx4_qp_context *context;
enum mlx4_qp_optpar optpar = 0;
int sqd_event;
@@ -2061,7 +2073,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
struct ib_wq *ibwq;
ibwq = (struct ib_wq *)src;
- ibuobject = ibwq->uobject;
ibsrq = NULL;
rwq_ind_tbl = NULL;
qp_type = IB_QPT_RAW_PACKET;
@@ -2072,7 +2083,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
struct ib_qp *ibqp;
ibqp = (struct ib_qp *)src;
- ibuobject = ibqp->uobject;
ibsrq = ibqp->srq;
rwq_ind_tbl = ibqp->rwq_ind_tbl;
qp_type = ibqp->qp_type;
@@ -2157,11 +2167,9 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->param3 |= cpu_to_be32(1 << 30);
}
- if (ibuobject)
+ if (ucontext)
context->usr_page = cpu_to_be32(
- mlx4_to_hw_uar_index(dev->dev,
- to_mucontext(ibuobject->context)
- ->uar.index));
+ mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index));
else
context->usr_page = cpu_to_be32(
mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
@@ -2293,7 +2301,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
/* Set "fast registration enabled" for all kernel QPs */
- if (!ibuobject)
+ if (!ucontext)
context->params1 |= cpu_to_be32(1 << 11);
if (attr_mask & IB_QP_RNR_RETRY) {
@@ -2430,7 +2438,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
else
sqd_event = 0;
- if (!ibuobject &&
+ if (!ucontext &&
cur_state == IB_QPS_RESET &&
new_state == IB_QPS_INIT)
context->rlkey_roce_mode |= (1 << 4);
@@ -2441,7 +2449,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
* headroom is stamped so that the hardware doesn't start
* processing stale work requests.
*/
- if (!ibuobject &&
+ if (!ucontext &&
cur_state == IB_QPS_RESET &&
new_state == IB_QPS_INIT) {
struct mlx4_wqe_ctrl_seg *ctrl;
@@ -2505,7 +2513,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
* entries and reinitialize the QP.
*/
if (new_state == IB_QPS_RESET) {
- if (!ibuobject) {
+ if (!ucontext) {
mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
ibsrq ? to_msrq(ibsrq) : NULL);
if (send_cq != recv_cq)
@@ -2731,16 +2739,17 @@ static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) {
- err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num);
+ err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num,
+ udata);
if (err)
goto out;
}
err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
- cur_state, new_state);
+ cur_state, new_state, udata);
if (ibqp->rwq_ind_tbl && err)
- bring_down_rss_rwqs(ibqp->rwq_ind_tbl);
+ bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata);
if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
attr->port_num = 1;
@@ -3735,12 +3744,6 @@ out:
writel_relaxed(qp->doorbell_qpn,
to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
- /*
- * Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order.
- */
- mmiowb();
-
stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
qp->sq_next_wqe = ind;
@@ -4118,7 +4121,8 @@ static int ib_wq2qp_state(enum ib_wq_state state)
}
}
-static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state)
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
+ struct ib_udata *udata)
{
struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
enum ib_qp_state qp_cur_state;
@@ -4142,7 +4146,8 @@ static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state)
attr_mask = IB_QP_PORT;
err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
- attr_mask, IB_QPS_RESET, IB_QPS_INIT);
+ attr_mask, IB_QPS_RESET, IB_QPS_INIT,
+ udata);
if (err) {
pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
ibwq->wq_num);
@@ -4154,12 +4159,13 @@ static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state)
attr_mask = 0;
err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
- qp_cur_state, qp_new_state);
+ qp_cur_state, qp_new_state, udata);
if (err && (qp_cur_state == IB_QPS_INIT)) {
qp_new_state = IB_QPS_RESET;
if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
- attr_mask, IB_QPS_INIT, IB_QPS_RESET)) {
+ attr_mask, IB_QPS_INIT, IB_QPS_RESET,
+ udata)) {
pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n",
ibwq->wq_num);
qp_new_state = IB_QPS_INIT;
@@ -4222,7 +4228,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
* WQ, so we can apply its port on the WQ.
*/
if (qp->rss_usecnt)
- err = _mlx4_ib_modify_wq(ibwq, new_state);
+ err = _mlx4_ib_modify_wq(ibwq, new_state, udata);
if (!err)
ibwq->state = new_state;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 4456f1b8921d..381cf899bcef 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -37,6 +37,7 @@
#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
+#include <rdma/uverbs_ioctl.h>
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
@@ -73,6 +74,8 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx4_ib_ucontext, ibucontext);
struct mlx4_ib_srq *srq;
struct mlx4_wqe_srq_next_seg *next;
struct mlx4_wqe_data_seg *scatter;
@@ -113,8 +116,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
- srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
if (IS_ERR(srq->umem)) {
err = PTR_ERR(srq->umem);
goto err_srq;
@@ -129,8 +131,8 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
- err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
- ucmd.db_addr, &srq->db);
+ err = mlx4_ib_db_map_user(ucontext, udata, ucmd.db_addr,
+ &srq->db);
if (err)
goto err_mtt;
} else {
@@ -203,7 +205,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
err_wrid:
if (udata)
- mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
+ mlx4_ib_db_unmap_user(ucontext, &srq->db);
else
kvfree(srq->wrid);
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 0440966bc6ec..8d651c05de62 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,7 +1,6 @@
config MLX5_INFINIBAND
tristate "Mellanox 5th generation network adapters (ConnectX series) support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
- depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n
---help---
This driver provides low-level InfiniBand support for
Mellanox Connect-IB PCI Express host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 7e4e358a4fd8..8ba439fabf7f 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -389,19 +389,19 @@ void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
dev->port[port_num].dbg_cc_params = NULL;
}
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
{
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
struct mlx5_core_dev *mdev;
int i;
if (!mlx5_debugfs_root)
- goto out;
+ return;
/* Takes a 1-based port number */
mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
if (!mdev)
- goto out;
+ return;
if (!MLX5_CAP_GEN(mdev, cc_query_allowed) ||
!MLX5_CAP_GEN(mdev, cc_modify_allowed))
@@ -415,8 +415,6 @@ int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
dbg_cc_params->root = debugfs_create_dir("cc_params",
mdev->priv.dbg_root);
- if (!dbg_cc_params->root)
- goto err;
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
dbg_cc_params->params[i].offset = i;
@@ -427,14 +425,11 @@ int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
0600, dbg_cc_params->root,
&dbg_cc_params->params[i],
&dbg_cc_fops);
- if (!dbg_cc_params->params[i].dentry)
- goto err;
}
put_mdev:
mlx5_ib_put_native_port_mdev(dev, port_num + 1);
-out:
- return 0;
+ return;
err:
mlx5_ib_warn(dev, "cong debugfs failure\n");
@@ -445,5 +440,5 @@ err:
* We don't want to fail driver if debugfs failed to initialize,
* so we are not forwarding error to the user.
*/
- return 0;
+ return;
}
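
The conversion of mlx5_ib_init_cong_debugfs() to return void leans on the debugfs convention this kernel cycle is moving toward: callers are not expected to check the return values of debugfs_create_dir()/debugfs_create_file(), and a debugfs failure must never fail the driver. A sketch of the resulting idiom, with hypothetical foo_* names:

	#include <linux/debugfs.h>

	struct foo_dev {
		u32 cur;
	};

	/* Debugfs creation errors are deliberately ignored; the driver
	 * behaves the same whether or not the directory was created. */
	static void foo_init_debugfs(struct foo_dev *dev, struct dentry *parent)
	{
		struct dentry *dir = debugfs_create_dir("foo_params", parent);

		debugfs_create_u32("cur", 0400, dir, &dev->cur);
	}
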
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 90f1b0bae5b5..18704e503508 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -187,8 +187,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
wqe_ctr = be16_to_cpu(cqe->wqe_counter);
wc->wr_id = srq->wrid[wqe_ctr];
mlx5_ib_free_srq_wqe(srq, wqe_ctr);
- if (msrq && atomic_dec_and_test(&msrq->refcount))
- complete(&msrq->free);
+ if (msrq)
+ mlx5_core_res_put(&msrq->common);
}
} else {
wq = &qp->rq;
@@ -707,15 +707,15 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*cqe_size = ucmd.cqe_size;
- cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
- entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ cq->buf.umem =
+ ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
+ IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
}
- err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
+ err = mlx5_ib_db_map_user(to_mucontext(context), udata, ucmd.db_addr,
&cq->db);
if (err)
goto err_umem;
@@ -1111,7 +1111,6 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
struct ib_umem *umem;
int err;
int npages;
- struct ib_ucontext *context = cq->buf.umem->context;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (err)
@@ -1124,7 +1123,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
return -EINVAL;
- umem = ib_umem_get(context, ucmd.buf_addr,
+ umem = ib_umem_get(udata, ucmd.buf_addr,
(size_t)ucmd.cqe_size * entries,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem)) {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 5a588f3cfb1b..9e08df7914aa 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -8,6 +8,7 @@
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
@@ -17,12 +18,32 @@
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
+enum devx_obj_flags {
+ DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
+ DEVX_OBJ_FLAGS_DCT = 1 << 1,
+};
+
+struct devx_async_data {
+ struct mlx5_ib_dev *mdev;
+ struct list_head list;
+ struct ib_uobject *fd_uobj;
+ struct mlx5_async_work cb_work;
+ u16 cmd_out_len;
+ /* must be last field in this structure */
+ struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
+};
+
#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
struct mlx5_core_dev *mdev;
u64 obj_id;
u32 dinlen; /* destroy inbox length */
u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
+ u32 flags;
+ union {
+ struct mlx5_ib_devx_mr devx_mr;
+ struct mlx5_core_dct core_dct;
+ };
};
struct devx_umem {
@@ -330,7 +351,6 @@ static u64 devx_get_obj_id(const void *in)
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
MLX5_GET(arm_rq_in, in, srq_number));
break;
- case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
MLX5_GET(drain_dct_in, in, dctn));
@@ -601,7 +621,6 @@ static bool devx_is_obj_modify_cmd(const void *in)
case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_ARM_XRC_SRQ:
case MLX5_CMD_OP_ARM_RQ:
- case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
@@ -1011,6 +1030,92 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
}
}
+static int devx_handle_mkey_indirect(struct devx_obj *obj,
+ struct mlx5_ib_dev *dev,
+ void *in, void *out)
+{
+ struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
+ struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
+ unsigned long flags;
+ struct mlx5_core_mkey *mkey;
+ void *mkc;
+ u8 key;
+ int err;
+
+ mkey = &devx_mr->mmkey;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ key = MLX5_GET(mkc, mkc, mkey_7_0);
+ mkey->key = mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, out, mkey_index)) | key;
+ mkey->type = MLX5_MKEY_INDIRECT_DEVX;
+ mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+ mkey->size = MLX5_GET64(mkc, mkc, len);
+ mkey->pd = MLX5_GET(mkc, mkc, pd);
+ devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+
+ write_lock_irqsave(&table->lock, flags);
+ err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key),
+ mkey);
+ write_unlock_irqrestore(&table->lock, flags);
+ return err;
+}
+
+static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
+ struct devx_obj *obj,
+ void *in, int in_len)
+{
+ int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
+ MLX5_FLD_SZ_BYTES(create_mkey_in,
+ memory_key_mkey_entry);
+ void *mkc;
+ u8 access_mode;
+
+ if (in_len < min_len)
+ return -EINVAL;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
+ access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
+ access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
+ return 0;
+ }
+
+ MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ return 0;
+}
+
+static void devx_free_indirect_mkey(struct rcu_head *rcu)
+{
+ kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
+}
+
+/* This function, which deletes the mkey from the radix tree, must be
+ * called before destroying the underlying mkey. Otherwise a race might
+ * occur: another thread could look up the same mkey before this one is
+ * deleted and would then fail when inserting its own data into the tree.
+ *
+ * Note:
+ * An error on destroy is not expected unless some other indirect mkey
+ * points to this one. In a kernel cleanup flow it is simply destroyed by
+ * the iterative destruction call. In a user flow, if the application did
+ * not close objects in the expected order, that is its own problem; the
+ * mkey is no longer part of the tree, so in both cases the kernel is safe.
+ */
+static void devx_cleanup_mkey(struct devx_obj *obj)
+{
+ struct mlx5_mkey_table *table = &obj->mdev->priv.mkey_table;
+ unsigned long flags;
+
+ write_lock_irqsave(&table->lock, flags);
+ radix_tree_delete(&table->tree, mlx5_base_mkey(obj->devx_mr.mmkey.key));
+ write_unlock_irqrestore(&table->lock, flags);
+}
+
static int devx_obj_cleanup(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
@@ -1018,10 +1123,25 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
struct devx_obj *obj = uobject->object;
int ret;
- ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
+ devx_cleanup_mkey(obj);
+
+ if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+ else
+ ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+ sizeof(out));
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ struct mlx5_ib_dev *dev = to_mdev(uobject->context->device);
+
+ call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
+ devx_free_indirect_mkey);
+ return ret;
+ }
+
kfree(obj);
return ret;
}
@@ -1032,10 +1152,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
int cmd_out_len = uverbs_attr_get_len(attrs,
MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
+ int cmd_in_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
void *cmd_out;
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
- struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj;
@@ -1060,11 +1183,25 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
return -ENOMEM;
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
- devx_set_umem_valid(cmd_in);
+ if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
+ err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
+ if (err)
+ goto obj_free;
+ } else {
+ devx_set_umem_valid(cmd_in);
+ }
+
+ if (opcode == MLX5_CMD_OP_CREATE_DCT) {
+ obj->flags |= DEVX_OBJ_FLAGS_DCT;
+ err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
+ cmd_in, cmd_in_len,
+ cmd_out, cmd_out_len);
+ } else {
+ err = mlx5_cmd_exec(dev->mdev, cmd_in,
+ cmd_in_len,
+ cmd_out, cmd_out_len);
+ }
- err = mlx5_cmd_exec(dev->mdev, cmd_in,
- uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
- cmd_out, cmd_out_len);
if (err)
goto obj_free;
@@ -1074,15 +1211,28 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
&obj_id);
WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
+ if (err)
+ goto obj_destroy;
+ }
+
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
if (err)
- goto obj_destroy;
+ goto err_copy;
obj->obj_id = get_enc_obj_id(opcode, obj_id);
return 0;
+err_copy:
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
+ devx_cleanup_mkey(obj);
obj_destroy:
- mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+ else
+ mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+ sizeof(out));
obj_free:
kfree(obj);
return err;
@@ -1096,8 +1246,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
- struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
- struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
void *cmd_out;
int err;
int uid;
@@ -1137,11 +1288,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
- struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
void *cmd_out;
int err;
int uid;
- struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
uid = devx_get_uid(c, cmd_in);
if (uid < 0)
@@ -1168,6 +1320,154 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
cmd_out, cmd_out_len);
}
+struct devx_async_event_queue {
+ spinlock_t lock;
+ wait_queue_head_t poll_wait;
+ struct list_head event_list;
+ atomic_t bytes_in_use;
+ u8 is_destroyed:1;
+};
+
+struct devx_async_cmd_event_file {
+ struct ib_uobject uobj;
+ struct devx_async_event_queue ev_queue;
+ struct mlx5_async_ctx async_ctx;
+};
+
+static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
+{
+ spin_lock_init(&ev_queue->lock);
+ INIT_LIST_HEAD(&ev_queue->event_list);
+ init_waitqueue_head(&ev_queue->poll_wait);
+ atomic_set(&ev_queue->bytes_in_use, 0);
+ ev_queue->is_destroyed = 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct devx_async_cmd_event_file *ev_file;
+
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
+ struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+
+ ev_file = container_of(uobj, struct devx_async_cmd_event_file,
+ uobj);
+ devx_init_event_queue(&ev_file->ev_queue);
+ mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
+ return 0;
+}
+
+static void devx_query_callback(int status, struct mlx5_async_work *context)
+{
+ struct devx_async_data *async_data =
+ container_of(context, struct devx_async_data, cb_work);
+ struct ib_uobject *fd_uobj = async_data->fd_uobj;
+ struct devx_async_cmd_event_file *ev_file;
+ struct devx_async_event_queue *ev_queue;
+ unsigned long flags;
+
+ ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
+ uobj);
+ ev_queue = &ev_file->ev_queue;
+
+ spin_lock_irqsave(&ev_queue->lock, flags);
+ list_add_tail(&async_data->list, &ev_queue->event_list);
+ spin_unlock_irqrestore(&ev_queue->lock, flags);
+
+ wake_up_interruptible(&ev_queue->poll_wait);
+ fput(fd_uobj->object);
+}
+
+#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
+ u16 cmd_out_len;
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct ib_uobject *fd_uobj;
+ int err;
+ int uid;
+ struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
+ struct devx_async_cmd_event_file *ev_file;
+ struct devx_async_data *async_data;
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ if (!devx_is_obj_query_cmd(cmd_in))
+ return -EINVAL;
+
+ err = uverbs_get_const(&cmd_out_len, attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
+ if (err)
+ return err;
+
+ if (!devx_is_valid_obj_id(uobj, cmd_in))
+ return -EINVAL;
+
+ fd_uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
+ if (IS_ERR(fd_uobj))
+ return PTR_ERR(fd_uobj);
+
+ ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
+ uobj);
+
+ if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
+ MAX_ASYNC_BYTES_IN_USE) {
+ atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
+ return -EAGAIN;
+ }
+
+ async_data = kvzalloc(struct_size(async_data, hdr.out_data,
+ cmd_out_len), GFP_KERNEL);
+ if (!async_data) {
+ err = -ENOMEM;
+ goto sub_bytes;
+ }
+
+ err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
+ if (err)
+ goto free_async;
+
+ async_data->cmd_out_len = cmd_out_len;
+ async_data->mdev = mdev;
+ async_data->fd_uobj = fd_uobj;
+
+ get_file(fd_uobj->object);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
+ uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
+ async_data->hdr.out_data,
+ async_data->cmd_out_len,
+ devx_query_callback, &async_data->cb_work);
+
+ if (err)
+ goto cb_err;
+
+ return 0;
+
+cb_err:
+ fput(fd_uobj->object);
+free_async:
+ kvfree(async_data);
+sub_bytes:
+ atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
+ return err;
+}
+
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
struct uverbs_attr_bundle *attrs,
struct devx_umem *obj)
@@ -1195,7 +1495,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
+ obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
@@ -1252,7 +1552,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
u32 obj_id;
- struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
int err;
@@ -1313,6 +1614,123 @@ static int devx_umem_cleanup(struct ib_uobject *uobject,
return 0;
}
+static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
+ struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+ struct devx_async_data *event;
+ int ret = 0;
+ size_t eventsz;
+
+ spin_lock_irq(&ev_queue->lock);
+
+ while (list_empty(&ev_queue->event_list)) {
+ spin_unlock_irq(&ev_queue->lock);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(
+ ev_queue->poll_wait,
+ (!list_empty(&ev_queue->event_list) ||
+ ev_queue->is_destroyed))) {
+ return -ERESTARTSYS;
+ }
+
+ if (list_empty(&ev_queue->event_list) &&
+ ev_queue->is_destroyed)
+ return -EIO;
+
+ spin_lock_irq(&ev_queue->lock);
+ }
+
+ event = list_entry(ev_queue->event_list.next,
+ struct devx_async_data, list);
+ eventsz = event->cmd_out_len +
+ sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
+
+ if (eventsz > count) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -ENOSPC;
+ }
+
+ list_del(ev_queue->event_list.next);
+ spin_unlock_irq(&ev_queue->lock);
+
+ if (copy_to_user(buf, &event->hdr, eventsz))
+ ret = -EFAULT;
+ else
+ ret = eventsz;
+
+ atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
+ kvfree(event);
+ return ret;
+}
+
+static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
+{
+ struct ib_uobject *uobj = filp->private_data;
+ struct devx_async_cmd_event_file *comp_ev_file = container_of(
+ uobj, struct devx_async_cmd_event_file, uobj);
+ struct devx_async_data *entry, *tmp;
+
+ spin_lock_irq(&comp_ev_file->ev_queue.lock);
+ list_for_each_entry_safe(entry, tmp,
+ &comp_ev_file->ev_queue.event_list, list)
+ kvfree(entry);
+ spin_unlock_irq(&comp_ev_file->ev_queue.lock);
+
+ uverbs_close_fd(filp);
+ return 0;
+}
+
+static __poll_t devx_async_cmd_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
+ struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+ __poll_t pollflags = 0;
+
+ poll_wait(filp, &ev_queue->poll_wait, wait);
+
+ spin_lock_irq(&ev_queue->lock);
+ if (ev_queue->is_destroyed)
+ pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ else if (!list_empty(&ev_queue->event_list))
+ pollflags = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irq(&ev_queue->lock);
+
+ return pollflags;
+}
+
+const struct file_operations devx_async_cmd_event_fops = {
+ .owner = THIS_MODULE,
+ .read = devx_async_cmd_event_read,
+ .poll = devx_async_cmd_event_poll,
+ .release = devx_async_cmd_event_close,
+ .llseek = no_llseek,
+};
+
+static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ struct devx_async_cmd_event_file *comp_ev_file =
+ container_of(uobj, struct devx_async_cmd_event_file,
+ uobj);
+ struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+
+ spin_lock_irq(&ev_queue->lock);
+ ev_queue->is_destroyed = 1;
+ spin_unlock_irq(&ev_queue->lock);
+
+ if (why == RDMA_REMOVE_DRIVER_REMOVE)
+ wake_up_interruptible(&ev_queue->poll_wait);
+
+ mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
+ return 0;
+};
+
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_DEVX_UMEM_REG,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
@@ -1423,6 +1841,27 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
UA_MANDATORY));
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
+ UVERBS_IDR_ANY_OBJECT,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
+ u16, UA_MANDATORY),
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
@@ -1433,13 +1872,30 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
- &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
+ devx_hot_unplug_async_cmd_event_file,
+ &devx_async_cmd_event_fops, "[devx_async_cmd]",
+ O_RDONLY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
+
static bool devx_is_supported(struct ib_device *device)
{
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -1457,5 +1913,8 @@ const struct uapi_definition mlx5_ib_devx_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_DEVX_UMEM,
UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
{},
};
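
Two details of the async command FD above are worth isolating. The completion callback queues the event and wakes poll_wait, while read() sleeps in wait_event_interruptible() until an event arrives or the queue is torn down; and the number of response bytes in flight is bounded by an atomic counter, reserved before the command is issued and released on failure or once read() drains the event. The reservation logic in isolation, as a sketch:

	/* Bound outstanding response bytes; roll the add back when over
	 * budget (the caller then returns -EAGAIN, as the handler above
	 * does). */
	static int reserve_async_bytes(atomic_t *bytes_in_use, int nbytes)
	{
		if (atomic_add_return(nbytes, bytes_in_use) >
		    MAX_ASYNC_BYTES_IN_USE) {
			atomic_sub(nbytes, bytes_in_use);
			return -EAGAIN;
		}
		return 0;
	}
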
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index a0e4e6ddb71a..8f4e5f22b84c 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -43,7 +43,8 @@ struct mlx5_ib_user_db_page {
int refcnt;
};
-int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
+ struct ib_udata *udata, unsigned long virt,
struct mlx5_db *db)
{
struct mlx5_ib_user_db_page *page;
@@ -63,8 +64,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 4700cffb5a00..b8639ac71336 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -57,7 +57,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
else
profile = &vf_rep_profile;
- ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
+ ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
if (!ibdev)
return -ENOMEM;
@@ -65,8 +65,10 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
ibdev->mdev = dev;
ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
MLX5_CAP_GEN(dev, num_vhca_ports));
- if (!__mlx5_ib_add(ibdev, profile))
+ if (!__mlx5_ib_add(ibdev, profile)) {
+ ib_dealloc_device(&ibdev->ib_dev);
return -EINVAL;
+ }
rep->rep_if[REP_IB].priv = ibdev;
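
Two things change in this ib_rep.c hunk: ib_alloc_device() is now a type-safe macro taking the driver structure and the member embedding struct ib_device, and the __mlx5_ib_add() failure path releases the device instead of leaking it. The macro's conceptual shape, simplified (the real definition in rdma/ib_verbs.h also enforces that the member sits at offset zero):

	#define ib_alloc_device(drv_struct, member)                        \
		container_of(_ib_alloc_device(sizeof(struct drv_struct)), \
			     struct drv_struct, member)
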
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 581ae11e2fc9..d3dd290ae1b1 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -415,10 +415,17 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
*active_speed = IB_SPEED_EDR;
break;
case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_EDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_HDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_HDR;
@@ -535,24 +542,51 @@ out:
return err;
}
+struct mlx5_ib_vlan_info {
+ u16 vlan_id;
+ bool vlan;
+};
+
+static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
+{
+ struct mlx5_ib_vlan_info *vlan_info = data;
+
+ if (is_vlan_dev(lower_dev)) {
+ vlan_info->vlan = true;
+ vlan_info->vlan_id = vlan_dev_vlan_id(lower_dev);
+ }
+ /* We are interested only in the first-level vlan device, so
+ * always return 1 to stop iterating over next-level devices.
+ */
+ return 1;
+}
+
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ struct mlx5_ib_vlan_info vlan_info = { };
u8 roce_version = 0;
u8 roce_l3_type = 0;
- bool vlan = false;
u8 mac[ETH_ALEN];
- u16 vlan_id = 0;
if (gid) {
gid_type = attr->gid_type;
ether_addr_copy(mac, attr->ndev->dev_addr);
if (is_vlan_dev(attr->ndev)) {
- vlan = true;
- vlan_id = vlan_dev_vlan_id(attr->ndev);
+ vlan_info.vlan = true;
+ vlan_info.vlan_id = vlan_dev_vlan_id(attr->ndev);
+ } else {
+ /* If the netdev is an upper device and its lower device is
+ * a vlan device, consider the vlan id of the lower vlan
+ * device for this gid entry.
+ */
+ rcu_read_lock();
+ netdev_walk_all_lower_dev_rcu(attr->ndev,
+ get_lower_dev_vlan, &vlan_info);
+ rcu_read_unlock();
}
}
@@ -573,8 +607,9 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
}
return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
- roce_l3_type, gid->raw, mac, vlan,
- vlan_id, port_num);
+ roce_l3_type, gid->raw, mac,
+ vlan_info.vlan, vlan_info.vlan_id,
+ port_num);
}
static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
@@ -982,11 +1017,11 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (MLX5_CAP_GEN(mdev, pg))
- props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
- props->odp_caps = dev->odp_caps;
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ if (MLX5_CAP_GEN(mdev, pg))
+ props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
+ props->odp_caps = dev->odp_caps;
+ }
if (MLX5_CAP_GEN(mdev, cd))
props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
@@ -1084,6 +1119,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, qp_packet_based))
resp.flags |=
MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
+
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
}
if (field_avail(typeof(resp), sw_parsing_caps,
@@ -1717,14 +1754,15 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
mlx5_ib_disable_lb(dev, true, false);
}
-static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = uctx->device;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_alloc_ucontext_req_v2 req = {};
struct mlx5_ib_alloc_ucontext_resp resp = {};
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_ucontext *context;
+ struct mlx5_ib_ucontext *context = to_mucontext(uctx);
struct mlx5_bfreg_info *bfregi;
int ver;
int err;
@@ -1734,29 +1772,29 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
bool lib_uar_4k;
if (!dev->ib_active)
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
ver = 0;
else if (udata->inlen >= min_req_v2)
ver = 2;
else
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
if (err)
- return ERR_PTR(err);
+ return err;
if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
req.total_num_bfregs = ALIGN(req.total_num_bfregs,
MLX5_NON_FP_BFREGS_PER_UAR);
if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
@@ -1789,10 +1827,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
}
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
bfregi = &context->bfregi;
@@ -1822,9 +1856,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
goto out_sys_pages;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
-#endif
+ if (ibdev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)
+ context->ibucontext.invalidate_range =
+ &mlx5_ib_invalidate_range;
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
err = mlx5_ib_devx_create(dev, true);
@@ -1927,7 +1961,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1, &dev->roce[port].tx_port_affinity));
}
- return &context->ibucontext;
+ return 0;
out_mdev:
mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
@@ -1945,23 +1979,19 @@ out_count:
kfree(bfregi->count);
out_ctx:
- kfree(context);
-
- return ERR_PTR(err);
+ return err;
}
-static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_bfreg_info *bfregi;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* All umem's must be destroyed before destroying the ucontext. */
mutex_lock(&ibcontext->per_mm_list_lock);
WARN_ON(!list_empty(&ibcontext->per_mm_list));
mutex_unlock(&ibcontext->per_mm_list_lock);
-#endif
bfregi = &context->bfregi;
mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
@@ -1972,9 +2002,6 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
kfree(bfregi->count);
- kfree(context);
-
- return 0;
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
@@ -2041,6 +2068,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
if (vma->vm_flags & VM_WRITE)
return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
if (!dev->mdev->clock_info_page)
return -EOPNOTSUPP;
@@ -2206,19 +2234,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
if (vma->vm_flags & VM_WRITE)
return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
/* Don't expose to user-space information it shouldn't have */
if (PAGE_SIZE > 4096)
return -EOPNOTSUPP;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pfn = (dev->mdev->iseg_base +
offsetof(struct mlx5_init_seg, internal_timer_h)) >>
PAGE_SHIFT;
- if (io_remap_pfn_range(vma, vma->vm_start, pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
- break;
+ return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
case MLX5_IB_MMAP_CLOCK_INFO:
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
@@ -2313,30 +2340,24 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
return 0;
}
-static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
+ struct mlx5_ib_pd *pd = to_mpd(ibpd);
+ struct ib_device *ibdev = ibpd->device;
struct mlx5_ib_alloc_pd_resp resp;
- struct mlx5_ib_pd *pd;
int err;
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
u16 uid = 0;
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
uid = context ? to_mucontext(context)->devx_uid : 0;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
out, sizeof(out));
- if (err) {
- kfree(pd);
- return ERR_PTR(err);
- }
+ if (err)
+ return err;
pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
pd->uid = uid;
@@ -2344,23 +2365,19 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
- kfree(pd);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
}
- return &pd->ibpd;
+ return 0;
}
-static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
+static void mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
- kfree(mpd);
-
- return 0;
}
enum {
@@ -2394,10 +2411,29 @@ static u8 get_match_criteria_enable(u32 *match_criteria)
return match_criteria_enable;
}
-static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
+static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
- MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
- MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
+ u8 entry_mask;
+ u8 entry_val;
+ int err = 0;
+
+ if (!mask)
+ goto out;
+
+ entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
+ ip_protocol);
+ entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
+ ip_protocol);
+ if (!entry_mask) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
+ goto out;
+ }
+ /* Don't override existing ip protocol */
+ if (mask != entry_mask || val != entry_val)
+ err = -EINVAL;
+out:
+ return err;
}
static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
@@ -2631,8 +2667,10 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
set_tos(headers_c, headers_v,
ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
- set_proto(headers_c, headers_v,
- ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
+ if (set_proto(headers_c, headers_v,
+ ib_spec->ipv4.mask.proto,
+ ib_spec->ipv4.val.proto))
+ return -EINVAL;
break;
case IB_FLOW_SPEC_IPV6:
if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
@@ -2671,9 +2709,10 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
ib_spec->ipv6.mask.traffic_class,
ib_spec->ipv6.val.traffic_class);
- set_proto(headers_c, headers_v,
- ib_spec->ipv6.mask.next_hdr,
- ib_spec->ipv6.val.next_hdr);
+ if (set_proto(headers_c, headers_v,
+ ib_spec->ipv6.mask.next_hdr,
+ ib_spec->ipv6.val.next_hdr))
+ return -EINVAL;
set_flow_label(misc_params_c, misc_params_v,
ntohl(ib_spec->ipv6.mask.flow_label),
@@ -2694,10 +2733,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
LAST_TCP_UDP_FIELD))
return -EOPNOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
- 0xff);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
- IPPROTO_TCP);
+ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
+ return -EINVAL;
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
ntohs(ib_spec->tcp_udp.mask.src_port));
@@ -2714,10 +2751,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
LAST_TCP_UDP_FIELD))
return -EOPNOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
- 0xff);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
- IPPROTO_UDP);
+ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
+ return -EINVAL;
MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
ntohs(ib_spec->tcp_udp.mask.src_port));
@@ -2733,6 +2768,9 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
if (ib_spec->gre.mask.c_ks_res0_ver)
return -EOPNOTSUPP;
+ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
+ return -EINVAL;
+
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
0xff);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
@@ -3884,7 +3922,7 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
return ERR_PTR(-ENOMEM);
- dst = kzalloc(sizeof(*dst) * 2, GFP_KERNEL);
+ dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
if (!dst)
return ERR_PTR(-ENOMEM);
@@ -4165,7 +4203,7 @@ static ssize_t fw_pages_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}
@@ -4175,7 +4213,7 @@ static ssize_t reg_pages_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
@@ -4185,7 +4223,8 @@ static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);
@@ -4194,7 +4233,8 @@ static ssize_t hw_rev_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
return sprintf(buf, "%x\n", dev->mdev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -4203,7 +4243,8 @@ static ssize_t board_id_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
dev->mdev->board_id);
}
@@ -4689,23 +4730,28 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
{
struct ib_srq_init_attr attr;
struct mlx5_ib_dev *dev;
+ struct ib_device *ibdev;
struct ib_cq_init_attr cq_attr = {.cqe = 1};
int port;
int ret = 0;
dev = container_of(devr, struct mlx5_ib_dev, devr);
+ ibdev = &dev->ib_dev;
mutex_init(&devr->mutex);
- devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
- if (IS_ERR(devr->p0)) {
- ret = PTR_ERR(devr->p0);
- goto error0;
- }
- devr->p0->device = &dev->ib_dev;
+ devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
+ if (!devr->p0)
+ return -ENOMEM;
+
+ devr->p0->device = ibdev;
devr->p0->uobject = NULL;
atomic_set(&devr->p0->usecnt, 0);
+ ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL);
+ if (ret)
+ goto error0;
+
devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
if (IS_ERR(devr->c0)) {
ret = PTR_ERR(devr->c0);
@@ -4803,6 +4849,7 @@ error2:
error1:
mlx5_ib_dealloc_pd(devr->p0);
error0:
+ kfree(devr->p0);
return ret;
}
@@ -4818,6 +4865,7 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
mlx5_ib_dealloc_xrcd(devr->x1);
mlx5_ib_destroy_cq(devr->c0);
mlx5_ib_dealloc_pd(devr->p0);
+ kfree(devr->p0);
/* Make sure no change P_Key work items are still executing */
for (port = 0; port < dev->num_ports; ++port)
@@ -5567,9 +5615,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
- err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
- if (err)
- goto unbind;
+ mlx5_ib_init_cong_debugfs(ibdev, port_num);
return true;
@@ -5781,11 +5827,10 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- cleanup_srcu_struct(&dev->mr_srcu);
- drain_workqueue(dev->advise_mr_wq);
- destroy_workqueue(dev->advise_mr_wq);
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ srcu_barrier(&dev->mr_srcu);
+ cleanup_srcu_struct(&dev->mr_srcu);
+ }
kfree(dev->port);
}
@@ -5838,19 +5883,11 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->memic.memic_lock);
dev->memic.dev = mdev;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- dev->advise_mr_wq = alloc_ordered_workqueue("mlx5_ib_advise_mr_wq", 0);
- if (!dev->advise_mr_wq) {
- err = -ENOMEM;
- goto err_mp;
- }
-
- err = init_srcu_struct(&dev->mr_srcu);
- if (err) {
- destroy_workqueue(dev->advise_mr_wq);
- goto err_mp;
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ err = init_srcu_struct(&dev->mr_srcu);
+ if (err)
+ goto err_mp;
}
-#endif
return 0;
err_mp:
@@ -5947,6 +5984,8 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.req_notify_cq = mlx5_ib_arm_cq,
.rereg_user_mr = mlx5_ib_rereg_user_mr,
.resize_cq = mlx5_ib_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
};
static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
@@ -6213,7 +6252,7 @@ static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
return mlx5_ib_odp_init_one(dev);
}
-void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_odp_cleanup_one(dev);
}
@@ -6242,8 +6281,9 @@ void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
{
- return mlx5_ib_init_cong_debugfs(dev,
- mlx5_core_native_port_num(dev->mdev) - 1);
+ mlx5_ib_init_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+ return 0;
}
static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
@@ -6293,7 +6333,7 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
name = "mlx5_%d";
else
name = "mlx5_bond_%d";
- return ib_register_device(&dev->ib_dev, name, NULL);
+ return ib_register_device(&dev->ib_dev, name);
}
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
@@ -6550,7 +6590,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
return mlx5_ib_add_slave_port(mdev);
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+ dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
if (!dev)
return NULL;
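
Among the main.c changes, set_proto() deserves a closer look: what used to be an unconditional MLX5_SET of ip_protocol is now write-once, so a TCP, UDP or GRE spec can no longer silently override a protocol already fixed by an earlier IPv4/IPv6 spec. The guard reduces to the following, written here as plain C for clarity:

	#include <errno.h>

	/* Write-once field: set it if empty, accept an identical re-set,
	 * reject any conflicting value. */
	static int set_once(unsigned char *cur_mask, unsigned char *cur_val,
			    unsigned char mask, unsigned char val)
	{
		if (!mask)
			return 0;	/* nothing requested */
		if (!*cur_mask) {
			*cur_mask = mask;
			*cur_val = val;
			return 0;
		}
		return (mask != *cur_mask || val != *cur_val) ? -EINVAL : 0;
	}
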
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 549234988bb4..9f90be296ee0 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -111,7 +111,6 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
*count = i;
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{
u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
@@ -123,7 +122,6 @@ static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
return mtt_entry;
}
-#endif
/*
* Populate the given array with bus addresses from the umem.
@@ -151,7 +149,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int len;
struct scatterlist *sg;
int entry;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+
if (umem->is_odp) {
WARN_ON(shift != 0);
WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
@@ -164,7 +162,6 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
}
return;
}
-#endif
i = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index eedba0d2ec4b..4a617d78eae1 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
@@ -588,14 +589,27 @@ struct mlx5_ib_mr {
atomic_t num_leaf_free;
wait_queue_head_t q_leaf_free;
struct mlx5_async_work cb_work;
+ atomic_t num_pending_prefetch;
};
+static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_odp;
+}
+
struct mlx5_ib_mw {
struct ib_mw ibmw;
struct mlx5_core_mkey mmkey;
int ndescs;
};
+struct mlx5_ib_devx_mr {
+ struct mlx5_core_mkey mmkey;
+ int ndescs;
+ struct rcu_head rcu;
+};
+
struct mlx5_ib_umr_context {
struct ib_cqe cqe;
enum ib_wc_status status;
@@ -624,7 +638,6 @@ struct mlx5_cache_ent {
spinlock_t lock;
- struct dentry *dir;
char name[4];
u32 order;
u32 xlt;
@@ -636,11 +649,6 @@ struct mlx5_cache_ent {
u32 miss;
u32 limit;
- struct dentry *fsize;
- struct dentry *fcur;
- struct dentry *fmiss;
- struct dentry *flimit;
-
struct mlx5_ib_dev *dev;
struct work_struct work;
struct delayed_work dwork;
@@ -912,7 +920,6 @@ struct mlx5_ib_dev {
/* Prevents soft lock on massive reg MRs */
struct mutex slow_path_mutex;
int fill_delay;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_odp_caps odp_caps;
u64 odp_max_size;
struct mlx5_ib_pf_eq odp_pf_eq;
@@ -923,8 +930,6 @@ struct mlx5_ib_dev {
*/
struct srcu_struct mr_srcu;
u32 null_mkey;
- struct workqueue_struct *advise_mr_wq;
-#endif
struct mlx5_ib_flow_db *flow_db;
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
@@ -1034,7 +1039,8 @@ to_mflow_act(struct ib_flow_action *ibact)
return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}
-int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
+ struct ib_udata *udata, unsigned long virt,
struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
@@ -1069,9 +1075,12 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
-int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
- void *buffer, u32 length,
- struct mlx5_ib_qp_base *base);
+int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ int buflen, size_t *bc);
+int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ int buflen, size_t *bc);
+int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
+ void *buffer, int buflen, size_t *bc);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
@@ -1097,6 +1106,7 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
+ struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
@@ -1214,6 +1224,9 @@ mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
{
return -EOPNOTSUPP;
}
+static inline void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp,
+ unsigned long start,
+ unsigned long end){};
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
/* Needed for rep profile */
@@ -1253,7 +1266,7 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
const struct ib_gid_attr *attr);
void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
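
A pattern running through the mlx5 side of this series, introduced in mlx5_ib.h with is_odp_mr(), is the replacement of #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING blocks with IS_ENABLED() checks: the code is then compiled in every configuration and the constant-false branch is discarded by the optimizer. Schematically:

	/* Before: only ODP builds ever compile (or type-check) this line. */
	#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		synchronize_srcu(&dev->mr_srcu);
	#endif

	/* After: always compiled, folded away when the option is off. */
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			synchronize_srcu(&dev->mr_srcu);
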
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bf2b6ea23851..c85f00255884 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -71,10 +71,9 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- /* Wait until all page fault handlers using the mr complete. */
- synchronize_srcu(&dev->mr_srcu);
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ /* Wait until all page fault handlers using the mr complete. */
+ synchronize_srcu(&dev->mr_srcu);
return err;
}
@@ -95,10 +94,9 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
- if (mr->umem->is_odp) {
+ if (is_odp_mr(mr)) {
/*
* This barrier prevents the compiler from moving the
* setting of umem->odp_data->private to point to our
@@ -121,7 +119,6 @@ static void update_odp_mr(struct mlx5_ib_mr *mr)
smp_wmb();
}
}
-#endif
static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
@@ -257,9 +254,8 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- synchronize_srcu(&dev->mr_srcu);
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ synchronize_srcu(&dev->mr_srcu);
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
@@ -611,52 +607,27 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
dev->cache.root = NULL;
}
-static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
+ struct dentry *dir;
int i;
if (!mlx5_debugfs_root || dev->rep)
- return 0;
+ return;
cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
- if (!cache->root)
- return -ENOMEM;
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
sprintf(ent->name, "%d", ent->order);
- ent->dir = debugfs_create_dir(ent->name, cache->root);
- if (!ent->dir)
- goto err;
-
- ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
- &size_fops);
- if (!ent->fsize)
- goto err;
-
- ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
- &limit_fops);
- if (!ent->flimit)
- goto err;
-
- ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
- &ent->cur);
- if (!ent->fcur)
- goto err;
-
- ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
- &ent->miss);
- if (!ent->fmiss)
- goto err;
+ dir = debugfs_create_dir(ent->name, cache->root);
+ debugfs_create_file("size", 0600, dir, ent, &size_fops);
+ debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
+ debugfs_create_u32("cur", 0400, dir, &ent->cur);
+ debugfs_create_u32("miss", 0600, dir, &ent->miss);
}
-
- return 0;
-err:
- mlx5_mr_cache_debugfs_cleanup(dev);
-
- return -ENOMEM;
}
static void delay_time_func(struct timer_list *t)
@@ -670,7 +641,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
- int err;
int i;
mutex_init(&dev->slow_path_mutex);
@@ -715,14 +685,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
queue_work(cache->wq, &ent->work);
}
- err = mlx5_mr_cache_debugfs_init(dev);
- if (err)
- mlx5_ib_warn(dev, "cache debugfs failure\n");
-
- /*
- * We don't want to fail driver if debugfs failed to initialize,
- * so we are not forwarding error to the user.
- */
+ mlx5_mr_cache_debugfs_init(dev);
return 0;
}
@@ -822,18 +785,17 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev)
return MLX5_MAX_UMR_SHIFT;
}
-static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
- int access_flags, struct ib_umem **umem,
- int *npages, int *page_shift, int *ncont,
- int *order)
+static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
+ u64 start, u64 length, int access_flags,
+ struct ib_umem **umem, int *npages, int *page_shift,
+ int *ncont, int *order)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem *u;
int err;
*umem = NULL;
- u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
+ u = ib_umem_get(udata, start, length, access_flags, 0);
err = PTR_ERR_OR_ZERO(u);
if (err) {
mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
@@ -1306,21 +1268,20 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (!start && length == U64_MAX) {
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
+ length == U64_MAX) {
if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return ERR_PTR(-EINVAL);
- mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
+ mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
if (IS_ERR(mr))
return ERR_CAST(mr);
return &mr->ibmr;
}
-#endif
- err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
- &page_shift, &ncont, &order);
+ err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
+ &npages, &page_shift, &ncont, &order);
if (err < 0)
return ERR_PTR(err);
@@ -1361,9 +1322,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->umem = umem;
set_mr_fields(dev, mr, npages, length, access_flags);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
update_odp_mr(mr);
-#endif
if (!populate_mtts) {
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
@@ -1380,9 +1339,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- mr->live = 1;
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ mr->live = 1;
+ atomic_set(&mr->num_pending_prefetch, 0);
+ }
+
return &mr->ibmr;
error:
ib_umem_release(umem);
@@ -1470,8 +1431,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
mr->umem = NULL;
- err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
- &npages, &page_shift, &ncont, &order);
+ err = mr_umem_get(dev, udata, addr, len, access_flags,
+ &mr->umem, &npages, &page_shift, &ncont,
+ &order);
if (err)
goto err;
}
@@ -1497,9 +1459,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
}
mr->allocated_from_cache = 0;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- mr->live = 1;
-#endif
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ mr->live = 1;
} else {
/*
* Send a UMR WQE
@@ -1528,9 +1489,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
set_mr_fields(dev, mr, npages, len, access_flags);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
update_odp_mr(mr);
-#endif
return 0;
err:
@@ -1616,12 +1575,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
int npages = mr->npages;
struct ib_umem *umem = mr->umem;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (umem && umem->is_odp) {
+ if (is_odp_mr(mr)) {
struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
- /* Prevent new page faults from succeeding */
+ /* Prevent new page faults and
+ * prefetch requests from succeeding
+ */
mr->live = 0;
+
+ /* dequeue pending prefetch requests for the mr */
+ if (atomic_read(&mr->num_pending_prefetch))
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(atomic_read(&mr->num_pending_prefetch));
+
/* Wait for all running page-fault handlers to finish. */
synchronize_srcu(&dev->mr_srcu);
/* Destroy all page mappings */
@@ -1641,7 +1607,7 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
/* Avoid double-freeing the umem. */
umem = NULL;
}
-#endif
+
clean_mr(dev, mr);
/*
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 4ee32964e1dd..0aa10ebda5d9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -101,9 +101,9 @@ static int check_parent(struct ib_umem_odp *odp,
return mr && mr->parent == parent && !odp->dying;
}
-struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
+static struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
{
- if (WARN_ON(!mr || !mr->umem || !mr->umem->is_odp))
+ if (WARN_ON(!mr || !is_odp_mr(mr)))
return NULL;
return to_ib_umem_odp(mr->umem)->per_mm;
@@ -315,6 +315,9 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+ if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
+ caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
@@ -330,6 +333,27 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
MLX5_CAP_GEN(dev->mdev, null_mkey) &&
MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
@@ -439,7 +463,7 @@ next_mr:
if (nentries)
nentries++;
} else {
- odp = ib_alloc_odp_umem(odp_mr->per_mm, addr,
+ odp = ib_alloc_odp_umem(odp_mr, addr,
MLX5_IMR_MTT_SIZE);
if (IS_ERR(odp)) {
mutex_unlock(&odp_mr->umem_mutex);
@@ -492,13 +516,13 @@ next_mr:
}
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
+ struct ib_udata *udata,
int access_flags)
{
- struct ib_ucontext *ctx = pd->ibpd.uobject->context;
struct mlx5_ib_mr *imr;
struct ib_umem *umem;
- umem = ib_umem_get(ctx, 0, 0, IB_ACCESS_ON_DEMAND, 0);
+ umem = ib_umem_get(udata, 0, 0, access_flags, 0);
if (IS_ERR(umem))
return ERR_CAST(umem);
@@ -511,6 +535,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr->umem = umem;
init_waitqueue_head(&imr->q_leaf_free);
atomic_set(&imr->num_leaf_free, 0);
+ atomic_set(&imr->num_pending_prefetch, 0);
return imr;
}
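The srq_receive and xrc_odp_caps bits advertised earlier in this file reach consumers through the standard device attributes. A sketch of how a ULP might gate XRC ODP use on them (the attrs.odp_caps field path follows include/rdma/ib_verbs.h of this era; treat the exact layout as an assumption of the sketch):

#include <rdma/ib_verbs.h>

static bool xrc_odp_send_ok(struct ib_device *ibdev)
{
	const struct ib_odp_caps *caps = &ibdev->attrs.odp_caps;

	return (caps->general_caps & IB_ODP_SUPPORT) &&
	       (caps->per_transport_caps.xrc_odp_caps & IB_ODP_SUPPORT_SEND);
}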
@@ -560,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
- u64 access_mask = ODP_READ_ALLOWED_BIT;
+ u64 access_mask;
u64 start_idx, page_mask;
struct ib_umem_odp *odp;
size_t size;
@@ -582,6 +607,7 @@ next_mr:
page_shift = mr->umem->page_shift;
page_mask = ~(BIT(page_shift) - 1);
start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+ access_mask = ODP_READ_ALLOWED_BIT;
if (prefetch && !downgrade && !mr->umem->writable) {
/* prefetch with write-access must
@@ -685,6 +711,21 @@ struct pf_frame {
int depth;
};
+static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
+{
+ struct mlx5_ib_mw *mw;
+ struct mlx5_ib_devx_mr *devx_mr;
+
+ if (mmkey->type == MLX5_MKEY_MW) {
+ mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
+ return mw->ndescs;
+ }
+
+ devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
+ mmkey);
+ return devx_mr->ndescs;
+}
+
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
@@ -696,7 +737,8 @@ struct pf_frame {
* -EFAULT when there's an error mapping the requested pages. The caller will
* abort the page fault handling.
*/
-static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, u32 key,
+static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ struct ib_pd *pd, u32 key,
u64 io_virt, size_t bcnt,
u32 *bytes_committed,
u32 *bytes_mapped, u32 flags)
@@ -705,11 +747,11 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, u32 key,
bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
struct pf_frame *head = NULL, *frame;
struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mw *mw;
struct mlx5_ib_mr *mr;
struct mlx5_klm *pklm;
u32 *out = NULL;
size_t offset;
+ int ndescs;
srcu_key = srcu_read_lock(&dev->mr_srcu);
@@ -739,12 +781,18 @@ next_mr:
goto srcu_unlock;
}
- if (prefetch && !mr->umem->is_odp) {
- ret = -EINVAL;
- goto srcu_unlock;
+ if (prefetch) {
+ if (!is_odp_mr(mr) ||
+ mr->ibmr.pd != pd) {
+ mlx5_ib_dbg(dev, "Invalid prefetch request: %s\n",
+ is_odp_mr(mr) ? "MR is not ODP" :
+ "PD is not of the MR");
+ ret = -EINVAL;
+ goto srcu_unlock;
+ }
}
- if (!mr->umem->is_odp) {
+ if (!is_odp_mr(mr)) {
mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
key);
if (bytes_mapped)
@@ -762,7 +810,8 @@ next_mr:
break;
case MLX5_MKEY_MW:
- mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
+ case MLX5_MKEY_INDIRECT_DEVX:
+ ndescs = get_indirect_num_descs(mmkey);
if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
mlx5_ib_dbg(dev, "indirection level exceeded\n");
@@ -771,7 +820,7 @@ next_mr:
}
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
- sizeof(*pklm) * (mw->ndescs - 2);
+ sizeof(*pklm) * (ndescs - 2);
if (outlen > cur_outlen) {
kfree(out);
@@ -786,14 +835,14 @@ next_mr:
pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
bsf0_klm0_pas_mtt0_1);
- ret = mlx5_core_query_mkey(dev->mdev, &mw->mmkey, out, outlen);
+ ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
if (ret)
goto srcu_unlock;
offset = io_virt - MLX5_GET64(query_mkey_out, out,
memory_key_mkey_entry.start_addr);
- for (i = 0; bcnt && i < mw->ndescs; i++, pklm++) {
+ for (i = 0; bcnt && i < ndescs; i++, pklm++) {
if (offset >= be32_to_cpu(pklm->bcount)) {
offset -= be32_to_cpu(pklm->bcount);
continue;
@@ -853,7 +902,6 @@ srcu_unlock:
/**
* Parse a series of data segments for page fault handling.
*
- * @qp the QP on which the fault occurred.
* @pfault contains page fault information.
* @wqe points at the first data segment in the WQE.
* @wqe_end points after the end of the WQE.
@@ -870,7 +918,7 @@ srcu_unlock:
*/
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault,
- struct mlx5_ib_qp *qp, void *wqe,
+ void *wqe,
void *wqe_end, u32 *bytes_mapped,
u32 *total_wqe_bytes, int receive_queue)
{
@@ -881,10 +929,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
size_t bcnt;
int inline_segment;
- /* Skip SRQ next-WQE segment. */
- if (receive_queue && qp->ibqp.srq)
- wqe += sizeof(struct mlx5_wqe_srq_next_seg);
-
if (bytes_mapped)
*bytes_mapped = 0;
if (total_wqe_bytes)
@@ -928,7 +972,8 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
continue;
}
- ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
+ ret = pagefault_single_data_segment(dev, NULL, key,
+ io_virt, bcnt,
&pfault->bytes_committed,
bytes_mapped, 0);
if (ret < 0)
@@ -1009,6 +1054,10 @@ static int mlx5_ib_mr_initiator_pfault_handler(
MLX5_WQE_CTRL_OPCODE_MASK;
switch (qp->ibqp.qp_type) {
+ case IB_QPT_XRC_INI:
+ *wqe += sizeof(struct mlx5_wqe_xrc_seg);
+ transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps;
+ break;
case IB_QPT_RC:
transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
break;
@@ -1028,7 +1077,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
return -EFAULT;
}
- if (qp->ibqp.qp_type != IB_QPT_RC) {
+ if (qp->ibqp.qp_type == IB_QPT_UD) {
av = *wqe;
if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av);
@@ -1053,21 +1102,34 @@ static int mlx5_ib_mr_initiator_pfault_handler(
}
/*
- * Parse responder WQE. Advances the wqe pointer to point at the
- * scatter-gather list, and set wqe_end to the end of the WQE.
+ * Parse responder WQE and set wqe_end to the end of the WQE.
*/
-static int mlx5_ib_mr_responder_pfault_handler(
- struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
- struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
+static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_srq *srq,
+ void **wqe, void **wqe_end,
+ int wqe_length)
{
- struct mlx5_ib_wq *wq = &qp->rq;
- int wqe_size = 1 << wq->wqe_shift;
+ int wqe_size = 1 << srq->msrq.wqe_shift;
- if (qp->ibqp.srq) {
- mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
+ if (wqe_size > wqe_length) {
+ mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
return -EFAULT;
}
+ *wqe_end = *wqe + wqe_size;
+ *wqe += sizeof(struct mlx5_wqe_srq_next_seg);
+
+ return 0;
+}
+
+static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ void *wqe, void **wqe_end,
+ int wqe_length)
+{
+ struct mlx5_ib_wq *wq = &qp->rq;
+ int wqe_size = 1 << wq->wqe_shift;
+
if (qp->wq_sig) {
mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
return -EFAULT;
@@ -1091,7 +1153,7 @@ invalid_transport_or_opcode:
return -EFAULT;
}
- *wqe_end = *wqe + wqe_size;
+ *wqe_end = wqe + wqe_size;
return 0;
}
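Note the subtle difference between the two responder parsers: the RQ variant now takes wqe by value, so the end pointer is computed as wqe + wqe_size with no way to disturb the caller's cursor, while the SRQ variant keeps void **wqe because it must also advance the caller past the next-WQE segment. Reduced to the two shapes:

/* SRQ flavor: deliberately advances the caller's cursor. */
static void srq_parse(void **wqe, void **wqe_end, int wqe_size)
{
	*wqe_end = *wqe + wqe_size;
	*wqe += sizeof(struct mlx5_wqe_srq_next_seg);
}

/* RQ flavor: read-only view, no side effect on the caller. */
static void rq_parse(void *wqe, void **wqe_end, int wqe_size)
{
	*wqe_end = wqe + wqe_size;
}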
@@ -1099,22 +1161,25 @@ invalid_transport_or_opcode:
static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
u32 wq_num, int pf_type)
{
- enum mlx5_res_type res_type;
+ struct mlx5_core_rsc_common *common = NULL;
+ struct mlx5_core_srq *srq;
switch (pf_type) {
case MLX5_WQE_PF_TYPE_RMP:
- res_type = MLX5_RES_SRQ;
+ srq = mlx5_cmd_get_srq(dev, wq_num);
+ if (srq)
+ common = &srq->common;
break;
case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
case MLX5_WQE_PF_TYPE_RESP:
case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
- res_type = MLX5_RES_QP;
+ common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
break;
default:
- return NULL;
+ break;
}
- return mlx5_core_res_hold(dev->mdev, wq_num, res_type);
+ return common;
}
static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
@@ -1124,6 +1189,14 @@ static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
return to_mibqp(mqp);
}
+static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
+{
+ struct mlx5_core_srq *msrq =
+ container_of(res, struct mlx5_core_srq, common);
+
+ return to_mibsrq(msrq);
+}
+
static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault)
{
@@ -1134,8 +1207,10 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
int resume_with_error = 1;
u16 wqe_index = pfault->wqe.wqe_index;
int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
- struct mlx5_core_rsc_common *res;
- struct mlx5_ib_qp *qp;
+ struct mlx5_core_rsc_common *res = NULL;
+ struct mlx5_ib_qp *qp = NULL;
+ struct mlx5_ib_srq *srq = NULL;
+ size_t bytes_copied;
res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
if (!res) {
@@ -1147,6 +1222,10 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
case MLX5_RES_QP:
qp = res_to_qp(res);
break;
+ case MLX5_RES_SRQ:
+ case MLX5_RES_XSRQ:
+ srq = res_to_srq(res);
+ break;
default:
mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", pfault->type);
goto resolve_page_fault;
@@ -1158,9 +1237,23 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
goto resolve_page_fault;
}
- ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
- PAGE_SIZE, &qp->trans_qp.base);
- if (ret < 0) {
+ if (qp) {
+ if (requestor) {
+ ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index,
+ buffer, PAGE_SIZE,
+ &bytes_copied);
+ } else {
+ ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index,
+ buffer, PAGE_SIZE,
+ &bytes_copied);
+ }
+ } else {
+ ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index,
+ buffer, PAGE_SIZE,
+ &bytes_copied);
+ }
+
+ if (ret) {
mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
ret, wqe_index, pfault->token);
goto resolve_page_fault;
@@ -1168,11 +1261,18 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
wqe = buffer;
if (requestor)
- ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
- &wqe_end, ret);
+ ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp,
+ &wqe, &wqe_end,
+ bytes_copied);
+ else if (qp)
+ ret = mlx5_ib_mr_responder_pfault_handler_rq(dev, qp,
+ wqe, &wqe_end,
+ bytes_copied);
else
- ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
- &wqe_end, ret);
+ ret = mlx5_ib_mr_responder_pfault_handler_srq(dev, srq,
+ &wqe, &wqe_end,
+ bytes_copied);
+
if (ret < 0)
goto resolve_page_fault;
@@ -1181,7 +1281,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
goto resolve_page_fault;
}
- ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
+ ret = pagefault_data_segments(dev, pfault, wqe, wqe_end,
&bytes_mapped, &total_wqe_bytes,
!requestor);
if (ret == -EAGAIN) {
@@ -1240,7 +1340,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
}
- ret = pagefault_single_data_segment(dev, rkey, address, length,
+ ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
&pfault->bytes_committed, NULL,
0);
if (ret == -EAGAIN) {
@@ -1267,7 +1367,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
if (prefetch_activated) {
u32 bytes_committed = 0;
- ret = pagefault_single_data_segment(dev, rkey, address,
+ ret = pagefault_single_data_segment(dev, NULL, rkey, address,
prefetch_len,
&bytes_committed, NULL,
0);
@@ -1564,30 +1664,98 @@ int mlx5_ib_odp_init(void)
struct prefetch_mr_work {
struct work_struct work;
- struct mlx5_ib_dev *dev;
+ struct ib_pd *pd;
u32 pf_flags;
u32 num_sge;
struct ib_sge sg_list[0];
};
-static int mlx5_ib_prefetch_sg_list(struct mlx5_ib_dev *dev, u32 pf_flags,
+static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
+ struct ib_sge *sg_list, u32 num_sge,
+ u32 from)
+{
+ u32 i;
+ int srcu_key;
+
+ srcu_key = srcu_read_lock(&dev->mr_srcu);
+
+ for (i = from; i < num_sge; ++i) {
+ struct mlx5_core_mkey *mmkey;
+ struct mlx5_ib_mr *mr;
+
+ mmkey = __mlx5_mr_lookup(dev->mdev,
+ mlx5_base_mkey(sg_list[i].lkey));
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ atomic_dec(&mr->num_pending_prefetch);
+ }
+
+ srcu_read_unlock(&dev->mr_srcu, srcu_key);
+}
+
+static bool num_pending_prefetch_inc(struct ib_pd *pd,
+ struct ib_sge *sg_list, u32 num_sge)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ bool ret = true;
+ u32 i;
+
+ for (i = 0; i < num_sge; ++i) {
+ struct mlx5_core_mkey *mmkey;
+ struct mlx5_ib_mr *mr;
+
+ mmkey = __mlx5_mr_lookup(dev->mdev,
+ mlx5_base_mkey(sg_list[i].lkey));
+ if (!mmkey || mmkey->key != sg_list[i].lkey) {
+ ret = false;
+ break;
+ }
+
+ if (mmkey->type != MLX5_MKEY_MR) {
+ ret = false;
+ break;
+ }
+
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+
+ if (mr->ibmr.pd != pd) {
+ ret = false;
+ break;
+ }
+
+ if (!mr->live) {
+ ret = false;
+ break;
+ }
+
+ atomic_inc(&mr->num_pending_prefetch);
+ }
+
+ if (!ret)
+ num_pending_prefetch_dec(dev, sg_list, i, 0);
+
+ return ret;
+}
+
+static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, u32 pf_flags,
struct ib_sge *sg_list, u32 num_sge)
{
- int i;
+ u32 i;
+ int ret = 0;
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
for (i = 0; i < num_sge; ++i) {
struct ib_sge *sg = &sg_list[i];
int bytes_committed = 0;
- int ret;
- ret = pagefault_single_data_segment(dev, sg->lkey, sg->addr,
+ ret = pagefault_single_data_segment(dev, pd, sg->lkey, sg->addr,
sg->length,
&bytes_committed, NULL,
pf_flags);
if (ret < 0)
- return ret;
+ break;
}
- return 0;
+
+ return ret < 0 ? ret : 0;
}
static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
@@ -1595,12 +1763,14 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
struct prefetch_mr_work *w =
container_of(work, struct prefetch_mr_work, work);
- if (ib_device_try_get(&w->dev->ib_dev)) {
- mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list,
+ if (ib_device_try_get(w->pd->device)) {
+ mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list,
w->num_sge);
- ib_device_put(&w->dev->ib_dev);
+ ib_device_put(w->pd->device);
}
- put_device(&w->dev->ib_dev.dev);
+
+ num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
+ w->num_sge, 0);
kfree(w);
}
@@ -1611,12 +1781,14 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
struct mlx5_ib_dev *dev = to_mdev(pd->device);
u32 pf_flags = MLX5_PF_FLAGS_PREFETCH;
struct prefetch_mr_work *work;
+ bool valid_req;
+ int srcu_key;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
- return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list,
+ return mlx5_ib_prefetch_sg_list(pd, pf_flags, sg_list,
num_sge);
work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
@@ -1625,12 +1797,25 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
- get_device(&dev->ib_dev.dev);
- work->dev = dev;
+ /* It is guaranteed that the pd when the work is executed is the pd
+ * when the work was queued, since a pd can't be destroyed while it
+ * holds MRs and destroying an MR leads to flushing the workqueue.
+ */
+ work->pd = pd;
work->pf_flags = pf_flags;
work->num_sge = num_sge;
INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
- schedule_work(&work->work);
- return 0;
+
+ srcu_key = srcu_read_lock(&dev->mr_srcu);
+
+ valid_req = num_pending_prefetch_inc(pd, sg_list, num_sge);
+ if (valid_req)
+ queue_work(system_unbound_wq, &work->work);
+ else
+ kfree(work);
+
+ srcu_read_unlock(&dev->mr_srcu, srcu_key);
+
+ return valid_req ? 0 : -EINVAL;
}
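Together with the dereg_mr() hunk earlier in the patch, this gives a small drain protocol: num_pending_prefetch is raised under SRCU before the work is queued, dropped when the work (or a failed validation) finishes, and dereg_mr() clears mr->live and flushes system_unbound_wq, so no prefetch can outlive its MR. The three sides, condensed (illustrative fragments, not the literal driver code):

	/* submit: count the MRs busy, then queue */
	if (num_pending_prefetch_inc(pd, sg_list, num_sge))
		queue_work(system_unbound_wq, &work->work);

	/* worker: prefetch, then drop the counts */
	mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list, w->num_sge);
	num_pending_prefetch_dec(dev, w->sg_list, w->num_sge, 0);

	/* teardown (dereg_mr): refuse new work, drain what is queued */
	mr->live = 0;
	if (atomic_read(&mr->num_pending_prefetch))
		flush_workqueue(system_unbound_wq);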
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7db778d96ef5..8aafb2208899 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -109,75 +109,173 @@ static int is_sqp(enum ib_qp_type qp_type)
}
/**
- * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
+ * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of) from user WQ
+ * to kernel buffer
*
- * @qp: QP to copy from.
- * @send: copy from the send queue when non-zero, use the receive queue
- * otherwise.
- * @wqe_index: index to start copying from. For send work queues, the
- * wqe_index is in units of MLX5_SEND_WQE_BB.
- * For receive work queue, it is the number of work queue
- * element in the queue.
- * @buffer: destination buffer.
- * @length: maximum number of bytes to copy.
+ * @umem: User space memory where the WQ is
+ * @buffer: buffer to copy to
+ * @buflen: buffer length
+ * @wqe_index: index of WQE to copy from
+ * @wq_offset: offset to start of WQ
+ * @wq_wqe_cnt: number of WQEs in WQ
+ * @wq_wqe_shift: log2 of WQE size
+ * @bcnt: number of bytes to copy
+ * @bytes_copied: number of bytes actually copied (output argument)
*
- * Copies at least a single WQE, but may copy more data.
+ * Copies at most bcnt bytes from the start of the WQE.
+ * Does not guarantee to copy the entire WQE.
*
- * Return: the number of bytes copied, or an error code.
+ * Return: zero on success, or an error code.
*/
-int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
- void *buffer, u32 length,
- struct mlx5_ib_qp_base *base)
+static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
+ void *buffer,
+ u32 buflen,
+ int wqe_index,
+ int wq_offset,
+ int wq_wqe_cnt,
+ int wq_wqe_shift,
+ int bcnt,
+ size_t *bytes_copied)
+{
+ size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
+ size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
+ size_t copy_length;
+ int ret;
+
+ /* don't copy more than requested, more than the buffer length, or
+ * beyond the WQ end
+ */
+ copy_length = min_t(u32, buflen, wq_end - offset);
+ copy_length = min_t(u32, copy_length, bcnt);
+
+ ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
+ if (ret)
+ return ret;
+
+ if (!ret && bytes_copied)
+ *bytes_copied = copy_length;
+
+ return 0;
+}
+
+int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
+ int wqe_index,
+ void *buffer,
+ int buflen,
+ size_t *bc)
{
- struct ib_device *ibdev = qp->ibqp.device;
- struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
- size_t offset;
- size_t wq_end;
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct ib_umem *umem = base->ubuffer.umem;
- u32 first_copy_length;
- int wqe_length;
+ struct mlx5_ib_wq *wq = &qp->sq;
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ size_t bytes_copied;
+ size_t bytes_copied2;
+ size_t wqe_length;
int ret;
+ int ds;
- if (wq->wqe_cnt == 0) {
- mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
- qp->ibqp.qp_type);
+ if (buflen < sizeof(*ctrl))
return -EINVAL;
- }
- offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
- wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
+ /* first, read as much as possible */
+ ret = mlx5_ib_read_user_wqe_common(umem,
+ buffer,
+ buflen,
+ wqe_index,
+ wq->offset,
+ wq->wqe_cnt,
+ wq->wqe_shift,
+ buflen,
+ &bytes_copied);
+ if (ret)
+ return ret;
- if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
+ /* we need at least the control segment size to proceed */
+ if (bytes_copied < sizeof(*ctrl))
return -EINVAL;
- if (offset > umem->length ||
- (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
- return -EINVAL;
+ ctrl = buffer;
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+ /* if we copied enough then we are done */
+ if (bytes_copied >= wqe_length) {
+ *bc = bytes_copied;
+ return 0;
+ }
+
+ /* otherwise this is a wrapped-around WQE,
+ * so read the remaining bytes starting
+ * from wqe_index 0
+ */
+ ret = mlx5_ib_read_user_wqe_common(umem,
+ buffer + bytes_copied,
+ buflen - bytes_copied,
+ 0,
+ wq->offset,
+ wq->wqe_cnt,
+ wq->wqe_shift,
+ wqe_length - bytes_copied,
+ &bytes_copied2);
- first_copy_length = min_t(u32, offset + length, wq_end) - offset;
- ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
if (ret)
return ret;
+ *bc = bytes_copied + bytes_copied2;
+ return 0;
+}
- if (send) {
- struct mlx5_wqe_ctrl_seg *ctrl = buffer;
- int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
-
- wqe_length = ds * MLX5_WQE_DS_UNITS;
- } else {
- wqe_length = 1 << wq->wqe_shift;
- }
+int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
+ int wqe_index,
+ void *buffer,
+ int buflen,
+ size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->rq;
+ size_t bytes_copied;
+ int ret;
- if (wqe_length <= first_copy_length)
- return first_copy_length;
+ ret = mlx5_ib_read_user_wqe_common(umem,
+ buffer,
+ buflen,
+ wqe_index,
+ wq->offset,
+ wq->wqe_cnt,
+ wq->wqe_shift,
+ buflen,
+ &bytes_copied);
- ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
- wqe_length - first_copy_length);
if (ret)
return ret;
+ *bc = bytes_copied;
+ return 0;
+}
- return wqe_length;
+int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
+ int wqe_index,
+ void *buffer,
+ int buflen,
+ size_t *bc)
+{
+ struct ib_umem *umem = srq->umem;
+ size_t bytes_copied;
+ int ret;
+
+ ret = mlx5_ib_read_user_wqe_common(umem,
+ buffer,
+ buflen,
+ wqe_index,
+ 0,
+ srq->msrq.max,
+ srq->msrq.wqe_shift,
+ buflen,
+ &bytes_copied);
+
+ if (ret)
+ return ret;
+ *bc = bytes_copied;
+ return 0;
}
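All three readers funnel into mlx5_ib_read_user_wqe_common(), which clamps each copy at the end of the ring; only the SQ reader needs a second pass, because a send WQE's true length (ds * MLX5_WQE_DS_UNITS, taken from the control segment) can exceed one basic block and wrap to slot 0. A userspace-style sketch of the wrap logic, with memcpy standing in for ib_umem_copy_from():

#include <string.h>

static void ring_copy(void *dst, const unsigned char *ring,
		      size_t ring_size, size_t offset, size_t len)
{
	size_t first = ring_size - offset;	/* bytes until the ring's end */

	if (first >= len) {
		memcpy(dst, ring + offset, len);
		return;
	}
	memcpy(dst, ring + offset, first);			 /* tail */
	memcpy((unsigned char *)dst + first, ring, len - first); /* wrapped head */
}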
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
@@ -435,9 +533,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
return -EINVAL;
}
- if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
- mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
- ucmd->sq_wqe_count, ucmd->sq_wqe_count);
+ if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
+ mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
+ ucmd->sq_wqe_count);
return -EINVAL;
}
@@ -645,16 +743,14 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
return bfregi->sys_pages[index_of_sys_page] + offset;
}
-static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
- struct ib_pd *pd,
+static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
unsigned long addr, size_t size,
- struct ib_umem **umem,
- int *npages, int *page_shift, int *ncont,
- u32 *offset)
+ struct ib_umem **umem, int *npages, int *page_shift,
+ int *ncont, u32 *offset)
{
int err;
- *umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
+ *umem = ib_umem_get(udata, addr, size, 0, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -695,10 +791,11 @@ static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_rwq *rwq,
+ struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
struct mlx5_ib_create_wq *ucmd)
{
- struct mlx5_ib_ucontext *context;
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
int page_shift = 0;
int npages;
u32 offset = 0;
@@ -708,9 +805,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (!ucmd->buf_addr)
return -EINVAL;
- context = to_mucontext(pd->uobject->context);
- rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
- rwq->buf_size, 0, 0);
+ rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
@@ -735,7 +830,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
(unsigned long long)ucmd->buf_addr, rwq->buf_size,
npages, page_shift, ncont, offset);
- err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db);
+ err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_umem;
@@ -783,7 +878,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return err;
}
- context = to_mucontext(pd->uobject->context);
+ context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
+ ibucontext);
if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
uar_index = bfregn_to_uar_index(dev, &context->bfregi,
ucmd.bfreg_index, true);
@@ -819,10 +915,9 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (ucmd.buf_addr && ubuffer->buf_size) {
ubuffer->buf_addr = ucmd.buf_addr;
- err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
- ubuffer->buf_size,
- &ubuffer->umem, &npages, &page_shift,
- &ncont, &offset);
+ err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
+ ubuffer->buf_size, &ubuffer->umem,
+ &npages, &page_shift, &ncont, &offset);
if (err)
goto err_bfreg;
} else {
@@ -856,7 +951,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
resp->bfreg_index = MLX5_IB_INVALID_BFREG;
qp->bfregn = bfregn;
- err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
+ err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &qp->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_free;
@@ -1119,6 +1214,7 @@ static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
}
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
+ struct ib_udata *udata,
struct mlx5_ib_sq *sq, void *qpin,
struct ib_pd *pd)
{
@@ -1135,9 +1231,9 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
int ncont = 0;
u32 offset = 0;
- err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr, ubuffer->buf_size,
- &sq->ubuffer.umem, &npages, &page_shift,
- &ncont, &offset);
+ err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr, ubuffer->buf_size,
+ &sq->ubuffer.umem, &npages, &page_shift, &ncont,
+ &offset);
if (err)
return err;
@@ -1362,9 +1458,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
- struct ib_uobject *uobj = pd->uobject;
- struct ib_ucontext *ucontext = uobj->context;
- struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
+ struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
int err;
u32 tdn = mucontext->tdn;
u16 uid = to_mpd(pd)->uid;
@@ -1374,7 +1469,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
return err;
- err = create_raw_packet_qp_sq(dev, sq, in, pd);
+ err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
if (err)
goto err_destroy_tis;
@@ -1478,9 +1573,8 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
- struct ib_uobject *uobj = pd->uobject;
- struct ib_ucontext *ucontext = uobj->context;
- struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
+ struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_create_qp_resp resp = {};
int inlen;
int err;
@@ -1724,13 +1818,16 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
- if (rcqe_sz == 128) {
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
+ if (rcqe_sz == 128)
+ MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+
return;
}
- if (init_attr->qp_type != MLX5_IB_QPT_DCT)
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+ MLX5_SET(qpc, qpc, cs_res,
+ rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
+ MLX5_RES_SCAT_DATA32_CQE);
}
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
@@ -1822,6 +1919,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_create_qp_resp resp = {};
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_cq *send_cq;
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
@@ -1924,8 +2023,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_QP_FLAG_TYPE_DCT))
return -EINVAL;
- err = get_qp_user_index(to_mucontext(pd->uobject->context),
- &ucmd, udata->inlen, &uidx);
+ err = get_qp_user_index(ucontext, &ucmd, udata->inlen, &uidx);
if (err)
return err;
@@ -2409,8 +2507,11 @@ static const char *ib_qp_type_str(enum ib_qp_type type)
static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
struct ib_qp_init_attr *attr,
- struct mlx5_ib_create_qp *ucmd)
+ struct mlx5_ib_create_qp *ucmd,
+ struct ib_udata *udata)
{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_qp *qp;
int err = 0;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
@@ -2419,8 +2520,7 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
if (!attr->srq || !attr->recv_cq)
return ERR_PTR(-EINVAL);
- err = get_qp_user_index(to_mucontext(pd->uobject->context),
- ucmd, sizeof(*ucmd), &uidx);
+ err = get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &uidx);
if (err)
return ERR_PTR(err);
@@ -2502,15 +2602,17 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
int err;
struct ib_qp_init_attr mlx_init_attr;
struct ib_qp_init_attr *init_attr = verbs_init_attr;
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
if (pd) {
dev = to_mdev(pd->device);
if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
- if (!udata) {
+ if (!ucontext) {
mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
return ERR_PTR(-EINVAL);
- } else if (!to_mucontext(pd->uobject->context)->cqe_version) {
+ } else if (!ucontext->cqe_version) {
mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
return ERR_PTR(-EINVAL);
}
@@ -2542,7 +2644,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
} else {
- return mlx5_ib_create_dct(pd, init_attr, &ucmd);
+ return mlx5_ib_create_dct(pd, init_attr, &ucmd, udata);
}
}
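The conversion that recurs throughout these hunks, rdma_udata_to_drv_context(), recovers the driver's ucontext from the udata instead of chasing pd->uobject->context, and yields NULL for kernel consumers — which is why the RAW_PACKET check above can test ucontext rather than udata. An approximation of its semantics (names hypothetical; the real macro lives in rdma/uverbs_ioctl.h):

static struct mlx5_ib_ucontext *udata_to_mlx5_uctx(struct ib_udata *udata)
{
	/* request_ucontext() is a stand-in for however the uverbs core
	 * looks up the ib_ucontext attached to this request */
	struct ib_ucontext *uctx = udata ? request_ucontext(udata) : NULL;

	return uctx ? container_of(uctx, struct mlx5_ib_ucontext, ibucontext)
		    : NULL;
}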
@@ -2653,10 +2755,10 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
const struct ib_qp_attr *attr,
- int attr_mask, __be32 *hw_access_flags)
+ int attr_mask, __be32 *hw_access_flags_be)
{
u8 dest_rd_atomic;
- u32 access_flags;
+ u32 access_flags, hw_access_flags = 0;
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
@@ -2674,7 +2776,7 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
access_flags &= IB_ACCESS_REMOTE_WRITE;
if (access_flags & IB_ACCESS_REMOTE_READ)
- *hw_access_flags |= MLX5_QP_BIT_RRE;
+ hw_access_flags |= MLX5_QP_BIT_RRE;
if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
int atomic_mode;
@@ -2682,14 +2784,14 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
if (atomic_mode < 0)
return -EOPNOTSUPP;
- *hw_access_flags |= MLX5_QP_BIT_RAE;
- *hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
+ hw_access_flags |= MLX5_QP_BIT_RAE;
+ hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
}
if (access_flags & IB_ACCESS_REMOTE_WRITE)
- *hw_access_flags |= MLX5_QP_BIT_RWE;
+ hw_access_flags |= MLX5_QP_BIT_RWE;
- *hw_access_flags = cpu_to_be32(*hw_access_flags);
+ *hw_access_flags_be = cpu_to_be32(hw_access_flags);
return 0;
}
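The to_mlx5_access_flags() rework above untangles a sparse-visible type mix-up: the old code OR'ed host-order MLX5_QP_BIT_* constants into a __be32 output across several statements and converted in place at the end, which only worked because every OR happened to precede the single conversion. The safe shape is to accumulate in CPU order and convert exactly once, as in this reduction of the hunk:

static __be32 build_hw_access_flags(u32 access_flags)
{
	u32 hw = 0;	/* host byte order throughout */

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw);	/* single, final conversion */
}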
@@ -3180,14 +3282,12 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
struct mlx5_ib_pd *pd,
struct mlx5_ib_qp_base *qp_base,
- u8 port_num)
+ u8 port_num, struct ib_udata *udata)
{
- struct mlx5_ib_ucontext *ucontext = NULL;
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
unsigned int tx_port_affinity;
- if (pd && pd->ibpd.uobject && pd->ibpd.uobject->context)
- ucontext = to_mucontext(pd->ibpd.uobject->context);
-
if (ucontext) {
tx_port_affinity = (unsigned int)atomic_add_return(
1, &ucontext->tx_port_affinity) %
@@ -3210,8 +3310,10 @@ static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
- enum ib_qp_state cur_state, enum ib_qp_state new_state,
- const struct mlx5_ib_modify_qp *ucmd)
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ const struct mlx5_ib_modify_qp *ucmd,
+ struct ib_udata *udata)
{
static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
[MLX5_QP_STATE_RST] = {
@@ -3302,7 +3404,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (dev->lag_active) {
u8 p = mlx5_core_native_port_num(dev->mdev);
- tx_affinity = get_tx_affinity(dev, pd, base, p);
+ tx_affinity = get_tx_affinity(dev, pd, base, p,
+ udata);
context->flags |= cpu_to_be32(tx_affinity << 24);
}
}
@@ -3390,7 +3493,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
- __be32 access_flags = 0;
+ __be32 access_flags;
err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
if (err)
@@ -3629,6 +3732,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
u32 min_resp_len = offsetof(typeof(resp), dctn) +
sizeof(resp.dctn);
@@ -3647,7 +3751,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
- MLX5_ST_SZ_BYTES(create_dct_in));
+ MLX5_ST_SZ_BYTES(create_dct_in), out,
+ sizeof(out));
if (err)
return err;
resp.dctn = qp->dct.mdct.mqp.qpn;
@@ -3785,7 +3890,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
- new_state, &ucmd);
+ new_state, &ucmd, udata);
out:
mutex_unlock(&qp->mutex);
@@ -5021,7 +5126,6 @@ out:
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
*/
- mmiowb();
bf->offset ^= bf->buf_size;
}
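This mmiowb() deletion, like the ones in the mthca hunks below, follows the kernel-wide I/O ordering rework: spin_unlock() on the architectures that previously required mmiowb() now issues the barrier itself, so a doorbell write inside the lock is already ordered before the unlock. The drivers are left with the plain pattern (fragment; the doorbell expression is illustrative, not the driver's actual write):

	spin_lock_irqsave(&sq->lock, flags);
	/* ... post WQEs ... */
	writel(doorbell_val, doorbell_reg);
	spin_unlock_irqrestore(&sq->lock, flags);	/* implies mmiowb() where needed */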
@@ -5795,7 +5899,7 @@ static int prepare_user_rq(struct ib_pd *pd,
return err;
}
- err = create_user_rq(dev, pd, rwq, &ucmd);
+ err = create_user_rq(dev, pd, udata, rwq, &ucmd);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
return err;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4e8d18009f58..1ec1beb1296b 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -47,6 +47,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd = {};
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
size_t ucmdlen;
int err;
int npages;
@@ -71,16 +73,14 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
return -EINVAL;
if (in->type != IB_SRQT_BASIC) {
- err = get_srq_user_index(to_mucontext(pd->uobject->context),
- &ucmd, udata->inlen, &uidx);
+ err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);
if (err)
return err;
}
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
- 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
@@ -104,8 +104,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
- err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
- ucmd.db_addr, &srq->db);
+ err = mlx5_ib_db_map_user(ucontext, udata, ucmd.db_addr, &srq->db);
if (err) {
mlx5_ib_dbg(dev, "map doorbell failed\n");
goto err_in;
diff --git a/drivers/infiniband/hw/mlx5/srq.h b/drivers/infiniband/hw/mlx5/srq.h
index 75eb5839ae95..c330af35ff10 100644
--- a/drivers/infiniband/hw/mlx5/srq.h
+++ b/drivers/infiniband/hw/mlx5/srq.h
@@ -46,8 +46,6 @@ struct mlx5_core_srq {
int wqe_shift;
void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
- atomic_t refcount;
- struct completion free;
u16 uid;
};
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 7aaaffbd4afa..63ac38bb3498 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -87,7 +87,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
srq = radix_tree_lookup(&table->tree, srqn);
if (srq)
- atomic_inc(&srq->refcount);
+ atomic_inc(&srq->common.refcount);
spin_unlock(&table->lock);
@@ -594,8 +594,8 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
if (err)
return err;
- atomic_set(&srq->refcount, 1);
- init_completion(&srq->free);
+ atomic_set(&srq->common.refcount, 1);
+ init_completion(&srq->common.free);
spin_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, srq->srqn, srq);
@@ -627,9 +627,8 @@ int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
if (err)
return err;
- if (atomic_dec_and_test(&srq->refcount))
- complete(&srq->free);
- wait_for_completion(&srq->free);
+ mlx5_core_res_put(&srq->common);
+ wait_for_completion(&srq->common.free);
return 0;
}
@@ -685,7 +684,7 @@ static int srq_event_notifier(struct notifier_block *nb,
srq = radix_tree_lookup(&table->tree, srqn);
if (srq)
- atomic_inc(&srq->refcount);
+ atomic_inc(&srq->common.refcount);
spin_unlock(&table->lock);
@@ -694,8 +693,7 @@ static int srq_event_notifier(struct notifier_block *nb,
srq->event(srq, eqe->type);
- if (atomic_dec_and_test(&srq->refcount))
- complete(&srq->free);
+ mlx5_core_res_put(&srq->common);
return NOTIFY_OK;
}
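With refcount and free relocated into mlx5_core_rsc_common, SRQ teardown reuses the core's put helper: the final reference completes free, and mlx5_cmd_destroy_srq() blocks on it while event handlers drain. A sketch of what mlx5_core_res_put() amounts to, assuming the standard dec-and-test form:

static void res_put(struct mlx5_core_rsc_common *res)
{
	if (atomic_dec_and_test(&res->refcount))
		complete(&res->free);	/* wakes the waiter in destroy */
}

	/* destroy side */
	mlx5_core_res_put(&srq->common);	/* drop the initial reference */
	wait_for_completion(&srq->common.free);	/* outlast in-flight events */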
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 83aa47eb81a9..bdf5ed38de22 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -292,12 +292,6 @@ static int mthca_cmd_post(struct mthca_dev *dev,
err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier,
op_modifier, op, token, event);
- /*
- * Make sure that our HCR writes don't get mixed in with
- * writes from another CPU starting a FW command.
- */
- mmiowb();
-
mutex_unlock(&dev->cmd.hcr_mutex);
return err;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index a6531ffe29a6..877a6daffa98 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -211,11 +211,6 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
dev->kar + MTHCA_CQ_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
- /*
- * Make sure doorbells don't leak out of CQ spinlock
- * and reach the HCA out of order:
- */
- mmiowb();
}
}
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 92c49bff22bc..fe9654a7af71 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -961,7 +961,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
/* We can handle large RDMA requests, so allow larger segments. */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
- mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
+ mdev = ib_alloc_device(mthca_dev, ib_dev);
if (!mdev) {
dev_err(&pdev->dev, "Device struct alloc failed, "
"aborting.\n");
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index e3e9dd54caa2..d063d7a37762 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -37,6 +37,7 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -300,17 +301,16 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
return err;
}
-static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
{
- struct mthca_alloc_ucontext_resp uresp;
- struct mthca_ucontext *context;
+ struct ib_device *ibdev = uctx->device;
+ struct mthca_alloc_ucontext_resp uresp = {};
+ struct mthca_ucontext *context = to_mucontext(uctx);
int err;
if (!(to_mdev(ibdev)->active))
- return ERR_PTR(-EAGAIN);
-
- memset(&uresp, 0, sizeof uresp);
+ return -EAGAIN;
uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
if (mthca_is_memfree(to_mdev(ibdev)))
@@ -318,44 +318,33 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
else
uresp.uarc_size = 0;
- context = kmalloc(sizeof *context, GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
- if (err) {
- kfree(context);
- return ERR_PTR(err);
- }
+ if (err)
+ return err;
context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
if (IS_ERR(context->db_tab)) {
err = PTR_ERR(context->db_tab);
mthca_uar_free(to_mdev(ibdev), &context->uar);
- kfree(context);
- return ERR_PTR(err);
+ return err;
}
- if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
mthca_uar_free(to_mdev(ibdev), &context->uar);
- kfree(context);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
context->reg_mr_warned = 0;
- return &context->ibucontext;
+ return 0;
}
-static int mthca_dealloc_ucontext(struct ib_ucontext *context)
+static void mthca_dealloc_ucontext(struct ib_ucontext *context)
{
mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab);
mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
- kfree(to_mucontext(context));
-
- return 0;
}
static int mthca_mmap_uar(struct ib_ucontext *context,
@@ -374,40 +363,30 @@ static int mthca_mmap_uar(struct ib_ucontext *context,
return 0;
}
-static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct mthca_pd *pd;
+ struct ib_device *ibdev = ibpd->device;
+ struct mthca_pd *pd = to_mpd(ibpd);
int err;
- pd = kmalloc(sizeof *pd, GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
- if (err) {
- kfree(pd);
- return ERR_PTR(err);
- }
+ if (err)
+ return err;
if (context) {
if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
mthca_pd_free(to_mdev(ibdev), pd);
- kfree(pd);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
}
- return &pd->ibpd;
+ return 0;
}
-static int mthca_dealloc_pd(struct ib_pd *pd)
+static void mthca_dealloc_pd(struct ib_pd *pd)
{
mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
- kfree(pd);
-
- return 0;
}
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
@@ -445,7 +424,8 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
struct ib_udata *udata)
{
struct mthca_create_srq ucmd;
- struct mthca_ucontext *context = NULL;
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
struct mthca_srq *srq;
int err;
@@ -457,8 +437,6 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
return ERR_PTR(-ENOMEM);
if (udata) {
- context = to_mucontext(pd->uobject->context);
-
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
err = -EFAULT;
goto err_free;
@@ -520,6 +498,8 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
struct mthca_create_qp ucmd;
struct mthca_qp *qp;
int err;
@@ -532,15 +512,11 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
case IB_QPT_UC:
case IB_QPT_UD:
{
- struct mthca_ucontext *context;
-
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
if (udata) {
- context = to_mucontext(pd->uobject->context);
-
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
kfree(qp);
return ERR_PTR(-EFAULT);
@@ -578,8 +554,6 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
&init_attr->cap, qp, udata);
if (err && udata) {
- context = to_mucontext(pd->uobject->context);
-
mthca_unmap_user_db(to_mdev(pd->device),
&context->uar,
context->db_tab,
@@ -684,7 +658,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
goto err_unmap_set;
}
- cq = kmalloc(sizeof *cq, GFP_KERNEL);
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) {
err = -ENOMEM;
goto err_unmap_arm;
@@ -907,22 +881,23 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(pd->device);
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
struct mthca_mr *mr;
struct mthca_reg_mr ucmd;
u64 *pages;
- int shift, n, len;
- int i, k, entry;
+ int n, i;
int err = 0;
int write_mtt_size;
if (udata->inlen < sizeof ucmd) {
- if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
+ if (!context->reg_mr_warned) {
mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
current->comm);
mthca_warn(dev, " Update libmthca to fix this.\n");
}
- ++to_mucontext(pd->uobject->context)->reg_mr_warned;
+ ++context->reg_mr_warned;
ucmd.mr_attrs = 0;
} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return ERR_PTR(-EFAULT);
@@ -931,7 +906,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
+ mr->umem = ib_umem_get(udata, start, length, acc,
ucmd.mr_attrs & MTHCA_MR_DMASYNC);
if (IS_ERR(mr->umem)) {
@@ -939,7 +914,6 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err;
}
- shift = mr->umem->page_shift;
n = mr->umem->nmap;
mr->mtt = mthca_alloc_mtt(dev, n);
@@ -958,21 +932,19 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
- for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
- len = sg_dma_len(sg) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(sg) + (k << shift);
- /*
- * Be friendly to write_mtt and pass it chunks
- * of appropriate size.
- */
- if (i == write_mtt_size) {
- err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
- if (err)
- goto mtt_done;
- n += i;
- i = 0;
- }
+ for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
+ pages[i++] = sg_page_iter_dma_address(&sg_iter);
+
+ /*
+ * Be friendly to write_mtt and pass it chunks
+ * of appropriate size.
+ */
+ if (i == write_mtt_size) {
+ err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+ if (err)
+ goto mtt_done;
+ n += i;
+ i = 0;
}
}
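The loop above swaps mthca's hand-rolled sg_dma_len()/shift walk for for_each_sg_dma_page(), which yields one PAGE_SIZE-aligned DMA address per iteration — which is also why mthca_mr_alloc() just below can pass PAGE_SHIFT instead of the umem's page shift. The core of the new iteration:

	struct sg_dma_page_iter sg_iter;
	int i = 0;

	/* one DMA address per PAGE_SIZE chunk of the mapped scatterlist */
	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0)
		pages[i++] = sg_page_iter_dma_address(&sg_iter);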
@@ -983,7 +955,7 @@ mtt_done:
if (err)
goto err_mtt;
- err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
+ err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
convert_access(acc), mr);
if (err)
@@ -1081,7 +1053,8 @@ static ssize_t hw_rev_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
- container_of(device, struct mthca_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
+
return sprintf(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -1090,7 +1063,8 @@ static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
- container_of(device, struct mthca_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
+
switch (dev->pdev->device) {
case PCI_DEVICE_ID_MELLANOX_TAVOR:
return sprintf(buf, "MT23108\n");
@@ -1111,7 +1085,8 @@ static ssize_t board_id_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
- container_of(device, struct mthca_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
+
return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR_RO(board_id);
@@ -1225,6 +1200,8 @@ static const struct ib_device_ops mthca_dev_ops = {
.query_qp = mthca_query_qp,
.reg_user_mr = mthca_reg_user_mr,
.resize_cq = mthca_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
};
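The two INIT_RDMA_OBJ_SIZE() entries are what let the alloc_pd/alloc_ucontext conversions above shed their kmalloc()/kfree(): the core records the driver struct's size and the offset of the embedded ib_* member, allocates the container itself, and hands the embedded pointer to the driver op. Schematically (a sketch of the contract, not the macro's definition):

	/* registration declares the container layout ... */
	INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd);

	/* ... so the core can do, in effect: */
	struct mthca_pd *pd = kzalloc(sizeof(struct mthca_pd), GFP_KERNEL);
	ret = dev->ops.alloc_pd(&pd->ibpd, context, udata);	/* driver fills the rest */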
static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
@@ -1338,7 +1315,7 @@ int mthca_register_device(struct mthca_dev *dev)
rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA;
- ret = ib_register_device(&dev->ib_dev, "mthca%d", NULL);
+ ret = ib_register_device(&dev->ib_dev, "mthca%d");
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 4e5b5cc17f1d..d65b189f20ea 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -42,6 +42,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
+#include <rdma/uverbs_ioctl.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
@@ -554,10 +555,14 @@ static int mthca_path_set(struct mthca_dev *dev, const struct rdma_ah_attr *ah,
static int __mthca_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
- enum ib_qp_state cur_state, enum ib_qp_state new_state)
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
+ struct mthca_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
struct mthca_mailbox *mailbox;
struct mthca_qp_param *qp_param;
struct mthca_qp_context *qp_context;
@@ -619,8 +624,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
/* leave arbel_sched_queue as 0 */
if (qp->ibqp.uobject)
- qp_context->usr_page =
- cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
+ qp_context->usr_page = cpu_to_be32(context->uar.index);
else
qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
qp_context->local_qpn = cpu_to_be32(qp->qpn);
@@ -913,7 +917,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
goto out;
}
- err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+ err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state,
+ udata);
out:
mutex_unlock(&qp->mutex);
@@ -1804,11 +1809,6 @@ out:
(qp->qpn << 8) | size0,
dev->kar + MTHCA_SEND_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
- /*
- * Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order:
- */
- mmiowb();
}
qp->sq.next_ind = ind;
@@ -1919,12 +1919,6 @@ out:
qp->rq.next_ind = ind;
qp->rq.head += nreq;
- /*
- * Make sure doorbells don't leak out of RQ spinlock and reach
- * the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&qp->rq.lock, flags);
return err;
}
@@ -2159,12 +2153,6 @@ out:
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
- /*
- * Make sure doorbells don't leak out of SQ spinlock and reach
- * the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&qp->sq.lock, flags);
return err;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index b8333c79e3fa..a85935ccce88 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -36,6 +36,8 @@
#include <asm/io.h>
+#include <rdma/uverbs_ioctl.h>
+
#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
@@ -96,17 +98,19 @@ static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_srq *srq,
struct mthca_tavor_srq_context *context,
- bool is_user)
+ struct ib_udata *udata)
{
+ struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
+
memset(context, 0, sizeof *context);
context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
context->state_pd = cpu_to_be32(pd->pd_num);
context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
- if (is_user)
- context->uar =
- cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ if (udata)
+ context->uar = cpu_to_be32(ucontext->uar.index);
else
context->uar = cpu_to_be32(dev->driver_uar.index);
}
@@ -115,8 +119,10 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_srq *srq,
struct mthca_arbel_srq_context *context,
- bool is_user)
+ struct ib_udata *udata)
{
+ struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mthca_ucontext, ibucontext);
int logsize, max;
memset(context, 0, sizeof *context);
@@ -131,9 +137,8 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
context->db_index = cpu_to_be32(srq->db_index);
context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
- if (is_user)
- context->logstride_usrpage |=
- cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ if (udata)
+ context->logstride_usrpage |= cpu_to_be32(ucontext->uar.index);
else
context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
@@ -565,12 +570,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
- /*
- * Make sure doorbells don't leak out of SRQ spinlock and
- * reach the HCA out of order:
- */
- mmiowb();
-
spin_unlock_irqrestore(&srq->lock, flags);
return err;
}
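These mthca_srq.c hunks show the series' core conversion: the driver-private ucontext is recovered from the ib_udata via rdma_udata_to_drv_context() instead of chasing pd->ibpd.uobject->context, and the udata pointer itself replaces the old is_user flag as the user-vs-kernel test. A minimal sketch of the idiom, using a hypothetical driver "ex" (only the macro and the embedding requirement come from the source):

    #include <rdma/ib_verbs.h>
    #include <rdma/uverbs_ioctl.h>

    struct ex_ucontext {
    	struct ib_ucontext ibucontext;	/* core object, embedded */
    	u32 uar_index;
    };

    static u32 ex_pick_uar_index(u32 driver_uar_index,
    				 struct ib_udata *udata)
    {
    	struct ex_ucontext *uctx = rdma_udata_to_drv_context(
    		udata, struct ex_ucontext, ibucontext);

    	/* udata != NULL means a user-space caller; the macro
    	 * yields NULL for kernel callers, so only dereference
    	 * uctx on the user path. */
    	return udata ? uctx->uar_index : driver_uar_index;
    }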
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
index 7964eba8e7ed..52caae954e4a 100644
--- a/drivers/infiniband/hw/nes/Kconfig
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_NES
tristate "NetEffect RNIC Driver"
- depends on PCI && INET && INFINIBAND
+ depends on PCI && INET
select LIBCRC32C
---help---
This is the RDMA Network Interface Card (RNIC) driver for
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 4e7f08ee1907..828e4af3f951 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -41,6 +41,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
#include "nes.h"
@@ -528,42 +529,36 @@ static int nes_query_gid(struct ib_device *ibdev, u8 port,
* nes_alloc_ucontext - Allocate the user context data structure. This keeps track
* of all objects associated with a particular user-mode client.
*/
-static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int nes_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
+ struct ib_device *ibdev = uctx->device;
struct nes_vnic *nesvnic = to_nesvnic(ibdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_alloc_ucontext_req req;
- struct nes_alloc_ucontext_resp uresp;
- struct nes_ucontext *nes_ucontext;
+ struct nes_alloc_ucontext_resp uresp = {};
+ struct nes_ucontext *nes_ucontext = to_nesucontext(uctx);
struct nes_ib_device *nesibdev = nesvnic->nesibdev;
if (ib_copy_from_udata(&req, udata, sizeof(struct nes_alloc_ucontext_req))) {
printk(KERN_ERR PFX "Invalid structure size on allocate user context.\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (req.userspace_ver != NES_ABI_USERSPACE_VER) {
printk(KERN_ERR PFX "Invalid userspace driver version detected. Detected version %d, should be %d\n",
req.userspace_ver, NES_ABI_USERSPACE_VER);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- memset(&uresp, 0, sizeof uresp);
-
uresp.max_qps = nesibdev->max_qp;
uresp.max_pds = nesibdev->max_pd;
uresp.wq_size = nesdev->nesadapter->max_qp_wr * 2;
uresp.virtwq = nesadapter->virtwq;
uresp.kernel_ver = NES_ABI_KERNEL_VER;
- nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL);
- if (!nes_ucontext)
- return ERR_PTR(-ENOMEM);
-
nes_ucontext->nesdev = nesdev;
nes_ucontext->mmap_wq_offset = uresp.max_pds;
nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset +
@@ -571,34 +566,22 @@ static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
PAGE_SIZE;
- if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
- kfree(nes_ucontext);
- return ERR_PTR(-EFAULT);
- }
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
+ return -EFAULT;
INIT_LIST_HEAD(&nes_ucontext->cq_reg_mem_list);
INIT_LIST_HEAD(&nes_ucontext->qp_reg_mem_list);
- atomic_set(&nes_ucontext->usecnt, 1);
- return &nes_ucontext->ibucontext;
+ return 0;
}
-
/**
* nes_dealloc_ucontext
*/
-static int nes_dealloc_ucontext(struct ib_ucontext *context)
+static void nes_dealloc_ucontext(struct ib_ucontext *context)
{
- /* struct nes_vnic *nesvnic = to_nesvnic(context->device); */
- /* struct nes_device *nesdev = nesvnic->nesdev; */
- struct nes_ucontext *nes_ucontext = to_nesucontext(context);
-
- if (!atomic_dec_and_test(&nes_ucontext->usecnt))
- return 0;
- kfree(nes_ucontext);
- return 0;
+ return;
}
-
/**
* nes_mmap
*/
@@ -658,10 +641,11 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
/**
* nes_alloc_pd
*/
-static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context, struct ib_udata *udata)
+static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct nes_pd *nespd;
+ struct ib_device *ibdev = pd->device;
+ struct nes_pd *nespd = to_nespd(pd);
struct nes_vnic *nesvnic = to_nesvnic(ibdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
@@ -676,15 +660,8 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD);
- if (err) {
- return ERR_PTR(err);
- }
-
- nespd = kzalloc(sizeof (struct nes_pd), GFP_KERNEL);
- if (!nespd) {
- nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
- return ERR_PTR(-ENOMEM);
- }
+ if (err)
+ return err;
nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
nespd, dev_name(&nesvnic->nesibdev->ibdev.dev));
@@ -700,16 +677,14 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) {
nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n");
nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
- kfree(nespd);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
uresp.pd_id = nespd->pd_id;
uresp.mmap_db_index = nespd->mmap_db_index;
if (ib_copy_to_udata(udata, &uresp, sizeof (struct nes_alloc_pd_resp))) {
nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
- kfree(nespd);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
set_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
@@ -718,14 +693,14 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
}
nes_debug(NES_DBG_PD, "PD%u structure located @%p.\n", nespd->pd_id, nespd);
- return &nespd->ibpd;
+ return 0;
}
/**
* nes_dealloc_pd
*/
-static int nes_dealloc_pd(struct ib_pd *ibpd)
+static void nes_dealloc_pd(struct ib_pd *ibpd)
{
struct nes_ucontext *nesucontext;
struct nes_pd *nespd = to_nespd(ibpd);
@@ -748,9 +723,6 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
nespd->pd_id, nespd);
nes_free_resource(nesadapter, nesadapter->allocated_pds,
(nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12));
- kfree(nespd);
-
- return 0;
}
@@ -985,7 +957,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_qp *nesqp;
struct nes_cq *nescq;
- struct nes_ucontext *nes_ucontext;
+ struct nes_ucontext *nes_ucontext = rdma_udata_to_drv_context(
+ udata, struct nes_ucontext, ibucontext);
struct nes_hw_cqp_wqe *cqp_wqe;
struct nes_cqp_request *cqp_request;
struct nes_create_qp_req req;
@@ -1066,9 +1039,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
}
if (req.user_qp_buffer)
nesqp->nesuqp_addr = req.user_qp_buffer;
- if (udata && (ibpd->uobject->context)) {
+ if (udata) {
nesqp->user_mode = 1;
- nes_ucontext = to_nesucontext(ibpd->uobject->context);
if (virt_wqs) {
err = 1;
list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) {
@@ -1089,7 +1061,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
}
}
- nes_ucontext = to_nesucontext(ibpd->uobject->context);
nesqp->mmap_sq_db_index =
find_next_zero_bit(nes_ucontext->allocated_wqs,
NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
@@ -2109,18 +2080,18 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct ib_mr *ibmr = ERR_PTR(-EINVAL);
- struct scatterlist *sg;
- struct nes_ucontext *nes_ucontext;
+ struct sg_dma_page_iter dma_iter;
+ struct nes_ucontext *nes_ucontext = rdma_udata_to_drv_context(
+ udata, struct nes_ucontext, ibucontext);
struct nes_pbl *nespbl;
struct nes_mr *nesmr;
struct ib_umem *region;
struct nes_mem_reg_req req;
struct nes_vpbl vpbl;
struct nes_root_vpbl root_vpbl;
- int entry, page_index;
+ int page_index;
int page_count = 0;
int err, pbl_depth = 0;
- int chunk_pages;
int ret;
u32 stag;
u32 stag_index = 0;
@@ -2132,9 +2103,8 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u16 pbl_count;
u8 single_page = 1;
u8 stag_key;
- int first_page = 1;
- region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ region = ib_umem_get(udata, start, length, acc, 0);
if (IS_ERR(region)) {
return (struct ib_mr *)region;
}
@@ -2183,127 +2153,99 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
nesmr->region = region;
- for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
- if (sg_dma_address(sg) & ~PAGE_MASK) {
- ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
- (unsigned int) sg_dma_address(sg));
- ibmr = ERR_PTR(-EINVAL);
- kfree(nesmr);
- goto reg_user_mr_err;
- }
+ for_each_sg_dma_page (region->sg_head.sgl, &dma_iter, region->nmap, 0) {
- if (!sg_dma_len(sg)) {
- ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
- ibmr = ERR_PTR(-EINVAL);
- kfree(nesmr);
- goto reg_user_mr_err;
- }
-
- region_length += sg_dma_len(sg);
- chunk_pages = sg_dma_len(sg) >> 12;
+ region_length += PAGE_SIZE;
region_length -= skip_pages << 12;
- for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
- skip_pages = 0;
- if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length)
- goto enough_pages;
- if ((page_count&0x01FF) == 0) {
- if (page_count >= 1024 * 512) {
+ skip_pages = 0;
+ if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length)
+ goto enough_pages;
+ if ((page_count & 0x01FF) == 0) {
+ if (page_count >= 1024 * 512) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter,
+ nesadapter->allocated_mrs, stag_index);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-E2BIG);
+ goto reg_user_mr_err;
+ }
+ if (root_pbl_index == 1) {
+ root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
+ 8192, &root_vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
+ root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
+ if (!root_vpbl.pbl_vbase) {
ib_umem_release(region);
- nes_free_resource(nesadapter,
- nesadapter->allocated_mrs, stag_index);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
kfree(nesmr);
- ibmr = ERR_PTR(-E2BIG);
+ ibmr = ERR_PTR(-ENOMEM);
goto reg_user_mr_err;
}
- if (root_pbl_index == 1) {
- root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
- 8192, &root_vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
- root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
- if (!root_vpbl.pbl_vbase) {
- ib_umem_release(region);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- kfree(nesmr);
- ibmr = ERR_PTR(-ENOMEM);
- goto reg_user_mr_err;
- }
- root_vpbl.leaf_vpbl = kcalloc(1024,
- sizeof(*root_vpbl.leaf_vpbl),
- GFP_KERNEL);
- if (!root_vpbl.leaf_vpbl) {
- ib_umem_release(region);
- pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
- root_vpbl.pbl_pbase);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- kfree(nesmr);
- ibmr = ERR_PTR(-ENOMEM);
- goto reg_user_mr_err;
- }
- root_vpbl.pbl_vbase[0].pa_low =
- cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[0].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
- root_vpbl.leaf_vpbl[0] = vpbl;
- }
- vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
- &vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
- vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
- if (!vpbl.pbl_vbase) {
+ root_vpbl.leaf_vpbl = kcalloc(1024,
+ sizeof(*root_vpbl.leaf_vpbl),
+ GFP_KERNEL);
+ if (!root_vpbl.leaf_vpbl) {
ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- ibmr = ERR_PTR(-ENOMEM);
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
goto reg_user_mr_err;
}
- if (1 <= root_pbl_index) {
- root_vpbl.pbl_vbase[root_pbl_index].pa_low =
- cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[root_pbl_index].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
- root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
- }
- root_pbl_index++;
- cur_pbl_index = 0;
+ root_vpbl.pbl_vbase[0].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[0].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[0] = vpbl;
}
- if (single_page) {
- if (page_count != 0) {
- if ((last_dma_addr+4096) !=
- (sg_dma_address(sg)+
- (page_index*4096)))
- single_page = 0;
- last_dma_addr = sg_dma_address(sg)+
- (page_index*4096);
- } else {
- first_dma_addr = sg_dma_address(sg)+
- (page_index*4096);
- last_dma_addr = first_dma_addr;
- }
+ vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
+ vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
+ if (!vpbl.pbl_vbase) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ ibmr = ERR_PTR(-ENOMEM);
+ kfree(nesmr);
+ goto reg_user_mr_err;
}
-
- vpbl.pbl_vbase[cur_pbl_index].pa_low =
- cpu_to_le32((u32)(sg_dma_address(sg)+
- (page_index*4096)));
- vpbl.pbl_vbase[cur_pbl_index].pa_high =
- cpu_to_le32((u32)((((u64)(sg_dma_address(sg)+
- (page_index*4096))) >> 32)));
- cur_pbl_index++;
- page_count++;
+ if (1 <= root_pbl_index) {
+ root_vpbl.pbl_vbase[root_pbl_index].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[root_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
+ }
+ root_pbl_index++;
+ cur_pbl_index = 0;
}
+ if (single_page) {
+ if (page_count != 0) {
+ if ((last_dma_addr + 4096) != sg_page_iter_dma_address(&dma_iter))
+ single_page = 0;
+ last_dma_addr = sg_page_iter_dma_address(&dma_iter);
+ } else {
+ first_dma_addr = sg_page_iter_dma_address(&dma_iter);
+ last_dma_addr = first_dma_addr;
+ }
+ }
+
+ vpbl.pbl_vbase[cur_pbl_index].pa_low =
+ cpu_to_le32((u32)(sg_page_iter_dma_address(&dma_iter)));
+ vpbl.pbl_vbase[cur_pbl_index].pa_high =
+ cpu_to_le32((u32)(((u64)sg_page_iter_dma_address(&dma_iter)) >> 32));
+ cur_pbl_index++;
+ page_count++;
}
- enough_pages:
+enough_pages:
nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x,"
" stag_key=0x%08x\n",
stag_index, driver_key, stag_key);
@@ -2345,7 +2287,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
ibmr = ERR_PTR(-ENOMEM);
}
- reg_user_mr_err:
+reg_user_mr_err:
/* free the resources */
if (root_pbl_index == 1) {
pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
@@ -2383,7 +2325,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-ENOMEM);
}
nesmr->region = region;
- nes_ucontext = to_nesucontext(pd->uobject->context);
pbl_depth = region->length >> 12;
pbl_depth += (region->length & (4096-1)) ? 1 : 0;
nespbl->pbl_size = pbl_depth*sizeof(u64);
@@ -2412,26 +2353,14 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase,
(void *) nespbl->pbl_vbase, nespbl->user_base);
- for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
- chunk_pages = sg_dma_len(sg) >> 12;
- chunk_pages += (sg_dma_len(sg) & (4096-1)) ? 1 : 0;
- if (first_page) {
- nespbl->page = sg_page(sg);
- first_page = 0;
- }
-
- for (page_index = 0; page_index < chunk_pages; page_index++) {
- ((__le32 *)pbl)[0] = cpu_to_le32((u32)
- (sg_dma_address(sg)+
- (page_index*4096)));
- ((__le32 *)pbl)[1] = cpu_to_le32(((u64)
- (sg_dma_address(sg)+
- (page_index*4096)))>>32);
- nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
- (unsigned long long)*pbl,
- le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
- pbl++;
- }
+ nespbl->page = sg_page(region->sg_head.sgl);
+ for_each_sg_dma_page(region->sg_head.sgl, &dma_iter, region->nmap, 0) {
+ ((__le32 *)pbl)[0] = cpu_to_le32((u32)(sg_page_iter_dma_address(&dma_iter)));
+ ((__le32 *)pbl)[1] = cpu_to_le32(((u64)(sg_page_iter_dma_address(&dma_iter)))>>32);
+ nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
+ (unsigned long long)*pbl,
+ le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
+ pbl++;
}
if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
@@ -2560,7 +2489,7 @@ static ssize_t hw_rev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nes_ib_device *nesibdev =
- container_of(dev, struct nes_ib_device, ibdev.dev);
+ rdma_device_to_drv_device(dev, struct nes_ib_device, ibdev);
struct nes_vnic *nesvnic = nesibdev->nesvnic;
nes_debug(NES_DBG_INIT, "\n");
@@ -3658,6 +3587,8 @@ static const struct ib_device_ops nes_dev_ops = {
.query_qp = nes_query_qp,
.reg_user_mr = nes_reg_user_mr,
.req_notify_cq = nes_req_notify_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, nes_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, nes_ucontext, ibucontext),
};
/**
@@ -3669,7 +3600,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
- nesibdev = (struct nes_ib_device *)ib_alloc_device(sizeof(struct nes_ib_device));
+ nesibdev = ib_alloc_device(nes_ib_device, ibdev);
if (nesibdev == NULL) {
return NULL;
}
@@ -3801,7 +3732,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
rdma_set_device_sysfs_group(&nesvnic->nesibdev->ibdev, &nes_attr_group);
nesvnic->nesibdev->ibdev.driver_id = RDMA_DRIVER_NES;
- ret = ib_register_device(&nesvnic->nesibdev->ibdev, "nes%d", NULL);
+ ret = ib_register_device(&nesvnic->nesibdev->ibdev, "nes%d");
if (ret) {
return ret;
}
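Two API updates appear in the nes registration path above: ib_alloc_device() is now a type-safe macro taking the driver type and the name of its embedded struct ib_device member, and ib_register_device() has dropped its third (port-callback) argument. A sketch under those assumptions, with illustrative names:

    #include <rdma/ib_verbs.h>

    struct ex_dev {
    	struct ib_device ibdev;
    	/* driver-private state follows */
    };

    static int ex_register(void)
    {
    	struct ex_dev *dev;
    	int ret;

    	dev = ib_alloc_device(ex_dev, ibdev);	/* sized for ex_dev */
    	if (!dev)
    		return -ENOMEM;

    	ret = ib_register_device(&dev->ibdev, "ex%d");
    	if (ret)
    		ib_dealloc_device(&dev->ibdev);
    	return ret;
    }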
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index e02a5662dc20..114a9b59fefd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -59,7 +59,6 @@ struct nes_ucontext {
struct list_head cq_reg_mem_list;
struct list_head qp_reg_mem_list;
u32 mcrqf;
- atomic_t usecnt;
};
struct nes_pd {
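Dropping usecnt from nes_ucontext follows from the ownership change: once a driver declares its object sizes with INIT_RDMA_OBJ_SIZE(), the core allocates and frees PDs and ucontexts, so the driver loses its kzalloc()/kfree() pairs and any private refcounting. A minimal sketch of the declaration side (the ops wiring is illustrative):

    #include <rdma/ib_verbs.h>

    struct ex_pd {
    	struct ib_pd ibpd;	/* core object must be embedded */
    	u32 pd_id;
    };

    static int ex_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ctx,
    		       struct ib_udata *udata);
    static void ex_dealloc_pd(struct ib_pd *ibpd);

    static const struct ib_device_ops ex_dev_ops = {
    	.alloc_pd = ex_alloc_pd,	/* returns int; memory pre-allocated */
    	.dealloc_pd = ex_dealloc_pd,	/* returns void; no kfree() */
    	INIT_RDMA_OBJ_SIZE(ib_pd, ex_pd, ibpd),
    };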
diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile
index d1bfd4f4cdde..e3f20ca15462 100644
--- a/drivers/infiniband/hw/ocrdma/Makefile
+++ b/drivers/infiniband/hw/ocrdma/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Idrivers/net/ethernet/emulex/benet
+ccflags-y := -I $(srctree)/drivers/net/ethernet/emulex/benet
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 1f393842453a..b9e10d55a58e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -118,7 +118,8 @@ static void get_dev_fw_str(struct ib_device *device, char *str)
static ssize_t hw_rev_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
+ struct ocrdma_dev *dev =
+ rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
}
@@ -127,7 +128,8 @@ static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
+ struct ocrdma_dev *dev =
+ rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
}
@@ -177,6 +179,8 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.reg_user_mr = ocrdma_reg_user_mr,
.req_notify_cq = ocrdma_arm_cq,
.resize_cq = ocrdma_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
};
static const struct ib_device_ops ocrdma_dev_srq_ops = {
@@ -243,7 +247,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
}
rdma_set_device_sysfs_group(&dev->ibdev, &ocrdma_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_OCRDMA;
- return ib_register_device(&dev->ibdev, "ocrdma%d", NULL);
+ return ib_register_device(&dev->ibdev, "ocrdma%d");
}
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
@@ -295,7 +299,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
u8 lstate = 0;
struct ocrdma_dev *dev;
- dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
+ dev = ib_alloc_device(ocrdma_dev, ibdev);
if (!dev) {
pr_err("Unable to allocate ib device\n");
return NULL;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 6be0ea109138..a902942adb5d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -767,88 +767,65 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
/* Create post stats base dir */
dev->dir = debugfs_create_dir(pci_name(pdev), ocrdma_dbgfs_dir);
- if (!dev->dir)
- goto err;
dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
dev->rsrc_stats.dev = dev;
- if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
- &dev->rsrc_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
+ &dev->rsrc_stats, &ocrdma_dbg_ops);
dev->rx_stats.type = OCRDMA_RXSTATS;
dev->rx_stats.dev = dev;
- if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
- &dev->rx_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("rx_stats", S_IRUSR, dev->dir, &dev->rx_stats,
+ &ocrdma_dbg_ops);
dev->wqe_stats.type = OCRDMA_WQESTATS;
dev->wqe_stats.dev = dev;
- if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
- &dev->wqe_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("wqe_stats", S_IRUSR, dev->dir, &dev->wqe_stats,
+ &ocrdma_dbg_ops);
dev->tx_stats.type = OCRDMA_TXSTATS;
dev->tx_stats.dev = dev;
- if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
- &dev->tx_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("tx_stats", S_IRUSR, dev->dir, &dev->tx_stats,
+ &ocrdma_dbg_ops);
dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
dev->db_err_stats.dev = dev;
- if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
- &dev->db_err_stats, &ocrdma_dbg_ops))
- goto err;
-
+ debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
+ &dev->db_err_stats, &ocrdma_dbg_ops);
dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
dev->tx_qp_err_stats.dev = dev;
- if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
- &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->tx_qp_err_stats, &ocrdma_dbg_ops);
dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
dev->rx_qp_err_stats.dev = dev;
- if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
- &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
- goto err;
-
+ debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->rx_qp_err_stats, &ocrdma_dbg_ops);
dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
dev->tx_dbg_stats.dev = dev;
- if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
- &dev->tx_dbg_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->tx_dbg_stats, &ocrdma_dbg_ops);
dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
dev->rx_dbg_stats.dev = dev;
- if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
- &dev->rx_dbg_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->rx_dbg_stats, &ocrdma_dbg_ops);
dev->driver_stats.type = OCRDMA_DRV_STATS;
dev->driver_stats.dev = dev;
- if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
- &dev->driver_stats, &ocrdma_dbg_ops))
- goto err;
+ debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
+ &dev->driver_stats, &ocrdma_dbg_ops);
dev->reset_stats.type = OCRDMA_RESET_STATS;
dev->reset_stats.dev = dev;
- if (!debugfs_create_file("reset_stats", 0200, dev->dir,
- &dev->reset_stats, &ocrdma_dbg_ops))
- goto err;
-
-
- return;
-err:
- debugfs_remove_recursive(dev->dir);
- dev->dir = NULL;
+ debugfs_create_file("reset_stats", 0200, dev->dir, &dev->reset_stats,
+ &ocrdma_dbg_ops);
}
void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
{
- if (!dev->dir)
- return;
debugfs_remove_recursive(dev->dir);
}
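The ocrdma debugfs error paths collapse because of a tree-wide convention change: debugfs calls are best-effort, callers are not supposed to check their return values, and cleanup via debugfs_remove_recursive() is safe even when creation failed or the dentry was never set. A small sketch of the resulting idiom (names are illustrative):

    #include <linux/debugfs.h>

    static struct dentry *ex_dbg_root;

    static void ex_debugfs_init(void *stats,
    			    const struct file_operations *fops)
    {
    	ex_dbg_root = debugfs_create_dir("ex_dev", NULL);
    	/* No return-value checks: debugfs failures are non-fatal
    	 * and the create calls tolerate a failed parent. */
    	debugfs_create_file("resource_stats", 0400, ex_dbg_root,
    			    stats, fops);
    }

    static void ex_debugfs_exit(void)
    {
    	debugfs_remove_recursive(ex_dbg_root);	/* safe on NULL */
    }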
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 287c332ff0e6..b4e1777c2c97 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -55,7 +55,7 @@
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
- if (index > 1)
+ if (index > 0)
return -EINVAL;
*pkey = 0xffff;
@@ -367,17 +367,12 @@ static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
return status;
}
-static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
- struct ocrdma_ucontext *uctx,
- struct ib_udata *udata)
+static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
{
- struct ocrdma_pd *pd = NULL;
int status;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
if (udata && uctx && dev->attr.max_dpp_pds) {
pd->dpp_enabled =
ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
@@ -386,15 +381,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
dev->attr.wqe_size) : 0;
}
- if (dev->pd_mgr->pd_prealloc_valid) {
- status = ocrdma_get_pd_num(dev, pd);
- if (status == 0) {
- return pd;
- } else {
- kfree(pd);
- return ERR_PTR(status);
- }
- }
+ if (dev->pd_mgr->pd_prealloc_valid)
+ return ocrdma_get_pd_num(dev, pd);
retry:
status = ocrdma_mbx_alloc_pd(dev, pd);
@@ -403,13 +391,11 @@ retry:
pd->dpp_enabled = false;
pd->num_dpp_qp = 0;
goto retry;
- } else {
- kfree(pd);
- return ERR_PTR(status);
}
+ return status;
}
- return pd;
+ return 0;
}
static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
@@ -418,30 +404,33 @@ static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
return (uctx->cntxt_pd == pd);
}
-static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
+static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
struct ocrdma_pd *pd)
{
- int status;
-
if (dev->pd_mgr->pd_prealloc_valid)
- status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
+ ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
else
- status = ocrdma_mbx_dealloc_pd(dev, pd);
-
- kfree(pd);
- return status;
+ ocrdma_mbx_dealloc_pd(dev, pd);
}
static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
struct ocrdma_ucontext *uctx,
struct ib_udata *udata)
{
- int status = 0;
+ struct ib_device *ibdev = &dev->ibdev;
+ struct ib_pd *pd;
+ int status;
+
+ pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
+ if (!pd)
+ return -ENOMEM;
- uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
- if (IS_ERR(uctx->cntxt_pd)) {
- status = PTR_ERR(uctx->cntxt_pd);
- uctx->cntxt_pd = NULL;
+ pd->device = ibdev;
+ uctx->cntxt_pd = get_ocrdma_pd(pd);
+
+ status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
+ if (status) {
+ kfree(uctx->cntxt_pd);
goto err;
}
@@ -451,7 +440,7 @@ err:
return status;
}
-static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
+static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
struct ocrdma_pd *pd = uctx->cntxt_pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
@@ -460,9 +449,9 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
__func__, dev->id, pd->id);
}
+ kfree(uctx->cntxt_pd);
uctx->cntxt_pd = NULL;
- (void)_ocrdma_dealloc_pd(dev, pd);
- return 0;
+ _ocrdma_dealloc_pd(dev, pd);
}
static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -486,33 +475,28 @@ static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
mutex_unlock(&uctx->mm_list_lock);
}
-struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
+ struct ib_device *ibdev = uctx->device;
int status;
- struct ocrdma_ucontext *ctx;
- struct ocrdma_alloc_ucontext_resp resp;
+ struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
+ struct ocrdma_alloc_ucontext_resp resp = {};
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct pci_dev *pdev = dev->nic_info.pdev;
u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
if (!udata)
- return ERR_PTR(-EFAULT);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ return -EFAULT;
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
&ctx->ah_tbl.pa, GFP_KERNEL);
- if (!ctx->ah_tbl.va) {
- kfree(ctx);
- return ERR_PTR(-ENOMEM);
- }
+ if (!ctx->ah_tbl.va)
+ return -ENOMEM;
+
ctx->ah_tbl.len = map_len;
- memset(&resp, 0, sizeof(resp));
resp.ah_tbl_len = ctx->ah_tbl.len;
resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
@@ -534,27 +518,26 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
status = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (status)
goto cpy_err;
- return &ctx->ibucontext;
+ return 0;
cpy_err:
+ ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
ctx->ah_tbl.pa);
- kfree(ctx);
- return ERR_PTR(status);
+ return status;
}
-int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
- int status;
struct ocrdma_mm *mm, *tmp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
struct pci_dev *pdev = dev->nic_info.pdev;
- status = ocrdma_dealloc_ucontext_pd(uctx);
+ ocrdma_dealloc_ucontext_pd(uctx);
ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
@@ -564,8 +547,6 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
list_del(&mm->entry);
kfree(mm);
}
- kfree(uctx);
- return status;
}
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
@@ -658,10 +639,10 @@ dpp_map_err:
return status;
}
-struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = ibpd->device;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct ocrdma_pd *pd;
struct ocrdma_ucontext *uctx = NULL;
@@ -677,11 +658,10 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
}
}
- pd = _ocrdma_alloc_pd(dev, uctx, udata);
- if (IS_ERR(pd)) {
- status = PTR_ERR(pd);
+ pd = get_ocrdma_pd(ibpd);
+ status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
+ if (status)
goto exit;
- }
pd_mapping:
if (udata && context) {
@@ -689,25 +669,22 @@ pd_mapping:
if (status)
goto err;
}
- return &pd->ibpd;
+ return 0;
err:
- if (is_uctx_pd) {
+ if (is_uctx_pd)
ocrdma_release_ucontext_pd(uctx);
- } else {
- if (_ocrdma_dealloc_pd(dev, pd))
- pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
- }
+ else
+ _ocrdma_dealloc_pd(dev, pd);
exit:
- return ERR_PTR(status);
+ return status;
}
-int ocrdma_dealloc_pd(struct ib_pd *ibpd)
+void ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_ucontext *uctx = NULL;
- int status = 0;
u64 usr_db;
uctx = pd->uctx;
@@ -721,11 +698,10 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
if (is_ucontext_pd(uctx, pd)) {
ocrdma_release_ucontext_pd(uctx);
- return status;
+ return;
}
}
- status = _ocrdma_dealloc_pd(dev, pd);
- return status;
+ _ocrdma_dealloc_pd(dev, pd);
}
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
@@ -854,10 +830,11 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
u32 num_pbes)
{
struct ocrdma_pbe *pbe;
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
struct ib_umem *umem = mr->umem;
- int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
+ int pbe_cnt, total_num_pbes = 0;
+ u64 pg_addr;
if (!mr->hwmr.num_pbes)
return;
@@ -865,36 +842,26 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
pbe = (struct ocrdma_pbe *)pbl_tbl->va;
pbe_cnt = 0;
- shift = umem->page_shift;
-
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- pages = sg_dma_len(sg) >> shift;
- for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
- /* store the page address in pbe */
- pbe->pa_lo =
- cpu_to_le32(sg_dma_address(sg) +
- (pg_cnt << shift));
- pbe->pa_hi =
- cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
- (pg_cnt << shift)));
- pbe_cnt += 1;
- total_num_pbes += 1;
- pbe++;
-
- /* if done building pbes, issue the mbx cmd. */
- if (total_num_pbes == num_pbes)
- return;
-
- /* if the given pbl is full storing the pbes,
- * move to next pbl.
- */
- if (pbe_cnt ==
- (mr->hwmr.pbl_size / sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct ocrdma_pbe *)pbl_tbl->va;
- pbe_cnt = 0;
- }
+ for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ /* store the page address in pbe */
+ pg_addr = sg_page_iter_dma_address(&sg_iter);
+ pbe->pa_lo = cpu_to_le32(pg_addr);
+ pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
+ pbe_cnt += 1;
+ total_num_pbes += 1;
+ pbe++;
+
+ /* if done building pbes, issue the mbx cmd. */
+ if (total_num_pbes == num_pbes)
+ return;
+ /* if the given pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ pbe_cnt = 0;
}
}
}
@@ -916,7 +883,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc, 0);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
@@ -926,7 +893,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
if (status)
goto umem_err;
- mr->hwmr.pbe_size = BIT(mr->umem->page_shift);
+ mr->hwmr.pbe_size = PAGE_SIZE;
mr->hwmr.fbo = ib_umem_offset(mr->umem);
mr->hwmr.va = usr_addr;
mr->hwmr.len = len;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b69cfdce7970..4c04ab40798e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -64,15 +64,14 @@ void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
struct net_device *ocrdma_get_netdev(struct ib_device *device, u8 port_num);
int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
-struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
- struct ib_udata *);
-int ocrdma_dealloc_ucontext(struct ib_ucontext *);
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
-struct ib_pd *ocrdma_alloc_pd(struct ib_device *,
- struct ib_ucontext *, struct ib_udata *);
-int ocrdma_dealloc_pd(struct ib_pd *pd);
+int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
+ struct ib_udata *udata);
+void ocrdma_dealloc_pd(struct ib_pd *pd);
struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
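The prototype changes above summarize the new contract for ucontext and PD verbs: the core allocates the object, the alloc callback initializes it and returns 0 or -errno, and the dealloc callback returns void and frees nothing. A sketch of a converted ucontext pair under those assumptions:

    #include <rdma/ib_verbs.h>

    struct ex_ucontext {
    	struct ib_ucontext ibucontext;	/* embedded core object */
    	void __iomem *db_page;		/* illustrative driver state */
    };

    static int ex_alloc_ucontext(struct ib_ucontext *uctx,
    			     struct ib_udata *udata)
    {
    	struct ex_ucontext *ctx =
    		container_of(uctx, struct ex_ucontext, ibucontext);

    	if (!udata)
    		return -EFAULT;
    	ctx->db_page = NULL;	/* initialize driver state only */
    	return 0;		/* the core publishes the ucontext */
    }

    static void ex_dealloc_ucontext(struct ib_ucontext *uctx)
    {
    	/* release driver state; the core frees the object itself */
    }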
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 75940e2a8791..996d9ecd93e0 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -137,7 +137,8 @@ static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
- struct qedr_dev *dev = dev_get_drvdata(device);
+ struct qedr_dev *dev =
+ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}
@@ -238,6 +239,8 @@ static const struct ib_device_ops qedr_dev_ops = {
.reg_user_mr = qedr_reg_user_mr,
.req_notify_cq = qedr_arm_cq,
.resize_cq = qedr_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};
static int qedr_register_device(struct qedr_dev *dev)
@@ -290,7 +293,7 @@ static int qedr_register_device(struct qedr_dev *dev)
ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);
dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
- return ib_register_device(&dev->ibdev, "qedr%d", NULL);
+ return ib_register_device(&dev->ibdev, "qedr%d");
}
/* This function allocates fast-path status block memory */
@@ -852,7 +855,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
struct qedr_dev *dev;
int rc = 0;
- dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
+ dev = ib_alloc_device(qedr_dev, ibdev);
if (!dev) {
pr_err("Unable to allocate ib device\n");
return NULL;
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 93b16237b767..0555e5a8c9ed 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -349,7 +349,7 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
default:
DP_NOTICE(dev, "Unknown event received %d\n", params->event);
break;
- };
+ }
return 0;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index e1ccf32b1c3d..8686a98e113d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -67,7 +67,7 @@ static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
- if (index > QEDR_ROCE_PKEY_TABLE_LEN)
+ if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
return -EINVAL;
*pkey = QEDR_ROCE_PKEY_DEFAULT;
@@ -316,28 +316,24 @@ static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
return found;
}
-struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
+ struct ib_device *ibdev = uctx->device;
int rc;
- struct qedr_ucontext *ctx;
- struct qedr_alloc_ucontext_resp uresp;
+ struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
+ struct qedr_alloc_ucontext_resp uresp = {};
struct qedr_dev *dev = get_qedr_dev(ibdev);
struct qed_rdma_add_user_out_params oparams;
if (!udata)
- return ERR_PTR(-EFAULT);
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ return -EFAULT;
rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
if (rc) {
DP_ERR(dev,
"failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
rc);
- goto err;
+ return rc;
}
ctx->dpi = oparams.dpi;
@@ -347,8 +343,6 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
- memset(&uresp, 0, sizeof(uresp));
-
uresp.dpm_enabled = dev->user_dpm_enabled;
uresp.wids_enabled = 1;
uresp.wid_count = oparams.wid_count;
@@ -364,28 +358,23 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
- goto err;
+ return rc;
ctx->dev = dev;
rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
if (rc)
- goto err;
+ return rc;
DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
&ctx->ibucontext);
- return &ctx->ibucontext;
-
-err:
- kfree(ctx);
- return ERR_PTR(rc);
+ return 0;
}
-int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
struct qedr_mm *mm, *tmp;
- int status = 0;
DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
uctx);
@@ -398,9 +387,6 @@ int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
list_del(&mm->entry);
kfree(mm);
}
-
- kfree(uctx);
- return status;
}
int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
@@ -450,11 +436,12 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma->vm_page_prot);
}
-struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context, struct ib_udata *udata)
+int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = ibpd->device;
struct qedr_dev *dev = get_qedr_dev(ibdev);
- struct qedr_pd *pd;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
u16 pd_id;
int rc;
@@ -463,16 +450,12 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
if (!dev->rdma_ctx) {
DP_ERR(dev, "invalid RDMA context\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
if (rc)
- goto err;
+ return rc;
pd->pd_id = pd_id;
@@ -485,36 +468,23 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
- goto err;
+ return rc;
}
pd->uctx = get_qedr_ucontext(context);
pd->uctx->pd = pd;
}
- return &pd->ibpd;
-
-err:
- kfree(pd);
- return ERR_PTR(rc);
+ return 0;
}
-int qedr_dealloc_pd(struct ib_pd *ibpd)
+void qedr_dealloc_pd(struct ib_pd *ibpd)
{
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_pd *pd = get_qedr_pd(ibpd);
- if (!pd) {
- pr_err("Invalid PD received in dealloc_pd\n");
- return -EINVAL;
- }
-
DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
-
- kfree(pd);
-
- return 0;
}
static void qedr_free_pbl(struct qedr_dev *dev,
@@ -636,13 +606,12 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
struct qedr_pbl *pbl,
struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
- int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+ int pbe_cnt, total_num_pbes = 0;
u32 fw_pg_cnt, fw_pg_per_umem_pg;
struct qedr_pbl *pbl_tbl;
- struct scatterlist *sg;
+ struct sg_dma_page_iter sg_iter;
struct regpair *pbe;
u64 pg_addr;
- int entry;
if (!pbl_info->num_pbes)
return;
@@ -663,38 +632,32 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
pbe_cnt = 0;
- shift = umem->page_shift;
-
- fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
-
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- pages = sg_dma_len(sg) >> shift;
- pg_addr = sg_dma_address(sg);
- for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
- for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
- pbe->lo = cpu_to_le32(pg_addr);
- pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
-
- pg_addr += BIT(pg_shift);
- pbe_cnt++;
- total_num_pbes++;
- pbe++;
-
- if (total_num_pbes == pbl_info->num_pbes)
- return;
-
- /* If the given pbl is full storing the pbes,
- * move to next pbl.
- */
- if (pbe_cnt ==
- (pbl_info->pbl_size / sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct regpair *)pbl_tbl->va;
- pbe_cnt = 0;
- }
+ fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift);
+
+ for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ pg_addr = sg_page_iter_dma_address(&sg_iter);
+ for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
+ pbe->lo = cpu_to_le32(pg_addr);
+ pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
- fw_pg_cnt++;
+ pg_addr += BIT(pg_shift);
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
+
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
+
+ /* If the given pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
}
+
+ fw_pg_cnt++;
}
}
}
@@ -736,11 +699,10 @@ static inline int qedr_align_cq_entries(int entries)
return aligned_size / QEDR_CQE_SIZE;
}
-static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
+static inline int qedr_init_user_queue(struct ib_udata *udata,
struct qedr_dev *dev,
- struct qedr_userq *q,
- u64 buf_addr, size_t buf_len,
- int access, int dmasync,
+ struct qedr_userq *q, u64 buf_addr,
+ size_t buf_len, int access, int dmasync,
int alloc_and_init)
{
u32 fw_pages;
@@ -748,7 +710,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
q->buf_addr = buf_addr;
q->buf_len = buf_len;
- q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
+ q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access, dmasync);
if (IS_ERR(q->umem)) {
DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
PTR_ERR(q->umem));
@@ -756,7 +718,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
}
fw_pages = ib_umem_page_count(q->umem) <<
- (q->umem->page_shift - FW_PAGE_SHIFT);
+ (PAGE_SHIFT - FW_PAGE_SHIFT);
rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
if (rc)
@@ -811,9 +773,6 @@ static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
cq->db.data.agg_flags = flags;
cq->db.data.value = cpu_to_le32(cons);
writeq(cq->db.raw, cq->db_addr);
-
- /* Make sure write would stick */
- mmiowb();
}
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
@@ -905,9 +864,9 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
cq->cq_type = QEDR_CQ_TYPE_USER;
- rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
- ureq.len, IB_ACCESS_LOCAL_WRITE,
- 1, 1);
+ rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
+ ureq.len, IB_ACCESS_LOCAL_WRITE, 1,
+ 1);
if (rc)
goto err0;
@@ -1344,7 +1303,7 @@ static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
hw_srq->phy_prod_pair_addr);
}
-static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
+static int qedr_init_srq_user_params(struct ib_udata *udata,
struct qedr_srq *srq,
struct qedr_create_srq_ureq *ureq,
int access, int dmasync)
@@ -1352,14 +1311,14 @@ static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
struct scatterlist *sg;
int rc;
- rc = qedr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
+ rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
ureq->srq_len, access, dmasync, 1);
if (rc)
return rc;
- srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
- sizeof(struct rdma_srq_producers),
- access, dmasync);
+ srq->prod_umem =
+ ib_umem_get(udata, ureq->prod_pair_addr,
+ sizeof(struct rdma_srq_producers), access, dmasync);
if (IS_ERR(srq->prod_umem)) {
qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@@ -1434,7 +1393,6 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
struct qedr_pd *pd = get_qedr_pd(ibpd);
struct qedr_create_srq_ureq ureq = {};
u64 pbl_base_addr, phy_prod_pair_addr;
- struct ib_ucontext *ib_ctx = NULL;
struct qedr_srq_hwq_info *hw_srq;
u32 page_cnt, page_size;
struct qedr_srq *srq;
@@ -1459,23 +1417,21 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
hw_srq->max_wr = init_attr->attr.max_wr;
hw_srq->max_sges = init_attr->attr.max_sge;
- if (udata && ibpd->uobject && ibpd->uobject->context) {
- ib_ctx = ibpd->uobject->context;
-
+ if (udata) {
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
DP_ERR(dev,
"create srq: problem copying data from user space\n");
goto err0;
}
- rc = qedr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
+ rc = qedr_init_srq_user_params(udata, srq, &ureq, 0, 0);
if (rc)
goto err0;
page_cnt = srq->usrq.pbl_info.num_pbes;
pbl_base_addr = srq->usrq.pbl_tbl->pa;
phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
- page_size = BIT(srq->usrq.umem->page_shift);
+ page_size = PAGE_SIZE;
} else {
struct qed_chain *pbl;
@@ -1699,13 +1655,10 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
struct qed_rdma_create_qp_in_params in_params;
struct qed_rdma_create_qp_out_params out_params;
struct qedr_pd *pd = get_qedr_pd(ibpd);
- struct ib_ucontext *ib_ctx = NULL;
struct qedr_create_qp_ureq ureq;
int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
int rc = -EINVAL;
- ib_ctx = ibpd->uobject->context;
-
memset(&ureq, 0, sizeof(ureq));
rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
if (rc) {
@@ -1714,14 +1667,14 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
}
/* SQ - read access only (0), dma sync not required (0) */
- rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
+ rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
ureq.sq_len, 0, 0, alloc_and_init);
if (rc)
return rc;
if (!qp->srq) {
/* RQ - read access only (0), dma sync not required (0) */
- rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
+ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
ureq.rq_len, 0, 0, alloc_and_init);
if (rc)
return rc;
@@ -2117,7 +2070,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
default:
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_INIT:
switch (new_state) {
@@ -2128,8 +2081,6 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
if (rdma_protocol_roce(&dev->ibdev, 1)) {
writel(qp->rq.db_data.raw, qp->rq.db);
- /* Make sure write takes effect */
- mmiowb();
}
break;
case QED_ROCE_QP_STATE_ERR:
@@ -2138,7 +2089,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Invalid state change. */
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_RTR:
/* RTR->XXX */
@@ -2151,7 +2102,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Invalid state change. */
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_RTS:
/* RTS->XXX */
@@ -2164,7 +2115,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Invalid state change. */
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_SQD:
/* SQD->XXX */
@@ -2176,7 +2127,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Invalid state change. */
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_ERR:
/* ERR->XXX */
@@ -2194,12 +2145,12 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
default:
status = -EINVAL;
break;
- };
+ }
break;
default:
status = -EINVAL;
break;
- };
+ }
return status;
}
@@ -2719,7 +2670,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc, 0);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
@@ -2730,7 +2681,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
goto err1;
qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
- &mr->info.pbl_info, mr->umem->page_shift);
+ &mr->info.pbl_info, PAGE_SHIFT);
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
@@ -2751,7 +2702,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
- mr->hw_mr.page_size_log = mr->umem->page_shift;
+ mr->hw_mr.page_size_log = PAGE_SHIFT;
mr->hw_mr.fbo = ib_umem_offset(mr->umem);
mr->hw_mr.length = len;
mr->hw_mr.vaddr = usr_addr;
@@ -3546,9 +3497,6 @@ int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
smp_wmb();
writel(qp->sq.db_data.raw, qp->sq.db);
- /* Make sure write sticks */
- mmiowb();
-
spin_unlock_irqrestore(&qp->q_lock, flags);
return rc;
@@ -3739,12 +3687,8 @@ int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
writel(qp->rq.db_data.raw, qp->rq.db);
- /* Make sure write sticks */
- mmiowb();
-
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
- mmiowb(); /* for second doorbell */
}
wr = wr->next;
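qedr's umem handling follows the same migration seen in nes and ocrdma: ib_umem_get() now takes the ib_udata instead of an ib_ucontext, umems are mapped at fixed PAGE_SHIFT granularity (hence umem->page_shift disappears), and for_each_sg_dma_page() hands back one PAGE_SIZE-aligned DMA address per iteration, replacing the open-coded page splitting. A minimal sketch of the iterator, assuming an already-pinned umem:

    #include <linux/scatterlist.h>
    #include <rdma/ib_umem.h>

    static void ex_fill_pbl(struct ib_umem *umem, __le64 *pbl)
    {
    	struct sg_dma_page_iter sg_iter;

    	/* One iteration per PAGE_SIZE page of the DMA mapping; no
    	 * manual sg_dma_len()/page_shift arithmetic needed. */
    	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0)
    		*pbl++ = cpu_to_le64(sg_page_iter_dma_address(&sg_iter));
    }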
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 1852b7012bf4..f0c05f4771ac 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -43,13 +43,13 @@ int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
-struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
-int qedr_dealloc_ucontext(struct ib_ucontext *);
+int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
-struct ib_pd *qedr_alloc_pd(struct ib_device *,
- struct ib_ucontext *, struct ib_udata *);
-int qedr_dealloc_pd(struct ib_pd *pd);
+int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
+ struct ib_udata *udata);
+void qedr_dealloc_pd(struct ib_pd *pd);
struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 5ed1ed93380f..caeb77d07a58 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -66,15 +66,6 @@ static const struct file_operations _##name##_file_ops = { \
.release = seq_release \
};
-#define DEBUGFS_FILE_CREATE(name) \
-do { \
- struct dentry *ent; \
- ent = debugfs_create_file(#name , 0400, ibd->qib_ibdev_dbg, \
- ibd, &_##name##_file_ops); \
- if (!ent) \
- pr_warn("create of " #name " failed\n"); \
-} while (0)
-
static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
{
struct qib_opcode_stats_perctx *opstats;
@@ -249,17 +240,17 @@ DEBUGFS_FILE(qp_stats)
void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
{
+ struct dentry *root;
char name[10];
snprintf(name, sizeof(name), "qib%d", dd_from_dev(ibd)->unit);
- ibd->qib_ibdev_dbg = debugfs_create_dir(name, qib_dbg_root);
- if (!ibd->qib_ibdev_dbg) {
- pr_warn("create of %s failed\n", name);
- return;
- }
- DEBUGFS_FILE_CREATE(opcode_stats);
- DEBUGFS_FILE_CREATE(ctx_stats);
- DEBUGFS_FILE_CREATE(qp_stats);
+ root = debugfs_create_dir(name, qib_dbg_root);
+ ibd->qib_ibdev_dbg = root;
+
+ debugfs_create_file("opcode_stats", 0400, root, ibd,
+ &_opcode_stats_file_ops);
+ debugfs_create_file("ctx_stats", 0400, root, ibd, &_ctx_stats_file_ops);
+ debugfs_create_file("qp_stats", 0400, root, ibd, &_qp_stats_file_ops);
}
void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
@@ -274,8 +265,6 @@ out:
void qib_dbg_init(void)
{
qib_dbg_root = debugfs_create_dir(QIB_DRV_NAME, NULL);
- if (!qib_dbg_root)
- pr_warn("init of debugfs failed\n");
}
void qib_dbg_exit(void)
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index cdbf707fa267..531d8a1db2c3 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1884,7 +1884,6 @@ static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
writel(pa, tidp32);
qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
- mmiowb();
spin_unlock_irqrestore(tidlockp, flags);
}
@@ -1928,7 +1927,6 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
pa |= 2 << 29;
}
writel(pa, tidp32);
- mmiowb();
}
@@ -2053,9 +2051,7 @@ static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
{
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- mmiowb();
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- mmiowb();
}
static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 9fde45538f6e..ea3ddb05cbad 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2175,7 +2175,6 @@ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
pa = chippa;
}
writeq(pa, tidptr);
- mmiowb();
}
/**
@@ -2704,9 +2703,7 @@ static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
{
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- mmiowb();
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- mmiowb();
}
static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 17d6b24b3473..ac6a84f11ad0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -3793,7 +3793,6 @@ static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
pa = chippa;
}
writeq(pa, tidptr);
- mmiowb();
}
/**
@@ -4440,10 +4439,8 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
adjust_rcv_timeout(rcd, npkts);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- mmiowb();
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- mmiowb();
}
static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 6fa002940451..50dd9811b088 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -45,12 +45,7 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 len;
len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
- ss->sge = wqe->sg_list[0];
- ss->sg_list = wqe->sg_list + 1;
- ss->num_sge = wqe->wr.num_sge;
- ss->total_len = wqe->length;
- rvt_skip_sge(ss, len, false);
- return wqe->length - len;
+ return rvt_restart_sge(ss, wqe, len);
}
/**
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 12caf3db8c34..4f4a09c2dbcd 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1068,7 +1068,6 @@ static int qib_sd_setvals(struct qib_devdata *dd)
for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
writeq(data, iaddr + idx);
- mmiowb();
qib_read_kreg32(dd, kr_scratch);
dds_reg_map >>= 4;
for (midx = 0; midx < DDS_ROWS; ++midx) {
@@ -1076,7 +1075,6 @@ static int qib_sd_setvals(struct qib_devdata *dd)
data = dds_init_vals[midx].reg_vals[idx];
writeq(data, daddr);
- mmiowb();
qib_read_kreg32(dd, kr_scratch);
} /* End inner for (vals for this reg, each row) */
} /* end outer for (regs to be stored) */
@@ -1098,13 +1096,11 @@ static int qib_sd_setvals(struct qib_devdata *dd)
didx = idx + min_idx;
/* Store the next RXEQ register address */
writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
- mmiowb();
qib_read_kreg32(dd, kr_scratch);
/* Iterate through RXEQ values */
for (vidx = 0; vidx < 4; vidx++) {
data = rxeq_init_vals[idx].rdata[vidx];
writeq(data, taddr + (vidx << 6) + idx);
- mmiowb();
qib_read_kreg32(dd, kr_scratch);
}
} /* end outer for (Reg-writes for RXEQ) */
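
All of the mmiowb() deletions in the qib hunks above follow from the kernel making this barrier implicit: on architectures that need it, spin_unlock() now performs the MMIO-write ordering that drivers previously had to request by hand. A hedged before/after sketch (the register offset and lock names are illustrative):

	/* before: order the posted write against the releasing unlock */
	spin_lock_irqsave(&dd->lock, flags);
	writel(val, dd->kregbase + reg_offset);
	mmiowb();
	spin_unlock_irqrestore(&dd->lock, flags);

	/* after: spin_unlock_irqrestore() provides that ordering itself */
	spin_lock_irqsave(&dd->lock, flags);
	writel(val, dd->kregbase + reg_offset);
	spin_unlock_irqrestore(&dd->lock, flags);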
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 3d64081c4819..99e11c347130 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -565,13 +565,8 @@ retry:
sge = &ss->sge;
while (dwords) {
u32 dw;
- u32 len;
+ u32 len = rvt_get_sge_length(sge, dwords << 2);
- len = dwords << 2;
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
dw = (len + 3) >> 2;
addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
dw << 2, DMA_TO_DEVICE);
@@ -594,24 +589,7 @@ retry:
descqp = &ppd->sdma_descq[0].qw[0];
++ppd->sdma_generation;
}
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
-
+ rvt_update_sge(ss, len, false);
dwoffset += dw;
dwords -= dw;
}
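
The three-way clamp deleted here (and again in the qib_ud.c and qib_verbs.c hunks below) is exactly what the shared helper centralizes. A sketch consistent with the open-coded logic it replaces, assuming the rdmavt definition matches:

static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)
{
	u32 len = sge->length;

	/* never copy past the caller's request or the SGE's remainder */
	if (len > length)
		len = length;
	if (len > sge->sge_length)
		len = sge->sge_length;

	return len;
}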
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 1cf4ca3f23e3..905206a0c2d5 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -555,7 +555,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
@@ -565,7 +565,7 @@ static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
int ret;
@@ -590,7 +590,7 @@ static ssize_t boardversion_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
@@ -602,7 +602,7 @@ static ssize_t localbus_info_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
@@ -614,7 +614,7 @@ static ssize_t nctxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
/* Return the number of user ports (contexts) available. */
@@ -630,7 +630,7 @@ static ssize_t nfreectxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
/* Return the number of free user ports (contexts) available. */
@@ -642,7 +642,7 @@ static ssize_t serial_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
buf[sizeof(dd->serial)] = '\0';
@@ -657,7 +657,7 @@ static ssize_t chip_reset_store(struct device *device,
size_t count)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
int ret;
@@ -679,7 +679,7 @@ static ssize_t tempsense_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, rdi.ibdev.dev);
+ rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
struct qib_devdata *dd = dd_from_dev(dev);
int ret;
int idx;
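
Each show routine above swaps an open-coded container_of() for rdma_device_to_drv_device(), which first recovers the ib_device that embeds the sysfs struct device and then steps out to the driver structure. Roughly — a sketch of the helper's effect, not necessarily its exact upstream spelling:

#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)	\
	container_of(container_of(dev, struct ib_device, dev),		\
		     drv_dev_struct, ibdev_member)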
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 445ea19a2ec8..5cdedba2d164 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -172,12 +172,8 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
ssge.num_sge = swqe->wr.num_sge;
sge = &ssge.sge;
while (length) {
- u32 len = sge->length;
+ u32 len = rvt_get_sge_length(sge, length);
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
sge->vaddr += len;
sge->length -= len;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 16543d5e80c3..123ca8f64f75 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -49,43 +49,6 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
}
}
-/*
- * Call with current->mm->mmap_sem held.
- */
-static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p)
-{
- unsigned long lock_limit;
- size_t got;
- int ret;
-
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
- ret = -ENOMEM;
- goto bail;
- }
-
- for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages(start_page + got * PAGE_SIZE,
- num_pages - got,
- FOLL_WRITE | FOLL_FORCE,
- p + got, NULL);
- if (ret < 0)
- goto bail_release;
- }
-
- current->mm->pinned_vm += num_pages;
-
- ret = 0;
- goto bail;
-
-bail_release:
- __qib_release_user_pages(p, got, 0);
-bail:
- return ret;
-}
-
/**
* qib_map_page - a safety wrapper around pci_map_page()
*
@@ -137,26 +100,44 @@ int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
int qib_get_user_pages(unsigned long start_page, size_t num_pages,
struct page **p)
{
+ unsigned long locked, lock_limit;
+ size_t got;
int ret;
- down_write(&current->mm->mmap_sem);
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
- ret = __qib_get_user_pages(start_page, num_pages, p);
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+ ret = -ENOMEM;
+ goto bail;
+ }
- up_write(&current->mm->mmap_sem);
+ down_read(&current->mm->mmap_sem);
+ for (got = 0; got < num_pages; got += ret) {
+ ret = get_user_pages_longterm(start_page + got * PAGE_SIZE,
+ num_pages - got,
+ FOLL_WRITE | FOLL_FORCE,
+ p + got, NULL);
+ if (ret < 0) {
+ up_read(&current->mm->mmap_sem);
+ goto bail_release;
+ }
+ }
+ up_read(&current->mm->mmap_sem);
+ return 0;
+bail_release:
+ __qib_release_user_pages(p, got, 0);
+bail:
+ atomic64_sub(num_pages, &current->mm->pinned_vm);
return ret;
}
void qib_release_user_pages(struct page **p, size_t num_pages)
{
- if (current->mm) /* during close after signal, mm can be NULL */
- down_write(&current->mm->mmap_sem);
-
__qib_release_user_pages(p, num_pages, 1);
- if (current->mm) {
- current->mm->pinned_vm -= num_pages;
- up_write(&current->mm->mmap_sem);
- }
+ /* during close after signal, mm can be NULL */
+ if (current->mm)
+ atomic64_sub(num_pages, &current->mm->pinned_vm);
}
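
qib_get_user_pages() now charges mm->pinned_vm with atomic64 operations instead of serializing on a writer-held mmap_sem, which is only taken for reading around the page walk itself. Reduced to its core, the accounting pattern is charge-first, refund-on-failure (sketch; npages as in the function above):

	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	if (locked > (rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT) &&
	    !capable(CAP_IPC_LOCK)) {
		/* over the limit: undo the optimistic charge */
		atomic64_sub(npages, &current->mm->pinned_vm);
		return -ENOMEM;
	}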
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 276304f611ab..5ff32d32c61c 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -144,12 +144,8 @@ static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
u32 ndesc = 1; /* count the header */
while (length) {
- u32 len = sge.length;
+ u32 len = rvt_get_sge_length(&sge, length);
- if (len > length)
- len = length;
- if (len > sge.sge_length)
- len = sge.sge_length;
if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
(len != length && (len & (sizeof(u32) - 1)))) {
ndesc = 0;
@@ -186,12 +182,8 @@ static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
struct rvt_sge *sge = &ss->sge;
while (length) {
- u32 len = sge->length;
+ u32 len = rvt_get_sge_length(sge, length);
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
memcpy(data, sge->vaddr, len);
sge->vaddr += len;
sge->length -= len;
@@ -440,13 +432,9 @@ static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
u32 last;
while (1) {
- u32 len = ss->sge.length;
+ u32 len = rvt_get_sge_length(&ss->sge, length);
u32 off;
- if (len > length)
- len = length;
- if (len > ss->sge.sge_length)
- len = ss->sge.sge_length;
/* If the source address is not aligned, try to align it. */
off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
if (off) {
@@ -1494,6 +1482,7 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
}
static const struct ib_device_ops qib_dev_ops = {
+ .init_port = qib_create_port_files,
.modify_device = qib_modify_device,
.process_mad = qib_process_mad,
};
@@ -1567,7 +1556,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
/*
* Fill in rvt info object.
*/
- dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
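
Promoting qib_create_port_files from the rdmavt port_callback field to ib_device_ops.init_port continues the consolidation of driver entry points into one const ops table. The shape, assuming qib wires the table in with ib_set_device_ops() as other drivers in this series do:

static const struct ib_device_ops qib_dev_ops = {
	.init_port	= qib_create_port_files,
	.modify_device	= qib_modify_device,
	.process_mad	= qib_process_mad,
};

	/* during registration, with ibdev the driver's ib_device */
	ib_set_device_ops(ibdev, &qib_dev_ops);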
diff --git a/drivers/infiniband/hw/usnic/Makefile b/drivers/infiniband/hw/usnic/Makefile
index 94ae7a1a6950..f12a4938ffd2 100644
--- a/drivers/infiniband/hw/usnic/Makefile
+++ b/drivers/infiniband/hw/usnic/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/net/ethernet/cisco/enic
+ccflags-y := -I $(srctree)/drivers/net/ethernet/cisco/enic
obj-$(CONFIG_INFINIBAND_USNIC)+= usnic_verbs.o
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
index a3115709fb03..e5a3f02fb078 100644
--- a/drivers/infiniband/hw/usnic/usnic_debugfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -113,42 +113,21 @@ static const struct file_operations flowinfo_ops = {
void usnic_debugfs_init(void)
{
debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
- if (IS_ERR(debugfs_root)) {
- usnic_err("Failed to create debugfs root dir, check if debugfs is enabled in kernel configuration\n");
- goto out_clear_root;
- }
flows_dentry = debugfs_create_dir("flows", debugfs_root);
- if (IS_ERR_OR_NULL(flows_dentry)) {
- usnic_err("Failed to create debugfs flow dir with err %ld\n",
- PTR_ERR(flows_dentry));
- goto out_free_root;
- }
debugfs_create_file("build-info", S_IRUGO, debugfs_root,
NULL, &usnic_debugfs_buildinfo_ops);
- return;
-
-out_free_root:
- debugfs_remove_recursive(debugfs_root);
-out_clear_root:
- debugfs_root = NULL;
}
void usnic_debugfs_exit(void)
{
- if (!debugfs_root)
- return;
-
debugfs_remove_recursive(debugfs_root);
debugfs_root = NULL;
}
void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
{
- if (IS_ERR_OR_NULL(flows_dentry))
- return;
-
scnprintf(qp_flow->dentry_name, sizeof(qp_flow->dentry_name),
"%u", qp_flow->flow->flow_id);
qp_flow->dbgfs_dentry = debugfs_create_file(qp_flow->dentry_name,
@@ -156,11 +135,6 @@ void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
flows_dentry,
qp_flow,
&flowinfo_ops);
- if (IS_ERR_OR_NULL(qp_flow->dbgfs_dentry)) {
- usnic_err("Failed to create dbg fs entry for flow %u with error %ld\n",
- qp_flow->flow->flow_id,
- PTR_ERR(qp_flow->dbgfs_dentry));
- }
}
void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow)
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index b2323a52a0dd..d88d9f8a7f9a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -216,18 +216,17 @@ static int usnic_ib_netdevice_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct usnic_ib_dev *us_ibdev;
+ struct ib_device *ibdev;
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- mutex_lock(&usnic_ib_ibdev_list_lock);
- list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
- if (us_ibdev->netdev == netdev) {
- usnic_ib_handle_usdev_event(us_ibdev, event);
- break;
- }
- }
- mutex_unlock(&usnic_ib_ibdev_list_lock);
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
+ if (!ibdev)
+ return NOTIFY_DONE;
+ us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
+ usnic_ib_handle_usdev_event(us_ibdev, event);
+ ib_device_put(ibdev);
return NOTIFY_DONE;
}
@@ -282,16 +281,15 @@ static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
struct usnic_ib_dev *us_ibdev;
struct in_ifaddr *ifa = ptr;
struct net_device *netdev = ifa->ifa_dev->dev;
+ struct ib_device *ibdev;
- mutex_lock(&usnic_ib_ibdev_list_lock);
- list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
- if (us_ibdev->netdev == netdev) {
- usnic_ib_handle_inet_event(us_ibdev, event, ptr);
- break;
- }
- }
- mutex_unlock(&usnic_ib_ibdev_list_lock);
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
+ if (!ibdev)
+ return NOTIFY_DONE;
+ us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
+ usnic_ib_handle_inet_event(us_ibdev, event, ptr);
+ ib_device_put(ibdev);
return NOTIFY_DONE;
}
static struct notifier_block usnic_ib_inetaddr_notifier = {
@@ -333,32 +331,26 @@ static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
static const struct ib_device_ops usnic_dev_ops = {
.alloc_pd = usnic_ib_alloc_pd,
.alloc_ucontext = usnic_ib_alloc_ucontext,
- .create_ah = usnic_ib_create_ah,
.create_cq = usnic_ib_create_cq,
.create_qp = usnic_ib_create_qp,
.dealloc_pd = usnic_ib_dealloc_pd,
.dealloc_ucontext = usnic_ib_dealloc_ucontext,
.dereg_mr = usnic_ib_dereg_mr,
- .destroy_ah = usnic_ib_destroy_ah,
.destroy_cq = usnic_ib_destroy_cq,
.destroy_qp = usnic_ib_destroy_qp,
.get_dev_fw_str = usnic_get_dev_fw_str,
- .get_dma_mr = usnic_ib_get_dma_mr,
.get_link_layer = usnic_ib_port_link_layer,
- .get_netdev = usnic_get_netdev,
.get_port_immutable = usnic_port_immutable,
.mmap = usnic_ib_mmap,
.modify_qp = usnic_ib_modify_qp,
- .poll_cq = usnic_ib_poll_cq,
- .post_recv = usnic_ib_post_recv,
- .post_send = usnic_ib_post_send,
.query_device = usnic_ib_query_device,
.query_gid = usnic_ib_query_gid,
.query_pkey = usnic_ib_query_pkey,
.query_port = usnic_ib_query_port,
.query_qp = usnic_ib_query_qp,
.reg_user_mr = usnic_ib_reg_mr,
- .req_notify_cq = usnic_ib_req_notify_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
};
/* Start of PF discovery section */
@@ -368,11 +360,12 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
union ib_gid gid;
struct in_device *ind;
struct net_device *netdev;
+ int ret;
usnic_dbg("\n");
netdev = pci_get_drvdata(dev);
- us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
+ us_ibdev = ib_alloc_device(usnic_ib_dev, ib_dev);
if (!us_ibdev) {
usnic_err("Device %s context alloc failed\n",
netdev_name(pci_get_drvdata(dev)));
@@ -422,7 +415,11 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC;
rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
- if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", NULL))
+ ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
+ if (ret)
+ goto err_fwd_dealloc;
+
+ if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d"))
goto err_fwd_dealloc;
usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
@@ -477,15 +474,17 @@ static void usnic_ib_undiscover_pf(struct kref *kref)
&usnic_ib_ibdev_list, ib_dev_link) {
if (us_ibdev->pdev == dev) {
list_del(&us_ibdev->ib_dev_link);
- usnic_ib_device_remove(us_ibdev);
found = true;
break;
}
}
- WARN(!found, "Failed to remove PF %s\n", pci_name(dev));
mutex_unlock(&usnic_ib_ibdev_list_lock);
+ if (found)
+ usnic_ib_device_remove(us_ibdev);
+ else
+ WARN(1, "Failed to remove PF %s\n", pci_name(dev));
}
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
@@ -691,7 +690,6 @@ out_unreg_netdev_notifier:
out_pci_unreg:
pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:
- usnic_uiom_fini();
return err;
}
@@ -704,7 +702,6 @@ static void __exit usnic_ib_destroy(void)
unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
pci_unregister_driver(&usnic_ib_pci_driver);
- usnic_uiom_fini();
}
MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
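
Both notifier hunks replace a walk of the driver's private device list with ib_device_get_by_netdev(), which resolves the association registered earlier via ib_device_set_netdev() (see the probe hunk) and returns a referenced device. The pattern, with the reference discipline spelled out:

	struct ib_device *ibdev;

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
	if (!ibdev)
		return NOTIFY_DONE;	/* netdev is not backed by usNIC */

	/* ... container_of() back to the driver structure and act ... */

	ib_device_put(ibdev);		/* drop the lookup's reference */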
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index a7e4b2ccfaf8..c85d48ae7442 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -50,7 +50,7 @@ static ssize_t board_id_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev =
- container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
unsigned short subsystem_device_id;
mutex_lock(&us_ibdev->usdev_lock);
@@ -67,14 +67,13 @@ static DEVICE_ATTR_RO(board_id);
static ssize_t
config_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
char *ptr;
unsigned left;
unsigned n;
enum usnic_vnic_res_type res_type;
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
-
/* Buffer space limit is 1 page */
ptr = buf;
left = PAGE_SIZE;
@@ -130,9 +129,8 @@ static DEVICE_ATTR_RO(config);
static ssize_t
iface_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
-
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
return scnprintf(buf, PAGE_SIZE, "%s\n",
netdev_name(us_ibdev->netdev));
@@ -142,9 +140,8 @@ static DEVICE_ATTR_RO(iface);
static ssize_t
max_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
-
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
return scnprintf(buf, PAGE_SIZE, "%u\n",
kref_read(&us_ibdev->vf_cnt));
@@ -154,10 +151,10 @@ static DEVICE_ATTR_RO(max_vf);
static ssize_t
qp_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
int qp_per_vf;
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
@@ -169,9 +166,8 @@ static DEVICE_ATTR_RO(qp_per_vf);
static ssize_t
cq_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
- struct usnic_ib_dev *us_ibdev;
-
- us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ struct usnic_ib_dev *us_ibdev =
+ rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
return scnprintf(buf, PAGE_SIZE, "%d\n",
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 1d4abef17e38..bd4521b2cc5f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -37,6 +37,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
+#include <rdma/uverbs_ioctl.h>
#include "usnic_abi.h"
#include "usnic_ib.h"
@@ -436,57 +437,33 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return 0;
}
-struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
-{
- struct usnic_ib_dev *us_ibdev = to_usdev(device);
-
- if (us_ibdev->netdev)
- dev_hold(us_ibdev->netdev);
-
- return us_ibdev->netdev;
-}
-
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
- if (index > 1)
+ if (index > 0)
return -EINVAL;
*pkey = 0xffff;
return 0;
}
-struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct usnic_ib_pd *pd;
+ struct usnic_ib_pd *pd = to_upd(ibpd);
void *umem_pd;
- usnic_dbg("\n");
-
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
if (IS_ERR_OR_NULL(umem_pd)) {
- kfree(pd);
- return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
+ return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM;
}
- usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
- pd, context, dev_name(&ibdev->dev));
- return &pd->ibpd;
+ return 0;
}
-int usnic_ib_dealloc_pd(struct ib_pd *pd)
+void usnic_ib_dealloc_pd(struct ib_pd *pd)
{
- usnic_info("freeing domain 0x%p\n", pd);
-
usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
- kfree(pd);
- return 0;
}
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
@@ -496,7 +473,8 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
int err;
struct usnic_ib_dev *us_ibdev;
struct usnic_ib_qp_grp *qp_grp;
- struct usnic_ib_ucontext *ucontext;
+ struct usnic_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct usnic_ib_ucontext, ibucontext);
int cq_cnt;
struct usnic_vnic_res_spec res_spec;
struct usnic_ib_create_qp_cmd cmd;
@@ -504,7 +482,6 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
usnic_dbg("\n");
- ucontext = to_uucontext(pd->uobject->context);
us_ibdev = to_usdev(pd->device);
if (init_attr->create_flags)
@@ -676,37 +653,31 @@ int usnic_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
-struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
- struct usnic_ib_ucontext *context;
+ struct ib_device *ibdev = uctx->device;
+ struct usnic_ib_ucontext *context = to_ucontext(uctx);
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
usnic_dbg("\n");
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
INIT_LIST_HEAD(&context->qp_grp_list);
mutex_lock(&us_ibdev->usdev_lock);
list_add_tail(&context->link, &us_ibdev->ctx_list);
mutex_unlock(&us_ibdev->usdev_lock);
- return &context->ibucontext;
+ return 0;
}
-int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
usnic_dbg("\n");
mutex_lock(&us_ibdev->usdev_lock);
- BUG_ON(!list_empty(&context->qp_grp_list));
+ WARN_ON_ONCE(!list_empty(&context->qp_grp_list));
list_del(&context->link);
mutex_unlock(&us_ibdev->usdev_lock);
- kfree(context);
- return 0;
}
int usnic_ib_mmap(struct ib_ucontext *context,
@@ -760,57 +731,4 @@ int usnic_ib_mmap(struct ib_ucontext *context,
return -EINVAL;
}
-/* In ib callbacks section - Start of stub funcs */
-struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
- struct ib_udata *udata)
-
-{
- usnic_dbg("\n");
- return ERR_PTR(-EPERM);
-}
-
-int usnic_ib_destroy_ah(struct ib_ah *ah, u32 flags)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-int usnic_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-int usnic_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
- struct ib_wc *wc)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-int usnic_ib_req_notify_cq(struct ib_cq *cq,
- enum ib_cq_notify_flags flags)
-{
- usnic_dbg("\n");
- return -EINVAL;
-}
-
-struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
-{
- usnic_dbg("\n");
- return ERR_PTR(-ENOMEM);
-}
-
-
-/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index e33144261b9a..c40e89b6246f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -48,13 +48,11 @@ int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
struct ib_qp_init_attr *qp_init_attr);
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid);
-struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num);
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey);
-struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int usnic_ib_dealloc_pd(struct ib_pd *pd);
+int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata);
+void usnic_ib_dealloc_pd(struct ib_pd *pd);
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -70,24 +68,8 @@ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int usnic_ib_dereg_mr(struct ib_mr *ibmr);
-struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata);
-int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
+int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
int usnic_ib_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma);
-struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
- struct ib_udata *udata);
-
-int usnic_ib_destroy_ah(struct ib_ah *ah, u32 flags);
-int usnic_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
-int usnic_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
-int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
- struct ib_wc *wc);
-int usnic_ib_req_notify_cq(struct ib_cq *cq,
- enum ib_cq_notify_flags flags);
-struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc);
#endif /* !USNIC_IB_VERBS_H */
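
The alloc_pd/dealloc_pd and alloc_ucontext/dealloc_ucontext signature changes in this header follow the scheme in which the IB core allocates the driver's container object itself: INIT_RDMA_OBJ_SIZE() tells the core how large the driver structure is and where the ib_* object is embedded, the alloc callback only initializes the zeroed memory and returns an errno, and dealloc becomes void because the core frees the container. A condensed sketch with hypothetical names:

struct example_pd {
	struct ib_pd	ibpd;		/* core object must be embedded */
	u32		handle;		/* driver-private state */
};

static int example_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ctx,
			    struct ib_udata *udata)
{
	struct example_pd *pd = container_of(ibpd, struct example_pd, ibpd);

	pd->handle = 0;			/* memory was allocated by the core */
	return 0;
}

static void example_dealloc_pd(struct ib_pd *ibpd)
{
	/* release driver resources only; the core frees the structure */
}

static const struct ib_device_ops example_dev_ops = {
	.alloc_pd	= example_alloc_pd,
	.dealloc_pd	= example_dealloc_pd,
	INIT_RDMA_OBJ_SIZE(ib_pd, example_pd, ibpd),
};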
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 49275a548751..06862a6af185 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -47,8 +47,6 @@
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"
-static struct workqueue_struct *usnic_uiom_wq;
-
#define USNIC_UIOM_PAGE_CHUNK \
((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
@@ -127,9 +125,9 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
uiomr->owning_mm = mm = current->mm;
- down_write(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
- locked = npages + current->mm->pinned_vm;
+ locked = atomic64_add_return(npages, &current->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -157,9 +155,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
off = 0;
while (ret) {
- chunk = kmalloc(sizeof(*chunk) +
- sizeof(struct scatterlist) *
- min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
+ chunk = kmalloc(struct_size(chunk, page_list,
+ min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
GFP_KERNEL);
if (!chunk) {
ret = -ENOMEM;
@@ -185,14 +182,13 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
}
out:
- if (ret < 0)
+ if (ret < 0) {
usnic_uiom_put_pages(chunk_list, 0);
- else {
- mm->pinned_vm = locked;
+ atomic64_sub(npages, &current->mm->pinned_vm);
+ } else
mmgrab(uiomr->owning_mm);
- }
- up_write(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
free_page((unsigned long) page_list);
return ret;
}
@@ -436,43 +432,12 @@ static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}
-static void usnic_uiom_release_defer(struct work_struct *work)
-{
- struct usnic_uiom_reg *uiomr =
- container_of(work, struct usnic_uiom_reg, work);
-
- down_write(&uiomr->owning_mm->mmap_sem);
- uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
- up_write(&uiomr->owning_mm->mmap_sem);
-
- __usnic_uiom_release_tail(uiomr);
-}
-
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
struct ib_ucontext *context)
{
__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
- /*
- * We may be called with the mm's mmap_sem already held. This
- * can happen when a userspace munmap() is the call that drops
- * the last reference to our file and calls our release
- * method. If there are memory regions to destroy, we'll end
- * up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting to a workqueue.
- */
- if (context->closing) {
- if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
- INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
- queue_work(usnic_uiom_wq, &uiomr->work);
- return;
- }
- } else {
- down_write(&uiomr->owning_mm->mmap_sem);
- }
- uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
- up_write(&uiomr->owning_mm->mmap_sem);
-
+ atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
__usnic_uiom_release_tail(uiomr);
}
@@ -601,17 +566,5 @@ int usnic_uiom_init(char *drv_name)
return -EPERM;
}
- usnic_uiom_wq = create_workqueue(drv_name);
- if (!usnic_uiom_wq) {
- usnic_err("Unable to alloc wq for drv %s\n", drv_name);
- return -ENOMEM;
- }
-
return 0;
}
-
-void usnic_uiom_fini(void)
-{
- flush_workqueue(usnic_uiom_wq);
- destroy_workqueue(usnic_uiom_wq);
-}
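
The kmalloc() conversion in usnic_uiom_get_pages() uses struct_size() from <linux/overflow.h>, which computes the size of a structure plus n trailing flexible-array elements and saturates on overflow instead of wrapping. The equivalence, sketched on a reduced version of the chunk structure:

struct example_chunk {
	int			nents;
	struct scatterlist	page_list[];	/* flexible array member */
};

	struct example_chunk *chunk;

	/* overflow-checked form of:
	 * sizeof(*chunk) + n * sizeof(chunk->page_list[0])
	 */
	chunk = kmalloc(struct_size(chunk, page_list, n), GFP_KERNEL);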
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index b86a9731071b..c88cfa087e3a 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -93,5 +93,4 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
struct ib_ucontext *ucontext);
int usnic_uiom_init(char *drv_name);
-void usnic_uiom_fini(void);
#endif /* USNIC_UIOM_H_ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 0f004c737620..104c7db4704f 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -141,7 +141,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
goto err_cq;
}
- cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
+ cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(cq->umem)) {
ret = PTR_ERR(cq->umem);
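
This and the later pvrdma/rdmavt hunks all track the same core change: ib_umem_get() now takes the ib_udata of the running verb instead of an ib_ucontext, letting the core resolve the context itself and removing the pd->uobject->context dereferences deleted below. A converted call looks like (fields as in the hunk above):

	umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem))
		return PTR_ERR(umem);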
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index 6fd5a8f4e2f6..8f9749d54688 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -57,7 +57,8 @@
#define PVRDMA_ROCEV1_VERSION 17
#define PVRDMA_ROCEV2_VERSION 18
-#define PVRDMA_VERSION PVRDMA_ROCEV2_VERSION
+#define PVRDMA_PPN64_VERSION 19
+#define PVRDMA_VERSION PVRDMA_PPN64_VERSION
#define PVRDMA_BOARD_ID 1
#define PVRDMA_REV_ID 1
@@ -279,8 +280,10 @@ struct pvrdma_device_shared_region {
/* W: Async ring page info. */
struct pvrdma_ring_page_info cq_ring_pages;
/* W: CQ ring page info. */
- u32 uar_pfn; /* W: UAR pageframe. */
- u32 pad2; /* Pad to 8-byte align. */
+ union {
+ u32 uar_pfn; /* W: UAR pageframe. */
+ u64 uar_pfn64; /* W: 64-bit UAR page frame. */
+ };
struct pvrdma_device_caps caps; /* R: Device capabilities. */
};
@@ -411,8 +414,10 @@ struct pvrdma_cmd_query_pkey_resp {
struct pvrdma_cmd_create_uc {
struct pvrdma_cmd_hdr hdr;
- u32 pfn; /* UAR page frame number */
- u8 reserved[4];
+ union {
+ u32 pfn; /* UAR page frame number */
+ u64 pfn64; /* 64-bit UAR page frame number */
+ };
};
struct pvrdma_cmd_create_uc_resp {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 39c37b6fd715..ec41400fec0c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -195,6 +195,8 @@ static const struct ib_device_ops pvrdma_dev_ops = {
.query_qp = pvrdma_query_qp,
.reg_user_mr = pvrdma_reg_user_mr,
.req_notify_cq = pvrdma_req_notify_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
};
static const struct ib_device_ops pvrdma_dev_srq_ops = {
@@ -278,7 +280,7 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
spin_lock_init(&dev->srq_tbl_lock);
rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
- ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", NULL);
+ ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d");
if (ret)
goto err_srq_free;
@@ -795,7 +797,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));
/* Allocate zero-out device */
- dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
+ dev = ib_alloc_device(pvrdma_dev, ib_dev);
if (!dev) {
dev_err(&pdev->dev, "failed to allocate IB device\n");
return -ENOMEM;
@@ -905,7 +907,11 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
PVRDMA_GOS_BITS_64;
dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
dev->dsr->gos_info.gos_ver = 1;
- dev->dsr->uar_pfn = dev->driver_uar.pfn;
+
+ if (dev->dsr_version < PVRDMA_PPN64_VERSION)
+ dev->dsr->uar_pfn = dev->driver_uar.pfn;
+ else
+ dev->dsr->uar_pfn64 = dev->driver_uar.pfn;
/* Command slot. */
dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
@@ -1125,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
pvrdma_free_slots(dev);
+ dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+ dev->dsrbase);
iounmap(dev->regs);
kfree(dev->sgid_tbl);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
index fb0c5c0976b3..7944c58ded0e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
@@ -183,25 +183,20 @@ int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
struct ib_umem *umem, u64 offset)
{
u64 i = offset;
- int j, entry;
- int ret = 0, len = 0;
- struct scatterlist *sg;
+ int ret = 0;
+ struct sg_dma_page_iter sg_iter;
if (offset >= pdir->npages)
return -EINVAL;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- for (j = 0; j < len; j++) {
- dma_addr_t addr = sg_dma_address(sg) +
- (j << umem->page_shift);
+ for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
- ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
- if (ret)
- goto exit;
+ ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
+ if (ret)
+ goto exit;
- i++;
- }
+ i++;
}
exit:
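
The replacement loop uses the DMA page iterator, which walks a mapped scatterlist one PAGE_SIZE step at a time and hides the per-entry length arithmetic of the removed nested loop. Minimal sketch of the iterator:

	struct sg_dma_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		/* one page-sized DMA address per iteration */
	}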
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index fa96fa4fb829..a85884e90e84 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -126,8 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
}
- umem = ib_umem_get(pd->uobject->context, start,
- length, access_flags, 0);
+ umem = ib_umem_get(udata, start, length, access_flags, 0);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
"could not get umem for mem region\n");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 1ec3646087ba..08f4257169bd 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -262,8 +262,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
- qp->rumem = ib_umem_get(pd->uobject->context,
- ucmd.rbuf_addr,
+ qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
ucmd.rbuf_size, 0, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
@@ -275,8 +274,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->srq = to_vsrq(init_attr->srq);
}
- qp->sumem = ib_umem_get(pd->uobject->context,
- ucmd.sbuf_addr,
+ qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
ucmd.sbuf_size, 0, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 06ba7c7a2235..951d9d68107a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -153,9 +153,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
goto err_srq;
}
- srq->umem = ib_umem_get(pd->uobject->context,
- ucmd.buf_addr,
- ucmd.buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 4d238d0e484b..42fe821f8d58 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -306,47 +306,42 @@ out:
/**
* pvrdma_alloc_ucontext - allocate ucontext
- * @ibdev: the IB device
+ * @uctx: the uverbs context
* @udata: user data
*
- * @return: the ib_ucontext pointer on success, otherwise errno.
+ * @return: zero on success, otherwise errno.
*/
-struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
+ struct ib_device *ibdev = uctx->device;
struct pvrdma_dev *vdev = to_vdev(ibdev);
- struct pvrdma_ucontext *context;
- union pvrdma_cmd_req req;
- union pvrdma_cmd_resp rsp;
+ struct pvrdma_ucontext *context = to_vucontext(uctx);
+ union pvrdma_cmd_req req = {};
+ union pvrdma_cmd_resp rsp = {};
struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
- struct pvrdma_alloc_ucontext_resp uresp = {0};
+ struct pvrdma_alloc_ucontext_resp uresp = {};
int ret;
- void *ptr;
if (!vdev->ib_active)
- return ERR_PTR(-EAGAIN);
-
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
+ return -EAGAIN;
context->dev = vdev;
ret = pvrdma_uar_alloc(vdev, &context->uar);
- if (ret) {
- kfree(context);
- return ERR_PTR(-ENOMEM);
- }
+ if (ret)
+ return -ENOMEM;
/* get ctx_handle from host */
- memset(cmd, 0, sizeof(*cmd));
- cmd->pfn = context->uar.pfn;
+ if (vdev->dsr_version < PVRDMA_PPN64_VERSION)
+ cmd->pfn = context->uar.pfn;
+ else
+ cmd->pfn64 = context->uar.pfn;
+
cmd->hdr.cmd = PVRDMA_CMD_CREATE_UC;
ret = pvrdma_cmd_post(vdev, &req, &rsp, PVRDMA_CMD_CREATE_UC_RESP);
if (ret < 0) {
dev_warn(&vdev->pdev->dev,
"could not create ucontext, error: %d\n", ret);
- ptr = ERR_PTR(ret);
goto err;
}
@@ -357,33 +352,28 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (ret) {
pvrdma_uar_free(vdev, &context->uar);
- context->ibucontext.device = ibdev;
pvrdma_dealloc_ucontext(&context->ibucontext);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
- return &context->ibucontext;
+ return 0;
err:
pvrdma_uar_free(vdev, &context->uar);
- kfree(context);
- return ptr;
+ return ret;
}
/**
* pvrdma_dealloc_ucontext - deallocate ucontext
* @ibcontext: the ucontext
- *
- * @return: 0 on success, otherwise errno.
*/
-int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
+void pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct pvrdma_ucontext *context = to_vucontext(ibcontext);
- union pvrdma_cmd_req req;
+ union pvrdma_cmd_req req = {};
struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
int ret;
- memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC;
cmd->ctx_handle = context->ctx_handle;
@@ -394,9 +384,6 @@ int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
/* Free the UAR even if the device command failed */
pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar);
- kfree(context);
-
- return ret;
}
/**
@@ -433,37 +420,29 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
/**
* pvrdma_alloc_pd - allocate protection domain
- * @ibdev: the IB device
+ * @ibpd: PD pointer
* @context: user context
* @udata: user data
*
* @return: zero on success, otherwise errno.
*/
-struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct pvrdma_pd *pd;
+ struct ib_device *ibdev = ibpd->device;
+ struct pvrdma_pd *pd = to_vpd(ibpd);
struct pvrdma_dev *dev = to_vdev(ibdev);
- union pvrdma_cmd_req req;
- union pvrdma_cmd_resp rsp;
+ union pvrdma_cmd_req req = {};
+ union pvrdma_cmd_resp rsp = {};
struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
struct pvrdma_alloc_pd_resp pd_resp = {0};
int ret;
- void *ptr;
/* Check allowed max pds */
if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
- return ERR_PTR(-ENOMEM);
-
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ptr = ERR_PTR(-ENOMEM);
- goto err;
- }
+ return -ENOMEM;
- memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
@@ -471,8 +450,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
dev_warn(&dev->pdev->dev,
"failed to allocate protection domain, error: %d\n",
ret);
- ptr = ERR_PTR(ret);
- goto freepd;
+ goto err;
}
pd->privileged = !context;
@@ -485,18 +463,16 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
dev_warn(&dev->pdev->dev,
"failed to copy back protection domain\n");
pvrdma_dealloc_pd(&pd->ibpd);
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
}
/* u32 pd handle */
- return &pd->ibpd;
+ return 0;
-freepd:
- kfree(pd);
err:
atomic_dec(&dev->num_pds);
- return ptr;
+ return ret;
}
/**
@@ -505,14 +481,13 @@ err:
*
* @return: none.
*/
-int pvrdma_dealloc_pd(struct ib_pd *pd)
+void pvrdma_dealloc_pd(struct ib_pd *pd)
{
struct pvrdma_dev *dev = to_vdev(pd->device);
- union pvrdma_cmd_req req;
+ union pvrdma_cmd_req req = {};
struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd;
int ret;
- memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD;
cmd->pd_handle = to_vpd(pd)->pd_handle;
@@ -522,10 +497,7 @@ int pvrdma_dealloc_pd(struct ib_pd *pd)
"could not dealloc protection domain, error: %d\n",
ret);
- kfree(to_vpd(pd));
atomic_dec(&dev->num_pds);
-
- return 0;
}
/**
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index f7f758d60110..607aa131d67c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -396,13 +396,11 @@ int pvrdma_modify_device(struct ib_device *ibdev, int mask,
int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
int mask, struct ib_port_modify *props);
int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata);
-int pvrdma_dealloc_ucontext(struct ib_ucontext *context);
-struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int pvrdma_dealloc_pd(struct ib_pd *ibpd);
+int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
+int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata);
+void pvrdma_dealloc_pd(struct ib_pd *ibpd);
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 49c9541050d4..0bb6e39dd03a 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -381,15 +381,14 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
struct rvt_mr *mr;
struct ib_umem *umem;
- struct scatterlist *sg;
- int n, m, entry;
+ struct sg_page_iter sg_iter;
+ int n, m;
struct ib_mr *ret;
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
if (IS_ERR(umem))
return (void *)umem;
@@ -408,23 +407,21 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.access_flags = mr_access_flags;
mr->umem = umem;
- mr->mr.page_shift = umem->page_shift;
+ mr->mr.page_shift = PAGE_SHIFT;
m = 0;
n = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
void *vaddr;
- vaddr = page_address(sg_page(sg));
+ vaddr = page_address(sg_page_iter_page(&sg_iter));
if (!vaddr) {
ret = ERR_PTR(-EINVAL);
goto bail_inval;
}
mr->mr.map[m]->segs[n].vaddr = vaddr;
- mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
- trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
- BIT(umem->page_shift));
- n++;
- if (n == RVT_SEGSZ) {
+ mr->mr.map[m]->segs[n].length = PAGE_SIZE;
+ trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
+ if (++n == RVT_SEGSZ) {
m++;
n = 0;
}
@@ -611,11 +608,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(mapped_segs == mr->mr.max_segs))
return -ENOMEM;
- if (mr->mr.length == 0) {
- mr->mr.user_base = addr;
- mr->mr.iova = addr;
- }
-
m = mapped_segs / RVT_SEGSZ;
n = mapped_segs % RVT_SEGSZ;
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -633,17 +625,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
* @sg_nents: number of entries in sg
* @sg_offset: offset in bytes into sg
*
+ * Overwrite the rvt_mr length with the length calculated by ib_sg_to_pages.
+ *
* Return: number of sg elements mapped to the memory region
*/
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rvt_mr *mr = to_imr(ibmr);
+ int ret;
mr->mr.length = 0;
mr->mr.page_shift = PAGE_SHIFT;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
- rvt_set_page);
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+ mr->mr.user_base = ibmr->iova;
+ mr->mr.iova = ibmr->iova;
+ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+ mr->mr.length = (size_t)ibmr->length;
+ return ret;
}
/**
@@ -674,6 +673,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
ibmr->rkey = key;
mr->mr.lkey = key;
mr->mr.access_flags = access;
+ mr->mr.iova = ibmr->iova;
atomic_set(&mr->mr.lkey_invalid, 0);
return 0;
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c
index 8a89afff3363..6033054b22fa 100644
--- a/drivers/infiniband/sw/rdmavt/pd.c
+++ b/drivers/infiniband/sw/rdmavt/pd.c
@@ -50,7 +50,7 @@
/**
* rvt_alloc_pd - allocate a protection domain
- * @ibdev: ib device
+ * @ibpd: PD
* @context: optional user context
* @udata: optional user data
*
@@ -58,19 +58,14 @@
*
* Return: 0 on success
*/
-struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = ibpd->device;
struct rvt_dev_info *dev = ib_to_rvt(ibdev);
- struct rvt_pd *pd;
- struct ib_pd *ret;
+ struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
+ int ret = 0;
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
/*
* While we could continue allocating protection domains, being
* constrained only by system resources. The IBTA spec defines that
@@ -81,8 +76,7 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
spin_lock(&dev->n_pds_lock);
if (dev->n_pds_allocated == dev->dparms.props.max_pd) {
spin_unlock(&dev->n_pds_lock);
- kfree(pd);
- ret = ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
goto bail;
}
@@ -92,8 +86,6 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
/* ib_alloc_pd() will initialize pd->ibpd. */
pd->user = !!udata;
- ret = &pd->ibpd;
-
bail:
return ret;
}
@@ -104,16 +96,11 @@ bail:
*
* Return: none.
*/
-int rvt_dealloc_pd(struct ib_pd *ibpd)
+void rvt_dealloc_pd(struct ib_pd *ibpd)
{
- struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
spin_lock(&dev->n_pds_lock);
dev->n_pds_allocated--;
spin_unlock(&dev->n_pds_lock);
-
- kfree(pd);
-
- return 0;
}
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h
index 1892ca4a9746..7a887e4a45e7 100644
--- a/drivers/infiniband/sw/rdmavt/pd.h
+++ b/drivers/infiniband/sw/rdmavt/pd.h
@@ -50,9 +50,8 @@
#include <rdma/rdma_vt.h>
-struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-int rvt_dealloc_pd(struct ib_pd *ibpd);
+int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata);
+void rvt_dealloc_pd(struct ib_pd *ibpd);
#endif /* DEF_RDMAVTPD_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index c6cc3e4ab71d..a34b9a2a32b6 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -53,6 +53,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
+#include <rdma/uverbs_ioctl.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"
@@ -854,6 +855,7 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->s_mig_state = IB_MIG_MIGRATED;
qp->r_head_ack_queue = 0;
qp->s_tail_ack_queue = 0;
+ qp->s_acked_ack_queue = 0;
qp->s_num_rd_atomic = 0;
if (qp->r_rq.wq) {
qp->r_rq.wq->head = 0;
@@ -955,6 +957,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
size_t sg_list_sz;
struct ib_qp *ret = ERR_PTR(-ENOMEM);
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
+ struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct rvt_ucontext, ibucontext);
void *priv = NULL;
size_t sqsize;
@@ -1128,7 +1132,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
qp->ip = rvt_create_mmap_info(rdi, s,
- ibpd->uobject->context,
+ &ucontext->ibucontext,
qp->r_rq.wq);
if (!qp->ip) {
ret = ERR_PTR(-ENOMEM);
@@ -1642,11 +1646,11 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
kref_put(&qp->ip->ref, rvt_release_mmap_info);
else
vfree(qp->r_rq.wq);
- vfree(qp->s_wq);
rdi->driver_f.qp_priv_free(rdi, qp);
kfree(qp->s_ack_queue);
rdma_destroy_ah_attr(&qp->remote_ah_attr);
rdma_destroy_ah_attr(&qp->alt_ah_attr);
+ vfree(qp->s_wq);
kfree(qp);
return 0;
}
@@ -2393,11 +2397,12 @@ static inline unsigned long rvt_aeth_to_usec(u32 aeth)
}
/*
- * rvt_add_retry_timer - add/start a retry timer
+ * rvt_add_retry_timer_ext - add/start a retry timer
* @qp - the QP
+ * @shift - timeout shift to wait for multiple packets
* add a retry timer on the QP
*/
-void rvt_add_retry_timer(struct rvt_qp *qp)
+void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
struct ib_qp *ibqp = &qp->ibqp;
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
@@ -2405,11 +2410,11 @@ void rvt_add_retry_timer(struct rvt_qp *qp)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_TIMER;
/* 4.096 usec. * (1 << qp->timeout) */
- qp->s_timer.expires = jiffies + qp->timeout_jiffies +
- rdi->busy_jiffies;
+ qp->s_timer.expires = jiffies + rdi->busy_jiffies +
+ (qp->timeout_jiffies << shift);
add_timer(&qp->s_timer);
}
-EXPORT_SYMBOL(rvt_add_retry_timer);
+EXPORT_SYMBOL(rvt_add_retry_timer_ext);
/**
* rvt_add_rnr_timer - add/start an rnr timer
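
With the _ext variant the expiry is jiffies + busy_jiffies + (timeout_jiffies << shift): the usual 4.096 usec * 2^timeout interval scaled by a further 2^shift so a responder can wait out several in-flight packets. For example, qp->timeout = 14 gives a base of roughly 67 ms, and shift = 2 stretches that to roughly 268 ms. Existing callers presumably keep the old behaviour through a shift-0 wrapper along these lines (an assumption, not shown in this hunk):

static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);	/* unscaled legacy timeout */
}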
@@ -2785,6 +2790,18 @@ again:
}
EXPORT_SYMBOL(rvt_copy_sge);
+static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
+ struct rvt_qp *sqp)
+{
+ rvp->n_pkt_drops++;
+ /*
+ * For RC, the requester would timeout and retry so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ return sqp->ibqp.qp_type == IB_QPT_RC ?
+ IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
+}
+
/**
* ruc_loopback - handle UC and RC loopback requests
* @sqp: the sending QP
@@ -2857,17 +2874,14 @@ again:
}
spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
+ if (!qp) {
+ send_status = loopback_qp_drop(rvp, sqp);
+ goto serr_no_r_lock;
+ }
+ spin_lock_irqsave(&qp->r_lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- rvp->n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
+ send_status = loopback_qp_drop(rvp, sqp);
goto serr;
}
@@ -2893,18 +2907,8 @@ again:
goto send_comp;
case IB_WR_SEND_WITH_INV:
- if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
- wc.wc_flags = IB_WC_WITH_INVALIDATE;
- wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
- }
- goto send;
-
case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
case IB_WR_SEND:
-send:
ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
@@ -2912,6 +2916,22 @@ send:
goto rnr_nak;
if (wqe->length > qp->r_len)
goto inv_err;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND_WITH_INV:
+ if (!rvt_invalidate_rkey(qp,
+ wqe->wr.ex.invalidate_rkey)) {
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ wc.ex.invalidate_rkey =
+ wqe->wr.ex.invalidate_rkey;
+ }
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ break;
+ default:
+ break;
+ }
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -2988,34 +3008,12 @@ do_write:
sge = &sqp->s_sge.sge;
while (sqp->s_len) {
- u32 len = sqp->s_len;
+ u32 len = rvt_get_sge_length(sge, sqp->s_len);
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
WARN_ON_ONCE(len == 0);
rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
len, release, copy_last);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- rvt_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
+ rvt_update_sge(&sqp->s_sge, len, !release);
sqp->s_len -= len;
}
if (release)
@@ -3041,6 +3039,7 @@ do_write:
wqe->wr.send_flags & IB_SEND_SOLICITED);
send_comp:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
spin_lock_irqsave(&sqp->s_lock, flags);
rvp->n_loop_pkts++;
flush_send:
@@ -3067,6 +3066,7 @@ rnr_nak:
}
if (sqp->s_rnr_retry_cnt < 7)
sqp->s_rnr_retry--;
+ spin_unlock_irqrestore(&qp->r_lock, flags);
spin_lock_irqsave(&sqp->s_lock, flags);
if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
goto clr_busy;
@@ -3095,6 +3095,8 @@ err:
rvt_rc_error(qp, wc.status);
serr:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+serr_no_r_lock:
spin_lock_irqsave(&sqp->s_lock, flags);
rvt_send_complete(sqp, wqe, send_status);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
index 6131cc558bdb..8d71647820a8 100644
--- a/drivers/infiniband/sw/rdmavt/rc.c
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -187,3 +187,16 @@ void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
}
}
EXPORT_SYMBOL(rvt_get_credit);
+
+/* rvt_restart_sge - rewind the sge state for a wqe */
+u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)
+{
+ ss->sge = wqe->sg_list[0];
+ ss->sg_list = wqe->sg_list + 1;
+ ss->num_sge = wqe->wr.num_sge;
+ ss->total_len = wqe->length;
+ rvt_skip_sge(ss, len, false);
+ return wqe->length - len;
+}
+EXPORT_SYMBOL(rvt_restart_sge);
+
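rvt_restart_sge() resets the SGE state to the start of the WQE, skips forward len bytes, and returns how much payload remains. A hypothetical retransmit-path caller (a sketch; the offset computation assumes the usual PSN/pmtu bookkeeping as in the hfi1/qib requesters):

	u32 len = (qp->s_psn - wqe->psn) << qp->log_pmtu;

	qp->s_len = rvt_restart_sge(&qp->s_sge, wqe, len);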
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 78e06fc456c5..895b3fabd0bf 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -48,6 +48,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <rdma/uverbs_ioctl.h>
#include "srq.h"
#include "vt.h"
@@ -77,6 +78,8 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
struct ib_udata *udata)
{
struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
+ struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct rvt_ucontext, ibucontext);
struct rvt_srq *srq;
u32 sz;
struct ib_srq *ret;
@@ -119,7 +122,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
srq->ip =
- rvt_create_mmap_info(dev, s, ibpd->uobject->context,
+ rvt_create_mmap_info(dev, s, &ucontext->ibucontext,
srq->rq.wq);
if (!srq->ip) {
ret = ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/sw/rdmavt/trace_cq.h b/drivers/infiniband/sw/rdmavt/trace_cq.h
index df8e1adbef9d..e3c416c6f900 100644
--- a/drivers/infiniband/sw/rdmavt/trace_cq.h
+++ b/drivers/infiniband/sw/rdmavt/trace_cq.h
@@ -105,7 +105,7 @@ DEFINE_EVENT(rvt_cq_template, rvt_create_cq,
TP_ARGS(cq, attr));
#define CQ_PRN \
-"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x"
+"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x flags %x imm %x"
DECLARE_EVENT_CLASS(
rvt_cq_entry_template,
@@ -119,6 +119,8 @@ DECLARE_EVENT_CLASS(
__field(u32, qpn)
__field(u32, length)
__field(u32, idx)
+ __field(u32, flags)
+ __field(u32, imm)
),
TP_fast_assign(
RDI_DEV_ASSIGN(cq->rdi)
@@ -128,6 +130,8 @@ DECLARE_EVENT_CLASS(
__entry->length = wc->byte_len;
__entry->qpn = wc->qp->qp_num;
__entry->idx = idx;
+ __entry->flags = wc->wc_flags;
+ __entry->imm = be32_to_cpu(wc->ex.imm_data);
),
TP_printk(
CQ_PRN,
@@ -137,7 +141,9 @@ DECLARE_EVENT_CLASS(
__entry->status,
__entry->opcode, show_wc_opcode(__entry->opcode),
__entry->length,
- __entry->qpn
+ __entry->qpn,
+ __entry->flags,
+ __entry->imm
)
);
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index aef3aa3fe667..42c9d35f832d 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -91,7 +91,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
{
struct rvt_dev_info *rdi;
- rdi = (struct rvt_dev_info *)ib_alloc_device(size);
+ rdi = container_of(_ib_alloc_device(size), struct rvt_dev_info, ibdev);
if (!rdi)
return rdi;
@@ -284,10 +284,6 @@ static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
&gid->global.interface_id);
}
-struct rvt_ucontext {
- struct ib_ucontext ibucontext;
-};
-
static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
*ibucontext)
{
@@ -296,28 +292,21 @@ static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
/**
* rvt_alloc_ucontext - Allocate a user context
- * @ibdev: Verbs IB dev
+ * @uctx: Verbs context
* @udata: User data allocated
*/
-static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
+static int rvt_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
- struct rvt_ucontext *context;
-
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
- return &context->ibucontext;
+ return 0;
}
/**
- *rvt_dealloc_ucontext - Free a user context
- *@context - Free this
+ * rvt_dealloc_ucontext - Free a user context
+ * @context - Free this
*/
-static int rvt_dealloc_ucontext(struct ib_ucontext *context)
+static void rvt_dealloc_ucontext(struct ib_ucontext *context)
{
- kfree(to_iucontext(context));
- return 0;
+ return;
}
static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -436,6 +425,8 @@ static const struct ib_device_ops rvt_dev_ops = {
.req_notify_cq = rvt_req_notify_cq,
.resize_cq = rvt_resize_cq,
.unmap_fmr = rvt_unmap_fmr,
+ INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
};
static noinline int check_support(struct rvt_dev_info *rdi, int verb)
@@ -446,7 +437,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
* These functions are not part of verbs specifically but are
* required for rdmavt to function.
*/
- if ((!rdi->driver_f.port_callback) ||
+ if ((!rdi->ibdev.ops.init_port) ||
(!rdi->driver_f.get_pci_dev))
return -EINVAL;
break;
@@ -644,8 +635,7 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
rdi->ibdev.driver_id = driver_id;
/* We are now good to announce we exist */
- ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev),
- rdi->driver_f.port_callback);
+ ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev));
if (ret) {
rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
goto bail_wss;
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 383e65c7bbc0..a8c11b5e1e94 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <rdma/rdma_netlink.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
@@ -50,8 +51,10 @@ static void rxe_cleanup_ports(struct rxe_dev *rxe)
/* free resources for a rxe device; all objects created for this device must
* have been destroyed
*/
-static void rxe_cleanup(struct rxe_dev *rxe)
+void rxe_dealloc(struct ib_device *ib_dev)
{
+ struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
+
rxe_pool_cleanup(&rxe->uc_pool);
rxe_pool_cleanup(&rxe->pd_pool);
rxe_pool_cleanup(&rxe->ah_pool);
@@ -65,16 +68,8 @@ static void rxe_cleanup(struct rxe_dev *rxe)
rxe_cleanup_ports(rxe);
- crypto_free_shash(rxe->tfm);
-}
-
-/* called when all references have been dropped */
-void rxe_release(struct kref *kref)
-{
- struct rxe_dev *rxe = container_of(kref, struct rxe_dev, ref_cnt);
-
- rxe_cleanup(rxe);
- ib_dealloc_device(&rxe->ib_dev);
+ if (rxe->tfm)
+ crypto_free_shash(rxe->tfm);
}
/* initialize rxe device parameters */
@@ -279,7 +274,6 @@ static int rxe_init(struct rxe_dev *rxe)
spin_lock_init(&rxe->mmap_offset_lock);
spin_lock_init(&rxe->pending_lock);
INIT_LIST_HEAD(&rxe->pending_mmaps);
- INIT_LIST_HEAD(&rxe->list);
mutex_init(&rxe->usdev_lock);
@@ -308,37 +302,46 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
/* called by the ifc layer to create a new rxe device.
* The caller should allocate memory for rxe by calling ib_alloc_device.
*/
-int rxe_add(struct rxe_dev *rxe, unsigned int mtu)
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
{
int err;
- kref_init(&rxe->ref_cnt);
-
err = rxe_init(rxe);
if (err)
- goto err1;
+ return err;
rxe_set_mtu(rxe, mtu);
- err = rxe_register_device(rxe);
- if (err)
- goto err1;
-
- return 0;
-
-err1:
- rxe_dev_put(rxe);
- return err;
+ return rxe_register_device(rxe, ibdev_name);
}
-/* called by the ifc layer to remove a device */
-void rxe_remove(struct rxe_dev *rxe)
+static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
{
- rxe_unregister_device(rxe);
+ struct rxe_dev *exists;
+ int err = 0;
+
+ exists = rxe_get_dev_from_net(ndev);
+ if (exists) {
+ ib_device_put(&exists->ib_dev);
+ pr_err("already configured on %s\n", ndev->name);
+ err = -EEXIST;
+ goto err;
+ }
- rxe_dev_put(rxe);
+ err = rxe_net_add(ibdev_name, ndev);
+ if (err) {
+ pr_err("failed to add %s\n", ndev->name);
+ goto err;
+ }
+err:
+ return err;
}
+static struct rdma_link_ops rxe_link_ops = {
+ .type = "rxe",
+ .newlink = rxe_newlink,
+};
+
static int __init rxe_module_init(void)
{
int err;
@@ -354,13 +357,15 @@ static int __init rxe_module_init(void)
if (err)
return err;
+ rdma_link_register(&rxe_link_ops);
pr_info("loaded\n");
return 0;
}
static void __exit rxe_module_exit(void)
{
- rxe_remove_all();
+ rdma_link_unregister(&rxe_link_ops);
+ ib_unregister_driver(RDMA_DRIVER_RXE);
rxe_net_exit();
rxe_cache_exit();
@@ -369,3 +374,5 @@ static void __exit rxe_module_exit(void)
late_initcall(rxe_module_init);
module_exit(rxe_module_exit);
+
+MODULE_ALIAS_RDMA_LINK("rxe");
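With rxe_link_ops registered and the module alias in place, rxe devices can now be created on demand from user space through the rdma netlink link interface instead of the module parameters, e.g. (assuming an iproute2 build with rdma link support) "rdma link add rxe0 type rxe netdev eth0"; the alias lets such a request autoload the module.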
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 5bde2ad964d2..ecf6e659c0da 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -80,7 +80,6 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
SHASH_DESC_ON_STACK(shash, rxe->tfm);
shash->tfm = rxe->tfm;
- shash->flags = 0;
*(u32 *)shash_desc_ctx(shash) = crc;
err = crypto_shash_update(shash, next, len);
if (unlikely(err)) {
@@ -95,18 +94,20 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
-int rxe_add(struct rxe_dev *rxe, unsigned int mtu);
-void rxe_remove(struct rxe_dev *rxe);
-void rxe_remove_all(void);
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
void rxe_rcv(struct sk_buff *skb);
-static inline void rxe_dev_put(struct rxe_dev *rxe)
+/* The caller must do a matching ib_device_put(&dev->ib_dev) */
+static inline struct rxe_dev *rxe_get_dev_from_net(struct net_device *ndev)
{
- kref_put(&rxe->ref_cnt, rxe_release);
+ struct ib_device *ibdev =
+ ib_device_get_by_netdev(ndev, RDMA_DRIVER_RXE);
+
+ if (!ibdev)
+ return NULL;
+ return container_of(ibdev, struct rxe_dev, ib_dev);
}
-struct rxe_dev *net_to_rxe(struct net_device *ndev);
-struct rxe_dev *get_rxe_by_name(const char *name);
void rxe_port_up(struct rxe_dev *rxe);
void rxe_port_down(struct rxe_dev *rxe);
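Since the lookup now goes through ib_device_get_by_netdev(), every successful call returns a held reference; a minimal caller sketch of the required pairing:

	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);

	if (rxe) {
		/* ... use rxe ... */
		ib_device_put(&rxe->ib_dev);
	}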
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 26fe8d7dbc55..81ee756c19b8 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -34,6 +34,13 @@
#include "rxe.h"
#include "rxe_loc.h"
+void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av)
+{
+ rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
+ rxe_av_fill_ip_info(av, attr);
+ memcpy(av->dmac, attr->roce.dmac, ETH_ALEN);
+}
+
int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
{
struct rxe_port *port;
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index e996da67a851..00eb99d3df86 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -146,8 +146,7 @@ void retransmit_timer(struct timer_list *t)
}
}
-void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
- struct sk_buff *skb)
+void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
int must_sched;
@@ -155,7 +154,8 @@ void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
must_sched = skb_queue_len(&qp->resp_pkts) > 1;
if (must_sched != 0)
- rxe_counter_inc(rxe, RXE_CNT_COMPLETER_SCHED);
+ rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
+
rxe_run_task(&qp->comp.task, must_sched);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 01b74597b36a..3d8cef836f0d 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -35,6 +35,7 @@
#define RXE_LOC_H
/* rxe_av.c */
+void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av);
int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);
@@ -231,7 +232,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
struct rxe_modify_srq_cmd *ucmd);
-void rxe_release(struct kref *kref);
+void rxe_dealloc(struct ib_device *ib_dev);
int rxe_completer(void *arg);
int rxe_requester(void *arg);
@@ -239,11 +240,9 @@ int rxe_responder(void *arg);
u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
-void rxe_resp_queue_pkt(struct rxe_dev *rxe,
- struct rxe_qp *qp, struct sk_buff *skb);
+void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
-void rxe_comp_queue_pkt(struct rxe_dev *rxe,
- struct rxe_qp *qp, struct sk_buff *skb);
+void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 9d3916b93f23..42f0f25e396c 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -162,16 +162,15 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
u64 length, u64 iova, int access, struct ib_udata *udata,
struct rxe_mem *mem)
{
- int entry;
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
struct ib_umem *umem;
- struct scatterlist *sg;
+ struct sg_page_iter sg_iter;
int num_buf;
void *vaddr;
int err;
- umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
+ umem = ib_umem_get(udata, start, length, access, 0);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
@@ -191,16 +190,16 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
goto err1;
}
- mem->page_shift = umem->page_shift;
- mem->page_mask = BIT(umem->page_shift) - 1;
+ mem->page_shift = PAGE_SHIFT;
+ mem->page_mask = PAGE_SIZE - 1;
num_buf = 0;
map = mem->map;
if (length > 0) {
buf = map[0]->buf;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- vaddr = page_address(sg_page(sg));
+ for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ vaddr = page_address(sg_page_iter_page(&sg_iter));
if (!vaddr) {
pr_warn("null vaddr\n");
err = -ENOMEM;
@@ -208,7 +207,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
}
buf->addr = (uintptr_t)vaddr;
- buf->size = BIT(umem->page_shift);
+ buf->size = PAGE_SIZE;
num_buf++;
buf++;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 8fd03ae20efc..753cabcd441c 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -45,43 +45,6 @@
#include "rxe_net.h"
#include "rxe_loc.h"
-static LIST_HEAD(rxe_dev_list);
-static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
-
-struct rxe_dev *net_to_rxe(struct net_device *ndev)
-{
- struct rxe_dev *rxe;
- struct rxe_dev *found = NULL;
-
- spin_lock_bh(&dev_list_lock);
- list_for_each_entry(rxe, &rxe_dev_list, list) {
- if (rxe->ndev == ndev) {
- found = rxe;
- break;
- }
- }
- spin_unlock_bh(&dev_list_lock);
-
- return found;
-}
-
-struct rxe_dev *get_rxe_by_name(const char *name)
-{
- struct rxe_dev *rxe;
- struct rxe_dev *found = NULL;
-
- spin_lock_bh(&dev_list_lock);
- list_for_each_entry(rxe, &rxe_dev_list, list) {
- if (!strcmp(name, dev_name(&rxe->ib_dev.dev))) {
- found = rxe;
- break;
- }
- }
- spin_unlock_bh(&dev_list_lock);
- return found;
-}
-
-
static struct rxe_recv_sockets recv_sockets;
struct device *rxe_dma_device(struct rxe_dev *rxe)
@@ -229,18 +192,19 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct udphdr *udph;
struct net_device *ndev = skb->dev;
struct net_device *rdev = ndev;
- struct rxe_dev *rxe = net_to_rxe(ndev);
+ struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
if (!rxe && is_vlan_dev(rdev)) {
rdev = vlan_dev_real_dev(ndev);
- rxe = net_to_rxe(rdev);
+ rxe = rxe_get_dev_from_net(rdev);
}
if (!rxe)
goto drop;
if (skb_linearize(skb)) {
pr_err("skb_linearize failed\n");
+ ib_device_put(&rxe->ib_dev);
goto drop;
}
@@ -253,6 +217,12 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
rxe_rcv(skb);
+ /*
+ * FIXME: this is in the wrong place; it needs to be done when pkt is
+ * destroyed
+ */
+ ib_device_put(&rxe->ib_dev);
+
return 0;
drop:
kfree_skb(skb);
@@ -384,9 +354,6 @@ static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb,
return -EHOSTUNREACH;
}
- if (!memcmp(saddr, daddr, sizeof(*daddr)))
- pkt->mask |= RXE_LOOPBACK_MASK;
-
prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
cpu_to_be16(ROCE_V2_UDP_DPORT));
@@ -411,9 +378,6 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb,
return -EHOSTUNREACH;
}
- if (!memcmp(saddr, daddr, sizeof(*daddr)))
- pkt->mask |= RXE_LOOPBACK_MASK;
-
prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
cpu_to_be16(ROCE_V2_UDP_DPORT));
@@ -437,6 +401,9 @@ int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
*crc = rxe_icrc_hdr(pkt, skb);
+ if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
+ pkt->mask |= RXE_LOOPBACK_MASK;
+
return err;
}
@@ -550,42 +517,24 @@ enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num)
return IB_LINK_LAYER_ETHERNET;
}
-struct rxe_dev *rxe_net_add(struct net_device *ndev)
+int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
{
int err;
struct rxe_dev *rxe = NULL;
- rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
+ rxe = ib_alloc_device(rxe_dev, ib_dev);
if (!rxe)
- return NULL;
+ return -ENOMEM;
rxe->ndev = ndev;
- err = rxe_add(rxe, ndev->mtu);
+ err = rxe_add(rxe, ndev->mtu, ibdev_name);
if (err) {
ib_dealloc_device(&rxe->ib_dev);
- return NULL;
+ return err;
}
- spin_lock_bh(&dev_list_lock);
- list_add_tail(&rxe->list, &rxe_dev_list);
- spin_unlock_bh(&dev_list_lock);
- return rxe;
-}
-
-void rxe_remove_all(void)
-{
- spin_lock_bh(&dev_list_lock);
- while (!list_empty(&rxe_dev_list)) {
- struct rxe_dev *rxe =
- list_first_entry(&rxe_dev_list, struct rxe_dev, list);
-
- list_del(&rxe->list);
- spin_unlock_bh(&dev_list_lock);
- rxe_remove(rxe);
- spin_lock_bh(&dev_list_lock);
- }
- spin_unlock_bh(&dev_list_lock);
+ return 0;
}
static void rxe_port_event(struct rxe_dev *rxe,
@@ -638,15 +587,14 @@ static int rxe_notify(struct notifier_block *not_blk,
void *arg)
{
struct net_device *ndev = netdev_notifier_info_to_dev(arg);
- struct rxe_dev *rxe = net_to_rxe(ndev);
+ struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
if (!rxe)
- goto out;
+ return NOTIFY_OK;
switch (event) {
case NETDEV_UNREGISTER:
- list_del(&rxe->list);
- rxe_remove(rxe);
+ ib_unregister_device_queued(&rxe->ib_dev);
break;
case NETDEV_UP:
rxe_port_up(rxe);
@@ -671,7 +619,8 @@ static int rxe_notify(struct notifier_block *not_blk,
event, ndev->name);
break;
}
-out:
+
+ ib_device_put(&rxe->ib_dev);
return NOTIFY_OK;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 106c586dbb26..2ca71d3d245c 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -43,7 +43,7 @@ struct rxe_recv_sockets {
struct socket *sk6;
};
-struct rxe_dev *rxe_net_add(struct net_device *ndev);
+int rxe_net_add(const char *ibdev_name, struct net_device *ndev);
int rxe_net_init(void);
void rxe_net_exit(void);
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index bdea899a58ac..1abed47ca221 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -78,7 +78,8 @@ enum rxe_device_param {
| IB_DEVICE_SYS_IMAGE_GUID
| IB_DEVICE_RC_RNR_NAK_GEN
| IB_DEVICE_SRQ_RESIZE
- | IB_DEVICE_MEM_MGT_EXTENSIONS,
+ | IB_DEVICE_MEM_MGT_EXTENSIONS
+ | IB_DEVICE_ALLOW_USER_UNREG,
RXE_MAX_SGE = 32,
RXE_MAX_SGE_RD = 32,
RXE_MAX_CQ = 16384,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index b5c91df22047..120fa9005954 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -42,10 +42,12 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_UC] = {
.name = "rxe-uc",
.size = sizeof(struct rxe_ucontext),
+ .flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_PD] = {
.name = "rxe-pd",
.size = sizeof(struct rxe_pd),
+ .flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_AH] = {
.name = "rxe-ah",
@@ -119,8 +121,10 @@ static void rxe_cache_clean(size_t cnt)
for (i = 0; i < cnt; i++) {
type = &rxe_type_info[i];
- kmem_cache_destroy(type->cache);
- type->cache = NULL;
+ if (!(type->flags & RXE_POOL_NO_ALLOC)) {
+ kmem_cache_destroy(type->cache);
+ type->cache = NULL;
+ }
}
}
@@ -134,14 +138,17 @@ int rxe_cache_init(void)
for (i = 0; i < RXE_NUM_TYPES; i++) {
type = &rxe_type_info[i];
size = ALIGN(type->size, RXE_POOL_ALIGN);
- type->cache = kmem_cache_create(type->name, size,
- RXE_POOL_ALIGN,
- RXE_POOL_CACHE_FLAGS, NULL);
- if (!type->cache) {
- pr_err("Unable to init kmem cache for %s\n",
- type->name);
- err = -ENOMEM;
- goto err1;
+ if (!(type->flags & RXE_POOL_NO_ALLOC)) {
+ type->cache =
+ kmem_cache_create(type->name, size,
+ RXE_POOL_ALIGN,
+ RXE_POOL_CACHE_FLAGS, NULL);
+ if (!type->cache) {
+ pr_err("Unable to init kmem cache for %s\n",
+ type->name);
+ err = -ENOMEM;
+ goto err1;
+ }
}
}
@@ -392,29 +399,64 @@ void *rxe_alloc(struct rxe_pool *pool)
kref_get(&pool->ref_cnt);
read_unlock_irqrestore(&pool->pool_lock, flags);
- kref_get(&pool->rxe->ref_cnt);
+ if (!ib_device_try_get(&pool->rxe->ib_dev))
+ goto out_put_pool;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto out_put_pool;
+ goto out_cnt;
elem = kmem_cache_zalloc(pool_cache(pool),
(pool->flags & RXE_POOL_ATOMIC) ?
GFP_ATOMIC : GFP_KERNEL);
if (!elem)
- goto out_put_pool;
+ goto out_cnt;
elem->pool = pool;
kref_init(&elem->ref_cnt);
return elem;
-out_put_pool:
+out_cnt:
atomic_dec(&pool->num_elem);
- rxe_dev_put(pool->rxe);
+ ib_device_put(&pool->rxe->ib_dev);
+out_put_pool:
rxe_pool_put(pool);
return NULL;
}
+int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+{
+ unsigned long flags;
+
+ might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+
+ read_lock_irqsave(&pool->pool_lock, flags);
+ if (pool->state != RXE_POOL_STATE_VALID) {
+ read_unlock_irqrestore(&pool->pool_lock, flags);
+ return -EINVAL;
+ }
+ kref_get(&pool->ref_cnt);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
+
+ if (!ib_device_try_get(&pool->rxe->ib_dev))
+ goto out_put_pool;
+
+ if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+ goto out_cnt;
+
+ elem->pool = pool;
+ kref_init(&elem->ref_cnt);
+
+ return 0;
+
+out_cnt:
+ atomic_dec(&pool->num_elem);
+ ib_device_put(&pool->rxe->ib_dev);
+out_put_pool:
+ rxe_pool_put(pool);
+ return -EINVAL;
+}
+
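rxe_add_to_pool() is the counterpart of rxe_alloc() for objects whose memory the ib core already allocated via INIT_RDMA_OBJ_SIZE(): it takes the same pool and device references but skips the kmem_cache allocation. A caller sketch matching the PD conversion later in this series:

	struct rxe_pd *pd = to_rpd(ibpd);	/* wrapper allocated by the core */

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);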
void rxe_elem_release(struct kref *kref)
{
struct rxe_pool_entry *elem =
@@ -424,9 +466,10 @@ void rxe_elem_release(struct kref *kref)
if (pool->cleanup)
pool->cleanup(elem);
- kmem_cache_free(pool_cache(pool), elem);
+ if (!(pool->flags & RXE_POOL_NO_ALLOC))
+ kmem_cache_free(pool_cache(pool), elem);
atomic_dec(&pool->num_elem);
- rxe_dev_put(pool->rxe);
+ ib_device_put(&pool->rxe->ib_dev);
rxe_pool_put(pool);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 72968c29e01f..2f2cff1cbe43 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -41,6 +41,7 @@ enum rxe_pool_flags {
RXE_POOL_ATOMIC = BIT(0),
RXE_POOL_INDEX = BIT(1),
RXE_POOL_KEY = BIT(2),
+ RXE_POOL_NO_ALLOC = BIT(4),
};
enum rxe_elem_type {
@@ -131,6 +132,9 @@ void rxe_pool_cleanup(struct rxe_pool *pool);
/* allocate an object from pool */
void *rxe_alloc(struct rxe_pool *pool);
+/* connect already allocated object to pool */
+int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+
/* assign an index to an indexed object and insert object into
* pool's rb tree
*/
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index fd86fd2fbb26..09ede70dc1e8 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
+#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
@@ -343,7 +344,8 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct rxe_cq *rcq = to_rcq(init->recv_cq);
struct rxe_cq *scq = to_rcq(init->send_cq);
struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
- struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
+ struct rxe_ucontext *ucontext =
+ rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
rxe_add_ref(pd);
rxe_add_ref(rcq);
@@ -358,11 +360,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
rxe_qp_init_misc(rxe, qp, init);
- err = rxe_qp_init_req(rxe, qp, init, context, uresp);
+ err = rxe_qp_init_req(rxe, qp, init, &ucontext->ibuc, uresp);
if (err)
goto err1;
- err = rxe_qp_init_resp(rxe, qp, init, context, uresp);
+ err = rxe_qp_init_resp(rxe, qp, init, &ucontext->ibuc, uresp);
if (err)
goto err2;
@@ -631,14 +633,11 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
qp->attr.qkey = attr->qkey;
if (mask & IB_QP_AV) {
- rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
- rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr);
+ rxe_init_av(&attr->ah_attr, &qp->pri_av);
}
if (mask & IB_QP_ALT_PATH) {
- rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
- &attr->alt_ah_attr);
- rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr);
+ rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
qp->attr.alt_port_num = attr->alt_port_num;
qp->attr.alt_pkey_index = attr->alt_pkey_index;
qp->attr.alt_timeout = attr->alt_timeout;
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 5c29a1bb575a..f9a492ed900b 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -266,14 +266,12 @@ err1:
return -EINVAL;
}
-static inline void rxe_rcv_pkt(struct rxe_dev *rxe,
- struct rxe_pkt_info *pkt,
- struct sk_buff *skb)
+static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
if (pkt->mask & RXE_REQ_MASK)
- rxe_resp_queue_pkt(rxe, pkt->qp, skb);
+ rxe_resp_queue_pkt(pkt->qp, skb);
else
- rxe_comp_queue_pkt(rxe, pkt->qp, skb);
+ rxe_comp_queue_pkt(pkt->qp, skb);
}
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
@@ -319,7 +317,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
pkt->qp = qp;
rxe_add_ref(qp);
- rxe_rcv_pkt(rxe, pkt, skb);
+ rxe_rcv_pkt(pkt, skb);
}
spin_unlock_bh(&mcg->mcg_lock);
@@ -411,7 +409,7 @@ void rxe_rcv(struct sk_buff *skb)
if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
rxe_rcv_mcast_pkt(rxe, skb);
else
- rxe_rcv_pkt(rxe, pkt, skb);
+ rxe_rcv_pkt(pkt, skb);
return;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 231528188250..aca9f60f9b21 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -104,8 +104,7 @@ static char *resp_state_name[] = {
};
/* rxe_recv calls here to add a request packet to the input queue */
-void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
- struct sk_buff *skb)
+void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
int must_sched;
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index 95a15892f7e6..ccda5f5a3bc0 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -58,41 +58,37 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
int len;
int err = 0;
char intf[32];
- struct net_device *ndev = NULL;
- struct rxe_dev *rxe;
+ struct net_device *ndev;
+ struct rxe_dev *exists;
len = sanitize_arg(val, intf, sizeof(intf));
if (!len) {
pr_err("add: invalid interface name\n");
- err = -EINVAL;
- goto err;
+ return -EINVAL;
}
ndev = dev_get_by_name(&init_net, intf);
if (!ndev) {
pr_err("interface %s not found\n", intf);
- err = -EINVAL;
- goto err;
+ return -EINVAL;
}
- if (net_to_rxe(ndev)) {
+ exists = rxe_get_dev_from_net(ndev);
+ if (exists) {
+ ib_device_put(&exists->ib_dev);
pr_err("already configured on %s\n", intf);
err = -EINVAL;
goto err;
}
- rxe = rxe_net_add(ndev);
- if (!rxe) {
+ err = rxe_net_add("rxe%d", ndev);
+ if (err) {
pr_err("failed to add %s\n", intf);
- err = -EINVAL;
goto err;
}
- rxe_set_port_state(rxe);
- dev_info(&rxe->ib_dev.dev, "added %s\n", intf);
err:
- if (ndev)
- dev_put(ndev);
+ dev_put(ndev);
return err;
}
@@ -100,7 +96,7 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
{
int len;
char intf[32];
- struct rxe_dev *rxe;
+ struct ib_device *ib_dev;
len = sanitize_arg(val, intf, sizeof(intf));
if (!len) {
@@ -110,19 +106,17 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
if (strncmp("all", intf, len) == 0) {
pr_info("rxe_sys: remove all");
- rxe_remove_all();
+ ib_unregister_driver(RDMA_DRIVER_RXE);
return 0;
}
- rxe = get_rxe_by_name(intf);
-
- if (!rxe) {
+ ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE);
+ if (!ib_dev) {
pr_err("not configured on %s\n", intf);
return -EINVAL;
}
- list_del(&rxe->list);
- rxe_remove(rxe);
+ ib_unregister_device_and_put(ib_dev);
return 0;
}
@@ -136,6 +130,6 @@ static const struct kernel_param_ops rxe_remove_ops = {
};
module_param_cb(add, &rxe_add_ops, NULL, 0200);
-MODULE_PARM_DESC(add, "Create RXE device over network interface");
+MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface");
module_param_cb(remove, &rxe_remove_ops, NULL, 0200);
-MODULE_PARM_DESC(remove, "Remove RXE device over network interface");
+MODULE_PARM_DESC(remove, "DEPRECATED. Remove RXE device over network interface");
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index b20e6e0415f5..6ecf28570ff0 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -33,6 +33,7 @@
#include <linux/dma-mapping.h>
#include <net/addrconf.h>
+#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -79,19 +80,6 @@ static int rxe_query_port(struct ib_device *dev,
return rc;
}
-static struct net_device *rxe_get_netdev(struct ib_device *device,
- u8 port_num)
-{
- struct rxe_dev *rxe = to_rdev(device);
-
- if (rxe->ndev) {
- dev_hold(rxe->ndev);
- return rxe->ndev;
- }
-
- return NULL;
-}
-
static int rxe_query_pkey(struct ib_device *device,
u8 port_num, u16 index, u16 *pkey)
{
@@ -154,22 +142,19 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
return rxe_link_layer(rxe, port_num);
}
-static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
- struct ib_udata *udata)
+static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
- struct rxe_dev *rxe = to_rdev(dev);
- struct rxe_ucontext *uc;
+ struct rxe_dev *rxe = to_rdev(uctx->device);
+ struct rxe_ucontext *uc = to_ruc(uctx);
- uc = rxe_alloc(&rxe->uc_pool);
- return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
+ return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}
-static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
+static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
struct rxe_ucontext *uc = to_ruc(ibuc);
rxe_drop_ref(uc);
- return 0;
}
static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
@@ -191,30 +176,20 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
return 0;
}
-static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct rxe_dev *rxe = to_rdev(dev);
- struct rxe_pd *pd;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
- pd = rxe_alloc(&rxe->pd_pool);
- return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
+ return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}
-static int rxe_dealloc_pd(struct ib_pd *ibpd)
+static void rxe_dealloc_pd(struct ib_pd *ibpd)
{
struct rxe_pd *pd = to_rpd(ibpd);
rxe_drop_ref(pd);
- return 0;
-}
-
-static void rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
- struct rxe_av *av)
-{
- rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
- rxe_av_fill_ip_info(av, attr);
}
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
@@ -239,7 +214,7 @@ static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
rxe_add_ref(pd);
ah->pd = pd;
- rxe_init_av(rxe, attr, &ah->av);
+ rxe_init_av(attr, &ah->av);
return &ah->ibah;
}
@@ -253,7 +228,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
if (err)
return err;
- rxe_init_av(rxe, attr, &ah->av);
+ rxe_init_av(attr, &ah->av);
return 0;
}
@@ -330,8 +305,9 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_ucontext *ucontext =
+ rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
struct rxe_srq *srq;
- struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
struct rxe_create_srq_resp __user *uresp = NULL;
if (udata) {
@@ -354,7 +330,7 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
rxe_add_ref(pd);
srq->pd = pd;
- err = rxe_srq_from_init(rxe, srq, init, context, uresp);
+ err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp);
if (err)
goto err2;
@@ -1129,8 +1105,8 @@ static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
static ssize_t parent_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct rxe_dev *rxe = container_of(device, struct rxe_dev,
- ib_dev.dev);
+ struct rxe_dev *rxe =
+ rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}
@@ -1146,6 +1122,15 @@ static const struct attribute_group rxe_attr_group = {
.attrs = rxe_dev_attributes,
};
+static int rxe_enable_driver(struct ib_device *ib_dev)
+{
+ struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
+
+ rxe_set_port_state(rxe);
+ dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
+ return 0;
+}
+
static const struct ib_device_ops rxe_dev_ops = {
.alloc_hw_stats = rxe_ib_alloc_hw_stats,
.alloc_mr = rxe_alloc_mr,
@@ -1156,6 +1141,7 @@ static const struct ib_device_ops rxe_dev_ops = {
.create_cq = rxe_create_cq,
.create_qp = rxe_create_qp,
.create_srq = rxe_create_srq,
+ .dealloc_driver = rxe_dealloc,
.dealloc_pd = rxe_dealloc_pd,
.dealloc_ucontext = rxe_dealloc_ucontext,
.dereg_mr = rxe_dereg_mr,
@@ -1164,10 +1150,10 @@ static const struct ib_device_ops rxe_dev_ops = {
.destroy_qp = rxe_destroy_qp,
.destroy_srq = rxe_destroy_srq,
.detach_mcast = rxe_detach_mcast,
+ .enable_driver = rxe_enable_driver,
.get_dma_mr = rxe_get_dma_mr,
.get_hw_stats = rxe_ib_get_hw_stats,
.get_link_layer = rxe_get_link_layer,
- .get_netdev = rxe_get_netdev,
.get_port_immutable = rxe_port_immutable,
.map_mr_sg = rxe_map_mr_sg,
.mmap = rxe_mmap,
@@ -1190,9 +1176,11 @@ static const struct ib_device_ops rxe_dev_ops = {
.reg_user_mr = rxe_reg_user_mr,
.req_notify_cq = rxe_req_notify_cq,
.resize_cq = rxe_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};
-int rxe_register_device(struct rxe_dev *rxe)
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
int err;
struct ib_device *dev = &rxe->ib_dev;
@@ -1247,6 +1235,9 @@ int rxe_register_device(struct rxe_dev *rxe)
;
ib_set_device_ops(dev, &rxe_dev_ops);
+ err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
+ if (err)
+ return err;
tfm = crypto_alloc_shash("crc32", 0, 0);
if (IS_ERR(tfm)) {
@@ -1258,23 +1249,13 @@ int rxe_register_device(struct rxe_dev *rxe)
rdma_set_device_sysfs_group(dev, &rxe_attr_group);
dev->driver_id = RDMA_DRIVER_RXE;
- err = ib_register_device(dev, "rxe%d", NULL);
- if (err) {
+ err = ib_register_device(dev, ibdev_name);
+ if (err)
pr_warn("%s failed with error %d\n", __func__, err);
- goto err1;
- }
-
- return 0;
-
-err1:
- crypto_free_shash(rxe->tfm);
+ /*
+ * Note that rxe may be invalid at this point if another thread
+ * unregistered it.
+ */
return err;
}
-
-void rxe_unregister_device(struct rxe_dev *rxe)
-{
- struct ib_device *dev = &rxe->ib_dev;
-
- ib_unregister_device(dev);
-}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 74e04801d34d..157e51aeb1e1 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -61,13 +61,13 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
}
struct rxe_ucontext {
+ struct ib_ucontext ibuc;
struct rxe_pool_entry pelem;
- struct ib_ucontext ibuc;
};
struct rxe_pd {
+ struct ib_pd ibpd;
struct rxe_pool_entry pelem;
- struct ib_pd ibpd;
};
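Reordering these structures so the ib_* member comes first is presumably required by the INIT_RDMA_OBJ_SIZE() entries added above, which have the core allocate the driver structure and treat its start as the ib object; the driver still recovers its wrapper with container_of(), e.g. (as defined elsewhere in rxe_verbs.h):

	static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
	{
		return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
	}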
struct rxe_ah {
@@ -385,7 +385,6 @@ struct rxe_dev {
struct ib_device_attr attr;
int max_ucontext;
int max_inline_data;
- struct kref ref_cnt;
struct mutex usdev_lock;
struct net_device *ndev;
@@ -412,7 +411,6 @@ struct rxe_dev {
atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];
struct rxe_port port;
- struct list_head list;
struct crypto_shash *tfm;
};
@@ -466,8 +464,7 @@ static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
}
-int rxe_register_device(struct rxe_dev *rxe);
-void rxe_unregister_device(struct rxe_dev *rxe);
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
void rxe_mc_cleanup(struct rxe_pool_entry *arg);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 73e808c1e6ad..2aa3457a30ce 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -780,12 +780,12 @@ static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *w
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
void ipoib_create_debug_files(struct net_device *dev);
void ipoib_delete_debug_files(struct net_device *dev);
-int ipoib_register_debugfs(void);
+void ipoib_register_debugfs(void);
void ipoib_unregister_debugfs(void);
#else
static inline void ipoib_create_debug_files(struct net_device *dev) { }
static inline void ipoib_delete_debug_files(struct net_device *dev) { }
-static inline int ipoib_register_debugfs(void) { return 0; }
+static inline void ipoib_register_debugfs(void) { }
static inline void ipoib_unregister_debugfs(void) { }
#endif
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 178488028734..64c19f6fa931 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -267,14 +267,10 @@ void ipoib_create_debug_files(struct net_device *dev)
snprintf(name, sizeof(name), "%s_mcg", dev->name);
priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_mcg_fops);
- if (!priv->mcg_dentry)
- ipoib_warn(priv, "failed to create mcg debug file\n");
snprintf(name, sizeof(name), "%s_path", dev->name);
priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_path_fops);
- if (!priv->path_dentry)
- ipoib_warn(priv, "failed to create path debug file\n");
}
void ipoib_delete_debug_files(struct net_device *dev)
@@ -286,10 +282,9 @@ void ipoib_delete_debug_files(struct net_device *dev)
priv->mcg_dentry = priv->path_dentry = NULL;
}
-int ipoib_register_debugfs(void)
+void ipoib_register_debugfs(void)
{
ipoib_root = debugfs_create_dir("ipoib", NULL);
- return ipoib_root ? 0 : -ENOMEM;
}
void ipoib_unregister_debugfs(void)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d932f99201d1..48eda16db1a7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -613,7 +613,7 @@ static void path_free(struct net_device *dev, struct ipoib_path *path)
while ((skb = __skb_dequeue(&path->queue)))
dev_kfree_skb_irq(skb);
- ipoib_dbg(ipoib_priv(dev), "path_free\n");
+ ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);
/* remove all neigh connected to this path */
ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
@@ -1641,7 +1641,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
+ ipoib_dbg(priv, "%s\n", __func__);
init_completion(&priv->ntbl.deleted);
cancel_delayed_work_sync(&priv->neigh_reap_task);
@@ -2411,7 +2411,7 @@ static ssize_t dev_id_show(struct device *dev,
}
static DEVICE_ATTR_RO(dev_id);
-int ipoib_intercept_dev_id_attr(struct net_device *dev)
+static int ipoib_intercept_dev_id_attr(struct net_device *dev)
{
device_remove_file(&dev->dev, &dev_attr_dev_id);
return device_create_file(&dev->dev, &dev_attr_dev_id);
@@ -2495,7 +2495,7 @@ static void ipoib_add_one(struct ib_device *device)
struct list_head *dev_list;
struct net_device *dev;
struct ipoib_dev_priv *priv;
- int p;
+ unsigned int p;
int count = 0;
dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
@@ -2504,7 +2504,7 @@ static void ipoib_add_one(struct ib_device *device)
INIT_LIST_HEAD(dev_list);
- for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
+ rdma_for_each_port (device, p) {
if (!rdma_protocol_ib(device, p))
continue;
dev = ipoib_add_port("ib%d", device, p);
@@ -2577,9 +2577,7 @@ static int __init ipoib_init_module(void)
*/
BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
- ret = ipoib_register_debugfs();
- if (ret)
- return ret;
+ ipoib_register_debugfs();
/*
* We create a global workqueue here that is used for all flush
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 120b40829560..a7aeaa0c6fbc 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -197,7 +197,7 @@ struct iser_data_buf {
struct scatterlist *sg;
int size;
unsigned long data_len;
- unsigned int dma_nents;
+ int dma_nents;
};
/* fwd declarations */
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index e9b7efc302d0..2ba70729d7b0 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -145,9 +145,8 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
for_each_sg(data->sg, sg, data->dma_nents, i)
iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
- i, (unsigned long)ib_sg_dma_address(ibdev, sg),
- sg_page(sg), sg->offset,
- sg->length, ib_sg_dma_len(ibdev, sg));
+ i, (unsigned long)sg_dma_address(sg),
+ sg_page(sg), sg->offset, sg->length, sg_dma_len(sg));
}
static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -204,8 +203,8 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
reg->rkey = device->pd->unsafe_global_rkey;
else
reg->rkey = 0;
- reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
- reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
+ reg->sge.addr = sg_dma_address(&sg[0]);
+ reg->sge.length = sg_dma_len(&sg[0]);
iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
" length=0x%x\n", reg->sge.lkey, reg->rkey,
@@ -240,8 +239,8 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
page_vec->npages = 0;
page_vec->fake_mr.page_size = SIZE_4K;
plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
- mem->size, NULL, iser_set_page);
- if (unlikely(plen < mem->size)) {
+ mem->dma_nents, NULL, iser_set_page);
+ if (unlikely(plen < mem->dma_nents)) {
iser_err("page vec too short to hold this SG\n");
iser_data_buf_dump(mem, device->ib_device);
iser_dump_page_vec(page_vec);
@@ -448,10 +447,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K);
- if (unlikely(n != mem->size)) {
+ n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+ if (unlikely(n != mem->dma_nents)) {
iser_err("failed to map sg (%d/%d)\n",
- n, mem->size);
+ n, mem->dma_nents);
return n < 0 ? n : -EINVAL;
}
diff --git a/drivers/infiniband/ulp/isert/Makefile b/drivers/infiniband/ulp/isert/Makefile
index c8bf2421f5bc..a4a4766e3e18 100644
--- a/drivers/infiniband/ulp/isert/Makefile
+++ b/drivers/infiniband/ulp/isert/Makefile
@@ -1,2 +1 @@
-ccflags-y := -Idrivers/target -Idrivers/target/iscsi
obj-$(CONFIG_INFINIBAND_ISERT) += ib_isert.o
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index e3dd13798d79..989f1ac4245c 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1186,7 +1186,7 @@ sequence_cmd:
rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
if (!rc && dump_payload == false && unsol_data)
- iscsit_set_unsoliticed_dataout(cmd);
+ iscsit_set_unsolicited_dataout(cmd);
else if (dump_payload && imm_data)
target_put_sess_cmd(&cmd->se_cmd);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 694324b37480..be9ddcad8f28 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -443,8 +443,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
if (pool_size <= 0)
goto err;
ret = -ENOMEM;
- pool = kzalloc(sizeof(struct srp_fr_pool) +
- pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
+ pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
if (!pool)
goto err;
pool->size = pool_size;
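struct_size() is the overflow-safe spelling of the trailing-array arithmetic it replaces; roughly (a sketch of the semantics, not the exact kernel macro):

	/* struct_size(pool, desc, pool_size) evaluates to approximately: */
	sizeof(*pool) + pool_size * sizeof(pool->desc[0])
	/* ...but saturates to SIZE_MAX on overflow, so kzalloc() fails
	 * instead of returning an undersized buffer. */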
@@ -1601,9 +1600,8 @@ static int srp_map_sg_entry(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_device *ibdev = dev->dev;
- dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
- unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+ dma_addr_t dma_addr = sg_dma_address(sg);
+ unsigned int dma_len = sg_dma_len(sg);
unsigned int len = 0;
int ret;
@@ -1697,13 +1695,11 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
int count)
{
struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
struct scatterlist *sg;
int i;
for_each_sg(scat, sg, count, i) {
- srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
- ib_sg_dma_len(dev->dev, sg),
+ srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
target->global_rkey);
}
@@ -1853,8 +1849,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
buf->len = cpu_to_be32(data_len);
WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
for_each_sg(scat, sg, count, i) {
- sge[i].addr = ib_sg_dma_address(ibdev, sg);
- sge[i].length = ib_sg_dma_len(ibdev, sg);
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
sge[i].lkey = target->lkey;
}
req->cmd->num_sge += count;
@@ -1875,9 +1871,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_direct_buf *buf;
buf = (void *)cmd->add_data + cmd->add_cdb_len;
- buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+ buf->va = cpu_to_be64(sg_dma_address(scat));
buf->key = cpu_to_be32(target->global_rkey);
- buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
+ buf->len = cpu_to_be32(sg_dma_len(scat));
req->nmdesc = 0;
goto map_complete;
@@ -3814,6 +3810,7 @@ static ssize_t srp_create_target(struct device *dev,
target_host->max_id = 1;
target_host->max_lun = -1LL;
target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
+ target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
target = host_to_target(target_host);
@@ -4120,7 +4117,8 @@ static void srp_add_one(struct ib_device *device)
struct srp_device *srp_dev;
struct ib_device_attr *attr = &device->attrs;
struct srp_host *host;
- int mr_page_shift, p;
+ int mr_page_shift;
+ unsigned int p;
u64 max_pages_per_mr;
unsigned int flags = 0;
@@ -4187,7 +4185,7 @@ static void srp_add_one(struct ib_device *device)
WARN_ON_ONCE(srp_dev->global_rkey == 0);
}
- for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
+ rdma_for_each_port (device, p) {
host = srp_add_port(srp_dev, p);
if (host)
list_add_tail(&host->list, &srp_dev->dev_list);
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile
index e3ee4bdfffa5..43fbde42c58b 100644
--- a/drivers/infiniband/ulp/srpt/Makefile
+++ b/drivers/infiniband/ulp/srpt/Makefile
@@ -1,2 +1 @@
-ccflags-y := -Idrivers/target
obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index e9c336cff8f5..1a039f16d315 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1217,22 +1217,15 @@ static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
struct srpt_send_ioctx *ioctx;
- unsigned long flags;
+ int tag, cpu;
BUG_ON(!ch);
- ioctx = NULL;
- spin_lock_irqsave(&ch->spinlock, flags);
- if (!list_empty(&ch->free_list)) {
- ioctx = list_first_entry(&ch->free_list,
- struct srpt_send_ioctx, free_list);
- list_del(&ioctx->free_list);
- }
- spin_unlock_irqrestore(&ch->spinlock, flags);
-
- if (!ioctx)
- return ioctx;
+ tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ return NULL;
+ ioctx = ch->ioctx_ring[tag];
BUG_ON(ioctx->ch != ch);
ioctx->state = SRPT_STATE_NEW;
WARN_ON_ONCE(ioctx->recv_ioctx);
@@ -1245,6 +1238,8 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
*/
memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+ ioctx->cmd.map_tag = tag;
+ ioctx->cmd.map_cpu = cpu;
return ioctx;
}
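The tag and CPU returned by sbitmap_queue_get() are stored in the se_cmd below so the target core can return the tag to the per-CPU cache it came from; the matching release is the target_free_tag() call added to srpt_release_cmd() further down, and a negative tag simply means the pool (sized to ch->rq_size at session setup) is exhausted.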
@@ -1505,7 +1500,7 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
pr_err("0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
}
- goto release_ioctx;
+ goto busy;
}
rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
@@ -1516,13 +1511,12 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
if (rc != 0) {
pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
srp_cmd->tag);
- goto release_ioctx;
+ goto busy;
}
return;
-release_ioctx:
- send_ioctx->state = SRPT_STATE_DONE;
- srpt_release_cmd(cmd);
+busy:
+ target_send_busy(cmd);
}
static int srp_tmr_to_tcm(int fn)
@@ -1582,11 +1576,9 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
- goto fail;
+ cmd->se_tfo->queue_tm_rsp(cmd);
}
return;
-fail:
- transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}
/**
@@ -2151,7 +2143,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
struct srpt_rdma_ch *ch = NULL;
char i_port_id[36];
u32 it_iu_len;
- int i, ret;
+ int i, tag_num, tag_size, ret;
WARN_ON_ONCE(irqs_disabled());
@@ -2251,11 +2243,8 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
goto free_rsp_cache;
}
- INIT_LIST_HEAD(&ch->free_list);
- for (i = 0; i < ch->rq_size; i++) {
+ for (i = 0; i < ch->rq_size; i++)
ch->ioctx_ring[i]->ch = ch;
- list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
- }
if (!sdev->use_srq) {
u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
be16_to_cpu(req->imm_data_offset) : 0;
@@ -2309,18 +2298,20 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
pr_debug("registering session %s\n", ch->sess_name);
+ tag_num = ch->rq_size;
+ tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
if (sport->port_guid_tpg.se_tpg_wwn)
- ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0,
- TARGET_PROT_NORMAL,
+ ch->sess = target_setup_session(&sport->port_guid_tpg, tag_num,
+ tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
- TARGET_PROT_NORMAL, i_port_id, ch,
- NULL);
+ ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ tag_size, TARGET_PROT_NORMAL, i_port_id,
+ ch, NULL);
/* Retry without leading "0x" */
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
- TARGET_PROT_NORMAL,
+ ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
@@ -2703,14 +2694,6 @@ static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
return ret;
}
-static int srpt_write_pending_status(struct se_cmd *se_cmd)
-{
- struct srpt_send_ioctx *ioctx;
-
- ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return ioctx->state == SRPT_STATE_NEED_DATA;
-}
-
/*
* srpt_write_pending - Start data transfer from initiator to target (write).
*/
@@ -2887,8 +2870,19 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
srpt_queue_response(cmd);
}
+/*
+ * This function is called for aborted commands if no response is sent to the
+ * initiator. Make sure that the credits freed by aborting a command are
+ * returned to the initiator the next time a response is sent by incrementing
+ * ch->req_lim_delta.
+ */
static void srpt_aborted_task(struct se_cmd *cmd)
{
+ struct srpt_send_ioctx *ioctx = container_of(cmd,
+ struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+
+ atomic_inc(&ch->req_lim_delta);
}
static int srpt_queue_status(struct se_cmd *cmd)
@@ -3290,7 +3284,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
- unsigned long flags;
WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
!(ioctx->cmd.transport_state & CMD_T_ABORTED));
@@ -3306,9 +3299,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
ioctx->n_rw_ctx = 0;
}
- spin_lock_irqsave(&ch->spinlock, flags);
- list_add(&ioctx->free_list, &ch->free_list);
- spin_unlock_irqrestore(&ch->spinlock, flags);
+ target_free_tag(se_cmd->se_sess, se_cmd);
}
/**
@@ -3806,7 +3797,6 @@ static const struct target_core_fabric_ops srpt_template = {
.sess_get_index = srpt_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = srpt_write_pending,
- .write_pending_status = srpt_write_pending_status,
.set_default_node_attributes = srpt_set_default_node_attrs,
.get_cmd_state = srpt_get_tcm_cmd_state,
.queue_data_in = srpt_queue_data_in,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 39b3e50baf3d..ee9f20e9177a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -207,7 +207,6 @@ struct srpt_rw_ctx {
* @rw_ctxs: RDMA read/write contexts.
* @imm_sg: Scatterlist for immediate data.
* @rdma_cqe: RDMA completion queue element.
- * @free_list: Node in srpt_rdma_ch.free_list.
* @state: I/O context state.
* @cmd: Target core command data structure.
* @sense_data: SCSI sense data.
@@ -227,7 +226,6 @@ struct srpt_send_ioctx {
struct scatterlist imm_sg;
struct ib_cqe rdma_cqe;
- struct list_head free_list;
enum srpt_command_state state;
struct se_cmd cmd;
u8 n_rdma;
@@ -277,7 +275,6 @@ enum rdma_ch_state {
* @req_lim_delta: Number of credits not yet sent back to the initiator.
* @imm_data_offset: Offset from start of SRP_CMD for immediate data.
* @spinlock: Protects free_list and state.
- * @free_list: Head of list with free send I/O contexts.
* @state: channel state. See also enum rdma_ch_state.
* @using_rdma_cm: Whether the RDMA/CM or IB/CM is used for this channel.
* @processing_wait_list: Whether or not cmd_wait_list is being processed.
@@ -318,7 +315,6 @@ struct srpt_rdma_ch {
atomic_t req_lim_delta;
u16 imm_data_offset;
spinlock_t spinlock;
- struct list_head free_list;
enum rdma_ch_state state;
struct kmem_cache *rsp_buf_cache;
struct srpt_send_ioctx **ioctx_ring;
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index 804b1b80a8be..5a52b65bef9a 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -259,7 +259,7 @@ static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char
db9_saturn_write_sub(port, type, 3, powered, 0);
return data[0] = 0xe3;
}
- /* else: fall through */
+ /* fall through */
default:
return data[0];
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a878351f1643..52d7f55fca32 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
config KEYBOARD_SNVS_PWRKEY
tristate "IMX SNVS Power Key Driver"
- depends on SOC_IMX6SX || SOC_IMX7D
+	depends on ARCH_MXC || COMPILE_TEST
depends on OF
help
This is the snvs powerkey driver for the Freescale i.MX application
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 492a971b95b5..6cd199e8a370 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -1015,8 +1015,18 @@ static int __maybe_unused gpio_keys_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
+static void gpio_keys_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = gpio_keys_suspend(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
static struct platform_driver gpio_keys_device_driver = {
.probe = gpio_keys_probe,
+ .shutdown = gpio_keys_shutdown,
.driver = {
.name = "gpio-keys",
.pm = &gpio_keys_pm_ops,
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index be56d4f262a7..b132662201a4 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -113,9 +113,8 @@ static int mcs_touchkey_probe(struct i2c_client *client,
return -EINVAL;
}
- data = kzalloc(sizeof(struct mcs_touchkey_data) +
- sizeof(data->keycodes[0]) * (pdata->key_maxval + 1),
- GFP_KERNEL);
+ data = kzalloc(struct_size(data, keycodes, pdata->key_maxval + 1),
+ GFP_KERNEL);
input_dev = input_allocate_device();
if (!data || !input_dev) {
dev_err(&client->dev, "Failed to allocate memory\n");
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 02c67a1749fc..8e6ebab05ab4 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -14,18 +14,17 @@
*
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
#include <linux/mfd/mt6323/registers.h>
-#include <linux/mfd/mt6397/registers.h>
#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6397/registers.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
#define MTK_PMIC_PWRKEY_RST_EN_MASK 0x1
#define MTK_PMIC_PWRKEY_RST_EN_SHIFT 6
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index d466bc07aebb..6a43895b28e7 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -68,7 +68,6 @@ struct qt2160_data {
struct i2c_client *client;
struct input_dev *input;
struct delayed_work dwork;
- spinlock_t lock; /* Protects canceling/rescheduling of dwork */
unsigned short keycodes[ARRAY_SIZE(qt2160_key2code)];
u16 key_matrix;
#ifdef CONFIG_LEDS_CLASS
@@ -212,22 +211,15 @@ static int qt2160_get_key_matrix(struct qt2160_data *qt2160)
static irqreturn_t qt2160_irq(int irq, void *_qt2160)
{
struct qt2160_data *qt2160 = _qt2160;
- unsigned long flags;
-
- spin_lock_irqsave(&qt2160->lock, flags);
mod_delayed_work(system_wq, &qt2160->dwork, 0);
- spin_unlock_irqrestore(&qt2160->lock, flags);
-
return IRQ_HANDLED;
}
static void qt2160_schedule_read(struct qt2160_data *qt2160)
{
- spin_lock_irq(&qt2160->lock);
schedule_delayed_work(&qt2160->dwork, QT2160_CYCLE_INTERVAL);
- spin_unlock_irq(&qt2160->lock);
}
static void qt2160_worker(struct work_struct *work)
@@ -391,7 +383,6 @@ static int qt2160_probe(struct i2c_client *client,
qt2160->client = client;
qt2160->input = input;
INIT_DELAYED_WORK(&qt2160->dwork, qt2160_worker);
- spin_lock_init(&qt2160->lock);
input->name = "AT42QT2160 Touch Sense Keyboard";
input->id.bustype = BUS_I2C;
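
The lock could go because mod_delayed_work() and schedule_delayed_work() are already safe against concurrent callers, including hard-IRQ context; the workqueue core serializes timer rearming internally, so the spinlock around these calls protected nothing. The synaptics_i2c hunks further down make the same simplification. A sketch of the resulting lock-free IRQ path (this mirrors the patch itself):

	static irqreturn_t qt2160_irq(int irq, void *_qt2160)
	{
		struct qt2160_data *qt2160 = _qt2160;

		/* Atomically (re)arms dwork; no caller locking required. */
		mod_delayed_work(system_wq, &qt2160->dwork, 0);
		return IRQ_HANDLED;
	}
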
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index effb63205d3d..4c67cf30a5d9 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}
+ pdata->input = input;
+ platform_set_drvdata(pdev, pdata);
+
error = devm_request_irq(&pdev->dev, pdata->irq,
imx_snvs_pwrkey_interrupt,
0, pdev->name, pdev);
@@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}
- pdata->input = input;
- platform_set_drvdata(pdev, pdata);
-
device_init_wakeup(&pdev->dev, pdata->wakeup);
return 0;
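
Rationale sketch for the reordering: devm_request_irq() can deliver an interrupt as soon as it returns (for instance a pending wake event), and the handler presumably retrieves pdata via platform_get_drvdata(); publishing the drvdata and input pointer before requesting the IRQ closes the window in which the handler could run against uninitialized state:

	/* Safe ordering: publish the state, then enable its consumer. */
	platform_set_drvdata(pdev, pdata);
	error = devm_request_irq(&pdev->dev, pdata->irq,
				 imx_snvs_pwrkey_interrupt,
				 0, pdev->name, pdev);
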
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index dc983ab6c0ad..cdeef180aead 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -219,9 +219,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
return -EINVAL;
}
- chip = kzalloc(sizeof(struct tca6416_keypad_chip) +
- pdata->nbuttons * sizeof(struct tca6416_button),
- GFP_KERNEL);
+ chip = kzalloc(struct_size(chip, buttons, pdata->nbuttons), GFP_KERNEL);
input = input_allocate_device();
if (!chip || !input) {
error = -ENOMEM;
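
Both this hunk and the mcs_touchkey one above replace open-coded trailing-array size arithmetic with struct_size() from <linux/overflow.h>. A sketch of the equivalence, assuming buttons is declared as a flexible array member (which struct_size() requires):

	/*
	 * struct_size(chip, buttons, n)
	 *   == sizeof(*chip) + n * sizeof(chip->buttons[0]),
	 * except that the multiply/add saturates to SIZE_MAX on overflow,
	 * so an absurd pdata->nbuttons makes kzalloc() fail cleanly
	 * instead of returning a short buffer the keypad code would then
	 * overrun.
	 */
	chip = kzalloc(struct_size(chip, buttons, pdata->nbuttons), GFP_KERNEL);
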
diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c
index abc266e40e17..d4455f3a5cf1 100644
--- a/drivers/input/keyboard/tm2-touchkey.c
+++ b/drivers/input/keyboard/tm2-touchkey.c
@@ -22,12 +22,14 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#define TM2_TOUCHKEY_DEV_NAME "tm2-touchkey"
-#define TM2_TOUCHKEY_KEYCODE_REG 0x03
-#define TM2_TOUCHKEY_BASE_REG 0x00
+
+#define ARIES_TOUCHKEY_CMD_LED_ON 0x1
+#define ARIES_TOUCHKEY_CMD_LED_OFF 0x2
#define TM2_TOUCHKEY_CMD_LED_ON 0x10
#define TM2_TOUCHKEY_CMD_LED_OFF 0x20
#define TM2_TOUCHKEY_BIT_PRESS_EV BIT(3)
@@ -35,9 +37,13 @@
#define TM2_TOUCHKEY_LED_VOLTAGE_MIN 2500000
#define TM2_TOUCHKEY_LED_VOLTAGE_MAX 3300000
-enum {
- TM2_TOUCHKEY_KEY_MENU = 0x1,
- TM2_TOUCHKEY_KEY_BACK,
+struct touchkey_variant {
+ u8 keycode_reg;
+ u8 base_reg;
+ u8 cmd_led_on;
+ u8 cmd_led_off;
+ bool no_reg;
+ bool fixed_regulator;
};
struct tm2_touchkey_data {
@@ -46,9 +52,33 @@ struct tm2_touchkey_data {
struct led_classdev led_dev;
struct regulator *vdd;
struct regulator_bulk_data regulators[2];
+ const struct touchkey_variant *variant;
+ u32 keycodes[4];
+ int num_keycodes;
+};
+
+static const struct touchkey_variant tm2_touchkey_variant = {
+ .keycode_reg = 0x03,
+ .base_reg = 0x00,
+ .cmd_led_on = TM2_TOUCHKEY_CMD_LED_ON,
+ .cmd_led_off = TM2_TOUCHKEY_CMD_LED_OFF,
+};
+
+static const struct touchkey_variant midas_touchkey_variant = {
+ .keycode_reg = 0x00,
+ .base_reg = 0x00,
+ .cmd_led_on = TM2_TOUCHKEY_CMD_LED_ON,
+ .cmd_led_off = TM2_TOUCHKEY_CMD_LED_OFF,
};
-static void tm2_touchkey_led_brightness_set(struct led_classdev *led_dev,
+static struct touchkey_variant aries_touchkey_variant = {
+ .no_reg = true,
+ .fixed_regulator = true,
+ .cmd_led_on = ARIES_TOUCHKEY_CMD_LED_ON,
+ .cmd_led_off = ARIES_TOUCHKEY_CMD_LED_OFF,
+};
+
+static int tm2_touchkey_led_brightness_set(struct led_classdev *led_dev,
enum led_brightness brightness)
{
struct tm2_touchkey_data *touchkey =
@@ -58,15 +88,19 @@ static void tm2_touchkey_led_brightness_set(struct led_classdev *led_dev,
if (brightness == LED_OFF) {
volt = TM2_TOUCHKEY_LED_VOLTAGE_MIN;
- data = TM2_TOUCHKEY_CMD_LED_OFF;
+ data = touchkey->variant->cmd_led_off;
} else {
volt = TM2_TOUCHKEY_LED_VOLTAGE_MAX;
- data = TM2_TOUCHKEY_CMD_LED_ON;
+ data = touchkey->variant->cmd_led_on;
}
- regulator_set_voltage(touchkey->vdd, volt, volt);
- i2c_smbus_write_byte_data(touchkey->client,
- TM2_TOUCHKEY_BASE_REG, data);
+ if (!touchkey->variant->fixed_regulator)
+ regulator_set_voltage(touchkey->vdd, volt, volt);
+
+ return touchkey->variant->no_reg ?
+ i2c_smbus_write_byte(touchkey->client, data) :
+ i2c_smbus_write_byte_data(touchkey->client,
+ touchkey->variant->base_reg, data);
}
static int tm2_touchkey_power_enable(struct tm2_touchkey_data *touchkey)
@@ -96,49 +130,57 @@ static irqreturn_t tm2_touchkey_irq_handler(int irq, void *devid)
{
struct tm2_touchkey_data *touchkey = devid;
int data;
- int key;
-
- data = i2c_smbus_read_byte_data(touchkey->client,
- TM2_TOUCHKEY_KEYCODE_REG);
+ int index;
+ int i;
+
+ if (touchkey->variant->no_reg)
+ data = i2c_smbus_read_byte(touchkey->client);
+ else
+ data = i2c_smbus_read_byte_data(touchkey->client,
+ touchkey->variant->keycode_reg);
if (data < 0) {
dev_err(&touchkey->client->dev,
"failed to read i2c data: %d\n", data);
goto out;
}
- switch (data & TM2_TOUCHKEY_BIT_KEYCODE) {
- case TM2_TOUCHKEY_KEY_MENU:
- key = KEY_PHONE;
- break;
-
- case TM2_TOUCHKEY_KEY_BACK:
- key = KEY_BACK;
- break;
-
- default:
+ index = (data & TM2_TOUCHKEY_BIT_KEYCODE) - 1;
+ if (index < 0 || index >= touchkey->num_keycodes) {
dev_warn(&touchkey->client->dev,
- "unhandled keycode, data %#02x\n", data);
+ "invalid keycode index %d\n", index);
goto out;
}
if (data & TM2_TOUCHKEY_BIT_PRESS_EV) {
- input_report_key(touchkey->input_dev, KEY_PHONE, 0);
- input_report_key(touchkey->input_dev, KEY_BACK, 0);
+ for (i = 0; i < touchkey->num_keycodes; i++)
+ input_report_key(touchkey->input_dev,
+ touchkey->keycodes[i], 0);
} else {
- input_report_key(touchkey->input_dev, key, 1);
+ input_report_key(touchkey->input_dev,
+ touchkey->keycodes[index], 1);
}
input_sync(touchkey->input_dev);
out:
+ if (touchkey->variant->fixed_regulator &&
+ data & TM2_TOUCHKEY_BIT_PRESS_EV) {
+ /* touch turns backlight on, so make sure we're in sync */
+ if (touchkey->led_dev.brightness == LED_OFF)
+ tm2_touchkey_led_brightness_set(&touchkey->led_dev,
+ LED_OFF);
+ }
+
return IRQ_HANDLED;
}
static int tm2_touchkey_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device_node *np = client->dev.of_node;
struct tm2_touchkey_data *touchkey;
int error;
+ int i;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -153,6 +195,8 @@ static int tm2_touchkey_probe(struct i2c_client *client,
touchkey->client = client;
i2c_set_clientdata(client, touchkey);
+ touchkey->variant = of_device_get_match_data(&client->dev);
+
touchkey->regulators[0].supply = "vcc";
touchkey->regulators[1].supply = "vdd";
error = devm_regulator_bulk_get(&client->dev,
@@ -166,6 +210,16 @@ static int tm2_touchkey_probe(struct i2c_client *client,
/* Save VDD for easy access */
touchkey->vdd = touchkey->regulators[1].consumer;
+ touchkey->num_keycodes = of_property_read_variable_u32_array(np,
+ "linux,keycodes", touchkey->keycodes, 0,
+ ARRAY_SIZE(touchkey->keycodes));
+ if (touchkey->num_keycodes <= 0) {
+ /* default keycodes */
+ touchkey->keycodes[0] = KEY_PHONE;
+ touchkey->keycodes[1] = KEY_BACK;
+ touchkey->num_keycodes = 2;
+ }
+
error = tm2_touchkey_power_enable(touchkey);
if (error) {
dev_err(&client->dev, "failed to power up device: %d\n", error);
@@ -190,8 +244,9 @@ static int tm2_touchkey_probe(struct i2c_client *client,
touchkey->input_dev->name = TM2_TOUCHKEY_DEV_NAME;
touchkey->input_dev->id.bustype = BUS_I2C;
- input_set_capability(touchkey->input_dev, EV_KEY, KEY_PHONE);
- input_set_capability(touchkey->input_dev, EV_KEY, KEY_BACK);
+ for (i = 0; i < touchkey->num_keycodes; i++)
+ input_set_capability(touchkey->input_dev, EV_KEY,
+ touchkey->keycodes[i]);
error = input_register_device(touchkey->input_dev);
if (error) {
@@ -212,9 +267,10 @@ static int tm2_touchkey_probe(struct i2c_client *client,
/* led device */
touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME;
- touchkey->led_dev.brightness = LED_FULL;
+ touchkey->led_dev.brightness = LED_ON;
touchkey->led_dev.max_brightness = LED_ON;
- touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set;
+ touchkey->led_dev.brightness_set_blocking =
+ tm2_touchkey_led_brightness_set;
error = devm_led_classdev_register(&client->dev, &touchkey->led_dev);
if (error) {
@@ -223,6 +279,9 @@ static int tm2_touchkey_probe(struct i2c_client *client,
return error;
}
+ if (touchkey->variant->fixed_regulator)
+ tm2_touchkey_led_brightness_set(&touchkey->led_dev, LED_ON);
+
return 0;
}
@@ -262,7 +321,16 @@ static const struct i2c_device_id tm2_touchkey_id_table[] = {
MODULE_DEVICE_TABLE(i2c, tm2_touchkey_id_table);
static const struct of_device_id tm2_touchkey_of_match[] = {
- { .compatible = "cypress,tm2-touchkey", },
+ {
+ .compatible = "cypress,tm2-touchkey",
+ .data = &tm2_touchkey_variant,
+ }, {
+ .compatible = "cypress,midas-touchkey",
+ .data = &midas_touchkey_variant,
+ }, {
+ .compatible = "cypress,aries-touchkey",
+ .data = &aries_touchkey_variant,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, tm2_touchkey_of_match);
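
The variant plumbing works by stashing a pointer to the per-compatible struct touchkey_variant in the OF match table's .data field; of_device_get_match_data() hands it back in probe(). A sketch of the lookup, where the NULL check is an addition of this sketch, not of the patch (it would only matter when binding through the plain i2c_device_id table, where no OF node supplies match data):

	touchkey->variant = of_device_get_match_data(&client->dev);
	if (!touchkey->variant)
		return -ENODEV;	/* hypothetical guard, not in the patch */
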
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 279fb02a0f14..e15ed1bb8558 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -117,6 +117,16 @@ config INPUT_E3X0_BUTTON
To compile this driver as a module, choose M here: the
module will be called e3x0_button.
+config INPUT_MSM_VIBRATOR
+ tristate "Qualcomm MSM vibrator driver"
+ select INPUT_FF_MEMLESS
+ help
+ Support for the vibrator that is found on various Qualcomm MSM
+ SOCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called msm_vibrator.
+
config INPUT_PCSPKR
tristate "PC Speaker support"
depends on PCSPKR_PLATFORM
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 1b44202ad8f7..b936c5b1d4ac 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o
obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
+obj-$(CONFIG_INPUT_MSM_VIBRATOR) += msm-vibrator.o
obj-$(CONFIG_INPUT_PALMAS_PWRBUTTON) += palmas-pwrbutton.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 3d51175c4d72..74cf3b612f05 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -39,8 +39,6 @@ struct ims_pcu_gamepad {
struct ims_pcu_backlight {
struct led_classdev cdev;
- struct work_struct work;
- enum led_brightness desired_brightness;
char name[32];
};
@@ -949,14 +947,14 @@ out:
#define IMS_PCU_MAX_BRIGHTNESS 31998
-static void ims_pcu_backlight_work(struct work_struct *work)
+static int ims_pcu_backlight_set_brightness(struct led_classdev *cdev,
+ enum led_brightness value)
{
struct ims_pcu_backlight *backlight =
- container_of(work, struct ims_pcu_backlight, work);
+ container_of(cdev, struct ims_pcu_backlight, cdev);
struct ims_pcu *pcu =
container_of(backlight, struct ims_pcu, backlight);
- int desired_brightness = backlight->desired_brightness;
- __le16 br_val = cpu_to_le16(desired_brightness);
+ __le16 br_val = cpu_to_le16(value);
int error;
mutex_lock(&pcu->cmd_mutex);
@@ -966,19 +964,11 @@ static void ims_pcu_backlight_work(struct work_struct *work)
if (error && error != -ENODEV)
dev_warn(pcu->dev,
"Failed to set desired brightness %u, error: %d\n",
- desired_brightness, error);
+ value, error);
mutex_unlock(&pcu->cmd_mutex);
-}
-static void ims_pcu_backlight_set_brightness(struct led_classdev *cdev,
- enum led_brightness value)
-{
- struct ims_pcu_backlight *backlight =
- container_of(cdev, struct ims_pcu_backlight, cdev);
-
- backlight->desired_brightness = value;
- schedule_work(&backlight->work);
+ return error;
}
static enum led_brightness
@@ -1015,14 +1005,14 @@ static int ims_pcu_setup_backlight(struct ims_pcu *pcu)
struct ims_pcu_backlight *backlight = &pcu->backlight;
int error;
- INIT_WORK(&backlight->work, ims_pcu_backlight_work);
snprintf(backlight->name, sizeof(backlight->name),
"pcu%d::kbd_backlight", pcu->device_no);
backlight->cdev.name = backlight->name;
backlight->cdev.max_brightness = IMS_PCU_MAX_BRIGHTNESS;
backlight->cdev.brightness_get = ims_pcu_backlight_get_brightness;
- backlight->cdev.brightness_set = ims_pcu_backlight_set_brightness;
+ backlight->cdev.brightness_set_blocking =
+ ims_pcu_backlight_set_brightness;
error = led_classdev_register(pcu->dev, &backlight->cdev);
if (error) {
@@ -1040,7 +1030,6 @@ static void ims_pcu_destroy_backlight(struct ims_pcu *pcu)
struct ims_pcu_backlight *backlight = &pcu->backlight;
led_classdev_unregister(&backlight->cdev);
- cancel_work_sync(&backlight->work);
}
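
This conversion, like the tm2-touchkey one above, trades a driver-private work item for the LED core's own deferral: .brightness_set must not sleep, so bus-bound drivers used to schedule their own work, whereas .brightness_set_blocking may sleep and return an error, and the LED core invokes it from process context (deferring to its internal work item when the request arrives from atomic context). A sketch of the shape such a callback takes, where my_bus_write is a hypothetical helper:

	static int my_led_set_blocking(struct led_classdev *cdev,
				       enum led_brightness value)
	{
		/* May sleep: I2C/USB transfers are fine here. */
		return my_bus_write(cdev, value);
	}

	cdev->brightness_set_blocking = my_led_set_blocking;
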
diff --git a/drivers/input/misc/msm-vibrator.c b/drivers/input/misc/msm-vibrator.c
new file mode 100644
index 000000000000..b60f1aaee705
--- /dev/null
+++ b/drivers/input/misc/msm-vibrator.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Qualcomm MSM vibrator driver
+ *
+ * Copyright (c) 2018 Brian Masney <masneyb@onstation.org>
+ *
+ * Based on qcom,pwm-vibrator.c from:
+ * Copyright (c) 2018 Jonathan Marek <jonathan@marek.ca>
+ *
+ * Based on msm_pwm_vibrator.c from downstream Android sources:
+ * Copyright (C) 2009-2014 LGE, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define REG_CMD_RCGR 0x00
+#define REG_CFG_RCGR 0x04
+#define REG_M 0x08
+#define REG_N 0x0C
+#define REG_D 0x10
+#define REG_CBCR 0x24
+#define MMSS_CC_M_DEFAULT 1
+
+struct msm_vibrator {
+ struct input_dev *input;
+ struct mutex mutex;
+ struct work_struct worker;
+ void __iomem *base;
+ struct regulator *vcc;
+ struct clk *clk;
+ struct gpio_desc *enable_gpio;
+ u16 magnitude;
+ bool enabled;
+};
+
+static void msm_vibrator_write(struct msm_vibrator *vibrator, int offset,
+ u32 value)
+{
+ writel(value, vibrator->base + offset);
+}
+
+static int msm_vibrator_start(struct msm_vibrator *vibrator)
+{
+ int d_reg_val, ret = 0;
+
+ mutex_lock(&vibrator->mutex);
+
+ if (!vibrator->enabled) {
+ ret = clk_set_rate(vibrator->clk, 24000);
+ if (ret) {
+ dev_err(&vibrator->input->dev,
+ "Failed to set clock rate: %d\n", ret);
+ goto unlock;
+ }
+
+ ret = clk_prepare_enable(vibrator->clk);
+ if (ret) {
+ dev_err(&vibrator->input->dev,
+ "Failed to enable clock: %d\n", ret);
+ goto unlock;
+ }
+
+ ret = regulator_enable(vibrator->vcc);
+ if (ret) {
+ dev_err(&vibrator->input->dev,
+ "Failed to enable regulator: %d\n", ret);
+ clk_disable(vibrator->clk);
+ goto unlock;
+ }
+
+ gpiod_set_value_cansleep(vibrator->enable_gpio, 1);
+
+ vibrator->enabled = true;
+ }
+
+ d_reg_val = 127 - ((126 * vibrator->magnitude) / 0xffff);
+ msm_vibrator_write(vibrator, REG_CFG_RCGR,
+ (2 << 12) | /* dual edge mode */
+ (0 << 8) | /* cxo */
+ (7 << 0));
+ msm_vibrator_write(vibrator, REG_M, 1);
+ msm_vibrator_write(vibrator, REG_N, 128);
+ msm_vibrator_write(vibrator, REG_D, d_reg_val);
+ msm_vibrator_write(vibrator, REG_CMD_RCGR, 1);
+ msm_vibrator_write(vibrator, REG_CBCR, 1);
+
+unlock:
+ mutex_unlock(&vibrator->mutex);
+
+ return ret;
+}
+
+static void msm_vibrator_stop(struct msm_vibrator *vibrator)
+{
+ mutex_lock(&vibrator->mutex);
+
+ if (vibrator->enabled) {
+ gpiod_set_value_cansleep(vibrator->enable_gpio, 0);
+ regulator_disable(vibrator->vcc);
+ clk_disable(vibrator->clk);
+ vibrator->enabled = false;
+ }
+
+ mutex_unlock(&vibrator->mutex);
+}
+
+static void msm_vibrator_worker(struct work_struct *work)
+{
+ struct msm_vibrator *vibrator = container_of(work,
+ struct msm_vibrator,
+ worker);
+
+ if (vibrator->magnitude)
+ msm_vibrator_start(vibrator);
+ else
+ msm_vibrator_stop(vibrator);
+}
+
+static int msm_vibrator_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct msm_vibrator *vibrator = input_get_drvdata(dev);
+
+ mutex_lock(&vibrator->mutex);
+
+ if (effect->u.rumble.strong_magnitude > 0)
+ vibrator->magnitude = effect->u.rumble.strong_magnitude;
+ else
+ vibrator->magnitude = effect->u.rumble.weak_magnitude;
+
+ mutex_unlock(&vibrator->mutex);
+
+ schedule_work(&vibrator->worker);
+
+ return 0;
+}
+
+static void msm_vibrator_close(struct input_dev *input)
+{
+ struct msm_vibrator *vibrator = input_get_drvdata(input);
+
+ cancel_work_sync(&vibrator->worker);
+ msm_vibrator_stop(vibrator);
+}
+
+static int msm_vibrator_probe(struct platform_device *pdev)
+{
+ struct msm_vibrator *vibrator;
+ struct resource *res;
+ int ret;
+
+ vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL);
+ if (!vibrator)
+ return -ENOMEM;
+
+ vibrator->input = devm_input_allocate_device(&pdev->dev);
+ if (!vibrator->input)
+ return -ENOMEM;
+
+ vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
+ if (IS_ERR(vibrator->vcc)) {
+ if (PTR_ERR(vibrator->vcc) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get regulator: %ld\n",
+ PTR_ERR(vibrator->vcc));
+ return PTR_ERR(vibrator->vcc);
+ }
+
+ vibrator->enable_gpio = devm_gpiod_get(&pdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(vibrator->enable_gpio)) {
+ if (PTR_ERR(vibrator->enable_gpio) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get enable gpio: %ld\n",
+ PTR_ERR(vibrator->enable_gpio));
+ return PTR_ERR(vibrator->enable_gpio);
+ }
+
+ vibrator->clk = devm_clk_get(&pdev->dev, "pwm");
+ if (IS_ERR(vibrator->clk)) {
+ if (PTR_ERR(vibrator->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to lookup pwm clock: %ld\n",
+ PTR_ERR(vibrator->clk));
+ return PTR_ERR(vibrator->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ vibrator->base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vibrator->base) {
+ dev_err(&pdev->dev, "Failed to iomap resource.\n");
+ return -ENOMEM;
+ }
+
+ vibrator->enabled = false;
+ mutex_init(&vibrator->mutex);
+ INIT_WORK(&vibrator->worker, msm_vibrator_worker);
+
+ vibrator->input->name = "msm-vibrator";
+ vibrator->input->id.bustype = BUS_HOST;
+ vibrator->input->close = msm_vibrator_close;
+
+ input_set_drvdata(vibrator->input, vibrator);
+ input_set_capability(vibrator->input, EV_FF, FF_RUMBLE);
+
+ ret = input_ff_create_memless(vibrator->input, NULL,
+ msm_vibrator_play_effect);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to create ff memless: %d", ret);
+	/* (note: the format strings above lack a trailing newline) */
+ return ret;
+ }
+
+ ret = input_register_device(vibrator->input);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register input device: %d", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, vibrator);
+
+ return 0;
+}
+
+static int __maybe_unused msm_vibrator_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_vibrator *vibrator = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&vibrator->worker);
+
+ if (vibrator->enabled)
+ msm_vibrator_stop(vibrator);
+
+ return 0;
+}
+
+static int __maybe_unused msm_vibrator_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_vibrator *vibrator = platform_get_drvdata(pdev);
+
+ if (vibrator->enabled)
+ msm_vibrator_start(vibrator);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(msm_vibrator_pm_ops, msm_vibrator_suspend,
+ msm_vibrator_resume);
+
+static const struct of_device_id msm_vibrator_of_match[] = {
+ { .compatible = "qcom,msm8226-vibrator" },
+ { .compatible = "qcom,msm8974-vibrator" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, msm_vibrator_of_match);
+
+static struct platform_driver msm_vibrator_driver = {
+ .probe = msm_vibrator_probe,
+ .driver = {
+ .name = "msm-vibrator",
+ .pm = &msm_vibrator_pm_ops,
+ .of_match_table = of_match_ptr(msm_vibrator_of_match),
+ },
+};
+module_platform_driver(msm_vibrator_driver);
+
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_DESCRIPTION("Qualcomm MSM vibrator driver");
+MODULE_LICENSE("GPL");
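
The drive strength comes from the M/N/D divider programmed in msm_vibrator_start(): with M = 1 and N = 128 fixed, only REG_D varies with the force-feedback magnitude. Worked values of d_reg_val = 127 - ((126 * magnitude) / 0xffff):

	magnitude = 0x0000  ->  d = 127 - 0   = 127
	magnitude = 0x8000  ->  d = 127 - 63  = 64
	magnitude = 0xffff  ->  d = 127 - 126 = 1

so the register sweeps from 127 down to 1; judging by the downstream sources this driver is based on, a smaller D value yields a longer PWM high phase and thus a stronger vibration (an inference, not stated in the driver itself).
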
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 23520df7650f..bb458beecb43 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -185,6 +185,10 @@ static int soc_button_parse_btn_desc(struct device *dev,
info->name = "power";
info->event_code = KEY_POWER;
info->wakeup = true;
+ } else if (upage == 0x01 && usage == 0xca) {
+ info->name = "rotation lock switch";
+ info->event_type = EV_SW;
+ info->event_code = SW_ROTATE_LOCK;
} else if (upage == 0x07 && usage == 0xe3) {
info->name = "home";
info->event_code = KEY_LEFTMETA;
@@ -373,7 +377,7 @@ static struct soc_button_info soc_button_PNP0C40[] = {
{ "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
{ "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
{ "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
- { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
+ { "rotation_lock", 4, EV_KEY, KEY_ROTATE_LOCK_TOGGLE, false, false },
{ }
};
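
The two rotation-lock changes are complementary: a lock reported through the ACPI button descriptor (usage page 0x01, usage 0xca) is a latching switch, so it maps to EV_SW/SW_ROTATE_LOCK, while position 4 of the PNP0C40 GPIO array is apparently a momentary push-button on shipping hardware, hence EV_KEY/KEY_ROTATE_LOCK_TOGGLE. A sketch of how the two event types differ at report time:

	/* EV_SW carries state ... */
	input_report_switch(input, SW_ROTATE_LOCK, locked);
	/* ... EV_KEY carries a press/release pulse. */
	input_report_key(input, KEY_ROTATE_LOCK_TOGGLE, 1);
	input_report_key(input, KEY_ROTATE_LOCK_TOGGLE, 0);
	input_sync(input);
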
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 225ae6980182..f9525d6f0bfe 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1337,22 +1337,48 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
+ { "ELAN0601", 0 },
{ "ELAN0602", 0 },
+ { "ELAN0603", 0 },
+ { "ELAN0604", 0 },
{ "ELAN0605", 0 },
+ { "ELAN0606", 0 },
+ { "ELAN0607", 0 },
{ "ELAN0608", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
{ "ELAN060C", 0 },
+ { "ELAN060F", 0 },
+ { "ELAN0610", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
+ { "ELAN0615", 0 },
+ { "ELAN0616", 0 },
{ "ELAN0617", 0 },
{ "ELAN0618", 0 },
+ { "ELAN0619", 0 },
+ { "ELAN061A", 0 },
+ { "ELAN061B", 0 },
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
{ "ELAN061E", 0 },
+ { "ELAN061F", 0 },
{ "ELAN0620", 0 },
{ "ELAN0621", 0 },
{ "ELAN0622", 0 },
+ { "ELAN0623", 0 },
+ { "ELAN0624", 0 },
+ { "ELAN0625", 0 },
+ { "ELAN0626", 0 },
+ { "ELAN0627", 0 },
+ { "ELAN0628", 0 },
+ { "ELAN0629", 0 },
+ { "ELAN062A", 0 },
+ { "ELAN062B", 0 },
+ { "ELAN062C", 0 },
+ { "ELAN062D", 0 },
+ { "ELAN0631", 0 },
+ { "ELAN0632", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 8538318d332c..fa304648d611 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -219,7 +219,6 @@ struct synaptics_i2c {
struct i2c_client *client;
struct input_dev *input;
struct delayed_work dwork;
- spinlock_t lock;
int no_data_count;
int no_decel_param;
int reduce_report_param;
@@ -369,23 +368,11 @@ static bool synaptics_i2c_get_input(struct synaptics_i2c *touch)
return xy_delta || gesture;
}
-static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,
- unsigned long delay)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&touch->lock, flags);
-
- mod_delayed_work(system_wq, &touch->dwork, delay);
-
- spin_unlock_irqrestore(&touch->lock, flags);
-}
-
static irqreturn_t synaptics_i2c_irq(int irq, void *dev_id)
{
struct synaptics_i2c *touch = dev_id;
- synaptics_i2c_reschedule_work(touch, 0);
+ mod_delayed_work(system_wq, &touch->dwork, 0);
return IRQ_HANDLED;
}
@@ -461,7 +448,7 @@ static void synaptics_i2c_work_handler(struct work_struct *work)
* We poll the device once in THREAD_IRQ_SLEEP_SECS and
* if error is detected, we try to reset and reconfigure the touchpad.
*/
- synaptics_i2c_reschedule_work(touch, delay);
+ mod_delayed_work(system_wq, &touch->dwork, delay);
}
static int synaptics_i2c_open(struct input_dev *input)
@@ -474,7 +461,7 @@ static int synaptics_i2c_open(struct input_dev *input)
return ret;
if (polling_req)
- synaptics_i2c_reschedule_work(touch,
+ mod_delayed_work(system_wq, &touch->dwork,
msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
return 0;
@@ -530,7 +517,6 @@ static struct synaptics_i2c *synaptics_i2c_touch_create(struct i2c_client *clien
touch->scan_rate_param = scan_rate;
set_scan_rate(touch, scan_rate);
INIT_DELAYED_WORK(&touch->dwork, synaptics_i2c_work_handler);
- spin_lock_init(&touch->lock);
return touch;
}
@@ -637,7 +623,7 @@ static int __maybe_unused synaptics_i2c_resume(struct device *dev)
if (ret)
return ret;
- synaptics_i2c_reschedule_work(touch,
+ mod_delayed_work(system_wq, &touch->dwork,
msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
return 0;
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index fc3ab93b7aea..7fb358f96195 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -860,7 +860,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
error = rmi_register_function(fn);
if (error)
- goto err_put_fn;
+ return error;
if (pdt->function_number == 0x01)
data->f01_container = fn;
@@ -870,10 +870,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
list_add_tail(&fn->node, &data->function_list);
return RMI_SCAN_CONTINUE;
-
-err_put_fn:
- put_device(&fn->dev);
- return error;
}
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
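
The deleted err_put_fn label fixed a double release: once device_register() has been attempted on fn->dev, the device may only be dropped via put_device(), and rmi_register_function() presumably performs that put on its own failure path, so repeating it in the caller released the function device twice. A sketch of the rule being honoured (illustrative, not the actual rmi_register_function body):

	if (device_register(&fn->dev)) {
		put_device(&fn->dev);	/* frees via fn->dev.release */
		return -ENODEV;		/* caller must NOT put again */
	}
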
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index df64d6aed4f7..93901ebd122a 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
}
rc = f11_write_control_regs(fn, &f11->sens_query,
- &f11->dev_controls, fn->fd.query_base_addr);
+ &f11->dev_controls, fn->fd.control_base_addr);
if (rc)
dev_warn(&fn->dev, "Failed to write control registers\n");
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 796289846204..fce76812843b 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -53,12 +53,11 @@ static struct resource *kbd_res;
static int sparc_i8042_probe(struct platform_device *op)
{
- struct device_node *dp = op->dev.of_node;
+ struct device_node *dp;
- dp = dp->child;
- while (dp) {
- if (!strcmp(dp->name, OBP_PS2KBD_NAME1) ||
- !strcmp(dp->name, OBP_PS2KBD_NAME2)) {
+ for_each_child_of_node(op->dev.of_node, dp) {
+ if (of_node_name_eq(dp, OBP_PS2KBD_NAME1) ||
+ of_node_name_eq(dp, OBP_PS2KBD_NAME2)) {
struct platform_device *kbd = of_find_device_by_node(dp);
unsigned int irq = kbd->archdata.irqs[0];
if (irq == 0xffffffff)
@@ -67,16 +66,14 @@ static int sparc_i8042_probe(struct platform_device *op)
kbd_iobase = of_ioremap(&kbd->resource[0],
0, 8, "kbd");
kbd_res = &kbd->resource[0];
- } else if (!strcmp(dp->name, OBP_PS2MS_NAME1) ||
- !strcmp(dp->name, OBP_PS2MS_NAME2)) {
+ } else if (of_node_name_eq(dp, OBP_PS2MS_NAME1) ||
+ of_node_name_eq(dp, OBP_PS2MS_NAME2)) {
struct platform_device *ms = of_find_device_by_node(dp);
unsigned int irq = ms->archdata.irqs[0];
if (irq == 0xffffffff)
irq = op->archdata.irqs[0];
i8042_aux_irq = irq;
}
-
- dp = dp->sibling;
}
return 0;
@@ -109,8 +106,9 @@ static struct platform_driver sparc_i8042_driver = {
static int __init i8042_platform_init(void)
{
struct device_node *root = of_find_node_by_path("/");
+ const char *name = of_get_property(root, "name", NULL);
- if (!strcmp(root->name, "SUNW,JavaStation-1")) {
+ if (name && !strcmp(name, "SUNW,JavaStation-1")) {
/* Hardcoded values for MrCoffee. */
i8042_kbd_irq = i8042_aux_irq = 13 | 0x20;
kbd_iobase = ioremap(0x71300060, 8);
@@ -139,8 +137,9 @@ static int __init i8042_platform_init(void)
static inline void i8042_platform_exit(void)
{
struct device_node *root = of_find_node_by_path("/");
+ const char *name = of_get_property(root, "name", NULL);
- if (strcmp(root->name, "SUNW,JavaStation-1"))
+ if (!name || strcmp(name, "SUNW,JavaStation-1"))
platform_driver_unregister(&sparc_i8042_driver);
}
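
Two robustness points in this conversion: for_each_child_of_node() manages the child node reference counts itself (the manual dp->sibling walk did not), and of_node_name_eq() avoids dereferencing dp->name directly, part of the treewide move away from touching device_node.name. The root-node hunks likewise stop reading root->name, since of_get_property(root, "name", NULL) can legitimately return NULL. Sketch of the iteration contract:

	struct device_node *dp;

	for_each_child_of_node(op->dev.of_node, dp) {
		/*
		 * The iterator holds a reference on dp for each pass and
		 * drops it on the next; of_node_put() is only needed if
		 * the body breaks out of the loop early.
		 */
		if (of_node_name_eq(dp, OBP_PS2KBD_NAME1))
			pr_debug("found PS/2 keyboard node\n");
	}
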
diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
index 38bfaca48eab..150f9eecaca7 100644
--- a/drivers/input/tablet/wacom_serial4.c
+++ b/drivers/input/tablet/wacom_serial4.c
@@ -187,6 +187,7 @@ enum {
MODEL_DIGITIZER_II = 0x5544, /* UD */
MODEL_GRAPHIRE = 0x4554, /* ET */
MODEL_PENPARTNER = 0x4354, /* CT */
+ MODEL_ARTPAD_II = 0x4B54, /* KT */
};
static void wacom_handle_model_response(struct wacom *wacom)
@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
break;
+ case MODEL_ARTPAD_II:
case MODEL_DIGITIZER_II:
wacom->dev->name = "Wacom Digitizer II";
wacom->dev->id.version = MODEL_DIGITIZER_II;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 068dbbc610fc..7a4884ad198b 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -699,6 +699,7 @@ config TOUCHSCREEN_EDT_FT5X06
config TOUCHSCREEN_RASPBERRYPI_FW
	tristate "Raspberry Pi's firmware-based touch screen support"
depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
+ select INPUT_POLLDEV
help
Say Y here if you have the official Raspberry Pi 7 inch screen on
your system.
@@ -1168,11 +1169,11 @@ config TOUCHSCREEN_SIS_I2C
module will be called sis_i2c.
config TOUCHSCREEN_ST1232
- tristate "Sitronix ST1232 touchscreen controllers"
+ tristate "Sitronix ST1232 or ST1633 touchscreen controllers"
depends on I2C
help
- Say Y here if you want to support Sitronix ST1232
- touchscreen controller.
+ Say Y here if you want to support the Sitronix ST1232
+ or ST1633 touchscreen controller.
If unsure, say N.
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 6fa714c587b4..3a016f43fb85 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -246,11 +246,14 @@ static void ad7879_timer(struct timer_list *t)
static irqreturn_t ad7879_irq(int irq, void *handle)
{
struct ad7879 *ts = handle;
+ int error;
- regmap_bulk_read(ts->regmap, AD7879_REG_XPLUS,
- ts->conversion_data, AD7879_NR_SENSE);
-
- if (!ad7879_report(ts))
+ error = regmap_bulk_read(ts->regmap, AD7879_REG_XPLUS,
+ ts->conversion_data, AD7879_NR_SENSE);
+ if (error)
+ dev_err_ratelimited(ts->dev, "failed to read %#02x: %d\n",
+ AD7879_REG_XPLUS, error);
+ else if (!ad7879_report(ts))
mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT);
return IRQ_HANDLED;
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 1e18ca0d1b4e..702bfda7ee77 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/i2c.h>
+#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
@@ -53,6 +54,11 @@
#define M09_REGISTER_NUM_X 0x94
#define M09_REGISTER_NUM_Y 0x95
+#define EV_REGISTER_THRESHOLD 0x40
+#define EV_REGISTER_GAIN 0x41
+#define EV_REGISTER_OFFSET_Y 0x45
+#define EV_REGISTER_OFFSET_X 0x46
+
#define NO_REGISTER 0xff
#define WORK_REGISTER_OPMODE 0x3c
@@ -73,6 +79,7 @@ enum edt_ver {
EDT_M06,
EDT_M09,
EDT_M12,
+ EV_FT,
GENERIC_FT,
};
@@ -81,6 +88,8 @@ struct edt_reg_addr {
int reg_report_rate;
int reg_gain;
int reg_offset;
+ int reg_offset_x;
+ int reg_offset_y;
int reg_num_x;
int reg_num_y;
};
@@ -106,6 +115,8 @@ struct edt_ft5x06_ts_data {
int threshold;
int gain;
int offset;
+ int offset_x;
+ int offset_y;
int report_rate;
int max_support_points;
@@ -190,6 +201,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
case EDT_M09:
case EDT_M12:
+ case EV_FT:
case GENERIC_FT:
cmd = 0x0;
offset = 3;
@@ -242,6 +254,10 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
x = ((buf[0] << 8) | buf[1]) & 0x0fff;
y = ((buf[2] << 8) | buf[3]) & 0x0fff;
+	/* The FT5x26 sends the y coordinate first */
+ if (tsdata->version == EV_FT)
+ swap(x, y);
+
id = (buf[2] >> 4) & 0x0f;
down = type != TOUCH_EVENT_UP;
@@ -275,8 +291,10 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
return edt_ft5x06_ts_readwrite(tsdata->client, 4,
wrbuf, 0, NULL);
+ /* fallthrough */
case EDT_M09:
case EDT_M12:
+ case EV_FT:
case GENERIC_FT:
wrbuf[0] = addr;
wrbuf[1] = value;
@@ -315,8 +333,10 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
}
break;
+ /* fallthrough */
case EDT_M09:
case EDT_M12:
+ case EV_FT:
case GENERIC_FT:
wrbuf[0] = addr;
error = edt_ft5x06_ts_readwrite(tsdata->client, 1,
@@ -339,9 +359,10 @@ struct edt_ft5x06_attribute {
u8 limit_high;
u8 addr_m06;
u8 addr_m09;
+ u8 addr_ev;
};
-#define EDT_ATTR(_field, _mode, _addr_m06, _addr_m09, \
+#define EDT_ATTR(_field, _mode, _addr_m06, _addr_m09, _addr_ev, \
_limit_low, _limit_high) \
struct edt_ft5x06_attribute edt_ft5x06_attr_##_field = { \
.dattr = __ATTR(_field, _mode, \
@@ -350,6 +371,7 @@ struct edt_ft5x06_attribute {
.field_offset = offsetof(struct edt_ft5x06_ts_data, _field), \
.addr_m06 = _addr_m06, \
.addr_m09 = _addr_m09, \
+ .addr_ev = _addr_ev, \
.limit_low = _limit_low, \
.limit_high = _limit_high, \
}
@@ -386,6 +408,10 @@ static ssize_t edt_ft5x06_setting_show(struct device *dev,
addr = attr->addr_m09;
break;
+ case EV_FT:
+ addr = attr->addr_ev;
+ break;
+
default:
error = -ENODEV;
goto out;
@@ -457,6 +483,10 @@ static ssize_t edt_ft5x06_setting_store(struct device *dev,
addr = attr->addr_m09;
break;
+ case EV_FT:
+ addr = attr->addr_ev;
+ break;
+
default:
error = -ENODEV;
goto out;
@@ -480,20 +510,28 @@ out:
/* m06, m09: range 0-31, m12: range 0-5 */
static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
- M09_REGISTER_GAIN, 0, 31);
+ M09_REGISTER_GAIN, EV_REGISTER_GAIN, 0, 31);
/* m06, m09: range 0-31, m12: range 0-16 */
static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
- M09_REGISTER_OFFSET, 0, 31);
+ M09_REGISTER_OFFSET, NO_REGISTER, 0, 31);
+/* m06, m09, m12: not supported, ev_ft: range 0-80 */
+static EDT_ATTR(offset_x, S_IWUSR | S_IRUGO, NO_REGISTER, NO_REGISTER,
+ EV_REGISTER_OFFSET_X, 0, 80);
+/* m06, m09, m12: not supported, ev_ft: range 0-80 */
+static EDT_ATTR(offset_y, S_IWUSR | S_IRUGO, NO_REGISTER, NO_REGISTER,
+ EV_REGISTER_OFFSET_Y, 0, 80);
/* m06: range 20 to 80, m09: range 0 to 30, m12: range 1 to 255... */
static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
- M09_REGISTER_THRESHOLD, 0, 255);
+ M09_REGISTER_THRESHOLD, EV_REGISTER_THRESHOLD, 0, 255);
/* m06: range 3 to 14, m12: (0x64: 100Hz) */
static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
- NO_REGISTER, 0, 255);
+ NO_REGISTER, NO_REGISTER, 0, 255);
static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_gain.dattr.attr,
&edt_ft5x06_attr_offset.dattr.attr,
+ &edt_ft5x06_attr_offset_x.dattr.attr,
+ &edt_ft5x06_attr_offset_y.dattr.attr,
&edt_ft5x06_attr_threshold.dattr.attr,
&edt_ft5x06_attr_report_rate.dattr.attr,
NULL
@@ -605,8 +643,15 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
tsdata->threshold);
edt_ft5x06_register_write(tsdata, reg_addr->reg_gain,
tsdata->gain);
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
- tsdata->offset);
+ if (reg_addr->reg_offset != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
+ tsdata->offset);
+ if (reg_addr->reg_offset_x != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x,
+ tsdata->offset_x);
+ if (reg_addr->reg_offset_y != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y,
+ tsdata->offset_y);
if (reg_addr->reg_report_rate != NO_REGISTER)
edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate,
tsdata->report_rate);
@@ -867,6 +912,16 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
case 0x5a: /* Solomon Goldentek Display */
snprintf(model_name, EDT_NAME_LEN, "GKTW50SCED1R0");
break;
+ case 0x59: /* Evervision Display with FT5xx6 TS */
+ tsdata->version = EV_FT;
+ error = edt_ft5x06_ts_readwrite(client, 1, "\x53",
+ 1, rdbuf);
+ if (error)
+ return error;
+ strlcpy(fw_version, rdbuf, 1);
+ snprintf(model_name, EDT_NAME_LEN,
+ "EVERVISION-FT5726NEi");
+ break;
default:
snprintf(model_name, EDT_NAME_LEN,
"generic ft5x06 (%02x)",
@@ -902,6 +957,18 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
tsdata->offset = val;
}
+
+ error = device_property_read_u32(dev, "offset-x", &val);
+ if (!error) {
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, val);
+ tsdata->offset_x = val;
+ }
+
+ error = device_property_read_u32(dev, "offset-y", &val);
+ if (!error) {
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, val);
+ tsdata->offset_y = val;
+ }
}
static void
@@ -912,7 +979,15 @@ edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
tsdata->threshold = edt_ft5x06_register_read(tsdata,
reg_addr->reg_threshold);
tsdata->gain = edt_ft5x06_register_read(tsdata, reg_addr->reg_gain);
- tsdata->offset = edt_ft5x06_register_read(tsdata, reg_addr->reg_offset);
+ if (reg_addr->reg_offset != NO_REGISTER)
+ tsdata->offset =
+ edt_ft5x06_register_read(tsdata, reg_addr->reg_offset);
+ if (reg_addr->reg_offset_x != NO_REGISTER)
+ tsdata->offset_x = edt_ft5x06_register_read(tsdata,
+ reg_addr->reg_offset_x);
+ if (reg_addr->reg_offset_y != NO_REGISTER)
+ tsdata->offset_y = edt_ft5x06_register_read(tsdata,
+ reg_addr->reg_offset_y);
if (reg_addr->reg_report_rate != NO_REGISTER)
tsdata->report_rate = edt_ft5x06_register_read(tsdata,
reg_addr->reg_report_rate);
@@ -940,6 +1015,8 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
reg_addr->reg_report_rate = WORK_REGISTER_REPORT_RATE;
reg_addr->reg_gain = WORK_REGISTER_GAIN;
reg_addr->reg_offset = WORK_REGISTER_OFFSET;
+ reg_addr->reg_offset_x = NO_REGISTER;
+ reg_addr->reg_offset_y = NO_REGISTER;
reg_addr->reg_num_x = WORK_REGISTER_NUM_X;
reg_addr->reg_num_y = WORK_REGISTER_NUM_Y;
break;
@@ -950,15 +1027,30 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
reg_addr->reg_report_rate = NO_REGISTER;
reg_addr->reg_gain = M09_REGISTER_GAIN;
reg_addr->reg_offset = M09_REGISTER_OFFSET;
+ reg_addr->reg_offset_x = NO_REGISTER;
+ reg_addr->reg_offset_y = NO_REGISTER;
reg_addr->reg_num_x = M09_REGISTER_NUM_X;
reg_addr->reg_num_y = M09_REGISTER_NUM_Y;
break;
+ case EV_FT:
+ reg_addr->reg_threshold = EV_REGISTER_THRESHOLD;
+ reg_addr->reg_gain = EV_REGISTER_GAIN;
+ reg_addr->reg_offset = NO_REGISTER;
+ reg_addr->reg_offset_x = EV_REGISTER_OFFSET_X;
+ reg_addr->reg_offset_y = EV_REGISTER_OFFSET_Y;
+ reg_addr->reg_num_x = NO_REGISTER;
+ reg_addr->reg_num_y = NO_REGISTER;
+ reg_addr->reg_report_rate = NO_REGISTER;
+ break;
+
case GENERIC_FT:
	/* this is guesswork */
reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
reg_addr->reg_gain = M09_REGISTER_GAIN;
reg_addr->reg_offset = M09_REGISTER_OFFSET;
+ reg_addr->reg_offset_x = NO_REGISTER;
+ reg_addr->reg_offset_y = NO_REGISTER;
break;
}
}
@@ -1155,6 +1247,7 @@ static const struct edt_i2c_chip_data edt_ft6236_data = {
static const struct i2c_device_id edt_ft5x06_ts_id[] = {
{ .name = "edt-ft5x06", .driver_data = (long)&edt_ft5x06_data },
{ .name = "edt-ft5506", .driver_data = (long)&edt_ft5506_data },
+ { .name = "ev-ft5726", .driver_data = (long)&edt_ft5506_data },
/* Note no edt- prefix for compatibility with the ft6236.c driver */
{ .name = "ft6236", .driver_data = (long)&edt_ft6236_data },
{ /* sentinel */ }
@@ -1167,6 +1260,7 @@ static const struct of_device_id edt_ft5x06_of_match[] = {
{ .compatible = "edt,edt-ft5306", .data = &edt_ft5x06_data },
{ .compatible = "edt,edt-ft5406", .data = &edt_ft5x06_data },
{ .compatible = "edt,edt-ft5506", .data = &edt_ft5506_data },
+ { .compatible = "evervision,ev-ft5726", .data = &edt_ft5506_data },
/* Note focaltech vendor prefix for compatibility with ft6236.c */
{ .compatible = "focaltech,ft6236", .data = &edt_ft6236_data },
{ /* sentinel */ }
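
For the EV_FT parts everything hangs off the register map: the EV_REGISTER_* entries replace the M09 addresses, the unsupported offset/num_x/num_y/report_rate slots are set to the NO_REGISTER (0xff) sentinel so the shared read/write paths skip them, and because the FT5x26 sends Y before X the ISR swaps the decoded pair (mirroring the hunk above):

	x = ((buf[0] << 8) | buf[1]) & 0x0fff;
	y = ((buf[2] << 8) | buf[3]) & 0x0fff;
	if (tsdata->version == EV_FT)	/* controller sends Y first */
		swap(x, y);
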
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index f2d9c2c41885..f57d82220a88 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -216,6 +216,7 @@ static const struct goodix_chip_data *goodix_get_chip_data(u16 id)
{
switch (id) {
case 1151:
+ case 5688:
return &gt1x_chip_data;
case 911:
@@ -692,7 +693,9 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
touchscreen_parse_properties(ts->input_dev, true, &ts->prop);
if (!ts->prop.max_x || !ts->prop.max_y || !ts->max_touch_num) {
- dev_err(&ts->client->dev, "Invalid config, using defaults\n");
+ dev_err(&ts->client->dev,
+ "Invalid config (%d, %d, %d), using defaults\n",
+ ts->prop.max_x, ts->prop.max_y, ts->max_touch_num);
ts->prop.max_x = GOODIX_MAX_WIDTH - 1;
ts->prop.max_y = GOODIX_MAX_HEIGHT - 1;
ts->max_touch_num = GOODIX_MAX_CONTACTS;
@@ -942,6 +945,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt1151" },
+ { .compatible = "goodix,gt5688" },
{ .compatible = "goodix,gt911" },
{ .compatible = "goodix,gt9110" },
{ .compatible = "goodix,gt912" },
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 6f76eeedf465..9169aa03958a 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -4,11 +4,15 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
-#include <linux/input/ili210x.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <asm/unaligned.h>
-#define MAX_TOUCHES 2
+#define ILI210X_TOUCHES 2
+#define ILI251X_TOUCHES 10
#define DEFAULT_POLL_PERIOD 20
/* Touchscreen commands */
@@ -17,41 +21,32 @@
#define REG_FIRMWARE_VERSION 0x40
#define REG_CALIBRATE 0xcc
-struct finger {
- u8 x_low;
- u8 x_high;
- u8 y_low;
- u8 y_high;
-} __packed;
-
-struct touchdata {
- u8 status;
- struct finger finger[MAX_TOUCHES];
-} __packed;
-
-struct panel_info {
- struct finger finger_max;
- u8 xchannel_num;
- u8 ychannel_num;
-} __packed;
-
struct firmware_version {
u8 id;
u8 major;
u8 minor;
} __packed;
+enum ili2xxx_model {
+ MODEL_ILI210X,
+ MODEL_ILI251X,
+};
+
struct ili210x {
struct i2c_client *client;
struct input_dev *input;
- bool (*get_pendown_state)(void);
unsigned int poll_period;
struct delayed_work dwork;
+ struct gpio_desc *reset_gpio;
+ struct touchscreen_properties prop;
+ enum ili2xxx_model model;
+ unsigned int max_touches;
};
static int ili210x_read_reg(struct i2c_client *client, u8 reg, void *buf,
size_t len)
{
+ struct ili210x *priv = i2c_get_clientdata(client);
struct i2c_msg msg[2] = {
{
.addr = client->addr,
@@ -67,7 +62,38 @@ static int ili210x_read_reg(struct i2c_client *client, u8 reg, void *buf,
}
};
- if (i2c_transfer(client->adapter, msg, 2) != 2) {
+ if (priv->model == MODEL_ILI251X) {
+ if (i2c_transfer(client->adapter, msg, 1) != 1) {
+ dev_err(&client->dev, "i2c transfer failed\n");
+ return -EIO;
+ }
+
+ usleep_range(5000, 5500);
+
+ if (i2c_transfer(client->adapter, msg + 1, 1) != 1) {
+ dev_err(&client->dev, "i2c transfer failed\n");
+ return -EIO;
+ }
+ } else {
+ if (i2c_transfer(client->adapter, msg, 2) != 2) {
+ dev_err(&client->dev, "i2c transfer failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int ili210x_read(struct i2c_client *client, void *buf, size_t len)
+{
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ };
+
+ if (i2c_transfer(client->adapter, &msg, 1) != 1) {
dev_err(&client->dev, "i2c transfer failed\n");
return -EIO;
}
@@ -75,42 +101,72 @@ static int ili210x_read_reg(struct i2c_client *client, u8 reg, void *buf,
return 0;
}
-static void ili210x_report_events(struct input_dev *input,
- const struct touchdata *touchdata)
+static bool ili210x_touchdata_to_coords(struct ili210x *priv, u8 *touchdata,
+ unsigned int finger,
+ unsigned int *x, unsigned int *y)
{
- int i;
- bool touch;
- unsigned int x, y;
- const struct finger *finger;
+ if (finger >= ILI210X_TOUCHES)
+ return false;
- for (i = 0; i < MAX_TOUCHES; i++) {
- input_mt_slot(input, i);
+ if (touchdata[0] & BIT(finger))
+ return false;
- finger = &touchdata->finger[i];
+ *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
+ *y = get_unaligned_be16(touchdata + 1 + (finger * 4) + 2);
- touch = touchdata->status & (1 << i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
- if (touch) {
- x = finger->x_low | (finger->x_high << 8);
- y = finger->y_low | (finger->y_high << 8);
+ return true;
+}
+
+static bool ili251x_touchdata_to_coords(struct ili210x *priv, u8 *touchdata,
+ unsigned int finger,
+ unsigned int *x, unsigned int *y)
+{
+ if (finger >= ILI251X_TOUCHES)
+ return false;
+
+ *x = get_unaligned_be16(touchdata + 1 + (finger * 5) + 0);
+ if (!(*x & BIT(15))) /* Touch indication */
+ return false;
- input_report_abs(input, ABS_MT_POSITION_X, x);
- input_report_abs(input, ABS_MT_POSITION_Y, y);
+ *x &= 0x3fff;
+ *y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
+
+ return true;
+}
+
+static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
+{
+ struct input_dev *input = priv->input;
+ int i;
+ bool contact = false, touch = false;
+ unsigned int x = 0, y = 0;
+
+ for (i = 0; i < priv->max_touches; i++) {
+ if (priv->model == MODEL_ILI210X) {
+ touch = ili210x_touchdata_to_coords(priv, touchdata,
+ i, &x, &y);
+ } else if (priv->model == MODEL_ILI251X) {
+ touch = ili251x_touchdata_to_coords(priv, touchdata,
+ i, &x, &y);
+ if (touch)
+ contact = true;
}
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+ if (!touch)
+ continue;
+ touchscreen_report_pos(input, &priv->prop, x, y,
+ true);
}
input_mt_report_pointer_emulation(input, false);
input_sync(input);
-}
-static bool get_pendown_state(const struct ili210x *priv)
-{
- bool state = false;
-
- if (priv->get_pendown_state)
- state = priv->get_pendown_state();
+ if (priv->model == MODEL_ILI210X)
+ contact = touchdata[0] & 0xf3;
- return state;
+ return contact;
}
static void ili210x_work(struct work_struct *work)
@@ -118,20 +174,29 @@ static void ili210x_work(struct work_struct *work)
struct ili210x *priv = container_of(work, struct ili210x,
dwork.work);
struct i2c_client *client = priv->client;
- struct touchdata touchdata;
- int error;
+ u8 touchdata[64] = { 0 };
+ bool touch;
+ int error = -EINVAL;
+
+ if (priv->model == MODEL_ILI210X) {
+ error = ili210x_read_reg(client, REG_TOUCHDATA,
+ touchdata, sizeof(touchdata));
+ } else if (priv->model == MODEL_ILI251X) {
+ error = ili210x_read_reg(client, REG_TOUCHDATA,
+ touchdata, 31);
+ if (!error && touchdata[0] == 2)
+ error = ili210x_read(client, &touchdata[31], 20);
+ }
- error = ili210x_read_reg(client, REG_TOUCHDATA,
- &touchdata, sizeof(touchdata));
if (error) {
dev_err(&client->dev,
"Unable to get touchdata, err = %d\n", error);
return;
}
- ili210x_report_events(priv->input, &touchdata);
+ touch = ili210x_report_events(priv, touchdata);
- if ((touchdata.status & 0xf3) || get_pendown_state(priv))
+ if (touch)
schedule_delayed_work(&priv->dwork,
msecs_to_jiffies(priv->poll_period));
}
@@ -180,103 +245,119 @@ static const struct attribute_group ili210x_attr_group = {
.attrs = ili210x_attributes,
};
+static void ili210x_power_down(void *data)
+{
+ struct gpio_desc *reset_gpio = data;
+
+ gpiod_set_value_cansleep(reset_gpio, 1);
+}
+
+static void ili210x_cancel_work(void *data)
+{
+ struct ili210x *priv = data;
+
+ cancel_delayed_work_sync(&priv->dwork);
+}
+
static int ili210x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- const struct ili210x_platform_data *pdata = dev_get_platdata(dev);
struct ili210x *priv;
+ struct gpio_desc *reset_gpio;
struct input_dev *input;
- struct panel_info panel;
struct firmware_version firmware;
- int xmax, ymax;
+ enum ili2xxx_model model;
int error;
- dev_dbg(dev, "Probing for ILI210X I2C Touschreen driver");
+ model = (enum ili2xxx_model)id->driver_data;
- if (!pdata) {
- dev_err(dev, "No platform data!\n");
- return -EINVAL;
- }
+	dev_dbg(dev, "Probing for ILI210X I2C Touchscreen driver");
if (client->irq <= 0) {
dev_err(dev, "No IRQ!\n");
return -EINVAL;
}
- /* Get firmware version */
- error = ili210x_read_reg(client, REG_FIRMWARE_VERSION,
- &firmware, sizeof(firmware));
- if (error) {
- dev_err(dev, "Failed to get firmware version, err: %d\n",
- error);
- return error;
- }
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return PTR_ERR(reset_gpio);
- /* get panel info */
- error = ili210x_read_reg(client, REG_PANEL_INFO, &panel, sizeof(panel));
- if (error) {
- dev_err(dev, "Failed to get panel information, err: %d\n",
- error);
- return error;
+ if (reset_gpio) {
+ error = devm_add_action_or_reset(dev, ili210x_power_down,
+ reset_gpio);
+ if (error)
+ return error;
+
+ usleep_range(50, 100);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ msleep(100);
}
- xmax = panel.finger_max.x_low | (panel.finger_max.x_high << 8);
- ymax = panel.finger_max.y_low | (panel.finger_max.y_high << 8);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- input = input_allocate_device();
- if (!priv || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
priv->client = client;
priv->input = input;
- priv->get_pendown_state = pdata->get_pendown_state;
- priv->poll_period = pdata->poll_period ? : DEFAULT_POLL_PERIOD;
+ priv->poll_period = DEFAULT_POLL_PERIOD;
INIT_DELAYED_WORK(&priv->dwork, ili210x_work);
+ priv->reset_gpio = reset_gpio;
+ priv->model = model;
+ if (model == MODEL_ILI210X)
+ priv->max_touches = ILI210X_TOUCHES;
+ if (model == MODEL_ILI251X)
+ priv->max_touches = ILI251X_TOUCHES;
+
+ i2c_set_clientdata(client, priv);
+
+ /* Get firmware version */
+ error = ili210x_read_reg(client, REG_FIRMWARE_VERSION,
+ &firmware, sizeof(firmware));
+ if (error) {
+ dev_err(dev, "Failed to get firmware version, err: %d\n",
+ error);
+ return error;
+ }
/* Setup input device */
input->name = "ILI210x Touchscreen";
input->id.bustype = BUS_I2C;
input->dev.parent = dev;
- __set_bit(EV_SYN, input->evbit);
- __set_bit(EV_KEY, input->evbit);
- __set_bit(EV_ABS, input->evbit);
- __set_bit(BTN_TOUCH, input->keybit);
-
- /* Single touch */
- input_set_abs_params(input, ABS_X, 0, xmax, 0, 0);
- input_set_abs_params(input, ABS_Y, 0, ymax, 0, 0);
-
/* Multi touch */
- input_mt_init_slots(input, MAX_TOUCHES, 0);
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, xmax, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, ymax, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, 0xffff, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 0xffff, 0, 0);
+ touchscreen_parse_properties(input, true, &priv->prop);
+ input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT);
- i2c_set_clientdata(client, priv);
+ error = devm_add_action(dev, ili210x_cancel_work, priv);
+ if (error)
+ return error;
- error = request_irq(client->irq, ili210x_irq, pdata->irq_flags,
- client->name, priv);
+ error = devm_request_irq(dev, client->irq, ili210x_irq, 0,
+ client->name, priv);
if (error) {
dev_err(dev, "Unable to request touchscreen IRQ, err: %d\n",
error);
- goto err_free_mem;
+ return error;
}
- error = sysfs_create_group(&dev->kobj, &ili210x_attr_group);
+ error = devm_device_add_group(dev, &ili210x_attr_group);
if (error) {
dev_err(dev, "Unable to create sysfs attributes, err: %d\n",
error);
- goto err_free_irq;
+ return error;
}
error = input_register_device(priv->input);
if (error) {
dev_err(dev, "Cannot register input device, err: %d\n", error);
- goto err_remove_sysfs;
+ return error;
}
device_init_wakeup(dev, 1);
@@ -286,28 +367,6 @@ static int ili210x_i2c_probe(struct i2c_client *client,
client->irq, firmware.id, firmware.major, firmware.minor);
return 0;
-
-err_remove_sysfs:
- sysfs_remove_group(&dev->kobj, &ili210x_attr_group);
-err_free_irq:
- free_irq(client->irq, priv);
-err_free_mem:
- input_free_device(input);
- kfree(priv);
- return error;
-}
-
-static int ili210x_i2c_remove(struct i2c_client *client)
-{
- struct ili210x *priv = i2c_get_clientdata(client);
-
- sysfs_remove_group(&client->dev.kobj, &ili210x_attr_group);
- free_irq(priv->client->irq, priv);
- cancel_delayed_work_sync(&priv->dwork);
- input_unregister_device(priv->input);
- kfree(priv);
-
- return 0;
}
static int __maybe_unused ili210x_i2c_suspend(struct device *dev)
@@ -334,19 +393,27 @@ static SIMPLE_DEV_PM_OPS(ili210x_i2c_pm,
ili210x_i2c_suspend, ili210x_i2c_resume);
static const struct i2c_device_id ili210x_i2c_id[] = {
- { "ili210x", 0 },
+ { "ili210x", MODEL_ILI210X },
+ { "ili251x", MODEL_ILI251X },
{ }
};
MODULE_DEVICE_TABLE(i2c, ili210x_i2c_id);
+static const struct of_device_id ili210x_dt_ids[] = {
+ { .compatible = "ilitek,ili210x", .data = (void *)MODEL_ILI210X },
+ { .compatible = "ilitek,ili251x", .data = (void *)MODEL_ILI251X },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ili210x_dt_ids);
+
static struct i2c_driver ili210x_ts_driver = {
.driver = {
.name = "ili210x_i2c",
.pm = &ili210x_i2c_pm,
+ .of_match_table = ili210x_dt_ids,
},
.id_table = ili210x_i2c_id,
.probe = ili210x_i2c_probe,
- .remove = ili210x_i2c_remove,
};
module_i2c_driver(ili210x_ts_driver);
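The ili210x probe above now leans entirely on devm-managed resources (devm_kzalloc, devm_request_irq, devm_add_action_or_reset), which is why the error-unwinding labels and the whole remove() callback could be deleted. Below is a minimal userspace sketch of the underlying idea — cleanup actions registered in probe order and executed in reverse on teardown — using only the C standard library; all names are invented for illustration:

#include <stdio.h>

/* Tiny stand-in for devm: record cleanup actions, run them in reverse. */
typedef void (*action_fn)(void *);

static action_fn actions[8];
static void *action_data[8];
static int n_actions;

static int add_action(action_fn fn, void *data)
{
    if (n_actions >= 8)
        return -1;
    actions[n_actions] = fn;
    action_data[n_actions] = data;
    n_actions++;
    return 0;
}

static void run_actions(void)
{
    while (n_actions > 0) {
        n_actions--;
        actions[n_actions](action_data[n_actions]);
    }
}

static void power_down(void *name) { printf("power down %s\n", (char *)name); }
static void cancel_work(void *name) { printf("cancel %s\n", (char *)name); }

int main(void)
{
    add_action(power_down, (void *)"reset-gpio"); /* registered first ... */
    add_action(cancel_work, (void *)"dwork");     /* ... registered second */
    run_actions(); /* cancels the work first, then powers down */
    return 0;
}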
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 11ff32c68025..34923399ece4 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -11,25 +11,19 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/input/touchscreen.h>
#define ST1232_TS_NAME "st1232-ts"
-
-#define MIN_X 0x00
-#define MIN_Y 0x00
-#define MAX_X 0x31f /* (800 - 1) */
-#define MAX_Y 0x1df /* (480 - 1) */
-#define MAX_AREA 0xff
-#define MAX_FINGERS 2
+#define ST1633_TS_NAME "st1633-ts"
struct st1232_ts_finger {
u16 x;
@@ -38,12 +32,25 @@ struct st1232_ts_finger {
bool is_valid;
};
+struct st_chip_info {
+ bool have_z;
+ u16 max_x;
+ u16 max_y;
+ u16 max_area;
+ u16 max_fingers;
+ u8 start_reg;
+};
+
struct st1232_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
- struct st1232_ts_finger finger[MAX_FINGERS];
+ struct touchscreen_properties prop;
struct dev_pm_qos_request low_latency_req;
- int reset_gpio;
+ struct gpio_desc *reset_gpio;
+ const struct st_chip_info *chip_info;
+ int read_buf_len;
+ u8 *read_buf;
+ struct st1232_ts_finger *finger;
};
static int st1232_ts_read_data(struct st1232_ts_data *ts)
@@ -52,40 +59,35 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts)
struct i2c_client *client = ts->client;
struct i2c_msg msg[2];
int error;
- u8 start_reg;
- u8 buf[10];
+ int i, y;
+ u8 start_reg = ts->chip_info->start_reg;
+ u8 *buf = ts->read_buf;
- /* read touchscreen data from ST1232 */
+ /* read touchscreen data */
msg[0].addr = client->addr;
msg[0].flags = 0;
msg[0].len = 1;
msg[0].buf = &start_reg;
- start_reg = 0x10;
msg[1].addr = ts->client->addr;
msg[1].flags = I2C_M_RD;
- msg[1].len = sizeof(buf);
+ msg[1].len = ts->read_buf_len;
msg[1].buf = buf;
error = i2c_transfer(client->adapter, msg, 2);
if (error < 0)
return error;
- /* get "valid" bits */
- finger[0].is_valid = buf[2] >> 7;
- finger[1].is_valid = buf[5] >> 7;
+ for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
+ finger[i].is_valid = buf[i + y] >> 7;
+ if (finger[i].is_valid) {
+ finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + y + 1];
+ finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + y + 2];
- /* get xy coordinate */
- if (finger[0].is_valid) {
- finger[0].x = ((buf[2] & 0x0070) << 4) | buf[3];
- finger[0].y = ((buf[2] & 0x0007) << 8) | buf[4];
- finger[0].t = buf[8];
- }
-
- if (finger[1].is_valid) {
- finger[1].x = ((buf[5] & 0x0070) << 4) | buf[6];
- finger[1].y = ((buf[5] & 0x0007) << 8) | buf[7];
- finger[1].t = buf[9];
+ /* st1232 includes a z-axis / touch strength */
+ if (ts->chip_info->have_z)
+ finger[i].t = buf[i + 6];
+ }
}
return 0;
@@ -104,13 +106,16 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
goto end;
/* multi touch protocol */
- for (i = 0; i < MAX_FINGERS; i++) {
+ for (i = 0; i < ts->chip_info->max_fingers; i++) {
if (!finger[i].is_valid)
continue;
- input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, finger[i].t);
- input_report_abs(input_dev, ABS_MT_POSITION_X, finger[i].x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, finger[i].y);
+ if (ts->chip_info->have_z)
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
+ finger[i].t);
+
+ touchscreen_report_pos(input_dev, &ts->prop,
+ finger[i].x, finger[i].y, true);
input_mt_sync(input_dev);
count++;
}
@@ -138,17 +143,45 @@ end:
static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron)
{
- if (gpio_is_valid(ts->reset_gpio))
- gpio_direction_output(ts->reset_gpio, poweron);
+ if (ts->reset_gpio)
+ gpiod_set_value_cansleep(ts->reset_gpio, !poweron);
}
+static const struct st_chip_info st1232_chip_info = {
+ .have_z = true,
+ .max_x = 0x31f, /* 800 - 1 */
+ .max_y = 0x1df, /* 480 - 1 */
+ .max_area = 0xff,
+ .max_fingers = 2,
+ .start_reg = 0x12,
+};
+
+static const struct st_chip_info st1633_chip_info = {
+ .have_z = false,
+ .max_x = 0x13f, /* 320 - 1 */
+ .max_y = 0x1df, /* 480 - 1 */
+ .max_area = 0x00,
+ .max_fingers = 5,
+ .start_reg = 0x12,
+};
+
static int st1232_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct st_chip_info *match;
struct st1232_ts_data *ts;
+ struct st1232_ts_finger *finger;
struct input_dev *input_dev;
int error;
+ match = device_get_match_data(&client->dev);
+ if (!match && id)
+ match = (const void *)id->driver_data;
+ if (!match) {
+ dev_err(&client->dev, "unknown device model\n");
+ return -ENODEV;
+ }
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "need I2C_FUNC_I2C\n");
return -EIO;
@@ -163,6 +196,19 @@ static int st1232_ts_probe(struct i2c_client *client,
if (!ts)
return -ENOMEM;
+ ts->chip_info = match;
+ ts->finger = devm_kcalloc(&client->dev,
+ ts->chip_info->max_fingers, sizeof(*finger),
+ GFP_KERNEL);
+ if (!ts->finger)
+ return -ENOMEM;
+
+ /* allocate a buffer according to the number of registers to read */
+ ts->read_buf_len = ts->chip_info->max_fingers * 4;
+ ts->read_buf = devm_kzalloc(&client->dev, ts->read_buf_len, GFP_KERNEL);
+ if (!ts->read_buf)
+ return -ENOMEM;
+
input_dev = devm_input_allocate_device(&client->dev);
if (!input_dev)
return -ENOMEM;
@@ -170,15 +216,13 @@ static int st1232_ts_probe(struct i2c_client *client,
ts->client = client;
ts->input_dev = input_dev;
- ts->reset_gpio = of_get_gpio(client->dev.of_node, 0);
- if (gpio_is_valid(ts->reset_gpio)) {
- error = devm_gpio_request(&client->dev, ts->reset_gpio, NULL);
- if (error) {
- dev_err(&client->dev,
- "Unable to request GPIO pin %d.\n",
- ts->reset_gpio);
- return error;
- }
+ ts->reset_gpio = devm_gpiod_get_optional(&client->dev, NULL,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+ dev_err(&client->dev, "Unable to request GPIO pin: %d.\n",
+ error);
+ return error;
}
st1232_ts_power(ts, true);
@@ -192,9 +236,16 @@ static int st1232_ts_probe(struct i2c_client *client,
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(EV_ABS, input_dev->evbit);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, MAX_AREA, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, MIN_X, MAX_X, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, MIN_Y, MAX_Y, 0, 0);
+ if (ts->chip_info->have_z)
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
+ ts->chip_info->max_area, 0, 0);
+
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, ts->chip_info->max_x, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, ts->chip_info->max_y, 0, 0);
+
+ touchscreen_parse_properties(input_dev, true, &ts->prop);
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, st1232_ts_irq_handler,
@@ -261,13 +312,15 @@ static SIMPLE_DEV_PM_OPS(st1232_ts_pm_ops,
st1232_ts_suspend, st1232_ts_resume);
static const struct i2c_device_id st1232_ts_id[] = {
- { ST1232_TS_NAME, 0 },
+ { ST1232_TS_NAME, (unsigned long)&st1232_chip_info },
+ { ST1633_TS_NAME, (unsigned long)&st1633_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
static const struct of_device_id st1232_ts_dt_ids[] = {
- { .compatible = "sitronix,st1232", },
+ { .compatible = "sitronix,st1232", .data = &st1232_chip_info },
+ { .compatible = "sitronix,st1633", .data = &st1633_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
@@ -286,5 +339,6 @@ static struct i2c_driver st1232_ts_driver = {
module_i2c_driver(st1232_ts_driver);
MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>");
+MODULE_AUTHOR("Martin Kepplinger <martin.kepplinger@ginzinger.com>");
MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver");
MODULE_LICENSE("GPL v2");
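The new st1232 read loop treats the buffer as one fixed-size record per finger: a header byte carrying the valid bit and the coordinate high bits, followed by the X and Y low bytes. A standalone sketch of the unpacking arithmetic with a hand-made sample buffer (all byte values hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical 8-byte read for a 2-finger chip: per finger, one
     * header byte (valid bit + coordinate high bits), X low, Y low. */
    uint8_t buf[8] = { 0xf1, 0x23, 0x45, 0x00, 0xb2, 0x67, 0x89, 0x00 };
    int i, y;

    for (i = 0, y = 0; i < 2; i++, y += 3) {
        if (!(buf[i + y] >> 7))     /* valid bit is the header MSB */
            continue;

        unsigned px = ((buf[i + y] & 0x70) << 4) | buf[i + y + 1];
        unsigned py = ((buf[i + y] & 0x07) << 8) | buf[i + y + 2];

        printf("finger %d: x=%u y=%u\n", i, px, py);
    }
    return 0;
}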
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 704e99046916..b6f95f20f924 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -106,27 +106,29 @@ struct stmfts_data {
bool running;
};
-static void stmfts_brightness_set(struct led_classdev *led_cdev,
+static int stmfts_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct stmfts_data *sdata = container_of(led_cdev,
struct stmfts_data, led_cdev);
int err;
- if (value == sdata->led_status || !sdata->ledvdd)
- return;
-
- if (!value) {
- regulator_disable(sdata->ledvdd);
- } else {
- err = regulator_enable(sdata->ledvdd);
- if (err)
- dev_warn(&sdata->client->dev,
- "failed to disable ledvdd regulator: %d\n",
- err);
+ if (value != sdata->led_status && sdata->ledvdd) {
+ if (!value) {
+ regulator_disable(sdata->ledvdd);
+ } else {
+ err = regulator_enable(sdata->ledvdd);
+ if (err) {
+ dev_warn(&sdata->client->dev,
+ "failed to disable ledvdd regulator: %d\n",
+ err);
+ return err;
+ }
+ }
+ sdata->led_status = value;
}
- sdata->led_status = value;
+ return 0;
}
static enum led_brightness stmfts_brightness_get(struct led_classdev *led_cdev)
@@ -608,7 +610,7 @@ static int stmfts_enable_led(struct stmfts_data *sdata)
sdata->led_cdev.name = STMFTS_DEV_NAME;
sdata->led_cdev.max_brightness = LED_ON;
sdata->led_cdev.brightness = LED_OFF;
- sdata->led_cdev.brightness_set = stmfts_brightness_set;
+ sdata->led_cdev.brightness_set_blocking = stmfts_brightness_set;
sdata->led_cdev.brightness_get = stmfts_brightness_get;
err = devm_led_classdev_register(&sdata->client->dev, &sdata->led_cdev);
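Moving from brightness_set to brightness_set_blocking matters because the regulator calls can sleep and can fail; the blocking variant returns an int so errors propagate to the LED core. A schematic, standalone illustration of the contract difference with invented types (not the real LED class API):

#include <stdio.h>

/* Invented stand-ins for the two callback styles. */
struct led {
    int status;
    void (*set)(struct led *led, int value);         /* may not sleep */
    int (*set_blocking)(struct led *led, int value); /* may sleep, may fail */
};

static int demo_set_blocking(struct led *led, int value)
{
    if (value == led->status)
        return 0;   /* nothing to do */
    if (value > 1)
        return -22; /* pretend the regulator refused (like -EINVAL) */
    led->status = value;
    return 0;
}

int main(void)
{
    struct led led = { .status = 0, .set_blocking = demo_set_blocking };
    int err = led.set_blocking(&led, 1);

    printf("set to 1 -> %d, status=%d\n", err, led.status);
    err = led.set_blocking(&led, 5);
    printf("set to 5 -> %d, status=%d\n", err, led.status);
    return 0;
}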
diff --git a/drivers/input/touchscreen/sx8654.c b/drivers/input/touchscreen/sx8654.c
index ed29db3ec731..dbdf4898aa17 100644
--- a/drivers/input/touchscreen/sx8654.c
+++ b/drivers/input/touchscreen/sx8654.c
@@ -27,12 +27,16 @@
* published by the Free Software Foundation.
*/
-#include <linux/input.h>
-#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
/* register addresses */
#define I2C_REG_TOUCH0 0x00
@@ -42,25 +46,28 @@
#define I2C_REG_IRQSRC 0x23
#define I2C_REG_SOFTRESET 0x3f
+#define I2C_REG_SX8650_STAT 0x05
+#define SX8650_STAT_CONVIRQ BIT(7)
+
/* commands */
#define CMD_READ_REGISTER 0x40
-#define CMD_MANUAL 0xc0
#define CMD_PENTRG 0xe0
/* value for I2C_REG_SOFTRESET */
#define SOFTRESET_VALUE 0xde
/* bits for I2C_REG_IRQSRC */
-#define IRQ_PENTOUCH_TOUCHCONVDONE 0x08
-#define IRQ_PENRELEASE 0x04
+#define IRQ_PENTOUCH_TOUCHCONVDONE BIT(3)
+#define IRQ_PENRELEASE BIT(2)
/* bits for RegTouch1 */
#define CONDIRQ 0x20
+#define RPDNT_100K 0x00
#define FILT_7SA 0x03
/* bits for I2C_REG_CHANMASK */
-#define CONV_X 0x80
-#define CONV_Y 0x40
+#define CONV_X BIT(7)
+#define CONV_Y BIT(6)
/* coordinates rate: higher nibble of CTRL0 register */
#define RATE_MANUAL 0x00
@@ -69,13 +76,122 @@
/* power delay: lower nibble of CTRL0 register */
#define POWDLY_1_1MS 0x0b
+/* For the sx8650, which has no pen release IRQ: timeout after the
+ * last PENIRQ, after which we assume the pen was lifted.
+ */
+#define SX8650_PENIRQ_TIMEOUT msecs_to_jiffies(10)
+
#define MAX_12BIT ((1 << 12) - 1)
+#define MAX_I2C_READ_LEN 10 /* see datasheet section 5.1.5 */
+
+/* channel definition */
+#define CH_X 0x00
+#define CH_Y 0x01
+
+struct sx865x_data {
+ u8 cmd_manual;
+ u8 chan_mask;
+ bool has_irq_penrelease;
+ bool has_reg_irqmask;
+ irq_handler_t irqh;
+};
struct sx8654 {
struct input_dev *input;
struct i2c_client *client;
+ struct gpio_desc *gpio_reset;
+
+ spinlock_t lock; /* for input reporting from irq/timer */
+ struct timer_list timer;
+
+ struct touchscreen_properties props;
+
+ const struct sx865x_data *data;
};
+static inline void sx865x_penrelease(struct sx8654 *ts)
+{
+ struct input_dev *input_dev = ts->input;
+
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_sync(input_dev);
+}
+
+static void sx865x_penrelease_timer_handler(struct timer_list *t)
+{
+ struct sx8654 *ts = from_timer(ts, t, timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ts->lock, flags);
+ sx865x_penrelease(ts);
+ spin_unlock_irqrestore(&ts->lock, flags);
+ dev_dbg(&ts->client->dev, "penrelease by timer\n");
+}
+
+static irqreturn_t sx8650_irq(int irq, void *handle)
+{
+ struct sx8654 *ts = handle;
+ struct device *dev = &ts->client->dev;
+ int len, i;
+ unsigned long flags;
+ u8 stat;
+ u16 x, y;
+ u16 ch;
+ u16 chdata;
+ __be16 data[MAX_I2C_READ_LEN / sizeof(__be16)];
+ u8 nchan = hweight32(ts->data->chan_mask);
+ u8 readlen = nchan * sizeof(*data);
+
+ stat = i2c_smbus_read_byte_data(ts->client, CMD_READ_REGISTER
+ | I2C_REG_SX8650_STAT);
+
+ if (!(stat & SX8650_STAT_CONVIRQ)) {
+ dev_dbg(dev, "%s ignore stat [0x%02x]", __func__, stat);
+ return IRQ_HANDLED;
+ }
+
+ len = i2c_master_recv(ts->client, (u8 *)data, readlen);
+ if (len != readlen) {
+ dev_dbg(dev, "ignore short recv (%d)\n", len);
+ return IRQ_HANDLED;
+ }
+
+ spin_lock_irqsave(&ts->lock, flags);
+
+ x = 0;
+ y = 0;
+ for (i = 0; i < nchan; i++) {
+ chdata = be16_to_cpu(data[i]);
+
+ if (unlikely(chdata == 0xFFFF)) {
+ dev_dbg(dev, "invalid qualified data @ %d\n", i);
+ continue;
+ } else if (unlikely(chdata & 0x8000)) {
+ dev_warn(dev, "hibit @ %d [0x%04x]\n", i, chdata);
+ continue;
+ }
+
+ ch = chdata >> 12;
+ if (ch == CH_X)
+ x = chdata & MAX_12BIT;
+ else if (ch == CH_Y)
+ y = chdata & MAX_12BIT;
+ else
+ dev_warn(dev, "unknown channel %d [0x%04x]\n", ch,
+ chdata);
+ }
+
+ touchscreen_report_pos(ts->input, &ts->props, x, y, false);
+ input_report_key(ts->input, BTN_TOUCH, 1);
+ input_sync(ts->input);
+ dev_dbg(dev, "point(%4d,%4d)\n", x, y);
+
+ mod_timer(&ts->timer, jiffies + SX8650_PENIRQ_TIMEOUT);
+ spin_unlock_irqrestore(&ts->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t sx8654_irq(int irq, void *handle)
{
struct sx8654 *sx8654 = handle;
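Each conversion word the sx8650 returns is big-endian 16 bits: the top nibble selects the channel and the low 12 bits carry the sample, which is why sx8650_irq masks with MAX_12BIT after shifting. A self-contained sketch of that decode with hand-made wire bytes (values hypothetical):

#include <stdint.h>
#include <stdio.h>

#define MAX_12BIT ((1 << 12) - 1)
#define CH_X 0x00
#define CH_Y 0x01

int main(void)
{
    /* Two hypothetical big-endian words as they would arrive on the wire. */
    uint8_t wire[4] = { 0x01, 0x23, 0x17, 0x89 };
    unsigned x = 0, y = 0;
    int i;

    for (i = 0; i < 4; i += 2) {
        uint16_t chdata = (wire[i] << 8) | wire[i + 1]; /* be16 -> host */
        unsigned ch = chdata >> 12;

        if (ch == CH_X)
            x = chdata & MAX_12BIT;
        else if (ch == CH_Y)
            y = chdata & MAX_12BIT;
        else
            printf("unknown channel %u\n", ch);
    }
    printf("point(%u,%u)\n", x, y);
    return 0;
}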
@@ -112,8 +228,8 @@ static irqreturn_t sx8654_irq(int irq, void *handle)
x = ((data[0] & 0xf) << 8) | (data[1]);
y = ((data[2] & 0xf) << 8) | (data[3]);
- input_report_abs(sx8654->input, ABS_X, x);
- input_report_abs(sx8654->input, ABS_Y, y);
+ touchscreen_report_pos(sx8654->input, &sx8654->props, x, y,
+ false);
input_report_key(sx8654->input, BTN_TOUCH, 1);
input_sync(sx8654->input);
@@ -124,6 +240,25 @@ out:
return IRQ_HANDLED;
}
+static int sx8654_reset(struct sx8654 *ts)
+{
+ int err;
+
+ if (ts->gpio_reset) {
+ gpiod_set_value_cansleep(ts->gpio_reset, 1);
+ udelay(2); /* Tpulse > 1µs */
+ gpiod_set_value_cansleep(ts->gpio_reset, 0);
+ } else {
+ dev_dbg(&ts->client->dev, "NRST unavailable, try softreset\n");
+ err = i2c_smbus_write_byte_data(ts->client, I2C_REG_SOFTRESET,
+ SOFTRESET_VALUE);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int sx8654_open(struct input_dev *dev)
{
struct sx8654 *sx8654 = input_get_drvdata(dev);
@@ -157,14 +292,17 @@ static void sx8654_close(struct input_dev *dev)
disable_irq(client->irq);
+ if (!sx8654->data->has_irq_penrelease)
+ del_timer_sync(&sx8654->timer);
+
/* enable manual mode */
- error = i2c_smbus_write_byte(client, CMD_MANUAL);
+ error = i2c_smbus_write_byte(client, sx8654->data->cmd_manual);
if (error) {
dev_err(&client->dev, "writing command CMD_MANUAL failed");
return;
}
- error = i2c_smbus_write_byte_data(client, I2C_REG_TOUCH0, 0);
+ error = i2c_smbus_write_byte_data(client, I2C_REG_TOUCH0, RATE_MANUAL);
if (error) {
dev_err(&client->dev, "writing to I2C_REG_TOUCH0 failed");
return;
@@ -186,6 +324,31 @@ static int sx8654_probe(struct i2c_client *client,
if (!sx8654)
return -ENOMEM;
+ sx8654->gpio_reset = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(sx8654->gpio_reset)) {
+ error = PTR_ERR(sx8654->gpio_reset);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev, "unable to get reset-gpio: %d\n",
+ error);
+ return error;
+ }
+ dev_dbg(&client->dev, "got GPIO reset pin\n");
+
+ sx8654->data = device_get_match_data(&client->dev);
+ if (!sx8654->data)
+ sx8654->data = (const struct sx865x_data *)id->driver_data;
+ if (!sx8654->data) {
+ dev_err(&client->dev, "invalid or missing device data\n");
+ return -EINVAL;
+ }
+
+ if (!sx8654->data->has_irq_penrelease) {
+ dev_dbg(&client->dev, "use timer for penrelease\n");
+ timer_setup(&sx8654->timer, sx865x_penrelease_timer_handler, 0);
+ spin_lock_init(&sx8654->lock);
+ }
+
input = devm_input_allocate_device(&client->dev);
if (!input)
return -ENOMEM;
@@ -201,43 +364,46 @@ static int sx8654_probe(struct i2c_client *client,
input_set_abs_params(input, ABS_X, 0, MAX_12BIT, 0, 0);
input_set_abs_params(input, ABS_Y, 0, MAX_12BIT, 0, 0);
+ touchscreen_parse_properties(input, false, &sx8654->props);
+
sx8654->client = client;
sx8654->input = input;
input_set_drvdata(sx8654->input, sx8654);
- error = i2c_smbus_write_byte_data(client, I2C_REG_SOFTRESET,
- SOFTRESET_VALUE);
+ error = sx8654_reset(sx8654);
if (error) {
- dev_err(&client->dev, "writing softreset value failed");
+ dev_err(&client->dev, "reset failed");
return error;
}
error = i2c_smbus_write_byte_data(client, I2C_REG_CHANMASK,
- CONV_X | CONV_Y);
+ sx8654->data->chan_mask);
if (error) {
dev_err(&client->dev, "writing to I2C_REG_CHANMASK failed");
return error;
}
- error = i2c_smbus_write_byte_data(client, I2C_REG_IRQMASK,
- IRQ_PENTOUCH_TOUCHCONVDONE |
- IRQ_PENRELEASE);
- if (error) {
- dev_err(&client->dev, "writing to I2C_REG_IRQMASK failed");
- return error;
+ if (sx8654->data->has_reg_irqmask) {
+ error = i2c_smbus_write_byte_data(client, I2C_REG_IRQMASK,
+ IRQ_PENTOUCH_TOUCHCONVDONE |
+ IRQ_PENRELEASE);
+ if (error) {
+ dev_err(&client->dev, "writing I2C_REG_IRQMASK failed");
+ return error;
+ }
}
error = i2c_smbus_write_byte_data(client, I2C_REG_TOUCH1,
- CONDIRQ | FILT_7SA);
+ CONDIRQ | RPDNT_100K | FILT_7SA);
if (error) {
dev_err(&client->dev, "writing to I2C_REG_TOUCH1 failed");
return error;
}
error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, sx8654_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ NULL, sx8654->data->irqh,
+ IRQF_ONESHOT,
client->name, sx8654);
if (error) {
dev_err(&client->dev,
@@ -256,17 +422,48 @@ static int sx8654_probe(struct i2c_client *client,
return 0;
}
+static const struct sx865x_data sx8650_data = {
+ .cmd_manual = 0xb0,
+ .has_irq_penrelease = false,
+ .has_reg_irqmask = false,
+ .chan_mask = (CONV_X | CONV_Y),
+ .irqh = sx8650_irq,
+};
+
+static const struct sx865x_data sx8654_data = {
+ .cmd_manual = 0xc0,
+ .has_irq_penrelease = true,
+ .has_reg_irqmask = true,
+ .chan_mask = (CONV_X | CONV_Y),
+ .irqh = sx8654_irq,
+};
+
#ifdef CONFIG_OF
static const struct of_device_id sx8654_of_match[] = {
- { .compatible = "semtech,sx8654", },
- { },
+ {
+ .compatible = "semtech,sx8650",
+ .data = &sx8650_data,
+ }, {
+ .compatible = "semtech,sx8654",
+ .data = &sx8654_data,
+ }, {
+ .compatible = "semtech,sx8655",
+ .data = &sx8654_data,
+ }, {
+ .compatible = "semtech,sx8656",
+ .data = &sx8654_data,
+ },
+ { }
};
MODULE_DEVICE_TABLE(of, sx8654_of_match);
#endif
static const struct i2c_device_id sx8654_id_table[] = {
- { "semtech_sx8654", 0 },
- { },
+ { .name = "semtech_sx8650", .driver_data = (long)&sx8650_data },
+ { .name = "semtech_sx8654", .driver_data = (long)&sx8654_data },
+ { .name = "semtech_sx8655", .driver_data = (long)&sx8654_data },
+ { .name = "semtech_sx8656", .driver_data = (long)&sx8654_data },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sx8654_id_table);
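Both match tables now carry per-variant sx865x_data pointers, and probe tries device_get_match_data() first before falling back to id->driver_data. A small standalone sketch of that string-keyed variant lookup, with invented names and fields:

#include <stdio.h>
#include <string.h>

struct variant_data { unsigned char cmd_manual; int has_penrelease_irq; };

static const struct variant_data v8650 = { 0xb0, 0 };
static const struct variant_data v8654 = { 0xc0, 1 };

struct match { const char *name; const struct variant_data *data; };

static const struct match of_table[] = {
    { "semtech,sx8650", &v8650 },
    { "semtech,sx8654", &v8654 },
    { NULL, NULL },
};

/* Mimics device_get_match_data(): NULL when the name is not in the table. */
static const struct variant_data *get_match_data(const char *compatible)
{
    const struct match *m;

    for (m = of_table; m->name; m++)
        if (!strcmp(m->name, compatible))
            return m->data;
    return NULL;
}

int main(void)
{
    const struct variant_data *data = get_match_data("semtech,sx8650");

    if (!data)
        return 1; /* the driver would fall back to id->driver_data here */
    printf("cmd_manual %#x, penrelease irq: %d\n",
           data->cmd_manual, data->has_penrelease_irq);
    return 0;
}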
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 9e8684ab48f4..83e685557a19 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -507,10 +507,8 @@ static int titsc_remove(struct platform_device *pdev)
static int __maybe_unused titsc_suspend(struct device *dev)
{
struct titsc *ts_dev = dev_get_drvdata(dev);
- struct ti_tscadc_dev *tscadc_dev;
unsigned int idle;
- tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
if (device_may_wakeup(dev)) {
titsc_writel(ts_dev, REG_IRQSTATUS, TSC_IRQENB_MASK);
idle = titsc_readl(ts_dev, REG_IRQENABLE);
@@ -524,9 +522,7 @@ static int __maybe_unused titsc_suspend(struct device *dev)
static int __maybe_unused titsc_resume(struct device *dev)
{
struct titsc *ts_dev = dev_get_drvdata(dev);
- struct ti_tscadc_dev *tscadc_dev;
- tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
if (device_may_wakeup(dev)) {
titsc_writel(ts_dev, REG_IRQWAKEUP,
0x00);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d9a25715650e..6f07f3b21816 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -1,3 +1,7 @@
+# The IOVA library may also be used by non-IOMMU_API users
+config IOMMU_IOVA
+ tristate
+
# IOMMU_API always gets selected by whoever wants it.
config IOMMU_API
bool
@@ -81,9 +85,6 @@ config IOMMU_DEFAULT_PASSTHROUGH
If unsure, say N here.
-config IOMMU_IOVA
- tristate
-
config OF_IOMMU
def_bool y
depends on OF && IOMMU_API
@@ -282,6 +283,7 @@ config ROCKCHIP_IOMMU
config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support"
depends on ARCH_TEGRA_2x_SOC
+ depends on TEGRA_MC
select IOMMU_API
help
Enables support for remapping discontiguous physical memory
@@ -435,4 +437,13 @@ config QCOM_IOMMU
help
Support for IOMMU on certain Qualcomm SoCs.
+config HYPERV_IOMMU
+ bool "Hyper-V x2APIC IRQ Handling"
+ depends on HYPERV
+ select IOMMU_API
+ default HYPERV
+ help
+ Stub IOMMU driver to handle IRQs so as to allow Hyper-V Linux
+ guests to run with x2APIC mode enabled.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index a158a68c8ea8..8c71a15e986b 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
+obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2a7b78bb98b4..f7cdd2ab7f11 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -18,6 +18,7 @@
*/
#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/ratelimit.h>
#include <linux/pci.h>
@@ -139,10 +140,14 @@ static struct lock_class_key reserved_rbtree_key;
static inline int match_hid_uid(struct device *dev,
struct acpihid_map_entry *entry)
{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
const char *hid, *uid;
- hid = acpi_device_hid(ACPI_COMPANION(dev));
- uid = acpi_device_uid(ACPI_COMPANION(dev));
+ if (!adev)
+ return -ENODEV;
+
+ hid = acpi_device_hid(adev);
+ uid = acpi_device_uid(adev);
if (!hid || !(*hid))
return -ENODEV;
@@ -279,10 +284,10 @@ static u16 get_alias(struct device *dev)
return pci_alias;
}
- pr_info("Using IVRS reported alias %02x:%02x.%d "
- "for device %s[%04x:%04x], kernel reported alias "
+ pci_info(pdev, "Using IVRS reported alias %02x:%02x.%d "
+ "for device [%04x:%04x], kernel reported alias "
"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
- PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+ PCI_FUNC(ivrs_alias), pdev->vendor, pdev->device,
PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
PCI_FUNC(pci_alias));
@@ -293,9 +298,8 @@ static u16 get_alias(struct device *dev)
if (pci_alias == devid &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
pci_add_dma_alias(pdev, ivrs_alias & 0xff);
- pr_info("Added PCI DMA alias %02x.%d for %s\n",
- PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
- dev_name(dev));
+ pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
+ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
}
return ivrs_alias;
@@ -545,7 +549,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
dev_data = get_dev_data(&pdev->dev);
if (dev_data && __ratelimit(&dev_data->rs)) {
- dev_err(&pdev->dev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
domain_id, address, flags);
} else if (printk_ratelimit()) {
pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
@@ -2258,8 +2262,7 @@ static int amd_iommu_add_device(struct device *dev)
ret = iommu_init_device(dev);
if (ret) {
if (ret != -ENOTSUPP)
- pr_err("Failed to initialize device %s - trying to proceed anyway\n",
- dev_name(dev));
+ dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_ignore_device(dev);
dev->dma_ops = NULL;
@@ -2569,6 +2572,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *s;
unsigned long address;
u64 dma_mask;
+ int ret;
domain = get_domain(dev);
if (IS_ERR(domain))
@@ -2591,7 +2595,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
for (j = 0; j < pages; ++j) {
unsigned long bus_addr, phys_addr;
- int ret;
bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
@@ -2605,15 +2608,20 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
/* Everything is mapped - write the right values into s->dma_address */
for_each_sg(sglist, s, nelems, i) {
- s->dma_address += address + s->offset;
+ /*
+ * Add in the remaining piece of the scatter-gather offset that
+ * was masked out when we were determining the physical address
+ * via (sg_phys(s) & PAGE_MASK) earlier.
+ */
+ s->dma_address += address + (s->offset & ~PAGE_MASK);
s->dma_length = s->length;
}
return nelems;
out_unmap:
- pr_err("%s: IOMMU mapping error in map_sg (io-pages: %d)\n",
- dev_name(dev), npages);
+ dev_err(dev, "IOMMU mapping error in map_sg (io-pages: %d reason: %d)\n",
+ npages, ret);
for_each_sg(sglist, s, nelems, i) {
int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
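The map_sg fix above adds back only the sub-page remainder of the scatterlist offset, since the page-sized part was already covered when the physical address was rounded with PAGE_MASK. The mask arithmetic in isolation, assuming 4 KiB pages and a hypothetical offset:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    /* Hypothetical scatterlist offset spanning more than one page. */
    unsigned long offset = 0x1678;

    unsigned long page_part = offset & PAGE_MASK;  /* 0x1000: handled by paging */
    unsigned long byte_part = offset & ~PAGE_MASK; /* 0x678: must be added back */

    printf("offset %#lx = pages %#lx + bytes %#lx\n",
           offset, page_part, byte_part);
    return 0;
}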
@@ -2807,7 +2815,7 @@ static int init_reserved_iova_ranges(void)
IOVA_PFN(r->start),
IOVA_PFN(r->end));
if (!val) {
- pr_err("Reserve pci-resource range failed\n");
+ pci_err(pdev, "Reserve pci-resource range %pR failed\n", r);
return -ENOMEM;
}
}
@@ -3161,24 +3169,26 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+ int type, prot = 0;
size_t length;
- int prot = 0;
if (devid < entry->devid_start || devid > entry->devid_end)
continue;
+ type = IOMMU_RESV_DIRECT;
length = entry->address_end - entry->address_start;
if (entry->prot & IOMMU_PROT_IR)
prot |= IOMMU_READ;
if (entry->prot & IOMMU_PROT_IW)
prot |= IOMMU_WRITE;
+ if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+ /* Exclusion range */
+ type = IOMMU_RESV_RESERVED;
region = iommu_alloc_resv_region(entry->address_start,
- length, prot,
- IOMMU_RESV_DIRECT);
+ length, prot, type);
if (!region) {
- pr_err("Out of memory allocating dm-regions for %s\n",
- dev_name(dev));
+ dev_err(dev, "Out of memory allocating dm-regions\n");
return;
}
list_add_tail(&region->list, head);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 66123b911ec8..ff40ba758cf3 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -18,6 +18,7 @@
*/
#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/pci.h>
#include <linux/acpi.h>
@@ -358,7 +359,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
u64 start = iommu->exclusion_start & PAGE_MASK;
- u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+ u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
u64 entry;
if (!iommu->exclusion_start)
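The exclusion-range fix subtracts one before masking because the limit register should name the last page inside the range, not the first page past it. A quick standalone demonstration for a hypothetical 8 KiB range at 0x10000 with 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long long start = 0x10000, length = 0x2000;

    unsigned long long old = (start + length) & PAGE_MASK;       /* 0x12000 */
    unsigned long long fixed = (start + length - 1) & PAGE_MASK; /* 0x11000 */

    printf("old limit %#llx (one page too far), fixed limit %#llx\n",
           old, fixed);
    return 0;
}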
@@ -1457,8 +1458,7 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
- pr_info("Applying erratum 746 workaround for IOMMU at %s\n",
- dev_name(&iommu->dev->dev));
+ pci_info(iommu->dev, "Applying erratum 746 workaround\n");
/* Clear the enable writing bit */
pci_write_config_dword(iommu->dev, 0xf0, 0x90);
@@ -1488,8 +1488,7 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
iommu_write_l2(iommu, 0x47, value | BIT(0));
- pr_info("Applying ATS write check workaround for IOMMU at %s\n",
- dev_name(&iommu->dev->dev));
+ pci_info(iommu->dev, "Applying ATS write check workaround\n");
}
/*
@@ -1665,6 +1664,7 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
+ struct pci_dev *pdev = iommu->dev;
u64 val = 0xabcd, val2 = 0;
if (!iommu_feature(iommu, FEATURE_PC))
@@ -1676,12 +1676,12 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
(iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
(val != val2)) {
- pr_err("Unable to write to IOMMU perf counter.\n");
+ pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
amd_iommu_pc_present = false;
return;
}
- pr_info("IOMMU performance counters supported\n");
+ pci_info(pdev, "IOMMU performance counters supported\n");
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
iommu->max_banks = (u8) ((val >> 12) & 0x3f);
@@ -1840,14 +1840,14 @@ static void print_iommu_info(void)
struct amd_iommu *iommu;
for_each_iommu(iommu) {
+ struct pci_dev *pdev = iommu->dev;
int i;
- pr_info("Found IOMMU at %s cap 0x%hx\n",
- dev_name(&iommu->dev->dev), iommu->cap_ptr);
+ pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pr_info("Extended features (%#llx):\n",
- iommu->features);
+ pci_info(pdev, "Extended features (%#llx):\n",
+ iommu->features);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
pr_cont(" %s", feat_str[i]);
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
if (e == NULL)
return -ENOMEM;
+ if (m->flags & IVMD_FLAG_EXCL_RANGE)
+ init_exclusion_range(m);
+
switch (m->type) {
default:
kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
while (p < end) {
m = (struct ivmd_header *)p;
- if (m->flags & IVMD_FLAG_EXCL_RANGE)
- init_exclusion_range(m);
- else if (m->flags & IVMD_FLAG_UNITY_MAP)
+ if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
init_unity_map_range(m);
p += m->length;
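With exclusion ranges now funneled through init_unity_map_range(), a single mask test covers both IVMD flags and the exclusion case is dispatched inside. A standalone sketch of that combined test; the flag values here are assumptions chosen for illustration, not copied from the spec:

#include <stdio.h>

/* Assumed flag values, for illustration only. */
#define IVMD_FLAG_UNITY_MAP  0x01
#define IVMD_FLAG_EXCL_RANGE 0x08

static void init_range(unsigned flags)
{
    /* One combined test replaces the old if/else-if chain. */
    if (!(flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))) {
        printf("flags %#x: skipped\n", flags);
        return;
    }
    printf("flags %#x: unity map%s\n", flags,
           (flags & IVMD_FLAG_EXCL_RANGE) ? " (exclusion -> reserved)" : "");
}

int main(void)
{
    init_range(IVMD_FLAG_UNITY_MAP);
    init_range(IVMD_FLAG_EXCL_RANGE);
    init_range(0);
    return 0;
}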
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index eae0741f72dc..87965e4d9647 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -374,6 +374,8 @@
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
+
/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB 24
#define IOMMU_CAP_NPCACHE 26
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 23dae9348ace..5d7ef750e4a0 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -370,29 +370,6 @@ static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
return container_of(mn, struct pasid_state, mn);
}
-static void __mn_flush_page(struct mmu_notifier *mn,
- unsigned long address)
-{
- struct pasid_state *pasid_state;
- struct device_state *dev_state;
-
- pasid_state = mn_to_state(mn);
- dev_state = pasid_state->device_state;
-
- amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
-}
-
-static int mn_clear_flush_young(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
-{
- for (; start < end; start += PAGE_SIZE)
- __mn_flush_page(mn, start);
-
- return 0;
-}
-
static void mn_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
@@ -430,7 +407,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops iommu_mn = {
.release = mn_release,
- .clear_flush_young = mn_clear_flush_young,
.invalidate_range = mn_invalidate_range,
};
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 0d284029dc73..d3880010c6cf 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -18,6 +18,7 @@
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
@@ -32,8 +33,6 @@
#include <linux/amba/bus.h>
-#include "io-pgtable.h"
-
/* MMIO registers */
#define ARM_SMMU_IDR0 0x0
#define IDR0_ST_LVL GENMASK(28, 27)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index af18a7e7f917..045d93884164 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -39,6 +39,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
@@ -56,7 +57,6 @@
#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>
-#include "io-pgtable.h"
#include "arm-smmu-regs.h"
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d19f3d6b43c1..77aabe637a60 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -289,7 +289,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
- unsigned long order, base_pfn, end_pfn;
+ unsigned long order, base_pfn;
int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
@@ -298,7 +298,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
- end_pfn = (base + size - 1) >> order;
/* Check the domain allows at least some access to the device... */
if (domain->geometry.force_aperture) {
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
new file mode 100644
index 000000000000..a386b83e0e34
--- /dev/null
+++ b/drivers/iommu/hyperv-iommu.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Hyper-V stub IOMMU driver.
+ *
+ * Copyright (C) 2019, Microsoft, Inc.
+ *
+ * Author : Lan Tianyu <Tianyu.Lan@microsoft.com>
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+
+#include <asm/apic.h>
+#include <asm/cpu.h>
+#include <asm/hw_irq.h>
+#include <asm/io_apic.h>
+#include <asm/irq_remapping.h>
+#include <asm/hypervisor.h>
+
+#include "irq_remapping.h"
+
+#ifdef CONFIG_IRQ_REMAP
+
+/*
+ * According to the 82093AA IO-APIC spec, the IO-APIC has a 24-entry
+ * Interrupt Redirection Table. Hyper-V exposes a single IO-APIC, so
+ * define 24 IO-APIC remapping entries.
+ */
+#define IOAPIC_REMAPPING_ENTRY 24
+
+static cpumask_t ioapic_max_cpumask = { CPU_BITS_NONE };
+static struct irq_domain *ioapic_ir_domain;
+
+static int hyperv_ir_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct irq_data *parent = data->parent_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct IO_APIC_route_entry *entry;
+ int ret;
+
+ /* Return an error if the new irq affinity is outside ioapic_max_cpumask. */
+ if (!cpumask_subset(mask, &ioapic_max_cpumask))
+ return -EINVAL;
+
+ ret = parent->chip->irq_set_affinity(parent, mask, force);
+ if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+ return ret;
+
+ entry = data->chip_data;
+ entry->dest = cfg->dest_apicid;
+ entry->vector = cfg->vector;
+ send_cleanup_vector(cfg);
+
+ return 0;
+}
+
+static struct irq_chip hyperv_ir_chip = {
+ .name = "HYPERV-IR",
+ .irq_ack = apic_ack_irq,
+ .irq_set_affinity = hyperv_ir_set_affinity,
+};
+
+static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ struct irq_data *irq_data;
+ struct irq_desc *desc;
+ int ret = 0;
+
+ if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
+ return -EINVAL;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret < 0)
+ return ret;
+
+ irq_data = irq_domain_get_irq_data(domain, virq);
+ if (!irq_data) {
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ return -EINVAL;
+ }
+
+ irq_data->chip = &hyperv_ir_chip;
+
+ /*
+ * With an IOMMU that implements interrupt remapping, setting irq
+ * affinity only requires updating the IOMMU's IRTE. Hyper-V does not
+ * support interrupt remapping, so setting the affinity of IO-APIC
+ * interrupts still requires writing IO-APIC registers. However,
+ * ioapic_configure_entry() ignores cfg->vector and cfg->dest_apicid
+ * when the IO-APIC's parent irq domain is not the vector domain. To
+ * get vector and dest_apicid into the IO-APIC register anyway, the
+ * IO-APIC entry pointer is saved in chip_data, and
+ * hyperv_irq_remapping_activate()/hyperv_ir_set_affinity() write them
+ * directly into the IO-APIC entry.
+ */
+ irq_data->chip_data = info->ioapic_entry;
+
+ /*
+ * Since Hyper-V offers no irq remapping, IO-APIC irq affinity must
+ * stay within the scope of ioapic_max_cpumask.
+ */
+ desc = irq_data_to_desc(irq_data);
+ cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);
+
+ return 0;
+}
+
+static void hyperv_irq_remapping_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static int hyperv_irq_remapping_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool reserve)
+{
+ struct irq_cfg *cfg = irqd_cfg(irq_data);
+ struct IO_APIC_route_entry *entry = irq_data->chip_data;
+
+ entry->dest = cfg->dest_apicid;
+ entry->vector = cfg->vector;
+
+ return 0;
+}
+
+static struct irq_domain_ops hyperv_ir_domain_ops = {
+ .alloc = hyperv_irq_remapping_alloc,
+ .free = hyperv_irq_remapping_free,
+ .activate = hyperv_irq_remapping_activate,
+};
+
+static int __init hyperv_prepare_irq_remapping(void)
+{
+ struct fwnode_handle *fn;
+ int i;
+
+ if (!hypervisor_is_type(X86_HYPER_MS_HYPERV) ||
+ !x2apic_supported())
+ return -ENODEV;
+
+ fn = irq_domain_alloc_named_id_fwnode("HYPERV-IR", 0);
+ if (!fn)
+ return -ENOMEM;
+
+ ioapic_ir_domain =
+ irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
+ 0, IOAPIC_REMAPPING_ENTRY, fn,
+ &hyperv_ir_domain_ops, NULL);
+
+ irq_domain_free_fwnode(fn);
+
+ /*
+ * Hyper-V provides no irq remapping for the IO-APIC, so the IO-APIC
+ * only accepts 8-bit APIC IDs. Each CPU's APIC ID is read from the
+ * ACPI MADT table, and on Hyper-V the APIC IDs in the MADT are sorted
+ * in monotonically increasing order. The APIC ID reflects the CPU
+ * topology, so there may be APIC ID gaps when the CPU count in a
+ * socket is not a power of two. Prepare the maximum CPU affinity for
+ * IO-APIC irqs: scan CPUs 0-255 and set each CPU whose APIC ID is
+ * below 256 in ioapic_max_cpumask.
+ */
+ for (i = min_t(unsigned int, num_possible_cpus() - 1, 255); i >= 0; i--)
+ if (cpu_physical_id(i) < 256)
+ cpumask_set_cpu(i, &ioapic_max_cpumask);
+
+ return 0;
+}
+
+static int __init hyperv_enable_irq_remapping(void)
+{
+ return IRQ_REMAP_X2APIC_MODE;
+}
+
+static struct irq_domain *hyperv_get_ir_irq_domain(struct irq_alloc_info *info)
+{
+ if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC)
+ return ioapic_ir_domain;
+ else
+ return NULL;
+}
+
+struct irq_remap_ops hyperv_irq_remap_ops = {
+ .prepare = hyperv_prepare_irq_remapping,
+ .enable = hyperv_enable_irq_remapping,
+ .get_ir_irq_domain = hyperv_get_ir_irq_domain,
+};
+
+#endif
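The preparation step above walks the possible CPUs and collects those whose APIC ID fits in 8 bits, since the emulated IO-APIC cannot target wider IDs without remapping. A userspace sketch of the same scan over a hypothetical, monotonically increasing APIC-ID table containing a topology gap:

#include <stdio.h>

#define NCPUS 6

int main(void)
{
    /* Hypothetical MADT-style APIC IDs, sorted, with a topology gap. */
    int apic_id[NCPUS] = { 0, 1, 2, 3, 254, 256 };
    unsigned long long mask = 0;
    int i;

    for (i = NCPUS - 1; i >= 0; i--)
        if (apic_id[i] < 256)
            mask |= 1ULL << i; /* cpu i can be an IO-APIC irq target */

    printf("ioapic-capable cpu mask: %#llx\n", mask);
    return 0;
}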
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 39a33dec4d0b..28cb713d728c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -19,6 +19,7 @@
*/
#define pr_fmt(fmt) "DMAR: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/init.h>
#include <linux/bitmap.h>
@@ -343,8 +344,7 @@ static int g_num_of_iommus;
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
-static void dmar_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev);
+static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
struct device *dev);
@@ -865,7 +865,7 @@ out:
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn, int *target_level)
{
- struct dma_pte *parent, *pte = NULL;
+ struct dma_pte *parent, *pte;
int level = agaw_to_level(domain->agaw);
int offset;
@@ -922,7 +922,7 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
int level, int *large_page)
{
- struct dma_pte *parent, *pte = NULL;
+ struct dma_pte *parent, *pte;
int total = agaw_to_level(domain->agaw);
int offset;
@@ -954,7 +954,7 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
- unsigned int large_page = 1;
+ unsigned int large_page;
struct dma_pte *first_pte, *pte;
BUG_ON(!domain_pfn_supported(domain, start_pfn));
@@ -1132,7 +1132,7 @@ static struct page *domain_unmap(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
- struct page *freelist = NULL;
+ struct page *freelist;
BUG_ON(!domain_pfn_supported(domain, start_pfn));
BUG_ON(!domain_pfn_supported(domain, last_pfn));
@@ -1403,10 +1403,13 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
info->pasid_enabled = 1;
- if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
+ if (info->pri_supported &&
+ (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
+ !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
info->pri_enabled = 1;
#endif
if (!pdev->untrusted && info->ats_supported &&
+ pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
domain_update_iotlb(info->domain);
@@ -1535,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
u32 pmen;
unsigned long flags;
+ if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+ return;
+
raw_spin_lock_irqsave(&iommu->register_lock, flags);
pmen = readl(iommu->reg + DMAR_PMEN_REG);
pmen &= ~DMA_PMEN_EPM;
@@ -1763,7 +1769,7 @@ static int domain_attach_iommu(struct dmar_domain *domain,
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- int num, count = INT_MAX;
+ int num, count;
assert_spin_locked(&device_domain_lock);
assert_spin_locked(&iommu->lock);
@@ -1816,7 +1822,7 @@ static int dmar_init_reserved_ranges(void)
IOVA_PFN(r->start),
IOVA_PFN(r->end));
if (!iova) {
- pr_err("Reserve iova failed\n");
+ pci_err(pdev, "Reserve iova for %pR failed\n", r);
return -ENODEV;
}
}
@@ -1902,11 +1908,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
static void domain_exit(struct dmar_domain *domain)
{
- struct page *freelist = NULL;
-
- /* Domain 0 is reserved, so dont process it */
- if (!domain)
- return;
+ struct page *freelist;
/* Remove associated devices and clear attached or cached domains */
rcu_read_lock();
@@ -2058,7 +2060,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
int agaw;
context_set_domain_id(context, did);
- context_set_translation_type(context, translation);
if (translation != CONTEXT_TT_PASS_THROUGH) {
/*
@@ -2088,6 +2089,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
*/
context_set_address_width(context, iommu->msagaw);
}
+
+ context_set_translation_type(context, translation);
}
context_set_fault_enable(context);
@@ -2486,7 +2489,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
- if (!pci_ats_disabled() &&
+ if (!pdev->untrusted &&
+ !pci_ats_disabled() &&
ecap_dev_iotlb_support(iommu->ecap) &&
pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
dmar_find_matched_atsr_unit(pdev))
@@ -2545,9 +2549,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
- pr_err("PASID table allocation for %s failed\n",
- dev_name(dev));
- dmar_remove_one_dev_info(domain, dev);
+ dev_err(dev, "PASID table allocation failed\n");
+ dmar_remove_one_dev_info(dev);
return NULL;
}
@@ -2561,16 +2564,15 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
dev, PASID_RID2PASID);
spin_unlock(&iommu->lock);
if (ret) {
- pr_err("Setup RID2PASID for %s failed\n",
- dev_name(dev));
- dmar_remove_one_dev_info(domain, dev);
+ dev_err(dev, "Setup RID2PASID failed\n");
+ dmar_remove_one_dev_info(dev);
return NULL;
}
}
if (dev && domain_context_mapping(domain, dev)) {
- pr_err("Domain context map for %s failed\n", dev_name(dev));
- dmar_remove_one_dev_info(domain, dev);
+ dev_err(dev, "Domain context map failed\n");
+ dmar_remove_one_dev_info(dev);
return NULL;
}
@@ -2585,7 +2587,7 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
- struct device_domain_info *info = NULL;
+ struct device_domain_info *info;
struct dmar_domain *domain = NULL;
struct intel_iommu *iommu;
u16 dma_alias;
@@ -2724,13 +2726,12 @@ static int domain_prepare_identity_map(struct device *dev,
range which is reserved in E820, so which didn't get set
up to start with in si_domain */
if (domain == si_domain && hw_pass_through) {
- pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
+ dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
+ start, end);
return 0;
}
- pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
+ dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
if (end < start) {
WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
@@ -2810,7 +2811,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw)
{
- int nid, ret = 0;
+ int nid, ret;
si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
if (!si_domain)
@@ -2934,7 +2935,6 @@ static bool device_is_rmrr_locked(struct device *dev)
static int iommu_should_identity_map(struct device *dev, int startup)
{
-
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
@@ -3017,8 +3017,8 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw
ret = domain_add_dev_info(si_domain, dev);
if (!ret)
- pr_info("%s identity mapping for device %s\n",
- hw ? "Hardware" : "Software", dev_name(dev));
+ dev_info(dev, "%s identity mapping\n",
+ hw ? "Hardware" : "Software");
else if (ret == -ENODEV)
/* device not associated with an iommu */
ret = 0;
@@ -3530,7 +3530,7 @@ static unsigned long intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
unsigned long nrpages, uint64_t dma_mask)
{
- unsigned long iova_pfn = 0;
+ unsigned long iova_pfn;
/* Restrict dma_mask to the width that the iommu can handle */
dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
@@ -3551,8 +3551,7 @@ static unsigned long intel_alloc_iova(struct device *dev,
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
IOVA_PFN(dma_mask), true);
if (unlikely(!iova_pfn)) {
- pr_err("Allocating %ld-page iova for %s failed",
- nrpages, dev_name(dev));
+ dev_err(dev, "Allocating %ld-page iova failed", nrpages);
return 0;
}
@@ -3600,7 +3599,7 @@ struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
out:
if (!domain)
- pr_err("Allocating domain for %s failed\n", dev_name(dev));
+ dev_err(dev, "Allocating domain failed\n");
return domain;
@@ -3626,9 +3625,8 @@ static int iommu_no_mapping(struct device *dev)
* 32 bit DMA is removed from si_domain and fall back
* to non-identity mapping.
*/
- dmar_remove_one_dev_info(si_domain, dev);
- pr_info("32bit %s uses non-identity mapping\n",
- dev_name(dev));
+ dmar_remove_one_dev_info(dev);
+ dev_info(dev, "32bit DMA uses non-identity mapping\n");
return 0;
}
} else {
@@ -3640,8 +3638,7 @@ static int iommu_no_mapping(struct device *dev)
int ret;
ret = domain_add_dev_info(si_domain, dev);
if (!ret) {
- pr_info("64bit %s uses identity mapping\n",
- dev_name(dev));
+ dev_info(dev, "64bit DMA uses identity mapping\n");
return 1;
}
}
@@ -3650,11 +3647,9 @@ static int iommu_no_mapping(struct device *dev)
return 0;
}
-static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, int dir,
- u64 dma_mask)
+static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
+ size_t size, int dir, u64 dma_mask)
{
- phys_addr_t paddr = page_to_phys(page) + offset;
struct dmar_domain *domain;
phys_addr_t start_paddr;
unsigned long iova_pfn;
@@ -3706,8 +3701,8 @@ static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
error:
if (iova_pfn)
free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
- pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
- dev_name(dev), size, (unsigned long long)paddr, dir);
+ dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
+ size, (unsigned long long)paddr, dir);
return DMA_MAPPING_ERROR;
}
@@ -3716,7 +3711,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
+ return __intel_map_single(dev, page_to_phys(page) + offset, size,
+ dir, *dev->dma_mask);
+}
+
+static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
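Rebasing the mapping path on __intel_map_single() means one phys_addr_t-centric core can back both intel_map_page() and the new intel_map_resource(). A schematic of that refactor shape with invented types, not the real DMA API:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

/* Core helper: everything funnels through a physical address. */
static dma_addr_t map_single(phys_addr_t paddr, size_t size)
{
    printf("map %zu bytes at phys %#llx\n", size,
           (unsigned long long)paddr);
    return paddr; /* identity "mapping", for the sketch only */
}

/* Page-based wrapper, shaped like intel_map_page(). */
static dma_addr_t map_page(phys_addr_t page_phys, size_t offset, size_t size)
{
    return map_single(page_phys + offset, size);
}

/* Resource wrapper, shaped like the new intel_map_resource(). */
static dma_addr_t map_resource(phys_addr_t phys, size_t size)
{
    return map_single(phys, size);
}

int main(void)
{
    map_page(0x100000, 0x80, 256);
    map_resource(0xfed00000, 4096);
    return 0;
}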
@@ -3742,8 +3745,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
start_pfn = mm_to_dma_pfn(iova_pfn);
last_pfn = start_pfn + nrpages - 1;
- pr_debug("Device %s unmapping: pfn %lx-%lx\n",
- dev_name(dev), start_pfn, last_pfn);
+ dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
freelist = domain_unmap(domain, start_pfn, last_pfn);
@@ -3807,8 +3809,9 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
return NULL;
memset(page_address(page), 0, size);
- *dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
- dev->coherent_dma_mask);
+ *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+ DMA_BIDIRECTIONAL,
+ dev->coherent_dma_mask);
if (*dma_handle != DMA_MAPPING_ERROR)
return page_address(page);
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
@@ -3925,6 +3928,8 @@ static const struct dma_map_ops intel_dma_ops = {
.unmap_sg = intel_unmap_sg,
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
+ .map_resource = intel_map_resource,
+ .unmap_resource = intel_unmap_page,
.dma_supported = dma_direct_supported,
};
@@ -4340,7 +4345,7 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
- int sp, ret = 0;
+ int sp, ret;
struct intel_iommu *iommu = dmaru->iommu;
if (g_iommus[iommu->seq_id])
@@ -4504,7 +4509,7 @@ out:
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
- int ret = 0;
+ int ret;
struct dmar_rmrr_unit *rmrru;
struct dmar_atsr_unit *atsru;
struct acpi_dmar_atsr *atsr;
@@ -4521,7 +4526,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
((void *)rmrr) + rmrr->header.length,
rmrr->segment, rmrru->devices,
rmrru->devices_cnt);
- if(ret < 0)
+ if (ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
dmar_remove_dev_scope(info, rmrr->segment,
@@ -4541,7 +4546,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
atsru->devices_cnt);
if (ret > 0)
break;
- else if(ret < 0)
+ else if (ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
if (dmar_remove_dev_scope(info, atsr->segment,
@@ -4568,16 +4573,19 @@ static int device_notifier(struct notifier_block *nb,
if (iommu_dummy(dev))
return 0;
- if (action != BUS_NOTIFY_REMOVED_DEVICE)
- return 0;
-
- domain = find_domain(dev);
- if (!domain)
- return 0;
+ if (action == BUS_NOTIFY_REMOVED_DEVICE) {
+ domain = find_domain(dev);
+ if (!domain)
+ return 0;
- dmar_remove_one_dev_info(domain, dev);
- if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
- domain_exit(domain);
+ dmar_remove_one_dev_info(dev);
+ if (!domain_type_is_vm_or_si(domain) &&
+ list_empty(&domain->devices))
+ domain_exit(domain);
+ } else if (action == BUS_NOTIFY_ADD_DEVICE) {
+ if (iommu_should_identity_map(dev, 1))
+ domain_add_dev_info(si_domain, dev);
+ }
return 0;
}
@@ -4988,8 +4996,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
free_devinfo_mem(info);
}
-static void dmar_remove_one_dev_info(struct dmar_domain *domain,
- struct device *dev)
+static void dmar_remove_one_dev_info(struct device *dev)
{
struct device_domain_info *info;
unsigned long flags;
@@ -5078,7 +5085,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
old_domain = find_domain(dev);
if (old_domain) {
rcu_read_lock();
- dmar_remove_one_dev_info(old_domain, dev);
+ dmar_remove_one_dev_info(dev);
rcu_read_unlock();
if (!domain_type_is_vm_or_si(old_domain) &&
@@ -5097,9 +5104,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
addr_width = cap_mgaw(iommu->cap);
if (dmar_domain->max_addr > (1LL << addr_width)) {
- pr_err("%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, addr_width, dmar_domain->max_addr);
+ dev_err(dev, "%s: iommu width (%d) is not "
+ "sufficient for the mapped address (%llx)\n",
+ __func__, addr_width, dmar_domain->max_addr);
return -EFAULT;
}
dmar_domain->gaw = addr_width;
@@ -5125,7 +5132,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
static void intel_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
+ dmar_remove_one_dev_info(dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
@@ -5328,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
ctx_lo = context[0].lo;
- sdev->did = domain->iommu_did[iommu->seq_id];
+ sdev->did = FLPT_DEFAULT_DID;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
if (!(ctx_lo & CONTEXT_PASIDE)) {
@@ -5400,7 +5407,7 @@ const struct iommu_ops intel_iommu_ops = {
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
/* G4x/GM45 integrated gfx dmar support is totally busted. */
- pr_info("Disabling IOMMU for graphics on this chipset\n");
+ pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
dmar_map_gfx = 0;
}
@@ -5418,7 +5425,7 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
* Mobile 4 Series Chipset neglects to set RWBF capability,
* but needs it. Same seems to hold for the desktop versions.
*/
- pr_info("Forcing write-buffer flush capability\n");
+ pci_info(dev, "Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
}
@@ -5448,11 +5455,11 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
return;
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
- pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+ pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
dmar_map_gfx = 0;
} else if (dmar_map_gfx) {
/* we have to ensure the gfx device is idle before we flush */
- pr_info("Disabling batched IOTLB flush on Ironlake\n");
+ pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
intel_iommu_strict = 1;
}
}
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index 53fe5248d8f1..03b12d2ee213 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -466,8 +466,8 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
if (WARN_ON(!pte))
return;
- intel_pasid_clear_entry(dev, pasid);
did = pasid_get_domain_id(pte);
+ intel_pasid_clear_entry(dev, pasid);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index a2a2aa4439aa..3a4b09ae8561 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -180,14 +180,6 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
rcu_read_unlock();
}
-static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long address, pte_t pte)
-{
- struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
- intel_flush_svm_range(svm, address, 1, 1, 0);
-}
-
/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -227,7 +219,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops intel_mmuops = {
.release = intel_mm_release,
- .change_pte = intel_change_pte,
.invalidate_range = intel_invalidate_range,
};
@@ -243,7 +234,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
int pasid_max;
int ret;
- if (!iommu)
+ if (!iommu || dmar_disabled)
return -EINVAL;
if (dev_is_pci(dev)) {
@@ -470,20 +461,31 @@ EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);
/* Page request queue descriptor */
struct page_req_dsc {
- u64 srr:1;
- u64 bof:1;
- u64 pasid_present:1;
- u64 lpig:1;
- u64 pasid:20;
- u64 bus:8;
- u64 private:23;
- u64 prg_index:9;
- u64 rd_req:1;
- u64 wr_req:1;
- u64 exe_req:1;
- u64 priv_req:1;
- u64 devfn:8;
- u64 addr:52;
+ union {
+ struct {
+ u64 type:8;
+ u64 pasid_present:1;
+ u64 priv_data_present:1;
+ u64 rsvd:6;
+ u64 rid:16;
+ u64 pasid:20;
+ u64 exe_req:1;
+ u64 pm_req:1;
+ u64 rsvd2:10;
+ };
+ u64 qw_0;
+ };
+ union {
+ struct {
+ u64 rd_req:1;
+ u64 wr_req:1;
+ u64 lpig:1;
+ u64 prg_index:9;
+ u64 addr:52;
+ };
+ u64 qw_1;
+ };
+ u64 priv_data[2];
};
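The rework above overlays the descriptor's bit-fields on two raw quadwords (qw_0/qw_1) plus the private-data words, so a queue entry can be decoded field by field or copied wholesale. A user-space sketch of the qw_0 overlay with illustrative values (bit-field layout is compiler-specific; the kernel can rely on it only because it builds for a known ABI):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the qw_0 overlay: named fields share storage with a
     * raw 64-bit word, mirroring struct page_req_dsc above. */
    union prq_qw0 {
    	struct {
    		uint64_t type:8;
    		uint64_t pasid_present:1;
    		uint64_t priv_data_present:1;
    		uint64_t rsvd:6;
    		uint64_t rid:16;
    		uint64_t pasid:20;
    		uint64_t exe_req:1;
    		uint64_t pm_req:1;
    		uint64_t rsvd2:10;
    	};
    	uint64_t qw_0;
    };

    int main(void)
    {
    	union prq_qw0 d = { .qw_0 = 0 };

    	d.rid = 0x0210;		/* requester-id: bus 0x02, devfn 0x10 */
    	d.pasid = 42;
    	printf("qw_0=%#jx rid=%#jx pasid=%ju\n",
    	       (uintmax_t)d.qw_0, (uintmax_t)d.rid, (uintmax_t)d.pasid);
    	return 0;
    }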
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
@@ -596,7 +598,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
/* Accounting for major/minor faults? */
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
- if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
+ if (sdev->sid == req->rid)
break;
}
/* Other devices can go away, but the drivers are not permitted
@@ -609,33 +611,35 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (sdev && sdev->ops && sdev->ops->fault_cb) {
int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
- (req->exe_req << 1) | (req->priv_req);
- sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, req->private, rwxp, result);
+ (req->exe_req << 1) | (req->pm_req);
+ sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
+ req->priv_data, rwxp, result);
}
/* We get here in the error case where the PASID lookup failed,
and these can be NULL. Do not use them below this point! */
sdev = NULL;
svm = NULL;
no_pasid:
- if (req->lpig) {
- /* Page Group Response */
+ if (req->lpig || req->priv_data_present) {
+ /*
+ * Per VT-d spec. v3.0 ch7.7, system software must
+ * respond with page group response if private data
+ * is present (PDP) or last page in group (LPIG) bit
+ * is set. This is an additional VT-d feature beyond
+ * PCI ATS spec.
+ */
resp.qw0 = QI_PGRP_PASID(req->pasid) |
- QI_PGRP_DID((req->bus << 8) | req->devfn) |
+ QI_PGRP_DID(req->rid) |
QI_PGRP_PASID_P(req->pasid_present) |
+ QI_PGRP_PDP(req->pasid_present) |
+ QI_PGRP_RESP_CODE(result) |
QI_PGRP_RESP_TYPE;
resp.qw1 = QI_PGRP_IDX(req->prg_index) |
- QI_PGRP_PRIV(req->private) |
- QI_PGRP_RESP_CODE(result);
- } else if (req->srr) {
- /* Page Stream Response */
- resp.qw0 = QI_PSTRM_IDX(req->prg_index) |
- QI_PSTRM_PRIV(req->private) |
- QI_PSTRM_BUS(req->bus) |
- QI_PSTRM_PASID(req->pasid) |
- QI_PSTRM_RESP_TYPE;
- resp.qw1 = QI_PSTRM_ADDR(address) |
- QI_PSTRM_DEVFN(req->devfn) |
- QI_PSTRM_RESP_CODE(result);
+ QI_PGRP_LPIG(req->lpig);
+
+ if (req->priv_data_present)
+ memcpy(&resp.qw2, req->priv_data,
+ sizeof(req->priv_data));
}
resp.qw2 = 0;
resp.qw3 = 0;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 24d45b07f425..2d74641b7f7b 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -294,6 +294,18 @@ static void set_irte_sid(struct irte *irte, unsigned int svt,
irte->sid = sid;
}
+/*
+ * Set an IRTE to match only the bus number. Interrupt requests that reference
+ * this IRTE must have a requester-id whose bus number lies between
+ * start_bus and end_bus, inclusive.
+ */
+static void set_irte_verify_bus(struct irte *irte, unsigned int start_bus,
+ unsigned int end_bus)
+{
+ set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
+ (start_bus << 8) | end_bus);
+}
+
static int set_ioapic_sid(struct irte *irte, int apic)
{
int i;
@@ -356,6 +368,8 @@ static int set_hpet_sid(struct irte *irte, u8 id)
struct set_msi_sid_data {
struct pci_dev *pdev;
u16 alias;
+ int count;
+ int busmatch_count;
};
static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
@@ -364,6 +378,10 @@ static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
data->pdev = pdev;
data->alias = alias;
+ data->count++;
+
+ if (PCI_BUS_NUM(alias) == pdev->bus->number)
+ data->busmatch_count++;
return 0;
}
@@ -375,6 +393,8 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
if (!irte || !dev)
return -1;
+ data.count = 0;
+ data.busmatch_count = 0;
pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
/*
@@ -383,6 +403,11 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
* device is the case of a PCIe-to-PCI bridge, where the alias is for
* the subordinate bus. In this case we can only verify the bus.
*
+ * If there are multiple aliases, all with the same bus number,
+ * then all we can do is verify the bus. This is typical of NTB
+ * hardware, which uses proxy IDs where the device will generate
+ * traffic from multiple devfn numbers on the same bus.
+ *
* If the alias device is on a different bus than our source device
* then we have a topology based alias, use it.
*
@@ -391,9 +416,10 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
* original device.
*/
if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
- set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
- PCI_DEVID(PCI_BUS_NUM(data.alias),
- dev->bus->number));
+ set_irte_verify_bus(irte, PCI_BUS_NUM(data.alias),
+ dev->bus->number);
+ else if (data.count >= 2 && data.busmatch_count == data.count)
+ set_irte_verify_bus(irte, dev->bus->number, dev->bus->number);
else if (data.pdev->bus->number != dev->bus->number)
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
else
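As background for the new branch: SVT_VERIFY_BUS encodes an inclusive bus window in the 16-bit SID, so passing start == end, as the busmatch case does, pins the match to a single bus. A stand-alone sketch of the packing (the helper name and bus numbers are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* (start_bus << 8) | end_bus, as in set_irte_verify_bus() above */
    static uint16_t verify_bus_sid(uint8_t start_bus, uint8_t end_bus)
    {
    	return (uint16_t)(start_bus << 8) | end_bus;
    }

    int main(void)
    {
    	/* bridge alias on bus 4, device on bus 5: verify buses 4..5 */
    	printf("bridge sid=%#06x\n", verify_bus_sid(0x04, 0x05));
    	/* NTB proxy IDs, every alias on bus 2: verify bus 2 only */
    	printf("ntb    sid=%#06x\n", verify_bus_sid(0x02, 0x02));
    	return 0;
    }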
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index cec29bf45c9b..9a8a8870e267 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -35,6 +35,7 @@
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
+#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
@@ -45,8 +46,6 @@
#include <asm/barrier.h>
-#include "io-pgtable.h"
-
/* Struct accessors */
#define io_pgtable_to_data(x) \
container_of((x), struct arm_v7s_io_pgtable, iop)
@@ -161,6 +160,14 @@
#define ARM_V7S_TCR_PD1 BIT(5)
+#ifdef CONFIG_ZONE_DMA32
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
+#else
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
+#endif
+
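These GFP/SLAB selections exist because v7s page-table pointers are 32-bit PTEs, so tables must come from memory below 4 GiB; the phys != (arm_v7s_iopte)phys check later in __arm_v7s_alloc_table catches anything that still lands too high. The truncation test in isolation (helper name is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t arm_v7s_iopte;

    /* An allocation above 4 GiB truncates when stored in a 32-bit PTE;
     * this is the driver's phys != (arm_v7s_iopte)phys guard. */
    static int fits_in_pte(uint64_t phys)
    {
    	return phys == (arm_v7s_iopte)phys;
    }

    int main(void)
    {
    	printf("%d\n", fits_in_pte(0xfffff000ull));	/* 1 */
    	printf("%d\n", fits_in_pte(0x100000000ull));	/* 0 */
    	return 0;
    }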
typedef u32 arm_v7s_iopte;
static bool selftest_running;
@@ -198,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
void *table = NULL;
if (lvl == 1)
- table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+ table = (void *)__get_free_pages(
+ __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
else if (lvl == 2)
- table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+ table = kmem_cache_zalloc(data->l2_tables, gfp);
phys = virt_to_phys(table);
- if (phys != (arm_v7s_iopte)phys)
+ if (phys != (arm_v7s_iopte)phys) {
/* Doesn't fit in PTE */
+ dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
+ }
if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
@@ -217,7 +227,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
if (dma != phys)
goto out_unmap;
}
- kmemleak_ignore(table);
+ if (lvl == 2)
+ kmemleak_ignore(table);
return table;
out_unmap:
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
ARM_V7S_TABLE_SIZE(2),
ARM_V7S_TABLE_SIZE(2),
- SLAB_CACHE_DMA, NULL);
+ ARM_V7S_TABLE_SLAB_FLAGS, NULL);
if (!data->l2_tables)
goto out_free_data;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 237cacd4a62b..d3700ec15cbd 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -22,6 +22,7 @@
#include <linux/atomic.h>
#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
@@ -31,8 +32,6 @@
#include <asm/barrier.h>
-#include "io-pgtable.h"
-
#define ARM_LPAE_MAX_ADDR_BITS 52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
#define ARM_LPAE_MAX_LEVELS 4
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 127558d83667..93f2880be6c6 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -19,11 +19,10 @@
*/
#include <linux/bug.h>
+#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include "io-pgtable.h"
-
static const struct io_pgtable_init_fns *
io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
@@ -61,6 +60,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
return &iop->ops;
}
+EXPORT_SYMBOL_GPL(alloc_io_pgtable_ops);
/*
* It is the IOMMU driver's responsibility to ensure that the page table
@@ -77,3 +77,4 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
io_pgtable_tlb_flush_all(iop);
io_pgtable_init_table[iop->fmt]->free(iop);
}
+EXPORT_SYMBOL_GPL(free_io_pgtable_ops);
diff --git a/drivers/iommu/iommu-debugfs.c b/drivers/iommu/iommu-debugfs.c
index 3b1bf88fd1b0..f03548942096 100644
--- a/drivers/iommu/iommu-debugfs.c
+++ b/drivers/iommu/iommu-debugfs.c
@@ -12,6 +12,7 @@
#include <linux/debugfs.h>
struct dentry *iommu_debugfs_dir;
+EXPORT_SYMBOL_GPL(iommu_debugfs_dir);
/**
* iommu_debugfs_setup - create the top-level iommu directory in debugfs
@@ -23,9 +24,9 @@ struct dentry *iommu_debugfs_dir;
* Emit a strong warning at boot time to indicate that this feature is
* enabled.
*
- * This function is called from iommu_init; drivers may then call
- * iommu_debugfs_new_driver_dir() to instantiate a vendor-specific
- * directory to be used to expose internal data.
+ * This function is called from iommu_init; drivers may then use
+ * iommu_debugfs_dir to instantiate a vendor-specific directory to be used
+ * to expose internal data.
*/
void iommu_debugfs_setup(void)
{
@@ -48,19 +49,3 @@ void iommu_debugfs_setup(void)
pr_warn("*************************************************************\n");
}
}
-
-/**
- * iommu_debugfs_new_driver_dir - create a vendor directory under debugfs/iommu
- * @vendor: name of the vendor-specific subdirectory to create
- *
- * This function is called by an IOMMU driver to create the top-level debugfs
- * directory for that driver.
- *
- * Return: upon success, a pointer to the dentry for the new directory.
- * NULL in case of failure.
- */
-struct dentry *iommu_debugfs_new_driver_dir(const char *vendor)
-{
- return debugfs_create_dir(vendor, iommu_debugfs_dir);
-}
-EXPORT_SYMBOL_GPL(iommu_debugfs_new_driver_dir);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3ed4db334341..109de67d5d72 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -668,7 +668,7 @@ rename:
trace_add_device_to_group(group->id, dev);
- pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
+ dev_info(dev, "Adding to iommu group %d\n", group->id);
return 0;
@@ -684,7 +684,7 @@ err_remove_link:
sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
kfree(device);
- pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+ dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -701,7 +701,7 @@ void iommu_group_remove_device(struct device *dev)
struct iommu_group *group = dev->iommu_group;
struct group_device *tmp_device, *device = NULL;
- pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
+ dev_info(dev, "Removing from iommu group %d\n", group->id);
/* Pre-notify listeners that a device is being removed. */
blocking_notifier_call_chain(&group->notifier,
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
- dev_warn(dev,
- "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
- iommu_def_domain_type);
dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+ if (dom) {
+ dev_warn(dev,
+ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+ iommu_def_domain_type);
+ }
}
group->default_domain = dom;
@@ -1585,13 +1587,14 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
+ const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
unsigned int min_pagesz;
size_t orig_size = size;
phys_addr_t orig_paddr = paddr;
int ret = 0;
- if (unlikely(domain->ops->map == NULL ||
+ if (unlikely(ops->map == NULL ||
domain->pgsize_bitmap == 0UL))
return -ENODEV;
@@ -1620,7 +1623,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
- ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ ret = ops->map(domain, iova, paddr, pgsize, prot);
if (ret)
break;
@@ -1629,6 +1632,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
size -= pgsize;
}
+ if (ops->iotlb_sync_map)
+ ops->iotlb_sync_map(domain);
+
/* unroll mapping in case something went wrong */
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
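The optional iotlb_sync_map hook added here gives drivers a single flush point per iommu_map() call rather than one per page-sized chunk; the tegra-gart rework later in this diff wires it to a register read-back. A hedged sketch of a driver using it; all mydrv_* names are hypothetical:

    /* Hypothetical driver: .map only writes PTEs; the single
     * iotlb_sync_map callback publishes the whole batch. */
    static void mydrv_iotlb_sync_map(struct iommu_domain *domain)
    {
    	mydrv_flush(domain);	/* hypothetical: post buffered PTE writes */
    }

    static const struct iommu_ops mydrv_ops = {
    	.map		= mydrv_map,	/* hypothetical */
    	.unmap		= mydrv_unmap,	/* hypothetical */
    	.iotlb_sync_map	= mydrv_iotlb_sync_map,
    	/* ... */
    };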
@@ -1951,7 +1957,7 @@ int iommu_request_dm_for_dev(struct device *dev)
iommu_domain_free(group->default_domain);
group->default_domain = dm_domain;
- pr_info("Using direct mapping for device %s\n", dev_name(dev));
+ dev_info(dev, "Using iommu direct mapping\n");
ret = 0;
out:
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f8d3ba247523..2de8122e218f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr_iova = rb_entry(curr, struct iova, node);
} while (curr && new_pfn <= curr_iova->pfn_hi);
- if (limit_pfn < size || new_pfn < iovad->start_pfn)
+ if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+ iovad->max32_alloc_size = size;
goto iova32_full;
+ }
/* pfn_lo will point to size aligned address if size_aligned is set */
new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
return 0;
iova32_full:
- iovad->max32_alloc_size = size;
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return -ENOMEM;
}
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 7a4529c61c19..9a380c10655e 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -35,8 +36,6 @@
#define arm_iommu_detach_device(...) do {} while (0)
#endif
-#include "io-pgtable.h"
-
#define IPMMU_CTX_MAX 8
struct ipmmu_features {
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index b94ebd42edd8..81cf2908c531 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -103,6 +103,9 @@ int __init irq_remapping_prepare(void)
else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
amd_iommu_irq_ops.prepare() == 0)
remap_ops = &amd_iommu_irq_ops;
+ else if (IS_ENABLED(CONFIG_HYPERV_IOMMU) &&
+ hyperv_irq_remap_ops.prepare() == 0)
+ remap_ops = &hyperv_irq_remap_ops;
else
return -ENOSYS;
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index 0afef6e43be4..f8609e9f1f5d 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -64,6 +64,7 @@ struct irq_remap_ops {
extern struct irq_remap_ops intel_irq_remap_ops;
extern struct irq_remap_ops amd_iommu_irq_ops;
+extern struct irq_remap_ops hyperv_irq_remap_ops;
#else /* CONFIG_IRQ_REMAP */
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index fc4270733f11..9fb0eb7a4d02 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -37,7 +38,6 @@
#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
-#include "io-pgtable.h"
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
@@ -461,10 +461,10 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
master->num =
msm_iommu_alloc_ctx(iommu->context_map,
0, iommu->ncb);
- if (IS_ERR_VALUE(master->num)) {
- ret = -ENODEV;
- goto fail;
- }
+ if (IS_ERR_VALUE(master->num)) {
+ ret = -ENODEV;
+ goto fail;
+ }
config_mids(iommu, master);
__program_context(iommu->base, master->num,
priv);
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 778498b8633f..62c2c3e8c5df 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -19,13 +19,12 @@
#include <linux/component.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <soc/mediatek/smi.h>
-#include "io-pgtable.h"
-
struct mtk_iommu_suspend_reg {
u32 standard_axi_mode;
u32 dcm_dis;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 7e0df67bd3e9..52b01e3a49df 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -474,7 +474,7 @@ static int mtk_iommu_add_device(struct device *dev)
return err;
}
- return iommu_device_link(&data->iommu, dev);;
+ return iommu_device_link(&data->iommu, dev);
}
static void mtk_iommu_remove_device(struct device *dev)
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index d8595f0a987d..8cdd3f059513 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
@@ -42,7 +43,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include "io-pgtable.h"
#include "arm-smmu-regs.h"
#define SMMU_INTR_SEL_NS 0x2000
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index da6a4e357b2b..4d8057916552 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -1,5 +1,5 @@
/*
- * IOMMU API for GART in Tegra20
+ * IOMMU API for Graphics Address Relocation Table on Tegra20
*
* Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
*
@@ -19,101 +19,75 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#define pr_fmt(fmt) "%s(): " fmt, __func__
+#define dev_fmt(fmt) "gart: " fmt
-#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/list.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/iommu.h>
-#include <linux/of.h>
-
-#include <asm/cacheflush.h>
-/* bitmap of the page sizes currently supported */
-#define GART_IOMMU_PGSIZES (SZ_4K)
+#include <soc/tegra/mc.h>
#define GART_REG_BASE 0x24
#define GART_CONFIG (0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR (0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA (0x2c - GART_REG_BASE)
-#define GART_ENTRY_PHYS_ADDR_VALID (1 << 31)
+
+#define GART_ENTRY_PHYS_ADDR_VALID BIT(31)
#define GART_PAGE_SHIFT 12
#define GART_PAGE_SIZE (1 << GART_PAGE_SHIFT)
-#define GART_PAGE_MASK \
- (~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)
+#define GART_PAGE_MASK GENMASK(30, GART_PAGE_SHIFT)
-struct gart_client {
- struct device *dev;
- struct list_head list;
-};
+/* bitmap of the page sizes currently supported */
+#define GART_IOMMU_PGSIZES (GART_PAGE_SIZE)
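GART_PAGE_MASK now reads GENMASK(30, GART_PAGE_SHIFT): bits 30..12, i.e. the PTE's page-frame field with the valid bit (31) and the page offset (11:0) excluded, which is the same value the old two-mask expression produced. A quick user-space equivalence check, with GENMASK expanded by hand since it lives in linux/bits.h:

    #include <stdio.h>

    #define GART_PAGE_SHIFT	12
    #define GART_PAGE_SIZE	(1u << GART_PAGE_SHIFT)
    #define GART_VALID		(1u << 31)
    /* GENMASK(h, l) expanded for 32-bit user space */
    #define GENMASK32(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
    	unsigned int old_mask = ~(GART_PAGE_SIZE - 1) & ~GART_VALID;
    	unsigned int new_mask = GENMASK32(30, GART_PAGE_SHIFT);

    	/* both print 0x7ffff000 */
    	printf("old=%#x new=%#x equal=%d\n", old_mask, new_mask,
    	       old_mask == new_mask);
    	return 0;
    }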
struct gart_device {
void __iomem *regs;
u32 *savedata;
- u32 page_count; /* total remappable size */
- dma_addr_t iovmm_base; /* offset to vmm_area */
+ unsigned long iovmm_base; /* offset to vmm_area start */
+ unsigned long iovmm_end; /* offset to vmm_area end */
spinlock_t pte_lock; /* for pagetable */
- struct list_head client;
- spinlock_t client_lock; /* for client list */
- struct device *dev;
-
+ spinlock_t dom_lock; /* for active domain */
+ unsigned int active_devices; /* number of active devices */
+ struct iommu_domain *active_domain; /* current active domain */
struct iommu_device iommu; /* IOMMU Core handle */
-};
-
-struct gart_domain {
- struct iommu_domain domain; /* generic domain handle */
- struct gart_device *gart; /* link to gart device */
+ struct device *dev;
};
static struct gart_device *gart_handle; /* unique for a system */
static bool gart_debug;
-#define GART_PTE(_pfn) \
- (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
-
-static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
-{
- return container_of(dom, struct gart_domain, domain);
-}
-
/*
* Any interaction between any block on PPSB and a block on APB or AHB
* must have these read-back to ensure the APB/AHB bus transaction is
* complete before initiating activity on the PPSB block.
*/
-#define FLUSH_GART_REGS(gart) ((void)readl((gart)->regs + GART_CONFIG))
+#define FLUSH_GART_REGS(gart) readl_relaxed((gart)->regs + GART_CONFIG)
#define for_each_gart_pte(gart, iova) \
for (iova = gart->iovmm_base; \
- iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
+ iova < gart->iovmm_end; \
iova += GART_PAGE_SIZE)
static inline void gart_set_pte(struct gart_device *gart,
- unsigned long offs, u32 pte)
+ unsigned long iova, unsigned long pte)
{
- writel(offs, gart->regs + GART_ENTRY_ADDR);
- writel(pte, gart->regs + GART_ENTRY_DATA);
-
- dev_dbg(gart->dev, "%s %08lx:%08x\n",
- pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
+ writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
+ writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}
static inline unsigned long gart_read_pte(struct gart_device *gart,
- unsigned long offs)
+ unsigned long iova)
{
unsigned long pte;
- writel(offs, gart->regs + GART_ENTRY_ADDR);
- pte = readl(gart->regs + GART_ENTRY_DATA);
+ writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
+ pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);
return pte;
}
@@ -125,224 +99,155 @@ static void do_gart_setup(struct gart_device *gart, const u32 *data)
for_each_gart_pte(gart, iova)
gart_set_pte(gart, iova, data ? *(data++) : 0);
- writel(1, gart->regs + GART_CONFIG);
+ writel_relaxed(1, gart->regs + GART_CONFIG);
FLUSH_GART_REGS(gart);
}
-#ifdef DEBUG
-static void gart_dump_table(struct gart_device *gart)
-{
- unsigned long iova;
- unsigned long flags;
-
- spin_lock_irqsave(&gart->pte_lock, flags);
- for_each_gart_pte(gart, iova) {
- unsigned long pte;
-
- pte = gart_read_pte(gart, iova);
-
- dev_dbg(gart->dev, "%s %08lx:%08lx\n",
- (GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
- iova, pte & GART_PAGE_MASK);
- }
- spin_unlock_irqrestore(&gart->pte_lock, flags);
-}
-#else
-static inline void gart_dump_table(struct gart_device *gart)
+static inline bool gart_iova_range_invalid(struct gart_device *gart,
+ unsigned long iova, size_t bytes)
{
+ return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
+ iova + bytes > gart->iovmm_end);
}
-#endif
-static inline bool gart_iova_range_valid(struct gart_device *gart,
- unsigned long iova, size_t bytes)
+static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
{
- unsigned long iova_start, iova_end, gart_start, gart_end;
-
- iova_start = iova;
- iova_end = iova_start + bytes - 1;
- gart_start = gart->iovmm_base;
- gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;
-
- if (iova_start < gart_start)
- return false;
- if (iova_end > gart_end)
- return false;
- return true;
+ return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
}
static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct gart_domain *gart_domain = to_gart_domain(domain);
- struct gart_device *gart = gart_domain->gart;
- struct gart_client *client, *c;
- int err = 0;
-
- client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
- if (!client)
- return -ENOMEM;
- client->dev = dev;
-
- spin_lock(&gart->client_lock);
- list_for_each_entry(c, &gart->client, list) {
- if (c->dev == dev) {
- dev_err(gart->dev,
- "%s is already attached\n", dev_name(dev));
- err = -EINVAL;
- goto fail;
- }
+ struct gart_device *gart = gart_handle;
+ int ret = 0;
+
+ spin_lock(&gart->dom_lock);
+
+ if (gart->active_domain && gart->active_domain != domain) {
+ ret = -EBUSY;
+ } else if (dev->archdata.iommu != domain) {
+ dev->archdata.iommu = domain;
+ gart->active_domain = domain;
+ gart->active_devices++;
}
- list_add(&client->list, &gart->client);
- spin_unlock(&gart->client_lock);
- dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
- return 0;
-fail:
- devm_kfree(gart->dev, client);
- spin_unlock(&gart->client_lock);
- return err;
+ spin_unlock(&gart->dom_lock);
+
+ return ret;
}
static void gart_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct gart_domain *gart_domain = to_gart_domain(domain);
- struct gart_device *gart = gart_domain->gart;
- struct gart_client *c;
-
- spin_lock(&gart->client_lock);
-
- list_for_each_entry(c, &gart->client, list) {
- if (c->dev == dev) {
- list_del(&c->list);
- devm_kfree(gart->dev, c);
- dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
- goto out;
- }
+ struct gart_device *gart = gart_handle;
+
+ spin_lock(&gart->dom_lock);
+
+ if (dev->archdata.iommu == domain) {
+ dev->archdata.iommu = NULL;
+
+ if (--gart->active_devices == 0)
+ gart->active_domain = NULL;
}
- dev_err(gart->dev, "Couldn't find\n");
-out:
- spin_unlock(&gart->client_lock);
+
+ spin_unlock(&gart->dom_lock);
}
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
- struct gart_domain *gart_domain;
- struct gart_device *gart;
+ struct iommu_domain *domain;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
- gart = gart_handle;
- if (!gart)
- return NULL;
-
- gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
- if (!gart_domain)
- return NULL;
-
- gart_domain->gart = gart;
- gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
- gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
- gart->page_count * GART_PAGE_SIZE - 1;
- gart_domain->domain.geometry.force_aperture = true;
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (domain) {
+ domain->geometry.aperture_start = gart_handle->iovmm_base;
+ domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
+ domain->geometry.force_aperture = true;
+ }
- return &gart_domain->domain;
+ return domain;
}
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
- struct gart_domain *gart_domain = to_gart_domain(domain);
- struct gart_device *gart = gart_domain->gart;
-
- if (gart) {
- spin_lock(&gart->client_lock);
- if (!list_empty(&gart->client)) {
- struct gart_client *c;
-
- list_for_each_entry(c, &gart->client, list)
- gart_iommu_detach_dev(domain, c->dev);
- }
- spin_unlock(&gart->client_lock);
+ WARN_ON(gart_handle->active_domain == domain);
+ kfree(domain);
+}
+
+static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
+ unsigned long pa)
+{
+ if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
+ dev_err(gart->dev, "Page entry is in-use\n");
+ return -EINVAL;
}
- kfree(gart_domain);
+ gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);
+
+ return 0;
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t bytes, int prot)
{
- struct gart_domain *gart_domain = to_gart_domain(domain);
- struct gart_device *gart = gart_domain->gart;
- unsigned long flags;
- unsigned long pfn;
- unsigned long pte;
+ struct gart_device *gart = gart_handle;
+ int ret;
- if (!gart_iova_range_valid(gart, iova, bytes))
+ if (gart_iova_range_invalid(gart, iova, bytes))
return -EINVAL;
- spin_lock_irqsave(&gart->pte_lock, flags);
- pfn = __phys_to_pfn(pa);
- if (!pfn_valid(pfn)) {
- dev_err(gart->dev, "Invalid page: %pa\n", &pa);
- spin_unlock_irqrestore(&gart->pte_lock, flags);
+ spin_lock(&gart->pte_lock);
+ ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
+ spin_unlock(&gart->pte_lock);
+
+ return ret;
+}
+
+static inline int __gart_iommu_unmap(struct gart_device *gart,
+ unsigned long iova)
+{
+ if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
+ dev_err(gart->dev, "Page entry is invalid\n");
return -EINVAL;
}
- if (gart_debug) {
- pte = gart_read_pte(gart, iova);
- if (pte & GART_ENTRY_PHYS_ADDR_VALID) {
- spin_unlock_irqrestore(&gart->pte_lock, flags);
- dev_err(gart->dev, "Page entry is in-use\n");
- return -EBUSY;
- }
- }
- gart_set_pte(gart, iova, GART_PTE(pfn));
- FLUSH_GART_REGS(gart);
- spin_unlock_irqrestore(&gart->pte_lock, flags);
+
+ gart_set_pte(gart, iova, 0);
+
return 0;
}
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t bytes)
{
- struct gart_domain *gart_domain = to_gart_domain(domain);
- struct gart_device *gart = gart_domain->gart;
- unsigned long flags;
+ struct gart_device *gart = gart_handle;
+ int err;
- if (!gart_iova_range_valid(gart, iova, bytes))
+ if (gart_iova_range_invalid(gart, iova, bytes))
return 0;
- spin_lock_irqsave(&gart->pte_lock, flags);
- gart_set_pte(gart, iova, 0);
- FLUSH_GART_REGS(gart);
- spin_unlock_irqrestore(&gart->pte_lock, flags);
- return bytes;
+ spin_lock(&gart->pte_lock);
+ err = __gart_iommu_unmap(gart, iova);
+ spin_unlock(&gart->pte_lock);
+
+ return err ? 0 : bytes;
}
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- struct gart_domain *gart_domain = to_gart_domain(domain);
- struct gart_device *gart = gart_domain->gart;
+ struct gart_device *gart = gart_handle;
unsigned long pte;
- phys_addr_t pa;
- unsigned long flags;
- if (!gart_iova_range_valid(gart, iova, 0))
+ if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
return -EINVAL;
- spin_lock_irqsave(&gart->pte_lock, flags);
+ spin_lock(&gart->pte_lock);
pte = gart_read_pte(gart, iova);
- spin_unlock_irqrestore(&gart->pte_lock, flags);
+ spin_unlock(&gart->pte_lock);
- pa = (pte & GART_PAGE_MASK);
- if (!pfn_valid(__phys_to_pfn(pa))) {
- dev_err(gart->dev, "No entry for %08llx:%pa\n",
- (unsigned long long)iova, &pa);
- gart_dump_table(gart);
- return -EINVAL;
- }
- return pa;
+ return pte & GART_PAGE_MASK;
}
static bool gart_iommu_capable(enum iommu_cap cap)
@@ -352,8 +257,12 @@ static bool gart_iommu_capable(enum iommu_cap cap)
static int gart_iommu_add_device(struct device *dev)
{
- struct iommu_group *group = iommu_group_get_for_dev(dev);
+ struct iommu_group *group;
+
+ if (!dev->iommu_fwspec)
+ return -ENODEV;
+ group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
@@ -370,6 +279,17 @@ static void gart_iommu_remove_device(struct device *dev)
iommu_device_unlink(&gart_handle->iommu, dev);
}
+static int gart_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ return 0;
+}
+
+static void gart_iommu_sync(struct iommu_domain *domain)
+{
+ FLUSH_GART_REGS(gart_handle);
+}
+
static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable,
.domain_alloc = gart_iommu_domain_alloc,
@@ -383,129 +303,96 @@ static const struct iommu_ops gart_iommu_ops = {
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
+ .of_xlate = gart_iommu_of_xlate,
+ .iotlb_sync_map = gart_iommu_sync,
+ .iotlb_sync = gart_iommu_sync,
};
-static int tegra_gart_suspend(struct device *dev)
+int tegra_gart_suspend(struct gart_device *gart)
{
- struct gart_device *gart = dev_get_drvdata(dev);
- unsigned long iova;
u32 *data = gart->savedata;
- unsigned long flags;
+ unsigned long iova;
+
+ /*
+ * All GART users shall be suspended at this point. Disable
+ * address translation to trap all GART accesses as invalid
+ * memory accesses.
+ */
+ writel_relaxed(0, gart->regs + GART_CONFIG);
+ FLUSH_GART_REGS(gart);
- spin_lock_irqsave(&gart->pte_lock, flags);
for_each_gart_pte(gart, iova)
*(data++) = gart_read_pte(gart, iova);
- spin_unlock_irqrestore(&gart->pte_lock, flags);
+
return 0;
}
-static int tegra_gart_resume(struct device *dev)
+int tegra_gart_resume(struct gart_device *gart)
{
- struct gart_device *gart = dev_get_drvdata(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&gart->pte_lock, flags);
do_gart_setup(gart, gart->savedata);
- spin_unlock_irqrestore(&gart->pte_lock, flags);
+
return 0;
}
-static int tegra_gart_probe(struct platform_device *pdev)
+struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
struct gart_device *gart;
- struct resource *res, *res_remap;
- void __iomem *gart_regs;
- struct device *dev = &pdev->dev;
- int ret;
-
- if (gart_handle)
- return -EIO;
+ struct resource *res;
+ int err;
BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);
/* the GART memory aperture is required */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res || !res_remap) {
- dev_err(dev, "GART memory aperture expected\n");
- return -ENXIO;
- }
-
- gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
- if (!gart) {
- dev_err(dev, "failed to allocate gart_device\n");
- return -ENOMEM;
+ res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "Memory aperture resource unavailable\n");
+ return ERR_PTR(-ENXIO);
}
- gart_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!gart_regs) {
- dev_err(dev, "failed to remap GART registers\n");
- return -ENXIO;
- }
-
- ret = iommu_device_sysfs_add(&gart->iommu, &pdev->dev, NULL,
- dev_name(&pdev->dev));
- if (ret) {
- dev_err(dev, "Failed to register IOMMU in sysfs\n");
- return ret;
- }
-
- iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
+ gart = kzalloc(sizeof(*gart), GFP_KERNEL);
+ if (!gart)
+ return ERR_PTR(-ENOMEM);
- ret = iommu_device_register(&gart->iommu);
- if (ret) {
- dev_err(dev, "Failed to register IOMMU\n");
- iommu_device_sysfs_remove(&gart->iommu);
- return ret;
- }
+ gart_handle = gart;
- gart->dev = &pdev->dev;
+ gart->dev = dev;
+ gart->regs = mc->regs + GART_REG_BASE;
+ gart->iovmm_base = res->start;
+ gart->iovmm_end = res->end + 1;
spin_lock_init(&gart->pte_lock);
- spin_lock_init(&gart->client_lock);
- INIT_LIST_HEAD(&gart->client);
- gart->regs = gart_regs;
- gart->iovmm_base = (dma_addr_t)res_remap->start;
- gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);
-
- gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
- if (!gart->savedata) {
- dev_err(dev, "failed to allocate context save area\n");
- return -ENOMEM;
- }
+ spin_lock_init(&gart->dom_lock);
- platform_set_drvdata(pdev, gart);
do_gart_setup(gart, NULL);
- gart_handle = gart;
+ err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
+ if (err)
+ goto free_gart;
- return 0;
-}
+ iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
+ iommu_device_set_fwnode(&gart->iommu, dev->fwnode);
-static const struct dev_pm_ops tegra_gart_pm_ops = {
- .suspend = tegra_gart_suspend,
- .resume = tegra_gart_resume,
-};
+ err = iommu_device_register(&gart->iommu);
+ if (err)
+ goto remove_sysfs;
-static const struct of_device_id tegra_gart_of_match[] = {
- { .compatible = "nvidia,tegra20-gart", },
- { },
-};
+ gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
+ sizeof(u32));
+ if (!gart->savedata) {
+ err = -ENOMEM;
+ goto unregister_iommu;
+ }
-static struct platform_driver tegra_gart_driver = {
- .probe = tegra_gart_probe,
- .driver = {
- .name = "tegra-gart",
- .pm = &tegra_gart_pm_ops,
- .of_match_table = tegra_gart_of_match,
- .suppress_bind_attrs = true,
- },
-};
+ return gart;
-static int __init tegra_gart_init(void)
-{
- return platform_driver_register(&tegra_gart_driver);
+unregister_iommu:
+ iommu_device_unregister(&gart->iommu);
+remove_sysfs:
+ iommu_device_sysfs_remove(&gart->iommu);
+free_gart:
+ kfree(gart);
+
+ return ERR_PTR(err);
}
-subsys_initcall(tegra_gart_init);
module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 3a5c7dc6dc57..5182c7d6171e 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -982,10 +982,6 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
u32 value;
int err;
- /* This can happen on Tegra20 which doesn't have an SMMU */
- if (!soc)
- return NULL;
-
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
index aa7290784636..0390603170b4 100644
--- a/drivers/irqchip/irq-ath79-misc.c
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -22,6 +22,15 @@
#define AR71XX_RESET_REG_MISC_INT_ENABLE 4
#define ATH79_MISC_IRQ_COUNT 32
+#define ATH79_MISC_PERF_IRQ 5
+
+static int ath79_perfcount_irq;
+
+int get_c0_perfcount_int(void)
+{
+ return ath79_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
static void ath79_misc_irq_handler(struct irq_desc *desc)
{
@@ -113,6 +122,8 @@ static void __init ath79_misc_intc_domain_init(
{
void __iomem *base = domain->host_data;
+ ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
+
/* Disable and clear all interrupts */
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 83364fedbf0a..5e4ca139e4ea 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -275,14 +275,14 @@ out_free:
return ret;
}
-int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
+static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
struct device_node *parent)
{
return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
}
IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
-int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
+static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
struct device_node *parent)
{
return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 2dd1ff0cf558..7577755bdcf4 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
ra = container_of(a, struct lpi_range, entry);
rb = container_of(b, struct lpi_range, entry);
- return rb->base_id - ra->base_id;
+ return ra->base_id - rb->base_id;
}
static void merge_lpi_ranges(void)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 0868a9d81c3c..15e55d327505 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -27,6 +27,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
@@ -41,6 +42,8 @@
#include "irq-gic-common.h"
+#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
+
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
struct redist_region {
@@ -66,6 +69,34 @@ struct gic_chip_data {
static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+/*
+ * The behaviours of RPR and PMR registers differ depending on the value of
+ * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
+ * distributor and redistributors depends on whether security is enabled in the
+ * GIC.
+ *
+ * When security is enabled, non-secure priority values from the (re)distributor
+ * are presented to the GIC CPUIF as follows:
+ * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
+ *
+ * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
+ * EL1 are subject to a similar operation thus matching the priorities presented
+ * from the (re)distributor when security is enabled.
+ *
+ * see GICv3/GICv4 Architecture Specification (IHI0069D):
+ * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
+ * priorities.
+ * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
+ * interrupt.
+ *
+ * For now, we only support pseudo-NMIs if we have a non-secure view of
+ * priorities.
+ */
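A worked instance of the rule quoted above: a distributor priority of 0xd0 is presented to the CPU interface as (0xd0 >> 1) | 0x80 == 0xe8. Checked in plain C (the helper name and values are illustrative):

    #include <stdio.h>

    /* Non-secure priority as seen by the CPU interface when the GIC
     * has two security states: halved and forced into 0x80..0xff. */
    static unsigned int ns_view(unsigned int dist_pri)
    {
    	return (dist_pri >> 1) | 0x80;
    }

    int main(void)
    {
    	printf("0xd0 -> %#x\n", ns_view(0xd0));	/* 0xe8 */
    	printf("0x00 -> %#x\n", ns_view(0x00));	/* 0x80 */
    	return 0;
    }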
+static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+
+/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
+static refcount_t ppi_nmi_refs[16];
+
static struct gic_kvm_info gic_v3_kvm_info;
static DEFINE_PER_CPU(bool, has_rss);
@@ -232,6 +263,12 @@ static void gic_unmask_irq(struct irq_data *d)
gic_poke_irq(d, GICD_ISENABLER);
}
+static inline bool gic_supports_nmi(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+ static_branch_likely(&supports_pseudo_nmis);
+}
+
static int gic_irq_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which, bool val)
{
@@ -287,6 +324,79 @@ static int gic_irq_get_irqchip_state(struct irq_data *d,
return 0;
}
+static void gic_irq_set_prio(struct irq_data *d, u8 prio)
+{
+ void __iomem *base = gic_dist_base(d);
+
+ writeb_relaxed(prio, base + GICD_IPRIORITYR + gic_irq(d));
+}
+
+static int gic_irq_nmi_setup(struct irq_data *d)
+{
+ struct irq_desc *desc = irq_to_desc(d->irq);
+
+ if (!gic_supports_nmi())
+ return -EINVAL;
+
+ if (gic_peek_irq(d, GICD_ISENABLER)) {
+ pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
+ return -EINVAL;
+ }
+
+ /*
+ * A secondary irq_chip should be in charge of LPI requests;
+ * it should not be possible to get here
+ */
+ if (WARN_ON(gic_irq(d) >= 8192))
+ return -EINVAL;
+
+ /* desc lock should already be held */
+ if (gic_irq(d) < 32) {
+ /* Setting up PPI as NMI, only switch handler for first NMI */
+ if (!refcount_inc_not_zero(&ppi_nmi_refs[gic_irq(d) - 16])) {
+ refcount_set(&ppi_nmi_refs[gic_irq(d) - 16], 1);
+ desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
+ }
+ } else {
+ desc->handle_irq = handle_fasteoi_nmi;
+ }
+
+ gic_irq_set_prio(d, GICD_INT_NMI_PRI);
+
+ return 0;
+}
+
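The PPI branch above installs the NMI flow handler only for the first CPU that requests it, with ppi_nmi_refs arbitrating; teardown below mirrors this on the last user. The first-in/last-out shape in isolation (plain counters and demo names stand in for refcount_t, and the real code runs under the irq_desc lock):

    #include <stdio.h>

    static int nmi_refs[16];	/* one per PPI, like ppi_nmi_refs above */

    static void ppi_nmi_setup(unsigned int ppi)
    {
    	if (nmi_refs[ppi]++ == 0)
    		printf("ppi %u: install NMI flow handler\n", ppi);
    }

    static void ppi_nmi_teardown(unsigned int ppi)
    {
    	if (--nmi_refs[ppi] == 0)
    		printf("ppi %u: restore regular handler\n", ppi);
    }

    int main(void)
    {
    	ppi_nmi_setup(3);	/* installs */
    	ppi_nmi_setup(3);	/* only bumps the count */
    	ppi_nmi_teardown(3);	/* still one user left */
    	ppi_nmi_teardown(3);	/* restores */
    	return 0;
    }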
+static void gic_irq_nmi_teardown(struct irq_data *d)
+{
+ struct irq_desc *desc = irq_to_desc(d->irq);
+
+ if (WARN_ON(!gic_supports_nmi()))
+ return;
+
+ if (gic_peek_irq(d, GICD_ISENABLER)) {
+ pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
+ return;
+ }
+
+ /*
+ * A secondary irq_chip should be in charge of LPI requests;
+ * it should not be possible to get here
+ */
+ if (WARN_ON(gic_irq(d) >= 8192))
+ return;
+
+ /* desc lock should already be held */
+ if (gic_irq(d) < 32) {
+ /* Tearing down NMI, only switch handler for last NMI */
+ if (refcount_dec_and_test(&ppi_nmi_refs[gic_irq(d) - 16]))
+ desc->handle_irq = handle_percpu_devid_irq;
+ } else {
+ desc->handle_irq = handle_fasteoi_irq;
+ }
+
+ gic_irq_set_prio(d, GICD_INT_DEF_PRI);
+}
+
static void gic_eoi_irq(struct irq_data *d)
{
gic_write_eoir(gic_irq(d));
@@ -350,12 +460,50 @@ static u64 gic_mpidr_to_affinity(unsigned long mpidr)
return aff;
}
+static void gic_deactivate_unhandled(u32 irqnr)
+{
+ if (static_branch_likely(&supports_deactivate_key)) {
+ if (irqnr < 8192)
+ gic_write_dir(irqnr);
+ } else {
+ gic_write_eoir(irqnr);
+ }
+}
+
+static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+{
+ int err;
+
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_write_eoir(irqnr);
+ /*
+ * Leave the PSR.I bit set to prevent other NMIs from being
+ * received while handling this one.
+ * PSR.I will be restored when we ERET to the
+ * interrupted context.
+ */
+ err = handle_domain_nmi(gic_data.domain, irqnr, regs);
+ if (err)
+ gic_deactivate_unhandled(irqnr);
+}
+
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
u32 irqnr;
irqnr = gic_read_iar();
+ if (gic_supports_nmi() &&
+ unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
+ gic_handle_nmi(irqnr, regs);
+ return;
+ }
+
+ if (gic_prio_masking_enabled()) {
+ gic_pmr_mask_irqs();
+ gic_arch_enable_irqs();
+ }
+
if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
int err;
@@ -367,12 +515,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
- if (static_branch_likely(&supports_deactivate_key)) {
- if (irqnr < 8192)
- gic_write_dir(irqnr);
- } else {
- gic_write_eoir(irqnr);
- }
+ gic_deactivate_unhandled(irqnr);
}
return;
}
@@ -395,6 +538,44 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
}
}
+static u32 gic_get_pribits(void)
+{
+ u32 pribits;
+
+ pribits = gic_read_ctlr();
+ pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+ pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+ pribits++;
+
+ return pribits;
+}
+
+static bool gic_has_group0(void)
+{
+ u32 val;
+ u32 old_pmr;
+
+ old_pmr = gic_read_pmr();
+
+ /*
+ * Let's find out if Group0 is under control of EL3 or not by
+ * setting the highest possible, non-zero priority in PMR.
+ *
+ * If SCR_EL3.FIQ is set, the priority gets shifted down in
+ * order for the CPU interface to set bit 7, and keep the
+ * actual priority in the non-secure range. In the process, it
+ * loses the least significant bit and the actual priority
+ * becomes 0x80. Reading it back returns 0, indicating that
+ * we don't have access to Group0.
+ */
+ gic_write_pmr(BIT(8 - gic_get_pribits()));
+ val = gic_read_pmr();
+
+ gic_write_pmr(old_pmr);
+
+ return val != 0;
+}
+
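The probe writes BIT(8 - pribits), the smallest representable non-zero priority, and infers Group0 access from the read-back. A toy model of the SCR_EL3.FIQ == 1 round trip, assuming 5 implemented priority bits; the register behaviour here is condensed from the comment above, not taken from hardware:

    #include <stdio.h>

    #define PRIBITS	5				/* assumed for this model */
    #define IMPL_MASK	((0xffu << (8 - PRIBITS)) & 0xffu)

    /* Toy round trip with SCR_EL3.FIQ == 1: the write is regressed into
     * the secure view, truncated to the implemented bits, and the
     * non-secure read re-expands it with bit 7 stripped. */
    static unsigned int pmr_roundtrip(unsigned int v)
    {
    	unsigned int secure = ((v >> 1) | 0x80) & IMPL_MASK;

    	return (secure & 0x80) ? ((secure << 1) & 0xff) : 0;
    }

    int main(void)
    {
    	unsigned int probe = 1u << (8 - PRIBITS);	/* 0x08 */

    	/* reads back 0: Group0 is under EL3 control */
    	printf("wrote %#x, read back %#x\n", probe, pmr_roundtrip(probe));
    	return 0;
    }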
static void __init gic_dist_init(void)
{
unsigned int i;
@@ -530,13 +711,19 @@ static void gic_update_vlpi_properties(void)
!gic_data.rdists.has_direct_lpi ? "no " : "");
}
+/* Check whether it's single security state view */
+static inline bool gic_dist_security_disabled(void)
+{
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
static void gic_cpu_sys_reg_init(void)
{
int i, cpu = smp_processor_id();
u64 mpidr = cpu_logical_map(cpu);
u64 need_rss = MPIDR_RS(mpidr);
bool group0;
- u32 val, pribits;
+ u32 pribits;
/*
* Need to check that the SRE bit has actually been set. If
@@ -548,28 +735,22 @@ static void gic_cpu_sys_reg_init(void)
if (!gic_enable_sre())
pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
- pribits = gic_read_ctlr();
- pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
- pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
- pribits++;
+ pribits = gic_get_pribits();
- /*
- * Let's find out if Group0 is under control of EL3 or not by
- * setting the highest possible, non-zero priority in PMR.
- *
- * If SCR_EL3.FIQ is set, the priority gets shifted down in
- * order for the CPU interface to set bit 7, and keep the
- * actual priority in the non-secure range. In the process, it
- * looses the least significant bit and the actual priority
- * becomes 0x80. Reading it back returns 0, indicating that
- * we're don't have access to Group0.
- */
- write_gicreg(BIT(8 - pribits), ICC_PMR_EL1);
- val = read_gicreg(ICC_PMR_EL1);
- group0 = val != 0;
+ group0 = gic_has_group0();
/* Set priority mask register */
- write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+ if (!gic_prio_masking_enabled()) {
+ write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+ } else {
+ /*
+ * Mismatch configuration with boot CPU, the system is likely
+ * to die as interrupt masking will not work properly on all
+ * CPUs
+ */
+ WARN_ON(gic_supports_nmi() && group0 &&
+ !gic_dist_security_disabled());
+ }
/*
* Some firmwares hand over to the kernel with the BPR changed from
@@ -824,12 +1005,6 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
#endif
#ifdef CONFIG_CPU_PM
-/* Check whether it's single security state view */
-static bool gic_dist_security_disabled(void)
-{
- return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
-}
-
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
@@ -866,6 +1041,8 @@ static struct irq_chip gic_chip = {
.irq_set_affinity = gic_set_affinity,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_nmi_setup = gic_irq_nmi_setup,
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
.flags = IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE |
IRQCHIP_MASK_ON_SUSPEND,
@@ -881,6 +1058,8 @@ static struct irq_chip gic_eoimode1_chip = {
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
+ .irq_nmi_setup = gic_irq_nmi_setup,
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
.flags = IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE |
IRQCHIP_MASK_ON_SUSPEND,
@@ -1082,6 +1261,21 @@ static bool gic_enable_quirk_msm8996(void *data)
return true;
}
+static void gic_enable_nmi_support(void)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ refcount_set(&ppi_nmi_refs[i], 0);
+
+ static_branch_enable(&supports_pseudo_nmis);
+
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
+ else
+ gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
+}
+
static int __init gic_init_bases(void __iomem *dist_base,
struct redist_region *rdist_regs,
u32 nr_redist_regions,
@@ -1151,6 +1345,13 @@ static int __init gic_init_bases(void __iomem *dist_base,
its_cpu_init();
}
+ if (gic_prio_masking_enabled()) {
+ if (!gic_has_group0() || gic_dist_security_disabled())
+ gic_enable_nmi_support();
+ else
+ pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n");
+ }
+
return 0;
out_free:
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ba2a37a27a54..fd3110c171ba 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1089,11 +1089,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
#endif
}
-static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
+static int gic_init_bases(struct gic_chip_data *gic,
struct fwnode_handle *handle)
{
- irq_hw_number_t hwirq_base;
- int gic_irqs, irq_base, ret;
+ int gic_irqs, ret;
if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
/* Frankein-GIC without banked registers... */
@@ -1145,28 +1144,21 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
} else { /* Legacy support */
/*
* For primary GICs, skip over SGIs.
- * For secondary GICs, skip over PPIs, too.
+ * No secondary GIC support whatsoever.
*/
- if (gic == &gic_data[0] && (irq_start & 31) > 0) {
- hwirq_base = 16;
- if (irq_start != -1)
- irq_start = (irq_start & ~31) + 16;
- } else {
- hwirq_base = 32;
- }
+ int irq_base;
- gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+ gic_irqs -= 16; /* calculate # of irqs to allocate */
- irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
+ irq_base = irq_alloc_descs(16, 16, gic_irqs,
numa_node_id());
if (irq_base < 0) {
- WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
- irq_start);
- irq_base = irq_start;
+ WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
+ irq_base = 16;
}
gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
- hwirq_base, &gic_irq_domain_ops, gic);
+ 16, &gic_irq_domain_ops, gic);
}
if (WARN_ON(!gic->domain)) {
@@ -1195,7 +1187,6 @@ error:
}
static int __init __gic_init_bases(struct gic_chip_data *gic,
- int irq_start,
struct fwnode_handle *handle)
{
char *name;
@@ -1231,32 +1222,28 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
gic_init_chip(gic, NULL, name, false);
}
- ret = gic_init_bases(gic, irq_start, handle);
+ ret = gic_init_bases(gic, handle);
if (ret)
kfree(name);
return ret;
}
-void __init gic_init(unsigned int gic_nr, int irq_start,
- void __iomem *dist_base, void __iomem *cpu_base)
+void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
{
struct gic_chip_data *gic;
- if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
- return;
-
/*
* Non-DT/ACPI systems won't run a hypervisor, so let's not
* bother with these...
*/
static_branch_disable(&supports_deactivate_key);
- gic = &gic_data[gic_nr];
+ gic = &gic_data[0];
gic->raw_dist_base = dist_base;
gic->raw_cpu_base = cpu_base;
- __gic_init_bases(gic, irq_start, NULL);
+ __gic_init_bases(gic, NULL);
}
static void gic_teardown(struct gic_chip_data *gic)
@@ -1399,7 +1386,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
if (ret)
return ret;
- ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
+ ret = gic_init_bases(*gic, &dev->of_node->fwnode);
if (ret) {
gic_teardown(*gic);
return ret;
@@ -1459,7 +1446,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
static_branch_disable(&supports_deactivate_key);
- ret = __gic_init_bases(gic, -1, &node->fwnode);
+ ret = __gic_init_bases(gic, &node->fwnode);
if (ret) {
gic_teardown(gic);
return ret;
@@ -1650,7 +1637,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
return -ENOMEM;
}
- ret = __gic_init_bases(gic, -1, domain_handle);
+ ret = __gic_init_bases(gic, domain_handle);
if (ret) {
pr_err("Failed to initialise GIC\n");
irq_domain_free_fwnode(domain_handle);
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index d1098f4da6a4..88df3d00052c 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -169,8 +169,12 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
raw_spin_lock_init(&data->lock);
- of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
- of_property_read_u32(np, "fsl,channel", &data->channel);
+ ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(np, "fsl,channel", &data->channel);
+ if (ret)
+ return ret;
/*
* There is one output irq for each group of 64 inputs.
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
index 86b72fbd3b45..353111a10413 100644
--- a/drivers/irqchip/irq-ls1x.c
+++ b/drivers/irqchip/irq-ls1x.c
@@ -130,6 +130,7 @@ static int __init ls1x_intc_of_init(struct device_node *node,
NULL);
if (!priv->domain) {
pr_err("ls1x-irq: cannot add IRQ domain\n");
+ err = -ENOMEM;
goto out_iounmap;
}
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 567b29c47608..98b6e1d4b1a6 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
void __iomem *base = d->chip_data;
u32 val;
+ if (!msg->address_lo && !msg->address_hi)
+ return;
+
base += get_mbigen_vec_reg(d->hwirq);
val = readl_relaxed(base);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 3496b61a312a..8eed478f3b7e 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -179,7 +179,7 @@ static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
return 0;
}
-const struct irq_domain_ops mmp_irq_domain_ops = {
+static const struct irq_domain_ops mmp_irq_domain_ops = {
.map = mmp_irq_domain_map,
.xlate = mmp_irq_domain_xlate,
};
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index add4c9c934c8..18832ccc8ff8 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -478,7 +478,7 @@ dispose_irq:
return ret;
}
-struct mvebu_sei_caps mvebu_sei_ap806_caps = {
+static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
.ap_range = {
.first = 0,
.size = 21,
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index a93296b9b45d..7bd1d4cb2e19 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
const struct stm32_exti_bank *stm32_bank;
struct stm32_exti_chip_data *chip_data;
void __iomem *base = h_data->base;
- u32 irqs_mask;
stm32_bank = h_data->drv_data->exti_banks[bank_idx];
chip_data = &h_data->chips_data[bank_idx];
@@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
raw_spin_lock_init(&chip_data->rlock);
- /* Determine number of irqs supported */
- writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
- irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
-
/*
* This IP has no reset, so after hot reboot we should
* clear registers to avoid residue
*/
writel_relaxed(0, base + stm32_bank->imr_ofst);
writel_relaxed(0, base + stm32_bank->emr_ofst);
- writel_relaxed(0, base + stm32_bank->rtsr_ofst);
- writel_relaxed(0, base + stm32_bank->ftsr_ofst);
- writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
- if (stm32_bank->fpr_ofst != UNDEF_REG)
- writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 4d85645c87f7..0928fd1f0e0c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
if (m->clock2)
test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
- if (ent->device == 0xB410) {
+ if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
+ ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index ebb3fa2e1d00..362aa5450a5e 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2032,10 +2032,19 @@ setup_hw(struct hfc_pci *hc)
hc->hw.fifos = buffer;
pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
+ if (unlikely(!hc->hw.pci_io)) {
+ printk(KERN_WARNING
+ "HFC-PCI: Error in ioremap for PCI!\n");
+ pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos,
+ hc->hw.dmahandle);
+ return 1;
+ }
+
printk(KERN_INFO
"HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
(u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
(u_long) hc->hw.dmahandle, hc->irq, HZ);
+
/* enable memory mapped ports, disable busmaster */
pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
hc->hw.int_m2 = 0;
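
    This hunk and the mISDNinfineon one just below guard the same pitfall:
    ioremap() reports failure with NULL, not an ERR_PTR(), so an unchecked
    return is silently dereferenced later. A sketch of the check, with
    names chosen for illustration:

        #include <linux/errno.h>
        #include <linux/io.h>
        #include <linux/types.h>

        static int example_map_io(resource_size_t start, size_t len,
                                  void __iomem **out)
        {
                void __iomem *p = ioremap(start, len);

                if (!p)         /* ioremap() returns NULL on failure */
                        return -ENOMEM;
                *out = p;
                return 0;
        }
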
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 3e01012be4ab..0fe6ddcb3fdc 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -712,8 +712,11 @@ setup_io(struct inf_hw *hw)
(ulong)hw->addr.start, (ulong)hw->addr.size);
return err;
}
- if (hw->ci->addr_mode == AM_MEMIO)
+ if (hw->ci->addr_mode == AM_MEMIO) {
hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
+ if (unlikely(!hw->addr.p))
+ return -ENOMEM;
+ }
hw->addr.mode = hw->ci->addr_mode;
if (debug & DEBUG_HW)
pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n",
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index f4253d468ae1..755c6bbc9553 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -570,7 +570,7 @@ isdnloop_atimeout(isdnloop_card *card, int ch)
char buf[60];
spin_lock_irqsave(&card->isdnloop_lock, flags);
- if (card->rcard) {
+ if (card->rcard[ch]) {
isdnloop_fake(card->rcard[ch], "DDIS_I", card->rch[ch] + 1);
card->rcard[ch]->rcard[card->rch[ch]] = NULL;
card->rcard[ch] = NULL;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 4ab8b1b6608f..a14e35d40538 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
struct sock *sk = sock->sk;
int err = 0;
- if (!maddr || maddr->family != AF_ISDN)
+ if (addr_len < sizeof(struct sockaddr_mISDN))
return -EINVAL;
- if (addr_len < sizeof(struct sockaddr_mISDN))
+ if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
lock_sock(sk);
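
    The reorder above matters because maddr is just addr cast to the larger
    struct sockaddr_mISDN: its fields may lie past the end of what the
    caller actually supplied, so the length must be validated before any
    field is read. A sketch of the safe ordering (assuming the mISDN types
    from <linux/mISDNif.h>):

        #include <linux/mISDNif.h>
        #include <linux/socket.h>

        static int example_bind_checks(struct sockaddr *addr, int addr_len)
        {
                struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *)addr;

                /* Length first: shorter buffers make maddr's fields garbage. */
                if (addr_len < sizeof(struct sockaddr_mISDN))
                        return -EINVAL;
                if (!maddr || maddr->family != AF_ISDN)
                        return -EINVAL;
                return 0;
        }
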
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
index 1ee48cb21df9..cabe379071a7 100644
--- a/drivers/leds/leds-mlxreg.c
+++ b/drivers/leds/leds-mlxreg.c
@@ -22,6 +22,7 @@
#define MLXREG_LED_AMBER_SOLID 0x09 /* Solid amber */
#define MLXREG_LED_BLINK_3HZ 167 /* ~167 msec off/on - HW support */
#define MLXREG_LED_BLINK_6HZ 83 /* ~83 msec off/on - HW support */
+#define MLXREG_LED_CAPABILITY_CLEAR GENMASK(31, 8) /* Clear mask */
/**
* struct mlxreg_led_data - led control data:
@@ -187,6 +188,7 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv)
struct mlxreg_led_data *led_data;
struct led_classdev *led_cdev;
enum led_brightness brightness;
+ u32 regval;
int i;
int err;
@@ -196,6 +198,23 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv)
if (!led_data)
return -ENOMEM;
+ if (data->capability) {
+ err = regmap_read(led_pdata->regmap, data->capability,
+ &regval);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Failed to query capability register\n");
+ return err;
+ }
+ if (!(regval & data->bit))
+ continue;
+ /*
+ * Field "bit" can contain one capability bit in 0 byte
+ * and offset bit in 1-3 bytes. Clear capability bit and
+ * keep only offset bit.
+ */
+ data->bit &= MLXREG_LED_CAPABILITY_CLEAR;
+ }
+
led_cdev = &led_data->led_cdev;
led_data->data_parent = priv;
if (strstr(data->label, "red") ||
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7fea18b0c15d..7cb4d685a1f1 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int devid;
+ const struct of_device_id *of_id;
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
- devid = (int)(uintptr_t)of_match_device(
- of_pca9532_leds_match, &client->dev)->data;
+ of_id = of_match_device(of_pca9532_leds_match,
+ &client->dev);
+ if (unlikely(!of_id))
+ return -EINVAL;
+ devid = (int)(uintptr_t) of_id->data;
} else {
devid = id->driver_data;
}
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 3dd3ed46d473..136f86a1627d 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
trigger_data->net_dev = NULL;
}
- strncpy(trigger_data->device_name, buf, size);
+ memcpy(trigger_data->device_name, buf, size);
+ trigger_data->device_name[size] = 0;
if (size > 0 && trigger_data->device_name[size - 1] == '\n')
trigger_data->device_name[size - 1] = 0;
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
container_of(nb, struct led_netdev_data, notifier);
if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
- && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
- && evt != NETDEV_CHANGENAME)
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
return NOTIFY_DONE;
- if (strcmp(dev->name, trigger_data->device_name))
+ if (!(dev == trigger_data->net_dev ||
+ (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
return NOTIFY_DONE;
cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
dev_hold(dev);
trigger_data->net_dev = dev;
break;
- case NETDEV_CHANGENAME:
case NETDEV_UNREGISTER:
- if (trigger_data->net_dev) {
- dev_put(trigger_data->net_dev);
- trigger_data->net_dev = NULL;
- }
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
break;
case NETDEV_UP:
case NETDEV_CHANGE:
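
    The device_name_store() change replaces strncpy() with memcpy() plus an
    explicit terminator: sysfs write buffers are not NUL-terminated, and
    strncpy() neither guarantees termination nor strips the trailing
    newline. A sketch, assuming the destination holds at least size + 1
    bytes:

        #include <linux/string.h>

        static void example_copy_sysfs_name(char *dst, const char *buf,
                                            size_t size)
        {
                memcpy(dst, buf, size);
                dst[size] = '\0';                       /* always terminate */
                if (size && dst[size - 1] == '\n')
                        dst[size - 1] = '\0';           /* strip echo's newline */
        }
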
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 3789185144da..0b7d5fb4548d 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
struct pblk_sec_meta *meta;
struct bio *new_bio = rqd->bio;
struct bio *bio = pr_ctx->orig_bio;
- struct bio_vec src_bv, dst_bv;
void *meta_list = rqd->meta_list;
- int bio_init_idx = pr_ctx->bio_init_idx;
unsigned long *read_bitmap = pr_ctx->bitmap;
+ struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
+ struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
int nr_secs = pr_ctx->orig_nr_secs;
int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
void *src_p, *dst_p;
- int hole, i;
+ int bit, i;
if (unlikely(nr_holes == 1)) {
struct ppa_addr ppa;
@@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
/* Fill the holes in the original bio */
i = 0;
- hole = find_first_zero_bit(read_bitmap, nr_secs);
- do {
- struct pblk_line *line;
+ for (bit = 0; bit < nr_secs; bit++) {
+ if (!test_bit(bit, read_bitmap)) {
+ struct bio_vec dst_bv, src_bv;
+ struct pblk_line *line;
- line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
- kref_put(&line->ref, pblk_line_put);
+ line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
+ kref_put(&line->ref, pblk_line_put);
- meta = pblk_get_meta(pblk, meta_list, hole);
- meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
+ meta = pblk_get_meta(pblk, meta_list, bit);
+ meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
- src_bv = new_bio->bi_io_vec[i++];
- dst_bv = bio->bi_io_vec[bio_init_idx + hole];
+ dst_bv = bio_iter_iovec(bio, orig_iter);
+ src_bv = bio_iter_iovec(new_bio, new_iter);
- src_p = kmap_atomic(src_bv.bv_page);
- dst_p = kmap_atomic(dst_bv.bv_page);
+ src_p = kmap_atomic(src_bv.bv_page);
+ dst_p = kmap_atomic(dst_bv.bv_page);
- memcpy(dst_p + dst_bv.bv_offset,
- src_p + src_bv.bv_offset,
- PBLK_EXPOSED_PAGE_SIZE);
+ memcpy(dst_p + dst_bv.bv_offset,
+ src_p + src_bv.bv_offset,
+ PBLK_EXPOSED_PAGE_SIZE);
- kunmap_atomic(src_p);
- kunmap_atomic(dst_p);
+ kunmap_atomic(src_p);
+ kunmap_atomic(dst_p);
- mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
+ flush_dcache_page(dst_bv.bv_page);
+ mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
- hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
- } while (hole < nr_secs);
+ bio_advance_iter(new_bio, &new_iter,
+ PBLK_EXPOSED_PAGE_SIZE);
+ i++;
+ }
+ bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
+ }
bio_put(new_bio);
kfree(pr_ctx);
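
    The partial-read rework stops indexing bi_io_vec[] directly; with
    multi-page bvecs the segment at logical position N is no longer
    bi_io_vec[N], so the code walks both bios with iterators instead. A
    sketch of the traversal, stepping one fixed-size chunk at a time (names
    and the chunk size are illustrative):

        #include <linux/bio.h>

        static void example_walk_bio(struct bio *bio, unsigned int nr_chunks,
                                     unsigned int chunk_bytes)
        {
                struct bvec_iter iter = BVEC_ITER_ALL_INIT;
                struct bio_vec bv;
                unsigned int i;

                for (i = 0; i < nr_chunks; i++) {
                        bv = bio_iter_iovec(bio, iter); /* current page fragment */
                        /* ... kmap bv.bv_page and use bv.bv_offset here ... */
                        bio_advance_iter(bio, &iter, chunk_bytes);
                }
        }
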
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index b014957dde0b..a5f8bc2defbc 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -233,10 +233,15 @@ void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
/* To start with, all buffer is available to user I/O writers */
rl->rb_budget = budget;
rl->rb_user_max = budget;
- rl->rb_max_io = threshold ? (budget - threshold) : (budget - 1);
rl->rb_gc_max = 0;
rl->rb_state = PBLK_RL_HIGH;
+ /* Maximize I/O size and ensure that the back threshold is respected */
+ if (threshold)
+ rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
+ else
+ rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;
+
atomic_set(&rl->rb_user_cnt, 0);
atomic_set(&rl->rb_gc_cnt, 0);
atomic_set(&rl->rb_space, -1);
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 0a0b8e1f4236..6a844125cf2d 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -485,7 +485,7 @@ int __init smu_init (void)
* SMU based G5s need some memory below 2Gb. Thankfully this is
* called at a time where memblock is still available.
*/
- smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
+ smu_cmdbuf_abs = memblock_phys_alloc_range(4096, 4096, 0, 0x80000000UL);
if (smu_cmdbuf_abs == 0) {
printk(KERN_ERR "SMU: Command buffer allocation failed !\n");
ret = -EINVAL;
@@ -493,6 +493,9 @@ int __init smu_init (void)
}
smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES);
+ if (!smu)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct smu_device));
spin_lock_init(&smu->lock);
INIT_LIST_HEAD(&smu->cmd_list);
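
    memblock_alloc_base() used to panic on failure; memblock_phys_alloc_range()
    returns 0 instead and takes an explicit [start, end) window, which is
    what lets smu_init() keep its buffer below 2 GiB and handle failure
    itself. A sketch of the two early allocators used above (error handling
    is illustrative):

        #include <linux/cache.h>
        #include <linux/memblock.h>
        #include <linux/printk.h>

        static void __init example_early_alloc(void)
        {
                phys_addr_t pa;
                void *va;

                /* Physical allocation constrained to the low 2 GiB. */
                pa = memblock_phys_alloc_range(4096, 4096, 0, 0x80000000UL);
                if (!pa)
                        pr_err("low-memory buffer allocation failed\n");

                /* Virtual allocation; NULL on failure, hence the panic above. */
                va = memblock_alloc(64, SMP_CACHE_BYTES);
                if (!va)
                        panic("early allocation failed");
        }
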
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 3eeb12e93e98..d86e7a4ac04d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -205,4 +205,15 @@ config MTK_CMDQ_MBOX
mailbox driver. The CMDQ is used to help read/write registers with
critical time limitation, such as updating display configuration
during the vblank.
+
+config ZYNQMP_IPI_MBOX
+ bool "Xilinx ZynqMP IPI Mailbox"
+ depends on ARCH_ZYNQMP && OF
+ help
+ Say yes here to add support for the Xilinx IPI mailbox driver.
+ This mailbox driver is used to send notifications or short messages
+ between processors with Xilinx ZynqMP IPI. It places the
+ message into the IPI buffer and accesses the IPI control
+ registers to kick the other processor or query status.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index c818b5d011ae..8be3bcbcf882 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -44,3 +44,5 @@ obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
+
+obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 774362a05159..85fc5b56f99b 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -187,8 +187,8 @@ static int imx_mu_startup(struct mbox_chan *chan)
return 0;
}
- ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED, cp->irq_desc,
- chan);
+ ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED |
+ IRQF_NO_SUSPEND, cp->irq_desc, chan);
if (ret) {
dev_err(priv->dev,
"Unable to acquire IRQ %d\n", priv->irq);
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 58bfafc34bc4..4e4ac4be6423 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -31,7 +31,6 @@
(MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
static bool mbox_data_ready;
-static struct dentry *root_debugfs_dir;
struct mbox_test_device {
struct device *dev;
@@ -45,6 +44,7 @@ struct mbox_test_device {
spinlock_t lock;
wait_queue_head_t waitq;
struct fasync_struct *async_queue;
+ struct dentry *root_debugfs_dir;
};
static ssize_t mbox_test_signal_write(struct file *filp,
@@ -262,16 +262,16 @@ static int mbox_test_add_debugfs(struct platform_device *pdev,
if (!debugfs_initialized())
return 0;
- root_debugfs_dir = debugfs_create_dir("mailbox", NULL);
- if (!root_debugfs_dir) {
+ tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
+ if (!tdev->root_debugfs_dir) {
dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
return -EINVAL;
}
- debugfs_create_file("message", 0600, root_debugfs_dir,
+ debugfs_create_file("message", 0600, tdev->root_debugfs_dir,
tdev, &mbox_test_message_ops);
- debugfs_create_file("signal", 0200, root_debugfs_dir,
+ debugfs_create_file("signal", 0200, tdev->root_debugfs_dir,
tdev, &mbox_test_signal_ops);
return 0;
@@ -363,22 +363,24 @@ static int mbox_test_probe(struct platform_device *pdev)
/* It's okay for MMIO to be NULL */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- size = resource_size(res);
tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (PTR_ERR(tdev->tx_mmio) == -EBUSY)
+ if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
/* if reserved area in SRAM, try just ioremap */
+ size = resource_size(res);
tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
- else if (IS_ERR(tdev->tx_mmio))
+ } else if (IS_ERR(tdev->tx_mmio)) {
tdev->tx_mmio = NULL;
+ }
/* If specified, second reg entry is Rx MMIO */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- size = resource_size(res);
tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (PTR_ERR(tdev->rx_mmio) == -EBUSY)
+ if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
+ size = resource_size(res);
tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
- else if (IS_ERR(tdev->rx_mmio))
+ } else if (IS_ERR(tdev->rx_mmio)) {
tdev->rx_mmio = tdev->tx_mmio;
+ }
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
@@ -416,7 +418,7 @@ static int mbox_test_remove(struct platform_device *pdev)
{
struct mbox_test_device *tdev = platform_get_drvdata(pdev);
- debugfs_remove_recursive(root_debugfs_dir);
+ debugfs_remove_recursive(tdev->root_debugfs_dir);
if (tdev->tx_channel)
mbox_free_channel(tdev->tx_channel);
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index a338bd4cd7db..210fe504f5ae 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -270,14 +270,12 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
goto err_clk;
}
- device_init_wakeup(dev, true);
+ device_set_wakeup_capable(dev, true);
ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp);
if (ret) {
dev_err(dev, "Failed to set wake up irq\n");
goto err_init_wkp;
}
- } else {
- device_init_wakeup(dev, false);
}
/* mailbox controller */
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index e443f6a2ec4b..11fc9fd6a94a 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -779,7 +779,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_hsp_resume(struct device *dev)
+static int __maybe_unused tegra_hsp_resume(struct device *dev)
{
struct tegra_hsp *hsp = dev_get_drvdata(dev);
unsigned int i;
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
new file mode 100644
index 000000000000..86887c9a349a
--- /dev/null
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Inter-Processor Interrupt (IPI) Mailbox Driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+/* IPI agent ID any */
+#define IPI_ID_ANY 0xFFUL
+
+/* indicate whether the ZynqMP IPI mailbox driver uses SMC or HVC calls */
+#define USE_SMC 0
+#define USE_HVC 1
+
+/* Default IPI SMC function IDs */
+#define SMC_IPI_MAILBOX_OPEN 0x82001000U
+#define SMC_IPI_MAILBOX_RELEASE 0x82001001U
+#define SMC_IPI_MAILBOX_STATUS_ENQUIRY 0x82001002U
+#define SMC_IPI_MAILBOX_NOTIFY 0x82001003U
+#define SMC_IPI_MAILBOX_ACK 0x82001004U
+#define SMC_IPI_MAILBOX_ENABLE_IRQ 0x82001005U
+#define SMC_IPI_MAILBOX_DISABLE_IRQ 0x82001006U
+
+/* IPI SMC Macros */
+#define IPI_SMC_ENQUIRY_DIRQ_MASK 0x00000001UL /* Flag to indicate if
+ * the notification interrupt
+ * is to be disabled.
+ */
+#define IPI_SMC_ACK_EIRQ_MASK 0x00000001UL /* Flag to indicate if
+ * the notification interrupt
+ * is to be enabled.
+ */
+
+/* IPI mailbox status */
+#define IPI_MB_STATUS_IDLE 0
+#define IPI_MB_STATUS_SEND_PENDING 1
+#define IPI_MB_STATUS_RECV_PENDING 2
+
+#define IPI_MB_CHNL_TX 0 /* IPI mailbox TX channel */
+#define IPI_MB_CHNL_RX 1 /* IPI mailbox RX channel */
+
+/**
+ * struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
+ * @is_opened: indicate if the IPI channel is opened
+ * @req_buf: local to remote request buffer start address
+ * @resp_buf: local to remote response buffer start address
+ * @req_buf_size: request buffer size
+ * @resp_buf_size: response buffer size
+ * @rx_buf: receive buffer to pass received message to client
+ * @chan_type: channel type
+ */
+struct zynqmp_ipi_mchan {
+ int is_opened;
+ void __iomem *req_buf;
+ void __iomem *resp_buf;
+ void *rx_buf;
+ size_t req_buf_size;
+ size_t resp_buf_size;
+ unsigned int chan_type;
+};
+
+/**
+ * struct zynqmp_ipi_mbox - Description of ZynqMP IPI mailbox
+ * platform data.
+ * @pdata: pointer to the IPI private data
+ * @dev: device pointer corresponding to the Xilinx ZynqMP
+ * IPI mailbox
+ * @remote_id: remote IPI agent ID
+ * @mbox: mailbox Controller
+ * @mchans: array for channels, tx channel and rx channel.
+ */
+struct zynqmp_ipi_mbox {
+ struct zynqmp_ipi_pdata *pdata;
+ struct device dev;
+ u32 remote_id;
+ struct mbox_controller mbox;
+ struct zynqmp_ipi_mchan mchans[2];
+};
+
+/**
+ * struct zynqmp_ipi_pdata - Description of ZynqMP IPI agent platform data.
+ *
+ * @dev: device pointer corresponding to the Xilinx ZynqMP
+ * IPI agent
+ * @irq: IPI agent interrupt ID
+ * @method: whether SMC or HVC is used for the IPI calls
+ * @local_id: local IPI agent ID
+ * @num_mboxes: number of mailboxes of this IPI agent
+ * @ipi_mboxes: IPI mailboxes of this IPI agent
+ */
+struct zynqmp_ipi_pdata {
+ struct device *dev;
+ int irq;
+ unsigned int method;
+ u32 local_id;
+ int num_mboxes;
+ struct zynqmp_ipi_mbox *ipi_mboxes;
+};
+
+static struct device_driver zynqmp_ipi_mbox_driver = {
+ .owner = THIS_MODULE,
+ .name = "zynqmp-ipi-mbox",
+};
+
+static void zynqmp_ipi_fw_call(struct zynqmp_ipi_mbox *ipi_mbox,
+ unsigned long a0, unsigned long a3,
+ struct arm_smccc_res *res)
+{
+ struct zynqmp_ipi_pdata *pdata = ipi_mbox->pdata;
+ unsigned long a1, a2;
+
+ a1 = pdata->local_id;
+ a2 = ipi_mbox->remote_id;
+ if (pdata->method == USE_SMC)
+ arm_smccc_smc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+ else
+ arm_smccc_hvc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+}
+
+/**
+ * zynqmp_ipi_interrupt - Interrupt handler for IPI notification
+ *
+ * @irq: Interrupt number
+ * @data: ZynqMP IPI mailbox platform data.
+ *
+ * Return: IRQ_NONE if the interrupt is not ours,
+ * IRQ_HANDLED if the rx interrupt was successfully handled.
+ */
+static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
+{
+ struct zynqmp_ipi_pdata *pdata = data;
+ struct mbox_chan *chan;
+ struct zynqmp_ipi_mbox *ipi_mbox;
+ struct zynqmp_ipi_mchan *mchan;
+ struct zynqmp_ipi_message *msg;
+ u64 arg0, arg3;
+ struct arm_smccc_res res;
+ int ret, i;
+
+ (void)irq;
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ arg3 = IPI_SMC_ENQUIRY_DIRQ_MASK;
+ for (i = 0; i < pdata->num_mboxes; i++) {
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ chan = &ipi_mbox->mbox.chans[IPI_MB_CHNL_RX];
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, arg3, &res);
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+ if (mchan->is_opened) {
+ msg = mchan->rx_buf;
+ msg->len = mchan->req_buf_size;
+ memcpy_fromio(msg->data, mchan->req_buf,
+ msg->len);
+ mbox_chan_received_data(chan, (void *)msg);
+ return IRQ_HANDLED;
+ }
+ }
+ }
+ return IRQ_NONE;
+}
+
+/**
+ * zynqmp_ipi_peek_data - Peek to see if there are any rx messages.
+ *
+ * @chan: Channel Pointer
+ *
+ * Return: 'true' if there is pending rx data, 'false' if there is none.
+ */
+static bool zynqmp_ipi_peek_data(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ int ret;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return false;
+ }
+
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* TX channel, check if the message has been acked
+ * by the remote, if yes, response is available.
+ */
+ if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+ return false;
+ else
+ return true;
+ } else if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+ /* RX channel, check if a message has arrived. */
+ return true;
+ }
+ return false;
+}
+
+/**
+ * zynqmp_ipi_last_tx_done - See if the last tx message is sent
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 'true' if there is no pending tx data, 'false' otherwise.
+ */
+static bool zynqmp_ipi_last_tx_done(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ int ret;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return false;
+ }
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* We only need to check if the message has been taken
+ * by the remote in the TX channel
+ */
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ /* Check the SMC call status, a0 of the result */
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+ return false;
+ return true;
+ }
+ /* Always true for the response message in RX channel */
+ return true;
+}
+
+/**
+ * zynqmp_ipi_send_data - Send data
+ *
+ * @chan: Channel Pointer
+ * @data: Message Pointer
+ *
+ * Return: 0 on success, else an appropriate error code.
+ */
+static int zynqmp_ipi_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ struct zynqmp_ipi_message *msg = data;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* Send request message */
+ if (msg && msg->len > mchan->req_buf_size) {
+ dev_err(dev, "channel %d message length %u > max %lu\n",
+ mchan->chan_type, (unsigned int)msg->len,
+ mchan->req_buf_size);
+ return -EINVAL;
+ }
+ if (msg && msg->len)
+ memcpy_toio(mchan->req_buf, msg->data, msg->len);
+ /* Kick IPI mailbox to send message */
+ arg0 = SMC_IPI_MAILBOX_NOTIFY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ } else {
+ /* Send response message */
+ if (msg && msg->len > mchan->resp_buf_size) {
+ dev_err(dev, "channel %d message length %u > max %lu\n",
+ mchan->chan_type, (unsigned int)msg->len,
+ mchan->resp_buf_size);
+ return -EINVAL;
+ }
+ if (msg && msg->len)
+ memcpy_toio(mchan->resp_buf, msg->data, msg->len);
+ arg0 = SMC_IPI_MAILBOX_ACK;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, IPI_SMC_ACK_EIRQ_MASK,
+ &res);
+ }
+ return 0;
+}
+
+/**
+ * zynqmp_ipi_startup - Startup the IPI channel
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 0 on success, else the corresponding error code
+ */
+static int zynqmp_ipi_startup(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ u64 arg0;
+ struct arm_smccc_res res;
+ int ret = 0;
+ unsigned int nchan_type;
+
+ if (mchan->is_opened)
+ return 0;
+
+ /* If no channel has been opened, open the IPI mailbox */
+ nchan_type = (mchan->chan_type + 1) % 2;
+ if (!ipi_mbox->mchans[nchan_type].is_opened) {
+ arg0 = SMC_IPI_MAILBOX_OPEN;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ /* Check the SMC call status, a0 of the result */
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret < 0) {
+ dev_err(dev, "SMC to open the IPI channel failed.\n");
+ return ret;
+ }
+ ret = 0;
+ }
+
+ /* If it is RX channel, enable the IPI notification interrupt */
+ if (mchan->chan_type == IPI_MB_CHNL_RX) {
+ arg0 = SMC_IPI_MAILBOX_ENABLE_IRQ;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+ mchan->is_opened = 1;
+
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_shutdown - Shutdown the IPI channel
+ *
+ * @chan: Channel pointer
+ */
+static void zynqmp_ipi_shutdown(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ u64 arg0;
+ struct arm_smccc_res res;
+ unsigned int chan_type;
+
+ if (!mchan->is_opened)
+ return;
+
+ /* If it is RX channel, disable notification interrupt */
+ chan_type = mchan->chan_type;
+ if (chan_type == IPI_MB_CHNL_RX) {
+ arg0 = SMC_IPI_MAILBOX_DISABLE_IRQ;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+ /* Release IPI mailbox if no other channel is opened */
+ chan_type = (chan_type + 1) % 2;
+ if (!ipi_mbox->mchans[chan_type].is_opened) {
+ arg0 = SMC_IPI_MAILBOX_RELEASE;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+
+ mchan->is_opened = 0;
+}
+
+/* ZynqMP IPI mailbox operations */
+static const struct mbox_chan_ops zynqmp_ipi_chan_ops = {
+ .startup = zynqmp_ipi_startup,
+ .shutdown = zynqmp_ipi_shutdown,
+ .peek_data = zynqmp_ipi_peek_data,
+ .last_tx_done = zynqmp_ipi_last_tx_done,
+ .send_data = zynqmp_ipi_send_data,
+};
+
+/**
+ * zynqmp_ipi_of_xlate - Translate of phandle to IPI mailbox channel
+ *
+ * @mbox: mailbox controller pointer
+ * @p: phandle pointer
+ *
+ * Return: Mailbox channel on success, else an error pointer.
+ */
+static struct mbox_chan *zynqmp_ipi_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *p)
+{
+ struct mbox_chan *chan;
+ struct device *dev = mbox->dev;
+ unsigned int chan_type;
+
+ /* Only supports TX and RX channels */
+ chan_type = p->args[0];
+ if (chan_type != IPI_MB_CHNL_TX && chan_type != IPI_MB_CHNL_RX) {
+ dev_err(dev, "req chnl failure: invalid chnl type %u.\n",
+ chan_type);
+ return ERR_PTR(-EINVAL);
+ }
+ chan = &mbox->chans[chan_type];
+ return chan;
+}
+
+static const struct of_device_id zynqmp_ipi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-ipi-mailbox" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
+
+/**
+ * zynqmp_ipi_mbox_get_buf_res - Get buffer resource from the IPI dev node
+ *
+ * @node: IPI mbox device child node
+ * @name: name of the IPI buffer
+ * @res: pointer to where the resource information will be stored.
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_get_buf_res(struct device_node *node,
+ const char *name,
+ struct resource *res)
+{
+ int ret, index;
+
+ index = of_property_match_string(node, "reg-names", name);
+ if (index >= 0) {
+ ret = of_address_to_resource(node, index, res);
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/**
+ * zynqmp_ipi_mbox_dev_release() - release callback for an IPI mbox device
+ *
+ * @dev: the ipi mailbox device
+ *
+ * An empty release() callback avoids the kernel warning about a device
+ * without a release() function.
+ *
+ */
+static void zynqmp_ipi_mbox_dev_release(struct device *dev)
+{
+ (void)dev;
+}
+
+/**
+ * zynqmp_ipi_mbox_probe - probe IPI mailbox resource from device node
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
+ struct device_node *node)
+{
+ struct zynqmp_ipi_mchan *mchan;
+ struct mbox_chan *chans;
+ struct mbox_controller *mbox;
+ struct resource res;
+ struct device *dev, *mdev;
+ const char *name;
+ int ret;
+
+ dev = ipi_mbox->pdata->dev;
+ /* Initialize dev for IPI mailbox */
+ ipi_mbox->dev.parent = dev;
+ ipi_mbox->dev.release = NULL;
+ ipi_mbox->dev.of_node = node;
+ dev_set_name(&ipi_mbox->dev, "%s", of_node_full_name(node));
+ dev_set_drvdata(&ipi_mbox->dev, ipi_mbox);
+ ipi_mbox->dev.release = zynqmp_ipi_mbox_dev_release;
+ ipi_mbox->dev.driver = &zynqmp_ipi_mbox_driver;
+ ret = device_register(&ipi_mbox->dev);
+ if (ret) {
+ dev_err(dev, "Failed to register ipi mbox dev.\n");
+ return ret;
+ }
+ mdev = &ipi_mbox->dev;
+
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ name = "local_request_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->req_buf_size = resource_size(&res);
+ mchan->req_buf = devm_ioremap(mdev, res.start,
+ mchan->req_buf_size);
+ if (!mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
+ return ret;
+ }
+
+ name = "remote_response_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->resp_buf_size = resource_size(&res);
+ mchan->resp_buf = devm_ioremap(mdev, res.start,
+ mchan->resp_buf_size);
+ if (!mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+ mchan->rx_buf = devm_kzalloc(mdev,
+ mchan->resp_buf_size +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!mchan->rx_buf)
+ return -ENOMEM;
+
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ name = "remote_request_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->req_buf_size = resource_size(&res);
+ mchan->req_buf = devm_ioremap(mdev, res.start,
+ mchan->req_buf_size);
+ if (!mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+
+ name = "local_response_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->resp_buf_size = resource_size(&res);
+ mchan->resp_buf = devm_ioremap(mdev, res.start,
+ mchan->resp_buf_size);
+ if (!mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+ mchan->rx_buf = devm_kzalloc(mdev,
+ mchan->resp_buf_size +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!mchan->rx_buf)
+ return -ENOMEM;
+
+ /* Get the IPI remote agent ID */
+ ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI remote ID is specified.\n");
+ return ret;
+ }
+
+ mbox = &ipi_mbox->mbox;
+ mbox->dev = mdev;
+ mbox->ops = &zynqmp_ipi_chan_ops;
+ mbox->num_chans = 2;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = true;
+ mbox->txpoll_period = 5;
+ mbox->of_xlate = zynqmp_ipi_of_xlate;
+ chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ mbox->chans = chans;
+ chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
+ ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
+ ret = devm_mbox_controller_register(mdev, mbox);
+ if (ret)
+ dev_err(mdev,
+ "Failed to register mbox_controller(%d)\n", ret);
+ else
+ dev_info(mdev,
+ "Registered ZynqMP IPI mbox with TX/RX channels.\n");
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_free_mboxes - Free IPI mailbox devices
+ *
+ * @pdata: IPI private data
+ */
+static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+{
+ struct zynqmp_ipi_mbox *ipi_mbox;
+ int i;
+
+ i = pdata->num_mboxes - 1;
+ for (; i >= 0; i--) {
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ if (ipi_mbox->dev.parent) {
+ mbox_controller_unregister(&ipi_mbox->mbox);
+ device_unregister(&ipi_mbox->dev);
+ }
+ }
+}
+
+static int zynqmp_ipi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *nc, *np = pdev->dev.of_node;
+ struct zynqmp_ipi_pdata *pdata;
+ struct zynqmp_ipi_mbox *mbox;
+ int num_mboxes, ret = -EINVAL;
+
+ num_mboxes = of_get_child_count(np);
+ pdata = devm_kzalloc(dev, sizeof(*pdata) + (num_mboxes * sizeof(*mbox)),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ pdata->dev = dev;
+
+ /* Get the IPI local agents ID */
+ ret = of_property_read_u32(np, "xlnx,ipi-id", &pdata->local_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI local ID is specified.\n");
+ return ret;
+ }
+
+ pdata->num_mboxes = num_mboxes;
+ pdata->ipi_mboxes = (struct zynqmp_ipi_mbox *)
+ ((char *)pdata + sizeof(*pdata));
+
+ mbox = pdata->ipi_mboxes;
+ for_each_available_child_of_node(np, nc) {
+ mbox->pdata = pdata;
+ ret = zynqmp_ipi_mbox_probe(mbox, nc);
+ if (ret) {
+ dev_err(dev, "failed to probe subdev.\n");
+ ret = -EINVAL;
+ goto free_mbox_dev;
+ }
+ mbox++;
+ }
+
+ /* IPI IRQ */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "unable to find IPI IRQ.\n");
+ goto free_mbox_dev;
+ }
+ pdata->irq = ret;
+ ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
+ IRQF_SHARED, dev_name(dev), pdata);
+ if (ret) {
+ dev_err(dev, "IRQ %d is not requested successfully.\n",
+ pdata->irq);
+ goto free_mbox_dev;
+ }
+
+ platform_set_drvdata(pdev, pdata);
+ return ret;
+
+free_mbox_dev:
+ zynqmp_ipi_free_mboxes(pdata);
+ return ret;
+}
+
+static int zynqmp_ipi_remove(struct platform_device *pdev)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = platform_get_drvdata(pdev);
+ zynqmp_ipi_free_mboxes(pdata);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_ipi_driver = {
+ .probe = zynqmp_ipi_probe,
+ .remove = zynqmp_ipi_remove,
+ .driver = {
+ .name = "zynqmp-ipi",
+ .of_match_table = of_match_ptr(zynqmp_ipi_of_match),
+ },
+};
+
+static int __init zynqmp_ipi_init(void)
+{
+ return platform_driver_register(&zynqmp_ipi_driver);
+}
+subsys_initcall(zynqmp_ipi_init);
+
+static void __exit zynqmp_ipi_exit(void)
+{
+ platform_driver_unregister(&zynqmp_ipi_driver);
+}
+module_exit(zynqmp_ipi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP IPI Mailbox driver");
+MODULE_AUTHOR("Xilinx Inc.");
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 3db222509e44..2557f198e175 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -436,6 +436,18 @@ config DM_DELAY
If unsure, say N.
+config DM_INIT
+ bool "DM \"dm-mod.create=\" parameter support"
+ depends on BLK_DEV_DM=y
+ ---help---
+ Enable "dm-mod.create=" parameter to create mapped devices at init time.
+ This option is useful to allow mounting rootfs without requiring an
+ initramfs.
+ See Documentation/device-mapper/dm-init.txt for dm-mod.create="..."
+ format.
+
+ If unsure, say N.
+
config DM_UEVENT
bool "DM uevents"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 822f4e8753bc..a52b703e588e 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -69,6 +69,10 @@ obj-$(CONFIG_DM_INTEGRITY) += dm-integrity.o
obj-$(CONFIG_DM_ZONED) += dm-zoned.o
obj-$(CONFIG_DM_WRITECACHE) += dm-writecache.o
+ifeq ($(CONFIG_DM_INIT),y)
+dm-mod-objs += dm-init.o
+endif
+
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
endif
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 1ecef76225a1..2a48ea3f1b30 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -150,7 +150,7 @@ struct dm_buffer {
void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
- struct stack_trace stack_trace;
+ unsigned int stack_len;
unsigned long stack_entries[MAX_STACK];
#endif
};
@@ -232,11 +232,7 @@ static DEFINE_MUTEX(dm_bufio_clients_lock);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
- b->stack_trace.nr_entries = 0;
- b->stack_trace.max_entries = MAX_STACK;
- b->stack_trace.entries = b->stack_entries;
- b->stack_trace.skip = 2;
- save_stack_trace(&b->stack_trace);
+ b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif
@@ -438,7 +434,7 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
adjust_total_allocated(b->data_mode, (long)c->block_size);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
- memset(&b->stack_trace, 0, sizeof(b->stack_trace));
+ b->stack_len = 0;
#endif
return b;
}
@@ -1520,8 +1516,9 @@ static void drop_buffers(struct dm_bufio_client *c)
DMERR("leaked buffer %llx, hold count %u, list %d",
(unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
- print_stack_trace(&b->stack_trace, 1);
- b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
+ stack_trace_print(b->stack_entries, b->stack_len, 1);
+ /* mark unclaimed to avoid BUG_ON below */
+ b->hold_count = 0;
#endif
}
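
    The dm-bufio hunks are part of the tree-wide move from struct
    stack_trace to the simplified stacktrace API: callers keep a bare entry
    array plus a depth, and the skip/size bookkeeping lives in the call
    itself. A sketch of the new pair (types and names are illustrative):

        #include <linux/stacktrace.h>

        #define EX_MAX_STACK 10

        struct ex_trace {
                unsigned long entries[EX_MAX_STACK];
                unsigned int len;
        };

        static void ex_record(struct ex_trace *t)
        {
                /* Skip two frames so the record starts at our caller. */
                t->len = stack_trace_save(t->entries, EX_MAX_STACK, 2);
        }

        static void ex_print(const struct ex_trace *t)
        {
                stack_trace_print(t->entries, t->len, 1);
        }
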
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index b29a8327eed1..d249cf8ac277 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -353,6 +353,7 @@ struct cache_features {
enum cache_metadata_mode mode;
enum cache_io_mode io_mode;
unsigned metadata_version;
+ bool discard_passdown:1;
};
struct cache_stats {
@@ -1899,7 +1900,11 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
b = to_dblock(from_dblock(b) + 1);
}
- bio_endio(bio);
+ if (cache->features.discard_passdown) {
+ remap_to_origin(cache, bio);
+ generic_make_request(bio);
+ } else
+ bio_endio(bio);
return false;
}
@@ -2233,13 +2238,14 @@ static void init_features(struct cache_features *cf)
cf->mode = CM_WRITE;
cf->io_mode = CM_IO_WRITEBACK;
cf->metadata_version = 1;
+ cf->discard_passdown = true;
}
static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{
static const struct dm_arg _args[] = {
- {0, 2, "Invalid number of cache feature arguments"},
+ {0, 3, "Invalid number of cache feature arguments"},
};
int r, mode_ctr = 0;
@@ -2274,6 +2280,9 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
else if (!strcasecmp(arg, "metadata2"))
cf->metadata_version = 2;
+ else if (!strcasecmp(arg, "no_discard_passdown"))
+ cf->discard_passdown = false;
+
else {
*error = "Unrecognised cache feature requested";
return -EINVAL;
@@ -2496,7 +2505,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->num_discard_bios = 1;
ti->discards_supported = true;
- ti->split_discard_bios = false;
ti->per_io_data_size = sizeof(struct per_bio_data);
@@ -3120,6 +3128,39 @@ static void cache_resume(struct dm_target *ti)
do_waker(&cache->waker.work);
}
+static void emit_flags(struct cache *cache, char *result,
+ unsigned maxlen, ssize_t *sz_ptr)
+{
+ ssize_t sz = *sz_ptr;
+ struct cache_features *cf = &cache->features;
+ unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
+
+ DMEMIT("%u ", count);
+
+ if (cf->metadata_version == 2)
+ DMEMIT("metadata2 ");
+
+ if (writethrough_mode(cache))
+ DMEMIT("writethrough ");
+
+ else if (passthrough_mode(cache))
+ DMEMIT("passthrough ");
+
+ else if (writeback_mode(cache))
+ DMEMIT("writeback ");
+
+ else {
+ DMEMIT("unknown ");
+ DMERR("%s: internal error: unknown io mode: %d",
+ cache_device_name(cache), (int) cf->io_mode);
+ }
+
+ if (!cf->discard_passdown)
+ DMEMIT("no_discard_passdown ");
+
+ *sz_ptr = sz;
+}
+
/*
* Status format:
*
@@ -3186,25 +3227,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
(unsigned) atomic_read(&cache->stats.promotion),
(unsigned long) atomic_read(&cache->nr_dirty));
- if (cache->features.metadata_version == 2)
- DMEMIT("2 metadata2 ");
- else
- DMEMIT("1 ");
-
- if (writethrough_mode(cache))
- DMEMIT("writethrough ");
-
- else if (passthrough_mode(cache))
- DMEMIT("passthrough ");
-
- else if (writeback_mode(cache))
- DMEMIT("writeback ");
-
- else {
- DMERR("%s: internal error: unknown io mode: %d",
- cache_device_name(cache), (int) cache->features.io_mode);
- goto err;
- }
+ emit_flags(cache, result, maxlen, &sz);
DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
@@ -3433,14 +3456,62 @@ static int cache_iterate_devices(struct dm_target *ti,
return r;
}
+static bool origin_dev_supports_discard(struct block_device *origin_bdev)
+{
+ struct request_queue *q = bdev_get_queue(origin_bdev);
+
+ return q && blk_queue_discard(q);
+}
+
+/*
+ * If discard_passdown was enabled verify that the origin device
+ * supports discards. Disable discard_passdown if not.
+ */
+static void disable_passdown_if_not_supported(struct cache *cache)
+{
+ struct block_device *origin_bdev = cache->origin_dev->bdev;
+ struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+ const char *reason = NULL;
+ char buf[BDEVNAME_SIZE];
+
+ if (!cache->features.discard_passdown)
+ return;
+
+ if (!origin_dev_supports_discard(origin_bdev))
+ reason = "discard unsupported";
+
+ else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
+ reason = "max discard sectors smaller than a block";
+
+ if (reason) {
+ DMWARN("Origin device (%s) %s: Disabling discard passdown.",
+ bdevname(origin_bdev, buf), reason);
+ cache->features.discard_passdown = false;
+ }
+}
+
static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
+ struct block_device *origin_bdev = cache->origin_dev->bdev;
+ struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+
+ if (!cache->features.discard_passdown) {
+ /* No passdown is done, so set our own virtual limits */
+ limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
+ cache->origin_sectors);
+ limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+ return;
+ }
+
/*
- * FIXME: these limits may be incompatible with the cache device
+ * cache_iterate_devices() is stacking both origin and fast device limits
+ * but discards aren't passed to fast device, so inherit origin's limits.
*/
- limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
- cache->origin_sectors);
- limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+ limits->max_discard_sectors = origin_limits->max_discard_sectors;
+ limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
+ limits->discard_granularity = origin_limits->discard_granularity;
+ limits->discard_alignment = origin_limits->discard_alignment;
+ limits->discard_misaligned = origin_limits->discard_misaligned;
}
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -3457,6 +3528,8 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
}
+
+ disable_passdown_if_not_supported(cache);
set_discard_limits(cache, limits);
}
@@ -3464,7 +3537,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {2, 0, 0},
+ .version = {2, 1, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 95c6d86ab5e8..c4ef1fceead6 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -115,6 +115,7 @@ struct mapped_device {
struct srcu_struct io_barrier;
};
+void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index dd6565798778..9faed1c92b52 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -332,7 +332,6 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
int err;
desc->tfm = essiv->hash_tfm;
- desc->flags = 0;
err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
shash_desc_zero(desc);
@@ -606,7 +605,6 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
int i, r;
desc->tfm = lmk->hash_tfm;
- desc->flags = 0;
r = crypto_shash_init(desc);
if (r)
@@ -768,7 +766,6 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
/* calculate crc32 for every 32bit part and xor it */
desc->tfm = tcw->crc32_tfm;
- desc->flags = 0;
for (i = 0; i < 4; i++) {
r = crypto_shash_init(desc);
if (r)
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
new file mode 100644
index 000000000000..4b76f84424c3
--- /dev/null
+++ b/drivers/md/dm-init.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * dm-init.c
+ * Copyright (C) 2017 The Chromium OS Authors <chromium-os-dev@chromium.org>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/device-mapper.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+
+#define DM_MSG_PREFIX "init"
+#define DM_MAX_DEVICES 256
+#define DM_MAX_TARGETS 256
+#define DM_MAX_STR_SIZE 4096
+
+static char *create;
+
+/*
+ * Format: dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
+ * Table format: <start_sector> <num_sectors> <target_type> <target_args>
+ *
+ * See Documentation/device-mapper/dm-init.txt for dm-mod.create="..." format
+ * details.
+ */
+
+struct dm_device {
+ struct dm_ioctl dmi;
+ struct dm_target_spec *table[DM_MAX_TARGETS];
+ char *target_args_array[DM_MAX_TARGETS];
+ struct list_head list;
+};
+
+static const char * const dm_allowed_targets[] __initconst = {
+ "crypt",
+ "delay",
+ "linear",
+ "snapshot-origin",
+ "striped",
+ "verity",
+};
+
+static int __init dm_verify_target_type(const char *target)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dm_allowed_targets); i++) {
+ if (!strcmp(dm_allowed_targets[i], target))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void __init dm_setup_cleanup(struct list_head *devices)
+{
+ struct dm_device *dev, *tmp;
+ unsigned int i;
+
+ list_for_each_entry_safe(dev, tmp, devices, list) {
+ list_del(&dev->list);
+ for (i = 0; i < dev->dmi.target_count; i++) {
+ kfree(dev->table[i]);
+ kfree(dev->target_args_array[i]);
+ }
+ kfree(dev);
+ }
+}
+
+/**
+ * str_field_delimit - delimit a string based on a separator char.
+ * @str: the pointer to the string to delimit.
+ * @separator: char that delimits the field
+ *
+ * Find a @separator and replace it by '\0'.
+ * Remove leading and trailing spaces.
+ * Return the remainder string after the @separator.
+ */
+static char __init *str_field_delimit(char **str, char separator)
+{
+ char *s;
+
+ /* TODO: add support for escaped characters */
+ *str = skip_spaces(*str);
+ s = strchr(*str, separator);
+ /* Delimit the field and remove trailing spaces */
+ if (s)
+ *s = '\0';
+ *str = strim(*str);
+ return s ? ++s : NULL;
+}
+
+/**
+ * dm_parse_table_entry - parse a table entry
+ * @dev: device to store the parsed information.
+ * @str: the pointer to a string with the format:
+ * <start_sector> <num_sectors> <target_type> <target_args>[, ...]
+ *
+ * Return the remainder string after the table entry, i.e., after the comma that
+ * delimits the entry or NULL if reached the end of the string.
+ */
+static char __init *dm_parse_table_entry(struct dm_device *dev, char *str)
+{
+ const unsigned int n = dev->dmi.target_count - 1;
+ struct dm_target_spec *sp;
+ unsigned int i;
+ /* fields: */
+ char *field[4];
+ char *next;
+
+ field[0] = str;
+ /* Delimit first 3 fields that are separated by space */
+ for (i = 0; i < ARRAY_SIZE(field) - 1; i++) {
+ field[i + 1] = str_field_delimit(&field[i], ' ');
+ if (!field[i + 1])
+ return ERR_PTR(-EINVAL);
+ }
+ /* Delimit last field that can be terminated by comma */
+ next = str_field_delimit(&field[i], ',');
+
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return ERR_PTR(-ENOMEM);
+ dev->table[n] = sp;
+
+ /* start_sector */
+ if (kstrtoull(field[0], 0, &sp->sector_start))
+ return ERR_PTR(-EINVAL);
+ /* num_sector */
+ if (kstrtoull(field[1], 0, &sp->length))
+ return ERR_PTR(-EINVAL);
+ /* target_type */
+ strscpy(sp->target_type, field[2], sizeof(sp->target_type));
+ if (dm_verify_target_type(sp->target_type)) {
+ DMERR("invalid type \"%s\"", sp->target_type);
+ return ERR_PTR(-EINVAL);
+ }
+ /* target_args */
+ dev->target_args_array[n] = kstrndup(field[3], DM_MAX_STR_SIZE,
+ GFP_KERNEL);
+ if (!dev->target_args_array[n])
+ return ERR_PTR(-ENOMEM);
+
+ return next;
+}
+
+/**
+ * dm_parse_table - parse "dm-mod.create=" table field
+ * @dev: device to store the parsed information.
+ * @str: the pointer to a string with the format:
+ * <table>[,<table>+]
+ */
+static int __init dm_parse_table(struct dm_device *dev, char *str)
+{
+ char *table_entry = str;
+
+ while (table_entry) {
+ DMDEBUG("parsing table \"%s\"", str);
+ if (++dev->dmi.target_count >= DM_MAX_TARGETS) {
+ DMERR("too many targets %u > %d",
+ dev->dmi.target_count, DM_MAX_TARGETS);
+ return -EINVAL;
+ }
+ table_entry = dm_parse_table_entry(dev, table_entry);
+ if (IS_ERR(table_entry)) {
+ DMERR("couldn't parse table");
+ return PTR_ERR(table_entry);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * dm_parse_device_entry - parse a device entry
+ * @dev: device to store the parsed information.
+ * @str: the pointer to a string with the format:
+ * name,uuid,minor,flags,table[; ...]
+ *
+ * Return the remainder string after the device entry, i.e., after the semicolon
+ * which delimits the entry or NULL if reached the end of the string.
+ */
+static char __init *dm_parse_device_entry(struct dm_device *dev, char *str)
+{
+ /* There are 5 fields: name,uuid,minor,flags,table; */
+ char *field[5];
+ unsigned int i;
+ char *next;
+
+ field[0] = str;
+ /* Delimit first 4 fields that are separated by comma */
+ for (i = 0; i < ARRAY_SIZE(field) - 1; i++) {
+ field[i+1] = str_field_delimit(&field[i], ',');
+ if (!field[i+1])
+ return ERR_PTR(-EINVAL);
+ }
+ /* Delimit last field that can be delimited by semi-colon */
+ next = str_field_delimit(&field[i], ';');
+
+ /* name */
+ strscpy(dev->dmi.name, field[0], sizeof(dev->dmi.name));
+ /* uuid */
+ strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid));
+ /* minor */
+ if (strlen(field[2])) {
+ if (kstrtoull(field[2], 0, &dev->dmi.dev))
+ return ERR_PTR(-EINVAL);
+ dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG;
+ }
+ /* flags */
+ if (!strcmp(field[3], "ro"))
+ dev->dmi.flags |= DM_READONLY_FLAG;
+ else if (strcmp(field[3], "rw"))
+ return ERR_PTR(-EINVAL);
+ /* table */
+ if (dm_parse_table(dev, field[4]))
+ return ERR_PTR(-EINVAL);
+
+ return next;
+}
+
+/**
+ * dm_parse_devices - parse "dm-mod.create=" argument
+ * @devices: list of struct dm_device to store the parsed information.
+ * @str: the pointer to a string with the format:
+ * <device>[;<device>+]
+ */
+static int __init dm_parse_devices(struct list_head *devices, char *str)
+{
+ unsigned long ndev = 0;
+ struct dm_device *dev;
+ char *device = str;
+
+ DMDEBUG("parsing \"%s\"", str);
+ while (device) {
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ list_add_tail(&dev->list, devices);
+
+ if (++ndev >= DM_MAX_DEVICES) {
+ DMERR("too many devices %lu > %d",
+ ndev, DM_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ device = dm_parse_device_entry(dev, device);
+ if (IS_ERR(device)) {
+ DMERR("couldn't parse device");
+ return PTR_ERR(device);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * dm_init_init - parse "dm-mod.create=" argument and configure drivers
+ */
+static int __init dm_init_init(void)
+{
+ struct dm_device *dev;
+ LIST_HEAD(devices);
+ char *str;
+ int r;
+
+ if (!create)
+ return 0;
+
+ if (strlen(create) >= DM_MAX_STR_SIZE) {
+ DMERR("Argument is too big. Limit is %d\n", DM_MAX_STR_SIZE);
+ return -EINVAL;
+ }
+ str = kstrndup(create, DM_MAX_STR_SIZE, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ r = dm_parse_devices(&devices, str);
+ if (r)
+ goto out;
+
+ DMINFO("waiting for all devices to be available before creating mapped devices\n");
+ wait_for_device_probe();
+
+ list_for_each_entry(dev, &devices, list) {
+ if (dm_early_create(&dev->dmi, dev->table,
+ dev->target_args_array))
+ break;
+ }
+out:
+ kfree(str);
+ dm_setup_cleanup(&devices);
+ return r;
+}
+
+late_initcall(dm_init_init);
+
+module_param(create, charp, 0);
+MODULE_PARM_DESC(create, "Create a mapped device in early boot");
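
Taken together, the parsers above accept a full device description on the kernel
command line: each semicolon-separated device carries the comma-separated
name,uuid,minor,flags fields, followed by one or more table lines of the form
<start_sector> <num_sectors> <target_type> <target_args>. A sketch (the device
name and block numbers are illustrative only) that assembles a read-write
linear device from two disks and boots from it:

        dm-mod.create="lroot,,,rw, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" root=/dev/dm-0
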
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 457200ca6287..95ae4bf34203 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -532,7 +532,6 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
unsigned j, size;
desc->tfm = ic->journal_mac;
- desc->flags = 0;
r = crypto_shash_init(desc);
if (unlikely(r)) {
@@ -913,7 +912,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
- range2->logical_sector + range2->n_sectors > range2->logical_sector;
+ range1->logical_sector + range1->n_sectors > range2->logical_sector;
}
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
@@ -959,8 +958,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
struct dm_integrity_range *last_range =
list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
struct task_struct *last_range_task;
- if (!ranges_overlap(range, last_range))
- break;
last_range_task = last_range->task;
list_del(&last_range->wait_entry);
if (!add_new_range(ic, last_range, false)) {
@@ -1122,7 +1119,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
return r;
data = dm_bufio_read(ic->bufio, *metadata_block, &b);
- if (unlikely(IS_ERR(data)))
+ if (IS_ERR(data))
return PTR_ERR(data);
to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
@@ -1278,7 +1275,6 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
unsigned digest_size;
req->tfm = ic->internal_hash;
- req->flags = 0;
r = crypto_shash_init(req);
if (unlikely(r < 0)) {
@@ -1368,8 +1364,8 @@ again:
checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- DMERR("Checksum failed at sector 0x%llx",
- (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
+ DMERR_LIMIT("Checksum failed at sector 0x%llx",
+ (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
r = -EILSEQ;
atomic64_inc(&ic->number_of_mismatches);
}
@@ -1561,8 +1557,8 @@ retry_kmap:
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
- DMERR("Checksum failed when reading from journal, at sector 0x%llx",
- (unsigned long long)logical_sector);
+ DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
+ (unsigned long long)logical_sector);
}
}
#endif
@@ -3185,7 +3181,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
journal_watermark = val;
else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
sync_msec = val;
- else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+ else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
if (ic->meta_dev) {
dm_put_device(ti, ic->meta_dev);
ic->meta_dev = NULL;
@@ -3204,17 +3200,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
- } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+ } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
if (r)
goto bad;
- } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+ } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
"Invalid journal_crypt argument");
if (r)
goto bad;
- } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+ } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
"Invalid journal_mac argument");
if (r)
@@ -3616,7 +3612,7 @@ static struct target_type integrity_target = {
.io_hints = dm_integrity_io_hints,
};
-int __init dm_integrity_init(void)
+static int __init dm_integrity_init(void)
{
int r;
@@ -3635,7 +3631,7 @@ int __init dm_integrity_init(void)
return r;
}
-void dm_integrity_exit(void)
+static void __exit dm_integrity_exit(void)
{
dm_unregister_target(&integrity_target);
kmem_cache_destroy(journal_io_cache);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index f666778ad237..c740153b4e52 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -2018,3 +2018,106 @@ out:
return r;
}
+
+
+/**
+ * dm_early_create - create a mapped device in early boot.
+ *
+ * @dmi: Contains main information of the device mapping to be created.
+ * @spec_array: array of pointers to struct dm_target_spec. Describes the
+ * mapping table of the device.
+ * @target_params_array: array of strings with the parameters to a specific
+ * target.
+ *
+ * Instead of having the struct dm_target_spec and the parameters for every
+ * target embedded at the end of struct dm_ioctl (as performed in a normal
+ * ioctl), pass them as arguments, so the caller doesn't need to serialize them.
+ * The size of the spec_array and target_params_array is given by
+ * @dmi->target_count.
+ * This function is supposed to be called in early boot, so locking mechanisms
+ * to protect against concurrent loads are not required.
+ */
+int __init dm_early_create(struct dm_ioctl *dmi,
+ struct dm_target_spec **spec_array,
+ char **target_params_array)
+{
+ int r, m = DM_ANY_MINOR;
+ struct dm_table *t, *old_map;
+ struct mapped_device *md;
+ unsigned int i;
+
+ if (!dmi->target_count)
+ return -EINVAL;
+
+ r = check_name(dmi->name);
+ if (r)
+ return r;
+
+ if (dmi->flags & DM_PERSISTENT_DEV_FLAG)
+ m = MINOR(huge_decode_dev(dmi->dev));
+
+ /* alloc dm device */
+ r = dm_create(m, &md);
+ if (r)
+ return r;
+
+ /* hash insert */
+ r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md);
+ if (r)
+ goto err_destroy_dm;
+
+ /* alloc table */
+ r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
+ if (r)
+ goto err_destroy_dm;
+
+ /* add targets */
+ for (i = 0; i < dmi->target_count; i++) {
+ r = dm_table_add_target(t, spec_array[i]->target_type,
+ (sector_t) spec_array[i]->sector_start,
+ (sector_t) spec_array[i]->length,
+ target_params_array[i]);
+ if (r) {
+ DMWARN("error adding target to table");
+ goto err_destroy_table;
+ }
+ }
+
+ /* finish table */
+ r = dm_table_complete(t);
+ if (r)
+ goto err_destroy_table;
+
+ md->type = dm_table_get_type(t);
+ /* setup md->queue to reflect md's type (may block) */
+ r = dm_setup_md_queue(md, t);
+ if (r) {
+ DMWARN("unable to set up device queue for new table.");
+ goto err_destroy_table;
+ }
+
+ /* Set new map */
+ dm_suspend(md, 0);
+ old_map = dm_swap_table(md, t);
+ if (IS_ERR(old_map)) {
+ r = PTR_ERR(old_map);
+ goto err_destroy_table;
+ }
+ set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG));
+
+ /* resume device */
+ r = dm_resume(md);
+ if (r)
+ goto err_destroy_table;
+
+ DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name);
+ dm_put(md);
+ return 0;
+
+err_destroy_table:
+ dm_table_destroy(t);
+err_destroy_dm:
+ dm_put(md);
+ dm_destroy(md);
+ return r;
+}
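
For reference, a hedged sketch of what a boot-time caller (such as the dm-init
code earlier in this diff) assembles before invoking dm_early_create(); the
field names follow include/uapi/linux/dm-ioctl.h, while the device name, length
and "8:1 0" linear parameters are purely illustrative:

        struct dm_ioctl dmi = {
                .target_count = 1,
                .flags = DM_READONLY_FLAG,      /* "ro" in the boot argument */
        };
        struct dm_target_spec spec = {
                .sector_start = 0,
                .length = 4096,                 /* sectors */
        };
        struct dm_target_spec *spec_array[1] = { &spec };
        char *params[1] = { "8:1 0" };          /* <major:minor> <offset> */
        int r;

        strscpy(dmi.name, "lroot", sizeof(dmi.name));
        strscpy(spec.target_type, "linear", sizeof(spec.target_type));
        r = dm_early_create(&dmi, spec_array, params);
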
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index adcfe8ae10aa..9fdef6897316 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2986,11 +2986,6 @@ static void configure_discard_support(struct raid_set *rs)
}
}
- /*
- * RAID1 and RAID10 personalities require bio splitting,
- * RAID0/4/5/6 don't and process large discard bios properly.
- */
- ti->split_discard_bios = !!(rs_is_raid1(rs) || rs_is_raid10(rs));
ti->num_discard_bios = 1;
}
@@ -3747,6 +3742,15 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, chunk_size);
blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
+
+ /*
+ * RAID1 and RAID10 personalities require bio splitting,
+ * RAID0/4/5/6 don't and process large discard bios properly.
+ */
+ if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+ limits->discard_granularity = chunk_size;
+ limits->max_discard_sectors = chunk_size;
+ }
}
static void raid_postsuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b2c03c079a8d..b66745bd08bb 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -12,6 +12,22 @@
#define DM_MSG_PREFIX "core-rq"
+/*
+ * One of these is allocated per request.
+ */
+struct dm_rq_target_io {
+ struct mapped_device *md;
+ struct dm_target *ti;
+ struct request *orig, *clone;
+ struct kthread_work work;
+ blk_status_t error;
+ union map_info info;
+ struct dm_stats_aux stats_aux;
+ unsigned long duration_jiffies;
+ unsigned n_sectors;
+ unsigned completed;
+};
+
#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
@@ -206,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
}
if (unlikely(error == BLK_STS_TARGET)) {
- if (req_op(clone) == REQ_OP_WRITE_SAME &&
- !clone->q->limits.max_write_same_sectors)
+ if (req_op(clone) == REQ_OP_DISCARD &&
+ !clone->q->limits.max_discard_sectors)
+ disable_discard(tio->md);
+ else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+ !clone->q->limits.max_write_same_sectors)
disable_write_same(tio->md);
- if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
- !clone->q->limits.max_write_zeroes_sectors)
+ else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+ !clone->q->limits.max_write_zeroes_sectors)
disable_write_zeroes(tio->md);
}
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index b39245545229..1eea0da641db 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -17,22 +17,6 @@
struct mapped_device;
/*
- * One of these is allocated per request.
- */
-struct dm_rq_target_io {
- struct mapped_device *md;
- struct dm_target *ti;
- struct request *orig, *clone;
- struct kthread_work work;
- blk_status_t error;
- union map_info info;
- struct dm_stats_aux stats_aux;
- unsigned long duration_jiffies;
- unsigned n_sectors;
- unsigned completed;
-};
-
-/*
* For request-based dm - the bio clones we allocate are embedded in these
* structs.
*
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 36805b12661e..a168963b757d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2338,13 +2338,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
return do_origin(o->dev, bio);
}
-static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
-{
- DMWARN("device does not support dax.");
- return -EIO;
-}
-
/*
* Set the target "max_io_len" field to the minimum of all the snapshots'
* chunk sizes.
@@ -2404,7 +2397,6 @@ static struct target_type origin_target = {
.postsuspend = origin_postsuspend,
.status = origin_status,
.iterate_devices = origin_iterate_devices,
- .direct_access = origin_dax_direct_access,
};
static struct target_type snapshot_target = {
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index fae35caf3672..8a0f057b8122 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -61,8 +61,7 @@ static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_pat
{
struct switch_ctx *sctx;
- sctx = kzalloc(sizeof(struct switch_ctx) + nr_paths * sizeof(struct switch_path),
- GFP_KERNEL);
+ sctx = kzalloc(struct_size(sctx, path_list, nr_paths), GFP_KERNEL);
if (!sctx)
return NULL;
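
struct_size() (include/linux/overflow.h) is the overflow-safe spelling of the
open-coded trailing-array sum; conceptually the new allocation computes, but
saturating instead of wrapping on overflow:

        size = sizeof(*sctx) + nr_paths * sizeof(sctx->path_list[0]);
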
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ba9481f1bf3c..cde3b49b2a91 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}
+static int device_requires_stable_pages(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+}
+
+/*
+ * If any underlying device requires stable pages, a table must require
+ * them as well. Only targets that support iterate_devices are considered:
+ * we don't want error, zero, etc. to require stable pages.
+ */
+static bool dm_table_requires_stable_pages(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+ return true;
+ }
+
+ return false;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
dm_table_verify_integrity(t);
/*
+ * Some devices don't use blk_integrity but still want stable pages
+ * because they do their own checksumming.
+ */
+ if (dm_table_requires_stable_pages(t))
+ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ else
+ q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+
+ /*
* Determine whether or not this queue's I/O timings contribute
* to the entropy pool. Only request-based targets use this.
* Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
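
For context, the capability bit set above is what mm's writeback path consults:
once BDI_CAP_STABLE_WRITES is advertised, page writers stall until writeback
finishes, so a lower layer that checksums in flight (dm-integrity here) never
sees a page mutate mid-checksum. A minimal sketch of the consumer side,
assuming a page-cache page on a write path:

        /* sketch: in a ->page_mkwrite()/write path, before dirtying */
        lock_page(page);
        wait_for_stable_page(page);     /* no-op unless the bdi requires stable pages */
        /* ... safe to modify the page now ... */
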
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index e83b63608262..fcd887703f95 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3283,6 +3283,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
as.argc = argc;
as.argv = argv;
+ /* make sure metadata and data are different devices */
+ if (!strcmp(argv[0], argv[1])) {
+ ti->error = "Error setting metadata or data device";
+ r = -EINVAL;
+ goto out_unlock;
+ }
+
/*
* Set default pool features.
*/
@@ -4167,6 +4174,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
tc->sort_bio_list = RB_ROOT;
if (argc == 3) {
+ if (!strcmp(argv[0], argv[2])) {
+ ti->error = "Error setting origin device";
+ r = -EINVAL;
+ goto bad_origin_dev;
+ }
+
r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
if (r) {
ti->error = "Error opening origin device";
@@ -4227,7 +4240,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (tc->pool->pf.discard_enabled) {
ti->discards_supported = true;
ti->num_discard_bios = 1;
- ti->split_discard_bios = false;
}
mutex_unlock(&dm_thin_pool_table.mutex);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0ce04e5b4afb..b634fa23f4c4 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -73,7 +73,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
*offset = (unsigned)(position - (block << v->data_dev_block_bits));
res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
- if (unlikely(IS_ERR(res))) {
+ if (IS_ERR(res)) {
DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
v->data_dev->name, (unsigned long long)rsb,
(unsigned long long)(v->fec->start + block),
@@ -163,7 +163,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
dm_bufio_release(buf);
par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
- if (unlikely(IS_ERR(par)))
+ if (IS_ERR(par))
return PTR_ERR(par);
}
}
@@ -253,7 +253,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
}
bbuf = dm_bufio_read(bufio, block, &buf);
- if (unlikely(IS_ERR(bbuf))) {
+ if (IS_ERR(bbuf)) {
DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
v->data_dev->name,
(unsigned long long)rsb,
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 2b8cee35e4d5..f7822875589e 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1859,7 +1859,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- wc->writeback_wq = alloc_workqueue("writecache-writeabck", WQ_MEM_RECLAIM, 1);
+ wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
if (!wc->writeback_wq) {
r = -ENOMEM;
ti->error = "Could not allocate writeback workqueue";
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 6af5babe6837..8865c1709e16 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -727,7 +727,6 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->per_io_data_size = sizeof(struct dmz_bioctx);
ti->flush_supported = true;
ti->discards_supported = true;
- ti->split_discard_bios = true;
/* The exposed capacity is the number of chunks that can be mapped */
ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 515e6af9bed2..043f0761e4a0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -158,9 +158,6 @@ struct table_device {
struct dm_dev dm_dev;
};
-static struct kmem_cache *_rq_tio_cache;
-static struct kmem_cache *_rq_cache;
-
/*
* Bio-based DM's mempools' reserved IOs set by the user.
*/
@@ -222,20 +219,11 @@ static unsigned dm_get_numa_node(void)
static int __init local_init(void)
{
- int r = -ENOMEM;
-
- _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
- if (!_rq_tio_cache)
- return r;
-
- _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
- __alignof__(struct request), 0, NULL);
- if (!_rq_cache)
- goto out_free_rq_tio_cache;
+ int r;
r = dm_uevent_init();
if (r)
- goto out_free_rq_cache;
+ return r;
deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
if (!deferred_remove_workqueue) {
@@ -257,10 +245,6 @@ out_free_workqueue:
destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
dm_uevent_exit();
-out_free_rq_cache:
- kmem_cache_destroy(_rq_cache);
-out_free_rq_tio_cache:
- kmem_cache_destroy(_rq_tio_cache);
return r;
}
@@ -270,8 +254,6 @@ static void local_exit(void)
flush_scheduled_work();
destroy_workqueue(deferred_remove_workqueue);
- kmem_cache_destroy(_rq_cache);
- kmem_cache_destroy(_rq_tio_cache);
unregister_blkdev(_major, _name);
dm_uevent_exit();
@@ -963,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
}
}
+void disable_discard(struct mapped_device *md)
+{
+ struct queue_limits *limits = dm_get_queue_limits(md);
+
+ /* device doesn't really support DISCARD, disable it */
+ limits->max_discard_sectors = 0;
+ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
void disable_write_same(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);
@@ -988,11 +979,14 @@ static void clone_endio(struct bio *bio)
dm_endio_fn endio = tio->ti->type->end_io;
if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
- if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ if (bio_op(bio) == REQ_OP_DISCARD &&
+ !bio->bi_disk->queue->limits.max_discard_sectors)
+ disable_discard(md);
+ else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+ !bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
- if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
@@ -1060,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}
- /*
- * BIO based queue uses its own splitting. When multipage bvecs
- * is switched on, size of the incoming bio may be too big to
- * be handled in some targets, such as crypt.
- *
- * When these targets are ready for the big bio, we can remove
- * the limit.
- */
- ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
+ ti->max_io_len = (uint32_t) len;
return 0;
}
@@ -1478,17 +1464,10 @@ static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
return ti->num_write_zeroes_bios;
}
-typedef bool (*is_split_required_fn)(struct dm_target *ti);
-
-static bool is_split_required_for_discard(struct dm_target *ti)
-{
- return ti->split_discard_bios;
-}
-
static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
- unsigned num_bios, bool is_split_required)
+ unsigned num_bios)
{
- unsigned len;
+ unsigned len = ci->sector_count;
/*
* Even though the device advertised support for this type of
@@ -1499,11 +1478,6 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
if (!num_bios)
return -EOPNOTSUPP;
- if (!is_split_required)
- len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
- else
- len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
-
__send_duplicate_bios(ci, ti, num_bios, &len);
ci->sector += len;
@@ -1514,23 +1488,38 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti),
- is_split_required_for_discard(ti));
+ return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
}
static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti), false);
+ return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
}
static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti), false);
+ return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
}
static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti), false);
+ return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
+}
+
+static bool is_abnormal_io(struct bio *bio)
+{
+ bool r = false;
+
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_SAME:
+ case REQ_OP_WRITE_ZEROES:
+ r = true;
+ break;
+ }
+
+ return r;
}
static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
@@ -1565,7 +1554,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (!dm_target_is_valid(ti))
return -EIO;
- if (unlikely(__process_abnormal_io(ci, ti, &r)))
+ if (__process_abnormal_io(ci, ti, &r))
return r;
len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
@@ -1601,13 +1590,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
- if (unlikely(!map)) {
- bio_io_error(bio);
- return ret;
- }
-
- blk_queue_split(md->queue, &bio);
-
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
@@ -1675,18 +1657,13 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* Optimized variant of __split_and_process_bio that leverages the
* fact that targets that use it do _not_ have a need to split bios.
*/
-static blk_qc_t __process_bio(struct mapped_device *md,
- struct dm_table *map, struct bio *bio)
+static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
+ struct bio *bio, struct dm_target *ti)
{
struct clone_info ci;
blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
- if (unlikely(!map)) {
- bio_io_error(bio);
- return ret;
- }
-
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
@@ -1704,21 +1681,11 @@ static blk_qc_t __process_bio(struct mapped_device *md,
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
} else {
- struct dm_target *ti = md->immutable_target;
struct dm_target_io *tio;
- /*
- * Defend against IO still getting in during teardown
- * - as was seen for a time with nvme-fcloop
- */
- if (WARN_ON_ONCE(!ti || !dm_target_is_valid(ti))) {
- error = -EIO;
- goto out;
- }
-
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
- if (unlikely(__process_abnormal_io(&ci, ti, &error)))
+ if (__process_abnormal_io(&ci, ti, &error))
goto out;
tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
@@ -1730,11 +1697,55 @@ out:
return ret;
}
+static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
+{
+ unsigned len, sector_count;
+
+ sector_count = bio_sectors(*bio);
+ len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
+
+ if (sector_count > len) {
+ struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
+
+ bio_chain(split, *bio);
+ trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
+ generic_make_request(*bio);
+ *bio = split;
+ }
+}
+
static blk_qc_t dm_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
+ blk_qc_t ret = BLK_QC_T_NONE;
+ struct dm_target *ti = md->immutable_target;
+
+ if (unlikely(!map)) {
+ bio_io_error(bio);
+ return ret;
+ }
+
+ if (!ti) {
+ ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
+ if (unlikely(!ti || !dm_target_is_valid(ti))) {
+ bio_io_error(bio);
+ return ret;
+ }
+ }
+
+ /*
+ * If we are in ->make_request_fn, we need to use blk_queue_split();
+ * otherwise queue_limits for abnormal requests (e.g. discard,
+ * writesame, etc) won't be imposed.
+ */
+ if (current->bio_list) {
+ blk_queue_split(md->queue, &bio);
+ if (!is_abnormal_io(bio))
+ dm_queue_split(md, ti, &bio);
+ }
+
if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
- return __process_bio(md, map, bio);
+ return __process_bio(md, map, bio, ti);
else
return __split_and_process_bio(md, map, bio);
}
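
To make the split path above concrete, a worked sketch with hypothetical
geometry: a target whose max_io_len boundary sits at sector 2048 receives a
4096-sector bio at sector 0. dm_queue_split() peels off the in-bounds prefix
and requeues the tail:

        len = min_t(sector_t, max_io_len(0, ti), 4096);   /* -> 2048 */
        split = bio_split(bio, len, GFP_NOIO, &md->queue->bio_split);
        bio_chain(split, bio);          /* parent completes only after both halves */
        generic_make_request(bio);      /* tail (sectors 2048..4095) goes back around */
        /* 'split' (sectors 0..2047) is processed now */
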
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 492a3f8ac119..749ec268d957 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -35,7 +35,10 @@
#define MAX_HOLDERS 4
#define MAX_STACK 10
-typedef unsigned long stack_entries[MAX_STACK];
+struct stack_store {
+ unsigned int nr_entries;
+ unsigned long entries[MAX_STACK];
+};
struct block_lock {
spinlock_t lock;
@@ -44,8 +47,7 @@ struct block_lock {
struct task_struct *holders[MAX_HOLDERS];
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
- struct stack_trace traces[MAX_HOLDERS];
- stack_entries entries[MAX_HOLDERS];
+ struct stack_store traces[MAX_HOLDERS];
#endif
};
@@ -73,7 +75,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
- struct stack_trace *t;
+ struct stack_store *t;
#endif
get_task_struct(task);
@@ -81,11 +83,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
t = lock->traces + h;
- t->nr_entries = 0;
- t->max_entries = MAX_STACK;
- t->entries = lock->entries[h];
- t->skip = 2;
- save_stack_trace(t);
+ t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
#endif
}
@@ -106,7 +104,8 @@ static int __check_holder(struct block_lock *lock)
DMERR("recursive lock detected in metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
DMERR("previously held here:");
- print_stack_trace(lock->traces + i, 4);
+ stack_trace_print(lock->traces[i].entries,
+ lock->traces[i].nr_entries, 4);
DMERR("subsequent acquisition attempted here:");
dump_stack();
@@ -462,7 +461,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
int r;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -498,7 +497,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
return -EPERM;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -531,7 +530,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm,
int r;
p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
if (unlikely(!p))
return -EWOULDBLOCK;
@@ -567,7 +566,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
return -EPERM;
p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
- if (unlikely(IS_ERR(p)))
+ if (IS_ERR(p))
return PTR_ERR(p);
memset(p, 0, dm_bm_block_size(bm));
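
The block-manager hunks above are part of the tree-wide move from struct
stack_trace to the simpler stacktrace API. A minimal sketch of the new calls,
with a hypothetical MAX_STACK:

        unsigned long entries[MAX_STACK];
        unsigned int nr_entries;

        /* capture, skipping the two innermost frames */
        nr_entries = stack_trace_save(entries, MAX_STACK, 2);
        /* ... later, print with a 4-space indent */
        stack_trace_print(entries, nr_entries, 4);
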
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index abb5d382f64d..3b6880dd648d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3939,6 +3939,8 @@ static int raid10_run(struct mddev *mddev)
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
"reshape");
+ if (!mddev->sync_thread)
+ goto out_free_conf;
}
return 0;
@@ -4670,7 +4672,6 @@ read_more:
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
generic_make_request(read_bio);
- sector_nr += nr_sectors;
sectors_done += nr_sectors;
if (sector_nr <= last)
goto read_more;
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index bfb811407061..43c714a8798c 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -45,6 +45,7 @@ extern void ppl_stripe_write_finished(struct stripe_head *sh);
extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
extern void ppl_quiesce(struct r5conf *conf, int quiesce);
extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
+extern struct md_sysfs_entry ppl_write_hint;
static inline bool raid5_has_log(struct r5conf *conf)
{
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 3a7c36326589..17e9e7d51097 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -16,11 +16,11 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
-#include <linux/flex_array.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"
+#include "raid5-log.h"
/*
* PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
@@ -116,6 +116,8 @@ struct ppl_conf {
/* stripes to retry if failed to allocate io_unit */
struct list_head no_mem_stripes;
spinlock_t no_mem_stripes_lock;
+
+ unsigned short write_hint;
};
struct ppl_log {
@@ -165,7 +167,7 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
- struct page **srcs = flex_array_get(percpu->scribble, 0);
+ struct page **srcs = percpu->scribble;
int count = 0, pd_idx = sh->pd_idx, i;
struct async_submit_ctl submit;
@@ -196,8 +198,7 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
}
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
- NULL, sh, flex_array_get(percpu->scribble, 0)
- + sizeof(struct page *) * (sh->disks + 2));
+ NULL, sh, (void *) (srcs + sh->disks + 2));
if (count == 1)
tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
@@ -476,6 +477,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->next_io_sector;
bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
+ bio->bi_write_hint = ppl_conf->write_hint;
pr_debug("%s: log->current_io_sector: %llu\n", __func__,
(unsigned long long)log->next_io_sector);
@@ -505,6 +507,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
&ppl_conf->bs);
bio->bi_opf = prev->bi_opf;
+ bio->bi_write_hint = prev->bi_write_hint;
bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
@@ -1409,6 +1412,7 @@ int ppl_init_log(struct r5conf *conf)
atomic64_set(&ppl_conf->seq, 0);
INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
spin_lock_init(&ppl_conf->no_mem_stripes_lock);
+ ppl_conf->write_hint = RWF_WRITE_LIFE_NOT_SET;
if (!mddev->external) {
ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
@@ -1503,3 +1507,60 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
return ret;
}
+
+static ssize_t
+ppl_write_hint_show(struct mddev *mddev, char *buf)
+{
+ size_t ret = 0;
+ struct r5conf *conf;
+ struct ppl_conf *ppl_conf = NULL;
+
+ spin_lock(&mddev->lock);
+ conf = mddev->private;
+ if (conf && raid5_has_ppl(conf))
+ ppl_conf = conf->log_private;
+ ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
+ spin_unlock(&mddev->lock);
+
+ return ret;
+}
+
+static ssize_t
+ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
+{
+ struct r5conf *conf;
+ struct ppl_conf *ppl_conf;
+ int err = 0;
+ unsigned short new;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (kstrtou16(page, 10, &new))
+ return -EINVAL;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+
+ conf = mddev->private;
+ if (!conf) {
+ err = -ENODEV;
+ } else if (raid5_has_ppl(conf)) {
+ ppl_conf = conf->log_private;
+ if (!ppl_conf)
+ err = -EINVAL;
+ else
+ ppl_conf->write_hint = new;
+ } else {
+ err = -EINVAL;
+ }
+
+ mddev_unlock(mddev);
+
+ return err ?: len;
+}
+
+struct md_sysfs_entry
+ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
+ ppl_write_hint_show,
+ ppl_write_hint_store);
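
Once raid5_attrs picks this entry up (see the raid5.c hunk below), the hint is
reachable from userspace under the md sysfs directory. A hedged userspace
sketch — the array name md0 is hypothetical, and 2 corresponds to
RWH_WRITE_LIFE_SHORT in linux/fcntl.h:

        #include <fcntl.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/sys/block/md0/md/ppl_write_hint", O_WRONLY);

                if (fd < 0)
                        return 1;
                /* 2 == RWH_WRITE_LIFE_SHORT */
                if (write(fd, "2", 1) != 1) {
                        close(fd);
                        return 1;
                }
                close(fd);
                return 0;
        }
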
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cecea901ab8c..c033bfcb209e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -54,7 +54,6 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
-#include <linux/flex_array.h>
#include <trace/events/block.h>
#include <linux/list_sort.h>
@@ -1394,22 +1393,16 @@ static void ops_complete_compute(void *stripe_head_ref)
}
/* return a pointer to the address conversion region of the scribble buffer */
-static addr_conv_t *to_addr_conv(struct stripe_head *sh,
- struct raid5_percpu *percpu, int i)
+static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
{
- void *addr;
-
- addr = flex_array_get(percpu->scribble, i);
- return addr + sizeof(struct page *) * (sh->disks + 2);
+ return percpu->scribble + i * percpu->scribble_obj_size;
}
/* return a pointer to the address conversion region of the scribble buffer */
-static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
+static addr_conv_t *to_addr_conv(struct stripe_head *sh,
+ struct raid5_percpu *percpu, int i)
{
- void *addr;
-
- addr = flex_array_get(percpu->scribble, i);
- return addr;
+ return (void *) (to_addr_page(percpu, i) + sh->disks + 2);
}
static struct dma_async_tx_descriptor *
@@ -2238,21 +2231,23 @@ static int grow_stripes(struct r5conf *conf, int num)
* calculate over all devices (not just the data blocks), using zeros in place
* of the P and Q blocks.
*/
-static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
+static int scribble_alloc(struct raid5_percpu *percpu,
+ int num, int cnt, gfp_t flags)
{
- struct flex_array *ret;
- size_t len;
+ size_t obj_size =
+ sizeof(struct page *) * (num+2) +
+ sizeof(addr_conv_t) * (num+2);
+ void *scribble;
- len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
- ret = flex_array_alloc(len, cnt, flags);
- if (!ret)
- return NULL;
- /* always prealloc all elements, so no locking is required */
- if (flex_array_prealloc(ret, 0, cnt, flags)) {
- flex_array_free(ret);
- return NULL;
- }
- return ret;
+ scribble = kvmalloc_array(cnt, obj_size, flags);
+ if (!scribble)
+ return -ENOMEM;
+
+ kvfree(percpu->scribble);
+
+ percpu->scribble = scribble;
+ percpu->scribble_obj_size = obj_size;
+ return 0;
}
static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
@@ -2270,23 +2265,18 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
return 0;
mddev_suspend(conf->mddev);
get_online_cpus();
+
for_each_present_cpu(cpu) {
struct raid5_percpu *percpu;
- struct flex_array *scribble;
percpu = per_cpu_ptr(conf->percpu, cpu);
- scribble = scribble_alloc(new_disks,
- new_sectors / STRIPE_SECTORS,
- GFP_NOIO);
-
- if (scribble) {
- flex_array_free(percpu->scribble);
- percpu->scribble = scribble;
- } else {
- err = -ENOMEM;
+ err = scribble_alloc(percpu, new_disks,
+ new_sectors / STRIPE_SECTORS,
+ GFP_NOIO);
+ if (err)
break;
- }
}
+
put_online_cpus();
mddev_resume(conf->mddev);
if (!err) {
@@ -6660,6 +6650,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_skip_copy.attr,
&raid5_rmw_level.attr,
&r5c_journal_mode.attr,
+ &ppl_write_hint.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -6742,25 +6733,26 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
{
safe_put_page(percpu->spare_page);
- if (percpu->scribble)
- flex_array_free(percpu->scribble);
percpu->spare_page = NULL;
+ kvfree(percpu->scribble);
percpu->scribble = NULL;
}
static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
{
- if (conf->level == 6 && !percpu->spare_page)
+ if (conf->level == 6 && !percpu->spare_page) {
percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
- percpu->scribble = scribble_alloc(max(conf->raid_disks,
- conf->previous_raid_disks),
- max(conf->chunk_sectors,
- conf->prev_chunk_sectors)
- / STRIPE_SECTORS,
- GFP_KERNEL);
-
- if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+ if (!percpu->spare_page)
+ return -ENOMEM;
+ }
+
+ if (scribble_alloc(percpu,
+ max(conf->raid_disks,
+ conf->previous_raid_disks),
+ max(conf->chunk_sectors,
+ conf->prev_chunk_sectors)
+ / STRIPE_SECTORS,
+ GFP_KERNEL)) {
free_scratch_buffer(conf, percpu);
return -ENOMEM;
}
@@ -7402,6 +7394,8 @@ static int raid5_run(struct mddev *mddev)
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
"reshape");
+ if (!mddev->sync_thread)
+ goto abort;
}
/* Ok, everything is just fine now */
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 8474c224127b..cf991f13403e 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -638,10 +638,11 @@ struct r5conf {
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
- struct flex_array *scribble; /* space for constructing buffer
- * lists and performing address
- * conversions
- */
+ void *scribble; /* space for constructing buffer
+ * lists and performing address
+ * conversions
+ */
+ int scribble_obj_size;
} __percpu *percpu;
int scribble_disks;
int scribble_sectors;
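
A sketch of the memory layout the raid5 hunks above settle on: one flat
kvmalloc'ed buffer of cnt objects per CPU, each object holding num+2 page
pointers immediately followed by num+2 address-conversion slots, so both
accessors reduce to pointer arithmetic:

        size_t obj_size = sizeof(struct page *) * (num + 2) +
                          sizeof(addr_conv_t) * (num + 2);

        /* to_addr_page(percpu, i): page-pointer list of the i-th object */
        struct page **srcs = percpu->scribble + i * percpu->scribble_obj_size;

        /* to_addr_conv(sh, percpu, i): region after the sh->disks + 2 pointers */
        addr_conv_t *conv = (addr_conv_t *)(srcs + sh->disks + 2);
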
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 391b6fd483e1..156a0d76ab2a 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -38,6 +38,7 @@ static __poll_t cec_poll(struct file *filp,
struct cec_adapter *adap = fh->adap;
__poll_t res = 0;
+ poll_wait(filp, &fh->wait, poll);
if (!cec_is_registered(adap))
return EPOLLERR | EPOLLHUP;
mutex_lock(&adap->lock);
@@ -48,7 +49,6 @@ static __poll_t cec_poll(struct file *filp,
res |= EPOLLIN | EPOLLRDNORM;
if (fh->total_queued_events)
res |= EPOLLPRI;
- poll_wait(filp, &fh->wait, poll);
mutex_unlock(&adap->lock);
return res;
}
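
This cec change is the first of several identical poll fixes in this series
(see the dmxdev, dvb_ca_en50221 and videobuf2 hunks below): poll_wait() must
run before any early return, or a wakeup racing with the check is lost and the
caller can sleep forever. The enforced shape, as a sketch with hypothetical
driver state:

        static __poll_t foo_poll(struct file *file, poll_table *wait)
        {
                struct foo *f = file->private_data;     /* hypothetical */
                __poll_t mask = 0;

                poll_wait(file, &f->wait, wait);        /* register first, always */
                if (!f->registered)
                        return EPOLLERR | EPOLLHUP;
                if (foo_data_ready(f))
                        mask |= EPOLLIN | EPOLLRDNORM;
                return mask;
        }
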
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index c790ae264464..be4355a4c126 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -105,7 +105,7 @@ void saa7146_buffer_finish(struct saa7146_dev *dev,
}
q->curr->vb.state = state;
- v4l2_get_timestamp(&q->curr->vb.ts);
+ q->curr->vb.ts = ktime_get_ns();
wake_up(&q->curr->vb.done);
q->curr = NULL;
diff --git a/drivers/media/common/saa7146/saa7146_i2c.c b/drivers/media/common/saa7146/saa7146_i2c.c
index 3feddc52c446..df9ebe2a168c 100644
--- a/drivers/media/common/saa7146/saa7146_i2c.c
+++ b/drivers/media/common/saa7146/saa7146_i2c.c
@@ -54,10 +54,7 @@ static int saa7146_i2c_msg_prepare(const struct i2c_msg *m, int num, __le32 *op)
/* loop through all messages */
for(i = 0; i < num; i++) {
- /* insert the address of the i2c-slave.
- note: we get 7 bit i2c-addresses,
- so we have to perform a translation */
- addr = (m[i].addr*2) + ( (0 != (m[i].flags & I2C_M_RD)) ? 1 : 0);
+ addr = i2c_8bit_addr_from_msg(&m[i]);
h1 = op_count/3; h2 = op_count%3;
op[h1] |= cpu_to_le32( (u8)addr << ((3-h2)*8));
op[h1] |= cpu_to_le32(SAA7146_I2C_START << ((3-h2)*2));
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index f90aa8109663..a0f0b5eef0bd 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -796,7 +796,7 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_f
return -EFAULT;
}
- /* vv->ov.fh is used to indicate that we have valid overlay informations, too */
+ /* vv->ov.fh is used to indicate that we have valid overlay information, too */
vv->ov.fh = fh;
/* check if our current overlay is active */
diff --git a/drivers/media/common/siano/sms-cards.c b/drivers/media/common/siano/sms-cards.c
index af6b2268db61..e238c9bc17d3 100644
--- a/drivers/media/common/siano/sms-cards.c
+++ b/drivers/media/common/siano/sms-cards.c
@@ -311,7 +311,7 @@ int sms_board_led_feedback(struct smscore_device_t *coredev, int led)
int board_id = smscore_get_board_id(coredev);
struct sms_board *board = sms_get_board(board_id);
- /* dont touch GPIO if LEDs are already set */
+ /* don't touch GPIO if LEDs are already set */
if (smscore_led_state(coredev, -1) == led)
return 0;
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index eb58853008c9..476fa7a8b152 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -750,7 +750,7 @@ struct sms_stats {
u32 num_of_corrected_mpe_tlbs;/* Number of MPE tables which were
corrected by MPE RS decoding */
/* Common params */
- u32 ber_error_count; /* Number of errornous SYNC bits. */
+ u32 ber_error_count; /* Number of erroneous SYNC bits. */
u32 ber_bit_count; /* Total number of SYNC bits. */
/* Interface information */
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index d9a590ae7545..07e0629af8ed 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -246,6 +246,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_YUV565:
case V4L2_PIX_FMT_YUV32:
+ case V4L2_PIX_FMT_AYUV32:
+ case V4L2_PIX_FMT_XYUV32:
+ case V4L2_PIX_FMT_VUYA32:
+ case V4L2_PIX_FMT_VUYX32:
tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUV420M:
@@ -372,6 +376,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_YUV32:
+ case V4L2_PIX_FMT_AYUV32:
+ case V4L2_PIX_FMT_XYUV32:
+ case V4L2_PIX_FMT_VUYA32:
+ case V4L2_PIX_FMT_VUYX32:
case V4L2_PIX_FMT_HSV32:
tpg->twopixelsize[0] = 2 * 4;
break;
@@ -1267,10 +1275,12 @@ static void gen_twopix(struct tpg_data *tpg,
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_XRGB32:
case V4L2_PIX_FMT_HSV32:
+ case V4L2_PIX_FMT_XYUV32:
alpha = 0;
/* fall through */
case V4L2_PIX_FMT_YUV32:
case V4L2_PIX_FMT_ARGB32:
+ case V4L2_PIX_FMT_AYUV32:
buf[0][offset] = alpha;
buf[0][offset + 1] = r_y_h;
buf[0][offset + 2] = g_u_s;
@@ -1278,9 +1288,11 @@ static void gen_twopix(struct tpg_data *tpg,
break;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_VUYX32:
alpha = 0;
/* fall through */
case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_VUYA32:
buf[0][offset] = b_v;
buf[0][offset + 1] = g_u_s;
buf[0][offset + 2] = r_y_h;
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 70e8c3366f9c..15b6b9c0a2e4 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -499,9 +499,9 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
vb->cnt_buf_init, vb->cnt_buf_cleanup,
vb->cnt_buf_prepare, vb->cnt_buf_finish);
- pr_info(" buf_queue: %u buf_done: %u buf_request_complete: %u\n",
- vb->cnt_buf_queue, vb->cnt_buf_done,
- vb->cnt_buf_request_complete);
+ pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
+ vb->cnt_buf_out_validate, vb->cnt_buf_queue,
+ vb->cnt_buf_done, vb->cnt_buf_request_complete);
pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
vb->cnt_mem_alloc, vb->cnt_mem_put,
vb->cnt_mem_prepare, vb->cnt_mem_finish,
@@ -934,7 +934,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
- vb->synced = false;
+ vb->synced = 0;
}
spin_lock_irqsave(&q->done_lock, flags);
@@ -1041,6 +1041,7 @@ static int __prepare_userptr(struct vb2_buffer *vb)
if (vb->planes[plane].mem_priv) {
if (!reacquired) {
reacquired = true;
+ vb->copied_timestamp = 0;
call_void_vb_qop(vb, buf_cleanup, vb);
}
call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
@@ -1165,6 +1166,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
if (!reacquired) {
reacquired = true;
+ vb->copied_timestamp = 0;
call_void_vb_qop(vb, buf_cleanup, vb);
}
@@ -1196,6 +1198,9 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
* userspace knows sooner rather than later if the dma-buf map fails.
*/
for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->planes[plane].dbuf_mapped)
+ continue;
+
ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
if (ret) {
dprintk(1, "failed to map dmabuf for plane %d\n",
@@ -1274,6 +1279,14 @@ static int __buf_prepare(struct vb2_buffer *vb)
return 0;
WARN_ON(vb->synced);
+ if (q->is_output) {
+ ret = call_vb_qop(vb, buf_out_validate, vb);
+ if (ret) {
+ dprintk(1, "buffer validation failed\n");
+ return ret;
+ }
+ }
+
vb->state = VB2_BUF_STATE_PREPARING;
switch (q->memory) {
@@ -1302,8 +1315,8 @@ static int __buf_prepare(struct vb2_buffer *vb)
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
- vb->synced = true;
- vb->prepared = true;
+ vb->synced = 1;
+ vb->prepared = 1;
vb->state = orig_state;
return 0;
@@ -1520,6 +1533,14 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
return -EINVAL;
}
+ if (q->is_output && !vb->prepared) {
+ ret = call_vb_qop(vb, buf_out_validate, vb);
+ if (ret) {
+ dprintk(1, "buffer validation failed\n");
+ return ret;
+ }
+ }
+
media_request_object_init(&vb->req_obj);
/* Make sure the request is in a safe state for updating. */
@@ -1750,7 +1771,6 @@ EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
static void __vb2_dqbuf(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
- unsigned int i;
/* nothing to do if the buffer is already dequeued */
if (vb->state == VB2_BUF_STATE_DEQUEUED)
@@ -1758,14 +1778,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
vb->state = VB2_BUF_STATE_DEQUEUED;
- /* unmap DMABUF buffer */
- if (q->memory == VB2_MEMORY_DMABUF)
- for (i = 0; i < vb->num_planes; ++i) {
- if (!vb->planes[i].dbuf_mapped)
- continue;
- call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
- vb->planes[i].dbuf_mapped = 0;
- }
call_void_bufop(q, init_buffer, vb);
}
@@ -1792,7 +1804,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
}
call_void_vb_qop(vb, buf_finish, vb);
- vb->prepared = false;
+ vb->prepared = 0;
if (pindex)
*pindex = vb->index;
@@ -1916,12 +1928,12 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish,
vb->planes[plane].mem_priv);
- vb->synced = false;
+ vb->synced = 0;
}
if (vb->prepared) {
call_void_vb_qop(vb, buf_finish, vb);
- vb->prepared = false;
+ vb->prepared = 0;
}
__vb2_dqbuf(vb);
@@ -1932,6 +1944,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
if (vb->request)
media_request_put(vb->request);
vb->request = NULL;
+ vb->copied_timestamp = 0;
}
}
@@ -2278,6 +2291,8 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
return 0;
+ poll_wait(file, &q->done_wq, wait);
+
/*
* Start file I/O emulator only if streaming API has not been used yet.
*/
@@ -2329,8 +2344,6 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
*/
if (q->last_buffer_dequeued)
return EPOLLIN | EPOLLRDNORM;
-
- poll_wait(file, &q->done_wq, wait);
}
/*
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index aff0ab7bf83d..82389aead6ed 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -439,42 +439,14 @@ static void vb2_dc_put_userptr(void *buf_priv)
set_page_dirty_lock(pages[i]);
sg_free_table(sgt);
kfree(sgt);
+ } else {
+ dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
+ buf->dma_dir, 0);
}
vb2_destroy_framevec(buf->vec);
kfree(buf);
}
-/*
- * For some kind of reserved memory there might be no struct page available,
- * so all that can be done to support such 'pages' is to try to convert
- * pfn to dma address or at the last resort just assume that
- * dma address == physical address (like it has been assumed in earlier version
- * of videobuf2-dma-contig
- */
-
-#ifdef __arch_pfn_to_dma
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
-}
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return (dma_addr_t)__pfn_to_bus(pfn);
-}
-#elif defined(__pfn_to_phys)
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- return (dma_addr_t)__pfn_to_phys(pfn);
-}
-#else
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- /* really, we cannot do anything better at this point */
- return (dma_addr_t)(pfn) << PAGE_SHIFT;
-}
-#endif
-
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
unsigned long size, enum dma_data_direction dma_dir)
{
@@ -528,7 +500,12 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
for (i = 1; i < n_pages; i++)
if (nums[i-1] + 1 != nums[i])
goto fail_pfnvec;
- buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+ buf->dma_addr = dma_map_resource(buf->dev,
+ __pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
+ if (dma_mapping_error(buf->dev, buf->dma_addr)) {
+ ret = -ENOMEM;
+ goto fail_pfnvec;
+ }
goto out;
}
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 015e737095cd..270c3162fdcb 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2010 Samsung Electronics
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -67,7 +67,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
int i;
order = get_order(size);
- /* Dont over allocate*/
+ /* Don't over-allocate */
if ((PAGE_SIZE << order) > size)
order--;
diff --git a/drivers/media/common/videobuf2/videobuf2-memops.c b/drivers/media/common/videobuf2/videobuf2-memops.c
index 89e51989332b..c4a85be48ac2 100644
--- a/drivers/media/common/videobuf2/videobuf2-memops.c
+++ b/drivers/media/common/videobuf2/videobuf2-memops.c
@@ -121,7 +121,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma)
}
/*
- * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
+ * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmapped
* video buffers
*/
const struct vm_operations_struct vb2_common_vm_ops = {
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 3a0ca2f9854f..d09dee20e421 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -143,7 +143,7 @@ static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
* and the timecode field and flag if needed.
*/
if (q->copy_timestamp)
- vb->timestamp = timeval_to_ns(&b->timestamp);
+ vb->timestamp = v4l2_timeval_to_ns(&b->timestamp);
vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
if (b->flags & V4L2_BUF_FLAG_TIMECODE)
vbuf->timecode = b->timecode;
@@ -409,6 +409,15 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
*/
if (WARN_ON(!q->ops->buf_request_complete))
return -EINVAL;
+ /*
+ * Make sure this op is implemented by the driver for the output queue.
+ * It's easy to forget this callback, but it is important to correctly
+ * validate the 'field' value at QBUF time.
+ */
+ if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ !q->ops->buf_out_validate))
+ return -EINVAL;
if (vb->state != VB2_BUF_STATE_DEQUEUED) {
dprintk(1, "%s: buffer is not in dequeued state\n", opname);
@@ -567,7 +576,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
unsigned int plane;
- if (!vb->vb2_queue->is_output || !vb->vb2_queue->copy_timestamp)
+ if (!vb->vb2_queue->copy_timestamp)
vb->timestamp = 0;
for (plane = 0; plane < vb->num_planes; ++plane) {
@@ -589,6 +598,19 @@ static const struct vb2_buf_ops v4l2_buf_ops = {
.copy_timestamp = __copy_timestamp,
};
+int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
+ unsigned int start_idx)
+{
+ unsigned int i;
+
+ for (i = start_idx; i < q->num_buffers; i++)
+ if (q->bufs[i]->copied_timestamp &&
+ q->bufs[i]->timestamp == timestamp)
+ return i;
+ return -1;
+}
+EXPORT_SYMBOL_GPL(vb2_find_timestamp);
+
/*
* vb2_querybuf() - query video buffer information
* @q: videobuf queue
@@ -846,16 +868,14 @@ EXPORT_SYMBOL_GPL(vb2_queue_release);
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
struct video_device *vfd = video_devdata(file);
- __poll_t req_events = poll_requested_events(wait);
__poll_t res = 0;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
struct v4l2_fh *fh = file->private_data;
+ poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
res = EPOLLPRI;
- else if (req_events & EPOLLPRI)
- poll_wait(file, &fh->wait, wait);
}
return res | vb2_core_poll(q, file, wait);
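
vb2_find_timestamp() above exists so stateless codec drivers can resolve a
timestamp copied from an OUTPUT buffer back to the CAPTURE buffer holding the
matching reference frame. A hedged sketch of the lookup; the queue and buffer
names are hypothetical:

        u64 ts = src_buf->vb2_buf.timestamp;    /* copied timestamp */
        int idx = vb2_find_timestamp(cap_q, ts, 0);

        if (idx >= 0) {
                struct vb2_buffer *ref = cap_q->bufs[idx];
                /* use 'ref' as the decoding reference */
        }
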
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 1544e8cef564..f14a872d1268 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1195,13 +1195,13 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
struct dmxdev_filter *dmxdevfilter = file->private_data;
__poll_t mask = 0;
+ poll_wait(file, &dmxdevfilter->buffer.queue, wait);
+
if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
return EPOLLERR;
if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
return dvb_vb2_poll(&dmxdevfilter->vb2_ctx, file, wait);
- poll_wait(file, &dmxdevfilter->buffer.queue, wait);
-
if (dmxdevfilter->state != DMXDEV_STATE_GO &&
dmxdevfilter->state != DMXDEV_STATE_DONE &&
dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
@@ -1346,13 +1346,13 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
dprintk("%s\n", __func__);
+ poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
+
if (dmxdev->exit)
return EPOLLERR;
if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
return dvb_vb2_poll(&dmxdev->dvr_vb2_ctx, file, wait);
- poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
-
if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
dmxdev->may_do_mmap) {
if (dmxdev->dvr_buffer.error)
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 4d371cea0d5d..ebf1e3b03819 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -1797,6 +1797,8 @@ static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
dprintk("%s\n", __func__);
+ poll_wait(file, &ca->wait_queue, wait);
+
if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
mask |= EPOLLIN;
@@ -1804,9 +1806,6 @@ static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
if (mask)
return mask;
- /* wait for something to happen */
- poll_wait(file, &ca->wait_queue, wait);
-
if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
mask |= EPOLLIN;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 27a1d4a98d73..fbdb4ecc7c50 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1596,7 +1596,7 @@ static bool is_dvbv3_delsys(u32 delsys)
*
* Provides emulation for delivery systems that are compatible with the old
* DVBv3 call. Among its usages, it provices support for ISDB-T, and allows
- * using a DVB-S2 only frontend just like it were a DVB-S, if the frontent
+ * using a DVB-S2 only frontend just like it were a DVB-S, if the frontend
* parameters are compatible with DVB-S spec.
*/
static int emulate_delivery_system(struct dvb_frontend *fe, u32 delsys)
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index b7171bf094fb..4a5834a1c3b7 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -898,7 +898,7 @@ EXPORT_SYMBOL(dvb_unregister_adapter);
/* if the miracle happens and "generic_usercopy()" is included into
the kernel, then this can vanish. please don't make the mistake and
- define this as video_usercopy(). this will introduce a dependecy
+ define this as video_usercopy(). this will introduce a dependency
to the v4l "videodev.o" module, which is unnecessary for some
cards (ie. the budget dvb-cards don't need the v4l module...) */
int dvb_usercopy(struct file *file,
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index c98093ed3dd7..8acf0b91b437 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -2947,7 +2947,7 @@ static int cxd2841er_sleep_tc_to_active_t(struct cxd2841er_priv *priv,
((priv->flags & CXD2841ER_ASCOT) ? 0x01 : 0x00), 0x01);
/* Set SLV-T Bank : 0x18 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x18);
- /* Pre-RS BER moniter setting */
+ /* Pre-RS BER monitor setting */
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x36, 0x40, 0x07);
/* FEC Auto Recovery setting */
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x30, 0x01, 0x01);
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 4813a88eb9f7..18c41cfef8d6 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -2459,7 +2459,7 @@ static int dib0090_tune(struct dvb_frontend *fe)
state->current_standard = state->fe->dtv_property_cache.delivery_system;
ret = 20;
- state->calibrate = CAPTRIM_CAL; /* captrim serach now */
+ state->calibrate = CAPTRIM_CAL; /* captrim search now */
}
else if (*tune_state == CT_TUNER_STEP_0) { /* Warning : because of captrim cal, if you change this step, change it also in _cal.c file because it is the step following captrim cal state machine */
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index b79358d09de6..389db9077ad5 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -369,7 +369,7 @@ static int dib7000m_sad_calib(struct dib7000m_state *state)
{
/* internal */
-// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth
+// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth
dib7000m_write_word(state, 929, (0 << 1) | (0 << 0));
dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096
@@ -928,7 +928,7 @@ static void dib7000m_set_channel(struct dib7000m_state *state, struct dtv_fronte
}
state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO
- /* deactive the possibility of diversity reception if extended interleave - not for 7000MC */
+ /* deactivate the possibility of diversity reception if extended interleave - not for 7000MC */
/* P_dvsy_sync_mode = 0, P_dvsy_sync_enable=1, P_dvcb_comb_mode=2 */
if (1 == 1 || state->revision > 0x4000)
state->div_force_off = 0;
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 2818e8def1b3..f8040f6def62 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -94,7 +94,7 @@ enum dib7000p_power_mode {
DIB7000P_POWER_INTERFACE_ONLY,
};
-/* dib7090 specific fonctions */
+/* dib7090 specific functions */
static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode);
static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff);
static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode);
@@ -319,7 +319,7 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad
dib7000p_write_word(state, 1925, reg | (1 << 4) | (1 << 2)); /* en_slowAdc = 1 & reset_sladc = 1 */
- reg = dib7000p_read_word(state, 1925); /* read acces to make it works... strange ... */
+ reg = dib7000p_read_word(state, 1925); /* read access to make it work... strange ... */
msleep(200);
dib7000p_write_word(state, 1925, reg & ~(1 << 4)); /* en_slowAdc = 1 & reset_sladc = 0 */
@@ -1101,7 +1101,7 @@ static void dib7000p_set_channel(struct dib7000p_state *state,
else
state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay;
- /* deactive the possibility of diversity reception if extended interleaver */
+ /* deactivate the possibility of diversity reception if extended interleaver */
state->div_force_off = !1 && ch->transmission_mode != TRANSMISSION_MODE_8K;
dib7000p_set_diversity_in(&state->demod, state->div_state);
@@ -2378,7 +2378,7 @@ static int dib7090_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
}
}
- if (apb_address != 0) /* R/W acces via APB */
+ if (apb_address != 0) /* R/W access via APB */
return dib7090p_rw_on_apb(i2c_adap, msg, num, apb_address);
else /* R/W access via SERPAR */
return w7090p_tuner_rw_serpar(i2c_adap, msg, num);
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 3c3f8cb14845..85c429cce23e 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -564,7 +564,7 @@ static int dib8000_set_adc_state(struct dib8000_state *state, enum dibx000_adc_s
dib8000_write_word(state, 1925, reg |
(1<<4) | (1<<2));
- /* read acces to make it works... strange ... */
+ /* read access to make it work... strange ... */
reg = dib8000_read_word(state, 1925);
msleep(20);
/* en_slowAdc = 1 & reset_sladc = 0 */
@@ -1091,7 +1091,7 @@ static int dib8000_reset(struct dvb_frontend *fe)
if ((state->revision != 0x8090) &&
(dib8000_set_output_mode(fe, OUTMODE_HIGH_Z) != 0))
- dprintk("OUTPUT_MODE could not be resetted.\n");
+ dprintk("OUTPUT_MODE could not be reset.\n");
state->current_agc = NULL;
@@ -1867,7 +1867,7 @@ static int dib8096p_tuner_xfer(struct i2c_adapter *i2c_adap,
}
}
- if (apb_address != 0) /* R/W acces via APB */
+ if (apb_address != 0) /* R/W access via APB */
return dib8096p_rw_on_apb(i2c_adap, msg, num, apb_address);
else /* R/W access via SERPAR */
return dib8096p_tuner_rw_serpar(i2c_adap, msg, num);
@@ -3082,7 +3082,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
state->autosearch_state = AS_DONE;
*tune_state = CT_DEMOD_STOP; /* else we are done here */
break;
- case 2: /* Succes */
+ case 2: /* Success */
state->status = FE_STATUS_FFT_SUCCESS; /* signal to the upper layer, that there was a channel found and the parameters can be read */
*tune_state = CT_DEMOD_STEP_3;
if (state->autosearch_state == AS_SEARCHING_GUARD)
@@ -3193,10 +3193,10 @@ static int dib8000_tune(struct dvb_frontend *fe)
case CT_DEMOD_STEP_6: /* (36) if there is an input (diversity) */
if ((state->fe[1] != NULL) && (state->output_mode != OUTMODE_DIVERSITY)) {
- /* if there is a diversity fe in input and this fe is has not already failled : wait here until this this fe has succedeed or failled */
+ /* if there is a diversity fe in input and this fe has not already failed: wait here until this fe has succeeded or failed */
if (dib8000_get_status(state->fe[1]) <= FE_STATUS_STD_SUCCESS) /* Something is locked on the input fe */
*tune_state = CT_DEMOD_STEP_8; /* go for mpeg */
- else if (dib8000_get_status(state->fe[1]) >= FE_STATUS_TUNE_TIME_TOO_SHORT) { /* fe in input failled also, break the current one */
+ else if (dib8000_get_status(state->fe[1]) >= FE_STATUS_TUNE_TIME_TOO_SHORT) { /* fe in input failed also, break the current one */
*tune_state = CT_DEMOD_STOP; /* else we are done here ; step 8 will close the loops and exit */
dib8000_viterbi_state(state, 1); /* start viterbi chandec */
dib8000_set_isdbt_loop_params(state, LOOP_TUNE_2);
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 0183fb1346ef..1875da07c150 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -1020,7 +1020,7 @@ static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address
if (address >= 1024 || !state->platform.risc.fw_is_running)
return -EINVAL;
- /* dprintk( "APB access thru rd fw %d %x\n", address, attribute); */
+ /* dprintk( "APB access through rd fw %d %x\n", address, attribute); */
mb[0] = (u16) address;
mb[1] = len / 2;
@@ -1050,7 +1050,7 @@ static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 addres
if (len > 18)
return -EINVAL;
- /* dprintk( "APB access thru wr fw %d %x\n", address, attribute); */
+ /* dprintk( "APB access through wr fw %d %x\n", address, attribute); */
mb[0] = (u16)address;
for (i = 0; i + 1 < len; i += 2)
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
index 23ae72468025..739dc5590fa4 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
@@ -67,7 +67,7 @@
* (2 bytes). The DAP can operate in 3 modes:
* (1) only short
* (2) only long
-* (3) both long and short but short preferred and long only when necesarry
+* (3) both long and short but short preferred and long only when necessary
*
* These modes must be selected compile time via compile switches.
* Compile switch settings for the different modes:
@@ -112,14 +112,14 @@
* + single master mode means no use of repeated starts
* + multi master mode means use of repeated starts
* Default is single master.
-* Default can be overriden by setting the compile switch DRXDAP_SINGLE_MASTER.
+* Default can be overridden by setting the compile switch DRXDAP_SINGLE_MASTER.
*
* Slave:
* Single/multi master selected via the flags in the FASI protocol.
* + single master means remember memory address between i2c packets
* + multimaster means flush memory address between i2c packets
* Default is single master, DAP FASI changes multi-master setting silently
-* into single master setting. This cannot be overrriden.
+* into single master setting. This cannot be overridden.
*
*/
/* set default */
@@ -139,7 +139,7 @@
* In single master mode, data can be written by sending the register address
* first, then two or four bytes of data in the next packet.
* Because the device address plus a register address equals five bytes,
-* the mimimum chunk size must be five.
+* the minimum chunk size must be five.
* If ten-bit I2C device addresses are used, the minimum chunk size must be six,
* because the I2C device address will then occupy two bytes when writing.
*
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index 1ec20eecc433..15f7e58c5a30 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -94,7 +94,7 @@ int drxbsp_i2c_term(void);
* \param r_count The number of bytes to read
* \param r_data The array to read the data from
* \return int Return status.
-* \retval 0 Succes.
+* \retval 0 Success.
* \retval -EIO Failure.
* \retval -EINVAL Parameter 'wcount' is not zero but parameter
* 'wdata' contains NULL.
@@ -986,7 +986,7 @@ struct drx_filter_info {
* \struct struct drx_channel * \brief The set of parameters describing a single channel.
*
* Used by DRX_CTRL_SET_CHANNEL and DRX_CTRL_GET_CHANNEL.
-* Only certain fields need to be used for a specfic standard.
+* Only certain fields need to be used for a specific standard.
*
*/
struct drx_channel {
@@ -1606,7 +1606,7 @@ struct drx_version_list {
DRX_AUD_I2S_MATRIX_B_MONO,
/*< B sound only, stereo or mono */
DRX_AUD_I2S_MATRIX_STEREO,
- /*< A+B sound, transparant */
+ /*< A+B sound, transparent */
DRX_AUD_I2S_MATRIX_MONO /*< A+B mixed to mono sum, (L+R)/2 */};
/*
@@ -1870,7 +1870,7 @@ struct drx_reg_dump {
/*< current power management mode */
/* Tuner */
- u8 tuner_port_nr; /*< nr of I2C port to wich tuner is */
+ u8 tuner_port_nr; /*< nr of I2C port to which tuner is */
s32 tuner_min_freq_rf;
/*< minimum RF input frequency, in kHz */
s32 tuner_max_freq_rf;
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 551b7d65fa66..a6876fa48753 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -380,10 +380,10 @@ DEFINES
*/
/*****************************************************************************/
-/* Audio block 0x103 is write only. To avoid shadowing in driver accessing */
-/* RAM adresses directly. This must be READ ONLY to avoid problems. */
-/* Writing to the interface adresses is more than only writing the RAM */
-/* locations */
+/* Audio block 0x103 is write only. To avoid shadowing in driver accessing */
+/* RAM addresses directly. This must be READ ONLY to avoid problems. */
+/* Writing to the interface addresses is more than only writing the RAM */
+/* locations */
/*****************************************************************************/
/*
* \brief RAM location of MODUS registers
@@ -656,8 +656,8 @@ static struct drxj_data drxj_data_g = {
false, /* flag: true=bypass */
ATV_TOP_VID_PEAK__PRE, /* shadow of ATV_TOP_VID_PEAK__A */
ATV_TOP_NOISE_TH__PRE, /* shadow of ATV_TOP_NOISE_TH__A */
- true, /* flag CVBS ouput enable */
- false, /* flag SIF ouput enable */
+ true, /* flag CVBS output enable */
+ false, /* flag SIF output enable */
DRXJ_SIF_ATTENUATION_0DB, /* current SIF att setting */
{ /* qam_rf_agc_cfg */
DRX_STANDARD_ITU_B, /* standard */
@@ -832,7 +832,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = {
false, /* If true mirror frequency spectrum */
{
/* MPEG output configuration */
- true, /* If true, enable MPEG ouput */
+ true, /* If true, enable MPEG output */
false, /* If true, insert RS byte */
false, /* If true, parallel out otherwise serial */
false, /* If true, invert DATA signals */
@@ -848,7 +848,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = {
DRX_MPEG_STR_WIDTH_1 /* MPEG Start width in clock cycles */
},
/* Initilisations below can be omitted, they require no user input and
- are initialy 0, NULL or false. The compiler will initialize them to these
+ are initially 0, NULL or false. The compiler will initialize them to these
values when omitted. */
false, /* is_opened */
@@ -869,7 +869,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = {
DRX_POWER_UP,
/* Tuner */
- 1, /* nr of I2C port to wich tuner is */
+ 1, /* nr of I2C port to which tuner is */
0L, /* minimum RF input frequency, in kHz */
0L, /* maximum RF input frequency, in kHz */
false, /* Rf Agc Polarity */
@@ -1656,7 +1656,7 @@ static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
sequense will be visible: (1) write address {i2c addr,
4 bytes chip address} (2) write data {i2c addr, 4 bytes data }
(3) write address (4) write data etc...
- Address must be rewriten because HI is reset after data transport and
+ Address must be rewritten because HI is reset after data transport and
expects an address.
*/
todo = (block_size < datasize ? block_size : datasize);
@@ -1820,7 +1820,7 @@ static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr,
* \param wdata Data to write
* \param rdata Buffer for data to read
* \return int
-* \retval 0 Succes
+* \retval 0 Success
* \retval -EIO Timeout, I2C error, illegal bank
*
* 16 bits register read modify write access using short addressing format only.
@@ -1897,7 +1897,7 @@ static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
* \param addr
* \param data
* \return int
-* \retval 0 Succes
+* \retval 0 Success
* \retval -EIO Timeout, I2C error, illegal bank
*
* 16 bits register read access via audio token ring interface.
@@ -2004,7 +2004,7 @@ static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr,
* \param addr
* \param data
* \return int
-* \retval 0 Succes
+* \retval 0 Success
* \retval -EIO Timeout, I2C error, illegal bank
*
* 16 bits register write access via audio token ring interface.
@@ -2094,7 +2094,7 @@ static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr,
* \param datasize size of data buffer in bytes
* \param data pointer to data buffer
* \return int
-* \retval 0 Succes
+* \retval 0 Success
* \retval -EIO Timeout, I2C error, illegal bank
*
*/
@@ -2338,7 +2338,7 @@ hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16
if ((cmd->cmd) == SIO_HI_RA_RAM_CMD_RESET)
msleep(1);
- /* Detect power down to ommit reading result */
+ /* Detect power down to omit reading result */
powerdown_cmd = (bool) ((cmd->cmd == SIO_HI_RA_RAM_CMD_CONFIG) &&
(((cmd->
param5) & SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M)
@@ -2754,7 +2754,7 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o
common_attr = (struct drx_common_attr *) demod->my_common_attr;
if (cfg_data->enable_mpeg_output == true) {
- /* quick and dirty patch to set MPEG incase current std is not
+ /* quick and dirty patch to set MPEG in case current std is not
producing MPEG */
switch (ext_attr->standard) {
case DRX_STANDARD_8VSB:
@@ -2894,7 +2894,7 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o
break;
default:
break;
- } /* swtich (standard) */
+ } /* switch (standard) */
/* Check insertion of the Reed-Solomon parity bytes */
rc = drxj_dap_read_reg16(dev_addr, FEC_OC_MODE__A, &fec_oc_reg_mode, 0);
@@ -4127,7 +4127,7 @@ rw_error:
* \param datasize size of data buffer in bytes
* \param data pointer to data buffer
* \return int
-* \retval 0 Succes
+* \retval 0 Success
* \retval -EIO Timeout, I2C error, illegal bank
*
*/
@@ -8989,7 +8989,7 @@ qam64auto(struct drx_demod_instance *demod,
((jiffies_to_msecs(jiffies) - start_time) <
(DRXJ_QAM_MAX_WAITTIME + timeout_ofs))
);
- /* Returning control to apllication ... */
+ /* Returning control to application ... */
return 0;
rw_error:
@@ -9309,7 +9309,7 @@ get_qamrs_err_count(struct i2c_device_addr *dev_addr,
return -EINVAL;
/* all reported errors are received in the */
- /* most recently finished measurment period */
+ /* most recently finished measurement period */
/* no of pre RS bit errors */
rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_BIT_ERRORS__A, &nr_bit_errors, 0);
if (rc != 0) {
@@ -9689,7 +9689,7 @@ rw_error:
(3) SIF AGC (used to amplify the output signal in case input to low)
The SIF AGC is now coupled to the RF/IF AGCs.
- The SIF AGC is needed for both SIF ouput and the internal SIF signal to
+ The SIF AGC is needed for both SIF output and the internal SIF signal to
the AUD block.
RF and IF AGCs DACs are part of AFE, Video and SIF AGC DACs are part of
@@ -9702,11 +9702,11 @@ rw_error:
later on because of the schedule)
Several HW/SCU "settings" can be used for ATV. The standard selection
- will reset most of these settings. To avoid that the end user apllication
+ will reset most of these settings. To avoid that the end user application
has to perform these settings each time the ATV or FM standards is
selected the driver will shadow these settings. This enables the end user
to perform the settings only once after a drx_open(). The driver must
- write the shadow settings to HW/SCU incase:
+ write the shadow settings to HW/SCU in case:
( setstandard FM/ATV) ||
( settings have changed && FM/ATV standard is active)
The shadow settings will be stored in the device specific data container.
@@ -9908,7 +9908,7 @@ rw_error:
#define IMPULSE_COSINE_ALPHA_0_5 { 2, 0, -2, -2, 2, 5, 2, -10, -20, -14, 20, 74, 125, 145} /*sqrt raised-cosine filter with alpha=0.5 */
#define IMPULSE_COSINE_ALPHA_RO_0_5 { 0, 0, 1, 2, 3, 0, -7, -15, -16, 0, 34, 77, 114, 128} /*full raised-cosine filter with alpha=0.5 (receiver only) */
-/* Coefficients for the nyquist fitler (total: 27 taps) */
+/* Coefficients for the nyquist filter (total: 27 taps) */
#define NYQFILTERLEN 27
static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_param)
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.h b/drivers/media/dvb-frontends/drx39xyj/drxj.h
index d3ee1c23bb2f..d62412f71c88 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.h
@@ -49,7 +49,7 @@ INCLUDES
#if ((DRXDAP_SINGLE_MASTER == 0) && (DRXDAPFASI_LONG_ADDR_ALLOWED == 0))
#error "Multi master mode and short addressing only is an illegal combination"
*; /* Generate a fatal compiler error to make sure it stops here,
- this is necesarry because not all compilers stop after a #error. */
+ this is necessary because not all compilers stop after a #error. */
#endif
/*-------------------------------------------------------------------------
@@ -203,7 +203,7 @@ struct drxj_agc_status {
* /struct drxjrs_errors
* Available failure information in DRXJ_FEC_RS.
*
-* Container for errors that are received in the most recently finished measurment period
+* Container for errors that are received in the most recently finished measurement period
*
*/
struct drxjrs_errors {
@@ -405,7 +405,7 @@ struct drxj_cfg_atv_output {
*
*/
struct drxj_data {
- /* device capabilties (determined during drx_open()) */
+ /* device capabilities (determined during drx_open()) */
bool has_lna; /*< true if LNA (aka PGA) present */
bool has_oob; /*< true if OOB supported */
bool has_ntsc; /*< true if NTSC supported */
@@ -455,7 +455,7 @@ struct drxj_cfg_atv_output {
/* IQM fs frequecy shift and inversion */
u32 iqm_fs_rate_ofs; /*< frequency shifter setting after setchannel */
- bool pos_image; /*< Ture: positive image */
+ bool pos_image; /*< True: positive image */
/* IQM RC frequecy shift */
u32 iqm_rc_rate_ofs; /*< frequency shifter setting after setchannel */
@@ -468,8 +468,8 @@ struct drxj_cfg_atv_output {
bool phase_correction_bypass;/*< flag: true=bypass */
s16 atv_top_vid_peak; /*< shadow of ATV_TOP_VID_PEAK__A */
u16 atv_top_noise_th; /*< shadow of ATV_TOP_NOISE_TH__A */
- bool enable_cvbs_output; /*< flag CVBS ouput enable */
- bool enable_sif_output; /*< flag SIF ouput enable */
+ bool enable_cvbs_output; /*< flag CVBS output enable */
+ bool enable_sif_output; /*< flag SIF output enable */
enum drxjsif_attenuation sif_attenuation;
/*< current SIF att setting */
/* Agc configuration for QAM and VSB */
diff --git a/drivers/media/dvb-frontends/drxd_firm.c b/drivers/media/dvb-frontends/drxd_firm.c
index 4e1d8905e06a..412871d6636b 100644
--- a/drivers/media/dvb-frontends/drxd_firm.c
+++ b/drivers/media/dvb-frontends/drxd_firm.c
@@ -890,7 +890,7 @@ u8 DRXD_StartDiversityEnd[] = {
/* End demod, combining RF in and diversity in, MPEG TS out */
WR16(B_FE_CF_REG_IMP_VAL__A, 0x0), /* disable impulse noise cruncher */
WR16(B_FE_AD_REG_INVEXT__A, 0x0), /* clock inversion (for sohard board) */
- WR16(B_CP_REG_BR_STR_DEL__A, 10), /* apperently no mb delay matching is best */
+ WR16(B_CP_REG_BR_STR_DEL__A, 10), /* apparently no mb delay matching is best */
WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_DIV_ON | /* org = 0x81 combining enabled */
B_EQ_REG_RC_SEL_CAR_MEAS_A_CC |
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 684d428efb0d..0a5b15bee1d7 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -1144,6 +1144,8 @@ static int EnableAndResetMB(struct drxd_state *state)
static int InitCC(struct drxd_state *state)
{
+ int status = 0;
+
if (state->osc_clock_freq == 0 ||
state->osc_clock_freq > 20000 ||
(state->osc_clock_freq % 4000) != 0) {
@@ -1151,14 +1153,17 @@ static int InitCC(struct drxd_state *state)
return -1;
}
- Write16(state, CC_REG_OSC_MODE__A, CC_REG_OSC_MODE_M20, 0);
- Write16(state, CC_REG_PLL_MODE__A, CC_REG_PLL_MODE_BYPASS_PLL |
- CC_REG_PLL_MODE_PUMP_CUR_12, 0);
- Write16(state, CC_REG_REF_DIVIDE__A, state->osc_clock_freq / 4000, 0);
- Write16(state, CC_REG_PWD_MODE__A, CC_REG_PWD_MODE_DOWN_PLL, 0);
- Write16(state, CC_REG_UPDATE__A, CC_REG_UPDATE_KEY, 0);
+ status |= Write16(state, CC_REG_OSC_MODE__A, CC_REG_OSC_MODE_M20, 0);
+ status |= Write16(state, CC_REG_PLL_MODE__A,
+ CC_REG_PLL_MODE_BYPASS_PLL |
+ CC_REG_PLL_MODE_PUMP_CUR_12, 0);
+ status |= Write16(state, CC_REG_REF_DIVIDE__A,
+ state->osc_clock_freq / 4000, 0);
+ status |= Write16(state, CC_REG_PWD_MODE__A, CC_REG_PWD_MODE_DOWN_PLL,
+ 0);
+ status |= Write16(state, CC_REG_UPDATE__A, CC_REG_UPDATE_KEY, 0);
- return 0;
+ return status;
}
static int ResetECOD(struct drxd_state *state)
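
The |= accumulation in InitCC() folds several Write16() return codes into one: status stays 0 only if every write succeeded, and goes (and stays) negative once any write fails, since OR-ing a negative errno keeps the sign bit even though the exact errno value may be mixed. The idiom in isolation, with hypothetical register names:

	int status = 0;

	status |= Write16(state, REG_FOO__A, foo_val, 0);
	status |= Write16(state, REG_BAR__A, bar_val, 0);
	if (status < 0)
		return status;	/* at least one write failed */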
@@ -1312,7 +1317,10 @@ static int SC_SendCommand(struct drxd_state *state, u16 cmd)
int status = 0, ret;
u16 errCode;
- Write16(state, SC_RA_RAM_CMD__A, cmd, 0);
+ status = Write16(state, SC_RA_RAM_CMD__A, cmd, 0);
+ if (status < 0)
+ return status;
+
SC_WaitForReady(state);
ret = Read16(state, SC_RA_RAM_CMD_ADDR__A, &errCode, 0);
@@ -1339,9 +1347,9 @@ static int SC_ProcStartCommand(struct drxd_state *state,
break;
}
SC_WaitForReady(state);
- Write16(state, SC_RA_RAM_CMD_ADDR__A, subCmd, 0);
- Write16(state, SC_RA_RAM_PARAM1__A, param1, 0);
- Write16(state, SC_RA_RAM_PARAM0__A, param0, 0);
+ status |= Write16(state, SC_RA_RAM_CMD_ADDR__A, subCmd, 0);
+ status |= Write16(state, SC_RA_RAM_PARAM1__A, param1, 0);
+ status |= Write16(state, SC_RA_RAM_PARAM0__A, param0, 0);
SC_SendCommand(state, SC_RA_RAM_CMD_PROC_START);
} while (0);
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index 76466f7ec3a0..ee06e89187e4 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -24,7 +24,7 @@
* @microcode_name: Name of the firmware file with the microcode
* @qam_demod_parameter_count: The number of parameters used for the command
* to set the demodulator parameters. All
- * firmwares are using the 2-parameter commmand.
+ * firmwares are using the 2-parameter command.
* An exception is the ``drxk_a3.mc`` firmware,
* which uses the 4-parameter command.
* A value of 0 (default) or lower indicates that
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 8ea1e45be710..86652a4ef9ce 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -723,7 +723,7 @@ static int init_state(struct drxk_state *state)
state->m_drxk_state = DRXK_UNINITIALIZED;
/* MPEG output configuration */
- state->m_enable_mpeg_output = true; /* If TRUE; enable MPEG ouput */
+ state->m_enable_mpeg_output = true; /* If TRUE; enable MPEG output */
state->m_insert_rs_byte = false; /* If TRUE; insert RS byte */
state->m_invert_data = false; /* If TRUE; invert DATA signals */
state->m_invert_err = false; /* If TRUE; invert ERR signal */
@@ -3870,7 +3870,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
goto error;
}
#else
- /* Set Priorty high */
+ /* Set Priority high */
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI;
status = write16(state, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_HI);
if (status < 0)
@@ -3901,7 +3901,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
}
/*
- * SAW filter selection: normaly not necesarry, but if wanted
+ * SAW filter selection: normally not necessary, but if wanted
* the application can select a SAW filter via the driver by
* using UIOs
*/
@@ -5423,7 +5423,7 @@ static int qam_demodulator_command(struct drxk_state *state,
set_param_parameters[3] |= (QAM_MIRROR_AUTO_ON);
/* Env parameters */
- /* check for LOCKRANGE Extented */
+ /* check for LOCKRANGE Extended */
/* set_param_parameters[3] |= QAM_LOCKRANGE_NORMAL; */
status = scu_command(state,
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 46a55146cb07..2b422d3ac5fa 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -914,7 +914,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
/* ds3000 global reset */
ds3000_writereg(state, 0x07, 0x80);
ds3000_writereg(state, 0x07, 0x00);
- /* ds3000 build-in uC reset */
+ /* ds3000 built-in uC reset */
ds3000_writereg(state, 0xb2, 0x01);
/* ds3000 software reset */
ds3000_writereg(state, 0x00, 0x01);
@@ -1023,7 +1023,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
/* ds3000 out of software reset */
ds3000_writereg(state, 0x00, 0x00);
- /* start ds3000 build-in uC */
+ /* start ds3000 built-in uC */
ds3000_writereg(state, 0xb2, 0x00);
if (fe->ops.tuner_ops.get_frequency) {
diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c
index ae8ec59b665c..7de11d5062c2 100644
--- a/drivers/media/dvb-frontends/isl6421.c
+++ b/drivers/media/dvb-frontends/isl6421.c
@@ -98,7 +98,7 @@ static int isl6421_set_voltage(struct dvb_frontend *fe,
if (ret != 2)
return -EIO;
- /* Store off status now incase future commands fail */
+ /* Store off status now in case future commands fail */
isl6421->is_off = is_off;
/* On overflow, the device will try again after 900 ms (typically) */
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index cee9c83e48de..99c6289ae585 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -1685,7 +1685,10 @@ static int lgdt3306a_read_signal_strength(struct dvb_frontend *fe,
case QAM_256:
case QAM_AUTO:
/* need to know actual modulation to set proper SNR baseline */
- lgdt3306a_read_reg(state, 0x00a6, &val);
+ ret = lgdt3306a_read_reg(state, 0x00a6, &val);
+ if (lg_chkerr(ret))
+ goto fail;
+
if(val & 0x04)
ref_snr = 2800; /* QAM-256 28dB */
else
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 96807e134886..8abb1a510a81 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -783,7 +783,7 @@ static int lgdt3303_read_status(struct dvb_frontend *fe,
if ((buf[0] & 0x02) == 0x00)
*status |= FE_HAS_SYNC;
- if ((buf[0] & 0xfd) == 0x01)
+ if ((buf[0] & 0x01) == 0x01)
*status |= FE_HAS_VITERBI | FE_HAS_LOCK;
break;
default:
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index d5bc85501f9e..13888732951c 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -701,7 +701,7 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
if (status & FE_HAS_LOCK) {
state->fec_inner = m88rs2000_get_fec(state);
- /* Uknown suspect SNR level */
+ /* Unknown suspect SNR level */
reg = m88rs2000_readreg(state, 0x65);
}
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index 03e74a729168..bfbb879469f2 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -645,7 +645,9 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
if (ret < 0)
return ret;
- mt312_reset(state, 0);
+ ret = mt312_reset(state, 0);
+ if (ret < 0)
+ return ret;
return 0;
}
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 0961e686ff68..0ef72d6c6f8b 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -153,7 +153,7 @@ static int nxt200x_writereg_multibyte (struct nxt200x_state* state, u8 reg, u8*
u8 attr, len2, buf;
dprintk("%s\n", __func__);
- /* set mutli register register */
+ /* set multi register register */
nxt200x_writebytes(state, 0x35, &reg, 1);
/* send the actual data */
@@ -214,7 +214,7 @@ static int nxt200x_readreg_multibyte (struct nxt200x_state* state, u8 reg, u8* d
u8 buf, len2, attr;
dprintk("%s\n", __func__);
- /* set mutli register register */
+ /* set multi register register */
nxt200x_writebytes(state, 0x35, &reg, 1);
switch (state->demod_chip) {
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index a39bbd8ff1f0..7343da11a1d8 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -59,7 +59,7 @@ struct or51211_state {
/* Demodulator private data */
u8 initialized:1;
- u32 snr; /* Result of last SNR claculation */
+ u32 snr; /* Result of last SNR calculation */
/* Tuner private data */
u32 current_frequency;
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index d6673f4fb47b..57fb05bb7e96 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -471,7 +471,7 @@ static int rtl2832_sdr_buf_prepare(struct vb2_buffer *vb)
{
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- /* Don't allow queing new buffers after device disconnection */
+ /* Don't allow queueing new buffers after device disconnection */
if (!dev->udev)
return -ENODEV;
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index ceeb0c3551ce..a2907d035fe2 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -490,7 +490,7 @@ static void s5h1409_set_qam_amhum_mode(struct dvb_frontend *fe)
if (state->qam_state == QAM_STATE_QAM_OPTIMIZED_L3) {
/* We've already reached the maximum optimization level, so
- dont bother banging on the status registers */
+ don't bother banging on the status registers */
return;
}
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index 8d31cf3f4f07..270a3c559e08 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -293,7 +293,9 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
sp8870_writereg(state, 0xc05, reg0xc05);
// read status reg in order to clear pending irqs
- sp8870_readreg(state, 0x200);
+ err = sp8870_readreg(state, 0x200);
+ if (err)
+ return err;
// system controller start
sp8870_microcontroller_start(state);
diff --git a/drivers/media/dvb-frontends/stb0899_algo.c b/drivers/media/dvb-frontends/stb0899_algo.c
index bd2defde7a77..b5debb61bca5 100644
--- a/drivers/media/dvb-frontends/stb0899_algo.c
+++ b/drivers/media/dvb-frontends/stb0899_algo.c
@@ -835,8 +835,8 @@ static u32 stb0899_dvbs2_calc_dev(struct stb0899_state *state)
dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
- master_clk = internal->master_clk / 1000; /* for integer Caculation*/
- srate = internal->srate / 1000; /* for integer Caculation*/
+ master_clk = internal->master_clk / 1000; /* for integer Calculation */
+ srate = internal->srate / 1000; /* for integer Calculation */
correction = (512 * master_clk) / (2 * dec_ratio * srate);
return correction;
@@ -864,7 +864,7 @@ static void stb0899_dvbs2_set_srate(struct stb0899_state *state)
win_sel = dec_rate - 4;
decim = (1 << dec_rate);
- /* (FSamp/Fsymbol *100) for integer Caculation */
+ /* (FSamp/Fsymbol *100) for integer Calculation */
f_sym = internal->master_clk / ((decim * internal->srate) / 1000);
if (f_sym <= 2250) /* don't band limit signal going into btr block*/
diff --git a/drivers/media/dvb-frontends/stv0367_defs.h b/drivers/media/dvb-frontends/stv0367_defs.h
index 277d2971ed3f..4afe8248a667 100644
--- a/drivers/media/dvb-frontends/stv0367_defs.h
+++ b/drivers/media/dvb-frontends/stv0367_defs.h
@@ -1096,7 +1096,7 @@ static const struct st_register def0367dd_ofdm[] = {
};
static const struct st_register def0367dd_qam[] = {
- {R367CAB_CTRL_1, 0x06}, /* Orginal 0x04 */
+ {R367CAB_CTRL_1, 0x06}, /* Original 0x04 */
{R367CAB_CTRL_2, 0x03},
{R367CAB_IT_STATUS1, 0x2b},
{R367CAB_IT_STATUS2, 0x08},
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 254618a06140..fa1a0fb577ad 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -744,12 +744,12 @@ static int stv0900_read_ucblocks(struct dvb_frontend *fe, u32 * ucblocks)
if (stv0900_get_standard(fe, demod) == STV0900_DVBS2_STANDARD) {
/* DVB-S2 delineator errors count */
- /* retreiving number for errnous headers */
+ /* retrieving the number of erroneous headers */
err_val1 = stv0900_read_reg(intp, BBFCRCKO1);
err_val0 = stv0900_read_reg(intp, BBFCRCKO0);
header_err_val = (err_val1 << 8) | err_val0;
- /* retreiving number for errnous packets */
+ /* retrieving the number of erroneous packets */
err_val1 = stv0900_read_reg(intp, UPCRCKO1);
err_val0 = stv0900_read_reg(intp, UPCRCKO0);
*ucblocks = (err_val1 << 8) | err_val0;
diff --git a/drivers/media/dvb-frontends/stv0910.c b/drivers/media/dvb-frontends/stv0910.c
index fc2440d8af36..68d7c7b41071 100644
--- a/drivers/media/dvb-frontends/stv0910.c
+++ b/drivers/media/dvb-frontends/stv0910.c
@@ -1238,7 +1238,7 @@ static int gate_ctrl(struct dvb_frontend *fe, int enable)
* mutex_lock note: Concurrent I2C gate bus accesses must be
* prevented (STV0910 = dual demod on a single IC with a single I2C
* gate/bus, and two tuners attached), similar to most (if not all)
- * other I2C host interfaces/busses.
+ * other I2C host interfaces/buses.
*
* enable=1 (open I2C gate) will grab the lock
* enable=0 (close I2C gate) releases the lock
@@ -1500,7 +1500,7 @@ static int read_status(struct dvb_frontend *fe, enum fe_status *status)
RSTV0910_P2_FBERCPT4 + state->regoff, 0x00);
/*
* Reset the packet Error counter2 (and Set it to
- * infinit error count mode)
+ * infinite error count mode)
*/
write_reg(state,
RSTV0910_P2_ERRCTRL2 + state->regoff, 0xc1);
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 7db9a5bceccc..e54708eb4fb0 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -202,7 +202,7 @@ static int stv6110_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth)
i++;
}
- /* RCCLKOFF = 1 calibration done, desactivate the calibration Clock */
+ /* RCCLKOFF = 1 calibration done, deactivate the calibration Clock */
priv->regs[RSTV6110_CTRL3] |= (1 << 6);
stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL3], RSTV6110_CTRL3, 1);
return 0;
diff --git a/drivers/media/dvb-frontends/tda1004x.h b/drivers/media/dvb-frontends/tda1004x.h
index efd7659dace9..26f504a830e3 100644
--- a/drivers/media/dvb-frontends/tda1004x.h
+++ b/drivers/media/dvb-frontends/tda1004x.h
@@ -33,7 +33,7 @@ enum tda10046_xtal {
enum tda10046_agc {
TDA10046_AGC_DEFAULT, /* original configuration */
- TDA10046_AGC_IFO_AUTO_NEG, /* IF AGC only, automatic, negtive */
+ TDA10046_AGC_IFO_AUTO_NEG, /* IF AGC only, automatic, negative */
TDA10046_AGC_IFO_AUTO_POS, /* IF AGC only, automatic, positive */
TDA10046_AGC_TDA827X, /* IF AGC only, special setup for tda827x */
};
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index 8323e4e53d66..85dddfce8ef4 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -437,7 +437,7 @@ static int tda10086_set_frontend(struct dvb_frontend *fe)
fe->ops.i2c_gate_ctrl(fe, 0);
}
- /* calcluate the frequency offset (in *Hz* not kHz) */
+ /* calculate the frequency offset (in *Hz* not kHz) */
freqoff = fe_params->frequency - freq;
freqoff = ((1<<16) * freqoff) / (SACLK/1000);
tda10086_write_byte(state, 0x3d, 0x80 | ((freqoff >> 8) & 0x7f));
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index eeb2318c102f..e064e2b22d9d 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -105,7 +105,7 @@ struct tda_state {
s32 m_RF_B2[7];
u32 m_RF3[7];
- u8 m_TMValue_RFCal; /* Calibration temperatur */
+ u8 m_TMValue_RFCal; /* Calibration temperature */
bool m_bFMInput; /* true to use Pin 8 for FM Radio */
@@ -400,7 +400,7 @@ static int CalibrateRF(struct tda_state *state,
break;
/* Switching off LT (as datasheet says) causes calibration on C1 to fail */
- /* (Readout of Cprog is allways 255) */
+ /* (Readout of Cprog is always 255) */
if (state->m_Regs[ID] != 0x83) /* C1: ID == 83, C2: ID == 84 */
state->m_Regs[EP3] |= 0x40; /* SM_LT = 1 */
@@ -644,7 +644,7 @@ static int PowerScan(struct tda_state *state,
if (status < 0)
break;
CID_Gain = Regs[EB10] & 0x3F;
- state->m_Regs[ID] = Regs[ID]; /* Chip version, (needed for C1 workarround in CalibrateRF) */
+ state->m_Regs[ID] = Regs[ID]; /* Chip version, (needed for C1 workaround in CalibrateRF) */
*pRF_Out = RF_in;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 4c936e129500..6d32f8dcf83b 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -820,6 +820,25 @@ config VIDEO_OV7740
This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
+config VIDEO_OV8856
+ tristate "OmniVision OV8856 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV8856 camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov8856.
+
+config VIDEO_OV9640
+ tristate "OmniVision OV9640 sensor support"
+ depends on I2C && VIDEO_V4L2
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV9640 camera sensor.
+
config VIDEO_OV9650
tristate "OmniVision OV9650/OV9652 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
@@ -848,6 +867,14 @@ config VIDEO_VS6624
To compile this driver as a module, choose M here: the
module will be called vs6624.
+config VIDEO_MT9M001
+ tristate "mt9m001 support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ help
+ This driver supports MT9M001 cameras from Micron, monochrome
+ and colour models.
+
config VIDEO_MT9M032
tristate "MT9M032 camera sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
@@ -1100,18 +1127,11 @@ config VIDEO_I2C
Enable the I2C transport video support which supports the
following:
* Panasonic AMG88xx Grid-Eye Sensors
+ * Melexis MLX90640 Thermal Cameras
To compile this driver as a module, choose M here: the
module will be called video-i2c
endmenu
-menu "Sensors used on soc_camera driver"
-
-if SOC_CAMERA
- source "drivers/media/i2c/soc_camera/Kconfig"
-endif
-
-endmenu
-
endif
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 65fae7732de0..a64fca82e0c4 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_VIDEO_SMIAPP) += smiapp/
obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/
obj-$(CONFIG_VIDEO_CX25840) += cx25840/
obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
-obj-y += soc_camera/
obj-$(CONFIG_VIDEO_APTINA_PLL) += aptina-pll.o
obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
@@ -78,8 +77,11 @@ obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_OV772X) += ov772x.o
obj-$(CONFIG_VIDEO_OV7740) += ov7740.o
+obj-$(CONFIG_VIDEO_OV8856) += ov8856.o
+obj-$(CONFIG_VIDEO_OV9640) += ov9640.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
+obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index e31e8d909bb9..419b98117133 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -219,7 +219,7 @@ static int adv7175_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
* SECAM->PAL (typically it does not work
* due to genlock: when decoder is in SECAM
* and encoder in in PAL the subcarrier can
- * not be syncronized with horizontal
+ * not be synchronized with horizontal
* quency) */
adv7175_write_block(sd, init_pal, sizeof(init_pal));
if (encoder->input == 0)
diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
index 71714634efb0..dbbb1e4d6363 100644
--- a/drivers/media/i2c/adv748x/adv748x-afe.c
+++ b/drivers/media/i2c/adv748x/adv748x-afe.c
@@ -282,7 +282,7 @@ static int adv748x_afe_s_stream(struct v4l2_subdev *sd, int enable)
goto unlock;
}
- ret = adv748x_tx_power(&state->txb, enable);
+ ret = adv748x_tx_power(afe->tx, enable);
if (ret)
goto unlock;
diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c
index 6854d898fdd1..f57cd77a32fa 100644
--- a/drivers/media/i2c/adv748x/adv748x-core.c
+++ b/drivers/media/i2c/adv748x/adv748x-core.c
@@ -23,6 +23,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include "adv748x.h"
@@ -124,6 +125,16 @@ int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value)
return regmap_write(state->regmap[page], reg, value);
}
+static int adv748x_write_check(struct adv748x_state *state, u8 page, u8 reg,
+ u8 value, int *error)
+{
+ if (*error)
+ return *error;
+
+ *error = adv748x_write(state, page, reg, value);
+ return *error;
+}
+
/* adv748x_write_block(): Write raw data with a maximum of I2C_SMBUS_BLOCK_MAX
* size to one or more registers.
*
@@ -207,20 +218,13 @@ static int adv748x_write_regs(struct adv748x_state *state,
{
int ret;
- while (regs->page != ADV748X_PAGE_EOR) {
- if (regs->page == ADV748X_PAGE_WAIT) {
- msleep(regs->value);
- } else {
- ret = adv748x_write(state, regs->page, regs->reg,
- regs->value);
- if (ret < 0) {
- adv_err(state,
- "Error regs page: 0x%02x reg: 0x%02x\n",
- regs->page, regs->reg);
- return ret;
- }
+ for (; regs->page != ADV748X_PAGE_EOR; regs++) {
+ ret = adv748x_write(state, regs->page, regs->reg, regs->value);
+ if (ret < 0) {
+ adv_err(state, "Error regs page: 0x%02x reg: 0x%02x\n",
+ regs->page, regs->reg);
+ return ret;
}
- regs++;
}
return 0;
@@ -230,68 +234,77 @@ static int adv748x_write_regs(struct adv748x_state *state,
* TXA and TXB
*/
-static const struct adv748x_reg_value adv748x_power_up_txa_4lane[] = {
+static int adv748x_power_up_tx(struct adv748x_csi2 *tx)
+{
+ struct adv748x_state *state = tx->state;
+ u8 page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB;
+ int ret = 0;
- {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */
- {ADV748X_PAGE_TXA, 0x00, 0xa4}, /* Set Auto DPHY Timing */
+ /* Enable n-lane MIPI */
+ adv748x_write_check(state, page, 0x00, 0x80 | tx->num_lanes, &ret);
- {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0x1e, 0x40}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
- {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */
- {ADV748X_PAGE_TXA, 0x00, 0x24 },/* Power-up CSI-TX */
- {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
- {ADV748X_PAGE_TXA, 0xc1, 0x2b}, /* ADI Required Write */
- {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
- {ADV748X_PAGE_TXA, 0x31, 0x80}, /* ADI Required Write */
+ /* Set Auto DPHY Timing */
+ adv748x_write_check(state, page, 0x00, 0xa0 | tx->num_lanes, &ret);
- {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
-};
+ /* ADI Required Write */
+ if (tx->src == &state->hdmi.sd) {
+ adv748x_write_check(state, page, 0xdb, 0x10, &ret);
+ adv748x_write_check(state, page, 0xd6, 0x07, &ret);
+ } else {
+ adv748x_write_check(state, page, 0xd2, 0x40, &ret);
+ }
-static const struct adv748x_reg_value adv748x_power_down_txa_4lane[] = {
+ adv748x_write_check(state, page, 0xc4, 0x0a, &ret);
+ adv748x_write_check(state, page, 0x71, 0x33, &ret);
+ adv748x_write_check(state, page, 0x72, 0x11, &ret);
- {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0x1e, 0x00}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */
- {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
- {ADV748X_PAGE_TXA, 0xc1, 0x3b}, /* ADI Required Write */
+ /* i2c_dphy_pwdn - 1'b0 */
+ adv748x_write_check(state, page, 0xf0, 0x00, &ret);
- {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
-};
+ /* ADI Required Writes */
+ adv748x_write_check(state, page, 0x31, 0x82, &ret);
+ adv748x_write_check(state, page, 0x1e, 0x40, &ret);
-static const struct adv748x_reg_value adv748x_power_up_txb_1lane[] = {
+ /* i2c_mipi_pll_en - 1'b1 */
+ adv748x_write_check(state, page, 0xda, 0x01, &ret);
+ usleep_range(2000, 2500);
- {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */
- {ADV748X_PAGE_TXB, 0x00, 0xa1}, /* Set Auto DPHY Timing */
+ /* Power-up CSI-TX */
+ adv748x_write_check(state, page, 0x00, 0x20 | tx->num_lanes, &ret);
+ usleep_range(1000, 1500);
- {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0x1e, 0x40}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
- {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */
- {ADV748X_PAGE_TXB, 0x00, 0x21 },/* Power-up CSI-TX */
- {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
- {ADV748X_PAGE_TXB, 0xc1, 0x2b}, /* ADI Required Write */
- {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
- {ADV748X_PAGE_TXB, 0x31, 0x80}, /* ADI Required Write */
+ /* ADI Required Writes */
+ adv748x_write_check(state, page, 0xc1, 0x2b, &ret);
+ usleep_range(1000, 1500);
+ adv748x_write_check(state, page, 0x31, 0x80, &ret);
- {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
-};
+ return ret;
+}
-static const struct adv748x_reg_value adv748x_power_down_txb_1lane[] = {
+static int adv748x_power_down_tx(struct adv748x_csi2 *tx)
+{
+ struct adv748x_state *state = tx->state;
+ u8 page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB;
+ int ret = 0;
- {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0x1e, 0x00}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */
- {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
- {ADV748X_PAGE_TXB, 0xc1, 0x3b}, /* ADI Required Write */
+ /* ADI Required Writes */
+ adv748x_write_check(state, page, 0x31, 0x82, &ret);
+ adv748x_write_check(state, page, 0x1e, 0x00, &ret);
- {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
-};
+ /* Enable n-lane MIPI */
+ adv748x_write_check(state, page, 0x00, 0x80 | tx->num_lanes, &ret);
+
+ /* i2c_mipi_pll_en - 1'b1 */
+ adv748x_write_check(state, page, 0xda, 0x01, &ret);
+
+ /* ADI Required Write */
+ adv748x_write_check(state, page, 0xc1, 0x3b, &ret);
+
+ return ret;
+}
int adv748x_tx_power(struct adv748x_csi2 *tx, bool on)
{
- struct adv748x_state *state = tx->state;
- const struct adv748x_reg_value *reglist;
int val;
if (!is_tx_enabled(tx))
@@ -309,19 +322,57 @@ int adv748x_tx_power(struct adv748x_csi2 *tx, bool on)
WARN_ONCE((on && val & ADV748X_CSI_FS_AS_LS_UNKNOWN),
"Enabling with unknown bit set");
- if (on)
- reglist = is_txa(tx) ? adv748x_power_up_txa_4lane :
- adv748x_power_up_txb_1lane;
- else
- reglist = is_txa(tx) ? adv748x_power_down_txa_4lane :
- adv748x_power_down_txb_1lane;
-
- return adv748x_write_regs(state, reglist);
+ return on ? adv748x_power_up_tx(tx) : adv748x_power_down_tx(tx);
}
/* -----------------------------------------------------------------------------
* Media Operations
*/
+static int adv748x_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *rsd = media_entity_to_v4l2_subdev(remote->entity);
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct adv748x_state *state = v4l2_get_subdevdata(sd);
+ struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
+ bool enable = flags & MEDIA_LNK_FL_ENABLED;
+ u8 io10_mask = ADV748X_IO_10_CSI1_EN |
+ ADV748X_IO_10_CSI4_EN |
+ ADV748X_IO_10_CSI4_IN_SEL_AFE;
+ u8 io10 = 0;
+
+ /* Refuse to enable multiple links to the same TX at the same time. */
+ if (enable && tx->src)
+ return -EINVAL;
+
+ /* Set or clear the source (HDMI or AFE) and the current TX. */
+ if (rsd == &state->afe.sd)
+ state->afe.tx = enable ? tx : NULL;
+ else
+ state->hdmi.tx = enable ? tx : NULL;
+
+ tx->src = enable ? rsd : NULL;
+
+ if (state->afe.tx) {
+ /* AFE Requires TXA enabled, even when output to TXB */
+ io10 |= ADV748X_IO_10_CSI4_EN;
+ if (is_txa(tx))
+ io10 |= ADV748X_IO_10_CSI4_IN_SEL_AFE;
+ else
+ io10 |= ADV748X_IO_10_CSI1_EN;
+ }
+
+ if (state->hdmi.tx)
+ io10 |= ADV748X_IO_10_CSI4_EN;
+
+ return io_clrset(state, ADV748X_IO_10, io10_mask, io10);
+}
+
+static const struct media_entity_operations adv748x_tx_media_ops = {
+ .link_setup = adv748x_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
static const struct media_entity_operations adv748x_media_ops = {
.link_validate = v4l2_subdev_link_validate,
@@ -331,18 +382,8 @@ static const struct media_entity_operations adv748x_media_ops = {
* HW setup
*/
-static const struct adv748x_reg_value adv748x_sw_reset[] = {
-
- {ADV748X_PAGE_IO, 0xff, 0xff}, /* SW reset */
- {ADV748X_PAGE_WAIT, 0x00, 0x05},/* delay 5 */
- {ADV748X_PAGE_IO, 0x01, 0x76}, /* ADI Required Write */
- {ADV748X_PAGE_IO, 0xf2, 0x01}, /* Enable I2C Read Auto-Increment */
- {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
-};
-
-/* Supported Formats For Script Below */
-/* - 01-29 HDMI to MIPI TxA CSI 4-Lane - RGB888: */
-static const struct adv748x_reg_value adv748x_init_txa_4lane[] = {
+/* Initialize CP Core with RGB888 format. */
+static const struct adv748x_reg_value adv748x_init_hdmi[] = {
/* Disable chip powerdown & Enable HDMI Rx block */
{ADV748X_PAGE_IO, 0x00, 0x40},
@@ -383,32 +424,11 @@ static const struct adv748x_reg_value adv748x_init_txa_4lane[] = {
{ADV748X_PAGE_IO, 0x0c, 0xe0}, /* Enable LLC_DLL & Double LLC Timing */
{ADV748X_PAGE_IO, 0x0e, 0xdd}, /* LLC/PIX/SPI PINS TRISTATED AUD */
- {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */
- {ADV748X_PAGE_TXA, 0x00, 0xa4}, /* Set Auto DPHY Timing */
- {ADV748X_PAGE_TXA, 0xdb, 0x10}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0xd6, 0x07}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0xc4, 0x0a}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0x71, 0x33}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0x72, 0x11}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0xf0, 0x00}, /* i2c_dphy_pwdn - 1'b0 */
-
- {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0x1e, 0x40}, /* ADI Required Write */
- {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
- {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */
- {ADV748X_PAGE_TXA, 0x00, 0x24 },/* Power-up CSI-TX */
- {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
- {ADV748X_PAGE_TXA, 0xc1, 0x2b}, /* ADI Required Write */
- {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
- {ADV748X_PAGE_TXA, 0x31, 0x80}, /* ADI Required Write */
-
{ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
};
-/* 02-01 Analog CVBS to MIPI TX-B CSI 1-Lane - */
-/* Autodetect CVBS Single Ended In Ain 1 - MIPI Out */
-static const struct adv748x_reg_value adv748x_init_txb_1lane[] = {
-
+/* Initialize AFE core with YUV8 format. */
+static const struct adv748x_reg_value adv748x_init_afe[] = {
{ADV748X_PAGE_IO, 0x00, 0x30}, /* Disable chip powerdown Rx */
{ADV748X_PAGE_IO, 0xf2, 0x01}, /* Enable I2C Read Auto-Increment */
@@ -435,33 +455,36 @@ static const struct adv748x_reg_value adv748x_init_txb_1lane[] = {
{ADV748X_PAGE_SDP, 0x31, 0x12}, /* ADI Required Write */
{ADV748X_PAGE_SDP, 0xe6, 0x4f}, /* V bit end pos manually in NTSC */
- {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */
- {ADV748X_PAGE_TXB, 0x00, 0xa1}, /* Set Auto DPHY Timing */
- {ADV748X_PAGE_TXB, 0xd2, 0x40}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0xc4, 0x0a}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0x71, 0x33}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0x72, 0x11}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0xf0, 0x00}, /* i2c_dphy_pwdn - 1'b0 */
- {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0x1e, 0x40}, /* ADI Required Write */
- {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
-
- {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */
- {ADV748X_PAGE_TXB, 0x00, 0x21 },/* Power-up CSI-TX */
- {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
- {ADV748X_PAGE_TXB, 0xc1, 0x2b}, /* ADI Required Write */
- {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
- {ADV748X_PAGE_TXB, 0x31, 0x80}, /* ADI Required Write */
-
{ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
};
+static int adv748x_sw_reset(struct adv748x_state *state)
+{
+ int ret;
+
+ ret = io_write(state, ADV748X_IO_REG_FF, ADV748X_IO_REG_FF_MAIN_RESET);
+ if (ret)
+ return ret;
+
+ usleep_range(5000, 6000);
+
+ /* Disable CEC Wakeup from power-down mode */
+ ret = io_clrset(state, ADV748X_IO_REG_01, ADV748X_IO_REG_01_PWRDN_MASK,
+ ADV748X_IO_REG_01_PWRDNB);
+ if (ret)
+ return ret;
+
+ /* Enable I2C Read Auto-Increment for consecutive reads */
+ return io_write(state, ADV748X_IO_REG_F2,
+ ADV748X_IO_REG_F2_READ_AUTO_INC);
+}
+
static int adv748x_reset(struct adv748x_state *state)
{
int ret;
u8 regval = 0;
- ret = adv748x_write_regs(state, adv748x_sw_reset);
+ ret = adv748x_sw_reset(state);
if (ret < 0)
return ret;
@@ -469,18 +492,19 @@ static int adv748x_reset(struct adv748x_state *state)
if (ret < 0)
return ret;
- /* Init and power down TXA */
- ret = adv748x_write_regs(state, adv748x_init_txa_4lane);
+ /* Initialize CP and AFE cores. */
+ ret = adv748x_write_regs(state, adv748x_init_hdmi);
if (ret)
return ret;
- adv748x_tx_power(&state->txa, 0);
-
- /* Init and power down TXB */
- ret = adv748x_write_regs(state, adv748x_init_txb_1lane);
+ ret = adv748x_write_regs(state, adv748x_init_afe);
if (ret)
return ret;
+ /* Reset TXA and TXB */
+ adv748x_tx_power(&state->txa, 1);
+ adv748x_tx_power(&state->txa, 0);
+ adv748x_tx_power(&state->txb, 1);
adv748x_tx_power(&state->txb, 0);
/* Disable chip powerdown & Enable HDMI Rx block */
@@ -542,7 +566,51 @@ void adv748x_subdev_init(struct v4l2_subdev *sd, struct adv748x_state *state,
state->client->addr, ident);
sd->entity.function = function;
- sd->entity.ops = &adv748x_media_ops;
+ sd->entity.ops = is_tx(adv748x_sd_to_csi2(sd)) ?
+ &adv748x_tx_media_ops : &adv748x_media_ops;
+}
+
+static int adv748x_parse_csi2_lanes(struct adv748x_state *state,
+ unsigned int port,
+ struct device_node *ep)
+{
+ struct v4l2_fwnode_endpoint vep;
+ unsigned int num_lanes;
+ int ret;
+
+ if (port != ADV748X_PORT_TXA && port != ADV748X_PORT_TXB)
+ return 0;
+
+ vep.bus_type = V4L2_MBUS_CSI2_DPHY;
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &vep);
+ if (ret)
+ return ret;
+
+ num_lanes = vep.bus.mipi_csi2.num_data_lanes;
+
+ if (vep.base.port == ADV748X_PORT_TXA) {
+ if (num_lanes != 1 && num_lanes != 2 && num_lanes != 4) {
+ adv_err(state, "TXA: Invalid number (%u) of lanes\n",
+ num_lanes);
+ return -EINVAL;
+ }
+
+ state->txa.num_lanes = num_lanes;
+ adv_dbg(state, "TXA: using %u lanes\n", state->txa.num_lanes);
+ }
+
+ if (vep.base.port == ADV748X_PORT_TXB) {
+ if (num_lanes != 1) {
+ adv_err(state, "TXB: Invalid number (%u) of lanes\n",
+ num_lanes);
+ return -EINVAL;
+ }
+
+ state->txb.num_lanes = num_lanes;
+ adv_dbg(state, "TXB: using %u lanes\n", state->txb.num_lanes);
+ }
+
+ return 0;
}
static int adv748x_parse_dt(struct adv748x_state *state)
@@ -551,6 +619,7 @@ static int adv748x_parse_dt(struct adv748x_state *state)
struct of_endpoint ep;
bool out_found = false;
bool in_found = false;
+ int ret;
for_each_endpoint_of_node(state->dev->of_node, ep_np) {
of_graph_parse_endpoint(ep_np, &ep);
@@ -581,6 +650,11 @@ static int adv748x_parse_dt(struct adv748x_state *state)
in_found = true;
else
out_found = true;
+
+ /* Store number of CSI-2 lanes used for TXA and TXB. */
+ ret = adv748x_parse_csi2_lanes(state, ep.port, ep_np);
+ if (ret)
+ return ret;
}
return in_found && out_found ? 0 : -ENODEV;
@@ -604,7 +678,7 @@ static int adv748x_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- state = kzalloc(sizeof(struct adv748x_state), GFP_KERNEL);
+ state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
@@ -702,7 +776,6 @@ err_cleanup_dt:
adv748x_dt_cleanup(state);
err_free_mutex:
mutex_destroy(&state->mutex);
- kfree(state);
return ret;
}
@@ -721,8 +794,6 @@ static int adv748x_remove(struct i2c_client *client)
adv748x_dt_cleanup(state);
mutex_destroy(&state->mutex);
- kfree(state);
-
return 0;
}
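
The reworked reset path above drops the register-table "fake pages" in favour of explicit accessors such as io_write() and io_clrset(). For reference, the clear/set helper boils down to a read-modify-write cycle; a minimal sketch, assuming io_read()/io_write() accessors that return a negative errno on failure (the driver's real helpers may be implemented differently):

static int io_clrset_sketch(struct adv748x_state *state, u8 reg,
			    u8 mask, u8 val)
{
	int ret = io_read(state, reg);	/* current register contents */

	if (ret < 0)
		return ret;

	/* Clear the bits in 'mask', then set the bits in 'val'. */
	return io_write(state, reg, (ret & ~mask) | val);
}
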
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
index 6ce21542ed48..2091cda50935 100644
--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -27,6 +27,7 @@ static int adv748x_csi2_set_virtual_channel(struct adv748x_csi2 *tx,
* @v4l2_dev: Video registration device
* @src: Source subdevice to establish link
* @src_pad: Pad number of source to link to this @tx
+ * @enable: Link enabled flag
*
* Ensure that the subdevice is registered against the v4l2_device, and link the
* source pad to the sink pad of the CSI2 bus entity.
@@ -34,26 +35,27 @@ static int adv748x_csi2_set_virtual_channel(struct adv748x_csi2 *tx,
static int adv748x_csi2_register_link(struct adv748x_csi2 *tx,
struct v4l2_device *v4l2_dev,
struct v4l2_subdev *src,
- unsigned int src_pad)
+ unsigned int src_pad,
+ bool enable)
{
- int enabled = MEDIA_LNK_FL_ENABLED;
int ret;
- /*
- * Dynamic linking of the AFE is not supported.
- * Register the links as immutable.
- */
- enabled |= MEDIA_LNK_FL_IMMUTABLE;
-
if (!src->v4l2_dev) {
ret = v4l2_device_register_subdev(v4l2_dev, src);
if (ret)
return ret;
}
- return media_create_pad_link(&src->entity, src_pad,
- &tx->sd.entity, ADV748X_CSI2_SINK,
- enabled);
+ ret = media_create_pad_link(&src->entity, src_pad,
+ &tx->sd.entity, ADV748X_CSI2_SINK,
+ enable ? MEDIA_LNK_FL_ENABLED : 0);
+ if (ret)
+ return ret;
+
+ if (enable)
+ tx->src = src;
+
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -68,24 +70,42 @@ static int adv748x_csi2_registered(struct v4l2_subdev *sd)
{
struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
struct adv748x_state *state = tx->state;
+ int ret;
adv_dbg(state, "Registered %s (%s)", is_txa(tx) ? "TXA":"TXB",
sd->name);
/*
- * The adv748x hardware allows the AFE to route through the TXA, however
- * this is not currently supported in this driver.
+ * Link TXA to AFE and HDMI, and TXB to AFE only as TXB cannot output
+ * HDMI.
*
- * Link HDMI->TXA, and AFE->TXB directly.
+ * The HDMI->TXA link is enabled by default, as is the AFE->TXB one.
*/
- if (is_txa(tx) && is_hdmi_enabled(state))
- return adv748x_csi2_register_link(tx, sd->v4l2_dev,
- &state->hdmi.sd,
- ADV748X_HDMI_SOURCE);
- if (!is_txa(tx) && is_afe_enabled(state))
- return adv748x_csi2_register_link(tx, sd->v4l2_dev,
- &state->afe.sd,
- ADV748X_AFE_SOURCE);
+ if (is_afe_enabled(state)) {
+ ret = adv748x_csi2_register_link(tx, sd->v4l2_dev,
+ &state->afe.sd,
+ ADV748X_AFE_SOURCE,
+ is_txb(tx));
+ if (ret)
+ return ret;
+
+ /* TXB can output AFE signals only. */
+ if (is_txb(tx))
+ state->afe.tx = tx;
+ }
+
+ /* Register link to HDMI for TXA only. */
+ if (is_txb(tx) || !is_hdmi_enabled(state))
+ return 0;
+
+ ret = adv748x_csi2_register_link(tx, sd->v4l2_dev, &state->hdmi.sd,
+ ADV748X_HDMI_SOURCE, true);
+ if (ret)
+ return ret;
+
+ /* The default HDMI output is TXA. */
+ state->hdmi.tx = tx;
+
return 0;
}
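
The link registration above hinges on the media link flags: MEDIA_LNK_FL_ENABLED selects which route is active at registration time, and dropping MEDIA_LNK_FL_IMMUTABLE is what lets userspace reroute the AFE between TXA and TXB. A minimal sketch of the two flavours (entity pointers and pad indices here are placeholders):

/* Dynamic link, initially enabled - userspace may later reroute it: */
ret = media_create_pad_link(&src->entity, SRC_PAD,
			    &sink->entity, SINK_PAD,
			    MEDIA_LNK_FL_ENABLED);

/* Fixed link - always enabled and not changeable from userspace: */
ret = media_create_pad_link(&src->entity, SRC_PAD,
			    &sink->entity, SINK_PAD,
			    MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
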
diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
index 35d027941482..c557f8fdf11a 100644
--- a/drivers/media/i2c/adv748x/adv748x-hdmi.c
+++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
@@ -358,7 +358,7 @@ static int adv748x_hdmi_s_stream(struct v4l2_subdev *sd, int enable)
mutex_lock(&state->mutex);
- ret = adv748x_tx_power(&state->txa, enable);
+ ret = adv748x_tx_power(hdmi->tx, enable);
if (ret)
goto done;
diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
index 39c2fdc3b416..5042f9e94aee 100644
--- a/drivers/media/i2c/adv748x/adv748x.h
+++ b/drivers/media/i2c/adv748x/adv748x.h
@@ -39,7 +39,6 @@ enum adv748x_page {
ADV748X_PAGE_MAX,
/* Fake pages for register sequences */
- ADV748X_PAGE_WAIT, /* Wait x msec */
ADV748X_PAGE_EOR, /* End Mark */
};
@@ -79,17 +78,23 @@ struct adv748x_csi2 {
struct v4l2_mbus_framefmt format;
unsigned int page;
unsigned int port;
+ unsigned int num_lanes;
struct media_pad pads[ADV748X_CSI2_NR_PADS];
struct v4l2_ctrl_handler ctrl_hdl;
struct v4l2_ctrl *pixel_rate;
+ struct v4l2_subdev *src;
struct v4l2_subdev sd;
};
#define notifier_to_csi2(n) container_of(n, struct adv748x_csi2, notifier)
#define adv748x_sd_to_csi2(sd) container_of(sd, struct adv748x_csi2, sd)
+
#define is_tx_enabled(_tx) ((_tx)->state->endpoints[(_tx)->port] != NULL)
#define is_txa(_tx) ((_tx) == &(_tx)->state->txa)
+#define is_txb(_tx) ((_tx) == &(_tx)->state->txb)
+#define is_tx(_tx) (is_txa(_tx) || is_txb(_tx))
+
#define is_afe_enabled(_state) \
((_state)->endpoints[ADV748X_PORT_AIN0] != NULL || \
(_state)->endpoints[ADV748X_PORT_AIN1] != NULL || \
@@ -116,6 +121,8 @@ struct adv748x_hdmi {
struct v4l2_dv_timings timings;
struct v4l2_fract aspect_ratio;
+ struct adv748x_csi2 *tx;
+
struct {
u8 edid[512];
u32 present;
@@ -146,6 +153,8 @@ struct adv748x_afe {
struct v4l2_subdev sd;
struct v4l2_mbus_framefmt format;
+ struct adv748x_csi2 *tx;
+
bool streaming;
v4l2_std_id curr_norm;
unsigned int input;
@@ -201,6 +210,11 @@ struct adv748x_state {
#define ADV748X_IO_PD 0x00 /* power down controls */
#define ADV748X_IO_PD_RX_EN BIT(6)
+#define ADV748X_IO_REG_01 0x01 /* pwrdn{2}b, prog_xtal_freq */
+#define ADV748X_IO_REG_01_PWRDN_MASK (BIT(7) | BIT(6))
+#define ADV748X_IO_REG_01_PWRDN2B BIT(7) /* CEC Wakeup Support */
+#define ADV748X_IO_REG_01_PWRDNB BIT(6) /* CEC Wakeup Support */
+
#define ADV748X_IO_REG_04 0x04
#define ADV748X_IO_REG_04_FORCE_FR BIT(0) /* Force CP free-run */
@@ -214,12 +228,24 @@ struct adv748x_state {
#define ADV748X_IO_10_CSI4_EN BIT(7)
#define ADV748X_IO_10_CSI1_EN BIT(6)
#define ADV748X_IO_10_PIX_OUT_EN BIT(5)
+#define ADV748X_IO_10_CSI4_IN_SEL_AFE BIT(3)
#define ADV748X_IO_CHIP_REV_ID_1 0xdf
#define ADV748X_IO_CHIP_REV_ID_2 0xe0
+#define ADV748X_IO_REG_F2 0xf2
+#define ADV748X_IO_REG_F2_READ_AUTO_INC BIT(0)
+
+/* For PAGE slave address offsets */
#define ADV748X_IO_SLAVE_ADDR_BASE 0xf2
+/*
+ * The ADV748x_Recommended_Settings_PrA_2014-08-20.pdf details both 0x80 and
+ * 0xff as examples for performing a software reset.
+ */
+#define ADV748X_IO_REG_FF 0xff
+#define ADV748X_IO_REG_FF_MAIN_RESET 0xff
+
/* HDMI RX Map */
#define ADV748X_HDMI_LW1 0x07 /* line width_1 */
#define ADV748X_HDMI_LW1_VERT_FILTER BIT(7)
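
The PWRDN definitions above are consumed by the io_clrset() call in adv748x_sw_reset(): the mask clears both power-down bits before PWRDNB alone is set back. As a worked example, starting from a register value of 0xff:

/*
 * (0xff & ~ADV748X_IO_REG_01_PWRDN_MASK) | ADV748X_IO_REG_01_PWRDNB
 *   == (0xff & ~0xc0) | 0x40
 *   == 0x3f | 0x40
 *   == 0x7f, i.e. PWRDN2B (CEC wakeup) cleared and PWRDNB set.
 */
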
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 989259488e3d..11ab2df02dc7 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3102,11 +3102,11 @@ static int adv7842_ddr_ram_test(struct v4l2_subdev *sd)
io_write(sd, 0x00, 0x01); /* Program SDP 4x1 */
io_write(sd, 0x01, 0x00); /* Program SDP mode */
- afe_write(sd, 0x80, 0x92); /* SDP Recommeneded Write */
- afe_write(sd, 0x9B, 0x01); /* SDP Recommeneded Write ADV7844ES1 */
- afe_write(sd, 0x9C, 0x60); /* SDP Recommeneded Write ADV7844ES1 */
- afe_write(sd, 0x9E, 0x02); /* SDP Recommeneded Write ADV7844ES1 */
- afe_write(sd, 0xA0, 0x0B); /* SDP Recommeneded Write ADV7844ES1 */
+ afe_write(sd, 0x80, 0x92); /* SDP Recommended Write */
+ afe_write(sd, 0x9B, 0x01); /* SDP Recommended Write ADV7844ES1 */
+ afe_write(sd, 0x9C, 0x60); /* SDP Recommended Write ADV7844ES1 */
+ afe_write(sd, 0x9E, 0x02); /* SDP Recommended Write ADV7844ES1 */
+ afe_write(sd, 0xA0, 0x0B); /* SDP Recommended Write ADV7844ES1 */
afe_write(sd, 0xC3, 0x02); /* Memory BIST Initialisation */
io_write(sd, 0x0C, 0x40); /* Power up ADV7844 */
io_write(sd, 0x15, 0xBA); /* Enable outputs */
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 472e37637c8d..e6d3fe7790bc 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -164,12 +164,12 @@ static int bt819_init(struct v4l2_subdev *sd)
0x0e, 0xb4, /* 0x0e Chroma Gain (V) msb */
0x0f, 0x00, /* 0x0f Hue control */
0x12, 0x04, /* 0x12 Output Format */
- 0x13, 0x20, /* 0x13 Vertial Scaling msb 0x00
+ 0x13, 0x20, /* 0x13 Vertical Scaling msb 0x00
chroma comb OFF, line drop scaling, interlace scaling
BUG? Why does turning the chroma comb on fuck up color?
Bug in the bt819 stepping on my board?
*/
- 0x14, 0x00, /* 0x14 Vertial Scaling lsb */
+ 0x14, 0x00, /* 0x14 Vertical Scaling lsb */
0x16, 0x07, /* 0x16 Video Timing Polarity
ACTIVE=active low
FIELD: high=odd,
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index b168bf3635b6..8b0b8b5aa531 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -5216,8 +5216,9 @@ static int cx25840_probe(struct i2c_client *client,
* those extra inputs. So, let's add it only when needed.
*/
state->pads[CX25840_PAD_INPUT].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CX25840_PAD_INPUT].sig_type = PAD_SIGNAL_ANALOG;
state->pads[CX25840_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
- state->pads[CX25840_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[CX25840_PAD_VID_OUT].sig_type = PAD_SIGNAL_DV;
sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(state->pads),
diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h
index c323b1af1f83..e3ff1d7ec770 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.h
+++ b/drivers/media/i2c/cx25840/cx25840-core.h
@@ -40,7 +40,6 @@ enum cx25840_model {
enum cx25840_media_pads {
CX25840_PAD_INPUT,
CX25840_PAD_VID_OUT,
- CX25840_PAD_VBI_OUT,
CX25840_NUM_PADS
};
@@ -67,7 +66,7 @@ enum cx25840_media_pads {
* @is_initialized: whether we have already loaded firmware into the chip
* and initialized it
* @vbi_regs_offset: offset of vbi regs
- * @fw_wait: wait queue to wake an initalization function up when
+ * @fw_wait: wait queue to wake an initialization function up when
* firmware loading (on a separate workqueue) finishes
* @fw_work: a work that actually loads the firmware on a separate
* workqueue
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 69cdc09981af..a266118cd7ca 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -549,7 +549,7 @@ int cx25840_ir_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled)
ror = stats & STATS_ROR; /* Rx FIFO Over Run */
tse = irqen & IRQEN_TSE; /* Tx FIFO Service Request IRQ Enable */
- rse = irqen & IRQEN_RSE; /* Rx FIFO Service Reuqest IRQ Enable */
+ rse = irqen & IRQEN_RSE; /* Rx FIFO Service Request IRQ Enable */
rte = irqen & IRQEN_RTE; /* Rx Pulse Width Timer Time Out IRQ Enable */
roe = irqen & IRQEN_ROE; /* Rx FIFO Over Run IRQ Enable */
@@ -638,7 +638,7 @@ int cx25840_ir_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled)
events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED;
}
if (v) {
- /* Clear STATS_ROR & STATS_RTO as needed by reseting hardware */
+ /* Clear STATS_ROR & STATS_RTO as needed by resetting hardware */
cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl & ~v);
cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl);
*handled = true;
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 26d83693a681..3f0b082f863f 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -267,7 +267,7 @@ static struct i2c_driver dw9714_i2c_driver = {
module_i2c_driver(dw9714_i2c_driver);
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
-MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
+MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Jouni Ukkonen <jouni.ukkonen@intel.com>");
MODULE_AUTHOR("Tommi Franttila <tommi.franttila@intel.com>");
diff --git a/drivers/media/i2c/et8ek8/et8ek8_mode.c b/drivers/media/i2c/et8ek8/et8ek8_mode.c
index a79882a83885..f503303cb8bc 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_mode.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_mode.c
@@ -79,7 +79,7 @@ static struct et8ek8_reglist mode1_poweron_mode2_16vga_2592x1968_12_07fps = {
{ ET8EK8_REG_8BIT, 0x1258, 0x00 },
/* From parallel out to serial out */
{ ET8EK8_REG_8BIT, 0x125D, 0x88 },
- /* From w/ embeded data to w/o embeded data */
+ /* From w/ embedded data to w/o embedded data */
{ ET8EK8_REG_8BIT, 0x125E, 0xC0 },
/* CCP2 out is from STOP to ACTIVE */
{ ET8EK8_REG_8BIT, 0x1263, 0x98 },
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index ec3d1b855f62..9857e151db46 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -377,7 +377,7 @@ static const struct reg_8 mode_table_common[] = {
/* Moire reduction */
{0x6957, 0x01},
- /* image enhancment */
+ /* image enhancement */
{0x6987, 0x17},
{0x698A, 0x03},
{0x698B, 0x03},
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 5fac7fd32634..f3ff1af209f9 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -207,8 +207,8 @@ static const char * const tp_qmenu[] = {
"Vertical Stripe (555h / 000h)",
"Vertical Stripe (000h / FFFh)",
"Vertical Stripe (FFFh / 000h)",
- "Horizontal Color Bars",
"Vertical Color Bars",
+ "Horizontal Color Bars",
};
/*
@@ -405,12 +405,12 @@ static const struct reg_8 imx274_start_2[] = {
*/
static const struct reg_8 imx274_start_3[] = {
{0x30F4, 0x00},
- {0x3018, 0xA2}, /* XHS VHS OUTUPT */
+ {0x3018, 0xA2}, /* XHS VHS OUTPUT */
{IMX274_TABLE_END, 0x00}
};
/*
- * imx274 register configuration for stoping stream
+ * imx274 register configuration for stopping stream
*/
static const struct reg_8 imx274_stop[] = {
{IMX274_STANDBY_REG, 0x01},
@@ -617,24 +617,6 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
return 0;
}
-static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
-{
- unsigned int uint_val;
- int err;
-
- err = regmap_read(priv->regmap, addr, &uint_val);
- if (err)
- dev_err(&priv->client->dev,
- "%s : i2c read failed, addr = %x\n", __func__, addr);
- else
- dev_dbg(&priv->client->dev,
- "%s : addr 0x%x, val=0x%x\n", __func__,
- addr, uint_val);
-
- *val = uint_val;
- return err;
-}
-
static inline int imx274_write_reg(struct stimx274 *priv, u16 addr, u8 val)
{
int err;
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index f122f03bd6b7..70c3294c21d3 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -55,7 +55,7 @@ enum led_enable {
* @regmap: reg. map for i2c
* @lock: muxtex for serial access.
* @led_mode: V4L2 LED mode
- * @ctrls_led: V4L2 contols
+ * @ctrls_led: V4L2 controls
* @subdev_led: V4L2 subdev
*/
struct lm3560_flash {
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index 12ef2653987b..73fbe3c37fc9 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -62,7 +62,7 @@ enum led_mode {
* @regmap: reg. map for i2c
* @lock: muxtex for serial access.
* @led_mode: V4L2 LED mode
- * @ctrls_led: V4L2 contols
+ * @ctrls_led: V4L2 controls
* @subdev_led: V4L2 subdev
* @mode_reg : mode register value
*/
diff --git a/drivers/media/i2c/m5mols/m5mols.h b/drivers/media/i2c/m5mols/m5mols.h
index 90a6c520f115..aef5b4f8904e 100644
--- a/drivers/media/i2c/m5mols/m5mols.h
+++ b/drivers/media/i2c/m5mols/m5mols.h
@@ -253,7 +253,7 @@ struct m5mols_info {
*
* The I2C read operation of the M-5MOLS requires 2 messages. The first
* message sends the information about the command, command category, and total
- * message size. The second message is used to retrieve the data specifed in
+ * message size. The second message is used to retrieve the data specified in
* the first message
*
* 1st message 2nd message
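
The two-message layout described above maps naturally onto a single i2c_transfer() call; a minimal sketch, where the 5-byte command buffer and the read length are assumptions for illustration:

struct i2c_msg msg[2] = {
	/* 1st message: command header (category, command, size) */
	{ .addr = client->addr, .flags = 0,        .len = 5,   .buf = cmd  },
	/* 2nd message: read back the data requested above */
	{ .addr = client->addr, .flags = I2C_M_RD, .len = len, .buf = data },
};

ret = i2c_transfer(client->adapter, msg, 2);	/* 2 == both messages done */
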
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index b8b2bf4cbfb2..454a336be336 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -291,7 +291,7 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
* @reg: the I2C_REG() address of an 8-bit status register to check
* @value: expected status register value
* @mask: bit mask for the read status register value
- * @timeout: timeout in miliseconds, or -1 for default timeout
+ * @timeout: timeout in milliseconds, or -1 for default timeout
*
* The @reg register value is ORed with @mask before comparing with @value.
*
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index c63be01059b2..522fb1d561e7 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -11,7 +11,7 @@
*
* FM-Mono
* should work. The stereo modes are backward compatible to FM-mono,
- * therefore FM-Mono should be allways available.
+ * therefore FM-Mono should always be available.
*
* FM-Stereo (B/G, used in germany)
* should work, with autodetect
diff --git a/drivers/media/i2c/soc_camera/soc_mt9m001.c b/drivers/media/i2c/mt9m001.c
index a1a85ff838c5..4b23fde937b3 100644
--- a/drivers/media/i2c/soc_camera/soc_mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -1,29 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for MT9M001 CMOS Image Sensor from Micron
*
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include <linux/videodev2.h>
-#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
-#include <media/soc_camera.h>
-#include <media/drv-intf/soc_mediabus.h>
-#include <media/v4l2-clk.h>
-#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
/*
* mt9m001 i2c address 0x5d
- * The platform has to define struct i2c_board_info objects and link to them
- * from struct soc_camera_host_desc
*/
/* mt9m001 selected register addresses */
@@ -50,6 +48,8 @@
#define MT9M001_MIN_HEIGHT 32
#define MT9M001_COLUMN_SKIP 20
#define MT9M001_ROW_SKIP 12
+#define MT9M001_DEFAULT_HBLANK 9
+#define MT9M001_DEFAULT_VBLANK 25
/* MT9M001 has only one fixed colorspace per pixelcode */
struct mt9m001_datafmt {
@@ -93,13 +93,18 @@ struct mt9m001 {
struct v4l2_ctrl *autoexposure;
struct v4l2_ctrl *exposure;
};
+ bool streaming;
+ struct mutex mutex;
struct v4l2_rect rect; /* Sensor window */
- struct v4l2_clk *clk;
+ struct clk *clk;
+ struct gpio_desc *standby_gpio;
+ struct gpio_desc *reset_gpio;
const struct mt9m001_datafmt *fmt;
const struct mt9m001_datafmt *fmts;
int num_fmts;
unsigned int total_h;
unsigned short y_skip_top; /* Lines to skip at the top */
+ struct media_pad pad;
};
static struct mt9m001 *to_mt9m001(const struct i2c_client *client)
@@ -140,35 +145,111 @@ static int reg_clear(struct i2c_client *client, const u8 reg,
return reg_write(client, reg, ret & ~data);
}
+struct mt9m001_reg {
+ u8 reg;
+ u16 data;
+};
+
+static int multi_reg_write(struct i2c_client *client,
+ const struct mt9m001_reg *regs, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ int ret = reg_write(client, regs[i].reg, regs[i].data);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int mt9m001_init(struct i2c_client *client)
{
- int ret;
+ const struct mt9m001_reg init_regs[] = {
+ /*
+ * Issue a soft reset. This returns all registers to their
+ * default values.
+ */
+ { MT9M001_RESET, 1 },
+ { MT9M001_RESET, 0 },
+ /* Disable chip, synchronous option update */
+ { MT9M001_OUTPUT_CONTROL, 0 }
+ };
dev_dbg(&client->dev, "%s\n", __func__);
- /*
- * We don't know, whether platform provides reset, issue a soft reset
- * too. This returns all registers to their default values.
- */
- ret = reg_write(client, MT9M001_RESET, 1);
- if (!ret)
- ret = reg_write(client, MT9M001_RESET, 0);
+ return multi_reg_write(client, init_regs, ARRAY_SIZE(init_regs));
+}
- /* Disable chip, synchronous option update */
- if (!ret)
- ret = reg_write(client, MT9M001_OUTPUT_CONTROL, 0);
+static int mt9m001_apply_selection(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ const struct mt9m001_reg regs[] = {
+ /* Blanking and start values - default... */
+ { MT9M001_HORIZONTAL_BLANKING, MT9M001_DEFAULT_HBLANK },
+ { MT9M001_VERTICAL_BLANKING, MT9M001_DEFAULT_VBLANK },
+ /*
+		 * The caller provides a supported format, as verified by a
+		 * previous call to .set_fmt(FORMAT_TRY).
+ */
+ { MT9M001_COLUMN_START, mt9m001->rect.left },
+ { MT9M001_ROW_START, mt9m001->rect.top },
+ { MT9M001_WINDOW_WIDTH, mt9m001->rect.width - 1 },
+ { MT9M001_WINDOW_HEIGHT,
+ mt9m001->rect.height + mt9m001->y_skip_top - 1 },
+ };
- return ret;
+ return multi_reg_write(client, regs, ARRAY_SIZE(regs));
}
static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ int ret = 0;
+
+ mutex_lock(&mt9m001->mutex);
+
+ if (mt9m001->streaming == enable)
+ goto done;
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0)
+ goto put_unlock;
+
+ ret = mt9m001_apply_selection(sd);
+ if (ret)
+ goto put_unlock;
+
+ ret = __v4l2_ctrl_handler_setup(&mt9m001->hdl);
+ if (ret)
+ goto put_unlock;
+
+ /* Switch to master "normal" mode */
+ ret = reg_write(client, MT9M001_OUTPUT_CONTROL, 2);
+ if (ret < 0)
+ goto put_unlock;
+ } else {
+ /* Switch to master stop sensor readout */
+ reg_write(client, MT9M001_OUTPUT_CONTROL, 0);
+ pm_runtime_put(&client->dev);
+ }
+
+ mt9m001->streaming = enable;
+done:
+ mutex_unlock(&mt9m001->mutex);
- /* Switch to master "normal" mode or stop sensor readout */
- if (reg_write(client, MT9M001_OUTPUT_CONTROL, enable ? 2 : 0) < 0)
- return -EIO;
return 0;
+
+put_unlock:
+ pm_runtime_put(&client->dev);
+ mutex_unlock(&mt9m001->mutex);
+
+ return ret;
}
static int mt9m001_set_selection(struct v4l2_subdev *sd,
@@ -178,8 +259,6 @@ static int mt9m001_set_selection(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct v4l2_rect rect = sel->r;
- const u16 hblank = 9, vblank = 25;
- int ret;
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
sel->target != V4L2_SEL_TGT_CROP)
@@ -196,39 +275,22 @@ static int mt9m001_set_selection(struct v4l2_subdev *sd,
rect.width = ALIGN(rect.width, 2);
rect.left = ALIGN(rect.left, 2);
- soc_camera_limit_side(&rect.left, &rect.width,
- MT9M001_COLUMN_SKIP, MT9M001_MIN_WIDTH, MT9M001_MAX_WIDTH);
+ rect.width = clamp_t(u32, rect.width, MT9M001_MIN_WIDTH,
+ MT9M001_MAX_WIDTH);
+ rect.left = clamp_t(u32, rect.left, MT9M001_COLUMN_SKIP,
+ MT9M001_COLUMN_SKIP + MT9M001_MAX_WIDTH - rect.width);
- soc_camera_limit_side(&rect.top, &rect.height,
- MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT);
+ rect.height = clamp_t(u32, rect.height, MT9M001_MIN_HEIGHT,
+ MT9M001_MAX_HEIGHT);
+ rect.top = clamp_t(u32, rect.top, MT9M001_ROW_SKIP,
+ MT9M001_ROW_SKIP + MT9M001_MAX_HEIGHT - rect.height);
- mt9m001->total_h = rect.height + mt9m001->y_skip_top + vblank;
+ mt9m001->total_h = rect.height + mt9m001->y_skip_top +
+ MT9M001_DEFAULT_VBLANK;
- /* Blanking and start values - default... */
- ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank);
- if (!ret)
- ret = reg_write(client, MT9M001_VERTICAL_BLANKING, vblank);
+ mt9m001->rect = rect;
- /*
- * The caller provides a supported format, as verified per
- * call to .set_fmt(FORMAT_TRY).
- */
- if (!ret)
- ret = reg_write(client, MT9M001_COLUMN_START, rect.left);
- if (!ret)
- ret = reg_write(client, MT9M001_ROW_START, rect.top);
- if (!ret)
- ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect.width - 1);
- if (!ret)
- ret = reg_write(client, MT9M001_WINDOW_HEIGHT,
- rect.height + mt9m001->y_skip_top - 1);
- if (!ret && v4l2_ctrl_g_ctrl(mt9m001->autoexposure) == V4L2_EXPOSURE_AUTO)
- ret = reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h);
-
- if (!ret)
- mt9m001->rect = rect;
-
- return ret;
+ return 0;
}
static int mt9m001_get_selection(struct v4l2_subdev *sd,
@@ -267,11 +329,20 @@ static int mt9m001_get_fmt(struct v4l2_subdev *sd,
if (format->pad)
return -EINVAL;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *mf;
+ return 0;
+ }
+
mf->width = mt9m001->rect.width;
mf->height = mt9m001->rect.height;
mf->code = mt9m001->fmt->code;
mf->colorspace = mt9m001->fmt->colorspace;
mf->field = V4L2_FIELD_NONE;
+ mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mf->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
return 0;
}
@@ -332,6 +403,10 @@ static int mt9m001_set_fmt(struct v4l2_subdev *sd,
}
mf->colorspace = fmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
+ mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mf->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return mt9m001_s_fmt(sd, fmt, mf);
@@ -372,13 +447,40 @@ static int mt9m001_s_register(struct v4l2_subdev *sd,
}
#endif
-static int mt9m001_s_power(struct v4l2_subdev *sd, int on)
+static int mt9m001_power_on(struct device *dev)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
+ struct i2c_client *client = to_i2c_client(dev);
struct mt9m001 *mt9m001 = to_mt9m001(client);
+ int ret;
+
+ ret = clk_prepare_enable(mt9m001->clk);
+ if (ret)
+ return ret;
+
+ if (mt9m001->standby_gpio) {
+ gpiod_set_value_cansleep(mt9m001->standby_gpio, 0);
+ usleep_range(1000, 2000);
+ }
+
+ if (mt9m001->reset_gpio) {
+ gpiod_set_value_cansleep(mt9m001->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(mt9m001->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ }
- return soc_camera_set_power(&client->dev, ssdd, mt9m001->clk, on);
+ return 0;
+}
+
+static int mt9m001_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+
+ gpiod_set_value_cansleep(mt9m001->standby_gpio, 1);
+ clk_disable_unprepare(mt9m001->clk);
+
+ return 0;
}
static int mt9m001_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
@@ -406,16 +508,18 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct v4l2_ctrl *exp = mt9m001->exposure;
int data;
+ int ret;
+
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
switch (ctrl->id) {
case V4L2_CID_VFLIP:
if (ctrl->val)
- data = reg_set(client, MT9M001_READ_OPTIONS2, 0x8000);
+ ret = reg_set(client, MT9M001_READ_OPTIONS2, 0x8000);
else
- data = reg_clear(client, MT9M001_READ_OPTIONS2, 0x8000);
- if (data < 0)
- return -EIO;
- return 0;
+ ret = reg_clear(client, MT9M001_READ_OPTIONS2, 0x8000);
+ break;
case V4L2_CID_GAIN:
/* See Datasheet Table 7, Gain settings. */
@@ -425,9 +529,7 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl)
data = ((ctrl->val - (s32)ctrl->minimum) * 8 + range / 2) / range;
dev_dbg(&client->dev, "Setting gain %d\n", data);
- data = reg_write(client, MT9M001_GLOBAL_GAIN, data);
- if (data < 0)
- return -EIO;
+ ret = reg_write(client, MT9M001_GLOBAL_GAIN, data);
} else {
/* Pack it into 1.125..15 variable step, register values 9..67 */
/* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */
@@ -444,11 +546,9 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl)
dev_dbg(&client->dev, "Setting gain from %d to %d\n",
reg_read(client, MT9M001_GLOBAL_GAIN), data);
- data = reg_write(client, MT9M001_GLOBAL_GAIN, data);
- if (data < 0)
- return -EIO;
+ ret = reg_write(client, MT9M001_GLOBAL_GAIN, data);
}
- return 0;
+ break;
case V4L2_CID_EXPOSURE_AUTO:
if (ctrl->val == V4L2_EXPOSURE_MANUAL) {
@@ -459,37 +559,34 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl)
dev_dbg(&client->dev,
"Setting shutter width from %d to %lu\n",
reg_read(client, MT9M001_SHUTTER_WIDTH), shutter);
- if (reg_write(client, MT9M001_SHUTTER_WIDTH, shutter) < 0)
- return -EIO;
+ ret = reg_write(client, MT9M001_SHUTTER_WIDTH, shutter);
} else {
- const u16 vblank = 25;
-
mt9m001->total_h = mt9m001->rect.height +
- mt9m001->y_skip_top + vblank;
- if (reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h) < 0)
- return -EIO;
+ mt9m001->y_skip_top + MT9M001_DEFAULT_VBLANK;
+ ret = reg_write(client, MT9M001_SHUTTER_WIDTH,
+ mt9m001->total_h);
}
- return 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return -EINVAL;
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
}
/*
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
-static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd,
- struct i2c_client *client)
+static int mt9m001_video_probe(struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
s32 data;
- unsigned long flags;
int ret;
- ret = mt9m001_s_power(&mt9m001->subdev, 1);
- if (ret < 0)
- return ret;
-
/* Enable the chip */
data = reg_write(client, MT9M001_CHIP_ENABLE, 1);
dev_dbg(&client->dev, "write: %d\n", data);
@@ -502,9 +599,11 @@ static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd,
case 0x8411:
case 0x8421:
mt9m001->fmts = mt9m001_colour_fmts;
+ mt9m001->num_fmts = ARRAY_SIZE(mt9m001_colour_fmts);
break;
case 0x8431:
mt9m001->fmts = mt9m001_monochrome_fmts;
+ mt9m001->num_fmts = ARRAY_SIZE(mt9m001_monochrome_fmts);
break;
default:
dev_err(&client->dev,
@@ -513,26 +612,6 @@ static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd,
goto done;
}
- mt9m001->num_fmts = 0;
-
- /*
- * This is a 10bit sensor, so by default we only allow 10bit.
- * The platform may support different bus widths due to
- * different routing of the data lines.
- */
- if (ssdd->query_bus_param)
- flags = ssdd->query_bus_param(ssdd);
- else
- flags = SOCAM_DATAWIDTH_10;
-
- if (flags & SOCAM_DATAWIDTH_10)
- mt9m001->num_fmts++;
- else
- mt9m001->fmts++;
-
- if (flags & SOCAM_DATAWIDTH_8)
- mt9m001->num_fmts++;
-
mt9m001->fmt = &mt9m001->fmts[0];
dev_info(&client->dev, "Detected a MT9M001 chip ID %x (%s)\n", data,
@@ -548,16 +627,9 @@ static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd,
ret = v4l2_ctrl_handler_setup(&mt9m001->hdl);
done:
- mt9m001_s_power(&mt9m001->subdev, 0);
return ret;
}
-static void mt9m001_video_remove(struct soc_camera_subdev_desc *ssdd)
-{
- if (ssdd->free_bus)
- ssdd->free_bus(ssdd);
-}
-
static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -574,13 +646,35 @@ static const struct v4l2_ctrl_ops mt9m001_ctrl_ops = {
};
static const struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9m001_g_register,
.s_register = mt9m001_s_register,
#endif
- .s_power = mt9m001_s_power,
};
+static int mt9m001_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, cfg, 0);
+
+ try_fmt->width = MT9M001_MAX_WIDTH;
+ try_fmt->height = MT9M001_MAX_HEIGHT;
+ try_fmt->code = mt9m001->fmts[0].code;
+ try_fmt->colorspace = mt9m001->fmts[0].colorspace;
+ try_fmt->field = V4L2_FIELD_NONE;
+ try_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ try_fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ try_fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return 0;
+}
+
static int mt9m001_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
@@ -598,41 +692,18 @@ static int mt9m001_enum_mbus_code(struct v4l2_subdev *sd,
static int mt9m001_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
/* MT9M001 has all capture_format parameters fixed */
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_MASTER;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
-static int mt9m001_s_mbus_config(struct v4l2_subdev *sd,
- const struct v4l2_mbus_config *cfg)
-{
- const struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct mt9m001 *mt9m001 = to_mt9m001(client);
- unsigned int bps = soc_mbus_get_fmtdesc(mt9m001->fmt->code)->bits_per_sample;
-
- if (ssdd->set_bus_param)
- return ssdd->set_bus_param(ssdd, 1 << (bps - 1));
-
- /*
- * Without board specific bus width settings we only support the
- * sensors native bus width
- */
- return bps == 10 ? 0 : -EINVAL;
-}
-
static const struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
.s_stream = mt9m001_s_stream,
.g_mbus_config = mt9m001_g_mbus_config,
- .s_mbus_config = mt9m001_s_mbus_config,
};
static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
@@ -640,6 +711,7 @@ static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
};
static const struct v4l2_subdev_pad_ops mt9m001_subdev_pad_ops = {
+ .init_cfg = mt9m001_init_cfg,
.enum_mbus_code = mt9m001_enum_mbus_code,
.get_selection = mt9m001_get_selection,
.set_selection = mt9m001_set_selection,
@@ -659,25 +731,35 @@ static int mt9m001_probe(struct i2c_client *client,
{
struct mt9m001 *mt9m001;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!ssdd) {
- dev_err(&client->dev, "MT9M001 driver needs platform data\n");
- return -EINVAL;
- }
-
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
dev_warn(&adapter->dev,
"I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
return -EIO;
}
- mt9m001 = devm_kzalloc(&client->dev, sizeof(struct mt9m001), GFP_KERNEL);
+ mt9m001 = devm_kzalloc(&client->dev, sizeof(*mt9m001), GFP_KERNEL);
if (!mt9m001)
return -ENOMEM;
+ mt9m001->clk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(mt9m001->clk))
+ return PTR_ERR(mt9m001->clk);
+
+ mt9m001->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(mt9m001->standby_gpio))
+ return PTR_ERR(mt9m001->standby_gpio);
+
+ mt9m001->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(mt9m001->reset_gpio))
+ return PTR_ERR(mt9m001->reset_gpio);
+
v4l2_i2c_subdev_init(&mt9m001->subdev, client, &mt9m001_subdev_ops);
+ mt9m001->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
v4l2_ctrl_handler_init(&mt9m001->hdl, 4);
v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
@@ -699,6 +781,9 @@ static int mt9m001_probe(struct i2c_client *client,
v4l2_ctrl_auto_cluster(2, &mt9m001->autoexposure,
V4L2_EXPOSURE_MANUAL, true);
+ mutex_init(&mt9m001->mutex);
+ mt9m001->hdl.lock = &mt9m001->mutex;
+
/* Second stage probe - when a capture adapter is there */
mt9m001->y_skip_top = 0;
mt9m001->rect.left = MT9M001_COLUMN_SKIP;
@@ -706,18 +791,41 @@ static int mt9m001_probe(struct i2c_client *client,
mt9m001->rect.width = MT9M001_MAX_WIDTH;
mt9m001->rect.height = MT9M001_MAX_HEIGHT;
- mt9m001->clk = v4l2_clk_get(&client->dev, "mclk");
- if (IS_ERR(mt9m001->clk)) {
- ret = PTR_ERR(mt9m001->clk);
- goto eclkget;
- }
+ ret = mt9m001_power_on(&client->dev);
+ if (ret)
+ goto error_hdl_free;
- ret = mt9m001_video_probe(ssdd, client);
- if (ret) {
- v4l2_clk_put(mt9m001->clk);
-eclkget:
- v4l2_ctrl_handler_free(&mt9m001->hdl);
- }
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = mt9m001_video_probe(client);
+ if (ret)
+ goto error_power_off;
+
+ mt9m001->pad.flags = MEDIA_PAD_FL_SOURCE;
+ mt9m001->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&mt9m001->subdev.entity, 1, &mt9m001->pad);
+ if (ret)
+ goto error_power_off;
+
+ ret = v4l2_async_register_subdev(&mt9m001->subdev);
+ if (ret)
+ goto error_entity_cleanup;
+
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+error_entity_cleanup:
+ media_entity_cleanup(&mt9m001->subdev.entity);
+error_power_off:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ mt9m001_power_off(&client->dev);
+
+error_hdl_free:
+ v4l2_ctrl_handler_free(&mt9m001->hdl);
+ mutex_destroy(&mt9m001->mutex);
return ret;
}
@@ -725,12 +833,19 @@ eclkget:
static int mt9m001_remove(struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- v4l2_clk_put(mt9m001->clk);
- v4l2_device_unregister_subdev(&mt9m001->subdev);
+ pm_runtime_get_sync(&client->dev);
+
+ v4l2_async_unregister_subdev(&mt9m001->subdev);
+ media_entity_cleanup(&mt9m001->subdev.entity);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ mt9m001_power_off(&client->dev);
+
v4l2_ctrl_handler_free(&mt9m001->hdl);
- mt9m001_video_remove(ssdd);
+ mutex_destroy(&mt9m001->mutex);
return 0;
}
@@ -741,9 +856,21 @@ static const struct i2c_device_id mt9m001_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mt9m001_id);
+static const struct dev_pm_ops mt9m001_pm_ops = {
+ SET_RUNTIME_PM_OPS(mt9m001_power_off, mt9m001_power_on, NULL)
+};
+
+static const struct of_device_id mt9m001_of_match[] = {
+ { .compatible = "onnn,mt9m001", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt9m001_of_match);
+
static struct i2c_driver mt9m001_i2c_driver = {
.driver = {
.name = "mt9m001",
+ .pm = &mt9m001_pm_ops,
+ .of_match_table = mt9m001_of_match,
},
.probe = mt9m001_probe,
.remove = mt9m001_remove,
@@ -754,4 +881,4 @@ module_i2c_driver(mt9m001_i2c_driver);
MODULE_DESCRIPTION("Micron MT9M001 Camera driver");
MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index d639b9bcf64a..5168bb5880c4 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -528,11 +528,24 @@ static int mt9m111_get_fmt(struct v4l2_subdev *sd,
if (format->pad)
return -EINVAL;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ mf = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ format->format = *mf;
+ return 0;
+#else
+ return -ENOTTY;
+#endif
+ }
+
mf->width = mt9m111->width;
mf->height = mt9m111->height;
mf->code = mt9m111->fmt->code;
mf->colorspace = mt9m111->fmt->colorspace;
mf->field = V4L2_FIELD_NONE;
+ mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mf->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
return 0;
}
@@ -660,6 +673,10 @@ static int mt9m111_set_fmt(struct v4l2_subdev *sd,
mf->code = fmt->code;
mf->colorspace = fmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
+ mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mf->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
cfg->try_fmt = *mf;
@@ -1089,6 +1106,25 @@ static int mt9m111_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
+static int mt9m111_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_get_try_format(sd, cfg, 0);
+
+ format->width = MT9M111_MAX_WIDTH;
+ format->height = MT9M111_MAX_HEIGHT;
+ format->code = mt9m111_colour_fmts[0].code;
+ format->colorspace = mt9m111_colour_fmts[0].colorspace;
+ format->field = V4L2_FIELD_NONE;
+ format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ format->quantization = V4L2_QUANTIZATION_DEFAULT;
+ format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+#endif
+ return 0;
+}
+
static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
@@ -1114,6 +1150,7 @@ static const struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
};
static const struct v4l2_subdev_pad_ops mt9m111_subdev_pad_ops = {
+ .init_cfg = mt9m111_init_cfg,
.enum_mbus_code = mt9m111_enum_mbus_code,
.get_selection = mt9m111_get_selection,
.set_selection = mt9m111_set_selection,
@@ -1273,6 +1310,8 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
mt9m111->rect.width = MT9M111_MAX_WIDTH;
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
+ mt9m111->width = mt9m111->rect.width;
+ mt9m111->height = mt9m111->rect.height;
mt9m111->fmt = &mt9m111_colour_fmts[0];
mt9m111->lastpage = -1;
mutex_init(&mt9m111->power_lock);
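
The new .init_cfg handler seeds the per-file-handle TRY format, so a TRY get_fmt issued before any set_fmt already returns the sensor defaults. From the subdev core's point of view the round trip looks like this (sketch; cfg is the pad config allocated for the file handle):

struct v4l2_subdev_format fmt = {
	.which = V4L2_SUBDEV_FORMAT_TRY,
	.pad = 0,
};
int ret;

/* .init_cfg has filled the TRY format, so this reports the defaults. */
ret = v4l2_subdev_call(sd, pad, get_fmt, cfg, &fmt);
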
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index ef353a244e33..ae3c336eadf5 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -541,7 +541,7 @@ static int mt9t112_init_setting(const struct i2c_client *client)
mt9t112_mcu_write(ret, client, VAR(18, 109), 0x0AF0);
/*
- * Flicker Dectection registers.
+ * Flicker Detection registers.
* This section should be replaced whenever new timing file is
* generated. All the following registers need to be replaced.
* Following registers are generated from Register Wizard but user can
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 5d2d6735cc78..83031cfc7914 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -842,9 +842,6 @@ static int ov2640_set_params(struct i2c_client *client,
u8 val;
int ret;
- if (!win)
- return -EINVAL;
-
switch (code) {
case MEDIA_BUS_FMT_RGB565_2X8_BE:
dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__);
@@ -929,9 +926,14 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
if (format->pad)
return -EINVAL;
- if (!priv->win) {
- priv->win = ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT);
- priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *mf;
+ return 0;
+#else
+ return -ENOTTY;
+#endif
}
mf->width = priv->win->width;
@@ -939,6 +941,9 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
mf->code = priv->cfmt_code;
mf->colorspace = V4L2_COLORSPACE_SRGB;
mf->field = V4L2_FIELD_NONE;
+ mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mf->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
return 0;
}
@@ -965,6 +970,9 @@ static int ov2640_set_fmt(struct v4l2_subdev *sd,
mf->field = V4L2_FIELD_NONE;
mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mf->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
switch (mf->code) {
case MEDIA_BUS_FMT_RGB565_2X8_BE:
@@ -999,6 +1007,27 @@ out:
return ret;
}
+static int ov2640_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, cfg, 0);
+ const struct ov2640_win_size *win =
+ ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT);
+
+ try_fmt->width = win->width;
+ try_fmt->height = win->height;
+ try_fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ try_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ try_fmt->field = V4L2_FIELD_NONE;
+ try_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ try_fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ try_fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+#endif
+ return 0;
+}
+
static int ov2640_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
@@ -1108,6 +1137,7 @@ static const struct v4l2_subdev_core_ops ov2640_subdev_core_ops = {
};
static const struct v4l2_subdev_pad_ops ov2640_subdev_pad_ops = {
+ .init_cfg = ov2640_init_cfg,
.enum_mbus_code = ov2640_enum_mbus_code,
.get_selection = ov2640_get_selection,
.get_fmt = ov2640_get_fmt,
@@ -1193,6 +1223,9 @@ static int ov2640_probe(struct i2c_client *client,
if (ret)
goto err_clk;
+ priv->win = ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT);
+ priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
+
v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index bef3f3aae0ed..82d4ce93312c 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -83,6 +83,9 @@
#define OV5640_REG_SIGMADELTA_CTRL0C 0x3c0c
#define OV5640_REG_FRAME_CTRL01 0x4202
#define OV5640_REG_FORMAT_CONTROL00 0x4300
+#define OV5640_REG_VFIFO_HSIZE 0x4602
+#define OV5640_REG_VFIFO_VSIZE 0x4604
+#define OV5640_REG_JPG_MODE_SELECT 0x4713
#define OV5640_REG_POLARITY_CTRL00 0x4740
#define OV5640_REG_MIPI_CTRL00 0x4800
#define OV5640_REG_DEBUG_MODE 0x4814
@@ -115,6 +118,15 @@ enum ov5640_frame_rate {
OV5640_NUM_FRAMERATES,
};
+enum ov5640_format_mux {
+ OV5640_FMT_MUX_YUV422 = 0,
+ OV5640_FMT_MUX_RGB,
+ OV5640_FMT_MUX_DITHER,
+ OV5640_FMT_MUX_RAW_DPC,
+ OV5640_FMT_MUX_SNR_RAW,
+ OV5640_FMT_MUX_RAW_CIP,
+};
+
struct ov5640_pixfmt {
u32 code;
u32 colorspace;
@@ -126,6 +138,10 @@ static const struct ov5640_pixfmt ov5640_formats[] = {
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_COLORSPACE_SRGB, },
};
/*
@@ -288,7 +304,7 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
{0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
{0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
- {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0},
+ {0x501f, 0x00, 0, 0}, {0x4407, 0x04, 0, 0},
{0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
{0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0},
{0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
@@ -357,7 +373,7 @@ static const struct reg_value ov5640_setting_VGA_640_480[] = {
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
@@ -376,7 +392,7 @@ static const struct reg_value ov5640_setting_XGA_1024_768[] = {
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
@@ -395,7 +411,7 @@ static const struct reg_value ov5640_setting_QVGA_320_240[] = {
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
@@ -414,7 +430,7 @@ static const struct reg_value ov5640_setting_QCIF_176_144[] = {
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
@@ -433,7 +449,7 @@ static const struct reg_value ov5640_setting_NTSC_720_480[] = {
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
@@ -452,7 +468,7 @@ static const struct reg_value ov5640_setting_PAL_720_576[] = {
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
@@ -471,7 +487,7 @@ static const struct reg_value ov5640_setting_720P_1280_720[] = {
{0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
{0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0},
{0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
{0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
};
@@ -491,7 +507,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
{0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0},
{0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
@@ -503,7 +519,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
{0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
{0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
- {0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 0x04, 0, 0},
+ {0x3a15, 0x60, 0, 0}, {0x4407, 0x04, 0, 0},
{0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
{0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0},
};
@@ -522,7 +538,7 @@ static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
{0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 70},
};
@@ -705,7 +721,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
/*
* After trying the various combinations, reading various
- * documentations spreaded around the net, and from the various
+ * documentation spread around the net, and from the various
* feedback, the clock tree is probably as follows:
*
* +--------------+
@@ -1030,12 +1046,42 @@ static int ov5640_set_dvp_pclk(struct ov5640_dev *sensor, unsigned long rate)
(ilog2(pclk_div) << 4));
}
+/* set JPEG framing sizes */
+static int ov5640_set_jpeg_timings(struct ov5640_dev *sensor,
+ const struct ov5640_mode_info *mode)
+{
+ int ret;
+
+ /*
+ * compression mode 3 timing
+ *
+ * Data is transmitted with programmable width (VFIFO_HSIZE).
+ * No padding done. Last line may have less data. Varying
+ * number of lines per frame, depending on amount of data.
+ */
+ ret = ov5640_mod_reg(sensor, OV5640_REG_JPG_MODE_SELECT, 0x7, 0x3);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_VFIFO_HSIZE, mode->hact);
+ if (ret < 0)
+ return ret;
+
+ return ov5640_write_reg16(sensor, OV5640_REG_VFIFO_VSIZE, mode->vact);
+}
+
/* download ov5640 settings to sensor through i2c */
static int ov5640_set_timings(struct ov5640_dev *sensor,
const struct ov5640_mode_info *mode)
{
int ret;
+ if (sensor->fmt.code == MEDIA_BUS_FMT_JPEG_1X8) {
+ ret = ov5640_set_jpeg_timings(sensor, mode);
+ if (ret < 0)
+ return ret;
+ }
+
ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact);
if (ret < 0)
return ret;
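
One consequence of the JPEG path above: in compression mode 3 each frame carries a variable number of lines, so downstream buffer sizing can only rely on the VFIFO window programmed by ov5640_set_jpeg_timings(). A minimal sketch of a worst-case bound, under the assumption that the sensor never emits more than VFIFO_VSIZE lines of VFIFO_HSIZE bytes each (the helper name is hypothetical, not part of the patch):

static size_t ov5640_jpeg_max_frame_bytes(const struct ov5640_mode_info *mode)
{
	/* worst case: VFIFO_HSIZE bytes per line, up to VFIFO_VSIZE lines */
	return (size_t)mode->hact * mode->vact;
}
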
@@ -1893,7 +1939,7 @@ static void ov5640_reset(struct ov5640_dev *sensor)
usleep_range(1000, 2000);
gpiod_set_value_cansleep(sensor->reset_gpio, 0);
- usleep_range(5000, 10000);
+ usleep_range(20000, 25000);
}
static int ov5640_set_power_on(struct ov5640_dev *sensor)
@@ -2059,7 +2105,7 @@ static int ov5640_try_frame_interval(struct ov5640_dev *sensor,
u32 width, u32 height)
{
const struct ov5640_mode_info *mode;
- enum ov5640_frame_rate rate = OV5640_30_FPS;
+ enum ov5640_frame_rate rate = OV5640_15_FPS;
int minfps, maxfps, best_fps, fps;
int i;
@@ -2200,46 +2246,67 @@ static int ov5640_set_framefmt(struct ov5640_dev *sensor,
struct v4l2_mbus_framefmt *format)
{
int ret = 0;
- bool is_rgb = false;
bool is_jpeg = false;
- u8 val;
+ u8 fmt, mux;
switch (format->code) {
case MEDIA_BUS_FMT_UYVY8_2X8:
/* YUV422, UYVY */
- val = 0x3f;
+ fmt = 0x3f;
+ mux = OV5640_FMT_MUX_YUV422;
break;
case MEDIA_BUS_FMT_YUYV8_2X8:
/* YUV422, YUYV */
- val = 0x30;
+ fmt = 0x30;
+ mux = OV5640_FMT_MUX_YUV422;
break;
case MEDIA_BUS_FMT_RGB565_2X8_LE:
/* RGB565 {g[2:0],b[4:0]},{r[4:0],g[5:3]} */
- val = 0x6F;
- is_rgb = true;
+ fmt = 0x6F;
+ mux = OV5640_FMT_MUX_RGB;
break;
case MEDIA_BUS_FMT_RGB565_2X8_BE:
/* RGB565 {r[4:0],g[5:3]},{g[2:0],b[4:0]} */
- val = 0x61;
- is_rgb = true;
+ fmt = 0x61;
+ mux = OV5640_FMT_MUX_RGB;
break;
case MEDIA_BUS_FMT_JPEG_1X8:
/* YUV422, YUYV */
- val = 0x30;
+ fmt = 0x30;
+ mux = OV5640_FMT_MUX_YUV422;
is_jpeg = true;
break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ /* Raw, BGBG... / GRGR... */
+ fmt = 0x00;
+ mux = OV5640_FMT_MUX_RAW_DPC;
+ break;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ /* Raw bayer, GBGB... / RGRG... */
+ fmt = 0x01;
+ mux = OV5640_FMT_MUX_RAW_DPC;
+ break;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ /* Raw bayer, GRGR... / BGBG... */
+ fmt = 0x02;
+ mux = OV5640_FMT_MUX_RAW_DPC;
+ break;
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ /* Raw bayer, RGRG... / GBGB... */
+ fmt = 0x03;
+ mux = OV5640_FMT_MUX_RAW_DPC;
+ break;
default:
return -EINVAL;
}
/* FORMAT CONTROL00: YUV and RGB formatting */
- ret = ov5640_write_reg(sensor, OV5640_REG_FORMAT_CONTROL00, val);
+ ret = ov5640_write_reg(sensor, OV5640_REG_FORMAT_CONTROL00, fmt);
if (ret)
return ret;
/* FORMAT MUX CONTROL: ISP YUV or RGB */
- ret = ov5640_write_reg(sensor, OV5640_REG_ISP_FORMAT_MUX_CTRL,
- is_rgb ? 0x01 : 0x00);
+ ret = ov5640_write_reg(sensor, OV5640_REG_ISP_FORMAT_MUX_CTRL, mux);
if (ret)
return ret;
@@ -2407,10 +2474,41 @@ static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, bool auto_gain)
return ret;
}
+static const char * const test_pattern_menu[] = {
+ "Disabled",
+ "Color bars",
+ "Color bars w/ rolling bar",
+ "Color squares",
+ "Color squares w/ rolling bar",
+};
+
+#define OV5640_TEST_ENABLE BIT(7)
+#define OV5640_TEST_ROLLING BIT(6) /* rolling horizontal bar */
+#define OV5640_TEST_TRANSPARENT BIT(5)
+#define OV5640_TEST_SQUARE_BW BIT(4) /* black & white squares */
+#define OV5640_TEST_BAR_STANDARD (0 << 2)
+#define OV5640_TEST_BAR_VERT_CHANGE_1 (1 << 2)
+#define OV5640_TEST_BAR_HOR_CHANGE (2 << 2)
+#define OV5640_TEST_BAR_VERT_CHANGE_2 (3 << 2)
+#define OV5640_TEST_BAR (0 << 0)
+#define OV5640_TEST_RANDOM (1 << 0)
+#define OV5640_TEST_SQUARE (2 << 0)
+#define OV5640_TEST_BLACK (3 << 0)
+
+static const u8 test_pattern_val[] = {
+ 0,
+ OV5640_TEST_ENABLE | OV5640_TEST_BAR_VERT_CHANGE_1 |
+ OV5640_TEST_BAR,
+ OV5640_TEST_ENABLE | OV5640_TEST_ROLLING |
+ OV5640_TEST_BAR_VERT_CHANGE_1 | OV5640_TEST_BAR,
+ OV5640_TEST_ENABLE | OV5640_TEST_SQUARE,
+ OV5640_TEST_ENABLE | OV5640_TEST_ROLLING | OV5640_TEST_SQUARE,
+};
+
static int ov5640_set_ctrl_test_pattern(struct ov5640_dev *sensor, int value)
{
- return ov5640_mod_reg(sensor, OV5640_REG_PRE_ISP_TEST_SET1,
- 0xa4, value ? 0xa4 : 0);
+ return ov5640_write_reg(sensor, OV5640_REG_PRE_ISP_TEST_SET1,
+ test_pattern_val[value]);
}
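
For reference, test_pattern_val[] above resolves to the following register bytes; the switch from ov5640_mod_reg() to ov5640_write_reg() also means stale pattern bits are fully overwritten rather than OR-ed in:

/*
 *   "Disabled"                     -> 0x00
 *   "Color bars"                   -> 0x80 | 0x04        = 0x84
 *   "Color bars w/ rolling bar"    -> 0x80 | 0x40 | 0x04 = 0xc4
 *   "Color squares"                -> 0x80 | 0x02        = 0x82
 *   "Color squares w/ rolling bar" -> 0x80 | 0x40 | 0x02 = 0xc2
 */
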
static int ov5640_set_ctrl_light_freq(struct ov5640_dev *sensor, int value)
@@ -2551,11 +2649,6 @@ static const struct v4l2_ctrl_ops ov5640_ctrl_ops = {
.s_ctrl = ov5640_s_ctrl,
};
-static const char * const test_pattern_menu[] = {
- "Disabled",
- "Color bars",
-};
-
static int ov5640_init_controls(struct ov5640_dev *sensor)
{
const struct v4l2_ctrl_ops *ops = &ov5640_ctrl_ops;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 5d1b218bb7f0..c33fd584cb44 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -15,7 +15,7 @@
* Copyright (C) 2008 Magnus Damm
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
*
- * Hardware specific bits initialy based on former work by Matt Callow
+ * Hardware specific bits initially based on former work by Matt Callow
* drivers/media/video/omap/sensor_ov6650.c
* Copyright (C) 2006 Matt Callow
*
@@ -759,7 +759,7 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
/*
* Keep result to be used as tpf limit
- * for subseqent clock divider calculations
+ * for subsequent clock divider calculations
*/
priv->tpf.numerator = div;
priv->tpf.denominator = FRAME_RATE_MAX;
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index a70a6ff7b36e..a7d26b294eb5 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -160,10 +160,10 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define REG_GFIX 0x69 /* Fix gain control */
#define REG_DBLV 0x6b /* PLL control an debugging */
-#define DBLV_BYPASS 0x00 /* Bypass PLL */
-#define DBLV_X4 0x01 /* clock x4 */
-#define DBLV_X6 0x10 /* clock x6 */
-#define DBLV_X8 0x11 /* clock x8 */
+#define DBLV_BYPASS 0x0a /* Bypass PLL */
+#define DBLV_X4 0x4a /* clock x4 */
+#define DBLV_X6 0x8a /* clock x6 */
+#define DBLV_X8 0xca /* clock x8 */
#define REG_SCALING_XSC 0x70 /* Test pattern and horizontal scale factor */
#define TEST_PATTTERN_0 0x80
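
The corrected DBLV_* values above follow from the multiplier select living in DBLV[7:6]; the old defines mixed binary and hexadecimal notation (0x10 where binary 10 in bits [7:6], i.e. 0x80, was meant). Assuming the low bits 0x0a are the register's reserved default, the encoding is:

/*
 *   DBLV_BYPASS = (0x0 << 6) | 0x0a = 0x0a
 *   DBLV_X4     = (0x1 << 6) | 0x0a = 0x4a
 *   DBLV_X6     = (0x2 << 6) | 0x0a = 0x8a
 *   DBLV_X8     = (0x3 << 6) | 0x0a = 0xca
 */
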
@@ -241,7 +241,9 @@ struct ov7670_info {
};
struct v4l2_mbus_framefmt format;
struct ov7670_format_struct *fmt; /* Current format */
+ struct ov7670_win_size *wsize;
struct clk *clk;
+ int on;
struct gpio_desc *resetb_gpio;
struct gpio_desc *pwdn_gpio;
unsigned int mbus_config; /* Media bus configuration flags */
@@ -810,13 +812,25 @@ static void ov7675_get_framerate(struct v4l2_subdev *sd,
(4 * clkrc);
}
+static int ov7675_apply_framerate(struct v4l2_subdev *sd)
+{
+ struct ov7670_info *info = to_state(sd);
+ int ret;
+
+ ret = ov7670_write(sd, REG_CLKRC, info->clkrc);
+ if (ret < 0)
+ return ret;
+
+ return ov7670_write(sd, REG_DBLV,
+ info->pll_bypass ? DBLV_BYPASS : DBLV_X4);
+}
+
static int ov7675_set_framerate(struct v4l2_subdev *sd,
struct v4l2_fract *tpf)
{
struct ov7670_info *info = to_state(sd);
u32 clkrc;
int pll_factor;
- int ret;
/*
* The formula is fps = 5/4*pixclk for YUV/RGB and
@@ -825,19 +839,10 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
* pixclk = clock_speed / (clkrc + 1) * PLLfactor
*
*/
- if (info->pll_bypass) {
- pll_factor = 1;
- ret = ov7670_write(sd, REG_DBLV, DBLV_BYPASS);
- } else {
- pll_factor = PLL_FACTOR;
- ret = ov7670_write(sd, REG_DBLV, DBLV_X4);
- }
- if (ret < 0)
- return ret;
-
if (tpf->numerator == 0 || tpf->denominator == 0) {
clkrc = 0;
} else {
+ pll_factor = info->pll_bypass ? 1 : PLL_FACTOR;
clkrc = (5 * pll_factor * info->clock_speed * tpf->numerator) /
(4 * tpf->denominator);
if (info->fmt->mbus_code == MEDIA_BUS_FMT_SBGGR8_1X8)
@@ -859,11 +864,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
/* Recalculate frame rate */
ov7675_get_framerate(sd, tpf);
- ret = ov7670_write(sd, REG_CLKRC, info->clkrc);
- if (ret < 0)
- return ret;
-
- return ov7670_write(sd, REG_DBLV, DBLV_X4);
+ return ov7675_apply_framerate(sd);
}
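
Plugging typical numbers into the formula in the comment above makes the integer math concrete (illustrative, assuming a 24 MHz input clock and the x4 PLL path):

/*
 * Requesting 30 fps with fps = 5/4 * pixclk and
 * pixclk = clock_speed / (clkrc + 1) * PLLfactor:
 *
 *   pixclk    = 30 * 4 / 5      = 24 MHz
 *   clkrc + 1 = 24 / 24 * 4     = 4
 *
 * so the divider works out to 4 for the common 24 MHz / x4 case; how the
 * driver maps that divider onto the clkrc register (including any
 * off-by-one) is handled outside this hunk.
 */
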
static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
@@ -1004,48 +1005,20 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
return 0;
}
-/*
- * Set a format.
- */
-static int ov7670_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
+static int ov7670_apply_fmt(struct v4l2_subdev *sd)
{
- struct ov7670_format_struct *ovfmt;
- struct ov7670_win_size *wsize;
struct ov7670_info *info = to_state(sd);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- struct v4l2_mbus_framefmt *mbus_fmt;
-#endif
+ struct ov7670_win_size *wsize = info->wsize;
unsigned char com7, com10 = 0;
int ret;
- if (format->pad)
- return -EINVAL;
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL);
- if (ret)
- return ret;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
- *mbus_fmt = format->format;
- return 0;
-#else
- return -ENOTTY;
-#endif
- }
-
- ret = ov7670_try_fmt_internal(sd, &format->format, &ovfmt, &wsize);
- if (ret)
- return ret;
/*
* COM7 is a pain in the ass, it doesn't like to be read then
* quickly written afterward. But we have everything we need
* to set it absolutely here, as long as the format-specific
* register sets list it first.
*/
- com7 = ovfmt->regs[0].value;
+ com7 = info->fmt->regs[0].value;
com7 |= wsize->com7_bit;
ret = ov7670_write(sd, REG_COM7, com7);
if (ret)
@@ -1067,7 +1040,7 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
/*
* Now write the rest of the array. Also store start/stops
*/
- ret = ov7670_write_array(sd, ovfmt->regs + 1);
+ ret = ov7670_write_array(sd, info->fmt->regs + 1);
if (ret)
return ret;
@@ -1082,8 +1055,6 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
return ret;
}
- info->fmt = ovfmt;
-
/*
* If we're running RGB565, we must rewrite clkrc after setting
* the other parameters or the image looks poor. If we're *not*
@@ -1101,6 +1072,46 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
return 0;
}
+/*
+ * Set a format.
+ */
+static int ov7670_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov7670_info *info = to_state(sd);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *mbus_fmt;
+#endif
+ int ret;
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL);
+ if (ret)
+ return ret;
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ *mbus_fmt = format->format;
+ return 0;
+#else
+ return -ENOTTY;
+#endif
+ }
+
+ ret = ov7670_try_fmt_internal(sd, &format->format, &info->fmt, &info->wsize);
+ if (ret)
+ return ret;
+
+ ret = ov7670_apply_fmt(sd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int ov7670_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
@@ -1607,17 +1618,57 @@ static int ov7670_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regis
}
#endif
-static int ov7670_s_power(struct v4l2_subdev *sd, int on)
+static void ov7670_power_on(struct v4l2_subdev *sd)
{
struct ov7670_info *info = to_state(sd);
+ if (info->on)
+ return;
+
+ clk_prepare_enable(info->clk);
+
if (info->pwdn_gpio)
- gpiod_set_value(info->pwdn_gpio, !on);
- if (on && info->resetb_gpio) {
+ gpiod_set_value(info->pwdn_gpio, 0);
+ if (info->resetb_gpio) {
gpiod_set_value(info->resetb_gpio, 1);
usleep_range(500, 1000);
gpiod_set_value(info->resetb_gpio, 0);
+ }
+ if (info->pwdn_gpio || info->resetb_gpio || info->clk)
usleep_range(3000, 5000);
+
+ info->on = true;
+}
+
+static void ov7670_power_off(struct v4l2_subdev *sd)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ if (!info->on)
+ return;
+
+ clk_disable_unprepare(info->clk);
+
+ if (info->pwdn_gpio)
+ gpiod_set_value(info->pwdn_gpio, 1);
+
+ info->on = false;
+}
+
+static int ov7670_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ if (info->on == on)
+ return 0;
+
+ if (on) {
+ ov7670_power_on(sd);
+ ov7670_apply_fmt(sd);
+ ov7675_apply_framerate(sd);
+ v4l2_ctrl_handler_setup(&info->hdl);
+ } else {
+ ov7670_power_off(sd);
}
return 0;
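
Since power-off loses register state, s_power above replays the format, frame rate and controls on power-up. A bridge driver reaches it through the standard subdev call; a minimal, hypothetical caller sketch (not part of this patch):

	/* hypothetical bridge-driver usage */
	ret = v4l2_subdev_call(sd, core, s_power, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		return ret;
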
@@ -1652,6 +1703,7 @@ static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static const struct v4l2_subdev_core_ops ov7670_core_ops = {
.reset = ov7670_reset,
.init = ov7670_init,
+ .s_power = ov7670_s_power,
.log_status = v4l2_ctrl_subdev_log_status,
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
@@ -1801,11 +1853,7 @@ static int ov7670_probe(struct i2c_client *client,
if (config->clock_speed)
info->clock_speed = config->clock_speed;
- /*
- * It should be allowed for ov7670 too when it is migrated to
- * the new frame rate formula.
- */
- if (config->pll_bypass && id->driver_data != MODEL_OV7670)
+ if (config->pll_bypass)
info->pll_bypass = true;
if (config->pclk_hb_disable)
@@ -1820,24 +1868,21 @@ static int ov7670_probe(struct i2c_client *client,
else
return ret;
}
- if (info->clk) {
- ret = clk_prepare_enable(info->clk);
- if (ret)
- return ret;
+ ret = ov7670_init_gpio(client, info);
+ if (ret)
+ return ret;
+
+ ov7670_power_on(sd);
+
+ if (info->clk) {
info->clock_speed = clk_get_rate(info->clk) / 1000000;
if (info->clock_speed < 10 || info->clock_speed > 48) {
ret = -EINVAL;
- goto clk_disable;
+ goto power_off;
}
}
- ret = ov7670_init_gpio(client, info);
- if (ret)
- goto clk_disable;
-
- ov7670_s_power(sd, 1);
-
/* Make sure it's an ov7670 */
ret = ov7670_detect(sd);
if (ret) {
@@ -1851,6 +1896,7 @@ static int ov7670_probe(struct i2c_client *client,
info->devtype = &ov7670_devdata[id->driver_data];
info->fmt = &ov7670_formats[0];
+ info->wsize = &info->devtype->win_sizes[0];
ov7670_get_default_format(sd, &info->format);
@@ -1916,6 +1962,7 @@ static int ov7670_probe(struct i2c_client *client,
if (ret < 0)
goto entity_cleanup;
+ ov7670_power_off(sd);
return 0;
entity_cleanup:
@@ -1923,13 +1970,10 @@ entity_cleanup:
hdl_free:
v4l2_ctrl_handler_free(&info->hdl);
power_off:
- ov7670_s_power(sd, 0);
-clk_disable:
- clk_disable_unprepare(info->clk);
+ ov7670_power_off(sd);
return ret;
}
-
static int ov7670_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -1937,9 +1981,8 @@ static int ov7670_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
- clk_disable_unprepare(info->clk);
media_entity_cleanup(&info->sd.entity);
- ov7670_s_power(sd, 0);
+ ov7670_power_off(sd);
return 0;
}
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 177688afd9a6..dfece91ce96b 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -20,7 +20,7 @@
#define REG_BGAIN 0x01 /* blue gain */
#define REG_RGAIN 0x02 /* red gain */
#define REG_GGAIN 0x03 /* green gain */
-#define REG_REG04 0x04 /* analog setting, dont change*/
+#define REG_REG04 0x04 /* analog setting, don't change */
#define REG_BAVG 0x05 /* b channel average */
#define REG_GAVG 0x06 /* g channel average */
#define REG_RAVG 0x07 /* r channel average */
@@ -1101,6 +1101,9 @@ static int ov7740_probe(struct i2c_client *client,
if (ret)
return ret;
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
ret = ov7740_detect(ov7740);
if (ret)
goto error_detect;
@@ -1123,8 +1126,6 @@ static int ov7740_probe(struct i2c_client *client,
if (ret)
goto error_async_register;
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
return 0;
@@ -1134,6 +1135,8 @@ error_async_register:
error_init_controls:
ov7740_free_controls(ov7740);
error_detect:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
ov7740_set_power(ov7740, 0);
media_entity_cleanup(&ov7740->subdev.entity);
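
The reordering matters because the steps between detect and async registration may touch runtime-PM state; enabling runtime PM up front keeps the reference counting coherent on every exit path, and the error label now unwinds it symmetrically (this reading is inferred from the hunk, not stated in it):

/*
 * probe order: pm_runtime_set_active() -> pm_runtime_enable() ->
 *              detect / controls / async register -> pm_runtime_idle()
 * error path:  pm_runtime_disable() -> pm_runtime_set_suspended()
 */
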
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
new file mode 100644
index 000000000000..dbf1095b9440
--- /dev/null
+++ b/drivers/media/i2c/ov8856.c
@@ -0,0 +1,1268 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV8856_REG_VALUE_08BIT 1
+#define OV8856_REG_VALUE_16BIT 2
+#define OV8856_REG_VALUE_24BIT 3
+
+#define OV8856_LINK_FREQ_360MHZ 360000000ULL
+#define OV8856_LINK_FREQ_180MHZ 180000000ULL
+#define OV8856_SCLK 144000000ULL
+#define OV8856_MCLK 19200000
+#define OV8856_DATA_LANES 4
+#define OV8856_RGB_DEPTH 10
+
+#define OV8856_REG_CHIP_ID 0x300a
+#define OV8856_CHIP_ID 0x00885a
+
+#define OV8856_REG_MODE_SELECT 0x0100
+#define OV8856_MODE_STANDBY 0x00
+#define OV8856_MODE_STREAMING 0x01
+
+/* vertical-timings from sensor */
+#define OV8856_REG_VTS 0x380e
+#define OV8856_VTS_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define OV8856_REG_HTS 0x380c
+
+/* Exposure controls from sensor */
+#define OV8856_REG_EXPOSURE 0x3500
+#define OV8856_EXPOSURE_MIN 6
+#define OV8856_EXPOSURE_MAX_MARGIN 6
+#define OV8856_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV8856_REG_ANALOG_GAIN 0x3508
+#define OV8856_ANAL_GAIN_MIN 128
+#define OV8856_ANAL_GAIN_MAX 2047
+#define OV8856_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define OV8856_REG_MWB_R_GAIN 0x5019
+#define OV8856_REG_MWB_G_GAIN 0x501b
+#define OV8856_REG_MWB_B_GAIN 0x501d
+#define OV8856_DGTL_GAIN_MIN 0
+#define OV8856_DGTL_GAIN_MAX 4095
+#define OV8856_DGTL_GAIN_STEP 1
+#define OV8856_DGTL_GAIN_DEFAULT 1024
+
+/* Test Pattern Control */
+#define OV8856_REG_TEST_PATTERN 0x5e00
+#define OV8856_TEST_PATTERN_ENABLE BIT(7)
+#define OV8856_TEST_PATTERN_BAR_SHIFT 2
+
+#define to_ov8856(_sd) container_of(_sd, struct ov8856, sd)
+
+enum {
+ OV8856_LINK_FREQ_720MBPS,
+ OV8856_LINK_FREQ_360MBPS,
+};
+
+struct ov8856_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov8856_reg_list {
+ u32 num_of_regs;
+ const struct ov8856_reg *regs;
+};
+
+struct ov8856_link_freq_config {
+ const struct ov8856_reg_list reg_list;
+};
+
+struct ov8856_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Default vertical timing size */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct ov8856_reg_list reg_list;
+};
+
+static const struct ov8856_reg mipi_data_rate_720mbps[] = {
+ {0x0103, 0x01},
+ {0x0100, 0x00},
+ {0x0302, 0x4b},
+ {0x0303, 0x01},
+ {0x030b, 0x02},
+ {0x030d, 0x4b},
+ {0x031e, 0x0c},
+};
+
+static const struct ov8856_reg mipi_data_rate_360mbps[] = {
+ {0x0103, 0x01},
+ {0x0100, 0x00},
+ {0x0302, 0x4b},
+ {0x0303, 0x03},
+ {0x030b, 0x02},
+ {0x030d, 0x4b},
+ {0x031e, 0x0c},
+};
+
+static const struct ov8856_reg mode_3280x2464_regs[] = {
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x72},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x9a},
+ {0x3502, 0x20},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x20},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366e, 0x10},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x23},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x04},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x07},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xa6},
+ {0x3808, 0x0c},
+ {0x3809, 0xd0},
+ {0x380a, 0x09},
+ {0x380b, 0xa0},
+ {0x380c, 0x07},
+ {0x380d, 0x88},
+ {0x380e, 0x09},
+ {0x380f, 0xb8},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x00},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x10},
+ {0x3820, 0x80},
+ {0x3821, 0x46},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x17},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x0b},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x58},
+ {0x481f, 0x27},
+ {0x4837, 0x16},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x57},
+ {0x5001, 0x0a},
+ {0x5004, 0x04},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x5795, 0x02},
+ {0x5796, 0x20},
+ {0x5797, 0x20},
+ {0x5798, 0xd5},
+ {0x5799, 0xd5},
+ {0x579a, 0x00},
+ {0x579b, 0x50},
+ {0x579c, 0x00},
+ {0x579d, 0x2c},
+ {0x579e, 0x0c},
+ {0x579f, 0x40},
+ {0x57a0, 0x09},
+ {0x57a1, 0x40},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00}
+};
+
+static const struct ov8856_reg mode_1640x1232_regs[] = {
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x72},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x4c},
+ {0x3502, 0xe0},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x20},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366e, 0x08},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x27},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x14},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x07},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xa6},
+ {0x3808, 0x06},
+ {0x3809, 0x68},
+ {0x380a, 0x04},
+ {0x380b, 0xd0},
+ {0x380c, 0x0e},
+ {0x380d, 0xec},
+ {0x380e, 0x04},
+ {0x380f, 0xe8},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x00},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x10},
+ {0x3820, 0x90},
+ {0x3821, 0x67},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x17},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x58},
+ {0x481f, 0x27},
+ {0x4837, 0x16},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x57},
+ {0x5001, 0x0a},
+ {0x5004, 0x04},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x5795, 0x00},
+ {0x5796, 0x10},
+ {0x5797, 0x10},
+ {0x5798, 0x73},
+ {0x5799, 0x73},
+ {0x579a, 0x00},
+ {0x579b, 0x28},
+ {0x579c, 0x00},
+ {0x579d, 0x16},
+ {0x579e, 0x06},
+ {0x579f, 0x20},
+ {0x57a0, 0x04},
+ {0x57a1, 0xa0},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00}
+};
+
+static const char * const ov8856_test_pattern_menu[] = {
+ "Disabled",
+ "Standard Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Bottom-Top Darker Color Bar"
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV8856_LINK_FREQ_360MHZ,
+ OV8856_LINK_FREQ_180MHZ
+};
+
+static const struct ov8856_link_freq_config link_freq_configs[] = {
+ [OV8856_LINK_FREQ_720MBPS] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps),
+ .regs = mipi_data_rate_720mbps,
+ }
+ },
+ [OV8856_LINK_FREQ_360MBPS] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_360mbps),
+ .regs = mipi_data_rate_360mbps,
+ }
+ }
+};
+
+static const struct ov8856_mode supported_modes[] = {
+ {
+ .width = 3280,
+ .height = 2464,
+ .hts = 1928,
+ .vts_def = 2488,
+ .vts_min = 2488,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
+ .regs = mode_3280x2464_regs,
+ },
+ .link_freq_index = OV8856_LINK_FREQ_720MBPS,
+ },
+ {
+ .width = 1640,
+ .height = 1232,
+ .hts = 3820,
+ .vts_def = 1256,
+ .vts_min = 1256,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1640x1232_regs),
+ .regs = mode_1640x1232_regs,
+ },
+ .link_freq_index = OV8856_LINK_FREQ_360MBPS,
+ }
+};
+
+struct ov8856 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov8856_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV8856_DATA_LANES;
+
+ do_div(pixel_rate, OV8856_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
+static u64 to_pixels_per_line(u32 hts, u32 f_index)
+{
+ u64 ppl = hts * to_pixel_rate(f_index);
+
+ do_div(ppl, OV8856_SCLK);
+
+ return ppl;
+}
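
Worked numbers for the two helpers above, using only constants already defined in this file:

/*
 * At the 360 MHz link frequency:
 *   pixel_rate = 360000000 * 2 * 4 / 10 = 288000000 pix/s
 * For the 3280x2464 mode (hts = 1928):
 *   ppl = 1928 * 288000000 / 144000000 = 3856
 * which puts h_blank at 3856 - 3280 = 576 pixels.
 */
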
+
+static int ov8856_read_reg(struct ov8856 *ov8856, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int ov8856_write_reg(struct ov8856 *ov8856, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
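
To make the packing concrete, an illustrative 16-bit write of 0x0400 (the digital gain default) to OV8856_REG_MWB_G_GAIN (0x501b):

/*
 *   put_unaligned_be16(0x501b, buf)           -> buf[0..1] = 50 1b
 *   put_unaligned_be32(0x0400 << 16, buf + 2) -> buf[2..5] = 04 00 00 00
 *   i2c_master_send(client, buf, 4)           -> 50 1b 04 00 on the wire
 *
 * The val << 8 * (4 - len) shift left-aligns the value in the big-endian
 * 32-bit store, and only len + 2 bytes are sent.
 */
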
+
+static int ov8856_write_reg_list(struct ov8856 *ov8856,
+ const struct ov8856_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = ov8856_write_reg(ov8856, r_list->regs[i].address, 1,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x. error = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov8856_update_digital_gain(struct ov8856 *ov8856, u32 d_gain)
+{
+ int ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MWB_R_GAIN,
+ OV8856_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MWB_G_GAIN,
+ OV8856_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ return ov8856_write_reg(ov8856, OV8856_REG_MWB_B_GAIN,
+ OV8856_REG_VALUE_16BIT, d_gain);
+}
+
+static int ov8856_test_pattern(struct ov8856 *ov8856, u32 pattern)
+{
+ if (pattern)
+ pattern = (pattern - 1) << OV8856_TEST_PATTERN_BAR_SHIFT |
+ OV8856_TEST_PATTERN_ENABLE;
+
+ return ov8856_write_reg(ov8856, OV8856_REG_TEST_PATTERN,
+ OV8856_REG_VALUE_08BIT, pattern);
+}
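
The mapping from menu index to register value above resolves to (index 0 leaves the pattern generator disabled):

/*
 *   "Standard Color Bar"          -> (0 << 2) | 0x80 = 0x80
 *   "Top-Bottom Darker Color Bar" -> (1 << 2) | 0x80 = 0x84
 *   "Right-Left Darker Color Bar" -> (2 << 2) | 0x80 = 0x88
 *   "Bottom-Top Darker Color Bar" -> (3 << 2) | 0x80 = 0x8c
 */
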
+
+static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov8856 *ov8856 = container_of(ctrl->handler,
+ struct ov8856, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = ov8856->cur_mode->height + ctrl->val -
+ OV8856_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(ov8856->exposure,
+ ov8856->exposure->minimum,
+ exposure_max, ov8856->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 control values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov8856_write_reg(ov8856, OV8856_REG_ANALOG_GAIN,
+ OV8856_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov8856_update_digital_gain(ov8856, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ /* 4 least significant bits of exposure are fractional part */
+ ret = ov8856_write_reg(ov8856, OV8856_REG_EXPOSURE,
+ OV8856_REG_VALUE_24BIT, ctrl->val << 4);
+ break;
+
+ case V4L2_CID_VBLANK:
+ ret = ov8856_write_reg(ov8856, OV8856_REG_VTS,
+ OV8856_REG_VALUE_16BIT,
+ ov8856->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov8856_test_pattern(ov8856, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov8856_ctrl_ops = {
+ .s_ctrl = ov8856_set_ctrl,
+};
+
+static int ov8856_init_controls(struct ov8856 *ov8856)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+
+ ctrl_hdlr = &ov8856->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &ov8856->mutex;
+ ov8856->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov8856_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+ if (ov8856->link_freq)
+ ov8856->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ov8856->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ to_pixel_rate(OV8856_LINK_FREQ_720MBPS),
+ 1,
+ to_pixel_rate(OV8856_LINK_FREQ_720MBPS));
+ ov8856->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops,
+ V4L2_CID_VBLANK,
+ ov8856->cur_mode->vts_min - ov8856->cur_mode->height,
+ OV8856_VTS_MAX - ov8856->cur_mode->height, 1,
+ ov8856->cur_mode->vts_def - ov8856->cur_mode->height);
+ h_blank = to_pixels_per_line(ov8856->cur_mode->hts,
+ ov8856->cur_mode->link_freq_index) - ov8856->cur_mode->width;
+ ov8856->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (ov8856->hblank)
+ ov8856->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV8856_ANAL_GAIN_MIN, OV8856_ANAL_GAIN_MAX,
+ OV8856_ANAL_GAIN_STEP, OV8856_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV8856_DGTL_GAIN_MIN, OV8856_DGTL_GAIN_MAX,
+ OV8856_DGTL_GAIN_STEP, OV8856_DGTL_GAIN_DEFAULT);
+ exposure_max = ov8856->cur_mode->vts_def - OV8856_EXPOSURE_MAX_MARGIN;
+ ov8856->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV8856_EXPOSURE_MIN, exposure_max,
+ OV8856_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov8856_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov8856_test_pattern_menu) - 1,
+ 0, 0, ov8856_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov8856->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
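
For the default 3280x2464 mode, these control ranges evaluate to (arithmetic from the mode table and defines above):

/*
 *   vblank:   min = 2488 - 2464 = 24, default = 24, max = 0x7fff - 2464
 *   hblank:   fixed (read-only) at 3856 - 3280 = 576
 *   exposure: max = default = 2488 - 6 = 2482
 */
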
+
+static void ov8856_update_pad_format(const struct ov8856_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int ov8856_start_streaming(struct ov8856 *ov8856)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+ const struct ov8856_reg_list *reg_list;
+ int link_freq_index, ret;
+
+ link_freq_index = ov8856->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov8856_write_reg_list(ov8856, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &ov8856->cur_mode->reg_list;
+ ret = ov8856_write_reg_list(ov8856, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov8856->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+ OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
+ if (ret) {
+ dev_err(&client->dev, "failed to set stream");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ov8856_stop_streaming(struct ov8856 *ov8856)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+
+ if (ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+ OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY))
+ dev_err(&client->dev, "failed to set stream");
+}
+
+static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov8856 *ov8856 = to_ov8856(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (ov8856->streaming == enable)
+ return 0;
+
+ mutex_lock(&ov8856->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&ov8856->mutex);
+ return ret;
+ }
+
+ ret = ov8856_start_streaming(ov8856);
+ if (ret) {
+ enable = 0;
+ ov8856_stop_streaming(ov8856);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ ov8856_stop_streaming(ov8856);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov8856->streaming = enable;
+ mutex_unlock(&ov8856->mutex);
+
+ return ret;
+}
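
One detail above is the pm_runtime_put_noidle() on failure: pm_runtime_get_sync() increments the usage count even when the resume fails, so the error path must drop that reference explicitly. The idiom in isolation:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* get_sync bumped the count */
		return ret;
	}
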
+
+static int __maybe_unused ov8856_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov8856 *ov8856 = to_ov8856(sd);
+
+ mutex_lock(&ov8856->mutex);
+ if (ov8856->streaming)
+ ov8856_stop_streaming(ov8856);
+
+ mutex_unlock(&ov8856->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused ov8856_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov8856 *ov8856 = to_ov8856(sd);
+ int ret;
+
+ mutex_lock(&ov8856->mutex);
+ if (ov8856->streaming) {
+ ret = ov8856_start_streaming(ov8856);
+ if (ret) {
+ ov8856->streaming = false;
+ ov8856_stop_streaming(ov8856);
+ mutex_unlock(&ov8856->mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&ov8856->mutex);
+
+ return 0;
+}
+
+static int ov8856_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov8856 *ov8856 = to_ov8856(sd);
+ const struct ov8856_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&ov8856->mutex);
+ ov8856_update_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ } else {
+ ov8856->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov8856->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(ov8856->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(ov8856->vblank,
+ mode->vts_min - mode->height,
+ OV8856_VTS_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov8856->vblank, vblank_def);
+ h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
+ mode->width;
+ __v4l2_ctrl_modify_range(ov8856->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&ov8856->mutex);
+
+ return 0;
+}
+
+static int ov8856_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov8856 *ov8856 = to_ov8856(sd);
+
+ mutex_lock(&ov8856->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&ov8856->sd, cfg,
+ fmt->pad);
+ else
+ ov8856_update_pad_format(ov8856->cur_mode, &fmt->format);
+
+ mutex_unlock(&ov8856->mutex);
+
+ return 0;
+}
+
+static int ov8856_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* Only one bayer order GRBG is supported */
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov8856_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ov8856 *ov8856 = to_ov8856(sd);
+
+ mutex_lock(&ov8856->mutex);
+ ov8856_update_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ mutex_unlock(&ov8856->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov8856_video_ops = {
+ .s_stream = ov8856_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov8856_pad_ops = {
+ .set_fmt = ov8856_set_format,
+ .get_fmt = ov8856_get_format,
+ .enum_mbus_code = ov8856_enum_mbus_code,
+ .enum_frame_size = ov8856_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov8856_subdev_ops = {
+ .video = &ov8856_video_ops,
+ .pad = &ov8856_pad_ops,
+};
+
+static const struct media_entity_operations ov8856_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov8856_internal_ops = {
+ .open = ov8856_open,
+};
+
+static int ov8856_identify_module(struct ov8856 *ov8856)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+ int ret;
+ u32 val;
+
+ ret = ov8856_read_reg(ov8856, OV8856_REG_CHIP_ID,
+ OV8856_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV8856_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OV8856_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ov8856_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 mclk;
+ int ret;
+ unsigned int i, j;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (mclk != OV8856_MCLK) {
+ dev_err(dev, "external clock %d is not supported", mclk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV8856_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
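
For ov8856_check_hwcfg() to succeed, the firmware node has to advertise the 19.2 MHz clock, four CSI-2 data lanes and link frequencies matching the menu. An illustrative devicetree-style fragment (the driver itself probes via ACPI, where equivalent _DSD properties carry the same names):

/*
 *   clock-frequency = <19200000>;
 *   port {
 *           endpoint {
 *                   data-lanes = <1 2 3 4>;
 *                   link-frequencies = /bits/ 64 <360000000 180000000>;
 *           };
 *   };
 */
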
+
+static int ov8856_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov8856 *ov8856 = to_ov8856(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&ov8856->mutex);
+
+ return 0;
+}
+
+static int ov8856_probe(struct i2c_client *client)
+{
+ struct ov8856 *ov8856;
+ int ret;
+
+ ret = ov8856_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ ov8856 = devm_kzalloc(&client->dev, sizeof(*ov8856), GFP_KERNEL);
+ if (!ov8856)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
+ ret = ov8856_identify_module(ov8856);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&ov8856->mutex);
+ ov8856->cur_mode = &supported_modes[0];
+ ret = ov8856_init_controls(ov8856);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov8856->sd.internal_ops = &ov8856_internal_ops;
+ ov8856->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov8856->sd.entity.ops = &ov8856_subdev_entity_ops;
+ ov8856->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov8856->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov8856->sd.entity, 1, &ov8856->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&ov8856->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov8856->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov8856->sd.ctrl_handler);
+ mutex_destroy(&ov8856->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops ov8856_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov8856_suspend, ov8856_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov8856_acpi_ids[] = {
+ {"OVTI8856"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, ov8856_acpi_ids);
+#endif
+
+static struct i2c_driver ov8856_i2c_driver = {
+ .driver = {
+ .name = "ov8856",
+ .pm = &ov8856_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov8856_acpi_ids),
+ },
+ .probe_new = ov8856_probe,
+ .remove = ov8856_remove,
+};
+
+module_i2c_driver(ov8856_i2c_driver);
+
+MODULE_AUTHOR("Ben Kao <ben.kao@intel.com>");
+MODULE_DESCRIPTION("OmniVision OV8856 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/soc_camera/soc_ov9640.c b/drivers/media/i2c/ov9640.c
index eb91b8240083..d6831f28378b 100644
--- a/drivers/media/i2c/soc_camera/soc_ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OmniVision OV96xx Camera Driver
*
@@ -9,14 +10,11 @@
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
*
* Based on ov7670 and soc_camera_platform driver,
+ * transition from soc_camera to pxa_camera based on mt9m111
*
* Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
* Copyright (C) 2008 Magnus Damm
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/init.h>
@@ -27,10 +25,14 @@
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
-#include <media/soc_camera.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+
+#include <linux/gpio/consumer.h>
#include "ov9640.h"
@@ -159,7 +161,7 @@ static const struct ov9640_reg ov9640_regs_rgb[] = {
{ OV9640_MTXS, 0x65 },
};
-static u32 ov9640_codes[] = {
+static const u32 ov9640_codes[] = {
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
MEDIA_BUS_FMT_RGB565_2X8_LE,
@@ -269,21 +271,23 @@ static int ov9640_s_stream(struct v4l2_subdev *sd, int enable)
/* Set status of additional camera capabilities */
static int ov9640_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct ov9640_priv *priv = container_of(ctrl->handler, struct ov9640_priv, hdl);
+ struct ov9640_priv *priv = container_of(ctrl->handler,
+ struct ov9640_priv, hdl);
struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
switch (ctrl->id) {
case V4L2_CID_VFLIP:
if (ctrl->val)
return ov9640_reg_rmw(client, OV9640_MVFP,
- OV9640_MVFP_V, 0);
+ OV9640_MVFP_V, 0);
return ov9640_reg_rmw(client, OV9640_MVFP, 0, OV9640_MVFP_V);
case V4L2_CID_HFLIP:
if (ctrl->val)
return ov9640_reg_rmw(client, OV9640_MVFP,
- OV9640_MVFP_H, 0);
+ OV9640_MVFP_H, 0);
return ov9640_reg_rmw(client, OV9640_MVFP, 0, OV9640_MVFP_H);
}
+
return -EINVAL;
}
@@ -323,20 +327,33 @@ static int ov9640_set_register(struct v4l2_subdev *sd,
static int ov9640_s_power(struct v4l2_subdev *sd, int on)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct ov9640_priv *priv = to_ov9640_sensor(sd);
+ int ret = 0;
+
+ if (on) {
+ gpiod_set_value(priv->gpio_power, 1);
+ usleep_range(1000, 2000);
+ ret = v4l2_clk_enable(priv->clk);
+ usleep_range(1000, 2000);
+ gpiod_set_value(priv->gpio_reset, 0);
+ } else {
+ gpiod_set_value(priv->gpio_reset, 1);
+ usleep_range(1000, 2000);
+ v4l2_clk_disable(priv->clk);
+ usleep_range(1000, 2000);
+ gpiod_set_value(priv->gpio_power, 0);
+ }
- return soc_camera_set_power(&client->dev, ssdd, priv->clk, on);
+ return ret;
}
/* select nearest higher resolution for capture */
static void ov9640_res_roundup(u32 *width, u32 *height)
{
- int i;
+ unsigned int i;
enum { QQCIF, QQVGA, QCIF, QVGA, CIF, VGA, SXGA };
- static const int res_x[] = { 88, 160, 176, 320, 352, 640, 1280 };
- static const int res_y[] = { 72, 120, 144, 240, 288, 480, 960 };
+ static const u32 res_x[] = { 88, 160, 176, 320, 352, 640, 1280 };
+ static const u32 res_y[] = { 72, 120, 144, 240, 288, 480, 960 };
for (i = 0; i < ARRAY_SIZE(res_x); i++) {
if (res_x[i] >= *width && res_y[i] >= *height) {
@@ -379,8 +396,9 @@ static int ov9640_write_regs(struct i2c_client *client, u32 width,
u32 code, struct ov9640_reg_alt *alts)
{
const struct ov9640_reg *ov9640_regs, *matrix_regs;
- int ov9640_regs_len, matrix_regs_len;
- int i, ret;
+ unsigned int ov9640_regs_len, matrix_regs_len;
+ unsigned int i;
+ int ret;
u8 val;
/* select register configuration for given resolution */
@@ -454,7 +472,7 @@ static int ov9640_write_regs(struct i2c_client *client, u32 width,
/* write color matrix configuration into the module */
for (i = 0; i < matrix_regs_len; i++) {
ret = ov9640_reg_write(client, matrix_regs[i].reg,
- matrix_regs[i].val);
+ matrix_regs[i].val);
if (ret)
return ret;
}
@@ -465,17 +483,18 @@ static int ov9640_write_regs(struct i2c_client *client, u32 width,
/* program default register values */
static int ov9640_prog_dflt(struct i2c_client *client)
{
- int i, ret;
+ unsigned int i;
+ int ret;
for (i = 0; i < ARRAY_SIZE(ov9640_regs_dflt); i++) {
ret = ov9640_reg_write(client, ov9640_regs_dflt[i].reg,
- ov9640_regs_dflt[i].val);
+ ov9640_regs_dflt[i].val);
if (ret)
return ret;
}
/* wait for the changes to actually happen, 140ms are not enough yet */
- mdelay(150);
+ msleep(150);
return 0;
}
@@ -529,6 +548,7 @@ static int ov9640_set_fmt(struct v4l2_subdev *sd,
return ov9640_s_fmt(sd, mf);
cfg->try_fmt = *mf;
+
return 0;
}
@@ -540,6 +560,7 @@ static int ov9640_enum_mbus_code(struct v4l2_subdev *sd,
return -EINVAL;
code->code = ov9640_codes[code->index];
+
return 0;
}
@@ -630,14 +651,10 @@ static const struct v4l2_subdev_core_ops ov9640_core_ops = {
static int ov9640_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -666,41 +683,62 @@ static int ov9640_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov9640_priv *priv;
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!ssdd) {
- dev_err(&client->dev, "Missing platform_data for driver\n");
- return -EINVAL;
- }
-
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->gpio_power = devm_gpiod_get(&client->dev, "Camera power",
+ GPIOD_OUT_LOW);
+ if (IS_ERR_OR_NULL(priv->gpio_power)) {
+ ret = PTR_ERR(priv->gpio_power);
+ return ret;
+ }
+
+ priv->gpio_reset = devm_gpiod_get(&client->dev, "Camera reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR_OR_NULL(priv->gpio_reset)) {
+ ret = PTR_ERR(priv->gpio_reset);
+ return ret;
+ }
+
v4l2_i2c_subdev_init(&priv->subdev, client, &ov9640_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 2);
v4l2_ctrl_new_std(&priv->hdl, &ov9640_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&priv->hdl, &ov9640_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto ectrlinit;
+ }
+
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error)
- return priv->hdl.error;
priv->clk = v4l2_clk_get(&client->dev, "mclk");
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
- goto eclkget;
+ goto ectrlinit;
}
ret = ov9640_video_probe(client);
- if (ret) {
- v4l2_clk_put(priv->clk);
-eclkget:
- v4l2_ctrl_handler_free(&priv->hdl);
- }
+ if (ret)
+ goto eprobe;
+
+ priv->subdev.dev = &client->dev;
+ ret = v4l2_async_register_subdev(&priv->subdev);
+ if (ret)
+ goto eprobe;
+
+ return 0;
+
+eprobe:
+ v4l2_clk_put(priv->clk);
+ectrlinit:
+ v4l2_ctrl_handler_free(&priv->hdl);
return ret;
}
@@ -711,8 +749,9 @@ static int ov9640_remove(struct i2c_client *client)
struct ov9640_priv *priv = to_ov9640_sensor(sd);
v4l2_clk_put(priv->clk);
- v4l2_device_unregister_subdev(&priv->subdev);
+ v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
+
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/ov9640.h b/drivers/media/i2c/ov9640.h
index 65d13ff17536..a8ed6992c1a8 100644
--- a/drivers/media/i2c/soc_camera/ov9640.h
+++ b/drivers/media/i2c/ov9640.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OmniVision OV96xx Camera Header File
*
* Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_MEDIA_VIDEO_OV9640_H__
@@ -200,6 +197,8 @@ struct ov9640_priv {
struct v4l2_subdev subdev;
struct v4l2_ctrl_handler hdl;
struct v4l2_clk *clk;
+ struct gpio_desc *gpio_power;
+ struct gpio_desc *gpio_reset;
int model;
int revision;
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index f0587c0c0a72..eefd57ec2a73 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -45,8 +45,8 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
* OV9650/OV9652 register definitions
*/
#define REG_GAIN 0x00 /* Gain control, AGC[7:0] */
-#define REG_BLUE 0x01 /* AWB - Blue chanel gain */
-#define REG_RED 0x02 /* AWB - Red chanel gain */
+#define REG_BLUE 0x01 /* AWB - Blue channel gain */
+#define REG_RED 0x02 /* AWB - Red channel gain */
#define REG_VREF 0x03 /* [7:6] - AGC[9:8], [5:3]/[2:0] */
#define VREF_GAIN_MASK 0xc0 /* - VREF end/start low 3 bits */
#define REG_COM1 0x04
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index c461847ddae8..b52fe250f75f 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1431,7 +1431,7 @@ err:
for (++i; i < S5C73M3_MAX_SUPPLIES; i++) {
int r = regulator_enable(state->supplies[i].consumer);
if (r < 0)
- v4l2_err(&state->oif_sd, "Failed to reenable %s: %d\n",
+ v4l2_err(&state->oif_sd, "Failed to re-enable %s: %d\n",
state->supplies[i].supply, r);
}
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 79aa2740edc4..79c1894c2c83 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -263,8 +263,6 @@ static int s5k4ecgx_read(struct i2c_client *client, u32 addr, u16 *val)
ret = s5k4ecgx_i2c_write(client, REG_CMDRD_ADDRL, low);
if (!ret)
ret = s5k4ecgx_i2c_read(client, REG_CMDBUF0_ADDR, val);
- if (!ret)
- dev_err(&client->dev, "Failed to execute read command\n");
return ret;
}
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index ab26f549d716..f8630c4c2ef0 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -729,7 +729,7 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
* @s5k6aa: pointer to &struct s5k6aa describing the device
* @preset: s5kaa preset to be applied
*
- * Configure output resolution and color fromat, pixel clock
+ * Configure output resolution and color format, pixel clock
* frequency range, device frame rate type and frame period range.
*/
static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 6bc278aa31fc..88dc6baac639 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1766,7 +1766,7 @@ static int saa711x_detect_chip(struct i2c_client *client,
* exists. However, tests on a device labeled as:
* "GM7113C 1145" returned "10" on all 16 chip
* version (reg 0x00) reads. So, we need to also
- * accept at least verion 0. For now, let's just
+ * accept at least version 0. For now, let's just
* assume that a device that returns "0000" for
* the lower nibble is a gm7113c.
*/
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index 668c39cc29e8..86b8b65ea683 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -844,7 +844,7 @@ static void set_h_prescale(struct v4l2_subdev *sd,
if (i == count)
return;
- /* horizonal prescaling */
+ /* horizontal prescaling */
saa717x_write(sd, 0x60 + task_shift, vals[i].xpsc);
/* accumulation length */
saa717x_write(sd, 0x61 + task_shift, vals[i].xacl);
diff --git a/drivers/media/i2c/soc_camera/Makefile b/drivers/media/i2c/soc_camera/Makefile
deleted file mode 100644
index 09ae483b96ef..000000000000
--- a/drivers/media/i2c/soc_camera/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_SOC_CAMERA_MT9M001) += soc_mt9m001.o
-obj-$(CONFIG_SOC_CAMERA_MT9T112) += soc_mt9t112.o
-obj-$(CONFIG_SOC_CAMERA_MT9V022) += soc_mt9v022.o
-obj-$(CONFIG_SOC_CAMERA_OV5642) += soc_ov5642.o
-obj-$(CONFIG_SOC_CAMERA_OV772X) += soc_ov772x.o
-obj-$(CONFIG_SOC_CAMERA_OV9640) += soc_ov9640.o
-obj-$(CONFIG_SOC_CAMERA_OV9740) += soc_ov9740.o
-obj-$(CONFIG_SOC_CAMERA_RJ54N1) += soc_rj54n1cb0c.o
-obj-$(CONFIG_SOC_CAMERA_TW9910) += soc_tw9910.o
diff --git a/drivers/media/i2c/soc_camera/soc_mt9t112.c b/drivers/media/i2c/soc_camera/soc_mt9t112.c
deleted file mode 100644
index ea1ff270bc2d..000000000000
--- a/drivers/media/i2c/soc_camera/soc_mt9t112.c
+++ /dev/null
@@ -1,1157 +0,0 @@
-/*
- * mt9t112 Camera Driver
- *
- * Copyright (C) 2009 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on ov772x driver, mt9m111 driver,
- *
- * Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com>
- * Copyright (C) 2008, Robert Jarzmik <robert.jarzmik@free.fr>
- * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
- * Copyright (C) 2008 Magnus Damm
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/v4l2-mediabus.h>
-#include <linux/videodev2.h>
-
-#include <media/i2c/mt9t112.h>
-#include <media/soc_camera.h>
-#include <media/v4l2-clk.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-image-sizes.h>
-
-/* Define EXT_CLOCK to check PLL/clock info */
-/* #define EXT_CLOCK 24000000 */
-
-/************************************************************************
- macro
-************************************************************************/
-/*
- * frame size
- */
-#define MAX_WIDTH 2048
-#define MAX_HEIGHT 1536
-
-/*
- * macro of read/write
- */
-#define ECHECKER(ret, x) \
- do { \
- (ret) = (x); \
- if ((ret) < 0) \
- return (ret); \
- } while (0)
-
-#define mt9t112_reg_write(ret, client, a, b) \
- ECHECKER(ret, __mt9t112_reg_write(client, a, b))
-#define mt9t112_mcu_write(ret, client, a, b) \
- ECHECKER(ret, __mt9t112_mcu_write(client, a, b))
-
-#define mt9t112_reg_mask_set(ret, client, a, b, c) \
- ECHECKER(ret, __mt9t112_reg_mask_set(client, a, b, c))
-#define mt9t112_mcu_mask_set(ret, client, a, b, c) \
- ECHECKER(ret, __mt9t112_mcu_mask_set(client, a, b, c))
-
-#define mt9t112_reg_read(ret, client, a) \
- ECHECKER(ret, __mt9t112_reg_read(client, a))
-
-/*
- * Logical address
- */
-#define _VAR(id, offset, base) (base | (id & 0x1f) << 10 | (offset & 0x3ff))
-#define VAR(id, offset) _VAR(id, offset, 0x0000)
-#define VAR8(id, offset) _VAR(id, offset, 0x8000)
-
-/************************************************************************
- struct
-************************************************************************/
-struct mt9t112_format {
- u32 code;
- enum v4l2_colorspace colorspace;
- u16 fmt;
- u16 order;
-};
-
-struct mt9t112_priv {
- struct v4l2_subdev subdev;
- struct mt9t112_platform_data *info;
- struct i2c_client *client;
- struct v4l2_rect frame;
- struct v4l2_clk *clk;
- const struct mt9t112_format *format;
- int num_formats;
- u32 flags;
-/* for flags */
-#define INIT_DONE (1 << 0)
-#define PCLK_RISING (1 << 1)
-};
-
-/************************************************************************
- supported format
-************************************************************************/
-
-static const struct mt9t112_format mt9t112_cfmts[] = {
- {
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .fmt = 1,
- .order = 0,
- }, {
- .code = MEDIA_BUS_FMT_VYUY8_2X8,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .fmt = 1,
- .order = 1,
- }, {
- .code = MEDIA_BUS_FMT_YUYV8_2X8,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .fmt = 1,
- .order = 2,
- }, {
- .code = MEDIA_BUS_FMT_YVYU8_2X8,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .fmt = 1,
- .order = 3,
- }, {
- .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .fmt = 8,
- .order = 2,
- }, {
- .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .fmt = 4,
- .order = 2,
- },
-};
-
-/************************************************************************
- general function
-************************************************************************/
-static struct mt9t112_priv *to_mt9t112(const struct i2c_client *client)
-{
- return container_of(i2c_get_clientdata(client),
- struct mt9t112_priv,
- subdev);
-}
-
-static int __mt9t112_reg_read(const struct i2c_client *client, u16 command)
-{
- struct i2c_msg msg[2];
- u8 buf[2];
- int ret;
-
- command = swab16(command);
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 2;
- msg[0].buf = (u8 *)&command;
-
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 2;
- msg[1].buf = buf;
-
-	/*
-	 * If the return value of this function is < 0, it means
-	 * an error occurred; otherwise the lower 16 bits hold
-	 * the data that was read.
-	 */
- ret = i2c_transfer(client->adapter, msg, 2);
- if (ret < 0)
- return ret;
-
- memcpy(&ret, buf, 2);
- return swab16(ret);
-}
-
-static int __mt9t112_reg_write(const struct i2c_client *client,
- u16 command, u16 data)
-{
- struct i2c_msg msg;
- u8 buf[4];
- int ret;
-
- command = swab16(command);
- data = swab16(data);
-
- memcpy(buf + 0, &command, 2);
- memcpy(buf + 2, &data, 2);
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 4;
- msg.buf = buf;
-
-	/*
-	 * i2c_transfer() returns the number of messages transferred;
-	 * this function should return 0 on success.
-	 */
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret >= 0)
- ret = 0;
-
- return ret;
-}
-
-static int __mt9t112_reg_mask_set(const struct i2c_client *client,
- u16 command,
- u16 mask,
- u16 set)
-{
- int val = __mt9t112_reg_read(client, command);
- if (val < 0)
- return val;
-
- val &= ~mask;
- val |= set & mask;
-
- return __mt9t112_reg_write(client, command, val);
-}
-
-/* mcu access */
-static int __mt9t112_mcu_read(const struct i2c_client *client, u16 command)
-{
- int ret;
-
- ret = __mt9t112_reg_write(client, 0x098E, command);
- if (ret < 0)
- return ret;
-
- return __mt9t112_reg_read(client, 0x0990);
-}
-
-static int __mt9t112_mcu_write(const struct i2c_client *client,
- u16 command, u16 data)
-{
- int ret;
-
- ret = __mt9t112_reg_write(client, 0x098E, command);
- if (ret < 0)
- return ret;
-
- return __mt9t112_reg_write(client, 0x0990, data);
-}
-
-static int __mt9t112_mcu_mask_set(const struct i2c_client *client,
- u16 command,
- u16 mask,
- u16 set)
-{
- int val = __mt9t112_mcu_read(client, command);
- if (val < 0)
- return val;
-
- val &= ~mask;
- val |= set & mask;
-
- return __mt9t112_mcu_write(client, command, val);
-}
-
-static int mt9t112_reset(const struct i2c_client *client)
-{
- int ret;
-
- mt9t112_reg_mask_set(ret, client, 0x001a, 0x0001, 0x0001);
- msleep(1);
- mt9t112_reg_mask_set(ret, client, 0x001a, 0x0001, 0x0000);
-
- return ret;
-}
-
-#ifndef EXT_CLOCK
-#define CLOCK_INFO(a, b)
-#else
-#define CLOCK_INFO(a, b) mt9t112_clock_info(a, b)
-static int mt9t112_clock_info(const struct i2c_client *client, u32 ext)
-{
- int m, n, p1, p2, p3, p4, p5, p6, p7;
- u32 vco, clk;
- char *enable;
-
-	ext /= 1000;			/* kHz order */
-
- mt9t112_reg_read(n, client, 0x0012);
- p1 = n & 0x000f;
- n = n >> 4;
- p2 = n & 0x000f;
- n = n >> 4;
- p3 = n & 0x000f;
-
- mt9t112_reg_read(n, client, 0x002a);
- p4 = n & 0x000f;
- n = n >> 4;
- p5 = n & 0x000f;
- n = n >> 4;
- p6 = n & 0x000f;
-
- mt9t112_reg_read(n, client, 0x002c);
- p7 = n & 0x000f;
-
- mt9t112_reg_read(n, client, 0x0010);
- m = n & 0x00ff;
- n = (n >> 8) & 0x003f;
-
- enable = ((6000 > ext) || (54000 < ext)) ? "X" : "";
- dev_dbg(&client->dev, "EXTCLK : %10u K %s\n", ext, enable);
-
- vco = 2 * m * ext / (n+1);
- enable = ((384000 > vco) || (768000 < vco)) ? "X" : "";
- dev_dbg(&client->dev, "VCO : %10u K %s\n", vco, enable);
-
- clk = vco / (p1+1) / (p2+1);
- enable = (96000 < clk) ? "X" : "";
- dev_dbg(&client->dev, "PIXCLK : %10u K %s\n", clk, enable);
-
- clk = vco / (p3+1);
- enable = (768000 < clk) ? "X" : "";
- dev_dbg(&client->dev, "MIPICLK : %10u K %s\n", clk, enable);
-
- clk = vco / (p6+1);
- enable = (96000 < clk) ? "X" : "";
- dev_dbg(&client->dev, "MCU CLK : %10u K %s\n", clk, enable);
-
- clk = vco / (p5+1);
- enable = (54000 < clk) ? "X" : "";
- dev_dbg(&client->dev, "SOC CLK : %10u K %s\n", clk, enable);
-
- clk = vco / (p4+1);
- enable = (70000 < clk) ? "X" : "";
- dev_dbg(&client->dev, "Sensor CLK : %10u K %s\n", clk, enable);
-
- clk = vco / (p7+1);
- dev_dbg(&client->dev, "External sensor : %10u K\n", clk);
-
- clk = ext / (n+1);
- enable = ((2000 > clk) || (24000 < clk)) ? "X" : "";
- dev_dbg(&client->dev, "PFD : %10u K %s\n", clk, enable);
-
- return 0;
-}
-#endif
-
-static void mt9t112_frame_check(u32 *width, u32 *height, u32 *left, u32 *top)
-{
- soc_camera_limit_side(left, width, 0, 0, MAX_WIDTH);
- soc_camera_limit_side(top, height, 0, 0, MAX_HEIGHT);
-}
-
-static int mt9t112_set_a_frame_size(const struct i2c_client *client,
- u16 width,
- u16 height)
-{
- int ret;
- u16 wstart = (MAX_WIDTH - width) / 2;
- u16 hstart = (MAX_HEIGHT - height) / 2;
-
- /* (Context A) Image Width/Height */
- mt9t112_mcu_write(ret, client, VAR(26, 0), width);
- mt9t112_mcu_write(ret, client, VAR(26, 2), height);
-
- /* (Context A) Output Width/Height */
- mt9t112_mcu_write(ret, client, VAR(18, 43), 8 + width);
- mt9t112_mcu_write(ret, client, VAR(18, 45), 8 + height);
-
- /* (Context A) Start Row/Column */
- mt9t112_mcu_write(ret, client, VAR(18, 2), 4 + hstart);
- mt9t112_mcu_write(ret, client, VAR(18, 4), 4 + wstart);
-
- /* (Context A) End Row/Column */
- mt9t112_mcu_write(ret, client, VAR(18, 6), 11 + height + hstart);
- mt9t112_mcu_write(ret, client, VAR(18, 8), 11 + width + wstart);
-
- mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06);
-
- return ret;
-}
-
-static int mt9t112_set_pll_dividers(const struct i2c_client *client,
- u8 m, u8 n,
- u8 p1, u8 p2, u8 p3,
- u8 p4, u8 p5, u8 p6,
- u8 p7)
-{
- int ret;
- u16 val;
-
- /* N/M */
- val = (n << 8) |
- (m << 0);
- mt9t112_reg_mask_set(ret, client, 0x0010, 0x3fff, val);
-
- /* P1/P2/P3 */
- val = ((p3 & 0x0F) << 8) |
- ((p2 & 0x0F) << 4) |
- ((p1 & 0x0F) << 0);
- mt9t112_reg_mask_set(ret, client, 0x0012, 0x0fff, val);
-
- /* P4/P5/P6 */
- val = (0x7 << 12) |
- ((p6 & 0x0F) << 8) |
- ((p5 & 0x0F) << 4) |
- ((p4 & 0x0F) << 0);
- mt9t112_reg_mask_set(ret, client, 0x002A, 0x7fff, val);
-
- /* P7 */
- val = (0x1 << 12) |
- ((p7 & 0x0F) << 0);
- mt9t112_reg_mask_set(ret, client, 0x002C, 0x100f, val);
-
- return ret;
-}
-
-static int mt9t112_init_pll(const struct i2c_client *client)
-{
- struct mt9t112_priv *priv = to_mt9t112(client);
- int data, i, ret;
-
- mt9t112_reg_mask_set(ret, client, 0x0014, 0x003, 0x0001);
-
- /* PLL control: BYPASS PLL = 8517 */
- mt9t112_reg_write(ret, client, 0x0014, 0x2145);
-
- /* Replace these registers when new timing parameters are generated */
- mt9t112_set_pll_dividers(client,
- priv->info->divider.m,
- priv->info->divider.n,
- priv->info->divider.p1,
- priv->info->divider.p2,
- priv->info->divider.p3,
- priv->info->divider.p4,
- priv->info->divider.p5,
- priv->info->divider.p6,
- priv->info->divider.p7);
-
- /*
- * TEST_BYPASS on
- * PLL_ENABLE on
- * SEL_LOCK_DET on
- * TEST_BYPASS off
- */
- mt9t112_reg_write(ret, client, 0x0014, 0x2525);
- mt9t112_reg_write(ret, client, 0x0014, 0x2527);
- mt9t112_reg_write(ret, client, 0x0014, 0x3427);
- mt9t112_reg_write(ret, client, 0x0014, 0x3027);
-
- mdelay(10);
-
- /*
- * PLL_BYPASS off
- * Reference clock count
- * I2C Master Clock Divider
- */
- mt9t112_reg_write(ret, client, 0x0014, 0x3046);
- mt9t112_reg_write(ret, client, 0x0016, 0x0400); /* JPEG initialization workaround */
- mt9t112_reg_write(ret, client, 0x0022, 0x0190);
- mt9t112_reg_write(ret, client, 0x3B84, 0x0212);
-
- /* External sensor clock is PLL bypass */
- mt9t112_reg_write(ret, client, 0x002E, 0x0500);
-
- mt9t112_reg_mask_set(ret, client, 0x0018, 0x0002, 0x0002);
- mt9t112_reg_mask_set(ret, client, 0x3B82, 0x0004, 0x0004);
-
- /* MCU disabled */
- mt9t112_reg_mask_set(ret, client, 0x0018, 0x0004, 0x0004);
-
- /* out of standby */
- mt9t112_reg_mask_set(ret, client, 0x0018, 0x0001, 0);
-
- mdelay(50);
-
- /*
- * Standby Workaround
- * Disable Secondary I2C Pads
- */
- mt9t112_reg_write(ret, client, 0x0614, 0x0001);
- mdelay(1);
- mt9t112_reg_write(ret, client, 0x0614, 0x0001);
- mdelay(1);
- mt9t112_reg_write(ret, client, 0x0614, 0x0001);
- mdelay(1);
- mt9t112_reg_write(ret, client, 0x0614, 0x0001);
- mdelay(1);
- mt9t112_reg_write(ret, client, 0x0614, 0x0001);
- mdelay(1);
- mt9t112_reg_write(ret, client, 0x0614, 0x0001);
- mdelay(1);
-
-	/* Poll to verify we are out of standby; this bit must be polled. */
- for (i = 0; i < 100; i++) {
- mt9t112_reg_read(data, client, 0x0018);
- if (!(0x4000 & data))
- break;
-
- mdelay(10);
- }
-
- return ret;
-}
-
-static int mt9t112_init_setting(const struct i2c_client *client)
-{
-
- int ret;
-
- /* Adaptive Output Clock (A) */
- mt9t112_mcu_mask_set(ret, client, VAR(26, 160), 0x0040, 0x0000);
-
- /* Read Mode (A) */
- mt9t112_mcu_write(ret, client, VAR(18, 12), 0x0024);
-
- /* Fine Correction (A) */
- mt9t112_mcu_write(ret, client, VAR(18, 15), 0x00CC);
-
- /* Fine IT Min (A) */
- mt9t112_mcu_write(ret, client, VAR(18, 17), 0x01f1);
-
- /* Fine IT Max Margin (A) */
-	mt9t112_mcu_write(ret, client, VAR(18, 19), 0x00ff);
-
- /* Base Frame Lines (A) */
- mt9t112_mcu_write(ret, client, VAR(18, 29), 0x032D);
-
- /* Min Line Length (A) */
- mt9t112_mcu_write(ret, client, VAR(18, 31), 0x073a);
-
- /* Line Length (A) */
- mt9t112_mcu_write(ret, client, VAR(18, 37), 0x07d0);
-
- /* Adaptive Output Clock (B) */
- mt9t112_mcu_mask_set(ret, client, VAR(27, 160), 0x0040, 0x0000);
-
- /* Row Start (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 74), 0x004);
-
- /* Column Start (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 76), 0x004);
-
- /* Row End (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 78), 0x60B);
-
- /* Column End (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 80), 0x80B);
-
- /* Fine Correction (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 87), 0x008C);
-
- /* Fine IT Min (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 89), 0x01F1);
-
- /* Fine IT Max Margin (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 91), 0x00FF);
-
- /* Base Frame Lines (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 101), 0x0668);
-
- /* Min Line Length (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 103), 0x0AF0);
-
- /* Line Length (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 109), 0x0AF0);
-
-	/*
-	 * Flicker Detection registers.
-	 * This section should be replaced whenever a new timing file is
-	 * generated; all of the following registers need to be replaced.
-	 * They come from the Register Wizard, but the user may modify
-	 * them. For details, see the auto flicker detection tuning notes.
-	 */
-
- /* FD_FDPERIOD_SELECT */
- mt9t112_mcu_write(ret, client, VAR8(8, 5), 0x01);
-
- /* PRI_B_CONFIG_FD_ALGO_RUN */
- mt9t112_mcu_write(ret, client, VAR(27, 17), 0x0003);
-
- /* PRI_A_CONFIG_FD_ALGO_RUN */
- mt9t112_mcu_write(ret, client, VAR(26, 17), 0x0003);
-
- /*
- * AFD range detection tuning registers
- */
-
- /* search_f1_50 */
- mt9t112_mcu_write(ret, client, VAR8(18, 165), 0x25);
-
- /* search_f2_50 */
- mt9t112_mcu_write(ret, client, VAR8(18, 166), 0x28);
-
- /* search_f1_60 */
- mt9t112_mcu_write(ret, client, VAR8(18, 167), 0x2C);
-
- /* search_f2_60 */
- mt9t112_mcu_write(ret, client, VAR8(18, 168), 0x2F);
-
- /* period_50Hz (A) */
- mt9t112_mcu_write(ret, client, VAR8(18, 68), 0xBA);
-
- /* secret register by aptina */
- /* period_50Hz (A MSB) */
- mt9t112_mcu_write(ret, client, VAR8(18, 303), 0x00);
-
- /* period_60Hz (A) */
- mt9t112_mcu_write(ret, client, VAR8(18, 69), 0x9B);
-
- /* secret register by aptina */
- /* period_60Hz (A MSB) */
- mt9t112_mcu_write(ret, client, VAR8(18, 301), 0x00);
-
- /* period_50Hz (B) */
- mt9t112_mcu_write(ret, client, VAR8(18, 140), 0x82);
-
- /* secret register by aptina */
- /* period_50Hz (B) MSB */
- mt9t112_mcu_write(ret, client, VAR8(18, 304), 0x00);
-
- /* period_60Hz (B) */
- mt9t112_mcu_write(ret, client, VAR8(18, 141), 0x6D);
-
- /* secret register by aptina */
- /* period_60Hz (B) MSB */
- mt9t112_mcu_write(ret, client, VAR8(18, 302), 0x00);
-
- /* FD Mode */
- mt9t112_mcu_write(ret, client, VAR8(8, 2), 0x10);
-
- /* Stat_min */
- mt9t112_mcu_write(ret, client, VAR8(8, 9), 0x02);
-
- /* Stat_max */
- mt9t112_mcu_write(ret, client, VAR8(8, 10), 0x03);
-
- /* Min_amplitude */
- mt9t112_mcu_write(ret, client, VAR8(8, 12), 0x0A);
-
- /* RX FIFO Watermark (A) */
- mt9t112_mcu_write(ret, client, VAR(18, 70), 0x0014);
-
- /* RX FIFO Watermark (B) */
- mt9t112_mcu_write(ret, client, VAR(18, 142), 0x0014);
-
- /* MCLK: 16MHz
- * PCLK: 73MHz
- * CorePixCLK: 36.5 MHz
- */
- mt9t112_mcu_write(ret, client, VAR8(18, 0x0044), 133);
- mt9t112_mcu_write(ret, client, VAR8(18, 0x0045), 110);
- mt9t112_mcu_write(ret, client, VAR8(18, 0x008c), 130);
- mt9t112_mcu_write(ret, client, VAR8(18, 0x008d), 108);
-
- mt9t112_mcu_write(ret, client, VAR8(18, 0x00A5), 27);
- mt9t112_mcu_write(ret, client, VAR8(18, 0x00a6), 30);
- mt9t112_mcu_write(ret, client, VAR8(18, 0x00a7), 32);
- mt9t112_mcu_write(ret, client, VAR8(18, 0x00a8), 35);
-
- return ret;
-}
-
-static int mt9t112_auto_focus_setting(const struct i2c_client *client)
-{
- int ret;
-
- mt9t112_mcu_write(ret, client, VAR(12, 13), 0x000F);
- mt9t112_mcu_write(ret, client, VAR(12, 23), 0x0F0F);
- mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06);
-
- mt9t112_reg_write(ret, client, 0x0614, 0x0000);
-
- mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x05);
- mt9t112_mcu_write(ret, client, VAR8(12, 2), 0x02);
- mt9t112_mcu_write(ret, client, VAR(12, 3), 0x0002);
- mt9t112_mcu_write(ret, client, VAR(17, 3), 0x8001);
- mt9t112_mcu_write(ret, client, VAR(17, 11), 0x0025);
- mt9t112_mcu_write(ret, client, VAR(17, 13), 0x0193);
- mt9t112_mcu_write(ret, client, VAR8(17, 33), 0x18);
- mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x05);
-
- return ret;
-}
-
-static int mt9t112_auto_focus_trigger(const struct i2c_client *client)
-{
- int ret;
-
- mt9t112_mcu_write(ret, client, VAR8(12, 25), 0x01);
-
- return ret;
-}
-
-static int mt9t112_init_camera(const struct i2c_client *client)
-{
- int ret;
-
- ECHECKER(ret, mt9t112_reset(client));
-
- ECHECKER(ret, mt9t112_init_pll(client));
-
- ECHECKER(ret, mt9t112_init_setting(client));
-
- ECHECKER(ret, mt9t112_auto_focus_setting(client));
-
- mt9t112_reg_mask_set(ret, client, 0x0018, 0x0004, 0);
-
- /* Analog setting B */
- mt9t112_reg_write(ret, client, 0x3084, 0x2409);
- mt9t112_reg_write(ret, client, 0x3092, 0x0A49);
- mt9t112_reg_write(ret, client, 0x3094, 0x4949);
- mt9t112_reg_write(ret, client, 0x3096, 0x4950);
-
- /*
- * Disable adaptive clock
- * PRI_A_CONFIG_JPEG_OB_TX_CONTROL_VAR
- * PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR
- */
- mt9t112_mcu_write(ret, client, VAR(26, 160), 0x0A2E);
- mt9t112_mcu_write(ret, client, VAR(27, 160), 0x0A2E);
-
-	/* Configure Status in Status_before_length format and enable header */
- /* PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR */
- mt9t112_mcu_write(ret, client, VAR(27, 144), 0x0CB4);
-
- /* Enable JPEG in context B */
- /* PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR */
- mt9t112_mcu_write(ret, client, VAR8(27, 142), 0x01);
-
- /* Disable Dac_TXLO */
- mt9t112_reg_write(ret, client, 0x316C, 0x350F);
-
- /* Set max slew rates */
- mt9t112_reg_write(ret, client, 0x1E, 0x777);
-
- return ret;
-}
-
-/************************************************************************
- v4l2_subdev_core_ops
-************************************************************************/
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int mt9t112_g_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- reg->size = 2;
- mt9t112_reg_read(ret, client, reg->reg);
-
- reg->val = (__u64)ret;
-
- return 0;
-}
-
-static int mt9t112_s_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- mt9t112_reg_write(ret, client, reg->reg, reg->val);
-
- return ret;
-}
-#endif
-
-static int mt9t112_s_power(struct v4l2_subdev *sd, int on)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct mt9t112_priv *priv = to_mt9t112(client);
-
- return soc_camera_set_power(&client->dev, ssdd, priv->clk, on);
-}
-
-static const struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = mt9t112_g_register,
- .s_register = mt9t112_s_register,
-#endif
- .s_power = mt9t112_s_power,
-};
-
-
-/************************************************************************
- v4l2_subdev_video_ops
-************************************************************************/
-static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9t112_priv *priv = to_mt9t112(client);
- int ret = 0;
-
- if (!enable) {
-		/* FIXME
-		 *
-		 * If the user selects a large output size
-		 * and uses it for a long time,
-		 * the mt9t112 camera gets very warm.
-		 *
-		 * But the current driver cannot stop the mt9t112 camera,
-		 * so set a small size here to work around the problem.
-		 */
- mt9t112_set_a_frame_size(client, VGA_WIDTH, VGA_HEIGHT);
- return ret;
- }
-
- if (!(priv->flags & INIT_DONE)) {
- u16 param = PCLK_RISING & priv->flags ? 0x0001 : 0x0000;
-
- ECHECKER(ret, mt9t112_init_camera(client));
-
- /* Invert PCLK (Data sampled on falling edge of pixclk) */
- mt9t112_reg_write(ret, client, 0x3C20, param);
-
- mdelay(5);
-
- priv->flags |= INIT_DONE;
- }
-
- mt9t112_mcu_write(ret, client, VAR(26, 7), priv->format->fmt);
- mt9t112_mcu_write(ret, client, VAR(26, 9), priv->format->order);
- mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06);
-
- mt9t112_set_a_frame_size(client,
- priv->frame.width,
- priv->frame.height);
-
- ECHECKER(ret, mt9t112_auto_focus_trigger(client));
-
- dev_dbg(&client->dev, "format : %d\n", priv->format->code);
- dev_dbg(&client->dev, "size : %d x %d\n",
- priv->frame.width,
- priv->frame.height);
-
- CLOCK_INFO(client, EXT_CLOCK);
-
- return ret;
-}
-
-static int mt9t112_set_params(struct mt9t112_priv *priv,
- const struct v4l2_rect *rect,
- u32 code)
-{
- int i;
-
- /*
- * get color format
- */
- for (i = 0; i < priv->num_formats; i++)
- if (mt9t112_cfmts[i].code == code)
- break;
-
- if (i == priv->num_formats)
- return -EINVAL;
-
- priv->frame = *rect;
-
- /*
- * frame size check
- */
- mt9t112_frame_check(&priv->frame.width, &priv->frame.height,
- &priv->frame.left, &priv->frame.top);
-
- priv->format = mt9t112_cfmts + i;
-
- return 0;
-}
-
-static int mt9t112_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9t112_priv *priv = to_mt9t112(client);
-
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.left = 0;
- sel->r.top = 0;
- sel->r.width = MAX_WIDTH;
- sel->r.height = MAX_HEIGHT;
- return 0;
- case V4L2_SEL_TGT_CROP:
- sel->r = priv->frame;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int mt9t112_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9t112_priv *priv = to_mt9t112(client);
- const struct v4l2_rect *rect = &sel->r;
-
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
- sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- return mt9t112_set_params(priv, rect, priv->format->code);
-}
-
-static int mt9t112_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9t112_priv *priv = to_mt9t112(client);
-
- if (format->pad)
- return -EINVAL;
-
- mf->width = priv->frame.width;
- mf->height = priv->frame.height;
- mf->colorspace = priv->format->colorspace;
- mf->code = priv->format->code;
- mf->field = V4L2_FIELD_NONE;
-
- return 0;
-}
-
-static int mt9t112_s_fmt(struct v4l2_subdev *sd,
- struct v4l2_mbus_framefmt *mf)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9t112_priv *priv = to_mt9t112(client);
- struct v4l2_rect rect = {
- .width = mf->width,
- .height = mf->height,
- .left = priv->frame.left,
- .top = priv->frame.top,
- };
- int ret;
-
- ret = mt9t112_set_params(priv, &rect, mf->code);
-
- if (!ret)
- mf->colorspace = priv->format->colorspace;
-
- return ret;
-}
-
-static int mt9t112_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9t112_priv *priv = to_mt9t112(client);
- unsigned int top, left;
- int i;
-
- if (format->pad)
- return -EINVAL;
-
- for (i = 0; i < priv->num_formats; i++)
- if (mt9t112_cfmts[i].code == mf->code)
- break;
-
- if (i == priv->num_formats) {
- mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
- mf->colorspace = V4L2_COLORSPACE_JPEG;
- } else {
- mf->colorspace = mt9t112_cfmts[i].colorspace;
- }
-
- mt9t112_frame_check(&mf->width, &mf->height, &left, &top);
-
- mf->field = V4L2_FIELD_NONE;
-
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return mt9t112_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
- return 0;
-}
-
-static int mt9t112_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9t112_priv *priv = to_mt9t112(client);
-
- if (code->pad || code->index >= priv->num_formats)
- return -EINVAL;
-
- code->code = mt9t112_cfmts[code->index].code;
-
- return 0;
-}
-
-static int mt9t112_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
- cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH |
- V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
- cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
-
- return 0;
-}
-
-static int mt9t112_s_mbus_config(struct v4l2_subdev *sd,
- const struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct mt9t112_priv *priv = to_mt9t112(client);
-
- if (soc_camera_apply_board_flags(ssdd, cfg) & V4L2_MBUS_PCLK_SAMPLE_RISING)
- priv->flags |= PCLK_RISING;
-
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops mt9t112_subdev_video_ops = {
- .s_stream = mt9t112_s_stream,
- .g_mbus_config = mt9t112_g_mbus_config,
- .s_mbus_config = mt9t112_s_mbus_config,
-};
-
-static const struct v4l2_subdev_pad_ops mt9t112_subdev_pad_ops = {
- .enum_mbus_code = mt9t112_enum_mbus_code,
- .get_selection = mt9t112_get_selection,
- .set_selection = mt9t112_set_selection,
- .get_fmt = mt9t112_get_fmt,
- .set_fmt = mt9t112_set_fmt,
-};
-
-/************************************************************************
- i2c driver
-************************************************************************/
-static const struct v4l2_subdev_ops mt9t112_subdev_ops = {
- .core = &mt9t112_subdev_core_ops,
- .video = &mt9t112_subdev_video_ops,
- .pad = &mt9t112_subdev_pad_ops,
-};
-
-static int mt9t112_camera_probe(struct i2c_client *client)
-{
- struct mt9t112_priv *priv = to_mt9t112(client);
- const char *devname;
- int chipid;
- int ret;
-
- ret = mt9t112_s_power(&priv->subdev, 1);
- if (ret < 0)
- return ret;
-
- /*
- * check and show chip ID
- */
- mt9t112_reg_read(chipid, client, 0x0000);
-
- switch (chipid) {
- case 0x2680:
- devname = "mt9t111";
- priv->num_formats = 1;
- break;
- case 0x2682:
- devname = "mt9t112";
- priv->num_formats = ARRAY_SIZE(mt9t112_cfmts);
- break;
- default:
- dev_err(&client->dev, "Product ID error %04x\n", chipid);
- ret = -ENODEV;
- goto done;
- }
-
- dev_info(&client->dev, "%s chip ID %04x\n", devname, chipid);
-
-done:
- mt9t112_s_power(&priv->subdev, 0);
- return ret;
-}
-
-static int mt9t112_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
-{
- struct mt9t112_priv *priv;
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct v4l2_rect rect = {
- .width = VGA_WIDTH,
- .height = VGA_HEIGHT,
- .left = (MAX_WIDTH - VGA_WIDTH) / 2,
- .top = (MAX_HEIGHT - VGA_HEIGHT) / 2,
- };
- int ret;
-
- if (!ssdd || !ssdd->drv_priv) {
- dev_err(&client->dev, "mt9t112: missing platform data!\n");
- return -EINVAL;
- }
-
- priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->info = ssdd->drv_priv;
-
- v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
-
- priv->clk = v4l2_clk_get(&client->dev, "mclk");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
- ret = mt9t112_camera_probe(client);
-
- /* Cannot fail: using the default supported pixel code */
- if (!ret)
- mt9t112_set_params(priv, &rect, MEDIA_BUS_FMT_UYVY8_2X8);
- else
- v4l2_clk_put(priv->clk);
-
- return ret;
-}
-
-static int mt9t112_remove(struct i2c_client *client)
-{
- struct mt9t112_priv *priv = to_mt9t112(client);
-
- v4l2_clk_put(priv->clk);
- return 0;
-}
-
-static const struct i2c_device_id mt9t112_id[] = {
- { "mt9t112", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mt9t112_id);
-
-static struct i2c_driver mt9t112_i2c_driver = {
- .driver = {
- .name = "mt9t112",
- },
- .probe = mt9t112_probe,
- .remove = mt9t112_remove,
- .id_table = mt9t112_id,
-};
-
-module_i2c_driver(mt9t112_i2c_driver);
-
-MODULE_DESCRIPTION("SoC Camera driver for mt9t112");
-MODULE_AUTHOR("Kuninori Morimoto");
-MODULE_LICENSE("GPL v2");
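
Register access in the driver deleted above is funneled through the ECHECKER macro, which returns from the enclosing function on the first negative result, so the long init sequences need no per-call error boilerplate. A minimal self-contained sketch of the pattern, with a stubbed register write standing in for the real I2C accessors:

#include <stdio.h>

#define ECHECKER(ret, x)		\
	do {				\
		(ret) = (x);		\
		if ((ret) < 0)		\
			return (ret);	\
	} while (0)

/* Stub write: pretend every 16-bit register access succeeds. */
static int stub_reg_write(unsigned int reg, unsigned int val)
{
	return reg <= 0xffff ? 0 : -1;
}

/* Each ECHECKER line bails out of init_sequence() on the first error. */
static int init_sequence(void)
{
	int ret;

	ECHECKER(ret, stub_reg_write(0x0014, 0x2145));
	ECHECKER(ret, stub_reg_write(0x0014, 0x2525));
	ECHECKER(ret, stub_reg_write(0x0014, 0x2527));

	return ret;	/* 0 only if every write succeeded */
}

int main(void)
{
	printf("init_sequence() = %d\n", init_sequence());
	return 0;
}
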
diff --git a/drivers/media/i2c/soc_camera/soc_ov772x.c b/drivers/media/i2c/soc_camera/soc_ov772x.c
deleted file mode 100644
index fafd372527b2..000000000000
--- a/drivers/media/i2c/soc_camera/soc_ov772x.c
+++ /dev/null
@@ -1,1123 +0,0 @@
-/*
- * ov772x Camera Driver
- *
- * Copyright (C) 2008 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on ov7670 and soc_camera_platform driver,
- *
- * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
- * Copyright (C) 2008 Magnus Damm
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/v4l2-mediabus.h>
-#include <linux/videodev2.h>
-
-#include <media/i2c/ov772x.h>
-#include <media/soc_camera.h>
-#include <media/v4l2-clk.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-image-sizes.h>
-
-/*
- * register offset
- */
-#define GAIN 0x00 /* AGC - Gain control gain setting */
-#define BLUE 0x01 /* AWB - Blue channel gain setting */
-#define RED 0x02 /* AWB - Red channel gain setting */
-#define GREEN 0x03 /* AWB - Green channel gain setting */
-#define COM1 0x04 /* Common control 1 */
-#define BAVG 0x05 /* U/B Average Level */
-#define GAVG 0x06 /* Y/Gb Average Level */
-#define RAVG 0x07 /* V/R Average Level */
-#define AECH 0x08 /* Exposure Value - AEC MSBs */
-#define COM2 0x09 /* Common control 2 */
-#define PID 0x0A /* Product ID Number MSB */
-#define VER 0x0B /* Product ID Number LSB */
-#define COM3 0x0C /* Common control 3 */
-#define COM4 0x0D /* Common control 4 */
-#define COM5 0x0E /* Common control 5 */
-#define COM6 0x0F /* Common control 6 */
-#define AEC 0x10 /* Exposure Value */
-#define CLKRC 0x11 /* Internal clock */
-#define COM7 0x12 /* Common control 7 */
-#define COM8 0x13 /* Common control 8 */
-#define COM9 0x14 /* Common control 9 */
-#define COM10 0x15 /* Common control 10 */
-#define REG16 0x16 /* Register 16 */
-#define HSTART 0x17 /* Horizontal frame (HREF column) start high 8-bit */
-#define HSIZE 0x18 /* Horizontal sensor size */
-#define VSTART 0x19 /* Vertical frame (row) start high 8-bit */
-#define VSIZE 0x1A /* Vertical sensor size */
-#define PSHFT 0x1B /* Data format - pixel delay select */
-#define MIDH 0x1C /* Manufacturer ID byte - high */
-#define MIDL 0x1D /* Manufacturer ID byte - low */
-#define LAEC 0x1F /* Fine AEC value */
-#define COM11 0x20 /* Common control 11 */
-#define BDBASE 0x22 /* Banding filter Minimum AEC value */
-#define DBSTEP 0x23 /* Banding filter Maximum Step */
-#define AEW 0x24 /* AGC/AEC - Stable operating region (upper limit) */
-#define AEB 0x25 /* AGC/AEC - Stable operating region (lower limit) */
-#define VPT 0x26 /* AGC/AEC Fast mode operating region */
-#define REG28 0x28 /* Register 28 */
-#define HOUTSIZE 0x29 /* Horizontal data output size MSBs */
-#define EXHCH 0x2A /* Dummy pixel insert MSB */
-#define EXHCL 0x2B /* Dummy pixel insert LSB */
-#define VOUTSIZE 0x2C /* Vertical data output size MSBs */
-#define ADVFL 0x2D /* LSB of inserted dummy lines in vertical direction */
-#define ADVFH 0x2E /* MSB of inserted dummy lines in vertical direction */
-#define YAVE 0x2F /* Y/G Channel Average value */
-#define LUMHTH 0x30 /* Histogram AEC/AGC Luminance high level threshold */
-#define LUMLTH 0x31 /* Histogram AEC/AGC Luminance low level threshold */
-#define HREF 0x32 /* Image start and size control */
-#define DM_LNL 0x33 /* Dummy line low 8 bits */
-#define DM_LNH 0x34 /* Dummy line high 8 bits */
-#define ADOFF_B 0x35 /* AD offset compensation value for B channel */
-#define ADOFF_R 0x36 /* AD offset compensation value for R channel */
-#define ADOFF_GB 0x37 /* AD offset compensation value for Gb channel */
-#define ADOFF_GR 0x38 /* AD offset compensation value for Gr channel */
-#define OFF_B 0x39 /* Analog process B channel offset value */
-#define OFF_R 0x3A /* Analog process R channel offset value */
-#define OFF_GB 0x3B /* Analog process Gb channel offset value */
-#define OFF_GR 0x3C /* Analog process Gr channel offset value */
-#define COM12 0x3D /* Common control 12 */
-#define COM13 0x3E /* Common control 13 */
-#define COM14 0x3F /* Common control 14 */
-#define COM15 0x40 /* Common control 15 */
-#define COM16 0x41 /* Common control 16 */
-#define TGT_B 0x42 /* BLC blue channel target value */
-#define TGT_R 0x43 /* BLC red channel target value */
-#define TGT_GB 0x44 /* BLC Gb channel target value */
-#define TGT_GR 0x45 /* BLC Gr channel target value */
-/* for ov7720 */
-#define LCC0 0x46 /* Lens correction control 0 */
-#define LCC1 0x47 /* Lens correction option 1 - X coordinate */
-#define LCC2 0x48 /* Lens correction option 2 - Y coordinate */
-#define LCC3 0x49 /* Lens correction option 3 */
-#define LCC4 0x4A /* Lens correction option 4 - radius of the circular section */
-#define LCC5 0x4B /* Lens correction option 5 */
-#define LCC6 0x4C /* Lens correction option 6 */
-/* for ov7725 */
-#define LC_CTR 0x46 /* Lens correction control */
-#define LC_XC 0x47 /* X coordinate of lens correction center relative */
-#define LC_YC 0x48 /* Y coordinate of lens correction center relative */
-#define LC_COEF 0x49 /* Lens correction coefficient */
-#define LC_RADI 0x4A /* Lens correction radius */
-#define LC_COEFB 0x4B /* Lens B channel compensation coefficient */
-#define LC_COEFR 0x4C /* Lens R channel compensation coefficient */
-
-#define FIXGAIN 0x4D /* Analog fixed gain amplifier */
-#define AREF0 0x4E /* Sensor reference control */
-#define AREF1 0x4F /* Sensor reference current control */
-#define AREF2 0x50 /* Analog reference control */
-#define AREF3 0x51 /* ADC reference control */
-#define AREF4 0x52 /* ADC reference control */
-#define AREF5 0x53 /* ADC reference control */
-#define AREF6 0x54 /* Analog reference control */
-#define AREF7 0x55 /* Analog reference control */
-#define UFIX 0x60 /* U channel fixed value output */
-#define VFIX 0x61 /* V channel fixed value output */
-#define AWBB_BLK 0x62 /* AWB option for advanced AWB */
-#define AWB_CTRL0 0x63 /* AWB control byte 0 */
-#define DSP_CTRL1 0x64 /* DSP control byte 1 */
-#define DSP_CTRL2 0x65 /* DSP control byte 2 */
-#define DSP_CTRL3 0x66 /* DSP control byte 3 */
-#define DSP_CTRL4 0x67 /* DSP control byte 4 */
-#define AWB_BIAS 0x68 /* AWB BLC level clip */
-#define AWB_CTRL1 0x69 /* AWB control 1 */
-#define AWB_CTRL2 0x6A /* AWB control 2 */
-#define AWB_CTRL3 0x6B /* AWB control 3 */
-#define AWB_CTRL4 0x6C /* AWB control 4 */
-#define AWB_CTRL5 0x6D /* AWB control 5 */
-#define AWB_CTRL6 0x6E /* AWB control 6 */
-#define AWB_CTRL7 0x6F /* AWB control 7 */
-#define AWB_CTRL8 0x70 /* AWB control 8 */
-#define AWB_CTRL9 0x71 /* AWB control 9 */
-#define AWB_CTRL10 0x72 /* AWB control 10 */
-#define AWB_CTRL11 0x73 /* AWB control 11 */
-#define AWB_CTRL12 0x74 /* AWB control 12 */
-#define AWB_CTRL13 0x75 /* AWB control 13 */
-#define AWB_CTRL14 0x76 /* AWB control 14 */
-#define AWB_CTRL15 0x77 /* AWB control 15 */
-#define AWB_CTRL16 0x78 /* AWB control 16 */
-#define AWB_CTRL17 0x79 /* AWB control 17 */
-#define AWB_CTRL18 0x7A /* AWB control 18 */
-#define AWB_CTRL19 0x7B /* AWB control 19 */
-#define AWB_CTRL20 0x7C /* AWB control 20 */
-#define AWB_CTRL21 0x7D /* AWB control 21 */
-#define GAM1 0x7E /* Gamma Curve 1st segment input end point */
-#define GAM2 0x7F /* Gamma Curve 2nd segment input end point */
-#define GAM3 0x80 /* Gamma Curve 3rd segment input end point */
-#define GAM4 0x81 /* Gamma Curve 4th segment input end point */
-#define GAM5 0x82 /* Gamma Curve 5th segment input end point */
-#define GAM6 0x83 /* Gamma Curve 6th segment input end point */
-#define GAM7 0x84 /* Gamma Curve 7th segment input end point */
-#define GAM8 0x85 /* Gamma Curve 8th segment input end point */
-#define GAM9 0x86 /* Gamma Curve 9th segment input end point */
-#define GAM10 0x87 /* Gamma Curve 10th segment input end point */
-#define GAM11 0x88 /* Gamma Curve 11th segment input end point */
-#define GAM12 0x89 /* Gamma Curve 12th segment input end point */
-#define GAM13 0x8A /* Gamma Curve 13th segment input end point */
-#define GAM14 0x8B /* Gamma Curve 14th segment input end point */
-#define GAM15 0x8C /* Gamma Curve 15th segment input end point */
-#define SLOP 0x8D /* Gamma curve highest segment slope */
-#define DNSTH 0x8E /* De-noise threshold */
-#define EDGE_STRNGT 0x8F /* Edge strength control in manual mode */
-#define EDGE_TRSHLD 0x90 /* Edge threshold control in manual mode */
-#define DNSOFF 0x91 /* Auto De-noise threshold control */
-#define EDGE_UPPER 0x92 /* Edge strength upper limit in auto mode */
-#define EDGE_LOWER 0x93 /* Edge strength lower limit in auto mode */
-#define MTX1 0x94 /* Matrix coefficient 1 */
-#define MTX2 0x95 /* Matrix coefficient 2 */
-#define MTX3 0x96 /* Matrix coefficient 3 */
-#define MTX4 0x97 /* Matrix coefficient 4 */
-#define MTX5 0x98 /* Matrix coefficient 5 */
-#define MTX6 0x99 /* Matrix coefficient 6 */
-#define MTX_CTRL 0x9A /* Matrix control */
-#define BRIGHT 0x9B /* Brightness control */
-#define CNTRST 0x9C /* Contrast */
-#define CNTRST_CTRL 0x9D /* Contrast center */
-#define UVAD_J0 0x9E /* Auto UV adjust contrast 0 */
-#define UVAD_J1 0x9F /* Auto UV adjust contrast 1 */
-#define SCAL0 0xA0 /* Scaling control 0 */
-#define SCAL1 0xA1 /* Scaling control 1 */
-#define SCAL2 0xA2 /* Scaling control 2 */
-#define FIFODLYM 0xA3 /* FIFO manual mode delay control */
-#define FIFODLYA 0xA4 /* FIFO auto mode delay control */
-#define SDE 0xA6 /* Special digital effect control */
-#define USAT 0xA7 /* U component saturation control */
-#define VSAT 0xA8 /* V component saturation control */
-/* for ov7720 */
-#define HUE0 0xA9 /* Hue control 0 */
-#define HUE1 0xAA /* Hue control 1 */
-/* for ov7725 */
-#define HUECOS 0xA9 /* Cosine value */
-#define HUESIN 0xAA /* Sine value */
-
-#define SIGN 0xAB /* Sign bit for Hue and contrast */
-#define DSPAUTO 0xAC /* DSP auto function ON/OFF control */
-
-/*
- * register detail
- */
-
-/* COM2 */
-#define SOFT_SLEEP_MODE 0x10 /* Soft sleep mode */
- /* Output drive capability */
-#define OCAP_1x 0x00 /* 1x */
-#define OCAP_2x 0x01 /* 2x */
-#define OCAP_3x 0x02 /* 3x */
-#define OCAP_4x 0x03 /* 4x */
-
-/* COM3 */
-#define SWAP_MASK (SWAP_RGB | SWAP_YUV | SWAP_ML)
-#define IMG_MASK (VFLIP_IMG | HFLIP_IMG)
-
-#define VFLIP_IMG 0x80 /* Vertical flip image ON/OFF selection */
-#define HFLIP_IMG 0x40 /* Horizontal mirror image ON/OFF selection */
-#define SWAP_RGB 0x20 /* Swap B/R output sequence in RGB mode */
-#define SWAP_YUV 0x10 /* Swap Y/UV output sequence in YUV mode */
-#define SWAP_ML 0x08 /* Swap output MSB/LSB */
- /* Tri-state option for output clock */
-#define NOTRI_CLOCK 0x04 /* 0: Tri-state at this period */
- /* 1: No tri-state at this period */
- /* Tri-state option for output data */
-#define NOTRI_DATA 0x02 /* 0: Tri-state at this period */
- /* 1: No tri-state at this period */
-#define SCOLOR_TEST 0x01 /* Sensor color bar test pattern */
-
-/* COM4 */
- /* PLL frequency control */
-#define PLL_BYPASS 0x00 /* 00: Bypass PLL */
-#define PLL_4x 0x40 /* 01: PLL 4x */
-#define PLL_6x 0x80 /* 10: PLL 6x */
-#define PLL_8x 0xc0 /* 11: PLL 8x */
- /* AEC evaluate window */
-#define AEC_FULL 0x00 /* 00: Full window */
-#define AEC_1p2 0x10 /* 01: 1/2 window */
-#define AEC_1p4 0x20 /* 10: 1/4 window */
-#define AEC_2p3 0x30 /* 11: Low 2/3 window */
-
-/* COM5 */
-#define AFR_ON_OFF 0x80 /* Auto frame rate control ON/OFF selection */
-#define AFR_SPPED 0x40 /* Auto frame rate control speed selection */
- /* Auto frame rate max rate control */
-#define AFR_NO_RATE 0x00 /* No reduction of frame rate */
-#define AFR_1p2 0x10 /* Max reduction to 1/2 frame rate */
-#define AFR_1p4 0x20 /* Max reduction to 1/4 frame rate */
-#define AFR_1p8 0x30 /* Max reduction to 1/8 frame rate */
- /* Auto frame rate active point control */
-#define AF_2x 0x00 /* Add frame when AGC reaches 2x gain */
-#define AF_4x 0x04 /* Add frame when AGC reaches 4x gain */
-#define AF_8x 0x08 /* Add frame when AGC reaches 8x gain */
-#define AF_16x 0x0c /* Add frame when AGC reaches 16x gain */
- /* AEC max step control */
-#define AEC_NO_LIMIT 0x01 /* 0 : AEC increase step has limit */
- /* 1 : No limit to AEC increase step */
-
-/* COM7 */
- /* SCCB Register Reset */
-#define SCCB_RESET 0x80 /* 0 : No change */
- /* 1 : Resets all registers to default */
- /* Resolution selection */
-#define SLCT_MASK 0x40 /* Mask of VGA or QVGA */
-#define SLCT_VGA 0x00 /* 0 : VGA */
-#define SLCT_QVGA 0x40 /* 1 : QVGA */
-#define ITU656_ON_OFF 0x20 /* ITU656 protocol ON/OFF selection */
-#define SENSOR_RAW 0x10 /* Sensor RAW */
- /* RGB output format control */
-#define FMT_MASK 0x0c /* Mask of color format */
-#define FMT_GBR422 0x00 /* 00 : GBR 4:2:2 */
-#define FMT_RGB565 0x04 /* 01 : RGB 565 */
-#define FMT_RGB555 0x08 /* 10 : RGB 555 */
-#define FMT_RGB444 0x0c /* 11 : RGB 444 */
- /* Output format control */
-#define OFMT_MASK 0x03 /* Mask of output format */
-#define OFMT_YUV 0x00 /* 00 : YUV */
-#define OFMT_P_BRAW 0x01 /* 01 : Processed Bayer RAW */
-#define OFMT_RGB 0x02 /* 10 : RGB */
-#define OFMT_BRAW 0x03 /* 11 : Bayer RAW */
-
-/* COM8 */
-#define FAST_ALGO 0x80 /* Enable fast AGC/AEC algorithm */
- /* AEC Step size limit */
-#define UNLMT_STEP 0x40 /* 0 : Step size is limited */
- /* 1 : Unlimited step size */
-#define BNDF_ON_OFF 0x20 /* Banding filter ON/OFF */
-#define AEC_BND 0x10 /* Enable AEC below banding value */
-#define AEC_ON_OFF 0x08 /* Fine AEC ON/OFF control */
-#define AGC_ON 0x04 /* AGC Enable */
-#define AWB_ON 0x02 /* AWB Enable */
-#define AEC_ON 0x01 /* AEC Enable */
-
-/* COM9 */
-#define BASE_AECAGC 0x80 /* Histogram or average based AEC/AGC */
- /* Automatic gain ceiling - maximum AGC value */
-#define GAIN_2x 0x00 /* 000 : 2x */
-#define GAIN_4x 0x10 /* 001 : 4x */
-#define GAIN_8x 0x20 /* 010 : 8x */
-#define GAIN_16x 0x30 /* 011 : 16x */
-#define GAIN_32x 0x40 /* 100 : 32x */
-#define GAIN_64x 0x50 /* 101 : 64x */
-#define GAIN_128x 0x60 /* 110 : 128x */
-#define DROP_VSYNC 0x04 /* Drop VSYNC output of corrupt frame */
-#define DROP_HREF 0x02 /* Drop HREF output of corrupt frame */
-
-/* COM11 */
-#define SGLF_ON_OFF 0x02 /* Single frame ON/OFF selection */
-#define SGLF_TRIG 0x01 /* Single frame transfer trigger */
-
-/* HREF */
-#define HREF_VSTART_SHIFT 6 /* VSTART LSB */
-#define HREF_HSTART_SHIFT 4 /* HSTART 2 LSBs */
-#define HREF_VSIZE_SHIFT 2 /* VSIZE LSB */
-#define HREF_HSIZE_SHIFT 0 /* HSIZE 2 LSBs */
-
-/* EXHCH */
-#define EXHCH_VSIZE_SHIFT 2 /* VOUTSIZE LSB */
-#define EXHCH_HSIZE_SHIFT 0 /* HOUTSIZE 2 LSBs */
-
-/* DSP_CTRL1 */
-#define FIFO_ON 0x80 /* FIFO enable/disable selection */
-#define UV_ON_OFF 0x40 /* UV adjust function ON/OFF selection */
-#define YUV444_2_422 0x20 /* YUV444 to 422 UV channel option selection */
-#define CLR_MTRX_ON_OFF 0x10 /* Color matrix ON/OFF selection */
-#define INTPLT_ON_OFF 0x08 /* Interpolation ON/OFF selection */
-#define GMM_ON_OFF 0x04 /* Gamma function ON/OFF selection */
-#define AUTO_BLK_ON_OFF 0x02 /* Black defect auto correction ON/OFF */
-#define AUTO_WHT_ON_OFF 0x01 /* White defect auto correction ON/OFF */
-
-/* DSP_CTRL3 */
-#define UV_MASK 0x80 /* UV output sequence option */
-#define UV_ON 0x80 /* ON */
-#define UV_OFF 0x00 /* OFF */
-#define CBAR_MASK 0x20 /* DSP Color bar mask */
-#define CBAR_ON 0x20 /* ON */
-#define CBAR_OFF 0x00 /* OFF */
-
-/* DSP_CTRL4 */
-#define DSP_OFMT_YUV 0x00
-#define DSP_OFMT_RGB 0x00
-#define DSP_OFMT_RAW8 0x02
-#define DSP_OFMT_RAW10 0x03
-
-/* DSPAUTO (DSP Auto Function ON/OFF Control) */
-#define AWB_ACTRL 0x80 /* AWB auto threshold control */
-#define DENOISE_ACTRL 0x40 /* De-noise auto threshold control */
-#define EDGE_ACTRL 0x20 /* Edge enhancement auto strength control */
-#define UV_ACTRL 0x10 /* UV adjust auto slope control */
-#define SCAL0_ACTRL 0x08 /* Auto scaling factor control */
-#define SCAL1_2_ACTRL 0x04 /* Auto scaling factor control */
-
-#define OV772X_MAX_WIDTH VGA_WIDTH
-#define OV772X_MAX_HEIGHT VGA_HEIGHT
-
-/*
- * ID
- */
-#define OV7720 0x7720
-#define OV7725 0x7721
-#define VERSION(pid, ver) ((pid<<8)|(ver&0xFF))
-
-/*
- * struct
- */
-
-struct ov772x_color_format {
- u32 code;
- enum v4l2_colorspace colorspace;
- u8 dsp3;
- u8 dsp4;
- u8 com3;
- u8 com7;
-};
-
-struct ov772x_win_size {
- char *name;
- unsigned char com7_bit;
- struct v4l2_rect rect;
-};
-
-struct ov772x_priv {
- struct v4l2_subdev subdev;
- struct v4l2_ctrl_handler hdl;
- struct v4l2_clk *clk;
- struct ov772x_camera_info *info;
- const struct ov772x_color_format *cfmt;
- const struct ov772x_win_size *win;
- unsigned short flag_vflip:1;
- unsigned short flag_hflip:1;
- /* band_filter = COM8[5] ? 256 - BDBASE : 0 */
- unsigned short band_filter;
-};
-
-/*
- * supported color format list
- */
-static const struct ov772x_color_format ov772x_cfmts[] = {
- {
- .code = MEDIA_BUS_FMT_YUYV8_2X8,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .dsp3 = 0x0,
- .dsp4 = DSP_OFMT_YUV,
- .com3 = SWAP_YUV,
- .com7 = OFMT_YUV,
- },
- {
- .code = MEDIA_BUS_FMT_YVYU8_2X8,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .dsp3 = UV_ON,
- .dsp4 = DSP_OFMT_YUV,
- .com3 = SWAP_YUV,
- .com7 = OFMT_YUV,
- },
- {
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .dsp3 = 0x0,
- .dsp4 = DSP_OFMT_YUV,
- .com3 = 0x0,
- .com7 = OFMT_YUV,
- },
- {
- .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .dsp3 = 0x0,
- .dsp4 = DSP_OFMT_YUV,
- .com3 = SWAP_RGB,
- .com7 = FMT_RGB555 | OFMT_RGB,
- },
- {
- .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .dsp3 = 0x0,
- .dsp4 = DSP_OFMT_YUV,
- .com3 = 0x0,
- .com7 = FMT_RGB555 | OFMT_RGB,
- },
- {
- .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .dsp3 = 0x0,
- .dsp4 = DSP_OFMT_YUV,
- .com3 = SWAP_RGB,
- .com7 = FMT_RGB565 | OFMT_RGB,
- },
- {
- .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .dsp3 = 0x0,
- .dsp4 = DSP_OFMT_YUV,
- .com3 = 0x0,
- .com7 = FMT_RGB565 | OFMT_RGB,
- },
- {
- /* Setting DSP4 to DSP_OFMT_RAW8 still gives 10-bit output,
- * regardless of the COM7 value. We can thus only support 10-bit
- * Bayer until someone figures it out.
- */
- .code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .dsp3 = 0x0,
- .dsp4 = DSP_OFMT_RAW10,
- .com3 = 0x0,
- .com7 = SENSOR_RAW | OFMT_BRAW,
- },
-};
-
-
-/*
- * window size list
- */
-
-static const struct ov772x_win_size ov772x_win_sizes[] = {
- {
- .name = "VGA",
- .com7_bit = SLCT_VGA,
- .rect = {
- .left = 140,
- .top = 14,
- .width = VGA_WIDTH,
- .height = VGA_HEIGHT,
- },
- }, {
- .name = "QVGA",
- .com7_bit = SLCT_QVGA,
- .rect = {
- .left = 252,
- .top = 6,
- .width = QVGA_WIDTH,
- .height = QVGA_HEIGHT,
- },
- },
-};
-
-/*
- * general function
- */
-
-static struct ov772x_priv *to_ov772x(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct ov772x_priv, subdev);
-}
-
-static inline int ov772x_read(struct i2c_client *client, u8 addr)
-{
- return i2c_smbus_read_byte_data(client, addr);
-}
-
-static inline int ov772x_write(struct i2c_client *client, u8 addr, u8 value)
-{
- return i2c_smbus_write_byte_data(client, addr, value);
-}
-
-static int ov772x_mask_set(struct i2c_client *client, u8 command, u8 mask,
- u8 set)
-{
- s32 val = ov772x_read(client, command);
- if (val < 0)
- return val;
-
- val &= ~mask;
- val |= set & mask;
-
- return ov772x_write(client, command, val);
-}
-
-static int ov772x_reset(struct i2c_client *client)
-{
- int ret;
-
- ret = ov772x_write(client, COM7, SCCB_RESET);
- if (ret < 0)
- return ret;
-
- msleep(1);
-
- return ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE);
-}
-
-/*
- * soc_camera_ops function
- */
-
-static int ov772x_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov772x_priv *priv = to_ov772x(sd);
-
- if (!enable) {
- ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE);
- return 0;
- }
-
- ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0);
-
- dev_dbg(&client->dev, "format %d, win %s\n",
- priv->cfmt->code, priv->win->name);
-
- return 0;
-}
-
-static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ov772x_priv *priv = container_of(ctrl->handler,
- struct ov772x_priv, hdl);
- struct v4l2_subdev *sd = &priv->subdev;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = 0;
- u8 val;
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- val = ctrl->val ? VFLIP_IMG : 0x00;
- priv->flag_vflip = ctrl->val;
- if (priv->info->flags & OV772X_FLAG_VFLIP)
- val ^= VFLIP_IMG;
- return ov772x_mask_set(client, COM3, VFLIP_IMG, val);
- case V4L2_CID_HFLIP:
- val = ctrl->val ? HFLIP_IMG : 0x00;
- priv->flag_hflip = ctrl->val;
- if (priv->info->flags & OV772X_FLAG_HFLIP)
- val ^= HFLIP_IMG;
- return ov772x_mask_set(client, COM3, HFLIP_IMG, val);
- case V4L2_CID_BAND_STOP_FILTER:
- if (!ctrl->val) {
- /* Switch the filter off, it is on now */
- ret = ov772x_mask_set(client, BDBASE, 0xff, 0xff);
- if (!ret)
- ret = ov772x_mask_set(client, COM8,
- BNDF_ON_OFF, 0);
- } else {
- /* Switch the filter on, set AEC low limit */
- val = 256 - ctrl->val;
- ret = ov772x_mask_set(client, COM8,
- BNDF_ON_OFF, BNDF_ON_OFF);
- if (!ret)
- ret = ov772x_mask_set(client, BDBASE,
- 0xff, val);
- }
- if (!ret)
- priv->band_filter = ctrl->val;
- return ret;
- }
-
- return -EINVAL;
-}
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int ov772x_g_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- reg->size = 1;
- if (reg->reg > 0xff)
- return -EINVAL;
-
- ret = ov772x_read(client, reg->reg);
- if (ret < 0)
- return ret;
-
- reg->val = (__u64)ret;
-
- return 0;
-}
-
-static int ov772x_s_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (reg->reg > 0xff ||
- reg->val > 0xff)
- return -EINVAL;
-
- return ov772x_write(client, reg->reg, reg->val);
-}
-#endif
-
-static int ov772x_s_power(struct v4l2_subdev *sd, int on)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct ov772x_priv *priv = to_ov772x(sd);
-
- return soc_camera_set_power(&client->dev, ssdd, priv->clk, on);
-}
-
-static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height)
-{
- const struct ov772x_win_size *win = &ov772x_win_sizes[0];
- u32 best_diff = UINT_MAX;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(ov772x_win_sizes); ++i) {
- u32 diff = abs(width - ov772x_win_sizes[i].rect.width)
- + abs(height - ov772x_win_sizes[i].rect.height);
- if (diff < best_diff) {
- best_diff = diff;
- win = &ov772x_win_sizes[i];
- }
- }
-
- return win;
-}
-
-static void ov772x_select_params(const struct v4l2_mbus_framefmt *mf,
- const struct ov772x_color_format **cfmt,
- const struct ov772x_win_size **win)
-{
- unsigned int i;
-
- /* Select a format. */
- *cfmt = &ov772x_cfmts[0];
-
- for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) {
- if (mf->code == ov772x_cfmts[i].code) {
- *cfmt = &ov772x_cfmts[i];
- break;
- }
- }
-
- /* Select a window size. */
- *win = ov772x_select_win(mf->width, mf->height);
-}
-
-static int ov772x_set_params(struct ov772x_priv *priv,
- const struct ov772x_color_format *cfmt,
- const struct ov772x_win_size *win)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
- int ret;
- u8 val;
-
- /*
- * reset hardware
- */
- ov772x_reset(client);
-
- /*
- * Edge Ctrl
- */
- if (priv->info->edgectrl.strength & OV772X_MANUAL_EDGE_CTRL) {
-
- /*
- * Manual Edge Control Mode
- *
- * Edge auto strength bit is set by default.
- * Remove it when manual mode.
- */
-
- ret = ov772x_mask_set(client, DSPAUTO, EDGE_ACTRL, 0x00);
- if (ret < 0)
- goto ov772x_set_fmt_error;
-
- ret = ov772x_mask_set(client,
- EDGE_TRSHLD, OV772X_EDGE_THRESHOLD_MASK,
- priv->info->edgectrl.threshold);
- if (ret < 0)
- goto ov772x_set_fmt_error;
-
- ret = ov772x_mask_set(client,
- EDGE_STRNGT, OV772X_EDGE_STRENGTH_MASK,
- priv->info->edgectrl.strength);
- if (ret < 0)
- goto ov772x_set_fmt_error;
-
- } else if (priv->info->edgectrl.upper > priv->info->edgectrl.lower) {
- /*
- * Auto Edge Control Mode
- *
- * set upper and lower limit
- */
- ret = ov772x_mask_set(client,
- EDGE_UPPER, OV772X_EDGE_UPPER_MASK,
- priv->info->edgectrl.upper);
- if (ret < 0)
- goto ov772x_set_fmt_error;
-
- ret = ov772x_mask_set(client,
- EDGE_LOWER, OV772X_EDGE_LOWER_MASK,
- priv->info->edgectrl.lower);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- }
-
- /* Format and window size */
- ret = ov772x_write(client, HSTART, win->rect.left >> 2);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- ret = ov772x_write(client, HSIZE, win->rect.width >> 2);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- ret = ov772x_write(client, VSTART, win->rect.top >> 1);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- ret = ov772x_write(client, VSIZE, win->rect.height >> 1);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- ret = ov772x_write(client, HOUTSIZE, win->rect.width >> 2);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- ret = ov772x_write(client, VOUTSIZE, win->rect.height >> 1);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- ret = ov772x_write(client, HREF,
- ((win->rect.top & 1) << HREF_VSTART_SHIFT) |
- ((win->rect.left & 3) << HREF_HSTART_SHIFT) |
- ((win->rect.height & 1) << HREF_VSIZE_SHIFT) |
- ((win->rect.width & 3) << HREF_HSIZE_SHIFT));
- if (ret < 0)
- goto ov772x_set_fmt_error;
- ret = ov772x_write(client, EXHCH,
- ((win->rect.height & 1) << EXHCH_VSIZE_SHIFT) |
- ((win->rect.width & 3) << EXHCH_HSIZE_SHIFT));
- if (ret < 0)
- goto ov772x_set_fmt_error;
-
- /*
- * set DSP_CTRL3
- */
- val = cfmt->dsp3;
- if (val) {
- ret = ov772x_mask_set(client,
- DSP_CTRL3, UV_MASK, val);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- }
-
- /* DSP_CTRL4: AEC reference point and DSP output format. */
- if (cfmt->dsp4) {
- ret = ov772x_write(client, DSP_CTRL4, cfmt->dsp4);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- }
-
- /*
- * set COM3
- */
- val = cfmt->com3;
- if (priv->info->flags & OV772X_FLAG_VFLIP)
- val |= VFLIP_IMG;
- if (priv->info->flags & OV772X_FLAG_HFLIP)
- val |= HFLIP_IMG;
- if (priv->flag_vflip)
- val ^= VFLIP_IMG;
- if (priv->flag_hflip)
- val ^= HFLIP_IMG;
-
- ret = ov772x_mask_set(client,
- COM3, SWAP_MASK | IMG_MASK, val);
- if (ret < 0)
- goto ov772x_set_fmt_error;
-
- /* COM7: Sensor resolution and output format control. */
- ret = ov772x_write(client, COM7, win->com7_bit | cfmt->com7);
- if (ret < 0)
- goto ov772x_set_fmt_error;
-
- /*
- * set COM8
- */
- if (priv->band_filter) {
- ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
- if (!ret)
- ret = ov772x_mask_set(client, BDBASE,
- 0xff, 256 - priv->band_filter);
- if (ret < 0)
- goto ov772x_set_fmt_error;
- }
-
- return ret;
-
-ov772x_set_fmt_error:
-
- ov772x_reset(client);
-
- return ret;
-}
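/*
 * An illustrative sketch of the register split performed above, not part
 * of the driver: horizontal positions and sizes are written shifted right
 * by two (dropping the two LSBs), vertical ones shifted right by one, and
 * the dropped low-order bits are re-packed into HREF. The 640x480 window
 * at (140, 14) is a hypothetical example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int left = 140, top = 14, width = 640, height = 480;

	printf("HSTART=0x%02x HSIZE=0x%02x\n", left >> 2, width >> 2);
	printf("VSTART=0x%02x VSIZE=0x%02x\n", top >> 1, height >> 1);
	/* HREF keeps (top & 1), (left & 3), (height & 1) and (width & 3) */
	printf("HREF bits: %u %u %u %u\n",
	       top & 1, left & 3, height & 1, width & 3);
	return 0;	/* HSTART=0x23 HSIZE=0xa0 VSTART=0x07 VSIZE=0xf0 */
}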
-
-static int ov772x_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- sel->r.left = 0;
- sel->r.top = 0;
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.width = OV772X_MAX_WIDTH;
- sel->r.height = OV772X_MAX_HEIGHT;
- return 0;
- case V4L2_SEL_TGT_CROP:
- sel->r.width = VGA_WIDTH;
- sel->r.height = VGA_HEIGHT;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int ov772x_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct ov772x_priv *priv = to_ov772x(sd);
-
- if (format->pad)
- return -EINVAL;
-
- mf->width = priv->win->rect.width;
- mf->height = priv->win->rect.height;
- mf->code = priv->cfmt->code;
- mf->colorspace = priv->cfmt->colorspace;
- mf->field = V4L2_FIELD_NONE;
-
- return 0;
-}
-
-static int ov772x_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct ov772x_priv *priv = to_ov772x(sd);
- struct v4l2_mbus_framefmt *mf = &format->format;
- const struct ov772x_color_format *cfmt;
- const struct ov772x_win_size *win;
- int ret;
-
- if (format->pad)
- return -EINVAL;
-
- ov772x_select_params(mf, &cfmt, &win);
-
- mf->code = cfmt->code;
- mf->width = win->rect.width;
- mf->height = win->rect.height;
- mf->field = V4L2_FIELD_NONE;
- mf->colorspace = cfmt->colorspace;
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
- return 0;
- }
-
- ret = ov772x_set_params(priv, cfmt, win);
- if (ret < 0)
- return ret;
-
- priv->win = win;
- priv->cfmt = cfmt;
- return 0;
-}
-
-static int ov772x_video_probe(struct ov772x_priv *priv)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
- u8 pid, ver;
- const char *devname;
- int ret;
-
- ret = ov772x_s_power(&priv->subdev, 1);
- if (ret < 0)
- return ret;
-
- /*
- * check and show product ID and manufacturer ID
- */
- pid = ov772x_read(client, PID);
- ver = ov772x_read(client, VER);
-
- switch (VERSION(pid, ver)) {
- case OV7720:
- devname = "ov7720";
- break;
- case OV7725:
- devname = "ov7725";
- break;
- default:
- dev_err(&client->dev,
- "Product ID error %x:%x\n", pid, ver);
- ret = -ENODEV;
- goto done;
- }
-
- dev_info(&client->dev,
- "%s Product ID %0x:%0x Manufacturer ID %x:%x\n",
- devname,
- pid,
- ver,
- ov772x_read(client, MIDH),
- ov772x_read(client, MIDL));
- ret = v4l2_ctrl_handler_setup(&priv->hdl);
-
-done:
- ov772x_s_power(&priv->subdev, 0);
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ov772x_ctrl_ops = {
- .s_ctrl = ov772x_s_ctrl,
-};
-
-static const struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = ov772x_g_register,
- .s_register = ov772x_s_register,
-#endif
- .s_power = ov772x_s_power,
-};
-
-static int ov772x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad || code->index >= ARRAY_SIZE(ov772x_cfmts))
- return -EINVAL;
-
- code->code = ov772x_cfmts[code->index].code;
- return 0;
-}
-
-static int ov772x_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
- cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
- V4L2_MBUS_DATA_ACTIVE_HIGH;
- cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
-
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
- .s_stream = ov772x_s_stream,
- .g_mbus_config = ov772x_g_mbus_config,
-};
-
-static const struct v4l2_subdev_pad_ops ov772x_subdev_pad_ops = {
- .enum_mbus_code = ov772x_enum_mbus_code,
- .get_selection = ov772x_get_selection,
- .get_fmt = ov772x_get_fmt,
- .set_fmt = ov772x_set_fmt,
-};
-
-static const struct v4l2_subdev_ops ov772x_subdev_ops = {
- .core = &ov772x_subdev_core_ops,
- .video = &ov772x_subdev_video_ops,
- .pad = &ov772x_subdev_pad_ops,
-};
-
-/*
- * i2c_driver function
- */
-
-static int ov772x_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
-{
- struct ov772x_priv *priv;
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- int ret;
-
- if (!ssdd || !ssdd->drv_priv) {
- dev_err(&client->dev, "OV772X: missing platform data!\n");
- return -EINVAL;
- }
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_PROTOCOL_MANGLING)) {
- dev_err(&adapter->dev,
- "I2C-Adapter doesn't support SMBUS_BYTE_DATA or PROTOCOL_MANGLING\n");
- return -EIO;
- }
- client->flags |= I2C_CLIENT_SCCB;
-
- priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->info = ssdd->drv_priv;
-
- v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops);
- v4l2_ctrl_handler_init(&priv->hdl, 3);
- v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
- V4L2_CID_BAND_STOP_FILTER, 0, 256, 1, 0);
- priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error)
- return priv->hdl.error;
-
- priv->clk = v4l2_clk_get(&client->dev, "mclk");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto eclkget;
- }
-
- ret = ov772x_video_probe(priv);
- if (ret < 0) {
- v4l2_clk_put(priv->clk);
-eclkget:
- v4l2_ctrl_handler_free(&priv->hdl);
- } else {
- priv->cfmt = &ov772x_cfmts[0];
- priv->win = &ov772x_win_sizes[0];
- }
-
- return ret;
-}
-
-static int ov772x_remove(struct i2c_client *client)
-{
- struct ov772x_priv *priv = to_ov772x(i2c_get_clientdata(client));
-
- v4l2_clk_put(priv->clk);
- v4l2_device_unregister_subdev(&priv->subdev);
- v4l2_ctrl_handler_free(&priv->hdl);
- return 0;
-}
-
-static const struct i2c_device_id ov772x_id[] = {
- { "ov772x", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ov772x_id);
-
-static struct i2c_driver ov772x_i2c_driver = {
- .driver = {
- .name = "ov772x",
- },
- .probe = ov772x_probe,
- .remove = ov772x_remove,
- .id_table = ov772x_id,
-};
-
-module_i2c_driver(ov772x_i2c_driver);
-
-MODULE_DESCRIPTION("SoC Camera driver for ov772x");
-MODULE_AUTHOR("Kuninori Morimoto");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/soc_camera/soc_rj54n1cb0c.c b/drivers/media/i2c/soc_camera/soc_rj54n1cb0c.c
deleted file mode 100644
index f0cb49a6167b..000000000000
--- a/drivers/media/i2c/soc_camera/soc_rj54n1cb0c.c
+++ /dev/null
@@ -1,1415 +0,0 @@
-/*
- * Driver for RJ54N1CB0C CMOS Image Sensor from Sharp
- *
- * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/v4l2-mediabus.h>
-#include <linux/videodev2.h>
-#include <linux/module.h>
-
-#include <media/i2c/rj54n1cb0c.h>
-#include <media/soc_camera.h>
-#include <media/v4l2-clk.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-ctrls.h>
-
-#define RJ54N1_DEV_CODE 0x0400
-#define RJ54N1_DEV_CODE2 0x0401
-#define RJ54N1_OUT_SEL 0x0403
-#define RJ54N1_XY_OUTPUT_SIZE_S_H 0x0404
-#define RJ54N1_X_OUTPUT_SIZE_S_L 0x0405
-#define RJ54N1_Y_OUTPUT_SIZE_S_L 0x0406
-#define RJ54N1_XY_OUTPUT_SIZE_P_H 0x0407
-#define RJ54N1_X_OUTPUT_SIZE_P_L 0x0408
-#define RJ54N1_Y_OUTPUT_SIZE_P_L 0x0409
-#define RJ54N1_LINE_LENGTH_PCK_S_H 0x040a
-#define RJ54N1_LINE_LENGTH_PCK_S_L 0x040b
-#define RJ54N1_LINE_LENGTH_PCK_P_H 0x040c
-#define RJ54N1_LINE_LENGTH_PCK_P_L 0x040d
-#define RJ54N1_RESIZE_N 0x040e
-#define RJ54N1_RESIZE_N_STEP 0x040f
-#define RJ54N1_RESIZE_STEP 0x0410
-#define RJ54N1_RESIZE_HOLD_H 0x0411
-#define RJ54N1_RESIZE_HOLD_L 0x0412
-#define RJ54N1_H_OBEN_OFS 0x0413
-#define RJ54N1_V_OBEN_OFS 0x0414
-#define RJ54N1_RESIZE_CONTROL 0x0415
-#define RJ54N1_STILL_CONTROL 0x0417
-#define RJ54N1_INC_USE_SEL_H 0x0425
-#define RJ54N1_INC_USE_SEL_L 0x0426
-#define RJ54N1_MIRROR_STILL_MODE 0x0427
-#define RJ54N1_INIT_START 0x0428
-#define RJ54N1_SCALE_1_2_LEV 0x0429
-#define RJ54N1_SCALE_4_LEV 0x042a
-#define RJ54N1_Y_GAIN 0x04d8
-#define RJ54N1_APT_GAIN_UP 0x04fa
-#define RJ54N1_RA_SEL_UL 0x0530
-#define RJ54N1_BYTE_SWAP 0x0531
-#define RJ54N1_OUT_SIGPO 0x053b
-#define RJ54N1_WB_SEL_WEIGHT_I 0x054e
-#define RJ54N1_BIT8_WB 0x0569
-#define RJ54N1_HCAPS_WB 0x056a
-#define RJ54N1_VCAPS_WB 0x056b
-#define RJ54N1_HCAPE_WB 0x056c
-#define RJ54N1_VCAPE_WB 0x056d
-#define RJ54N1_EXPOSURE_CONTROL 0x058c
-#define RJ54N1_FRAME_LENGTH_S_H 0x0595
-#define RJ54N1_FRAME_LENGTH_S_L 0x0596
-#define RJ54N1_FRAME_LENGTH_P_H 0x0597
-#define RJ54N1_FRAME_LENGTH_P_L 0x0598
-#define RJ54N1_PEAK_H 0x05b7
-#define RJ54N1_PEAK_50 0x05b8
-#define RJ54N1_PEAK_60 0x05b9
-#define RJ54N1_PEAK_DIFF 0x05ba
-#define RJ54N1_IOC 0x05ef
-#define RJ54N1_TG_BYPASS 0x0700
-#define RJ54N1_PLL_L 0x0701
-#define RJ54N1_PLL_N 0x0702
-#define RJ54N1_PLL_EN 0x0704
-#define RJ54N1_RATIO_TG 0x0706
-#define RJ54N1_RATIO_T 0x0707
-#define RJ54N1_RATIO_R 0x0708
-#define RJ54N1_RAMP_TGCLK_EN 0x0709
-#define RJ54N1_OCLK_DSP 0x0710
-#define RJ54N1_RATIO_OP 0x0711
-#define RJ54N1_RATIO_O 0x0712
-#define RJ54N1_OCLK_SEL_EN 0x0713
-#define RJ54N1_CLK_RST 0x0717
-#define RJ54N1_RESET_STANDBY 0x0718
-#define RJ54N1_FWFLG 0x07fe
-
-#define E_EXCLK (1 << 7)
-#define SOFT_STDBY (1 << 4)
-#define SEN_RSTX (1 << 2)
-#define TG_RSTX (1 << 1)
-#define DSP_RSTX (1 << 0)
-
-#define RESIZE_HOLD_SEL (1 << 2)
-#define RESIZE_GO (1 << 1)
-
-/*
- * When cropping, the camera automatically centers the cropped region; there
- * doesn't seem to be a way to specify an explicit location of the rectangle.
- */
-#define RJ54N1_COLUMN_SKIP 0
-#define RJ54N1_ROW_SKIP 0
-#define RJ54N1_MAX_WIDTH 1600
-#define RJ54N1_MAX_HEIGHT 1200
-
-#define PLL_L 2
-#define PLL_N 0x31
-
-/* I2C addresses: 0x50, 0x51, 0x60, 0x61 */
-
-/* RJ54N1CB0C has only one fixed colorspace per pixelcode */
-struct rj54n1_datafmt {
- u32 code;
- enum v4l2_colorspace colorspace;
-};
-
-/* Find a data format by a pixel code in an array */
-static const struct rj54n1_datafmt *rj54n1_find_datafmt(
- u32 code, const struct rj54n1_datafmt *fmt,
- int n)
-{
- int i;
- for (i = 0; i < n; i++)
- if (fmt[i].code == code)
- return fmt + i;
-
- return NULL;
-}
-
-static const struct rj54n1_datafmt rj54n1_colour_fmts[] = {
- {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
- {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
- {MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
- {MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
- {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
- {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB},
- {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
- {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB},
- {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
-};
-
-struct rj54n1_clock_div {
- u8 ratio_tg; /* can be 0 or an odd number */
- u8 ratio_t;
- u8 ratio_r;
- u8 ratio_op;
- u8 ratio_o;
-};
-
-struct rj54n1 {
- struct v4l2_subdev subdev;
- struct v4l2_ctrl_handler hdl;
- struct v4l2_clk *clk;
- struct rj54n1_clock_div clk_div;
- const struct rj54n1_datafmt *fmt;
- struct v4l2_rect rect; /* Sensor window */
- unsigned int tgclk_mhz;
- bool auto_wb;
- unsigned short width; /* Output window */
- unsigned short height;
- unsigned short resize; /* Sensor * 1024 / resize = Output */
- unsigned short scale;
- u8 bank;
-};
-
-struct rj54n1_reg_val {
- u16 reg;
- u8 val;
-};
-
-static const struct rj54n1_reg_val bank_4[] = {
- {0x417, 0},
- {0x42c, 0},
- {0x42d, 0xf0},
- {0x42e, 0},
- {0x42f, 0x50},
- {0x430, 0xf5},
- {0x431, 0x16},
- {0x432, 0x20},
- {0x433, 0},
- {0x434, 0xc8},
- {0x43c, 8},
- {0x43e, 0x90},
- {0x445, 0x83},
- {0x4ba, 0x58},
- {0x4bb, 4},
- {0x4bc, 0x20},
- {0x4db, 4},
- {0x4fe, 2},
-};
-
-static const struct rj54n1_reg_val bank_5[] = {
- {0x514, 0},
- {0x516, 0},
- {0x518, 0},
- {0x51a, 0},
- {0x51d, 0xff},
- {0x56f, 0x28},
- {0x575, 0x40},
- {0x5bc, 0x48},
- {0x5c1, 6},
- {0x5e5, 0x11},
- {0x5e6, 0x43},
- {0x5e7, 0x33},
- {0x5e8, 0x21},
- {0x5e9, 0x30},
- {0x5ea, 0x0},
- {0x5eb, 0xa5},
- {0x5ec, 0xff},
- {0x5fe, 2},
-};
-
-static const struct rj54n1_reg_val bank_7[] = {
- {0x70a, 0},
- {0x714, 0xff},
- {0x715, 0xff},
- {0x716, 0x1f},
- {0x7FE, 2},
-};
-
-static const struct rj54n1_reg_val bank_8[] = {
- {0x800, 0x00},
- {0x801, 0x01},
- {0x802, 0x61},
- {0x805, 0x00},
- {0x806, 0x00},
- {0x807, 0x00},
- {0x808, 0x00},
- {0x809, 0x01},
- {0x80A, 0x61},
- {0x80B, 0x00},
- {0x80C, 0x01},
- {0x80D, 0x00},
- {0x80E, 0x00},
- {0x80F, 0x00},
- {0x810, 0x00},
- {0x811, 0x01},
- {0x812, 0x61},
- {0x813, 0x00},
- {0x814, 0x11},
- {0x815, 0x00},
- {0x816, 0x41},
- {0x817, 0x00},
- {0x818, 0x51},
- {0x819, 0x01},
- {0x81A, 0x1F},
- {0x81B, 0x00},
- {0x81C, 0x01},
- {0x81D, 0x00},
- {0x81E, 0x11},
- {0x81F, 0x00},
- {0x820, 0x41},
- {0x821, 0x00},
- {0x822, 0x51},
- {0x823, 0x00},
- {0x824, 0x00},
- {0x825, 0x00},
- {0x826, 0x47},
- {0x827, 0x01},
- {0x828, 0x4F},
- {0x829, 0x00},
- {0x82A, 0x00},
- {0x82B, 0x00},
- {0x82C, 0x30},
- {0x82D, 0x00},
- {0x82E, 0x40},
- {0x82F, 0x00},
- {0x830, 0xB3},
- {0x831, 0x00},
- {0x832, 0xE3},
- {0x833, 0x00},
- {0x834, 0x00},
- {0x835, 0x00},
- {0x836, 0x00},
- {0x837, 0x00},
- {0x838, 0x00},
- {0x839, 0x01},
- {0x83A, 0x61},
- {0x83B, 0x00},
- {0x83C, 0x01},
- {0x83D, 0x00},
- {0x83E, 0x00},
- {0x83F, 0x00},
- {0x840, 0x00},
- {0x841, 0x01},
- {0x842, 0x61},
- {0x843, 0x00},
- {0x844, 0x1D},
- {0x845, 0x00},
- {0x846, 0x00},
- {0x847, 0x00},
- {0x848, 0x00},
- {0x849, 0x01},
- {0x84A, 0x1F},
- {0x84B, 0x00},
- {0x84C, 0x05},
- {0x84D, 0x00},
- {0x84E, 0x19},
- {0x84F, 0x01},
- {0x850, 0x21},
- {0x851, 0x01},
- {0x852, 0x5D},
- {0x853, 0x00},
- {0x854, 0x00},
- {0x855, 0x00},
- {0x856, 0x19},
- {0x857, 0x01},
- {0x858, 0x21},
- {0x859, 0x00},
- {0x85A, 0x00},
- {0x85B, 0x00},
- {0x85C, 0x00},
- {0x85D, 0x00},
- {0x85E, 0x00},
- {0x85F, 0x00},
- {0x860, 0xB3},
- {0x861, 0x00},
- {0x862, 0xE3},
- {0x863, 0x00},
- {0x864, 0x00},
- {0x865, 0x00},
- {0x866, 0x00},
- {0x867, 0x00},
- {0x868, 0x00},
- {0x869, 0xE2},
- {0x86A, 0x00},
- {0x86B, 0x01},
- {0x86C, 0x06},
- {0x86D, 0x00},
- {0x86E, 0x00},
- {0x86F, 0x00},
- {0x870, 0x60},
- {0x871, 0x8C},
- {0x872, 0x10},
- {0x873, 0x00},
- {0x874, 0xE0},
- {0x875, 0x00},
- {0x876, 0x27},
- {0x877, 0x01},
- {0x878, 0x00},
- {0x879, 0x00},
- {0x87A, 0x00},
- {0x87B, 0x03},
- {0x87C, 0x00},
- {0x87D, 0x00},
- {0x87E, 0x00},
- {0x87F, 0x00},
- {0x880, 0x00},
- {0x881, 0x00},
- {0x882, 0x00},
- {0x883, 0x00},
- {0x884, 0x00},
- {0x885, 0x00},
- {0x886, 0xF8},
- {0x887, 0x00},
- {0x888, 0x03},
- {0x889, 0x00},
- {0x88A, 0x64},
- {0x88B, 0x00},
- {0x88C, 0x03},
- {0x88D, 0x00},
- {0x88E, 0xB1},
- {0x88F, 0x00},
- {0x890, 0x03},
- {0x891, 0x01},
- {0x892, 0x1D},
- {0x893, 0x00},
- {0x894, 0x03},
- {0x895, 0x01},
- {0x896, 0x4B},
- {0x897, 0x00},
- {0x898, 0xE5},
- {0x899, 0x00},
- {0x89A, 0x01},
- {0x89B, 0x00},
- {0x89C, 0x01},
- {0x89D, 0x04},
- {0x89E, 0xC8},
- {0x89F, 0x00},
- {0x8A0, 0x01},
- {0x8A1, 0x01},
- {0x8A2, 0x61},
- {0x8A3, 0x00},
- {0x8A4, 0x01},
- {0x8A5, 0x00},
- {0x8A6, 0x00},
- {0x8A7, 0x00},
- {0x8A8, 0x00},
- {0x8A9, 0x00},
- {0x8AA, 0x7F},
- {0x8AB, 0x03},
- {0x8AC, 0x00},
- {0x8AD, 0x00},
- {0x8AE, 0x00},
- {0x8AF, 0x00},
- {0x8B0, 0x00},
- {0x8B1, 0x00},
- {0x8B6, 0x00},
- {0x8B7, 0x01},
- {0x8B8, 0x00},
- {0x8B9, 0x00},
- {0x8BA, 0x02},
- {0x8BB, 0x00},
- {0x8BC, 0xFF},
- {0x8BD, 0x00},
- {0x8FE, 2},
-};
-
-static const struct rj54n1_reg_val bank_10[] = {
- {0x10bf, 0x69}
-};
-
-/* Clock dividers: divider = register value + 1, datasheet defaults in comments */
-static const struct rj54n1_clock_div clk_div = {
- .ratio_tg = 3 /* default: 5 */,
- .ratio_t = 4 /* default: 1 */,
- .ratio_r = 4 /* default: 0 */,
- .ratio_op = 1 /* default: 5 */,
- .ratio_o = 9 /* default: 0 */,
-};
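/*
 * An illustrative sketch of the TGCLK derivation done at probe time: the
 * PLL multiplies mclk by PLL_N / PLL_L, then the TG dividers (register
 * value + 1) divide the result. The 12 MHz mclk is a hypothetical board
 * value.
 */
#include <stdio.h>

int main(void)
{
	unsigned long mclk = 12000000UL;		/* hypothetical */
	unsigned long pll = mclk / 2 * 0x31;		/* PLL_L = 2, PLL_N = 0x31 */
	unsigned long tgclk = pll / (3 + 1) / (4 + 1);	/* ratio_tg = 3, ratio_t = 4 */

	printf("TGCLK = %lu Hz\n", tgclk);		/* 14700000 */
	return 0;
}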
-
-static struct rj54n1 *to_rj54n1(const struct i2c_client *client)
-{
- return container_of(i2c_get_clientdata(client), struct rj54n1, subdev);
-}
-
-static int reg_read(struct i2c_client *client, const u16 reg)
-{
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- int ret;
-
- /* set bank */
- if (rj54n1->bank != reg >> 8) {
- dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8);
- ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8);
- if (ret < 0)
- return ret;
- rj54n1->bank = reg >> 8;
- }
- return i2c_smbus_read_byte_data(client, reg & 0xff);
-}
-
-static int reg_write(struct i2c_client *client, const u16 reg,
- const u8 data)
-{
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- int ret;
-
- /* set bank */
- if (rj54n1->bank != reg >> 8) {
- dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8);
- ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8);
- if (ret < 0)
- return ret;
- rj54n1->bank = reg >> 8;
- }
- dev_dbg(&client->dev, "[0x%x] = 0x%x\n", reg & 0xff, data);
- return i2c_smbus_write_byte_data(client, reg & 0xff, data);
-}
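/*
 * An illustrative decode of the addressing scheme used by reg_read() and
 * reg_write() above: the high byte of the 16-bit address is a bank
 * number, selected by writing it to index 0xff, and the low byte
 * addresses a register within that bank.
 */
#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x0531;	/* RJ54N1_BYTE_SWAP */

	printf("bank %u, offset 0x%02x\n", reg >> 8, reg & 0xff);
	return 0;			/* bank 5, offset 0x31 */
}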
-
-static int reg_set(struct i2c_client *client, const u16 reg,
- const u8 data, const u8 mask)
-{
- int ret;
-
- ret = reg_read(client, reg);
- if (ret < 0)
- return ret;
- return reg_write(client, reg, (ret & ~mask) | (data & mask));
-}
-
-static int reg_write_multiple(struct i2c_client *client,
- const struct rj54n1_reg_val *rv, const int n)
-{
- int i, ret;
-
- for (i = 0; i < n; i++) {
- ret = reg_write(client, rv->reg, rv->val);
- if (ret < 0)
- return ret;
- rv++;
- }
-
- return 0;
-}
-
-static int rj54n1_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad || code->index >= ARRAY_SIZE(rj54n1_colour_fmts))
- return -EINVAL;
-
- code->code = rj54n1_colour_fmts[code->index].code;
- return 0;
-}
-
-static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- /* Switch between preview and still shot modes */
- return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
-}
-
-static int rj54n1_set_rect(struct i2c_client *client,
- u16 reg_x, u16 reg_y, u16 reg_xy,
- u32 width, u32 height)
-{
- int ret;
-
- ret = reg_write(client, reg_xy,
- ((width >> 4) & 0x70) |
- ((height >> 8) & 7));
-
- if (!ret)
- ret = reg_write(client, reg_x, width & 0xff);
- if (!ret)
- ret = reg_write(client, reg_y, height & 0xff);
-
- return ret;
-}
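/*
 * An illustrative sketch of the packing done by rj54n1_set_rect(): bits
 * [10:8] of the width land in bits [6:4] of the shared high register,
 * bits [10:8] of the height in bits [2:0], and the low bytes go to their
 * own registers. The 1600x1200 sensor maximum is used as the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int w = 1600, h = 1200;

	printf("xy_h=0x%02x x_l=0x%02x y_l=0x%02x\n",
	       ((w >> 4) & 0x70) | ((h >> 8) & 7), w & 0xff, h & 0xff);
	return 0;	/* xy_h=0x64 x_l=0x40 y_l=0xb0 */
}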
-
-/*
- * Some commands, specifically certain initialisation sequences, require
- * a commit operation.
- */
-static int rj54n1_commit(struct i2c_client *client)
-{
- int ret = reg_write(client, RJ54N1_INIT_START, 1);
- msleep(10);
- if (!ret)
- ret = reg_write(client, RJ54N1_INIT_START, 0);
- return ret;
-}
-
-static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
- s32 *out_w, s32 *out_h);
-
-static int rj54n1_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- const struct v4l2_rect *rect = &sel->r;
- int dummy = 0, output_w, output_h,
- input_w = rect->width, input_h = rect->height;
- int ret;
-
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
- sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- /* arbitrary minimum width and height, edges unimportant */
- soc_camera_limit_side(&dummy, &input_w,
- RJ54N1_COLUMN_SKIP, 8, RJ54N1_MAX_WIDTH);
-
- soc_camera_limit_side(&dummy, &input_h,
- RJ54N1_ROW_SKIP, 8, RJ54N1_MAX_HEIGHT);
-
- output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize;
- output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize;
-
- dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n",
- input_w, input_h, rj54n1->resize, output_w, output_h);
-
- ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
- if (ret < 0)
- return ret;
-
- rj54n1->width = output_w;
- rj54n1->height = output_h;
- rj54n1->resize = ret;
- rj54n1->rect.width = input_w;
- rj54n1->rect.height = input_h;
-
- return 0;
-}
-
-static int rj54n1_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct rj54n1 *rj54n1 = to_rj54n1(client);
-
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.left = RJ54N1_COLUMN_SKIP;
- sel->r.top = RJ54N1_ROW_SKIP;
- sel->r.width = RJ54N1_MAX_WIDTH;
- sel->r.height = RJ54N1_MAX_HEIGHT;
- return 0;
- case V4L2_SEL_TGT_CROP:
- sel->r = rj54n1->rect;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int rj54n1_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct rj54n1 *rj54n1 = to_rj54n1(client);
-
- if (format->pad)
- return -EINVAL;
-
- mf->code = rj54n1->fmt->code;
- mf->colorspace = rj54n1->fmt->colorspace;
- mf->field = V4L2_FIELD_NONE;
- mf->width = rj54n1->width;
- mf->height = rj54n1->height;
-
- return 0;
-}
-
-/*
- * The actual geometry configuration routine. It scales the input window into
- * the output one, updates the window sizes and returns an error or the resize
- * coefficient on success. Note: we only use the "Fixed Scaling" on this camera.
- */
-static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
- s32 *out_w, s32 *out_h)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
- output_w = *out_w, output_h = *out_h;
- u16 inc_sel, wb_bit8, wb_left, wb_right, wb_top, wb_bottom;
- unsigned int peak, peak_50, peak_60;
- int ret;
-
-	/*
-	 * Cropping is problematic when the window is larger than 512x384 and
-	 * the output window is larger than half of the input one. In that
-	 * case we have to either reduce the input window to at most 512x384
-	 * or the output window to at most half of the input.
-	 */
- if (output_w > max(512U, input_w / 2)) {
- if (2 * output_w > RJ54N1_MAX_WIDTH) {
- input_w = RJ54N1_MAX_WIDTH;
- output_w = RJ54N1_MAX_WIDTH / 2;
- } else {
- input_w = output_w * 2;
- }
-
- dev_dbg(&client->dev, "Adjusted output width: in %u, out %u\n",
- input_w, output_w);
- }
-
- if (output_h > max(384U, input_h / 2)) {
- if (2 * output_h > RJ54N1_MAX_HEIGHT) {
- input_h = RJ54N1_MAX_HEIGHT;
- output_h = RJ54N1_MAX_HEIGHT / 2;
- } else {
- input_h = output_h * 2;
- }
-
- dev_dbg(&client->dev, "Adjusted output height: in %u, out %u\n",
- input_h, output_h);
- }
-
- /* Idea: use the read mode for snapshots, handle separate geometries */
- ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L,
- RJ54N1_Y_OUTPUT_SIZE_S_L,
- RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h);
- if (!ret)
- ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_P_L,
- RJ54N1_Y_OUTPUT_SIZE_P_L,
- RJ54N1_XY_OUTPUT_SIZE_P_H, output_w, output_h);
-
- if (ret < 0)
- return ret;
-
- if (output_w > input_w && output_h > input_h) {
- input_w = output_w;
- input_h = output_h;
-
- resize = 1024;
- } else {
- unsigned int resize_x, resize_y;
- resize_x = (input_w * 1024 + output_w / 2) / output_w;
- resize_y = (input_h * 1024 + output_h / 2) / output_h;
-
- /* We want max(resize_x, resize_y), check if it still fits */
- if (resize_x > resize_y &&
- (output_h * resize_x + 512) / 1024 > RJ54N1_MAX_HEIGHT)
- resize = (RJ54N1_MAX_HEIGHT * 1024 + output_h / 2) /
- output_h;
- else if (resize_y > resize_x &&
- (output_w * resize_y + 512) / 1024 > RJ54N1_MAX_WIDTH)
- resize = (RJ54N1_MAX_WIDTH * 1024 + output_w / 2) /
- output_w;
- else
- resize = max(resize_x, resize_y);
-
- /* Prohibited value ranges */
- switch (resize) {
- case 2040 ... 2047:
- resize = 2039;
- break;
- case 4080 ... 4095:
- resize = 4079;
- break;
- case 8160 ... 8191:
- resize = 8159;
- break;
- case 16320 ... 16384:
- resize = 16319;
- }
- }
-
- /* Set scaling */
- ret = reg_write(client, RJ54N1_RESIZE_HOLD_L, resize & 0xff);
- if (!ret)
- ret = reg_write(client, RJ54N1_RESIZE_HOLD_H, resize >> 8);
-
- if (ret < 0)
- return ret;
-
-	/*
-	 * Configure a skipping bitmask. The sensor will automatically select
-	 * a skipping value among the set bits. This is very unclear in the
-	 * datasheet too. I was told that in this register one enables all
-	 * skipping values that are required for a specific resize, and the
-	 * camera automatically selects which ones to use. But it is unclear
-	 * how to identify which skipping values are needed. Secondly, why
-	 * don't we just set all bits and let the camera choose? Would it
-	 * increase processing time and reduce the framerate? Using 0xfffc
-	 * for INC_USE_SEL doesn't seem to improve the image quality or
-	 * stability for larger frames (see the comment above), but I didn't
-	 * check the framerate.
-	 */
- skip = min(resize / 1024, 15U);
-
- inc_sel = 1 << skip;
-
- if (inc_sel <= 2)
- inc_sel = 0xc;
- else if (resize & 1023 && skip < 15)
- inc_sel |= 1 << (skip + 1);
-
- ret = reg_write(client, RJ54N1_INC_USE_SEL_L, inc_sel & 0xfc);
- if (!ret)
- ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8);
-
- if (!rj54n1->auto_wb) {
- /* Auto white balance window */
- wb_left = output_w / 16;
- wb_right = (3 * output_w / 4 - 3) / 4;
- wb_top = output_h / 16;
- wb_bottom = (3 * output_h / 4 - 3) / 4;
- wb_bit8 = ((wb_left >> 2) & 0x40) | ((wb_top >> 4) & 0x10) |
- ((wb_right >> 6) & 4) | ((wb_bottom >> 8) & 1);
-
- if (!ret)
- ret = reg_write(client, RJ54N1_BIT8_WB, wb_bit8);
- if (!ret)
- ret = reg_write(client, RJ54N1_HCAPS_WB, wb_left);
- if (!ret)
- ret = reg_write(client, RJ54N1_VCAPS_WB, wb_top);
- if (!ret)
- ret = reg_write(client, RJ54N1_HCAPE_WB, wb_right);
- if (!ret)
- ret = reg_write(client, RJ54N1_VCAPE_WB, wb_bottom);
- }
-
- /* Antiflicker */
- peak = 12 * RJ54N1_MAX_WIDTH * (1 << 14) * resize / rj54n1->tgclk_mhz /
- 10000;
- peak_50 = peak / 6;
- peak_60 = peak / 5;
-
- if (!ret)
- ret = reg_write(client, RJ54N1_PEAK_H,
- ((peak_50 >> 4) & 0xf0) | (peak_60 >> 8));
- if (!ret)
- ret = reg_write(client, RJ54N1_PEAK_50, peak_50);
- if (!ret)
- ret = reg_write(client, RJ54N1_PEAK_60, peak_60);
- if (!ret)
- ret = reg_write(client, RJ54N1_PEAK_DIFF, peak / 150);
-
- /* Start resizing */
- if (!ret)
- ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
- RESIZE_HOLD_SEL | RESIZE_GO | 1);
-
- if (ret < 0)
- return ret;
-
- /* Constant taken from manufacturer's example */
- msleep(230);
-
- ret = reg_write(client, RJ54N1_RESIZE_CONTROL, RESIZE_HOLD_SEL | 1);
- if (ret < 0)
- return ret;
-
- *in_w = (output_w * resize + 512) / 1024;
- *in_h = (output_h * resize + 512) / 1024;
- *out_w = output_w;
- *out_h = output_h;
-
- dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n",
- *in_w, *in_h, resize, output_w, output_h, skip);
-
- return resize;
-}
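/*
 * A worked example of the fixed-point coefficient computed above, not
 * part of the driver: resize = input * 1024 / output, rounded, so scaling
 * 1600 down to 800 gives 2048, and the skipping bitmask follows from
 * resize / 1024. The geometry is hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int input_w = 1600, output_w = 800;
	unsigned int resize = (input_w * 1024 + output_w / 2) / output_w;
	unsigned int skip = resize / 1024 < 15 ? resize / 1024 : 15;
	unsigned int inc_sel = 1 << skip;

	if (inc_sel <= 2)
		inc_sel = 0xc;
	else if ((resize & 1023) && skip < 15)
		inc_sel |= 1 << (skip + 1);

	printf("resize=%u skip=%u inc_sel=0x%x\n", resize, skip, inc_sel);
	return 0;	/* resize=2048 skip=2 inc_sel=0x4 */
}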
-
-static int rj54n1_set_clock(struct i2c_client *client)
-{
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- int ret;
-
- /* Enable external clock */
- ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY);
- /* Leave stand-by. Note: use this when implementing suspend / resume */
- if (!ret)
- ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK);
-
- if (!ret)
- ret = reg_write(client, RJ54N1_PLL_L, PLL_L);
- if (!ret)
- ret = reg_write(client, RJ54N1_PLL_N, PLL_N);
-
- /* TGCLK dividers */
- if (!ret)
- ret = reg_write(client, RJ54N1_RATIO_TG,
- rj54n1->clk_div.ratio_tg);
- if (!ret)
- ret = reg_write(client, RJ54N1_RATIO_T,
- rj54n1->clk_div.ratio_t);
- if (!ret)
- ret = reg_write(client, RJ54N1_RATIO_R,
- rj54n1->clk_div.ratio_r);
-
- /* Enable TGCLK & RAMP */
- if (!ret)
- ret = reg_write(client, RJ54N1_RAMP_TGCLK_EN, 3);
-
- /* Disable clock output */
- if (!ret)
- ret = reg_write(client, RJ54N1_OCLK_DSP, 0);
-
- /* Set divisors */
- if (!ret)
- ret = reg_write(client, RJ54N1_RATIO_OP,
- rj54n1->clk_div.ratio_op);
- if (!ret)
- ret = reg_write(client, RJ54N1_RATIO_O,
- rj54n1->clk_div.ratio_o);
-
- /* Enable OCLK */
- if (!ret)
- ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);
-
- /* Use PLL for Timing Generator, write 2 to reserved bits */
- if (!ret)
- ret = reg_write(client, RJ54N1_TG_BYPASS, 2);
-
- /* Take sensor out of reset */
- if (!ret)
- ret = reg_write(client, RJ54N1_RESET_STANDBY,
- E_EXCLK | SEN_RSTX);
- /* Enable PLL */
- if (!ret)
- ret = reg_write(client, RJ54N1_PLL_EN, 1);
-
- /* Wait for PLL to stabilise */
- msleep(10);
-
- /* Enable clock to frequency divider */
- if (!ret)
- ret = reg_write(client, RJ54N1_CLK_RST, 1);
-
- if (!ret)
- ret = reg_read(client, RJ54N1_CLK_RST);
- if (ret != 1) {
- dev_err(&client->dev,
- "Resetting RJ54N1CB0C clock failed: %d!\n", ret);
- return -EIO;
- }
-
- /* Start the PLL */
- ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1);
-
- /* Enable OCLK */
- if (!ret)
- ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);
-
- return ret;
-}
-
-static int rj54n1_reg_init(struct i2c_client *client)
-{
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- int ret = rj54n1_set_clock(client);
-
- if (!ret)
- ret = reg_write_multiple(client, bank_7, ARRAY_SIZE(bank_7));
- if (!ret)
- ret = reg_write_multiple(client, bank_10, ARRAY_SIZE(bank_10));
-
- /* Set binning divisors */
- if (!ret)
- ret = reg_write(client, RJ54N1_SCALE_1_2_LEV, 3 | (7 << 4));
- if (!ret)
- ret = reg_write(client, RJ54N1_SCALE_4_LEV, 0xf);
-
- /* Switch to fixed resize mode */
- if (!ret)
- ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
- RESIZE_HOLD_SEL | 1);
-
- /* Set gain */
- if (!ret)
- ret = reg_write(client, RJ54N1_Y_GAIN, 0x84);
-
- /*
- * Mirror the image back: default is upside down and left-to-right...
- * Set manual preview / still shot switching
- */
- if (!ret)
- ret = reg_write(client, RJ54N1_MIRROR_STILL_MODE, 0x27);
-
- if (!ret)
- ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4));
-
- /* Auto exposure area */
- if (!ret)
- ret = reg_write(client, RJ54N1_EXPOSURE_CONTROL, 0x80);
- /* Check current auto WB config */
- if (!ret)
- ret = reg_read(client, RJ54N1_WB_SEL_WEIGHT_I);
- if (ret >= 0) {
- rj54n1->auto_wb = ret & 0x80;
- ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5));
- }
- if (!ret)
- ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8));
-
- if (!ret)
- ret = reg_write(client, RJ54N1_RESET_STANDBY,
- E_EXCLK | DSP_RSTX | SEN_RSTX);
-
- /* Commit init */
- if (!ret)
- ret = rj54n1_commit(client);
-
- /* Take DSP, TG, sensor out of reset */
- if (!ret)
- ret = reg_write(client, RJ54N1_RESET_STANDBY,
- E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX);
-
- /* Start register update? Same register as 0x?FE in many bank_* sets */
- if (!ret)
- ret = reg_write(client, RJ54N1_FWFLG, 2);
-
- /* Constant taken from manufacturer's example */
- msleep(700);
-
- return ret;
-}
-
-static int rj54n1_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- const struct rj54n1_datafmt *fmt;
- int output_w, output_h, max_w, max_h,
- input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
- int align = mf->code == MEDIA_BUS_FMT_SBGGR10_1X10 ||
- mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE ||
- mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE ||
- mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE ||
- mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE;
- int ret;
-
- if (format->pad)
- return -EINVAL;
-
- dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
- __func__, mf->code, mf->width, mf->height);
-
- fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
- ARRAY_SIZE(rj54n1_colour_fmts));
- if (!fmt) {
- fmt = rj54n1->fmt;
- mf->code = fmt->code;
- }
-
- mf->field = V4L2_FIELD_NONE;
- mf->colorspace = fmt->colorspace;
-
- v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align,
- &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
- return 0;
- }
-
- /*
-	 * Check whether the sensor has just been powered on. TODO: replace
-	 * this with proper PM once a suitable API is available.
- */
- ret = reg_read(client, RJ54N1_RESET_STANDBY);
- if (ret < 0)
- return ret;
-
- if (!(ret & E_EXCLK)) {
- ret = rj54n1_reg_init(client);
- if (ret < 0)
- return ret;
- }
-
- /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */
- switch (mf->code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- ret = reg_write(client, RJ54N1_OUT_SEL, 0);
- if (!ret)
- ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- ret = reg_write(client, RJ54N1_OUT_SEL, 0);
- if (!ret)
- ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
- break;
- case MEDIA_BUS_FMT_RGB565_2X8_LE:
- ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
- if (!ret)
- ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
- break;
- case MEDIA_BUS_FMT_RGB565_2X8_BE:
- ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
- if (!ret)
- ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
- break;
- case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
- ret = reg_write(client, RJ54N1_OUT_SEL, 4);
- if (!ret)
- ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
- if (!ret)
- ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
- break;
- case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
- ret = reg_write(client, RJ54N1_OUT_SEL, 4);
- if (!ret)
- ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
- if (!ret)
- ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
- break;
- case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
- ret = reg_write(client, RJ54N1_OUT_SEL, 4);
- if (!ret)
- ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
- if (!ret)
- ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
- break;
- case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
- ret = reg_write(client, RJ54N1_OUT_SEL, 4);
- if (!ret)
- ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
- if (!ret)
- ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
- break;
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- ret = reg_write(client, RJ54N1_OUT_SEL, 5);
- break;
- default:
- ret = -EINVAL;
- }
-
- /* Special case: a raw mode with 10 bits of data per clock tick */
- if (!ret)
- ret = reg_set(client, RJ54N1_OCLK_SEL_EN,
- (mf->code == MEDIA_BUS_FMT_SBGGR10_1X10) << 1, 2);
-
- if (ret < 0)
- return ret;
-
- /* Supported scales 1:1 >= scale > 1:16 */
- max_w = mf->width * (16 * 1024 - 1) / 1024;
- if (input_w > max_w)
- input_w = max_w;
- max_h = mf->height * (16 * 1024 - 1) / 1024;
- if (input_h > max_h)
- input_h = max_h;
-
- output_w = mf->width;
- output_h = mf->height;
-
- ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
- if (ret < 0)
- return ret;
-
- fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
- ARRAY_SIZE(rj54n1_colour_fmts));
-
- rj54n1->fmt = fmt;
- rj54n1->resize = ret;
- rj54n1->rect.width = input_w;
- rj54n1->rect.height = input_h;
- rj54n1->width = output_w;
- rj54n1->height = output_h;
-
- mf->width = output_w;
- mf->height = output_h;
- mf->field = V4L2_FIELD_NONE;
- mf->colorspace = fmt->colorspace;
-
- return 0;
-}
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int rj54n1_g_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (reg->reg < 0x400 || reg->reg > 0x1fff)
-		/* Registers >= 0x0800 are only available from Sharp support */
- return -EINVAL;
-
- reg->size = 1;
- reg->val = reg_read(client, reg->reg);
-
- if (reg->val > 0xff)
- return -EIO;
-
- return 0;
-}
-
-static int rj54n1_s_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (reg->reg < 0x400 || reg->reg > 0x1fff)
- /* Registers >= 0x0800 are only available from Sharp support */
- return -EINVAL;
-
- if (reg_write(client, reg->reg, reg->val) < 0)
- return -EIO;
-
- return 0;
-}
-#endif
-
-static int rj54n1_s_power(struct v4l2_subdev *sd, int on)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct rj54n1 *rj54n1 = to_rj54n1(client);
-
- return soc_camera_set_power(&client->dev, ssdd, rj54n1->clk, on);
-}
-
-static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct rj54n1 *rj54n1 = container_of(ctrl->handler, struct rj54n1, hdl);
- struct v4l2_subdev *sd = &rj54n1->subdev;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int data;
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- if (ctrl->val)
- data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 1);
- else
- data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 1, 1);
- if (data < 0)
- return -EIO;
- return 0;
- case V4L2_CID_HFLIP:
- if (ctrl->val)
- data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 2);
- else
- data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 2, 2);
- if (data < 0)
- return -EIO;
- return 0;
- case V4L2_CID_GAIN:
- if (reg_write(client, RJ54N1_Y_GAIN, ctrl->val * 2) < 0)
- return -EIO;
- return 0;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- /* Auto WB area - whole image */
- if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->val << 7,
- 0x80) < 0)
- return -EIO;
- rj54n1->auto_wb = ctrl->val;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static const struct v4l2_ctrl_ops rj54n1_ctrl_ops = {
- .s_ctrl = rj54n1_s_ctrl,
-};
-
-static const struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = rj54n1_g_register,
- .s_register = rj54n1_s_register,
-#endif
- .s_power = rj54n1_s_power,
-};
-
-static int rj54n1_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
- cfg->flags =
- V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
- V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH |
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH;
- cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
-
- return 0;
-}
-
-static int rj54n1_s_mbus_config(struct v4l2_subdev *sd,
- const struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
- /* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
- if (soc_camera_apply_board_flags(ssdd, cfg) &
- V4L2_MBUS_PCLK_SAMPLE_RISING)
- return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4);
- else
- return reg_write(client, RJ54N1_OUT_SIGPO, 0);
-}
-
-static const struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
- .s_stream = rj54n1_s_stream,
- .g_mbus_config = rj54n1_g_mbus_config,
- .s_mbus_config = rj54n1_s_mbus_config,
-};
-
-static const struct v4l2_subdev_pad_ops rj54n1_subdev_pad_ops = {
- .enum_mbus_code = rj54n1_enum_mbus_code,
- .get_selection = rj54n1_get_selection,
- .set_selection = rj54n1_set_selection,
- .get_fmt = rj54n1_get_fmt,
- .set_fmt = rj54n1_set_fmt,
-};
-
-static const struct v4l2_subdev_ops rj54n1_subdev_ops = {
- .core = &rj54n1_subdev_core_ops,
- .video = &rj54n1_subdev_video_ops,
- .pad = &rj54n1_subdev_pad_ops,
-};
-
-/*
- * Interface active, can use i2c. If it fails, it can indeed mean that
- * this wasn't our capture interface, so we wait for the right one.
- */
-static int rj54n1_video_probe(struct i2c_client *client,
- struct rj54n1_pdata *priv)
-{
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- int data1, data2;
- int ret;
-
- ret = rj54n1_s_power(&rj54n1->subdev, 1);
- if (ret < 0)
- return ret;
-
- /* Read out the chip version register */
- data1 = reg_read(client, RJ54N1_DEV_CODE);
- data2 = reg_read(client, RJ54N1_DEV_CODE2);
-
- if (data1 != 0x51 || data2 != 0x10) {
- ret = -ENODEV;
- dev_info(&client->dev, "No RJ54N1CB0C found, read 0x%x:0x%x\n",
- data1, data2);
- goto done;
- }
-
- /* Configure IOCTL polarity from the platform data: 0 or 1 << 7. */
- ret = reg_write(client, RJ54N1_IOC, priv->ioctl_high << 7);
- if (ret < 0)
- goto done;
-
- dev_info(&client->dev, "Detected a RJ54N1CB0C chip ID 0x%x:0x%x\n",
- data1, data2);
-
- ret = v4l2_ctrl_handler_setup(&rj54n1->hdl);
-
-done:
- rj54n1_s_power(&rj54n1->subdev, 0);
- return ret;
-}
-
-static int rj54n1_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
-{
- struct rj54n1 *rj54n1;
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct rj54n1_pdata *rj54n1_priv;
- int ret;
-
- if (!ssdd || !ssdd->drv_priv) {
- dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
- return -EINVAL;
- }
-
- rj54n1_priv = ssdd->drv_priv;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_warn(&adapter->dev,
-			"I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
- return -EIO;
- }
-
- rj54n1 = devm_kzalloc(&client->dev, sizeof(struct rj54n1), GFP_KERNEL);
- if (!rj54n1)
- return -ENOMEM;
-
- v4l2_i2c_subdev_init(&rj54n1->subdev, client, &rj54n1_subdev_ops);
- v4l2_ctrl_handler_init(&rj54n1->hdl, 4);
- v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
- V4L2_CID_GAIN, 0, 127, 1, 66);
- v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
- V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
- rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
- if (rj54n1->hdl.error)
- return rj54n1->hdl.error;
-
- rj54n1->clk_div = clk_div;
- rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
- rj54n1->rect.top = RJ54N1_ROW_SKIP;
- rj54n1->rect.width = RJ54N1_MAX_WIDTH;
- rj54n1->rect.height = RJ54N1_MAX_HEIGHT;
- rj54n1->width = RJ54N1_MAX_WIDTH;
- rj54n1->height = RJ54N1_MAX_HEIGHT;
- rj54n1->fmt = &rj54n1_colour_fmts[0];
- rj54n1->resize = 1024;
- rj54n1->tgclk_mhz = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) /
- (clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
-
- rj54n1->clk = v4l2_clk_get(&client->dev, "mclk");
- if (IS_ERR(rj54n1->clk)) {
- ret = PTR_ERR(rj54n1->clk);
- goto eclkget;
- }
-
- ret = rj54n1_video_probe(client, rj54n1_priv);
- if (ret < 0) {
- v4l2_clk_put(rj54n1->clk);
-eclkget:
- v4l2_ctrl_handler_free(&rj54n1->hdl);
- }
-
- return ret;
-}
-
-static int rj54n1_remove(struct i2c_client *client)
-{
- struct rj54n1 *rj54n1 = to_rj54n1(client);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
- v4l2_clk_put(rj54n1->clk);
- v4l2_device_unregister_subdev(&rj54n1->subdev);
- if (ssdd->free_bus)
- ssdd->free_bus(ssdd);
- v4l2_ctrl_handler_free(&rj54n1->hdl);
-
- return 0;
-}
-
-static const struct i2c_device_id rj54n1_id[] = {
- { "rj54n1cb0c", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, rj54n1_id);
-
-static struct i2c_driver rj54n1_i2c_driver = {
- .driver = {
- .name = "rj54n1cb0c",
- },
- .probe = rj54n1_probe,
- .remove = rj54n1_remove,
- .id_table = rj54n1_id,
-};
-
-module_i2c_driver(rj54n1_i2c_driver);
-
-MODULE_DESCRIPTION("Sharp RJ54N1CB0C Camera driver");
-MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/soc_camera/soc_tw9910.c b/drivers/media/i2c/soc_camera/soc_tw9910.c
deleted file mode 100644
index bdb5e0a431e9..000000000000
--- a/drivers/media/i2c/soc_camera/soc_tw9910.c
+++ /dev/null
@@ -1,999 +0,0 @@
-/*
- * tw9910 Video Driver
- *
- * Copyright (C) 2008 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on ov772x driver,
- *
- * Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com>
- * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
- * Copyright (C) 2008 Magnus Damm
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/v4l2-mediabus.h>
-#include <linux/videodev2.h>
-
-#include <media/soc_camera.h>
-#include <media/i2c/tw9910.h>
-#include <media/v4l2-clk.h>
-#include <media/v4l2-subdev.h>
-
-#define GET_ID(val) ((val & 0xF8) >> 3)
-#define GET_REV(val) (val & 0x07)
-
-/*
- * register offset
- */
-#define ID 0x00 /* Product ID Code Register */
-#define STATUS1 0x01 /* Chip Status Register I */
-#define INFORM 0x02 /* Input Format */
-#define OPFORM 0x03 /* Output Format Control Register */
-#define DLYCTR 0x04 /* Hysteresis and HSYNC Delay Control */
-#define OUTCTR1 0x05 /* Output Control I */
-#define ACNTL1 0x06 /* Analog Control Register 1 */
-#define CROP_HI 0x07 /* Cropping Register, High */
-#define VDELAY_LO 0x08 /* Vertical Delay Register, Low */
-#define VACTIVE_LO 0x09 /* Vertical Active Register, Low */
-#define HDELAY_LO 0x0A /* Horizontal Delay Register, Low */
-#define HACTIVE_LO 0x0B /* Horizontal Active Register, Low */
-#define CNTRL1 0x0C /* Control Register I */
-#define VSCALE_LO 0x0D /* Vertical Scaling Register, Low */
-#define SCALE_HI 0x0E /* Scaling Register, High */
-#define HSCALE_LO 0x0F /* Horizontal Scaling Register, Low */
-#define BRIGHT 0x10 /* BRIGHTNESS Control Register */
-#define CONTRAST 0x11 /* CONTRAST Control Register */
-#define SHARPNESS 0x12 /* SHARPNESS Control Register I */
-#define SAT_U 0x13 /* Chroma (U) Gain Register */
-#define SAT_V 0x14 /* Chroma (V) Gain Register */
-#define HUE 0x15 /* Hue Control Register */
-#define CORING1 0x17
-#define CORING2 0x18 /* Coring and IF compensation */
-#define VBICNTL 0x19 /* VBI Control Register */
-#define ACNTL2 0x1A /* Analog Control 2 */
-#define OUTCTR2 0x1B /* Output Control 2 */
-#define SDT 0x1C /* Standard Selection */
-#define SDTR 0x1D /* Standard Recognition */
-#define TEST 0x1F /* Test Control Register */
-#define CLMPG 0x20 /* Clamping Gain */
-#define IAGC 0x21 /* Individual AGC Gain */
-#define AGCGAIN 0x22 /* AGC Gain */
-#define PEAKWT 0x23 /* White Peak Threshold */
-#define CLMPL 0x24 /* Clamp level */
-#define SYNCT 0x25 /* Sync Amplitude */
-#define MISSCNT 0x26 /* Sync Miss Count Register */
-#define PCLAMP 0x27 /* Clamp Position Register */
-#define VCNTL1 0x28 /* Vertical Control I */
-#define VCNTL2 0x29 /* Vertical Control II */
-#define CKILL 0x2A /* Color Killer Level Control */
-#define COMB 0x2B /* Comb Filter Control */
-#define LDLY 0x2C /* Luma Delay and H Filter Control */
-#define MISC1 0x2D /* Miscellaneous Control I */
-#define LOOP 0x2E /* LOOP Control Register */
-#define MISC2 0x2F /* Miscellaneous Control II */
-#define MVSN 0x30 /* Macrovision Detection */
-#define STATUS2 0x31 /* Chip STATUS II */
-#define HFREF 0x32 /* H monitor */
-#define CLMD 0x33 /* CLAMP MODE */
-#define IDCNTL 0x34 /* ID Detection Control */
-#define CLCNTL1 0x35 /* Clamp Control I */
-#define ANAPLLCTL 0x4C
-#define VBIMIN 0x4D
-#define HSLOWCTL 0x4E
-#define WSS3 0x4F
-#define FILLDATA 0x50
-#define SDID 0x51
-#define DID 0x52
-#define WSS1 0x53
-#define WSS2 0x54
-#define VVBI 0x55
-#define LCTL6 0x56
-#define LCTL7 0x57
-#define LCTL8 0x58
-#define LCTL9 0x59
-#define LCTL10 0x5A
-#define LCTL11 0x5B
-#define LCTL12 0x5C
-#define LCTL13 0x5D
-#define LCTL14 0x5E
-#define LCTL15 0x5F
-#define LCTL16 0x60
-#define LCTL17 0x61
-#define LCTL18 0x62
-#define LCTL19 0x63
-#define LCTL20 0x64
-#define LCTL21 0x65
-#define LCTL22 0x66
-#define LCTL23 0x67
-#define LCTL24 0x68
-#define LCTL25 0x69
-#define LCTL26 0x6A
-#define HSBEGIN 0x6B
-#define HSEND 0x6C
-#define OVSDLY 0x6D
-#define OVSEND 0x6E
-#define VBIDELAY 0x6F
-
-/*
- * register detail
- */
-
-/* INFORM */
-#define FC27_ON 0x40 /* 1 : Input crystal clock frequency is 27MHz */
-#define FC27_FF 0x00 /* 0 : Square pixel mode. */
- /* Must use 24.54MHz for 60Hz field rate */
- /* source or 29.5MHz for 50Hz field rate */
-#define IFSEL_S 0x10 /* 01 : S-video decoding */
-#define IFSEL_C 0x00 /* 00 : Composite video decoding */
- /* Y input video selection */
-#define YSEL_M0 0x00 /* 00 : Mux0 selected */
-#define YSEL_M1 0x04 /* 01 : Mux1 selected */
-#define YSEL_M2 0x08 /* 10 : Mux2 selected */
-#define YSEL_M3		0x0c /* 11 : Mux3 selected */
-
-/* OPFORM */
-#define MODE 0x80 /* 0 : CCIR601 compatible YCrCb 4:2:2 format */
- /* 1 : ITU-R-656 compatible data sequence format */
-#define LEN 0x40 /* 0 : 8-bit YCrCb 4:2:2 output format */
- /* 1 : 16-bit YCrCb 4:2:2 output format.*/
-#define LLCMODE 0x20 /* 1 : LLC output mode. */
- /* 0 : free-run output mode */
-#define AINC 0x10 /* Serial interface auto-indexing control */
- /* 0 : auto-increment */
- /* 1 : non-auto */
-#define VSCTL 0x08 /* 1 : Vertical out ctrl by DVALID */
- /* 0 : Vertical out ctrl by HACTIVE and DVALID */
-#define OEN_TRI_SEL_MASK 0x07
-#define OEN_TRI_SEL_ALL_ON 0x00 /* Enable output for Rev0/Rev1 */
-#define OEN_TRI_SEL_ALL_OFF_r0 0x06 /* All tri-stated for Rev0 */
-#define OEN_TRI_SEL_ALL_OFF_r1 0x07 /* All tri-stated for Rev1 */
-
-/* OUTCTR1 */
-#define VSP_LO 0x00 /* 0 : VS pin output polarity is active low */
-#define VSP_HI 0x80 /* 1 : VS pin output polarity is active high. */
- /* VS pin output control */
-#define VSSL_VSYNC 0x00 /* 0 : VSYNC */
-#define VSSL_VACT 0x10 /* 1 : VACT */
-#define VSSL_FIELD 0x20 /* 2 : FIELD */
-#define VSSL_VVALID 0x30 /* 3 : VVALID */
-#define VSSL_ZERO 0x70 /* 7 : 0 */
-#define HSP_LOW 0x00 /* 0 : HS pin output polarity is active low */
-#define HSP_HI 0x08 /* 1 : HS pin output polarity is active high.*/
- /* HS pin output control */
-#define HSSL_HACT 0x00 /* 0 : HACT */
-#define HSSL_HSYNC 0x01 /* 1 : HSYNC */
-#define HSSL_DVALID 0x02 /* 2 : DVALID */
-#define HSSL_HLOCK 0x03 /* 3 : HLOCK */
-#define HSSL_ASYNCW 0x04 /* 4 : ASYNCW */
-#define HSSL_ZERO 0x07 /* 7 : 0 */
-
-/* ACNTL1 */
-#define SRESET 0x80 /* resets the device to its default state
-				 * but all register content remains unchanged.
- * This bit is self-resetting.
- */
-#define ACNTL1_PDN_MASK 0x0e
-#define CLK_PDN 0x08 /* system clock power down */
-#define Y_PDN 0x04 /* Luma ADC power down */
-#define C_PDN 0x02 /* Chroma ADC power down */
-
-/* ACNTL2 */
-#define ACNTL2_PDN_MASK 0x40
-#define PLL_PDN 0x40 /* PLL power down */
-
-/* VBICNTL */
-
-/* RTSEL : control the real time signal output from the MPOUT pin */
-#define RTSEL_MASK 0x07
-#define RTSEL_VLOSS 0x00 /* 0000 = Video loss */
-#define RTSEL_HLOCK 0x01 /* 0001 = H-lock */
-#define RTSEL_SLOCK 0x02 /* 0010 = S-lock */
-#define RTSEL_VLOCK 0x03 /* 0011 = V-lock */
-#define RTSEL_MONO 0x04 /* 0100 = MONO */
-#define RTSEL_DET50 0x05 /* 0101 = DET50 */
-#define RTSEL_FIELD 0x06 /* 0110 = FIELD */
-#define RTSEL_RTCO 0x07 /* 0111 = RTCO ( Real Time Control ) */
-
-/* HSYNC start and end are constant for now */
-#define HSYNC_START 0x0260
-#define HSYNC_END 0x0300
-
-/*
- * structure
- */
-
-struct regval_list {
- unsigned char reg_num;
- unsigned char value;
-};
-
-struct tw9910_scale_ctrl {
- char *name;
- unsigned short width;
- unsigned short height;
- u16 hscale;
- u16 vscale;
-};
-
-struct tw9910_priv {
- struct v4l2_subdev subdev;
- struct v4l2_clk *clk;
- struct tw9910_video_info *info;
- const struct tw9910_scale_ctrl *scale;
- v4l2_std_id norm;
- u32 revision;
-};
-
-static const struct tw9910_scale_ctrl tw9910_ntsc_scales[] = {
- {
- .name = "NTSC SQ",
- .width = 640,
- .height = 480,
- .hscale = 0x0100,
- .vscale = 0x0100,
- },
- {
- .name = "NTSC CCIR601",
- .width = 720,
- .height = 480,
- .hscale = 0x0100,
- .vscale = 0x0100,
- },
- {
- .name = "NTSC SQ (CIF)",
- .width = 320,
- .height = 240,
- .hscale = 0x0200,
- .vscale = 0x0200,
- },
- {
- .name = "NTSC CCIR601 (CIF)",
- .width = 360,
- .height = 240,
- .hscale = 0x0200,
- .vscale = 0x0200,
- },
- {
- .name = "NTSC SQ (QCIF)",
- .width = 160,
- .height = 120,
- .hscale = 0x0400,
- .vscale = 0x0400,
- },
- {
- .name = "NTSC CCIR601 (QCIF)",
- .width = 180,
- .height = 120,
- .hscale = 0x0400,
- .vscale = 0x0400,
- },
-};
-
-static const struct tw9910_scale_ctrl tw9910_pal_scales[] = {
- {
- .name = "PAL SQ",
- .width = 768,
- .height = 576,
- .hscale = 0x0100,
- .vscale = 0x0100,
- },
- {
- .name = "PAL CCIR601",
- .width = 720,
- .height = 576,
- .hscale = 0x0100,
- .vscale = 0x0100,
- },
- {
- .name = "PAL SQ (CIF)",
- .width = 384,
- .height = 288,
- .hscale = 0x0200,
- .vscale = 0x0200,
- },
- {
- .name = "PAL CCIR601 (CIF)",
- .width = 360,
- .height = 288,
- .hscale = 0x0200,
- .vscale = 0x0200,
- },
- {
- .name = "PAL SQ (QCIF)",
- .width = 192,
- .height = 144,
- .hscale = 0x0400,
- .vscale = 0x0400,
- },
- {
- .name = "PAL CCIR601 (QCIF)",
- .width = 180,
- .height = 144,
- .hscale = 0x0400,
- .vscale = 0x0400,
- },
-};
-
-/*
- * general function
- */
-static struct tw9910_priv *to_tw9910(const struct i2c_client *client)
-{
- return container_of(i2c_get_clientdata(client), struct tw9910_priv,
- subdev);
-}
-
-static int tw9910_mask_set(struct i2c_client *client, u8 command,
- u8 mask, u8 set)
-{
- s32 val = i2c_smbus_read_byte_data(client, command);
- if (val < 0)
- return val;
-
- val &= ~mask;
- val |= set & mask;
-
- return i2c_smbus_write_byte_data(client, command, val);
-}
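/*
 * An illustrative sketch of the read-modify-write above: bits inside the
 * mask are replaced, bits outside it are preserved. The current register
 * value 0xb1 is a hypothetical read-back.
 */
#include <stdio.h>

int main(void)
{
	unsigned int val = 0xb1;	/* hypothetical current value */
	unsigned int mask = 0x07;	/* e.g. OEN_TRI_SEL_MASK */
	unsigned int set = 0x06;	/* e.g. OEN_TRI_SEL_ALL_OFF_r0 */

	val &= ~mask;
	val |= set & mask;
	printf("0x%02x\n", val);	/* 0xb6 */
	return 0;
}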
-
-static int tw9910_set_scale(struct i2c_client *client,
- const struct tw9910_scale_ctrl *scale)
-{
- int ret;
-
- ret = i2c_smbus_write_byte_data(client, SCALE_HI,
- (scale->vscale & 0x0F00) >> 4 |
- (scale->hscale & 0x0F00) >> 8);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, HSCALE_LO,
- scale->hscale & 0x00FF);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, VSCALE_LO,
- scale->vscale & 0x00FF);
-
- return ret;
-}
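/*
 * An illustrative sketch of the nibble packing above: SCALE_HI carries
 * bits [11:8] of vscale in its high nibble and bits [11:8] of hscale in
 * its low nibble; the low bytes go to HSCALE_LO/VSCALE_LO. 0x0200 is the
 * 1:2 scale used by the CIF entries in the tables above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hscale = 0x0200, vscale = 0x0200;

	printf("SCALE_HI=0x%02x HSCALE_LO=0x%02x VSCALE_LO=0x%02x\n",
	       ((vscale & 0x0f00) >> 4) | ((hscale & 0x0f00) >> 8),
	       hscale & 0xff, vscale & 0xff);
	return 0;	/* SCALE_HI=0x22, both low bytes 0x00 */
}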
-
-static int tw9910_set_hsync(struct i2c_client *client)
-{
- struct tw9910_priv *priv = to_tw9910(client);
- int ret;
-
- /* bit 10 - 3 */
- ret = i2c_smbus_write_byte_data(client, HSBEGIN,
- (HSYNC_START & 0x07F8) >> 3);
- if (ret < 0)
- return ret;
-
- /* bit 10 - 3 */
- ret = i2c_smbus_write_byte_data(client, HSEND,
- (HSYNC_END & 0x07F8) >> 3);
- if (ret < 0)
- return ret;
-
- /* So far only revisions 0 and 1 have been seen */
- /* bit 2 - 0 */
- if (1 == priv->revision)
- ret = tw9910_mask_set(client, HSLOWCTL, 0x77,
- (HSYNC_START & 0x0007) << 4 |
- (HSYNC_END & 0x0007));
-
- return ret;
-}
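/*
 * An illustrative sketch of the HSYNC split above: bits [10:3] of the
 * start and end positions go to HSBEGIN/HSEND, and on revision 1 the
 * three low-order bits of each are packed into HSLOWCTL.
 */
#include <stdio.h>

int main(void)
{
	unsigned int start = 0x0260, end = 0x0300;	/* HSYNC_START/END */

	printf("HSBEGIN=0x%02x HSEND=0x%02x HSLOWCTL=0x%02x\n",
	       (start & 0x07f8) >> 3, (end & 0x07f8) >> 3,
	       ((start & 7) << 4) | (end & 7));
	return 0;	/* HSBEGIN=0x4c HSEND=0x60 HSLOWCTL=0x00 */
}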
-
-static void tw9910_reset(struct i2c_client *client)
-{
- tw9910_mask_set(client, ACNTL1, SRESET, SRESET);
- msleep(1);
-}
-
-static int tw9910_power(struct i2c_client *client, int enable)
-{
- int ret;
- u8 acntl1;
- u8 acntl2;
-
- if (enable) {
- acntl1 = 0;
- acntl2 = 0;
- } else {
- acntl1 = CLK_PDN | Y_PDN | C_PDN;
- acntl2 = PLL_PDN;
- }
-
- ret = tw9910_mask_set(client, ACNTL1, ACNTL1_PDN_MASK, acntl1);
- if (ret < 0)
- return ret;
-
- return tw9910_mask_set(client, ACNTL2, ACNTL2_PDN_MASK, acntl2);
-}
-
-static const struct tw9910_scale_ctrl *tw9910_select_norm(v4l2_std_id norm,
- u32 width, u32 height)
-{
- const struct tw9910_scale_ctrl *scale;
- const struct tw9910_scale_ctrl *ret = NULL;
- __u32 diff = 0xffffffff, tmp;
- int size, i;
-
- if (norm & V4L2_STD_NTSC) {
- scale = tw9910_ntsc_scales;
- size = ARRAY_SIZE(tw9910_ntsc_scales);
- } else if (norm & V4L2_STD_PAL) {
- scale = tw9910_pal_scales;
- size = ARRAY_SIZE(tw9910_pal_scales);
- } else {
- return NULL;
- }
-
- for (i = 0; i < size; i++) {
- tmp = abs(width - scale[i].width) +
- abs(height - scale[i].height);
- if (tmp < diff) {
- diff = tmp;
- ret = scale + i;
- }
- }
-
- return ret;
-}
-
-/*
- * subdevice operations
- */
-static int tw9910_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct tw9910_priv *priv = to_tw9910(client);
- u8 val;
- int ret;
-
- if (!enable) {
- switch (priv->revision) {
- case 0:
- val = OEN_TRI_SEL_ALL_OFF_r0;
- break;
- case 1:
- val = OEN_TRI_SEL_ALL_OFF_r1;
- break;
- default:
- dev_err(&client->dev, "un-supported revision\n");
- return -EINVAL;
- }
- } else {
- val = OEN_TRI_SEL_ALL_ON;
-
- if (!priv->scale) {
- dev_err(&client->dev, "norm select error\n");
- return -EPERM;
- }
-
- dev_dbg(&client->dev, "%s %dx%d\n",
- priv->scale->name,
- priv->scale->width,
- priv->scale->height);
- }
-
- ret = tw9910_mask_set(client, OPFORM, OEN_TRI_SEL_MASK, val);
- if (ret < 0)
- return ret;
-
- return tw9910_power(client, enable);
-}
-
-static int tw9910_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct tw9910_priv *priv = to_tw9910(client);
-
- *norm = priv->norm;
-
- return 0;
-}
-
-static int tw9910_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct tw9910_priv *priv = to_tw9910(client);
- const unsigned hact = 720;
- const unsigned hdelay = 15;
- unsigned vact;
- unsigned vdelay;
- int ret;
-
- if (!(norm & (V4L2_STD_NTSC | V4L2_STD_PAL)))
- return -EINVAL;
-
- priv->norm = norm;
- if (norm & V4L2_STD_525_60) {
- vact = 240;
- vdelay = 18;
- ret = tw9910_mask_set(client, VVBI, 0x10, 0x10);
- } else {
- vact = 288;
- vdelay = 24;
- ret = tw9910_mask_set(client, VVBI, 0x10, 0x00);
- }
- if (!ret)
- ret = i2c_smbus_write_byte_data(client, CROP_HI,
- ((vdelay >> 2) & 0xc0) |
- ((vact >> 4) & 0x30) |
- ((hdelay >> 6) & 0x0c) |
- ((hact >> 8) & 0x03));
- if (!ret)
- ret = i2c_smbus_write_byte_data(client, VDELAY_LO,
- vdelay & 0xff);
- if (!ret)
- ret = i2c_smbus_write_byte_data(client, VACTIVE_LO,
- vact & 0xff);
-
- return ret;
-}
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int tw9910_g_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (reg->reg > 0xff)
- return -EINVAL;
-
- reg->size = 1;
- ret = i2c_smbus_read_byte_data(client, reg->reg);
- if (ret < 0)
- return ret;
-
- /*
- * ret = int
- * reg->val = __u64
- */
- reg->val = (__u64)ret;
-
- return 0;
-}
-
-static int tw9910_s_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (reg->reg > 0xff ||
- reg->val > 0xff)
- return -EINVAL;
-
- return i2c_smbus_write_byte_data(client, reg->reg, reg->val);
-}
-#endif
-
-static int tw9910_s_power(struct v4l2_subdev *sd, int on)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct tw9910_priv *priv = to_tw9910(client);
-
- return soc_camera_set_power(&client->dev, ssdd, priv->clk, on);
-}
-
-static int tw9910_set_frame(struct v4l2_subdev *sd, u32 *width, u32 *height)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct tw9910_priv *priv = to_tw9910(client);
- int ret = -EINVAL;
- u8 val;
-
- /*
- * select suitable norm
- */
- priv->scale = tw9910_select_norm(priv->norm, *width, *height);
- if (!priv->scale)
- goto tw9910_set_fmt_error;
-
- /*
- * reset hardware
- */
- tw9910_reset(client);
-
- /*
- * set bus width
- */
- val = 0x00;
- if (SOCAM_DATAWIDTH_16 == priv->info->buswidth)
- val = LEN;
-
- ret = tw9910_mask_set(client, OPFORM, LEN, val);
- if (ret < 0)
- goto tw9910_set_fmt_error;
-
- /*
- * select MPOUT behavior
- */
- switch (priv->info->mpout) {
- case TW9910_MPO_VLOSS:
- val = RTSEL_VLOSS; break;
- case TW9910_MPO_HLOCK:
- val = RTSEL_HLOCK; break;
- case TW9910_MPO_SLOCK:
- val = RTSEL_SLOCK; break;
- case TW9910_MPO_VLOCK:
- val = RTSEL_VLOCK; break;
- case TW9910_MPO_MONO:
- val = RTSEL_MONO; break;
- case TW9910_MPO_DET50:
- val = RTSEL_DET50; break;
- case TW9910_MPO_FIELD:
- val = RTSEL_FIELD; break;
- case TW9910_MPO_RTCO:
- val = RTSEL_RTCO; break;
- default:
- val = 0;
- }
-
- ret = tw9910_mask_set(client, VBICNTL, RTSEL_MASK, val);
- if (ret < 0)
- goto tw9910_set_fmt_error;
-
- /*
- * set scale
- */
- ret = tw9910_set_scale(client, priv->scale);
- if (ret < 0)
- goto tw9910_set_fmt_error;
-
- /*
- * set hsync
- */
- ret = tw9910_set_hsync(client);
- if (ret < 0)
- goto tw9910_set_fmt_error;
-
- *width = priv->scale->width;
- *height = priv->scale->height;
-
- return ret;
-
-tw9910_set_fmt_error:
-
- tw9910_reset(client);
- priv->scale = NULL;
-
- return ret;
-}
-
-static int tw9910_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct tw9910_priv *priv = to_tw9910(client);
-
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
- /* Only CROP, CROP_DEFAULT and CROP_BOUNDS are supported */
- if (sel->target > V4L2_SEL_TGT_CROP_BOUNDS)
- return -EINVAL;
-
- sel->r.left = 0;
- sel->r.top = 0;
- if (priv->norm & V4L2_STD_NTSC) {
- sel->r.width = 640;
- sel->r.height = 480;
- } else {
- sel->r.width = 768;
- sel->r.height = 576;
- }
- return 0;
-}
-
-static int tw9910_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct tw9910_priv *priv = to_tw9910(client);
-
- if (format->pad)
- return -EINVAL;
-
- if (!priv->scale) {
- priv->scale = tw9910_select_norm(priv->norm, 640, 480);
- if (!priv->scale)
- return -EINVAL;
- }
-
- mf->width = priv->scale->width;
- mf->height = priv->scale->height;
- mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
- mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
- mf->field = V4L2_FIELD_INTERLACED_BT;
-
- return 0;
-}
-
-static int tw9910_s_fmt(struct v4l2_subdev *sd,
- struct v4l2_mbus_framefmt *mf)
-{
- u32 width = mf->width, height = mf->height;
- int ret;
-
- WARN_ON(mf->field != V4L2_FIELD_ANY &&
- mf->field != V4L2_FIELD_INTERLACED_BT);
-
- /*
- * check color format
- */
- if (mf->code != MEDIA_BUS_FMT_UYVY8_2X8)
- return -EINVAL;
-
- mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
-
- ret = tw9910_set_frame(sd, &width, &height);
- if (!ret) {
- mf->width = width;
- mf->height = height;
- }
- return ret;
-}
-
-static int tw9910_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct tw9910_priv *priv = to_tw9910(client);
- const struct tw9910_scale_ctrl *scale;
-
- if (format->pad)
- return -EINVAL;
-
- if (V4L2_FIELD_ANY == mf->field) {
- mf->field = V4L2_FIELD_INTERLACED_BT;
- } else if (V4L2_FIELD_INTERLACED_BT != mf->field) {
- dev_err(&client->dev, "Field type %d invalid.\n", mf->field);
- return -EINVAL;
- }
-
- mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
- mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
-
- /*
- * select suitable norm
- */
- scale = tw9910_select_norm(priv->norm, mf->width, mf->height);
- if (!scale)
- return -EINVAL;
-
- mf->width = scale->width;
- mf->height = scale->height;
-
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return tw9910_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
- return 0;
-}
-
-static int tw9910_video_probe(struct i2c_client *client)
-{
- struct tw9910_priv *priv = to_tw9910(client);
- s32 id;
- int ret;
-
- /*
- * tw9910 only use 8 or 16 bit bus width
- */
- if (SOCAM_DATAWIDTH_16 != priv->info->buswidth &&
- SOCAM_DATAWIDTH_8 != priv->info->buswidth) {
- dev_err(&client->dev, "bus width error\n");
- return -ENODEV;
- }
-
- ret = tw9910_s_power(&priv->subdev, 1);
- if (ret < 0)
- return ret;
-
- /*
- * check and show Product ID
- * So far only revisions 0 and 1 have been seen
- */
- id = i2c_smbus_read_byte_data(client, ID);
- priv->revision = GET_REV(id);
- id = GET_ID(id);
-
- if (0x0B != id ||
- 0x01 < priv->revision) {
- dev_err(&client->dev,
- "Product ID error %x:%x\n",
- id, priv->revision);
- ret = -ENODEV;
- goto done;
- }
-
- dev_info(&client->dev,
- "tw9910 Product ID %0x:%0x\n", id, priv->revision);
-
- priv->norm = V4L2_STD_NTSC;
- priv->scale = &tw9910_ntsc_scales[0];
-
-done:
- tw9910_s_power(&priv->subdev, 0);
- return ret;
-}
-
-static const struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = tw9910_g_register,
- .s_register = tw9910_s_register,
-#endif
- .s_power = tw9910_s_power,
-};
-
-static int tw9910_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad || code->index)
- return -EINVAL;
-
- code->code = MEDIA_BUS_FMT_UYVY8_2X8;
- return 0;
-}
-
-static int tw9910_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
- cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW |
- V4L2_MBUS_DATA_ACTIVE_HIGH;
- cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
-
- return 0;
-}
-
-static int tw9910_s_mbus_config(struct v4l2_subdev *sd,
- const struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- u8 val = VSSL_VVALID | HSSL_DVALID;
- unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg);
-
- /*
- * set OUTCTR1
- *
- * We use VVALID and DVALID signals to control VSYNC and HSYNC
- * outputs, in this mode their polarity is inverted.
- */
- if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- val |= HSP_HI;
-
- if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- val |= VSP_HI;
-
- return i2c_smbus_write_byte_data(client, OUTCTR1, val);
-}
-
-static int tw9910_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm)
-{
- *norm = V4L2_STD_NTSC | V4L2_STD_PAL;
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
- .s_std = tw9910_s_std,
- .g_std = tw9910_g_std,
- .s_stream = tw9910_s_stream,
- .g_mbus_config = tw9910_g_mbus_config,
- .s_mbus_config = tw9910_s_mbus_config,
- .g_tvnorms = tw9910_g_tvnorms,
-};
-
-static const struct v4l2_subdev_pad_ops tw9910_subdev_pad_ops = {
- .enum_mbus_code = tw9910_enum_mbus_code,
- .get_selection = tw9910_get_selection,
- .get_fmt = tw9910_get_fmt,
- .set_fmt = tw9910_set_fmt,
-};
-
-static const struct v4l2_subdev_ops tw9910_subdev_ops = {
- .core = &tw9910_subdev_core_ops,
- .video = &tw9910_subdev_video_ops,
- .pad = &tw9910_subdev_pad_ops,
-};
-
-/*
- * i2c_driver function
- */
-
-static int tw9910_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
-
-{
- struct tw9910_priv *priv;
- struct tw9910_video_info *info;
- struct i2c_adapter *adapter =
- to_i2c_adapter(client->dev.parent);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- int ret;
-
- if (!ssdd || !ssdd->drv_priv) {
- dev_err(&client->dev, "TW9910: missing platform data!\n");
- return -EINVAL;
- }
-
- info = ssdd->drv_priv;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev,
- "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
- return -EIO;
- }
-
- priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->info = info;
-
- v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops);
-
- priv->clk = v4l2_clk_get(&client->dev, "mclk");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
- ret = tw9910_video_probe(client);
- if (ret < 0)
- v4l2_clk_put(priv->clk);
-
- return ret;
-}
-
-static int tw9910_remove(struct i2c_client *client)
-{
- struct tw9910_priv *priv = to_tw9910(client);
- v4l2_clk_put(priv->clk);
- return 0;
-}
-
-static const struct i2c_device_id tw9910_id[] = {
- { "tw9910", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tw9910_id);
-
-static struct i2c_driver tw9910_i2c_driver = {
- .driver = {
- .name = "tw9910",
- },
- .probe = tw9910_probe,
- .remove = tw9910_remove,
- .id_table = tw9910_id,
-};
-
-module_i2c_driver(tw9910_i2c_driver);
-
-MODULE_DESCRIPTION("SoC Camera driver for tw9910");
-MODULE_AUTHOR("Kuninori Morimoto");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index e8613e364403..a62ede096636 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1884,6 +1884,10 @@ static int tda1997x_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
for (i = 0; i < 128; i++)
io_write(sd, REG_EDID_IN_BYTE128 + i, edid->edid[i+128]);
+ /* store state */
+ memcpy(state->edid.edid, edid->edid, 256);
+ state->edid.blocks = edid->blocks;
+
tda1997x_enable_edid(sd);
return 0;
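
The two added lines cache the incoming EDID in driver state so a later get_edid call can serve it back without touching the hardware. A minimal sketch of that counterpart, reusing the state->edid fields from this hunk; to_state() stands in for the driver's container_of() helper, and the body is illustrative rather than the driver's literal code:

static int tda1997x_get_edid_sketch(struct v4l2_subdev *sd,
				    struct v4l2_edid *edid)
{
	struct tda1997x_state *state = to_state(sd);	/* assumed helper */

	if (edid->start_block == 0 && edid->blocks == 0) {
		edid->blocks = state->edid.blocks;	/* size query */
		return 0;
	}
	if (!state->edid.blocks)
		return -ENODATA;
	if (edid->start_block >= state->edid.blocks)
		return -EINVAL;
	if (edid->start_block + edid->blocks > state->edid.blocks)
		edid->blocks = state->edid.blocks - edid->start_block;
	memcpy(edid->edid, state->edid.edid + edid->start_block * 128,
	       edid->blocks * 128);	/* EDID blocks are 128 bytes */
	return 0;
}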
diff --git a/drivers/media/i2c/tda1997x_regs.h b/drivers/media/i2c/tda1997x_regs.h
index f55dfc423a86..ecf87534613b 100644
--- a/drivers/media/i2c/tda1997x_regs.h
+++ b/drivers/media/i2c/tda1997x_regs.h
@@ -596,7 +596,7 @@
#define RESET_AUDIO BIT(0) /* Reset Audio FIFO control */
/* HDCP_BCAPS bits */
-#define HDCP_HDMI BIT(7) /* HDCP suports HDMI (vs DVI only) */
+#define HDCP_HDMI BIT(7) /* HDCP supports HDMI (vs DVI only) */
#define HDCP_REPEATER BIT(6) /* HDCP supports repeater function */
#define HDCP_READY BIT(5) /* set by repeater function */
#define HDCP_FAST BIT(4) /* Up to 400kHz */
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index 0dd6ff3e6201..6ba53f3a6dd2 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -7,7 +7,7 @@
The tda9840 is a stereo/dual sound processor with digital
identification. It can be found at address 0x84 on the i2c-bus.
- For detailed informations download the specifications directly
+ For detailed information download the specifications directly
from SGS Thomson at http://www.st.com
This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index 084bd75bb32c..965c6ccc4fee 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -9,7 +9,7 @@
It is cascadable, i.e. it can be found at the addresses
0x86 and 0x06 on the i2c-bus.
- For detailed informations download the specifications directly
+ For detailed information download the specifications directly
from SGS Thomson at http://www.st.com
This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index b7f4e58f3624..2701a4c9734d 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -9,7 +9,7 @@
It is cascadable, i.e. it can be found at the addresses 0x98
and 0x9a on the i2c-bus.
- For detailed informations download the specifications directly
+ For detailed information download the specifications directly
from SGS Thomson at http://www.st.com
This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index af2da977a685..e6796e94dadf 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -538,7 +538,7 @@ static int tda9840_checkit(struct CHIPSTATE *chip)
#define TDA9855_INT 0 /* Selects inputs LOR and LOL. (internal) */
/* Unique to TDA9850: */
-/* lower 4 bits contol SAP noise threshold, over which SAP turns off
+/* lower 4 bits control SAP noise threshold, over which SAP turns off
* set to values of 0x00 through 0x0f for SAP1 through SAP16 */
@@ -546,7 +546,7 @@ static int tda9840_checkit(struct CHIPSTATE *chip)
/* Common to TDA9855 and TDA9850: */
#define TDA985x_SAP 3<<6 /* Selects SAP output, mute if not received */
#define TDA985x_MONOSAP 2<<6 /* Selects Mono on left, SAP on right */
-#define TDA985x_STEREO 1<<6 /* Selects Stereo ouput, mono if not received */
+#define TDA985x_STEREO 1<<6 /* Selects Stereo output, mono if not received */
#define TDA985x_MONO 0 /* Forces Mono output */
#define TDA985x_LMU 1<<3 /* Mute (LOR/LOL for 9855, OUTL/OUTR for 9850) */
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 1cc83cb934e2..3ada3bb27402 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -67,7 +67,7 @@ enum tvp514x_std {
};
/**
- * struct tvp514x_std_info - Structure to store standard informations
+ * struct tvp514x_std_info - Structure to store standard information
* @width: Line width in pixels
* @height:Number of active lines
* @video_std: Value to write in REG_VIDEO_STD register
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index a54548cc4285..4d7cd736b930 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -584,6 +584,14 @@ static int tw9910_s_register(struct v4l2_subdev *sd,
}
#endif
+static void tw9910_set_gpio_value(struct gpio_desc *desc, int value)
+{
+ if (desc) {
+ gpiod_set_value(desc, value);
+ usleep_range(500, 1000);
+ }
+}
+
static int tw9910_power_on(struct tw9910_priv *priv)
{
struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
@@ -595,10 +603,7 @@ static int tw9910_power_on(struct tw9910_priv *priv)
return ret;
}
- if (priv->pdn_gpio) {
- gpiod_set_value(priv->pdn_gpio, 0);
- usleep_range(500, 1000);
- }
+ tw9910_set_gpio_value(priv->pdn_gpio, 0);
/*
* FIXME: The reset signal is connected to a shared GPIO on some
@@ -610,14 +615,14 @@ static int tw9910_power_on(struct tw9910_priv *priv)
GPIOD_OUT_LOW);
if (IS_ERR(priv->rstb_gpio)) {
dev_info(&client->dev, "Unable to get GPIO \"rstb\"");
+ clk_disable_unprepare(priv->clk);
+ tw9910_set_gpio_value(priv->pdn_gpio, 1);
return PTR_ERR(priv->rstb_gpio);
}
if (priv->rstb_gpio) {
- gpiod_set_value(priv->rstb_gpio, 1);
- usleep_range(500, 1000);
- gpiod_set_value(priv->rstb_gpio, 0);
- usleep_range(500, 1000);
+ tw9910_set_gpio_value(priv->rstb_gpio, 1);
+ tw9910_set_gpio_value(priv->rstb_gpio, 0);
gpiod_put(priv->rstb_gpio);
}
@@ -628,11 +633,7 @@ static int tw9910_power_on(struct tw9910_priv *priv)
static int tw9910_power_off(struct tw9910_priv *priv)
{
clk_disable_unprepare(priv->clk);
-
- if (priv->pdn_gpio) {
- gpiod_set_value(priv->pdn_gpio, 1);
- usleep_range(500, 1000);
- }
+ tw9910_set_gpio_value(priv->pdn_gpio, 1);
return 0;
}
@@ -1000,7 +1001,7 @@ static int tw9910_remove(struct i2c_client *client)
if (priv->pdn_gpio)
gpiod_put(priv->pdn_gpio);
clk_put(priv->clk);
- v4l2_device_unregister_subdev(&priv->subdev);
+ v4l2_async_unregister_subdev(&priv->subdev);
return 0;
}
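
The remove() change above swaps in the unregister call that matches the async registration this driver performs at probe time; register and unregister must come from the same API family. Sketch of the symmetric pair, assuming the same priv->subdev field and not the literal probe code:

/* probe side (sketch): announce the subdev to the async framework */
ret = v4l2_async_register_subdev(&priv->subdev);
if (ret < 0)
	return ret;

/* remove side, as fixed above: tear down via the same framework */
v4l2_async_unregister_subdev(&priv->subdev);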
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 01dcf179f203..abd3152df7d0 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -6,6 +6,7 @@
*
* Supported:
* - Panasonic AMG88xx Grid-Eye Sensors
+ * - Melexis MLX90640 Thermal Cameras
*/
#include <linux/delay.h>
@@ -18,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
+#include <linux/nvmem-provider.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -66,12 +68,26 @@ static const struct v4l2_frmsize_discrete amg88xx_size = {
.height = 8,
};
+static const struct v4l2_fmtdesc mlx90640_format = {
+ .pixelformat = V4L2_PIX_FMT_Y16_BE,
+};
+
+static const struct v4l2_frmsize_discrete mlx90640_size = {
+ .width = 32,
+ .height = 26, /* 24 lines of pixel data + 2 lines of processing data */
+};
+
static const struct regmap_config amg88xx_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff
};
+static const struct regmap_config mlx90640_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+};
+
struct video_i2c_chip {
/* video dimensions */
const struct v4l2_fmtdesc *format;
@@ -88,6 +104,7 @@ struct video_i2c_chip {
unsigned int bpp;
const struct regmap_config *regmap_config;
+ struct nvmem_config *nvmem_config;
/* setup function */
int (*setup)(struct video_i2c_data *data);
@@ -102,6 +119,22 @@ struct video_i2c_chip {
int (*hwmon_init)(struct video_i2c_data *data);
};
+static int mlx90640_nvram_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct video_i2c_data *data = priv;
+
+ return regmap_bulk_read(data->regmap, 0x2400 + offset, val, bytes);
+}
+
+static struct nvmem_config mlx90640_nvram_config = {
+ .name = "mlx90640_nvram",
+ .word_size = 2,
+ .stride = 1,
+ .size = 1664,
+ .reg_read = mlx90640_nvram_read,
+};
+
/* Power control register */
#define AMG88XX_REG_PCTL 0x00
#define AMG88XX_PCTL_NORMAL 0x00
@@ -122,12 +155,23 @@ struct video_i2c_chip {
/* Temperature register */
#define AMG88XX_REG_T01L 0x80
+/* Control register */
+#define MLX90640_REG_CTL1 0x800d
+#define MLX90640_REG_CTL1_MASK 0x0380
+#define MLX90640_REG_CTL1_MASK_SHIFT 7
+
static int amg88xx_xfer(struct video_i2c_data *data, char *buf)
{
return regmap_bulk_read(data->regmap, AMG88XX_REG_T01L, buf,
data->chip->buffer_size);
}
+static int mlx90640_xfer(struct video_i2c_data *data, char *buf)
+{
+ return regmap_bulk_read(data->regmap, 0x400, buf,
+ data->chip->buffer_size);
+}
+
static int amg88xx_setup(struct video_i2c_data *data)
{
unsigned int mask = AMG88XX_FPSC_1FPS;
@@ -141,6 +185,27 @@ static int amg88xx_setup(struct video_i2c_data *data)
return regmap_update_bits(data->regmap, AMG88XX_REG_FPSC, mask, val);
}
+static int mlx90640_setup(struct video_i2c_data *data)
+{
+ unsigned int n, idx;
+
+ for (n = 0; n < data->chip->num_frame_intervals - 1; n++) {
+ if (data->frame_interval.numerator
+ != data->chip->frame_intervals[n].numerator)
+ continue;
+
+ if (data->frame_interval.denominator
+ == data->chip->frame_intervals[n].denominator)
+ break;
+ }
+
+ idx = data->chip->num_frame_intervals - n - 1;
+
+ return regmap_update_bits(data->regmap, MLX90640_REG_CTL1,
+ MLX90640_REG_CTL1_MASK,
+ idx << MLX90640_REG_CTL1_MASK_SHIFT);
+}
+
static int amg88xx_set_power_on(struct video_i2c_data *data)
{
int ret;
@@ -274,13 +339,27 @@ static int amg88xx_hwmon_init(struct video_i2c_data *data)
#define amg88xx_hwmon_init NULL
#endif
-#define AMG88XX 0
+enum {
+ AMG88XX,
+ MLX90640,
+};
static const struct v4l2_fract amg88xx_frame_intervals[] = {
{ 1, 10 },
{ 1, 1 },
};
+static const struct v4l2_fract mlx90640_frame_intervals[] = {
+ { 1, 64 },
+ { 1, 32 },
+ { 1, 16 },
+ { 1, 8 },
+ { 1, 4 },
+ { 1, 2 },
+ { 1, 1 },
+ { 2, 1 },
+};
+
static const struct video_i2c_chip video_i2c_chip[] = {
[AMG88XX] = {
.size = &amg88xx_size,
@@ -295,6 +374,18 @@ static const struct video_i2c_chip video_i2c_chip[] = {
.set_power = amg88xx_set_power,
.hwmon_init = amg88xx_hwmon_init,
},
+ [MLX90640] = {
+ .size = &mlx90640_size,
+ .format = &mlx90640_format,
+ .frame_intervals = mlx90640_frame_intervals,
+ .num_frame_intervals = ARRAY_SIZE(mlx90640_frame_intervals),
+ .buffer_size = 1664,
+ .bpp = 16,
+ .regmap_config = &mlx90640_regmap_config,
+ .nvmem_config = &mlx90640_nvram_config,
+ .setup = mlx90640_setup,
+ .xfer = mlx90640_xfer,
+ },
};
static const struct v4l2_file_operations video_i2c_fops = {
@@ -756,6 +847,21 @@ static int video_i2c_probe(struct i2c_client *client,
}
}
+ if (data->chip->nvmem_config) {
+ struct nvmem_config *config = data->chip->nvmem_config;
+ struct nvmem_device *device;
+
+ config->priv = data;
+ config->dev = &client->dev;
+
+ device = devm_nvmem_register(&client->dev, config);
+
+ if (IS_ERR(device)) {
+ dev_warn(&client->dev,
+ "failed to register nvmem device\n");
+ }
+ }
+
ret = video_register_device(&data->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0)
goto error_pm_disable;
@@ -835,12 +941,14 @@ static const struct dev_pm_ops video_i2c_pm_ops = {
static const struct i2c_device_id video_i2c_id_table[] = {
{ "amg88xx", AMG88XX },
+ { "mlx90640", MLX90640 },
{}
};
MODULE_DEVICE_TABLE(i2c, video_i2c_id_table);
static const struct of_device_id video_i2c_of_match[] = {
{ .compatible = "panasonic,amg88xx", .data = &video_i2c_chip[AMG88XX] },
+ { .compatible = "melexis,mlx90640", .data = &video_i2c_chip[MLX90640] },
{}
};
MODULE_DEVICE_TABLE(of, video_i2c_of_match);
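
As a concrete check of the index math in mlx90640_setup(): the interval table above runs fastest (1/64 s) to slowest (2 s) while the register field counts the other way, so a requested 1/64 s matches n = 0 of eight entries and programs idx = 8 - 0 - 1 = 7 into the CTL1 field. A standalone sketch of just that mapping, plain C rather than driver code:

#include <stdio.h>

struct fract { unsigned int num, den; };

static const struct fract intervals[] = {
	{ 1, 64 }, { 1, 32 }, { 1, 16 }, { 1, 8 },
	{ 1, 4 }, { 1, 2 }, { 1, 1 }, { 2, 1 },
};

int main(void)
{
	unsigned int n, count = sizeof(intervals) / sizeof(intervals[0]);
	struct fract want = { 1, 64 };	/* requested frame interval */

	/* same search as mlx90640_setup(); an unmatched interval falls
	 * through to n = count - 1, i.e. the slowest setting (idx = 0) */
	for (n = 0; n < count - 1; n++)
		if (intervals[n].num == want.num &&
		    intervals[n].den == want.den)
			break;

	printf("idx = %u\n", count - n - 1);	/* prints: idx = 7 */
	return 0;
}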
diff --git a/drivers/media/media-request.c b/drivers/media/media-request.c
index c71a34ae6383..eec2e2b2f6ec 100644
--- a/drivers/media/media-request.c
+++ b/drivers/media/media-request.c
@@ -100,6 +100,7 @@ static __poll_t media_request_poll(struct file *filp,
if (!(poll_requested_events(wait) & EPOLLPRI))
return 0;
+ poll_wait(filp, &req->poll_wait, wait);
spin_lock_irqsave(&req->lock, flags);
if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
ret = EPOLLPRI;
@@ -110,8 +111,6 @@ static __poll_t media_request_poll(struct file *filp,
goto unlock;
}
- poll_wait(filp, &req->poll_wait, wait);
-
unlock:
spin_unlock_irqrestore(&req->lock, flags);
return ret;
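
The reordering above is the classic poll fix: poll_wait() must add the file to the wait queue before the completion state is tested, otherwise a request completing between an early test and a never-reached poll_wait() is missed and the poller can sleep indefinitely. The canonical shape, as a device-agnostic sketch (struct example_dev and its fields are hypothetical):

static __poll_t example_poll(struct file *filp, poll_table *wait)
{
	struct example_dev *dev = filp->private_data;	/* hypothetical */
	__poll_t mask = 0;

	poll_wait(filp, &dev->waitq, wait);	/* 1: register first */
	if (READ_ONCE(dev->ready))		/* 2: then test state */
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}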
diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.c b/drivers/media/pci/bt8xx/bttv-audio-hook.c
index 346fc7f58839..8febe7358a8f 100644
--- a/drivers/media/pci/bt8xx/bttv-audio-hook.c
+++ b/drivers/media/pci/bt8xx/bttv-audio-hook.c
@@ -1,5 +1,5 @@
/*
- * Handlers for board audio hooks, splitted from bttv-cards
+ * Handlers for board audio hooks, split from bttv-cards
*
* Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
* This code is placed under the terms of the GNU General Public License
diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.h b/drivers/media/pci/bt8xx/bttv-audio-hook.h
index be16a537a03a..c61b9ac4f4e3 100644
--- a/drivers/media/pci/bt8xx/bttv-audio-hook.h
+++ b/drivers/media/pci/bt8xx/bttv-audio-hook.h
@@ -1,5 +1,5 @@
/*
- * Handlers for board audio hooks, splitted from bttv-cards
+ * Handlers for board audio hooks, split from bttv-cards
*
* Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
* This code is placed under the terms of the GNU General Public License
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index 2616243b2c49..b1c6f3e9c3d0 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2,7 +2,7 @@
bttv-cards.c
- this file has configuration informations - card-specific stuff
+ this file has configuration information - card-specific stuff
like the big tvcards array for the most part
Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
@@ -1391,7 +1391,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = {0x947fff, 0x987fff,0x947fff,0x947fff },
.gpiomute = 0x947fff,
/* tvtuner, radio, external,internal, mute, stereo
- * tuner, Composit, SVid, Composit-on-Svid-adapter */
+ * tuner, Composite, SVid, Composite-on-Svid-adapter */
.muxsel = MUXSEL(2, 3, 0, 1),
.tuner_type = TUNER_MT2032,
.tuner_addr = ADDR_UNSET,
@@ -1411,7 +1411,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = {0x947fff, 0x987fff,0x947fff,0x947fff },
.gpiomute = 0x947fff,
/* tvtuner, radio, external,internal, mute, stereo
- * tuner, Composit, SVid, Composit-on-Svid-adapter */
+ * tuner, Composite, SVid, Composite-on-Svid-adapter */
.muxsel = MUXSEL(2, 3, 0, 1),
.tuner_type = TUNER_MT2032,
.tuner_addr = ADDR_UNSET,
@@ -4180,7 +4180,7 @@ static void init_PXC200(struct bttv *btv)
bttv_I2CWrite(btv,0x5E,0,0x80,1);
/* Initialise 12C508 PIC */
- /* The I2CWrite and I2CRead commmands are actually to the
+ /* The I2CWrite and I2CRead commands are actually to the
* same chips - but the R/W bit is included in the address
* argument so the numbers are different */
@@ -4289,7 +4289,7 @@ init_RTV24 (struct bttv *btv)
/* ----------------------------------------------------------------------- */
/*
* The PCI-8604PW contains a CPLD, probably an ispMACH 4A, that filters
- * the PCI REQ signals comming from the four BT878 chips. After power
+ * the PCI REQ signals coming from the four BT878 chips. After power
* up, the CPLD does not forward requests to the bus, which prevents
* the BT878 from fetching RISC instructions from memory. While the
* CPLD is connected to most of the GPIOs of PCI device 0xD, only
@@ -4405,7 +4405,7 @@ static void rv605_muxsel(struct bttv *btv, unsigned int input)
gpio_bits(0x07f, muxgpio[input]);
- /* reset all conections */
+ /* reset all connections */
gpio_bits(0x200,0x200);
mdelay(1);
gpio_bits(0x200,0x000);
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index d09785fd37a8..b7150648081d 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2435,7 +2435,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = field;
- /* update our state informations */
+ /* update our state information */
fh->fmt = fmt;
fh->cap.field = f->fmt.pix.field;
fh->cap.last = V4L2_FIELD_NONE;
@@ -3064,7 +3064,7 @@ static int bttv_open(struct file *file)
V4L2 apps we now reset the cropping parameters as seen through
this fh, which is to say VIDIOC_G_SELECTION and scaling limit checks
will use btv->crop[0], the default cropping parameters for the
- current video standard, and VIDIOC_S_FMT will not implicitely
+ current video standard, and VIDIOC_S_FMT will not implicitly
change the cropping parameters until VIDIOC_S_SELECTION has been
called. */
fh->do_crop = !reset_crop; /* module parameter */
@@ -3600,9 +3600,7 @@ static void
bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup,
struct bttv_buffer_set *curr, unsigned int state)
{
- struct timeval ts;
-
- v4l2_get_timestamp(&ts);
+ u64 ts = ktime_get_ns();
if (wakeup->top == wakeup->bottom) {
if (NULL != wakeup->top && curr->top != wakeup->top) {
@@ -3643,7 +3641,7 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
if (NULL == wakeup)
return;
- v4l2_get_timestamp(&wakeup->vb.ts);
+ wakeup->vb.ts = ktime_get_ns();
wakeup->vb.field_count = btv->field_count;
wakeup->vb.state = state;
wake_up(&wakeup->vb.done);
@@ -3713,7 +3711,7 @@ bttv_irq_wakeup_top(struct bttv *btv)
btv->curr.top = NULL;
bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0);
- v4l2_get_timestamp(&wakeup->vb.ts);
+ wakeup->vb.ts = ktime_get_ns();
wakeup->vb.field_count = btv->field_count;
wakeup->vb.state = VIDEOBUF_DONE;
wake_up(&wakeup->vb.done);
diff --git a/drivers/media/pci/bt8xx/bttv-risc.c b/drivers/media/pci/bt8xx/bttv-risc.c
index 74aff6877d9c..6b3c73674a3c 100644
--- a/drivers/media/pci/bt8xx/bttv-risc.c
+++ b/drivers/media/pci/bt8xx/bttv-risc.c
@@ -93,7 +93,7 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
offset+=bpl;
} else {
- /* scanline needs to be splitted */
+ /* scanline needs to be split */
todo = bpl;
*(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_SOL|
(sg_dma_len(sg)-offset));
diff --git a/drivers/media/pci/bt8xx/bttv.h b/drivers/media/pci/bt8xx/bttv.h
index a27384adadd2..d24b9ef9f59f 100644
--- a/drivers/media/pci/bt8xx/bttv.h
+++ b/drivers/media/pci/bt8xx/bttv.h
@@ -288,7 +288,7 @@ extern void bttv_init_card1(struct bttv *btv);
extern void bttv_init_card2(struct bttv *btv);
extern void bttv_init_tuner(struct bttv *btv);
-/* card-specific funtions */
+/* card-specific functions */
extern void tea5757_set_freq(struct bttv *btv, unsigned short freq);
extern u32 bttv_tda9880_setnorm(struct bttv *btv, u32 gpiobits);
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index b98de2a22f78..c94318c71a0c 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1295,15 +1295,15 @@ static int dst_get_signal(struct dst_state *state)
static int dst_tone_power_cmd(struct dst_state *state)
{
- u8 paket[8] = { 0x00, 0x09, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00 };
+ u8 packet[8] = { 0x00, 0x09, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00 };
if (state->dst_type != DST_TYPE_IS_SAT)
return -EOPNOTSUPP;
- paket[4] = state->tx_tuna[4];
- paket[2] = state->tx_tuna[2];
- paket[3] = state->tx_tuna[3];
- paket[7] = dst_check_sum (paket, 7);
- return dst_command(state, paket, 8);
+ packet[4] = state->tx_tuna[4];
+ packet[2] = state->tx_tuna[2];
+ packet[3] = state->tx_tuna[3];
+ packet[7] = dst_check_sum (packet, 7);
+ return dst_command(state, packet, 8);
}
static int dst_get_tuna(struct dst_state *state)
@@ -1429,18 +1429,18 @@ error:
static int dst_set_diseqc(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd)
{
struct dst_state *state = fe->demodulator_priv;
- u8 paket[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec };
+ u8 packet[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec };
if (state->dst_type != DST_TYPE_IS_SAT)
return -EOPNOTSUPP;
if (cmd->msg_len > 0 && cmd->msg_len < 5)
- memcpy(&paket[3], cmd->msg, cmd->msg_len);
+ memcpy(&packet[3], cmd->msg, cmd->msg_len);
else if (cmd->msg_len == 5 && state->dst_hw_cap & DST_TYPE_HAS_DISEQC5)
- memcpy(&paket[2], cmd->msg, cmd->msg_len);
+ memcpy(&packet[2], cmd->msg, cmd->msg_len);
else
return -EINVAL;
- paket[7] = dst_check_sum(&paket[0], 7);
- return dst_command(state, paket, 8);
+ packet[7] = dst_check_sum(&packet[0], 7);
+ return dst_command(state, packet, 8);
}
static int dst_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage)
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index c088de551081..f9fa3a7c3b8f 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -375,7 +375,7 @@ static void cobalt_dma_stop_streaming(struct cobalt_stream *s)
}
spin_unlock_irqrestore(&s->irqlock, flags);
- /* Wait 100 milisecond for DMA to finish, abort on timeout. */
+ /* Wait 100 milliseconds for DMA to finish, abort on timeout. */
if (!wait_event_timeout(s->q.done_wq, is_dma_done(s),
msecs_to_jiffies(timeout_msec))) {
omni_sg_dma_abort_channel(s);
diff --git a/drivers/media/pci/cx18/cx18-cards.h b/drivers/media/pci/cx18/cx18-cards.h
index 02d0fb703a41..c563cc2358ba 100644
--- a/drivers/media/pci/cx18/cx18-cards.h
+++ b/drivers/media/pci/cx18/cx18-cards.h
@@ -83,7 +83,7 @@ struct cx18_gpio_i2c_slave_reset {
u32 active_hi_mask; /* GPIO outputs that reset i2c chips when high */
int msecs_asserted; /* time period reset must remain asserted */
int msecs_recovery; /* time after deassert for chips to be ready */
- u32 ir_reset_mask; /* GPIO to reset the Zilog Z8F0811 IR contoller */
+ u32 ir_reset_mask; /* GPIO to reset the Zilog Z8F0811 IR controller */
};
struct cx18_gpio_audio_input { /* select tuner/line in input */
diff --git a/drivers/media/pci/cx18/cx18-dvb.c b/drivers/media/pci/cx18/cx18-dvb.c
index a3a7f7065349..61452c50a9c3 100644
--- a/drivers/media/pci/cx18/cx18-dvb.c
+++ b/drivers/media/pci/cx18/cx18-dvb.c
@@ -120,8 +120,8 @@ static struct zl10353_config leadtek_dvr3100h_demod = {
/*
* Due to
*
- * 1. an absence of information on how to prgram the MT352
- * 2. the Linux mt352 module pushing MT352 initialzation off onto us here
+ * 1. an absence of information on how to program the MT352
+ * 2. the Linux mt352 module pushing MT352 initialization off onto us here
*
* We have to use an init sequence that *you* must extract from the Windows
* driver (yuanrap.sys) and which we load as a firmware.
@@ -458,7 +458,7 @@ void cx18_dvb_unregister(struct cx18_stream *stream)
dvb_unregister_adapter(dvb_adapter);
}
-/* All the DVB attach calls go here, this function get's modified
+/* All the DVB attach calls go here, this function gets modified
* for each new card. cx18_dvb_start_feed() will also need changes.
*/
static int dvb_register(struct cx18_stream *stream)
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index a3f44e30f821..f778965a2eb8 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -403,7 +403,7 @@ static size_t cx18_copy_mdl_to_user(struct cx18_stream *s,
tot_written += rc;
if (stop || /* Forced stopping point for VBI insertion */
- tot_written >= ucount || /* Reader request statisfied */
+ tot_written >= ucount || /* Reader request satisfied */
mdl->curr_buf->readpos < mdl->curr_buf->bytesused ||
mdl->readpos >= mdl->bytesused) /* MDL buffers drained */
break;
diff --git a/drivers/media/pci/cx18/cx18-io.h b/drivers/media/pci/cx18/cx18-io.h
index a3c96fb5d28d..da7871fdb56d 100644
--- a/drivers/media/pci/cx18/cx18-io.h
+++ b/drivers/media/pci/cx18/cx18-io.h
@@ -23,7 +23,7 @@
/*
* Readback and retry of MMIO access for reliability:
* The concept was suggested by Steve Toth <stoth@linuxtv.org>.
- * The implmentation is the fault of Andy Walls <awalls@md.metrocast.net>.
+ * The implementation is the fault of Andy Walls <awalls@md.metrocast.net>.
*
* *write* functions are implied to retry the mmio unless suffixed with _noretry
* *read* functions never retry the mmio (it never helps to do so)
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index f66dd63e1994..0ffd2196a980 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -197,7 +197,7 @@ static void cx18_mdl_send_to_videobuf(struct cx18_stream *s,
}
if (dispatch) {
- v4l2_get_timestamp(&vb_buf->vb.ts);
+ vb_buf->vb.ts = ktime_get_ns();
list_del(&vb_buf->vb.queue);
vb_buf->vb.state = VIDEOBUF_DONE;
wake_up(&vb_buf->vb.done);
diff --git a/drivers/media/pci/cx18/cx18-vbi.c b/drivers/media/pci/cx18/cx18-vbi.c
index 81f1e27436fd..48cb96f953a0 100644
--- a/drivers/media/pci/cx18/cx18-vbi.c
+++ b/drivers/media/pci/cx18/cx18-vbi.c
@@ -24,7 +24,7 @@
/*
* Raster Reference/Protection (RP) bytes, used in Start/End Active
* Video codes emitted from the digitzer in VIP 1.x mode, that flag the start
- * of VBI sample or VBI ancillary data regions in the digitial ratser line.
+ * of VBI sample or VBI ancillary data regions in the digital raster line.
*
* Task FieldEven VerticalBlank HorizontalBlank 0 0 0 0
*/
diff --git a/drivers/media/pci/cx18/cx23418.h b/drivers/media/pci/cx18/cx23418.h
index 15205b662952..e9f6e50ca5b4 100644
--- a/drivers/media/pci/cx18/cx23418.h
+++ b/drivers/media/pci/cx18/cx23418.h
@@ -439,7 +439,7 @@
/* Error in I2C data xfer (but I2C device is present) */
#define CXERR_I2CDEV_XFERERR 0x000011
-/* Chanel changing component not ready */
+/* Channel changing component not ready */
#define CXERR_CHANNELNOTREADY 0x000012
/* PPU (Presensation/Decoder) mail box is corrupted */
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index a00b77d80ed9..4863bd4ea5d0 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1561,7 +1561,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
pr_info("%s: registered device %s [mpeg]\n",
dev->name, video_device_node_name(dev->v4l_device));
- /* ST: Configure the encoder paramaters, but don't begin
+ /* ST: Configure the encoder parameters, but don't begin
* encoding, this resolves an issue where the first time the
* encoder is started video can be choppy.
*/
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index ee9d329c4038..c9c1385208f9 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -59,7 +59,7 @@ module_param(audio_debug, int, 0644);
MODULE_PARM_DESC(audio_debug, "enable debug messages [analog audio]");
/****************************************************************************
- Board specific funtions
+ Board specific functions
****************************************************************************/
/* Constants taken from cx88-reg.h */
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index fd5c52b21436..cec3cbca8d32 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1996,9 +1996,9 @@ static inline int encoder_on_portc(struct cx23885_dev *dev)
* and report errors if we think we're tampering with a GPIo that might
* be assigned to the encoder (and used for the host bus).
*
- * GPIO 2 thru 0 - On the cx23885 bridge
- * GPIO 18 thru 3 - On the cx23417 host bus interface
- * GPIO 23 thru 19 - On the cx25840 a/v core
+ * GPIO 2 through 0 - On the cx23885 bridge
+ * GPIO 18 through 3 - On the cx23417 host bus interface
+ * GPIO 23 through 19 - On the cx25840 a/v core
*/
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index cf965efabe66..4d34799431dc 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -246,7 +246,7 @@ struct cx23885_i2c {
struct i2c_client i2c_client;
u32 i2c_rc;
- /* 885 registers used for raw addess */
+ /* 885 registers used for raw address */
u32 i2c_period;
u32 reg_ctrl;
u32 reg_stat;
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index 1d775c90df51..a4d66141c4bb 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -548,7 +548,7 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
ror = stats & STATS_ROR; /* Rx FIFO Over Run */
tse = irqen & IRQEN_TSE; /* Tx FIFO Service Request IRQ Enable */
- rse = irqen & IRQEN_RSE; /* Rx FIFO Service Reuqest IRQ Enable */
+ rse = irqen & IRQEN_RSE; /* Rx FIFO Service Request IRQ Enable */
rte = irqen & IRQEN_RTE; /* Rx Pulse Width Timer Time Out IRQ Enable */
roe = irqen & IRQEN_ROE; /* Rx FIFO Over Run IRQ Enable */
@@ -638,7 +638,7 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED;
}
if (v) {
- /* Clear STATS_ROR & STATS_RTO as needed by reseting hardware */
+ /* Clear STATS_ROR & STATS_RTO as needed by resetting hardware */
cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl & ~v);
cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl);
*handled = true;
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index 0a6c90e92557..926e12a1554a 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -120,7 +120,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
/****************************************************************************
- Module specific funtions
+ Module specific functions
****************************************************************************/
/* Constants taken from cx88-reg.h */
#define AUD_INT_DN_RISCI1 (1 << 0)
diff --git a/drivers/media/pci/cx25821/cx25821-sram.h b/drivers/media/pci/cx25821/cx25821-sram.h
index b94e0d4df664..a907662dbb1c 100644
--- a/drivers/media/pci/cx25821/cx25821-sram.h
+++ b/drivers/media/pci/cx25821/cx25821-sram.h
@@ -24,7 +24,7 @@
#define AUDIO_CMDS_SIZE 80 /* AUDIO CMDS size in bytes */
#define MBIF_CMDS_SIZE 80 /* MBIF CMDS size in bytes */
-/* #define RX_SRAM_POOL_START_SIZE = 0; // Start of useable RX SRAM for buffers */
+/* #define RX_SRAM_POOL_START_SIZE = 0; // Start of usable RX SRAM for buffers */
#define VID_IQ_SIZE 64 /* VID instruction queue size in bytes */
#define MBIF_IQ_SIZE 64
#define AUDIO_IQ_SIZE 64 /* AUD instruction queue size in bytes */
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index 25eba4ac4499..b422275ff4f1 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -156,7 +156,7 @@ struct cx25821_i2c {
struct i2c_client i2c_client;
u32 i2c_rc;
- /* cx25821 registers used for raw addess */
+ /* cx25821 registers used for raw address */
u32 i2c_period;
u32 reg_ctrl;
u32 reg_stat;
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index a84c8270ea13..60138ca3372b 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -517,7 +517,7 @@ static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap,
msgs[i].buf[byte] = rc;
}
} else if ((msgs[i].buf[0] == 0xf7) && (msgs[i].addr == 0x55)) {
- /* prepaired for cx24116 firmware */
+ /* prepared for cx24116 firmware */
/* Write in small blocks */
len = msgs[i].len - 1;
k = 1;
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index 17d69bd5d7f1..49677ee889e3 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -46,7 +46,6 @@ static int read_i2c_reg(void __iomem *addr, u8 index, u8 *data)
u32 tmp = index;
iowrite32((tmp << 17) | IIC_READ, addr + IIC_CSR2);
- mmiowb();
udelay(45); /* wait at least 43 usec for NEW_CYCLE to clear */
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
return -EIO; /* error: NEW_CYCLE not cleared */
@@ -77,7 +76,6 @@ static int write_i2c_reg(void __iomem *addr, u8 index, u8 data)
u32 tmp = index;
iowrite32((tmp << 17) | IIC_WRITE | data, addr + IIC_CSR2);
- mmiowb();
udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */
if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
return -EIO; /* error: NEW_CYCLE not cleared */
@@ -104,7 +102,6 @@ static void write_i2c_reg_nowait(void __iomem *addr, u8 index, u8 data)
u32 tmp = index;
iowrite32((tmp << 17) | IIC_WRITE | data, addr + IIC_CSR2);
- mmiowb();
}
/**
@@ -264,7 +261,6 @@ static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id)
FLD_DN_ODD | FLD_DN_EVEN |
CAP_CONT_EVEN | CAP_CONT_ODD,
ipd->regs + CSR1);
- mmiowb();
}
spin_lock(&ipd->lock);
@@ -282,7 +278,6 @@ static irqreturn_t dt3155_irq_handler_even(int irq, void *dev_id)
iowrite32(dma_addr + ipd->width, ipd->regs + ODD_DMA_START);
iowrite32(ipd->width, ipd->regs + EVEN_DMA_STRIDE);
iowrite32(ipd->width, ipd->regs + ODD_DMA_STRIDE);
- mmiowb();
}
/* enable interrupts, clear all irq flags */
@@ -437,12 +432,10 @@ static int dt3155_init_board(struct dt3155_priv *pd)
/* resetting the adapter */
iowrite32(ADDR_ERR_ODD | ADDR_ERR_EVEN | FLD_CRPT_ODD | FLD_CRPT_EVEN |
FLD_DN_ODD | FLD_DN_EVEN, pd->regs + CSR1);
- mmiowb();
msleep(20);
/* initializing adapter registers */
iowrite32(FIFO_EN | SRST, pd->regs + CSR1);
- mmiowb();
iowrite32(0xEEEEEE01, pd->regs + EVEN_PIXEL_FMT);
iowrite32(0xEEEEEE01, pd->regs + ODD_PIXEL_FMT);
iowrite32(0x00000020, pd->regs + FIFO_TRIGER);
@@ -454,7 +447,6 @@ static int dt3155_init_board(struct dt3155_priv *pd)
iowrite32(0, pd->regs + MASK_LENGTH);
iowrite32(0x0005007C, pd->regs + FIFO_FLAG_CNT);
iowrite32(0x01010101, pd->regs + IIC_CLK_DUR);
- mmiowb();
/* verifying that we have a DT3155 board (not just a SAA7116 chip) */
read_i2c_reg(pd->regs, DT_ID, &tmp);
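
The dropped mmiowb() calls track the kernel-wide retirement of that barrier: where ordering of posted MMIO writes against a releasing spinlock matters, the spinlock implementation now provides it on the architectures that need it, and the remaining unlocked call sites here did not need the barrier at all. The resulting idiom, sketched with a hypothetical device:

/* Sketch: the unlock now provides the ordering mmiowb() used to add. */
static void example_kick(struct example_priv *priv, u32 val)
{
	spin_lock(&priv->lock);
	iowrite32(val, priv->regs + 0x10);	/* hypothetical register */
	spin_unlock(&priv->lock);		/* subsumes old mmiowb() */
}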
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index cdb79ae2d8dc..f8020ebe9f05 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -264,7 +264,7 @@ static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
*/
/*
- * shift for keeping value range suitable for 32-bit integer arithmetics
+ * shift for keeping value range suitable for 32-bit integer arithmetic
*/
#define LIMIT_SHIFT 8
@@ -846,7 +846,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
struct sg_table *sg;
- struct sg_page_iter sg_iter;
+ struct sg_dma_page_iter sg_iter;
int i, j;
if (lops <= 0 || lops > CIO2_MAX_LOPS) {
@@ -873,7 +873,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
b->offset = sg->sgl->offset;
i = j = 0;
- for_each_sg_page(sg->sgl, &sg_iter, sg->nents, 0) {
+ for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) {
if (!pages--)
break;
b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
@@ -1810,7 +1810,8 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
/* Register notifier for subdevices we care */
r = cio2_notifier_init(cio2);
- if (r)
+ /* Proceed without sensors connected to allow the device to suspend. */
+ if (r && r != -ENODEV)
goto fail_cio2_queue_exit;
r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
@@ -2047,7 +2048,7 @@ module_pci_driver(cio2_pci_driver);
MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
-MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
+MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index c72cbbd2d40c..06ca4e23f9fb 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -70,8 +70,25 @@ config VIDEO_FB_IVTV
This is used in the Hauppauge PVR-350 card. There is a driver
homepage at <http://www.ivtvdriver.org>.
- In order to use this module, you will need to boot with PAT disabled
- on x86 systems, using the nopat kernel parameter.
-
To compile this driver as a module, choose M here: the
module will be called ivtvfb.
+
+config VIDEO_FB_IVTV_FORCE_PAT
+ bool "force cx23415 framebuffer init with x86 PAT enabled"
+ depends on VIDEO_FB_IVTV && X86_PAT
+ default n
+ ---help---
+ With PAT enabled, the cx23415 framebuffer driver does not
+ utilize write-combined caching on the framebuffer memory.
+ For this reason, the driver will by default disable itself
+ when initialized on a kernel with PAT enabled (i.e. not
+ using the nopat kernel parameter).
+
+ The driver is not easily upgradable to the PAT-aware
+ ioremap_wc() API since the firmware hides from the driver
+ the address ranges that should be marked write-combined.
+
+ With this setting enabled, the framebuffer will initialize on
+ PAT-enabled systems but the framebuffer memory will be uncached.
+
+ If unsure, say N.
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 1380474519f2..c3c2c79585f5 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -110,7 +110,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
/*
* Inherit the -EFAULT from rc's
* initialization, but allow it to be
- * overriden by uv_pages above if it was an
+ * overridden by uv_pages above if it was an
* actual errno.
*/
} else {
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 8ec2525d8ef5..cfd21040d0e3 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -55,6 +55,7 @@
/* card parameters */
static int ivtvfb_card_id = -1;
static int ivtvfb_debug = 0;
+static bool ivtvfb_force_pat = IS_ENABLED(CONFIG_VIDEO_FB_IVTV_FORCE_PAT);
static bool osd_laced;
static int osd_depth;
static int osd_upper;
@@ -64,6 +65,7 @@ static int osd_xres;
module_param(ivtvfb_card_id, int, 0444);
module_param_named(debug,ivtvfb_debug, int, 0644);
+module_param_named(force_pat, ivtvfb_force_pat, bool, 0644);
module_param(osd_laced, bool, 0444);
module_param(osd_depth, int, 0444);
module_param(osd_upper, int, 0444);
@@ -79,6 +81,9 @@ MODULE_PARM_DESC(debug,
"Debug level (bitmask). Default: errors only\n"
"\t\t\t(debug = 3 gives full debugging)");
+MODULE_PARM_DESC(force_pat,
+ "Force initialization on x86 PAT-enabled systems (bool).\n");
+
/* Why upper, left, xres, yres, depth, laced ? To match terminology used
by fbset.
Why start at 1 for left & upper coordinate ? Because X doesn't allow 0 */
@@ -1167,8 +1172,15 @@ static int ivtvfb_init_card(struct ivtv *itv)
#ifdef CONFIG_X86_64
if (pat_enabled()) {
- pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
- return -ENODEV;
+ if (ivtvfb_force_pat) {
+ pr_info("PAT is enabled. Write-combined framebuffer caching will be disabled.\n");
+ pr_info("To enable caching, boot with nopat kernel parameter\n");
+ } else {
+ pr_warn("ivtvfb needs PAT disabled for write-combined framebuffer caching.\n");
+ pr_warn("Boot with nopat kernel parameter to use caching, or use the\n");
+ pr_warn("force_pat module parameter to run with caching disabled\n");
+ return -ENODEV;
+ }
}
#endif
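
In practice the new escape hatch is exercised either at build time via CONFIG_VIDEO_FB_IVTV_FORCE_PAT or at load time, e.g. modprobe ivtvfb force_pat=1 (or ivtvfb.force_pat=1 on the kernel command line when built in), accepting uncached framebuffer memory in exchange for keeping PAT enabled.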
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index bd870e60c32b..896d2d856795 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -805,7 +805,7 @@ again:
mchip_hsize() * mchip_vsize() * 2);
meye.grab_buffer[reqnr].size = mchip_hsize() * mchip_vsize() * 2;
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
- v4l2_get_timestamp(&meye.grab_buffer[reqnr].timestamp);
+ meye.grab_buffer[reqnr].ts = ktime_get_ns();
meye.grab_buffer[reqnr].sequence = sequence++;
kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
sizeof(int), &meye.doneq_lock);
@@ -826,7 +826,7 @@ again:
size);
meye.grab_buffer[reqnr].size = size;
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
- v4l2_get_timestamp(&meye.grab_buffer[reqnr].timestamp);
+ meye.grab_buffer[reqnr].ts = ktime_get_ns();
meye.grab_buffer[reqnr].sequence = sequence++;
kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
sizeof(int), &meye.doneq_lock);
@@ -1283,7 +1283,7 @@ static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->flags |= V4L2_BUF_FLAG_DONE;
buf->field = V4L2_FIELD_NONE;
- buf->timestamp = meye.grab_buffer[index].timestamp;
+ buf->timestamp = ns_to_timeval(meye.grab_buffer[index].ts);
buf->sequence = meye.grab_buffer[index].sequence;
buf->memory = V4L2_MEMORY_MMAP;
buf->m.offset = index * gbufsize;
@@ -1349,7 +1349,7 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->bytesused = meye.grab_buffer[reqnr].size;
buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->field = V4L2_FIELD_NONE;
- buf->timestamp = meye.grab_buffer[reqnr].timestamp;
+ buf->timestamp = ns_to_timeval(meye.grab_buffer[reqnr].ts);
buf->sequence = meye.grab_buffer[reqnr].sequence;
buf->memory = V4L2_MEMORY_MMAP;
buf->m.offset = reqnr * gbufsize;
diff --git a/drivers/media/pci/meye/meye.h b/drivers/media/pci/meye/meye.h
index c4a8a5fe040c..aff6631535be 100644
--- a/drivers/media/pci/meye/meye.h
+++ b/drivers/media/pci/meye/meye.h
@@ -277,11 +277,11 @@
struct meye_grab_buffer {
int state; /* state of buffer */
unsigned long size; /* size of jpg frame */
- struct timeval timestamp; /* timestamp */
+ u64 ts; /* timestamp */
unsigned long sequence; /* sequence number */
};
-/* size of kfifos containings buffer indices */
+/* size of kfifos containing buffer indices */
#define MEYE_QUEUE_SIZE MEYE_MAX_BUFNBRS
/* Motion Eye device structure */
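
The meye conversion shows this series' recurring timestamp pattern in miniature: keep the internal record as monotonic nanoseconds and translate only at the UAPI boundary, since v4l2_buffer still exposes a struct timeval. Both halves, condensed from the hunks above as a sketch:

/* capture path: stamp the buffer in monotonic ns */
meye.grab_buffer[reqnr].ts = ktime_get_ns();

/* ioctl path: convert at the edge, where userspace expects a timeval */
buf->timestamp = ns_to_timeval(meye.grab_buffer[reqnr].ts);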
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 25f16833a475..27953b3610a3 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -1014,7 +1014,7 @@ static int FillTSIdleBuffer(struct SRingBufferDescriptor *pIdleBuffer,
/* Point to first buffer entry */
struct SBufferHeader *Cur = pRingBuffer->Head;
int i;
- /* Loop thru all buffer and set Buffer 2 pointers to TSIdlebuffer */
+ /* Loop through all buffers and set Buffer 2 pointers to TSIdlebuffer */
for (i = 0; i < n; i++) {
Cur->Buffer2 = pIdleBuffer->Head->Buffer1;
Cur->scList2 = pIdleBuffer->Head->scList1;
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index f4b8030e2369..393f4c596819 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -200,16 +200,10 @@ static const u8 va1j5jf8007t_25mhz_configs[][2] = {
static int config_demod(struct i2c_client *cl, enum pt1_fe_clk clk)
{
int ret;
- u8 buf[2] = {0x01, 0x80};
bool is_sat;
const u8 (*cfg_data)[2];
int i, len;
- ret = i2c_master_send(cl, buf, 2);
- if (ret < 0)
- return ret;
- usleep_range(30000, 50000);
-
is_sat = !strncmp(cl->name, TC90522_I2C_DEV_SAT,
strlen(TC90522_I2C_DEV_SAT));
if (is_sat) {
@@ -260,6 +254,46 @@ static int config_demod(struct i2c_client *cl, enum pt1_fe_clk clk)
return 0;
}
+/*
+ * Init registers for each terrestrial/satellite block pair in the demod.
+ * Note that resetting a terr. block also resets its peer sat. block.
+ * This function must be called before configuring any demod block
+ * (before pt1_wakeup(), fe->ops.init()).
+ */
+static int pt1_demod_block_init(struct pt1 *pt1)
+{
+ struct i2c_client *cl;
+ u8 buf[2] = {0x01, 0x80};
+ int ret;
+ int i;
+
+ /* reset all terr. & sat. pairs first */
+ for (i = 0; i < PT1_NR_ADAPS; i++) {
+ cl = pt1->adaps[i]->demod_i2c_client;
+ if (strncmp(cl->name, TC90522_I2C_DEV_TER,
+ strlen(TC90522_I2C_DEV_TER)))
+ continue;
+
+ ret = i2c_master_send(cl, buf, 2);
+ if (ret < 0)
+ return ret;
+ usleep_range(30000, 50000);
+ }
+
+ for (i = 0; i < PT1_NR_ADAPS; i++) {
+ cl = pt1->adaps[i]->demod_i2c_client;
+ if (strncmp(cl->name, TC90522_I2C_DEV_SAT,
+ strlen(TC90522_I2C_DEV_SAT)))
+ continue;
+
+ ret = i2c_master_send(cl, buf, 2);
+ if (ret < 0)
+ return ret;
+ usleep_range(30000, 50000);
+ }
+ return 0;
+}
+
static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
writel(data, pt1->regs + reg * 4);
@@ -987,6 +1021,10 @@ static int pt1_init_frontends(struct pt1 *pt1)
goto tuner_release;
}
+ ret = pt1_demod_block_init(pt1);
+ if (ret < 0)
+ goto fe_unregister;
+
return 0;
tuner_release:
@@ -1245,6 +1283,10 @@ static int pt1_resume(struct device *dev)
pt1_update_power(pt1);
usleep_range(1000, 2000);
+ ret = pt1_demod_block_init(pt1);
+ if (ret < 0)
+ goto resume_err;
+
for (i = 0; i < PT1_NR_ADAPS; i++)
dvb_frontend_reinitialise(pt1->adaps[i]->fe);
diff --git a/drivers/media/pci/pt3/pt3.h b/drivers/media/pci/pt3/pt3.h
index 495891a4ee17..a53124438f51 100644
--- a/drivers/media/pci/pt3/pt3.h
+++ b/drivers/media/pci/pt3/pt3.h
@@ -76,7 +76,7 @@ struct xfer_desc {
u32 addr_l; /* bus address of target data buffer */
u32 addr_h;
u32 size;
- u32 next_l; /* bus adddress of the next xfer_desc */
+ u32 next_l; /* bus address of the next xfer_desc */
u32 next_h;
};
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 40ce033cb884..94d6484a3afe 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -6423,7 +6423,7 @@ struct pci_device_id saa7134_pci_tbl[] = {
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x5168,
- .subdevice = 0x3502, /* whats the difference to 0x3306 ?*/
+ .subdevice = 0x3502, /* what's the difference from 0x3306? */
.driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c
index 44440c6208df..e94324b1de68 100644
--- a/drivers/media/pci/saa7146/mxb.c
+++ b/drivers/media/pci/saa7146/mxb.c
@@ -399,7 +399,7 @@ static int mxb_init_done(struct saa7146_dev* dev)
/* check if the saa7740 (aka 'sound arena module') is present
on the mxb. if so, we must initialize it. due to lack of
- informations about the saa7740, the values were reverse
+ information about the saa7740, the values were reverse
engineered. */
msg.addr = 0x1b;
msg.flags = 0;
@@ -495,7 +495,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
input_port_selection[input].hps_sync);
/* prepare switching of tea6415c and saa7111a;
- have a look at the 'background'-file for further informations */
+ have a look at the 'background'-file for further information */
switch (input) {
case TUNER:
i = SAA7115_COMPOSITE0;
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index e318ccf81277..d6c996f39cf2 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -749,7 +749,7 @@ int saa7164_api_initialize_dif(struct saa7164_port *port)
if (port->type == SAA7164_MPEG_ENCODER) {
/* Pick any analog standard to init the DIF.
* we'll come back during encoder_init()
- * and set the correct standard if requried.
+ * and set the correct standard if required.
*/
std = V4L2_STD_NTSC;
} else
diff --git a/drivers/media/pci/saa7164/saa7164-cards.c b/drivers/media/pci/saa7164/saa7164-cards.c
index 3af16062e79d..9a6fe7cd4d59 100644
--- a/drivers/media/pci/saa7164/saa7164-cards.c
+++ b/drivers/media/pci/saa7164/saa7164-cards.c
@@ -685,7 +685,7 @@ struct saa7164_subid saa7164_subids[] = {
.subvendor = 0x0070,
.subdevice = 0xf111,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2255,
- /* Prototype card left here for documenation purposes.
+ /* Prototype card left here for documentation purposes.
.card = SAA7164_BOARD_HAUPPAUGE_HVR2255proto,
*/
}, {
@@ -866,7 +866,7 @@ void saa7164_card_setup(struct saa7164_dev *dev)
* access to I2C. Instead we have to communicate through the device f/w for
* register access to 'processing units'. Each unit has a unique
* id, regardless of how the physical implementation occurs across
- * the three physical i2c busses. The being said if we want leverge of
+ * the three physical i2c buses. That being said, if we want to leverage
* the existing kernel drivers for tuners and demods we have to 'speak i2c',
* so this bridge implements 3 virtual i2c buses. This is a helper function
* for those.
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index f33349e16359..05f25c9bb308 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -157,7 +157,7 @@ static void saa7164_ts_verifier(struct saa7164_buffer *buf)
}
- /* Only report errors if we've been through this function atleast
+ /* Only report errors if we've been through this function at least
* once already and the cached cc values are primed. First time through
* always generates errors.
*/
@@ -1020,7 +1020,7 @@ static int saa7164_dev_setup(struct saa7164_dev *dev)
dev->bmmio = (u8 __iomem *)dev->lmmio;
dev->bmmio2 = (u8 __iomem *)dev->lmmio2;
- /* Inerrupt and ack register locations offset of bmmio */
+ /* Interrupt and ack register locations offset of bmmio */
dev->int_status = 0x183000 + 0xf80;
dev->int_ack = 0x183000 + 0xf90;
diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c
index dfb118d7d1ec..3e73cb3c7e88 100644
--- a/drivers/media/pci/saa7164/saa7164-dvb.c
+++ b/drivers/media/pci/saa7164/saa7164-dvb.c
@@ -529,7 +529,7 @@ int saa7164_dvb_unregister(struct saa7164_port *port)
return 0;
}
-/* All the DVB attach calls go here, this function get's modified
+/* All the DVB attach calls go here, this function gets modified
* for each new card.
*/
int saa7164_dvb_register(struct saa7164_port *port)
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index a50461861133..ed27b3ce5e8e 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -409,7 +409,7 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
(version & 0x0000001f),
(version & 0xffff0000) >> 16);
- /* Load the firmwware from the disk if required */
+ /* Load the firmware from the disk if required */
if (version == 0) {
printk(KERN_INFO "%s() Waiting for firmware upload (%s)\n",
diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
index c5595af6b976..d292cdfb3671 100644
--- a/drivers/media/pci/smipcie/smipcie-ir.c
+++ b/drivers/media/pci/smipcie/smipcie-ir.c
@@ -16,6 +16,9 @@
#include "smipcie.h"
+#define SMI_SAMPLE_PERIOD 83
+#define SMI_SAMPLE_IDLEMIN (10000 / SMI_SAMPLE_PERIOD)
+
static void smi_ir_enableInterrupt(struct smi_rc *ir)
{
struct smi_dev *dev = ir->dev;
@@ -42,114 +45,64 @@ static void smi_ir_stop(struct smi_rc *ir)
struct smi_dev *dev = ir->dev;
smi_ir_disableInterrupt(ir);
- smi_clear(IR_Init_Reg, 0x80);
+ smi_clear(IR_Init_Reg, rbIRen);
}
-#define BITS_PER_COMMAND 14
-#define GROUPS_PER_BIT 2
-#define IR_RC5_MIN_BIT 36
-#define IR_RC5_MAX_BIT 52
-static u32 smi_decode_rc5(u8 *pData, u8 size)
+static void smi_raw_process(struct rc_dev *rc_dev, const u8 *buffer,
+ const u8 length)
{
- u8 index, current_bit, bit_count;
- u8 group_array[BITS_PER_COMMAND * GROUPS_PER_BIT + 4];
- u8 group_index = 0;
- u32 command = 0xFFFFFFFF;
-
- group_array[group_index++] = 1;
-
- for (index = 0; index < size; index++) {
-
- current_bit = (pData[index] & 0x80) ? 1 : 0;
- bit_count = pData[index] & 0x7f;
-
- if ((current_bit == 1) && (bit_count >= 2*IR_RC5_MAX_BIT + 1)) {
- goto process_code;
- } else if ((bit_count >= IR_RC5_MIN_BIT) &&
- (bit_count <= IR_RC5_MAX_BIT)) {
- group_array[group_index++] = current_bit;
- } else if ((bit_count > IR_RC5_MAX_BIT) &&
- (bit_count <= 2*IR_RC5_MAX_BIT)) {
- group_array[group_index++] = current_bit;
- group_array[group_index++] = current_bit;
- } else {
- goto invalid_timing;
- }
- if (group_index >= BITS_PER_COMMAND*GROUPS_PER_BIT)
- goto process_code;
-
- if ((group_index == BITS_PER_COMMAND*GROUPS_PER_BIT - 1)
- && (group_array[group_index-1] == 0)) {
- group_array[group_index++] = 1;
- goto process_code;
- }
- }
-
-process_code:
- if (group_index == (BITS_PER_COMMAND*GROUPS_PER_BIT-1))
- group_array[group_index++] = 1;
-
- if (group_index == BITS_PER_COMMAND*GROUPS_PER_BIT) {
- command = 0;
- for (index = 0; index < (BITS_PER_COMMAND*GROUPS_PER_BIT);
- index = index + 2) {
- if ((group_array[index] == 1) &&
- (group_array[index+1] == 0)) {
- command |= (1 << (BITS_PER_COMMAND -
- (index/2) - 1));
- } else if ((group_array[index] == 0) &&
- (group_array[index+1] == 1)) {
- /* */
- } else {
- command = 0xFFFFFFFF;
- goto invalid_timing;
- }
+ struct ir_raw_event rawir = {};
+ int cnt;
+
+ for (cnt = 0; cnt < length; cnt++) {
+ if (buffer[cnt] & 0x7f) {
+ rawir.pulse = (buffer[cnt] & 0x80) == 0;
+ rawir.duration = ((buffer[cnt] & 0x7f) +
+ (rawir.pulse ? 0 : -1)) *
+ rc_dev->rx_resolution;
+ ir_raw_event_store_with_filter(rc_dev, &rawir);
}
}
-
-invalid_timing:
- return command;
}
-static void smi_ir_decode(struct work_struct *work)
+static void smi_ir_decode(struct smi_rc *ir)
{
- struct smi_rc *ir = container_of(work, struct smi_rc, work);
struct smi_dev *dev = ir->dev;
struct rc_dev *rc_dev = ir->rc_dev;
- u32 dwIRControl, dwIRData, dwIRCode, scancode;
- u8 index, ucIRCount, readLoop, rc5_command, rc5_system, toggle;
+ u32 dwIRControl, dwIRData;
+ u8 index, ucIRCount, readLoop;
dwIRControl = smi_read(IR_Init_Reg);
+
if (dwIRControl & rbIRVld) {
ucIRCount = (u8) smi_read(IR_Data_Cnt);
- if (ucIRCount < 4)
- goto end_ir_decode;
-
readLoop = ucIRCount/4;
if (ucIRCount % 4)
readLoop += 1;
for (index = 0; index < readLoop; index++) {
- dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index*4));
+ dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
ir->irData[index*4 + 0] = (u8)(dwIRData);
ir->irData[index*4 + 1] = (u8)(dwIRData >> 8);
ir->irData[index*4 + 2] = (u8)(dwIRData >> 16);
ir->irData[index*4 + 3] = (u8)(dwIRData >> 24);
}
- dwIRCode = smi_decode_rc5(ir->irData, ucIRCount);
-
- if (dwIRCode != 0xFFFFFFFF) {
- rc5_command = dwIRCode & 0x3F;
- rc5_system = (dwIRCode & 0x7C0) >> 6;
- toggle = (dwIRCode & 0x800) ? 1 : 0;
- scancode = rc5_system << 8 | rc5_command;
- rc_keydown(rc_dev, RC_PROTO_RC5, scancode, toggle);
- }
+ smi_raw_process(rc_dev, ir->irData, ucIRCount);
+ smi_set(IR_Init_Reg, rbIRVld);
}
-end_ir_decode:
- smi_set(IR_Init_Reg, 0x04);
- smi_ir_enableInterrupt(ir);
+
+ if (dwIRControl & rbIRhighidle) {
+ struct ir_raw_event rawir = {};
+
+ rawir.pulse = 0;
+ rawir.duration = US_TO_NS(SMI_SAMPLE_PERIOD *
+ SMI_SAMPLE_IDLEMIN);
+ ir_raw_event_store_with_filter(rc_dev, &rawir);
+ smi_set(IR_Init_Reg, rbIRhighidle);
+ }
+
+ ir_raw_event_handle(rc_dev);
}
/* ir functions called by the main driver. */
@@ -160,7 +113,8 @@ int smi_ir_irq(struct smi_rc *ir, u32 int_status)
if (int_status & IR_X_INT) {
smi_ir_disableInterrupt(ir);
smi_ir_clearInterrupt(ir);
- schedule_work(&ir->work);
+ smi_ir_decode(ir);
+ smi_ir_enableInterrupt(ir);
handled = 1;
}
return handled;
@@ -170,9 +124,11 @@ void smi_ir_start(struct smi_rc *ir)
{
struct smi_dev *dev = ir->dev;
- smi_write(IR_Idle_Cnt_Low, 0x00140070);
+ smi_write(IR_Idle_Cnt_Low,
+ (((SMI_SAMPLE_PERIOD - 1) & 0xFFFF) << 16) |
+ (SMI_SAMPLE_IDLEMIN & 0xFFFF));
msleep(20);
- smi_set(IR_Init_Reg, 0x90);
+ smi_set(IR_Init_Reg, rbIRen | rbIRhighidle);
smi_ir_enableInterrupt(ir);
}
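For reference, the value written to IR_Idle_Cnt_Low above can be checked by hand, assuming the same layout as the old magic constant (sample period in the high half-word, idle threshold in the low one):

	/* (83 - 1) = 82 = 0x52 goes in the high half-word,
	 * 10000 / 83 = 120 = 0x78 samples (~10 ms) in the low half-word,
	 * so the register is written with 0x00520078. */
	u32 val = (((SMI_SAMPLE_PERIOD - 1) & 0xFFFF) << 16) |
		  (SMI_SAMPLE_IDLEMIN & 0xFFFF);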
@@ -183,7 +139,7 @@ int smi_ir_init(struct smi_dev *dev)
struct rc_dev *rc_dev;
struct smi_rc *ir = &dev->ir;
- rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
+ rc_dev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rc_dev)
return -ENOMEM;
@@ -193,6 +149,7 @@ int smi_ir_init(struct smi_dev *dev)
snprintf(ir->input_phys, sizeof(ir->input_phys), "pci-%s/ir0",
pci_name(dev->pci_dev));
+ rc_dev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc_dev->driver_name = "SMI_PCIe";
rc_dev->input_phys = ir->input_phys;
rc_dev->device_name = ir->device_name;
@@ -203,11 +160,12 @@ int smi_ir_init(struct smi_dev *dev)
rc_dev->dev.parent = &dev->pci_dev->dev;
rc_dev->map_name = dev->info->rc_map;
+ rc_dev->timeout = MS_TO_NS(100);
+ rc_dev->rx_resolution = US_TO_NS(SMI_SAMPLE_PERIOD);
ir->rc_dev = rc_dev;
ir->dev = dev;
- INIT_WORK(&ir->work, smi_ir_decode);
smi_ir_disableInterrupt(ir);
ret = rc_register_device(rc_dev);
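A hedged reading of the sample format that smi_raw_process() consumes (inferred from the code above, not from a datasheet): each byte carries a space/pulse flag in bit 7 (set means space) and a run length in 83 us sample periods in bits 6..0, with spaces shortened by one period:

	/* with rx_resolution = US_TO_NS(83):          */
	/*   0x05 -> pulse, 5 * 83 us       = 415 us  */
	/*   0x85 -> space, (5 - 1) * 83 us = 332 us  */
	/* bytes with a zero run length are skipped    */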
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index a6c5b1bd7edb..e52229a87b84 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -241,7 +241,6 @@ struct smi_rc {
struct rc_dev *rc_dev;
char input_phys[64];
char device_name[64];
- struct work_struct work;
u8 irData[256];
int users;
diff --git a/drivers/media/pci/solo6x10/solo6x10-disp.c b/drivers/media/pci/solo6x10/solo6x10-disp.c
index 11c98f0625e4..f61007022471 100644
--- a/drivers/media/pci/solo6x10/solo6x10-disp.c
+++ b/drivers/media/pci/solo6x10/solo6x10-disp.c
@@ -71,7 +71,7 @@ static void solo_vin_config(struct solo_dev *solo_dev)
solo_reg_write(solo_dev, SOLO_VI_CH_FORMAT,
SOLO_VI_FD_SEL_MASK(0) | SOLO_VI_PROG_MASK(0));
- /* On 6110, initialize mozaic darkness stength */
+ /* On 6110, initialize mosaic darkness strength */
if (solo_dev->type == SOLO_DEV_6010)
solo_reg_write(solo_dev, SOLO_VI_FMT_CFG, 0);
else
@@ -230,7 +230,7 @@ int solo_set_motion_block(struct solo_dev *solo_dev, u8 ch,
}
/* First 8k is motion flag (512 bytes * 16). Following that is an 8k+8k
- * threshold and working table for each channel. Atleast that's what the
+ * threshold and working table for each channel. At least that's what the
* spec says. However, this code (taken from rdk) has some mystery 8k
* block right after the flag area, before the first thresh table. */
static void solo_motion_config(struct solo_dev *solo_dev)
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 411177ec4d72..2452d8f59cb0 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -110,7 +110,7 @@ static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2)
* @std: video standard (e.g. PAL/NTSC)
* @input: input line for video signal ( 0 or 1 )
* @disabled: Device is in power down state
- * @slock: for excluse acces of registers
+ * @slock: for exclusive access to registers
* @vb_vidq: queue maintained by videobuf2 layer
* @buffer_list: list of buffer in use
* @sequence: sequence number of acquired buffer
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 409defc75c05..9345287ad963 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -2313,7 +2313,7 @@ static int frontend_init(struct av7110 *av7110)
* (n is defined in the RPS_THRESH1 counter threshold)
* I think HS is raised high on the beginning of the n-th line
* and remains high until this n-th line that triggered
- * it is completely received. When the receiption of n-th line
+ * it is completely received. When the reception of n-th line
* ends, HS is lowered.
*
* To transmit data over DMA, 7146 needs changing state at
@@ -2347,7 +2347,7 @@ static int frontend_init(struct av7110 *av7110)
* hardware debug note: a working budget card (including budget patch)
* with vpeirq() interrupt setup in mode "0x90" (every 64K) will
* generate 3 interrupts per 25-Hz DMA frame of 2*188*512 bytes
- * and that means 3*25=75 Hz of interrupt freqency, as seen by
+ * and that means 3*25=75 Hz of interrupt frequency, as seen by
* watch cat /proc/interrupts
*
* If this frequency is 3x lower (and data received in the DMA
@@ -2356,7 +2356,7 @@ static int frontend_init(struct av7110 *av7110)
* this means VSYNC line is not connected in the hardware.
* (check soldering pcb and pins)
* The same behaviour of missing VSYNC can be duplicated on budget
- * cards, by seting DD1_INIT trigger mode 7 in 3rd nibble.
+ * cards, by setting DD1_INIT trigger mode 7 in 3rd nibble.
*/
static int av7110_attach(struct saa7146_dev* dev,
struct saa7146_pci_extension_data *pci_ext)
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index d3f727045ae8..4f74b14c3b4f 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -446,7 +446,7 @@ static void tw68_buf_queue(struct vb2_buffer *vb)
/*
* buffer_prepare
*
- * Set the ancilliary information into the buffer structure. This
+ * Set the ancillary information into the buffer structure. This
* includes generating the necessary risc program if it hasn't already
* been done for the current buffer format.
* The structure fh contains the details of the format requested by the
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index a505e9f5a1e2..4acbed189644 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -141,7 +141,6 @@ config VIDEO_RENESAS_CEU
---help---
This is a v4l2 driver for the Renesas CEU Interface
-source "drivers/media/platform/soc_camera/Kconfig"
source "drivers/media/platform/exynos4-is/Kconfig"
source "drivers/media/platform/am437x/Kconfig"
source "drivers/media/platform/xilinx/Kconfig"
@@ -650,7 +649,7 @@ config VIDEO_SECO_CEC
config VIDEO_SECO_RC
bool "SECO Boards IR RC5 support"
depends on VIDEO_SECO_CEC
- select RC_CORE
+ depends on RC_CORE
help
If you say yes here you will get support for the
SECO Boards Consumer-IR in the seco-cec driver.
@@ -669,7 +668,7 @@ menuconfig SDR_PLATFORM_DRIVERS
if SDR_PLATFORM_DRIVERS
config VIDEO_RCAR_DRIF
- tristate "Renesas Digitial Radio Interface (DRIF)"
+ tristate "Renesas Digital Radio Interface (DRIF)"
depends on VIDEO_V4L2
depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_VMALLOC
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index e6deb2597738..7cbbd925124c 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -62,8 +62,6 @@ obj-y += davinci/
obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
-obj-$(CONFIG_SOC_CAMERA) += soc_camera/
-
obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o
obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o
obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index dfec813f50a9..692e08ef38c0 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -1661,6 +1661,7 @@ static int aspeed_video_probe(struct platform_device *pdev)
video->frame_rate = 30;
video->dev = &pdev->dev;
+ spin_lock_init(&video->lock);
mutex_init(&video->video_lock);
init_waitqueue_head(&video->wait);
INIT_DELAYED_WORK(&video->res_work, aspeed_video_resolution_work);
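The one-liner above fixes use of an uninitialized spinlock: declaring a spinlock_t embeds the storage, but it still has to be initialized before the first lock/unlock, or CONFIG_DEBUG_SPINLOCK/lockdep will complain. Sketch of the rule:

	spinlock_t lock;        /* storage only, not yet usable */
	spin_lock_init(&lock);  /* must happen before first use */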
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index fdb255e4a956..08b8d5583080 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -110,7 +110,7 @@ struct atmel_isi {
bool enable_preview_path;
struct completion complete;
- /* ISI peripherial clock */
+ /* ISI peripheral clock */
struct clk *pclk;
unsigned int irq;
@@ -1078,7 +1078,7 @@ static void isi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
dev_dbg(isi->dev, "Removing %s\n", video_device_node_name(isi->vdev));
- /* Checks internaly if vdev have been init or not */
+ /* Checks internally whether vdev has been initialized */
video_unregister_device(isi->vdev);
}
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 8e0194993a52..b4f396c2e72c 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1010,7 +1010,11 @@ static int coda_start_encoding(struct coda_ctx *ctx)
CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
((ctx->params.h264_slice_beta_offset_div2 &
CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET) |
+ (ctx->params.h264_constrained_intra_pred_flag <<
+ CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET) |
+ (ctx->params.h264_chroma_qp_index_offset &
+ CODA_264PARAM_CHROMAQPOFFSET_MASK);
coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
break;
case V4L2_PIX_FMT_JPEG:
@@ -2020,7 +2024,6 @@ static void coda_finish_decode(struct coda_ctx *ctx)
struct coda_q_data *q_data_dst;
struct vb2_v4l2_buffer *dst_buf;
struct coda_buffer_meta *meta;
- unsigned long payload;
int width, height;
int decoded_idx;
int display_idx;
@@ -2226,21 +2229,8 @@ static void coda_finish_decode(struct coda_ctx *ctx)
trace_coda_dec_rot_done(ctx, dst_buf, meta);
- switch (q_data_dst->fourcc) {
- case V4L2_PIX_FMT_YUYV:
- payload = width * height * 2;
- break;
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_NV12:
- default:
- payload = width * height * 3 / 2;
- break;
- case V4L2_PIX_FMT_YUV422P:
- payload = width * height * 2;
- break;
- }
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ q_data_dst->sizeimage);
if (ctx->frame_errors[ctx->display_idx] || err_vdoa)
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
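The deleted switch computed exactly what the queue's sizeimage already encodes for each format, so setting the payload to q_data_dst->sizeimage is equivalent for the cases listed (modulo any row padding the driver folds into sizeimage) and automatically covers formats added later:

	/* YUYV, YUV422P: 2 bytes per pixel        */
	/*   sizeimage == width * height * 2       */
	/* YUV420, YVU420, NV12: 12 bits per pixel */
	/*   sizeimage == width * height * 3 / 2   */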
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 7518f01c48f7..fa0b22fb7991 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -728,7 +728,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
case V4L2_PIX_FMT_NV12:
- if (!disable_tiling) {
+ if (!disable_tiling && ctx->dev->devtype->product == CODA_960) {
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
}
@@ -1839,6 +1839,12 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
ctx->params.h264_disable_deblocking_filter_idc = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
+ ctx->params.h264_constrained_intra_pred_flag = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET:
+ ctx->params.h264_chroma_qp_index_offset = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
/* TODO: switch between baseline and constrained baseline */
if (ctx->inst_type == CODA_INST_ENCODER)
@@ -1925,6 +1931,11 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION, 0, 1, 1,
+ 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
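From userspace, the two new encoder controls are reachable through the standard control ioctls; a minimal hedged sketch (device fd handling and the chosen values are illustrative only):

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int set_h264_tuning(int fd)
	{
		struct v4l2_control ctrl;

		ctrl.id = V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION;
		ctrl.value = 1;
		if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
			return -1;

		ctrl.id = V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET;
		ctrl.value = -6;	/* advertised range is -12..12 */
		return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	}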
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
index 9f899a6cefed..39a2351c1e48 100644
--- a/drivers/media/platform/coda/coda-jpeg.c
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -103,7 +103,7 @@ static const unsigned char chroma_ac_value[162 + 2] = {
/*
* Quantization tables for luminance and chrominance components in
- * zig-zag scan order from the Freescale i.MX VPU libaries
+ * zig-zag scan order from the Freescale i.MX VPU libraries
*/
static unsigned char luma_q[64] = {
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 31cea72f5b2a..31c80bda2c0b 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -118,6 +118,8 @@ struct coda_params {
u8 h264_disable_deblocking_filter_idc;
s8 h264_slice_alpha_c0_offset_div2;
s8 h264_slice_beta_offset_div2;
+ bool h264_constrained_intra_pred_flag;
+ s8 h264_chroma_qp_index_offset;
u8 h264_profile_idc;
u8 h264_level_idc;
u8 mpeg4_intra_qp;
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index 340f8218f54d..47cecd10eb9f 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -328,7 +328,7 @@ static void isif_config_bclamp(struct isif_black_clamp *bc)
if (bc->en) {
val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT;
- /* Enable BC and horizontal clamp caculation paramaters */
+ /* Enable BC and horizontal clamp calculation parameters */
val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT);
regw(val, CLAMPCFG);
@@ -358,7 +358,7 @@ static void isif_config_bclamp(struct isif_black_clamp *bc)
regw(bc->horz.win_start_v_calc, CLHWIN2);
}
- /* vertical clamp caculation paramaters */
+ /* vertical clamp calculation parameters */
/* Reset clamp value sel for previous line */
val |=
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 4766a7a23d16..8339163a5231 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -242,7 +242,7 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
goto unlock;
/*
- * It is assumed that venc or extenal encoder will set a default
+ * It is assumed that venc or an external encoder will set a default
* mode in the sub device. For an external encoder or LCD panel output,
* we also need to set up the lcd port for the required mode. So setup
* the lcd port for the default mode that is configured in the board
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 9996bab98fe3..26dadbba930f 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -518,7 +518,7 @@ static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
{
- v4l2_get_timestamp(&vpfe_dev->cur_frm->ts);
+ vpfe_dev->cur_frm->ts = ktime_get_ns();
vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
wake_up_interruptible(&vpfe_dev->cur_frm->done);
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index 16352e2263d2..df66461f5d4f 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -1,7 +1,7 @@
/*
* vpif - Video Port Interface driver
* VPIF is a receiver and transmitter for video data. It has two channels(0, 1)
- * that receiveing video byte stream and two channels(2, 3) for video output.
+ * that receive a video byte stream and two channels (2, 3) for video output.
* The hardware supports SDTV, HDTV formats, raw data capture.
* Currently, the driver supports NTSC and PAL standards.
*
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 3517487d9760..f4b4f2a1dfc0 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -138,7 +138,7 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
* vpif_buffer_queue : Callback function to add buffer to DMA queue
* @vb: ptr to vb2_buffer
*
- * This callback fucntion queues the buffer to DMA engine
+ * This callback function queues the buffer to DMA engine
*/
static void vpif_buffer_queue(struct vb2_buffer *vb)
{
@@ -635,7 +635,7 @@ static int vpif_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
/*
- * to supress v4l-compliance warnings silently correct
+ * to suppress v4l-compliance warnings silently correct
* the pixelformat
*/
if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P)
diff --git a/drivers/media/platform/exynos4-is/fimc-is-command.h b/drivers/media/platform/exynos4-is/fimc-is-command.h
index 0d1f52e394b1..b06b56b890d5 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-command.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-command.h
@@ -18,7 +18,7 @@
#define FIMC_IS_COMMAND_VER 110 /* FIMC-IS command set version 1.10 */
-/* Enumeration of commands beetween the FIMC-IS and the host processor. */
+/* Enumeration of commands between the FIMC-IS and the host processor. */
/* HOST to FIMC-IS */
#define HIC_PREVIEW_STILL 0x0001
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.h b/drivers/media/platform/exynos4-is/fimc-is-param.h
index 8e31f7642776..22923a3d405e 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-param.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-param.h
@@ -298,7 +298,7 @@ enum isp_af_mode {
#define ISP_FLASH_COMMAND_AUTO 2
#define ISP_FLASH_COMMAND_TORCH 3 /* 3 sec */
-/* Flash red-eye commads */
+/* Flash red-eye commands */
#define ISP_FLASH_REDEYE_DISABLE 0
#define ISP_FLASH_REDEYE_ENABLE 1
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index f5fc54de19da..02da0b06e56a 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -738,7 +738,7 @@ int fimc_is_hw_initialize(struct fimc_is *is)
return 0;
}
-static int fimc_is_log_show(struct seq_file *s, void *data)
+static int fimc_is_show(struct seq_file *s, void *data)
{
struct fimc_is *is = s->private;
const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET;
@@ -752,17 +752,7 @@ static int fimc_is_log_show(struct seq_file *s, void *data)
return 0;
}
-static int fimc_is_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, fimc_is_log_show, inode->i_private);
-}
-
-static const struct file_operations fimc_is_debugfs_fops = {
- .open = fimc_is_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(fimc_is);
static void fimc_is_debugfs_remove(struct fimc_is *is)
{
@@ -777,7 +767,7 @@ static int fimc_is_debugfs_create(struct fimc_is *is)
is->debugfs_entry = debugfs_create_dir("fimc_is", NULL);
dentry = debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry,
- is, &fimc_is_debugfs_fops);
+ is, &fimc_is_fops);
if (!dentry)
fimc_is_debugfs_remove(is);
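DEFINE_SHOW_ATTRIBUTE(fimc_is) regenerates the boilerplate deleted above; paraphrasing its expansion from include/linux/seq_file.h:

	static int fimc_is_open(struct inode *inode, struct file *file)
	{
		return single_open(file, fimc_is_show, inode->i_private);
	}

	static const struct file_operations fimc_is_fops = {
		.owner   = THIS_MODULE,
		.open    = fimc_is_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

The macro derives everything from the fimc_is prefix, which is why the show function had to be renamed from fimc_is_log_show() to fimc_is_show().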
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index de6bd28f7e31..bb35a2017f21 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -606,9 +606,7 @@ int fimc_isp_video_device_register(struct fimc_isp *isp,
vdev = &iv->ve.vdev;
memset(vdev, 0, sizeof(*vdev));
- snprintf(vdev->name, sizeof(vdev->name), "fimc-is-isp.%s",
- type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ?
- "capture" : "output");
+ strscpy(vdev->name, "fimc-is-isp.capture", sizeof(vdev->name));
vdev->queue = q;
vdev->fops = &isp_video_fops;
vdev->ioctl_ops = &isp_video_ioctl_ops;
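Besides hardcoding the only name actually registered, the hunk above swaps snprintf() for strscpy(), the preferred helper for plain bounded copies:

	/* always NUL-terminates; returns the number of characters
	 * copied, or -E2BIG if the source did not fit */
	strscpy(vdev->name, "fimc-is-isp.capture", sizeof(vdev->name));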
diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h
index 9f527670395a..a7c9490bbcb4 100644
--- a/drivers/media/platform/exynos4-is/media-dev.h
+++ b/drivers/media/platform/exynos4-is/media-dev.h
@@ -79,7 +79,7 @@ struct fimc_camclk_info {
/**
* struct fimc_sensor_info - image data source subdev information
- * @pdata: sensor's atrributes passed as media device's platform data
+ * @pdata: sensor's attributes passed as media device's platform data
* @asd: asynchronous subdev registration data structure
* @subdev: image sensor v4l2 subdev
* @host: fimc device the sensor is currently linked to
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 35cb0162085b..234e047e3e8f 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -183,7 +183,7 @@ struct csis_drvdata {
* @index: the hardware instance index
* @pdev: CSIS platform device
* @phy: pointer to the CSIS generic PHY
- * @regs: mmaped I/O registers memory
+ * @regs: mmapped I/O registers memory
* @supplies: CSIS regulator supplies
* @clock: CSIS clocks
* @irq: requested s5p-mipi-csis irq number
@@ -745,7 +745,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
goto err;
}
- /* Get MIPI CSI-2 bus configration from the endpoint node. */
+ /* Get MIPI CSI-2 bus configuration from the endpoint node. */
of_property_read_u32(node, "samsung,csis-hs-settle",
&state->hs_settle);
state->wclk_ext = of_property_read_bool(node,
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index ca6d0317ab42..cffebcaacb90 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1090,7 +1090,7 @@ static void viu_capture_intr(struct viu_dev *dev, u32 status)
if (waitqueue_active(&buf->vb.done)) {
list_del(&buf->vb.queue);
- v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.ts = ktime_get_ns();
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
wake_up(&buf->vb.done);
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index c1c255408d16..0bcfc5aa8f3d 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -90,7 +90,11 @@ static struct pxp_fmt formats[] = {
.depth = 16,
.types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
}, {
- .fourcc = V4L2_PIX_FMT_YUV32,
+ .fourcc = V4L2_PIX_FMT_VUYA32,
+ .depth = 32,
+ .types = MEM2MEM_CAPTURE,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VUYX32,
.depth = 32,
.types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
}, {
@@ -236,7 +240,7 @@ static u32 pxp_v4l2_pix_fmt_to_ps_format(u32 v4l2_pix_fmt)
case V4L2_PIX_FMT_RGB555: return BV_PXP_PS_CTRL_FORMAT__RGB555;
case V4L2_PIX_FMT_RGB444: return BV_PXP_PS_CTRL_FORMAT__RGB444;
case V4L2_PIX_FMT_RGB565: return BV_PXP_PS_CTRL_FORMAT__RGB565;
- case V4L2_PIX_FMT_YUV32: return BV_PXP_PS_CTRL_FORMAT__YUV1P444;
+ case V4L2_PIX_FMT_VUYX32: return BV_PXP_PS_CTRL_FORMAT__YUV1P444;
case V4L2_PIX_FMT_UYVY: return BV_PXP_PS_CTRL_FORMAT__UYVY1P422;
case V4L2_PIX_FMT_YUYV: return BM_PXP_PS_CTRL_WB_SWAP |
BV_PXP_PS_CTRL_FORMAT__UYVY1P422;
@@ -265,7 +269,8 @@ static u32 pxp_v4l2_pix_fmt_to_out_format(u32 v4l2_pix_fmt)
case V4L2_PIX_FMT_RGB555: return BV_PXP_OUT_CTRL_FORMAT__RGB555;
case V4L2_PIX_FMT_RGB444: return BV_PXP_OUT_CTRL_FORMAT__RGB444;
case V4L2_PIX_FMT_RGB565: return BV_PXP_OUT_CTRL_FORMAT__RGB565;
- case V4L2_PIX_FMT_YUV32: return BV_PXP_OUT_CTRL_FORMAT__YUV1P444;
+ case V4L2_PIX_FMT_VUYA32:
+ case V4L2_PIX_FMT_VUYX32: return BV_PXP_OUT_CTRL_FORMAT__YUV1P444;
case V4L2_PIX_FMT_UYVY: return BV_PXP_OUT_CTRL_FORMAT__UYVY1P422;
case V4L2_PIX_FMT_VYUY: return BV_PXP_OUT_CTRL_FORMAT__VYUY1P422;
case V4L2_PIX_FMT_GREY: return BV_PXP_OUT_CTRL_FORMAT__Y8;
@@ -281,7 +286,8 @@ static u32 pxp_v4l2_pix_fmt_to_out_format(u32 v4l2_pix_fmt)
static bool pxp_v4l2_pix_fmt_is_yuv(u32 v4l2_pix_fmt)
{
switch (v4l2_pix_fmt) {
- case V4L2_PIX_FMT_YUV32:
+ case V4L2_PIX_FMT_VUYA32:
+ case V4L2_PIX_FMT_VUYX32:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_VYUY:
@@ -680,7 +686,7 @@ static void pxp_setup_csc(struct pxp_ctx *ctx)
csc2_coef = csc2_coef_rec709_full;
else
csc2_coef = csc2_coef_rec709_lim;
- } else if (ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ } else if (ycbcr_enc == V4L2_YCBCR_ENC_BT2020) {
if (quantization == V4L2_QUANTIZATION_FULL_RANGE)
csc2_coef = csc2_coef_bt2020_full;
else
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index 70a2833db0d1..af76eb637773 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -240,8 +240,8 @@ static void mmpcam_calc_dphy(struct mcam_camera *mcam)
* bit 8 ~ bit 15: HS_SETTLE
* Time interval during which the HS
* receiver shall ignore any Data Lane
- * HS transistions.
- * The vaule has been calibrated on
+ * HS transitions.
+ * The value has been calibrated on
* different boards. It seems to work well.
*
* For more detail, please refer
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index 2a5d5002c27e..f761e4d8bf2a 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -702,7 +702,7 @@ end:
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
}
-static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
+static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
enum v4l2_buf_type type)
{
if (V4L2_TYPE_IS_OUTPUT(type))
@@ -714,7 +714,7 @@ static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
int ret = 0;
ret = pm_runtime_get_sync(ctx->jpeg->dev);
@@ -724,14 +724,14 @@ static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
err:
while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_QUEUED);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED);
return ret;
}
static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
{
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
/*
* STREAMOFF is an acknowledgment for a source change event.
@@ -743,7 +743,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
struct mtk_jpeg_src_buf *src_buf;
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
+ src_buf = mtk_jpeg_vb2_to_srcbuf(&vb->vb2_buf);
mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param);
ctx->state = MTK_JPEG_RUNNING;
} else if (V4L2_TYPE_IS_OUTPUT(q->type)) {
@@ -751,7 +751,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
}
while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
pm_runtime_put_sync(ctx->jpeg->dev);
}
@@ -807,7 +807,7 @@ static void mtk_jpeg_device_run(void *priv)
{
struct mtk_jpeg_ctx *ctx = priv;
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
unsigned long flags;
struct mtk_jpeg_src_buf *jpeg_src_buf;
@@ -817,11 +817,11 @@ static void mtk_jpeg_device_run(void *priv)
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
- for (i = 0; i < dst_buf->num_planes; i++)
- vb2_set_plane_payload(dst_buf, i, 0);
+ for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0);
buf_state = VB2_BUF_STATE_DONE;
goto dec_end;
}
@@ -833,8 +833,8 @@ static void mtk_jpeg_device_run(void *priv)
return;
}
- mtk_jpeg_set_dec_src(ctx, src_buf, &bs);
- if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, dst_buf, &fb))
+ mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
+ if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
goto dec_end;
spin_lock_irqsave(&jpeg->hw_lock, flags);
@@ -849,8 +849,8 @@ static void mtk_jpeg_device_run(void *priv)
dec_end:
v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
}
@@ -921,7 +921,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
{
struct mtk_jpeg_dev *jpeg = priv;
struct mtk_jpeg_ctx *ctx;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct mtk_jpeg_src_buf *jpeg_src_buf;
enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
u32 dec_irq_ret;
@@ -938,7 +938,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
mtk_jpeg_dec_reset(jpeg->dec_reg_base);
@@ -948,15 +948,15 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
goto dec_end;
}
- for (i = 0; i < dst_buf->num_planes; i++)
- vb2_set_plane_payload(dst_buf, i,
+ for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, i,
jpeg_src_buf->dec_param.comp_size[i]);
buf_state = VB2_BUF_STATE_DONE;
dec_end:
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
return IRQ_HANDLED;
}
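All of the mtk-jpeg conversions above lean on the v4l2-mem2mem helpers already trafficking in struct vb2_v4l2_buffer (signatures as in media/v4l2-mem2mem.h of this era):

	struct vb2_v4l2_buffer *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx);
	struct vb2_v4l2_buffer *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx);

so the to_vb2_v4l2_buffer() round-trips disappear and &vbuf->vb2_buf is passed where a plain vb2_buffer is still required.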
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
index ad1cff306efd..e5abb1abb3a3 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
@@ -41,7 +41,7 @@
#define MTK_MDP_CTX_ERROR BIT(5)
/**
- * struct mtk_mdp_pix_align - alignement of image
+ * struct mtk_mdp_pix_align - alignment of image
* @org_w: source alignment of width
* @org_h: source alignment of height
* @target_w: dst alignment of width
@@ -122,8 +122,8 @@ struct mtk_mdp_frame {
/**
* struct mtk_mdp_variant - image processor variant information
* @pix_max: maximum limit of image size
- * @pix_min: minimun limit of image size
- * @pix_align: alignement of image
+ * @pix_min: minimum limit of image size
+ * @pix_align: alignment of image
* @h_scale_up_max: maximum scale-up in horizontal
* @v_scale_up_max: maximum scale-up in vertical
* @h_scale_down_max: maximum scale-down in horizontal
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
index 51a13466261e..7d15c06e9db9 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -473,20 +473,17 @@ static void mtk_mdp_prepare_addr(struct mtk_mdp_ctx *ctx,
static void mtk_mdp_m2m_get_bufs(struct mtk_mdp_ctx *ctx)
{
struct mtk_mdp_frame *s_frame, *d_frame;
- struct vb2_buffer *src_vb, *dst_vb;
struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf;
s_frame = &ctx->s_frame;
d_frame = &ctx->d_frame;
- src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- mtk_mdp_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
+ src_vbuf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ mtk_mdp_prepare_addr(ctx, &src_vbuf->vb2_buf, s_frame, &s_frame->addr);
- dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- mtk_mdp_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+ dst_vbuf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ mtk_mdp_prepare_addr(ctx, &dst_vbuf->vb2_buf, d_frame, &d_frame->addr);
- src_vbuf = to_vb2_v4l2_buffer(src_vb);
- dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
}
@@ -494,17 +491,14 @@ static void mtk_mdp_process_done(void *priv, int vb_state)
{
struct mtk_mdp_dev *mdp = priv;
struct mtk_mdp_ctx *ctx;
- struct vb2_buffer *src_vb, *dst_vb;
- struct vb2_v4l2_buffer *src_vbuf = NULL, *dst_vbuf = NULL;
+ struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf;
ctx = v4l2_m2m_get_curr_priv(mdp->m2m_dev);
if (!ctx)
return;
- src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- src_vbuf = to_vb2_v4l2_buffer(src_vb);
- dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
+ src_vbuf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vbuf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
dst_vbuf->timecode = src_vbuf->timecode;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index ba619647bc10..d022c65bb34c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -325,13 +325,12 @@ static void mtk_vdec_worker(struct work_struct *work)
struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
decode_work);
struct mtk_vcodec_dev *dev = ctx->dev;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct mtk_vcodec_mem buf;
struct vdec_fb *pfb;
bool res_chg = false;
int ret;
struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
- struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
if (src_buf == NULL) {
@@ -347,26 +346,23 @@ static void mtk_vdec_worker(struct work_struct *work)
return;
}
- src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
- src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb);
-
- dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
- dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);
+ src_buf_info = container_of(src_buf, struct mtk_video_dec_buf, vb);
+ dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf, vb);
pfb = &dst_buf_info->frame_buffer;
- pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
- pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;
- pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
- pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1);
+ pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
pfb->status = 0;
mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
mtk_v4l2_debug(3,
"id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
- dst_buf->index, pfb,
+ dst_buf->vb2_buf.index, pfb,
pfb->base_y.va, &pfb->base_y.dma_addr,
&pfb->base_c.dma_addr, pfb->base_y.size);
@@ -384,19 +380,19 @@ static void mtk_vdec_worker(struct work_struct *work)
clean_display_buffer(ctx);
vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
- dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_LAST;
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
clean_free_buffer(ctx);
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
return;
}
- buf.va = vb2_plane_vaddr(src_buf, 0);
- buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
buf.size = (size_t)src_buf->planes[0].bytesused;
if (!buf.va) {
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
- ctx->id, src_buf->index);
+ ctx->id, src_buf->vb2_buf.index);
return;
}
mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
@@ -416,10 +412,10 @@ static void mtk_vdec_worker(struct work_struct *work)
mtk_v4l2_err(
" <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
ctx->id,
- src_buf->index,
+ src_buf->vb2_buf.index,
buf.size,
src_buf_info->vb.vb2_buf.timestamp,
- dst_buf->index,
+ dst_buf->vb2_buf.index,
ret, res_chg);
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
if (ret == -EIO) {
@@ -1103,7 +1099,7 @@ static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
{
- struct vb2_buffer *src_buf;
+ struct vb2_v4l2_buffer *src_buf;
struct mtk_vcodec_mem src_mem;
bool res_chg = false;
int ret = 0;
@@ -1149,8 +1145,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
mtk_v4l2_err("No src buffer");
return;
}
- vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
- buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+ buf = container_of(src_buf, struct mtk_video_dec_buf, vb);
if (buf->lastframe) {
/* This shouldn't happen. Just in case. */
mtk_v4l2_err("Invalid flush buffer.");
@@ -1158,8 +1153,8 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
return;
}
- src_mem.va = vb2_plane_vaddr(src_buf, 0);
- src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
src_mem.size = (size_t)src_buf->planes[0].bytesused;
mtk_v4l2_debug(2,
"[%d] buf id=%d va=%p dma=%pad size=%zx",
@@ -1170,7 +1165,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
if (ret || !res_chg) {
/*
- * fb == NULL menas to parse SPS/PPS header or
+ * fb == NULL means to parse SPS/PPS header or
* resolution info in src_mem. Decode can fail
* if there is no SPS header or picture info
* in bs
@@ -1181,11 +1176,9 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.",
ctx->id);
ctx->state = MTK_STATE_ABORT;
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
- VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
} else {
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
- VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
}
mtk_v4l2_debug(ret ? 0 : 1,
"[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
@@ -1281,7 +1274,7 @@ static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
{
- struct vb2_buffer *src_buf = NULL, *dst_buf = NULL;
+ struct vb2_v4l2_buffer *src_buf = NULL, *dst_buf = NULL;
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
@@ -1289,12 +1282,10 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
- struct vb2_v4l2_buffer *vb2_v4l2 =
- to_vb2_v4l2_buffer(src_buf);
struct mtk_video_dec_buf *buf_info = container_of(
- vb2_v4l2, struct mtk_video_dec_buf, vb);
+ src_buf, struct mtk_video_dec_buf, vb);
if (!buf_info->lastframe)
- v4l2_m2m_buf_done(vb2_v4l2,
+ v4l2_m2m_buf_done(src_buf,
VB2_BUF_STATE_ERROR);
}
return;
@@ -1323,10 +1314,9 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
ctx->state = MTK_STATE_FLUSH;
while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
- vb2_set_plane_payload(dst_buf, 0, 0);
- vb2_set_plane_payload(dst_buf, 1, 0);
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
- VB2_BUF_STATE_ERROR);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
}
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
index 79ca03ac449c..7884465afcd2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -27,11 +27,14 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
struct device_node *node;
struct platform_device *pdev;
struct mtk_vcodec_pm *pm;
- int ret = 0;
+ struct mtk_vcodec_clk *dec_clk;
+ struct mtk_vcodec_clk_info *clk_info;
+ int i = 0, ret = 0;
pdev = mtkdev->plat_dev;
pm = &mtkdev->pm;
pm->mtkdev = mtkdev;
+ dec_clk = &pm->vdec_clk;
node = of_parse_phandle(pdev->dev.of_node, "mediatek,larb", 0);
if (!node) {
mtk_v4l2_err("of_parse_phandle mediatek,larb fail!");
@@ -47,52 +50,34 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
pdev = mtkdev->plat_dev;
pm->dev = &pdev->dev;
- pm->vcodecpll = devm_clk_get(&pdev->dev, "vcodecpll");
- if (IS_ERR(pm->vcodecpll)) {
- mtk_v4l2_err("devm_clk_get vcodecpll fail");
- ret = PTR_ERR(pm->vcodecpll);
+ dec_clk->clk_num =
+ of_property_count_strings(pdev->dev.of_node, "clock-names");
+ if (dec_clk->clk_num > 0) {
+ dec_clk->clk_info = devm_kcalloc(&pdev->dev,
+ dec_clk->clk_num, sizeof(*clk_info),
+ GFP_KERNEL);
+ if (!dec_clk->clk_info)
+ return -ENOMEM;
+ } else {
+ mtk_v4l2_err("Failed to get vdec clock count");
+ return -EINVAL;
}
- pm->univpll_d2 = devm_clk_get(&pdev->dev, "univpll_d2");
- if (IS_ERR(pm->univpll_d2)) {
- mtk_v4l2_err("devm_clk_get univpll_d2 fail");
- ret = PTR_ERR(pm->univpll_d2);
- }
-
- pm->clk_cci400_sel = devm_clk_get(&pdev->dev, "clk_cci400_sel");
- if (IS_ERR(pm->clk_cci400_sel)) {
- mtk_v4l2_err("devm_clk_get clk_cci400_sel fail");
- ret = PTR_ERR(pm->clk_cci400_sel);
- }
-
- pm->vdec_sel = devm_clk_get(&pdev->dev, "vdec_sel");
- if (IS_ERR(pm->vdec_sel)) {
- mtk_v4l2_err("devm_clk_get vdec_sel fail");
- ret = PTR_ERR(pm->vdec_sel);
- }
-
- pm->vdecpll = devm_clk_get(&pdev->dev, "vdecpll");
- if (IS_ERR(pm->vdecpll)) {
- mtk_v4l2_err("devm_clk_get vdecpll fail");
- ret = PTR_ERR(pm->vdecpll);
- }
-
- pm->vencpll = devm_clk_get(&pdev->dev, "vencpll");
- if (IS_ERR(pm->vencpll)) {
- mtk_v4l2_err("devm_clk_get vencpll fail");
- ret = PTR_ERR(pm->vencpll);
- }
-
- pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
- if (IS_ERR(pm->venc_lt_sel)) {
- mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
- ret = PTR_ERR(pm->venc_lt_sel);
- }
-
- pm->vdec_bus_clk_src = devm_clk_get(&pdev->dev, "vdec_bus_clk_src");
- if (IS_ERR(pm->vdec_bus_clk_src)) {
- mtk_v4l2_err("devm_clk_get vdec_bus_clk_src");
- ret = PTR_ERR(pm->vdec_bus_clk_src);
+ for (i = 0; i < dec_clk->clk_num; i++) {
+ clk_info = &dec_clk->clk_info[i];
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i, &clk_info->clk_name);
+ if (ret) {
+ mtk_v4l2_err("Failed to get clock name id = %d", i);
+ return ret;
+ }
+ clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
+ clk_info->clk_name);
+ if (IS_ERR(clk_info->vcodec_clk)) {
+ mtk_v4l2_err("devm_clk_get (%d)%s fail", i,
+ clk_info->clk_name);
+ return PTR_ERR(clk_info->vcodec_clk);
+ }
}
pm_runtime_enable(&pdev->dev);
@@ -125,78 +110,36 @@ void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm)
{
- int ret;
-
- ret = clk_set_rate(pm->vcodecpll, 1482 * 1000000);
- if (ret)
- mtk_v4l2_err("clk_set_rate vcodecpll fail %d", ret);
-
- ret = clk_set_rate(pm->vencpll, 800 * 1000000);
- if (ret)
- mtk_v4l2_err("clk_set_rate vencpll fail %d", ret);
-
- ret = clk_prepare_enable(pm->vcodecpll);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vcodecpll fail %d", ret);
-
- ret = clk_prepare_enable(pm->vencpll);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vencpll fail %d", ret);
-
- ret = clk_prepare_enable(pm->vdec_bus_clk_src);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vdec_bus_clk_src fail %d",
- ret);
-
- ret = clk_prepare_enable(pm->venc_lt_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable venc_lt_sel fail %d", ret);
-
- ret = clk_set_parent(pm->venc_lt_sel, pm->vdec_bus_clk_src);
- if (ret)
- mtk_v4l2_err("clk_set_parent venc_lt_sel vdec_bus_clk_src fail %d",
- ret);
-
- ret = clk_prepare_enable(pm->univpll_d2);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable univpll_d2 fail %d", ret);
-
- ret = clk_prepare_enable(pm->clk_cci400_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable clk_cci400_sel fail %d", ret);
-
- ret = clk_set_parent(pm->clk_cci400_sel, pm->univpll_d2);
- if (ret)
- mtk_v4l2_err("clk_set_parent clk_cci400_sel univpll_d2 fail %d",
- ret);
-
- ret = clk_prepare_enable(pm->vdecpll);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vdecpll fail %d", ret);
-
- ret = clk_prepare_enable(pm->vdec_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable vdec_sel fail %d", ret);
-
- ret = clk_set_parent(pm->vdec_sel, pm->vdecpll);
- if (ret)
- mtk_v4l2_err("clk_set_parent vdec_sel vdecpll fail %d", ret);
+ struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk;
+ int ret, i = 0;
+
+ for (i = 0; i < dec_clk->clk_num; i++) {
+ ret = clk_prepare_enable(dec_clk->clk_info[i].vcodec_clk);
+ if (ret) {
+ mtk_v4l2_err("clk_prepare_enable %d %s fail %d", i,
+ dec_clk->clk_info[i].clk_name, ret);
+ goto error;
+ }
+ }
ret = mtk_smi_larb_get(pm->larbvdec);
- if (ret)
+ if (ret) {
mtk_v4l2_err("mtk_smi_larb_get larbvdec fail %d", ret);
+ goto error;
+ }
+ return;
+error:
+ for (i -= 1; i >= 0; i--)
+ clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk);
}
void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm)
{
+ struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk;
+ int i = 0;
+
mtk_smi_larb_put(pm->larbvdec);
- clk_disable_unprepare(pm->vdec_sel);
- clk_disable_unprepare(pm->vdecpll);
- clk_disable_unprepare(pm->univpll_d2);
- clk_disable_unprepare(pm->clk_cci400_sel);
- clk_disable_unprepare(pm->venc_lt_sel);
- clk_disable_unprepare(pm->vdec_bus_clk_src);
- clk_disable_unprepare(pm->vencpll);
- clk_disable_unprepare(pm->vcodecpll);
+ for (i = dec_clk->clk_num - 1; i >= 0; i--)
+ clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk);
}
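The rewritten mtk_vcodec_dec_clock_on() uses the standard enable-with-unwind idiom; a generic, self-contained sketch of the same pattern (names here are illustrative, not the driver's API):

	#include <linux/clk.h>

	static int enable_all(struct clk **clks, int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = clk_prepare_enable(clks[i]);
			if (ret)
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0)	/* disable in reverse order */
			clk_disable_unprepare(clks[i]);
		return ret;
	}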
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index 3cffb381ac8e..e7e2a108def9 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -151,9 +151,9 @@ struct mtk_q_data {
* @intra_period: I frame period
* @gop_size: group of picture size, it's used as the intra frame period
* @framerate_num: frame rate numerator. ex: framerate_num=30 and
- * framerate_denom=1 menas FPS is 30
+ * framerate_denom=1 means FPS is 30
* @framerate_denom: frame rate denominator. ex: framerate_num=30 and
- * framerate_denom=1 menas FPS is 30
+ * framerate_denom=1 means FPS is 30
* @h264_max_qp: Max value for H.264 quantization parameter
* @h264_profile: V4L2 defined H.264 profile
* @h264_level: V4L2 defined H.264 level
@@ -176,22 +176,29 @@ struct mtk_enc_params {
};
/**
+ * struct mtk_vcodec_clk_info - Structure used to store clock name
+ * @clk_name: clock name read from the "clock-names" DT property
+ * @vcodec_clk: clock handle returned by devm_clk_get()
+ */
+struct mtk_vcodec_clk_info {
+ const char *clk_name;
+ struct clk *vcodec_clk;
+};
+
+/**
+ * struct mtk_vcodec_clk - Structure used to store vcodec clock information
+ * @clk_info: array of per-clock name/handle entries
+ * @clk_num: number of entries in @clk_info
+ */
+struct mtk_vcodec_clk {
+ struct mtk_vcodec_clk_info *clk_info;
+ int clk_num;
+};
+
+/**
* struct mtk_vcodec_pm - Power management data structure
*/
struct mtk_vcodec_pm {
- struct clk *vdec_bus_clk_src;
- struct clk *vencpll;
-
- struct clk *vcodecpll;
- struct clk *univpll_d2;
- struct clk *clk_cci400_sel;
- struct clk *vdecpll;
- struct clk *vdec_sel;
- struct clk *vencpll_d2;
- struct clk *venc_sel;
- struct clk *univpll1_d2;
- struct clk *venc_lt_sel;
+ struct mtk_vcodec_clk vdec_clk;
struct device *larbvdec;
+
+ struct mtk_vcodec_clk venc_clk;
struct device *larbvenc;
struct device *larbvenclt;
struct device *dev;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index d1f12257bf66..c6b48b5925fb 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -393,7 +393,7 @@ static void mtk_venc_set_param(struct mtk_vcodec_ctx *ctx,
param->input_yuv_fmt = VENC_YUV_FORMAT_NV21;
break;
default:
- mtk_v4l2_err("Unsupport fourcc =%d", q_data_src->fmt->fourcc);
+ mtk_v4l2_err("Unsupported fourcc =%d", q_data_src->fmt->fourcc);
break;
}
param->h264_profile = enc_params->h264_profile;
@@ -887,7 +887,7 @@ err_set_param:
static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
int ret;
mtk_v4l2_debug(2, "[%d]-> type=%d", ctx->id, q->type);
@@ -895,13 +895,11 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
dst_buf->planes[0].bytesused = 0;
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
- VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
}
} else {
while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
- VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
}
if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
@@ -937,8 +935,7 @@ static int mtk_venc_encode_header(void *priv)
{
struct mtk_vcodec_ctx *ctx = priv;
int ret;
- struct vb2_buffer *src_buf, *dst_buf;
- struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct mtk_vcodec_mem bs_buf;
struct venc_done_result enc_result;
@@ -948,14 +945,14 @@ static int mtk_venc_encode_header(void *priv)
return -EINVAL;
}
- bs_buf.va = vb2_plane_vaddr(dst_buf, 0);
- bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
bs_buf.size = (size_t)dst_buf->planes[0].length;
mtk_v4l2_debug(1,
"[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
ctx->id,
- dst_buf->index, bs_buf.va,
+ dst_buf->vb2_buf.index, bs_buf.va,
(u64)bs_buf.dma_addr,
bs_buf.size);
@@ -964,26 +961,23 @@ static int mtk_venc_encode_header(void *priv)
NULL, &bs_buf, &enc_result);
if (ret) {
- dst_buf->planes[0].bytesused = 0;
+ dst_buf->vb2_buf.planes[0].bytesused = 0;
ctx->state = MTK_STATE_ABORT;
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
- VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
mtk_v4l2_err("venc_if_encode failed=%d", ret);
return -EINVAL;
}
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
if (src_buf) {
- src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
- dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
- dst_buf->timestamp = src_buf->timestamp;
- dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->timecode = src_buf->timecode;
} else {
mtk_v4l2_err("No timestamp for the header buffer.");
}
ctx->state = MTK_STATE_HEADER;
dst_buf->planes[0].bytesused = enc_result.bs_size;
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
return 0;
}
@@ -991,9 +985,7 @@ static int mtk_venc_encode_header(void *priv)
static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
{
struct venc_enc_param enc_prm;
- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- struct vb2_v4l2_buffer *vb2_v4l2 =
- container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
struct mtk_video_enc_buf *mtk_buf =
container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
@@ -1067,12 +1059,11 @@ static void mtk_venc_worker(struct work_struct *work)
{
struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
encode_work);
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct venc_frm_buf frm_buf;
struct mtk_vcodec_mem bs_buf;
struct venc_done_result enc_result;
int ret, i;
- struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
/* check dst_buf, dst_buf may be removed in device_run
* to store encode header so we need to check dst_buf and
@@ -1086,15 +1077,15 @@ static void mtk_venc_worker(struct work_struct *work)
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
memset(&frm_buf, 0, sizeof(frm_buf));
- for (i = 0; i < src_buf->num_planes ; i++) {
+ for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) {
frm_buf.fb_addr[i].dma_addr =
- vb2_dma_contig_plane_dma_addr(src_buf, i);
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, i);
frm_buf.fb_addr[i].size =
- (size_t)src_buf->planes[i].length;
+ (size_t)src_buf->vb2_buf.planes[i].length;
}
- bs_buf.va = vb2_plane_vaddr(dst_buf, 0);
- bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
- bs_buf.size = (size_t)dst_buf->planes[0].length;
+ bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;
mtk_v4l2_debug(2,
"Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu",
@@ -1108,28 +1099,21 @@ static void mtk_venc_worker(struct work_struct *work)
ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
&frm_buf, &bs_buf, &enc_result);
- src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
- dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
-
- dst_buf->timestamp = src_buf->timestamp;
- dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->timecode = src_buf->timecode;
if (enc_result.is_key_frm)
- dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
if (ret) {
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
- VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
dst_buf->planes[0].bytesused = 0;
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
- VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
mtk_v4l2_err("venc_if_encode failed=%d", ret);
} else {
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
- VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
dst_buf->planes[0].bytesused = enc_result.bs_size;
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
- VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
enc_result.bs_size);
}
@@ -1137,7 +1121,7 @@ static void mtk_venc_worker(struct work_struct *work)
v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
mtk_v4l2_debug(1, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>",
- src_buf->index, dst_buf->index, ret,
+ src_buf->vb2_buf.index, dst_buf->vb2_buf.index, ret,
enc_result.bs_size);
}
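The mechanical changes through this file all follow from one layering fact: the v4l2-mem2mem helpers deal in struct vb2_v4l2_buffer, which embeds struct vb2_buffer as its vb2_buf member, and drivers in turn embed vb2_v4l2_buffer in their own buffer type. A minimal sketch of navigating the three layers (the driver struct here is hypothetical, mirroring mtk_video_enc_buf's layout):

#include <media/videobuf2-v4l2.h>

struct my_drv_buf {
	struct vb2_v4l2_buffer vb;	/* embeds struct vb2_buffer vb2_buf */
	struct list_head list;
};

static void navigate(struct vb2_v4l2_buffer *vbuf)
{
	/* Down one layer: plane/payload helpers take the plain vb2_buffer. */
	struct vb2_buffer *vb = &vbuf->vb2_buf;
	/* Up one layer: recover the driver wrapper with container_of(). */
	struct my_drv_buf *buf = container_of(vbuf, struct my_drv_buf, vb);

	(void)vb;
	(void)buf;
}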
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
index 7c025045ea90..39375b8ea27c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
@@ -27,9 +27,11 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
{
struct device_node *node;
struct platform_device *pdev;
- struct device *dev;
struct mtk_vcodec_pm *pm;
- int ret = 0;
+ struct mtk_vcodec_clk *enc_clk;
+ struct mtk_vcodec_clk_info *clk_info;
+ int ret = 0, i = 0;
+ struct device *dev;
pdev = mtkdev->plat_dev;
pm = &mtkdev->pm;
@@ -37,6 +39,7 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
pm->mtkdev = mtkdev;
pm->dev = &pdev->dev;
dev = &pdev->dev;
+ enc_clk = &pm->venc_clk;
node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
if (!node) {
@@ -68,28 +71,34 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
pdev = mtkdev->plat_dev;
pm->dev = &pdev->dev;
- pm->vencpll_d2 = devm_clk_get(&pdev->dev, "venc_sel_src");
- if (IS_ERR(pm->vencpll_d2)) {
- mtk_v4l2_err("devm_clk_get vencpll_d2 fail");
- ret = PTR_ERR(pm->vencpll_d2);
- }
-
- pm->venc_sel = devm_clk_get(&pdev->dev, "venc_sel");
- if (IS_ERR(pm->venc_sel)) {
- mtk_v4l2_err("devm_clk_get venc_sel fail");
- ret = PTR_ERR(pm->venc_sel);
+ enc_clk->clk_num = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (enc_clk->clk_num > 0) {
+ enc_clk->clk_info = devm_kcalloc(&pdev->dev,
+ enc_clk->clk_num, sizeof(*clk_info),
+ GFP_KERNEL);
+ if (!enc_clk->clk_info)
+ return -ENOMEM;
+ } else {
+ mtk_v4l2_err("Failed to get venc clock count");
+ return -EINVAL;
}
- pm->univpll1_d2 = devm_clk_get(&pdev->dev, "venc_lt_sel_src");
- if (IS_ERR(pm->univpll1_d2)) {
- mtk_v4l2_err("devm_clk_get univpll1_d2 fail");
- ret = PTR_ERR(pm->univpll1_d2);
- }
-
- pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
- if (IS_ERR(pm->venc_lt_sel)) {
- mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
- ret = PTR_ERR(pm->venc_lt_sel);
+ for (i = 0; i < enc_clk->clk_num; i++) {
+ clk_info = &enc_clk->clk_info[i];
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i, &clk_info->clk_name);
+ if (ret) {
+ mtk_v4l2_err("venc failed to get clk name %d", i);
+ return ret;
+ }
+ clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
+ clk_info->clk_name);
+ if (IS_ERR(clk_info->vcodec_clk)) {
+ mtk_v4l2_err("venc devm_clk_get (%d)%s fail", i,
+ clk_info->clk_name);
+ return PTR_ERR(clk_info->vcodec_clk);
+ }
}
return ret;
@@ -102,38 +111,45 @@ void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *mtkdev)
void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
{
- int ret;
-
- ret = clk_prepare_enable(pm->venc_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable fail %d", ret);
-
- ret = clk_set_parent(pm->venc_sel, pm->vencpll_d2);
- if (ret)
- mtk_v4l2_err("clk_set_parent fail %d", ret);
-
- ret = clk_prepare_enable(pm->venc_lt_sel);
- if (ret)
- mtk_v4l2_err("clk_prepare_enable fail %d", ret);
-
- ret = clk_set_parent(pm->venc_lt_sel, pm->univpll1_d2);
- if (ret)
- mtk_v4l2_err("clk_set_parent fail %d", ret);
+ struct mtk_vcodec_clk *enc_clk = &pm->venc_clk;
+ int ret, i = 0;
+
+ for (i = 0; i < enc_clk->clk_num; i++) {
+ ret = clk_prepare_enable(enc_clk->clk_info[i].vcodec_clk);
+ if (ret) {
+ mtk_v4l2_err("venc clk_prepare_enable %d %s fail %d", i,
+ enc_clk->clk_info[i].clk_name, ret);
+ goto clkerr;
+ }
+ }
ret = mtk_smi_larb_get(pm->larbvenc);
- if (ret)
+ if (ret) {
mtk_v4l2_err("mtk_smi_larb_get larb3 fail %d", ret);
-
+ goto larbvencerr;
+ }
ret = mtk_smi_larb_get(pm->larbvenclt);
- if (ret)
+ if (ret) {
mtk_v4l2_err("mtk_smi_larb_get larb4 fail %d", ret);
+ goto larbvenclterr;
+ }
+ return;
+larbvenclterr:
+ mtk_smi_larb_put(pm->larbvenc);
+larbvencerr:
+clkerr:
+ for (i -= 1; i >= 0; i--)
+ clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk);
}
void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm)
{
+ struct mtk_vcodec_clk *enc_clk = &pm->venc_clk;
+ int i = 0;
+
mtk_smi_larb_put(pm->larbvenc);
mtk_smi_larb_put(pm->larbvenclt);
- clk_disable_unprepare(pm->venc_lt_sel);
- clk_disable_unprepare(pm->venc_sel);
+ for (i = enc_clk->clk_num - 1; i >= 0; i--)
+ clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk);
}
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index aa3ce41898bc..02c960c63ac0 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -55,7 +55,7 @@ struct h264_fb {
/**
* struct h264_ring_fb_list - ring frame buffer list
- * @fb_list : frame buffer arrary
+ * @fb_list : frame buffer array
* @read_idx : read index
* @write_idx : write index
* @count : buffer count in list
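For context, @read_idx and @write_idx implement a conventional ring: each advances modulo the array size while @count tracks occupancy. An illustrative sketch of the push side, assuming the driver's H264_MAX_FB_NUM bound for fb_list[] (the helper itself is not the driver's code):

static void ring_fb_push(struct h264_ring_fb_list *list,
			 const struct h264_fb *fb)
{
	list->fb_list[list->write_idx] = *fb;
	list->write_idx = (list->write_idx + 1) % H264_MAX_FB_NUM;
	list->count++;
}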
@@ -72,7 +72,7 @@ struct h264_ring_fb_list {
/**
* struct vdec_h264_dec_info - decode information
* @dpb_sz : decoding picture buffer size
- * @resolution_changed : resoltion change happen
+ * @resolution_changed : resolution change happened
* @realloc_mv_buf : flag to notify driver to re-allocate mv buffer
* @reserved : for 8 bytes alignment
* @bs_dma : Input bit-stream buffer dma address
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index 3e84a761db3a..bac3723038de 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -120,7 +120,7 @@ struct vdec_vp8_hw_reg_base {
/**
* struct vdec_vp8_vpu_inst - VPU instance for VP8 decode
* @wq_hd : Wait queue to wait VPU message ack
- * @signaled : 1 - Host has received ack message from VPU, 0 - not recevie
+ * @signaled : 1 - Host has received ack message from VPU, 0 - not received
* @failure : VPU execution result status 0 - success, others - fail
* @inst_addr : VPU decoder instance address
*/
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
index ded1154481cd..9a21591f3818 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -80,7 +80,7 @@ void vdec_if_deinit(struct mtk_vcodec_ctx *ctx);
* vdec_if_decode() - trigger decode
* @ctx : [in] v4l2 context
* @bs : [in] input bitstream
- * @fb : [in] frame buffer to store decoded frame, when null menas parse
+ * @fb : [in] frame buffer to store decoded frame, when null means parse
* header only
* @res_chg : [out] resolution change happens if current bs have different
* picture width/height
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index cd37bb2a610f..79d8eac7f5e2 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -62,7 +62,7 @@ int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len);
/**
* vpu_dec_end - end decoding, basically the function will be invoked once
* when HW decoding done interrupt received successfully. The
- * decoder in VPU will continute to do referene frame management
+ * decoder in VPU will continue to do reference frame management
* and check if there is a new decoded frame available to display.
*
* @vpu : instance for vdec_vpu_inst
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 27b078cf98e3..f60f499c596b 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -274,7 +274,7 @@ static void emmaprp_device_run(void *priv)
{
struct emmaprp_ctx *ctx = priv;
struct emmaprp_q_data *s_q_data, *d_q_data;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct emmaprp_dev *pcdev = ctx->dev;
unsigned int s_width, s_height;
unsigned int d_width, d_height;
@@ -294,8 +294,8 @@ static void emmaprp_device_run(void *priv)
d_height = d_q_data->height;
d_size = d_width * d_height;
- p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
if (!p_in || !p_out) {
v4l2_err(&pcdev->v4l2_dev,
"Acquiring kernel pointers to buffers failed\n");
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index f447ae3bb465..37f0d7146dfa 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -513,7 +513,7 @@ static int omapvid_apply_changes(struct omap_vout_device *vout)
}
static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
- unsigned int irqstatus, struct timeval timevalue)
+ unsigned int irqstatus, u64 ts)
{
u32 fid;
@@ -537,7 +537,7 @@ static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
if (vout->cur_frm == vout->next_frm)
goto err;
- vout->cur_frm->ts = timevalue;
+ vout->cur_frm->ts = ts;
vout->cur_frm->state = VIDEOBUF_DONE;
wake_up_interruptible(&vout->cur_frm->done);
vout->cur_frm = vout->next_frm;
@@ -557,7 +557,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
int ret, fid, mgr_id;
u32 addr, irq;
struct omap_overlay *ovl;
- struct timeval timevalue;
+ u64 ts;
struct omapvideo_info *ovid;
struct omap_dss_device *cur_display;
struct omap_vout_device *vout = (struct omap_vout_device *)arg;
@@ -577,7 +577,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
return;
spin_lock(&vout->vbq_lock);
- v4l2_get_timestamp(&timevalue);
+ ts = ktime_get_ns();
switch (cur_display->type) {
case OMAP_DISPLAY_TYPE_DSI:
@@ -595,7 +595,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
break;
case OMAP_DISPLAY_TYPE_VENC:
fid = omapvid_handle_interlace_display(vout, irqstatus,
- timevalue);
+ ts);
if (!fid)
goto vout_isr_err;
break;
@@ -608,7 +608,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
}
if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
- vout->cur_frm->ts = timevalue;
+ vout->cur_frm->ts = ts;
vout->cur_frm->state = VIDEOBUF_DONE;
wake_up_interruptible(&vout->cur_frm->done);
vout->cur_frm = vout->next_frm;
@@ -1129,7 +1129,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
}
timing = &dssdev->panel.timings;
- /* We dont support RGB24-packed mode if vrfb rotation
+ /* We don't support RGB24-packed mode if vrfb rotation
* is enabled*/
if ((is_rotation_enabled(vout)) &&
f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
@@ -1147,7 +1147,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
vout->fbuf.fmt.width = timing->x_res;
}
- /* change to samller size is OK */
+ /* change to smaller size is OK */
bpp = omap_vout_try_format(&f->fmt.pix);
f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 56b630b1c8b4..c740393c8509 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -37,7 +37,7 @@
#define VID_MAX_WIDTH 1280 /* Largest width */
#define VID_MAX_HEIGHT 720 /* Largest height */
-/* Mimimum requirement is 2x2 for DSS */
+/* Minimum requirement is 2x2 for DSS */
#define VID_MIN_WIDTH 2
#define VID_MIN_HEIGHT 2
@@ -135,7 +135,7 @@ struct omap_vout_device {
enum omap_color_mode dss_mode;
/* we don't allow to request new buffer when old buffers are
- * still mmaped
+ * still mmapped
*/
int mmap_count;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 13f2828d880d..bd57174d81a7 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1517,7 +1517,7 @@ void omap3isp_print_status(struct isp_device *isp)
*
* To solve this problem power management support is split into prepare/complete
* and suspend/resume operations. The pipelines are stopped in prepare() and the
- * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in
+ * ISP clocks get disabled in suspend(). Similarly, the clocks are re-enabled in
* resume(), and the pipelines are restarted in complete().
*
* TODO: PM dependencies between the ISP and sensors are not modelled explicitly
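The split described above maps directly onto dev_pm_ops: .prepare() runs before any suspend callback and .complete() after every resume callback, which is what lets the pipeline work bracket the clock work. A skeletal sketch (handler bodies elided to comments, names hypothetical):

#include <linux/pm.h>

static int my_isp_prepare(struct device *dev)	{ /* stop pipelines */ return 0; }
static int my_isp_suspend(struct device *dev)	{ /* disable ISP clocks */ return 0; }
static int my_isp_resume(struct device *dev)	{ /* re-enable ISP clocks */ return 0; }
static void my_isp_complete(struct device *dev)	{ /* restart pipelines */ }

static const struct dev_pm_ops my_isp_pm_ops = {
	.prepare = my_isp_prepare,
	.suspend = my_isp_suspend,
	.resume = my_isp_resume,
	.complete = my_isp_complete,
};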
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 14a1c24037c4..261ad1175f98 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -1594,7 +1594,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
return 0;
/* We're in continuous mode, and memory writes were disabled due to a
- * buffer underrun. Reenable them now that we have a buffer. The buffer
+ * buffer underrun. Re-enable them now that we have a buffer. The buffer
* address has been set in ccdc_video_queue.
*/
if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) {
@@ -1712,7 +1712,7 @@ static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc)
* data to memory the CCDC and LSC are stopped immediately but
* without change the CCDC stopping state machine. The CCDC
* stopping state machine should be used only when user request
- * for stopping is received (SINGLESHOT is an exeption).
+ * for stopping is received (SINGLESHOT is an exception).
*/
switch (ccdc->state) {
case ISP_PIPELINE_STREAM_SINGLESHOT:
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 9c180f607bcb..da66ea65be5d 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -710,7 +710,7 @@ static void csi2_isr_ctx(struct isp_csi2_device *csi2,
/* Skip interrupts until we reach the frame skip count. The CSI2 will be
* automatically disabled, as the frame skip count has been programmed
- * in the CSI2_CTx_CTRL1::COUNT field, so reenable it.
+ * in the CSI2_CTx_CTRL1::COUNT field, so re-enable it.
*
* It would have been nice to rely on the FRAME_NUMBER interrupt instead
* but it turned out that the interrupt is only generated when the CSI2
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 5f930560eb30..4fe228752a43 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -1631,7 +1631,7 @@ static int pxa_camera_set_bus_param(struct pxa_camera_dev *pcdev)
pcdev->channels = 1;
- /* Make choises, based on platform preferences */
+ /* Make choices, based on platform preferences */
if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
(common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
if (pcdev->platform_flags & PXA_CAMERA_HSP)
@@ -2394,15 +2394,17 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->res = res;
pcdev->pdata = pdev->dev.platform_data;
- if (pdev->dev.of_node && !pcdev->pdata) {
- err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
- } else {
+ if (pcdev->pdata) {
pcdev->platform_flags = pcdev->pdata->flags;
pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
pcdev->asd.match.i2c.adapter_id =
pcdev->pdata->sensor_i2c_adapter_id;
pcdev->asd.match.i2c.address = pcdev->pdata->sensor_i2c_address;
+ } else if (pdev->dev.of_node) {
+ err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
+ } else {
+ return -ENODEV;
}
if (err < 0)
return err;
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index cb411eb85ee4..739366744e0f 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -455,7 +455,7 @@ static const struct venus_resources msm8996_res = {
.reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset),
.clks = {"core", "iface", "bus", "mbus" },
.clks_num = 4,
- .max_load = 2563200,
+ .max_load = 3110400, /* 4096x2160@90 */
.hfi_version = HFI_VERSION_3XX,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
@@ -465,10 +465,12 @@ static const struct venus_resources msm8996_res = {
};
static const struct freq_tbl sdm845_freq_table[] = {
- { 1944000, 380000000 }, /* 4k UHD @ 60 */
- { 972000, 320000000 }, /* 4k UHD @ 30 */
- { 489600, 200000000 }, /* 1080p @ 60 */
- { 244800, 100000000 }, /* 1080p @ 30 */
+ { 3110400, 533000000 }, /* 4096x2160@90 */
+ { 2073600, 444000000 }, /* 4096x2160@60 */
+ { 1944000, 404000000 }, /* 3840x2160@60 */
+ { 972000, 330000000 }, /* 3840x2160@30 */
+ { 489600, 200000000 }, /* 1920x1080@60 */
+ { 244800, 100000000 }, /* 1920x1080@30 */
};
static const struct venus_resources sdm845_res = {
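The load keys in this table are macroblock rates, (width/16) * (height/16) * fps, with dimensions rounded up to whole 16x16 macroblocks: 4096x2160@90 gives 256 * 135 * 90 = 3110400 (also the new msm8996 max_load above), and 1920x1080@30 gives 120 * 68 * 30 = 244800. A one-liner that reproduces the table keys:

/* Reproduces the freq-table keys; DIV_ROUND_UP rounds a partial
 * macroblock (e.g. 1080/16 = 67.5 -> 68) up to a whole one. */
static u32 mbs_per_sec(u32 width, u32 height, u32 fps)
{
	return DIV_ROUND_UP(width, 16) * DIV_ROUND_UP(height, 16) * fps;
}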
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 6382cea29185..7a3feb5cee00 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -134,6 +134,7 @@ struct venus_core {
struct video_firmware {
struct device *dev;
struct iommu_domain *iommu_domain;
+ size_t mapped_mem_size;
} fw;
struct mutex lock;
struct list_head instances;
@@ -218,7 +219,7 @@ struct venus_buffer {
#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
/**
- * struct venus_inst - holds per instance paramerters
+ * struct venus_inst - holds per instance parameters
*
* @list: used for attach an instance to the core
* @lock: instance lock
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index c29acfd70c1b..6cfa8021721e 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -35,14 +35,15 @@
static void venus_reset_cpu(struct venus_core *core)
{
+ u32 fw_size = core->fw.mapped_mem_size;
void __iomem *base = core->base;
writel(0, base + WRAPPER_FW_START_ADDR);
- writel(VENUS_FW_MEM_SIZE, base + WRAPPER_FW_END_ADDR);
+ writel(fw_size, base + WRAPPER_FW_END_ADDR);
writel(0, base + WRAPPER_CPA_START_ADDR);
- writel(VENUS_FW_MEM_SIZE, base + WRAPPER_CPA_END_ADDR);
- writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_START_ADDR);
- writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_END_ADDR);
+ writel(fw_size, base + WRAPPER_CPA_END_ADDR);
+ writel(fw_size, base + WRAPPER_NONPIX_START_ADDR);
+ writel(fw_size, base + WRAPPER_NONPIX_END_ADDR);
writel(0x0, base + WRAPPER_CPU_CGC_DIS);
writel(0x0, base + WRAPPER_CPU_CLOCK_CONFIG);
@@ -74,6 +75,9 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
void *mem_va;
int ret;
+ *mem_phys = 0;
+ *mem_size = 0;
+
dev = core->dev;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node) {
@@ -85,28 +89,30 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
if (ret)
return ret;
+ ret = request_firmware(&mdt, fwname, dev);
+ if (ret < 0)
+ return ret;
+
+ fw_size = qcom_mdt_get_size(mdt);
+ if (fw_size < 0) {
+ ret = fw_size;
+ goto err_release_fw;
+ }
+
*mem_phys = r.start;
*mem_size = resource_size(&r);
- if (*mem_size < VENUS_FW_MEM_SIZE)
- return -EINVAL;
+ if (*mem_size < fw_size || fw_size > VENUS_FW_MEM_SIZE) {
+ ret = -EINVAL;
+ goto err_release_fw;
+ }
mem_va = memremap(r.start, *mem_size, MEMREMAP_WC);
if (!mem_va) {
dev_err(dev, "unable to map memory region: %pa+%zx\n",
&r.start, *mem_size);
- return -ENOMEM;
- }
-
- ret = request_firmware(&mdt, fwname, dev);
- if (ret < 0)
- goto err_unmap;
-
- fw_size = qcom_mdt_get_size(mdt);
- if (fw_size < 0) {
- ret = fw_size;
- release_firmware(mdt);
- goto err_unmap;
+ ret = -ENOMEM;
+ goto err_release_fw;
}
if (core->use_tz)
@@ -116,10 +122,9 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID,
mem_va, *mem_phys, *mem_size, NULL);
- release_firmware(mdt);
-
-err_unmap:
memunmap(mem_va);
+err_release_fw:
+ release_firmware(mdt);
return ret;
}
@@ -135,6 +140,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
return -EPROBE_DEFER;
iommu = core->fw.iommu_domain;
+ core->fw.mapped_mem_size = mem_size;
ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
@@ -150,6 +156,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
static int venus_shutdown_no_tz(struct venus_core *core)
{
+ const size_t mapped = core->fw.mapped_mem_size;
struct iommu_domain *iommu;
size_t unmapped;
u32 reg;
@@ -166,8 +173,8 @@ static int venus_shutdown_no_tz(struct venus_core *core)
iommu = core->fw.iommu_domain;
- unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, VENUS_FW_MEM_SIZE);
- if (unmapped != VENUS_FW_MEM_SIZE)
+ unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped);
+ if (unmapped != mapped)
dev_err(dev, "failed to unmap firmware\n");
return 0;
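Net effect of the reordering: the firmware image is requested and size-checked before the comparatively expensive memremap(), and both exit paths release resources in reverse order of acquisition. A condensed sketch of the new shape (function name hypothetical; the real function also loads the image before unmapping):

#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/soc/qcom/mdt_loader.h>

static int load_fw_sketch(struct device *dev, const char *fwname,
			  phys_addr_t mem_phys, size_t mem_size)
{
	const struct firmware *mdt;
	ssize_t fw_size;
	void *mem_va;
	int ret;

	ret = request_firmware(&mdt, fwname, dev);	/* 1: acquire */
	if (ret < 0)
		return ret;

	fw_size = qcom_mdt_get_size(mdt);		/* 2: validate first */
	if (fw_size < 0 || mem_size < fw_size) {
		ret = fw_size < 0 ? fw_size : -EINVAL;
		goto err_release_fw;
	}

	mem_va = memremap(mem_phys, mem_size, MEMREMAP_WC); /* 3: map last */
	if (!mem_va) {
		ret = -ENOMEM;
		goto err_release_fw;
	}

	/* ... qcom_mdt_load_no_init() happens here in the real code ... */

	memunmap(mem_va);				/* 4: unwind in reverse */
err_release_fw:
	release_firmware(mdt);
	return ret;
}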
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index e436385bc5ab..5cad601d4c57 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -439,9 +439,6 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
fdata.flags = 0;
fdata.clnt_data = vbuf->vb2_buf.index;
- if (!fdata.timestamp)
- fdata.flags |= HFI_BUFFERFLAG_TIMESTAMPINVALID;
-
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
fdata.buffer_type = HFI_BUFFER_INPUT;
fdata.filled_len = vb2_get_plane_payload(vb, 0);
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index f0719ce24b97..594d80434004 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -131,9 +131,13 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
!is_media_entity_v4l2_video_device(link->sink->entity))
return 0;
- /* If any entity is in use don't allow link changes. */
+ /*
+ * Don't allow link changes if any entity in the graph is
+ * streaming, as modifying the CHSEL register fields can
+ * disrupt running streams.
+ */
media_device_for_each_entity(entity, &group->mdev)
- if (entity->use_count)
+ if (entity->stream_count)
return -EBUSY;
mutex_lock(&group->lock);
@@ -542,9 +546,7 @@ static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name);
- mutex_lock(&vin->lock);
rvin_parallel_subdevice_detach(vin);
- mutex_unlock(&vin->lock);
}
static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
@@ -554,9 +556,7 @@ static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
int ret;
- mutex_lock(&vin->lock);
ret = rvin_parallel_subdevice_attach(vin, subdev);
- mutex_unlock(&vin->lock);
if (ret)
return ret;
@@ -664,7 +664,6 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
}
/* Create all media device links between VINs and CSI-2's. */
- mutex_lock(&vin->group->lock);
for (route = vin->info->routes; route->mask; route++) {
struct media_pad *source_pad, *sink_pad;
struct media_entity *source, *sink;
@@ -700,7 +699,6 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
break;
}
}
- mutex_unlock(&vin->group->lock);
return ret;
}
@@ -716,8 +714,6 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
if (vin->group->vin[i])
rvin_v4l2_unregister(vin->group->vin[i]);
- mutex_lock(&vin->group->lock);
-
for (i = 0; i < RVIN_CSI_MAX; i++) {
if (vin->group->csi[i].fwnode != asd->match.fwnode)
continue;
@@ -725,8 +721,6 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
vin_dbg(vin, "Unbind CSI-2 %s from slot %u\n", subdev->name, i);
break;
}
-
- mutex_unlock(&vin->group->lock);
}
static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
@@ -736,8 +730,6 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
unsigned int i;
- mutex_lock(&vin->group->lock);
-
for (i = 0; i < RVIN_CSI_MAX; i++) {
if (vin->group->csi[i].fwnode != asd->match.fwnode)
continue;
@@ -746,8 +738,6 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
break;
}
- mutex_unlock(&vin->group->lock);
-
return 0;
}
@@ -1146,6 +1136,10 @@ static const struct rvin_info rcar_info_r8a77995 = {
static const struct of_device_id rvin_of_id_table[] = {
{
+ .compatible = "renesas,vin-r8a774c0",
+ .data = &rcar_info_r8a77990,
+ },
+ {
.compatible = "renesas,vin-r8a7778",
.data = &rcar_info_m1,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index 6d356f5a9456..f64528d2be3c 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -152,37 +152,37 @@ static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = {
};
static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = {
- { .mbps = 89, .reg = 0x00 },
- { .mbps = 99, .reg = 0x20 },
- { .mbps = 109, .reg = 0x40 },
- { .mbps = 129, .reg = 0x02 },
- { .mbps = 139, .reg = 0x22 },
- { .mbps = 149, .reg = 0x42 },
- { .mbps = 169, .reg = 0x04 },
- { .mbps = 179, .reg = 0x24 },
- { .mbps = 199, .reg = 0x44 },
- { .mbps = 219, .reg = 0x06 },
- { .mbps = 239, .reg = 0x26 },
- { .mbps = 249, .reg = 0x46 },
- { .mbps = 269, .reg = 0x08 },
- { .mbps = 299, .reg = 0x28 },
- { .mbps = 329, .reg = 0x0a },
- { .mbps = 359, .reg = 0x2a },
- { .mbps = 399, .reg = 0x4a },
- { .mbps = 449, .reg = 0x0c },
- { .mbps = 499, .reg = 0x2c },
- { .mbps = 549, .reg = 0x0e },
- { .mbps = 599, .reg = 0x2e },
- { .mbps = 649, .reg = 0x10 },
- { .mbps = 699, .reg = 0x30 },
- { .mbps = 749, .reg = 0x12 },
- { .mbps = 799, .reg = 0x32 },
- { .mbps = 849, .reg = 0x52 },
- { .mbps = 899, .reg = 0x72 },
- { .mbps = 949, .reg = 0x14 },
- { .mbps = 999, .reg = 0x34 },
- { .mbps = 1049, .reg = 0x54 },
- { .mbps = 1099, .reg = 0x74 },
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x20 },
+ { .mbps = 100, .reg = 0x40 },
+ { .mbps = 110, .reg = 0x02 },
+ { .mbps = 130, .reg = 0x22 },
+ { .mbps = 140, .reg = 0x42 },
+ { .mbps = 150, .reg = 0x04 },
+ { .mbps = 170, .reg = 0x24 },
+ { .mbps = 180, .reg = 0x44 },
+ { .mbps = 200, .reg = 0x06 },
+ { .mbps = 220, .reg = 0x26 },
+ { .mbps = 240, .reg = 0x46 },
+ { .mbps = 250, .reg = 0x08 },
+ { .mbps = 270, .reg = 0x28 },
+ { .mbps = 300, .reg = 0x0a },
+ { .mbps = 330, .reg = 0x2a },
+ { .mbps = 360, .reg = 0x4a },
+ { .mbps = 400, .reg = 0x0c },
+ { .mbps = 450, .reg = 0x2c },
+ { .mbps = 500, .reg = 0x0e },
+ { .mbps = 550, .reg = 0x2e },
+ { .mbps = 600, .reg = 0x10 },
+ { .mbps = 650, .reg = 0x30 },
+ { .mbps = 700, .reg = 0x12 },
+ { .mbps = 750, .reg = 0x32 },
+ { .mbps = 800, .reg = 0x52 },
+ { .mbps = 850, .reg = 0x72 },
+ { .mbps = 900, .reg = 0x14 },
+ { .mbps = 950, .reg = 0x34 },
+ { .mbps = 1000, .reg = 0x54 },
+ { .mbps = 1050, .reg = 0x74 },
{ .mbps = 1125, .reg = 0x16 },
{ /* sentinel */ },
};
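For reference, tables like this are consumed by scanning for the first entry whose .mbps is greater than or equal to the computed link rate, so re-keying the rows from bucket upper bounds (89, 99, ...) to nominal rates (80, 90, ...) shifts every bucket boundary. A sketch of that lookup (helper name hypothetical; the sentinel entry terminates the scan):

static int phtw_reg_for_mbps(const struct rcsi2_mbps_reg *table,
			     unsigned int mbps)
{
	const struct rcsi2_mbps_reg *entry;

	for (entry = table; entry->mbps; entry++)
		if (entry->mbps >= mbps)
			return entry->reg;

	return -ERANGE;	/* rate above the last supported entry */
}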
@@ -986,6 +986,10 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = {
static const struct of_device_id rcar_csi2_of_table[] = {
{
+ .compatible = "renesas,r8a774c0-csi2",
+ .data = &rcar_csi2_info_r8a77990,
+ },
+ {
.compatible = "renesas,r8a7795-csi2",
.data = &rcar_csi2_info_r8a7795,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 92323310f735..2207a31d355e 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -681,7 +681,7 @@ static int rvin_setup(struct rvin_dev *vin)
break;
}
- /* Enable VSYNC Field Toogle mode after one VSYNC input */
+ /* Enable VSYNC Field Toggle mode after one VSYNC input */
if (vin->info->model == RCAR_GEN3)
dmr2 = VNDMR2_FTEV;
else
@@ -1341,5 +1341,5 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
pm_runtime_put(vin->dev);
- return ret;
+ return 0;
}
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 7a2851790b91..7cbdcbf9b090 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -665,7 +665,7 @@ static void rvin_mc_try_format(struct rvin_dev *vin,
* The V4L2 specification clearly documents the colorspace fields
* as being set by drivers for capture devices. Using the values
* supplied by userspace thus wouldn't comply with the API. Until
- * the API is updated force fixed vaules.
+ * the API is updated, force fixed values.
*/
pix->colorspace = RVIN_DEFAULT_COLORSPACE;
pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
@@ -964,7 +964,7 @@ void rvin_v4l2_unregister(struct rvin_dev *vin)
v4l2_info(&vin->v4l2_dev, "Removing %s\n",
video_device_node_name(&vin->vdev));
- /* Checks internaly if vdev have been init or not */
+ /* Checks internally if vdev has been initialized or not */
video_unregister_device(&vin->vdev);
}
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
index 96d1b1b3fe8e..843e50d17a72 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.c
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -256,7 +256,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
}
/*
- * Cacluate the up/down scaling mode/factor.
+ * Calculate the up/down scaling mode/factor.
*
* RGA used to scale the picture first, and then rotate second,
* so we need to swap the w/h when rotate degree is 90/270.
@@ -304,7 +304,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
}
/*
- * Cacluate the framebuffer virtual strides and active size,
+ * Calculate the framebuffer virtual strides and active size,
* note that the step of vir_stride / vir_width is 4 byte words
*/
src_vir_info.data.vir_stride = ctx->in.stride >> 2;
@@ -318,7 +318,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
dst_act_info.data.act_width = dst_w - 1;
/*
- * Cacluate the source framebuffer base address with offset pixel.
+ * Calculate the source framebuffer base address with offset pixel.
*/
src_offsets = rga_get_addr_offset(&ctx->in, src_x, src_y,
src_w, src_h);
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 5c653287185f..b096227a9722 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -43,7 +43,7 @@ static void device_run(void *prv)
{
struct rga_ctx *ctx = prv;
struct rockchip_rga *rga = ctx->rga;
- struct vb2_buffer *src, *dst;
+ struct vb2_v4l2_buffer *src, *dst;
unsigned long flags;
spin_lock_irqsave(&rga->ctrl_lock, flags);
@@ -53,8 +53,8 @@ static void device_run(void *prv)
src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- rga_buf_map(src);
- rga_buf_map(dst);
+ rga_buf_map(&src->vb2_buf);
+ rga_buf_map(&dst->vb2_buf);
rga_hw_start(rga);
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
index 1f5c8c94ce89..be5e7357dffc 100644
--- a/drivers/media/platform/s3c-camif/camif-core.h
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -260,7 +260,7 @@ struct camif_vp {
* @clock: clocks required for the CAMIF operation
* @lock: mutex protecting this data structure
* @slock: spinlock protecting CAMIF registers
- * @io_base: start address of the mmaped CAMIF registers
+ * @io_base: start address of the mmapped CAMIF registers
*/
struct camif_dev {
struct media_device media_dev;
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 57ab1d1085d1..971c47165010 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -513,7 +513,7 @@ static void device_run(void *prv)
{
struct g2d_ctx *ctx = prv;
struct g2d_dev *dev = ctx->dev;
- struct vb2_buffer *src, *dst;
+ struct vb2_v4l2_buffer *src, *dst;
unsigned long flags;
u32 cmd = 0;
@@ -528,10 +528,10 @@ static void device_run(void *prv)
spin_lock_irqsave(&dev->ctrl_lock, flags);
g2d_set_src_size(dev, &ctx->in);
- g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));
+ g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
g2d_set_dst_size(dev, &ctx->out);
- g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));
+ g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
g2d_set_rop4(dev, ctx->rop);
g2d_set_flip(dev, ctx->flip);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 3f9000b70385..8cc730eccb6c 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -3,7 +3,7 @@
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -793,14 +793,14 @@ static void skip(struct s5p_jpeg_buffer *buf, long len);
static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, x, components;
jpeg_buffer.size = 2; /* Ls */
jpeg_buffer.data =
- (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2;
+ (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
jpeg_buffer.curr = 0;
word = 0;
@@ -830,14 +830,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, i, n, j;
for (j = 0; j < ctx->out_q.dht.n; ++j) {
jpeg_buffer.size = ctx->out_q.dht.len[j];
- jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
+ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
ctx->out_q.dht.marker[j];
jpeg_buffer.curr = 0;
@@ -889,13 +889,13 @@ static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
int c, x, components;
jpeg_buffer.size = ctx->out_q.sof_len;
jpeg_buffer.data =
- (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof;
+ (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof;
jpeg_buffer.curr = 0;
skip(&jpeg_buffer, 5); /* P, Y, X */
@@ -920,14 +920,14 @@ static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, i, j;
for (j = 0; j < ctx->out_q.dqt.n; ++j) {
jpeg_buffer.size = ctx->out_q.dqt.len[j];
- jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
+ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
ctx->out_q.dqt.marker[j];
jpeg_buffer.curr = 0;
@@ -1293,13 +1293,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
return 0;
}
-static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
+static int enum_fmt(struct s5p_jpeg_ctx *ctx,
+ struct s5p_jpeg_fmt *sjpeg_formats, int n,
struct v4l2_fmtdesc *f, u32 type)
{
int i, num = 0;
+ unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;
for (i = 0; i < n; ++i) {
- if (sjpeg_formats[i].flags & type) {
+ if (sjpeg_formats[i].flags & type &&
+ sjpeg_formats[i].flags & fmt_ver_flag) {
/* index-th format of type type found ? */
if (num == f->index)
break;
@@ -1326,11 +1329,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_CAPTURE);
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
- SJPEG_FMT_FLAG_DEC_CAPTURE);
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_CAPTURE);
}
static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
@@ -1339,11 +1342,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_OUTPUT);
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
- SJPEG_FMT_FLAG_DEC_OUTPUT);
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_OUTPUT);
}
static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
@@ -2002,7 +2005,7 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_RESTART_INTERVAL,
- 0, 3, 0xffff, 0);
+ 0, 0xffff, 1, 0);
if (ctx->jpeg->variant->version == SJPEG_S5P)
mask = ~0x06; /* 422, 420 */
}
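This one-liner is an argument-order fix: v4l2_ctrl_new_std() takes min, max, step, def in that order, so the old call defined a control with max = 3 and step = 0xffff. The corrected call gives V4L2_CID_JPEG_RESTART_INTERVAL the intended 0..0xffff range with step 1 and default 0. For reference, the helper's signature from media/v4l2-ctrls.h:

struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
				    const struct v4l2_ctrl_ops *ops,
				    u32 id, s64 min, s64 max, u64 step, s64 def);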
@@ -2072,15 +2075,15 @@ static void s5p_jpeg_device_run(void *priv)
{
struct s5p_jpeg_ctx *ctx = priv;
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned long src_addr, dst_addr, flags;
spin_lock_irqsave(&ctx->jpeg->slock, flags);
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
s5p_jpeg_reset(jpeg->regs);
s5p_jpeg_poweron(jpeg->regs);
@@ -2153,7 +2156,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size, padding_bytes = 0;
@@ -2172,7 +2175,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
}
- jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
+ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (fmt->colplanes == 2) {
jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
@@ -2190,7 +2193,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
unsigned int jpeg_addr = 0;
if (ctx->mode == S5P_JPEG_ENCODE)
@@ -2198,7 +2201,7 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
else
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
ctx->mode == S5P_JPEG_DECODE)
jpeg_addr += ctx->out_q.sos;
@@ -2314,7 +2317,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size;
@@ -2328,7 +2331,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
fmt = ctx->cap_q.fmt;
}
- jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
+ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (fmt->colplanes == 2) {
jpeg_addr.cb = jpeg_addr.y + pix_size;
@@ -2346,7 +2349,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
unsigned int jpeg_addr = 0;
if (ctx->mode == S5P_JPEG_ENCODE)
@@ -2354,7 +2357,7 @@ static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
else
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr);
}
@@ -3220,7 +3223,7 @@ static struct platform_driver s5p_jpeg_driver = {
module_platform_driver(s5p_jpeg_driver);
-MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>");
+MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>");
MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
MODULE_DESCRIPTION("Samsung JPEG codec driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index a46465e10351..144c102ff05f 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -3,7 +3,7 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -153,7 +153,7 @@ struct s5p_jpeg_variant {
/**
* struct jpeg_fmt - driver's internal color format data
- * @name: format descritpion
+ * @name: format description
* @fourcc: the fourcc code, 0 if not applicable
* @depth: number of bits per pixel
* @colplanes: number of color planes (1 for packed formats)
@@ -193,7 +193,7 @@ struct s5p_jpeg_marker {
* @sos: SOS marker's position relative to the buffer beginning
* @dht: DHT markers' positions relative to the buffer beginning
* @dqt: DQT markers' positions relative to the buffer beginning
- * @sof: SOF0 marker's postition relative to the buffer beginning
+ * @sof: SOF0 marker's position relative to the buffer beginning
* @sof_len: SOF0 marker's payload length (without length field itself)
* @components: number of image components
* @size: image buffer size in bytes
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
index b5f20e722b63..59c6263a71bf 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
@@ -3,7 +3,7 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
index f208fa3ed738..bfe746f8f750 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
@@ -3,7 +3,7 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
index df790b10140c..574f0e8021e5 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -5,7 +5,7 @@
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 8a5ba3bec3af..9a53d3908b52 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1089,11 +1089,17 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
device_initialize(child);
dev_set_name(child, "%s:%s", dev_name(dev), name);
child->parent = dev;
- child->bus = dev->bus;
child->coherent_dma_mask = dev->coherent_dma_mask;
child->dma_mask = dev->dma_mask;
child->release = s5p_mfc_memdev_release;
+ /*
+ * The memdevs are not proper OF platform devices, so in order for them
+ * to be treated as valid DMA masters we need a bit of a hack to force
+ * them to inherit the MFC node's DMA configuration.
+ */
+ of_dma_configure(child, dev->of_node, true);
+
if (device_add(child) == 0) {
ret = of_reserved_mem_device_init_by_idx(child, dev->of_node,
idx);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 20442a9b9f7a..24148020baa9 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -268,7 +268,7 @@ struct s5p_mfc_priv_buf {
* @enc_ctrl_handler: control framework handler for encoding
* @pm: power management control
* @variant: MFC hardware variant information
- * @num_inst: couter of active MFC instances
+ * @num_inst: counter of active MFC instances
* @irqlock: lock for operations on videobuf2 queues
* @condlock: lock for changing/checking if a context is ready to be
* processed
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index ee7b15b335e0..9f832ba7bc8c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -51,7 +51,7 @@ int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev)
struct firmware *fw_blob;
int i, err = -EINVAL;
- /* Firmare has to be present as a separate file or compiled
+ /* Firmware has to be present as a separate file or compiled
* into kernel. */
mfc_debug_enter();
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index f4c0e3a8f27d..e111f9c47179 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -602,7 +602,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
int i;
if (buf->memory != V4L2_MEMORY_MMAP) {
- mfc_err("Only mmaped buffers can be used\n");
+ mfc_err("Only mmapped buffers can be used\n");
return -EINVAL;
}
mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 0913881219ff..6144e95f6425 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1293,7 +1293,7 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
* First set the output frame buffers
*/
if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
- mfc_err("It seems that not all destination buffers were mmaped\nMFC requires that all destination are mmaped before starting processing\n");
+ mfc_err("It seems that not all destination buffers were mmapped\nMFC requires that all destination are mmapped before starting processing\n");
return -EAGAIN;
}
if (list_empty(&ctx->src_queue)) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 7c629be43205..281699ab7fe1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -53,7 +53,7 @@ static int s5p_mfc_alloc_dec_temp_buffers_v6(struct s5p_mfc_ctx *ctx)
return 0;
}
-/* Release temproary buffers for decoding */
+/* Release temporary buffers for decoding */
static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx)
{
/* NOP */
@@ -1928,7 +1928,7 @@ static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
mfc_err("It seems that not all destination buffers were\n"
- "mmaped.MFC requires that all destination are mmaped\n"
+ "mmapped.MFC requires that all destination are mmapped\n"
"before starting processing.\n");
return -EAGAIN;
}
diff --git a/drivers/media/platform/seco-cec/seco-cec.h b/drivers/media/platform/seco-cec/seco-cec.h
index e632c4a2a044..843de8c7dfd4 100644
--- a/drivers/media/platform/seco-cec/seco-cec.h
+++ b/drivers/media/platform/seco-cec/seco-cec.h
@@ -106,7 +106,7 @@
#define SECOCEC_IR_COMMAND_MASK 0x007F
#define SECOCEC_IR_COMMAND_SHL 0
#define SECOCEC_IR_ADDRESS_MASK 0x1F00
-#define SECOCEC_IR_ADDRESS_SHL 7
+#define SECOCEC_IR_ADDRESS_SHL 8
#define SECOCEC_IR_TOGGLE_MASK 0x8000
#define SECOCEC_IR_TOGGLE_SHL 15
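
The one-liner above is a genuine bit-field fix, not a cleanup: the 5-bit address field sits in bits 12:8 (mask 0x1F00), so it must be shifted right by 8; with the old shift of 7 the extracted value kept a stray low bit and came out doubled. A hedged sketch of how such a field is typically extracted (the helper name is hypothetical):

static inline u8 demo_ir_address(u16 status)
{
	/* (status & 0x1F00) >> 8 yields the 5-bit RC5 address, 0..31 */
	return (status & SECOCEC_IR_ADDRESS_MASK) >> SECOCEC_IR_ADDRESS_SHL;
}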
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 09ae64a0004c..d277cc674349 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -273,13 +273,13 @@ static void sh_veu_process(struct sh_veu_dev *veu,
static void sh_veu_device_run(void *priv)
{
struct sh_veu_dev *veu = priv;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
if (src_buf && dst_buf)
- sh_veu_process(veu, src_buf, dst_buf);
+ sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
}
/* ========== video ioctls ========== */
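
The sh_veu change above is a type fix: the v4l2-mem2mem helpers return struct vb2_v4l2_buffer, which embeds the core struct vb2_buffer as its first member, so callers reach the core buffer through the vb2_buf field rather than by cast. A minimal sketch of the pattern (hypothetical function name):

static void demo_peek_src(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *vbuf = v4l2_m2m_next_src_buf(m2m_ctx);

	if (vbuf) {
		/* The core buffer is embedded in the v4l2 wrapper. */
		struct vb2_buffer *vb = &vbuf->vb2_buf;

		pr_debug("src plane 0: %lu bytes\n", vb2_plane_size(vb, 0));
	}
}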
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
deleted file mode 100644
index 669d116b8f09..000000000000
--- a/drivers/media/platform/soc_camera/Kconfig
+++ /dev/null
@@ -1,26 +0,0 @@
-config SOC_CAMERA
- tristate "SoC camera support"
- depends on VIDEO_V4L2 && HAS_DMA && I2C
- select VIDEOBUF2_CORE
- help
- SoC Camera is a common API to several cameras that do not connect
- over a bus like PCI or USB, for example an I2C camera connected
- directly to the data bus of an SoC.
-
-config SOC_CAMERA_SCALE_CROP
- tristate
-
-config SOC_CAMERA_PLATFORM
- tristate "platform camera support"
- depends on SOC_CAMERA
- help
- This is a generic SoC camera platform driver, useful for testing
-
-config VIDEO_SH_MOBILE_CEU
- tristate "SuperH Mobile CEU Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK
- depends on ARCH_SHMOBILE || COMPILE_TEST
- select VIDEOBUF2_DMA_CONTIG
- select SOC_CAMERA_SCALE_CROP
- ---help---
- This is a v4l2 driver for the SuperH Mobile CEU Interface
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
deleted file mode 100644
index 07a451e8b228..000000000000
--- a/drivers/media/platform/soc_camera/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
-obj-$(CONFIG_SOC_CAMERA_SCALE_CROP) += soc_scale_crop.o
-
-# a platform subdevice driver stub, allowing to support cameras by adding a
-# couple of callback functions to the board code
-obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
-
-# soc-camera host drivers have to be linked after camera drivers
-obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
deleted file mode 100644
index 6803f744e307..000000000000
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ /dev/null
@@ -1,1810 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * V4L2 Driver for SuperH Mobile CEU interface
- *
- * Copyright (C) 2008 Magnus Damm
- *
- * Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
- *
- * Copyright (C) 2006, Sascha Hauer, Pengutronix
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/moduleparam.h>
-#include <linux/of.h>
-#include <linux/time.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-#include <linux/pm_runtime.h>
-#include <linux/sched.h>
-
-#include <media/v4l2-async.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/soc_camera.h>
-#include <media/drv-intf/sh_mobile_ceu.h>
-#include <media/videobuf2-dma-contig.h>
-#include <media/v4l2-mediabus.h>
-#include <media/drv-intf/soc_mediabus.h>
-
-#include "soc_scale_crop.h"
-
-/* register offsets for sh7722 / sh7723 */
-
-#define CAPSR 0x00 /* Capture start register */
-#define CAPCR 0x04 /* Capture control register */
-#define CAMCR 0x08 /* Capture interface control register */
-#define CMCYR 0x0c /* Capture interface cycle register */
-#define CAMOR 0x10 /* Capture interface offset register */
-#define CAPWR 0x14 /* Capture interface width register */
-#define CAIFR 0x18 /* Capture interface input format register */
-#define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */
-#define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */
-#define CRCNTR 0x28 /* CEU register control register */
-#define CRCMPR 0x2c /* CEU register forcible control register */
-#define CFLCR 0x30 /* Capture filter control register */
-#define CFSZR 0x34 /* Capture filter size clip register */
-#define CDWDR 0x38 /* Capture destination width register */
-#define CDAYR 0x3c /* Capture data address Y register */
-#define CDACR 0x40 /* Capture data address C register */
-#define CDBYR 0x44 /* Capture data bottom-field address Y register */
-#define CDBCR 0x48 /* Capture data bottom-field address C register */
-#define CBDSR 0x4c /* Capture bundle destination size register */
-#define CFWCR 0x5c /* Firewall operation control register */
-#define CLFCR 0x60 /* Capture low-pass filter control register */
-#define CDOCR 0x64 /* Capture data output control register */
-#define CDDCR 0x68 /* Capture data complexity level register */
-#define CDDAR 0x6c /* Capture data complexity level address register */
-#define CEIER 0x70 /* Capture event interrupt enable register */
-#define CETCR 0x74 /* Capture event flag clear register */
-#define CSTSR 0x7c /* Capture status register */
-#define CSRTR 0x80 /* Capture software reset register */
-#define CDSSR 0x84 /* Capture data size register */
-#define CDAYR2 0x90 /* Capture data address Y register 2 */
-#define CDACR2 0x94 /* Capture data address C register 2 */
-#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
-#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */
-
-#undef DEBUG_GEOMETRY
-#ifdef DEBUG_GEOMETRY
-#define dev_geo dev_info
-#else
-#define dev_geo dev_dbg
-#endif
-
-/* per video frame buffer */
-struct sh_mobile_ceu_buffer {
- struct vb2_v4l2_buffer vb; /* v4l buffer must be first */
- struct list_head queue;
-};
-
-struct sh_mobile_ceu_dev {
- struct soc_camera_host ici;
-
- unsigned int irq;
- void __iomem *base;
- size_t video_limit;
- size_t buf_total;
-
- spinlock_t lock; /* Protects video buffer lists */
- struct list_head capture;
- struct vb2_v4l2_buffer *active;
-
- struct sh_mobile_ceu_info *pdata;
- struct completion complete;
-
- u32 cflcr;
-
- /* static max sizes either from platform data or default */
- int max_width;
- int max_height;
-
- enum v4l2_field field;
- int sequence;
- unsigned long flags;
-
- unsigned int image_mode:1;
- unsigned int is_16bit:1;
- unsigned int frozen:1;
-};
-
-struct sh_mobile_ceu_cam {
- /* CEU offsets within the camera output, before the CEU scaler */
- unsigned int ceu_left;
- unsigned int ceu_top;
- /* Client output, as seen by the CEU */
- unsigned int width;
- unsigned int height;
- /*
- * User window from S_SELECTION / G_SELECTION, produced by client cropping and
- * scaling, CEU scaling and CEU cropping, mapped back onto the client
- * input window
- */
- struct v4l2_rect subrect;
- /* Camera cropping rectangle */
- struct v4l2_rect rect;
- const struct soc_mbus_pixelfmt *extra_fmt;
- u32 code;
-};
-
-static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_v4l2_buffer *vbuf)
-{
- return container_of(vbuf, struct sh_mobile_ceu_buffer, vb);
-}
-
-static void ceu_write(struct sh_mobile_ceu_dev *priv,
- unsigned long reg_offs, u32 data)
-{
- iowrite32(data, priv->base + reg_offs);
-}
-
-static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
-{
- return ioread32(priv->base + reg_offs);
-}
-
-static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
-{
- int i, success = 0;
-
- ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
-
- /* wait CSTSR.CPTON bit */
- for (i = 0; i < 1000; i++) {
- if (!(ceu_read(pcdev, CSTSR) & 1)) {
- success++;
- break;
- }
- udelay(1);
- }
-
- /* wait CAPSR.CPKIL bit */
- for (i = 0; i < 1000; i++) {
- if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) {
- success++;
- break;
- }
- udelay(1);
- }
-
- if (2 != success) {
- dev_warn(pcdev->ici.v4l2_dev.dev, "soft reset time out\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * Videobuf operations
- */
-
-/*
- * .queue_setup() is called to check whether the driver can accept the
- * requested number of buffers and to fill in plane sizes
- * for the current frame format if required
- */
-static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
- unsigned int *count, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- if (!vq->num_buffers)
- pcdev->sequence = 0;
-
- if (!*count)
- *count = 2;
-
- /* Called from VIDIOC_REQBUFS or in compatibility mode */
- if (!*num_planes)
- sizes[0] = icd->sizeimage;
- else if (sizes[0] < icd->sizeimage)
- return -EINVAL;
-
- /* If *num_planes != 0, we have already verified *count. */
- if (pcdev->video_limit) {
- size_t size = PAGE_ALIGN(sizes[0]) * *count;
-
- if (size + pcdev->buf_total > pcdev->video_limit)
- *count = (pcdev->video_limit - pcdev->buf_total) /
- PAGE_ALIGN(sizes[0]);
- }
-
- *num_planes = 1;
-
- dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);
-
- return 0;
-}
-
-#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
-#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
-#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
-#define CEU_CEIER_VBP (1 << 20) /* vbp error */
-#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
-#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)
-
-
-/*
- * return value doesn't reflect the success/failure to queue the new buffer,
- * but rather the status of the previous buffer.
- */
-static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
-{
- struct soc_camera_device *icd = pcdev->ici.icd;
- dma_addr_t phys_addr_top, phys_addr_bottom;
- unsigned long top1, top2;
- unsigned long bottom1, bottom2;
- u32 status;
- bool planar;
- int ret = 0;
-
- /*
- * The hardware is _very_ picky about this sequence. Especially
- * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
- * several not-so-well documented interrupt sources in CETCR.
- */
- ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK);
- status = ceu_read(pcdev, CETCR);
- ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC);
- if (!pcdev->frozen)
- ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK);
- ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
- ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);
-
- /*
- * When a VBP interrupt occurs, a capture end interrupt does not occur
- * and the image of that frame is not captured correctly. So, soft reset
- * is needed here.
- */
- if (status & CEU_CEIER_VBP) {
- sh_mobile_ceu_soft_reset(pcdev);
- ret = -EIO;
- }
-
- if (pcdev->frozen) {
- complete(&pcdev->complete);
- return ret;
- }
-
- if (!pcdev->active)
- return ret;
-
- if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
- top1 = CDBYR;
- top2 = CDBCR;
- bottom1 = CDAYR;
- bottom2 = CDACR;
- } else {
- top1 = CDAYR;
- top2 = CDACR;
- bottom1 = CDBYR;
- bottom2 = CDBCR;
- }
-
- phys_addr_top =
- vb2_dma_contig_plane_dma_addr(&pcdev->active->vb2_buf, 0);
-
- switch (icd->current_fmt->host_fmt->fourcc) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- planar = true;
- break;
- default:
- planar = false;
- }
-
- ceu_write(pcdev, top1, phys_addr_top);
- if (V4L2_FIELD_NONE != pcdev->field) {
- phys_addr_bottom = phys_addr_top + icd->bytesperline;
- ceu_write(pcdev, bottom1, phys_addr_bottom);
- }
-
- if (planar) {
- phys_addr_top += icd->bytesperline * icd->user_height;
- ceu_write(pcdev, top2, phys_addr_top);
- if (V4L2_FIELD_NONE != pcdev->field) {
- phys_addr_bottom = phys_addr_top + icd->bytesperline;
- ceu_write(pcdev, bottom2, phys_addr_bottom);
- }
- }
-
- ceu_write(pcdev, CAPSR, 0x1); /* start capture */
-
- return ret;
-}
-
-static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
-
- /* Added list head initialization on alloc */
- WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
-
- return 0;
-}
-
-static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
- unsigned long size;
-
- size = icd->sizeimage;
-
- if (vb2_plane_size(vb, 0) < size) {
- dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
- vb->index, vb2_plane_size(vb, 0), size);
- goto error;
- }
-
- vb2_set_plane_payload(vb, 0, size);
-
- dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
- vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
-
-#ifdef DEBUG
- /*
- * This can be useful if you want to see if we actually fill
- * the buffer with something
- */
- if (vb2_plane_vaddr(vb, 0))
- memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
-#endif
-
- spin_lock_irq(&pcdev->lock);
- list_add_tail(&buf->queue, &pcdev->capture);
-
- if (!pcdev->active) {
- /*
- * Because there was no active buffer at this moment,
- * we are not interested in the return value of
- * sh_mobile_ceu_capture here.
- */
- pcdev->active = vbuf;
- sh_mobile_ceu_capture(pcdev);
- }
- spin_unlock_irq(&pcdev->lock);
-
- return;
-
-error:
- vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
-}
-
-static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- spin_lock_irq(&pcdev->lock);
-
- if (pcdev->active == vbuf) {
- /* disable capture (release DMA buffer), reset */
- ceu_write(pcdev, CAPSR, 1 << 16);
- pcdev->active = NULL;
- }
-
- /*
- * It doesn't hurt if the list is empty, but it does hurt if queuing the
- * buffer failed and .buf_init() hasn't been called
- */
- if (buf->queue.next)
- list_del_init(&buf->queue);
-
- pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0));
- dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
- pcdev->buf_total);
-
- spin_unlock_irq(&pcdev->lock);
-}
-
-static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0));
- dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
- pcdev->buf_total);
-
- /* This is for locking debugging only */
- INIT_LIST_HEAD(&to_ceu_vb(vbuf)->queue);
- return 0;
-}
-
-static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(q);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct list_head *buf_head, *tmp;
- struct vb2_v4l2_buffer *vbuf;
-
- spin_lock_irq(&pcdev->lock);
-
- pcdev->active = NULL;
-
- list_for_each_safe(buf_head, tmp, &pcdev->capture) {
- vbuf = &list_entry(buf_head, struct sh_mobile_ceu_buffer,
- queue)->vb;
- vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
- list_del_init(buf_head);
- }
-
- spin_unlock_irq(&pcdev->lock);
-
- sh_mobile_ceu_soft_reset(pcdev);
-}
-
-static const struct vb2_ops sh_mobile_ceu_videobuf_ops = {
- .queue_setup = sh_mobile_ceu_videobuf_setup,
- .buf_prepare = sh_mobile_ceu_videobuf_prepare,
- .buf_queue = sh_mobile_ceu_videobuf_queue,
- .buf_cleanup = sh_mobile_ceu_videobuf_release,
- .buf_init = sh_mobile_ceu_videobuf_init,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
- .stop_streaming = sh_mobile_ceu_stop_streaming,
-};
-
-static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
-{
- struct sh_mobile_ceu_dev *pcdev = data;
- struct vb2_v4l2_buffer *vbuf;
- int ret;
-
- spin_lock(&pcdev->lock);
-
- vbuf = pcdev->active;
- if (!vbuf)
- /* Stale interrupt from a released buffer */
- goto out;
-
- list_del_init(&to_ceu_vb(vbuf)->queue);
-
- if (!list_empty(&pcdev->capture))
- pcdev->active = &list_entry(pcdev->capture.next,
- struct sh_mobile_ceu_buffer, queue)->vb;
- else
- pcdev->active = NULL;
-
- ret = sh_mobile_ceu_capture(pcdev);
- vbuf->vb2_buf.timestamp = ktime_get_ns();
- if (!ret) {
- vbuf->field = pcdev->field;
- vbuf->sequence = pcdev->sequence++;
- }
- vb2_buffer_done(&vbuf->vb2_buf,
- ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
-
-out:
- spin_unlock(&pcdev->lock);
-
- return IRQ_HANDLED;
-}
-
-static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
-{
- dev_info(icd->parent,
- "SuperH Mobile CEU driver attached to camera %d\n",
- icd->devnum);
-
- return 0;
-}
-
-static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
-{
- dev_info(icd->parent,
- "SuperH Mobile CEU driver detached from camera %d\n",
- icd->devnum);
-}
-
-/* Called with .host_lock held */
-static int sh_mobile_ceu_clock_start(struct soc_camera_host *ici)
-{
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- pm_runtime_get_sync(ici->v4l2_dev.dev);
-
- pcdev->buf_total = 0;
-
- sh_mobile_ceu_soft_reset(pcdev);
-
- return 0;
-}
-
-/* Called with .host_lock held */
-static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici)
-{
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- /* disable capture, disable interrupts */
- ceu_write(pcdev, CEIER, 0);
- sh_mobile_ceu_soft_reset(pcdev);
-
- /* make sure active buffer is canceled */
- spin_lock_irq(&pcdev->lock);
- if (pcdev->active) {
- list_del_init(&to_ceu_vb(pcdev->active)->queue);
- vb2_buffer_done(&pcdev->active->vb2_buf, VB2_BUF_STATE_ERROR);
- pcdev->active = NULL;
- }
- spin_unlock_irq(&pcdev->lock);
-
- pm_runtime_put(ici->v4l2_dev.dev);
-}
-
-/*
- * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)"
- * in SH7722 Hardware Manual
- */
-static unsigned int size_dst(unsigned int src, unsigned int scale)
-{
- unsigned int mant_pre = scale >> 12;
- if (!src || !scale)
- return src;
- return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) *
- mant_pre * 4096 / scale + 1;
-}
-
-static u16 calc_scale(unsigned int src, unsigned int *dst)
-{
- u16 scale;
-
- if (src == *dst)
- return 0;
-
- scale = (src * 4096 / *dst) & ~7;
-
- while (scale > 4096 && size_dst(src, scale) < *dst)
- scale -= 8;
-
- *dst = size_dst(src, scale);
-
- return scale;
-}
-
-/* rect is guaranteed to not exceed the scaled camera rectangle */
-static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- unsigned int height, width, cdwdr_width, in_width, in_height;
- unsigned int left_offset, top_offset;
- u32 camor;
-
- dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
- icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
-
- left_offset = cam->ceu_left;
- top_offset = cam->ceu_top;
-
- WARN_ON(icd->user_width & 3 || icd->user_height & 3);
-
- width = icd->user_width;
-
- if (pcdev->image_mode) {
- in_width = cam->width;
- if (!pcdev->is_16bit) {
- in_width *= 2;
- left_offset *= 2;
- }
- } else {
- unsigned int w_factor;
-
- switch (icd->current_fmt->host_fmt->packing) {
- case SOC_MBUS_PACKING_2X8_PADHI:
- w_factor = 2;
- break;
- default:
- w_factor = 1;
- }
-
- in_width = cam->width * w_factor;
- left_offset *= w_factor;
- }
-
- cdwdr_width = icd->bytesperline;
-
- height = icd->user_height;
- in_height = cam->height;
- if (V4L2_FIELD_NONE != pcdev->field) {
- height = (height / 2) & ~3;
- in_height /= 2;
- top_offset /= 2;
- cdwdr_width *= 2;
- }
-
- /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
- camor = left_offset | (top_offset << 16);
-
- dev_geo(icd->parent,
- "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
- (in_height << 16) | in_width, (height << 16) | width,
- cdwdr_width);
-
- ceu_write(pcdev, CAMOR, camor);
- ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
- /* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */
- ceu_write(pcdev, CFSZR, (height << 16) | width);
- ceu_write(pcdev, CDWDR, cdwdr_width);
-}
-
-static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
-{
- u32 capsr = ceu_read(pcdev, CAPSR);
- ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
- return capsr;
-}
-
-static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
-{
- unsigned long timeout = jiffies + 10 * HZ;
-
- /*
- * Wait until the end of the current frame. It can take a long time,
- * but if it has been aborted by a CAPSR reset, it should exit sooner.
- */
- while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
- msleep(1);
-
- if (time_after(jiffies, timeout)) {
- dev_err(pcdev->ici.v4l2_dev.dev,
- "Timeout waiting for frame end! Interface problem?\n");
- return;
- }
-
- /* Wait until reset clears, this shall not hang... */
- while (ceu_read(pcdev, CAPSR) & (1 << 16))
- udelay(10);
-
- /* Anything to restore? */
- if (capsr & ~(1 << 16))
- ceu_write(pcdev, CAPSR, capsr);
-}
-
-#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER | \
- V4L2_MBUS_PCLK_SAMPLE_RISING | \
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
- V4L2_MBUS_HSYNC_ACTIVE_LOW | \
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
- V4L2_MBUS_VSYNC_ACTIVE_LOW | \
- V4L2_MBUS_DATA_ACTIVE_HIGH)
-
-/* Capture is not running, no interrupts, no locking needed */
-static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- unsigned long value, common_flags = CEU_BUS_FLAGS;
- u32 capsr = capture_save_reset(pcdev);
- unsigned int yuv_lineskip;
- int ret;
-
- /*
- * If the client doesn't implement g_mbus_config, we just use our
- * platform data
- */
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
- if (!ret) {
- common_flags = soc_mbus_config_compatible(&cfg,
- common_flags);
- if (!common_flags)
- return -EINVAL;
- } else if (ret != -ENOIOCTLCMD) {
- return ret;
- }
-
- /* Make choices based on platform preferences */
- if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
- if (pcdev->flags & SH_CEU_FLAG_HSYNC_LOW)
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
- }
-
- if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
- if (pcdev->flags & SH_CEU_FLAG_VSYNC_LOW)
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
- }
-
- cfg.flags = common_flags;
- ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
-
- if (icd->current_fmt->host_fmt->bits_per_sample > 8)
- pcdev->is_16bit = 1;
- else
- pcdev->is_16bit = 0;
-
- ceu_write(pcdev, CRCNTR, 0);
- ceu_write(pcdev, CRCMPR, 0);
-
- value = 0x00000010; /* data fetch by default */
- yuv_lineskip = 0x10;
-
- switch (icd->current_fmt->host_fmt->fourcc) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- /* convert 4:2:2 -> 4:2:0 */
- yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */
- /* fall-through */
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- switch (cam->code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
- break;
- case MEDIA_BUS_FMT_YUYV8_2X8:
- value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
- break;
- default:
- BUG();
- }
- }
-
- if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
- icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
- value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
-
- value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
- value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
-
- if (pcdev->is_16bit)
- value |= 1 << 12;
- else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT)
- value |= 2 << 12;
-
- ceu_write(pcdev, CAMCR, value);
-
- ceu_write(pcdev, CAPCR, 0x00300000);
-
- switch (pcdev->field) {
- case V4L2_FIELD_INTERLACED_TB:
- value = 0x101;
- break;
- case V4L2_FIELD_INTERLACED_BT:
- value = 0x102;
- break;
- default:
- value = 0;
- break;
- }
- ceu_write(pcdev, CAIFR, value);
-
- sh_mobile_ceu_set_rect(icd);
- mdelay(1);
-
- dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
- ceu_write(pcdev, CFLCR, pcdev->cflcr);
-
- /*
- * A few words about byte order (observed in Big Endian mode)
- *
- * In data fetch mode bytes are received in chunks of 8 bytes.
- * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
- *
- * The data is however by default written to memory in reverse order:
- * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
- *
- * The lowest three bits of CDOCR allows us to do swapping,
- * using 7 we swap the data bytes to match the incoming order:
- * D0, D1, D2, D3, D4, D5, D6, D7
- */
- value = 0x00000007 | yuv_lineskip;
-
- ceu_write(pcdev, CDOCR, value);
- ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
-
- capture_restore(pcdev, capsr);
-
- /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
- return 0;
-}
-
-static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
- unsigned char buswidth)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- unsigned long common_flags = CEU_BUS_FLAGS;
- struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- int ret;
-
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
- if (!ret)
- common_flags = soc_mbus_config_compatible(&cfg,
- common_flags);
- else if (ret != -ENOIOCTLCMD)
- return ret;
-
- if (!common_flags || buswidth > 16)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_NV12,
- .name = "NV12",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_1_5X8,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
- }, {
- .fourcc = V4L2_PIX_FMT_NV21,
- .name = "NV21",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_1_5X8,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
- }, {
- .fourcc = V4L2_PIX_FMT_NV16,
- .name = "NV16",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_2X8_PADHI,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
- }, {
- .fourcc = V4L2_PIX_FMT_NV61,
- .name = "NV61",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_2X8_PADHI,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
- },
-};
-
-/* This will be corrected as we get more formats */
-static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
-{
- return fmt->packing == SOC_MBUS_PACKING_NONE ||
- (fmt->bits_per_sample == 8 &&
- fmt->packing == SOC_MBUS_PACKING_1_5X8) ||
- (fmt->bits_per_sample == 8 &&
- fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
- (fmt->bits_per_sample > 8 &&
- fmt->packing == SOC_MBUS_PACKING_EXTEND16);
-}
-
-static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl)
-{
- return container_of(ctrl->handler, struct soc_camera_device,
- ctrl_handler);
-}
-
-static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct soc_camera_device *icd = ctrl_to_icd(ctrl);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
-
- switch (ctrl->id) {
- case V4L2_CID_SHARPNESS:
- switch (icd->current_fmt->host_fmt->fourcc) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- ceu_write(pcdev, CLFCR, !ctrl->val);
- return 0;
- }
- break;
- }
-
- return -EINVAL;
-}
-
-static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = {
- .s_ctrl = sh_mobile_ceu_s_ctrl,
-};
-
-static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx,
- struct soc_camera_format_xlate *xlate)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->parent;
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- int ret, k, n;
- int formats = 0;
- struct sh_mobile_ceu_cam *cam;
- struct v4l2_subdev_mbus_code_enum code = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .index = idx,
- };
- const struct soc_mbus_pixelfmt *fmt;
-
- ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
- if (ret < 0)
- /* No more formats */
- return 0;
-
- fmt = soc_mbus_get_fmtdesc(code.code);
- if (!fmt) {
- dev_warn(dev, "unsupported format code #%u: %d\n", idx, code.code);
- return 0;
- }
-
- ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
- if (ret < 0)
- return 0;
-
- if (!icd->host_priv) {
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &fmt.format;
- struct v4l2_rect rect;
- int shift = 0;
-
- /* Add our control */
- v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
- V4L2_CID_SHARPNESS, 0, 1, 1, 1);
- if (icd->ctrl_handler.error)
- return icd->ctrl_handler.error;
-
- /* FIXME: subwindow is lost between close / open */
-
- /* Cache current client geometry */
- ret = soc_camera_client_g_rect(sd, &rect);
- if (ret < 0)
- return ret;
-
- /* First time */
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
-
- /*
- * All currently existing CEU implementations support 2560x1920
- * or larger frames. If the sensor is proposing too big a frame,
- * don't bother with larger sizes possibly supported by the CEU,
- * just try VGA multiples. If needed, this can be
- * adjusted in the future.
- */
- while ((mf->width > pcdev->max_width ||
- mf->height > pcdev->max_height) && shift < 4) {
- /* Try 2560x1920, 1280x960, 640x480, 320x240 */
- mf->width = 2560 >> shift;
- mf->height = 1920 >> shift;
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
- shift++;
- }
-
- if (shift == 4) {
- dev_err(dev, "Failed to configure the client below %ux%x\n",
- mf->width, mf->height);
- return -EIO;
- }
-
- dev_geo(dev, "camera fmt %ux%u\n", mf->width, mf->height);
-
- cam = kzalloc(sizeof(*cam), GFP_KERNEL);
- if (!cam)
- return -ENOMEM;
-
- /* We are called with current camera crop, initialise subrect with it */
- cam->rect = rect;
- cam->subrect = rect;
-
- cam->width = mf->width;
- cam->height = mf->height;
-
- icd->host_priv = cam;
- } else {
- cam = icd->host_priv;
- }
-
- /* Beginning of a pass */
- if (!idx)
- cam->extra_fmt = NULL;
-
- switch (code.code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_VYUY8_2X8:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_YVYU8_2X8:
- if (cam->extra_fmt)
- break;
-
- /*
- * Our case is simple so far: for any of the above four camera
- * formats we add all our four synthesized NV* formats, so,
- * just marking the device with a single flag suffices. If
- * the format generation rules are more complex, you would have
- * to actually hang your already added / counted formats onto
- * the host_priv pointer and check whether the format you're
- * going to add now is already there.
- */
- cam->extra_fmt = sh_mobile_ceu_formats;
-
- n = ARRAY_SIZE(sh_mobile_ceu_formats);
- formats += n;
- for (k = 0; xlate && k < n; k++) {
- xlate->host_fmt = &sh_mobile_ceu_formats[k];
- xlate->code = code.code;
- xlate++;
- dev_dbg(dev, "Providing format %s using code %d\n",
- sh_mobile_ceu_formats[k].name, code.code);
- }
- break;
- default:
- if (!sh_mobile_ceu_packing_supported(fmt))
- return 0;
- }
-
- /* Generic pass-through */
- formats++;
- if (xlate) {
- xlate->host_fmt = fmt;
- xlate->code = code.code;
- xlate++;
- dev_dbg(dev, "Providing format %s in pass-through mode\n",
- fmt->name);
- }
-
- return formats;
-}
-
-static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd)
-{
- kfree(icd->host_priv);
- icd->host_priv = NULL;
-}
-
-#define scale_down(size, scale) soc_camera_shift_scale(size, 12, scale)
-#define calc_generic_scale(in, out) soc_camera_calc_scale(in, 12, out)
-
-/*
- * CEU can scale and crop, but we don't want to waste bandwidth and kill the
- * framerate by always requesting the maximum image from the client. See
- * Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst for a description of
- * scaling and cropping algorithms and for the meaning of the steps referenced here.
- */
-static int sh_mobile_ceu_set_selection(struct soc_camera_device *icd,
- struct v4l2_selection *sel)
-{
- struct v4l2_rect *rect = &sel->r;
- struct device *dev = icd->parent;
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct v4l2_selection cam_sel;
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *cam_rect = &cam_sel.r;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &fmt.format;
- unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
- out_width, out_height;
- int interm_width, interm_height;
- u32 capsr, cflcr;
- int ret;
-
- dev_geo(dev, "S_SELECTION(%ux%u@%u:%u)\n", rect->width, rect->height,
- rect->left, rect->top);
-
- /* During camera cropping its output window can change too, stop CEU */
- capsr = capture_save_reset(pcdev);
- dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
-
- /*
- * 1. - 2. Apply iterative camera S_SELECTION for new input window, read back
- * actual camera rectangle.
- */
- ret = soc_camera_client_s_selection(sd, sel, &cam_sel,
- &cam->rect, &cam->subrect);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
- cam_rect->width, cam_rect->height,
- cam_rect->left, cam_rect->top);
-
- /* On success cam_crop contains current camera crop */
-
- /* 3. Retrieve camera output window */
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
-
- if (mf->width > pcdev->max_width || mf->height > pcdev->max_height)
- return -EINVAL;
-
- /* 4. Calculate camera scales */
- scale_cam_h = calc_generic_scale(cam_rect->width, mf->width);
- scale_cam_v = calc_generic_scale(cam_rect->height, mf->height);
-
- /* Calculate intermediate window */
- interm_width = scale_down(rect->width, scale_cam_h);
- interm_height = scale_down(rect->height, scale_cam_v);
-
- if (interm_width < icd->user_width) {
- u32 new_scale_h;
-
- new_scale_h = calc_generic_scale(rect->width, icd->user_width);
-
- mf->width = scale_down(cam_rect->width, new_scale_h);
- }
-
- if (interm_height < icd->user_height) {
- u32 new_scale_v;
-
- new_scale_v = calc_generic_scale(rect->height, icd->user_height);
-
- mf->height = scale_down(cam_rect->height, new_scale_v);
- }
-
- if (interm_width < icd->user_width || interm_height < icd->user_height) {
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "New camera output %ux%u\n", mf->width, mf->height);
- scale_cam_h = calc_generic_scale(cam_rect->width, mf->width);
- scale_cam_v = calc_generic_scale(cam_rect->height, mf->height);
- interm_width = scale_down(rect->width, scale_cam_h);
- interm_height = scale_down(rect->height, scale_cam_v);
- }
-
- /* Cache camera output window */
- cam->width = mf->width;
- cam->height = mf->height;
-
- if (pcdev->image_mode) {
- out_width = min(interm_width, icd->user_width);
- out_height = min(interm_height, icd->user_height);
- } else {
- out_width = interm_width;
- out_height = interm_height;
- }
-
- /*
- * 5. Calculate CEU scales from the camera scales obtained in (4) and
- * the user window
- */
- scale_ceu_h = calc_scale(interm_width, &out_width);
- scale_ceu_v = calc_scale(interm_height, &out_height);
-
- dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
-
- /* Apply CEU scales. */
- cflcr = scale_ceu_h | (scale_ceu_v << 16);
- if (cflcr != pcdev->cflcr) {
- pcdev->cflcr = cflcr;
- ceu_write(pcdev, CFLCR, cflcr);
- }
-
- icd->user_width = out_width & ~3;
- icd->user_height = out_height & ~3;
- /* Offsets are applied at the CEU scaling filter input */
- cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1;
- cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1;
-
- /* 6. Use CEU cropping to crop to the new window. */
- sh_mobile_ceu_set_rect(icd);
-
- cam->subrect = *rect;
-
- dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
- icd->user_width, icd->user_height,
- cam->ceu_left, cam->ceu_top);
-
- /* Restore capture. The CE bit can be cleared by the hardware */
- if (pcdev->active)
- capsr |= 1;
- capture_restore(pcdev, capsr);
-
- /* Even if only camera cropping succeeded */
- return ret;
-}
-
-static int sh_mobile_ceu_get_selection(struct soc_camera_device *icd,
- struct v4l2_selection *sel)
-{
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
-
- sel->r = cam->subrect;
-
- return 0;
-}
-
-/* Similar to set_crop multistage iterative algorithm */
-static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
-{
- struct device *dev = icd->parent;
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_mbus_framefmt mf;
- __u32 pixfmt = pix->pixelformat;
- const struct soc_camera_format_xlate *xlate;
- unsigned int ceu_sub_width = pcdev->max_width,
- ceu_sub_height = pcdev->max_height;
- u16 scale_v, scale_h;
- int ret;
- bool image_mode;
- enum v4l2_field field;
-
- switch (pix->field) {
- default:
- pix->field = V4L2_FIELD_NONE;
- /* fall-through */
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- case V4L2_FIELD_NONE:
- field = pix->field;
- break;
- case V4L2_FIELD_INTERLACED:
- field = V4L2_FIELD_INTERLACED_TB;
- break;
- }
-
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
- if (!xlate) {
- dev_warn(dev, "Format %x not found\n", pixfmt);
- return -EINVAL;
- }
-
- /* 1.-4. Calculate desired client output geometry */
- soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf, 12);
- mf.field = pix->field;
- mf.colorspace = pix->colorspace;
- mf.code = xlate->code;
-
- switch (pixfmt) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- image_mode = true;
- break;
- default:
- image_mode = false;
- }
-
- dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code,
- pix->width, pix->height);
-
- dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);
-
- /* 5. - 9. */
- ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect,
- &mf, &ceu_sub_width, &ceu_sub_height,
- image_mode && V4L2_FIELD_NONE == field, 12);
-
- dev_geo(dev, "5-9: client scale return %d\n", ret);
-
- /* Done with the camera. Now see if we can improve the result */
-
- dev_geo(dev, "fmt %ux%u, requested %ux%u\n",
- mf.width, mf.height, pix->width, pix->height);
- if (ret < 0)
- return ret;
-
- if (mf.code != xlate->code)
- return -EINVAL;
-
- /* 9. Prepare CEU crop */
- cam->width = mf.width;
- cam->height = mf.height;
-
- /* 10. Use CEU scaling to scale to the requested user window. */
-
- /* We cannot scale up */
- if (pix->width > ceu_sub_width)
- ceu_sub_width = pix->width;
-
- if (pix->height > ceu_sub_height)
- ceu_sub_height = pix->height;
-
- pix->colorspace = mf.colorspace;
-
- if (image_mode) {
- /* Scale pix->{width x height} down to width x height */
- scale_h = calc_scale(ceu_sub_width, &pix->width);
- scale_v = calc_scale(ceu_sub_height, &pix->height);
- } else {
- pix->width = ceu_sub_width;
- pix->height = ceu_sub_height;
- scale_h = 0;
- scale_v = 0;
- }
-
- pcdev->cflcr = scale_h | (scale_v << 16);
-
- /*
- * We have calculated CFLCR, the actual configuration will be performed
- * in sh_mobile_ceu_set_bus_param()
- */
-
- dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
- ceu_sub_width, scale_h, pix->width,
- ceu_sub_height, scale_v, pix->height);
-
- cam->code = xlate->code;
- icd->current_fmt = xlate;
-
- pcdev->field = field;
- pcdev->image_mode = image_mode;
-
- /* CFSZR requirement */
- pix->width &= ~3;
- pix->height &= ~3;
-
- return 0;
-}
-
-#define CEU_CHDW_MAX 8188U /* Maximum line stride */
-
-static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- const struct soc_camera_format_xlate *xlate;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_subdev_pad_config pad_cfg;
- struct v4l2_subdev_format format = {
- .which = V4L2_SUBDEV_FORMAT_TRY,
- };
- struct v4l2_mbus_framefmt *mf = &format.format;
- __u32 pixfmt = pix->pixelformat;
- int width, height;
- int ret;
-
- dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
- pixfmt, pix->width, pix->height);
-
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
- if (!xlate) {
- xlate = icd->current_fmt;
- dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
- pixfmt, xlate->host_fmt->fourcc);
- pixfmt = xlate->host_fmt->fourcc;
- pix->pixelformat = pixfmt;
- pix->colorspace = icd->colorspace;
- }
-
- /* FIXME: calculate using depth and bus width */
-
- /* CFSZR requires height and width to be 4-pixel aligned */
- v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2,
- &pix->height, 4, pcdev->max_height, 2, 0);
-
- width = pix->width;
- height = pix->height;
-
- /* limit to sensor capabilities */
- mf->width = pix->width;
- mf->height = pix->height;
- mf->field = pix->field;
- mf->code = xlate->code;
- mf->colorspace = pix->colorspace;
-
- ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
- pad, set_fmt, &pad_cfg, &format);
- if (ret < 0)
- return ret;
-
- pix->width = mf->width;
- pix->height = mf->height;
- pix->field = mf->field;
- pix->colorspace = mf->colorspace;
-
- switch (pixfmt) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- /* FIXME: check against rect_max after converting soc-camera */
- /* We can scale precisely, need a bigger image from camera */
- if (pix->width < width || pix->height < height) {
- /*
- * We presume the sensor behaves sanely, i.e., if
- * requested a bigger rectangle, it will not return a
- * smaller one.
- */
- mf->width = pcdev->max_width;
- mf->height = pcdev->max_height;
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, &pad_cfg, &format);
- if (ret < 0) {
- /* Shouldn't actually happen... */
- dev_err(icd->parent,
- "FIXME: client try_fmt() = %d\n", ret);
- return ret;
- }
- }
- /* We will scale exactly */
- if (mf->width > width)
- pix->width = width;
- if (mf->height > height)
- pix->height = height;
-
- pix->bytesperline = max(pix->bytesperline, pix->width);
- pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX);
- pix->bytesperline &= ~3;
- break;
-
- default:
- /* Configurable stride isn't supported in pass-through mode. */
- pix->bytesperline = 0;
- }
-
- pix->width &= ~3;
- pix->height &= ~3;
- pix->sizeimage = 0;
-
- dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
- __func__, ret, pix->pixelformat, pix->width, pix->height);
-
- return ret;
-}
-
-static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd,
- struct v4l2_selection *sel)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- u32 out_width = icd->user_width, out_height = icd->user_height;
- int ret;
-
- /* Freeze queue */
- pcdev->frozen = 1;
- /* Wait for frame */
- ret = wait_for_completion_interruptible(&pcdev->complete);
- /* Stop the client */
- ret = v4l2_subdev_call(sd, video, s_stream, 0);
- if (ret < 0)
- dev_warn(icd->parent,
- "Client failed to stop the stream: %d\n", ret);
- else
- /* Do the crop, if it fails, there's nothing more we can do */
- sh_mobile_ceu_set_selection(icd, sel);
-
- dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
-
- if (icd->user_width != out_width || icd->user_height != out_height) {
- struct v4l2_format f = {
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .fmt.pix = {
- .width = out_width,
- .height = out_height,
- .pixelformat = icd->current_fmt->host_fmt->fourcc,
- .field = pcdev->field,
- .colorspace = icd->colorspace,
- },
- };
- ret = sh_mobile_ceu_set_fmt(icd, &f);
- if (!ret && (out_width != f.fmt.pix.width ||
- out_height != f.fmt.pix.height))
- ret = -EINVAL;
- if (!ret) {
- icd->user_width = out_width & ~3;
- icd->user_height = out_height & ~3;
- ret = sh_mobile_ceu_set_bus_param(icd);
- }
- }
-
- /* Thaw the queue */
- pcdev->frozen = 0;
- spin_lock_irq(&pcdev->lock);
- sh_mobile_ceu_capture(pcdev);
- spin_unlock_irq(&pcdev->lock);
- /* Start the client */
- ret = v4l2_subdev_call(sd, video, s_stream, 1);
- return ret;
-}
-
-static __poll_t sh_mobile_ceu_poll(struct file *file, poll_table *pt)
-{
- struct soc_camera_device *icd = file->private_data;
-
- return vb2_poll(&icd->vb2_vidq, file, pt);
-}
-
-static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
- struct v4l2_capability *cap)
-{
- strscpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
- strscpy(cap->driver, "sh_mobile_ceu", sizeof(cap->driver));
- strscpy(cap->bus_info, "platform:sh_mobile_ceu", sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
- return 0;
-}
-
-static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
- struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-
- q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR;
- q->drv_priv = icd;
- q->ops = &sh_mobile_ceu_videobuf_ops;
- q->mem_ops = &vb2_dma_contig_memops;
- q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->lock = &ici->host_lock;
- q->dev = ici->v4l2_dev.dev;
-
- return vb2_queue_init(q);
-}
-
-static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
- .owner = THIS_MODULE,
- .add = sh_mobile_ceu_add_device,
- .remove = sh_mobile_ceu_remove_device,
- .clock_start = sh_mobile_ceu_clock_start,
- .clock_stop = sh_mobile_ceu_clock_stop,
- .get_formats = sh_mobile_ceu_get_formats,
- .put_formats = sh_mobile_ceu_put_formats,
- .get_selection = sh_mobile_ceu_get_selection,
- .set_selection = sh_mobile_ceu_set_selection,
- .set_liveselection = sh_mobile_ceu_set_liveselection,
- .set_fmt = sh_mobile_ceu_set_fmt,
- .try_fmt = sh_mobile_ceu_try_fmt,
- .poll = sh_mobile_ceu_poll,
- .querycap = sh_mobile_ceu_querycap,
- .set_bus_param = sh_mobile_ceu_set_bus_param,
- .init_videobuf2 = sh_mobile_ceu_init_videobuf,
-};
-
-struct bus_wait {
- struct notifier_block notifier;
- struct completion completion;
- struct device *dev;
-};
-
-static int bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
- struct bus_wait *wait = container_of(nb, struct bus_wait, notifier);
-
- if (wait->dev != dev)
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_UNBOUND_DRIVER:
- /* Protect from module unloading */
- wait_for_completion(&wait->completion);
- return NOTIFY_OK;
- }
- return NOTIFY_DONE;
-}
-
-static int sh_mobile_ceu_probe(struct platform_device *pdev)
-{
- struct sh_mobile_ceu_dev *pcdev;
- struct resource *res;
- void __iomem *base;
- unsigned int irq;
- int err;
- struct bus_wait wait = {
- .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
- .notifier.notifier_call = bus_notify,
- };
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!res || (int)irq <= 0) {
- dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
- return -ENODEV;
- }
-
- pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
- if (!pcdev) {
- dev_err(&pdev->dev, "Could not allocate pcdev\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&pcdev->capture);
- spin_lock_init(&pcdev->lock);
- init_completion(&pcdev->complete);
-
- pcdev->pdata = pdev->dev.platform_data;
- if (!pcdev->pdata && !pdev->dev.of_node) {
- dev_err(&pdev->dev, "CEU platform data not set.\n");
- return -EINVAL;
- }
-
- /* TODO: implement per-device bus flags */
- if (pcdev->pdata) {
- pcdev->max_width = pcdev->pdata->max_width;
- pcdev->max_height = pcdev->pdata->max_height;
- pcdev->flags = pcdev->pdata->flags;
- }
- pcdev->field = V4L2_FIELD_NONE;
-
- if (!pcdev->max_width) {
- unsigned int v;
- err = of_property_read_u32(pdev->dev.of_node, "renesas,max-width", &v);
- if (!err)
- pcdev->max_width = v;
-
- if (!pcdev->max_width)
- pcdev->max_width = 2560;
- }
- if (!pcdev->max_height) {
- unsigned int v;
- err = of_property_read_u32(pdev->dev.of_node, "renesas,max-height", &v);
- if (!err)
- pcdev->max_height = v;
-
- if (!pcdev->max_height)
- pcdev->max_height = 1920;
- }
-
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- pcdev->irq = irq;
- pcdev->base = base;
- pcdev->video_limit = 0; /* only enabled if second resource exists */
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- err = dma_declare_coherent_memory(&pdev->dev, res->start,
- res->start,
- resource_size(res),
- DMA_MEMORY_EXCLUSIVE);
- if (err) {
- dev_err(&pdev->dev, "Unable to declare CEU memory.\n");
- return err;
- }
-
- pcdev->video_limit = resource_size(res);
- }
-
- /* request irq */
- err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq,
- 0, dev_name(&pdev->dev), pcdev);
- if (err) {
- dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
- goto exit_release_mem;
- }
-
- pm_suspend_ignore_children(&pdev->dev, true);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
-
- pcdev->ici.priv = pcdev;
- pcdev->ici.v4l2_dev.dev = &pdev->dev;
- pcdev->ici.nr = pdev->id;
- pcdev->ici.drv_name = dev_name(&pdev->dev);
- pcdev->ici.ops = &sh_mobile_ceu_host_ops;
- pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
-
- if (pcdev->pdata && pcdev->pdata->asd_sizes) {
- pcdev->ici.asd = pcdev->pdata->asd;
- pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes;
- }
-
- err = soc_camera_host_register(&pcdev->ici);
- if (err)
- goto exit_free_clk;
-
- return 0;
-
-exit_free_clk:
- pm_runtime_disable(&pdev->dev);
-exit_release_mem:
- if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
- dma_release_declared_memory(&pdev->dev);
- return err;
-}
-
-static int sh_mobile_ceu_remove(struct platform_device *pdev)
-{
- struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
-
- soc_camera_host_unregister(soc_host);
- pm_runtime_disable(&pdev->dev);
- if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
- dma_release_declared_memory(&pdev->dev);
-
- return 0;
-}
-
-static int sh_mobile_ceu_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
- return 0;
-}
-
-static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
- .runtime_suspend = sh_mobile_ceu_runtime_nop,
- .runtime_resume = sh_mobile_ceu_runtime_nop,
-};
-
-static const struct of_device_id sh_mobile_ceu_of_match[] = {
- { .compatible = "renesas,sh-mobile-ceu" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_mobile_ceu_of_match);
-
-static struct platform_driver sh_mobile_ceu_driver = {
- .driver = {
- .name = "sh_mobile_ceu",
- .pm = &sh_mobile_ceu_dev_pm_ops,
- .of_match_table = sh_mobile_ceu_of_match,
- },
- .probe = sh_mobile_ceu_probe,
- .remove = sh_mobile_ceu_remove,
-};
-
-module_platform_driver(sh_mobile_ceu_driver);
-
-MODULE_DESCRIPTION("SuperH Mobile CEU driver");
-MODULE_AUTHOR("Magnus Damm");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1.0");
-MODULE_ALIAS("platform:sh_mobile_ceu");
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
deleted file mode 100644
index 79fbe1fea95f..000000000000
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ /dev/null
@@ -1,188 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Generic Platform Camera Driver
- *
- * Copyright (C) 2008 Magnus Damm
- * Based on mt9m001 driver,
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-subdev.h>
-#include <media/soc_camera.h>
-#include <linux/platform_data/media/soc_camera_platform.h>
-
-struct soc_camera_platform_priv {
- struct v4l2_subdev subdev;
-};
-
-static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev)
-{
- struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
- return container_of(subdev, struct soc_camera_platform_priv, subdev);
-}
-
-static int soc_camera_platform_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
- return p->set_capture(p, enable);
-}
-
-static int soc_camera_platform_fill_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *mf = &format->format;
-
- mf->width = p->format.width;
- mf->height = p->format.height;
- mf->code = p->format.code;
- mf->colorspace = p->format.colorspace;
- mf->field = p->format.field;
-
- return 0;
-}
-
-static int soc_camera_platform_s_power(struct v4l2_subdev *sd, int on)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
- return soc_camera_set_power(p->icd->control, &p->icd->sdesc->subdev_desc, NULL, on);
-}
-
-static const struct v4l2_subdev_core_ops platform_subdev_core_ops = {
- .s_power = soc_camera_platform_s_power,
-};
-
-static int soc_camera_platform_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
- if (code->pad || code->index)
- return -EINVAL;
-
- code->code = p->format.code;
- return 0;
-}
-
-static int soc_camera_platform_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP:
- sel->r.left = 0;
- sel->r.top = 0;
- sel->r.width = p->format.width;
- sel->r.height = p->format.height;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
- cfg->flags = p->mbus_param;
- cfg->type = p->mbus_type;
-
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops platform_subdev_video_ops = {
- .s_stream = soc_camera_platform_s_stream,
- .g_mbus_config = soc_camera_platform_g_mbus_config,
-};
-
-static const struct v4l2_subdev_pad_ops platform_subdev_pad_ops = {
- .enum_mbus_code = soc_camera_platform_enum_mbus_code,
- .get_selection = soc_camera_platform_get_selection,
- .get_fmt = soc_camera_platform_fill_fmt,
- .set_fmt = soc_camera_platform_fill_fmt,
-};
-
-static const struct v4l2_subdev_ops platform_subdev_ops = {
- .core = &platform_subdev_core_ops,
- .video = &platform_subdev_video_ops,
- .pad = &platform_subdev_pad_ops,
-};
-
-static int soc_camera_platform_probe(struct platform_device *pdev)
-{
- struct soc_camera_host *ici;
- struct soc_camera_platform_priv *priv;
- struct soc_camera_platform_info *p = pdev->dev.platform_data;
- struct soc_camera_device *icd;
-
- if (!p)
- return -EINVAL;
-
- if (!p->icd) {
- dev_err(&pdev->dev,
- "Platform has not set soc_camera_device pointer!\n");
- return -EINVAL;
- }
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- icd = p->icd;
-
- /* soc-camera convention: control's drvdata points to the subdev */
- platform_set_drvdata(pdev, &priv->subdev);
- /* Set the control device reference */
- icd->control = &pdev->dev;
-
- ici = to_soc_camera_host(icd->parent);
-
- v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
- v4l2_set_subdevdata(&priv->subdev, p);
- strscpy(priv->subdev.name, dev_name(&pdev->dev),
- sizeof(priv->subdev.name));
-
- return v4l2_device_register_subdev(&ici->v4l2_dev, &priv->subdev);
-}
-
-static int soc_camera_platform_remove(struct platform_device *pdev)
-{
- struct soc_camera_platform_priv *priv = get_priv(pdev);
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(&priv->subdev);
-
- p->icd->control = NULL;
- v4l2_device_unregister_subdev(&priv->subdev);
- return 0;
-}
-
-static struct platform_driver soc_camera_platform_driver = {
- .driver = {
- .name = "soc_camera_platform",
- },
- .probe = soc_camera_platform_probe,
- .remove = soc_camera_platform_remove,
-};
-
-module_platform_driver(soc_camera_platform_driver);
-
-MODULE_DESCRIPTION("SoC Camera Platform driver");
-MODULE_AUTHOR("Magnus Damm");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:soc_camera_platform");
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
deleted file mode 100644
index 8d25ca0490f7..000000000000
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * soc-camera generic scaling-cropping manipulation functions
- *
- * Copyright (C) 2013 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-
-#include <media/soc_camera.h>
-#include <media/v4l2-common.h>
-
-#include "soc_scale_crop.h"
-
-#ifdef DEBUG_GEOMETRY
-#define dev_geo dev_info
-#else
-#define dev_geo dev_dbg
-#endif
-
-/* Check if any dimension of r1 is smaller than the corresponding one of r2 */
-static bool is_smaller(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
-{
- return r1->width < r2->width || r1->height < r2->height;
-}
-
-/* Check if r1 fails to cover r2 */
-static bool is_inside(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
-{
- return r1->left > r2->left || r1->top > r2->top ||
- r1->left + r1->width < r2->left + r2->width ||
- r1->top + r1->height < r2->top + r2->height;
-}
-
-/* Get and store current client crop */
-int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
-{
- struct v4l2_subdev_selection sdsel = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .target = V4L2_SEL_TGT_CROP,
- };
- int ret;
-
- ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
- if (!ret) {
- *rect = sdsel.r;
- return ret;
- }
-
- sdsel.target = V4L2_SEL_TGT_CROP_BOUNDS;
- ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
- if (!ret)
- *rect = sdsel.r;
-
- return ret;
-}
-EXPORT_SYMBOL(soc_camera_client_g_rect);
-
-/* Client crop has changed, update our sub-rectangle to remain within the area */
-static void move_and_crop_subrect(struct v4l2_rect *rect,
- struct v4l2_rect *subrect)
-{
- if (rect->width < subrect->width)
- subrect->width = rect->width;
-
- if (rect->height < subrect->height)
- subrect->height = rect->height;
-
- if (rect->left > subrect->left)
- subrect->left = rect->left;
- else if (rect->left + rect->width <
- subrect->left + subrect->width)
- subrect->left = rect->left + rect->width -
- subrect->width;
-
- if (rect->top > subrect->top)
- subrect->top = rect->top;
- else if (rect->top + rect->height <
- subrect->top + subrect->height)
- subrect->top = rect->top + rect->height -
- subrect->height;
-}
-
-/*
- * The iterative approach common to both scaling and cropping is:
- * 1. check whether the client can produce exactly what the user requested
- * 2. if (1) failed, try to double the client image until we get one big enough
- * 3. if (2) failed, try to request the maximum image
- */
-int soc_camera_client_s_selection(struct v4l2_subdev *sd,
- struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
- struct v4l2_rect *target_rect, struct v4l2_rect *subrect)
-{
- struct v4l2_subdev_selection sdsel = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .target = sel->target,
- .flags = sel->flags,
- .r = sel->r,
- };
- struct v4l2_subdev_selection bounds = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .target = V4L2_SEL_TGT_CROP_BOUNDS,
- };
- struct v4l2_rect *rect = &sel->r, *cam_rect = &cam_sel->r;
- struct device *dev = sd->v4l2_dev->dev;
- int ret;
- unsigned int width, height;
-
- v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
- sel->r = sdsel.r;
- ret = soc_camera_client_g_rect(sd, cam_rect);
- if (ret < 0)
- return ret;
-
- /*
- * Now cam_rect contains the current camera input rectangle, and it must
- * be within camera cropcap bounds
- */
- if (!memcmp(rect, cam_rect, sizeof(*rect))) {
- /* Even if camera S_SELECTION failed, the camera rectangle matches */
- dev_dbg(dev, "Camera S_SELECTION successful for %dx%d@%d:%d\n",
- rect->width, rect->height, rect->left, rect->top);
- *target_rect = *cam_rect;
- return 0;
- }
-
- /* Try to fix the cropping that the camera hasn't managed to set */
- dev_geo(dev, "Fix camera S_SELECTION for %dx%d@%d:%d to %dx%d@%d:%d\n",
- cam_rect->width, cam_rect->height,
- cam_rect->left, cam_rect->top,
- rect->width, rect->height, rect->left, rect->top);
-
- /* We need sensor maximum rectangle */
- ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &bounds);
- if (ret < 0)
- return ret;
-
- /* Put user requested rectangle within sensor bounds */
- soc_camera_limit_side(&rect->left, &rect->width, sdsel.r.left, 2,
- bounds.r.width);
- soc_camera_limit_side(&rect->top, &rect->height, sdsel.r.top, 4,
- bounds.r.height);
-
- /*
- * Popular special case - some cameras can only handle fixed sizes like
- * QVGA, VGA,... Take care to avoid an infinite loop.
- */
- width = max_t(unsigned int, cam_rect->width, 2);
- height = max_t(unsigned int, cam_rect->height, 2);
-
- /*
- * Loop as long as sensor is not covering the requested rectangle and
- * is still within its bounds
- */
- while (!ret && (is_smaller(cam_rect, rect) ||
- is_inside(cam_rect, rect)) &&
- (bounds.r.width > width || bounds.r.height > height)) {
-
- width *= 2;
- height *= 2;
-
- cam_rect->width = width;
- cam_rect->height = height;
-
- /*
- * We do not know what capabilities the camera has to set up
- * left and top borders. We could try to be smarter in iterating
- * them, e.g., if camera current left is to the right of the
- * target left, set it to the middle point between the current
- * left and minimum left. But that would add too much
- * complexity: we would have to iterate each border separately.
- * Instead we just drop to the left and top bounds.
- */
- if (cam_rect->left > rect->left)
- cam_rect->left = bounds.r.left;
-
- if (cam_rect->left + cam_rect->width < rect->left + rect->width)
- cam_rect->width = rect->left + rect->width -
- cam_rect->left;
-
- if (cam_rect->top > rect->top)
- cam_rect->top = bounds.r.top;
-
- if (cam_rect->top + cam_rect->height < rect->top + rect->height)
- cam_rect->height = rect->top + rect->height -
- cam_rect->top;
-
- sdsel.r = *cam_rect;
- v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
- *cam_rect = sdsel.r;
- ret = soc_camera_client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_SELECTION %d for %dx%d@%d:%d\n", ret,
- cam_rect->width, cam_rect->height,
- cam_rect->left, cam_rect->top);
- }
-
- /* S_SELECTION must not modify the rectangle */
- if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
- /*
- * The camera failed to configure a suitable cropping,
- * we cannot use the current rectangle, set to max
- */
- sdsel.r = bounds.r;
- v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
- *cam_rect = sdsel.r;
-
- ret = soc_camera_client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_SELECTION %d for max %dx%d@%d:%d\n", ret,
- cam_rect->width, cam_rect->height,
- cam_rect->left, cam_rect->top);
- }
-
- if (!ret) {
- *target_rect = *cam_rect;
- move_and_crop_subrect(target_rect, subrect);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(soc_camera_client_s_selection);
-
-/* Iterative set_fmt, also updates cached client crop on success */
-static int client_set_fmt(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- unsigned int max_width, unsigned int max_height,
- struct v4l2_subdev_format *format, bool host_can_scale)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->parent;
- struct v4l2_mbus_framefmt *mf = &format->format;
- unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
- struct v4l2_subdev_selection sdsel = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .target = V4L2_SEL_TGT_CROP_BOUNDS,
- };
- bool host_1to1;
- int ret;
-
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, NULL, format);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
-
- if (width == mf->width && height == mf->height) {
- /* Perfect! The client has done it all. */
- host_1to1 = true;
- goto update_cache;
- }
-
- host_1to1 = false;
- if (!host_can_scale)
- goto update_cache;
-
- ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
- if (ret < 0)
- return ret;
-
- if (max_width > sdsel.r.width)
- max_width = sdsel.r.width;
- if (max_height > sdsel.r.height)
- max_height = sdsel.r.height;
-
- /* The camera set a format, but the geometry is not precise; try to improve it */
- tmp_w = mf->width;
- tmp_h = mf->height;
-
- /* width <= max_width && height <= max_height - guaranteed by try_fmt */
- while ((width > tmp_w || height > tmp_h) &&
- tmp_w < max_width && tmp_h < max_height) {
- tmp_w = min(2 * tmp_w, max_width);
- tmp_h = min(2 * tmp_h, max_height);
- mf->width = tmp_w;
- mf->height = tmp_h;
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd), pad,
- set_fmt, NULL, format);
- dev_geo(dev, "Camera scaled to %ux%u\n",
- mf->width, mf->height);
- if (ret < 0) {
- /* This shouldn't happen */
- dev_err(dev, "Client failed to set format: %d\n", ret);
- return ret;
- }
- }
-
-update_cache:
- /* Update cache */
- ret = soc_camera_client_g_rect(sd, rect);
- if (ret < 0)
- return ret;
-
- if (host_1to1)
- *subrect = *rect;
- else
- move_and_crop_subrect(rect, subrect);
-
- return 0;
-}
-
-/**
- * soc_camera_client_scale
- * @icd: soc-camera device
- * @rect: camera cropping window
- * @subrect: part of rect, sent to the user
- * @mf: camera output window (input and output)
- * @width: on input: max host input width;
- * on output: user width, mapped back to input
- * @height: on input: max host input height;
- * on output: user height, mapped back to input
- * @host_can_scale: host can scale this pixel format
- * @shift: shift, used for scaling
- */
-int soc_camera_client_scale(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- struct v4l2_mbus_framefmt *mf,
- unsigned int *width, unsigned int *height,
- bool host_can_scale, unsigned int shift)
-{
- struct device *dev = icd->parent;
- struct v4l2_subdev_format fmt_tmp = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .format = *mf,
- };
- struct v4l2_mbus_framefmt *mf_tmp = &fmt_tmp.format;
- unsigned int scale_h, scale_v;
- int ret;
-
- /*
- * 5. Apply iterative camera S_FMT for camera user window (also updates
- * client crop cache and the imaginary sub-rectangle).
- */
- ret = client_set_fmt(icd, rect, subrect, *width, *height,
- &fmt_tmp, host_can_scale);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "5: camera scaled to %ux%u\n",
- mf_tmp->width, mf_tmp->height);
-
- /* 6. Retrieve camera output window (g_fmt) */
-
- /* unneeded - it is already in "mf_tmp" */
-
- /* 7. Calculate new client scales. */
- scale_h = soc_camera_calc_scale(rect->width, shift, mf_tmp->width);
- scale_v = soc_camera_calc_scale(rect->height, shift, mf_tmp->height);
-
- mf->width = mf_tmp->width;
- mf->height = mf_tmp->height;
- mf->colorspace = mf_tmp->colorspace;
-
- /*
- * 8. Calculate new host crop - apply camera scales to previously
- * updated "effective" crop.
- */
- *width = soc_camera_shift_scale(subrect->width, shift, scale_h);
- *height = soc_camera_shift_scale(subrect->height, shift, scale_v);
-
- dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);
-
- return 0;
-}
-EXPORT_SYMBOL(soc_camera_client_scale);
-
-/*
- * Calculate real client output window by applying new scales to the current
- * client crop. New scales are calculated from the requested output format and
- * host crop, mapped back onto the client input (subrect).
- */
-void soc_camera_calc_client_output(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf,
- unsigned int shift)
-{
- struct device *dev = icd->parent;
- unsigned int scale_v, scale_h;
-
- if (subrect->width == rect->width &&
- subrect->height == rect->height) {
- /* No sub-cropping */
- mf->width = pix->width;
- mf->height = pix->height;
- return;
- }
-
- /* 1.-2. Current camera scales and subwin - cached. */
-
- dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
- subrect->width, subrect->height,
- subrect->left, subrect->top);
-
- /*
- * 3. Calculate new combined scales from input sub-window to requested
- * user window.
- */
-
- /*
- * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF
- * (128x96) or larger than VGA. This and similar limitations have to be
- * taken into account here.
- */
- scale_h = soc_camera_calc_scale(subrect->width, shift, pix->width);
- scale_v = soc_camera_calc_scale(subrect->height, shift, pix->height);
-
- dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
-
- /*
- * 4. Calculate desired client output window by applying combined scales
- * to client (real) input window.
- */
- mf->width = soc_camera_shift_scale(rect->width, shift, scale_h);
- mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
-}
-EXPORT_SYMBOL(soc_camera_calc_client_output);
-
-MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
-MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.h b/drivers/media/platform/soc_camera/soc_scale_crop.h
deleted file mode 100644
index 9ca469312a1f..000000000000
--- a/drivers/media/platform/soc_camera/soc_scale_crop.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * soc-camera generic scaling-cropping manipulation functions
- *
- * Copyright (C) 2013 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef SOC_SCALE_CROP_H
-#define SOC_SCALE_CROP_H
-
-#include <linux/kernel.h>
-
-struct soc_camera_device;
-
-struct v4l2_selection;
-struct v4l2_mbus_framefmt;
-struct v4l2_pix_format;
-struct v4l2_rect;
-struct v4l2_subdev;
-
-static inline unsigned int soc_camera_shift_scale(unsigned int size,
- unsigned int shift, unsigned int scale)
-{
- return DIV_ROUND_CLOSEST(size << shift, scale);
-}
-
-#define soc_camera_calc_scale(in, shift, out) soc_camera_shift_scale(in, shift, out)
-
-int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
-int soc_camera_client_s_selection(struct v4l2_subdev *sd,
- struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
- struct v4l2_rect *target_rect, struct v4l2_rect *subrect);
-int soc_camera_client_scale(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- struct v4l2_mbus_framefmt *mf,
- unsigned int *width, unsigned int *height,
- bool host_can_scale, unsigned int shift);
-void soc_camera_calc_client_output(struct soc_camera_device *icd,
- struct v4l2_rect *rect, struct v4l2_rect *subrect,
- const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf,
- unsigned int shift);
-
-#endif
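
The two helpers removed above implement Q-format fixed-point scaling: soc_camera_calc_scale(in, shift, out) computes the ratio in/out with 'shift' fractional bits, and soc_camera_shift_scale() maps a length through such a ratio with round-to-nearest. A worked example with assumed numbers (shift = 12, a value typical of the CEU host that used these helpers):

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST(), as in the header above */

static inline unsigned int shift_scale(unsigned int size,
				       unsigned int shift, unsigned int scale)
{
	return DIV_ROUND_CLOSEST(size << shift, scale);
}

/*
 * A 640-pixel client crop scaled down to a 320-pixel output:
 *   scale = shift_scale(640, 12, 320) = (640 << 12) / 320 = 8192,
 * i.e. 2.0 in Q12 fixed point. Mapping a 320-pixel sub-rectangle
 * back through that ratio:
 *   shift_scale(320, 12, 8192) = (320 << 12) / 8192 = 160.
 */
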
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
index c6a4e2de5c0c..77ca7517fa3e 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
@@ -315,7 +315,7 @@ static void bdisp_dbg_dump_ivmx(struct seq_file *s,
seq_puts(s, "Unknown conversion\n");
}
-static int bdisp_dbg_last_nodes(struct seq_file *s, void *data)
+static int last_nodes_show(struct seq_file *s, void *data)
{
/* Not dumping all fields, focusing on significant ones */
struct bdisp_dev *bdisp = s->private;
@@ -388,7 +388,7 @@ static int bdisp_dbg_last_nodes(struct seq_file *s, void *data)
return 0;
}
-static int bdisp_dbg_last_nodes_raw(struct seq_file *s, void *data)
+static int last_nodes_raw_show(struct seq_file *s, void *data)
{
struct bdisp_dev *bdisp = s->private;
struct bdisp_node *node;
@@ -437,7 +437,7 @@ static const char *bdisp_fmt_to_str(struct bdisp_frame frame)
}
}
-static int bdisp_dbg_last_request(struct seq_file *s, void *data)
+static int last_request_show(struct seq_file *s, void *data)
{
struct bdisp_dev *bdisp = s->private;
struct bdisp_request *request = &bdisp->dbg.copy_request;
@@ -474,7 +474,7 @@ static int bdisp_dbg_last_request(struct seq_file *s, void *data)
#define DUMP(reg) seq_printf(s, #reg " \t0x%08X\n", readl(bdisp->regs + reg))
-static int bdisp_dbg_regs(struct seq_file *s, void *data)
+static int regs_show(struct seq_file *s, void *data)
{
struct bdisp_dev *bdisp = s->private;
int ret;
@@ -582,7 +582,7 @@ static int bdisp_dbg_regs(struct seq_file *s, void *data)
#define SECOND 1000000
-static int bdisp_dbg_perf(struct seq_file *s, void *data)
+static int perf_show(struct seq_file *s, void *data)
{
struct bdisp_dev *bdisp = s->private;
struct bdisp_request *request = &bdisp->dbg.copy_request;
@@ -627,27 +627,15 @@ static int bdisp_dbg_perf(struct seq_file *s, void *data)
return 0;
}
-#define bdisp_dbg_declare(name) \
- static int bdisp_dbg_##name##_open(struct inode *i, struct file *f) \
- { \
- return single_open(f, bdisp_dbg_##name, i->i_private); \
- } \
- static const struct file_operations bdisp_dbg_##name##_fops = { \
- .open = bdisp_dbg_##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- }
-
#define bdisp_dbg_create_entry(name) \
debugfs_create_file(#name, S_IRUGO, bdisp->dbg.debugfs_entry, bdisp, \
- &bdisp_dbg_##name##_fops)
+ &name##_fops)
-bdisp_dbg_declare(regs);
-bdisp_dbg_declare(last_nodes);
-bdisp_dbg_declare(last_nodes_raw);
-bdisp_dbg_declare(last_request);
-bdisp_dbg_declare(perf);
+DEFINE_SHOW_ATTRIBUTE(regs);
+DEFINE_SHOW_ATTRIBUTE(last_nodes);
+DEFINE_SHOW_ATTRIBUTE(last_nodes_raw);
+DEFINE_SHOW_ATTRIBUTE(last_request);
+DEFINE_SHOW_ATTRIBUTE(perf);
int bdisp_debugfs_create(struct bdisp_dev *bdisp)
{
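
The open-coded fops boilerplate removed in this hunk is exactly what DEFINE_SHOW_ATTRIBUTE() was introduced to replace: given a name##_show() function, the macro (from include/linux/seq_file.h) expands to approximately the following, additionally setting .owner, which the hand-rolled version omitted:

static int regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, regs_show, inode->i_private);
}

static const struct file_operations regs_fops = {
	.owner   = THIS_MODULE,
	.open    = regs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
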
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
index 3dbb3a287cc0..c9d6021904cd 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h
@@ -193,7 +193,7 @@ struct c8sectpfei {
#define C8SECTPFE_SYS_ENABLE BIT(0)
/*
- * Ponter record data structure required for each input block
+ * Pointer record data structure required for each input block
* see Table 82 on page 167 of functional specification.
*/
diff --git a/drivers/media/platform/sti/delta/delta.h b/drivers/media/platform/sti/delta/delta.h
index 2ba99922c05b..914556030e70 100644
--- a/drivers/media/platform/sti/delta/delta.h
+++ b/drivers/media/platform/sti/delta/delta.h
@@ -286,7 +286,7 @@ struct delta_dec {
* Header parsing must be done using decode(), giving
* explicitly header access unit or first access unit of bitstream.
* If no valid header is found, get_streaminfo will return -ENODATA,
- * in this case the next bistream access unit must be decoded till
+ * in this case the next bitstream access unit must be decoded till
* get_streaminfo becomes successful.
*/
int (*get_streaminfo)(struct delta_ctx *ctx,
diff --git a/drivers/media/platform/sti/hva/hva-debugfs.c b/drivers/media/platform/sti/hva/hva-debugfs.c
index 9f7e8ac875d1..7d12a5b5d914 100644
--- a/drivers/media/platform/sti/hva/hva-debugfs.c
+++ b/drivers/media/platform/sti/hva/hva-debugfs.c
@@ -271,7 +271,7 @@ static void hva_dbg_perf_compute(struct hva_ctx *ctx)
* device debug info
*/
-static int hva_dbg_device(struct seq_file *s, void *data)
+static int device_show(struct seq_file *s, void *data)
{
struct hva_dev *hva = s->private;
@@ -281,7 +281,7 @@ static int hva_dbg_device(struct seq_file *s, void *data)
return 0;
}
-static int hva_dbg_encoders(struct seq_file *s, void *data)
+static int encoders_show(struct seq_file *s, void *data)
{
struct hva_dev *hva = s->private;
unsigned int i = 0;
@@ -299,7 +299,7 @@ static int hva_dbg_encoders(struct seq_file *s, void *data)
return 0;
}
-static int hva_dbg_last(struct seq_file *s, void *data)
+static int last_show(struct seq_file *s, void *data)
{
struct hva_dev *hva = s->private;
struct hva_ctx *last_ctx = &hva->dbg.last_ctx;
@@ -316,7 +316,7 @@ static int hva_dbg_last(struct seq_file *s, void *data)
return 0;
}
-static int hva_dbg_regs(struct seq_file *s, void *data)
+static int regs_show(struct seq_file *s, void *data)
{
struct hva_dev *hva = s->private;
@@ -325,26 +325,14 @@ static int hva_dbg_regs(struct seq_file *s, void *data)
return 0;
}
-#define hva_dbg_declare(name) \
- static int hva_dbg_##name##_open(struct inode *i, struct file *f) \
- { \
- return single_open(f, hva_dbg_##name, i->i_private); \
- } \
- static const struct file_operations hva_dbg_##name##_fops = { \
- .open = hva_dbg_##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- }
-
#define hva_dbg_create_entry(name) \
debugfs_create_file(#name, 0444, hva->dbg.debugfs_entry, hva, \
- &hva_dbg_##name##_fops)
+ &name##_fops)
-hva_dbg_declare(device);
-hva_dbg_declare(encoders);
-hva_dbg_declare(last);
-hva_dbg_declare(regs);
+DEFINE_SHOW_ATTRIBUTE(device);
+DEFINE_SHOW_ATTRIBUTE(encoders);
+DEFINE_SHOW_ATTRIBUTE(last);
+DEFINE_SHOW_ATTRIBUTE(regs);
void hva_debugfs_create(struct hva_dev *hva)
{
@@ -380,7 +368,7 @@ void hva_debugfs_remove(struct hva_dev *hva)
* context (instance) debug info
*/
-static int hva_dbg_ctx(struct seq_file *s, void *data)
+static int ctx_show(struct seq_file *s, void *data)
{
struct hva_ctx *ctx = s->private;
@@ -392,7 +380,7 @@ static int hva_dbg_ctx(struct seq_file *s, void *data)
return 0;
}
-hva_dbg_declare(ctx);
+DEFINE_SHOW_ATTRIBUTE(ctx);
void hva_dbg_ctx_create(struct hva_ctx *ctx)
{
@@ -407,7 +395,7 @@ void hva_dbg_ctx_create(struct hva_ctx *ctx)
ctx->dbg.debugfs_entry = debugfs_create_file(name, 0444,
hva->dbg.debugfs_entry,
- ctx, &hva_dbg_ctx_fops);
+ ctx, &ctx_fops);
}
void hva_dbg_ctx_remove(struct hva_ctx *ctx)
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c
index b61a5d337d2a..c34f7cf5aed2 100644
--- a/drivers/media/platform/sti/hva/hva-h264.c
+++ b/drivers/media/platform/sti/hva/hva-h264.c
@@ -626,7 +626,7 @@ static int hva_h264_prepare_task(struct hva_ctx *pctx,
td->frame_width = frame_width;
td->frame_height = frame_height;
- /* set frame alignement */
+ /* set frame alignment */
td->window_width = frame_width;
td->window_height = frame_height;
td->window_horizontal_offset = 0;
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 6732874114cf..5fe5b38fa901 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -1544,7 +1544,7 @@ static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev));
- /* Checks internaly if vdev has been init or not */
+ /* Checks internally if vdev has been init or not */
video_unregister_device(dcmi->vdev);
}
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index 6950585edb5a..4c79eb64a7a7 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -15,6 +15,7 @@
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -143,6 +144,15 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
break;
}
break;
+
+ case V4L2_PIX_FMT_RGB565:
+ return (mbus_code == MEDIA_BUS_FMT_RGB565_2X8_LE);
+ case V4L2_PIX_FMT_RGB565X:
+ return (mbus_code == MEDIA_BUS_FMT_RGB565_2X8_BE);
+
+ case V4L2_PIX_FMT_JPEG:
+ return (mbus_code == MEDIA_BUS_FMT_JPEG_1X8);
+
default:
dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat);
break;
@@ -154,6 +164,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable)
{
struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
+ struct device *dev = sdev->dev;
struct regmap *regmap = sdev->regmap;
int ret;
@@ -161,6 +172,9 @@ int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable)
regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, 0);
clk_disable_unprepare(sdev->clk_ram);
+ if (of_device_is_compatible(dev->of_node,
+ "allwinner,sun50i-a64-csi"))
+ clk_rate_exclusive_put(sdev->clk_mod);
clk_disable_unprepare(sdev->clk_mod);
reset_control_assert(sdev->rstc_bus);
return 0;
@@ -172,6 +186,9 @@ int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable)
return ret;
}
+ if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi"))
+ clk_set_rate_exclusive(sdev->clk_mod, 300000000);
+
ret = clk_prepare_enable(sdev->clk_ram);
if (ret) {
dev_err(sdev->dev, "Enable clk_dram_csi clk err %d\n", ret);
@@ -191,6 +208,8 @@ int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable)
clk_ram_disable:
clk_disable_unprepare(sdev->clk_ram);
clk_mod_disable:
+ if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi"))
+ clk_rate_exclusive_put(sdev->clk_mod);
clk_disable_unprepare(sdev->clk_mod);
return ret;
}
@@ -198,8 +217,8 @@ clk_mod_disable:
static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev,
u32 mbus_code, u32 pixformat)
{
- /* bayer */
- if ((mbus_code & 0xF000) == 0x3000)
+ /* non-YUV */
+ if ((mbus_code & 0xF000) != 0x2000)
return CSI_INPUT_FORMAT_RAW;
switch (pixformat) {
@@ -268,6 +287,14 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev,
case V4L2_PIX_FMT_YUV422P:
return buf_interlaced ? CSI_FRAME_PLANAR_YUV422 :
CSI_FIELD_PLANAR_YUV422;
+
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ return buf_interlaced ? CSI_FRAME_RGB565 : CSI_FIELD_RGB565;
+
+ case V4L2_PIX_FMT_JPEG:
+ return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8;
+
default:
dev_warn(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat);
break;
@@ -279,6 +306,10 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev,
static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
u32 mbus_code, u32 pixformat)
{
+ /* Input sequence does not apply to non-YUV formats */
+ if ((mbus_code & 0xF000) != 0x2000)
+ return 0;
+
switch (pixformat) {
case V4L2_PIX_FMT_HM12:
case V4L2_PIX_FMT_NV12:
@@ -793,7 +824,7 @@ static const struct regmap_config sun6i_csi_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0x1000,
+ .max_register = 0x9c,
};
static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
@@ -893,7 +924,9 @@ static int sun6i_csi_remove(struct platform_device *pdev)
static const struct of_device_id sun6i_csi_of_match[] = {
{ .compatible = "allwinner,sun6i-a31-csi", },
+ { .compatible = "allwinner,sun8i-h3-csi", },
{ .compatible = "allwinner,sun8i-v3s-csi", },
+ { .compatible = "allwinner,sun50i-a64-csi", },
{},
};
MODULE_DEVICE_TABLE(of, sun6i_csi_of_match);
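
The A64-specific calls added above use the exclusive clock-rate API: clk_set_rate_exclusive() sets the rate and takes a reference that prevents other consumers from changing it, so each successful call must be balanced with clk_rate_exclusive_put() on both the disable and the error paths, as the hunks do. A minimal sketch of the pairing (hypothetical helpers; the clock is assumed to be obtained elsewhere):

#include <linux/clk.h>

static int demo_pin_mod_clock(struct clk *clk_mod)
{
	int ret;

	ret = clk_prepare_enable(clk_mod);
	if (ret)
		return ret;

	/* Pin the rate until the matching clk_rate_exclusive_put(). */
	ret = clk_set_rate_exclusive(clk_mod, 300000000);
	if (ret)
		clk_disable_unprepare(clk_mod);
	return ret;
}

static void demo_unpin_mod_clock(struct clk *clk_mod)
{
	clk_rate_exclusive_put(clk_mod);
	clk_disable_unprepare(clk_mod);
}
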
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
index 0bb000712c33..c626821aaedb 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
@@ -65,7 +65,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, u32 pixformat,
int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable);
/**
- * sun6i_csi_update_config() - update the csi register setttings
+ * sun6i_csi_update_config() - update the csi register settings
* @csi: pointer to the csi
* @config: see struct sun6i_csi_config
*/
@@ -94,6 +94,7 @@ static inline int sun6i_csi_get_bpp(unsigned int pixformat)
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_JPEG:
return 8;
case V4L2_PIX_FMT_SBGGR10:
case V4L2_PIX_FMT_SGBRG10:
@@ -117,6 +118,8 @@ static inline int sun6i_csi_get_bpp(unsigned int pixformat)
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
return 16;
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_BGR24:
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index b04300c3811f..1fd16861f111 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -56,6 +56,9 @@ static const u32 supported_pixformats[] = {
V4L2_PIX_FMT_NV16,
V4L2_PIX_FMT_NV61,
V4L2_PIX_FMT_YUV422P,
+ V4L2_PIX_FMT_RGB565,
+ V4L2_PIX_FMT_RGB565X,
+ V4L2_PIX_FMT_JPEG,
};
static bool is_pixformat_valid(unsigned int pixformat)
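
The diff context cuts off at is_pixformat_valid(); its body is not part of this change. For orientation, a check consistent with the table above would be a linear scan (a sketch, not necessarily the driver's exact code):

static bool is_pixformat_valid(unsigned int pixformat)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(supported_pixformats); i++)
		if (supported_pixformats[i] == pixformat)
			return true;

	return false;
}
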
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index e2cf2b90e500..78d716c93649 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -404,7 +404,7 @@ EXPORT_SYMBOL(vpdma_map_desc_buf);
/*
* unmap descriptor/payload DMA buffer, disabling DMA access and
- * allowing the main processor to acces the data
+ * allowing the main processor to access the data
*/
void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
@@ -501,7 +501,7 @@ void vpdma_reset_desc_list(struct vpdma_desc_list *list)
EXPORT_SYMBOL(vpdma_reset_desc_list);
/*
- * free the buffer allocated fot the VPDMA descriptor list, this should be
+ * free the buffer allocated for the VPDMA descriptor list, this should be
* called when the user doesn't want to use VPDMA any more.
*/
void vpdma_free_desc_list(struct vpdma_desc_list *list)
@@ -790,7 +790,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
* append an outbound data transfer descriptor to the given descriptor list,
* this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
*
- * @list: vpdma desc list to which we add this decriptor
+ * @list: vpdma desc list to which we add this descriptor
* @width: width of the image in pixels in memory
* @c_rect: compose params of output image
* @fmt: vpdma data format of the buffer
@@ -798,7 +798,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
* max_width: enum for maximum width of data transfer
* max_height: enum for maximum height of data transfer
* chan: VPDMA channel
- * flags: VPDMA flags to configure some descriptor fileds
+ * flags: VPDMA flags to configure some descriptor fields
*/
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
int stride, const struct v4l2_rect *c_rect,
@@ -863,14 +863,14 @@ EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
* append an inbound data transfer descriptor to the given descriptor list,
* this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
*
- * @list: vpdma desc list to which we add this decriptor
+ * @list: vpdma desc list to which we add this descriptor
* @width: width of the image in pixels in memory(not the cropped width)
* @c_rect: crop params of input image
* @fmt: vpdma data format of the buffer
* dma_addr: dma address as seen by VPDMA
* chan: VPDMA channel
* field: top or bottom field info of the input image
- * flags: VPDMA flags to configure some descriptor fileds
+ * flags: VPDMA flags to configure some descriptor fields
* frame_width/height: the complete width/height of the image presented to the
* client (this makes sense when multiple channels are
* connected to the same client, forming a larger frame)
@@ -1008,7 +1008,7 @@ unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
}
EXPORT_SYMBOL(vpdma_get_list_mask);
-/* clear previosuly occured list intterupts in the LIST_STAT register */
+/* clear previously occurred list interrupts in the LIST_STAT register */
void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
int list_num)
{
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index d70871d0ad2d..207e7e76c048 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -876,7 +876,7 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
/*
* we make sure that the source image has a 16 byte aligned
* stride, we need to do the same for the motion vector buffer
- * by aligning it's stride to the next 16 byte boundry. this
+ * by aligning it's stride to the next 16 byte boundary. this
* extra space will not be used by the de-interlacer, but will
* ensure that vpdma operates correctly
*/
diff --git a/drivers/media/platform/vicodec/codec-fwht.c b/drivers/media/platform/vicodec/codec-fwht.c
index 5630f1dc45e6..d1d6085da9f1 100644
--- a/drivers/media/platform/vicodec/codec-fwht.c
+++ b/drivers/media/platform/vicodec/codec-fwht.c
@@ -10,8 +10,11 @@
*/
#include <linux/string.h>
+#include <linux/kernel.h>
#include "codec-fwht.h"
+#define OVERFLOW_BIT BIT(14)
+
/*
* Note: bit 0 of the header must always be 0. Otherwise it cannot
* be guaranteed that the magic 8 byte sequence (see below) can
@@ -103,16 +106,21 @@ static int rlc(const s16 *in, __be16 *output, int blocktype)
* This function will worst-case increase rlc_in by 65*2 bytes:
* one s16 value for the header and 8 * 8 coefficients of type s16.
*/
-static s16 derlc(const __be16 **rlc_in, s16 *dwht_out)
+static u16 derlc(const __be16 **rlc_in, s16 *dwht_out,
+ const __be16 *end_of_input)
{
/* header */
const __be16 *input = *rlc_in;
- s16 ret = ntohs(*input++);
+ u16 stat;
int dec_count = 0;
s16 block[8 * 8 + 16];
s16 *wp = block;
int i;
+ if (input > end_of_input)
+ return OVERFLOW_BIT;
+ stat = ntohs(*input++);
+
/*
* Now de-compress, it expands one byte to up to 15 bytes
* (or fills the remainder of the 64 bytes with zeroes if it
@@ -122,9 +130,15 @@ static s16 derlc(const __be16 **rlc_in, s16 *dwht_out)
* allow for overflow if the incoming data was malformed.
*/
while (dec_count < 8 * 8) {
- s16 in = ntohs(*input++);
- int length = in & 0xf;
- int coeff = in >> 4;
+ s16 in;
+ int length;
+ int coeff;
+
+ if (input > end_of_input)
+ return OVERFLOW_BIT;
+ in = ntohs(*input++);
+ length = in & 0xf;
+ coeff = in >> 4;
/* fill remainder with zeros */
if (length == 15) {
@@ -149,7 +163,7 @@ static s16 derlc(const __be16 **rlc_in, s16 *dwht_out)
dwht_out[x + y * 8] = *wp++;
}
*rlc_in = input;
- return ret;
+ return stat;
}
static const int quant_table[] = {
@@ -237,8 +251,6 @@ static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
unsigned int i;
/* stage 1 */
- stride *= input_step;
-
for (i = 0; i < 8; i++, tmp += stride, out += 8) {
switch (input_step) {
case 1:
@@ -562,7 +574,7 @@ static void fill_encoder_block(const u8 *input, s16 *dst,
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++, input += input_step)
*dst++ = *input;
- input += (stride - 8) * input_step;
+ input += stride - 8 * input_step;
}
}
@@ -660,7 +672,7 @@ static void add_deltas(s16 *deltas, const u8 *ref, int stride)
static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
struct fwht_cframe *cf, u32 height, u32 width,
- unsigned int input_step,
+ u32 stride, unsigned int input_step,
bool is_intra, bool next_is_intra)
{
u8 *input_start = input;
@@ -671,7 +683,11 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
unsigned int last_size = 0;
unsigned int i, j;
+ width = round_up(width, 8);
+ height = round_up(height, 8);
+
for (j = 0; j < height / 8; j++) {
+ input = input_start + j * 8 * stride;
for (i = 0; i < width / 8; i++) {
/* intra code, first frame is always intra coded. */
int blocktype = IBLOCK;
@@ -679,9 +695,9 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
if (!is_intra)
blocktype = decide_blocktype(input, refp,
- deltablock, width, input_step);
+ deltablock, stride, input_step);
if (blocktype == IBLOCK) {
- fwht(input, cf->coeffs, width, input_step, 1);
+ fwht(input, cf->coeffs, stride, input_step, 1);
quantize_intra(cf->coeffs, cf->de_coeffs,
cf->i_frame_qp);
} else {
@@ -722,12 +738,12 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
}
last_size = size;
}
- input += width * 7 * input_step;
}
exit_loop:
if (encoding & FWHT_FRAME_UNENCODED) {
u8 *out = (u8 *)rlco_start;
+ u8 *p;
input = input_start;
/*
@@ -736,8 +752,11 @@ exit_loop:
* by 0xfe. Since YUV is limited range such values
* shouldn't appear anyway.
*/
- for (i = 0; i < height * width; i++, input += input_step)
- *out++ = (*input == 0xff) ? 0xfe : *input;
+ for (j = 0; j < height; j++) {
+ for (i = 0, p = input; i < width; i++, p += input_step)
+ *out++ = (*p == 0xff) ? 0xfe : *p;
+ input += stride;
+ }
*rlco = (__be16 *)out;
encoding &= ~FWHT_FRAME_PCODED;
}
@@ -747,30 +766,32 @@ exit_loop:
u32 fwht_encode_frame(struct fwht_raw_frame *frm,
struct fwht_raw_frame *ref_frm,
struct fwht_cframe *cf,
- bool is_intra, bool next_is_intra)
+ bool is_intra, bool next_is_intra,
+ unsigned int width, unsigned int height,
+ unsigned int stride, unsigned int chroma_stride)
{
- unsigned int size = frm->height * frm->width;
+ unsigned int size = height * width;
__be16 *rlco = cf->rlc_data;
__be16 *rlco_max;
u32 encoding;
rlco_max = rlco + size / 2 - 256;
encoding = encode_plane(frm->luma, ref_frm->luma, &rlco, rlco_max, cf,
- frm->height, frm->width,
+ height, width, stride,
frm->luma_alpha_step, is_intra, next_is_intra);
if (encoding & FWHT_FRAME_UNENCODED)
encoding |= FWHT_LUMA_UNENCODED;
encoding &= ~FWHT_FRAME_UNENCODED;
if (frm->components_num >= 3) {
- u32 chroma_h = frm->height / frm->height_div;
- u32 chroma_w = frm->width / frm->width_div;
+ u32 chroma_h = height / frm->height_div;
+ u32 chroma_w = width / frm->width_div;
unsigned int chroma_size = chroma_h * chroma_w;
rlco_max = rlco + chroma_size / 2 - 256;
encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max,
cf, chroma_h, chroma_w,
- frm->chroma_step,
+ chroma_stride, frm->chroma_step,
is_intra, next_is_intra);
if (encoding & FWHT_FRAME_UNENCODED)
encoding |= FWHT_CB_UNENCODED;
@@ -778,7 +799,7 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm,
rlco_max = rlco + chroma_size / 2 - 256;
encoding |= encode_plane(frm->cr, ref_frm->cr, &rlco, rlco_max,
cf, chroma_h, chroma_w,
- frm->chroma_step,
+ chroma_stride, frm->chroma_step,
is_intra, next_is_intra);
if (encoding & FWHT_FRAME_UNENCODED)
encoding |= FWHT_CR_UNENCODED;
@@ -787,10 +808,10 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm,
if (frm->components_num == 4) {
rlco_max = rlco + size / 2 - 256;
- encoding = encode_plane(frm->alpha, ref_frm->alpha, &rlco,
- rlco_max, cf, frm->height, frm->width,
- frm->luma_alpha_step,
- is_intra, next_is_intra);
+ encoding |= encode_plane(frm->alpha, ref_frm->alpha, &rlco,
+ rlco_max, cf, height, width,
+ stride, frm->luma_alpha_step,
+ is_intra, next_is_intra);
if (encoding & FWHT_FRAME_UNENCODED)
encoding |= FWHT_ALPHA_UNENCODED;
encoding &= ~FWHT_FRAME_UNENCODED;
@@ -800,18 +821,24 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm,
return encoding;
}
-static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
- u32 height, u32 width, bool uncompressed)
+static bool decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
+ u32 height, u32 width, u32 coded_width,
+ bool uncompressed, const __be16 *end_of_rlco_buf)
{
unsigned int copies = 0;
s16 copy[8 * 8];
- s16 stat;
+ u16 stat;
unsigned int i, j;
+ width = round_up(width, 8);
+ height = round_up(height, 8);
+
if (uncompressed) {
+ if (end_of_rlco_buf + 1 < *rlco + width * height / 2)
+ return false;
memcpy(ref, *rlco, width * height);
*rlco += width * height / 2;
- return;
+ return true;
}
/*
@@ -822,19 +849,22 @@ static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
*/
for (j = 0; j < height / 8; j++) {
for (i = 0; i < width / 8; i++) {
- u8 *refp = ref + j * 8 * width + i * 8;
+ u8 *refp = ref + j * 8 * coded_width + i * 8;
if (copies) {
memcpy(cf->de_fwht, copy, sizeof(copy));
if (stat & PFRAME_BIT)
- add_deltas(cf->de_fwht, refp, width);
- fill_decoder_block(refp, cf->de_fwht, width);
+ add_deltas(cf->de_fwht, refp,
+ coded_width);
+ fill_decoder_block(refp, cf->de_fwht,
+ coded_width);
copies--;
continue;
}
- stat = derlc(rlco, cf->coeffs);
-
+ stat = derlc(rlco, cf->coeffs, end_of_rlco_buf);
+ if (stat & OVERFLOW_BIT)
+ return false;
if (stat & PFRAME_BIT)
dequantize_inter(cf->coeffs);
else
@@ -847,35 +877,53 @@ static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
if (copies)
memcpy(copy, cf->de_fwht, sizeof(copy));
if (stat & PFRAME_BIT)
- add_deltas(cf->de_fwht, refp, width);
- fill_decoder_block(refp, cf->de_fwht, width);
+ add_deltas(cf->de_fwht, refp, coded_width);
+ fill_decoder_block(refp, cf->de_fwht, coded_width);
}
}
+ return true;
}
-void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
- u32 hdr_flags, unsigned int components_num)
+bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
+ u32 hdr_flags, unsigned int components_num,
+ unsigned int width, unsigned int height,
+ unsigned int coded_width)
{
const __be16 *rlco = cf->rlc_data;
+ const __be16 *end_of_rlco_buf = cf->rlc_data +
+ (cf->size / sizeof(*rlco)) - 1;
- decode_plane(cf, &rlco, ref->luma, cf->height, cf->width,
- hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED);
+ if (!decode_plane(cf, &rlco, ref->luma, height, width, coded_width,
+ hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED,
+ end_of_rlco_buf))
+ return false;
if (components_num >= 3) {
- u32 h = cf->height;
- u32 w = cf->width;
+ u32 h = height;
+ u32 w = width;
+ u32 c = coded_width;
if (!(hdr_flags & FWHT_FL_CHROMA_FULL_HEIGHT))
h /= 2;
- if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH))
+ if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH)) {
w /= 2;
- decode_plane(cf, &rlco, ref->cb, h, w,
- hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED);
- decode_plane(cf, &rlco, ref->cr, h, w,
- hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED);
+ c /= 2;
+ }
+ if (!decode_plane(cf, &rlco, ref->cb, h, w, c,
+ hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED,
+ end_of_rlco_buf))
+ return false;
+ if (!decode_plane(cf, &rlco, ref->cr, h, w, c,
+ hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED,
+ end_of_rlco_buf))
+ return false;
}
if (components_num == 4)
- decode_plane(cf, &rlco, ref->alpha, cf->height, cf->width,
- hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED);
+ if (!decode_plane(cf, &rlco, ref->alpha, height, width,
+ coded_width,
+ hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED,
+ end_of_rlco_buf))
+ return false;
+ return true;
}
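
Every new failure path above follows the same hardening rule: check the stream cursor against the end of the buffer before each read, and report malformed input through OVERFLOW_BIT, a bit chosen because valid header words do not use it. The pattern in isolation (a standalone sketch with a hypothetical reader, not the vicodec code itself):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_OVERFLOW_BIT (1u << 14)	/* never set in a valid word */

/* Read one big-endian 16-bit word without running past the buffer. */
static uint16_t demo_read_be16(const uint8_t **pos, const uint8_t *end)
{
	const uint8_t *p = *pos;

	if (p + 2 > end)
		return DEMO_OVERFLOW_BIT;

	*pos = p + 2;
	return (uint16_t)((p[0] << 8) | p[1]);
}
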
diff --git a/drivers/media/platform/vicodec/codec-fwht.h b/drivers/media/platform/vicodec/codec-fwht.h
index 90ff8962fca7..c410512d47c5 100644
--- a/drivers/media/platform/vicodec/codec-fwht.h
+++ b/drivers/media/platform/vicodec/codec-fwht.h
@@ -56,7 +56,7 @@
#define FWHT_MAGIC1 0x4f4f4f4f
#define FWHT_MAGIC2 0xffffffff
-#define FWHT_VERSION 2
+#define FWHT_VERSION 3
/* Set if this is an interlaced format */
#define FWHT_FL_IS_INTERLACED BIT(0)
@@ -76,11 +76,25 @@
#define FWHT_FL_CHROMA_FULL_HEIGHT BIT(7)
#define FWHT_FL_CHROMA_FULL_WIDTH BIT(8)
#define FWHT_FL_ALPHA_IS_UNCOMPRESSED BIT(9)
+#define FWHT_FL_I_FRAME BIT(10)
/* A 3-bit field: the number of components - 1 */
-#define FWHT_FL_COMPONENTS_NUM_MSK GENMASK(17, 16)
+#define FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16)
#define FWHT_FL_COMPONENTS_NUM_OFFSET 16
+#define FWHT_FL_PIXENC_MSK GENMASK(20, 19)
+#define FWHT_FL_PIXENC_OFFSET 19
+#define FWHT_FL_PIXENC_YUV (1 << FWHT_FL_PIXENC_OFFSET)
+#define FWHT_FL_PIXENC_RGB (2 << FWHT_FL_PIXENC_OFFSET)
+#define FWHT_FL_PIXENC_HSV (3 << FWHT_FL_PIXENC_OFFSET)
+
+/*
+ * A macro to calculate the needed padding in order to make sure
+ * both luma and chroma component resolutions are rounded up to
+ * a multiple of 8
+ */
+#define vic_round_dim(dim, div) (round_up((dim) / (div), 8) * (div))
+
struct fwht_cframe_hdr {
u32 magic1;
u32 magic2;
@@ -95,7 +109,6 @@ struct fwht_cframe_hdr {
};
struct fwht_cframe {
- unsigned int width, height;
u16 i_frame_qp;
u16 p_frame_qp;
__be16 *rlc_data;
@@ -106,7 +119,6 @@ struct fwht_cframe {
};
struct fwht_raw_frame {
- unsigned int width, height;
unsigned int width_div;
unsigned int height_div;
unsigned int luma_alpha_step;
@@ -125,8 +137,12 @@ struct fwht_raw_frame {
u32 fwht_encode_frame(struct fwht_raw_frame *frm,
struct fwht_raw_frame *ref_frm,
struct fwht_cframe *cf,
- bool is_intra, bool next_is_intra);
-void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
- u32 hdr_flags, unsigned int components_num);
+ bool is_intra, bool next_is_intra,
+ unsigned int width, unsigned int height,
+ unsigned int stride, unsigned int chroma_stride);
+bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
+ u32 hdr_flags, unsigned int components_num,
+ unsigned int width, unsigned int height,
+ unsigned int coded_width);
#endif
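
vic_round_dim() exists because rounding the luma dimension alone is not enough: after division by the chroma divisor, each chroma plane must still tile into the 8x8 blocks the FWHT operates on. A worked example with assumed numbers:

#include <linux/kernel.h>	/* round_up() */

/* Same macro as in the header above, repeated for the example. */
#define vic_round_dim(dim, div) (round_up((dim) / (div), 8) * (div))

/*
 * A 642-pixel-wide 4:2:0 frame (width_div == 2):
 *   vic_round_dim(642, 2) = round_up(321, 8) * 2 = 328 * 2 = 656
 * Luma plane: 656 wide; chroma planes: 328 wide, both multiples
 * of 8. Plain round_up(642, 8) = 648 would leave the chroma
 * planes 324 wide, which 8x8 blocks cannot tile.
 */
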
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
index 8cb0212df67f..6573a471c5ca 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
@@ -11,32 +11,53 @@
#include "codec-v4l2-fwht.h"
static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
- { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3},
- { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3},
- { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3},
- { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3},
- { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3},
- { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3},
- { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3},
- { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3},
- { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3},
- { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3},
- { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3},
- { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3},
- { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3},
- { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3},
- { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3},
- { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3},
- { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3},
- { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4},
- { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4},
- { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1},
+ { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3, 3, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV},
+ { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
+ { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
+ { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
};
+const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div,
+ u32 height_div,
+ u32 components_num,
+ u32 pixenc,
+ unsigned int start_idx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(v4l2_fwht_pixfmts); i++) {
+ if (v4l2_fwht_pixfmts[i].width_div == width_div &&
+ v4l2_fwht_pixfmts[i].height_div == height_div &&
+ (!pixenc || v4l2_fwht_pixfmts[i].pixenc == pixenc) &&
+ v4l2_fwht_pixfmts[i].components_num == components_num) {
+ if (start_idx == 0)
+ return v4l2_fwht_pixfmts + i;
+ start_idx--;
+ }
+ }
+ return NULL;
+}
+
const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat)
{
unsigned int i;
@@ -56,7 +77,8 @@ const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx)
int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
{
- unsigned int size = state->width * state->height;
+ unsigned int size = state->stride * state->coded_height;
+ unsigned int chroma_stride = state->stride;
const struct v4l2_fwht_pixfmt_info *info = state->info;
struct fwht_cframe_hdr *p_hdr;
struct fwht_cframe cf;
@@ -66,8 +88,7 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
if (!info)
return -EINVAL;
- rf.width = state->width;
- rf.height = state->height;
+
rf.luma = p_in;
rf.width_div = info->width_div;
rf.height_div = info->height_div;
@@ -84,14 +105,17 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
case V4L2_PIX_FMT_YUV420:
rf.cb = rf.luma + size;
rf.cr = rf.cb + size / 4;
+ chroma_stride /= 2;
break;
case V4L2_PIX_FMT_YVU420:
rf.cr = rf.luma + size;
rf.cb = rf.cr + size / 4;
+ chroma_stride /= 2;
break;
case V4L2_PIX_FMT_YUV422P:
rf.cb = rf.luma + size;
rf.cr = rf.cb + size / 2;
+ chroma_stride /= 2;
break;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
@@ -163,15 +187,16 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
return -EINVAL;
}
- cf.width = state->width;
- cf.height = state->height;
cf.i_frame_qp = state->i_frame_qp;
cf.p_frame_qp = state->p_frame_qp;
cf.rlc_data = (__be16 *)(p_out + sizeof(*p_hdr));
encoding = fwht_encode_frame(&rf, &state->ref_frame, &cf,
!state->gop_cnt,
- state->gop_cnt == state->gop_size - 1);
+ state->gop_cnt == state->gop_size - 1,
+ state->visible_width,
+ state->visible_height,
+ state->stride, chroma_stride);
if (!(encoding & FWHT_FRAME_PCODED))
state->gop_cnt = 0;
if (++state->gop_cnt >= state->gop_size)
@@ -181,9 +206,10 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
p_hdr->magic1 = FWHT_MAGIC1;
p_hdr->magic2 = FWHT_MAGIC2;
p_hdr->version = htonl(FWHT_VERSION);
- p_hdr->width = htonl(cf.width);
- p_hdr->height = htonl(cf.height);
+ p_hdr->width = htonl(state->visible_width);
+ p_hdr->height = htonl(state->visible_height);
flags |= (info->components_num - 1) << FWHT_FL_COMPONENTS_NUM_OFFSET;
+ flags |= info->pixenc;
if (encoding & FWHT_LUMA_UNENCODED)
flags |= FWHT_FL_LUMA_IS_UNCOMPRESSED;
if (encoding & FWHT_CB_UNENCODED)
@@ -192,6 +218,8 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
flags |= FWHT_FL_CR_IS_UNCOMPRESSED;
if (encoding & FWHT_ALPHA_UNENCODED)
flags |= FWHT_FL_ALPHA_IS_UNCOMPRESSED;
+ if (!(encoding & FWHT_FRAME_PCODED))
+ flags |= FWHT_FL_I_FRAME;
if (rf.height_div == 1)
flags |= FWHT_FL_CHROMA_FULL_HEIGHT;
if (rf.width_div == 1)
@@ -202,65 +230,70 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
p_hdr->ycbcr_enc = htonl(state->ycbcr_enc);
p_hdr->quantization = htonl(state->quantization);
p_hdr->size = htonl(cf.size);
- state->ref_frame.width = cf.width;
- state->ref_frame.height = cf.height;
return cf.size + sizeof(*p_hdr);
}
int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
{
- unsigned int size = state->width * state->height;
- unsigned int chroma_size = size;
- unsigned int i;
+ unsigned int i, j, k;
u32 flags;
- struct fwht_cframe_hdr *p_hdr;
struct fwht_cframe cf;
- u8 *p;
+ u8 *p, *ref_p;
unsigned int components_num = 3;
unsigned int version;
+ const struct v4l2_fwht_pixfmt_info *info;
+ unsigned int hdr_width_div, hdr_height_div;
if (!state->info)
return -EINVAL;
- p_hdr = (struct fwht_cframe_hdr *)p_in;
- cf.width = ntohl(p_hdr->width);
- cf.height = ntohl(p_hdr->height);
+ info = state->info;
- version = ntohl(p_hdr->version);
+ version = ntohl(state->header.version);
if (!version || version > FWHT_VERSION) {
pr_err("version %d is not supported, current version is %d\n",
version, FWHT_VERSION);
return -EINVAL;
}
- if (p_hdr->magic1 != FWHT_MAGIC1 ||
- p_hdr->magic2 != FWHT_MAGIC2 ||
- (cf.width & 7) || (cf.height & 7))
+ if (state->header.magic1 != FWHT_MAGIC1 ||
+ state->header.magic2 != FWHT_MAGIC2)
return -EINVAL;
/* TODO: support resolution changes */
- if (cf.width != state->width || cf.height != state->height)
+ if (ntohl(state->header.width) != state->visible_width ||
+ ntohl(state->header.height) != state->visible_height)
return -EINVAL;
- flags = ntohl(p_hdr->flags);
+ flags = ntohl(state->header.flags);
- if (version == FWHT_VERSION) {
+ if (version >= 2) {
+ if ((flags & FWHT_FL_PIXENC_MSK) != info->pixenc)
+ return -EINVAL;
components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
- FWHT_FL_COMPONENTS_NUM_OFFSET);
+ FWHT_FL_COMPONENTS_NUM_OFFSET);
}
- state->colorspace = ntohl(p_hdr->colorspace);
- state->xfer_func = ntohl(p_hdr->xfer_func);
- state->ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
- state->quantization = ntohl(p_hdr->quantization);
- cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr));
+ if (components_num != info->components_num)
+ return -EINVAL;
- if (!(flags & FWHT_FL_CHROMA_FULL_WIDTH))
- chroma_size /= 2;
- if (!(flags & FWHT_FL_CHROMA_FULL_HEIGHT))
- chroma_size /= 2;
+ state->colorspace = ntohl(state->header.colorspace);
+ state->xfer_func = ntohl(state->header.xfer_func);
+ state->ycbcr_enc = ntohl(state->header.ycbcr_enc);
+ state->quantization = ntohl(state->header.quantization);
+ cf.rlc_data = (__be16 *)p_in;
+ cf.size = ntohl(state->header.size);
- fwht_decode_frame(&cf, &state->ref_frame, flags, components_num);
+ hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
+ hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+ if (hdr_width_div != info->width_div ||
+ hdr_height_div != info->height_div)
+ return -EINVAL;
+
+ if (!fwht_decode_frame(&cf, &state->ref_frame, flags, components_num,
+ state->visible_width, state->visible_height,
+ state->coded_width))
+ return -EINVAL;
/*
* TODO - handle the case where the compressed stream encodes a
@@ -268,123 +301,226 @@ int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
*/
switch (state->info->id) {
case V4L2_PIX_FMT_GREY:
- memcpy(p_out, state->ref_frame.luma, size);
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
break;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YUV422P:
- memcpy(p_out, state->ref_frame.luma, size);
- p_out += size;
- memcpy(p_out, state->ref_frame.cb, chroma_size);
- p_out += chroma_size;
- memcpy(p_out, state->ref_frame.cr, chroma_size);
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
+
+ ref_p = state->ref_frame.cb;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ memcpy(p_out, ref_p, state->visible_width / 2);
+ p_out += state->stride / 2;
+ ref_p += state->coded_width / 2;
+ }
+ ref_p = state->ref_frame.cr;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ memcpy(p_out, ref_p, state->visible_width / 2);
+ p_out += state->stride / 2;
+ ref_p += state->coded_width / 2;
+ }
break;
case V4L2_PIX_FMT_YVU420:
- memcpy(p_out, state->ref_frame.luma, size);
- p_out += size;
- memcpy(p_out, state->ref_frame.cr, chroma_size);
- p_out += chroma_size;
- memcpy(p_out, state->ref_frame.cb, chroma_size);
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
+
+ ref_p = state->ref_frame.cr;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ memcpy(p_out, ref_p, state->visible_width / 2);
+ p_out += state->stride / 2;
+ ref_p += state->coded_width / 2;
+ }
+ ref_p = state->ref_frame.cb;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ memcpy(p_out, ref_p, state->visible_width / 2);
+ p_out += state->stride / 2;
+ ref_p += state->coded_width / 2;
+ }
break;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV24:
- memcpy(p_out, state->ref_frame.luma, size);
- p_out += size;
- for (i = 0, p = p_out; i < chroma_size; i++) {
- *p++ = state->ref_frame.cb[i];
- *p++ = state->ref_frame.cr[i];
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
+
+ k = 0;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.cb[k];
+ *p++ = state->ref_frame.cr[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_NV42:
- memcpy(p_out, state->ref_frame.luma, size);
- p_out += size;
- for (i = 0, p = p_out; i < chroma_size; i++) {
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.cb[i];
+ ref_p = state->ref_frame.luma;
+ for (i = 0; i < state->coded_height; i++) {
+ memcpy(p_out, ref_p, state->visible_width);
+ p_out += state->stride;
+ ref_p += state->coded_width;
+ }
+
+ k = 0;
+ for (i = 0; i < state->coded_height / 2; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.cb[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_YUYV:
- for (i = 0, p = p_out; i < size; i += 2) {
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i / 2];
- *p++ = state->ref_frame.luma[i + 1];
- *p++ = state->ref_frame.cr[i / 2];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k / 2];
+ *p++ = state->ref_frame.luma[k + 1];
+ *p++ = state->ref_frame.cr[k / 2];
+ k += 2;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_YVYU:
- for (i = 0, p = p_out; i < size; i += 2) {
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i / 2];
- *p++ = state->ref_frame.luma[i + 1];
- *p++ = state->ref_frame.cb[i / 2];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k / 2];
+ *p++ = state->ref_frame.luma[k + 1];
+ *p++ = state->ref_frame.cb[k / 2];
+ k += 2;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_UYVY:
- for (i = 0, p = p_out; i < size; i += 2) {
- *p++ = state->ref_frame.cb[i / 2];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i / 2];
- *p++ = state->ref_frame.luma[i + 1];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.cb[k / 2];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k / 2];
+ *p++ = state->ref_frame.luma[k + 1];
+ k += 2;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_VYUY:
- for (i = 0, p = p_out; i < size; i += 2) {
- *p++ = state->ref_frame.cr[i / 2];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i / 2];
- *p++ = state->ref_frame.luma[i + 1];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width / 2; j++) {
+ *p++ = state->ref_frame.cr[k / 2];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k / 2];
+ *p++ = state->ref_frame.luma[k + 1];
+ k += 2;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_HSV24:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_BGR24:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.cb[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.cb[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_XRGB32:
case V4L2_PIX_FMT_HSV32:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = 0;
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = 0;
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_XBGR32:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.cb[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i];
- *p++ = 0;
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.cb[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k];
+ *p++ = 0;
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_ARGB32:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.alpha[i];
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cb[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.alpha[k];
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cb[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
case V4L2_PIX_FMT_ABGR32:
- for (i = 0, p = p_out; i < size; i++) {
- *p++ = state->ref_frame.cb[i];
- *p++ = state->ref_frame.luma[i];
- *p++ = state->ref_frame.cr[i];
- *p++ = state->ref_frame.alpha[i];
+ k = 0;
+ for (i = 0; i < state->coded_height; i++) {
+ for (j = 0, p = p_out; j < state->coded_width; j++) {
+ *p++ = state->ref_frame.cb[k];
+ *p++ = state->ref_frame.luma[k];
+ *p++ = state->ref_frame.cr[k];
+ *p++ = state->ref_frame.alpha[k];
+ k++;
+ }
+ p_out += state->stride;
}
break;
default:
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.h b/drivers/media/platform/vicodec/codec-v4l2-fwht.h
index ed53e28d4f9c..aa6fa90a48be 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.h
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.h
@@ -19,12 +19,17 @@ struct v4l2_fwht_pixfmt_info {
unsigned int width_div;
unsigned int height_div;
unsigned int components_num;
+ unsigned int planes_num;
+ unsigned int pixenc;
};
struct v4l2_fwht_state {
const struct v4l2_fwht_pixfmt_info *info;
- unsigned int width;
- unsigned int height;
+ unsigned int visible_width;
+ unsigned int visible_height;
+ unsigned int coded_width;
+ unsigned int coded_height;
+ unsigned int stride;
unsigned int gop_size;
unsigned int gop_cnt;
u16 i_frame_qp;
@@ -36,11 +41,17 @@ struct v4l2_fwht_state {
enum v4l2_quantization quantization;
struct fwht_raw_frame ref_frame;
+ struct fwht_cframe_hdr header;
u8 *compressed_frame;
};
const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat);
const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx);
+const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div,
+ u32 height_div,
+ u32 components_num,
+ u32 pixenc,
+ unsigned int start_idx);
int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out);
int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out);
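[Editor's note: the split of width/height into visible_*/coded_* fields plus an explicit stride is what drives the row-by-row copies in v4l2_fwht_decode() above: only the visible columns of each coded line are copied out, one row at a time. A minimal sketch of that pattern, assuming kernel types; the helper name copy_plane_rows() is hypothetical, for illustration only:

	static void copy_plane_rows(u8 *dst, const u8 *src,
				    unsigned int visible_width,
				    unsigned int rows,
				    unsigned int stride,
				    unsigned int coded_width)
	{
		unsigned int i;

		for (i = 0; i < rows; i++) {
			/* crop the padding columns on the right */
			memcpy(dst, src, visible_width);
			dst += stride;		/* destination line pitch */
			src += coded_width;	/* full coded line in source */
		}
	}
]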
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 0d7876f5acf0..d7636fe9e174 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -61,7 +61,7 @@ struct pixfmt_info {
};
static const struct v4l2_fwht_pixfmt_info pixfmt_fwht = {
- V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0
+ V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0, 1
};
static void vicodec_dev_release(struct device *dev)
@@ -75,8 +75,10 @@ static struct platform_device vicodec_pdev = {
/* Per-queue, driver-specific private data */
struct vicodec_q_data {
- unsigned int width;
- unsigned int height;
+ unsigned int coded_width;
+ unsigned int coded_height;
+ unsigned int visible_width;
+ unsigned int visible_height;
unsigned int sizeimage;
unsigned int sequence;
const struct v4l2_fwht_pixfmt_info *info;
@@ -122,10 +124,12 @@ struct vicodec_ctx {
u32 cur_buf_offset;
u32 comp_max_size;
u32 comp_size;
+ u32 header_size;
u32 comp_magic_cnt;
- u32 comp_frame_size;
bool comp_has_frame;
bool comp_has_next_frame;
+ bool first_source_change_sent;
+ bool source_changed;
};
static inline struct vicodec_ctx *file2ctx(struct file *file)
@@ -182,6 +186,10 @@ static int device_process(struct vicodec_ctx *ctx,
return ret;
vb2_set_plane_payload(&dst_vb->vb2_buf, 0, ret);
} else {
+ unsigned int comp_frame_size = ntohl(ctx->state.header.size);
+
+ if (comp_frame_size > ctx->comp_max_size)
+ return -EINVAL;
state->info = q_dst->info;
ret = v4l2_fwht_decode(state, p_src, p_dst);
if (ret < 0)
@@ -190,18 +198,8 @@ static int device_process(struct vicodec_ctx *ctx,
}
dst_vb->sequence = q_dst->sequence++;
- dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
-
- if (src_vb->flags & V4L2_BUF_FLAG_TIMECODE)
- dst_vb->timecode = src_vb->timecode;
- dst_vb->field = src_vb->field;
dst_vb->flags &= ~V4L2_BUF_FLAG_LAST;
- dst_vb->flags |= src_vb->flags &
- (V4L2_BUF_FLAG_TIMECODE |
- V4L2_BUF_FLAG_KEYFRAME |
- V4L2_BUF_FLAG_PFRAME |
- V4L2_BUF_FLAG_BFRAME |
- V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+ v4l2_m2m_buf_copy_metadata(src_vb, dst_vb, !ctx->is_enc);
return 0;
}
@@ -209,6 +207,63 @@ static int device_process(struct vicodec_ctx *ctx,
/*
* mem2mem callbacks
*/
+static enum vb2_buffer_state get_next_header(struct vicodec_ctx *ctx,
+ u8 **pp, u32 sz)
+{
+ static const u8 magic[] = {
+ 0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff
+ };
+ u8 *p = *pp;
+ u32 state;
+ u8 *header = (u8 *)&ctx->state.header;
+
+ state = VB2_BUF_STATE_DONE;
+
+ if (!ctx->header_size) {
+ state = VB2_BUF_STATE_ERROR;
+ for (; p < *pp + sz; p++) {
+ u32 copy;
+
+ p = memchr(p, magic[ctx->comp_magic_cnt],
+ *pp + sz - p);
+ if (!p) {
+ ctx->comp_magic_cnt = 0;
+ p = *pp + sz;
+ break;
+ }
+ copy = sizeof(magic) - ctx->comp_magic_cnt;
+ if (*pp + sz - p < copy)
+ copy = *pp + sz - p;
+
+ memcpy(header + ctx->comp_magic_cnt, p, copy);
+ ctx->comp_magic_cnt += copy;
+ if (!memcmp(header, magic, ctx->comp_magic_cnt)) {
+ p += copy;
+ state = VB2_BUF_STATE_DONE;
+ break;
+ }
+ ctx->comp_magic_cnt = 0;
+ }
+ if (ctx->comp_magic_cnt < sizeof(magic)) {
+ *pp = p;
+ return state;
+ }
+ ctx->header_size = sizeof(magic);
+ }
+
+ if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
+ u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->header_size;
+
+ if (*pp + sz - p < copy)
+ copy = *pp + sz - p;
+
+ memcpy(header + ctx->header_size, p, copy);
+ p += copy;
+ ctx->header_size += copy;
+ }
+ *pp = p;
+ return state;
+}
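[Editor's note: get_next_header() is deliberately resumable: comp_magic_cnt and header_size live in the context, so the 8-byte magic and the remainder of the header may straddle source buffers. A sketch of the calling pattern, one compressed source buffer per call; the wrapper name header_complete() is illustrative only:

	static bool header_complete(struct vicodec_ctx *ctx, u8 *buf, u32 len)
	{
		u8 *p = buf;

		/* scan state (comp_magic_cnt, header_size) persists in ctx */
		get_next_header(ctx, &p, len);

		return ctx->header_size == sizeof(struct fwht_cframe_hdr);
	}
]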
/* device_run() - prepares and starts the device */
static void device_run(void *priv)
@@ -249,6 +304,7 @@ static void device_run(void *priv)
}
v4l2_m2m_buf_done(dst_buf, state);
ctx->comp_size = 0;
+ ctx->header_size = 0;
ctx->comp_magic_cnt = 0;
ctx->comp_has_frame = false;
spin_unlock(ctx->lock);
@@ -273,6 +329,96 @@ static void job_remove_src_buf(struct vicodec_ctx *ctx, u32 state)
spin_unlock(ctx->lock);
}
+static const struct v4l2_fwht_pixfmt_info *
+info_from_header(const struct fwht_cframe_hdr *p_hdr)
+{
+ unsigned int flags = ntohl(p_hdr->flags);
+ unsigned int width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
+ unsigned int height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+ unsigned int components_num = 3;
+ unsigned int pixenc = 0;
+ unsigned int version = ntohl(p_hdr->version);
+
+ if (version >= 2) {
+ components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
+ FWHT_FL_COMPONENTS_NUM_OFFSET);
+ pixenc = (flags & FWHT_FL_PIXENC_MSK);
+ }
+ return v4l2_fwht_default_fmt(width_div, height_div,
+ components_num, pixenc, 0);
+}
+
+static bool is_header_valid(const struct fwht_cframe_hdr *p_hdr)
+{
+ const struct v4l2_fwht_pixfmt_info *info;
+ unsigned int w = ntohl(p_hdr->width);
+ unsigned int h = ntohl(p_hdr->height);
+ unsigned int version = ntohl(p_hdr->version);
+ unsigned int flags = ntohl(p_hdr->flags);
+
+ if (!version || version > FWHT_VERSION)
+ return false;
+
+ if (w < MIN_WIDTH || w > MAX_WIDTH || h < MIN_HEIGHT || h > MAX_HEIGHT)
+ return false;
+
+ if (version >= 2) {
+ unsigned int components_num = 1 +
+ ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
+ FWHT_FL_COMPONENTS_NUM_OFFSET);
+ unsigned int pixenc = flags & FWHT_FL_PIXENC_MSK;
+
+ if (components_num == 0 || components_num > 4 || !pixenc)
+ return false;
+ }
+
+ info = info_from_header(p_hdr);
+ if (!info)
+ return false;
+ return true;
+}
+
+static void update_capture_data_from_header(struct vicodec_ctx *ctx)
+{
+ struct vicodec_q_data *q_dst = get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ const struct fwht_cframe_hdr *p_hdr = &ctx->state.header;
+ const struct v4l2_fwht_pixfmt_info *info = info_from_header(p_hdr);
+ unsigned int flags = ntohl(p_hdr->flags);
+ unsigned int hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
+ unsigned int hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+
+ q_dst->info = info;
+ q_dst->visible_width = ntohl(p_hdr->width);
+ q_dst->visible_height = ntohl(p_hdr->height);
+ q_dst->coded_width = vic_round_dim(q_dst->visible_width, hdr_width_div);
+ q_dst->coded_height = vic_round_dim(q_dst->visible_height,
+ hdr_height_div);
+
+ q_dst->sizeimage = q_dst->coded_width * q_dst->coded_height *
+ q_dst->info->sizeimage_mult / q_dst->info->sizeimage_div;
+ ctx->state.colorspace = ntohl(p_hdr->colorspace);
+
+ ctx->state.xfer_func = ntohl(p_hdr->xfer_func);
+ ctx->state.ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
+ ctx->state.quantization = ntohl(p_hdr->quantization);
+}
+
+static void set_last_buffer(struct vb2_v4l2_buffer *dst_buf,
+ const struct vb2_v4l2_buffer *src_buf,
+ struct vicodec_ctx *ctx)
+{
+ struct vicodec_q_data *q_dst = get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ dst_buf->sequence = q_dst->sequence++;
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, !ctx->is_enc);
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+}
+
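[Editor's note: set_last_buffer() together with the V4L2_EVENT_SOURCE_CHANGE queueing in job_ready() below implements the standard V4L2 decoder resolution-change handshake. On the userspace side the sequence looks roughly like this (sketch only; assumes <sys/ioctl.h> and <linux/videodev2.h>, with error handling and queue re-negotiation elided):

	struct v4l2_event ev;

	ioctl(fd, VIDIOC_DQEVENT, &ev);
	if (ev.type == V4L2_EVENT_SOURCE_CHANGE &&
	    (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION)) {
		/* drain CAPTURE until a buffer with V4L2_BUF_FLAG_LAST,
		 * then STREAMOFF, re-query the format and restart */
	}
]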
static int job_ready(void *priv)
{
static const u8 magic[] = {
@@ -284,7 +430,16 @@ static int job_ready(void *priv)
u8 *p;
u32 sz;
u32 state;
-
+ struct vicodec_q_data *q_dst = get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ unsigned int flags;
+ unsigned int hdr_width_div;
+ unsigned int hdr_height_div;
+ unsigned int max_to_copy;
+ unsigned int comp_frame_size;
+
+ if (ctx->source_changed)
+ return 0;
if (ctx->is_enc || ctx->comp_has_frame)
return 1;
@@ -299,59 +454,27 @@ restart:
state = VB2_BUF_STATE_DONE;
- if (!ctx->comp_size) {
- state = VB2_BUF_STATE_ERROR;
- for (; p < p_src + sz; p++) {
- u32 copy;
-
- p = memchr(p, magic[ctx->comp_magic_cnt],
- p_src + sz - p);
- if (!p) {
- ctx->comp_magic_cnt = 0;
- break;
- }
- copy = sizeof(magic) - ctx->comp_magic_cnt;
- if (p_src + sz - p < copy)
- copy = p_src + sz - p;
-
- memcpy(ctx->state.compressed_frame + ctx->comp_magic_cnt,
- p, copy);
- ctx->comp_magic_cnt += copy;
- if (!memcmp(ctx->state.compressed_frame, magic,
- ctx->comp_magic_cnt)) {
- p += copy;
- state = VB2_BUF_STATE_DONE;
- break;
- }
- ctx->comp_magic_cnt = 0;
- }
- if (ctx->comp_magic_cnt < sizeof(magic)) {
+ if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
+ state = get_next_header(ctx, &p, p_src + sz - p);
+ if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
job_remove_src_buf(ctx, state);
goto restart;
}
- ctx->comp_size = sizeof(magic);
}
- if (ctx->comp_size < sizeof(struct fwht_cframe_hdr)) {
- struct fwht_cframe_hdr *p_hdr =
- (struct fwht_cframe_hdr *)ctx->state.compressed_frame;
- u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->comp_size;
- if (copy > p_src + sz - p)
- copy = p_src + sz - p;
- memcpy(ctx->state.compressed_frame + ctx->comp_size,
- p, copy);
- p += copy;
- ctx->comp_size += copy;
- if (ctx->comp_size < sizeof(struct fwht_cframe_hdr)) {
- job_remove_src_buf(ctx, state);
- goto restart;
- }
- ctx->comp_frame_size = ntohl(p_hdr->size) + sizeof(*p_hdr);
- if (ctx->comp_frame_size > ctx->comp_max_size)
- ctx->comp_frame_size = ctx->comp_max_size;
- }
- if (ctx->comp_size < ctx->comp_frame_size) {
- u32 copy = ctx->comp_frame_size - ctx->comp_size;
+ comp_frame_size = ntohl(ctx->state.header.size);
+
+	/*
+	 * The currently scanned frame might be the first frame of a new
+	 * resolution, so its size might be larger than ctx->comp_max_size.
+	 * In that case it is copied up to the current buffer capacity and
+	 * the copy continues after a large enough buffer is allocated on
+	 * restreaming.
+	 */
+ max_to_copy = min(comp_frame_size, ctx->comp_max_size);
+
+ if (ctx->comp_size < max_to_copy) {
+ u32 copy = max_to_copy - ctx->comp_size;
if (copy > p_src + sz - p)
copy = p_src + sz - p;
@@ -360,15 +483,17 @@ restart:
p, copy);
p += copy;
ctx->comp_size += copy;
- if (ctx->comp_size < ctx->comp_frame_size) {
+ if (ctx->comp_size < max_to_copy) {
job_remove_src_buf(ctx, state);
goto restart;
}
}
ctx->cur_buf_offset = p - p_src;
- ctx->comp_has_frame = true;
+ if (ctx->comp_size == comp_frame_size)
+ ctx->comp_has_frame = true;
ctx->comp_has_next_frame = false;
- if (sz - ctx->cur_buf_offset >= sizeof(struct fwht_cframe_hdr)) {
+ if (ctx->comp_has_frame && sz - ctx->cur_buf_offset >=
+ sizeof(struct fwht_cframe_hdr)) {
struct fwht_cframe_hdr *p_hdr = (struct fwht_cframe_hdr *)p;
u32 frame_size = ntohl(p_hdr->size);
u32 remaining = sz - ctx->cur_buf_offset - sizeof(*p_hdr);
@@ -376,6 +501,36 @@ restart:
if (!memcmp(p, magic, sizeof(magic)))
ctx->comp_has_next_frame = remaining >= frame_size;
}
+	/*
+	 * If the header is invalid, device_run() will just drop the frame
+	 * with an error.
+	 */
+ if (!is_header_valid(&ctx->state.header) && ctx->comp_has_frame)
+ return 1;
+ flags = ntohl(ctx->state.header.flags);
+ hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2;
+ hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2;
+
+ if (ntohl(ctx->state.header.width) != q_dst->visible_width ||
+ ntohl(ctx->state.header.height) != q_dst->visible_height ||
+ !q_dst->info ||
+ hdr_width_div != q_dst->info->width_div ||
+ hdr_height_div != q_dst->info->height_div) {
+ static const struct v4l2_event rs_event = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ struct vb2_v4l2_buffer *dst_buf =
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ update_capture_data_from_header(ctx);
+ ctx->first_source_change_sent = true;
+ v4l2_event_queue_fh(&ctx->fh, &rs_event);
+ set_last_buffer(dst_buf, src_buf, ctx);
+ ctx->source_changed = true;
+ return 0;
+ }
return 1;
}
@@ -403,9 +558,10 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out)
+static int enum_fmt(struct v4l2_fmtdesc *f, struct vicodec_ctx *ctx,
+ bool is_out)
{
- bool is_uncomp = (is_enc && is_out) || (!is_enc && !is_out);
+ bool is_uncomp = (ctx->is_enc && is_out) || (!ctx->is_enc && !is_out);
if (V4L2_TYPE_IS_MULTIPLANAR(f->type) && !multiplanar)
return -EINVAL;
@@ -414,8 +570,16 @@ static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out)
if (is_uncomp) {
const struct v4l2_fwht_pixfmt_info *info =
- v4l2_fwht_get_pixfmt(f->index);
+ get_q_data(ctx, f->type)->info;
+ if (!info || ctx->is_enc)
+ info = v4l2_fwht_get_pixfmt(f->index);
+ else
+ info = v4l2_fwht_default_fmt(info->width_div,
+ info->height_div,
+ info->components_num,
+ info->pixenc,
+ f->index);
if (!info)
return -EINVAL;
f->pixelformat = info->id;
@@ -432,7 +596,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
{
struct vicodec_ctx *ctx = file2ctx(file);
- return enum_fmt(f, ctx->is_enc, false);
+ return enum_fmt(f, ctx, false);
}
static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
@@ -440,7 +604,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
{
struct vicodec_ctx *ctx = file2ctx(file);
- return enum_fmt(f, ctx->is_enc, true);
+ return enum_fmt(f, ctx, true);
}
static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
@@ -458,17 +622,21 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
q_data = get_q_data(ctx, f->type);
info = q_data->info;
+ if (!info)
+ info = v4l2_fwht_get_pixfmt(0);
+
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (multiplanar)
return -EINVAL;
pix = &f->fmt.pix;
- pix->width = q_data->width;
- pix->height = q_data->height;
+ pix->width = q_data->coded_width;
+ pix->height = q_data->coded_height;
pix->field = V4L2_FIELD_NONE;
pix->pixelformat = info->id;
- pix->bytesperline = q_data->width * info->bytesperline_mult;
+ pix->bytesperline = q_data->coded_width *
+ info->bytesperline_mult;
pix->sizeimage = q_data->sizeimage;
pix->colorspace = ctx->state.colorspace;
pix->xfer_func = ctx->state.xfer_func;
@@ -481,13 +649,13 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
if (!multiplanar)
return -EINVAL;
pix_mp = &f->fmt.pix_mp;
- pix_mp->width = q_data->width;
- pix_mp->height = q_data->height;
+ pix_mp->width = q_data->coded_width;
+ pix_mp->height = q_data->coded_height;
pix_mp->field = V4L2_FIELD_NONE;
pix_mp->pixelformat = info->id;
pix_mp->num_planes = 1;
pix_mp->plane_fmt[0].bytesperline =
- q_data->width * info->bytesperline_mult;
+ q_data->coded_width * info->bytesperline_mult;
pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage;
pix_mp->colorspace = ctx->state.colorspace;
pix_mp->xfer_func = ctx->state.xfer_func;
@@ -528,8 +696,13 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
pix = &f->fmt.pix;
if (pix->pixelformat != V4L2_PIX_FMT_FWHT)
info = find_fmt(pix->pixelformat);
- pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH) & ~7;
- pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+
+ pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
+ pix->width = vic_round_dim(pix->width, info->width_div);
+
+ pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
+ pix->height = vic_round_dim(pix->height, info->height_div);
+
pix->field = V4L2_FIELD_NONE;
pix->bytesperline =
pix->width * info->bytesperline_mult;
@@ -545,9 +718,14 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
if (pix_mp->pixelformat != V4L2_PIX_FMT_FWHT)
info = find_fmt(pix_mp->pixelformat);
pix_mp->num_planes = 1;
- pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH) & ~7;
- pix_mp->height =
- clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+
+ pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH);
+ pix_mp->width = vic_round_dim(pix_mp->width, info->width_div);
+
+ pix_mp->height = clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT);
+ pix_mp->height = vic_round_dim(pix_mp->height,
+ info->height_div);
+
pix_mp->field = V4L2_FIELD_NONE;
plane->bytesperline =
pix_mp->width * info->bytesperline_mult;
@@ -657,9 +835,10 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
pix = &f->fmt.pix;
if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
fmt_changed =
+ !q_data->info ||
q_data->info->id != pix->pixelformat ||
- q_data->width != pix->width ||
- q_data->height != pix->height;
+ q_data->coded_width != pix->width ||
+ q_data->coded_height != pix->height;
if (vb2_is_busy(vq) && fmt_changed)
return -EBUSY;
@@ -668,8 +847,8 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
q_data->info = &pixfmt_fwht;
else
q_data->info = find_fmt(pix->pixelformat);
- q_data->width = pix->width;
- q_data->height = pix->height;
+ q_data->coded_width = pix->width;
+ q_data->coded_height = pix->height;
q_data->sizeimage = pix->sizeimage;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
@@ -677,9 +856,10 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
pix_mp = &f->fmt.pix_mp;
if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
fmt_changed =
+ !q_data->info ||
q_data->info->id != pix_mp->pixelformat ||
- q_data->width != pix_mp->width ||
- q_data->height != pix_mp->height;
+ q_data->coded_width != pix_mp->width ||
+ q_data->coded_height != pix_mp->height;
if (vb2_is_busy(vq) && fmt_changed)
return -EBUSY;
@@ -688,17 +868,24 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
q_data->info = &pixfmt_fwht;
else
q_data->info = find_fmt(pix_mp->pixelformat);
- q_data->width = pix_mp->width;
- q_data->height = pix_mp->height;
+ q_data->coded_width = pix_mp->width;
+ q_data->coded_height = pix_mp->height;
q_data->sizeimage = pix_mp->plane_fmt[0].sizeimage;
break;
default:
return -EINVAL;
}
+ if (q_data->visible_width > q_data->coded_width)
+ q_data->visible_width = q_data->coded_width;
+ if (q_data->visible_height > q_data->coded_height)
+ q_data->visible_height = q_data->coded_height;
+
dprintk(ctx->dev,
- "Setting format for type %d, wxh: %dx%d, fourcc: %08x\n",
- f->type, q_data->width, q_data->height, q_data->info->id);
+ "Setting format for type %d, coded wxh: %dx%d, visible wxh: %dx%d, fourcc: %08x\n",
+ f->type, q_data->coded_width, q_data->coded_height,
+ q_data->visible_width, q_data->visible_height,
+ q_data->info->id);
return 0;
}
@@ -753,6 +940,84 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return ret;
}
+static int vidioc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct vicodec_q_data *q_data;
+ enum v4l2_buf_type valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ enum v4l2_buf_type valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ if (multiplanar) {
+ valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ }
+
+ if (s->type != valid_cap_type && s->type != valid_out_type)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+	/*
+	 * The encoder supports only cropping on the OUTPUT buffer and the
+	 * decoder supports only composing on the CAPTURE buffer.
+	 */
+ if ((ctx->is_enc && s->type == valid_out_type) ||
+ (!ctx->is_enc && s->type == valid_cap_type)) {
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ return 0;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->coded_width;
+ s->r.height = q_data->coded_height;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int vidioc_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct vicodec_q_data *q_data;
+ enum v4l2_buf_type out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ if (multiplanar)
+ out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+
+ if (s->type != out_type)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (!ctx->is_enc || s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ s->r.left = 0;
+ s->r.top = 0;
+ q_data->visible_width = clamp(s->r.width, MIN_WIDTH,
+ q_data->coded_width);
+ s->r.width = q_data->visible_width;
+ q_data->visible_height = clamp(s->r.height, MIN_HEIGHT,
+ q_data->coded_height);
+ s->r.height = q_data->visible_height;
+ return 0;
+}
+
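[Editor's note: vidioc_s_selection() above follows the usual V4L2 selection contract: the driver clamps the requested rectangle and writes back what it actually applied. A matching userspace call might look like this (sketch; single-planar encoder OUTPUT queue assumed, includes elided):

	struct v4l2_selection sel = {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.target = V4L2_SEL_TGT_CROP,
		.r = { .left = 0, .top = 0, .width = 1920, .height = 1080 },
	};

	if (ioctl(fd, VIDIOC_S_SELECTION, &sel) == 0)
		/* sel.r now holds the clamped visible rectangle */
		printf("visible %ux%u\n", sel.r.width, sel.r.height);
]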
static void vicodec_mark_last_buf(struct vicodec_ctx *ctx)
{
static const struct v4l2_event eos_event = {
@@ -853,7 +1118,13 @@ static int vicodec_enum_framesizes(struct file *file, void *fh,
static int vicodec_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
+ struct vicodec_ctx *ctx = container_of(fh, struct vicodec_ctx, fh);
+
switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ if (ctx->is_enc)
+ return -EINVAL;
+ /* fall through */
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 0, NULL);
default:
@@ -895,6 +1166,9 @@ static const struct v4l2_ioctl_ops vicodec_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+
.vidioc_try_encoder_cmd = vicodec_try_encoder_cmd,
.vidioc_encoder_cmd = vicodec_encoder_cmd,
.vidioc_try_decoder_cmd = vicodec_try_decoder_cmd,
@@ -960,7 +1234,71 @@ static void vicodec_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int sz = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ u8 *p_src = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ u8 *p = p_src;
+ struct vb2_queue *vq_out = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ struct vb2_queue *vq_cap = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ bool header_valid = false;
+ static const struct v4l2_event rs_event = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+ /* buf_queue handles only the first source change event */
+ if (ctx->first_source_change_sent) {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ return;
+ }
+
+	/*
+	 * If both queues are streaming, the source change event is
+	 * handled in job_ready().
+	 */
+ if (vb2_is_streaming(vq_cap) && vb2_is_streaming(vq_out)) {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ return;
+ }
+
+	/*
+	 * A source change event is relevant only for the decoder, which
+	 * detects it in the compressed stream.
+	 */
+ if (ctx->is_enc || !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ return;
+ }
+
+ do {
+ enum vb2_buffer_state state =
+ get_next_header(ctx, &p, p_src + sz - p);
+
+ if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
+ v4l2_m2m_buf_done(vbuf, state);
+ return;
+ }
+ header_valid = is_header_valid(&ctx->state.header);
+		/*
+		 * p points right after the end of the header in the
+		 * buffer. If the header is invalid, set p to point to
+		 * the next byte after the start of the header.
+		 */
+ if (!header_valid) {
+ p = p - sizeof(struct fwht_cframe_hdr) + 1;
+ if (p < p_src)
+ p = p_src;
+ ctx->header_size = 0;
+ ctx->comp_magic_cnt = 0;
+ }
+
+ } while (!header_valid);
+
+ ctx->cur_buf_offset = p - p_src;
+ update_capture_data_from_header(ctx);
+ ctx->first_source_change_sent = true;
+ v4l2_event_queue_fh(&ctx->fh, &rs_event);
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
@@ -988,47 +1326,70 @@ static int vicodec_start_streaming(struct vb2_queue *q,
struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
struct vicodec_q_data *q_data = get_q_data(ctx, q->type);
struct v4l2_fwht_state *state = &ctx->state;
- unsigned int size = q_data->width * q_data->height;
const struct v4l2_fwht_pixfmt_info *info = q_data->info;
- unsigned int chroma_div = info->width_div * info->height_div;
+ unsigned int size = q_data->coded_width * q_data->coded_height;
+ unsigned int chroma_div;
unsigned int total_planes_size;
+ u8 *new_comp_frame;
- /*
- * we don't know ahead how many components are in the encoding type
- * V4L2_PIX_FMT_FWHT, so we will allocate space for 4 planes.
- */
- if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4)
+ if (!info)
+ return -EINVAL;
+
+ chroma_div = info->width_div * info->height_div;
+ q_data->sequence = 0;
+
+ ctx->last_src_buf = NULL;
+ ctx->last_dst_buf = NULL;
+ state->gop_cnt = 0;
+
+ if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
+ (!V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc))
+ return 0;
+
+ if (info->id == V4L2_PIX_FMT_FWHT) {
+ vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
+ return -EINVAL;
+ }
+ if (info->components_num == 4)
total_planes_size = 2 * size + 2 * (size / chroma_div);
else if (info->components_num == 3)
total_planes_size = size + 2 * (size / chroma_div);
else
total_planes_size = size;
- q_data->sequence = 0;
+ state->visible_width = q_data->visible_width;
+ state->visible_height = q_data->visible_height;
+ state->coded_width = q_data->coded_width;
+ state->coded_height = q_data->coded_height;
+ state->stride = q_data->coded_width *
+ info->bytesperline_mult;
- if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
- if (!ctx->is_enc) {
- state->width = q_data->width;
- state->height = q_data->height;
- }
- return 0;
- }
-
- if (ctx->is_enc) {
- state->width = q_data->width;
- state->height = q_data->height;
- }
- state->ref_frame.width = state->ref_frame.height = 0;
state->ref_frame.luma = kvmalloc(total_planes_size, GFP_KERNEL);
- ctx->comp_max_size = total_planes_size + sizeof(struct fwht_cframe_hdr);
- state->compressed_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL);
- if (!state->ref_frame.luma || !state->compressed_frame) {
+ ctx->comp_max_size = total_planes_size;
+ new_comp_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL);
+
+ if (!state->ref_frame.luma || !new_comp_frame) {
kvfree(state->ref_frame.luma);
- kvfree(state->compressed_frame);
+ kvfree(new_comp_frame);
vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
return -ENOMEM;
}
- if (info->id == V4L2_PIX_FMT_FWHT || info->components_num >= 3) {
+	/*
+	 * If state->compressed_frame was already allocated, it contains
+	 * the data of the first frame at the new resolution.
+	 */
+ if (state->compressed_frame) {
+ if (ctx->comp_size > ctx->comp_max_size)
+ ctx->comp_size = ctx->comp_max_size;
+
+ memcpy(new_comp_frame,
+ state->compressed_frame, ctx->comp_size);
+ }
+
+ kvfree(state->compressed_frame);
+ state->compressed_frame = new_comp_frame;
+
+ if (info->components_num >= 3) {
state->ref_frame.cb = state->ref_frame.luma + size;
state->ref_frame.cr = state->ref_frame.cb + size / chroma_div;
} else {
@@ -1036,20 +1397,11 @@ static int vicodec_start_streaming(struct vb2_queue *q,
state->ref_frame.cr = NULL;
}
- if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4)
+ if (info->components_num == 4)
state->ref_frame.alpha =
state->ref_frame.cr + size / chroma_div;
else
state->ref_frame.alpha = NULL;
-
- ctx->last_src_buf = NULL;
- ctx->last_dst_buf = NULL;
- state->gop_cnt = 0;
- ctx->cur_buf_offset = 0;
- ctx->comp_size = 0;
- ctx->comp_magic_cnt = 0;
- ctx->comp_has_frame = false;
-
return 0;
}
@@ -1059,11 +1411,20 @@ static void vicodec_stop_streaming(struct vb2_queue *q)
vicodec_return_bufs(q, VB2_BUF_STATE_ERROR);
- if (!V4L2_TYPE_IS_OUTPUT(q->type))
- return;
-
- kvfree(ctx->state.ref_frame.luma);
- kvfree(ctx->state.compressed_frame);
+ if ((!V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
+ (V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) {
+ kvfree(ctx->state.ref_frame.luma);
+ ctx->comp_max_size = 0;
+ ctx->source_changed = false;
+ }
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) {
+ ctx->cur_buf_offset = 0;
+ ctx->comp_size = 0;
+ ctx->header_size = 0;
+ ctx->comp_magic_cnt = 0;
+ ctx->comp_has_frame = 0;
+ ctx->comp_has_next_frame = 0;
+ }
}
static const struct vb2_ops vicodec_qops = {
@@ -1204,8 +1565,10 @@ static int vicodec_open(struct file *file)
ctx->q_data[V4L2_M2M_SRC].info =
ctx->is_enc ? v4l2_fwht_get_pixfmt(0) : &pixfmt_fwht;
- ctx->q_data[V4L2_M2M_SRC].width = 1280;
- ctx->q_data[V4L2_M2M_SRC].height = 720;
+ ctx->q_data[V4L2_M2M_SRC].coded_width = 1280;
+ ctx->q_data[V4L2_M2M_SRC].coded_height = 720;
+ ctx->q_data[V4L2_M2M_SRC].visible_width = 1280;
+ ctx->q_data[V4L2_M2M_SRC].visible_height = 720;
size = 1280 * 720 * ctx->q_data[V4L2_M2M_SRC].info->sizeimage_mult /
ctx->q_data[V4L2_M2M_SRC].info->sizeimage_div;
if (ctx->is_enc)
@@ -1213,16 +1576,17 @@ static int vicodec_open(struct file *file)
else
ctx->q_data[V4L2_M2M_SRC].sizeimage =
size + sizeof(struct fwht_cframe_hdr);
- ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
- ctx->q_data[V4L2_M2M_DST].info =
- ctx->is_enc ? &pixfmt_fwht : v4l2_fwht_get_pixfmt(0);
- size = 1280 * 720 * ctx->q_data[V4L2_M2M_DST].info->sizeimage_mult /
- ctx->q_data[V4L2_M2M_DST].info->sizeimage_div;
- if (ctx->is_enc)
- ctx->q_data[V4L2_M2M_DST].sizeimage =
- size + sizeof(struct fwht_cframe_hdr);
- else
- ctx->q_data[V4L2_M2M_DST].sizeimage = size;
+ if (ctx->is_enc) {
+ ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
+ ctx->q_data[V4L2_M2M_DST].info = &pixfmt_fwht;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = 1280 * 720 *
+ ctx->q_data[V4L2_M2M_DST].info->sizeimage_mult /
+ ctx->q_data[V4L2_M2M_DST].info->sizeimage_div +
+ sizeof(struct fwht_cframe_hdr);
+ } else {
+ ctx->q_data[V4L2_M2M_DST].info = NULL;
+ }
+
ctx->state.colorspace = V4L2_COLORSPACE_REC709;
if (ctx->is_enc) {
@@ -1310,6 +1674,8 @@ static int vicodec_probe(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, "vicodec", sizeof(dev->mdev.model));
+ strscpy(dev->mdev.bus_info, "platform:vicodec",
+ sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
dev->v4l2_dev.mdev = &dev->mdev;
#endif
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index c33900e3c23e..0ba30756e1e4 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -263,6 +263,26 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
case MEDIA_BUS_FMT_JPEG_1X8:
case MEDIA_BUS_FMT_AHSV8888_1X32:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SBGGR14_1X14:
+ case MEDIA_BUS_FMT_SGBRG14_1X14:
+ case MEDIA_BUS_FMT_SGRBG14_1X14:
+ case MEDIA_BUS_FMT_SRGGB14_1X14:
+ case MEDIA_BUS_FMT_SBGGR16_1X16:
+ case MEDIA_BUS_FMT_SGBRG16_1X16:
+ case MEDIA_BUS_FMT_SGRBG16_1X16:
+ case MEDIA_BUS_FMT_SRGGB16_1X16:
break;
default:
sdformat->format.code = MEDIA_BUS_FMT_Y8_1X8;
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 89d9c4c21037..34dcaca45d8b 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* A virtual v4l2-mem2mem example device.
*
@@ -34,22 +35,34 @@
MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1.1");
+MODULE_VERSION("0.2");
MODULE_ALIAS("mem2mem_testdev");
-static unsigned debug;
+static unsigned int debug;
module_param(debug, uint, 0644);
-MODULE_PARM_DESC(debug, "activates debug info");
+MODULE_PARM_DESC(debug, "debug level");
+
+/* Default transaction time in msec */
+static unsigned int default_transtime = 40; /* Max 25 fps */
+module_param(default_transtime, uint, 0644);
+MODULE_PARM_DESC(default_transtime, "default transaction time in ms");
#define MIN_W 32
#define MIN_H 32
#define MAX_W 640
#define MAX_H 480
-#define DIM_ALIGN_MASK 7 /* 8-byte alignment for line length */
+
+/* Pixel alignment for non-Bayer formats */
+#define WIDTH_ALIGN 2
+#define HEIGHT_ALIGN 1
+
+/* Pixel alignment for Bayer formats */
+#define BAYER_WIDTH_ALIGN 2
+#define BAYER_HEIGHT_ALIGN 2
/* Flags that indicate a format can be used for capture/output */
-#define MEM2MEM_CAPTURE (1 << 0)
-#define MEM2MEM_OUTPUT (1 << 1)
+#define MEM2MEM_CAPTURE BIT(0)
+#define MEM2MEM_OUTPUT BIT(1)
#define MEM2MEM_NAME "vim2m"
@@ -58,18 +71,12 @@ MODULE_PARM_DESC(debug, "activates debug info");
/* In bytes, per queue */
#define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024)
-/* Default transaction time in msec */
-#define MEM2MEM_DEF_TRANSTIME 40
-#define MEM2MEM_COLOR_STEP (0xff >> 4)
-#define MEM2MEM_NUM_TILES 8
-
/* Flags that indicate processing mode */
-#define MEM2MEM_HFLIP (1 << 0)
-#define MEM2MEM_VFLIP (1 << 1)
-
-#define dprintk(dev, fmt, arg...) \
- v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+#define MEM2MEM_HFLIP BIT(0)
+#define MEM2MEM_VFLIP BIT(1)
+#define dprintk(dev, lvl, fmt, arg...) \
+ v4l2_dbg(lvl, debug, &(dev)->v4l2_dev, "%s: " fmt, __func__, ## arg)
static void vim2m_dev_release(struct device *dev)
{}
@@ -83,21 +90,46 @@ struct vim2m_fmt {
u32 fourcc;
int depth;
/* Types the format can be used for */
- u32 types;
+ u32 types;
};
static struct vim2m_fmt formats[] = {
{
- .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .fourcc = V4L2_PIX_FMT_RGB565, /* rrrrrggg gggbbbbb */
.depth = 16,
- /* Both capture and output format */
- .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
- },
- {
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* gggbbbbb rrrrrggg */
+ .depth = 16,
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .depth = 24,
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .depth = 24,
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ }, {
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
- /* Output-only format */
- .types = MEM2MEM_OUTPUT,
+ .types = MEM2MEM_CAPTURE,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .depth = 8,
+ .types = MEM2MEM_CAPTURE,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .depth = 8,
+ .types = MEM2MEM_CAPTURE,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .depth = 8,
+ .types = MEM2MEM_CAPTURE,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .depth = 8,
+ .types = MEM2MEM_CAPTURE,
},
};
@@ -120,14 +152,14 @@ enum {
#define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000)
#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001)
-static struct vim2m_fmt *find_format(struct v4l2_format *f)
+static struct vim2m_fmt *find_format(u32 fourcc)
{
struct vim2m_fmt *fmt;
unsigned int k;
for (k = 0; k < NUM_FORMATS; k++) {
fmt = &formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == fourcc)
break;
}
@@ -137,6 +169,24 @@ static struct vim2m_fmt *find_format(struct v4l2_format *f)
return &formats[k];
}
+static void get_alignment(u32 fourcc,
+ unsigned int *walign, unsigned int *halign)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ *walign = BAYER_WIDTH_ALIGN;
+ *halign = BAYER_HEIGHT_ALIGN;
+ return;
+ default:
+ *walign = WIDTH_ALIGN;
+ *halign = HEIGHT_ALIGN;
+ return;
+ }
+}
+
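[Editor's note: the alignments returned by get_alignment() are consumed in two places: as stepwise step_width/step_height in vidioc_enum_framesizes() and as round-down masks in vidioc_try_fmt(). The masking only works because both alignments are powers of two; a small sketch:

	unsigned int walign, halign, width = 641, height = 480;

	get_alignment(V4L2_PIX_FMT_SBGGR8, &walign, &halign);
	/* power-of-two round-down, e.g. 641 -> 640 with walign == 2 */
	width &= ~(walign - 1);
	height &= ~(halign - 1);
]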
struct vim2m_dev {
struct v4l2_device v4l2_dev;
struct video_device vfd;
@@ -146,9 +196,6 @@ struct vim2m_dev {
atomic_t num_inst;
struct mutex dev_mutex;
- spinlock_t irqlock;
-
- struct delayed_work work_run;
struct v4l2_m2m_dev *m2m_dev;
};
@@ -167,6 +214,10 @@ struct vim2m_ctx {
/* Transaction time (i.e. simulated processing time) in milliseconds */
u32 transtime;
+ struct mutex vb_mutex;
+ struct delayed_work work_run;
+ spinlock_t irqlock;
+
/* Abort requested by m2m */
int aborting;
@@ -188,7 +239,7 @@ static inline struct vim2m_ctx *file2ctx(struct file *file)
}
static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
- enum v4l2_buf_type type)
+ enum v4l2_buf_type type)
{
switch (type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -196,28 +247,221 @@ static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
return &ctx->q_data[V4L2_M2M_DST];
default:
- BUG();
+ return NULL;
+ }
+}
+
+static const char *type_name(enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return "Output";
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return "Capture";
+ default:
+ return "Invalid";
+ }
+}
+
+#define CLIP(__color) \
+ (u8)(((__color) > 0xff) ? 0xff : (((__color) < 0) ? 0 : (__color)))
+
+static void copy_line(struct vim2m_q_data *q_data_out,
+ u8 *src, u8 *dst, bool reverse)
+{
+ int x, depth = q_data_out->fmt->depth >> 3;
+
+ if (!reverse) {
+ memcpy(dst, src, q_data_out->width * depth);
+ } else {
+ for (x = 0; x < q_data_out->width >> 1; x++) {
+ memcpy(dst, src, depth);
+ memcpy(dst + depth, src - depth, depth);
+ src -= depth << 1;
+ dst += depth << 1;
+ }
+ return;
}
- return NULL;
}
+static void copy_two_pixels(struct vim2m_q_data *q_data_in,
+ struct vim2m_q_data *q_data_out,
+ u8 *src[2], u8 **dst, int ypos, bool reverse)
+{
+ struct vim2m_fmt *out = q_data_out->fmt;
+ struct vim2m_fmt *in = q_data_in->fmt;
+ u8 _r[2], _g[2], _b[2], *r, *g, *b;
+ int i;
+
+	/* Step 1: read two consecutive pixels from the src pointer */
+
+ r = _r;
+ g = _g;
+ b = _b;
+
+ switch (in->fourcc) {
+ case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */
+ for (i = 0; i < 2; i++) {
+ u16 pix = *(u16 *)(src[i]);
+
+ *r++ = (u8)(((pix & 0xf800) >> 11) << 3) | 0x07;
+ *g++ = (u8)((((pix & 0x07e0) >> 5)) << 2) | 0x03;
+ *b++ = (u8)((pix & 0x1f) << 3) | 0x07;
+ }
+ break;
+ case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */
+ for (i = 0; i < 2; i++) {
+ u16 pix = *(u16 *)(src[i]);
+
+ *r++ = (u8)(((0x00f8 & pix) >> 3) << 3) | 0x07;
+ *g++ = (u8)(((pix & 0x7) << 2) |
+ ((pix & 0xe000) >> 5)) | 0x03;
+ *b++ = (u8)(((pix & 0x1f00) >> 8) << 3) | 0x07;
+ }
+ break;
+ default:
+ case V4L2_PIX_FMT_RGB24:
+ for (i = 0; i < 2; i++) {
+ *r++ = src[i][0];
+ *g++ = src[i][1];
+ *b++ = src[i][2];
+ }
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ for (i = 0; i < 2; i++) {
+ *b++ = src[i][0];
+ *g++ = src[i][1];
+ *r++ = src[i][2];
+ }
+ break;
+ }
+
+	/* Step 2: store two consecutive pixels, reversing them if needed */
+
+ r = _r;
+ g = _g;
+ b = _b;
+
+ switch (out->fourcc) {
+ case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */
+ for (i = 0; i < 2; i++) {
+ u16 *pix = (u16 *)*dst;
+
+ *pix = ((*r << 8) & 0xf800) | ((*g << 3) & 0x07e0) |
+ (*b >> 3);
+
+ *dst += 2;
+ }
+ return;
+ case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */
+ for (i = 0; i < 2; i++) {
+ u16 *pix = (u16 *)*dst;
+ u8 green = *g++ >> 2;
+
+ *pix = ((green << 8) & 0xe000) | (green & 0x07) |
+ ((*b++ << 5) & 0x1f00) | ((*r++ & 0xf8));
+
+ *dst += 2;
+ }
+ return;
+ case V4L2_PIX_FMT_RGB24:
+ for (i = 0; i < 2; i++) {
+ *(*dst)++ = *r++;
+ *(*dst)++ = *g++;
+ *(*dst)++ = *b++;
+ }
+ return;
+ case V4L2_PIX_FMT_BGR24:
+ for (i = 0; i < 2; i++) {
+ *(*dst)++ = *b++;
+ *(*dst)++ = *g++;
+ *(*dst)++ = *r++;
+ }
+ return;
+ case V4L2_PIX_FMT_YUYV:
+ default:
+ {
+ u8 y, y1, u, v;
+
+ y = ((8453 * (*r) + 16594 * (*g) + 3223 * (*b)
+ + 524288) >> 15);
+ u = ((-4878 * (*r) - 9578 * (*g) + 14456 * (*b)
+ + 4210688) >> 15);
+ v = ((14456 * (*r++) - 12105 * (*g++) - 2351 * (*b++)
+ + 4210688) >> 15);
+ y1 = ((8453 * (*r) + 16594 * (*g) + 3223 * (*b)
+ + 524288) >> 15);
+
+ *(*dst)++ = y;
+ *(*dst)++ = u;
+
+ *(*dst)++ = y1;
+ *(*dst)++ = v;
+ return;
+ }
+ case V4L2_PIX_FMT_SBGGR8:
+ if (!(ypos & 1)) {
+ *(*dst)++ = *b;
+ *(*dst)++ = *++g;
+ } else {
+ *(*dst)++ = *g;
+ *(*dst)++ = *++r;
+ }
+ return;
+ case V4L2_PIX_FMT_SGBRG8:
+ if (!(ypos & 1)) {
+ *(*dst)++ = *g;
+ *(*dst)++ = *++b;
+ } else {
+ *(*dst)++ = *r;
+ *(*dst)++ = *++g;
+ }
+ return;
+ case V4L2_PIX_FMT_SGRBG8:
+ if (!(ypos & 1)) {
+ *(*dst)++ = *g;
+ *(*dst)++ = *++r;
+ } else {
+ *(*dst)++ = *b;
+ *(*dst)++ = *++g;
+ }
+ return;
+ case V4L2_PIX_FMT_SRGGB8:
+ if (!(ypos & 1)) {
+ *(*dst)++ = *r;
+ *(*dst)++ = *++g;
+ } else {
+ *(*dst)++ = *g;
+ *(*dst)++ = *++b;
+ }
+ return;
+ }
+}
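[Editor's note: the YUYV branch above is a Q15 fixed-point, limited-range BT.601-style RGB-to-YCbCr conversion: the multipliers are the floating-point coefficients scaled by 32768, with the +16 luma offset folded in as 524288 (16 << 15) and the +128 chroma offset plus half-unit rounding as 4210688 ((128 << 15) + (1 << 14)). A worked check for white, as an illustration:

	/* r = g = b = 255 should map to the nominal white level */
	unsigned int y = (8453 * 255 + 16594 * 255 + 3223 * 255 + 524288) >> 15;
	/* = 7733138 >> 15 = 235, the limited-range maximum for luma */
]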
static int device_process(struct vim2m_ctx *ctx,
struct vb2_v4l2_buffer *in_vb,
struct vb2_v4l2_buffer *out_vb)
{
struct vim2m_dev *dev = ctx->dev;
- struct vim2m_q_data *q_data;
- u8 *p_in, *p_out;
- int x, y, t, w;
- int tile_w, bytes_left;
- int width, height, bytesperline;
+ struct vim2m_q_data *q_data_in, *q_data_out;
+ u8 *p_in, *p_line, *p_in_x[2], *p, *p_out;
+ unsigned int width, height, bytesperline, bytes_per_pixel;
+ unsigned int x, y, y_in, y_out, x_int, x_fract, x_err, x_offset;
+ int start, end, step;
+
+ q_data_in = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (!q_data_in)
+ return 0;
+ bytesperline = (q_data_in->width * q_data_in->fmt->depth) >> 3;
+ bytes_per_pixel = q_data_in->fmt->depth >> 3;
- q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (!q_data_out)
+ return 0;
- width = q_data->width;
- height = q_data->height;
- bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ /* As we're doing scaling, use the output dimensions here */
+ height = q_data_out->height;
+ width = q_data_out->width;
p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
@@ -227,109 +471,86 @@ static int device_process(struct vim2m_ctx *ctx,
return -EFAULT;
}
- if (vb2_plane_size(&in_vb->vb2_buf, 0) >
- vb2_plane_size(&out_vb->vb2_buf, 0)) {
- v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
- return -EINVAL;
- }
+ out_vb->sequence = q_data_out->sequence++;
+ in_vb->sequence = q_data_in->sequence++;
+ v4l2_m2m_buf_copy_metadata(in_vb, out_vb, true);
- tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
- / MEM2MEM_NUM_TILES;
- bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
- w = 0;
-
- out_vb->sequence =
- get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++;
- in_vb->sequence = q_data->sequence++;
- out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp;
-
- if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
- out_vb->timecode = in_vb->timecode;
- out_vb->field = in_vb->field;
- out_vb->flags = in_vb->flags &
- (V4L2_BUF_FLAG_TIMECODE |
- V4L2_BUF_FLAG_KEYFRAME |
- V4L2_BUF_FLAG_PFRAME |
- V4L2_BUF_FLAG_BFRAME |
- V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
-
- switch (ctx->mode) {
- case MEM2MEM_HFLIP | MEM2MEM_VFLIP:
- p_out += bytesperline * height - bytes_left;
- for (y = 0; y < height; ++y) {
- for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
- if (w & 0x1) {
- for (x = 0; x < tile_w; ++x)
- *--p_out = *p_in++ +
- MEM2MEM_COLOR_STEP;
- } else {
- for (x = 0; x < tile_w; ++x)
- *--p_out = *p_in++ -
- MEM2MEM_COLOR_STEP;
- }
- ++w;
- }
- p_in += bytes_left;
- p_out -= bytes_left;
- }
- break;
+ if (ctx->mode & MEM2MEM_VFLIP) {
+ start = height - 1;
+ end = -1;
+ step = -1;
+ } else {
+ start = 0;
+ end = height;
+ step = 1;
+ }
+ y_out = 0;
+
+	/*
+	 * When the format and resolution are identical,
+	 * we can use a faster copy path.
+	 */
+ if (q_data_in->fmt->fourcc == q_data_out->fmt->fourcc &&
+ q_data_in->width == q_data_out->width &&
+ q_data_in->height == q_data_out->height) {
+ for (y = start; y != end; y += step, y_out++) {
+ p = p_in + (y * bytesperline);
+ if (ctx->mode & MEM2MEM_HFLIP)
+ p += bytesperline - (q_data_in->fmt->depth >> 3);
+
+ copy_line(q_data_out, p, p_out,
+ ctx->mode & MEM2MEM_HFLIP);
- case MEM2MEM_HFLIP:
- for (y = 0; y < height; ++y) {
- p_out += MEM2MEM_NUM_TILES * tile_w;
- for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
- if (w & 0x01) {
- for (x = 0; x < tile_w; ++x)
- *--p_out = *p_in++ +
- MEM2MEM_COLOR_STEP;
- } else {
- for (x = 0; x < tile_w; ++x)
- *--p_out = *p_in++ -
- MEM2MEM_COLOR_STEP;
- }
- ++w;
- }
- p_in += bytes_left;
p_out += bytesperline;
}
- break;
+ return 0;
+ }
+
+	/* Slower algorithm with format conversion, hflip, vflip and scaling */
+
+	/* To speed the scaler up, use Bresenham for the X dimension */
+ x_int = q_data_in->width / q_data_out->width;
+ x_fract = q_data_in->width % q_data_out->width;
+
+ for (y = start; y != end; y += step, y_out++) {
+ y_in = (y * q_data_in->height) / q_data_out->height;
+ x_offset = 0;
+ x_err = 0;
- case MEM2MEM_VFLIP:
- p_out += bytesperline * (height - 1);
- for (y = 0; y < height; ++y) {
- for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
- if (w & 0x1) {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ +
- MEM2MEM_COLOR_STEP;
- } else {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ -
- MEM2MEM_COLOR_STEP;
- }
- ++w;
+ p_line = p_in + (y_in * bytesperline);
+ if (ctx->mode & MEM2MEM_HFLIP)
+ p_line += bytesperline - (q_data_in->fmt->depth >> 3);
+ p_in_x[0] = p_line;
+
+ for (x = 0; x < width >> 1; x++) {
+ x_offset += x_int;
+ x_err += x_fract;
+ if (x_err > width) {
+ x_offset++;
+ x_err -= width;
}
- p_in += bytes_left;
- p_out += bytes_left - 2 * bytesperline;
- }
- break;
- default:
- for (y = 0; y < height; ++y) {
- for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
- if (w & 0x1) {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ +
- MEM2MEM_COLOR_STEP;
- } else {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ -
- MEM2MEM_COLOR_STEP;
- }
- ++w;
+ if (ctx->mode & MEM2MEM_HFLIP)
+ p_in_x[1] = p_line - x_offset * bytes_per_pixel;
+ else
+ p_in_x[1] = p_line + x_offset * bytes_per_pixel;
+
+ copy_two_pixels(q_data_in, q_data_out,
+ p_in_x, &p_out, y_out,
+ ctx->mode & MEM2MEM_HFLIP);
+
+			/* Calculate the next p_in_x[0] */
+ x_offset += x_int;
+ x_err += x_fract;
+ if (x_err > width) {
+ x_offset++;
+ x_err -= width;
}
- p_in += bytes_left;
- p_out += bytes_left;
+
+ if (ctx->mode & MEM2MEM_HFLIP)
+ p_in_x[0] = p_line - x_offset * bytes_per_pixel;
+ else
+ p_in_x[0] = p_line + x_offset * bytes_per_pixel;
}
}
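[Editor's note: the horizontal scaler above avoids a per-pixel divide with a Bresenham-style accumulator: x_int is the integer source step, x_fract the remainder, and x_err carries the running fractional error. A standalone sketch of the stepping, where in_w and out_w are illustrative names for the input and output widths:

	unsigned int x_int = in_w / out_w, x_fract = in_w % out_w;
	unsigned int x_offset = 0, x_err = 0, x;

	for (x = 0; x < out_w; x++) {
		/* the source column for output column x is x_offset */
		x_offset += x_int;
		x_err += x_fract;
		if (x_err > out_w) {	/* carry the fractional part */
			x_offset++;
			x_err -= out_w;
		}
	}
]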
@@ -349,7 +570,7 @@ static int job_ready(void *priv)
if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen
|| v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) {
- dprintk(ctx->dev, "Not enough buffers available\n");
+ dprintk(ctx->dev, 1, "Not enough buffers available\n");
return 0;
}
@@ -373,7 +594,6 @@ static void job_abort(void *priv)
static void device_run(void *priv)
{
struct vim2m_ctx *ctx = priv;
- struct vim2m_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
@@ -390,37 +610,38 @@ static void device_run(void *priv)
&ctx->hdl);
/* Run delayed work, which simulates a hardware irq */
- schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime));
+ schedule_delayed_work(&ctx->work_run, msecs_to_jiffies(ctx->transtime));
}
static void device_work(struct work_struct *w)
{
- struct vim2m_dev *vim2m_dev =
- container_of(w, struct vim2m_dev, work_run.work);
struct vim2m_ctx *curr_ctx;
+ struct vim2m_dev *vim2m_dev;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
- curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev);
+ curr_ctx = container_of(w, struct vim2m_ctx, work_run.work);
- if (NULL == curr_ctx) {
+ if (!curr_ctx) {
pr_err("Instance released before the end of transaction\n");
return;
}
+ vim2m_dev = curr_ctx->dev;
+
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
curr_ctx->num_processed++;
- spin_lock_irqsave(&vim2m_dev->irqlock, flags);
+ spin_lock_irqsave(&curr_ctx->irqlock, flags);
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
- spin_unlock_irqrestore(&vim2m_dev->irqlock, flags);
+ spin_unlock_irqrestore(&curr_ctx->irqlock, flags);
if (curr_ctx->num_processed == curr_ctx->translen
|| curr_ctx->aborting) {
- dprintk(curr_ctx->dev, "Finishing transaction\n");
+ dprintk(curr_ctx->dev, 2, "Finishing capture buffer fill\n");
curr_ctx->num_processed = 0;
v4l2_m2m_job_finish(vim2m_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
} else {
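The reworked device_work() no longer asks the m2m core for the current context via the device; it recovers the context straight from the work item, since work_run is now embedded in struct vim2m_ctx. A minimal sketch of the container_of() pattern, with hypothetical type names:

    #include <linux/workqueue.h>

    struct demo_ctx {
            struct delayed_work work_run;
            /* per-context state ... */
    };

    static void demo_work(struct work_struct *w)
    {
            /* w points at work_run.work inside struct demo_ctx */
            struct demo_ctx *ctx =
                    container_of(w, struct demo_ctx, work_run.work);

            (void)ctx;      /* use the per-context state here */
    }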
@@ -437,7 +658,7 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", MEM2MEM_NAME);
+ "platform:%s", MEM2MEM_NAME);
return 0;
}
@@ -453,8 +674,10 @@ static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
/* index-th format of type type found ? */
if (num == f->index)
break;
- /* Correct type but haven't reached our index yet,
- * just increment per-type index */
+ /*
+ * Correct type but haven't reached our index yet,
+ * just increment per-type index
+ */
++num;
}
}
@@ -482,6 +705,27 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
return enum_fmt(f, MEM2MEM_OUTPUT);
}
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ if (!find_format(fsize->pixel_format))
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = MIN_W;
+ fsize->stepwise.min_height = MIN_H;
+ fsize->stepwise.max_width = MAX_W;
+ fsize->stepwise.max_height = MAX_H;
+
+ get_alignment(fsize->pixel_format,
+ &fsize->stepwise.step_width,
+ &fsize->stepwise.step_height);
+ return 0;
+}
+
static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
{
struct vb2_queue *vq;
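With the new vidioc_enum_framesizes handler, userspace sees exactly one STEPWISE entry per supported pixel format. A hedged userspace sketch of querying it (hypothetical helper; standard V4L2 ioctl API):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int probe_framesizes(int fd, unsigned int fourcc)
    {
            struct v4l2_frmsizeenum fsize;

            memset(&fsize, 0, sizeof(fsize));
            fsize.index = 0;                /* only index 0 is valid here */
            fsize.pixel_format = fourcc;

            if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) < 0)
                    return -1;

            if (fsize.type == V4L2_FRMSIZE_TYPE_STEPWISE)
                    printf("%ux%u .. %ux%u, step %ux%u\n",
                           fsize.stepwise.min_width, fsize.stepwise.min_height,
                           fsize.stepwise.max_width, fsize.stepwise.max_height,
                           fsize.stepwise.step_width, fsize.stepwise.step_height);
            return 0;
    }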
@@ -492,6 +736,8 @@ static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
return -EINVAL;
q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
f->fmt.pix.width = q_data->width;
f->fmt.pix.height = q_data->height;
@@ -521,8 +767,11 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt)
{
- /* V4L2 specification suggests the driver corrects the format struct
- * if any of the dimensions is unsupported */
+ int walign, halign;
+ /*
+ * The V4L2 specification requires the driver to correct the
+ * format struct if any of the dimensions is unsupported
+ */
if (f->fmt.pix.height < MIN_H)
f->fmt.pix.height = MIN_H;
else if (f->fmt.pix.height > MAX_H)
@@ -533,7 +782,9 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt)
else if (f->fmt.pix.width > MAX_W)
f->fmt.pix.width = MAX_W;
- f->fmt.pix.width &= ~DIM_ALIGN_MASK;
+ get_alignment(f->fmt.pix.pixelformat, &walign, &halign);
+ f->fmt.pix.width &= ~(walign - 1);
+ f->fmt.pix.height &= ~(halign - 1);
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.field = V4L2_FIELD_NONE;
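The walign/halign masking only works because the alignment steps are powers of two: clearing the low bits with & ~(align - 1) rounds a dimension down to the nearest multiple. A one-line sketch of the idiom (hypothetical helper, assuming get_alignment() returns power-of-two steps):

    /* Round v down to a multiple of align; align must be a power of two.
     * e.g. align_down(1283, 8) == 1280 */
    static unsigned int align_down(unsigned int v, unsigned int align)
    {
            return v & ~(align - 1);
    }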
@@ -547,10 +798,10 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct vim2m_fmt *fmt;
struct vim2m_ctx *ctx = file2ctx(file);
- fmt = find_format(f);
+ fmt = find_format(f->fmt.pix.pixelformat);
if (!fmt) {
f->fmt.pix.pixelformat = formats[0].fourcc;
- fmt = find_format(f);
+ fmt = find_format(f->fmt.pix.pixelformat);
}
if (!(fmt->types & MEM2MEM_CAPTURE)) {
v4l2_err(&ctx->dev->v4l2_dev,
@@ -572,10 +823,10 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
struct vim2m_fmt *fmt;
struct vim2m_ctx *ctx = file2ctx(file);
- fmt = find_format(f);
+ fmt = find_format(f->fmt.pix.pixelformat);
if (!fmt) {
f->fmt.pix.pixelformat = formats[0].fourcc;
- fmt = find_format(f);
+ fmt = find_format(f->fmt.pix.pixelformat);
}
if (!(fmt->types & MEM2MEM_OUTPUT)) {
v4l2_err(&ctx->dev->v4l2_dev,
@@ -607,15 +858,20 @@ static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
return -EBUSY;
}
- q_data->fmt = find_format(f);
+ q_data->fmt = find_format(f->fmt.pix.pixelformat);
q_data->width = f->fmt.pix.width;
q_data->height = f->fmt.pix.height;
q_data->sizeimage = q_data->width * q_data->height
* q_data->fmt->depth >> 3;
- dprintk(ctx->dev,
- "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
- f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+ dprintk(ctx->dev, 1,
+ "Format for type %s: %dx%d (%d bpp), fmt: %c%c%c%c\n",
+ type_name(f->type), q_data->width, q_data->height,
+ q_data->fmt->depth,
+ (q_data->fmt->fourcc & 0xff),
+ (q_data->fmt->fourcc >> 8) & 0xff,
+ (q_data->fmt->fourcc >> 16) & 0xff,
+ (q_data->fmt->fourcc >> 24) & 0xff);
return 0;
}
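The new dprintk() prints the fourcc one byte at a time, least significant byte first. The same shift-and-mask unpacking as a small helper (hypothetical, for illustration):

    /* Unpack a V4L2 fourcc into its four ASCII characters. */
    static void fourcc_to_str(unsigned int fourcc, char out[5])
    {
            out[0] = fourcc & 0xff;
            out[1] = (fourcc >> 8) & 0xff;
            out[2] = (fourcc >> 16) & 0xff;
            out[3] = (fourcc >> 24) & 0xff;
            out[4] = '\0';
    }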
@@ -674,6 +930,8 @@ static int vim2m_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_TRANS_TIME_MSEC:
ctx->transtime = ctrl->val;
+ if (ctx->transtime < 1)
+ ctx->transtime = 1;
break;
case V4L2_CID_TRANS_NUM_BUFS:
@@ -692,11 +950,11 @@ static const struct v4l2_ctrl_ops vim2m_ctrl_ops = {
.s_ctrl = vim2m_s_ctrl,
};
-
static const struct v4l2_ioctl_ops vim2m_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
@@ -721,20 +979,23 @@ static const struct v4l2_ioctl_ops vim2m_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-
/*
* Queue operations
*/
static int vim2m_queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(vq);
struct vim2m_q_data *q_data;
unsigned int size, count = *nbuffers;
q_data = get_q_data(ctx, vq->type);
+ if (!q_data)
+ return -EINVAL;
size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
@@ -748,33 +1009,42 @@ static int vim2m_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
+ dprintk(ctx->dev, 1, "%s: get %d buffer(s) of size %d each.\n",
+ type_name(vq->type), count, size);
return 0;
}
-static int vim2m_buf_prepare(struct vb2_buffer *vb)
+static int vim2m_buf_out_validate(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dprintk(ctx->dev, 1, "%s field isn't supported\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vim2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vim2m_q_data *q_data;
- dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+ dprintk(ctx->dev, 2, "type: %s\n", type_name(vb->vb2_queue->type));
q_data = get_q_data(ctx, vb->vb2_queue->type);
- if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
- if (vbuf->field == V4L2_FIELD_ANY)
- vbuf->field = V4L2_FIELD_NONE;
- if (vbuf->field != V4L2_FIELD_NONE) {
- dprintk(ctx->dev, "%s field isn't supported\n",
- __func__);
- return -EINVAL;
- }
- }
-
+ if (!q_data)
+ return -EINVAL;
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
- dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
- __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
+ dprintk(ctx->dev, 1,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->sizeimage);
return -EINVAL;
}
@@ -791,11 +1061,14 @@ static void vim2m_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
-static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
+static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
struct vim2m_q_data *q_data = get_q_data(ctx, q->type);
+ if (!q_data)
+ return -EINVAL;
+
q_data->sequence = 0;
return 0;
}
@@ -803,25 +1076,23 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
static void vim2m_stop_streaming(struct vb2_queue *q)
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
- struct vim2m_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
- if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
- cancel_delayed_work_sync(&dev->work_run);
+ cancel_delayed_work_sync(&ctx->work_run);
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
else
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- if (vbuf == NULL)
+ if (!vbuf)
return;
v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
&ctx->hdl);
- spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ spin_lock_irqsave(&ctx->irqlock, flags);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
- spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+ spin_unlock_irqrestore(&ctx->irqlock, flags);
}
}
@@ -834,6 +1105,7 @@ static void vim2m_buf_request_complete(struct vb2_buffer *vb)
static const struct vb2_ops vim2m_qops = {
.queue_setup = vim2m_queue_setup,
+ .buf_out_validate = vim2m_buf_out_validate,
.buf_prepare = vim2m_buf_prepare,
.buf_queue = vim2m_buf_queue,
.start_streaming = vim2m_start_streaming,
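The new .buf_out_validate hook runs whenever an output buffer is queued, including when it is attached to a media request, so moving the field check there from buf_prepare() covers validation paths buf_prepare() would miss. A hedged sketch of the wiring (hypothetical names):

    #include <media/videobuf2-core.h>

    static int demo_buf_out_validate(struct vb2_buffer *vb)
    {
            return 0;       /* a real driver validates vb's field here */
    }

    static int demo_buf_prepare(struct vb2_buffer *vb)
    {
            return 0;       /* size/payload checks stay here */
    }

    static const struct vb2_ops demo_qops = {
            .buf_out_validate = demo_buf_out_validate,
            .buf_prepare = demo_buf_prepare,
    };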
@@ -843,7 +1115,8 @@ static const struct vb2_ops vim2m_qops = {
.buf_request_complete = vim2m_buf_request_complete,
};
-static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
{
struct vim2m_ctx *ctx = priv;
int ret;
@@ -855,7 +1128,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
src_vq->ops = &vim2m_qops;
src_vq->mem_ops = &vb2_vmalloc_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->lock = &ctx->vb_mutex;
src_vq->supports_requests = true;
ret = vb2_queue_init(src_vq);
@@ -869,17 +1142,16 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
dst_vq->ops = &vim2m_qops;
dst_vq->mem_ops = &vb2_vmalloc_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->lock = &ctx->vb_mutex;
return vb2_queue_init(dst_vq);
}
-static const struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = {
+static struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = {
.ops = &vim2m_ctrl_ops,
.id = V4L2_CID_TRANS_TIME_MSEC,
.name = "Transaction Time (msec)",
.type = V4L2_CTRL_TYPE_INTEGER,
- .def = MEM2MEM_DEF_TRANSTIME,
.min = 1,
.max = 10001,
.step = 1,
@@ -921,6 +1193,8 @@ static int vim2m_open(struct file *file)
v4l2_ctrl_handler_init(hdl, 4);
v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ vim2m_ctrl_trans_time_msec.def = default_transtime;
v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_time_msec, NULL);
v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_num_bufs, NULL);
if (hdl->error) {
@@ -944,6 +1218,10 @@ static int vim2m_open(struct file *file)
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ mutex_init(&ctx->vb_mutex);
+ spin_lock_init(&ctx->irqlock);
+ INIT_DELAYED_WORK(&ctx->work_run, device_work);
+
if (IS_ERR(ctx->fh.m2m_ctx)) {
rc = PTR_ERR(ctx->fh.m2m_ctx);
@@ -956,7 +1234,7 @@ static int vim2m_open(struct file *file)
v4l2_fh_add(&ctx->fh);
atomic_inc(&dev->num_inst);
- dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
+ dprintk(dev, 1, "Created instance: %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
open_unlock:
@@ -969,7 +1247,7 @@ static int vim2m_release(struct file *file)
struct vim2m_dev *dev = video_drvdata(file);
struct vim2m_ctx *ctx = file2ctx(file);
- dprintk(dev, "Releasing instance %p\n", ctx);
+ dprintk(dev, 1, "Releasing instance %p\n", ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -1024,8 +1302,6 @@ static int vim2m_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- spin_lock_init(&dev->irqlock);
-
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
return ret;
@@ -1037,7 +1313,6 @@ static int vim2m_probe(struct platform_device *pdev)
vfd = &dev->vfd;
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
- INIT_DELAYED_WORK(&dev->work_run, device_work);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
@@ -1047,7 +1322,7 @@ static int vim2m_probe(struct platform_device *pdev)
video_set_drvdata(vfd, dev);
v4l2_info(&dev->v4l2_dev,
- "Device registered as /dev/video%d\n", vfd->num);
+ "Device registered as /dev/video%d\n", vfd->num);
platform_set_drvdata(pdev, dev);
@@ -1061,12 +1336,14 @@ static int vim2m_probe(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
+ strscpy(dev->mdev.bus_info, "platform:vim2m",
+ sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
dev->mdev.ops = &m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
- ret = v4l2_m2m_register_media_controller(dev->m2m_dev,
- vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
goto unreg_m2m;
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
index 4b2e3de7856e..c4fc8e7d365a 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/platform/vimc/Makefile
@@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o
vimc_debayer-objs := vimc-debayer.o
vimc_scaler-objs := vimc-scaler.o
vimc_sensor-objs := vimc-sensor.o
+vimc_streamer-objs := vimc-streamer.o
obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \
- vimc_scaler.o vimc_sensor.o
+ vimc_scaler.o vimc_sensor.o vimc_streamer.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 3f7e9ed56633..ea869631a3f6 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -24,6 +24,7 @@
#include <media/videobuf2-vmalloc.h>
#include "vimc-common.h"
+#include "vimc-streamer.h"
#define VIMC_CAP_DRV_NAME "vimc-capture"
@@ -44,7 +45,7 @@ struct vimc_cap_device {
spinlock_t qlock;
struct mutex lock;
u32 sequence;
- struct media_pipeline pipe;
+ struct vimc_stream stream;
};
static const struct v4l2_pix_format fmt_default = {
@@ -69,12 +70,10 @@ struct vimc_cap_buffer {
static int vimc_cap_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct vimc_cap_device *vcap = video_drvdata(file);
-
- strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->driver, VIMC_PDEV_NAME, sizeof(cap->driver));
strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", vcap->vdev.v4l2_dev->name);
+ "platform:%s", VIMC_PDEV_NAME);
return 0;
}
@@ -248,14 +247,13 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
vcap->sequence = 0;
/* Start the media pipeline */
- ret = media_pipeline_start(entity, &vcap->pipe);
+ ret = media_pipeline_start(entity, &vcap->stream.pipe);
if (ret) {
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
return ret;
}
- /* Enable streaming from the pipe */
- ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1);
+ ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
if (ret) {
media_pipeline_stop(entity);
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
@@ -273,8 +271,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq)
{
struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
- /* Disable streaming from the pipe */
- vimc_pipeline_s_stream(&vcap->vdev.entity, 0);
+ vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
/* Stop the media pipeline */
media_pipeline_stop(&vcap->vdev.entity);
@@ -355,8 +352,8 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
kfree(vcap);
}
-static void vimc_cap_process_frame(struct vimc_ent_device *ved,
- struct media_pad *sink, const void *frame)
+static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
+ const void *frame)
{
struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
ved);
@@ -370,7 +367,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
typeof(*vimc_buf), list);
if (!vimc_buf) {
spin_unlock(&vcap->qlock);
- return;
+ return ERR_PTR(-EAGAIN);
}
/* Remove this entry from the list */
@@ -391,6 +388,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
vcap->format.sizeimage);
vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
+ return NULL;
}
static int vimc_cap_comp_bind(struct device *comp, struct device *master,
@@ -431,7 +429,7 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
/* Initialize the vb2 queue */
q = &vcap->queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
q->drv_priv = vcap;
q->buf_struct_size = sizeof(struct vimc_cap_buffer);
q->ops = &vimc_cap_qops;
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index 867e24dbd6b5..c1a74bb2df58 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -207,41 +207,6 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
}
EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
-int vimc_propagate_frame(struct media_pad *src, const void *frame)
-{
- struct media_link *link;
-
- if (!(src->flags & MEDIA_PAD_FL_SOURCE))
- return -EINVAL;
-
- /* Send this frame to all sink pads that are direct linked */
- list_for_each_entry(link, &src->entity->links, list) {
- if (link->source == src &&
- (link->flags & MEDIA_LNK_FL_ENABLED)) {
- struct vimc_ent_device *ved = NULL;
- struct media_entity *entity = link->sink->entity;
-
- if (is_media_entity_v4l2_subdev(entity)) {
- struct v4l2_subdev *sd =
- container_of(entity, struct v4l2_subdev,
- entity);
- ved = v4l2_get_subdevdata(sd);
- } else if (is_media_entity_v4l2_video_device(entity)) {
- struct video_device *vdev =
- container_of(entity,
- struct video_device,
- entity);
- ved = video_get_drvdata(vdev);
- }
- if (ved && ved->process_frame)
- ved->process_frame(ved, link->sink, frame);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vimc_propagate_frame);
-
/* Helper function to allocate and initialize pads */
struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
{
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
index 2e9981b18166..84539430b5e7 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -22,6 +22,8 @@
#include <media/media-device.h>
#include <media/v4l2-device.h>
+#define VIMC_PDEV_NAME "vimc"
+
/* VIMC-specific controls */
#define VIMC_CID_VIMC_BASE (0x00f00000 | 0xf000)
#define VIMC_CID_VIMC_CLASS (0x00f00000 | 1)
@@ -113,24 +115,13 @@ struct vimc_pix_map {
struct vimc_ent_device {
struct media_entity *ent;
struct media_pad *pads;
- void (*process_frame)(struct vimc_ent_device *ved,
- struct media_pad *sink, const void *frame);
+ void * (*process_frame)(struct vimc_ent_device *ved,
+ const void *frame);
void (*vdev_get_format)(struct vimc_ent_device *ved,
struct v4l2_pix_format *fmt);
};
/**
- * vimc_propagate_frame - propagate a frame through the topology
- *
- * @src: the source pad where the frame is being originated
- * @frame: the frame to be propagated
- *
- * This function will call the process_frame callback from the vimc_ent_device
- * struct of the nodes directly connected to the @src pad
- */
-int vimc_propagate_frame(struct media_pad *src, const void *frame);
-
-/**
* vimc_pads_init - initialize pads
*
* @num_pads: number of pads to initialize
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index ce809d2e3d53..0fbb7914098f 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -24,7 +24,6 @@
#include "vimc-common.h"
-#define VIMC_PDEV_NAME "vimc"
#define VIMC_MDEV_MODEL_NAME "VIMC MDEV"
#define VIMC_ENT_LINK(src, srcpad, sink, sinkpad, link_flags) { \
@@ -221,6 +220,7 @@ static int vimc_comp_bind(struct device *master)
err_mdev_unregister:
media_device_unregister(&vimc->mdev);
+ media_device_cleanup(&vimc->mdev);
err_comp_unbind_all:
component_unbind_all(master, NULL);
err_v4l2_unregister:
@@ -237,6 +237,7 @@ static void vimc_comp_unbind(struct device *master)
dev_dbg(master, "unbind");
media_device_unregister(&vimc->mdev);
+ media_device_cleanup(&vimc->mdev);
component_unbind_all(master, NULL);
v4l2_device_unregister(&vimc->v4l2_dev);
}
@@ -319,6 +320,8 @@ static int vimc_probe(struct platform_device *pdev)
/* Initialize media device */
strscpy(vimc->mdev.model, VIMC_MDEV_MODEL_NAME,
sizeof(vimc->mdev.model));
+ snprintf(vimc->mdev.bus_info, sizeof(vimc->mdev.bus_info),
+ "platform:%s", VIMC_PDEV_NAME);
vimc->mdev.dev = &pdev->dev;
media_device_init(&vimc->mdev);
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 77887f66f323..7d77c63b99d2 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
{
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
- int ret;
if (enable) {
const struct vimc_pix_map *vpix;
@@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
if (!vdeb->src_frame)
return -ENOMEM;
- /* Turn the stream on in the subdevices directly connected */
- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1);
- if (ret) {
- vfree(vdeb->src_frame);
- vdeb->src_frame = NULL;
- return ret;
- }
} else {
if (!vdeb->src_frame)
return 0;
- /* Disable streaming from the pipe */
- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0);
- if (ret)
- return ret;
-
vfree(vdeb->src_frame);
vdeb->src_frame = NULL;
}
@@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
}
}
-static void vimc_deb_process_frame(struct vimc_ent_device *ved,
- struct media_pad *sink,
- const void *sink_frame)
+static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
{
struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
ved);
@@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
/* If the stream in this node is not active, just return */
if (!vdeb->src_frame)
- return;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < vdeb->sink_fmt.height; i++)
for (j = 0; j < vdeb->sink_fmt.width; j++) {
@@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
vdeb->set_rgb_src(vdeb, i, j, rgb);
}
- /* Propagate the frame through all source pads */
- for (i = 1; i < vdeb->sd.entity.num_pads; i++) {
- struct media_pad *pad = &vdeb->sd.entity.pads[i];
+ return vdeb->src_frame;
- vimc_propagate_frame(pad, vdeb->src_frame);
- }
}
static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index b0952ee86296..39b2a73dfcc1 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
- int ret;
if (enable) {
const struct vimc_pix_map *vpix;
@@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
if (!vsca->src_frame)
return -ENOMEM;
- /* Turn the stream on in the subdevices directly connected */
- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1);
- if (ret) {
- vfree(vsca->src_frame);
- vsca->src_frame = NULL;
- return ret;
- }
} else {
if (!vsca->src_frame)
return 0;
- /* Disable streaming from the pipe */
- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0);
- if (ret)
- return ret;
-
vfree(vsca->src_frame);
vsca->src_frame = NULL;
}
@@ -346,26 +333,19 @@ static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
vimc_sca_scale_pix(vsca, i, j, sink_frame);
}
-static void vimc_sca_process_frame(struct vimc_ent_device *ved,
- struct media_pad *sink,
- const void *sink_frame)
+static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
{
struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
ved);
- unsigned int i;
/* If the stream in this node is not active, just return */
if (!vsca->src_frame)
- return;
+ return ERR_PTR(-EINVAL);
vimc_sca_fill_src_frame(vsca, sink_frame);
- /* Propagate the frame through all source pads */
- for (i = 1; i < vsca->sd.entity.num_pads; i++) {
- struct media_pad *pad = &vsca->sd.entity.pads[i];
-
- vimc_propagate_frame(pad, vsca->src_frame);
- }
+ return vsca->src_frame;
};
static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 32ca9c6172b1..59195f262623 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -16,8 +16,6 @@
*/
#include <linux/component.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -201,38 +199,20 @@ static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
.set_fmt = vimc_sen_set_fmt,
};
-static int vimc_sen_tpg_thread(void *data)
+static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
+ const void *sink_frame)
{
- struct vimc_sen_device *vsen = data;
- unsigned int i;
-
- set_freezable();
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- for (;;) {
- try_to_freeze();
- if (kthread_should_stop())
- break;
+ struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
+ ved);
- tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
-
- /* Send the frame to all source pads */
- for (i = 0; i < vsen->sd.entity.num_pads; i++)
- vimc_propagate_frame(&vsen->sd.entity.pads[i],
- vsen->frame);
-
- /* 60 frames per second */
- schedule_timeout(HZ/60);
- }
-
- return 0;
+ tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
+ return vsen->frame;
}
static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
{
struct vimc_sen_device *vsen =
container_of(sd, struct vimc_sen_device, sd);
- int ret;
if (enable) {
const struct vimc_pix_map *vpix;
@@ -258,26 +238,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
/* configure the test pattern generator */
vimc_sen_tpg_s_format(vsen);
- /* Initialize the image generator thread */
- vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen,
- "%s-sen", vsen->sd.v4l2_dev->name);
- if (IS_ERR(vsen->kthread_sen)) {
- dev_err(vsen->dev, "%s: kernel_thread() failed\n",
- vsen->sd.name);
- vfree(vsen->frame);
- vsen->frame = NULL;
- return PTR_ERR(vsen->kthread_sen);
- }
} else {
- if (!vsen->kthread_sen)
- return 0;
-
- /* Stop image generator */
- ret = kthread_stop(vsen->kthread_sen);
- if (ret)
- return ret;
- vsen->kthread_sen = NULL;
vfree(vsen->frame);
vsen->frame = NULL;
return 0;
@@ -413,6 +375,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
if (ret)
goto err_free_hdl;
+ vsen->ved.process_frame = vimc_sen_process_frame;
dev_set_drvdata(comp, &vsen->ved);
vsen->dev = comp;
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
new file mode 100644
index 000000000000..fcc897fb247b
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vimc-streamer.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+
+#include "vimc-streamer.h"
+
+/**
+ * vimc_get_source_entity - get the entity connected with the first sink pad
+ *
+ * @ent: reference media_entity
+ *
+ * Helper function that returns the media entity containing the source pad
+ * linked with the first sink pad from the given media entity pad list.
+ */
+static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
+{
+ struct media_pad *pad;
+ int i;
+
+ for (i = 0; i < ent->num_pads; i++) {
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ continue;
+ pad = media_entity_remote_pad(&ent->pads[i]);
+ return pad ? pad->entity : NULL;
+ }
+ return NULL;
+}
+
+/*
+ * vimc_streamer_pipeline_terminate - Disable the stream in all entities of the pipeline
+ *
+ * @stream: the pointer to the stream structure with the pipeline to be
+ * disabled.
+ *
+ * Calls s_stream to disable the stream in each entity of the pipeline
+ *
+ */
+static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
+{
+ struct media_entity *entity;
+ struct v4l2_subdev *sd;
+
+ while (stream->pipe_size) {
+ stream->pipe_size--;
+ entity = stream->ved_pipeline[stream->pipe_size]->ent;
+ entity = vimc_get_source_entity(entity);
+ stream->ved_pipeline[stream->pipe_size] = NULL;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ v4l2_subdev_call(sd, video, s_stream, 0);
+ }
+}
+
+/*
+ * vimc_streamer_pipeline_init - initializes the stream structure
+ *
+ * @stream: the pointer to the stream structure to be initialized
+ * @ved: the pointer to the vimc entity initializing the stream
+ *
+ * Initializes the stream structure. Walks through the entity graph to
+ * construct the pipeline used later by the streamer thread.
+ * Calls s_stream to enable the stream in all entities of the pipeline.
+ */
+static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
+ struct vimc_ent_device *ved)
+{
+ struct media_entity *entity;
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ int ret = 0;
+
+ stream->pipe_size = 0;
+ while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) {
+ if (!ved) {
+ vimc_streamer_pipeline_terminate(stream);
+ return -EINVAL;
+ }
+ stream->ved_pipeline[stream->pipe_size++] = ved;
+
+ entity = vimc_get_source_entity(ved->ent);
+ /* Check if the end of the pipeline was reached */
+ if (!entity)
+ return 0;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ sd = media_entity_to_v4l2_subdev(entity);
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ vimc_streamer_pipeline_terminate(stream);
+ return ret;
+ }
+ ved = v4l2_get_subdevdata(sd);
+ } else {
+ vdev = container_of(entity,
+ struct video_device,
+ entity);
+ ved = video_get_drvdata(vdev);
+ }
+ }
+
+ vimc_streamer_pipeline_terminate(stream);
+ return -EINVAL;
+}
+
+static int vimc_streamer_thread(void *data)
+{
+ struct vimc_stream *stream = data;
+ int i;
+
+ set_freezable();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ for (i = stream->pipe_size - 1; i >= 0; i--) {
+ stream->frame = stream->ved_pipeline[i]->process_frame(
+ stream->ved_pipeline[i],
+ stream->frame);
+ if (!stream->frame)
+ break;
+ if (IS_ERR(stream->frame))
+ break;
+ }
+ /* wait for the next frame period (~60 fps) */
+ schedule_timeout(HZ / 60);
+ }
+
+ return 0;
+}
+
+int vimc_streamer_s_stream(struct vimc_stream *stream,
+ struct vimc_ent_device *ved,
+ int enable)
+{
+ int ret;
+
+ if (!stream || !ved)
+ return -EINVAL;
+
+ if (enable) {
+ if (stream->kthread)
+ return 0;
+
+ ret = vimc_streamer_pipeline_init(stream, ved);
+ if (ret)
+ return ret;
+
+ stream->kthread = kthread_run(vimc_streamer_thread, stream,
+ "vimc-streamer thread");
+
+ if (IS_ERR(stream->kthread))
+ return PTR_ERR(stream->kthread);
+
+ } else {
+ if (!stream->kthread)
+ return 0;
+
+ ret = kthread_stop(stream->kthread);
+ if (ret)
+ return ret;
+
+ stream->kthread = NULL;
+
+ vimc_streamer_pipeline_terminate(stream);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
+MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>");
+MODULE_LICENSE("GPL");
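vimc_streamer_thread() walks ved_pipeline[] in reverse because vimc_streamer_pipeline_init() stores entities from the capture node (index 0) back toward the sensor, so each process_frame() output feeds the next entity downstream. A simplified standalone model of the loop (hypothetical types, not the driver's):

    struct ent {
            void *(*process_frame)(struct ent *ent, const void *in);
    };

    static void run_pipeline_once(struct ent **pipe, int size)
    {
            void *frame = NULL;
            int i;

            /* sensor first (highest index), capture node last */
            for (i = size - 1; i >= 0; i--) {
                    frame = pipe[i]->process_frame(pipe[i], frame);
                    if (!frame)     /* the driver also stops on IS_ERR() */
                            break;
            }
    }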
diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h
new file mode 100644
index 000000000000..752af2e2d5a2
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-streamer.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vimc-streamer.h Virtual Media Controller Driver
+ *
+ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
+ *
+ */
+
+#ifndef _VIMC_STREAMER_H_
+#define _VIMC_STREAMER_H_
+
+#include <media/media-device.h>
+
+#include "vimc-common.h"
+
+#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
+
+struct vimc_stream {
+ struct media_pipeline pipe;
+ struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
+ unsigned int pipe_size;
+ u8 *frame;
+ struct task_struct *kthread;
+};
+
+/**
+ * vimc_streamer_s_stream - start/stop the stream
+ *
+ * @stream: the pointer to the stream to start or stop
+ * @ved: the last entity of the streamer pipeline
+ * @enable: any non-zero value starts the stream, zero stops it
+ *
+ */
+int vimc_streamer_s_stream(struct vimc_stream *stream,
+ struct vimc_ent_device *ved,
+ int enable);
+
+#endif /* _VIMC_STREAMER_H_ */
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index c931f007e5b0..342e0e6c103b 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -371,7 +371,7 @@ static int vidioc_s_parm(struct file *file, void *fh,
if (vdev->vfl_dir == VFL_DIR_RX)
return vivid_vid_cap_s_parm(file, fh, parm);
- return vivid_vid_out_g_parm(file, fh, parm);
+ return -ENOTTY;
}
static int vidioc_log_status(struct file *file, void *fh)
@@ -1094,7 +1094,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q = &dev->vb_vid_cap_q;
q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vid_cap_qops;
@@ -1115,7 +1117,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q = &dev->vb_vid_out_q;
q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
V4L2_BUF_TYPE_VIDEO_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vid_out_qops;
@@ -1136,7 +1140,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q = &dev->vb_vbi_cap_q;
q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE :
V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vbi_cap_qops;
@@ -1157,7 +1163,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q = &dev->vb_vbi_out_q;
q->type = dev->has_raw_vbi_out ? V4L2_BUF_TYPE_VBI_OUTPUT :
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vbi_out_qops;
@@ -1177,7 +1185,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* initialize sdr_cap queue */
q = &dev->vb_sdr_cap_q;
q->type = V4L2_BUF_TYPE_SDR_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ if (!allocator)
+ q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_sdr_cap_qops;
@@ -1468,9 +1478,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
return 0;
unreg_dev:
-#ifdef CONFIG_MEDIA_CONTROLLER
- media_device_unregister(&dev->mdev);
-#endif
video_unregister_device(&dev->radio_tx_dev);
video_unregister_device(&dev->radio_rx_dev);
video_unregister_device(&dev->sdr_cap_dev);
@@ -1543,6 +1550,7 @@ static int vivid_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
+ media_device_cleanup(&dev->mdev);
#endif
if (dev->has_vid_cap) {
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index c059fc12668a..52eeda624d7e 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -124,7 +124,8 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
}
} else {
for (p = 0; p < buffers; p++)
- sizes[p] = tpg_g_line_width(&dev->tpg, p) * h +
+ sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
+ dev->fmt_cap->vdownsampling[p] +
dev->fmt_cap->data_offset[p];
}
@@ -161,7 +162,9 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
for (p = 0; p < buffers; p++) {
- size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height +
+ size = (tpg_g_line_width(&dev->tpg, p) *
+ dev->fmt_cap_rect.height) /
+ dev->fmt_cap->vdownsampling[p] +
dev->fmt_cap->data_offset[p];
if (vb2_plane_size(vb, p) < size) {
@@ -545,7 +548,8 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv,
for (p = 0; p < mp->num_planes; p++) {
mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
mp->plane_fmt[p].sizeimage =
- tpg_g_line_width(&dev->tpg, p) * mp->height +
+ (tpg_g_line_width(&dev->tpg, p) * mp->height) /
+ dev->fmt_cap->vdownsampling[p] +
dev->fmt_cap->data_offset[p];
}
return 0;
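The corrected sizeimage expression accounts for vertical chroma downsampling: each plane needs its line width times the frame height divided by that plane's vdownsampling factor, plus any data_offset. A sketch with hypothetical 4:2:0 values (vdownsampling = {1, 2, 2}):

    /* Bytes needed for one plane of a vertically downsampled format. */
    static unsigned int plane_size(unsigned int line_width,
                                   unsigned int height,
                                   unsigned int vdownsampling,
                                   unsigned int data_offset)
    {
            return line_width * height / vdownsampling + data_offset;
    }
    /* e.g. 640x480 4:2:0: luma plane_size(640, 480, 1, 0) == 307200,
     * each chroma plane plane_size(320, 480, 2, 0) == 76800 */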
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 661f4015fba1..74b83bcc6119 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -169,6 +169,36 @@ struct vivid_fmt vivid_formats[] = {
.alpha_mask = 0x000000ff,
},
{
+ .fourcc = V4L2_PIX_FMT_AYUV32,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x000000ff,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XYUV32,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VUYA32,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0xff000000,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VUYX32,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_GREY,
.vdownsampling = { 1 },
.bit_depth = { 8 },
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index ea250aee2b2e..e61b91b414f9 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -28,11 +28,12 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
const struct vivid_fmt *vfmt = dev->fmt_out;
unsigned planes = vfmt->buffers;
unsigned h = dev->fmt_out_rect.height;
- unsigned size = dev->bytesperline_out[0] * h;
+ unsigned int size = dev->bytesperline_out[0] * h + vfmt->data_offset[0];
unsigned p;
for (p = vfmt->buffers; p < vfmt->planes; p++)
- size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
+ size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p] +
+ vfmt->data_offset[p];
if (dev->field_out == V4L2_FIELD_ALTERNATE) {
/*
@@ -62,12 +63,14 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
if (sizes[0] < size)
return -EINVAL;
for (p = 1; p < planes; p++) {
- if (sizes[p] < dev->bytesperline_out[p] * h)
+ if (sizes[p] < dev->bytesperline_out[p] * h +
+ vfmt->data_offset[p])
return -EINVAL;
}
} else {
for (p = 0; p < planes; p++)
- sizes[p] = p ? dev->bytesperline_out[p] * h : size;
+ sizes[p] = p ? dev->bytesperline_out[p] * h +
+ vfmt->data_offset[p] : size;
}
if (vq->num_buffers + *nbuffers < 2)
@@ -81,21 +84,38 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
return 0;
}
-static int vid_out_buf_prepare(struct vb2_buffer *vb)
+static int vid_out_buf_out_validate(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- unsigned long size;
- unsigned planes;
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->field_out != V4L2_FIELD_ALTERNATE)
+ vbuf->field = dev->field_out;
+ else if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM)
+ return -EINVAL;
+ return 0;
+}
+
+static int vid_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ const struct vivid_fmt *vfmt = dev->fmt_out;
+ unsigned int planes = vfmt->buffers;
+ unsigned int h = dev->fmt_out_rect.height;
+ unsigned int size = dev->bytesperline_out[0] * h;
unsigned p;
+ for (p = vfmt->buffers; p < vfmt->planes; p++)
+ size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
+
dprintk(dev, 1, "%s\n", __func__);
if (WARN_ON(NULL == dev->fmt_out))
return -EINVAL;
- planes = dev->fmt_out->planes;
-
if (dev->buf_prepare_error) {
/*
* Error injection: test what happens if buf_prepare() returns
@@ -105,18 +125,13 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
return -EINVAL;
}
- if (dev->field_out != V4L2_FIELD_ALTERNATE)
- vbuf->field = dev->field_out;
- else if (vbuf->field != V4L2_FIELD_TOP &&
- vbuf->field != V4L2_FIELD_BOTTOM)
- return -EINVAL;
-
for (p = 0; p < planes; p++) {
- size = dev->bytesperline_out[p] * dev->fmt_out_rect.height +
- vb->planes[p].data_offset;
+ if (p)
+ size = dev->bytesperline_out[p] * h;
+ size += vb->planes[p].data_offset;
if (vb2_get_plane_payload(vb, p) < size) {
- dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n",
+ dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %u)\n",
__func__, p, vb2_get_plane_payload(vb, p), size);
return -EINVAL;
}
@@ -188,6 +203,7 @@ static void vid_out_buf_request_complete(struct vb2_buffer *vb)
const struct vb2_ops vivid_vid_out_qops = {
.queue_setup = vid_out_queue_setup,
+ .buf_out_validate = vid_out_buf_out_validate,
.buf_prepare = vid_out_buf_prepare,
.buf_queue = vid_out_buf_queue,
.start_streaming = vid_out_start_streaming,
@@ -321,7 +337,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
for (p = 0; p < mp->num_planes; p++) {
mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
mp->plane_fmt[p].sizeimage =
- mp->plane_fmt[p].bytesperline * mp->height;
+ mp->plane_fmt[p].bytesperline * mp->height +
+ fmt->data_offset[p];
}
for (p = fmt->buffers; p < fmt->planes; p++) {
unsigned stride = dev->bytesperline_out[p];
@@ -399,7 +416,7 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv,
pfmt[p].bytesperline = bytesperline;
pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
- fmt->vdownsampling[p];
+ fmt->vdownsampling[p] + fmt->data_offset[p];
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
diff --git a/drivers/media/platform/vsp1/vsp1_brx.c b/drivers/media/platform/vsp1/vsp1_brx.c
index 5e50178b057d..58ad248cd7f7 100644
--- a/drivers/media/platform/vsp1/vsp1_brx.c
+++ b/drivers/media/platform/vsp1/vsp1_brx.c
@@ -296,7 +296,7 @@ static void brx_configure_stream(struct vsp1_entity *entity,
/*
* The hardware is extremely flexible but we have no userspace API to
* expose all the parameters, nor is it clear whether we would have use
- * cases for all the supported modes. Let's just harcode the parameters
+ * cases for all the supported modes. Let's just hardcode the parameters
* to sane default values for now.
*/
@@ -373,7 +373,7 @@ static void brx_configure_stream(struct vsp1_entity *entity,
vsp1_brx_write(brx, dlb, VI6_BRU_CTRL(i), ctrl);
/*
- * Harcode the blending formula to
+ * Hardcode the blending formula to
*
* DSTc = DSTc * (1 - SRCa) + SRCc * SRCa
* DSTa = DSTa * (1 - SRCa) + SRCa
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index 8d86f618ec77..84895385d2e5 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -333,19 +333,19 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1,
* on the BRx sink pad 0 and propagated inside the entity, not on the
* source pad.
*/
- format.pad = pipe->brx->source_pad;
+ format.pad = brx->source_pad;
format.format.width = drm_pipe->width;
format.format.height = drm_pipe->height;
format.format.field = V4L2_FIELD_NONE;
- ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_fmt, NULL,
+ ret = v4l2_subdev_call(&brx->subdev, pad, set_fmt, NULL,
&format);
if (ret < 0)
return ret;
dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n",
__func__, format.format.width, format.format.height,
- format.format.code, BRX_NAME(pipe->brx), pipe->brx->source_pad);
+ format.format.code, BRX_NAME(brx), brx->source_pad);
if (format.format.width != drm_pipe->width ||
format.format.height != drm_pipe->height) {
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 771dfe1f7c20..7ceaf3222145 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -223,7 +223,7 @@ static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe,
* If the modulus is less than half of the partition size,
* the penultimate partition is reduced to half, which is added
* to the final partition: |1234|1234|1234|12|341|
- * to prevents this: |1234|1234|1234|1234|1|.
+ * to prevent this: |1234|1234|1234|1234|1|.
*/
if (modulus) {
/*
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 18f98838111b..08a825c3a3f6 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(xvip_set_format_size);
* the register, otherwise the bitmask is cleared from the register
* when the flag @set is false.
*
- * Fox eample, this function can be used to set a control with a boolean value
+ * For example, this function can be used to set a control with a boolean value
* requested by users. If the caller knows whether to set or clear in the first
* place, the caller should call xvip_clr() or xvip_set() directly instead of
* using this function.
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 269971145f88..0261f4d28f16 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -1550,7 +1550,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
rval = si476x_radio_init_debugfs(radio);
if (rval < 0) {
- dev_err(&pdev->dev, "Could not creat debugfs interface\n");
+ dev_err(&pdev->dev, "Could not create debugfs interface\n");
goto exit;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 9751ea1d80be..15eea2b2c90f 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include "radio-si470x.h"
@@ -350,7 +351,7 @@ static int si470x_i2c_probe(struct i2c_client *client,
unsigned char version_warning = 0;
/* private data allocation and initialization */
- radio = kzalloc(sizeof(struct si470x_device), GFP_KERNEL);
+ radio = devm_kzalloc(&client->dev, sizeof(*radio), GFP_KERNEL);
if (!radio) {
retval = -ENOMEM;
goto err_initial;
@@ -370,7 +371,7 @@ static int si470x_i2c_probe(struct i2c_client *client,
retval = v4l2_device_register(&client->dev, &radio->v4l2_dev);
if (retval < 0) {
dev_err(&client->dev, "couldn't register v4l2_device\n");
- goto err_radio;
+ goto err_initial;
}
v4l2_ctrl_handler_init(&radio->hdl, 2);
@@ -392,18 +393,29 @@ static int si470x_i2c_probe(struct i2c_client *client,
radio->videodev.release = video_device_release_empty;
video_set_drvdata(&radio->videodev, radio);
+ radio->gpio_reset = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(radio->gpio_reset)) {
+ retval = PTR_ERR(radio->gpio_reset);
+ dev_err(&client->dev, "Failed to request gpio: %d\n", retval);
+ goto err_all;
+ }
+
+ if (radio->gpio_reset)
+ gpiod_set_value(radio->gpio_reset, 1);
+
/* power up : need 110ms */
radio->registers[POWERCFG] = POWERCFG_ENABLE;
if (si470x_set_register(radio, POWERCFG) < 0) {
retval = -EIO;
- goto err_ctrl;
+ goto err_all;
}
msleep(110);
/* get device and chip versions */
if (si470x_get_all_registers(radio) < 0) {
retval = -EIO;
- goto err_ctrl;
+ goto err_all;
}
dev_info(&client->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n",
radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
@@ -430,10 +442,10 @@ static int si470x_i2c_probe(struct i2c_client *client,
/* rds buffer allocation */
radio->buf_size = rds_buf * 3;
- radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
+ radio->buffer = devm_kmalloc(&client->dev, radio->buf_size, GFP_KERNEL);
if (!radio->buffer) {
retval = -EIO;
- goto err_ctrl;
+ goto err_all;
}
/* rds buffer configuration */
@@ -441,12 +453,13 @@ static int si470x_i2c_probe(struct i2c_client *client,
radio->rd_index = 0;
init_waitqueue_head(&radio->read_queue);
- retval = request_threaded_irq(client->irq, NULL, si470x_i2c_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT, DRIVER_NAME,
- radio);
+ retval = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ si470x_i2c_interrupt,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ DRIVER_NAME, radio);
if (retval) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_rds;
+ goto err_all;
}
/* register video device */
@@ -460,15 +473,9 @@ static int si470x_i2c_probe(struct i2c_client *client,
return 0;
err_all:
- free_irq(client->irq, radio);
-err_rds:
- kfree(radio->buffer);
-err_ctrl:
v4l2_ctrl_handler_free(&radio->hdl);
err_dev:
v4l2_device_unregister(&radio->v4l2_dev);
-err_radio:
- kfree(radio);
err_initial:
return retval;
}
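The si470x probe conversion follows the usual devm pattern: device-managed allocations and IRQs are released automatically on unbind or on probe failure, which is why the err_rds/err_ctrl/err_radio unwind labels collapse into a single err_all. A minimal sketch of the idiom (hypothetical probe, assuming only managed resources):

    #include <linux/device.h>
    #include <linux/slab.h>

    static int example_probe(struct device *dev)
    {
            void *buf = devm_kzalloc(dev, 64, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM; /* nothing to unwind by hand */

            /* further devm_* acquisitions: on any failure, just return;
             * everything acquired so far is freed by the devres core */
            return 0;
    }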
@@ -481,9 +488,10 @@ static int si470x_i2c_remove(struct i2c_client *client)
{
struct si470x_device *radio = i2c_get_clientdata(client);
- free_irq(client->irq, radio);
video_unregister_device(&radio->videodev);
- kfree(radio);
+
+ if (radio->gpio_reset)
+ gpiod_set_value(radio->gpio_reset, 0);
return 0;
}
@@ -527,6 +535,13 @@ static int si470x_i2c_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(si470x_i2c_pm, si470x_i2c_suspend, si470x_i2c_resume);
#endif
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id si470x_of_match[] = {
+ { .compatible = "silabs,si470x" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, si470x_of_match);
+#endif
/*
* si470x_i2c_driver - i2c driver interface
@@ -534,6 +549,7 @@ static SIMPLE_DEV_PM_OPS(si470x_i2c_pm, si470x_i2c_suspend, si470x_i2c_resume);
static struct i2c_driver si470x_i2c_driver = {
.driver = {
.name = "si470x",
+ .of_match_table = of_match_ptr(si470x_of_match),
#ifdef CONFIG_PM_SLEEP
.pm = &si470x_i2c_pm,
#endif
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 35fa0f3bbdd2..6fd6a399cb77 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -189,6 +189,7 @@ struct si470x_device {
#if IS_ENABLED(CONFIG_I2C_SI470X)
struct i2c_client *client;
+ struct gpio_desc *gpio_reset;
#endif
};
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
index 1ff2eec4ed52..4c0d13539988 100644
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ b/drivers/media/radio/wl128x/fmdrv.h
@@ -133,7 +133,7 @@ struct fm_rds {
/*
* Current RX channel Alternate Frequency cache.
* This info is used to switch to other freq (AF)
- * when current channel signal strengh is below RSSI threshold.
+ * when current channel signal strength is below RSSI threshold.
*/
struct tuned_station_info {
u16 picode;
@@ -228,7 +228,7 @@ struct fmdev {
struct fm_rx rx; /* FM receiver info */
struct fmtx_data tx_data;
- /* V4L2 ctrl framwork handler*/
+ /* V4L2 ctrl framework handler*/
struct v4l2_ctrl_handler ctrl_handler;
/* For core assisted locking */
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 800d69c3f80b..3c8987af3772 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -908,7 +908,7 @@ static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
u16 frq_index;
u16 payload;
- fmdbg("Swtich to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
+ fmdbg("Switch to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
@@ -1047,7 +1047,7 @@ static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}
-/* Returns availability of RDS data in internel buffer */
+/* Returns availability of RDS data in internal buffer */
int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
struct poll_table_struct *pts)
{
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 8a216068a35a..96ce3e5524e0 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -133,6 +133,19 @@ config IR_IMON_DECODER
remote control and you would like to use it with a raw IR
receiver, or if you wish to use an encoder to transmit this IR.
+config IR_RCMM_DECODER
+ tristate "Enable IR raw decoder for the RC-MM protocol"
+ depends on RC_CORE
+ help
+ Enable this option if you have a remote that uses the RC-MM
+ protocol and you need the software decoder. The driver supports
+ the 12-, 24- and 32-bit RC-MM variants. You can enable or disable
+ the individual variants using the following RC protocol keywords:
+ 'rc-mm-12', 'rc-mm-24' and 'rc-mm-32'.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ir-rcmm-decoder.
+
endif #RC_DECODERS
menuconfig RC_DEVICES
@@ -240,7 +253,7 @@ config IR_FINTEK
depends on RC_CORE
---help---
Say Y here to enable support for integrated infrared receiver
- /transciever made by Fintek. This chip is found on assorted
+ /transceiver made by Fintek. This chip is found on assorted
Jetway motherboards (and of course, possibly others).
To compile this driver as a module, choose M here: the
@@ -274,7 +287,7 @@ config IR_NUVOTON
depends on RC_CORE
---help---
Say Y here to enable support for integrated infrared receiver
- /transciever made by Nuvoton (formerly Winbond). This chip is
+ /transceiver made by Nuvoton (formerly Winbond). This chip is
found in the ASRock ION 330HT, as well as assorted Intel
DP55-series motherboards (and of course, possibly others).
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 92c163816849..48d23433b3c0 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_IR_SHARP_DECODER) += ir-sharp-decoder.o
obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o
obj-$(CONFIG_IR_XMP_DECODER) += ir-xmp-decoder.o
obj-$(CONFIG_IR_IMON_DECODER) += ir-imon-decoder.o
+obj-$(CONFIG_IR_RCMM_DECODER) += ir-rcmm-decoder.o
# stand-alone IR receivers/transmitters
obj-$(CONFIG_RC_ATI_REMOTE) += ati_remote.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 265e91a2a70d..bc2da64858c3 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -304,7 +304,7 @@ static const struct {
{KIND_LITERAL, 0x7c, BTN_RIGHT},/* right btn down */
{KIND_LITERAL, 0x7d, BTN_RIGHT},/* right btn up */
- /* Artificial "doubleclick" events are generated by the hardware.
+ /* Artificial "double-click" events are generated by the hardware.
* They are mapped to the "side" and "extra" mouse buttons here. */
{KIND_FILTERED, 0x7a, BTN_SIDE}, /* left dblclick */
{KIND_FILTERED, 0x7e, BTN_EXTRA},/* right dblclick */
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index dd2fd307ef85..293ccee2c05e 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -184,7 +184,7 @@ static int ene_hw_detect(struct ene_device *dev)
return 0;
}
-/* Read properities of hw sample buffer */
+/* Read properties of hw sample buffer */
static void ene_rx_setup_hw_buffer(struct ene_device *dev)
{
u16 tmp;
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index 494646b2a284..0a45352efe40 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -118,7 +118,7 @@
#define ENE_CIRDAT_IN 0xFEC7
-/* RLC configuration - sample period (1us resulution) + idle mode */
+/* RLC configuration - sample period (1us resolution) + idle mode */
#define ENE_CIRRLC_CFG 0xFEC8
#define ENE_CIRRLC_CFG_OVERFLOW 0x80 /* interrupt on overflows if set */
#define ENE_DEFAULT_SAMPLE_PERIOD 50
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h
index ac34a774d018..dffe0bbfc6eb 100644
--- a/drivers/media/rc/fintek-cir.h
+++ b/drivers/media/rc/fintek-cir.h
@@ -176,7 +176,7 @@ struct fintek_dev {
#define CIR_CR_IRCS 0x05 /* Before host writes command to IR, host
must set to 1. When host finshes write
command to IR, host must clear to 0. */
-#define CIR_CR_COMMAND_DATA 0x06 /* Host read or write comand data */
+#define CIR_CR_COMMAND_DATA 0x06 /* Host read or write command data */
#define CIR_CR_CLASS 0x07 /* 0xff = rx-only, 0x66 = rx + 2 tx,
0x33 = rx + 1 tx */
#define CIR_CR_DEV_EN 0x30 /* bit0 = 1 enables CIR */
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index d96aed1343e4..5cc302fa4daa 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -40,6 +40,7 @@
#define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */
#define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */
#define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */
+#define RC6_6A_ZOTAC_CC 0x80340000 /* Zotac customer code */
#define RC6_6A_KATHREIN_CC 0x80460000 /* Kathrein RCU-676 customer code */
#ifndef CHAR_BIT
#define CHAR_BIT 8 /* Normally in <limits.h> */
@@ -246,6 +247,7 @@ again:
switch (scancode & RC6_6A_LCC_MASK) {
case RC6_6A_MCE_CC:
case RC6_6A_KATHREIN_CC:
+ case RC6_6A_ZOTAC_CC:
protocol = RC_PROTO_RC6_MCE;
toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
diff --git a/drivers/media/rc/ir-rcmm-decoder.c b/drivers/media/rc/ir-rcmm-decoder.c
new file mode 100644
index 000000000000..f1096ac1e5c5
--- /dev/null
+++ b/drivers/media/rc/ir-rcmm-decoder.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0+
+// ir-rcmm-decoder.c - A decoder for the RCMM IR protocol
+//
+// Copyright (C) 2018 by Patrick Lerda <patrick9876@free.fr>
+
+#include "rc-core-priv.h"
+#include <linux/module.h>
+#include <linux/version.h>
+
+#define RCMM_UNIT 166667 /* nanosecs */
+#define RCMM_PREFIX_PULSE 416666 /* 166666.666666666*2.5 */
+#define RCMM_PULSE_0 277777 /* 166666.666666666*(1+2/3) */
+#define RCMM_PULSE_1 444444 /* 166666.666666666*(2+2/3) */
+#define RCMM_PULSE_2 611111 /* 166666.666666666*(3+2/3) */
+#define RCMM_PULSE_3 777778 /* 166666.666666666*(4+2/3) */
+
+enum rcmm_state {
+ STATE_INACTIVE,
+ STATE_LOW,
+ STATE_BUMP,
+ STATE_VALUE,
+ STATE_FINISHED,
+};
+
+static bool rcmm_mode(const struct rcmm_dec *data)
+{
+ return !((0x000c0000 & data->bits) == 0x000c0000);
+}
+
+static int rcmm_miscmode(struct rc_dev *dev, struct rcmm_dec *data)
+{
+ switch (data->count) {
+ case 24:
+ if (dev->enabled_protocols & RC_PROTO_BIT_RCMM24) {
+ rc_keydown(dev, RC_PROTO_RCMM24, data->bits, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+ return -1;
+
+ case 12:
+ if (dev->enabled_protocols & RC_PROTO_BIT_RCMM12) {
+ rc_keydown(dev, RC_PROTO_RCMM12, data->bits, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+ return -1;
+ }
+
+ return -1;
+}
+
+/**
+ * ir_rcmm_decode() - Decode one RCMM pulse or space
+ * @dev: the struct rc_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
+{
+ struct rcmm_dec *data = &dev->raw->rcmm;
+ u32 scancode;
+ u8 toggle;
+ int value;
+
+ if (!(dev->enabled_protocols & (RC_PROTO_BIT_RCMM32 |
+ RC_PROTO_BIT_RCMM24 |
+ RC_PROTO_BIT_RCMM12)))
+ return 0;
+
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ switch (data->state) {
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT / 2))
+ break;
+
+ data->state = STATE_LOW;
+ data->count = 0;
+ data->bits = 0;
+ return 0;
+
+ case STATE_LOW:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT / 2))
+ break;
+
+ data->state = STATE_BUMP;
+ return 0;
+
+ case STATE_BUMP:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, RCMM_UNIT, RCMM_UNIT / 2))
+ break;
+
+ data->state = STATE_VALUE;
+ return 0;
+
+ case STATE_VALUE:
+ if (ev.pulse)
+ break;
+
+ if (eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT / 2))
+ value = 0;
+ else if (eq_margin(ev.duration, RCMM_PULSE_1, RCMM_UNIT / 2))
+ value = 1;
+ else if (eq_margin(ev.duration, RCMM_PULSE_2, RCMM_UNIT / 2))
+ value = 2;
+ else if (eq_margin(ev.duration, RCMM_PULSE_3, RCMM_UNIT / 2))
+ value = 3;
+ else
+ value = -1;
+
+ if (value == -1) {
+ if (!rcmm_miscmode(dev, data))
+ return 0;
+ break;
+ }
+
+ data->bits <<= 2;
+ data->bits |= value;
+
+ data->count += 2;
+
+ if (data->count < 32)
+ data->state = STATE_BUMP;
+ else
+ data->state = STATE_FINISHED;
+
+ return 0;
+
+ case STATE_FINISHED:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, RCMM_UNIT, RCMM_UNIT / 2))
+ break;
+
+ if (rcmm_mode(data)) {
+ toggle = !!(0x8000 & data->bits);
+ scancode = data->bits & ~0x8000;
+ } else {
+ toggle = 0;
+ scancode = data->bits;
+ }
+
+ if (dev->enabled_protocols & RC_PROTO_BIT_RCMM32) {
+ rc_keydown(dev, RC_PROTO_RCMM32, scancode, toggle);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ break;
+ }
+
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static const int rcmmspace[] = {
+ RCMM_PULSE_0,
+ RCMM_PULSE_1,
+ RCMM_PULSE_2,
+ RCMM_PULSE_3,
+};
+
+static int ir_rcmm_rawencoder(struct ir_raw_event **ev, unsigned int max,
+ unsigned int n, u32 data)
+{
+ int i;
+ int ret;
+
+ ret = ir_raw_gen_pulse_space(ev, &max, RCMM_PREFIX_PULSE, RCMM_PULSE_0);
+ if (ret)
+ return ret;
+
+ for (i = n - 2; i >= 0; i -= 2) {
+ const unsigned int space = rcmmspace[(data >> i) & 3];
+
+ ret = ir_raw_gen_pulse_space(ev, &max, RCMM_UNIT, space);
+ if (ret)
+ return ret;
+ }
+
+ return ir_raw_gen_pulse_space(ev, &max, RCMM_UNIT, RCMM_PULSE_3 * 2);
+}
+
+static int ir_rcmm_encode(enum rc_proto protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ struct ir_raw_event *e = events;
+ int ret;
+
+ switch (protocol) {
+ case RC_PROTO_RCMM32:
+ ret = ir_rcmm_rawencoder(&e, max, 32, scancode);
+ break;
+ case RC_PROTO_RCMM24:
+ ret = ir_rcmm_rawencoder(&e, max, 24, scancode);
+ break;
+ case RC_PROTO_RCMM12:
+ ret = ir_rcmm_rawencoder(&e, max, 12, scancode);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return e - events;
+}
+
+static struct ir_raw_handler rcmm_handler = {
+ .protocols = RC_PROTO_BIT_RCMM32 |
+ RC_PROTO_BIT_RCMM24 |
+ RC_PROTO_BIT_RCMM12,
+ .decode = ir_rcmm_decode,
+ .encode = ir_rcmm_encode,
+ .carrier = 36000,
+ .min_timeout = RCMM_PULSE_3 + RCMM_UNIT,
+};
+
+static int __init ir_rcmm_decode_init(void)
+{
+ ir_raw_handler_register(&rcmm_handler);
+
+ pr_info("IR RCMM protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_rcmm_decode_exit(void)
+{
+ ir_raw_handler_unregister(&rcmm_handler);
+}
+
+module_init(ir_rcmm_decode_init);
+module_exit(ir_rcmm_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick Lerda");
+MODULE_DESCRIPTION("RCMM IR protocol decoder");
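
For reference, the duration classification done in STATE_VALUE can be
modeled in isolation: each space encodes two bits, so a full 32-bit
scancode takes 16 pulse/space pairs after the prefix. A user-space
sketch, with eq_margin() mirroring the helper that rc-core-priv.h is
assumed to provide:

#include <stdbool.h>
#include <stdio.h>

#define RCMM_UNIT	166667	/* ns */
#define RCMM_PULSE_0	277777	/* ns; each symbol adds one RCMM_UNIT */

/* Same test as the kernel helper: d2 - margin < d1 < d2 + margin. */
static bool eq_margin(unsigned int d1, unsigned int d2, unsigned int margin)
{
	return d1 > d2 - margin && d1 < d2 + margin;
}

/* Map a space duration to a 2-bit symbol, or -1 if out of range. */
static int rcmm_symbol(unsigned int duration)
{
	for (int v = 0; v < 4; v++)
		if (eq_margin(duration, RCMM_PULSE_0 + v * RCMM_UNIT,
			      RCMM_UNIT / 2))
			return v;
	return -1;
}

int main(void)
{
	/* 450000 ns is within RCMM_UNIT/2 of RCMM_PULSE_1 (444444 ns). */
	printf("%d\n", rcmm_symbol(450000));	/* prints 1 */
	return 0;
}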
diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c
index c965f51df1c1..2639b0b6d2f8 100644
--- a/drivers/media/rc/ir-xmp-decoder.c
+++ b/drivers/media/rc/ir-xmp-decoder.c
@@ -94,7 +94,7 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev)
n = data->durations;
/*
* the 4th nibble should be 15 so base the divider on this
- * to transform durations into nibbles. Substract 2000 from
+ * to transform durations into nibbles. Subtract 2000 from
* the divider to compensate for fluctuations in the signal
*/
divider = (n[3] - XMP_NIBBLE_PREFIX) / 15 - 2000;
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index cd3c60ba8519..1d48a9e59f93 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -515,7 +515,7 @@ static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n)
/* and set the carrier values for reception */
ite_set_carrier_params(dev);
- /* reenable the receiver */
+ /* re-enable the receiver */
if (dev->in_use)
dev->params.enable_rx(dev);
diff --git a/drivers/media/rc/keymaps/rc-behold-columbus.c b/drivers/media/rc/keymaps/rc-behold-columbus.c
index e73057945bd1..b68380a76010 100644
--- a/drivers/media/rc/keymaps/rc-behold-columbus.c
+++ b/drivers/media/rc/keymaps/rc-behold-columbus.c
@@ -14,7 +14,7 @@
* The "ascii-art picture" below (in comments, first row
* is the keycode in hex, and subsequent row(s) shows
* the button labels (several variants when appropriate)
- * helps to descide which keycodes to assign to the buttons.
+ * helps to decide which keycodes to assign to the buttons.
*/
static struct rc_map_table behold_columbus[] = {
@@ -68,7 +68,7 @@ static struct rc_map_table behold_columbus[] = {
{ 0x18, KEY_VOLUMEDOWN },
/* 0x0E 0x1E 0x0F 0x1A *
- * Stop Pause Previouse Next *
+ * Stop Pause Previous Next *
* */
{ 0x0E, KEY_STOP },
diff --git a/drivers/media/rc/keymaps/rc-behold.c b/drivers/media/rc/keymaps/rc-behold.c
index e1b2c8e26883..2b7cddb2f36d 100644
--- a/drivers/media/rc/keymaps/rc-behold.c
+++ b/drivers/media/rc/keymaps/rc-behold.c
@@ -17,7 +17,7 @@
* The "ascii-art picture" below (in comments, first row
* is the keycode in hex, and subsequent row(s) shows
* the button labels (several variants when appropriate)
- * helps to descide which keycodes to assign to the buttons.
+ * helps to decide which keycodes to assign to the buttons.
*/
static struct rc_map_table behold[] = {
diff --git a/drivers/media/rc/keymaps/rc-manli.c b/drivers/media/rc/keymaps/rc-manli.c
index 29c9feaf413b..5e9a49e2dd6a 100644
--- a/drivers/media/rc/keymaps/rc-manli.c
+++ b/drivers/media/rc/keymaps/rc-manli.c
@@ -14,7 +14,7 @@
The "ascii-art picture" below (in comments, first row
is the keycode in hex, and subsequent row(s) shows
the button labels (several variants when appropriate)
- helps to descide which keycodes to assign to the buttons.
+ helps to decide which keycodes to assign to the buttons.
*/
static struct rc_map_table manli[] = {
diff --git a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
index 4988e71c524c..cf98cf8dc13c 100644
--- a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
+++ b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
@@ -26,7 +26,7 @@ static struct rc_map_table powercolor_real_angel[] = {
{ 0x07, KEY_7 },
{ 0x08, KEY_8 },
{ 0x09, KEY_9 },
- { 0x0a, KEY_DIGITS }, /* single, double, tripple digit */
+ { 0x0a, KEY_DIGITS }, /* single, double, triple digit */
{ 0x29, KEY_PREVIOUS }, /* previous channel */
{ 0x12, KEY_BRIGHTNESSUP },
{ 0x13, KEY_BRIGHTNESSDOWN },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 8d7d3ef88862..fa4840940486 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -79,7 +79,7 @@
#define MCE_CMD 0x1f
#define MCE_PORT_IR 0x4 /* (0x4 << 5) | MCE_CMD = 0x9f */
#define MCE_PORT_SYS 0x7 /* (0x7 << 5) | MCE_CMD = 0xff */
-#define MCE_PORT_SER 0x6 /* 0xc0 thru 0xdf flush & 0x1f bytes */
+#define MCE_PORT_SER 0x6 /* 0xc0 through 0xdf flush & 0x1f bytes */
#define MCE_PORT_MASK 0xe0 /* Mask out command bits */
/* Command port headers */
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index c2cbe7f6266c..9f21b3e8b377 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -131,6 +131,11 @@ struct ir_raw_event_ctrl {
unsigned int bits;
bool stick_keyboard;
} imon;
+ struct rcmm_dec {
+ int state;
+ unsigned int count;
+ u32 bits;
+ } rcmm;
};
/* Mutex for locking raw IR processing and handler change */
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index e10b4644a442..39dd46bbd0c1 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -186,7 +186,7 @@ int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
dev->raw->this_ev = *ev;
}
- /* Enter idle mode if nessesary */
+ /* Enter idle mode if necessary */
if (!ev->pulse && dev->timeout &&
dev->raw->this_ev.duration >= dev->timeout)
ir_raw_event_set_idle(dev, true);
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 66a174979b3c..e8fa28e20192 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -70,6 +70,12 @@ static const struct {
[RC_PROTO_CEC] = { .name = "cec", .repeat_period = 0 },
[RC_PROTO_IMON] = { .name = "imon",
.scancode_bits = 0x7fffffff, .repeat_period = 114 },
+ [RC_PROTO_RCMM12] = { .name = "rc-mm-12",
+ .scancode_bits = 0x00000fff, .repeat_period = 114 },
+ [RC_PROTO_RCMM24] = { .name = "rc-mm-24",
+ .scancode_bits = 0x00ffffff, .repeat_period = 114 },
+ [RC_PROTO_RCMM32] = { .name = "rc-mm-32",
+ .scancode_bits = 0xffffffff, .repeat_period = 114 },
};
/* Used to keep track of known keymaps */
@@ -274,6 +280,7 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
unsigned int new_keycode)
{
int old_keycode = rc_map->scan[index].keycode;
+ int i;
/* Did the user wish to remove the mapping? */
if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) {
@@ -288,9 +295,20 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
old_keycode == KEY_RESERVED ? "New" : "Replacing",
rc_map->scan[index].scancode, new_keycode);
rc_map->scan[index].keycode = new_keycode;
+ __set_bit(new_keycode, dev->input_dev->keybit);
}
if (old_keycode != KEY_RESERVED) {
+ /* A previous mapping was updated... */
+ __clear_bit(old_keycode, dev->input_dev->keybit);
+ /* ... but another scancode might use the same keycode */
+ for (i = 0; i < rc_map->len; i++) {
+ if (rc_map->scan[i].keycode == old_keycode) {
+ __set_bit(old_keycode, dev->input_dev->keybit);
+ break;
+ }
+ }
+
/* Possibly shrink the keytable, failure is not a problem */
ir_resize_table(dev, rc_map, GFP_ATOMIC);
}
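
The rescan loop above matters because several scancodes may share one
keycode: clearing the capability bit unconditionally would break the
surviving mappings. A reduced user-space model of that decision, using a
hypothetical fixed table:

#include <stdbool.h>
#include <stddef.h>

struct map_entry {
	unsigned int scancode;
	unsigned int keycode;
};

/*
 * After one entry was remapped away from old_keycode, report whether
 * the input capability bit for old_keycode must stay set.
 */
static bool keycode_still_used(const struct map_entry *map, size_t len,
			       unsigned int old_keycode)
{
	for (size_t i = 0; i < len; i++)
		if (map[i].keycode == old_keycode)
			return true;	/* another scancode still uses it */
	return false;
}

int main(void)
{
	struct map_entry map[] = {
		{ 0x01, 28 },	/* 28 = KEY_ENTER */
		{ 0x02, 28 },	/* second scancode, same keycode */
	};

	/* Both map to 28: remapping one entry must keep the bit set. */
	return !keycode_still_used(map, 2, 28);
}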
@@ -1006,6 +1024,9 @@ static const struct {
{ RC_PROTO_BIT_XMP, "xmp", "ir-xmp-decoder" },
{ RC_PROTO_BIT_CEC, "cec", NULL },
{ RC_PROTO_BIT_IMON, "imon", "ir-imon-decoder" },
+ { RC_PROTO_BIT_RCMM12 |
+ RC_PROTO_BIT_RCMM24 |
+ RC_PROTO_BIT_RCMM32, "rc-mm", "ir-rcmm-decoder" },
};
/**
@@ -1035,7 +1056,7 @@ struct rc_filter_attribute {
* @buf: a pointer to the output buffer
*
* This routine is a callback routine for input read the IR protocol type(s).
- * it is trigged by reading /sys/class/rc/rc?/protocols.
+ * It is triggered by reading /sys/class/rc/rc?/protocols.
* It returns the protocol names of supported protocols.
* Enabled protocols are printed in brackets.
*
@@ -1206,7 +1227,7 @@ void ir_raw_load_modules(u64 *protocols)
* @len: length of the input buffer
*
* This routine is for changing the IR protocol type.
- * It is trigged by writing to /sys/class/rc/rc?/[wakeup_]protocols.
+ * It is triggered by writing to /sys/class/rc/rc?/[wakeup_]protocols.
* See parse_protocol_change() for the valid commands.
* Returns @len on success or a negative error code.
*
@@ -1290,7 +1311,7 @@ out:
* @buf: a pointer to the output buffer
*
* This routine is a callback routine to read a scancode filter value or mask.
- * It is trigged by reading /sys/class/rc/rc?/[wakeup_]filter[_mask].
+ * It is triggered by reading /sys/class/rc/rc?/[wakeup_]filter[_mask].
* It prints the current scancode filter value or mask of the appropriate filter
* type in hexadecimal into @buf and returns the size of the buffer.
*
@@ -1333,7 +1354,7 @@ static ssize_t show_filter(struct device *device,
* @len: length of the input buffer
*
* This routine is for changing a scancode filter value or mask.
- * It is trigged by writing to /sys/class/rc/rc?/[wakeup_]filter[_mask].
+ * It is triggered by writing to /sys/class/rc/rc?/[wakeup_]filter[_mask].
* Returns -EINVAL if an invalid filter value for the current protocol was
* specified or if scancode filtering is not supported by the driver, otherwise
* returns @len.
@@ -1417,7 +1438,7 @@ unlock:
* @buf: a pointer to the output buffer
*
* This routine is a callback routine for input read the IR protocol type(s).
- * it is trigged by reading /sys/class/rc/rc?/wakeup_protocols.
+ * It is triggered by reading /sys/class/rc/rc?/wakeup_protocols.
* It returns the protocol names of supported protocols.
* The enabled protocols are printed in brackets.
*
@@ -1468,7 +1489,7 @@ static ssize_t show_wakeup_protocols(struct device *device,
* @len: length of the input buffer
*
* This routine is for changing the IR protocol type.
- * It is trigged by writing to /sys/class/rc/rc?/wakeup_protocols.
+ * It is triggered by writing to /sys/class/rc/rc?/wakeup_protocols.
* Returns @len on success or a negative error code.
*
* dev->lock is taken to guard against races between
@@ -1750,7 +1771,6 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
set_bit(EV_REP, dev->input_dev->evbit);
set_bit(EV_MSC, dev->input_dev->evbit);
set_bit(MSC_SCAN, dev->input_dev->mscbit);
- bitmap_fill(dev->input_dev->keybit, KEY_CNT);
/* Pointer/mouse events */
set_bit(EV_REL, dev->input_dev->evbit);
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 08c51ffd74a0..b82a5c9db12c 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -140,7 +140,7 @@ MODULE_PARM_DESC(length_fuzz, "Length Fuzz (0-127)");
* When receiving a continuous ir stream (for example when a user is
* holding a button down on a remote), this specifies the minimum size
* of a space when the redrat3 sends a irdata packet to the host. Specified
- * in miliseconds. Default value 18ms.
+ * in milliseconds. Default value 18ms.
* The value can be between 2 and 30 inclusive.
*/
static int minimum_pause = 18;
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index d5c433e20d4a..4077217777f9 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -522,13 +522,15 @@ cxd2880_spi_probe(struct spi_device *spi)
dvb_spi->vcc_supply = devm_regulator_get_optional(&spi->dev, "vcc");
if (IS_ERR(dvb_spi->vcc_supply)) {
- if (PTR_ERR(dvb_spi->vcc_supply) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(dvb_spi->vcc_supply) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto fail_adapter;
+ }
dvb_spi->vcc_supply = NULL;
} else {
ret = regulator_enable(dvb_spi->vcc_supply);
if (ret)
- return ret;
+ goto fail_adapter;
}
dvb_spi->spi = spi;
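
The cxd2880-spi change is the standard goto-unwind fix: after the DVB
adapter has been registered, later error paths must jump to a cleanup
label instead of returning directly, or the adapter leaks. A condensed
sketch of the pattern with stub functions standing in for the real probe
steps (the actual fail_adapter label is assumed to release the adapter
registered earlier):

#include <stdio.h>

static int register_adapter(void) { return 0; }   /* stub: succeeds */
static int acquire_regulator(void) { return -1; } /* stub: fails */
static void unregister_adapter(void) { puts("unwound"); }

static int example_probe(void)
{
	int ret = register_adapter();

	if (ret)
		return ret;	/* nothing acquired yet */

	ret = acquire_regulator();
	if (ret)
		goto fail_adapter;	/* release what probe acquired */

	return 0;

fail_adapter:
	unregister_adapter();
	return ret;
}

int main(void)
{
	return example_probe() ? 1 : 0;
}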
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index ec584316c812..1c07e2225fb3 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -3584,7 +3584,7 @@ static u32 MXL_Ceiling(u32 value, u32 resolution)
return value / resolution + (value % resolution > 0 ? 1 : 0);
}
-/* Retrieve the Initialzation Registers */
+/* Retrieve the Initialization Registers */
static u16 MXL_GetInitRegister(struct dvb_frontend *fe, u8 *RegNum,
u8 *RegVal, int *count)
{
diff --git a/drivers/media/tuners/qm1d1b0004.h b/drivers/media/tuners/qm1d1b0004.h
index 7734ed109a22..7950ecd56430 100644
--- a/drivers/media/tuners/qm1d1b0004.h
+++ b/drivers/media/tuners/qm1d1b0004.h
@@ -14,7 +14,7 @@ struct qm1d1b0004_config {
struct dvb_frontend *fe;
u32 lpf_freq; /* LPF frequency[kHz]. Default: symbol rate */
- bool half_step; /* use PLL frequency step of 500Hz istead of 1000Hz */
+ bool half_step; /* use PLL frequency step of 500Hz instead of 1000Hz */
};
/* special values indicating to use the default in qm1d1b0004_config */
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index ba4be08a8551..aed2f130ec74 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -1664,7 +1664,7 @@ static int r820t_iq_tree(struct r820t_priv *priv,
/*
* record IMC results by input gain/phase location then adjust
- * gain or phase positive 1 step and negtive 1 step,
+ * gain or phase positive 1 step and negative 1 step,
* both record results
*/
@@ -2066,7 +2066,7 @@ static int r820t_imr_callibrate(struct r820t_priv *priv)
}
/*
- * Disables IMR callibration. That emulates the same behaviour
+ * Disables IMR calibration. That emulates the same behaviour
* as what is done by rtl-sdr userspace library. Useful for testing
*/
if (no_imr_cal) {
diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c
index 054b3b747dae..d46a2e775e82 100644
--- a/drivers/media/tuners/tda18271-common.c
+++ b/drivers/media/tuners/tda18271-common.c
@@ -528,14 +528,14 @@ int tda18271_init_regs(struct dvb_frontend *fe)
* Standby modes, EP3 [7:5]
*
* | SM || SM_LT || SM_XT || mode description
- * |=====\\=======\\=======\\===================================
+ * |=====\\=======\\=======\\====================================
* | 0 || 0 || 0 || normal mode
- * |-----||-------||-------||-----------------------------------
+ * |-----||-------||-------||------------------------------------
* | || || || standby mode w/ slave tuner output
- * | 1 || 0 || 0 || & loop thru & xtal oscillator on
- * |-----||-------||-------||-----------------------------------
+ * | 1 || 0 || 0 || & loop through & xtal oscillator on
+ * |-----||-------||-------||------------------------------------
* | 1 || 1 || 0 || standby mode w/ xtal oscillator on
- * |-----||-------||-------||-----------------------------------
+ * |-----||-------||-------||------------------------------------
* | 1 || 1 || 1 || power off
*
*/
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 4d69029229e4..cac6b8e62b73 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -48,7 +48,7 @@ static int tda18271_toggle_output(struct dvb_frontend *fe, int standby)
if (tda_fail(ret))
goto fail;
- tda_dbg("%s mode: xtal oscillator %s, slave tuner loop thru %s\n",
+ tda_dbg("%s mode: xtal oscillator %s, slave tuner loop through %s\n",
standby ? "standby" : "active",
priv->output_opt & TDA18271_OUTPUT_XT_OFF ? "off" : "on",
priv->output_opt & TDA18271_OUTPUT_LT_OFF ? "off" : "on");
diff --git a/drivers/media/tuners/tda18271.h b/drivers/media/tuners/tda18271.h
index 7e07966c5ace..1a23532586ef 100644
--- a/drivers/media/tuners/tda18271.h
+++ b/drivers/media/tuners/tda18271.h
@@ -69,10 +69,10 @@ enum tda18271_i2c_gate {
};
enum tda18271_output_options {
- /* slave tuner output & loop thru & xtal oscillator always on */
+ /* slave tuner output & loop through & xtal oscillator always on */
TDA18271_OUTPUT_LT_XT_ON = 0,
- /* slave tuner output loop thru off */
+ /* slave tuner output loop through off */
TDA18271_OUTPUT_LT_OFF = 1,
/* xtal oscillator off */
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index eb6d65dae748..a351390ee744 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1471,8 +1471,8 @@ static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
if (rc < 0)
goto ret;
- /* Informations from real testing of DVB-T and radio part,
- coeficient for one dB is 0xff.
+ /* Information from real testing of DVB-T and radio part,
+ coefficient for one dB is 0xff.
*/
tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 1fdb1601dc65..3f8c92a70116 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -234,7 +234,7 @@ static void au0828_media_graph_notify(struct media_entity *new,
if (!new) {
/*
* Called during au0828 probe time to connect
- * entites that were created prior to registering
+ * entities that were created prior to registering
* the notify handler. Find mixer and decoder.
*/
media_device_for_each_entity(entity, dev->media_dev) {
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index d9093a3c57c5..6e43028112d1 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -566,7 +566,7 @@ void au0828_dvb_unregister(struct au0828_dev *dev)
dvb->frontend = NULL;
}
-/* All the DVB attach calls go here, this function get's modified
+/* All the DVB attach calls go here, this function gets modified
* for each new card. No other function in this file needs
* to change.
*/
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 004eadef55c7..425c35d16057 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -52,7 +52,7 @@
#define AU0828_INTERLACED_DEFAULT 1
-/* Defination for AU0828 USB transfer */
+/* Definition for AU0828 USB transfer */
#define AU0828_MAX_ISO_BUFS 12 /* maybe resize this value in the future */
#define AU0828_ISO_PACKETS_PER_URB 128
diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
index ab238ac8bfc0..d0a464882510 100644
--- a/drivers/media/usb/cpia2/cpia2.h
+++ b/drivers/media/usb/cpia2/cpia2.h
@@ -350,7 +350,7 @@ struct cpia2_sbuf {
};
struct framebuf {
- struct timeval timestamp;
+ u64 ts;
unsigned long seq;
int num;
int length;
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index a771e0a52610..e5d8dee38fe4 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -324,7 +324,7 @@ static void cpia2_usb_complete(struct urb *urb)
continue;
}
DBG("Start of frame pattern found\n");
- v4l2_get_timestamp(&cam->workbuff->timestamp);
+ cam->workbuff->ts = ktime_get_ns();
cam->workbuff->seq = cam->frame_count++;
cam->workbuff->data[0] = 0xFF;
cam->workbuff->data[1] = 0xD8;
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 748739c2b8b2..95c0bd4a19dc 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -833,7 +833,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
break;
case FRAME_READY:
buf->bytesused = cam->buffers[buf->index].length;
- buf->timestamp = cam->buffers[buf->index].timestamp;
+ buf->timestamp = ns_to_timeval(cam->buffers[buf->index].ts);
buf->sequence = cam->buffers[buf->index].seq;
buf->flags = V4L2_BUF_FLAG_DONE;
break;
@@ -889,12 +889,7 @@ static int find_earliest_filled_buffer(struct camera_data *cam)
found = i;
} else {
/* find which buffer is earlier */
- struct timeval *tv1, *tv2;
- tv1 = &cam->buffers[i].timestamp;
- tv2 = &cam->buffers[found].timestamp;
- if(tv1->tv_sec < tv2->tv_sec ||
- (tv1->tv_sec == tv2->tv_sec &&
- tv1->tv_usec < tv2->tv_usec))
+ if (cam->buffers[i].ts < cam->buffers[found].ts)
found = i;
}
}
@@ -945,7 +940,7 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE
| V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->field = V4L2_FIELD_NONE;
- buf->timestamp = cam->buffers[buf->index].timestamp;
+ buf->timestamp = ns_to_timeval(cam->buffers[buf->index].ts);
buf->sequence = cam->buffers[buf->index].seq;
buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
buf->length = cam->frame_size;
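
Storing the timestamp as a u64 nanosecond count from ktime_get_ns()
collapses the old two-field timeval comparison into a single integer
compare; ns_to_timeval() is needed only at the V4L2 ioctl boundary. A
small sketch contrasting the two forms:

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>

/* Old form: lexicographic compare on (tv_sec, tv_usec). */
static bool earlier_tv(const struct timeval *a, const struct timeval *b)
{
	return a->tv_sec < b->tv_sec ||
	       (a->tv_sec == b->tv_sec && a->tv_usec < b->tv_usec);
}

/* New form: one compare on a monotonic nanosecond count. */
static bool earlier_ns(uint64_t a, uint64_t b)
{
	return a < b;
}

int main(void)
{
	struct timeval t1 = { 5, 100 }, t2 = { 5, 200 };

	/* 5 s + 100 us = 5000100000 ns; both forms agree. */
	return !(earlier_tv(&t1, &t2) &&
		 earlier_ns(5000100000ULL, 5000200000ULL));
}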
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 1c48c497bd6a..0f8ae81f4820 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1316,7 +1316,7 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.ts = ktime_get_ns();
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
dma_q->mpeg_buffer_completed = 0;
@@ -1347,7 +1347,7 @@ static void buffer_filled(char *data, int len, struct urb *urb,
memcpy(vbuf, data, len);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.ts = ktime_get_ns();
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index fdd3c221fa0d..3374888b3021 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -2987,7 +2987,7 @@ int cx231xx_gpio_i2c_write_ack(struct cx231xx *dev)
{
int status = 0;
- /* set SDA to ouput */
+ /* set SDA to output */
dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio;
status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val);
diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h
index 8f00b1d38277..bb4f817be0c5 100644
--- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h
+++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h
@@ -86,7 +86,7 @@ enum TS_PORT{
#define EAVP_MASK 0x8
enum EAV_PRESENT{
NO_EXTERNAL_AV = 0x0, /* 0: No External A/V inputs
- (no need for i2s blcok),
+ (no need for i2s block),
Analog Tuner must be present */
EXTERNAL_AV = 0x8 /* 1: External A/V inputs
present (requires i2s blk) */
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index 10b2eb7338ad..d16b73c04445 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -528,7 +528,7 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.ts = ktime_get_ns();
dev->vbi_mode.bulk_ctl.buf = NULL;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 0d451c4ea3b9..aebbaf9d92a6 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -182,7 +182,7 @@ static inline void buffer_filled(struct cx231xx *dev,
cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.ts = ktime_get_ns();
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = NULL;
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index fa640bf20111..86b7f57492b1 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -646,7 +646,7 @@ struct cx231xx {
/* frame properties */
int width; /* current frame width */
int height; /* current frame height */
- int interlaced; /* 1=interlace fileds, 0=just top fileds */
+ int interlaced; /* 1=interlace fields, 0=just top fields */
struct cx231xx_audio adev;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 3fd6cc0d6340..728ef5f3ada2 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -146,7 +146,7 @@ struct dvb_usb_rc {
};
/**
- * usb streaming configration for adapter
+ * usb streaming configuration for adapter
* @type: urb type
* @count: count of used urbs
* @endpoint: stream usb endpoint number
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 602013cf3e69..15944b95970f 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -308,7 +308,7 @@ static void lme2510_int_response(struct urb *lme_urb)
switch (ibuf[0]) {
case 0xaa:
- debug_data_snipet(1, "INT Remote data snipet", ibuf);
+ debug_data_snipet(1, "INT Remote data snippet", ibuf);
if (!adap_to_d(adap)->rc_dev)
break;
@@ -358,13 +358,13 @@ static void lme2510_int_response(struct urb *lme_urb)
lme2510_update_stats(adap);
- debug_data_snipet(5, "INT Remote data snipet in", ibuf);
+ debug_data_snipet(5, "INT Remote data snippet in", ibuf);
break;
case 0xcc:
- debug_data_snipet(1, "INT Control data snipet", ibuf);
+ debug_data_snipet(1, "INT Control data snippet", ibuf);
break;
default:
- debug_data_snipet(1, "INT Unknown data snipet", ibuf);
+ debug_data_snipet(1, "INT Unknown data snippet", ibuf);
break;
}
}
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 85cdf593a9ad..5e2d53af68c7 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -140,7 +140,7 @@ int mxl111sf_write_reg_mask(struct mxl111sf_state *state,
if (mask != 0xff) {
ret = mxl111sf_read_reg(state, addr, &val);
#if 1
- /* dont know why this usually errors out on the first try */
+ /* don't know why this usually errors out on the first try */
if (mxl_fail(ret))
pr_err("error writing addr: 0x%02x, mask: 0x%02x, data: 0x%02x, retrying...",
addr, mask, data);
@@ -783,7 +783,7 @@ static int mxl111sf_attach_demod(struct dvb_usb_adapter *adap, u8 fe_id)
if (mxl_fail(ret))
goto fail;
- /* dont care if this fails */
+ /* don't care if this fails */
mxl111sf_init_port_expander(state);
adap->fe[fe_id] = dvb_attach(mxl111sf_demod_attach, state,
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 16e946e01d2c..0638d907c73e 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -845,7 +845,7 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
/* deb_info("rc_query\n"); */
st->data[0] = 3; /* rest of packet length low */
- st->data[1] = 0; /* rest of packet lentgh high */
+ st->data[1] = 0; /* rest of packet length high */
st->data[2] = 0x40; /* read remote */
st->data[3] = 1; /* rest of packet length */
st->data[4] = seq = st->sequence++; /* sequence number */
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index df71df7ed524..4c9f83ba260d 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -33,7 +33,7 @@
* This function is probably reusable and may better get placed in a support
* library.
*
- * We replace errornous fields by default TPS fields (the ones with value 0).
+ * We replace erroneous fields by default TPS fields (the ones with value 0).
*/
static uint16_t compute_tps(struct dtv_frontend_properties *op)
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index a51a45c60233..9ddb2000249e 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -1016,7 +1016,7 @@ static int cxusb_dualdig4_rev2_tuner_attach(struct dvb_usb_adapter *adap)
/*
* No need to call dvb7000p_attach here, as it was called
* already, as frontend_attach method is called first, and
- * tuner_attach is only called on sucess.
+ * tuner_attach is only called on success.
*/
tun_i2c = st->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe,
DIBX000_I2C_INTERFACE_TUNER, 1);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 40ca4eafb137..99951e02a880 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -98,7 +98,7 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
/*
* when reloading the driver w/o replugging the device
- * sometimes a timeout occures, this helps
+ * sometimes a timeout occurs, this helps
*/
if (d->props.generic_bulk_ctrl_endpoint != 0) {
usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 317ed6a82d19..32829bdd5f22 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -336,7 +336,7 @@ struct usb_data_stream {
* struct dvb_usb_adapter - a DVB adapter on a USB device
* @id: index of this adapter (starting with 0).
*
- * @feedcount: number of reqested feeds (used for streaming-activation)
+ * @feedcount: number of requested feeds (used for streaming-activation)
* @pid_filtering: is hardware pid_filtering used or not.
*
* @pll_addr: I2C address of the tuner for programming
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 0af74383083d..150081128196 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -528,13 +528,13 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
rx = b0 + 5;
- /* hmm where shoud this should go? */
+	/* hmm, where should this go? */
ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
if (ret != 0)
info("%s: Warning set interface returned: %d\n",
__func__, ret);
- /* this is a one-time initialization, dont know where to put */
+	/* this is a one-time initialization, don't know where to put it */
b0[0] = 0xaa;
b0[1] = state->c++;
b0[2] = PCTV_CMD_RESET;
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 02c13d71e6c1..a3155ec196cc 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -296,7 +296,7 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
return ret;
}
/*
- * NOTE: some devices with two i2c busses have the bad habit to return 0
+ * NOTE: some devices with two i2c buses have the bad habit to return 0
* bytes if we are on bus B AND there was no write attempt to the
* specified slave address before AND no device is present at the
* requested slave address.
@@ -427,7 +427,7 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
return ret;
}
/*
- * NOTE: some devices with two i2c busses have the bad habit to return 0
+ * NOTE: some devices with two i2c buses have the bad habit to return 0
* bytes if we are on bus B AND there was no write attempt to the
* specified slave address before AND no device is present at the
* requested slave address.
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index f53afe18e92d..d7c60862874a 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -67,7 +67,7 @@
#define EM28XX_I2C_CLK_WAIT_ENABLE 0x40
#define EM28XX_I2C_EEPROM_ON_BOARD 0x08
#define EM28XX_I2C_EEPROM_KEY_VALID 0x04
-#define EM2874_I2C_SECONDARY_BUS_SELECT 0x04 /* em2874 has two i2c busses */
+#define EM2874_I2C_SECONDARY_BUS_SELECT 0x04 /* em2874 has two i2c buses */
#define EM28XX_I2C_FREQ_1_5_MHZ 0x03 /* bus frequency (bits [1-0]) */
#define EM28XX_I2C_FREQ_25_KHZ 0x02
#define EM28XX_I2C_FREQ_400_KHZ 0x01
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index d3b6665c342d..088566e88467 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -46,7 +46,7 @@ config USB_GSPCA_CPIA1
depends on VIDEO_V4L2 && USB_GSPCA
help
Say Y here if you want support for USB cameras based on the cpia
- CPiA chip. Note that you need atleast version 0.6.4 of libv4l for
+ CPiA chip. Note that you need at least version 0.6.4 of libv4l for
applications to understand the videoformat generated by this driver.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/usb/gspca/autogain_functions.c b/drivers/media/usb/gspca/autogain_functions.c
index 6dfab2b077f7..f915cc7c0c63 100644
--- a/drivers/media/usb/gspca/autogain_functions.c
+++ b/drivers/media/usb/gspca/autogain_functions.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(gspca_expo_autogain);
80 %) and if that does not help, only then changes exposure. This leads
to a much more stable image then using the knee algorithm which at
certain points of the knee graph will only try to adjust exposure,
- which leads to oscilating as one exposure step is huge.
+ which leads to oscillating as one exposure step is huge.
Returns 0 if no changes were made, 1 if the gain and or exposure settings
where changed. */
diff --git a/drivers/media/usb/gspca/benq.c b/drivers/media/usb/gspca/benq.c
index 8a8db5eb6d5f..1744591b8ba0 100644
--- a/drivers/media/usb/gspca/benq.c
+++ b/drivers/media/usb/gspca/benq.c
@@ -205,12 +205,12 @@ static void sd_isoc_irq(struct urb *urb)
* - 80 ba/bb 00 00 = start of image followed by 'ff d8'
* - 04 ba/bb oo oo = image piece
* where 'oo oo' is the image offset
- (not cheked)
+ (not checked)
* - (other -> bad frame)
* The images are JPEG encoded with full header and
* normal ff escape.
* The end of image ('ff d9') may occur in any URB.
- * (not cheked)
+ * (not checked)
*/
data = (u8 *) urb0->transfer_buffer
+ urb0->iso_frame_desc[i].offset;
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index 2b09af8865f4..7c817a4a93c4 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -547,10 +547,14 @@ static int do_command(struct gspca_dev *gspca_dev, u16 command,
}
if (sd->params.qx3.button) {
/* button pressed - unlock the latch */
- do_command(gspca_dev, CPIA_COMMAND_WriteMCPort,
+ ret = do_command(gspca_dev, CPIA_COMMAND_WriteMCPort,
3, 0xdf, 0xdf, 0);
- do_command(gspca_dev, CPIA_COMMAND_WriteMCPort,
+ if (ret)
+ return ret;
+ ret = do_command(gspca_dev, CPIA_COMMAND_WriteMCPort,
3, 0xff, 0xff, 0);
+ if (ret)
+ return ret;
}
/* test whether microscope is cradled */
@@ -1430,6 +1434,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
+ int ret;
sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
reset_camera_params(gspca_dev);
@@ -1441,7 +1446,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = mode;
cam->nmodes = ARRAY_SIZE(mode);
- goto_low_power(gspca_dev);
+ ret = goto_low_power(gspca_dev);
+ if (ret)
+ gspca_err(gspca_dev, "Cannot go to low power mode: %d\n",
+ ret);
/* Check the firmware version. */
sd->params.version.firmwareVersion = 0;
get_version_information(gspca_dev);
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 3137f5d89d80..ac70b36d67b7 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -912,25 +912,32 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
}
static int wxh_to_mode(struct gspca_dev *gspca_dev,
- int width, int height)
+ int width, int height, u32 pixelformat)
{
int i;
for (i = 0; i < gspca_dev->cam.nmodes; i++) {
if (width == gspca_dev->cam.cam_mode[i].width
- && height == gspca_dev->cam.cam_mode[i].height)
+ && height == gspca_dev->cam.cam_mode[i].height
+ && pixelformat == gspca_dev->cam.cam_mode[i].pixelformat)
return i;
}
return -EINVAL;
}
static int wxh_to_nearest_mode(struct gspca_dev *gspca_dev,
- int width, int height)
+ int width, int height, u32 pixelformat)
{
int i;
for (i = gspca_dev->cam.nmodes; --i > 0; ) {
if (width >= gspca_dev->cam.cam_mode[i].width
+ && height >= gspca_dev->cam.cam_mode[i].height
+ && pixelformat == gspca_dev->cam.cam_mode[i].pixelformat)
+ return i;
+ }
+ for (i = gspca_dev->cam.nmodes; --i > 0; ) {
+ if (width >= gspca_dev->cam.cam_mode[i].width
&& height >= gspca_dev->cam.cam_mode[i].height)
break;
}
@@ -1058,7 +1065,7 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
fmt->fmt.pix.pixelformat, w, h);
/* search the nearest mode for width and height */
- mode = wxh_to_nearest_mode(gspca_dev, w, h);
+ mode = wxh_to_nearest_mode(gspca_dev, w, h, fmt->fmt.pix.pixelformat);
/* OK if right palette */
if (gspca_dev->cam.cam_mode[mode].pixelformat
@@ -1152,7 +1159,8 @@ static int vidioc_enum_frameintervals(struct file *filp, void *priv,
int mode;
__u32 i;
- mode = wxh_to_mode(gspca_dev, fival->width, fival->height);
+ mode = wxh_to_mode(gspca_dev, fival->width, fival->height,
+ fival->pixel_format);
if (mode < 0)
return -EINVAL;
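
With the extra pixelformat parameter, wxh_to_nearest_mode() becomes a
two-pass search: prefer a fitting size whose pixel format matches
exactly, then fall back to the old size-only scan. A reduced model,
assuming the gspca convention of a mode table sorted by ascending size:

#include <stdio.h>

struct mode { int width, height; unsigned int fourcc; };

static int nearest_mode(const struct mode *m, int n,
			int w, int h, unsigned int fourcc)
{
	int i;

	/* Pass 1: size fits and the pixel format matches exactly. */
	for (i = n; --i > 0; )
		if (w >= m[i].width && h >= m[i].height &&
		    fourcc == m[i].fourcc)
			return i;
	/* Pass 2: fall back to the largest mode that still fits. */
	for (i = n; --i > 0; )
		if (w >= m[i].width && h >= m[i].height)
			break;
	return i;
}

int main(void)
{
	const struct mode modes[] = {
		{ 320, 240, 1 }, { 640, 480, 1 }, { 640, 480, 2 },
	};

	/* Requesting 640x480 with fourcc 1 prefers index 1 over 2. */
	printf("%d\n", nearest_mode(modes, 3, 640, 480, 1));
	return 0;
}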
diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
index c9947c4a0f63..8fac814f4779 100644
--- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
@@ -199,7 +199,7 @@ static const struct v4l2_ctrl_config mt9m111_greenbal_cfg = {
int mt9m111_probe(struct sd *sd)
{
u8 data[2] = {0x00, 0x00};
- int i;
+ int i, rc = 0;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -217,16 +217,18 @@ int mt9m111_probe(struct sd *sd)
/* Do the preinit */
for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) {
if (preinit_mt9m111[i][0] == BRIDGE) {
- m5602_write_bridge(sd,
+ rc |= m5602_write_bridge(sd,
preinit_mt9m111[i][1],
preinit_mt9m111[i][2]);
} else {
data[0] = preinit_mt9m111[i][2];
data[1] = preinit_mt9m111[i][3];
- m5602_write_sensor(sd,
+ rc |= m5602_write_sensor(sd,
preinit_mt9m111[i][1], data, 2);
}
}
+ if (rc < 0)
+ return rc;
if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2))
return -ENODEV;
diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c
index 37d2891e5f5b..5e43b4782f02 100644
--- a/drivers/media/usb/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c
@@ -158,6 +158,7 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = {
int po1030_probe(struct sd *sd)
{
+ int rc = 0;
u8 dev_id_h = 0, i;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
@@ -177,11 +178,14 @@ int po1030_probe(struct sd *sd)
for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) {
u8 data = preinit_po1030[i][2];
if (preinit_po1030[i][0] == SENSOR)
- m5602_write_sensor(sd,
+ rc |= m5602_write_sensor(sd,
preinit_po1030[i][1], &data, 1);
else
- m5602_write_bridge(sd, preinit_po1030[i][1], data);
+ rc |= m5602_write_bridge(sd, preinit_po1030[i][1],
+ data);
}
+ if (rc < 0)
+ return rc;
if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
return -ENODEV;
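
Accumulating the write results with |= works because kernel error codes
are negative: any failure keeps the sign bit set, so the single
rc < 0 test afterwards catches it, at the price of reporting a merged
rather than exact code. A toy demonstration:

#include <stdio.h>

#define EIO 5

static int write_reg(int fail)
{
	return fail ? -EIO : 0;	/* kernel style: 0 or -errno */
}

int main(void)
{
	int rc = 0;

	rc |= write_reg(0);
	rc |= write_reg(1);	/* one failure taints the whole run */
	rc |= write_reg(0);

	printf("%s\n", rc < 0 ? "probe failed" : "ok");
	return 0;
}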
diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c
index bea196361215..af454663e295 100644
--- a/drivers/media/usb/gspca/mr97310a.c
+++ b/drivers/media/usb/gspca/mr97310a.c
@@ -520,7 +520,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
switch (gspca_dev->pixfmt.width) {
case 160:
data[9] |= 0x04; /* reg 8, 2:1 scale down from 320 */
- /* fall thru */
+ /* fall through */
case 320:
default:
data[3] = 0x28; /* reg 2, H size/8 */
@@ -530,7 +530,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
break;
case 176:
data[9] |= 0x04; /* reg 8, 2:1 scale down from 352 */
- /* fall thru */
+ /* fall through */
case 352:
data[3] = 0x2c; /* reg 2, H size/8 */
data[4] = 0x48; /* reg 3, V size/4 */
@@ -617,10 +617,10 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
switch (gspca_dev->pixfmt.width) {
case 160:
data[9] |= 0x0c; /* reg 8, 4:1 scale down */
- /* fall thru */
+ /* fall through */
case 320:
data[9] |= 0x04; /* reg 8, 2:1 scale down */
- /* fall thru */
+ /* fall through */
case 640:
default:
data[3] = 0x50; /* reg 2, H size/8 */
@@ -637,7 +637,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
case 176:
data[9] |= 0x04; /* reg 8, 2:1 scale down */
- /* fall thru */
+ /* fall through */
case 352:
data[3] = 0x2c; /* reg 2, H size */
data[4] = 0x48; /* reg 3, V size */
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 10fcbe9e8614..f2799e8cb8e7 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -1945,7 +1945,7 @@ static const struct ov_i2c_regvals norm_8610[] = {
{ 0x62, 0x5f }, /* was 0xd7, new from windrv 090403 */
{ 0x63, 0xff },
{ 0x64, 0x53 }, /* new windrv 090403 says 0x57,
- * maybe thats wrong */
+ * maybe that's wrong */
{ 0x65, 0x00 },
{ 0x66, 0x55 },
{ 0x67, 0xb0 },
@@ -3658,7 +3658,7 @@ static void ov518_mode_init_regs(struct sd *sd)
case SEN_OV7620AE:
/*
* HdG: 640x480 needs special handling on device
- * revision 2, we check for device revison > 0 to
+ * revision 2, we check for device revision > 0 to
* avoid regressions, as we don't know the correct
* thing todo for revision 1.
*
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index d06dc0755b9a..02c90ad96b76 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -103,6 +103,16 @@ static const struct v4l2_pix_format ov772x_mode[] = {
.sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
+ {320, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 1},
+ {640, 480, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0},
};
static const struct v4l2_pix_format ov767x_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
@@ -127,6 +137,14 @@ static const struct framerates ov772x_framerates[] = {
.rates = vga_rates,
.nrates = ARRAY_SIZE(vga_rates),
},
+	{ /* 320x240 SGRBG8 */
+ .rates = qvga_rates,
+ .nrates = ARRAY_SIZE(qvga_rates),
+ },
+	{ /* 640x480 SGRBG8 */
+ .rates = vga_rates,
+ .nrates = ARRAY_SIZE(vga_rates),
+ },
};
struct reg_array {
@@ -411,9 +429,7 @@ static const u8 sensor_start_qvga_767x[][2] = {
};
static const u8 bridge_init_772x[][2] = {
- { 0xc2, 0x0c },
{ 0x88, 0xf8 },
- { 0xc3, 0x69 },
{ 0x89, 0xff },
{ 0x76, 0x03 },
{ 0x92, 0x01 },
@@ -439,7 +455,6 @@ static const u8 bridge_init_772x[][2] = {
{ 0x1f, 0x81 },
{ 0x34, 0x05 },
{ 0xe3, 0x04 },
- { 0x88, 0x00 },
{ 0x89, 0x00 },
{ 0x76, 0x00 },
{ 0xe7, 0x2e },
@@ -447,26 +462,9 @@ static const u8 bridge_init_772x[][2] = {
{ 0x25, 0x42 },
{ 0x21, 0xf0 },
- { 0x1c, 0x00 },
- { 0x1d, 0x40 },
- { 0x1d, 0x02 }, /* payload size 0x0200 * 4 = 2048 bytes */
- { 0x1d, 0x00 }, /* payload size */
-
- { 0x1d, 0x02 }, /* frame size 0x025800 * 4 = 614400 */
- { 0x1d, 0x58 }, /* frame size */
- { 0x1d, 0x00 }, /* frame size */
-
{ 0x1c, 0x0a },
{ 0x1d, 0x08 }, /* turn on UVC header */
{ 0x1d, 0x0e }, /* .. */
-
- { 0x8d, 0x1c },
- { 0x8e, 0x80 },
- { 0xe5, 0x04 },
-
- { 0xc0, 0x50 },
- { 0xc1, 0x3c },
- { 0xc2, 0x0c },
};
static const u8 sensor_init_772x[][2] = {
{ 0x12, 0x80 },
@@ -545,13 +543,10 @@ static const u8 sensor_init_772x[][2] = {
{ 0x8c, 0xe8 },
{ 0x8d, 0x20 },
- { 0x0c, 0x90 },
-
{ 0x2b, 0x00 },
{ 0x22, 0x7f },
{ 0x23, 0x03 },
{ 0x11, 0x01 },
- { 0x0c, 0xd0 },
{ 0x64, 0xff },
{ 0x0d, 0x41 },
@@ -559,9 +554,9 @@ static const u8 sensor_init_772x[][2] = {
{ 0x0e, 0xcd },
{ 0xac, 0xbf },
{ 0x8e, 0x00 }, /* De-noise threshold */
- { 0x0c, 0xd0 }
};
-static const u8 bridge_start_vga_772x[][2] = {
+static const u8 bridge_start_vga_yuyv_772x[][2] = {
+ {0x88, 0x00},
{0x1c, 0x00},
{0x1d, 0x40},
{0x1d, 0x02},
@@ -569,10 +564,14 @@ static const u8 bridge_start_vga_772x[][2] = {
{0x1d, 0x02},
{0x1d, 0x58},
{0x1d, 0x00},
+ {0x8d, 0x1c},
+ {0x8e, 0x80},
{0xc0, 0x50},
{0xc1, 0x3c},
+ {0xc2, 0x0c},
+ {0xc3, 0x69},
};
-static const u8 sensor_start_vga_772x[][2] = {
+static const u8 sensor_start_vga_yuyv_772x[][2] = {
{0x12, 0x00},
{0x17, 0x26},
{0x18, 0xa0},
@@ -581,8 +580,10 @@ static const u8 sensor_start_vga_772x[][2] = {
{0x29, 0xa0},
{0x2c, 0xf0},
{0x65, 0x20},
+ {0x67, 0x00},
};
-static const u8 bridge_start_qvga_772x[][2] = {
+static const u8 bridge_start_qvga_yuyv_772x[][2] = {
+ {0x88, 0x00},
{0x1c, 0x00},
{0x1d, 0x40},
{0x1d, 0x02},
@@ -590,10 +591,14 @@ static const u8 bridge_start_qvga_772x[][2] = {
{0x1d, 0x01},
{0x1d, 0x4b},
{0x1d, 0x00},
+ {0x8d, 0x1c},
+ {0x8e, 0x80},
{0xc0, 0x28},
{0xc1, 0x1e},
+ {0xc2, 0x0c},
+ {0xc3, 0x69},
};
-static const u8 sensor_start_qvga_772x[][2] = {
+static const u8 sensor_start_qvga_yuyv_772x[][2] = {
{0x12, 0x40},
{0x17, 0x3f},
{0x18, 0x50},
@@ -602,6 +607,61 @@ static const u8 sensor_start_qvga_772x[][2] = {
{0x29, 0x50},
{0x2c, 0x78},
{0x65, 0x2f},
+ {0x67, 0x00},
+};
+static const u8 bridge_start_vga_gbrg_772x[][2] = {
+ {0x88, 0x08},
+ {0x1c, 0x00},
+ {0x1d, 0x00},
+ {0x1d, 0x02},
+ {0x1d, 0x00},
+ {0x1d, 0x01},
+ {0x1d, 0x2c},
+ {0x1d, 0x00},
+ {0x8d, 0x00},
+ {0x8e, 0x00},
+ {0xc0, 0x50},
+ {0xc1, 0x3c},
+ {0xc2, 0x01},
+ {0xc3, 0x01},
+};
+static const u8 sensor_start_vga_gbrg_772x[][2] = {
+ {0x12, 0x01},
+ {0x17, 0x26},
+ {0x18, 0xa0},
+ {0x19, 0x07},
+ {0x1a, 0xf0},
+ {0x29, 0xa0},
+ {0x2c, 0xf0},
+ {0x65, 0x20},
+ {0x67, 0x02},
+};
+static const u8 bridge_start_qvga_gbrg_772x[][2] = {
+ {0x88, 0x08},
+ {0x1c, 0x00},
+ {0x1d, 0x00},
+ {0x1d, 0x02},
+ {0x1d, 0x00},
+ {0x1d, 0x00},
+ {0x1d, 0x4b},
+ {0x1d, 0x00},
+ {0x8d, 0x00},
+ {0x8e, 0x00},
+ {0xc0, 0x28},
+ {0xc1, 0x1e},
+ {0xc2, 0x01},
+ {0xc3, 0x01},
+};
+static const u8 sensor_start_qvga_gbrg_772x[][2] = {
+ {0x12, 0x41},
+ {0x17, 0x3f},
+ {0x18, 0x50},
+ {0x19, 0x03},
+ {0x1a, 0x78},
+ {0x29, 0x50},
+ {0x2c, 0x78},
+ {0x65, 0x2f},
+ {0x67, 0x02},
};
static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val)
@@ -679,7 +739,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
int i;
for (i = 0; i < 5; i++) {
- msleep(10);
+ usleep_range(10000, 20000);
data = ov534_reg_read(gspca_dev, OV534_REG_STATUS);
switch (data) {
@@ -1277,7 +1337,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
/* reset sensor */
sccb_reg_write(gspca_dev, 0x12, 0x80);
- msleep(10);
+ usleep_range(10000, 20000);
/* probe the sensor */
sccb_reg_read(gspca_dev, 0x0a);
@@ -1315,25 +1375,33 @@ static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int mode;
- static const struct reg_array bridge_start[NSENSORS][2] = {
+ static const struct reg_array bridge_start[NSENSORS][4] = {
[SENSOR_OV767x] = {{bridge_start_qvga_767x,
ARRAY_SIZE(bridge_start_qvga_767x)},
{bridge_start_vga_767x,
ARRAY_SIZE(bridge_start_vga_767x)}},
- [SENSOR_OV772x] = {{bridge_start_qvga_772x,
- ARRAY_SIZE(bridge_start_qvga_772x)},
- {bridge_start_vga_772x,
- ARRAY_SIZE(bridge_start_vga_772x)}},
+ [SENSOR_OV772x] = {{bridge_start_qvga_yuyv_772x,
+ ARRAY_SIZE(bridge_start_qvga_yuyv_772x)},
+ {bridge_start_vga_yuyv_772x,
+ ARRAY_SIZE(bridge_start_vga_yuyv_772x)},
+ {bridge_start_qvga_gbrg_772x,
+ ARRAY_SIZE(bridge_start_qvga_gbrg_772x)},
+ {bridge_start_vga_gbrg_772x,
+ ARRAY_SIZE(bridge_start_vga_gbrg_772x)} },
};
- static const struct reg_array sensor_start[NSENSORS][2] = {
+ static const struct reg_array sensor_start[NSENSORS][4] = {
[SENSOR_OV767x] = {{sensor_start_qvga_767x,
ARRAY_SIZE(sensor_start_qvga_767x)},
{sensor_start_vga_767x,
ARRAY_SIZE(sensor_start_vga_767x)}},
- [SENSOR_OV772x] = {{sensor_start_qvga_772x,
- ARRAY_SIZE(sensor_start_qvga_772x)},
- {sensor_start_vga_772x,
- ARRAY_SIZE(sensor_start_vga_772x)}},
+ [SENSOR_OV772x] = {{sensor_start_qvga_yuyv_772x,
+ ARRAY_SIZE(sensor_start_qvga_yuyv_772x)},
+ {sensor_start_vga_yuyv_772x,
+ ARRAY_SIZE(sensor_start_vga_yuyv_772x)},
+ {sensor_start_qvga_gbrg_772x,
+ ARRAY_SIZE(sensor_start_qvga_gbrg_772x)},
+ {sensor_start_vga_gbrg_772x,
+ ARRAY_SIZE(sensor_start_vga_gbrg_772x)} },
};
/* (from ms-win trace) */
@@ -1439,10 +1507,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* If this packet is marked as EOF, end the frame */
} else if (data[1] & UVC_STREAM_EOF) {
sd->last_pts = 0;
- if (gspca_dev->pixfmt.pixelformat == V4L2_PIX_FMT_YUYV
+ if (gspca_dev->pixfmt.pixelformat != V4L2_PIX_FMT_JPEG
&& gspca_dev->image_len + len - 12 !=
- gspca_dev->pixfmt.width *
- gspca_dev->pixfmt.height * 2) {
+ gspca_dev->pixfmt.sizeimage) {
gspca_dbg(gspca_dev, D_PACK, "wrong sized frame\n");
goto discard;
}
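
Checking against pixfmt.sizeimage instead of a hardcoded
width * height * 2 is what lets the EOF length test cover the new Bayer
modes: packed YUYV carries two bytes per pixel while raw SGRBG8 carries
one. The arithmetic, matching the .sizeimage values set above:

#include <stdio.h>

int main(void)
{
	unsigned int w = 640, h = 480;
	unsigned int yuyv = w * h * 2;	/* 2 bytes/pixel, packed 4:2:2 */
	unsigned int bayer = w * h;	/* 1 byte/pixel, raw SGRBG8 */

	printf("YUYV: %u bytes, Bayer: %u bytes\n", yuyv, bayer);
	return 0;
}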
diff --git a/drivers/media/usb/gspca/pac_common.h b/drivers/media/usb/gspca/pac_common.h
index 31f2a42af4dd..aae97a5534e3 100644
--- a/drivers/media/usb/gspca/pac_common.h
+++ b/drivers/media/usb/gspca/pac_common.h
@@ -21,7 +21,7 @@
/* We calculate the autogain at the end of the transfer of a frame, at this
moment a frame with the old settings is being captured and transmitted. So
- if we adjust the gain or exposure we must ignore atleast the next frame for
+ if we adjust the gain or exposure we must ignore at least the next frame for
the new settings to come into effect before doing any other adjustments. */
#define PAC_AUTOGAIN_IGNORE_FRAMES 2
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 5984bb12bcff..ab912903f8d7 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -1634,7 +1634,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
break;
case SENSOR_HV7131R:
sd->i2c_intf = 0x81; /* i2c 400 Kb/s */
- /* fall thru */
+ /* fall through */
default:
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 5f3f2979540a..583c9f10198c 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -121,7 +121,7 @@ struct sensor_data {
/* We calculate the autogain at the end of the transfer of a frame, at this
moment a frame with the old settings is being captured and transmitted. So
- if we adjust the gain or exposure we must ignore atleast the next frame for
+ if we adjust the gain or exposure we must ignore at least the next frame for
the new settings to come into effect before doing any other adjustments. */
#define AUTOGAIN_IGNORE_FRAMES 1
@@ -757,7 +757,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
/* Don't allow this to get below 10 when using autogain, the
steps become very large (relatively) when below 10 causing
- the image to oscilate from much too dark, to much too bright
+ the image to oscillate from much too dark, to much too bright
and back again. */
if (gspca_dev->autogain->val && reg10 < 10)
reg10 = 10;
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index df8d8482b795..a63f155f1515 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -2677,7 +2677,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
* which is 62 bytes long and is followed by various information
* including statuses and luminosity.
*
- * A marker may be splitted on two packets.
+ * A marker may be split on two packets.
*
* The 6th byte of a marker contains the bits:
* 0x08: USB full
diff --git a/drivers/media/usb/gspca/spca501.c b/drivers/media/usb/gspca/spca501.c
index 2cce74b166d8..3d215952af18 100644
--- a/drivers/media/usb/gspca/spca501.c
+++ b/drivers/media/usb/gspca/spca501.c
@@ -574,7 +574,7 @@ static const __u16 spca501_3com_open_data[][3] = {
{0x0, 0x0001, 0x0010}, /* TG Start Clock */
/* {0x2, 0x006a, 0x0001}, * C/S Enable ISOSYNCH Packet Engine */
- {0x2, 0x0068, 0x0001}, /* C/S Diable ISOSYNCH Packet Engine */
+ {0x2, 0x0068, 0x0001}, /* C/S Disable ISOSYNCH Packet Engine */
{0x2, 0x0000, 0x0005},
{0x2, 0x0043, 0x0000}, /* C/S Set Timing Mode, Disable TG soft reset */
{0x2, 0x0043, 0x0000}, /* C/S Set Timing Mode, Disable TG soft reset */
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index ffea9c35b0a0..d5c48216deb7 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -18,7 +18,7 @@
* History and Acknowledgments
*
* The original Linux driver for SQ905 based cameras was written by
- * Marcell Lengyel and furter developed by many other contributors
+ * Marcell Lengyel and further developed by many other contributors
* and is available from http://sourceforge.net/projects/sqcam/
*
* This driver takes advantage of the reverse engineering work done for
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index 437a3367ab97..e1e2a605a46c 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -555,7 +555,7 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
case BRIDGE_SPCA504:
case BRIDGE_SPCA504C:
pollreg = 0;
- /* fall thru */
+ /* fall through */
default:
/* case BRIDGE_SPCA533: */
/* case BRIDGE_SPCA504B: */
@@ -638,7 +638,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
reg_w_riv(gspca_dev, 0x00, 0x2000, 0x00);
reg_w_riv(gspca_dev, 0x00, 0x2301, 0x13);
reg_w_riv(gspca_dev, 0x00, 0x2306, 0x00);
- /* fall thru */
+ /* fall through */
case BRIDGE_SPCA533:
spca504B_PollingDataReady(gspca_dev);
spca50x_GetFirmware(gspca_dev);
diff --git a/drivers/media/usb/gspca/t613.c b/drivers/media/usb/gspca/t613.c
index 445782919446..ed9b925b723e 100644
--- a/drivers/media/usb/gspca/t613.c
+++ b/drivers/media/usb/gspca/t613.c
@@ -966,7 +966,7 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_SATURATION, 0, 0xf, 1, 5);
v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_GAMMA, 0, GAMMA_MAX, 1, 10);
- /* Activate lowlight, some apps dont bring up the
+ /* Activate lowlight, some apps don't bring up the
backlight_compensation control) */
v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_BACKLIGHT_COMPENSATION, 0, 1, 1, 1);
diff --git a/drivers/media/usb/gspca/touptek.c b/drivers/media/usb/gspca/touptek.c
index d1b9032d7863..6c056a448231 100644
--- a/drivers/media/usb/gspca/touptek.c
+++ b/drivers/media/usb/gspca/touptek.c
@@ -185,7 +185,7 @@ static const struct v4l2_pix_format vga_mode[] = {
};
/*
- * As theres no known frame sync, the only way to keep synced is to try hard
+ * As there's no known frame sync, the only way to keep synced is to try hard
* to never miss any packets
*/
#if MAX_NURBS < 4
@@ -259,7 +259,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val)
return;
}
gspca_dbg(gspca_dev, D_STREAM, "exposure: 0x%04X ms\n\n", value);
- /* Wonder if theres a good reason for sending it twice */
+ /* Wonder if there's a good reason for sending it twice */
/* probably not but leave it in because...why not */
reg_w(gspca_dev, value, REG_COARSE_INTEGRATION_TIME_);
reg_w(gspca_dev, value, REG_COARSE_INTEGRATION_TIME_);
diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
index abfab3de1866..36cc5a5ce77a 100644
--- a/drivers/media/usb/gspca/w996Xcf.c
+++ b/drivers/media/usb/gspca/w996Xcf.c
@@ -431,7 +431,7 @@ static void w9968cf_set_crop_window(struct sd *sd)
start_cropy = 35;
}
- /* Work around to avoid FP arithmetics */
+ /* Work around to avoid FP arithmetic */
#define SC(x) ((x) << 10)
/* Scaling factors */
diff --git a/drivers/media/usb/gspca/zc3xx-reg.h b/drivers/media/usb/gspca/zc3xx-reg.h
index 71fda38e85e0..26f6153b687f 100644
--- a/drivers/media/usb/gspca/zc3xx-reg.h
+++ b/drivers/media/usb/gspca/zc3xx-reg.h
@@ -26,7 +26,7 @@
/* Test mode */
#define ZC3XX_R00B_TESTMODECONTROL 0x000b
-/* Frame retreiving */
+/* Frame retrieving */
#define ZC3XX_R00C_LASTACQTIME 0x000c
#define ZC3XX_R00D_MONITORRES 0x000d
#define ZC3XX_R00E_TIMESTAMPHIGH 0x000e
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index cf21991e3d99..ad7194029031 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -3602,7 +3602,7 @@ static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */
{0xaa, 0x14, 0x0081},
/* Other registers */
{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
-/* Frame retreiving */
+/* Frame retrieving */
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
/* Gains */
{0xa0, 0xa0, ZC3XX_R1A8_DIGITALGAIN},
@@ -3718,7 +3718,7 @@ static const struct usb_action pas106b_Initial[] = { /* 352x288 */
{0xaa, 0x14, 0x0081},
/* Other registers */
{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
-/* Frame retreiving */
+/* Frame retrieving */
{0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS},
/* Gains */
{0xa0, 0xa0, ZC3XX_R1A8_DIGITALGAIN},
@@ -6775,7 +6775,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_HV7131R:
case SENSOR_TAS5130C:
reg_r(gspca_dev, 0x0008);
- /* fall thru */
+ /* fall through */
case SENSOR_PO2030:
reg_w(gspca_dev, 0x03, 0x0008);
break;
@@ -6824,7 +6824,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_TAS5130C:
reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
reg_w(gspca_dev, 0x15, 0x01ae);
- /* fall thru */
+ /* fall through */
case SENSOR_PAS202B:
case SENSOR_PO2030:
/* reg_w(gspca_dev, 0x40, ZC3XX_R117_GGAIN); in win traces */
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 5a3cb614a211..d76173f1ced1 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -61,10 +61,10 @@ static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
return -EINVAL;
if (wlen) {
- memcpy(&dev->i2c_buf, wdata, wlen);
+ memcpy(dev->i2c_buf, wdata, wlen);
ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
- (bus << 8) | addr, 0, &dev->i2c_buf,
+ (bus << 8) | addr, 0, dev->i2c_buf,
wlen, 1000);
if (ret < 0)
return ret;
@@ -72,10 +72,10 @@ static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
REQTYPE_I2C_READ, CTRL_READ_REQUEST,
- (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
+ (bus << 8) | addr, 0, dev->i2c_buf, len, 1000);
if (ret == len) {
- memcpy(data, &dev->i2c_buf, len);
+ memcpy(data, dev->i2c_buf, len);
ret = 0;
} else if (ret >= 0)
ret = -EIO;
@@ -91,17 +91,17 @@ static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus,
if (len > sizeof(dev->i2c_buf))
return -EINVAL;
- memcpy(&dev->i2c_buf, data, len);
+ memcpy(dev->i2c_buf, data, len);
ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
- (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
+ (bus << 8) | addr, 0, dev->i2c_buf, len, 1000);
if (ret < 0)
return ret;
ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST,
- 0, 0, &dev->i2c_buf, 2, 1000);
+ 0, 0, dev->i2c_buf, 2, 1000);
if ((ret == 2) && (dev->i2c_buf[1] == (len - 1)))
ret = 0;
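
Editor's note on the hdpvr hunks above: changing &dev->i2c_buf to dev->i2c_buf is subtle. For an array member both expressions evaluate to the same address, so the old code worked, but &array has type "pointer to the whole array" rather than "pointer to first element". A standalone illustration (plain C, not driver code):

    #include <stdio.h>

    struct dev { unsigned char i2c_buf[64]; };

    int main(void)
    {
            struct dev d;

            /* Same address, different pointer types. */
            printf("%p\n%p\n", (void *)d.i2c_buf, (void *)&d.i2c_buf);

            /* d.i2c_buf decays to unsigned char *, the natural match
             * for a void * parameter such as usb_control_msg()'s data
             * argument; &d.i2c_buf is unsigned char (*)[64]. */
            return 0;
    }
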
diff --git a/drivers/media/usb/hdpvr/hdpvr.h b/drivers/media/usb/hdpvr/hdpvr.h
index 1d65b4185f57..fa43e1d45ea9 100644
--- a/drivers/media/usb/hdpvr/hdpvr.h
+++ b/drivers/media/usb/hdpvr/hdpvr.h
@@ -215,7 +215,7 @@ enum {
*/
/* :0 s 38 01 1700 0003 0001 1 = 00
- * VIDEO STANDARD or FREQUNCY 0 = 60hz, 1 = 50hz
+ * VIDEO STANDARD or FREQUENCY 0 = 60hz, 1 = 50hz
*/
/* :0 s 38 01 3100 0003 0004 4 = 03030000
diff --git a/drivers/media/usb/pwc/pwc-dec23.c b/drivers/media/usb/pwc/pwc-dec23.c
index 1283b3bd9800..854c36a5dec9 100644
--- a/drivers/media/usb/pwc/pwc-dec23.c
+++ b/drivers/media/usb/pwc/pwc-dec23.c
@@ -41,7 +41,7 @@
* UNROLL_LOOP_FOR_COPYING_BLOCK
* 0: use a loop for a smaller code (but little slower)
* 1: when unrolling the loop, gcc produces some faster code (perhaps only
- * valid for intel processor class). Activating this option, automaticaly
+ * valid for intel processor class). Activating this option, automatically
* activate USE_LOOKUP_TABLE_TO_CLAMP
*/
#define UNROLL_LOOP_FOR_COPY 1
@@ -332,7 +332,7 @@ void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd)
build_table_color(TimonRomTable[version][1], pdec->table_0004_pass2, pdec->table_8004_pass2);
}
- /* Informations can be coded on a variable number of bits but never less than 8 */
+ /* Information can be coded on a variable number of bits but never less than 8 */
shift = 8 - pdec->nbits;
pdec->scalebits = SCALEBITS - shift;
pdec->nbitsmask = 0xFF >> shift;
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 72704f4d5330..4e94197094ad 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -76,6 +76,9 @@
#include "pwc-dec23.h"
#include "pwc-dec1.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/pwc.h>
+
/* Function prototypes and driver templates */
/* hotplug device table support */
@@ -156,6 +159,32 @@ static const struct video_device pwc_template = {
/***************************************************************************/
/* Private functions */
+static void *pwc_alloc_urb_buffer(struct device *dev,
+ size_t size, dma_addr_t *dma_handle)
+{
+ void *buffer = kmalloc(size, GFP_KERNEL);
+
+ if (!buffer)
+ return NULL;
+
+ *dma_handle = dma_map_single(dev, buffer, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, *dma_handle)) {
+ kfree(buffer);
+ return NULL;
+ }
+
+ return buffer;
+}
+
+static void pwc_free_urb_buffer(struct device *dev,
+ size_t size,
+ void *buffer,
+ dma_addr_t dma_handle)
+{
+ dma_unmap_single(dev, dma_handle, size, DMA_FROM_DEVICE);
+ kfree(buffer);
+}
+
static struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
{
unsigned long flags = 0;
@@ -260,6 +289,8 @@ static void pwc_isoc_handler(struct urb *urb)
int i, fst, flen;
unsigned char *iso_buf = NULL;
+ trace_pwc_handler_enter(urb, pdev);
+
if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN) {
PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronously.\n",
@@ -301,6 +332,11 @@ static void pwc_isoc_handler(struct urb *urb)
/* Reset ISOC error counter. We did get here, after all. */
pdev->visoc_errors = 0;
+ dma_sync_single_for_cpu(&urb->dev->dev,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ DMA_FROM_DEVICE);
+
/* vsync: 0 = don't copy data
1 = sync-hunt
2 = synched
@@ -347,7 +383,14 @@ static void pwc_isoc_handler(struct urb *urb)
pdev->vlast_packet_size = flen;
}
+ dma_sync_single_for_device(&urb->dev->dev,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ DMA_FROM_DEVICE);
+
handler_end:
+ trace_pwc_handler_exit(urb, pdev);
+
i = usb_submit_urb(urb, GFP_ATOMIC);
if (i != 0)
PWC_ERROR("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i);
@@ -421,16 +464,15 @@ retry:
urb->dev = udev;
urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
- urb->transfer_buffer = usb_alloc_coherent(udev,
- ISO_BUFFER_SIZE,
- GFP_KERNEL,
- &urb->transfer_dma);
+ urb->transfer_buffer_length = ISO_BUFFER_SIZE;
+ urb->transfer_buffer = pwc_alloc_urb_buffer(&udev->dev,
+ urb->transfer_buffer_length,
+ &urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
PWC_ERROR("Failed to allocate urb buffer %d\n", i);
pwc_isoc_cleanup(pdev);
return -ENOMEM;
}
- urb->transfer_buffer_length = ISO_BUFFER_SIZE;
urb->complete = pwc_isoc_handler;
urb->context = pdev;
urb->start_frame = 0;
@@ -481,15 +523,16 @@ static void pwc_iso_free(struct pwc_device *pdev)
/* Freeing ISOC buffers one by one */
for (i = 0; i < MAX_ISO_BUFS; i++) {
- if (pdev->urbs[i]) {
+ struct urb *urb = pdev->urbs[i];
+
+ if (urb) {
PWC_DEBUG_MEMORY("Freeing URB\n");
- if (pdev->urbs[i]->transfer_buffer) {
- usb_free_coherent(pdev->udev,
- pdev->urbs[i]->transfer_buffer_length,
- pdev->urbs[i]->transfer_buffer,
- pdev->urbs[i]->transfer_dma);
- }
- usb_free_urb(pdev->urbs[i]);
+ if (urb->transfer_buffer)
+ pwc_free_urb_buffer(&urb->dev->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+ usb_free_urb(urb);
pdev->urbs[i] = NULL;
}
}
@@ -610,7 +653,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
{
struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
- /* Don't allow queing new buffers after device disconnection */
+ /* Don't allow queueing new buffers after device disconnection */
if (!pdev->udev)
return -ENODEV;
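
Editor's note on the pwc rework above: it swaps coherent URB buffers for the streaming-DMA pattern — allocate with kmalloc(), map once with dma_map_single(), and bracket the CPU-side parsing in the ISO handler with dma_sync_single_for_cpu()/dma_sync_single_for_device(). Cacheable memory makes per-packet parsing cheaper than uncached coherent memory on most platforms. A condensed lifecycle sketch, mirroring the helpers above (error paths trimmed):

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    static int stream_one_buffer(struct device *dev, size_t size)
    {
            dma_addr_t handle;
            void *buf = kmalloc(size, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            handle = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, handle)) {
                    kfree(buf);
                    return -ENOMEM;
            }

            /* ... device DMAs into buf (URB in flight) ... */

            dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
            /* CPU may now parse buf through the cache coherently. */
            dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);

            /* Teardown once streaming stops. */
            dma_unmap_single(dev, handle, size, DMA_FROM_DEVICE);
            kfree(buf);
            return 0;
    }
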
diff --git a/drivers/media/usb/pwc/pwc-misc.c b/drivers/media/usb/pwc/pwc-misc.c
index 9be5adffa874..03888fc3804d 100644
--- a/drivers/media/usb/pwc/pwc-misc.c
+++ b/drivers/media/usb/pwc/pwc-misc.c
@@ -59,7 +59,7 @@ int pwc_get_size(struct pwc_device *pdev, int width, int height)
return i;
}
- /* Never reached there always is atleast one supported mode */
+ /* Never reached there always is at least one supported mode */
return 0;
}
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 2ffded08407b..4fc03ec8a4f1 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -75,7 +75,7 @@ static int smsusb_submit_urb(struct smsusb_device_t *dev,
struct smsusb_urb_t *surb);
/*
- * Completing URB's callback handler - bottom half (proccess context)
+ * Completing URB's callback handler - bottom half (process context)
* submits the URB prepared on smsusb_onresponse()
*/
static void do_submit_urb(struct work_struct *work)
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index 468f5ccf4ae6..a44a44ff3bb1 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -297,7 +297,7 @@ static int stk1160_probe(struct usb_interface *interface,
return -ENOMEM;
/*
- * Scan usb posibilities and populate alt_max_pkt_size array.
+ * Scan usb possibilities and populate alt_max_pkt_size array.
* Also, check if device speed is fast enough.
*/
rc = stk1160_scan_usb(interface, udev, alt_max_pkt_size);
@@ -426,7 +426,7 @@ static void stk1160_disconnect(struct usb_interface *interface)
/*
* This calls stk1160_release if it's the last reference.
- * Otherwise, release is posponed until there are no users left.
+ * Otherwise, release is postponed until there are no users left.
*/
v4l2_device_put(&dev->v4l2_dev);
}
diff --git a/drivers/media/usb/stk1160/stk1160-reg.h b/drivers/media/usb/stk1160/stk1160-reg.h
index 7b08a3cc4504..2e400db0ad0e 100644
--- a/drivers/media/usb/stk1160/stk1160-reg.h
+++ b/drivers/media/usb/stk1160/stk1160-reg.h
@@ -23,7 +23,7 @@
/* GPIO Control */
#define STK1160_GCTRL 0x000
-/* Remote Wakup Control */
+/* Remote Wakeup Control */
#define STK1160_RMCTL 0x00c
/* Power-on Strapping Data */
@@ -104,7 +104,7 @@
#define STK1160_SBUSR_RA 0x208
#define STK1160_SBUSR_RD 0x209
-/* Alternate Serial Inteface Control */
+/* Alternate Serial Interface Control */
#define STK1160_ASIC 0x2fc
/* PLL Select Options */
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index b8ec74d98e8d..8f545861471e 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -1006,7 +1006,7 @@ static int stk_setup_format(struct stk_camera *dev)
stk_camera_write_reg(dev, 0x001c, 0x46);
/*
* Registers 0x0115 0x0114 are the size of each line (bytes),
- * regs 0x0117 0x0116 are the heigth of the image.
+ * regs 0x0117 0x0116 are the height of the image.
*/
stk_camera_write_reg(dev, 0x0115,
((stk_sizes[i].w * depth) >> 8) & 0xff);
@@ -1144,7 +1144,7 @@ static int stk_vidioc_dqbuf(struct file *filp,
sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
sbuf->v4lbuf.sequence = ++dev->sequence;
- v4l2_get_timestamp(&sbuf->v4lbuf.timestamp);
+ sbuf->v4lbuf.timestamp = ns_to_timeval(ktime_get_ns());
*buf = sbuf->v4lbuf;
return 0;
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index b965931793b5..d6c79c13b332 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -58,7 +58,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
/****************************************************************************
- Module specific funtions
+ Module specific functions
****************************************************************************/
/*
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c
index d3229aa45fcb..2c723706f8c8 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/media/usb/tm6000/tm6000-core.c
@@ -668,7 +668,7 @@ int tm6000_set_audio_rinput(struct tm6000_core *dev)
areg_f0 = 0x04;
break;
default:
- printk(KERN_INFO "%s: audio input dosn't support\n",
+ printk(KERN_INFO "%s: audio input doesn't support\n",
dev->name);
return 0;
break;
@@ -690,7 +690,7 @@ int tm6000_set_audio_rinput(struct tm6000_core *dev)
areg_eb = 0x04;
break;
default:
- printk(KERN_INFO "%s: audio input dosn't support\n",
+ printk(KERN_INFO "%s: audio input doesn't support\n",
dev->name);
return 0;
break;
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 3a4e545c6037..36eea1950e77 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -149,7 +149,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
ret, __func__);
return ret;
} else
- printk(KERN_ERR "tm6000: pipe resetted\n");
+ printk(KERN_ERR "tm6000: pipe reset\n");
/* mutex_lock(&tm6000_driver.open_close_mutex); */
ret = usb_submit_urb(dvb->bulk_urb, GFP_ATOMIC);
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c
index 8c0476dfe54f..b37782d6f79c 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/media/usb/tm6000/tm6000-i2c.c
@@ -155,7 +155,7 @@ static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
/*
* The TM6000 only supports a read transaction
* immediately after a 1 or 2 byte write to select
- * a register. We cannot fulfil this request.
+ * a register. We cannot fulfill this request.
*/
i2c_dprintk(2, " read without preceding write not supported");
rc = -EOPNOTSUPP;
diff --git a/drivers/media/usb/tm6000/tm6000-stds.c b/drivers/media/usb/tm6000/tm6000-stds.c
index c0c75951246b..858cb4f3a9ca 100644
--- a/drivers/media/usb/tm6000/tm6000-stds.c
+++ b/drivers/media/usb/tm6000/tm6000-stds.c
@@ -323,7 +323,7 @@ static int tm6000_set_audio_std(struct tm6000_core *dev)
{
uint8_t areg_02 = 0x04; /* GC1 Fixed gain 0dB */
uint8_t areg_05 = 0x01; /* Auto 4.5 = M Japan, Auto 6.5 = DK */
- uint8_t areg_06 = 0x02; /* Auto de-emphasis, mannual channel mode */
+ uint8_t areg_06 = 0x02; /* Auto de-emphasis, manual channel mode */
if (dev->radio) {
tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index ee7b5318b351..072210f5f92f 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -106,7 +106,7 @@ static inline void buffer_filled(struct tm6000_core *dev,
dprintk(dev, V4L2_DEBUG_ISOC, "[%p/%d] wakeup\n", buf, buf->vb.i);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
- v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.ts = ktime_get_ns();
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
@@ -180,7 +180,7 @@ static int copy_streams(u8 *data, unsigned long len,
field = (header >> 11) & 0x1;
line = (header >> 12) & 0x1ff;
cmd = (header >> 21) & 0x7;
- /* Validates haeder fields */
+ /* Validates header fields */
if (size > TM6000_URB_MSG_LEN)
size = TM6000_URB_MSG_LEN;
pktsize = TM6000_URB_MSG_LEN;
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index 6eb84cf007b4..4db7a013e049 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -306,7 +306,7 @@ static int ttusb_boot_dsp(struct ttusb *ttusb)
b[3] = 28;
/* upload dsp code in 32 byte steps (36 didn't work for me ...) */
- /* 32 is max packet size, no messages should be splitted. */
+ /* 32 is max packet size, no messages should be split. */
for (i = 0; i < fw->size; i += 28) {
memcpy(&b[4], &fw->data[i], 28);
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 44ca66cb9b8f..897ef5e1da71 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -284,7 +284,7 @@ static void ttusb_dec_handle_irq( struct urb *urb)
*
* this is in fact a bit too simple an implementation;
* the box also reports a keyrepeat signal
- * (with buffer[3] == 0x40) in an intervall of ~100ms.
+ * (with buffer[3] == 0x40) in an interval of ~100ms.
* But to handle this correctly we had to implement some
* kind of timer which signals a 'key up' event if no
* keyrepeat signal is received for, let's say, 200ms.
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index 31e0e98d6daf..92d166bf8c12 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -900,7 +900,7 @@ static enum parse_state usbvision_parse_lines_420(struct usb_usbvision *usbvisio
if ((frame->curline + 1) >= frame->frmheight)
return parse_state_next_frame;
- block_split = (pixel_per_line%y_block_size) ? 1 : 0; /* are some blocks splitted into different lines? */
+ block_split = (pixel_per_line%y_block_size) ? 1 : 0; /* are some blocks split into different lines? */
y_odd_offset = (pixel_per_line / y_block_size) * (y_block_size + uv_block_size)
+ block_split * uv_block_size;
@@ -1160,7 +1160,7 @@ static void usbvision_parse_data(struct usb_usbvision *usbvision)
if (newstate == parse_state_next_frame) {
frame->grabstate = frame_state_done;
- v4l2_get_timestamp(&(frame->timestamp));
+ frame->ts = ktime_get_ns();
frame->sequence = usbvision->frame_num;
spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
@@ -1865,7 +1865,7 @@ static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
value[4] = 0xA2; /* Reg.48 BUF_THR I'm not sure if this does something in non-compressed mode. */
value[5] = 0x00; /* Reg.49 DVI_YUV This has nothing to do with compression */
- /* catched values for NT1004 */
+ /* caught values for NT1004 */
/* value[0] = 0xFF; Never apply intra mode automatically */
/* value[1] = 0xF1; Use full frame height for virtual strip width; One line per strip */
/* value[2] = 0x01; Force intra mode on all new frames */
@@ -1943,7 +1943,7 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
/* SAA7113 uses 8 bit output */
value[0] = USBVISION_8_422_SYNC;
} else {
- /* I'm sure only about d2-d0 [010] 16 bit 4:2:2 usin sync pulses
+ /* I'm sure only about d2-d0 [010] 16 bit 4:2:2 using sync pulses
* as that is how saa7111 is configured */
value[0] = USBVISION_16_422_SYNC;
/* | USBVISION_VSNC_POL | USBVISION_VCLK_POL);*/
@@ -2146,7 +2146,7 @@ int usbvision_power_on(struct usb_usbvision *usbvision)
/*
* usbvision_begin_streaming()
- * Sure you have to put bit 7 to 0, if not incoming frames are droped, but no
+ * Sure you have to put bit 7 to 0, if not incoming frames are dropped, but no
* idea about the rest
*/
int usbvision_begin_streaming(struct usb_usbvision *usbvision)
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index dd2ff8ed6c6a..e611052ebf59 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -706,7 +706,7 @@ static int vidioc_querybuf(struct file *file,
vb->length = usbvision->curwidth *
usbvision->curheight *
usbvision->palette.bytes_per_pixel;
- vb->timestamp = usbvision->frame[vb->index].timestamp;
+ vb->timestamp = ns_to_timeval(usbvision->frame[vb->index].ts);
vb->sequence = usbvision->frame[vb->index].sequence;
return 0;
}
@@ -775,7 +775,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vb)
V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vb->index = f->index;
vb->sequence = f->sequence;
- vb->timestamp = f->timestamp;
+ vb->timestamp = ns_to_timeval(f->ts);
vb->field = V4L2_FIELD_NONE;
vb->bytesused = f->scanlength;
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 017e7baf5747..668167f8951d 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -135,11 +135,11 @@
#define MIN_FRAME_WIDTH 64
#define MAX_USB_WIDTH 320 /* 384 */
-#define MAX_FRAME_WIDTH 320 /* 384 */ /* streching sometimes causes crashes*/
+#define MAX_FRAME_WIDTH 320 /* 384 */ /* stretching sometimes causes crashes*/
#define MIN_FRAME_HEIGHT 48
#define MAX_USB_HEIGHT 240 /* 288 */
-#define MAX_FRAME_HEIGHT 240 /* 288 */ /* Streching sometimes causes crashes*/
+#define MAX_FRAME_HEIGHT 240 /* 288 */ /* Stretching sometimes causes crashes*/
#define MAX_FRAME_SIZE (MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT * MAX_BYTES_PER_PIXEL)
#define USBVISION_CLIPMASK_SIZE (MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT / 8) /* bytesize of clipmask */
@@ -177,7 +177,7 @@ enum {
* G = 1.164*(Y-16) - 0.813*(U-128) - 0.391*(V-128)
* R = 1.164*(Y-16) + 1.596*(U-128)
*
- * If you fancy integer arithmetics (as you should), hear this:
+ * If you fancy integer arithmetic (as you should), hear this:
*
* 65536*B = 76284*(Y-16) + 132252*(V-128)
* 65536*G = 76284*(Y-16) - 53281*(U-128) - 25625*(V-128)
@@ -316,7 +316,7 @@ struct usbvision_frame {
long bytes_read; /* amount of scanlength that has been read from data */
struct usbvision_v4l2_format_st v4l2_format; /* format the user needs*/
int v4l2_linesize; /* bytes for one videoline*/
- struct timeval timestamp;
+ u64 ts;
int sequence; /* How many video frames we send to user */
};
@@ -438,7 +438,7 @@ struct usb_usbvision {
int last_compr_level; /* How strong (100) or weak (0) was compression */
int usb_bandwidth; /* Mbit/s */
- /* Statistics that can be overlayed on the screen */
+ /* Statistics that can be overlaid on the screen */
unsigned long isoc_urb_count; /* How many URBs we received so far */
unsigned long urb_length; /* Length of last URB */
unsigned long isoc_data_count; /* How many bytes we received */
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index d45415cbe6e7..14cff91b7aea 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1212,7 +1212,7 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
__uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
- memset(ev->reserved, 0, sizeof(ev->reserved));
+ memset(ev, 0, sizeof(*ev));
ev->type = V4L2_EVENT_CTRL;
ev->id = v4l2_ctrl.id;
ev->u.ctrl.value = value;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index b62cbd800111..10cfe8e51626 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1106,11 +1106,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- /* Make sure the terminal type MSB is not null, otherwise it
- * could be confused with a unit.
+ /*
+ * Reject invalid terminal types that would cause issues:
+ *
+ * - The high byte must be non-zero, otherwise it would be
+ * confused with a unit.
+ *
+ * - Bit 15 must be 0, as we use it internally as a terminal
+ * direction flag.
+ *
+ * Other unknown types are accepted.
*/
type = get_unaligned_le16(&buffer[4]);
- if ((type & 0xff00) == 0) {
+ if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
"interface %d INPUT_TERMINAL %d has invalid "
"type 0x%04x, skipping\n", udev->devnum,
@@ -2175,7 +2183,7 @@ static int uvc_probe(struct usb_interface *intf,
if (udev->serial)
strscpy(dev->mdev.serial, udev->serial,
sizeof(dev->mdev.serial));
- strscpy(dev->mdev.bus_info, udev->devpath, sizeof(dev->mdev.bus_info));
+ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
media_device_init(&dev->mdev);
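
Editor's note on the tightened terminal-type check above: the new expression rejects two classes of bogus descriptors at once — a zero high byte (which would collide with unit IDs) and a set bit 15 (which the driver reserves internally as a direction flag). A small illustration of the resulting acceptance rule (helper name hypothetical):

    #include <linux/types.h>

    /* Values run through the new check, for illustration:
     *   0x0201 (ITT_CAMERA)       -> accepted
     *   0x00ab (high byte zero)   -> rejected, could be mistaken for a unit
     *   0x8201 (bit 15 set)       -> rejected, clashes with the internal
     *                                direction flag
     */
    static bool terminal_type_valid(u16 type)
    {
            return (type & 0x7f00) != 0 && (type & 0x8000) == 0;
    }
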
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 84525ff04745..182dcac49aa3 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -676,6 +676,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
if (!uvc_hw_timestamps_param)
return;
+ /*
+ * We will get called from __vb2_queue_cancel() if there are buffers
+ * done but not dequeued by the user, but the sample array has already
+ * been released at that time. Just bail out in that case.
+ */
+ if (!clock->samples)
+ return;
+
spin_lock_irqsave(&clock->lock, flags);
if (clock->count < clock->size)
@@ -2000,7 +2008,7 @@ int uvc_video_init(struct uvc_streaming *stream)
usb_set_interface(stream->dev->udev, stream->intfnum, 0);
/* Set the streaming probe control with default streaming parameters
- * retrieved from the device. Webcams that don't suport GET_DEF
+ * retrieved from the device. Webcams that don't support GET_DEF
* requests on the probe control will just keep their current streaming
* parameters.
*/
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 9b41b14ce076..c7c1baa90dea 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -620,8 +620,10 @@ struct uvc_streaming {
(uvc_urb) < &(uvc_streaming)->uvc_urb[UVC_URBS]; \
++(uvc_urb))
-#define uvc_urb_index(uvc_urb) \
- (unsigned int)((uvc_urb) - (&(uvc_urb)->stream->uvc_urb[0]))
+static inline u32 uvc_urb_index(const struct uvc_urb *uvc_urb)
+{
+ return uvc_urb - &uvc_urb->stream->uvc_urb[0];
+}
struct uvc_device_info {
u32 quirks;
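
Editor's note on the uvc_urb_index() conversion above: the static inline keeps the same pointer arithmetic — an element pointer minus the array base is the element's index — while type-checking its argument and evaluating it exactly once, which the macro did not. The generic shape of the transformation (names are placeholders, not UVC code):

    struct pool;

    struct item {
            struct pool *owner;
    };

    struct pool {
            struct item items[16];
    };

    /* Index of 'it' within its owner's array; argument is type-checked
     * and evaluated once, unlike a macro. */
    static inline unsigned int item_index(const struct item *it)
    {
            return it - &it->owner->items[0];
    }
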
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index ab35554cbffa..96fee8d5b865 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -2,7 +2,7 @@
* Zoran 364xx based USB webcam module version 0.73
*
* Allows you to use your USB webcam with V4L2 applications
- * This is still in heavy developpement !
+ * This is still in heavy development !
*
* Copyright (C) 2004 Antoine Jacquet <royale@zerezo.com>
* http://royale.zerezo.com/zr364xx/
@@ -521,7 +521,7 @@ static void zr364xx_fillbuff(struct zr364xx_camera *cam,
/* tell v4l buffer was filled */
buf->vb.field_count = cam->frame_count * 2;
- v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.ts = ktime_get_ns();
buf->vb.state = VIDEOBUF_DONE;
}
@@ -549,7 +549,7 @@ static int zr364xx_got_frame(struct zr364xx_camera *cam, int jpgsize)
goto unlock;
}
list_del(&buf->vb.queue);
- v4l2_get_timestamp(&buf->vb.ts);
+ buf->vb.ts = ktime_get_ns();
DBG("[%p/%d] wakeup\n", buf, buf->vb.i);
zr364xx_fillbuff(cam, buf, jpgsize);
wake_up(&buf->vb.done);
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 50763fb42a1b..663730f088cd 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -398,16 +398,6 @@ __v4l2_find_nearest_size(const void *array, size_t array_size,
}
EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
-void v4l2_get_timestamp(struct timeval *tv)
-{
- struct timespec ts;
-
- ktime_get_ts(&ts);
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
-}
-EXPORT_SYMBOL_GPL(v4l2_get_timestamp);
-
int v4l2_g_parm_cap(struct video_device *vdev,
struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
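
Editor's note on the v4l2_get_timestamp() removal: as the stkwebcam, tm6000, usbvision and zr364xx hunks above show, drivers now sample the monotonic clock directly as a 64-bit nanosecond count and convert to the legacy struct timeval only at the UAPI boundary. The pattern, assuming the helpers of this kernel era:

    #include <linux/ktime.h>
    #include <linux/time.h>
    #include <linux/videodev2.h>

    static void fill_legacy_timestamp(struct v4l2_buffer *b)
    {
            u64 ts = ktime_get_ns();        /* y2038-safe internal form */

            /* Convert only when filling the legacy UAPI structure;
             * everything internal stays u64 nanoseconds. */
            b->timestamp = ns_to_timeval(ts);
    }
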
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 5e3806feb5d7..b79d3bbd8350 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -825,6 +825,9 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers";
case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP:
return "H264 Set QP Value for HC Layers";
+ case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
+ return "H264 Constrained Intra Pred";
+ case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
@@ -1387,7 +1390,7 @@ static u32 user_flags(const struct v4l2_ctrl *ctrl)
static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
{
- memset(ev->reserved, 0, sizeof(ev->reserved));
+ memset(ev, 0, sizeof(*ev));
ev->type = V4L2_EVENT_CTRL;
ev->id = ctrl->id;
ev->u.ctrl.changes = changes;
@@ -1661,15 +1664,6 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
return -EINVAL;
}
- if (p_mpeg2_slice_params->backward_ref_index >= VIDEO_MAX_FRAME ||
- p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
- return -EINVAL;
-
- if (p_mpeg2_slice_params->pad ||
- p_mpeg2_slice_params->picture.pad ||
- p_mpeg2_slice_params->sequence.pad)
- return -EINVAL;
-
return 0;
case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
@@ -4172,9 +4166,9 @@ __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
struct v4l2_fh *fh = file->private_data;
+ poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
return EPOLLPRI;
- poll_wait(file, &fh->wait, wait);
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_poll);
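
Editor's note on the v4l2_ctrl_poll() reordering above: poll_wait() must register the caller on the waitqueue before the pending check, otherwise an event arriving between the check and a later registration is never signalled — the classic lost-wakeup race. The safe shape of any poll handler:

    #include <linux/poll.h>
    #include <media/v4l2-event.h>
    #include <media/v4l2-fh.h>

    static __poll_t my_poll(struct file *file, struct poll_table_struct *wait)
    {
            struct v4l2_fh *fh = file->private_data;

            /* 1. Register on the waitqueue first ... */
            poll_wait(file, &fh->wait, wait);

            /* 2. ... then test the condition. An event queued after
             * the test now wakes us instead of being lost. */
            if (v4l2_event_pending(fh))
                    return EPOLLPRI;

            return 0;
    }
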
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 481e3c65cf97..c46d14c996fc 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -52,6 +52,7 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
kev->event.pending = fh->navailable;
*event = kev->event;
+ event->timestamp = ns_to_timespec(kev->ts);
kev->sev->first = sev_pos(kev->sev, 1);
kev->sev->in_use--;
@@ -103,8 +104,8 @@ static struct v4l2_subscribed_event *v4l2_event_subscribed(
return NULL;
}
-static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
- const struct timespec *ts)
+static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
+ const struct v4l2_event *ev, u64 ts)
{
struct v4l2_subscribed_event *sev;
struct v4l2_kevent *kev;
@@ -144,7 +145,7 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (copy_payload)
kev->event.u = ev->u;
kev->event.id = ev->id;
- kev->event.timestamp = *ts;
+ kev->ts = ts;
kev->event.sequence = fh->sequence;
sev->in_use++;
list_add_tail(&kev->list, &fh->available);
@@ -158,17 +159,17 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
struct v4l2_fh *fh;
unsigned long flags;
- struct timespec timestamp;
+ u64 ts;
if (vdev == NULL)
return;
- ktime_get_ts(&timestamp);
+ ts = ktime_get_ns();
spin_lock_irqsave(&vdev->fh_lock, flags);
list_for_each_entry(fh, &vdev->fh_list, list)
- __v4l2_event_queue_fh(fh, ev, &timestamp);
+ __v4l2_event_queue_fh(fh, ev, ts);
spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
@@ -177,12 +178,10 @@ EXPORT_SYMBOL_GPL(v4l2_event_queue);
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
unsigned long flags;
- struct timespec timestamp;
-
- ktime_get_ts(&timestamp);
+ u64 ts = ktime_get_ns();
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- __v4l2_event_queue_fh(fh, ev, &timestamp);
+ __v4l2_event_queue_fh(fh, ev, ts);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 9bfedd7596a1..20571846e636 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -46,7 +46,7 @@ static const struct v4l2_fwnode_bus_conv {
enum v4l2_fwnode_bus_type fwnode_bus_type;
enum v4l2_mbus_type mbus_type;
const char *name;
-} busses[] = {
+} buses[] = {
{
V4L2_FWNODE_BUS_TYPE_GUESS,
V4L2_MBUS_UNKNOWN,
@@ -83,9 +83,9 @@ get_v4l2_fwnode_bus_conv_by_fwnode_bus(enum v4l2_fwnode_bus_type type)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(busses); i++)
- if (busses[i].fwnode_bus_type == type)
- return &busses[i];
+ for (i = 0; i < ARRAY_SIZE(buses); i++)
+ if (buses[i].fwnode_bus_type == type)
+ return &buses[i];
return NULL;
}
@@ -113,9 +113,9 @@ get_v4l2_fwnode_bus_conv_by_mbus(enum v4l2_mbus_type type)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(busses); i++)
- if (busses[i].mbus_type == type)
- return &busses[i];
+ for (i = 0; i < ARRAY_SIZE(buses); i++)
+ if (buses[i].mbus_type == type)
+ return &buses[i];
return NULL;
}
@@ -809,7 +809,7 @@ error:
* root node and the value of that property matching with the integer argument
* of the reference, at the same index.
*
- * The child fwnode reched at the end of the iteration is then returned to the
+ * The child fwnode reached at the end of the iteration is then returned to the
* caller.
*
* The core reason for this is that you cannot refer to just any node in ACPI.
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 90aad465f9ed..f6d663934648 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -88,7 +88,7 @@ const char *v4l2_norm_to_name(v4l2_std_id id)
int i;
/* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle
- 64 bit comparations. So, on that architecture, with some gcc
+ 64 bit comparisons. So, on that architecture, with some gcc
variants, compilation fails. Currently, the max value is 30bit wide.
*/
BUG_ON(myid != id);
@@ -1017,6 +1017,12 @@ static void v4l_sanitize_format(struct v4l2_format *fmt)
{
unsigned int offset;
+ /* Make sure num_planes is not bogus */
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes,
+ VIDEO_MAX_PLANES);
+
/*
* The v4l2_pix_format structure has been extended with fields that were
* not previously required to be set to zero by applications. The priv
@@ -1214,6 +1220,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break;
case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break;
case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break;
+ case V4L2_PIX_FMT_AYUV32: descr = "32-bit AYUV 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break;
+ case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break;
case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break;
case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break;
case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
@@ -1553,8 +1563,6 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
- if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
- break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
bytesperline);
@@ -1586,8 +1594,6 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
- if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
- break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
bytesperline);
@@ -1656,8 +1662,6 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
- if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
- break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
bytesperline);
@@ -1689,8 +1693,6 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
- if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
- break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
bytesperline);
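
Editor's note on the num_planes handling above: clamping the count once in v4l_sanitize_format() is what lets the four per-ioctl "num_planes > VIDEO_MAX_PLANES" checks be deleted — every later loop over plane_fmt[] now sees a bounded value. Fragments from the surrounding hunks, to show why one clamp suffices:

    /* Consumers index plane_fmt[VIDEO_MAX_PLANES] by the user count: */
    for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
            CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], bytesperline);

    /* A single early clamp bounds that count for every ioctl path: */
    fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes,
                                       VIDEO_MAX_PLANES);
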
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 5bbdec55b7d7..3392833d9541 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -131,7 +131,7 @@ struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
-void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
+struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
unsigned long flags;
@@ -149,7 +149,7 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
-void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
+struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
unsigned long flags;
@@ -167,7 +167,7 @@ void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
-void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
+struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
unsigned long flags;
@@ -617,36 +617,35 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
__poll_t rc = 0;
unsigned long flags;
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
+
+ poll_wait(file, &src_q->done_wq, wait);
+ poll_wait(file, &dst_q->done_wq, wait);
+
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
struct v4l2_fh *fh = file->private_data;
+ poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
rc = EPOLLPRI;
- else if (req_events & EPOLLPRI)
- poll_wait(file, &fh->wait, wait);
if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
return rc;
}
- src_q = v4l2_m2m_get_src_vq(m2m_ctx);
- dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
-
/*
* There has to be at least one buffer queued on each queued_list, which
* means either in driver already or waiting for driver to claim it
* and start processing.
*/
- if ((!src_q->streaming || list_empty(&src_q->queued_list))
- && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
+ if ((!src_q->streaming || src_q->error ||
+ list_empty(&src_q->queued_list)) &&
+ (!dst_q->streaming || dst_q->error ||
+ list_empty(&dst_q->queued_list))) {
rc |= EPOLLERR;
goto end;
}
- spin_lock_irqsave(&src_q->done_lock, flags);
- if (list_empty(&src_q->done_list))
- poll_wait(file, &src_q->done_wq, wait);
- spin_unlock_irqrestore(&src_q->done_lock, flags);
-
spin_lock_irqsave(&dst_q->done_lock, flags);
if (list_empty(&dst_q->done_list)) {
/*
@@ -657,8 +656,6 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
spin_unlock_irqrestore(&dst_q->done_lock, flags);
return rc | EPOLLIN | EPOLLRDNORM;
}
-
- poll_wait(file, &dst_q->done_wq, wait);
}
spin_unlock_irqrestore(&dst_q->done_lock, flags);
@@ -975,6 +972,27 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
+ struct vb2_v4l2_buffer *cap_vb,
+ bool copy_frame_flags)
+{
+ u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ if (copy_frame_flags)
+ mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME;
+
+ cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;
+
+ if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ cap_vb->timecode = out_vb->timecode;
+ cap_vb->field = out_vb->field;
+ cap_vb->flags &= ~mask;
+ cap_vb->flags |= out_vb->flags & mask;
+ cap_vb->vb2_buf.copied_timestamp = 1;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
+
void v4l2_m2m_request_queue(struct media_request *req)
{
struct media_request_object *obj, *obj_safe;
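
Editor's note on the new v4l2_m2m_buf_copy_metadata() helper above: it gives mem2mem drivers one call to propagate timestamp, timecode, field and (optionally) key/P/B-frame flags from the OUTPUT buffer to the CAPTURE buffer it produced. A hedged sketch of how a driver's device_run() might use it (the driver context and names are hypothetical):

    #include <media/v4l2-fh.h>
    #include <media/v4l2-mem2mem.h>

    /* Hypothetical mem2mem driver context; only the m2m handle matters. */
    struct my_ctx {
            struct v4l2_fh fh;
    };

    static void my_device_run(void *priv)
    {
            struct my_ctx *ctx = priv;
            struct vb2_v4l2_buffer *src, *dst;

            src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
            dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

            /* false: a codec generates its own frame-type flags; pass
             * true for 1:1 filters that preserve them. */
            v4l2_m2m_buf_copy_metadata(src, dst, false);

            /* ... program the hardware on src/dst and start it ... */
    }
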
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 7491b337002c..bf7dfb2a34af 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -214,7 +214,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q)
return 1;
}
if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
- dprintk(1, "busy: buffer #%d avtive\n", i);
+ dprintk(1, "busy: buffer #%d active\n", i);
return 1;
}
}
@@ -367,7 +367,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
}
b->field = vb->field;
- b->timestamp = vb->ts;
+ b->timestamp = ns_to_timeval(vb->ts);
b->bytesused = vb->size;
b->sequence = vb->field_count >> 1;
}
@@ -581,7 +581,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
|| q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
buf->size = b->bytesused;
buf->field = b->field;
- buf->ts = b->timestamp;
+ buf->ts = v4l2_timeval_to_ns(&b->timestamp);
}
break;
case V4L2_MEMORY_USERPTR:
@@ -1119,13 +1119,14 @@ done:
EXPORT_SYMBOL_GPL(videobuf_read_stream);
__poll_t videobuf_poll_stream(struct file *file,
- struct videobuf_queue *q,
- poll_table *wait)
+ struct videobuf_queue *q,
+ poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
struct videobuf_buffer *buf = NULL;
__poll_t rc = 0;
+ poll_wait(file, &buf->done, wait);
videobuf_queue_lock(q);
if (q->streaming) {
if (!list_empty(&q->stream))
@@ -1149,7 +1150,6 @@ __poll_t videobuf_poll_stream(struct file *file,
rc = EPOLLERR;
if (0 == rc) {
- poll_wait(file, &buf->done, wait);
if (buf->state == VIDEOBUF_DONE ||
buf->state == VIDEOBUF_ERROR) {
switch (q->type) {
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index f46132504d88..e1bf50df4c70 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -248,7 +248,7 @@ static int __videobuf_iolock(struct videobuf_queue *q,
/* All handling should be done by __videobuf_mmap_mapper() */
if (!mem->vaddr) {
- dev_err(q->dev, "memory is not alloced/mmapped.\n");
+ dev_err(q->dev, "memory is not allocated/mmapped.\n");
return -EINVAL;
}
break;
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 45fe781aeeec..cb50f1957828 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -171,7 +171,7 @@ static int __videobuf_iolock(struct videobuf_queue *q,
/* All handling should be done by __videobuf_mmap_mapper() */
if (!mem->vaddr) {
- printk(KERN_ERR "memory is not alloced/mmapped.\n");
+ printk(KERN_ERR "memory is not allocated/mmapped.\n");
return -EINVAL;
}
break;
@@ -196,26 +196,6 @@ static int __videobuf_iolock(struct videobuf_queue *q,
}
dprintk(1, "vmalloc is at addr %p (%d pages)\n",
mem->vaddr, pages);
-
-#if 0
- int rc;
- /* Kernel userptr is used also by read() method. In this case,
- there's no need to remap, since data will be copied to user
- */
- if (!vb->baddr)
- return 0;
-
- /* FIXME: to properly support USERPTR, remap should occur.
- The code below won't work, since mem->vma = NULL
- */
- /* Try to remap memory */
- rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
- if (rc < 0) {
- printk(KERN_ERR "mmap: remap failed with error %d", rc);
- return -ENOMEM;
- }
-#endif
-
break;
case V4L2_MEMORY_OVERLAY:
default:
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 24afc36833bf..0a53598d982f 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>
@@ -38,6 +39,7 @@
#define MC_ERR_ADR 0x0c
+#define MC_GART_ERROR_REQ 0x30
#define MC_DECERR_EMEM_OTHERS_STATUS 0x58
#define MC_SECURITY_VIOLATION_STATUS 0x74
@@ -51,7 +53,7 @@
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
- { .compatible = "nvidia,tegra20-mc", .data = &tegra20_mc_soc },
+ { .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
{ .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
@@ -161,7 +163,7 @@ static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
/* block clients DMA requests */
err = rst_ops->block_dma(mc, rst);
if (err) {
- dev_err(mc->dev, "Failed to block %s DMA: %d\n",
+ dev_err(mc->dev, "failed to block %s DMA: %d\n",
rst->name, err);
return err;
}
@@ -171,7 +173,7 @@ static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
/* wait for completion of the outstanding DMA requests */
while (!rst_ops->dma_idling(mc, rst)) {
if (!retries--) {
- dev_err(mc->dev, "Failed to flush %s DMA\n",
+ dev_err(mc->dev, "failed to flush %s DMA\n",
rst->name);
return -EBUSY;
}
@@ -184,7 +186,7 @@ static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
/* clear clients DMA requests sitting before arbitration */
err = rst_ops->hotreset_assert(mc, rst);
if (err) {
- dev_err(mc->dev, "Failed to hot reset %s: %d\n",
+ dev_err(mc->dev, "failed to hot reset %s: %d\n",
rst->name, err);
return err;
}
@@ -213,7 +215,7 @@ static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev,
/* take out client from hot reset */
err = rst_ops->hotreset_deassert(mc, rst);
if (err) {
- dev_err(mc->dev, "Failed to deassert hot reset %s: %d\n",
+ dev_err(mc->dev, "failed to deassert hot reset %s: %d\n",
rst->name, err);
return err;
}
@@ -223,7 +225,7 @@ static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev,
/* allow new DMA requests to proceed to arbitration */
err = rst_ops->unblock_dma(mc, rst);
if (err) {
- dev_err(mc->dev, "Failed to unblock %s DMA : %d\n",
+ dev_err(mc->dev, "failed to unblock %s DMA : %d\n",
rst->name, err);
return err;
}
@@ -575,8 +577,15 @@ static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data)
break;
case MC_INT_INVALID_GART_PAGE:
- dev_err_ratelimited(mc->dev, "%s\n", error);
- continue;
+ reg = MC_GART_ERROR_REQ;
+ value = mc_readl(mc, reg);
+
+ id = (value >> 1) & mc->soc->client_id_mask;
+ desc = error_names[2];
+
+ if (value & BIT(0))
+ direction = "write";
+ break;
case MC_INT_SECURITY_VIOLATION:
reg = MC_SECURITY_VIOLATION_STATUS;
@@ -611,23 +620,18 @@ static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data)
static int tegra_mc_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct resource *res;
struct tegra_mc *mc;
void *isr;
int err;
- match = of_match_node(tegra_mc_of_match, pdev->dev.of_node);
- if (!match)
- return -ENODEV;
-
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
platform_set_drvdata(pdev, mc);
spin_lock_init(&mc->lock);
- mc->soc = match->data;
+ mc->soc = of_device_get_match_data(&pdev->dev);
mc->dev = &pdev->dev;
/* length of MC tick in nanoseconds */
@@ -638,38 +642,35 @@ static int tegra_mc_probe(struct platform_device *pdev)
if (IS_ERR(mc->regs))
return PTR_ERR(mc->regs);
+ mc->clk = devm_clk_get(&pdev->dev, "mc");
+ if (IS_ERR(mc->clk)) {
+ dev_err(&pdev->dev, "failed to get MC clock: %ld\n",
+ PTR_ERR(mc->clk));
+ return PTR_ERR(mc->clk);
+ }
+
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
if (mc->soc == &tegra20_mc_soc) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mc->regs2 = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mc->regs2))
- return PTR_ERR(mc->regs2);
-
isr = tegra20_mc_irq;
} else
#endif
{
- mc->clk = devm_clk_get(&pdev->dev, "mc");
- if (IS_ERR(mc->clk)) {
- dev_err(&pdev->dev, "failed to get MC clock: %ld\n",
- PTR_ERR(mc->clk));
- return PTR_ERR(mc->clk);
- }
-
err = tegra_mc_setup_latency_allowance(mc);
if (err < 0) {
- dev_err(&pdev->dev, "failed to setup latency allowance: %d\n",
+ dev_err(&pdev->dev,
+ "failed to setup latency allowance: %d\n",
err);
return err;
}
isr = tegra_mc_irq;
- }
- err = tegra_mc_setup_timings(mc);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to setup timings: %d\n", err);
- return err;
+ err = tegra_mc_setup_timings(mc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup timings: %d\n",
+ err);
+ return err;
+ }
}
mc->irq = platform_get_irq(pdev, 0);
@@ -678,11 +679,11 @@ static int tegra_mc_probe(struct platform_device *pdev)
return mc->irq;
}
- WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n");
+ WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
mc_writel(mc, mc->soc->intmask, MC_INTMASK);
- err = devm_request_irq(&pdev->dev, mc->irq, isr, IRQF_SHARED,
+ err = devm_request_irq(&pdev->dev, mc->irq, isr, 0,
dev_name(&pdev->dev), mc);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
@@ -695,20 +696,65 @@ static int tegra_mc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to register reset controller: %d\n",
err);
- if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU)) {
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
- if (IS_ERR(mc->smmu))
+ if (IS_ERR(mc->smmu)) {
dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
PTR_ERR(mc->smmu));
+ mc->smmu = NULL;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) {
+ mc->gart = tegra_gart_probe(&pdev->dev, mc);
+ if (IS_ERR(mc->gart)) {
+ dev_err(&pdev->dev, "failed to probe GART: %ld\n",
+ PTR_ERR(mc->gart));
+ mc->gart = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_mc_suspend(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+ int err;
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
+ err = tegra_gart_suspend(mc->gart);
+ if (err)
+ return err;
}
return 0;
}
+static int tegra_mc_resume(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+ int err;
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
+ err = tegra_gart_resume(mc->gart);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_mc_pm_ops = {
+ .suspend = tegra_mc_suspend,
+ .resume = tegra_mc_resume,
+};
+
static struct platform_driver tegra_mc_driver = {
.driver = {
.name = "tegra-mc",
.of_match_table = tegra_mc_of_match,
+ .pm = &tegra_mc_pm_ops,
.suppress_bind_attrs = true,
},
.prevent_deferred_probe = true,
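
Editor's note on the probe cleanup above: of_device_get_match_data() replaces the manual of_match_node() walk, resolving the matched entry's .data pointer in one call and returning NULL when nothing matched. The generic before/after shape (table and names are placeholders):

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static const int chip_soc;      /* stand-in for per-SoC match data */

    static const struct of_device_id my_of_match[] = {
            { .compatible = "vendor,chip", .data = &chip_soc },
            { /* sentinel */ }
    };

    static int my_probe(struct platform_device *pdev)
    {
            const struct of_device_id *match;
            const void *soc;

            /* Before: explicit table walk plus a NULL check. */
            match = of_match_node(my_of_match, pdev->dev.of_node);
            if (!match)
                    return -ENODEV;
            soc = match->data;

            /* After: one call; NULL means nothing matched. */
            soc = of_device_get_match_data(&pdev->dev);

            return soc ? 0 : -ENODEV;
    }
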
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index 01065f12ebeb..887a3b07334f 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -26,19 +26,13 @@
static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
{
- if (mc->regs2 && offset >= 0x24)
- return readl(mc->regs2 + offset - 0x3c);
-
- return readl(mc->regs + offset);
+ return readl_relaxed(mc->regs + offset);
}
static inline void mc_writel(struct tegra_mc *mc, u32 value,
unsigned long offset)
{
- if (mc->regs2 && offset >= 0x24)
- return writel(value, mc->regs2 + offset - 0x3c);
-
- writel(value, mc->regs + offset);
+ writel_relaxed(value, mc->regs + offset);
}
extern const struct tegra_mc_reset_ops terga_mc_reset_ops_common;
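
Editor's note on the accessor hunk above: two things happen at once — the Tegra20 split aperture (regs2) disappears because the updated binding describes a single register window, and the plain readl()/writel() become the _relaxed variants, dropping the barriers that order MMIO against DMA memory. A rule-of-thumb sketch of when each form applies (names hypothetical):

    /* Ordered accessor: publishing DMA-visible data before a doorbell. */
    desc->status = cpu_to_le32(DESC_READY); /* in coherent DMA memory */
    writel(DOORBELL_KICK, regs + DOORBELL); /* barrier: desc seen first */

    /* Relaxed accessor: a plain control-register poke with no DMA
     * ordering requirement, as for the MC registers here. */
    writel_relaxed(value, regs + MC_INTMASK);
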
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index bcdca9fbef51..e3a5af65dbce 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -644,7 +644,6 @@ static int jmb38x_ms_reset(struct jmb38x_ms_host *host)
writel(HOST_CONTROL_RESET_REQ | HOST_CONTROL_CLOCK_EN
| readl(host->addr + HOST_CONTROL),
host->addr + HOST_CONTROL);
- mmiowb();
for (cnt = 0; cnt < 20; ++cnt) {
if (!(HOST_CONTROL_RESET_REQ
@@ -659,7 +658,6 @@ reset_next:
writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
| readl(host->addr + HOST_CONTROL),
host->addr + HOST_CONTROL);
- mmiowb();
for (cnt = 0; cnt < 20; ++cnt) {
if (!(HOST_CONTROL_RESET
@@ -672,7 +670,6 @@ reset_next:
return -EIO;
reset_ok:
- mmiowb();
writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE);
writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE);
return 0;
@@ -1009,7 +1006,6 @@ static void jmb38x_ms_remove(struct pci_dev *dev)
tasklet_kill(&host->notify);
writel(0, host->addr + INT_SIGNAL_ENABLE);
writel(0, host->addr + INT_STATUS_ENABLE);
- mmiowb();
dev_dbg(&jm->pdev->dev, "interrupts off\n");
spin_lock_irqsave(&host->lock, flags);
if (host->req) {
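The mmiowb() removals in this file (and in several drivers below) follow from spin_unlock() now providing that ordering implicitly on the architectures that needed it, so MMIO writes posted under a spinlock no longer require an explicit barrier. The reduced pattern, with REG as a placeholder register offset:

	spin_lock_irqsave(&host->lock, flags);
	writel(value, host->addr + REG);
	/* no mmiowb(): the unlock orders the posted MMIO write */
	spin_unlock_irqrestore(&host->lock, flags);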
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ad8b1f400919..26ad6468d13a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1065,6 +1065,8 @@ config MFD_SI476X_CORE
config MFD_SM501
tristate "Silicon Motion SM501"
+ depends on HAS_DMA
+ select DMA_DECLARE_COHERENT
---help---
This is the core driver for the Silicon Motion SM501 multimedia
companion chip. This device is a multifunction device which may
@@ -1244,7 +1246,7 @@ config MFD_STA2X11
config MFD_SUN6I_PRCM
bool "Allwinner A31 PRCM controller"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
select MFD_CORE
help
Support for the PRCM (Power/Reset/Clock Management) unit available
@@ -1674,6 +1676,7 @@ config MFD_TC6393XB
select GPIOLIB
select MFD_CORE
select MFD_TMIO
+ select DMA_DECLARE_COHERENT
help
Support for Toshiba Mobile IO Controller TC6393XB
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 69df27769c21..43ac71691fe4 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
static const struct mfd_cell sprd_pmic_devs[] = {
{
.name = "sc27xx-wdt",
- .of_compatible = "sprd,sc27xx-wdt",
+ .of_compatible = "sprd,sc2731-wdt",
}, {
.name = "sc27xx-rtc",
- .of_compatible = "sprd,sc27xx-rtc",
+ .of_compatible = "sprd,sc2731-rtc",
}, {
.name = "sc27xx-charger",
- .of_compatible = "sprd,sc27xx-charger",
+ .of_compatible = "sprd,sc2731-charger",
}, {
.name = "sc27xx-chg-timer",
- .of_compatible = "sprd,sc27xx-chg-timer",
+ .of_compatible = "sprd,sc2731-chg-timer",
}, {
.name = "sc27xx-fast-chg",
- .of_compatible = "sprd,sc27xx-fast-chg",
+ .of_compatible = "sprd,sc2731-fast-chg",
}, {
.name = "sc27xx-chg-wdt",
- .of_compatible = "sprd,sc27xx-chg-wdt",
+ .of_compatible = "sprd,sc2731-chg-wdt",
}, {
.name = "sc27xx-typec",
- .of_compatible = "sprd,sc27xx-typec",
+ .of_compatible = "sprd,sc2731-typec",
}, {
.name = "sc27xx-flash",
- .of_compatible = "sprd,sc27xx-flash",
+ .of_compatible = "sprd,sc2731-flash",
}, {
.name = "sc27xx-eic",
- .of_compatible = "sprd,sc27xx-eic",
+ .of_compatible = "sprd,sc2731-eic",
}, {
.name = "sc27xx-efuse",
- .of_compatible = "sprd,sc27xx-efuse",
+ .of_compatible = "sprd,sc2731-efuse",
}, {
.name = "sc27xx-thermal",
- .of_compatible = "sprd,sc27xx-thermal",
+ .of_compatible = "sprd,sc2731-thermal",
}, {
.name = "sc27xx-adc",
- .of_compatible = "sprd,sc27xx-adc",
+ .of_compatible = "sprd,sc2731-adc",
}, {
.name = "sc27xx-audio-codec",
- .of_compatible = "sprd,sc27xx-audio-codec",
+ .of_compatible = "sprd,sc2731-audio-codec",
}, {
.name = "sc27xx-regulator",
- .of_compatible = "sprd,sc27xx-regulator",
+ .of_compatible = "sprd,sc2731-regulator",
}, {
.name = "sc27xx-vibrator",
- .of_compatible = "sprd,sc27xx-vibrator",
+ .of_compatible = "sprd,sc2731-vibrator",
}, {
.name = "sc27xx-keypad-led",
- .of_compatible = "sprd,sc27xx-keypad-led",
+ .of_compatible = "sprd,sc2731-keypad-led",
}, {
.name = "sc27xx-bltc",
- .of_compatible = "sprd,sc27xx-bltc",
+ .of_compatible = "sprd,sc2731-bltc",
}, {
.name = "sc27xx-fgu",
- .of_compatible = "sprd,sc27xx-fgu",
+ .of_compatible = "sprd,sc2731-fgu",
}, {
.name = "sc27xx-7sreset",
- .of_compatible = "sprd,sc27xx-7sreset",
+ .of_compatible = "sprd,sc2731-7sreset",
}, {
.name = "sc27xx-poweroff",
- .of_compatible = "sprd,sc27xx-poweroff",
+ .of_compatible = "sprd,sc2731-poweroff",
}, {
.name = "sc27xx-syscon",
- .of_compatible = "sprd,sc27xx-syscon",
+ .of_compatible = "sprd,sc2731-syscon",
},
};
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 299016bc46d9..104477b512a2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1245,6 +1245,28 @@ free:
return status;
}
+static int __maybe_unused twl_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq)
+ disable_irq(client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused twl_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq)
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
+
static const struct i2c_device_id twl_ids[] = {
{ "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
{ "twl5030", 0 }, /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
/* One Client Driver , 4 Clients */
static struct i2c_driver twl_driver = {
.driver.name = DRIVER_NAME,
+ .driver.pm = &twl_dev_pm_ops,
.id_table = twl_ids,
.probe = twl_probe,
.remove = twl_remove,
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 25fbbaf39cb9..21bae64e0451 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -34,7 +34,7 @@
/* Current settings - values are 2*2^(reg_val/4) microamps. These are
* exported since they are used by multiple drivers.
*/
-int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = {
+const unsigned int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = {
2,
2,
3,
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 79756c83f5f0..cf067424a156 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -35,12 +35,6 @@ static bool wm8400_volatile(struct device *dev, unsigned int reg)
}
}
-int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
-{
- return regmap_bulk_read(wm8400->regmap, reg, data, count);
-}
-EXPORT_SYMBOL_GPL(wm8400_block_read);
-
static int wm8400_register_codec(struct wm8400 *wm8400)
{
const struct mfd_cell cell = {
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 39f832d27288..36d0d5c9cfba 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1184,6 +1184,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
struct fastrpc_session_ctx *sess;
struct device *dev = &pdev->dev;
int i, sessions = 0;
+ int rc;
cctx = dev_get_drvdata(dev->parent);
if (!cctx)
@@ -1213,7 +1214,11 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
}
cctx->sesscount++;
spin_unlock(&cctx->lock);
- dma_set_mask(dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(dev, "32-bit DMA enable failed\n");
+ return rc;
+ }
return 0;
}
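Checking the return value matters because dma_set_mask() fails when the platform cannot satisfy the requested mask. A closely related idiom, shown only as an illustrative sketch, sets the streaming and coherent masks in one call:

	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "no usable 32-bit DMA configuration\n");
		return rc;
	}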
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 3525236ed8d9..19c84214a7ea 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
/* We also need to update CI for internal queues */
if (cs->submitted) {
+ int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+ WARN_ONCE((cs_cnt < 0),
+ "hl%d: error in CS active cnt %d\n",
+ hdev->id, cs_cnt);
+
hl_int_hw_queue_update_ci(cs);
spin_lock(&hdev->hw_queues_mirror_lock);
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a53c12aff6ad..974a87789bd8 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
enum vm_type_t *vm_type;
bool once = true;
+ u64 j;
int i;
if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
} else {
phys_pg_pack = hnode->ptr;
seq_printf(s,
- " 0x%-14llx %-10u %-4u\n",
+ " 0x%-14llx %-10llu %-4u\n",
hnode->vaddr, phys_pg_pack->total_size,
phys_pg_pack->handle);
}
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
phys_pg_pack->page_size);
seq_puts(s, " physical address\n");
seq_puts(s, "---------------------\n");
- for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+ for (j = 0 ; j < phys_pg_pack->npages ; j++) {
seq_printf(s, " 0x%-14llx\n",
- phys_pg_pack->pages[i]);
+ phys_pg_pack->pages[j]);
}
}
spin_unlock(&vm->idr_lock);
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index de46aa6ed154..77d51be66c7e 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -11,6 +11,8 @@
#include <linux/sched/signal.h>
#include <linux/hwmon.h>
+#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10)
+
bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
{
if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
spin_lock_init(&hdev->hw_queues_mirror_lock);
atomic_set(&hdev->in_reset, 0);
atomic_set(&hdev->fd_open_cnt, 0);
+ atomic_set(&hdev->cs_active_cnt, 0);
return 0;
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
pci_save_state(hdev->pdev);
+ /* Block future CS/VM/JOB completion operations */
+ rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+ if (rc) {
+ dev_err(hdev->dev, "Can't suspend while in reset\n");
+ return -EIO;
+ }
+
+	/* This blocks all other operations that are not blocked by in_reset */
+ hdev->disabled = true;
+
+ /*
+	 * Flush anyone that is inside the critical section of enqueuing
+	 * jobs to the H/W
+ */
+ hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+	/* Flush processes that are sending messages to the CPU */
+ mutex_lock(&hdev->send_cpu_message_lock);
+ mutex_unlock(&hdev->send_cpu_message_lock);
+
rc = hdev->asic_funcs->suspend(hdev);
if (rc)
dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
pci_set_power_state(hdev->pdev, PCI_D0);
pci_restore_state(hdev->pdev);
- rc = pci_enable_device(hdev->pdev);
+ rc = pci_enable_device_mem(hdev->pdev);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI device in resume\n");
return rc;
}
+ pci_set_master(hdev->pdev);
+
rc = hdev->asic_funcs->resume(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed to enable PCI access from device CPU\n");
- return rc;
+ dev_err(hdev->dev, "Failed to resume device after suspend\n");
+ goto disable_device;
+ }
+
+ hdev->disabled = false;
+ atomic_set(&hdev->in_reset, 0);
+
+ rc = hl_device_reset(hdev, true, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to reset device during resume\n");
+ goto disable_device;
}
return 0;
+
+disable_device:
+ pci_clear_master(hdev->pdev);
+ pci_disable_device(hdev->pdev);
+
+ return rc;
}
static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
struct hl_device_reset_work *device_reset_work =
container_of(work, struct hl_device_reset_work, reset_work);
struct hl_device *hdev = device_reset_work->hdev;
- u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+ u16 pending_total, pending_cnt;
struct task_struct *task = NULL;
+ if (hdev->pldm)
+ pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+ else
+ pending_total = HL_PENDING_RESET_PER_SEC;
+
+ pending_cnt = pending_total;
+
/* Flush all processes that are inside hl_open */
mutex_lock(&hdev->fd_open_cnt_lock);
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
}
}
+ pending_cnt = pending_total;
+
+ while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+ pending_cnt--;
+
+ ssleep(1);
+ }
+
+ if (atomic_read(&hdev->fd_open_cnt))
+ dev_crit(hdev->dev,
+ "Going to hard reset with open user contexts\n");
+
mutex_unlock(&hdev->fd_open_cnt_lock);
hl_device_reset(hdev, true, true);
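The empty lock/unlock pairs added to hl_device_suspend() use the lock as a flush barrier: taking and immediately dropping it cannot complete until every thread already inside the critical section has left, and later entrants then see hdev->disabled. Schematically, with a generic lock:

	hdev->disabled = true;	/* new callers bail out */
	mutex_lock(&lock);	/* waits out current holders */
	mutex_unlock(&lock);	/* critical section is now drained */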
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 238dd57c541b..3c509e19d69d 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
return retval;
}
-static void goya_resume_external_queues(struct hl_device *hdev)
-{
- WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
-}
-
/*
* goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
*
@@ -1697,12 +1688,11 @@ static void goya_init_golden_registers(struct hl_device *hdev)
/*
* Workaround for H2 #HW-23 bug
- * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it
- * to 16 on KMD DMA
- * We need to limit only these DMAs because the user can only read
+ * Set DMA max outstanding read requests to 240 on DMA CH 1.
+ * This limitation is still large enough to not affect Gen4 bandwidth.
+	 * We only need to limit that DMA channel because the user can only read
* from Host using DMA CH 1
*/
- WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
goya->hw_cap_initialized |= HW_CAP_GOLDEN;
@@ -2178,36 +2168,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
return retval;
}
-static void goya_resume_internal_queues(struct hl_device *hdev)
-{
- WREG32(mmMME_QM_GLBL_CFG1, 0);
- WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC0_QM_GLBL_CFG1, 0);
- WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC1_QM_GLBL_CFG1, 0);
- WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC2_QM_GLBL_CFG1, 0);
- WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC3_QM_GLBL_CFG1, 0);
- WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC4_QM_GLBL_CFG1, 0);
- WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC5_QM_GLBL_CFG1, 0);
- WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC6_QM_GLBL_CFG1, 0);
- WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC7_QM_GLBL_CFG1, 0);
- WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
-}
-
static void goya_dma_stall(struct hl_device *hdev)
{
WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2865,6 @@ int goya_suspend(struct hl_device *hdev)
{
int rc;
- rc = goya_stop_internal_queues(hdev);
-
- if (rc) {
- dev_err(hdev->dev, "failed to stop internal queues\n");
- return rc;
- }
-
- rc = goya_stop_external_queues(hdev);
-
- if (rc) {
- dev_err(hdev->dev, "failed to stop external queues\n");
- return rc;
- }
-
rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2874,7 @@ int goya_suspend(struct hl_device *hdev)
int goya_resume(struct hl_device *hdev)
{
- int rc;
-
- goya_resume_external_queues(hdev);
- goya_resume_internal_queues(hdev);
-
- rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
- if (rc)
- dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
- return rc;
+ return goya_init_iatu(hdev);
}
static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3008,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
*dma_handle = hdev->asic_prop.sram_base_address;
- base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+ base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
switch (queue_id) {
case GOYA_QUEUE_ID_MME:
@@ -3754,7 +3692,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
* WA for HW-23.
* We can't allow user to read from Host using QMANs other than 1.
*/
- if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
+ if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
le32_to_cpu(user_dma_pkt->tsize),
hdev->asic_prop.va_space_host_start_address,
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a7c95e9f9b9a..a8ee52c880cd 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
* struct hl_vm_phys_pg_pack - physical page pack.
* @vm_type: describes the type of the virtual area descriptor.
* @pages: the physical page array.
+ * @npages: num physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
* @mapping_cnt: number of shared mappings.
* @asid: the context related to this list.
- * @npages: num physical pages in the pack.
* @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
* @flags: HL_MEM_* flags related to this list.
* @handle: the provided handle related to this list.
* @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
struct hl_vm_phys_pg_pack {
enum vm_type_t vm_type; /* must be first */
u64 *pages;
+ u64 npages;
+ u64 total_size;
atomic_t mapping_cnt;
u32 asid;
- u32 npages;
u32 page_size;
- u32 total_size;
u32 flags;
u32 handle;
u32 offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
* @cb_pool_lock: protects the CB pool.
* @user_ctx: current user context executing.
* @dram_used_mem: current DRAM memory consumption.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, KMD will restore this
* value and update the F/W after the re-initialization
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ * means already in H/W queues)
* @major: habanalabs KMD major.
* @high_pll: high PLL profile frequency.
* @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
struct hl_ctx *user_ctx;
atomic64_t dram_used_mem;
+ u64 timeout_jiffies;
+ u64 max_power;
atomic_t in_reset;
atomic_t curr_pll_profile;
atomic_t fd_open_cnt;
- u64 timeout_jiffies;
- u64 max_power;
+ atomic_t cs_active_cnt;
u32 major;
u32 high_pll;
u32 soft_reset_cnt;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 67bece26417c..ef3bb6951360 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
spin_unlock(&hdev->hw_queues_mirror_lock);
}
- list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+ atomic_inc(&hdev->cs_active_cnt);
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
if (job->ext_queue)
ext_hw_queue_schedule_job(job);
else
int_hw_queue_schedule_job(job);
- }
cs->submitted = true;
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 3a12fd1a5274..ce1fda40a8b8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
- u64 paddr = 0;
- u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
- int handle, rc, i;
+ u64 paddr = 0, total_size, num_pgs, i;
+ u32 num_curr_pgs, page_size, page_shift;
+ int handle, rc;
bool contiguous;
num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
- "failed to allocate %u huge contiguous pages\n",
+ "failed to allocate %llu huge contiguous pages\n",
num_pgs);
return -ENOMEM;
}
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
phys_pg_pack->flags = args->flags;
phys_pg_pack->contiguous = contiguous;
- phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+ phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
if (!phys_pg_pack->pages) {
rc = -ENOMEM;
goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
page_size);
- kfree(phys_pg_pack->pages);
+ kvfree(phys_pg_pack->pages);
pages_arr_err:
kfree(phys_pg_pack);
pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
- int i;
+ u64 i;
if (!phys_pg_pack->created_from_userptr) {
if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
}
}
- kfree(phys_pg_pack->pages);
+ kvfree(phys_pg_pack->pages);
kfree(phys_pg_pack);
}
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
* - Return the start address of the virtual block
*/
static u64 get_va_block(struct hl_device *hdev,
- struct hl_va_range *va_range, u32 size, u64 hint_addr,
+ struct hl_va_range *va_range, u64 size, u64 hint_addr,
bool is_userptr)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
}
if (!new_va_block) {
- dev_err(hdev->dev, "no available va block for size %u\n", size);
+ dev_err(hdev->dev, "no available va block for size %llu\n",
+ size);
goto out;
}
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
- u64 page_mask;
- u32 npages, total_npages, page_size = PAGE_SIZE;
+ u64 page_mask, total_npages;
+ u32 npages, page_size = PAGE_SIZE;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
page_mask = ~(((u64) page_size) - 1);
- phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+ phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+ GFP_KERNEL);
if (!phys_pg_pack->pages) {
rc = -ENOMEM;
goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
- u64 next_vaddr = vaddr, paddr;
+ u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
u32 page_size = phys_pg_pack->page_size;
- int i, rc = 0, mapped_pg_cnt = 0;
+ int rc = 0;
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
if (rc) {
dev_err(hdev->dev,
- "map failed for handle %u, npages: %d, mapped: %d",
+ "map failed for handle %u, npages: %llu, mapped: %llu",
phys_pg_pack->handle, phys_pg_pack->npages,
mapped_pg_cnt);
goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
enum vm_type_t *vm_type;
- u64 next_vaddr;
+ u64 next_vaddr, i;
u32 page_size;
bool is_userptr;
- int i, rc;
+ int rc;
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 2f2e99cb2743..3a5a2cec8305 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -832,7 +832,7 @@ err:
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
- u64 real_virt_addr;
+ u64 real_virt_addr, real_phys_addr;
u32 real_page_size, npages;
int i, rc, mapped_cnt = 0;
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
+ real_phys_addr = phys_addr;
for (i = 0 ; i < npages ; i++) {
- rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+ rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
real_page_size);
if (rc)
goto err;
real_virt_addr += real_page_size;
+ real_phys_addr += real_page_size;
mapped_cnt++;
}
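The added increment fixes the page-split loop: when a large mapping is broken into the MMU's real page size, the physical address must advance alongside the virtual one, otherwise every chunk maps the same physical page. For a 64 KiB request on a 4 KiB-page MMU, with map() standing in for _hl_mmu_map():

	npages = SZ_64K / SZ_4K;	/* 16 chunks */
	for (i = 0; i < npages; i++) {
		map(virt, phys, SZ_4K);
		virt += SZ_4K;
		phys += SZ_4K;	/* the previously missing step */
	}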
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index ec0832278170..9d0445a567db 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -156,7 +156,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
/* Reset to power-on state */
writel(0, &idd->idd_misc_regs->int_out.raw);
- mmiowb();
/* Set up square wave */
int_out.raw = 0;
@@ -164,7 +163,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
int_out.fields.diag = 0;
writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);
- mmiowb();
/* Check square wave period averaged over some number of cycles */
start = ktime_get_ns();
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 3fbbadfa2ae1..8a47a6fc3fc7 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -350,9 +350,6 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
hcsr |= H_IG;
hcsr &= ~H_RST;
mei_hcsr_set(dev, hcsr);
-
- /* complete this write before we set host ready on another CPU */
- mmiowb();
}
/**
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 242dcee14689..6736f72cc14a 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -4,7 +4,7 @@ comment "Intel MIC Bus Driver"
config INTEL_MIC_BUS
tristate "Intel MIC Bus Driver"
- depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+ depends on 64BIT && PCI && X86
help
This option is selected by any driver which registers a
device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST,
@@ -21,7 +21,7 @@ comment "SCIF Bus Driver"
config SCIF_BUS
tristate "SCIF Bus Driver"
- depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+ depends on 64BIT && PCI && X86
help
This option is selected by any driver which registers a
device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index f62216628fa6..0fb9b440dc70 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -272,21 +272,12 @@ static inline void __scif_release_mm(struct mm_struct *mm)
static inline int
__scif_dec_pinned_vm_lock(struct mm_struct *mm,
- int nr_pages, bool try_lock)
+ int nr_pages)
{
if (!mm || !nr_pages || !scif_ulimit_check)
return 0;
- if (try_lock) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- dev_err(scif_info.mdev.this_device,
- "%s %d err\n", __func__, __LINE__);
- return -1;
- }
- } else {
- down_write(&mm->mmap_sem);
- }
- mm->pinned_vm -= nr_pages;
- up_write(&mm->mmap_sem);
+
+ atomic64_sub(nr_pages, &mm->pinned_vm);
return 0;
}
@@ -298,16 +289,16 @@ static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
if (!mm || !nr_pages || !scif_ulimit_check)
return 0;
- locked = nr_pages;
- locked += mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ locked = atomic64_add_return(nr_pages, &mm->pinned_vm);
+
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ atomic64_sub(nr_pages, &mm->pinned_vm);
dev_err(scif_info.mdev.this_device,
"locked(%lu) > lock_limit(%lu)\n",
locked, lock_limit);
return -ENOMEM;
}
- mm->pinned_vm = locked;
return 0;
}
@@ -326,7 +317,7 @@ int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window)
might_sleep();
if (!window->temp && window->mm) {
- __scif_dec_pinned_vm_lock(window->mm, window->nr_pages, 0);
+ __scif_dec_pinned_vm_lock(window->mm, window->nr_pages);
__scif_release_mm(window->mm);
window->mm = NULL;
}
@@ -737,7 +728,7 @@ done:
ep->rma_info.dma_chan);
} else {
if (!__scif_dec_pinned_vm_lock(window->mm,
- window->nr_pages, 1)) {
+ window->nr_pages)) {
__scif_release_mm(window->mm);
window->mm = NULL;
}
@@ -1385,28 +1376,23 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot,
prot |= SCIF_PROT_WRITE;
retry:
mm = current->mm;
- down_write(&mm->mmap_sem);
if (ulimit) {
err = __scif_check_inc_pinned_vm(mm, nr_pages);
if (err) {
- up_write(&mm->mmap_sem);
pinned_pages->nr_pages = 0;
goto error_unmap;
}
}
- pinned_pages->nr_pages = get_user_pages(
+ pinned_pages->nr_pages = get_user_pages_fast(
(u64)addr,
nr_pages,
(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
- pinned_pages->pages,
- NULL);
- up_write(&mm->mmap_sem);
+ pinned_pages->pages);
if (nr_pages != pinned_pages->nr_pages) {
if (try_upgrade) {
if (ulimit)
- __scif_dec_pinned_vm_lock(mm,
- nr_pages, 0);
+ __scif_dec_pinned_vm_lock(mm, nr_pages);
/* Roll back any pinned pages */
for (i = 0; i < pinned_pages->nr_pages; i++) {
if (pinned_pages->pages[i])
@@ -1433,7 +1419,7 @@ retry:
return err;
dec_pinned:
if (ulimit)
- __scif_dec_pinned_vm_lock(mm, nr_pages, 0);
+ __scif_dec_pinned_vm_lock(mm, nr_pages);
/* Something went wrong! Rollback */
error_unmap:
pinned_pages->nr_pages = nr_pages;
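Two simplifications combine in this file: pinned_vm becomes an atomic64_t, so the accounting no longer needs mmap_sem, and get_user_pages_fast() takes over the page-table locking from the caller. The limit check uses an optimistic add-then-undo pattern, and the fast GUP call reduces to the sketch below (surrounding error handling elided):

	pinned = get_user_pages_fast((unsigned long)addr, nr_pages,
				     write ? FOLL_WRITE : 0, pages);
	if (pinned < nr_pages)
		goto rollback;	/* release pages[0..pinned), undo accounting */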
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 540845651b8c..309703e9c42e 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -64,7 +64,6 @@
#define CLKCFG_UARTCLKSEL (1 << 18)
/* Macros for ML7213 */
-#define PCI_VENDOR_ID_ROHM 0x10db
#define PCI_DEVICE_ID_ROHM_ML7213_PHUB 0x801A
/* Macros for ML7223 */
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 896e2df9400f..29582fe57151 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -788,6 +788,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
{ }
};
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 9ac95b48ef92..cc729f7ab32e 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -403,7 +403,6 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
fm->eject = tifm_7xx1_dummy_eject;
fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif;
writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
- mmiowb();
free_irq(dev->irq, fm);
tifm_remove_adapter(fm);
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index c712b7deb3a9..6e6b7ade4b60 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -48,7 +48,6 @@ struct alcor_sdmmc_host {
struct mmc_command *cmd;
struct mmc_data *data;
unsigned int dma_on:1;
- unsigned int early_data:1;
struct mutex cmd_mutex;
@@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
host->sg_count--;
}
-static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
- bool early)
+static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
{
struct alcor_pci_priv *priv = host->alcor_pci;
struct mmc_data *data = host->data;
@@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
ctrl |= AU6601_DATA_WRITE;
if (data->host_cookie == COOKIE_MAPPED) {
- if (host->early_data) {
- host->early_data = false;
- return;
- }
-
- host->early_data = early;
-
alcor_data_set_dma(host);
ctrl |= AU6601_DATA_DMA_MODE;
host->dma_on = 1;
@@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
static void alcor_prepare_data(struct alcor_sdmmc_host *host,
struct mmc_command *cmd)
{
+ struct alcor_pci_priv *priv = host->alcor_pci;
struct mmc_data *data = cmd->data;
if (!data)
@@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host,
if (data->host_cookie != COOKIE_MAPPED)
alcor_prepare_sg_miter(host);
- alcor_trigger_data_transfer(host, true);
+ alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
}
static void alcor_send_cmd(struct alcor_sdmmc_host *host,
@@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
if (!host->data)
return false;
- alcor_trigger_data_transfer(host, false);
+ alcor_trigger_data_transfer(host);
host->cmd = NULL;
return true;
}
@@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
if (!host->data)
alcor_request_complete(host, 1);
else
- alcor_trigger_data_transfer(host, false);
+ alcor_trigger_data_transfer(host);
host->cmd = NULL;
}
@@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
break;
case AU6601_INT_READ_BUF_RDY:
alcor_trf_block_pio(host, true);
- if (!host->blocks)
- break;
- alcor_trigger_data_transfer(host, false);
return 1;
case AU6601_INT_WRITE_BUF_RDY:
alcor_trf_block_pio(host, false);
- if (!host->blocks)
- break;
- alcor_trigger_data_transfer(host, false);
return 1;
case AU6601_INT_DMA_END:
if (!host->sg_count)
@@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
break;
}
- if (intmask & AU6601_INT_DATA_END)
- return 0;
+ if (intmask & AU6601_INT_DATA_END) {
+ if (!host->dma_on && host->blocks) {
+ alcor_trigger_data_transfer(host);
+ return 1;
+ } else {
+ return 0;
+ }
+ }
return 1;
}
@@ -967,7 +959,6 @@ static void alcor_timeout_timer(struct work_struct *work)
alcor_request_complete(host, 0);
}
- mmiowb();
mutex_unlock(&host->cmd_mutex);
}
@@ -1044,14 +1035,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
mmc->caps2 = MMC_CAP2_NO_SDIO;
mmc->ops = &alcor_sdc_ops;
- /* Hardware cannot do scatter lists */
+ /* The hardware does DMA data transfer of 4096 bytes to/from a single
+ * buffer address. Scatterlists are not supported, but upon DMA
+	 * completion (signalled via IRQ), the original vendor driver
+	 * then immediately sets up another DMA transfer for the next
+	 * 4096 bytes.
+ *
+ * This means that we need to handle the I/O in 4096 byte chunks.
+ * Lacking a way to limit the sglist entries to 4096 bytes, we instead
+ * impose that only one segment is provided, with maximum size 4096,
+ * which also happens to be the minimum size. This means that the
+ * single-entry sglist handled by this driver can be handed directly
+ * to the hardware, nice and simple.
+ *
+ * Unfortunately though, that means we only do 4096 bytes I/O per
+ * MMC command. A future improvement would be to make the driver
+ * accept sg lists and entries of any size, and simply iterate
+ * through them 4096 bytes at a time.
+ */
mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
-
- mmc->max_blk_size = mmc->max_seg_size;
- mmc->max_blk_count = mmc->max_segs;
-
- mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
+ mmc->max_req_size = mmc->max_seg_size;
}
static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 49e0daf2ef5e..f37003df1e01 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif
-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+static void init_mmcsd_host(struct mmc_davinci_host *host)
{
mmc_davinci_reset_ctrl(host, 1);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index d54612257b06..45f7b9b53d48 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -290,11 +290,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
struct scatterlist *sg;
int i;
- for_each_sg(data->sg, sg, data->sg_len, i) {
- void *buf = kmap_atomic(sg_page(sg) + sg->offset);
- buffer_swap32(buf, sg->length);
- kunmap_atomic(buf);
- }
+ for_each_sg(data->sg, sg, data->sg_len, i)
+ buffer_swap32(sg_virt(sg), sg->length);
}
#else
static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -611,7 +608,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
{
struct mmc_data *data = host->req->data;
struct scatterlist *sg;
- void *buf;
int stat, i;
host->data = data;
@@ -619,18 +615,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
if (data->flags & MMC_DATA_READ) {
for_each_sg(data->sg, sg, data->sg_len, i) {
- buf = kmap_atomic(sg_page(sg) + sg->offset);
- stat = mxcmci_pull(host, buf, sg->length);
- kunmap(buf);
+ stat = mxcmci_pull(host, sg_virt(sg), sg->length);
if (stat)
return stat;
host->datasize += sg->length;
}
} else {
for_each_sg(data->sg, sg, data->sg_len, i) {
- buf = kmap_atomic(sg_page(sg) + sg->offset);
- stat = mxcmci_push(host, buf, sg->length);
- kunmap(buf);
+ stat = mxcmci_push(host, sg_virt(sg), sg->length);
if (stat)
return stat;
host->datasize += sg->length;
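Worth noting in the removed PIO path: kmap_atomic() was paired with kunmap() rather than kunmap_atomic(), a mismatch that the sg_virt() conversion sidesteps entirely. sg_virt(sg) expands to page_address(sg_page(sg)) + sg->offset, so this assumes, as this driver's buffers allow, that the scatterlist pages are directly addressable.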
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c907bf502a12..c1d3f0e38921 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param);
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
struct dma_async_tx_descriptor *tx;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
struct dma_slave_config config;
struct dma_chan *chan;
unsigned int nob = data->blocks;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 71e13844df6c..8742e27e4e8b 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -641,6 +641,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
struct renesas_sdhi *priv;
struct resource *res;
int irq, ret, i;
+ u16 ver;
of_data = of_device_get_match_data(&pdev->dev);
@@ -773,12 +774,17 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ret)
goto efree;
+ ver = sd_ctrl_read16(host, CTL_VERSION);
+	/* GEN2_SDR104 is the first known SDHI to use a 32-bit block count */
+ if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
+ mmc_data->max_blk_count = U16_MAX;
+
ret = tmio_mmc_host_probe(host);
if (ret < 0)
goto edisclk;
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
+ if (ver == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
/* Enable tuning iff we have an SCC and a supported mode */
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index b1a66ca3821a..9f20fff9781b 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
sdhci_reset(host, mask);
}
+#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
+ SDHCI_INT_TIMEOUT)
+#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
+
+static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (omap_host->is_tuning && host->cmd && !host->data_early &&
+ (intmask & CMD_ERR_MASK)) {
+
+ /*
+ * Since we are not resetting data lines during tuning
+ * operation, data error or data complete interrupts
+ * might still arrive. Mark this request as a failure
+ * but still wait for the data interrupt
+ */
+ if (intmask & SDHCI_INT_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ host->cmd->error = -EILSEQ;
+
+ host->cmd = NULL;
+
+ /*
+		 * Sometimes a command error interrupt and the command complete
+		 * interrupt arrive together. Clear all command-related
+ * interrupts here.
+ */
+ sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
+ intmask &= ~CMD_MASK;
+ }
+
+ return intmask;
+}
+
static struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
@@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = {
.platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
.reset = sdhci_omap_reset,
.set_uhs_signaling = sdhci_omap_set_uhs_signaling,
+ .irq = sdhci_omap_irq,
};
static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
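The ->irq() hook in sdhci_ops runs early in the core interrupt handler; whatever mask the hook returns is what the core goes on to process, letting a driver consume or rewrite status bits first. The hook contract as a minimal generic sketch (not the OMAP logic above):

static u32 my_sdhci_irq(struct sdhci_host *host, u32 intmask)
{
	if (intmask & SDHCI_INT_CARD_INT) {
		/* handle the bit privately... */
		intmask &= ~SDHCI_INT_CARD_INT;	/* ...and hide it */
	}
	return intmask;	/* remaining bits for the core */
}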
@@ -1056,6 +1094,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
mmc->f_max = 48000000;
}
+ if (!mmc_can_gpio_ro(mmc))
+ mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
pltfm_host->clk = devm_clk_get(dev, "fck");
if (IS_ERR(pltfm_host->clk)) {
ret = PTR_ERR(pltfm_host->clk);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a8141ff9be03..42e1bad024f4 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1807,7 +1807,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_send_command(host, mrq->cmd);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);
@@ -2010,8 +2009,6 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
*/
if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
-
- mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
@@ -2105,7 +2102,6 @@ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
- mmiowb();
}
}
@@ -2353,7 +2349,6 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
host->tuning_done = 0;
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
/* Wait for Buffer Read Ready interrupt */
@@ -2705,7 +2700,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
host->mrqs_done[i] = NULL;
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
@@ -2739,7 +2733,6 @@ static void sdhci_timeout_timer(struct timer_list *t)
sdhci_finish_mrq(host, host->cmd->mrq);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -2770,7 +2763,6 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
}
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -3251,7 +3243,6 @@ int sdhci_resume_host(struct sdhci_host *host)
mmc->ops->set_ios(mmc, &mmc->ios);
} else {
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
- mmiowb();
}
if (host->irq_wake_enabled) {
@@ -3391,7 +3382,6 @@ void sdhci_cqe_enable(struct mmc_host *mmc)
mmc_hostname(mmc), host->ier,
sdhci_readl(host, SDHCI_INT_STATUS));
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
@@ -3416,7 +3406,6 @@ void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
mmc_hostname(mmc), host->ier,
sdhci_readl(host, SDHCI_INT_STATUS));
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
@@ -4255,8 +4244,6 @@ int __sdhci_add_host(struct sdhci_host *host)
goto unirq;
}
- mmiowb();
-
ret = mmc_add_host(mmc);
if (ret)
goto unled;
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index b6644ce296b2..35dd34b82a4d 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -889,7 +889,6 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
struct tifm_dev *sock = host->dev;
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
- mmiowb();
host->clk_div = 61;
host->clk_freq = 20000000;
writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
@@ -940,7 +939,6 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC
| TIFM_MMCSD_ERRMASK,
sock->addr + SOCK_MMCSD_INT_ENABLE);
- mmiowb();
return 0;
}
@@ -1005,7 +1003,6 @@ static void tifm_sd_remove(struct tifm_dev *sock)
spin_lock_irqsave(&sock->lock, flags);
host->eject = 1;
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
- mmiowb();
spin_unlock_irqrestore(&sock->lock, flags);
tasklet_kill(&host->finish_tasklet);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 32c4211506fc..412395ac2935 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -686,7 +686,6 @@ static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
via_sdc_send_command(host, mrq->cmd);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -711,7 +710,6 @@ static void via_sdc_set_power(struct via_crdr_mmc_host *host,
gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
via_pwron_sleep(host);
@@ -770,7 +768,6 @@ static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock)
writeb(clock, addrbase + VIA_CRDR_PCISDCCLK);
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
if (ios->power_mode != MMC_POWER_OFF)
@@ -830,7 +827,6 @@ static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
via_restore_pcictrlreg(host);
via_restore_sdcreg(host);
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -925,7 +921,6 @@ static irqreturn_t via_sdc_isr(int irq, void *dev_id)
result = IRQ_HANDLED;
- mmiowb();
out:
spin_unlock(&sdhost->lock);
@@ -960,7 +955,6 @@ static void via_sdc_timeout(struct timer_list *t)
}
}
- mmiowb();
spin_unlock_irqrestore(&sdhost->lock, flags);
}
@@ -1012,7 +1006,6 @@ static void via_sdc_card_detect(struct work_struct *work)
tasklet_schedule(&host->finish_tasklet);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
via_reset_pcictrl(host);
@@ -1020,7 +1013,6 @@ static void via_sdc_card_detect(struct work_struct *work)
spin_lock_irqsave(&host->lock, flags);
}
- mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
via_print_pcictrl(host);
@@ -1188,7 +1180,6 @@ static void via_sd_remove(struct pci_dev *pcidev)
/* Disable generating further interrupts */
writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
- mmiowb();
if (sdhost->mrq) {
pr_err("%s: Controller removed during "
@@ -1197,7 +1188,6 @@ static void via_sd_remove(struct pci_dev *pcidev)
/* make sure all DMA is stopped */
writel(VIA_CRDR_DMACTRL_SFTRST,
sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
- mmiowb();
sdhost->mrq->cmd->error = -ENOMEDIUM;
if (sdhost->mrq->stop)
sdhost->mrq->stop->error = -ENOMEDIUM;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 72428b6bfc47..7b7286b4d81e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
continue;
}
- if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+ /*
+ * We check "time_after" and "!chip_good" before checking "chip_good" to avoid
+ * the failure due to scheduling.
+ */
+ if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
break;
if (chip_good(map, adr, datum)) {
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index f38e5c1b87e4..d984538980e2 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -722,12 +722,6 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 ndcr_generic;
- if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
- return;
-
- writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
- writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
-
/*
* Reset the NDCR register to a clean state for this particular chip,
* also clear ND_RUN bit.
@@ -739,6 +733,12 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
/* Also reset the interrupt status register */
marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+ return;
+
+ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
nfc->selected_chip = chip;
marvell_nand->selected_die = die_nr;
}
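The reorder above is the point of the fix: the NDCR reset and interrupt-status clear now run on every call, and only the timing registers (NDTR0/NDTR1) are skipped when the same chip and die are already selected; the old early return left stale NDCR state behind when re-selecting the current target.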
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index 86456216fb93..7b99831aa046 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -45,7 +45,6 @@ static inline void r852_write_reg(struct r852_device *dev,
int address, uint8_t value)
{
writeb(value, dev->mmio + address);
- mmiowb();
}
@@ -61,7 +60,6 @@ static inline void r852_write_reg_dword(struct r852_device *dev,
int address, uint32_t value)
{
writel(cpu_to_le32(value), dev->mmio + address);
- mmiowb();
}
/* returns pointer to our private structure */
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index ddf0420c0997..97978227aa55 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -159,7 +159,6 @@ static void txx9ndfmc_cmd_ctrl(struct nand_chip *chip, int cmd,
if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
}
- mmiowb();
}
static int txx9ndfmc_dev_ready(struct nand_chip *chip)
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 22547d7a84ea..947a8adbc799 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -974,6 +974,36 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
+ /* Check a specific PEB for bitflips and scrub it if needed */
+ case UBI_IOCRPEB:
+ {
+ int pnum;
+
+ err = get_user(pnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = ubi_bitflip_check(ubi, pnum, 0);
+ break;
+ }
+
+ /* Force scrubbing for a specific PEB */
+ case UBI_IOCSPEB:
+ {
+ int pnum;
+
+ err = get_user(pnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = ubi_bitflip_check(ubi, pnum, 1);
+ break;
+ }
+
default:
err = -ENOTTY;
break;
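Both new ioctls pass the PEB number by pointer and fetch it with get_user(). From userspace the calls reduce to the hypothetical sketch below; note that, per the return codes documented in wl.c further down, errno EUCLEAN actually signals success with bitflips found and scrubbing scheduled:

	int32_t pnum = 42;	/* PEB to inspect */

	if (ioctl(fd, UBI_IOCRPEB, &pnum) < 0)	/* read, scrub if needed */
		perror("UBI_IOCRPEB");
	if (ioctl(fd, UBI_IOCSPEB, &pnum) < 0)	/* force scrubbing */
		perror("UBI_IOCSPEB");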
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index d47b9e436e67..a1b9e764d489 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -929,6 +929,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
int ubi_is_erase_work(struct ubi_work *wrk);
void ubi_refill_pools(struct ubi_device *ubi);
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6f2ac865ff05..2709dc02fc24 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -278,6 +278,27 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
}
/**
+ * in_pq - check if a wear-leveling entry is present in the protection queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns non-zero if @e is in the protection queue and zero
+ * if it is not.
+ */
+static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ struct ubi_wl_entry *p;
+ int i;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+ list_for_each_entry(p, &ubi->pq[i], u.list)
+ if (p == e)
+ return 1;
+
+ return 0;
+}
+
+/**
* prot_queue_add - add physical eraseblock to the protection queue.
* @ubi: UBI device description object
* @e: the physical eraseblock to add
@@ -1419,6 +1440,150 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
return err;
}
+static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ if (in_wl_tree(e, &ubi->scrub))
+ return false;
+ else if (in_wl_tree(e, &ubi->erroneous))
+ return false;
+ else if (ubi->move_from == e)
+ return false;
+ else if (ubi->move_to == e)
+ return false;
+
+ return true;
+}
+
+/**
+ * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ * @force: don't read the block, assume bitflips happened and take action.
+ *
+ * This function reads the given eraseblock and checks if bitflips occurred.
+ * In case of bitflips, the eraseblock is scheduled for scrubbing.
+ * If scrubbing is forced with @force, the eraseblock is not read,
+ * but scheduled for scrubbing right away.
+ *
+ * Returns:
+ * %EINVAL, PEB is out of range
+ * %ENOENT, PEB is no longer used by UBI
+ * %EBUSY, PEB cannot be checked now or a check is currently running on it
+ * %EAGAIN, bit flips happened but scrubbing is currently not possible
+ * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
+ * %0, no bit flips detected
+ */
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+ if (pnum < 0 || pnum >= ubi->peb_count) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Pause all parallel work, otherwise it can happen that the
+ * erase worker frees a wl entry under us.
+ */
+ down_write(&ubi->work_sem);
+
+ /*
+ * Make sure that the wl entry does not change state while
+ * inspecting it.
+ */
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (!e) {
+ spin_unlock(&ubi->wl_lock);
+ err = -ENOENT;
+ goto out_resume;
+ }
+
+ /*
+ * Does it make sense to check this PEB?
+ */
+ if (!scrub_possible(ubi, e)) {
+ spin_unlock(&ubi->wl_lock);
+ err = -EBUSY;
+ goto out_resume;
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ if (!force) {
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ mutex_unlock(&ubi->buf_mutex);
+ }
+
+ if (force || err == UBI_IO_BITFLIPS) {
+ /*
+ * Okay, bit flip happened, let's figure out what we can do.
+ */
+ spin_lock(&ubi->wl_lock);
+
+ /*
+ * Recheck. We released wl_lock, UBI might have killed the
+ * wl entry under us.
+ */
+ e = ubi->lookuptbl[pnum];
+ if (!e) {
+ spin_unlock(&ubi->wl_lock);
+ err = -ENOENT;
+ goto out_resume;
+ }
+
+ /*
+ * Need to re-check state
+ */
+ if (!scrub_possible(ubi, e)) {
+ spin_unlock(&ubi->wl_lock);
+ err = -EBUSY;
+ goto out_resume;
+ }
+
+ if (in_pq(ubi, e)) {
+ prot_queue_del(ubi, e->pnum);
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ensure_wear_leveling(ubi, 1);
+ } else if (in_wl_tree(e, &ubi->used)) {
+ rb_erase(&e->u.rb, &ubi->used);
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ensure_wear_leveling(ubi, 1);
+ } else if (in_wl_tree(e, &ubi->free)) {
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+			 * This PEB is empty, so we can schedule it for
+ * erasure right away. No wear leveling needed.
+ */
+ err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
+ force ? 0 : 1, true);
+ } else {
+ spin_unlock(&ubi->wl_lock);
+ err = -EAGAIN;
+ }
+
+ if (!err && !force)
+ err = -EUCLEAN;
+ } else {
+ err = 0;
+ }
+
+out_resume:
+ up_write(&ubi->work_sem);
+out:
+
+ return err;
+}
+
/**
* tree_destroy - destroy an RB-tree.
* @ubi: UBI device description object
@@ -1848,16 +2013,11 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
- struct ubi_wl_entry *p;
- int i;
-
if (!ubi_dbg_chk_gen(ubi))
return 0;
- for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
- list_for_each_entry(p, &ubi->pq[i], u.list)
- if (p == e)
- return 0;
+ if (in_pq(ubi, e))
+ return 0;
ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
e->pnum, e->ec);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5e4ca082cfcd..7a96d168efc4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -216,8 +216,8 @@ config GENEVE
config GTP
tristate "GPRS Tunneling Protocol datapath (GTP-U)"
- depends on INET && NET_UDP_TUNNEL
- select NET_IP_TUNNEL
+ depends on INET
+ select NET_UDP_TUNNEL
---help---
This allows one to create gtp virtual interfaces that provide
the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b59708c35faf..ee610721098e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
if (event_dev->flags & IFF_MASTER) {
+ int ret;
+
netdev_dbg(event_dev, "IFF_MASTER\n");
- return bond_master_netdev_event(event, event_dev);
+ ret = bond_master_netdev_event(event, event_dev);
+ if (ret != NOTIFY_DONE)
+ return ret;
}
if (event_dev->flags & IFF_SLAVE) {
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2ffef0..4985268e2273 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+ return sprintf(buf, "%*phC\n",
+ slave->dev->addr_len,
+ slave->perm_hwaddr);
}
static SLAVE_ATTR_RO(perm_hwaddr);
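%pM always prints exactly six bytes, truncating hardware addresses longer than Ethernet's, such as a 20-byte InfiniBand address; %*phC takes the byte count as an argument and prints the whole buffer colon-separated. In miniature, with a made-up buffer:

	u8 addr[20] = { 0 };

	pr_info("%pM\n", addr);		/* first six bytes only */
	pr_info("%*phC\n", 20, addr);	/* all twenty bytes */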
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index d516def846ab..b388406ac0f5 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -127,7 +127,7 @@ static u8 *pcan_msg_init_empty(struct pcan_usb_pro_msg *pm,
/*
* add one record to a message being built
*/
-static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
+static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
{
int len, i;
u8 *pc;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index e6234d209787..4212bc4a5f31 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -886,6 +886,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
fs->m_ext.data[1]))
return -EINVAL;
+ if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+ return -EINVAL;
+
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
@@ -974,6 +977,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
struct cfp_rule *rule;
int ret;
+ if (loc >= CFP_NUM_RULES)
+ return -EINVAL;
+
/* Refuse deleting unused rules, and those that are not unique since
* that could leave IPv6 rules with one of the chained rule in the
* table.
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 96728d1e9824..f4e2db44ad91 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -569,6 +569,9 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
goto restore_link;
}
+ if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
+ mode = chip->info->ops->port_max_speed_mode(port);
+
if (chip->info->ops->port_set_pause) {
err = chip->info->ops->port_set_pause(chip, port, pause);
if (err)
@@ -3067,6 +3070,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6341_port_set_speed,
+ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3385,6 +3389,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
+ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3429,6 +3434,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390x_port_set_speed,
+ .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3473,6 +3479,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
+ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3566,6 +3573,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
+ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3697,6 +3705,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6341_port_set_speed,
+ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3872,6 +3881,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
+ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3920,6 +3930,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390x_port_set_speed,
+ .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index adcf60779895..19c07dff0440 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -377,6 +377,9 @@ struct mv88e6xxx_ops {
*/
int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
+ /* What interface mode should be used for maximum speed? */
+ phy_interface_t (*port_max_speed_mode)(int port);
+
int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 0796c6feec55..c44b2822e4dd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -312,6 +312,14 @@ int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
}
+phy_interface_t mv88e6341_port_max_speed_mode(int port)
+{
+ if (port == 5)
+ return PHY_INTERFACE_MODE_2500BASEX;
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{
@@ -345,6 +353,14 @@ int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
}
+phy_interface_t mv88e6390_port_max_speed_mode(int port)
+{
+ if (port == 9 || port == 10)
+ return PHY_INTERFACE_MODE_2500BASEX;
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
/* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{
@@ -360,6 +376,14 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
}
+phy_interface_t mv88e6390x_port_max_speed_mode(int port)
+{
+ if (port == 9 || port == 10)
+ return PHY_INTERFACE_MODE_XAUI;
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
@@ -403,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
lane = mv88e6390x_serdes_get_lane(chip, port);
- if (lane < 0)
+ if (lane < 0 && lane != -ENODEV)
return lane;
- if (chip->ports[port].serdes_irq) {
- err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (lane >= 0) {
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390x_serdes_power(chip, port, false);
if (err)
return err;
}
- err = mv88e6390x_serdes_power(chip, port, false);
- if (err)
- return err;
+ chip->ports[port].cmode = 0;
if (cmode) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -428,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
+ chip->ports[port].cmode = cmode;
+
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return lane;
+
err = mv88e6390x_serdes_power(chip, port, true);
if (err)
return err;
@@ -439,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
}
}
- chip->ports[port].cmode = cmode;
-
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 4aadf321edb7..c7bed263a0f4 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -285,6 +285,10 @@ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+phy_interface_t mv88e6341_port_max_speed_mode(int port);
+phy_interface_t mv88e6390_port_max_speed_mode(int port);
+phy_interface_t mv88e6390x_port_max_speed_mode(int port);
+
int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state);
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map);
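
With port_max_speed_mode in place, mv88e6xxx_port_setup_mac() can switch a port to the interface mode its serdes needs when SPEED_MAX is requested, while chips that don't provide the op simply keep the current mode. A hedged kernel-style sketch of how a further chip would hook in (mv88e9999 and its port layout are invented):

/* Kernel-style sketch; mv88e9999 and its port numbers are invented. */
static phy_interface_t mv88e9999_port_max_speed_mode(int port)
{
	/* Only the serdes ports need a mode change at maximum speed. */
	if (port == 9 || port == 10)
		return PHY_INTERFACE_MODE_2500BASEX;

	return PHY_INTERFACE_MODE_NA;	/* NA: keep the current mode */
}

It would then be wired up as .port_max_speed_mode in that chip's mv88e6xxx_ops entry, exactly as the real chips are above.
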
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 576b37d12a63..c4fa400efdcc 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
}
+static u32
+qca8k_port_to_phy(int port)
+{
+ /* From Andrew Lunn:
+ * Port 0 has no internal phy.
+ * Port 1 has an internal PHY at MDIO address 0.
+ * Port 2 has an internal PHY at MDIO address 1.
+ * ...
+ * Port 5 has an internal PHY at MDIO address 4.
+ * Port 6 has no internal PHY.
+ */
+
+ return port - 1;
+}
+
+static int
+qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
+{
+ u32 phy, val;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ /* The caller is responsible for not passing bad ports,
+ * but we still want to rule out out-of-range PHY addresses.
+ */
+ phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
+ QCA8K_MDIO_MASTER_DATA(data);
+
+ qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+ return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+}
+
+static int
+qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
+{
+ u32 phy, val;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ /* The caller is responsible for not passing bad ports,
+ * but we still want to rule out out-of-range PHY addresses.
+ */
+ phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+ if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY))
+ return -ETIMEDOUT;
+
+ val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
+ QCA8K_MDIO_MASTER_DATA_MASK);
+
+ return val;
+}
+
+static int
+qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ return qca8k_mdio_write(priv, port, regnum, data);
+}
+
+static int
+qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_mdio_read(priv, port, regnum);
+
+ if (ret < 0)
+ return 0xffff;
+
+ return ret;
+}
+
+static int
+qca8k_setup_mdio_bus(struct qca8k_priv *priv)
+{
+ u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
+ struct device_node *ports, *port;
+ int err;
+
+ ports = of_get_child_by_name(priv->dev->of_node, "ports");
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ err = of_property_read_u32(port, "reg", &reg);
+ if (err)
+ return err;
+
+ if (!dsa_is_user_port(priv->ds, reg))
+ continue;
+
+ if (of_property_read_bool(port, "phy-handle"))
+ external_mdio_mask |= BIT(reg);
+ else
+ internal_mdio_mask |= BIT(reg);
+ }
+
+ if (!external_mdio_mask && !internal_mdio_mask) {
+ dev_err(priv->dev, "no PHYs are defined.\n");
+ return -EINVAL;
+ }
+
+ /* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through
+ * the MDIO_MASTER register, also _disconnects_ the external MDC
+ * passthrough to the internal PHYs. It's not possible to use both
+ * configurations at the same time!
+ *
+ * Because this came up during the review process:
+ * If the external mdio-bus driver were capable of magically disabling
+ * QCA8K_MDIO_MASTER_EN and of mutex/spin-locking out the qca8k's
+ * accessors for the time being, it would be possible to pull this
+ * off.
+ */
+ if (!!external_mdio_mask && !!internal_mdio_mask) {
+ dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
+ return -EINVAL;
+ }
+
+ if (external_mdio_mask) {
+ /* Make sure to disable the internal mdio bus in case
+ * a dt-overlay and driver reload have changed the configuration.
+ */
+
+ qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
+ return 0;
+ }
+
+ priv->ops.phy_read = qca8k_phy_read;
+ priv->ops.phy_write = qca8k_phy_write;
+ return 0;
+}
+
static int
qca8k_setup(struct dsa_switch *ds)
{
@@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds)
if (IS_ERR(priv->regmap))
pr_warn("regmap initialization failed");
+ ret = qca8k_setup_mdio_bus(priv);
+ if (ret)
+ return ret;
+
/* Initialize CPU port pad mode (xMII type, delays...) */
phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
if (phy_mode < 0) {
@@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
qca8k_port_set_status(priv, port, 1);
}
-static int
-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- return mdiobus_read(priv->bus, phy, regnum);
-}
-
-static int
-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- return mdiobus_write(priv->bus, phy, regnum, val);
-}
-
static void
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
@@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.setup = qca8k_setup,
.adjust_link = qca8k_adjust_link,
.get_strings = qca8k_get_strings,
- .phy_read = qca8k_phy_read,
- .phy_write = qca8k_phy_write,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.get_mac_eee = qca8k_get_mac_eee,
@@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
return -ENOMEM;
priv->ds->priv = priv;
- priv->ds->ops = &qca8k_switch_ops;
+ priv->ops = qca8k_switch_ops;
+ priv->ds->ops = &priv->ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index d146e54c8a6c..249fd62268e5 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -49,6 +49,18 @@
#define QCA8K_MIB_FLUSH BIT(24)
#define QCA8K_MIB_CPU_KEEP BIT(20)
#define QCA8K_MIB_BUSY BIT(17)
+#define QCA8K_MDIO_MASTER_CTRL 0x3c
+#define QCA8K_MDIO_MASTER_BUSY BIT(31)
+#define QCA8K_MDIO_MASTER_EN BIT(30)
+#define QCA8K_MDIO_MASTER_READ BIT(27)
+#define QCA8K_MDIO_MASTER_WRITE 0
+#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
+#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
+#define QCA8K_MDIO_MASTER_DATA(x) (x)
+#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
+#define QCA8K_MDIO_MASTER_MAX_PORTS 5
+#define QCA8K_MDIO_MASTER_MAX_REG 32
#define QCA8K_GOL_MAC_ADDR0 0x60
#define QCA8K_GOL_MAC_ADDR1 0x64
#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
@@ -169,6 +181,7 @@ struct qca8k_priv {
struct dsa_switch *ds;
struct mutex reg_mutex;
struct device *dev;
+ struct dsa_switch_ops ops;
};
struct qca8k_mib_desc {
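
The MDIO_MASTER accessors added above pack the PHY address, register number and transfer direction into one control-register write, then poll the BUSY bit until the switch has completed the frame. A stand-alone check of that field packing, using the bit positions from the header (the PHY/register pair in main() is only an example):

#include <stdio.h>

#define QCA8K_MDIO_MASTER_BUSY        (1u << 31)
#define QCA8K_MDIO_MASTER_EN          (1u << 30)
#define QCA8K_MDIO_MASTER_READ        (1u << 27)
#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)

int main(void)
{
	/* Read register 1 (BMSR) of PHY 4, i.e. front port 5. */
	unsigned int val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
			   QCA8K_MDIO_MASTER_READ |
			   QCA8K_MDIO_MASTER_PHY_ADDR(4u) |
			   QCA8K_MDIO_MASTER_REG_ADDR(1u);

	printf("MDIO_MASTER_CTRL = 0x%08x\n", val);	/* 0xc8810000 */
	return 0;
}
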
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 808abb6b3671..b15752267c8d 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev)
static void set_rx_mode(struct net_device *dev)
{
int ioaddr = dev->base_addr;
- short new_mode;
+ unsigned short new_mode;
if (dev->flags & IFF_PROMISC) {
if (corkscrew_debug > 3)
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 342ae08ec3c2..d60a86aa8aa8 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
-
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
@@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
static enum mac8390_access mac8390_testio(unsigned long membase)
{
- unsigned long outdata = 0xA5A0B5B0;
- unsigned long indata = 0x00000000;
+ u32 outdata = 0xA5A0B5B0;
+ u32 indata = 0;
+
/* Try writing 32 bits */
- memcpy_toio((void __iomem *)membase, &outdata, 4);
- /* Now compare them */
- if (memcmp_withio(&outdata, membase, 4) == 0)
+ nubus_writel(outdata, membase);
+ /* Now read it back */
+ indata = nubus_readl(membase);
+ if (outdata == indata)
return ACCESS_32;
+
+ outdata = 0xC5C0D5D0;
+ indata = 0;
+
/* Write 16 bit output */
word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
+
return ACCESS_UNKNOWN;
}
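
Two things change in the probe above: the 32-bit attempt becomes a single nubus_writel()/nubus_readl() bus cycle rather than a memcpy_toio() loop, and the 16-bit retry uses a fresh test pattern so data latched by a failed 32-bit write can never produce a false ACCESS_16 match. A generic sketch of such a write/read-back width probe (the flat pointer accesses are a simplification, not the mac8390 code):

#include <stdint.h>

enum access { ACCESS_UNKNOWN, ACCESS_16, ACCESS_32 };

static enum access probe_width(volatile uint32_t *reg)
{
	uint32_t pat32 = 0xA5A0B5B0;	/* pattern for the 32-bit attempt */
	uint16_t pat16 = 0xC5C0;	/* fresh pattern: leftovers of a
					 * failed 32-bit write can't alias it */

	*reg = pat32;
	if (*reg == pat32)
		return ACCESS_32;

	*(volatile uint16_t *)reg = pat16;
	if (*(volatile uint16_t *)reg == pat16)
		return ACCESS_16;

	return ACCESS_UNKNOWN;
}
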
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 61e43802b9a5..645efac6310d 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -289,6 +289,11 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
virt = ioremap(link->resource[2]->start,
resource_size(link->resource[2]));
+ if (unlikely(!virt)) {
+ pcmcia_release_window(link, link->resource[2]);
+ return NULL;
+ }
+
for (i = 0; i < NR_INFO; i++) {
pcmcia_map_mem_page(link, link->resource[2],
hw_info[i].offset & ~(resource_size(link->resource[2])-1));
@@ -1423,6 +1428,11 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
/* Try scribbling on the buffer */
info->base = ioremap(link->resource[3]->start,
resource_size(link->resource[3]));
+ if (unlikely(!info->base)) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
for (i = 0; i < (TX_PAGES<<8); i += 2)
__raw_writew((i>>1), info->base+offset+i);
udelay(100);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 47e5984f16fb..3155f7fa83eb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -613,7 +613,6 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
napi_schedule(&greth->napi);
}
- mmiowb();
spin_unlock(&greth->devlock);
return retval;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 16477aa6d61f..4f7e792e50e9 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -345,8 +345,6 @@ static void slic_set_rx_mode(struct net_device *dev)
if (sdev->promisc != set_promisc) {
sdev->promisc = set_promisc;
slic_configure_rcv(sdev);
- /* make sure writes to receiver cant leak out of the lock */
- mmiowb();
}
spin_unlock_bh(&sdev->link_lock);
}
@@ -1461,8 +1459,6 @@ static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
netif_stop_queue(dev);
- /* make sure writes to io-memory cant leak out of tx queue lock */
- mmiowb();
return NETDEV_TX_OK;
drop_skb:
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index b17d435de09f..05798aa5bb73 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -2016,7 +2016,6 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
mb();
writel_relaxed((u32)aenq->head,
dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
- mmiowb();
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 74550ccc7a20..e2ffb159cbe2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
}
if (buff->is_ip_cso) {
__skb_incr_checksum_unnecessary(skb);
- if (buff->is_udp_cso || buff->is_tcp_cso)
- __skb_incr_checksum_unnecessary(skb);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
+
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ __skb_incr_checksum_unnecessary(skb);
}
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
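
The reordering matters because CHECKSUM_UNNECESSARY carries a nesting counter: the first __skb_incr_checksum_unnecessary() call marks the skb, and each further call records one more verified checksum level. Counting the L4 checksum independently of the IP one lets IPv6 packets, which have no IP header checksum, still report a verified TCP/UDP checksum. A userspace model of the helper's semantics (a sketch, not the real skbuff.h code):

#include <stdio.h>

enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY };

struct skb_model { int ip_summed; int csum_level; };

/* Model of __skb_incr_checksum_unnecessary(): the first verified
 * checksum flips ip_summed, later ones bump the nesting level. */
static void incr_checksum_unnecessary(struct skb_model *skb)
{
	if (skb->ip_summed == CHECKSUM_NONE)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->csum_level++;
}

int main(void)
{
	struct skb_model skb = { CHECKSUM_NONE, 0 };

	incr_checksum_unnecessary(&skb);	/* IPv4 header checksum OK */
	incr_checksum_unnecessary(&skb);	/* TCP/UDP checksum OK */
	printf("ip_summed=%d csum_level=%d\n",
	       skb.ip_summed, skb.csum_level);	/* 1 and 1 */
	return 0;
}
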
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 9e07b469066a..f35c9a75be50 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1721,7 +1721,7 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
adapter->soft_stats.scc += smb->tx_1_col;
adapter->soft_stats.mcc += smb->tx_2_col;
adapter->soft_stats.latecol += smb->tx_late_col;
- adapter->soft_stats.tx_underun += smb->tx_underrun;
+ adapter->soft_stats.tx_underrun += smb->tx_underrun;
adapter->soft_stats.tx_trunc += smb->tx_trunc;
adapter->soft_stats.tx_pause += smb->tx_pause;
@@ -2439,7 +2439,6 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
atl1_tx_map(adapter, skb, ptpd);
atl1_tx_queue(adapter, count, ptpd);
atl1_update_mailbox(adapter);
- mmiowb();
return NETDEV_TX_OK;
}
@@ -3179,7 +3178,7 @@ static struct atl1_stats atl1_gstrings_stats[] = {
{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
- {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
+ {"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)},
{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 34a58cd846a0..eacff19ea05b 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -681,7 +681,7 @@ struct atl1_sft_stats {
u64 scc; /* packets TX after a single collision */
u64 mcc; /* packets TX after multiple collisions */
u64 latecol; /* TX packets w/ late collisions */
- u64 tx_underun; /* TX packets aborted due to TX FIFO underrun
+ u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun
* or TRD FIFO underrun */
u64 tx_trunc; /* TX packets truncated due to size > MTU */
u64 rx_pause; /* num Pause packets received. */
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index d99317b3d891..dd81c5863111 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -553,7 +553,7 @@ static void atl2_intr_tx(struct atl2_adapter *adapter)
netdev->stats.tx_aborted_errors++;
if (txs->late_col)
netdev->stats.tx_window_errors++;
- if (txs->underun)
+ if (txs->underrun)
netdev->stats.tx_fifo_errors++;
} while (1);
@@ -908,7 +908,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
(adapter->txd_write_ptr >> 2));
- mmiowb();
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index c64a6bdfa7ae..25ec84cb4853 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -260,7 +260,7 @@ struct tx_pkt_status {
unsigned multi_col:1;
unsigned late_col:1;
unsigned abort_col:1;
- unsigned underun:1; /* current packet is aborted
+ unsigned underrun:1; /* current packet is aborted
* due to txram underrun */
unsigned:3; /* reserved */
unsigned update:1; /* always 1'b1 in tx_status_buf */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d63371d70bce..dfdd14eadd57 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3305,8 +3305,6 @@ next_rx:
BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
- mmiowb();
-
return rx_pkt;
}
@@ -6723,8 +6721,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
BNX2_WR16(bp, txr->tx_bidx_addr, prod);
BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
- mmiowb();
-
txr->tx_prod = prod;
if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecb1bd7eb508..0c8f5b546c6f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4166,8 +4166,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
- mmiowb();
-
txdata->tx_bd_prod += nbd;
if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 2462e7aa0c5d..2d57af9c061c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -527,8 +527,6 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
((u32 *)&rx_prods)[i]);
- mmiowb(); /* keep prod updates ordered */
-
DP(NETIF_MSG_RX_STATUS,
"queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
@@ -653,7 +651,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
/* Make sure that ACK is written */
- mmiowb();
barrier();
}
@@ -674,7 +671,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
/* Make sure that ACK is written */
- mmiowb();
barrier();
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 749d0ef44371..0745cccd416d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2623,7 +2623,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
wmb();
DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
- mmiowb();
barrier();
num_pkts++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 626b491f7674..3716c828ff5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -869,9 +869,6 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
"write %x to HC %d (addr 0x%x)\n",
val, port, addr);
- /* flush all outstanding writes */
- mmiowb();
-
REG_WR(bp, addr, val);
if (REG_RD(bp, addr) != val)
BNX2X_ERR("BUG! Proper val not read from IGU!\n");
@@ -887,9 +884,6 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
- /* flush all outstanding writes */
- mmiowb();
-
REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
BNX2X_ERR("BUG! Proper val not read from IGU!\n");
@@ -1595,7 +1589,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
/*
* Ensure that HC_CONFIG is written before leading/trailing edge config
*/
- mmiowb();
barrier();
if (!CHIP_IS_E1(bp)) {
@@ -1611,9 +1604,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
}
-
- /* Make sure that interrupts are indeed enabled from here on */
- mmiowb();
}
static void bnx2x_igu_int_enable(struct bnx2x *bp)
@@ -1674,9 +1664,6 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
-
- /* Make sure that interrupts are indeed enabled from here on */
- mmiowb();
}
void bnx2x_int_enable(struct bnx2x *bp)
@@ -3833,7 +3820,6 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp)
REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
bp->spq_prod_idx);
- mmiowb();
}
/**
@@ -5244,7 +5230,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
/* No memory barriers */
storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
- mmiowb(); /* keep prod updates ordered */
}
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
@@ -6513,7 +6498,6 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp)
/* flush all */
mb();
- mmiowb();
}
void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
@@ -6553,7 +6537,6 @@ void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
/* flush all before enabling interrupts */
mb();
- mmiowb();
bnx2x_int_enable(bp);
@@ -7775,12 +7758,10 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
data, igu_addr_data);
REG_WR(bp, igu_addr_data, data);
- mmiowb();
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
- mmiowb();
barrier();
/* wait for clean up to finish */
@@ -9550,7 +9531,6 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
close ? "closing" : "opening");
- mmiowb();
}
#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
@@ -9674,7 +9654,6 @@ static void bnx2x_pxp_prep(struct bnx2x *bp)
if (!CHIP_IS_E1(bp)) {
REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
- mmiowb();
}
}
@@ -9774,16 +9753,13 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
reset_mask1 & (~not_reset_mask1));
barrier();
- mmiowb();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
reset_mask2 & (~stay_reset2));
barrier();
- mmiowb();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
- mmiowb();
}
/**
@@ -9867,9 +9843,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
REG_WR(bp, MISC_REG_UNPREPARED, 0);
barrier();
- /* Make sure all is written to the chip before the reset */
- mmiowb();
-
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
@@ -14828,7 +14801,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
if (rc)
break;
- mmiowb();
barrier();
/* Start accepting on iSCSI L2 ring */
@@ -14863,7 +14835,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
if (!bnx2x_wait_sp_comp(bp, sp_bits))
BNX2X_ERR("rx_mode completion timed out!\n");
- mmiowb();
barrier();
/* Unset iSCSI L2 MAC */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7b22a6d8514c..80d250a6d048 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5039,7 +5039,6 @@ static inline int bnx2x_q_init(struct bnx2x *bp,
/* As no ramrod is sent, complete the command immediately */
o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
- mmiowb();
smp_mb();
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index c97b642e6537..0edbb0a76847 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -100,13 +100,11 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
cmd_data.sb_id_and_flags, igu_addr_data);
REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
- mmiowb();
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
- mmiowb();
barrier();
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index a9bdc21873d3..0752b7fa4d9c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -172,8 +172,6 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
/* Trigger the PF FW */
writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
- mmiowb();
-
/* Wait for PF to complete */
while ((tout >= 0) && (!*done)) {
msleep(interval);
@@ -957,7 +955,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
bnx2x_sample_bulletin(bp);
if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
- BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+ BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
rc = -EINVAL;
goto out;
}
@@ -1179,7 +1177,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
/* ack the FW */
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
- mmiowb();
/* copy the response header including status-done field,
* must be last dmae, must be after FW is acked
@@ -2174,7 +2171,6 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
*/
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
/* Firmware ack should be written before unlocking channel */
- mmiowb();
bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 0bb9d7b3a2b6..2a4341708c0f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -556,8 +556,6 @@ normal_tx:
tx_done:
- mmiowb();
-
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (skb->xmit_more && !tx_buf->is_push)
bnxt_db_write(bp, &txr->tx_db, prod);
@@ -1133,6 +1131,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons)) {
+ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return;
}
@@ -1585,15 +1585,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
cons = rxcmp->rx_cmp_opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- data = rx_buf->data;
- data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
+ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return rc1;
}
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ data_ptr = rx_buf->data_ptr;
prefetch(data_ptr);
misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
@@ -1610,12 +1612,18 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
+
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
rc = -EIO;
- goto next_rx;
+ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
+ goto next_rx_no_len;
}
len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
@@ -1696,12 +1704,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;
next_rx:
- rxr->rx_prod = NEXT_RX(prod);
- rxr->rx_next_cons = NEXT_RX(cons);
-
cpr->rx_packets += 1;
cpr->rx_bytes += len;
+next_rx_no_len:
+ rxr->rx_prod = NEXT_RX(prod);
+ rxr->rx_next_cons = NEXT_RX(cons);
+
next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
@@ -2123,7 +2132,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
&dim_sample);
net_dim(&cpr->dim, dim_sample);
}
- mmiowb();
return work_done;
}
@@ -5125,10 +5133,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 cmpl_ring_id;
- cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_TX,
close_path ? cmpl_ring_id :
@@ -5141,10 +5149,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id;
- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_RX,
close_path ? cmpl_ring_id :
@@ -5163,10 +5171,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id;
- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
hwrm_ring_free_send_msg(bp, ring, type,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
@@ -5305,17 +5313,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_tx_rings = cpu_to_le16(tx_rings);
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
enables |= tx_rings + ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= rx_rings ?
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
enables |= cp_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
@@ -5355,14 +5362,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
enables |= tx_rings + ring_grps ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= ring_grps ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
@@ -6743,6 +6749,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
struct hwrm_port_qstats_ext_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
+ u32 tx_stat_size;
int rc;
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
@@ -6752,13 +6759,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
+ tx_stat_size = bp->hw_tx_port_stats_ext ?
+ sizeof(*bp->hw_tx_port_stats_ext) : 0;
+ req.tx_stat_size = cpu_to_le16(tx_stat_size);
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
- bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
+ bp->fw_tx_stats_ext_size = tx_stat_size ?
+ le16_to_cpu(resp->tx_stat_size) / 8 : 0;
} else {
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;
@@ -8951,8 +8961,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
skip_uc:
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc && vnic->mc_list_count) {
+ netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ }
if (rc)
- netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
rc);
return rc;
@@ -10675,6 +10692,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
+ bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
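
A recurring shape in the bnxt_rx_pkt() changes above is validate-then-dereference: the ring entry is only read after the hardware-supplied consumer index has been checked against rx_next_cons, with a warning logged on mismatch. A hedged sketch of that ordering with simplified stand-in types:

#include <errno.h>
#include <stddef.h>

struct rx_buf { void *data; };

static int consume_one(struct rx_buf *ring, unsigned int ring_mask,
		       unsigned int cons, unsigned int expected)
{
	struct rx_buf *rb;

	/* Check the index the hardware handed us before using it to
	 * address the ring; a corrupted completion must not make us
	 * read an out-of-sequence slot. */
	if (cons != expected)
		return -EIO;

	rb = &ring[cons & ring_mask];
	/* ... process and then recycle rb->data ... */
	return rb->data ? 0 : -ENOMEM;
}
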
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index ea45a9b8179e..cf475873ce81 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -43,9 +43,6 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
if (ulp_id == BNXT_ROCE_ULP) {
unsigned int max_stat_ctxs;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return -EOPNOTSUPP;
-
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
bp->cp_nr_rings == max_stat_ctxs)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 328373e0578f..2aebd4bbb67d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1073,7 +1073,6 @@ static void tg3_int_reenable(struct tg3_napi *tnapi)
struct tg3 *tp = tnapi->tp;
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
- mmiowb();
/* When doing tagged status, this work check is unnecessary.
* The last_tag we write above tells the chip which piece of
@@ -4283,7 +4282,7 @@ static void tg3_power_down(struct tg3 *tp)
pci_set_power_state(tp->pdev, PCI_D3hot);
}
-static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
{
switch (val & MII_TG3_AUX_STAT_SPDMASK) {
case MII_TG3_AUX_STAT_10HALF:
@@ -4787,7 +4786,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
bool current_link_up;
u32 bmsr, val;
u32 lcl_adv, rmt_adv;
- u16 current_speed;
+ u32 current_speed;
u8 current_duplex;
int i, err;
@@ -5719,7 +5718,7 @@ out:
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
u32 orig_pause_cfg;
- u16 orig_active_speed;
+ u32 orig_active_speed;
u8 orig_active_duplex;
u32 mac_status;
bool current_link_up;
@@ -5823,7 +5822,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
int err = 0;
u32 bmsr, bmcr;
- u16 current_speed = SPEED_UNKNOWN;
+ u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
u32 local_adv, remote_adv, sgsr;
@@ -6999,7 +6998,6 @@ next_pkt_nopost:
tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
tpr->rx_jmb_prod_idx);
}
- mmiowb();
} else if (work_mask) {
/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
* updated before the producer indices can be updated.
@@ -7210,8 +7208,6 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
dpr->rx_jmb_prod_idx);
- mmiowb();
-
if (err)
tw32_f(HOSTCC_MODE, tp->coal_now);
}
@@ -7278,7 +7274,6 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
HOSTCC_MODE_ENABLE |
tnapi->coal_now);
}
- mmiowb();
break;
}
}
@@ -8159,7 +8154,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!skb->xmit_more || netif_xmit_stopped(txq)) {
/* Packets are ready, update Tx producer idx on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
- mmiowb();
}
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index a772a33b685c..6953d0546acb 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info {
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
- u16 speed;
+ u32 speed;
u8 duplex;
u8 autoneg;
u8 flowctrl;
@@ -2882,7 +2882,7 @@ struct tg3_link_config {
u8 active_flowctrl;
u8 active_duplex;
- u16 active_speed;
+ u32 active_speed;
u32 rmt_adv;
};
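
The u16-to-u32 widening of the tg3 speed fields fixes comparisons against SPEED_UNKNOWN, which the ethtool UAPI defines as -1: stored in a u16 it truncates to 0xffff and, after integer promotion, no longer compares equal to the constant. A minimal stand-alone demonstration:

#include <stdint.h>
#include <stdio.h>

#define SPEED_UNKNOWN	-1	/* from include/uapi/linux/ethtool.h */

int main(void)
{
	uint16_t speed16 = SPEED_UNKNOWN;	/* truncates to 0xffff */
	uint32_t speed32 = SPEED_UNKNOWN;

	/* speed16 promotes to the int 65535, which is not -1 ... */
	printf("u16 matches SPEED_UNKNOWN: %s\n",
	       speed16 == SPEED_UNKNOWN ? "yes" : "no");	/* no */
	/* ... while the u32 comparison converts -1 to 0xffffffff. */
	printf("u32 matches SPEED_UNKNOWN: %s\n",
	       speed32 == SPEED_UNKNOWN ? "yes" : "no");	/* yes */
	return 0;
}
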
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ad099fd01b45..3da2795e2486 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
- if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP) &&
+ gem_ptp_do_txstamp(queue, skb, desc) == 0) {
/* skb now belongs to timestamp buffer
* and will be removed later
*/
@@ -3370,14 +3372,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
*hclk = devm_clk_get(&pdev->dev, "hclk");
}
- if (IS_ERR(*pclk)) {
+ if (IS_ERR_OR_NULL(*pclk)) {
err = PTR_ERR(*pclk);
+ if (!err)
+ err = -ENODEV;
+
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
return err;
}
- if (IS_ERR(*hclk)) {
+ if (IS_ERR_OR_NULL(*hclk)) {
err = PTR_ERR(*hclk);
+ if (!err)
+ err = -ENODEV;
+
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
return err;
}
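
The switch to IS_ERR_OR_NULL() guards against clk lookup paths that return NULL instead of an ERR_PTR: PTR_ERR(NULL) evaluates to 0, which the caller would mistake for success, hence the explicit -ENODEV substitution. A userspace model of the err.h macro semantics showing the hazard:

#include <errno.h>
#include <stdio.h>

/* Userspace model of the macros in include/linux/err.h. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR(p)	IS_ERR_VALUE((unsigned long)(p))
#define IS_ERR_OR_NULL(p) (!(p) || IS_ERR(p))
#define PTR_ERR(p)	((long)(p))

int main(void)
{
	void *clk = NULL;	/* some lookup paths can hand back NULL */

	if (IS_ERR_OR_NULL(clk)) {
		long err = PTR_ERR(clk);

		if (!err)		/* PTR_ERR(NULL) is 0, i.e. "success" */
			err = -ENODEV;	/* so substitute a real error code */
		printf("err = %ld\n", err);	/* err = -19 */
	}
	return 0;
}
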
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index 2df7440f58df..39643be8c30a 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -38,9 +38,6 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct)
lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
- /* make sure that the reset is written before starting timer */
- mmiowb();
-
/* Wait for 10ms as Octeon resets. */
mdelay(100);
@@ -487,9 +484,6 @@ void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
/* Disable Interrupts */
writeq(0, cn6xxx->intr_enb_reg64);
-
- /* make sure interrupts are really disabled */
- mmiowb();
}
static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
@@ -555,10 +549,6 @@ static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
value &= ~(1 << oq_no);
octeon_write_csr(oct, reg, value);
- /* Ensure that the enable register is written.
- */
- mmiowb();
-
spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index e21bf3724611..1c50c10b5a16 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1211,6 +1211,11 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate soft command\n");
+ return -ENOMEM;
+ }
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -1684,6 +1689,11 @@ int liquidio_set_fec(struct lio *lio, int on_off)
sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
sizeof(struct oct_nic_seapi_resp), 0);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to allocate soft command\n");
+ return -ENOMEM;
+ }
ncmd = sc->virtdptr;
resp = sc->virtrptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 9b7819fdc9de..fb6f813cff65 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1192,6 +1192,11 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate octeon_soft_command\n");
+ return;
+ }
ncmd = (union octnet_cmd *)sc->virtdptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index ce8c3f818666..934115d18488 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1449,7 +1449,6 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
iq->pkt_in_done -= iq->pkts_processed;
iq->pkts_processed = 0;
/* this write needs to be flushed before we release the lock */
- mmiowb();
spin_unlock_bh(&iq->lock);
oct = iq->oct_dev;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index a0c099f71524..017169023cca 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -513,8 +513,6 @@ int octeon_retry_droq_refill(struct octeon_droq *droq)
*/
wmb();
writel(desc_refilled, droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
reschedule = 0;
@@ -712,8 +710,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
*/
wmb();
writel(desc_refilled, droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
}
}
} /* for (each packet)... */
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index c6f4cbda040f..fcf20a8f92d9 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -278,7 +278,6 @@ ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
writel(iq->fill_cnt, iq->doorbell_reg);
/* make sure doorbell write goes through */
- mmiowb();
iq->fill_cnt = 0;
iq->last_db_time = jiffies;
return;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 503cfadff4ac..c032bef1b776 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -32,6 +32,13 @@
#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14 byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1328,10 +1335,11 @@ int nicvf_stop(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
union nic_mbx mbx = {};
- cancel_delayed_work_sync(&nic->link_change_work);
-
/* wait till all queued set_rx_mode tasks completes */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq) {
+ cancel_delayed_work_sync(&nic->link_change_work);
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+ }
mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1460,8 @@ int nicvf_open(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
/* wait till all queued set_rx_mode tasks completes if any */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq)
+ drain_workqueue(nic->nicvf_rx_mode_wq);
netif_carrier_off(netdev);
@@ -1550,10 +1559,12 @@ int nicvf_open(struct net_device *netdev)
/* Send VF config done msg to PF */
nicvf_send_cfg_done(nic);
- INIT_DELAYED_WORK(&nic->link_change_work,
- nicvf_link_status_check_task);
- queue_delayed_work(nic->nicvf_rx_mode_wq,
- &nic->link_change_work, 0);
+ if (nic->nicvf_rx_mode_wq) {
+ INIT_DELAYED_WORK(&nic->link_change_work,
+ nicvf_link_status_check_task);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 0);
+ }
return 0;
cleanup:
@@ -1578,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
struct nicvf *nic = netdev_priv(netdev);
int orig_mtu = netdev->mtu;
+ /* For now just support only the usual MTU sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
+
netdev->mtu = new_mtu;
if (!netif_running(netdev))
@@ -1826,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
bool bpf_attached = false;
int ret = 0;
- /* For now just support only the usual MTU sized frames */
- if (prog && (dev->mtu > 1500)) {
+ /* For now just support only the usual MTU sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (prog && dev->mtu > MAX_XDP_MTU) {
netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
dev->mtu);
return -EOPNOTSUPP;
@@ -2234,6 +2256,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
WQ_MEM_RECLAIM,
nic->vf_id);
+ if (!nic->nicvf_rx_mode_wq) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to allocate work queue\n");
+ goto err_unregister_interrupts;
+ }
+
INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
spin_lock_init(&nic->rx_mode_wq_lock);
mutex_init(&nic->rx_mode_mtx);
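
For reference, the MAX_XDP_MTU bound introduced above works out to 1530 - 14 - 2*4 = 1508 bytes. A trivial check of the arithmetic:

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header, from <linux/if_ether.h> */
#define VLAN_HLEN	4	/* one 802.1Q tag, from <linux/if_vlan.h> */
#define MAX_XDP_MTU	(1530 - ETH_HLEN - VLAN_HLEN * 2)

int main(void)
{
	/* 1530 - 14 - 8 = 1508: the largest MTU whose frame still fits
	 * one contiguous buffer for XDP on this hardware. */
	printf("MAX_XDP_MTU = %d\n", MAX_XDP_MTU);
	return 0;
}
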
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 5b4d3badcb73..e246f9733bb8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
/* Check if page can be recycled */
if (page) {
ref_count = page_ref_count(page);
- /* Check if this page has been used once i.e 'put_page'
- * called after packet transmission i.e internal ref_count
- * and page's ref_count are equal i.e page can be recycled.
+ /* This page can be recycled if internal ref_count and page's
+ * ref_count are equal, indicating that the page has been used
+ * once for packet transmission. For non-XDP mode, internal
+ * ref_count is always '1'.
*/
- if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
- pgcache->ref_count--;
- else
- page = NULL;
-
- /* In non-XDP mode, page's ref_count needs to be '1' for it
- * to be recycled.
- */
- if (!rbdr->is_xdp && (ref_count != 1))
+ if (rbdr->is_xdp) {
+ if (ref_count == pgcache->ref_count)
+ pgcache->ref_count--;
+ else
+ page = NULL;
+ } else if (ref_count != 1) {
page = NULL;
+ }
}
if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
while (head < rbdr->pgcnt) {
pgcache = &rbdr->pgcache[head];
if (pgcache->page && page_ref_count(pgcache->page) != 0) {
- if (!rbdr->is_xdp) {
- put_page(pgcache->page);
- continue;
+ if (rbdr->is_xdp) {
+ page_ref_sub(pgcache->page,
+ pgcache->ref_count - 1);
}
- page_ref_sub(pgcache->page, pgcache->ref_count - 1);
put_page(pgcache->page);
}
head++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 3130b43bba52..02959035ed3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
}
/* should never happen! */
- BUG_ON(1);
+ BUG();
return NULL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 88773ca58e6b..b3da81e90132 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter,
break;
default:
- BUG_ON(1);
+ BUG();
}
return buf_size;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index c62a0c830705..38dd41eb959e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -56,6 +56,7 @@ enum {
CPL_TX_DATA_ISO = 0x1F,
CPL_CLOSE_LISTSRV_RPL = 0x20,
+ CPL_GET_TCB_RPL = 0x22,
CPL_L2T_WRITE_RPL = 0x23,
CPL_PASS_OPEN_RPL = 0x24,
CPL_ACT_OPEN_RPL = 0x25,
@@ -688,6 +689,13 @@ struct cpl_get_tcb {
#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
#define NO_REPLY_F NO_REPLY_V(1U)
+struct cpl_get_tcb_rpl {
+ union opcode_tid ot;
+ __u8 cookie;
+ __u8 status;
+ __be16 len;
+};
+
struct cpl_set_tcb_field {
WR_HDR;
union opcode_tid ot;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 3297ce025e8b..1b9afb192f7f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -41,6 +41,14 @@
#define TCB_SMAC_SEL_V(x) ((x) << TCB_SMAC_SEL_S)
#define TCB_T_FLAGS_W 1
+#define TCB_T_FLAGS_S 0
+#define TCB_T_FLAGS_M 0xffffffffffffffffULL
+#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+
+#define TCB_RQ_START_W 30
+#define TCB_RQ_START_S 0
+#define TCB_RQ_START_M 0x3ffffffULL
+#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S)
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
@@ -66,4 +74,8 @@
#define TCB_RX_FRAG3_LEN_RAW_W 29
#define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30
#define TCB_PDU_HDR_LEN_W 31
+
+#define TF_RX_PDU_OUT_S 49
+#define TF_RX_PDU_OUT_V(x) ((__u64)(x) << TF_RX_PDU_OUT_S)
+
#endif /* __T4_TCB_H */
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
ppmax = max;
/* pool size must be multiple of unsigned long */
- bmap = BITS_TO_LONGS(ppmax);
+ bmap = ppmax / BITS_PER_TYPE(unsigned long);
+ if (!bmap)
+ return NULL;
+
ppmax = (bmap * sizeof(unsigned long)) << 3;
alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
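
The fix swaps BITS_TO_LONGS(), which rounds up, for a plain division that rounds down, so ppmax is trimmed to a whole number of longs instead of being padded past the requested size, and a zero result now bails out rather than over-allocating. A standalone sketch of the two roundings:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int ppmax = 100; /* example pool size, not from the driver */
	unsigned long up   = BITS_TO_LONGS(ppmax);    /* rounds up: 2 words */
	unsigned long down = ppmax / BITS_PER_LONG;   /* rounds down: 1 word */

	printf("round up:   %lu words (%lu bits)\n", up, up * (unsigned long)BITS_PER_LONG);
	printf("round down: %lu words (%lu bits)\n", down, down * (unsigned long)BITS_PER_LONG);
	return 0;
}
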
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
if (reserve_factor) {
ppmax_pool = ppmax / reserve_factor;
pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+ if (!pool) {
+ ppmax_pool = 0;
+ reserve_factor = 0;
+ }
pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
ndev->name, ppmax, ppmax_pool, pool_index_max);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9a7f70db20c7..733d9172425b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -119,7 +119,7 @@ static void enic_init_affinity_hint(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
- (enic->msix[i].affinity_mask &&
+ (cpumask_available(enic->msix[i].affinity_mask) &&
!cpumask_empty(enic->msix[i].affinity_mask)))
continue;
if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
@@ -148,7 +148,7 @@ static void enic_set_affinity_hint(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
if (enic_is_err_intr(enic, i) ||
enic_is_notify_intr(enic, i) ||
- !enic->msix[i].affinity_mask ||
+ !cpumask_available(enic->msix[i].affinity_mask) ||
cpumask_empty(enic->msix[i].affinity_mask))
continue;
err = irq_set_affinity_hint(enic->msix_entry[i].vector,
@@ -161,7 +161,7 @@ static void enic_set_affinity_hint(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
int wq_intr = enic_msix_wq_intr(enic, i);
- if (enic->msix[wq_intr].affinity_mask &&
+ if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
!cpumask_empty(enic->msix[wq_intr].affinity_mask))
netif_set_xps_queue(enic->netdev,
enic->msix[wq_intr].affinity_mask,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 2ba49e959c3f..dc339dc1adb2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
*/
queue_mapping = skb_get_queue_mapping(skb);
fq = &priv->fq[queue_mapping];
+
+ fd_len = dpaa2_fd_get_len(&fd);
+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
+ netdev_tx_sent_queue(nq, fd_len);
+
+ /* Everything that happens after this enqueue might race with
+ * the Tx confirmation callback for this frame
+ */
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
@@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
free_tx_fd(priv, fq, &fd, false);
+ netdev_tx_completed_queue(nq, 1, fd_len);
} else {
- fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
percpu_stats->tx_bytes += fd_len;
-
- nq = netdev_get_tx_queue(net_dev, queue_mapping);
- netdev_tx_sent_queue(nq, fd_len);
}
return NETDEV_TX_OK;
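
A minimal model of the byte-queue-limit reordering above: bytes are now charged before the enqueue, since once the frame is handed to hardware its Tx confirmation may run concurrently, and they are refunded if the enqueue fails. The plain counters stand in for netdev_tx_sent_queue() and netdev_tx_completed_queue().

#include <stdio.h>

static long inflight_bytes;

static void tx_sent(long len)      { inflight_bytes += len; }
static void tx_completed(long len) { inflight_bytes -= len; }

static int enqueue(int fail)       { return fail ? -1 : 0; }

static void xmit(long len, int fail)
{
	tx_sent(len);            /* charge before hardware can confirm */
	if (enqueue(fail))
		tx_completed(len);   /* refund on failure */
}

int main(void)
{
	xmit(1500, 0);
	xmit(1500, 1);
	printf("in flight: %ld\n", inflight_bytes); /* 1500 */
	return 0;
}
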
@@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
dpaa2_fd_set_format(&fd, dpaa2_fd_single);
dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
- fq = &priv->fq[smp_processor_id()];
+ fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 697c2427f2b7..a96ad20ee484 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
int ret;
if (enable) {
- ret = clk_prepare_enable(fep->clk_ahb);
- if (ret)
- return ret;
-
ret = clk_prepare_enable(fep->clk_enet_out);
if (ret)
- goto failed_clk_enet_out;
+ return ret;
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
phy_reset_after_clk_enable(ndev->phydev);
} else {
- clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
@@ -1885,8 +1880,6 @@ failed_clk_ref:
failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ahb);
return ret;
}
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev)
ret = clk_prepare_enable(fep->clk_ipg);
if (ret)
goto failed_clk_ipg;
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk_ahb;
fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
@@ -3563,6 +3559,9 @@ failed_reset:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
failed_regulator:
+ clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+ clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
return 0;
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
- return clk_prepare_enable(fep->clk_ipg);
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ return 0;
+
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+ return ret;
}
static const struct dev_pm_ops fec_pm_ops = {
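
The net effect of these hunks is that clk_ahb leaves fec_enet_clk_enable() and is gated together with clk_ipg in the runtime-PM pair, with resume unwinding the first clock when the second fails. A stub sketch of that unwind shape; the clock helpers here are stand-ins, not the CCF API.

#include <stdio.h>

static int clk_prepare_enable(const char *name, int fail)
{
	if (fail)
		return -5; /* stand-in error code */
	printf("enabled %s\n", name);
	return 0;
}

static void clk_disable_unprepare(const char *name)
{
	printf("disabled %s\n", name);
}

static int runtime_resume(int ipg_fails)
{
	int ret = clk_prepare_enable("ahb", 0);

	if (ret)
		return ret;
	ret = clk_prepare_enable("ipg", ipg_fails);
	if (ret)
		clk_disable_unprepare("ahb"); /* unwind the first clock */
	return ret;
}

int main(void)
{
	printf("resume: %d\n", runtime_resume(1));
	return 0;
}
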
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a69cd19a55ae..1eca0fdb9933 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -547,6 +547,11 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
return -1;
base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
+ if (!base) {
+ pcmcia_release_window(link, link->resource[2]);
+ return -ENOMEM;
+ }
+
pcmcia_map_mem_page(link, link->resource[2], 0);
/*
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8ee7b1..c7fa97a7e1f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ out_buffer_fail:
/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
- hnae_free_buffers(ring);
dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
ring->desc_num * sizeof(ring->desc[0]),
ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
+ if (is_rx_ring(ring))
+ hnae_free_buffers(ring);
+
hnae_free_desc(ring);
kfree(ring->desc_cb);
ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 08a750fb60c4..d6fb83437230 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
};
struct hnae_queue {
- void __iomem *io_base;
+ u8 __iomem *io_base;
phys_addr_t phy_base;
struct hnae_ae_dev *dev; /* the device who use this queue */
struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a97228c93831..6c0507921623 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
static void hns_mac_param_get(struct mac_params *param,
struct hns_mac_cb *mac_cb)
{
- param->vaddr = (void *)mac_cb->vaddr;
+ param->vaddr = mac_cb->vaddr;
param->mac_mode = hns_get_enet_interface(mac_cb);
ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
param->mac_id = mac_cb->mac_id;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index fbc75341bef7..22589799f1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -187,7 +187,7 @@ struct mac_statistics {
/* MAC parameter struct; the MAC gets its params from the NIC or DSAF at init */
struct mac_params {
char addr[ETH_ALEN];
- void *vaddr; /*virtual address*/
+ u8 __iomem *vaddr; /*virtual address*/
struct device *dev;
u8 mac_id;
/**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
enum mac_mode mac_mode;
u8 mac_id;
struct hns_mac_cb *mac_cb;
- void __iomem *io_base;
+ u8 __iomem *io_base;
unsigned int mac_en_flg; /* do not enable the MAC twice */
unsigned int virt_dev_num;
struct device *dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac55db065f16..61eea6ac846f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
DSAF_TBL_TCAM_KEY_PORT_S, port);
-
- mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
}
/**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
/* default config dvc to 0 */
mac_data.tbl_ucast_dvc = 0;
mac_data.tbl_ucast_out_port = mac_entry->port_num;
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
0xff,
mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
dsaf_dev->ae_dev.name, mac_key.high.val,
mac_key.low.val, entry_index);
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
/* config mc entry with mask */
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* config key mask */
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
soft_mac_entry += entry_index;
soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
} else { /* not zero, just del port, update */
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
&tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
+static int hns_dsaf_get_port_id(u8 port)
+{
+ if (port < DSAF_SERVICE_NW_NUM)
+ return port;
+
+ if (port >= DSAF_BASE_INNER_PORT_NUM)
+ return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+ return -EINVAL;
+}
+
static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
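
hns_dsaf_get_port_id() factors out the port-to-mask-bit mapping so it can be applied to both the MAC port and the pool bitmap further down. A standalone version under assumed constants; the real DSAF_* values are defined elsewhere in the driver.

#include <stdio.h>

#define DSAF_SERVICE_NW_NUM      6    /* assumed value */
#define DSAF_BASE_INNER_PORT_NUM 127  /* assumed value */

static int get_port_id(unsigned int port)
{
	if (port < DSAF_SERVICE_NW_NUM)
		return port;                       /* service ports map directly */
	if (port >= DSAF_BASE_INNER_PORT_NUM)  /* inner ports pack after them */
		return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
	return -1;                             /* -EINVAL in the driver */
}

int main(void)
{
	printf("%d %d %d\n", get_port_id(2), get_port_id(128), get_port_id(50));
	return 0;
}
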
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
memset(&temp_key, 0x0, sizeof(temp_key));
mask_entry.addr[0] = 0x01;
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
- port, mask_entry.addr);
+ 0xf, mask_entry.addr);
tbl_tcam_mcast.tbl_mcast_item_vld = 1;
tbl_tcam_mcast.tbl_mcast_old_en = 0;
- if (port < DSAF_SERVICE_NW_NUM) {
- mskid = port;
- } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
- mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
- } else {
+ /* set MAC port to handle multicast */
+ mskid = hns_dsaf_get_port_id(port);
+ if (mskid == -EINVAL) {
dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port,
mask_key.high.val, mask_key.low.val);
return;
}
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+ /* set pool bit map to handle multicast */
+ mskid = hns_dsaf_get_port_id(port_num);
+ if (mskid == -EINVAL) {
+ dev_err(dsaf_dev->dev,
+ "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port_num,
+ mask_key.high.val, mask_key.low.val);
+ return;
+ }
dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
+
memcpy(&temp_key, &mask_key, sizeof(mask_key));
hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
(struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0e1cd99831a6..76cc8887e1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 16294cd3c954..19b94879691f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
} else {
- u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ u8 __iomem *base_addr = mac_cb->serdes_vaddr +
(mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 3d07c8a7639d..17c019106e6e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
}
}
-static void __iomem *
+static u8 __iomem *
hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
{
return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
dsaf_dev->ppe_common[comm_index] = NULL;
}
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
- int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+ int ppe_idx)
{
return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index f670e63a5a01..110c6e8222c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
struct hns_ppe_hw_stats hw_stats;
u8 index; /* index in a ppe common device */
- void __iomem *io_base;
+ u8 __iomem *io_base;
int virq;
u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
struct ppe_common_cb {
struct device *dev;
struct dsaf_device *dsaf_dev;
- void __iomem *io_base;
+ u8 __iomem *io_base;
enum ppe_common_mode ppe_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6bf346c11b25..ac3518ca4d7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
} else {
ring = &q->tx_ring;
- ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+ ring->io_base = ring_pair_cb->q.io_base +
HNS_RCB_TX_REG_OFFSET;
irq_idx = HNS_RCB_IRQ_IDX_TX;
mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
}
}
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b9733b0b8482..b9e7f11f0896 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1018,7 +1018,7 @@
#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
}
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
#define dsaf_set_bit(origin, shift, val) \
dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift, u32 val)
{
u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
#define dsaf_get_bit(origin, shift) \
dsaf_get_field((origin), (1ull << (shift)), (shift))
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift)
{
u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
#define dsaf_write_b(addr, data)\
- writeb((data), (__iomem unsigned char *)(addr))
+ writeb((data), (__iomem u8 *)(addr))
#define dsaf_read_b(addr)\
- readb((__iomem unsigned char *)(addr))
+ readb((__iomem u8 *)(addr))
#define hns_mac_reg_read64(drv, offset) \
- readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+ readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
#endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba4316910dea..a60f207768fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
- dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+ dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 60e7d7ae3787..4cd86ba1f050 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
#define SERVICE_TIMER_HZ (1 * HZ)
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
- ring->stats.tx_pkts++;
- ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
/* issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
+ /* update tx ring statistics. */
+ ring->stats.tx_pkts += pkts;
+ ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ring);
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 66d7a8b80e76..38b430f11fc1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -194,6 +194,7 @@ struct hnae3_ae_dev {
const struct hnae3_ae_ops *ops;
struct list_head node;
u32 flag;
+ u8 override_pci_need_reset; /* keep multiple resets from being triggered */
enum hnae3_dev_type dev_type;
enum hnae3_reset_type reset_type;
void *priv;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3cb43b1f1c2e..162cb9afa0e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -22,6 +22,7 @@
#include "hns3_enet.h"
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
+#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
@@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->length = size;
- frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+ frag_buf_num = hns3_tx_bd_count(size);
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
@@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
int i;
size = skb_headlen(skb);
- buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+ buf_num = hns3_tx_bd_count(size);
frag_num = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frag_num; i++) {
frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
- bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >>
- HNS3_MAX_BD_SIZE_OFFSET;
+ bdnum_for_frag = hns3_tx_bd_count(size);
if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
return -ENOMEM;
@@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
}
if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >>
- HNS3_MAX_BD_SIZE_OFFSET;
+ buf_num = hns3_tx_bd_count(skb->len);
if (ring_space(ring) < buf_num)
return -EBUSY;
/* manual split the send packet */
@@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
buf_num = skb_shinfo(skb)->nr_frags + 1;
if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+ buf_num = hns3_tx_bd_count(skb->len);
if (ring_space(ring) < buf_num)
return -EBUSY;
/* manual split the send packet */
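
The old rounding shifted by 16, i.e. divided by 65536, while a descriptor carries at most 65535 bytes, so lengths one byte past a BD boundary were undercounted; hns3_tx_bd_count() rounds up over the true limit. A standalone check of the off-by-one:

#include <stdio.h>

#define HNS3_MAX_BD_SIZE 65535
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int size = HNS3_MAX_BD_SIZE + 1; /* one byte past a single BD */
	unsigned int old_cnt = (size + HNS3_MAX_BD_SIZE - 1) >> 16;     /* 1: undercounts */
	unsigned int new_cnt = DIV_ROUND_UP(size, HNS3_MAX_BD_SIZE);    /* 2: correct */

	printf("old: %u BDs, new: %u BDs\n", old_cnt, new_cnt);
	return 0;
}
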
@@ -1850,7 +1849,9 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
/* request the reset */
if (ae_dev->ops->reset_event) {
- ae_dev->ops->reset_event(pdev, NULL);
+ if (!ae_dev->override_pci_need_reset)
+ ae_dev->ops->reset_event(pdev, NULL);
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -2321,8 +2322,8 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
- if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) ||
- BIT(HNS3_RXD_OL3E_B) ||
+ if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
+ BIT(HNS3_RXD_OL3E_B) |
BIT(HNS3_RXD_OL4E_B)))) {
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
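
The checksum-error test mixed bitwise | with logical ||, and since || binds loosest the whole mask collapsed to 1, so only bit 0 of l234info was ever tested. A compilable illustration with stand-in bit positions; the real HNS3_RXD_* values are not shown in this hunk.

#include <stdio.h>

#define BIT(n) (1u << (n))
#define L3E  4 /* stand-in positions */
#define L4E  8
#define OL3E 6
#define OL4E 7

int main(void)
{
	unsigned int l234info = BIT(OL4E); /* only the outer-L4 error set */
	/* kept verbatim, including the precedence bug: collapses to 1 */
	unsigned int bad  = BIT(L3E) | BIT(L4E) || BIT(OL3E) || BIT(OL4E);
	unsigned int good = BIT(L3E) | BIT(L4E) | BIT(OL3E) | BIT(OL4E);

	printf("old mask 0x%x -> hit %u\n", bad, !!(l234info & bad));   /* 0 */
	printf("new mask 0x%x -> hit %u\n", good, !!(l234info & good)); /* 1 */
	return 0;
}
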
@@ -2472,6 +2473,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ dma_rmb();
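+ /* make sure the HW has finished writing the descriptor */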
if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
return -ENXIO;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1db0bd41d209..75669cd0c311 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -193,7 +193,6 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_BD_SIZE_OFFSET 16
#define HNS3_MAX_BD_PER_FRAG 8
#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index fffe8c1c45d3..0fb61d440d3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 1feceff1477c..1f52d11f77b5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1317,8 +1317,10 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
hclge_handle_all_ras_errors(hdev);
} else {
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
- hdev->pdev->revision < 0x21)
+ hdev->pdev->revision < 0x21) {
+ ae_dev->override_pci_need_reset = 1;
return PCI_ERS_RESULT_RECOVERED;
+ }
}
if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
@@ -1327,8 +1329,11 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
}
if (status & HCLGE_RAS_REG_NFE_MASK ||
- status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
+ ae_dev->override_pci_need_reset = 0;
return PCI_ERS_RESULT_NEED_RESET;
+ }
+ ae_dev->override_pci_need_reset = 1;
return PCI_ERS_RESULT_RECOVERED;
}
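
Together with the hnae3 field added earlier, these branches form a small handshake: the RAS handler records whether a full reset is already being requested, and hns3_slot_reset() consults that flag before scheduling another. A plain-int model of the handshake:

#include <stdio.h>

static int override_pci_need_reset;

static int handle_ras(int need_reset)
{
	override_pci_need_reset = !need_reset;
	return need_reset; /* NEED_RESET vs RECOVERED, as ints here */
}

static void slot_reset(void)
{
	if (!override_pci_need_reset)
		printf("scheduling reset\n");
	else
		printf("reset suppressed (already handled)\n");
}

int main(void)
{
	handle_ras(1);
	slot_reset();   /* scheduling reset */
	handle_ras(0);
	slot_reset();   /* suppressed */
	return 0;
}
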
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd35845..6193f8fa7cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc251f32..8b8a7d00e8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
};
struct hns_mdio_device {
- void *vbase; /* mdio reg base address */
+ u8 __iomem *vbase; /* mdio reg base address */
struct regmap *subctrl_vbase;
struct hns_mdio_sc_reg sc_reg;
};
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
#define MDIO_SC_CLK_ST 0x531C
#define MDIO_SC_RESET_ST 0x5A1C
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- writel_relaxed(value, reg_addr + reg);
+ writel_relaxed(value, base + reg);
}
#define MDIO_WRITE_REG(a, reg, value) \
mdio_write_reg((a)->vbase, (reg), (value))
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- return readl_relaxed(reg_addr + reg);
+ return readl_relaxed(base + reg);
}
#define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
u32 val)
{
u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
{
u32 origin;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3baabdc89726..90b62c1412c8 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev,
if (ehea_add_adapter_mr(adapter)) {
pr_err("creating MR failed\n");
+ of_node_put(eth_dn);
return -EIO;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ecbb1adcf3b..3dfb2d131eb7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_completion(&adapter->init_done);
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -3761,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+ netdev_features_t old_hw_features = 0;
union ibmvnic_crq crq;
int i;
@@ -3836,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
- adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
+ if (adapter->state != VNIC_PROBING) {
+ old_hw_features = adapter->netdev->hw_features;
+ adapter->netdev->hw_features = 0;
+ }
+
+ adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
- adapter->netdev->features |= NETIF_F_IP_CSUM;
+ adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
- adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+ adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
if ((adapter->netdev->features &
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
- adapter->netdev->features |= NETIF_F_RXCSUM;
+ adapter->netdev->hw_features |= NETIF_F_RXCSUM;
if (buf->large_tx_ipv4)
- adapter->netdev->features |= NETIF_F_TSO;
+ adapter->netdev->hw_features |= NETIF_F_TSO;
if (buf->large_tx_ipv6)
- adapter->netdev->features |= NETIF_F_TSO6;
+ adapter->netdev->hw_features |= NETIF_F_TSO6;
+
+ if (adapter->state == VNIC_PROBING) {
+ adapter->netdev->features |= adapter->netdev->hw_features;
+ } else if (old_hw_features != adapter->netdev->hw_features) {
+ netdev_features_t tmp = 0;
- adapter->netdev->hw_features |= adapter->netdev->features;
+ /* disable features no longer supported */
+ adapter->netdev->features &= adapter->netdev->hw_features;
+ /* turn on features now supported if previously enabled */
+ tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+ adapter->netdev->hw_features;
+ adapter->netdev->features |=
+ tmp & adapter->netdev->wanted_features;
+ }
memset(&crq, 0, sizeof(crq));
crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
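
A bitmask model of the reconciliation logic above, assuming the same three-way relationship: features the hypervisor no longer offers are dropped, and features that reappeared are re-enabled only if the user had wanted them. Plain unsigned longs stand in for netdev_features_t.

#include <stdio.h>

int main(void)
{
	unsigned long old_hw = 0x07; /* previously offered features */
	unsigned long hw     = 0x0d; /* newly offered set */
	unsigned long feat   = 0x05; /* currently enabled */
	unsigned long wanted = 0x0f; /* user-requested */
	unsigned long back;

	feat &= hw;                 /* drop what is gone */
	back = (old_hw ^ hw) & hw;  /* newly (re)appeared bits */
	feat |= back & wanted;      /* re-enable only if wanted */

	printf("features: 0x%lx\n", feat); /* 0x0d */
	return 0;
}
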
@@ -4625,7 +4644,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
- init_completion(&adapter->init_done);
+ reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4699,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
- init_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,6 +4777,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ init_completion(&adapter->init_done);
adapter->resetting = false;
adapter->mac_change_pending = false;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8fe9af0e2ab7..466bf1ea186d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3270,11 +3270,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (!skb->xmit_more ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
- /* we need this if more than one processor can write to
- * our tail at a time, it synchronizes IO on IA64/Altix
- * systems
- */
- mmiowb();
}
} else {
dev_kfree_skb_any(skb);
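
This and the similar hunks below drop mmiowb() after doorbell writes; the calls became redundant when the kernel folded that ordering guarantee into the spinlock implementation on the architectures that needed it. A loose user-space analogue, with a pthread mutex standing in for the queue lock and a plain variable for the tail register (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned int tail_reg;

static void xmit(unsigned int next_to_use)
{
	pthread_mutex_lock(&txq_lock);
	tail_reg = next_to_use;  /* the "writel(i, tx_ring->tail)" */
	/* no explicit barrier: the unlock's release ordering covers it */
	pthread_mutex_unlock(&txq_lock);
}

int main(void)
{
	xmit(42);
	printf("tail = %u\n", tail_reg);
	return 0;
}
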
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7acc61e4f645..022c3ac0e40f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3816,7 +3816,6 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
if (tx_ring->next_to_use == tx_ring->count)
tx_ring->next_to_use = 0;
ew32(TDT(0), tx_ring->next_to_use);
- mmiowb();
usleep_range(200, 250);
}
@@ -5904,12 +5903,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_ring->next_to_use);
else
writel(tx_ring->next_to_use, tx_ring->tail);
-
- /* we need this if more than one processor can write
- * to our tail at a time, it synchronizes IO on
- *IA64/Altix systems
- */
- mmiowb();
}
} else {
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 5d4f1761dc0c..8de77155f2e7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -321,8 +321,6 @@ static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
err_mask |= PCI_ERR_UNC_COMP_ABORT;
pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
-
- mmiowb();
}
int fm10k_iov_resume(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5a0419421511..cbf76a96e94e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
/* create driver workqueue */
fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
fm10k_driver_name);
+ if (!fm10k_workqueue)
+ return -ENOMEM;
fm10k_dbg_init();
@@ -1037,11 +1039,6 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
/* notify HW of packet */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d684998ba2b0..d3cc3427caad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -790,6 +790,8 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
+
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
return !!vsi->xdp_prog;
}
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
- bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
- int qid = ring->queue_index;
-
- if (ring_is_xdp(ring))
- qid -= ring->vsi->alloc_queue_pairs;
-
- if (!xdp_on)
- return NULL;
-
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4c885801fa26..7874d0ec7fb0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
/* only magic packet is supported */
- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
- | (wol->wolopts != WAKE_FILTER))
+ if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
/* is this a new value? */
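
The old condition mixed && with a bitwise | over boolean subexpressions, so operator precedence made it reject even a plain WAKE_MAGIC request; the mask form accepts exactly WAKE_MAGIC (or nothing). A standalone demonstration, with flag values mirroring ethtool's:

#include <stdio.h>

#define WAKE_MAGIC  0x20
#define WAKE_FILTER 0x80

static int old_check(unsigned int wolopts)
{
	/* kept verbatim, including the precedence bug */
	return wolopts && (wolopts != WAKE_MAGIC) | (wolopts != WAKE_FILTER);
}

static int new_check(unsigned int wolopts)
{
	return (wolopts & ~WAKE_MAGIC) != 0;
}

int main(void)
{
	/* a plain magic-packet request should be accepted (0 = accepted) */
	printf("old rejects WAKE_MAGIC: %d\n", old_check(WAKE_MAGIC)); /* 1: bug */
	printf("new rejects WAKE_MAGIC: %d\n", new_check(WAKE_MAGIC)); /* 0 */
	return 0;
}
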
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da62218eb70a..b1c265012c8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC UMEM if XDP and zero-copy are enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+ bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+ int qid = ring->queue_index;
+
+ if (ring_is_xdp(ring))
+ qid -= ring->vsi->alloc_queue_pairs;
+
+ if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
+/**
* i40e_configure_tx_ring - Configure a transmit ring context and rest
* @ring: The Tx ring to configure
*
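
The relocated helper now also consults the per-VSI bitmap (af_xdp_zc_qps, added in i40e.h above), so a UMEM is returned only for queue pairs that actually enabled AF_XDP zero-copy. A sketch of that gating with a single word as the bitmap; the helpers stand in for the kernel bitmap API and xdp_get_umem_from_qid().

#include <stdbool.h>
#include <stdio.h>

static unsigned long zc_qps; /* one bit per queue pair, up to 64 here */

static void zc_enable(int qid)  { zc_qps |=  (1UL << qid); }
static void zc_disable(int qid) { zc_qps &= ~(1UL << qid); }
static bool zc_enabled(int qid) { return zc_qps & (1UL << qid); }

static const char *lookup_umem(int qid, bool xdp_on)
{
	if (!xdp_on || !zc_enabled(qid))
		return NULL;  /* skip queues that never enabled ZC */
	return "umem";    /* placeholder for the real lookup */
}

int main(void)
{
	zc_enable(3);
	printf("qid 3: %s\n", lookup_umem(3, true) ? "zero-copy" : "copy");
	zc_disable(3);
	printf("qid 3: %s\n", lookup_umem(3, true) ? "zero-copy" : "copy");
	return 0;
}
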
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
+ if (type == I40E_VSI_MAIN) {
+ vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_rings;
+ }
+
ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
goto unlock_pf;
err_rings:
+ bitmap_free(vsi->af_xdp_zc_qps);
pf->next_vsi = i - 1;
kfree(vsi);
unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ bitmap_free(vsi->af_xdp_zc_qps);
i40e_vsi_free_arrays(vsi, true);
i40e_clear_rss_config_user(vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5fb4353c742b..31575c0bb884 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now;
+ struct timespec64 now, then;
+ then = ns_to_timespec64(delta);
mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now, NULL);
- timespec64_add_ns(&now, delta);
+ now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
mutex_unlock(&pf->tmreg_lock);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6c97667d20ef..ffb611bbedfa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3471,11 +3471,6 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* notify HW of packet */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b5c182e688e3..1b17486543ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
return err;
}
+ clear_bit(qid, vsi->af_xdp_zc_qps);
i40e_xsk_umem_dma_unmap(vsi, umem);
if (if_running) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 9b4d7cec2e18..6bfef82e7607 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2360,11 +2360,6 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
/* notify HW of packet */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c289d97f477d..1af21bbe180e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1356,11 +1356,6 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
/* notify HW of packet */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return;
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 01fcfc6f3415..d2e2c50ce257 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 69b230c53fed..1d71ec360b1c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6028,11 +6028,6 @@ static int igb_tx_map(struct igb_ring *tx_ring,
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return 0;
@@ -8740,9 +8735,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
+ bool wake;
rtnl_lock();
netif_device_detach(netdev);
@@ -8755,14 +8748,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- if (!runtime) {
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
- }
-#endif
-
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8764,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
ctrl = rd32(E1000_CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
wr32(E1000_CTRL, ctrl);
@@ -8796,12 +8777,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
wr32(E1000_WUFC, 0);
}
- *enable_wake = wufc || adapter->en_mng_pt;
- if (!*enable_wake)
+ wake = wufc || adapter->en_mng_pt;
+ if (!wake)
igb_power_down_link(adapter);
else
igb_power_up_link(adapter);
+ if (enable_wake)
+ *enable_wake = wake;
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -8844,22 +8828,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
static int __maybe_unused igb_suspend(struct device *dev)
{
- int retval;
- bool wake;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- retval = __igb_shutdown(pdev, &wake, 0);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
static int __maybe_unused igb_resume(struct device *dev)
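
After the refactor, __igb_shutdown() owns the sleep-state handling and enable_wake becomes an optional out-parameter: the suspend paths pass NULL, while the .shutdown path still retrieves the flag. A minimal model of that pattern:

#include <stdbool.h>
#include <stdio.h>

static int shutdown_dev(bool *enable_wake, bool runtime)
{
	bool wake = !runtime;     /* stand-in for the real WUFC computation */

	if (enable_wake)
		*enable_wake = wake;  /* report only if the caller asked */
	return 0;
}

int main(void)
{
	bool wake;

	shutdown_dev(&wake, false);      /* .shutdown path wants the flag */
	printf("wake: %d\n", wake);
	return shutdown_dev(NULL, true); /* suspend paths pass NULL */
}
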
@@ -8930,22 +8899,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __igb_shutdown(pdev, &wake, 1);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
static int __maybe_unused igb_runtime_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 4eab83faec62..34cd30d7162f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2279,10 +2279,6 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 87a11879bf2d..f8d692f6aa4f 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -892,11 +892,6 @@ static int igc_tx_map(struct igc_ring *tx_ring,
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e100054a3765..99e23cf6a73a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8299,11 +8299,6 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index cc4907f9ff02..2fb97967961c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
struct mii_bus *bus;
+ int err = -ENODEV;
- adapter->mii_bus = devm_mdiobus_alloc(dev);
- if (!adapter->mii_bus)
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
return -ENOMEM;
- bus = adapter->mii_bus;
-
switch (hw->device_id) {
/* C3000 SoCs */
case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*/
hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
- return mdiobus_register(bus);
+ err = mdiobus_register(bus);
+ if (!err) {
+ adapter->mii_bus = bus;
+ return 0;
+ }
ixgbe_no_mii_bus:
devm_mdiobus_free(dev, bus);
- adapter->mii_bus = NULL;
- return -ENODEV;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 8b3495ee2b6e..49486c10ef81 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1139,9 +1139,6 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
/* Make sure writes to descriptors are complete before we tell hardware */
wmb();
sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
-
- /* Synchronize I/O on since next processor may write to tail */
- mmiowb();
}
@@ -1354,7 +1351,6 @@ stopped:
/* reset the Rx prefetch unit */
sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
- mmiowb();
}
/* Clean out receive buffer area, assumes receiver hardware stopped */
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c81d15bf259c..87e90b5d4d7d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -129,10 +129,6 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
__raw_writel((__force u32)cpu_to_be32(comm_flags),
(__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
- /* Make sure that our comm channel write doesn't
- * get mixed in with writes from another CPU.
- */
- mmiowb();
end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
while (time_before(jiffies, end)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index c19e74e6ac94..c678344d22a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -281,7 +281,6 @@ static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
__raw_writel((__force u32) cpu_to_be32(val),
&priv->mfunc.comm->slave_write);
- mmiowb();
mutex_unlock(&dev->persist->device_state_mutex);
return 0;
}
@@ -496,12 +495,6 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
(op_modifier << HCR_OPMOD_SHIFT) |
op), hcr + 6);
- /*
- * Make sure that our HCR writes don't get mixed in with
- * writes from another CPU starting a FW command.
- */
- mmiowb();
-
cmd->toggle = cmd->toggle ^ 1;
ret = 0;
@@ -2206,7 +2199,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
}
__raw_writel((__force u32) cpu_to_be32(reply),
&priv->mfunc.comm[slave].slave_read);
- mmiowb();
return;
@@ -2410,7 +2402,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
&priv->mfunc.comm[i].slave_write);
__raw_writel((__force u32) 0,
&priv->mfunc.comm[i].slave_read);
- mmiowb();
for (port = 1; port <= MLX4_MAX_PORTS; port++) {
struct mlx4_vport_state *admin_vport;
struct mlx4_vport_state *oper_vport;
@@ -2576,10 +2567,6 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
__raw_writel((__force u32)cpu_to_be32(slave_read),
&priv->mfunc.comm[slave].slave_read);
- /* Make sure that our comm channel write doesn't
- * get mixed in with writes from another CPU.
- */
- mmiowb();
}
}
@@ -2645,6 +2632,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
if (!priv->cmd.context)
return -ENOMEM;
+ if (mlx4_is_mfunc(dev))
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
@@ -2670,6 +2659,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
down(&priv->cmd.poll_sem);
priv->cmd.use_events = 1;
up_write(&priv->cmd.switch_sem);
+ if (mlx4_is_mfunc(dev))
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
return err;
}
@@ -2682,6 +2673,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ if (mlx4_is_mfunc(dev))
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
@@ -2689,9 +2682,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
down(&priv->cmd.event_sem);
kfree(priv->cmd.context);
+ priv->cmd.context = NULL;
up(&priv->cmd.poll_sem);
up_write(&priv->cmd.switch_sem);
+ if (mlx4_is_mfunc(dev))
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index eb13d3618162..4356f3a58002 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
int total_pages;
int total_mem;
int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+ int tot;
sq_size = 1 << (log_sq_size + log_sq_sride + 4);
rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
total_mem = sq_size + rq_size;
- total_pages =
- roundup_pow_of_two((total_mem + (page_offset << 6)) >>
- page_shift);
+ tot = (total_mem + (page_offset << 6)) >> page_shift;
+ total_pages = !tot ? 1 : roundup_pow_of_two(tot);
return total_pages;
}
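
The qp_get_mtt_size() change above guards against passing 0 to
roundup_pow_of_two(), whose result is undefined for 0 (in the kernel it
expands to 1UL << fls_long(n - 1)). A hedged user-space check of the
fixed arithmetic; the helper below is a plain stand-in for the kernel
macro, valid only for n >= 1:

    #include <assert.h>
    #include <stdio.h>

    /* stand-in for the kernel's roundup_pow_of_two(); valid for n >= 1 */
    static unsigned long roundup_pow_of_two(unsigned long n)
    {
            unsigned long p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    static int total_pages(int total_mem, int page_offset, int page_shift)
    {
            int tot = (total_mem + (page_offset << 6)) >> page_shift;

            return !tot ? 1 : (int)roundup_pow_of_two(tot);
    }

    int main(void)
    {
            /* a QP whose memory fits below one page used to yield 0 */
            assert(total_pages(0, 0, 12) == 1);
            assert(total_pages(12288, 0, 12) == 4);
            printf("ok\n");
            return 0;
    }
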
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index be48c6440251..c087d1014b09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -917,7 +917,6 @@ static void cmd_work_handler(struct work_struct *work)
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
- mmiowb();
/* if not in polling don't use ent after this point */
if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
poll_timeout(ent);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 71c65cc17904..d3eaf2ceaa39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
* switching channels
*/
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_hw_modify hw_modify);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 122927f3a600..d5e5afbdca6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
if (!eproto)
return -EINVAL;
- if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
- return -EOPNOTSUPP;
-
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index eac245a93f91..4ab0d030b544 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -122,7 +122,9 @@ out:
return err;
}
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
+ * The minimum speed value used in the calculation is 40Gbps.
+ */
static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
{
u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;
err = mlx5e_port_linkspeed(priv->mdev, &speed);
- if (err) {
- mlx5_core_warn(priv->mdev, "cannot get port speed\n");
- return 0;
- }
+ if (err)
+ speed = SPEED_40000;
+ speed = max_t(u32, speed, SPEED_40000);
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
}
static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
- u32 xoff, unsigned int mtu)
+ u32 xoff, unsigned int max_mtu)
{
int i;
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}
if (port_buffer->buffer[i].size <
- (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+ (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
return -ENOMEM;
port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
- port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
+ port_buffer->buffer[i].xon =
+ port_buffer->buffer[i].xoff - max_mtu;
}
return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
/**
* update_buffer_lossy()
- * mtu: device's MTU
+ * max_mtu: netdev's max_mtu
* pfc_en: <input> current pfc configuration
* buffer: <input> current prio to buffer mapping
* xoff: <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* Return 0 if no error.
* Set change to true if buffer configuration is modified.
*/
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff,
struct mlx5e_port_buffer *port_buffer,
bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
}
if (changed) {
- err = update_xoff_threshold(port_buffer, xoff, mtu);
+ err = update_xoff_threshold(port_buffer, xoff, max_mtu);
if (err)
return err;
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
return 0;
}
+#define MINIMUM_MAX_MTU 9216
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
bool update_prio2buffer = false;
u8 buffer[MLX5E_MAX_PRIORITY];
bool update_buffer = false;
+ unsigned int max_mtu;
u32 total_used = 0;
u8 curr_pfc_en;
int err;
int i;
mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+ max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
err = mlx5e_port_query_buffer(priv, &port_buffer);
if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
&port_buffer, &update_buffer);
if (err)
return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
- &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+ xoff, &port_buffer, &update_buffer);
if (err)
return err;
}
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return -EINVAL;
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
/* Need to update buffer configuration if xoff value is changed */
if (!update_buffer && xoff != priv->dcbx.xoff) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
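
With the hunks above, calculate_xoff() clamps the link speed to a
40Gbps floor instead of bailing out when the speed query fails. A
minimal sketch of the integer arithmetic, assuming the speed is
reported in Mbps as mlx5e_port_linkspeed() does; SPEED_40000 matches
linux/ethtool.h:

    #include <stdio.h>

    #define SPEED_40000 40000U      /* Mbps, as in linux/ethtool.h */

    /* xoff = (301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B],
     * scaled to integer arithmetic exactly as the driver does */
    static unsigned int calc_xoff(unsigned int speed_mbps,
                                  unsigned int cable_len_m,
                                  unsigned int mtu)
    {
            unsigned int speed = speed_mbps < SPEED_40000 ?
                                 SPEED_40000 : speed_mbps;

            return (301 + 216 * cable_len_m / 100) * speed / 1000 +
                   272 * mtu / 100;
    }

    int main(void)
    {
            /* a 10G link is clamped up to the 40G floor */
            printf("%u\n", calc_xoff(10000, 7, 1500));   /* 16720 */
            printf("%u\n", calc_xoff(100000, 7, 1500));  /* 35680 */
            return 0;
    }
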
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 9d38e62cdf24..476dd97f7f2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
{
- int err;
+ int err = 0;
rtnl_lock();
mutex_lock(&priv->state_lock);
- mlx5e_close_locked(priv->netdev);
- err = mlx5e_open_locked(priv->netdev);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ err = mlx5e_safe_reopen_channels(priv);
+
+out:
mutex_unlock(&priv->state_lock);
rtnl_unlock();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index fa2a3c444cdc..eec07b34b4ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (!(mlx5e_eswitch_rep(*out_dev) &&
+ mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+ return -EOPNOTSUPP;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 03b2a9f9c589..cad34d6f5f45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -33,6 +33,26 @@
#include <linux/bpf_trace.h>
#include "en/xdp.h"
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
+{
+ int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+
+ /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
+ * The condition checked in mlx5e_rx_is_linear_skb is:
+ * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1)
+ * (Note that hw_mtu == sw_mtu + hard_mtu.)
+ * What is returned from this function is:
+ * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2)
+ * After assigning sw_mtu := max_mtu, the left side of (1) turns to
+ * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
+ * because both PAGE_SIZE and S are already aligned. Any number greater
+ * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
+ * so max_mtu is the maximum MTU allowed.
+ */
+
+ return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
+}
+
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
struct xdp_buff *xdp)
@@ -304,9 +324,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
mlx5e_xdpi_fifo_pop(xdpi_fifo);
if (is_redirect) {
- xdp_return_frame(xdpi.xdpf);
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, DMA_TO_DEVICE);
+ xdp_return_frame(xdpi.xdpf);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi.di, true);
@@ -345,9 +365,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
mlx5e_xdpi_fifo_pop(xdpi_fifo);
if (is_redirect) {
- xdp_return_frame(xdpi.xdpf);
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, DMA_TO_DEVICE);
+ xdp_return_frame(xdpi.xdpf);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi.di, false);
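
The claim in the mlx5e_xdp_max_mtu() comment can be checked
numerically: with max_mtu = PAGE_SIZE - S - hr - hard_mtu, condition
(1) holds with equality and fails for any larger MTU, because PAGE_SIZE
and S are both alignment multiples. A user-space sketch under assumed
values; PAGE_SIZE, the cache-line alignment, NET_IP_ALIGN and the
skb_shared_info size are illustrative, XDP_PACKET_HEADROOM is the real
256:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define SMP_CACHE_BYTES 64UL
    /* stand-in for SKB_DATA_ALIGN() */
    #define ALIGN_UP(x)     (((x) + SMP_CACHE_BYTES - 1) & \
                             ~(SMP_CACHE_BYTES - 1))

    int main(void)
    {
            unsigned long S = ALIGN_UP(320); /* assumed skb_shared_info size */
            unsigned long hr = 2 + 256;      /* NET_IP_ALIGN + headroom */
            unsigned long hard_mtu = 18;     /* illustrative L2 overhead */
            unsigned long max_mtu = PAGE_SIZE - S - hr - hard_mtu;

            /* condition (1) holds with equality at max_mtu ... */
            assert(ALIGN_UP(max_mtu + hard_mtu + hr) + S == PAGE_SIZE);
            /* ... and fails for any larger MTU */
            assert(ALIGN_UP(max_mtu + 1 + hard_mtu + hr) + S > PAGE_SIZE);
            printf("max_mtu = %lu\n", max_mtu);
            return 0;
    }
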
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index ee27a7c8cd87..553956cadc8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -34,13 +34,12 @@
#include "en.h"
-#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
- MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
(sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
void *va, u16 *rx_headroom, u32 *len);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3078491cc0d0..1539cf3de5dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
if (err)
return err;
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return 0;
}
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir)
{
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
mlx5_core_destroy_tir(mdev, tir->tirn);
list_del(&tir->list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
}
INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+ mutex_init(&mdev->mlx5e_res.td.list_lock);
return 0;
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
- int err = -ENOMEM;
+ int err = 0;
u32 tirn = 0;
int inlen;
void *in;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ if (!in) {
+ err = -ENOMEM;
goto out;
+ }
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return err;
}
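
The list_lock additions close a race between mlx5e_create_tir() /
mlx5e_destroy_tir() and the list walk in mlx5e_refresh_tirs(). A
minimal user-space sketch of the pattern with a pthread mutex standing
in for the kernel mutex; the tir struct and tirn values are
illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct tir { int tirn; struct tir *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct tir *tirs_list;

    static void create_tir(int tirn)
    {
            struct tir *t = malloc(sizeof(*t));

            t->tirn = tirn;
            pthread_mutex_lock(&list_lock);   /* insertion is serialized */
            t->next = tirs_list;
            tirs_list = t;
            pthread_mutex_unlock(&list_lock);
    }

    static void refresh_tirs(void)
    {
            struct tir *t;

            pthread_mutex_lock(&list_lock);   /* walk cannot race an add/del */
            for (t = tirs_list; t; t = t->next)
                    printf("modify tir 0x%x\n", t->tirn);
            pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
            create_tir(1);
            create_tir(2);
            refresh_tirs();
            return 0;
    }
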
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 0804b478ad19..78dc8fe2a83c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -424,6 +424,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
goto out;
}
@@ -600,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
- unsigned long *advertising_modes,
- u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+ u32 eth_proto_cap, bool ext)
{
unsigned long proto_cap = eth_proto_cap;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+ table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+ max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+ ARRAY_SIZE(ptys2legacy_ethtool_table);
+
for_each_set_bit(proto, &proto_cap, max_size)
bitmap_or(advertising_modes, advertising_modes,
table[proto].advertised,
@@ -791,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
- u8 tx_pause, u8 rx_pause,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+ struct ethtool_link_ksettings *link_ksettings,
+ bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+ ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -851,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+ bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
- ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+ ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -869,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
u8 an_disable_admin;
u8 an_status;
u8 connector_type;
+ bool admin_ext;
bool ext;
int err;
@@ -883,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
eth_proto_capability);
eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_admin);
+ /* The fields eth_proto_admin and ext_eth_proto_admin are
+ * mutually exclusive, so fall back to reading the legacy
+ * advertising field when the extended one is zero.
+ * admin_ext indicates how eth_proto_admin should be
+ * interpreted.
+ */
+ admin_ext = ext;
+ if (ext && !eth_proto_admin) {
+ eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+ eth_proto_admin);
+ admin_ext = false;
+ }
+
eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_oper);
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -896,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
get_supported(mdev, eth_proto_cap, link_ksettings);
- get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+ get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+ admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -994,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
- ext_requested = (link_ksettings->link_modes.advertising[0] >
- MLX5E_PTYS_EXT);
+ ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+ MLX5E_PTYS_EXT ||
+ link_ksettings->link_modes.advertising[1]);
ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
- /*when ptys_extended_ethernet is set legacy link modes are deprecated */
- if (ext_requested != ext_supported)
- return -EPROTONOSUPPORT;
+ ext_requested &= ext_supported;
speed = link_ksettings->base.speed;
ethtool2ptys_adver_func = ext_requested ?
mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
- err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
@@ -1034,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+ mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
mlx5_toggle_port_link(mdev);
out:
@@ -1567,7 +1586,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
break;
case MLX5_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
break;
default:
netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
@@ -1749,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
struct mlx5e_channel *c;
int i;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+ priv->channels.params.xdp_prog)
return 0;
for (i = 0; i < channels->num; i++) {
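
The set_link_ksettings hunks above infer extended mode from the request
itself: advertising is extended when word 0 exceeds the legacy bit
range or word 1 is non-zero, and the result is masked with device
support instead of returning -EPROTONOSUPPORT. A sketch of that
predicate; the bit position 52 is illustrative of
ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:

    #include <stdbool.h>
    #include <stdio.h>

    #define EXT_BASE_BIT  52   /* assumed 50000baseKR_Full bit */
    #define PTYS_EXT_MASK ((1ULL << EXT_BASE_BIT) - 1)

    static bool ext_requested(const unsigned long long adv[2],
                              bool ext_supported)
    {
            bool ext = adv[0] > PTYS_EXT_MASK || adv[1];

            return ext && ext_supported;  /* was: hard -EPROTONOSUPPORT */
    }

    int main(void)
    {
            unsigned long long legacy[2]   = { 1ULL << 10, 0 };
            unsigned long long extended[2] = { 0, 1ULL << 3 };

            printf("%d %d\n", ext_requested(legacy, true),
                   ext_requested(extended, true));   /* 0 1 */
            return 0;
    }
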
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b5fdbd3190d9..46157e2a1e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (params->rx_dim_enabled)
__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
+ /* We disable csum_complete when XDP is enabled since
+ * XDP programs might manipulate packets which will render
+ * skb->checksum incorrect.
+ */
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
return 0;
@@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
return 0;
}
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channels new_channels = {};
+
+ new_channels.params = priv->channels.params;
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+}
+
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
@@ -3765,7 +3777,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (params->xdp_prog &&
!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
- new_mtu, MLX5E_XDP_MAX_MTU);
+ new_mtu, mlx5e_xdp_max_mtu(params));
err = -EINVAL;
goto out;
}
@@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
if (!report_failed)
goto unlock;
- mlx5e_close_locked(priv->netdev);
- err = mlx5e_open_locked(priv->netdev);
+ err = mlx5e_safe_reopen_channels(priv);
if (err)
netdev_err(priv->netdev,
- "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
err);
unlock:
@@ -4201,7 +4212,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
- new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+ new_channels.params.sw_mtu,
+ mlx5e_xdp_max_mtu(&new_channels.params));
return -EINVAL;
}
@@ -4553,7 +4565,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
{
enum mlx5e_traffic_types tt;
- rss_params->hfunc = ETH_RSS_HASH_XOR;
+ rss_params->hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss_params->toeplitz_hash_key,
sizeof(rss_params->toeplitz_hash_key));
mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index a1a3e2774989..a66b6ed80b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1129,16 +1129,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- int ret, pf_num;
+ unsigned int fn;
+ int ret;
- ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
- if (ret)
- return ret;
+ fn = PCI_FUNC(priv->mdev->pdev->devfn);
+ if (fn >= MLX5_MAX_PORTS)
+ return -EOPNOTSUPP;
if (rep->vport == MLX5_VPORT_UPLINK)
- ret = snprintf(buf, len, "p%d", pf_num);
+ ret = snprintf(buf, len, "p%d", fn);
else
- ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
+ ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1);
if (ret >= len)
return -EOPNOTSUPP;
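
The representor now derives its phys_port_name prefix straight from the
PCI function number instead of querying the LAG state, which could be
absent. A tiny sketch of the resulting naming scheme; PCI_FUNC()
matches the kernel's definition:

    #include <stdio.h>

    #define MLX5_MAX_PORTS  2
    #define PCI_FUNC(devfn) ((devfn) & 0x07)

    static int phys_port_name(char *buf, int len, unsigned int devfn,
                              int uplink, int vport)
    {
            unsigned int fn = PCI_FUNC(devfn);

            if (fn >= MLX5_MAX_PORTS)
                    return -1;
            return uplink ? snprintf(buf, len, "p%d", fn) :
                            snprintf(buf, len, "pf%dvf%d", fn, vport - 1);
    }

    int main(void)
    {
            char buf[16];

            phys_port_name(buf, sizeof(buf), 0, 1, 0);
            printf("%s\n", buf);     /* p0 */
            phys_port_name(buf, sizeof(buf), 1, 0, 3);
            printf("%s\n", buf);     /* pf1vf2 */
            return 0;
    }
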
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index be396e5e4e39..c3b3002ff62f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
{
*proto = ((struct ethhdr *)skb->data)->h_proto;
*proto = __vlan_get_protocol(skb, *proto, network_depth);
- return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+
+ if (*proto == htons(ETH_P_IP))
+ return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+ if (*proto == htons(ETH_P_IPV6))
+ return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+ return false;
}
static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
rq->stats->ecn_mark += !!rc;
}
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
-{
- const void *fcs_bytes;
- u32 _fcs_bytes;
-
- fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
- ETH_FCS_LEN, &_fcs_bytes);
-
- return __get_unaligned_cpu32(fcs_bytes);
-}
-
static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
{
void *ip_p = skb->data + network_depth;
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+ struct mlx5e_rq_stats *stats)
+{
+ stats->csum_complete_tail_slow++;
+ skb->csum = csum_block_add(skb->csum,
+ skb_checksum(skb, offset, len, 0),
+ offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+ struct mlx5e_rq_stats *stats)
+{
+ u8 tail_padding[MAX_PADDING];
+ int len = skb->len - offset;
+ void *tail;
+
+ if (unlikely(len > MAX_PADDING)) {
+ tail_padding_csum_slow(skb, offset, len, stats);
+ return;
+ }
+
+ tail = skb_header_pointer(skb, offset, len, tail_padding);
+ if (unlikely(!tail)) {
+ tail_padding_csum_slow(skb, offset, len, stats);
+ return;
+ }
+
+ stats->csum_complete_tail++;
+ skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+ struct mlx5e_rq_stats *stats)
+{
+ struct ipv6hdr *ip6;
+ struct iphdr *ip4;
+ int pkt_len;
+
+ switch (proto) {
+ case htons(ETH_P_IP):
+ ip4 = (struct iphdr *)(skb->data + network_depth);
+ pkt_len = network_depth + ntohs(ip4->tot_len);
+ break;
+ case htons(ETH_P_IPV6):
+ ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+ pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+ break;
+ default:
+ return;
+ }
+
+ if (likely(pkt_len >= skb->len))
+ return;
+
+ tail_padding_csum(skb, pkt_len, stats);
+}
+
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+ /* True when explicitly set via priv flag, or XDP prog is loaded */
+ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
goto csum_unnecessary;
/* CQE csum doesn't cover padding octets in short ethernet
@@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->csum = csum_partial(skb->data + ETH_HLEN,
network_depth - ETH_HLEN,
skb->csum);
- if (unlikely(netdev->features & NETIF_F_RXFCS))
- skb->csum = csum_block_add(skb->csum,
- (__force __wsum)mlx5e_get_fcs(skb),
- skb->len - ETH_FCS_LEN);
+
+ mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
stats->csum_complete++;
return;
}
csum_unnecessary:
if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
- ((cqe->hds_ip_ext & CQE_L4_OK) ||
- (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
+ (cqe->hds_ip_ext & CQE_L4_OK))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
@@ -1295,8 +1351,14 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->protocol = *((__be16 *)(skb->data));
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (netdev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ stats->csum_complete++;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ stats->csum_none++;
+ }
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
@@ -1315,7 +1377,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->dev = netdev;
- stats->csum_complete++;
stats->packets++;
stats->bytes += cqe_bcnt;
}
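
The tail-padding fixup above replaces the old FCS special case: the
real datagram length is derived from the IP header, and the checksum of
any bytes past that point is folded into skb->csum so that
CHECKSUM_COMPLETE stays consistent on short, padded frames. A rough
user-space sketch of the length derivation; csum_acc() is a plain
internet-checksum stand-in, not the kernel's csum_partial():

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* plain internet-checksum accumulation; stand-in for csum_partial() */
    static uint32_t csum_acc(const uint8_t *p, int len, uint32_t sum)
    {
            while (len > 1) {
                    sum += (uint32_t)(p[0] << 8 | p[1]);
                    p += 2;
                    len -= 2;
            }
            if (len)
                    sum += (uint32_t)p[0] << 8;
            return sum;
    }

    int main(void)
    {
            uint8_t frame[60];            /* short wire frame, tail padded */
            uint16_t tot_len = htons(26); /* IPv4 tot_len: 20B hdr + 6B data */
            int network_depth = 14;       /* ethernet header */
            int pkt_len, pad_len;

            memset(frame, 0, sizeof(frame));
            memcpy(&frame[network_depth + 2], &tot_len, 2); /* iphdr->tot_len */
            memset(frame + 40, 0xff, 20); /* switch-inserted padding */

            memcpy(&tot_len, &frame[network_depth + 2], 2);
            pkt_len = network_depth + ntohs(tot_len);
            pad_len = (int)sizeof(frame) - pkt_len;

            printf("pkt_len=%d pad=%d pad_csum=%u\n", pkt_len, pad_len,
                   csum_acc(frame + pkt_len, pad_len, 0));
            return 0;
    }
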
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1a78e05cbba8..b75aa8b8bf04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+ s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_xdp_drop += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 4640d4f986f8..16c3b785f282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
u64 rx_csum_unnecessary;
u64 rx_csum_none;
u64 rx_csum_complete;
+ u64 rx_csum_complete_tail;
+ u64 rx_csum_complete_tail_slow;
u64 rx_csum_unnecessary_inner;
u64 rx_xdp_drop;
u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
u64 csum_complete;
+ u64 csum_complete_tail;
+ u64 csum_complete_tail_slow;
u64 csum_unnecessary;
u64 csum_unnecessary_inner;
u64 csum_none;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b4967a0ff8c7..d75dc44eb2ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
return true;
}
+struct ip_ttl_word {
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+};
+
+struct ipv6_hoplimit_word {
+ __be16 payload_len;
+ __u8 nexthdr;
+ __u8 hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+ u32 mask, offset;
+ u8 htype;
+
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+ mask = ~act->mangle.mask;
+ /* For IPv4 and IPv6 headers, check the containing 4-byte word
+ * to determine whether the modified fields are anything other
+ * than ttl and hop_limit.
+ */
+ if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+ struct ip_ttl_word *ttl_word =
+ (struct ip_ttl_word *)&mask;
+
+ if (offset != offsetof(struct iphdr, ttl) ||
+ ttl_word->protocol ||
+ ttl_word->check) {
+ return true;
+ }
+ } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ struct ipv6_hoplimit_word *hoplimit_word =
+ (struct ipv6_hoplimit_word *)&mask;
+
+ if (offset != offsetof(struct ipv6hdr, payload_len) ||
+ hoplimit_word->payload_len ||
+ hoplimit_word->nexthdr) {
+ return true;
+ }
+ }
+ return false;
+}
+
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
u32 actions,
@@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
{
const struct flow_action_entry *act;
bool modify_ip_header;
- u8 htype, ip_proto;
void *headers_v;
u16 ethertype;
+ u8 ip_proto;
int i;
if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
act->id != FLOW_ACTION_ADD)
continue;
- htype = act->mangle.htype;
- if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
- htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ if (is_action_keys_supported(act)) {
modify_ip_header = true;
break;
}
@@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return 0;
}
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
- struct ip_tunnel_key *b)
+struct encap_key {
+ struct ip_tunnel_key *ip_tun_key;
+ int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+ struct encap_key *b)
{
- return memcmp(a, b, sizeof(*a));
+ return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+ a->tunnel_type != b->tunnel_type;
}
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
{
- return jhash(key, sizeof(*key), 0);
+ return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+ key->tunnel_type);
}
@@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct ip_tunnel_info *tun_info;
- struct ip_tunnel_key *key;
+ struct encap_key key, e_key;
struct mlx5e_encap_entry *e;
unsigned short family;
uintptr_t hash_key;
@@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
tun_info = &parse_attr->tun_info[out_index];
family = ip_tunnel_info_af(tun_info);
- key = &tun_info->key;
+ key.ip_tun_key = &tun_info->key;
+ key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
- hash_key = hash_encap_info(key);
+ hash_key = hash_encap_info(&key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
- if (!cmp_encap_info(&e->tun_info.key, key)) {
+ e_key.ip_tun_key = &e->tun_info.key;
+ e_key.tunnel_type = e->tunnel_type;
+ if (!cmp_encap_info(&e_key, &key)) {
found = true;
break;
}
@@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+ err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
parse_attr, hdrs, extack);
if (err)
return err;
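
is_action_keys_supported() overlays a small struct on the 4-byte pedit
mask word to decide whether the action modifies anything besides
ttl/hop_limit. A sketch of the IPv4 case; the example assumes a
little-endian host, since the struct overlay is endian-sensitive, and
the field layout mirrors the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ip_ttl_word {
            uint8_t  ttl;
            uint8_t  protocol;
            uint16_t check;
    };

    #define IPHDR_TTL_OFFSET 8   /* offsetof(struct iphdr, ttl) */

    static bool needs_ip_header_rewrite(uint32_t offset, uint32_t pedit_mask)
    {
            uint32_t mask = ~pedit_mask;  /* bits the action modifies */
            struct ip_ttl_word w;

            memcpy(&w, &mask, sizeof(w));
            return offset != IPHDR_TTL_OFFSET || w.protocol || w.check;
    }

    int main(void)
    {
            /* only the ttl byte is writable: no full header rewrite */
            printf("%d\n", needs_ip_header_rewrite(IPHDR_TTL_OFFSET,
                                                   ~0xffu));        /* 0 */
            /* the same word but also touching the checksum bytes */
            printf("%d\n", needs_ip_header_rewrite(IPHDR_TTL_OFFSET,
                                                   ~0xffff00ffu));  /* 1 */
            return 0;
    }
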
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d0b28251abf2..8a67fd197b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
int err;
+ memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
err = esw_create_legacy_vepa_table(esw);
if (err)
return err;
@@ -1931,7 +1931,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u64 node_guid;
int err = 0;
- if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
return -EINVAL;
@@ -2005,7 +2005,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
{
struct mlx5_vport *evport;
- if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
/* Star rule to forward all traffic to uplink vport */
memset(spec, 0, sizeof(*spec));
+ memset(&dest, 0, sizeof(dest));
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = MLX5_VPORT_UPLINK;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@@ -2297,19 +2298,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
u32 max_rate, u32 min_rate)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
- fw_max_bw_share >= MLX5_MIN_BW_SHARE;
- bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
struct mlx5_vport *evport;
+ u32 fw_max_bw_share;
u32 previous_min_rate;
u32 divider;
+ bool min_rate_supported;
+ bool max_rate_supported;
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+
+ fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
+ fw_max_bw_share >= MLX5_MIN_BW_SHARE;
+ max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
+
if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f2260391be5b..9b2d78ee22b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
+ memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
err = esw_create_offloads_fdb_tables(esw, nvports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 5cf5f2a9d51f..22a2ef111514 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
return ret;
}
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
- spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+ spinlock_t *idr_spinlock, u32 swid)
{
unsigned long flags;
+ void *ptr;
spin_lock_irqsave(idr_spinlock, flags);
- idr_remove(idr, swid);
+ ptr = idr_remove(idr, swid);
spin_unlock_irqrestore(idr_spinlock, flags);
+ return ptr;
}
static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
kfree(buf);
}
-struct mlx5_teardown_stream_context {
- struct mlx5_fpga_tls_command_context cmd;
- u32 swid;
-};
-
static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_tls_command_context *cmd,
struct mlx5_fpga_dma_buf *resp)
{
- struct mlx5_teardown_stream_context *ctx =
- container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
if (resp) {
u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
mlx5_fpga_err(fdev,
"Teardown stream failed with syndrome = %d",
syndrome);
- else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
- mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
- &fdev->tls->tx_idr_spinlock,
- ctx->swid);
- else
- mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
- &fdev->tls->rx_idr_spinlock,
- ctx->swid);
}
mlx5_fpga_tls_put_command_ctx(cmd);
}
@@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
rcu_read_lock();
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
- rcu_read_unlock();
+ if (unlikely(!flow)) {
+ rcu_read_unlock();
+ WARN_ONCE(1, "Received NULL pointer for handle\n");
+ kfree(buf);
+ return -EINVAL;
+ }
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+ rcu_read_unlock();
MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
buf->complete = mlx_tls_kfree_complete;
ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+ if (ret < 0)
+ kfree(buf);
return ret;
}
@@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
void *flow, u32 swid, gfp_t flags)
{
- struct mlx5_teardown_stream_context *ctx;
+ struct mlx5_fpga_tls_command_context *ctx;
struct mlx5_fpga_dma_buf *buf;
void *cmd;
@@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
if (!ctx)
return;
- buf = &ctx->cmd.buf;
+ buf = &ctx->buf;
cmd = (ctx + 1);
MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
buf->sg[0].data = cmd;
buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- ctx->swid = swid;
- mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+ mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
mlx5_fpga_tls_teardown_completion);
}
@@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
void *flow;
- rcu_read_lock();
if (direction_sx)
- flow = idr_find(&tls->tx_idr, swid);
+ flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock,
+ swid);
else
- flow = idr_find(&tls->rx_idr, swid);
-
- rcu_read_unlock();
+ flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock,
+ swid);
if (!flow) {
mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
return;
}
+ synchronize_rcu(); /* before kfree(flow) */
mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
}
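
The teardown rework above unpublishes the swid -> flow mapping first,
so no new lookup can find it, and only then waits for in-flight RCU
readers before the flow may be freed; the resync path now also builds
its command while still inside the read-side section. A rough
user-space analogue, with a pthread rwlock standing in for RCU
(liburcu would be the closer equivalent); all names are illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_rwlock_t readers = PTHREAD_RWLOCK_INITIALIZER;
    static int *flow_tbl[4];        /* stand-in for the swid -> flow idr */

    static int *release_swid(int swid)
    {
            int *flow = flow_tbl[swid];

            flow_tbl[swid] = NULL;  /* unpublish first ... */
            return flow;            /* ... and hand back what was there */
    }

    static void del_flow(int swid)
    {
            int *flow = release_swid(swid);

            if (!flow)
                    return;
            /* taking the lock for write means all in-flight readers are
             * done: the counterpart of synchronize_rcu() before kfree() */
            pthread_rwlock_wrlock(&readers);
            pthread_rwlock_unlock(&readers);
            free(flow);
    }

    static void resync_rx(int swid)
    {
            pthread_rwlock_rdlock(&readers);  /* rcu_read_lock() */
            if (flow_tbl[swid])
                    printf("resync flow %d\n", *flow_tbl[swid]);
            pthread_rwlock_unlock(&readers);  /* rcu_read_unlock() */
    }

    int main(void)
    {
            flow_tbl[1] = malloc(sizeof(int));
            *flow_tbl[1] = 42;
            resync_rx(1);
            del_flow(1);
            resync_rx(1);   /* mapping gone: no use-after-free possible */
            return 0;
    }
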
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f2cfa012315e..0be3eb86dd84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -263,10 +263,11 @@ static void nested_down_write_ref_node(struct fs_node *node,
}
}
-static void down_write_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node, bool locked)
{
if (node) {
- down_write(&node->lock);
+ if (!locked)
+ down_write(&node->lock);
refcount_inc(&node->refcount);
}
}
@@ -277,13 +278,14 @@ static void up_read_ref_node(struct fs_node *node)
up_read(&node->lock);
}
-static void up_write_ref_node(struct fs_node *node)
+static void up_write_ref_node(struct fs_node *node, bool locked)
{
refcount_dec(&node->refcount);
- up_write(&node->lock);
+ if (!locked)
+ up_write(&node->lock);
}
-static void tree_put_node(struct fs_node *node)
+static void tree_put_node(struct fs_node *node, bool locked)
{
struct fs_node *parent_node = node->parent;
@@ -294,27 +296,27 @@ static void tree_put_node(struct fs_node *node)
/* Only root namespace doesn't have parent and we just
* need to free its node.
*/
- down_write_ref_node(parent_node);
+ down_write_ref_node(parent_node, locked);
list_del_init(&node->list);
if (node->del_sw_func)
node->del_sw_func(node);
- up_write_ref_node(parent_node);
+ up_write_ref_node(parent_node, locked);
} else {
kfree(node);
}
node = NULL;
}
if (!node && parent_node)
- tree_put_node(parent_node);
+ tree_put_node(parent_node, locked);
}
-static int tree_remove_node(struct fs_node *node)
+static int tree_remove_node(struct fs_node *node, bool locked)
{
if (refcount_read(&node->refcount) > 1) {
refcount_dec(&node->refcount);
return -EEXIST;
}
- tree_put_node(node);
+ tree_put_node(node, locked);
return 0;
}
@@ -420,22 +422,34 @@ static void del_sw_flow_table(struct fs_node *node)
kfree(ft);
}
-static void del_sw_hw_rule(struct fs_node *node)
+static void modify_fte(struct fs_fte *fte)
{
struct mlx5_flow_root_namespace *root;
- struct mlx5_flow_rule *rule;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
- struct fs_fte *fte;
- int modify_mask;
- struct mlx5_core_dev *dev = get_dev(node);
+ struct mlx5_core_dev *dev;
int err;
- bool update_fte = false;
- fs_get_obj(rule, node);
- fs_get_obj(fte, rule->node.parent);
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
+ dev = get_dev(&fte->node);
+
+ root = find_root(&ft->node);
+ err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "%s can't del rule fg id=%d fte_index=%d\n",
+ __func__, fg->id, fte->index);
+ fte->modify_mask = 0;
+}
+
+static void del_sw_hw_rule(struct fs_node *node)
+{
+ struct mlx5_flow_rule *rule;
+ struct fs_fte *fte;
+
+ fs_get_obj(rule, node);
+ fs_get_obj(fte, rule->node.parent);
trace_mlx5_fs_del_rule(rule);
if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
mutex_lock(&rule->dest_attr.ft->lock);
@@ -445,27 +459,19 @@ static void del_sw_hw_rule(struct fs_node *node)
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
--fte->dests_size) {
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
- BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ fte->modify_mask |=
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
- update_fte = true;
goto out;
}
if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
- update_fte = true;
+ fte->modify_mask |=
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
}
out:
- root = find_root(&ft->node);
- if (update_fte && fte->dests_size) {
- err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
- if (err)
- mlx5_core_warn(dev,
- "%s can't del rule fg id=%d fte_index=%d\n",
- __func__, fg->id, fte->index);
- }
kfree(rule);
}
@@ -491,6 +497,7 @@ static void del_hw_fte(struct fs_node *node)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
fte->index, fg->id);
+ node->active = 0;
}
}
@@ -591,7 +598,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
fte->node.type = FS_TYPE_FLOW_ENTRY;
fte->action = *flow_act;
- tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+ tree_init_node(&fte->node, NULL, del_sw_fte);
return fte;
}
@@ -858,7 +865,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
fs_get_obj(fte, rule->node.parent);
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
- down_write_ref_node(&fte->node);
+ down_write_ref_node(&fte->node, false);
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
@@ -866,7 +873,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
root = find_root(&ft->node);
err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
modify_mask, fte);
- up_write_ref_node(&fte->node);
+ up_write_ref_node(&fte->node, false);
return err;
}
@@ -1016,11 +1023,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
if (err)
goto destroy_ft;
ft->node.active = true;
- down_write_ref_node(&fs_prio->node);
+ down_write_ref_node(&fs_prio->node, false);
tree_add_node(&ft->node, &fs_prio->node);
list_add_flow_table(ft, fs_prio);
fs_prio->num_ft++;
- up_write_ref_node(&fs_prio->node);
+ up_write_ref_node(&fs_prio->node, false);
mutex_unlock(&root->chain_lock);
trace_mlx5_fs_add_ft(ft);
return ft;
@@ -1114,17 +1121,17 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (ft->autogroup.active)
return ERR_PTR(-EPERM);
- down_write_ref_node(&ft->node);
+ down_write_ref_node(&ft->node, false);
fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
start_index, end_index,
ft->node.children.prev);
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
if (IS_ERR(fg))
return fg;
err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
if (err) {
- tree_put_node(&fg->node);
+ tree_put_node(&fg->node, false);
return ERR_PTR(err);
}
trace_mlx5_fs_add_fg(fg);
@@ -1521,10 +1528,10 @@ static void free_match_list(struct match_list_head *head)
struct match_list *iter, *match_tmp;
list_del(&head->first.list);
- tree_put_node(&head->first.g->node);
+ tree_put_node(&head->first.g->node, false);
list_for_each_entry_safe(iter, match_tmp, &head->list,
list) {
- tree_put_node(&iter->g->node);
+ tree_put_node(&iter->g->node, false);
list_del(&iter->list);
kfree(iter);
}
@@ -1601,11 +1608,16 @@ lookup_fte_locked(struct mlx5_flow_group *g,
fte_tmp = NULL;
goto out;
}
+ if (!fte_tmp->node.active) {
+ tree_put_node(&fte_tmp->node, false);
+ fte_tmp = NULL;
+ goto out;
+ }
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
if (take_write)
- up_write_ref_node(&g->node);
+ up_write_ref_node(&g->node, false);
else
up_read_ref_node(&g->node);
return fte_tmp;
@@ -1647,8 +1659,8 @@ search_again_locked:
continue;
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte_tmp);
- up_write_ref_node(&fte_tmp->node);
- tree_put_node(&fte_tmp->node);
+ up_write_ref_node(&fte_tmp->node, false);
+ tree_put_node(&fte_tmp->node, false);
kmem_cache_free(steering->ftes_cache, fte);
return rule;
}
@@ -1684,7 +1696,7 @@ skip_search:
err = insert_fte(g, fte);
if (err) {
- up_write_ref_node(&g->node);
+ up_write_ref_node(&g->node, false);
if (err == -ENOSPC)
continue;
kmem_cache_free(steering->ftes_cache, fte);
@@ -1692,11 +1704,11 @@ skip_search:
}
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
- up_write_ref_node(&g->node);
+ up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte);
- up_write_ref_node(&fte->node);
- tree_put_node(&fte->node);
+ up_write_ref_node(&fte->node, false);
+ tree_put_node(&fte->node, false);
return rule;
}
rule = ERR_PTR(-ENOENT);
@@ -1738,7 +1750,7 @@ search_again_locked:
err = build_match_list(&match_head, ft, spec);
if (err) {
if (take_write)
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
else
up_read_ref_node(&ft->node);
return ERR_PTR(err);
@@ -1753,7 +1765,7 @@ search_again_locked:
if (!IS_ERR(rule) ||
(PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
if (take_write)
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
return rule;
}
@@ -1769,12 +1781,12 @@ search_again_locked:
g = alloc_auto_flow_group(ft, spec);
if (IS_ERR(g)) {
rule = ERR_CAST(g);
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
return rule;
}
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
- up_write_ref_node(&ft->node);
+ up_write_ref_node(&ft->node, false);
err = create_auto_flow_group(ft, g);
if (err)
@@ -1793,17 +1805,17 @@ search_again_locked:
}
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
- up_write_ref_node(&g->node);
+ up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec->match_value, flow_act, dest,
dest_num, fte);
- up_write_ref_node(&fte->node);
- tree_put_node(&fte->node);
- tree_put_node(&g->node);
+ up_write_ref_node(&fte->node, false);
+ tree_put_node(&fte->node, false);
+ tree_put_node(&g->node, false);
return rule;
err_release_fg:
- up_write_ref_node(&g->node);
- tree_put_node(&g->node);
+ up_write_ref_node(&g->node, false);
+ tree_put_node(&g->node, false);
return ERR_PTR(err);
}
@@ -1866,10 +1878,33 @@ EXPORT_SYMBOL(mlx5_add_flow_rules);
void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
+ struct fs_fte *fte;
int i;
+ /* In order to consolidate the HW changes we lock the FTE for other
+ * changes, and increase its refcount, so that its "del" functions
+ * are not invoked by the tree helpers; they are handled here instead.
+ * The rules are removed under the locked FTE.
+ * After removing all of the handle's rules: if rules remain, we only
+ * need to modify the FTE in FW, then unlock it and drop the refcount
+ * taken above. Otherwise the FTE should be deleted: first delete it
+ * in FW, then unlock it and proceed with tree_put_node() on the FTE,
+ * which performs the final refcount decrease and the required
+ * handling of its parent.
+ */
+ fs_get_obj(fte, handle->rule[0]->node.parent);
+ down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
- tree_remove_node(&handle->rule[i]->node);
+ tree_remove_node(&handle->rule[i]->node, true);
+ if (fte->modify_mask && fte->dests_size) {
+ modify_fte(fte);
+ up_write_ref_node(&fte->node, false);
+ } else {
+ del_hw_fte(&fte->node);
+ up_write(&fte->node.lock);
+ tree_put_node(&fte->node, false);
+ }
kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);
@@ -1972,7 +2007,7 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
mutex_unlock(&root->chain_lock);
return err;
}
- if (tree_remove_node(&ft->node))
+ if (tree_remove_node(&ft->node, false))
mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
ft->id);
mutex_unlock(&root->chain_lock);
@@ -1983,7 +2018,7 @@ EXPORT_SYMBOL(mlx5_destroy_flow_table);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
- if (tree_remove_node(&fg->node))
+ if (tree_remove_node(&fg->node, false))
mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
fg->id);
}
@@ -2367,8 +2402,8 @@ static void clean_tree(struct fs_node *node)
tree_get_node(node);
list_for_each_entry_safe(iter, temp, &node->children, list)
clean_tree(iter);
- tree_put_node(node);
- tree_remove_node(node);
+ tree_put_node(node, false);
+ tree_remove_node(node, false);
}
}
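
The new `locked` flag lets mlx5_del_flow_rules() take the FTE write
lock once, call the tree helpers without re-acquiring it, and thereby
consolidate several rule deletions into a single FW update. A minimal
shape of the conditional-lock pattern; the node layout and names are
illustrative:

    #include <pthread.h>
    #include <stdio.h>

    struct node {
            pthread_mutex_t lock;
            int refcount;
    };

    static void down_write_ref_node(struct node *n, int locked)
    {
            if (!locked)            /* caller may already hold the lock */
                    pthread_mutex_lock(&n->lock);
            n->refcount++;
    }

    static void up_write_ref_node(struct node *n, int locked)
    {
            n->refcount--;
            if (!locked)
                    pthread_mutex_unlock(&n->lock);
    }

    int main(void)
    {
            struct node fte = { PTHREAD_MUTEX_INITIALIZER, 1 };

            /* consolidate several updates under one critical section */
            pthread_mutex_lock(&fte.lock);
            down_write_ref_node(&fte, 1);   /* locked=1: no deadlock */
            up_write_ref_node(&fte, 1);
            pthread_mutex_unlock(&fte.lock);
            printf("refcount=%d\n", fte.refcount);
            return 0;
    }
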
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 2dc86347af58..87de0e4d9124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -172,6 +172,7 @@ struct fs_fte {
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
+ int modify_mask;
};
/* Type of children is mlx5_flow_table/namespace */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 48aa6e030bcf..959605559858 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -595,27 +595,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
err);
}
-int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
-{
- struct mlx5_lag *ldev;
- int n;
-
- ldev = mlx5_lag_dev_get(dev);
- if (!ldev) {
- mlx5_core_warn(dev, "no lag device, can't get pf num\n");
- return -EINVAL;
- }
-
- for (n = 0; n < MLX5_MAX_PORTS; n++)
- if (ldev->pf[n].dev == dev) {
- *pf_num = n;
- return 0;
- }
-
- mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
- return -EINVAL;
-}
-
/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8391dde869a7..76716419370d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
.size = 8,
.limit = 4
},
- .mr_cache[16] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[17] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[18] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[19] = {
- .size = 4,
- .limit = 2
- },
- .mr_cache[20] = {
- .size = 4,
- .limit = 2
- },
},
};
@@ -465,20 +445,17 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
void *set_hca_cap;
void *set_ctx;
int set_sz;
+ bool do_set = false;
int err;
- if (!MLX5_CAP_GEN(dev, pg))
+ if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
+ !MLX5_CAP_GEN(dev, pg))
return 0;
err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
if (err)
return err;
- if (!(MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive) ||
- MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive) ||
- MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive)))
- return 0;
-
set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
@@ -488,19 +465,30 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
MLX5_ST_SZ_BYTES(odp_cap));
- /* set ODP SRQ support for RC/UD and XRC transports */
- MLX5_SET(odp_cap, set_hca_cap, ud_odp_caps.srq_receive,
- MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive));
-
- MLX5_SET(odp_cap, set_hca_cap, rc_odp_caps.srq_receive,
- MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive));
-
- MLX5_SET(odp_cap, set_hca_cap, xrc_odp_caps.srq_receive,
- MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive));
-
- err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+#define ODP_CAP_SET_MAX(dev, field) \
+ do { \
+ u32 _res = MLX5_CAP_ODP_MAX(dev, field); \
+ if (_res) { \
+ do_set = true; \
+ MLX5_SET(odp_cap, set_hca_cap, field, _res); \
+ } \
+ } while (0)
+
+ ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
+
+ if (do_set)
+ err = set_caps(dev, set_ctx, set_sz,
+ MLX5_SET_HCA_CAP_OP_MOD_ODP);
kfree(set_ctx);
+
return err;
}
@@ -577,6 +565,33 @@ query_ex:
return err;
}
+static int set_hca_cap(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err;
+
+ err = handle_hca_cap(dev);
+ if (err) {
+ dev_err(&pdev->dev, "handle_hca_cap failed\n");
+ goto out;
+ }
+
+ err = handle_hca_cap_atomic(dev);
+ if (err) {
+ dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
+ goto out;
+ }
+
+ err = handle_hca_cap_odp(dev);
+ if (err) {
+ dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+ goto out;
+ }
+
+out:
+ return err;
+}
+
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
struct mlx5_reg_host_endianness he_in;
@@ -969,21 +984,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
- err = handle_hca_cap(dev);
- if (err) {
- dev_err(&pdev->dev, "handle_hca_cap failed\n");
- goto reclaim_boot_pages;
- }
-
- err = handle_hca_cap_atomic(dev);
+ err = set_hca_cap(dev);
if (err) {
- dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
- goto reclaim_boot_pages;
- }
-
- err = handle_hca_cap_odp(dev);
- if (err) {
- dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+ dev_err(&pdev->dev, "set_hca_cap failed\n");
goto reclaim_boot_pages;
}
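
The ODP_CAP_SET_MAX() macro above replaces the open-coded MLX5_SET() calls and, more importantly, arranges for the SET_HCA_CAP command to be issued only when at least one max capability is non-zero. A self-contained sketch of that set-and-remember macro shape, with plain structs standing in for the mlx5 accessors:

    #include <stdbool.h>
    #include <stdio.h>

    struct caps { unsigned int ud_srq, xrc_write; };

    /* Copy a "max" capability into the current set only when it is
     * non-zero, and remember that a firmware SET command is needed. */
    #define CAP_SET_MAX(max, cur, field)              \
        do {                                          \
            unsigned int _res = (max)->field;         \
            if (_res) {                               \
                do_set = true;                        \
                (cur)->field = _res;                  \
            }                                         \
        } while (0)

    int main(void)
    {
        struct caps max = { .ud_srq = 1, .xrc_write = 0 };
        struct caps cur = { 0 };
        bool do_set = false;

        CAP_SET_MAX(&max, &cur, ud_srq);
        CAP_SET_MAX(&max, &cur, xrc_write);
        if (do_set)
            printf("issue SET_HCA_CAP: ud_srq=%u\n", cur.ud_srq);
        return 0;
    }
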
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 9529cf9623e3..7b331674622c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -188,8 +188,6 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, lag_master);
}
-int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num);
-
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
void mlx5_lag_update(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 21b7f05b16a5..361468e0435d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -317,10 +317,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
i2c_addr = MLX5_I2C_ADDR_LOW;
- if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
- i2c_addr = MLX5_I2C_ADDR_HIGH;
- offset -= MLX5_EEPROM_PAGE_LENGTH;
- }
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 370ca94b6775..b8ba74de9555 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -40,6 +40,9 @@
#include "mlx5_core.h"
#include "lib/eq.h"
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct);
+
static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
@@ -227,20 +230,49 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
wait_for_completion(&qp->common.free);
}
+static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct, bool need_cleanup)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ goto destroy;
+ } else {
+ mlx5_core_warn(
+ dev, "failed drain DCT 0x%x with error 0x%x\n",
+ qp->qpn, err);
+ return err;
+ }
+ }
+ wait_for_completion(&dct->drained);
+destroy:
+ if (need_cleanup)
+ destroy_resource_common(dev, &dct->mqp);
+ MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(destroy_dct_in, in, uid, qp->uid);
+ err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+ return err;
+}
+
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct,
- u32 *in, int inlen)
+ u32 *in, int inlen,
+ u32 *out, int outlen)
{
- u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
- u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
- u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
struct mlx5_core_qp *qp = &dct->mqp;
int err;
init_completion(&dct->drained);
MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err) {
mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
return err;
@@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
return 0;
err_cmd:
- MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
- MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
- MLX5_SET(destroy_dct_in, din, uid, qp->uid);
- mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
- (void *)&out, sizeof(dout));
+ _mlx5_core_destroy_dct(dev, dct, false);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
@@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct)
{
- u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
- struct mlx5_core_qp *qp = &dct->mqp;
- int err;
-
- err = mlx5_core_drain_dct(dev, dct);
- if (err) {
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- goto destroy;
- } else {
- mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
- return err;
- }
- }
- wait_for_completion(&dct->drained);
-destroy:
- destroy_resource_common(dev, &dct->mqp);
- MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
- MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
- MLX5_SET(destroy_dct_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
- return err;
+ return _mlx5_core_destroy_dct(dev, dct, true);
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
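
_mlx5_core_destroy_dct() folds the create-error path and the public destroy into a single helper: need_cleanup is false on the error path because the DCT never reached the resource table. The shape in miniature, with illustrative types rather than the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    struct dct { int id; };

    static void untrack(struct dct *d) { printf("untrack DCT %d\n", d->id); }
    static int fw_destroy(struct dct *d) { printf("destroy DCT %d\n", d->id); return 0; }

    /* One destroy helper for both callers: only a fully created DCT
     * must be removed from the tracking structures first. */
    static int destroy_dct(struct dct *d, bool need_cleanup)
    {
        if (need_cleanup)
            untrack(d);
        return fw_destroy(d);
    }

    int main(void)
    {
        struct dct d = { .id = 7 };

        destroy_dct(&d, false);        /* create-error path */
        return destroy_dct(&d, true);  /* normal destroy */
    }
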
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index d23d53c0e284..f26a4ca29363 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
- emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+ emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
if (!emad_wq)
return -ENOMEM;
mlxsw_core->emad_wq = emad_wq;
@@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void)
{
int err;
- mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
if (!mlxsw_wq)
return -ENOMEM;
- mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+ mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
mlxsw_core_driver_name);
if (!mlxsw_owq) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 7a15e932ed2f..c1c1965d7acc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
return 0;
default:
/* Do not consider thresholds for zero temperature. */
- if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) {
+ if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
*temp = 0;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 0b85c7252f9e..472f63f9fac5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -111,7 +111,6 @@ struct mlxsw_thermal {
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
struct mlxsw_thermal_module *tz_module_arr;
- unsigned int tz_module_num;
};
static inline u8 mlxsw_state_to_duty(int state)
@@ -711,6 +710,9 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
module_tz = &thermal->tz_module_arr[module];
+ /* Skip if parent is already set (case of port split). */
+ if (module_tz->parent)
+ return 0;
module_tz->module = module;
module_tz->parent = thermal;
memcpy(module_tz->trips, default_thermal_trips,
@@ -718,13 +720,7 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
/* Initialize all trip points. */
mlxsw_thermal_module_trips_reset(module_tz);
/* Update trip point according to the module data. */
- err = mlxsw_thermal_module_trips_update(dev, core, module_tz);
- if (err)
- return err;
-
- thermal->tz_module_num++;
-
- return 0;
+ return mlxsw_thermal_module_trips_update(dev, core, module_tz);
}
static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
@@ -732,6 +728,7 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
if (module_tz && module_tz->tzdev) {
mlxsw_thermal_module_tz_fini(module_tz->tzdev);
module_tz->tzdev = NULL;
+ module_tz->parent = NULL;
}
}
@@ -740,6 +737,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal)
{
unsigned int module_count = mlxsw_core_max_ports(core);
+ struct mlxsw_thermal_module *module_tz;
int i, err;
thermal->tz_module_arr = kcalloc(module_count,
@@ -754,8 +752,11 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
goto err_unreg_tz_module_arr;
}
- for (i = 0; i < thermal->tz_module_num; i++) {
- err = mlxsw_thermal_module_tz_init(&thermal->tz_module_arr[i]);
+ for (i = 0; i < module_count - 1; i++) {
+ module_tz = &thermal->tz_module_arr[i];
+ if (!module_tz->parent)
+ continue;
+ err = mlxsw_thermal_module_tz_init(module_tz);
if (err)
goto err_unreg_tz_module_arr;
}
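
The core_thermal.c changes key the module zone's lifetime off module_tz->parent: init becomes idempotent so split ports sharing one module set the zone up only once, and fini clears the pointer so the zone can be initialized again later. Roughly, as a standalone sketch (names illustrative):

    #include <stdio.h>

    struct module_tz { void *parent; int module; };

    /* First caller wins; later ports mapped to the same module no-op. */
    static int module_init(struct module_tz *tz, void *owner, int module)
    {
        if (tz->parent)
            return 0;
        tz->module = module;
        tz->parent = owner;
        return 1;
    }

    static void module_fini(struct module_tz *tz)
    {
        tz->parent = NULL;     /* allow a later re-init */
    }

    int main(void)
    {
        struct module_tz tz = { 0 };
        int owner;
        int first, second;

        first = module_init(&tz, &owner, 3);
        second = module_init(&tz, &owner, 3);
        printf("%d %d\n", first, second);  /* "1 0": second call skipped */
        module_fini(&tz);
        return 0;
    }
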
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 68bee9572a1b..00c390024350 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -34,6 +34,18 @@ struct mlxsw_m_port {
u8 module;
};
+static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_m->base_mac);
+ return 0;
+}
+
static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
{
return 0;
@@ -314,6 +326,12 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
mlxsw_m->core = mlxsw_core;
mlxsw_m->bus_info = mlxsw_bus_info;
+ err = mlxsw_m_base_mac_get(mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to get base mac\n");
+ return err;
+ }
+
err = mlxsw_m_ports_create(mlxsw_m);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index ffee38e36ce8..8648ca171254 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9eb63300c1d3..6b8aa3761899 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3126,11 +3126,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
if (err)
return err;
+ mlxsw_sp_port->link.autoneg = autoneg;
+
if (!netif_running(dev))
return 0;
- mlxsw_sp_port->link.autoneg = autoneg;
-
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
@@ -3316,7 +3316,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HIERARCY_TC,
i + 8, i,
- false, 0);
+ true, 100);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9a79b5e11597..d633bef5f105 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_EGRESS, 1},
{MLXSW_REG_SBXX_DIR_EGRESS, 2},
{MLXSW_REG_SBXX_DIR_EGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 15},
};
#define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
};
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
MLXSW_SP_SB_CM(0, 7, 4),
MLXSW_SP_SB_CM(0, 7, 4),
MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
MLXSW_SP_SB_CM(1, 0xff, 4),
};
@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(10000, 90000),
};
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 52fed8c7bf1e..902e766a8ed3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
/* A RIF is not created for macvlan netdevs. Their MAC is used to
* populate the FDB
*/
- if (netif_is_macvlan(dev))
+ if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
return 0;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f6ce386c3036..50111f228d77 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid_index;
int err = 0;
- if (switchdev_trans_ph_prepare(trans))
+ if (switchdev_trans_ph_commit(trans))
return 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index bd6e9014bc74..7849119d407a 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -142,6 +142,12 @@ struct ks8851_net {
static int msg_enable;
+/* SPI frame opcodes */
+#define KS_SPIOP_RD (0x00)
+#define KS_SPIOP_WR (0x40)
+#define KS_SPIOP_RXFIFO (0x80)
+#define KS_SPIOP_TXFIFO (0xC0)
+
/* shift for byte-enable data */
#define BYTE_EN(_x) ((_x) << 2)
@@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
/* set dma read address */
ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
- /* start the packet dma process, and set auto-dequeue rx */
- ks8851_wrreg16(ks, KS_RXQCR,
- ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+ /* start DMA access */
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
if (rxlen > 4) {
unsigned int rxalign;
@@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
}
}
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ /* end DMA access and dequeue packet */
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
}
}
@@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work)
static int ks8851_net_open(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ int ret;
+
+ ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev->name, ks);
+ if (ret < 0) {
+ netdev_err(dev, "failed to get irq\n");
+ return ret;
+ }
/* lock the card, even if we may not actually be doing anything
* else at the moment */
@@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
mutex_unlock(&ks->lock);
+ mii_check_link(&ks->mii);
return 0;
}
@@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev)
dev_kfree_skb(txb);
}
+ free_irq(dev->irq, ks);
+
return 0;
}
@@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi)
spi_set_drvdata(spi, ks);
+ netif_carrier_off(ks->netdev);
ndev->if_port = IF_PORT_100BASET;
ndev->netdev_ops = &ks8851_netdev_ops;
ndev->irq = spi->irq;
@@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi)
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
- ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- ndev->name, ks);
- if (ret < 0) {
- dev_err(&spi->dev, "failed to get irq\n");
- goto err_irq;
- }
-
ret = register_netdev(ndev);
if (ret) {
dev_err(&spi->dev, "failed to register network device\n");
@@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi)
return 0;
-
err_netdev:
- free_irq(ndev->irq, ks);
-
-err_irq:
+err_id:
if (gpio_is_valid(gpio))
gpio_set_value(gpio, 0);
-err_id:
regulator_disable(ks->vdd_reg);
err_reg:
regulator_disable(ks->vdd_io);
@@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi)
dev_info(&spi->dev, "remove\n");
unregister_netdev(priv->netdev);
- free_irq(spi->irq, priv);
if (gpio_is_valid(priv->gpio))
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
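
The ks8851.c change moves IRQ ownership from probe/remove into open/stop, so the handler can no longer run while the interface is down. The resulting pattern, sketched with the kernel APIs used in the patch (error handling trimmed; the example_* names are placeholders):

    static int example_open(struct net_device *dev)
    {
        struct example_priv *priv = netdev_priv(dev);
        int ret;

        ret = request_threaded_irq(dev->irq, NULL, example_irq,
                                   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                   dev->name, priv);
        if (ret < 0)
            return ret;
        /* ... power up and start the MAC ... */
        return 0;
    }

    static int example_stop(struct net_device *dev)
    {
        /* ... stop the MAC, flush pending work ... */
        free_irq(dev->irq, netdev_priv(dev));
        return 0;
    }
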
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 852256ef1f22..23da1e3ee429 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -11,9 +11,15 @@
*/
#define KS_CCR 0x08
+#define CCR_LE (1 << 10) /* KSZ8851-16MLL */
#define CCR_EEPROM (1 << 9)
-#define CCR_SPI (1 << 8)
-#define CCR_32PIN (1 << 0)
+#define CCR_SPI (1 << 8) /* KSZ8851SNL */
+#define CCR_8BIT (1 << 7) /* KSZ8851-16MLL */
+#define CCR_16BIT (1 << 6) /* KSZ8851-16MLL */
+#define CCR_32BIT (1 << 5) /* KSZ8851-16MLL */
+#define CCR_SHARED (1 << 4) /* KSZ8851-16MLL */
+#define CCR_48PIN (1 << 1) /* KSZ8851-16MLL */
+#define CCR_32PIN (1 << 0) /* KSZ8851SNL */
/* MAC address registers */
#define KS_MAR(_m) (0x15 - (_m))
@@ -112,13 +118,13 @@
#define RXCR1_RXE (1 << 0)
#define KS_RXCR2 0x76
-#define RXCR2_SRDBL_MASK (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT (5)
-#define RXCR2_SRDBL_4B (0x0 << 5)
-#define RXCR2_SRDBL_8B (0x1 << 5)
-#define RXCR2_SRDBL_16B (0x2 << 5)
-#define RXCR2_SRDBL_32B (0x3 << 5)
-#define RXCR2_SRDBL_FRAME (0x4 << 5)
+#define RXCR2_SRDBL_MASK (0x7 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_SHIFT (5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_4B (0x0 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_8B (0x1 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_16B (0x2 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_32B (0x3 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_FRAME (0x4 << 5) /* KSZ8851SNL */
#define RXCR2_IUFFP (1 << 4)
#define RXCR2_RXIUFCEZ (1 << 3)
#define RXCR2_UDPLFE (1 << 2)
@@ -143,8 +149,10 @@
#define RXFSHR_RXCE (1 << 0)
#define KS_RXFHBCR 0x7E
+#define RXFHBCR_CNT_MASK (0xfff << 0)
+
#define KS_TXQCR 0x80
-#define TXQCR_AETFE (1 << 2)
+#define TXQCR_AETFE (1 << 2) /* KSZ8851SNL */
#define TXQCR_TXQMAM (1 << 1)
#define TXQCR_METFE (1 << 0)
@@ -167,6 +175,10 @@
#define KS_RXFDPR 0x86
#define RXFDPR_RXFPAI (1 << 14)
+#define RXFDPR_WST (1 << 12) /* KSZ8851-16MLL */
+#define RXFDPR_EMS (1 << 11) /* KSZ8851-16MLL */
+#define RXFDPR_RXFP_MASK (0x7ff << 0)
+#define RXFDPR_RXFP_SHIFT (0)
#define KS_RXDTTR 0x8C
#define KS_RXDBCTR 0x8E
@@ -184,7 +196,7 @@
#define IRQ_RXMPDI (1 << 4)
#define IRQ_LDI (1 << 3)
#define IRQ_EDI (1 << 2)
-#define IRQ_SPIBEI (1 << 1)
+#define IRQ_SPIBEI (1 << 1) /* KSZ8851SNL */
#define IRQ_DEDI (1 << 0)
#define KS_RXFCTR 0x9C
@@ -257,42 +269,37 @@
#define KS_P1ANLPR 0xEE
#define KS_P1SCLMD 0xF4
-#define P1SCLMD_LEDOFF (1 << 15)
-#define P1SCLMD_TXIDS (1 << 14)
-#define P1SCLMD_RESTARTAN (1 << 13)
-#define P1SCLMD_DISAUTOMDIX (1 << 10)
-#define P1SCLMD_FORCEMDIX (1 << 9)
-#define P1SCLMD_AUTONEGEN (1 << 7)
-#define P1SCLMD_FORCE100 (1 << 6)
-#define P1SCLMD_FORCEFDX (1 << 5)
-#define P1SCLMD_ADV_FLOW (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX (1 << 0)
#define KS_P1CR 0xF6
-#define P1CR_HP_MDIX (1 << 15)
-#define P1CR_REV_POL (1 << 13)
-#define P1CR_OP_100M (1 << 10)
-#define P1CR_OP_FDX (1 << 9)
-#define P1CR_OP_MDI (1 << 7)
-#define P1CR_AN_DONE (1 << 6)
-#define P1CR_LINK_GOOD (1 << 5)
-#define P1CR_PNTR_FLOW (1 << 4)
-#define P1CR_PNTR_100BT_FDX (1 << 3)
-#define P1CR_PNTR_100BT_HDX (1 << 2)
-#define P1CR_PNTR_10BT_FDX (1 << 1)
-#define P1CR_PNTR_10BT_HDX (1 << 0)
+#define P1CR_LEDOFF (1 << 15)
+#define P1CR_TXIDS (1 << 14)
+#define P1CR_RESTARTAN (1 << 13)
+#define P1CR_DISAUTOMDIX (1 << 10)
+#define P1CR_FORCEMDIX (1 << 9)
+#define P1CR_AUTONEGEN (1 << 7)
+#define P1CR_FORCE100 (1 << 6)
+#define P1CR_FORCEFDX (1 << 5)
+#define P1CR_ADV_FLOW (1 << 4)
+#define P1CR_ADV_100BT_FDX (1 << 3)
+#define P1CR_ADV_100BT_HDX (1 << 2)
+#define P1CR_ADV_10BT_FDX (1 << 1)
+#define P1CR_ADV_10BT_HDX (1 << 0)
+
+#define KS_P1SR 0xF8
+#define P1SR_HP_MDIX (1 << 15)
+#define P1SR_REV_POL (1 << 13)
+#define P1SR_OP_100M (1 << 10)
+#define P1SR_OP_FDX (1 << 9)
+#define P1SR_OP_MDI (1 << 7)
+#define P1SR_AN_DONE (1 << 6)
+#define P1SR_LINK_GOOD (1 << 5)
+#define P1SR_PNTR_FLOW (1 << 4)
+#define P1SR_PNTR_100BT_FDX (1 << 3)
+#define P1SR_PNTR_100BT_HDX (1 << 2)
+#define P1SR_PNTR_10BT_FDX (1 << 1)
+#define P1SR_PNTR_10BT_HDX (1 << 0)
/* TX Frame control */
-
#define TXFR_TXIC (1 << 15)
#define TXFR_TXFID_MASK (0x3f << 0)
#define TXFR_TXFID_SHIFT (0)
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD (0x00)
-#define KS_SPIOP_WR (0x40)
-#define KS_SPIOP_RXFIFO (0x80)
-#define KS_SPIOP_TXFIFO (0xC0)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 35f8c9ef204d..c946841c0a06 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,6 +40,8 @@
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include "ks8851.h"
+
#define DRV_NAME "ks8851_mll"
static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
@@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define TX_BUF_SIZE 2000
#define RX_BUF_SIZE 2000
-#define KS_CCR 0x08
-#define CCR_EEPROM (1 << 9)
-#define CCR_SPI (1 << 8)
-#define CCR_8BIT (1 << 7)
-#define CCR_16BIT (1 << 6)
-#define CCR_32BIT (1 << 5)
-#define CCR_SHARED (1 << 4)
-#define CCR_32PIN (1 << 0)
-
-/* MAC address registers */
-#define KS_MARL 0x10
-#define KS_MARM 0x12
-#define KS_MARH 0x14
-
-#define KS_OBCR 0x20
-#define OBCR_ODS_16MA (1 << 6)
-
-#define KS_EEPCR 0x22
-#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB (1 << 3)
-#define EEPCR_EEDO (1 << 2)
-#define EEPCR_EESCK (1 << 1)
-#define EEPCR_EECS (1 << 0)
-
-#define KS_MBIR 0x24
-#define MBIR_TXMBF (1 << 12)
-#define MBIR_TXMBFA (1 << 11)
-#define MBIR_RXMBF (1 << 4)
-#define MBIR_RXMBFA (1 << 3)
-
-#define KS_GRR 0x26
-#define GRR_QMU (1 << 1)
-#define GRR_GSR (1 << 0)
-
-#define KS_WFCR 0x2A
-#define WFCR_MPRXE (1 << 7)
-#define WFCR_WF3E (1 << 3)
-#define WFCR_WF2E (1 << 2)
-#define WFCR_WF1E (1 << 1)
-#define WFCR_WF0E (1 << 0)
-
-#define KS_WF0CRC0 0x30
-#define KS_WF0CRC1 0x32
-#define KS_WF0BM0 0x34
-#define KS_WF0BM1 0x36
-#define KS_WF0BM2 0x38
-#define KS_WF0BM3 0x3A
-
-#define KS_WF1CRC0 0x40
-#define KS_WF1CRC1 0x42
-#define KS_WF1BM0 0x44
-#define KS_WF1BM1 0x46
-#define KS_WF1BM2 0x48
-#define KS_WF1BM3 0x4A
-
-#define KS_WF2CRC0 0x50
-#define KS_WF2CRC1 0x52
-#define KS_WF2BM0 0x54
-#define KS_WF2BM1 0x56
-#define KS_WF2BM2 0x58
-#define KS_WF2BM3 0x5A
-
-#define KS_WF3CRC0 0x60
-#define KS_WF3CRC1 0x62
-#define KS_WF3BM0 0x64
-#define KS_WF3BM1 0x66
-#define KS_WF3BM2 0x68
-#define KS_WF3BM3 0x6A
-
-#define KS_TXCR 0x70
-#define TXCR_TCGICMP (1 << 8)
-#define TXCR_TCGUDP (1 << 7)
-#define TXCR_TCGTCP (1 << 6)
-#define TXCR_TCGIP (1 << 5)
-#define TXCR_FTXQ (1 << 4)
-#define TXCR_TXFCE (1 << 3)
-#define TXCR_TXPE (1 << 2)
-#define TXCR_TXCRC (1 << 1)
-#define TXCR_TXE (1 << 0)
-
-#define KS_TXSR 0x72
-#define TXSR_TXLC (1 << 13)
-#define TXSR_TXMC (1 << 12)
-#define TXSR_TXFID_MASK (0x3f << 0)
-#define TXSR_TXFID_SHIFT (0)
-#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
-
-
-#define KS_RXCR1 0x74
-#define RXCR1_FRXQ (1 << 15)
-#define RXCR1_RXUDPFCC (1 << 14)
-#define RXCR1_RXTCPFCC (1 << 13)
-#define RXCR1_RXIPFCC (1 << 12)
-#define RXCR1_RXPAFMA (1 << 11)
-#define RXCR1_RXFCE (1 << 10)
-#define RXCR1_RXEFE (1 << 9)
-#define RXCR1_RXMAFMA (1 << 8)
-#define RXCR1_RXBE (1 << 7)
-#define RXCR1_RXME (1 << 6)
-#define RXCR1_RXUE (1 << 5)
-#define RXCR1_RXAE (1 << 4)
-#define RXCR1_RXINVF (1 << 1)
-#define RXCR1_RXE (1 << 0)
#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
RXCR1_RXMAFMA | RXCR1_RXPAFMA)
-
-#define KS_RXCR2 0x76
-#define RXCR2_SRDBL_MASK (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT (5)
-#define RXCR2_SRDBL_4B (0x0 << 5)
-#define RXCR2_SRDBL_8B (0x1 << 5)
-#define RXCR2_SRDBL_16B (0x2 << 5)
-#define RXCR2_SRDBL_32B (0x3 << 5)
-/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
-#define RXCR2_IUFFP (1 << 4)
-#define RXCR2_RXIUFCEZ (1 << 3)
-#define RXCR2_UDPLFE (1 << 2)
-#define RXCR2_RXICMPFCC (1 << 1)
-#define RXCR2_RXSAF (1 << 0)
-
-#define KS_TXMIR 0x78
-
-#define KS_RXFHSR 0x7C
-#define RXFSHR_RXFV (1 << 15)
-#define RXFSHR_RXICMPFCS (1 << 13)
-#define RXFSHR_RXIPFCS (1 << 12)
-#define RXFSHR_RXTCPFCS (1 << 11)
-#define RXFSHR_RXUDPFCS (1 << 10)
-#define RXFSHR_RXBF (1 << 7)
-#define RXFSHR_RXMF (1 << 6)
-#define RXFSHR_RXUF (1 << 5)
-#define RXFSHR_RXMR (1 << 4)
-#define RXFSHR_RXFT (1 << 3)
-#define RXFSHR_RXFTL (1 << 2)
-#define RXFSHR_RXRF (1 << 1)
-#define RXFSHR_RXCE (1 << 0)
-#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
- RXFSHR_RXFTL | RXFSHR_RXMR |\
- RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
- RXFSHR_RXTCPFCS)
-#define KS_RXFHBCR 0x7E
-#define RXFHBCR_CNT_MASK 0x0FFF
-
-#define KS_TXQCR 0x80
-#define TXQCR_AETFE (1 << 2)
-#define TXQCR_TXQMAM (1 << 1)
-#define TXQCR_METFE (1 << 0)
-
-#define KS_RXQCR 0x82
-#define RXQCR_RXDTTS (1 << 12)
-#define RXQCR_RXDBCTS (1 << 11)
-#define RXQCR_RXFCTS (1 << 10)
-#define RXQCR_RXIPHTOE (1 << 9)
-#define RXQCR_RXDTTE (1 << 7)
-#define RXQCR_RXDBCTE (1 << 6)
-#define RXQCR_RXFCTE (1 << 5)
-#define RXQCR_ADRFE (1 << 4)
-#define RXQCR_SDA (1 << 3)
-#define RXQCR_RRXEF (1 << 0)
#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
-#define KS_TXFDPR 0x84
-#define TXFDPR_TXFPAI (1 << 14)
-#define TXFDPR_TXFP_MASK (0x7ff << 0)
-#define TXFDPR_TXFP_SHIFT (0)
-
-#define KS_RXFDPR 0x86
-#define RXFDPR_RXFPAI (1 << 14)
-
-#define KS_RXDTTR 0x8C
-#define KS_RXDBCTR 0x8E
-
-#define KS_IER 0x90
-#define KS_ISR 0x92
-#define IRQ_LCI (1 << 15)
-#define IRQ_TXI (1 << 14)
-#define IRQ_RXI (1 << 13)
-#define IRQ_RXOI (1 << 11)
-#define IRQ_TXPSI (1 << 9)
-#define IRQ_RXPSI (1 << 8)
-#define IRQ_TXSAI (1 << 6)
-#define IRQ_RXWFDI (1 << 5)
-#define IRQ_RXMPDI (1 << 4)
-#define IRQ_LDI (1 << 3)
-#define IRQ_EDI (1 << 2)
-#define IRQ_SPIBEI (1 << 1)
-#define IRQ_DEDI (1 << 0)
-
-#define KS_RXFCTR 0x9C
-#define RXFCTR_THRESHOLD_MASK 0x00FF
-
-#define KS_RXFC 0x9D
-#define RXFCTR_RXFC_MASK (0xff << 8)
-#define RXFCTR_RXFC_SHIFT (8)
-#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
-#define RXFCTR_RXFCT_MASK (0xff << 0)
-#define RXFCTR_RXFCT_SHIFT (0)
-
-#define KS_TXNTFSR 0x9E
-
-#define KS_MAHTR0 0xA0
-#define KS_MAHTR1 0xA2
-#define KS_MAHTR2 0xA4
-#define KS_MAHTR3 0xA6
-
-#define KS_FCLWR 0xB0
-#define KS_FCHWR 0xB2
-#define KS_FCOWR 0xB4
-
-#define KS_CIDER 0xC0
-#define CIDER_ID 0x8870
-#define CIDER_REV_MASK (0x7 << 1)
-#define CIDER_REV_SHIFT (1)
-#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
-
-#define KS_CGCR 0xC6
-#define KS_IACR 0xC8
-#define IACR_RDEN (1 << 12)
-#define IACR_TSEL_MASK (0x3 << 10)
-#define IACR_TSEL_SHIFT (10)
-#define IACR_TSEL_MIB (0x3 << 10)
-#define IACR_ADDR_MASK (0x1f << 0)
-#define IACR_ADDR_SHIFT (0)
-
-#define KS_IADLR 0xD0
-#define KS_IAHDR 0xD2
-
-#define KS_PMECR 0xD4
-#define PMECR_PME_DELAY (1 << 14)
-#define PMECR_PME_POL (1 << 12)
-#define PMECR_WOL_WAKEUP (1 << 11)
-#define PMECR_WOL_MAGICPKT (1 << 10)
-#define PMECR_WOL_LINKUP (1 << 9)
-#define PMECR_WOL_ENERGY (1 << 8)
-#define PMECR_AUTO_WAKE_EN (1 << 7)
-#define PMECR_WAKEUP_NORMAL (1 << 6)
-#define PMECR_WKEVT_MASK (0xf << 2)
-#define PMECR_WKEVT_SHIFT (2)
-#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
-#define PMECR_WKEVT_ENERGY (0x1 << 2)
-#define PMECR_WKEVT_LINK (0x2 << 2)
-#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
-#define PMECR_WKEVT_FRAME (0x8 << 2)
-#define PMECR_PM_MASK (0x3 << 0)
-#define PMECR_PM_SHIFT (0)
-#define PMECR_PM_NORMAL (0x0 << 0)
-#define PMECR_PM_ENERGY (0x1 << 0)
-#define PMECR_PM_SOFTDOWN (0x2 << 0)
-#define PMECR_PM_POWERSAVE (0x3 << 0)
-
-/* Standard MII PHY data */
-#define KS_P1MBCR 0xE4
-#define P1MBCR_FORCE_FDX (1 << 8)
-
-#define KS_P1MBSR 0xE6
-#define P1MBSR_AN_COMPLETE (1 << 5)
-#define P1MBSR_AN_CAPABLE (1 << 3)
-#define P1MBSR_LINK_UP (1 << 2)
-
-#define KS_PHY1ILR 0xE8
-#define KS_PHY1IHR 0xEA
-#define KS_P1ANAR 0xEC
-#define KS_P1ANLPR 0xEE
-
-#define KS_P1SCLMD 0xF4
-#define P1SCLMD_LEDOFF (1 << 15)
-#define P1SCLMD_TXIDS (1 << 14)
-#define P1SCLMD_RESTARTAN (1 << 13)
-#define P1SCLMD_DISAUTOMDIX (1 << 10)
-#define P1SCLMD_FORCEMDIX (1 << 9)
-#define P1SCLMD_AUTONEGEN (1 << 7)
-#define P1SCLMD_FORCE100 (1 << 6)
-#define P1SCLMD_FORCEFDX (1 << 5)
-#define P1SCLMD_ADV_FLOW (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX (1 << 0)
-
-#define KS_P1CR 0xF6
-#define P1CR_HP_MDIX (1 << 15)
-#define P1CR_REV_POL (1 << 13)
-#define P1CR_OP_100M (1 << 10)
-#define P1CR_OP_FDX (1 << 9)
-#define P1CR_OP_MDI (1 << 7)
-#define P1CR_AN_DONE (1 << 6)
-#define P1CR_LINK_GOOD (1 << 5)
-#define P1CR_PNTR_FLOW (1 << 4)
-#define P1CR_PNTR_100BT_FDX (1 << 3)
-#define P1CR_PNTR_100BT_HDX (1 << 2)
-#define P1CR_PNTR_10BT_FDX (1 << 1)
-#define P1CR_PNTR_10BT_HDX (1 << 0)
-
-/* TX Frame control */
-
-#define TXFR_TXIC (1 << 15)
-#define TXFR_TXFID_MASK (0x3f << 0)
-#define TXFR_TXFID_SHIFT (0)
-
-#define KS_P1SR 0xF8
-#define P1SR_HP_MDIX (1 << 15)
-#define P1SR_REV_POL (1 << 13)
-#define P1SR_OP_100M (1 << 10)
-#define P1SR_OP_FDX (1 << 9)
-#define P1SR_OP_MDI (1 << 7)
-#define P1SR_AN_DONE (1 << 6)
-#define P1SR_LINK_GOOD (1 << 5)
-#define P1SR_PNTR_FLOW (1 << 4)
-#define P1SR_PNTR_100BT_FDX (1 << 3)
-#define P1SR_PNTR_100BT_HDX (1 << 2)
-#define P1SR_PNTR_10BT_FDX (1 << 1)
-#define P1SR_PNTR_10BT_HDX (1 << 0)
-
#define ENUM_BUS_NONE 0
#define ENUM_BUS_8BIT 1
#define ENUM_BUS_16BIT 2
@@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks)
ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
- ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
+ ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
/* Setup RxQ Command Control (RXQCR) */
ks->rc_rxqcr = RXQCR_CMD_CNTL;
@@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks)
*/
w = ks_rdreg16(ks, KS_P1MBCR);
- w &= ~P1MBCR_FORCE_FDX;
+ w &= ~BMCR_FULLDPLX;
ks_wrreg16(ks, KS_P1MBCR, w);
w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
@@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev)
ks_setup_int(ks);
data = ks_rdreg16(ks, KS_OBCR);
- ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
+ ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
/* overwriting the default MAC address */
if (pdev->dev.of_node) {
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 4d1b4a24907f..13e6bf13ac4d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
if (adapter->csr.flags &
LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
- flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
+ flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
@@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
/* map TX interrupt to vector */
int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
- if (flags &
- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
- int_vec_en_auto_clr |= INT_VEC_EN_(vector);
- lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
- int_vec_en_auto_clr);
- }
/* Remove TX interrupt from shared mask */
intr->vector_list[0].int_mask &= ~int_bit;
@@ -1902,7 +1895,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
return ((++index) % rx->ring_size);
}
-static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
+static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
+{
+ int length = 0;
+
+ length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
+ return __netdev_alloc_skb(rx->adapter->netdev,
+ length, GFP_ATOMIC | GFP_DMA);
+}
+
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+ struct sk_buff *skb)
{
struct lan743x_rx_buffer_info *buffer_info;
struct lan743x_rx_descriptor *descriptor;
@@ -1911,9 +1914,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
- length,
- GFP_ATOMIC | GFP_DMA);
+ buffer_info->skb = skb;
if (!(buffer_info->skb))
return -ENOMEM;
buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
@@ -2060,8 +2061,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
/* packet is available */
if (first_index == last_index) {
/* single buffer packet */
+ struct sk_buff *new_skb = NULL;
int packet_length;
+ new_skb = lan743x_rx_allocate_skb(rx);
+ if (!new_skb) {
+ /* failed to allocate next skb.
+ * Memory is very low.
+ * Drop this packet and reuse buffer.
+ */
+ lan743x_rx_reuse_ring_element(rx, first_index);
+ goto process_extension;
+ }
+
buffer_info = &rx->buffer_info[first_index];
skb = buffer_info->skb;
descriptor = &rx->ring_cpu_ptr[first_index];
@@ -2081,7 +2093,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
skb_put(skb, packet_length - 4);
skb->protocol = eth_type_trans(skb,
rx->adapter->netdev);
- lan743x_rx_allocate_ring_element(rx, first_index);
+ lan743x_rx_init_ring_element(rx, first_index, new_skb);
} else {
int index = first_index;
@@ -2094,26 +2106,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
if (first_index <= last_index) {
while ((index >= first_index) &&
(index <= last_index)) {
- lan743x_rx_release_ring_element(rx,
- index);
- lan743x_rx_allocate_ring_element(rx,
- index);
+ lan743x_rx_reuse_ring_element(rx,
+ index);
index = lan743x_rx_next_index(rx,
index);
}
} else {
while ((index >= first_index) ||
(index <= last_index)) {
- lan743x_rx_release_ring_element(rx,
- index);
- lan743x_rx_allocate_ring_element(rx,
- index);
+ lan743x_rx_reuse_ring_element(rx,
+ index);
index = lan743x_rx_next_index(rx,
index);
}
}
}
+process_extension:
if (extension_index >= 0) {
descriptor = &rx->ring_cpu_ptr[extension_index];
buffer_info = &rx->buffer_info[extension_index];
@@ -2290,7 +2299,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
- ret = lan743x_rx_allocate_ring_element(rx, index);
+ struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
+
+ ret = lan743x_rx_init_ring_element(rx, index, new_skb);
if (ret)
goto cleanup;
}
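
The lan743x RX rework enforces one rule: a filled ring buffer is handed to the stack only after its replacement has been allocated; if allocation fails, the packet is dropped and the old buffer is reused, so the ring never loses slots under memory pressure. A tiny userspace model of that rule:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hand out the filled buffer only if a replacement is secured
     * first; on failure, keep the old buffer in the ring. */
    static char *process_slot(char **slot)
    {
        char *replacement = malloc(2048);
        char *filled;

        if (!replacement)
            return NULL;       /* drop: the ring keeps its slot */
        filled = *slot;
        *slot = replacement;
        return filled;         /* safe to pass up the stack */
    }

    int main(void)
    {
        char *slot = malloc(2048);
        char *pkt = process_slot(&slot);

        printf("%s\n", pkt ? "delivered" : "dropped");
        free(pkt);
        free(slot);
        return 0;
    }
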
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a1d0d6e42533..d715ef4fc92f 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
struct netdev_hw_addr *hw_addr)
{
struct ocelot *ocelot = port->ocelot;
- struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+ struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
if (!ha)
return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
ETH_GSTRING_LEN);
}
-static void ocelot_check_stats(struct work_struct *work)
+static void ocelot_update_stats(struct ocelot *ocelot)
{
- struct delayed_work *del_work = to_delayed_work(work);
- struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
int i, j;
mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
}
}
- cancel_delayed_work(&ocelot->stats_work);
+ mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct ocelot *ocelot = container_of(del_work, struct ocelot,
+ stats_work);
+
+ ocelot_update_stats(ocelot);
+
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
-
- mutex_unlock(&ocelot->stats_lock);
}
static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
int i;
/* check and update now */
- ocelot_check_stats(&ocelot->stats_work.work);
+ ocelot_update_stats(ocelot);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
- INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+ INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
return 0;
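
ocelot_update_stats() is split out of the delayed-work callback so the ethtool path can refresh counters synchronously instead of faking a work_struct argument, and the work item becomes a thin wrapper that re-arms itself. Sketched shape (the struct is illustrative, not the ocelot one):

    struct stats_owner {
        struct mutex lock;
        struct workqueue_struct *wq;
        struct delayed_work stats_work;
    };

    static void stats_update(struct stats_owner *o)
    {
        mutex_lock(&o->lock);
        /* ... read hardware counters ... */
        mutex_unlock(&o->lock);
    }

    /* Thin periodic wrapper: do the update, then re-arm. */
    static void stats_work_fn(struct work_struct *work)
    {
        struct stats_owner *o = container_of(to_delayed_work(work),
                                             struct stats_owner, stats_work);

        stats_update(o);
        queue_delayed_work(o->wq, &o->stats_work, HZ);
    }
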
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index e0340f778d8f..d8b7fba96d58 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1439,7 +1439,6 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
tx->queue_active = 0;
put_be32(htonl(1), tx->send_stop);
mb();
- mmiowb();
}
__netif_tx_unlock(dev_queue);
}
@@ -2861,7 +2860,6 @@ again:
tx->queue_active = 1;
put_be32(htonl(1), tx->send_go);
mb();
- mmiowb();
}
tx->pkt_start++;
if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index feda9644289d..3b2ae1a21678 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4153,8 +4153,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
writeq(val64, &tx_fifo->List_Control);
- mmiowb();
-
put_off++;
if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
put_off = 0;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 7cde387e5ec6..51cd57ab3d95 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
dma_object->addr))) {
vxge_os_dma_free(devh->pdev, memblock,
&dma_object->acc_handle);
+ memblock = NULL;
goto exit;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index b877acec5cde..1d334f2e0a56 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1826,7 +1826,6 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget)
vxge_hw_channel_msix_unmask(
(struct __vxge_hw_channel *)ring->handle,
ring->rx_vector_no);
- mmiowb();
}
/* We are copying and returning the local variable, in case if after
@@ -2234,8 +2233,6 @@ static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
fifo->tx_vector_no);
- mmiowb();
-
return IRQ_HANDLED;
}
@@ -2272,14 +2269,12 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
*/
vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
- mmiowb();
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
msix_id);
- mmiowb();
continue;
}
vxge_debug_intr(VXGE_ERR,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 59e77e3086bb..709d20d9938f 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1399,11 +1399,7 @@ static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
&fifo->nofl_db->control_0);
- mmiowb();
-
writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
-
- mmiowb();
}
/**
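
The mmiowb() removals in the myri10ge, s2io and vxge hunks above follow the kernel-wide mmiowb rework: on the architectures that need the barrier, spin_unlock() now provides the MMIO write ordering itself, so doorbell writes done under a spinlock can simply drop the call. A kernel-context sketch (struct and fields illustrative):

    struct example_ring {
        spinlock_t lock;
        void __iomem *doorbell;
    };

    static void ring_doorbell(struct example_ring *ring, u32 val)
    {
        spin_lock(&ring->lock);
        writel(val, ring->doorbell);  /* posted MMIO write */
        /* no mmiowb() needed: the unlock orders the write */
        spin_unlock(&ring->lock);
    }
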
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index 9852080cf454..ff3913085665 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -39,7 +39,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
}
if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
knode->sel->offoff || knode->fshift) {
- NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+ NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported");
return false;
}
if (knode->sel->hoff || knode->sel->hmask) {
@@ -78,7 +78,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
k = &knode->sel->keys[0];
if (k->offmask) {
- NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+ NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported");
return false;
}
if (k->off) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index eeda4ed98333..e336f6ee94f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
tmp_push_vlan_tci =
FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
- NFP_FL_PUSH_VLAN_CFI;
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4fcaf11ed56e..0ed51e79db00 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,7 +26,7 @@
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
@@ -82,7 +82,6 @@
#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e03c8ef2c28c..9b8b843d0340 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
- if (match.key->vlan_id || match.key->vlan_priority) {
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.key->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.key->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- ext->tci = cpu_to_be16(tmp_tci);
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.mask->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.mask->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- msk->tci = cpu_to_be16(tmp_tci);
- }
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.key->vlan_id);
+ ext->tci = cpu_to_be16(tmp_tci);
+
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.mask->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.mask->vlan_id);
+ msk->tci = cpu_to_be16(tmp_tci);
}
}
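
NFP_FLOWER_MASK_VLAN_PRESENT repurposes the bit formerly handled as CFI into a "VLAN tag present" flag that is set unconditionally, so matching a tagged frame with VID 0 and priority 0 no longer produces an all-zero TCI that matches nothing. In miniature (userspace, illustrative macros):

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_PRESENT  (1u << 12)             /* formerly the CFI bit */
    #define VLAN_PRIO(x)  (((x) & 0x7u) << 13)
    #define VLAN_VID(x)   ((x) & 0xfffu)

    /* With PRESENT set unconditionally, "any tagged frame"
     * (vid 0, prio 0) still yields a non-zero TCI. */
    static uint16_t build_tci(unsigned int vid, unsigned int prio)
    {
        return VLAN_PRESENT | VLAN_PRIO(prio) | VLAN_VID(vid);
    }

    int main(void)
    {
        printf("tci=0x%04x\n", (unsigned int)build_tci(0, 0)); /* 0x1000 */
        return 0;
    }
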
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 166d7f71442e..372adea10e14 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -392,7 +392,7 @@
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
-#define NFP_NET_CFG_MBOX_SIMPLE_LEN 0x12
+#define NFP_NET_CFG_MBOX_SIMPLE_LEN 12
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d2c803bb4e56..94d228c04496 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = dev_queue_xmit(skb);
nfp_repr_inc_tx_stats(netdev, len, ret);
- return ret;
+ return NETDEV_TX_OK;
}
static int nfp_repr_stop(struct net_device *netdev)
@@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
- netdev->priv_flags |= IFF_NO_QUEUE;
+ netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
netdev->features |= NETIF_F_LLTX;
if (nfp_app_has_tc(app)) {
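
nfp_repr_xmit() used to leak dev_queue_xmit()'s return value out of .ndo_start_xmit, which must return a netdev_tx_t; the skb is consumed either way, so the result now only feeds the stats. The corrected shape as a sketch (the stats helper is a placeholder):

    static netdev_tx_t example_repr_xmit(struct sk_buff *skb,
                                         struct net_device *netdev)
    {
        unsigned int len = skb->len;   /* capture before the skb is gone */
        int ret;

        ret = dev_queue_xmit(skb);     /* consumes the skb on any path */
        example_inc_tx_stats(netdev, len, ret);  /* illustrative helper */
        return NETDEV_TX_OK;           /* never an errno or xmit code */
    }
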
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 552d930e3940..528f6b4fd16a 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -27,7 +27,6 @@
#define DRV_VERSION "1.01"
const char pch_driver_version[] = DRV_VERSION;
-#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
#define PCH_GBE_MAR_ENTRIES 16
#define PCH_GBE_SHORT_PKT 64
#define DSC_INIT16 0xC000
@@ -37,11 +36,9 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_PCI_BAR 1
#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
-/* Macros for ML7223 */
-#define PCI_VENDOR_ID_ROHM 0x10db
-#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
+#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802
-/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
#define PCH_GBE_TX_WEIGHT 64
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 43a57ec296fd..127c89b22ef0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -431,12 +431,16 @@ struct qed_qm_info {
u8 num_pf_rls;
};
+#define QED_OVERFLOW_BIT 1
+
struct qed_db_recovery_info {
struct list_head list;
/* Lock to protect the doorbell recovery mechanism list */
spinlock_t lock;
+ bool dorq_attn;
u32 db_recovery_counter;
+ unsigned long overflow;
};
struct storm_stats {
@@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
- enum qed_db_rec_exec db_exec);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
/* Other Linux specific common definitions */
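
The new overflow field is an unsigned long driven by the atomic bitops: the attention handler records an overflow with set_bit() and the periodic recovery handler consumes it exactly once with test_and_clear_bit(). A userspace analog of that hand-off using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    #define OVERFLOW_BIT 1

    static atomic_ulong flags;

    /* Attention context: record the overflow without a lock. */
    static void note_overflow(void)
    {
        atomic_fetch_or(&flags, 1UL << OVERFLOW_BIT);
    }

    /* Periodic handler: consume the indication exactly once. */
    static int overflow_pending(void)
    {
        unsigned long old = atomic_fetch_and(&flags, ~(1UL << OVERFLOW_BIT));

        return (old >> OVERFLOW_BIT) & 1;
    }

    int main(void)
    {
        int first, second;

        note_overflow();
        first = overflow_pending();
        second = overflow_pending();
        printf("%d %d\n", first, second);  /* 1 0 */
        return 0;
    }
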
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 9df8c4b3b54e..866cdc86a3f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
/* Doorbell address sanity (address within doorbell bar range) */
static bool qed_db_rec_sanity(struct qed_dev *cdev,
- void __iomem *db_addr, void *db_data)
+ void __iomem *db_addr,
+ enum qed_db_rec_width db_width,
+ void *db_data)
{
+ u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
/* Make sure doorbell address is within the doorbell bar */
if (db_addr < cdev->doorbells ||
- (u8 __iomem *)db_addr >
+ (u8 __iomem *)db_addr + width >
(u8 __iomem *)cdev->doorbells + cdev->db_size) {
WARN(true,
"Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
}
/* Sanitize doorbell address */
- if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+ if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
return -EINVAL;
/* Obtain hwfn from doorbell address */
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
return 0;
}
- /* Sanitize doorbell address */
- if (!qed_db_rec_sanity(cdev, db_addr, db_data))
- return -EINVAL;
-
/* Obtain hwfn from doorbell address */
p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
- struct qed_db_recovery_entry *db_entry,
- enum qed_db_rec_exec db_exec)
-{
- if (db_exec != DB_REC_ONCE) {
- /* Print according to width */
- if (db_entry->db_width == DB_REC_WIDTH_32B) {
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "%s doorbell address %p data %x\n",
- db_exec == DB_REC_DRY_RUN ?
- "would have rung" : "ringing",
- db_entry->db_addr,
- *(u32 *)db_entry->db_data);
- } else {
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "%s doorbell address %p data %llx\n",
- db_exec == DB_REC_DRY_RUN ?
- "would have rung" : "ringing",
- db_entry->db_addr,
- *(u64 *)(db_entry->db_data));
- }
+ struct qed_db_recovery_entry *db_entry)
+{
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "ringing doorbell address %p data %x\n",
+ db_entry->db_addr,
+ *(u32 *)db_entry->db_data);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "ringing doorbell address %p data %llx\n",
+ db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
}
/* Sanity */
if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
- db_entry->db_data))
+ db_entry->db_width, db_entry->db_data))
return;
/* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
wmb();
/* Ring the doorbell */
- if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
- if (db_entry->db_width == DB_REC_WIDTH_32B)
- DIRECT_REG_WR(db_entry->db_addr,
- *(u32 *)(db_entry->db_data));
- else
- DIRECT_REG_WR64(db_entry->db_addr,
- *(u64 *)(db_entry->db_data));
- }
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
/* Flush the write combined buffer. Next doorbell may come from a
* different entity to the same address...
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
}
/* Traverse the doorbell recovery entry list and ring all the doorbells */
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
- enum qed_db_rec_exec db_exec)
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
{
struct qed_db_recovery_entry *db_entry = NULL;
- if (db_exec != DB_REC_ONCE) {
- DP_NOTICE(p_hwfn,
- "Executing doorbell recovery. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
+ DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
- /* Track amount of times recovery was executed */
- p_hwfn->db_recovery_info.db_recovery_counter++;
- }
+ /* Track amount of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
/* Protect the list */
spin_lock_bh(&p_hwfn->db_recovery_info.lock);
list_for_each_entry(db_entry,
- &p_hwfn->db_recovery_info.list, list_entry) {
- qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
- if (db_exec == DB_REC_ONCE)
- break;
- }
-
+ &p_hwfn->db_recovery_info.list, list_entry)
+ qed_db_recovery_ring(p_hwfn, db_entry);
spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
}
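
The widened qed_db_rec_sanity() check rejects a doorbell whose end, not just its start, falls outside the doorbell BAR (the patch adds the raw 32/64 width value rather than a byte count, erring on the safe side). The idea in miniature:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* End-inclusive bounds check: a write of width_bits starting at
     * off must also *end* inside the BAR. */
    static bool db_in_range(size_t off, unsigned int width_bits,
                            size_t bar_size)
    {
        return off + width_bits / 8 <= bar_size;
    }

    int main(void)
    {
        printf("%d %d\n",
               db_in_range(4092, 32, 4096),   /* 1: fits exactly */
               db_in_range(4092, 64, 4096));  /* 0: last bytes overrun */
        return 0;
    }
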
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index e23980e301b6..fdfedbc8e431 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
u32 count = QED_DB_REC_COUNT;
u32 usage = 1;
+ /* Flush any pending (e)dpms as they may never arrive */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
/* wait for usage to zero or count to run out. This is necessary since
* EDPM doorbell transactions can take multiple 64b cycles, and as such
* can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 overflow;
+ u32 attn_ovfl, cur_ovfl;
int rc;
- overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
- DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
- if (!overflow) {
- qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+ attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+ &p_hwfn->db_recovery_info.overflow);
+ cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ if (!cur_ovfl && !attn_ovfl)
return 0;
- }
- if (qed_edpm_enabled(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+ attn_ovfl, cur_ovfl);
+
+ if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
if (rc)
return rc;
}
- /* Flush any pending (e)dpm as they may never arrive */
- qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
/* Release overflow sticky indication (stop silently dropping everything) */
qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
/* Repeat all last doorbells (doorbell drop recovery) */
- qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+ qed_db_recovery_execute(p_hwfn);
return 0;
}
-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
{
- u32 int_sts, first_drop_reason, details, address, all_drops_reason;
struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ u32 overflow;
int rc;
- int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
- DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+ overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ if (!overflow)
+ goto out;
+
+ /* Run PF doorbell recovery in next periodic handler */
+ set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+ if (!p_hwfn->db_bar_no_edpm) {
+ rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+ if (rc)
+ goto out;
+ }
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+ /* Schedule the handler even if overflow was not detected */
+ qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+ u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+ struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. We can abort here. If
	 * this PF also requires overflow recovery, we will be interrupted again.
	 * The masked almost-full indication may also be set; it is ignored.
*/
+ int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
return 0;
+ DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+
/* check if db_drop or overflow happened */
if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
first_drop_reason, all_drops_reason);
- rc = qed_db_rec_handler(p_hwfn, p_ptt);
- qed_periodic_db_rec_start(p_hwfn);
- if (rc)
- return rc;
-
/* Clear the doorbell drop details and prepare for next drop */
qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ p_hwfn->db_recovery_info.dorq_attn = true;
+ qed_dorq_attn_overflow(p_hwfn);
+
+ return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->db_recovery_info.dorq_attn)
+ goto out;
+
+ /* Call DORQ callback if the attention was missed */
+ qed_dorq_attn_cb(p_hwfn);
+out:
+ p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
/* Instead of major changes to the data-structure, we have some 'special'
* identifiers for sources that changed meaning between adapters.
*/
@@ -774,18 +814,12 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
{
u16 rc = 0, index;
- /* Make certain HW write took affect */
- mmiowb();
-
index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
if (p_sb_desc->index != index) {
p_sb_desc->index = index;
rc = QED_SB_ATT_IDX;
}
- /* Make certain we got a consistent view with HW */
- mmiowb();
-
return rc;
}
@@ -1080,6 +1114,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
}
}
+ /* Handle missed DORQ attention */
+ qed_dorq_attn_handler(p_hwfn);
+
/* Clear IGU indication for the deasserted bits */
DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD +
@@ -1170,7 +1207,6 @@ static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
/* Both segments (interrupts & acks) are written to the same address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
- mmiowb();
barrier();
}
@@ -1805,9 +1841,6 @@ static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
- /* Flush the writes to IGU */
- mmiowb();
-
/* Unmask AEU signals toward IGU */
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
@@ -1871,9 +1904,6 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
- /* Flush the write to IGU */
- mmiowb();
-
/* calculate where to read the status bit from */
sb_bit = 1 << (igu_sb_id % 32);
sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
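The qed_dorq_attn_cb() split above separates overflow handling from int_sts parsing, and records that the callback ran so a DORQ attention that was aggregated away can still be serviced once at deassertion time. A sketch of that missed-attention fallback, with illustrative names:

    static bool dorq_attn_seen;             /* set when the callback runs */

    static int dorq_attn_cb(void)
    {
            dorq_attn_seen = true;
            /* handle overflow, then parse and log int_sts */
            return 0;
    }

    static void dorq_attn_handler(void)
    {
            if (!dorq_attn_seen)
                    dorq_attn_cb();         /* attention bit was missed */
            dorq_attn_seen = false;         /* re-arm for the next one */
    }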
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 1f356ed4f761..d473b522afc5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
* @brief - Doorbell Recovery handler.
- * Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- * (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ * Run doorbell recovery in case of PF overflow (and flush DORQ if
+ * needed).
*
* @param p_hwfn
* @param p_ptt
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f164d4acebcb..6de23b56b294 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
}
}
-#define QED_PERIODIC_DB_REC_COUNT 100
+#define QED_PERIODIC_DB_REC_COUNT 10
#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
#define QED_PERIODIC_DB_REC_INTERVAL \
msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
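As a quick sanity check of the new bound, assuming the macros keep their meaning of retry count times polling interval: the periodic doorbell-recovery window shrinks from 100 * 100 ms = 10 s to 10 * 100 ms = 1 s.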
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 79b311b86f66..f5f3c03b9dd2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -341,9 +341,6 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
REG_WR16(p_hwfn, addr, prod);
-
- /* keep prod updates ordered */
- mmiowb();
}
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9faaa6df78ed..2f318aaf2b05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
} else {
DP_INFO(p_hwfn,
- "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
vf->abs_vf_id,
req->vfdev_info.eth_fp_hsi_major,
req->vfdev_info.eth_fp_hsi_minor,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index c6238083e898..4555c0b161ef 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1526,14 +1526,6 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
barrier();
writel(txq->tx_db.raw, txq->doorbell_addr);
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the queue lock is released and another start_xmit is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
-
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
if (qede_txq_has_work(txq))
break;
@@ -1663,8 +1655,11 @@ static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
/* Wait for loopback configuration to apply */
msleep_interruptible(500);
- /* prepare the loopback packet */
- pkt_size = edev->ndev->mtu + ETH_HLEN;
+ /* Setting max packet size to 1.5K to avoid data being split over
+ * multiple BDs in cases where MTU > PAGE_SIZE.
+ */
+ pkt_size = (((edev->ndev->mtu < ETH_DATA_LEN) ?
+ edev->ndev->mtu : ETH_DATA_LEN) + ETH_HLEN);
skb = netdev_alloc_skb(edev->ndev, pkt_size);
if (!skb) {
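The clamp above is equivalent to taking the smaller of the MTU and the standard Ethernet payload before adding the header, so the loopback frame always fits a single 1.5K BD. A sketch of the same computation (not the driver's exact code); with an MTU of 9000 it yields min(9000, 1500) + 14 = 1514 bytes:

    #include <linux/if_ether.h>
    #include <linux/kernel.h>

    /* pkt_size for the loopback frame: never larger than one 1.5K BD */
    pkt_size = min_t(unsigned int, edev->ndev->mtu, ETH_DATA_LEN) + ETH_HLEN;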
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 31b046e24565..6f7e3622c6b4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -580,14 +580,6 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
(u32 *)&rx_prods);
-
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the napi lock is released and another qede_poll is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
}
static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
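The mmiowb() deletions in this and the following drivers come from the kernel-wide change that folded the barrier into spin_unlock() on the architectures that needed it, so a posted MMIO doorbell followed by an unlock is already ordered. The resulting pattern, sketched with illustrative names:

    spin_lock(&txq->lock);
    writel(txq->db_val, txq->db_addr);  /* post the doorbell write */
    spin_unlock(&txq->lock);            /* implies mmiowb() where required */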
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 5f3f42a25361..bddb2b5982dc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
if (IS_ERR(ptp->clock)) {
- rc = -EINVAL;
DP_ERR(edev, "PTP clock registration failed\n");
+ qede_ptp_disable(edev);
+ rc = -EINVAL;
goto err2;
}
return 0;
-err2:
- qede_ptp_disable(edev);
- ptp->clock = NULL;
err1:
kfree(ptp);
+err2:
edev->ptp = NULL;
return rc;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 10b075bc5959..457444894d80 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1858,7 +1858,6 @@ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
wmb();
writel_relaxed(qdev->small_buf_q_producer_index,
&port_regs->CommonRegs.rxSmallQProducerIndex);
- mmiowb();
}
}
@@ -3886,6 +3885,12 @@ static int ql3xxx_probe(struct pci_dev *pdev,
netif_stop_queue(ndev);
qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ if (!qdev->workqueue) {
+ unregister_netdev(ndev);
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
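Both qla3xxx above and qlge below gain the same missing check: workqueue creation can fail, and probe must unwind instead of dereferencing a NULL workqueue later. The shape of the fix, sketched with a hypothetical error label:

    qdev->workqueue = create_singlethread_workqueue(ndev->name);
    if (!qdev->workqueue) {             /* allocation may fail */
            err = -ENOMEM;              /* report out of memory */
            goto err_out;               /* unwind earlier setup */
    }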
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 0c443ea98479..374a4d4371f9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -497,7 +497,7 @@ struct qlcnic_hardware_context {
u16 board_type;
u16 supported_type;
- u16 link_speed;
+ u32 link_speed;
u16 link_duplex;
u16 link_autoneg;
u16 module_type;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 3b0adda7cc9c..a4cd6f2cfb86 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+ if (!skb)
+ break;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 3e71b65a9546..ad7c5eb8a3b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2181,7 +2181,6 @@ static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
static inline void ql_write_db_reg(u32 val, void __iomem *addr)
{
writel(val, addr);
- mmiowb();
}
/*
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 096515c27263..6cae33072496 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2695,7 +2695,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
wmb();
ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- mmiowb();
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len);
@@ -4681,6 +4680,11 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
*/
qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
ndev->name);
+ if (!qdev->workqueue) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 69d752f0b621..55d01266e615 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -258,6 +258,7 @@ static const struct pci_device_id rtl8139_pci_tbl[] = {
{0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x16ec, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
#ifdef CONFIG_SH_SECUREEDGE5410
/* Bogus 8139 silicon reports 8129 without external PROM :-( */
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index cfb67b746595..58e0ca9093d3 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev)
write_reg_high(ioaddr, IMR, ISRh_RxErr);
lp->tx_unit_busy = 0;
- lp->pac_cnt_in_tx_buf = 0;
+ lp->pac_cnt_in_tx_buf = 0;
lp->saved_tx_size = 0;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c29dde064078..ed651dde6ef9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
+#include <linux/pci-aspm.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
@@ -678,6 +679,7 @@ struct rtl8169_private {
struct work_struct work;
} wk;
+ unsigned irq_enabled:1;
unsigned supports_gmii:1;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
@@ -1293,6 +1295,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
static void rtl_irq_disable(struct rtl8169_private *tp)
{
RTL_W16(tp, IntrMask, 0);
+ tp->irq_enabled = 0;
}
#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
@@ -1301,6 +1304,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
static void rtl_irq_enable(struct rtl8169_private *tp)
{
+ tp->irq_enabled = 1;
RTL_W16(tp, IntrMask, tp->irq_mask);
}
@@ -5457,7 +5461,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
tp->cp_cmd |= PktCntrDisable | INTT_1;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_W16(tp, IntrMitigate, 0x5151);
+ RTL_W16(tp, IntrMitigate, 0x5100);
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@ -6520,9 +6524,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
u16 status = RTL_R16(tp, IntrStatus);
- u16 irq_mask = RTL_R16(tp, IntrMask);
- if (status == 0xffff || !(status & irq_mask))
+ if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -6540,7 +6543,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
}
- if (status & RTL_EVENT_NAPI) {
+ if (status & (RTL_EVENT_NAPI | LinkChg)) {
rtl_irq_disable(tp);
napi_schedule_irqoff(&tp->napi);
}
@@ -7350,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
+	/* Disable ASPM completely as it causes random devices to stop working,
+	 * as well as full system hangs, for some PCIe device users.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
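The new irq_enabled bit lets the r8169 ISR reject spurious or shared-line interrupts with a cheap memory read instead of an extra IntrMask register access over PCI. A minimal sketch of the idea, with illustrative names:

    static bool irq_enabled;            /* mirrors the hardware mask state */

    static void irq_disable(void __iomem *base)
    {
            writew(0, base);            /* clear IntrMask: mask everything */
            irq_enabled = false;
    }

    static irqreturn_t isr_check(u16 status, u16 mask)
    {
            /* 0xffff means the device is gone (e.g. surprise removal) */
            if (!irq_enabled || status == 0xffff || !(status & mask))
                    return IRQ_NONE;    /* not ours, or not enabled */
            return IRQ_HANDLED;
    }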
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index d28c8f9ca55b..316b47741d3f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -458,7 +458,7 @@ static int ravb_dmac_init(struct net_device *ndev)
RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
/* Set FIFO size */
- ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
/* Timestamp enable */
ravb_write(ndev, TCCR_TFEN, TCCR);
@@ -728,7 +728,6 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
- mmiowb();
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
@@ -848,7 +847,6 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
}
- mmiowb();
spin_unlock(&priv->lock);
return result;
}
@@ -881,7 +879,6 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
}
- mmiowb();
spin_unlock(&priv->lock);
return result;
}
@@ -898,7 +895,6 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
if (ravb_queue_interrupt(ndev, q))
result = IRQ_HANDLED;
- mmiowb();
spin_unlock(&priv->lock);
return result;
}
@@ -943,7 +939,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
}
}
@@ -959,7 +954,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
ravb_write(ndev, mask, RIE0);
ravb_write(ndev, mask, TIE);
}
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
/* Receive error message handling */
@@ -1008,7 +1002,6 @@ static void ravb_adjust_link(struct net_device *ndev)
if (priv->no_avb_link && phydev->link)
ravb_rcv_snd_enable(ndev);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
if (new_state && netif_msg_link(priv))
@@ -1601,7 +1594,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
netif_stop_subqueue(ndev, q);
exit:
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
@@ -1673,7 +1665,6 @@ static void ravb_set_rx_mode(struct net_device *ndev)
spin_lock_irqsave(&priv->lock, flags);
ravb_modify(ndev, ECMR, ECMR_PRM,
ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
}
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index dce2a40a31e3..9a42580693cb 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -196,7 +196,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
ravb_write(ndev, GIE_PTCS, GIE);
else
ravb_write(ndev, GID_PTCD, GID);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
@@ -259,7 +258,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
else
ravb_write(ndev, GID_PTMD0, GID);
}
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
return error;
@@ -331,7 +329,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
spin_lock_irqsave(&priv->lock, flags);
ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
- mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 339b2eae2100..ed30aebdb941 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2010,7 +2010,6 @@ static void sh_eth_adjust_link(struct net_device *ndev)
if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
sh_eth_rcv_snd_enable(ndev);
- mmiowb();
spin_unlock_irqrestore(&mdp->lock, flags);
if (new_state && netif_msg_link(mdp))
@@ -3181,12 +3180,16 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
struct sh_eth_plat_data *pdata;
const char *mac_addr;
+ int ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- pdata->phy_interface = of_get_phy_mode(np);
+ ret = of_get_phy_mode(np);
+ if (ret < 0)
+ return NULL;
+ pdata->phy_interface = ret;
mac_addr = of_get_mac_address(np);
if (mac_addr)
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index c883aa89b7ca..a71c900ca04f 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2805,6 +2805,11 @@ static int rocker_switchdev_event(struct notifier_block *unused,
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (unlikely(!switchdev_work->fdb_info.addr)) {
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+ }
+
ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
fdb_info->addr);
/* Take a reference on the rocker device */
diff --git a/drivers/net/ethernet/sfc/falcon/io.h b/drivers/net/ethernet/sfc/falcon/io.h
index 7085ee1d5e2b..c3577643fbda 100644
--- a/drivers/net/ethernet/sfc/falcon/io.h
+++ b/drivers/net/ethernet/sfc/falcon/io.h
@@ -108,7 +108,6 @@ static inline void ef4_writeo(struct ef4_nic *efx, const ef4_oword_t *value,
_ef4_writed(efx, value->u32[2], reg + 8);
_ef4_writed(efx, value->u32[3], reg + 12);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -130,7 +129,6 @@ static inline void ef4_sram_writeq(struct ef4_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 89563170af52..2774a10f44e9 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -120,7 +120,6 @@ static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -142,7 +141,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index c07fd594fe71..02b3962b0e63 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -251,7 +251,6 @@ enum PMConfigBits {
* use of mdelay() at _sc92031_reset.
* Functions prefixed with _sc92031_ must be called with the lock held;
* functions prefixed with sc92031_ must be called without the lock held.
- * Use mmiowb() before unlocking if the hardware was written to.
*/
/* Locking rules for the interrupt:
@@ -361,7 +360,6 @@ static void sc92031_disable_interrupts(struct net_device *dev)
/* stop interrupts */
iowrite32(0, port_base + IntrMask);
_sc92031_dummy_read(port_base);
- mmiowb();
/* wait for any concurrent interrupt/tasklet to finish */
synchronize_irq(priv->pdev->irq);
@@ -379,7 +377,6 @@ static void sc92031_enable_interrupts(struct net_device *dev)
wmb();
iowrite32(IntrBits, port_base + IntrMask);
- mmiowb();
}
static void _sc92031_disable_tx_rx(struct net_device *dev)
@@ -867,7 +864,6 @@ out:
rmb();
iowrite32(intr_mask, port_base + IntrMask);
- mmiowb();
spin_unlock(&priv->lock);
}
@@ -901,7 +897,6 @@ out_none:
rmb();
iowrite32(intr_mask, port_base + IntrMask);
- mmiowb();
return IRQ_NONE;
}
@@ -978,7 +973,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
port_base + TxAddr0 + entry * 4);
iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
- mmiowb();
if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
netif_stop_queue(dev);
@@ -1024,7 +1018,6 @@ static int sc92031_open(struct net_device *dev)
spin_lock_bh(&priv->lock);
_sc92031_reset(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
sc92031_enable_interrupts(dev);
@@ -1060,7 +1053,6 @@ static int sc92031_stop(struct net_device *dev)
_sc92031_disable_tx_rx(dev);
_sc92031_tx_clear(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1081,7 +1073,6 @@ static void sc92031_set_multicast_list(struct net_device *dev)
_sc92031_set_mar(dev);
_sc92031_set_rx_config(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
}
@@ -1098,7 +1089,6 @@ static void sc92031_tx_timeout(struct net_device *dev)
priv->tx_timeouts++;
_sc92031_reset(dev);
- mmiowb();
spin_unlock(&priv->lock);
@@ -1140,7 +1130,6 @@ sc92031_ethtool_get_link_ksettings(struct net_device *dev,
output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
_sc92031_mii_scan(port_base);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1311,7 +1300,6 @@ static int sc92031_ethtool_set_wol(struct net_device *dev,
priv->pm_config = pm_config;
iowrite32(pm_config, port_base + PMConfig);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1337,7 +1325,6 @@ static int sc92031_ethtool_nway_reset(struct net_device *dev)
out:
_sc92031_mii_scan(port_base);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1530,7 +1517,6 @@ static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
_sc92031_disable_tx_rx(dev);
_sc92031_tx_clear(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
@@ -1555,7 +1541,6 @@ static int sc92031_resume(struct pci_dev *pdev)
spin_lock_bh(&priv->lock);
_sc92031_reset(dev);
- mmiowb();
spin_unlock_bh(&priv->lock);
sc92031_enable_interrupts(dev);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6073387511f8..67f9bb6e941b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev)
status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
/* Link ON & Not select default PHY & not ghost PHY */
- if ((status & MII_STAT_LINK) && !default_phy &&
- (phy->phy_types != UNKNOWN))
- default_phy = phy;
- else {
+ if ((status & MII_STAT_LINK) && !default_phy &&
+ (phy->phy_types != UNKNOWN)) {
+ default_phy = phy;
+ } else {
status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
@@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev)
phy_home = phy;
else if(phy->phy_types == LAN)
phy_lan = phy;
- }
+ }
}
if (!default_phy && phy_home)
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index a18149720aa2..cba5881b2746 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv)
}
static void *netsec_alloc_rx_data(struct netsec_priv *priv,
- dma_addr_t *dma_handle, u16 *desc_len)
+ dma_addr_t *dma_handle, u16 *desc_len,
+ bool napi)
{
size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
size_t payload_len = NETSEC_RX_BUF_SZ;
@@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
- buf = napi_alloc_frag(total_len);
+ buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len);
if (!buf)
return NULL;
@@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
/* allocate a fresh buffer and map it to the hardware.
* This will eventually replace the old buffer in the hardware
*/
- buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+ buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len,
+ true);
if (unlikely(!buf_addr))
break;
@@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
void *buf;
u16 len;
- buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+ buf = netsec_alloc_rx_data(priv, &dma_handle, &len,
+ false);
if (!buf) {
netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
goto err_out;
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356a7e73..3dfb07a78952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
/* Specific functions used for Ring mode */
/* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+ int bfsize)
{
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
- << ERDES1_BUFFER2_SIZE_SHIFT)
- & ERDES1_BUFFER2_SIZE_MASK);
+ if (bfsize == BUF_SIZE_16KiB)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+ << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK);
if (end)
p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
}
/* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
- << RDES1_BUFFER2_SIZE_SHIFT)
- & RDES1_BUFFER2_SIZE_MASK);
+ if (bfsize >= BUF_SIZE_2KiB) {
+ int bfsize2;
+
+ bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK);
+ }
if (end)
p->des1 |= cpu_to_le32(RDES1_END_RING);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 7e2e79dedebf..21428537e231 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -25,9 +25,24 @@
#define SYSCFG_MCU_ETH_MASK BIT(23)
#define SYSCFG_MP1_ETH_MASK GENMASK(23, 16)
+#define SYSCFG_PMCCLRR_OFFSET 0x40
#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16)
#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17)
+
+/* Ethernet PHY interface selection in the SYSCFG configuration register
+ *------------------------------------------
+ * src |BIT(23)| BIT(22)| BIT(21)|BIT(20)|
+ *------------------------------------------
+ * MII | 0 | 0 | 0 | 1 |
+ *------------------------------------------
+ * GMII | 0 | 0 | 0 | 0 |
+ *------------------------------------------
+ * RGMII | 0 | 0 | 1 | n/a |
+ *------------------------------------------
+ * RMII | 1 | 0 | 0 | n/a |
+ *------------------------------------------
+ */
#define SYSCFG_PMCR_ETH_SEL_MII BIT(20)
#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21)
#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23)
@@ -35,14 +50,54 @@
#define SYSCFG_MCU_ETH_SEL_MII 0
#define SYSCFG_MCU_ETH_SEL_RMII 1
+/* STM32MP1 register definitions
+ *
+ * The table below summarizes the clock requirements and clock sources for
+ * the supported PHY interface modes.
+ * __________________________________________________________________________
+ *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125Mhz from PHY|
+ *| | | 25MHz | 50MHz | |
+ * ---------------------------------------------------------------------------
+ *| MII | - | eth-ck | n/a | n/a |
+ *| | | | | |
+ * ---------------------------------------------------------------------------
+ *| GMII | - | eth-ck | n/a | n/a |
+ *| | | | | |
+ * ---------------------------------------------------------------------------
+ *| RGMII | - | eth-ck | n/a | eth-ck (no pin) |
+ *| | | | | st,eth-clk-sel |
+ * ---------------------------------------------------------------------------
+ *| RMII | - | eth-ck | eth-ck | n/a |
+ *| | | | st,eth-ref-clk-sel | |
+ * ---------------------------------------------------------------------------
+ *
+ * BIT(17) : set this bit in RMII mode when the PHY has no 50MHz crystal
+ * BIT(16) : set this bit in GMII/RGMII mode when you do not want to use
+ * the 125MHz clock from the PHY
+ *-----------------------------------------------------
+ * src | BIT(17) | BIT(16) |
+ *-----------------------------------------------------
+ * MII | n/a | n/a |
+ *-----------------------------------------------------
+ * GMII | n/a | st,eth-clk-sel |
+ *-----------------------------------------------------
+ * RGMII | n/a | st,eth-clk-sel |
+ *-----------------------------------------------------
+ * RMII | st,eth-ref-clk-sel | n/a |
+ *-----------------------------------------------------
+ *
+ */
+
struct stm32_dwmac {
struct clk *clk_tx;
struct clk *clk_rx;
struct clk *clk_eth_ck;
struct clk *clk_ethstp;
struct clk *syscfg_clk;
- bool int_phyclk; /* Clock from RCC to drive PHY */
- u32 mode_reg; /* MAC glue-logic mode register */
+ int eth_clk_sel_reg;
+ int eth_ref_clk_sel_reg;
+ int irq_pwr_wakeup;
+ u32 mode_reg; /* MAC glue-logic mode register */
struct regmap *regmap;
u32 speed;
const struct stm32_ops *ops;
@@ -102,7 +157,7 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
if (ret)
return ret;
- if (dwmac->int_phyclk) {
+ if (dwmac->clk_eth_ck) {
ret = clk_prepare_enable(dwmac->clk_eth_ck);
if (ret) {
clk_disable_unprepare(dwmac->syscfg_clk);
@@ -111,7 +166,7 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
}
} else {
clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->int_phyclk)
+ if (dwmac->clk_eth_ck)
clk_disable_unprepare(dwmac->clk_eth_ck);
}
return ret;
@@ -121,7 +176,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
u32 reg = dwmac->mode_reg;
- int val;
+ int val, ret;
switch (plat_dat->interface) {
case PHY_INTERFACE_MODE_MII:
@@ -130,19 +185,22 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
case PHY_INTERFACE_MODE_GMII:
val = SYSCFG_PMCR_ETH_SEL_GMII;
- if (dwmac->int_phyclk)
+ if (dwmac->eth_clk_sel_reg)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n");
break;
case PHY_INTERFACE_MODE_RMII:
val = SYSCFG_PMCR_ETH_SEL_RMII;
- if (dwmac->int_phyclk)
+ if (dwmac->eth_ref_clk_sel_reg)
val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val = SYSCFG_PMCR_ETH_SEL_RGMII;
- if (dwmac->int_phyclk)
+ if (dwmac->eth_clk_sel_reg)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n");
break;
@@ -153,6 +211,11 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
return -EINVAL;
}
+ /* Need to update PMCCLRR (clear register) */
+ ret = regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
+ dwmac->ops->syscfg_eth_mask);
+
+ /* Update PMCSETR (set register) */
return regmap_update_bits(dwmac->regmap, reg,
dwmac->ops->syscfg_eth_mask, val);
}
@@ -180,7 +243,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
}
return regmap_update_bits(dwmac->regmap, reg,
- dwmac->ops->syscfg_eth_mask, val);
+ dwmac->ops->syscfg_eth_mask, val << 23);
}
static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
@@ -232,24 +295,29 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
+ int err = 0;
- dwmac->int_phyclk = of_property_read_bool(np, "st,int-phyclk");
+ /* Gigabit Ethernet 125MHz clock selection. */
+ dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel");
- /* Check if internal clk from RCC selected */
- if (dwmac->int_phyclk) {
- /* Get ETH_CLK clocks */
- dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck");
- if (IS_ERR(dwmac->clk_eth_ck)) {
- dev_err(dev, "No ETH CK clock provided...\n");
- return PTR_ERR(dwmac->clk_eth_ck);
- }
+ /* Ethernet 50Mhz RMII clock selection */
+ dwmac->eth_ref_clk_sel_reg =
+ of_property_read_bool(np, "st,eth-ref-clk-sel");
+
+ /* Get ETH_CLK clocks */
+ dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck");
+ if (IS_ERR(dwmac->clk_eth_ck)) {
+ dev_warn(dev, "No phy clock provided...\n");
+ dwmac->clk_eth_ck = NULL;
}
/* Clock used for low power mode */
dwmac->clk_ethstp = devm_clk_get(dev, "ethstp");
if (IS_ERR(dwmac->clk_ethstp)) {
- dev_err(dev, "No ETH peripheral clock provided for CStop mode ...\n");
+ dev_err(dev,
+ "No ETH peripheral clock provided for CStop mode ...\n");
return PTR_ERR(dwmac->clk_ethstp);
}
@@ -260,7 +328,29 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
return PTR_ERR(dwmac->syscfg_clk);
}
- return 0;
+	/* Get IRQ information early so we can request deferred probe if
+	 * needed, before we go too far with resource allocation.
+ */
+ dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
+ "stm32_pwr_wakeup");
+ if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
+ err = device_init_wakeup(&pdev->dev, true);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init wake up irq\n");
+ return err;
+ }
+ err = dev_pm_set_dedicated_wake_irq(&pdev->dev,
+ dwmac->irq_pwr_wakeup);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set wake up irq\n");
+ device_init_wakeup(&pdev->dev, false);
+ }
+ device_set_wakeup_enable(&pdev->dev, false);
+ }
+ return err;
}
static int stm32_dwmac_probe(struct platform_device *pdev)
@@ -326,9 +416,15 @@ static int stm32_dwmac_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = stmmac_dvr_remove(&pdev->dev);
+ struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
stm32_dwmac_clk_disable(priv->plat->bsp_priv);
+ if (dwmac->irq_pwr_wakeup >= 0) {
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+ }
+
return ret;
}
@@ -342,7 +438,7 @@ static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->int_phyclk)
+ if (dwmac->clk_eth_ck)
clk_disable_unprepare(dwmac->clk_eth_ck);
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 0f660af01a4b..195669f550f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1147,7 +1147,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- plat_dat->interface = of_get_phy_mode(dev->of_node);
+ ret = of_get_phy_mode(dev->of_node);
+ if (ret < 0)
+ return -EINVAL;
+ plat_dat->interface = ret;
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7fbb6a4dbf51..e061e9f5fad7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ exit:
}
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwmac4_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fdec997..98fa471da7c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
}
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwxgmac2_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a790f9d..5202d6ad7919 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
- !!(rdes0 & RDES0_FRAME_TYPE),
- !!(rdes0 & ERDES0_RX_MAC_ADDR));
+ if (likely(ret == good_frame))
+ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+ !!(rdes0 & RDES0_FRAME_TYPE),
+ !!(rdes0 & ERDES0_RX_MAC_ADDR));
if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+ p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
else
- ehn_desc_rx_set_on_ring(p, end);
+ ehn_desc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944f26e3..5bb00234d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
- int end);
+ int end, int bfsize);
/* DMA TX descriptor ring initialization */
void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
/* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb29feba..6d690678c20e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- pr_warn("%s: Oversized frame spanned multiple buffers\n",
- __func__);
stats->rx_length_errors++;
return discard_frame;
}
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
- int end)
+ int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);
else
- ndesc_rx_set_on_ring(p, end);
+ ndesc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index d8c5bc412219..4d9bcb4d0378 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
- STMMAC_RING_MODE, 1, false, skb->len);
+ STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
- STMMAC_RING_MODE, 1, true, skb->len);
+ STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
+ skb->len);
} else {
des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
- STMMAC_RING_MODE, 1, true, skb->len);
+ STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
+ skb->len);
}
tx_q->cur_tx = entry;
@@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
static void refill_desc3(void *priv_ptr, struct dma_desc *p)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+ struct stmmac_rx_queue *rx_q = priv_ptr;
+ struct stmmac_priv *priv = rx_q->priv_data;
/* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e2a13ec2e30b..48712437d0da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -480,7 +480,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
struct dma_desc *p, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamp;
- u64 ns;
+ u64 ns = 0;
if (!priv->hwts_tx_en)
return;
@@ -519,7 +519,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
{
struct skb_shared_hwtstamps *shhwtstamp = NULL;
struct dma_desc *desc = p;
- u64 ns;
+ u64 ns = 0;
if (!priv->hwts_rx_en)
return;
@@ -564,8 +564,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
u32 snap_type_sel = 0;
u32 ts_master_en = 0;
u32 ts_event_en = 0;
+ u32 sec_inc = 0;
u32 value = 0;
- u32 sec_inc;
bool xmac;
xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
}
/**
@@ -2614,8 +2616,6 @@ static int stmmac_open(struct net_device *dev)
u32 chan;
int ret;
- stmmac_check_ether_addr(priv);
-
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -3216,14 +3216,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
csum_insertion, priv->mode, 1, last_segment,
skb->len);
-
- /* The own bit must be the latest setting done when prepare the
- * descriptor and then barrier is needed to make sure that
- * all is coherent before granting the DMA engine.
- */
- wmb();
+ } else {
+ stmmac_set_tx_owner(priv, first);
}
+	/* The own bit must be the last setting done when preparing the
+	 * descriptor, and then a barrier is needed to make sure that
+	 * all is coherent before granting control to the DMA engine.
+ */
+ wmb();
+
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -3350,9 +3352,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
- unsigned int entry = rx_q->cur_rx;
+ unsigned int next_entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
- unsigned int next_entry;
unsigned int count = 0;
bool xmac;
@@ -3370,10 +3371,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- int status;
+ int entry, status;
struct dma_desc *p;
struct dma_desc *np;
+ entry = next_entry;
+
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
@@ -3429,11 +3432,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
- netdev_err(priv->dev,
- "len %d larger than size (%d)\n",
- frame_len, priv->dma_buf_sz);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "len %d larger than size (%d)\n",
+ frame_len, priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
- break;
+ continue;
}
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3468,7 +3472,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
dev_warn(priv->device,
"packet dropped\n");
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
dma_sync_single_for_cpu(priv->device,
@@ -3488,11 +3492,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
} else {
skb = rx_q->rx_skbuff[entry];
if (unlikely(!skb)) {
- netdev_err(priv->dev,
- "%s: Inconsistent Rx chain\n",
- priv->dev->name);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "%s: Inconsistent Rx chain\n",
+ priv->dev->name);
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
prefetch(skb->data - NET_IP_ALIGN);
rx_q->rx_skbuff[entry] = NULL;
@@ -3527,7 +3532,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
}
- entry = next_entry;
}
stmmac_rx_refill(priv, queue);
@@ -4297,6 +4301,8 @@ int stmmac_dvr_probe(struct device *device,
if (ret)
goto error_hw_init;
+ stmmac_check_ether_addr(priv);
+
/* Configure real RX and TX queues */
netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
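With entry now recomputed from next_entry on every pass, the error paths in the stmmac_rx() hunks above can use continue instead of break, so one bad or oversized descriptor no longer stalls the rest of the NAPI poll. The loop shape, sketched with a hypothetical error_on() predicate:

    unsigned int next_entry = rx_q->cur_rx;

    while (count < limit) {
            int entry = next_entry;     /* descriptor for this pass */
            /* ... read status, advance next_entry from the ring ... */
            if (error_on(entry)) {
                    priv->dev->stats.rx_dropped++;
                    continue;           /* skip this frame, keep polling */
            }
            /* ... deliver the frame to the stack ... */
    }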
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index d819e8eaba12..26db6aa002d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
},
.driver_data = (void *)&galileo_stmmac_dmi_data,
},
+ /*
+ * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
+	 * has a single PCI network device, while the other asset tags are
+	 * for the IOT2040, which has two.
+ */
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
- "6ES7647-0AA00-1YA2"),
},
.driver_data = (void *)&iot2040_stmmac_dmi_data,
},
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 2b800ce1d5bf..3031f2bf15d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -408,6 +408,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
/* Default to phy auto-detection */
plat->phy_addr = -1;
+ /* Get clk_csr from device tree */
+ of_property_read_u32(np, "clk_csr", &plat->clk_csr);
+
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 2293e21f789f..cc60b3fb0892 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -105,7 +105,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct stmmac_priv *priv =
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
unsigned long flags;
- u64 ns;
+ u64 ns = 0;
spin_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &ns);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index d84501441edd..6f99437a6962 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -7464,6 +7464,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
class = CLASS_CODE_USER_PROG4;
break;
default:
+ class = CLASS_CODE_UNRECOG;
break;
}
ret = tcam_user_ip_class_set(np, class, 0,
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 840820402cd0..57450b174fc4 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2029,7 +2029,6 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
.resume = davinci_emac_resume,
};
-#if IS_ENABLED(CONFIG_OF)
static const struct emac_platform_data am3517_emac_data = {
.version = EMAC_VERSION_2,
.hw_ram_addr = 0x01e20000,
@@ -2046,14 +2045,13 @@ static const struct of_device_id davinci_emac_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
-#endif
/* davinci_emac_driver: EMAC platform driver structure */
static struct platform_driver davinci_emac_driver = {
.driver = {
.name = "davinci_emac",
.pm = &davinci_emac_pm_ops,
- .of_match_table = of_match_ptr(davinci_emac_of_match),
+ .of_match_table = davinci_emac_of_match,
},
.probe = davinci_emac_probe,
.remove = davinci_emac_remove,
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 5174d318901e..0a920c5936b2 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
- if (ret)
+ if (ret) {
+ of_node_put(interfaces);
return ret;
+ }
ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
- if (ret)
+ if (ret) {
+ of_node_put(interfaces);
return ret;
+ }
/* Create network interfaces */
INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 33949248c829..ab55416a10fa 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -571,7 +571,6 @@ static void rhine_ack_events(struct rhine_private *rp, u32 mask)
if (rp->quirks & rqStatusWBRace)
iowrite8(mask >> 16, ioaddr + IntrStatus2);
iowrite16(mask, ioaddr + IntrStatus);
- mmiowb();
}
/*
@@ -863,7 +862,6 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
iowrite16(enable_mask, ioaddr + IntrEnable);
- mmiowb();
}
return work_done;
}
@@ -1893,7 +1891,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
static void rhine_irq_disable(struct rhine_private *rp)
{
iowrite16(0x0000, rp->base + IntrEnable);
- mmiowb();
}
/* The interrupt handler does all of the Rx thread work and cleans up
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index d8ba512f166a..1713c2d2dccf 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -219,7 +219,6 @@ static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
{
__w5100_write_direct(ndev, addr, data);
- mmiowb();
return 0;
}
@@ -236,7 +235,6 @@ static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
{
__w5100_write_direct(ndev, addr, data >> 8);
__w5100_write_direct(ndev, addr + 1, data);
- mmiowb();
return 0;
}
@@ -260,8 +258,6 @@ static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
for (i = 0; i < len; i++, addr++)
__w5100_write_direct(ndev, addr, *buf++);
- mmiowb();
-
return 0;
}
@@ -375,7 +371,6 @@ static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
for (i = 0; i < len; i++)
*buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
- mmiowb();
spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return 0;
@@ -394,7 +389,6 @@ static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
for (i = 0; i < len; i++)
__w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
- mmiowb();
spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return 0;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index f9da5d6172e3..3f03eecc0479 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -141,7 +141,6 @@ static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
spin_lock_irqsave(&priv->reg_lock, flags);
w5300_write_direct(priv, W5300_IDM_AR, addr);
- mmiowb();
data = w5300_read_direct(priv, W5300_IDM_DR);
spin_unlock_irqrestore(&priv->reg_lock, flags);
@@ -154,9 +153,7 @@ static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
spin_lock_irqsave(&priv->reg_lock, flags);
w5300_write_direct(priv, W5300_IDM_AR, addr);
- mmiowb();
w5300_write_direct(priv, W5300_IDM_DR, data);
- mmiowb();
spin_unlock_irqrestore(&priv->reg_lock, flags);
}
@@ -192,7 +189,6 @@ static int w5300_command(struct w5300_priv *priv, u16 cmd)
unsigned long timeout = jiffies + msecs_to_jiffies(100);
w5300_write(priv, W5300_S0_CR, cmd);
- mmiowb();
while (w5300_read(priv, W5300_S0_CR) != 0) {
if (time_after(jiffies, timeout))
@@ -241,18 +237,15 @@ static void w5300_write_macaddr(struct w5300_priv *priv)
w5300_write(priv, W5300_SHARH,
ndev->dev_addr[4] << 8 |
ndev->dev_addr[5]);
- mmiowb();
}
static void w5300_hw_reset(struct w5300_priv *priv)
{
w5300_write_direct(priv, W5300_MR, MR_RST);
- mmiowb();
mdelay(5);
w5300_write_direct(priv, W5300_MR, priv->indirect ?
MR_WDF(7) | MR_PB | MR_IND :
MR_WDF(7) | MR_PB);
- mmiowb();
w5300_write(priv, W5300_IMR, 0);
w5300_write_macaddr(priv);
@@ -264,24 +257,20 @@ static void w5300_hw_reset(struct w5300_priv *priv)
w5300_write32(priv, W5300_TMSRL, 64 << 24);
w5300_write32(priv, W5300_TMSRH, 0);
w5300_write(priv, W5300_MTYPE, 0x00ff);
- mmiowb();
}
static void w5300_hw_start(struct w5300_priv *priv)
{
w5300_write(priv, W5300_S0_MR, priv->promisc ?
S0_MR_MACRAW : S0_MR_MACRAW_MF);
- mmiowb();
w5300_command(priv, S0_CR_OPEN);
w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
w5300_write(priv, W5300_IMR, IR_S0);
- mmiowb();
}
static void w5300_hw_close(struct w5300_priv *priv)
{
w5300_write(priv, W5300_IMR, 0);
- mmiowb();
w5300_command(priv, S0_CR_CLOSE);
}
@@ -372,7 +361,6 @@ static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
netif_stop_queue(ndev);
w5300_write_frame(priv, skb->data, skb->len);
- mmiowb();
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
@@ -419,7 +407,6 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
if (rx_count < budget) {
napi_complete_done(napi, rx_count);
w5300_write(priv, W5300_IMR, IR_S0);
- mmiowb();
}
return rx_count;
@@ -434,7 +421,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
if (!ir)
return IRQ_NONE;
w5300_write(priv, W5300_S0_IR, ir);
- mmiowb();
if (ir & S0_IR_SENDOK) {
netif_dbg(priv, tx_done, ndev, "tx done\n");
@@ -444,7 +430,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
if (ir & S0_IR_RECV) {
if (napi_schedule_prep(&priv->napi)) {
w5300_write(priv, W5300_IMR, 0);
- mmiowb();
__napi_schedule(&priv->napi);
}
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index ec7e7ec24ff9..4041c75997ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
ret = of_address_to_resource(np, 0, &dmares);
if (ret) {
dev_err(&pdev->dev, "unable to get DMA resource\n");
+ of_node_put(np);
goto free_netdev;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
+ of_node_put(np);
goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e859ae2e42d5..49f41b64077b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -987,6 +987,7 @@ struct netvsc_device {
wait_queue_head_t wait_drain;
bool destroy;
+ bool tx_disable; /* if true, do not wake up queue again */
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 813d195bbd57..e0dce373cdd9 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
+ net_device->tx_disable = false;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
} else {
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
- if (netif_tx_queue_stopped(txq) &&
+ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
(hv_get_avail_to_write_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(txq);
ndev_ctx->eth_stats.stop_queue++;
- if (atomic_read(&nvchan->queue_sends) < 1) {
+ if (atomic_read(&nvchan->queue_sends) < 1 &&
+ !net_device->tx_disable) {
netif_tx_wake_queue(txq);
ndev_ctx->eth_stats.wake_queue++;
ret = -ENOSPC;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf4897043e83..b20fb0fb595b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
rcu_read_unlock();
}
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ nvscdev->tx_disable = false;
+ virt_wmb(); /* ensure queue wake up mechanism is on */
+
+ netif_tx_wake_all_queues(ndev);
+}
+
static int netvsc_open(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
rdev = nvdev->extension;
if (!rdev->link_state) {
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(nvdev, net);
}
if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
}
}
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ if (nvscdev) {
+ nvscdev->tx_disable = true;
+ virt_wmb(); /* ensure txq will not wake up after stop */
+ }
+
+ netif_tx_disable(ndev);
+}
+
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
int ret;
- netif_tx_disable(net);
+ netvsc_tx_disable(nvdev, net);
/* No need to close rndis filter if it is removed already */
if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
- netif_tx_disable(ndev);
+ netvsc_tx_disable(nvdev, ndev);
ret = rndis_filter_close(nvdev);
if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
if (rdev->link_state) {
rdev->link_state = false;
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(net_device, net);
} else {
notify = true;
}
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
}
kfree(event);
break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add(&event->list, &ndev_ctx->reconfig_events);
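
The new tx_disable flag closes a race where the send-completion path could wake a transmit queue that netvsc_close() or netvsc_detach() had just stopped. virt_wmb() publishes the flag before the queue state changes, and both wake-up sites in netvsc.c now test it before calling netif_tx_wake_queue(). A reduced sketch of the writer side, reusing the driver's own types:

    #include <linux/netdevice.h>
    #include <asm/barrier.h>

    static void example_tx_disable(struct netvsc_device *nvdev,
                                   struct net_device *ndev)
    {
            nvdev->tx_disable = true;
            virt_wmb();             /* flag visible before queues stop */
            netif_tx_disable(ndev);
    }
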
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index cd1d8faccca5..cd6b95e673a5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
WQ_MEM_RECLAIM);
+ if (unlikely(!lp->wqueue)) {
+ ret = -ENOMEM;
+ goto err_hw_init;
+ }
ret = adf7242_hw_init(lp);
if (ret)
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b6743f03dce0..3b88846de31b 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
goto out_err;
}
- genlmsg_reply(skb, info);
+ res = genlmsg_reply(skb, info);
break;
}
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index c589f5ae75bb..8bb53ec8d9cf 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -533,6 +533,8 @@ mcr20a_start(struct ieee802154_hw *hw)
dev_dbg(printdev(lp), "no slotted operation\n");
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
DAR_PHY_CTRL1_SLOTTED, 0x0);
+ if (ret < 0)
+ return ret;
/* enable irq */
enable_irq(lp->spi->irq);
@@ -540,11 +542,15 @@ mcr20a_start(struct ieee802154_hw *hw)
/* Unmask SEQ interrupt */
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
DAR_PHY_CTRL2_SEQMSK, 0x0);
+ if (ret < 0)
+ return ret;
/* Start the RX sequence */
dev_dbg(printdev(lp), "start the RX sequence\n");
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
+ if (ret < 0)
+ return ret;
return 0;
}
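
mcr20a_start() now propagates failures from each regmap_update_bits() call instead of discarding them, so a failed SPI register update aborts the start sequence. A condensed sketch of the pattern; the EX_* registers and masks are stand-ins, not real mcr20a definitions:

    #include <linux/regmap.h>

    #define EX_CTRL1        0x01    /* hypothetical registers */
    #define EX_CTRL2        0x02
    #define EX_SLOTTED      0x08    /* hypothetical masks */
    #define EX_SEQMSK       0x80
    #define EX_SEQ_MASK     0x07
    #define EX_SEQ_RX       0x01

    static int example_start(struct regmap *map)
    {
            int ret;

            /* Any step can fail on the bus; return the first error. */
            ret = regmap_update_bits(map, EX_CTRL1, EX_SLOTTED, 0);
            if (ret < 0)
                    return ret;

            ret = regmap_update_bits(map, EX_CTRL2, EX_SEQMSK, 0);
            if (ret < 0)
                    return ret;

            return regmap_update_bits(map, EX_CTRL1, EX_SEQ_MASK,
                                      EX_SEQ_RX);
    }
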
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 071869db44cf..520657945b82 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE
help
MDIO devices and driver infrastructure code.
+if MDIO_DEVICE
+
config MDIO_BUS
tristate
default m if PHYLIB=m
@@ -179,6 +181,7 @@ config MDIO_XGENE
APM X-Gene SoC's.
endif
+endif
config PHYLINK
tristate
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 9605d4fe540b..cb86a3e90c7d 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev)
bcm54xx_phydsp_config(phydev);
+ /* Encode link speed into LED1 and LED3 pair (green/amber).
+ * Also flash these two LEDs on activity. This means configuring
+ * them for MULTICOLOR and encoding link/activity into them.
+ */
+ val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
+ BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
+ bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
+
+ val = BCM_LED_MULTICOLOR_IN_PHASE |
+ BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
+ BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
+ bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
+
return 0;
}
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index bbd8c22067f3..97d45bd5b38e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -15,6 +15,8 @@
#include <linux/netdevice.h>
#define DP83822_PHY_ID 0x2000a240
+#define DP83825I_PHY_ID 0x2000a150
+
#define DP83822_DEVADDR 0x1f
#define MII_DP83822_PHYSCR 0x11
@@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev)
return 0;
}
+#define DP83822_PHY_DRIVER(_id, _name) \
+ { \
+ PHY_ID_MATCH_MODEL(_id), \
+ .name = (_name), \
+ .features = PHY_BASIC_FEATURES, \
+ .soft_reset = dp83822_phy_reset, \
+ .config_init = dp83822_config_init, \
+ .get_wol = dp83822_get_wol, \
+ .set_wol = dp83822_set_wol, \
+ .ack_interrupt = dp83822_ack_interrupt, \
+ .config_intr = dp83822_config_intr, \
+ .suspend = dp83822_suspend, \
+ .resume = dp83822_resume, \
+ }
+
static struct phy_driver dp83822_driver[] = {
- {
- .phy_id = DP83822_PHY_ID,
- .phy_id_mask = 0xfffffff0,
- .name = "TI DP83822",
- .features = PHY_BASIC_FEATURES,
- .config_init = dp83822_config_init,
- .soft_reset = dp83822_phy_reset,
- .get_wol = dp83822_get_wol,
- .set_wol = dp83822_set_wol,
- .ack_interrupt = dp83822_ack_interrupt,
- .config_intr = dp83822_config_intr,
- .suspend = dp83822_suspend,
- .resume = dp83822_resume,
- },
+ DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
+ DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
};
module_phy_driver(dp83822_driver);
static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
+ { DP83825I_PHY_ID, 0xfffffff0 },
{ },
};
MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
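
The DP83822_PHY_DRIVER() macro collapses the shared callback wiring so the new DP83825I entry differs only in ID and name; PHY_ID_MATCH_MODEL() expands to the .phy_id/.phy_id_mask pair with a 0xfffffff0 mask, matching the mdio_device_id table. The same technique sketched for a hypothetical two-chip driver (IDs and callback are made up):

    #include <linux/phy.h>

    static int ex_config_init(struct phy_device *phydev)
    {
            return 0;       /* hypothetical shared init */
    }

    #define EX_PHY_DRIVER(_id, _name)               \
            {                                       \
                    PHY_ID_MATCH_MODEL(_id),        \
                    .name = (_name),                \
                    .config_init = ex_config_init,  \
            }

    static struct phy_driver ex_driver[] = {
            EX_PHY_DRIVER(0x2000a240, "Example PHY A"),
            EX_PHY_DRIVER(0x2000a150, "Example PHY B"),
    };
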
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 3ccba37bd6dd..f76c4048b978 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1489,9 +1489,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
{
+ int count = marvell_get_sset_count(phydev);
int i;
- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
+ for (i = 0; i < count; i++) {
strlcpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
@@ -1519,9 +1520,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
static void marvell_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
+ int count = marvell_get_sset_count(phydev);
int i;
- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
+ for (i = 0; i < count; i++)
data[i] = marvell_get_stat(phydev, i);
}
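
The ethtool callbacks form a contract: get_sset_count(), get_strings() and get_stats() must all report the same number of entries, so both loops are now bounded by marvell_get_sset_count() rather than the full ARRAY_SIZE() of the table. Sketched for a hypothetical PHY whose statistics count varies by model (ex_stats and the fiber check are assumptions):

    #include <linux/phy.h>
    #include <linux/ethtool.h>
    #include <linux/string.h>

    static const struct { const char *string; } ex_stats[] = {
            { "ex_rx_err" }, { "ex_tx_err" }, { "ex_fiber_los" },
    };

    static int ex_get_sset_count(struct phy_device *phydev)
    {
            /* hypothetical: fiber models expose one extra counter */
            return phydev->interface == PHY_INTERFACE_MODE_1000BASEX ? 3 : 2;
    }

    static void ex_get_strings(struct phy_device *phydev, u8 *data)
    {
            int count = ex_get_sset_count(phydev);
            int i;

            /* Same bound as the stats loop, never ARRAY_SIZE(ex_stats). */
            for (i = 0; i < count; i++)
                    strlcpy(data + i * ETH_GSTRING_LEN,
                            ex_stats[i].string, ETH_GSTRING_LEN);
    }
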
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index a238388eb1a5..0eec2913c289 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
static int meson_gxl_config_intr(struct phy_device *phydev)
{
u16 val;
+ int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
val = INTSRC_ANEG_PR
@@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
val = 0;
}
+ /* Ack any pending IRQ */
+ ret = meson_gxl_ack_interrupt(phydev);
+ if (ret)
+ return ret;
+
return phy_write(phydev, INTSRC_MASK, val);
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 49fdd1ee798e..77068c545de0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev)
{
int ret;
- ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
if (ret < 0)
return ret;
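
Writing BMCR_RESET alone zeroes every other BMCR bit in the same write, momentarily clearing bits such as BMCR_ANENABLE before the reset takes effect, which confuses some PHYs; phy_set_bits() performs a read-modify-write so only the reset bit changes. Roughly what the helper amounts to here, as a sketch:

    #include <linux/phy.h>

    static int example_soft_reset(struct phy_device *phydev)
    {
            int val = phy_read(phydev, MII_BMCR);

            if (val < 0)
                    return val;

            /* Preserve the other BMCR bits, set only the reset bit. */
            return phy_write(phydev, MII_BMCR, val | BMCR_RESET);
    }
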
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 92b64e254b44..7475cef17cf7 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -159,6 +159,14 @@ static const struct spi_device_id ks8995_id[] = {
};
MODULE_DEVICE_TABLE(spi, ks8995_id);
+static const struct of_device_id ks8895_spi_of_match[] = {
+ { .compatible = "micrel,ks8995" },
+ { .compatible = "micrel,ksz8864" },
+ { .compatible = "micrel,ksz8795" },
+ { },
+ };
+MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
+
static inline u8 get_chip_id(u8 val)
{
return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
@@ -526,6 +534,7 @@ static int ks8995_remove(struct spi_device *spi)
static struct spi_driver ks8995_driver = {
.driver = {
.name = "spi-ks8995",
+ .of_match_table = of_match_ptr(ks8895_spi_of_match),
},
.probe = ks8995_probe,
.remove = ks8995_remove,
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 7ccdc62c6052..ff61dd8748de 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -222,7 +222,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out_free;
}
state->sha1->tfm = shash;
- state->sha1->flags = 0;
digestsize = crypto_shash_digestsize(shash);
if (digestsize < MPPE_MAX_KEY_LEN)
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 8f09edd811e9..50c60550f295 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -532,6 +532,7 @@ static void pptp_sock_destruct(struct sock *sk)
pppox_unbind_sock(sk);
}
skb_queue_purge(&sk->sk_receive_queue);
+ dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}
static int pptp_create(struct net *net, struct socket *sock, int kern)
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index f4e93f5fc204..ea90db3c7705 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -153,7 +153,7 @@ out_fail:
void
slhc_free(struct slcompress *comp)
{
- if ( comp == NULLSLCOMPR )
+ if ( IS_ERR_OR_NULL(comp) )
return;
if ( comp->tstate != NULLSLSTATE )
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6ed96fdfd96d..16963f7a88f7 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1156,6 +1156,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EINVAL;
}
+ if (netdev_has_upper_dev(dev, port_dev)) {
+ NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
+ netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+ portname);
+ return -EBUSY;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
@@ -1246,6 +1253,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
goto err_option_port_add;
}
+ /* set promiscuity level to new slave */
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(port_dev, 1);
+ if (err)
+ goto err_set_slave_promisc;
+ }
+
+ /* set allmulti level to new slave */
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(port_dev, 1);
+ if (err) {
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(port_dev, -1);
+ goto err_set_slave_promisc;
+ }
+ }
+
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
@@ -1262,6 +1286,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return 0;
+err_set_slave_promisc:
+ __team_option_inst_del_port(team, port);
+
err_option_port_add:
team_upper_dev_unlink(team, port);
@@ -1307,6 +1334,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
team_port_disable(team, port);
list_del_rcu(&port->list);
+
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(port_dev, -1);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(port_dev, -1);
+
team_upper_dev_unlink(team, port);
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
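
dev_set_promiscuity() and dev_set_allmulti() are reference counted, so the team device takes one reference per flag when a port joins and drops it when the port leaves; the new error path unwinds the promisc reference if the allmulti step fails after it. A reduced sketch of the add-side sequencing, with hypothetical function and parameter names:

    #include <linux/netdevice.h>

    static int example_port_add(struct net_device *master,
                                struct net_device *port)
    {
            int err;

            if (master->flags & IFF_PROMISC) {
                    err = dev_set_promiscuity(port, 1);     /* +1 ref */
                    if (err)
                            return err;
            }

            if (master->flags & IFF_ALLMULTI) {
                    err = dev_set_allmulti(port, 1);
                    if (err) {
                            /* Roll back the reference taken above. */
                            if (master->flags & IFF_PROMISC)
                                    dev_set_promiscuity(port, -1);
                            return err;
                    }
            }

            return 0;
    }
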
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1d68921723dc..e9ca1c088d0b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tfile);
- if (!(tun->dev->flags & IFF_UP))
- return -EIO;
-
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
return -EINVAL;
@@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
err = skb_copy_datagram_from_iter(skb, 0, from, len);
if (err) {
+ err = -EFAULT;
+drop:
this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
if (frags) {
@@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
mutex_unlock(&tfile->napi_mutex);
}
- return -EFAULT;
+ return err;
}
}
@@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
!tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
+ rcu_read_lock();
+ if (unlikely(!(tun->dev->flags & IFF_UP))) {
+ err = -EIO;
+ rcu_read_unlock();
+ goto drop;
+ }
+
if (frags) {
/* Exercise flow dissector code path. */
u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
@@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (unlikely(headlen > skb_headlen(skb))) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
napi_free_frags(&tfile->napi);
+ rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
WARN_ON(1);
return -ENOMEM;
@@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
} else {
netif_rx_ni(skb);
}
+ rcu_read_unlock();
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
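
Checking IFF_UP once on entry to tun_get_user() was racy: the device can be brought down, with its resources freed after an RCU grace period, between that early check and the final netif_rx_ni(). The fix re-checks the flag under rcu_read_lock() and holds the lock across packet injection, so a concurrent teardown that synchronizes with RCU readers cannot complete in between. The general shape, as a hypothetical helper:

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    static int example_inject(struct net_device *dev, struct sk_buff *skb)
    {
            int err = 0;

            rcu_read_lock();
            if (unlikely(!(dev->flags & IFF_UP))) {
                    err = -EIO;             /* device went down under us */
                    kfree_skb(skb);
            } else {
                    /* dev stays usable: destroy paths wait for readers */
                    netif_rx_ni(skb);
            }
            rcu_read_unlock();

            return err;
    }
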
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 820a2fe7d027..aff995be2a31 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
.tx_fixup = aqc111_tx_fixup,
};
+static const struct driver_info qnap_info = {
+ .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
{AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
{AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
{AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
+ {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
{ },/* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5512a1038721..3e9b2c319e45 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -851,6 +851,14 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbdb4b15..679e404a5224 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1122,9 +1122,16 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
+ {QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */
+ {QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */
+ {QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */
+ {QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */
+ {QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */
+ {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
{QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -1180,6 +1187,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
{QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
+ {QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */
{QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
@@ -1200,9 +1208,12 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
+ {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7c1430ed0244..9ee4d7402ca2 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -875,6 +875,7 @@ static const struct net_device_ops vrf_netdev_ops = {
.ndo_init = vrf_dev_init,
.ndo_uninit = vrf_dev_uninit,
.ndo_start_xmit = vrf_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = vrf_get_stats64,
.ndo_add_slave = vrf_add_slave,
.ndo_del_slave = vrf_del_slave,
@@ -1273,9 +1274,15 @@ static void vrf_setup(struct net_device *dev)
/* default to no qdisc; user can add if desired */
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_RX_HANDLER;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- dev->min_mtu = 0;
- dev->max_mtu = 0;
+ /* VRF devices do not care about MTU, but if the MTU is set
+ * too low then the ipv4 and ipv6 protocols are disabled
+ * which breaks networking.
+ */
+ dev->min_mtu = IPV6_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a3c46d78d216..d76dfed8d9bb 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1731,6 +1731,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
+ rcu_read_lock();
+
+ if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
+ rcu_read_unlock();
+ atomic_long_inc(&vxlan->dev->rx_dropped);
+ goto drop;
+ }
+
stats = this_cpu_ptr(vxlan->dev->tstats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
@@ -1738,6 +1746,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
u64_stats_update_end(&stats->syncp);
gro_cells_receive(&vxlan->gro_cells, skb);
+
+ rcu_read_unlock();
+
return 0;
drop:
@@ -2690,7 +2701,7 @@ static void vxlan_cleanup(struct timer_list *t)
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock(&vxlan->hash_lock);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@ -2712,7 +2723,7 @@ static void vxlan_cleanup(struct timer_list *t)
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock(&vxlan->hash_lock);
}
mod_timer(&vxlan->age_timer, next_timer);
@@ -2767,6 +2778,8 @@ static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ gro_cells_destroy(&vxlan->gro_cells);
+
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
free_percpu(dev->tstats);
@@ -3942,7 +3955,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
vxlan_flush(vxlan, true);
- gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
}
@@ -4323,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
- if (!net_eq(dev_net(vxlan->dev), net)) {
- gro_cells_destroy(&vxlan->gro_cells);
+ if (!net_eq(dev_net(vxlan->dev), net))
unregister_netdevice_queue(vxlan->dev, head);
- }
}
for (h = 0; h < PORT_HASH_SIZE; ++h)
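
Three related lifetime fixes in this file: the receive path re-checks IFF_UP under rcu_read_lock() before touching per-CPU stats and GRO cells; gro_cells_destroy() moves into vxlan_uninit(), which runs only once the device is unregistered and the RX path quiesced; and vxlan_cleanup(), being a timer callback that already executes in softirq context, can use plain spin_lock() instead of spin_lock_bh(). A sketch of that last point, with hypothetical structure names:

    #include <linux/timer.h>
    #include <linux/spinlock.h>

    struct example_dev {
            struct timer_list age_timer;
            spinlock_t hash_lock;
    };

    /* Timer callbacks run with BHs already disabled, so the _bh lock
     * variants are redundant here; process-context users of hash_lock
     * must still take it with spin_lock_bh().
     */
    static void example_age_timer(struct timer_list *t)
    {
            struct example_dev *ed = from_timer(ed, t, age_timer);

            spin_lock(&ed->hash_lock);
            /* ... expire stale forwarding entries ... */
            spin_unlock(&ed->hash_lock);
    }
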
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 24b983edb357..eca87f7c5b6c 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1855,7 +1855,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_ce_crash_data ce_data;
u32 addr, id;
- lockdep_assert_held(&ar->data_lock);
+ lockdep_assert_held(&ar->dump_mutex);
ath10k_err(ar, "Copy Engine register dump:\n");
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 835b8de92d55..aff585658fc0 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3119,6 +3119,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
goto err_free_wq;
mutex_init(&ar->conf_mutex);
+ mutex_init(&ar->dump_mutex);
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->peers);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e08a17b01e03..e35aae5146f1 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1063,6 +1063,9 @@ struct ath10k {
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
+ /* protects coredump data */
+ struct mutex dump_mutex;
+
/* protects shared structure data */
spinlock_t data_lock;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 33838d9c1cb6..45a355fb62b9 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -1102,7 +1102,7 @@ struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
- lockdep_assert_held(&ar->data_lock);
+ lockdep_assert_held(&ar->dump_mutex);
if (ath10k_coredump_mask == 0)
/* coredump disabled */
@@ -1146,7 +1146,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
if (!buf)
return NULL;
- spin_lock_bh(&ar->data_lock);
+ mutex_lock(&ar->dump_mutex);
dump_data = (struct ath10k_dump_file_data *)(buf);
strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
@@ -1213,7 +1213,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
}
- spin_unlock_bh(&ar->data_lock);
+ mutex_unlock(&ar->dump_mutex);
return dump_data;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index a20ea270d519..1acc622d2183 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
num_msdus++;
num_bytes += ret;
}
- ieee80211_return_txq(hw, txq);
+ ieee80211_return_txq(hw, txq, false);
ieee80211_txq_schedule_end(hw, txq->ac);
record->num_msdus = cpu_to_le16(num_msdus);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index b73c23d4ce86..9c703d287333 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
if (ret < 0)
break;
}
- ieee80211_return_txq(hw, txq);
+ ieee80211_return_txq(hw, txq, false);
ath10k_htt_tx_txq_update(hw, txq);
if (ret == -EBUSY)
break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
if (ret < 0)
break;
}
- ieee80211_return_txq(hw, txq);
+ ieee80211_return_txq(hw, txq, false);
ath10k_htt_tx_txq_update(hw, txq);
out:
ieee80211_txq_schedule_end(hw, ac);
@@ -5774,7 +5774,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_MCAST_RATE &&
- !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
+ !ath10k_mac_vif_chan(arvif->vif, &def)) {
band = def.chan->band;
rateidx = vif->bss_conf.mcast_rate[band] - 1;
@@ -5812,7 +5812,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_BASIC_RATES) {
- if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
+ if (ath10k_mac_vif_chan(vif, &def)) {
mutex_unlock(&ar->conf_mutex);
return;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 271f92c24d44..2c27f407a851 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1441,7 +1441,7 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
int i, ret;
- lockdep_assert_held(&ar->data_lock);
+ lockdep_assert_held(&ar->dump_mutex);
ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
hi_failure_state,
@@ -1656,7 +1656,7 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
int ret, i;
u8 *buf;
- lockdep_assert_held(&ar->data_lock);
+ lockdep_assert_held(&ar->dump_mutex);
if (!crash_data)
return;
@@ -1734,14 +1734,19 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
}
}
-static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+static void ath10k_pci_fw_dump_work(struct work_struct *work)
{
+ struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
+ dump_work);
struct ath10k_fw_crash_data *crash_data;
+ struct ath10k *ar = ar_pci->ar;
char guid[UUID_STRING_LEN + 1];
- spin_lock_bh(&ar->data_lock);
+ mutex_lock(&ar->dump_mutex);
+ spin_lock_bh(&ar->data_lock);
ar->stats.fw_crash_counter++;
+ spin_unlock_bh(&ar->data_lock);
crash_data = ath10k_coredump_new(ar);
@@ -1756,11 +1761,18 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ath10k_ce_dump_registers(ar, crash_data);
ath10k_pci_dump_memory(ar, crash_data);
- spin_unlock_bh(&ar->data_lock);
+ mutex_unlock(&ar->dump_mutex);
queue_work(ar->workqueue, &ar->restart_work);
}
+static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ queue_work(ar->workqueue, &ar_pci->dump_work);
+}
+
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
@@ -3442,6 +3454,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
spin_lock_init(&ar_pci->ps_lock);
mutex_init(&ar_pci->ce_diag_mutex);
+ INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
+
timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 3773c79f322f..4455ed6c5275 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -121,6 +121,8 @@ struct ath10k_pci {
/* For protecting ce_diag */
struct mutex ce_diag_mutex;
+ struct work_struct dump_work;
+
struct ath10k_ce ce;
struct timer_list rx_post_retry;
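
Building a firmware coredump reads large amounts of device memory and previously ran under the atomic data_lock spinlock; this rework moves it into a dedicated work item serialized by the new sleepable dump_mutex, while the crash handler, which may run in atomic context, only queues the work. The shape of that conversion, with hypothetical names throughout:

    #include <linux/workqueue.h>
    #include <linux/mutex.h>

    struct example_priv {
            struct work_struct dump_work;
            struct mutex dump_mutex;
            struct workqueue_struct *wq;
    };

    static void example_dump_work(struct work_struct *work)
    {
            struct example_priv *ep =
                    container_of(work, struct example_priv, dump_work);

            mutex_lock(&ep->dump_mutex);    /* may sleep: fine in a work item */
            /* ... long-running register/memory dump ... */
            mutex_unlock(&ep->dump_mutex);
    }

    /* Callable from atomic context: defer the heavy lifting. */
    static void example_fw_crashed(struct example_priv *ep)
    {
            queue_work(ep->wq, &ep->dump_work);
    }
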
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a2351ef45ae0..65a4c142640d 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -837,7 +837,6 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
txq->link = &ds->ds_link;
ath5k_hw_start_tx_dma(ah, txq->qnum);
- mmiowb();
spin_unlock_bh(&txq->lock);
return 0;
@@ -2174,7 +2173,6 @@ ath5k_beacon_config(struct ath5k_hw *ah)
}
ath5k_hw_set_imr(ah, ah->imask);
- mmiowb();
spin_unlock_bh(&ah->block);
}
@@ -2779,7 +2777,6 @@ int ath5k_start(struct ieee80211_hw *hw)
ret = 0;
done:
- mmiowb();
mutex_unlock(&ah->lock);
set_bit(ATH_STAT_STARTED, ah->status);
@@ -2839,7 +2836,6 @@ void ath5k_stop(struct ieee80211_hw *hw)
"putting device to sleep\n");
}
- mmiowb();
mutex_unlock(&ah->lock);
ath5k_stop_tasklets(ah);
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 16e052d02c94..5e866a193ed0 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -263,7 +263,6 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = 0;
ath5k_hw_set_bssid(ah);
- mmiowb();
}
if (changes & BSS_CHANGED_BEACON_INT)
@@ -528,7 +527,6 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
- mmiowb();
mutex_unlock(&ah->lock);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 773d428ff1b0..b17e1ca40995 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
goto out;
while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+ bool force;
+
tid = (struct ath_atx_tid *)queue->drv_priv;
ret = ath_tx_sched_aggr(sc, txq, tid);
ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
- ieee80211_return_txq(hw, queue);
+ force = !skb_queue_empty(&tid->retry_q);
+ ieee80211_return_txq(hw, queue, force);
}
out:
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 74be3c809225..4c7980f84591 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -485,7 +485,6 @@ static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val)
val = swab32(val);
b43_write32(dev, B43_MMIO_RAM_CONTROL, offset);
- mmiowb();
b43_write32(dev, B43_MMIO_RAM_DATA, val);
}
@@ -656,9 +655,7 @@ static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf)
/* The hardware guarantees us an atomic write, if we
* write the low register first. */
b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, low);
- mmiowb();
b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, high);
- mmiowb();
}
void b43_tsf_write(struct b43_wldev *dev, u64 tsf)
@@ -1822,11 +1819,9 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
if (b43_bus_host_is_sdio(dev->dev)) {
/* wl->mutex is enough. */
b43_do_beacon_update_trigger_work(dev);
- mmiowb();
} else {
spin_lock_irq(&wl->hardirq_lock);
b43_do_beacon_update_trigger_work(dev);
- mmiowb();
spin_unlock_irq(&wl->hardirq_lock);
}
}
@@ -2078,7 +2073,6 @@ static irqreturn_t b43_interrupt_thread_handler(int irq, void *dev_id)
mutex_lock(&dev->wl->mutex);
b43_do_interrupt_thread(dev);
- mmiowb();
mutex_unlock(&dev->wl->mutex);
return IRQ_HANDLED;
@@ -2143,7 +2137,6 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
spin_lock(&dev->wl->hardirq_lock);
ret = b43_do_interrupt(dev);
- mmiowb();
spin_unlock(&dev->wl->hardirq_lock);
return ret;
diff --git a/drivers/net/wireless/broadcom/b43/sysfs.c b/drivers/net/wireless/broadcom/b43/sysfs.c
index 3190493bd07f..93d03b673670 100644
--- a/drivers/net/wireless/broadcom/b43/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43/sysfs.c
@@ -129,7 +129,6 @@ static ssize_t b43_attr_interfmode_store(struct device *dev,
} else
err = -ENOSYS;
- mmiowb();
mutex_unlock(&wldev->wl->mutex);
return err ? err : count;
diff --git a/drivers/net/wireless/broadcom/b43legacy/ilt.c b/drivers/net/wireless/broadcom/b43legacy/ilt.c
index ee5682e54204..6d15fb4d30c6 100644
--- a/drivers/net/wireless/broadcom/b43legacy/ilt.c
+++ b/drivers/net/wireless/broadcom/b43legacy/ilt.c
@@ -315,14 +315,12 @@ const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE] = {
void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset);
- mmiowb();
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val);
}
void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, u32 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset);
- mmiowb();
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2,
(val & 0xFFFF0000) >> 16);
b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1,
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 55f411925960..c777efc6dc13 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -264,7 +264,6 @@ static void b43legacy_ram_write(struct b43legacy_wldev *dev, u16 offset,
val = swab32(val);
b43legacy_write32(dev, B43legacy_MMIO_RAM_CONTROL, offset);
- mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_RAM_DATA, val);
}
@@ -341,14 +340,11 @@ void b43legacy_shm_write32(struct b43legacy_wldev *dev,
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
- mmiowb();
b43legacy_write16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED,
(value >> 16) & 0xffff);
- mmiowb();
b43legacy_shm_control_word(dev, routing,
(offset >> 2) + 1);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA,
value & 0xffff);
return;
@@ -356,7 +352,6 @@ void b43legacy_shm_write32(struct b43legacy_wldev *dev,
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
- mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, value);
}
@@ -368,7 +363,6 @@ void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset,
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
- mmiowb();
b43legacy_write16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED,
value);
@@ -377,7 +371,6 @@ void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset,
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, value);
}
@@ -471,7 +464,6 @@ static void b43legacy_time_lock(struct b43legacy_wldev *dev)
status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
status |= B43legacy_MACCTL_TBTTHOLD;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
- mmiowb();
}
static void b43legacy_time_unlock(struct b43legacy_wldev *dev)
@@ -494,10 +486,8 @@ static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf)
u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32;
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, 0);
- mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_HIGH,
hi);
- mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW,
lo);
} else {
@@ -507,13 +497,9 @@ static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf)
u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48;
b43legacy_write16(dev, B43legacy_MMIO_TSF_0, 0);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_3, v3);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_2, v2);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_1, v1);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_0, v0);
}
}
@@ -1250,7 +1236,6 @@ static void b43legacy_beacon_update_trigger_work(struct work_struct *work)
/* The handler might have updated the IRQ mask. */
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK,
dev->irq_mask);
- mmiowb();
spin_unlock_irq(&wl->irq_lock);
}
mutex_unlock(&wl->mutex);
@@ -1346,7 +1331,6 @@ static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
b43legacy_controller_restart(dev, "DMA error");
- mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
return;
}
@@ -1396,7 +1380,6 @@ static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
handle_irq_transmit_status(dev);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
- mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}
@@ -1488,7 +1471,6 @@ static irqreturn_t b43legacy_interrupt_handler(int irq, void *dev_id)
dev->irq_reason = reason;
tasklet_schedule(&dev->isr_tasklet);
out:
- mmiowb();
spin_unlock(&dev->wl->irq_lock);
return ret;
@@ -2781,7 +2763,6 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
- mmiowb();
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
@@ -2900,7 +2881,6 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
/* XXX: why? */
- mmiowb();
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c
index 995c7d0c212a..f949766d27ca 100644
--- a/drivers/net/wireless/broadcom/b43legacy/phy.c
+++ b/drivers/net/wireless/broadcom/b43legacy/phy.c
@@ -134,7 +134,6 @@ u16 b43legacy_phy_read(struct b43legacy_wldev *dev, u16 offset)
void b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_PHY_DATA, val);
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/pio.h b/drivers/net/wireless/broadcom/b43legacy/pio.h
index 1cd1b9ca5e9c..08cd02282beb 100644
--- a/drivers/net/wireless/broadcom/b43legacy/pio.h
+++ b/drivers/net/wireless/broadcom/b43legacy/pio.h
@@ -92,7 +92,6 @@ void b43legacy_pio_write(struct b43legacy_pioqueue *queue,
u16 offset, u16 value)
{
b43legacy_write16(queue->dev, queue->mmio_base + offset, value);
- mmiowb();
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/radio.c b/drivers/net/wireless/broadcom/b43legacy/radio.c
index eab1c9387846..c6db444ea07e 100644
--- a/drivers/net/wireless/broadcom/b43legacy/radio.c
+++ b/drivers/net/wireless/broadcom/b43legacy/radio.c
@@ -95,7 +95,6 @@ void b43legacy_radio_lock(struct b43legacy_wldev *dev)
B43legacy_WARN_ON(status & B43legacy_MACCTL_RADIOLOCK);
status |= B43legacy_MACCTL_RADIOLOCK;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
- mmiowb();
udelay(10);
}
@@ -108,7 +107,6 @@ void b43legacy_radio_unlock(struct b43legacy_wldev *dev)
B43legacy_WARN_ON(!(status & B43legacy_MACCTL_RADIOLOCK));
status &= ~B43legacy_MACCTL_RADIOLOCK;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
- mmiowb();
}
u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset)
@@ -141,7 +139,6 @@ u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset)
void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val)
{
b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset);
- mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_RADIO_DATA_LOW, val);
}
@@ -333,7 +330,6 @@ u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev)
void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val)
{
b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset);
- mmiowb();
b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val);
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/sysfs.c b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
index 2a1da15c913b..2db83eec7a11 100644
--- a/drivers/net/wireless/broadcom/b43legacy/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
@@ -143,7 +143,6 @@ static ssize_t b43legacy_attr_interfmode_store(struct device *dev,
if (err)
b43legacyerr(wldev->wl, "Interference Mitigation not "
"supported by device\n");
- mmiowb();
spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
mutex_unlock(&wldev->wl->mutex);
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index b079c64ca014..986646af8dfd 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2030,13 +2030,6 @@ static inline void
_il_release_nic_access(struct il_priv *il)
{
_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- /*
- * In above we are reading CSR_GP_CNTRL register, what will flush any
- * previous writes, but still want write, which clear MAC_ACCESS_REQ
- * bit, be performed on PCI bus before any other writes scheduled on
- * different CPUs (after we drop reg_lock).
- */
- mmiowb();
}
static inline u32
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index fdc56f821b5a..0a87d87fbb4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -82,6 +82,7 @@
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
@@ -105,8 +106,8 @@
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
-#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
- IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
@@ -200,7 +201,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
#define IWL_DEVICE_AX210 \
IWL_DEVICE_AX200_COMMON, \
.device_family = IWL_DEVICE_FAMILY_AX210, \
- .base_params = &iwl_22000_base_params, \
+ .base_params = &iwl_22560_base_params, \
.csr = &iwl_csr_v1, \
.min_txq_size = 128
@@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22260_2ax_cfg = {
- .name = "Intel(R) Wireless-AX 22260",
+const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+ .name = "Intel(R) Wi-Fi 6 AX101",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax200_cfg_cc = {
+ .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = {
};
const struct iwl_cfg killer1650x_2ax_cfg = {
- .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)",
+ .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
};
const struct iwl_cfg killer1650w_2ax_cfg = {
- .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)",
+ .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index 575a7022d045..3846064d51a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -136,6 +136,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.ht_params = &iwl5000_ht_params,
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
+ .csr = &iwl_csr_v1,
};
#define IWL_DEVICE_5150 \
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index f119c49cd39c..d7380016f1c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
if (!range) {
IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
+ memset(*data, 0, le32_to_cpu((*data)->len));
return;
}
@@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
if (range_size < 0) {
IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
+ memset(*data, 0, le32_to_cpu((*data)->len));
return;
}
range = range + range_size;
@@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
trigger = fwrt->dump.active_trigs[id].trig;
- size = sizeof(*dump_file);
- size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
-
+ size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
if (!size)
return NULL;
+ size += sizeof(*dump_file);
+
dump_file = vzalloc(size);
if (!dump_file)
return NULL;
@@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
iwl_dump_error_desc->len = 0;
ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
- if (ret) {
+ if (ret)
kfree(iwl_dump_error_desc);
- } else {
- set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-
- /* trigger nmi to halt the fw */
- iwl_force_nmi(fwrt->trans);
- }
+ else
+ iwl_trans_sync_nmi(fwrt->trans);
return ret;
}
@@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
{
- /* if the wait event timeout elapses instead of wake up then
- * the driver did not receive NMI interrupt and can not assume the FW
- * is halted
- */
- int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
- !test_bit(STATUS_FW_WAIT_DUMP,
- &fwrt->trans->status),
- msecs_to_jiffies(2000));
- if (!ret) {
- /* failed to receive NMI interrupt, assuming the FW is stuck */
- set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
-
- clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
- }
-
- /* Assuming the op mode mutex is held at this point */
iwl_fw_dbg_collect_sync(fwrt);
iwl_trans_stop_device(fwrt->trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 641c95d03b15..e06407dc088b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -93,7 +93,7 @@ struct iwl_ucode_header {
} u;
};
-#define IWL_UCODE_INI_TLV_GROUP BIT(24)
+#define IWL_UCODE_INI_TLV_GROUP 0x1000000
/*
* new TLV uCode file layout
@@ -148,11 +148,14 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
- IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
- IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
- IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
- IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4,
- IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5,
+
+ IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP + 0x1,
+ IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
+ IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP + 0x2,
+ IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP + 0x3,
+ IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP + 0x4,
+ IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP + 0x5,
+ IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW,
/* TLVs 0x1000-0x2000 are for internal driver usage */
IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
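
Turning the INI TLV group from a BIT(24) flag into a plain 0x1000000 offset with _BASE/_MAX sentinels lets the loader (see the iwl-dbg-tlv.c hunk below) test membership with a range compare; the old tlv_type & IWL_UCODE_INI_TLV_GROUP test treated bit 24 as a flag and would also match any unrelated type value that happened to have that bit set. The same sentinel technique on a hypothetical enum:

    #include <linux/types.h>

    enum ex_tlv_type {
            EX_TLV_GROUP_BASE   = 0x1000000,
            EX_TLV_ALLOC        = EX_TLV_GROUP_BASE + 0x1,
            EX_TLV_DEBUG_BASE   = EX_TLV_ALLOC,     /* first debug TLV */
            EX_TLV_HCMD         = EX_TLV_GROUP_BASE + 0x2,
            EX_TLV_FLOW         = EX_TLV_GROUP_BASE + 0x5,
            EX_TLV_DEBUG_MAX    = EX_TLV_FLOW,      /* last debug TLV */
    };

    static bool ex_is_debug_tlv(u32 type)
    {
            return type >= EX_TLV_DEBUG_BASE && type <= EX_TLV_DEBUG_MAX;
    }
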
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 7adf4e4e841a..12310e3d2fc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
- init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index f5f87773667b..93070848280a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22260_2ax_cfg;
+extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index aea6d03e545a..e539bc94eff7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -327,6 +327,7 @@ enum {
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
+#define CSR_HW_REV_TYPE_QUZ (0x0000354)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
#define CSR_HW_REV_TYPE_SO (0x0000370)
#define CSR_HW_REV_TYPE_TY (0x0000420)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 5798f434f68f..c7070760a10a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -126,7 +126,8 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
len -= ALIGN(tlv_len, 4);
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
- if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP))
+ if (tlv_type < IWL_UCODE_TLV_DEBUG_BASE ||
+ tlv_type > IWL_UCODE_TLV_DEBUG_MAX)
continue;
hdr = (void *)&tlv->data[0];
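An aside on the check above: the old test keyed on a single group bit, while the new one relies on the debug TLV types forming a contiguous range between IWL_UCODE_TLV_DEBUG_BASE and IWL_UCODE_TLV_DEBUG_MAX. A minimal sketch of the same range-membership pattern, with illustrative values rather than the driver's:

    enum tlv_type {
            TLV_TYPE_BUFFER_ALLOCATION = 0x1000001, /* range base */
            TLV_TYPE_HCMD,
            TLV_TYPE_REGIONS,
            TLV_TYPE_TRIGGERS,
            TLV_TYPE_DEBUG_FLOW,                    /* range max */
    };

    /* holds only as long as the enum values stay contiguous */
    static bool is_debug_tlv(unsigned int type)
    {
            return type >= TLV_TYPE_BUFFER_ALLOCATION &&
                   type <= TLV_TYPE_DEBUG_FLOW;
    }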
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index bbebbf3efd57..d8690acee40c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -338,7 +338,6 @@ enum iwl_d3_status {
* are sent
* @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -351,7 +350,6 @@ enum iwl_trans_status {
STATUS_TRANS_GOING_IDLE,
STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
- STATUS_FW_WAIT_DUMP,
};
static inline int
@@ -618,6 +616,7 @@ struct iwl_trans_ops {
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
u32 dump_mask);
void (*debugfs_cleanup)(struct iwl_trans *trans);
+ void (*sync_nmi)(struct iwl_trans *trans);
};
/**
@@ -831,7 +830,6 @@ struct iwl_trans {
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
unsigned int error_event_table_tlv_status;
- wait_queue_head_t fw_halt_waitq;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
iwl_op_mode_nic_error(trans->op_mode);
+}
- if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status))
- wake_up(&trans->fw_halt_waitq);
-
+static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+ if (trans->ops->sync_nmi)
+ trans->ops->sync_nmi(trans);
}
/*****************************************************
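The iwl_trans_sync_nmi() helper added above is the usual optional-callback idiom: the inline wrapper checks the ops pointer, so transports that don't implement the hook need no stub. A generic sketch (struct names here are illustrative, not the driver's):

    struct ctx;

    struct ctx_ops {
            void (*sync_nmi)(struct ctx *c);  /* optional, may be NULL */
    };

    struct ctx {
            const struct ctx_ops *ops;
    };

    static inline void ctx_sync_nmi(struct ctx *c)
    {
            if (c->ops->sync_nmi)
                    c->ops->sync_nmi(c);
    }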
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 2453ceabf00d..6925527d8457 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -774,8 +774,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
-
- if (!mvmvif->dbgfs_dir) {
+ if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) {
IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
dbgfs_dir);
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index e9822a3ec373..94132cfd1f56 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
struct cfg80211_pmsr_result *res)
{
- s64 rtt_avg = res->ftm.rtt_avg * 100;
-
- do_div(rtt_avg, 6666);
+ s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
IWL_DEBUG_INFO(mvm, "entry %d\n", index);
IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
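The do_div() above was worth replacing because it treats its dividend as a u64 and modifies it in place, which corrupts negative values; rtt_avg is signed. div_s64() from <linux/math64.h> performs the signed division and returns the quotient. A sketch with the same scaling (the helper name is hypothetical):

    #include <linux/math64.h>

    static s64 scale_rtt(s64 rtt_avg)
    {
            /* signed 64-by-32 division, safe for negative rtt_avg */
            return div_s64(rtt_avg * 100, 6666);
    }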
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 00a47f6f1d81..ab68b5d53ec9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1121,7 +1121,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
+ if (ret != -ERFKILL)
+ iwl_fw_dbg_error_collect(&mvm->fwrt,
+ FW_DBG_TRIGGER_DRIVER);
goto error;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3a92c09d4692..6a3b11dd2edf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_remove(mvm, vif);
- kfree(mvmvif->ap_wep_key);
- mvmvif->ap_wep_key = NULL;
-
mutex_unlock(&mvm->mutex);
}
@@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_update_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
- /* if wep is used, need to set the key for the station now */
- if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
- mvm_sta->wep_key =
- kmemdup(mvmvif->ap_wep_key,
- sizeof(*mvmvif->ap_wep_key) +
- mvmvif->ap_wep_key->keylen,
- GFP_KERNEL);
- if (!mvm_sta->wep_key) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- ret = iwl_mvm_set_sta_key(mvm, vif, sta,
- mvm_sta->wep_key,
- STA_KEY_IDX_INVALID);
- } else {
- ret = 0;
- }
+ ret = 0;
/* we don't support TDLS during DCM */
if (iwl_mvm_phy_ctx_count(mvm) > 1)
@@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
NL80211_TDLS_DISABLE_LINK);
}
- /* Remove STA key if this is an AP using WEP */
- if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
- int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
- mvm_sta->wep_key);
-
- if (!ret)
- ret = rm_ret;
- kfree(mvm_sta->wep_key);
- mvm_sta->wep_key = NULL;
- }
-
if (unlikely(ret &&
test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status)))
@@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u32 changed)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (changed & (IEEE80211_RC_BW_CHANGED |
+ IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED))
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ true);
if (vif->type == NL80211_IFTYPE_STATION &&
changed & IEEE80211_RC_NSS_CHANGED)
@@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- if (vif->type == NL80211_IFTYPE_AP) {
- struct iwl_mvm_vif *mvmvif =
- iwl_mvm_vif_from_mac80211(vif);
-
- mvmvif->ap_wep_key = kmemdup(key,
- sizeof(*key) + key->keylen,
- GFP_KERNEL);
- if (!mvmvif->ap_wep_key)
- return -ENOMEM;
- }
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return 0;
- break;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ break;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -EOPNOTSUPP;
+ /* support HW crypto on TX */
+ return 0;
default:
/* currently FW supports only one optional cipher scheme */
if (hw->n_cipher_schemes &&
@@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
if (ret) {
IWL_WARN(mvm, "set key failed\n");
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
/*
* can't add key for RX, but we don't need it
- * in the device for TX so still return 0
+ * in the device for TX so still return 0,
+ * unless we have the new TX API, where we cannot
+ * put key material into the TX_CMD
*/
- key->hw_key_idx = STA_KEY_IDX_INVALID;
- ret = 0;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ ret = -EOPNOTSUPP;
+ else
+ ret = 0;
}
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index bca6f6b536d9..a50dc53df086 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -498,7 +498,6 @@ struct iwl_mvm_vif {
netdev_features_t features;
struct iwl_probe_resp_data __rcu *probe_resp_data;
- struct ieee80211_key_conf *ap_wep_key;
};
static inline struct iwl_mvm_vif *
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ba27dce4c2bb..13681b03c10e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -834,7 +834,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
- if (err)
+ if (err && err != -ERFKILL)
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1e03acf30762..b516fd1867ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -169,9 +169,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
}
/* iwl_mvm_create_skb Adds the rxb to a new skb */
-static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
- u16 len, u8 crypt_len,
- struct iwl_rx_cmd_buffer *rxb)
+static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
@@ -204,6 +204,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
* present before copying packet data.
*/
hdrlen += crypt_len;
+
+ if (WARN_ONCE(headlen < hdrlen,
+ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+ hdrlen, len, crypt_len)) {
+ /*
+ * We warn and trace because we want to be able to see
+ * it in trace-cmd as well.
+ */
+ IWL_DEBUG_RX(mvm,
+ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+ hdrlen, len, crypt_len);
+ return -EINVAL;
+ }
+
skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
@@ -216,6 +230,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
}
+
+ return 0;
}
static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
@@ -1671,7 +1687,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->boottime_ns = ktime_get_boot_ns();
}
- iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
+ if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
sta, csi);
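WARN_ONCE() evaluates to the condition it tests, so the new length check above can log once (with a backtrace) and bail out in a single expression. The bare shape of the idiom, with illustrative names:

    #include <linux/kernel.h>
    #include <linux/errno.h>

    static int check_lengths(unsigned int headlen, unsigned int hdrlen)
    {
            /* warns once with a stack trace, then evaluates to true */
            if (WARN_ONCE(headlen < hdrlen,
                          "invalid lengths (headlen=%u, hdrlen=%u)\n",
                          headlen, hdrlen))
                    return -EINVAL;
            return 0;
    }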
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 498c315291cf..98d123dd7177 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
list_del_init(&mvmtxq->list);
+ local_bh_disable();
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+ local_bh_enable();
}
mutex_unlock(&mvm->mutex);
@@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
timeout);
- if (mvmvif->ap_wep_key) {
- u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
- __set_bit(key_offset, mvm->fw_key_table);
-
- if (key_offset == STA_KEY_IDX_INVALID)
- return -ENOSPC;
-
- ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
- mvmvif->ap_wep_key, true, 0, NULL, 0,
- key_offset, 0);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
- if (mvmvif->ap_wep_key) {
- int i;
-
- if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
- mvm->fw_key_table)) {
- IWL_ERR(mvm, "offset %d not used in fw key table.\n",
- mvmvif->ap_wep_key->hw_key_idx);
- return -ENOENT;
- }
-
- /* track which key was deleted last */
- for (i = 0; i < STA_KEY_MAX_NUM; i++) {
- if (mvm->fw_key_deleted[i] < U8_MAX)
- mvm->fw_key_deleted[i]++;
- }
- mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
- ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
- mvmvif->ap_wep_key, true);
- if (ret)
- return ret;
- }
-
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 79700c7310a1..b4d4071b865d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data {
* the BA window. To be used for UAPSD only.
* @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data
- * @wep_key: used in AP mode. Is a duplicate of the WEP key.
* @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
@@ -426,8 +425,6 @@ struct iwl_mvm_sta {
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
- struct ieee80211_key_conf *wep_key;
-
u8 reserved_queue;
/* Temporary, until the new TLC will control the Tx protection */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2b94e4cef56c..9f1af8da9dc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
{IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index bf8b61a476c5..59213164f35e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index fe8269d023de..4f5eec7e44bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2067,7 +2067,6 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
* MAC_ACCESS_REQ bit to be performed before any other writes
* scheduled on different CPUs (after we drop reg_lock).
*/
- mmiowb();
out:
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
@@ -3318,7 +3317,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.unref = iwl_trans_pcie_unref, \
.dump_data = iwl_trans_pcie_dump_data, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
- .d3_resume = iwl_trans_pcie_d3_resume
+ .d3_resume = iwl_trans_pcie_d3_resume, \
+ .sync_nmi = iwl_trans_pcie_sync_nmi
#ifdef CONFIG_PM_SLEEP
#define IWL_TRANS_PM_OPS \
@@ -3542,6 +3542,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
@@ -3560,7 +3564,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- (trans->cfg != &iwl22260_2ax_cfg ||
+ (trans->cfg != &iwl_ax200_cfg_cc ||
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;
@@ -3637,22 +3641,29 @@ out_no_pci:
return ERR_PTR(ret);
}
-void iwl_trans_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
+ u32 inta_addr, sw_err_bit;
+
+ if (trans_pcie->msix_enabled) {
+ inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
+ sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
+ } else {
+ inta_addr = CSR_INT;
+ sw_err_bit = CSR_INT_BIT_SW_ERR;
+ }
iwl_disable_interrupts(trans);
iwl_force_nmi(trans);
while (time_after(timeout, jiffies)) {
- u32 inta_hw = iwl_read32(trans,
- CSR_MSIX_HW_INT_CAUSES_AD);
+ u32 inta_hw = iwl_read32(trans, inta_addr);
/* Error detected by uCode */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) {
+ if (inta_hw & sw_err_bit) {
/* Clear causes register */
- iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
- inta_hw &
- MSIX_HW_INT_CAUSES_REG_SW_ERR);
+ iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
break;
}
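The reworked sync-NMI path above polls an interrupt-cause register until a deadline. Because jiffies wraps, deadlines must be compared with time_after()/time_before(), never with plain relational operators. A generic skeleton of such a loop, with poll_hw() standing in for the register read (the sleep assumes a sleepable context; the driver's loop may busy-poll instead):

    #include <linux/types.h>
    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    static int poll_until(bool (*poll_hw)(void *ctx), void *ctx,
                          unsigned int timeout_ms)
    {
            unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

            while (time_after(deadline, jiffies)) {
                    if (poll_hw(ctx))
                            return 0;
                    usleep_range(100, 200);
            }
            return -ETIMEDOUT;
    }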
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 88530d9f4a54..38d110338987 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
cmd_str);
ret = -ETIMEDOUT;
- iwl_trans_sync_nmi(trans);
+ iwl_trans_pcie_sync_nmi(trans);
goto cancel;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 9fbd37d23e85..7be73e2c4681 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id));
ret = -ETIMEDOUT;
- iwl_trans_sync_nmi(trans);
+ iwl_trans_pcie_sync_nmi(trans);
goto cancel;
}
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index 67b0c05afbdb..a324bc4b7938 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -65,7 +65,6 @@ int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
hdr[ETH_ALEN * 2 + 3] = 0;
desc->tfm = tfm_michael;
- desc->flags = 0;
err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
if (err)
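The deleted 'desc->flags = 0;' reflects the crypto API change that removed the flags field from struct shash_desc; a synchronous hash descriptor now only carries its tfm. A minimal one-shot digest under that API, as a sketch:

    #include <crypto/hash.h>

    static int digest_buf(struct crypto_shash *tfm, const u8 *data,
                          unsigned int len, u8 *out)
    {
            /* stack descriptor sized for this tfm; no flags to set */
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            return crypto_shash_digest(desc, data, len, out);
    }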
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0838af04d681..c71adb1f1f41 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -521,7 +521,7 @@ struct mac80211_hwsim_data {
unsigned int rx_filter;
bool started, idle, scanning;
struct mutex mutex;
- struct tasklet_hrtimer beacon_timer;
+ struct hrtimer beacon_timer;
enum ps_mode {
PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
} ps;
@@ -1460,7 +1460,7 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
data->started = false;
- tasklet_hrtimer_cancel(&data->beacon_timer);
+ hrtimer_cancel(&data->beacon_timer);
wiphy_dbg(hw->wiphy, "%s\n", __func__);
}
@@ -1583,14 +1583,12 @@ static enum hrtimer_restart
mac80211_hwsim_beacon(struct hrtimer *timer)
{
struct mac80211_hwsim_data *data =
- container_of(timer, struct mac80211_hwsim_data,
- beacon_timer.timer);
+ container_of(timer, struct mac80211_hwsim_data, beacon_timer);
struct ieee80211_hw *hw = data->hw;
u64 bcn_int = data->beacon_int;
- ktime_t next_bcn;
if (!data->started)
- goto out;
+ return HRTIMER_NORESTART;
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -1601,12 +1599,9 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
bcn_int -= data->bcn_delta;
data->bcn_delta = 0;
}
-
- next_bcn = ktime_add(hrtimer_get_expires(timer),
- ns_to_ktime(bcn_int * 1000));
- tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS);
-out:
- return HRTIMER_NORESTART;
+ hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
+ ns_to_ktime(bcn_int * NSEC_PER_USEC));
+ return HRTIMER_RESTART;
}
static const char * const hwsim_chanwidths[] = {
@@ -1680,15 +1675,15 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&data->mutex);
if (!data->started || !data->beacon_int)
- tasklet_hrtimer_cancel(&data->beacon_timer);
- else if (!hrtimer_is_queued(&data->beacon_timer.timer)) {
+ hrtimer_cancel(&data->beacon_timer);
+ else if (!hrtimer_is_queued(&data->beacon_timer)) {
u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
u32 bcn_int = data->beacon_int;
u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
- tasklet_hrtimer_start(&data->beacon_timer,
- ns_to_ktime(until_tbtt * 1000),
- HRTIMER_MODE_REL);
+ hrtimer_start(&data->beacon_timer,
+ ns_to_ktime(until_tbtt * NSEC_PER_USEC),
+ HRTIMER_MODE_REL_SOFT);
}
return 0;
@@ -1751,7 +1746,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
info->enable_beacon, info->beacon_int);
vp->bcn_en = info->enable_beacon;
if (data->started &&
- !hrtimer_is_queued(&data->beacon_timer.timer) &&
+ !hrtimer_is_queued(&data->beacon_timer) &&
info->enable_beacon) {
u64 tsf, until_tbtt;
u32 bcn_int;
@@ -1759,9 +1754,10 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
tsf = mac80211_hwsim_get_tsf(hw, vif);
bcn_int = data->beacon_int;
until_tbtt = bcn_int - do_div(tsf, bcn_int);
- tasklet_hrtimer_start(&data->beacon_timer,
- ns_to_ktime(until_tbtt * 1000),
- HRTIMER_MODE_REL);
+
+ hrtimer_start(&data->beacon_timer,
+ ns_to_ktime(until_tbtt * NSEC_PER_USEC),
+ HRTIMER_MODE_REL_SOFT);
} else if (!info->enable_beacon) {
unsigned int count = 0;
ieee80211_iterate_active_interfaces_atomic(
@@ -1770,7 +1766,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u",
count);
if (count == 0) {
- tasklet_hrtimer_cancel(&data->beacon_timer);
+ hrtimer_cancel(&data->beacon_timer);
data->beacon_int = 0;
}
}
@@ -2644,7 +2640,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
enum nl80211_band band;
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
struct net *net;
- int idx;
+ int idx, i;
int n_limits = 0;
if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2768,12 +2764,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
goto failed_hw;
}
+ data->if_combination.max_interfaces = 0;
+ for (i = 0; i < n_limits; i++)
+ data->if_combination.max_interfaces +=
+ data->if_limits[i].max;
+
data->if_combination.n_limits = n_limits;
- data->if_combination.max_interfaces = 2048;
data->if_combination.limits = data->if_limits;
- hw->wiphy->iface_combinations = &data->if_combination;
- hw->wiphy->n_iface_combinations = 1;
+ /*
+ * If we were actually asked to support combinations,
+ * advertise them; if there's only a single possibility,
+ * such as IBSS alone, don't advertise it as a combination.
+ */
+ if (data->if_combination.max_interfaces > 1) {
+ hw->wiphy->iface_combinations = &data->if_combination;
+ hw->wiphy->n_iface_combinations = 1;
+ }
if (param->ciphers) {
memcpy(data->ciphers, param->ciphers,
@@ -2922,9 +2929,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- tasklet_hrtimer_init(&data->beacon_timer,
- mac80211_hwsim_beacon,
- CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_SOFT);
+ data->beacon_timer.function = mac80211_hwsim_beacon;
err = ieee80211_register_hw(hw);
if (err < 0) {
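The hwsim conversion above moves from tasklet_hrtimer to a plain hrtimer in softirq mode, re-arming via hrtimer_forward() and HRTIMER_RESTART instead of restarting by hand. A self-contained sketch of that periodic-timer shape (struct and names are illustrative):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    struct my_state {
            struct hrtimer timer;
            u64 period_ns;
    };

    static enum hrtimer_restart my_tick(struct hrtimer *t)
    {
            struct my_state *s = container_of(t, struct my_state, timer);

            /* periodic work goes here */
            hrtimer_forward(t, hrtimer_get_expires(t),
                            ns_to_ktime(s->period_ns));
            return HRTIMER_RESTART;
    }

    static void my_start(struct my_state *s)
    {
            hrtimer_init(&s->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
            s->timer.function = my_tick;
            hrtimer_start(&s->timer, ns_to_ktime(s->period_ns),
                          HRTIMER_MODE_REL_SOFT);
    }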
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index a85648342d15..d5a70340a945 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter = card->adapter;
- if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"device already resumed\n");
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 6eedc0ec7661..76629b98c78d 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
+ iowrite32(q->desc_dma, &q->regs->desc_base);
+ iowrite32(q->ndesc, &q->regs->ring_size);
q->head = ioread32(&q->regs->dma_idx);
q->tail = q->head;
iowrite32(q->head, &q->regs->cpu_idx);
@@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
else
mt76_dma_sync_idx(dev, q);
- wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+ wake = wake && q->stopped &&
+ qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+ if (wake)
+ q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index a033745adb2f..316167404729 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -679,19 +679,15 @@ out:
return ret;
}
-static void
-mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
- int idx = wcid->idx;
- int i;
+ int i, idx = wcid->idx;
rcu_assign_pointer(dev->wcid[idx], NULL);
synchronize_rcu();
- mutex_lock(&dev->mutex);
-
if (dev->drv->sta_remove)
dev->drv->sta_remove(dev, vif, sta);
@@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(dev, sta->txq[i]);
mt76_wcid_free(dev->wcid_mask, idx);
+}
+EXPORT_SYMBOL_GPL(__mt76_sta_remove);
+static void
+mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ mutex_lock(&dev->mutex);
+ __mt76_sta_remove(dev, vif, sta);
mutex_unlock(&dev->mutex);
}
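The mt76 change above splits sta removal into a locked wrapper and an exported __mt76_sta_remove(), following the common double-underscore convention: the underscored variant assumes the caller already holds the lock. A generic sketch of the pairing:

    #include <linux/mutex.h>

    struct obj {
            struct mutex lock;
            int refs;
    };

    /* caller must hold o->lock */
    static void __obj_put(struct obj *o)
    {
            lockdep_assert_held(&o->lock);
            o->refs--;
    }

    static void obj_put(struct obj *o)
    {
            mutex_lock(&o->lock);
            __obj_put(o);
            mutex_unlock(&o->lock);
    }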
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 5dfb0601f101..bcbfd3c4a44b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -126,6 +126,7 @@ struct mt76_queue {
int ndesc;
int queued;
int buf_size;
+ bool stopped;
u8 buf_offset;
u8 hw_idx;
@@ -143,6 +144,7 @@ struct mt76_mcu_ops {
const struct mt76_reg_pair *rp, int len);
int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *rp, int len);
+ int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
@@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state);
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index afcd86f735b4..4dcb465095d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
- if (dev->mt76.q_tx[MT_TXQ_BEACON].queued >
- __sw_hweight8(dev->beacon_mask))
+ if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
dev->beacon_check++;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index d69e82c66ab2..b3ae0aaea62a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -27,12 +27,16 @@ static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
__le32 *txd = (__le32 *)skb->data;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
+ void *priv;
int idx;
u32 val;
+ u8 tid;
- if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr))
+ if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
val = le32_to_cpu(txd[1]);
@@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
if (!wcid)
goto free;
- msta = container_of(wcid, struct mt7603_sta, wcid);
+ priv = msta = container_of(wcid, struct mt7603_sta, wcid);
val = le32_to_cpu(txd[0]);
skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
+ val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+ val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
+ txd[0] = cpu_to_le32(val);
+
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+ hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ ieee80211_sta_set_buffered(sta, tid, true);
+
spin_lock_bh(&dev->ps_lock);
__skb_queue_tail(&msta->psq, skb);
if (skb_queue_len(&msta->psq) >= 64) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 15cc8f33b34d..3af45949e868 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -112,7 +112,7 @@ static void
mt7603_phy_init(struct mt7603_dev *dev)
{
int rx_chains = dev->mt76.antenna_mask;
- int tx_chains = __sw_hweight8(rx_chains) - 1;
+ int tx_chains = hweight8(rx_chains) - 1;
mt76_rmw(dev, MT_WF_RMAC_RMCR,
(MT_WF_RMAC_RMCR_SMPS_MODE |
@@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
bus_ops->rmw = mt7603_rmw;
dev->mt76.bus = bus_ops;
+ spin_lock_init(&dev->ps_lock);
+
INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
(unsigned long)dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 0a0115861b51..5abc02b57818 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
MT_BA_CONTROL_1_RESET));
}
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
int ba_size)
{
u32 addr = mt7603_wtbl2_addr(wcid);
@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
mt76_clear(dev, addr + (15 * 4), tid_mask);
return;
}
- mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
-
- mt7603_mac_stop(dev);
- switch (tid) {
- case 0:
- mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
- break;
- case 1:
- mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
- break;
- case 2:
- mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
- ssn);
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
- ssn >> 8);
- break;
- case 3:
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
- break;
- case 4:
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
- break;
- case 5:
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
- ssn);
- mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
- ssn >> 4);
- break;
- case 6:
- mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
- break;
- case 7:
- mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
- break;
- }
- mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
- mt7603_mac_start(dev);
for (i = 7; i > 0; i--) {
if (ba_size >= MT_AGG_SIZE_LIMIT(i))
@@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_vif *mvif;
int wlan_idx;
@@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
int tx_count = 8;
u8 frame_type, frame_subtype;
u16 fc = le16_to_cpu(hdr->frame_control);
+ u16 seqno = 0;
u8 vif_idx = 0;
u32 val;
u8 bw;
@@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
tx_count = 0x1f;
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
- FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
+ MT_TXD3_SN_VALID;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ seqno = le16_to_cpu(hdr->seq_ctrl);
+ else if (ieee80211_is_back_req(hdr->frame_control))
+ seqno = le16_to_cpu(bar->start_seq_num);
+ else
+ val &= ~MT_TXD3_SN_VALID;
+
+ val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
txwi[3] = cpu_to_le32(val);
if (key) {
@@ -1072,7 +1047,7 @@ out:
case MT_PHY_TYPE_HT:
final_rate_flags |= IEEE80211_TX_RC_MCS;
final_rate &= GENMASK(5, 0);
- if (i > 15)
+ if (final_rate > 15)
return false;
break;
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index b10775ed92e6..a3c4ef198bfe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7603.h"
+#include "mac.h"
#include "eeprom.h"
static int
@@ -371,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct sk_buff_head list;
- mt76_stop_tx_queues(&dev->mt76, sta, false);
+ mt76_stop_tx_queues(&dev->mt76, sta, true);
mt7603_wtbl_set_ps(dev, msta, ps);
if (ps)
return;
@@ -386,6 +387,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
}
static void
+mt7603_ps_set_more_data(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+static void
mt7603_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
@@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
__skb_queue_head_init(&list);
+ mt7603_wtbl_set_ps(dev, msta, false);
+
spin_lock_bh(&dev->ps_lock);
skb_queue_walk_safe(&msta->psq, skb, tmp) {
if (!nframes)
@@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
skb_set_queue_mapping(skb, MT_TXQ_PSD);
__skb_unlink(skb, &msta->psq);
+ mt7603_ps_set_more_data(skb);
__skb_queue_tail(&list, skb);
nframes--;
}
spin_unlock_bh(&dev->ps_lock);
+ if (!skb_queue_empty(&list))
+ ieee80211_sta_eosp(sta);
+
mt7603_ps_tx_list(dev, &list);
if (nframes)
@@ -568,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = *ssn << 4;
@@ -582,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 4b0713f1fd5e..d06905ea8cc6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
struct ieee80211_hw *hw = mt76_hw(dev);
- int n_chains = __sw_hweight8(dev->mt76.antenna_mask);
+ int n_chains = hweight8(dev->mt76.antenna_mask);
struct {
u8 control_chan;
u8 center_chan;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 79f332429432..6049f3b7c8fe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
int ba_size);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index e13fea80d970..b920be1f5718 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev)
}
mem_base = devm_ioremap_resource(&pdev->dev, res);
- if (!mem_base) {
+ if (IS_ERR(mem_base)) {
dev_err(&pdev->dev, "Failed to get memory resource\n");
- return -EINVAL;
+ return PTR_ERR(mem_base);
}
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
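devm_ioremap_resource() never returns NULL on failure; it returns an ERR_PTR() value (and logs the reason itself), so the '!mem_base' test fixed above could not catch errors. The canonical probe fragment looks like this:

    #include <linux/platform_device.h>
    #include <linux/io.h>
    #include <linux/err.h>

    static int my_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* not a NULL check */

            /* ... use base ... */
            return 0;
    }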
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index 0290ba5869a5..736f81752b5b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
{ MT_MM20_PROT_CFG, 0x01742004 },
{ MT_MM40_PROT_CFG, 0x03f42084 },
{ MT_TXOP_CTRL_CFG, 0x0000583f },
- { MT_TX_RTS_CFG, 0x00092b20 },
+ { MT_TX_RTS_CFG, 0x00ffff20 },
{ MT_EXP_ACK_TIME, 0x002400ca },
{ MT_TXOP_HLDR_ET, 0x00000002 },
{ MT_XIFS_TIME_CFG, 0x33a41010 },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 91718647da02..e5a06f74a6f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
- u32 asic_rev, mac_rev;
+ u32 mac_rev;
int ret;
mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
@@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
goto err;
}
- asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+ mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
mac_rev = mt76_rr(dev, MT_MAC_CSR0);
dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
- asic_rev, mac_rev);
+ mdev->rev, mac_rev);
+ if (!is_mt76x0(dev)) {
+ ret = -ENODEV;
+ goto err;
+ }
/* Note: vendor driver skips this check for MT76X0U */
if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 6915cce5def9..07061eb4d1e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -51,6 +51,7 @@ struct mt76x02_calibration {
u16 false_cca;
s8 avg_rssi_all;
s8 agc_gain_adjust;
+ s8 agc_lowest_gain;
s8 low_gain;
s8 temp_vco;
@@ -114,8 +115,11 @@ struct mt76x02_dev {
struct mt76x02_dfs_pattern_detector dfs_pd;
/* edcca monitor */
+ unsigned long ed_trigger_timeout;
bool ed_tx_blocked;
bool ed_monitor;
+ u8 ed_monitor_enabled;
+ u8 ed_monitor_learning;
u8 ed_trigger;
u8 ed_silent;
ktime_t ed_time;
@@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev);
void mt76x02_init_debugfs(struct mt76x02_dev *dev);
+static inline bool is_mt76x0(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7610 ||
+ mt76_chip(&dev->mt76) == 0x7630 ||
+ mt76_chip(&dev->mt76) == 0x7650;
+}
+
static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612 ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index 7580c5c986ff..b1d6fd4861e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data)
return 0;
}
+static int
+mt76_edcca_set(void *data, u64 val)
+{
+ struct mt76x02_dev *dev = data;
+ enum nl80211_dfs_regions region = dev->dfs_pd.region;
+
+ dev->ed_monitor_enabled = !!val;
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ region == NL80211_DFS_ETSI;
+ mt76x02_edcca_init(dev, true);
+
+ return 0;
+}
+
+static int
+mt76_edcca_get(void *data, u64 *val)
+{
+ struct mt76x02_dev *dev = data;
+
+ *val = dev->ed_monitor_enabled;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
+ "%lld\n");
+
void mt76x02_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
@@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
+ debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
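The edcca knob added above uses DEFINE_DEBUGFS_ATTRIBUTE(), which pairs simple get/set callbacks with a printf format and generates the file operations. A minimal standalone version of the same wiring (names are illustrative):

    #include <linux/debugfs.h>

    static u64 my_flag;

    static int my_flag_get(void *data, u64 *val)
    {
            *val = my_flag;
            return 0;
    }

    static int my_flag_set(void *data, u64 val)
    {
            my_flag = !!val;
            return 0;
    }

    DEFINE_DEBUGFS_ATTRIBUTE(fops_my_flag, my_flag_get, my_flag_set, "%lld\n");

    static void my_debugfs_init(struct dentry *dir)
    {
            debugfs_create_file("my_flag", 0600, dir, NULL, &fops_my_flag);
    }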
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index e4649103efd4..17d12d212d1b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
- dev->ed_monitor = region == NL80211_DFS_ETSI;
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ region == NL80211_DFS_ETSI;
mt76x02_edcca_init(dev, true);
dfs_pd->region = region;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 91ff6598eccf..4fe5a83ca5a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x02_cipher_type cipher;
+ u8 key_data[32];
+ u32 iv, eiv;
+ u64 pn;
+
+ cipher = mt76x02_mac_get_key_info(key, key_data);
+ iv = mt76_rr(dev, MT_WCID_IV(idx));
+ eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
+
+ pn = (u64)eiv << 16;
+ if (cipher == MT_CIPHER_TKIP) {
+ pn |= (iv >> 16) & 0xff;
+ pn |= (iv & 0xff) << 8;
+ } else if (cipher >= MT_CIPHER_AES_CCMP) {
+ pn |= iv & 0xffff;
+ } else {
+ return;
+ }
+
+ atomic64_set(&key->tx_pn, pn);
+}
+
+
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
u8 iv_data[8];
+ u64 pn;
cipher = mt76x02_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
@@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
if (key) {
mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
!!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+
+ pn = atomic64_read(&key->tx_pn);
+
iv_data[3] = key->keyidx << 6;
- if (cipher >= MT_CIPHER_TKIP)
+ if (cipher >= MT_CIPHER_TKIP) {
iv_data[3] |= 0x20;
+ put_unaligned_le32(pn >> 16, &iv_data[4]);
+ }
+
+ if (cipher == MT_CIPHER_TKIP) {
+ iv_data[0] = (pn >> 8) & 0xff;
+ iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
+ iv_data[2] = pn & 0xff;
+ } else if (cipher >= MT_CIPHER_AES_CCMP) {
+ put_unaligned_le16((pn & 0xffff), &iv_data[0]);
+ }
}
mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
@@ -426,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
return;
rcu_read_lock();
- mt76_tx_status_lock(mdev, &list);
if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
@@ -439,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
drv_priv);
}
+ mt76_tx_status_lock(mdev, &list);
+
if (wcid) {
if (stat->pktid >= MT_PACKET_ID_FIRST)
status.skb = mt76_tx_status_skb_get(mdev, wcid,
@@ -458,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (*update == 0 && stat_val == stat_cache &&
stat->wcid == msta->status.wcid && msta->n_frames < 32) {
msta->n_frames++;
- goto out;
+ mt76_tx_status_unlock(mdev, &list);
+ rcu_read_unlock();
+ return;
}
mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
@@ -474,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (status.skb)
mt76_tx_status_skb_done(mdev, status.skb, &list);
- else
- ieee80211_tx_status_ext(mt76_hw(dev), &status);
-
-out:
mt76_tx_status_unlock(mdev, &list);
+
+ if (!status.skb)
+ ieee80211_tx_status_ext(mt76_hw(dev), &status);
rcu_read_unlock();
}
@@ -920,6 +962,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
}
}
mt76x02_edcca_tx_enable(dev, true);
+ dev->ed_monitor_learning = true;
/* clear previous CCA timer value */
mt76_rr(dev, MT_ED_CCA_TIMER);
@@ -929,6 +972,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
#define MT_EDCCA_TH 92
#define MT_EDCCA_BLOCK_TH 2
+#define MT_EDCCA_LEARN_TH 50
+#define MT_EDCCA_LEARN_CCA 180
+#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
+
static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
ktime_t cur_time;
@@ -951,11 +998,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
dev->ed_trigger = 0;
}
- if (dev->ed_trigger > MT_EDCCA_BLOCK_TH &&
- !dev->ed_tx_blocked)
+ if (dev->cal.agc_lowest_gain &&
+ dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
+ dev->ed_trigger > MT_EDCCA_LEARN_TH) {
+ dev->ed_monitor_learning = false;
+ dev->ed_trigger_timeout = jiffies + 20 * HZ;
+ } else if (!dev->ed_monitor_learning &&
+ time_is_after_jiffies(dev->ed_trigger_timeout)) {
+ dev->ed_monitor_learning = true;
+ mt76x02_edcca_tx_enable(dev, true);
+ }
+
+ if (dev->ed_monitor_learning)
+ return;
+
+ if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
mt76x02_edcca_tx_enable(dev, false);
- else if (dev->ed_silent > MT_EDCCA_BLOCK_TH &&
- dev->ed_tx_blocked)
+ else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
mt76x02_edcca_tx_enable(dev, true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 6b1f25d2f64c..caeeef96c42f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key);
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
u8 *mac);
void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 1229f19f2b02..daaed1220147 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include "mt76x02.h"
+#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"
struct beacon_bc_data {
@@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
return i < 4;
}
+static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, void *data)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76_wcid *wcid;
+
+ if (!sta)
+ return;
+
+ wcid = (struct mt76_wcid *) sta->drv_priv;
+
+ if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
+ return;
+
+ mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
+}
+
+static void mt76x02_reset_state(struct mt76x02_dev *dev)
+{
+ int i;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
+ rcu_read_unlock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct mt76x02_sta *msta;
+ struct mt76_wcid *wcid;
+ void *priv;
+
+ wcid = rcu_dereference_protected(dev->mt76.wcid[i],
+ lockdep_is_held(&dev->mt76.mutex));
+ if (!wcid)
+ continue;
+
+ priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+
+ priv = msta->vif;
+ vif = container_of(priv, struct ieee80211_vif, drv_priv);
+
+ __mt76_sta_remove(&dev->mt76, vif, sta);
+ memset(msta, 0, sizeof(*msta));
+ }
+
+ dev->vif_mask = 0;
+ dev->beacon_mask = 0;
+}
+
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
u32 mask = dev->mt76.mmio.irqmask;
+ bool restart = dev->mt76.mcu_ops->mcu_restart;
int i;
ieee80211_stop_queues(dev->mt76.hw);
@@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mutex_lock(&dev->mt76.mutex);
+ if (restart)
+ mt76x02_reset_state(dev);
+
if (dev->beacon_mask)
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
@@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
/* let fw reset DMA */
mt76_set(dev, 0x734, 0x3);
+ if (restart)
+ dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
+
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
mt76_queue_rx_reset(dev, i);
- mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
- mt76_set(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ mt76x02_mac_start(dev);
+
if (dev->ed_monitor)
mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
- if (dev->beacon_mask)
+ if (dev->beacon_mask && !restart)
mt76_set(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN);
@@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
napi_schedule(&dev->mt76.napi[i]);
}
- ieee80211_wake_queues(dev->mt76.hw);
-
- mt76_txq_schedule_all(&dev->mt76);
+ if (restart) {
+ mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+ ieee80211_restart_hw(dev->mt76.hw);
+ } else {
+ ieee80211_wake_queues(dev->mt76.hw);
+ mt76_txq_schedule_all(&dev->mt76);
+ }
}
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index a020c757ba5c..a54b63a96eae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
ret = true;
}
+ dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
+
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 43f07461c8d3..6fb52b596d42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
mt76x02_insert_hdr_pad(skb);
- txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
+ txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+ skb_push(skb, sizeof(struct mt76x02_txwi));
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
txwi->pktid = pid;
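The two-step change above writes the TX descriptor into the skb headroom before skb_push(), so that fields derived from skb->len cover only the payload and are not inflated by the descriptor itself. A minimal sketch of that ordering with a simplified, hypothetical skb model:

#include <stdint.h>
#include <stdio.h>

struct txwi { unsigned int len; };

struct skb {
	uint8_t buf[64];	/* storage; first 16 bytes are headroom */
	uint8_t *data;		/* current start of valid data */
	unsigned int len;	/* bytes of valid data */
};

static void *skb_push(struct skb *skb, unsigned int n)
{
	skb->data -= n;
	skb->len += n;
	return skb->data;
}

int main(void)
{
	struct skb skb = { .data = skb.buf + 16, .len = 10 };
	struct txwi *txwi = (struct txwi *)(skb.data - sizeof(*txwi));

	/* fill the descriptor while len still excludes it ... */
	txwi->len = skb.len;
	/* ... and only then expose it at the front of the frame */
	skb_push(&skb, sizeof(*txwi));

	printf("txwi len=%u, frame len=%u\n", txwi->len, skb.len);
	return 0;
}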
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index a48c261b0c63..cd072ac614f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
int idx = 0;
+ memset(msta, 0, sizeof(*msta));
+
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
if (idx < 0)
return -ENOSPC;
@@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76_txq *mtxq;
+ memset(mvif, 0, sizeof(*mvif));
+
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
mvif->group_wcid.hw_key_idx = -1;
@@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct mt76x02_dev *dev = hw->priv;
unsigned int idx = 0;
+ /* Allow changing the address in HW if we create the first interface. */
+ if (!dev->vif_mask &&
+ (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
+ memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
+ mt76x02_mac_setaddr(dev, vif->addr);
+
if (vif->addr[0] & BIT(1))
idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
@@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (dev->vif_mask & BIT(idx))
return -EBUSY;
- /* Allow to change address in HW if we create first interface. */
- if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
- mt76x02_mac_setaddr(dev, vif->addr);
-
dev->vif_mask |= BIT(idx);
mt76x02_vif_init(dev, vif, idx);
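The reordered hunks above decide whether the hardware base address must change before computing the per-interface index: a locally administered address (BIT(1) in the first octet) maps to one of eight extra slots keyed by bits 4:2 of the XOR with the base address, which is why bits 4:1 of octet 0 are masked out of the comparison. A user-space sketch of the index scheme (helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int vif_idx(const uint8_t *base, const uint8_t *addr)
{
	unsigned int idx = 0;

	if (addr[0] & BIT(1))	/* locally administered address? */
		idx = 1 + (((base[0] ^ addr[0]) >> 2) & 7);

	return idx;
}

int main(void)
{
	uint8_t base[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t vif1[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t vif2[6] = { 0x06, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* prints "0 1 2": base address, then two local-admin slots */
	printf("%u %u %u\n", vif_idx(base, base),
	       vif_idx(base, vif1), vif_idx(base, vif2));
	return 0;
}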
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index f8534362e2c8..a30ef2c5a9db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{ MT_TX_SW_CFG1, 0x00010000 },
{ MT_TX_SW_CFG2, 0x00000000 },
{ MT_TXOP_CTRL_CFG, 0x0400583f },
- { MT_TX_RTS_CFG, 0x00100020 },
+ { MT_TX_RTS_CFG, 0x00ffff20 },
{ MT_TX_TIMEOUT_CFG, 0x000a2290 },
{ MT_TX_RETRY_CFG, 0x47f01f0f },
{ MT_EXP_ACK_TIME, 0x002c00dc },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index 6c619f1c65c9..d7abe3d73bad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
void mt76x2_cleanup(struct mt76x02_dev *dev);
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 984d9c4c2e1a..d3927a13e92e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
}
}
-static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
{
const u8 *macaddr = dev->mt76.macaddr;
u32 val;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
index 03e24ae7f66c..605dc66ae83b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -165,9 +165,30 @@ error:
return -ENOENT;
}
+static int
+mt76pci_mcu_restart(struct mt76_dev *mdev)
+{
+ struct mt76x02_dev *dev;
+ int ret;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+ mt76x02_mcu_cleanup(dev);
+ mt76x2_mac_reset(dev, true);
+
+ ret = mt76pci_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ return 0;
+}
+
int mt76x2_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x2_mcu_ops = {
+ .mcu_restart = mt76pci_mcu_restart,
.mcu_send_msg = mt76x02_mcu_msg_send,
};
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index 1848e8ab2e21..769a9b972044 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
- if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+ val = 0x1836 << 16;
+ if (!mt76x2_has_ext_lna(dev) &&
+ dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1e42 << 16;
- else
- val = 0x1836 << 16;
+
+ if (mt76x2_has_ext_lna(dev) &&
+ dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
+ dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
+ val = 0x0f36 << 16;
val |= 0xf8;
@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 *gain = dev->cal.agc_gain_init;
u8 low_gain_delta, gain_delta;
+ u32 agc_35, agc_37;
bool gain_change;
int low_gain;
u32 val;
@@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
else
low_gain_delta = 14;
+ agc_37 = 0x2121262c;
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ agc_35 = 0x11111516;
+ else if (low_gain == 2)
+ agc_35 = agc_37 = 0x08080808;
+ else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ agc_35 = 0x10101014;
+ else
+ agc_35 = 0x11111116;
+
if (low_gain == 2) {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
@@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
dev->cal.agc_gain_adjust = 0;
} else {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
- mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
- else
- mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
- mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
gain_delta = 0;
dev->cal.agc_gain_adjust = low_gain_delta;
}
+ mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
+ mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
+
dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
mt76x2_phy_set_gain_val(dev);
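The AGC hunks above replace scattered conditional register writes with values chosen once up front from band, gain state and channel width, then written unconditionally. A sketch of the selection logic, with the constants copied from the hunk and illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

static void pick_agc(enum band band, int low_gain, bool is_80mhz,
		     uint32_t *agc_35, uint32_t *agc_37)
{
	*agc_37 = 0x2121262c;
	if (band == BAND_2GHZ)
		*agc_35 = 0x11111516;
	else if (low_gain == 2)
		*agc_35 = *agc_37 = 0x08080808;
	else if (is_80mhz)
		*agc_35 = 0x10101014;
	else
		*agc_35 = 0x11111116;
}

int main(void)
{
	uint32_t a35, a37;

	pick_agc(BAND_5GHZ, 2, false, &a35, &a37);
	printf("AGC35=%#x AGC37=%#x\n", a35, a37);
	return 0;
}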
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index ddb6b2c48e01..ac0f13d46299 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -21,11 +21,10 @@
#include "mt76x2u.h"
static const struct usb_device_id mt76x2u_device_table[] = {
- { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
{ USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
{ USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
{ USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
- { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
+ { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
{ USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
@@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf,
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+ if (!is_mt76x2(dev)) {
+ err = -ENODEV;
+ goto err;
+ }
err = mt76x2u_register_device(dev);
if (err < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index 5e84b4535cb1..3b82345756ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
- mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 5a349fe3e576..2585df512335 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
- if (q->queued > q->ndesc - 8)
+ if (q->queued > q->ndesc - 8 && !q->stopped) {
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+ q->stopped = true;
+ }
+
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
@@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
if (last_skb) {
mt76_queue_ps_skb(dev, sta, last_skb, true);
dev->queue_ops->kick(dev, hwq);
+ } else {
+ ieee80211_sta_eosp(sta);
}
+
spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
@@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
struct mt76_queue *hwq = mtxq->hwq;
+ if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+ return;
+
spin_lock_bh(&hwq->lock);
if (list_empty(&mtxq->list))
list_add_tail(&mtxq->list, &hwq->swq);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index ae6ada370597..4c1abd492405 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data)
spin_lock_bh(&q->lock);
}
mt76_txq_schedule(dev, q);
- wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+
+ wake = q->stopped && q->queued < q->ndesc - 8;
+ if (wake)
+ q->stopped = false;
+
if (!q->queued)
wake_up(&dev->tx_wait);
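Taken together, the tx.c and usb.c hunks above pair every queue stop with exactly one wakeup by tracking a per-queue stopped flag, instead of waking on a queue-index heuristic. A minimal user-space sketch of the pattern (hypothetical names, thresholds copied from the hunks):

#include <stdbool.h>
#include <stdio.h>

struct queue {
	int queued, ndesc;
	bool stopped;
};

static void tx_one(struct queue *q)
{
	q->queued++;
	if (q->queued > q->ndesc - 8 && !q->stopped) {
		printf("stop queue at %d\n", q->queued);
		q->stopped = true;	/* remember we stopped it */
	}
}

static void tx_cleanup(struct queue *q, int done)
{
	q->queued -= done;
	/* wake only a queue we stopped, once there is room again */
	if (q->stopped && q->queued < q->ndesc - 8) {
		printf("wake queue at %d\n", q->queued);
		q->stopped = false;
	}
}

int main(void)
{
	struct queue q = { .ndesc = 16 };

	for (int i = 0; i < 12; i++)
		tx_one(&q);	/* stops exactly once, at queued == 9 */
	tx_cleanup(&q, 6);	/* wakes exactly once, at queued == 6 */
	return 0;
}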
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index d8b7863f7926..6ae7f14dc9bf 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
asic_rev, mac_rev);
+ if ((asic_rev >> 16) != 0x7601) {
+ ret = -ENODEV;
+ goto err;
+ }
/* Note: vendor driver skips this check for MT7601U */
if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 4b1744e9fb78..50b92ca92bd7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -673,7 +673,6 @@ enum rt2x00_state_flags {
CONFIG_CHANNEL_HT40,
CONFIG_POWERSAVING,
CONFIG_HT_DISABLED,
- CONFIG_QOS_DISABLED,
CONFIG_MONITORING,
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 2825560e2424..e8462f25d252 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -642,19 +642,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
rt2x00dev->intf_associated--;
rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
- clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
}
/*
- * Check for access point which do not support 802.11e . We have to
- * generate data frames sequence number in S/W for such AP, because
- * of H/W bug.
- */
- if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
- set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
- /*
* When the erp information has changed, we should perform
* additional configuration steps. For all other changes we are done.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 92ddc19e7bf7..4834b4eb0206 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
/*
* rt2800 has a H/W (or F/W) bug, device incorrectly increase
- * seqno on retransmited data (non-QOS) frames. To workaround
- * the problem let's generate seqno in software if QOS is
- * disabled.
+ * seqno on retransmitted data (non-QOS) and management frames.
+ * To work around the problem, let's generate the seqno in software,
+ * except for beacons, which are transmitted periodically by H/W
+ * and hence must have their seqno assigned by hardware.
*/
- if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
- __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
- else
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
/* H/W will generate sequence number */
return;
+ }
+
+ __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
/*
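A user-space sketch of the revised policy above: software now numbers everything except beacons, which the hardware transmits on its own schedule and must number itself. Frame type/subtype values follow IEEE 802.11 (management type 0, beacon subtype 8); the names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FTYPE_MGMT	0x0
#define STYPE_BEACON	0x8

struct frame { uint8_t type, subtype; };

static bool hw_assigns_seqno(const struct frame *f)
{
	/* beacons are transmitted periodically by H/W */
	return f->type == FTYPE_MGMT && f->subtype == STYPE_BEACON;
}

int main(void)
{
	struct frame beacon = { FTYPE_MGMT, STYPE_BEACON };
	struct frame assoc  = { FTYPE_MGMT, 0x0 };

	printf("beacon: %s, assoc: %s\n",
	       hw_assigns_seqno(&beacon) ? "hw" : "sw",
	       hw_assigns_seqno(&assoc) ? "hw" : "sw");
	return 0;
}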
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index b7828fb252f2..b681073ae8ba 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -449,7 +449,6 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(desc, fw->image, image_size,
hash_data);
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 2b26f762fbc3..01acb6e53365 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1074,6 +1074,12 @@ static const struct spi_device_id st95hf_id[] = {
};
MODULE_DEVICE_TABLE(spi, st95hf_id);
+static const struct of_device_id st95hf_spi_of_match[] = {
+ { .compatible = "st,st95hf" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
+
static int st95hf_probe(struct spi_device *nfc_spi_dev)
{
int ret;
@@ -1260,6 +1266,7 @@ static struct spi_driver st95hf_driver = {
.driver = {
.name = "st95hf",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(st95hf_spi_of_match),
},
.id_table = st95hf_id,
.probe = st95hf_probe,
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 1dede87dd54f..dcf234680535 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -358,8 +358,6 @@ static void idt_sw_write(struct idt_ntb_dev *ndev,
iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
/* Put the new value of the register */
iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
- /* Make sure the PCIe transactions are executed */
- mmiowb();
/* Unlock GASA registers operations */
spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
}
@@ -750,7 +748,6 @@ static void idt_ntb_local_link_enable(struct idt_ntb_dev *ndev)
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata);
- mmiowb();
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
/* Notify the peers by setting and clearing the global signal bit */
@@ -778,7 +775,6 @@ static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev)
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0);
- mmiowb();
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
/* Notify the peers by setting and clearing the global signal bit */
@@ -1339,7 +1335,6 @@ static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr);
idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32));
idt_nt_write(ndev, IDT_NT_LUTUDATA, data);
- mmiowb();
spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
/* Limit address isn't specified since size is fixed for LUT */
}
@@ -1393,7 +1388,6 @@ static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
idt_nt_write(ndev, IDT_NT_LUTLDATA, 0);
idt_nt_write(ndev, IDT_NT_LUTMDATA, 0);
idt_nt_write(ndev, IDT_NT_LUTUDATA, 0);
- mmiowb();
spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
}
@@ -1812,7 +1806,6 @@ static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
/* Set the route and send the data */
idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl);
idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg);
- mmiowb();
/* Unlock the messages routing table */
spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 2ad263f708da..bb57ec239029 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -180,7 +180,7 @@ int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
return ndev->reg->mw_bar[idx];
}
-static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
+void ndev_db_addr(struct intel_ntb_dev *ndev,
phys_addr_t *db_addr, resource_size_t *db_size,
phys_addr_t reg_addr, unsigned long reg)
{
@@ -196,8 +196,6 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
*db_size = ndev->reg->db_size;
dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
}
-
- return 0;
}
u64 ndev_db_read(struct intel_ntb_dev *ndev,
@@ -1111,13 +1109,28 @@ int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
ndev->self_reg->db_mask);
}
-int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
- resource_size_t *db_size)
+static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+ resource_size_t *db_size, u64 *db_data, int db_bit)
{
+ u64 db_bits;
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
- return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
+ if (unlikely(db_bit >= BITS_PER_LONG_LONG))
+ return -EINVAL;
+
+ db_bits = BIT_ULL(db_bit);
+
+ if (unlikely(db_bits & ~ntb_ndev(ntb)->db_valid_mask))
+ return -EINVAL;
+
+ ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
ndev->peer_reg->db_bell);
+
+ if (db_data)
+ *db_data = db_bits;
+
+ return 0;
}
static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h
index ad8ec1444436..544cf5c06f4d 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h
@@ -147,6 +147,9 @@ extern struct intel_b2b_addr xeon_b2b_dsd_addr;
int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max,
int msix_shift, int total_shift);
enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
+void ndev_db_addr(struct intel_ntb_dev *ndev,
+ phys_addr_t *db_addr, resource_size_t *db_size,
+ phys_addr_t reg_addr, unsigned long reg);
u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio);
int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
void __iomem *mmio);
@@ -166,8 +169,6 @@ int intel_ntb_db_vector_count(struct ntb_dev *ntb);
u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector);
int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits);
int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits);
-int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
- resource_size_t *db_size);
int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb);
int intel_ntb_spad_count(struct ntb_dev *ntb);
u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c
index b3fa24778f94..f475b56a3f49 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen3.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c
@@ -532,6 +532,37 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return 0;
}
+int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+ resource_size_t *db_size,
+ u64 *db_data, int db_bit)
+{
+ phys_addr_t db_addr_base;
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ if (unlikely(db_bit >= BITS_PER_LONG_LONG))
+ return -EINVAL;
+
+ if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask))
+ return -EINVAL;
+
+ ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr,
+ ndev->peer_reg->db_bell);
+
+ if (db_addr) {
+ *db_addr = db_addr_base + (db_bit * 4);
+ dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n",
+ *db_addr, db_bit);
+ }
+
+ if (db_data) {
+ *db_data = 1;
+ dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n",
+ *db_data, db_bit);
+ }
+
+ return 0;
+}
+
static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
@@ -584,7 +615,7 @@ const struct ntb_dev_ops intel_ntb3_ops = {
.db_clear = intel_ntb3_db_clear,
.db_set_mask = intel_ntb_db_set_mask,
.db_clear_mask = intel_ntb_db_clear_mask,
- .peer_db_addr = intel_ntb_peer_db_addr,
+ .peer_db_addr = intel_ntb3_peer_db_addr,
.peer_db_set = intel_ntb3_peer_db_set,
.spad_is_unsafe = intel_ntb_spad_is_unsafe,
.spad_count = intel_ntb_spad_count,
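Both peer_db_addr() implementations above validate the requested doorbell bit the same way before reporting the write value. A self-contained sketch of that check (function name is hypothetical; BIT_ULL and the mask test mirror the hunks):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG_LONG 64
#define BIT_ULL(n) (1ULL << (n))

static int peer_db_data(uint64_t db_valid_mask, int db_bit,
			uint64_t *db_data)
{
	if (db_bit >= BITS_PER_LONG_LONG)
		return -EINVAL;	/* would shift out of the register */

	if (BIT_ULL(db_bit) & ~db_valid_mask)
		return -EINVAL;	/* bit not backed by hardware */

	*db_data = BIT_ULL(db_bit);
	return 0;
}

int main(void)
{
	uint64_t data;

	/* 16 valid doorbell bits: bit 3 is fine, bit 20 is rejected */
	printf("%d\n", peer_db_data(0xffff, 3, &data));	/* 0 */
	printf("%d\n", peer_db_data(0xffff, 20, &data));	/* -EINVAL */
	return 0;
}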
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index f2df2d39c65b..d905d368d28c 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -236,6 +236,7 @@ static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
iowrite32(0, &ctl->bar_entry[bar].win_size);
+ iowrite32(0, &ctl->bar_ext_entry[bar].win_size);
iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}
@@ -258,7 +259,9 @@ static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
- iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
+ iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
+ &ctl->bar_entry[bar].win_size);
+ iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
iowrite64(sndev->self_partition | addr,
&ctl->bar_entry[bar].xlate_addr);
}
@@ -679,11 +682,16 @@ static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
phys_addr_t *db_addr,
- resource_size_t *db_size)
+ resource_size_t *db_size,
+ u64 *db_data,
+ int db_bit)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
unsigned long offset;
+ if (unlikely(db_bit >= BITS_PER_LONG_LONG))
+ return -EINVAL;
+
offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
(unsigned long)sndev->stdev->mmio;
@@ -693,6 +701,8 @@ static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
if (db_size)
*db_size = sizeof(u32);
+ if (db_data)
+ *db_data = BIT_ULL(db_bit) << sndev->db_peer_shift;
return 0;
}
@@ -1025,7 +1035,9 @@ static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
- iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
+ iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
+ &ctl->bar_entry[bar].win_size);
+ iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
iowrite64(sndev->peer_partition | addr,
&ctl->bar_entry[bar].xlate_addr);
}
@@ -1092,7 +1104,7 @@ static int crosslink_enum_partition(struct switchtec_ntb *sndev,
dev_dbg(&sndev->stdev->dev,
"Crosslink BAR%d addr: %llx\n",
- i, bar_addr);
+ i * 2, bar_addr);
if (bar_addr != bar_space * i)
continue;
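The window-size writes above split a 64-bit size across the original and extended registers, so windows of 4 GiB or more are no longer silently truncated. A sketch of the split, assuming the 0xFFFFF000 mask keeps the low half 4 KiB-aligned as in the hunk:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t size = 8ULL << 30;	/* an 8 GiB window */
	int xlate_pos = 12;		/* log2 of the 4 KiB alignment */

	uint32_t win_size = xlate_pos | (lower_32_bits(size) & 0xFFFFF000);
	uint32_t win_size_ext = upper_32_bits(size);

	/* before the fix, the upper half was dropped entirely */
	printf("win_size=%#x ext=%#x\n", win_size, win_size_ext);
	return 0;
}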
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 3bfdb4562408..d4f39ba1d976 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -144,7 +144,9 @@ struct ntb_transport_qp {
struct list_head tx_free_q;
spinlock_t ntb_tx_free_q_lock;
void __iomem *tx_mw;
- dma_addr_t tx_mw_phys;
+ phys_addr_t tx_mw_phys;
+ size_t tx_mw_size;
+ dma_addr_t tx_mw_dma_addr;
unsigned int tx_index;
unsigned int tx_max_entry;
unsigned int tx_max_frame;
@@ -862,6 +864,9 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
if (!nt->link_is_up)
cancel_delayed_work_sync(&nt->link_work);
+ for (i = 0; i < nt->mw_count; i++)
+ ntb_free_mw(nt, i);
+
/* The scratchpad registers keep the values if the remote side
* goes down, blast them now to give them a sane value the next
* time they are accessed
@@ -1049,6 +1054,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
tx_size = (unsigned int)mw_size / num_qps_mw;
qp_offset = tx_size * (qp_num / mw_count);
+ qp->tx_mw_size = tx_size;
qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
if (!qp->tx_mw)
return -EINVAL;
@@ -1644,7 +1650,7 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
dma_cookie_t cookie;
device = chan->device;
- dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
+ dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
buff_off = (size_t)buf & ~PAGE_MASK;
dest_off = (size_t)dest & ~PAGE_MASK;
@@ -1863,6 +1869,18 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
qp->rx_dma_chan = NULL;
}
+ if (qp->tx_dma_chan) {
+ qp->tx_mw_dma_addr =
+ dma_map_resource(qp->tx_dma_chan->device->dev,
+ qp->tx_mw_phys, qp->tx_mw_size,
+ DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(qp->tx_dma_chan->device->dev,
+ qp->tx_mw_dma_addr)) {
+ qp->tx_mw_dma_addr = 0;
+ goto err1;
+ }
+ }
+
dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
qp->tx_dma_chan ? "DMA" : "CPU");
@@ -1904,6 +1922,10 @@ err1:
qp->rx_alloc_entry = 0;
while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
+ if (qp->tx_mw_dma_addr)
+ dma_unmap_resource(qp->tx_dma_chan->device->dev,
+ qp->tx_mw_dma_addr, qp->tx_mw_size,
+ DMA_FROM_DEVICE, 0);
if (qp->tx_dma_chan)
dma_release_channel(qp->tx_dma_chan);
if (qp->rx_dma_chan)
@@ -1945,6 +1967,11 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
*/
dma_sync_wait(chan, qp->last_cookie);
dmaengine_terminate_all(chan);
+
+ dma_unmap_resource(chan->device->dev,
+ qp->tx_mw_dma_addr, qp->tx_mw_size,
+ DMA_FROM_DEVICE, 0);
+
dma_release_channel(chan);
}
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 2a9d6b0d1f19..11a6cd374004 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -284,11 +284,9 @@ static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
ntb_peer_spad_write(perf->ntb, peer->pidx,
PERF_SPAD_HDATA(perf->gidx),
upper_32_bits(data));
- mmiowb();
ntb_peer_spad_write(perf->ntb, peer->pidx,
PERF_SPAD_CMD(perf->gidx),
cmd);
- mmiowb();
ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
@@ -379,7 +377,6 @@ static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
upper_32_bits(data));
- mmiowb();
/* This call shall trigger peer message event */
ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index b123b0dcf274..4671776f5623 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
static int btt_freelist_init(struct arena_info *arena)
{
- int old, new, ret;
- u32 i, map_entry;
- struct log_entry log_new, log_old;
+ int new, ret;
+ struct log_entry log_new;
+ u32 i, map_entry, log_oldmap, log_newmap;
arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
GFP_KERNEL);
@@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena)
return -ENOMEM;
for (i = 0; i < arena->nfree; i++) {
- old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
- if (old < 0)
- return old;
-
new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
if (new < 0)
return new;
+ /* old and new map entries with any flags stripped out */
+ log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
+ log_newmap = ent_lba(le32_to_cpu(log_new.new_map));
+
/* sub points to the next one to be overwritten */
arena->freelist[i].sub = 1 - new;
arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
- arena->freelist[i].block = le32_to_cpu(log_new.old_map);
+ arena->freelist[i].block = log_oldmap;
/*
* FIXME: if error clearing fails during init, we want to make
* the BTT read-only
*/
- if (ent_e_flag(log_new.old_map)) {
+ if (ent_e_flag(log_new.old_map) &&
+ !ent_normal(log_new.old_map)) {
+ arena->freelist[i].has_err = 1;
ret = arena_clear_freelist_error(arena, i);
if (ret)
dev_err_ratelimited(to_dev(arena),
@@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena)
}
/* This implies a newly created or untouched flog entry */
- if (log_new.old_map == log_new.new_map)
+ if (log_oldmap == log_newmap)
continue;
/* Check if map recovery is needed */
@@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena)
NULL, NULL, 0);
if (ret)
return ret;
- if ((le32_to_cpu(log_new.new_map) != map_entry) &&
- (le32_to_cpu(log_new.old_map) == map_entry)) {
+
+ /*
+ * The map_entry from btt_read_map is stripped of any flag bits,
+ * so use the stripped out versions from the log as well for
+ * testing whether recovery is needed. For restoration, use the
+ * 'raw' version of the log entries as that captured what we
+ * were going to write originally.
+ */
+ if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
/*
* Last transaction wrote the flog, but wasn't able
* to complete the map write. So fix up the map.
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index db3cb6d4d0d4..ddff49c707b0 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -44,6 +44,8 @@
#define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK))
#define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK))
#define set_e_flag(ent) (ent |= MAP_ERR_MASK)
+/* 'normal' is both e and z flags set */
+#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))
enum btt_init_state {
INIT_UNCHECKED = 0,
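A user-space sketch of the flag handling above, assuming (as elsewhere in btt.h) that the error and trim flags occupy the top two bits of a 32-bit map entry and ent_lba() strips them off:

#include <stdint.h>
#include <stdio.h>

#define MAP_TRIM_MASK (1u << 31)
#define MAP_ERR_MASK  (1u << 30)
#define MAP_LBA_MASK  (~(MAP_TRIM_MASK | MAP_ERR_MASK))

#define ent_lba(ent)    ((ent) & MAP_LBA_MASK)
#define ent_e_flag(ent) (!!((ent) & MAP_ERR_MASK))
#define ent_z_flag(ent) (!!((ent) & MAP_TRIM_MASK))
/* 'normal' is both e and z flags set */
#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))

int main(void)
{
	uint32_t ent = MAP_ERR_MASK | MAP_TRIM_MASK | 0x1234;

	/* a normal entry has both flags set and is not a real error */
	printf("lba=%#x normal=%d err-only=%d\n", ent_lba(ent),
	       ent_normal(ent), ent_e_flag(ent) && !ent_normal(ent));
	return 0;
}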
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 795ad4ff35ca..9486acc08402 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev,
}
static DEVICE_ATTR_RO(size);
+static ssize_t log_zero_flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Y\n");
+}
+static DEVICE_ATTR_RO(log_zero_flags);
+
static struct attribute *nd_btt_attributes[] = {
&dev_attr_sector_size.attr,
&dev_attr_namespace.attr,
&dev_attr_uuid.attr,
&dev_attr_size.attr,
+ &dev_attr_log_zero_flags.attr,
NULL,
};
@@ -190,14 +198,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
return NULL;
nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
- if (nd_btt->id < 0) {
- kfree(nd_btt);
- return NULL;
- }
+ if (nd_btt->id < 0)
+ goto out_nd_btt;
nd_btt->lbasize = lbasize;
- if (uuid)
+ if (uuid) {
uuid = kmemdup(uuid, 16, GFP_KERNEL);
+ if (!uuid)
+ goto out_put_id;
+ }
nd_btt->uuid = uuid;
dev = &nd_btt->dev;
dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
@@ -212,6 +221,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
return NULL;
}
return dev;
+
+out_put_id:
+ ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+
+out_nd_btt:
+ kfree(nd_btt);
+ return NULL;
}
struct device *nd_btt_create(struct nd_region *nd_region)
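The __nd_btt_create() hunk above converts open-coded error returns into the kernel's goto-unwind idiom: each allocation that can fail jumps to a label that releases everything acquired before it, in reverse order. A minimal stand-alone sketch of the pattern, with stand-ins for ida_simple_get()/ida_simple_remove() and kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct widget {
	int id;
	char *name;
};

static struct widget *widget_create(const char *name)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;

	w->id = 42;			/* stand-in for ida_simple_get() */
	if (w->id < 0)
		goto out_widget;

	w->name = strdup(name);		/* stand-in for kmemdup() */
	if (!w->name)
		goto out_put_id;

	return w;

out_put_id:
	w->id = -1;			/* stand-in for ida_simple_remove() */
out_widget:
	free(w);
	return NULL;
}

int main(void)
{
	struct widget *w = widget_create("btt0.0");

	if (w) {
		printf("created %s\n", w->name);
		free(w->name);
		free(w);
	}
	return 0;
}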
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 06f5087547ea..ecbab2d66e38 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -11,6 +11,7 @@
* General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
@@ -25,6 +26,10 @@
static DEFINE_IDA(dimm_ida);
+static bool noblk;
+module_param(noblk, bool, 0444);
+MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
+
/*
* Retrieve bus and dimm handle and return if this bus supports
* get_config_data commands
@@ -551,6 +556,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
nvdimm->dimm_id = dimm_id;
nvdimm->provider_data = provider_data;
+ if (noblk)
+ flags |= 1 << NDD_NOBLK;
nvdimm->flags = flags;
nvdimm->cmd_mask = cmd_mask;
nvdimm->num_flush = num_flush;
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 521eaf53a52a..36be9b619187 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -47,6 +47,7 @@ static int e820_register_one(struct resource *res, void *data)
ndr_desc.res = res;
ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
ndr_desc.numa_node = e820_range_to_nid(res->start);
+ ndr_desc.target_node = ndr_desc.numa_node;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
return -ENXIO;
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index a11bf4e6b451..f3d753d3169c 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -392,6 +392,7 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
return 0; /* no label, nothing to reserve */
for_each_clear_bit_le(slot, free, nslot) {
+ struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
struct nd_namespace_label *nd_label;
struct nd_region *nd_region = NULL;
u8 label_uuid[NSLABEL_UUID_LEN];
@@ -406,6 +407,8 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
flags = __le32_to_cpu(nd_label->flags);
+ if (test_bit(NDD_NOBLK, &nvdimm->flags))
+ flags &= ~NSLABEL_FLAG_LOCAL;
nd_label_gen_id(&label_id, label_uuid, flags);
res = nvdimm_allocate_dpa(ndd, &label_id,
__le64_to_cpu(nd_label->dpa),
@@ -755,7 +758,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
- int pos)
+ int pos, unsigned long flags)
{
struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set;
@@ -796,7 +799,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
if (nspm->alt_name)
memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
- nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
+ nd_label->flags = __cpu_to_le32(flags);
nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
nd_label->position = __cpu_to_le16(pos);
nd_label->isetcookie = __cpu_to_le64(cookie);
@@ -1249,13 +1252,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
struct nd_namespace_pmem *nspm, resource_size_t size)
{
- int i;
+ int i, rc;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
- int rc, count = 0;
+ int count = 0;
if (size == 0) {
rc = del_labels(nd_mapping, nspm->uuid);
@@ -1273,7 +1276,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
if (rc < 0)
return rc;
- rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
+ rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
+ NSLABEL_FLAG_UPDATING);
+ if (rc)
+ return rc;
+ }
+
+ if (size == 0)
+ return 0;
+
+ /* Clear the UPDATING flag per UEFI 2.7 expectations */
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+
+ rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
if (rc)
return rc;
}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 4b077555ac70..f293556cbbf6 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
bool pmem_should_map_pages(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_namespace_common *ndns = to_ndns(dev);
struct nd_namespace_io *nsio;
if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
if (is_nd_pfn(dev) || is_nd_btt(dev))
return false;
+ if (ndns->force_raw)
+ return false;
+
nsio = to_nd_namespace_io(dev);
if (region_intersects(nsio->res.start, resource_size(&nsio->res),
IORESOURCE_SYSTEM_RAM,
@@ -1506,13 +1510,13 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
if (dev->driver || ndns->claim)
return -EBUSY;
- if (strcmp(buf, "btt") == 0 || strcmp(buf, "btt\n") == 0)
+ if (sysfs_streq(buf, "btt"))
ndns->claim_class = btt_claim_class(dev);
- else if (strcmp(buf, "pfn") == 0 || strcmp(buf, "pfn\n") == 0)
+ else if (sysfs_streq(buf, "pfn"))
ndns->claim_class = NVDIMM_CCLASS_PFN;
- else if (strcmp(buf, "dax") == 0 || strcmp(buf, "dax\n") == 0)
+ else if (sysfs_streq(buf, "dax"))
ndns->claim_class = NVDIMM_CCLASS_DAX;
- else if (strcmp(buf, "") == 0 || strcmp(buf, "\n") == 0)
+ else if (sysfs_streq(buf, ""))
ndns->claim_class = NVDIMM_CCLASS_NONE;
else
return -EINVAL;
@@ -2245,9 +2249,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
if (!nsblk->uuid)
goto blk_err;
memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
- if (name[0])
+ if (name[0]) {
nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
GFP_KERNEL);
+ if (!nsblk->alt_name)
+ goto blk_err;
+ }
res = nsblk_add_resource(nd_region, ndd, nsblk,
__le64_to_cpu(nd_label->dpa));
if (!res)
@@ -2492,6 +2499,12 @@ static int init_active_labels(struct nd_region *nd_region)
if (!label_ent)
break;
label = nd_label_active(ndd, j);
+ if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
+ u32 flags = __le32_to_cpu(label->flags);
+
+ flags &= ~NSLABEL_FLAG_LOCAL;
+ label->flags = __cpu_to_le32(flags);
+ }
label_ent->label = label;
mutex_lock(&nd_mapping->lock);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 379bf4305e61..a5ac3b240293 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -153,7 +153,7 @@ struct nd_region {
u16 ndr_mappings;
u64 ndr_size;
u64 ndr_start;
- int id, num_lanes, ro, numa_node;
+ int id, num_lanes, ro, numa_node, target_node;
void *provider_data;
struct kernfs_node *bb_state;
struct badblocks bb;
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 0a701837dfc0..a0c8dcfa0bf9 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -68,6 +68,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.attr_groups = region_attr_groups;
ndr_desc.numa_node = dev_to_node(&pdev->dev);
+ ndr_desc.target_node = ndr_desc.numa_node;
ndr_desc.res = &pdev->resource[i];
ndr_desc.of_node = np;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
@@ -108,7 +109,6 @@ static struct platform_driver of_pmem_region_driver = {
.remove = of_pmem_region_remove,
.driver = {
.name = "of_pmem",
- .owner = THIS_MODULE,
.of_match_table = of_pmem_region_match,
},
};
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 6f22272e8d80..d271bd731af7 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
}
EXPORT_SYMBOL(nd_pfn_probe);
+static u32 info_block_reserve(void)
+{
+ return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
/*
* We hotplug memory at section granularity, pad the reserved area from
* the previous section base to the namespace base address.
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base)
static unsigned long init_altmap_reserve(resource_size_t base)
{
- unsigned long reserve = PHYS_PFN(SZ_8K);
+ unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
@@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
u64 offset = le64_to_cpu(pfn_sb->dataoff);
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ u32 reserve = info_block_reserve();
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
resource_size_t base = nsio->res.start + start_pad;
@@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
res->end -= end_trunc;
if (nd_pfn->mode == PFN_MODE_RAM) {
- if (offset < SZ_8K)
+ if (offset < reserve)
return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
pgmap->altmap_valid = false;
@@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
le64_to_cpu(nd_pfn->pfn_sb->npfns),
nd_pfn->npfns);
memcpy(altmap, &__altmap, sizeof(*altmap));
- altmap->free = PHYS_PFN(offset - SZ_8K);
+ altmap->free = PHYS_PFN(offset - reserve);
altmap->alloc = 0;
pgmap->altmap_valid = true;
} else
@@ -678,18 +684,17 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
IORES_DESC_NONE) == REGION_MIXED
|| !IS_ALIGNED(end, nd_pfn->align)
- || nd_region_conflict(nd_region, start, size + adjust))
+ || nd_region_conflict(nd_region, start, size))
*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
}
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
- u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ u32 start_pad, end_trunc, reserve = info_block_reserve();
resource_size_t start, size;
struct nd_region *nd_region;
- u32 start_pad, end_trunc;
struct nd_pfn_sb *pfn_sb;
unsigned long npfns;
phys_addr_t offset;
@@ -734,7 +739,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
*/
start = nsio->res.start + start_pad;
size = resource_size(&nsio->res);
- npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+ npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
/ PAGE_SIZE);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
@@ -742,11 +747,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*/
- offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+ offset = ALIGN(start + reserve + 64 * npfns,
max(nd_pfn->align, PMD_SIZE)) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
- offset = ALIGN(start + SZ_8K + dax_label_reserve,
- nd_pfn->align) - start;
+ offset = ALIGN(start + reserve, nd_pfn->align) - start;
else
return -ENXIO;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index bc2f700feef8..0279eb1da3ef 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page,
while (len) {
mem = kmap_atomic(page);
- chunk = min_t(unsigned int, len, PAGE_SIZE);
+ chunk = min_t(unsigned int, len, PAGE_SIZE - off);
memcpy_flushcache(pmem_addr, mem + off, chunk);
kunmap_atomic(mem);
len -= chunk;
off = 0;
page++;
- pmem_addr += PAGE_SIZE;
+ pmem_addr += chunk;
}
}
@@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
while (len) {
mem = kmap_atomic(page);
- chunk = min_t(unsigned int, len, PAGE_SIZE);
+ chunk = min_t(unsigned int, len, PAGE_SIZE - off);
rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
kunmap_atomic(mem);
if (rem)
@@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
len -= chunk;
off = 0;
page++;
- pmem_addr += PAGE_SIZE;
+ pmem_addr += chunk;
}
return BLK_STS_OK;
}
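The read/write hunks above fix copies that start at a non-zero offset within a page: the first chunk must be capped at PAGE_SIZE - off, and the device pointer advanced by the actual chunk size instead of a whole page. A user-space sketch of the corrected loop (names are hypothetical, PAGE_SIZE fixed at 4096 for illustration):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void copy_chunked(char *dst, const char *src,
			 unsigned int off, unsigned int len)
{
	while (len) {
		/* cap the first chunk at the end of the current page */
		unsigned int chunk = len < PAGE_SIZE - off ?
				     len : PAGE_SIZE - off;

		memcpy(dst, src + off, chunk);
		len -= chunk;
		off = 0;		/* later pages start at offset 0 */
		src += PAGE_SIZE;	/* next source page */
		dst += chunk;		/* advance by bytes written */
	}
}

int main(void)
{
	static char src[3 * PAGE_SIZE], dst[3 * PAGE_SIZE];

	memset(src, 'x', sizeof(src));
	copy_chunked(dst, src, 100, 2 * PAGE_SIZE); /* crosses two pages */
	printf("copied: %d\n", dst[0] == 'x');
	return 0;
}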
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e2818f94f292..b4ef7d9ff22e 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1003,6 +1003,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (test_bit(NDD_UNARMED, &nvdimm->flags))
ro = 1;
+
+ if (test_bit(NDD_NOBLK, &nvdimm->flags)
+ && dev_type == &nd_blk_device_type) {
+ dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
+ caller, dev_name(&nvdimm->dev), i);
+ return NULL;
+ }
}
if (dev_type == &nd_blk_device_type) {
@@ -1065,6 +1072,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
nd_region->flags = ndr_desc->flags;
nd_region->ro = ro;
nd_region->numa_node = ndr_desc->numa_node;
+ nd_region->target_node = ndr_desc->target_node;
ida_init(&nd_region->ns_ida);
ida_init(&nd_region->btt_ida);
ida_init(&nd_region->pfn_ida);
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index f8bb746a549f..a570f2263a42 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -22,6 +22,8 @@ static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
static void *key_data(struct key *key)
{
struct encrypted_key_payload *epayload = dereference_key_locked(key);
@@ -75,6 +77,16 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
return key;
}
+static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
+ struct key **key)
+{
+ *key = nvdimm_request_key(nvdimm);
+ if (!*key)
+ return zero_key;
+
+ return key_data(*key);
+}
+
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
key_serial_t id, int subclass)
{
@@ -105,36 +117,57 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
return key;
}
-static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm)
+static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
+ key_serial_t id, int subclass, struct key **key)
+{
+ *key = NULL;
+ if (id == 0) {
+ if (subclass == NVDIMM_BASE_KEY)
+ return zero_key;
+ else
+ return NULL;
+ }
+
+ *key = nvdimm_lookup_user_key(nvdimm, id, subclass);
+ if (!*key)
+ return NULL;
+
+ return key_data(*key);
+}
+
+static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
struct key *key;
int rc;
+ const void *data;
if (!nvdimm->sec.ops->change_key)
- return NULL;
+ return -EOPNOTSUPP;
- key = nvdimm_request_key(nvdimm);
- if (!key)
- return NULL;
+ data = nvdimm_get_key_payload(nvdimm, &key);
/*
* Send the same key to the hardware as new and old key to
* verify that the key is good.
*/
- rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key),
- key_data(key), NVDIMM_USER);
+ rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
if (rc < 0) {
nvdimm_put_key(key);
- key = NULL;
+ return rc;
}
- return key;
+
+ nvdimm_put_key(key);
+ nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+ return 0;
}
static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- struct key *key = NULL;
+ struct key *key;
+ const void *data;
int rc;
/* The bus lock should be held at the top level of the call stack */
@@ -160,16 +193,11 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
if (!key_revalidate)
return 0;
- key = nvdimm_key_revalidate(nvdimm);
- if (!key)
- return nvdimm_security_freeze(nvdimm);
+ return nvdimm_key_revalidate(nvdimm);
} else
- key = nvdimm_request_key(nvdimm);
+ data = nvdimm_get_key_payload(nvdimm, &key);
- if (!key)
- return -ENOKEY;
-
- rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key));
+ rc = nvdimm->sec.ops->unlock(nvdimm, data);
dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
@@ -195,6 +223,7 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct key *key;
int rc;
+ const void *data;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -214,11 +243,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
return -EBUSY;
}
- key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
- if (!key)
+ data = nvdimm_get_user_key_payload(nvdimm, keyid,
+ NVDIMM_BASE_KEY, &key);
+ if (!data)
return -ENOKEY;
- rc = nvdimm->sec.ops->disable(nvdimm, key_data(key));
+ rc = nvdimm->sec.ops->disable(nvdimm, data);
dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
@@ -235,6 +265,7 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct key *key, *newkey;
int rc;
+ const void *data, *newdata;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -249,22 +280,19 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
return -EIO;
}
- if (keyid == 0)
- key = NULL;
- else {
- key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
- if (!key)
- return -ENOKEY;
- }
+ data = nvdimm_get_user_key_payload(nvdimm, keyid,
+ NVDIMM_BASE_KEY, &key);
+ if (!data)
+ return -ENOKEY;
- newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY);
- if (!newkey) {
+ newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
+ NVDIMM_NEW_KEY, &newkey);
+ if (!newdata) {
nvdimm_put_key(key);
return -ENOKEY;
}
- rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL,
- key_data(newkey), pass_type);
+ rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
dev_dbg(dev, "key: %d %d update%s: %s\n",
key_serial(key), key_serial(newkey),
pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
@@ -286,8 +314,9 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- struct key *key;
+ struct key *key = NULL;
int rc;
+ const void *data;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -319,11 +348,12 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
return -EOPNOTSUPP;
}
- key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
- if (!key)
+ data = nvdimm_get_user_key_payload(nvdimm, keyid,
+ NVDIMM_BASE_KEY, &key);
+ if (!data)
return -ENOKEY;
- rc = nvdimm->sec.ops->erase(nvdimm, key_data(key), pass_type);
+ rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
rc == 0 ? "success" : "fail");
@@ -337,8 +367,9 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- struct key *key;
+ struct key *key = NULL;
int rc;
+ const void *data;
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -368,15 +399,12 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return -EBUSY;
}
- if (keyid == 0)
- key = NULL;
- else {
- key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
- if (!key)
- return -ENOKEY;
- }
+ data = nvdimm_get_user_key_payload(nvdimm, keyid,
+ NVDIMM_BASE_KEY, &key);
+ if (!data)
+ return -ENOKEY;
- rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL);
+ rc = nvdimm->sec.ops->overwrite(nvdimm, data);
dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
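The zero_key fallback above lets security operations proceed with an all-zero payload when no key is found, matching DIMMs provisioned with the null passphrase, instead of failing with -ENOKEY. A simplified sketch of the lookup (key type and names are illustrative):

#include <stdio.h>
#include <string.h>

#define PASSPHRASE_LEN 32

static const char zero_key[PASSPHRASE_LEN];

struct key { char data[PASSPHRASE_LEN]; };

static const void *get_key_payload(struct key *key)
{
	if (!key)
		return zero_key;	/* fall back to the null key */
	return key->data;
}

int main(void)
{
	const void *data = get_key_payload(NULL);

	printf("zero payload: %d\n",
	       !memcmp(data, zero_key, PASSPHRASE_LEN));
	return 0;
}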
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 07bf2bff3a76..6265d9225ec8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -179,8 +179,8 @@ static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
int ret = 0;
/*
- * Keep a reference until the work is flushed since ->delete_ctrl
- * can free the controller.
+ * Keep a reference until nvme_do_delete_ctrl() completes,
+ * since ->delete_ctrl can free the controller.
*/
nvme_get_ctrl(ctrl);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
@@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
"Cancelling I/O %d", req->tag);
nvme_req(req)->status = NVME_SC_ABORT_REQ;
- blk_mq_complete_request(req);
+ blk_mq_complete_request_sync(req);
return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
@@ -388,7 +388,7 @@ static void nvme_free_ns_head(struct kref *ref)
nvme_mpath_remove_disk(head);
ida_simple_remove(&head->subsys->ns_ida, head->instance);
list_del_init(&head->entry);
- cleanup_srcu_struct_quiesced(&head->srcu);
+ cleanup_srcu_struct(&head->srcu);
nvme_put_subsystem(head->subsys);
kfree(head);
}
@@ -1250,7 +1250,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (ns) {
if (ctrl->effects)
effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
- if (effects & ~NVME_CMD_EFFECTS_CSUPP)
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
dev_warn(ctrl->device,
"IO command:%02x has unhandled effects:%08x\n",
opcode, effects);
@@ -1495,10 +1495,10 @@ static void nvme_set_chunk_size(struct nvme_ns *ns)
blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}
-static void nvme_config_discard(struct nvme_ns *ns)
+static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
struct nvme_ctrl *ctrl = ns->ctrl;
- struct request_queue *queue = ns->queue;
+ struct request_queue *queue = disk->queue;
u32 size = queue_logical_block_size(queue);
if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
@@ -1526,12 +1526,13 @@ static void nvme_config_discard(struct nvme_ns *ns)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
-static inline void nvme_config_write_zeroes(struct nvme_ns *ns)
+static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
{
u32 max_sectors;
unsigned short bs = 1 << ns->lba_shift;
- if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES))
+ if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
+ (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
return;
/*
* Even though NVMe spec explicitly states that MDTS is not
@@ -1548,13 +1549,7 @@ static inline void nvme_config_write_zeroes(struct nvme_ns *ns)
else
max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
- blk_queue_max_write_zeroes_sectors(ns->queue, max_sectors);
-}
-
-static inline void nvme_ns_config_oncs(struct nvme_ns *ns)
-{
- nvme_config_discard(ns);
- nvme_config_write_zeroes(ns);
+ blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
}
static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
@@ -1610,7 +1605,9 @@ static void nvme_update_disk_info(struct gendisk *disk,
capacity = 0;
set_capacity(disk, capacity);
- nvme_ns_config_oncs(ns);
+
+ nvme_config_discard(disk, ns);
+ nvme_config_write_zeroes(disk, ns);
if (id->nsattr & (1 << 0))
set_disk_ro(disk, true);
@@ -3304,6 +3301,7 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
mutex_unlock(&ctrl->subsys->lock);
+ nvme_put_ns_head(ns->head);
out_free_id:
kfree(id);
out_free_queue:
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b29b12498a1a..6d8451356eac 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
memset(queue, 0, sizeof(*queue));
queue->ctrl = ctrl;
queue->qnum = idx;
- atomic_set(&queue->csn, 1);
+ atomic_set(&queue->csn, 0);
queue->dev = ctrl->dev;
if (idx > 0)
@@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
*/
queue->connection_id = 0;
- atomic_set(&queue->csn, 1);
+ atomic_set(&queue->csn, 0);
}
static void
@@ -2107,7 +2107,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
freq->sg_cnt = 0;
- if (!blk_rq_payload_bytes(rq))
+ if (!blk_rq_nr_phys_segments(rq))
return 0;
freq->sg_table.sgl = freq->first_sgl;
@@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
{
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
- u32 csn;
int ret, opstate;
/*
@@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
/* format the FC-NVME CMD IU and fcp_req */
cmdiu->connection_id = cpu_to_be64(queue->connection_id);
- csn = atomic_inc_return(&queue->csn);
- cmdiu->csn = cpu_to_be32(csn);
cmdiu->data_len = cpu_to_be32(data_len);
switch (io_dir) {
case NVMEFC_FCP_WRITE:
@@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
if (!(op->flags & FCOP_FLAGS_AEN))
blk_mq_start_request(op->rq);
+ cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
&ctrl->rport->remoteport,
queue->lldd_handle, &op->fcp_req);
if (ret) {
+ /*
+ * If the lld fails to send the command, is there an issue with
+ * the csn value? If the failing command is the Connect, no: the
+ * connection won't be live. If it is a post-connect command, a
+ * gap in the csn sequence may be created. Does this matter? As
+ * Linux initiators don't send fused commands, no. The gap would
+ * exist, but as nothing on the target side depends on csn
+ * delivery order, it shouldn't hurt. It would be difficult for a
+ * target even to detect the gap, as it has no idea when the
+ * command carrying that csn was supposed to arrive.
+ */
opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
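The csn move above is easiest to see in isolation: take the sequence number immediately before handing the command to the LLDD, so values are monotonic in transmit order and a failed send only leaves a gap. A minimal userspace sketch (hypothetical names, not the driver's API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint csn;                 /* starts at 0, like the queue's csn */

static int lld_send(unsigned int n)     /* stand-in for ->fcp_io() */
{
        return n == 3 ? -1 : 0;         /* pretend the 3rd send fails */
}

int main(void)
{
        for (int i = 0; i < 5; i++) {
                /* fetch-add + 1 mimics atomic_inc_return() */
                unsigned int n = atomic_fetch_add(&csn, 1) + 1;

                if (lld_send(n))
                        printf("send failed, csn %u becomes a harmless gap\n", n);
                else
                        printf("sent csn %u\n", n);
        }
        return 0;
}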
@@ -2304,12 +2314,23 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
- data_len = blk_rq_payload_bytes(rq);
- if (data_len)
+ /*
+ * nvme core doesn't quite treat the rq opaquely. Commands such
+ * as WRITE ZEROES will return a non-zero rq payload_bytes yet
+ * there is no actual payload to be transferred.
+ * To get it right, key data transmission on there being 1 or
+ * more physical segments in the sg list. If there are no
+ * physical segments, there is no payload.
+ */
+ if (blk_rq_nr_phys_segments(rq)) {
+ data_len = blk_rq_payload_bytes(rq);
io_dir = ((rq_data_dir(rq) == WRITE) ?
NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
- else
+ } else {
+ data_len = 0;
io_dir = NVMEFC_FCP_NODATA;
+ }
+
return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}
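A toy illustration of the rule in that comment (hypothetical types, not the block layer's API): the transfer type is keyed off the segment count, since a command like WRITE ZEROES reports payload bytes but carries no segments:

#include <stdio.h>

enum io_dir { IO_NODATA, IO_READ, IO_WRITE };

struct fake_rq {
        unsigned int payload_bytes;
        unsigned int nr_phys_segments;
        int is_write;
};

static enum io_dir classify(const struct fake_rq *rq)
{
        if (!rq->nr_phys_segments)      /* no SG entries -> no payload */
                return IO_NODATA;
        return rq->is_write ? IO_WRITE : IO_READ;
}

int main(void)
{
        struct fake_rq write_zeroes = { .payload_bytes = 4096, .nr_phys_segments = 0, .is_write = 1 };
        struct fake_rq normal_write = { .payload_bytes = 4096, .nr_phys_segments = 1, .is_write = 1 };

        printf("write zeroes -> %d (expect IO_NODATA=0)\n", classify(&write_zeroes));
        printf("normal write -> %d (expect IO_WRITE=2)\n", classify(&normal_write));
        return 0;
}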
@@ -2464,6 +2485,7 @@ static int
nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
unsigned int nr_io_queues;
int ret;
@@ -2476,6 +2498,13 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
return ret;
}
+ if (!nr_io_queues && prior_ioq_cnt) {
+ dev_info(ctrl->ctrl.device,
+ "Fail Reconnect: At least 1 io queue "
+ "required (was %d)\n", prior_ioq_cnt);
+ return -ENOSPC;
+ }
+
ctrl->ctrl.queue_count = nr_io_queues + 1;
/* check for io queues existing */
if (ctrl->ctrl.queue_count == 1)
@@ -2489,6 +2518,10 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_delete_hw_queues;
+ if (prior_ioq_cnt != nr_io_queues)
+ dev_info(ctrl->ctrl.device,
+ "reconnect: revising io queue count from %d to %d\n",
+ prior_ioq_cnt, nr_io_queues);
blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
return 0;
@@ -3006,7 +3039,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->ctrl.opts = opts;
ctrl->ctrl.nr_reconnects = 0;
- ctrl->ctrl.numa_node = dev_to_node(lport->dev);
+ if (lport->dev)
+ ctrl->ctrl.numa_node = dev_to_node(lport->dev);
+ else
+ ctrl->ctrl.numa_node = NUMA_NO_NODE;
INIT_LIST_HEAD(&ctrl->ctrl_list);
ctrl->lport = lport;
ctrl->rport = rport;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2839bb70badf..f0716f6ce41f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
struct nvme_ns *ns)
{
- enum nvme_ana_state old;
-
mutex_lock(&ns->head->lock);
- old = ns->ana_state;
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
- if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+ if (nvme_state_is_live(ns->ana_state))
nvme_mpath_set_live(ns);
mutex_unlock(&ns->head->lock);
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b91f1838bbd5..527d64545023 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -87,6 +87,11 @@ enum nvme_quirks {
* Ignore device provided subnqn.
*/
NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
+
+ /*
+ * Broken Write Zeroes.
+ */
+ NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 92bad1c810ac..a90cf5d63aac 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2937,7 +2937,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
- .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
+ .driver_data = NVME_QUIRK_IDENTIFY_CNS |
+ NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 208ee518af65..68c49dd67210 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -463,6 +463,15 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
queue->data_remaining = le32_to_cpu(pdu->data_length);
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
+ unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d tag %#x SUCCESS set but not last PDU\n",
+ nvme_tcp_queue_id(queue), rq->tag);
+ nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+ return -EPROTO;
+ }
+
return 0;
}
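For context, the SUCCESS flag on a C2H data PDU promises that no completion capsule follows, so it is only legal on the last PDU and the host must then complete the request itself. A small sketch of that invariant (flag values hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define F_DATA_LAST     (1 << 0)
#define F_DATA_SUCCESS  (1 << 1)

static int check_c2h_flags(unsigned int flags, bool *complete_inline)
{
        /* SUCCESS without LAST is a protocol violation (-EPROTO above) */
        if ((flags & F_DATA_SUCCESS) && !(flags & F_DATA_LAST))
                return -1;
        /* SUCCESS: complete the request when the data is consumed */
        *complete_inline = flags & F_DATA_SUCCESS;
        return 0;
}

int main(void)
{
        bool inline_done;

        if (check_c2h_flags(F_DATA_SUCCESS, &inline_done))
                printf("SUCCESS on a non-last PDU: protocol error\n");
        if (!check_c2h_flags(F_DATA_SUCCESS | F_DATA_LAST, &inline_done))
                printf("last PDU, complete inline: %d\n", inline_done);
        return 0;
}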
@@ -618,6 +627,14 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return ret;
}
+static inline void nvme_tcp_end_request(struct request *rq, u16 status)
+{
+ union nvme_result res = {};
+
+ nvme_end_request(rq, cpu_to_le16(status << 1), res);
+}
+
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
@@ -685,6 +702,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
} else {
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
+ nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
nvme_tcp_init_recv_ctx(queue);
}
}
@@ -695,6 +714,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
struct sk_buff *skb, unsigned int *offset, size_t *len)
{
+ struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
char *ddgst = (char *)&queue->recv_ddgst;
size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
@@ -718,6 +738,13 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
return -EIO;
}
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
+ struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+
+ nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ }
+
nvme_tcp_init_recv_ctx(queue);
return 0;
}
@@ -815,10 +842,7 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
- union nvme_result res = {};
-
- nvme_end_request(blk_mq_rq_from_pdu(req),
- cpu_to_le16(NVME_SC_DATA_XFER_ERROR), res);
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
}
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 58456de78bb2..5f24ea7a28eb 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -50,7 +50,19 @@ static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
return ret;
}
+static const char *nvme_trace_admin_get_features(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 fid = cdw10[0];
+ u8 sel = cdw10[1] & 0x7;
+ u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+ trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
+ trace_seq_putc(p, 0);
+ return ret;
+}
static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
@@ -101,6 +113,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
return nvme_trace_create_cq(p, cdw10);
case nvme_admin_identify:
return nvme_trace_admin_identify(p, cdw10);
+ case nvme_admin_get_features:
+ return nvme_trace_admin_get_features(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
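The decoder added above slices fixed-position fields out of the CDW10 bytes. A standalone userspace sketch of the same extraction, with an open-coded little-endian load standing in for get_unaligned_le32:

#include <stdint.h>
#include <stdio.h>

static uint32_t le32_at(const uint8_t *p)
{
        return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        /* fid in byte 0, select in low 3 bits of byte 1, cdw11 in bytes 4..7 */
        uint8_t cdw10[8] = { 0x07, 0x02, 0, 0, 0x78, 0x56, 0x34, 0x12 };

        printf("fid=0x%x sel=0x%x cdw11=0x%x\n",
               cdw10[0], cdw10[1] & 0x7, le32_at(cdw10 + 4));
        return 0;
}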
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 244d7c177e5a..97d3c77365b8 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -108,7 +108,7 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->metadata = le64_to_cpu(cmd->common.metadata);
__assign_disk_name(__entry->disk, req->rq_disk);
memcpy(__entry->cdw10, &cmd->common.cdw10,
- 6 * sizeof(__entry->cdw10));
+ sizeof(__entry->cdw10));
),
TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
__entry->ctrl_id, __print_disk_name(__entry->disk),
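The memcpy change above fixes the classic sizeof-of-an-array pitfall: sizeof already yields the whole array, so an extra element-count factor copies past the field. In isolation (field size assumed for the example):

#include <stdio.h>
#include <string.h>

struct entry { unsigned char cdw10[24]; };

int main(void)
{
        struct entry e;

        printf("sizeof(e.cdw10) = %zu\n", sizeof(e.cdw10));    /* 24 */
        /* 6 * sizeof(e.cdw10) would be 144: far past the 24-byte field */
        memset(e.cdw10, 0, sizeof(e.cdw10));                   /* correct bound */
        return 0;
}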
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 76250181fee0..9f72d515fc4b 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len;
}
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+ return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index d44ede147263..b3e765a95af8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
ret = nvmet_p2pmem_ns_enable(ns);
if (ret)
- goto out_unlock;
+ goto out_dev_disable;
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ out_unlock:
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
-
+out_dev_disable:
nvmet_ns_dev_disable(ns);
goto out_unlock;
}
@@ -1163,6 +1163,15 @@ static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
put_device(ctrl->p2p_client);
}
+static void nvmet_fatal_error_handler(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, fatal_err_work);
+
+ pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
+ ctrl->ops->delete_ctrl(ctrl);
+}
+
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
@@ -1205,6 +1214,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
+ INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1308,21 +1318,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
kref_put(&ctrl->ref, nvmet_ctrl_free);
}
-static void nvmet_fatal_error_handler(struct work_struct *work)
-{
- struct nvmet_ctrl *ctrl =
- container_of(work, struct nvmet_ctrl, fatal_err_work);
-
- pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
- ctrl->ops->delete_ctrl(ctrl);
-}
-
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
- INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
schedule_work(&ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
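The rework above initializes the work item once at allocation and relies on the CSTS.CFS bit to arm it at most once, instead of re-initializing a work struct that may already be queued. A reduced sketch of that shape (hypothetical helpers, synchronous for brevity):

struct ctrl {
        int fatal_armed;                        /* stands in for CSTS.CFS */
        void (*work_fn)(struct ctrl *);
};

static void fatal_error_handler(struct ctrl *c)
{
        /* delete_ctrl() would run here */
}

static void ctrl_alloc_init(struct ctrl *c)
{
        c->fatal_armed = 0;
        c->work_fn = fatal_error_handler;       /* INIT_WORK, done exactly once */
}

static void ctrl_fatal_error(struct ctrl *c)
{
        if (!c->fatal_armed) {                  /* under ctrl->lock in the driver */
                c->fatal_armed = 1;
                c->work_fn(c);                  /* schedule_work() in the driver */
        }
}

int main(void)
{
        struct ctrl c;

        ctrl_alloc_init(&c);
        ctrl_fatal_error(&c);                   /* runs the handler */
        ctrl_fatal_error(&c);                   /* no-op: already armed */
        return 0;
}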
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index c872b47a88f3..33ed95e72d6b 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_subsys_link *p;
+ struct nvmet_port *r;
+ size_t entries = 0;
+
+ list_for_each_entry(p, &req->port->subsystems, entry) {
+ if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+ continue;
+ entries++;
+ }
+ list_for_each_entry(r, &req->port->referrals, entry)
+ entries++;
+ return entries;
+}
+
static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
{
const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmf_disc_rsp_page_hdr *hdr;
+ u64 offset = nvmet_get_log_page_offset(req->cmd);
size_t data_len = nvmet_get_log_page_len(req->cmd);
- size_t alloc_len = max(data_len, sizeof(*hdr));
- int residual_len = data_len - sizeof(*hdr);
+ size_t alloc_len;
struct nvmet_subsys_link *p;
struct nvmet_port *r;
u32 numrec = 0;
u16 status = 0;
+ void *buffer;
+
+ /* Spec requires dword aligned offsets */
+ if (offset & 0x3) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
/*
* Make sure we're passing at least a buffer of response header size.
* If the host-provided data length is less than the header size, only
* the number of bytes requested by the host will be sent.
*/
- hdr = kzalloc(alloc_len, GFP_KERNEL);
- if (!hdr) {
+ down_read(&nvmet_config_sem);
+ alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+ buffer = kzalloc(alloc_len, GFP_KERNEL);
+ if (!buffer) {
+ up_read(&nvmet_config_sem);
status = NVME_SC_INTERNAL;
goto out;
}
- down_read(&nvmet_config_sem);
+ hdr = buffer;
list_for_each_entry(p, &req->port->subsystems, entry) {
+ char traddr[NVMF_TRADDR_SIZE];
+
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
continue;
- if (residual_len >= entry_size) {
- char traddr[NVMF_TRADDR_SIZE];
-
- nvmet_set_disc_traddr(req, req->port, traddr);
- nvmet_format_discovery_entry(hdr, req->port,
- p->subsys->subsysnqn, traddr,
- NVME_NQN_NVME, numrec);
- residual_len -= entry_size;
- }
+
+ nvmet_set_disc_traddr(req, req->port, traddr);
+ nvmet_format_discovery_entry(hdr, req->port,
+ p->subsys->subsysnqn, traddr,
+ NVME_NQN_NVME, numrec);
numrec++;
}
list_for_each_entry(r, &req->port->referrals, entry) {
- if (residual_len >= entry_size) {
- nvmet_format_discovery_entry(hdr, r,
- NVME_DISC_SUBSYS_NAME,
- r->disc_addr.traddr,
- NVME_NQN_DISC, numrec);
- residual_len -= entry_size;
- }
+ nvmet_format_discovery_entry(hdr, r,
+ NVME_DISC_SUBSYS_NAME,
+ r->disc_addr.traddr,
+ NVME_NQN_DISC, numrec);
numrec++;
}
@@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
up_read(&nvmet_config_sem);
- status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
- kfree(hdr);
+ status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+ kfree(buffer);
out:
nvmet_req_complete(req, status);
}
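A userspace sketch of the offset handling introduced above (simplified; the bounds clamp here is the sketch's, not necessarily the driver's): reject offsets that aren't dword aligned, build the complete log in one buffer, then return the requested window:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int copy_log_window(const uint8_t *log, size_t log_len,
                           uint64_t off, uint8_t *dst, size_t len)
{
        if (off & 0x3)                          /* spec: dword aligned */
                return -1;
        if (off >= log_len)
                return -1;                      /* nothing to copy */
        if (len > log_len - off)
                len = log_len - off;            /* clamp to what exists */
        memcpy(dst, log + off, len);
        return 0;
}

int main(void)
{
        uint8_t log[32] = "header....entry0....entry1....";
        uint8_t out[8] = { 0 };

        if (!copy_log_window(log, sizeof(log), 12, out, sizeof(out)))
                printf("window at 12: %.8s\n", (const char *)out);
        if (copy_log_window(log, sizeof(log), 6, out, sizeof(out)))
                printf("offset 6 rejected: not dword aligned\n");
        return 0;
}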
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1e9654f04c60..98b7b1f4ee96 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1143,10 +1143,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
&tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
- spin_lock_irqsave(&tgtport->lock, flags);
+ if (!schedule_work(&assoc->del_work))
+ nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
}
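Both hunks above encode the same refcount rule: the reference taken by tgt_a_get() is consumed by the work handler, so when schedule_work() reports the work as already queued, the reference must be dropped here or it leaks. In miniature (hypothetical helpers):

#include <stdbool.h>
#include <stdio.h>

struct assoc { int refs; bool work_queued; };

static bool schedule_del_work(struct assoc *a)
{
        if (a->work_queued)
                return false;           /* already pending: ref not consumed */
        a->work_queued = true;
        return true;                    /* handler will put() when it runs */
}

static void assoc_put(struct assoc *a) { a->refs--; }

int main(void)
{
        struct assoc a = { .refs = 1, .work_queued = true };

        a.refs++;                       /* nvmet_fc_tgt_a_get() */
        if (!schedule_del_work(&a))
                assoc_put(&a);          /* avoid leaking the reference */
        printf("refs = %d (no leak)\n", a.refs);
        return 0;
}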
@@ -1185,7 +1183,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- schedule_work(&assoc->del_work);
+ if (!schedule_work(&assoc->del_work))
+ nvmet_fc_tgt_a_put(assoc);
return;
}
@@ -1503,10 +1502,8 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
struct fcnvme_ls_disconnect_acc *acc =
(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
- struct nvmet_fc_tgt_queue *queue = NULL;
struct nvmet_fc_tgt_assoc *assoc;
int ret = 0;
- bool del_assoc = false;
memset(acc, 0, sizeof(*acc));
@@ -1537,18 +1534,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc = nvmet_fc_find_target_assoc(tgtport,
be64_to_cpu(rqst->associd.association_id));
iod->assoc = assoc;
- if (assoc) {
- if (rqst->discon_cmd.scope ==
- FCNVME_DISCONN_CONNECTION) {
- queue = nvmet_fc_find_target_queue(tgtport,
- be64_to_cpu(
- rqst->discon_cmd.id));
- if (!queue) {
- nvmet_fc_tgt_a_put(assoc);
- ret = VERR_NO_CONN;
- }
- }
- } else
+ if (!assoc)
ret = VERR_NO_ASSOC;
}
@@ -1576,26 +1562,10 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
sizeof(struct fcnvme_ls_disconnect_acc)),
FCNVME_LS_DISCONNECT);
-
- /* are we to delete a Connection ID (queue) */
- if (queue) {
- int qid = queue->qid;
-
- nvmet_fc_delete_target_queue(queue);
-
- /* release the get taken by find_target_queue */
- nvmet_fc_tgt_q_put(queue);
-
- /* tear association down if io queue terminated */
- if (!qid)
- del_assoc = true;
- }
-
/* release get taken in nvmet_fc_find_target_assoc */
nvmet_fc_tgt_a_put(iod->assoc);
- if (del_assoc)
- nvmet_fc_delete_target_assoc(iod->assoc);
+ nvmet_fc_delete_target_assoc(iod->assoc);
}
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 71dfedbadc26..a065dbfc43b1 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -194,11 +194,11 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
GFP_KERNEL, 0, bio);
-
- if (ret)
+ if (ret && ret != -EOPNOTSUPP) {
req->error_slba = le64_to_cpu(range->slba);
-
- return blk_to_nvme_status(req, errno_to_blk_status(ret));
+ return blk_to_nvme_status(req, errno_to_blk_status(ret));
+ }
+ return NVME_SC_SUCCESS;
}
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 517522305e5c..bc6ebb51b0bf 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -75,11 +75,11 @@ err:
return ret;
}
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
- bv->bv_page = sg_page_iter_page(iter);
- bv->bv_offset = iter->sg->offset;
- bv->bv_len = PAGE_SIZE - iter->sg->offset;
+ bv->bv_page = sg_page(sg);
+ bv->bv_offset = sg->offset;
+ bv->bv_len = sg->length;
}
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
- struct sg_page_iter sg_pg_iter;
+ ssize_t nr_bvec = req->sg_cnt;
unsigned long bv_cnt = 0;
bool is_sync = false;
size_t len = 0, total_len = 0;
ssize_t ret = 0;
loff_t pos;
-
+ int i;
+ struct scatterlist *sg;
+
if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
}
memset(&req->f.iocb, 0, sizeof(struct kiocb));
- for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
- nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
len += req->f.bvec[bv_cnt].bv_len;
total_len += req->f.bvec[bv_cnt].bv_len;
bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+ ssize_t nr_bvec = req->sg_cnt;
if (!req->sg_cnt || !nr_bvec) {
nvmet_req_complete(req, 0);
@@ -297,7 +297,7 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
}
ret = vfs_fallocate(req->ns->file, mode, offset, len);
- if (ret) {
+ if (ret && ret != -EOPNOTSUPP) {
req->error_slba = le64_to_cpu(range.slba);
status = errno_to_nvme_status(req, ret);
break;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 51e49efd7849..1653d19b187f 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ad3fcad4d75b..37c2ccbefecd 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -43,6 +43,7 @@ config OF_FLATTREE
config OF_EARLY_FLATTREE
bool
+ select DMA_DECLARE_COHERENT if HAS_DMA
select OF_FLATTREE
config OF_PROMTREE
@@ -81,10 +82,9 @@ config OF_MDIO
OpenFirmware MDIO bus (Ethernet PHY) accessors
config OF_RESERVED_MEM
- depends on OF_EARLY_FLATTREE
bool
- help
- Helpers to allow for reservation of memory regions
+ depends on OF_EARLY_FLATTREE
+ default y if DMA_DECLARE_COHERENT || DMA_CMA
config OF_RESOLVE
bool
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 9cc1461aac7d..4734223ab702 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1181,7 +1181,13 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
- return memblock_alloc(size, align);
+ void *ptr = memblock_alloc(size, align);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
+ __func__, size, align);
+
+ return ptr;
}
bool __init early_init_dt_verify(void *params)
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 810ab0fbcccb..d820f3edd431 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -7,7 +7,6 @@
*/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
-#include <linux/nvmem-consumer.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/export.h>
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 1977ee0adcb1..6a36bc0b3d64 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -26,33 +26,23 @@
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
-int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
phys_addr_t base;
- /*
- * We use __memblock_alloc_base() because memblock_alloc_base()
- * panic()s on allocation failure.
- */
+
end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
align = !align ? SMP_CACHE_BYTES : align;
- base = __memblock_alloc_base(size, align, end);
+ base = memblock_find_in_range(start, end, size, align);
if (!base)
return -ENOMEM;
- /*
- * Check if the allocated region fits in to start..end window
- */
- if (base < start) {
- memblock_free(base, size);
- return -ENOMEM;
- }
-
*res_base = base;
if (nomap)
return memblock_remove(base, size);
- return 0;
+
+ return memblock_reserve(base, size);
}
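The allocator change above switches from allocate-then-verify to find-then-claim: search the [start, end] window first, then explicitly reserve (or remove, for nomap) the range. A toy version of the find step (simplified bottom-up search; memblock's real search differs):

#include <stdint.h>
#include <stdio.h>

static uint64_t find_in_range(uint64_t start, uint64_t end,
                              uint64_t size, uint64_t align)
{
        /* round the window start up to the requested alignment */
        uint64_t base = (start + align - 1) & ~(align - 1);

        return (base + size <= end) ? base : 0;    /* 0 = no fit */
}

int main(void)
{
        uint64_t base = find_in_range(0x1000, 0x9000, 0x4000, 0x2000);

        if (!base)
                return 1;                           /* -ENOMEM in the driver */
        printf("reserve [%#llx, %#llx)\n",
               (unsigned long long)base, (unsigned long long)(base + 0x4000));
        return 0;
}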
/**
@@ -340,10 +330,6 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
mutex_lock(&of_rmem_assigned_device_mutex);
list_add(&rd->list, &of_rmem_assigned_device_list);
mutex_unlock(&of_rmem_assigned_device_mutex);
- /* ensure that dma_ops is set for virtual devices
- * using reserved memory
- */
- of_dma_configure(dev, np, true);
dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
} else {
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 84427384654d..cccde756b510 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1116,15 +1116,22 @@ static void update_node_properties(struct device_node *np,
for (prop = np->properties; prop != NULL; prop = save_next) {
save_next = prop->next;
ret = of_add_property(dup, prop);
- if (ret)
+ if (ret) {
+ if (ret == -EEXIST && !strcmp(prop->name, "name"))
+ continue;
pr_err("unittest internal error: unable to add testdata property %pOF/%s",
np, prop->name);
+ }
}
}
/**
* attach_node_and_children - attaches a node
- * and its children to live tree
+ * and its children to the live tree.
+ * CAUTION: misleading function name - if node @np already exists in
+ * the live tree then children of @np are *not* attached to the live
+ * tree. This works for the current test devicetree nodes because such
+ * nodes do not have child nodes.
*
* @np: Node to attach to live tree
*/
@@ -2234,7 +2241,13 @@ static struct device_node *overlay_base_root;
static void * __init dt_alloc_memory(u64 size, u64 align)
{
- return memblock_alloc(size, align);
+ void *ptr = memblock_alloc(size, align);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
+ __func__, size, align);
+
+ return ptr;
}
/*
@@ -2514,6 +2527,10 @@ static int __init of_unittest(void)
int res;
/* adding data for unittest */
+
+ if (IS_ENABLED(CONFIG_UML))
+ unittest_unflatten_overlay_base();
+
res = unittest_data_add();
if (res)
return res;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index d7f97167cac3..0e7703fe733f 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -526,6 +526,60 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
+/**
+ * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
+ * target voltage.
+ * @dev: Device for which we do this operation.
+ * @u_volt: Target voltage.
+ *
+ * Search for the OPP with the highest (ceil) frequency whose voltage is
+ * <= u_volt.
+ *
+ * Return: matching *opp, else an ERR_PTR in case of error, which should be
+ * handled using IS_ERR.
+ *
+ * Error return values can be:
+ * EINVAL: bad parameters
+ * ERANGE: no suitable OPP found
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+ unsigned long u_volt)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ if (!dev || !u_volt) {
+ dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
+ u_volt);
+ return ERR_PTR(-EINVAL);
+ }
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available) {
+ if (temp_opp->supplies[0].u_volt > u_volt)
+ break;
+ opp = temp_opp;
+ }
+ }
+
+ /* Increment the reference count of OPP */
+ if (!IS_ERR(opp))
+ dev_pm_opp_get(opp);
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
+
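The selection loop above relies on opp_list being sorted by ascending frequency, so "last fit wins" yields the highest frequency not exceeding the voltage budget. The same walk against a toy table:

#include <stdio.h>

struct opp { unsigned long freq_hz; unsigned long u_volt; int available; };

static const struct opp *find_freq_ceil_by_volt(const struct opp *t, int n,
                                                unsigned long u_volt)
{
        const struct opp *best = NULL;

        for (int i = 0; i < n; i++) {
                if (!t[i].available)
                        continue;
                if (t[i].u_volt > u_volt)
                        break;          /* sorted: nothing later can fit */
                best = &t[i];           /* last fit = highest frequency */
        }
        return best;
}

int main(void)
{
        static const struct opp table[] = {
                { 200000000,  900000, 1 },
                { 400000000, 1000000, 1 },
                { 800000000, 1200000, 1 },
        };
        const struct opp *o = find_freq_ceil_by_volt(table, 3, 1100000);

        if (o)
                printf("picked %lu Hz @ %lu uV\n", o->freq_hz, o->u_volt);
        return 0;
}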
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
struct dev_pm_opp_supply *supply)
{
@@ -760,7 +814,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
old_freq, freq);
/* Scaling up? Configure required OPPs before frequency */
- if (freq > old_freq) {
+ if (freq >= old_freq) {
ret = _set_required_opps(dev, opp_table, opp);
if (ret)
goto put_opp;
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 62504b18f198..c10c782d15aa 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -173,7 +173,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
struct opp_table **required_opp_tables;
struct device **genpd_virt_devs = NULL;
struct device_node *required_np, *np;
- int count, i;
+ int count, count_pd, i;
/* Traversing the first OPP node is all we need */
np = of_get_next_available_child(opp_np, NULL);
@@ -186,7 +186,19 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
if (!count)
goto put_np;
- if (count > 1) {
+ /*
+ * Check the number of power-domains to know if we need to deal
+ * with virtual devices. In some cases we have devices with multiple
+ * power domains but with only one of them being scalable, hence
+ * 'count' could be 1, but we still have to deal with multiple genpds
+ * and virtual devices.
+ */
+ count_pd = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (!count_pd)
+ goto put_np;
+
+ if (count_pd > 1) {
genpd_virt_devs = kcalloc(count, sizeof(*genpd_virt_devs),
GFP_KERNEL);
if (!genpd_virt_devs)
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 097b0d43d13c..8937ba70d817 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -712,8 +712,8 @@ ccio_dma_supported(struct device *dev, u64 mask)
return 0;
}
- /* only support 32-bit devices (ie PCI/GSC) */
- return (int)(mask == 0xffffffffUL);
+ /* only support 32-bit or better devices (i.e. PCI/GSC) */
+ return (int)(mask >= 0xffffffffUL);
}
/**
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 1be571c20062..6bad04cbb1d3 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -157,8 +157,12 @@
#define DBG_IRT(x...)
#endif
+#ifdef CONFIG_64BIT
+#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa))
+#else
#define COMPARE_IRTE_ADDR(irte, hpa) \
- ((irte)->dest_iosapic_addr == F_EXTEND(hpa))
+ ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
+#endif
#define IOSAPIC_REG_SELECT 0x00
#define IOSAPIC_REG_WINDOW 0x10
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 56dd83a45e55..5484a46dafda 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port)
struct pardevice *parport_open(int devnum, const char *name)
{
struct daisydev *p = topology;
- struct pardev_cb par_cb;
struct parport *port;
struct pardevice *dev;
int daisy;
- memset(&par_cb, 0, sizeof(par_cb));
spin_lock(&topology_lock);
while (p && p->devnum != devnum)
p = p->next;
@@ -232,7 +230,7 @@ struct pardevice *parport_open(int devnum, const char *name)
port = parport_get_port(p->port);
spin_unlock(&topology_lock);
- dev = parport_register_dev_model(port, name, &par_cb, devnum);
+ dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
parport_put_port(port);
if (!dev)
return NULL;
@@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port)
kfree(deviceid);
return detected;
}
-
-static int daisy_drv_probe(struct pardevice *par_dev)
-{
- struct device_driver *drv = par_dev->dev.driver;
-
- if (strcmp(drv->name, "daisy_drv"))
- return -ENODEV;
- if (strcmp(par_dev->name, daisy_dev_name))
- return -ENODEV;
-
- return 0;
-}
-
-static struct parport_driver daisy_driver = {
- .name = "daisy_drv",
- .probe = daisy_drv_probe,
- .devmodel = true,
-};
-
-int daisy_drv_init(void)
-{
- return parport_register_driver(&daisy_driver);
-}
-
-void daisy_drv_exit(void)
-{
- parport_unregister_driver(&daisy_driver);
-}
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e5e6a463a941..e035174ba205 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
- struct pardevice *dev = parport_open(devnum, daisy_dev_name);
+ struct pardevice *dev = parport_open (devnum, "Device ID probe");
if (!dev)
return -ENXIO;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 0171b8dbcdcd..5dc53d420ca8 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = {
int parport_bus_init(void)
{
- int retval;
-
- retval = bus_register(&parport_bus_type);
- if (retval)
- return retval;
- daisy_drv_init();
-
- return 0;
+ return bus_register(&parport_bus_type);
}
void parport_bus_exit(void)
{
- daisy_drv_exit();
bus_unregister(&parport_bus_type);
}
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 5b78f3b1b918..97c08146534a 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -142,6 +142,33 @@ int pci_ats_queue_depth(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
+/**
+ * pci_ats_page_aligned - Return Page Aligned Request bit status.
+ * @pdev: the PCI device
+ *
+ * Returns 1 if the Untranslated Addresses generated by the device
+ * are always aligned, or 0 otherwise.
+ *
+ * Per PCIe spec r4.0, sec 10.5.1.2, if the Page Aligned Request bit
+ * is set, it indicates the Untranslated Addresses generated by the
+ * device are always aligned to a 4096 byte boundary.
+ */
+int pci_ats_page_aligned(struct pci_dev *pdev)
+{
+ u16 cap;
+
+ if (!pdev->ats_cap)
+ return 0;
+
+ pci_read_config_word(pdev, pdev->ats_cap + PCI_ATS_CAP, &cap);
+
+ if (cap & PCI_ATS_CAP_PAGE_ALIGNED)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ats_page_aligned);
+
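The new helper boils down to one masked config-space read. A toy version over an in-memory config space (register offset and bit position hypothetical for the example):

#include <stdint.h>
#include <stdio.h>

#define ATS_CAP_PAGE_ALIGNED  (1 << 5)

static int ats_page_aligned(const uint16_t *cfg, unsigned int ats_cap_off)
{
        if (!ats_cap_off)               /* no ATS capability */
                return 0;
        return (cfg[ats_cap_off] & ATS_CAP_PAGE_ALIGNED) ? 1 : 0;
}

int main(void)
{
        uint16_t cfg[64] = { 0 };

        cfg[8] = ATS_CAP_PAGE_ALIGNED;  /* pretend the ATS cap word sits at index 8 */
        printf("page aligned: %d\n", ats_page_aligned(cfg, 8));
        return 0;
}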
#ifdef CONFIG_PCI_PRI
/**
* pci_enable_pri - Enable PRI capability
@@ -368,6 +395,36 @@ int pci_pasid_features(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(pci_pasid_features);
+/**
+ * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
+ * status.
+ * @pdev: PCI device structure
+ *
+ * Returns 1 if PASID is required in PRG Response Message, 0 otherwise.
+ *
+ * Even though the PRG response PASID status is read from PRI Status
+ * Register, since this API will mainly be used by PASID users, this
+ * function is defined within #ifdef CONFIG_PCI_PASID instead of
+ * CONFIG_PCI_PRI.
+ */
+int pci_prg_resp_pasid_required(struct pci_dev *pdev)
+{
+ u16 status;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return 0;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+
+ if (status & PCI_PRI_STATUS_PASID)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
+
#define PASID_NUMBER_SHIFT 8
#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
/**
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 6671946dbf66..011c57cae4b0 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -175,7 +175,7 @@ config PCIE_IPROC_MSI
config PCIE_ALTERA
bool "Altera PCIe controller"
- depends on ARM || NIOS2 || COMPILE_TEST
+ depends on ARM || NIOS2 || ARM64 || COMPILE_TEST
help
Say Y here if you want to enable PCIe controller support on Altera
FPGA.
@@ -267,6 +267,7 @@ config PCIE_TANGO_SMP8759
config VMD
depends on PCI_MSI && X86_64 && SRCU
+ select X86_DEV_DMA_OPS
tristate "Intel Volume Management Device Driver"
---help---
Adds support for the Intel Volume Management Device (VMD). VMD is a
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 548c58223868..6ea74b1c0d94 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -89,8 +89,8 @@ config PCI_EXYNOS
select PCIE_DW_HOST
config PCI_IMX6
- bool "Freescale i.MX6/7 PCIe controller"
- depends on SOC_IMX6Q || SOC_IMX7D || (ARM && COMPILE_TEST)
+ bool "Freescale i.MX6/7/8 PCIe controller"
+ depends on SOC_IMX6Q || SOC_IMX7D || (ARM64 && ARCH_MXC) || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 7bcdcdf5024e..b5f3b83cc2b3 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
-obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index a32d6dde7a57..ae84a69ae63a 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -81,6 +81,10 @@
#define MSI_REQ_GRANT BIT(0)
#define MSI_VECTOR_SHIFT 7
+#define PCIE_1LANE_2LANE_SELECTION BIT(13)
+#define PCIE_B1C0_MODE_SEL BIT(2)
+#define PCIE_B0_B1_TSYNCEN BIT(0)
+
struct dra7xx_pcie {
struct dw_pcie *pci;
void __iomem *base; /* DT ti_conf */
@@ -93,6 +97,7 @@ struct dra7xx_pcie {
struct dra7xx_pcie_of_data {
enum dw_pcie_device_mode mode;
+ u32 b1co_mode_sel_mask;
};
#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
@@ -389,9 +394,22 @@ static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_features dra7xx_pcie_epc_features = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features*
+dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &dra7xx_pcie_epc_features;
+}
+
static struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = dra7xx_pcie_ep_init,
.raise_irq = dra7xx_pcie_raise_irq,
+ .get_features = dra7xx_pcie_get_features,
};
static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
@@ -499,6 +517,10 @@ static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
int i;
for (i = 0; i < phy_count; i++) {
+ ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
+ if (ret < 0)
+ goto err_phy;
+
ret = phy_init(dra7xx->phy[i]);
if (ret < 0)
goto err_phy;
@@ -529,6 +551,26 @@ static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
.mode = DW_PCIE_EP_TYPE,
};
+static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
+ .b1co_mode_sel_mask = BIT(2),
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
+ .b1co_mode_sel_mask = GENMASK(3, 2),
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
+ .b1co_mode_sel_mask = BIT(2),
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
+ .b1co_mode_sel_mask = GENMASK(3, 2),
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id of_dra7xx_pcie_match[] = {
{
.compatible = "ti,dra7-pcie",
@@ -538,6 +580,22 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
.compatible = "ti,dra7-pcie-ep",
.data = &dra7xx_pcie_ep_of_data,
},
+ {
+ .compatible = "ti,dra746-pcie-rc",
+ .data = &dra746_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra726-pcie-rc",
+ .data = &dra726_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra746-pcie-ep",
+ .data = &dra746_pcie_ep_of_data,
+ },
+ {
+ .compatible = "ti,dra726-pcie-ep",
+ .data = &dra726_pcie_ep_of_data,
+ },
{},
};
@@ -583,6 +641,34 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
return ret;
}
+static int dra7xx_pcie_configure_two_lane(struct device *dev,
+ u32 b1co_mode_sel_mask)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *pcie_syscon;
+ unsigned int pcie_reg;
+ u32 mask;
+ u32 val;
+
+ pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ if (IS_ERR(pcie_syscon)) {
+ dev_err(dev, "unable to get ti,syscon-lane-sel\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
+ &pcie_reg)) {
+ dev_err(dev, "couldn't get lane selection reg offset\n");
+ return -EINVAL;
+ }
+
+ mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
+ val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
+ regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
+
+ return 0;
+}
+
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
u32 reg;
@@ -603,6 +689,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
const struct of_device_id *match;
const struct dra7xx_pcie_of_data *data;
enum dw_pcie_device_mode mode;
+ u32 b1co_mode_sel_mask;
match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
if (!match)
@@ -610,6 +697,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
data = (struct dra7xx_pcie_of_data *)match->data;
mode = (enum dw_pcie_device_mode)data->mode;
+ b1co_mode_sel_mask = data->b1co_mode_sel_mask;
dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
if (!dra7xx)
@@ -665,6 +753,12 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->pci = pci;
dra7xx->phy_count = phy_count;
+ if (phy_count == 2) {
+ ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
+ if (ret < 0)
+ dra7xx->phy_count = 1; /* Fall back to x1 lane mode */
+ }
+
ret = dra7xx_pcie_enable_phy(dra7xx);
if (ret) {
dev_err(dev, "failed to enable phy\n");
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 80f843030e36..3d627f94a166 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -8,6 +8,7 @@
* Author: Sean Cross <xobs@kosagi.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
@@ -18,6 +19,7 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -32,6 +34,12 @@
#include "pcie-designware.h"
+#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
+#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
+#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
+
#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
enum imx6_pcie_variants {
@@ -39,6 +47,15 @@ enum imx6_pcie_variants {
IMX6SX,
IMX6QP,
IMX7D,
+ IMX8MQ,
+};
+
+#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
+#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
+
+struct imx6_pcie_drvdata {
+ enum imx6_pcie_variants variant;
+ u32 flags;
};
struct imx6_pcie {
@@ -49,11 +66,12 @@ struct imx6_pcie {
struct clk *pcie_phy;
struct clk *pcie_inbound_axi;
struct clk *pcie;
+ struct clk *pcie_aux;
struct regmap *iomuxc_gpr;
+ u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
struct reset_control *turnoff_reset;
- enum imx6_pcie_variants variant;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -61,11 +79,13 @@ struct imx6_pcie {
u32 tx_swing_low;
int link_gen;
struct regulator *vpcie;
+ void __iomem *phy_base;
/* power domain for pcie */
struct device *pd_pcie;
/* power domain for pcie phy */
struct device *pd_pcie_phy;
+ const struct imx6_pcie_drvdata *drvdata;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -101,7 +121,6 @@ struct imx6_pcie {
#define PCIE_PHY_STAT_ACK_LOC 16
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
-#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
@@ -117,6 +136,23 @@ struct imx6_pcie {
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
+/* iMX7 PCIe PHY registers */
+#define PCIE_PHY_CMN_REG4 0x14
+/* These are probably the bits that *aren't* DCC_FB_EN */
+#define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29
+
+#define PCIE_PHY_CMN_REG15 0x54
+#define PCIE_PHY_CMN_REG15_DLY_4 BIT(2)
+#define PCIE_PHY_CMN_REG15_PLL_PD BIT(5)
+#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7)
+
+#define PCIE_PHY_CMN_REG24 0x90
+#define PCIE_PHY_CMN_REG24_RX_EQ BIT(6)
+#define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3)
+
+#define PCIE_PHY_CMN_REG26 0x98
+#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
+
#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
@@ -251,6 +287,9 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
u32 tmp;
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ return;
+
pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
@@ -264,6 +303,7 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
+#ifdef CONFIG_ARM
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
unsigned int fsr, struct pt_regs *regs)
@@ -297,6 +337,7 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
return 1;
}
+#endif
static int imx6_pcie_attach_pd(struct device *dev)
{
@@ -342,8 +383,9 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct device *dev = imx6_pcie->pci->dev;
- switch (imx6_pcie->variant) {
+ switch (imx6_pcie->drvdata->variant) {
case IMX7D:
+ case IMX8MQ:
reset_control_assert(imx6_pcie->pciephy_reset);
reset_control_assert(imx6_pcie->apps_reset);
break;
@@ -378,13 +420,20 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
}
}
+static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+{
+ WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
+ return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
+ unsigned int offset;
int ret = 0;
- switch (imx6_pcie->variant) {
+ switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
if (ret) {
@@ -412,6 +461,25 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
break;
case IMX7D:
break;
+ case IMX8MQ:
+ ret = clk_prepare_enable(imx6_pcie->pcie_aux);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ break;
+ }
+
+ offset = imx6_pcie_grp_offset(imx6_pcie);
+ /*
+ * Set the CLK_REQ override low and enable the override to
+ * make sure that REF_CLK is turned on.
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
+ break;
}
return ret;
@@ -487,9 +555,32 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
!imx6_pcie->gpio_active_high);
}
- switch (imx6_pcie->variant) {
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+ break;
case IMX7D:
reset_control_deassert(imx6_pcie->pciephy_reset);
+
+ /*
+ * Workaround for ERR010728, failure of PCI-e PLL VCO to
+ * oscillate, especially when cold. This turns off "Duty-cycle
+ * Corrector" and other mysterious undocumented things.
+ */
+ if (likely(imx6_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
+ /* Assert RX_EQS and RX_EQS_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
+ | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE,
+ imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+ dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
+ }
+
imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
break;
case IMX6SX:
@@ -523,9 +614,37 @@ err_pcie_phy:
}
}
+static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+{
+ unsigned int mask, val;
+
+ if (imx6_pcie->drvdata->variant == IMX8MQ &&
+ imx6_pcie->controller_id == 1) {
+ mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+ val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ } else {
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+}
+
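What GENMASK()/FIELD_PREP() do in the hunk above, spelled out in plain C (GENMASK(11, 8) builds a contiguous mask; FIELD_PREP shifts the value into it, ready for the read-modify-write that regmap_update_bits() performs; __builtin_ctz is a GCC/Clang builtin):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)      (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(m, v)   (((uint32_t)(v) << __builtin_ctz(m)) & (m))

int main(void)
{
        uint32_t mask = GENMASK32(11, 8);        /* 0x00000f00 */
        uint32_t val  = FIELD_PREP32(mask, 0x4); /* PCI_EXP_TYPE_ROOT_PORT */
        uint32_t gpr  = 0xffffffff;

        gpr = (gpr & ~mask) | val;               /* regmap_update_bits() */
        printf("mask=%#x val=%#x gpr=%#x\n", mask, val, gpr);
        return 0;
}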
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
- switch (imx6_pcie->variant) {
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ /*
+ * TODO: Currently this code assumes an external
+ * oscillator is being used.
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ break;
case IMX7D:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
@@ -561,8 +680,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
break;
}
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+ imx6_pcie_configure_type(imx6_pcie);
}
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
@@ -571,6 +689,9 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
int mult, div;
u32 val;
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ return 0;
+
switch (phy_rate) {
case 125000000:
/*
@@ -647,7 +768,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- switch (imx6_pcie->variant) {
+ switch (imx6_pcie->drvdata->variant) {
case IMX6Q:
case IMX6SX:
case IMX6QP:
@@ -656,6 +777,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
IMX6Q_GPR12_PCIE_CTL_2);
break;
case IMX7D:
+ case IMX8MQ:
reset_control_deassert(imx6_pcie->apps_reset);
break;
}
@@ -700,7 +822,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
tmp |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
- if (imx6_pcie->variant != IMX7D) {
+ if (imx6_pcie->drvdata->flags &
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
/*
* On i.MX7, DIRECT_SPEED_CHANGE behaves differently
* from i.MX6 family when no link speed transition
@@ -797,7 +920,7 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- switch (imx6_pcie->variant) {
+ switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
case IMX6QP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
@@ -823,7 +946,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
}
/* Others poke directly at IOMUXC registers */
- switch (imx6_pcie->variant) {
+ switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF,
@@ -853,7 +976,7 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
clk_disable_unprepare(imx6_pcie->pcie_phy);
clk_disable_unprepare(imx6_pcie->pcie_bus);
- switch (imx6_pcie->variant) {
+ switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
break;
@@ -862,6 +985,9 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
break;
+ case IMX8MQ:
+ clk_disable_unprepare(imx6_pcie->pcie_aux);
+ break;
default:
break;
}
@@ -869,8 +995,8 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
{
- return (imx6_pcie->variant == IMX7D ||
- imx6_pcie->variant == IMX6SX);
+ return (imx6_pcie->drvdata->variant == IMX7D ||
+ imx6_pcie->drvdata->variant == IMX6SX);
}
static int imx6_pcie_suspend_noirq(struct device *dev)
@@ -919,6 +1045,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct imx6_pcie *imx6_pcie;
+ struct device_node *np;
struct resource *dbi_base;
struct device_node *node = dev->of_node;
int ret;
@@ -936,8 +1063,24 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->ops = &dw_pcie_ops;
imx6_pcie->pci = pci;
- imx6_pcie->variant =
- (enum imx6_pcie_variants)of_device_get_match_data(dev);
+ imx6_pcie->drvdata = of_device_get_match_data(dev);
+
+ /* Find the PHY if one is defined, only imx7d uses it */
+ np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
+ if (np) {
+ struct resource res;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(dev, "Unable to map PCIe PHY\n");
+ return ret;
+ }
+ imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx6_pcie->phy_base)) {
+ dev_err(dev, "Unable to map PCIe PHY\n");
+ return PTR_ERR(imx6_pcie->phy_base);
+ }
+ }
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
@@ -981,7 +1124,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->pcie);
}
- switch (imx6_pcie->variant) {
+ switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
@@ -990,7 +1133,17 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->pcie_inbound_axi);
}
break;
+ case IMX8MQ:
+ imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(imx6_pcie->pcie_aux)) {
+ dev_err(dev, "pcie_aux clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_aux);
+ }
+ /* fall through */
case IMX7D:
+ if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+ imx6_pcie->controller_id = 1;
+
imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
"pciephy");
if (IS_ERR(imx6_pcie->pciephy_reset)) {
@@ -1087,11 +1240,36 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
imx6_pcie_assert_core_reset(imx6_pcie);
}
+static const struct imx6_pcie_drvdata drvdata[] = {
+ [IMX6Q] = {
+ .variant = IMX6Q,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ },
+ [IMX6SX] = {
+ .variant = IMX6SX,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ },
+ [IMX6QP] = {
+ .variant = IMX6QP,
+ .flags = IMX6_PCIE_FLAG_IMX6_PHY |
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ },
+ [IMX7D] = {
+ .variant = IMX7D,
+ },
+ [IMX8MQ] = {
+ .variant = IMX8MQ,
+ },
+};
+
static const struct of_device_id imx6_pcie_of_match[] = {
- { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
- { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
- { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
- { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, },
+ { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
+ { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
+ { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
+ { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
+ { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } ,
{},
};
@@ -1108,6 +1286,7 @@ static struct platform_driver imx6_pcie_driver = {
static int __init imx6_pcie_init(void)
{
+#ifdef CONFIG_ARM
/*
* Since probe() can be deferred we need to make sure that
* hook_fault_code is not called after __init memory is freed
@@ -1117,6 +1296,7 @@ static int __init imx6_pcie_init(void)
*/
hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
"external abort on non-linefetch");
+#endif
return platform_driver_register(&imx6_pcie_driver);
}
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
new file mode 100644
index 000000000000..a42c9c3ae1cc
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe controller EP driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2018 NXP Semiconductor.
+ *
+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+#define PCIE_DBI2_OFFSET 0x1000 /* DBI2 base address */
+
+struct ls_pcie_ep {
+ struct dw_pcie *pci;
+};
+
+#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+static int ls_pcie_establish_link(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static const struct dw_pcie_ops ls_pcie_ep_ops = {
+ .start_link = ls_pcie_establish_link,
+};
+
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+ { .compatible = "fsl,ls-pcie-ep",},
+ { },
+};
+
+static const struct pci_epc_features ls_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features*
+ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &ls_pcie_epc_features;
+}
+
+static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = ls_pcie_ep_init,
+ .raise_irq = ls_pcie_ep_raise_irq,
+ .get_features = ls_pcie_ep_get_features,
+};
+
+static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ int ret;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct ls_pcie_ep *pcie;
+ struct resource *dbi_base;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
+ pci->dev = dev;
+ pci->ops = &ls_pcie_ep_ops;
+ pcie->pci = pci;
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = ls_add_pcie_ep(pcie, pdev);
+
+ return ret;
+}
+
+static struct platform_driver ls_pcie_ep_driver = {
+ .driver = {
+ .name = "layerscape-pcie-ep",
+ .of_match_table = ls_pcie_ep_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
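A note on the registration above: builtin_platform_driver_probe() registers a driver whose probe routine lives in __init memory, so the device can only be probed once at boot and must never be rebound; .suppress_bind_attrs keeps userspace from attempting that through sysfs. A minimal sketch of the same pattern (driver and device names are hypothetical):

	#include <linux/init.h>
	#include <linux/platform_device.h>

	static int __init example_probe(struct platform_device *pdev)
	{
		dev_info(&pdev->dev, "probed once at boot\n");
		return 0;
	}

	static struct platform_driver example_driver = {
		.driver = {
			.name = "example-dev",
			.suppress_bind_attrs = true,	/* probe() is __init */
		},
	};
	builtin_platform_driver_probe(example_driver, example_probe);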
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index a543c45c7224..24f5a775ad34 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -355,6 +355,17 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
return pci->ops->start_link(pci);
}
+static const struct pci_epc_features*
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->get_features)
+ return NULL;
+
+ return ep->ops->get_features(ep);
+}
+
static const struct pci_epc_ops epc_ops = {
.write_header = dw_pcie_ep_write_header,
.set_bar = dw_pcie_ep_set_bar,
@@ -368,6 +379,7 @@ static const struct pci_epc_ops epc_ops = {
.raise_irq = dw_pcie_ep_raise_irq,
.start = dw_pcie_ep_start,
.stop = dw_pcie_ep_stop,
+ .get_features = dw_pcie_ep_get_features,
};
int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
@@ -465,8 +477,10 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
iounmap(msix_tbl);
- if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
+ if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
+ dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
return -EPERM;
+ }
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
epc->mem->page_size);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 721d60a5d9e4..25087d3c9a82 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -120,9 +120,9 @@ static void dw_chained_msi_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
+static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
@@ -135,61 +135,61 @@ static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_hi = upper_32_bits(msi_target);
if (pp->ops->get_msi_data)
- msg->data = pp->ops->get_msi_data(pp, data->hwirq);
+ msg->data = pp->ops->get_msi_data(pp, d->hwirq);
else
- msg->data = data->hwirq;
+ msg->data = d->hwirq;
dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
- (int)data->hwirq, msg->address_hi, msg->address_lo);
+ (int)d->hwirq, msg->address_hi, msg->address_lo);
}
-static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
+static int dw_pci_msi_set_affinity(struct irq_data *d,
const struct cpumask *mask, bool force)
{
return -EINVAL;
}
-static void dw_pci_bottom_mask(struct irq_data *data)
+static void dw_pci_bottom_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned int res, bit, ctrl;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
if (pp->ops->msi_clear_irq) {
- pp->ops->msi_clear_irq(pp, data->hwirq);
+ pp->ops->msi_clear_irq(pp, d->hwirq);
} else {
- ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_status[ctrl] &= ~(1 << bit);
+ pp->irq_mask[ctrl] |= BIT(bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- ~pp->irq_status[ctrl]);
+ pp->irq_mask[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static void dw_pci_bottom_unmask(struct irq_data *data)
+static void dw_pci_bottom_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned int res, bit, ctrl;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
if (pp->ops->msi_set_irq) {
- pp->ops->msi_set_irq(pp, data->hwirq);
+ pp->ops->msi_set_irq(pp, d->hwirq);
} else {
- ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_status[ctrl] |= 1 << bit;
+ pp->irq_mask[ctrl] &= ~BIT(bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- ~pp->irq_status[ctrl]);
+ pp->irq_mask[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -207,7 +207,7 @@ static void dw_pci_bottom_ack(struct irq_data *d)
raw_spin_lock_irqsave(&pp->lock, flags);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
if (pp->ops->msi_irq_ack)
pp->ops->msi_irq_ack(d->hwirq, pp);
@@ -255,13 +255,13 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
- struct irq_data *data = irq_domain_get_irq_data(domain, virq);
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
- bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
+ bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
order_base_2(nr_irqs));
raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -439,7 +439,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
pci->num_viewport = 2;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
/*
* If a specific SoC driver needs to change the
* default number of vectors, it needs to implement
@@ -512,8 +512,9 @@ error:
return ret;
}
-static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 *val)
+static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 *val,
+ bool write)
{
int ret, type;
u32 busdev, cfg_size;
@@ -521,9 +522,6 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
void __iomem *va_cfg_base;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- if (pp->ops->rd_other_conf)
- return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);
-
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
@@ -542,7 +540,11 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
- ret = dw_pcie_read(va_cfg_base + where, size, val);
+ if (write)
+ ret = dw_pcie_write(va_cfg_base + where, size, *val);
+ else
+ ret = dw_pcie_read(va_cfg_base + where, size, val);
+
if (pci->num_viewport <= 2)
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
PCIE_ATU_TYPE_IO, pp->io_base,
@@ -551,43 +553,26 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
return ret;
}
+static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 *val)
+{
+ if (pp->ops->rd_other_conf)
+ return pp->ops->rd_other_conf(pp, bus, devfn, where,
+ size, val);
+
+ return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
+ false);
+}
+
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
- int ret, type;
- u32 busdev, cfg_size;
- u64 cpu_addr;
- void __iomem *va_cfg_base;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
if (pp->ops->wr_other_conf)
- return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);
-
- busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
- PCIE_ATU_FUNC(PCI_FUNC(devfn));
+ return pp->ops->wr_other_conf(pp, bus, devfn, where,
+ size, val);
- if (bus->parent->number == pp->root_bus_nr) {
- type = PCIE_ATU_TYPE_CFG0;
- cpu_addr = pp->cfg0_base;
- cfg_size = pp->cfg0_size;
- va_cfg_base = pp->va_cfg0_base;
- } else {
- type = PCIE_ATU_TYPE_CFG1;
- cpu_addr = pp->cfg1_base;
- cfg_size = pp->cfg1_size;
- va_cfg_base = pp->va_cfg1_base;
- }
-
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- type, cpu_addr,
- busdev, cfg_size);
- ret = dw_pcie_write(va_cfg_base + where, size, val);
- if (pci->num_viewport <= 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
-
- return ret;
+ return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
+ true);
}
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
@@ -665,13 +650,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ pp->irq_mask[ctrl] = ~0;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
+ 4, pp->irq_mask[ctrl]);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
4, ~0);
- pp->irq_status[ctrl] = 0;
}
/* Setup RC BARs */
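The irq_status -> irq_mask rename in the hunks above is more than cosmetic: the driver now caches the hardware mask register directly (PCIE_MSI_INTR0_MASK, where a set bit masks a vector) instead of keeping an "enabled" bitmap and writing out its inverse. A sketch of the new convention, using a hypothetical helper and the MAX_MSI_IRQS_PER_CTRL constant from pcie-designware.h:

	static void msi_vector_set_masked(u32 *irq_mask, unsigned long hwirq,
					  bool masked)
	{
		unsigned int bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

		if (masked)
			*irq_mask |= BIT(bit);	/* 1 = vector masked */
		else
			*irq_mask &= ~BIT(bit);	/* 0 = vector delivered */

		/* caller writes *irq_mask to PCIE_MSI_INTR0_MASK under pp->lock */
	}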
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index c12bf794d69c..932dbd0b34b6 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -13,11 +13,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
-#include <linux/signal.h>
#include <linux/types.h>
#include <linux/regmap.h>
@@ -70,14 +68,10 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct pci_epc *epc = ep->epc;
enum pci_barno bar;
for (bar = BAR_0; bar <= BAR_5; bar++)
dw_pcie_ep_reset_bar(pci, bar);
-
- epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
- epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
}
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -100,9 +94,22 @@ static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_features dw_plat_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+};
+
+static const struct pci_epc_features*
+dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &dw_plat_pcie_epc_features;
+}
+
static struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = dw_plat_pcie_ep_init,
.raise_irq = dw_plat_pcie_ep_raise_irq,
+ .get_features = dw_plat_pcie_get_features,
};
static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 93ef8c31fb39..31f6331ca46f 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -22,7 +22,7 @@
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
- if ((uintptr_t)addr & (size - 1)) {
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
@@ -43,7 +43,7 @@ int dw_pcie_read(void __iomem *addr, int size, u32 *val)
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
- if ((uintptr_t)addr & (size - 1))
+ if (!IS_ALIGNED((uintptr_t)addr, size))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (size == 4)
@@ -306,7 +306,7 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
}
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
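The IS_ALIGNED() conversions above are behavior-preserving for the power-of-two access sizes used here: IS_ALIGNED(x, a) in <linux/kernel.h> expands to ((x) & ((a) - 1)) == 0, i.e. exactly the old open-coded test, spelled through the common helper. In isolation:

	/* equivalent alignment checks for size in {1, 2, 4} */
	static bool dbi_access_aligned(uintptr_t addr, int size)
	{
		return IS_ALIGNED(addr, size);	/* == !(addr & (size - 1)) */
	}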
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 9943d8c68335..377f4c0b52da 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -11,6 +11,7 @@
#ifndef _PCIE_DESIGNWARE_H
#define _PCIE_DESIGNWARE_H
+#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <linux/msi.h>
@@ -30,23 +31,25 @@
/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL 0x710
-#define PORT_LINK_MODE_MASK (0x3f << 16)
-#define PORT_LINK_MODE_1_LANES (0x1 << 16)
-#define PORT_LINK_MODE_2_LANES (0x3 << 16)
-#define PORT_LINK_MODE_4_LANES (0x7 << 16)
-#define PORT_LINK_MODE_8_LANES (0xf << 16)
+#define PORT_LINK_MODE_MASK GENMASK(21, 16)
+#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
+#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
+#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
+#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
+#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
-#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
-#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
-#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
-#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
-#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
-#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8)
+#define PORT_LOGIC_SPEED_CHANGE BIT(17)
+#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8)
+#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4)
+#define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8)
#define PCIE_MSI_ADDR_LO 0x820
#define PCIE_MSI_ADDR_HI 0x824
@@ -55,30 +58,30 @@
#define PCIE_MSI_INTR0_STATUS 0x830
#define PCIE_ATU_VIEWPORT 0x900
-#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
-#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
-#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
-#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
-#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
+#define PCIE_ATU_REGION_INBOUND BIT(31)
+#define PCIE_ATU_REGION_OUTBOUND 0
+#define PCIE_ATU_REGION_INDEX2 0x2
+#define PCIE_ATU_REGION_INDEX1 0x1
+#define PCIE_ATU_REGION_INDEX0 0x0
#define PCIE_ATU_CR1 0x904
-#define PCIE_ATU_TYPE_MEM (0x0 << 0)
-#define PCIE_ATU_TYPE_IO (0x2 << 0)
-#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
-#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
+#define PCIE_ATU_TYPE_MEM 0x0
+#define PCIE_ATU_TYPE_IO 0x2
+#define PCIE_ATU_TYPE_CFG0 0x4
+#define PCIE_ATU_TYPE_CFG1 0x5
#define PCIE_ATU_CR2 0x908
-#define PCIE_ATU_ENABLE (0x1 << 31)
-#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
+#define PCIE_ATU_ENABLE BIT(31)
+#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
#define PCIE_ATU_LOWER_BASE 0x90C
#define PCIE_ATU_UPPER_BASE 0x910
#define PCIE_ATU_LIMIT 0x914
#define PCIE_ATU_LOWER_TARGET 0x918
-#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
-#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
-#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
+#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x)
+#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
+#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
#define PCIE_ATU_UPPER_TARGET 0x91C
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
-#define PCIE_DBI_RO_WR_EN (0x1 << 0)
+#define PCIE_DBI_RO_WR_EN BIT(0)
/*
* iATU Unroll-specific register definitions
@@ -105,7 +108,7 @@
((region) << 9)
#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
- (((region) << 9) | (0x1 << 8))
+ (((region) << 9) | BIT(8))
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
@@ -177,7 +180,7 @@ struct pcie_port {
struct irq_domain *msi_domain;
dma_addr_t msi_data;
u32 num_vectors;
- u32 irq_status[MAX_MSI_CTRLS];
+ u32 irq_mask[MAX_MSI_CTRLS];
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@@ -192,6 +195,7 @@ struct dw_pcie_ep_ops {
void (*ep_init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
+ const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
};
struct dw_pcie_ep {
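For readers unfamiliar with the bitfield.h helpers the header now pulls in: GENMASK(h, l) builds a contiguous mask and FIELD_PREP(mask, val) shifts val into that mask's position, with a compile-time check that the value fits, so e.g. PORT_LINK_MODE(0x7) equals the old open-coded (0x7 << 16). A self-contained illustration with a made-up register field:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define EX_MODE_MASK	GENMASK(21, 16)	/* hypothetical field */

	static u32 ex_set_mode(u32 reg, u32 mode)
	{
		reg &= ~EX_MODE_MASK;			/* clear the field */
		return reg | FIELD_PREP(EX_MODE_MASK, mode);
	}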
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index d185ea5fe996..a7f703556790 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1228,7 +1228,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->ops = of_device_get_match_data(dev);
- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
+ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
if (IS_ERR(pcie->reset)) {
ret = PTR_ERR(pcie->reset);
goto err_pm_runtime_put;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 750081c1cb48..eb58dfdaba1b 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -466,7 +466,7 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
}
-struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
};
@@ -499,7 +499,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
- pci_bridge_emul_init(bridge);
+ pci_bridge_emul_init(bridge, 0);
}
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 9ba4d12c179c..95441a35eceb 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -391,14 +391,6 @@ struct hv_interrupt_entry {
u32 data;
};
-#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */
-
-struct hv_vp_set {
- u64 format; /* 0 (HvGenericSetSparse4k) */
- u64 valid_banks;
- u64 masks[HV_VP_SET_BANK_COUNT_MAX];
-};
-
/*
* flags for hv_device_interrupt_target.flags
*/
@@ -410,7 +402,7 @@ struct hv_device_interrupt_target {
u32 flags;
union {
u64 vp_mask;
- struct hv_vp_set vp_set;
+ struct hv_vpset vp_set;
};
};
@@ -420,7 +412,7 @@ struct retarget_msi_interrupt {
struct hv_interrupt_entry int_entry;
u64 reserved2;
struct hv_device_interrupt_target int_target;
-} __packed;
+} __packed __aligned(8);
/*
* Driver specific state.
@@ -460,12 +452,16 @@ struct hv_pcibus_device {
struct msi_controller msi_chip;
struct irq_domain *irq_domain;
- /* hypercall arg, must not cross page boundary */
- struct retarget_msi_interrupt retarget_msi_interrupt_params;
-
spinlock_t retarget_msi_interrupt_lock;
struct workqueue_struct *wq;
+
+ /* hypercall arg, must not cross page boundary */
+ struct retarget_msi_interrupt retarget_msi_interrupt_params;
+
+ /*
+ * Don't put anything here: retarget_msi_interrupt_params must be last
+ */
};
/*
@@ -910,12 +906,12 @@ static void hv_irq_unmask(struct irq_data *data)
struct retarget_msi_interrupt *params;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
+ cpumask_var_t tmp;
struct pci_bus *pbus;
struct pci_dev *pdev;
unsigned long flags;
u32 var_size = 0;
- int cpu_vmbus;
- int cpu;
+ int cpu, nr_bank;
u64 res;
dest = irq_data_get_effective_affinity_mask(data);
@@ -955,28 +951,27 @@ static void hv_irq_unmask(struct irq_data *data)
*/
params->int_target.flags |=
HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
- params->int_target.vp_set.valid_banks =
- (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1;
- /*
- * var-sized hypercall, var-size starts after vp_mask (thus
- * vp_set.format does not count, but vp_set.valid_banks does).
- */
- var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
+ if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
+ res = 1;
+ goto exit_unlock;
+ }
- for_each_cpu_and(cpu, dest, cpu_online_mask) {
- cpu_vmbus = hv_cpu_number_to_vp_number(cpu);
+ cpumask_and(tmp, dest, cpu_online_mask);
+ nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
+ free_cpumask_var(tmp);
- if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
- dev_err(&hbus->hdev->device,
- "too high CPU %d", cpu_vmbus);
- res = 1;
- goto exit_unlock;
- }
-
- params->int_target.vp_set.masks[cpu_vmbus / 64] |=
- (1ULL << (cpu_vmbus & 63));
+ if (nr_bank <= 0) {
+ res = 1;
+ goto exit_unlock;
}
+
+ /*
+ * var-sized hypercall, var-size starts after vp_mask (thus
+ * vp_set.format does not count, but vp_set.valid_bank_mask
+ * does).
+ */
+ var_size = 1 + nr_bank;
} else {
for_each_cpu_and(cpu, dest, cpu_online_mask) {
params->int_target.vp_mask |=
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index fa0fc46edb0c..d3a0419e42f2 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -583,7 +583,7 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
bridge->data = port;
bridge->ops = &mvebu_pci_bridge_emul_ops;
- pci_bridge_emul_init(bridge);
+ pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
}
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 7d05e51205b3..27edcebd1726 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -11,6 +11,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/init.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -37,7 +38,12 @@
#define RP_LTSSM_MASK 0x1f
#define LTSSM_L0 0xf
-#define PCIE_CAP_OFFSET 0x80
+#define S10_RP_TX_CNTRL 0x2004
+#define S10_RP_RXCPL_REG 0x2008
+#define S10_RP_RXCPL_STATUS 0x200C
+#define S10_RP_CFG_ADDR(pcie, reg) \
+ (((pcie)->hip_base) + (reg) + (1 << 20))
+
/* TLP configuration type 0 and 1 */
#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */
@@ -49,18 +55,19 @@
#define RP_DEVFN 0
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFGRD_DW0(pcie, bus) \
- ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \
- : TLP_FMTTYPE_CFGRD1) << 24) | \
- TLP_PAYLOAD_SIZE)
+ ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgrd0 \
+ : pcie->pcie_data->cfgrd1) << 24) | \
+ TLP_PAYLOAD_SIZE)
#define TLP_CFGWR_DW0(pcie, bus) \
- ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGWR0 \
- : TLP_FMTTYPE_CFGWR1) << 24) | \
- TLP_PAYLOAD_SIZE)
+ ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgwr0 \
+ : pcie->pcie_data->cfgwr1) << 24) | \
+ TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
- (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
+ (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
+#define TLP_BYTE_COUNT(s) (((s) >> 0) & 0xfff)
#define TLP_HDR_SIZE 3
#define TLP_LOOP 500
@@ -69,14 +76,47 @@
#define DWORD_MASK 3
+#define S10_TLP_FMTTYPE_CFGRD0 0x05
+#define S10_TLP_FMTTYPE_CFGRD1 0x04
+#define S10_TLP_FMTTYPE_CFGWR0 0x45
+#define S10_TLP_FMTTYPE_CFGWR1 0x44
+
+enum altera_pcie_version {
+ ALTERA_PCIE_V1 = 0,
+ ALTERA_PCIE_V2,
+};
+
struct altera_pcie {
struct platform_device *pdev;
- void __iomem *cra_base; /* DT Cra */
+ void __iomem *cra_base;
+ void __iomem *hip_base;
int irq;
u8 root_bus_nr;
struct irq_domain *irq_domain;
struct resource bus_range;
struct list_head resources;
+ const struct altera_pcie_data *pcie_data;
+};
+
+struct altera_pcie_ops {
+ int (*tlp_read_pkt)(struct altera_pcie *pcie, u32 *value);
+ void (*tlp_write_pkt)(struct altera_pcie *pcie, u32 *headers,
+ u32 data, bool align);
+ bool (*get_link_status)(struct altera_pcie *pcie);
+ int (*rp_read_cfg)(struct altera_pcie *pcie, int where,
+ int size, u32 *value);
+ int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value);
+};
+
+struct altera_pcie_data {
+ const struct altera_pcie_ops *ops;
+ enum altera_pcie_version version;
+ u32 cap_offset; /* PCIe capability structure register offset */
+ u32 cfgrd0;
+ u32 cfgrd1;
+ u32 cfgwr0;
+ u32 cfgwr1;
};
struct tlp_rp_regpair_t {
@@ -101,6 +141,15 @@ static bool altera_pcie_link_up(struct altera_pcie *pcie)
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
}
+static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
+{
+ void __iomem *addr = S10_RP_CFG_ADDR(pcie,
+ pcie->pcie_data->cap_offset +
+ PCI_EXP_LNKSTA);
+
+ return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
+}
+
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
* from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
@@ -128,12 +177,18 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
+static void s10_tlp_write_tx(struct altera_pcie *pcie, u32 reg0, u32 ctrl)
+{
+ cra_writel(pcie, reg0, RP_TX_REG0);
+ cra_writel(pcie, ctrl, S10_RP_TX_CNTRL);
+}
+
static bool altera_pcie_valid_device(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
/* If there is no link, then there is no device */
if (bus->number != pcie->root_bus_nr) {
- if (!altera_pcie_link_up(pcie))
+ if (!pcie->pcie_data->ops->get_link_status(pcie))
return false;
}
@@ -183,6 +238,53 @@ static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
return PCIBIOS_DEVICE_NOT_FOUND;
}
+static int s10_tlp_read_packet(struct altera_pcie *pcie, u32 *value)
+{
+ u32 ctrl;
+ u32 comp_status;
+ u32 dw[4];
+ u32 count;
+ struct device *dev = &pcie->pdev->dev;
+
+ for (count = 0; count < TLP_LOOP; count++) {
+ ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
+ if (ctrl & RP_RXCPL_SOP) {
+ /* Read first DW */
+ dw[0] = cra_readl(pcie, S10_RP_RXCPL_REG);
+ break;
+ }
+
+ udelay(5);
+ }
+
+ /* SOP detection failed, return error */
+ if (count == TLP_LOOP)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ count = 1;
+
+ /* Poll for EOP */
+ while (count < ARRAY_SIZE(dw)) {
+ ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
+ dw[count++] = cra_readl(pcie, S10_RP_RXCPL_REG);
+ if (ctrl & RP_RXCPL_EOP) {
+ comp_status = TLP_COMP_STATUS(dw[1]);
+ if (comp_status)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (value && TLP_BYTE_COUNT(dw[1]) == sizeof(u32) &&
+ count == 4)
+ *value = dw[3];
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+ }
+
+ dev_warn(dev, "Malformed TLP packet\n");
+
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
u32 data, bool align)
{
@@ -210,6 +312,15 @@ static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
tlp_write_tx(pcie, &tlp_rp_regdata);
}
+static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
+ u32 data, bool dummy)
+{
+ s10_tlp_write_tx(pcie, headers[0], RP_TX_SOP);
+ s10_tlp_write_tx(pcie, headers[1], 0);
+ s10_tlp_write_tx(pcie, headers[2], 0);
+ s10_tlp_write_tx(pcie, data, RP_TX_EOP);
+}
+
static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
int where, u8 byte_en, u32 *value)
{
@@ -219,9 +330,9 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
- tlp_write_packet(pcie, headers, 0, false);
+ pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false);
- return tlp_read_packet(pcie, value);
+ return pcie->pcie_data->ops->tlp_read_pkt(pcie, value);
}
static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
@@ -236,11 +347,13 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
/* check alignment to Qword */
if ((where & 0x7) == 0)
- tlp_write_packet(pcie, headers, value, true);
+ pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
+ value, true);
else
- tlp_write_packet(pcie, headers, value, false);
+ pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
+ value, false);
- ret = tlp_read_packet(pcie, NULL);
+ ret = pcie->pcie_data->ops->tlp_read_pkt(pcie, NULL);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
@@ -254,6 +367,53 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
}
+static int s10_rp_read_cfg(struct altera_pcie *pcie, int where,
+ int size, u32 *value)
+{
+ void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ *value = readb(addr);
+ break;
+ case 2:
+ *value = readw(addr);
+ break;
+ default:
+ *value = readl(addr);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value)
+{
+ void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ writeb(value, addr);
+ break;
+ case 2:
+ writew(value, addr);
+ break;
+ default:
+ writel(value, addr);
+ break;
+ }
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on root port
+ * and update local copy of root bus number accordingly.
+ */
+ if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+ pcie->root_bus_nr = value & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int where, int size,
u32 *value)
@@ -262,6 +422,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
u32 data;
u8 byte_en;
+ if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_read_cfg)
+ return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
+ size, value);
+
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -302,6 +466,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
u32 shift = 8 * (where & 3);
u8 byte_en;
+ if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_write_cfg)
+ return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
+ where, size, value);
+
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -365,7 +533,8 @@ static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
int ret;
ret = _altera_pcie_cfg_read(pcie, busno, devfn,
- PCIE_CAP_OFFSET + offset, sizeof(*value),
+ pcie->pcie_data->cap_offset + offset,
+ sizeof(*value),
&data);
*value = data;
return ret;
@@ -375,7 +544,8 @@ static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int offset, u16 value)
{
return _altera_pcie_cfg_write(pcie, busno, devfn,
- PCIE_CAP_OFFSET + offset, sizeof(value),
+ pcie->pcie_data->cap_offset + offset,
+ sizeof(value),
value);
}
@@ -403,7 +573,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
	/* Wait for link to come up */
start_jiffies = jiffies;
for (;;) {
- if (altera_pcie_link_up(pcie))
+ if (pcie->pcie_data->ops->get_link_status(pcie))
break;
if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
@@ -418,7 +588,7 @@ static void altera_pcie_retrain(struct altera_pcie *pcie)
{
u16 linkcap, linkstat, linkctl;
- if (!altera_pcie_link_up(pcie))
+ if (!pcie->pcie_data->ops->get_link_status(pcie))
return;
/*
@@ -540,12 +710,20 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
struct device *dev = &pcie->pdev->dev;
struct platform_device *pdev = pcie->pdev;
struct resource *cra;
+ struct resource *hip;
cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
pcie->cra_base = devm_ioremap_resource(dev, cra);
if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
+ if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
+ hip = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Hip");
+ pcie->hip_base = devm_ioremap_resource(&pdev->dev, hip);
+ if (IS_ERR(pcie->hip_base))
+ return PTR_ERR(pcie->hip_base);
+ }
+
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
if (pcie->irq < 0) {
@@ -562,6 +740,48 @@ static void altera_pcie_host_init(struct altera_pcie *pcie)
altera_pcie_retrain(pcie);
}
+static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
+ .tlp_read_pkt = tlp_read_packet,
+ .tlp_write_pkt = tlp_write_packet,
+ .get_link_status = altera_pcie_link_up,
+};
+
+static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
+ .tlp_read_pkt = s10_tlp_read_packet,
+ .tlp_write_pkt = s10_tlp_write_packet,
+ .get_link_status = s10_altera_pcie_link_up,
+ .rp_read_cfg = s10_rp_read_cfg,
+ .rp_write_cfg = s10_rp_write_cfg,
+};
+
+static const struct altera_pcie_data altera_pcie_1_0_data = {
+ .ops = &altera_pcie_ops_1_0,
+ .cap_offset = 0x80,
+ .version = ALTERA_PCIE_V1,
+ .cfgrd0 = TLP_FMTTYPE_CFGRD0,
+ .cfgrd1 = TLP_FMTTYPE_CFGRD1,
+ .cfgwr0 = TLP_FMTTYPE_CFGWR0,
+ .cfgwr1 = TLP_FMTTYPE_CFGWR1,
+};
+
+static const struct altera_pcie_data altera_pcie_2_0_data = {
+ .ops = &altera_pcie_ops_2_0,
+ .version = ALTERA_PCIE_V2,
+ .cap_offset = 0x70,
+ .cfgrd0 = S10_TLP_FMTTYPE_CFGRD0,
+ .cfgrd1 = S10_TLP_FMTTYPE_CFGRD1,
+ .cfgwr0 = S10_TLP_FMTTYPE_CFGWR0,
+ .cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
+};
+
+static const struct of_device_id altera_pcie_of_match[] = {
+ {.compatible = "altr,pcie-root-port-1.0",
+ .data = &altera_pcie_1_0_data },
+ {.compatible = "altr,pcie-root-port-2.0",
+ .data = &altera_pcie_2_0_data },
+ {},
+};
+
static int altera_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -570,6 +790,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
struct pci_host_bridge *bridge;
int ret;
+ const struct of_device_id *match;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
@@ -578,6 +799,12 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->pdev = pdev;
+ match = of_match_device(altera_pcie_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ pcie->pcie_data = match->data;
+
ret = altera_pcie_parse_dt(pcie);
if (ret) {
dev_err(dev, "Parsing DT failed\n");
@@ -628,11 +855,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
-static const struct of_device_id altera_pcie_of_match[] = {
- { .compatible = "altr,pcie-root-port-1.0", },
- {},
-};
-
static struct platform_driver altera_pcie_driver = {
.probe = altera_pcie_probe,
.driver = {
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c
index c3a088910f48..def7820cb824 100644
--- a/drivers/pci/controller/pcie-cadence-ep.c
+++ b/drivers/pci/controller/pcie-cadence-ep.c
@@ -396,21 +396,21 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
cfg |= BIT(epf->func_no);
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
- /*
- * The PCIe links are automatically established by the controller
- * once for all at powerup: the software can neither start nor stop
- * those links later at runtime.
- *
- * Then we only have to notify the EP core that our links are already
- * established. However we don't call directly pci_epc_linkup() because
- * we've already locked the epc->lock.
- */
- list_for_each_entry(epf, &epc->pci_epf, list)
- pci_epf_linkup(epf);
-
return 0;
}
+static const struct pci_epc_features cdns_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features*
+cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+ return &cdns_pcie_epc_features;
+}
+
static const struct pci_epc_ops cdns_pcie_epc_ops = {
.write_header = cdns_pcie_ep_write_header,
.set_bar = cdns_pcie_ep_set_bar,
@@ -421,6 +421,7 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_msi = cdns_pcie_ep_get_msi,
.raise_irq = cdns_pcie_ep_raise_irq,
.start = cdns_pcie_ep_start,
+ .get_features = cdns_pcie_ep_get_features,
};
static const struct of_device_id cdns_pcie_ep_of_match[] = {
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 55e471c18e8d..0b6c72804e03 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -90,6 +90,12 @@
#define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0))
#define PCIE_AXI_WINDOW0 0x448
#define WIN_ENABLE BIT(7)
+/*
+ * Define the PCIe-to-AHB window size as 2^33 to support up to 8GB of
+ * translated address space, which covers at least 4GB of DRAM accessed
+ * by EP DMA (physical DRAM starts at 0x40000000).
+ */
+#define PCIE2AHB_SIZE 0x21
/* PCIe V2 configuration transaction header */
#define PCIE_CFG_HEADER0 0x460
@@ -654,7 +660,6 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
struct resource *mem = &pcie->mem;
const struct mtk_pcie_soc *soc = port->pcie->soc;
u32 val;
- size_t size;
int err;
/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
@@ -706,15 +711,15 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
mtk_pcie_enable_msi(port);
/* Set AHB to PCIe translation windows */
- size = mem->end - mem->start;
- val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
+ val = lower_32_bits(mem->start) |
+ AHB2PCIE_SIZE(fls(resource_size(mem)));
writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
val = upper_32_bits(mem->start);
writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
/* Set PCIe to AXI translation memory space.*/
- val = fls(0xffffffff) | WIN_ENABLE;
+ val = PCIE2AHB_SIZE | WIN_ENABLE;
writel(val, port->base + PCIE_AXI_WINDOW0);
return 0;
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index b8163c56a142..a5d799e2dff2 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -499,12 +499,21 @@ static int rockchip_pcie_ep_start(struct pci_epc *epc)
rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
- list_for_each_entry(epf, &epc->pci_epf, list)
- pci_epf_linkup(epf);
-
return 0;
}
+static const struct pci_epc_features rockchip_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features*
+rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+ return &rockchip_pcie_epc_features;
+}
+
static const struct pci_epc_ops rockchip_pcie_epc_ops = {
.write_header = rockchip_pcie_ep_write_header,
.set_bar = rockchip_pcie_ep_set_bar,
@@ -515,6 +524,7 @@ static const struct pci_epc_ops rockchip_pcie_epc_ops = {
.get_msi = rockchip_pcie_ep_get_msi,
.raise_irq = rockchip_pcie_ep_raise_irq,
.start = rockchip_pcie_ep_start,
+ .get_features = rockchip_pcie_ep_get_features,
};
static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 3890812cdf87..999a5509e57e 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -95,10 +95,8 @@ struct vmd_dev {
struct irq_domain *irq_domain;
struct pci_bus *bus;
-#ifdef CONFIG_X86_DEV_DMA_OPS
struct dma_map_ops dma_ops;
struct dma_domain dma_domain;
-#endif
};
static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -293,7 +291,6 @@ static struct msi_domain_info vmd_msi_domain_info = {
.chip = &vmd_msi_controller,
};
-#ifdef CONFIG_X86_DEV_DMA_OPS
/*
* VMD replaces the requester ID with its own. DMA mappings for devices in a
* VMD domain need to be mapped for the VMD, not the device requiring
@@ -438,10 +435,6 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
-#else
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
-static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
-#endif
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
unsigned int devfn, int reg, int len)
@@ -571,6 +564,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
LIST_HEAD(resources);
resource_size_t offset[2] = {0};
resource_size_t membar2_offset = 0x2000, busn_start = 0;
+ struct pci_bus *child;
/*
* Shadow registers may exist in certain VMD device ids which allow
@@ -698,7 +692,19 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd_attach_resources(vmd);
vmd_setup_dma_ops(vmd);
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
- pci_rescan_bus(vmd->bus);
+
+ pci_scan_child_bus(vmd->bus);
+ pci_assign_unassigned_bus_resources(vmd->bus);
+
+ /*
+ * VMD root buses are virtual and don't return true on pci_is_pcie()
+ * and will fail pcie_bus_configure_settings() early. It can instead be
+ * run on each of the real root ports.
+ */
+ list_for_each_entry(child, &vmd->bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(vmd->bus);
WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
"domain"), "Can't create symlink to domain\n");
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 3e86fa3c7da3..d0b91da49bf4 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -47,9 +47,8 @@ struct pci_epf_test {
void *reg[6];
struct pci_epf *epf;
enum pci_barno test_reg_bar;
- bool linkup_notifier;
- bool msix_available;
struct delayed_work cmd_handler;
+ const struct pci_epc_features *epc_features;
};
struct pci_epf_test_reg {
@@ -71,11 +70,6 @@ static struct pci_epf_header test_header = {
.interrupt_pin = PCI_INTERRUPT_INTA,
};
-struct pci_epf_test_data {
- enum pci_barno test_reg_bar;
- bool linkup_notifier;
-};
-
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
@@ -175,7 +169,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err_map_addr;
}
- memcpy(buf, src_addr, reg->size);
+ memcpy_fromio(buf, src_addr, reg->size);
crc32 = crc32_le(~0, buf, reg->size);
if (crc32 != reg->checksum)
@@ -230,7 +224,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
get_random_bytes(buf, reg->size);
reg->checksum = crc32_le(~0, buf, reg->size);
- memcpy(dst_addr, buf, reg->size);
+ memcpy_toio(dst_addr, buf, reg->size);
/*
	 * wait 1ms in order for the write to complete. Without this delay L3
@@ -402,13 +396,15 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
struct device *dev = &epf->dev;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ const struct pci_epc_features *epc_features;
+
+ epc_features = epf_test->epc_features;
for (bar = BAR_0; bar <= BAR_5; bar++) {
epf_bar = &epf->bar[bar];
- epf_bar->flags |= upper_32_bits(epf_bar->size) ?
- PCI_BASE_ADDRESS_MEM_TYPE_64 :
- PCI_BASE_ADDRESS_MEM_TYPE_32;
+ if (!!(epc_features->reserved_bar & (1 << bar)))
+ continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
if (ret) {
@@ -433,9 +429,13 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
+ struct pci_epf_bar *epf_bar;
void *base;
int bar;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ const struct pci_epc_features *epc_features;
+
+ epc_features = epf_test->epc_features;
base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
test_reg_bar);
@@ -446,37 +446,69 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
epf_test->reg[test_reg_bar] = base;
for (bar = BAR_0; bar <= BAR_5; bar++) {
+ epf_bar = &epf->bar[bar];
if (bar == test_reg_bar)
continue;
+
+ if (!!(epc_features->reserved_bar & (1 << bar)))
+ continue;
+
base = pci_epf_alloc_space(epf, bar_size[bar], bar);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
epf_test->reg[bar] = base;
+ if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ bar++;
}
return 0;
}
+static void pci_epf_configure_bar(struct pci_epf *epf,
+ const struct pci_epc_features *epc_features)
+{
+ struct pci_epf_bar *epf_bar;
+ bool bar_fixed_64bit;
+ int i;
+
+ for (i = BAR_0; i <= BAR_5; i++) {
+ epf_bar = &epf->bar[i];
+ bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
+ if (bar_fixed_64bit)
+ epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ if (epc_features->bar_fixed_size[i])
+ bar_size[i] = epc_features->bar_fixed_size[i];
+ }
+}
+
static int pci_epf_test_bind(struct pci_epf *epf)
{
int ret;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epf_header *header = epf->header;
+ const struct pci_epc_features *epc_features;
+ enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
+ bool linkup_notifier = false;
+ bool msix_capable = false;
+ bool msi_capable = true;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
- if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
- epf_test->linkup_notifier = false;
- else
- epf_test->linkup_notifier = true;
-
- epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE;
+ epc_features = pci_epc_get_features(epc, epf->func_no);
+ if (epc_features) {
+ linkup_notifier = epc_features->linkup_notifier;
+ msix_capable = epc_features->msix_capable;
+ msi_capable = epc_features->msi_capable;
+ test_reg_bar = pci_epc_get_first_free_bar(epc_features);
+ pci_epf_configure_bar(epf, epc_features);
+ }
- epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
+ epf_test->test_reg_bar = test_reg_bar;
+ epf_test->epc_features = epc_features;
ret = pci_epc_write_header(epc, epf->func_no, header);
if (ret) {
@@ -492,13 +524,15 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
return ret;
- ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
- if (ret) {
- dev_err(dev, "MSI configuration failed\n");
- return ret;
+ if (msi_capable) {
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
}
- if (epf_test->msix_available) {
+ if (msix_capable) {
ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
if (ret) {
dev_err(dev, "MSI-X configuration failed\n");
@@ -506,7 +540,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
}
}
- if (!epf_test->linkup_notifier)
+ if (!linkup_notifier)
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
return 0;
@@ -523,17 +557,6 @@ static int pci_epf_test_probe(struct pci_epf *epf)
{
struct pci_epf_test *epf_test;
struct device *dev = &epf->dev;
- const struct pci_epf_device_id *match;
- struct pci_epf_test_data *data;
- enum pci_barno test_reg_bar = BAR_0;
- bool linkup_notifier = true;
-
- match = pci_epf_match_device(pci_epf_test_ids, epf);
- data = (struct pci_epf_test_data *)match->driver_data;
- if (data) {
- test_reg_bar = data->test_reg_bar;
- linkup_notifier = data->linkup_notifier;
- }
epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
if (!epf_test)
@@ -541,8 +564,6 @@ static int pci_epf_test_probe(struct pci_epf *epf)
epf->header = &test_header;
epf_test->epf = epf;
- epf_test->test_reg_bar = test_reg_bar;
- epf_test->linkup_notifier = linkup_notifier;
INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
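The memcpy_fromio()/memcpy_toio() switch in the hunks above matters because the mapped PCI windows are __iomem: plain memcpy() on MMIO is flagged by sparse and is not safe on all architectures. A minimal sketch of the accessors (helper names are illustrative):

	#include <linux/io.h>

	static void copy_from_bar(void *buf, const void __iomem *win, size_t n)
	{
		memcpy_fromio(buf, win, n);	/* device memory -> RAM */
	}

	static void copy_to_bar(void __iomem *win, const void *buf, size_t n)
	{
		memcpy_toio(win, buf, n);	/* RAM -> device memory */
	}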
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 094dcc3203b8..e4712a0f249c 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -84,6 +84,59 @@ err:
EXPORT_SYMBOL_GPL(pci_epc_get);
/**
+ * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
+ * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
+ *
+ * Invoke to get the first unreserved BAR that can be used by an endpoint
+ * function. Returns '0' for any incorrect value in reserved_bar.
+ */
+unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
+ *epc_features)
+{
+ int free_bar;
+
+ if (!epc_features)
+ return 0;
+
+ free_bar = ffz(epc_features->reserved_bar);
+ if (free_bar > 5)
+ return 0;
+
+ return free_bar;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
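Typical use of the new helper from an endpoint function driver mirrors the pci-epf-test hunks above: query the controller's features, then let the helper pick the register BAR. A hedged sketch (pick_reg_bar() is a hypothetical wrapper):

	static enum pci_barno pick_reg_bar(struct pci_epc *epc, u8 func_no)
	{
		const struct pci_epc_features *feat;

		feat = pci_epc_get_features(epc, func_no);
		if (!feat)
			return BAR_0;	/* no feature info: default to BAR0 */

		return pci_epc_get_first_free_bar(feat);
	}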
+
+/**
+ * pci_epc_get_features() - get the features supported by EPC
+ * @epc: the features supported by *this* EPC device will be returned
+ * @func_no: the features supported by the EPC device specific to the
+ * endpoint function with func_no will be returned
+ *
+ * Invoke to get the features provided by the EPC which may be
+ * specific to an endpoint function. Returns pci_epc_features on success
+ * and NULL for any failures.
+ */
+const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
+ u8 func_no)
+{
+ const struct pci_epc_features *epc_features;
+ unsigned long flags;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return NULL;
+
+ if (!epc->ops->get_features)
+ return NULL;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ epc_features = epc->ops->get_features(epc, func_no);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return epc_features;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_features);
+
+/**
* pci_epc_stop() - stop the PCI link
* @epc: the link of the EPC device that has to be stopped
*
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 825fa24427a3..8bfdcd291196 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -131,7 +131,9 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
epf->bar[bar].phys_addr = phys_addr;
epf->bar[bar].size = size;
epf->bar[bar].barno = bar;
- epf->bar[bar].flags = PCI_BASE_ADDRESS_SPACE_MEMORY;
+ epf->bar[bar].flags |= upper_32_bits(size) ?
+ PCI_BASE_ADDRESS_MEM_TYPE_64 :
+ PCI_BASE_ADDRESS_MEM_TYPE_32;
return space;
}
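The flags change above makes pci_epf_alloc_space() derive the BAR type from the allocation size (anything with nonzero upper 32 bits needs a 64-bit BAR pair) and, because it now ORs rather than assigns, preserves a PCI_BASE_ADDRESS_MEM_TYPE_64 flag pre-set by pci_epf_configure_bar() for fixed-64-bit BARs. The decision in isolation:

	/* sketch: choose the BAR type from the allocated size */
	static u32 bar_type_for_size(u64 size)
	{
		return upper_32_bits(size) ? PCI_BASE_ADDRESS_MEM_TYPE_64
					   : PCI_BASE_ADDRESS_MEM_TYPE_32;
	}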
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index b89f850c3a4e..e90a4ebf6550 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -378,7 +378,6 @@ int ibmphp_add_pfmem_from_mem(struct resource_node *);
struct bus_node *ibmphp_find_res_bus(u8);
void ibmphp_print_test(void); /* for debugging purposes */
-void ibmphp_hpc_initvars(void);
int ibmphp_hpc_readslot(struct slot *, u8, u8 *);
int ibmphp_hpc_writeslot(struct slot *, u8);
void ibmphp_lock_operations(void);
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 08a58e911fc2..17124254d897 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1277,8 +1277,6 @@ static int __init ibmphp_init(void)
ibmphp_debug = debug;
- ibmphp_hpc_initvars();
-
for (i = 0; i < 16; i++)
irqs[i] = 0;
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index 752c384cbd4c..508a62a6b5f9 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -15,13 +15,13 @@
#include <linux/wait.h>
#include <linux/time.h>
+#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/sched.h>
-#include <linux/semaphore.h>
#include <linux/kthread.h>
#include "ibmphp.h"
@@ -88,10 +88,10 @@ static int to_debug = 0;
//----------------------------------------------------------------------------
// global variables
//----------------------------------------------------------------------------
-static struct mutex sem_hpcaccess; // lock access to HPC
-static struct semaphore semOperations; // lock all operations and
+static DEFINE_MUTEX(sem_hpcaccess); // lock access to HPC
+static DEFINE_MUTEX(operations_mutex); // lock all operations and
// access to data structures
-static struct semaphore sem_exit; // make sure polling thread goes away
+static DECLARE_COMPLETION(exit_complete); // make sure polling thread goes away
static struct task_struct *ibmphp_poll_thread;
//----------------------------------------------------------------------------
// local function prototypes
@@ -110,23 +110,6 @@ static int hpc_wait_ctlr_notworking(int, struct controller *, void __iomem *, u8
/*----------------------------------------------------------------------
-* Name: ibmphp_hpc_initvars
-*
-* Action: initialize semaphores and variables
-*---------------------------------------------------------------------*/
-void __init ibmphp_hpc_initvars(void)
-{
- debug("%s - Entry\n", __func__);
-
- mutex_init(&sem_hpcaccess);
- sema_init(&semOperations, 1);
- sema_init(&sem_exit, 0);
- to_debug = 0;
-
- debug("%s - Exit\n", __func__);
-}
-
-/*----------------------------------------------------------------------
* Name: i2c_ctrl_read
*
* Action: read from HPC over I2C
@@ -780,7 +763,7 @@ void free_hpc_access(void)
*---------------------------------------------------------------------*/
void ibmphp_lock_operations(void)
{
- down(&semOperations);
+ mutex_lock(&operations_mutex);
to_debug = 1;
}
@@ -790,7 +773,7 @@ void ibmphp_lock_operations(void)
void ibmphp_unlock_operations(void)
{
debug("%s - Entry\n", __func__);
- up(&semOperations);
+ mutex_unlock(&operations_mutex);
to_debug = 0;
debug("%s - Exit\n", __func__);
}
@@ -816,7 +799,7 @@ static int poll_hpc(void *data)
while (!kthread_should_stop()) {
/* try to get the lock to do some kind of hardware access */
- down(&semOperations);
+ mutex_lock(&operations_mutex);
switch (poll_state) {
case POLL_LATCH_REGISTER:
@@ -871,13 +854,13 @@ static int poll_hpc(void *data)
break;
case POLL_SLEEP:
/* don't sleep with a lock on the hardware */
- up(&semOperations);
+ mutex_unlock(&operations_mutex);
msleep(POLL_INTERVAL_SEC * 1000);
if (kthread_should_stop())
goto out_sleep;
- down(&semOperations);
+ mutex_lock(&operations_mutex);
if (poll_count >= POLL_LATCH_CNT) {
poll_count = 0;
@@ -887,12 +870,12 @@ static int poll_hpc(void *data)
break;
}
/* give up the hardware semaphore */
- up(&semOperations);
+ mutex_unlock(&operations_mutex);
/* sleep for a short time just for good measure */
out_sleep:
msleep(100);
}
- up(&sem_exit);
+ complete(&exit_complete);
debug("%s - Exit\n", __func__);
return 0;
}
@@ -1060,9 +1043,9 @@ void __exit ibmphp_hpc_stop_poll_thread(void)
debug("after locking operations\n");
// wait for poll thread to exit
- debug("before sem_exit down\n");
- down(&sem_exit);
- debug("after sem_exit down\n");
+ debug("before exit_complete down\n");
+ wait_for_completion(&exit_complete);
+ debug("after exit_completion down\n");
// cleanup
debug("before free_hpc_access\n");
@@ -1070,8 +1053,6 @@ void __exit ibmphp_hpc_stop_poll_thread(void)
debug("after free_hpc_access\n");
ibmphp_unlock_operations();
debug("after unlock operations\n");
- up(&sem_exit);
- debug("after sem exit up\n");
debug("%s - Exit\n", __func__);
}
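The ibmphp conversion above replaces a semaphore initialized to zero (down() in the waiter, up() in the exiting thread) with a completion, the idiomatic one-shot "this thread is gone" handshake. A standalone sketch of the pattern (names are illustrative, not the driver's):

	#include <linux/completion.h>
	#include <linux/delay.h>
	#include <linux/kthread.h>

	static DECLARE_COMPLETION(poll_exited);

	static int poll_fn(void *data)
	{
		while (!kthread_should_stop())
			msleep(100);		/* periodic hardware poll */
		complete(&poll_exited);		/* signal the waiter once */
		return 0;
	}

	static void wait_poll_thread_gone(void)
	{
		wait_for_completion(&poll_exited);
	}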
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 3f3df4c29f6e..905282a8ddaa 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
* removed from the slot/adapter.
*/
msleep(1000);
+
+ /* Ignore link or presence changes caused by power off */
+ atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
+ &ctrl->pending_events);
}
/* turn off Green LED */
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7dd443aea5a5..6a2365cd794e 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -156,9 +156,9 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
slot_ctrl |= (cmd & mask);
ctrl->cmd_busy = 1;
smp_mb();
+ ctrl->slot_ctrl = slot_ctrl;
pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
ctrl->cmd_started = jiffies;
- ctrl->slot_ctrl = slot_ctrl;
/*
* Controllers with the Intel CF118 and similar errata advertise
@@ -736,12 +736,25 @@ void pcie_clear_hotplug_events(struct controller *ctrl)
void pcie_enable_interrupt(struct controller *ctrl)
{
- pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_HPIE, PCI_EXP_SLTCTL_HPIE);
+ u16 mask;
+
+ mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
+ pcie_write_cmd(ctrl, mask, mask);
}
void pcie_disable_interrupt(struct controller *ctrl)
{
- pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_HPIE);
+ u16 mask;
+
+ /*
+ * Mask the hot-plug interrupt to prevent it from triggering
+ * immediately when the link goes inactive (we still get PME when
+ * any of the enabled events is detected). The Data Link Layer
+ * State Changed event likewise generates PME as soon as the link
+ * goes inactive, so mask it as well.
+ */
+ mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
+ pcie_write_cmd(ctrl, 0, mask);
}
/*
@@ -920,3 +933,5 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
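
For reference, pcie_write_cmd(ctrl, cmd, mask), which ends up in pcie_do_write_cmd() above, performs a read-modify-write of Slot Control: only the bits in mask change, taking their values from cmd. A condensed sketch of the semantics, ignoring the command-completed bookkeeping:

        u16 slot_ctrl;

        pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
        slot_ctrl &= ~mask;             /* clear the caller-owned bits */
        slot_ctrl |= (cmd & mask);      /* set their requested values  */
        pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);

So pcie_write_cmd(ctrl, 0, HPIE | DLLSCE) clears both enable bits while leaving the rest of Slot Control untouched.
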
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 4c4217d0c3f1..3d32da15c215 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -113,7 +113,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
* a fake root for all functions of a multi-function
* device we go down them as well.
*/
- if (!strcmp(node->name, "multifunc-device")) {
+ if (of_node_name_eq(node, "multifunc-device")) {
for_each_child_of_node(node, node2) {
if (__of_pci_pci_compare(node2, devfn)) {
of_node_put(node);
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 129738362d90..83fb077d0b41 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -24,29 +24,6 @@
#define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
-/*
- * Initialize a pci_bridge_emul structure to represent a fake PCI
- * bridge configuration space. The caller needs to have initialized
- * the PCI configuration space with whatever values make sense
- * (typically at least vendor, device, revision), the ->ops pointer,
- * and optionally ->data and ->has_pcie.
- */
-void pci_bridge_emul_init(struct pci_bridge_emul *bridge)
-{
- bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
- bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
- bridge->conf.cache_line_size = 0x10;
- bridge->conf.status = PCI_STATUS_CAP_LIST;
-
- if (bridge->has_pcie) {
- bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
- bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
- /* Set PCIe v2, root port, slot support */
- bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
- PCI_EXP_FLAGS_SLOT;
- }
-}
-
struct pci_bridge_reg_behavior {
/* Read-only bits */
u32 ro;
@@ -284,6 +261,61 @@ const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
};
/*
+ * Initialize a pci_bridge_emul structure to represent a fake PCI
+ * bridge configuration space. The caller needs to have initialized
+ * the PCI configuration space with whatever values make sense
+ * (typically at least vendor, device, revision), the ->ops pointer,
+ * and optionally ->data and ->has_pcie.
+ */
+int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
+ unsigned int flags)
+{
+ bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
+ bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
+ bridge->conf.cache_line_size = 0x10;
+ bridge->conf.status = PCI_STATUS_CAP_LIST;
+ bridge->pci_regs_behavior = kmemdup(pci_regs_behavior,
+ sizeof(pci_regs_behavior),
+ GFP_KERNEL);
+ if (!bridge->pci_regs_behavior)
+ return -ENOMEM;
+
+ if (bridge->has_pcie) {
+ bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
+ bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
+ /* Set PCIe v2, root port, slot support */
+ bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+ PCI_EXP_FLAGS_SLOT;
+ bridge->pcie_cap_regs_behavior =
+ kmemdup(pcie_cap_regs_behavior,
+ sizeof(pcie_cap_regs_behavior),
+ GFP_KERNEL);
+ if (!bridge->pcie_cap_regs_behavior) {
+ kfree(bridge->pci_regs_behavior);
+ return -ENOMEM;
+ }
+ }
+
+ if (flags & PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR) {
+ bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].ro = ~0;
+ bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].rw = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Clean up a pci_bridge_emul structure that was previously initialized
+ * using pci_bridge_emul_init().
+ */
+void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge)
+{
+ if (bridge->has_pcie)
+ kfree(bridge->pcie_cap_regs_behavior);
+ kfree(bridge->pci_regs_behavior);
+}
+
+/*
* Should be called by the PCI controller driver when reading the PCI
* configuration space of the fake bridge. It will call back the
* ->ops->read_base or ->ops->read_pcie operations.
@@ -312,11 +344,11 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
reg -= PCI_CAP_PCIE_START;
read_op = bridge->ops->read_pcie;
cfgspace = (u32 *) &bridge->pcie_conf;
- behavior = pcie_cap_regs_behavior;
+ behavior = bridge->pcie_cap_regs_behavior;
} else {
read_op = bridge->ops->read_base;
cfgspace = (u32 *) &bridge->conf;
- behavior = pci_regs_behavior;
+ behavior = bridge->pci_regs_behavior;
}
if (read_op)
@@ -383,11 +415,11 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
reg -= PCI_CAP_PCIE_START;
write_op = bridge->ops->write_pcie;
cfgspace = (u32 *) &bridge->pcie_conf;
- behavior = pcie_cap_regs_behavior;
+ behavior = bridge->pcie_cap_regs_behavior;
} else {
write_op = bridge->ops->write_base;
cfgspace = (u32 *) &bridge->conf;
- behavior = pci_regs_behavior;
+ behavior = bridge->pci_regs_behavior;
}
/* Keep all bits, except the RW bits */
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index 9d510ccf738b..e65b1b79899d 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -107,15 +107,26 @@ struct pci_bridge_emul_ops {
u32 old, u32 new, u32 mask);
};
+struct pci_bridge_reg_behavior;
+
struct pci_bridge_emul {
struct pci_bridge_emul_conf conf;
struct pci_bridge_emul_pcie_conf pcie_conf;
struct pci_bridge_emul_ops *ops;
+ struct pci_bridge_reg_behavior *pci_regs_behavior;
+ struct pci_bridge_reg_behavior *pcie_cap_regs_behavior;
void *data;
bool has_pcie;
};
-void pci_bridge_emul_init(struct pci_bridge_emul *bridge);
+enum {
+ PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR = BIT(0),
+};
+
+int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
+ unsigned int flags);
+void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge);
+
int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
int size, u32 *value);
int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
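
With the register-behavior tables now duplicated per bridge, initialization can fail and must be paired with cleanup. A hedged usage sketch from a hypothetical controller driver:

        struct pci_bridge_emul bridge = {
                .ops = &my_emul_ops,    /* hypothetical pci_bridge_emul_ops */
                .has_pcie = true,
        };
        int err;

        err = pci_bridge_emul_init(&bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
        if (err)
                return err;             /* -ENOMEM if a kmemdup() failed */

        /* ... route config accesses via pci_bridge_emul_conf_read()/_write() ... */

        pci_bridge_emul_cleanup(&bridge);

The flag makes the prefetchable base/limit register read-only and reading as 0, which is how a bridge advertises that it implements no prefetchable window.
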
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 11a877461584..cae630fe6387 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -100,7 +100,7 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
{
struct pci_driver *pdrv = to_pci_driver(driver);
const struct pci_device_id *ids = pdrv->id_table;
- __u32 vendor, device, subvendor = PCI_ANY_ID,
+ u32 vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
unsigned long driver_data = 0;
int fields = 0;
@@ -168,7 +168,7 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
{
struct pci_dynid *dynid, *n;
struct pci_driver *pdrv = to_pci_driver(driver);
- __u32 vendor, device, subvendor = PCI_ANY_ID,
+ u32 vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
int fields = 0;
size_t retval = -ENODEV;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index db55acf32a7e..766f5779db92 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -861,7 +861,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = true;
- /* Fall-through: force to D0 */
+ /* Fall-through - force to D0 */
default:
pmcsr = 0;
break;
@@ -1233,7 +1233,6 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}
-
static int pci_save_pcix_state(struct pci_dev *dev)
{
int pos;
@@ -1270,6 +1269,45 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
+static void pci_save_ltr_state(struct pci_dev *dev)
+{
+ int ltr;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!ltr)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state) {
+ pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
+ return;
+ }
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
+ pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
+}
+
+static void pci_restore_ltr_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ int ltr;
+ u16 *cap;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state || !ltr)
+ return;
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
+ pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
+}
/**
* pci_save_state - save the PCI configuration space of a device before suspending
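
The ordering constraint driving this hunk: the max-latency values live in the LTR extended capability, while the LTR enable bit (PCI_EXP_DEVCTL2_LTR_EN) lives in the PCIe capability, so the latencies must be written back before the enable bit returns. A condensed sketch of the resulting flow:

        /* enumeration: reserve room for the two latency registers */
        pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, 2 * sizeof(u16));

        /* suspend */
        pci_save_ltr_state(dev);        /* PCI_LTR_MAX_SNOOP_LAT, PCI_LTR_MAX_NOSNOOP_LAT */

        /* resume: latencies first, then the PCIe capability (which may set LTR_EN) */
        pci_restore_ltr_state(dev);
        pci_restore_pcie_state(dev);
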
@@ -1291,6 +1329,7 @@ int pci_save_state(struct pci_dev *dev)
if (i != 0)
return i;
+ pci_save_ltr_state(dev);
pci_save_dpc_state(dev);
return pci_save_vc_state(dev);
}
@@ -1390,7 +1429,12 @@ void pci_restore_state(struct pci_dev *dev)
if (!dev->state_saved)
return;
- /* PCI Express register must be restored first */
+ /*
+ * Restore max latencies (in the LTR capability) before enabling
+ * LTR itself (in the PCIe capability).
+ */
+ pci_restore_ltr_state(dev);
+
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
pci_restore_pri_state(dev);
@@ -2260,7 +2304,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
case PCI_D2:
if (pci_no_d1d2(dev))
break;
- /* else: fall through */
+ /* else, fall through */
default:
target_state = state;
}
@@ -2501,6 +2545,25 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
pm_runtime_put_sync(parent);
}
+static const struct dmi_system_id bridge_d3_blacklist[] = {
+#ifdef CONFIG_X86
+ {
+ /*
+ * Gigabyte X299 root port is not marked as hotplug capable
+ * which allows Linux to power manage it. However, this
+ * confuses the BIOS SMI handler so don't power manage root
+ * ports on that system.
+ */
+ .ident = "X299 DESIGNARE EX-CF",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
+ },
+ },
+#endif
+ { }
+};
+
/**
* pci_bridge_d3_possible - Is it possible to put the bridge into D3
* @bridge: Bridge to check
@@ -2546,6 +2609,9 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
if (bridge->is_hotplug_bridge)
return false;
+ if (dmi_check_system(bridge_d3_blacklist))
+ return false;
+
/*
* It should be safe to put PCIe ports from 2015 or newer
* to D3.
@@ -2998,6 +3064,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
pci_err(dev, "unable to preallocate PCI-X save buffer\n");
+ error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
+ 2 * sizeof(u16));
+ if (error)
+ pci_err(dev, "unable to allocate suspend buffer for LTR\n");
+
pci_allocate_vc_save_buffers(dev);
}
@@ -5058,39 +5129,42 @@ unlock:
return 0;
}
-/* Save and disable devices from the top of the tree down */
-static void pci_bus_save_and_disable(struct pci_bus *bus)
+/*
+ * Save and disable devices from the top of the tree down while holding
+ * the @dev mutex lock for the entire tree.
+ */
+static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_dev_lock(dev);
pci_dev_save_and_disable(dev);
- pci_dev_unlock(dev);
if (dev->subordinate)
- pci_bus_save_and_disable(dev->subordinate);
+ pci_bus_save_and_disable_locked(dev->subordinate);
}
}
/*
- * Restore devices from top of the tree down - parent bridges need to be
- * restored before we can get to subordinate devices.
+ * Restore devices from top of the tree down while holding @dev mutex lock
+ * for the entire tree. Parent bridges need to be restored before we can
+ * get to subordinate devices.
*/
-static void pci_bus_restore(struct pci_bus *bus)
+static void pci_bus_restore_locked(struct pci_bus *bus)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_dev_lock(dev);
pci_dev_restore(dev);
- pci_dev_unlock(dev);
if (dev->subordinate)
- pci_bus_restore(dev->subordinate);
+ pci_bus_restore_locked(dev->subordinate);
}
}
-/* Save and disable devices from the top of the tree down */
-static void pci_slot_save_and_disable(struct pci_slot *slot)
+/*
+ * Save and disable devices from the top of the tree down while holding
+ * the @dev mutex lock for the entire tree.
+ */
+static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
{
struct pci_dev *dev;
@@ -5099,26 +5173,25 @@ static void pci_slot_save_and_disable(struct pci_slot *slot)
continue;
pci_dev_save_and_disable(dev);
if (dev->subordinate)
- pci_bus_save_and_disable(dev->subordinate);
+ pci_bus_save_and_disable_locked(dev->subordinate);
}
}
/*
- * Restore devices from top of the tree down - parent bridges need to be
- * restored before we can get to subordinate devices.
+ * Restore devices from top of the tree down while holding @dev mutex lock
+ * for the entire tree. Parent bridges need to be restored before we can
+ * get to subordinate devices.
*/
-static void pci_slot_restore(struct pci_slot *slot)
+static void pci_slot_restore_locked(struct pci_slot *slot)
{
struct pci_dev *dev;
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- pci_dev_lock(dev);
pci_dev_restore(dev);
- pci_dev_unlock(dev);
if (dev->subordinate)
- pci_bus_restore(dev->subordinate);
+ pci_bus_restore_locked(dev->subordinate);
}
}
@@ -5177,17 +5250,15 @@ static int __pci_reset_slot(struct pci_slot *slot)
if (rc)
return rc;
- pci_slot_save_and_disable(slot);
-
if (pci_slot_trylock(slot)) {
+ pci_slot_save_and_disable_locked(slot);
might_sleep();
rc = pci_reset_hotplug_slot(slot->hotplug, 0);
+ pci_slot_restore_locked(slot);
pci_slot_unlock(slot);
} else
rc = -EAGAIN;
- pci_slot_restore(slot);
-
return rc;
}
@@ -5273,17 +5344,15 @@ static int __pci_reset_bus(struct pci_bus *bus)
if (rc)
return rc;
- pci_bus_save_and_disable(bus);
-
if (pci_bus_trylock(bus)) {
+ pci_bus_save_and_disable_locked(bus);
might_sleep();
rc = pci_bridge_secondary_bus_reset(bus->self);
+ pci_bus_restore_locked(bus);
pci_bus_unlock(bus);
} else
rc = -EAGAIN;
- pci_bus_restore(bus);
-
return rc;
}
@@ -6000,8 +6069,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
* to enable the kernel to reassign new resource
* window later on.
*/
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
r = &dev->resource[i];
if (!(r->flags & IORESOURCE_MEM))
@@ -6194,8 +6262,7 @@ static int __init pci_setup(char *str)
} else if (!strncmp(str, "pcie_scan_all", 13)) {
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
- disable_acs_redir_param =
- kstrdup(str + 18, GFP_KERNEL);
+ disable_acs_redir_param = str + 18;
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
@@ -6206,3 +6273,19 @@ static int __init pci_setup(char *str)
return 0;
}
early_param("pci", pci_setup);
+
+/*
+ * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point
+ * to data in the __initdata section which will be freed after the init
+ * sequence is complete. We can't allocate memory in pci_setup() because some
+ * architectures do not have any memory allocation service available during
+ * an early_param() call. So we allocate memory and copy the variable here
+ * before the init section is freed.
+ */
+static int __init pci_realloc_setup_params(void)
+{
+ disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+
+ return 0;
+}
+pure_initcall(pci_realloc_setup_params);
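
The comment above describes a reusable pattern for any early_param() whose value must outlive the init sections: stash the __initdata pointer first, then duplicate it from an initcall once the allocator is up. A minimal sketch with hypothetical names:

        static char *my_param;  /* points into __initdata until the initcall runs */

        static int __init my_setup(char *str)
        {
                my_param = str;         /* no allocation allowed this early on some arches */
                return 0;
        }
        early_param("myopt", my_setup);

        static int __init my_realloc_param(void)
        {
                my_param = kstrdup(my_param, GFP_KERNEL);  /* kstrdup(NULL) returns NULL */
                return 0;
        }
        pure_initcall(my_realloc_param);
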
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 224d88634115..d994839a3e24 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
+void pcie_report_downtraining(struct pci_dev *dev);
/* Single Root I/O Virtualization */
struct pci_sriov {
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 44742b2e1126..362eb8cfa53b 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -6,10 +6,9 @@ config PCIEPORTBUS
bool "PCI Express Port Bus support"
depends on PCI
help
- This automatically enables PCI Express Port Bus support. Users can
- choose Native Hot-Plug support, Advanced Error Reporting support,
- Power Management Event support and Virtual Channel support to run
- on PCI Express Ports (Root or Switch).
+ This enables PCI Express Port Bus support. Users can then enable
+ support for Native Hot-Plug, Advanced Error Reporting, Power
+ Management Events, and Downstream Port Containment.
#
# Include service Kconfig here
@@ -143,3 +142,11 @@ config PCIE_PTM
This is only useful if you have devices that support PTM, but it
is safe to enable even if you don't.
+
+config PCIE_BW
+ bool "PCI Express Bandwidth Change Notification"
+ depends on PCIEPORTBUS
+ help
+ This enables PCI Express Bandwidth Change Notification. If
+ you know link width or rate changes occur only to correct
+ unreliable links, you may answer Y.
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index ab514083d5d4..efb9d2e71e9e 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
obj-$(CONFIG_PCIE_PTM) += ptm.o
+obj-$(CONFIG_PCIE_BW) += bw_notification.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index fed29de783e0..f8fc2114ad39 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -117,7 +117,7 @@ bool pci_aer_available(void)
static int ecrc_policy = ECRC_POLICY_DEFAULT;
-static const char *ecrc_policy_str[] = {
+static const char * const ecrc_policy_str[] = {
[ECRC_POLICY_DEFAULT] = "bios",
[ECRC_POLICY_OFF] = "off",
[ECRC_POLICY_ON] = "on"
@@ -203,11 +203,8 @@ void pcie_ecrc_get_policy(char *str)
{
int i;
- for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
- if (!strncmp(str, ecrc_policy_str[i],
- strlen(ecrc_policy_str[i])))
- break;
- if (i >= ARRAY_SIZE(ecrc_policy_str))
+ i = match_string(ecrc_policy_str, ARRAY_SIZE(ecrc_policy_str), str);
+ if (i < 0)
return;
ecrc_policy = i;
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
new file mode 100644
index 000000000000..4fa9e3523ee1
--- /dev/null
+++ b/drivers/pci/pcie/bw_notification.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCI Express Link Bandwidth Notification services driver
+ * Author: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+ *
+ * Copyright (C) 2019, Dell Inc
+ *
+ * The PCIe Link Bandwidth Notification provides a way to notify the
+ * operating system when the link width or data rate changes. This
+ * capability is required for all root ports and downstream ports
+ * supporting links wider than x1 and/or multiple link speeds.
+ *
+ * This port service driver hooks into the bandwidth notification interrupt
+ * and warns when links become degraded in operation.
+ */
+
+#include "../pci.h"
+#include "portdrv.h"
+
+static bool pcie_link_bandwidth_notification_supported(struct pci_dev *dev)
+{
+ int ret;
+ u32 lnk_cap;
+
+ ret = pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnk_cap);
+ return (ret == PCIBIOS_SUCCESSFUL) && (lnk_cap & PCI_EXP_LNKCAP_LBNC);
+}
+
+static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
+{
+ u16 lnk_ctl;
+
+ pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
+ lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
+}
+
+static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
+{
+ u16 lnk_ctl;
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
+ lnk_ctl &= ~PCI_EXP_LNKCTL_LBMIE;
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
+}
+
+static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
+{
+ struct pcie_device *srv = context;
+ struct pci_dev *port = srv->port;
+ u16 link_status, events;
+ int ret;
+
+ ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
+ events = link_status & PCI_EXP_LNKSTA_LBMS;
+
+ if (ret != PCIBIOS_SUCCESSFUL || !events)
+ return IRQ_NONE;
+
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+ pcie_update_link_speed(port->subordinate, link_status);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+{
+ struct pcie_device *srv = context;
+ struct pci_dev *port = srv->port;
+ struct pci_dev *dev;
+
+ /*
+ * Print status from downstream devices, not this root port or
+ * downstream switch port.
+ */
+ down_read(&pci_bus_sem);
+ list_for_each_entry(dev, &port->subordinate->devices, bus_list)
+ pcie_report_downtraining(dev);
+ up_read(&pci_bus_sem);
+
+ return IRQ_HANDLED;
+}
+
+static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
+{
+ int ret;
+
+ /* Single-width or single-speed ports do not have to support this. */
+ if (!pcie_link_bandwidth_notification_supported(srv->port))
+ return -ENODEV;
+
+ ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
+ pcie_bw_notification_handler,
+ IRQF_SHARED, "PCIe BW notif", srv);
+ if (ret)
+ return ret;
+
+ pcie_enable_link_bandwidth_notification(srv->port);
+
+ return 0;
+}
+
+static void pcie_bandwidth_notification_remove(struct pcie_device *srv)
+{
+ pcie_disable_link_bandwidth_notification(srv->port);
+ free_irq(srv->irq, srv);
+}
+
+static struct pcie_port_service_driver pcie_bandwidth_notification_driver = {
+ .name = "pcie_bw_notification",
+ .port_type = PCIE_ANY_PORT,
+ .service = PCIE_PORT_SERVICE_BWNOTIF,
+ .probe = pcie_bandwidth_notification_probe,
+ .remove = pcie_bandwidth_notification_remove,
+};
+
+int __init pcie_bandwidth_notification_init(void)
+{
+ return pcie_port_service_register(&pcie_bandwidth_notification_driver);
+}
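
The interrupt split above follows the request_threaded_irq() contract: the hard handler runs in interrupt context, so it only acknowledges LBMS and returns IRQ_WAKE_THREAD, while the reporting half, which takes pci_bus_sem (a sleeping lock), runs as the IRQ thread. A generic sketch of the shape, with hypothetical helpers:

        static irqreturn_t my_hard_handler(int irq, void *ctx)
        {
                if (!my_event_pending(ctx))     /* hypothetical register check */
                        return IRQ_NONE;        /* shared IRQ: not ours        */

                my_ack_event(ctx);              /* hypothetical write-1-to-clear ack */
                return IRQ_WAKE_THREAD;         /* defer the sleeping work     */
        }

        static irqreturn_t my_thread_fn(int irq, void *ctx)
        {
                /* may sleep: take rwsems, print, walk device lists */
                return IRQ_HANDLED;
        }

        err = request_threaded_irq(irq, my_hard_handler, my_thread_fn,
                                   IRQF_SHARED, "my-dev", ctx);
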
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index e435d12e61a0..7b77754a82de 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -202,6 +202,28 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
+static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
+ struct aer_err_info *info)
+{
+ int pos = dev->aer_cap;
+ u32 status, mask, sev;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
+ status &= ~mask;
+ if (!status)
+ return 0;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
+ status &= sev;
+ if (status)
+ info->severity = AER_FATAL;
+ else
+ info->severity = AER_NONFATAL;
+
+ return 1;
+}
+
static irqreturn_t dpc_handler(int irq, void *context)
{
struct aer_err_info info;
@@ -229,9 +251,12 @@ static irqreturn_t dpc_handler(int irq, void *context)
/* show RP PIO error detail information */
if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
dpc_process_rp_pio_error(dpc);
- else if (reason == 0 && aer_get_device_error_info(pdev, &info)) {
+ else if (reason == 0 &&
+ dpc_get_aer_uncorrect_severity(pdev, &info) &&
+ aer_get_device_error_info(pdev, &info)) {
aer_print_error(pdev, &info);
pci_cleanup_aer_uncorrect_error_status(pdev);
+ pci_aer_clear_fatal_status(pdev);
}
/* We configure DPC so it only triggers on ERR_FATAL */
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 0dbcf429089f..54d593d10396 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -363,6 +363,16 @@ static bool pcie_pme_check_wakeup(struct pci_bus *bus)
return false;
}
+static void pcie_pme_disable_interrupt(struct pci_dev *port,
+ struct pcie_pme_service_data *data)
+{
+ spin_lock_irq(&data->lock);
+ pcie_pme_interrupt_enable(port, false);
+ pcie_clear_root_pme_status(port);
+ data->noirq = true;
+ spin_unlock_irq(&data->lock);
+}
+
/**
* pcie_pme_suspend - Suspend PCIe PME service device.
* @srv: PCIe service device to suspend.
@@ -387,11 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
return 0;
}
- spin_lock_irq(&data->lock);
- pcie_pme_interrupt_enable(port, false);
- pcie_clear_root_pme_status(port);
- data->noirq = true;
- spin_unlock_irq(&data->lock);
+ pcie_pme_disable_interrupt(port, data);
synchronize_irq(srv->irq);
@@ -427,34 +433,12 @@ static int pcie_pme_resume(struct pcie_device *srv)
*/
static void pcie_pme_remove(struct pcie_device *srv)
{
- pcie_pme_suspend(srv);
- free_irq(srv->irq, srv);
- kfree(get_service_data(srv));
-}
-
-static int pcie_pme_runtime_suspend(struct pcie_device *srv)
-{
- struct pcie_pme_service_data *data = get_service_data(srv);
-
- spin_lock_irq(&data->lock);
- pcie_pme_interrupt_enable(srv->port, false);
- pcie_clear_root_pme_status(srv->port);
- data->noirq = true;
- spin_unlock_irq(&data->lock);
-
- return 0;
-}
-
-static int pcie_pme_runtime_resume(struct pcie_device *srv)
-{
struct pcie_pme_service_data *data = get_service_data(srv);
- spin_lock_irq(&data->lock);
- pcie_pme_interrupt_enable(srv->port, true);
- data->noirq = false;
- spin_unlock_irq(&data->lock);
-
- return 0;
+ pcie_pme_disable_interrupt(srv->port, data);
+ free_irq(srv->irq, srv);
+ cancel_work_sync(&data->work);
+ kfree(data);
}
static struct pcie_port_service_driver pcie_pme_driver = {
@@ -464,8 +448,6 @@ static struct pcie_port_service_driver pcie_pme_driver = {
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
- .runtime_suspend = pcie_pme_runtime_suspend,
- .runtime_resume = pcie_pme_runtime_resume,
.resume = pcie_pme_resume,
.remove = pcie_pme_remove,
};
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index fbbf00b0992e..944827a8c7d3 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,8 +20,10 @@
#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
#define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */
#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
+#define PCIE_PORT_SERVICE_BWNOTIF_SHIFT 4 /* Bandwidth notification */
+#define PCIE_PORT_SERVICE_BWNOTIF (1 << PCIE_PORT_SERVICE_BWNOTIF_SHIFT)
-#define PCIE_PORT_DEVICE_MAXSERVICES 4
+#define PCIE_PORT_DEVICE_MAXSERVICES 5
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
@@ -47,6 +49,12 @@ int pcie_dpc_init(void);
static inline int pcie_dpc_init(void) { return 0; }
#endif
+#ifdef CONFIG_PCIE_BW
+int pcie_bandwidth_notification_init(void);
+#else
+static inline int pcie_bandwidth_notification_init(void) { return 0; }
+#endif
+
/* Port Type */
#define PCIE_ANY_PORT (~0)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index f458ac9cb70c..1b330129089f 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -55,7 +55,8 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
* 7.8.2, 7.10.10, 7.31.2.
*/
- if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
+ PCIE_PORT_SERVICE_BWNOTIF)) {
pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
*pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
nvec = *pme + 1;
@@ -99,7 +100,7 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
*/
static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
{
- int nr_entries, nvec;
+ int nr_entries, nvec, pcie_irq;
u32 pme = 0, aer = 0, dpc = 0;
/* Allocate the maximum possible number of MSI/MSI-X vectors */
@@ -135,10 +136,13 @@ static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
return nr_entries;
}
- /* PME and hotplug share an MSI/MSI-X vector */
- if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
- irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, pme);
- irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, pme);
+ /* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
+ PCIE_PORT_SERVICE_BWNOTIF)) {
+ pcie_irq = pci_irq_vector(dev, pme);
+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq;
+ irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq;
+ irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq;
}
if (mask & PCIE_PORT_SERVICE_AER)
@@ -250,6 +254,10 @@ static int get_port_device_capability(struct pci_dev *dev)
pci_aer_available() && services & PCIE_PORT_SERVICE_AER)
services |= PCIE_PORT_SERVICE_DPC;
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ services |= PCIE_PORT_SERVICE_BWNOTIF;
+
return services;
}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 0acca3596807..0a87091a0800 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -182,10 +182,12 @@ static void pcie_portdrv_err_resume(struct pci_dev *dev)
/*
* LINUX Device Driver Model
*/
-static const struct pci_device_id port_pci_ids[] = { {
+static const struct pci_device_id port_pci_ids[] = {
/* handle any PCI-Express port */
- PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0),
- }, { /* end: all zeroes */ }
+ { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) },
+ /* subtractive decode PCI-to-PCI bridge, class type is 060401h */
+ { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) },
+ { },
};
static const struct pci_error_handlers pcie_portdrv_err_handler = {
@@ -238,6 +240,7 @@ static void __init pcie_init_services(void)
pcie_pme_init();
pcie_dpc_init();
pcie_hp_init();
+ pcie_bandwidth_notification_init();
}
static int __init pcie_portdrv_init(void)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 257b9f6f2ebb..7e12d0163863 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -121,13 +121,13 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask)
* Get the lowest of them to find the decode size, and from that
* the extent.
*/
- size = (size & ~(size-1)) - 1;
+ size = size & ~(size-1);
/*
* base == maxbase can be valid only if the BAR has already been
* programmed with all 1s.
*/
- if (base == maxbase && ((base | size) & mask) != mask)
+ if (base == maxbase && ((base | (size - 1)) & mask) != mask)
return 0;
return size;
@@ -278,7 +278,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
/* Above 32-bit boundary; try to reallocate */
res->flags |= IORESOURCE_UNSET;
res->start = 0;
- res->end = sz64;
+ res->end = sz64 - 1;
pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
pos, (unsigned long long)l64);
goto out;
@@ -286,7 +286,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
}
region.start = l64;
- region.end = l64 + sz64;
+ region.end = l64 + sz64 - 1;
pcibios_bus_to_resource(dev->bus, res, &region);
pcibios_resource_to_bus(dev->bus, &inverted_region, res);
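
The BAR sizing change switches pci_size() from returning size - 1 to returning the decode size itself (a power of two), with resource ends computed as start + size - 1 at the callers. The lowest-set-bit trick is worth a worked example (values illustrative):

        u64 size = 0xfff00000;          /* writable size bits from the BAR probe */

        size = size & ~(size - 1);      /* lowest set bit: 0x00100000 = 1 MiB */

        /* resource spans [base, base + size - 1], e.g. 0xfe000000..0xfe0fffff */
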
@@ -348,6 +348,57 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
}
}
+static void pci_read_bridge_windows(struct pci_dev *bridge)
+{
+ u16 io;
+ u32 pmem, tmp;
+
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ if (!io) {
+ pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
+ }
+ if (io)
+ bridge->io_window = 1;
+
+ /*
+ * DECchip 21050 pass 2 errata: the bridge may miss an address
+ * disconnect boundary by one PCI data phase. Workaround: do not
+ * use prefetching on this device.
+ */
+ if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
+ return;
+
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ if (!pmem) {
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
+ 0xffe0fff0);
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
+ }
+ if (!pmem)
+ return;
+
+ bridge->pref_window = 1;
+
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
+
+ /*
+ * Bridge claims to have a 64-bit prefetchable memory
+ * window; verify that the upper bits are actually
+ * writable.
+ */
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
+ if (tmp)
+ bridge->pref_64_window = 1;
+ }
+}
+
static void pci_read_bridge_io(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
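
pci_read_bridge_windows() above relies on the classic writable-bits probe: write all ones, read back to see which bits stick, then restore the original value. The upper-32-bits check in isolation:

        u32 saved, probe;

        pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &saved);
        pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, 0xffffffff);
        pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &probe);
        pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, saved);

        if (probe)      /* some upper bits are implemented: 64-bit window */
                bridge->pref_64_window = 1;

Doing this once at enumeration time means pci_bridge_check_ranges() no longer has to touch the hardware on every rescan; it just consumes the cached io_window/pref_window/pref_64_window bits (see the setup-bus.c hunks below).
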
@@ -1728,9 +1779,6 @@ int pci_setup_device(struct pci_dev *dev)
break;
case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
- if (class != PCI_CLASS_BRIDGE_PCI)
- goto bad;
-
/*
* The PCI-to-PCI bridge spec requires that subtractive
* decoding (i.e. transparent) bridge must have programming
@@ -1739,6 +1787,7 @@ int pci_setup_device(struct pci_dev *dev)
pci_read_irq(dev);
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
+ pci_read_bridge_windows(dev);
set_pcie_hotplug_bridge(dev);
pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
if (pos) {
@@ -1856,8 +1905,6 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
hpp->latency_timer);
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
- if (hpp->enable_serr)
- pci_bctl |= PCI_BRIDGE_CTL_SERR;
if (hpp->enable_perr)
pci_bctl |= PCI_BRIDGE_CTL_PARITY;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
@@ -2071,11 +2118,8 @@ static void pci_configure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
- u32 cap;
struct pci_dev *bridge;
-
- if (!host->native_ltr)
- return;
+ u32 cap, ctl;
if (!pci_is_pcie(dev))
return;
@@ -2084,22 +2128,35 @@ static void pci_configure_ltr(struct pci_dev *dev)
if (!(cap & PCI_EXP_DEVCAP2_LTR))
return;
- /*
- * Software must not enable LTR in an Endpoint unless the Root
- * Complex and all intermediate Switches indicate support for LTR.
- * PCIe r3.1, sec 6.18.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- dev->ltr_path = 1;
- else {
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl);
+ if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
+ dev->ltr_path = 1;
+ return;
+ }
+
bridge = pci_upstream_bridge(dev);
if (bridge && bridge->ltr_path)
dev->ltr_path = 1;
+
+ return;
}
- if (dev->ltr_path)
+ if (!host->native_ltr)
+ return;
+
+ /*
+ * Software must not enable LTR in an Endpoint unless the Root
+ * Complex and all intermediate Switches indicate support for LTR.
+ * PCIe r4.0, sec 6.18.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ ((bridge = pci_upstream_bridge(dev)) &&
+ bridge->ltr_path)) {
pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
PCI_EXP_DEVCTL2_LTR_EN);
+ dev->ltr_path = 1;
+ }
#endif
}
@@ -2129,6 +2186,24 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
#endif
}
+static void pci_configure_serr(struct pci_dev *dev)
+{
+ u16 control;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+
+ /*
+ * A bridge will not forward ERR_ messages coming from an
+ * endpoint unless SERR# forwarding is enabled.
+ */
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &control);
+ if (!(control & PCI_BRIDGE_CTL_SERR)) {
+ control |= PCI_BRIDGE_CTL_SERR;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, control);
+ }
+ }
+}
+
static void pci_configure_device(struct pci_dev *dev)
{
struct hotplug_params hpp;
@@ -2139,6 +2214,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_relaxed_ordering(dev);
pci_configure_ltr(dev);
pci_configure_eetlp_prefix(dev);
+ pci_configure_serr(dev);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
@@ -2312,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
return dev;
}
-static void pcie_report_downtraining(struct pci_dev *dev)
+void pcie_report_downtraining(struct pci_dev *dev)
{
if (!pci_is_pcie(dev))
return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 56cd4c4a170c..eb0afc275901 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2139,7 +2139,7 @@ static void quirk_netmos(struct pci_dev *dev)
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return;
- /* else: fall through */
+ /* else, fall through */
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9845:
@@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
quirk_dma_func1_alias);
@@ -4520,6 +4522,8 @@ static const struct pci_dev_acs_enabled {
/* QCOM QDF2xxx root ports */
{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
+ /* HXT SD4800 root ports. The ACS design is the same as QCOM QDF2xxx */
+ { PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index ed960436df5e..ec44a0f3a7ac 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -735,58 +735,21 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
- u16 io;
- u32 pmem;
struct pci_dev *bridge = bus->self;
- struct resource *b_res;
+ struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
- b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
b_res[1].flags |= IORESOURCE_MEM;
- pci_read_config_word(bridge, PCI_IO_BASE, &io);
- if (!io) {
- pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
- pci_read_config_word(bridge, PCI_IO_BASE, &io);
- pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
- }
- if (io)
+ if (bridge->io_window)
b_res[0].flags |= IORESOURCE_IO;
- /* DECchip 21050 pass 2 errata: the bridge may miss an address
- disconnect boundary by one PCI data phase.
- Workaround: do not use prefetching on this device. */
- if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
- return;
-
- pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
- if (!pmem) {
- pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
- 0xffe0fff0);
- pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
- pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
- }
- if (pmem) {
+ if (bridge->pref_window) {
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
- PCI_PREF_RANGE_TYPE_64) {
+ if (bridge->pref_64_window) {
b_res[2].flags |= IORESOURCE_MEM_64;
b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
}
}
-
- /* double check if bridge does support 64 bit pref */
- if (b_res[2].flags & IORESOURCE_MEM_64) {
- u32 mem_base_hi, tmp;
- pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
- &mem_base_hi);
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
- 0xffffffff);
- pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
- if (!tmp)
- b_res[2].flags &= ~IORESOURCE_MEM_64;
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
- mem_base_hi);
- }
}
/* Helper function for sizing routines: find first available
@@ -1223,12 +1186,12 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (!b)
continue;
- switch (dev->class >> 8) {
- case PCI_CLASS_BRIDGE_CARDBUS:
+ switch (dev->hdr_type) {
+ case PCI_HEADER_TYPE_CARDBUS:
pci_bus_size_cardbus(b, realloc_head);
break;
- case PCI_CLASS_BRIDGE_PCI:
+ case PCI_HEADER_TYPE_BRIDGE:
default:
__pci_bus_size_bridges(b, realloc_head);
break;
@@ -1239,12 +1202,12 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (pci_is_root_bus(bus))
return;
- switch (bus->self->class >> 8) {
- case PCI_CLASS_BRIDGE_CARDBUS:
+ switch (bus->self->hdr_type) {
+ case PCI_HEADER_TYPE_CARDBUS:
/* don't size cardbuses yet. */
break;
- case PCI_CLASS_BRIDGE_PCI:
+ case PCI_HEADER_TYPE_BRIDGE:
pci_bridge_check_ranges(bus);
if (bus->self->is_hotplug_bridge) {
additional_io_size = pci_hotplug_io_size;
@@ -1393,13 +1356,13 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
__pci_bus_assign_resources(b, realloc_head, fail_head);
- switch (dev->class >> 8) {
- case PCI_CLASS_BRIDGE_PCI:
+ switch (dev->hdr_type) {
+ case PCI_HEADER_TYPE_BRIDGE:
if (!pci_is_enabled(dev))
pci_setup_bridge(b);
break;
- case PCI_CLASS_BRIDGE_CARDBUS:
+ case PCI_HEADER_TYPE_CARDBUS:
pci_setup_cardbus(b);
break;
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index af9bc178495d..a94e586a58b2 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -52,6 +52,15 @@ config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
+config ARM_SMMU_V3_PMU
+ tristate "ARM SMMUv3 Performance Monitors Extension"
+ depends on ARM64 && ACPI && ARM_SMMU_V3
+ help
+ Provides support for the ARM SMMUv3 Performance Monitor Counter
+ Groups (PMCG), which provide monitoring of transactions passing
+ through the SMMU and allow the resulting information to be filtered
+ based on the Stream ID of the corresponding master.
+
config ARM_DSU_PMU
tristate "ARM DynamIQ Shared Unit (DSU) PMU"
depends on ARM64
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 909f27fd9db3..30489941f3d6 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ARM_CCN) += arm-ccn.o
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
+obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index bfd03e023308..8f8606b9bc9e 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1684,21 +1684,24 @@ static int cci_pmu_probe(struct platform_device *pdev)
raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
mutex_init(&cci_pmu->reserve_mutex);
atomic_set(&cci_pmu->active_events, 0);
- cci_pmu->cpu = get_cpu();
-
- ret = cci_pmu_init(cci_pmu, pdev);
- if (ret) {
- put_cpu();
- return ret;
- }
+ cci_pmu->cpu = raw_smp_processor_id();
+ g_cci_pmu = cci_pmu;
cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
"perf/arm/cci:online", NULL,
cci_pmu_offline_cpu);
- put_cpu();
- g_cci_pmu = cci_pmu;
+
+ ret = cci_pmu_init(cci_pmu, pdev);
+ if (ret)
+ goto error_pmu_init;
+
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
return 0;
+
+error_pmu_init:
+ cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
+ g_cci_pmu = NULL;
+ return ret;
}
static int cci_pmu_remove(struct platform_device *pdev)
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 2ae76026e947..0bb52d9bdcf7 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
struct hrtimer hrtimer;
- cpumask_t cpu;
+ unsigned int cpu;
struct hlist_node node;
struct pmu pmu;
@@ -559,7 +559,7 @@ static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
{
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
- return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu);
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu));
}
static struct device_attribute arm_ccn_pmu_cpumask_attr =
@@ -759,7 +759,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
* mitigate this, we enforce CPU assignment to one, selected
* processor (the one described in the "cpumask" attribute).
*/
- event->cpu = cpumask_first(&ccn->dt.cpu);
+ event->cpu = ccn->dt.cpu;
node_xp = CCN_CONFIG_NODE(event->attr.config);
type = CCN_CONFIG_TYPE(event->attr.config);
@@ -1215,15 +1215,15 @@ static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
unsigned int target;
- if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
+ if (cpu != dt->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&dt->pmu, cpu, target);
- cpumask_set_cpu(target, &dt->cpu);
+ dt->cpu = target;
if (ccn->irq)
- WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
+ WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu)));
return 0;
}
@@ -1299,29 +1299,30 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
}
/* Pick one CPU which we will use to collect data from CCN... */
- cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
+ ccn->dt.cpu = raw_smp_processor_id();
/* Also make sure that the overflow interrupt is handled by this CPU */
if (ccn->irq) {
- err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
+ err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu));
if (err) {
dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
goto error_set_affinity;
}
}
+ cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ &ccn->dt.node);
+
err = perf_pmu_register(&ccn->dt.pmu, name, -1);
if (err)
goto error_pmu_register;
- cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
- &ccn->dt.node);
- put_cpu();
return 0;
error_pmu_register:
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ &ccn->dt.node);
error_set_affinity:
- put_cpu();
error_choose_name:
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
for (i = 0; i < ccn->num_xps; i++)
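
Both the CCI and CCN changes replace a cpumask_t that only ever held one bit with a plain CPU number, building a mask on demand via cpumask_of(). The hotplug migration callback then reduces to the usual pattern (a sketch with hypothetical struct names):

        static int my_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
        {
                struct my_pmu *p = container_of(node, struct my_pmu, node);
                unsigned int target;

                if (cpu != p->cpu)      /* not the CPU we collect on */
                        return 0;

                target = cpumask_any_but(cpu_online_mask, cpu);
                if (target >= nr_cpu_ids)
                        return 0;       /* no online CPU left to migrate to */

                perf_pmu_migrate_context(&p->pmu, cpu, target);
                p->cpu = target;
                return 0;
        }

The reordered cci_pmu_probe() also gains a proper unwind path: cpuhp_remove_state() undoes the earlier cpuhp_setup_state_nocalls() when cci_pmu_init() fails.
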
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
new file mode 100644
index 000000000000..da71c741cb46
--- /dev/null
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This driver adds support for perf events to use the Performance
+ * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
+ * to monitor that node.
+ *
+ * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
+ * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
+ * to 4K boundary. For example, the PMCG at 0xff88840000 is named
+ * smmuv3_pmcg_ff88840
+ *
+ * Filtering by stream id is done by specifying filtering parameters
+ * with the event. The options are:
+ * filter_enable - 0 = no filtering, 1 = filtering enabled
+ * filter_span - 0 = exact match, 1 = pattern match
+ * filter_stream_id - pattern to filter against
+ *
+ * To match a partial StreamID where the X most-significant bits must match
+ * but the Y least-significant bits might differ, STREAMID is programmed
+ * with a value that contains:
+ * STREAMID[Y - 1] == 0.
+ * STREAMID[Y - 2:0] == 1 (where Y > 1).
+ * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
+ * contain a value to match from the corresponding bits of event StreamID.
+ *
+ * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
+ * filter_span=1,filter_stream_id=0x42/ -a netperf
+ * Applies filter pattern 0x42 to transaction events, which means events
+ * matching stream ids 0x42 and 0x43 are counted. Further filtering
+ * information is available in the SMMU documentation.
+ *
+ * SMMU events are not attributable to a CPU, so task mode and sampling
+ * are not supported.
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/msi.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define SMMU_PMCG_EVCNTR0 0x0
+#define SMMU_PMCG_EVCNTR(n, stride) (SMMU_PMCG_EVCNTR0 + (n) * (stride))
+#define SMMU_PMCG_EVTYPER0 0x400
+#define SMMU_PMCG_EVTYPER(n) (SMMU_PMCG_EVTYPER0 + (n) * 4)
+#define SMMU_PMCG_SID_SPAN_SHIFT 29
+#define SMMU_PMCG_SMR0 0xA00
+#define SMMU_PMCG_SMR(n) (SMMU_PMCG_SMR0 + (n) * 4)
+#define SMMU_PMCG_CNTENSET0 0xC00
+#define SMMU_PMCG_CNTENCLR0 0xC20
+#define SMMU_PMCG_INTENSET0 0xC40
+#define SMMU_PMCG_INTENCLR0 0xC60
+#define SMMU_PMCG_OVSCLR0 0xC80
+#define SMMU_PMCG_OVSSET0 0xCC0
+#define SMMU_PMCG_CFGR 0xE00
+#define SMMU_PMCG_CFGR_SID_FILTER_TYPE BIT(23)
+#define SMMU_PMCG_CFGR_MSI BIT(21)
+#define SMMU_PMCG_CFGR_RELOC_CTRS BIT(20)
+#define SMMU_PMCG_CFGR_SIZE GENMASK(13, 8)
+#define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0)
+#define SMMU_PMCG_CR 0xE04
+#define SMMU_PMCG_CR_ENABLE BIT(0)
+#define SMMU_PMCG_CEID0 0xE20
+#define SMMU_PMCG_CEID1 0xE28
+#define SMMU_PMCG_IRQ_CTRL 0xE50
+#define SMMU_PMCG_IRQ_CTRL_IRQEN BIT(0)
+#define SMMU_PMCG_IRQ_CFG0 0xE58
+#define SMMU_PMCG_IRQ_CFG1 0xE60
+#define SMMU_PMCG_IRQ_CFG2 0xE64
+
+/* MSI config fields */
+#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
+#define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1
+
+#define SMMU_PMCG_DEFAULT_FILTER_SPAN 1
+#define SMMU_PMCG_DEFAULT_FILTER_SID GENMASK(31, 0)
+
+#define SMMU_PMCG_MAX_COUNTERS 64
+#define SMMU_PMCG_ARCH_MAX_EVENTS 128
+
+#define SMMU_PMCG_PA_SHIFT 12
+
+#define SMMU_PMCG_EVCNTR_RDONLY BIT(0)
+
+static int cpuhp_state_num;
+
+struct smmu_pmu {
+ struct hlist_node node;
+ struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
+ DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
+ DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
+ unsigned int irq;
+ unsigned int on_cpu;
+ struct pmu pmu;
+ unsigned int num_counters;
+ struct device *dev;
+ void __iomem *reg_base;
+ void __iomem *reloc_base;
+ u64 counter_mask;
+ u32 options;
+ bool global_filter;
+ u32 global_filter_span;
+ u32 global_filter_sid;
+};
+
+#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
+
+#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end) \
+ static inline u32 get_##_name(struct perf_event *event) \
+ { \
+ return FIELD_GET(GENMASK_ULL(_end, _start), \
+ event->attr._config); \
+ } \
+
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
+SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
+
+static inline void smmu_pmu_enable(struct pmu *pmu)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+
+ writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
+ smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+ writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
+}
+
+static inline void smmu_pmu_disable(struct pmu *pmu)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+
+ writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
+ writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+}
+
+static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
+ u32 idx, u64 value)
+{
+ if (smmu_pmu->counter_mask & BIT(32))
+ writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
+ else
+ writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
+}
+
+static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+ u64 value;
+
+ if (smmu_pmu->counter_mask & BIT(32))
+ value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
+ else
+ value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
+
+ return value;
+}
+
+static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+ writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
+}
+
+static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+ writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
+}
+
+static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
+{
+ writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
+}
+
+static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
+ u32 idx)
+{
+ writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
+}
+
+static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
+ u32 val)
+{
+ writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
+}
+
+static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
+{
+ writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
+}
+
+static void smmu_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ u64 delta, prev, now;
+ u32 idx = hwc->idx;
+
+ do {
+ prev = local64_read(&hwc->prev_count);
+ now = smmu_pmu_counter_get_value(smmu_pmu, idx);
+ } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
+
+ /* handle overflow. */
+ delta = now - prev;
+ delta &= smmu_pmu->counter_mask;
+
+ local64_add(delta, &event->count);
+}
+
+static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+ u64 new;
+
+ if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
+ /*
+ * On platforms that require this quirk, if the counter starts
+ * below half its range and wraps, the current overflow handling
+ * may not work. Such platforms are expected to implement the
+ * full 64 counter bits, making that possibility remote
+ * (e.g. HiSilicon HIP08).
+ */
+ new = smmu_pmu_counter_get_value(smmu_pmu, idx);
+ } else {
+ /*
+ * We limit the max period to half the max counter value
+ * of the counter size, so that even in the case of extreme
+ * interrupt latency the counter will (hopefully) not wrap
+ * past its initial value.
+ */
+ new = smmu_pmu->counter_mask >> 1;
+ smmu_pmu_counter_set_value(smmu_pmu, idx, new);
+ }
+
+ local64_set(&hwc->prev_count, new);
+}
+
+static void smmu_pmu_set_event_filter(struct perf_event *event,
+ int idx, u32 span, u32 sid)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ u32 evtyper;
+
+ evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
+ smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
+ smmu_pmu_set_smr(smmu_pmu, idx, sid);
+}
+
+static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
+ struct perf_event *event, int idx)
+{
+ u32 span, sid;
+ unsigned int num_ctrs = smmu_pmu->num_counters;
+ bool filter_en = !!get_filter_enable(event);
+
+ span = filter_en ? get_filter_span(event) :
+ SMMU_PMCG_DEFAULT_FILTER_SPAN;
+ sid = filter_en ? get_filter_stream_id(event) :
+ SMMU_PMCG_DEFAULT_FILTER_SID;
+
+ /* Support individual filter settings */
+ if (!smmu_pmu->global_filter) {
+ smmu_pmu_set_event_filter(event, idx, span, sid);
+ return 0;
+ }
+
+ /* Requested settings same as current global settings */
+ if (span == smmu_pmu->global_filter_span &&
+ sid == smmu_pmu->global_filter_sid)
+ return 0;
+
+ if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
+ return -EAGAIN;
+
+ smmu_pmu_set_event_filter(event, 0, span, sid);
+ smmu_pmu->global_filter_span = span;
+ smmu_pmu->global_filter_sid = sid;
+ return 0;
+}
+
+static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
+ struct perf_event *event)
+{
+ int idx, err;
+ unsigned int num_ctrs = smmu_pmu->num_counters;
+
+ idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
+ if (idx == num_ctrs)
+ /* The counters are all in use. */
+ return -EAGAIN;
+
+ err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
+ if (err)
+ return err;
+
+ set_bit(idx, smmu_pmu->used_counters);
+
+ return idx;
+}
+
+/*
+ * Implementation of abstract pmu functionality required by
+ * the core perf events code.
+ */
+
+static int smmu_pmu_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ struct device *dev = smmu_pmu->dev;
+ struct perf_event *sibling;
+ u16 event_id;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (hwc->sample_period) {
+ dev_dbg(dev, "Sampling not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0) {
+ dev_dbg(dev, "Per-task mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Verify specified event is supported on this PMU */
+ event_id = get_event(event);
+ if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
+ (!test_bit(event_id, smmu_pmu->supported_events))) {
+ dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
+ return -EINVAL;
+ }
+
+ /* Don't allow groups with mixed PMUs, except for s/w events */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader)) {
+ dev_dbg(dev, "Can't create mixed PMU group\n");
+ return -EINVAL;
+ }
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling)) {
+ dev_dbg(dev, "Can't create mixed PMU group\n");
+ return -EINVAL;
+ }
+ }
+
+ hwc->idx = -1;
+
+ /*
+ * Ensure all events run on the same cpu, and hence in the same
+ * cpu context, to avoid races on pmu_enable etc.
+ */
+ event->cpu = smmu_pmu->on_cpu;
+
+ return 0;
+}
+
+static void smmu_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ hwc->state = 0;
+
+ smmu_pmu_set_period(smmu_pmu, hwc);
+
+ smmu_pmu_counter_enable(smmu_pmu, idx);
+}
+
+static void smmu_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ smmu_pmu_counter_disable(smmu_pmu, idx);
+ /* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
+ smmu_pmu_event_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int smmu_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+
+ idx = smmu_pmu_get_event_idx(smmu_pmu, event);
+ if (idx < 0)
+ return idx;
+
+ hwc->idx = idx;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ smmu_pmu->events[idx] = event;
+ local64_set(&hwc->prev_count, 0);
+
+ smmu_pmu_interrupt_enable(smmu_pmu, idx);
+
+ if (flags & PERF_EF_START)
+ smmu_pmu_event_start(event, flags);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void smmu_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
+ int idx = hwc->idx;
+
+ smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
+ smmu_pmu_interrupt_disable(smmu_pmu, idx);
+ smmu_pmu->events[idx] = NULL;
+ clear_bit(idx, smmu_pmu->used_counters);
+
+ perf_event_update_userpage(event);
+}
+
+static void smmu_pmu_event_read(struct perf_event *event)
+{
+ smmu_pmu_event_update(event);
+}
+
+/* cpumask */
+
+static ssize_t smmu_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
+}
+
+static struct device_attribute smmu_pmu_cpumask_attr =
+ __ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);
+
+static struct attribute *smmu_pmu_cpumask_attrs[] = {
+ &smmu_pmu_cpumask_attr.attr,
+ NULL
+};
+
+static struct attribute_group smmu_pmu_cpumask_group = {
+ .attrs = smmu_pmu_cpumask_attrs,
+};
+
+/* Events */
+
+static ssize_t smmu_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define SMMU_EVENT_ATTR(name, config) \
+ PMU_EVENT_ATTR(name, smmu_event_attr_##name, \
+ config, smmu_pmu_event_show)
+SMMU_EVENT_ATTR(cycles, 0);
+SMMU_EVENT_ATTR(transaction, 1);
+SMMU_EVENT_ATTR(tlb_miss, 2);
+SMMU_EVENT_ATTR(config_cache_miss, 3);
+SMMU_EVENT_ATTR(trans_table_walk_access, 4);
+SMMU_EVENT_ATTR(config_struct_access, 5);
+SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6);
+SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7);
+
+static struct attribute *smmu_pmu_events[] = {
+ &smmu_event_attr_cycles.attr.attr,
+ &smmu_event_attr_transaction.attr.attr,
+ &smmu_event_attr_tlb_miss.attr.attr,
+ &smmu_event_attr_config_cache_miss.attr.attr,
+ &smmu_event_attr_trans_table_walk_access.attr.attr,
+ &smmu_event_attr_config_struct_access.attr.attr,
+ &smmu_event_attr_pcie_ats_trans_rq.attr.attr,
+ &smmu_event_attr_pcie_ats_trans_passed.attr.attr,
+ NULL
+};
+
+static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
+
+ if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
+ return attr->mode;
+
+ return 0;
+}
+
+static struct attribute_group smmu_pmu_events_group = {
+ .name = "events",
+ .attrs = smmu_pmu_events,
+ .is_visible = smmu_pmu_event_is_visible,
+};
+
+/* Formats */
+PMU_FORMAT_ATTR(event, "config:0-15");
+PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
+PMU_FORMAT_ATTR(filter_span, "config1:32");
+PMU_FORMAT_ATTR(filter_enable, "config1:33");
+
+static struct attribute *smmu_pmu_formats[] = {
+ &format_attr_event.attr,
+ &format_attr_filter_stream_id.attr,
+ &format_attr_filter_span.attr,
+ &format_attr_filter_enable.attr,
+ NULL
+};
+
+static struct attribute_group smmu_pmu_format_group = {
+ .name = "format",
+ .attrs = smmu_pmu_formats,
+};
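
The format strings above tell perf how to pack perf_event_attr for this PMU: the event number goes in config[15:0] and the filter fields in config1, which the driver decodes with its get_event()/get_filter_*() accessors. A hedged user-side sketch of the packing (helper name hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static void smmu_pmu_pack_attr(uint64_t *config, uint64_t *config1,
                                   uint16_t event, uint32_t stream_id,
                                   bool span, bool enable)
    {
            *config = event;                     /* event     -> config:0-15  */
            *config1 = (uint64_t)stream_id       /* stream ID -> config1:0-31 */
                     | ((uint64_t)span << 32)    /* span      -> config1:32   */
                     | ((uint64_t)enable << 33); /* enable    -> config1:33   */
    }

    int main(void)
    {
            uint64_t config, config1;

            /* e.g. tlb_miss (event 2) filtered to stream ID 0x42 */
            smmu_pmu_pack_attr(&config, &config1, 2, 0x42, false, true);
            printf("config=%#llx config1=%#llx\n",
                   (unsigned long long)config, (unsigned long long)config1);
            return 0;
    }

Given the event list above, event 2 is tlb_miss, so this pair corresponds to counting TLB misses for stream ID 0x42 with filtering enabled.
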
+
+static const struct attribute_group *smmu_pmu_attr_grps[] = {
+ &smmu_pmu_cpumask_group,
+ &smmu_pmu_events_group,
+ &smmu_pmu_format_group,
+ NULL
+};
+
+/*
+ * Generic device handlers
+ */
+
+static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct smmu_pmu *smmu_pmu;
+ unsigned int target;
+
+ smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
+ if (cpu != smmu_pmu->on_cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
+ smmu_pmu->on_cpu = target;
+ WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target)));
+
+ return 0;
+}
+
+static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
+{
+ struct smmu_pmu *smmu_pmu = data;
+ u64 ovsr;
+ unsigned int idx;
+
+ ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
+ if (!ovsr)
+ return IRQ_NONE;
+
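+ /*
+ * Clear the latched overflow bits up front, so that a counter that
+ * overflows while we service the others re-latches its bit and
+ * re-raises the interrupt instead of being lost.
+ */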
+ writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
+
+ for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
+ struct perf_event *event = smmu_pmu->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (WARN_ON_ONCE(!event))
+ continue;
+
+ smmu_pmu_event_update(event);
+ hwc = &event->hw;
+
+ smmu_pmu_set_period(smmu_pmu, hwc);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void smmu_pmu_free_msis(void *data)
+{
+ struct device *dev = data;
+
+ platform_msi_domain_free_irqs(dev);
+}
+
+static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ phys_addr_t doorbell;
+ struct device *dev = msi_desc_to_dev(desc);
+ struct smmu_pmu *pmu = dev_get_drvdata(dev);
+
+ doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
+ doorbell &= MSI_CFG0_ADDR_MASK;
+
+ writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
+ writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
+ writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
+ pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
+}
+
+static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
+{
+ struct msi_desc *desc;
+ struct device *dev = pmu->dev;
+ int ret;
+
+ /* Clear MSI address reg */
+ writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
+
+ /* Nothing more to do if the PMCG does not support MSIs */
+ if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
+ return;
+
+ ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
+ if (ret) {
+ dev_warn(dev, "failed to allocate MSIs\n");
+ return;
+ }
+
+ desc = first_msi_entry(dev);
+ if (desc)
+ pmu->irq = desc->irq;
+
+ /* Add callback to free MSIs on teardown */
+ devm_add_action(dev, smmu_pmu_free_msis, dev);
+}
+
+static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
+{
+ unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
+ int irq, ret = -ENXIO;
+
+ smmu_pmu_setup_msi(pmu);
+
+ irq = pmu->irq;
+ if (irq)
+ ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
+ flags, "smmuv3-pmu", pmu);
+ return ret;
+}
+
+static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
+{
+ u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);
+
+ smmu_pmu_disable(&smmu_pmu->pmu);
+
+ /* Disable all counters and interrupts, and clear any pending overflows */
+ writeq_relaxed(counter_present_mask,
+ smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
+ writeq_relaxed(counter_present_mask,
+ smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
+ writeq_relaxed(counter_present_mask,
+ smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
+}
+
+static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
+{
+ u32 model;
+
+ model = *(u32 *)dev_get_platdata(smmu_pmu->dev);
+
+ switch (model) {
+ case IORT_SMMU_V3_PMCG_HISI_HIP08:
+ /* HiSilicon Erratum 162001800 */
+ smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
+ break;
+ }
+
+ dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
+}
+
+static int smmu_pmu_probe(struct platform_device *pdev)
+{
+ struct smmu_pmu *smmu_pmu;
+ struct resource *res_0, *res_1;
+ u32 cfgr, reg_size;
+ u64 ceid_64[2];
+ int irq, err;
+ char *name;
+ struct device *dev = &pdev->dev;
+
+ smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
+ if (!smmu_pmu)
+ return -ENOMEM;
+
+ smmu_pmu->dev = dev;
+ platform_set_drvdata(pdev, smmu_pmu);
+
+ smmu_pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = smmu_pmu_enable,
+ .pmu_disable = smmu_pmu_disable,
+ .event_init = smmu_pmu_event_init,
+ .add = smmu_pmu_event_add,
+ .del = smmu_pmu_event_del,
+ .start = smmu_pmu_event_start,
+ .stop = smmu_pmu_event_stop,
+ .read = smmu_pmu_event_read,
+ .attr_groups = smmu_pmu_attr_grps,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
+ if (IS_ERR(smmu_pmu->reg_base))
+ return PTR_ERR(smmu_pmu->reg_base);
+
+ cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
+
+ /* Determine if page 1 is present */
+ if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
+ res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
+ if (IS_ERR(smmu_pmu->reloc_base))
+ return PTR_ERR(smmu_pmu->reloc_base);
+ } else {
+ smmu_pmu->reloc_base = smmu_pmu->reg_base;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq > 0)
+ smmu_pmu->irq = irq;
+
+ ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
+ ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
+ bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
+ SMMU_PMCG_ARCH_MAX_EVENTS);
+
+ smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;
+
+ smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
+
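+ /*
+ * CFGR.SIZE holds the counter size in bits minus one, so this mask
+ * covers exactly the implemented counter bits [SIZE:0].
+ */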
+ reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
+ smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
+
+ smmu_pmu_reset(smmu_pmu);
+
+ err = smmu_pmu_setup_irq(smmu_pmu);
+ if (err) {
+ dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
+ return err;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
+ (res_0->start) >> SMMU_PMCG_PA_SHIFT);
+ if (!name) {
+ dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
+ return -EINVAL;
+ }
+
+ smmu_pmu_get_acpi_options(smmu_pmu);
+
+ /* Pick one CPU as the preferred one for handling events and the IRQ */
+ smmu_pmu->on_cpu = raw_smp_processor_id();
+ WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
+ cpumask_of(smmu_pmu->on_cpu)));
+
+ err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
+ &smmu_pmu->node);
+ if (err) {
+ dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
+ err, &res_0->start);
+ return err;
+ }
+
+ err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
+ if (err) {
+ dev_err(dev, "Error %d registering PMU @%pa\n",
+ err, &res_0->start);
+ goto out_unregister;
+ }
+
+ dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
+ &res_0->start, smmu_pmu->num_counters,
+ smmu_pmu->global_filter ? "Global(Counter0)" :
+ "Individual");
+
+ return 0;
+
+out_unregister:
+ cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
+ return err;
+}
+
+static int smmu_pmu_remove(struct platform_device *pdev)
+{
+ struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&smmu_pmu->pmu);
+ cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
+
+ return 0;
+}
+
+static void smmu_pmu_shutdown(struct platform_device *pdev)
+{
+ struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
+
+ smmu_pmu_disable(&smmu_pmu->pmu);
+}
+
+static struct platform_driver smmu_pmu_driver = {
+ .driver = {
+ .name = "arm-smmu-v3-pmcg",
+ },
+ .probe = smmu_pmu_probe,
+ .remove = smmu_pmu_remove,
+ .shutdown = smmu_pmu_shutdown,
+};
+
+static int __init arm_smmu_pmu_init(void)
+{
+ cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/arm/pmcg:online",
+ NULL,
+ smmu_pmu_offline_cpu);
+ if (cpuhp_state_num < 0)
+ return cpuhp_state_num;
+
+ return platform_driver_register(&smmu_pmu_driver);
+}
+module_init(arm_smmu_pmu_init);
+
+static void __exit arm_smmu_pmu_exit(void)
+{
+ platform_driver_unregister(&smmu_pmu_driver);
+ cpuhp_remove_multi_state(cpuhp_state_num);
+}
+
+module_exit(arm_smmu_pmu_exit);
+
+MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
+MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
+MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index d4ec04868d59..27574e85f351 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1052,7 +1052,6 @@ static void xgene_perf_start(struct perf_event *event, int flags)
static void xgene_perf_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hw = &event->hw;
- u64 config;
if (hw->state & PERF_HES_UPTODATE)
return;
@@ -1064,7 +1063,6 @@ static void xgene_perf_stop(struct perf_event *event, int flags)
if (hw->state & PERF_HES_UPTODATE)
return;
- config = hw->config;
xgene_perf_read(event);
hw->state |= PERF_HES_UPTODATE;
}
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 5163097b43df..4bbd9ede38c8 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
int new_mode;
- if (phy->index != 0)
+ if (phy->index != 0) {
+ if (mode == PHY_MODE_USB_HOST)
+ return 0;
return -EINVAL;
+ }
switch (mode) {
case PHY_MODE_USB_HOST:
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index f180aa44a422..183d1ffe6a75 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -341,6 +341,7 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
.set = bcm2835_gpio_set,
+ .set_config = gpiochip_generic_config,
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
.can_sleep = false,
@@ -960,7 +961,7 @@ static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev,
break;
default:
- return -EINVAL;
+ return -ENOTSUPP;
} /* switch param type */
} /* for each config */
@@ -969,6 +970,7 @@ static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev,
}
static const struct pinconf_ops bcm2835_pinconf_ops = {
+ .is_generic = true,
.pin_config_get = bcm2835_pinconf_get,
.pin_config_set = bcm2835_pinconf_set,
};
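
Why -ENOTSUPP rather than -EINVAL here: with .is_generic set, callers of generic pinconf (gpiolib's set_config path among them) conventionally treat -ENOTSUPP as "this driver does not implement that parameter" and can fall back gracefully, while -EINVAL reads as a hard failure. A standalone model of that caller-side convention (names and the param encoding are hypothetical):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical driver hook: supports pulls, not debounce. */
    static int pin_config_set(int pin, int param)
    {
            return param == 1 /* "debounce" */ ? -ENOTSUPP : 0;
    }

    /* Caller-side convention: -ENOTSUPP means "try something else". */
    static int apply_param(int pin, int param)
    {
            int err = pin_config_set(pin, param);

            if (err == -ENOTSUPP) {
                    printf("param %d unsupported on pin %d, falling back\n",
                           param, pin);
                    return 0;
            }
            return err;     /* 0 on success, or a hard failure */
    }

    int main(void)
    {
            return apply_param(4, 1);
    }
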
diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c
index d2bb811fc5fa..44f8ccdbeeff 100644
--- a/drivers/pinctrl/berlin/pinctrl-as370.c
+++ b/drivers/pinctrl/berlin/pinctrl-as370.c
@@ -36,13 +36,13 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("I2S1_DO2", 0x0, 0x3, 0x0c,
BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* CORE RSTB */
BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO2 */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm4"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM4 */
BERLIN_PINCTRL_FUNCTION(0x3, "gpio"), /* GPIO4 */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG4 */
BERLIN_PINCTRL_GROUP("I2S1_DO3", 0x0, 0x3, 0x0f,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */
BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm5"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */
BERLIN_PINCTRL_FUNCTION(0x3, "spdifib"), /* SPDIFIB */
BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */
@@ -61,24 +61,24 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("I2S2_DI0", 0x0, 0x3, 0x1b,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO9 */
BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI0 */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm2"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM2 */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG9 */
BERLIN_PINCTRL_GROUP("I2S2_DI1", 0x4, 0x3, 0x00,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO10 */
BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI1 */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm3"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM3 */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG10 */
BERLIN_PINCTRL_GROUP("I2S2_DI2", 0x4, 0x3, 0x03,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO11 */
BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI2 */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM6 */
BERLIN_PINCTRL_FUNCTION(0x3, "spdific"), /* SPDIFIC */
BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG11 */
BERLIN_PINCTRL_GROUP("I2S2_DI3", 0x4, 0x3, 0x06,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO12 */
BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI3 */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM7 */
BERLIN_PINCTRL_FUNCTION(0x3, "spdifia"), /* SPDIFIA */
BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG12 */
@@ -98,14 +98,14 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("PDM_DI2", 0x4, 0x3, 0x12,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO16 */
BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI2 */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm4"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM4 */
BERLIN_PINCTRL_FUNCTION(0x3, "spdifid"), /* SPDIFID */
BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG16 */
BERLIN_PINCTRL_GROUP("PDM_DI3", 0x4, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO17 */
BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI3 */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm5"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */
BERLIN_PINCTRL_FUNCTION(0x3, "spdifi"), /* SPDIFI */
BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG17 */
@@ -139,11 +139,11 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA7 */
BERLIN_PINCTRL_GROUP("NAND_ALE", 0x8, 0x3, 0x12,
BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* ALE */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM6 */
BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO18 */
BERLIN_PINCTRL_GROUP("NAND_CLE", 0x8, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CLE */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM7 */
BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO19 */
BERLIN_PINCTRL_GROUP("NAND_WEn", 0x8, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WEn */
@@ -169,12 +169,12 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("SPI1_SS1n", 0xc, 0x3, 0x0c,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS1n */
BERLIN_PINCTRL_FUNCTION(0x2, "gpio"), /* GPIO26 */
- BERLIN_PINCTRL_FUNCTION(0x3, "pwm2")),
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM2 */
BERLIN_PINCTRL_GROUP("SPI1_SS2n", 0xc, 0x3, 0x0f,
BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* RXD */
BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS2n */
BERLIN_PINCTRL_FUNCTION(0x2, "gpio"), /* GPIO27 */
- BERLIN_PINCTRL_FUNCTION(0x3, "pwm3")),
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM3 */
BERLIN_PINCTRL_GROUP("SPI1_SS3n", 0xc, 0x3, 0x12,
BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* TXD */
BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS3n */
@@ -182,11 +182,11 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("SPI1_SCLK", 0xc, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SCLK */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO29 */
- BERLIN_PINCTRL_FUNCTION(0x3, "pwm4")),
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM4 */
BERLIN_PINCTRL_GROUP("SPI1_SDO", 0xc, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDO */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO30 */
- BERLIN_PINCTRL_FUNCTION(0x3, "pwm5")),
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM5 */
BERLIN_PINCTRL_GROUP("SPI1_SDI", 0xc, 0x3, 0x1b,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDI */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* GPIO31 */
@@ -209,51 +209,51 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("TMS", 0x10, 0x3, 0x0f,
BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TMS */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
- BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")),
+ BERLIN_PINCTRL_FUNCTION(0x4, "pwm")), /* PWM0 */
BERLIN_PINCTRL_GROUP("TDI", 0x10, 0x3, 0x12,
BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDI */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO38 */
- BERLIN_PINCTRL_FUNCTION(0x4, "pwm1")),
+ BERLIN_PINCTRL_FUNCTION(0x4, "pwm")), /* PWM1 */
BERLIN_PINCTRL_GROUP("TDO", 0x10, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDO */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO39 */
- BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")),
+ BERLIN_PINCTRL_FUNCTION(0x4, "pwm")), /* PWM0 */
BERLIN_PINCTRL_GROUP("PWM6", 0x10, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO40 */
- BERLIN_PINCTRL_FUNCTION(0x1, "pwm6")),
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM6 */
BERLIN_PINCTRL_GROUP("PWM7", 0x10, 0x3, 0x1b,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO41 */
- BERLIN_PINCTRL_FUNCTION(0x1, "pwm7")),
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM7 */
BERLIN_PINCTRL_GROUP("PWM0", 0x14, 0x3, 0x00,
BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* VDDCPUSOC RSTB */
- BERLIN_PINCTRL_FUNCTION(0x1, "pwm0"),
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm"), /* PWM0 */
BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO42 */
BERLIN_PINCTRL_GROUP("PWM1", 0x14, 0x3, 0x03,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO43 */
- BERLIN_PINCTRL_FUNCTION(0x1, "pwm1")),
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM1 */
BERLIN_PINCTRL_GROUP("PWM2", 0x14, 0x3, 0x06,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO44 */
- BERLIN_PINCTRL_FUNCTION(0x1, "pwm2")),
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM2 */
BERLIN_PINCTRL_GROUP("PWM3", 0x14, 0x3, 0x09,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO45 */
- BERLIN_PINCTRL_FUNCTION(0x1, "pwm3")),
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM3 */
BERLIN_PINCTRL_GROUP("PWM4", 0x14, 0x3, 0x0c,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO46 */
- BERLIN_PINCTRL_FUNCTION(0x1, "pwm4")),
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM4 */
BERLIN_PINCTRL_GROUP("PWM5", 0x14, 0x3, 0x0f,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO47 */
- BERLIN_PINCTRL_FUNCTION(0x1, "pwm5")),
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM5 */
BERLIN_PINCTRL_GROUP("URT1_RTSn", 0x14, 0x3, 0x12,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO48 */
BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* RTSn */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM6 */
BERLIN_PINCTRL_FUNCTION(0x3, "tw1a"), /* SCL */
BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG0 */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG18 */
BERLIN_PINCTRL_GROUP("URT1_CTSn", 0x14, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO49 */
BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* CTSn */
- BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM7 */
BERLIN_PINCTRL_FUNCTION(0x3, "tw1a"), /* SDA */
BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG1 */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG19 */
@@ -308,11 +308,11 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("SD0_CDn", 0x1c, 0x3, 0x00,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO62 */
BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CDn */
- BERLIN_PINCTRL_FUNCTION(0x3, "pwm2")),
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM2 */
BERLIN_PINCTRL_GROUP("SD0_WP", 0x1c, 0x3, 0x03,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO63 */
BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* WP */
- BERLIN_PINCTRL_FUNCTION(0x3, "pwm3")),
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM3 */
};
static const struct berlin_pinctrl_desc as370_soc_pinctrl_data = {
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index a5dda832024a..7c9694593f79 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 72b869d888e2..0d8387851b87 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -122,6 +122,13 @@ config PINCTRL_IMX7ULP
help
Say Y here to enable the imx7ulp pinctrl driver
+config PINCTRL_IMX8MM
+ bool "IMX8MM pinctrl driver"
+ depends on ARCH_MXC && ARM64
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx8mm pinctrl driver
+
config PINCTRL_IMX8MQ
bool "IMX8MQ pinctrl driver"
depends on ARCH_MXC && ARM64
@@ -129,9 +136,16 @@ config PINCTRL_IMX8MQ
help
Say Y here to enable the imx8mq pinctrl driver
+config PINCTRL_IMX8QM
+ bool "IMX8QM pinctrl driver"
+ depends on IMX_SCU && ARCH_MXC && ARM64
+ select PINCTRL_IMX_SCU
+ help
+ Say Y here to enable the imx8qm pinctrl driver
+
config PINCTRL_IMX8QXP
bool "IMX8QXP pinctrl driver"
- depends on ARCH_MXC && ARM64
+ depends on IMX_SCU && ARCH_MXC && ARM64
select PINCTRL_IMX_SCU
help
Say Y here to enable the imx8qxp pinctrl driver
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 6ee398a3e406..02020a76bd9c 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -18,7 +18,9 @@ obj-$(CONFIG_PINCTRL_IMX6SX) += pinctrl-imx6sx.o
obj-$(CONFIG_PINCTRL_IMX6UL) += pinctrl-imx6ul.o
obj-$(CONFIG_PINCTRL_IMX7D) += pinctrl-imx7d.o
obj-$(CONFIG_PINCTRL_IMX7ULP) += pinctrl-imx7ulp.o
+obj-$(CONFIG_PINCTRL_IMX8MM) += pinctrl-imx8mm.o
obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o
+obj-$(CONFIG_PINCTRL_IMX8QM) += pinctrl-imx8qm.o
obj-$(CONFIG_PINCTRL_IMX8QXP) += pinctrl-imx8qxp.o
obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mm.c b/drivers/pinctrl/freescale/pinctrl-imx8mm.c
new file mode 100644
index 000000000000..6d1038af59f4
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mm.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017-2018 NXP
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+enum imx8mm_pads {
+ MX8MM_PAD_RESERVE0 = 0,
+ MX8MM_PAD_RESERVE1 = 1,
+ MX8MM_PAD_RESERVE2 = 2,
+ MX8MM_PAD_RESERVE3 = 3,
+ MX8MM_PAD_RESERVE4 = 4,
+ MX8MM_PAD_RESERVE5 = 5,
+ MX8MM_PAD_RESERVE6 = 6,
+ MX8MM_PAD_RESERVE7 = 7,
+ MX8MM_PAD_RESERVE8 = 8,
+ MX8MM_PAD_RESERVE9 = 9,
+ MX8MM_IOMUXC_GPIO1_IO00 = 10,
+ MX8MM_IOMUXC_GPIO1_IO01 = 11,
+ MX8MM_IOMUXC_GPIO1_IO02 = 12,
+ MX8MM_IOMUXC_GPIO1_IO03 = 13,
+ MX8MM_IOMUXC_GPIO1_IO04 = 14,
+ MX8MM_IOMUXC_GPIO1_IO05 = 15,
+ MX8MM_IOMUXC_GPIO1_IO06 = 16,
+ MX8MM_IOMUXC_GPIO1_IO07 = 17,
+ MX8MM_IOMUXC_GPIO1_IO08 = 18,
+ MX8MM_IOMUXC_GPIO1_IO09 = 19,
+ MX8MM_IOMUXC_GPIO1_IO10 = 20,
+ MX8MM_IOMUXC_GPIO1_IO11 = 21,
+ MX8MM_IOMUXC_GPIO1_IO12 = 22,
+ MX8MM_IOMUXC_GPIO1_IO13 = 23,
+ MX8MM_IOMUXC_GPIO1_IO14 = 24,
+ MX8MM_IOMUXC_GPIO1_IO15 = 25,
+ MX8MM_IOMUXC_ENET_MDC = 26,
+ MX8MM_IOMUXC_ENET_MDIO = 27,
+ MX8MM_IOMUXC_ENET_TD3 = 28,
+ MX8MM_IOMUXC_ENET_TD2 = 29,
+ MX8MM_IOMUXC_ENET_TD1 = 30,
+ MX8MM_IOMUXC_ENET_TD0 = 31,
+ MX8MM_IOMUXC_ENET_TX_CTL = 32,
+ MX8MM_IOMUXC_ENET_TXC = 33,
+ MX8MM_IOMUXC_ENET_RX_CTL = 34,
+ MX8MM_IOMUXC_ENET_RXC = 35,
+ MX8MM_IOMUXC_ENET_RD0 = 36,
+ MX8MM_IOMUXC_ENET_RD1 = 37,
+ MX8MM_IOMUXC_ENET_RD2 = 38,
+ MX8MM_IOMUXC_ENET_RD3 = 39,
+ MX8MM_IOMUXC_SD1_CLK = 40,
+ MX8MM_IOMUXC_SD1_CMD = 41,
+ MX8MM_IOMUXC_SD1_DATA0 = 42,
+ MX8MM_IOMUXC_SD1_DATA1 = 43,
+ MX8MM_IOMUXC_SD1_DATA2 = 44,
+ MX8MM_IOMUXC_SD1_DATA3 = 45,
+ MX8MM_IOMUXC_SD1_DATA4 = 46,
+ MX8MM_IOMUXC_SD1_DATA5 = 47,
+ MX8MM_IOMUXC_SD1_DATA6 = 48,
+ MX8MM_IOMUXC_SD1_DATA7 = 49,
+ MX8MM_IOMUXC_SD1_RESET_B = 50,
+ MX8MM_IOMUXC_SD1_STROBE = 51,
+ MX8MM_IOMUXC_SD2_CD_B = 52,
+ MX8MM_IOMUXC_SD2_CLK = 53,
+ MX8MM_IOMUXC_SD2_CMD = 54,
+ MX8MM_IOMUXC_SD2_DATA0 = 55,
+ MX8MM_IOMUXC_SD2_DATA1 = 56,
+ MX8MM_IOMUXC_SD2_DATA2 = 57,
+ MX8MM_IOMUXC_SD2_DATA3 = 58,
+ MX8MM_IOMUXC_SD2_RESET_B = 59,
+ MX8MM_IOMUXC_SD2_WP = 60,
+ MX8MM_IOMUXC_NAND_ALE = 61,
+ MX8MM_IOMUXC_NAND_CE0 = 62,
+ MX8MM_IOMUXC_NAND_CE1 = 63,
+ MX8MM_IOMUXC_NAND_CE2 = 64,
+ MX8MM_IOMUXC_NAND_CE3 = 65,
+ MX8MM_IOMUXC_NAND_CLE = 66,
+ MX8MM_IOMUXC_NAND_DATA00 = 67,
+ MX8MM_IOMUXC_NAND_DATA01 = 68,
+ MX8MM_IOMUXC_NAND_DATA02 = 69,
+ MX8MM_IOMUXC_NAND_DATA03 = 70,
+ MX8MM_IOMUXC_NAND_DATA04 = 71,
+ MX8MM_IOMUXC_NAND_DATA05 = 72,
+ MX8MM_IOMUXC_NAND_DATA06 = 73,
+ MX8MM_IOMUXC_NAND_DATA07 = 74,
+ MX8MM_IOMUXC_NAND_DQS = 75,
+ MX8MM_IOMUXC_NAND_RE_B = 76,
+ MX8MM_IOMUXC_NAND_READY_B = 77,
+ MX8MM_IOMUXC_NAND_WE_B = 78,
+ MX8MM_IOMUXC_NAND_WP_B = 79,
+ MX8MM_IOMUXC_SAI5_RXFS = 80,
+ MX8MM_IOMUXC_SAI5_RXC = 81,
+ MX8MM_IOMUXC_SAI5_RXD0 = 82,
+ MX8MM_IOMUXC_SAI5_RXD1 = 83,
+ MX8MM_IOMUXC_SAI5_RXD2 = 84,
+ MX8MM_IOMUXC_SAI5_RXD3 = 85,
+ MX8MM_IOMUXC_SAI5_MCLK = 86,
+ MX8MM_IOMUXC_SAI1_RXFS = 87,
+ MX8MM_IOMUXC_SAI1_RXC = 88,
+ MX8MM_IOMUXC_SAI1_RXD0 = 89,
+ MX8MM_IOMUXC_SAI1_RXD1 = 90,
+ MX8MM_IOMUXC_SAI1_RXD2 = 91,
+ MX8MM_IOMUXC_SAI1_RXD3 = 92,
+ MX8MM_IOMUXC_SAI1_RXD4 = 93,
+ MX8MM_IOMUXC_SAI1_RXD5 = 94,
+ MX8MM_IOMUXC_SAI1_RXD6 = 95,
+ MX8MM_IOMUXC_SAI1_RXD7 = 96,
+ MX8MM_IOMUXC_SAI1_TXFS = 97,
+ MX8MM_IOMUXC_SAI1_TXC = 98,
+ MX8MM_IOMUXC_SAI1_TXD0 = 99,
+ MX8MM_IOMUXC_SAI1_TXD1 = 100,
+ MX8MM_IOMUXC_SAI1_TXD2 = 101,
+ MX8MM_IOMUXC_SAI1_TXD3 = 102,
+ MX8MM_IOMUXC_SAI1_TXD4 = 103,
+ MX8MM_IOMUXC_SAI1_TXD5 = 104,
+ MX8MM_IOMUXC_SAI1_TXD6 = 105,
+ MX8MM_IOMUXC_SAI1_TXD7 = 106,
+ MX8MM_IOMUXC_SAI1_MCLK = 107,
+ MX8MM_IOMUXC_SAI2_RXFS = 108,
+ MX8MM_IOMUXC_SAI2_RXC = 109,
+ MX8MM_IOMUXC_SAI2_RXD0 = 110,
+ MX8MM_IOMUXC_SAI2_TXFS = 111,
+ MX8MM_IOMUXC_SAI2_TXC = 112,
+ MX8MM_IOMUXC_SAI2_TXD0 = 113,
+ MX8MM_IOMUXC_SAI2_MCLK = 114,
+ MX8MM_IOMUXC_SAI3_RXFS = 115,
+ MX8MM_IOMUXC_SAI3_RXC = 116,
+ MX8MM_IOMUXC_SAI3_RXD = 117,
+ MX8MM_IOMUXC_SAI3_TXFS = 118,
+ MX8MM_IOMUXC_SAI3_TXC = 119,
+ MX8MM_IOMUXC_SAI3_TXD = 120,
+ MX8MM_IOMUXC_SAI3_MCLK = 121,
+ MX8MM_IOMUXC_SPDIF_TX = 122,
+ MX8MM_IOMUXC_SPDIF_RX = 123,
+ MX8MM_IOMUXC_SPDIF_EXT_CLK = 124,
+ MX8MM_IOMUXC_ECSPI1_SCLK = 125,
+ MX8MM_IOMUXC_ECSPI1_MOSI = 126,
+ MX8MM_IOMUXC_ECSPI1_MISO = 127,
+ MX8MM_IOMUXC_ECSPI1_SS0 = 128,
+ MX8MM_IOMUXC_ECSPI2_SCLK = 129,
+ MX8MM_IOMUXC_ECSPI2_MOSI = 130,
+ MX8MM_IOMUXC_ECSPI2_MISO = 131,
+ MX8MM_IOMUXC_ECSPI2_SS0 = 132,
+ MX8MM_IOMUXC_I2C1_SCL = 133,
+ MX8MM_IOMUXC_I2C1_SDA = 134,
+ MX8MM_IOMUXC_I2C2_SCL = 135,
+ MX8MM_IOMUXC_I2C2_SDA = 136,
+ MX8MM_IOMUXC_I2C3_SCL = 137,
+ MX8MM_IOMUXC_I2C3_SDA = 138,
+ MX8MM_IOMUXC_I2C4_SCL = 139,
+ MX8MM_IOMUXC_I2C4_SDA = 140,
+ MX8MM_IOMUXC_UART1_RXD = 141,
+ MX8MM_IOMUXC_UART1_TXD = 142,
+ MX8MM_IOMUXC_UART2_RXD = 143,
+ MX8MM_IOMUXC_UART2_TXD = 144,
+ MX8MM_IOMUXC_UART3_RXD = 145,
+ MX8MM_IOMUXC_UART3_TXD = 146,
+ MX8MM_IOMUXC_UART4_RXD = 147,
+ MX8MM_IOMUXC_UART4_TXD = 148,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx8mm_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE8),
+ IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO00),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO01),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO02),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO03),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO04),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO05),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO06),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO07),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO08),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO09),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO10),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO11),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO12),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO13),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO14),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO15),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_MDC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_MDIO),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TD3),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TD2),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TD1),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TD0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TX_CTL),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TXC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RX_CTL),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RXC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RD0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RD1),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RD2),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RD3),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_CLK),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_CMD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA1),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA2),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA3),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA4),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA5),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA6),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA7),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_RESET_B),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_STROBE),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_CD_B),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_CLK),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_CMD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_DATA0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_DATA1),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_DATA2),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_DATA3),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_RESET_B),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_WP),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_ALE),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CE0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CE1),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CE2),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CE3),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CLE),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA00),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA01),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA02),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA03),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA04),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA05),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA06),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA07),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DQS),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_RE_B),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_READY_B),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_WE_B),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_WP_B),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXFS),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXD0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXD1),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXD2),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXD3),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_MCLK),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXFS),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD1),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD2),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD3),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD4),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD5),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD6),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD7),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXFS),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD1),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD2),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD3),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD4),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD5),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD6),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD7),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_MCLK),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_RXFS),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_RXC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_RXD0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_TXFS),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_TXC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_TXD0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_MCLK),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_RXFS),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_RXC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_RXD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_TXFS),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_TXC),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_TXD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_MCLK),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SPDIF_TX),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SPDIF_RX),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_SPDIF_EXT_CLK),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI2_SCLK),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C1_SCL),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C1_SDA),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C2_SCL),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C2_SDA),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C3_SCL),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C3_SDA),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C4_SCL),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C4_SDA),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART1_RXD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART1_TXD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART2_RXD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART2_TXD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART3_RXD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART3_TXD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART4_RXD),
+ IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART4_TXD),
+};
+
+static const struct imx_pinctrl_soc_info imx8mm_pinctrl_info = {
+ .pins = imx8mm_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8mm_pinctrl_pads),
+ .gpr_compatible = "fsl,imx8mm-iomuxc-gpr",
+};
+
+static const struct of_device_id imx8mm_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8mm-iomuxc", .data = &imx8mm_pinctrl_info, },
+ { /* sentinel */ }
+};
+
+static int imx8mm_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx8mm_pinctrl_info);
+}
+
+static struct platform_driver imx8mm_pinctrl_driver = {
+ .driver = {
+ .name = "imx8mm-pinctrl",
+ .of_match_table = of_match_ptr(imx8mm_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx8mm_pinctrl_probe,
+};
+
+static int __init imx8mm_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8mm_pinctrl_driver);
+}
+arch_initcall(imx8mm_pinctrl_init);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qm.c b/drivers/pinctrl/freescale/pinctrl-imx8qm.c
new file mode 100644
index 000000000000..0b6029b29731
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8qm.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <dt-bindings/pinctrl/pads-imx8qm.h>
+#include <linux/err.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+static const struct pinctrl_pin_desc imx8qm_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(IMX8QM_SIM0_CLK),
+ IMX_PINCTRL_PIN(IMX8QM_SIM0_RST),
+ IMX_PINCTRL_PIN(IMX8QM_SIM0_IO),
+ IMX_PINCTRL_PIN(IMX8QM_SIM0_PD),
+ IMX_PINCTRL_PIN(IMX8QM_SIM0_POWER_EN),
+ IMX_PINCTRL_PIN(IMX8QM_SIM0_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_SIM),
+ IMX_PINCTRL_PIN(IMX8QM_M40_I2C0_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_M40_I2C0_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_M40_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8QM_M40_GPIO0_01),
+ IMX_PINCTRL_PIN(IMX8QM_M41_I2C0_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_M41_I2C0_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_M41_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8QM_M41_GPIO0_01),
+ IMX_PINCTRL_PIN(IMX8QM_GPT0_CLK),
+ IMX_PINCTRL_PIN(IMX8QM_GPT0_CAPTURE),
+ IMX_PINCTRL_PIN(IMX8QM_GPT0_COMPARE),
+ IMX_PINCTRL_PIN(IMX8QM_GPT1_CLK),
+ IMX_PINCTRL_PIN(IMX8QM_GPT1_CAPTURE),
+ IMX_PINCTRL_PIN(IMX8QM_GPT1_COMPARE),
+ IMX_PINCTRL_PIN(IMX8QM_UART0_RX),
+ IMX_PINCTRL_PIN(IMX8QM_UART0_TX),
+ IMX_PINCTRL_PIN(IMX8QM_UART0_RTS_B),
+ IMX_PINCTRL_PIN(IMX8QM_UART0_CTS_B),
+ IMX_PINCTRL_PIN(IMX8QM_UART1_TX),
+ IMX_PINCTRL_PIN(IMX8QM_UART1_RX),
+ IMX_PINCTRL_PIN(IMX8QM_UART1_RTS_B),
+ IMX_PINCTRL_PIN(IMX8QM_UART1_CTS_B),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLH),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_PMIC_MEMC_ON),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_WDOG_OUT),
+ IMX_PINCTRL_PIN(IMX8QM_PMIC_I2C_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_PMIC_I2C_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_PMIC_EARLY_WARNING),
+ IMX_PINCTRL_PIN(IMX8QM_PMIC_INT_B),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_01),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_02),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_03),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_04),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_05),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_06),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_07),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE0),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE1),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE2),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE3),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE4),
+ IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE5),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS0_GPIO00),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS0_GPIO01),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS0_I2C0_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS0_I2C0_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS0_I2C1_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS0_I2C1_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS1_GPIO00),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS1_GPIO01),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS1_I2C0_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS1_I2C0_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS1_I2C1_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_LVDS1_I2C1_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_LVDSGPIO),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI0_I2C0_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI0_I2C0_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI0_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI0_GPIO0_01),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI1_I2C0_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI1_I2C0_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI1_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI1_GPIO0_01),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_MIPIDSIGPIO),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_MCLK_OUT),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_I2C0_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_I2C0_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_GPIO0_01),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_MCLK_OUT),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_GPIO0_01),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_I2C0_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_I2C0_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_HDMI_TX0_TS_SCL),
+ IMX_PINCTRL_PIN(IMX8QM_HDMI_TX0_TS_SDA),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_3V3_HDMIGPIO),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_FSR),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_FST),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_SCKR),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_SCKT),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX0),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX1),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX2_RX3),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX3_RX2),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX4_RX1),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX5_RX0),
+ IMX_PINCTRL_PIN(IMX8QM_SPDIF0_RX),
+ IMX_PINCTRL_PIN(IMX8QM_SPDIF0_TX),
+ IMX_PINCTRL_PIN(IMX8QM_SPDIF0_EXT_CLK),
+ IMX_PINCTRL_PIN(IMX8QM_SPI3_SCK),
+ IMX_PINCTRL_PIN(IMX8QM_SPI3_SDO),
+ IMX_PINCTRL_PIN(IMX8QM_SPI3_SDI),
+ IMX_PINCTRL_PIN(IMX8QM_SPI3_CS0),
+ IMX_PINCTRL_PIN(IMX8QM_SPI3_CS1),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHB),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_FSR),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_FST),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_SCKR),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_SCKT),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX0),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX1),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX2_RX3),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX3_RX2),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX4_RX1),
+ IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX5_RX0),
+ IMX_PINCTRL_PIN(IMX8QM_MCLK_IN0),
+ IMX_PINCTRL_PIN(IMX8QM_MCLK_OUT0),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHC),
+ IMX_PINCTRL_PIN(IMX8QM_SPI0_SCK),
+ IMX_PINCTRL_PIN(IMX8QM_SPI0_SDO),
+ IMX_PINCTRL_PIN(IMX8QM_SPI0_SDI),
+ IMX_PINCTRL_PIN(IMX8QM_SPI0_CS0),
+ IMX_PINCTRL_PIN(IMX8QM_SPI0_CS1),
+ IMX_PINCTRL_PIN(IMX8QM_SPI2_SCK),
+ IMX_PINCTRL_PIN(IMX8QM_SPI2_SDO),
+ IMX_PINCTRL_PIN(IMX8QM_SPI2_SDI),
+ IMX_PINCTRL_PIN(IMX8QM_SPI2_CS0),
+ IMX_PINCTRL_PIN(IMX8QM_SPI2_CS1),
+ IMX_PINCTRL_PIN(IMX8QM_SAI1_RXC),
+ IMX_PINCTRL_PIN(IMX8QM_SAI1_RXD),
+ IMX_PINCTRL_PIN(IMX8QM_SAI1_RXFS),
+ IMX_PINCTRL_PIN(IMX8QM_SAI1_TXC),
+ IMX_PINCTRL_PIN(IMX8QM_SAI1_TXD),
+ IMX_PINCTRL_PIN(IMX8QM_SAI1_TXFS),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHT),
+ IMX_PINCTRL_PIN(IMX8QM_ADC_IN7),
+ IMX_PINCTRL_PIN(IMX8QM_ADC_IN6),
+ IMX_PINCTRL_PIN(IMX8QM_ADC_IN5),
+ IMX_PINCTRL_PIN(IMX8QM_ADC_IN4),
+ IMX_PINCTRL_PIN(IMX8QM_ADC_IN3),
+ IMX_PINCTRL_PIN(IMX8QM_ADC_IN2),
+ IMX_PINCTRL_PIN(IMX8QM_ADC_IN1),
+ IMX_PINCTRL_PIN(IMX8QM_ADC_IN0),
+ IMX_PINCTRL_PIN(IMX8QM_MLB_SIG),
+ IMX_PINCTRL_PIN(IMX8QM_MLB_CLK),
+ IMX_PINCTRL_PIN(IMX8QM_MLB_DATA),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLHT),
+ IMX_PINCTRL_PIN(IMX8QM_FLEXCAN0_RX),
+ IMX_PINCTRL_PIN(IMX8QM_FLEXCAN0_TX),
+ IMX_PINCTRL_PIN(IMX8QM_FLEXCAN1_RX),
+ IMX_PINCTRL_PIN(IMX8QM_FLEXCAN1_TX),
+ IMX_PINCTRL_PIN(IMX8QM_FLEXCAN2_RX),
+ IMX_PINCTRL_PIN(IMX8QM_FLEXCAN2_TX),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOTHR),
+ IMX_PINCTRL_PIN(IMX8QM_USB_SS3_TC0),
+ IMX_PINCTRL_PIN(IMX8QM_USB_SS3_TC1),
+ IMX_PINCTRL_PIN(IMX8QM_USB_SS3_TC2),
+ IMX_PINCTRL_PIN(IMX8QM_USB_SS3_TC3),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_3V3_USB3IO),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_RESET_B),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_VSELECT),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_RESET_B),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_VSELECT),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_WP),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_CD_B),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSELSEP),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_MDIO),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_MDC),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_REFCLK_125M_25M),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_REFCLK_125M_25M),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_MDIO),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_MDC),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOCT),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI1A_SS0_B),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI1A_SS1_B),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI1A_SCLK),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DQS),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DATA3),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DATA2),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DATA1),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DATA0),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI1),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DATA0),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DATA1),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DATA2),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DATA3),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DQS),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0A_SS0_B),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0A_SS1_B),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0A_SCLK),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0B_SCLK),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DATA0),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DATA1),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DATA2),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DATA3),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DQS),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0B_SS0_B),
+ IMX_PINCTRL_PIN(IMX8QM_QSPI0B_SS1_B),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI0),
+ IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL0_CLKREQ_B),
+ IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL0_WAKE_B),
+ IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL0_PERST_B),
+ IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL1_CLKREQ_B),
+ IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL1_WAKE_B),
+ IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL1_PERST_B),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_PCIESEP),
+ IMX_PINCTRL_PIN(IMX8QM_USB_HSIC0_DATA),
+ IMX_PINCTRL_PIN(IMX8QM_USB_HSIC0_STROBE),
+ IMX_PINCTRL_PIN(IMX8QM_CALIBRATION_0_HSIC),
+ IMX_PINCTRL_PIN(IMX8QM_CALIBRATION_1_HSIC),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_CLK),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_CMD),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA0),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA1),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA2),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA3),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA4),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA5),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA6),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA7),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_STROBE),
+ IMX_PINCTRL_PIN(IMX8QM_EMMC0_RESET_B),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_SD1FIX),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_CLK),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_CMD),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA0),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA1),
+ IMX_PINCTRL_PIN(IMX8QM_CTL_NAND_RE_P_N),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA2),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA3),
+ IMX_PINCTRL_PIN(IMX8QM_CTL_NAND_DQS_P_N),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA4),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA5),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA6),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA7),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC1_STROBE),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL2),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_CLK),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_CMD),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_DATA0),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_DATA1),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_DATA2),
+ IMX_PINCTRL_PIN(IMX8QM_USDHC2_DATA3),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL3),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXC),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TX_CTL),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXD0),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXD1),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXD2),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXD3),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXC),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RX_CTL),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXD0),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXD1),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXD2),
+ IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXD3),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXC),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TX_CTL),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXD0),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXD1),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXD2),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXD3),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXC),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RX_CTL),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXD0),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXD1),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXD2),
+ IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXD3),
+ IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA),
+};
+
+static const struct imx_pinctrl_soc_info imx8qm_pinctrl_info = {
+ .pins = imx8qm_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8qm_pinctrl_pads),
+ .flags = IMX_USE_SCU,
+};
+
+static const struct of_device_id imx8qm_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8qm-iomuxc", },
+ { /* sentinel */ }
+};
+
+static int imx8qm_pinctrl_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = imx_pinctrl_sc_ipc_init(pdev);
+ if (ret)
+ return ret;
+
+ return imx_pinctrl_probe(pdev, &imx8qm_pinctrl_info);
+}
+
+static struct platform_driver imx8qm_pinctrl_driver = {
+ .driver = {
+ .name = "imx8qm-pinctrl",
+ .of_match_table = of_match_ptr(imx8qm_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx8qm_pinctrl_probe,
+};
+
+static int __init imx8qm_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8qm_pinctrl_driver);
+}
+arch_initcall(imx8qm_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 4a9e0d4c2bbc..b1c368455d30 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -290,7 +290,13 @@ static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
return err;
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT, MTK_ENABLE);
- if (err)
+ /* SMT is supposed to be supported by every real GPIO but not by
+ * virtual GPIOs, so the extra condition err != -ENOTSUPP exists
+ * only to allow EINT support on those virtual GPIOs. An extra flag
+ * should be added to the pin descriptor once more pins with
+ * distinctive characteristics come out.
+ */
+ if (err && err != -ENOTSUPP)
return err;
return 0;
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index ea87d739f534..96a4a72708e4 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -31,6 +31,9 @@
* In some cases the register ranges for pull enable and pull
* direction are the same and thus there are only 3 register ranges.
*
+ * Starting with the Meson G12A SoC, the ao register ranges for gpio,
+ * pull enable and pull direction are the same, so there are only 2
+ * register ranges.
+ *
* For the pull and GPIO configuration every bank uses a contiguous
* set of bits in the register sets described above; the same register
* can be shared by more banks with different offsets.
@@ -488,21 +491,26 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
return PTR_ERR(pc->reg_mux);
}
- pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
- if (IS_ERR(pc->reg_pull)) {
- dev_err(pc->dev, "pull registers not found\n");
- return PTR_ERR(pc->reg_pull);
+ pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
+ if (IS_ERR(pc->reg_gpio)) {
+ dev_err(pc->dev, "gpio registers not found\n");
+ return PTR_ERR(pc->reg_gpio);
}
+ pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
+ /* Use gpio region if pull one is not present */
+ if (IS_ERR(pc->reg_pull))
+ pc->reg_pull = pc->reg_gpio;
+
pc->reg_pullen = meson_map_resource(pc, gpio_np, "pull-enable");
/* Use pull region if pull-enable one is not present */
if (IS_ERR(pc->reg_pullen))
pc->reg_pullen = pc->reg_pull;
- pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
- if (IS_ERR(pc->reg_gpio)) {
- dev_err(pc->dev, "gpio registers not found\n");
- return PTR_ERR(pc->reg_gpio);
+ pc->reg_ds = meson_map_resource(pc, gpio_np, "ds");
+ if (IS_ERR(pc->reg_ds)) {
+ dev_dbg(pc->dev, "ds registers not found - skipping\n");
+ pc->reg_ds = NULL;
}
return 0;
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index eff61ea1c67e..5eaab925f427 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -120,6 +120,7 @@ struct meson_pinctrl {
struct regmap *reg_pullen;
struct regmap *reg_pull;
struct regmap *reg_gpio;
+ struct regmap *reg_ds;
struct gpio_chip chip;
struct device_node *of_node;
};
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 0f140a802137..7f76000cc12e 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -346,6 +346,8 @@ static const unsigned int eth_rx_dv_pins[] = { DIF_1_P };
static const unsigned int eth_rx_clk_pins[] = { DIF_1_N };
static const unsigned int eth_txd0_1_pins[] = { DIF_2_P };
static const unsigned int eth_txd1_1_pins[] = { DIF_2_N };
+static const unsigned int eth_rxd3_pins[] = { DIF_2_P };
+static const unsigned int eth_rxd2_pins[] = { DIF_2_N };
static const unsigned int eth_tx_en_pins[] = { DIF_3_P };
static const unsigned int eth_ref_clk_pins[] = { DIF_3_N };
static const unsigned int eth_mdc_pins[] = { DIF_4_P };
@@ -599,6 +601,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
GROUP(eth_ref_clk, 6, 8),
GROUP(eth_mdc, 6, 9),
GROUP(eth_mdio_en, 6, 10),
+ GROUP(eth_rxd3, 7, 22),
+ GROUP(eth_rxd2, 7, 23),
};
static struct meson_pmx_group meson8b_aobus_groups[] = {
@@ -748,7 +752,7 @@ static const char * const ethernet_groups[] = {
"eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
"eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
"eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
- "eth_txd2", "eth_txd3"
+ "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2"
};
static const char * const i2c_a_groups[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index aa48b3f23c7f..6462d3ca7ceb 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -170,8 +170,8 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
- PIN_GRP_GPIO("pmic1", 17, 1, BIT(7), "pmic"),
- PIN_GRP_GPIO("pmic0", 16, 1, BIT(8), "pmic"),
+ PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
+ PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
PIN_GRP_GPIO("i2c1", 0, 2, BIT(10), "i2c"),
PIN_GRP_GPIO("spi_cs1", 17, 1, BIT(12), "spi"),
@@ -195,8 +195,11 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
- PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
- PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
+ PIN_GRP_GPIO("smi", 18, 2, BIT(4), "smi"),
+ PIN_GRP_GPIO("pcie1", 3, 1, BIT(5), "pcie"),
+ PIN_GRP_GPIO("pcie1_clkreq", 4, 1, BIT(9), "pcie"),
+ PIN_GRP_GPIO("pcie1_wakeup", 5, 1, BIT(10), "pcie"),
+ PIN_GRP_GPIO("ptp", 20, 3, BIT(11) | BIT(12) | BIT(13), "ptp"),
PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
@@ -1104,8 +1107,8 @@ static int armada_3700_pinctrl_resume(struct device *dev)
* to other IO drivers.
*/
static const struct dev_pm_ops armada_3700_pinctrl_pm_ops = {
- .suspend_late = armada_3700_pinctrl_suspend,
- .resume_early = armada_3700_pinctrl_resume,
+ .suspend_noirq = armada_3700_pinctrl_suspend,
+ .resume_noirq = armada_3700_pinctrl_resume,
};
#define PINCTRL_ARMADA_37XX_DEV_PM_OPS (&armada_3700_pinctrl_pm_ops)
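
Moving from suspend_late/resume_early to the noirq phase pushes the pinctrl
suspend after, and its resume before, every late/early callback, which is
what the comment about other IO drivers requires. The same ops can also be
declared with the pm.h helper macro; a sketch with stub callbacks:

	#include <linux/pm.h>

	static int example_suspend(struct device *dev) { return 0; }
	static int example_resume(struct device *dev) { return 0; }

	/* Expands to .suspend_noirq/.resume_noirq (plus the hibernation
	 * counterparts) when CONFIG_PM_SLEEP is enabled. */
	static const struct dev_pm_ops example_pm_ops = {
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	};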
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 4cc2c47f8778..ec02739bd21b 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1056,17 +1056,22 @@ static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
}
if (of_property_read_u32(np, "gpio-bank", &id)) {
dev_err(&pdev->dev, "populate: gpio-bank property not found\n");
+ platform_device_put(gpio_pdev);
return ERR_PTR(-EINVAL);
}
/* Already populated? */
nmk_chip = nmk_gpio_chips[id];
- if (nmk_chip)
+ if (nmk_chip) {
+ platform_device_put(gpio_pdev);
return nmk_chip;
+ }
nmk_chip = devm_kzalloc(&pdev->dev, sizeof(*nmk_chip), GFP_KERNEL);
- if (!nmk_chip)
+ if (!nmk_chip) {
+ platform_device_put(gpio_pdev);
return ERR_PTR(-ENOMEM);
+ }
nmk_chip->bank = id;
chip = &nmk_chip->chip;
@@ -1077,13 +1082,17 @@ static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
+ if (IS_ERR(base)) {
+ platform_device_put(gpio_pdev);
return ERR_CAST(base);
+ }
nmk_chip->addr = base;
clk = clk_get(&gpio_pdev->dev, NULL);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ platform_device_put(gpio_pdev);
return (void *) clk;
+ }
clk_prepare(clk);
nmk_chip->clk = clk;
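
Each early return in this function now drops the reference taken when
gpio_pdev was looked up earlier. The same fix is often written with a
single exit label, the usual kernel idiom when several paths must release
the same resource; a minimal sketch with stand-in types:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct sketch_chip { int bank; };	/* stands in for nmk_gpio_chip */

	static struct sketch_chip *sketch_populate(struct platform_device *pdev,
						   int bank)
	{
		struct sketch_chip *chip = kzalloc(sizeof(*chip), GFP_KERNEL);

		if (!chip)
			goto out_put;
		chip->bank = bank;
		return chip;

	out_put:
		/* the one place where the device reference is dropped */
		platform_device_put(pdev);
		return ERR_PTR(-ENOMEM);
	}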
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 2c7229380f08..2678603df14b 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/uaccess.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
@@ -369,225 +368,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
DEFINE_SHOW_ATTRIBUTE(pinconf_pins);
DEFINE_SHOW_ATTRIBUTE(pinconf_groups);
-#define MAX_NAME_LEN 15
-
-struct dbg_cfg {
- enum pinctrl_map_type map_type;
- char dev_name[MAX_NAME_LEN + 1];
- char state_name[MAX_NAME_LEN + 1];
- char pin_name[MAX_NAME_LEN + 1];
-};
-
-/*
- * Goal is to keep this structure as global in order to simply read the
- * pinconf-config file after a write to check config is as expected
- */
-static struct dbg_cfg pinconf_dbg_conf;
-
-/**
- * pinconf_dbg_config_print() - display the pinctrl config from the pinctrl
- * map, of the dev/pin/state that was last written to pinconf-config file.
- * @s: string filled in with config description
- * @d: not used
- */
-static int pinconf_dbg_config_print(struct seq_file *s, void *d)
-{
- struct pinctrl_maps *maps_node;
- const struct pinctrl_map *map;
- const struct pinctrl_map *found = NULL;
- struct pinctrl_dev *pctldev;
- struct dbg_cfg *dbg = &pinconf_dbg_conf;
- int i;
-
- mutex_lock(&pinctrl_maps_mutex);
-
- /* Parse the pinctrl map and look for the elected pin/state */
- for_each_maps(maps_node, i, map) {
- if (map->type != dbg->map_type)
- continue;
- if (strcmp(map->dev_name, dbg->dev_name))
- continue;
- if (strcmp(map->name, dbg->state_name))
- continue;
-
- if (!strcmp(map->data.configs.group_or_pin, dbg->pin_name)) {
- /* We found the right pin */
- found = map;
- break;
- }
- }
-
- if (!found) {
- seq_printf(s, "No config found for dev/state/pin, expected:\n");
- seq_printf(s, "Searched dev:%s\n", dbg->dev_name);
- seq_printf(s, "Searched state:%s\n", dbg->state_name);
- seq_printf(s, "Searched pin:%s\n", dbg->pin_name);
- seq_printf(s, "Use: modify config_pin <devname> "\
- "<state> <pinname> <value>\n");
- goto exit;
- }
-
- pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name);
- seq_printf(s, "Dev %s has config of %s in state %s:\n",
- dbg->dev_name, dbg->pin_name, dbg->state_name);
- pinconf_show_config(s, pctldev, found->data.configs.configs,
- found->data.configs.num_configs);
-
-exit:
- mutex_unlock(&pinctrl_maps_mutex);
-
- return 0;
-}
-
-/**
- * pinconf_dbg_config_write() - modify the pinctrl config in the pinctrl
- * map, of a dev/pin/state entry based on user entries to pinconf-config
- * @user_buf: contains the modification request with expected format:
- * modify <config> <devicename> <state> <name> <newvalue>
- * modify is literal string, alternatives like add/delete not supported yet
- * <config> is the configuration to be changed. Supported configs are
- * "config_pin" or "config_group", alternatives like config_mux are not
- * supported yet.
- * <devicename> <state> <name> are values that should match the pinctrl-maps
- * <newvalue> reflects the new config and is driver dependent
- */
-static ssize_t pinconf_dbg_config_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct pinctrl_maps *maps_node;
- const struct pinctrl_map *map;
- const struct pinctrl_map *found = NULL;
- struct pinctrl_dev *pctldev;
- const struct pinconf_ops *confops = NULL;
- struct dbg_cfg *dbg = &pinconf_dbg_conf;
- const struct pinctrl_map_configs *configs;
- char config[MAX_NAME_LEN + 1];
- char buf[128];
- char *b = &buf[0];
- int buf_size;
- char *token;
- int i;
-
- /* Get userspace string and assure termination */
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- /*
- * need to parse entry and extract parameters:
- * modify configs_pin devicename state pinname newvalue
- */
-
- /* Get arg: 'modify' */
- token = strsep(&b, " ");
- if (!token)
- return -EINVAL;
- if (strcmp(token, "modify"))
- return -EINVAL;
-
- /*
- * Get arg type: "config_pin" and "config_group"
- * types are supported so far
- */
- token = strsep(&b, " ");
- if (!token)
- return -EINVAL;
- if (!strcmp(token, "config_pin"))
- dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN;
- else if (!strcmp(token, "config_group"))
- dbg->map_type = PIN_MAP_TYPE_CONFIGS_GROUP;
- else
- return -EINVAL;
-
- /* get arg 'device_name' */
- token = strsep(&b, " ");
- if (!token)
- return -EINVAL;
- if (strlen(token) >= MAX_NAME_LEN)
- return -EINVAL;
- strncpy(dbg->dev_name, token, MAX_NAME_LEN);
-
- /* get arg 'state_name' */
- token = strsep(&b, " ");
- if (!token)
- return -EINVAL;
- if (strlen(token) >= MAX_NAME_LEN)
- return -EINVAL;
- strncpy(dbg->state_name, token, MAX_NAME_LEN);
-
- /* get arg 'pin_name' */
- token = strsep(&b, " ");
- if (!token)
- return -EINVAL;
- if (strlen(token) >= MAX_NAME_LEN)
- return -EINVAL;
- strncpy(dbg->pin_name, token, MAX_NAME_LEN);
-
- /* get new_value of config' */
- token = strsep(&b, " ");
- if (!token)
- return -EINVAL;
- if (strlen(token) >= MAX_NAME_LEN)
- return -EINVAL;
- strncpy(config, token, MAX_NAME_LEN);
-
- mutex_lock(&pinctrl_maps_mutex);
-
- /* Parse the pinctrl map and look for the selected dev/state/pin */
- for_each_maps(maps_node, i, map) {
- if (strcmp(map->dev_name, dbg->dev_name))
- continue;
- if (map->type != dbg->map_type)
- continue;
- if (strcmp(map->name, dbg->state_name))
- continue;
-
- /* we found the right pin / state, so overwrite config */
- if (!strcmp(map->data.configs.group_or_pin, dbg->pin_name)) {
- found = map;
- break;
- }
- }
-
- if (!found) {
- count = -EINVAL;
- goto exit;
- }
-
- pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name);
- if (pctldev)
- confops = pctldev->desc->confops;
-
- if (confops && confops->pin_config_dbg_parse_modify) {
- configs = &found->data.configs;
- for (i = 0; i < configs->num_configs; i++) {
- confops->pin_config_dbg_parse_modify(pctldev,
- config,
- &configs->configs[i]);
- }
- }
-
-exit:
- mutex_unlock(&pinctrl_maps_mutex);
-
- return count;
-}
-
-static int pinconf_dbg_config_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pinconf_dbg_config_print, inode->i_private);
-}
-
-static const struct file_operations pinconf_dbg_pinconfig_fops = {
- .open = pinconf_dbg_config_open,
- .write = pinconf_dbg_config_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
void pinconf_init_device_debugfs(struct dentry *devroot,
struct pinctrl_dev *pctldev)
{
@@ -595,8 +375,6 @@ void pinconf_init_device_debugfs(struct dentry *devroot,
devroot, pctldev, &pinconf_pins_fops);
debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
devroot, pctldev, &pinconf_groups_fops);
- debugfs_create_file("pinconf-config", (S_IRUGO | S_IWUSR | S_IWGRP),
- devroot, pctldev, &pinconf_dbg_pinconfig_fops);
}
#endif
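
With the writable pinconf-config interface gone, the remaining read-only
files rely on DEFINE_SHOW_ATTRIBUTE(), which generates the open/read/
release boilerplate the removed code spelled out by hand. A minimal sketch
of that helper:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *s, void *unused)
	{
		seq_puts(s, "example output\n");
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(example);		/* generates example_fops */

	/* usage: debugfs_create_file("example", 0444, parent, NULL,
	 *			      &example_fops); */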
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 2a7d638978d8..6689995fa3aa 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -489,7 +489,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
/*
* If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
* debounce registers of any GPIO will block wake/interrupt status
- * generation for *all* GPIOs for a lenght of time that depends on
+ * generation for *all* GPIOs for a length of time that depends on
* WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the
* INTERRUPT_ENABLE bit will read as 0.
*
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 3d49bbbcdbc7..cb7c432769a5 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -59,6 +59,9 @@ static int gpio_banks;
#define OUTPUT (1 << 7)
#define OUTPUT_VAL_SHIFT 8
#define OUTPUT_VAL (0x1 << OUTPUT_VAL_SHIFT)
+#define SLEWRATE_SHIFT 9
+#define SLEWRATE_MASK 0x1
+#define SLEWRATE (SLEWRATE_MASK << SLEWRATE_SHIFT)
#define DEBOUNCE (1 << 16)
#define DEBOUNCE_VAL_SHIFT 17
#define DEBOUNCE_VAL (0x3fff << DEBOUNCE_VAL_SHIFT)
@@ -72,10 +75,22 @@ static int gpio_banks;
* DRIVE_STRENGTH_DEFAULT is just a placeholder to avoid changing the drive
* strength when there is no dt config for it.
*/
-#define DRIVE_STRENGTH_DEFAULT (0 << DRIVE_STRENGTH_SHIFT)
-#define DRIVE_STRENGTH_LOW (1 << DRIVE_STRENGTH_SHIFT)
-#define DRIVE_STRENGTH_MED (2 << DRIVE_STRENGTH_SHIFT)
-#define DRIVE_STRENGTH_HI (3 << DRIVE_STRENGTH_SHIFT)
+enum drive_strength_bit {
+ DRIVE_STRENGTH_BIT_DEF,
+ DRIVE_STRENGTH_BIT_LOW,
+ DRIVE_STRENGTH_BIT_MED,
+ DRIVE_STRENGTH_BIT_HI,
+};
+
+#define DRIVE_STRENGTH_BIT_MSK(name) (DRIVE_STRENGTH_BIT_##name << \
+ DRIVE_STRENGTH_SHIFT)
+
+enum slewrate_bit {
+ SLEWRATE_BIT_DIS,
+ SLEWRATE_BIT_ENA,
+};
+
+#define SLEWRATE_BIT_MSK(name) (SLEWRATE_BIT_##name << SLEWRATE_SHIFT)
/**
* struct at91_pmx_func - describes AT91 pinmux functions
@@ -166,6 +181,8 @@ struct at91_pinctrl_mux_ops {
unsigned (*get_drivestrength)(void __iomem *pio, unsigned pin);
void (*set_drivestrength)(void __iomem *pio, unsigned pin,
u32 strength);
+ unsigned (*get_slewrate)(void __iomem *pio, unsigned pin);
+ void (*set_slewrate)(void __iomem *pio, unsigned pin, u32 slewrate);
/* irq */
int (*irq_type)(struct irq_data *d, unsigned type);
};
@@ -551,7 +568,7 @@ static unsigned at91_mux_sama5d3_get_drivestrength(void __iomem *pio,
/* SAMA5 strength is 1:1 with our defines,
* except 0 is equivalent to low per datasheet */
if (!tmp)
- tmp = DRIVE_STRENGTH_LOW;
+ tmp = DRIVE_STRENGTH_BIT_MSK(LOW);
return tmp;
}
@@ -564,11 +581,32 @@ static unsigned at91_mux_sam9x5_get_drivestrength(void __iomem *pio,
/* strength is inverted in SAM9x5 hardware relative to the pinctrl
* defines: 0 = hi, 1 = med, 2 = low, 3 = rsvd */
- tmp = DRIVE_STRENGTH_HI - tmp;
+ tmp = DRIVE_STRENGTH_BIT_MSK(HI) - tmp;
return tmp;
}
+static unsigned at91_mux_sam9x60_get_drivestrength(void __iomem *pio,
+ unsigned pin)
+{
+ unsigned tmp = readl_relaxed(pio + SAM9X60_PIO_DRIVER1);
+
+ if (tmp & BIT(pin))
+ return DRIVE_STRENGTH_BIT_HI;
+
+ return DRIVE_STRENGTH_BIT_LOW;
+}
+
+static unsigned at91_mux_sam9x60_get_slewrate(void __iomem *pio, unsigned pin)
+{
+ unsigned tmp = readl_relaxed(pio + SAM9X60_PIO_SLEWR);
+
+ if ((tmp & BIT(pin)))
+ return SLEWRATE_BIT_ENA;
+
+ return SLEWRATE_BIT_DIS;
+}
+
static void set_drive_strength(void __iomem *reg, unsigned pin, u32 strength)
{
unsigned tmp = readl_relaxed(reg);
@@ -600,12 +638,51 @@ static void at91_mux_sam9x5_set_drivestrength(void __iomem *pio, unsigned pin,
/* strength is inverted on SAM9x5s relative to our defines
* 0 = hi, 1 = med, 2 = low, 3 = rsvd */
- setting = DRIVE_STRENGTH_HI - setting;
+ setting = DRIVE_STRENGTH_BIT_MSK(HI) - setting;
set_drive_strength(pio + at91sam9x5_get_drive_register(pin), pin,
setting);
}
+static void at91_mux_sam9x60_set_drivestrength(void __iomem *pio, unsigned pin,
+ u32 setting)
+{
+ unsigned int tmp;
+
+ if (setting <= DRIVE_STRENGTH_BIT_DEF ||
+ setting == DRIVE_STRENGTH_BIT_MED ||
+ setting > DRIVE_STRENGTH_BIT_HI)
+ return;
+
+ tmp = readl_relaxed(pio + SAM9X60_PIO_DRIVER1);
+
+ /* Strength is 0: low, 1: hi */
+ if (setting == DRIVE_STRENGTH_BIT_LOW)
+ tmp &= ~BIT(pin);
+ else
+ tmp |= BIT(pin);
+
+ writel_relaxed(tmp, pio + SAM9X60_PIO_DRIVER1);
+}
+
+static void at91_mux_sam9x60_set_slewrate(void __iomem *pio, unsigned pin,
+ u32 setting)
+{
+ unsigned int tmp;
+
+ if (setting < SLEWRATE_BIT_DIS || setting > SLEWRATE_BIT_ENA)
+ return;
+
+ tmp = readl_relaxed(pio + SAM9X60_PIO_SLEWR);
+
+ if (setting == SLEWRATE_BIT_DIS)
+ tmp &= ~BIT(pin);
+ else
+ tmp |= BIT(pin);
+
+ writel_relaxed(tmp, pio + SAM9X60_PIO_SLEWR);
+}
+
static struct at91_pinctrl_mux_ops at91rm9200_ops = {
.get_periph = at91_mux_get_periph,
.mux_A_periph = at91_mux_set_A_periph,
@@ -634,6 +711,28 @@ static struct at91_pinctrl_mux_ops at91sam9x5_ops = {
.irq_type = alt_gpio_irq_type,
};
+static const struct at91_pinctrl_mux_ops sam9x60_ops = {
+ .get_periph = at91_mux_pio3_get_periph,
+ .mux_A_periph = at91_mux_pio3_set_A_periph,
+ .mux_B_periph = at91_mux_pio3_set_B_periph,
+ .mux_C_periph = at91_mux_pio3_set_C_periph,
+ .mux_D_periph = at91_mux_pio3_set_D_periph,
+ .get_deglitch = at91_mux_pio3_get_deglitch,
+ .set_deglitch = at91_mux_pio3_set_deglitch,
+ .get_debounce = at91_mux_pio3_get_debounce,
+ .set_debounce = at91_mux_pio3_set_debounce,
+ .get_pulldown = at91_mux_pio3_get_pulldown,
+ .set_pulldown = at91_mux_pio3_set_pulldown,
+ .get_schmitt_trig = at91_mux_pio3_get_schmitt_trig,
+ .disable_schmitt_trig = at91_mux_pio3_disable_schmitt_trig,
+ .get_drivestrength = at91_mux_sam9x60_get_drivestrength,
+ .set_drivestrength = at91_mux_sam9x60_set_drivestrength,
+ .get_slewrate = at91_mux_sam9x60_get_slewrate,
+ .set_slewrate = at91_mux_sam9x60_set_slewrate,
+ .irq_type = alt_gpio_irq_type,
+};
+
static struct at91_pinctrl_mux_ops sama5d3_ops = {
.get_periph = at91_mux_pio3_get_periph,
.mux_A_periph = at91_mux_pio3_set_A_periph,
@@ -893,6 +992,8 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
if (info->ops->get_drivestrength)
*config |= (info->ops->get_drivestrength(pio, pin)
<< DRIVE_STRENGTH_SHIFT);
+ if (info->ops->get_slewrate)
+ *config |= (info->ops->get_slewrate(pio, pin) << SLEWRATE_SHIFT);
if (at91_mux_get_output(pio, pin, &out))
*config |= OUTPUT | (out << OUTPUT_VAL_SHIFT);
@@ -944,6 +1045,9 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
info->ops->set_drivestrength(pio, pin,
(config & DRIVE_STRENGTH)
>> DRIVE_STRENGTH_SHIFT);
+ if (info->ops->set_slewrate)
+ info->ops->set_slewrate(pio, pin,
+ (config & SLEWRATE) >> SLEWRATE_SHIFT);
} /* for each config */
@@ -959,11 +1063,11 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
} \
} while (0)
-#define DBG_SHOW_FLAG_MASKED(mask,flag) do { \
+#define DBG_SHOW_FLAG_MASKED(mask, flag, name) do { \
if ((config & mask) == flag) { \
if (num_conf) \
seq_puts(s, "|"); \
- seq_puts(s, #flag); \
+ seq_puts(s, #name); \
num_conf++; \
} \
} while (0)
@@ -981,9 +1085,13 @@ static void at91_pinconf_dbg_show(struct pinctrl_dev *pctldev,
DBG_SHOW_FLAG(PULL_DOWN);
DBG_SHOW_FLAG(DIS_SCHMIT);
DBG_SHOW_FLAG(DEGLITCH);
- DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_LOW);
- DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_MED);
- DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_HI);
+ DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_BIT_MSK(LOW),
+ DRIVE_STRENGTH_LOW);
+ DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_BIT_MSK(MED),
+ DRIVE_STRENGTH_MED);
+ DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_BIT_MSK(HI),
+ DRIVE_STRENGTH_HI);
+ DBG_SHOW_FLAG(SLEWRATE);
DBG_SHOW_FLAG(DEBOUNCE);
if (config & DEBOUNCE) {
val = config >> DEBOUNCE_VAL_SHIFT;
@@ -1155,6 +1263,7 @@ static const struct of_device_id at91_pinctrl_of_match[] = {
{ .compatible = "atmel,sama5d3-pinctrl", .data = &sama5d3_ops },
{ .compatible = "atmel,at91sam9x5-pinctrl", .data = &at91sam9x5_ops },
{ .compatible = "atmel,at91rm9200-pinctrl", .data = &at91rm9200_ops },
+ { .compatible = "microchip,sam9x60-pinctrl", .data = &sam9x60_ops },
{ /* sentinel */ }
};
@@ -1697,6 +1806,7 @@ static const struct gpio_chip at91_gpio_template = {
static const struct of_device_id at91_gpio_of_match[] = {
{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
+ { .compatible = "microchip,sam9x60-gpio", .data = &sam9x60_ops },
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-at91.h b/drivers/pinctrl/pinctrl-at91.h
index 79b957f1dfa2..223620f14b05 100644
--- a/drivers/pinctrl/pinctrl-at91.h
+++ b/drivers/pinctrl/pinctrl-at91.h
@@ -69,4 +69,7 @@
#define AT91SAM9X5_PIO_DRIVER1 0x114 /*PIO Driver 1 register offset*/
#define AT91SAM9X5_PIO_DRIVER2 0x118 /*PIO Driver 2 register offset*/
+#define SAM9X60_PIO_SLEWR 0x110 /* PIO Slew Rate Control Register */
+#define SAM9X60_PIO_DRIVER1 0x118 /* PIO Driver 1 register offset */
+
#endif
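
With SLEWRATE_SHIFT and SLEWRATE_MASK defined as in the pinctrl-at91.c hunk
above, the slew-rate setting occupies one bit of the packed per-pin config
word, next to the existing flags. A self-contained sketch of the
encode/decode arithmetic:

	#define SLEWRATE_SHIFT	9
	#define SLEWRATE_MASK	0x1
	#define SLEWRATE	(SLEWRATE_MASK << SLEWRATE_SHIFT)

	static unsigned long config_set_slewrate(unsigned long config,
						 unsigned int enable)
	{
		config &= ~SLEWRATE;
		config |= (enable & SLEWRATE_MASK) << SLEWRATE_SHIFT;
		return config;
	}

	static unsigned int config_get_slewrate(unsigned long config)
	{
		return (config & SLEWRATE) >> SLEWRATE_SHIFT;
	}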
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index db6b48ea5f47..bc21ceb15d68 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -233,6 +233,19 @@ static int jz4725b_pwm_pwm2_pins[] = { 0x4c, };
static int jz4725b_pwm_pwm3_pins[] = { 0x4d, };
static int jz4725b_pwm_pwm4_pins[] = { 0x4e, };
static int jz4725b_pwm_pwm5_pins[] = { 0x4f, };
+static int jz4725b_lcd_8bit_pins[] = {
+ 0x72, 0x73, 0x74,
+ 0x60, 0x61, 0x62, 0x63,
+ 0x64, 0x65, 0x66, 0x67,
+};
+static int jz4725b_lcd_16bit_pins[] = {
+ 0x68, 0x69, 0x6a, 0x6b,
+ 0x6c, 0x6d, 0x6e, 0x6f,
+};
+static int jz4725b_lcd_18bit_pins[] = { 0x70, 0x71, };
+static int jz4725b_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4725b_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, };
+static int jz4725b_lcd_generic_pins[] = { 0x75, };
static int jz4725b_mmc0_1bit_funcs[] = { 1, 1, 1, };
static int jz4725b_mmc0_4bit_funcs[] = { 1, 0, 1, };
@@ -251,6 +264,12 @@ static int jz4725b_pwm_pwm2_funcs[] = { 0, };
static int jz4725b_pwm_pwm3_funcs[] = { 0, };
static int jz4725b_pwm_pwm4_funcs[] = { 0, };
static int jz4725b_pwm_pwm5_funcs[] = { 0, };
+static int jz4725b_lcd_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4725b_lcd_16bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4725b_lcd_18bit_funcs[] = { 0, 0, };
+static int jz4725b_lcd_24bit_funcs[] = { 1, 1, 1, 1, };
+static int jz4725b_lcd_special_funcs[] = { 0, 0, 0, 0, };
+static int jz4725b_lcd_generic_funcs[] = { 0, };
static const struct group_desc jz4725b_groups[] = {
INGENIC_PIN_GROUP("mmc0-1bit", jz4725b_mmc0_1bit),
@@ -270,6 +289,12 @@ static const struct group_desc jz4725b_groups[] = {
INGENIC_PIN_GROUP("pwm3", jz4725b_pwm_pwm3),
INGENIC_PIN_GROUP("pwm4", jz4725b_pwm_pwm4),
INGENIC_PIN_GROUP("pwm5", jz4725b_pwm_pwm5),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4725b_lcd_8bit),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4725b_lcd_16bit),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4725b_lcd_18bit),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4725b_lcd_24bit),
+ INGENIC_PIN_GROUP("lcd-special", jz4725b_lcd_special),
+ INGENIC_PIN_GROUP("lcd-generic", jz4725b_lcd_generic),
};
static const char *jz4725b_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
@@ -285,6 +310,10 @@ static const char *jz4725b_pwm2_groups[] = { "pwm2", };
static const char *jz4725b_pwm3_groups[] = { "pwm3", };
static const char *jz4725b_pwm4_groups[] = { "pwm4", };
static const char *jz4725b_pwm5_groups[] = { "pwm5", };
+static const char *jz4725b_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit",
+ "lcd-special", "lcd-generic",
+};
static const struct function_desc jz4725b_functions[] = {
{ "mmc0", jz4725b_mmc0_groups, ARRAY_SIZE(jz4725b_mmc0_groups), },
@@ -297,6 +326,7 @@ static const struct function_desc jz4725b_functions[] = {
{ "pwm3", jz4725b_pwm3_groups, ARRAY_SIZE(jz4725b_pwm3_groups), },
{ "pwm4", jz4725b_pwm4_groups, ARRAY_SIZE(jz4725b_pwm4_groups), },
{ "pwm5", jz4725b_pwm5_groups, ARRAY_SIZE(jz4725b_pwm5_groups), },
+ { "lcd", jz4725b_lcd_groups, ARRAY_SIZE(jz4725b_lcd_groups), },
};
static const struct ingenic_chip_info jz4725b_chip_info = {
@@ -321,47 +351,57 @@ static int jz4770_uart0_data_pins[] = { 0xa0, 0xa3, };
static int jz4770_uart0_hwflow_pins[] = { 0xa1, 0xa2, };
static int jz4770_uart1_data_pins[] = { 0x7a, 0x7c, };
static int jz4770_uart1_hwflow_pins[] = { 0x7b, 0x7d, };
-static int jz4770_uart2_data_pins[] = { 0x66, 0x67, };
-static int jz4770_uart2_hwflow_pins[] = { 0x65, 0x64, };
+static int jz4770_uart2_data_pins[] = { 0x5c, 0x5e, };
+static int jz4770_uart2_hwflow_pins[] = { 0x5d, 0x5f, };
static int jz4770_uart3_data_pins[] = { 0x6c, 0x85, };
static int jz4770_uart3_hwflow_pins[] = { 0x88, 0x89, };
-static int jz4770_uart4_data_pins[] = { 0x54, 0x4a, };
-static int jz4770_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, 0x18, };
-static int jz4770_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, };
static int jz4770_mmc0_1bit_a_pins[] = { 0x12, 0x13, 0x14, };
-static int jz4770_mmc0_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4770_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, };
static int jz4770_mmc0_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
-static int jz4770_mmc1_4bit_d_pins[] = { 0x75, 0x76, 0x77, };
+static int jz4770_mmc0_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4770_mmc0_8bit_e_pins[] = { 0x98, 0x99, 0x9a, 0x9b, };
static int jz4770_mmc1_1bit_d_pins[] = { 0x78, 0x79, 0x74, };
-static int jz4770_mmc1_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4770_mmc1_4bit_d_pins[] = { 0x75, 0x76, 0x77, };
static int jz4770_mmc1_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
-static int jz4770_nemc_data_pins[] = {
+static int jz4770_mmc1_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4770_mmc1_8bit_e_pins[] = { 0x98, 0x99, 0x9a, 0x9b, };
+static int jz4770_mmc2_1bit_b_pins[] = { 0x3c, 0x3d, 0x34, };
+static int jz4770_mmc2_4bit_b_pins[] = { 0x35, 0x3e, 0x3f, };
+static int jz4770_mmc2_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
+static int jz4770_mmc2_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4770_mmc2_8bit_e_pins[] = { 0x98, 0x99, 0x9a, 0x9b, };
+static int jz4770_nemc_8bit_data_pins[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
};
+static int jz4770_nemc_16bit_data_pins[] = {
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+};
static int jz4770_nemc_cle_ale_pins[] = { 0x20, 0x21, };
static int jz4770_nemc_addr_pins[] = { 0x22, 0x23, 0x24, 0x25, };
static int jz4770_nemc_rd_we_pins[] = { 0x10, 0x11, };
static int jz4770_nemc_frd_fwe_pins[] = { 0x12, 0x13, };
+static int jz4770_nemc_wait_pins[] = { 0x1b, };
static int jz4770_nemc_cs1_pins[] = { 0x15, };
static int jz4770_nemc_cs2_pins[] = { 0x16, };
static int jz4770_nemc_cs3_pins[] = { 0x17, };
static int jz4770_nemc_cs4_pins[] = { 0x18, };
static int jz4770_nemc_cs5_pins[] = { 0x19, };
static int jz4770_nemc_cs6_pins[] = { 0x1a, };
-static int jz4770_i2c0_pins[] = { 0x6e, 0x6f, };
-static int jz4770_i2c1_pins[] = { 0x8e, 0x8f, };
+static int jz4770_i2c0_pins[] = { 0x7e, 0x7f, };
+static int jz4770_i2c1_pins[] = { 0x9e, 0x9f, };
static int jz4770_i2c2_pins[] = { 0xb0, 0xb1, };
-static int jz4770_i2c3_pins[] = { 0x6a, 0x6b, };
-static int jz4770_i2c4_e_pins[] = { 0x8c, 0x8d, };
-static int jz4770_i2c4_f_pins[] = { 0xb9, 0xb8, };
-static int jz4770_cim_pins[] = {
- 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31,
+static int jz4770_cim_8bit_pins[] = {
+ 0x26, 0x27, 0x28, 0x29,
+ 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31,
+};
+static int jz4770_cim_12bit_pins[] = {
+ 0x32, 0x33, 0xb0, 0xb1,
};
-static int jz4770_lcd_32bit_pins[] = {
+static int jz4770_lcd_24bit_pins[] = {
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
- 0x58, 0x59, 0x51,
+ 0x58, 0x59, 0x5a, 0x5b,
};
static int jz4770_pwm_pwm0_pins[] = { 0x80, };
static int jz4770_pwm_pwm1_pins[] = { 0x81, };
@@ -371,30 +411,41 @@ static int jz4770_pwm_pwm4_pins[] = { 0x84, };
static int jz4770_pwm_pwm5_pins[] = { 0x85, };
static int jz4770_pwm_pwm6_pins[] = { 0x6a, };
static int jz4770_pwm_pwm7_pins[] = { 0x6b, };
+static int jz4770_mac_rmii_pins[] = {
+ 0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
+};
+static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
static int jz4770_uart0_data_funcs[] = { 0, 0, };
static int jz4770_uart0_hwflow_funcs[] = { 0, 0, };
static int jz4770_uart1_data_funcs[] = { 0, 0, };
static int jz4770_uart1_hwflow_funcs[] = { 0, 0, };
-static int jz4770_uart2_data_funcs[] = { 1, 1, };
-static int jz4770_uart2_hwflow_funcs[] = { 1, 1, };
+static int jz4770_uart2_data_funcs[] = { 0, 0, };
+static int jz4770_uart2_hwflow_funcs[] = { 0, 0, };
static int jz4770_uart3_data_funcs[] = { 0, 1, };
static int jz4770_uart3_hwflow_funcs[] = { 0, 0, };
-static int jz4770_uart4_data_funcs[] = { 2, 2, };
-static int jz4770_mmc0_8bit_a_funcs[] = { 1, 1, 1, 1, 1, };
-static int jz4770_mmc0_4bit_a_funcs[] = { 1, 1, 1, };
static int jz4770_mmc0_1bit_a_funcs[] = { 1, 1, 0, };
-static int jz4770_mmc0_4bit_e_funcs[] = { 0, 0, 0, };
+static int jz4770_mmc0_4bit_a_funcs[] = { 1, 1, 1, };
static int jz4770_mmc0_1bit_e_funcs[] = { 0, 0, 0, };
-static int jz4770_mmc1_4bit_d_funcs[] = { 0, 0, 0, };
+static int jz4770_mmc0_4bit_e_funcs[] = { 0, 0, 0, };
+static int jz4770_mmc0_8bit_e_funcs[] = { 0, 0, 0, 0, };
static int jz4770_mmc1_1bit_d_funcs[] = { 0, 0, 0, };
-static int jz4770_mmc1_4bit_e_funcs[] = { 1, 1, 1, };
+static int jz4770_mmc1_4bit_d_funcs[] = { 0, 0, 0, };
static int jz4770_mmc1_1bit_e_funcs[] = { 1, 1, 1, };
-static int jz4770_nemc_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4770_mmc1_4bit_e_funcs[] = { 1, 1, 1, };
+static int jz4770_mmc1_8bit_e_funcs[] = { 1, 1, 1, 1, };
+static int jz4770_mmc2_1bit_b_funcs[] = { 0, 0, 0, };
+static int jz4770_mmc2_4bit_b_funcs[] = { 0, 0, 0, };
+static int jz4770_mmc2_1bit_e_funcs[] = { 2, 2, 2, };
+static int jz4770_mmc2_4bit_e_funcs[] = { 2, 2, 2, };
+static int jz4770_mmc2_8bit_e_funcs[] = { 2, 2, 2, 2, };
+static int jz4770_nemc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4770_nemc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
static int jz4770_nemc_cle_ale_funcs[] = { 0, 0, };
static int jz4770_nemc_addr_funcs[] = { 0, 0, 0, 0, };
static int jz4770_nemc_rd_we_funcs[] = { 0, 0, };
static int jz4770_nemc_frd_fwe_funcs[] = { 0, 0, };
+static int jz4770_nemc_wait_funcs[] = { 0, };
static int jz4770_nemc_cs1_funcs[] = { 0, };
static int jz4770_nemc_cs2_funcs[] = { 0, };
static int jz4770_nemc_cs3_funcs[] = { 0, };
@@ -404,14 +455,13 @@ static int jz4770_nemc_cs6_funcs[] = { 0, };
static int jz4770_i2c0_funcs[] = { 0, 0, };
static int jz4770_i2c1_funcs[] = { 0, 0, };
static int jz4770_i2c2_funcs[] = { 2, 2, };
-static int jz4770_i2c3_funcs[] = { 1, 1, };
-static int jz4770_i2c4_e_funcs[] = { 1, 1, };
-static int jz4770_i2c4_f_funcs[] = { 1, 1, };
-static int jz4770_cim_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
-static int jz4770_lcd_32bit_funcs[] = {
+static int jz4770_cim_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4770_cim_12bit_funcs[] = { 0, 0, 0, 0, };
+static int jz4770_lcd_24bit_funcs[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0,
+ 0, 0, 0, 0,
};
static int jz4770_pwm_pwm0_funcs[] = { 0, };
static int jz4770_pwm_pwm1_funcs[] = { 0, };
@@ -421,6 +471,8 @@ static int jz4770_pwm_pwm4_funcs[] = { 0, };
static int jz4770_pwm_pwm5_funcs[] = { 0, };
static int jz4770_pwm_pwm6_funcs[] = { 0, };
static int jz4770_pwm_pwm7_funcs[] = { 0, };
+static int jz4770_mac_rmii_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4770_mac_mii_funcs[] = { 0, 0, };
static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
@@ -431,21 +483,28 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart2-hwflow", jz4770_uart2_hwflow),
INGENIC_PIN_GROUP("uart3-data", jz4770_uart3_data),
INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow),
- INGENIC_PIN_GROUP("uart4-data", jz4770_uart4_data),
- INGENIC_PIN_GROUP("mmc0-8bit-a", jz4770_mmc0_8bit_a),
- INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a),
INGENIC_PIN_GROUP("mmc0-1bit-a", jz4770_mmc0_1bit_a),
- INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e),
+ INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a),
INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e),
- INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d),
+ INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e),
+ INGENIC_PIN_GROUP("mmc0-8bit-e", jz4770_mmc0_8bit_e),
INGENIC_PIN_GROUP("mmc1-1bit-d", jz4770_mmc1_1bit_d),
- INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e),
+ INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d),
INGENIC_PIN_GROUP("mmc1-1bit-e", jz4770_mmc1_1bit_e),
- INGENIC_PIN_GROUP("nemc-data", jz4770_nemc_data),
+ INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e),
+ INGENIC_PIN_GROUP("mmc1-8bit-e", jz4770_mmc1_8bit_e),
+ INGENIC_PIN_GROUP("mmc2-1bit-b", jz4770_mmc2_1bit_b),
+ INGENIC_PIN_GROUP("mmc2-4bit-b", jz4770_mmc2_4bit_b),
+ INGENIC_PIN_GROUP("mmc2-1bit-e", jz4770_mmc2_1bit_e),
+ INGENIC_PIN_GROUP("mmc2-4bit-e", jz4770_mmc2_4bit_e),
+ INGENIC_PIN_GROUP("mmc2-8bit-e", jz4770_mmc2_8bit_e),
+ INGENIC_PIN_GROUP("nemc-8bit-data", jz4770_nemc_8bit_data),
+ INGENIC_PIN_GROUP("nemc-16bit-data", jz4770_nemc_16bit_data),
INGENIC_PIN_GROUP("nemc-cle-ale", jz4770_nemc_cle_ale),
INGENIC_PIN_GROUP("nemc-addr", jz4770_nemc_addr),
INGENIC_PIN_GROUP("nemc-rd-we", jz4770_nemc_rd_we),
INGENIC_PIN_GROUP("nemc-frd-fwe", jz4770_nemc_frd_fwe),
+ INGENIC_PIN_GROUP("nemc-wait", jz4770_nemc_wait),
INGENIC_PIN_GROUP("nemc-cs1", jz4770_nemc_cs1),
INGENIC_PIN_GROUP("nemc-cs2", jz4770_nemc_cs2),
INGENIC_PIN_GROUP("nemc-cs3", jz4770_nemc_cs3),
@@ -455,11 +514,9 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("i2c0-data", jz4770_i2c0),
INGENIC_PIN_GROUP("i2c1-data", jz4770_i2c1),
INGENIC_PIN_GROUP("i2c2-data", jz4770_i2c2),
- INGENIC_PIN_GROUP("i2c3-data", jz4770_i2c3),
- INGENIC_PIN_GROUP("i2c4-data-e", jz4770_i2c4_e),
- INGENIC_PIN_GROUP("i2c4-data-f", jz4770_i2c4_f),
- INGENIC_PIN_GROUP("cim-data", jz4770_cim),
- INGENIC_PIN_GROUP("lcd-32bit", jz4770_lcd_32bit),
+ INGENIC_PIN_GROUP("cim-data-8bit", jz4770_cim_8bit),
+ INGENIC_PIN_GROUP("cim-data-12bit", jz4770_cim_12bit),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit),
{ "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0),
INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1),
@@ -469,32 +526,41 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("pwm5", jz4770_pwm_pwm5),
INGENIC_PIN_GROUP("pwm6", jz4770_pwm_pwm6),
INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7),
+ INGENIC_PIN_GROUP("mac-rmii", jz4770_mac_rmii),
+ INGENIC_PIN_GROUP("mac-mii", jz4770_mac_mii),
};
static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *jz4770_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
static const char *jz4770_uart2_groups[] = { "uart2-data", "uart2-hwflow", };
static const char *jz4770_uart3_groups[] = { "uart3-data", "uart3-hwflow", };
-static const char *jz4770_uart4_groups[] = { "uart4-data", };
static const char *jz4770_mmc0_groups[] = {
- "mmc0-8bit-a", "mmc0-4bit-a", "mmc0-1bit-a",
- "mmc0-1bit-e", "mmc0-4bit-e",
+ "mmc0-1bit-a", "mmc0-4bit-a",
+ "mmc0-1bit-e", "mmc0-4bit-e", "mmc0-8bit-e",
};
static const char *jz4770_mmc1_groups[] = {
- "mmc1-1bit-d", "mmc1-4bit-d", "mmc1-1bit-e", "mmc1-4bit-e",
+ "mmc1-1bit-d", "mmc1-4bit-d",
+ "mmc1-1bit-e", "mmc1-4bit-e", "mmc1-8bit-e",
+};
+static const char *jz4770_mmc2_groups[] = {
+ "mmc2-1bit-b", "mmc2-4bit-b",
+ "mmc2-1bit-e", "mmc2-4bit-e", "mmc2-8bit-e",
};
static const char *jz4770_nemc_groups[] = {
- "nemc-data", "nemc-cle-ale", "nemc-addr", "nemc-rd-we", "nemc-frd-fwe",
+ "nemc-8bit-data", "nemc-16bit-data", "nemc-cle-ale",
+ "nemc-addr", "nemc-rd-we", "nemc-frd-fwe", "nemc-wait",
};
static const char *jz4770_cs1_groups[] = { "nemc-cs1", };
+static const char *jz4770_cs2_groups[] = { "nemc-cs2", };
+static const char *jz4770_cs3_groups[] = { "nemc-cs3", };
+static const char *jz4770_cs4_groups[] = { "nemc-cs4", };
+static const char *jz4770_cs5_groups[] = { "nemc-cs5", };
static const char *jz4770_cs6_groups[] = { "nemc-cs6", };
static const char *jz4770_i2c0_groups[] = { "i2c0-data", };
static const char *jz4770_i2c1_groups[] = { "i2c1-data", };
static const char *jz4770_i2c2_groups[] = { "i2c2-data", };
-static const char *jz4770_i2c3_groups[] = { "i2c3-data", };
-static const char *jz4770_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", };
-static const char *jz4770_cim_groups[] = { "cim-data", };
-static const char *jz4770_lcd_groups[] = { "lcd-32bit", "lcd-no-pins", };
+static const char *jz4770_cim_groups[] = { "cim-data-8bit", "cim-data-12bit", };
+static const char *jz4770_lcd_groups[] = { "lcd-24bit", "lcd-no-pins", };
static const char *jz4770_pwm0_groups[] = { "pwm0", };
static const char *jz4770_pwm1_groups[] = { "pwm1", };
static const char *jz4770_pwm2_groups[] = { "pwm2", };
@@ -503,23 +569,26 @@ static const char *jz4770_pwm4_groups[] = { "pwm4", };
static const char *jz4770_pwm5_groups[] = { "pwm5", };
static const char *jz4770_pwm6_groups[] = { "pwm6", };
static const char *jz4770_pwm7_groups[] = { "pwm7", };
+static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", };
static const struct function_desc jz4770_functions[] = {
{ "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
{ "uart1", jz4770_uart1_groups, ARRAY_SIZE(jz4770_uart1_groups), },
{ "uart2", jz4770_uart2_groups, ARRAY_SIZE(jz4770_uart2_groups), },
{ "uart3", jz4770_uart3_groups, ARRAY_SIZE(jz4770_uart3_groups), },
- { "uart4", jz4770_uart4_groups, ARRAY_SIZE(jz4770_uart4_groups), },
{ "mmc0", jz4770_mmc0_groups, ARRAY_SIZE(jz4770_mmc0_groups), },
{ "mmc1", jz4770_mmc1_groups, ARRAY_SIZE(jz4770_mmc1_groups), },
+ { "mmc2", jz4770_mmc2_groups, ARRAY_SIZE(jz4770_mmc2_groups), },
{ "nemc", jz4770_nemc_groups, ARRAY_SIZE(jz4770_nemc_groups), },
{ "nemc-cs1", jz4770_cs1_groups, ARRAY_SIZE(jz4770_cs1_groups), },
+ { "nemc-cs2", jz4770_cs2_groups, ARRAY_SIZE(jz4770_cs2_groups), },
+ { "nemc-cs3", jz4770_cs3_groups, ARRAY_SIZE(jz4770_cs3_groups), },
+ { "nemc-cs4", jz4770_cs4_groups, ARRAY_SIZE(jz4770_cs4_groups), },
+ { "nemc-cs5", jz4770_cs5_groups, ARRAY_SIZE(jz4770_cs5_groups), },
{ "nemc-cs6", jz4770_cs6_groups, ARRAY_SIZE(jz4770_cs6_groups), },
{ "i2c0", jz4770_i2c0_groups, ARRAY_SIZE(jz4770_i2c0_groups), },
{ "i2c1", jz4770_i2c1_groups, ARRAY_SIZE(jz4770_i2c1_groups), },
{ "i2c2", jz4770_i2c2_groups, ARRAY_SIZE(jz4770_i2c2_groups), },
- { "i2c3", jz4770_i2c3_groups, ARRAY_SIZE(jz4770_i2c3_groups), },
- { "i2c4", jz4770_i2c4_groups, ARRAY_SIZE(jz4770_i2c4_groups), },
{ "cim", jz4770_cim_groups, ARRAY_SIZE(jz4770_cim_groups), },
{ "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), },
{ "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), },
@@ -530,6 +599,7 @@ static const struct function_desc jz4770_functions[] = {
{ "pwm5", jz4770_pwm5_groups, ARRAY_SIZE(jz4770_pwm5_groups), },
{ "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
{ "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
+ { "mac", jz4770_mac_groups, ARRAY_SIZE(jz4770_mac_groups), },
};
static const struct ingenic_chip_info jz4770_chip_info = {
@@ -542,7 +612,140 @@ static const struct ingenic_chip_info jz4770_chip_info = {
.pull_downs = jz4770_pull_downs,
};
-static u32 gpio_ingenic_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
+static int jz4780_uart2_data_pins[] = { 0x66, 0x67, };
+static int jz4780_uart2_hwflow_pins[] = { 0x65, 0x64, };
+static int jz4780_uart4_data_pins[] = { 0x54, 0x4a, };
+static int jz4780_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, 0x18, };
+static int jz4780_i2c3_pins[] = { 0x6a, 0x6b, };
+static int jz4780_i2c4_e_pins[] = { 0x8c, 0x8d, };
+static int jz4780_i2c4_f_pins[] = { 0xb9, 0xb8, };
+
+static int jz4780_uart2_data_funcs[] = { 1, 1, };
+static int jz4780_uart2_hwflow_funcs[] = { 1, 1, };
+static int jz4780_uart4_data_funcs[] = { 2, 2, };
+static int jz4780_mmc0_8bit_a_funcs[] = { 1, 1, 1, 1, 1, };
+static int jz4780_i2c3_funcs[] = { 1, 1, };
+static int jz4780_i2c4_e_funcs[] = { 1, 1, };
+static int jz4780_i2c4_f_funcs[] = { 1, 1, };
+
+static const struct group_desc jz4780_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4770_uart0_hwflow),
+ INGENIC_PIN_GROUP("uart1-data", jz4770_uart1_data),
+ INGENIC_PIN_GROUP("uart1-hwflow", jz4770_uart1_hwflow),
+ INGENIC_PIN_GROUP("uart2-data", jz4780_uart2_data),
+ INGENIC_PIN_GROUP("uart2-hwflow", jz4780_uart2_hwflow),
+ INGENIC_PIN_GROUP("uart3-data", jz4770_uart3_data),
+ INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow),
+ INGENIC_PIN_GROUP("uart4-data", jz4780_uart4_data),
+ INGENIC_PIN_GROUP("mmc0-1bit-a", jz4770_mmc0_1bit_a),
+ INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a),
+ INGENIC_PIN_GROUP("mmc0-8bit-a", jz4780_mmc0_8bit_a),
+ INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e),
+ INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e),
+ INGENIC_PIN_GROUP("mmc1-1bit-d", jz4770_mmc1_1bit_d),
+ INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d),
+ INGENIC_PIN_GROUP("mmc1-1bit-e", jz4770_mmc1_1bit_e),
+ INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e),
+ INGENIC_PIN_GROUP("mmc2-1bit-b", jz4770_mmc2_1bit_b),
+ INGENIC_PIN_GROUP("mmc2-4bit-b", jz4770_mmc2_4bit_b),
+ INGENIC_PIN_GROUP("mmc2-1bit-e", jz4770_mmc2_1bit_e),
+ INGENIC_PIN_GROUP("mmc2-4bit-e", jz4770_mmc2_4bit_e),
+ INGENIC_PIN_GROUP("nemc-data", jz4770_nemc_8bit_data),
+ INGENIC_PIN_GROUP("nemc-cle-ale", jz4770_nemc_cle_ale),
+ INGENIC_PIN_GROUP("nemc-addr", jz4770_nemc_addr),
+ INGENIC_PIN_GROUP("nemc-rd-we", jz4770_nemc_rd_we),
+ INGENIC_PIN_GROUP("nemc-frd-fwe", jz4770_nemc_frd_fwe),
+ INGENIC_PIN_GROUP("nemc-wait", jz4770_nemc_wait),
+ INGENIC_PIN_GROUP("nemc-cs1", jz4770_nemc_cs1),
+ INGENIC_PIN_GROUP("nemc-cs2", jz4770_nemc_cs2),
+ INGENIC_PIN_GROUP("nemc-cs3", jz4770_nemc_cs3),
+ INGENIC_PIN_GROUP("nemc-cs4", jz4770_nemc_cs4),
+ INGENIC_PIN_GROUP("nemc-cs5", jz4770_nemc_cs5),
+ INGENIC_PIN_GROUP("nemc-cs6", jz4770_nemc_cs6),
+ INGENIC_PIN_GROUP("i2c0-data", jz4770_i2c0),
+ INGENIC_PIN_GROUP("i2c1-data", jz4770_i2c1),
+ INGENIC_PIN_GROUP("i2c2-data", jz4770_i2c2),
+ INGENIC_PIN_GROUP("i2c3-data", jz4780_i2c3),
+ INGENIC_PIN_GROUP("i2c4-data-e", jz4780_i2c4_e),
+ INGENIC_PIN_GROUP("i2c4-data-f", jz4780_i2c4_f),
+ INGENIC_PIN_GROUP("cim-data", jz4770_cim_8bit),
+ INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit),
+ { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0),
+ INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1),
+ INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2),
+ INGENIC_PIN_GROUP("pwm3", jz4770_pwm_pwm3),
+ INGENIC_PIN_GROUP("pwm4", jz4770_pwm_pwm4),
+ INGENIC_PIN_GROUP("pwm5", jz4770_pwm_pwm5),
+ INGENIC_PIN_GROUP("pwm6", jz4770_pwm_pwm6),
+ INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7),
+};
+
+static const char *jz4780_uart2_groups[] = { "uart2-data", "uart2-hwflow", };
+static const char *jz4780_uart4_groups[] = { "uart4-data", };
+static const char *jz4780_mmc0_groups[] = {
+ "mmc0-1bit-a", "mmc0-4bit-a", "mmc0-8bit-a",
+ "mmc0-1bit-e", "mmc0-4bit-e",
+};
+static const char *jz4780_mmc1_groups[] = {
+ "mmc1-1bit-d", "mmc1-4bit-d", "mmc1-1bit-e", "mmc1-4bit-e",
+};
+static const char *jz4780_mmc2_groups[] = {
+ "mmc2-1bit-b", "mmc2-4bit-b", "mmc2-1bit-e", "mmc2-4bit-e",
+};
+static const char *jz4780_nemc_groups[] = {
+ "nemc-data", "nemc-cle-ale", "nemc-addr",
+ "nemc-rd-we", "nemc-frd-fwe", "nemc-wait",
+};
+static const char *jz4780_i2c3_groups[] = { "i2c3-data", };
+static const char *jz4780_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", };
+static const char *jz4780_cim_groups[] = { "cim-data", };
+
+static const struct function_desc jz4780_functions[] = {
+ { "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
+ { "uart1", jz4770_uart1_groups, ARRAY_SIZE(jz4770_uart1_groups), },
+ { "uart2", jz4780_uart2_groups, ARRAY_SIZE(jz4780_uart2_groups), },
+ { "uart3", jz4770_uart3_groups, ARRAY_SIZE(jz4770_uart3_groups), },
+ { "uart4", jz4780_uart4_groups, ARRAY_SIZE(jz4780_uart4_groups), },
+ { "mmc0", jz4780_mmc0_groups, ARRAY_SIZE(jz4780_mmc0_groups), },
+ { "mmc1", jz4780_mmc1_groups, ARRAY_SIZE(jz4780_mmc1_groups), },
+ { "mmc2", jz4780_mmc2_groups, ARRAY_SIZE(jz4780_mmc2_groups), },
+ { "nemc", jz4780_nemc_groups, ARRAY_SIZE(jz4780_nemc_groups), },
+ { "nemc-cs1", jz4770_cs1_groups, ARRAY_SIZE(jz4770_cs1_groups), },
+ { "nemc-cs2", jz4770_cs2_groups, ARRAY_SIZE(jz4770_cs2_groups), },
+ { "nemc-cs3", jz4770_cs3_groups, ARRAY_SIZE(jz4770_cs3_groups), },
+ { "nemc-cs4", jz4770_cs4_groups, ARRAY_SIZE(jz4770_cs4_groups), },
+ { "nemc-cs5", jz4770_cs5_groups, ARRAY_SIZE(jz4770_cs5_groups), },
+ { "nemc-cs6", jz4770_cs6_groups, ARRAY_SIZE(jz4770_cs6_groups), },
+ { "i2c0", jz4770_i2c0_groups, ARRAY_SIZE(jz4770_i2c0_groups), },
+ { "i2c1", jz4770_i2c1_groups, ARRAY_SIZE(jz4770_i2c1_groups), },
+ { "i2c2", jz4770_i2c2_groups, ARRAY_SIZE(jz4770_i2c2_groups), },
+ { "i2c3", jz4780_i2c3_groups, ARRAY_SIZE(jz4780_i2c3_groups), },
+ { "i2c4", jz4780_i2c4_groups, ARRAY_SIZE(jz4780_i2c4_groups), },
+ { "cim", jz4780_cim_groups, ARRAY_SIZE(jz4780_cim_groups), },
+ { "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), },
+ { "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), },
+ { "pwm1", jz4770_pwm1_groups, ARRAY_SIZE(jz4770_pwm1_groups), },
+ { "pwm2", jz4770_pwm2_groups, ARRAY_SIZE(jz4770_pwm2_groups), },
+ { "pwm3", jz4770_pwm3_groups, ARRAY_SIZE(jz4770_pwm3_groups), },
+ { "pwm4", jz4770_pwm4_groups, ARRAY_SIZE(jz4770_pwm4_groups), },
+ { "pwm5", jz4770_pwm5_groups, ARRAY_SIZE(jz4770_pwm5_groups), },
+ { "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
+ { "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
+};
+
+static const struct ingenic_chip_info jz4780_chip_info = {
+ .num_chips = 6,
+ .groups = jz4780_groups,
+ .num_groups = ARRAY_SIZE(jz4780_groups),
+ .functions = jz4780_functions,
+ .num_functions = ARRAY_SIZE(jz4780_functions),
+ .pull_ups = jz4770_pull_ups,
+ .pull_downs = jz4770_pull_downs,
+};
+
+static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
{
unsigned int val;
@@ -551,7 +754,7 @@ static u32 gpio_ingenic_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
return (u32) val;
}
-static void gpio_ingenic_set_bit(struct ingenic_gpio_chip *jzgc,
+static void ingenic_gpio_set_bit(struct ingenic_gpio_chip *jzgc,
u8 reg, u8 offset, bool set)
{
if (set)
@@ -565,7 +768,7 @@ static void gpio_ingenic_set_bit(struct ingenic_gpio_chip *jzgc,
static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
u8 offset)
{
- unsigned int val = gpio_ingenic_read_reg(jzgc, GPIO_PIN);
+ unsigned int val = ingenic_gpio_read_reg(jzgc, GPIO_PIN);
return !!(val & BIT(offset));
}
@@ -574,9 +777,9 @@ static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
if (jzgc->jzpc->version >= ID_JZ4770)
- gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
else
- gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
+ ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
}
static void irq_set_type(struct ingenic_gpio_chip *jzgc,
@@ -594,21 +797,21 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- gpio_ingenic_set_bit(jzgc, reg2, offset, true);
- gpio_ingenic_set_bit(jzgc, reg1, offset, true);
+ ingenic_gpio_set_bit(jzgc, reg2, offset, true);
+ ingenic_gpio_set_bit(jzgc, reg1, offset, true);
break;
case IRQ_TYPE_EDGE_FALLING:
- gpio_ingenic_set_bit(jzgc, reg2, offset, false);
- gpio_ingenic_set_bit(jzgc, reg1, offset, true);
+ ingenic_gpio_set_bit(jzgc, reg2, offset, false);
+ ingenic_gpio_set_bit(jzgc, reg1, offset, true);
break;
case IRQ_TYPE_LEVEL_HIGH:
- gpio_ingenic_set_bit(jzgc, reg2, offset, true);
- gpio_ingenic_set_bit(jzgc, reg1, offset, false);
+ ingenic_gpio_set_bit(jzgc, reg2, offset, true);
+ ingenic_gpio_set_bit(jzgc, reg1, offset, false);
break;
case IRQ_TYPE_LEVEL_LOW:
default:
- gpio_ingenic_set_bit(jzgc, reg2, offset, false);
- gpio_ingenic_set_bit(jzgc, reg1, offset, false);
+ ingenic_gpio_set_bit(jzgc, reg2, offset, false);
+ ingenic_gpio_set_bit(jzgc, reg1, offset, false);
break;
}
}
@@ -618,7 +821,7 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, true);
+ ingenic_gpio_set_bit(jzgc, GPIO_MSK, irqd->hwirq, true);
}
static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
@@ -626,7 +829,7 @@ static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, false);
+ ingenic_gpio_set_bit(jzgc, GPIO_MSK, irqd->hwirq, false);
}
static void ingenic_gpio_irq_enable(struct irq_data *irqd)
@@ -636,9 +839,9 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
int irq = irqd->hwirq;
if (jzgc->jzpc->version >= ID_JZ4770)
- gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
else
- gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
+ ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
ingenic_gpio_irq_unmask(irqd);
}
@@ -652,9 +855,9 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_irq_mask(irqd);
if (jzgc->jzpc->version >= ID_JZ4770)
- gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
else
- gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
+ ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
}
static void ingenic_gpio_irq_ack(struct irq_data *irqd)
@@ -677,9 +880,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
}
if (jzgc->jzpc->version >= ID_JZ4770)
- gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
+ ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
else
- gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
+ ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
}
static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
@@ -734,9 +937,9 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
if (jzgc->jzpc->version >= ID_JZ4770)
- flag = gpio_ingenic_read_reg(jzgc, JZ4770_GPIO_FLAG);
+ flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
else
- flag = gpio_ingenic_read_reg(jzgc, JZ4740_GPIO_FLAG);
+ flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
for_each_set_bit(i, &flag, 32)
generic_handle_irq(irq_linear_revmap(gc->irq.domain, i));
@@ -1185,7 +1388,9 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
else
jzpc->version = (enum jz_version)id->driver_data;
- if (jzpc->version >= ID_JZ4770)
+ if (jzpc->version >= ID_JZ4780)
+ chip_info = &jz4780_chip_info;
+ else if (jzpc->version >= ID_JZ4770)
chip_info = &jz4770_chip_info;
else if (jzpc->version >= ID_JZ4725B)
chip_info = &jz4725b_chip_info;
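
The probe chooses the newest chip_info whose ID the device matches, which
only works because the jz_version enum is declared in ascending SoC order.
A sketch of that invariant with hypothetical names:

	enum sketch_version { ID_OLD, ID_MID, ID_NEW };	/* must stay sorted */

	static const char *pick_info(enum sketch_version v)
	{
		if (v >= ID_NEW)
			return "new-chip-info";
		if (v >= ID_MID)
			return "mid-chip-info";
		return "old-chip-info";
	}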
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 98905d4a79ca..5d7a8514def9 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -68,6 +68,7 @@ struct mcp23s08 {
struct mutex lock;
struct gpio_chip chip;
+ struct irq_chip irq_chip;
struct regmap *regmap;
struct device *dev;
@@ -607,15 +608,6 @@ static void mcp23s08_irq_bus_unlock(struct irq_data *data)
mutex_unlock(&mcp->lock);
}
-static struct irq_chip mcp23s08_irq_chip = {
- .name = "gpio-mcp23xxx",
- .irq_mask = mcp23s08_irq_mask,
- .irq_unmask = mcp23s08_irq_unmask,
- .irq_set_type = mcp23s08_irq_set_type,
- .irq_bus_lock = mcp23s08_irq_bus_lock,
- .irq_bus_sync_unlock = mcp23s08_irq_bus_unlock,
-};
-
static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
{
struct gpio_chip *chip = &mcp->chip;
@@ -645,7 +637,7 @@ static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
int err;
err = gpiochip_irqchip_add_nested(chip,
- &mcp23s08_irq_chip,
+ &mcp->irq_chip,
0,
handle_simple_irq,
IRQ_TYPE_NONE);
@@ -656,7 +648,7 @@ static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
}
gpiochip_set_nested_irqchip(chip,
- &mcp23s08_irq_chip,
+ &mcp->irq_chip,
mcp->irq);
return 0;
@@ -1047,6 +1039,13 @@ static int mcp230xx_probe(struct i2c_client *client,
return -ENOMEM;
mcp->irq = client->irq;
+ mcp->irq_chip.name = dev_name(&client->dev);
+ mcp->irq_chip.irq_mask = mcp23s08_irq_mask;
+ mcp->irq_chip.irq_unmask = mcp23s08_irq_unmask;
+ mcp->irq_chip.irq_set_type = mcp23s08_irq_set_type;
+ mcp->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock;
+ mcp->irq_chip.irq_bus_sync_unlock = mcp23s08_irq_bus_unlock;
+
status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr,
id->driver_data, pdata->base, 0);
if (status)
@@ -1144,8 +1143,7 @@ static int mcp23s08_probe(struct spi_device *spi)
return -ENODEV;
data = devm_kzalloc(&spi->dev,
- sizeof(*data) + chips * sizeof(struct mcp23s08),
- GFP_KERNEL);
+ struct_size(data, chip, chips), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1157,6 +1155,13 @@ static int mcp23s08_probe(struct spi_device *spi)
chips--;
data->mcp[addr] = &data->chip[chips];
data->mcp[addr]->irq = spi->irq;
+ data->mcp[addr]->irq_chip.name = dev_name(&spi->dev);
+ data->mcp[addr]->irq_chip.irq_mask = mcp23s08_irq_mask;
+ data->mcp[addr]->irq_chip.irq_unmask = mcp23s08_irq_unmask;
+ data->mcp[addr]->irq_chip.irq_set_type = mcp23s08_irq_set_type;
+ data->mcp[addr]->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock;
+ data->mcp[addr]->irq_chip.irq_bus_sync_unlock =
+ mcp23s08_irq_bus_unlock;
status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
0x40 | (addr << 1), type,
pdata->base, addr);
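
Two things change in this probe path: the shared static irq_chip becomes
per-instance state (so each device can carry its own name), and the
open-coded allocation size is replaced by struct_size(), which computes the
size of a struct ending in a flexible array member with overflow checking.
A sketch of the latter with stand-in types:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct item { int v; };
	struct container {
		int nitems;
		struct item items[];	/* flexible array member */
	};

	static struct container *alloc_container(int n)
	{
		/* sizeof(*c) + n * sizeof(struct item), checked for overflow */
		struct container *c = kzalloc(struct_size(c, items, n),
					      GFP_KERNEL);

		if (c)
			c->nitems = n;
		return c;
	}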
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index 4ffd56ff809e..1c6ba978c69f 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -95,31 +95,6 @@ enum {
.intr_detection_width = -1, \
}
-#define UFS_RESET(pg_name, offset) \
- { \
- .name = #pg_name, \
- .pins = pg_name##_pins, \
- .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
- .ctl_reg = offset, \
- .io_reg = offset + 0x4, \
- .intr_cfg_reg = 0, \
- .intr_status_reg = 0, \
- .intr_target_reg = 0, \
- .tile = NORTH, \
- .mux_bit = -1, \
- .pull_bit = 3, \
- .drv_bit = 0, \
- .oe_bit = -1, \
- .in_bit = -1, \
- .out_bit = 0, \
- .intr_enable_bit = -1, \
- .intr_status_bit = -1, \
- .intr_target_bit = -1, \
- .intr_raw_status_bit = -1, \
- .intr_polarity_bit = -1, \
- .intr_detection_bit = -1, \
- .intr_detection_width = -1, \
- }
static const struct pinctrl_pin_desc qcs404_pins[] = {
PINCTRL_PIN(0, "GPIO_0"),
PINCTRL_PIN(1, "GPIO_1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index cb512c7a5251..76e57ae2f6e8 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -675,11 +675,11 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
else
seq_printf(s, " %-4s",
pad->output_enabled ? "out" : "in");
+ seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
seq_printf(s, " %-7s", pmic_gpio_functions[function]);
seq_printf(s, " vin-%d", pad->power_source);
seq_printf(s, " %-27s", biases[pad->pullup]);
seq_printf(s, " %-10s", buffer_types[pad->buffer_type]);
- seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
seq_printf(s, " %-7s", strengths[pad->strength]);
seq_printf(s, " atest-%d", pad->atest);
seq_printf(s, " dtest-%d", pad->dtest_buffer);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index f49ea3d92aa1..ebc27b06718c 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -325,13 +325,6 @@ err_domains:
return ret;
}
-static u32 exynos_eint_wake_mask = 0xffffffff;
-
-u32 exynos_get_eint_wake_mask(void)
-{
- return exynos_eint_wake_mask;
-}
-
static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
{
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
@@ -342,10 +335,9 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
pr_info("wake %s for irq %d\n", on ? "enabled" : "disabled", irqd->irq);
if (!on)
- exynos_eint_wake_mask |= bit;
+ our_chip->eint_wake_mask_value |= bit;
else
- exynos_eint_wake_mask &= ~bit;
- our_chip->eint_wake_mask_value = exynos_eint_wake_mask;
+ our_chip->eint_wake_mask_value &= ~bit;
return 0;
}
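
Dropping the global exynos_eint_wake_mask leaves each bank's
eint_wake_mask_value as the single source of truth. A sketch of the
per-bank bookkeeping with illustrative names; note the inverted hardware
convention (a set bit masks the wake source):

	#include <linux/bits.h>
	#include <linux/types.h>

	struct eint_bank { u32 eint_wake_mask_value; };

	static void bank_set_wake(struct eint_bank *bank, unsigned int line,
				  bool on)
	{
		if (on)
			bank->eint_wake_mask_value &= ~BIT(line);
		else
			bank->eint_wake_mask_value |= BIT(line);
	}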
diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/sh-pfc/pfc-emev2.c
index dc271c3243df..310c6f3ee7cc 100644
--- a/drivers/pinctrl/sh-pfc/pfc-emev2.c
+++ b/drivers/pinctrl/sh-pfc/pfc-emev2.c
@@ -1260,6 +1260,14 @@ static const char * const dtv_groups[] = {
"dtv_b",
};
+static const char * const err_rst_reqb_groups[] = {
+ "err_rst_reqb",
+};
+
+static const char * const ext_clki_groups[] = {
+ "ext_clki",
+};
+
static const char * const iic0_groups[] = {
"iic0",
};
@@ -1282,6 +1290,10 @@ static const char * const lcd_groups[] = {
"yuv3",
};
+static const char * const lowpwr_groups[] = {
+ "lowpwr",
+};
+
static const char * const ntsc_groups[] = {
"ntsc_clk",
"ntsc_data",
@@ -1295,6 +1307,10 @@ static const char * const pwm1_groups[] = {
"pwm1",
};
+static const char * const ref_clko_groups[] = {
+ "ref_clko",
+};
+
static const char * const sd_groups[] = {
"sd_cki",
};
@@ -1388,13 +1404,17 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(cam),
SH_PFC_FUNCTION(cf),
SH_PFC_FUNCTION(dtv),
+ SH_PFC_FUNCTION(err_rst_reqb),
+ SH_PFC_FUNCTION(ext_clki),
SH_PFC_FUNCTION(iic0),
SH_PFC_FUNCTION(iic1),
SH_PFC_FUNCTION(jtag),
SH_PFC_FUNCTION(lcd),
+ SH_PFC_FUNCTION(lowpwr),
SH_PFC_FUNCTION(ntsc),
SH_PFC_FUNCTION(pwm0),
SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(ref_clko),
SH_PFC_FUNCTION(sd),
SH_PFC_FUNCTION(sdi0),
SH_PFC_FUNCTION(sdi1),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 6bcdb4b5e69e..068b5e6334d1 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -1264,8 +1264,8 @@ static const struct sh_pfc_pin pinmux_pins[] = {
/* Pins not associated with a GPIO port */
SH_PFC_PIN_NAMED(3, 20, C20),
- SH_PFC_PIN_NAMED(20, 1, T1),
- SH_PFC_PIN_NAMED(25, 2, Y2),
+ SH_PFC_PIN_NAMED(1, 20, A20),
+ SH_PFC_PIN_NAMED(2, 25, B25),
};
/* - macro */
@@ -1400,7 +1400,7 @@ HSPI_PFC_DAT(hspi1_a, HSPI_CLK1_A, HSPI_CS1_A,
HSPI_RX1_A, HSPI_TX1_A);
HSPI_PFC_PIN(hspi1_b, RCAR_GP_PIN(0, 27), RCAR_GP_PIN(0, 26),
- PIN_NUMBER(20, 1), PIN_NUMBER(25, 2));
+ PIN_NUMBER(1, 20), PIN_NUMBER(2, 25));
HSPI_PFC_DAT(hspi1_b, HSPI_CLK1_B, HSPI_CS1_B,
HSPI_RX1_B, HSPI_TX1_B);
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index ab7a35392cd8..a84229cb8cd4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -10,7 +10,9 @@
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/sys_soc.h>
+#include "core.h"
#include "sh_pfc.h"
/*
@@ -5691,7 +5693,22 @@ static int r8a7790_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return 31 - (pin & 0x1f);
}
+static const struct soc_device_attribute r8a7790_tdsel[] = {
+ { .soc_id = "r8a7790", .revision = "ES1.0" },
+ { /* sentinel */ }
+};
+
+static int r8a7790_pinmux_soc_init(struct sh_pfc *pfc)
+{
+ /* Initialize TDSEL on old revisions */
+ if (soc_device_match(r8a7790_tdsel))
+ sh_pfc_write(pfc, 0xe6060088, 0x00155554);
+
+ return 0;
+}
+
static const struct sh_pfc_soc_operations r8a7790_pinmux_ops = {
+ .init = r8a7790_pinmux_soc_init,
.pin_to_pocctrl = r8a7790_pin_to_pocctrl,
};
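This hunk, together with the matching r8a7794 one further down, wires up the per-SoC .init hook in sh_pfc_soc_operations: soc_device_match() restricts the quirk to ES1.0 silicon, and a single sh_pfc_write() initializes the TDSEL register at probe time. Register address and value are taken verbatim from the patch; the pin-to-POC mapping is unchanged.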
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 2859231aaffc..d8b13d4e9bbf 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -4317,7 +4317,7 @@ static const unsigned int vin1_clk_pins[] = {
static const unsigned int vin1_clk_mux[] = {
VI1_CLK_MARK,
};
-static const union vin_data vin1_b_data_pins = {
+static const union vin_data vin1_data_b_pins = {
.data24 = {
/* B */
RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
@@ -4336,7 +4336,7 @@ static const union vin_data vin1_b_data_pins = {
RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 20),
},
};
-static const union vin_data vin1_b_data_mux = {
+static const union vin_data vin1_data_b_mux = {
.data24 = {
/* B */
VI1_DATA0_B_MARK, VI1_DATA1_B_MARK,
@@ -4355,7 +4355,7 @@ static const union vin_data vin1_b_data_mux = {
VI1_R6_B_MARK, VI1_R7_B_MARK,
},
};
-static const unsigned int vin1_b_data18_pins[] = {
+static const unsigned int vin1_data18_b_pins[] = {
/* B */
RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
@@ -4369,7 +4369,7 @@ static const unsigned int vin1_b_data18_pins[] = {
RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 18),
RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 20),
};
-static const unsigned int vin1_b_data18_mux[] = {
+static const unsigned int vin1_data18_b_mux[] = {
/* B */
VI1_DATA2_B_MARK, VI1_DATA3_B_MARK,
VI1_DATA4_B_MARK, VI1_DATA5_B_MARK,
@@ -4383,30 +4383,30 @@ static const unsigned int vin1_b_data18_mux[] = {
VI1_R4_B_MARK, VI1_R5_B_MARK,
VI1_R6_B_MARK, VI1_R7_B_MARK,
};
-static const unsigned int vin1_b_sync_pins[] = {
+static const unsigned int vin1_sync_b_pins[] = {
RCAR_GP_PIN(3, 17), /* HSYNC */
RCAR_GP_PIN(3, 18), /* VSYNC */
};
-static const unsigned int vin1_b_sync_mux[] = {
+static const unsigned int vin1_sync_b_mux[] = {
VI1_HSYNC_N_B_MARK,
VI1_VSYNC_N_B_MARK,
};
-static const unsigned int vin1_b_field_pins[] = {
+static const unsigned int vin1_field_b_pins[] = {
RCAR_GP_PIN(3, 20),
};
-static const unsigned int vin1_b_field_mux[] = {
+static const unsigned int vin1_field_b_mux[] = {
VI1_FIELD_B_MARK,
};
-static const unsigned int vin1_b_clkenb_pins[] = {
+static const unsigned int vin1_clkenb_b_pins[] = {
RCAR_GP_PIN(3, 19),
};
-static const unsigned int vin1_b_clkenb_mux[] = {
+static const unsigned int vin1_clkenb_b_mux[] = {
VI1_CLKENB_B_MARK,
};
-static const unsigned int vin1_b_clk_pins[] = {
+static const unsigned int vin1_clk_b_pins[] = {
RCAR_GP_PIN(3, 16),
};
-static const unsigned int vin1_b_clk_mux[] = {
+static const unsigned int vin1_clk_b_mux[] = {
VI1_CLK_B_MARK,
};
/* - VIN2 ----------------------------------------------------------------- */
@@ -4784,17 +4784,17 @@ static const struct {
SH_PFC_PIN_GROUP(vin1_field),
SH_PFC_PIN_GROUP(vin1_clkenb),
SH_PFC_PIN_GROUP(vin1_clk),
- VIN_DATA_PIN_GROUP(vin1_b_data, 24),
- VIN_DATA_PIN_GROUP(vin1_b_data, 20),
- SH_PFC_PIN_GROUP(vin1_b_data18),
- VIN_DATA_PIN_GROUP(vin1_b_data, 16),
- VIN_DATA_PIN_GROUP(vin1_b_data, 12),
- VIN_DATA_PIN_GROUP(vin1_b_data, 10),
- VIN_DATA_PIN_GROUP(vin1_b_data, 8),
- SH_PFC_PIN_GROUP(vin1_b_sync),
- SH_PFC_PIN_GROUP(vin1_b_field),
- SH_PFC_PIN_GROUP(vin1_b_clkenb),
- SH_PFC_PIN_GROUP(vin1_b_clk),
+ VIN_DATA_PIN_GROUP(vin1_data, 24, _b),
+ VIN_DATA_PIN_GROUP(vin1_data, 20, _b),
+ SH_PFC_PIN_GROUP(vin1_data18_b),
+ VIN_DATA_PIN_GROUP(vin1_data, 16, _b),
+ VIN_DATA_PIN_GROUP(vin1_data, 12, _b),
+ VIN_DATA_PIN_GROUP(vin1_data, 10, _b),
+ VIN_DATA_PIN_GROUP(vin1_data, 8, _b),
+ SH_PFC_PIN_GROUP(vin1_sync_b),
+ SH_PFC_PIN_GROUP(vin1_field_b),
+ SH_PFC_PIN_GROUP(vin1_clkenb_b),
+ SH_PFC_PIN_GROUP(vin1_clk_b),
SH_PFC_PIN_GROUP(vin2_data8),
SH_PFC_PIN_GROUP(vin2_sync),
SH_PFC_PIN_GROUP(vin2_field),
@@ -5236,7 +5236,7 @@ static const char * const scifb2_groups[] = {
"scifb2_data_b",
"scifb2_clk_b",
"scifb2_ctrl_b",
- "scifb0_data_c",
+ "scifb2_data_c",
"scifb2_clk_c",
"scifb2_data_d",
};
@@ -5335,17 +5335,17 @@ static const char * const vin1_groups[] = {
"vin1_field",
"vin1_clkenb",
"vin1_clk",
- "vin1_b_data24",
- "vin1_b_data20",
- "vin1_b_data18",
- "vin1_b_data16",
- "vin1_b_data12",
- "vin1_b_data10",
- "vin1_b_data8",
- "vin1_b_sync",
- "vin1_b_field",
- "vin1_b_clkenb",
- "vin1_b_clk",
+ "vin1_data24_b",
+ "vin1_data20_b",
+ "vin1_data18_b",
+ "vin1_data16_b",
+ "vin1_data12_b",
+ "vin1_data10_b",
+ "vin1_data8_b",
+ "vin1_sync_b",
+ "vin1_field_b",
+ "vin1_clkenb_b",
+ "vin1_clk_b",
};
static const char * const vin2_groups[] = {
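The r8a7791 hunks above are a pure rename: the "B" pinout variants move from the vin1_b_<signal> spelling to the vin1_<signal>_b convention used elsewhere in sh-pfc, and VIN_DATA_PIN_GROUP() gains a suffix argument (here _b) so the shared data arrays can still generate the width-specific groups. The scifb2_groups hunk also fixes a genuine typo: the list referenced "scifb0_data_c" where "scifb2_data_c" was meant.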
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
index a623459b234e..d36da5652de6 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
@@ -1913,6 +1913,7 @@ static const char * const vin1_groups[] = {
"vin1_data8",
"vin1_data24_b",
"vin1_data20_b",
+ "vin1_data18_b",
"vin1_data16_b",
"vin1_sync",
"vin1_field",
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index fcf1339c4058..958a5f714c93 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/sys_soc.h>
#include "core.h"
#include "sh_pfc.h"
@@ -5560,7 +5561,22 @@ static int r8a7794_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return -EINVAL;
}
+static const struct soc_device_attribute r8a7794_tdsel[] = {
+ { .soc_id = "r8a7794", .revision = "ES1.0" },
+ { /* sentinel */ }
+};
+
+static int r8a7794_pinmux_soc_init(struct sh_pfc *pfc)
+{
+ /* Initialize TDSEL on old revisions */
+ if (soc_device_match(r8a7794_tdsel))
+ sh_pfc_write(pfc, 0xe6060068, 0x55555500);
+
+ return 0;
+}
+
static const struct sh_pfc_soc_operations r8a7794_pinmux_ops = {
+ .init = r8a7794_pinmux_soc_init,
.pin_to_pocctrl = r8a7794_pin_to_pocctrl,
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 01105bb83598..db9add1405c5 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -4098,67 +4098,29 @@ static const unsigned int vin4_clk_mux[] = {
};
/* - VIN5 ------------------------------------------------------------------- */
-static const unsigned int vin5_data8_pins[] = {
- RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
- RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
- RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
-};
-static const unsigned int vin5_data8_mux[] = {
- VI5_DATA0_MARK, VI5_DATA1_MARK,
- VI5_DATA2_MARK, VI5_DATA3_MARK,
- VI5_DATA4_MARK, VI5_DATA5_MARK,
- VI5_DATA6_MARK, VI5_DATA7_MARK,
-};
-static const unsigned int vin5_data10_pins[] = {
- RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
- RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
- RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
- RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
-};
-static const unsigned int vin5_data10_mux[] = {
- VI5_DATA0_MARK, VI5_DATA1_MARK,
- VI5_DATA2_MARK, VI5_DATA3_MARK,
- VI5_DATA4_MARK, VI5_DATA5_MARK,
- VI5_DATA6_MARK, VI5_DATA7_MARK,
- VI5_DATA8_MARK, VI5_DATA9_MARK,
-};
-static const unsigned int vin5_data12_pins[] = {
- RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
- RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
- RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
- RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
- RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
-};
-static const unsigned int vin5_data12_mux[] = {
- VI5_DATA0_MARK, VI5_DATA1_MARK,
- VI5_DATA2_MARK, VI5_DATA3_MARK,
- VI5_DATA4_MARK, VI5_DATA5_MARK,
- VI5_DATA6_MARK, VI5_DATA7_MARK,
- VI5_DATA8_MARK, VI5_DATA9_MARK,
- VI5_DATA10_MARK, VI5_DATA11_MARK,
-};
-static const unsigned int vin5_data16_pins[] = {
- RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
- RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
- RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
- RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
- RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
- RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
- RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+static const union vin_data16 vin5_data_pins = {
+ .data16 = {
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ },
};
-static const unsigned int vin5_data16_mux[] = {
- VI5_DATA0_MARK, VI5_DATA1_MARK,
- VI5_DATA2_MARK, VI5_DATA3_MARK,
- VI5_DATA4_MARK, VI5_DATA5_MARK,
- VI5_DATA6_MARK, VI5_DATA7_MARK,
- VI5_DATA8_MARK, VI5_DATA9_MARK,
- VI5_DATA10_MARK, VI5_DATA11_MARK,
- VI5_DATA12_MARK, VI5_DATA13_MARK,
- VI5_DATA14_MARK, VI5_DATA15_MARK,
+static const union vin_data16 vin5_data_mux = {
+ .data16 = {
+ VI5_DATA0_MARK, VI5_DATA1_MARK,
+ VI5_DATA2_MARK, VI5_DATA3_MARK,
+ VI5_DATA4_MARK, VI5_DATA5_MARK,
+ VI5_DATA6_MARK, VI5_DATA7_MARK,
+ VI5_DATA8_MARK, VI5_DATA9_MARK,
+ VI5_DATA10_MARK, VI5_DATA11_MARK,
+ VI5_DATA12_MARK, VI5_DATA13_MARK,
+ VI5_DATA14_MARK, VI5_DATA15_MARK,
+ },
};
static const unsigned int vin5_sync_pins[] = {
/* HSYNC#, VSYNC# */
@@ -4530,10 +4492,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
SH_PFC_PIN_GROUP(vin4_clk),
- SH_PFC_PIN_GROUP(vin5_data8),
- SH_PFC_PIN_GROUP(vin5_data10),
- SH_PFC_PIN_GROUP(vin5_data12),
- SH_PFC_PIN_GROUP(vin5_data16),
+ VIN_DATA_PIN_GROUP(vin5_data, 8),
+ VIN_DATA_PIN_GROUP(vin5_data, 10),
+ VIN_DATA_PIN_GROUP(vin5_data, 12),
+ VIN_DATA_PIN_GROUP(vin5_data, 16),
SH_PFC_PIN_GROUP(vin5_sync),
SH_PFC_PIN_GROUP(vin5_field),
SH_PFC_PIN_GROUP(vin5_clkenb),
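The r8a7795 VIN5 rework above deduplicates the per-width pin and mux tables: since each narrower bus width is a strict prefix of the 16-bit layout, one array can back every width, and VIN_DATA_PIN_GROUP(vin5_data, n) builds the "vin5_data<n>" group from its first n entries. Assuming sh_pfc.h gained it alongside this change, the backing union presumably looks like:

	union vin_data16 {
		unsigned int data16[16];
		unsigned int data12[12];
		unsigned int data10[10];
		unsigned int data8[8];
	};

The r8a7796 hunk below is the identical treatment, and the r8a77970/r8a77980 hunks further down do the same with a 12-entry union.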
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 4b98ad8d93d9..72348a4f2ece 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -4070,67 +4070,29 @@ static const unsigned int vin4_clk_mux[] = {
};
/* - VIN5 ------------------------------------------------------------------- */
-static const unsigned int vin5_data8_pins[] = {
- RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
- RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
- RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
-};
-static const unsigned int vin5_data8_mux[] = {
- VI5_DATA0_MARK, VI5_DATA1_MARK,
- VI5_DATA2_MARK, VI5_DATA3_MARK,
- VI5_DATA4_MARK, VI5_DATA5_MARK,
- VI5_DATA6_MARK, VI5_DATA7_MARK,
-};
-static const unsigned int vin5_data10_pins[] = {
- RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
- RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
- RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
- RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
-};
-static const unsigned int vin5_data10_mux[] = {
- VI5_DATA0_MARK, VI5_DATA1_MARK,
- VI5_DATA2_MARK, VI5_DATA3_MARK,
- VI5_DATA4_MARK, VI5_DATA5_MARK,
- VI5_DATA6_MARK, VI5_DATA7_MARK,
- VI5_DATA8_MARK, VI5_DATA9_MARK,
-};
-static const unsigned int vin5_data12_pins[] = {
- RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
- RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
- RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
- RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
- RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
-};
-static const unsigned int vin5_data12_mux[] = {
- VI5_DATA0_MARK, VI5_DATA1_MARK,
- VI5_DATA2_MARK, VI5_DATA3_MARK,
- VI5_DATA4_MARK, VI5_DATA5_MARK,
- VI5_DATA6_MARK, VI5_DATA7_MARK,
- VI5_DATA8_MARK, VI5_DATA9_MARK,
- VI5_DATA10_MARK, VI5_DATA11_MARK,
-};
-static const unsigned int vin5_data16_pins[] = {
- RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
- RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
- RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
- RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
- RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
- RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
- RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+static const union vin_data16 vin5_data_pins = {
+ .data16 = {
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ },
};
-static const unsigned int vin5_data16_mux[] = {
- VI5_DATA0_MARK, VI5_DATA1_MARK,
- VI5_DATA2_MARK, VI5_DATA3_MARK,
- VI5_DATA4_MARK, VI5_DATA5_MARK,
- VI5_DATA6_MARK, VI5_DATA7_MARK,
- VI5_DATA8_MARK, VI5_DATA9_MARK,
- VI5_DATA10_MARK, VI5_DATA11_MARK,
- VI5_DATA12_MARK, VI5_DATA13_MARK,
- VI5_DATA14_MARK, VI5_DATA15_MARK,
+static const union vin_data16 vin5_data_mux = {
+ .data16 = {
+ VI5_DATA0_MARK, VI5_DATA1_MARK,
+ VI5_DATA2_MARK, VI5_DATA3_MARK,
+ VI5_DATA4_MARK, VI5_DATA5_MARK,
+ VI5_DATA6_MARK, VI5_DATA7_MARK,
+ VI5_DATA8_MARK, VI5_DATA9_MARK,
+ VI5_DATA10_MARK, VI5_DATA11_MARK,
+ VI5_DATA12_MARK, VI5_DATA13_MARK,
+ VI5_DATA14_MARK, VI5_DATA15_MARK,
+ },
};
static const unsigned int vin5_sync_pins[] = {
/* HSYNC#, VSYNC# */
@@ -4468,10 +4430,10 @@ static const struct {
SH_PFC_PIN_GROUP(vin4_field),
SH_PFC_PIN_GROUP(vin4_clkenb),
SH_PFC_PIN_GROUP(vin4_clk),
- SH_PFC_PIN_GROUP(vin5_data8),
- SH_PFC_PIN_GROUP(vin5_data10),
- SH_PFC_PIN_GROUP(vin5_data12),
- SH_PFC_PIN_GROUP(vin5_data16),
+ VIN_DATA_PIN_GROUP(vin5_data, 8),
+ VIN_DATA_PIN_GROUP(vin5_data, 10),
+ VIN_DATA_PIN_GROUP(vin5_data, 12),
+ VIN_DATA_PIN_GROUP(vin5_data, 16),
SH_PFC_PIN_GROUP(vin5_sync),
SH_PFC_PIN_GROUP(vin5_field),
SH_PFC_PIN_GROUP(vin5_clkenb),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index a7c4882de09e..14c4b671cddf 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -554,7 +554,7 @@ MOD_SEL0_4_3 MOD_SEL1_4 \
FM(AVB_RX_CTL) FM(AVB_RXC) FM(AVB_RD0) FM(AVB_RD1) FM(AVB_RD2) FM(AVB_RD3) \
FM(AVB_TXCREFCLK) FM(AVB_MDIO) \
FM(PRESETOUT) \
- FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) \
+ FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN3) \
FM(TMS) FM(TDO) FM(ASEBRK) FM(MLB_REF) FM(TDI) FM(TCK) FM(TRST) FM(EXTALR)
enum {
@@ -1566,7 +1566,7 @@ static const struct sh_pfc_pin pinmux_pins[] = {
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 5, QSPI1_MISO_IO1, CFG_FLAGS),
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 7, DU_DOTCLKIN0, CFG_FLAGS),
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 8, DU_DOTCLKIN1, CFG_FLAGS),
- SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 8, DU_DOTCLKIN2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 8, DU_DOTCLKIN3, CFG_FLAGS),
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 26, TRST#, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 29, TDI, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN),
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 30, TMS, CFG_FLAGS),
@@ -1850,6 +1850,280 @@ static const unsigned int canfd1_data_mux[] = {
CANFD1_TX_MARK, CANFD1_RX_MARK,
};
+/* - DRIF0 --------------------------------------------------------------- */
+static const unsigned int drif0_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+
+static const unsigned int drif0_ctrl_a_mux[] = {
+ RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
+};
+
+static const unsigned int drif0_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 10),
+};
+
+static const unsigned int drif0_data0_a_mux[] = {
+ RIF0_D0_A_MARK,
+};
+
+static const unsigned int drif0_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 7),
+};
+
+static const unsigned int drif0_data1_a_mux[] = {
+ RIF0_D1_A_MARK,
+};
+
+static const unsigned int drif0_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+
+static const unsigned int drif0_ctrl_b_mux[] = {
+ RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
+};
+
+static const unsigned int drif0_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 1),
+};
+
+static const unsigned int drif0_data0_b_mux[] = {
+ RIF0_D0_B_MARK,
+};
+
+static const unsigned int drif0_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 2),
+};
+
+static const unsigned int drif0_data1_b_mux[] = {
+ RIF0_D1_B_MARK,
+};
+
+static const unsigned int drif0_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
+};
+
+static const unsigned int drif0_ctrl_c_mux[] = {
+ RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
+};
+
+static const unsigned int drif0_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 13),
+};
+
+static const unsigned int drif0_data0_c_mux[] = {
+ RIF0_D0_C_MARK,
+};
+
+static const unsigned int drif0_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 14),
+};
+
+static const unsigned int drif0_data1_c_mux[] = {
+ RIF0_D1_C_MARK,
+};
+
+/* - DRIF1 --------------------------------------------------------------- */
+static const unsigned int drif1_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+
+static const unsigned int drif1_ctrl_a_mux[] = {
+ RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
+};
+
+static const unsigned int drif1_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+
+static const unsigned int drif1_data0_a_mux[] = {
+ RIF1_D0_A_MARK,
+};
+
+static const unsigned int drif1_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+
+static const unsigned int drif1_data1_a_mux[] = {
+ RIF1_D1_A_MARK,
+};
+
+static const unsigned int drif1_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
+};
+
+static const unsigned int drif1_ctrl_b_mux[] = {
+ RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
+};
+
+static const unsigned int drif1_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 7),
+};
+
+static const unsigned int drif1_data0_b_mux[] = {
+ RIF1_D0_B_MARK,
+};
+
+static const unsigned int drif1_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 8),
+};
+
+static const unsigned int drif1_data1_b_mux[] = {
+ RIF1_D1_B_MARK,
+};
+
+static const unsigned int drif1_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+
+static const unsigned int drif1_ctrl_c_mux[] = {
+ RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
+};
+
+static const unsigned int drif1_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 6),
+};
+
+static const unsigned int drif1_data0_c_mux[] = {
+ RIF1_D0_C_MARK,
+};
+
+static const unsigned int drif1_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 10),
+};
+
+static const unsigned int drif1_data1_c_mux[] = {
+ RIF1_D1_C_MARK,
+};
+
+/* - DRIF2 --------------------------------------------------------------- */
+static const unsigned int drif2_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+
+static const unsigned int drif2_ctrl_a_mux[] = {
+ RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
+};
+
+static const unsigned int drif2_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 7),
+};
+
+static const unsigned int drif2_data0_a_mux[] = {
+ RIF2_D0_A_MARK,
+};
+
+static const unsigned int drif2_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 10),
+};
+
+static const unsigned int drif2_data1_a_mux[] = {
+ RIF2_D1_A_MARK,
+};
+
+static const unsigned int drif2_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+
+static const unsigned int drif2_ctrl_b_mux[] = {
+ RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
+};
+
+static const unsigned int drif2_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 30),
+};
+
+static const unsigned int drif2_data0_b_mux[] = {
+ RIF2_D0_B_MARK,
+};
+
+static const unsigned int drif2_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 31),
+};
+
+static const unsigned int drif2_data1_b_mux[] = {
+ RIF2_D1_B_MARK,
+};
+
+/* - DRIF3 --------------------------------------------------------------- */
+static const unsigned int drif3_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+
+static const unsigned int drif3_ctrl_a_mux[] = {
+ RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
+};
+
+static const unsigned int drif3_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+
+static const unsigned int drif3_data0_a_mux[] = {
+ RIF3_D0_A_MARK,
+};
+
+static const unsigned int drif3_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+
+static const unsigned int drif3_data1_a_mux[] = {
+ RIF3_D1_A_MARK,
+};
+
+static const unsigned int drif3_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+
+static const unsigned int drif3_ctrl_b_mux[] = {
+ RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
+};
+
+static const unsigned int drif3_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 28),
+};
+
+static const unsigned int drif3_data0_b_mux[] = {
+ RIF3_D0_B_MARK,
+};
+
+static const unsigned int drif3_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 29),
+};
+
+static const unsigned int drif3_data1_b_mux[] = {
+ RIF3_D1_B_MARK,
+};
+
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
/* R[7:2], G[7:2], B[7:2] */
@@ -3760,6 +4034,42 @@ static const unsigned int ssi9_ctrl_b_mux[] = {
SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
};
+/* - TMU -------------------------------------------------------------------- */
+static const unsigned int tmu_tclk1_a_pins[] = {
+ /* TCLK */
+ RCAR_GP_PIN(6, 23),
+};
+
+static const unsigned int tmu_tclk1_a_mux[] = {
+ TCLK1_A_MARK,
+};
+
+static const unsigned int tmu_tclk1_b_pins[] = {
+ /* TCLK */
+ RCAR_GP_PIN(5, 19),
+};
+
+static const unsigned int tmu_tclk1_b_mux[] = {
+ TCLK1_B_MARK,
+};
+
+static const unsigned int tmu_tclk2_a_pins[] = {
+ /* TCLK */
+ RCAR_GP_PIN(6, 19),
+};
+
+static const unsigned int tmu_tclk2_a_mux[] = {
+ TCLK2_A_MARK,
+};
+
+static const unsigned int tmu_tclk2_b_pins[] = {
+ /* TCLK */
+ RCAR_GP_PIN(6, 28),
+};
+
+static const unsigned int tmu_tclk2_b_mux[] = {
+ TCLK2_B_MARK,
+};
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
@@ -4037,6 +4347,36 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(canfd0_data_a),
SH_PFC_PIN_GROUP(canfd0_data_b),
SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
@@ -4280,6 +4620,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(ssi9_data_b),
SH_PFC_PIN_GROUP(ssi9_ctrl_a),
SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+ SH_PFC_PIN_GROUP(tmu_tclk1_a),
+ SH_PFC_PIN_GROUP(tmu_tclk1_b),
+ SH_PFC_PIN_GROUP(tmu_tclk2_a),
+ SH_PFC_PIN_GROUP(tmu_tclk2_b),
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb30),
@@ -4367,6 +4711,48 @@ static const char * const canfd1_groups[] = {
"canfd1_data",
};
+static const char * const drif0_groups[] = {
+ "drif0_ctrl_a",
+ "drif0_data0_a",
+ "drif0_data1_a",
+ "drif0_ctrl_b",
+ "drif0_data0_b",
+ "drif0_data1_b",
+ "drif0_ctrl_c",
+ "drif0_data0_c",
+ "drif0_data1_c",
+};
+
+static const char * const drif1_groups[] = {
+ "drif1_ctrl_a",
+ "drif1_data0_a",
+ "drif1_data1_a",
+ "drif1_ctrl_b",
+ "drif1_data0_b",
+ "drif1_data1_b",
+ "drif1_ctrl_c",
+ "drif1_data0_c",
+ "drif1_data1_c",
+};
+
+static const char * const drif2_groups[] = {
+ "drif2_ctrl_a",
+ "drif2_data0_a",
+ "drif2_data1_a",
+ "drif2_ctrl_b",
+ "drif2_data0_b",
+ "drif2_data1_b",
+};
+
+static const char * const drif3_groups[] = {
+ "drif3_ctrl_a",
+ "drif3_data0_a",
+ "drif3_data1_a",
+ "drif3_ctrl_b",
+ "drif3_data0_b",
+ "drif3_data1_b",
+};
+
static const char * const du_groups[] = {
"du_rgb666",
"du_rgb888",
@@ -4711,6 +5097,13 @@ static const char * const ssi_groups[] = {
"ssi9_ctrl_b",
};
+static const char * const tmu_groups[] = {
+ "tmu_tclk1_a",
+ "tmu_tclk1_b",
+ "tmu_tclk2_a",
+ "tmu_tclk2_b",
+};
+
static const char * const usb0_groups[] = {
"usb0",
};
@@ -4763,6 +5156,10 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(can_clk),
SH_PFC_FUNCTION(canfd0),
SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
SH_PFC_FUNCTION(du),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
@@ -4797,6 +5194,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi2),
SH_PFC_FUNCTION(sdhi3),
SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(tmu),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb30),
@@ -5740,7 +6138,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
} },
{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
- [ 0] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN2 */
+ [ 0] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN3 */
[ 1] = PIN_NONE,
[ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST */
[ 3] = PIN_A_NUMBER('D', 39), /* EXTALR*/
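The bulk of this r8a77965 diff adds DRIF0-3 and TMU pin groups. The _a/_b/_c suffixes are alternative pinouts of the same channel, selected through the MOD_SEL registers, so each variant gets its own pins/mux pair while all of them sit under a single drifN function. The DU_DOTCLKIN2 -> DU_DOTCLKIN3 edits are a rename, apparently to match the pin's name in the SoC documentation; its ball position ('R', 8) and bias-register slot stay the same.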
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
index 4a7ab84b366b..c5e67ba29f7c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
@@ -1578,47 +1578,25 @@ static const unsigned int tmu_tclk2_b_mux[] = {
};
/* - VIN0 ------------------------------------------------------------------- */
-static const unsigned int vin0_data8_pins[] = {
- RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
- RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
- RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
- RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
-};
-static const unsigned int vin0_data8_mux[] = {
- VI0_DATA0_MARK, VI0_DATA1_MARK,
- VI0_DATA2_MARK, VI0_DATA3_MARK,
- VI0_DATA4_MARK, VI0_DATA5_MARK,
- VI0_DATA6_MARK, VI0_DATA7_MARK,
+static const union vin_data12 vin0_data_pins = {
+ .data12 = {
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+ RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
+ RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+ },
};
-static const unsigned int vin0_data10_pins[] = {
- RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
- RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
- RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
- RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
- RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
-};
-static const unsigned int vin0_data10_mux[] = {
- VI0_DATA0_MARK, VI0_DATA1_MARK,
- VI0_DATA2_MARK, VI0_DATA3_MARK,
- VI0_DATA4_MARK, VI0_DATA5_MARK,
- VI0_DATA6_MARK, VI0_DATA7_MARK,
- VI0_DATA8_MARK, VI0_DATA9_MARK,
-};
-static const unsigned int vin0_data12_pins[] = {
- RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
- RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
- RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
- RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
- RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
- RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
-};
-static const unsigned int vin0_data12_mux[] = {
- VI0_DATA0_MARK, VI0_DATA1_MARK,
- VI0_DATA2_MARK, VI0_DATA3_MARK,
- VI0_DATA4_MARK, VI0_DATA5_MARK,
- VI0_DATA6_MARK, VI0_DATA7_MARK,
- VI0_DATA8_MARK, VI0_DATA9_MARK,
- VI0_DATA10_MARK, VI0_DATA11_MARK,
+static const union vin_data12 vin0_data_mux = {
+ .data12 = {
+ VI0_DATA0_MARK, VI0_DATA1_MARK,
+ VI0_DATA2_MARK, VI0_DATA3_MARK,
+ VI0_DATA4_MARK, VI0_DATA5_MARK,
+ VI0_DATA6_MARK, VI0_DATA7_MARK,
+ VI0_DATA8_MARK, VI0_DATA9_MARK,
+ VI0_DATA10_MARK, VI0_DATA11_MARK,
+ },
};
static const unsigned int vin0_sync_pins[] = {
/* HSYNC#, VSYNC# */
@@ -1650,47 +1628,25 @@ static const unsigned int vin0_clk_mux[] = {
};
/* - VIN1 ------------------------------------------------------------------- */
-static const unsigned int vin1_data8_pins[] = {
- RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
- RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
- RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
- RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
-};
-static const unsigned int vin1_data8_mux[] = {
- VI1_DATA0_MARK, VI1_DATA1_MARK,
- VI1_DATA2_MARK, VI1_DATA3_MARK,
- VI1_DATA4_MARK, VI1_DATA5_MARK,
- VI1_DATA6_MARK, VI1_DATA7_MARK,
-};
-static const unsigned int vin1_data10_pins[] = {
- RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
- RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
- RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
- RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
- RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
-};
-static const unsigned int vin1_data10_mux[] = {
- VI1_DATA0_MARK, VI1_DATA1_MARK,
- VI1_DATA2_MARK, VI1_DATA3_MARK,
- VI1_DATA4_MARK, VI1_DATA5_MARK,
- VI1_DATA6_MARK, VI1_DATA7_MARK,
- VI1_DATA8_MARK, VI1_DATA9_MARK,
-};
-static const unsigned int vin1_data12_pins[] = {
- RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
- RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
- RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
- RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
- RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
- RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+static const union vin_data12 vin1_data_pins = {
+ .data12 = {
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
+ RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+ },
};
-static const unsigned int vin1_data12_mux[] = {
- VI1_DATA0_MARK, VI1_DATA1_MARK,
- VI1_DATA2_MARK, VI1_DATA3_MARK,
- VI1_DATA4_MARK, VI1_DATA5_MARK,
- VI1_DATA6_MARK, VI1_DATA7_MARK,
- VI1_DATA8_MARK, VI1_DATA9_MARK,
- VI1_DATA10_MARK, VI1_DATA11_MARK,
+static const union vin_data12 vin1_data_mux = {
+ .data12 = {
+ VI1_DATA0_MARK, VI1_DATA1_MARK,
+ VI1_DATA2_MARK, VI1_DATA3_MARK,
+ VI1_DATA4_MARK, VI1_DATA5_MARK,
+ VI1_DATA6_MARK, VI1_DATA7_MARK,
+ VI1_DATA8_MARK, VI1_DATA9_MARK,
+ VI1_DATA10_MARK, VI1_DATA11_MARK,
+ },
};
static const unsigned int vin1_sync_pins[] = {
/* HSYNC#, VSYNC# */
@@ -1831,16 +1787,16 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(tmu_tclk1_b),
SH_PFC_PIN_GROUP(tmu_tclk2_a),
SH_PFC_PIN_GROUP(tmu_tclk2_b),
- SH_PFC_PIN_GROUP(vin0_data8),
- SH_PFC_PIN_GROUP(vin0_data10),
- SH_PFC_PIN_GROUP(vin0_data12),
+ VIN_DATA_PIN_GROUP(vin0_data, 8),
+ VIN_DATA_PIN_GROUP(vin0_data, 10),
+ VIN_DATA_PIN_GROUP(vin0_data, 12),
SH_PFC_PIN_GROUP(vin0_sync),
SH_PFC_PIN_GROUP(vin0_field),
SH_PFC_PIN_GROUP(vin0_clkenb),
SH_PFC_PIN_GROUP(vin0_clk),
- SH_PFC_PIN_GROUP(vin1_data8),
- SH_PFC_PIN_GROUP(vin1_data10),
- SH_PFC_PIN_GROUP(vin1_data12),
+ VIN_DATA_PIN_GROUP(vin1_data, 8),
+ VIN_DATA_PIN_GROUP(vin1_data, 10),
+ VIN_DATA_PIN_GROUP(vin1_data, 12),
SH_PFC_PIN_GROUP(vin1_sync),
SH_PFC_PIN_GROUP(vin1_field),
SH_PFC_PIN_GROUP(vin1_clkenb),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
index 8bef24502f0c..b807b67ae143 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
@@ -1970,47 +1970,25 @@ static const unsigned int vin0_clk_mux[] = {
};
/* - VIN1 ------------------------------------------------------------------- */
-static const unsigned int vin1_data8_pins[] = {
- RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
- RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
- RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
- RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
-};
-static const unsigned int vin1_data8_mux[] = {
- VI1_DATA0_MARK, VI1_DATA1_MARK,
- VI1_DATA2_MARK, VI1_DATA3_MARK,
- VI1_DATA4_MARK, VI1_DATA5_MARK,
- VI1_DATA6_MARK, VI1_DATA7_MARK,
-};
-static const unsigned int vin1_data10_pins[] = {
- RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
- RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
- RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
- RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
- RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
-};
-static const unsigned int vin1_data10_mux[] = {
- VI1_DATA0_MARK, VI1_DATA1_MARK,
- VI1_DATA2_MARK, VI1_DATA3_MARK,
- VI1_DATA4_MARK, VI1_DATA5_MARK,
- VI1_DATA6_MARK, VI1_DATA7_MARK,
- VI1_DATA8_MARK, VI1_DATA9_MARK,
-};
-static const unsigned int vin1_data12_pins[] = {
- RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
- RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
- RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
- RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
- RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
- RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+static const union vin_data12 vin1_data_pins = {
+ .data12 = {
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
+ RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+ },
};
-static const unsigned int vin1_data12_mux[] = {
- VI1_DATA0_MARK, VI1_DATA1_MARK,
- VI1_DATA2_MARK, VI1_DATA3_MARK,
- VI1_DATA4_MARK, VI1_DATA5_MARK,
- VI1_DATA6_MARK, VI1_DATA7_MARK,
- VI1_DATA8_MARK, VI1_DATA9_MARK,
- VI1_DATA10_MARK, VI1_DATA11_MARK,
+static const union vin_data12 vin1_data_mux = {
+ .data12 = {
+ VI1_DATA0_MARK, VI1_DATA1_MARK,
+ VI1_DATA2_MARK, VI1_DATA3_MARK,
+ VI1_DATA4_MARK, VI1_DATA5_MARK,
+ VI1_DATA6_MARK, VI1_DATA7_MARK,
+ VI1_DATA8_MARK, VI1_DATA9_MARK,
+ VI1_DATA10_MARK, VI1_DATA11_MARK,
+ },
};
static const unsigned int vin1_sync_pins[] = {
/* VI1_VSYNC#, VI1_HSYNC# */
@@ -2182,9 +2160,9 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(vin0_field),
SH_PFC_PIN_GROUP(vin0_clkenb),
SH_PFC_PIN_GROUP(vin0_clk),
- SH_PFC_PIN_GROUP(vin1_data8),
- SH_PFC_PIN_GROUP(vin1_data10),
- SH_PFC_PIN_GROUP(vin1_data12),
+ VIN_DATA_PIN_GROUP(vin1_data, 8),
+ VIN_DATA_PIN_GROUP(vin1_data, 10),
+ VIN_DATA_PIN_GROUP(vin1_data, 12),
SH_PFC_PIN_GROUP(vin1_sync),
SH_PFC_PIN_GROUP(vin1_field),
SH_PFC_PIN_GROUP(vin1_clkenb),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
index e40908dc37e0..151640c30e9d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -30,7 +30,16 @@
PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
PORT_GP_CFG_11(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
PORT_GP_CFG_20(5, fn, sfx, CFG_FLAGS), \
- PORT_GP_CFG_18(6, fn, sfx, CFG_FLAGS)
+ PORT_GP_CFG_9(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 9, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
+ PORT_GP_CFG_1(6, 10, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 11, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 12, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 16, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(6, 17, fn, sfx, CFG_FLAGS)
/*
* F_() : just information
* FM() : macro for FN_xxx / xxx_MARK
@@ -391,29 +400,33 @@ FM(IP12_23_20) IP12_23_20 FM(IP13_23_20) IP13_23_20 FM(IP14_23_20) IP14_23_20 FM
FM(IP12_27_24) IP12_27_24 FM(IP13_27_24) IP13_27_24 FM(IP14_27_24) IP14_27_24 FM(IP15_27_24) IP15_27_24 \
FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM(IP15_31_28) IP15_31_28
+/* The bit numbering in MOD_SEL fields is reversed */
+#define REV4(f0, f1, f2, f3) f0 f2 f1 f3
+#define REV8(f0, f1, f2, f3, f4, f5, f6, f7) f0 f4 f2 f6 f1 f5 f3 f7
+
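The REV4/REV8 wrappers compensate for MOD_SEL fields whose selector bits are wired in reverse order: entry k must be listed at the bit-reversed index of k, so that the position in the initializer again equals the value programmed into the field. For 2-bit fields that ordering is 0,2,1,3 (hence "f0 f2 f1 f3"); for 3-bit fields it is 0,4,2,6,1,5,3,7. A tiny sketch of the index mapping for the 3-bit case:

	/* Position p in a REV8 expansion holds entry bitrev3(p). */
	static unsigned int bitrev3(unsigned int k)
	{
		return ((k & 1) << 2) | (k & 2) | ((k & 4) >> 2);
	}

	/* bitrev3(1) == 4, so f4 is listed second -- matching REV8's
	 * "f0 f4 f2 f6 f1 f5 f3 f7" ordering. */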
/* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
-#define MOD_SEL0_30_29 FM(SEL_ADGB_0) FM(SEL_ADGB_1) FM(SEL_ADGB_2) F_(0, 0)
+#define MOD_SEL0_30_29 REV4(FM(SEL_ADGB_0), FM(SEL_ADGB_1), FM(SEL_ADGB_2), F_(0, 0))
#define MOD_SEL0_28 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1)
-#define MOD_SEL0_27_26 FM(SEL_FM_0) FM(SEL_FM_1) FM(SEL_FM_2) F_(0, 0)
+#define MOD_SEL0_27_26 REV4(FM(SEL_FM_0), FM(SEL_FM_1), FM(SEL_FM_2), F_(0, 0))
#define MOD_SEL0_25 FM(SEL_FSO_0) FM(SEL_FSO_1)
#define MOD_SEL0_24 FM(SEL_HSCIF0_0) FM(SEL_HSCIF0_1)
#define MOD_SEL0_23 FM(SEL_HSCIF1_0) FM(SEL_HSCIF1_1)
#define MOD_SEL0_22 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1)
-#define MOD_SEL0_21_20 FM(SEL_I2C1_0) FM(SEL_I2C1_1) FM(SEL_I2C1_2) FM(SEL_I2C1_3)
-#define MOD_SEL0_19_18_17 FM(SEL_I2C2_0) FM(SEL_I2C2_1) FM(SEL_I2C2_2) FM(SEL_I2C2_3) FM(SEL_I2C2_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL0_21_20 REV4(FM(SEL_I2C1_0), FM(SEL_I2C1_1), FM(SEL_I2C1_2), FM(SEL_I2C1_3))
+#define MOD_SEL0_19_18_17 REV8(FM(SEL_I2C2_0), FM(SEL_I2C2_1), FM(SEL_I2C2_2), FM(SEL_I2C2_3), FM(SEL_I2C2_4), F_(0, 0), F_(0, 0), F_(0, 0))
#define MOD_SEL0_16 FM(SEL_NDFC_0) FM(SEL_NDFC_1)
#define MOD_SEL0_15 FM(SEL_PWM0_0) FM(SEL_PWM0_1)
#define MOD_SEL0_14 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
-#define MOD_SEL0_13_12 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0)
-#define MOD_SEL0_11_10 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0)
+#define MOD_SEL0_13_12 REV4(FM(SEL_PWM2_0), FM(SEL_PWM2_1), FM(SEL_PWM2_2), F_(0, 0))
+#define MOD_SEL0_11_10 REV4(FM(SEL_PWM3_0), FM(SEL_PWM3_1), FM(SEL_PWM3_2), F_(0, 0))
#define MOD_SEL0_9 FM(SEL_PWM4_0) FM(SEL_PWM4_1)
#define MOD_SEL0_8 FM(SEL_PWM5_0) FM(SEL_PWM5_1)
#define MOD_SEL0_7 FM(SEL_PWM6_0) FM(SEL_PWM6_1)
-#define MOD_SEL0_6_5 FM(SEL_REMOCON_0) FM(SEL_REMOCON_1) FM(SEL_REMOCON_2) F_(0, 0)
+#define MOD_SEL0_6_5 REV4(FM(SEL_REMOCON_0), FM(SEL_REMOCON_1), FM(SEL_REMOCON_2), F_(0, 0))
#define MOD_SEL0_4 FM(SEL_SCIF_0) FM(SEL_SCIF_1)
#define MOD_SEL0_3 FM(SEL_SCIF0_0) FM(SEL_SCIF0_1)
#define MOD_SEL0_2 FM(SEL_SCIF2_0) FM(SEL_SCIF2_1)
-#define MOD_SEL0_1_0 FM(SEL_SPEED_PULSE_IF_0) FM(SEL_SPEED_PULSE_IF_1) FM(SEL_SPEED_PULSE_IF_2) F_(0, 0)
+#define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0))
/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
@@ -422,18 +435,18 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
#define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
#define MOD_SEL1_25 FM(SEL_DRIF3_0) FM(SEL_DRIF3_1)
-#define MOD_SEL1_24_23_22 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1) FM(SEL_HSCIF3_2) FM(SEL_HSCIF3_3) FM(SEL_HSCIF3_4) F_(0, 0) F_(0, 0) F_(0, 0)
-#define MOD_SEL1_21_20_19 FM(SEL_HSCIF4_0) FM(SEL_HSCIF4_1) FM(SEL_HSCIF4_2) FM(SEL_HSCIF4_3) FM(SEL_HSCIF4_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL1_24_23_22 REV8(FM(SEL_HSCIF3_0), FM(SEL_HSCIF3_1), FM(SEL_HSCIF3_2), FM(SEL_HSCIF3_3), FM(SEL_HSCIF3_4), F_(0, 0), F_(0, 0), F_(0, 0))
+#define MOD_SEL1_21_20_19 REV8(FM(SEL_HSCIF4_0), FM(SEL_HSCIF4_1), FM(SEL_HSCIF4_2), FM(SEL_HSCIF4_3), FM(SEL_HSCIF4_4), F_(0, 0), F_(0, 0), F_(0, 0))
#define MOD_SEL1_18 FM(SEL_I2C6_0) FM(SEL_I2C6_1)
#define MOD_SEL1_17 FM(SEL_I2C7_0) FM(SEL_I2C7_1)
#define MOD_SEL1_16 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1)
#define MOD_SEL1_15 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1)
-#define MOD_SEL1_14_13 FM(SEL_SCIF3_0) FM(SEL_SCIF3_1) FM(SEL_SCIF3_2) F_(0, 0)
-#define MOD_SEL1_12_11 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1) FM(SEL_SCIF4_2) F_(0, 0)
-#define MOD_SEL1_10_9 FM(SEL_SCIF5_0) FM(SEL_SCIF5_1) FM(SEL_SCIF5_2) F_(0, 0)
+#define MOD_SEL1_14_13 REV4(FM(SEL_SCIF3_0), FM(SEL_SCIF3_1), FM(SEL_SCIF3_2), F_(0, 0))
+#define MOD_SEL1_12_11 REV4(FM(SEL_SCIF4_0), FM(SEL_SCIF4_1), FM(SEL_SCIF4_2), F_(0, 0))
+#define MOD_SEL1_10_9 REV4(FM(SEL_SCIF5_0), FM(SEL_SCIF5_1), FM(SEL_SCIF5_2), F_(0, 0))
#define MOD_SEL1_8 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
#define MOD_SEL1_7 FM(SEL_VIN5_0) FM(SEL_VIN5_1)
-#define MOD_SEL1_6_5 FM(SEL_ADGC_0) FM(SEL_ADGC_1) FM(SEL_ADGC_2) F_(0, 0)
+#define MOD_SEL1_6_5 REV4(FM(SEL_ADGC_0), FM(SEL_ADGC_1), FM(SEL_ADGC_2), F_(0, 0))
#define MOD_SEL1_4 FM(SEL_SSI9_0) FM(SEL_SSI9_1)
#define PINMUX_MOD_SELS \
@@ -1060,7 +1073,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC),
PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1),
- PINMUX_IPSR_GPSR(IP11_15_12, TX0_A),
+ PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0),
PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A),
PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0),
PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0),
@@ -1099,7 +1112,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP12_3_0, SSI_WS9_B, SEL_SSI9_1),
PINMUX_IPSR_GPSR(IP12_3_0, AUDIO_CLKOUT3_B),
- PINMUX_IPSR_GPSR(IP12_7_4, SCK2_A),
+ PINMUX_IPSR_MSEL(IP12_7_4, SCK2_A, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP12_7_4, HSCK0_A, SEL_HSCIF0_0),
PINMUX_IPSR_MSEL(IP12_7_4, AUDIO_CLKB_A, SEL_ADGB_0),
PINMUX_IPSR_GPSR(IP12_7_4, CTS1_N),
@@ -1107,14 +1120,14 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP12_7_4, REMOCON_A, SEL_REMOCON_0),
PINMUX_IPSR_MSEL(IP12_7_4, SCIF_CLK_B, SEL_SCIF_1),
- PINMUX_IPSR_GPSR(IP12_11_8, TX2_A),
+ PINMUX_IPSR_MSEL(IP12_11_8, TX2_A, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP12_11_8, HRX0_A, SEL_HSCIF0_0),
PINMUX_IPSR_GPSR(IP12_11_8, AUDIO_CLKOUT2_A),
PINMUX_IPSR_MSEL(IP12_11_8, SCL1_A, SEL_I2C1_0),
PINMUX_IPSR_MSEL(IP12_11_8, FSO_CFE_0_N_A, SEL_FSO_0),
PINMUX_IPSR_GPSR(IP12_11_8, TS_SDEN1),
- PINMUX_IPSR_GPSR(IP12_15_12, RX2_A),
+ PINMUX_IPSR_MSEL(IP12_15_12, RX2_A, SEL_SCIF2_0),
PINMUX_IPSR_GPSR(IP12_15_12, HTX0_A),
PINMUX_IPSR_GPSR(IP12_15_12, AUDIO_CLKOUT3_A),
PINMUX_IPSR_MSEL(IP12_15_12, SDA1_A, SEL_I2C1_0),
@@ -1126,11 +1139,11 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP12_23_20, MSIOF0_RXD),
PINMUX_IPSR_GPSR(IP12_23_20, SSI_WS78),
- PINMUX_IPSR_GPSR(IP12_23_20, TX2_B),
+ PINMUX_IPSR_MSEL(IP12_23_20, TX2_B, SEL_SCIF2_1),
PINMUX_IPSR_GPSR(IP12_27_24, MSIOF0_TXD),
PINMUX_IPSR_GPSR(IP12_27_24, SSI_SDATA7),
- PINMUX_IPSR_GPSR(IP12_27_24, RX2_B),
+ PINMUX_IPSR_MSEL(IP12_27_24, RX2_B, SEL_SCIF2_1),
PINMUX_IPSR_GPSR(IP12_31_28, MSIOF0_SYNC),
PINMUX_IPSR_GPSR(IP12_31_28, AUDIO_CLKOUT_B),
@@ -1170,7 +1183,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0),
PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT),
- PINMUX_IPSR_GPSR(IP13_23_20, TX0_B),
+ PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_A, SEL_DRIF0_0),
PINMUX_IPSR_GPSR(IP13_23_20, SIM0_CLK_A),
@@ -1586,6 +1599,199 @@ static const unsigned int canfd1_data_mux[] = {
CANFD1_TX_MARK, CANFD1_RX_MARK,
};
+/* - DRIF0 --------------------------------------------------------------- */
+static const unsigned int drif0_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 19),
+};
+
+static const unsigned int drif0_ctrl_a_mux[] = {
+ RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
+};
+
+static const unsigned int drif0_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 17),
+};
+
+static const unsigned int drif0_data0_a_mux[] = {
+ RIF0_D0_A_MARK,
+};
+
+static const unsigned int drif0_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 18),
+};
+
+static const unsigned int drif0_data1_a_mux[] = {
+ RIF0_D1_A_MARK,
+};
+
+static const unsigned int drif0_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 15),
+};
+
+static const unsigned int drif0_ctrl_b_mux[] = {
+ RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
+};
+
+static const unsigned int drif0_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 13),
+};
+
+static const unsigned int drif0_data0_b_mux[] = {
+ RIF0_D0_B_MARK,
+};
+
+static const unsigned int drif0_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(3, 14),
+};
+
+static const unsigned int drif0_data1_b_mux[] = {
+ RIF0_D1_B_MARK,
+};
+
+/* - DRIF1 --------------------------------------------------------------- */
+static const unsigned int drif1_ctrl_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 1),
+};
+
+static const unsigned int drif1_ctrl_mux[] = {
+ RIF1_CLK_MARK, RIF1_SYNC_MARK,
+};
+
+static const unsigned int drif1_data0_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 2),
+};
+
+static const unsigned int drif1_data0_mux[] = {
+ RIF1_D0_MARK,
+};
+
+static const unsigned int drif1_data1_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 3),
+};
+
+static const unsigned int drif1_data1_mux[] = {
+ RIF1_D1_MARK,
+};
+
+/* - DRIF2 --------------------------------------------------------------- */
+static const unsigned int drif2_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
+};
+
+static const unsigned int drif2_ctrl_a_mux[] = {
+ RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
+};
+
+static const unsigned int drif2_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(2, 8),
+};
+
+static const unsigned int drif2_data0_a_mux[] = {
+ RIF2_D0_A_MARK,
+};
+
+static const unsigned int drif2_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(2, 9),
+};
+
+static const unsigned int drif2_data1_a_mux[] = {
+ RIF2_D1_A_MARK,
+};
+
+static const unsigned int drif2_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+};
+
+static const unsigned int drif2_ctrl_b_mux[] = {
+ RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
+};
+
+static const unsigned int drif2_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(1, 6),
+};
+
+static const unsigned int drif2_data0_b_mux[] = {
+ RIF2_D0_B_MARK,
+};
+
+static const unsigned int drif2_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(1, 7),
+};
+
+static const unsigned int drif2_data1_b_mux[] = {
+ RIF2_D1_B_MARK,
+};
+
+/* - DRIF3 --------------------------------------------------------------- */
+static const unsigned int drif3_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+
+static const unsigned int drif3_ctrl_a_mux[] = {
+ RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
+};
+
+static const unsigned int drif3_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(2, 12),
+};
+
+static const unsigned int drif3_data0_a_mux[] = {
+ RIF3_D0_A_MARK,
+};
+
+static const unsigned int drif3_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(2, 13),
+};
+
+static const unsigned int drif3_data1_a_mux[] = {
+ RIF3_D1_A_MARK,
+};
+
+static const unsigned int drif3_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9),
+};
+
+static const unsigned int drif3_ctrl_b_mux[] = {
+ RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
+};
+
+static const unsigned int drif3_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(0, 10),
+};
+
+static const unsigned int drif3_data0_b_mux[] = {
+ RIF3_D0_B_MARK,
+};
+
+static const unsigned int drif3_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(0, 11),
+};
+
+static const unsigned int drif3_data1_b_mux[] = {
+ RIF3_D1_B_MARK,
+};
+
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
/* R[7:2], G[7:2], B[7:2] */
@@ -3243,6 +3449,43 @@ static const unsigned int ssi9_ctrl_b_mux[] = {
SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
};
+/* - TMU -------------------------------------------------------------------- */
+static const unsigned int tmu_tclk1_a_pins[] = {
+ /* TCLK */
+ RCAR_GP_PIN(3, 12),
+};
+
+static const unsigned int tmu_tclk1_a_mux[] = {
+ TCLK1_A_MARK,
+};
+
+static const unsigned int tmu_tclk1_b_pins[] = {
+ /* TCLK */
+ RCAR_GP_PIN(5, 17),
+};
+
+static const unsigned int tmu_tclk1_b_mux[] = {
+ TCLK1_B_MARK,
+};
+
+static const unsigned int tmu_tclk2_a_pins[] = {
+ /* TCLK */
+ RCAR_GP_PIN(3, 13),
+};
+
+static const unsigned int tmu_tclk2_a_mux[] = {
+ TCLK2_A_MARK,
+};
+
+static const unsigned int tmu_tclk2_b_pins[] = {
+ /* TCLK */
+ RCAR_GP_PIN(5, 18),
+};
+
+static const unsigned int tmu_tclk2_b_mux[] = {
+ TCLK2_B_MARK,
+};
+
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_a_pins[] = {
/* PWEN, OVC */
@@ -3523,8 +3766,8 @@ static const unsigned int vin5_clk_b_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[241];
- struct sh_pfc_pin_group automotive[2];
+ struct sh_pfc_pin_group common[245];
+ struct sh_pfc_pin_group automotive[23];
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a),
@@ -3735,6 +3978,10 @@ static const struct {
SH_PFC_PIN_GROUP(ssi9_data),
SH_PFC_PIN_GROUP(ssi9_ctrl_a),
SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+ SH_PFC_PIN_GROUP(tmu_tclk1_a),
+ SH_PFC_PIN_GROUP(tmu_tclk1_b),
+ SH_PFC_PIN_GROUP(tmu_tclk2_a),
+ SH_PFC_PIN_GROUP(tmu_tclk2_b),
SH_PFC_PIN_GROUP(usb0_a),
SH_PFC_PIN_GROUP(usb0_b),
SH_PFC_PIN_GROUP(usb0_id),
@@ -3772,6 +4019,27 @@ static const struct {
.automotive = {
SH_PFC_PIN_GROUP(canfd0_data),
SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl),
+ SH_PFC_PIN_GROUP(drif1_data0),
+ SH_PFC_PIN_GROUP(drif1_data1),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
}
};
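The common/automotive split in pinmux_groups (and pinmux_functions below) lets SoCs that share this driver register different subsets; DRIF, like CANFD, only exists on the automotive parts. The new DRIF groups therefore land in the .automotive array, and the sizes are bumped to match: common 241 -> 245 for the four TMU groups, automotive 2 -> 23 for the twenty-one DRIF groups.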
@@ -3826,6 +4094,39 @@ static const char * const canfd1_groups[] = {
"canfd1_data",
};
+static const char * const drif0_groups[] = {
+ "drif0_ctrl_a",
+ "drif0_data0_a",
+ "drif0_data1_a",
+ "drif0_ctrl_b",
+ "drif0_data0_b",
+ "drif0_data1_b",
+};
+
+static const char * const drif1_groups[] = {
+ "drif1_ctrl",
+ "drif1_data0",
+ "drif1_data1",
+};
+
+static const char * const drif2_groups[] = {
+ "drif2_ctrl_a",
+ "drif2_data0_a",
+ "drif2_data1_a",
+ "drif2_ctrl_b",
+ "drif2_data0_b",
+ "drif2_data1_b",
+};
+
+static const char * const drif3_groups[] = {
+ "drif3_ctrl_a",
+ "drif3_data0_a",
+ "drif3_data1_a",
+ "drif3_ctrl_b",
+ "drif3_data0_b",
+ "drif3_data1_b",
+};
+
static const char * const du_groups[] = {
"du_rgb666",
"du_rgb888",
@@ -4111,6 +4412,13 @@ static const char * const ssi_groups[] = {
"ssi9_ctrl_b",
};
+static const char * const tmu_groups[] = {
+ "tmu_tclk1_a",
+ "tmu_tclk1_b",
+ "tmu_tclk2_a",
+ "tmu_tclk2_b",
+};
+
static const char * const usb0_groups[] = {
"usb0_a",
"usb0_b",
@@ -4157,8 +4465,8 @@ static const char * const vin5_groups[] = {
};
static const struct {
- struct sh_pfc_function common[44];
- struct sh_pfc_function automotive[2];
+ struct sh_pfc_function common[45];
+ struct sh_pfc_function automotive[6];
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -4201,6 +4509,7 @@ static const struct {
SH_PFC_FUNCTION(sdhi1),
SH_PFC_FUNCTION(sdhi3),
SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(tmu),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb30),
SH_PFC_FUNCTION(vin4),
@@ -4209,6 +4518,10 @@ static const struct {
.automotive = {
SH_PFC_FUNCTION(canfd0),
SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
}
};
@@ -4914,17 +5227,6 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static bool pin_has_pud(unsigned int pin)
-{
- /* Some pins are pull-up only */
- switch (pin) {
- case RCAR_GP_PIN(6, 9): /* USB30_OVC */
- return false;
- }
-
- return true;
-}
-
static unsigned int r8a77990_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
@@ -4937,7 +5239,7 @@ static unsigned int r8a77990_pinmux_get_bias(struct sh_pfc *pfc,
if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (!pin_has_pud(pin) || (sh_pfc_read(pfc, reg->pud) & BIT(bit)))
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -4958,13 +5260,11 @@ static void r8a77990_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
if (bias != PIN_CONFIG_BIAS_DISABLE)
enable |= BIT(bit);
- if (pin_has_pud(pin)) {
- updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
- if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= BIT(bit);
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= BIT(bit);
- sh_pfc_write(pfc, reg->pud, updown);
- }
+ sh_pfc_write(pfc, reg->pud, updown);
sh_pfc_write(pfc, reg->puen, enable);
}
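pin_has_pud() can go because its one special case is now expressed declaratively: the CFG hunk at the top of this file gives GP6_9 (USB30_OVC) SH_PFC_PIN_CFG_PULL_UP instead of the full CFG_FLAGS, so the pull-up-only restriction is enforced by the pin's capability flags and the generic bias code can treat every pin uniformly.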
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
index 84d78db381e3..9e377e3b9cb3 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
@@ -381,6 +381,9 @@ FM(IP12_23_20) IP12_23_20 \
FM(IP12_27_24) IP12_27_24 \
FM(IP12_31_28) IP12_31_28 \
+/* The bit numbering in MOD_SEL fields is reversed */
+#define REV4(f0, f1, f2, f3) f0 f2 f1 f3
+
/* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */
#define MOD_SEL0_30 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1)
#define MOD_SEL0_29 FM(SEL_I2C3_0) FM(SEL_I2C3_1)
@@ -388,10 +391,10 @@ FM(IP12_31_28) IP12_31_28 \
#define MOD_SEL0_27 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1)
#define MOD_SEL0_26 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1)
#define MOD_SEL0_25 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1)
-#define MOD_SEL0_24_23 FM(SEL_PWM0_0) FM(SEL_PWM0_1) FM(SEL_PWM0_2) F_(0, 0)
-#define MOD_SEL0_22_21 FM(SEL_PWM1_0) FM(SEL_PWM1_1) FM(SEL_PWM1_2) F_(0, 0)
-#define MOD_SEL0_20_19 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0)
-#define MOD_SEL0_18_17 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0)
+#define MOD_SEL0_24_23 REV4(FM(SEL_PWM0_0), FM(SEL_PWM0_1), FM(SEL_PWM0_2), F_(0, 0))
+#define MOD_SEL0_22_21 REV4(FM(SEL_PWM1_0), FM(SEL_PWM1_1), FM(SEL_PWM1_2), F_(0, 0))
+#define MOD_SEL0_20_19 REV4(FM(SEL_PWM2_0), FM(SEL_PWM2_1), FM(SEL_PWM2_2), F_(0, 0))
+#define MOD_SEL0_18_17 REV4(FM(SEL_PWM3_0), FM(SEL_PWM3_1), FM(SEL_PWM3_2), F_(0, 0))
#define MOD_SEL0_15 FM(SEL_IRQ_0_0) FM(SEL_IRQ_0_1)
#define MOD_SEL0_14 FM(SEL_IRQ_1_0) FM(SEL_IRQ_1_1)
#define MOD_SEL0_13 FM(SEL_IRQ_2_0) FM(SEL_IRQ_2_1)
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 085770e66895..ef3da8bf1d87 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3354,7 +3354,8 @@ static const char * const fsic_groups[] = {
"fsic_sclk_out",
"fsic_data_in",
"fsic_data_out",
- "fsic_spdif",
+ "fsic_spdif_0",
+ "fsic_spdif_1",
};
static const char * const fsid_groups[] = {
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 274d5ff87078..c97d2ba7677c 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -347,6 +347,8 @@ static int sh_pfc_func_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
unsigned int i;
int ret = 0;
+ dev_dbg(pctldev->dev, "Configuring pin group %s\n", grp->name);
+
spin_lock_irqsave(&pfc->lock, flags);
for (i = 0; i < grp->nr_pins; ++i) {
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 46d477ff5109..56016cb76769 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -126,7 +126,8 @@ struct pinmux_cfg_reg {
* one for each possible combination of the register field bit values.
*/
#define PINMUX_CFG_REG(name, r, r_width, f_width) \
- .reg = r, .reg_width = r_width, .field_width = f_width, \
+ .reg = r, .reg_width = r_width, \
+ .field_width = f_width + BUILD_BUG_ON_ZERO(r_width % f_width), \
.enum_ids = (const u16 [(r_width / f_width) * (1 << f_width)])
/*
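BUILD_BUG_ON_ZERO(e) evaluates to 0 when e is false and breaks the build otherwise, so the addition above costs nothing at runtime while rejecting any PINMUX_CFG_REG() whose register width is not a multiple of its field width. A standalone illustration of the idiom (the CHECKED_WIDTH macro is hypothetical, not driver code):

    /* From include/linux/build_bug.h: a negative bitfield width is illegal */
    #define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))

    #define CHECKED_WIDTH(r, f) ((f) + BUILD_BUG_ON_ZERO((r) % (f)))
    /* CHECKED_WIDTH(32, 4) == 4; CHECKED_WIDTH(32, 5) fails to compile */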
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 4ba171827428..8a0eee044264 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -6007,8 +6007,8 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
}
/* retrieve gpio descriptor data */
- a7gc = devm_kzalloc(&pdev->dev, sizeof(*a7gc) +
- sizeof(struct atlas7_gpio_bank) * nbank, GFP_KERNEL);
+ a7gc = devm_kzalloc(&pdev->dev, struct_size(a7gc, banks, nbank),
+ GFP_KERNEL);
if (!a7gc)
return -ENOMEM;
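struct_size(p, member, n) computes sizeof(*p) plus storage for n trailing flexible-array elements, saturating on overflow so a bogus count fails the allocation instead of wrapping. A self-contained sketch of the pattern (struct and names hypothetical):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo {
        int nbanks;
        u32 banks[];    /* flexible array member */
    };

    /* sizeof(struct demo) + count * sizeof(u32), overflow-checked */
    struct demo *d = kzalloc(struct_size(d, banks, count), GFP_KERNEL);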
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 2e42d738b589..2b3bd1a41f21 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -782,7 +782,7 @@ static void sirfsoc_gpio_set_pulldown(struct sirfsoc_gpio_chip *sgpio,
static int sirfsoc_gpio_probe(struct device_node *np)
{
int i, err = 0;
- static struct sirfsoc_gpio_chip *sgpio;
+ struct sirfsoc_gpio_chip *sgpio;
struct sirfsoc_gpio_bank *bank;
void __iomem *regs;
struct platform_device *pdev;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 813eccbb9aaf..0b9ff5aa6bb5 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -414,7 +414,7 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
unsigned int num_configs;
bool has_config = 0;
unsigned reserve = 0;
- int num_pins, num_funcs, maps_per_pin, i, err;
+ int num_pins, num_funcs, maps_per_pin, i, err = 0;
pctl = pinctrl_dev_get_drvdata(pctldev);
@@ -441,41 +441,45 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (has_config && num_pins >= 1)
maps_per_pin++;
- if (!num_pins || !maps_per_pin)
- return -EINVAL;
+ if (!num_pins || !maps_per_pin) {
+ err = -EINVAL;
+ goto exit;
+ }
reserve = num_pins * maps_per_pin;
err = pinctrl_utils_reserve_map(pctldev, map,
reserved_maps, num_maps, reserve);
if (err)
- return err;
+ goto exit;
for (i = 0; i < num_pins; i++) {
err = of_property_read_u32_index(node, "pinmux",
i, &pinfunc);
if (err)
- return err;
+ goto exit;
pin = STM32_GET_PIN_NO(pinfunc);
func = STM32_GET_PIN_FUNC(pinfunc);
if (!stm32_pctrl_is_function_valid(pctl, pin, func)) {
dev_err(pctl->dev, "invalid function.\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto exit;
}
grp = stm32_pctrl_find_group_by_pin(pctl, pin);
if (!grp) {
dev_err(pctl->dev, "unable to match pin %d to group\n",
pin);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit;
}
err = stm32_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map,
reserved_maps, num_maps);
if (err)
- return err;
+ goto exit;
if (has_config) {
err = pinctrl_utils_add_map_configs(pctldev, map,
@@ -483,11 +487,13 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
configs, num_configs,
PIN_MAP_TYPE_CONFIGS_GROUP);
if (err)
- return err;
+ goto exit;
}
}
- return 0;
+exit:
+ kfree(configs);
+ return err;
}
static int stm32_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
@@ -577,8 +583,8 @@ static int stm32_pmx_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static void stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
- int pin, u32 mode, u32 alt)
+static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
+ int pin, u32 mode, u32 alt)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
u32 val;
@@ -614,6 +620,8 @@ static void stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
unlock:
spin_unlock_irqrestore(&bank->lock, flags);
clk_disable(bank->clk);
+
+ return err;
}
void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode,
@@ -670,9 +678,7 @@ static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
mode = stm32_gpio_get_mode(function);
alt = stm32_gpio_get_alt(function);
- stm32_pmx_set_mode(bank, pin, mode, alt);
-
- return 0;
+ return stm32_pmx_set_mode(bank, pin, mode, alt);
}
static int stm32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -682,9 +688,7 @@ static int stm32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
struct stm32_gpio_bank *bank = gpiochip_get_data(range->gc);
int pin = stm32_gpio_pin(gpio);
- stm32_pmx_set_mode(bank, pin, !input, 0);
-
- return 0;
+ return stm32_pmx_set_mode(bank, pin, !input, 0);
}
static const struct pinmux_ops stm32_pmx_ops = {
@@ -698,8 +702,8 @@ static const struct pinmux_ops stm32_pmx_ops = {
/* Pinconf functions */
-static void stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
- unsigned offset, u32 drive)
+static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
+ unsigned offset, u32 drive)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
unsigned long flags;
@@ -728,6 +732,8 @@ static void stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
unlock:
spin_unlock_irqrestore(&bank->lock, flags);
clk_disable(bank->clk);
+
+ return err;
}
static u32 stm32_pconf_get_driving(struct stm32_gpio_bank *bank,
@@ -748,8 +754,8 @@ static u32 stm32_pconf_get_driving(struct stm32_gpio_bank *bank,
return (val >> offset);
}
-static void stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
- unsigned offset, u32 speed)
+static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
+ unsigned offset, u32 speed)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
unsigned long flags;
@@ -778,6 +784,8 @@ static void stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
unlock:
spin_unlock_irqrestore(&bank->lock, flags);
clk_disable(bank->clk);
+
+ return err;
}
static u32 stm32_pconf_get_speed(struct stm32_gpio_bank *bank,
@@ -798,8 +806,8 @@ static u32 stm32_pconf_get_speed(struct stm32_gpio_bank *bank,
return (val >> (offset * 2));
}
-static void stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
- unsigned offset, u32 bias)
+static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
+ unsigned offset, u32 bias)
{
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
unsigned long flags;
@@ -828,6 +836,8 @@ static void stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
unlock:
spin_unlock_irqrestore(&bank->lock, flags);
clk_disable(bank->clk);
+
+ return err;
}
static u32 stm32_pconf_get_bias(struct stm32_gpio_bank *bank,
@@ -890,22 +900,22 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
switch (param) {
case PIN_CONFIG_DRIVE_PUSH_PULL:
- stm32_pconf_set_driving(bank, offset, 0);
+ ret = stm32_pconf_set_driving(bank, offset, 0);
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- stm32_pconf_set_driving(bank, offset, 1);
+ ret = stm32_pconf_set_driving(bank, offset, 1);
break;
case PIN_CONFIG_SLEW_RATE:
- stm32_pconf_set_speed(bank, offset, arg);
+ ret = stm32_pconf_set_speed(bank, offset, arg);
break;
case PIN_CONFIG_BIAS_DISABLE:
- stm32_pconf_set_bias(bank, offset, 0);
+ ret = stm32_pconf_set_bias(bank, offset, 0);
break;
case PIN_CONFIG_BIAS_PULL_UP:
- stm32_pconf_set_bias(bank, offset, 1);
+ ret = stm32_pconf_set_bias(bank, offset, 1);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- stm32_pconf_set_bias(bank, offset, 2);
+ ret = stm32_pconf_set_bias(bank, offset, 2);
break;
case PIN_CONFIG_OUTPUT:
__stm32_gpio_set(bank, offset, arg);
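The conversion in this hunk is mechanical: each formerly-void setter now returns the error it used to swallow (presumably from a failed clock enable), and the switch stores it in ret so stm32_pconf_parse_conf() can report it. A sketch of the resulting caller contract:

    /* Sketch: config writes now propagate failures to the caller */
    ret = stm32_pconf_set_bias(bank, offset, 2);   /* pull-down */
    if (ret)
        return ret;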
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index c63086c98335..e05dd9a5551d 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -153,6 +153,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_r_pinctrl_data = {
.pin_base = PL_BASE,
.irq_banks = 2,
.disable_strict_mode = true,
+ .has_io_bias_cfg = true,
};
static int sun9i_a80_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index 5553c0eb0f41..da37d594a13d 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -722,6 +722,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_pinctrl_data = {
.npins = ARRAY_SIZE(sun9i_a80_pins),
.irq_banks = 5,
.disable_strict_mode = true,
+ .has_io_bias_cfg = true,
};
static int sun9i_a80_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0e7fa69e93df..8dd25caea2cf 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -603,6 +603,45 @@ static const struct pinconf_ops sunxi_pconf_ops = {
.pin_config_group_set = sunxi_pconf_group_set,
};
+static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
+ unsigned pin,
+ struct regulator *supply)
+{
+ u32 val, reg;
+ int uV;
+
+ if (!pctl->desc->has_io_bias_cfg)
+ return 0;
+
+ uV = regulator_get_voltage(supply);
+ if (uV < 0)
+ return uV;
+
+ /* Might be a dummy regulator with no voltage set */
+ if (uV == 0)
+ return 0;
+
+ /* Configured value must be equal to or greater than the actual voltage */
+ if (uV <= 1800000)
+ val = 0x0; /* 1.8V */
+ else if (uV <= 2500000)
+ val = 0x6; /* 2.5V */
+ else if (uV <= 2800000)
+ val = 0x9; /* 2.8V */
+ else if (uV <= 3000000)
+ val = 0xA; /* 3.0V */
+ else
+ val = 0xD; /* 3.3V */
+
+ pin -= pctl->desc->pin_base;
+
+ reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
+ reg &= ~IO_BIAS_MASK;
+ writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
+
+ return 0;
+}
+
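Note that the ladder rounds the supply voltage up to the next supported bias level, never down: e.g. a 2.6 V supply (uV == 2600000) fails the 2.5 V comparison and selects 0x9 (2.8 V), while anything above 3.0 V falls through to 0xD (3.3 V).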
static int sunxi_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
@@ -725,6 +764,8 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
goto out;
}
+ sunxi_pinctrl_set_io_bias_cfg(pctl, offset, reg);
+
s_reg->regulator = reg;
refcount_set(&s_reg->refcount, 1);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 034c0317c8d6..ee15ab067b5f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -79,6 +79,10 @@
#define IRQ_LEVEL_LOW 0x03
#define IRQ_EDGE_BOTH 0x04
+#define GRP_CFG_REG 0x300
+
+#define IO_BIAS_MASK GENMASK(3, 0)
+
#define SUN4I_FUNC_INPUT 0
#define SUN4I_FUNC_IRQ 6
@@ -113,6 +117,7 @@ struct sunxi_pinctrl_desc {
const unsigned int *irq_bank_map;
bool irq_read_needs_mux;
bool disable_strict_mode;
+ bool has_io_bias_cfg;
};
struct sunxi_pinctrl_function {
@@ -338,6 +343,13 @@ static inline u32 sunxi_irq_status_offset(u16 irq)
return irq_num * IRQ_STATUS_IRQ_BITS;
}
+static inline u32 sunxi_grp_config_reg(u16 pin)
+{
+ u8 bank = pin / PINS_PER_BANK;
+
+ return GRP_CFG_REG + bank * 0x4;
+}
+
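A worked example of the helper, assuming PINS_PER_BANK is 32 on these SoCs:

    /* pin 37 -> bank 1 -> 0x300 + 1 * 0x4 == 0x304 */
    u32 cfg = readl(pctl->membase + sunxi_grp_config_reg(37));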
int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
const struct sunxi_pinctrl_desc *desc,
unsigned long variant);
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index a4bc506a01a3..e5e7f1f22813 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -263,9 +263,9 @@ static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod,
reg_val |= reg->unlock_val << __ffs(reg->lock_mask);
r = regmap_update_bits(iod->regmap, cfg->offset, reg_mask, reg_val);
- dev_info(dev, "Set reg 0x%x Delay(a: %d g: %d), Elements(C=%d F=%d)0x%x\n",
- cfg->offset, cfg->a_delay, cfg->g_delay, c_elements,
- f_elements, reg_val);
+ dev_dbg(dev, "Set reg 0x%x Delay(a: %d g: %d), Elements(C=%d F=%d)0x%x\n",
+ cfg->offset, cfg->a_delay, cfg->g_delay, c_elements,
+ f_elements, reg_val);
return r;
}
@@ -923,7 +923,6 @@ static struct platform_driver ti_iodelay_driver = {
.probe = ti_iodelay_probe,
.remove = ti_iodelay_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
.of_match_table = ti_iodelay_of_match,
},
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 5e2fde5ff63d..9186d81a51cc 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -152,4 +152,6 @@ config CROS_EC_SYSFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_sysfs.
+source "drivers/platform/chrome/wilco_ec/Kconfig"
+
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index fdbee501931b..1e2f0029b597 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -14,3 +14,5 @@ obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
+
+obj-$(CONFIG_WILCO_EC) += wilco_ec/
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
index b0693fdec8c6..d13770785fb5 100644
--- a/drivers/platform/chrome/chromeos_pstore.c
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -1,12 +1,7 @@
-/*
- * chromeos_pstore.c - Driver to instantiate Chromebook ramoops device
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Driver to instantiate Chromebook ramoops device.
+//
+// Copyright (C) 2013 Google, Inc.
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -138,5 +133,5 @@ static void __exit chromeos_pstore_exit(void)
module_init(chromeos_pstore_init);
module_exit(chromeos_pstore_exit);
-MODULE_DESCRIPTION("Chrome OS pstore module");
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS pstore module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 6cacac53dfce..71308766e891 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -1,21 +1,7 @@
-/*
- * cros_ec_debugfs - debug logs for Chrome OS EC
- *
- * Copyright 2015 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Debug logs for the ChromeOS EC
+//
+// Copyright (C) 2015 Google, Inc.
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
@@ -454,7 +440,7 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
ret = cros_ec_create_pdinfo(debug_info);
if (ret)
- goto remove_debugfs;
+ goto remove_log;
ec->debug_info = debug_info;
@@ -462,6 +448,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
return 0;
+remove_log:
+ cros_ec_cleanup_console_log(debug_info);
remove_debugfs:
debugfs_remove_recursive(debug_info->dir);
return ret;
@@ -481,7 +469,8 @@ static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev)
{
struct cros_ec_dev *ec = dev_get_drvdata(dev);
- cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
+ if (ec->debug_info->log_buffer.buf)
+ cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
return 0;
}
@@ -490,7 +479,8 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
{
struct cros_ec_dev *ec = dev_get_drvdata(dev);
- schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
+ if (ec->debug_info->log_buffer.buf)
+ schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
return 0;
}
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 9a009eaa4ada..61d75395f86d 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -1,17 +1,7 @@
-/*
- * ChromeOS EC multi-function device (I2C)
- *
- * Copyright (C) 2012 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// I2C interface for ChromeOS Embedded Controller
+//
+// Copyright (C) 2012 Google, Inc
#include <linux/acpi.h>
#include <linux/delay.h>
@@ -372,5 +362,5 @@ static struct i2c_driver cros_ec_driver = {
module_i2c_driver(cros_ec_driver);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ChromeOS EC multi function device");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("I2C interface for ChromeOS Embedded Controller");
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index c3e4e6e5211d..d30a6650b0b5 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -1,23 +1,7 @@
-/*
- * cros_ec_lightbar - expose the Chromebook Pixel lightbar to userspace
- *
- * Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define pr_fmt(fmt) "cros_ec_lightbar: " fmt
+// SPDX-License-Identifier: GPL-2.0+
+// Expose the Chromebook Pixel lightbar to userspace
+//
+// Copyright (C) 2014 Google, Inc.
#include <linux/ctype.h>
#include <linux/delay.h>
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 14684a56e40f..c9c240fbe7c6 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -1,25 +1,15 @@
-/*
- * cros_ec_lpc - LPC access to the Chrome OS Embedded Controller
- *
- * Copyright (C) 2012-2015 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
- */
+// SPDX-License-Identifier: GPL-2.0
+// LPC interface for ChromeOS Embedded Controller
+//
+// Copyright (C) 2012-2015 Google, Inc
+//
+// This driver uses the ChromeOS EC byte-level message-based protocol for
+// communicating the keyboard state (which keys are pressed) from a keyboard EC
+// to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
+// but everything else (including deghosting) is done here. The main
+// motivation for this is to keep the EC firmware as simple as possible, since
+// it cannot be easily upgraded and EC flash/IRAM space is relatively
+// expensive.
#include <linux/acpi.h>
#include <linux/dmi.h>
diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c
index c4edfa83e493..d8890bafb55d 100644
--- a/drivers/platform/chrome/cros_ec_lpc_mec.c
+++ b/drivers/platform/chrome/cros_ec_lpc_mec.c
@@ -1,29 +1,10 @@
-/*
- * cros_ec_lpc_mec - LPC variant I/O for Microchip EC
- *
- * Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
- */
+// SPDX-License-Identifier: GPL-2.0
+// LPC variant I/O for Microchip EC
+//
+// Copyright (C) 2016 Google, Inc
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/mfd/cros_ec_commands.h>
#include <linux/mutex.h>
#include <linux/types.h>
@@ -34,6 +15,7 @@
* EC mutex because memmap data may be accessed without it being held.
*/
static struct mutex io_mutex;
+static u16 mec_emi_base, mec_emi_end;
/*
* cros_ec_lpc_mec_emi_write_address
@@ -46,10 +28,37 @@ static struct mutex io_mutex;
static void cros_ec_lpc_mec_emi_write_address(u16 addr,
enum cros_ec_lpc_mec_emi_access_mode access_type)
{
- /* Address relative to start of EMI range */
- addr -= MEC_EMI_RANGE_START;
- outb((addr & 0xfc) | access_type, MEC_EMI_EC_ADDRESS_B0);
- outb((addr >> 8) & 0x7f, MEC_EMI_EC_ADDRESS_B1);
+ outb((addr & 0xfc) | access_type, MEC_EMI_EC_ADDRESS_B0(mec_emi_base));
+ outb((addr >> 8) & 0x7f, MEC_EMI_EC_ADDRESS_B1(mec_emi_base));
+}
+
+/**
+ * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range.
+ *
+ * @offset: Address offset
+ * @length: Number of bytes to check
+ *
+ * Return: 1 if in range, 0 if not, and -EINVAL on failure,
+ * such as the MEC range not being initialized
+ */
+int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length)
+{
+ if (length == 0)
+ return -EINVAL;
+
+ if (WARN_ON(mec_emi_base == 0 || mec_emi_end == 0))
+ return -EINVAL;
+
+ if (offset >= mec_emi_base && offset < mec_emi_end) {
+ if (WARN_ON(offset + length - 1 >= mec_emi_end))
+ return -EINVAL;
+ return 1;
+ }
+
+ if (WARN_ON(offset + length > mec_emi_base && offset < mec_emi_end))
+ return -EINVAL;
+
+ return 0;
}
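Callers must treat the result as a tri-state: negative on error, 0 for an access entirely outside the EMI window, 1 for one entirely inside it. The read/write paths in cros_ec_lpc_reg.c below follow exactly this shape:

    /* Sketch of the dispatch used by cros_ec_lpc_read_bytes() below */
    int in_range = cros_ec_lpc_mec_in_range(offset, length);

    if (in_range < 0)
        return 0;   /* window uninitialized or request straddles it */
    return in_range ?
        cros_ec_lpc_io_bytes_mec(MEC_IO_READ, offset - EC_HOST_CMD_REGION0,
                                 length, dest) :
        lpc_read_bytes(offset, length, dest);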
/*
@@ -71,6 +80,11 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
u8 sum = 0;
enum cros_ec_lpc_mec_emi_access_mode access, new_access;
+ /* Return checksum of 0 if window is not initialized */
+ WARN_ON(mec_emi_base == 0 || mec_emi_end == 0);
+ if (mec_emi_base == 0 || mec_emi_end == 0)
+ return 0;
+
/*
* Long access cannot be used on misaligned data since reading B0 loads
* the data register and writing B3 flushes.
@@ -86,9 +100,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
cros_ec_lpc_mec_emi_write_address(offset, access);
/* Skip bytes in case of misaligned offset */
- io_addr = MEC_EMI_EC_DATA_B0 + (offset & 0x3);
+ io_addr = MEC_EMI_EC_DATA_B0(mec_emi_base) + (offset & 0x3);
while (i < length) {
- while (io_addr <= MEC_EMI_EC_DATA_B3) {
+ while (io_addr <= MEC_EMI_EC_DATA_B3(mec_emi_base)) {
if (io_type == MEC_IO_READ)
buf[i] = inb(io_addr++);
else
@@ -118,7 +132,7 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
}
/* Access [B0, B3] on each loop pass */
- io_addr = MEC_EMI_EC_DATA_B0;
+ io_addr = MEC_EMI_EC_DATA_B0(mec_emi_base);
}
done:
@@ -128,9 +142,11 @@ done:
}
EXPORT_SYMBOL(cros_ec_lpc_io_bytes_mec);
-void cros_ec_lpc_mec_init(void)
+void cros_ec_lpc_mec_init(unsigned int base, unsigned int end)
{
mutex_init(&io_mutex);
+ mec_emi_base = base;
+ mec_emi_end = end;
}
EXPORT_SYMBOL(cros_ec_lpc_mec_init);
diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h
index 105068c0e919..aa1018f6b0f2 100644
--- a/drivers/platform/chrome/cros_ec_lpc_mec.h
+++ b/drivers/platform/chrome/cros_ec_lpc_mec.h
@@ -1,31 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * cros_ec_lpc_mec - LPC variant I/O for Microchip EC
+ * LPC variant I/O for Microchip EC
*
* Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
*/
#ifndef __CROS_EC_LPC_MEC_H
#define __CROS_EC_LPC_MEC_H
-#include <linux/mfd/cros_ec_commands.h>
-
enum cros_ec_lpc_mec_emi_access_mode {
/* 8-bit access */
ACCESS_TYPE_BYTE = 0x0,
@@ -45,27 +27,23 @@ enum cros_ec_lpc_mec_io_type {
MEC_IO_WRITE,
};
-/* Access IO ranges 0x800 thru 0x9ff using EMI interface instead of LPC */
-#define MEC_EMI_RANGE_START EC_HOST_CMD_REGION0
-#define MEC_EMI_RANGE_END (EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE)
-
/* EMI registers are relative to base */
-#define MEC_EMI_BASE 0x800
-#define MEC_EMI_HOST_TO_EC (MEC_EMI_BASE + 0)
-#define MEC_EMI_EC_TO_HOST (MEC_EMI_BASE + 1)
-#define MEC_EMI_EC_ADDRESS_B0 (MEC_EMI_BASE + 2)
-#define MEC_EMI_EC_ADDRESS_B1 (MEC_EMI_BASE + 3)
-#define MEC_EMI_EC_DATA_B0 (MEC_EMI_BASE + 4)
-#define MEC_EMI_EC_DATA_B1 (MEC_EMI_BASE + 5)
-#define MEC_EMI_EC_DATA_B2 (MEC_EMI_BASE + 6)
-#define MEC_EMI_EC_DATA_B3 (MEC_EMI_BASE + 7)
+#define MEC_EMI_HOST_TO_EC(MEC_EMI_BASE) ((MEC_EMI_BASE) + 0)
+#define MEC_EMI_EC_TO_HOST(MEC_EMI_BASE) ((MEC_EMI_BASE) + 1)
+#define MEC_EMI_EC_ADDRESS_B0(MEC_EMI_BASE) ((MEC_EMI_BASE) + 2)
+#define MEC_EMI_EC_ADDRESS_B1(MEC_EMI_BASE) ((MEC_EMI_BASE) + 3)
+#define MEC_EMI_EC_DATA_B0(MEC_EMI_BASE) ((MEC_EMI_BASE) + 4)
+#define MEC_EMI_EC_DATA_B1(MEC_EMI_BASE) ((MEC_EMI_BASE) + 5)
+#define MEC_EMI_EC_DATA_B2(MEC_EMI_BASE) ((MEC_EMI_BASE) + 6)
+#define MEC_EMI_EC_DATA_B3(MEC_EMI_BASE) ((MEC_EMI_BASE) + 7)
-/*
- * cros_ec_lpc_mec_init
+/**
+ * cros_ec_lpc_mec_init() - Initialize MEC I/O.
*
- * Initialize MEC I/O.
+ * @base: MEC EMI Base address
+ * @end: MEC EMI End address
*/
-void cros_ec_lpc_mec_init(void);
+void cros_ec_lpc_mec_init(unsigned int base, unsigned int end);
/*
* cros_ec_lpc_mec_destroy
@@ -75,6 +53,17 @@ void cros_ec_lpc_mec_init(void);
void cros_ec_lpc_mec_destroy(void);
/**
+ * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range.
+ *
+ * @offset: Address offset
+ * @length: Number of bytes to check
+ *
+ * Return: 1 if in range, 0 if not, and -EINVAL on failure,
+ * such as the MEC range not being initialized
+ */
+int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length);
+
+/**
* cros_ec_lpc_io_bytes_mec - Read / write bytes to MEC EMI port
*
* @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request
diff --git a/drivers/platform/chrome/cros_ec_lpc_reg.c b/drivers/platform/chrome/cros_ec_lpc_reg.c
index fc23d535c404..0f5cd0ac8b49 100644
--- a/drivers/platform/chrome/cros_ec_lpc_reg.c
+++ b/drivers/platform/chrome/cros_ec_lpc_reg.c
@@ -1,25 +1,7 @@
-/*
- * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller
- *
- * Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
- */
+// SPDX-License-Identifier: GPL-2.0
+// LPC interface for ChromeOS Embedded Controller
+//
+// Copyright (C) 2016 Google, Inc
#include <linux/io.h>
#include <linux/mfd/cros_ec.h>
@@ -59,51 +41,36 @@ static u8 lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg)
u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest)
{
- if (length == 0)
- return 0;
-
- /* Access desired range through EMI interface */
- if (offset >= MEC_EMI_RANGE_START && offset <= MEC_EMI_RANGE_END) {
- /* Ensure we don't straddle EMI region */
- if (WARN_ON(offset + length - 1 > MEC_EMI_RANGE_END))
- return 0;
+ int in_range = cros_ec_lpc_mec_in_range(offset, length);
- return cros_ec_lpc_io_bytes_mec(MEC_IO_READ, offset, length,
- dest);
- }
-
- if (WARN_ON(offset + length > MEC_EMI_RANGE_START &&
- offset < MEC_EMI_RANGE_START))
+ if (in_range < 0)
return 0;
- return lpc_read_bytes(offset, length, dest);
+ return in_range ?
+ cros_ec_lpc_io_bytes_mec(MEC_IO_READ,
+ offset - EC_HOST_CMD_REGION0,
+ length, dest) :
+ lpc_read_bytes(offset, length, dest);
}
u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg)
{
- if (length == 0)
- return 0;
-
- /* Access desired range through EMI interface */
- if (offset >= MEC_EMI_RANGE_START && offset <= MEC_EMI_RANGE_END) {
- /* Ensure we don't straddle EMI region */
- if (WARN_ON(offset + length - 1 > MEC_EMI_RANGE_END))
- return 0;
+ int in_range = cros_ec_lpc_mec_in_range(offset, length);
- return cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, offset, length,
- msg);
- }
-
- if (WARN_ON(offset + length > MEC_EMI_RANGE_START &&
- offset < MEC_EMI_RANGE_START))
+ if (in_range < 0)
return 0;
- return lpc_write_bytes(offset, length, msg);
+ return in_range ?
+ cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE,
+ offset - EC_HOST_CMD_REGION0,
+ length, msg) :
+ lpc_write_bytes(offset, length, msg);
}
void cros_ec_lpc_reg_init(void)
{
- cros_ec_lpc_mec_init();
+ cros_ec_lpc_mec_init(EC_HOST_CMD_REGION0,
+ EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE);
}
void cros_ec_lpc_reg_destroy(void)
diff --git a/drivers/platform/chrome/cros_ec_lpc_reg.h b/drivers/platform/chrome/cros_ec_lpc_reg.h
index 1c12c38b306a..416fd2572182 100644
--- a/drivers/platform/chrome/cros_ec_lpc_reg.h
+++ b/drivers/platform/chrome/cros_ec_lpc_reg.h
@@ -1,24 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller
+ * LPC interface for ChromeOS Embedded Controller
*
* Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
*/
#ifndef __CROS_EC_LPC_REG_H
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index cc7baf0ecb3c..97a068dff192 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -1,18 +1,7 @@
-/*
- * ChromeOS EC communication protocol helper functions
- *
- * Copyright (C) 2015 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+// ChromeOS EC communication protocol helper functions
+//
+// Copyright (C) 2015 Google, Inc
#include <linux/mfd/cros_ec.h>
#include <linux/delay.h>
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 6cfbc2835beb..ffc38f9d4829 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -1,17 +1,7 @@
-/*
- * ChromeOS EC multi-function device (SPI)
- *
- * Copyright (C) 2012 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// SPI interface for ChromeOS Embedded Controller
+//
+// Copyright (C) 2012 Google, Inc
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -729,4 +719,4 @@ static struct spi_driver cros_ec_driver_spi = {
module_spi_driver(cros_ec_driver_spi);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("ChromeOS EC multi function device (SPI)");
+MODULE_DESCRIPTION("SPI interface for ChromeOS Embedded Controller");
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index 0ff5aa30c070..fe0b7614ae1b 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -1,23 +1,7 @@
-/*
- * cros_ec_sysfs - expose the Chrome OS EC through sysfs
- *
- * Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define pr_fmt(fmt) "cros_ec_sysfs: " fmt
+// SPDX-License-Identifier: GPL-2.0+
+// Expose the ChromeOS EC through sysfs
+//
+// Copyright (C) 2014 Google, Inc.
#include <linux/ctype.h>
#include <linux/delay.h>
@@ -389,5 +373,5 @@ static struct platform_driver cros_ec_sysfs_driver = {
module_platform_driver(cros_ec_sysfs_driver);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ChromeOS EC control driver");
+MODULE_DESCRIPTION("Expose the ChromeOS EC through sysfs");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index af9bfe6f385c..8392a1ec33a7 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -1,22 +1,8 @@
-/*
- * cros_ec_vbc - Expose the vboot context nvram to userspace
- *
- * Copyright (C) 2015 Collabora Ltd.
- *
- * based on vendor driver,
- *
- * Copyright (C) 2012 The Chromium OS Authors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Expose the vboot context nvram to userspace
+//
+// Copyright (C) 2012 Google, Inc.
+// Copyright (C) 2015 Collabora Ltd.
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index ca3e4da852b4..aa409f0201fb 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -1,18 +1,7 @@
-/*
- * Keyboard backlight LED driver for Chrome OS.
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Keyboard backlight LED driver for ChromeOS
+//
+// Copyright (C) 2012 Google, Inc.
#include <linux/acpi.h>
#include <linux/leds.h>
diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig
new file mode 100644
index 000000000000..e09e4cebe9b4
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/Kconfig
@@ -0,0 +1,20 @@
+config WILCO_EC
+ tristate "ChromeOS Wilco Embedded Controller"
+ depends on ACPI && X86 && CROS_EC_LPC && CROS_EC_LPC_MEC
+ help
+ If you say Y here, you get support for talking to the ChromeOS
+ Wilco EC over an eSPI bus. This uses a simple byte-level protocol
+ with a checksum.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wilco_ec.
+
+config WILCO_EC_DEBUGFS
+ tristate "Enable raw access to EC via debugfs"
+ depends on WILCO_EC
+ help
+ If you say Y here, you get support for sending raw commands to
+ the Wilco EC via debugfs. These commands do not do any byte
+ manipulation and allow for testing arbitrary commands. This
+ interface is intended for debug only and will not be present
+ on production devices.
diff --git a/drivers/platform/chrome/wilco_ec/Makefile b/drivers/platform/chrome/wilco_ec/Makefile
new file mode 100644
index 000000000000..063e7fb4ea17
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+wilco_ec-objs := core.o mailbox.o
+obj-$(CONFIG_WILCO_EC) += wilco_ec.o
+wilco_ec_debugfs-objs := debugfs.o
+obj-$(CONFIG_WILCO_EC_DEBUGFS) += wilco_ec_debugfs.o
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
new file mode 100644
index 000000000000..05e1e2be1c91
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Core driver for Wilco Embedded Controller
+ *
+ * Copyright 2018 Google LLC
+ *
+ * This is the entry point for the drivers that control the Wilco EC.
+ * This driver is responsible for several tasks:
+ * - Initialize the register interface that is used by wilco_ec_mailbox()
+ * - Create a platform device which is picked up by the debugfs driver
+ * - Create a platform device which is picked up by the RTC driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/platform_device.h>
+
+#include "../cros_ec_lpc_mec.h"
+
+#define DRV_NAME "wilco-ec"
+
+static struct resource *wilco_get_resource(struct platform_device *pdev,
+ int index)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, index);
+ if (!res) {
+ dev_dbg(dev, "Couldn't find IO resource %d\n", index);
+ return res;
+ }
+
+ return devm_request_region(dev, res->start, resource_size(res),
+ dev_name(dev));
+}
+
+static int wilco_ec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wilco_ec_device *ec;
+ int ret;
+
+ ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ec);
+ ec->dev = dev;
+ mutex_init(&ec->mailbox_lock);
+
+ /* The largest buffer needed is for an extended data response */
+ ec->data_size = sizeof(struct wilco_ec_response) +
+ EC_MAILBOX_DATA_SIZE_EXTENDED;
+ ec->data_buffer = devm_kzalloc(dev, ec->data_size, GFP_KERNEL);
+ if (!ec->data_buffer)
+ return -ENOMEM;
+
+ /* Prepare access to IO regions provided by ACPI */
+ ec->io_data = wilco_get_resource(pdev, 0); /* Host Data */
+ ec->io_command = wilco_get_resource(pdev, 1); /* Host Command */
+ ec->io_packet = wilco_get_resource(pdev, 2); /* MEC EMI */
+ if (!ec->io_data || !ec->io_command || !ec->io_packet)
+ return -ENODEV;
+
+ /* Initialize cros_ec register interface for communication */
+ cros_ec_lpc_mec_init(ec->io_packet->start,
+ ec->io_packet->start + EC_MAILBOX_DATA_SIZE);
+
+ /*
+ * Register a child device that will be found by the debugfs driver.
+ * Ignore failure.
+ */
+ ec->debugfs_pdev = platform_device_register_data(dev,
+ "wilco-ec-debugfs",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+
+ /* Register a child device that will be found by the RTC driver. */
+ ec->rtc_pdev = platform_device_register_data(dev, "rtc-wilco-ec",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ if (IS_ERR(ec->rtc_pdev)) {
+ dev_err(dev, "Failed to create RTC platform device\n");
+ ret = PTR_ERR(ec->rtc_pdev);
+ goto unregister_debugfs;
+ }
+
+ return 0;
+
+unregister_debugfs:
+ if (ec->debugfs_pdev)
+ platform_device_unregister(ec->debugfs_pdev);
+ cros_ec_lpc_mec_destroy();
+ return ret;
+}
+
+static int wilco_ec_remove(struct platform_device *pdev)
+{
+ struct wilco_ec_device *ec = platform_get_drvdata(pdev);
+
+ platform_device_unregister(ec->rtc_pdev);
+ if (ec->debugfs_pdev)
+ platform_device_unregister(ec->debugfs_pdev);
+
+ /* Teardown cros_ec interface */
+ cros_ec_lpc_mec_destroy();
+
+ return 0;
+}
+
+static const struct acpi_device_id wilco_ec_acpi_device_ids[] = {
+ { "GOOG000C", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, wilco_ec_acpi_device_ids);
+
+static struct platform_driver wilco_ec_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = wilco_ec_acpi_device_ids,
+ },
+ .probe = wilco_ec_probe,
+ .remove = wilco_ec_remove,
+};
+
+module_platform_driver(wilco_ec_driver);
+
+MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
+MODULE_AUTHOR("Duncan Laurie <dlaurie@chromium.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS Wilco Embedded Controller driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
new file mode 100644
index 000000000000..c090db2cd5be
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * debugfs attributes for Wilco EC
+ *
+ * Copyright 2019 Google LLC
+ *
+ * There is only one attribute used for debugging, called raw.
+ * You can write a hexadecimal sentence to raw, and that series of bytes
+ * will be sent to the EC. Then, you can read the bytes of the response
+ * by reading from raw.
+ *
+ * For writing:
+ * Bytes 0-1 indicate the message type:
+ * 00 F0 = Execute Legacy Command
+ * 00 F2 = Read/Write NVRAM Property
+ * Byte 2 provides the command code
+ * Bytes 3+ consist of the data passed in the request
+ *
+ * When referencing the EC interface spec, byte 2 corresponds to MBOX[0],
+ * byte 3 corresponds to MBOX[1], etc.
+ *
+ * At least three bytes are required: two for the message type and one for
+ * the command; any further bytes are optional request data.
+ *
+ * Example:
+ * // Request EC info type 3 (EC firmware build date)
+ * $ echo 00 f0 38 00 03 00 > raw
+ * // View the result. The decoded ASCII result "12/21/18" is
+ * // included after the raw hex.
+ * $ cat raw
+ * 00 31 32 2f 32 31 2f 31 38 00 38 00 01 00 2f 00 .12/21/18.8...
+ */
+
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "wilco-ec-debugfs"
+
+/* The 256 raw bytes will take up more space when represented as a hex string */
+#define FORMATTED_BUFFER_SIZE (EC_MAILBOX_DATA_SIZE_EXTENDED * 4)
+
+struct wilco_ec_debugfs {
+ struct wilco_ec_device *ec;
+ struct dentry *dir;
+ size_t response_size;
+ u8 raw_data[EC_MAILBOX_DATA_SIZE_EXTENDED];
+ u8 formatted_data[FORMATTED_BUFFER_SIZE];
+};
+static struct wilco_ec_debugfs *debug_info;
+
+/**
+ * parse_hex_sentence() - Convert an ASCII hex representation into a byte array.
+ * @in: Input buffer of ASCII.
+ * @isize: Length of input buffer.
+ * @out: Output buffer.
+ * @osize: Length of output buffer, e.g. max number of bytes to parse.
+ *
+ * A valid input is a series of ASCII hexadecimal numbers, separated by spaces.
+ * An example valid input is
+ * " 00 f2 0 000076 6 0 ff"
+ *
+ * If an individual "word" within the hex sentence is longer than MAX_WORD_SIZE,
+ * then the sentence is illegal, and parsing will fail.
+ *
+ * Return: Number of bytes parsed, or negative error code on failure.
+ */
+static int parse_hex_sentence(const char *in, int isize, u8 *out, int osize)
+{
+ int n_parsed = 0;
+ int word_start = 0;
+ int word_end;
+ int word_len;
+ /* Temp buffer for holding a "word" of chars that represents one byte */
+ #define MAX_WORD_SIZE 16
+ char tmp[MAX_WORD_SIZE + 1];
+ u8 byte;
+
+ while (word_start < isize && n_parsed < osize) {
+ /* Find the start of the next word */
+ while (word_start < isize && isspace(in[word_start]))
+ word_start++;
+ /* reached the end of the input before next word? */
+ if (word_start >= isize)
+ break;
+
+ /* Find the end of this word */
+ word_end = word_start;
+ while (word_end < isize && !isspace(in[word_end]))
+ word_end++;
+
+ /* Copy to a tmp NULL terminated string */
+ word_len = word_end - word_start;
+ if (word_len > MAX_WORD_SIZE)
+ return -EINVAL;
+ memcpy(tmp, in + word_start, word_len);
+ tmp[word_len] = '\0';
+
+ /*
+	 * Convert from hex string and place in output. If parsing fails,
+	 * just return -EINVAL: the specific error code is only relevant
+	 * to this one word, so returning it would be confusing.
+ */
+ if (kstrtou8(tmp, 16, &byte))
+ return -EINVAL;
+ out[n_parsed++] = byte;
+
+ word_start = word_end;
+ }
+ return n_parsed;
+}
+
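A usage sketch of the parser, feeding it the example sentence from the kernel-doc above:

    const char *s = " 00 f2 0 000076 6 0 ff";
    u8 out[8];
    int n = parse_hex_sentence(s, strlen(s), out, sizeof(out));
    /* n == 7; out holds 0x00 0xf2 0x00 0x76 0x06 0x00 0xff */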
+/* The message type takes up two bytes */
+#define TYPE_AND_DATA_SIZE ((EC_MAILBOX_DATA_SIZE) + 2)
+
+static ssize_t raw_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = debug_info->formatted_data;
+ struct wilco_ec_message msg;
+ u8 request_data[TYPE_AND_DATA_SIZE];
+ ssize_t kcount;
+ int ret;
+
+ if (count > FORMATTED_BUFFER_SIZE)
+ return -EINVAL;
+
+ kcount = simple_write_to_buffer(buf, FORMATTED_BUFFER_SIZE, ppos,
+ user_buf, count);
+ if (kcount < 0)
+ return kcount;
+
+ ret = parse_hex_sentence(buf, kcount, request_data, TYPE_AND_DATA_SIZE);
+ if (ret < 0)
+ return ret;
+ /* Need at least two bytes for message type and one for command */
+ if (ret < 3)
+ return -EINVAL;
+
+ /* Clear response data buffer */
+ memset(debug_info->raw_data, '\0', EC_MAILBOX_DATA_SIZE_EXTENDED);
+
+ msg.type = request_data[0] << 8 | request_data[1];
+ msg.flags = WILCO_EC_FLAG_RAW;
+ msg.command = request_data[2];
+ msg.request_data = ret > 3 ? request_data + 3 : NULL;
+ msg.request_size = ret - 3;
+ msg.response_data = debug_info->raw_data;
+ msg.response_size = EC_MAILBOX_DATA_SIZE;
+
+ /* Telemetry commands use extended response data */
+ if (msg.type == WILCO_EC_MSG_TELEMETRY_LONG) {
+ msg.flags |= WILCO_EC_FLAG_EXTENDED_DATA;
+ msg.response_size = EC_MAILBOX_DATA_SIZE_EXTENDED;
+ }
+
+ ret = wilco_ec_mailbox(debug_info->ec, &msg);
+ if (ret < 0)
+ return ret;
+ debug_info->response_size = ret;
+
+ return count;
+}
+
+static ssize_t raw_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ int fmt_len = 0;
+
+ if (debug_info->response_size) {
+ fmt_len = hex_dump_to_buffer(debug_info->raw_data,
+ debug_info->response_size,
+ 16, 1, debug_info->formatted_data,
+ FORMATTED_BUFFER_SIZE, true);
+ /* Only return response the first time it is read */
+ debug_info->response_size = 0;
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ debug_info->formatted_data, fmt_len);
+}
+
+static const struct file_operations fops_raw = {
+ .owner = THIS_MODULE,
+ .read = raw_read,
+ .write = raw_write,
+ .llseek = no_llseek,
+};
+
+/**
+ * wilco_ec_debugfs_probe() - Create the debugfs node
+ * @pdev: The platform device, probably created in core.c
+ *
+ * Try to create a debugfs node. If it fails, we don't want to change
+ * behavior at all; this is for debugging, after all. Just fail silently.
+ *
+ * Return: 0 always.
+ */
+static int wilco_ec_debugfs_probe(struct platform_device *pdev)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+
+ debug_info = devm_kzalloc(&pdev->dev, sizeof(*debug_info), GFP_KERNEL);
+ if (!debug_info)
+ return 0;
+ debug_info->ec = ec;
+ debug_info->dir = debugfs_create_dir("wilco_ec", NULL);
+ if (!debug_info->dir)
+ return 0;
+ debugfs_create_file("raw", 0644, debug_info->dir, NULL, &fops_raw);
+
+ return 0;
+}
+
+static int wilco_ec_debugfs_remove(struct platform_device *pdev)
+{
+ debugfs_remove_recursive(debug_info->dir);
+
+ return 0;
+}
+
+static struct platform_driver wilco_ec_debugfs_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = wilco_ec_debugfs_probe,
+ .remove = wilco_ec_debugfs_remove,
+};
+
+module_platform_driver(wilco_ec_debugfs_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Wilco EC debugfs driver");
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
new file mode 100644
index 000000000000..14355668ddfa
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mailbox interface for Wilco Embedded Controller
+ *
+ * Copyright 2018 Google LLC
+ *
+ * The Wilco EC is similar to a typical ChromeOS embedded controller.
+ * It uses the same MEC based low-level communication and a similar
+ * protocol, but with some important differences. The EC firmware does
+ * not support the same mailbox commands so it is not registered as a
+ * cros_ec device type.
+ *
+ * Most messages follow a standard format, but there are some exceptions
+ * and an interface is provided to do direct/raw transactions that do not
+ * make assumptions about byte placement.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/platform_device.h>
+
+#include "../cros_ec_lpc_mec.h"
+
+/* Version of mailbox interface */
+#define EC_MAILBOX_VERSION 0
+
+/* Command to start mailbox transaction */
+#define EC_MAILBOX_START_COMMAND 0xda
+
+/* Version of EC protocol */
+#define EC_MAILBOX_PROTO_VERSION 3
+
+/* Number of header bytes to be counted as data bytes */
+#define EC_MAILBOX_DATA_EXTRA 2
+
+/* Maximum timeout */
+#define EC_MAILBOX_TIMEOUT HZ
+
+/* EC response flags */
+#define EC_CMDR_DATA BIT(0) /* Data ready for host to read */
+#define EC_CMDR_PENDING BIT(1) /* Write pending to EC */
+#define EC_CMDR_BUSY BIT(2) /* EC is busy processing a command */
+#define EC_CMDR_CMD BIT(3) /* Last host write was a command */
+
+/**
+ * wilco_ec_response_timed_out() - Wait for EC response.
+ * @ec: EC device.
+ *
+ * Return: true if EC timed out, false if EC did not time out.
+ */
+static bool wilco_ec_response_timed_out(struct wilco_ec_device *ec)
+{
+ unsigned long timeout = jiffies + EC_MAILBOX_TIMEOUT;
+
+ do {
+ if (!(inb(ec->io_command->start) &
+ (EC_CMDR_PENDING | EC_CMDR_BUSY)))
+ return false;
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ return true;
+}
+
+/**
+ * wilco_ec_checksum() - Compute 8-bit checksum over data range.
+ * @data: Data to checksum.
+ * @size: Number of bytes to checksum.
+ *
+ * Return: 8-bit checksum of provided data.
+ */
+static u8 wilco_ec_checksum(const void *data, size_t size)
+{
+ u8 *data_bytes = (u8 *)data;
+ u8 checksum = 0;
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ checksum += data_bytes[i];
+
+ return checksum;
+}
+
+/**
+ * wilco_ec_prepare() - Prepare the request structure for the EC.
+ * @msg: EC message with request information.
+ * @rq: EC request structure to fill.
+ */
+static void wilco_ec_prepare(struct wilco_ec_message *msg,
+ struct wilco_ec_request *rq)
+{
+ memset(rq, 0, sizeof(*rq));
+
+ /* Handle messages without trimming bytes from the request */
+ if (msg->request_size && msg->flags & WILCO_EC_FLAG_RAW_REQUEST) {
+ rq->reserved_raw = *(u8 *)msg->request_data;
+ msg->request_size--;
+ memmove(msg->request_data, msg->request_data + 1,
+ msg->request_size);
+ }
+
+ /* Fill in request packet */
+ rq->struct_version = EC_MAILBOX_PROTO_VERSION;
+ rq->mailbox_id = msg->type;
+ rq->mailbox_version = EC_MAILBOX_VERSION;
+ rq->data_size = msg->request_size + EC_MAILBOX_DATA_EXTRA;
+ rq->command = msg->command;
+
+ /* Checksum header and data */
+ rq->checksum = wilco_ec_checksum(rq, sizeof(*rq));
+ rq->checksum += wilco_ec_checksum(msg->request_data, msg->request_size);
+ rq->checksum = -rq->checksum;
+}
+
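Negating the accumulated sum makes every byte of the packet, checksum included, add up to zero modulo 256; wilco_ec_transfer() below relies on this when it treats any nonzero sum of the read-back response as corruption. Receiver-side, the check amounts to:

    /* Sketch: the total over header, data, and checksum must be 0 */
    u8 sum = wilco_ec_checksum(rs, sizeof(*rs) + size);
    if (sum)
        return -EBADMSG;   /* packet corrupted in transit */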
+/**
+ * wilco_ec_transfer() - Perform actual data transfer.
+ * @ec: EC device.
+ * @msg: EC message data for request and response.
+ * @rq: Filled in request structure
+ *
+ * Context: ec->mailbox_lock should be held while using this function.
+ * Return: number of bytes received or negative error code on failure.
+ */
+static int wilco_ec_transfer(struct wilco_ec_device *ec,
+ struct wilco_ec_message *msg,
+ struct wilco_ec_request *rq)
+{
+ struct wilco_ec_response *rs;
+ u8 checksum;
+ u8 flag;
+ size_t size;
+
+ /* Write request header, then data */
+ cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq);
+ cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size,
+ msg->request_data);
+
+ /* Start the command */
+ outb(EC_MAILBOX_START_COMMAND, ec->io_command->start);
+
+ /* For some commands (e.g. shutdown) the EC will not respond; that's OK */
+ if (msg->flags & WILCO_EC_FLAG_NO_RESPONSE) {
+ dev_dbg(ec->dev, "EC does not respond to this command\n");
+ return 0;
+ }
+
+ /* Wait for it to complete */
+ if (wilco_ec_response_timed_out(ec)) {
+ dev_dbg(ec->dev, "response timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Check result */
+ flag = inb(ec->io_data->start);
+ if (flag) {
+ dev_dbg(ec->dev, "bad response: 0x%02x\n", flag);
+ return -EIO;
+ }
+
+ if (msg->flags & WILCO_EC_FLAG_EXTENDED_DATA)
+ size = EC_MAILBOX_DATA_SIZE_EXTENDED;
+ else
+ size = EC_MAILBOX_DATA_SIZE;
+
+ /* Read back response */
+ rs = ec->data_buffer;
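+	/*
+	 * cros_ec_lpc_io_bytes_mec() returns the 8-bit sum of the bytes
+	 * read; a valid packet must sum to zero.
+	 */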
+ checksum = cros_ec_lpc_io_bytes_mec(MEC_IO_READ, 0,
+ sizeof(*rs) + size, (u8 *)rs);
+ if (checksum) {
+ dev_dbg(ec->dev, "bad packet checksum 0x%02x\n", rs->checksum);
+ return -EBADMSG;
+ }
+
+ /* Check that the EC reported success */
+ msg->result = rs->result;
+ if (msg->result) {
+ dev_dbg(ec->dev, "bad response: 0x%02x\n", msg->result);
+ return -EBADMSG;
+ }
+
+ /* Check the returned data size, skipping the header */
+ if (rs->data_size != size) {
+		dev_dbg(ec->dev, "unexpected packet size (%u != %zu)\n",
+			rs->data_size, size);
+ return -EMSGSIZE;
+ }
+
+	/* Skip the first response data byte unless a raw response was requested */
+ size = (msg->flags & WILCO_EC_FLAG_RAW_RESPONSE) ? 0 : 1;
+	if ((ssize_t)rs->data_size - size < msg->response_size) {
+		dev_dbg(ec->dev, "response data too short (%zd < %zu)\n",
+			(ssize_t)rs->data_size - size, msg->response_size);
+ return -EMSGSIZE;
+ }
+
+ /* Ignore response data bytes as requested */
+ memcpy(msg->response_data, rs->data + size, msg->response_size);
+
+ /* Return actual amount of data received */
+ return msg->response_size;
+}
+
+/**
+ * wilco_ec_mailbox() - Send EC request and receive EC response.
+ * @ec: EC device.
+ * @msg: EC message data for request and response.
+ *
+ * On entry msg->type, msg->flags, msg->command, msg->request_size,
+ * msg->response_size, and msg->request_data should all be filled in.
+ *
+ * On exit msg->result and msg->response_data will be filled.
+ *
+ * Return: number of bytes received or negative error code on failure.
+ */
+int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
+{
+ struct wilco_ec_request *rq;
+ int ret;
+
+ dev_dbg(ec->dev, "cmd=%02x type=%04x flags=%02x rslen=%zu rqlen=%zu\n",
+ msg->command, msg->type, msg->flags, msg->response_size,
+ msg->request_size);
+
+ mutex_lock(&ec->mailbox_lock);
+ /* Prepare request packet */
+ rq = ec->data_buffer;
+ wilco_ec_prepare(msg, rq);
+
+ ret = wilco_ec_transfer(ec, msg, rq);
+ mutex_unlock(&ec->mailbox_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wilco_ec_mailbox);
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index b6d44550d98c..687ce6817d0d 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -248,7 +248,8 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
struct mlxreg_core_item *item)
{
struct mlxreg_core_data *data;
- u32 asserted, regval, bit;
+ unsigned long asserted;
+ u32 regval, bit;
int ret;
/*
@@ -281,7 +282,7 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
asserted = item->cache ^ regval;
item->cache = regval;
- for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
+ for_each_set_bit(bit, &asserted, 8) {
data = item->data + bit;
if (regval & BIT(bit)) {
if (item->inversed)
@@ -495,7 +496,9 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
struct mlxreg_core_hotplug_platform_data *pdata;
struct mlxreg_core_item *item;
- int i, ret;
+ struct mlxreg_core_data *data;
+ u32 regval;
+ int i, j, ret;
pdata = dev_get_platdata(&priv->pdev->dev);
item = pdata->items;
@@ -507,6 +510,25 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
if (ret)
goto out;
+	/*
+	 * Check whether the hardware configuration requires interrupt
+	 * capability to be disabled for some of the components.
+	 */
+ data = item->data;
+ for (j = 0; j < item->count; j++, data++) {
+		/* Check whether the attribute has a capability register. */
+ if (data->capability) {
+ /* Read capability register. */
+ ret = regmap_read(priv->regmap,
+ data->capability, &regval);
+ if (ret)
+ goto out;
+
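+			/* No interrupt capability - mask this component out */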
+ if (!(regval & data->bit))
+ item->mask &= ~BIT(j);
+ }
+ }
+
/* Set group initial status as mask and unmask group event. */
if (item->inversed) {
item->cache = item->mask;
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 37b5de541270..ee1fa93708ec 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2265,12 +2265,12 @@ static int asus_wmi_probe(struct platform_device *pdev)
int ret;
if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
- pr_warn("Management GUID not found\n");
+ pr_warn("ASUS Management GUID not found\n");
return -ENODEV;
}
if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) {
- pr_warn("Event GUID not found\n");
+ pr_warn("ASUS Event GUID not found\n");
return -ENODEV;
}
@@ -2320,11 +2320,6 @@ EXPORT_SYMBOL_GPL(asus_wmi_unregister_driver);
static int __init asus_wmi_init(void)
{
- if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
- pr_info("Asus Management GUID not found\n");
- return -ENODEV;
- }
-
pr_info("ASUS WMI generic driver loaded\n");
return 0;
}
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index cf2229ece9ff..c3ed3c8c17b9 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -277,4 +277,4 @@ void exit_dell_smbios_wmi(void)
wmi_driver_unregister(&dell_smbios_wmi_driver);
}
-MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID);
+MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
diff --git a/drivers/platform/x86/dell-wmi-descriptor.c b/drivers/platform/x86/dell-wmi-descriptor.c
index 072821aa47fc..14ab250b7d5a 100644
--- a/drivers/platform/x86/dell-wmi-descriptor.c
+++ b/drivers/platform/x86/dell-wmi-descriptor.c
@@ -207,7 +207,7 @@ static struct wmi_driver dell_wmi_descriptor_driver = {
module_wmi_driver(dell_wmi_descriptor_driver);
-MODULE_ALIAS("wmi:" DELL_WMI_DESCRIPTOR_GUID);
+MODULE_DEVICE_TABLE(wmi, dell_wmi_descriptor_id_table);
MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
MODULE_DESCRIPTION("Dell WMI descriptor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 16c7f3d9a335..d118bb73fcae 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -50,8 +50,6 @@ MODULE_LICENSE("GPL");
static bool wmi_requires_smbios_request;
-MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
-
struct dell_wmi_priv {
struct input_dev *input_dev;
u32 interface_version;
@@ -267,6 +265,9 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = {
/* Fn-lock switched to multimedia keys */
{ KE_IGNORE, 0x1, { KEY_RESERVED } },
+ /* Keyboard backlight change notification */
+ { KE_IGNORE, 0x3f, { KEY_RESERVED } },
+
/* Mic mute */
{ KE_KEY, 0x150, { KEY_MICMUTE } },
@@ -738,3 +739,5 @@ static void __exit dell_wmi_exit(void)
wmi_driver_unregister(&dell_wmi_driver);
}
module_exit(dell_wmi_exit);
+
+MODULE_DEVICE_TABLE(wmi, dell_wmi_id_table);
diff --git a/drivers/platform/x86/dell_rbu.c b/drivers/platform/x86/dell_rbu.c
index ccefa84f7305..031c68903583 100644
--- a/drivers/platform/x86/dell_rbu.c
+++ b/drivers/platform/x86/dell_rbu.c
@@ -59,7 +59,6 @@ static struct _rbu_data {
unsigned long image_update_buffer_size;
unsigned long bios_image_size;
int image_update_ordernum;
- int dma_alloc;
spinlock_t lock;
unsigned long packet_read_count;
unsigned long num_packets;
@@ -89,7 +88,6 @@ static struct packet_data packet_data_head;
static struct platform_device *rbu_device;
static int context;
-static dma_addr_t dell_rbu_dmaaddr;
static void init_packet_head(void)
{
@@ -380,12 +378,8 @@ static void img_update_free(void)
*/
memset(rbu_data.image_update_buffer, 0,
rbu_data.image_update_buffer_size);
- if (rbu_data.dma_alloc == 1)
- dma_free_coherent(NULL, rbu_data.bios_image_size,
- rbu_data.image_update_buffer, dell_rbu_dmaaddr);
- else
- free_pages((unsigned long) rbu_data.image_update_buffer,
- rbu_data.image_update_ordernum);
+ free_pages((unsigned long) rbu_data.image_update_buffer,
+ rbu_data.image_update_ordernum);
/*
* Re-initialize the rbu_data variables after a free
@@ -394,7 +388,6 @@ static void img_update_free(void)
rbu_data.image_update_buffer = NULL;
rbu_data.image_update_buffer_size = 0;
rbu_data.bios_image_size = 0;
- rbu_data.dma_alloc = 0;
}
/*
@@ -410,10 +403,8 @@ static void img_update_free(void)
static int img_update_realloc(unsigned long size)
{
unsigned char *image_update_buffer = NULL;
- unsigned long rc;
unsigned long img_buf_phys_addr;
int ordernum;
- int dma_alloc = 0;
/*
* check if the buffer of sufficient size has been
@@ -444,36 +435,23 @@ static int img_update_realloc(unsigned long size)
ordernum = get_order(size);
image_update_buffer =
- (unsigned char *) __get_free_pages(GFP_KERNEL, ordernum);
-
- img_buf_phys_addr =
- (unsigned long) virt_to_phys(image_update_buffer);
-
- if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
- free_pages((unsigned long) image_update_buffer, ordernum);
- ordernum = -1;
- image_update_buffer = dma_alloc_coherent(NULL, size,
- &dell_rbu_dmaaddr, GFP_KERNEL);
- dma_alloc = 1;
- }
-
+ (unsigned char *)__get_free_pages(GFP_DMA32, ordernum);
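+	/*
+	 * GFP_DMA32 keeps the buffer below 4 GiB, so it can never exceed
+	 * BIOS_SCAN_LIMIT.
+	 */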
spin_lock(&rbu_data.lock);
-
- if (image_update_buffer != NULL) {
- rbu_data.image_update_buffer = image_update_buffer;
- rbu_data.image_update_buffer_size = size;
- rbu_data.bios_image_size =
- rbu_data.image_update_buffer_size;
- rbu_data.image_update_ordernum = ordernum;
- rbu_data.dma_alloc = dma_alloc;
- rc = 0;
- } else {
+ if (!image_update_buffer) {
pr_debug("Not enough memory for image update:"
"size = %ld\n", size);
- rc = -ENOMEM;
+ return -ENOMEM;
}
- return rc;
+ img_buf_phys_addr = (unsigned long)virt_to_phys(image_update_buffer);
+ if (WARN_ON_ONCE(img_buf_phys_addr > BIOS_SCAN_LIMIT))
+		return -EINVAL;	/* cannot happen by definition */
+
+ rbu_data.image_update_buffer = image_update_buffer;
+ rbu_data.image_update_buffer_size = size;
+ rbu_data.bios_image_size = rbu_data.image_update_buffer_size;
+ rbu_data.image_update_ordernum = ordernum;
+ return 0;
}
static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index 59872f87b741..52fcac5b393a 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -201,8 +201,7 @@ static struct wmi_driver huawei_wmi_driver = {
module_wmi_driver(huawei_wmi_driver);
-MODULE_ALIAS("wmi:"WMI0_EVENT_GUID);
-MODULE_ALIAS("wmi:"AMW0_EVENT_GUID);
+MODULE_DEVICE_TABLE(wmi, huawei_wmi_id_table);
MODULE_AUTHOR("Ayman Bagabas <ayman.bagabas@gmail.com>");
MODULE_DESCRIPTION("Huawei WMI hotkeys");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index 3d893e0ac250..197d8a192721 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -159,6 +159,14 @@ static const struct i2c_inst_data bsg1160_data[] = {
{}
};
+static const struct i2c_inst_data bsg2150_data[] = {
+ { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
+ { "bmc150_magn" },
+	/* The resources describe a 3rd client, but it is not really there. */
+ { "bsg2150_dummy_dev" },
+ {}
+};
+
static const struct i2c_inst_data int3515_data[] = {
{ "tps6598x", IRQ_RESOURCE_APIC, 0 },
{ "tps6598x", IRQ_RESOURCE_APIC, 1 },
@@ -173,6 +181,7 @@ static const struct i2c_inst_data int3515_data[] = {
*/
static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
{ "BSG1160", (unsigned long)bsg1160_data },
+ { "BSG2150", (unsigned long)bsg2150_data },
{ "INT3515", (unsigned long)int3515_data },
{ }
};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 1589dffab9fa..c53ae86b59c7 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -989,7 +989,7 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
.ident = "Lenovo RESCUER R720-15IKBN",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_BOARD_NAME, "80WW"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"),
},
},
{
@@ -1091,6 +1091,27 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo ideapad 330-15ICH",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 330-15ICH"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad 530S-14ARR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 530S-14ARR"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad S130-14IGM",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad S130-14IGM"),
+ },
+ },
+ {
.ident = "Lenovo ideapad Y700-14ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -1154,6 +1175,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo Legion Y530-15ICH-1060",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH-1060"),
+ },
+ },
+ {
.ident = "Lenovo Legion Y720-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -1245,6 +1273,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo YOGA C930-13IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA C930-13IKB"),
+ },
+ },
+ {
.ident = "Lenovo Zhaoyang E42-80",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index e28bcf61b126..bc0d55a59015 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -363,7 +363,7 @@ wakeup:
* the 5-button array, but still send notifies with power button
* event code to this device object on power button actions.
*
- * Report the power button press; catch and ignore the button release.
+ * Report the power button press and release.
*/
if (!priv->array) {
if (event == 0xce) {
@@ -372,8 +372,11 @@ wakeup:
return;
}
- if (event == 0xcf)
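+		/* 0xcf is the power button release event */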
+ if (event == 0xcf) {
+ input_report_key(priv->input_dev, KEY_POWER, 0);
+ input_sync(priv->input_dev);
return;
+ }
}
/* 0xC0 is for HID events, other values are for 5 button array */
diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c
index 9ded8e2af312..4dfa61434a76 100644
--- a/drivers/platform/x86/intel-wmi-thunderbolt.c
+++ b/drivers/platform/x86/intel-wmi-thunderbolt.c
@@ -88,7 +88,7 @@ static struct wmi_driver intel_wmi_thunderbolt_driver = {
module_wmi_driver(intel_wmi_thunderbolt_driver);
-MODULE_ALIAS("wmi:" INTEL_WMI_THUNDERBOLT_GUID);
+MODULE_DEVICE_TABLE(wmi, intel_wmi_thunderbolt_id_table);
MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index 4b8f7305fc8a..1694a9aec77c 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -51,11 +51,14 @@
#define GPE0A_STS_PORT 0x420
#define GPE0A_EN_PORT 0x428
-#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+#define BAYTRAIL 0x01
+#define CHERRYTRAIL 0x02
+
+#define ICPU(model, data) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, data }
static const struct x86_cpu_id int0002_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */
- ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT, BAYTRAIL), /* Valleyview, Bay Trail */
+ ICPU(INTEL_FAM6_ATOM_AIRMONT, CHERRYTRAIL), /* Braswell, Cherry Trail */
{}
};
@@ -135,7 +138,7 @@ static irqreturn_t int0002_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static struct irq_chip int0002_irqchip = {
+static struct irq_chip int0002_byt_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
.irq_mask = int0002_irq_mask,
@@ -143,10 +146,22 @@ static struct irq_chip int0002_irqchip = {
.irq_set_wake = int0002_irq_set_wake,
};
+static struct irq_chip int0002_cht_irqchip = {
+ .name = DRV_NAME,
+ .irq_ack = int0002_irq_ack,
+ .irq_mask = int0002_irq_mask,
+ .irq_unmask = int0002_irq_unmask,
+ /*
+	 * No set_wake; on CHT the IRQ is typically shared with the ACPI SCI
+ * and we don't want to mess with the ACPI SCI irq settings.
+ */
+};
+
static int int0002_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct x86_cpu_id *cpu_id;
+ struct irq_chip *irq_chip;
struct gpio_chip *chip;
int irq, ret;
@@ -195,14 +210,19 @@ static int int0002_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_irqchip_add(chip, &int0002_irqchip, 0, handle_edge_irq,
+ if (cpu_id->driver_data == BAYTRAIL)
+ irq_chip = &int0002_byt_irqchip;
+ else
+ irq_chip = &int0002_cht_irqchip;
+
+ ret = gpiochip_irqchip_add(chip, irq_chip, 0, handle_edge_irq,
IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "Error adding irqchip: %d\n", ret);
return ret;
}
- gpiochip_set_chained_irqchip(chip, &int0002_irqchip, irq, NULL);
+ gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL);
return 0;
}
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 22dbf115782e..f2c621b55f49 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -15,6 +15,7 @@
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -22,14 +23,24 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
#include "intel_pmc_core.h"
-#define ICPU(model, data) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (kernel_ulong_t)data }
-
static struct pmc_dev pmc;
+/* PKGC MSRs are common across Intel Core SoCs */
+static const struct pmc_bit_map msr_map[] = {
+ {"Package C2", MSR_PKG_C2_RESIDENCY},
+ {"Package C3", MSR_PKG_C3_RESIDENCY},
+ {"Package C6", MSR_PKG_C6_RESIDENCY},
+ {"Package C7", MSR_PKG_C7_RESIDENCY},
+ {"Package C8", MSR_PKG_C8_RESIDENCY},
+ {"Package C9", MSR_PKG_C9_RESIDENCY},
+ {"Package C10", MSR_PKG_C10_RESIDENCY},
+ {}
+};
+
static const struct pmc_bit_map spt_pll_map[] = {
{"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
{"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
@@ -108,6 +119,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = {
{"SATA", SPT_PMC_LTR_SATA},
{"GIGABIT_ETHERNET", SPT_PMC_LTR_GBE},
{"XHCI", SPT_PMC_LTR_XHCI},
+ {"Reserved", SPT_PMC_LTR_RESERVED},
{"ME", SPT_PMC_LTR_ME},
/* EVA is Enterprise Value Add, doesn't really exist on PCH */
{"EVA", SPT_PMC_LTR_EVA},
@@ -131,6 +143,7 @@ static const struct pmc_reg_map spt_reg_map = {
.mphy_sts = spt_mphy_map,
.pll_sts = spt_pll_map,
.ltr_show_sts = spt_ltr_show_map,
+ .msr_sts = msr_map,
.slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
.ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
.regmap_length = SPT_PMC_MMIO_REG_LEN,
@@ -139,6 +152,7 @@ static const struct pmc_reg_map spt_reg_map = {
.pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
.ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
+ .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
};
/* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
@@ -168,25 +182,26 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"SDX", BIT(4)},
{"SPE", BIT(5)},
{"Fuse", BIT(6)},
- {"Res_23", BIT(7)},
+ /* Reserved for Cannonlake but valid for Icelake */
+ {"SBR8", BIT(7)},
{"CSME_FSC", BIT(0)},
{"USB3_OTG", BIT(1)},
{"EXI", BIT(2)},
{"CSE", BIT(3)},
- {"csme_kvm", BIT(4)},
- {"csme_pmt", BIT(5)},
- {"csme_clink", BIT(6)},
- {"csme_ptio", BIT(7)},
-
- {"csme_usbr", BIT(0)},
- {"csme_susram", BIT(1)},
- {"csme_smt1", BIT(2)},
+ {"CSME_KVM", BIT(4)},
+ {"CSME_PMT", BIT(5)},
+ {"CSME_CLINK", BIT(6)},
+ {"CSME_PTIO", BIT(7)},
+
+ {"CSME_USBR", BIT(0)},
+ {"CSME_SUSRAM", BIT(1)},
+ {"CSME_SMT1", BIT(2)},
{"CSME_SMT4", BIT(3)},
- {"csme_sms2", BIT(4)},
- {"csme_sms1", BIT(5)},
- {"csme_rtc", BIT(6)},
- {"csme_psf", BIT(7)},
+ {"CSME_SMS2", BIT(4)},
+ {"CSME_SMS1", BIT(5)},
+ {"CSME_RTC", BIT(6)},
+ {"CSME_PSF", BIT(7)},
{"SBR0", BIT(0)},
{"SBR1", BIT(1)},
@@ -203,7 +218,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"CNVI", BIT(3)},
{"UFS0", BIT(4)},
{"EMMC", BIT(5)},
- {"Res_6", BIT(6)},
+ {"SPF", BIT(6)},
{"SBR6", BIT(7)},
{"SBR7", BIT(0)},
@@ -211,6 +226,20 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"HDA_PGD4", BIT(2)},
{"HDA_PGD5", BIT(3)},
{"HDA_PGD6", BIT(4)},
+ /* Reserved for Cannonlake but valid for Icelake */
+ {"PSF6", BIT(5)},
+ {"PSF7", BIT(6)},
+ {"PSF8", BIT(7)},
+
+ /* Icelake generation onwards only */
+ {"RES_65", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"TAM", BIT(3)},
+ {"GBETSN", BIT(4)},
+ {"TBTLSX", BIT(5)},
+ {"RES_71", BIT(6)},
+ {"RES_72", BIT(7)},
{}
};
@@ -276,6 +305,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
{"SATA", CNP_PMC_LTR_SATA},
{"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
{"XHCI", CNP_PMC_LTR_XHCI},
+ {"Reserved", CNP_PMC_LTR_RESERVED},
{"ME", CNP_PMC_LTR_ME},
/* EVA is Enterprise Value Add, doesn't really exist on PCH */
{"EVA", CNP_PMC_LTR_EVA},
@@ -291,6 +321,8 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
{"ISH", CNP_PMC_LTR_ISH},
{"UFSX2", CNP_PMC_LTR_UFSX2},
{"EMMC", CNP_PMC_LTR_EMMC},
+ /* Reserved for Cannonlake but valid for Icelake */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
/* Below two cannot be used for LTR_IGNORE */
{"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
{"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
@@ -302,6 +334,7 @@ static const struct pmc_reg_map cnp_reg_map = {
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
.slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
.regmap_length = CNP_PMC_MMIO_REG_LEN,
@@ -312,6 +345,22 @@ static const struct pmc_reg_map cnp_reg_map = {
.ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
};
+static const struct pmc_reg_map icl_reg_map = {
+ .pfear_sts = cnp_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
+};
+
static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
{
return readb(pmcdev->regbase + offset);
@@ -328,9 +377,9 @@ static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
writel(val, pmcdev->regbase + reg_offset);
}
-static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
+static inline u64 pmc_core_adjust_slp_s0_step(u32 value)
{
- return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
+ return (u64)value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
}
static int pmc_core_dev_state_get(void *data, u64 *val)
@@ -380,7 +429,8 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
- for (index = 0; map[index].name; index++)
+ for (index = 0; map[index].name &&
+ index < pmcdev->map->ppfear_buckets * 8; index++)
pmc_core_display_map(s, index, pf_regs[index / 8], map);
return 0;
@@ -677,6 +727,25 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
+static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->msr_sts;
+ u64 pcstate_count;
+ int index;
+
+	for (index = 0; map[index].name; index++) {
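+		/* Skip any package C-state MSR that cannot be read */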
+ if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
+ continue;
+
+ seq_printf(s, "%-8s : 0x%llx\n", map[index].name,
+ pcstate_count);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
+
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -701,7 +770,10 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
&pmc_core_ltr_ignore_ops);
- debugfs_create_file("ltr_show", 0644, dir, pmcdev, &pmc_core_ltr_fops);
+ debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
+
+ debugfs_create_file("package_cstate_show", 0444, dir, pmcdev,
+ &pmc_core_pkgc_fops);
if (pmcdev->map->pll_sts)
debugfs_create_file("pll_status", 0444, dir, pmcdev,
@@ -735,11 +807,12 @@ static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
#endif /* CONFIG_DEBUG_FS */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- ICPU(INTEL_FAM6_SKYLAKE_MOBILE, &spt_reg_map),
- ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, &spt_reg_map),
- ICPU(INTEL_FAM6_KABYLAKE_MOBILE, &spt_reg_map),
- ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, &spt_reg_map),
- ICPU(INTEL_FAM6_CANNONLAKE_MOBILE, &cnp_reg_map),
+ INTEL_CPU_FAM6(SKYLAKE_MOBILE, spt_reg_map),
+ INTEL_CPU_FAM6(SKYLAKE_DESKTOP, spt_reg_map),
+ INTEL_CPU_FAM6(KABYLAKE_MOBILE, spt_reg_map),
+ INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map),
+ INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map),
+ INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map),
{}
};
@@ -750,6 +823,37 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ 0, },
};
+/*
+ * This quirk is needed on platforms where the BIOS forces the 24MHz
+ * crystal to shut down before the PMC can assert SLP_S0#.
+ */
+int quirk_xtal_ignore(const struct dmi_system_id *id)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
+ /* 24MHz Crystal Shutdown Qualification Disable */
+ value |= SPT_PMC_VRIC1_XTALSDQDIS;
+ /* Low Voltage Mode Enable */
+ value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
+ pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
+ return 0;
+}
+
+static const struct dmi_system_id pmc_core_dmi_table[] = {
+ {
+ .callback = quirk_xtal_ignore,
+ .ident = "HP Elite x2 1013 G3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
+ },
+ },
+ {}
+};
+
static int __init pmc_core_probe(void)
{
struct pmc_dev *pmcdev = &pmc;
@@ -768,7 +872,7 @@ static int __init pmc_core_probe(void)
* Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
* in this case.
*/
- if (!pci_dev_present(pmc_pci_ids))
+ if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
pmcdev->map = &cnp_reg_map;
if (lpit_read_residency_count_address(&slp_s0_addr))
@@ -791,6 +895,7 @@ static int __init pmc_core_probe(void)
return err;
}
+ dmi_check_system(pmc_core_dmi_table);
pr_info(" initialized\n");
return 0;
}
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index 89554cba5758..88d9c0653a5f 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -25,6 +25,7 @@
#define SPT_PMC_MTPMC_OFFSET 0x20
#define SPT_PMC_MFPMC_OFFSET 0x38
#define SPT_PMC_LTR_IGNORE_OFFSET 0x30C
+#define SPT_PMC_VRIC1_OFFSET 0x31c
#define SPT_PMC_MPHY_CORE_STS_0 0x1143
#define SPT_PMC_MPHY_CORE_STS_1 0x1142
#define SPT_PMC_MPHY_COM_STS_0 0x1155
@@ -32,7 +33,7 @@
#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
#define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1)
#define MTPMC_MASK 0xffff0000
-#define PPFEAR_MAX_NUM_ENTRIES 5
+#define PPFEAR_MAX_NUM_ENTRIES 12
#define SPT_PPFEAR_NUM_ENTRIES 5
#define SPT_PMC_READ_DISABLE_BIT 0x16
#define SPT_PMC_MSG_FULL_STS_BIT 0x18
@@ -46,6 +47,7 @@
#define SPT_PMC_LTR_SATA 0x368
#define SPT_PMC_LTR_GBE 0x36C
#define SPT_PMC_LTR_XHCI 0x370
+#define SPT_PMC_LTR_RESERVED 0x374
#define SPT_PMC_LTR_ME 0x378
#define SPT_PMC_LTR_EVA 0x37C
#define SPT_PMC_LTR_SPC 0x380
@@ -135,6 +137,9 @@ enum ppfear_regs {
#define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2)
#define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3)
+#define SPT_PMC_VRIC1_SLPS0LVEN BIT(13)
+#define SPT_PMC_VRIC1_XTALSDQDIS BIT(22)
+
/* Cannonlake Power Management Controller register offsets */
#define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4
#define CNP_PMC_PM_CFG_OFFSET 0x1818
@@ -156,6 +161,7 @@ enum ppfear_regs {
#define CNP_PMC_LTR_SATA 0x1B68
#define CNP_PMC_LTR_GBE 0x1B6C
#define CNP_PMC_LTR_XHCI 0x1B70
+#define CNP_PMC_LTR_RESERVED 0x1B74
#define CNP_PMC_LTR_ME 0x1B78
#define CNP_PMC_LTR_EVA 0x1B7C
#define CNP_PMC_LTR_SPC 0x1B80
@@ -176,6 +182,10 @@ enum ppfear_regs {
#define LTR_REQ_SNOOP BIT(15)
#define LTR_REQ_NONSNOOP BIT(31)
+#define ICL_PPFEAR_NUM_ENTRIES 9
+#define ICL_NUM_IP_IGN_ALLOWED 20
+#define ICL_PMC_LTR_WIGIG 0x1BFC
+
struct pmc_bit_map {
const char *name;
u32 bit_mask;
@@ -208,6 +218,7 @@ struct pmc_reg_map {
const struct pmc_bit_map *pll_sts;
const struct pmc_bit_map **slps0_dbg_maps;
const struct pmc_bit_map *ltr_show_sts;
+ const struct pmc_bit_map *msr_sts;
const u32 slp_s0_offset;
const u32 ltr_ignore_offset;
const int regmap_length;
@@ -217,6 +228,7 @@ struct pmc_reg_map {
const int pm_read_disable_bit;
const u32 slps0_dbg_offset;
const u32 ltr_ignore_max;
+ const u32 pm_vric1_offset;
};
/**
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index df3fcd36776a..48fa7573e29b 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -25,6 +25,7 @@
#define MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET 0x00
#define MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET 0x01
#define MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET 0x02
+#define MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET 0x03
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET 0x1f
@@ -33,6 +34,7 @@
#define MLXPLAT_CPLD_LPC_REG_LED3_OFFSET 0x22
#define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23
#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
+#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a
#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
#define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31
#define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32
@@ -67,6 +69,9 @@
#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee
#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef
#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0
+#define MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET 0xf5
+#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
+#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
@@ -584,36 +589,48 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_fan_items_data[] = {
.label = "fan1",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan2",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(1),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan3",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(2),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan4",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(3),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan5",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(4),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan6",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(5),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
@@ -816,61 +833,90 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_led_data[] = {
.label = "fan1:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
},
{
.label = "fan1:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
},
{
.label = "fan2:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
},
{
.label = "fan2:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
},
{
.label = "fan3:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
},
{
.label = "fan3:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
},
{
.label = "fan4:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
},
{
.label = "fan4:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
},
{
.label = "fan5:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
},
{
.label = "fan5:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
},
{
.label = "fan6:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
},
{
.label = "fan6:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
};
@@ -1100,6 +1146,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "cpld4_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -1184,6 +1236,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.bit = 1,
.mode = 0444,
},
+ {
+ .label = "fan_dir",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
};
static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
@@ -1201,61 +1259,85 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.label = "tacho1",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(0),
},
{
.label = "tacho2",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(1),
},
{
.label = "tacho3",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(2),
},
{
.label = "tacho4",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(3),
},
{
.label = "tacho5",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(4),
},
{
.label = "tacho6",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(5),
},
{
.label = "tacho7",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(6),
},
{
.label = "tacho8",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .bit = BIT(7),
},
{
.label = "tacho9",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(0),
},
{
.label = "tacho10",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(1),
},
{
.label = "tacho11",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(2),
},
{
.label = "tacho12",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET,
.mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(3),
},
};
@@ -1299,6 +1381,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
@@ -1307,6 +1390,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1341,6 +1425,9 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
return true;
}
return false;
@@ -1352,6 +1439,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
@@ -1360,6 +1448,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
@@ -1392,6 +1481,9 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
return true;
}
return false;
@@ -1614,6 +1706,13 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN38"),
+ },
+ },
+ {
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0001"),
@@ -1643,6 +1742,12 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
},
},
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0007"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 8f018b3f3cd4..c7039f52ad51 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
@@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
}
#endif /* CONFIG_DEBUG_FS */
+/*
+ * Some systems need one or more of their pmc_plt_clks to be
+ * marked as critical.
+ */
+static const struct dmi_system_id critclk_systems[] = {
+ {
+ .ident = "MPL CEC1x",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
+ },
+ },
+	{ /* sentinel */ }
+};
+
static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
const struct pmc_data *pmc_data)
{
struct platform_device *clkdev;
struct pmc_clk_data *clk_data;
+ const struct dmi_system_id *d = dmi_first_match(critclk_systems);
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
@@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
clk_data->base = pmc_regmap; /* offset is added by client */
clk_data->clks = pmc_data->clks;
+ if (d) {
+ clk_data->critical = true;
+ pr_info("%s critclks quirk enabled\n", d->ident);
+ }
clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
PLATFORM_DEVID_NONE,
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 8c5d47c0aea6..2d56ff7c8230 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -41,6 +41,20 @@ static const struct ts_dmi_data chuwi_hi8_data = {
.properties = chuwi_hi8_props,
};
+static const struct property_entry chuwi_hi8_air_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-hi8-air.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi8_air_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi8_air_props,
+};
+
static const struct property_entry chuwi_hi8_pro_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 6),
PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
@@ -58,6 +72,25 @@ static const struct ts_dmi_data chuwi_hi8_pro_data = {
.properties = chuwi_hi8_pro_props,
};
+static const struct property_entry chuwi_hi10_air_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1981),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1271),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 99),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 9),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 4),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-air.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi10_air_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi10_air_props,
+};
+
static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -369,6 +402,24 @@ static const struct ts_dmi_data pov_mobii_wintab_p800w_v21_data = {
.properties = pov_mobii_wintab_p800w_v21_props,
};
+static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pov_mobii_wintab_p1006w_v10_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pov_mobii_wintab_p1006w_v10_props,
+};
+
static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -498,6 +549,15 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Hi8 Air (CWI543) */
+ .driver_data = (void *)&chuwi_hi8_air_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Hi8 Air"),
+ },
+ },
+ {
/* Chuwi Hi8 Pro (CWI513) */
.driver_data = (void *)&chuwi_hi8_pro_data,
.matches = {
@@ -506,6 +566,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Hi10 Air */
+ .driver_data = (void *)&chuwi_hi10_air_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "P1W6_C109D_B"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
@@ -707,6 +775,17 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Point of View mobii wintab p1006w (v1.0) */
+ .driver_data = (void *)&pov_mobii_wintab_p1006w_v10_data,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+		/* Note 105b is Foxconn's USB/PCI vendor ID */
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
+ },
+ },
+ {
/* Teclast X3 Plus */
.driver_data = (void *)&teclast_x3_plus_data,
.matches = {
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
index c4530ba715e8..8751a13134be 100644
--- a/drivers/platform/x86/wmi-bmof.c
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -119,7 +119,7 @@ static struct wmi_driver wmi_bmof_driver = {
module_wmi_driver(wmi_bmof_driver);
-MODULE_ALIAS("wmi:" WMI_BMOF_GUID);
+MODULE_DEVICE_TABLE(wmi, wmi_bmof_id_table);
MODULE_AUTHOR("Andrew Lutomirski <luto@kernel.org>");
MODULE_DESCRIPTION("WMI embedded Binary MOF driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index bea35be68706..7b26b6ccf1a0 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -768,7 +768,10 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
struct wmi_block *wblock = dev_to_wblock(dev);
const struct wmi_device_id *id = wmi_driver->id_table;
- while (id->guid_string) {
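+	/* Drivers without an id_table cannot match any WMI device */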
+ if (id == NULL)
+ return 0;
+
+ while (*id->guid_string) {
uuid_le driver_guid;
if (WARN_ON(uuid_le_to_bin(id->guid_string, &driver_guid)))
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 08d5037fd052..6887870ba32c 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
int avg_current;
u32 cc_lsb;
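+	/* Bail out early to avoid a division by zero */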
+ if (!divider)
+ return 0;
+
sample &= 0xffffff; /* 24-bits, unsigned */
offset &= 0x7ff; /* 10-bits, signed */
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index ad969d9fc981..c2644a9fe80f 100644
--- a/drivers/power/supply/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL
+// SPDX-License-Identifier: GPL-2.0
/*
* Power supply driver for the goldfish emulator
*
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index dce24f596160..5358a80d854f 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -383,15 +383,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
char *prop_buf;
char *attrname;
- dev_dbg(dev, "uevent\n");
-
if (!psy || !psy->desc) {
dev_dbg(dev, "No power supply yet\n");
return ret;
}
- dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
-
ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
if (ret)
return ret;
@@ -427,8 +423,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
goto out;
}
- dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
-
ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
kfree(attrname);
if (ret)
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index a8f47df0655a..54f8238aac0d 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -192,14 +192,23 @@ config PWM_IMG
To compile this driver as a module, choose M here: the module
will be called pwm-img
-config PWM_IMX
- tristate "i.MX PWM support"
+config PWM_IMX1
+ tristate "i.MX1 PWM support"
depends on ARCH_MXC
help
- Generic PWM framework driver for i.MX.
+	  Generic PWM framework driver for i.MX1 and i.MX21.
To compile this driver as a module, choose M here: the module
- will be called pwm-imx.
+ will be called pwm-imx1.
+
+config PWM_IMX27
+ tristate "i.MX27 PWM support"
+ depends on ARCH_MXC
+ help
+ Generic PWM framework driver for i.MX27 and later i.MX SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-imx27.
config PWM_JZ4740
tristate "Ingenic JZ47xx PWM support"
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 9c676a0dadf5..448825e892bc 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -17,7 +17,8 @@ obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
-obj-$(CONFIG_PWM_IMX) += pwm-imx.o
+obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o
+obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 1581f6ab1b1f..3149204567f3 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -472,7 +472,10 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
state->duty_cycle > state->period)
return -EINVAL;
- if (!memcmp(state, &pwm->state, sizeof(*state)))
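+	/* Nothing to do if the requested state matches the current state */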
+ if (state->period == pwm->state.period &&
+ state->duty_cycle == pwm->state.duty_cycle &&
+ state->polarity == pwm->state.polarity &&
+ state->enabled == pwm->state.enabled)
return 0;
if (pwm->chip->ops->apply) {
@@ -1033,10 +1036,7 @@ static int pwm_seq_show(struct seq_file *s, void *v)
dev_name(chip->dev), chip->npwm,
(chip->npwm != 1) ? "s" : "");
- if (chip->ops->dbg_show)
- chip->ops->dbg_show(chip, s);
- else
- pwm_dbg_show(chip, s);
+ pwm_dbg_show(chip, s);
return 0;
}
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 530d7dc5f1b5..a9fd6f0d408c 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -48,16 +48,6 @@
#define PWMV2_CPRD 0x0C
#define PWMV2_CPRDUPD 0x10
-/*
- * Max value for duty and period
- *
- * Although the duty and period register is 32 bit,
- * however only the LSB 16 bits are significant.
- */
-#define PWM_MAX_DTY 0xFFFF
-#define PWM_MAX_PRD 0xFFFF
-#define PRD_MAX_PRES 10
-
struct atmel_pwm_registers {
u8 period;
u8 period_upd;
@@ -65,11 +55,21 @@ struct atmel_pwm_registers {
u8 duty_upd;
};
+struct atmel_pwm_config {
+ u32 max_period;
+ u32 max_pres;
+};
+
+struct atmel_pwm_data {
+ struct atmel_pwm_registers regs;
+ struct atmel_pwm_config cfg;
+};
+
struct atmel_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
- const struct atmel_pwm_registers *regs;
+ const struct atmel_pwm_data *data;
unsigned int updated_pwms;
/* ISR is cleared when read, ensure only one thread does that */
@@ -121,10 +121,10 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
cycles *= clk_get_rate(atmel_pwm->clk);
do_div(cycles, NSEC_PER_SEC);
- for (*pres = 0; cycles > PWM_MAX_PRD; cycles >>= 1)
+ for (*pres = 0; cycles > atmel_pwm->data->cfg.max_period; cycles >>= 1)
(*pres)++;
- if (*pres > PRD_MAX_PRES) {
+ if (*pres > atmel_pwm->data->cfg.max_pres) {
dev_err(chip->dev, "pres exceeds the maximum value\n");
return -EINVAL;
}
@@ -150,15 +150,15 @@ static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm,
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
u32 val;
- if (atmel_pwm->regs->duty_upd ==
- atmel_pwm->regs->period_upd) {
+ if (atmel_pwm->data->regs.duty_upd ==
+ atmel_pwm->data->regs.period_upd) {
val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
val &= ~PWM_CMR_UPD_CDTY;
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
}
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
- atmel_pwm->regs->duty_upd, cdty);
+ atmel_pwm->data->regs.duty_upd, cdty);
}
static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip,
@@ -168,9 +168,9 @@ static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip,
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
- atmel_pwm->regs->duty, cdty);
+ atmel_pwm->data->regs.duty, cdty);
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
- atmel_pwm->regs->period, cprd);
+ atmel_pwm->data->regs.period, cprd);
}
static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -225,7 +225,7 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
cstate.polarity == state->polarity &&
cstate.period == state->period) {
cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
- atmel_pwm->regs->period);
+ atmel_pwm->data->regs.period);
atmel_pwm_calculate_cdty(state, cprd, &cdty);
atmel_pwm_update_cdty(chip, pwm, cdty);
return 0;
@@ -277,27 +277,55 @@ static const struct pwm_ops atmel_pwm_ops = {
.owner = THIS_MODULE,
};
-static const struct atmel_pwm_registers atmel_pwm_regs_v1 = {
- .period = PWMV1_CPRD,
- .period_upd = PWMV1_CUPD,
- .duty = PWMV1_CDTY,
- .duty_upd = PWMV1_CUPD,
+static const struct atmel_pwm_data atmel_sam9rl_pwm_data = {
+ .regs = {
+ .period = PWMV1_CPRD,
+ .period_upd = PWMV1_CUPD,
+ .duty = PWMV1_CDTY,
+ .duty_upd = PWMV1_CUPD,
+ },
+ .cfg = {
+		/* 16-bit period and duty-cycle registers. */
+ .max_period = 0xffff,
+ .max_pres = 10,
+ },
+};
+
+static const struct atmel_pwm_data atmel_sama5_pwm_data = {
+ .regs = {
+ .period = PWMV2_CPRD,
+ .period_upd = PWMV2_CPRDUPD,
+ .duty = PWMV2_CDTY,
+ .duty_upd = PWMV2_CDTYUPD,
+ },
+ .cfg = {
+		/* 16-bit period and duty-cycle registers. */
+ .max_period = 0xffff,
+ .max_pres = 10,
+ },
};
-static const struct atmel_pwm_registers atmel_pwm_regs_v2 = {
- .period = PWMV2_CPRD,
- .period_upd = PWMV2_CPRDUPD,
- .duty = PWMV2_CDTY,
- .duty_upd = PWMV2_CDTYUPD,
+static const struct atmel_pwm_data mchp_sam9x60_pwm_data = {
+ .regs = {
+ .period = PWMV1_CPRD,
+ .period_upd = PWMV1_CUPD,
+ .duty = PWMV1_CDTY,
+ .duty_upd = PWMV1_CUPD,
+ },
+ .cfg = {
+		/* 32-bit period and duty-cycle registers. */
+ .max_period = 0xffffffff,
+ .max_pres = 10,
+ },
};
static const struct platform_device_id atmel_pwm_devtypes[] = {
{
.name = "at91sam9rl-pwm",
- .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v1,
+ .driver_data = (kernel_ulong_t)&atmel_sam9rl_pwm_data,
}, {
.name = "sama5d3-pwm",
- .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v2,
+ .driver_data = (kernel_ulong_t)&atmel_sama5_pwm_data,
}, {
/* sentinel */
},
@@ -307,20 +335,23 @@ MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes);
static const struct of_device_id atmel_pwm_dt_ids[] = {
{
.compatible = "atmel,at91sam9rl-pwm",
- .data = &atmel_pwm_regs_v1,
+ .data = &atmel_sam9rl_pwm_data,
}, {
.compatible = "atmel,sama5d3-pwm",
- .data = &atmel_pwm_regs_v2,
+ .data = &atmel_sama5_pwm_data,
}, {
.compatible = "atmel,sama5d2-pwm",
- .data = &atmel_pwm_regs_v2,
+ .data = &atmel_sama5_pwm_data,
+ }, {
+ .compatible = "microchip,sam9x60-pwm",
+ .data = &mchp_sam9x60_pwm_data,
}, {
/* sentinel */
},
};
MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
-static inline const struct atmel_pwm_registers *
+static inline const struct atmel_pwm_data *
atmel_pwm_get_driver_data(struct platform_device *pdev)
{
const struct platform_device_id *id;
@@ -330,18 +361,18 @@ atmel_pwm_get_driver_data(struct platform_device *pdev)
id = platform_get_device_id(pdev);
- return (struct atmel_pwm_registers *)id->driver_data;
+ return (struct atmel_pwm_data *)id->driver_data;
}
static int atmel_pwm_probe(struct platform_device *pdev)
{
- const struct atmel_pwm_registers *regs;
+ const struct atmel_pwm_data *data;
struct atmel_pwm_chip *atmel_pwm;
struct resource *res;
int ret;
- regs = atmel_pwm_get_driver_data(pdev);
- if (!regs)
+ data = atmel_pwm_get_driver_data(pdev);
+ if (!data)
return -ENODEV;
atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
@@ -373,7 +404,7 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->chip.base = -1;
atmel_pwm->chip.npwm = 4;
- atmel_pwm->regs = regs;
+ atmel_pwm->data = data;
atmel_pwm->updated_pwms = 0;
mutex_init(&atmel_pwm->isr_lock);
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 09a95aeb3a70..81da91df2529 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -45,25 +45,25 @@
* high or low depending on its state at that exact instant.
*/
-#define PWM_CONTROL_OFFSET (0x00000000)
+#define PWM_CONTROL_OFFSET 0x00000000
#define PWM_CONTROL_SMOOTH_SHIFT(chan) (24 + (chan))
#define PWM_CONTROL_TYPE_SHIFT(chan) (16 + (chan))
#define PWM_CONTROL_POLARITY_SHIFT(chan) (8 + (chan))
#define PWM_CONTROL_TRIGGER_SHIFT(chan) (chan)
-#define PRESCALE_OFFSET (0x00000004)
+#define PRESCALE_OFFSET 0x00000004
#define PRESCALE_SHIFT(chan) ((chan) << 2)
#define PRESCALE_MASK(chan) (0x7 << PRESCALE_SHIFT(chan))
-#define PRESCALE_MIN (0x00000000)
-#define PRESCALE_MAX (0x00000007)
+#define PRESCALE_MIN 0x00000000
+#define PRESCALE_MAX 0x00000007
#define PERIOD_COUNT_OFFSET(chan) (0x00000008 + ((chan) << 3))
-#define PERIOD_COUNT_MIN (0x00000002)
-#define PERIOD_COUNT_MAX (0x00ffffff)
+#define PERIOD_COUNT_MIN 0x00000002
+#define PERIOD_COUNT_MAX 0x00ffffff
#define DUTY_CYCLE_HIGH_OFFSET(chan) (0x0000000c + ((chan) << 3))
-#define DUTY_CYCLE_HIGH_MIN (0x00000000)
-#define DUTY_CYCLE_HIGH_MAX (0x00ffffff)
+#define DUTY_CYCLE_HIGH_MIN 0x00000000
+#define DUTY_CYCLE_HIGH_MAX 0x00ffffff
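
The change above only drops redundant parentheses around plain constants; the per-channel layout is unchanged. A small runnable sketch of the offset and mask arithmetic these macros encode, using the values from the defines above:

#include <stdio.h>

#define PRESCALE_SHIFT(chan)		((chan) << 2)
#define PRESCALE_MASK(chan)		(0x7 << PRESCALE_SHIFT(chan))
#define PERIOD_COUNT_OFFSET(chan)	(0x00000008 + ((chan) << 3))
#define DUTY_CYCLE_HIGH_OFFSET(chan)	(0x0000000c + ((chan) << 3))

int main(void)
{
	int chan;

	/* Each channel owns a 3-bit prescale field spaced 4 bits apart
	 * and a period/duty register pair spaced 8 bytes apart. */
	for (chan = 0; chan < 6; chan++)
		printf("chan %d: prescale mask 0x%08x, period @0x%02x, duty @0x%02x\n",
		       chan, PRESCALE_MASK(chan),
		       PERIOD_COUNT_OFFSET(chan), DUTY_CYCLE_HIGH_OFFSET(chan));
	return 0;
}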
struct kona_pwmc {
struct pwm_chip chip;
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index 27c107e78d59..a0b09603d13d 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -49,15 +49,30 @@ struct hibvt_pwm_chip {
struct clk *clk;
void __iomem *base;
struct reset_control *rstc;
+ const struct hibvt_pwm_soc *soc;
};
struct hibvt_pwm_soc {
u32 num_pwms;
+ bool quirk_force_enable;
};
-static const struct hibvt_pwm_soc pwm_soc[2] = {
- { .num_pwms = 4 },
- { .num_pwms = 8 },
+static const struct hibvt_pwm_soc hi3516cv300_soc_info = {
+ .num_pwms = 4,
+};
+
+static const struct hibvt_pwm_soc hi3519v100_soc_info = {
+ .num_pwms = 8,
+};
+
+static const struct hibvt_pwm_soc hi3559v100_shub_soc_info = {
+ .num_pwms = 8,
+ .quirk_force_enable = true,
+};
+
+static const struct hibvt_pwm_soc hi3559v100_soc_info = {
+ .num_pwms = 2,
+ .quirk_force_enable = true,
};
static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip)
@@ -148,13 +163,23 @@ static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
if (state->polarity != pwm->state.polarity)
hibvt_pwm_set_polarity(chip, pwm, state->polarity);
if (state->period != pwm->state.period ||
- state->duty_cycle != pwm->state.duty_cycle)
+ state->duty_cycle != pwm->state.duty_cycle) {
hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ /*
+ * Some implementations require the PWM to be enabled twice
+ * each time the duty cycle is refreshed.
+ */
+ if (hi_pwm_chip->soc->quirk_force_enable && state->enabled)
+ hibvt_pwm_enable(chip, pwm);
+ }
+
if (state->enabled != pwm->state.enabled) {
if (state->enabled)
hibvt_pwm_enable(chip, pwm);
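
The quirk re-asserts the enable bit right after a duty-cycle refresh. A condensed, hypothetical trace of the apply flow this hunk produces, with the hardware accessors replaced by printf() so it runs as plain C:

#include <stdbool.h>
#include <stdio.h>

struct pwm_state { bool enabled; unsigned period, duty; };

static void trace_apply(struct pwm_state cur, struct pwm_state new,
			bool quirk_force_enable)
{
	if (new.period != cur.period || new.duty != cur.duty) {
		printf("config(duty=%u, period=%u)\n", new.duty, new.period);
		if (quirk_force_enable && new.enabled)
			printf("enable() /* quirk: re-enable after refresh */\n");
	}
	if (new.enabled != cur.enabled)
		printf(new.enabled ? "enable()\n" : "disable()\n");
}

int main(void)
{
	struct pwm_state cur = { true, 1000, 200 };
	struct pwm_state new = { true, 1000, 500 };

	/* On hi3559v100, changing only the duty cycle still re-enables. */
	trace_apply(cur, new, true);
	return 0;
}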
@@ -198,6 +223,7 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
pwm_chip->chip.npwm = soc->num_pwms;
pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags;
pwm_chip->chip.of_pwm_n_cells = 3;
+ pwm_chip->soc = soc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pwm_chip->base = devm_ioremap_resource(&pdev->dev, res);
@@ -250,8 +276,14 @@ static int hibvt_pwm_remove(struct platform_device *pdev)
}
static const struct of_device_id hibvt_pwm_of_match[] = {
- { .compatible = "hisilicon,hi3516cv300-pwm", .data = &pwm_soc[0] },
- { .compatible = "hisilicon,hi3519v100-pwm", .data = &pwm_soc[1] },
+ { .compatible = "hisilicon,hi3516cv300-pwm",
+ .data = &hi3516cv300_soc_info },
+ { .compatible = "hisilicon,hi3519v100-pwm",
+ .data = &hi3519v100_soc_info },
+ { .compatible = "hisilicon,hi3559v100-shub-pwm",
+ .data = &hi3559v100_shub_soc_info },
+ { .compatible = "hisilicon,hi3559v100-pwm",
+ .data = &hi3559v100_soc_info },
{ }
};
MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match);
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
new file mode 100644
index 000000000000..f8b2c2e001a7
--- /dev/null
+++ b/drivers/pwm/pwm-imx1.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple driver for the PWM (Pulse Width Modulator) controller
+ *
+ * Derived from pxa PWM driver by eric miao <eric.miao@marvell.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define MX1_PWMC 0x00 /* PWM Control Register */
+#define MX1_PWMS 0x04 /* PWM Sample Register */
+#define MX1_PWMP 0x08 /* PWM Period Register */
+
+#define MX1_PWMC_EN BIT(4)
+
+struct pwm_imx1_chip {
+ struct clk *clk_ipg;
+ struct clk *clk_per;
+ void __iomem *mmio_base;
+ struct pwm_chip chip;
+};
+
+#define to_pwm_imx1_chip(chip) container_of(chip, struct pwm_imx1_chip, chip)
+
+static int pwm_imx1_clk_prepare_enable(struct pwm_chip *chip)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+ int ret;
+
+ ret = clk_prepare_enable(imx->clk_ipg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(imx->clk_per);
+ if (ret) {
+ clk_disable_unprepare(imx->clk_ipg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pwm_imx1_clk_disable_unprepare(struct pwm_chip *chip)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+
+ clk_disable_unprepare(imx->clk_per);
+ clk_disable_unprepare(imx->clk_ipg);
+}
+
+static int pwm_imx1_config(struct pwm_chip *chip,
+ struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+ u32 max, p;
+
+ /*
+ * The PWM subsystem allows for exact frequencies. However,
+ * I cannot connect a scope on my device to the PWM line and
+ * thus cannot program the PWM controller
+ * exactly. Instead, I'm relying on the fact that the
+ * Bootloader (u-boot or WinCE+haret) has programmed the PWM
+ * function group already. So I'll just modify the PWM sample
+ * register to follow the ratio of duty_ns vs. period_ns
+ * accordingly.
+ *
+ * This is good enough for programming the brightness of
+ * the LCD backlight.
+ *
+ * The real implementation would divide PERCLK[0] first by
+ * both the prescaler (/1 .. /128) and then by CLKSEL
+ * (/2 .. /16).
+ */
+ max = readl(imx->mmio_base + MX1_PWMP);
+ p = max * duty_ns / period_ns;
+
+ writel(max - p, imx->mmio_base + MX1_PWMS);
+
+ return 0;
+}
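
pwm_imx1_config() scales the bootloader-programmed period by the requested duty ratio. A sketch of that arithmetic; the 64-bit widening is a defensive assumption added here (the original multiplies in 32 bits):

#include <stdint.h>
#include <stdio.h>

/* The driver writes max - max * duty_ns / period_ns to MX1_PWMS. */
static uint32_t mx1_sample(uint32_t max, uint32_t duty_ns, uint32_t period_ns)
{
	uint32_t p = (uint64_t)max * duty_ns / period_ns;

	return max - p;
}

int main(void)
{
	/* e.g. the bootloader left MX1_PWMP at 1023; request 25% duty */
	printf("PWMS = %u\n", mx1_sample(1023, 250000, 1000000)); /* 768 */
	return 0;
}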
+
+static int pwm_imx1_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+ u32 value;
+ int ret;
+
+ ret = pwm_imx1_clk_prepare_enable(chip);
+ if (ret < 0)
+ return ret;
+
+ value = readl(imx->mmio_base + MX1_PWMC);
+ value |= MX1_PWMC_EN;
+ writel(value, imx->mmio_base + MX1_PWMC);
+
+ return 0;
+}
+
+static void pwm_imx1_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip);
+ u32 value;
+
+ value = readl(imx->mmio_base + MX1_PWMC);
+ value &= ~MX1_PWMC_EN;
+ writel(value, imx->mmio_base + MX1_PWMC);
+
+ pwm_imx1_clk_disable_unprepare(chip);
+}
+
+static const struct pwm_ops pwm_imx1_ops = {
+ .enable = pwm_imx1_enable,
+ .disable = pwm_imx1_disable,
+ .config = pwm_imx1_config,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id pwm_imx1_dt_ids[] = {
+ { .compatible = "fsl,imx1-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pwm_imx1_dt_ids);
+
+static int pwm_imx1_probe(struct platform_device *pdev)
+{
+ struct pwm_imx1_chip *imx;
+ struct resource *r;
+
+ imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
+ if (!imx)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, imx);
+
+ imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(imx->clk_ipg)) {
+ dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
+ PTR_ERR(imx->clk_ipg));
+ return PTR_ERR(imx->clk_ipg);
+ }
+
+ imx->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(imx->clk_per)) {
+ int ret = PTR_ERR(imx->clk_per);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get peripheral clock: %d\n",
+ ret);
+
+ return ret;
+ }
+
+ imx->chip.ops = &pwm_imx1_ops;
+ imx->chip.dev = &pdev->dev;
+ imx->chip.base = -1;
+ imx->chip.npwm = 1;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(imx->mmio_base))
+ return PTR_ERR(imx->mmio_base);
+
+ return pwmchip_add(&imx->chip);
+}
+
+static int pwm_imx1_remove(struct platform_device *pdev)
+{
+ struct pwm_imx1_chip *imx = platform_get_drvdata(pdev);
+
+ pwm_imx1_clk_disable_unprepare(&imx->chip);
+
+ return pwmchip_remove(&imx->chip);
+}
+
+static struct platform_driver pwm_imx1_driver = {
+ .driver = {
+ .name = "pwm-imx1",
+ .of_match_table = pwm_imx1_dt_ids,
+ },
+ .probe = pwm_imx1_probe,
+ .remove = pwm_imx1_remove,
+};
+module_platform_driver(pwm_imx1_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx27.c
index 55a3a363d5be..806130654211 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -19,16 +19,6 @@
#include <linux/pwm.h>
#include <linux/slab.h>
-/* i.MX1 and i.MX21 share the same PWM function block: */
-
-#define MX1_PWMC 0x00 /* PWM Control Register */
-#define MX1_PWMS 0x04 /* PWM Sample Register */
-#define MX1_PWMP 0x08 /* PWM Period Register */
-
-#define MX1_PWMC_EN BIT(4)
-
-/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */
-
#define MX3_PWMCR 0x00 /* PWM Control Register */
#define MX3_PWMSR 0x04 /* PWM Status Register */
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
@@ -86,21 +76,18 @@
/* PWMPR register value of 0xffff has the same effect as 0xfffe */
#define MX3_PWMPR_MAX 0xfffe
-struct imx_chip {
+struct pwm_imx27_chip {
struct clk *clk_ipg;
-
struct clk *clk_per;
-
void __iomem *mmio_base;
-
struct pwm_chip chip;
};
-#define to_imx_chip(chip) container_of(chip, struct imx_chip, chip)
+#define to_pwm_imx27_chip(chip) container_of(chip, struct pwm_imx27_chip, chip)
-static int imx_pwm_clk_prepare_enable(struct pwm_chip *chip)
+static int pwm_imx27_clk_prepare_enable(struct pwm_chip *chip)
{
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
int ret;
ret = clk_prepare_enable(imx->clk_ipg);
@@ -116,35 +103,32 @@ static int imx_pwm_clk_prepare_enable(struct pwm_chip *chip)
return 0;
}
-static void imx_pwm_clk_disable_unprepare(struct pwm_chip *chip)
+static void pwm_imx27_clk_disable_unprepare(struct pwm_chip *chip)
{
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
clk_disable_unprepare(imx->clk_per);
clk_disable_unprepare(imx->clk_ipg);
}
-static void imx_pwm_get_state(struct pwm_chip *chip,
- struct pwm_device *pwm, struct pwm_state *state)
+static void pwm_imx27_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm, struct pwm_state *state)
{
- struct imx_chip *imx = to_imx_chip(chip);
- u32 period, prescaler, pwm_clk, ret, val;
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
+ u32 period, prescaler, pwm_clk, val;
u64 tmp;
+ int ret;
- ret = imx_pwm_clk_prepare_enable(chip);
+ ret = pwm_imx27_clk_prepare_enable(chip);
if (ret < 0)
return;
val = readl(imx->mmio_base + MX3_PWMCR);
- if (val & MX3_PWMCR_EN) {
+ if (val & MX3_PWMCR_EN)
state->enabled = true;
- ret = imx_pwm_clk_prepare_enable(chip);
- if (ret)
- return;
- } else {
+ else
state->enabled = false;
- }
switch (FIELD_GET(MX3_PWMCR_POUTC, val)) {
case MX3_PWMCR_POUTC_NORMAL:
@@ -176,70 +160,13 @@ static void imx_pwm_get_state(struct pwm_chip *chip,
state->duty_cycle = 0;
}
- imx_pwm_clk_disable_unprepare(chip);
-}
-
-static int imx_pwm_config_v1(struct pwm_chip *chip,
- struct pwm_device *pwm, int duty_ns, int period_ns)
-{
- struct imx_chip *imx = to_imx_chip(chip);
-
- /*
- * The PWM subsystem allows for exact frequencies. However,
- * I cannot connect a scope on my device to the PWM line and
- * thus cannot provide the program the PWM controller
- * exactly. Instead, I'm relying on the fact that the
- * Bootloader (u-boot or WinCE+haret) has programmed the PWM
- * function group already. So I'll just modify the PWM sample
- * register to follow the ratio of duty_ns vs. period_ns
- * accordingly.
- *
- * This is good enough for programming the brightness of
- * the LCD backlight.
- *
- * The real implementation would divide PERCLK[0] first by
- * both the prescaler (/1 .. /128) and then by CLKSEL
- * (/2 .. /16).
- */
- u32 max = readl(imx->mmio_base + MX1_PWMP);
- u32 p = max * duty_ns / period_ns;
- writel(max - p, imx->mmio_base + MX1_PWMS);
-
- return 0;
-}
-
-static int imx_pwm_enable_v1(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct imx_chip *imx = to_imx_chip(chip);
- u32 val;
- int ret;
-
- ret = imx_pwm_clk_prepare_enable(chip);
- if (ret < 0)
- return ret;
-
- val = readl(imx->mmio_base + MX1_PWMC);
- val |= MX1_PWMC_EN;
- writel(val, imx->mmio_base + MX1_PWMC);
-
- return 0;
-}
-
-static void imx_pwm_disable_v1(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct imx_chip *imx = to_imx_chip(chip);
- u32 val;
-
- val = readl(imx->mmio_base + MX1_PWMC);
- val &= ~MX1_PWMC_EN;
- writel(val, imx->mmio_base + MX1_PWMC);
-
- imx_pwm_clk_disable_unprepare(chip);
+ if (!state->enabled)
+ pwm_imx27_clk_disable_unprepare(chip);
}
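
The rework above keeps the peripheral clock referenced while the PWM is running and releases it only when the hardware turns out to be off. A minimal refcount sketch of that balance, with the clock API stubbed out:

#include <stdbool.h>
#include <stdio.h>

static int clk_refcount;

static void clk_enable(void)  { clk_refcount++; }
static void clk_disable(void) { clk_refcount--; }

/* Shape of the fixed get_state(): enable the clock to read registers,
 * release it only if the PWM was not running. */
static void get_state(bool hw_enabled)
{
	clk_enable();
	/* ... read MX3_PWMCR and friends ... */
	if (!hw_enabled)
		clk_disable();
}

int main(void)
{
	get_state(false);
	printf("refcount after reading a stopped PWM: %d\n", clk_refcount); /* 0 */
	get_state(true);
	printf("refcount after reading a running PWM: %d\n", clk_refcount); /* 1 */
	return 0;
}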
-static void imx_pwm_sw_reset(struct pwm_chip *chip)
+static void pwm_imx27_sw_reset(struct pwm_chip *chip)
{
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
struct device *dev = chip->dev;
int wait_count = 0;
u32 cr;
@@ -255,10 +182,10 @@ static void imx_pwm_sw_reset(struct pwm_chip *chip)
dev_warn(dev, "software reset timeout\n");
}
-static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip,
- struct pwm_device *pwm)
+static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
+ struct pwm_device *pwm)
{
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
struct device *dev = chip->dev;
unsigned int period_ms;
int fifoav;
@@ -277,11 +204,11 @@ static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip,
}
}
-static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
unsigned long period_cycles, duty_cycles, prescale;
- struct imx_chip *imx = to_imx_chip(chip);
+ struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
struct pwm_state cstate;
unsigned long long c;
int ret;
@@ -318,13 +245,13 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
* enabled.
*/
if (cstate.enabled) {
- imx_pwm_wait_fifo_slot(chip, pwm);
+ pwm_imx27_wait_fifo_slot(chip, pwm);
} else {
- ret = imx_pwm_clk_prepare_enable(chip);
+ ret = pwm_imx27_clk_prepare_enable(chip);
if (ret)
return ret;
- imx_pwm_sw_reset(chip);
+ pwm_imx27_sw_reset(chip);
}
writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
@@ -343,64 +270,35 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
} else if (cstate.enabled) {
writel(0, imx->mmio_base + MX3_PWMCR);
- imx_pwm_clk_disable_unprepare(chip);
+ pwm_imx27_clk_disable_unprepare(chip);
}
return 0;
}
-static const struct pwm_ops imx_pwm_ops_v1 = {
- .enable = imx_pwm_enable_v1,
- .disable = imx_pwm_disable_v1,
- .config = imx_pwm_config_v1,
+static const struct pwm_ops pwm_imx27_ops = {
+ .apply = pwm_imx27_apply,
+ .get_state = pwm_imx27_get_state,
.owner = THIS_MODULE,
};
-static const struct pwm_ops imx_pwm_ops_v2 = {
- .apply = imx_pwm_apply_v2,
- .get_state = imx_pwm_get_state,
- .owner = THIS_MODULE,
-};
-
-struct imx_pwm_data {
- bool polarity_supported;
- const struct pwm_ops *ops;
-};
-
-static struct imx_pwm_data imx_pwm_data_v1 = {
- .ops = &imx_pwm_ops_v1,
-};
-
-static struct imx_pwm_data imx_pwm_data_v2 = {
- .polarity_supported = true,
- .ops = &imx_pwm_ops_v2,
-};
-
-static const struct of_device_id imx_pwm_dt_ids[] = {
- { .compatible = "fsl,imx1-pwm", .data = &imx_pwm_data_v1, },
- { .compatible = "fsl,imx27-pwm", .data = &imx_pwm_data_v2, },
+static const struct of_device_id pwm_imx27_dt_ids[] = {
+ { .compatible = "fsl,imx27-pwm", },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, imx_pwm_dt_ids);
+MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids);
-static int imx_pwm_probe(struct platform_device *pdev)
+static int pwm_imx27_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(imx_pwm_dt_ids, &pdev->dev);
- const struct imx_pwm_data *data;
- struct imx_chip *imx;
+ struct pwm_imx27_chip *imx;
struct resource *r;
- int ret = 0;
-
- if (!of_id)
- return -ENODEV;
-
- data = of_id->data;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
if (imx == NULL)
return -ENOMEM;
+ platform_set_drvdata(pdev, imx);
+
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg)) {
dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
@@ -410,57 +308,51 @@ static int imx_pwm_probe(struct platform_device *pdev)
imx->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(imx->clk_per)) {
- dev_err(&pdev->dev, "getting per clock failed with %ld\n",
- PTR_ERR(imx->clk_per));
- return PTR_ERR(imx->clk_per);
+ int ret = PTR_ERR(imx->clk_per);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get peripheral clock: %d\n",
+ ret);
+
+ return ret;
}
- imx->chip.ops = data->ops;
+ imx->chip.ops = &pwm_imx27_ops;
imx->chip.dev = &pdev->dev;
imx->chip.base = -1;
imx->chip.npwm = 1;
- if (data->polarity_supported) {
- dev_dbg(&pdev->dev, "PWM supports output inversion\n");
- imx->chip.of_xlate = of_pwm_xlate_with_flags;
- imx->chip.of_pwm_n_cells = 3;
- }
+ imx->chip.of_xlate = of_pwm_xlate_with_flags;
+ imx->chip.of_pwm_n_cells = 3;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
- ret = pwmchip_add(&imx->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, imx);
- return 0;
+ return pwmchip_add(&imx->chip);
}
-static int imx_pwm_remove(struct platform_device *pdev)
+static int pwm_imx27_remove(struct platform_device *pdev)
{
- struct imx_chip *imx;
+ struct pwm_imx27_chip *imx;
imx = platform_get_drvdata(pdev);
- if (imx == NULL)
- return -ENODEV;
- imx_pwm_clk_disable_unprepare(&imx->chip);
+ pwm_imx27_clk_disable_unprepare(&imx->chip);
return pwmchip_remove(&imx->chip);
}
static struct platform_driver imx_pwm_driver = {
- .driver = {
- .name = "imx-pwm",
- .of_match_table = imx_pwm_dt_ids,
+ .driver = {
+ .name = "pwm-imx27",
+ .of_match_table = pwm_imx27_dt_ids,
},
- .probe = imx_pwm_probe,
- .remove = imx_pwm_remove,
+ .probe = pwm_imx27_probe,
+ .remove = pwm_imx27_remove,
};
-
module_platform_driver(imx_pwm_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 893940d45f0d..15803c71fe80 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -277,10 +277,21 @@ static const struct mtk_pwm_data mt8173_pwm_data = {
.commit_mask = 0x1,
};
+static const struct mtk_pwm_data mt8183_pwm_data = {
+ .enable_mask = BIT(0),
+ .con0 = 0x18,
+ .con0_sel = 0x0,
+ .con1 = 0x1c,
+ .has_commit = false,
+ .bls_debug = 0x80,
+ .bls_debug_mask = 0x3,
+};
+
static const struct of_device_id mtk_disp_pwm_of_match[] = {
{ .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data},
{ .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data},
{ .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data},
+ { .compatible = "mediatek,mt8183-disp-pwm", .data = &mt8183_pwm_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match);
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index a41812fc6f95..cfe7dd1b448e 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -8,6 +8,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -68,19 +70,15 @@ static void rcar_pwm_update(struct rcar_pwm_chip *rp, u32 mask, u32 data,
static int rcar_pwm_get_clock_division(struct rcar_pwm_chip *rp, int period_ns)
{
unsigned long clk_rate = clk_get_rate(rp->clk);
- unsigned long long max; /* max cycle / nanoseconds */
- unsigned int div;
+ u64 div, tmp;
if (clk_rate == 0)
return -EINVAL;
- for (div = 0; div <= RCAR_PWM_MAX_DIVISION; div++) {
- max = (unsigned long long)NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE *
- (1 << div);
- do_div(max, clk_rate);
- if (period_ns <= max)
- break;
- }
+ div = (u64)NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE;
+ tmp = (u64)period_ns * clk_rate + div - 1;
+ tmp = div64_u64(tmp, div);
+ div = ilog2(tmp - 1) + 1;
return (div <= RCAR_PWM_MAX_DIVISION) ? div : -ERANGE;
}
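
The loop over candidate dividers becomes a closed form: round the needed cycle count up, then take the ceiling log2. A checkable sketch; RCAR_PWM_MAX_CYCLE is not visible in this hunk, so it is a parameter here, and a loop stands in for ilog2():

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* ceil(log2(x)) for x >= 1, mirroring "ilog2(tmp - 1) + 1" */
static unsigned int ceil_log2(uint64_t x)
{
	unsigned int r = 0;

	while ((1ULL << r) < x)
		r++;
	return r;
}

/* Smallest div with period_ns <= NSEC_PER_SEC * max_cycle * 2^div / clk */
static unsigned int clock_division(uint64_t clk_rate, uint64_t period_ns,
				   uint64_t max_cycle)
{
	uint64_t d = NSEC_PER_SEC * max_cycle;
	uint64_t tmp = (period_ns * clk_rate + d - 1) / d;

	return ceil_log2(tmp);
}

int main(void)
{
	/* 100 MHz clock, 1 ms period, 1023-cycle counter (assumed) */
	printf("div = %u\n", clock_division(100000000, 1000000, 1023)); /* 7 */
	return 0;
}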
@@ -139,39 +137,8 @@ static void rcar_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
pm_runtime_put(chip->dev);
}
-static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int rcar_pwm_enable(struct rcar_pwm_chip *rp)
{
- struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
- int div, ret;
-
- div = rcar_pwm_get_clock_division(rp, period_ns);
- if (div < 0)
- return div;
-
- /*
- * Let the core driver set pwm->period if disabled and duty_ns == 0.
- * But, this driver should prevent to set the new duty_ns if current
- * duty_cycle is not set
- */
- if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle)
- return 0;
-
- rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
-
- ret = rcar_pwm_set_counter(rp, div, duty_ns, period_ns);
- if (!ret)
- rcar_pwm_set_clock_control(rp, div);
-
- /* The SYNC should be set to 0 even if rcar_pwm_set_counter failed */
- rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR);
-
- return ret;
-}
-
-static int rcar_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
u32 value;
/* Don't enable the PWM device if CYC0 or PH0 is 0 */
@@ -185,19 +152,51 @@ static int rcar_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return 0;
}
-static void rcar_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void rcar_pwm_disable(struct rcar_pwm_chip *rp)
+{
+ rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR);
+}
+
+static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
+ struct pwm_state cur_state;
+ int div, ret;
- rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR);
+ /* This HW/driver only supports normal polarity */
+ pwm_get_state(pwm, &cur_state);
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -ENOTSUPP;
+
+ if (!state->enabled) {
+ rcar_pwm_disable(rp);
+ return 0;
+ }
+
+ div = rcar_pwm_get_clock_division(rp, state->period);
+ if (div < 0)
+ return div;
+
+ rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
+
+ ret = rcar_pwm_set_counter(rp, div, state->duty_cycle, state->period);
+ if (!ret)
+ rcar_pwm_set_clock_control(rp, div);
+
+	/* The SYNC should be set to 0 even if rcar_pwm_set_counter fails */
+ rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR);
+
+ if (!ret && state->enabled)
+ ret = rcar_pwm_enable(rp);
+
+ return ret;
}
static const struct pwm_ops rcar_pwm_ops = {
.request = rcar_pwm_request,
.free = rcar_pwm_free,
- .config = rcar_pwm_config,
- .enable = rcar_pwm_enable,
- .disable = rcar_pwm_disable,
+ .apply = rcar_pwm_apply,
.owner = THIS_MODULE,
};
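
With .config/.enable/.disable folded into a single .apply callback, disabling becomes an early exit and enabling runs only after the divider and counters are programmed. A schematic reduction of that flow, hardware calls replaced by printf():

#include <stdbool.h>
#include <stdio.h>

struct pwm_state { bool enabled; unsigned period, duty; };

static int apply(const struct pwm_state *state)
{
	if (!state->enabled) {
		printf("disable()\n");
		return 0;
	}
	printf("set divider for period=%u\n", state->period);
	printf("set counters duty=%u period=%u\n", state->duty, state->period);
	printf("enable()\n");
	return 0;
}

int main(void)
{
	struct pwm_state on  = { true, 1000000, 250000 };
	struct pwm_state off = { false, 0, 0 };

	apply(&on);
	apply(&off);
	return 0;
}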
@@ -279,18 +278,16 @@ static int rcar_pwm_suspend(struct device *dev)
static int rcar_pwm_resume(struct device *dev)
{
struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev);
+ struct pwm_state state;
if (!test_bit(PWMF_REQUESTED, &pwm->flags))
return 0;
pm_runtime_get_sync(dev);
- rcar_pwm_config(pwm->chip, pwm, pwm->state.duty_cycle,
- pwm->state.period);
- if (pwm_is_enabled(pwm))
- rcar_pwm_enable(pwm->chip, pwm);
+ pwm_get_state(pwm, &state);
- return 0;
+ return rcar_pwm_apply(pwm->chip, pwm, &state);
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(rcar_pwm_pm_ops, rcar_pwm_suspend, rcar_pwm_resume);
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 2d9ec378a8bc..88e4f3ff0cb8 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -286,10 +286,10 @@ int cec_add_elem(u64 pfn)
if (!ce_arr.array || ce_arr.disabled)
return -ENODEV;
- ca->ces_entered++;
-
mutex_lock(&ce_mutex);
+ ca->ces_entered++;
+
if (ca->n == MAX_ELEMS)
WARN_ON(!del_lru_elem_unlocked(ca));
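
Moving the ces_entered increment under ce_mutex closes a window where two CPUs could race on the read-modify-write. A minimal sketch of the resulting pattern, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ce_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long ces_entered;

/* Shape of the fixed cec_add_elem(): every shared-state update,
 * including the entry counter, happens with ce_mutex held. */
static void add_elem(void)
{
	pthread_mutex_lock(&ce_mutex);
	ces_entered++;		/* racy before this patch */
	/* ... insert the pfn into the CE array ... */
	pthread_mutex_unlock(&ce_mutex);
}

int main(void)
{
	add_elem();
	printf("ces_entered = %lu\n", ces_entered);
	return 0;
}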
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 89bbd6e8bad1..9fd379732d18 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -77,11 +77,6 @@ struct pm800_regulator_info {
int max_ua;
};
-struct pm800_regulators {
- struct pm80x_chip *chip;
- struct regmap *map;
-};
-
/*
* vreg - the buck regs string.
* ereg - the string for the enable register.
@@ -235,7 +230,6 @@ static int pm800_regulator_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
- struct pm800_regulators *pm800_data;
struct regulator_config config = { };
struct regulator_init_data *init_data;
int i, ret;
@@ -252,18 +246,8 @@ static int pm800_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
- pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
- GFP_KERNEL);
- if (!pm800_data)
- return -ENOMEM;
-
- pm800_data->map = chip->subchip->regmap_power;
- pm800_data->chip = chip;
-
- platform_set_drvdata(pdev, pm800_data);
-
config.dev = chip->dev;
- config.regmap = pm800_data->map;
+ config.regmap = chip->subchip->regmap_power;
for (i = 0; i < PM800_ID_RG_MAX; i++) {
struct regulator_dev *regulator;
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 753a6a1b30c3..35d767aeeb57 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -235,6 +235,8 @@ static const struct regulator_ops pm8606_preg_ops = {
{ \
.desc = { \
.name = "PREG", \
+ .of_match = of_match_ptr("PREG"), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &pm8606_preg_ops, \
.type = REGULATOR_CURRENT, \
.id = PM8606_ID_PREG, \
@@ -249,6 +251,8 @@ static const struct regulator_ops pm8606_preg_ops = {
{ \
.desc = { \
.name = #vreg, \
+ .of_match = of_match_ptr(#vreg), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &pm8607_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_##vreg, \
@@ -270,6 +274,8 @@ static const struct regulator_ops pm8606_preg_ops = {
{ \
.desc = { \
.name = "LDO" #_id, \
+ .of_match = of_match_ptr("LDO" #_id), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &pm8607_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_LDO##_id, \
@@ -309,36 +315,6 @@ static struct pm8607_regulator_info pm8606_regulator_info[] = {
PM8606_PREG(PREREGULATORB, 5),
};
-#ifdef CONFIG_OF
-static int pm8607_regulator_dt_init(struct platform_device *pdev,
- struct pm8607_regulator_info *info,
- struct regulator_config *config)
-{
- struct device_node *nproot, *np;
- nproot = pdev->dev.parent->of_node;
- if (!nproot)
- return -ENODEV;
- nproot = of_get_child_by_name(nproot, "regulators");
- if (!nproot) {
- dev_err(&pdev->dev, "failed to find regulators node\n");
- return -ENODEV;
- }
- for_each_child_of_node(nproot, np) {
- if (of_node_name_eq(np, info->desc.name)) {
- config->init_data =
- of_get_regulator_init_data(&pdev->dev, np,
- &info->desc);
- config->of_node = np;
- break;
- }
- }
- of_node_put(nproot);
- return 0;
-}
-#else
-#define pm8607_regulator_dt_init(x, y, z) (-1)
-#endif
-
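
This driver, and act8865 further down, drop their hand-rolled DT walkers: setting .of_match and .regulators_node in the regulator_desc lets the regulator core find the child node and its init data during registration. A condensed sketch of the pattern, with the desc reduced to the fields these hunks touch:

#include <stdio.h>

/* Reduced regulator_desc: only the DT-matching fields added here. */
struct regulator_desc {
	const char *name;
	const char *of_match;		/* child node name to match */
	const char *regulators_node;	/* parent node holding the children */
};

int main(void)
{
	static const struct regulator_desc preg = {
		.name		 = "PREG",
		.of_match	 = "PREG",
		.regulators_node = "regulators",
	};

	/* The core resolves <parent>/regulators/PREG on its own; no
	 * per-driver of_regulator_match table is needed anymore. */
	printf("%s -> %s/%s\n", preg.name, preg.regulators_node, preg.of_match);
	return 0;
}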
static int pm8607_regulator_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -373,12 +349,11 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
if ((i == PM8607_ID_BUCK3) && chip->buck3_double)
info->slope_double = 1;
- config.dev = &pdev->dev;
+ config.dev = chip->dev;
config.driver_data = info;
- if (pm8607_regulator_dt_init(pdev, info, &config))
- if (pdata)
- config.init_data = pdata;
+ if (pdata)
+ config.init_data = pdata;
if (chip->id == CHIP_PM8607)
config.regmap = chip->regmap;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index b7f249ee5e68..6c37f0df9323 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -223,6 +223,7 @@ config REGULATOR_CPCAP
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
+ depends on !CC_IS_CLANG # https://bugs.llvm.org/show_bug.cgi?id=38789
help
Say y here to support the buck and LDO regulators found on
Dialog Semiconductor DA9030/DA9034 PMIC.
@@ -839,6 +840,13 @@ config REGULATOR_STM32_VREFBUF
This driver can also be built as a module. If so, the module
will be called stm32-vrefbuf.
+config REGULATOR_STM32_PWR
+ bool "STMicroelectronics STM32 PWR"
+ depends on ARCH_STM32 || COMPILE_TEST
+ help
+ This driver supports internal regulators (1V1, 1V8, 3V3) in the
+ STMicroelectronics STM32 chips.
+
config REGULATOR_STPMIC1
tristate "STMicroelectronics STPMIC1 PMIC Regulators"
depends on MFD_STPMIC1
@@ -1010,7 +1018,8 @@ config REGULATOR_TWL4030
config REGULATOR_UNIPHIER
tristate "UniPhier regulator driver"
depends on ARCH_UNIPHIER || COMPILE_TEST
- depends on OF && MFD_SYSCON
+ depends on OF
+ select REGMAP_MMIO
default ARCH_UNIPHIER
help
Support for regulators implemented on Socionext UniPhier SoCs.
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 1169f8a27d91..93f53840e8f1 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -105,6 +105,7 @@ obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
+obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index de2644490f0d..438509f55f05 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -48,7 +48,6 @@
* @regreg: regulator register number in the AB3100
*/
struct ab3100_regulator {
- struct regulator_dev *rdev;
struct device *dev;
struct ab3100_platform_data *plfdata;
u8 regreg;
@@ -354,14 +353,13 @@ static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg)
return 0;
}
-static struct regulator_ops regulator_ops_fixed = {
- .list_voltage = regulator_list_voltage_linear,
+static const struct regulator_ops regulator_ops_fixed = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
};
-static struct regulator_ops regulator_ops_variable = {
+static const struct regulator_ops regulator_ops_variable = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
@@ -370,7 +368,7 @@ static struct regulator_ops regulator_ops_variable = {
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops regulator_ops_variable_sleepable = {
+static const struct regulator_ops regulator_ops_variable_sleepable = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
@@ -386,14 +384,14 @@ static struct regulator_ops regulator_ops_variable_sleepable = {
 * is an on/off switch plain and simple. The external
 * voltage is defined in the board set-up, if any.
*/
-static struct regulator_ops regulator_ops_external = {
+static const struct regulator_ops regulator_ops_external = {
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
.get_voltage = ab3100_get_voltage_regulator_external,
};
-static struct regulator_desc
+static const struct regulator_desc
ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
{
.name = "LDO_A",
@@ -402,7 +400,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
- .min_uV = LDO_A_VOLTAGE,
+ .fixed_uV = LDO_A_VOLTAGE,
.enable_time = 200,
},
{
@@ -412,7 +410,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
- .min_uV = LDO_C_VOLTAGE,
+ .fixed_uV = LDO_C_VOLTAGE,
.enable_time = 200,
},
{
@@ -422,7 +420,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
- .min_uV = LDO_D_VOLTAGE,
+ .fixed_uV = LDO_D_VOLTAGE,
.enable_time = 200,
},
{
@@ -500,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
struct device_node *np,
unsigned long id)
{
- struct regulator_desc *desc;
+ const struct regulator_desc *desc;
struct ab3100_regulator *reg;
struct regulator_dev *rdev;
struct regulator_config config = { };
@@ -545,8 +543,6 @@ static int ab3100_regulator_register(struct platform_device *pdev,
return err;
}
- /* Then set a pointer back to the registered regulator */
- reg->rdev = rdev;
return 0;
}
@@ -609,18 +605,6 @@ static const u8 ab3100_reg_initvals[] = {
LDO_D_SETTING,
};
-static int ab3100_regulators_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
- struct ab3100_regulator *reg = &ab3100_regulators[i];
-
- reg->rdev = NULL;
- }
- return 0;
-}
-
static int
ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
{
@@ -647,10 +631,8 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
pdev, NULL, ab3100_regulator_matches[i].init_data,
ab3100_regulator_matches[i].of_node,
(unsigned long)ab3100_regulator_matches[i].driver_data);
- if (err) {
- ab3100_regulators_remove(pdev);
+ if (err)
return err;
- }
}
return 0;
@@ -705,14 +687,12 @@ static int ab3100_regulators_probe(struct platform_device *pdev)
/* Register the regulators */
for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
- struct regulator_desc *desc = &ab3100_regulator_desc[i];
+ const struct regulator_desc *desc = &ab3100_regulator_desc[i];
err = ab3100_regulator_register(pdev, plfdata, NULL, NULL,
desc->id);
- if (err) {
- ab3100_regulators_remove(pdev);
+ if (err)
return err;
- }
}
return 0;
@@ -723,7 +703,6 @@ static struct platform_driver ab3100_regulators_driver = {
.name = "ab3100-regulators",
},
.probe = ab3100_regulators_probe,
- .remove = ab3100_regulators_remove,
};
static __init int ab3100_regulators_init(void)
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
index 2ca00045eb99..95704446d89e 100644
--- a/drivers/regulator/ab8500-ext.c
+++ b/drivers/regulator/ab8500-ext.c
@@ -479,7 +479,6 @@ static struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
* struct ab8500_ext_regulator_info - ab8500 regulator information
* @dev: device pointer
* @desc: regulator description
- * @rdev: regulator device
* @cfg: regulator configuration (extension of regulator FW configuration)
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
@@ -495,7 +494,6 @@ static struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
struct ab8500_ext_regulator_info {
struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *rdev;
struct ab8500_ext_regulator_cfg *cfg;
u8 update_bank;
u8 update_reg;
@@ -530,7 +528,7 @@ static int ab8500_ext_regulator_enable(struct regulator_dev *rdev)
info->update_bank, info->update_reg,
info->update_mask, regval);
if (ret < 0) {
- dev_err(rdev_get_dev(info->rdev),
+ dev_err(rdev_get_dev(rdev),
"couldn't set enable bits for regulator\n");
return ret;
}
@@ -566,7 +564,7 @@ static int ab8500_ext_regulator_disable(struct regulator_dev *rdev)
info->update_bank, info->update_reg,
info->update_mask, regval);
if (ret < 0) {
- dev_err(rdev_get_dev(info->rdev),
+ dev_err(rdev_get_dev(rdev),
"couldn't set disable bits for regulator\n");
return ret;
}
@@ -720,7 +718,7 @@ static int ab8500_ext_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static struct regulator_ops ab8500_ext_regulator_ops = {
+static const struct regulator_ops ab8500_ext_regulator_ops = {
.enable = ab8500_ext_regulator_enable,
.disable = ab8500_ext_regulator_disable,
.is_enabled = ab8500_ext_regulator_is_enabled,
@@ -735,6 +733,7 @@ static struct ab8500_ext_regulator_info
[AB8500_EXT_SUPPLY1] = {
.desc = {
.name = "VEXTSUPPLY1",
+ .of_match = of_match_ptr("ab8500_ext1"),
.ops = &ab8500_ext_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_EXT_SUPPLY1,
@@ -752,6 +751,7 @@ static struct ab8500_ext_regulator_info
[AB8500_EXT_SUPPLY2] = {
.desc = {
.name = "VEXTSUPPLY2",
+ .of_match = of_match_ptr("ab8500_ext2"),
.ops = &ab8500_ext_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_EXT_SUPPLY2,
@@ -769,6 +769,7 @@ static struct ab8500_ext_regulator_info
[AB8500_EXT_SUPPLY3] = {
.desc = {
.name = "VEXTSUPPLY3",
+ .of_match = of_match_ptr("ab8500_ext3"),
.ops = &ab8500_ext_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_EXT_SUPPLY3,
@@ -785,30 +786,13 @@ static struct ab8500_ext_regulator_info
},
};
-static struct of_regulator_match ab8500_ext_regulator_match[] = {
- { .name = "ab8500_ext1", .driver_data = (void *) AB8500_EXT_SUPPLY1, },
- { .name = "ab8500_ext2", .driver_data = (void *) AB8500_EXT_SUPPLY2, },
- { .name = "ab8500_ext3", .driver_data = (void *) AB8500_EXT_SUPPLY3, },
-};
-
static int ab8500_ext_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_regulator_platform_data *pdata = &ab8500_regulator_plat_data;
- struct device_node *np = pdev->dev.of_node;
struct regulator_config config = { };
- int i, err;
-
- if (np) {
- err = of_regulator_match(&pdev->dev, np,
- ab8500_ext_regulator_match,
- ARRAY_SIZE(ab8500_ext_regulator_match));
- if (err < 0) {
- dev_err(&pdev->dev,
- "Error parsing regulator init data: %d\n", err);
- return err;
- }
- }
+ struct regulator_dev *rdev;
+ int i;
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
@@ -844,23 +828,18 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.driver_data = info;
- config.of_node = ab8500_ext_regulator_match[i].of_node;
- config.init_data = (np) ?
- ab8500_ext_regulator_match[i].init_data :
- &pdata->ext_regulator[i];
+ config.init_data = &pdata->ext_regulator[i];
/* register regulator with framework */
- info->rdev = devm_regulator_register(&pdev->dev, &info->desc,
- &config);
- if (IS_ERR(info->rdev)) {
- err = PTR_ERR(info->rdev);
+ rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- return err;
+ return PTR_ERR(rdev);
}
- dev_dbg(rdev_get_dev(info->rdev),
- "%s-probed\n", info->desc.name);
+ dev_dbg(&pdev->dev, "%s-probed\n", info->desc.name);
}
return 0;
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 83dba3fbfe0c..3fcb4cbaab02 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -44,7 +44,6 @@ struct ab8500_shared_mode {
* struct ab8500_regulator_info - ab8500 regulator information
* @dev: device pointer
* @desc: regulator description
- * @regulator_dev: regulator device
* @shared_mode: used when mode is shared between two regulators
* @load_lp_uA: maximum load in idle (low power) mode
* @update_bank: bank to control on/off
@@ -65,7 +64,6 @@ struct ab8500_shared_mode {
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *regulator;
struct ab8500_shared_mode *shared_mode;
int load_lp_uA;
u8 update_bank;
@@ -510,7 +508,7 @@ static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
-static struct regulator_ops ab8500_regulator_volt_mode_ops = {
+static const struct regulator_ops ab8500_regulator_volt_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
@@ -522,7 +520,7 @@ static struct regulator_ops ab8500_regulator_volt_mode_ops = {
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops ab8500_regulator_volt_ops = {
+static const struct regulator_ops ab8500_regulator_volt_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
@@ -531,7 +529,7 @@ static struct regulator_ops ab8500_regulator_volt_ops = {
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops ab8500_regulator_mode_ops = {
+static const struct regulator_ops ab8500_regulator_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
@@ -541,14 +539,14 @@ static struct regulator_ops ab8500_regulator_mode_ops = {
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops ab8500_regulator_ops = {
+static const struct regulator_ops ab8500_regulator_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.list_voltage = regulator_list_voltage_table,
};
-static struct regulator_ops ab8500_regulator_anamic_mode_ops = {
+static const struct regulator_ops ab8500_regulator_anamic_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
@@ -1600,6 +1598,7 @@ static int ab8500_regulator_register(struct platform_device *pdev,
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_regulator_info *info = NULL;
struct regulator_config config = { };
+ struct regulator_dev *rdev;
/* assign per-regulator data */
info = &abx500_regulator.info[id];
@@ -1621,12 +1620,11 @@ static int ab8500_regulator_register(struct platform_device *pdev,
}
/* register regulator with framework */
- info->regulator = devm_regulator_register(&pdev->dev, &info->desc,
- &config);
- if (IS_ERR(info->regulator)) {
+ rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- return PTR_ERR(info->regulator);
+ return PTR_ERR(rdev);
}
return 0;
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index e0239cf3f56d..19d9ee2dac1f 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -226,7 +226,7 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
};
-static struct regulator_ops act8865_ops = {
+static const struct regulator_ops act8865_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -236,7 +236,7 @@ static struct regulator_ops act8865_ops = {
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops act8865_ldo_ops = {
+static const struct regulator_ops act8865_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -245,6 +245,8 @@ static struct regulator_ops act8865_ldo_ops = {
#define ACT88xx_REG(_name, _family, _id, _vsel_reg, _supply) \
[_family##_ID_##_id] = { \
.name = _name, \
+ .of_match = of_match_ptr(_name), \
+ .regulators_node = of_match_ptr("regulators"), \
.supply_name = _supply, \
.id = _family##_ID_##_id, \
.type = REGULATOR_VOLTAGE, \
@@ -265,6 +267,8 @@ static const struct regulator_desc act8600_regulators[] = {
ACT88xx_REG("DCDC3", ACT8600, DCDC3, VSET, "vp3"),
{
.name = "SUDCDC_REG4",
+ .of_match = of_match_ptr("SUDCDC_REG4"),
+ .regulators_node = of_match_ptr("regulators"),
.id = ACT8600_ID_SUDCDC4,
.ops = &act8865_ops,
.type = REGULATOR_VOLTAGE,
@@ -283,6 +287,8 @@ static const struct regulator_desc act8600_regulators[] = {
ACT88xx_REG("LDO8", ACT8600, LDO8, VSET, "inl"),
{
.name = "LDO_REG9",
+ .of_match = of_match_ptr("LDO_REG9"),
+ .regulators_node = of_match_ptr("regulators"),
.id = ACT8600_ID_LDO9,
.ops = &act8865_ldo_ops,
.type = REGULATOR_VOLTAGE,
@@ -294,6 +300,8 @@ static const struct regulator_desc act8600_regulators[] = {
},
{
.name = "LDO_REG10",
+ .of_match = of_match_ptr("LDO_REG10"),
+ .regulators_node = of_match_ptr("regulators"),
.id = ACT8600_ID_LDO10,
.ops = &act8865_ldo_ops,
.type = REGULATOR_VOLTAGE,
@@ -348,110 +356,6 @@ static const struct of_device_id act8865_dt_ids[] = {
{ }
};
MODULE_DEVICE_TABLE(of, act8865_dt_ids);
-
-static struct of_regulator_match act8846_matches[] = {
- [ACT8846_ID_REG1] = { .name = "REG1" },
- [ACT8846_ID_REG2] = { .name = "REG2" },
- [ACT8846_ID_REG3] = { .name = "REG3" },
- [ACT8846_ID_REG4] = { .name = "REG4" },
- [ACT8846_ID_REG5] = { .name = "REG5" },
- [ACT8846_ID_REG6] = { .name = "REG6" },
- [ACT8846_ID_REG7] = { .name = "REG7" },
- [ACT8846_ID_REG8] = { .name = "REG8" },
- [ACT8846_ID_REG9] = { .name = "REG9" },
- [ACT8846_ID_REG10] = { .name = "REG10" },
- [ACT8846_ID_REG11] = { .name = "REG11" },
- [ACT8846_ID_REG12] = { .name = "REG12" },
-};
-
-static struct of_regulator_match act8865_matches[] = {
- [ACT8865_ID_DCDC1] = { .name = "DCDC_REG1"},
- [ACT8865_ID_DCDC2] = { .name = "DCDC_REG2"},
- [ACT8865_ID_DCDC3] = { .name = "DCDC_REG3"},
- [ACT8865_ID_LDO1] = { .name = "LDO_REG1"},
- [ACT8865_ID_LDO2] = { .name = "LDO_REG2"},
- [ACT8865_ID_LDO3] = { .name = "LDO_REG3"},
- [ACT8865_ID_LDO4] = { .name = "LDO_REG4"},
-};
-
-static struct of_regulator_match act8600_matches[] = {
- [ACT8600_ID_DCDC1] = { .name = "DCDC_REG1"},
- [ACT8600_ID_DCDC2] = { .name = "DCDC_REG2"},
- [ACT8600_ID_DCDC3] = { .name = "DCDC_REG3"},
- [ACT8600_ID_SUDCDC4] = { .name = "SUDCDC_REG4"},
- [ACT8600_ID_LDO5] = { .name = "LDO_REG5"},
- [ACT8600_ID_LDO6] = { .name = "LDO_REG6"},
- [ACT8600_ID_LDO7] = { .name = "LDO_REG7"},
- [ACT8600_ID_LDO8] = { .name = "LDO_REG8"},
- [ACT8600_ID_LDO9] = { .name = "LDO_REG9"},
- [ACT8600_ID_LDO10] = { .name = "LDO_REG10"},
-};
-
-static int act8865_pdata_from_dt(struct device *dev,
- struct act8865_platform_data *pdata,
- unsigned long type)
-{
- int matched, i, num_matches;
- struct device_node *np;
- struct act8865_regulator_data *regulator;
- struct of_regulator_match *matches;
-
- switch (type) {
- case ACT8600:
- matches = act8600_matches;
- num_matches = ARRAY_SIZE(act8600_matches);
- break;
- case ACT8846:
- matches = act8846_matches;
- num_matches = ARRAY_SIZE(act8846_matches);
- break;
- case ACT8865:
- matches = act8865_matches;
- num_matches = ARRAY_SIZE(act8865_matches);
- break;
- default:
- dev_err(dev, "invalid device id %lu\n", type);
- return -EINVAL;
- }
-
- np = of_get_child_by_name(dev->of_node, "regulators");
- if (!np) {
- dev_err(dev, "missing 'regulators' subnode in DT\n");
- return -EINVAL;
- }
-
- matched = of_regulator_match(dev, np, matches, num_matches);
- of_node_put(np);
- if (matched <= 0)
- return matched;
-
- pdata->regulators = devm_kcalloc(dev,
- num_matches,
- sizeof(struct act8865_regulator_data),
- GFP_KERNEL);
- if (!pdata->regulators)
- return -ENOMEM;
-
- pdata->num_regulators = num_matches;
- regulator = pdata->regulators;
-
- for (i = 0; i < num_matches; i++) {
- regulator->id = i;
- regulator->name = matches[i].name;
- regulator->init_data = matches[i].init_data;
- regulator->of_node = matches[i].of_node;
- regulator++;
- }
-
- return 0;
-}
-#else
-static inline int act8865_pdata_from_dt(struct device *dev,
- struct act8865_platform_data *pdata,
- unsigned long type)
-{
- return 0;
-}
#endif
static struct act8865_regulator_data *act8865_get_regulator_data(
@@ -459,9 +363,6 @@ static struct act8865_regulator_data *act8865_get_regulator_data(
{
int i;
- if (!pdata)
- return NULL;
-
for (i = 0; i < pdata->num_regulators; i++) {
if (pdata->regulators[i].id == id)
return &pdata->regulators[i];
@@ -484,7 +385,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
const struct regulator_desc *regulators;
- struct act8865_platform_data pdata_of, *pdata;
+ struct act8865_platform_data *pdata = NULL;
struct device *dev = &client->dev;
int i, ret, num_regulators;
struct act8865 *act8865;
@@ -493,9 +394,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
int off_reg, off_mask;
int voltage_select = 0;
- pdata = dev_get_platdata(dev);
-
- if (dev->of_node && !pdata) {
+ if (dev->of_node) {
const struct of_device_id *id;
id = of_match_device(of_match_ptr(act8865_dt_ids), dev);
@@ -509,6 +408,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
NULL);
} else {
type = i2c_id->driver_data;
+ pdata = dev_get_platdata(dev);
}
switch (type) {
@@ -543,14 +443,6 @@ static int act8865_pmic_probe(struct i2c_client *client,
return -EINVAL;
}
- if (dev->of_node && !pdata) {
- ret = act8865_pdata_from_dt(dev, &pdata_of, type);
- if (ret < 0)
- return ret;
-
- pdata = &pdata_of;
- }
-
act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL);
if (!act8865)
return -ENOMEM;
@@ -577,17 +469,20 @@ static int act8865_pmic_probe(struct i2c_client *client,
for (i = 0; i < num_regulators; i++) {
const struct regulator_desc *desc = &regulators[i];
struct regulator_config config = { };
- struct act8865_regulator_data *rdata;
struct regulator_dev *rdev;
config.dev = dev;
config.driver_data = act8865;
config.regmap = act8865->regmap;
- rdata = act8865_get_regulator_data(desc->id, pdata);
- if (rdata) {
- config.init_data = rdata->init_data;
- config.of_node = rdata->of_node;
+ if (pdata) {
+ struct act8865_regulator_data *rdata;
+
+ rdata = act8865_get_regulator_data(desc->id, pdata);
+ if (rdata) {
+ config.init_data = rdata->init_data;
+ config.of_node = rdata->of_node;
+ }
}
rdev = devm_regulator_register(dev, desc, &config);
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index d9d8155ed8cb..754739d004e5 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -23,18 +23,10 @@
#define LDO_FET_FULL_ON 0x1f
struct anatop_regulator {
- u32 control_reg;
- struct regmap *anatop;
- int vol_bit_shift;
- int vol_bit_width;
u32 delay_reg;
int delay_bit_shift;
int delay_bit_width;
- int min_bit_val;
- int min_voltage;
- int max_voltage;
struct regulator_desc rdesc;
- struct regulator_init_data *initdata;
bool bypass;
int sel;
};
@@ -55,7 +47,7 @@ static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
 * to calculate how many steps the LDO needs to
 * ramp up, and how much delay is needed (us).
*/
- regmap_read(anatop_reg->anatop, anatop_reg->delay_reg, &val);
+ regmap_read(reg->regmap, anatop_reg->delay_reg, &val);
val = (val >> anatop_reg->delay_bit_shift) &
((1 << anatop_reg->delay_bit_width) - 1);
ret = (new_sel - old_sel) * (LDO_RAMP_UP_UNIT_IN_CYCLES <<
@@ -170,6 +162,13 @@ static int anatop_regulator_probe(struct platform_device *pdev)
struct anatop_regulator *sreg;
struct regulator_init_data *initdata;
struct regulator_config config = { };
+ struct regmap *regmap;
+ u32 control_reg;
+ u32 vol_bit_shift;
+ u32 vol_bit_width;
+ u32 min_bit_val;
+ u32 min_voltage;
+ u32 max_voltage;
int ret = 0;
u32 val;
@@ -192,48 +191,41 @@ static int anatop_regulator_probe(struct platform_device *pdev)
return -ENOMEM;
initdata->supply_regulator = "vin";
- sreg->initdata = initdata;
anatop_np = of_get_parent(np);
if (!anatop_np)
return -ENODEV;
- sreg->anatop = syscon_node_to_regmap(anatop_np);
+ regmap = syscon_node_to_regmap(anatop_np);
of_node_put(anatop_np);
- if (IS_ERR(sreg->anatop))
- return PTR_ERR(sreg->anatop);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
- ret = of_property_read_u32(np, "anatop-reg-offset",
- &sreg->control_reg);
+ ret = of_property_read_u32(np, "anatop-reg-offset", &control_reg);
if (ret) {
dev_err(dev, "no anatop-reg-offset property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-vol-bit-width",
- &sreg->vol_bit_width);
+ ret = of_property_read_u32(np, "anatop-vol-bit-width", &vol_bit_width);
if (ret) {
dev_err(dev, "no anatop-vol-bit-width property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-vol-bit-shift",
- &sreg->vol_bit_shift);
+ ret = of_property_read_u32(np, "anatop-vol-bit-shift", &vol_bit_shift);
if (ret) {
dev_err(dev, "no anatop-vol-bit-shift property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-min-bit-val",
- &sreg->min_bit_val);
+ ret = of_property_read_u32(np, "anatop-min-bit-val", &min_bit_val);
if (ret) {
dev_err(dev, "no anatop-min-bit-val property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-min-voltage",
- &sreg->min_voltage);
+ ret = of_property_read_u32(np, "anatop-min-voltage", &min_voltage);
if (ret) {
dev_err(dev, "no anatop-min-voltage property set\n");
return ret;
}
- ret = of_property_read_u32(np, "anatop-max-voltage",
- &sreg->max_voltage);
+ ret = of_property_read_u32(np, "anatop-max-voltage", &max_voltage);
if (ret) {
dev_err(dev, "no anatop-max-voltage property set\n");
return ret;
@@ -247,24 +239,23 @@ static int anatop_regulator_probe(struct platform_device *pdev)
of_property_read_u32(np, "anatop-delay-bit-shift",
&sreg->delay_bit_shift);
- rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage) / 25000 + 1
- + sreg->min_bit_val;
- rdesc->min_uV = sreg->min_voltage;
+ rdesc->n_voltages = (max_voltage - min_voltage) / 25000 + 1
+ + min_bit_val;
+ rdesc->min_uV = min_voltage;
rdesc->uV_step = 25000;
- rdesc->linear_min_sel = sreg->min_bit_val;
- rdesc->vsel_reg = sreg->control_reg;
- rdesc->vsel_mask = ((1 << sreg->vol_bit_width) - 1) <<
- sreg->vol_bit_shift;
+ rdesc->linear_min_sel = min_bit_val;
+ rdesc->vsel_reg = control_reg;
+ rdesc->vsel_mask = ((1 << vol_bit_width) - 1) << vol_bit_shift;
rdesc->min_dropout_uV = 125000;
config.dev = &pdev->dev;
config.init_data = initdata;
config.driver_data = sreg;
config.of_node = pdev->dev.of_node;
- config.regmap = sreg->anatop;
+ config.regmap = regmap;
/* Only core regulators have the ramp up delay configuration. */
- if (sreg->control_reg && sreg->delay_bit_width) {
+ if (control_reg && sreg->delay_bit_width) {
rdesc->ops = &anatop_core_rops;
ret = regmap_read(config.regmap, rdesc->vsel_reg, &val);
@@ -273,7 +264,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
return ret;
}
- sreg->sel = (val & rdesc->vsel_mask) >> sreg->vol_bit_shift;
+ sreg->sel = (val & rdesc->vsel_mask) >> vol_bit_shift;
if (sreg->sel == LDO_FET_FULL_ON) {
sreg->sel = 0;
sreg->bypass = true;
@@ -306,7 +297,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
anatop_rops.disable = regulator_disable_regmap;
anatop_rops.is_enabled = regulator_is_enabled_regmap;
- rdesc->enable_reg = sreg->control_reg;
+ rdesc->enable_reg = control_reg;
rdesc->enable_mask = BIT(enable_bit);
}
}
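
The rdesc fields above describe a plain linear range: voltage(sel) = min_uV + (sel - linear_min_sel) * uV_step, with the selector held in a vol_bit_width-wide field at vol_bit_shift. A worked sketch of that mapping; the register and property values are invented examples:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t min_uV = 800000, uV_step = 25000;
	uint32_t min_bit_val = 1, vol_bit_shift = 9, vol_bit_width = 5;
	uint32_t vsel_mask = ((1u << vol_bit_width) - 1) << vol_bit_shift;
	uint32_t reg = 0x1600;	/* sample register value */
	uint32_t sel = (reg & vsel_mask) >> vol_bit_shift;

	/* prints: vsel_mask = 0x3e00, sel = 11, voltage = 1050000 uV */
	printf("vsel_mask = 0x%x, sel = %u, voltage = %u uV\n",
	       vsel_mask, sel, min_uV + (sel - min_bit_val) * uV_step);
	return 0;
}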
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index bf3ab405eed1..e4bc7b1e5ccd 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -1,15 +1,10 @@
-/*
- * arizona-ldo1.c -- LDO1 supply for Arizona devices
- *
- * Copyright 2012 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// arizona-ldo1.c -- LDO1 supply for Arizona devices
+//
+// Copyright 2012 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index 120de94caf02..be0d46da51a1 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -1,15 +1,10 @@
-/*
- * arizona-micsupp.c -- Microphone supply for Arizona devices
- *
- * Copyright 2012 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// arizona-micsupp.c -- Microphone supply for Arizona devices
+//
+// Copyright 2012 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index f7fe218bb3e4..ece88103f2fd 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -17,14 +17,6 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
-struct as3711_regulator_info {
- struct regulator_desc desc;
-};
-
-struct as3711_regulator {
- struct as3711_regulator_info *reg_info;
-};
-
/*
* The regulator API supports 4 modes of operation: FAST, NORMAL, IDLE and
* STANDBY. We map them in the following way to AS3711 SD1-4 DCDC modes:
@@ -129,7 +121,6 @@ static const struct regulator_linear_range as3711_dldo_ranges[] = {
#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _sfx) \
[AS3711_REGULATOR_ ## _id] = { \
- .desc = { \
.name = "as3711-regulator-" # _id, \
.id = AS3711_REGULATOR_ ## _id, \
.n_voltages = (_vmask + 1), \
@@ -142,10 +133,9 @@ static const struct regulator_linear_range as3711_dldo_ranges[] = {
.enable_mask = BIT(_en_bit), \
.linear_ranges = as3711_ ## _sfx ## _ranges, \
.n_linear_ranges = ARRAY_SIZE(as3711_ ## _sfx ## _ranges), \
- }, \
}
-static struct as3711_regulator_info as3711_reg_info[] = {
+static const struct regulator_desc as3711_reg_desc[] = {
AS3711_REG(SD_1, SD_CONTROL, 0, 0x7f, sd),
AS3711_REG(SD_2, SD_CONTROL, 1, 0x7f, sd),
AS3711_REG(SD_3, SD_CONTROL, 2, 0x7f, sd),
@@ -161,7 +151,7 @@ static struct as3711_regulator_info as3711_reg_info[] = {
/* StepUp output voltage depends on supplying regulator */
};
-#define AS3711_REGULATOR_NUM ARRAY_SIZE(as3711_reg_info)
+#define AS3711_REGULATOR_NUM ARRAY_SIZE(as3711_reg_desc)
static struct of_regulator_match
as3711_regulator_matches[AS3711_REGULATOR_NUM] = {
@@ -215,11 +205,8 @@ static int as3711_regulator_probe(struct platform_device *pdev)
struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev);
struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = {.dev = &pdev->dev,};
- struct as3711_regulator *reg = NULL;
- struct as3711_regulator *regs;
struct device_node *of_node[AS3711_REGULATOR_NUM] = {};
struct regulator_dev *rdev;
- struct as3711_regulator_info *ri;
int ret;
int id;
@@ -236,30 +223,20 @@ static int as3711_regulator_probe(struct platform_device *pdev)
}
}
- regs = devm_kcalloc(&pdev->dev,
- AS3711_REGULATOR_NUM,
- sizeof(struct as3711_regulator),
- GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
-
- for (id = 0, ri = as3711_reg_info; id < AS3711_REGULATOR_NUM; ++id, ri++) {
- reg = &regs[id];
- reg->reg_info = ri;
-
+ for (id = 0; id < AS3711_REGULATOR_NUM; id++) {
config.init_data = pdata->init_data[id];
- config.driver_data = reg;
config.regmap = as3711->regmap;
config.of_node = of_node[id];
- rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &as3711_reg_desc[id],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
- ri->desc.name);
+ as3711_reg_desc[id].name);
return PTR_ERR(rdev);
}
}
- platform_set_drvdata(pdev, regs);
+
return 0;
}
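
Annotation: the as3711 conversion deletes a wrapper struct whose only payload was a regulator_desc, so the descriptors can become one const array registered directly, and platform drvdata is no longer needed. A hedged sketch of the resulting shape (names hypothetical):

static const struct regulator_desc foo_desc[] = {
	{
		.name = "foo-ldo1",
		.id = 0,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		/* .ops, voltage ranges etc. omitted for brevity */
	},
	/* ... one entry per regulator ... */
};

static int foo_probe(struct platform_device *pdev)
{
	struct regulator_config config = { .dev = &pdev->dev };
	struct regulator_dev *rdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(foo_desc); i++) {
		rdev = devm_regulator_register(&pdev->dev, &foo_desc[i],
					       &config);
		if (IS_ERR(rdev))
			return PTR_ERR(rdev);
	}

	return 0;
}
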
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index e5fed289b52d..dc75c33bd950 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -81,7 +81,6 @@ struct as3722_regulator_config_data {
struct as3722_regulators {
struct device *dev;
struct as3722 *as3722;
- struct regulator_dev *rdevs[AS3722_REGULATOR_ID_MAX];
struct regulator_desc desc[AS3722_REGULATOR_ID_MAX];
struct as3722_regulator_config_data
reg_config_data[AS3722_REGULATOR_ID_MAX];
@@ -314,63 +313,10 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
},
};
-
-static const int as3722_ldo_current[] = { 150000, 300000 };
-static const int as3722_sd016_current[] = { 2500000, 3000000, 3500000 };
-
-static int as3722_current_to_index(int min_uA, int max_uA,
- const int *curr_table, int n_currents)
-{
- int i;
-
- for (i = n_currents - 1; i >= 0; i--) {
- if ((min_uA <= curr_table[i]) && (curr_table[i] <= max_uA))
- return i;
- }
- return -EINVAL;
-}
-
-static int as3722_ldo_get_current_limit(struct regulator_dev *rdev)
-{
- struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
- struct as3722 *as3722 = as3722_regs->as3722;
- int id = rdev_get_id(rdev);
- u32 val;
- int ret;
-
- ret = as3722_read(as3722, as3722_reg_lookup[id].vsel_reg, &val);
- if (ret < 0) {
- dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
- as3722_reg_lookup[id].vsel_reg, ret);
- return ret;
- }
- if (val & AS3722_LDO_ILIMIT_MASK)
- return 300000;
- return 150000;
-}
-
-static int as3722_ldo_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
- struct as3722 *as3722 = as3722_regs->as3722;
- int id = rdev_get_id(rdev);
- int ret;
- u32 reg = 0;
-
- ret = as3722_current_to_index(min_uA, max_uA, as3722_ldo_current,
- ARRAY_SIZE(as3722_ldo_current));
- if (ret < 0) {
- dev_err(as3722_regs->dev,
- "Current range min:max = %d:%d does not support\n",
- min_uA, max_uA);
- return ret;
- }
- if (ret)
- reg = AS3722_LDO_ILIMIT_BIT;
- return as3722_update_bits(as3722, as3722_reg_lookup[id].vsel_reg,
- AS3722_LDO_ILIMIT_MASK, reg);
-}
+static const unsigned int as3722_ldo_current[] = { 150000, 300000 };
+static const unsigned int as3722_sd016_current[] = {
+ 2500000, 3000000, 3500000
+};
static const struct regulator_ops as3722_ldo0_ops = {
.is_enabled = regulator_is_enabled_regmap,
@@ -379,16 +325,16 @@ static const struct regulator_ops as3722_ldo0_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static const struct regulator_ops as3722_ldo0_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static int as3722_ldo3_set_tracking_mode(struct as3722_regulators *as3722_reg,
@@ -440,8 +386,8 @@ static const struct regulator_ops as3722_ldo6_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
};
@@ -451,8 +397,8 @@ static const struct regulator_ops as3722_ldo6_extcntrl_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
};
@@ -471,8 +417,8 @@ static const struct regulator_ops as3722_ldo_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static const struct regulator_ops as3722_ldo_extcntrl_ops = {
@@ -480,8 +426,8 @@ static const struct regulator_ops as3722_ldo_extcntrl_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
- .get_current_limit = as3722_ldo_get_current_limit,
- .set_current_limit = as3722_ldo_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static unsigned int as3722_sd_get_mode(struct regulator_dev *rdev)
@@ -539,85 +485,6 @@ static int as3722_sd_set_mode(struct regulator_dev *rdev,
return ret;
}
-static int as3722_sd016_get_current_limit(struct regulator_dev *rdev)
-{
- struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
- struct as3722 *as3722 = as3722_regs->as3722;
- int id = rdev_get_id(rdev);
- u32 val, reg;
- int mask;
- int ret;
-
- switch (id) {
- case AS3722_REGULATOR_ID_SD0:
- reg = AS3722_OVCURRENT_REG;
- mask = AS3722_OVCURRENT_SD0_TRIP_MASK;
- break;
- case AS3722_REGULATOR_ID_SD1:
- reg = AS3722_OVCURRENT_REG;
- mask = AS3722_OVCURRENT_SD1_TRIP_MASK;
- break;
- case AS3722_REGULATOR_ID_SD6:
- reg = AS3722_OVCURRENT_DEB_REG;
- mask = AS3722_OVCURRENT_SD6_TRIP_MASK;
- break;
- default:
- return -EINVAL;
- }
- ret = as3722_read(as3722, reg, &val);
- if (ret < 0) {
- dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
- reg, ret);
- return ret;
- }
- val &= mask;
- val >>= ffs(mask) - 1;
- if (val == 3)
- return -EINVAL;
- return as3722_sd016_current[val];
-}
-
-static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct as3722_regulators *as3722_regs = rdev_get_drvdata(rdev);
- struct as3722 *as3722 = as3722_regs->as3722;
- int id = rdev_get_id(rdev);
- int ret;
- int val;
- int mask;
- u32 reg;
-
- ret = as3722_current_to_index(min_uA, max_uA, as3722_sd016_current,
- ARRAY_SIZE(as3722_sd016_current));
- if (ret < 0) {
- dev_err(as3722_regs->dev,
- "Current range min:max = %d:%d does not support\n",
- min_uA, max_uA);
- return ret;
- }
-
- switch (id) {
- case AS3722_REGULATOR_ID_SD0:
- reg = AS3722_OVCURRENT_REG;
- mask = AS3722_OVCURRENT_SD0_TRIP_MASK;
- break;
- case AS3722_REGULATOR_ID_SD1:
- reg = AS3722_OVCURRENT_REG;
- mask = AS3722_OVCURRENT_SD1_TRIP_MASK;
- break;
- case AS3722_REGULATOR_ID_SD6:
- reg = AS3722_OVCURRENT_DEB_REG;
- mask = AS3722_OVCURRENT_SD6_TRIP_MASK;
- break;
- default:
- return -EINVAL;
- }
- ret <<= ffs(mask) - 1;
- val = ret & mask;
- return as3722_update_bits(as3722, reg, mask, val);
-}
-
static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
{
int err;
@@ -649,8 +516,8 @@ static const struct regulator_ops as3722_sd016_ops = {
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_current_limit = as3722_sd016_get_current_limit,
- .set_current_limit = as3722_sd016_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_mode = as3722_sd_get_mode,
.set_mode = as3722_sd_set_mode,
};
@@ -660,8 +527,8 @@ static const struct regulator_ops as3722_sd016_extcntrl_ops = {
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_current_limit = as3722_sd016_get_current_limit,
- .set_current_limit = as3722_sd016_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_mode = as3722_sd_get_mode,
.set_mode = as3722_sd_set_mode,
};
@@ -807,42 +674,45 @@ static int as3722_regulator_probe(struct platform_device *pdev)
config.regmap = as3722->regmap;
for (id = 0; id < AS3722_REGULATOR_ID_MAX; id++) {
+ struct regulator_desc *desc;
+
+ desc = &as3722_regs->desc[id];
reg_config = &as3722_regs->reg_config_data[id];
- as3722_regs->desc[id].name = as3722_reg_lookup[id].name;
- as3722_regs->desc[id].supply_name = as3722_reg_lookup[id].sname;
- as3722_regs->desc[id].id = as3722_reg_lookup[id].regulator_id;
- as3722_regs->desc[id].n_voltages =
- as3722_reg_lookup[id].n_voltages;
- as3722_regs->desc[id].type = REGULATOR_VOLTAGE;
- as3722_regs->desc[id].owner = THIS_MODULE;
- as3722_regs->desc[id].enable_reg =
- as3722_reg_lookup[id].enable_reg;
- as3722_regs->desc[id].enable_mask =
- as3722_reg_lookup[id].enable_mask;
- as3722_regs->desc[id].vsel_reg = as3722_reg_lookup[id].vsel_reg;
- as3722_regs->desc[id].vsel_mask =
- as3722_reg_lookup[id].vsel_mask;
+ desc->name = as3722_reg_lookup[id].name;
+ desc->supply_name = as3722_reg_lookup[id].sname;
+ desc->id = as3722_reg_lookup[id].regulator_id;
+ desc->n_voltages = as3722_reg_lookup[id].n_voltages;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->owner = THIS_MODULE;
+ desc->enable_reg = as3722_reg_lookup[id].enable_reg;
+ desc->enable_mask = as3722_reg_lookup[id].enable_mask;
+ desc->vsel_reg = as3722_reg_lookup[id].vsel_reg;
+ desc->vsel_mask = as3722_reg_lookup[id].vsel_mask;
switch (id) {
case AS3722_REGULATOR_ID_LDO0:
if (reg_config->ext_control)
ops = &as3722_ldo0_extcntrl_ops;
else
ops = &as3722_ldo0_ops;
- as3722_regs->desc[id].min_uV = 825000;
- as3722_regs->desc[id].uV_step = 25000;
- as3722_regs->desc[id].linear_min_sel = 1;
- as3722_regs->desc[id].enable_time = 500;
+ desc->min_uV = 825000;
+ desc->uV_step = 25000;
+ desc->linear_min_sel = 1;
+ desc->enable_time = 500;
+ desc->curr_table = as3722_ldo_current;
+ desc->n_current_limits = ARRAY_SIZE(as3722_ldo_current);
+ desc->csel_reg = as3722_reg_lookup[id].vsel_reg;
+ desc->csel_mask = AS3722_LDO_ILIMIT_MASK;
break;
case AS3722_REGULATOR_ID_LDO3:
if (reg_config->ext_control)
ops = &as3722_ldo3_extcntrl_ops;
else
ops = &as3722_ldo3_ops;
- as3722_regs->desc[id].min_uV = 620000;
- as3722_regs->desc[id].uV_step = 20000;
- as3722_regs->desc[id].linear_min_sel = 1;
- as3722_regs->desc[id].enable_time = 500;
+ desc->min_uV = 620000;
+ desc->uV_step = 20000;
+ desc->linear_min_sel = 1;
+ desc->enable_time = 500;
if (reg_config->enable_tracking) {
ret = as3722_ldo3_set_tracking_mode(as3722_regs,
id, AS3722_LDO3_MODE_PMOS_TRACKING);
@@ -859,18 +729,17 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_ldo6_extcntrl_ops;
else
ops = &as3722_ldo6_ops;
- as3722_regs->desc[id].enable_time = 500;
- as3722_regs->desc[id].bypass_reg =
- AS3722_LDO6_VOLTAGE_REG;
- as3722_regs->desc[id].bypass_mask =
- AS3722_LDO_VSEL_MASK;
- as3722_regs->desc[id].bypass_val_on =
- AS3722_LDO6_VSEL_BYPASS;
- as3722_regs->desc[id].bypass_val_off =
- AS3722_LDO6_VSEL_BYPASS;
- as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
- as3722_regs->desc[id].n_linear_ranges =
- ARRAY_SIZE(as3722_ldo_ranges);
+ desc->enable_time = 500;
+ desc->bypass_reg = AS3722_LDO6_VOLTAGE_REG;
+ desc->bypass_mask = AS3722_LDO_VSEL_MASK;
+ desc->bypass_val_on = AS3722_LDO6_VSEL_BYPASS;
+ desc->bypass_val_off = AS3722_LDO6_VSEL_BYPASS;
+ desc->linear_ranges = as3722_ldo_ranges;
+ desc->n_linear_ranges = ARRAY_SIZE(as3722_ldo_ranges);
+ desc->curr_table = as3722_ldo_current;
+ desc->n_current_limits = ARRAY_SIZE(as3722_ldo_current);
+ desc->csel_reg = as3722_reg_lookup[id].vsel_reg;
+ desc->csel_mask = AS3722_LDO_ILIMIT_MASK;
break;
case AS3722_REGULATOR_ID_SD0:
case AS3722_REGULATOR_ID_SD1:
@@ -889,9 +758,25 @@ static int as3722_regulator_probe(struct platform_device *pdev)
AS3722_SD0_VSEL_MAX + 1;
as3722_regs->desc[id].min_uV = 610000;
}
- as3722_regs->desc[id].uV_step = 10000;
- as3722_regs->desc[id].linear_min_sel = 1;
- as3722_regs->desc[id].enable_time = 600;
+ desc->uV_step = 10000;
+ desc->linear_min_sel = 1;
+ desc->enable_time = 600;
+ desc->curr_table = as3722_sd016_current;
+ desc->n_current_limits =
+ ARRAY_SIZE(as3722_sd016_current);
+ if (id == AS3722_REGULATOR_ID_SD0) {
+ desc->csel_reg = AS3722_OVCURRENT_REG;
+ desc->csel_mask =
+ AS3722_OVCURRENT_SD0_TRIP_MASK;
+ } else if (id == AS3722_REGULATOR_ID_SD1) {
+ desc->csel_reg = AS3722_OVCURRENT_REG;
+ desc->csel_mask =
+ AS3722_OVCURRENT_SD1_TRIP_MASK;
+ } else if (id == AS3722_REGULATOR_ID_SD6) {
+ desc->csel_reg = AS3722_OVCURRENT_DEB_REG;
+ desc->csel_mask =
+ AS3722_OVCURRENT_SD6_TRIP_MASK;
+ }
break;
case AS3722_REGULATOR_ID_SD2:
case AS3722_REGULATOR_ID_SD3:
@@ -901,9 +786,8 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_sd2345_extcntrl_ops;
else
ops = &as3722_sd2345_ops;
- as3722_regs->desc[id].linear_ranges =
- as3722_sd2345_ranges;
- as3722_regs->desc[id].n_linear_ranges =
+ desc->linear_ranges = as3722_sd2345_ranges;
+ desc->n_linear_ranges =
ARRAY_SIZE(as3722_sd2345_ranges);
break;
default:
@@ -911,17 +795,19 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_ldo_extcntrl_ops;
else
ops = &as3722_ldo_ops;
- as3722_regs->desc[id].enable_time = 500;
- as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
- as3722_regs->desc[id].n_linear_ranges =
- ARRAY_SIZE(as3722_ldo_ranges);
+ desc->enable_time = 500;
+ desc->linear_ranges = as3722_ldo_ranges;
+ desc->n_linear_ranges = ARRAY_SIZE(as3722_ldo_ranges);
+ desc->curr_table = as3722_ldo_current;
+ desc->n_current_limits = ARRAY_SIZE(as3722_ldo_current);
+ desc->csel_reg = as3722_reg_lookup[id].vsel_reg;
+ desc->csel_mask = AS3722_LDO_ILIMIT_MASK;
break;
}
- as3722_regs->desc[id].ops = ops;
+ desc->ops = ops;
config.init_data = reg_config->reg_init;
config.of_node = as3722_regulator_matches[id].of_node;
- rdev = devm_regulator_register(&pdev->dev,
- &as3722_regs->desc[id], &config);
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev, "regulator %d register failed %d\n",
@@ -929,7 +815,6 @@ static int as3722_regulator_probe(struct platform_device *pdev)
return ret;
}
- as3722_regs->rdevs[id] = rdev;
if (reg_config->ext_control) {
ret = regulator_enable_regmap(rdev);
if (ret < 0) {
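
Annotation: the as3722 rework drops roughly 130 lines of hand-rolled current-limit code in favour of the core regmap helpers. The descriptor carries an ascending microamp table plus the selector register and mask, and regulator_get/set_current_limit_regmap() do the table lookup and register access. A sketch, with a hypothetical one-bit limit field:

static const unsigned int foo_curr_table[] = { 150000, 300000 }; /* uA */

static const struct regulator_ops foo_ops = {
	.get_current_limit = regulator_get_current_limit_regmap,
	.set_current_limit = regulator_set_current_limit_regmap,
	/* voltage ops omitted */
};

static const struct regulator_desc foo_ldo_desc = {
	.name = "foo-ldo",
	.ops = &foo_ops,
	.type = REGULATOR_VOLTAGE,
	.owner = THIS_MODULE,
	.curr_table = foo_curr_table,
	.n_current_limits = ARRAY_SIZE(foo_curr_table),
	.csel_reg = 0x1a,	/* hypothetical limit register */
	.csel_mask = BIT(7),	/* hypothetical limit field */
};
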
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index fba8f58ab769..152053361862 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -367,16 +367,14 @@ static const int axp209_dcdc2_ldo3_slew_rates[] = {
static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
- const struct regulator_desc *desc;
+ int id = rdev_get_id(rdev);
u8 reg, mask, enable, cfg = 0xff;
const int *slew_rates;
int rate_count = 0;
- desc = rdev->desc;
-
switch (axp20x->variant) {
case AXP209_ID:
- if (desc->id == AXP20X_DCDC2) {
+ if (id == AXP20X_DCDC2) {
slew_rates = axp209_dcdc2_ldo3_slew_rates;
rate_count = ARRAY_SIZE(axp209_dcdc2_ldo3_slew_rates);
reg = AXP20X_DCDC2_LDO3_V_RAMP;
@@ -388,7 +386,7 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
break;
}
- if (desc->id == AXP20X_LDO3) {
+ if (id == AXP20X_LDO3) {
slew_rates = axp209_dcdc2_ldo3_slew_rates;
rate_count = ARRAY_SIZE(axp209_dcdc2_ldo3_slew_rates);
reg = AXP20X_DCDC2_LDO3_V_RAMP;
@@ -435,16 +433,11 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
static int axp20x_regulator_enable_regmap(struct regulator_dev *rdev)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
- const struct regulator_desc *desc;
-
- if (!rdev)
- return -EINVAL;
-
- desc = rdev->desc;
+ int id = rdev_get_id(rdev);
switch (axp20x->variant) {
case AXP209_ID:
- if ((desc->id == AXP20X_LDO3) &&
+ if ((id == AXP20X_LDO3) &&
rdev->constraints && rdev->constraints->soft_start) {
int v_out;
int ret;
@@ -1028,7 +1021,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
* (See include/linux/mfd/axp20x.h)
*/
reg = AXP803_DCDC_FREQ_CTRL;
- /* Fall through to the check below.*/
+ /* Fall through - to the check below.*/
case AXP806_ID:
/*
* AXP806 also has a DCDC work frequency setting register at a
@@ -1119,12 +1112,12 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
break;
case AXP806_ID:
- reg = AXP806_DCDC_MODE_CTRL2;
/*
* AXP806 DCDC regulator IDs have the same range as AXP22X.
- * Fall through to the check below.
* (See include/linux/mfd/axp20x.h)
*/
+ reg = AXP806_DCDC_MODE_CTRL2;
+ /* Fall through - to the check below. */
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
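
Annotation: the axp20x hunks swap direct rdev->desc->id dereferences for the rdev_get_id() accessor, which also lets the redundant NULL check on rdev go. Minimal sketch (FOO_DCDC2 and foo_write_ramp() are hypothetical):

static int foo_set_ramp_delay(struct regulator_dev *rdev, int ramp)
{
	int id = rdev_get_id(rdev);	/* accessor instead of rdev->desc->id */

	if (id == FOO_DCDC2)
		return foo_write_ramp(rdev, ramp);

	return -EINVAL;
}
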
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index e49c0a7d5dd5..85ccc93b2bb6 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -103,10 +103,6 @@
((n > BCM590XX_REG_VSR) && (n < BCM590XX_REG_VBUS))
#define BCM590XX_REG_IS_VBUS(n) (n == BCM590XX_REG_VBUS)
-struct bcm590xx_board {
- struct regulator_init_data *bcm590xx_pmu_init_data[BCM590XX_NUM_REGS];
-};
-
/* LDO group A: supported voltages in microvolts */
static const unsigned int ldo_a_table[] = {
1200000, 1800000, 2500000, 2700000, 2800000,
@@ -280,105 +276,15 @@ static const struct regulator_ops bcm590xx_ops_vbus = {
.disable = regulator_disable_regmap,
};
-#define BCM590XX_MATCH(_name, _id) \
- { \
- .name = #_name, \
- .driver_data = (void *)&bcm590xx_regs[BCM590XX_REG_##_id], \
- }
-
-static struct of_regulator_match bcm590xx_matches[] = {
- BCM590XX_MATCH(rfldo, RFLDO),
- BCM590XX_MATCH(camldo1, CAMLDO1),
- BCM590XX_MATCH(camldo2, CAMLDO2),
- BCM590XX_MATCH(simldo1, SIMLDO1),
- BCM590XX_MATCH(simldo2, SIMLDO2),
- BCM590XX_MATCH(sdldo, SDLDO),
- BCM590XX_MATCH(sdxldo, SDXLDO),
- BCM590XX_MATCH(mmcldo1, MMCLDO1),
- BCM590XX_MATCH(mmcldo2, MMCLDO2),
- BCM590XX_MATCH(audldo, AUDLDO),
- BCM590XX_MATCH(micldo, MICLDO),
- BCM590XX_MATCH(usbldo, USBLDO),
- BCM590XX_MATCH(vibldo, VIBLDO),
- BCM590XX_MATCH(csr, CSR),
- BCM590XX_MATCH(iosr1, IOSR1),
- BCM590XX_MATCH(iosr2, IOSR2),
- BCM590XX_MATCH(msr, MSR),
- BCM590XX_MATCH(sdsr1, SDSR1),
- BCM590XX_MATCH(sdsr2, SDSR2),
- BCM590XX_MATCH(vsr, VSR),
- BCM590XX_MATCH(gpldo1, GPLDO1),
- BCM590XX_MATCH(gpldo2, GPLDO2),
- BCM590XX_MATCH(gpldo3, GPLDO3),
- BCM590XX_MATCH(gpldo4, GPLDO4),
- BCM590XX_MATCH(gpldo5, GPLDO5),
- BCM590XX_MATCH(gpldo6, GPLDO6),
- BCM590XX_MATCH(vbus, VBUS),
-};
-
-static struct bcm590xx_board *bcm590xx_parse_dt_reg_data(
- struct platform_device *pdev,
- struct of_regulator_match **bcm590xx_reg_matches)
-{
- struct bcm590xx_board *data;
- struct device_node *np = pdev->dev.parent->of_node;
- struct device_node *regulators;
- struct of_regulator_match *matches = bcm590xx_matches;
- int count = ARRAY_SIZE(bcm590xx_matches);
- int idx = 0;
- int ret;
-
- if (!np) {
- dev_err(&pdev->dev, "of node not found\n");
- return NULL;
- }
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return NULL;
-
- np = of_node_get(np);
- regulators = of_get_child_by_name(np, "regulators");
- if (!regulators) {
- dev_warn(&pdev->dev, "regulator node not found\n");
- return NULL;
- }
-
- ret = of_regulator_match(&pdev->dev, regulators, matches, count);
- of_node_put(regulators);
- if (ret < 0) {
- dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
- ret);
- return NULL;
- }
-
- *bcm590xx_reg_matches = matches;
-
- for (idx = 0; idx < count; idx++) {
- if (!matches[idx].init_data || !matches[idx].of_node)
- continue;
-
- data->bcm590xx_pmu_init_data[idx] = matches[idx].init_data;
- }
-
- return data;
-}
-
static int bcm590xx_probe(struct platform_device *pdev)
{
struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent);
- struct bcm590xx_board *pmu_data = NULL;
struct bcm590xx_reg *pmu;
struct regulator_config config = { };
struct bcm590xx_info *info;
- struct regulator_init_data *reg_data;
struct regulator_dev *rdev;
- struct of_regulator_match *bcm590xx_reg_matches = NULL;
int i;
- pmu_data = bcm590xx_parse_dt_reg_data(pdev,
- &bcm590xx_reg_matches);
-
pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
return -ENOMEM;
@@ -397,13 +303,10 @@ static int bcm590xx_probe(struct platform_device *pdev)
info = bcm590xx_regs;
for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) {
- if (pmu_data)
- reg_data = pmu_data->bcm590xx_pmu_init_data[i];
- else
- reg_data = NULL;
-
/* Register the regulators */
pmu->desc[i].name = info->name;
+ pmu->desc[i].of_match = of_match_ptr(info->name);
+ pmu->desc[i].regulators_node = of_match_ptr("regulators");
pmu->desc[i].supply_name = info->vin_name;
pmu->desc[i].id = i;
pmu->desc[i].volt_table = info->volt_table;
@@ -433,16 +336,12 @@ static int bcm590xx_probe(struct platform_device *pdev)
pmu->desc[i].owner = THIS_MODULE;
config.dev = bcm590xx->dev;
- config.init_data = reg_data;
config.driver_data = pmu;
if (BCM590XX_REG_IS_GPLDO(i) || BCM590XX_REG_IS_VBUS(i))
config.regmap = bcm590xx->regmap_sec;
else
config.regmap = bcm590xx->regmap_pri;
- if (bcm590xx_reg_matches)
- config.of_node = bcm590xx_reg_matches[i].of_node;
-
rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i],
&config);
if (IS_ERR(rdev)) {
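
Annotation: the bcm590xx conversion deletes the driver's private of_regulator_match table entirely. When a descriptor sets .of_match and .regulators_node, the regulator core locates the DT child node and parses init_data itself during registration, so config.init_data and config.of_node no longer need to be filled in by hand. Sketch of one such descriptor (names hypothetical):

/*
 * The core walks the device's of_node for a "regulators" child and
 * matches a "rfldo" node under it.
 */
static const struct regulator_desc foo_rfldo_desc = {
	.name = "rfldo",
	.of_match = of_match_ptr("rfldo"),
	.regulators_node = of_match_ptr("regulators"),
	.type = REGULATOR_VOLTAGE,
	.owner = THIS_MODULE,
	/* .ops etc. omitted */
};
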
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index b2191be49670..fde4264da6ff 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -27,8 +27,8 @@
static int bd718xx_buck1234_set_ramp_delay(struct regulator_dev *rdev,
int ramp_delay)
{
- int id = rdev->desc->id;
- unsigned int ramp_value = BUCK_RAMPRATE_10P00MV;
+ int id = rdev_get_id(rdev);
+ unsigned int ramp_value;
dev_dbg(&rdev->dev, "Buck[%d] Set Ramp = %d\n", id + 1,
ramp_delay);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 68473d0cc57e..955a0a15b9cb 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1339,9 +1339,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
* We'll only apply the initial system load if an
* initial mode wasn't specified.
*/
- regulator_lock(rdev);
drms_uA_update(rdev);
- regulator_unlock(rdev);
}
if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
@@ -2256,6 +2254,7 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
if (pin->gpiod == rdev->ena_pin->gpiod) {
if (pin->request_count <= 1) {
pin->request_count = 0;
+ gpiod_put(pin->gpiod);
list_del(&pin->list);
kfree(pin);
rdev->ena_pin = NULL;
@@ -3004,7 +3003,7 @@ EXPORT_SYMBOL_GPL(regulator_get_linear_step);
* @min_uV: Minimum required voltage in uV.
* @max_uV: Maximum required voltage in uV.
*
- * Returns a boolean or a negative error code.
+ * Returns a boolean.
*/
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
@@ -3028,7 +3027,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
ret = regulator_count_voltages(regulator);
if (ret < 0)
- return ret;
+ return 0;
voltages = ret;
for (i = 0; i < voltages; i++) {
@@ -3322,15 +3321,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
/* for not coupled regulators this will just set the voltage */
ret = regulator_balance_voltage(rdev, state);
- if (ret < 0)
- goto out2;
+ if (ret < 0) {
+ voltage->min_uV = old_min_uV;
+ voltage->max_uV = old_max_uV;
+ }
out:
- return 0;
-out2:
- voltage->min_uV = old_min_uV;
- voltage->max_uV = old_max_uV;
-
return ret;
}
@@ -4347,8 +4343,6 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
ret = PTR_ERR(consumers[i].consumer);
- dev_err(dev, "Failed to get supply '%s': %d\n",
- consumers[i].supply, ret);
consumers[i].consumer = NULL;
goto err;
}
@@ -4357,6 +4351,13 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
return 0;
err:
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get supply '%s': %d\n",
+ consumers[i].supply, ret);
+ else
+ dev_dbg(dev, "Failed to get supply '%s', deferring\n",
+ consumers[i].supply);
+
while (--i >= 0)
regulator_put(consumers[i].consumer);
@@ -5064,10 +5065,11 @@ void regulator_unregister(struct regulator_dev *rdev)
regulator_put(rdev->supply);
}
+ flush_work(&rdev->disable_work.work);
+
mutex_lock(&regulator_list_mutex);
debugfs_remove_recursive(rdev->debugfs);
- flush_work(&rdev->disable_work.work);
WARN_ON(rdev->open_count);
regulator_remove_coupling(rdev);
unset_regulator_supplies(rdev);
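
Annotation: the regulator_bulk_get() change above demotes probe-deferral failures to dev_dbg(), so consumers stop spamming the log while their supplies come and go during boot. For reference, typical consumer-side usage looks like this sketch (supply names hypothetical):

static struct regulator_bulk_data foo_supplies[] = {
	{ .supply = "vdd" },
	{ .supply = "vio" },
};

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(foo_supplies),
				      foo_supplies);
	if (ret)
		return ret;	/* -EPROBE_DEFER is now logged quietly */

	return regulator_bulk_enable(ARRAY_SIZE(foo_supplies), foo_supplies);
}
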
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index e7dab5c4d1d1..d3284361e594 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -505,17 +505,12 @@ MODULE_DEVICE_TABLE(of, cpcap_regulator_id_table);
static int cpcap_regulator_probe(struct platform_device *pdev)
{
struct cpcap_ddata *ddata;
- const struct of_device_id *match;
+ const struct cpcap_regulator *match_data;
struct regulator_config config;
- struct regulator_init_data init_data;
int i;
- match = of_match_device(of_match_ptr(cpcap_regulator_id_table),
- &pdev->dev);
- if (!match)
- return -EINVAL;
-
- if (!match->data) {
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data) {
dev_err(&pdev->dev, "no configuration data found\n");
return -ENODEV;
@@ -530,14 +525,12 @@ static int cpcap_regulator_probe(struct platform_device *pdev)
return -ENODEV;
ddata->dev = &pdev->dev;
- ddata->soc = match->data;
+ ddata->soc = match_data;
platform_set_drvdata(pdev, ddata);
memset(&config, 0, sizeof(config));
- memset(&init_data, 0, sizeof(init_data));
config.dev = &pdev->dev;
config.regmap = ddata->reg;
- config.init_data = &init_data;
for (i = 0; i < CPCAP_NR_REGULATORS; i++) {
const struct cpcap_regulator *regulator = &ddata->soc[i];
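
Annotation: the cpcap probe is simplified with of_device_get_match_data(), which folds the of_match_device() call and the ->data dereference into a single step. Sketch (foo_soc_data is hypothetical):

#include <linux/of_device.h>

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_soc_data *data;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;	/* no match entry, or entry without .data */

	/* ... use data ... */
	return 0;
}
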
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 33e8f3b8d2bd..5493c3a86426 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -1,13 +1,9 @@
-/*
- * Regulators driver for Dialog Semiconductor DA903x
- *
- * Copyright (C) 2006-2008 Marvell International Ltd.
- * Copyright (C) 2008 Compulab Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Regulators driver for Dialog Semiconductor DA903x
+//
+// Copyright (C) 2006-2008 Marvell International Ltd.
+// Copyright (C) 2008 Compulab Ltd.
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index cefa3558236d..e18d291c7f21 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -1,16 +1,10 @@
-/*
-* da9052-regulator.c: Regulator driver for DA9052
-*
-* Copyright(c) 2011 Dialog Semiconductor Ltd.
-*
-* Author: David Dajun Chen <dchen@diasemi.com>
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
-*/
+// SPDX-License-Identifier: GPL-2.0+
+//
+// da9052-regulator.c: Regulator driver for DA9052
+//
+// Copyright(c) 2011 Dialog Semiconductor Ltd.
+//
+// Author: David Dajun Chen <dchen@diasemi.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -19,10 +13,8 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#ifdef CONFIG_OF
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
-#endif
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
@@ -294,6 +286,8 @@ static const struct regulator_ops da9052_ldo_ops = {
{\
.reg_desc = {\
.name = #_name,\
+ .of_match = of_match_ptr(#_name),\
+ .regulators_node = of_match_ptr("regulators"),\
.ops = &da9052_ldo_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9052_ID_##_id,\
@@ -314,6 +308,8 @@ static const struct regulator_ops da9052_ldo_ops = {
{\
.reg_desc = {\
.name = #_name,\
+ .of_match = of_match_ptr(#_name),\
+ .regulators_node = of_match_ptr("regulators"),\
.ops = &da9052_dcdc_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9052_ID_##_id,\
@@ -417,36 +413,11 @@ static int da9052_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
- config.dev = &pdev->dev;
+ config.dev = da9052->dev;
config.driver_data = regulator;
config.regmap = da9052->regmap;
- if (pdata) {
+ if (pdata)
config.init_data = pdata->regulators[cell->id];
- } else {
-#ifdef CONFIG_OF
- struct device_node *nproot = da9052->dev->of_node;
- struct device_node *np;
-
- if (!nproot)
- return -ENODEV;
-
- nproot = of_get_child_by_name(nproot, "regulators");
- if (!nproot)
- return -ENODEV;
-
- for_each_child_of_node(nproot, np) {
- if (of_node_name_eq(np,
- regulator->info->reg_desc.name)) {
- config.init_data = of_get_regulator_init_data(
- &pdev->dev, np,
- &regulator->info->reg_desc);
- config.of_node = np;
- break;
- }
- }
- of_node_put(nproot);
-#endif
- }
regulator->rdev = devm_regulator_register(&pdev->dev,
&regulator->info->reg_desc,
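
Annotation: as in the bcm590xx conversion, the DA9052 macros hand DT matching to the core; of_match_ptr() compiles the strings away on !CONFIG_OF builds. Note that config.dev switches to the MFD parent (da9052->dev), since — as far as the conversion suggests — the core starts the regulators_node lookup from config.dev's of_node, and that is where the "regulators" child lives. Sketch of the macro addition (FOO_REG is hypothetical):

#define FOO_REG(_name)						\
	{							\
		.name = #_name,					\
		.of_match = of_match_ptr(#_name),		\
		.regulators_node = of_match_ptr("regulators"),	\
		/* ops, voltage range etc. as before */		\
	}
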
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 3c6fac793658..c025ccb1a30a 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -1,16 +1,10 @@
-/*
-* Regulator driver for DA9055 PMIC
-*
-* Copyright(c) 2012 Dialog Semiconductor Ltd.
-*
-* Author: David Dajun Chen <dchen@diasemi.com>
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
-*/
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Regulator driver for DA9055 PMIC
+//
+// Copyright(c) 2012 Dialog Semiconductor Ltd.
+//
+// Author: David Dajun Chen <dchen@diasemi.com>
#include <linux/module.h>
#include <linux/init.h>
@@ -338,6 +332,8 @@ static const struct regulator_ops da9055_ldo_ops = {
{\
.reg_desc = {\
.name = #_id,\
+ .of_match = of_match_ptr(#_id),\
+ .regulators_node = of_match_ptr("regulators"),\
.ops = &da9055_ldo_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9055_ID_##_id,\
@@ -366,6 +362,8 @@ static const struct regulator_ops da9055_ldo_ops = {
{\
.reg_desc = {\
.name = #_id,\
+ .of_match = of_match_ptr(#_id),\
+ .regulators_node = of_match_ptr("regulators"),\
.ops = &da9055_buck_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9055_ID_##_id,\
@@ -487,8 +485,10 @@ static irqreturn_t da9055_ldo5_6_oc_irq(int irq, void *data)
{
struct da9055_regulator *regulator = data;
+ regulator_lock(regulator->rdev);
regulator_notifier_call_chain(regulator->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(regulator->rdev);
return IRQ_HANDLED;
}
@@ -507,59 +507,6 @@ static inline struct da9055_regulator_info *find_regulator_info(int id)
return NULL;
}
-#ifdef CONFIG_OF
-static struct of_regulator_match da9055_reg_matches[] = {
- { .name = "BUCK1", },
- { .name = "BUCK2", },
- { .name = "LDO1", },
- { .name = "LDO2", },
- { .name = "LDO3", },
- { .name = "LDO4", },
- { .name = "LDO5", },
- { .name = "LDO6", },
-};
-
-static int da9055_regulator_dt_init(struct platform_device *pdev,
- struct da9055_regulator *regulator,
- struct regulator_config *config,
- int regid)
-{
- struct device_node *nproot, *np;
- int ret;
-
- nproot = of_node_get(pdev->dev.parent->of_node);
- if (!nproot)
- return -ENODEV;
-
- np = of_get_child_by_name(nproot, "regulators");
- if (!np)
- return -ENODEV;
-
- ret = of_regulator_match(&pdev->dev, np, &da9055_reg_matches[regid], 1);
- of_node_put(nproot);
- if (ret < 0) {
- dev_err(&pdev->dev, "Error matching regulator: %d\n", ret);
- return ret;
- }
-
- config->init_data = da9055_reg_matches[regid].init_data;
- config->of_node = da9055_reg_matches[regid].of_node;
-
- if (!config->of_node)
- return -ENODEV;
-
- return 0;
-}
-#else
-static inline int da9055_regulator_dt_init(struct platform_device *pdev,
- struct da9055_regulator *regulator,
- struct regulator_config *config,
- int regid)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_OF */
-
static int da9055_regulator_probe(struct platform_device *pdev)
{
struct regulator_config config = { };
@@ -580,18 +527,12 @@ static int da9055_regulator_probe(struct platform_device *pdev)
}
regulator->da9055 = da9055;
- config.dev = &pdev->dev;
+ config.dev = da9055->dev;
config.driver_data = regulator;
config.regmap = da9055->regmap;
- if (pdata) {
+ if (pdata)
config.init_data = pdata->regulators[pdev->id];
- } else {
- ret = da9055_regulator_dt_init(pdev, regulator, &config,
- pdev->id);
- if (ret < 0)
- return ret;
- }
ret = da9055_gpio_init(regulator, &config, pdata, pdev->id);
if (ret < 0)
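
Annotation: da9055 (and the da9062/da9063 hunks below) now take the regulator lock around regulator_notifier_call_chain(), since the core expects notifier callers to hold it. Minimal sketch of such an IRQ thread:

static irqreturn_t foo_oc_irq(int irq, void *data)
{
	struct regulator_dev *rdev = data;

	regulator_lock(rdev);
	regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT,
				      NULL);
	regulator_unlock(rdev);

	return IRQ_HANDLED;
}
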
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index b064d8a19d4c..a02e0488410f 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -1,17 +1,8 @@
-/*
- * Regulator device driver for DA9061 and DA9062.
- * Copyright (C) 2015-2017 Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Regulator device driver for DA9061 and DA9062.
+// Copyright (C) 2015-2017 Dialog Semiconductor
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -53,16 +44,12 @@ enum {
/* Regulator capabilities and registers description */
struct da9062_regulator_info {
struct regulator_desc desc;
- /* Current limiting */
- unsigned int n_current_limits;
- const int *current_limits;
/* Main register fields */
struct reg_field mode;
struct reg_field suspend;
struct reg_field sleep;
struct reg_field suspend_sleep;
unsigned int suspend_vsel_reg;
- struct reg_field ilimit;
/* Event detection bit */
struct reg_field oc_event;
};
@@ -78,7 +65,6 @@ struct da9062_regulator {
struct regmap_field *suspend;
struct regmap_field *sleep;
struct regmap_field *suspend_sleep;
- struct regmap_field *ilimit;
};
/* Encapsulates all information for the regulators driver */
@@ -104,7 +90,7 @@ enum {
* - DA9062_ID_[BUCK1|BUCK2|BUCK4]
* Entry indexes correspond to register values.
*/
-static const int da9062_buck_a_limits[] = {
+static const unsigned int da9062_buck_a_limits[] = {
500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000,
1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000
};
@@ -114,44 +100,11 @@ static const int da9062_buck_a_limits[] = {
* - DA9062_ID_BUCK3
* Entry indexes correspond to register values.
*/
-static const int da9062_buck_b_limits[] = {
+static const unsigned int da9062_buck_b_limits[] = {
1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
};
-static int da9062_set_current_limit(struct regulator_dev *rdev,
- int min_ua, int max_ua)
-{
- struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- const struct da9062_regulator_info *rinfo = regl->info;
- int n, tval;
-
- for (n = rinfo->n_current_limits - 1; n >= 0; n--) {
- tval = rinfo->current_limits[n];
- if (tval >= min_ua && tval <= max_ua)
- return regmap_field_write(regl->ilimit, n);
- }
-
- return -EINVAL;
-}
-
-static int da9062_get_current_limit(struct regulator_dev *rdev)
-{
- struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- const struct da9062_regulator_info *rinfo = regl->info;
- unsigned int sel;
- int ret;
-
- ret = regmap_field_read(regl->ilimit, &sel);
- if (ret < 0)
- return ret;
-
- if (sel >= rinfo->n_current_limits)
- sel = rinfo->n_current_limits - 1;
-
- return rinfo->current_limits[sel];
-}
-
static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
@@ -395,8 +348,8 @@ static const struct regulator_ops da9062_buck_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = da9062_set_current_limit,
- .get_current_limit = da9062_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
.set_mode = da9062_buck_set_mode,
.get_mode = da9062_buck_get_mode,
.get_status = da9062_buck_get_status,
@@ -433,8 +386,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.min_uV = (300) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1570) - (300))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_C,
+ .desc.csel_mask = DA9062AA_BUCK1_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK1_CONT,
.desc.enable_mask = DA9062AA_BUCK1_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK1_A,
@@ -457,10 +412,6 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_C,
- __builtin_ffs((int)DA9062AA_BUCK1_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK1_ILIM_MASK)) - 1),
},
{
.desc.id = DA9061_ID_BUCK2,
@@ -471,8 +422,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.min_uV = (800) * 1000,
.desc.uV_step = (20) * 1000,
.desc.n_voltages = ((3340) - (800))/(20) + 1,
- .current_limits = da9062_buck_b_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_b_limits),
+ .desc.curr_table = da9062_buck_b_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_b_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_A,
+ .desc.csel_mask = DA9062AA_BUCK3_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK3_CONT,
.desc.enable_mask = DA9062AA_BUCK3_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK3_A,
@@ -495,10 +448,6 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_A,
- __builtin_ffs((int)DA9062AA_BUCK3_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK3_ILIM_MASK)) - 1),
},
{
.desc.id = DA9061_ID_BUCK3,
@@ -509,8 +458,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.min_uV = (530) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1800) - (530))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_B,
+ .desc.csel_mask = DA9062AA_BUCK4_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK4_CONT,
.desc.enable_mask = DA9062AA_BUCK4_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK4_A,
@@ -533,10 +484,6 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_B,
- __builtin_ffs((int)DA9062AA_BUCK4_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK4_ILIM_MASK)) - 1),
},
{
.desc.id = DA9061_ID_LDO1,
@@ -679,8 +626,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.min_uV = (300) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1570) - (300))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_C,
+ .desc.csel_mask = DA9062AA_BUCK1_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK1_CONT,
.desc.enable_mask = DA9062AA_BUCK1_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK1_A,
@@ -703,10 +652,6 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_C,
- __builtin_ffs((int)DA9062AA_BUCK1_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK1_ILIM_MASK)) - 1),
},
{
.desc.id = DA9062_ID_BUCK2,
@@ -717,8 +662,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.min_uV = (300) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1570) - (300))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_C,
+ .desc.csel_mask = DA9062AA_BUCK2_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK2_CONT,
.desc.enable_mask = DA9062AA_BUCK2_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK2_A,
@@ -741,10 +688,6 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK2_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK2_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_C,
- __builtin_ffs((int)DA9062AA_BUCK2_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK2_ILIM_MASK)) - 1),
},
{
.desc.id = DA9062_ID_BUCK3,
@@ -755,8 +698,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.min_uV = (800) * 1000,
.desc.uV_step = (20) * 1000,
.desc.n_voltages = ((3340) - (800))/(20) + 1,
- .current_limits = da9062_buck_b_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_b_limits),
+ .desc.curr_table = da9062_buck_b_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_b_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_A,
+ .desc.csel_mask = DA9062AA_BUCK3_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK3_CONT,
.desc.enable_mask = DA9062AA_BUCK3_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK3_A,
@@ -779,10 +724,6 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_A,
- __builtin_ffs((int)DA9062AA_BUCK3_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK3_ILIM_MASK)) - 1),
},
{
.desc.id = DA9062_ID_BUCK4,
@@ -793,8 +734,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.min_uV = (530) * 1000,
.desc.uV_step = (10) * 1000,
.desc.n_voltages = ((1800) - (530))/(10) + 1,
- .current_limits = da9062_buck_a_limits,
- .n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.curr_table = da9062_buck_a_limits,
+ .desc.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+ .desc.csel_reg = DA9062AA_BUCK_ILIM_B,
+ .desc.csel_mask = DA9062AA_BUCK4_ILIM_MASK,
.desc.enable_reg = DA9062AA_BUCK4_CONT,
.desc.enable_mask = DA9062AA_BUCK4_EN_MASK,
.desc.vsel_reg = DA9062AA_VBUCK4_A,
@@ -817,10 +760,6 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
- .ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_B,
- __builtin_ffs((int)DA9062AA_BUCK4_ILIM_MASK) - 1,
- sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_BUCK4_ILIM_MASK)) - 1),
},
{
.desc.id = DA9062_ID_LDO1,
@@ -974,8 +913,10 @@ static irqreturn_t da9062_ldo_lim_event(int irq, void *data)
continue;
if (BIT(regl->info->oc_event.lsb) & bits) {
+ regulator_lock(regl->rdev);
regulator_notifier_call_chain(regl->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(regl->rdev);
handled = IRQ_HANDLED;
}
}
@@ -1063,15 +1004,6 @@ static int da9062_regulator_probe(struct platform_device *pdev)
return PTR_ERR(regl->suspend_sleep);
}
- if (regl->info->ilimit.reg) {
- regl->ilimit = devm_regmap_field_alloc(
- &pdev->dev,
- chip->regmap,
- regl->info->ilimit);
- if (IS_ERR(regl->ilimit))
- return PTR_ERR(regl->ilimit);
- }
-
/* Register regulator */
memset(&config, 0, sizeof(config));
config.dev = chip->dev;
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 2b0c7a85306a..6f9ce1a6e44d 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -1,18 +1,12 @@
-/*
- * Regulator driver for DA9063 PMIC series
- *
- * Copyright 2012 Dialog Semiconductors Ltd.
- * Copyright 2013 Philipp Zabel, Pengutronix
- *
- * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Regulator driver for DA9063 PMIC series
+//
+// Copyright 2012 Dialog Semiconductors Ltd.
+// Copyright 2013 Philipp Zabel, Pengutronix
+//
+// Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -38,17 +32,12 @@
struct da9063_regulator_info {
struct regulator_desc desc;
- /* Current limiting */
- unsigned n_current_limits;
- const int *current_limits;
-
/* DA9063 main register fields */
struct reg_field mode; /* buck mode of operation */
struct reg_field suspend;
struct reg_field sleep;
struct reg_field suspend_sleep;
unsigned int suspend_vsel_reg;
- struct reg_field ilimit;
/* DA9063 event detection bit */
struct reg_field oc_event;
@@ -73,15 +62,18 @@ struct da9063_regulator_info {
.suspend_vsel_reg = DA9063_REG_V##regl_name##_B
/* Macros for voltage DC/DC converters (BUCKs) */
-#define DA9063_BUCK(chip, regl_name, min_mV, step_mV, max_mV, limits_array) \
+#define DA9063_BUCK(chip, regl_name, min_mV, step_mV, max_mV, limits_array, \
+ creg, cmask) \
.desc.id = chip##_ID_##regl_name, \
.desc.name = __stringify(chip##_##regl_name), \
.desc.ops = &da9063_buck_ops, \
.desc.min_uV = (min_mV) * 1000, \
.desc.uV_step = (step_mV) * 1000, \
.desc.n_voltages = ((max_mV) - (min_mV))/(step_mV) + 1, \
- .current_limits = limits_array, \
- .n_current_limits = ARRAY_SIZE(limits_array)
+ .desc.csel_reg = (creg), \
+ .desc.csel_mask = (cmask), \
+ .desc.curr_table = limits_array, \
+ .desc.n_current_limits = ARRAY_SIZE(limits_array)
#define DA9063_BUCK_COMMON_FIELDS(regl_name) \
.desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
@@ -112,7 +104,6 @@ struct da9063_regulator {
struct regmap_field *suspend;
struct regmap_field *sleep;
struct regmap_field *suspend_sleep;
- struct regmap_field *ilimit;
};
/* Encapsulates all information for the regulators driver */
@@ -134,65 +125,32 @@ enum {
/* Current limits array (in uA) for BCORE1, BCORE2, BPRO.
Entry indexes correspond to register values. */
-static const int da9063_buck_a_limits[] = {
+static const unsigned int da9063_buck_a_limits[] = {
500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000,
1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000
};
/* Current limits array (in uA) for BMEM, BIO, BPERI.
Entry indexes correspond to register values. */
-static const int da9063_buck_b_limits[] = {
+static const unsigned int da9063_buck_b_limits[] = {
1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
};
/* Current limits array (in uA) for merged BCORE1 and BCORE2.
Entry indexes correspond to register values. */
-static const int da9063_bcores_merged_limits[] = {
+static const unsigned int da9063_bcores_merged_limits[] = {
1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000,
2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000
};
/* Current limits array (in uA) for merged BMEM and BIO.
Entry indexes correspond to register values. */
-static const int da9063_bmem_bio_merged_limits[] = {
+static const unsigned int da9063_bmem_bio_merged_limits[] = {
3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
};
-static int da9063_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct da9063_regulator *regl = rdev_get_drvdata(rdev);
- const struct da9063_regulator_info *rinfo = regl->info;
- int n, tval;
-
- for (n = rinfo->n_current_limits - 1; n >= 0; n--) {
- tval = rinfo->current_limits[n];
- if (tval >= min_uA && tval <= max_uA)
- return regmap_field_write(regl->ilimit, n);
- }
-
- return -EINVAL;
-}
-
-static int da9063_get_current_limit(struct regulator_dev *rdev)
-{
- struct da9063_regulator *regl = rdev_get_drvdata(rdev);
- const struct da9063_regulator_info *rinfo = regl->info;
- unsigned int sel;
- int ret;
-
- ret = regmap_field_read(regl->ilimit, &sel);
- if (ret < 0)
- return ret;
-
- if (sel >= rinfo->n_current_limits)
- sel = rinfo->n_current_limits - 1;
-
- return rinfo->current_limits[sel];
-}
-
static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
@@ -434,8 +392,8 @@ static const struct regulator_ops da9063_buck_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = da9063_set_current_limit,
- .get_current_limit = da9063_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
.set_mode = da9063_buck_set_mode,
.get_mode = da9063_buck_get_mode,
.get_status = da9063_buck_get_status,
@@ -465,69 +423,61 @@ static const struct regulator_ops da9063_ldo_ops = {
static const struct da9063_regulator_info da9063_regulator_info[] = {
{
DA9063_BUCK(DA9063, BCORE1, 300, 10, 1570,
- da9063_buck_a_limits),
+ da9063_buck_a_limits,
+ DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BCORE1),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C,
- DA9063_BCORE1_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BCORE2, 300, 10, 1570,
- da9063_buck_a_limits),
+ da9063_buck_a_limits,
+ DA9063_REG_BUCK_ILIM_C, DA9063_BCORE2_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BCORE2),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE2_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C,
- DA9063_BCORE2_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BPRO, 530, 10, 1800,
- da9063_buck_a_limits),
+ da9063_buck_a_limits,
+ DA9063_REG_BUCK_ILIM_B, DA9063_BPRO_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BPRO),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPRO_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B,
- DA9063_BPRO_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BMEM, 800, 20, 3340,
- da9063_buck_b_limits),
+ da9063_buck_b_limits,
+ DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BMEM),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A,
- DA9063_BMEM_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BIO, 800, 20, 3340,
- da9063_buck_b_limits),
+ da9063_buck_b_limits,
+ DA9063_REG_BUCK_ILIM_A, DA9063_BIO_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BIO),
.suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VBIO_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A,
- DA9063_BIO_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BPERI, 800, 20, 3340,
- da9063_buck_b_limits),
+ da9063_buck_b_limits,
+ DA9063_REG_BUCK_ILIM_B, DA9063_BPERI_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BPERI),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPERI_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B,
- DA9063_BPERI_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BCORES_MERGED, 300, 10, 1570,
- da9063_bcores_merged_limits),
+ da9063_bcores_merged_limits,
+ DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK),
/* BCORES_MERGED uses the same register fields as BCORE1 */
DA9063_BUCK_COMMON_FIELDS(BCORE1),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C,
- DA9063_BCORE1_ILIM_MASK),
},
{
DA9063_BUCK(DA9063, BMEM_BIO_MERGED, 800, 20, 3340,
- da9063_bmem_bio_merged_limits),
+ da9063_bmem_bio_merged_limits,
+ DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK),
/* BMEM_BIO_MERGED uses the same register fields as BMEM */
DA9063_BUCK_COMMON_FIELDS(BMEM),
.suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL),
- .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A,
- DA9063_BMEM_ILIM_MASK),
},
{
DA9063_LDO(DA9063, LDO3, 900, 20, 3440),
@@ -615,9 +565,12 @@ static irqreturn_t da9063_ldo_lim_event(int irq, void *data)
if (regl->info->oc_event.reg != DA9063_REG_STATUS_D)
continue;
- if (BIT(regl->info->oc_event.lsb) & bits)
+ if (BIT(regl->info->oc_event.lsb) & bits) {
+ regulator_lock(regl->rdev);
regulator_notifier_call_chain(regl->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(regl->rdev);
+ }
}
return IRQ_HANDLED;
@@ -861,13 +814,6 @@ static int da9063_regulator_probe(struct platform_device *pdev)
return PTR_ERR(regl->suspend_sleep);
}
- if (regl->info->ilimit.reg) {
- regl->ilimit = devm_regmap_field_alloc(&pdev->dev,
- da9063->regmap, regl->info->ilimit);
- if (IS_ERR(regl->ilimit))
- return PTR_ERR(regl->ilimit);
- }
-
/* Register regulator */
memset(&config, 0, sizeof(config));
config.dev = &pdev->dev;
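
The da9063 hunks above replace the driver's open-coded current-limit lookup with the core helpers regulator_set_current_limit_regmap() and regulator_get_current_limit_regmap(), which take the limit table and register field straight from the regulator_desc. Below is a minimal sketch of a descriptor wired up for those helpers; my_buck_limits, my_buck_ops, MY_REG_BUCK_ILIM and MY_BUCK_ILIM_MASK are illustrative names, not taken from the patch.

#include <linux/regulator/driver.h>

/* Table index corresponds to the register field value selected by
 * csel_reg/csel_mask; the helpers search it for a value within
 * the requested [min_uA, max_uA] window. */
static const unsigned int my_buck_limits[] = {
	500000, 600000, 700000, 800000,		/* uA */
};

static const struct regulator_desc my_buck_desc = {
	.name			= "my-buck",
	.ops			= &my_buck_ops,	/* must use the *_regmap helpers */
	.type			= REGULATOR_VOLTAGE,
	.owner			= THIS_MODULE,
	.curr_table		= my_buck_limits,
	.n_current_limits	= ARRAY_SIZE(my_buck_limits),
	.csel_reg		= MY_REG_BUCK_ILIM,
	.csel_mask		= MY_BUCK_ILIM_MASK,
};
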
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 528303771723..f9448ed50e05 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -1,22 +1,7 @@
-/*
- * da9210-regulator.c - Regulator device driver for DA9210
- * Copyright (C) 2013 Dialog Semiconductor Ltd.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License along with this library; if not, write to the
- * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// da9210-regulator.c - Regulator device driver for DA9210
+// Copyright (C) 2013 Dialog Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
diff --git a/drivers/regulator/da9210-regulator.h b/drivers/regulator/da9210-regulator.h
index 749c550808b6..b1f1a607c208 100644
--- a/drivers/regulator/da9210-regulator.h
+++ b/drivers/regulator/da9210-regulator.h
@@ -1,22 +1,7 @@
-
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* da9210-regulator.h - Regulator definitions for DA9210
* Copyright (C) 2013 Dialog Semiconductor Ltd.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License along with this library; if not, write to the
- * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
- * Boston, MA 02110-1301, USA.
*/
#ifndef __DA9210_REGISTERS_H__
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 109ee12d4362..da37b4ccd834 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,18 +1,8 @@
-/*
- * da9211-regulator.c - Regulator device driver for DA9211/DA9212
- * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
- * Copyright (C) 2015 Dialog Semiconductor Ltd.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// da9211-regulator.c - Regulator device driver for DA9211/DA9212
+// /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
+// Copyright (C) 2015 Dialog Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
@@ -322,8 +312,10 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
goto error_i2c;
if (reg_val & DA9211_E_OV_CURR_A) {
+ regulator_lock(chip->rdev[0]);
regulator_notifier_call_chain(chip->rdev[0],
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(chip->rdev[0]);
err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
DA9211_E_OV_CURR_A);
@@ -334,8 +326,10 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
}
if (reg_val & DA9211_E_OV_CURR_B) {
+ regulator_lock(chip->rdev[1]);
regulator_notifier_call_chain(chip->rdev[1],
REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(chip->rdev[1]);
err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
DA9211_E_OV_CURR_B);
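
As in the da9063 hunk above, the da9211 interrupt handler now brackets regulator_notifier_call_chain() with regulator_lock()/regulator_unlock(): the regulator core expects the rdev lock to be held while notifiers run. A minimal sketch of the pattern, with my_chip and the handler name as illustrative placeholders:

#include <linux/interrupt.h>
#include <linux/regulator/driver.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_chip *chip = data;

	/* Hold the rdev lock across the notifier call, as the core requires. */
	regulator_lock(chip->rdev);
	regulator_notifier_call_chain(chip->rdev,
				      REGULATOR_EVENT_OVER_CURRENT, NULL);
	regulator_unlock(chip->rdev);

	return IRQ_HANDLED;
}
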
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index 2cb32aab4f82..1201e7cc056c 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* da9211-regulator.h - Regulator definitions for DA9211/DA9212
* /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DA9211_REGISTERS_H__
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 7cec535cf0bc..eb317663f875 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -75,7 +75,7 @@ static int db8500_regulator_is_enabled(struct regulator_dev *rdev)
}
/* db8500 regulator operations */
-static struct regulator_ops db8500_regulator_ops = {
+static const struct regulator_ops db8500_regulator_ops = {
.enable = db8500_regulator_enable,
.disable = db8500_regulator_disable,
.is_enabled = db8500_regulator_is_enabled,
@@ -200,7 +200,7 @@ static int db8500_regulator_switch_is_enabled(struct regulator_dev *rdev)
return info->is_enabled;
}
-static struct regulator_ops db8500_regulator_switch_ops = {
+static const struct regulator_ops db8500_regulator_switch_ops = {
.enable = db8500_regulator_switch_enable,
.disable = db8500_regulator_switch_disable,
.is_enabled = db8500_regulator_switch_is_enabled,
@@ -214,6 +214,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VAPE] = {
.desc = {
.name = "db8500-vape",
+ .of_match = of_match_ptr("db8500_vape"),
.id = DB8500_REGULATOR_VAPE,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -223,6 +224,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VARM] = {
.desc = {
.name = "db8500-varm",
+ .of_match = of_match_ptr("db8500_varm"),
.id = DB8500_REGULATOR_VARM,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -232,6 +234,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VMODEM] = {
.desc = {
.name = "db8500-vmodem",
+ .of_match = of_match_ptr("db8500_vmodem"),
.id = DB8500_REGULATOR_VMODEM,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -241,6 +244,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VPLL] = {
.desc = {
.name = "db8500-vpll",
+ .of_match = of_match_ptr("db8500_vpll"),
.id = DB8500_REGULATOR_VPLL,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -250,6 +254,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VSMPS1] = {
.desc = {
.name = "db8500-vsmps1",
+ .of_match = of_match_ptr("db8500_vsmps1"),
.id = DB8500_REGULATOR_VSMPS1,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -259,6 +264,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VSMPS2] = {
.desc = {
.name = "db8500-vsmps2",
+ .of_match = of_match_ptr("db8500_vsmps2"),
.id = DB8500_REGULATOR_VSMPS2,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -271,6 +277,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VSMPS3] = {
.desc = {
.name = "db8500-vsmps3",
+ .of_match = of_match_ptr("db8500_vsmps3"),
.id = DB8500_REGULATOR_VSMPS3,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -280,6 +287,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VRF1] = {
.desc = {
.name = "db8500-vrf1",
+ .of_match = of_match_ptr("db8500_vrf1"),
.id = DB8500_REGULATOR_VRF1,
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
@@ -289,6 +297,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
.desc = {
.name = "db8500-sva-mmdsp",
+ .of_match = of_match_ptr("db8500_sva_mmdsp"),
.id = DB8500_REGULATOR_SWITCH_SVAMMDSP,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -299,6 +308,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
.desc = {
.name = "db8500-sva-mmdsp-ret",
+ .of_match = of_match_ptr("db8500_sva_mmdsp_ret"),
.id = DB8500_REGULATOR_SWITCH_SVAMMDSPRET,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -310,6 +320,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
.desc = {
.name = "db8500-sva-pipe",
+ .of_match = of_match_ptr("db8500_sva_pipe"),
.id = DB8500_REGULATOR_SWITCH_SVAPIPE,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -320,6 +331,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
.desc = {
.name = "db8500-sia-mmdsp",
+ .of_match = of_match_ptr("db8500_sia_mmdsp"),
.id = DB8500_REGULATOR_SWITCH_SIAMMDSP,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -330,6 +342,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
.desc = {
.name = "db8500-sia-mmdsp-ret",
+ .of_match = of_match_ptr("db8500_sia_mmdsp_ret"),
.id = DB8500_REGULATOR_SWITCH_SIAMMDSPRET,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -341,6 +354,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
.desc = {
.name = "db8500-sia-pipe",
+ .of_match = of_match_ptr("db8500_sia_pipe"),
.id = DB8500_REGULATOR_SWITCH_SIAPIPE,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -351,6 +365,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_SGA] = {
.desc = {
.name = "db8500-sga",
+ .of_match = of_match_ptr("db8500_sga"),
.id = DB8500_REGULATOR_SWITCH_SGA,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -361,6 +376,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
.desc = {
.name = "db8500-b2r2-mcde",
+ .of_match = of_match_ptr("db8500_b2r2_mcde"),
.id = DB8500_REGULATOR_SWITCH_B2R2_MCDE,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -371,6 +387,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_ESRAM12] = {
.desc = {
.name = "db8500-esram12",
+ .of_match = of_match_ptr("db8500_esram12"),
.id = DB8500_REGULATOR_SWITCH_ESRAM12,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -382,6 +399,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
.desc = {
.name = "db8500-esram12-ret",
+ .of_match = of_match_ptr("db8500_esram12_ret"),
.id = DB8500_REGULATOR_SWITCH_ESRAM12RET,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -393,6 +411,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_ESRAM34] = {
.desc = {
.name = "db8500-esram34",
+ .of_match = of_match_ptr("db8500_esram34"),
.id = DB8500_REGULATOR_SWITCH_ESRAM34,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -404,6 +423,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
.desc = {
.name = "db8500-esram34-ret",
+ .of_match = of_match_ptr("db8500_esram34_ret"),
.id = DB8500_REGULATOR_SWITCH_ESRAM34RET,
.ops = &db8500_regulator_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -414,113 +434,38 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
},
};
-static int db8500_regulator_register(struct platform_device *pdev,
- struct regulator_init_data *init_data,
- int id,
- struct device_node *np)
+static int db8500_regulator_probe(struct platform_device *pdev)
{
+ struct regulator_init_data *db8500_init_data;
struct dbx500_regulator_info *info;
struct regulator_config config = { };
- int err;
-
- /* assign per-regulator data */
- info = &dbx500_regulator_info[id];
- info->dev = &pdev->dev;
-
- config.dev = &pdev->dev;
- config.init_data = init_data;
- config.driver_data = info;
- config.of_node = np;
-
- /* register with the regulator framework */
- info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
- if (IS_ERR(info->rdev)) {
- err = PTR_ERR(info->rdev);
- dev_err(&pdev->dev, "failed to register %s: err %i\n",
- info->desc.name, err);
- return err;
- }
-
- dev_dbg(rdev_get_dev(info->rdev),
- "regulator-%s-probed\n", info->desc.name);
+ struct regulator_dev *rdev;
+ int err, i;
- return 0;
-}
-
-static struct of_regulator_match db8500_regulator_matches[] = {
- { .name = "db8500_vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
- { .name = "db8500_varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
- { .name = "db8500_vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
- { .name = "db8500_vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
- { .name = "db8500_vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
- { .name = "db8500_vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
- { .name = "db8500_vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
- { .name = "db8500_vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
- { .name = "db8500_sva_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
- { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
- { .name = "db8500_sva_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
- { .name = "db8500_sia_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
- { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
- { .name = "db8500_sia_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
- { .name = "db8500_sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
- { .name = "db8500_b2r2_mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
- { .name = "db8500_esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
- { .name = "db8500_esram12_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
- { .name = "db8500_esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
- { .name = "db8500_esram34_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
-};
-
-static int
-db8500_regulator_of_probe(struct platform_device *pdev,
- struct device_node *np)
-{
- int i, err;
+ db8500_init_data = dev_get_platdata(&pdev->dev);
for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
- err = db8500_regulator_register(
- pdev, db8500_regulator_matches[i].init_data,
- i, db8500_regulator_matches[i].of_node);
- if (err)
+ /* assign per-regulator data */
+ info = &dbx500_regulator_info[i];
+
+ config.driver_data = info;
+ config.dev = &pdev->dev;
+ if (db8500_init_data)
+ config.init_data = &db8500_init_data[i];
+
+ rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
+ dev_err(&pdev->dev, "failed to register %s: err %i\n",
+ info->desc.name, err);
return err;
- }
-
- return 0;
-}
-
-static int db8500_regulator_probe(struct platform_device *pdev)
-{
- struct regulator_init_data *db8500_init_data =
- dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
- int i, err;
-
- /* register all regulators */
- if (np) {
- err = of_regulator_match(&pdev->dev, np,
- db8500_regulator_matches,
- ARRAY_SIZE(db8500_regulator_matches));
- if (err < 0) {
- dev_err(&pdev->dev,
- "Error parsing regulator init data: %d\n", err);
- return err;
- }
-
- err = db8500_regulator_of_probe(pdev, np);
- if (err)
- return err;
- } else {
- for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
- err = db8500_regulator_register(pdev,
- &db8500_init_data[i],
- i, NULL);
- if (err)
- return err;
}
+ dev_dbg(&pdev->dev, "regulator-%s-probed\n", info->desc.name);
}
- err = ux500_regulator_debug_init(pdev,
- dbx500_regulator_info,
- ARRAY_SIZE(dbx500_regulator_info));
+ ux500_regulator_debug_init(pdev, dbx500_regulator_info,
+ ARRAY_SIZE(dbx500_regulator_info));
return 0;
}
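
The db8500-prcmu rework relies on descriptor-based device-tree matching: once a regulator_desc carries .of_match, the core locates the matching child node and its init data by itself, so of_regulator_match() and the hand-rolled per-regulator registration helper can go away. A sketch under those assumptions; "my_ldo" and my_ldo_ops are illustrative:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

static const struct regulator_desc my_ldo_desc = {
	.name		= "my-ldo",
	.of_match	= of_match_ptr("my_ldo"),	/* matched against a DT child node */
	.ops		= &my_ldo_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
};

static int my_probe(struct platform_device *pdev)
{
	struct regulator_config config = { .dev = &pdev->dev };
	struct regulator_dev *rdev;

	/* No of_regulator_match() loop: the core resolves .of_match. */
	rdev = devm_regulator_register(&pdev->dev, &my_ldo_desc, &config);
	return PTR_ERR_OR_ZERO(rdev);
}
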
diff --git a/drivers/regulator/dbx500-prcmu.h b/drivers/regulator/dbx500-prcmu.h
index c8e51ace9f06..6e20dab611ac 100644
--- a/drivers/regulator/dbx500-prcmu.h
+++ b/drivers/regulator/dbx500-prcmu.h
@@ -15,18 +15,14 @@
/**
* struct dbx500_regulator_info - dbx500 regulator information
- * @dev: device pointer
* @desc: regulator description
- * @rdev: regulator device pointer
* @is_enabled: status of the regulator
* @epod_id: id for EPOD (power domain)
* @is_ramret: RAM retention switch for EPOD (power domain)
*
*/
struct dbx500_regulator_info {
- struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *rdev;
bool is_enabled;
u16 epod_id;
bool is_ramret;
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 771a06d1900d..dbe477da4e55 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -1,17 +1,13 @@
-/*
- * FAN53555 Fairchild Digitally Programmable TinyBuck Regulator Driver.
- *
- * Supported Part Numbers:
- * FAN53555UC00X/01X/03X/04X/05X
- *
- * Copyright (c) 2012 Marvell Technology Ltd.
- * Yunfan Zhang <yfzhang@marvell.com>
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// FAN53555 Fairchild Digitally Programmable TinyBuck Regulator Driver.
+//
+// Supported Part Numbers:
+// FAN53555UC00X/01X/03X/04X/05X
+//
+// Copyright (c) 2012 Marvell Technology Ltd.
+// Yunfan Zhang <yfzhang@marvell.com>
+
#include <linux/module.h>
#include <linux/param.h>
#include <linux/err.h>
@@ -91,10 +87,8 @@ enum {
struct fan53555_device_info {
enum fan53555_vendor vendor;
- struct regmap *regmap;
struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *rdev;
struct regulator_init_data *regulator;
/* IC Type and Rev */
int chip_id;
@@ -106,8 +100,6 @@ struct fan53555_device_info {
unsigned int vsel_min;
unsigned int vsel_step;
unsigned int vsel_count;
- /* Voltage slew rate limiting */
- unsigned int slew_rate;
/* Mode */
unsigned int mode_reg;
unsigned int mode_mask;
@@ -125,7 +117,7 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
ret = regulator_map_voltage_linear(rdev, uV, uV);
if (ret < 0)
return ret;
- ret = regmap_update_bits(di->regmap, di->sleep_reg,
+ ret = regmap_update_bits(rdev->regmap, di->sleep_reg,
di->desc.vsel_mask, ret);
if (ret < 0)
return ret;
@@ -140,7 +132,7 @@ static int fan53555_set_suspend_enable(struct regulator_dev *rdev)
{
struct fan53555_device_info *di = rdev_get_drvdata(rdev);
- return regmap_update_bits(di->regmap, di->sleep_reg,
+ return regmap_update_bits(rdev->regmap, di->sleep_reg,
VSEL_BUCK_EN, VSEL_BUCK_EN);
}
@@ -148,7 +140,7 @@ static int fan53555_set_suspend_disable(struct regulator_dev *rdev)
{
struct fan53555_device_info *di = rdev_get_drvdata(rdev);
- return regmap_update_bits(di->regmap, di->sleep_reg,
+ return regmap_update_bits(rdev->regmap, di->sleep_reg,
VSEL_BUCK_EN, 0);
}
@@ -158,11 +150,11 @@ static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_FAST:
- regmap_update_bits(di->regmap, di->mode_reg,
+ regmap_update_bits(rdev->regmap, di->mode_reg,
di->mode_mask, di->mode_mask);
break;
case REGULATOR_MODE_NORMAL:
- regmap_update_bits(di->regmap, di->vol_reg, di->mode_mask, 0);
+ regmap_update_bits(rdev->regmap, di->vol_reg, di->mode_mask, 0);
break;
default:
return -EINVAL;
@@ -176,7 +168,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
unsigned int val;
int ret = 0;
- ret = regmap_read(di->regmap, di->mode_reg, &val);
+ ret = regmap_read(rdev->regmap, di->mode_reg, &val);
if (ret < 0)
return ret;
if (val & di->mode_mask)
@@ -213,7 +205,7 @@ static int fan53555_set_ramp(struct regulator_dev *rdev, int ramp)
return -EINVAL;
}
- return regmap_update_bits(di->regmap, FAN53555_CONTROL,
+ return regmap_update_bits(rdev->regmap, FAN53555_CONTROL,
CTL_SLEW_MASK, regval << CTL_SLEW_SHIFT);
}
@@ -396,6 +388,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
struct regulator_config *config)
{
struct regulator_desc *rdesc = &di->desc;
+ struct regulator_dev *rdev;
rdesc->name = "fan53555-reg";
rdesc->supply_name = "vin";
@@ -410,8 +403,8 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->vsel_mask = di->vsel_count - 1;
rdesc->owner = THIS_MODULE;
- di->rdev = devm_regulator_register(di->dev, &di->desc, config);
- return PTR_ERR_OR_ZERO(di->rdev);
+ rdev = devm_regulator_register(di->dev, &di->desc, config);
+ return PTR_ERR_OR_ZERO(rdev);
}
static const struct regmap_config fan53555_regmap_config = {
@@ -466,6 +459,7 @@ static int fan53555_regulator_probe(struct i2c_client *client,
struct fan53555_device_info *di;
struct fan53555_platform_data *pdata;
struct regulator_config config = { };
+ struct regmap *regmap;
unsigned int val;
int ret;
@@ -502,22 +496,22 @@ static int fan53555_regulator_probe(struct i2c_client *client,
di->vendor = id->driver_data;
}
- di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
- if (IS_ERR(di->regmap)) {
+ regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
+ if (IS_ERR(regmap)) {
dev_err(&client->dev, "Failed to allocate regmap!\n");
- return PTR_ERR(di->regmap);
+ return PTR_ERR(regmap);
}
di->dev = &client->dev;
i2c_set_clientdata(client, di);
/* Get chip ID */
- ret = regmap_read(di->regmap, FAN53555_ID1, &val);
+ ret = regmap_read(regmap, FAN53555_ID1, &val);
if (ret < 0) {
dev_err(&client->dev, "Failed to get chip ID!\n");
return ret;
}
di->chip_id = val & DIE_ID;
/* Get chip revision */
- ret = regmap_read(di->regmap, FAN53555_ID2, &val);
+ ret = regmap_read(regmap, FAN53555_ID2, &val);
if (ret < 0) {
dev_err(&client->dev, "Failed to get chip Rev!\n");
return ret;
@@ -534,7 +528,7 @@ static int fan53555_regulator_probe(struct i2c_client *client,
/* Register regulator */
config.dev = di->dev;
config.init_data = di->regulator;
- config.regmap = di->regmap;
+ config.regmap = regmap;
config.driver_data = di;
config.of_node = np;
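
The fan53555 hunks stop caching the regmap in the driver's private structure and fetch it from rdev->regmap instead, which the core fills in from config.regmap at registration time. A minimal sketch of the pattern; my_set_suspend_enable, MY_SLEEP_REG and MY_EN_MASK are illustrative:

#include <linux/regmap.h>
#include <linux/regulator/driver.h>

static int my_set_suspend_enable(struct regulator_dev *rdev)
{
	/* rdev->regmap was populated by the core from config.regmap,
	 * so the driver no longer needs its own copy. */
	return regmap_update_bits(rdev->regmap, MY_SLEEP_REG,
				  MY_EN_MASK, MY_EN_MASK);
}
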
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 6157001df0a4..f50d86a66138 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -36,7 +36,6 @@
struct gpio_regulator_data {
struct regulator_desc desc;
- struct regulator_dev *dev;
struct gpio_desc **gpiods;
int nr_gpios;
@@ -125,7 +124,7 @@ static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
return 0;
}
-static struct regulator_ops gpio_regulator_voltage_ops = {
+static const struct regulator_ops gpio_regulator_voltage_ops = {
.get_voltage = gpio_regulator_get_value,
.set_voltage = gpio_regulator_set_voltage,
.list_voltage = gpio_regulator_list_voltage,
@@ -221,7 +220,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
return config;
}
-static struct regulator_ops gpio_regulator_current_ops = {
+static const struct regulator_ops gpio_regulator_current_ops = {
.get_current_limit = gpio_regulator_get_value,
.set_current_limit = gpio_regulator_set_current_limit,
};
@@ -233,6 +232,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct gpio_regulator_data *drvdata;
struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
enum gpiod_flags gflags;
int ptr, ret, state, i;
@@ -326,9 +326,9 @@ static int gpio_regulator_probe(struct platform_device *pdev)
if (IS_ERR(cfg.ena_gpiod))
return PTR_ERR(cfg.ena_gpiod);
- drvdata->dev = regulator_register(&drvdata->desc, &cfg);
- if (IS_ERR(drvdata->dev)) {
- ret = PTR_ERR(drvdata->dev);
+ rdev = devm_regulator_register(dev, &drvdata->desc, &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(dev, "Failed to register regulator: %d\n", ret);
return ret;
}
@@ -338,15 +338,6 @@ static int gpio_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int gpio_regulator_remove(struct platform_device *pdev)
-{
- struct gpio_regulator_data *drvdata = platform_get_drvdata(pdev);
-
- regulator_unregister(drvdata->dev);
-
- return 0;
-}
-
#if defined(CONFIG_OF)
static const struct of_device_id regulator_gpio_of_match[] = {
{ .compatible = "regulator-gpio", },
@@ -357,7 +348,6 @@ MODULE_DEVICE_TABLE(of, regulator_gpio_of_match);
static struct platform_driver gpio_regulator_driver = {
.probe = gpio_regulator_probe,
- .remove = gpio_regulator_remove,
.driver = {
.name = "gpio-regulator",
.of_match_table = of_match_ptr(regulator_gpio_of_match),
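
gpio-regulator moves from regulator_register() plus an explicit .remove callback to devm_regulator_register(), so devres unregisters the regulator automatically on unbind and the remove path disappears. A sketch of the managed pattern, with my_desc and my_cfg as illustrative names:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

static int my_probe(struct platform_device *pdev)
{
	struct regulator_dev *rdev;

	/* Managed registration: no .remove handler, no regulator_unregister(). */
	rdev = devm_regulator_register(&pdev->dev, &my_desc, &my_cfg);
	if (IS_ERR(rdev)) {
		dev_err(&pdev->dev, "Failed to register regulator: %ld\n",
			PTR_ERR(rdev));
		return PTR_ERR(rdev);
	}
	return 0;
}
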
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 259c3a865ac6..5ac3d7c29725 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -1,17 +1,13 @@
-/*
- * Device driver for regulators in Hi6421 IC
- *
- * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
- * http://www.hisilicon.com
- * Copyright (c) <2013-2014> Linaro Ltd.
- * http://www.linaro.org
- *
- * Author: Guodong Xu <guodong.xu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Device driver for regulators in Hi6421 IC
+//
+// Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
+// http://www.hisilicon.com
+// Copyright (c) <2013-2014> Linaro Ltd.
+// http://www.linaro.org
+//
+// Author: Guodong Xu <guodong.xu@linaro.org>
#include <linux/slab.h>
#include <linux/device.h>
@@ -78,43 +74,6 @@ enum hi6421_regulator_id {
HI6421_NUM_REGULATORS,
};
-#define HI6421_REGULATOR_OF_MATCH(_name, id) \
-{ \
- .name = #_name, \
- .driver_data = (void *) HI6421_##id, \
-}
-
-static struct of_regulator_match hi6421_regulator_match[] = {
- HI6421_REGULATOR_OF_MATCH(hi6421_vout0, LDO0),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout1, LDO1),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout2, LDO2),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout3, LDO3),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout4, LDO4),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout5, LDO5),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout6, LDO6),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout7, LDO7),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout8, LDO8),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout9, LDO9),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout10, LDO10),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout11, LDO11),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout12, LDO12),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout13, LDO13),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout14, LDO14),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout15, LDO15),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout16, LDO16),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout17, LDO17),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout18, LDO18),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout19, LDO19),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout20, LDO20),
- HI6421_REGULATOR_OF_MATCH(hi6421_vout_audio, LDOAUDIO),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck0, BUCK0),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck1, BUCK1),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck2, BUCK2),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck3, BUCK3),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck4, BUCK4),
- HI6421_REGULATOR_OF_MATCH(hi6421_buck5, BUCK5),
-};
-
/* LDO 0, 4~7, 9~14, 16~20 have same voltage table. */
static const unsigned int ldo_0_voltages[] = {
1500000, 1800000, 2400000, 2500000,
@@ -157,6 +116,7 @@ static const struct regulator_ops hi6421_buck345_ops;
#define HI6421_LDO_ENABLE_TIME (350)
/*
* _id - LDO id name string
+ * _match - of match name string
* v_table - voltage table
* vreg - voltage select register
* vmask - voltage select mask
@@ -166,11 +126,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* ecomask - eco mode mask
 * ecoamp - eco mode load upper limit in uA
*/
-#define HI6421_LDO(_id, v_table, vreg, vmask, ereg, emask, \
+#define HI6421_LDO(_id, _match, v_table, vreg, vmask, ereg, emask, \
odelay, ecomask, ecoamp) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -191,6 +153,7 @@ static const struct regulator_ops hi6421_buck345_ops;
/* HI6421 LDO1~3 are linear voltage regulators at fixed uV_step
*
* _id - LDO id name string
+ * _match - of match name string
* _min_uV - minimum voltage supported in uV
 * n_volt - number of voltages available
* vstep - voltage increase in each linear step in uV
@@ -202,11 +165,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* ecomask - eco mode mask
 * ecoamp - eco mode load upper limit in uA
*/
-#define HI6421_LDO_LINEAR(_id, _min_uV, n_volt, vstep, vreg, vmask, \
+#define HI6421_LDO_LINEAR(_id, _match, _min_uV, n_volt, vstep, vreg, vmask,\
ereg, emask, odelay, ecomask, ecoamp) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_ldo_linear_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -228,6 +193,7 @@ static const struct regulator_ops hi6421_buck345_ops;
/* HI6421 LDOAUDIO is a linear voltage regulator with two 4-step ranges
*
* _id - LDO id name string
+ * _match - of match name string
 * n_volt - number of voltages available
* volt_ranges - array of regulator_linear_range
* vstep - voltage increase in each linear step in uV
@@ -239,11 +205,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* ecomask - eco mode mask
 * ecoamp - eco mode load upper limit in uA
*/
-#define HI6421_LDO_LINEAR_RANGE(_id, n_volt, volt_ranges, vreg, vmask, \
+#define HI6421_LDO_LINEAR_RANGE(_id, _match, n_volt, volt_ranges, vreg, vmask,\
ereg, emask, odelay, ecomask, ecoamp) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_ldo_linear_range_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -265,6 +233,7 @@ static const struct regulator_ops hi6421_buck345_ops;
/* HI6421 BUCK0/1/2 are linear voltage regulators at fixed uV_step
*
* _id - BUCK0/1/2 id name string
+ * _match - of match name string
* vreg - voltage select register
* vmask - voltage select mask
* ereg - enable register
@@ -273,11 +242,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* etime - enable time
* odelay - off/on delay time in uS
*/
-#define HI6421_BUCK012(_id, vreg, vmask, ereg, emask, sleepmask, \
+#define HI6421_BUCK012(_id, _match, vreg, vmask, ereg, emask, sleepmask,\
etime, odelay) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_buck012_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -299,6 +270,7 @@ static const struct regulator_ops hi6421_buck345_ops;
* that it supports SLEEP mode, so has different .ops.
*
* _id - LDO id name string
+ * _match - of match name string
* v_table - voltage table
* vreg - voltage select register
* vmask - voltage select mask
@@ -307,11 +279,13 @@ static const struct regulator_ops hi6421_buck345_ops;
* odelay - off/on delay time in uS
* sleepmask - mask of sleep mode
*/
-#define HI6421_BUCK345(_id, v_table, vreg, vmask, ereg, emask, \
+#define HI6421_BUCK345(_id, _match, v_table, vreg, vmask, ereg, emask, \
odelay, sleepmask) \
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
+ .of_match = of_match_ptr(#_match), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421_buck345_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -331,59 +305,63 @@ static const struct regulator_ops hi6421_buck345_ops;
/* HI6421 regulator information */
static struct hi6421_regulator_info
hi6421_regulator_info[HI6421_NUM_REGULATORS] = {
- HI6421_LDO(LDO0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10,
+ HI6421_LDO(LDO0, hi6421_vout0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10,
10000, 0x20, 8000),
- HI6421_LDO_LINEAR(LDO1, 1700000, 4, 100000, 0x21, 0x03, 0x21, 0x10,
- 10000, 0x20, 5000),
- HI6421_LDO_LINEAR(LDO2, 1050000, 8, 50000, 0x22, 0x07, 0x22, 0x10,
- 20000, 0x20, 8000),
- HI6421_LDO_LINEAR(LDO3, 1050000, 8, 50000, 0x23, 0x07, 0x23, 0x10,
- 20000, 0x20, 8000),
- HI6421_LDO(LDO4, ldo_0_voltages, 0x24, 0x07, 0x24, 0x10,
+ HI6421_LDO_LINEAR(LDO1, hi6421_vout1, 1700000, 4, 100000, 0x21, 0x03,
+ 0x21, 0x10, 10000, 0x20, 5000),
+ HI6421_LDO_LINEAR(LDO2, hi6421_vout2, 1050000, 8, 50000, 0x22, 0x07,
+ 0x22, 0x10, 20000, 0x20, 8000),
+ HI6421_LDO_LINEAR(LDO3, hi6421_vout3, 1050000, 8, 50000, 0x23, 0x07,
+ 0x23, 0x10, 20000, 0x20, 8000),
+ HI6421_LDO(LDO4, hi6421_vout4, ldo_0_voltages, 0x24, 0x07, 0x24, 0x10,
20000, 0x20, 8000),
- HI6421_LDO(LDO5, ldo_0_voltages, 0x25, 0x07, 0x25, 0x10,
+ HI6421_LDO(LDO5, hi6421_vout5, ldo_0_voltages, 0x25, 0x07, 0x25, 0x10,
20000, 0x20, 8000),
- HI6421_LDO(LDO6, ldo_0_voltages, 0x26, 0x07, 0x26, 0x10,
+ HI6421_LDO(LDO6, hi6421_vout6, ldo_0_voltages, 0x26, 0x07, 0x26, 0x10,
20000, 0x20, 8000),
- HI6421_LDO(LDO7, ldo_0_voltages, 0x27, 0x07, 0x27, 0x10,
+ HI6421_LDO(LDO7, hi6421_vout7, ldo_0_voltages, 0x27, 0x07, 0x27, 0x10,
20000, 0x20, 5000),
- HI6421_LDO(LDO8, ldo_8_voltages, 0x28, 0x07, 0x28, 0x10,
+ HI6421_LDO(LDO8, hi6421_vout8, ldo_8_voltages, 0x28, 0x07, 0x28, 0x10,
20000, 0x20, 8000),
- HI6421_LDO(LDO9, ldo_0_voltages, 0x29, 0x07, 0x29, 0x10,
+ HI6421_LDO(LDO9, hi6421_vout9, ldo_0_voltages, 0x29, 0x07, 0x29, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO10, ldo_0_voltages, 0x2a, 0x07, 0x2a, 0x10,
+ HI6421_LDO(LDO10, hi6421_vout10, ldo_0_voltages, 0x2a, 0x07, 0x2a, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO11, ldo_0_voltages, 0x2b, 0x07, 0x2b, 0x10,
+ HI6421_LDO(LDO11, hi6421_vout11, ldo_0_voltages, 0x2b, 0x07, 0x2b, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO12, ldo_0_voltages, 0x2c, 0x07, 0x2c, 0x10,
+ HI6421_LDO(LDO12, hi6421_vout12, ldo_0_voltages, 0x2c, 0x07, 0x2c, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO13, ldo_0_voltages, 0x2d, 0x07, 0x2d, 0x10,
+ HI6421_LDO(LDO13, hi6421_vout13, ldo_0_voltages, 0x2d, 0x07, 0x2d, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO14, ldo_0_voltages, 0x2e, 0x07, 0x2e, 0x10,
+ HI6421_LDO(LDO14, hi6421_vout14, ldo_0_voltages, 0x2e, 0x07, 0x2e, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO15, ldo_8_voltages, 0x2f, 0x07, 0x2f, 0x10,
+ HI6421_LDO(LDO15, hi6421_vout15, ldo_8_voltages, 0x2f, 0x07, 0x2f, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO16, ldo_0_voltages, 0x30, 0x07, 0x30, 0x10,
+ HI6421_LDO(LDO16, hi6421_vout16, ldo_0_voltages, 0x30, 0x07, 0x30, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO17, ldo_0_voltages, 0x31, 0x07, 0x31, 0x10,
+ HI6421_LDO(LDO17, hi6421_vout17, ldo_0_voltages, 0x31, 0x07, 0x31, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO18, ldo_0_voltages, 0x32, 0x07, 0x32, 0x10,
+ HI6421_LDO(LDO18, hi6421_vout18, ldo_0_voltages, 0x32, 0x07, 0x32, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO19, ldo_0_voltages, 0x33, 0x07, 0x33, 0x10,
+ HI6421_LDO(LDO19, hi6421_vout19, ldo_0_voltages, 0x33, 0x07, 0x33, 0x10,
40000, 0x20, 8000),
- HI6421_LDO(LDO20, ldo_0_voltages, 0x34, 0x07, 0x34, 0x10,
+ HI6421_LDO(LDO20, hi6421_vout20, ldo_0_voltages, 0x34, 0x07, 0x34, 0x10,
40000, 0x20, 8000),
- HI6421_LDO_LINEAR_RANGE(LDOAUDIO, 8, ldo_audio_volt_range, 0x36,
- 0x70, 0x36, 0x01, 40000, 0x02, 5000),
- HI6421_BUCK012(BUCK0, 0x0d, 0x7f, 0x0c, 0x01, 0x10, 400, 20000),
- HI6421_BUCK012(BUCK1, 0x0f, 0x7f, 0x0e, 0x01, 0x10, 400, 20000),
- HI6421_BUCK012(BUCK2, 0x11, 0x7f, 0x10, 0x01, 0x10, 350, 100),
- HI6421_BUCK345(BUCK3, buck_3_voltages, 0x13, 0x07, 0x12, 0x01,
- 20000, 0x10),
- HI6421_BUCK345(BUCK4, buck_4_voltages, 0x15, 0x07, 0x14, 0x01,
- 20000, 0x10),
- HI6421_BUCK345(BUCK5, buck_5_voltages, 0x17, 0x07, 0x16, 0x01,
- 20000, 0x10),
+ HI6421_LDO_LINEAR_RANGE(LDOAUDIO, hi6421_vout_audio, 8,
+ ldo_audio_volt_range, 0x36, 0x70, 0x36, 0x01,
+ 40000, 0x02, 5000),
+ HI6421_BUCK012(BUCK0, hi6421_buck0, 0x0d, 0x7f, 0x0c, 0x01, 0x10, 400,
+ 20000),
+ HI6421_BUCK012(BUCK1, hi6421_buck1, 0x0f, 0x7f, 0x0e, 0x01, 0x10, 400,
+ 20000),
+ HI6421_BUCK012(BUCK2, hi6421_buck2, 0x11, 0x7f, 0x10, 0x01, 0x10, 350,
+ 100),
+ HI6421_BUCK345(BUCK3, hi6421_buck3, buck_3_voltages, 0x13, 0x07, 0x12,
+ 0x01, 20000, 0x10),
+ HI6421_BUCK345(BUCK4, hi6421_buck4, buck_4_voltages, 0x15, 0x07, 0x14,
+ 0x01, 20000, 0x10),
+ HI6421_BUCK345(BUCK5, hi6421_buck5, buck_5_voltages, 0x17, 0x07, 0x16,
+ 0x01, 20000, 0x10),
};
static int hi6421_regulator_enable(struct regulator_dev *rdev)
@@ -552,42 +530,14 @@ static const struct regulator_ops hi6421_buck345_ops = {
.set_mode = hi6421_regulator_buck_set_mode,
};
-static int hi6421_regulator_register(struct platform_device *pdev,
- struct regmap *rmap,
- struct regulator_init_data *init_data,
- int id, struct device_node *np)
-{
- struct hi6421_regulator_info *info = NULL;
- struct regulator_config config = { };
- struct regulator_dev *rdev;
-
- /* assign per-regulator data */
- info = &hi6421_regulator_info[id];
-
- config.dev = &pdev->dev;
- config.init_data = init_data;
- config.driver_data = info;
- config.regmap = rmap;
- config.of_node = np;
-
- /* register regulator with framework */
- rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
- if (IS_ERR(rdev)) {
- dev_err(&pdev->dev, "failed to register regulator %s\n",
- info->desc.name);
- return PTR_ERR(rdev);
- }
-
- return 0;
-}
-
static int hi6421_regulator_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct device_node *np;
- struct hi6421_pmic *pmic;
+ struct hi6421_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct hi6421_regulator_pdata *pdata;
- int i, ret = 0;
+ struct hi6421_regulator_info *info;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ int i;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -595,27 +545,21 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
mutex_init(&pdata->lock);
platform_set_drvdata(pdev, pdata);
- np = of_get_child_by_name(dev->parent->of_node, "regulators");
- if (!np)
- return -ENODEV;
-
- ret = of_regulator_match(dev, np,
- hi6421_regulator_match,
- ARRAY_SIZE(hi6421_regulator_match));
- of_node_put(np);
- if (ret < 0) {
- dev_err(dev, "Error parsing regulator init data: %d\n", ret);
- return ret;
- }
-
- pmic = dev_get_drvdata(dev->parent);
-
for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
- ret = hi6421_regulator_register(pdev, pmic->regmap,
- hi6421_regulator_match[i].init_data, i,
- hi6421_regulator_match[i].of_node);
- if (ret)
- return ret;
+ /* assign per-regulator data */
+ info = &hi6421_regulator_info[i];
+
+ config.dev = pdev->dev.parent;
+ config.driver_data = info;
+ config.regmap = pmic->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ return PTR_ERR(rdev);
+ }
}
return 0;
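
The hi6421 macros add .regulators_node alongside .of_match, telling the core to descend into the "regulators" child of the parent node before matching each descriptor by name; that is what lets the probe drop of_regulator_match() and the explicit "regulators" node lookup. A sketch of the two fields together, assuming a DT shape like the comment below (all names illustrative):

/* Assumed DT shape (illustrative):
 *	pmic {
 *		regulators {
 *			my_ldo: my_ldo { ... };
 *		};
 *	};
 */
static const struct regulator_desc my_desc = {
	.name            = "MY_LDO",
	.of_match        = of_match_ptr("my_ldo"),
	.regulators_node = of_match_ptr("regulators"),
	.ops             = &my_ldo_ops,
	.type            = REGULATOR_VOLTAGE,
	.owner           = THIS_MODULE,
};
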
diff --git a/drivers/regulator/hi6421v530-regulator.c b/drivers/regulator/hi6421v530-regulator.c
index c09bc71538a5..06ae65199afd 100644
--- a/drivers/regulator/hi6421v530-regulator.c
+++ b/drivers/regulator/hi6421v530-regulator.c
@@ -1,18 +1,14 @@
-/*
- * Device driver for regulators in Hi6421V530 IC
- *
- * Copyright (c) <2017> HiSilicon Technologies Co., Ltd.
- * http://www.hisilicon.com
- * Copyright (c) <2017> Linaro Ltd.
- * http://www.linaro.org
- *
- * Author: Wang Xiaoyin <hw.wangxiaoyin@hisilicon.com>
- * Guodong Xu <guodong.xu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Device driver for regulators in Hi6421V530 IC
+//
+// Copyright (c) <2017> HiSilicon Technologies Co., Ltd.
+// http://www.hisilicon.com
+// Copyright (c) <2017> Linaro Ltd.
+// http://www.linaro.org
+//
+// Author: Wang Xiaoyin <hw.wangxiaoyin@hisilicon.com>
+// Guodong Xu <guodong.xu@linaro.org>
#include <linux/mfd/hi6421-pmic.h>
#include <linux/module.h>
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
index bba24a6fdb1e..ac2ee2030211 100644
--- a/drivers/regulator/hi655x-regulator.c
+++ b/drivers/regulator/hi655x-regulator.c
@@ -1,16 +1,12 @@
-/*
- * Device driver for regulators in Hi655x IC
- *
- * Copyright (c) 2016 Hisilicon.
- *
- * Authors:
- * Chen Feng <puck.chen@hisilicon.com>
- * Fei Wang <w.f@huawei.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Device driver for regulators in Hi655x IC
+//
+// Copyright (c) 2016 Hisilicon.
+//
+// Authors:
+// Chen Feng <puck.chen@hisilicon.com>
+// Fei Wang <w.f@huawei.com>
#include <linux/bitops.h>
#include <linux/device.h>
@@ -28,7 +24,6 @@
struct hi655x_regulator {
unsigned int disable_reg;
unsigned int status_reg;
- unsigned int ctrl_mask;
struct regulator_desc rdesc;
};
@@ -77,22 +72,18 @@ enum hi655x_regulator_id {
static int hi655x_is_enabled(struct regulator_dev *rdev)
{
unsigned int value = 0;
-
struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
regmap_read(rdev->regmap, regulator->status_reg, &value);
- return (value & BIT(regulator->ctrl_mask));
+ return (value & rdev->desc->enable_mask);
}
static int hi655x_disable(struct regulator_dev *rdev)
{
- int ret = 0;
-
struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
- ret = regmap_write(rdev->regmap, regulator->disable_reg,
- BIT(regulator->ctrl_mask));
- return ret;
+ return regmap_write(rdev->regmap, regulator->disable_reg,
+ rdev->desc->enable_mask);
}
static const struct regulator_ops hi655x_regulator_ops = {
@@ -132,7 +123,6 @@ static const struct regulator_ops hi655x_ldo_linear_ops = {
}, \
.disable_reg = HI655X_BUS_ADDR(dreg), \
.status_reg = HI655X_BUS_ADDR(sreg), \
- .ctrl_mask = cmask, \
}
#define HI655X_LDO_LINEAR(_ID, vreg, vmask, ereg, dreg, \
@@ -155,10 +145,9 @@ static const struct regulator_ops hi655x_ldo_linear_ops = {
}, \
.disable_reg = HI655X_BUS_ADDR(dreg), \
.status_reg = HI655X_BUS_ADDR(sreg), \
- .ctrl_mask = cmask, \
}
-static struct hi655x_regulator regulators[] = {
+static const struct hi655x_regulator regulators[] = {
HI655X_LDO_LINEAR(LDO2, 0x72, 0x07, 0x29, 0x2a, 0x2b, 0x01,
2500000, 8, 100000),
HI655X_LDO(LDO7, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x06, ldo7_voltages),
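
hi655x drops its private ctrl_mask field and reads the bit from rdev->desc->enable_mask, so the enable bit is described exactly once, in the descriptor. A minimal sketch; MY_STATUS_REG is an illustrative name:

#include <linux/regmap.h>
#include <linux/regulator/driver.h>

static int my_is_enabled(struct regulator_dev *rdev)
{
	unsigned int val = 0;

	/* One source of truth: the descriptor's enable_mask. */
	regmap_read(rdev->regmap, MY_STATUS_REG, &val);
	return !!(val & rdev->desc->enable_mask);
}
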
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index c876e161052a..e02fdd1dd092 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -48,7 +48,7 @@ static const int ldo_cont_enable_time[] = {
static int lm363x_regulator_enable_time(struct regulator_dev *rdev)
{
enum lm363x_regulator_id id = rdev_get_id(rdev);
- u8 val, addr, mask;
+ unsigned int val, addr, mask;
switch (id) {
case LM3631_LDO_CONT:
@@ -71,7 +71,7 @@ static int lm363x_regulator_enable_time(struct regulator_dev *rdev)
return 0;
}
- if (regmap_read(rdev->regmap, addr, (unsigned int *)&val))
+ if (regmap_read(rdev->regmap, addr, &val))
return -EINVAL;
val = (val & mask) >> LM3631_ENTIME_SHIFT;
@@ -82,13 +82,13 @@ static int lm363x_regulator_enable_time(struct regulator_dev *rdev)
return ENABLE_TIME_USEC * val;
}
-static struct regulator_ops lm363x_boost_voltage_table_ops = {
+static const struct regulator_ops lm363x_boost_voltage_table_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct regulator_ops lm363x_regulator_voltage_table_ops = {
+static const struct regulator_ops lm363x_regulator_voltage_table_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
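
The lm363x change widens val, addr and mask to unsigned int because regmap_read() stores a full unsigned int through its pointer; the old cast from u8 * could overwrite bytes adjacent to the variable on the stack. A minimal sketch of the correct calling convention; MY_REG and MY_MASK are illustrative:

#include <linux/regmap.h>

static int my_read_field(struct regmap *map)
{
	unsigned int val;	/* regmap_read() needs unsigned int *, never a cast u8 * */
	int ret;

	ret = regmap_read(map, MY_REG, &val);
	if (ret)
		return ret;
	return val & MY_MASK;
}
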
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 14fd38807134..2e16a6ab491d 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -372,10 +372,13 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
if ((flag0 & (0x4 << icnt))
&& (pchip->irqmask & (0x04 << icnt))
- && (pchip->rdev[icnt] != NULL))
+ && (pchip->rdev[icnt] != NULL)) {
+ regulator_lock(pchip->rdev[icnt]);
regulator_notifier_call_chain(pchip->rdev[icnt],
LP8755_EVENT_PWR_FAULT,
NULL);
+ regulator_unlock(pchip->rdev[icnt]);
+ }
/* read flag1 register */
ret = lp8755_read(pchip, 0x0E, &flag1);
@@ -389,18 +392,24 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
/* send OCP event to all regulator devices */
if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
- if (pchip->rdev[icnt] != NULL)
+ if (pchip->rdev[icnt] != NULL) {
+ regulator_lock(pchip->rdev[icnt]);
regulator_notifier_call_chain(pchip->rdev[icnt],
LP8755_EVENT_OCP,
NULL);
+ regulator_unlock(pchip->rdev[icnt]);
+ }
/* send OVP event to all regulator devices */
if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
- if (pchip->rdev[icnt] != NULL)
+ if (pchip->rdev[icnt] != NULL) {
+ regulator_lock(pchip->rdev[icnt]);
regulator_notifier_call_chain(pchip->rdev[icnt],
LP8755_EVENT_OVP,
NULL);
+ regulator_unlock(pchip->rdev[icnt]);
+ }
return IRQ_HANDLED;
err_i2c:
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index 4ed41731a5b1..81eb4b890c0c 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -34,6 +34,10 @@
.ramp_delay = _delay, \
.linear_ranges = _lr, \
.n_linear_ranges = ARRAY_SIZE(_lr), \
+ .curr_table = lp87565_buck_uA, \
+ .n_current_limits = ARRAY_SIZE(lp87565_buck_uA),\
+ .csel_reg = (_cr), \
+ .csel_mask = LP87565_BUCK_CTRL_2_ILIM, \
}, \
.ctrl2_reg = _cr, \
}
@@ -102,44 +106,7 @@ static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
return 0;
}
-static int lp87565_buck_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- int id = rdev_get_id(rdev);
- struct lp87565 *lp87565 = rdev_get_drvdata(rdev);
- int i;
-
- for (i = ARRAY_SIZE(lp87565_buck_uA) - 1; i >= 0; i--) {
- if (lp87565_buck_uA[i] >= min_uA &&
- lp87565_buck_uA[i] <= max_uA)
- return regmap_update_bits(lp87565->regmap,
- regulators[id].ctrl2_reg,
- LP87565_BUCK_CTRL_2_ILIM,
- i << __ffs(LP87565_BUCK_CTRL_2_ILIM));
- }
-
- return -EINVAL;
-}
-
-static int lp87565_buck_get_current_limit(struct regulator_dev *rdev)
-{
- int id = rdev_get_id(rdev);
- struct lp87565 *lp87565 = rdev_get_drvdata(rdev);
- int ret;
- unsigned int val;
-
- ret = regmap_read(lp87565->regmap, regulators[id].ctrl2_reg, &val);
- if (ret)
- return ret;
-
- val = (val & LP87565_BUCK_CTRL_2_ILIM) >>
- __ffs(LP87565_BUCK_CTRL_2_ILIM);
-
- return (val < ARRAY_SIZE(lp87565_buck_uA)) ?
- lp87565_buck_uA[val] : -EINVAL;
-}
-
-/* Operations permitted on BUCK0, BUCK1 */
+/* Operations permitted on BUCKs */
static const struct regulator_ops lp87565_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -150,8 +117,8 @@ static const struct regulator_ops lp87565_buck_ops = {
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = lp87565_buck_set_ramp_delay,
- .set_current_limit = lp87565_buck_set_current_limit,
- .get_current_limit = lp87565_buck_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static const struct lp87565_regulator regulators[] = {
@@ -193,7 +160,7 @@ static int lp87565_regulator_probe(struct platform_device *pdev)
struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
- int i, min_idx = LP87565_BUCK_1, max_idx = LP87565_BUCK_3;
+ int i, min_idx = LP87565_BUCK_0, max_idx = LP87565_BUCK_3;
platform_set_drvdata(pdev, lp87565);
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 63f724f260ef..9a037fdc5fc5 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -1,21 +1,9 @@
-/*
- * Linear Technology LTC3589,LTC3589-1 regulator support
- *
- * Copyright (c) 2014 Philipp Zabel <p.zabel@pengutronix.de>, Pengutronix
- *
- * See file CREDITS for list of people who contributed to this
- * project.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Linear Technology LTC3589,LTC3589-1 regulator support
+//
+// Copyright (c) 2014 Philipp Zabel <p.zabel@pengutronix.de>, Pengutronix
+
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -84,19 +72,11 @@ enum ltc3589_reg {
LTC3589_NUM_REGULATORS,
};
-struct ltc3589_regulator {
- struct regulator_desc desc;
-
- /* External feedback voltage divider */
- unsigned int r1;
- unsigned int r2;
-};
-
struct ltc3589 {
struct regmap *regmap;
struct device *dev;
enum ltc3589_variant variant;
- struct ltc3589_regulator regulator_descs[LTC3589_NUM_REGULATORS];
+ struct regulator_desc regulator_descs[LTC3589_NUM_REGULATORS];
struct regulator_dev *regulators[LTC3589_NUM_REGULATORS];
};
@@ -196,131 +176,90 @@ static const struct regulator_ops ltc3589_table_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
+static inline unsigned int ltc3589_scale(unsigned int uV, u32 r1, u32 r2)
+{
+ uint64_t tmp;
-#define LTC3589_REG(_name, _ops, en_bit, dtv1_reg, dtv_mask, go_bit) \
- [LTC3589_ ## _name] = { \
- .desc = { \
- .name = #_name, \
- .n_voltages = (dtv_mask) + 1, \
- .min_uV = (go_bit) ? 362500 : 0, \
- .uV_step = (go_bit) ? 12500 : 0, \
- .ramp_delay = (go_bit) ? 1750 : 0, \
- .fixed_uV = (dtv_mask) ? 0 : 800000, \
- .ops = &ltc3589_ ## _ops ## _regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = LTC3589_ ## _name, \
- .owner = THIS_MODULE, \
- .vsel_reg = (dtv1_reg), \
- .vsel_mask = (dtv_mask), \
- .apply_reg = (go_bit) ? LTC3589_VCCR : 0, \
- .apply_bit = (go_bit), \
- .enable_reg = (en_bit) ? LTC3589_OVEN : 0, \
- .enable_mask = (en_bit), \
- }, \
- }
-
-#define LTC3589_LINEAR_REG(_name, _dtv1) \
- LTC3589_REG(_name, linear, LTC3589_OVEN_ ## _name, \
- LTC3589_ ## _dtv1, 0x1f, \
- LTC3589_VCCR_ ## _name ## _GO)
-
-#define LTC3589_FIXED_REG(_name) \
- LTC3589_REG(_name, fixed, LTC3589_OVEN_ ## _name, 0, 0, 0)
-
-static struct ltc3589_regulator ltc3589_regulators[LTC3589_NUM_REGULATORS] = {
- LTC3589_LINEAR_REG(SW1, B1DTV1),
- LTC3589_LINEAR_REG(SW2, B2DTV1),
- LTC3589_LINEAR_REG(SW3, B3DTV1),
- LTC3589_FIXED_REG(BB_OUT),
- LTC3589_REG(LDO1, fixed_standby, 0, 0, 0, 0),
- LTC3589_LINEAR_REG(LDO2, L2DTV1),
- LTC3589_FIXED_REG(LDO3),
- LTC3589_REG(LDO4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2, 0x60, 0),
-};
+ if (uV == 0)
+ return 0;
-#ifdef CONFIG_OF
-static struct of_regulator_match ltc3589_matches[LTC3589_NUM_REGULATORS] = {
- { .name = "sw1", },
- { .name = "sw2", },
- { .name = "sw3", },
- { .name = "bb-out", },
- { .name = "ldo1", }, /* standby */
- { .name = "ldo2", },
- { .name = "ldo3", },
- { .name = "ldo4", },
-};
+ tmp = (uint64_t)uV * r1;
+ do_div(tmp, r2);
+ return uV + (unsigned int)tmp;
+}
-static int ltc3589_parse_regulators_dt(struct ltc3589 *ltc3589)
+static int ltc3589_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
{
- struct device *dev = ltc3589->dev;
- struct device_node *node;
- int i, ret;
+ struct ltc3589 *ltc3589 = config->driver_data;
+ struct regulator_desc *rdesc = &ltc3589->regulator_descs[desc->id];
+ u32 r[2];
+ int ret;
- node = of_get_child_by_name(dev->of_node, "regulators");
- if (!node) {
- dev_err(dev, "regulators node not found\n");
- return -EINVAL;
- }
+ /* Parse feedback voltage dividers. LDO3 and LDO4 don't have them */
+ if (desc->id >= LTC3589_LDO3)
+ return 0;
- ret = of_regulator_match(dev, node, ltc3589_matches,
- ARRAY_SIZE(ltc3589_matches));
- of_node_put(node);
- if (ret < 0) {
- dev_err(dev, "Error parsing regulator init data: %d\n", ret);
- return ret;
- }
- if (ret != LTC3589_NUM_REGULATORS) {
- dev_err(dev, "Only %d regulators described in device tree\n",
+ ret = of_property_read_u32_array(np, "lltc,fb-voltage-divider", r, 2);
+ if (ret) {
+ dev_err(ltc3589->dev, "Failed to parse voltage divider: %d\n",
ret);
- return -EINVAL;
+ return ret;
}
- /* Parse feedback voltage dividers. LDO3 and LDO4 don't have them */
- for (i = 0; i < LTC3589_LDO3; i++) {
- struct ltc3589_regulator *desc = &ltc3589->regulator_descs[i];
- struct device_node *np = ltc3589_matches[i].of_node;
- u32 vdiv[2];
-
- ret = of_property_read_u32_array(np, "lltc,fb-voltage-divider",
- vdiv, 2);
- if (ret) {
- dev_err(dev, "Failed to parse voltage divider: %d\n",
- ret);
- return ret;
- }
+ if (!r[0] || !r[1])
+ return 0;
- desc->r1 = vdiv[0];
- desc->r2 = vdiv[1];
- }
+ rdesc->min_uV = ltc3589_scale(desc->min_uV, r[0], r[1]);
+ rdesc->uV_step = ltc3589_scale(desc->uV_step, r[0], r[1]);
+ rdesc->fixed_uV = ltc3589_scale(desc->fixed_uV, r[0], r[1]);
return 0;
}
-static inline struct regulator_init_data *match_init_data(int index)
-{
- return ltc3589_matches[index].init_data;
-}
-
-static inline struct device_node *match_of_node(int index)
-{
- return ltc3589_matches[index].of_node;
-}
-#else
-static inline int ltc3589_parse_regulators_dt(struct ltc3589 *ltc3589)
-{
- return 0;
-}
+#define LTC3589_REG(_name, _of_name, _ops, en_bit, dtv1_reg, dtv_mask, go_bit)\
+ [LTC3589_ ## _name] = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(#_of_name), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .of_parse_cb = ltc3589_of_parse_cb, \
+ .n_voltages = (dtv_mask) + 1, \
+ .min_uV = (go_bit) ? 362500 : 0, \
+ .uV_step = (go_bit) ? 12500 : 0, \
+ .ramp_delay = (go_bit) ? 1750 : 0, \
+ .fixed_uV = (dtv_mask) ? 0 : 800000, \
+ .ops = &ltc3589_ ## _ops ## _regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = LTC3589_ ## _name, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = (dtv1_reg), \
+ .vsel_mask = (dtv_mask), \
+ .apply_reg = (go_bit) ? LTC3589_VCCR : 0, \
+ .apply_bit = (go_bit), \
+ .enable_reg = (en_bit) ? LTC3589_OVEN : 0, \
+ .enable_mask = (en_bit), \
+ }
-static inline struct regulator_init_data *match_init_data(int index)
-{
- return NULL;
-}
+#define LTC3589_LINEAR_REG(_name, _of_name, _dtv1) \
+ LTC3589_REG(_name, _of_name, linear, LTC3589_OVEN_ ## _name, \
+ LTC3589_ ## _dtv1, 0x1f, \
+ LTC3589_VCCR_ ## _name ## _GO)
-static inline struct device_node *match_of_node(int index)
-{
- return NULL;
-}
-#endif
+#define LTC3589_FIXED_REG(_name, _of_name) \
+ LTC3589_REG(_name, _of_name, fixed, LTC3589_OVEN_ ## _name, 0, 0, 0)
+
+static const struct regulator_desc ltc3589_regulators[] = {
+ LTC3589_LINEAR_REG(SW1, sw1, B1DTV1),
+ LTC3589_LINEAR_REG(SW2, sw2, B2DTV1),
+ LTC3589_LINEAR_REG(SW3, sw3, B3DTV1),
+ LTC3589_FIXED_REG(BB_OUT, bb-out),
+ LTC3589_REG(LDO1, ldo1, fixed_standby, 0, 0, 0, 0),
+ LTC3589_LINEAR_REG(LDO2, ldo2, L2DTV1),
+ LTC3589_FIXED_REG(LDO3, ldo3),
+ LTC3589_REG(LDO4, ldo4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2,
+ 0x60, 0),
+};
static bool ltc3589_writeable_reg(struct device *dev, unsigned int reg)
{
@@ -409,7 +348,6 @@ static const struct regmap_config ltc3589_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-
static irqreturn_t ltc3589_isr(int irq, void *dev_id)
{
struct ltc3589 *ltc3589 = dev_id;
@@ -419,16 +357,22 @@ static irqreturn_t ltc3589_isr(int irq, void *dev_id)
if (irqstat & LTC3589_IRQSTAT_THERMAL_WARN) {
event = REGULATOR_EVENT_OVER_TEMP;
- for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
+ for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
+ regulator_lock(ltc3589->regulators[i]);
regulator_notifier_call_chain(ltc3589->regulators[i],
event, NULL);
+ regulator_unlock(ltc3589->regulators[i]);
+ }
}
if (irqstat & LTC3589_IRQSTAT_UNDERVOLT_WARN) {
event = REGULATOR_EVENT_UNDER_VOLTAGE;
- for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
+ for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
+ regulator_lock(ltc3589->regulators[i]);
regulator_notifier_call_chain(ltc3589->regulators[i],
event, NULL);
+ regulator_unlock(ltc3589->regulators[i]);
+ }
}
/* Clear warning condition */
@@ -437,33 +381,11 @@ static irqreturn_t ltc3589_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static inline unsigned int ltc3589_scale(unsigned int uV, u32 r1, u32 r2)
-{
- uint64_t tmp;
- if (uV == 0)
- return 0;
- tmp = (uint64_t)uV * r1;
- do_div(tmp, r2);
- return uV + (unsigned int)tmp;
-}
-
-static void ltc3589_apply_fb_voltage_divider(struct ltc3589_regulator *rdesc)
-{
- struct regulator_desc *desc = &rdesc->desc;
-
- if (!rdesc->r1 || !rdesc->r2)
- return;
-
- desc->min_uV = ltc3589_scale(desc->min_uV, rdesc->r1, rdesc->r2);
- desc->uV_step = ltc3589_scale(desc->uV_step, rdesc->r1, rdesc->r2);
- desc->fixed_uV = ltc3589_scale(desc->fixed_uV, rdesc->r1, rdesc->r2);
-}
-
static int ltc3589_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- struct ltc3589_regulator *descs;
+ struct regulator_desc *descs;
struct ltc3589 *ltc3589;
int i, ret;
@@ -482,11 +404,11 @@ static int ltc3589_probe(struct i2c_client *client,
descs = ltc3589->regulator_descs;
memcpy(descs, ltc3589_regulators, sizeof(ltc3589_regulators));
if (ltc3589->variant == LTC3589) {
- descs[LTC3589_LDO3].desc.fixed_uV = 1800000;
- descs[LTC3589_LDO4].desc.volt_table = ltc3589_ldo4;
+ descs[LTC3589_LDO3].fixed_uV = 1800000;
+ descs[LTC3589_LDO4].volt_table = ltc3589_ldo4;
} else {
- descs[LTC3589_LDO3].desc.fixed_uV = 2800000;
- descs[LTC3589_LDO4].desc.volt_table = ltc3589_12_ldo4;
+ descs[LTC3589_LDO3].fixed_uV = 2800000;
+ descs[LTC3589_LDO4].volt_table = ltc3589_12_ldo4;
}
ltc3589->regmap = devm_regmap_init_i2c(client, &ltc3589_regmap_config);
@@ -496,25 +418,12 @@ static int ltc3589_probe(struct i2c_client *client,
return ret;
}
- ret = ltc3589_parse_regulators_dt(ltc3589);
- if (ret)
- return ret;
-
for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
- struct ltc3589_regulator *rdesc = &ltc3589->regulator_descs[i];
- struct regulator_desc *desc = &rdesc->desc;
- struct regulator_init_data *init_data;
+ struct regulator_desc *desc = &ltc3589->regulator_descs[i];
struct regulator_config config = { };
- init_data = match_init_data(i);
-
- if (i < LTC3589_LDO3)
- ltc3589_apply_fb_voltage_divider(rdesc);
-
config.dev = dev;
- config.init_data = init_data;
config.driver_data = ltc3589;
- config.of_node = match_of_node(i);
ltc3589->regulators[i] = devm_regulator_register(dev, desc,
&config);
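
The of_parse_cb added above folds the old feedback-divider pass into registration: with an external divider r1 (top) and r2 (bottom), Vout = Vfb * (1 + r1/r2), computed in overflow-safe integer math as uV + (u64)uV * r1 / r2. A minimal standalone sketch of that arithmetic (the divider values below are made-up examples, not taken from the driver):

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int scale_uV(unsigned int uV, uint32_t r1, uint32_t r2)
	{
		uint64_t tmp;

		if (uV == 0)
			return 0;
		tmp = (uint64_t)uV * r1;		/* widen before multiply */
		return uV + (unsigned int)(tmp / r2);	/* do_div() in-kernel */
	}

	int main(void)
	{
		/* 362500 uV at the feedback pin through a 100k/180k divider */
		printf("%u uV\n", scale_uV(362500, 100000, 180000));
		return 0;
	}
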
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index e6d66e492b85..4be90c78c720 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -285,17 +285,23 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id)
if (irqstat & LTC3676_IRQSTAT_THERMAL_WARN) {
dev_warn(dev, "Over-temperature Warning\n");
event = REGULATOR_EVENT_OVER_TEMP;
- for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
+ for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
+ regulator_lock(ltc3676->regulators[i]);
regulator_notifier_call_chain(ltc3676->regulators[i],
event, NULL);
+ regulator_unlock(ltc3676->regulators[i]);
+ }
}
if (irqstat & LTC3676_IRQSTAT_UNDERVOLT_WARN) {
dev_info(dev, "Undervoltage Warning\n");
event = REGULATOR_EVENT_UNDER_VOLTAGE;
- for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
+ for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
+ regulator_lock(ltc3676->regulators[i]);
regulator_notifier_call_chain(ltc3676->regulators[i],
event, NULL);
+ regulator_unlock(ltc3676->regulators[i]);
+ }
}
/* Clear warning condition */
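
Both the ltc3589 and ltc3676 ISR hunks make the same change: regulator_notifier_call_chain() is now expected to run with the rdev lock held, so each notification gets bracketed by regulator_lock()/regulator_unlock(). The shape of the pattern, pulled out into a hypothetical helper (notify_all() is ours, not driver code):

	static void notify_all(struct regulator_dev **rdevs, int n,
			       unsigned long event)
	{
		int i;

		for (i = 0; i < n; i++) {
			regulator_lock(rdevs[i]);
			regulator_notifier_call_chain(rdevs[i], event, NULL);
			regulator_unlock(rdevs[i]);
		}
	}
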
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index 85a88a9e4d42..07a150c9bbf2 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -155,58 +155,6 @@ static const struct regulator_desc max77836_supported_regulators[] = {
[MAX77836_LDO2] = MAX77836_LDO_REG(2),
};
-#ifdef CONFIG_OF
-static struct of_regulator_match max14577_regulator_matches[] = {
- { .name = "SAFEOUT", },
- { .name = "CHARGER", },
-};
-
-static struct of_regulator_match max77836_regulator_matches[] = {
- { .name = "SAFEOUT", },
- { .name = "CHARGER", },
- { .name = "LDO1", },
- { .name = "LDO2", },
-};
-
-static inline struct regulator_init_data *match_init_data(int index,
- enum maxim_device_type dev_type)
-{
- switch (dev_type) {
- case MAXIM_DEVICE_TYPE_MAX77836:
- return max77836_regulator_matches[index].init_data;
-
- case MAXIM_DEVICE_TYPE_MAX14577:
- default:
- return max14577_regulator_matches[index].init_data;
- }
-}
-
-static inline struct device_node *match_of_node(int index,
- enum maxim_device_type dev_type)
-{
- switch (dev_type) {
- case MAXIM_DEVICE_TYPE_MAX77836:
- return max77836_regulator_matches[index].of_node;
-
- case MAXIM_DEVICE_TYPE_MAX14577:
- default:
- return max14577_regulator_matches[index].of_node;
- }
-}
-#else /* CONFIG_OF */
-static inline struct regulator_init_data *match_init_data(int index,
- enum maxim_device_type dev_type)
-{
- return NULL;
-}
-
-static inline struct device_node *match_of_node(int index,
- enum maxim_device_type dev_type)
-{
- return NULL;
-}
-#endif /* CONFIG_OF */
-
/**
* Registers for regulators of max77836 use different I2C slave addresses so
* different regmaps must be used for them.
@@ -265,9 +213,6 @@ static int max14577_regulator_probe(struct platform_device *pdev)
if (pdata && pdata->regulators) {
config.init_data = pdata->regulators[i].initdata;
config.of_node = pdata->regulators[i].of_node;
- } else {
- config.init_data = match_init_data(i, dev_type);
- config.of_node = match_of_node(i, dev_type);
}
config.regmap = max14577_get_regmap(max14577,
supported_regulators[i].id);
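
The max14577 cleanup leans on the core's simplified DT parsing: when a regulator_desc provides .of_match and .regulators_node, devm_regulator_register() locates the matching child of the "regulators" sub-node and pulls the init data itself, which is what lets the of_regulator_match() tables and the match_init_data()/match_of_node() helpers go. A sketch of a desc using the two fields (all names illustrative, assuming the descs carry these fields):

	static const struct regulator_ops example_ops;	/* ops elided */

	static const struct regulator_desc example_desc = {
		.name		 = "LDO1",
		.of_match	 = of_match_ptr("LDO1"),
		.regulators_node = of_match_ptr("regulators"),
		.id		 = 0,
		.type		 = REGULATOR_VOLTAGE,
		.owner		 = THIS_MODULE,
		.ops		 = &example_ops,
	};
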
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 1607ac673e44..0ad91a7f9cb9 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -803,7 +803,7 @@ static int max77620_regulator_probe(struct platform_device *pdev)
continue;
rdesc = &rinfo[id].desc;
- pmic->rinfo[id] = &max77620_regs_info[id];
+ pmic->rinfo[id] = &rinfo[id];
pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
pmic->reg_pdata[id].active_fps_src = -1;
pmic->reg_pdata[id].active_fps_pd_slot = -1;
diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c
index 31ebf34b01ec..5c4f86c98510 100644
--- a/drivers/regulator/max77650-regulator.c
+++ b/drivers/regulator/max77650-regulator.c
@@ -41,7 +41,7 @@ struct max77650_regulator_desc {
unsigned int regB;
};
-static const u32 max77651_sbb1_regulator_volt_table[] = {
+static const unsigned int max77651_sbb1_regulator_volt_table[] = {
2400000, 3200000, 4000000, 4800000,
2450000, 3250000, 4050000, 4850000,
2500000, 3300000, 4100000, 4900000,
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 39b63ddefeb2..aed6727982cd 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -159,6 +159,8 @@ static const struct regulator_ops max8925_regulator_ldo_ops = {
{ \
.desc = { \
.name = "SDV" #_id, \
+ .of_match = of_match_ptr("SDV" #_id), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &max8925_regulator_sdv_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MAX8925_ID_SD##_id, \
@@ -175,6 +177,8 @@ static const struct regulator_ops max8925_regulator_ldo_ops = {
{ \
.desc = { \
.name = "LDO" #_id, \
+ .of_match = of_match_ptr("LDO" #_id), \
+ .regulators_node = of_match_ptr("regulators"), \
.ops = &max8925_regulator_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MAX8925_ID_LDO##_id, \
@@ -187,34 +191,6 @@ static const struct regulator_ops max8925_regulator_ldo_ops = {
.enable_reg = MAX8925_LDOCTL##_id, \
}
-#ifdef CONFIG_OF
-static struct of_regulator_match max8925_regulator_matches[] = {
- { .name = "SDV1",},
- { .name = "SDV2",},
- { .name = "SDV3",},
- { .name = "LDO1",},
- { .name = "LDO2",},
- { .name = "LDO3",},
- { .name = "LDO4",},
- { .name = "LDO5",},
- { .name = "LDO6",},
- { .name = "LDO7",},
- { .name = "LDO8",},
- { .name = "LDO9",},
- { .name = "LDO10",},
- { .name = "LDO11",},
- { .name = "LDO12",},
- { .name = "LDO13",},
- { .name = "LDO14",},
- { .name = "LDO15",},
- { .name = "LDO16",},
- { .name = "LDO17",},
- { .name = "LDO18",},
- { .name = "LDO19",},
- { .name = "LDO20",},
-};
-#endif
-
static struct max8925_regulator_info max8925_regulator_info[] = {
MAX8925_SDV(1, 637.5, 1425, 12.5),
MAX8925_SDV(2, 650, 2225, 25),
@@ -242,37 +218,6 @@ static struct max8925_regulator_info max8925_regulator_info[] = {
MAX8925_LDO(20, 750, 3900, 50),
};
-#ifdef CONFIG_OF
-static int max8925_regulator_dt_init(struct platform_device *pdev,
- struct regulator_config *config,
- int ridx)
-{
- struct device_node *nproot, *np;
- int rcount;
-
- nproot = pdev->dev.parent->of_node;
- if (!nproot)
- return -ENODEV;
- np = of_get_child_by_name(nproot, "regulators");
- if (!np) {
- dev_err(&pdev->dev, "failed to find regulators node\n");
- return -ENODEV;
- }
-
- rcount = of_regulator_match(&pdev->dev, np,
- &max8925_regulator_matches[ridx], 1);
- of_node_put(np);
- if (rcount < 0)
- return rcount;
- config->init_data = max8925_regulator_matches[ridx].init_data;
- config->of_node = max8925_regulator_matches[ridx].of_node;
-
- return 0;
-}
-#else
-#define max8925_regulator_dt_init(x, y, z) (-1)
-#endif
-
static int max8925_regulator_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -281,7 +226,7 @@ static int max8925_regulator_probe(struct platform_device *pdev)
struct max8925_regulator_info *ri;
struct resource *res;
struct regulator_dev *rdev;
- int i, regulator_idx;
+ int i;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (!res) {
@@ -290,10 +235,8 @@ static int max8925_regulator_probe(struct platform_device *pdev)
}
for (i = 0; i < ARRAY_SIZE(max8925_regulator_info); i++) {
ri = &max8925_regulator_info[i];
- if (ri->vol_reg == res->start) {
- regulator_idx = i;
+ if (ri->vol_reg == res->start)
break;
- }
}
if (i == ARRAY_SIZE(max8925_regulator_info)) {
@@ -303,12 +246,11 @@ static int max8925_regulator_probe(struct platform_device *pdev)
}
ri->i2c = chip->i2c;
- config.dev = &pdev->dev;
+ config.dev = chip->dev;
config.driver_data = ri;
- if (max8925_regulator_dt_init(pdev, &config, regulator_idx))
- if (pdata)
- config.init_data = pdata;
+ if (pdata)
+ config.init_data = pdata;
rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 271bb736f3f5..60599c3bb845 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -33,72 +33,6 @@ struct max8998_data {
unsigned int buck2_idx;
};
-struct voltage_map_desc {
- int min;
- int max;
- int step;
-};
-
-/* Voltage maps in uV*/
-static const struct voltage_map_desc ldo23_voltage_map_desc = {
- .min = 800000, .step = 50000, .max = 1300000,
-};
-static const struct voltage_map_desc ldo456711_voltage_map_desc = {
- .min = 1600000, .step = 100000, .max = 3600000,
-};
-static const struct voltage_map_desc ldo8_voltage_map_desc = {
- .min = 3000000, .step = 100000, .max = 3600000,
-};
-static const struct voltage_map_desc ldo9_voltage_map_desc = {
- .min = 2800000, .step = 100000, .max = 3100000,
-};
-static const struct voltage_map_desc ldo10_voltage_map_desc = {
- .min = 950000, .step = 50000, .max = 1300000,
-};
-static const struct voltage_map_desc ldo1213_voltage_map_desc = {
- .min = 800000, .step = 100000, .max = 3300000,
-};
-static const struct voltage_map_desc ldo1415_voltage_map_desc = {
- .min = 1200000, .step = 100000, .max = 3300000,
-};
-static const struct voltage_map_desc ldo1617_voltage_map_desc = {
- .min = 1600000, .step = 100000, .max = 3600000,
-};
-static const struct voltage_map_desc buck12_voltage_map_desc = {
- .min = 750000, .step = 25000, .max = 1525000,
-};
-static const struct voltage_map_desc buck3_voltage_map_desc = {
- .min = 1600000, .step = 100000, .max = 3600000,
-};
-static const struct voltage_map_desc buck4_voltage_map_desc = {
- .min = 800000, .step = 100000, .max = 2300000,
-};
-
-static const struct voltage_map_desc *ldo_voltage_map[] = {
- NULL,
- NULL,
- &ldo23_voltage_map_desc, /* LDO2 */
- &ldo23_voltage_map_desc, /* LDO3 */
- &ldo456711_voltage_map_desc, /* LDO4 */
- &ldo456711_voltage_map_desc, /* LDO5 */
- &ldo456711_voltage_map_desc, /* LDO6 */
- &ldo456711_voltage_map_desc, /* LDO7 */
- &ldo8_voltage_map_desc, /* LDO8 */
- &ldo9_voltage_map_desc, /* LDO9 */
- &ldo10_voltage_map_desc, /* LDO10 */
- &ldo456711_voltage_map_desc, /* LDO11 */
- &ldo1213_voltage_map_desc, /* LDO12 */
- &ldo1213_voltage_map_desc, /* LDO13 */
- &ldo1415_voltage_map_desc, /* LDO14 */
- &ldo1415_voltage_map_desc, /* LDO15 */
- &ldo1617_voltage_map_desc, /* LDO16 */
- &ldo1617_voltage_map_desc, /* LDO17 */
- &buck12_voltage_map_desc, /* BUCK1 */
- &buck12_voltage_map_desc, /* BUCK2 */
- &buck3_voltage_map_desc, /* BUCK3 */
- &buck4_voltage_map_desc, /* BUCK4 */
-};
-
static int max8998_get_enable_register(struct regulator_dev *rdev,
int *reg, int *shift)
{
@@ -400,7 +334,6 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8998->iodev->i2c;
- const struct voltage_map_desc *desc;
int buck = rdev_get_id(rdev);
u8 val = 0;
int difference, ret;
@@ -408,8 +341,6 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
if (buck < MAX8998_BUCK1 || buck > MAX8998_BUCK4)
return -EINVAL;
- desc = ldo_voltage_map[buck];
-
/* Voltage stabilization */
ret = max8998_read_reg(i2c, MAX8998_REG_ONOFF4, &val);
if (ret)
@@ -420,14 +351,14 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
return 0;
- difference = (new_selector - old_selector) * desc->step / 1000;
+ difference = (new_selector - old_selector) * rdev->desc->uV_step / 1000;
if (difference > 0)
return DIV_ROUND_UP(difference, (val & 0x0f) + 1);
return 0;
}
-static struct regulator_ops max8998_ldo_ops = {
+static const struct regulator_ops max8998_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = max8998_ldo_is_enabled,
@@ -437,7 +368,7 @@ static struct regulator_ops max8998_ldo_ops = {
.set_voltage_sel = max8998_set_voltage_ldo_sel,
};
-static struct regulator_ops max8998_buck_ops = {
+static const struct regulator_ops max8998_buck_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = max8998_ldo_is_enabled,
@@ -448,164 +379,59 @@ static struct regulator_ops max8998_buck_ops = {
.set_voltage_time_sel = max8998_set_voltage_buck_time_sel,
};
-static struct regulator_ops max8998_others_ops = {
+static const struct regulator_ops max8998_others_ops = {
.is_enabled = max8998_ldo_is_enabled,
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
};
-static struct regulator_desc regulators[] = {
- {
- .name = "LDO2",
- .id = MAX8998_LDO2,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO3",
- .id = MAX8998_LDO3,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO4",
- .id = MAX8998_LDO4,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO5",
- .id = MAX8998_LDO5,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO6",
- .id = MAX8998_LDO6,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO7",
- .id = MAX8998_LDO7,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO8",
- .id = MAX8998_LDO8,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO9",
- .id = MAX8998_LDO9,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO10",
- .id = MAX8998_LDO10,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO11",
- .id = MAX8998_LDO11,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO12",
- .id = MAX8998_LDO12,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO13",
- .id = MAX8998_LDO13,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO14",
- .id = MAX8998_LDO14,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO15",
- .id = MAX8998_LDO15,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO16",
- .id = MAX8998_LDO16,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO17",
- .id = MAX8998_LDO17,
- .ops = &max8998_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "BUCK1",
- .id = MAX8998_BUCK1,
- .ops = &max8998_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "BUCK2",
- .id = MAX8998_BUCK2,
- .ops = &max8998_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "BUCK3",
- .id = MAX8998_BUCK3,
- .ops = &max8998_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "BUCK4",
- .id = MAX8998_BUCK4,
- .ops = &max8998_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "EN32KHz-AP",
- .id = MAX8998_EN32KHZ_AP,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "EN32KHz-CP",
- .id = MAX8998_EN32KHZ_CP,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "ENVICHG",
- .id = MAX8998_ENVICHG,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "ESAFEOUT1",
- .id = MAX8998_ESAFEOUT1,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
- }, {
- .name = "ESAFEOUT2",
- .id = MAX8998_ESAFEOUT2,
- .ops = &max8998_others_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
+#define MAX8998_LINEAR_REG(_name, _ops, _min, _step, _max) \
+ { \
+ .name = #_name, \
+ .id = MAX8998_##_name, \
+ .ops = _ops, \
+ .min_uV = (_min), \
+ .uV_step = (_step), \
+ .n_voltages = ((_max) - (_min)) / (_step) + 1, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
}
+
+#define MAX8998_OTHERS_REG(_name, _id) \
+ { \
+ .name = #_name, \
+ .id = _id, \
+ .ops = &max8998_others_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+static const struct regulator_desc regulators[] = {
+ MAX8998_LINEAR_REG(LDO2, &max8998_ldo_ops, 800000, 50000, 1300000),
+ MAX8998_LINEAR_REG(LDO3, &max8998_ldo_ops, 800000, 50000, 1300000),
+ MAX8998_LINEAR_REG(LDO4, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO5, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO6, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO7, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO8, &max8998_ldo_ops, 3000000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO9, &max8998_ldo_ops, 2800000, 100000, 3100000),
+ MAX8998_LINEAR_REG(LDO10, &max8998_ldo_ops, 950000, 50000, 1300000),
+ MAX8998_LINEAR_REG(LDO11, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO12, &max8998_ldo_ops, 800000, 100000, 3300000),
+ MAX8998_LINEAR_REG(LDO13, &max8998_ldo_ops, 800000, 100000, 3300000),
+ MAX8998_LINEAR_REG(LDO14, &max8998_ldo_ops, 1200000, 100000, 3300000),
+ MAX8998_LINEAR_REG(LDO15, &max8998_ldo_ops, 1200000, 100000, 3300000),
+ MAX8998_LINEAR_REG(LDO16, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(LDO17, &max8998_ldo_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(BUCK1, &max8998_buck_ops, 750000, 25000, 1525000),
+ MAX8998_LINEAR_REG(BUCK2, &max8998_buck_ops, 750000, 25000, 1525000),
+ MAX8998_LINEAR_REG(BUCK3, &max8998_buck_ops, 1600000, 100000, 3600000),
+ MAX8998_LINEAR_REG(BUCK4, &max8998_buck_ops, 800000, 100000, 2300000),
+ MAX8998_OTHERS_REG(EN32KHz-AP, MAX8998_EN32KHZ_AP),
+ MAX8998_OTHERS_REG(EN32KHz-CP, MAX8998_EN32KHZ_CP),
+ MAX8998_OTHERS_REG(ENVICHG, MAX8998_ENVICHG),
+ MAX8998_OTHERS_REG(ESAFEOUT1, MAX8998_ESAFEOUT1),
+ MAX8998_OTHERS_REG(ESAFEOUT2, MAX8998_ESAFEOUT2),
};
static int max8998_pmic_dt_parse_dvs_gpio(struct max8998_dev *iodev,
@@ -796,9 +622,11 @@ static int max8998_pmic_probe(struct platform_device *pdev)
/* Set predefined values for BUCK1 registers */
for (v = 0; v < ARRAY_SIZE(pdata->buck1_voltage); ++v) {
+ int index = MAX8998_BUCK1 - MAX8998_LDO2;
+
i = 0;
- while (buck12_voltage_map_desc.min +
- buck12_voltage_map_desc.step*i
+ while (regulators[index].min_uV +
+ regulators[index].uV_step * i
< pdata->buck1_voltage[v])
i++;
@@ -824,9 +652,11 @@ static int max8998_pmic_probe(struct platform_device *pdev)
/* Set predefined values for BUCK2 registers */
for (v = 0; v < ARRAY_SIZE(pdata->buck2_voltage); ++v) {
+ int index = MAX8998_BUCK2 - MAX8998_LDO2;
+
i = 0;
- while (buck12_voltage_map_desc.min +
- buck12_voltage_map_desc.step*i
+ while (regulators[index].min_uV +
+ regulators[index].uV_step * i
< pdata->buck2_voltage[v])
i++;
@@ -839,18 +669,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
}
for (i = 0; i < pdata->num_regulators; i++) {
- const struct voltage_map_desc *desc;
- int id = pdata->regulators[i].id;
- int index = id - MAX8998_LDO2;
-
- desc = ldo_voltage_map[id];
- if (desc && regulators[index].ops != &max8998_others_ops) {
- int count = (desc->max - desc->min) / desc->step + 1;
-
- regulators[index].n_voltages = count;
- regulators[index].min_uV = desc->min;
- regulators[index].uV_step = desc->step;
- }
+ int index = pdata->regulators[i].id - MAX8998_LDO2;
config.dev = max8998->dev;
config.of_node = pdata->regulators[i].reg_node;
@@ -867,7 +686,6 @@ static int max8998_pmic_probe(struct platform_device *pdev)
}
}
-
return 0;
}
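
The max8998 rework above expresses the old voltage_map_desc tables as linear desc fields. For a linear map, uV(sel) = min_uV + sel * uV_step with sel in [0, n_voltages), hence the macro's n_voltages = (max - min) / step + 1. A standalone check of that arithmetic for BUCK1:

	#include <stdio.h>

	int main(void)
	{
		const int min_uV = 750000, uV_step = 25000, max_uV = 1525000;
		const int n = (max_uV - min_uV) / uV_step + 1;

		printf("n_voltages=%d: sel 0 -> %d uV, sel %d -> %d uV\n",
		       n, min_uV, n - 1, min_uV + (n - 1) * uV_step);
		return 0;	/* prints n_voltages=32 ... 1525000 uV */
	}
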
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 3a8004abe044..e5a02711cb46 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -119,8 +119,6 @@ enum {
* @lpm: LPM GPIO descriptor
*/
struct mcp16502 {
- struct regulator_dev *rdev[NUM_REGULATORS];
- struct regmap *rmap;
struct gpio_desc *lpm;
};
@@ -179,13 +177,12 @@ static unsigned int mcp16502_get_mode(struct regulator_dev *rdev)
{
unsigned int val;
int ret, reg;
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
reg = mcp16502_get_reg(rdev, MCP16502_OPMODE_ACTIVE);
if (reg < 0)
return reg;
- ret = regmap_read(mcp->rmap, reg, &val);
+ ret = regmap_read(rdev->regmap, reg, &val);
if (ret)
return ret;
@@ -211,7 +208,6 @@ static int _mcp16502_set_mode(struct regulator_dev *rdev, unsigned int mode,
{
int val;
int reg;
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
reg = mcp16502_get_reg(rdev, op_mode);
if (reg < 0)
@@ -228,7 +224,7 @@ static int _mcp16502_set_mode(struct regulator_dev *rdev, unsigned int mode,
return -EINVAL;
}
- reg = regmap_update_bits(mcp->rmap, reg, MCP16502_MODE, val);
+ reg = regmap_update_bits(rdev->regmap, reg, MCP16502_MODE, val);
return reg;
}
@@ -247,9 +243,8 @@ static int mcp16502_get_status(struct regulator_dev *rdev)
{
int ret;
unsigned int val;
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
- ret = regmap_read(mcp->rmap, MCP16502_STAT_BASE(rdev_get_id(rdev)),
+ ret = regmap_read(rdev->regmap, MCP16502_STAT_BASE(rdev_get_id(rdev)),
&val);
if (ret)
return ret;
@@ -290,7 +285,6 @@ static int mcp16502_suspend_get_target_reg(struct regulator_dev *rdev)
*/
static int mcp16502_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
int sel = regulator_map_voltage_linear_range(rdev, uV, uV);
int reg = mcp16502_suspend_get_target_reg(rdev);
@@ -300,7 +294,7 @@ static int mcp16502_set_suspend_voltage(struct regulator_dev *rdev, int uV)
if (reg < 0)
return reg;
- return regmap_update_bits(mcp->rmap, reg, MCP16502_VSEL, sel);
+ return regmap_update_bits(rdev->regmap, reg, MCP16502_VSEL, sel);
}
/*
@@ -328,13 +322,12 @@ static int mcp16502_set_suspend_mode(struct regulator_dev *rdev,
*/
static int mcp16502_set_suspend_enable(struct regulator_dev *rdev)
{
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
int reg = mcp16502_suspend_get_target_reg(rdev);
if (reg < 0)
return reg;
- return regmap_update_bits(mcp->rmap, reg, MCP16502_EN, MCP16502_EN);
+ return regmap_update_bits(rdev->regmap, reg, MCP16502_EN, MCP16502_EN);
}
/*
@@ -342,13 +335,12 @@ static int mcp16502_set_suspend_enable(struct regulator_dev *rdev)
*/
static int mcp16502_set_suspend_disable(struct regulator_dev *rdev)
{
- struct mcp16502 *mcp = rdev_get_drvdata(rdev);
int reg = mcp16502_suspend_get_target_reg(rdev);
if (reg < 0)
return reg;
- return regmap_update_bits(mcp->rmap, reg, MCP16502_EN, 0);
+ return regmap_update_bits(rdev->regmap, reg, MCP16502_EN, 0);
}
#endif /* CONFIG_SUSPEND */
@@ -435,36 +427,15 @@ static const struct regmap_config mcp16502_regmap_config = {
.wr_table = &mcp16502_yes_reg_table,
};
-/*
- * set_up_regulators() - initialize all regulators
- */
-static int setup_regulators(struct mcp16502 *mcp, struct device *dev,
- struct regulator_config config)
-{
- int i;
-
- for (i = 0; i < NUM_REGULATORS; i++) {
- mcp->rdev[i] = devm_regulator_register(dev,
- &mcp16502_desc[i],
- &config);
- if (IS_ERR(mcp->rdev[i])) {
- dev_err(dev,
- "failed to register %s regulator %ld\n",
- mcp16502_desc[i].name, PTR_ERR(mcp->rdev[i]));
- return PTR_ERR(mcp->rdev[i]);
- }
- }
-
- return 0;
-}
-
static int mcp16502_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct regulator_config config = { };
+ struct regulator_dev *rdev;
struct device *dev;
struct mcp16502 *mcp;
- int ret = 0;
+ struct regmap *rmap;
+ int i, ret;
dev = &client->dev;
config.dev = dev;
@@ -473,15 +444,15 @@ static int mcp16502_probe(struct i2c_client *client,
if (!mcp)
return -ENOMEM;
- mcp->rmap = devm_regmap_init_i2c(client, &mcp16502_regmap_config);
- if (IS_ERR(mcp->rmap)) {
- ret = PTR_ERR(mcp->rmap);
+ rmap = devm_regmap_init_i2c(client, &mcp16502_regmap_config);
+ if (IS_ERR(rmap)) {
+ ret = PTR_ERR(rmap);
dev_err(dev, "regmap init failed: %d\n", ret);
return ret;
}
i2c_set_clientdata(client, mcp);
- config.regmap = mcp->rmap;
+ config.regmap = rmap;
config.driver_data = mcp;
mcp->lpm = devm_gpiod_get(dev, "lpm", GPIOD_OUT_LOW);
@@ -490,9 +461,15 @@ static int mcp16502_probe(struct i2c_client *client,
return PTR_ERR(mcp->lpm);
}
- ret = setup_regulators(mcp, dev, config);
- if (ret != 0)
- return ret;
+ for (i = 0; i < NUM_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev, &mcp16502_desc[i], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev,
+ "failed to register %s regulator %ld\n",
+ mcp16502_desc[i].name, PTR_ERR(rdev));
+ return PTR_ERR(rdev);
+ }
+ }
mcp16502_gpio_set_mode(mcp, MCP16502_OPMODE_ACTIVE);
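
The mcp16502 hunks work because the core stores config.regmap in rdev->regmap at registration, so ops reach the device regmap without a driver-private copy. A sketch of an op written against rdev->regmap (the register offset is a placeholder, not an MCP16502 address):

	static int example_is_enabled(struct regulator_dev *rdev)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(rdev->regmap, 0x09 /* placeholder */, &val);
		if (ret)
			return ret;

		return !!(val & rdev->desc->enable_mask);
	}
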
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index 01d69f43d2b0..af95449d3590 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -1,16 +1,7 @@
-/*
- * Copyright (c) 2015 MediaTek Inc.
- * Author: Henry Chen <henryc.chen@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2015 MediaTek Inc.
+// Author: Henry Chen <henryc.chen@mediatek.com>
#include <linux/err.h>
#include <linux/gpio.h>
diff --git a/drivers/regulator/mt6311-regulator.h b/drivers/regulator/mt6311-regulator.h
index 5218db46a798..4904d6751714 100644
--- a/drivers/regulator/mt6311-regulator.h
+++ b/drivers/regulator/mt6311-regulator.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: Henry Chen <henryc.chen@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MT6311_REGULATOR_H__
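
The license hunks here and in the files below follow Documentation/process/license-rules.rst: C sources carry the SPDX tag as a C++-style comment on the first line, while headers keep the C-style form (headers can be pulled into contexts, such as assembly, where // is not a comment):

	// SPDX-License-Identifier: GPL-2.0	<- first line of a .c file

	/* SPDX-License-Identifier: GPL-2.0 */	/* <- first line of a .h file */
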
diff --git a/drivers/regulator/mt6323-regulator.c b/drivers/regulator/mt6323-regulator.c
index b7b9670f0979..893ea190788a 100644
--- a/drivers/regulator/mt6323-regulator.c
+++ b/drivers/regulator/mt6323-regulator.c
@@ -1,11 +1,7 @@
-/*
- * Copyright (c) 2016 MediaTek Inc.
- * Author: Chen Zhong <chen.zhong@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2016 MediaTek Inc.
+// Author: Chen Zhong <chen.zhong@mediatek.com>
#include <linux/module.h>
#include <linux/of.h>
@@ -118,43 +114,43 @@ static const struct regulator_linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
};
-static const u32 ldo_volt_table1[] = {
+static const unsigned int ldo_volt_table1[] = {
3300000, 3400000, 3500000, 3600000,
};
-static const u32 ldo_volt_table2[] = {
+static const unsigned int ldo_volt_table2[] = {
1500000, 1800000, 2500000, 2800000,
};
-static const u32 ldo_volt_table3[] = {
+static const unsigned int ldo_volt_table3[] = {
1800000, 3300000,
};
-static const u32 ldo_volt_table4[] = {
+static const unsigned int ldo_volt_table4[] = {
3000000, 3300000,
};
-static const u32 ldo_volt_table5[] = {
+static const unsigned int ldo_volt_table5[] = {
1200000, 1300000, 1500000, 1800000, 2000000, 2800000, 3000000, 3300000,
};
-static const u32 ldo_volt_table6[] = {
+static const unsigned int ldo_volt_table6[] = {
1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
};
-static const u32 ldo_volt_table7[] = {
+static const unsigned int ldo_volt_table7[] = {
1200000, 1300000, 1500000, 1800000,
};
-static const u32 ldo_volt_table8[] = {
+static const unsigned int ldo_volt_table8[] = {
1800000, 3000000,
};
-static const u32 ldo_volt_table9[] = {
+static const unsigned int ldo_volt_table9[] = {
1200000, 1350000, 1500000, 1800000,
};
-static const u32 ldo_volt_table10[] = {
+static const unsigned int ldo_volt_table10[] = {
1200000, 1300000, 1500000, 1800000,
};
diff --git a/drivers/regulator/mt6380-regulator.c b/drivers/regulator/mt6380-regulator.c
index 127dd720cbcc..b6aed090b5e0 100644
--- a/drivers/regulator/mt6380-regulator.c
+++ b/drivers/regulator/mt6380-regulator.c
@@ -1,16 +1,7 @@
-/*
- * Copyright (c) 2017 MediaTek Inc.
- * Author: Chenglin Xu <chenglin.xu@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2017 MediaTek Inc.
+// Author: Chenglin Xu <chenglin.xu@mediatek.com>
#include <linux/module.h>
#include <linux/of.h>
@@ -173,19 +164,19 @@ static const struct regulator_linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(1200000, 0, 0x3c, 25000),
};
-static const u32 ldo_volt_table1[] = {
+static const unsigned int ldo_volt_table1[] = {
1400000, 1350000, 1300000, 1250000, 1200000, 1150000, 1100000, 1050000,
};
-static const u32 ldo_volt_table2[] = {
+static const unsigned int ldo_volt_table2[] = {
2200000, 3300000,
};
-static const u32 ldo_volt_table3[] = {
+static const unsigned int ldo_volt_table3[] = {
1240000, 1390000, 1540000, 1840000,
};
-static const u32 ldo_volt_table4[] = {
+static const unsigned int ldo_volt_table4[] = {
2200000, 3300000,
};
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
index c6c6aa85e4e8..fd9ed864a0c1 100644
--- a/drivers/regulator/mt6397-regulator.c
+++ b/drivers/regulator/mt6397-regulator.c
@@ -1,16 +1,7 @@
-/*
- * Copyright (c) 2014 MediaTek Inc.
- * Author: Flora Fu <flora.fu@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2014 MediaTek Inc.
+// Author: Flora Fu <flora.fu@mediatek.com>
#include <linux/module.h>
#include <linux/of.h>
@@ -123,35 +114,35 @@ static const struct regulator_linear_range buck_volt_range3[] = {
REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000),
};
-static const u32 ldo_volt_table1[] = {
+static const unsigned int ldo_volt_table1[] = {
1500000, 1800000, 2500000, 2800000,
};
-static const u32 ldo_volt_table2[] = {
+static const unsigned int ldo_volt_table2[] = {
1800000, 3300000,
};
-static const u32 ldo_volt_table3[] = {
+static const unsigned int ldo_volt_table3[] = {
3000000, 3300000,
};
-static const u32 ldo_volt_table4[] = {
+static const unsigned int ldo_volt_table4[] = {
1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
};
-static const u32 ldo_volt_table5[] = {
+static const unsigned int ldo_volt_table5[] = {
1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
};
-static const u32 ldo_volt_table5_v2[] = {
+static const unsigned int ldo_volt_table5_v2[] = {
1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
};
-static const u32 ldo_volt_table6[] = {
+static const unsigned int ldo_volt_table6[] = {
1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
};
-static const u32 ldo_volt_table7[] = {
+static const unsigned int ldo_volt_table7[] = {
1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
};
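
The u32 -> unsigned int conversions across the MediaTek tables match the field they initialize: regulator_desc.volt_table is declared const unsigned int *, so u32 arrays only assign cleanly where u32 happens to typedef to unsigned int. A sketch of a table typed to the field (names illustrative, most desc fields elided):

	static const unsigned int example_volt_table[] = {
		1800000, 3300000,
	};

	static const struct regulator_desc example_desc = {
		.volt_table = example_volt_table,	/* const unsigned int * */
		.n_voltages = ARRAY_SIZE(example_volt_table),
	};
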
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 7b6bf3536271..6dca0ba044d8 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -371,8 +371,9 @@ int of_regulator_match(struct device *dev, struct device_node *node,
}
EXPORT_SYMBOL_GPL(of_regulator_match);
-struct device_node *regulator_of_get_init_node(struct device *dev,
- const struct regulator_desc *desc)
+static struct device_node *regulator_of_get_init_node(struct device *dev,
+ const struct regulator_desc *desc)
{
struct device_node *search, *child;
const char *name;
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 7fb9e8dd834e..f13c7c8b1061 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -991,9 +991,6 @@ static int palmas_ldo_registration(struct palmas_pmic *pmic,
return PTR_ERR(rdev);
}
- /* Save regulator for cleanup */
- pmic->rdev[id] = rdev;
-
/* Initialise sleep/init values from platform data */
if (pdata) {
reg_init = pdata->reg_init[id];
@@ -1101,9 +1098,6 @@ static int tps65917_ldo_registration(struct palmas_pmic *pmic,
return PTR_ERR(rdev);
}
- /* Save regulator for cleanup */
- pmic->rdev[id] = rdev;
-
/* Initialise sleep/init values from platform data */
if (pdata) {
reg_init = pdata->reg_init[id];
@@ -1288,9 +1282,6 @@ static int palmas_smps_registration(struct palmas_pmic *pmic,
pdev_name);
return PTR_ERR(rdev);
}
-
- /* Save regulator for cleanup */
- pmic->rdev[id] = rdev;
}
return 0;
@@ -1395,9 +1386,6 @@ static int tps65917_smps_registration(struct palmas_pmic *pmic,
pdev_name);
return PTR_ERR(rdev);
}
-
- /* Save regulator for cleanup */
- pmic->rdev[id] = rdev;
}
return 0;
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 1600f9821891..3d3415839ba2 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -1,17 +1,7 @@
-/*
- * pv88060-regulator.c - Regulator device driver for PV88060
- * Copyright (C) 2015 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// pv88060-regulator.c - Regulator device driver for PV88060
+// Copyright (C) 2015 Powerventure Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
@@ -244,9 +234,11 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
if (reg_val & PV88060_E_VDD_FLT) {
for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
@@ -261,9 +253,11 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
if (reg_val & PV88060_E_OVER_TEMP) {
for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
diff --git a/drivers/regulator/pv88060-regulator.h b/drivers/regulator/pv88060-regulator.h
index 02ca9203a172..d333dbf3be94 100644
--- a/drivers/regulator/pv88060-regulator.h
+++ b/drivers/regulator/pv88060-regulator.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* pv88060-regulator.h - Regulator definitions for PV88060
* Copyright (C) 2015 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __PV88060_REGISTERS_H__
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index bdddacdbeb99..a444f68af1a8 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -1,17 +1,7 @@
-/*
- * pv88080-regulator.c - Regulator device driver for PV88080
- * Copyright (C) 2016 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// pv88080-regulator.c - Regulator device driver for PV88080
+// Copyright (C) 2016 Powerventure Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
@@ -345,9 +335,11 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
if (reg_val & PV88080_E_VDD_FLT) {
for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
@@ -362,9 +354,11 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
if (reg_val & PV88080_E_OVER_TEMP) {
for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
diff --git a/drivers/regulator/pv88080-regulator.h b/drivers/regulator/pv88080-regulator.h
index ae25ff360e3d..7d7f8f11a75a 100644
--- a/drivers/regulator/pv88080-regulator.h
+++ b/drivers/regulator/pv88080-regulator.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* pv88080-regulator.h - Regulator definitions for PV88080
* Copyright (C) 2016 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __PV88080_REGISTERS_H__
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index 6e97cc6df2ee..b1d0d97ae935 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -1,17 +1,7 @@
-/*
- * pv88090-regulator.c - Regulator device driver for PV88090
- * Copyright (C) 2015 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// pv88090-regulator.c - Regulator device driver for PV88090
+// Copyright (C) 2015 Powerventure Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
@@ -237,9 +227,11 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
if (reg_val & PV88090_E_VDD_FLT) {
for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
@@ -254,9 +246,11 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
if (reg_val & PV88090_E_OVER_TEMP) {
for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
if (chip->rdev[i] != NULL) {
+ regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP,
NULL);
+ regulator_unlock(chip->rdev[i]);
}
}
diff --git a/drivers/regulator/pv88090-regulator.h b/drivers/regulator/pv88090-regulator.h
index 62d9029277f4..f814ee52cff3 100644
--- a/drivers/regulator/pv88090-regulator.h
+++ b/drivers/regulator/pv88090-regulator.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* pv88090-regulator.h - Regulator definitions for PV88090
* Copyright (C) 2015 Powerventure Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __PV88090_REGISTERS_H__
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index 2ec51af43673..9446653e7a89 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -47,18 +47,13 @@ struct rc5t583_regulator_info {
struct regulator_desc desc;
};
-struct rc5t583_regulator {
- struct rc5t583_regulator_info *reg_info;
- struct regulator_dev *rdev;
-};
-
static int rc5t583_regulator_enable_time(struct regulator_dev *rdev)
{
- struct rc5t583_regulator *reg = rdev_get_drvdata(rdev);
+ struct rc5t583_regulator_info *reg_info = rdev_get_drvdata(rdev);
int vsel = regulator_get_voltage_sel_regmap(rdev);
int curr_uV = regulator_list_voltage_linear(rdev, vsel);
- return DIV_ROUND_UP(curr_uV, reg->reg_info->enable_uv_per_us);
+ return DIV_ROUND_UP(curr_uV, reg_info->enable_uv_per_us);
}
static const struct regulator_ops rc5t583_ops = {
@@ -120,8 +115,6 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
struct regulator_config config = { };
- struct rc5t583_regulator *reg = NULL;
- struct rc5t583_regulator *regs;
struct regulator_dev *rdev;
struct rc5t583_regulator_info *ri;
int ret;
@@ -132,18 +125,8 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
return -ENODEV;
}
- regs = devm_kcalloc(&pdev->dev,
- RC5T583_REGULATOR_MAX,
- sizeof(struct rc5t583_regulator),
- GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
-
-
for (id = 0; id < RC5T583_REGULATOR_MAX; ++id) {
- reg = &regs[id];
ri = &rc5t583_reg_info[id];
- reg->reg_info = ri;
if (ri->deepsleep_id == RC5T583_DS_NONE)
goto skip_ext_pwr_config;
@@ -163,7 +146,7 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
skip_ext_pwr_config:
config.dev = &pdev->dev;
config.init_data = pdata->reg_init_data[id];
- config.driver_data = reg;
+ config.driver_data = ri;
config.regmap = rc5t583->regmap;
rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
@@ -172,9 +155,7 @@ skip_ext_pwr_config:
ri->desc.name);
return PTR_ERR(rdev);
}
- reg->rdev = rdev;
}
- platform_set_drvdata(pdev, regs);
return 0;
}
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index 790a4a73ea2c..a79c0c43b9f8 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -46,7 +46,7 @@ static const struct regulator_ops rn5t618_reg_ops = {
.vsel_mask = (vmask), \
}
-static struct regulator_desc rn5t567_regulators[] = {
+static const struct regulator_desc rn5t567_regulators[] = {
/* DCDC */
REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500),
REG(DCDC2, DC2CTL, BIT(0), DC2DAC, 0xff, 600000, 3500000, 12500),
@@ -63,7 +63,7 @@ static struct regulator_desc rn5t567_regulators[] = {
REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000),
};
-static struct regulator_desc rn5t618_regulators[] = {
+static const struct regulator_desc rn5t618_regulators[] = {
/* DCDC */
REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500),
REG(DCDC2, DC2CTL, BIT(0), DC2DAC, 0xff, 600000, 3500000, 12500),
@@ -79,7 +79,7 @@ static struct regulator_desc rn5t618_regulators[] = {
REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000),
};
-static struct regulator_desc rc5t619_regulators[] = {
+static const struct regulator_desc rc5t619_regulators[] = {
/* DCDC */
REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500),
REG(DCDC2, DC2CTL, BIT(0), DC2DAC, 0xff, 600000, 3500000, 12500),
@@ -107,7 +107,7 @@ static int rn5t618_regulator_probe(struct platform_device *pdev)
struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
- struct regulator_desc *regulators;
+ const struct regulator_desc *regulators;
int i;
int num_regulators = 0;
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 58a1fe583a6c..51f7e8b74d8c 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -17,10 +17,7 @@
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s2mpa01.h>
-#define S2MPA01_REGULATOR_CNT ARRAY_SIZE(regulators)
-
struct s2mpa01_info {
- struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX];
int ramp_delay24;
int ramp_delay3;
int ramp_delay5;
@@ -232,6 +229,8 @@ static const struct regulator_ops s2mpa01_buck_ops = {
#define regulator_desc_ldo(num, step) { \
.name = "LDO"#num, \
+ .of_match = of_match_ptr("LDO"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
.id = S2MPA01_LDO##num, \
.ops = &s2mpa01_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -247,6 +246,8 @@ static const struct regulator_ops s2mpa01_buck_ops = {
#define regulator_desc_buck1_4(num) { \
.name = "BUCK"#num, \
+ .of_match = of_match_ptr("BUCK"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
.id = S2MPA01_BUCK##num, \
.ops = &s2mpa01_buck_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -263,6 +264,8 @@ static const struct regulator_ops s2mpa01_buck_ops = {
#define regulator_desc_buck5 { \
.name = "BUCK5", \
+ .of_match = of_match_ptr("BUCK5"), \
+ .regulators_node = of_match_ptr("regulators"), \
.id = S2MPA01_BUCK5, \
.ops = &s2mpa01_buck_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -279,6 +282,8 @@ static const struct regulator_ops s2mpa01_buck_ops = {
#define regulator_desc_buck6_10(num, min, step) { \
.name = "BUCK"#num, \
+ .of_match = of_match_ptr("BUCK"#num), \
+ .regulators_node = of_match_ptr("regulators"), \
.id = S2MPA01_BUCK##num, \
.ops = &s2mpa01_buck_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -336,9 +341,7 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
- struct device_node *reg_np = NULL;
struct regulator_config config = { };
- struct of_regulator_match *rdata;
struct s2mpa01_info *s2mpa01;
int i;
@@ -346,39 +349,15 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
if (!s2mpa01)
return -ENOMEM;
- rdata = s2mpa01->rdata;
- for (i = 0; i < S2MPA01_REGULATOR_CNT; i++)
- rdata[i].name = regulators[i].name;
-
- if (iodev->dev->of_node) {
- reg_np = of_get_child_by_name(iodev->dev->of_node,
- "regulators");
- if (!reg_np) {
- dev_err(&pdev->dev,
- "could not find regulators sub-node\n");
- return -EINVAL;
- }
-
- of_regulator_match(&pdev->dev, reg_np, rdata,
- S2MPA01_REGULATOR_MAX);
- of_node_put(reg_np);
- }
-
- platform_set_drvdata(pdev, s2mpa01);
-
- config.dev = &pdev->dev;
+ config.dev = iodev->dev;
config.regmap = iodev->regmap_pmic;
config.driver_data = s2mpa01;
for (i = 0; i < S2MPA01_REGULATOR_MAX; i++) {
struct regulator_dev *rdev;
+
if (pdata)
config.init_data = pdata->regulators[i].initdata;
- else
- config.init_data = rdata[i].init_data;
-
- if (reg_np)
- config.of_node = rdata[i].of_node;
rdev = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
diff --git a/drivers/regulator/sc2731-regulator.c b/drivers/regulator/sc2731-regulator.c
index eb2bdf060b7b..0f21f95c8981 100644
--- a/drivers/regulator/sc2731-regulator.c
+++ b/drivers/regulator/sc2731-regulator.c
@@ -146,7 +146,7 @@ static const struct regulator_ops sc2731_regu_linear_ops = {
.vsel_mask = vmask, \
}
-static struct regulator_desc regulators[] = {
+static const struct regulator_desc regulators[] = {
SC2731_REGU_LINEAR(BUCK_CPU0, SC2731_POWER_PD_SW,
SC2731_DCDC_CPU0_PD_MASK, SC2731_DCDC_CPU0_VOL,
SC2731_DCDC_CPU0_VOL_MASK, 3125, 400000, 1996875),
diff --git a/drivers/regulator/sky81452-regulator.c b/drivers/regulator/sky81452-regulator.c
index 647860611916..177dede82a61 100644
--- a/drivers/regulator/sky81452-regulator.c
+++ b/drivers/regulator/sky81452-regulator.c
@@ -1,21 +1,9 @@
-/*
- * sky81452-regulator.c SKY81452 regulator driver
- *
- * Copyright 2014 Skyworks Solutions Inc.
- * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// sky81452-regulator.c SKY81452 regulator driver
+//
+// Copyright 2014 Skyworks Solutions Inc.
+// Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -34,7 +22,7 @@
#define SKY81452_LEN 0x40
#define SKY81452_LOUT 0x1F
-static struct regulator_ops sky81452_reg_ops = {
+static const struct regulator_ops sky81452_reg_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
new file mode 100644
index 000000000000..e0e627b0106e
--- /dev/null
+++ b/drivers/regulator/stm32-pwr.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) STMicroelectronics 2019
+// Authors: Gabriel Fernandez <gabriel.fernandez@st.com>
+// Pascal Paillet <p.paillet@st.com>.
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * Registers description
+ */
+#define REG_PWR_CR3 0x0C
+
+#define USB_3_3_EN BIT(24)
+#define USB_3_3_RDY BIT(26)
+#define REG_1_8_EN BIT(28)
+#define REG_1_8_RDY BIT(29)
+#define REG_1_1_EN BIT(30)
+#define REG_1_1_RDY BIT(31)
+
+/* list of supported regulators */
+enum {
+ PWR_REG11,
+ PWR_REG18,
+ PWR_USB33,
+ STM32PWR_REG_NUM_REGS
+};
+
+static u32 ready_mask_table[STM32PWR_REG_NUM_REGS] = {
+ [PWR_REG11] = REG_1_1_RDY,
+ [PWR_REG18] = REG_1_8_RDY,
+ [PWR_USB33] = USB_3_3_RDY,
+};
+
+struct stm32_pwr_reg {
+ void __iomem *base;
+ u32 ready_mask;
+};
+
+static int stm32_pwr_reg_is_ready(struct regulator_dev *rdev)
+{
+ struct stm32_pwr_reg *priv = rdev_get_drvdata(rdev);
+ u32 val;
+
+ val = readl_relaxed(priv->base + REG_PWR_CR3);
+
+ return (val & priv->ready_mask);
+}
+
+static int stm32_pwr_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct stm32_pwr_reg *priv = rdev_get_drvdata(rdev);
+ u32 val;
+
+ val = readl_relaxed(priv->base + REG_PWR_CR3);
+
+ return (val & rdev->desc->enable_mask);
+}
+
+static int stm32_pwr_reg_enable(struct regulator_dev *rdev)
+{
+ struct stm32_pwr_reg *priv = rdev_get_drvdata(rdev);
+ int ret;
+ u32 val;
+
+ val = readl_relaxed(priv->base + REG_PWR_CR3);
+ val |= rdev->desc->enable_mask;
+ writel_relaxed(val, priv->base + REG_PWR_CR3);
+
+ /* use an arbitrary timeout of 20ms */
+ ret = readx_poll_timeout(stm32_pwr_reg_is_ready, rdev, val, val,
+ 100, 20 * 1000);
+ if (ret)
+ dev_err(&rdev->dev, "regulator enable timed out!\n");
+
+ return ret;
+}
+
+static int stm32_pwr_reg_disable(struct regulator_dev *rdev)
+{
+ struct stm32_pwr_reg *priv = rdev_get_drvdata(rdev);
+ int ret;
+ u32 val;
+
+ val = readl_relaxed(priv->base + REG_PWR_CR3);
+ val &= ~rdev->desc->enable_mask;
+ writel_relaxed(val, priv->base + REG_PWR_CR3);
+
+ /* use an arbitrary timeout of 20ms */
+ ret = readx_poll_timeout(stm32_pwr_reg_is_ready, rdev, val, !val,
+ 100, 20 * 1000);
+ if (ret)
+ dev_err(&rdev->dev, "regulator disable timed out!\n");
+
+ return ret;
+}
+
+static const struct regulator_ops stm32_pwr_reg_ops = {
+ .enable = stm32_pwr_reg_enable,
+ .disable = stm32_pwr_reg_disable,
+ .is_enabled = stm32_pwr_reg_is_enabled,
+};
+
+#define PWR_REG(_id, _name, _volt, _en, _supply) \
+ [_id] = { \
+ .id = _id, \
+ .name = _name, \
+ .of_match = of_match_ptr(_name), \
+ .n_voltages = 1, \
+ .type = REGULATOR_VOLTAGE, \
+ .fixed_uV = _volt, \
+ .ops = &stm32_pwr_reg_ops, \
+ .enable_mask = _en, \
+ .owner = THIS_MODULE, \
+ .supply_name = _supply, \
+ }
+
+static const struct regulator_desc stm32_pwr_desc[] = {
+ PWR_REG(PWR_REG11, "reg11", 1100000, REG_1_1_EN, "vdd"),
+ PWR_REG(PWR_REG18, "reg18", 1800000, REG_1_8_EN, "vdd"),
+ PWR_REG(PWR_USB33, "usb33", 3300000, USB_3_3_EN, "vdd_3v3_usbfs"),
+};
+
+static int stm32_pwr_regulator_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct stm32_pwr_reg *priv;
+ void __iomem *base;
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ int i, ret = 0;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(&pdev->dev, "Unable to map IO memory\n");
+ return -ENOMEM;
+ }
+
+ config.dev = &pdev->dev;
+
+ for (i = 0; i < STM32PWR_REG_NUM_REGS; i++) {
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct stm32_pwr_reg),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->base = base;
+ priv->ready_mask = ready_mask_table[i];
+ config.driver_data = priv;
+
+ rdev = devm_regulator_register(&pdev->dev,
+ &stm32_pwr_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&pdev->dev,
+ "Failed to register regulator: %d\n", ret);
+ break;
+ }
+ }
+ return ret;
+}
+
+static const struct of_device_id stm32_pwr_of_match[] = {
+ { .compatible = "st,stm32mp1,pwr-reg", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_pwr_of_match);
+
+static struct platform_driver stm32_pwr_driver = {
+ .probe = stm32_pwr_regulator_probe,
+ .driver = {
+ .name = "stm32-pwr-regulator",
+ .of_match_table = of_match_ptr(stm32_pwr_of_match),
+ },
+};
+module_platform_driver(stm32_pwr_driver);
+
+MODULE_DESCRIPTION("STM32MP1 PWR voltage regulator driver");
+MODULE_AUTHOR("Pascal Paillet <p.paillet@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c
index 65fbd1f0b612..42e03b2c10a0 100644
--- a/drivers/regulator/sy8106a-regulator.c
+++ b/drivers/regulator/sy8106a-regulator.c
@@ -22,12 +22,6 @@
*/
#define SY8106A_GO_BIT BIT(7)
-struct sy8106a {
- struct regulator_dev *rdev;
- struct regmap *regmap;
- u32 fixed_voltage;
-};
-
static const struct regmap_config sy8106a_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -70,36 +64,32 @@ static const struct regulator_desc sy8106a_reg = {
static int sy8106a_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct sy8106a *chip;
struct device *dev = &i2c->dev;
- struct regulator_dev *rdev = NULL;
+ struct regulator_dev *rdev;
struct regulator_config config = { };
+ struct regmap *regmap;
unsigned int reg, vsel;
+ u32 fixed_voltage;
int error;
- chip = devm_kzalloc(&i2c->dev, sizeof(struct sy8106a), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
error = of_property_read_u32(dev->of_node, "silergy,fixed-microvolt",
- &chip->fixed_voltage);
+ &fixed_voltage);
if (error)
return error;
- if (chip->fixed_voltage < SY8106A_MIN_MV * 1000 ||
- chip->fixed_voltage > SY8106A_MAX_MV * 1000)
+ if (fixed_voltage < SY8106A_MIN_MV * 1000 ||
+ fixed_voltage > SY8106A_MAX_MV * 1000)
return -EINVAL;
- chip->regmap = devm_regmap_init_i2c(i2c, &sy8106a_regmap_config);
- if (IS_ERR(chip->regmap)) {
- error = PTR_ERR(chip->regmap);
+ regmap = devm_regmap_init_i2c(i2c, &sy8106a_regmap_config);
+ if (IS_ERR(regmap)) {
+ error = PTR_ERR(regmap);
dev_err(dev, "Failed to allocate register map: %d\n", error);
return error;
}
config.dev = &i2c->dev;
- config.regmap = chip->regmap;
- config.driver_data = chip;
+ config.regmap = regmap;
config.of_node = dev->of_node;
config.init_data = of_get_regulator_init_data(dev, dev->of_node,
@@ -109,15 +99,15 @@ static int sy8106a_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
/* Ensure GO_BIT is enabled when probing */
- error = regmap_read(chip->regmap, SY8106A_REG_VOUT1_SEL, &reg);
+ error = regmap_read(regmap, SY8106A_REG_VOUT1_SEL, &reg);
if (error)
return error;
if (!(reg & SY8106A_GO_BIT)) {
- vsel = (chip->fixed_voltage / 1000 - SY8106A_MIN_MV) /
+ vsel = (fixed_voltage / 1000 - SY8106A_MIN_MV) /
SY8106A_STEP_MV;
- error = regmap_write(chip->regmap, SY8106A_REG_VOUT1_SEL,
+ error = regmap_write(regmap, SY8106A_REG_VOUT1_SEL,
vsel | SY8106A_GO_BIT);
if (error)
return error;
@@ -131,10 +121,6 @@ static int sy8106a_i2c_probe(struct i2c_client *i2c,
return error;
}
- chip->rdev = rdev;
-
- i2c_set_clientdata(i2c, chip);
-
return 0;
}
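
The sy8106a rework is a recurring pattern in this series: once registration is devm-managed and nothing has to be looked up at remove time, probe-only state can live on the stack, and the private struct plus i2c_set_clientdata() disappear. Reduced to its skeleton (reusing the hypothetical foo_desc from the earlier sketch):

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int foo_i2c_probe(struct i2c_client *i2c,
			 const struct i2c_device_id *id)
{
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	struct regmap *regmap;

	regmap = devm_regmap_init_i2c(i2c, &foo_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	config.dev = &i2c->dev;
	config.regmap = regmap;
	config.of_node = i2c->dev.of_node;

	rdev = devm_regulator_register(&i2c->dev, &foo_desc, &config);
	return PTR_ERR_OR_ZERO(rdev);	/* all unwinding is devm's job */
}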
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index c179a3a221af..a1b7fab91dd4 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -115,7 +115,6 @@ static struct tps_info tps6507x_pmic_regs[] = {
struct tps6507x_pmic {
struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
struct tps6507x_dev *mfd;
- struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
struct tps_info *info[TPS6507X_NUM_REGULATOR];
struct mutex io_lock;
};
@@ -349,7 +348,7 @@ static int tps6507x_pmic_set_voltage_sel(struct regulator_dev *dev,
return tps6507x_pmic_reg_write(tps, reg, data);
}
-static struct regulator_ops tps6507x_pmic_ops = {
+static const struct regulator_ops tps6507x_pmic_ops = {
.is_enabled = tps6507x_pmic_is_enabled,
.enable = tps6507x_pmic_enable,
.disable = tps6507x_pmic_disable,
@@ -359,66 +358,20 @@ static struct regulator_ops tps6507x_pmic_ops = {
.map_voltage = regulator_map_voltage_ascend,
};
-static struct of_regulator_match tps6507x_matches[] = {
- { .name = "VDCDC1"},
- { .name = "VDCDC2"},
- { .name = "VDCDC3"},
- { .name = "LDO1"},
- { .name = "LDO2"},
-};
-
-static struct tps6507x_board *tps6507x_parse_dt_reg_data(
- struct platform_device *pdev,
- struct of_regulator_match **tps6507x_reg_matches)
+static int tps6507x_pmic_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
{
- struct tps6507x_board *tps_board;
- struct device_node *np = pdev->dev.parent->of_node;
- struct device_node *regulators;
- struct of_regulator_match *matches;
- struct regulator_init_data *reg_data;
- int idx = 0, count, ret;
-
- tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
- GFP_KERNEL);
- if (!tps_board)
- return NULL;
-
- regulators = of_get_child_by_name(np, "regulators");
- if (!regulators) {
- dev_err(&pdev->dev, "regulator node not found\n");
- return NULL;
- }
-
- count = ARRAY_SIZE(tps6507x_matches);
- matches = tps6507x_matches;
-
- ret = of_regulator_match(&pdev->dev, regulators, matches, count);
- of_node_put(regulators);
- if (ret < 0) {
- dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
- ret);
- return NULL;
- }
+ struct tps6507x_pmic *tps = config->driver_data;
+ struct tps_info *info = tps->info[desc->id];
+ u32 prop;
+ int ret;
- *tps6507x_reg_matches = matches;
-
- reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
- * TPS6507X_NUM_REGULATOR), GFP_KERNEL);
- if (!reg_data)
- return NULL;
-
- tps_board->tps6507x_pmic_init_data = reg_data;
-
- for (idx = 0; idx < count; idx++) {
- if (!matches[idx].init_data || !matches[idx].of_node)
- continue;
-
- memcpy(&reg_data[idx], matches[idx].init_data,
- sizeof(struct regulator_init_data));
-
- }
+ ret = of_property_read_u32(np, "ti,defdcdc_default", &prop);
+ if (!ret)
+ info->defdcdc_default = prop;
- return tps_board;
+ return 0;
}
static int tps6507x_pmic_probe(struct platform_device *pdev)
@@ -426,14 +379,11 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
struct tps_info *info = &tps6507x_pmic_regs[0];
struct regulator_config config = { };
- struct regulator_init_data *init_data;
+ struct regulator_init_data *init_data = NULL;
struct regulator_dev *rdev;
struct tps6507x_pmic *tps;
struct tps6507x_board *tps_board;
- struct of_regulator_match *tps6507x_reg_matches = NULL;
int i;
- int error;
- unsigned int prop;
/**
* tps_board points to pmic related constants
@@ -441,20 +391,8 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
*/
tps_board = dev_get_platdata(tps6507x_dev->dev);
- if (IS_ENABLED(CONFIG_OF) && !tps_board &&
- tps6507x_dev->dev->of_node)
- tps_board = tps6507x_parse_dt_reg_data(pdev,
- &tps6507x_reg_matches);
- if (!tps_board)
- return -EINVAL;
-
- /**
- * init_data points to array of regulator_init structures
- * coming from the board-evm file.
- */
- init_data = tps_board->tps6507x_pmic_init_data;
- if (!init_data)
- return -EINVAL;
+ if (tps_board)
+ init_data = tps_board->tps6507x_pmic_init_data;
tps = devm_kzalloc(&pdev->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -468,13 +406,16 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
/* Register the regulators */
tps->info[i] = info;
- if (init_data->driver_data) {
+ if (init_data && init_data->driver_data) {
struct tps6507x_reg_platform_data *data =
init_data->driver_data;
- tps->info[i]->defdcdc_default = data->defdcdc_default;
+ info->defdcdc_default = data->defdcdc_default;
}
tps->desc[i].name = info->name;
+ tps->desc[i].of_match = of_match_ptr(info->name);
+ tps->desc[i].regulators_node = of_match_ptr("regulators");
+ tps->desc[i].of_parse_cb = tps6507x_pmic_of_parse_cb;
tps->desc[i].id = i;
tps->desc[i].n_voltages = info->table_len;
tps->desc[i].volt_table = info->table;
@@ -486,17 +427,6 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
config.init_data = init_data;
config.driver_data = tps;
- if (tps6507x_reg_matches) {
- error = of_property_read_u32(
- tps6507x_reg_matches[i].of_node,
- "ti,defdcdc_default", &prop);
-
- if (!error)
- tps->info[i]->defdcdc_default = prop;
-
- config.of_node = tps6507x_reg_matches[i].of_node;
- }
-
rdev = devm_regulator_register(&pdev->dev, &tps->desc[i],
&config);
if (IS_ERR(rdev)) {
@@ -505,9 +435,6 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
pdev->name);
return PTR_ERR(rdev);
}
-
- /* Save regulator for cleanup */
- tps->rdev[i] = rdev;
}
tps6507x_dev->pmic = tps;
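
tps6507x swaps a hand-rolled of_regulator_match() parser for the core's declarative hooks: .of_match and .regulators_node tell the core where each regulator's DT node lives, and .of_parse_cb runs once per node for vendor properties. A minimal callback, with "vendor,prop" and struct foo_pmic standing in for a real binding and real driver state:

static int foo_of_parse_cb(struct device_node *np,
			   const struct regulator_desc *desc,
			   struct regulator_config *config)
{
	struct foo_pmic *foo = config->driver_data;	/* hypothetical state */
	u32 val;

	/* an absent property simply keeps the default */
	if (!of_property_read_u32(np, "vendor,prop", &val))
		foo->setting[desc->id] = val;

	return 0;
}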
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index 45e96e154690..5a5e9b5bf4be 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -90,8 +90,8 @@ static const struct regulator_linear_range tps65086_buck345_25mv_ranges[] = {
static const struct regulator_linear_range tps65086_ldoa1_ranges[] = {
REGULATOR_LINEAR_RANGE(1350000, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(1500000, 0x1, 0x7, 100000),
- REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xA, 100000),
- REGULATOR_LINEAR_RANGE(2700000, 0xB, 0xD, 150000),
+ REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xB, 100000),
+ REGULATOR_LINEAR_RANGE(2850000, 0xC, 0xD, 150000),
REGULATOR_LINEAR_RANGE(3300000, 0xE, 0xE, 0),
};
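
For reference, REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) decodes a selector as min_uV + (sel - min_sel) * step_uV, so the tps65086 fix moves selector 0xB out of the 150 mV range and into the 100 mV one: 2300000 + (0xB - 0x8) * 100000 = 2600000 uV, while 0xC now decodes to 2850000 uV. A sketch of the per-range decode the core performs:

static int decode_linear_range(const struct regulator_linear_range *r,
			       unsigned int sel)
{
	if (sel < r->min_sel || sel > r->max_sel)
		return -EINVAL;

	return r->min_uV + (sel - r->min_sel) * r->uV_step;
}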
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index 73978dd440f7..6e22f5ebba2e 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -55,10 +55,7 @@ struct tps65132_reg_pdata {
struct tps65132_regulator {
struct device *dev;
- struct regmap *rmap;
- struct regulator_desc *rdesc[TPS65132_MAX_REGULATORS];
struct tps65132_reg_pdata reg_pdata[TPS65132_MAX_REGULATORS];
- struct regulator_dev *rdev[TPS65132_MAX_REGULATORS];
};
static int tps65132_regulator_enable(struct regulator_dev *rdev)
@@ -120,7 +117,7 @@ static int tps65132_regulator_is_enabled(struct regulator_dev *rdev)
return 1;
}
-static struct regulator_ops tps65132_regulator_ops = {
+static const struct regulator_ops tps65132_regulator_ops = {
.enable = tps65132_regulator_enable,
.disable = tps65132_regulator_disable,
.is_enabled = tps65132_regulator_is_enabled,
@@ -196,7 +193,7 @@ static int tps65132_of_parse_cb(struct device_node *np,
.owner = THIS_MODULE, \
}
-static struct regulator_desc tps_regs_desc[TPS65132_MAX_REGULATORS] = {
+static const struct regulator_desc tps_regs_desc[TPS65132_MAX_REGULATORS] = {
TPS65132_REGULATOR_DESC(VPOS, outp),
TPS65132_REGULATOR_DESC(VNEG, outn),
};
@@ -225,6 +222,8 @@ static int tps65132_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct tps65132_regulator *tps;
+ struct regulator_dev *rdev;
+ struct regmap *rmap;
struct regulator_config config = { };
int id;
int ret;
@@ -233,9 +232,9 @@ static int tps65132_probe(struct i2c_client *client,
if (!tps)
return -ENOMEM;
- tps->rmap = devm_regmap_init_i2c(client, &tps65132_regmap_config);
- if (IS_ERR(tps->rmap)) {
- ret = PTR_ERR(tps->rmap);
+ rmap = devm_regmap_init_i2c(client, &tps65132_regmap_config);
+ if (IS_ERR(rmap)) {
+ ret = PTR_ERR(rmap);
dev_err(dev, "regmap init failed: %d\n", ret);
return ret;
}
@@ -244,18 +243,16 @@ static int tps65132_probe(struct i2c_client *client,
tps->dev = dev;
for (id = 0; id < TPS65132_MAX_REGULATORS; ++id) {
- tps->rdesc[id] = &tps_regs_desc[id];
-
- config.regmap = tps->rmap;
+ config.regmap = rmap;
config.dev = dev;
config.driver_data = tps;
- tps->rdev[id] = devm_regulator_register(dev,
- tps->rdesc[id], &config);
- if (IS_ERR(tps->rdev[id])) {
- ret = PTR_ERR(tps->rdev[id]);
+ rdev = devm_regulator_register(dev, &tps_regs_desc[id],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(dev, "regulator %s register failed: %d\n",
- tps->rdesc[id]->name, ret);
+ tps_regs_desc[id].name, ret);
return ret;
}
}
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index d84fab616abf..67ba78da77ec 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -58,10 +58,9 @@ static const unsigned int LDO1_VSEL_table[] = {
static const struct regulator_linear_range tps65217_uv1_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 24, 25000),
- REGULATOR_LINEAR_RANGE(1550000, 25, 30, 50000),
- REGULATOR_LINEAR_RANGE(1850000, 31, 52, 50000),
+ REGULATOR_LINEAR_RANGE(1550000, 25, 52, 50000),
REGULATOR_LINEAR_RANGE(3000000, 53, 55, 100000),
- REGULATOR_LINEAR_RANGE(3300000, 56, 62, 0),
+ REGULATOR_LINEAR_RANGE(3300000, 56, 63, 0),
};
static const struct regulator_linear_range tps65217_uv2_ranges[] = {
@@ -150,7 +149,7 @@ static int tps65217_pmic_set_suspend_disable(struct regulator_dev *dev)
}
/* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */
-static struct regulator_ops tps65217_pmic_ops = {
+static const struct regulator_ops tps65217_pmic_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65217_pmic_enable,
.disable = tps65217_pmic_disable,
@@ -163,7 +162,7 @@ static struct regulator_ops tps65217_pmic_ops = {
};
/* Operations permitted on LDO1 */
-static struct regulator_ops tps65217_pmic_ldo1_ops = {
+static const struct regulator_ops tps65217_pmic_ldo1_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65217_pmic_enable,
.disable = tps65217_pmic_disable,
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 95708d34876b..b72035610013 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -29,7 +29,8 @@
#include <linux/mfd/tps65218.h>
#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
- _em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
+ _em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm, \
+ _ct, _ncl) \
{ \
.name = _name, \
.of_match = _of, \
@@ -42,6 +43,8 @@
.vsel_mask = _vm, \
.csel_reg = _cr, \
.csel_mask = _cm, \
+ .curr_table = _ct, \
+ .n_current_limits = _ncl, \
.enable_reg = _er, \
.enable_mask = _em, \
.volt_table = NULL, \
@@ -162,7 +165,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
}
/* Operations permitted on DCDC1, DCDC2 */
-static struct regulator_ops tps65218_dcdc12_ops = {
+static const struct regulator_ops tps65218_dcdc12_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
@@ -176,7 +179,7 @@ static struct regulator_ops tps65218_dcdc12_ops = {
};
/* Operations permitted on DCDC3, DCDC4 and LDO1 */
-static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
+static const struct regulator_ops tps65218_ldo1_dcdc34_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
@@ -188,8 +191,7 @@ static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
.set_suspend_disable = tps65218_pmic_set_suspend_disable,
};
-static const int ls3_currents[] = { 100000, 200000, 500000, 1000000 };
-
+static const unsigned int ls3_currents[] = { 100000, 200000, 500000, 1000000 };
static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev,
int lim_uA)
@@ -229,33 +231,17 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev,
TPS65218_PROTECT_L1);
}
-static int tps65218_pmic_get_current_limit(struct regulator_dev *dev)
-{
- int retval;
- unsigned int index;
- struct tps65218 *tps = rdev_get_drvdata(dev);
-
- retval = regmap_read(tps->regmap, dev->desc->csel_reg, &index);
- if (retval < 0)
- return retval;
-
- index = (index & dev->desc->csel_mask) >>
- __builtin_ctz(dev->desc->csel_mask);
-
- return ls3_currents[index];
-}
-
-static struct regulator_ops tps65218_ls23_ops = {
+static const struct regulator_ops tps65218_ls23_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
.set_input_current_limit = tps65218_pmic_set_input_current_lim,
.set_current_limit = tps65218_pmic_set_current_limit,
- .get_current_limit = tps65218_pmic_get_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
/* Operations permitted on DCDC5, DCDC6 */
-static struct regulator_ops tps65218_dcdc56_pmic_ops = {
+static const struct regulator_ops tps65218_dcdc56_pmic_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
@@ -270,53 +256,57 @@ static const struct regulator_desc regulators[] = {
TPS65218_CONTROL_DCDC1_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC1_EN, 0, 0, dcdc1_dcdc2_ranges,
2, 4000, 0, TPS65218_REG_SEQ3,
- TPS65218_SEQ3_DC1_SEQ_MASK),
+ TPS65218_SEQ3_DC1_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("DCDC2", "regulator-dcdc2", TPS65218_DCDC_2,
REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64,
TPS65218_REG_CONTROL_DCDC2,
TPS65218_CONTROL_DCDC2_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC2_EN, 0, 0, dcdc1_dcdc2_ranges,
2, 4000, 0, TPS65218_REG_SEQ3,
- TPS65218_SEQ3_DC2_SEQ_MASK),
+ TPS65218_SEQ3_DC2_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("DCDC3", "regulator-dcdc3", TPS65218_DCDC_3,
REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_DCDC3,
TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC3_EN, 0, 0, ldo1_dcdc3_ranges, 2,
- 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC3_SEQ_MASK),
+ 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC3_SEQ_MASK,
+ NULL, 0),
TPS65218_REGULATOR("DCDC4", "regulator-dcdc4", TPS65218_DCDC_4,
REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 53,
TPS65218_REG_CONTROL_DCDC4,
TPS65218_CONTROL_DCDC4_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC4_EN, 0, 0, dcdc4_ranges, 2,
- 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC4_SEQ_MASK),
+ 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC4_SEQ_MASK,
+ NULL, 0),
TPS65218_REGULATOR("DCDC5", "regulator-dcdc5", TPS65218_DCDC_5,
REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
-1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0,
0, NULL, 0, 0, 1000000, TPS65218_REG_SEQ5,
- TPS65218_SEQ5_DC5_SEQ_MASK),
+ TPS65218_SEQ5_DC5_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("DCDC6", "regulator-dcdc6", TPS65218_DCDC_6,
REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
-1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0,
0, NULL, 0, 0, 1800000, TPS65218_REG_SEQ5,
- TPS65218_SEQ5_DC6_SEQ_MASK),
+ TPS65218_SEQ5_DC6_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("LDO1", "regulator-ldo1", TPS65218_LDO_1,
REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_LDO1,
TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges,
2, 0, 0, TPS65218_REG_SEQ6,
- TPS65218_SEQ6_LDO1_SEQ_MASK),
+ TPS65218_SEQ6_LDO1_SEQ_MASK, NULL, 0),
TPS65218_REGULATOR("LS2", "regulator-ls2", TPS65218_LS_2,
REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0,
TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS2_EN,
TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS2ILIM_MASK,
- NULL, 0, 0, 0, 0, 0),
+ NULL, 0, 0, 0, 0, 0, ls3_currents,
+ ARRAY_SIZE(ls3_currents)),
TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3,
REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0,
TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN,
TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK,
- NULL, 0, 0, 0, 0, 0),
+ NULL, 0, 0, 0, 0, 0, ls3_currents,
+ ARRAY_SIZE(ls3_currents)),
};
static int tps65218_regulator_probe(struct platform_device *pdev)
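
Dropping tps65218_pmic_get_current_limit() works because, once curr_table/n_current_limits and csel_reg/csel_mask sit in the desc, the generic helper performs the same lookup. Approximately (a sketch of the helper's logic, not a verbatim copy of it):

static int get_current_limit_sketch(struct regulator_dev *rdev)
{
	unsigned int sel;
	int ret;

	ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &sel);
	if (ret)
		return ret;

	sel = (sel & rdev->desc->csel_mask) >> (ffs(rdev->desc->csel_mask) - 1);
	if (sel >= rdev->desc->n_current_limits)
		return -EINVAL;

	return rdev->desc->curr_table[sel];	/* microamps */
}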
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 67cac2682f50..740aeccdfb1f 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -137,7 +137,6 @@ struct tps6524x {
struct spi_device *spi;
struct mutex lock;
struct regulator_desc desc[N_REGULATORS];
- struct regulator_dev *rdev[N_REGULATORS];
};
static int __read_reg(struct tps6524x *hw, int reg)
@@ -565,7 +564,7 @@ static int is_supply_enabled(struct regulator_dev *rdev)
return read_field(hw, &info->enable);
}
-static struct regulator_ops regulator_ops = {
+static const struct regulator_ops regulator_ops = {
.is_enabled = is_supply_enabled,
.enable = enable_supply,
.disable = disable_supply,
@@ -584,6 +583,7 @@ static int pmic_probe(struct spi_device *spi)
const struct supply_info *info = supply_info;
struct regulator_init_data *init_data;
struct regulator_config config = { };
+ struct regulator_dev *rdev;
int i;
init_data = dev_get_platdata(dev);
@@ -616,10 +616,9 @@ static int pmic_probe(struct spi_device *spi)
config.init_data = init_data;
config.driver_data = hw;
- hw->rdev[i] = devm_regulator_register(dev, &hw->desc[i],
- &config);
- if (IS_ERR(hw->rdev[i]))
- return PTR_ERR(hw->rdev[i]);
+ rdev = devm_regulator_register(dev, &hw->desc[i], &config);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
}
return 0;
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index 1001147404c3..85a6a8ca8c1b 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -1,27 +1,13 @@
-/*
- * tps80031-regulator.c -- TI TPS80031 regulator driver.
- *
- * Regulator driver for TI TPS80031/TPS80032 Fully Integrated Power
- * Management with Power Path and Battery Charger.
- *
- * Copyright (c) 2012, NVIDIA Corporation.
- *
- * Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// tps80031-regulator.c -- TI TPS80031 regulator driver.
+//
+// Regulator driver for TI TPS80031/TPS80032 Fully Integrated Power
+// Management with Power Path and Battery Charger.
+//
+// Copyright (c) 2012, NVIDIA Corporation.
+//
+// Author: Laxman Dewangan <ldewangan@nvidia.com>
#include <linux/delay.h>
#include <linux/err.h>
@@ -85,7 +71,6 @@ struct tps80031_regulator_info {
struct tps80031_regulator {
struct device *dev;
- struct regulator_dev *rdev;
struct tps80031_regulator_info *rinfo;
u8 device_flags;
@@ -155,7 +140,7 @@ static int tps80031_reg_disable(struct regulator_dev *rdev)
}
/* DCDC voltages for the selector of 58 to 63 */
-static int tps80031_dcdc_voltages[4][5] = {
+static const int tps80031_dcdc_voltages[4][5] = {
{ 1350, 1500, 1800, 1900, 2100},
{ 1350, 1500, 1800, 1900, 2100},
{ 2084, 2315, 2778, 2932, 3241},
@@ -378,7 +363,7 @@ static int tps80031_vbus_disable(struct regulator_dev *rdev)
return ret;
}
-static struct regulator_ops tps80031_dcdc_ops = {
+static const struct regulator_ops tps80031_dcdc_ops = {
.list_voltage = tps80031_dcdc_list_voltage,
.set_voltage_sel = tps80031_dcdc_set_voltage_sel,
.get_voltage_sel = tps80031_dcdc_get_voltage_sel,
@@ -387,7 +372,7 @@ static struct regulator_ops tps80031_dcdc_ops = {
.is_enabled = tps80031_reg_is_enabled,
};
-static struct regulator_ops tps80031_ldo_ops = {
+static const struct regulator_ops tps80031_ldo_ops = {
.list_voltage = tps80031_ldo_list_voltage,
.map_voltage = tps80031_ldo_map_voltage,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -397,18 +382,18 @@ static struct regulator_ops tps80031_ldo_ops = {
.is_enabled = tps80031_reg_is_enabled,
};
-static struct regulator_ops tps80031_vbus_sw_ops = {
+static const struct regulator_ops tps80031_vbus_sw_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = tps80031_vbus_enable,
.disable = tps80031_vbus_disable,
.is_enabled = tps80031_vbus_is_enabled,
};
-static struct regulator_ops tps80031_vbus_hw_ops = {
+static const struct regulator_ops tps80031_vbus_hw_ops = {
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops tps80031_ext_reg_ops = {
+static const struct regulator_ops tps80031_ext_reg_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = tps80031_reg_enable,
.disable = tps80031_reg_disable,
@@ -736,7 +721,6 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
ri->rinfo->desc.name);
return PTR_ERR(rdev);
}
- ri->rdev = rdev;
}
platform_set_drvdata(pdev, pmic);
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 402ea43c77d1..cdd81e1985ff 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -392,7 +392,7 @@ static int twl4030ldo_get_voltage_sel(struct regulator_dev *rdev)
return vsel;
}
-static struct regulator_ops twl4030ldo_ops = {
+static const struct regulator_ops twl4030ldo_ops = {
.list_voltage = twl4030ldo_list_voltage,
.set_voltage_sel = twl4030ldo_set_voltage_sel,
@@ -430,14 +430,14 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev)
return vsel * 12500 + 600000;
}
-static struct regulator_ops twl4030smps_ops = {
+static const struct regulator_ops twl4030smps_ops = {
.set_voltage = twl4030smps_set_voltage,
.get_voltage = twl4030smps_get_voltage,
};
/*----------------------------------------------------------------------*/
-static struct regulator_ops twl4030fixed_ops = {
+static const struct regulator_ops twl4030fixed_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = twl4030reg_enable,
diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
index 78de002037c7..259864520a06 100644
--- a/drivers/regulator/vctrl-regulator.c
+++ b/drivers/regulator/vctrl-regulator.c
@@ -334,10 +334,8 @@ static int vctrl_init_vtable(struct platform_device *pdev)
ctrl_uV = regulator_list_voltage(ctrl_reg, i);
if (ctrl_uV < vrange_ctrl->min_uV ||
- ctrl_uV > vrange_ctrl->max_uV) {
+ ctrl_uV > vrange_ctrl->max_uV)
rdesc->n_voltages--;
- continue;
- }
}
if (rdesc->n_voltages == 0) {
diff --git a/drivers/regulator/vexpress-regulator.c b/drivers/regulator/vexpress-regulator.c
index c810cbbd463f..1235f46e633e 100644
--- a/drivers/regulator/vexpress-regulator.c
+++ b/drivers/regulator/vexpress-regulator.c
@@ -1,15 +1,6 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2012 ARM Limited
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2012 ARM Limited
#define DRVNAME "vexpress-regulator"
#define pr_fmt(fmt) DRVNAME ": " fmt
@@ -23,17 +14,10 @@
#include <linux/regulator/of_regulator.h>
#include <linux/vexpress.h>
-struct vexpress_regulator {
- struct regulator_desc desc;
- struct regulator_dev *regdev;
- struct regmap *regmap;
-};
-
static int vexpress_regulator_get_voltage(struct regulator_dev *regdev)
{
- struct vexpress_regulator *reg = rdev_get_drvdata(regdev);
- u32 uV;
- int err = regmap_read(reg->regmap, 0, &uV);
+ unsigned int uV;
+ int err = regmap_read(regdev->regmap, 0, &uV);
return err ? err : uV;
}
@@ -41,60 +25,58 @@ static int vexpress_regulator_get_voltage(struct regulator_dev *regdev)
static int vexpress_regulator_set_voltage(struct regulator_dev *regdev,
int min_uV, int max_uV, unsigned *selector)
{
- struct vexpress_regulator *reg = rdev_get_drvdata(regdev);
-
- return regmap_write(reg->regmap, 0, min_uV);
+ return regmap_write(regdev->regmap, 0, min_uV);
}
-static struct regulator_ops vexpress_regulator_ops_ro = {
+static const struct regulator_ops vexpress_regulator_ops_ro = {
.get_voltage = vexpress_regulator_get_voltage,
};
-static struct regulator_ops vexpress_regulator_ops = {
+static const struct regulator_ops vexpress_regulator_ops = {
.get_voltage = vexpress_regulator_get_voltage,
.set_voltage = vexpress_regulator_set_voltage,
};
static int vexpress_regulator_probe(struct platform_device *pdev)
{
- struct vexpress_regulator *reg;
+ struct regulator_desc *desc;
struct regulator_init_data *init_data;
struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
- reg = devm_kzalloc(&pdev->dev, sizeof(*reg), GFP_KERNEL);
- if (!reg)
+ desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
return -ENOMEM;
- reg->regmap = devm_regmap_init_vexpress_config(&pdev->dev);
- if (IS_ERR(reg->regmap))
- return PTR_ERR(reg->regmap);
+ regmap = devm_regmap_init_vexpress_config(&pdev->dev);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
- reg->desc.name = dev_name(&pdev->dev);
- reg->desc.type = REGULATOR_VOLTAGE;
- reg->desc.owner = THIS_MODULE;
- reg->desc.continuous_voltage_range = true;
+ desc->name = dev_name(&pdev->dev);
+ desc->type = REGULATOR_VOLTAGE;
+ desc->owner = THIS_MODULE;
+ desc->continuous_voltage_range = true;
init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
- &reg->desc);
+ desc);
if (!init_data)
return -EINVAL;
init_data->constraints.apply_uV = 0;
if (init_data->constraints.min_uV && init_data->constraints.max_uV)
- reg->desc.ops = &vexpress_regulator_ops;
+ desc->ops = &vexpress_regulator_ops;
else
- reg->desc.ops = &vexpress_regulator_ops_ro;
+ desc->ops = &vexpress_regulator_ops_ro;
+ config.regmap = regmap;
config.dev = &pdev->dev;
config.init_data = init_data;
- config.driver_data = reg;
config.of_node = pdev->dev.of_node;
- reg->regdev = devm_regulator_register(&pdev->dev, &reg->desc, &config);
- if (IS_ERR(reg->regdev))
- return PTR_ERR(reg->regdev);
-
- platform_set_drvdata(pdev, reg);
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
return 0;
}
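
With config.regmap set, the core attaches the regmap to the rdev itself, so the ops need neither driver_data nor a private struct; the accessor form of the same read would be:

static int foo_get_voltage(struct regulator_dev *rdev)
{
	unsigned int uV;
	int err = regmap_read(rdev_get_regmap(rdev), 0, &uV);

	return err ? err : uV;
}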
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 12b422373580..b422eef97b77 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -1,15 +1,10 @@
-/*
- * wm831x-dcdc.c -- DC-DC buck convertor driver for the WM831x series
- *
- * Copyright 2009 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm831x-dcdc.c -- DC-DC buck converter driver for the WM831x series
+//
+// Copyright 2009 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -183,9 +178,11 @@ static irqreturn_t wm831x_dcdc_uv_irq(int irq, void *data)
{
struct wm831x_dcdc *dcdc = data;
+ regulator_lock(dcdc->regulator);
regulator_notifier_call_chain(dcdc->regulator,
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(dcdc->regulator);
return IRQ_HANDLED;
}
@@ -194,9 +191,11 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data)
{
struct wm831x_dcdc *dcdc = data;
+ regulator_lock(dcdc->regulator);
regulator_notifier_call_chain(dcdc->regulator,
REGULATOR_EVENT_OVER_CURRENT,
NULL);
+ regulator_unlock(dcdc->regulator);
return IRQ_HANDLED;
}
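
The regulator_lock()/regulator_unlock() additions reflect that regulator_notifier_call_chain() is now expected to be called with the rdev lock held; the wm831x handlers all follow the same threaded-IRQ shape (a sketch, passing the rdev directly as dev_id for brevity):

static irqreturn_t foo_uv_irq(int irq, void *data)
{
	struct regulator_dev *rdev = data;

	regulator_lock(rdev);
	regulator_notifier_call_chain(rdev, REGULATOR_EVENT_UNDER_VOLTAGE,
				      NULL);
	regulator_unlock(rdev);

	return IRQ_HANDLED;
}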
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 6dd891d7eee3..ff3d2bf50410 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -1,15 +1,10 @@
-/*
- * wm831x-isink.c -- Current sink driver for the WM831x series
- *
- * Copyright 2009 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm831x-isink.c -- Current sink driver for the WM831x series
+//
+// Copyright 2009 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -92,57 +87,23 @@ static int wm831x_isink_is_enabled(struct regulator_dev *rdev)
return 0;
}
-static int wm831x_isink_set_current(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct wm831x_isink *isink = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = isink->wm831x;
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(wm831x_isinkv_values); i++) {
- int val = wm831x_isinkv_values[i];
- if (min_uA <= val && val <= max_uA) {
- ret = wm831x_set_bits(wm831x, isink->reg,
- WM831X_CS1_ISEL_MASK, i);
- return ret;
- }
- }
-
- return -EINVAL;
-}
-
-static int wm831x_isink_get_current(struct regulator_dev *rdev)
-{
- struct wm831x_isink *isink = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = isink->wm831x;
- int ret;
-
- ret = wm831x_reg_read(wm831x, isink->reg);
- if (ret < 0)
- return ret;
-
- ret &= WM831X_CS1_ISEL_MASK;
- if (ret > WM831X_ISINK_MAX_ISEL)
- ret = WM831X_ISINK_MAX_ISEL;
-
- return wm831x_isinkv_values[ret];
-}
-
static const struct regulator_ops wm831x_isink_ops = {
.is_enabled = wm831x_isink_is_enabled,
.enable = wm831x_isink_enable,
.disable = wm831x_isink_disable,
- .set_current_limit = wm831x_isink_set_current,
- .get_current_limit = wm831x_isink_get_current,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static irqreturn_t wm831x_isink_irq(int irq, void *data)
{
struct wm831x_isink *isink = data;
+ regulator_lock(isink->regulator);
regulator_notifier_call_chain(isink->regulator,
REGULATOR_EVENT_OVER_CURRENT,
NULL);
+ regulator_unlock(isink->regulator);
return IRQ_HANDLED;
}
@@ -187,10 +148,15 @@ static int wm831x_isink_probe(struct platform_device *pdev)
isink->desc.ops = &wm831x_isink_ops;
isink->desc.type = REGULATOR_CURRENT;
isink->desc.owner = THIS_MODULE;
+ isink->desc.curr_table = wm831x_isinkv_values;
+ isink->desc.n_current_limits = ARRAY_SIZE(wm831x_isinkv_values);
+ isink->desc.csel_reg = isink->reg;
+ isink->desc.csel_mask = WM831X_CS1_ISEL_MASK;
config.dev = pdev->dev.parent;
config.init_data = pdata->isink[id];
config.driver_data = isink;
+ config.regmap = wm831x->regmap;
isink->regulator = devm_regulator_register(&pdev->dev, &isink->desc,
&config);
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index e4a6f888484e..56754686c982 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -1,15 +1,10 @@
-/*
- * wm831x-ldo.c -- LDO driver for the WM831x series
- *
- * Copyright 2009 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm831x-ldo.c -- LDO driver for the WM831x series
+//
+// Copyright 2009 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -51,9 +46,11 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
{
struct wm831x_ldo *ldo = data;
+ regulator_lock(ldo->regulator);
regulator_notifier_call_chain(ldo->regulator,
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
+ regulator_unlock(ldo->regulator);
return IRQ_HANDLED;
}
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index a1c7dfee5c37..56d6168a888d 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1,16 +1,11 @@
-/*
- * wm8350.c -- Voltage and current regulation for the Wolfson WM8350 PMIC
- *
- * Copyright 2007, 2008 Wolfson Microelectronics PLC.
- *
- * Author: Liam Girdwood
- * linux@wolfsonmicro.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm8350.c -- Voltage and current regulation for the Wolfson WM8350 PMIC
+//
+// Copyright 2007, 2008 Wolfson Microelectronics PLC.
+//
+// Author: Liam Girdwood
+// linux@wolfsonmicro.com
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -28,7 +23,7 @@
#define WM8350_DCDC_MAX_VSEL 0x66
/* Microamps */
-static const int isink_cur[] = {
+static const unsigned int isink_cur[] = {
4,
5,
6,
@@ -95,73 +90,6 @@ static const int isink_cur[] = {
223191
};
-static int get_isink_val(int min_uA, int max_uA, u16 *setting)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(isink_cur); i++) {
- if (min_uA <= isink_cur[i] && max_uA >= isink_cur[i]) {
- *setting = i;
- return 0;
- }
- }
- return -EINVAL;
-}
-
-static int wm8350_isink_set_current(struct regulator_dev *rdev, int min_uA,
- int max_uA)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int isink = rdev_get_id(rdev);
- u16 val, setting;
- int ret;
-
- ret = get_isink_val(min_uA, max_uA, &setting);
- if (ret != 0)
- return ret;
-
- switch (isink) {
- case WM8350_ISINK_A:
- val = wm8350_reg_read(wm8350, WM8350_CURRENT_SINK_DRIVER_A) &
- ~WM8350_CS1_ISEL_MASK;
- wm8350_reg_write(wm8350, WM8350_CURRENT_SINK_DRIVER_A,
- val | setting);
- break;
- case WM8350_ISINK_B:
- val = wm8350_reg_read(wm8350, WM8350_CURRENT_SINK_DRIVER_B) &
- ~WM8350_CS1_ISEL_MASK;
- wm8350_reg_write(wm8350, WM8350_CURRENT_SINK_DRIVER_B,
- val | setting);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int wm8350_isink_get_current(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int isink = rdev_get_id(rdev);
- u16 val;
-
- switch (isink) {
- case WM8350_ISINK_A:
- val = wm8350_reg_read(wm8350, WM8350_CURRENT_SINK_DRIVER_A) &
- WM8350_CS1_ISEL_MASK;
- break;
- case WM8350_ISINK_B:
- val = wm8350_reg_read(wm8350, WM8350_CURRENT_SINK_DRIVER_B) &
- WM8350_CS1_ISEL_MASK;
- break;
- default:
- return 0;
- }
-
- return isink_cur[val];
-}
-
/* turn on ISINK followed by DCDC */
static int wm8350_isink_enable(struct regulator_dev *rdev)
{
@@ -982,8 +910,8 @@ static const struct regulator_ops wm8350_ldo_ops = {
};
static const struct regulator_ops wm8350_isink_ops = {
- .set_current_limit = wm8350_isink_set_current,
- .get_current_limit = wm8350_isink_get_current,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
.enable = wm8350_isink_enable,
.disable = wm8350_isink_disable,
.is_enabled = wm8350_isink_is_enabled,
@@ -1138,6 +1066,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_CS1,
.type = REGULATOR_CURRENT,
.owner = THIS_MODULE,
+ .curr_table = isink_cur,
+ .n_current_limits = ARRAY_SIZE(isink_cur),
+ .csel_reg = WM8350_CURRENT_SINK_DRIVER_A,
+ .csel_mask = WM8350_CS1_ISEL_MASK,
},
{
.name = "ISINKB",
@@ -1146,6 +1078,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_CS2,
.type = REGULATOR_CURRENT,
.owner = THIS_MODULE,
+ .curr_table = isink_cur,
+ .n_current_limits = ARRAY_SIZE(isink_cur),
+ .csel_reg = WM8350_CURRENT_SINK_DRIVER_B,
+ .csel_mask = WM8350_CS2_ISEL_MASK,
},
};
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index fb1837657b64..6f331b51e479 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -1,16 +1,10 @@
-/*
- * Regulator support for WM8400
- *
- * Copyright 2008 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Regulator support for WM8400
+//
+// Copyright 2008 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/bug.h>
#include <linux/err.h>
@@ -36,13 +30,12 @@ static const struct regulator_ops wm8400_ldo_ops = {
static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev)
{
- struct wm8400 *wm8400 = rdev_get_drvdata(dev);
+ struct regmap *rmap = rdev_get_regmap(dev);
int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
u16 data[2];
int ret;
- ret = wm8400_block_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset, 2,
- data);
+ ret = regmap_bulk_read(rmap, WM8400_DCDC1_CONTROL_1 + offset, data, 2);
if (ret != 0)
return 0;
@@ -63,36 +56,36 @@ static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev)
static int wm8400_dcdc_set_mode(struct regulator_dev *dev, unsigned int mode)
{
- struct wm8400 *wm8400 = rdev_get_drvdata(dev);
+ struct regmap *rmap = rdev_get_regmap(dev);
int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
int ret;
switch (mode) {
case REGULATOR_MODE_FAST:
/* Datasheet: active with force PWM */
- ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_2 + offset,
+ ret = regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_2 + offset,
WM8400_DC1_FRC_PWM, WM8400_DC1_FRC_PWM);
if (ret != 0)
return ret;
- return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
+ return regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP,
WM8400_DC1_ACTIVE);
case REGULATOR_MODE_NORMAL:
/* Datasheet: active */
- ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_2 + offset,
+ ret = regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_2 + offset,
WM8400_DC1_FRC_PWM, 0);
if (ret != 0)
return ret;
- return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
+ return regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP,
WM8400_DC1_ACTIVE);
case REGULATOR_MODE_IDLE:
/* Datasheet: standby */
- return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
+ return regmap_update_bits(rmap, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP, 0);
default:
return -EINVAL;
@@ -195,7 +188,7 @@ static struct regulator_desc regulators[] = {
.id = WM8400_DCDC2,
.ops = &wm8400_dcdc_ops,
.enable_reg = WM8400_DCDC2_CONTROL_1,
- .enable_mask = WM8400_DC1_ENA_MASK,
+ .enable_mask = WM8400_DC2_ENA_MASK,
.n_voltages = WM8400_DC2_VSEL_MASK + 1,
.vsel_reg = WM8400_DCDC2_CONTROL_1,
.vsel_mask = WM8400_DC2_VSEL_MASK,
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 38928cdcb6e6..cadea0344486 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -1,15 +1,10 @@
-/*
- * wm8994-regulator.c -- Regulator driver for the WM8994
- *
- * Copyright 2009 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// wm8994-regulator.c -- Regulator driver for the WM8994
+//
+// Copyright 2009 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 79374d1de311..1f3ef9ee493c 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -48,7 +48,7 @@
/* list of clocks required by ADSP PIL */
static const char * const adsp_clk_id[] = {
- "sway_cbcr", "lpass_aon", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
+ "sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
"qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core",
};
@@ -439,6 +439,10 @@ static int adsp_probe(struct platform_device *pdev)
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
+ if (IS_ERR(adsp->sysmon)) {
+ ret = PTR_ERR(adsp->sysmon);
+ goto disable_pm;
+ }
ret = rproc_add(rproc);
if (ret)
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 01be7314e176..eacdf10fcfaf 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -25,6 +25,8 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
@@ -131,6 +133,8 @@ struct rproc_hexagon_res {
char **proxy_clk_names;
char **reset_clk_names;
char **active_clk_names;
+ char **active_pd_names;
+ char **proxy_pd_names;
int version;
bool need_mem_protection;
bool has_alt_reset;
@@ -156,9 +160,13 @@ struct q6v5 {
struct clk *active_clks[8];
struct clk *reset_clks[4];
struct clk *proxy_clks[4];
+ struct device *active_pds[1];
+ struct device *proxy_pds[3];
int active_clk_count;
int reset_clk_count;
int proxy_clk_count;
+ int active_pd_count;
+ int proxy_pd_count;
struct reg_info active_regs[1];
struct reg_info proxy_regs[3];
@@ -188,6 +196,7 @@ struct q6v5 {
bool has_alt_reset;
int mpss_perm;
int mba_perm;
+ const char *hexagon_mdt_image;
int version;
};
@@ -321,6 +330,41 @@ static void q6v5_clk_disable(struct device *dev,
clk_disable_unprepare(clks[i]);
}
+static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
+ size_t pd_count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0)
+ goto unroll_pd_votes;
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+}
+
+static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
bool remote_owner, phys_addr_t addr,
size_t size)
@@ -690,11 +734,23 @@ static int q6v5_mba_load(struct q6v5 *qproc)
qcom_q6v5_prepare(&qproc->q6v5);
+ ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
+ if (ret < 0) {
+ dev_err(qproc->dev, "failed to enable active power domains\n");
+ goto disable_irqs;
+ }
+
+ ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+ if (ret < 0) {
+ dev_err(qproc->dev, "failed to enable proxy power domains\n");
+ goto disable_active_pds;
+ }
+
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
if (ret) {
dev_err(qproc->dev, "failed to enable proxy supplies\n");
- goto disable_irqs;
+ goto disable_proxy_pds;
}
ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
@@ -791,6 +847,10 @@ disable_proxy_clk:
disable_proxy_reg:
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
+disable_proxy_pds:
+ q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+disable_active_pds:
+ q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
qcom_q6v5_unprepare(&qproc->q6v5);
@@ -830,6 +890,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
qproc->active_clk_count);
q6v5_regulator_disable(qproc, qproc->active_regs,
qproc->active_reg_count);
+ q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
/* In case of failure or coredump scenario where reclaiming MBA memory
* could not happen reclaim it here.
@@ -841,6 +902,8 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
ret = qcom_q6v5_unprepare(&qproc->q6v5);
if (ret) {
+ q6v5_pds_disable(qproc, qproc->proxy_pds,
+ qproc->proxy_pd_count);
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
@@ -860,17 +923,26 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
bool relocate = false;
- char seg_name[10];
+ char *fw_name;
+ size_t fw_name_len;
ssize_t offset;
size_t size = 0;
void *ptr;
int ret;
int i;
- ret = request_firmware(&fw, "modem.mdt", qproc->dev);
+ fw_name_len = strlen(qproc->hexagon_mdt_image);
+ if (fw_name_len <= 4)
+ return -EINVAL;
+
+ fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+
+ ret = request_firmware(&fw, fw_name, qproc->dev);
if (ret < 0) {
- dev_err(qproc->dev, "unable to load modem.mdt\n");
- return ret;
+ dev_err(qproc->dev, "unable to load %s\n", fw_name);
+ goto out;
}
/* Initialize the RMB validator */
@@ -918,10 +990,11 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
ptr = qproc->mpss_region + offset;
if (phdr->p_filesz) {
- snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
- ret = request_firmware(&seg_fw, seg_name, qproc->dev);
+ /* Replace "xxx.xxx" with "xxx.bxx" */
+ sprintf(fw_name + fw_name_len - 3, "b%02d", i);
+ ret = request_firmware(&seg_fw, fw_name, qproc->dev);
if (ret) {
- dev_err(qproc->dev, "failed to load %s\n", seg_name);
+ dev_err(qproc->dev, "failed to load %s\n", fw_name);
goto release_firmware;
}
@@ -960,6 +1033,8 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
release_firmware:
release_firmware(fw);
+out:
+ kfree(fw_name);
return ret < 0 ? ret : 0;
}
@@ -1075,9 +1150,10 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
unsigned long i;
int ret;
- ret = request_firmware(&fw, "modem.mdt", qproc->dev);
+ ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
if (ret < 0) {
- dev_err(qproc->dev, "unable to load modem.mdt\n");
+ dev_err(qproc->dev, "unable to load %s\n",
+ qproc->hexagon_mdt_image);
return ret;
}
@@ -1121,6 +1197,7 @@ static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
+ q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
@@ -1181,6 +1258,45 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks,
return i;
}
+static int q6v5_pds_attach(struct device *dev, struct device **devs,
+ char **pd_names)
+{
+ size_t num_pds = 0;
+ int ret;
+ int i;
+
+ if (!pd_names)
+ return 0;
+
+ while (pd_names[num_pds])
+ num_pds++;
+
+ for (i = 0; i < num_pds; i++) {
+ devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR(devs[i])) {
+ ret = PTR_ERR(devs[i]);
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(devs[i], false);
+
+ return ret;
+}
+
+static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
static int q6v5_init_reset(struct q6v5 *qproc)
{
qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
@@ -1253,6 +1369,7 @@ static int q6v5_probe(struct platform_device *pdev)
const struct rproc_hexagon_res *desc;
struct q6v5 *qproc;
struct rproc *rproc;
+ const char *mba_image;
int ret;
desc = of_device_get_match_data(&pdev->dev);
@@ -1262,16 +1379,30 @@ static int q6v5_probe(struct platform_device *pdev)
if (desc->need_mem_protection && !qcom_scm_is_available())
return -EPROBE_DEFER;
+ mba_image = desc->hexagon_mba_image;
+ ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
+ 0, &mba_image);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
- desc->hexagon_mba_image, sizeof(*qproc));
+ mba_image, sizeof(*qproc));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
}
+ rproc->auto_boot = false;
+
qproc = (struct q6v5 *)rproc->priv;
qproc->dev = &pdev->dev;
qproc->rproc = rproc;
+ qproc->hexagon_mdt_image = "modem.mdt";
+ ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
+ 1, &qproc->hexagon_mdt_image);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
platform_set_drvdata(pdev, qproc);
ret = q6v5_init_mem(qproc, pdev);
@@ -1322,10 +1453,26 @@ static int q6v5_probe(struct platform_device *pdev)
}
qproc->active_reg_count = ret;
+ ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
+ desc->active_pd_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to attach active power domains\n");
+ goto free_rproc;
+ }
+ qproc->active_pd_count = ret;
+
+ ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
+ desc->proxy_pd_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to init power domains\n");
+ goto detach_active_pds;
+ }
+ qproc->proxy_pd_count = ret;
+
qproc->has_alt_reset = desc->has_alt_reset;
ret = q6v5_init_reset(qproc);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
qproc->version = desc->version;
qproc->need_mem_protection = desc->need_mem_protection;
@@ -1333,7 +1480,7 @@ static int q6v5_probe(struct platform_device *pdev)
ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
qcom_msa_handover);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
@@ -1341,13 +1488,21 @@ static int q6v5_probe(struct platform_device *pdev)
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
+ if (IS_ERR(qproc->sysmon)) {
+ ret = PTR_ERR(qproc->sysmon);
+ goto detach_proxy_pds;
+ }
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
return 0;
+detach_proxy_pds:
+ q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+detach_active_pds:
+ q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
rproc_free(rproc);
@@ -1364,6 +1519,10 @@ static int q6v5_remove(struct platform_device *pdev)
qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
+
+ q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
+ q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+
rproc_free(qproc->rproc);
return 0;
@@ -1388,6 +1547,16 @@ static const struct rproc_hexagon_res sdm845_mss = {
"mnoc_axi",
NULL
},
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ "mss",
+ NULL
+ },
.need_mem_protection = true,
.has_alt_reset = true,
.version = MSS_SDM845,
@@ -1395,16 +1564,26 @@ static const struct rproc_hexagon_res sdm845_mss = {
static const struct rproc_hexagon_res msm8996_mss = {
.hexagon_mba_image = "mba.mbn",
+ .proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {}
+ },
.proxy_clk_names = (char*[]){
"xo",
"pnoc",
+ "qdss",
NULL
},
.active_clk_names = (char*[]){
"iface",
"bus",
"mem",
- "gpll0_mss_clk",
+ "gpll0_mss",
+ "snoc_axi",
+ "mnoc_axi",
NULL
},
.need_mem_protection = true,
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index b1e63fcd5fdf..f280f196d007 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -258,6 +258,7 @@ static int adsp_probe(struct platform_device *pdev)
const struct adsp_data *desc;
struct qcom_adsp *adsp;
struct rproc *rproc;
+ const char *fw_name;
int ret;
desc = of_device_get_match_data(&pdev->dev);
@@ -267,8 +268,14 @@ static int adsp_probe(struct platform_device *pdev)
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
+ fw_name = desc->firmware_name;
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
- desc->firmware_name, sizeof(*adsp));
+ fw_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
@@ -304,6 +311,10 @@ static int adsp_probe(struct platform_device *pdev)
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
+ if (IS_ERR(adsp->sysmon)) {
+ ret = PTR_ERR(adsp->sysmon);
+ goto free_rproc;
+ }
ret = rproc_add(rproc);
if (ret)
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index e976a602b015..c231314eab66 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -6,8 +6,9 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/notifier.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/qcom_rproc.h>
@@ -25,6 +26,7 @@ struct qcom_sysmon {
const char *name;
+ int shutdown_irq;
int ssctl_version;
int ssctl_instance;
@@ -34,6 +36,8 @@ struct qcom_sysmon {
struct rpmsg_endpoint *ept;
struct completion comp;
+ struct completion ind_comp;
+ struct completion shutdown_comp;
struct mutex lock;
bool ssr_ack;
@@ -137,6 +141,7 @@ static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count,
}
#define SSCTL_SHUTDOWN_REQ 0x21
+#define SSCTL_SHUTDOWN_READY_IND 0x21
#define SSCTL_SUBSYS_EVENT_REQ 0x23
#define SSCTL_MAX_MSG_LEN 7
@@ -252,6 +257,29 @@ static struct qmi_elem_info ssctl_subsys_event_resp_ei[] = {
{}
};
+static struct qmi_elem_info ssctl_shutdown_ind_ei[] = {
+ {}
+};
+
+static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct qcom_sysmon *sysmon = container_of(qmi, struct qcom_sysmon, qmi);
+
+ complete(&sysmon->ind_comp);
+}
+
+static struct qmi_msg_handler qmi_indication_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = SSCTL_SHUTDOWN_READY_IND,
+ .ei = ssctl_shutdown_ind_ei,
+ .decoded_size = 0,
+ .fn = sysmon_ind_cb
+ },
+ {}
+};
+
/**
* ssctl_request_shutdown() - request shutdown via SSCTL QMI service
* @sysmon: sysmon context
@@ -262,6 +290,8 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
struct qmi_txn txn;
int ret;
+ reinit_completion(&sysmon->ind_comp);
+ reinit_completion(&sysmon->shutdown_comp);
ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp);
if (ret < 0) {
dev_err(sysmon->dev, "failed to allocate QMI txn\n");
@@ -283,6 +313,17 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
dev_err(sysmon->dev, "shutdown request failed\n");
else
dev_dbg(sysmon->dev, "shutdown request completed\n");
+
+ if (sysmon->shutdown_irq > 0) {
+ ret = wait_for_completion_timeout(&sysmon->shutdown_comp,
+ 10 * HZ);
+ if (!ret) {
+ ret = try_wait_for_completion(&sysmon->ind_comp);
+ if (!ret)
+ dev_err(sysmon->dev,
+ "timeout waiting for shutdown ack\n");
+ }
+ }
}
/**
@@ -432,6 +473,15 @@ static int sysmon_notify(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
}
+static irqreturn_t sysmon_shutdown_interrupt(int irq, void *data)
+{
+ struct qcom_sysmon *sysmon = data;
+
+ complete(&sysmon->shutdown_comp);
+
+ return IRQ_HANDLED;
+}
+
/**
* qcom_add_sysmon_subdev() - create a sysmon subdev for the given remoteproc
* @rproc: rproc context to associate the subdev with
@@ -449,7 +499,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
sysmon = kzalloc(sizeof(*sysmon), GFP_KERNEL);
if (!sysmon)
- return NULL;
+ return ERR_PTR(-ENOMEM);
sysmon->dev = rproc->dev.parent;
sysmon->rproc = rproc;
@@ -458,13 +508,37 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
sysmon->ssctl_instance = ssctl_instance;
init_completion(&sysmon->comp);
+ init_completion(&sysmon->ind_comp);
+ init_completion(&sysmon->shutdown_comp);
mutex_init(&sysmon->lock);
- ret = qmi_handle_init(&sysmon->qmi, SSCTL_MAX_MSG_LEN, &ssctl_ops, NULL);
+ sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node,
+ "shutdown-ack");
+ if (sysmon->shutdown_irq < 0) {
+ if (sysmon->shutdown_irq != -ENODATA) {
+ dev_err(sysmon->dev,
+ "failed to retrieve shutdown-ack IRQ\n");
+ return ERR_PTR(sysmon->shutdown_irq);
+ }
+ } else {
+ ret = devm_request_threaded_irq(sysmon->dev,
+ sysmon->shutdown_irq,
+ NULL, sysmon_shutdown_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 shutdown-ack", sysmon);
+ if (ret) {
+ dev_err(sysmon->dev,
+ "failed to acquire shutdown-ack IRQ\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = qmi_handle_init(&sysmon->qmi, SSCTL_MAX_MSG_LEN, &ssctl_ops,
+ qmi_indication_handler);
if (ret < 0) {
dev_err(sysmon->dev, "failed to initialize qmi handle\n");
kfree(sysmon);
- return NULL;
+ return ERR_PTR(ret);
}
qmi_add_lookup(&sysmon->qmi, 43, 0, 0);
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index b0e07e9f42d5..adcce523971e 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -553,6 +553,10 @@ static int wcnss_probe(struct platform_device *pdev)
qcom_add_smd_subdev(rproc, &wcnss->smd_subdev);
wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
+ if (IS_ERR(wcnss->sysmon)) {
+ ret = PTR_ERR(wcnss->sysmon);
+ goto free_rproc;
+ }
ret = rproc_add(rproc);
if (ret)
@@ -622,5 +626,5 @@ static void __exit wcnss_exit(void)
}
module_exit(wcnss_exit);
-MODULE_DESCRIPTION("Qualcomm Peripherial Image Loader for Wireless Subsystem");
+MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");
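qcom_add_sysmon_subdev() now reports failure via ERR_PTR() instead of NULL, so every caller in this series adopts the same check; a minimal sketch of the new call contract:

	sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
	if (IS_ERR(sysmon))
		return PTR_ERR(sysmon);	/* NULL is no longer a possible return */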
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 54ec38fc5dca..48feebd6d0a2 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -39,12 +39,16 @@
#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/crc32.h>
+#include <linux/of_reserved_mem.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <asm/byteorder.h>
+#include <linux/platform_device.h>
#include "remoteproc_internal.h"
+#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
+
static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
@@ -145,7 +149,7 @@ static void rproc_disable_iommu(struct rproc *rproc)
iommu_domain_free(domain);
}
-static phys_addr_t rproc_va_to_pa(void *cpu_addr)
+phys_addr_t rproc_va_to_pa(void *cpu_addr)
{
/*
* Return physical address according to virtual address location
@@ -160,6 +164,7 @@ static phys_addr_t rproc_va_to_pa(void *cpu_addr)
WARN_ON(!virt_addr_valid(cpu_addr));
return virt_to_phys(cpu_addr);
}
+EXPORT_SYMBOL(rproc_va_to_pa);
/**
* rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
@@ -204,6 +209,10 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
list_for_each_entry(carveout, &rproc->carveouts, node) {
int offset = da - carveout->da;
+ /* Verify that carveout is allocated */
+ if (!carveout->va)
+ continue;
+
/* try next carveout if da is too small */
if (offset < 0)
continue;
@@ -272,25 +281,27 @@ rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
* @len: associated area size
*
* This function is a helper function to verify requested device area (couple
- * da, len) is part of specified carevout.
+ * da, len) is part of the specified carveout.
+ * If da is not set (defined as FW_RSC_ADDR_ANY), only the requested length
+ * is checked.
*
- * Return: 0 if carveout match request else -ENOMEM
+ * Return: 0 if the carveout matches the request, else a negative error code
*/
-int rproc_check_carveout_da(struct rproc *rproc, struct rproc_mem_entry *mem,
- u32 da, u32 len)
+static int rproc_check_carveout_da(struct rproc *rproc,
+ struct rproc_mem_entry *mem, u32 da, u32 len)
{
struct device *dev = &rproc->dev;
- int delta = 0;
+ int delta;
/* Check requested resource length */
if (len > mem->len) {
dev_err(dev, "Registered carveout doesn't fit len request\n");
- return -ENOMEM;
+ return -EINVAL;
}
if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
- /* Update existing carveout da */
- mem->da = da;
+ /* Address doesn't match registered carveout configuration */
+ return -EINVAL;
} else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
delta = da - mem->da;
@@ -298,13 +309,13 @@ int rproc_check_carveout_da(struct rproc *rproc, struct rproc_mem_entry *mem,
if (delta < 0) {
dev_err(dev,
"Registered carveout doesn't fit da request\n");
- return -ENOMEM;
+ return -EINVAL;
}
if (delta + len > mem->len) {
dev_err(dev,
"Registered carveout doesn't fit len request\n");
- return -ENOMEM;
+ return -EINVAL;
}
}
@@ -418,8 +429,25 @@ static int rproc_vdev_do_start(struct rproc_subdev *subdev)
static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+ int ret;
- rproc_remove_virtio_dev(rvdev);
+ ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
+ if (ret)
+ dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret);
+}
+
+/**
+ * rproc_rvdev_release() - release the existence of a rvdev
+ *
+ * @dev: the subdevice's dev
+ */
+static void rproc_rvdev_release(struct device *dev)
+{
+ struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
+
+ of_reserved_mem_device_release(dev);
+
+ kfree(rvdev);
}
/**
@@ -455,6 +483,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
int i, ret;
+ char name[16];
/* make sure resource isn't truncated */
if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
@@ -488,6 +517,29 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
rvdev->rproc = rproc;
rvdev->index = rproc->nb_vdev++;
+ /* Initialise vdev subdevice */
+ snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
+ rvdev->dev.parent = rproc->dev.parent;
+ rvdev->dev.release = rproc_rvdev_release;
+ dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
+ dev_set_drvdata(&rvdev->dev, rvdev);
+
+ ret = device_register(&rvdev->dev);
+ if (ret) {
+ put_device(&rvdev->dev);
+ return ret;
+ }
+ /* Make the device DMA capable by inheriting the parent's capabilities */
+ set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent));
+
+ ret = dma_coerce_mask_and_coherent(&rvdev->dev,
+ dma_get_mask(rproc->dev.parent));
+ if (ret) {
+ dev_warn(dev,
+ "Failed to set DMA mask %llx. Trying to continue... %x\n",
+ dma_get_mask(rproc->dev.parent), ret);
+ }
+
/* parse the vrings */
for (i = 0; i < rsc->num_of_vrings; i++) {
ret = rproc_parse_vring(rvdev, rsc, i);
@@ -518,7 +570,7 @@ unwind_vring_allocations:
for (i--; i >= 0; i--)
rproc_free_vring(&rvdev->vring[i]);
free_rvdev:
- kfree(rvdev);
+ device_unregister(&rvdev->dev);
return ret;
}
@@ -536,7 +588,7 @@ void rproc_vdev_release(struct kref *ref)
rproc_remove_subdev(rproc, &rvdev->subdev);
list_del(&rvdev->node);
- kfree(rvdev);
+ device_unregister(&rvdev->dev);
}
/**
@@ -558,9 +610,8 @@ void rproc_vdev_release(struct kref *ref)
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
int offset, int avail)
{
- struct rproc_mem_entry *trace;
+ struct rproc_debug_trace *trace;
struct device *dev = &rproc->dev;
- void *ptr;
char name[15];
if (sizeof(*rsc) > avail) {
@@ -574,28 +625,23 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
return -EINVAL;
}
- /* what's the kernel address of this resource ? */
- ptr = rproc_da_to_va(rproc, rsc->da, rsc->len);
- if (!ptr) {
- dev_err(dev, "erroneous trace resource entry\n");
- return -EINVAL;
- }
-
trace = kzalloc(sizeof(*trace), GFP_KERNEL);
if (!trace)
return -ENOMEM;
/* set the trace buffer dma properties */
- trace->len = rsc->len;
- trace->va = ptr;
+ trace->trace_mem.len = rsc->len;
+ trace->trace_mem.da = rsc->da;
+
+ /* set pointer on rproc device */
+ trace->rproc = rproc;
/* make sure snprintf always null terminates, even if truncating */
snprintf(name, sizeof(name), "trace%d", rproc->num_traces);
/* create the debugfs entry */
- trace->priv = rproc_create_trace_file(name, rproc, trace);
- if (!trace->priv) {
- trace->va = NULL;
+ trace->tfile = rproc_create_trace_file(name, rproc, trace);
+ if (!trace->tfile) {
kfree(trace);
return -EINVAL;
}
@@ -604,8 +650,8 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
rproc->num_traces++;
- dev_dbg(dev, "%s added: va %pK, da 0x%x, len 0x%x\n",
- name, ptr, rsc->da, rsc->len);
+ dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n",
+ name, rsc->da, rsc->len);
return 0;
}
@@ -715,6 +761,18 @@ static int rproc_alloc_carveout(struct rproc *rproc,
dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
va, &dma, mem->len);
+ if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
+ /*
+ * Check that the requested da equals the dma address
+ * and print a warning in case of misalignment.
+ * Don't stop rproc_start sequence as coprocessor may
+ * build pa to da translation on its side.
+ */
+ if (mem->da != (u32)dma)
+ dev_warn(dev->parent,
+ "Allocated carveout doesn't fit device address request\n");
+ }
+
/*
* Ok, this is non-standard.
*
@@ -732,15 +790,7 @@ static int rproc_alloc_carveout(struct rproc *rproc,
* to use the iommu-based DMA API: we expect 'dma' to contain the
* physical address in this case.
*/
-
- if (mem->da != FW_RSC_ADDR_ANY) {
- if (!rproc->domain) {
- dev_err(dev->parent,
- "Bad carveout rsc configuration\n");
- ret = -ENOMEM;
- goto dma_free;
- }
-
+ if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
ret = -ENOMEM;
@@ -767,11 +817,17 @@ static int rproc_alloc_carveout(struct rproc *rproc,
dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
mem->da, &dma);
- } else {
+ }
+
+ if (mem->da == FW_RSC_ADDR_ANY) {
+ /* Update device address as undefined by requester */
+ if ((u64)dma & HIGH_BITS_MASK)
+ dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n");
+
mem->da = (u32)dma;
}
- mem->dma = (u32)dma;
+ mem->dma = dma;
mem->va = va;
return 0;
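The truncation warnings above share one shape: resource tables store addresses as u32, so any bit above bit 31 is reported before the cast. A self-contained sketch (the helper name is invented):

	#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL

	static inline u32 rproc_rsc_addr32(struct device *dev, u64 addr)
	{
		/* Warn before losing the high bits in the cast. */
		if (addr & HIGH_BITS_MASK)
			dev_warn(dev, "address truncated to 32 bits\n");

		return (u32)addr;
	}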
@@ -900,7 +956,8 @@ EXPORT_SYMBOL(rproc_add_carveout);
* @dma: dma address
* @len: memory carveout length
* @da: device address
- * @release: memory carveout function
+ * @alloc: memory carveout allocation function
+ * @release: memory carveout release function
* @name: carveout name
*
* This function allocates a rproc_mem_entry struct and fills it with parameters
@@ -1110,6 +1167,7 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc)
struct rproc_mem_entry *entry, *tmp;
struct fw_rsc_carveout *rsc;
struct device *dev = &rproc->dev;
+ u64 pa;
int ret;
list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
@@ -1146,10 +1204,15 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc)
/* Use va if defined else dma to generate pa */
if (entry->va)
- rsc->pa = (u32)rproc_va_to_pa(entry->va);
+ pa = (u64)rproc_va_to_pa(entry->va);
else
- rsc->pa = (u32)entry->dma;
+ pa = (u64)entry->dma;
+
+ if (((u64)pa) & HIGH_BITS_MASK)
+ dev_warn(dev,
+ "Physical address cast in 32bit to fit resource table format\n");
+ rsc->pa = (u32)pa;
rsc->da = entry->da;
rsc->len = entry->len;
}
@@ -1182,15 +1245,16 @@ static void rproc_coredump_cleanup(struct rproc *rproc)
static void rproc_resource_cleanup(struct rproc *rproc)
{
struct rproc_mem_entry *entry, *tmp;
+ struct rproc_debug_trace *trace, *ttmp;
struct rproc_vdev *rvdev, *rvtmp;
struct device *dev = &rproc->dev;
/* clean up debugfs trace entries */
- list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
- rproc_remove_trace_file(entry->priv);
+ list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
+ rproc_remove_trace_file(trace->tfile);
rproc->num_traces--;
- list_del(&entry->node);
- kfree(entry);
+ list_del(&trace->node);
+ kfree(trace);
}
/* clean up iommu mapping entries */
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index e90135c64af0..6da934b8dc4b 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -47,10 +47,23 @@ static struct dentry *rproc_dbg;
static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct rproc_mem_entry *trace = filp->private_data;
- int len = strnlen(trace->va, trace->len);
+ struct rproc_debug_trace *data = filp->private_data;
+ struct rproc_mem_entry *trace = &data->trace_mem;
+ void *va;
+ char buf[100];
+ int len;
+
+ va = rproc_da_to_va(data->rproc, trace->da, trace->len);
- return simple_read_from_buffer(userbuf, count, ppos, trace->va, len);
+ if (!va) {
+ len = scnprintf(buf, sizeof(buf), "Trace %s not available\n",
+ trace->name);
+ va = buf;
+ } else {
+ len = strnlen(va, trace->len);
+ }
+
+ return simple_read_from_buffer(userbuf, count, ppos, va, len);
}
static const struct file_operations trace_rproc_ops = {
@@ -155,6 +168,30 @@ static const struct file_operations rproc_recovery_ops = {
.llseek = generic_file_llseek,
};
+/* expose the crash trigger via debugfs */
+static ssize_t
+rproc_crash_write(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ unsigned int type;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &type);
+ if (ret < 0)
+ return ret;
+
+ rproc_report_crash(rproc, type);
+
+ return count;
+}
+
+static const struct file_operations rproc_crash_ops = {
+ .write = rproc_crash_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
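The new debugfs entry lets recovery paths be exercised from user space; a minimal illustration in C (the remoteproc index and the crash type value are examples only):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path depends on probe order; "1" requests crash type 1. */
		int fd = open("/sys/kernel/debug/remoteproc/remoteproc0/crash",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}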
/* Expose resource table content via debugfs */
static int rproc_rsc_table_show(struct seq_file *seq, void *p)
{
@@ -288,7 +325,7 @@ void rproc_remove_trace_file(struct dentry *tfile)
}
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
- struct rproc_mem_entry *trace)
+ struct rproc_debug_trace *trace)
{
struct dentry *tfile;
@@ -325,6 +362,8 @@ void rproc_create_debug_dir(struct rproc *rproc)
rproc, &rproc_name_ops);
debugfs_create_file("recovery", 0400, rproc->dbg_dir,
rproc, &rproc_recovery_ops);
+ debugfs_create_file("crash", 0200, rproc->dbg_dir,
+ rproc, &rproc_crash_ops);
debugfs_create_file("resource_table", 0400, rproc->dbg_dir,
rproc, &rproc_rsc_table_ops);
debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index f6cad243d7ca..45ff76a06c72 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -25,6 +25,13 @@
struct rproc;
+struct rproc_debug_trace {
+ struct rproc *rproc;
+ struct dentry *tfile;
+ struct list_head node;
+ struct rproc_mem_entry trace_mem;
+};
+
/* from remoteproc_core.c */
void rproc_release(struct kref *kref);
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
@@ -32,12 +39,12 @@ void rproc_vdev_release(struct kref *ref);
/* from remoteproc_virtio.c */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
-void rproc_remove_virtio_dev(struct rproc_vdev *rvdev);
+int rproc_remove_virtio_dev(struct device *dev, void *data);
/* from remoteproc_debugfs.c */
void rproc_remove_trace_file(struct dentry *tfile);
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
- struct rproc_mem_entry *trace);
+ struct rproc_debug_trace *trace);
void rproc_delete_debug_dir(struct rproc *rproc);
void rproc_create_debug_dir(struct rproc *rproc);
void rproc_init_debugfs(void);
@@ -52,6 +59,7 @@ void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
void *rproc_da_to_va(struct rproc *rproc, u64 da, int len);
+phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw);
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 2d7cd344f3bf..44774de6f17b 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -17,7 +17,9 @@
* GNU General Public License for more details.
*/
+#include <linux/dma-mapping.h>
#include <linux/export.h>
+#include <linux/of_reserved_mem.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
@@ -316,6 +318,8 @@ static void rproc_virtio_dev_release(struct device *dev)
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct rproc *rproc = vdev_to_rproc(vdev);
+ kfree(vdev);
+
kref_put(&rvdev->refcount, rproc_vdev_release);
put_device(&rproc->dev);
@@ -333,10 +337,53 @@ static void rproc_virtio_dev_release(struct device *dev)
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
struct rproc *rproc = rvdev->rproc;
- struct device *dev = &rproc->dev;
- struct virtio_device *vdev = &rvdev->vdev;
+ struct device *dev = &rvdev->dev;
+ struct virtio_device *vdev;
+ struct rproc_mem_entry *mem;
int ret;
+ /* Try to find dedicated vdev buffer carveout */
+ mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
+ if (mem) {
+ phys_addr_t pa;
+
+ if (mem->of_resm_idx != -1) {
+ struct device_node *np = rproc->dev.parent->of_node;
+
+ /* Associate reserved memory to vdev device */
+ ret = of_reserved_mem_device_init_by_idx(dev, np,
+ mem->of_resm_idx);
+ if (ret) {
+ dev_err(dev, "Can't associate reserved memory\n");
+ goto out;
+ }
+ } else {
+ if (mem->va) {
+ dev_warn(dev, "vdev %d buffer already mapped\n",
+ rvdev->index);
+ pa = rproc_va_to_pa(mem->va);
+ } else {
+ /* Use the dma address, as the carveout is not mapped yet */
+ pa = (phys_addr_t)mem->dma;
+ }
+
+ /* Associate vdev buffer memory pool to vdev subdev */
+ ret = dma_declare_coherent_memory(dev, pa,
+ mem->da,
+ mem->len);
+ if (ret < 0) {
+ dev_err(dev, "Failed to associate buffer\n");
+ goto out;
+ }
+ }
+ }
+
+ /* Allocate virtio device */
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
vdev->id.device = id,
vdev->config = &rproc_virtio_config_ops,
vdev->dev.parent = dev;
@@ -370,11 +417,15 @@ out:
/**
* rproc_remove_virtio_dev() - remove an rproc-induced virtio device
- * @rvdev: the remote vdev
+ * @dev: the virtio device
+ * @data: must be null
*
* This function unregisters an existing virtio device.
*/
-void rproc_remove_virtio_dev(struct rproc_vdev *rvdev)
+int rproc_remove_virtio_dev(struct device *dev, void *data)
{
- unregister_virtio_device(&rvdev->vdev);
+ struct virtio_device *vdev = dev_to_virtio(dev);
+
+ unregister_virtio_device(vdev);
+ return 0;
}
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index aacef0ea3b90..51049d17b1e5 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -19,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
@@ -91,6 +92,77 @@ static void st_rproc_kick(struct rproc *rproc, int vqid)
dev_err(dev, "failed to send message via mbox: %d\n", ret);
}
+static int st_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = rproc->dev.parent;
+ void *va;
+
+ va = ioremap_wc(mem->dma, mem->len);
+ if (!va) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int st_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ struct of_phandle_iterator it;
+ int index = 0;
+
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ dev_err(dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ /* No need to map vdev buffer */
+ if (strcmp(it.node->name, "vdev0buffer")) {
+ /* Register memory region */
+ mem = rproc_mem_entry_init(dev, NULL,
+ (dma_addr_t)rmem->base,
+ rmem->size, rmem->base,
+ st_rproc_mem_alloc,
+ st_rproc_mem_release,
+ it.node->name);
+ } else {
+ /* Register reserved memory for vdev buffer allocation */
+ mem = rproc_of_resm_mem_entry_init(dev, index,
+ rmem->size,
+ rmem->base,
+ it.node->name);
+ }
+
+ if (!mem)
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ index++;
+ }
+
+ return rproc_elf_load_rsc_table(rproc, fw);
+}
+
static int st_rproc_start(struct rproc *rproc)
{
struct st_rproc *ddata = rproc->priv;
@@ -158,9 +230,14 @@ static int st_rproc_stop(struct rproc *rproc)
}
static const struct rproc_ops st_rproc_ops = {
- .kick = st_rproc_kick,
- .start = st_rproc_start,
- .stop = st_rproc_stop,
+ .kick = st_rproc_kick,
+ .start = st_rproc_start,
+ .stop = st_rproc_stop,
+ .parse_fw = st_rproc_parse_fw,
+ .load = rproc_elf_load_segments,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
};
/*
@@ -254,12 +331,6 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
return -EINVAL;
}
- err = of_reserved_mem_device_init(dev);
- if (err) {
- dev_err(dev, "Failed to obtain shared memory\n");
- return err;
- }
-
err = clk_prepare(ddata->clk);
if (err)
dev_err(dev, "failed to get clock\n");
@@ -387,8 +458,6 @@ static int st_rproc_remove(struct platform_device *pdev)
clk_disable_unprepare(ddata->clk);
- of_reserved_mem_device_release(&pdev->dev);
-
for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
mbox_free_channel(ddata->mbox_chan[i]);
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
index 91751617b37a..c53a2185a039 100644
--- a/drivers/reset/reset-meson-audio-arb.c
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
arb->rstc.ops = &meson_audio_arb_rstc_ops;
arb->rstc.of_node = dev->of_node;
+ arb->rstc.owner = THIS_MODULE;
/*
* Enable general:
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 664f957012cd..5d3685bd76a2 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -11,21 +11,21 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/virtio.h>
-#include <linux/virtio_ids.h>
-#include <linux/virtio_config.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/jiffies.h>
#include <linux/sched.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
#include <linux/wait.h>
-#include <linux/rpmsg.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
#include "rpmsg_internal.h"
@@ -912,7 +912,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
total_buf_space = vrp->num_bufs * vrp->buf_size;
/* allocate coherent memory for the buffers */
- bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
+ bufs_va = dma_alloc_coherent(vdev->dev.parent,
total_buf_space, &vrp->bufs_dma,
GFP_KERNEL);
if (!bufs_va) {
@@ -980,7 +980,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
return 0;
free_coherent:
- dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ dma_free_coherent(vdev->dev.parent, total_buf_space,
bufs_va, vrp->bufs_dma);
vqs_del:
vdev->config->del_vqs(vrp->vdev);
@@ -1015,7 +1015,7 @@ static void rpmsg_remove(struct virtio_device *vdev)
vdev->config->del_vqs(vrp->vdev);
- dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ dma_free_coherent(vdev->dev.parent, total_buf_space,
vrp->rbufs, vrp->bufs_dma);
kfree(vrp);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e1a1f2b1cbba..f933c06bff4f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -667,9 +667,9 @@ config RTC_DRV_S5M
will be called rtc-s5m.
config RTC_DRV_SD3078
- tristate "ZXW Crystal SD3078"
+ tristate "ZXW Shenzhen whwave SD3078"
help
- If you say yes here you get support for the ZXW Crystal
+ If you say yes here you get support for the ZXW Shenzhen whwave
SD3078 RTC chips.
This driver can also be built as a module. If so, the module
@@ -1864,4 +1864,15 @@ config RTC_DRV_GOLDFISH
Goldfish is a code name for the virtual platform developed by Google
for Android emulation.
+config RTC_DRV_WILCO_EC
+ tristate "Wilco EC RTC"
+ depends on WILCO_EC
+ default m
+ help
+ If you say yes here, you get read/write support for the Real Time
+ Clock on the Wilco Embedded Controller (Wilco is a kind of Chromebook).
+
+ This can also be built as a module. If so, the module will
+ be named "rtc_wilco_ec".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 1bf9f75a7865..fe3962496685 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -177,6 +177,7 @@ obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
+obj-$(CONFIG_RTC_DRV_WILCO_EC) += rtc-wilco-ec.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index e5444296075e..4d6bf9304ceb 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev))
- enable_irq_wake(cros_ec_rtc->cros_ec->irq);
+ return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
return 0;
}
@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev))
- disable_irq_wake(cros_ec_rtc->cros_ec->irq);
+ return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
return 0;
}
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index b4e054c64bad..69b54e5556c0 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
+ /*
+ * TODO: some models have alarms on a minute boundary but still support
+ * real hardware interrupts. Add this once the core supports it.
+ */
+ if (config->rtc_data_start != RTC_SEC)
+ rtc->rtc_dev->uie_unsupported = 1;
+
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index d417b203cbc5..1d3de2a3d1a4 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
unsigned int byte;
- int value = 0xff; /* return 0xff for ignored values */
+ int value = -1; /* return -1 for ignored values */
byte = readb(rtc->regbase + reg_off);
if (byte & AR_ENB) {
diff --git a/drivers/rtc/rtc-wilco-ec.c b/drivers/rtc/rtc-wilco-ec.c
new file mode 100644
index 000000000000..e62bda0cb53e
--- /dev/null
+++ b/drivers/rtc/rtc-wilco-ec.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC interface for Wilco Embedded Controller with R/W abilities
+ *
+ * Copyright 2018 Google LLC
+ *
+ * The corresponding platform device is typically registered in
+ * drivers/platform/chrome/wilco_ec/core.c
+ */
+
+#include <linux/bcd.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/rtc.h>
+#include <linux/timekeeping.h>
+
+#define EC_COMMAND_CMOS 0x7c
+#define EC_CMOS_TOD_WRITE 0x02
+#define EC_CMOS_TOD_READ 0x08
+
+/**
+ * struct ec_rtc_read - Format of RTC returned by EC.
+ * @second: Second value (0..59)
+ * @minute: Minute value (0..59)
+ * @hour: Hour value (0..23)
+ * @day: Day value (1..31)
+ * @month: Month value (1..12)
+ * @year: Year value (full year % 100)
+ * @century: Century value (full year / 100)
+ *
+ * All values are presented in binary (not BCD).
+ */
+struct ec_rtc_read {
+ u8 second;
+ u8 minute;
+ u8 hour;
+ u8 day;
+ u8 month;
+ u8 year;
+ u8 century;
+} __packed;
+
+/**
+ * struct ec_rtc_write - Format of RTC sent to the EC.
+ * @param: EC_CMOS_TOD_WRITE
+ * @century: Century value (full year / 100)
+ * @year: Year value (full year % 100)
+ * @month: Month value (1..12)
+ * @day: Day value (1..31)
+ * @hour: Hour value (0..23)
+ * @minute: Minute value (0..59)
+ * @second: Second value (0..59)
+ * @weekday: Day of the week (0=Saturday)
+ *
+ * All values are presented in BCD.
+ */
+struct ec_rtc_write {
+ u8 param;
+ u8 century;
+ u8 year;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 weekday;
+} __packed;
+
+static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev->parent);
+ u8 param = EC_CMOS_TOD_READ;
+ struct ec_rtc_read rtc;
+ struct wilco_ec_message msg = {
+ .type = WILCO_EC_MSG_LEGACY,
+ .flags = WILCO_EC_FLAG_RAW_RESPONSE,
+ .command = EC_COMMAND_CMOS,
+ .request_data = &param,
+ .request_size = sizeof(param),
+ .response_data = &rtc,
+ .response_size = sizeof(rtc),
+ };
+ int ret;
+
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ tm->tm_sec = rtc.second;
+ tm->tm_min = rtc.minute;
+ tm->tm_hour = rtc.hour;
+ tm->tm_mday = rtc.day;
+ tm->tm_mon = rtc.month - 1;
+ tm->tm_year = rtc.year + (rtc.century * 100) - 1900;
+ tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+ /* Don't compute the day of the week; we don't need it. */
+ tm->tm_wday = -1;
+
+ return 0;
+}
+
+static int wilco_ec_rtc_write(struct device *dev, struct rtc_time *tm)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev->parent);
+ struct ec_rtc_write rtc;
+ struct wilco_ec_message msg = {
+ .type = WILCO_EC_MSG_LEGACY,
+ .flags = WILCO_EC_FLAG_RAW_RESPONSE,
+ .command = EC_COMMAND_CMOS,
+ .request_data = &rtc,
+ .request_size = sizeof(rtc),
+ };
+ int year = tm->tm_year + 1900;
+ /*
+ * Convert from 0=Sunday to 0=Saturday for the EC
+ * We DO need to set weekday because the EC controls battery charging
+ * schedules that depend on the day of the week.
+ */
+ int wday = tm->tm_wday == 6 ? 0 : tm->tm_wday + 1;
+ int ret;
+
+ rtc.param = EC_CMOS_TOD_WRITE;
+ rtc.century = bin2bcd(year / 100);
+ rtc.year = bin2bcd(year % 100);
+ rtc.month = bin2bcd(tm->tm_mon + 1);
+ rtc.day = bin2bcd(tm->tm_mday);
+ rtc.hour = bin2bcd(tm->tm_hour);
+ rtc.minute = bin2bcd(tm->tm_min);
+ rtc.second = bin2bcd(tm->tm_sec);
+ rtc.weekday = bin2bcd(wday);
+
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct rtc_class_ops wilco_ec_rtc_ops = {
+ .read_time = wilco_ec_rtc_read,
+ .set_time = wilco_ec_rtc_write,
+};
+
+static int wilco_ec_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ rtc->ops = &wilco_ec_rtc_ops;
+ /* EC only supports this century */
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->owner = THIS_MODULE;
+
+ return rtc_register_device(rtc);
+}
+
+static struct platform_driver wilco_ec_rtc_driver = {
+ .driver = {
+ .name = "rtc-wilco-ec",
+ },
+ .probe = wilco_ec_rtc_probe,
+};
+
+module_platform_driver(wilco_ec_rtc_driver);
+
+MODULE_ALIAS("platform:rtc-wilco-ec");
+MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Wilco EC RTC driver");
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6e294b4d3635..f89f9d02e788 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
raw:
- block->blocks = (private->real_cyl *
+ block->blocks = ((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk);
dev_info(&device->cdev->dev,
- "DASD with %d KB/block, %d KB total size, %d KB/track, "
+ "DASD with %u KB/block, %lu KB total size, %u KB/track, "
"%s\n", (block->bp_block >> 10),
- ((private->real_cyl *
+ (((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk * (block->bp_block >> 9)) >> 1),
((blk_per_trk * block->bp_block) >> 10),
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index fd2146bcc0ad..e17364e13d2f 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -629,7 +629,7 @@ con3270_init(void)
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
- raw3270_add_view(&condev->view, &con3270_fn, 1);
+ raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 8f3a2eeb28dc..8b48ba9c598e 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp)
init_waitqueue_head(&fp->wait);
fp->fs_pid = get_pid(task_pid(current));
- rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+ rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+ RAW3270_VIEW_LOCK_BH);
if (rc) {
fs3270_free_view(&fp->view);
goto out;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index f8cd2935fbfd..63a41b168761 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
* Add view to device with minor "minor".
*/
int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
{
unsigned long flags;
struct raw3270 *rp;
@@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
view->cols = rp->cols;
view->ascebc = rp->ascebc;
spin_lock_init(&view->lock);
+ lockdep_set_subclass(&view->lock, subclass);
list_add(&view->list, &rp->view_list);
rc = 0;
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
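The new subclass argument exists because different 3270 views take view->lock from different contexts (the console from IRQ context, the tty and fullscreen views from BH context); giving each pattern its own lockdep subclass prevents false positives when both view types are active. A sketch of the resulting initialization for a console view:

	spin_lock_init(&view->lock);
	/* Console views acquire the lock from IRQ context. */
	lockdep_set_subclass(&view->lock, RAW3270_VIEW_LOCK_IRQ);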
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 114ca7cbf889..3afaa35f7351 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -150,6 +150,8 @@ struct raw3270_fn {
struct raw3270_view {
struct list_head list;
spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ 0
+#define RAW3270_VIEW_LOCK_BH 1
atomic_t ref_count;
struct raw3270 *dev;
struct raw3270_fn *fn;
@@ -158,7 +160,7 @@ struct raw3270_view {
unsigned char *ascebc; /* ascii -> ebcdic table */
};
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
int raw3270_activate_view(struct raw3270_view *);
void raw3270_del_view(struct raw3270_view *);
void raw3270_deactivate_view(struct raw3270_view *);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index e9aa71cdfc44..d2ab3f07c008 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -45,8 +45,8 @@ static struct list_head sclp_req_queue;
/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
-static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
-static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+static void *sclp_read_sccb;
+static struct init_sccb *sclp_init_sccb;
/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);
@@ -753,9 +753,8 @@ EXPORT_SYMBOL(sclp_remove_processed);
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
- struct init_sccb *sccb;
+ struct init_sccb *sccb = sclp_init_sccb;
- sccb = (struct init_sccb *) sclp_init_sccb;
clear_page(sccb);
memset(&sclp_init_req, 0, sizeof(struct sclp_req));
sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
@@ -782,7 +781,7 @@ static int
sclp_init_mask(int calculate)
{
unsigned long flags;
- struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
+ struct init_sccb *sccb = sclp_init_sccb;
sccb_mask_t receive_mask;
sccb_mask_t send_mask;
int retry;
@@ -1175,6 +1174,9 @@ sclp_init(void)
if (sclp_init_state != sclp_init_state_uninitialized)
goto fail_unlock;
sclp_init_state = sclp_init_state_initializing;
+ sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
+ sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
+ BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
/* Set up variables */
INIT_LIST_HEAD(&sclp_req_queue);
INIT_LIST_HEAD(&sclp_reg_list);
@@ -1207,6 +1209,8 @@ fail_unregister_reboot_notifier:
unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
sclp_init_state = sclp_init_state_uninitialized;
+ free_page((unsigned long) sclp_read_sccb);
+ free_page((unsigned long) sclp_init_sccb);
fail_unlock:
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 367e9d384d85..196333013e54 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -197,7 +197,9 @@ struct read_info_sccb {
u32 hmfai; /* 124-127 */
u8 _pad_128[134 - 128]; /* 128-133 */
u8 byte_134; /* 134 */
- u8 _pad_135[4096 - 135]; /* 135-4095 */
+ u8 cpudirq; /* 135 */
+ u16 cbl; /* 136-137 */
+ u8 _pad_138[4096 - 138]; /* 138-4095 */
} __packed __aligned(PAGE_SIZE);
struct read_storage_sccb {
@@ -319,7 +321,7 @@ extern int sclp_console_drop;
extern unsigned long sclp_console_full;
extern bool sclp_mask_compat_mode;
-extern char sclp_early_sccb[PAGE_SIZE];
+extern char *sclp_early_sccb;
void sclp_early_wait_irq(void);
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
@@ -365,14 +367,14 @@ sclp_ascebc(unsigned char ch)
/* translate string from EBCDIC to ASCII */
static inline void
-sclp_ebcasc_str(unsigned char *str, int nr)
+sclp_ebcasc_str(char *str, int nr)
{
(MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
}
/* translate string from ASCII to EBCDIC */
static inline void
-sclp_ascebc_str(unsigned char *str, int nr)
+sclp_ascebc_str(char *str, int nr)
{
(MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
}
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 8332788681c4..6c90aa725f23 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -40,6 +40,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
+ sclp.has_sipl = !!(sccb->cbl & 0x02);
+ sclp.has_sipl_g2 = !!(sccb->cbl & 0x04);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
@@ -93,6 +95,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
sclp.hmfai = sccb->hmfai;
+ sclp.has_dirq = !!(sccb->cpudirq & 0x80);
}
/*
@@ -144,7 +147,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb)
void __init sclp_early_detect(void)
{
- void *sccb = &sclp_early_sccb;
+ void *sccb = sclp_early_sccb;
sclp_early_facilities_detect(sccb);
sclp_early_init_core_info(sccb);
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 387c114ded3f..7737470f8498 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -16,7 +16,7 @@
static struct read_info_sccb __bootdata(sclp_info_sccb);
static int __bootdata(sclp_info_sccb_valid);
-char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
+char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
/*
* Used to keep track of the size of the event masks. Qemu until version 2.11
@@ -91,8 +91,8 @@ static void sclp_early_print_lm(const char *str, unsigned int len)
struct mto *mto;
struct go *go;
- sccb = (struct write_sccb *) &sclp_early_sccb;
- end = (unsigned char *) sccb + sizeof(sclp_early_sccb) - 1;
+ sccb = (struct write_sccb *) sclp_early_sccb;
+ end = (unsigned char *) sccb + EARLY_SCCB_SIZE - 1;
memset(sccb, 0, sizeof(*sccb));
ptr = (unsigned char *) &sccb->msg.mdb.mto;
offset = 0;
@@ -139,9 +139,9 @@ static void sclp_early_print_vt220(const char *str, unsigned int len)
{
struct vt220_sccb *sccb;
- sccb = (struct vt220_sccb *) &sclp_early_sccb;
- if (sizeof(*sccb) + len >= sizeof(sclp_early_sccb))
- len = sizeof(sclp_early_sccb) - sizeof(*sccb);
+ sccb = (struct vt220_sccb *) sclp_early_sccb;
+ if (sizeof(*sccb) + len >= EARLY_SCCB_SIZE)
+ len = EARLY_SCCB_SIZE - sizeof(*sccb);
memset(sccb, 0, sizeof(*sccb));
memcpy(&sccb->msg.data, str, len);
sccb->header.length = sizeof(*sccb) + len;
@@ -199,7 +199,7 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE);
*have_linemode = *have_vt220 = 0;
- sccb = (struct init_sccb *) &sclp_early_sccb;
+ sccb = (struct init_sccb *) sclp_early_sccb;
receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
send_mask = disable ? 0 : EVTYP_VT220MSG_MASK | EVTYP_MSG_MASK;
rc = sclp_early_set_event_mask(sccb, receive_mask, send_mask);
@@ -304,7 +304,7 @@ int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
void __weak __init add_mem_detect_block(u64 start, u64 end) {}
int __init sclp_early_read_storage_info(void)
{
- struct read_storage_sccb *sccb = (struct read_storage_sccb *)&sclp_early_sccb;
+ struct read_storage_sccb *sccb = (struct read_storage_sccb *)sclp_early_sccb;
int rc, id, max_id = 0;
unsigned long rn, rzm;
sclp_cmdw_t command;
@@ -320,8 +320,8 @@ int __init sclp_early_read_storage_info(void)
rzm <<= 20;
for (id = 0; id <= max_id; id++) {
- memset(sclp_early_sccb, 0, sizeof(sclp_early_sccb));
- sccb->header.length = sizeof(sclp_early_sccb);
+ memset(sclp_early_sccb, 0, EARLY_SCCB_SIZE);
+ sccb->header.length = EARLY_SCCB_SIZE;
command = SCLP_CMDW_READ_STORAGE_INFO | (id << 8);
rc = sclp_early_cmd(command, sccb);
if (rc)
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 8e0b69a2f11a..13f97fd73aca 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -29,7 +29,7 @@ static struct sclp_register sclp_sdias_register = {
.send_mask = EVTYP_SDIAS_MASK,
};
-static struct sdias_sccb sccb __attribute__((aligned(4096)));
+static struct sdias_sccb *sclp_sdias_sccb;
static struct sdias_evbuf sdias_evbuf;
static DECLARE_COMPLETION(evbuf_accepted);
@@ -58,6 +58,7 @@ static void sdias_callback(struct sclp_req *request, void *data)
static int sdias_sclp_send(struct sclp_req *req)
{
+ struct sdias_sccb *sccb = sclp_sdias_sccb;
int retries;
int rc;
@@ -78,16 +79,16 @@ static int sdias_sclp_send(struct sclp_req *req)
continue;
}
/* if not accepted, retry */
- if (!(sccb.evbuf.hdr.flags & 0x80)) {
+ if (!(sccb->evbuf.hdr.flags & 0x80)) {
TRACE("sclp request failed: flags=%x\n",
- sccb.evbuf.hdr.flags);
+ sccb->evbuf.hdr.flags);
continue;
}
/*
* for the sync interface the response is in the initial sccb
*/
if (!sclp_sdias_register.receiver_fn) {
- memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
+ memcpy(&sdias_evbuf, &sccb->evbuf, sizeof(sdias_evbuf));
TRACE("sync request done\n");
return 0;
}
@@ -104,23 +105,24 @@ static int sdias_sclp_send(struct sclp_req *req)
*/
int sclp_sdias_blk_count(void)
{
+ struct sdias_sccb *sccb = sclp_sdias_sccb;
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
- memset(&sccb, 0, sizeof(sccb));
+ memset(sccb, 0, sizeof(*sccb));
memset(&request, 0, sizeof(request));
- sccb.hdr.length = sizeof(sccb);
- sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
- sccb.evbuf.hdr.type = EVTYP_SDIAS;
- sccb.evbuf.event_qual = SDIAS_EQ_SIZE;
- sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
- sccb.evbuf.event_id = 4712;
- sccb.evbuf.dbs = 1;
+ sccb->hdr.length = sizeof(*sccb);
+ sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb->evbuf.hdr.type = EVTYP_SDIAS;
+ sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
+ sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+ sccb->evbuf.event_id = 4712;
+ sccb->evbuf.dbs = 1;
- request.sccb = &sccb;
+ request.sccb = sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
@@ -130,8 +132,8 @@ int sclp_sdias_blk_count(void)
pr_err("sclp_send failed for get_nr_blocks\n");
goto out;
}
- if (sccb.hdr.response_code != 0x0020) {
- TRACE("send failed: %x\n", sccb.hdr.response_code);
+ if (sccb->hdr.response_code != 0x0020) {
+ TRACE("send failed: %x\n", sccb->hdr.response_code);
rc = -EIO;
goto out;
}
@@ -163,30 +165,31 @@ out:
*/
int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
{
+ struct sdias_sccb *sccb = sclp_sdias_sccb;
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
- memset(&sccb, 0, sizeof(sccb));
+ memset(sccb, 0, sizeof(*sccb));
memset(&request, 0, sizeof(request));
- sccb.hdr.length = sizeof(sccb);
- sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
- sccb.evbuf.hdr.type = EVTYP_SDIAS;
- sccb.evbuf.hdr.flags = 0;
- sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
- sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
- sccb.evbuf.event_id = 4712;
- sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
- sccb.evbuf.event_status = 0;
- sccb.evbuf.blk_cnt = nr_blks;
- sccb.evbuf.asa = (unsigned long)dest;
- sccb.evbuf.fbn = start_blk;
- sccb.evbuf.lbn = 0;
- sccb.evbuf.dbs = 1;
-
- request.sccb = &sccb;
+ sccb->hdr.length = sizeof(*sccb);
+ sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb->evbuf.hdr.type = EVTYP_SDIAS;
+ sccb->evbuf.hdr.flags = 0;
+ sccb->evbuf.event_qual = SDIAS_EQ_STORE_DATA;
+ sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+ sccb->evbuf.event_id = 4712;
+ sccb->evbuf.asa_size = SDIAS_ASA_SIZE_64;
+ sccb->evbuf.event_status = 0;
+ sccb->evbuf.blk_cnt = nr_blks;
+ sccb->evbuf.asa = (unsigned long)dest;
+ sccb->evbuf.fbn = start_blk;
+ sccb->evbuf.lbn = 0;
+ sccb->evbuf.dbs = 1;
+
+ request.sccb = sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
@@ -196,8 +199,8 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
pr_err("sclp_send failed: %x\n", rc);
goto out;
}
- if (sccb.hdr.response_code != 0x0020) {
- TRACE("copy failed: %x\n", sccb.hdr.response_code);
+ if (sccb->hdr.response_code != 0x0020) {
+ TRACE("copy failed: %x\n", sccb->hdr.response_code);
rc = -EIO;
goto out;
}
@@ -256,6 +259,8 @@ int __init sclp_sdias_init(void)
{
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return 0;
+ sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
+ BUG_ON(!sclp_sdias_sccb);
sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
debug_register_view(sdias_dbf, &debug_sprintf_view);
debug_set_level(sdias_dbf, 6);
@@ -264,6 +269,7 @@ int __init sclp_sdias_init(void)
if (sclp_sdias_init_async() == 0)
goto out;
TRACE("init failed\n");
+ free_page((unsigned long) sclp_sdias_sccb);
return -ENODEV;
out:
TRACE("init done\n");
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 2b0c36c2c568..98d7fc152e32 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return PTR_ERR(tp);
rc = raw3270_add_view(&tp->view, &tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR);
+ tty->index + RAW3270_FIRSTMINOR,
+ RAW3270_VIEW_LOCK_BH);
if (rc) {
tty3270_free_view(tp);
return rc;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 76d3c50bf078..f75d3bfb5af3 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -51,7 +51,7 @@ static struct dentry *zcore_dir;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
-static struct ipl_parameter_block *ipl_block;
+static struct ipl_parameter_block *zcore_ipl_block;
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -182,8 +182,8 @@ static const struct file_operations zcore_memmap_fops = {
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
- if (ipl_block) {
- diag308(DIAG308_SET, ipl_block);
+ if (zcore_ipl_block) {
+ diag308(DIAG308_SET, zcore_ipl_block);
diag308(DIAG308_LOAD_CLEAR, NULL);
}
return count;
@@ -265,18 +265,20 @@ static int __init zcore_reipl_init(void)
return rc;
if (ipib_info.ipib == 0)
return 0;
- ipl_block = (void *) __get_free_page(GFP_KERNEL);
- if (!ipl_block)
+ zcore_ipl_block = (void *) __get_free_page(GFP_KERNEL);
+ if (!zcore_ipl_block)
return -ENOMEM;
if (ipib_info.ipib < sclp.hsa_size)
- rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
+ rc = memcpy_hsa_kernel(zcore_ipl_block, ipib_info.ipib,
+ PAGE_SIZE);
else
- rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
- if (rc || (__force u32)csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+ rc = memcpy_real(zcore_ipl_block, (void *) ipib_info.ipib,
+ PAGE_SIZE);
+ if (rc || (__force u32)csum_partial(zcore_ipl_block, zcore_ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
- free_page((unsigned long) ipl_block);
- ipl_block = NULL;
+ free_page((unsigned long) zcore_ipl_block);
+ zcore_ipl_block = NULL;
}
return 0;
}
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index f230516abb96..f6a8db04177c 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -20,5 +20,6 @@ obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
-vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o
+vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
+ vfio_ccw_async.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index a45011e4529e..4534afc63591 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -27,6 +27,8 @@
static DEFINE_SPINLOCK(airq_lists_lock);
static struct hlist_head airq_lists[MAX_ISC+1];
+static struct kmem_cache *airq_iv_cache;
+
/**
* register_adapter_interrupt() - register adapter interrupt handler
* @airq: pointer to adapter interrupt descriptor
@@ -95,7 +97,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list)
if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
- airq->handler(airq);
+ airq->handler(airq, !tpi_info->directed_irq);
rcu_read_unlock();
return IRQ_HANDLED;
@@ -129,10 +131,21 @@ struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
if (!iv)
goto out;
iv->bits = bits;
+ iv->flags = flags;
size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
- iv->vector = kzalloc(size, GFP_KERNEL);
- if (!iv->vector)
- goto out_free;
+
+ if (flags & AIRQ_IV_CACHELINE) {
+ if ((cache_line_size() * BITS_PER_BYTE) < bits)
+ goto out_free;
+
+ iv->vector = kmem_cache_zalloc(airq_iv_cache, GFP_KERNEL);
+ if (!iv->vector)
+ goto out_free;
+ } else {
+ iv->vector = kzalloc(size, GFP_KERNEL);
+ if (!iv->vector)
+ goto out_free;
+ }
if (flags & AIRQ_IV_ALLOC) {
iv->avail = kmalloc(size, GFP_KERNEL);
if (!iv->avail)
@@ -165,7 +178,10 @@ out_free:
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->avail);
- kfree(iv->vector);
+ if (iv->flags & AIRQ_IV_CACHELINE)
+ kmem_cache_free(airq_iv_cache, iv->vector);
+ else
+ kfree(iv->vector);
kfree(iv);
out:
return NULL;
@@ -181,7 +197,10 @@ void airq_iv_release(struct airq_iv *iv)
kfree(iv->data);
kfree(iv->ptr);
kfree(iv->bitlock);
- kfree(iv->vector);
+ if (iv->flags & AIRQ_IV_CACHELINE)
+ kmem_cache_free(airq_iv_cache, iv->vector);
+ else
+ kfree(iv->vector);
kfree(iv->avail);
kfree(iv);
}
@@ -275,3 +294,13 @@ unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
return bit;
}
EXPORT_SYMBOL(airq_iv_scan);
+
+static int __init airq_init(void)
+{
+ airq_iv_cache = kmem_cache_create("airq_iv_cache", cache_line_size(),
+ cache_line_size(), 0, NULL);
+ if (!airq_iv_cache)
+ return -ENOMEM;
+ return 0;
+}
+subsys_initcall(airq_init);
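
A minimal caller sketch for the new AIRQ_IV_CACHELINE path, assuming only the
helpers shown above (the demo_* names are hypothetical). The bit count must fit
within cache_line_size() * BITS_PER_BYTE, or airq_iv_create() fails:

    #include <asm/airq.h>

    static struct airq_iv *demo_iv;

    static int demo_alloc_iv(void)
    {
        /* 64 bits fit in one cache line, so the vector comes from airq_iv_cache */
        demo_iv = airq_iv_create(64, AIRQ_IV_CACHELINE);
        return demo_iv ? 0 : -ENOMEM;
    }

    static void demo_free_iv(void)
    {
        /* releases through kmem_cache_free() because of AIRQ_IV_CACHELINE */
        airq_iv_release(demo_iv);
    }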
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a0baee25134c..a835b31aad99 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -24,6 +24,7 @@
#include <asm/crw.h>
#include <asm/isc.h>
#include <asm/ebcdic.h>
+#include <asm/ap.h>
#include "css.h"
#include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
" failed (rc=%d).\n", ret);
}
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+ CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+ if (sei_area->rs != 5)
+ return;
+
+ ap_bus_cfg_chg();
+}
+
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
case 2: /* i/o resource accessibility */
chsc_process_sei_res_acc(sei_area);
break;
+ case 3: /* ap config changed */
+ chsc_process_sei_ap_cfg_chg(sei_area);
+ break;
case 7: /* channel-path-availability information */
chsc_process_sei_chp_avail(sei_area);
break;
@@ -1382,3 +1395,40 @@ int chsc_pnso_brinfo(struct subchannel_id schid,
return chsc_error_from_response(brinfo_area->response.code);
}
EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
+
+int chsc_sgib(u32 origin)
+{
+ struct {
+ struct chsc_header request;
+ u16 op;
+ u8 reserved01[2];
+ u8 reserved02:4;
+ u8 fmt:4;
+ u8 reserved03[7];
+ /* operation data area begin */
+ u8 reserved04[4];
+ u32 gib_origin;
+ u8 reserved05[10];
+ u8 aix;
+ u8 reserved06[4029];
+ struct chsc_header response;
+ u8 reserved07[4];
+ } *sgib_area;
+ int ret;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sgib_area = chsc_page;
+ sgib_area->request.length = 0x0fe0;
+ sgib_area->request.code = 0x0021;
+ sgib_area->op = 0x1;
+ sgib_area->gib_origin = origin;
+
+ ret = chsc(sgib_area);
+ if (ret == 0)
+ ret = chsc_error_from_response(sgib_area->response.code);
+ spin_unlock_irq(&chsc_page_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_sgib);
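
A hedged usage sketch for the exported helper: chsc_sgib() takes the 32-bit
origin of a guest information block and returns 0 or a negative errno derived
from the CHSC response code. The caller name below is illustrative only:

    static int demo_set_gib_origin(u32 gib_origin)
    {
        int rc = chsc_sgib(gib_origin);

        if (rc)
            pr_warn("setting GIB origin failed (%d)\n", rc);
        return rc;
    }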
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 78aba8d94eec..e57d68e325a3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -164,6 +164,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
u64 summary_indicator_addr, u64 subchannel_indicator_addr);
+int chsc_sgib(u32 origin);
int chsc_error_from_response(int response);
int chsc_siosl(struct subchannel_id schid);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index de744ca158fd..18f5458f90e8 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -564,7 +564,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
}
static struct irqaction io_interrupt = {
- .name = "IO",
+ .name = "I/O",
.handler = do_cio_interrupt,
};
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 9811fd8a0c73..06a91743335a 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -51,7 +51,7 @@ struct tpi_info {
struct subchannel_id schid;
u32 intparm;
u32 adapter_IO:1;
- u32 :1;
+ u32 directed_irq:1;
u32 isc:3;
u32 :27;
u32 type:3;
@@ -115,7 +115,7 @@ struct subchannel {
struct schib_config config;
} __attribute__ ((aligned(8)));
-DECLARE_PER_CPU(struct irb, cio_irb);
+DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
#define to_subchannel(n) container_of(n, struct subchannel, dev)
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 14d328338ce2..08eb10283b18 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -233,6 +233,7 @@ int hsch(struct subchannel_id schid)
return ccode;
}
+EXPORT_SYMBOL(hsch);
static inline int __xsch(struct subchannel_id schid)
{
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a6f7c2986b94..a06944399865 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -228,9 +228,6 @@ struct qdio_q {
*/
int first_to_check;
- /* first_to_check of the last time */
- int last_move;
-
/* beginning position for calling the program */
int first_to_kick;
@@ -341,8 +338,7 @@ static inline int multicast_outbound(struct qdio_q *q)
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
-#define pci_out_supported(q) \
- (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
+#define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index d2f98e5829d4..35410e6eda2e 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -121,15 +121,14 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
q->timestamp, last_ai_time);
- seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n",
- atomic_read(&q->nr_buf_used),
- q->first_to_check, q->last_move);
+ seq_printf(m, "nr_used: %d ftc: %d\n",
+ atomic_read(&q->nr_buf_used), q->first_to_check);
if (q->is_input_q) {
seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
q->u.in.polling, q->u.in.ack_start,
q->u.in.ack_count);
- seq_printf(m, "DSCI: %d IRQs disabled: %u\n",
- *(u32 *)q->irq_ptr->dsci,
+ seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
+ *(u8 *)q->irq_ptr->dsci,
test_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state));
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9537e656e927..cfce255521ac 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -371,7 +371,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
static inline void qdio_sync_queues(struct qdio_q *q)
{
/* PCI capable outbound queues will also be scanned so sync them too */
- if (pci_out_supported(q))
+ if (pci_out_supported(q->irq_ptr))
qdio_siga_sync_all(q);
else
qdio_siga_sync_q(q);
@@ -415,7 +415,8 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count)
q->q_stats.nr_sbals[pos]++;
}
-static void process_buffer_error(struct qdio_q *q, int count)
+static void process_buffer_error(struct qdio_q *q, unsigned int start,
+ int count)
{
unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
SLSB_P_OUTPUT_NOT_INIT;
@@ -424,29 +425,29 @@ static void process_buffer_error(struct qdio_q *q, int count)
/* special handling for no target buffer empty */
if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
- q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
+ q->sbal[start]->element[15].sflags == 0x10) {
qperf_inc(q, target_full);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
- q->first_to_check);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
goto set;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
- DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+ DBF_ERROR("FTC:%3d C:%3d", start, count);
DBF_ERROR("F14:%2x F15:%2x",
- q->sbal[q->first_to_check]->element[14].sflags,
- q->sbal[q->first_to_check]->element[15].sflags);
+ q->sbal[start]->element[14].sflags,
+ q->sbal[start]->element[15].sflags);
set:
/*
* Interrupts may be avoided as long as the error is present
* so change the buffer state immediately to avoid starvation.
*/
- set_buf_states(q, q->first_to_check, state, count);
+ set_buf_states(q, start, state, count);
}
-static inline void inbound_primed(struct qdio_q *q, int count)
+static inline void inbound_primed(struct qdio_q *q, unsigned int start,
+ int count)
{
int new;
@@ -457,7 +458,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
if (!q->u.in.polling) {
q->u.in.polling = 1;
q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
+ q->u.in.ack_start = start;
return;
}
@@ -465,7 +466,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
q->u.in.ack_count);
q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
+ q->u.in.ack_start = start;
return;
}
@@ -473,7 +474,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
* ACK the newest buffer. The ACK will be removed in qdio_stop_polling
* or by the next inbound run.
*/
- new = add_buf(q->first_to_check, count - 1);
+ new = add_buf(start, count - 1);
if (q->u.in.polling) {
/* reset the previous ACK but first set the new one */
set_buf_state(q, new, SLSB_P_INPUT_ACK);
@@ -488,10 +489,10 @@ static inline void inbound_primed(struct qdio_q *q, int count)
if (!count)
return;
/* need to change ALL buffers to get more interrupts */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+ set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
}
-static int get_inbound_buffer_frontier(struct qdio_q *q)
+static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
@@ -504,64 +505,58 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
if (!count)
- goto out;
+ return 0;
/*
	 * No siga sync here: either a PCI interrupt or we ourselves, after a
	 * thin interrupt, have already synced the queues.
*/
- count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+ count = get_buf_states(q, start, &state, count, 1, 0);
if (!count)
- goto out;
+ return 0;
switch (state) {
case SLSB_P_INPUT_PRIMED:
- inbound_primed(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ inbound_primed(q, start, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
- break;
+ return count;
case SLSB_P_INPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ process_buffer_error(q, start, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
- q->nr, q->first_to_check);
- break;
+ q->nr, start);
+ return 0;
default:
WARN_ON_ONCE(1);
+ return 0;
}
-out:
- return q->first_to_check;
}
-static int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
- int bufnr;
+ int count;
- bufnr = get_inbound_buffer_frontier(q);
+ count = get_inbound_buffer_frontier(q, start);
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
- if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_tod_clock();
- return 1;
- } else
- return 0;
+ if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
+ q->u.in.timestamp = get_tod_clock();
+
+ return count;
}
-static inline int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
@@ -570,7 +565,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
+ get_buf_state(q, start, &state, 0);
if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
@@ -588,8 +583,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
- q->first_to_check);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
return 1;
} else
return 0;
@@ -637,17 +631,13 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
return phys_aob;
}
-static void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
{
int start = q->first_to_kick;
- int end = q->first_to_check;
- int count;
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
- count = sub_buf(end, start);
-
if (q->is_input_q) {
qperf_inc(q, inbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
@@ -663,7 +653,7 @@ static void qdio_kick_handler(struct qdio_q *q)
q->irq_ptr->int_parm);
/* for the next time */
- q->first_to_kick = end;
+ q->first_to_kick = add_buf(start, count);
q->qdio_error = 0;
}
@@ -678,14 +668,20 @@ static inline int qdio_tasklet_schedule(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_inbound);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return;
- qdio_kick_handler(q);
+ start = add_buf(start, count);
+ q->first_to_check = start;
+ qdio_kick_handler(q, count);
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
if (!qdio_tasklet_schedule(q))
@@ -697,7 +693,7 @@ static void __qdio_inbound_processing(struct qdio_q *q)
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched2);
qdio_tasklet_schedule(q);
}
@@ -709,7 +705,7 @@ void qdio_inbound_processing(unsigned long data)
__qdio_inbound_processing(q);
}
-static int get_outbound_buffer_frontier(struct qdio_q *q)
+static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
@@ -718,7 +714,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
- !pci_out_supported(q)) ||
+ !pci_out_supported(q->irq_ptr)) ||
(queue_type(q) == QDIO_IQDIO_QFMT &&
multicast_outbound(q)))
qdio_siga_sync_q(q);
@@ -729,12 +725,11 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
if (!count)
- goto out;
+ return 0;
- count = get_buf_states(q, q->first_to_check, &state, count, 0,
- q->u.out.use_cq);
+ count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
if (!count)
- goto out;
+ return 0;
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
@@ -743,34 +738,29 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
"out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
- q->first_to_check = add_buf(q->first_to_check, count);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
-
- break;
+ return count;
case SLSB_P_OUTPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ process_buffer_error(q, start, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
q->nr);
- break;
+ return 0;
case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
- break;
+ return 0;
default:
WARN_ON_ONCE(1);
+ return 0;
}
-
-out:
- return q->first_to_check;
}
/* all buffers processed? */
@@ -779,18 +769,16 @@ static inline int qdio_outbound_q_done(struct qdio_q *q)
return atomic_read(&q->nr_buf_used) == 0;
}
-static inline int qdio_outbound_q_moved(struct qdio_q *q)
+static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
- int bufnr;
+ int count;
- bufnr = get_outbound_buffer_frontier(q);
+ count = get_outbound_buffer_frontier(q, start);
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
+ if (count)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
- return 1;
- } else
- return 0;
+
+ return count;
}
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
@@ -837,15 +825,21 @@ retry:
static void __qdio_outbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_outbound);
WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
- if (qdio_outbound_q_moved(q))
- qdio_kick_handler(q);
+ count = qdio_outbound_q_moved(q, start);
+ if (count) {
+ q->first_to_check = add_buf(start, count);
+ qdio_kick_handler(q, count);
+ }
- if (queue_type(q) == QDIO_ZFCP_QFMT)
- if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
- goto sched;
+ if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
+ !qdio_outbound_q_done(q))
+ goto sched;
if (q->u.out.pci_out_enabled)
return;
@@ -881,37 +875,40 @@ void qdio_outbound_timer(struct timer_list *t)
qdio_tasklet_schedule(q);
}
-static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
struct qdio_q *out;
int i;
- if (!pci_out_supported(q))
+ if (!pci_out_supported(irq))
return;
- for_each_output_queue(q->irq_ptr, out, i)
+ for_each_output_queue(irq, out, i)
if (!qdio_outbound_q_done(out))
qdio_tasklet_schedule(out);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_inbound);
if (need_siga_sync(q) && need_siga_sync_after_ai(q))
qdio_sync_queues(q);
- /*
- * The interrupt could be caused by a PCI request. Check the
- * PCI capable outbound queues.
- */
- qdio_check_outbound_after_thinint(q);
+ /* The interrupt could be caused by a PCI request: */
+ qdio_check_outbound_pci_queues(q->irq_ptr);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return;
- qdio_kick_handler(q);
+ start = add_buf(start, count);
+ q->first_to_check = start;
+ qdio_kick_handler(q, count);
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched);
if (!qdio_tasklet_schedule(q))
return;
@@ -922,7 +919,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched2);
qdio_tasklet_schedule(q);
}
@@ -976,7 +973,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+ if (!pci_out_supported(irq_ptr))
return;
for_each_output_queue(irq_ptr, q, i) {
@@ -1642,7 +1639,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
*/
if (test_nonshared_ind(irq_ptr))
goto rescan;
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q, q->first_to_check))
goto rescan;
return 0;
@@ -1672,12 +1669,14 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
int *error)
{
struct qdio_q *q;
- int start, end;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ unsigned int start;
+ int count;
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
+ start = q->first_to_check;
/*
* Cannot rely on automatic sync after interrupt since queues may
@@ -1686,25 +1685,27 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
if (need_siga_sync(q))
qdio_sync_queues(q);
- /* check the PCI capable outbound queues. */
- qdio_check_outbound_after_thinint(q);
+ qdio_check_outbound_pci_queues(irq_ptr);
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return 0;
+ start = add_buf(start, count);
+ q->first_to_check = start;
+
/* Note: upper-layer MUST stop processing immediately here ... */
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return -EIO;
- start = q->first_to_kick;
- end = q->first_to_check;
- *bufnr = start;
+ *bufnr = q->first_to_kick;
*error = q->qdio_error;
/* for the next time */
- q->first_to_kick = end;
+ q->first_to_kick = add_buf(q->first_to_kick, count);
q->qdio_error = 0;
- return sub_buf(end, start);
+
+ return count;
}
EXPORT_SYMBOL(qdio_get_next_buffers);
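
The refactoring above follows one pattern throughout: the scan helpers take an
explicit start index and return a buffer count instead of mutating
q->first_to_check internally, so the caller owns the frontier. Restated as a
sketch that mirrors __qdio_inbound_processing(); this is the patch's own
pattern, not new API:

    static void demo_scan_inbound(struct qdio_q *q)
    {
        unsigned int start = q->first_to_check;
        int count = qdio_inbound_q_moved(q, start);    /* scan from start */

        if (count) {
            q->first_to_check = add_buf(start, count); /* caller advances the frontier */
            qdio_kick_handler(q, count);
        }
    }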
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index a59887fad13e..99d7d2566a3a 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -523,7 +523,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
irq_ptr->schid.sch_no,
is_thinint_irq(irq_ptr),
(irq_ptr->sch_token) ? 1 : 0,
- (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
+ pci_out_supported(irq_ptr) ? 1 : 0,
css_general_characteristics.aif_tdd,
(irq_ptr->siga_flag.input) ? "R" : " ",
(irq_ptr->siga_flag.output) ? "W" : " ",
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 07dea602205b..28d59ac2204c 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -40,7 +40,7 @@ static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);
/* Adapter interrupt definitions */
-static void tiqdio_thinint_handler(struct airq_struct *airq);
+static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating);
static struct airq_struct tiqdio_airq = {
.handler = tiqdio_thinint_handler,
@@ -179,7 +179,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @airq: pointer to adapter interrupt descriptor
*/
-static void tiqdio_thinint_handler(struct airq_struct *airq)
+static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
u32 si_used = clear_shared_ind();
struct qdio_q *q;
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
new file mode 100644
index 000000000000..8c1d2357ef5b
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Async I/O region for vfio_ccw
+ *
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Cornelia Huck <cohuck@redhat.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "vfio_ccw_private.h"
+
+static ssize_t vfio_ccw_async_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_cmd_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static ssize_t vfio_ccw_async_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_cmd_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ if (!mutex_trylock(&private->io_mutex))
+ return -EAGAIN;
+
+ region = private->region[i].data;
+ if (copy_from_user((void *)region + pos, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_ASYNC_REQ);
+
+ ret = region->ret_code ? region->ret_code : count;
+
+out_unlock:
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
+ .read = vfio_ccw_async_region_read,
+ .write = vfio_ccw_async_region_write,
+ .release = vfio_ccw_async_region_release,
+};
+
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD,
+ &vfio_ccw_async_region_ops,
+ sizeof(struct ccw_cmd_region),
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE,
+ private->cmd_region);
+}
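
From userspace, the new region accepts a struct ccw_cmd_region write and the
FSM's ret_code determines the result of the write. A hypothetical sketch,
assuming device_fd and region_offset were obtained via the usual
VFIO_DEVICE_GET_REGION_INFO lookup and that the UAPI header name is as below:

    #include <stdio.h>
    #include <unistd.h>
    #include <linux/vfio_ccw.h>    /* assumed header for struct ccw_cmd_region */

    static int demo_request_halt(int device_fd, off_t region_offset)
    {
        struct ccw_cmd_region cmd = {
            .command = VFIO_CCW_ASYNC_CMD_HSCH,    /* halt the subchannel */
        };

        /* a nonzero ret_code surfaces as a failed write(2) */
        if (pwrite(device_fd, &cmd, sizeof(cmd), region_offset) != sizeof(cmd)) {
            perror("vfio-ccw async halt");
            return -1;
        }
        return 0;
    }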
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 384b3987eeb4..0e79799e9a71 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -362,6 +362,7 @@ static void cp_unpin_free(struct channel_program *cp)
struct ccwchain *chain, *temp;
int i;
+ cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
pfn_array_table_unpin_free(chain->ch_pat + i,
@@ -732,6 +733,9 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/
cp->orb.cmd.c64 = 1;
+ if (!ret)
+ cp->initialized = true;
+
return ret;
}
@@ -746,7 +750,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/
void cp_free(struct channel_program *cp)
{
- cp_unpin_free(cp);
+ if (cp->initialized)
+ cp_unpin_free(cp);
}
/**
@@ -791,6 +796,10 @@ int cp_prefetch(struct channel_program *cp)
struct ccwchain *chain;
int len, idx, ret;
+ /* this is an error in the caller */
+ if (!cp->initialized)
+ return -EINVAL;
+
list_for_each_entry(chain, &cp->ccwchain_list, next) {
len = chain->ch_len;
for (idx = 0; idx < len; idx++) {
@@ -826,6 +835,10 @@ union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
struct ccwchain *chain;
struct ccw1 *cpa;
+ /* this is an error in the caller */
+ if (!cp->initialized)
+ return NULL;
+
orb = &cp->orb;
orb->cmd.intparm = intparm;
@@ -862,6 +875,9 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
u32 cpa = scsw->cmd.cpa;
u32 ccw_head;
+ if (!cp->initialized)
+ return;
+
/*
* LATER:
* For now, only update the cmd.cpa part. We may need to deal with
@@ -898,6 +914,9 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
struct ccwchain *chain;
int i;
+ if (!cp->initialized)
+ return false;
+
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
if (pfn_array_table_iova_pinned(chain->ch_pat + i,
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index a4b74fb1aa57..3c20cd208da5 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -21,6 +21,7 @@
* @ccwchain_list: list head of ccwchains
* @orb: orb for the currently processed ssch request
* @mdev: the mediated device to perform page pinning/unpinning
+ * @initialized: whether this instance is actually initialized
*
* @ccwchain_list is the head of a ccwchain list that contains the
* translated result of the guest channel program pointed out by
@@ -30,6 +31,7 @@ struct channel_program {
struct list_head ccwchain_list;
union orb orb;
struct device *mdev;
+ bool initialized;
};
extern int cp_init(struct channel_program *cp, struct device *mdev,
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a10cec0e86eb..ee8767f5845a 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -3,9 +3,11 @@
* VFIO based Physical Subchannel device driver
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/module.h>
@@ -23,6 +25,7 @@
struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
+static struct kmem_cache *vfio_ccw_cmd_region;
/*
* Helpers
@@ -40,26 +43,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
if (ret != -EBUSY)
goto out_unlock;
+ iretry = 255;
do {
- iretry = 255;
ret = cio_cancel_halt_clear(sch, &iretry);
- while (ret == -EBUSY) {
- /*
- * Flush all I/O and wait for
- * cancel/halt/clear completion.
- */
- private->completion = &completion;
- spin_unlock_irq(sch->lock);
- wait_for_completion_timeout(&completion, 3*HZ);
+ if (ret == -EIO) {
+ pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ break;
+ }
+
+ /*
+ * Flush all I/O and wait for
+ * cancel/halt/clear completion.
+ */
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
- spin_lock_irq(sch->lock);
- private->completion = NULL;
- flush_workqueue(vfio_ccw_work_q);
- ret = cio_cancel_halt_clear(sch, &iretry);
- };
+ if (ret == -EBUSY)
+ wait_for_completion_timeout(&completion, 3*HZ);
+ private->completion = NULL;
+ flush_workqueue(vfio_ccw_work_q);
+ spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
out_unlock:
@@ -72,20 +79,26 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
struct irb *irb;
+ bool is_final;
private = container_of(work, struct vfio_ccw_private, io_work);
irb = &private->irb;
+ is_final = !(scsw_actl(&irb->scsw) &
+ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
- cp_free(&private->cp);
+ if (is_final)
+ cp_free(&private->cp);
}
+ mutex_lock(&private->io_mutex);
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
+ mutex_unlock(&private->io_mutex);
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
- if (private->mdev)
+ if (private->mdev && is_final)
private->state = VFIO_CCW_STATE_IDLE;
}
@@ -104,7 +117,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
struct vfio_ccw_private *private;
- int ret;
+ int ret = -ENOMEM;
if (pmcw->qf) {
dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
@@ -118,13 +131,17 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA);
- if (!private->io_region) {
- kfree(private);
- return -ENOMEM;
- }
+ if (!private->io_region)
+ goto out_free;
+
+ private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
+ GFP_KERNEL | GFP_DMA);
+ if (!private->cmd_region)
+ goto out_free;
private->sch = sch;
dev_set_drvdata(&sch->dev, private);
+ mutex_init(&private->io_mutex);
spin_lock_irq(sch->lock);
private->state = VFIO_CCW_STATE_NOT_OPER;
@@ -148,7 +165,10 @@ out_disable:
cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
- kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ if (private->cmd_region)
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+ if (private->io_region)
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
return ret;
}
@@ -163,6 +183,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
dev_set_drvdata(&sch->dev, NULL);
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
@@ -237,7 +258,7 @@ static struct css_driver vfio_ccw_sch_driver = {
static int __init vfio_ccw_sch_init(void)
{
- int ret;
+ int ret = -ENOMEM;
vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
if (!vfio_ccw_work_q)
@@ -247,20 +268,30 @@ static int __init vfio_ccw_sch_init(void)
sizeof(struct ccw_io_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_io_region), NULL);
- if (!vfio_ccw_io_region) {
- destroy_workqueue(vfio_ccw_work_q);
- return -ENOMEM;
- }
+ if (!vfio_ccw_io_region)
+ goto out_err;
+
+ vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
+ sizeof(struct ccw_cmd_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_cmd_region), NULL);
+ if (!vfio_ccw_cmd_region)
+ goto out_err;
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
isc_unregister(VFIO_CCW_ISC);
- kmem_cache_destroy(vfio_ccw_io_region);
- destroy_workqueue(vfio_ccw_work_q);
+ goto out_err;
}
return ret;
+
+out_err:
+ kmem_cache_destroy(vfio_ccw_cmd_region);
+ kmem_cache_destroy(vfio_ccw_io_region);
+ destroy_workqueue(vfio_ccw_work_q);
+ return ret;
}
static void __exit vfio_ccw_sch_exit(void)
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index cab17865aafe..49d9d3da0282 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -3,8 +3,10 @@
* Finite state machine for vfio-ccw device handling
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/vfio.h>
@@ -28,9 +30,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
sch = private->sch;
spin_lock_irqsave(sch->lock, flags);
- private->state = VFIO_CCW_STATE_BUSY;
orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
+ if (!orb) {
+ ret = -EIO;
+ goto out;
+ }
/* Issue "Start Subchannel" */
ccode = ssch(sch->schid, orb);
@@ -42,6 +47,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
*/
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
ret = 0;
+ private->state = VFIO_CCW_STATE_CP_PENDING;
break;
case 1: /* Status pending */
case 2: /* Busy */
@@ -64,6 +70,76 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
default:
ret = ccode;
}
+out:
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
+}
+
+static int fsm_do_halt(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ unsigned long flags;
+ int ccode;
+ int ret;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+
+ /* Issue "Halt Subchannel" */
+ ccode = hsch(sch->schid);
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+ ret = 0;
+ break;
+ case 1: /* Status pending */
+ case 2: /* Busy */
+ ret = -EBUSY;
+ break;
+ case 3: /* Device not operational */
+ ret = -ENODEV;
+ break;
+ default:
+ ret = ccode;
+ }
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
+}
+
+static int fsm_do_clear(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ unsigned long flags;
+ int ccode;
+ int ret;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+
+ /* Issue "Clear Subchannel" */
+ ccode = csch(sch->schid);
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
+ /* TODO: check what else we might need to clear */
+ ret = 0;
+ break;
+ case 3: /* Device not operational */
+ ret = -ENODEV;
+ break;
+ default:
+ ret = ccode;
+ }
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
@@ -102,6 +178,30 @@ static void fsm_io_busy(struct vfio_ccw_private *private,
private->io_region->ret_code = -EBUSY;
}
+static void fsm_io_retry(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->io_region->ret_code = -EAGAIN;
+}
+
+static void fsm_async_error(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+ pr_err("vfio-ccw: FSM: %s request from state:%d\n",
+ cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
+ cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
+ "<unknown>", private->state);
+ cmd_region->ret_code = -EIO;
+}
+
+static void fsm_async_retry(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->cmd_region->ret_code = -EAGAIN;
+}
+
static void fsm_disabled_irq(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
@@ -130,8 +230,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
struct mdev_device *mdev = private->mdev;
char *errstr = "request";
- private->state = VFIO_CCW_STATE_BUSY;
-
+ private->state = VFIO_CCW_STATE_CP_PROCESSING;
memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
@@ -166,22 +265,42 @@ static void fsm_io_request(struct vfio_ccw_private *private,
}
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
- /* XXX: Handle halt. */
+ /* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
- /* XXX: Handle clear. */
+ /* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
}
err_out:
- private->state = VFIO_CCW_STATE_IDLE;
trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
io_region->ret_code, errstr);
}
/*
+ * Deal with an async request from userspace.
+ */
+static void fsm_async_request(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+ switch (cmd_region->command) {
+ case VFIO_CCW_ASYNC_CMD_HSCH:
+ cmd_region->ret_code = fsm_do_halt(private);
+ break;
+ case VFIO_CCW_ASYNC_CMD_CSCH:
+ cmd_region->ret_code = fsm_do_clear(private);
+ break;
+ default:
+ /* should not happen? */
+ cmd_region->ret_code = -EINVAL;
+ }
+}
+
+/*
* Got an interrupt for a normal io (state busy).
*/
static void fsm_irq(struct vfio_ccw_private *private,
@@ -204,21 +323,31 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_STATE_NOT_OPER] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_CP_PROCESSING] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
- [VFIO_CCW_STATE_BUSY] = {
+ [VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
};
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index f673e106c041..5eb61116ca6f 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -3,13 +3,17 @@
* Physical device callbacks for vfio_ccw
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/vfio.h>
#include <linux/mdev.h>
+#include <linux/nospec.h>
+#include <linux/slab.h>
#include "vfio_ccw_private.h"
@@ -130,11 +134,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_mdev_reset(mdev))
+ if (!vfio_ccw_sch_quiesce(private->sch))
private->state = VFIO_CCW_STATE_STANDBY;
/* The state will be NOT_OPER on error. */
}
+ cp_free(&private->cp);
private->mdev = NULL;
atomic_inc(&private->avail);
@@ -146,20 +151,66 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ int ret;
private->nb.notifier_call = vfio_ccw_mdev_notifier;
- return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &events, &private->nb);
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &events, &private->nb);
+ if (ret)
+ return ret;
+
+ ret = vfio_ccw_register_async_dev_regions(private);
+ if (ret)
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &private->nb);
+ return ret;
}
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
+ int i;
+ if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
+ (private->state != VFIO_CCW_STATE_STANDBY)) {
+ if (!vfio_ccw_mdev_reset(mdev))
+ private->state = VFIO_CCW_STATE_STANDBY;
+ /* The state will be NOT_OPER on error. */
+ }
+
+ cp_free(&private->cp);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&private->nb);
+
+ for (i = 0; i < private->num_regions; i++)
+ private->region[i].ops->release(private, &private->region[i]);
+
+ private->num_regions = 0;
+ kfree(private->region);
+ private->region = NULL;
+}
+
+static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_io_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->io_region;
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+ mutex_unlock(&private->io_mutex);
+ return ret;
}
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
@@ -167,18 +218,54 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
size_t count,
loff_t *ppos)
{
+ unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
struct vfio_ccw_private *private;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
+
+ if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
+
+ switch (index) {
+ case VFIO_CCW_CONFIG_REGION_INDEX:
+ return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
+ default:
+ index -= VFIO_CCW_NUM_REGIONS;
+ return private->region[index].ops->read(private, buf, count,
+ ppos);
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_io_region *region;
+ int ret;
- if (*ppos + count > sizeof(*region))
+ if (pos + count > sizeof(*region))
return -EINVAL;
- private = dev_get_drvdata(mdev_parent_dev(mdev));
+ if (!mutex_trylock(&private->io_mutex))
+ return -EAGAIN;
+
region = private->io_region;
- if (copy_to_user(buf, (void *)region + *ppos, count))
- return -EFAULT;
+ if (copy_from_user((void *)region + pos, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
- return count;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
+ if (region->ret_code != 0)
+ private->state = VFIO_CCW_STATE_IDLE;
+ ret = (region->ret_code != 0) ? region->ret_code : count;
+
+out_unlock:
+ mutex_unlock(&private->io_mutex);
+ return ret;
}
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
@@ -186,42 +273,47 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
size_t count,
loff_t *ppos)
{
+ unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
struct vfio_ccw_private *private;
- struct ccw_io_region *region;
-
- if (*ppos + count > sizeof(*region))
- return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev));
- if (private->state != VFIO_CCW_STATE_IDLE)
- return -EACCES;
- region = private->io_region;
- if (copy_from_user((void *)region + *ppos, buf, count))
- return -EFAULT;
+ if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
- if (region->ret_code != 0) {
- private->state = VFIO_CCW_STATE_IDLE;
- return region->ret_code;
+ switch (index) {
+ case VFIO_CCW_CONFIG_REGION_INDEX:
+ return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
+ default:
+ index -= VFIO_CCW_NUM_REGIONS;
+ return private->region[index].ops->write(private, buf, count,
+ ppos);
}
- return count;
+ return -EINVAL;
}
-static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info)
+static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
+ struct mdev_device *mdev)
{
+ struct vfio_ccw_private *private;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
- info->num_regions = VFIO_CCW_NUM_REGIONS;
+ info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
info->num_irqs = VFIO_CCW_NUM_IRQS;
return 0;
}
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
- u16 *cap_type_id,
- void **cap_type)
+ struct mdev_device *mdev,
+ unsigned long arg)
{
+ struct vfio_ccw_private *private;
+ int i;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
switch (info->index) {
case VFIO_CCW_CONFIG_REGION_INDEX:
info->offset = 0;
@@ -229,9 +321,55 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
info->flags = VFIO_REGION_INFO_FLAG_READ
| VFIO_REGION_INFO_FLAG_WRITE;
return 0;
- default:
- return -EINVAL;
+ default: /* all other regions are handled via capability chain */
+ {
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1 };
+ int ret;
+
+ if (info->index >=
+ VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
+
+ info->index = array_index_nospec(info->index,
+ VFIO_CCW_NUM_REGIONS +
+ private->num_regions);
+
+ i = info->index - VFIO_CCW_NUM_REGIONS;
+
+ info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
+ info->size = private->region[i].size;
+ info->flags = private->region[i].flags;
+
+ cap_type.type = private->region[i].type;
+ cap_type.subtype = private->region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
+
+ info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info->argsz < sizeof(*info) + caps.size) {
+ info->argsz = sizeof(*info) + caps.size;
+ info->cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(*info));
+ if (copy_to_user((void __user *)arg + sizeof(*info),
+ caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info->cap_offset = sizeof(*info);
+ }
+
+ kfree(caps.buf);
+
}
+ }
+ return 0;
}
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
@@ -308,6 +446,32 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
}
}
+int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
+ unsigned int subtype,
+ const struct vfio_ccw_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_ccw_region *region;
+
+ region = krealloc(private->region,
+ (private->num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ private->region = region;
+ private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
+ private->region[private->num_regions].subtype = subtype;
+ private->region[private->num_regions].ops = ops;
+ private->region[private->num_regions].size = size;
+ private->region[private->num_regions].flags = flags;
+ private->region[private->num_regions].data = data;
+
+ private->num_regions++;
+
+ return 0;
+}
+
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
unsigned int cmd,
unsigned long arg)
@@ -328,7 +492,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz)
return -EINVAL;
- ret = vfio_ccw_mdev_get_device_info(&info);
+ ret = vfio_ccw_mdev_get_device_info(&info, mdev);
if (ret)
return ret;
@@ -337,8 +501,6 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
case VFIO_DEVICE_GET_REGION_INFO:
{
struct vfio_region_info info;
- u16 cap_type_id = 0;
- void *cap_type = NULL;
minsz = offsetofend(struct vfio_region_info, offset);
@@ -348,8 +510,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz)
return -EINVAL;
- ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id,
- &cap_type);
+ ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
if (ret)
return ret;
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 08e9a7dc9176..f1092c3dc1b1 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -3,9 +3,11 @@
* Private stuff for vfio_ccw driver
*
* Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
*/
#ifndef _VFIO_CCW_PRIVATE_H_
@@ -19,6 +21,40 @@
#include "css.h"
#include "vfio_ccw_cp.h"
+#define VFIO_CCW_OFFSET_SHIFT 10
+#define VFIO_CCW_OFFSET_TO_INDEX(off) (off >> VFIO_CCW_OFFSET_SHIFT)
+#define VFIO_CCW_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_CCW_OFFSET_SHIFT)
+#define VFIO_CCW_OFFSET_MASK (((u64)(1) << VFIO_CCW_OFFSET_SHIFT) - 1)
+
+/* capability chain handling similar to vfio-pci */
+struct vfio_ccw_private;
+struct vfio_ccw_region;
+
+struct vfio_ccw_regops {
+ ssize_t (*read)(struct vfio_ccw_private *private, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count, loff_t *ppos);
+ void (*release)(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region);
+};
+
+struct vfio_ccw_region {
+ u32 type;
+ u32 subtype;
+ const struct vfio_ccw_regops *ops;
+ void *data;
+ size_t size;
+ u32 flags;
+};
+
+int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
+ unsigned int subtype,
+ const struct vfio_ccw_regops *ops,
+ size_t size, u32 flags, void *data);
+
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
+
/**
* struct vfio_ccw_private
* @sch: pointer to the subchannel
@@ -28,6 +64,10 @@
* @mdev: pointer to the mediated device
* @nb: notifier for vfio events
* @io_region: MMIO region to input/output I/O arguments/results
+ * @io_mutex: protect against concurrent update of I/O regions
+ * @region: additional regions for other subchannel operations
+ * @cmd_region: MMIO region for asynchronous I/O commands other than START
+ * @num_regions: number of additional regions
* @cp: channel program for the current I/O operation
* @irb: irb info received from interrupt
* @scsw: scsw info
@@ -42,6 +82,10 @@ struct vfio_ccw_private {
struct mdev_device *mdev;
struct notifier_block nb;
struct ccw_io_region *io_region;
+ struct mutex io_mutex;
+ struct vfio_ccw_region *region;
+ struct ccw_cmd_region *cmd_region;
+ int num_regions;
struct channel_program cp;
struct irb irb;
@@ -63,7 +107,8 @@ enum vfio_ccw_state {
VFIO_CCW_STATE_NOT_OPER,
VFIO_CCW_STATE_STANDBY,
VFIO_CCW_STATE_IDLE,
- VFIO_CCW_STATE_BUSY,
+ VFIO_CCW_STATE_CP_PROCESSING,
+ VFIO_CCW_STATE_CP_PENDING,
/* last element! */
NR_VFIO_CCW_STATES
};
@@ -75,6 +120,7 @@ enum vfio_ccw_event {
VFIO_CCW_EVENT_NOT_OPER,
VFIO_CCW_EVENT_IO_REQ,
VFIO_CCW_EVENT_INTERRUPT,
+ VFIO_CCW_EVENT_ASYNC_REQ,
/* last element! */
NR_VFIO_CCW_EVENTS
};
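
The offset macros added above pack a region index and an in-region byte
position into one file offset (a shift of 10 gives 1 KiB per region slot). A
worked example under those definitions:

    static void demo_offsets(void)
    {
        loff_t ppos = VFIO_CCW_INDEX_TO_OFFSET(1) + 8;       /* region 1, byte 8 */
        unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(ppos); /* == 1 */
        loff_t pos = ppos & VFIO_CCW_OFFSET_MASK;            /* == 8 */
    }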
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e15816ff1265..cc30e4f07fff 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -116,7 +116,7 @@ static int user_set_domain;
static struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
-static void ap_interrupt_handler(struct airq_struct *airq);
+static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
static int ap_airq_flag;
@@ -393,7 +393,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
* ap_interrupt_handler() - Schedule ap_tasklet on interrupt
* @airq: pointer to adapter interrupt descriptor
*/
-static void ap_interrupt_handler(struct airq_struct *airq)
+static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
{
inc_irq_stat(IRQIO_APB);
if (!ap_suspend_flag)
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = ap_dev->drv;
+ /* prepare ap queue device removal */
if (is_queue_dev(dev))
- ap_queue_remove(to_ap_queue(dev));
+ ap_queue_prepare_remove(to_ap_queue(dev));
+
+ /* driver's chance to clean up gracefully */
if (ap_drv->remove)
ap_drv->remove(ap_dev);
+	/* now do the actual ap queue device removal */
+ if (is_queue_dev(dev))
+ ap_queue_remove(to_ap_queue(dev));
+
/* Remove queue/card from list of active queues/cards */
spin_lock_bh(&ap_list_lock);
if (is_card_dev(dev))
@@ -861,6 +868,16 @@ void ap_bus_force_rescan(void)
EXPORT_SYMBOL(ap_bus_force_rescan);
/*
+ * A config change has happened; force an ap bus rescan.
+ */
+void ap_bus_cfg_chg(void)
+{
+ AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+ ap_bus_force_rescan();
+}
+
+/*
* hex2bitmap() - parse hex mask string and set bitmap.
* Valid strings are "0x012345678" with at least one valid hex number.
* Rest of the bitmap to the right is padded with 0. No spaces allowed
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d0059eae5d94..15a98a673c5c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,6 +91,7 @@ enum ap_state {
AP_STATE_WORKING,
AP_STATE_QUEUE_FULL,
AP_STATE_SUSPEND_WAIT,
+ AP_STATE_REMOVE, /* about to be removed from driver */
AP_STATE_UNBOUND, /* momentary not bound to a driver */
AP_STATE_BORKED, /* broken */
NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba261210c6da..5ea83dc4f1d7 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
+ [AP_STATE_REMOVE] = {
+ [AP_EVENT_POLL] = ap_sm_nop,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
[AP_STATE_UNBOUND] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
}
EXPORT_SYMBOL(ap_flush_queue);
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
{
- ap_flush_queue(aq);
+ spin_lock_bh(&aq->lock);
+ /* flush queue */
+ __ap_flush_queue(aq);
+	/* set REMOVE state to prevent new messages from being queued */
+ aq->state = AP_STATE_REMOVE;
+ spin_unlock_bh(&aq->lock);
del_timer_sync(&aq->timeout);
+}
- /* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+ /*
+	 * All messages have been flushed and the state is
+	 * AP_STATE_REMOVE. Now reset with zero, which also
+	 * clears the irq registration, and move the state
+	 * to AP_STATE_UNBOUND to signal that this queue is
+	 * currently not used by any driver.
+ */
spin_lock_bh(&aq->lock);
ap_zapq(aq->qid);
aq->state = AP_STATE_UNBOUND;
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_remove);
void ap_queue_reinit_state(struct ap_queue *aq)
{
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_reinit_state);
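[Reviewer note] The ap_queue changes above split removal into two phases so the driver's ->remove() callback can run in between: ap_queue_prepare_remove() flushes pending messages and parks the state machine in AP_STATE_REMOVE, rejecting new work, and only afterwards does ap_queue_remove() reset the queue and mark it AP_STATE_UNBOUND. A generic sketch of the pattern; flush_pending(), reset_hw() and the STATE_* constants are placeholders standing in for the ap-specific pieces:

    enum { STATE_WORKING, STATE_REMOVE, STATE_UNBOUND };

    struct work_queue {
    	spinlock_t lock;
    	int state;
    	struct timer_list timeout;
    };

    static void flush_pending(struct work_queue *q);	/* fail all queued msgs */
    static void reset_hw(struct work_queue *q);		/* zap + clear irq */

    static void queue_prepare_remove(struct work_queue *q)
    {
    	spin_lock_bh(&q->lock);
    	flush_pending(q);
    	q->state = STATE_REMOVE;	/* reject new submissions from now on */
    	spin_unlock_bh(&q->lock);
    	del_timer_sync(&q->timeout);	/* after unlock: timer may take q->lock */
    }

    static void queue_remove(struct work_queue *q)
    {
    	spin_lock_bh(&q->lock);
    	reset_hw(q);
    	q->state = STATE_UNBOUND;	/* free for another driver to bind */
    	spin_unlock_bh(&q->lock);
    }

Note that del_timer_sync() runs only after the lock is dropped, matching the patch: the timeout callback may need the same lock, so calling it under the lock could deadlock.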
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 3e85d665c572..45eb0c14b880 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -51,7 +51,8 @@ static debug_info_t *debug_info;
static void __init pkey_debug_init(void)
{
- debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
+ /* 5 arguments per dbf entry (including the format string ptr) */
+ debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
debug_register_view(debug_info, &debug_sprintf_view);
debug_set_level(debug_info, 3);
}
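[Reviewer note] The size bump follows from the s390 debug feature's sprintf-view layout: each event record stores the format-string pointer plus one long per argument, so code that logs up to four arguments needs five longs per entry. A sketch of the arithmetic; the macro names here are made up for illustration:

    /* one slot for the format pointer + one per sprintf argument */
    #define PKEY_DBF_MAX_SPRINTF_ARGS	4
    #define PKEY_DBF_ENTRY_SIZE \
    	((PKEY_DBF_MAX_SPRINTF_ARGS + 1) * sizeof(long))

    	/* debug_register("pkey", 1, 1, PKEY_DBF_ENTRY_SIZE); */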
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index eb93c2d27d0a..c31b2d31cd83 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module **pmod,
unsigned int weight)
{
if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
atomic_add(weight, &zc->load);
atomic_add(weight, &zq->load);
zq->request_count++;
+ *pmod = zq->queue->ap_dev.drv->driver.owner;
return zq;
}
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module *mod,
unsigned int weight)
{
- struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
zq->request_count--;
atomic_sub(weight, &zc->load);
atomic_sub(weight, &zq->load);
@@ -653,10 +654,12 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
if (mex->outputdatalength < mex->inputdatalength) {
+ func_code = 0;
rc = -EINVAL;
goto out;
}
@@ -706,7 +709,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -718,7 +721,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -735,10 +738,12 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
if (crt->outputdatalength < crt->inputdatalength) {
+ func_code = 0;
rc = -EINVAL;
goto out;
}
@@ -788,7 +793,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -800,7 +805,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -819,6 +824,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
unsigned int func_code;
unsigned short *domain;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
@@ -865,7 +871,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -881,7 +887,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -932,6 +938,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
unsigned int func_code;
struct ap_message ap_msg;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
@@ -946,6 +953,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
if (!targets) {
+ func_code = 0;
rc = -ENOMEM;
goto out;
}
@@ -953,6 +961,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
if (copy_from_user(targets, uptr,
target_num * sizeof(*targets))) {
+ func_code = 0;
rc = -EFAULT;
goto out_free;
}
@@ -1000,7 +1009,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1012,7 +1021,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out_free:
@@ -1033,6 +1042,7 @@ static long zcrypt_rng(char *buffer)
struct ap_message ap_msg;
unsigned int domain;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
@@ -1064,7 +1074,7 @@ static long zcrypt_rng(char *buffer)
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1076,7 +1086,7 @@ static long zcrypt_rng(char *buffer)
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
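[Reviewer note] The thread running through all five zcrypt call sites above is one fix: zcrypt_pick_queue() pins the owning module with try_module_get() and now also hands that pointer back through *pmod, so zcrypt_drop_queue() can module_put() exactly what was pinned instead of re-reading zq->queue->ap_dev.drv, which may already be gone if the driver unbound while the request was in flight. The pattern in generic form; struct names and fields are placeholders:

    struct my_resource {
    	struct device_driver *drv;	/* may become NULL on unbind */
    };

    static int resource_get(struct my_resource *res, struct module **pmod)
    {
    	if (!try_module_get(res->drv->owner))
    		return -ENODEV;
    	*pmod = res->drv->owner;	/* remember exactly whom we pinned */
    	return 0;
    }

    static void resource_put(struct module *mod)
    {
    	/* never re-read res->drv here: it may be NULL after unbind */
    	module_put(mod);
    }

The added "func_code = 0;" lines on the early-exit paths look related in miniature: the out: labels apparently trace func_code, which would otherwise be read uninitialized when the function bails out before assigning it.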
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 7617d21cb296..f63c5c871d3d 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
if (priv->channel[direction] == NULL) {
if (direction == CTCM_WRITE)
channel_free(priv->channel[CTCM_READ]);
+ result = -ENODEV;
goto out_dev;
}
priv->channel[direction]->netdev = dev;
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 0aab90817326..66eac2b9704d 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <net/smc.h>
+#include <asm/pci_insn.h>
#define UTIL_STR_LEN 16
@@ -194,8 +195,6 @@ struct ism_dev {
struct pci_dev *pdev;
struct smcd_dev *smcd;
- void __iomem *ctl;
-
struct ism_sba *sba;
dma_addr_t sba_dma_addr;
DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
@@ -209,13 +208,37 @@ struct ism_dev {
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
+static inline void __ism_read_cmd(struct ism_dev *ism, void *data,
+ unsigned long offset, unsigned long len)
+{
+ struct zpci_dev *zdev = to_zpci(ism->pdev);
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, 8);
+
+ while (len > 0) {
+ __zpci_load(data, req, offset);
+ offset += 8;
+ data += 8;
+ len -= 8;
+ }
+}
+
+static inline void __ism_write_cmd(struct ism_dev *ism, void *data,
+ unsigned long offset, unsigned long len)
+{
+ struct zpci_dev *zdev = to_zpci(ism->pdev);
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, len);
+
+ if (len)
+ __zpci_store_block(data, req, offset);
+}
+
static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data,
unsigned int size)
{
struct zpci_dev *zdev = to_zpci(ism->pdev);
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
- return zpci_write_block(req, data, dmb_req);
+ return __zpci_store_block(data, req, dmb_req);
}
#endif /* S390_ISM_H */
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 3e132592c1fe..4fc2056bd227 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -38,19 +38,18 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
struct ism_req_hdr *req = cmd;
struct ism_resp_hdr *resp = cmd;
- memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
- memcpy_toio(ism->ctl, req, sizeof(*req));
+ __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
+ __ism_write_cmd(ism, req, 0, sizeof(*req));
WRITE_ONCE(resp->ret, ISM_ERROR);
- memcpy_fromio(resp, ism->ctl, sizeof(*resp));
+ __ism_read_cmd(ism, resp, 0, sizeof(*resp));
if (resp->ret) {
debug_text_event(ism_debug_info, 0, "cmd failure");
debug_event(ism_debug_info, 0, resp, sizeof(*resp));
goto out;
}
- memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
- resp->len - sizeof(*resp));
+ __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
return resp->ret;
}
@@ -512,13 +511,9 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_disable;
- ism->ctl = pci_iomap(pdev, 2, 0);
- if (!ism->ctl)
- goto err_resource;
-
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret)
- goto err_unmap;
+ goto err_resource;
dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
dma_set_max_seg_size(&pdev->dev, SZ_1M);
@@ -527,7 +522,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
ISM_NR_DMBS);
if (!ism->smcd)
- goto err_unmap;
+ goto err_resource;
ism->smcd->priv = ism;
ret = ism_dev_init(ism);
@@ -538,8 +533,6 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_free:
smcd_free_dev(ism->smcd);
-err_unmap:
- pci_iounmap(pdev, ism->ctl);
err_resource:
pci_release_mem_regions(pdev);
err_disable:
@@ -568,7 +561,6 @@ static void ism_remove(struct pci_dev *pdev)
ism_dev_exit(ism);
smcd_free_dev(ism->smcd);
- pci_iounmap(pdev, ism->ctl);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
dev_set_drvdata(&pdev->dev, NULL);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 197b0f5b63e7..44bd6f04c145 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
+ struct sk_buff *skb;
+
/* release may never happen from within CQ tasklet scope */
WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
- __skb_queue_purge(&buf->skb_list);
+ while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
+ consume_skb(skb);
}
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
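[Reviewer note] The qeth_release_skbs() change above is about drop accounting, not leaks: __skb_queue_purge() frees via kfree_skb(), which the kfree_skb tracepoint and tools like dropwatch count as packet drops, whereas these buffers completed transmission. Draining with consume_skb() signals "delivered normally". A minimal sketch of the pattern:

    #include <linux/skbuff.h>

    /* drain a private skb list without signalling packet drops */
    static void drain_completed(struct sk_buff_head *list)
    {
    	struct sk_buff *skb;

    	while ((skb = __skb_dequeue(list)) != NULL)
    		consume_skb(skb);	/* "consumed", not "dropped" */
    }

Conversely, the qeth_l2/l3 transmit error paths below move from dev_kfree_skb_any() to plain kfree_skb(): those are genuine drops, kfree_skb() is the variant drop monitors count, and the _any() form is only needed where hard-irq context is possible.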
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8efb2e8ff8f4..c3067fd3bd9e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
} /* else fall through */
QETH_TXQ_STAT_INC(queue, tx_dropped);
- QETH_TXQ_STAT_INC(queue, tx_errors);
- dev_kfree_skb_any(skb);
+ kfree_skb(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
@@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
+ qeth_l2_vnicc_set_defaults(card);
+
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l2_create_device_attributes(&gdev->dev);
if (rc)
@@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
}
hash_init(card->mac_htable);
- card->info.hwtrap = 0;
- qeth_l2_vnicc_set_defaults(card);
return 0;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7e68d9d16859..53712cf26406 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
tx_drop:
QETH_TXQ_STAT_INC(queue, tx_dropped);
- QETH_TXQ_STAT_INC(queue, tx_errors);
- dev_kfree_skb_any(skb);
+ kfree_skb(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
@@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
+ hash_init(card->ip_htable);
+
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l3_create_device_attributes(&gdev->dev);
if (rc)
return rc;
}
- hash_init(card->ip_htable);
+
hash_init(card->ip_mc_htable);
- card->info.hwtrap = 0;
return 0;
}
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 744a64680d5b..e8fc28dba8df 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
add_timer(&erp_action->timer);
}
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag)
+{
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
+ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+ read_unlock(&adapter->port_list_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
int clear, char *dbftag)
{
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
int lun_status;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL)
+ continue;
if (zsdev->port != port)
continue;
/* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3fce47b0b21b..c6acca521ffe 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
char *dbftag);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index db00b5e3abbe..33eddb02ee30 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
list_for_each_entry(port, &adapter->port_list, list) {
if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
zfcp_fc_test_link(port);
- if (!port->d_id)
- zfcp_erp_port_reopen(port,
- ZFCP_STATUS_COMMON_ERP_FAILED,
- "fcrscn1");
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
struct fc_els_rscn *head;
struct fc_els_rscn_page *page;
u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
no_entries = be16_to_cpu(head->rscn_plen) /
sizeof(struct fc_els_rscn_page);
+ if (no_entries > 1) {
+ /* handle failed ports */
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list) {
+ if (port->d_id)
+ continue;
+ zfcp_erp_port_reopen(port,
+ ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fcrscn1");
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ }
+
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
page++;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f4f6a07c5222..221d0dfb8493 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret = SUCCESS, fc_ret;
+ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+ zfcp_erp_wait(adapter);
+ }
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
fc_ret = fc_block_scsi_eh(scpnt);
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index ae1d56da671d..991420caa4f2 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -182,7 +182,7 @@ static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
write_unlock_irqrestore(&info->lock, flags);
}
-static void virtio_airq_handler(struct airq_struct *airq)
+static void virtio_airq_handler(struct airq_struct *airq, bool floating)
{
struct airq_info *info = container_of(airq, struct airq_info, airq);
unsigned long ai;
@@ -272,6 +272,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
struct virtio_ccw_vq_info *info;
+ if (!vcdev->airq_info)
+ return;
list_for_each_entry(info, &vcdev->virtqueues, node)
drop_airq_indicator(info->vq, vcdev->airq_info);
}
@@ -413,7 +415,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
- return vcdev->config_block->num;
+ return vcdev->config_block->num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
@@ -973,6 +975,13 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
kfree(ccw);
}
+static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+
+ return dev_name(&vcdev->cdev->dev);
+}
+
static const struct virtio_config_ops virtio_ccw_config_ops = {
.get_features = virtio_ccw_get_features,
.finalize_features = virtio_ccw_finalize_features,
@@ -983,6 +992,7 @@ static const struct virtio_config_ops virtio_ccw_config_ops = {
.reset = virtio_ccw_reset,
.find_vqs = virtio_ccw_find_vqs,
.del_vqs = virtio_ccw_del_vqs,
+ .bus_name = virtio_ccw_bus_name,
};
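[Reviewer note] virtio_ccw_read_vq_conf() above uses GNU C's binary conditional: x ?: y yields x when x is nonzero and y otherwise, evaluating x only once. A queue size of 0 therefore now surfaces as -ENOENT instead of being returned as a bogus success value. As a one-function illustration:

    #include <errno.h>

    static int nonzero_or_enoent(int n)
    {
    	return n ?: -ENOENT;	/* same as: n ? n : -ENOENT */
    }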
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8f9d9e9fa695..d528018e6fa8 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -665,7 +665,7 @@ config SCSI_DMX3191D
config SCSI_GDTH
tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
- depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
+ depends on PCI && SCSI
---help---
Formerly called GDT SCSI Disk Array Controller Support.
@@ -1196,8 +1196,6 @@ config SCSI_AM53C974
PCscsi/PCnet (Am53/79C974) solutions.
This is a new implementation based on the generic esp_scsi driver.
- Documentation can be found in <file:Documentation/scsi/tmscsim.txt>.
-
Note that this driver does NOT support Tekram DC390W/U/F, which are
based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those.
@@ -1517,6 +1515,4 @@ source "drivers/scsi/pcmcia/Kconfig"
source "drivers/scsi/device_handler/Kconfig"
-source "drivers/scsi/osd/Kconfig"
-
endmenu
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index fcb41ae329c4..8826111fdf4a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -150,7 +150,6 @@ obj-$(CONFIG_CHR_DEV_SG) += sg.o
obj-$(CONFIG_CHR_DEV_SCH) += ch.o
obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o
-obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/
# This goes last, so that "real" scsi devices probe earlier
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index 1bd9fd18f7f3..3893b95b140b 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -4,5 +4,3 @@ obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
dpcsup.o rx.o sa.o rkt.o nark.o src.o
-
-ccflags-y := -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 75ab5ff6b78c..6085aa087a2f 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3455,7 +3455,7 @@ static int delete_disk(struct aac_dev *dev, void __user *arg)
}
}
-int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
+int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
{
switch (cmd) {
case FSACTL_QUERY_DISK:
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3291d1c16864..11fb68d7e60d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
return capacity;
}
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+ return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
static inline int aac_adapter_check_health(struct aac_dev *dev)
{
- if (unlikely(pci_channel_offline(dev->pdev)))
+ if (unlikely(aac_pci_offline(dev)))
return -1;
return (dev)->a_ops.adapter_check_health(dev);
@@ -2706,12 +2711,12 @@ void aac_set_intx_mode(struct aac_dev *dev);
int aac_get_config_status(struct aac_dev *dev, int commit_flag);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(struct scsi_cmnd *cmd);
-int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
+int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg);
#ifndef shost_to_class
#define shost_to_class(shost) &shost->shost_dev
#endif
ssize_t aac_get_serial_number(struct device *dev, char *buf);
-int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
+int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg);
int aac_rx_init(struct aac_dev *dev);
int aac_rkt_init(struct aac_dev *dev);
int aac_nark_init(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index e2899ff7913e..f0ff40332753 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1060,7 +1060,7 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
return retval;
}
-int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
+int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
{
int status;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index d5a6aa9676c8..78430a7b294c 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
return -ETIMEDOUT;
}
- if (unlikely(pci_channel_offline(dev->pdev)))
+ if (unlikely(aac_pci_offline(dev)))
return -EFAULT;
if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
spin_unlock_irqrestore(&fibptr->event_lock, flags);
- if (unlikely(pci_channel_offline(dev->pdev)))
+ if (unlikely(aac_pci_offline(dev)))
return -EFAULT;
fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
@@ -1303,8 +1303,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
ADD : DELETE;
break;
}
- case AifBuManagerEvent:
- aac_handle_aif_bu(dev, aifcmd);
+ break;
+ case AifBuManagerEvent:
+ aac_handle_aif_bu(dev, aifcmd);
break;
}
@@ -1376,18 +1377,19 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
container = 0;
retry_next:
- if (device_config_needed == NOTHING)
- for (; container < dev->maximum_num_containers; ++container) {
- if ((dev->fsa_dev[container].config_waiting_on == 0) &&
- (dev->fsa_dev[container].config_needed != NOTHING) &&
- time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
- device_config_needed =
- dev->fsa_dev[container].config_needed;
- dev->fsa_dev[container].config_needed = NOTHING;
- channel = CONTAINER_TO_CHANNEL(container);
- id = CONTAINER_TO_ID(container);
- lun = CONTAINER_TO_LUN(container);
- break;
+ if (device_config_needed == NOTHING) {
+ for (; container < dev->maximum_num_containers; ++container) {
+ if ((dev->fsa_dev[container].config_waiting_on == 0) &&
+ (dev->fsa_dev[container].config_needed != NOTHING) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
+ device_config_needed =
+ dev->fsa_dev[container].config_needed;
+ dev->fsa_dev[container].config_needed = NOTHING;
+ channel = CONTAINER_TO_CHANNEL(container);
+ id = CONTAINER_TO_ID(container);
+ lun = CONTAINER_TO_LUN(container);
+ break;
+ }
}
}
if (device_config_needed == NOTHING)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 7e56a11836c1..8e28a505f7e8 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -413,13 +413,16 @@ static int aac_slave_configure(struct scsi_device *sdev)
if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
devtype = aac->hba_map[chn][tid].devtype;
- if (devtype == AAC_DEVTYPE_NATIVE_RAW)
+ if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
depth = aac->hba_map[chn][tid].qd_limit;
- else if (devtype == AAC_DEVTYPE_ARC_RAW)
+ set_timeout = 1;
+ goto common_config;
+ }
+ if (devtype == AAC_DEVTYPE_ARC_RAW) {
set_qd_dev_type = true;
-
- set_timeout = 1;
- goto common_config;
+ set_timeout = 1;
+ goto common_config;
+ }
}
if (aac->jbod && (sdev->type == TYPE_DISK))
@@ -616,7 +619,8 @@ static struct device_attribute *aac_dev_attrs[] = {
NULL,
};
-static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
+static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
+ void __user *arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
if (!capable(CAP_SYS_RAWIO))
@@ -852,8 +856,7 @@ static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
address = (u64)fib->hw_error_pa;
rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
- rst->error_ptr_lo = cpu_to_le32
- ((u32)(address & 0xffffffff));
+ rst->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
fib->hbacmd_size = sizeof(*rst);
@@ -1206,7 +1209,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
return ret;
}
-static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int aac_compat_ioctl(struct scsi_device *sdev, unsigned int cmd,
+ void __user *arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
if (!capable(CAP_SYS_RAWIO))
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 8377aec0649d..97bb9e9d201c 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -1157,7 +1157,7 @@ out:
dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
state_str[state], rc);
-return rc;
+ return rc;
}
/**
* aac_srcv_init - initialize an SRCv card
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index c15be2590d1c..e0188ecd85b2 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -34,7 +34,6 @@ aic79xx-y += aic79xx_osm.o \
aic79xx_proc.o \
aic79xx_osm_pci.o
-ccflags-y += -Idrivers/scsi
ifdef WARNINGS_BECOME_ERRORS
ccflags-y += -Werror
endif
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 3d401d02c019..bdd177e3d762 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -91,6 +91,7 @@ aic7770_probe(struct device *dev)
ahc = ahc_alloc(&aic7xxx_driver_template, name);
if (ahc == NULL)
return (ENOMEM);
+ ahc->dev = dev;
error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
eisaBase);
if (error != 0) {
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 9ee75c9a9aa1..7e5044bf05c0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -2285,6 +2285,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
switch (scb->hscb->task_management) {
case SIU_TASKMGMT_ABORT_TASK:
tag = SCB_GET_TAG(scb);
+ /* fall through */
case SIU_TASKMGMT_ABORT_TASK_SET:
case SIU_TASKMGMT_CLEAR_TASK_SET:
lun = scb->hscb->lun;
@@ -2295,6 +2296,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
break;
case SIU_TASKMGMT_LUN_RESET:
lun = scb->hscb->lun;
+ /* fall through */
case SIU_TASKMGMT_TARGET_RESET:
{
struct ahd_devinfo devinfo;
@@ -6550,8 +6552,8 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
kfree(sns_map);
}
ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
- /* FALLTHROUGH */
}
+ /* fall through */
case 6:
{
struct map_node *sg_map;
@@ -6565,8 +6567,8 @@ ahd_fini_scbdata(struct ahd_softc *ahd)
kfree(sg_map);
}
ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
- /* FALLTHROUGH */
}
+ /* fall through */
case 5:
{
struct map_node *hscb_map;
@@ -7209,6 +7211,7 @@ ahd_init(struct ahd_softc *ahd)
case FLX_CSTAT_OVER:
case FLX_CSTAT_UNDER:
warn_user++;
+ /* fall through */
case FLX_CSTAT_INVALID:
case FLX_CSTAT_OKAY:
if (warn_user == 0 && bootverbose == 0)
@@ -8413,7 +8416,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in Waiting List\n");
ahd_done_with_status(ahd, scb, status);
- /* FALLTHROUGH */
+ /* fall through */
case SEARCH_REMOVE:
ahd_rem_wscb(ahd, scbid, prev, next, tid);
*list_tail = prev;
@@ -8422,6 +8425,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
break;
case SEARCH_PRINT:
printk("0x%x ", scbid);
+ /* fall through */
case SEARCH_COUNT:
prev = scbid;
break;
@@ -9547,8 +9551,8 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
{
fmt3_ins = &instr.format3;
fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
- /* FALLTHROUGH */
}
+ /* fall through */
case AIC_OP_OR:
case AIC_OP_AND:
case AIC_OP_XOR:
@@ -9559,7 +9563,7 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
}
fmt1_ins->parity = 0;
- /* FALLTHROUGH */
+ /* fall through */
case AIC_OP_ROL:
{
int i, count;
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 5614921b4041..88b90f9806c9 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -943,6 +943,7 @@ struct ahc_softc {
* Platform specific device information.
*/
ahc_dev_softc_t dev_softc;
+ struct device *dev;
/*
* Bus specific device information.
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 3c9c17450bb3..d5c4a0d23706 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -860,8 +860,8 @@ int
ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
int flags, bus_dmamap_t *mapp)
{
- *vaddr = pci_alloc_consistent(ahc->dev_softc,
- dmat->maxsize, mapp);
+ /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */
+ *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC);
if (*vaddr == NULL)
return ENOMEM;
return 0;
@@ -871,8 +871,7 @@ void
ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
void* vaddr, bus_dmamap_t map)
{
- pci_free_consistent(ahc->dev_softc, dmat->maxsize,
- vaddr, map);
+ dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
}
int
@@ -1123,8 +1122,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
host->transportt = ahc_linux_transport_template;
- retval = scsi_add_host(host,
- (ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
+ retval = scsi_add_host(host, ahc->dev);
if (retval) {
printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
scsi_host_put(host);
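[Reviewer note] This hunk is a straight migration from the legacy PCI DMA wrappers to the generic DMA API, which is why the series stashes a struct device pointer in ahc->dev (set in aic7770_probe() above and in the PCI probe below). pci_alloc_consistent() was historically dma_alloc_coherent() on &pdev->dev with GFP_ATOMIC, which the XXX comment keeps only until callers are audited. The rough equivalence, as a sketch ('dev' is the generic struct device, e.g. &pdev->dev for PCI):

    void *buf;
    dma_addr_t handle;

    buf = dma_alloc_coherent(dev, size, &handle, GFP_ATOMIC);
    		/* was: pci_alloc_consistent(pdev, size, &handle) */

    dma_free_coherent(dev, size, buf, handle);
    		/* was: pci_free_consistent(pdev, size, buf, handle) */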
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0fc14dac7070..717d8d1082ce 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
ahc->dev_softc = pci;
+ ahc->dev = &pci->dev;
error = ahc_pci_config(ahc, entry);
if (error != 0) {
ahc_free(ahc);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 9c397a2794d6..9220bcf8388f 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -49,7 +49,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.40.00.09-20180709"
+#define ARCMSR_DRIVER_VERSION "v1.40.00.10-20190116"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -739,7 +739,7 @@ struct AdapterControlBlock
#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
- u32 roundup_ccbsize;
+ u32 ioqueue_size;
struct pci_dev * pdev;
struct Scsi_Host * host;
unsigned long vir2phy_offset;
@@ -747,6 +747,7 @@ struct AdapterControlBlock
uint32_t outbound_int_enable;
uint32_t cdb_phyaddr_hi32;
uint32_t reg_mu_acc_handle0;
+ uint64_t cdb_phyadd_hipart;
spinlock_t eh_lock;
spinlock_t ccblist_lock;
spinlock_t postq_lock;
@@ -855,11 +856,11 @@ struct AdapterControlBlock
*******************************************************************************
*/
struct CommandControlBlock{
- /*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
+ /*x32:sizeof struct_CCB=(64+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
struct list_head list; /*x32: 8byte, x64: 16byte*/
struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
- uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
+ unsigned long cdb_phyaddr; /*x32: 4byte, x64: 8byte*/
uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
#define CCB_FLAG_READ 0x0000
@@ -875,10 +876,10 @@ struct CommandControlBlock{
uint32_t smid;
#if BITS_PER_LONG == 64
/* ======================512+64 bytes======================== */
- uint32_t reserved[4]; /*16 byte*/
+ uint32_t reserved[3]; /*12 byte*/
#else
/* ======================512+32 bytes======================== */
- // uint32_t reserved; /*4 byte*/
+ uint32_t reserved[8]; /*32 byte*/
#endif
/* ======================================================= */
struct ARCMSR_CDB arcmsr_cdb;
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 57c6fa388bf6..88053b15c363 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -91,6 +91,10 @@ static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
module_param(cmd_per_lun, int, S_IRUGO);
MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
+static int dma_mask_64 = 0;
+module_param(dma_mask_64, int, S_IRUGO);
+MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");
+
static int set_date_time = 0;
module_param(set_date_time, int, S_IRUGO);
MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
@@ -223,13 +227,13 @@ static struct pci_driver arcmsr_pci_driver = {
****************************************************************************
*/
-static void arcmsr_free_mu(struct AdapterControlBlock *acb)
+static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
case ACB_ADAPTER_TYPE_D:
case ACB_ADAPTER_TYPE_E: {
- dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+ dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
acb->dma_coherent2, acb->dma_coherent_handle2);
break;
}
@@ -576,6 +580,58 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
}
}
+static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+
+ if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
+ reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
+ reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
+ reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
+ reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
+ } else {
+ reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
+ reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
+ reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
+ reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
+ }
+ reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
+ reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
+ reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
+}
+
+static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_D *reg = acb->pmuD;
+
+ reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
+ reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
+ reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
+ reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
+ reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
+ reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
+ reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
+ reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
+ reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
+ reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
+ reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
+ reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
+ reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
+ reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
+ reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
+ reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
+ reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
+ reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
+ reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
+ reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
+ reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
+ reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
+ reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
+ reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
+ reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
+ reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
+}
+
static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
{
bool rtn = true;
@@ -585,88 +641,39 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg;
- acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
- dma_coherent = dma_alloc_coherent(&pdev->dev,
- acb->roundup_ccbsize,
- &dma_coherent_handle,
- GFP_KERNEL);
+ acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
+ &dma_coherent_handle, GFP_KERNEL);
if (!dma_coherent) {
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
}
acb->dma_coherent_handle2 = dma_coherent_handle;
acb->dma_coherent2 = dma_coherent;
- reg = (struct MessageUnit_B *)dma_coherent;
- acb->pmuB = reg;
- if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
- reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
- reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
- reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
- reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
- } else {
- reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
- reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
- reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
- reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
- }
- reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
- reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
- reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
+ acb->pmuB = (struct MessageUnit_B *)dma_coherent;
+ arcmsr_hbaB_assign_regAddr(acb);
}
break;
case ACB_ADAPTER_TYPE_D: {
- struct MessageUnit_D *reg;
-
- acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
- dma_coherent = dma_alloc_coherent(&pdev->dev,
- acb->roundup_ccbsize,
- &dma_coherent_handle,
- GFP_KERNEL);
+ acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
+ &dma_coherent_handle, GFP_KERNEL);
if (!dma_coherent) {
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
}
acb->dma_coherent_handle2 = dma_coherent_handle;
acb->dma_coherent2 = dma_coherent;
- reg = (struct MessageUnit_D *)dma_coherent;
- acb->pmuD = reg;
- reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
- reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
- reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
- reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
- reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
- reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
- reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
- reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
- reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
- reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
- reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
- reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
- reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
- reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
- reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
- reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
- reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
- reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
- reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
- reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
- reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
- reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
- reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
- reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
- reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
- reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
+ acb->pmuD = (struct MessageUnit_D *)dma_coherent;
+ arcmsr_hbaD_assign_regAddr(acb);
}
break;
case ACB_ADAPTER_TYPE_E: {
uint32_t completeQ_size;
completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
- acb->roundup_ccbsize = roundup(completeQ_size, 32);
- dma_coherent = dma_alloc_coherent(&pdev->dev,
- acb->roundup_ccbsize,
- &dma_coherent_handle,
- GFP_KERNEL);
+ acb->ioqueue_size = roundup(completeQ_size, 32);
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
+ &dma_coherent_handle, GFP_KERNEL);
if (!dma_coherent){
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
@@ -674,7 +681,7 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
acb->dma_coherent_handle2 = dma_coherent_handle;
acb->dma_coherent2 = dma_coherent;
acb->pCompletionQ = dma_coherent;
- acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
+ acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
acb->doneq_index = 0;
}
break;
@@ -691,11 +698,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
dma_addr_t dma_coherent_handle;
struct CommandControlBlock *ccb_tmp;
int i = 0, j = 0;
- dma_addr_t cdb_phyaddr;
+ unsigned long cdb_phyaddr, next_ccb_phy;
unsigned long roundup_ccbsize;
unsigned long max_xfer_len;
unsigned long max_sg_entrys;
- uint32_t firm_config_version;
+ uint32_t firm_config_version, curr_phy_upper32;
for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
@@ -712,6 +719,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->host->sg_tablesize = max_sg_entrys;
roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
+ acb->uncache_size += acb->ioqueue_size;
dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
if(!dma_coherent){
printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -722,9 +730,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
memset(dma_coherent, 0, acb->uncache_size);
acb->ccbsize = roundup_ccbsize;
ccb_tmp = dma_coherent;
+ curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
for(i = 0; i < acb->maxFreeCCB; i++){
- cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
+ cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
case ACB_ADAPTER_TYPE_B:
@@ -740,10 +749,34 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
ccb_tmp->acb = acb;
ccb_tmp->smid = (u32)i << 16;
INIT_LIST_HEAD(&ccb_tmp->list);
- list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
+ next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
+ if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
+ acb->maxFreeCCB = i;
+ acb->host->can_queue = i;
+ break;
+ }
+ else
+ list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
- dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
+ dma_coherent_handle = next_ccb_phy;
}
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = ccb_tmp;
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_B:
+ acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
+ arcmsr_hbaB_assign_regAddr(acb);
+ break;
+ case ACB_ADAPTER_TYPE_D:
+ acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
+ arcmsr_hbaD_assign_regAddr(acb);
+ break;
+ case ACB_ADAPTER_TYPE_E:
+ acb->pCompletionQ = acb->dma_coherent2;
+ acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
+ acb->doneq_index = 0;
+ break;
+ }
return 0;
}
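[Reviewer note] The reworked CCB loop above enforces the constraint the rest of the arcmsr series depends on: several adapter families report a completed CDB as a 32-bit token, so every CCB must share one fixed upper-32-bit address half (acb->cdb_phyadd_hipart), and the pool is truncated (maxFreeCCB, can_queue) as soon as the next CCB would cross a 4 GiB boundary. The completion paths then OR the high half back in. The core of the check, sketched with generic names (upper_32_bits() is the kernel helper from <linux/kernel.h>):

    u32 hi = upper_32_bits(first_phys);	/* fixed high half of the window */
    u64 cur_phys = first_phys;

    for (i = 0; i < max_blocks; i++) {
    	u64 next_phys = cur_phys + block_size;

    	if (upper_32_bits(next_phys) != hi) {
    		max_blocks = i;		/* truncate the pool at the boundary */
    		break;
    	}
    	cur_phys = next_phys;
    }
    /* completion side: full = (u64)token32 | ((u64)hi << 32); */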
@@ -894,6 +927,31 @@ static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
add_timer(&pacb->refresh_timer);
}
+static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
+{
+ struct pci_dev *pcidev = acb->pdev;
+
+ if (IS_DMA64) {
+ if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
+ dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
+ goto dma32;
+ if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
+ printk("arcmsr: set DMA 64 mask failed\n");
+ return -ENXIO;
+ }
+ } else {
+dma32:
+ if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
+ dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
+ printk("arcmsr: set DMA 32-bit mask failed\n");
+ return -ENXIO;
+ }
+ }
+ return 0;
+}
+
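[Reviewer note] arcmsr_set_dma_mask() centralizes what the probe and resume paths previously open-coded: try 64-bit DMA unless the adapter type or the new dma_mask_64 module parameter forbids it, fall back to 32-bit, and fail only if neither sticks. As a matter of the generic API, the separate dma_set_mask()/dma_set_coherent_mask() calls could be collapsed into dma_set_mask_and_coherent(); a minimal sketch of the same fallback with the combined helper:

    static int set_dma_mask_with_fallback(struct device *dev, bool want64)
    {
    	if (want64 && !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
    		return 0;	/* 64-bit accepted */
    	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
    }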
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
@@ -908,22 +966,15 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(!host){
goto pci_disable_dev;
}
- error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if(error){
- error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if(error){
- printk(KERN_WARNING
- "scsi%d: No suitable DMA mask available\n",
- host->host_no);
- goto scsi_host_release;
- }
- }
init_waitqueue_head(&wait_q);
bus = pdev->bus->number;
dev_fun = pdev->devfn;
acb = (struct AdapterControlBlock *) host->hostdata;
memset(acb,0,sizeof(struct AdapterControlBlock));
acb->pdev = pdev;
+ acb->adapter_type = id->driver_data;
+ if (arcmsr_set_dma_mask(acb))
+ goto scsi_host_release;
acb->host = host;
host->max_lun = ARCMSR_MAX_TARGETLUN;
host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
@@ -953,7 +1004,6 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ACB_F_MESSAGE_WQBUFFER_READED);
acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
INIT_LIST_HEAD(&acb->ccb_free_list);
- acb->adapter_type = id->driver_data;
error = arcmsr_remap_pciregion(acb);
if(!error){
goto pci_release_regs;
@@ -965,9 +1015,10 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(!error){
goto free_hbb_mu;
}
+ arcmsr_free_io_queue(acb);
error = arcmsr_alloc_ccb_pool(acb);
if(error){
- goto free_hbb_mu;
+ goto unmap_pci_region;
}
error = scsi_add_host(host, &pdev->dev);
if(error){
@@ -995,8 +1046,9 @@ scsi_host_remove:
scsi_remove_host(host);
free_ccb_pool:
arcmsr_free_ccb_pool(acb);
+ goto unmap_pci_region;
free_hbb_mu:
- arcmsr_free_mu(acb);
+ arcmsr_free_io_queue(acb);
unmap_pci_region:
arcmsr_unmap_pciregion(acb);
pci_release_regs:
@@ -1042,7 +1094,6 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
static int arcmsr_resume(struct pci_dev *pdev)
{
- int error;
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
@@ -1054,24 +1105,30 @@ static int arcmsr_resume(struct pci_dev *pdev)
pr_warn("%s: pci_enable_device error\n", __func__);
return -ENODEV;
}
- error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (error) {
- error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (error) {
- pr_warn("scsi%d: No suitable DMA mask available\n",
- host->host_no);
- goto controller_unregister;
- }
- }
+ if (arcmsr_set_dma_mask(acb))
+ goto controller_unregister;
pci_set_master(pdev);
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
- if (acb->adapter_type == ACB_ADAPTER_TYPE_E) {
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ uint32_t i;
+ for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
+ reg->post_qbuffer[i] = 0;
+ reg->done_qbuffer[i] = 0;
+ }
+ reg->postq_index = 0;
+ reg->doneq_index = 0;
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E:
writel(0, &acb->pmuE->host_int_status);
writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
acb->in_doorbell = 0;
acb->out_doorbell = 0;
acb->doneq_index = 0;
+ break;
}
arcmsr_iop_init(acb);
arcmsr_init_get_devmap_timer(acb);
@@ -1351,10 +1408,12 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
int i = 0;
- uint32_t flag_ccb, ccb_cdb_phy;
+ uint32_t flag_ccb;
struct ARCMSR_CDB *pARCMSR_CDB;
bool error;
struct CommandControlBlock *pCCB;
+ unsigned long ccb_cdb_phy, cdb_phy_hipart;
+
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
@@ -1366,7 +1425,10 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
&& (i++ < acb->maxOutstanding)) {
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
+ ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
arcmsr_drain_donequeue(acb, pCCB, error);
@@ -1382,7 +1444,10 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
flag_ccb = reg->done_qbuffer[i];
if (flag_ccb != 0) {
reg->done_qbuffer[i] = 0;
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
+ ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
arcmsr_drain_donequeue(acb, pCCB, error);
@@ -1399,7 +1464,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
/*need to do*/
flag_ccb = readl(&reg->outbound_queueport_low);
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
arcmsr_drain_donequeue(acb, pCCB, error);
@@ -1427,9 +1494,13 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
((toggle ^ 0x4000) + 1);
doneq_index = pmu->doneq_index;
spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
+ 0xFFF].addressHigh;
addressLow = pmu->done_qbuffer[doneq_index &
0xFFF].addressLow;
ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
pARCMSR_CDB = (struct ARCMSR_CDB *)
(acb->vir2phy_offset + ccb_cdb_phy);
pCCB = container_of(pARCMSR_CDB,
@@ -1506,7 +1577,6 @@ static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
pdev = acb->pdev;
arcmsr_free_irq(pdev, acb);
arcmsr_free_ccb_pool(acb);
- arcmsr_free_mu(acb);
arcmsr_unmap_pciregion(acb);
pci_release_regions(pdev);
scsi_host_put(host);
@@ -1564,7 +1634,6 @@ static void arcmsr_remove(struct pci_dev *pdev)
}
arcmsr_free_irq(pdev, acb);
arcmsr_free_ccb_pool(acb);
- arcmsr_free_mu(acb);
arcmsr_unmap_pciregion(acb);
pci_release_regions(pdev);
scsi_host_put(host);
@@ -1749,12 +1818,8 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
- if (acb->cdb_phyaddr_hi32) {
- writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
- writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
- } else {
- writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
- }
+ writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
+ writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
}
break;
case ACB_ADAPTER_TYPE_D: {
@@ -1767,8 +1832,8 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
spin_lock_irqsave(&acb->postq_lock, flags);
postq_index = pmu->postq_index;
pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
- pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
- pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
+ pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
+ pinbound_srb->addressLow = cdb_phyaddr;
pinbound_srb->length = ccb->arc_cdb_size >> 2;
arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
toggle = postq_index & 0x4000;
@@ -2304,8 +2369,13 @@ static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
struct ARCMSR_CDB *pARCMSR_CDB;
struct CommandControlBlock *pCCB;
bool error;
+ unsigned long cdb_phy_addr;
+
while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
+ cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
+ if (acb->cdb_phyadd_hipart)
+ cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
arcmsr_drain_donequeue(acb, pCCB, error);
@@ -2319,13 +2389,18 @@ static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
struct ARCMSR_CDB *pARCMSR_CDB;
struct CommandControlBlock *pCCB;
bool error;
+ unsigned long cdb_phy_addr;
+
index = reg->doneq_index;
while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
- reg->done_qbuffer[index] = 0;
- pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
+ cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
+ if (acb->cdb_phyadd_hipart)
+ cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
+ pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
arcmsr_drain_donequeue(acb, pCCB, error);
+ reg->done_qbuffer[index] = 0;
index++;
index %= ARCMSR_MAX_HBB_POSTQUEUE;
reg->doneq_index = index;
@@ -2337,7 +2412,8 @@ static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
struct MessageUnit_C __iomem *phbcmu;
struct ARCMSR_CDB *arcmsr_cdb;
struct CommandControlBlock *ccb;
- uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
+ uint32_t flag_ccb, throttling = 0;
+ unsigned long ccb_cdb_phy;
int error;
phbcmu = acb->pmuC;
@@ -2347,6 +2423,8 @@ static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
0xFFFFFFFF) {
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+ ccb_cdb_phy);
ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
@@ -2367,12 +2445,12 @@ static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
{
u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
- uint32_t addressLow, ccb_cdb_phy;
+ uint32_t addressLow;
int error;
struct MessageUnit_D *pmu;
struct ARCMSR_CDB *arcmsr_cdb;
struct CommandControlBlock *ccb;
- unsigned long flags;
+ unsigned long flags, ccb_cdb_phy, cdb_phy_hipart;
spin_lock_irqsave(&acb->doneq_lock, flags);
pmu = acb->pmuD;
@@ -2386,9 +2464,13 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
((toggle ^ 0x4000) + 1);
doneq_index = pmu->doneq_index;
+ cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
+ 0xFFF].addressHigh;
addressLow = pmu->done_qbuffer[doneq_index &
0xFFF].addressLow;
ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+ ccb_cdb_phy);
ccb = container_of(arcmsr_cdb,
@@ -3229,7 +3311,9 @@ static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
int rtn;
bool error;
- polling_hba_ccb_retry:
+ unsigned long ccb_cdb_phy;
+
+polling_hba_ccb_retry:
poll_count++;
outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
@@ -3247,7 +3331,10 @@ static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
goto polling_hba_ccb_retry;
}
}
- arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
+ ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
@@ -3285,8 +3372,9 @@ static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
int index, rtn;
bool error;
- polling_hbb_ccb_retry:
+ unsigned long ccb_cdb_phy;
+polling_hbb_ccb_retry:
poll_count++;
/* clear doorbell interrupt */
writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
@@ -3312,7 +3400,10 @@ static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
index %= ARCMSR_MAX_HBB_POSTQUEUE;
reg->doneq_index = index;
/* check if command done with no error*/
- arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
+ ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
@@ -3345,12 +3436,14 @@ static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
struct MessageUnit_C __iomem *reg = acb->pmuC;
- uint32_t flag_ccb, ccb_cdb_phy;
+ uint32_t flag_ccb;
struct ARCMSR_CDB *arcmsr_cdb;
bool error;
struct CommandControlBlock *pCCB;
uint32_t poll_ccb_done = 0, poll_count = 0;
int rtn;
+ unsigned long ccb_cdb_phy;
+
polling_hbc_ccb_retry:
poll_count++;
while (1) {
@@ -3369,7 +3462,9 @@ polling_hbc_ccb_retry:
}
flag_ccb = readl(&reg->outbound_queueport_low);
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
- arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+ arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
/* check if command done with no error */
@@ -3403,9 +3498,9 @@ static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
bool error;
- uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
+ uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
- unsigned long flags;
+ unsigned long flags, ccb_cdb_phy, cdb_phy_hipart;
struct ARCMSR_CDB *arcmsr_cdb;
struct CommandControlBlock *pCCB;
struct MessageUnit_D *pmu = acb->pmuD;
@@ -3437,8 +3532,12 @@ polling_hbaD_ccb_retry:
((toggle ^ 0x4000) + 1);
doneq_index = pmu->doneq_index;
spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
+ 0xFFF].addressHigh;
flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+ if (acb->cdb_phyadd_hipart)
+ ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
ccb_cdb_phy);
pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
@@ -3680,6 +3779,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
cdb_phyaddr = lower_32_bits(dma_coherent_handle);
cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
+ acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
/*
***********************************************************************
** if adapter type B, set window of "post command Q"
@@ -3744,7 +3844,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
}
break;
case ACB_ADAPTER_TYPE_C: {
- if (cdb_phyaddr_hi32 != 0) {
struct MessageUnit_C __iomem *reg = acb->pmuC;
printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
@@ -3759,7 +3858,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
return 1;
}
}
- }
break;
case ACB_ADAPTER_TYPE_D: {
uint32_t __iomem *rwbuffer;
@@ -3793,7 +3891,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
- writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
+ writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
writel(acb->out_doorbell, &reg->iobound_doorbell);
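
[Editor's note] The arcmsr hunks above all repeat one pattern: the completion token carries only the low 32 bits of the CDB bus address, so the driver caches the high half once (acb->cdb_phyadd_hipart, set in arcmsr_iop_confirm) and ORs it back in before translating to a virtual address. A minimal consolidated sketch of the shift-based variant used by the type A/B paths (the C/D paths mask an address token with 0xFFFFFFF0 instead of shifting); arcmsr_ccb_from_flag is a hypothetical helper, not part of the patch:

    /* Hedged sketch, not a drop-in: recover a CCB from a 32-bit
     * completion token when coherent memory may sit above 4 GiB.
     */
    static struct CommandControlBlock *
    arcmsr_ccb_from_flag(struct AdapterControlBlock *acb, uint32_t flag_ccb)
    {
        /* token is the bus address >> 5; frames are 32-byte aligned */
        unsigned long cdb_phy = ((unsigned long)flag_ccb << 5) & 0xffffffff;
        struct ARCMSR_CDB *cdb;

        if (acb->cdb_phyadd_hipart)     /* high 32 bits cached at init */
            cdb_phy |= acb->cdb_phyadd_hipart;

        cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy);
        return container_of(cdb, struct CommandControlBlock, arcmsr_cdb);
    }
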
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 0e119d838e1b..762cb77253b9 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -62,8 +62,7 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
writel((__bfa)->iocfc.req_cq_pi[__reqq], \
(__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \
- mmiowb(); \
- } while (0)
+ } while (0)
#define bfa_rspq_pi(__bfa, __rspq) \
(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index b4f2c1d8742e..646f09f66443 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -6430,9 +6430,7 @@ bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
- /*
- * !!! fall through !!!
- */
+ /* fall through */
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
@@ -6458,9 +6456,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
- /*
- * !!! fall through !!!
- */
+ /* fall through */
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index de50349a39ce..1e400f2aaece 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -427,17 +427,13 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /*
- * !! fall through !!
- */
+ /* fall through */
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
bfa_fcxp_discard(rport->fcxp);
- /*
- * !! fall through !!
- */
+ /* fall through */
case RPSM_EVENT_FAILED:
if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
rport->plogi_retries++;
@@ -868,9 +864,7 @@ bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
* At least go offline when a PLOGI is received.
*/
bfa_fcxp_discard(rport->fcxp);
- /*
- * !!! fall through !!!
- */
+ /* fall through */
case RPSM_EVENT_FAILED:
case RPSM_EVENT_ADDRESS_CHANGE:
@@ -1056,6 +1050,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
+ /* fall through */
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
@@ -1144,9 +1139,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
bfa_fcs_rport_send_plogiacc(rport, NULL);
break;
}
- /*
- * !! fall through !!
- */
+ /* fall through */
case RPSM_EVENT_ADDRESS_CHANGE:
if (!bfa_fcs_lport_is_online(rport->port)) {
@@ -1303,6 +1296,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
+ /* fall through */
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
@@ -1346,6 +1340,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
+ /* fall through */
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
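
[Editor's note] The comment rewrites above are not cosmetic: GCC's -Wimplicit-fallthrough suppresses its warning only when the comment before the next case label matches its expected pattern, and the decorated "!!! fall through !!!" form does not. The canonical shape, using the names from the hunks above:

    switch (event) {
    case RPSM_EVENT_LOGO_RCVD:
        bfa_fcs_rport_send_logo_acc(rport);
        /* fall through */
    case RPSM_EVENT_PRLO_RCVD:
        if (rport->prlo == BFA_TRUE)
            bfa_fcs_rport_send_prlo_acc(rport);
        break;
    }
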
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index c4a0c0eb88a5..4a0d881b2602 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -61,7 +61,6 @@ bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
- mmiowb();
}
void
@@ -72,7 +71,6 @@ bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
- mmiowb();
}
void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index b0ff378dece2..b7be5f4f02a5 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -81,7 +81,6 @@ bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
- mmiowb();
}
/*
@@ -94,7 +93,6 @@ bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
{
bfa_rspq_ci(bfa, rspq) = ci;
writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
- mmiowb();
}
void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 9631877aba4f..79a55c3615be 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -978,9 +978,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
case IOCPF_E_INITFAIL:
bfa_iocpf_timer_stop(ioc);
- /*
- * !!! fall through !!!
- */
+ /* fall through */
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
@@ -1056,9 +1054,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
case IOCPF_E_FAIL:
bfa_iocpf_timer_stop(ioc);
- /*
- * !!! fall through !!!
- */
+ /* fall through */
case IOCPF_E_TIMEOUT:
bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
@@ -6007,6 +6003,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_FLASH_COMP:
bfa_timer_stop(&dconf->timer);
+ /* fall through */
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 349cfe7d055e..bfcd87c0dc74 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -460,11 +460,6 @@ bfad_debugfs_init(struct bfad_port_s *port)
if (!bfa_debugfs_root) {
bfa_debugfs_root = debugfs_create_dir("bfa", NULL);
atomic_set(&bfa_debugfs_port_count, 0);
- if (!bfa_debugfs_root) {
- printk(KERN_WARNING
- "BFA debugfs root dir creation failed\n");
- goto err;
- }
}
/* Setup the pci_dev debugfs directory for the port */
@@ -472,12 +467,6 @@ bfad_debugfs_init(struct bfad_port_s *port)
if (!port->port_debugfs_root) {
port->port_debugfs_root =
debugfs_create_dir(name, bfa_debugfs_root);
- if (!port->port_debugfs_root) {
- printk(KERN_WARNING
- "bfa %s: debugfs root creation failed\n",
- bfad->pci_name);
- goto err;
- }
atomic_inc(&bfa_debugfs_port_count);
@@ -489,16 +478,9 @@ bfad_debugfs_init(struct bfad_port_s *port)
port->port_debugfs_root,
port,
file->fops);
- if (!bfad->bfad_dentry_files[i]) {
- printk(KERN_WARNING
- "bfa %s: debugfs %s creation failed\n",
- bfad->pci_name, file->name);
- goto err;
- }
}
}
-err:
return;
}
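
[Editor's note] The deleted error paths above (and the ones in csiostor and fnic further down) follow the tree-wide debugfs convention: debugfs_create_dir()/debugfs_create_file() failures are non-fatal by design, and the API copes with an invalid parent dentry in subsequent calls, so callers are expected to ignore the result rather than warn and bail. A minimal sketch of the resulting style; example_debugfs_init and example_fops are hypothetical:

    #include <linux/debugfs.h>

    static const struct file_operations example_fops;   /* hypothetical */
    static struct dentry *example_root;

    static void example_debugfs_init(void)
    {
        /* No return-value checks: a failed create only means the
         * files do not appear; later calls tolerate the bad parent.
         */
        example_root = debugfs_create_dir("example", NULL);
        debugfs_create_file("stats", 0444, example_root, NULL,
                            &example_fops);
    }
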
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 2e4e7159ebf9..a75e74ad1698 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1438,7 +1438,7 @@ bind_err:
static struct bnx2fc_interface *
bnx2fc_interface_create(struct bnx2fc_hba *hba,
struct net_device *netdev,
- enum fip_state fip_mode)
+ enum fip_mode fip_mode)
{
struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 039328d9ef13..19734ec7f42e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -991,7 +991,6 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
FCOE_CQE_TOGGLE_BIT_SHIFT);
msg = *((u32 *)rx_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
- mmiowb();
}
@@ -1409,7 +1408,6 @@ void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
(tgt->sq_curr_toggle_bit << 15);
msg = *((u32 *)sq_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
- mmiowb();
}
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index fae6f71e677d..12666313b937 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -253,7 +253,6 @@ void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
writew(ep->qp.rq_prod_idx,
ep->qp.ctx_base + CNIC_RECV_DOORBELL);
}
- mmiowb();
}
@@ -279,8 +278,6 @@ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
bnx2i_ring_577xx_doorbell(bnx2i_conn);
} else
writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
-
- mmiowb(); /* flush posted PCI writes */
}
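
[Editor's note] The mmiowb() deletions across bfa, bnx2fc and bnx2i ride on the kernel's I/O ordering rework: writel() is now guaranteed to be ordered with respect to a following spin_unlock() on every architecture (where a barrier is still needed, it moved into the spinlock implementation), so the explicit flush after ringing a doorbell is redundant. Roughly, assuming a hypothetical lock around the doorbell write:

    spin_lock(&tgt->tgt_lock);          /* hypothetical lock */
    writel(msg, tgt->ctx_base);         /* doorbell write */
    spin_unlock(&tgt->tgt_lock);        /* orders the MMIO write;
                                         * no mmiowb() needed */
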
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 69c75426c5eb..c5fa5f3b00e9 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -577,7 +577,7 @@ static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
hba->dummy_buffer, hba->dummy_buf_dma);
hba->dummy_buffer = NULL;
}
- return;
+ return;
}
/**
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 9bd2bd8dc2be..200e50089711 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -497,7 +497,6 @@ out:
static int
csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
{
- struct csio_lnode *pln;
struct csio_mb *mbp;
struct fw_fcoe_vnp_cmd *rsp;
int ret = 0;
@@ -514,8 +513,6 @@ csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
goto out;
}
- pln = ln->pln;
-
csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
ln->fcf_flowid, ln->vnp_flowid,
NULL);
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 616b25bf7941..a6dd704d7f2d 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -167,14 +167,10 @@ csio_dfs_destroy(struct csio_hw *hw)
* csio_dfs_init - Debug filesystem initialization for the module.
*
*/
-static int
+static void
csio_dfs_init(void)
{
csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!csio_debugfs_root)
- pr_warn("Could not create debugfs entry, continuing\n");
-
- return 0;
}
/*
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index bc5547a62c00..469d0bc9f5fe 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
}
out:
- if (req->nsge > 0)
+ if (req->nsge > 0) {
scsi_dma_unmap(cmnd);
+ if (req->dcopy && (host_status == DID_OK))
+ host_status = csio_scsi_copy_to_sgl(hw, req);
+ }
cmnd->result = (((host_status) << 16) | scsi_status);
cmnd->scsi_done(cmnd);
@@ -1984,15 +1987,15 @@ inval_scmnd:
/* FW successfully aborted the request */
if (host_byte(cmnd->result) == DID_REQUEUE) {
csio_info(hw,
- "Aborted SCSI command to (%d:%llu) serial#:0x%lx\n",
+ "Aborted SCSI command to (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
- cmnd->serial_number);
+ cmnd->request->tag);
return SUCCESS;
} else {
csio_info(hw,
- "Failed to abort SCSI command, (%d:%llu) serial#:0x%lx\n",
+ "Failed to abort SCSI command, (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
- cmnd->serial_number);
+ cmnd->request->tag);
return FAILED;
}
}
diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile
index a73781ac1800..f78c9cc460a2 100644
--- a/drivers/scsi/cxgbi/Makefile
+++ b/drivers/scsi/cxgbi/Makefile
@@ -1,4 +1,4 @@
-ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
+ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index d26f50af00ea..d44914e5e415 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1210,7 +1210,8 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
csk->skb_ulp_lhdr = skb;
cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
- if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
+ if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) &&
+ (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
csk->tid, cxgbi_skcb_tcp_seq(skb),
csk->rcv_nxt);
@@ -2134,8 +2135,7 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
cdev->itp = &cxgb4i_iscsi_transport;
cdev->owner = THIS_MODULE;
- cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
- << FW_VIID_PFN_S;
+ cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
pr_info("cdev 0x%p,%s, pfvf %u.\n",
cdev, lldi->ports[0]->name, cdev->pfvf);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 245742557c03..006372b3fba2 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1212,7 +1212,7 @@ scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
unsigned int *sgcnt, unsigned int *dlen,
unsigned int prot)
{
- struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc);
+ struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : &sc->sdb;
*sgl = sdb->table.sgl;
*sgcnt = sdb->table.nents;
@@ -1428,8 +1428,7 @@ static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
log_debug(1 << CXGBI_DBG_DDP,
"cdev 0x%p, task 0x%p, release tag 0x%x.\n",
cdev, task, tag);
- if (sc &&
- (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
+ if (sc && sc->sc_data_direction == DMA_FROM_DEVICE &&
cxgbi_ppm_is_ddp_tag(ppm, tag)) {
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
@@ -1461,9 +1460,7 @@ static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
u32 tag = 0;
int err = -EINVAL;
- if (sc &&
- (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)
- ) {
+ if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) {
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
@@ -1897,7 +1894,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
(opcode == ISCSI_OP_SCSI_DATA_OUT ||
(opcode == ISCSI_OP_SCSI_CMD &&
- (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
+ sc->sc_data_direction == DMA_TO_DEVICE)))
/* data could go into skb head */
headroom += min_t(unsigned int,
SKB_MAX_HEAD(cdev->skb_tx_rsvd),
@@ -1972,7 +1969,7 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
return 0;
if (task->sc) {
- struct scsi_data_buffer *sdb = scsi_out(task->sc);
+ struct scsi_data_buffer *sdb = &task->sc->sdb;
struct scatterlist *sg = NULL;
int err;
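
[Editor's note] The libcxgbi changes track the removal of bidirectional SCSI command support from the midlayer: scsi_out() and scsi_bidi_cmnd() are gone, the data buffer lives directly at sc->sdb, and only the DMA direction remains to be tested. In sketch form:

    /* was: prot ? scsi_prot(sc) : scsi_out(sc) */
    struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : &sc->sdb;

    /* DDP tagging: the scsi_bidi_cmnd(sc) clause simply drops out */
    if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) {
        /* only read-direction commands get a DDP tag now */
    }
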
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 8908a20065c8..4d90106fcb37 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -334,7 +334,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode);
void cxlflash_list_init(void);
void cxlflash_term_global_luns(void);
void cxlflash_free_errpage(void);
-int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd,
+ void __user *arg);
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg);
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg);
void cxlflash_term_local_luns(struct cxlflash_cfg *cfg);
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index c8bad2c093b8..7096810fd222 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3282,7 +3282,7 @@ static int cxlflash_chr_open(struct inode *inode, struct file *file)
*
* Return: A string identifying the decoded host ioctl.
*/
-static char *decode_hioctl(int cmd)
+static char *decode_hioctl(unsigned int cmd)
{
switch (cmd) {
case HT_CXLFLASH_LUN_PROVISION:
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index acac6152f50b..1a94a469051e 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1924,7 +1924,7 @@ out:
*
* Return: A string identifying the decoded ioctl.
*/
-static char *decode_ioctl(int cmd)
+static char *decode_ioctl(unsigned int cmd)
{
switch (cmd) {
case DK_CXLFLASH_ATTACH:
@@ -2051,7 +2051,7 @@ err1:
*
* Return: 0 on success, -errno on failure
*/
-static int ioctl_common(struct scsi_device *sdev, int cmd)
+static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
{
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
@@ -2096,7 +2096,7 @@ out:
*
* Return: 0 on success, -errno on failure
*/
-int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
{
typedef int (*sioctl) (struct scsi_device *, void *);
@@ -2179,8 +2179,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
}
if (unlikely(copy_from_user(&buf, arg, size))) {
- dev_err(dev, "%s: copy_from_user() fail "
- "size=%lu cmd=%d (%s) arg=%p\n",
+ dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
__func__, size, cmd, decode_ioctl(cmd), arg);
rc = -EFAULT;
goto cxlflash_ioctl_exit;
@@ -2203,8 +2202,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
rc = do_ioctl(sdev, (void *)&buf);
if (likely(!rc))
if (unlikely(copy_to_user(arg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail "
- "size=%lu cmd=%d (%s) arg=%p\n",
+ dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
__func__, size, cmd, decode_ioctl(cmd), arg);
rc = -EFAULT;
}
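
[Editor's note] The int → unsigned int churn through cxlflash (and esas2r below) aligns driver ioctl handlers with the SCSI host template, whose ->ioctl() hook takes an unsigned int command; ioctl numbers built with _IOWR() can set the high bit, so keeping them signed invites sign-extension surprises in comparisons and logs (hence the matching %d → %u format fixes). A sketch of the adjusted hook; example_ioctl is hypothetical:

    static int example_ioctl(struct scsi_device *sdev, unsigned int cmd,
                             void __user *arg)
    {
        switch (cmd) {
        /* driver-specific cases here */
        default:
            return -ENOTTY;     /* not an ioctl we handle */
        }
    }
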
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 70d1a18278af..abdc34affdf6 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -589,46 +589,6 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
}
/*
- * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
- */
-static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
-{
- return (u32)cmd->serial_number;
-}
-
-/*
- * Go from a u32 'context' to a struct scsi_cmnd * .
- * This could probably be made more efficient.
- */
-static struct scsi_cmnd *
- adpt_cmd_from_context(adpt_hba * pHba, u32 context)
-{
- struct scsi_cmnd * cmd;
- struct scsi_device * d;
-
- if (context == 0)
- return NULL;
-
- spin_unlock(pHba->host->host_lock);
- shost_for_each_device(d, pHba->host) {
- unsigned long flags;
- spin_lock_irqsave(&d->list_lock, flags);
- list_for_each_entry(cmd, &d->cmd_list, list) {
- if (((u32)cmd->serial_number == context)) {
- spin_unlock_irqrestore(&d->list_lock, flags);
- scsi_device_put(d);
- spin_lock(pHba->host->host_lock);
- return cmd;
- }
- }
- spin_unlock_irqrestore(&d->list_lock, flags);
- }
- spin_lock(pHba->host->host_lock);
-
- return NULL;
-}
-
-/*
* Turn a pointer to ioctl reply data into an u32 'context'
*/
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
@@ -685,9 +645,6 @@ static int adpt_abort(struct scsi_cmnd * cmd)
u32 msg[5];
int rcode;
- if(cmd->serial_number == 0){
- return FAILED;
- }
pHba = (adpt_hba*) cmd->device->host->hostdata[0];
printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
@@ -699,8 +656,9 @@ static int adpt_abort(struct scsi_cmnd * cmd)
msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
msg[2] = 0;
- msg[3]= 0;
- msg[4] = adpt_cmd_to_context(cmd);
+ msg[3]= 0;
+ /* Add 1 to avoid firmware treating it as invalid command */
+ msg[4] = cmd->request->tag + 1;
if (pHba->host)
spin_lock_irq(pHba->host->host_lock);
rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -2198,20 +2156,27 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
status = I2O_POST_WAIT_OK;
}
if(!(context & 0x40000000)) {
- cmd = adpt_cmd_from_context(pHba,
- readl(reply+12));
+ /*
+ * The request tag is one less than the command tag
+ * as the firmware might treat a 0 tag as invalid
+ */
+ cmd = scsi_host_find_tag(pHba->host,
+ readl(reply + 12) - 1);
if(cmd != NULL) {
printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
}
}
adpt_i2o_post_wait_complete(context, status);
} else { // SCSI message
- cmd = adpt_cmd_from_context (pHba, readl(reply+12));
+ /*
+ * The request tag is one less than the command tag
+ * as the firmware might treat a 0 tag as invalid
+ */
+ cmd = scsi_host_find_tag(pHba->host,
+ readl(reply + 12) - 1);
if(cmd != NULL){
scsi_dma_unmap(cmd);
- if(cmd->serial_number != 0) { // If not timedout
- adpt_i2o_to_scsi(reply, cmd);
- }
+ adpt_i2o_to_scsi(reply, cmd);
}
}
writel(m, pHba->reply_port);
@@ -2277,7 +2242,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
// I2O_CMD_SCSI_EXEC
msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
msg[2] = 0;
- msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
+ /* Add 1 to avoid firmware treating it as invalid command */
+ msg[3] = cmd->request->tag + 1;
// Our cards use the transaction context as the tag for queueing
// Adaptec/DPT Private stuff
msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
@@ -2693,9 +2659,6 @@ static void adpt_fail_posted_scbs(adpt_hba* pHba)
unsigned long flags;
spin_lock_irqsave(&d->list_lock, flags);
list_for_each_entry(cmd, &d->cmd_list, list) {
- if(cmd->serial_number == 0){
- continue;
- }
cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
cmd->scsi_done(cmd);
}
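
[Editor's note] This is the most substantive change in the dpt_i2o hunks: cmd->serial_number, which is being removed from the midlayer, is replaced by the block layer request tag as the 32-bit I2O context. The tag is biased by one because the firmware treats context 0 as invalid, and the completion side recovers the command with scsi_host_find_tag() instead of walking every device's command list with the host lock dropped. The round trip, condensed from the hunks above:

    /* submit side: use the request tag (+1, firmware rejects context 0) */
    msg[3] = cmd->request->tag + 1;

    /* completion side: undo the bias and look the command up in O(1) */
    cmd = scsi_host_find_tag(pHba->host, readl(reply + 12) - 1);
    if (cmd)
        scsi_dma_unmap(cmd);
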
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 858c3b33db78..7f43b95f4e94 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -965,8 +965,8 @@ struct esas2r_adapter {
const char *esas2r_info(struct Scsi_Host *);
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
struct esas2r_sas_nvram *data);
-int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
-int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg);
+int esas2r_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg);
u8 handle_hba_ioctl(struct esas2r_adapter *a,
struct atto_ioctl *ioctl_hba);
int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 46b2c83ba21f..950cd92df2ff 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -1241,6 +1241,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
break;
}
+ /* fall through */
case ESAS2R_INIT_MSG_GET_INIT:
if (msg == ESAS2R_INIT_MSG_GET_INIT) {
@@ -1254,7 +1255,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
esas2r_hdebug("FAILED");
}
}
- /* fall through */
+ /* fall through */
default:
rq->req_stat = RS_SUCCESS;
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 34bcc8c04ff4..3d130523c288 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1274,7 +1274,7 @@ int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
-int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
+int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg)
{
struct atto_express_ioctl *ioctl = NULL;
struct esas2r_adapter *a;
@@ -1292,9 +1292,8 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
if (IS_ERR(ioctl)) {
esas2r_log(ESAS2R_LOG_WARN,
- "ioctl_handler access_ok failed for cmd %d, "
- "address %p", cmd,
- arg);
+ "ioctl_handler access_ok failed for cmd %u, address %p",
+ cmd, arg);
return PTR_ERR(ioctl);
}
@@ -1493,7 +1492,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
ioctl_done:
if (err < 0) {
- esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
+ esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err,
cmd);
switch (err) {
@@ -1518,9 +1517,8 @@ ioctl_done:
err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
if (err != 0) {
esas2r_log(ESAS2R_LOG_WARN,
- "ioctl_handler copy_to_user didn't copy "
- "everything (err %d, cmd %d)", err,
- cmd);
+ "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)",
+ err, cmd);
kfree(ioctl);
return -EFAULT;
@@ -1531,7 +1529,7 @@ ioctl_done:
return 0;
}
-int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
+int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg)
{
return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 64397d441bae..fdbda5c05aa0 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -623,7 +623,7 @@ static int esas2r_proc_major;
long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
- (int)cmd, (void __user *)arg);
+ cmd, (void __user *)arg);
}
static void __exit esas2r_exit(void)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cd19be3f3405..8ba8862d3292 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -389,7 +389,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
* Returns: pointer to a struct fcoe_interface or NULL on error
*/
static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
- enum fip_state fip_mode)
+ enum fip_mode fip_mode)
{
struct fcoe_ctlr_device *ctlr_dev;
struct fcoe_ctlr *ctlr;
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 54da3166da8d..7dc4ffa24430 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -147,7 +147,7 @@ static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip)
* fcoe_ctlr_init() - Initialize the FCoE Controller instance
* @fip: The FCoE controller to initialize
*/
-void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
+void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_mode mode)
{
fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
fip->mode = mode;
@@ -454,7 +454,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
mutex_unlock(&fip->ctlr_mutex);
fc_linkup(fip->lp);
} else if (fip->state == FIP_ST_LINK_WAIT) {
- fcoe_ctlr_set_state(fip, fip->mode);
+ if (fip->mode == FIP_MODE_NON_FIP)
+ fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
+ else
+ fcoe_ctlr_set_state(fip, FIP_ST_AUTO);
switch (fip->mode) {
default:
LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 5c8310bade61..c3dcbdc3aa64 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -671,8 +671,19 @@ static const struct device_type fcoe_fcf_device_type = {
.release = fcoe_fcf_device_release,
};
-static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store);
-static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store);
+static ssize_t ctlr_create_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ return fcoe_ctlr_create_store(bus, buf, count);
+}
+static BUS_ATTR_WO(ctlr_create);
+
+static ssize_t ctlr_destroy_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ return fcoe_ctlr_destroy_store(bus, buf, count);
+}
+static BUS_ATTR_WO(ctlr_destroy);
static struct attribute *fcoe_bus_attrs[] = {
&bus_attr_ctlr_create.attr,
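
[Editor's note] BUS_ATTR(name, mode, show, store) was retired in favor of the mode-encoding helpers. BUS_ATTR_WO(ctlr_create) requires a function literally named ctlr_create_store() and expands to roughly the following (simplified sketch):

    static struct bus_attribute bus_attr_ctlr_create = {
        .attr  = { .name = "ctlr_create", .mode = 0200 },  /* S_IWUSR */
        .store = ctlr_create_store,
    };

which is why the patch adds thin ctlr_create_store()/ctlr_destroy_store() wrappers around the existing fcoe_ctlr_*_store() helpers rather than renaming them.
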
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f4909cd206d3..29fe3426f9f2 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -855,7 +855,6 @@ out_nodev:
mutex_unlock(&ft_mutex);
return rc;
}
-EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
/**
* fcoe_transport_create() - Create a fcoe interface
@@ -873,7 +872,7 @@ static int fcoe_transport_create(const char *buffer,
int rc = -ENODEV;
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
- enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
+ enum fip_mode fip_mode = (enum fip_mode)kp->arg;
mutex_lock(&ft_mutex);
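
[Editor's note] The fip_state → fip_mode conversions finish separating the two enums: fcoe_ctlr_init() and the interface-create paths now take an enum fip_mode, and the one place that previously smuggled a mode value into the state field (fcoe_ctlr_link_up()) maps it explicitly. A hypothetical helper makes the mapping the patch open-codes obvious:

    /* hypothetical; the patch open-codes this in fcoe_ctlr_link_up() */
    static enum fip_state fip_initial_state(enum fip_mode mode)
    {
        return mode == FIP_MODE_NON_FIP ? FIP_ST_NON_FIP : FIP_ST_AUTO;
    }
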
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index d094ba59ed15..477513dc23b7 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.34"
+#define DRV_VERSION "1.6.0.47"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -49,7 +49,7 @@
#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */
#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
-#define FNIC_DFLT_QUEUE_DEPTH 32
+#define FNIC_DFLT_QUEUE_DEPTH 256
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
/*
@@ -128,6 +128,7 @@
__fnic_set_state_flags(fnicp, st_flags, 1)
extern unsigned int fnic_log_level;
+extern unsigned int io_completions;
#define FNIC_MAIN_LOGGING 0x01
#define FNIC_FCS_LOGGING 0x02
@@ -196,6 +197,7 @@ enum fnic_state {
#define FNIC_WQ_MAX 1
#define FNIC_RQ_MAX 1
#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
+#define FNIC_DFLT_IO_COMPLETIONS 256
struct mempool;
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 139fffa3658a..21991c99db7c 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -54,23 +54,9 @@ int fnic_debugfs_init(void)
{
int rc = -1;
fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
- if (!fnic_trace_debugfs_root) {
- printk(KERN_DEBUG "Cannot create debugfs root\n");
- return rc;
- }
-
- if (!fnic_trace_debugfs_root) {
- printk(KERN_DEBUG
- "fnic root directory doesn't exist in debugfs\n");
- return rc;
- }
fnic_stats_debugfs_root = debugfs_create_dir("statistics",
fnic_trace_debugfs_root);
- if (!fnic_stats_debugfs_root) {
- printk(KERN_DEBUG "Cannot create Statistics directory\n");
- return rc;
- }
/* Allocate memory to structure */
fc_trc_flag = (struct fc_trace_flag_type *)
@@ -356,39 +342,19 @@ static const struct file_operations fnic_trace_debugfs_fops = {
* it will also create file trace_enable to control enable/disable of
* trace logging into trace buffer.
*/
-int fnic_trace_debugfs_init(void)
+void fnic_trace_debugfs_init(void)
{
- int rc = -1;
- if (!fnic_trace_debugfs_root) {
- printk(KERN_DEBUG
- "FNIC Debugfs root directory doesn't exist\n");
- return rc;
- }
fnic_trace_enable = debugfs_create_file("tracing_enable",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fnic_trace),
&fnic_trace_ctrl_fops);
- if (!fnic_trace_enable) {
- printk(KERN_DEBUG
- "Cannot create trace_enable file under debugfs\n");
- return rc;
- }
-
fnic_trace_debugfs_file = debugfs_create_file("trace",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fnic_trace),
&fnic_trace_debugfs_fops);
-
- if (!fnic_trace_debugfs_file) {
- printk(KERN_DEBUG
- "Cannot create trace file under debugfs\n");
- return rc;
- }
- rc = 0;
- return rc;
}
/*
@@ -419,37 +385,20 @@ void fnic_trace_debugfs_terminate(void)
* trace logging into trace buffer.
*/
-int fnic_fc_trace_debugfs_init(void)
+void fnic_fc_trace_debugfs_init(void)
{
- int rc = -1;
-
- if (!fnic_trace_debugfs_root) {
- pr_err("fnic:Debugfs root directory doesn't exist\n");
- return rc;
- }
-
fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fc_trace),
&fnic_trace_ctrl_fops);
- if (!fnic_fc_trace_enable) {
- pr_err("fnic: Failed create fc_trace_enable file\n");
- return rc;
- }
-
fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fc_clear),
&fnic_trace_ctrl_fops);
- if (!fnic_fc_trace_clear) {
- pr_err("fnic: Failed to create fc_trace_enable file\n");
- return rc;
- }
-
fnic_fc_rdata_trace_debugfs_file =
debugfs_create_file("fc_trace_rdata",
S_IFREG|S_IRUGO|S_IWUSR,
@@ -457,24 +406,12 @@ int fnic_fc_trace_debugfs_init(void)
&(fc_trc_flag->fc_normal_file),
&fnic_trace_debugfs_fops);
- if (!fnic_fc_rdata_trace_debugfs_file) {
- pr_err("fnic: Failed create fc_rdata_trace file\n");
- return rc;
- }
-
fnic_fc_trace_debugfs_file =
debugfs_create_file("fc_trace",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fc_row_file),
&fnic_trace_debugfs_fops);
-
- if (!fnic_fc_trace_debugfs_file) {
- pr_err("fnic: Failed to create fc_trace file\n");
- return rc;
- }
- rc = 0;
- return rc;
}
/*
@@ -757,45 +694,26 @@ static const struct file_operations fnic_reset_debugfs_fops = {
* It will create file stats and reset_stats under statistics/host# directory
* to log per fnic stats.
*/
-int fnic_stats_debugfs_init(struct fnic *fnic)
+void fnic_stats_debugfs_init(struct fnic *fnic)
{
- int rc = -1;
char name[16];
snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
- if (!fnic_stats_debugfs_root) {
- printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
- return rc;
- }
fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
fnic_stats_debugfs_root);
- if (!fnic->fnic_stats_debugfs_host) {
- printk(KERN_DEBUG "Cannot create host directory\n");
- return rc;
- }
fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_stats_debugfs_fops);
- if (!fnic->fnic_stats_debugfs_file) {
- printk(KERN_DEBUG "Cannot create host stats file\n");
- return rc;
- }
fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_reset_debugfs_fops);
- if (!fnic->fnic_reset_debugfs_file) {
- printk(KERN_DEBUG "Cannot create host stats file\n");
- return rc;
- }
- rc = 0;
- return rc;
}
/*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 844ef688fa91..911a5adc289c 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -65,11 +65,21 @@ void fnic_handle_link(struct work_struct *work)
fnic->link_status = vnic_dev_link_status(fnic->vdev);
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
+ atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
+ vnic_dev_port_speed(fnic->vdev));
+ shost_printk(KERN_INFO, fnic->lport->host, "Current vnic speed set to : %llu\n",
+ (u64)atomic64_read(
+ &fnic->fnic_stats.misc_stats.current_port_speed));
+
switch (vnic_dev_port_speed(fnic->vdev)) {
case DCEM_PORTSPEED_10G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
break;
+ case DCEM_PORTSPEED_20G:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
+ break;
case DCEM_PORTSPEED_25G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index e0bc659ed71f..1cb6a68c8e4e 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -70,9 +70,10 @@ enum fnic_port_speeds {
DCEM_PORTSPEED_NONE = 0,
DCEM_PORTSPEED_1G = 1000,
DCEM_PORTSPEED_10G = 10000,
+ DCEM_PORTSPEED_20G = 20000,
+ DCEM_PORTSPEED_25G = 25000,
DCEM_PORTSPEED_40G = 40000,
DCEM_PORTSPEED_4x10G = 41000,
- DCEM_PORTSPEED_25G = 25000,
DCEM_PORTSPEED_100G = 100000,
};
#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 4e3a50202e8c..da4602b63495 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -51,7 +51,7 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
}
if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
- work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+ work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
@@ -72,7 +72,7 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
- work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+ work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
@@ -125,7 +125,7 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
- wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
+ wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
wq_copy_work_done,
1 /* unmask intr */,
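
[Editor's note] All three fnic interrupt paths above switch from draining the copy-WQ completion queue (-1 meaning "no limit") to processing at most io_completions entries per pass, a new writable module parameter defaulting to FNIC_DFLT_IO_COMPLETIONS (256). That bounds the time spent in hard-IRQ context under completion storms; leftover entries are picked up on the next interrupt. The plumbing, condensed from the patch:

    unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;    /* 256 */
    module_param(io_completions, int, S_IRUGO | S_IWUSR);
    MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time");

    /* in each ISR: */
    work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
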
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5b3534b0deda..18584ab27c32 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -69,6 +69,11 @@ unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+
+unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;
+module_param(io_completions, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time");
+
unsigned int fnic_trace_max_pages = 16;
module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
@@ -178,6 +183,9 @@ static void fnic_get_host_speed(struct Scsi_Host *shost)
case DCEM_PORTSPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
+ case DCEM_PORTSPEED_20G:
+ fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
+ break;
case DCEM_PORTSPEED_25G:
fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
break;
@@ -500,7 +508,7 @@ static int fnic_cleanup(struct fnic *fnic)
}
/* Clean up completed IOs and FCS frames */
- fnic_wq_copy_cmpl_handler(fnic, -1);
+ fnic_wq_copy_cmpl_handler(fnic, io_completions);
fnic_wq_cmpl_handler(fnic, -1);
fnic_rq_cmpl_handler(fnic, -1);
@@ -578,12 +586,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
host->transportt = fnic_fc_transport;
- err = fnic_stats_debugfs_init(fnic);
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to initialize debugfs for stats\n");
- fnic_stats_debugfs_remove(fnic);
- }
+ fnic_stats_debugfs_init(fnic);
/* Setup PCI resources */
pci_set_drvdata(pdev, fnic);
@@ -650,12 +653,20 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
+ err = vnic_dev_cmd_init(fnic->vdev);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "vnic_dev_cmd_init() returns %d, aborting\n",
+ err);
+ goto err_out_vnic_unregister;
+ }
+
err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
- vnic_dev_open_done, 0);
+ vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vNIC dev open failed, aborting.\n");
- goto err_out_vnic_unregister;
+ goto err_out_dev_cmd_deinit;
}
err = vnic_dev_init(fnic->vdev, 0);
@@ -796,6 +807,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* allocate RQ buffers and post them to RQ*/
for (i = 0; i < fnic->rq_count; i++) {
+ vnic_rq_enable(&fnic->rq[i]);
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
@@ -870,15 +882,11 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Enable all queues */
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_enable(&fnic->wq[i]);
- for (i = 0; i < fnic->rq_count; i++)
- vnic_rq_enable(&fnic->rq[i]);
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_enable(&fnic->wq_copy[i]);
fc_fabric_login(lp);
- vnic_dev_enable(fnic->vdev);
-
err = fnic_request_intr(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
@@ -886,6 +894,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_exch_mgr;
}
+ vnic_dev_enable(fnic->vdev);
+
for (i = 0; i < fnic->intr_count; i++)
vnic_intr_unmask(&fnic->intr[i]);
@@ -914,6 +924,7 @@ err_out_clear_intr:
fnic_clear_intr_mode(fnic);
err_out_dev_close:
vnic_dev_close(fnic->vdev);
+err_out_dev_cmd_deinit:
err_out_vnic_unregister:
vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index cafbcfb85bfa..80608b53897b 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -180,20 +180,19 @@ void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
unsigned long clearbits)
{
- struct Scsi_Host *host = fnic->lport->host;
- int sh_locked = spin_is_locked(host->host_lock);
unsigned long flags = 0;
+ unsigned long host_lock_flags = 0;
- if (!sh_locked)
- spin_lock_irqsave(host->host_lock, flags);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);
if (clearbits)
fnic->state_flags &= ~st_flags;
else
fnic->state_flags |= st_flags;
- if (!sh_locked)
- spin_unlock_irqrestore(host->host_lock, flags);
+ spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
@@ -1326,13 +1325,32 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
unsigned int wq_work_done = 0;
unsigned int i, cq_index;
unsigned int cur_work_done;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
+ u64 start_jiffies = 0;
+ u64 end_jiffies = 0;
+ u64 delta_jiffies = 0;
+ u64 delta_ms = 0;
for (i = 0; i < fnic->wq_copy_count; i++) {
cq_index = i + fnic->raw_wq_count + fnic->rq_count;
+
+ start_jiffies = jiffies;
cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
fnic_fcpio_cmpl_handler,
copy_work_to_do);
+ end_jiffies = jiffies;
+
wq_work_done += cur_work_done;
+ delta_jiffies = end_jiffies - start_jiffies;
+ if (delta_jiffies >
+ (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
+ atomic64_set(&misc_stats->max_isr_jiffies,
+ delta_jiffies);
+ delta_ms = jiffies_to_msecs(delta_jiffies);
+ atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
+ atomic64_set(&misc_stats->corr_work_done,
+ cur_work_done);
+ }
}
return wq_work_done;
}
@@ -1397,8 +1415,9 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
cleanup_scsi_cmd:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
- __func__, (jiffies - start_time));
+ "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
+ __func__, sc->request->tag, sc,
+ (jiffies - start_time));
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
@@ -1407,6 +1426,11 @@ cleanup_scsi_cmd:
/* Complete the command to SCSI */
if (sc->scsi_done) {
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
+ sc->request->tag, sc);
+
FNIC_TRACE(fnic_cleanup_io,
sc->device->host->host_no, i, sc,
jiffies_to_msecs(jiffies - start_time),
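
[Editor's note] The timing additions to fnic_wq_copy_cmpl_handler() keep a high-watermark of the slowest completion pass. The check-then-set sequence is not atomic as a whole; atomic64_t only makes each individual read and write tear-free, which is an acceptable trade-off for debug statistics. The bookkeeping reduces to:

    u64 delta = end_jiffies - start_jiffies;

    if (delta > (u64)atomic64_read(&misc_stats->max_isr_jiffies)) {
        atomic64_set(&misc_stats->max_isr_jiffies, delta);
        atomic64_set(&misc_stats->max_isr_time_ms,
                     jiffies_to_msecs(delta));
        atomic64_set(&misc_stats->corr_work_done, cur_work_done);
    }
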
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 9daa6ada6fa0..086f729f3c46 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -97,6 +97,9 @@ struct vlan_stats {
struct misc_stats {
u64 last_isr_time;
u64 last_ack_time;
+ atomic64_t max_isr_jiffies;
+ atomic64_t max_isr_time_ms;
+ atomic64_t corr_work_done;
atomic64_t isr_count;
atomic64_t max_cq_entries;
atomic64_t ack_index_out_of_range;
@@ -113,6 +116,7 @@ struct misc_stats {
atomic64_t queue_fulls;
atomic64_t rport_not_ready;
atomic64_t frame_errors;
+ atomic64_t current_port_speed;
};
struct fnic_stats {
@@ -134,6 +138,6 @@ struct stats_debug_info {
};
int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
-int fnic_stats_debugfs_init(struct fnic *);
+void fnic_stats_debugfs_init(struct fnic *);
void fnic_stats_debugfs_remove(struct fnic *);
#endif /* _FNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index bf0fd2aeb92e..9621831e17ba 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -409,6 +409,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Last ISR time: %llu (%8llu.%09lu)\n"
"Last ACK time: %llu (%8llu.%09lu)\n"
+ "Max ISR jiffies: %llu\n"
+ "Max ISR time (ms) (0 denotes < 1 ms): %llu\n"
+ "Corr. work done: %llu\n"
"Number of ISRs: %lld\n"
"Maximum CQ Entries: %lld\n"
"Number of ACK index out of range: %lld\n"
@@ -428,6 +431,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
(s64)val2.tv_sec, val2.tv_nsec,
+ (u64)atomic64_read(&stats->misc_stats.max_isr_jiffies),
+ (u64)atomic64_read(&stats->misc_stats.max_isr_time_ms),
+ (u64)atomic64_read(&stats->misc_stats.corr_work_done),
(u64)atomic64_read(&stats->misc_stats.isr_count),
(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
@@ -446,6 +452,11 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->misc_stats.rport_not_ready),
(u64)atomic64_read(&stats->misc_stats.frame_errors));
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Firmware reported port seed: %llu\n",
+ (u64)atomic64_read(
+ &stats->misc_stats.current_port_speed));
+
return len;
}
@@ -503,15 +514,10 @@ int fnic_trace_buf_init(void)
fnic_trace_entries.page_offset[i] = fnic_buf_head;
fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
}
- err = fnic_trace_debugfs_init();
- if (err < 0) {
- pr_err("fnic: Failed to initialize debugfs for tracing\n");
- goto err_fnic_trace_debugfs_init;
- }
+ fnic_trace_debugfs_init();
pr_info("fnic: Successfully Initialized Trace Buffer\n");
return err;
-err_fnic_trace_debugfs_init:
- fnic_trace_free();
+
err_fnic_trace_buf_init:
return err;
}
@@ -596,16 +602,10 @@ int fnic_fc_trace_init(void)
fc_trace_entries.page_offset[i] = fc_trace_buf_head;
fc_trace_buf_head += FC_TRC_SIZE_BYTES;
}
- err = fnic_fc_trace_debugfs_init();
- if (err < 0) {
- pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
- goto err_fnic_fc_ctlr_trace_debugfs_init;
- }
+ fnic_fc_trace_debugfs_init();
pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
return err;
-err_fnic_fc_ctlr_trace_debugfs_init:
- fnic_fc_trace_free();
err_fnic_fc_ctlr_trace_buf_init:
return err;
}
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index e375d0c2eaaf..8aa55c1e2025 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -111,7 +111,7 @@ int fnic_trace_buf_init(void);
void fnic_trace_free(void);
int fnic_debugfs_init(void);
void fnic_debugfs_terminate(void);
-int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_init(void);
void fnic_trace_debugfs_terminate(void);
/* Fnic FC CTLR Trace releated function */
@@ -123,7 +123,7 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag);
void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
fnic_dbgfs_t *fnic_dbgfs_prt,
int *len, u8 rdata_flag);
-int fnic_fc_trace_debugfs_init(void);
+void fnic_fc_trace_debugfs_init(void);
void fnic_fc_trace_debugfs_terminate(void);
#endif
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 434447ea24b8..78af9cc2009b 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -27,6 +27,24 @@
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
+#include "vnic_wq.h"
+
+struct devcmd2_controller {
+ struct vnic_wq_ctrl *wq_ctrl;
+ struct vnic_dev_ring results_ring;
+ struct vnic_wq wq;
+ struct vnic_devcmd2 *cmd_ring;
+ struct devcmd2_result *result;
+ u16 next_result;
+ u16 result_size;
+ int color;
+};
+
+enum vnic_proxy_type {
+ PROXY_NONE,
+ PROXY_BY_BDF,
+ PROXY_BY_INDEX,
+};
struct vnic_res {
void __iomem *vaddr;
@@ -48,6 +66,12 @@ struct vnic_dev {
dma_addr_t stats_pa;
struct vnic_devcmd_fw_info *fw_info;
dma_addr_t fw_info_pa;
+ enum vnic_proxy_type proxy;
+ u32 proxy_index;
+ u64 args[VNIC_DEVCMD_NARGS];
+ struct devcmd2_controller *devcmd2;
+ int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait);
};
#define VNIC_MAX_RES_HDR_SIZE \
@@ -119,6 +143,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
}
break;
case RES_TYPE_INTR_PBA_LEGACY:
+ case RES_TYPE_DEVCMD2:
case RES_TYPE_DEVCMD:
len = count;
break;
@@ -229,8 +254,7 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
}
}
-int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
- u64 *a0, u64 *a1, int wait)
+int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
@@ -244,6 +268,8 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
EBUSY, /* ERR_EBUSY */
};
int err;
+ u64 *a0 = &vdev->args[0];
+ u64 *a1 = &vdev->args[1];
status = ioread32(&devcmd->status);
if (status & STAT_BUSY) {
@@ -290,6 +316,223 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
return -ETIMEDOUT;
}
+int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+{
+ struct devcmd2_controller *dc2c = vdev->devcmd2;
+ struct devcmd2_result *result;
+ u8 color;
+ unsigned int i;
+ int delay;
+ int err;
+ u32 fetch_index;
+ u32 posted;
+ u32 new_posted;
+
+ posted = ioread32(&dc2c->wq_ctrl->posted_index);
+ fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
+
+ if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) {
+ /* Hardware surprise removal: return error */
+ pr_err("%s: devcmd2 invalid posted or fetch index on cmd %d\n",
+ pci_name(vdev->pdev), _CMD_N(cmd));
+ pr_err("%s: fetch index: %u, posted index: %u\n",
+ pci_name(vdev->pdev), fetch_index, posted);
+
+ return -ENODEV;
+
+ }
+
+ new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+ if (new_posted == fetch_index) {
+ pr_err("%s: devcmd2 wq full while issuing cmd %d\n",
+ pci_name(vdev->pdev), _CMD_N(cmd));
+ pr_err("%s: fetch index: %u, posted index: %u\n",
+ pci_name(vdev->pdev), fetch_index, posted);
+ return -EBUSY;
+
+ }
+ dc2c->cmd_ring[posted].cmd = cmd;
+ dc2c->cmd_ring[posted].flags = 0;
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+ }
+
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+
+ if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+ return 0;
+
+ result = dc2c->result + dc2c->next_result;
+ color = dc2c->color;
+
+ dc2c->next_result++;
+ if (dc2c->next_result == dc2c->result_size) {
+ dc2c->next_result = 0;
+ dc2c->color = dc2c->color ? 0 : 1;
+ }
+
+ for (delay = 0; delay < wait; delay++) {
+ udelay(100);
+ if (result->color == color) {
+ if (result->error) {
+ err = -(int) result->error;
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ pr_err("%s:Error %d devcmd %d\n",
+ pci_name(vdev->pdev),
+ err, _CMD_N(cmd));
+ return err;
+ }
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+ rmb(); /* prevent reordering while reading result */
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ vdev->args[i] = result->results[i];
+ }
+ return 0;
+ }
+ }
+
+ pr_err("%s:Timed out devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd));
+
+ return -ETIMEDOUT;
+}
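
A note on the completion protocol: vnic_dev_cmd2() polls a color bit rather than a valid flag. The firmware writes each result with the current color, and the driver flips the color it expects every time next_result wraps, so entries left over from the previous lap of the ring are never mistaken for fresh completions. A minimal standalone sketch of the idea (hypothetical names, not driver code):

#include <stdio.h>

#define RING_SIZE 4

struct result {
        int payload;
        unsigned char color;
};

int main(void)
{
        struct result ring[RING_SIZE] = {{0}};  /* device writes completions here */
        unsigned int next = 0;                  /* consumer cursor */
        unsigned char expect = 1;               /* color that marks a fresh entry */

        /* pretend the device posted two completions with color == 1 */
        ring[0] = (struct result){ .payload = 42, .color = 1 };
        ring[1] = (struct result){ .payload = 43, .color = 1 };

        while (ring[next].color == expect) {
                printf("completed: %d\n", ring[next].payload);
                if (++next == RING_SIZE) {
                        next = 0;               /* wrapped around: the next lap */
                        expect = !expect;       /* is marked with the other color */
                }
        }
        return 0;
}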
+
+
+int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ return -ENODEV;
+
+ vdev->devcmd_rtn = &vnic_dev_cmd1;
+ return 0;
+}
+
+
+int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+ int err;
+ unsigned int fetch_index;
+
+ if (vdev->devcmd2)
+ return 0;
+
+ vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_ATOMIC);
+ if (!vdev->devcmd2)
+ return -ENOMEM;
+
+ vdev->devcmd2->color = 1;
+ vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+ err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq,
+ DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_devcmd2;
+
+ fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ pr_err("error in devcmd2 init");
+ return -ENODEV;
+ }
+
+ /*
+ * Don't change fetch_index ever and
+ * set posted_index same as fetch_index
+ * when setting up the WQ for devcmd2.
+ */
+ vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index,
+ fetch_index, 0, 0);
+
+ vnic_wq_enable(&vdev->devcmd2->wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+ DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_wq;
+
+ vdev->devcmd2->result =
+ (struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
+ vdev->devcmd2->cmd_ring =
+ (struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs;
+ vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+ vdev->args[0] = (u64) vdev->devcmd2->results_ring.base_addr |
+ VNIC_PADDR_TARGET;
+ vdev->args[1] = DEVCMD2_RING_SIZE;
+
+ err = vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+ if (err)
+ goto err_free_desc_ring;
+
+ vdev->devcmd_rtn = &vnic_dev_cmd2;
+
+ return 0;
+
+err_free_desc_ring:
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+err_free_wq:
+ vnic_wq_disable(&vdev->devcmd2->wq);
+ vnic_wq_free(&vdev->devcmd2->wq);
+err_free_devcmd2:
+ kfree(vdev->devcmd2);
+ vdev->devcmd2 = NULL;
+
+ return err;
+}
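
The error labels above unwind in strict reverse order of construction, each label undoing only the steps that had already succeeded when the failure hit. A standalone sketch of that layered-goto pattern (stand-in functions, not driver code):

#include <stdio.h>

static int step(const char *name, int fail)
{
        printf("%s\n", name);
        return fail ? -1 : 0;
}

static int init(void)
{
        int err;

        err = step("alloc wq", 0);
        if (err)
                goto out;
        err = step("alloc results ring", 0);
        if (err)
                goto err_free_wq;
        err = step("issue init command", 1);    /* pretend this step fails */
        if (err)
                goto err_free_ring;
        return 0;

err_free_ring:
        step("free results ring", 0);
err_free_wq:
        step("disable and free wq", 0);
out:
        return err;
}

int main(void)
{
        return init() ? 1 : 0;
}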
+
+
+void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+ vnic_wq_disable(&vdev->devcmd2->wq);
+ vnic_wq_free(&vdev->devcmd2->wq);
+ kfree(vdev->devcmd2);
+ vdev->devcmd2 = NULL;
+ vdev->devcmd_rtn = &vnic_dev_cmd1;
+}
+
+
+int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+ int err;
+
+ vdev->args[0] = *a0;
+ vdev->args[1] = *a1;
+
+ err = (*vdev->devcmd_rtn)(vdev, cmd, wait);
+
+ *a0 = vdev->args[0];
+ *a1 = vdev->args[1];
+
+ return err;
+}
+
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ switch (vdev->proxy) {
+ case PROXY_NONE:
+ default:
+ return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
+ }
+}
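
Both transports now work on the shared vdev->args array: vnic_dev_cmd() zeroes it so that commands taking fewer arguments never see stale values, the no-proxy wrapper copies a0/a1 in, and results travel back out through the same two slots. A standalone sketch of that copy-in/copy-out convention (hypothetical stand-ins):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NARGS 2

static uint64_t args[NARGS];            /* stand-in for vdev->args */

static int transport(int cmd)
{
        args[0] += 1;                   /* pretend the device echoes a0 + 1 */
        return 0;
}

static int dev_cmd(int cmd, uint64_t *a0, uint64_t *a1)
{
        int err;

        memset(args, 0, sizeof(args));  /* stale args never leak into a cmd */
        args[0] = *a0;
        args[1] = *a1;
        err = transport(cmd);
        *a0 = args[0];                  /* results come back the same way */
        *a1 = args[1];
        return err;
}

int main(void)
{
        uint64_t a0 = 41, a1 = 0;

        dev_cmd(0, &a0, &a1);
        printf("a0=%llu a1=%llu\n", (unsigned long long)a0,
               (unsigned long long)a1); /* prints a0=42 a1=0 */
        return 0;
}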
+
+
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info)
{
@@ -664,6 +907,8 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
+ if (vdev->devcmd2)
+ vnic_dev_deinit_devcmd2(vdev);
kfree(vdev);
}
}
@@ -683,13 +928,26 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
if (vnic_dev_discover_res(vdev, bar))
goto err_out;
- vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
- if (!vdev->devcmd)
- goto err_out;
-
return vdev;
err_out:
vnic_dev_unregister(vdev);
return NULL;
}
+
+int vnic_dev_cmd_init(struct vnic_dev *vdev)
+{
+ int err;
+ void *p;
+
+ p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (p) {
+ pr_err("fnic: DEVCMD2 resource found!\n");
+ err = vnic_dev_init_devcmd2(vdev);
+ } else {
+ pr_err("fnic: DEVCMD2 not found, fall back to Devcmd\n");
+ err = vnic_dev_init_devcmd1(vdev);
+ }
+
+ return err;
+}
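
vnic_dev_cmd_init() picks the transport exactly once, at probe time: if the firmware advertises a DEVCMD2 region the new ring interface is initialized, otherwise the driver quietly keeps using the legacy register-based devcmd. A trivial standalone sketch of that detect-and-fall-back step (hypothetical stand-ins, not driver code):

#include <stdio.h>

static int dummy;

static void *get_res(int type)
{
        return type == 2 ? (void *)&dummy : NULL;       /* pretend DEVCMD2 exists */
}

static int init_devcmd2(void) { puts("using devcmd2"); return 0; }
static int init_devcmd1(void) { puts("using legacy devcmd"); return 0; }

int main(void)
{
        return get_res(2) ? init_devcmd2() : init_devcmd1();
}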
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
index 40d4195f562b..ef5309a5df5d 100644
--- a/drivers/scsi/fnic/vnic_dev.h
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -36,6 +36,7 @@
#define vnic_dev_fw_info fnic_dev_fw_info
#define vnic_dev_spec fnic_dev_spec
#define vnic_dev_stats_clear fnic_dev_stats_clear
+#define vnic_dev_cmd_init fnic_dev_cmd_init
#define vnic_dev_stats_dump fnic_dev_stats_dump
#define vnic_dev_hang_notify fnic_dev_hang_notify
#define vnic_dev_packet_filter fnic_dev_packet_filter
@@ -128,6 +129,7 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
unsigned int size, void *value);
int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_cmd_init(struct vnic_dev *vdev);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index 3e2fcbda6aed..c5dde556dc7c 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -170,7 +170,8 @@ enum vnic_devcmd_cmd {
/* variant of CMD_INIT, with provisioning info
* (u64)a0=paddr of vnic_devcmd_provinfo
- * (u32)a1=sizeof provision info */
+ * (u32)a1=sizeof provision info
+ */
CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
/* enable virtual link */
@@ -262,12 +263,132 @@ enum vnic_devcmd_cmd {
* non-zero for resetting vlan to the default
* out: (u16)a0=old default vlan
*/
- CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
+ CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
+ /* init_prov_info2:
+ * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
+ * the vnic until CMD_ENABLE2 is issued.
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info
+ */
+ CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
+
+ /* enable2:
+ * (u32)a0=0 ==> standby
+ * =CMD_ENABLE2_ACTIVE ==> active
+ */
+ CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
+
+ /*
+ * cmd_status:
+ * Returns the status of the specified command
+ * Input:
+ * a0 = command for which status is being queried.
+ * Possible values are:
+ * CMD_SOFT_RESET
+ * CMD_HANG_RESET
+ * CMD_OPEN
+ * CMD_INIT
+ * CMD_INIT_PROV_INFO
+ * CMD_DEINIT
+ * CMD_INIT_PROV_INFO2
+ * CMD_ENABLE2
+ * Output:
+ * if status == STAT_ERROR
+ * a0 = ERR_ENOTSUPPORTED - status for command in a0 is
+ * not supported
+ * if status == STAT_NONE
+ * a0 = status of the devcmd specified in a0 as follows.
+ * ERR_SUCCESS - command in a0 completed successfully
+ * ERR_EINPROGRESS - command in a0 is still in progress
+ */
+ CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
+
+ /*
+ * Returns interrupt coalescing timer conversion factors.
+ * After calling this devcmd, ENIC driver can convert
+ * interrupt coalescing timer in usec into CPU cycles as follows:
+ *
+ * intr_timer_cycles = intr_timer_usec * multiplier / divisor
+ *
+ * Interrupt coalescing timer in usecs can be converted/obtained
+ * from CPU cycles as follows:
+ *
+ * intr_timer_usec = intr_timer_cycles * divisor / multiplier
+ *
+ * in: none
+ * out: (u32)a0 = multiplier
+ * (u32)a1 = divisor
+ * (u32)a2 = maximum timer value in usec
+ */
+ CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
+
+ /*
+ * ISCSI DUMP API:
+ * in: (u64)a0=paddr of the param or param itself
+ * (u32)a1=ISCSI_CMD_xxx
+ */
+ CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51),
+
+ /*
+ * ISCSI DUMP STATUS API:
+ * in: (u32)a0=cmd tag
+ * in: (u32)a1=ISCSI_CMD_xxx
+ * out: (u32)a0=cmd status
+ */
+ CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52),
+
+ /*
+ * Subvnic migration from MQ <--> VF.
+ * Enable the LIF migration from MQ to VF and vice versa. MQ and VF
+ * indexes are statically bound at the time of initialization.
+ * Based on the direction of migration, the resources of either MQ
+ * or the VF shall be attached to the LIF.
+ * in: (u32)a0=Direction of Migration
+ * 0=> Migrate to VF
+ * 1=> Migrate to MQ
+ * (u32)a1=VF index (MQ index)
+ */
+ CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
+
+ /*
+ * Register / Deregister the notification block for MQ subvnics
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54),
+
+ /*
+ * Set the predefined mac address as default
+ * in:
+ * (u48)a0=mac addr
+ */
+ CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+ /* Update the provisioning info of the given VIF
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info
+ */
+ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+ /*
+ * Initialization for the devcmd2 interface.
+ * in: (u64) a0=host result buffer physical address
+ * in: (u16) a1=number of entries in result buffer
+ */
+ CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57)
};
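
The CMD_INTR_COAL_CONVERT comment above defines a pair of inverse conversions between microseconds and hardware timer cycles. A worked example with made-up factors (multiplier 2, divisor 5; the real values come from the firmware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mult = 2, div = 5, usec = 125;

        uint32_t cycles = usec * mult / div;    /* 125 us -> 50 cycles */
        uint32_t back   = cycles * div / mult;  /* 50 cycles -> 125 us */

        /* note: integer division truncates when usec * mult is not a
         * multiple of div, so the round trip is only exact here because
         * 125 * 2 divides evenly by 5 */
        printf("%u us -> %u cycles -> %u us\n", usec, cycles, back);
        return 0;
}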
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+#define CMD_OPENF_RQ_ENABLE_THEN_POST 0x2
+
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
@@ -345,4 +466,39 @@ struct vnic_devcmd {
u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
};
+/*
+ * Version 2 of the interface.
+ *
+ * Some things are carried over, notably the vnic_devcmd_cmd enum.
+ */
+
+/*
+ * Flags for vnic_devcmd2.flags
+ */
+
+#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
+
+struct vnic_devcmd2 {
+ u16 pad;
+ u16 flags;
+ u32 cmd; /* same command #defines as original */
+ u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+ u64 results[VNIC_DEVCMD2_NRESULTS];
+ u32 pad;
+ u16 completed_index; /* into copy WQ */
+ u8 error; /* same error codes as original */
+ u8 color; /* 0 or 1 as with completion queues */
+};
+
+#define DEVCMD2_RING_SIZE 32
+#define DEVCMD2_DESC_SIZE 128
+
+#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1)
+
#endif /* _VNIC_DEVCMD_H_ */
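
A quick standalone sanity check (not part of the driver) that the devcmd2 descriptor layouts fit the 128-byte slots, assuming VNIC_DEVCMD_NARGS is 8 — that macro is defined outside this hunk, so the value here is an assumption:

#include <stdint.h>
#include <stdio.h>

#define NARGS 8         /* assumed VNIC_DEVCMD_NARGS, defined elsewhere */
#define DESC_SIZE 128   /* DEVCMD2_DESC_SIZE */

struct cmd2 {                           /* mirrors struct vnic_devcmd2 */
        uint16_t pad;
        uint16_t flags;
        uint32_t cmd;
        uint64_t args[NARGS];
};

struct result2 {                        /* mirrors struct devcmd2_result */
        uint64_t results[NARGS];
        uint32_t pad;
        uint16_t completed_index;
        uint8_t error;
        uint8_t color;
};

int main(void)
{
        /* both come to 72 bytes with NARGS == 8, well under a slot */
        printf("cmd %zu, result %zu, slot %d\n",
               sizeof(struct cmd2), sizeof(struct result2), DESC_SIZE);
        return sizeof(struct cmd2) <= DESC_SIZE &&
               sizeof(struct result2) <= DESC_SIZE ? 0 : 1;
}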
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
index 2d842f79d41a..7c6163f73bd3 100644
--- a/drivers/scsi/fnic/vnic_resource.h
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -41,6 +41,13 @@ enum vnic_res_type {
RES_TYPE_RSVD7,
RES_TYPE_DEVCMD, /* Device command region */
RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+ RES_TYPE_SUBVNIC, /* subvnic resource type */
+ RES_TYPE_MQ_WQ, /* MQ Work queues */
+ RES_TYPE_MQ_RQ, /* MQ Receive queues */
+ RES_TYPE_MQ_CQ, /* MQ Completion queues */
+ RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
+ RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */
+ RES_TYPE_DEVCMD2, /* Device control region */
RES_TYPE_MAX, /* Count of resource types */
};
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
index fd2068f5ae16..6a35b1be0032 100644
--- a/drivers/scsi/fnic/vnic_rq.c
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -27,12 +27,9 @@
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
- vdev = rq->vdev;
-
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
if (!rq->bufs[i]) {
@@ -171,7 +168,7 @@ void vnic_rq_clean(struct vnic_rq *rq,
struct vnic_rq_buf *buf;
u32 fetch_index;
- BUG_ON(ioread32(&rq->ctrl->enable));
+ WARN_ON(ioread32(&rq->ctrl->enable));
buf = rq->to_clean;
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
index a414135460db..015af2cdabaf 100644
--- a/drivers/scsi/fnic/vnic_wq.c
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -24,15 +24,32 @@
#include "vnic_dev.h"
#include "vnic_wq.h"
+
+int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int index, enum vnic_res_type res_type)
+{
+ wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
+
+ if (!wq->ctrl)
+ return -EINVAL;
+
+ return 0;
+}
+
+
+int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+}
+
+
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
- vdev = wq->vdev;
-
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
if (!wq->bufs[i]) {
@@ -111,6 +128,52 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
return 0;
}
+
+int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = 0;
+ wq->vdev = vdev;
+
+ err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
+ if (err) {
+ pr_err("Failed to get devcmd2 resource\n");
+ return err;
+ }
+ vnic_wq_disable(wq);
+
+ err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size);
+ if (err)
+ return err;
+ return 0;
+}
+
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ unsigned int count = wq->ring.desc_count;
+
+ paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &wq->ctrl->ring_base);
+ iowrite32(count, &wq->ctrl->ring_size);
+ iowrite32(fetch_index, &wq->ctrl->fetch_index);
+ iowrite32(posted_index, &wq->ctrl->posted_index);
+ iowrite32(cq_index, &wq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ wq->to_use = wq->to_clean =
+ &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES]
+ [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES];
+}
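
The closing assignment maps the hardware's flat fetch_index onto the two-level bufs[][] array; since devcmd2 resumes at whatever fetch_index the hardware reports rather than at zero, the software cursors must start at that same slot. A standalone sketch of the index split (the entries-per-block value is illustrative):

#include <stdio.h>

#define ENTRIES_PER_BLK 32      /* stands in for VNIC_WQ_BUF_BLK_ENTRIES */

int main(void)
{
        unsigned int fetch_index = 70;
        unsigned int blk = fetch_index / ENTRIES_PER_BLK;       /* block 2 */
        unsigned int off = fetch_index % ENTRIES_PER_BLK;       /* entry 6 */

        printf("index %u -> bufs[%u][%u]\n", fetch_index, blk, off);
        return 0;
}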
+
+
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
index 5cd094f79281..5d1e0a44d94a 100644
--- a/drivers/scsi/fnic/vnic_wq.h
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -33,6 +33,8 @@
#define vnic_wq_service fnic_wq_service
#define vnic_wq_free fnic_wq_free
#define vnic_wq_alloc fnic_wq_alloc
+#define vnic_wq_devcmd2_alloc fnic_wq_devcmd2_alloc
+#define vnic_wq_init_start fnic_wq_init_start
#define vnic_wq_init fnic_wq_init
#define vnic_wq_error_status fnic_wq_error_status
#define vnic_wq_enable fnic_wq_enable
@@ -163,6 +165,12 @@ static inline void vnic_wq_service(struct vnic_wq *wq,
void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
+int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 194c294f9b6c..e7f1dd4f3b66 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -1,6 +1,6 @@
/************************************************************************
* Linux driver for *
- * ICP vortex GmbH: GDT ISA/EISA/PCI Disk Array Controllers *
+ * ICP vortex GmbH: GDT PCI Disk Array Controllers *
* Intel Corporation: Storage RAID Controllers *
* *
* gdth.c *
@@ -32,15 +32,10 @@
************************************************************************/
/* All GDT Disk Array Controllers are fully supported by this driver.
- * This includes the PCI/EISA/ISA SCSI Disk Array Controllers and the
+ * This includes the PCI SCSI Disk Array Controllers and the
* PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
* list of all controller types.
*
- * If you have one or more GDT3000/3020 EISA controllers with
- * controller BIOS disabled, you have to set the IRQ values with the
- * command line option "gdth=irq1,irq2,...", where the irq1,irq2,... are
- * the IRQ values for the EISA controllers.
- *
* After the optional list of IRQ values, other possible
* command line options are:
* disable:Y disable driver
@@ -61,14 +56,12 @@
* access a shared resource from several nodes,
* appropriate controller firmware required
* shared_access:N enable driver reserve/release protocol
- * probe_eisa_isa:Y scan for EISA/ISA controllers
- * probe_eisa_isa:N do not scan for EISA/ISA controllers
* force_dma32:Y use only 32 bit DMA mode
* force_dma32:N use 64 bit DMA mode, if supported
*
* The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
* max_ids:127,rescan:N,hdr_channel:0,
- * shared_access:Y,probe_eisa_isa:N,force_dma32:N".
+ * shared_access:Y,force_dma32:N".
* Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
*
* When loading the gdth driver as a module, the same options are available.
@@ -79,7 +72,7 @@
*
* Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
* max_ids=127 rescan=0 hdr_channel=0 shared_access=0
- * probe_eisa_isa=0 force_dma32=0"
+ * force_dma32=0"
* The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
*/
@@ -96,10 +89,6 @@
* phase: unused
*/
-
-/* interrupt coalescing */
-/* #define INT_COAL */
-
/* statistics */
#define GDTH_STATISTICS
@@ -122,10 +111,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-
-#ifdef GDTH_RTC
-#include <linux/mc146818rtc.h>
-#endif
#include <linux/reboot.h>
#include <asm/dma.h>
@@ -192,79 +177,9 @@ static void gdth_scsi_done(struct scsi_cmnd *scp);
#ifdef DEBUG_GDTH
static u8 DebugState = DEBUG_GDTH;
-
-#ifdef __SERIAL__
-#define MAX_SERBUF 160
-static void ser_init(void);
-static void ser_puts(char *str);
-static void ser_putc(char c);
-static int ser_printk(const char *fmt, ...);
-static char strbuf[MAX_SERBUF+1];
-#ifdef __COM2__
-#define COM_BASE 0x2f8
-#else
-#define COM_BASE 0x3f8
-#endif
-static void ser_init()
-{
- unsigned port=COM_BASE;
-
- outb(0x80,port+3);
- outb(0,port+1);
- /* 19200 Baud, if 9600: outb(12,port) */
- outb(6, port);
- outb(3,port+3);
- outb(0,port+1);
- /*
- ser_putc('I');
- ser_putc(' ');
- */
-}
-
-static void ser_puts(char *str)
-{
- char *ptr;
-
- ser_init();
- for (ptr=str;*ptr;++ptr)
- ser_putc(*ptr);
-}
-
-static void ser_putc(char c)
-{
- unsigned port=COM_BASE;
-
- while ((inb(port+5) & 0x20)==0);
- outb(c,port);
- if (c==0x0a)
- {
- while ((inb(port+5) & 0x20)==0);
- outb(0x0d,port);
- }
-}
-
-static int ser_printk(const char *fmt, ...)
-{
- va_list args;
- int i;
-
- va_start(args,fmt);
- i = vsprintf(strbuf,fmt,args);
- ser_puts(strbuf);
- va_end(args);
- return i;
-}
-
-#define TRACE(a) {if (DebugState==1) {ser_printk a;}}
-#define TRACE2(a) {if (DebugState==1 || DebugState==2) {ser_printk a;}}
-#define TRACE3(a) {if (DebugState!=0) {ser_printk a;}}
-
-#else /* !__SERIAL__ */
#define TRACE(a) {if (DebugState==1) {printk a;}}
#define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}}
#define TRACE3(a) {if (DebugState!=0) {printk a;}}
-#endif
-
#else /* !DEBUG */
#define TRACE(a)
#define TRACE2(a)
@@ -273,9 +188,6 @@ static int ser_printk(const char *fmt, ...)
#ifdef GDTH_STATISTICS
static u32 max_rq=0, max_index=0, max_sg=0;
-#ifdef INT_COAL
-static u32 max_int_coal=0;
-#endif
static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
static struct timer_list gdth_timer;
#endif
@@ -286,12 +198,6 @@ static struct timer_list gdth_timer;
#define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
-#ifdef CONFIG_ISA
-static u8 gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
-#endif
-#if defined(CONFIG_EISA) || defined(CONFIG_ISA)
-static u8 gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
-#endif
static u8 gdth_polling; /* polling if TRUE */
static int gdth_ctr_count = 0; /* controller count */
static LIST_HEAD(gdth_instances); /* controller list */
@@ -325,10 +231,6 @@ static u8 gdth_direction_tab[0x100] = {
};
/* LILO and modprobe/insmod parameters */
-/* IRQ list for GDT3000/3020 EISA controllers */
-static int irq[MAXHA] __initdata =
-{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
/* disable driver flag */
static int disable __initdata = 0;
/* reserve flag */
@@ -348,13 +250,10 @@ static int max_ids = MAXID;
static int rescan = 0;
/* shared access */
static int shared_access = 1;
-/* enable support for EISA and ISA controllers */
-static int probe_eisa_isa = 0;
/* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
static int force_dma32 = 0;
/* parameters for modprobe/insmod */
-module_param_hw_array(irq, int, irq, NULL, 0);
module_param(disable, int, 0);
module_param(reserve_mode, int, 0);
module_param_array(reserve_list, int, NULL, 0);
@@ -363,7 +262,6 @@ module_param(hdr_channel, int, 0);
module_param(max_ids, int, 0);
module_param(rescan, int, 0);
module_param(shared_access, int, 0);
-module_param(probe_eisa_isa, int, 0);
module_param(force_dma32, int, 0);
MODULE_AUTHOR("Achim Leubner");
MODULE_LICENSE("GPL");
@@ -515,45 +413,6 @@ static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
}
}
-/* controller search and initialization functions */
-#ifdef CONFIG_EISA
-static int __init gdth_search_eisa(u16 eisa_adr)
-{
- u32 id;
-
- TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
- id = inl(eisa_adr+ID0REG);
- if (id == GDT3A_ID || id == GDT3B_ID) { /* GDT3000A or GDT3000B */
- if ((inb(eisa_adr+EISAREG) & 8) == 0)
- return 0; /* not EISA configured */
- return 1;
- }
- if (id == GDT3_ID) /* GDT3000 */
- return 1;
-
- return 0;
-}
-#endif /* CONFIG_EISA */
-
-#ifdef CONFIG_ISA
-static int __init gdth_search_isa(u32 bios_adr)
-{
- void __iomem *addr;
- u32 id;
-
- TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr));
- if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) {
- id = readl(addr);
- iounmap(addr);
- if (id == GDT2_ID) /* GDT2000 */
- return 1;
- }
- return 0;
-}
-#endif /* CONFIG_ISA */
-
-#ifdef CONFIG_PCI
-
static bool gdth_search_vortex(u16 device)
{
if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
@@ -656,204 +515,7 @@ static int gdth_pci_init_one(struct pci_dev *pdev,
return 0;
}
-#endif /* CONFIG_PCI */
-#ifdef CONFIG_EISA
-static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha)
-{
- u32 retries,id;
- u8 prot_ver,eisacf,i,irq_found;
-
- TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));
-
- /* disable board interrupts, deinitialize services */
- outb(0xff,eisa_adr+EDOORREG);
- outb(0x00,eisa_adr+EDENABREG);
- outb(0x00,eisa_adr+EINTENABREG);
-
- outb(0xff,eisa_adr+LDOORREG);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (inb(eisa_adr+EDOORREG) != 0xff) {
- if (--retries == 0) {
- printk("GDT-EISA: Initialization error (DEINIT failed)\n");
- return 0;
- }
- gdth_delay(1);
- TRACE2(("wait for DEINIT: retries=%d\n",retries));
- }
- prot_ver = inb(eisa_adr+MAILBOXREG);
- outb(0xff,eisa_adr+EDOORREG);
- if (prot_ver != PROTOCOL_VERSION) {
- printk("GDT-EISA: Illegal protocol version\n");
- return 0;
- }
- ha->bmic = eisa_adr;
- ha->brd_phys = (u32)eisa_adr >> 12;
-
- outl(0,eisa_adr+MAILBOXREG);
- outl(0,eisa_adr+MAILBOXREG+4);
- outl(0,eisa_adr+MAILBOXREG+8);
- outl(0,eisa_adr+MAILBOXREG+12);
-
- /* detect IRQ */
- if ((id = inl(eisa_adr+ID0REG)) == GDT3_ID) {
- ha->oem_id = OEM_ID_ICP;
- ha->type = GDT_EISA;
- ha->stype = id;
- outl(1,eisa_adr+MAILBOXREG+8);
- outb(0xfe,eisa_adr+LDOORREG);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (inb(eisa_adr+EDOORREG) != 0xfe) {
- if (--retries == 0) {
- printk("GDT-EISA: Initialization error (get IRQ failed)\n");
- return 0;
- }
- gdth_delay(1);
- }
- ha->irq = inb(eisa_adr+MAILBOXREG);
- outb(0xff,eisa_adr+EDOORREG);
- TRACE2(("GDT3000/3020: IRQ=%d\n",ha->irq));
- /* check the result */
- if (ha->irq == 0) {
- TRACE2(("Unknown IRQ, use IRQ table from cmd line !\n"));
- for (i = 0, irq_found = FALSE;
- i < MAXHA && irq[i] != 0xff; ++i) {
- if (irq[i]==10 || irq[i]==11 || irq[i]==12 || irq[i]==14) {
- irq_found = TRUE;
- break;
- }
- }
- if (irq_found) {
- ha->irq = irq[i];
- irq[i] = 0;
- printk("GDT-EISA: Can not detect controller IRQ,\n");
- printk("Use IRQ setting from command line (IRQ = %d)\n",
- ha->irq);
- } else {
- printk("GDT-EISA: Initialization error (unknown IRQ), Enable\n");
- printk("the controller BIOS or use command line parameters\n");
- return 0;
- }
- }
- } else {
- eisacf = inb(eisa_adr+EISAREG) & 7;
- if (eisacf > 4) /* level triggered */
- eisacf -= 4;
- ha->irq = gdth_irq_tab[eisacf];
- ha->oem_id = OEM_ID_ICP;
- ha->type = GDT_EISA;
- ha->stype = id;
- }
-
- ha->dma64_support = 0;
- return 1;
-}
-#endif /* CONFIG_EISA */
-
-#ifdef CONFIG_ISA
-static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
-{
- register gdt2_dpram_str __iomem *dp2_ptr;
- int i;
- u8 irq_drq,prot_ver;
- u32 retries;
-
- TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));
-
- ha->brd = ioremap(bios_adr, sizeof(gdt2_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-ISA: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- dp2_ptr = ha->brd;
- writeb(1, &dp2_ptr->io.memlock); /* switch off write protection */
- /* reset interface area */
- memset_io(&dp2_ptr->u, 0, sizeof(dp2_ptr->u));
- if (readl(&dp2_ptr->u) != 0) {
- printk("GDT-ISA: Initialization error (DPMEM write error)\n");
- iounmap(ha->brd);
- return 0;
- }
-
- /* disable board interrupts, read DRQ and IRQ */
- writeb(0xff, &dp2_ptr->io.irqdel);
- writeb(0x00, &dp2_ptr->io.irqen);
- writeb(0x00, &dp2_ptr->u.ic.S_Status);
- writeb(0x00, &dp2_ptr->u.ic.Cmd_Index);
-
- irq_drq = readb(&dp2_ptr->io.rq);
- for (i=0; i<3; ++i) {
- if ((irq_drq & 1)==0)
- break;
- irq_drq >>= 1;
- }
- ha->drq = gdth_drq_tab[i];
-
- irq_drq = readb(&dp2_ptr->io.rq) >> 3;
- for (i=1; i<5; ++i) {
- if ((irq_drq & 1)==0)
- break;
- irq_drq >>= 1;
- }
- ha->irq = gdth_irq_tab[i];
-
- /* deinitialize services */
- writel(bios_adr, &dp2_ptr->u.ic.S_Info[0]);
- writeb(0xff, &dp2_ptr->u.ic.S_Cmd_Indx);
- writeb(0, &dp2_ptr->io.event);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp2_ptr->u.ic.S_Status) != 0xff) {
- if (--retries == 0) {
- printk("GDT-ISA: Initialization error (DEINIT failed)\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]);
- writeb(0, &dp2_ptr->u.ic.Status);
- writeb(0xff, &dp2_ptr->io.irqdel);
- if (prot_ver != PROTOCOL_VERSION) {
- printk("GDT-ISA: Illegal protocol version\n");
- iounmap(ha->brd);
- return 0;
- }
-
- ha->oem_id = OEM_ID_ICP;
- ha->type = GDT_ISA;
- ha->ic_all_size = sizeof(dp2_ptr->u);
- ha->stype= GDT2_ID;
- ha->brd_phys = bios_adr >> 4;
-
- /* special request to controller BIOS */
- writel(0x00, &dp2_ptr->u.ic.S_Info[0]);
- writel(0x00, &dp2_ptr->u.ic.S_Info[1]);
- writel(0x01, &dp2_ptr->u.ic.S_Info[2]);
- writel(0x00, &dp2_ptr->u.ic.S_Info[3]);
- writeb(0xfe, &dp2_ptr->u.ic.S_Cmd_Indx);
- writeb(0, &dp2_ptr->io.event);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp2_ptr->u.ic.S_Status) != 0xfe) {
- if (--retries == 0) {
- printk("GDT-ISA: Initialization error\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- writeb(0, &dp2_ptr->u.ic.Status);
- writeb(0xff, &dp2_ptr->io.irqdel);
-
- ha->dma64_support = 0;
- return 1;
-}
-#endif /* CONFIG_ISA */
-
-#ifdef CONFIG_PCI
static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
gdth_ha_str *ha)
{
@@ -1228,30 +890,19 @@ static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
return 1;
}
-#endif /* CONFIG_PCI */
/* controller protocol functions */
static void gdth_enable_int(gdth_ha_str *ha)
{
unsigned long flags;
- gdt2_dpram_str __iomem *dp2_ptr;
gdt6_dpram_str __iomem *dp6_ptr;
gdt6m_dpram_str __iomem *dp6m_ptr;
TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
spin_lock_irqsave(&ha->smp_lock, flags);
- if (ha->type == GDT_EISA) {
- outb(0xff, ha->bmic + EDOORREG);
- outb(0xff, ha->bmic + EDENABREG);
- outb(0x01, ha->bmic + EINTENABREG);
- } else if (ha->type == GDT_ISA) {
- dp2_ptr = ha->brd;
- writeb(1, &dp2_ptr->io.irqdel);
- writeb(0, &dp2_ptr->u.ic.Cmd_Index);
- writeb(1, &dp2_ptr->io.irqen);
- } else if (ha->type == GDT_PCI) {
+ if (ha->type == GDT_PCI) {
dp6_ptr = ha->brd;
writeb(1, &dp6_ptr->io.irqdel);
writeb(0, &dp6_ptr->u.ic.Cmd_Index);
@@ -1275,12 +926,7 @@ static u8 gdth_get_status(gdth_ha_str *ha)
TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
- if (ha->type == GDT_EISA)
- IStatus = inb((u16)ha->bmic + EDOORREG);
- else if (ha->type == GDT_ISA)
- IStatus =
- readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
- else if (ha->type == GDT_PCI)
+ if (ha->type == GDT_PCI)
IStatus =
readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
else if (ha->type == GDT_PCINEW)
@@ -1298,11 +944,7 @@ static int gdth_test_busy(gdth_ha_str *ha)
TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));
- if (ha->type == GDT_EISA)
- gdtsema0 = (int)inb(ha->bmic + SEMA0REG);
- else if (ha->type == GDT_ISA)
- gdtsema0 = (int)readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
- else if (ha->type == GDT_PCI)
+ if (ha->type == GDT_PCI)
gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
else if (ha->type == GDT_PCINEW)
gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
@@ -1336,11 +978,7 @@ static void gdth_set_sema0(gdth_ha_str *ha)
{
TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));
- if (ha->type == GDT_EISA) {
- outb(1, ha->bmic + SEMA0REG);
- } else if (ha->type == GDT_ISA) {
- writeb(1, &((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
- } else if (ha->type == GDT_PCI) {
+ if (ha->type == GDT_PCI) {
writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
} else if (ha->type == GDT_PCINEW) {
outb(1, PTR2USHORT(&ha->plx->sema0_reg));
@@ -1356,7 +994,6 @@ static void gdth_copy_command(gdth_ha_str *ha)
register gdt6m_dpram_str __iomem *dp6m_ptr;
register gdt6c_dpram_str __iomem *dp6c_ptr;
gdt6_dpram_str __iomem *dp6_ptr;
- gdt2_dpram_str __iomem *dp2_ptr;
u16 cp_count,dp_offset,cmd_no;
TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
@@ -1367,8 +1004,6 @@ static void gdth_copy_command(gdth_ha_str *ha)
cmd_ptr = ha->pccb;
++ha->cmd_cnt;
- if (ha->type == GDT_EISA)
- return; /* no DPMEM, no copy */
/* set cpcount dword aligned */
if (cp_count & 3)
@@ -1377,14 +1012,7 @@ static void gdth_copy_command(gdth_ha_str *ha)
ha->cmd_offs_dpmem += cp_count;
/* set offset and service, copy command to DPMEM */
- if (ha->type == GDT_ISA) {
- dp2_ptr = ha->brd;
- writew(dp_offset + DPMEM_COMMAND_OFFSET,
- &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((u16)cmd_ptr->Service,
- &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
- memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
- } else if (ha->type == GDT_PCI) {
+ if (ha->type == GDT_PCI) {
dp6_ptr = ha->brd;
writew(dp_offset + DPMEM_COMMAND_OFFSET,
&dp6_ptr->u.ic.comm_queue[cmd_no].offset);
@@ -1430,13 +1058,7 @@ static void gdth_release_event(gdth_ha_str *ha)
if (ha->pccb->OpCode == GDT_INIT)
ha->pccb->Service |= 0x80;
- if (ha->type == GDT_EISA) {
- if (ha->pccb->OpCode == GDT_INIT) /* store DMA buffer */
- outl(ha->ccb_phys, ha->bmic + MAILBOXREG);
- outb(ha->pccb->Service, ha->bmic + LDOORREG);
- } else if (ha->type == GDT_ISA) {
- writeb(0, &((gdt2_dpram_str __iomem *)ha->brd)->io.event);
- } else if (ha->type == GDT_PCI) {
+ if (ha->type == GDT_PCI) {
writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
} else if (ha->type == GDT_PCINEW) {
outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
@@ -1560,15 +1182,7 @@ static int gdth_search_drives(gdth_ha_str *ha)
gdth_arcdl_str *alst;
gdth_alist_str *alst2;
gdth_oem_str_ioctl *oemstr;
-#ifdef INT_COAL
- gdth_perf_modes *pmod;
-#endif
-#ifdef GDTH_RTC
- u8 rtc[12];
- unsigned long flags;
-#endif
-
TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
ok = 0;
@@ -1588,29 +1202,6 @@ static int gdth_search_drives(gdth_ha_str *ha)
}
TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
-#ifdef GDTH_RTC
- /* read realtime clock info, send to controller */
- /* 1. wait for the falling edge of update flag */
- spin_lock_irqsave(&rtc_lock, flags);
- for (j = 0; j < 1000000; ++j)
- if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
- break;
- for (j = 0; j < 1000000; ++j)
- if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
- break;
- /* 2. read info */
- do {
- for (j = 0; j < 12; ++j)
- rtc[j] = CMOS_READ(j);
- } while (rtc[0] != CMOS_READ(0));
- spin_unlock_irqrestore(&rtc_lock, flags);
- TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0],
- *(u32 *)&rtc[4], *(u32 *)&rtc[8]));
- /* 3. send to controller firmware */
- gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0],
- *(u32 *)&rtc[4], *(u32 *)&rtc[8]);
-#endif
-
/* unfreeze all IOs */
gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0);
@@ -1633,35 +1224,6 @@ static int gdth_search_drives(gdth_ha_str *ha)
cdev_cnt = (u16)ha->info;
ha->fw_vers = ha->service;
-#ifdef INT_COAL
- if (ha->type == GDT_PCIMPR) {
- /* set perf. modes */
- pmod = (gdth_perf_modes *)ha->pscratch;
- pmod->version = 1;
- pmod->st_mode = 1; /* enable one status buffer */
- *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
- pmod->st_buff_indx1 = COALINDEX;
- pmod->st_buff_addr2 = 0;
- pmod->st_buff_u_addr2 = 0;
- pmod->st_buff_indx2 = 0;
- pmod->st_buff_size = sizeof(gdth_coal_status) * MAXOFFSETS;
- pmod->cmd_mode = 0; // disable all cmd buffers
- pmod->cmd_buff_addr1 = 0;
- pmod->cmd_buff_u_addr1 = 0;
- pmod->cmd_buff_indx1 = 0;
- pmod->cmd_buff_addr2 = 0;
- pmod->cmd_buff_u_addr2 = 0;
- pmod->cmd_buff_indx2 = 0;
- pmod->cmd_buff_size = 0;
- pmod->reserved1 = 0;
- pmod->reserved2 = 0;
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SET_PERF_MODES,
- INVALID_CHANNEL,sizeof(gdth_perf_modes))) {
- printk("GDT-HA %d: Interrupt coalescing activated\n", ha->hanum);
- }
- }
-#endif
-
/* detect number of buses - try new IOCTL */
iocr = (gdth_raw_iochan_str *)ha->pscratch;
iocr->hdr.version = 0xffffffff;
@@ -2433,9 +1995,6 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
scp->cmnd[0],scp->cmd_len,hdrive));
- if (ha->type==GDT_EISA && ha->cmd_cnt>0)
- return 0;
-
mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
/* test for READ_16, WRITE_16 if !mode64 ? ---
not required, should not occur due to error return on
@@ -2518,9 +2077,9 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
if (scsi_bufflen(scp)) {
cmndinfo->dma_dir = (read_write == 1 ?
- PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
- sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
- cmndinfo->dma_dir);
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
+ scsi_sg_count(scp), cmndinfo->dma_dir);
if (mode64) {
struct scatterlist *sl;
@@ -2528,12 +2087,6 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
cmdp->u.cache64.sg_canz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
-#ifdef GDTH_DMA_STATISTICS
- if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff)
- ha->dma64_cnt++;
- else
- ha->dma32_cnt++;
-#endif
cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
}
} else {
@@ -2543,9 +2096,6 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
cmdp->u.cache.sg_canz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
-#ifdef GDTH_DMA_STATISTICS
- ha->dma32_cnt++;
-#endif
cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
}
}
@@ -2603,8 +2153,6 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
dma_addr_t sense_paddr;
int cmd_index, sgcnt, mode64;
u8 t,l;
- struct page *page;
- unsigned long offset;
struct gdth_cmndinfo *cmndinfo;
t = scp->device->id;
@@ -2613,9 +2161,6 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
scp->cmnd[0],b,t,l));
- if (ha->type==GDT_EISA && ha->cmd_cnt>0)
- return 0;
-
mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;
cmdp->Service = SCSIRAWSERVICE;
@@ -2649,10 +2194,8 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
}
} else {
- page = virt_to_page(scp->sense_buffer);
- offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK;
- sense_paddr = pci_map_page(ha->pdev,page,offset,
- 16,PCI_DMA_FROMDEVICE);
+ sense_paddr = dma_map_single(&ha->pdev->dev, scp->sense_buffer, 16,
+ DMA_FROM_DEVICE);
cmndinfo->sense_paddr = sense_paddr;
cmdp->OpCode = GDT_WRITE; /* always */
@@ -2693,9 +2236,9 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
}
if (scsi_bufflen(scp)) {
- cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL;
- sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
- cmndinfo->dma_dir);
+ cmndinfo->dma_dir = DMA_BIDIRECTIONAL;
+ sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
+ scsi_sg_count(scp), cmndinfo->dma_dir);
if (mode64) {
struct scatterlist *sl;
@@ -2703,12 +2246,6 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
cmdp->u.raw64.sg_ranz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
-#ifdef GDTH_DMA_STATISTICS
- if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff)
- ha->dma64_cnt++;
- else
- ha->dma32_cnt++;
-#endif
cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
}
} else {
@@ -2718,9 +2255,6 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
cmdp->u.raw.sg_ranz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
-#ifdef GDTH_DMA_STATISTICS
- ha->dma32_cnt++;
-#endif
cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
}
}
@@ -2778,9 +2312,6 @@ static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
cmdp= ha->pccb;
TRACE2(("gdth_special_cmd(): "));
- if (ha->type==GDT_EISA && ha->cmd_cnt>0)
- return 0;
-
*cmdp = *cmndinfo->internal_cmd_str;
cmdp->RequestBuffer = scp;
@@ -2959,18 +2490,11 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
{
gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
gdt6_dpram_str __iomem *dp6_ptr;
- gdt2_dpram_str __iomem *dp2_ptr;
struct scsi_cmnd *scp;
int rval, i;
u8 IStatus;
u16 Service;
unsigned long flags = 0;
-#ifdef INT_COAL
- int coalesced = FALSE;
- int next = FALSE;
- gdth_coal_status *pcs = NULL;
- int act_int_coal = 0;
-#endif
TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
@@ -2997,53 +2521,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
++act_ints;
#endif
-#ifdef INT_COAL
- /* See if the fw is returning coalesced status */
- if (IStatus == COALINDEX) {
- /* Coalesced status. Setup the initial status
- buffer pointer and flags */
- pcs = ha->coal_stat;
- coalesced = TRUE;
- next = TRUE;
- }
-
- do {
- if (coalesced) {
- /* For coalesced requests all status
- information is found in the status buffer */
- IStatus = (u8)(pcs->status & 0xff);
- }
-#endif
-
- if (ha->type == GDT_EISA) {
- if (IStatus & 0x80) { /* error flag */
- IStatus &= ~0x80;
- ha->status = inw(ha->bmic + MAILBOXREG+8);
- TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
- } else /* no error */
- ha->status = S_OK;
- ha->info = inl(ha->bmic + MAILBOXREG+12);
- ha->service = inw(ha->bmic + MAILBOXREG+10);
- ha->info2 = inl(ha->bmic + MAILBOXREG+4);
-
- outb(0xff, ha->bmic + EDOORREG); /* acknowledge interrupt */
- outb(0x00, ha->bmic + SEMA1REG); /* reset status semaphore */
- } else if (ha->type == GDT_ISA) {
- dp2_ptr = ha->brd;
- if (IStatus & 0x80) { /* error flag */
- IStatus &= ~0x80;
- ha->status = readw(&dp2_ptr->u.ic.Status);
- TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
- } else /* no error */
- ha->status = S_OK;
- ha->info = readl(&dp2_ptr->u.ic.Info[0]);
- ha->service = readw(&dp2_ptr->u.ic.Service);
- ha->info2 = readl(&dp2_ptr->u.ic.Info[1]);
-
- writeb(0xff, &dp2_ptr->io.irqdel); /* acknowledge interrupt */
- writeb(0, &dp2_ptr->u.ic.Cmd_Index);/* reset command index */
- writeb(0, &dp2_ptr->io.Sema1); /* reset status semaphore */
- } else if (ha->type == GDT_PCI) {
+ if (ha->type == GDT_PCI) {
dp6_ptr = ha->brd;
if (IStatus & 0x80) { /* error flag */
IStatus &= ~0x80;
@@ -3075,28 +2553,15 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
dp6m_ptr = ha->brd;
if (IStatus & 0x80) { /* error flag */
IStatus &= ~0x80;
-#ifdef INT_COAL
- if (coalesced)
- ha->status = pcs->ext_status & 0xffff;
- else
-#endif
- ha->status = readw(&dp6m_ptr->i960r.status);
+ ha->status = readw(&dp6m_ptr->i960r.status);
TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
} else /* no error */
ha->status = S_OK;
-#ifdef INT_COAL
- /* get information */
- if (coalesced) {
- ha->info = pcs->info0;
- ha->info2 = pcs->info1;
- ha->service = (pcs->ext_status >> 16) & 0xffff;
- } else
-#endif
- {
- ha->info = readl(&dp6m_ptr->i960r.info[0]);
- ha->service = readw(&dp6m_ptr->i960r.service);
- ha->info2 = readl(&dp6m_ptr->i960r.info[1]);
- }
+
+ ha->info = readl(&dp6m_ptr->i960r.info[0]);
+ ha->service = readw(&dp6m_ptr->i960r.service);
+ ha->info2 = readl(&dp6m_ptr->i960r.info[1]);
+
/* event string */
if (IStatus == ASYNCINDEX) {
if (ha->service != SCREENSERVICE &&
@@ -3111,15 +2576,8 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
}
}
}
-#ifdef INT_COAL
- /* Make sure that non coalesced interrupts get cleared
- before being handled by gdth_async_event/gdth_sync_event */
- if (!coalesced)
-#endif
- {
- writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
- writeb(0, &dp6m_ptr->i960r.sema1_reg);
- }
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(0, &dp6m_ptr->i960r.sema1_reg);
} else {
TRACE2(("gdth_interrupt() unknown controller type\n"));
if (!gdth_polling)
@@ -3182,31 +2640,6 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
gdth_scsi_done(scp);
}
-#ifdef INT_COAL
- if (coalesced) {
- /* go to the next status in the status buffer */
- ++pcs;
-#ifdef GDTH_STATISTICS
- ++act_int_coal;
- if (act_int_coal > max_int_coal) {
- max_int_coal = act_int_coal;
- printk("GDT: max_int_coal = %d\n",(u16)max_int_coal);
- }
-#endif
- /* see if there is another status */
- if (pcs->status == 0)
- /* Stop the coalesce loop */
- next = FALSE;
- }
- } while (next);
-
- /* coalescing only for new GDT_PCIMPR controllers available */
- if (ha->type == GDT_PCIMPR && coalesced) {
- writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
- writeb(0, &dp6m_ptr->i960r.sema1_reg);
- }
-#endif
-
gdth_next(ha);
return IRQ_HANDLED;
}
@@ -3313,12 +2746,12 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
return 2;
}
if (scsi_bufflen(scp))
- pci_unmap_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
+ dma_unmap_sg(&ha->pdev->dev, scsi_sglist(scp), scsi_sg_count(scp),
cmndinfo->dma_dir);
if (cmndinfo->sense_paddr)
- pci_unmap_page(ha->pdev, cmndinfo->sense_paddr, 16,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&ha->pdev->dev, cmndinfo->sense_paddr, 16,
+ DMA_FROM_DEVICE);
if (ha->status == S_OK) {
cmndinfo->status = S_OK;
@@ -3610,12 +3043,7 @@ static int gdth_async_event(gdth_ha_str *ha)
+ sizeof(u64);
ha->cmd_cnt = 0;
gdth_copy_command(ha);
- if (ha->type == GDT_EISA)
- printk("[EISA slot %d] ",(u16)ha->brd_phys);
- else if (ha->type == GDT_ISA)
- printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys);
- else
- printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
+ printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
(u16)((ha->brd_phys>>3)&0x1f));
gdth_release_event(ha);
}
@@ -3756,23 +3184,12 @@ static inline void gdth_timer_init(void)
static void __init internal_setup(char *str,int *ints)
{
- int i, argc;
+ int i;
char *cur_str, *argv;
TRACE2(("internal_setup() str %s ints[0] %d\n",
str ? str:"NULL", ints ? ints[0]:0));
- /* read irq[] from ints[] */
- if (ints) {
- argc = ints[0];
- if (argc > 0) {
- if (argc > MAXHA)
- argc = MAXHA;
- for (i = 0; i < argc; ++i)
- irq[i] = ints[i+1];
- }
- }
-
/* analyse string */
argv = str;
while (argv && (cur_str = strchr(argv, ':'))) {
@@ -3799,8 +3216,6 @@ static void __init internal_setup(char *str,int *ints)
rescan = val;
else if (!strncmp(argv, "shared_access:", 14))
shared_access = val;
- else if (!strncmp(argv, "probe_eisa_isa:", 15))
- probe_eisa_isa = val;
else if (!strncmp(argv, "reserve_list:", 13)) {
reserve_list[0] = val;
for (i = 1; i < MAX_RES_ARGS; i++) {
@@ -3847,18 +3262,7 @@ static const char *gdth_ctr_name(gdth_ha_str *ha)
{
TRACE2(("gdth_ctr_name()\n"));
- if (ha->type == GDT_EISA) {
- switch (ha->stype) {
- case GDT3_ID:
- return("GDT3000/3020");
- case GDT3A_ID:
- return("GDT3000A/3020A/3050A");
- case GDT3B_ID:
- return("GDT3000B/3010A");
- }
- } else if (ha->type == GDT_ISA) {
- return("GDT2000/2020");
- } else if (ha->type == GDT_PCI) {
+ if (ha->type == GDT_PCI) {
switch (ha->pdev->device) {
case PCI_DEVICE_ID_VORTEX_GDT60x0:
return("GDT6000/6020/6050");
@@ -4155,131 +3559,147 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
return 0;
}
-static int ioc_general(void __user *arg, char *cmnd)
+static void gdth_ioc_cacheservice(gdth_ha_str *ha, gdth_ioctl_general *gen,
+ u64 paddr)
{
- gdth_ioctl_general gen;
- char *buf = NULL;
- u64 paddr;
- gdth_ha_str *ha;
- int rval;
-
- if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
- return -EFAULT;
- ha = gdth_find_ha(gen.ionode);
- if (!ha)
- return -EFAULT;
-
- if (gen.data_len > INT_MAX)
- return -EINVAL;
- if (gen.sense_len > INT_MAX)
- return -EINVAL;
- if (gen.data_len + gen.sense_len > INT_MAX)
- return -EINVAL;
-
- if (gen.data_len + gen.sense_len != 0) {
- if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
- FALSE, &paddr)))
- return -EFAULT;
- if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
- gen.data_len + gen.sense_len)) {
- gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
- return -EFAULT;
- }
+ if (ha->cache_feat & GDT_64BIT) {
+ /* copy elements from 32-bit IOCTL structure */
+ gen->command.u.cache64.BlockCnt = gen->command.u.cache.BlockCnt;
+ gen->command.u.cache64.BlockNo = gen->command.u.cache.BlockNo;
+ gen->command.u.cache64.DeviceNo = gen->command.u.cache.DeviceNo;
+
+ if (ha->cache_feat & SCATTER_GATHER) {
+ gen->command.u.cache64.DestAddr = (u64)-1;
+ gen->command.u.cache64.sg_canz = 1;
+ gen->command.u.cache64.sg_lst[0].sg_ptr = paddr;
+ gen->command.u.cache64.sg_lst[0].sg_len = gen->data_len;
+ gen->command.u.cache64.sg_lst[1].sg_len = 0;
+ } else {
+ gen->command.u.cache64.DestAddr = paddr;
+ gen->command.u.cache64.sg_canz = 0;
+ }
+ } else {
+ if (ha->cache_feat & SCATTER_GATHER) {
+ gen->command.u.cache.DestAddr = 0xffffffff;
+ gen->command.u.cache.sg_canz = 1;
+ gen->command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
+ gen->command.u.cache.sg_lst[0].sg_len = gen->data_len;
+ gen->command.u.cache.sg_lst[1].sg_len = 0;
+ } else {
+ gen->command.u.cache.DestAddr = paddr;
+ gen->command.u.cache.sg_canz = 0;
+ }
+ }
+}
- if (gen.command.OpCode == GDT_IOCTL) {
- gen.command.u.ioctl.p_param = paddr;
- } else if (gen.command.Service == CACHESERVICE) {
- if (ha->cache_feat & GDT_64BIT) {
- /* copy elements from 32-bit IOCTL structure */
- gen.command.u.cache64.BlockCnt = gen.command.u.cache.BlockCnt;
- gen.command.u.cache64.BlockNo = gen.command.u.cache.BlockNo;
- gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo;
- /* addresses */
- if (ha->cache_feat & SCATTER_GATHER) {
- gen.command.u.cache64.DestAddr = (u64)-1;
- gen.command.u.cache64.sg_canz = 1;
- gen.command.u.cache64.sg_lst[0].sg_ptr = paddr;
- gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len;
- gen.command.u.cache64.sg_lst[1].sg_len = 0;
- } else {
- gen.command.u.cache64.DestAddr = paddr;
- gen.command.u.cache64.sg_canz = 0;
- }
- } else {
- if (ha->cache_feat & SCATTER_GATHER) {
- gen.command.u.cache.DestAddr = 0xffffffff;
- gen.command.u.cache.sg_canz = 1;
- gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
- gen.command.u.cache.sg_lst[0].sg_len = gen.data_len;
- gen.command.u.cache.sg_lst[1].sg_len = 0;
- } else {
- gen.command.u.cache.DestAddr = paddr;
- gen.command.u.cache.sg_canz = 0;
- }
- }
- } else if (gen.command.Service == SCSIRAWSERVICE) {
- if (ha->raw_feat & GDT_64BIT) {
- /* copy elements from 32-bit IOCTL structure */
- char cmd[16];
- gen.command.u.raw64.sense_len = gen.command.u.raw.sense_len;
- gen.command.u.raw64.bus = gen.command.u.raw.bus;
- gen.command.u.raw64.lun = gen.command.u.raw.lun;
- gen.command.u.raw64.target = gen.command.u.raw.target;
- memcpy(cmd, gen.command.u.raw.cmd, 16);
- memcpy(gen.command.u.raw64.cmd, cmd, 16);
- gen.command.u.raw64.clen = gen.command.u.raw.clen;
- gen.command.u.raw64.sdlen = gen.command.u.raw.sdlen;
- gen.command.u.raw64.direction = gen.command.u.raw.direction;
- /* addresses */
- if (ha->raw_feat & SCATTER_GATHER) {
- gen.command.u.raw64.sdata = (u64)-1;
- gen.command.u.raw64.sg_ranz = 1;
- gen.command.u.raw64.sg_lst[0].sg_ptr = paddr;
- gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len;
- gen.command.u.raw64.sg_lst[1].sg_len = 0;
- } else {
- gen.command.u.raw64.sdata = paddr;
- gen.command.u.raw64.sg_ranz = 0;
+static void gdth_ioc_scsiraw(gdth_ha_str *ha, gdth_ioctl_general *gen,
+ u64 paddr)
+{
+ if (ha->raw_feat & GDT_64BIT) {
+ /* copy elements from 32-bit IOCTL structure */
+ char cmd[16];
+
+ gen->command.u.raw64.sense_len = gen->command.u.raw.sense_len;
+ gen->command.u.raw64.bus = gen->command.u.raw.bus;
+ gen->command.u.raw64.lun = gen->command.u.raw.lun;
+ gen->command.u.raw64.target = gen->command.u.raw.target;
+ memcpy(cmd, gen->command.u.raw.cmd, 16);
+ memcpy(gen->command.u.raw64.cmd, cmd, 16);
+ gen->command.u.raw64.clen = gen->command.u.raw.clen;
+ gen->command.u.raw64.sdlen = gen->command.u.raw.sdlen;
+ gen->command.u.raw64.direction = gen->command.u.raw.direction;
+
+ /* addresses */
+ if (ha->raw_feat & SCATTER_GATHER) {
+ gen->command.u.raw64.sdata = (u64)-1;
+ gen->command.u.raw64.sg_ranz = 1;
+ gen->command.u.raw64.sg_lst[0].sg_ptr = paddr;
+ gen->command.u.raw64.sg_lst[0].sg_len = gen->data_len;
+ gen->command.u.raw64.sg_lst[1].sg_len = 0;
+ } else {
+ gen->command.u.raw64.sdata = paddr;
+ gen->command.u.raw64.sg_ranz = 0;
}
- gen.command.u.raw64.sense_data = paddr + gen.data_len;
- } else {
- if (ha->raw_feat & SCATTER_GATHER) {
- gen.command.u.raw.sdata = 0xffffffff;
- gen.command.u.raw.sg_ranz = 1;
- gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
- gen.command.u.raw.sg_lst[0].sg_len = gen.data_len;
- gen.command.u.raw.sg_lst[1].sg_len = 0;
- } else {
- gen.command.u.raw.sdata = paddr;
- gen.command.u.raw.sg_ranz = 0;
+
+ gen->command.u.raw64.sense_data = paddr + gen->data_len;
+ } else {
+ if (ha->raw_feat & SCATTER_GATHER) {
+ gen->command.u.raw.sdata = 0xffffffff;
+ gen->command.u.raw.sg_ranz = 1;
+ gen->command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
+ gen->command.u.raw.sg_lst[0].sg_len = gen->data_len;
+ gen->command.u.raw.sg_lst[1].sg_len = 0;
+ } else {
+ gen->command.u.raw.sdata = paddr;
+ gen->command.u.raw.sg_ranz = 0;
}
- gen.command.u.raw.sense_data = (u32)paddr + gen.data_len;
- }
- } else {
- gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
- return -EFAULT;
- }
- }
- rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info);
- if (rval < 0) {
- gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
- return rval;
- }
- gen.status = rval;
+ gen->command.u.raw.sense_data = (u32)paddr + gen->data_len;
+ }
+}
- if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
- gen.data_len + gen.sense_len)) {
- gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
- return -EFAULT;
- }
- if (copy_to_user(arg, &gen,
- sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str))) {
- gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
- return -EFAULT;
- }
- gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
- return 0;
+static int ioc_general(void __user *arg, char *cmnd)
+{
+ gdth_ioctl_general gen;
+ gdth_ha_str *ha;
+ char *buf = NULL;
+ dma_addr_t paddr;
+ int rval;
+
+ if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
+ return -EFAULT;
+ ha = gdth_find_ha(gen.ionode);
+ if (!ha)
+ return -EFAULT;
+
+ if (gen.data_len > INT_MAX)
+ return -EINVAL;
+ if (gen.sense_len > INT_MAX)
+ return -EINVAL;
+ if (gen.data_len + gen.sense_len > INT_MAX)
+ return -EINVAL;
+
+ if (gen.data_len + gen.sense_len > 0) {
+ buf = dma_alloc_coherent(&ha->pdev->dev,
+ gen.data_len + gen.sense_len, &paddr,
+ GFP_KERNEL);
+ if (!buf)
+ return -EFAULT;
+
+ rval = -EFAULT;
+ if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
+ gen.data_len + gen.sense_len))
+ goto out_free_buf;
+
+ if (gen.command.OpCode == GDT_IOCTL)
+ gen.command.u.ioctl.p_param = paddr;
+ else if (gen.command.Service == CACHESERVICE)
+ gdth_ioc_cacheservice(ha, &gen, paddr);
+ else if (gen.command.Service == SCSIRAWSERVICE)
+ gdth_ioc_scsiraw(ha, &gen, paddr);
+ else
+ goto out_free_buf;
+ }
+
+ rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout,
+ &gen.info);
+ if (rval < 0)
+ goto out_free_buf;
+ gen.status = rval;
+
+ rval = -EFAULT;
+ if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
+ gen.data_len + gen.sense_len))
+ goto out_free_buf;
+ if (copy_to_user(arg, &gen,
+ sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str)))
+ goto out_free_buf;
+
+ rval = 0;
+out_free_buf:
+ dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len, buf,
+ paddr);
+ return rval;
}
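
The rewritten ioc_general() drops the old gdth_ioctl_alloc()/gdth_ioctl_free() helpers in favour of one dma_alloc_coherent() buffer that covers the data and sense areas back to back, guarded by explicit INT_MAX checks so the user-controlled lengths cannot overflow when summed. A minimal sketch of that allocation discipline (a hypothetical helper, not part of the patch):

/*
 * Sketch: validate user-supplied lengths, then grab one coherent
 * buffer for data + sense. Returns NULL on overflow or failure.
 */
static void *gdth_alloc_ioc_buf(struct device *dev, u32 data_len,
				u32 sense_len, dma_addr_t *paddr)
{
	if (data_len > INT_MAX || sense_len > INT_MAX ||
	    data_len + sense_len > INT_MAX)
		return NULL;
	if (data_len + sense_len == 0)
		return NULL;
	return dma_alloc_coherent(dev, data_len + sense_len, paddr,
				  GFP_KERNEL);
}
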
static int ioc_hdrlist(void __user *arg, char *cmnd)
@@ -4514,22 +3934,17 @@ static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
(NULL == (ha = gdth_find_ha(ctrt.ionode))))
return -EFAULT;
- if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
- ctrt.type = (u8)((ha->stype>>20) - 0x10);
+ if (ha->type != GDT_PCIMPR) {
+ ctrt.type = (u8)((ha->stype<<4) + 6);
} else {
- if (ha->type != GDT_PCIMPR) {
- ctrt.type = (u8)((ha->stype<<4) + 6);
- } else {
- ctrt.type =
- (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
- if (ha->stype >= 0x300)
- ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
- else
- ctrt.ext_type = 0x6000 | ha->stype;
- }
- ctrt.device_id = ha->pdev->device;
- ctrt.sub_device_id = ha->pdev->subsystem_device;
+ ctrt.type = (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
+ if (ha->stype >= 0x300)
+ ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
+ else
+ ctrt.ext_type = 0x6000 | ha->stype;
}
+ ctrt.device_id = ha->pdev->device;
+ ctrt.sub_device_id = ha->pdev->subsystem_device;
ctrt.info = ha->brd_phys;
ctrt.oem_id = ha->oem_id;
if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype)))
@@ -4683,272 +4098,6 @@ static struct scsi_host_template gdth_template = {
.no_write_same = 1,
};
-#ifdef CONFIG_ISA
-static int __init gdth_isa_probe_one(u32 isa_bios)
-{
- struct Scsi_Host *shp;
- gdth_ha_str *ha;
- dma_addr_t scratch_dma_handle = 0;
- int error, i;
-
- if (!gdth_search_isa(isa_bios))
- return -ENXIO;
-
- shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
- if (!shp)
- return -ENOMEM;
- ha = shost_priv(shp);
-
- error = -ENODEV;
- if (!gdth_init_isa(isa_bios,ha))
- goto out_host_put;
-
- /* controller found and initialized */
- printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
- isa_bios, ha->irq, ha->drq);
-
- error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
- if (error) {
- printk("GDT-ISA: Unable to allocate IRQ\n");
- goto out_host_put;
- }
-
- error = request_dma(ha->drq, "gdth");
- if (error) {
- printk("GDT-ISA: Unable to allocate DMA channel\n");
- goto out_free_irq;
- }
-
- set_dma_mode(ha->drq,DMA_MODE_CASCADE);
- enable_dma(ha->drq);
- shp->unchecked_isa_dma = 1;
- shp->irq = ha->irq;
- shp->dma_channel = ha->drq;
-
- ha->hanum = gdth_ctr_count++;
- ha->shost = shp;
-
- ha->pccb = &ha->cmdext;
- ha->ccb_phys = 0L;
- ha->pdev = NULL;
-
- error = -ENOMEM;
-
- ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
- &scratch_dma_handle);
- if (!ha->pscratch)
- goto out_dec_counters;
- ha->scratch_phys = scratch_dma_handle;
-
- ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
- &scratch_dma_handle);
- if (!ha->pmsg)
- goto out_free_pscratch;
- ha->msg_phys = scratch_dma_handle;
-
-#ifdef INT_COAL
- ha->coal_stat = pci_alloc_consistent(ha->pdev,
- sizeof(gdth_coal_status) * MAXOFFSETS,
- &scratch_dma_handle);
- if (!ha->coal_stat)
- goto out_free_pmsg;
- ha->coal_stat_phys = scratch_dma_handle;
-#endif
-
- ha->scratch_busy = FALSE;
- ha->req_first = NULL;
- ha->tid_cnt = MAX_HDRIVES;
- if (max_ids > 0 && max_ids < ha->tid_cnt)
- ha->tid_cnt = max_ids;
- for (i = 0; i < GDTH_MAXCMDS; ++i)
- ha->cmd_tab[i].cmnd = UNUSED_CMND;
- ha->scan_mode = rescan ? 0x10 : 0;
-
- error = -ENODEV;
- if (!gdth_search_drives(ha)) {
- printk("GDT-ISA: Error during device scan\n");
- goto out_free_coal_stat;
- }
-
- if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
- hdr_channel = ha->bus_cnt;
- ha->virt_bus = hdr_channel;
-
- if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
- shp->max_cmd_len = 16;
-
- shp->max_id = ha->tid_cnt;
- shp->max_lun = MAXLUN;
- shp->max_channel = ha->bus_cnt;
-
- spin_lock_init(&ha->smp_lock);
- gdth_enable_int(ha);
-
- error = scsi_add_host(shp, NULL);
- if (error)
- goto out_free_coal_stat;
- list_add_tail(&ha->list, &gdth_instances);
- gdth_timer_init();
-
- scsi_scan_host(shp);
-
- return 0;
-
- out_free_coal_stat:
-#ifdef INT_COAL
- pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS,
- ha->coal_stat, ha->coal_stat_phys);
- out_free_pmsg:
-#endif
- pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
- ha->pmsg, ha->msg_phys);
- out_free_pscratch:
- pci_free_consistent(ha->pdev, GDTH_SCRATCH,
- ha->pscratch, ha->scratch_phys);
- out_dec_counters:
- gdth_ctr_count--;
- out_free_irq:
- free_irq(ha->irq, ha);
- out_host_put:
- scsi_host_put(shp);
- return error;
-}
-#endif /* CONFIG_ISA */
-
-#ifdef CONFIG_EISA
-static int __init gdth_eisa_probe_one(u16 eisa_slot)
-{
- struct Scsi_Host *shp;
- gdth_ha_str *ha;
- dma_addr_t scratch_dma_handle = 0;
- int error, i;
-
- if (!gdth_search_eisa(eisa_slot))
- return -ENXIO;
-
- shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
- if (!shp)
- return -ENOMEM;
- ha = shost_priv(shp);
-
- error = -ENODEV;
- if (!gdth_init_eisa(eisa_slot,ha))
- goto out_host_put;
-
- /* controller found and initialized */
- printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
- eisa_slot >> 12, ha->irq);
-
- error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
- if (error) {
- printk("GDT-EISA: Unable to allocate IRQ\n");
- goto out_host_put;
- }
-
- shp->unchecked_isa_dma = 0;
- shp->irq = ha->irq;
- shp->dma_channel = 0xff;
-
- ha->hanum = gdth_ctr_count++;
- ha->shost = shp;
-
- TRACE2(("EISA detect Bus 0: hanum %d\n", ha->hanum));
-
- ha->pccb = &ha->cmdext;
- ha->ccb_phys = 0L;
-
- error = -ENOMEM;
-
- ha->pdev = NULL;
- ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
- &scratch_dma_handle);
- if (!ha->pscratch)
- goto out_free_irq;
- ha->scratch_phys = scratch_dma_handle;
-
- ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
- &scratch_dma_handle);
- if (!ha->pmsg)
- goto out_free_pscratch;
- ha->msg_phys = scratch_dma_handle;
-
-#ifdef INT_COAL
- ha->coal_stat = pci_alloc_consistent(ha->pdev,
- sizeof(gdth_coal_status) * MAXOFFSETS,
- &scratch_dma_handle);
- if (!ha->coal_stat)
- goto out_free_pmsg;
- ha->coal_stat_phys = scratch_dma_handle;
-#endif
-
- ha->ccb_phys = pci_map_single(ha->pdev,ha->pccb,
- sizeof(gdth_cmd_str), PCI_DMA_BIDIRECTIONAL);
- if (!ha->ccb_phys)
- goto out_free_coal_stat;
-
- ha->scratch_busy = FALSE;
- ha->req_first = NULL;
- ha->tid_cnt = MAX_HDRIVES;
- if (max_ids > 0 && max_ids < ha->tid_cnt)
- ha->tid_cnt = max_ids;
- for (i = 0; i < GDTH_MAXCMDS; ++i)
- ha->cmd_tab[i].cmnd = UNUSED_CMND;
- ha->scan_mode = rescan ? 0x10 : 0;
-
- if (!gdth_search_drives(ha)) {
- printk("GDT-EISA: Error during device scan\n");
- error = -ENODEV;
- goto out_free_ccb_phys;
- }
-
- if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
- hdr_channel = ha->bus_cnt;
- ha->virt_bus = hdr_channel;
-
- if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
- shp->max_cmd_len = 16;
-
- shp->max_id = ha->tid_cnt;
- shp->max_lun = MAXLUN;
- shp->max_channel = ha->bus_cnt;
-
- spin_lock_init(&ha->smp_lock);
- gdth_enable_int(ha);
-
- error = scsi_add_host(shp, NULL);
- if (error)
- goto out_free_ccb_phys;
- list_add_tail(&ha->list, &gdth_instances);
- gdth_timer_init();
-
- scsi_scan_host(shp);
-
- return 0;
-
- out_free_ccb_phys:
- pci_unmap_single(ha->pdev,ha->ccb_phys, sizeof(gdth_cmd_str),
- PCI_DMA_BIDIRECTIONAL);
- out_free_coal_stat:
-#ifdef INT_COAL
- pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS,
- ha->coal_stat, ha->coal_stat_phys);
- out_free_pmsg:
-#endif
- pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
- ha->pmsg, ha->msg_phys);
- out_free_pscratch:
- pci_free_consistent(ha->pdev, GDTH_SCRATCH,
- ha->pscratch, ha->scratch_phys);
- out_free_irq:
- free_irq(ha->irq, ha);
- gdth_ctr_count--;
- out_host_put:
- scsi_host_put(shp);
- return error;
-}
-#endif /* CONFIG_EISA */
-
-#ifdef CONFIG_PCI
static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
{
struct Scsi_Host *shp;
@@ -4993,27 +4142,18 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
error = -ENOMEM;
- ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
- &scratch_dma_handle);
+ ha->pscratch = dma_alloc_coherent(&ha->pdev->dev, GDTH_SCRATCH,
+ &scratch_dma_handle, GFP_KERNEL);
if (!ha->pscratch)
goto out_free_irq;
ha->scratch_phys = scratch_dma_handle;
- ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
- &scratch_dma_handle);
+ ha->pmsg = dma_alloc_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
+ &scratch_dma_handle, GFP_KERNEL);
if (!ha->pmsg)
goto out_free_pscratch;
ha->msg_phys = scratch_dma_handle;
-#ifdef INT_COAL
- ha->coal_stat = pci_alloc_consistent(ha->pdev,
- sizeof(gdth_coal_status) * MAXOFFSETS,
- &scratch_dma_handle);
- if (!ha->coal_stat)
- goto out_free_pmsg;
- ha->coal_stat_phys = scratch_dma_handle;
-#endif
-
ha->scratch_busy = FALSE;
ha->req_first = NULL;
ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
@@ -5026,7 +4166,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
error = -ENODEV;
if (!gdth_search_drives(ha)) {
printk("GDT-PCI %d: Error during device scan\n", ha->hanum);
- goto out_free_coal_stat;
+ goto out_free_pmsg;
}
if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
@@ -5036,19 +4176,19 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
/* 64-bit DMA only supported from FW >= x.43 */
if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) ||
!ha->dma64_support) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "GDT-PCI %d: "
"Unable to set 32-bit DMA\n", ha->hanum);
- goto out_free_coal_stat;
+ goto out_free_pmsg;
}
} else {
shp->max_cmd_len = 16;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
- } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "GDT-PCI %d: "
"Unable to set 64/32-bit DMA\n", ha->hanum);
- goto out_free_coal_stat;
+ goto out_free_pmsg;
}
}
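
The pci_set_dma_mask() calls become dma_set_mask() against &pdev->dev, keeping the same ladder: 32-bit only for firmware without GDT_64BIT support, otherwise try 64-bit and fall back to 32-bit. The generic form of that ladder, as a sketch (the patch uses dma_set_mask() alone; dma_set_mask_and_coherent() would also set the coherent mask):

/* Prefer 64-bit DMA, fall back to 32-bit, fail the probe otherwise. */
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
	return -EIO;
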
@@ -5061,7 +4201,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
error = scsi_add_host(shp, &pdev->dev);
if (error)
- goto out_free_coal_stat;
+ goto out_free_pmsg;
list_add_tail(&ha->list, &gdth_instances);
pci_set_drvdata(ha->pdev, ha);
@@ -5073,16 +4213,11 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
return 0;
- out_free_coal_stat:
-#ifdef INT_COAL
- pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS,
- ha->coal_stat, ha->coal_stat_phys);
out_free_pmsg:
-#endif
- pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
+ dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
ha->pmsg, ha->msg_phys);
out_free_pscratch:
- pci_free_consistent(ha->pdev, GDTH_SCRATCH,
+ dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
ha->pscratch, ha->scratch_phys);
out_free_irq:
free_irq(ha->irq, ha);
@@ -5091,7 +4226,6 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
scsi_host_put(shp);
return error;
}
-#endif /* CONFIG_PCI */
static void gdth_remove_one(gdth_ha_str *ha)
{
@@ -5111,24 +4245,15 @@ static void gdth_remove_one(gdth_ha_str *ha)
if (shp->irq)
free_irq(shp->irq,ha);
-#ifdef CONFIG_ISA
- if (shp->dma_channel != 0xff)
- free_dma(shp->dma_channel);
-#endif
-#ifdef INT_COAL
- if (ha->coal_stat)
- pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
- MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys);
-#endif
if (ha->pscratch)
- pci_free_consistent(ha->pdev, GDTH_SCRATCH,
+ dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
ha->pscratch, ha->scratch_phys);
if (ha->pmsg)
- pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
+ dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
ha->pmsg, ha->msg_phys);
if (ha->ccb_phys)
- pci_unmap_single(ha->pdev,ha->ccb_phys,
- sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&ha->pdev->dev, ha->ccb_phys,
+ sizeof(gdth_cmd_str), DMA_BIDIRECTIONAL);
scsi_host_put(shp);
}
@@ -5167,26 +4292,6 @@ static int __init gdth_init(void)
gdth_clear_events();
timer_setup(&gdth_timer, gdth_timeout, 0);
- /* As default we do not probe for EISA or ISA controllers */
- if (probe_eisa_isa) {
- /* scanning for controllers, at first: ISA controller */
-#ifdef CONFIG_ISA
- u32 isa_bios;
- for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL;
- isa_bios += 0x8000UL)
- gdth_isa_probe_one(isa_bios);
-#endif
-#ifdef CONFIG_EISA
- {
- u16 eisa_slot;
- for (eisa_slot = 0x1000; eisa_slot <= 0x8000;
- eisa_slot += 0x1000)
- gdth_eisa_probe_one(eisa_slot);
- }
-#endif
- }
-
-#ifdef CONFIG_PCI
/* scanning for PCI controllers */
if (pci_register_driver(&gdth_pci_driver)) {
gdth_ha_str *ha;
@@ -5195,7 +4300,6 @@ static int __init gdth_init(void)
gdth_remove_one(ha);
return -ENODEV;
}
-#endif /* CONFIG_PCI */
TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
@@ -5216,9 +4320,7 @@ static void __exit gdth_exit(void)
del_timer_sync(&gdth_timer);
#endif
-#ifdef CONFIG_PCI
pci_unregister_driver(&gdth_pci_driver);
-#endif
list_for_each_entry(ha, &gdth_instances, list)
gdth_remove_one(ha);
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ee6ffcf388e8..5a13d406d40e 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -38,17 +38,9 @@
#define OEM_ID_INTEL 0x8000
/* controller classes */
-#define GDT_ISA 0x01 /* ISA controller */
-#define GDT_EISA 0x02 /* EISA controller */
#define GDT_PCI 0x03 /* PCI controller */
#define GDT_PCINEW 0x04 /* new PCI controller */
#define GDT_PCIMPR 0x05 /* PCI MPR controller */
-/* GDT_EISA, controller subtypes EISA */
-#define GDT3_ID 0x0130941c /* GDT3000/3020 */
-#define GDT3A_ID 0x0230941c /* GDT3000A/3020A/3050A */
-#define GDT3B_ID 0x0330941c /* GDT3000B/3010A */
-/* GDT_ISA */
-#define GDT2_ID 0x0120941c /* GDT2000/2020 */
#ifndef PCI_DEVICE_ID_VORTEX_GDT60x0
/* GDT_PCI */
@@ -281,17 +273,6 @@
#define GDTH_DATA_IN 0x01000000L /* data from target */
#define GDTH_DATA_OUT 0x00000000L /* data to target */
-/* BMIC registers (EISA controllers) */
-#define ID0REG 0x0c80 /* board ID */
-#define EINTENABREG 0x0c89 /* interrupt enable */
-#define SEMA0REG 0x0c8a /* command semaphore */
-#define SEMA1REG 0x0c8b /* status semaphore */
-#define LDOORREG 0x0c8d /* local doorbell */
-#define EDENABREG 0x0c8e /* EISA system doorbell enab. */
-#define EDOORREG 0x0c8f /* EISA system doorbell */
-#define MAILBOXREG 0x0c90 /* mailbox reg. (16 bytes) */
-#define EISAREG 0x0cc0 /* EISA configuration */
-
/* other defines */
#define LINUX_OS 8 /* used for cache optim. */
#define SECS32 0x1f /* round capacity */
@@ -706,21 +687,11 @@ typedef struct {
u8 fw_magic; /* contr. ID from firmware */
} __attribute__((packed)) gdt_pci_sram;
-/* SRAM structure EISA controllers (but NOT GDT3000/3020) */
-typedef struct {
- u8 os_used[16]; /* OS code per service */
- u16 need_deinit; /* switch betw. BIOS/driver */
- u8 switch_support; /* see need_deinit */
- u8 padding;
-} __attribute__((packed)) gdt_eisa_sram;
-
-
/* DPRAM ISA controllers */
typedef struct {
union {
struct {
u8 bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
- u32 magic; /* controller (EISA) ID */
u16 need_deinit; /* switch betw. BIOS/driver */
u8 switch_support; /* see need_deinit */
u8 padding[9];
@@ -843,7 +814,6 @@ typedef struct {
u16 cache_feat; /* feat. cache serv. (s/g,..)*/
u16 raw_feat; /* feat. raw service (s/g,..)*/
 u16 screen_feat; /* feat. screen serv. (s/g,..)*/
- u16 bmic; /* BMIC address (EISA) */
void __iomem *brd; /* DPRAM address */
u32 brd_phys; /* slot number/BIOS address */
gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
index 4c91894ac244..ee4c9bf1022a 100644
--- a/drivers/scsi/gdth_ioctl.h
+++ b/drivers/scsi/gdth_ioctl.h
@@ -27,11 +27,7 @@
#define GDTH_MAXSG 32 /* max. s/g elements */
#define MAX_LDRIVES 255 /* max. log. drive count */
-#ifdef GDTH_IOCTL_PROC
-#define MAX_HDRIVES 100 /* max. host drive count */
-#else
#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */
-#endif
/* scatter/gather element */
typedef struct {
@@ -178,91 +174,6 @@ typedef struct {
gdth_evt_data event_data;
} __attribute__((packed)) gdth_evt_str;
-
-#ifdef GDTH_IOCTL_PROC
-/* IOCTL structure (write) */
-typedef struct {
- u32 magic; /* IOCTL magic */
- u16 ioctl; /* IOCTL */
- u16 ionode; /* controller number */
- u16 service; /* controller service */
- u16 timeout; /* timeout */
- union {
- struct {
- u8 command[512]; /* controller command */
- u8 data[1]; /* add. data */
- } general;
- struct {
- u8 lock; /* lock/unlock */
- u8 drive_cnt; /* drive count */
- u16 drives[MAX_HDRIVES];/* drives */
- } lockdrv;
- struct {
- u8 lock; /* lock/unlock */
- u8 channel; /* channel */
- } lockchn;
- struct {
- int erase; /* erase event ? */
- int handle;
- u8 evt[EVENT_SIZE]; /* event structure */
- } event;
- struct {
- u8 bus; /* SCSI bus */
- u8 target; /* target ID */
- u8 lun; /* LUN */
- u8 cmd_len; /* command length */
- u8 cmd[12]; /* SCSI command */
- } scsi;
- struct {
- u16 hdr_no; /* host drive number */
- u8 flag; /* old meth./add/remove */
- } rescan;
- } iu;
-} gdth_iowr_str;
-
-/* IOCTL structure (read) */
-typedef struct {
- u32 size; /* buffer size */
- u32 status; /* IOCTL error code */
- union {
- struct {
- u8 data[1]; /* data */
- } general;
- struct {
- u16 version; /* driver version */
- } drvers;
- struct {
- u8 type; /* controller type */
- u16 info; /* slot etc. */
- u16 oem_id; /* OEM ID */
- u16 bios_ver; /* not used */
- u16 access; /* not used */
- u16 ext_type; /* extended type */
- u16 device_id; /* device ID */
- u16 sub_device_id; /* sub device ID */
- } ctrtype;
- struct {
- u8 version; /* OS version */
- u8 subversion; /* OS subversion */
- u16 revision; /* revision */
- } osvers;
- struct {
- u16 count; /* controller count */
- } ctrcnt;
- struct {
- int handle;
- u8 evt[EVENT_SIZE]; /* event structure */
- } event;
- struct {
- u8 bus; /* SCSI bus, 0xff: invalid */
- u8 target; /* target ID */
- u8 lun; /* LUN */
- u8 cluster_type; /* cluster properties */
- } hdr_list[MAX_HDRIVES]; /* index is host drive number */
- } iu;
-} gdth_iord_str;
-#endif
-
/* GDTIOCTL_GENERAL */
typedef struct {
u16 ionode; /* controller number */
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 3a9751a80225..381d849726ac 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -31,7 +31,6 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
int i, found;
gdth_cmd_str gdtcmd;
gdth_cpar_str *pcpar;
- u64 paddr;
char cmnd[MAX_COMMAND_SIZE];
memset(cmnd, 0xff, 12);
@@ -113,13 +112,23 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
}
if (wb_mode) {
- if (!gdth_ioctl_alloc(ha, sizeof(gdth_cpar_str), TRUE, &paddr))
- return(-EBUSY);
+ unsigned long flags;
+
+ BUILD_BUG_ON(sizeof(gdth_cpar_str) > GDTH_SCRATCH);
+
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ if (ha->scratch_busy) {
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+ return -EBUSY;
+ }
+ ha->scratch_busy = TRUE;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
pcpar = (gdth_cpar_str *)ha->pscratch;
memcpy( pcpar, &ha->cpar, sizeof(gdth_cpar_str) );
gdtcmd.Service = CACHESERVICE;
gdtcmd.OpCode = GDT_IOCTL;
- gdtcmd.u.ioctl.p_param = paddr;
+ gdtcmd.u.ioctl.p_param = ha->scratch_phys;
gdtcmd.u.ioctl.param_size = sizeof(gdth_cpar_str);
gdtcmd.u.ioctl.subfunc = CACHE_CONFIG;
gdtcmd.u.ioctl.channel = INVALID_CHANNEL;
@@ -127,7 +136,10 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
- gdth_ioctl_free(ha, GDTH_SCRATCH, ha->pscratch, paddr);
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ ha->scratch_busy = FALSE;
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
printk("Done.\n");
return(orig_length);
}
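
With gdth_ioctl_alloc() gone, gdth_set_asc_info() claims the controller's single scratch buffer directly: a test-and-set of ha->scratch_busy under ha->smp_lock, use of ha->pscratch/ha->scratch_phys, then a locked release. The pattern in isolation, distilled from the hunk above:

/* Try-acquire the scratch buffer; fail fast instead of sleeping. */
spin_lock_irqsave(&ha->smp_lock, flags);
if (ha->scratch_busy) {
	spin_unlock_irqrestore(&ha->smp_lock, flags);
	return -EBUSY;
}
ha->scratch_busy = TRUE;
spin_unlock_irqrestore(&ha->smp_lock, flags);

/* ... use ha->pscratch / ha->scratch_phys ... */

spin_lock_irqsave(&ha->smp_lock, flags);
ha->scratch_busy = FALSE;
spin_unlock_irqrestore(&ha->smp_lock, flags);
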
@@ -143,7 +155,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
int id, i, j, k, sec, flag;
int no_mdrv = 0, drv_no, is_mirr;
u32 cnt;
- u64 paddr;
+ dma_addr_t paddr;
int rc = -ENOMEM;
gdth_cmd_str *gdtcmd;
@@ -217,20 +229,14 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
" Serial No.: \t0x%8X\tCache RAM size:\t%d KB\n",
ha->binfo.ser_no, ha->binfo.memsize / 1024);
-#ifdef GDTH_DMA_STATISTICS
- /* controller statistics */
- seq_puts(m, "\nController Statistics:\n");
- seq_printf(m,
- " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n",
- ha->dma32_cnt, ha->dma64_cnt);
-#endif
-
if (ha->more_proc) {
+ size_t size = max_t(size_t, GDTH_SCRATCH, sizeof(gdth_hget_str));
+
/* more information: 2. about physical devices */
seq_puts(m, "\nPhysical Devices:");
flag = FALSE;
- buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
+ buf = dma_alloc_coherent(&ha->pdev->dev, size, &paddr, GFP_KERNEL);
if (!buf)
goto stop_output;
for (i = 0; i < ha->bus_cnt; ++i) {
@@ -323,7 +329,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
}
}
}
- gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
if (!flag)
seq_puts(m, "\n --\n");
@@ -332,9 +337,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
seq_puts(m, "\nLogical Drives:");
flag = FALSE;
- buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
- if (!buf)
- goto stop_output;
for (i = 0; i < MAX_LDRIVES; ++i) {
if (!ha->hdr[i].is_logdrv)
continue;
@@ -408,8 +410,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
seq_printf(m,
" To Array Drv.:\t%s\n", hrec);
}
- gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
-
+
if (!flag)
seq_puts(m, "\n --\n");
@@ -417,9 +418,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
seq_puts(m, "\nArray Drives:");
flag = FALSE;
- buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
- if (!buf)
- goto stop_output;
for (i = 0; i < MAX_LDRIVES; ++i) {
if (!(ha->hdr[i].is_arraydrv && ha->hdr[i].is_master))
continue;
@@ -468,8 +466,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
hrec);
}
}
- gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
-
+
if (!flag)
seq_puts(m, "\n --\n");
@@ -477,9 +474,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
seq_puts(m, "\nHost Drives:");
flag = FALSE;
- buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr);
- if (!buf)
- goto stop_output;
for (i = 0; i < MAX_LDRIVES; ++i) {
if (!ha->hdr[i].is_logdrv ||
(ha->hdr[i].is_arraydrv && !ha->hdr[i].is_master))
@@ -510,7 +504,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
}
}
}
- gdth_ioctl_free(ha, sizeof(gdth_hget_str), buf, paddr);
+ dma_free_coherent(&ha->pdev->dev, size, buf, paddr);
for (i = 0; i < MAX_HDRIVES; ++i) {
if (!(ha->hdr[i].present))
@@ -563,65 +557,6 @@ free_fail:
return rc;
}
-static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
- u64 *paddr)
-{
- unsigned long flags;
- char *ret_val;
-
- if (size == 0)
- return NULL;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- if (!ha->scratch_busy && size <= GDTH_SCRATCH) {
- ha->scratch_busy = TRUE;
- ret_val = ha->pscratch;
- *paddr = ha->scratch_phys;
- } else if (scratch) {
- ret_val = NULL;
- } else {
- dma_addr_t dma_addr;
-
- ret_val = pci_alloc_consistent(ha->pdev, size, &dma_addr);
- *paddr = dma_addr;
- }
-
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return ret_val;
-}
-
-static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr)
-{
- unsigned long flags;
-
- if (buf == ha->pscratch) {
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->scratch_busy = FALSE;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- } else {
- pci_free_consistent(ha->pdev, size, buf, paddr);
- }
-}
-
-#ifdef GDTH_IOCTL_PROC
-static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size)
-{
- unsigned long flags;
- int ret_val;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- ret_val = FALSE;
- if (ha->scratch_busy) {
- if (((gdth_iord_str *)ha->pscratch)->size == (u32)size)
- ret_val = TRUE;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return ret_val;
-}
-#endif
-
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
{
unsigned long flags;
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index d7d0aa283695..4cc5377cb92e 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -12,9 +12,6 @@ int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
int length, gdth_ha_str *ha);
-static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
- u64 *paddr);
-static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr);
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index af291947a54d..9bfa9f12d81e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -14,9 +14,11 @@
#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
#include <linux/lcm.h>
+#include <linux/libata.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -29,7 +31,7 @@
#define HISI_SAS_MAX_PHYS 9
#define HISI_SAS_MAX_QUEUES 32
-#define HISI_SAS_QUEUE_SLOTS 512
+#define HISI_SAS_QUEUE_SLOTS 4096
#define HISI_SAS_MAX_ITCT_ENTRIES 1024
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
@@ -40,20 +42,25 @@
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
#define hisi_sas_status_buf_addr(buf) \
- (buf + offsetof(struct hisi_sas_slot_buf_table, status_buffer))
-#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr(slot->buf)
+ ((buf) + offsetof(struct hisi_sas_slot_buf_table, status_buffer))
+#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr((slot)->buf)
#define hisi_sas_status_buf_addr_dma(slot) \
- hisi_sas_status_buf_addr(slot->buf_dma)
+ hisi_sas_status_buf_addr((slot)->buf_dma)
#define hisi_sas_cmd_hdr_addr(buf) \
- (buf + offsetof(struct hisi_sas_slot_buf_table, command_header))
-#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr(slot->buf)
-#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr(slot->buf_dma)
+ ((buf) + offsetof(struct hisi_sas_slot_buf_table, command_header))
+#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr((slot)->buf)
+#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr((slot)->buf_dma)
#define hisi_sas_sge_addr(buf) \
- (buf + offsetof(struct hisi_sas_slot_buf_table, sge_page))
-#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr(slot->buf)
-#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr(slot->buf_dma)
+ ((buf) + offsetof(struct hisi_sas_slot_buf_table, sge_page))
+#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr((slot)->buf)
+#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr((slot)->buf_dma)
+
+#define hisi_sas_sge_dif_addr(buf) \
+ ((buf) + offsetof(struct hisi_sas_slot_dif_buf_table, sge_dif_page))
+#define hisi_sas_sge_dif_addr_mem(slot) hisi_sas_sge_dif_addr((slot)->buf)
+#define hisi_sas_sge_dif_addr_dma(slot) hisi_sas_sge_dif_addr((slot)->buf_dma)
#define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024)
#define HISI_SAS_MAX_SMP_RESP_SZ 1028
@@ -73,7 +80,13 @@
SHOST_DIF_TYPE2_PROTECTION | \
SHOST_DIF_TYPE3_PROTECTION)
-#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK)
+#define HISI_SAS_DIX_PROT_MASK (SHOST_DIX_TYPE1_PROTECTION | \
+ SHOST_DIX_TYPE2_PROTECTION | \
+ SHOST_DIX_TYPE3_PROTECTION)
+
+#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
+
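
HISI_SAS_PROT_MASK now spans DIF (protection information handled by the controller) and DIX (protection buffers passed from the host in a separate scatterlist). A host driver typically filters a user-selected mask against this and hands the result to the midlayer; a sketch, where prot_mask is an assumed module parameter:

/* 'prot_mask' is a hypothetical module parameter in this sketch. */
if (prot_mask & ~HISI_SAS_PROT_MASK)
	dev_err(dev, "unsupported prot_mask 0x%x\n", prot_mask);
else if (prot_mask)
	scsi_host_set_prot(shost, prot_mask);
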
+#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
struct hisi_hba;
@@ -83,8 +96,8 @@ enum {
};
enum dev_status {
+ HISI_SAS_DEV_INIT,
HISI_SAS_DEV_NORMAL,
- HISI_SAS_DEV_EH,
};
enum {
@@ -145,6 +158,7 @@ struct hisi_sas_phy {
struct asd_sas_phy sas_phy;
struct sas_identify identify;
struct completion *reset_completion;
+ struct timer_list timer;
spinlock_t lock;
u64 port_id; /* from hw */
u64 frame_rcvd_size;
@@ -153,6 +167,7 @@ struct hisi_sas_phy {
u8 in_reset;
u8 reserved[2];
u32 phy_type;
+ u32 code_violation_err_count;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
};
@@ -165,6 +180,7 @@ struct hisi_sas_port {
struct hisi_sas_cq {
struct hisi_hba *hisi_hba;
+ const struct cpumask *pci_irq_mask;
struct tasklet_struct tasklet;
int rd_point;
int id;
@@ -185,9 +201,10 @@ struct hisi_sas_device {
struct hisi_sas_dq *dq;
struct list_head list;
enum sas_device_type dev_type;
+ enum dev_status dev_status;
int device_id;
int sata_idx;
- u8 dev_status;
+ spinlock_t lock; /* For protecting slots */
};
struct hisi_sas_tmf_task {
@@ -203,12 +220,14 @@ struct hisi_sas_slot {
struct sas_task *task;
struct hisi_sas_port *port;
u64 n_elem;
+ u64 n_elem_dif;
int dlvry_queue;
int dlvry_queue_slot;
int cmplt_queue;
int cmplt_queue_slot;
int abort;
int ready;
+ int device_id;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
struct timer_list internal_abort_timer;
@@ -220,6 +239,24 @@ struct hisi_sas_slot {
u16 idx;
};
+#define HISI_SAS_DEBUGFS_REG(x) {#x, x}
+
+struct hisi_sas_debugfs_reg_lu {
+ char *name;
+ int off;
+};
+
+struct hisi_sas_debugfs_reg {
+ const struct hisi_sas_debugfs_reg_lu *lu;
+ int count;
+ int base_off;
+ union {
+ u32 (*read_global_reg)(struct hisi_hba *hisi_hba, u32 off);
+ u32 (*read_port_reg)(struct hisi_hba *hisi_hba, int port,
+ u32 off);
+ };
+};
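
HISI_SAS_DEBUGFS_REG() stringifies a register macro next to its offset, so each hw version can publish a lookup table that generic dump code walks instead of open-coding one seq_printf() per register. A sketch with assumed register names and offsets:

/* Offsets are illustrative; the real tables live in the hw-version files. */
#define PHY_CFG		0x0
#define SL_CONTROL	0x94

static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(PHY_CFG),
	HISI_SAS_DEBUGFS_REG(SL_CONTROL),
	{}
};
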
+
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
@@ -227,7 +264,7 @@ struct hisi_sas_hw {
int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
- void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
+ void (*sl_notify_ssp)(struct hisi_hba *hisi_hba, int phy_no);
int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
void (*start_delivery)(struct hisi_sas_dq *dq);
void (*prep_ssp)(struct hisi_hba *hisi_hba,
@@ -259,11 +296,16 @@ struct hisi_sas_hw {
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
u8 reg_index, u8 reg_count, u8 *write_data);
- void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
- int delay_ms, int timeout_ms);
+ int (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms);
+ void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
+ void (*snapshot_restore)(struct hisi_hba *hisi_hba);
int max_command_entries;
int complete_hdr_size;
struct scsi_host_template *sht;
+
+ const struct hisi_sas_debugfs_reg *debugfs_reg_global;
+ const struct hisi_sas_debugfs_reg *debugfs_reg_port;
};
struct hisi_hba {
@@ -329,9 +371,25 @@ struct hisi_hba {
const struct hisi_sas_hw *hw; /* Low level hw interface */
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
struct work_struct rst_work;
+ struct work_struct debugfs_work;
u32 phy_state;
u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
u32 intr_coal_count; /* Interrupt count to coalesce */
+
+ int cq_nvecs;
+ unsigned int *reply_map;
+
+ /* debugfs memories */
+ u32 *debugfs_global_reg;
+ u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS];
+ void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_iost *debugfs_iost;
+ struct hisi_sas_itct *debugfs_itct;
+
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_dump_dentry;
+ bool debugfs_snapshot;
};
/* Generic HW DMA host memory structures */
@@ -430,6 +488,11 @@ struct hisi_sas_sge_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT];
} __aligned(16);
+#define HISI_SAS_SGE_DIF_PAGE_CNT SG_CHUNK_SIZE
+struct hisi_sas_sge_dif_page {
+ struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT];
+} __aligned(16);
+
struct hisi_sas_command_table_ssp {
struct ssp_frame_hdr hdr;
union {
@@ -460,9 +523,18 @@ struct hisi_sas_slot_buf_table {
struct hisi_sas_sge_page sge_page;
};
+struct hisi_sas_slot_dif_buf_table {
+ struct hisi_sas_slot_buf_table slot_buf;
+ struct hisi_sas_sge_dif_page sge_dif_page;
+};
+
extern struct scsi_transport_template *hisi_sas_stt;
+
+extern bool hisi_sas_debugfs_enable;
+extern struct dentry *hisi_sas_debugfs_dir;
+
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
-extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
+extern int hisi_sas_alloc(struct hisi_hba *hisi_hba);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
int direction);
@@ -487,10 +559,14 @@ extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern void hisi_sas_rst_work_handler(struct work_struct *work);
extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no);
extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event);
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
+extern void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba);
+extern void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba);
+extern void hisi_sas_debugfs_work_handler(struct work_struct *work);
#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index bc17fa0d8375..14bac4966c87 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -10,6 +10,7 @@
*/
#include "hisi_sas.h"
+#include "../libsas/sas_internal.h"
#define DRV_NAME "hisi_sas"
#define DEV_IS_GONE(dev) \
@@ -144,7 +145,7 @@ EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
*/
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
- u16 rate = 0;
+ u8 rate = 0;
int i;
max -= SAS_LINK_RATE_1_5_GBPS;
@@ -241,8 +242,9 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
- struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
unsigned long flags;
+ int device_id = slot->device_id;
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
if (task) {
struct device *dev = hisi_hba->dev;
@@ -252,17 +254,24 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->lldd_task = NULL;
- if (!sas_protocol_ata(task->task_proto))
+ if (!sas_protocol_ata(task->task_proto)) {
+ struct sas_ssp_task *ssp_task = &task->ssp_task;
+ struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+
if (slot->n_elem)
dma_unmap_sg(dev, task->scatter,
task->num_scatter,
task->data_dir);
+ if (slot->n_elem_dif)
+ dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd),
+ task->data_dir);
+ }
}
-
- spin_lock_irqsave(&dq->lock, flags);
+ spin_lock_irqsave(&sas_dev->lock, flags);
list_del_init(&slot->entry);
- spin_unlock_irqrestore(&dq->lock, flags);
+ spin_unlock_irqrestore(&sas_dev->lock, flags);
memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
@@ -380,6 +389,59 @@ prep_out:
return rc;
}
+static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
+ struct sas_task *task, int n_elem_dif)
+{
+ struct device *dev = hisi_hba->dev;
+
+ if (n_elem_dif) {
+ struct sas_ssp_task *ssp_task = &task->ssp_task;
+ struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+
+ dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd),
+ task->data_dir);
+ }
+}
+
+static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
+ int *n_elem_dif, struct sas_task *task)
+{
+ struct device *dev = hisi_hba->dev;
+ struct sas_ssp_task *ssp_task;
+ struct scsi_cmnd *scsi_cmnd;
+ int rc;
+
+ if (task->num_scatter) {
+ ssp_task = &task->ssp_task;
+ scsi_cmnd = ssp_task->cmd;
+
+ if (scsi_prot_sg_count(scsi_cmnd)) {
+ *n_elem_dif = dma_map_sg(dev,
+ scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd),
+ task->data_dir);
+
+ if (!*n_elem_dif)
+ return -ENOMEM;
+
+ if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
+ dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
+ *n_elem_dif);
+ rc = -EINVAL;
+ goto err_out_dif_dma_unmap;
+ }
+ }
+ }
+
+ return 0;
+
+err_out_dif_dma_unmap:
+ dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd), task->data_dir);
+ return rc;
+}
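
hisi_sas_dif_dma_map() and hisi_sas_dif_dma_unmap() are symmetric on purpose: both derive the protection scatterlist and element count from the same scsi_prot_sglist()/scsi_prot_sg_count() calls, so a mapping is always undone with identical arguments. Caller shape, as a sketch (prep_with_dif is a hypothetical wrapper):

/* Sketch of the map/unwind pairing used by the task-prep path below. */
static int prep_with_dif(struct hisi_hba *hisi_hba, struct sas_task *task)
{
	int n_elem_dif = 0, rc;

	rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
	if (rc < 0)
		return rc;

	rc = 0;		/* ... slot allocation and command build ... */
	if (rc < 0)
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
	return rc;
}
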
+
static int hisi_sas_task_prep(struct sas_task *task,
struct hisi_sas_dq **dq_pointer,
bool is_tmf, struct hisi_sas_tmf_task *tmf,
@@ -394,7 +456,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
struct asd_sas_port *sas_port = device->port;
struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
- int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
+ int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0;
struct hisi_sas_dq *dq;
unsigned long flags;
int wr_q_index;
@@ -410,7 +472,14 @@ static int hisi_sas_task_prep(struct sas_task *task,
return -ECOMM;
}
- *dq_pointer = dq = sas_dev->dq;
+ if (hisi_hba->reply_map) {
+ int cpu = raw_smp_processor_id();
+ unsigned int dq_index = hisi_hba->reply_map[cpu];
+
+ *dq_pointer = dq = &hisi_hba->dq[dq_index];
+ } else {
+ *dq_pointer = dq = sas_dev->dq;
+ }
port = to_hisi_sas_port(sas_port);
if (port && !port->port_attached) {
@@ -427,6 +496,12 @@ static int hisi_sas_task_prep(struct sas_task *task,
if (rc < 0)
goto prep_out;
+ if (!sas_protocol_ata(task->task_proto)) {
+ rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
+ if (rc < 0)
+ goto err_out_dma_unmap;
+ }
+
if (hisi_hba->hw->slot_index_alloc)
rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
else {
@@ -445,7 +520,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
}
if (rc < 0)
- goto err_out_dma_unmap;
+ goto err_out_dif_dma_unmap;
slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
@@ -459,13 +534,17 @@ static int hisi_sas_task_prep(struct sas_task *task,
}
list_add_tail(&slot->delivery, &dq->list);
- list_add_tail(&slot->entry, &sas_dev->list);
spin_unlock_irqrestore(&dq->lock, flags);
+ spin_lock_irqsave(&sas_dev->lock, flags);
+ list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&sas_dev->lock, flags);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
+ slot->device_id = sas_dev->device_id;
slot->n_elem = n_elem;
+ slot->n_elem_dif = n_elem_dif;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
@@ -509,6 +588,9 @@ static int hisi_sas_task_prep(struct sas_task *task,
err_out_tag:
hisi_sas_slot_index_free(hisi_hba, slot_idx);
+err_out_dif_dma_unmap:
+ if (!sas_protocol_ata(task->task_proto))
+ hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
hisi_sas_dma_unmap(hisi_hba, task, n_elem,
n_elem_req, n_elem_resp);
@@ -626,11 +708,12 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
hisi_hba->devices[i].device_id = i;
sas_dev = &hisi_hba->devices[i];
- sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+ sas_dev->dev_status = HISI_SAS_DEV_INIT;
sas_dev->dev_type = device->dev_type;
sas_dev->hisi_hba = hisi_hba;
sas_dev->sas_device = device;
sas_dev->dq = dq;
+ spin_lock_init(&sas_dev->lock);
INIT_LIST_HEAD(&hisi_hba->devices[i].list);
break;
}
@@ -650,6 +733,8 @@ static int hisi_sas_init_device(struct domain_device *device)
struct hisi_sas_tmf_task tmf_task;
int retry = HISI_SAS_SRST_ATA_DISK_CNT;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct device *dev = hisi_hba->dev;
+ struct sas_phy *local_phy;
switch (device->dev_type) {
case SAS_END_DEVICE:
@@ -665,6 +750,31 @@ static int hisi_sas_init_device(struct domain_device *device)
case SAS_SATA_PM:
case SAS_SATA_PM_PORT:
case SAS_SATA_PENDING:
+ /*
+ * Send a HARD RESET to clear any previous affiliation of the
+ * STP target port.
+ */
+ local_phy = sas_get_local_phy(device);
+ if (!scsi_is_sas_phy_local(local_phy)) {
+ unsigned long deadline = ata_deadline(jiffies, 20000);
+ struct sata_device *sata_dev = &device->sata_dev;
+ struct ata_host *ata_host = sata_dev->ata_host;
+ struct ata_port_operations *ops = ata_host->ops;
+ struct ata_port *ap = sata_dev->ap;
+ struct ata_link *link;
+ unsigned int classes;
+
+ ata_for_each_link(link, ap, EDGE)
+ rc = ops->hardreset(link, &classes,
+ deadline);
+ }
+ sas_put_local_phy(local_phy);
+ if (rc) {
+ dev_warn(dev, "SATA disk hardreset fail: 0x%x\n",
+ rc);
+ return rc;
+ }
+
while (retry-- > 0) {
rc = hisi_sas_softreset_ata_disk(device);
if (!rc)
@@ -727,6 +837,7 @@ static int hisi_sas_dev_found(struct domain_device *device)
rc = hisi_sas_init_device(device);
if (rc)
goto err_out;
+ sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
return 0;
err_out:
@@ -778,7 +889,8 @@ static void hisi_sas_phyup_work(struct work_struct *work)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
- hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
+ if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
+ hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
@@ -808,6 +920,30 @@ bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
+static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
+{
+ struct hisi_sas_phy *phy = from_timer(phy, t, timer);
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct device *dev = hisi_hba->dev;
+ int phy_no = phy->sas_phy.id;
+
+ dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+}
+
+void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct device *dev = hisi_hba->dev;
+
+ if (!timer_pending(&phy->timer)) {
+ dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+ phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
+ add_timer(&phy->timer);
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
+
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
@@ -836,6 +972,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
spin_lock_init(&phy->lock);
+
+ timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}
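
The per-phy timer is a phy-up watchdog: hisi_sas_phy_oob_ready() arms it when OOB signalling completes, and if nothing cancels it within HISI_SAS_WAIT_PHYUP_TIMEOUT seconds the handler queues a link reset. The cancel side is in the per-version phy-up interrupt handlers (not in this hunk); presumably something like:

/* Presumed phy-up side: stop the watchdog once the link is up. */
static void phy_up_stop_watchdog(struct hisi_sas_phy *phy)
{
	del_timer(&phy->timer);
}
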
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
@@ -872,7 +1010,8 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &=
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
- task->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
}
@@ -926,7 +1065,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0);
hisi_sas_dereg_device(hisi_hba, device);
@@ -946,7 +1085,7 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
-static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
+static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
struct sas_phy_linkrates _r;
@@ -955,6 +1094,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
struct asd_sas_phy *sas_phy = &phy->sas_phy;
enum sas_linkrate min, max;
+ if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
+ return -EINVAL;
+
if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
max = sas_phy->phy->maximum_linkrate;
min = r->minimum_linkrate;
@@ -962,7 +1104,7 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
max = r->maximum_linkrate;
min = sas_phy->phy->minimum_linkrate;
} else
- return;
+ return -EINVAL;
_r.maximum_linkrate = max;
_r.minimum_linkrate = min;
@@ -974,6 +1116,8 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
msleep(100);
hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
hisi_hba->hw->phy_start(hisi_hba, phy_no);
+
+ return 0;
}
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
@@ -999,8 +1143,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_SET_LINK_RATE:
- hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
- break;
+ return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
case PHY_FUNC_GET_EVENTS:
if (hisi_hba->hw->get_events) {
hisi_hba->hw->get_events(hisi_hba, phy_no);
@@ -1068,7 +1211,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task->task_done = hisi_sas_task_done;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
+ task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
add_timer(&task->slow_task->timer);
res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
@@ -1429,6 +1572,9 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
struct Scsi_Host *shost = hisi_hba->shost;
int rc;
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+
if (!hisi_hba->hw->soft_reset)
return -1;
@@ -1491,7 +1637,6 @@ static int hisi_sas_abort_task(struct sas_task *task)
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- sas_dev->dev_status = HISI_SAS_DEV_EH;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_slot *slot = task->lldd_task;
@@ -1527,7 +1672,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV,
+ 0);
if (rc < 0) {
dev_err(dev, "abort task: internal abort failed\n");
goto out;
@@ -1542,7 +1688,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag);
+ HISI_SAS_INT_ABT_CMD, tag);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
/*
@@ -1568,7 +1714,7 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
int rc = TMF_RESP_FUNC_FAILED;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0);
if (rc < 0) {
dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
return TMF_RESP_FUNC_FAILED;
@@ -1586,8 +1732,8 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
- int rc = TMF_RESP_FUNC_FAILED;
struct hisi_sas_tmf_task tmf_task;
+ int rc;
tmf_task.tmf = TMF_CLEAR_ACA;
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
@@ -1598,20 +1744,23 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
struct sas_phy *local_phy = sas_get_local_phy(device);
- int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
- (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
struct hisi_sas_phy *phy = container_of(sas_phy,
struct hisi_sas_phy, sas_phy);
DECLARE_COMPLETION_ONSTACK(phyreset);
+ int rc, reset_type;
if (scsi_is_sas_phy_local(local_phy)) {
phy->in_reset = 1;
phy->reset_completion = &phyreset;
}
+ reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
+ !dev_is_sata(device)) ? 1 : 0;
+
rc = sas_phy_reset(local_phy, reset_type);
sas_put_local_phy(local_phy);
@@ -1627,31 +1776,37 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
/* report PHY down if timed out */
if (!ret)
hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
- } else
+ } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
+ /*
+ * If in init state, we rely on caller to wait for link to be
+ * ready; otherwise, delay.
+ */
msleep(2000);
+ }
return rc;
}
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
- struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- int rc = TMF_RESP_FUNC_FAILED;
-
- if (sas_dev->dev_status != HISI_SAS_DEV_EH)
- return TMF_RESP_FUNC_FAILED;
- sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+ int rc;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0);
if (rc < 0) {
dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
return TMF_RESP_FUNC_FAILED;
}
hisi_sas_dereg_device(hisi_hba, device);
+ if (dev_is_sata(device)) {
+ rc = hisi_sas_softreset_ata_disk(device);
+ if (rc)
+ return TMF_RESP_FUNC_FAILED;
+ }
+
rc = hisi_sas_debug_I_T_nexus_reset(device);
if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
@@ -1667,7 +1822,6 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
- sas_dev->dev_status = HISI_SAS_DEV_EH;
if (dev_is_sata(device)) {
struct sas_phy *phy;
@@ -1691,7 +1845,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0);
if (rc < 0) {
dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
@@ -1777,7 +1931,7 @@ static int hisi_sas_query_task(struct sas_task *task)
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
struct sas_task *task, int abort_flag,
- int task_tag)
+ int task_tag, struct hisi_sas_dq *dq)
{
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1786,7 +1940,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
struct hisi_sas_slot *slot;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
- struct hisi_sas_dq *dq = sas_dev->dq;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
unsigned long flags, flags_dq = 0;
int wr_q_index;
@@ -1816,10 +1969,14 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
}
list_add_tail(&slot->delivery, &dq->list);
spin_unlock_irqrestore(&dq->lock, flags_dq);
+ spin_lock_irqsave(&sas_dev->lock, flags);
+ list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&sas_dev->lock, flags);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
+ slot->device_id = sas_dev->device_id;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -1843,7 +2000,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
spin_lock_irqsave(&dq->lock, flags);
- list_add_tail(&slot->entry, &sas_dev->list);
hisi_hba->hw->start_delivery(dq);
spin_unlock_irqrestore(&dq->lock, flags);
@@ -1858,18 +2014,19 @@ err_out:
}
/**
- * hisi_sas_internal_task_abort -- execute an internal
+ * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for a single IO command or a device
* @hisi_hba: host controller struct
* @device: domain device
* @abort_flag: mode of operation, device or single IO
* @tag: tag of IO to be aborted (only relevant to single
* IO mode)
+ * @dq: delivery queue for this internal abort command
*/
static int
-hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device,
- int abort_flag, int tag)
+_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+ struct domain_device *device, int abort_flag,
+ int tag, struct hisi_sas_dq *dq)
{
struct sas_task *task;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1893,11 +2050,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
task->task_proto = device->tproto;
task->task_done = hisi_sas_task_done;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
+ task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
add_timer(&task->slow_task->timer);
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
- task, abort_flag, tag);
+ task, abort_flag, tag, dq);
if (res) {
del_timer(&task->slow_task->timer);
dev_err(dev, "internal task abort: executing internal task failed: %d\n",
@@ -1923,6 +2080,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
slot->task = NULL;
}
dev_err(dev, "internal task abort: timeout and not done.\n");
+
res = -EIO;
goto exit;
} else
@@ -1953,6 +2111,46 @@ exit:
return res;
}
+static int
+hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+ struct domain_device *device,
+ int abort_flag, int tag)
+{
+ struct hisi_sas_slot *slot;
+ struct device *dev = hisi_hba->dev;
+ struct hisi_sas_dq *dq;
+ int i, rc;
+
+ switch (abort_flag) {
+ case HISI_SAS_INT_ABT_CMD:
+ slot = &hisi_hba->slot_info[tag];
+ dq = &hisi_hba->dq[slot->dlvry_queue];
+ return _hisi_sas_internal_task_abort(hisi_hba, device,
+ abort_flag, tag, dq);
+ case HISI_SAS_INT_ABT_DEV:
+ for (i = 0; i < hisi_hba->cq_nvecs; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ const struct cpumask *mask = cq->pci_irq_mask;
+
+ if (mask && !cpumask_intersects(cpu_online_mask, mask))
+ continue;
+ dq = &hisi_hba->dq[i];
+ rc = _hisi_sas_internal_task_abort(hisi_hba, device,
+ abort_flag, tag,
+ dq);
+ if (rc)
+ return rc;
+ }
+ break;
+ default:
+ dev_err(dev, "Unrecognised internal abort flag (%d)\n",
+ abort_flag);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
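
Since commands may now be steered to the delivery queue of the submitting CPU (via hisi_hba->reply_map) instead of the device's fixed queue, a device-scope abort must be replayed on every completion queue whose managed IRQ can still fire; queues with no online CPU in their affinity mask are skipped. The reply_map itself is presumably filled at probe time from the PCI IRQ affinities, roughly:

/*
 * Sketch: map each CPU to the completion queue whose managed IRQ is
 * affine to it. 'nvecs' and 'base_vec' are assumed probe-time values.
 */
for (queue = 0; queue < nvecs; queue++) {
	const struct cpumask *mask = pci_irq_get_affinity(pdev, queue + base_vec);
	int cpu;

	if (!mask)
		continue;
	for_each_cpu(cpu, mask)
		hisi_hba->reply_map[cpu] = queue;
}
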
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
hisi_sas_port_notify_formed(sas_phy);
@@ -1972,9 +2170,18 @@ static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_phy *sphy = sas_phy->phy;
+ struct sas_phy_data *d = sphy->hostdata;
+
phy->phy_attached = 0;
phy->phy_type = 0;
phy->port = NULL;
+
+ if (d->enable)
+ sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+ else
+ sphy->negotiated_linkrate = SAS_PHY_DISABLED;
}
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
@@ -2019,7 +2226,7 @@ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
int i;
- for (i = 0; i < hisi_hba->queue_count; i++) {
+ for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
tasklet_kill(&cq->tasklet);
@@ -2048,14 +2255,18 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
- int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
+ int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries;
+ struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
struct hisi_sas_dq *dq = &hisi_hba->dq[i];
+ struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];
+
+ s = sizeof(struct hisi_sas_cmd_hdr);
+ for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
+ memset(&cmd_hdr[j], 0, s);
- s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
- memset(hisi_hba->cmd_hdr[i], 0, s);
dq->wr_point = 0;
s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
@@ -2072,12 +2283,13 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
memset(hisi_hba->breakpoint, 0, s);
- s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
- memset(hisi_hba->sata_breakpoint, 0, s);
+ s = sizeof(struct hisi_sas_sata_breakpoint);
+ for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
+ memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
-int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
+int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
@@ -2095,7 +2307,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
hisi_hba->devices[i].device_id = i;
- hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
+ hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -2131,10 +2343,9 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!hisi_hba->itct)
goto err_out;
- memset(hisi_hba->itct, 0, s);
hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
sizeof(struct hisi_sas_slot),
@@ -2144,19 +2355,24 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
/* roundup to avoid overly large block size */
max_command_entries_ru = roundup(max_command_entries, 64);
- sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
+ if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
+ sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
+ else
+ sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
+ sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
s = lcm(max_command_entries_ru, sz_slot_buf_ru);
blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
slots_per_blk = s / sz_slot_buf_ru;
+
for (i = 0; i < blk_cnt; i++) {
- struct hisi_sas_slot_buf_table *buf;
- dma_addr_t buf_dma;
int slot_index = i * slots_per_blk;
+ dma_addr_t buf_dma;
+ void *buf;
- buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
+ buf = dmam_alloc_coherent(dev, s, &buf_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!buf)
goto err_out;
- memset(buf, 0, s);
for (j = 0; j < slots_per_blk; j++, slot_index++) {
struct hisi_sas_slot *slot;
@@ -2166,8 +2382,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
slot->buf_dma = buf_dma;
slot->idx = slot_index;
- buf++;
- buf_dma += sizeof(*buf);
+ buf += sz_slot_buf_ru;
+ buf_dma += sz_slot_buf_ru;
}
}
@@ -2365,7 +2581,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
}
- if (hisi_sas_alloc(hisi_hba, shost)) {
+ if (hisi_sas_alloc(hisi_hba)) {
hisi_sas_free(hisi_hba);
goto err_out;
}
@@ -2461,6 +2677,555 @@ err_out_ha:
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
+struct dentry *hisi_sas_debugfs_dir;
+
+static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
+{
+ int queue_entry_size = hisi_hba->hw->complete_hdr_size;
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ memcpy(hisi_hba->debugfs_complete_hdr[i],
+ hisi_hba->complete_hdr[i],
+ HISI_SAS_QUEUE_SLOTS * queue_entry_size);
+}
+
+static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
+{
+ int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
+ int j;
+
+ debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
+ cmd_hdr = hisi_hba->cmd_hdr[i];
+
+ for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
+ memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
+ queue_entry_size);
+ }
+}
+
+static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
+{
+ const struct hisi_sas_debugfs_reg *port =
+ hisi_hba->hw->debugfs_reg_port;
+ int i, phy_cnt;
+ u32 offset;
+ u32 *databuf;
+
+ for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
+ databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
+ for (i = 0; i < port->count; i++, databuf++) {
+ offset = port->base_off + 4 * i;
+ *databuf = port->read_port_reg(hisi_hba, phy_cnt,
+ offset);
+ }
+ }
+}
+
+static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
+{
+ u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg;
+ const struct hisi_sas_debugfs_reg *global =
+ hisi_hba->hw->debugfs_reg_global;
+ int i;
+
+ for (i = 0; i < global->count; i++, databuf++)
+ *databuf = global->read_global_reg(hisi_hba, 4 * i);
+}
+
+static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
+{
+ void *databuf = hisi_hba->debugfs_itct;
+ struct hisi_sas_itct *itct;
+ int i;
+
+ itct = hisi_hba->itct;
+
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
+ databuf += sizeof(struct hisi_sas_itct);
+ }
+}
+
+static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
+{
+ int max_command_entries = hisi_hba->hw->max_command_entries;
+ void *databuf = hisi_hba->debugfs_iost;
+ struct hisi_sas_iost *iost;
+ int i;
+
+ iost = hisi_hba->iost;
+
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
+ databuf += sizeof(struct hisi_sas_iost);
+ }
+}
+
+static const char *
+hisi_sas_debugfs_to_reg_name(int off, int base_off,
+ const struct hisi_sas_debugfs_reg_lu *lu)
+{
+ for (; lu->name; lu++) {
+ if (off == lu->off - base_off)
+ return lu->name;
+ }
+
+ return NULL;
+}
+
+static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
+ struct seq_file *s)
+{
+ const struct hisi_sas_debugfs_reg *reg = ptr;
+ int i;
+
+ for (i = 0; i < reg->count; i++) {
+ int off = i * 4;
+ const char *name;
+
+ name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
+ reg->lu);
+
+ if (name)
+ seq_printf(s, "0x%08x 0x%08x %s\n", off,
+ regs_val[i], name);
+ else
+ seq_printf(s, "0x%08x 0x%08x\n", off,
+ regs_val[i]);
+ }
+}
+
+static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global;
+
+ hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg,
+ reg_global, s);
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_global_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_global_fops = {
+ .open = hisi_sas_debugfs_global_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
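All of the read-only dump files below repeat the same seq_file idiom, so it is worth seeing once in isolation. A minimal self-contained sketch, with hypothetical demo_* names that are not part of the driver:

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

/* The object to dump travels through inode->i_private, which
 * debugfs_create_file() fills in from its data argument; single_open()
 * then exposes it as seq_file::private. */
static int demo_show(struct seq_file *s, void *p)
{
	int *val = s->private;

	seq_printf(s, "value: %d\n", *val);
	return 0;
}

static int demo_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.open = demo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};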
+
+static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_phy *phy = s->private;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
+ u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];
+
+ hisi_sas_debugfs_print_reg(databuf, reg_port, s);
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_port_fops = {
+ .open = hisi_sas_debugfs_port_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int hisi_sas_show_row_64(struct seq_file *s, int index,
+ int sz, __le64 *ptr)
+{
+ int i;
+
+ /* completion header size is not fixed across HW versions */

+ seq_printf(s, "index %04d:\n\t", index);
+ for (i = 1; i <= sz / 8; i++, ptr++) {
+ seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
+ if (!(i % 2))
+ seq_puts(s, "\n\t");
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int hisi_sas_show_row_32(struct seq_file *s, int index,
+ int sz, __le32 *ptr)
+{
+ int i;
+
+ /* completion header size is not fixed across HW versions */
+ seq_printf(s, "index %04d:\n\t", index);
+ for (i = 1; i <= sz / 4; i++, ptr++) {
+ seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
+ if (!(i % 4))
+ seq_puts(s, "\n\t");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
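For reference, hisi_sas_show_row_32() renders one 16-dword (64-byte) command header roughly as follows; the values are illustrative only:

index 0003:
	 0x04010000 0x00000000 0x00020000 0x00000000
	 0x00000000 0x00000000 0x00000000 0x00000000
	 0x00000000 0x00000000 0x00000000 0x00000000
	 0x00000000 0x00000000 0x00000000 0x00000000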
+
+static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
+{
+ struct hisi_sas_cq *cq = cq_ptr;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
+ __le32 *complete_hdr = complete_queue +
+ (hisi_hba->hw->complete_hdr_size * slot);
+
+ return hisi_sas_show_row_32(s, slot,
+ hisi_hba->hw->complete_hdr_size,
+ complete_hdr);
+}
+
+static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_cq *cq = s->private;
+ int slot, ret;
+
+ for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
+ ret = hisi_sas_cq_show_slot(s, slot, cq);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_cq_fops = {
+ .open = hisi_sas_debugfs_cq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
+{
+ struct hisi_sas_dq *dq = dq_ptr;
+ struct hisi_hba *hisi_hba = dq->hisi_hba;
+ void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
+ __le32 *cmd_hdr = cmd_queue +
+ sizeof(struct hisi_sas_cmd_hdr) * slot;
+
+ return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr),
+ cmd_hdr);
+}
+
+static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
+{
+ int slot, ret;
+
+ for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
+ ret = hisi_sas_dq_show_slot(s, slot, s->private);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_dq_fops = {
+ .open = hisi_sas_debugfs_dq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
+ int i, ret, max_command_entries = hisi_hba->hw->max_command_entries;
+ __le64 *iost = &debugfs_iost->qw0;
+
+ for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
+ ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost),
+ iost);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_iost_fops = {
+ .open = hisi_sas_debugfs_iost_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
+{
+ int i, ret;
+ struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
+ __le64 *itct = &debugfs_itct->qw0;
+
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
+ ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct),
+ itct);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_itct_fops = {
+ .open = hisi_sas_debugfs_itct_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
+{
+ struct dentry *dump_dentry;
+ struct dentry *dentry;
+ char name[256];
+ int p;
+ int c;
+ int d;
+
+ /* Create dump dir inside device dir */
+ dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+ hisi_hba->debugfs_dump_dentry = dump_dentry;
+
+ debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
+ &hisi_sas_debugfs_global_fops);
+
+ /* Create port dir and files */
+ dentry = debugfs_create_dir("port", dump_dentry);
+ for (p = 0; p < hisi_hba->n_phy; p++) {
+ snprintf(name, 256, "%d", p);
+
+ debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
+ &hisi_sas_debugfs_port_fops);
+ }
+
+ /* Create CQ dir and files */
+ dentry = debugfs_create_dir("cq", dump_dentry);
+ for (c = 0; c < hisi_hba->queue_count; c++) {
+ snprintf(name, 256, "%d", c);
+
+ debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
+ &hisi_sas_debugfs_cq_fops);
+ }
+
+ /* Create DQ dir and files */
+ dentry = debugfs_create_dir("dq", dump_dentry);
+ for (d = 0; d < hisi_hba->queue_count; d++) {
+ snprintf(name, 256, "%d", d);
+
+ debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
+ &hisi_sas_debugfs_dq_fops);
+ }
+
+ debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
+ &hisi_sas_debugfs_iost_fops);
+
+ debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
+ &hisi_sas_debugfs_itct_fops);
+}
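Taken together with the trigger_dump file created in hisi_sas_debugfs_init() below, a completed dump leaves a tree like this under debugfs (the directory name comes from dev_name(); entry counts follow n_phy and queue_count):

/sys/kernel/debug/hisi_sas/<dev_name>/
	trigger_dump
	dump/
		global
		port/0 ... port/<n_phy - 1>
		cq/0 ... cq/<queue_count - 1>
		dq/0 ... dq/<queue_count - 1>
		iost
		itct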
+
+static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
+{
+ hisi_hba->hw->snapshot_prepare(hisi_hba);
+
+ hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
+ hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
+ hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
+ hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
+ hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
+ hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);
+
+ hisi_sas_debugfs_create_files(hisi_hba);
+
+ hisi_hba->hw->snapshot_restore(hisi_hba);
+}
+
+static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hisi_hba *hisi_hba = file->f_inode->i_private;
+ char buf[8];
+
+ /* A bit racy, but don't care too much since it's only debugfs */
+ if (hisi_hba->debugfs_snapshot)
+ return -EBUSY;
+
+ if (count > 8)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (buf[0] != '1')
+ return -EINVAL;
+
+ queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+
+ return count;
+}
+
+static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
+ .write = &hisi_sas_debugfs_trigger_dump_write,
+ .owner = THIS_MODULE,
+};
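In use, writing the single character '1' (for example, echo 1 > /sys/kernel/debug/hisi_sas/<dev_name>/trigger_dump) queues debugfs_work; anything else, an oversized write, or a request while a snapshot already exists is rejected, since each host supports exactly one dump.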
+
+void hisi_sas_debugfs_work_handler(struct work_struct *work)
+{
+ struct hisi_hba *hisi_hba =
+ container_of(work, struct hisi_hba, debugfs_work);
+
+ if (hisi_hba->debugfs_snapshot)
+ return;
+ hisi_hba->debugfs_snapshot = true;
+
+ hisi_sas_debugfs_snapshot_regs(hisi_hba);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
+
+void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
+{
+ int max_command_entries = hisi_hba->hw->max_command_entries;
+ struct device *dev = hisi_hba->dev;
+ int p, i, c, d;
+ size_t sz;
+
+ hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
+ hisi_sas_debugfs_dir);
+ debugfs_create_file("trigger_dump", 0600,
+ hisi_hba->debugfs_dir,
+ hisi_hba,
+ &hisi_sas_debugfs_trigger_dump_fops);
+
+ /* Alloc buffer for global */
+ sz = hisi_hba->hw->debugfs_reg_global->count * 4;
+ hisi_hba->debugfs_global_reg =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+
+ if (!hisi_hba->debugfs_global_reg)
+ goto fail_global;
+
+ /* Alloc buffer for port */
+ sz = hisi_hba->hw->debugfs_reg_port->count * 4;
+ for (p = 0; p < hisi_hba->n_phy; p++) {
+ hisi_hba->debugfs_port_reg[p] =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+
+ if (!hisi_hba->debugfs_port_reg[p])
+ goto fail_port;
+ }
+
+ /* Alloc buffer for cq */
+ sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
+ for (c = 0; c < hisi_hba->queue_count; c++) {
+ hisi_hba->debugfs_complete_hdr[c] =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+
+ if (!hisi_hba->debugfs_complete_hdr[c])
+ goto fail_cq;
+ }
+
+ /* Alloc buffer for dq */
+ sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
+ for (d = 0; d < hisi_hba->queue_count; d++) {
+ hisi_hba->debugfs_cmd_hdr[d] =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+
+ if (!hisi_hba->debugfs_cmd_hdr[d])
+ goto fail_iost_dq;
+ }
+
+ /* Alloc buffer for iost */
+ sz = max_command_entries * sizeof(struct hisi_sas_iost);
+
+ hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost)
+ goto fail_iost_dq;
+
+ /* Alloc buffer for itct */
+ /* Any new allocation added later must be placed before the itct one */
+ sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
+
+ hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct)
+ goto fail_itct;
+
+ return;
+fail_itct:
+ devm_kfree(dev, hisi_hba->debugfs_iost);
+fail_iost_dq:
+ for (i = 0; i < d; i++)
+ devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
+fail_cq:
+ for (i = 0; i < c; i++)
+ devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
+fail_port:
+ for (i = 0; i < p; i++)
+ devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
+ devm_kfree(dev, hisi_hba->debugfs_global_reg);
+fail_global:
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+ dev_dbg(dev, "failed to init debugfs!\n");
+}
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
+
+void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba)
+{
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit);
+
int hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
@@ -2479,18 +3244,28 @@ int hisi_sas_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
+bool hisi_sas_debugfs_enable;
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
+module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
+MODULE_PARM_DESC(debugfs_enable, "Enable driver debugfs (default disabled)");
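With the parameter left at its default no debugfs directory is created at all; loading with debugfs_enable=1 (for example, modprobe hisi_sas_main debugfs_enable=1, module name assumed from the source file) turns the feature on, and the 0444 mode makes the setting readable but not writable at runtime.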
+
static __init int hisi_sas_init(void)
{
hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
if (!hisi_sas_stt)
return -ENOMEM;
+ if (hisi_sas_debugfs_enable)
+ hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
+
return 0;
}
static __exit void hisi_sas_exit(void)
{
sas_release_transport(hisi_sas_stt);
+
+ debugfs_remove(hisi_sas_debugfs_dir);
}
module_init(hisi_sas_init);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 28ab52a021cf..293807443480 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -835,7 +835,7 @@ static void phys_init_v1_hw(struct hisi_hba *hisi_hba)
mod_timer(timer, jiffies + HZ);
}
-static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
+static void sl_notify_ssp_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 sl_control;
@@ -1749,6 +1749,8 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
}
}
+ hisi_hba->cq_nvecs = hisi_hba->queue_count;
+
return 0;
}
@@ -1826,7 +1828,7 @@ static struct scsi_host_template sht_v1_hw = {
static const struct hisi_sas_hw hisi_sas_v1_hw = {
.hw_init = hisi_sas_v1_init,
.setup_itct = setup_itct_v1_hw,
- .sl_notify = sl_notify_v1_hw,
+ .sl_notify_ssp = sl_notify_ssp_v1_hw,
.clear_itct = clear_itct_v1_hw,
.prep_smp = prep_smp_v1_hw,
.prep_ssp = prep_ssp_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index c8ebff3ba559..89160ab3efb0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -868,12 +868,13 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
hisi_hba->devices[i].device_id = i;
sas_dev = &hisi_hba->devices[i];
- sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+ sas_dev->dev_status = HISI_SAS_DEV_INIT;
sas_dev->dev_type = device->dev_type;
sas_dev->hisi_hba = hisi_hba;
sas_dev->sas_device = device;
sas_dev->sata_idx = sata_idx;
sas_dev->dq = dq;
+ spin_lock_init(&sas_dev->lock);
INIT_LIST_HEAD(&hisi_hba->devices[i].list);
break;
}
@@ -1589,7 +1590,7 @@ static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
-static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+static void sl_notify_ssp_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 sl_control;
@@ -2677,6 +2678,8 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
if (is_sata_phy_v2_hw(hisi_hba, phy_no))
goto end;
+ del_timer(&phy->timer);
+
if (phy_no == 8) {
u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
@@ -2756,6 +2759,7 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_port *port = phy->port;
struct device *dev = hisi_hba->dev;
+ del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -2944,6 +2948,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
phy_bcast_v2_hw(phy_no, hisi_hba);
+ if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
+ hisi_sas_phy_oob_ready(hisi_hba, phy_no);
+
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT0, irq_value0
& (~CHL_INT0_HOTPLUG_TOUT_MSK)
@@ -3227,6 +3234,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
unsigned long flags;
int phy_no, offset;
+ del_timer(&phy->timer);
+
phy_no = sas_phy->id;
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
@@ -3393,6 +3402,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
}
+ hisi_hba->cq_nvecs = hisi_hba->queue_count;
+
return 0;
free_cq_int_irqs:
@@ -3542,8 +3553,8 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
return 0;
}
-static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
- int delay_ms, int timeout_ms)
+static int wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms)
{
struct device *dev = hisi_hba->dev;
int entries, entries_old = 0, time;
@@ -3557,7 +3568,12 @@ static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
msleep(delay_ms);
}
+ if (time >= timeout_ms)
+ return -ETIMEDOUT;
+
dev_dbg(dev, "wait commands complete %dms\n", time);
+
+ return 0;
}
static struct device_attribute *host_attrs_v2_hw[] = {
@@ -3590,7 +3606,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.setup_itct = setup_itct_v2_hw,
.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
.alloc_dev = alloc_dev_quirk_v2_hw,
- .sl_notify = sl_notify_v2_hw,
+ .sl_notify_ssp = sl_notify_ssp_v2_hw,
.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
.clear_itct = clear_itct_v2_hw,
.free_device = free_device_v2_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index e0570fd8466e..086695a4099f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -11,7 +11,7 @@
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v3_hw"
-/* global registers need init*/
+/* global registers need init */
#define DLVRY_QUEUE_ENABLE 0x0
#define IOST_BASE_ADDR_LO 0x8
#define IOST_BASE_ADDR_HI 0xc
@@ -129,6 +129,7 @@
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
#define CMD_HDR_PIR_OFF 8
#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF)
+#define SERDES_CFG (PORT_BASE + 0x1c)
#define SL_CFG (PORT_BASE + 0x84)
#define AIP_LIMIT (PORT_BASE + 0x90)
#define SL_CONTROL (PORT_BASE + 0x94)
@@ -181,11 +182,14 @@
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
+#define CHL_INT2_RX_DISP_ERR_OFF 28
+#define CHL_INT2_RX_CODE_ERR_OFF 29
#define CHL_INT2_RX_INVLD_DW_OFF 30
#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
+#define SAS_EC_INT_COAL_TIME (PORT_BASE + 0x1cc)
#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
#define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4)
#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
@@ -205,6 +209,7 @@
#define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
#define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
#define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
+#define ERR_CNT_CODE_ERR (PORT_BASE + 0x394)
#define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
@@ -397,6 +402,11 @@ struct hisi_sas_err_record_v3 {
#define USR_DATA_BLOCK_SZ_OFF 20
#define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF)
#define T10_CHK_MSK_OFF 16
+#define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF)
+#define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF)
+
+#define BASE_VECTORS_V3_HW 16
+#define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1)
static bool hisi_sas_intr_conv;
MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
@@ -406,6 +416,11 @@ static int prot_mask;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
+static bool auto_affine_msi_experimental;
+module_param(auto_affine_msi_experimental, bool, 0444);
+MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs (experimental, default off)");
+
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
void __iomem *regs = hisi_hba->regs + off;
@@ -511,6 +526,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
}
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
+ hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
@@ -532,6 +548,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
+ hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
+ 0x30f4240);
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
@@ -716,7 +734,7 @@ static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
- /* clear the itct table*/
+ /* clear the itct table */
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
@@ -868,7 +886,7 @@ static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
}
}
-static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 sl_control;
@@ -967,19 +985,44 @@ static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
- hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+ hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+}
+
+static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ struct hisi_sas_cmd_hdr *hdr,
+ struct scatterlist *scatter,
+ int n_elem)
+{
+ struct hisi_sas_sge_dif_page *sge_dif_page;
+ struct scatterlist *sg;
+ int i;
+
+ sge_dif_page = hisi_sas_sge_dif_addr_mem(slot);
+
+ for_each_sg(scatter, sg, n_elem, i) {
+ struct hisi_sas_sge *entry = &sge_dif_page->sge[i];
+
+ entry->addr = cpu_to_le64(sg_dma_address(sg));
+ entry->page_ctrl_0 = 0;
+ entry->page_ctrl_1 = 0;
+ entry->data_len = cpu_to_le32(sg_dma_len(sg));
+ entry->data_off = 0;
+ }
+
+ hdr->dif_prd_table_addr =
+ cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot));
+
+ hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF);
}
static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
{
unsigned char prot_flags = scsi_cmnd->prot_flags;
- if (prot_flags & SCSI_PROT_TRANSFER_PI) {
- if (prot_flags & SCSI_PROT_REF_CHECK)
- return 0xc << 16;
- return 0xfc << 16;
- }
- return 0;
+ if (prot_flags & SCSI_PROT_REF_CHECK)
+ return T10_CHK_APP_TAG_MSK;
+ return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK;
}
static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
@@ -990,15 +1033,33 @@ static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
switch (prot_op) {
+ case SCSI_PROT_READ_INSERT:
+ prot->dw0 |= T10_INSRT_EN_MSK;
+ prot->lbrtgv = lbrt_chk_val;
+ break;
case SCSI_PROT_READ_STRIP:
prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
prot->lbrtcv = lbrt_chk_val;
prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
break;
+ case SCSI_PROT_READ_PASS:
+ prot->dw0 |= T10_CHK_EN_MSK;
+ prot->lbrtcv = lbrt_chk_val;
+ prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
+ break;
case SCSI_PROT_WRITE_INSERT:
prot->dw0 |= T10_INSRT_EN_MSK;
prot->lbrtgv = lbrt_chk_val;
break;
+ case SCSI_PROT_WRITE_STRIP:
+ prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
+ prot->lbrtcv = lbrt_chk_val;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ prot->dw0 |= T10_CHK_EN_MSK;
+ prot->lbrtcv = lbrt_chk_val;
+ prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
+ break;
default:
WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
break;
@@ -1033,8 +1094,8 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
struct hisi_sas_tmf_task *tmf = slot->tmf;
- unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
int has_data = 0, priority = !!tmf;
+ unsigned char prot_op;
u8 *buf_cmd;
u32 dw1 = 0, dw2 = 0, len = 0;
@@ -1049,6 +1110,7 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
} else {
+ prot_op = scsi_get_prot_op(scsi_cmnd);
dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
switch (scsi_cmnd->sc_data_direction) {
case DMA_TO_DEVICE:
@@ -1074,9 +1136,15 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
hdr->dw2 = cpu_to_le32(dw2);
hdr->transfer_tags = cpu_to_le32(slot->idx);
- if (has_data)
+ if (has_data) {
prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
- slot->n_elem);
+ slot->n_elem);
+
+ if (scsi_prot_sg_count(scsi_cmnd))
+ prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr,
+ scsi_prot_sglist(scsi_cmnd),
+ slot->n_elem_dif);
+ }
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
@@ -1117,18 +1185,19 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
fill_prot_v3_hw(scsi_cmnd, &prot);
memcpy(buf_cmd_prot, &prot,
sizeof(struct hisi_sas_protect_iu_v3_hw));
-
/*
* For READ, we need length of info read to memory, while for
* WRITE we need length of data written to the disk.
*/
- if (prot_op == SCSI_PROT_WRITE_INSERT) {
+ if (prot_op == SCSI_PROT_WRITE_INSERT ||
+ prot_op == SCSI_PROT_READ_INSERT ||
+ prot_op == SCSI_PROT_WRITE_PASS ||
+ prot_op == SCSI_PROT_READ_PASS) {
unsigned int interval = scsi_prot_interval(scsi_cmnd);
unsigned int ilog2_interval = ilog2(interval);
len = (task->total_xfer_len >> ilog2_interval) * 8;
}
-
}
hdr->dw1 = cpu_to_le32(dw1);
@@ -1281,13 +1350,15 @@ static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
- int i, res;
+ int i;
+ irqreturn_t res;
u32 context, port_id, link_rate;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
unsigned long flags;
+ del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
@@ -1381,9 +1452,11 @@ end:
static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
u32 phy_state, sl_ctrl, txid_auto;
struct device *dev = hisi_hba->dev;
+ del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -1509,6 +1582,39 @@ static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
}
+static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_phy *sphy = sas_phy->phy;
+ unsigned long flags;
+ u32 reg_value;
+
+ spin_lock_irqsave(&phy->lock, flags);
+
+ /* loss of dword sync */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
+ sphy->loss_of_dword_sync_count += reg_value;
+
+ /* phy reset problem */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
+ sphy->phy_reset_problem_count += reg_value;
+
+ /* invalid dword */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
+ sphy->invalid_dword_count += reg_value;
+
+ /* disparity err */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
+ sphy->running_disparity_error_count += reg_value;
+
+ /* code violation error */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
+ phy->code_violation_err_count += reg_value;
+
+ spin_unlock_irqrestore(&phy->lock, flags);
+}
+
static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
@@ -1516,6 +1622,9 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct pci_dev *pci_dev = hisi_hba->pci_dev;
struct device *dev = hisi_hba->dev;
+ static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
+ BIT(CHL_INT2_RX_CODE_ERR_OFF) |
+ BIT(CHL_INT2_RX_INVLD_DW_OFF);
irq_value &= ~irq_msk;
if (!irq_value)
@@ -1536,6 +1645,25 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}
+ if (pci_dev->revision > 0x20 && (irq_value & msk)) {
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_phy *sphy = sas_phy->phy;
+
+ phy_get_events_v3_hw(hisi_hba, phy_no);
+
+ if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
+ dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no,
+ sphy->invalid_dword_count);
+
+ if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
+ dev_info(dev, "phy%d code violation cnt: %u\n", phy_no,
+ phy->code_violation_err_count);
+
+ if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
+ dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
+ sphy->running_disparity_error_count);
+ }
+
if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
(pci_dev->revision == 0x20)) {
u32 reg_value;
@@ -1552,6 +1680,19 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
}
+static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0);
+
+ if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
+ hisi_sas_phy_oob_ready(hisi_hba, phy_no);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
+}
+
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
@@ -1562,8 +1703,8 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
& 0xeeeeeeee;
while (irq_msk) {
- u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT0);
+ if (irq_msk & (2 << (phy_no * 4)))
+ handle_chl_int0_v3_hw(hisi_hba, phy_no);
if (irq_msk & (4 << (phy_no * 4)))
handle_chl_int1_v3_hw(hisi_hba, phy_no);
@@ -1571,13 +1712,6 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
if (irq_msk & (8 << (phy_no * 4)))
handle_chl_int2_v3_hw(hisi_hba, phy_no);
- if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT0, irq_value0
- & (~CHL_INT0_SL_RX_BCST_ACK_MSK)
- & (~CHL_INT0_SL_PHY_ENABLE_MSK)
- & (~CHL_INT0_NOT_RDY_MSK));
- }
irq_msk &= ~(0xe << (phy_no * 4));
phy_no++;
}
@@ -1644,6 +1778,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
u32 irq_value, irq_msk;
struct hisi_hba *hisi_hba = p;
struct device *dev = hisi_hba->dev;
+ struct pci_dev *pdev = hisi_hba->pci_dev;
int i;
irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
@@ -1675,6 +1810,17 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
+
+ if (pdev->revision < 0x21) {
+ u32 reg_val;
+
+ reg_val = hisi_sas_read32(hisi_hba,
+ AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
+ }
}
if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
@@ -1959,21 +2105,68 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
+static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
+{
+ const struct cpumask *mask;
+ int queue, cpu;
+
+ for (queue = 0; queue < nvecs; queue++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
+
+ mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue +
+ BASE_VECTORS_V3_HW);
+ if (!mask)
+ goto fallback;
+ cq->pci_irq_mask = mask;
+ for_each_cpu(cpu, mask)
+ hisi_hba->reply_map[cpu] = queue;
+ }
+ return;
+
+fallback:
+ for_each_possible_cpu(cpu)
+ hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
+ /* Masks already assigned to earlier CQs are deliberately left in place */
+}
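The reply map built here is meant to be consumed at submission time: the submitting CPU indexes it to find the queue whose managed IRQ is affine to that CPU, so the completion is handled locally. A minimal sketch of that lookup, assuming the driver's hisi_sas.h types and a hypothetical helper name:

/* Pick the delivery queue whose completion IRQ targets this CPU */
static struct hisi_sas_dq *demo_select_dq(struct hisi_hba *hisi_hba)
{
	int queue = hisi_hba->reply_map[raw_smp_processor_id()];

	return &hisi_hba->dq[queue];
}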
+
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
struct pci_dev *pdev = hisi_hba->pci_dev;
int vectors, rc;
int i, k;
- int max_msi = HISI_SAS_MSI_COUNT_V3_HW;
-
- vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1,
- max_msi, PCI_IRQ_MSI);
- if (vectors < max_msi) {
- dev_err(dev, "could not allocate all msi (%d)\n", vectors);
- return -ENOENT;
+ int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
+
+ if (auto_affine_msi_experimental) {
+ struct irq_affinity desc = {
+ .pre_vectors = BASE_VECTORS_V3_HW,
+ };
+
+ min_msi = MIN_AFFINE_VECTORS_V3_HW;
+
+ hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
+ sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!hisi_hba->reply_map)
+ return -ENOMEM;
+ vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
+ min_msi, max_msi,
+ PCI_IRQ_MSI |
+ PCI_IRQ_AFFINITY,
+ &desc);
+ if (vectors < 0)
+ return -ENOENT;
+ setup_reply_map_v3_hw(hisi_hba, vectors - BASE_VECTORS_V3_HW);
+ } else {
+ min_msi = max_msi;
+ vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, min_msi,
+ max_msi, PCI_IRQ_MSI);
+ if (vectors < 0)
+ return vectors;
}
+ hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
+
rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
int_phy_up_down_bcast_v3_hw, 0,
DRV_NAME " phy", hisi_hba);
@@ -2002,7 +2195,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
}
/* Init tasklets for cq only */
- for (i = 0; i < hisi_hba->queue_count; i++) {
+ for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
struct tasklet_struct *t = &cq->tasklet;
int nr = hisi_sas_intr_conv ? 16 : 16 + i;
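The affinity-managed branch above reserves the first BASE_VECTORS_V3_HW vectors for the non-CQ interrupts (phy up/down/broadcast, channel, fatal AXI) and lets the IRQ core spread only the remaining CQ vectors across CPUs. A condensed stand-alone sketch of that allocation, with a hypothetical demo_ helper:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int demo_alloc_vectors(struct pci_dev *pdev, int min_msi, int max_msi)
{
	struct irq_affinity desc = {
		.pre_vectors = 16, /* BASE_VECTORS_V3_HW: excluded from spreading */
	};

	return pci_alloc_irq_vectors_affinity(pdev, min_msi, max_msi,
					      PCI_IRQ_MSI | PCI_IRQ_AFFINITY,
					      &desc);
}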
@@ -2099,31 +2292,6 @@ static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
return hisi_sas_read32(hisi_hba, PHY_STATE);
}
-static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
-{
- struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_phy *sphy = sas_phy->phy;
- u32 reg_value;
-
- /* loss dword sync */
- reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
- sphy->loss_of_dword_sync_count += reg_value;
-
- /* phy reset problem */
- reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
- sphy->phy_reset_problem_count += reg_value;
-
- /* invalid dword */
- reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
- sphy->invalid_dword_count += reg_value;
-
- /* disparity err */
- reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
- sphy->running_disparity_error_count += reg_value;
-
-}
-
static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@@ -2201,8 +2369,8 @@ static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
return 0;
}
-static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
- int delay_ms, int timeout_ms)
+static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms)
{
struct device *dev = hisi_hba->dev;
int entries, entries_old = 0, time;
@@ -2216,7 +2384,12 @@ static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
msleep(delay_ms);
}
+ if (time >= timeout_ms)
+ return -ETIMEDOUT;
+
dev_dbg(dev, "wait commands complete %dms\n", time);
+
+ return 0;
}
static ssize_t intr_conv_v3_hw_show(struct device *dev,
@@ -2332,6 +2505,159 @@ static struct device_attribute *host_attrs_v3_hw[] = {
NULL
};
+static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
+ HISI_SAS_DEBUGFS_REG(PHY_CFG),
+ HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
+ HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE),
+ HISI_SAS_DEBUGFS_REG(PHY_CTRL),
+ HISI_SAS_DEBUGFS_REG(SL_CFG),
+ HISI_SAS_DEBUGFS_REG(AIP_LIMIT),
+ HISI_SAS_DEBUGFS_REG(SL_CONTROL),
+ HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS),
+ HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0),
+ HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1),
+ HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2),
+ HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3),
+ HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4),
+ HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5),
+ HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6),
+ HISI_SAS_DEBUGFS_REG(TXID_AUTO),
+ HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0),
+ HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H),
+ HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER),
+ HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE),
+ HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER),
+ HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG),
+ HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG),
+ HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG),
+ HISI_SAS_DEBUGFS_REG(CHL_INT0),
+ HISI_SAS_DEBUGFS_REG(CHL_INT1),
+ HISI_SAS_DEBUGFS_REG(CHL_INT2),
+ HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK),
+ HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK),
+ HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK),
+ HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME),
+ HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN),
+ HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER),
+ HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK),
+ HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK),
+ HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK),
+ HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK),
+ HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK),
+ HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK),
+ HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS),
+ HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS),
+ HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME),
+ HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST),
+ HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB),
+ HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW),
+ HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR),
+ HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR),
+ {}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
+ .lu = debugfs_port_reg_lu,
+ .count = 0x100,
+ .base_off = PORT_BASE,
+ .read_port_reg = hisi_sas_phy_read32,
+};
+
+static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
+ HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE),
+ HISI_SAS_DEBUGFS_REG(PHY_CONTEXT),
+ HISI_SAS_DEBUGFS_REG(PHY_STATE),
+ HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA),
+ HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE),
+ HISI_SAS_DEBUGFS_REG(ITCT_CLR),
+ HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO),
+ HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI),
+ HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
+ HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
+ HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
+ HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
+ HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
+ HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
+ HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE),
+ HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME),
+ HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME),
+ HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME),
+ HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME),
+ HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME),
+ HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN),
+ HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME),
+ HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2),
+ HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT),
+ HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE),
+ HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS),
+ HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS),
+ HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO),
+ HISI_SAS_DEBUGFS_REG(INT_COAL_EN),
+ HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME),
+ HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT),
+ HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME),
+ HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT),
+ HISI_SAS_DEBUGFS_REG(OQ_INT_SRC),
+ HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK),
+ HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1),
+ HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2),
+ HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3),
+ HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1),
+ HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2),
+ HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3),
+ HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK),
+ HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK),
+ HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK),
+ HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR),
+ HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK),
+ HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN),
+ HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT),
+ HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH),
+ HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR),
+ HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR),
+ HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG),
+ HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK),
+ HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH),
+ HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR),
+ HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR),
+ HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG),
+ HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG),
+ HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX),
+ HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0),
+ HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1),
+ HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1),
+ HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD),
+ {}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
+ .lu = debugfs_global_reg_lu,
+ .count = 0x800,
+ .read_global_reg = hisi_sas_read32,
+};
+
+static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+
+ if (wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000) == -ETIMEDOUT)
+ dev_dbg(dev, "Wait commands complete timeout!\n");
+
+ hisi_sas_kill_tasklets(hisi_hba);
+}
+
+static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
+{
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
+ (u32)((1ULL << hisi_hba->queue_count) - 1));
+
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+}
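These two hooks bracket the register copy done by hisi_sas_debugfs_snapshot_regs(): prepare sets HISI_SAS_REJECT_CMD_BIT, disables every delivery queue, gives in-flight commands up to five seconds to drain and kills the CQ tasklets; restore re-enables the queues with an all-ones mask ((1ULL << queue_count) - 1, e.g. 0xffff for 16 queues) and clears the reject bit.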
+
static struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,
@@ -2344,6 +2670,7 @@ static struct scsi_host_template sht_v3_hw = {
.bios_param = sas_bios_param,
.this_id = -1,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
+ .sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
@@ -2360,7 +2687,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
.clear_itct = clear_itct_v3_hw,
- .sl_notify = sl_notify_v3_hw,
+ .sl_notify_ssp = sl_notify_ssp_v3_hw,
.prep_ssp = prep_ssp_v3_hw,
.prep_smp = prep_smp_v3_hw,
.prep_stp = prep_ata_v3_hw,
@@ -2380,6 +2707,10 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.get_events = phy_get_events_v3_hw,
.write_gpio = write_gpio_v3_hw,
.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
+ .debugfs_reg_global = &debugfs_global_reg,
+ .debugfs_reg_port = &debugfs_port_reg,
+ .snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
+ .snapshot_restore = debugfs_snapshot_restore_v3_hw,
};
static struct Scsi_Host *
@@ -2397,6 +2728,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
hisi_hba = shost_priv(shost);
INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
+ INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler);
hisi_hba->hw = &hisi_sas_v3_hw;
hisi_hba->pci_dev = pdev;
hisi_hba->dev = dev;
@@ -2414,7 +2746,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
- if (hisi_sas_alloc(hisi_hba, shost)) {
+ if (hisi_sas_alloc(hisi_hba)) {
hisi_sas_free(hisi_hba);
goto err_out;
}
@@ -2513,8 +2845,14 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
prot_mask);
scsi_host_set_prot(hisi_hba->shost, prot_mask);
+ if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
+ scsi_host_set_guard(hisi_hba->shost,
+ SHOST_DIX_GUARD_CRC);
}
+ if (hisi_sas_debugfs_enable)
+ hisi_sas_debugfs_init(hisi_hba);
+
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_ha;
@@ -2551,7 +2889,7 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
free_irq(pci_irq_vector(pdev, 1), hisi_hba);
free_irq(pci_irq_vector(pdev, 2), hisi_hba);
free_irq(pci_irq_vector(pdev, 11), hisi_hba);
- for (i = 0; i < hisi_hba->queue_count; i++) {
+ for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
int nr = hisi_sas_intr_conv ? 16 : 16 + i;
@@ -2567,6 +2905,8 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
+ hisi_sas_debugfs_exit(hisi_hba);
+
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index ff67ef5d5347..f044e7d10d63 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -251,10 +251,11 @@ static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
-static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
#ifdef CONFIG_COMPAT
-static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
+static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
void __user *arg);
#endif
@@ -1327,7 +1328,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h,
dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
" suspect firmware bug or unsupported hardware "
"configuration.\n");
- return -1;
+ return -1;
}
lun_assigned:
@@ -4110,7 +4111,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
"maximum logical LUNs (%d) exceeded. "
"%d LUNs ignored.\n", HPSA_MAX_LUN,
*nlogicals - HPSA_MAX_LUN);
- *nlogicals = HPSA_MAX_LUN;
+ *nlogicals = HPSA_MAX_LUN;
}
if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
dev_warn(&h->pdev->dev,
@@ -6127,7 +6128,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
#ifdef CONFIG_COMPAT
-static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
void __user *arg)
{
IOCTL32_Command_struct __user *arg32 =
@@ -6164,7 +6165,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
- int cmd, void __user *arg)
+ unsigned int cmd, void __user *arg)
{
BIG_IOCTL32_Command_struct __user *arg32 =
(BIG_IOCTL32_Command_struct __user *) arg;
@@ -6201,7 +6202,8 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
return err;
}
-static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg)
{
switch (cmd) {
case CCISS_GETPCIINFO:
@@ -6521,7 +6523,8 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
/*
* ioctl
*/
-static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg)
{
struct ctlr_info *h;
void __user *argp = (void __user *)arg;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index dbaa4f131433..3ad997ac3510 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -139,6 +139,7 @@ static const struct {
{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+ { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
if (rsp->flags & FCP_RSP_LEN_VALID)
rsp_code = rsp->data.info.rsp_code;
- scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+ scmd_printk(KERN_ERR, cmnd, "Command (%02X): %s (%x:%x) "
"flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
- cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+ cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
if (crq->format == IBMVFC_PARTITION_MIGRATED) {
/* We need to re-setup the interpartition connection */
- dev_info(vhost->dev, "Re-enabling adapter\n");
+ dev_info(vhost->dev, "Partition migrated, re-enabling adapter\n");
vhost->client_migrated = 1;
ibmvfc_purge_requests(vhost, DID_REQUEUE);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
- } else {
- dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+ } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+ dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
ibmvfc_purge_requests(vhost, DID_ERROR);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+ } else {
+ dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
}
return;
case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error, status);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
break;
}
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
- ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
- ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+ ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
break;
}
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
- mad->iu.status, mad->iu.error,
+ be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
- rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
- rsp->fc_explain, status);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+ ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+ status);
break;
}
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
level += ibmvfc_retry_host_init(vhost);
ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
ibmvfc_free_event(evt);
return;
case IBMVFC_MAD_CRQ_ERROR:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index b81a53c4a9a8..459cc288ba1d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
IBMVFC_CRQ_XPORT_EVENT = 0xFF,
};
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
IBMVFC_CRQ_INIT = 0x01,
IBMVFC_CRQ_INIT_COMPLETE = 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+ IBMVFC_PARTNER_FAILED = 0x01,
+ IBMVFC_PARTNER_DEREGISTER = 0x02,
IBMVFC_PARTITION_MIGRATED = 0x06,
};
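The enum split above separates the CRQ initialization messages from the transport events, which is what makes the new three-way dispatch in ibmvfc_handle_crq() explicit. A minimal userspace sketch of that dispatch follows; the enum values mirror ibmvfc.h, but the handler bodies are printf placeholders, not driver code.

#include <stdio.h>

enum ibmvfc_crq_xport_evts {
	IBMVFC_PARTNER_FAILED     = 0x01,
	IBMVFC_PARTNER_DEREGISTER = 0x02,
	IBMVFC_PARTITION_MIGRATED = 0x06,
};

/* Sketch of the three-way transport-event dispatch. */
static void handle_xport_event(unsigned int format)
{
	switch (format) {
	case IBMVFC_PARTITION_MIGRATED:
		printf("partition migrated: re-enable adapter, requeue I/O\n");
		break;
	case IBMVFC_PARTNER_FAILED:
	case IBMVFC_PARTNER_DEREGISTER:
		printf("partner failed/deregistered: purge I/O, reset adapter\n");
		break;
	default:
		printf("unknown transport event (rc=%u)\n", format);
		break;
	}
}

int main(void)
{
	handle_xport_event(IBMVFC_PARTITION_MIGRATED);
	handle_xport_event(0x7f);	/* exercises the unknown-event path */
	return 0;
}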
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 1135e74646e2..8cec5230fe31 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -96,6 +96,7 @@ static int client_reserve = 1;
static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static LIST_HEAD(ibmvscsi_head);
+static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
dev_set_drvdata(&vdev->dev, hostdata);
+ spin_lock(&ibmvscsi_driver_lock);
list_add_tail(&hostdata->host_list, &ibmvscsi_head);
+ spin_unlock(&ibmvscsi_driver_lock);
return 0;
add_srp_port_failed:
@@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
- list_del(&hostdata->host_list);
- unmap_persist_bufs(hostdata);
+ unsigned long flags;
+
+ srp_remove_host(hostdata->host);
+ scsi_remove_host(hostdata->host);
+
+ purge_requests(hostdata, DID_ERROR);
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
release_event_pool(&hostdata->pool, hostdata);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
max_events);
kthread_stop(hostdata->work_thread);
- srp_remove_host(hostdata->host);
- scsi_remove_host(hostdata->host);
+ unmap_persist_bufs(hostdata);
+
+ spin_lock(&ibmvscsi_driver_lock);
+ list_del(&hostdata->host_list);
+ spin_unlock(&ibmvscsi_driver_lock);
+
scsi_host_put(hostdata->host);
return 0;
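The reordered remove path above tears the host down before freeing its resources, and the new ibmvscsi_driver_lock closes a race on the global ibmvscsi_head list. A minimal sketch of the guarded-list pattern, with illustrative names in place of the driver's types:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(example_head);
static DEFINE_SPINLOCK(example_lock);

struct example_host {
	struct list_head host_list;
};

static void example_register(struct example_host *h)
{
	spin_lock(&example_lock);
	list_add_tail(&h->host_list, &example_head);
	spin_unlock(&example_lock);
}

static void example_unregister(struct example_host *h)
{
	/* Unlink only after all other teardown is complete, so no
	 * walker of the list can observe a half-torn-down host. */
	spin_lock(&example_lock);
	list_del(&h->host_list);
	spin_unlock(&example_lock);
}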
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index cc9cae469c4b..7ca277e28d63 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3788,11 +3788,6 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
{
}
@@ -4053,7 +4048,6 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
.release_cmd = ibmvscsis_release_cmd,
.sess_get_index = ibmvscsis_sess_get_index,
.write_pending = ibmvscsis_write_pending,
- .write_pending_status = ibmvscsis_write_pending_status,
.set_default_node_attributes = ibmvscsis_set_default_node_attrs,
.get_cmd_state = ibmvscsis_get_cmd_state,
.queue_data_in = ibmvscsis_queue_data_in,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d1b4025a4503..6d053e220153 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6696,7 +6696,8 @@ err_nodev:
* Return value:
* 0 on success / other on failure
**/
-static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
+ void __user *arg)
{
struct ipr_resource_entry *res;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index cae6368ebb98..ed3debce2819 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -518,7 +518,7 @@ static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
if (!task->sc)
iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
else {
- struct scsi_data_buffer *sdb = scsi_out(task->sc);
+ struct scsi_data_buffer *sdb = &task->sc->sdb;
err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
sdb->table.nents, offset,
@@ -952,12 +952,6 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
return 0;
}
-static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
-{
- blk_queue_flag_set(QUEUE_FLAG_BIDI, sdev->request_queue);
- return 0;
-}
-
static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
{
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
@@ -985,7 +979,6 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
- .slave_alloc = iscsi_sw_tcp_slave_alloc,
.slave_configure = iscsi_sw_tcp_slave_configure,
.target_alloc = iscsi_target_alloc,
.proc_name = "iscsi_tcp",
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index dfba4921b265..5bf61431434b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -2162,7 +2162,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
fc_rport_state(rdata));
- rdata->flags &= ~FC_RP_STARTED;
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 120fc520f27a..e893949a3d11 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -228,32 +228,6 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
return 0;
}
-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
-{
- struct scsi_cmnd *sc = task->sc;
- struct iscsi_rlength_ahdr *rlen_ahdr;
- int rc;
-
- rlen_ahdr = iscsi_next_hdr(task);
- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
- if (rc)
- return rc;
-
- rlen_ahdr->ahslength =
- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
- sizeof(rlen_ahdr->reserved));
- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
- rlen_ahdr->reserved = 0;
- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
-
- ISCSI_DBG_SESSION(task->conn->session,
- "bidi-in rlen_ahdr->read_length(%d) "
- "rlen_ahdr->ahslength(%d)\n",
- be32_to_cpu(rlen_ahdr->read_length),
- be16_to_cpu(rlen_ahdr->ahslength));
- return 0;
-}
-
/**
* iscsi_check_tmf_restrictions - check if a task is affected by TMF
* @task: iscsi task
@@ -392,13 +366,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
memcpy(hdr->cdb, sc->cmnd, cmd_len);
task->imm_count = 0;
- if (scsi_bidi_cmnd(sc)) {
- hdr->flags |= ISCSI_FLAG_CMD_READ;
- rc = iscsi_prep_bidi_ahs(task);
- if (rc)
- return rc;
- }
-
if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
task->protected = true;
@@ -473,12 +440,10 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
conn->scsicmd_pdus_cnt++;
ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
- "itt 0x%x len %d bidi_len %d cmdsn %d win %d]\n",
- scsi_bidi_cmnd(sc) ? "bidirectional" :
+ "itt 0x%x len %d cmdsn %d win %d]\n",
sc->sc_data_direction == DMA_TO_DEVICE ?
"write" : "read", conn->id, sc, sc->cmnd[0],
task->itt, transfer_length,
- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
session->cmdsn,
session->max_cmdsn - session->exp_cmdsn + 1);
return 0;
@@ -647,12 +612,7 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
state = ISCSI_TASK_ABRT_TMF;
sc->result = err << 16;
- if (!scsi_bidi_cmnd(sc))
- scsi_set_resid(sc, scsi_bufflen(sc));
- else {
- scsi_out(sc)->resid = scsi_out(sc)->length;
- scsi_in(sc)->resid = scsi_in(sc)->length;
- }
+ scsi_set_resid(sc, scsi_bufflen(sc));
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
@@ -838,7 +798,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
* @datalen: len of buffer
*
* iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
- * then completes the command and task.
+ * then completes the command and task. Called under back_lock.
**/
static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_task *task, char *data,
@@ -907,14 +867,7 @@ invalid_datalen:
if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
- int res_count = be32_to_cpu(rhdr->bi_residual_count);
-
- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
- res_count <= scsi_in(sc)->length))
- scsi_in(sc)->resid = res_count;
- else
- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
}
if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
@@ -941,6 +894,9 @@ out:
* @conn: iscsi connection
* @hdr: iscsi pdu
* @task: scsi command task
+ *
+ * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received,
+ * then completes the command and task. Called under back_lock.
**/
static void
iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -961,8 +917,8 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (res_count > 0 &&
(rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
- res_count <= scsi_in(sc)->length))
- scsi_in(sc)->resid = res_count;
+ res_count <= sc->sdb.length))
+ scsi_set_resid(sc, res_count);
else
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
}
@@ -1025,6 +981,16 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
return 0;
}
+/**
+ * iscsi_nop_out_rsp - SCSI NOP Response processing
+ * @task: scsi command task
+ * @nop: the nop structure
+ * @data: payload of the nop response
+ * @datalen: length of @data
+ *
+ * iscsi_nop_out_rsp handles the response to a nop-out issued either
+ * internally or from user space. Called under back_lock.
+ **/
static int iscsi_nop_out_rsp(struct iscsi_task *task,
struct iscsi_nopin *nop, char *data, int datalen)
{
@@ -1797,7 +1763,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
return 0;
prepd_reject:
+ spin_lock_bh(&session->back_lock);
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
+ spin_unlock_bh(&session->back_lock);
reject:
spin_unlock_bh(&session->frwd_lock);
ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1805,17 +1773,14 @@ reject:
return SCSI_MLQUEUE_TARGET_BUSY;
prepd_fault:
+ spin_lock_bh(&session->back_lock);
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
+ spin_unlock_bh(&session->back_lock);
fault:
spin_unlock_bh(&session->frwd_lock);
ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
sc->cmnd[0], reason);
- if (!scsi_bidi_cmnd(sc))
- scsi_set_resid(sc, scsi_bufflen(sc));
- else {
- scsi_out(sc)->resid = scsi_out(sc)->length;
- scsi_in(sc)->resid = scsi_in(sc)->length;
- }
+ scsi_set_resid(sc, scsi_bufflen(sc));
sc->scsi_done(sc);
return 0;
}
@@ -3127,8 +3092,9 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
state = ISCSI_TASK_ABRT_SESS_RECOV;
if (task->state == ISCSI_TASK_PENDING)
state = ISCSI_TASK_COMPLETED;
+ spin_lock_bh(&session->back_lock);
iscsi_complete_task(task, state);
-
+ spin_unlock_bh(&session->back_lock);
}
}
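Each of the hunks above wraps iscsi_complete_task() in session->back_lock, matching the locking already used on the RX completion path. A reduced sketch of the pattern; the types and names here are placeholders, not the libiscsi structures:

#include <linux/spinlock.h>

struct example_session {
	spinlock_t back_lock;	/* initialized with spin_lock_init() */
};

static void example_complete_task(struct example_session *session,
				  void (*complete_fn)(void))
{
	/* Completion state is only ever touched under back_lock. */
	spin_lock_bh(&session->back_lock);
	complete_fn();		/* stands in for iscsi_complete_task() */
	spin_unlock_bh(&session->back_lock);
}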
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 8a6b1b3f8277..c3fe3f3a78f5 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -129,12 +129,17 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
BUG_ON(sg->length == 0);
/*
+ * We always map for the recv path.
+ *
* If the page count is greater than one it is ok to send
* to the network layer's zero copy send path. If not we
- * have to go the slow sendmsg path. We always map for the
- * recv path.
+ * have to go the slow sendmsg path.
+ *
+ * The same goes for slab pages: skb_can_coalesce() allows
+ * coalescing of neighboring slab objects into a single frag,
+ * which triggers one of the hardened usercopy checks.
*/
- if (page_count(sg_page(sg)) >= 1 && !recv)
+ if (!recv && page_count(sg_page(sg)) >= 1 && !PageSlab(sg_page(sg)))
return;
if (recv) {
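The reworked test above skips the network layer's zero-copy send path both for the recv direction and for slab-backed pages. A sketch of the predicate as a standalone helper (the helper name is illustrative):

#include <linux/mm.h>
#include <linux/scatterlist.h>

static bool example_can_zero_copy_send(struct scatterlist *sg, bool recv)
{
	struct page *page = sg_page(sg);

	/* Recv always maps; slab pages must take the slow sendmsg
	 * path so that coalesced slab-backed skb frags never trip
	 * the hardened usercopy checks. */
	return !recv && page_count(page) >= 1 && !PageSlab(page);
}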
@@ -495,7 +500,7 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
int datasn = be32_to_cpu(rhdr->datasn);
- unsigned total_in_length = scsi_in(task->sc)->length;
+ unsigned total_in_length = task->sc->sdb.length;
/*
* lib iscsi will update this in the completion handling if there
@@ -580,11 +585,11 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
data_length, session->max_burst);
data_offset = be32_to_cpu(rhdr->data_offset);
- if (data_offset + data_length > scsi_out(task->sc)->length) {
+ if (data_offset + data_length > task->sc->sdb.length) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with data len %u at offset %u "
"and total length %d\n", data_length,
- data_offset, scsi_out(task->sc)->length);
+ data_offset, task->sc->sdb.length);
return ISCSI_ERR_DATALEN;
}
@@ -696,7 +701,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
if (tcp_conn->in.datalen) {
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct ahash_request *rx_hash = NULL;
- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+ struct scsi_data_buffer *sdb = &task->sc->sdb;
/*
* Setup copy of Data-In into the struct scsi_cmnd
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index f21c93bbb35c..17b45a0c7bc3 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -25,6 +25,7 @@
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include "sas_internal.h"
@@ -614,7 +615,14 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
}
res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
-
+ if (res) {
+ pr_err("ex %016llx phy%02d PHY control failed: %d\n",
+ SAS_ADDR(dev->sas_addr), phy_id, res);
+ } else if (pc_resp[2] != SMP_RESP_FUNC_ACC) {
+ pr_err("ex %016llx phy%02d PHY control failed: function result 0x%x\n",
+ SAS_ADDR(dev->sas_addr), phy_id, pc_resp[2]);
+ res = pc_resp[2];
+ }
kfree(pc_resp);
kfree(pc_req);
return res;
@@ -689,10 +697,10 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
if (res)
goto out;
- phy->invalid_dword_count = scsi_to_u32(&resp[12]);
- phy->running_disparity_error_count = scsi_to_u32(&resp[16]);
- phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]);
- phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
+ phy->invalid_dword_count = get_unaligned_be32(&resp[12]);
+ phy->running_disparity_error_count = get_unaligned_be32(&resp[16]);
+ phy->loss_of_dword_sync_count = get_unaligned_be32(&resp[20]);
+ phy->phy_reset_problem_count = get_unaligned_be32(&resp[24]);
out:
kfree(req);
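get_unaligned_be32() reads a big-endian 32-bit value from a buffer offset that may not be 4-byte aligned, which is exactly what the SMP response layout requires. A userspace equivalent, for illustration only:

#include <stdint.h>

static uint32_t example_get_unaligned_be32(const uint8_t *p)
{
	/* Assemble byte-by-byte: no alignment assumption, and the
	 * result is correct regardless of the host's byte order. */
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}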
@@ -817,6 +825,26 @@ static struct domain_device *sas_ex_discover_end_dev(
#ifdef CONFIG_SCSI_SAS_ATA
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
+ if (child->linkrate > parent->min_linkrate) {
+ struct sas_phy_linkrates rates = {
+ .maximum_linkrate = parent->min_linkrate,
+ .minimum_linkrate = parent->min_linkrate,
+ };
+ int ret;
+
+ pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
+ SAS_ADDR(child->sas_addr), phy_id);
+ ret = sas_smp_phy_control(parent, phy_id,
+ PHY_FUNC_LINK_RESET, &rates);
+ if (ret) {
+ pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
+ SAS_ADDR(child->sas_addr), phy_id, ret);
+ goto out_free;
+ }
+ pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
+ SAS_ADDR(child->sas_addr), phy_id);
+ child->linkrate = child->min_linkrate;
+ }
res = sas_get_ata_info(child, phy);
if (res)
goto out_free;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index c43a00a9d819..b775445892af 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -799,7 +799,7 @@ out:
shost->host_failed, tries);
}
-int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index ebdfe5b26937..41d849f283f6 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -84,8 +84,6 @@ struct lpfc_sli2_slim;
#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
-#define LPFC_LOOK_AHEAD_OFF 0 /* Look ahead logic is turned off */
-
/* Error Attention event polling interval */
#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
@@ -146,6 +144,7 @@ struct lpfc_nvmet_ctxbuf {
struct lpfc_nvmet_rcv_ctx *context;
struct lpfc_iocbq *iocbq;
struct lpfc_sglq *sglq;
+ struct work_struct defer_work;
};
struct lpfc_dma_pool {
@@ -235,8 +234,6 @@ typedef struct lpfc_vpd {
} sli3Feat;
} lpfc_vpd_t;
-struct lpfc_scsi_buf;
-
/*
* lpfc stat counters
@@ -466,6 +463,7 @@ struct lpfc_vport {
uint32_t cfg_use_adisc;
uint32_t cfg_discovery_threads;
uint32_t cfg_log_verbose;
+ uint32_t cfg_enable_fc4_type;
uint32_t cfg_max_luns;
uint32_t cfg_enable_da_id;
uint32_t cfg_max_scsicmpl_time;
@@ -479,6 +477,7 @@ struct lpfc_vport {
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
struct dentry *debug_nvmestat;
+ struct dentry *debug_scsistat;
struct dentry *debug_nvmektime;
struct dentry *debug_cpucheck;
struct dentry *vport_debugfs_root;
@@ -596,6 +595,13 @@ struct lpfc_mbox_ext_buf_ctx {
struct list_head ext_dmabuf_list;
};
+struct lpfc_epd_pool {
+ /* Expedite pool */
+ struct list_head list;
+ u32 count;
+ spinlock_t lock; /* lock for expedite pool */
+};
+
struct lpfc_ras_fwlog {
uint8_t *fwlog_buff;
uint32_t fw_buffcount; /* Buffer size posted to FW */
@@ -617,20 +623,19 @@ struct lpfc_ras_fwlog {
struct lpfc_hba {
/* SCSI interface function jump table entries */
- int (*lpfc_new_scsi_buf)
- (struct lpfc_vport *, int);
- struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
- (struct lpfc_hba *, struct lpfc_nodelist *);
+ struct lpfc_io_buf * (*lpfc_get_scsi_buf)
+ (struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ struct scsi_cmnd *cmnd);
int (*lpfc_scsi_prep_dma_buf)
- (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ (struct lpfc_hba *, struct lpfc_io_buf *);
void (*lpfc_scsi_unprep_dma_buf)
- (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ (struct lpfc_hba *, struct lpfc_io_buf *);
void (*lpfc_release_scsi_buf)
- (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ (struct lpfc_hba *, struct lpfc_io_buf *);
void (*lpfc_rampdown_queue_depth)
(struct lpfc_hba *);
void (*lpfc_scsi_prep_cmnd)
- (struct lpfc_vport *, struct lpfc_scsi_buf *,
+ (struct lpfc_vport *, struct lpfc_io_buf *,
struct lpfc_nodelist *);
/* IOCB interface function jump table entries */
@@ -673,13 +678,17 @@ struct lpfc_hba {
(struct lpfc_hba *);
int (*lpfc_bg_scsi_prep_dma_buf)
- (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ (struct lpfc_hba *, struct lpfc_io_buf *);
/* Add new entries here */
+ /* expedite pool */
+ struct lpfc_epd_pool epd_pool;
+
/* SLI4 specific HBA data structure */
struct lpfc_sli4_hba sli4_hba;
struct workqueue_struct *wq;
+ struct delayed_work eq_delay_work;
struct lpfc_sli sli;
uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
@@ -713,7 +722,6 @@ struct lpfc_hba {
#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */
#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
-#define FCP_XRI_ABORT_EVENT 0x20
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
@@ -784,12 +792,12 @@ struct lpfc_hba {
uint8_t nvmet_support; /* driver supports NVMET */
#define LPFC_NVMET_MAX_PORTS 32
uint8_t mds_diags_support;
- uint32_t initial_imax;
uint8_t bbcredit_support;
uint8_t enab_exp_wqcq_pages;
/* HBA Config Parameters */
uint32_t cfg_ack0;
+ uint32_t cfg_xri_rebalancing;
uint32_t cfg_enable_npiv;
uint32_t cfg_enable_rrq;
uint32_t cfg_topology;
@@ -811,12 +819,14 @@ struct lpfc_hba {
uint32_t cfg_use_msi;
uint32_t cfg_auto_imax;
uint32_t cfg_fcp_imax;
+ uint32_t cfg_cq_poll_threshold;
+ uint32_t cfg_cq_max_proc_limit;
uint32_t cfg_fcp_cpu_map;
- uint32_t cfg_fcp_io_channel;
+ uint32_t cfg_hdw_queue;
+ uint32_t cfg_irq_chann;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
- uint32_t cfg_nvme_io_channel;
uint32_t cfg_nvmet_mrq_post;
uint32_t cfg_nvmet_mrq;
uint32_t cfg_enable_nvmet;
@@ -852,6 +862,7 @@ struct lpfc_hba {
uint32_t cfg_prot_guard;
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
+ uint32_t cfg_enable_fc4_type;
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
@@ -872,15 +883,12 @@ struct lpfc_hba {
uint32_t cfg_ras_fwlog_level;
uint32_t cfg_ras_fwlog_buffsize;
uint32_t cfg_ras_fwlog_func;
- uint32_t cfg_enable_fc4_type;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
- uint32_t cfg_xri_split;
#define LPFC_ENABLE_FCP 1
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
- uint32_t io_channel_irqs; /* number of irqs for io channels */
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
@@ -952,14 +960,6 @@ struct lpfc_hba {
struct timer_list eratt_poll;
uint32_t eratt_poll_interval;
- /*
- * stat counters
- */
- atomic_t fc4ScsiInputRequests;
- atomic_t fc4ScsiOutputRequests;
- atomic_t fc4ScsiControlRequests;
- atomic_t fc4ScsiIoCmpls;
-
uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt;
uint64_t bg_reftag_err_cnt;
@@ -970,13 +970,6 @@ struct lpfc_hba {
struct list_head lpfc_scsi_buf_list_get;
struct list_head lpfc_scsi_buf_list_put;
uint32_t total_scsi_bufs;
- spinlock_t nvme_buf_list_get_lock; /* NVME buf alloc list lock */
- spinlock_t nvme_buf_list_put_lock; /* NVME buf free list lock */
- struct list_head lpfc_nvme_buf_list_get;
- struct list_head lpfc_nvme_buf_list_put;
- uint32_t total_nvme_bufs;
- uint32_t get_nvme_bufs;
- uint32_t put_nvme_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
@@ -1033,6 +1026,7 @@ struct lpfc_hba {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
atomic_t debugfs_vport_count;
+ struct dentry *debug_multixri_pools;
struct dentry *debug_hbqinfo;
struct dentry *debug_dumpHostSlim;
struct dentry *debug_dumpHBASlim;
@@ -1050,6 +1044,10 @@ struct lpfc_hba {
struct dentry *debug_nvmeio_trc;
struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
+ struct dentry *debug_hdwqinfo;
+#ifdef LPFC_HDWQ_LOCK_STAT
+ struct dentry *debug_lockstat;
+#endif
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
@@ -1090,7 +1088,6 @@ struct lpfc_hba {
uint8_t temp_sensor_support;
/* Fields used for heart beat. */
- unsigned long last_eqdelay_time;
unsigned long last_completion_time;
unsigned long skipped_hb;
struct timer_list hb_tmofunc;
@@ -1164,16 +1161,12 @@ struct lpfc_hba {
uint16_t sfp_warning;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-#define LPFC_CHECK_CPU_CNT 32
- uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
- uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
- uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
- uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT];
uint16_t cpucheck_on;
#define LPFC_CHECK_OFF 0
#define LPFC_CHECK_NVME_IO 1
#define LPFC_CHECK_NVMET_RCV 2
#define LPFC_CHECK_NVMET_IO 4
+#define LPFC_CHECK_SCSI_IO 8
uint16_t ktime_on;
uint64_t ktime_data_samples;
uint64_t ktime_status_samples;
@@ -1297,3 +1290,23 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
return &phba->sli.sli3_ring[LPFC_ELS_RING];
}
+
+/**
+ * lpfc_sli4_mod_hba_eq_delay - update EQ delay
+ * @phba: Pointer to HBA context object.
+ * @eq: The Event Queue to update.
+ * @delay: The delay value (in us) to be written.
+ *
+ **/
+static inline void
+lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ u32 delay)
+{
+ struct lpfc_register reg_data;
+
+ reg_data.word0 = 0;
+ bf_set(lpfc_sliport_eqdelay_id, &reg_data, eq->queue_id);
+ bf_set(lpfc_sliport_eqdelay_delay, &reg_data, delay);
+ writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr);
+ eq->q_mode = delay;
+}
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4bae72cbf3f6..ce3e541434dc 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -64,9 +64,6 @@
#define LPFC_MIN_MRQ_POST 512
#define LPFC_MAX_MRQ_POST 2048
-#define LPFC_MAX_NVME_INFO_TMP_LEN 100
-#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
-
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@@ -155,7 +152,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
- struct lpfc_nvme_ctrl_stat *cstat;
+ struct lpfc_fc4_ctrl_stat *cstat;
uint64_t data1, data2, data3;
uint64_t totin, totout, tot;
char *statep;
@@ -163,7 +160,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
int len = 0;
char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+ if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
return len;
}
@@ -334,11 +331,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
rcu_read_lock();
scnprintf(tmp, sizeof(tmp),
- "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+ "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
phba->brd_no,
phba->sli4_hba.max_cfg_param.max_xri,
- phba->sli4_hba.nvme_xri_max,
- phba->sli4_hba.scsi_xri_max,
+ phba->sli4_hba.io_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
@@ -457,13 +453,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
totin = 0;
totout = 0;
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
- cstat = &lport->cstat[i];
- tot = atomic_read(&cstat->fc4NvmeIoCmpls);
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
+ tot = cstat->io_cmpls;
totin += tot;
- data1 = atomic_read(&cstat->fc4NvmeInputRequests);
- data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
- data3 = atomic_read(&cstat->fc4NvmeControlRequests);
+ data1 = cstat->input_requests;
+ data2 = cstat->output_requests;
+ data3 = cstat->control_requests;
totout += (data1 + data2 + data3);
}
scnprintf(tmp, sizeof(tmp),
@@ -510,6 +506,57 @@ buffer_done:
}
static ssize_t
+lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ int len;
+ struct lpfc_fc4_ctrl_stat *cstat;
+ u64 data1, data2, data3;
+ u64 tot, totin, totout;
+ int i;
+ char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
+
+ if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
+ (phba->sli_rev != LPFC_SLI_REV4))
+ return 0;
+
+ scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
+
+ totin = 0;
+ totout = 0;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
+ tot = cstat->io_cmpls;
+ totin += tot;
+ data1 = cstat->input_requests;
+ data2 = cstat->output_requests;
+ data3 = cstat->control_requests;
+ totout += (data1 + data2 + data3);
+
+ scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
+ "IO %016llx ", i, data1, data2, data3);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
+ tot, ((data1 + data2 + data3) - tot));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+ scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
+ "OutIO %016llx\n", totin, totout, totout - totin);
+ strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+ len = strnlen(buf, PAGE_SIZE);
+
+ return len;
+}
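lpfc_scsi_stat_show() above relies on strlcat() returning the total length it tried to create: once that reaches PAGE_SIZE, the sysfs buffer is full and formatting stops. A minimal sketch of the guard (the helper name is illustrative):

#include <linux/mm.h>
#include <linux/string.h>

/* Returns true when buf is full and the caller should stop. */
static bool example_append(char *buf, const char *tmp)
{
	return strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE;
}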
+
+static ssize_t
lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -2574,6 +2621,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
+static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
@@ -3724,29 +3772,13 @@ LPFC_ATTR_R(nvmet_mrq_post,
* lpfc_enable_fc4_type: Defines what FC4 types are supported.
* Supported Values: 1 - register just FCP
* 3 - register both FCP and NVME
- * Supported values are [1,3]. Default value is 1
+ * Supported values are [1,3]. Default value is 3
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
"Enable FC4 Protocol support - FCP / NVME");
/*
- * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
- * This parameter is only used if:
- * lpfc_enable_fc4_type is 3 - register both FCP and NVME and
- * port is not configured for NVMET.
- *
- * ELS/CT always get 10% of XRIs, up to a maximum of 250
- * The remaining XRIs get split up based on lpfc_xri_split per port:
- *
- * Supported Values are in percentages
- * the xri_split value is the percentage the SCSI port will get. The remaining
- * percentage will go to NVME.
- */
-LPFC_ATTR_R(xri_split, 50, 10, 90,
- "Percentage of FCP XRI resources versus NVME");
-
-/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
# deluged with LOTS of information.
# You can set a bit mask to record specific types of verbose messages:
@@ -4903,6 +4935,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_eq_intr_info *eqi;
+ uint32_t usdelay;
int val = 0, i;
/* fcp_imax is only valid for SLI4 */
@@ -4923,12 +4957,27 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
return -EINVAL;
+ phba->cfg_auto_imax = (val) ? 0 : 1;
+ if (phba->cfg_fcp_imax && !val) {
+ queue_delayed_work(phba->wq, &phba->eq_delay_work,
+ msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
+
+ for_each_present_cpu(i) {
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+ eqi->icnt = 0;
+ }
+ }
+
phba->cfg_fcp_imax = (uint32_t)val;
- phba->initial_imax = phba->cfg_fcp_imax;
- for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
+ if (phba->cfg_fcp_imax)
+ usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
+ else
+ usdelay = 0;
+
+ for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
- val);
+ usdelay);
return strlen(buf);
}
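The store handler above converts the interrupts-per-second target into a per-EQ delay in microseconds. A small runnable sketch of the arithmetic, assuming LPFC_SEC_TO_USEC is 1000000 as in the driver headers:

#include <stdio.h>

#define LPFC_SEC_TO_USEC 1000000

static unsigned int imax_to_usdelay(unsigned int fcp_imax)
{
	/* 0 disables interrupt coalescing entirely. */
	return fcp_imax ? LPFC_SEC_TO_USEC / fcp_imax : 0;
}

int main(void)
{
	/* 20000 interrupts/s -> 50 us between EQ interrupts. */
	printf("%u\n", imax_to_usdelay(20000));
	return 0;
}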
@@ -4982,15 +5031,119 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
static DEVICE_ATTR_RW(lpfc_fcp_imax);
+/**
+ * lpfc_cq_max_proc_limit_store - set the maximum number of CQEs processed per CQ call
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string with the maximum number of CQEs to process per call
+ * @count: unused variable.
+ *
+ * Description:
+ * If val is in a valid range, then set value on each cq
+ *
+ * Returns:
+ * The length of the buf: if successful
+ * -ERANGE: if val is not in the valid range
+ * -EINVAL: if bad value format or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_queue *eq, *cq;
+ unsigned long val;
+ int i;
+
+ /* cq_max_proc_limit is only valid for SLI4 */
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
+ return -ERANGE;
+
+ phba->cfg_cq_max_proc_limit = (uint32_t)val;
+
+ /* set the values on the cq's */
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ eq = phba->sli4_hba.hdwq[i].hba_eq;
+ if (!eq)
+ continue;
+
+ list_for_each_entry(cq, &eq->child_list, list)
+ cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
+ cq->entry_count);
+ }
+
+ return strlen(buf);
+}
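The loop above clamps every child CQ to the lesser of the new global cap and the CQ's own entry count, so a large cap can never exceed what a queue can actually hold. The clamp itself, as a one-line sketch:

static unsigned int example_cq_limit(unsigned int global_cap,
				     unsigned int entry_count)
{
	return global_cap < entry_count ? global_cap : entry_count;
}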
+
/*
- * lpfc_auto_imax: Controls Auto-interrupt coalescing values support.
- * 0 No auto_imax support
- * 1 auto imax on
- * Auto imax will change the value of fcp_imax on a per EQ basis, using
- * the EQ Delay Multiplier, depending on the activity for that EQ.
- * Value range [0,1]. Default value is 1.
+ * lpfc_cq_max_proc_limit: The maximum number of CQE entries processed
+ * in an iteration of CQ processing.
*/
-LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax");
+static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
+module_param(lpfc_cq_max_proc_limit, int, 0644);
+MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
+ "Set the maximum number CQEs processed in an iteration of "
+ "CQ processing");
+lpfc_param_show(cq_max_proc_limit)
+
+/*
+ * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
+ * single handler call which should request a polled completion rather
+ * than re-enabling interrupts.
+ */
+LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
+ LPFC_CQ_MIN_THRESHOLD_TO_POLL,
+ LPFC_CQ_MAX_THRESHOLD_TO_POLL,
+ "CQE Processing Threshold to enable Polling");
+
+/**
+ * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
+ * @phba: lpfc_hba pointer.
+ * @val: entry limit
+ *
+ * Description:
+ * If val is in a valid range, then initialize the adapter's maximum
+ * value.
+ *
+ * Returns:
+ * Always returns 0, even if the requested value was not applied.
+ * If the value is out of range or not supported, the driver falls
+ * back to the default.
+ **/
+static int
+lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
+{
+ phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
+
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return 0;
+
+ if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
+ phba->cfg_cq_max_proc_limit = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: "
+ "%d out of range, using default\n",
+ val);
+
+ return 0;
+}
+
+static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
/**
* lpfc_state_show - Display current driver CPU affinity
@@ -5023,50 +5176,70 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
case 1:
len += snprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: HBA centric mapping (%d): "
- "%d online CPUs\n",
- phba->cfg_fcp_cpu_map,
- phba->sli4_hba.num_online_cpu);
- break;
- case 2:
- len += snprintf(buf + len, PAGE_SIZE-len,
- "fcp_cpu_map: Driver centric mapping (%d): "
- "%d online CPUs\n",
- phba->cfg_fcp_cpu_map,
- phba->sli4_hba.num_online_cpu);
+ "%d of %d CPUs online from %d possible CPUs\n",
+ phba->cfg_fcp_cpu_map, num_online_cpus(),
+ num_present_cpus(),
+ phba->sli4_hba.num_possible_cpu);
break;
}
- while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+ while (phba->sli4_hba.curr_disp_cpu <
+ phba->sli4_hba.num_possible_cpu) {
cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
- /* margin should fit in this and the truncated message */
- if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
- len += snprintf(buf + len, PAGE_SIZE-len,
- "CPU %02d io_chan %02d "
- "physid %d coreid %d\n",
+ if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "CPU %02d not present\n",
+ phba->sli4_hba.curr_disp_cpu);
+ else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+ if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "CPU %02d hdwq None "
+ "physid %d coreid %d ht %d\n",
phba->sli4_hba.curr_disp_cpu,
- cpup->channel_id, cpup->phys_id,
- cpup->core_id);
- else
- len += snprintf(buf + len, PAGE_SIZE-len,
- "CPU %02d io_chan %02d "
- "physid %d coreid %d IRQ %d\n",
+ cpup->phys_id,
+ cpup->core_id, cpup->hyper);
+ else
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "CPU %02d EQ %04d hdwq %04d "
+ "physid %d coreid %d ht %d\n",
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->eq, cpup->hdwq, cpup->phys_id,
+ cpup->core_id, cpup->hyper);
+ } else {
+ if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "CPU %02d hdwq None "
+ "physid %d coreid %d ht %d IRQ %d\n",
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->phys_id,
+ cpup->core_id, cpup->hyper, cpup->irq);
+ else
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "CPU %02d EQ %04d hdwq %04d "
+ "physid %d coreid %d ht %d IRQ %d\n",
phba->sli4_hba.curr_disp_cpu,
- cpup->channel_id, cpup->phys_id,
- cpup->core_id, cpup->irq);
+ cpup->eq, cpup->hdwq, cpup->phys_id,
+ cpup->core_id, cpup->hyper, cpup->irq);
+ }
phba->sli4_hba.curr_disp_cpu++;
/* display max number of CPUs keeping some margin */
if (phba->sli4_hba.curr_disp_cpu <
- phba->sli4_hba.num_present_cpu &&
+ phba->sli4_hba.num_possible_cpu &&
(len >= (PAGE_SIZE - 64))) {
- len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+ len += snprintf(buf + len,
+ PAGE_SIZE - len, "more...\n");
break;
}
}
- if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+ if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
phba->sli4_hba.curr_disp_cpu = 0;
return len;
@@ -5094,14 +5267,13 @@ lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
# for the HBA.
#
-# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+# Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
# 0 - Do not affinitize IRQ vectors
# 1 - Affinitize HBA vectors with respect to each HBA
# (start with CPU0 for each HBA)
-# 2 - Affintize HBA vectors with respect to the entire driver
-# (round robin thru all CPUs across all HBAs)
+# This also defines how Hardware Queues are mapped to specific CPUs.
*/
-static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_fcp_cpu_map,
"Defines how to map CPUs to IRQ vectors per HBA");
@@ -5135,7 +5307,7 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3326 lpfc_fcp_cpu_map: %d out of range, using "
"default\n", val);
- phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+ phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
return 0;
}
@@ -5235,13 +5407,20 @@ static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
/*
+# lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
+# range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
+
+/*
* lpfc_io_sched: Determine scheduling algorithm for issuing FCP cmds
* range is [0,1]. Default value is 0.
- * For [0], FCP commands are issued to Work Queues ina round robin fashion.
+ * For [0], FCP commands are issued to Work Queues based on upper layer
+ * hardware queue index.
* For [1], FCP commands are issued to a Work Queue associated with the
* current CPU.
*
- * LPFC_FCP_SCHED_ROUND_ROBIN == 0
+ * LPFC_FCP_SCHED_BY_HDWQ == 0
* LPFC_FCP_SCHED_BY_CPU == 1
*
* The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
@@ -5249,11 +5428,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
* CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
* through WQs will be used.
*/
-LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN,
- LPFC_FCP_SCHED_ROUND_ROBIN,
+LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
+ LPFC_FCP_SCHED_BY_HDWQ,
LPFC_FCP_SCHED_BY_CPU,
"Determine scheduling algorithm for "
- "issuing commands [0] - Round Robin, [1] - Current CPU");
+ "issuing commands [0] - Hardware Queue, [1] - Current CPU");
/*
* lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
@@ -5415,41 +5594,39 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
"Embed NVME Command in WQE");
/*
- * lpfc_fcp_io_channel: Set the number of FCP IO channels the driver
- * will advertise it supports to the SCSI layer. This also will map to
- * the number of WQs the driver will create.
+ * lpfc_hdw_queue: Set the number of Hardware Queues the driver
+ * will advertise it supports to the NVME and SCSI layers. This also
+ * will map to the number of CQ/WQ pairs the driver will create.
*
- * 0 = Configure the number of io channels to the number of active CPUs.
- * 1,32 = Manually specify how many io channels to use.
+ * The NVME Layer will try to create this many, plus 1 administrative
+ * hardware queue. The administrative queue will always map to WQ 0.
+ * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
*
- * Value range is [0,32]. Default value is 4.
+ * 0 = Configure the number of hdw queues to the number of active CPUs.
+ * 1,128 = Manually specify how many hdw queues to use.
+ *
+ * Value range is [0,128]. Default value is 0.
*/
-LPFC_ATTR_R(fcp_io_channel,
- LPFC_FCP_IO_CHAN_DEF,
- LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
- "Set the number of FCP I/O channels");
+LPFC_ATTR_R(hdw_queue,
+ LPFC_HBA_HDWQ_DEF,
+ LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
+ "Set the number of I/O Hardware Queues");
/*
- * lpfc_nvme_io_channel: Set the number of IO hardware queues the driver
- * will advertise it supports to the NVME layer. This also will map to
- * the number of WQs the driver will create.
- *
- * This module parameter is valid when lpfc_enable_fc4_type is set
- * to support NVME.
- *
- * The NVME Layer will try to create this many, plus 1 administrative
- * hardware queue. The administrative queue will always map to WQ 0
- * A hardware IO queue maps (qidx) to a specific driver WQ.
+ * lpfc_irq_chann: Set the number of IRQ vectors that are available
+ * for Hardware Queues to utilize. This also will map to the number
+ * of EQ / MSI-X vectors the driver will create. This should never be
+ * more than the number of Hardware Queues.
*
- * 0 = Configure the number of io channels to the number of active CPUs.
- * 1,32 = Manually specify how many io channels to use.
+ * 0 = Configure number of IRQ Channels to the number of active CPUs.
+ * 1,128 = Manually specify how many IRQ Channels to use.
*
- * Value range is [0,32]. Default value is 0.
+ * Value range is [0,128]. Default value is 0.
*/
-LPFC_ATTR_R(nvme_io_channel,
- LPFC_NVME_IO_CHAN_DEF,
- LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
- "Set the number of NVME I/O channels");
+LPFC_ATTR_R(irq_chann,
+ LPFC_HBA_HDWQ_DEF,
+ LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
+ "Set the number of I/O IRQ Channels");
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
@@ -5492,16 +5669,6 @@ LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
/*
-# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
-# 0 = disabled (default)
-# 1 = enabled
-# Value range is [0,1]. Default value is 0.
-#
-# This feature in under investigation and may be supported in the future.
-*/
-unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
-
-/*
# lpfc_prot_mask: i
# - Bit mask of host protection capabilities used to register with the
# SCSI mid-layer
@@ -5677,6 +5844,7 @@ LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_nvme_info,
+ &dev_attr_scsi_stat,
&dev_attr_bg_info,
&dev_attr_bg_guard_err,
&dev_attr_bg_apptag_err,
@@ -5704,11 +5872,11 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_nodev_tmo,
&dev_attr_lpfc_devloss_tmo,
&dev_attr_lpfc_enable_fc4_type,
- &dev_attr_lpfc_xri_split,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
&dev_attr_lpfc_first_burst_size,
&dev_attr_lpfc_ack0,
+ &dev_attr_lpfc_xri_rebalancing,
&dev_attr_lpfc_topology,
&dev_attr_lpfc_scan_down,
&dev_attr_lpfc_link_speed,
@@ -5742,12 +5910,13 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_nvme_oas,
&dev_attr_lpfc_nvme_embed_cmd,
- &dev_attr_lpfc_auto_imax,
&dev_attr_lpfc_fcp_imax,
+ &dev_attr_lpfc_cq_poll_threshold,
+ &dev_attr_lpfc_cq_max_proc_limit,
&dev_attr_lpfc_fcp_cpu_map,
- &dev_attr_lpfc_fcp_io_channel,
+ &dev_attr_lpfc_hdw_queue,
+ &dev_attr_lpfc_irq_chann,
&dev_attr_lpfc_suppress_rsp,
- &dev_attr_lpfc_nvme_io_channel,
&dev_attr_lpfc_nvmet_mrq,
&dev_attr_lpfc_nvmet_mrq_post,
&dev_attr_lpfc_nvme_enable_fb,
@@ -6775,6 +6944,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
lpfc_ack0_init(phba, lpfc_ack0);
+ lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
lpfc_topology_init(phba, lpfc_topology);
lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
@@ -6787,8 +6957,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
- lpfc_auto_imax_init(phba, lpfc_auto_imax);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+ lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
+ lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
@@ -6824,8 +6995,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
- lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
- lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
+ lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
+ lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
@@ -6834,38 +7005,27 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->nvmet_support = 0;
phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
phba->cfg_enable_bbcr = 0;
+ phba->cfg_xri_rebalancing = 0;
} else {
/* We MUST have FCP support */
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
}
- if (phba->cfg_auto_imax && !phba->cfg_fcp_imax)
- phba->cfg_auto_imax = 0;
- phba->initial_imax = phba->cfg_fcp_imax;
+ phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
phba->cfg_enable_pbde = 0;
/* A value of 0 means use the number of CPUs found in the system */
- if (phba->cfg_fcp_io_channel == 0)
- phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
- if (phba->cfg_nvme_io_channel == 0)
- phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
-
- if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
- phba->cfg_fcp_io_channel = 0;
-
- if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
- phba->cfg_nvme_io_channel = 0;
-
- if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
- phba->io_channel_irqs = phba->cfg_fcp_io_channel;
- else
- phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+ if (phba->cfg_hdw_queue == 0)
+ phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
+ if (phba->cfg_irq_chann == 0)
+ phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
+ if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+ phba->cfg_irq_chann = phba->cfg_hdw_queue;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
- lpfc_xri_split_init(phba, lpfc_xri_split);
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
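The sizing logic above collapses the old per-protocol channel counts into one rule set: a configured value of 0 means one queue per present CPU, and the IRQ channel count can never exceed the hardware queue count. A sketch, with illustrative names in place of the phba fields:

static void example_size_queues(unsigned int present_cpus,
				unsigned int *hdw_queue,
				unsigned int *irq_chann)
{
	if (*hdw_queue == 0)
		*hdw_queue = present_cpus;
	if (*irq_chann == 0)
		*irq_chann = present_cpus;
	if (*irq_chann > *hdw_queue)
		*irq_chann = *hdw_queue;
}

On a 16-CPU host with both parameters left at 0, this yields 16 hardware queues and 16 IRQ channels.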
@@ -6903,16 +7063,16 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
void
lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
{
- if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu)
- phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
-
- if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu)
- phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
+ if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
+ phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
+ if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu)
+ phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
+ if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+ phba->cfg_irq_chann = phba->cfg_hdw_queue;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
phba->nvmet_support) {
phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
- phba->cfg_fcp_io_channel = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6013 %s x%x fb_size x%x, fb_max x%x\n",
@@ -6929,11 +7089,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
}
if (!phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
- if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
+ phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
@@ -6947,11 +7107,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvmet_fb_size = 0;
}
-
- if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
- phba->io_channel_irqs = phba->cfg_fcp_io_channel;
- else
- phba->io_channel_irqs = phba->cfg_nvme_io_channel;
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 2dc564e59430..f2494d3b365c 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2947,7 +2947,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
cmd->un.cont64[i].tus.f.bdeSize =
((struct lpfc_dmabufext *)mp[i])->size;
- cmd->ulpBdeCount = ++i;
+ cmd->ulpBdeCount = ++i;
if ((--num_bde > 0) && (i < 2))
continue;
@@ -4682,7 +4682,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
* Don't allow mailbox commands to be sent when blocked or when in
* the middle of discovery
*/
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
rc = -EAGAIN;
goto job_done;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 39f3fa988732..e0b14d791b8c 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -199,11 +199,6 @@ void lpfc_reset_hba(struct lpfc_hba *);
int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd,
spinlock_t *slock);
-int lpfc_fof_queue_create(struct lpfc_hba *);
-int lpfc_fof_queue_setup(struct lpfc_hba *);
-int lpfc_fof_queue_destroy(struct lpfc_hba *);
-irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
-
int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli4_setup(struct lpfc_hba *phba);
void lpfc_sli_queue_init(struct lpfc_hba *phba);
@@ -320,8 +315,8 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
-int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum,
- struct lpfc_iocbq *iocbq);
+int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
+ struct lpfc_iocbq *pwqe);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
@@ -445,7 +440,6 @@ extern spinlock_t _dump_buf_lock;
extern int _dump_buf_done;
extern spinlock_t pgcnt_lock;
extern unsigned int pgcnt;
-extern unsigned int lpfc_fcp_look_ahead;
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
@@ -520,8 +514,13 @@ int lpfc_sli4_read_config(struct lpfc_hba *);
void lpfc_sli4_node_prep(struct lpfc_hba *);
int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba);
int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba);
-int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba);
-int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba);
+int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist);
+int lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf);
+int lpfc_sli4_io_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
+ struct list_head *blist, int xricnt);
+int lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc);
+void lpfc_io_free(struct lpfc_hba *phba);
void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
@@ -574,6 +573,21 @@ void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl);
+void lpfc_create_multixri_pools(struct lpfc_hba *phba);
+void lpfc_create_destroy_pools(struct lpfc_hba *phba);
+void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);
+void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 cnt);
+void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid);
+void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid);
+void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid);
+#ifdef LPFC_MXP_STAT
+void lpfc_snapshot_mxp(struct lpfc_hba *, u32);
+#endif
+struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp, u32 hwqid,
+ int);
+void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
+ struct lpfc_sli4_hdw_queue *qp);
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
extern int lpfc_enable_nvmet_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 552da8bf43e4..7290573110fe 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1656,16 +1656,16 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
/* Register FC4 FCP type if enabled. */
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
+ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+ vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
CtReq->un.rft.fcpReg = 1;
/* Register NVME type if enabled. Defined LE and swapped.
* rsvd[0] is used as word1 because of the hard-coded
* word0 usage in the ct_request data structure.
*/
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
+ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
CtReq->un.rft.rsvd[0] =
cpu_to_be32(LPFC_FC4_TYPE_BITMASK);
@@ -1732,8 +1732,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
* caller can specify NVME (type x28) as well. But only
* if that FC4 type is supported.
*/
- if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
+ if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
(context == FC_TYPE_NVME)) {
if ((vport == phba->pport) && phba->nvmet_support) {
CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
@@ -1744,8 +1744,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
}
CtReq->un.rff.type_code = context;
- } else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
+ } else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
(context == FC_TYPE_FCP))
CtReq->un.rff.type_code = context;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a58f0b3f03a9..1215eaa530db 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -378,6 +378,271 @@ skipit:
return len;
}
+static int lpfc_debugfs_last_xripool;
+
+/**
+ * lpfc_debugfs_commonxripools_data - Dump Hardware Queue info to a buffer
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the Hardware Queue XRI buffer-pool counters from the
+ * @phba to @buf, up to @size bytes. One summary line is emitted per hdwq
+ * entry until @size bytes have been dumped or all hdwq entries have been
+ * dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured Hardware Queue each
+ * time called.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
+{
+ struct lpfc_sli4_hdw_queue *qp;
+ int len = 0;
+ int i, out;
+ unsigned long iflag;
+
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ if (len > (LPFC_DUMP_MULTIXRIPOOL_SIZE - 80))
+ break;
+ qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
+
+ len += snprintf(buf + len, size - len, "HdwQ %d Info ", i);
+ spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_lock(&qp->abts_nvme_buf_list_lock);
+ spin_lock(&qp->io_buf_list_get_lock);
+ spin_lock(&qp->io_buf_list_put_lock);
+ out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
+ qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs);
+ len += snprintf(buf + len, size - len,
+ "tot:%d get:%d put:%d mt:%d "
+ "ABTS scsi:%d nvme:%d Out:%d\n",
+ qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs,
+ qp->empty_io_bufs, qp->abts_scsi_io_bufs,
+ qp->abts_nvme_io_bufs, out);
+ spin_unlock(&qp->io_buf_list_put_lock);
+ spin_unlock(&qp->io_buf_list_get_lock);
+ spin_unlock(&qp->abts_nvme_buf_list_lock);
+ spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+
+ lpfc_debugfs_last_xripool++;
+ if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
+ lpfc_debugfs_last_xripool = 0;
+ }
+
+ return len;
+}
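/*
 * Illustrative sketch (standard C, not driver code): the rotating-cursor
 * dump pattern lpfc_debugfs_commonxripools_data() uses above, so that
 * successive reads resume at the next hardware queue instead of always
 * restarting at queue 0.  NQUEUES and dump_one() are hypothetical
 * stand-ins for phba->cfg_hdw_queue and the per-queue formatting.
 */
#include <stdio.h>

#define NQUEUES 8

static int last_idx;	/* persists across calls, like lpfc_debugfs_last_xripool */

static int dump_one(char *buf, int size, int q)
{
	/* Each line is short, so the size-16 guard below prevents truncation */
	return snprintf(buf, size, "queue %d\n", q);
}

static int dump_rotating(char *buf, int size)
{
	int i, len = 0;

	for (i = 0; i < NQUEUES; i++) {
		if (len > size - 16)		/* stop while a full line still fits */
			break;
		len += dump_one(buf + len, size - len, last_idx);
		if (++last_idx >= NQUEUES)	/* wrap; the next call continues here */
			last_idx = 0;
	}
	return len;
}

int main(void)
{
	char buf[64];
	int n;

	n = dump_rotating(buf, sizeof(buf));	/* dumps queues 0..6 */
	fwrite(buf, 1, n, stdout);
	n = dump_rotating(buf, sizeof(buf));	/* resumes at queue 7, wraps to 0 */
	fwrite(buf, 1, n, stdout);
	return 0;
}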
+
+/**
+ * lpfc_debugfs_multixripools_data - Display multi-XRI pools information
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine displays current multi-XRI pools information including XRI
+ * count in public, private and txcmplq. It also displays current high and
+ * low watermark.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
+{
+ u32 i;
+ u32 hwq_count;
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_multixri_pool *multixri_pool;
+ struct lpfc_pvt_pool *pvt_pool;
+ struct lpfc_pbl_pool *pbl_pool;
+ u32 txcmplq_cnt;
+ char tmp[LPFC_DEBUG_OUT_LINE_SZ] = {0};
+
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return 0;
+
+ if (!phba->sli4_hba.hdwq)
+ return 0;
+
+ if (!phba->cfg_xri_rebalancing) {
+ i = lpfc_debugfs_commonxripools_data(phba, buf, size);
+ return i;
+ }
+
+ /*
+ * Pbl: Current number of free XRIs in public pool
+ * Pvt: Current number of free XRIs in private pool
+ * Busy: Current number of outstanding XRIs
+ * HWM: Current high watermark
+ * pvt_empty: Incremented by 1 when IO submission fails (no xri)
+ * pbl_empty: Incremented by 1 when all pbl_pool are empty during
+ * IO submission
+ */
+ scnprintf(tmp, sizeof(tmp),
+ "HWQ: Pbl Pvt Busy HWM | pvt_empty pbl_empty ");
+ if (strlcat(buf, tmp, size) >= size)
+ return strnlen(buf, size);
+
+#ifdef LPFC_MXP_STAT
+ /*
+ * MAXH: Max high watermark seen so far
+ * above_lmt: Incremented by 1 if xri_owned > xri_limit during
+ * IO submission
+ * below_lmt: Incremented by 1 if xri_owned <= xri_limit during
+ * IO submission
+ * locPbl_hit: Incremented by 1 if successfully get a batch of XRI from
+ * local pbl_pool
+ * othPbl_hit: Incremented by 1 if successfully get a batch of XRI from
+ * other pbl_pool
+ */
+ scnprintf(tmp, sizeof(tmp),
+ "MAXH above_lmt below_lmt locPbl_hit othPbl_hit");
+ if (strlcat(buf, tmp, size) >= size)
+ return strnlen(buf, size);
+
+ /*
+ * sPbl: snapshot of Pbl 15 sec after stat gets cleared
+ * sPvt: snapshot of Pvt 15 sec after stat gets cleared
+ * sBusy: snapshot of Busy 15 sec after stat gets cleared
+ */
+ scnprintf(tmp, sizeof(tmp),
+ " | sPbl sPvt sBusy");
+ if (strlcat(buf, tmp, size) >= size)
+ return strnlen(buf, size);
+#endif
+
+ scnprintf(tmp, sizeof(tmp), "\n");
+ if (strlcat(buf, tmp, size) >= size)
+ return strnlen(buf, size);
+
+ hwq_count = phba->cfg_hdw_queue;
+ for (i = 0; i < hwq_count; i++) {
+ qp = &phba->sli4_hba.hdwq[i];
+ multixri_pool = qp->p_multixri_pool;
+ if (!multixri_pool)
+ continue;
+ pbl_pool = &multixri_pool->pbl_pool;
+ pvt_pool = &multixri_pool->pvt_pool;
+ txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ if (qp->nvme_wq)
+ txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+
+ scnprintf(tmp, sizeof(tmp),
+ "%03d: %4d %4d %4d %4d | %10d %10d ",
+ i, pbl_pool->count, pvt_pool->count,
+ txcmplq_cnt, pvt_pool->high_watermark,
+ qp->empty_io_bufs, multixri_pool->pbl_empty_count);
+ if (strlcat(buf, tmp, size) >= size)
+ break;
+
+#ifdef LPFC_MXP_STAT
+ scnprintf(tmp, sizeof(tmp),
+ "%4d %10d %10d %10d %10d",
+ multixri_pool->stat_max_hwm,
+ multixri_pool->above_limit_count,
+ multixri_pool->below_limit_count,
+ multixri_pool->local_pbl_hit_count,
+ multixri_pool->other_pbl_hit_count);
+ if (strlcat(buf, tmp, size) >= size)
+ break;
+
+ scnprintf(tmp, sizeof(tmp),
+ " | %4d %4d %5d",
+ multixri_pool->stat_pbl_count,
+ multixri_pool->stat_pvt_count,
+ multixri_pool->stat_busy_count);
+ if (strlcat(buf, tmp, size) >= size)
+ break;
+#endif
+
+ scnprintf(tmp, sizeof(tmp), "\n");
+ if (strlcat(buf, tmp, size) >= size)
+ break;
+ }
+ return strnlen(buf, size);
+}
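/*
 * Illustrative sketch (standard C, not driver code): the scnprintf()-into-tmp
 * plus strlcat() accumulation lpfc_debugfs_multixripools_data() uses above.
 * Each line is rendered into a bounded scratch buffer and then appended;
 * strlcat() returns the length it *tried* to create, so a result >= size
 * signals the destination is full and the loop stops.  my_strlcat() is a
 * minimal userspace stand-in for the kernel helper.
 */
#include <stdio.h>
#include <string.h>

static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);
	size_t copy;

	if (dlen == size)		/* dst already full */
		return size + slen;
	copy = size - dlen - 1;		/* space left, minus the NUL */
	if (copy > slen)
		copy = slen;
	memcpy(dst + dlen, src, copy);
	dst[dlen + copy] = '\0';
	return dlen + slen;		/* >= size means truncation */
}

static int fill_report(char *buf, size_t size)
{
	char tmp[80];
	int i;

	buf[0] = '\0';
	for (i = 0; i < 100; i++) {
		snprintf(tmp, sizeof(tmp), "%03d: stats line\n", i);
		if (my_strlcat(buf, tmp, size) >= size)
			break;		/* buffer full: stop cleanly */
	}
	return strnlen(buf, size);
}

int main(void)
{
	char report[256];

	fill_report(report, sizeof(report));
	fputs(report, stdout);		/* truncated at the buffer limit */
	return 0;
}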
+
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+static int lpfc_debugfs_last_lock;
+
+/**
+ * lpfc_debugfs_lockstat_data - Dump Hardware Queue lock statistics to a buffer
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the Hardware Queue lock-conflict counters from the
+ * @phba to @buf, up to @size bytes. One summary line is emitted per hdwq
+ * entry until @size bytes have been dumped or all hdwq entries have been
+ * dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured Hardware Queue each
+ * time called.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size)
+{
+ struct lpfc_sli4_hdw_queue *qp;
+ int len = 0;
+ int i;
+
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return 0;
+
+ if (!phba->sli4_hba.hdwq)
+ return 0;
+
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ if (len > (LPFC_HDWQINFO_SIZE - 100))
+ break;
+ qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock];
+
+ len += snprintf(buf + len, size - len, "HdwQ %03d Lock ", i);
+ if (phba->cfg_xri_rebalancing) {
+ len += snprintf(buf + len, size - len,
+ "get_pvt:%d mv_pvt:%d "
+ "mv2pub:%d mv2pvt:%d "
+ "put_pvt:%d put_pub:%d wq:%d\n",
+ qp->lock_conflict.alloc_pvt_pool,
+ qp->lock_conflict.mv_from_pvt_pool,
+ qp->lock_conflict.mv_to_pub_pool,
+ qp->lock_conflict.mv_to_pvt_pool,
+ qp->lock_conflict.free_pvt_pool,
+ qp->lock_conflict.free_pub_pool,
+ qp->lock_conflict.wq_access);
+ } else {
+ len += snprintf(buf + len, size - len,
+ "get:%d put:%d free:%d wq:%d\n",
+ qp->lock_conflict.alloc_xri_get,
+ qp->lock_conflict.alloc_xri_put,
+ qp->lock_conflict.free_xri,
+ qp->lock_conflict.wq_access);
+ }
+
+ lpfc_debugfs_last_lock++;
+ if (lpfc_debugfs_last_lock >= phba->cfg_hdw_queue)
+ lpfc_debugfs_last_lock = 0;
+ }
+
+ return len;
+}
+#endif
+
static int lpfc_debugfs_last_hba_slim_off;
/**
@@ -773,11 +1038,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
struct nvme_fc_local_port *localport;
- struct lpfc_nvme_ctrl_stat *cstat;
+ struct lpfc_fc4_ctrl_stat *cstat;
struct lpfc_nvme_lport *lport;
uint64_t data1, data2, data3;
uint64_t tot, totin, totout;
- int cnt, i, maxch;
+ int cnt, i;
int len = 0;
if (phba->nvmet_support) {
@@ -863,17 +1128,17 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf + len, size - len, "\n");
cnt = 0;
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
cnt++;
}
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
if (cnt) {
len += snprintf(buf + len, size - len,
"ABORT: %d ctx entries\n", cnt);
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
@@ -885,7 +1150,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
ctxp->oxid, ctxp->state,
ctxp->flag);
}
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}
/* Calculate outstanding IOs */
@@ -901,7 +1166,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
phba->sli4_hba.nvmet_io_wait_total,
tot);
} else {
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return len;
localport = vport->localport;
@@ -912,26 +1177,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
return len;
len += snprintf(buf + len, size - len,
- "\nNVME Lport Statistics\n");
+ "\nNVME HDWQ Statistics\n");
len += snprintf(buf + len, size - len,
"LS: Xmt %016x Cmpl %016x\n",
atomic_read(&lport->fc4NvmeLsRequests),
atomic_read(&lport->fc4NvmeLsCmpls));
- if (phba->cfg_nvme_io_channel < 32)
- maxch = phba->cfg_nvme_io_channel;
- else
- maxch = 32;
totin = 0;
totout = 0;
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
- cstat = &lport->cstat[i];
- tot = atomic_read(&cstat->fc4NvmeIoCmpls);
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
+ tot = cstat->io_cmpls;
totin += tot;
- data1 = atomic_read(&cstat->fc4NvmeInputRequests);
- data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
- data3 = atomic_read(&cstat->fc4NvmeControlRequests);
+ data1 = cstat->input_requests;
+ data2 = cstat->output_requests;
+ data3 = cstat->control_requests;
totout += (data1 + data2 + data3);
/* Limit to 32, debugfs display buffer limitation */
@@ -939,7 +1200,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
continue;
len += snprintf(buf + len, PAGE_SIZE - len,
- "FCP (%d): Rd %016llx Wr %016llx "
+ "HDWQ (%d): Rd %016llx Wr %016llx "
"IO %016llx ",
i, data1, data2, data3);
len += snprintf(buf + len, PAGE_SIZE - len,
@@ -979,6 +1240,66 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
return len;
}
+/**
+ * lpfc_debugfs_scsistat_data - Dump SCSI statistics to a buffer
+ * @vport: The vport to gather target node info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the SCSI statistics associated with @vport.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_scsistat_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ int len;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_fc4_ctrl_stat *cstat;
+ u64 data1, data2, data3;
+ u64 tot, totin, totout;
+ int i;
+ char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
+
+ if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
+ (phba->sli_rev != LPFC_SLI_REV4))
+ return 0;
+
+ scnprintf(buf, size, "SCSI HDWQ Statistics\n");
+
+ totin = 0;
+ totout = 0;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
+ tot = cstat->io_cmpls;
+ totin += tot;
+ data1 = cstat->input_requests;
+ data2 = cstat->output_requests;
+ data3 = cstat->control_requests;
+ totout += (data1 + data2 + data3);
+
+ scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
+ "IO %016llx ", i, data1, data2, data3);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
+ tot, ((data1 + data2 + data3) - tot));
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+ }
+ scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
+ "OutIO %016llx\n", totin, totout, totout - totin);
+ strlcat(buf, tmp, size);
+
+buffer_done:
+ len = strnlen(buf, size);
+
+ return len;
+}
/**
* lpfc_debugfs_nvmektime_data - Dump NVME ktime statistics to a buffer
@@ -1299,62 +1620,73 @@ static int
lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
- int i;
+ struct lpfc_sli4_hdw_queue *qp;
+ int i, j, max_cnt;
int len = 0;
- uint32_t tot_xmt = 0;
- uint32_t tot_rcv = 0;
- uint32_t tot_cmpl = 0;
- uint32_t tot_ccmpl = 0;
+ uint32_t tot_xmt;
+ uint32_t tot_rcv;
+ uint32_t tot_cmpl;
- if (phba->nvmet_support == 0) {
- /* NVME Initiator */
- len += snprintf(buf + len, PAGE_SIZE - len,
- "CPUcheck %s\n",
- (phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
- "Enabled" : "Disabled"));
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- if (i >= LPFC_CHECK_CPU_CNT)
- break;
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%02d: xmit x%08x cmpl x%08x\n",
- i, phba->cpucheck_xmt_io[i],
- phba->cpucheck_cmpl_io[i]);
- tot_xmt += phba->cpucheck_xmt_io[i];
- tot_cmpl += phba->cpucheck_cmpl_io[i];
- }
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "CPUcheck %s ",
+ (phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
+ "Enabled" : "Disabled"));
+ if (phba->nvmet_support) {
len += snprintf(buf + len, PAGE_SIZE - len,
- "tot:xmit x%08x cmpl x%08x\n",
- tot_xmt, tot_cmpl);
- return len;
+ "%s\n",
+ (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
+				"Rcv Enabled" : "Rcv Disabled"));
+ } else {
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
}
+ max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
+
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ qp = &phba->sli4_hba.hdwq[i];
+
+ tot_rcv = 0;
+ tot_xmt = 0;
+ tot_cmpl = 0;
+ for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
+ tot_xmt += qp->cpucheck_xmt_io[j];
+ tot_cmpl += qp->cpucheck_cmpl_io[j];
+ if (phba->nvmet_support)
+ tot_rcv += qp->cpucheck_rcv_io[j];
+ }
+
+ /* Only display Hardware Qs with something */
+ if (!tot_xmt && !tot_cmpl && !tot_rcv)
+ continue;
- /* NVME Target */
- len += snprintf(buf + len, PAGE_SIZE - len,
- "CPUcheck %s ",
- (phba->cpucheck_on & LPFC_CHECK_NVMET_IO ?
- "IO Enabled - " : "IO Disabled - "));
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s\n",
- (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
- "Rcv Enabled\n" : "Rcv Disabled\n"));
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- if (i >= LPFC_CHECK_CPU_CNT)
- break;
len += snprintf(buf + len, PAGE_SIZE - len,
- "%02d: xmit x%08x ccmpl x%08x "
- "cmpl x%08x rcv x%08x\n",
- i, phba->cpucheck_xmt_io[i],
- phba->cpucheck_ccmpl_io[i],
- phba->cpucheck_cmpl_io[i],
- phba->cpucheck_rcv_io[i]);
- tot_xmt += phba->cpucheck_xmt_io[i];
- tot_rcv += phba->cpucheck_rcv_io[i];
- tot_cmpl += phba->cpucheck_cmpl_io[i];
- tot_ccmpl += phba->cpucheck_ccmpl_io[i];
+ "HDWQ %03d: ", i);
+ for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
+ /* Only display non-zero counters */
+ if (!qp->cpucheck_xmt_io[j] &&
+ !qp->cpucheck_cmpl_io[j] &&
+ !qp->cpucheck_rcv_io[j])
+ continue;
+ if (phba->nvmet_support) {
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "CPU %03d: %x/%x/%x ", j,
+ qp->cpucheck_rcv_io[j],
+ qp->cpucheck_xmt_io[j],
+ qp->cpucheck_cmpl_io[j]);
+ } else {
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "CPU %03d: %x/%x ", j,
+ qp->cpucheck_xmt_io[j],
+ qp->cpucheck_cmpl_io[j]);
+ }
+ }
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Total: %x\n", tot_xmt);
+ if (len >= max_cnt) {
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Truncated ...\n");
+ return len;
+ }
}
- len += snprintf(buf + len, PAGE_SIZE - len,
- "tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n",
- tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv);
return len;
}
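/*
 * Illustrative sketch (standard C, not driver code): the per-queue/per-CPU
 * counter roll-up lpfc_debugfs_cpucheck_data() performs above -- sum the
 * per-CPU slots for each queue, skip queues and CPU slots that are all
 * zero, and print totals per queue.  NQUEUES, NCPUS and the arrays are
 * hypothetical stand-ins for the hdwq cpucheck counters.
 */
#include <stdio.h>
#include <stdint.h>

#define NQUEUES 4
#define NCPUS   8

static uint32_t xmt_io[NQUEUES][NCPUS];		/* submissions per queue/CPU */
static uint32_t cmpl_io[NQUEUES][NCPUS];	/* completions per queue/CPU */

static void print_cpucheck(void)
{
	int i, j;

	for (i = 0; i < NQUEUES; i++) {
		uint32_t tot_xmt = 0, tot_cmpl = 0;

		for (j = 0; j < NCPUS; j++) {
			tot_xmt += xmt_io[i][j];
			tot_cmpl += cmpl_io[i][j];
		}
		if (!tot_xmt && !tot_cmpl)	/* only queues with activity */
			continue;
		printf("HDWQ %03d:", i);
		for (j = 0; j < NCPUS; j++) {
			if (!xmt_io[i][j] && !cmpl_io[i][j])
				continue;	/* only non-zero CPU slots */
			printf(" CPU %03d: %x/%x",
			       j, xmt_io[i][j], cmpl_io[i][j]);
		}
		printf(" Total: %x\n", tot_xmt);
	}
}

int main(void)
{
	xmt_io[1][2] = 5;	/* sample counts so the output is non-empty */
	cmpl_io[1][2] = 5;
	xmt_io[3][0] = 1;
	print_cpucheck();
	return 0;
}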
@@ -1501,7 +1833,7 @@ lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
int rc = -ENOMEM;
if (!lpfc_debugfs_max_disc_trc) {
- rc = -ENOSPC;
+ rc = -ENOSPC;
goto out;
}
@@ -1551,7 +1883,7 @@ lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
int rc = -ENOMEM;
if (!lpfc_debugfs_max_slow_ring_trc) {
- rc = -ENOSPC;
+ rc = -ENOSPC;
goto out;
}
@@ -1620,6 +1952,135 @@ out:
}
/**
+ * lpfc_debugfs_multixripools_open - Open the multixripool debugfs buffer
+ * @inode: The inode pointer that contains a hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_multixripools_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+	/* Buffer sized to hold the full multi-XRI pool report */
+ debug->buffer = kzalloc(LPFC_DUMP_MULTIXRIPOOL_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_multixripools_data(
+ phba, debug->buffer, LPFC_DUMP_MULTIXRIPOOL_SIZE);
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
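/*
 * Illustrative sketch (kernel-style, not part of this driver): the
 * snapshot-on-open debugfs pattern the open handlers above follow.
 * open() allocates a buffer, renders the whole report into it once, and
 * read()/llseek() then walk that stable snapshot until release() frees it.
 * All mydrv_* names and MYDRV_BUF_SZ are hypothetical stand-ins.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#define MYDRV_BUF_SZ 8192	/* like LPFC_DUMP_MULTIXRIPOOL_SIZE */

struct mydrv_debug {
	char *buffer;
	int len;
};

static int mydrv_stats_open(struct inode *inode, struct file *file)
{
	struct mydrv_debug *debug;

	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
	if (!debug)
		return -ENOMEM;

	debug->buffer = kzalloc(MYDRV_BUF_SZ, GFP_KERNEL);
	if (!debug->buffer) {
		kfree(debug);
		return -ENOMEM;
	}

	/* Render once; every read sees one consistent snapshot. */
	debug->len = scnprintf(debug->buffer, MYDRV_BUF_SZ, "stats snapshot\n");
	file->private_data = debug;
	return 0;
}

static ssize_t mydrv_stats_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct mydrv_debug *debug = file->private_data;

	return simple_read_from_buffer(buf, nbytes, ppos,
				       debug->buffer, debug->len);
}

static int mydrv_stats_release(struct inode *inode, struct file *file)
{
	struct mydrv_debug *debug = file->private_data;

	kfree(debug->buffer);
	kfree(debug);
	return 0;
}

static const struct file_operations mydrv_stats_fops = {
	.owner   = THIS_MODULE,
	.open    = mydrv_stats_open,
	.read    = mydrv_stats_read,
	.llseek  = default_llseek,
	.release = mydrv_stats_release,
};

/* Wired up with, e.g.:
 * debugfs_create_file("stats", 0644, parent_dentry, NULL, &mydrv_stats_fops);
 */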
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+/**
+ * lpfc_debugfs_lockstat_open - Open the lockstat debugfs buffer
+ * @inode: The inode pointer that contains a hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_lockstat_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+	/* Buffer sized to hold the full lockstat report */
+ debug->buffer = kmalloc(LPFC_HDWQINFO_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+	debug->len = lpfc_debugfs_lockstat_data(phba, debug->buffer,
+						LPFC_HDWQINFO_SIZE);
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ struct lpfc_sli4_hdw_queue *qp;
+ char mybuf[64];
+ char *pbuf;
+ int i;
+
+	/* Protect copy from user */
+	if (!access_ok(buf, nbytes))
+		return -EFAULT;
+
+	/* Clamp below the stack buffer and keep room for the NUL */
+	if (nbytes > sizeof(mybuf) - 1)
+		nbytes = sizeof(mybuf) - 1;
+
+	memset(mybuf, 0, sizeof(mybuf));
+
+ if (copy_from_user(mybuf, buf, nbytes))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+
+ if ((strncmp(pbuf, "reset", strlen("reset")) == 0) ||
+ (strncmp(pbuf, "zero", strlen("zero")) == 0)) {
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ qp = &phba->sli4_hba.hdwq[i];
+ qp->lock_conflict.alloc_xri_get = 0;
+ qp->lock_conflict.alloc_xri_put = 0;
+ qp->lock_conflict.free_xri = 0;
+ qp->lock_conflict.wq_access = 0;
+ qp->lock_conflict.alloc_pvt_pool = 0;
+ qp->lock_conflict.mv_from_pvt_pool = 0;
+ qp->lock_conflict.mv_to_pub_pool = 0;
+ qp->lock_conflict.mv_to_pvt_pool = 0;
+ qp->lock_conflict.free_pvt_pool = 0;
+ qp->lock_conflict.free_pub_pool = 0;
+ }
+ }
+ return nbytes;
+}
+#endif
+
+/**
* lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
@@ -2008,6 +2469,75 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
return 0;
}
+/**
+ * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine clears multi-XRI pools statistics when buf contains "clear".
+ *
+ * Return Value:
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ **/
+static ssize_t
+lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char mybuf[64];
+ char *pbuf;
+ u32 i;
+ u32 hwq_count;
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_multixri_pool *multixri_pool;
+
+	if (nbytes > sizeof(mybuf) - 1)
+		nbytes = sizeof(mybuf) - 1;
+
+ /* Protect copy from user */
+ if (!access_ok(buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+
+ if (copy_from_user(mybuf, buf, nbytes))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+
+ if ((strncmp(pbuf, "clear", strlen("clear"))) == 0) {
+ hwq_count = phba->cfg_hdw_queue;
+ for (i = 0; i < hwq_count; i++) {
+ qp = &phba->sli4_hba.hdwq[i];
+ multixri_pool = qp->p_multixri_pool;
+ if (!multixri_pool)
+ continue;
+
+ qp->empty_io_bufs = 0;
+ multixri_pool->pbl_empty_count = 0;
+#ifdef LPFC_MXP_STAT
+ multixri_pool->above_limit_count = 0;
+ multixri_pool->below_limit_count = 0;
+ multixri_pool->stat_max_hwm = 0;
+ multixri_pool->local_pbl_hit_count = 0;
+ multixri_pool->other_pbl_hit_count = 0;
+
+ multixri_pool->stat_pbl_count = 0;
+ multixri_pool->stat_pvt_count = 0;
+ multixri_pool->stat_busy_count = 0;
+ multixri_pool->stat_snapshot_taken = 0;
+#endif
+ }
+ return strlen(pbuf);
+ }
+
+ return -EINVAL;
+}
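/*
 * Illustrative sketch (kernel-style, not part of this driver): the bounded
 * command parse the write handlers above use.  Clamping nbytes below
 * sizeof(mybuf) keeps copy_from_user() inside the stack buffer and leaves
 * room for the NUL terminator.  mydrv_clear_stats() is a hypothetical
 * stand-in for the statistic-reset loops above.
 */
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static void mydrv_clear_stats(void)
{
	/* reset whatever counters the file exposes */
}

static ssize_t mydrv_stats_write(struct file *file, const char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	char mybuf[64] = { 0 };

	if (nbytes > sizeof(mybuf) - 1)		/* keep room for the NUL */
		nbytes = sizeof(mybuf) - 1;

	if (copy_from_user(mybuf, buf, nbytes))
		return -EFAULT;

	if (strncmp(mybuf, "clear", strlen("clear")) == 0) {
		mydrv_clear_stats();
		return nbytes;			/* consume the whole write */
	}
	return -EINVAL;
}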
static int
lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file)
@@ -2098,6 +2628,64 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
}
static int
+lpfc_debugfs_scsistat_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+	/* Buffer sized to hold the full scsistat report */
+ debug->buffer = kzalloc(LPFC_SCSISTAT_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_scsistat_data(vport, debug->buffer,
+ LPFC_SCSISTAT_SIZE);
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+ struct lpfc_hba *phba = vport->phba;
+ char mybuf[6] = {0};
+ int i;
+
+ /* Protect copy from user */
+ if (!access_ok(buf, nbytes))
+ return -EFAULT;
+
+ if (copy_from_user(mybuf, buf, (nbytes >= sizeof(mybuf)) ?
+ (sizeof(mybuf) - 1) : nbytes))
+ return -EFAULT;
+
+ if ((strncmp(&mybuf[0], "reset", strlen("reset")) == 0) ||
+ (strncmp(&mybuf[0], "zero", strlen("zero")) == 0)) {
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ memset(&phba->sli4_hba.hdwq[i].scsi_cstat, 0,
+ sizeof(phba->sli4_hba.hdwq[i].scsi_cstat));
+ }
+ }
+
+ return nbytes;
+}
+
+static int
lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
@@ -2348,7 +2936,7 @@ lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
}
debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
- LPFC_NVMEKTIME_SIZE);
+ LPFC_CPUCHECK_SIZE);
debug->i_private = inode->i_private;
file->private_data = debug;
@@ -2365,9 +2953,10 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
struct lpfc_debug *debug = file->private_data;
struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli4_hdw_queue *qp;
char mybuf[64];
char *pbuf;
- int i;
+ int i, j;
if (nbytes > 64)
nbytes = 64;
@@ -2382,8 +2971,18 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
if (phba->nvmet_support)
phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
else
+ phba->cpucheck_on |= (LPFC_CHECK_NVME_IO |
+ LPFC_CHECK_SCSI_IO);
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) {
+ if (phba->nvmet_support)
+ phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+ else
phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
return strlen(pbuf);
+ } else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) {
+ phba->cpucheck_on |= LPFC_CHECK_SCSI_IO;
+ return strlen(pbuf);
} else if ((strncmp(pbuf, "rcv",
sizeof("rcv") - 1) == 0)) {
if (phba->nvmet_support)
@@ -2397,13 +2996,14 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
return strlen(pbuf);
} else if ((strncmp(pbuf, "zero",
sizeof("zero") - 1) == 0)) {
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- if (i >= LPFC_CHECK_CPU_CNT)
- break;
- phba->cpucheck_rcv_io[i] = 0;
- phba->cpucheck_xmt_io[i] = 0;
- phba->cpucheck_cmpl_io[i] = 0;
- phba->cpucheck_ccmpl_io[i] = 0;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ qp = &phba->sli4_hba.hdwq[i];
+
+ for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
+ qp->cpucheck_rcv_io[j] = 0;
+ qp->cpucheck_xmt_io[j] = 0;
+ qp->cpucheck_cmpl_io[j] = 0;
+ }
}
return strlen(pbuf);
}
@@ -3166,10 +3766,10 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
(unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
- "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+ "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]",
qp->queue_id, qp->entry_count,
qp->entry_size, qp->host_index,
- qp->hba_index, qp->entry_repost);
+ qp->hba_index, qp->notify_interval);
len += snprintf(pbuffer + len,
LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
return len;
@@ -3182,21 +3782,23 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
struct lpfc_queue *qp;
int qidx;
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
- qp = phba->sli4_hba.fcp_wq[qidx];
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
if (qp->assoc_qid != cq_id)
continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
- qp = phba->sli4_hba.nvme_wq[qidx];
- if (qp->assoc_qid != cq_id)
- continue;
- *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
- if (*len >= max_cnt)
- return 1;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
+ if (qp->assoc_qid != cq_id)
+ continue;
+ *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
+ if (*len >= max_cnt)
+ return 1;
+ }
}
return 0;
}
@@ -3217,10 +3819,10 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
- "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+ "HST-IDX[%04d], NTFI[%03d], PLMT[%03d]",
qp->queue_id, qp->entry_count,
qp->entry_size, qp->host_index,
- qp->hba_index, qp->entry_repost);
+ qp->notify_interval, qp->max_proc_limit);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
@@ -3243,15 +3845,15 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
- "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
+ "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n",
qp->queue_id, qp->entry_count, qp->entry_size,
- qp->host_index, qp->hba_index, qp->entry_repost);
+ qp->host_index, qp->hba_index, qp->notify_interval);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
- "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
+ "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n",
datqp->queue_id, datqp->entry_count,
datqp->entry_size, datqp->host_index,
- datqp->hba_index, datqp->entry_repost);
+ datqp->hba_index, datqp->notify_interval);
return len;
}
@@ -3260,31 +3862,25 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
int *len, int max_cnt, int eqidx, int eq_id)
{
struct lpfc_queue *qp;
- int qidx, rc;
+ int rc;
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
- qp = phba->sli4_hba.fcp_cq[qidx];
- if (qp->assoc_qid != eq_id)
- continue;
+ qp = phba->sli4_hba.hdwq[eqidx].fcp_cq;
- *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+ *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
- /* Reset max counter */
- qp->CQ_max_cqe = 0;
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
- if (*len >= max_cnt)
- return 1;
+ if (*len >= max_cnt)
+ return 1;
- rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
- max_cnt, qp->queue_id);
- if (rc)
- return 1;
- }
+ rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+ max_cnt, qp->queue_id);
+ if (rc)
+ return 1;
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
- qp = phba->sli4_hba.nvme_cq[qidx];
- if (qp->assoc_qid != eq_id)
- continue;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
@@ -3295,7 +3891,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1;
rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
- max_cnt, qp->queue_id);
+ max_cnt, qp->queue_id);
if (rc)
return 1;
}
@@ -3338,9 +3934,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
(unsigned long long)qp->q_cnt_4, qp->q_mode);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
- "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+ "HST-IDX[%04d], NTFI[%03d], PLMT[%03d], AFFIN[%03d]",
qp->queue_id, qp->entry_count, qp->entry_size,
- qp->host_index, qp->hba_index, qp->entry_repost);
+ qp->host_index, qp->notify_interval,
+ qp->max_proc_limit, qp->chann);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
return len;
@@ -3387,24 +3984,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
spin_lock_irq(&phba->hbalock);
/* Fast-path event queue */
- if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) {
+ if (phba->sli4_hba.hdwq && phba->cfg_hdw_queue) {
x = phba->lpfc_idiag_last_eq;
- if (phba->cfg_fof && (x >= phba->io_channel_irqs)) {
- phba->lpfc_idiag_last_eq = 0;
- goto fof;
- }
phba->lpfc_idiag_last_eq++;
- if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs)
- if (phba->cfg_fof == 0)
- phba->lpfc_idiag_last_eq = 0;
+ if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue)
+ phba->lpfc_idiag_last_eq = 0;
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
- "EQ %d out of %d HBA EQs\n",
- x, phba->io_channel_irqs);
+ "HDWQ %d out of %d HBA HDWQs\n",
+ x, phba->cfg_hdw_queue);
/* Fast-path EQ */
- qp = phba->sli4_hba.hba_eq[x];
+ qp = phba->sli4_hba.hdwq[x].hba_eq;
if (!qp)
goto out;
@@ -3479,35 +4071,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
goto out;
}
-fof:
- if (phba->cfg_fof) {
- /* FOF EQ */
- qp = phba->sli4_hba.fof_eq;
- len = __lpfc_idiag_print_eq(qp, "FOF", pbuffer, len);
-
- /* Reset max counter */
- if (qp)
- qp->EQ_max_eqe = 0;
-
- if (len >= max_cnt)
- goto too_big;
-
- /* OAS CQ */
- qp = phba->sli4_hba.oas_cq;
- len = __lpfc_idiag_print_cq(qp, "OAS", pbuffer, len);
- /* Reset max counter */
- if (qp)
- qp->CQ_max_cqe = 0;
- if (len >= max_cnt)
- goto too_big;
-
- /* OAS WQ */
- qp = phba->sli4_hba.oas_wq;
- len = __lpfc_idiag_print_wq(qp, "OAS", pbuffer, len);
- if (len >= max_cnt)
- goto too_big;
- }
-
spin_unlock_irq(&phba->hbalock);
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
@@ -3725,9 +4288,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
switch (quetp) {
case LPFC_IDIAG_EQ:
/* HBA event queue */
- if (phba->sli4_hba.hba_eq) {
- for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
- qp = phba->sli4_hba.hba_eq[qidx];
+ if (phba->sli4_hba.hdwq) {
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ qp = phba->sli4_hba.hdwq[qidx].hba_eq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(qp,
@@ -3776,10 +4339,10 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP complete queue */
- if (phba->sli4_hba.fcp_cq) {
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+ if (phba->sli4_hba.hdwq) {
+ for (qidx = 0; qidx < phba->cfg_hdw_queue;
qidx++) {
- qp = phba->sli4_hba.fcp_cq[qidx];
+ qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -3792,23 +4355,20 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
/* NVME complete queue */
- if (phba->sli4_hba.nvme_cq) {
+ if (phba->sli4_hba.hdwq) {
qidx = 0;
do {
- if (phba->sli4_hba.nvme_cq[qidx] &&
- phba->sli4_hba.nvme_cq[qidx]->queue_id ==
- queid) {
+ qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
+ if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.nvme_cq[qidx],
- index, count);
+ qp, index, count);
if (rc)
goto error_out;
- idiag.ptr_private =
- phba->sli4_hba.nvme_cq[qidx];
+ idiag.ptr_private = qp;
goto pass_check;
}
- } while (++qidx < phba->cfg_nvme_io_channel);
+ } while (++qidx < phba->cfg_hdw_queue);
}
goto error_out;
break;
@@ -3849,11 +4409,11 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.nvmels_wq;
goto pass_check;
}
- /* FCP work queue */
- if (phba->sli4_hba.fcp_wq) {
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
- qidx++) {
- qp = phba->sli4_hba.fcp_wq[qidx];
+
+ if (phba->sli4_hba.hdwq) {
+ /* FCP/SCSI work queue */
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -3864,12 +4424,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
}
- }
- /* NVME work queue */
- if (phba->sli4_hba.nvme_wq) {
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
- qidx++) {
- qp = phba->sli4_hba.nvme_wq[qidx];
+ /* NVME work queue */
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -3882,26 +4439,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
- /* NVME work queues */
- if (phba->sli4_hba.nvme_wq) {
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
- qidx++) {
- if (!phba->sli4_hba.nvme_wq[qidx])
- continue;
- if (phba->sli4_hba.nvme_wq[qidx]->queue_id ==
- queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.nvme_wq[qidx],
- index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private =
- phba->sli4_hba.nvme_wq[qidx];
- goto pass_check;
- }
- }
- }
goto error_out;
break;
case LPFC_IDIAG_RQ:
@@ -4866,6 +5403,16 @@ static const struct file_operations lpfc_debugfs_op_nodelist = {
.release = lpfc_debugfs_release,
};
+#undef lpfc_debugfs_op_multixripools
+static const struct file_operations lpfc_debugfs_op_multixripools = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_multixripools_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_multixripools_write,
+ .release = lpfc_debugfs_release,
+};
+
#undef lpfc_debugfs_op_hbqinfo
static const struct file_operations lpfc_debugfs_op_hbqinfo = {
.owner = THIS_MODULE,
@@ -4875,6 +5422,18 @@ static const struct file_operations lpfc_debugfs_op_hbqinfo = {
.release = lpfc_debugfs_release,
};
+#ifdef LPFC_HDWQ_LOCK_STAT
+#undef lpfc_debugfs_op_lockstat
+static const struct file_operations lpfc_debugfs_op_lockstat = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_lockstat_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_lockstat_write,
+ .release = lpfc_debugfs_release,
+};
+#endif
+
#undef lpfc_debugfs_op_dumpHBASlim
static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
.owner = THIS_MODULE,
@@ -4903,6 +5462,16 @@ static const struct file_operations lpfc_debugfs_op_nvmestat = {
.release = lpfc_debugfs_release,
};
+#undef lpfc_debugfs_op_scsistat
+static const struct file_operations lpfc_debugfs_op_scsistat = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_scsistat_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_scsistat_write,
+ .release = lpfc_debugfs_release,
+};
+
#undef lpfc_debugfs_op_nvmektime
static const struct file_operations lpfc_debugfs_op_nvmektime = {
.owner = THIS_MODULE,
@@ -5280,11 +5849,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
if (!lpfc_debugfs_root) {
lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
atomic_set(&lpfc_debugfs_hba_count, 0);
- if (!lpfc_debugfs_root) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0408 Cannot create debugfs root\n");
- goto debug_failed;
- }
}
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
@@ -5295,25 +5859,42 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
pport_setup = true;
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
- if (!phba->hba_debugfs_root) {
+ atomic_inc(&lpfc_debugfs_hba_count);
+ atomic_set(&phba->debugfs_vport_count, 0);
+
+ /* Multi-XRI pools */
+ snprintf(name, sizeof(name), "multixripools");
+ phba->debug_multixri_pools =
+ debugfs_create_file(name, S_IFREG | 0644,
+ phba->hba_debugfs_root,
+ phba,
+ &lpfc_debugfs_op_multixripools);
+ if (!phba->debug_multixri_pools) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0412 Cannot create debugfs hba\n");
+ "0527 Cannot create debugfs multixripools\n");
goto debug_failed;
}
- atomic_inc(&lpfc_debugfs_hba_count);
- atomic_set(&phba->debugfs_vport_count, 0);
/* Setup hbqinfo */
snprintf(name, sizeof(name), "hbqinfo");
phba->debug_hbqinfo =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_hbqinfo);
- if (!phba->debug_hbqinfo) {
+ debugfs_create_file(name, S_IFREG | 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_hbqinfo);
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+ /* Setup lockstat */
+ snprintf(name, sizeof(name), "lockstat");
+ phba->debug_lockstat =
+ debugfs_create_file(name, S_IFREG | 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_lockstat);
+ if (!phba->debug_lockstat) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0411 Cannot create debugfs hbqinfo\n");
+			"0913 Cannot create debugfs lockstat\n");
goto debug_failed;
}
+#endif
/* Setup dumpHBASlim */
if (phba->sli_rev < LPFC_SLI_REV4) {
@@ -5323,12 +5904,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dumpHBASlim);
- if (!phba->debug_dumpHBASlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0413 Cannot create debugfs "
- "dumpHBASlim\n");
- goto debug_failed;
- }
} else
phba->debug_dumpHBASlim = NULL;
@@ -5340,12 +5915,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dumpHostSlim);
- if (!phba->debug_dumpHostSlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0414 Cannot create debugfs "
- "dumpHostSlim\n");
- goto debug_failed;
- }
} else
phba->debug_dumpHostSlim = NULL;
@@ -5355,11 +5924,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dumpData);
- if (!phba->debug_dumpData) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0800 Cannot create debugfs dumpData\n");
- goto debug_failed;
- }
/* Setup dumpDif */
snprintf(name, sizeof(name), "dumpDif");
@@ -5367,11 +5931,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dumpDif);
- if (!phba->debug_dumpDif) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0801 Cannot create debugfs dumpDif\n");
- goto debug_failed;
- }
/* Setup DIF Error Injections */
snprintf(name, sizeof(name), "InjErrLBA");
@@ -5379,11 +5938,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
- if (!phba->debug_InjErrLBA) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0807 Cannot create debugfs InjErrLBA\n");
- goto debug_failed;
- }
phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
snprintf(name, sizeof(name), "InjErrNPortID");
@@ -5391,88 +5945,48 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
- if (!phba->debug_InjErrNPortID) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0809 Cannot create debugfs InjErrNPortID\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "InjErrWWPN");
phba->debug_InjErrWWPN =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
- if (!phba->debug_InjErrWWPN) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0810 Cannot create debugfs InjErrWWPN\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "writeGuardInjErr");
phba->debug_writeGuard =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
- if (!phba->debug_writeGuard) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0802 Cannot create debugfs writeGuard\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "writeAppInjErr");
phba->debug_writeApp =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
- if (!phba->debug_writeApp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0803 Cannot create debugfs writeApp\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "writeRefInjErr");
phba->debug_writeRef =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
- if (!phba->debug_writeRef) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0804 Cannot create debugfs writeRef\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "readGuardInjErr");
phba->debug_readGuard =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
- if (!phba->debug_readGuard) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0808 Cannot create debugfs readGuard\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "readAppInjErr");
phba->debug_readApp =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
- if (!phba->debug_readApp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0805 Cannot create debugfs readApp\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "readRefInjErr");
phba->debug_readRef =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
- if (!phba->debug_readRef) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0806 Cannot create debugfs readApp\n");
- goto debug_failed;
- }
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
@@ -5496,12 +6010,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_slow_ring_trc);
- if (!phba->debug_slow_ring_trc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0415 Cannot create debugfs "
- "slow_ring_trace\n");
- goto debug_failed;
- }
if (!phba->slow_ring_trc) {
phba->slow_ring_trc = kmalloc(
(sizeof(struct lpfc_debugfs_trc) *
@@ -5524,11 +6032,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, 0644,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_nvmeio_trc);
- if (!phba->debug_nvmeio_trc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0574 No create debugfs nvmeio_trc\n");
- goto debug_failed;
- }
atomic_set(&phba->nvmeio_trc_cnt, 0);
if (lpfc_debugfs_max_nvmeio_trc) {
@@ -5576,11 +6079,6 @@ nvmeio_off:
if (!vport->vport_debugfs_root) {
vport->vport_debugfs_root =
debugfs_create_dir(name, phba->hba_debugfs_root);
- if (!vport->vport_debugfs_root) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0417 Can't create debugfs\n");
- goto debug_failed;
- }
atomic_inc(&phba->debugfs_vport_count);
}
@@ -5617,31 +6115,26 @@ nvmeio_off:
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_disc_trc);
- if (!vport->debug_disc_trc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0419 Cannot create debugfs "
- "discovery_trace\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "nodelist");
vport->debug_nodelist =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_nodelist);
- if (!vport->debug_nodelist) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "2985 Can't create debugfs nodelist\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "nvmestat");
vport->debug_nvmestat =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_nvmestat);
- if (!vport->debug_nvmestat) {
+
+ snprintf(name, sizeof(name), "scsistat");
+ vport->debug_scsistat =
+ debugfs_create_file(name, 0644,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_scsistat);
+ if (!vport->debug_scsistat) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0811 Cannot create debugfs nvmestat\n");
+ "0914 Cannot create debugfs scsistat\n");
goto debug_failed;
}
@@ -5650,22 +6143,12 @@ nvmeio_off:
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_nvmektime);
- if (!vport->debug_nvmektime) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0815 Cannot create debugfs nvmektime\n");
- goto debug_failed;
- }
snprintf(name, sizeof(name), "cpucheck");
vport->debug_cpucheck =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_cpucheck);
- if (!vport->debug_cpucheck) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0819 Cannot create debugfs cpucheck\n");
- goto debug_failed;
- }
/*
* The following section is for additional directories/files for the
@@ -5685,11 +6168,6 @@ nvmeio_off:
if (!phba->idiag_root) {
phba->idiag_root =
debugfs_create_dir(name, phba->hba_debugfs_root);
- if (!phba->idiag_root) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "2922 Can't create idiag debugfs\n");
- goto debug_failed;
- }
/* Initialize iDiag data structure */
memset(&idiag, 0, sizeof(idiag));
}
@@ -5700,11 +6178,6 @@ nvmeio_off:
phba->idiag_pci_cfg =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
- if (!phba->idiag_pci_cfg) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "2923 Can't create idiag debugfs\n");
- goto debug_failed;
- }
idiag.offset.last_rd = 0;
}
@@ -5714,11 +6187,6 @@ nvmeio_off:
phba->idiag_bar_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
- if (!phba->idiag_bar_acc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "3056 Can't create idiag debugfs\n");
- goto debug_failed;
- }
idiag.offset.last_rd = 0;
}
@@ -5728,11 +6196,6 @@ nvmeio_off:
phba->idiag_que_info =
debugfs_create_file(name, S_IFREG|S_IRUGO,
phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
- if (!phba->idiag_que_info) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "2924 Can't create idiag debugfs\n");
- goto debug_failed;
- }
}
/* iDiag access PCI function queue */
@@ -5741,11 +6204,6 @@ nvmeio_off:
phba->idiag_que_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
- if (!phba->idiag_que_acc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "2926 Can't create idiag debugfs\n");
- goto debug_failed;
- }
}
/* iDiag access PCI function doorbell registers */
@@ -5754,11 +6212,6 @@ nvmeio_off:
phba->idiag_drb_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
- if (!phba->idiag_drb_acc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "2927 Can't create idiag debugfs\n");
- goto debug_failed;
- }
}
/* iDiag access PCI function control registers */
@@ -5767,11 +6220,6 @@ nvmeio_off:
phba->idiag_ctl_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
- if (!phba->idiag_ctl_acc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "2981 Can't create idiag debugfs\n");
- goto debug_failed;
- }
}
/* iDiag access mbox commands */
@@ -5780,11 +6228,6 @@ nvmeio_off:
phba->idiag_mbx_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
- if (!phba->idiag_mbx_acc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "2980 Can't create idiag debugfs\n");
- goto debug_failed;
- }
}
/* iDiag extents access commands */
@@ -5796,12 +6239,6 @@ nvmeio_off:
S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba,
&lpfc_idiag_op_extAcc);
- if (!phba->idiag_ext_acc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "2986 Cant create "
- "idiag debugfs\n");
- goto debug_failed;
- }
}
}
@@ -5839,6 +6276,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(vport->debug_nvmestat); /* nvmestat */
vport->debug_nvmestat = NULL;
+ debugfs_remove(vport->debug_scsistat); /* scsistat */
+ vport->debug_scsistat = NULL;
+
debugfs_remove(vport->debug_nvmektime); /* nvmektime */
vport->debug_nvmektime = NULL;
@@ -5853,9 +6293,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
if (atomic_read(&phba->debugfs_vport_count) == 0) {
+ debugfs_remove(phba->debug_multixri_pools); /* multixripools*/
+ phba->debug_multixri_pools = NULL;
+
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
+#ifdef LPFC_HDWQ_LOCK_STAT
+ debugfs_remove(phba->debug_lockstat); /* lockstat */
+ phba->debug_lockstat = NULL;
+#endif
debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
phba->debug_dumpHBASlim = NULL;
@@ -5988,11 +6435,13 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_wq(phba, DUMP_ELS, 0);
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
- for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
- lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+ lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
+ }
lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba);
@@ -6003,15 +6452,17 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_cq(phba, DUMP_ELS, 0);
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
- for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
- lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+ lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
+ }
/*
* Dump Event Queues (EQs)
*/
- for (idx = 0; idx < phba->io_channel_irqs; idx++)
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_hba_eq(phba, idx);
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 30efc7bf91bd..93ab7dfb8ee0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -50,6 +50,9 @@
#define LPFC_CPUCHECK_SIZE 8192
#define LPFC_NVMEIO_TRC_SIZE 8192
+/* scsistat output buffer size */
+#define LPFC_SCSISTAT_SIZE 8192
+
#define LPFC_DEBUG_OUT_LINE_SZ 80
/*
@@ -284,6 +287,9 @@ struct lpfc_idiag {
#endif
+/* multixripool output buffer size */
+#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
+
enum {
DUMP_FCP,
DUMP_NVME,
@@ -410,10 +416,10 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
char *qtypestr;
if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.fcp_wq[wqidx];
+ wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.nvme_wq[wqidx];
+ wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
@@ -454,14 +460,15 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
int eqidx;
/* fcp/nvme wq and cq are 1:1, thus same indexes */
+ eq = NULL;
if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.fcp_wq[wqidx];
- cq = phba->sli4_hba.fcp_cq[wqidx];
+ wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
+ cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.nvme_wq[wqidx];
- cq = phba->sli4_hba.nvme_cq[wqidx];
+ wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
+ cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
@@ -478,17 +485,17 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
} else
return;
- for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
- if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id)
+ for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++) {
+ eq = phba->sli4_hba.hdwq[eqidx].hba_eq;
+ if (cq->assoc_qid == eq->queue_id)
break;
}
- if (eqidx == phba->io_channel_irqs) {
+ if (eqidx == phba->cfg_hdw_queue) {
pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
eqidx = 0;
+ eq = phba->sli4_hba.hdwq[0].hba_eq;
}
- eq = phba->sli4_hba.hba_eq[eqidx];
-
if (qtype == DUMP_FCP || qtype == DUMP_NVME)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n",
@@ -516,7 +523,7 @@ lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx)
{
struct lpfc_queue *qp;
- qp = phba->sli4_hba.hba_eq[qidx];
+ qp = phba->sli4_hba.hdwq[qidx].hba_eq;
pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
@@ -564,21 +571,21 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
{
int wq_idx;
- for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
- if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
+ for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
+ if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
break;
- if (wq_idx < phba->cfg_fcp_io_channel) {
+ if (wq_idx < phba->cfg_hdw_queue) {
pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
return;
}
- for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
- if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
+ for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
+ if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
break;
- if (wq_idx < phba->cfg_nvme_io_channel) {
+ if (wq_idx < phba->cfg_hdw_queue) {
pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
return;
}
@@ -646,23 +653,23 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
{
int cq_idx;
- for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++)
- if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
+ for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
+ if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
break;
- if (cq_idx < phba->cfg_fcp_io_channel) {
+ if (cq_idx < phba->cfg_hdw_queue) {
pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
return;
}
- for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
- if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
+ for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
+ if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
break;
- if (cq_idx < phba->cfg_nvme_io_channel) {
+ if (cq_idx < phba->cfg_hdw_queue) {
pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
return;
}
@@ -697,13 +704,13 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
{
int eq_idx;
- for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
- if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
+ for (eq_idx = 0; eq_idx < phba->cfg_hdw_queue; eq_idx++)
+ if (phba->sli4_hba.hdwq[eq_idx].hba_eq->queue_id == qid)
break;
- if (eq_idx < phba->io_channel_irqs) {
+ if (eq_idx < phba->cfg_hdw_queue) {
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[eq_idx].hba_eq);
return;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b3a4789468c3..fc077cb87900 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2827,8 +2827,8 @@ out:
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
phba->pport->fc_myDID = 0;
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
if (phba->nvmet_support)
lpfc_nvmet_update_targetport(phba);
else
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index b183b882d506..aa4961a2caf8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -638,8 +638,6 @@ lpfc_work_done(struct lpfc_hba *phba)
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
if (phba->hba_flag & HBA_RRQ_ACTIVE)
lpfc_handle_rrq_active(phba);
- if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
- lpfc_sli4_fcp_xri_abort_event_proc(phba);
if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
lpfc_sli4_els_xri_abort_event_proc(phba);
if (phba->hba_flag & ASYNC_EVENT)
@@ -859,10 +857,9 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
- struct lpfc_hba *phba = vport->phba;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_LINKDOWN, 0);
@@ -925,8 +922,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
vports[i]->fc_myDID = 0;
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
if (phba->nvmet_support)
lpfc_nvmet_update_targetport(phba);
else
@@ -1012,7 +1009,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
(vport != phba->pport))
return;
- if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_LINKUP, 0);
@@ -3660,8 +3657,8 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
vport->fc_myDID = 0;
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
if (phba->nvmet_support)
lpfc_nvmet_update_targetport(phba);
else
@@ -3923,11 +3920,9 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
- struct lpfc_hba *phba = vport->phba;
-
/* Good status, issue CT Request to NameServer */
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
/* Cannot issue NameServer FCP Query, so finish up
* discovery
@@ -3942,8 +3937,8 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
vport->gidft_inp++;
}
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
/* Cannot issue NameServer NVME Query, so finish up
* discovery
@@ -4059,12 +4054,12 @@ out:
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
FC_TYPE_NVME);
@@ -4100,7 +4095,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct fc_rport_identifiers rport_ids;
struct lpfc_hba *phba = vport->phba;
- if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
return;
/* Remote port has reappeared. Re-register w/ FC transport */
@@ -4175,9 +4170,8 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
struct fc_rport *rport = ndlp->rport;
struct lpfc_vport *vport = ndlp->vport;
- struct lpfc_hba *phba = vport->phba;
- if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
return;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index c15b9b6fb840..ff875b833192 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -194,7 +194,7 @@ struct lpfc_sli_intf {
#define LPFC_ACT_INTR_CNT 4
/* Algorithms for scheduling FCP commands to WQs */
-#define LPFC_FCP_SCHED_ROUND_ROBIN 0
+#define LPFC_FCP_SCHED_BY_HDWQ 0
#define LPFC_FCP_SCHED_BY_CPU 1
/* Algorithms for NameServer Query after RSCN */
@@ -208,12 +208,18 @@ struct lpfc_sli_intf {
/* Configuration of Interrupts / sec for entire HBA port */
#define LPFC_MIN_IMAX 5000
#define LPFC_MAX_IMAX 5000000
-#define LPFC_DEF_IMAX 150000
+#define LPFC_DEF_IMAX 0
+
+#define LPFC_IMAX_THRESHOLD 1000
+#define LPFC_MAX_AUTO_EQ_DELAY 120
+#define LPFC_EQ_DELAY_STEP 15
+#define LPFC_EQD_ISR_TRIGGER 20000
+/* 1s intervals */
+#define LPFC_EQ_DELAY_MSECS 1000
#define LPFC_MIN_CPU_MAP 0
-#define LPFC_MAX_CPU_MAP 2
+#define LPFC_MAX_CPU_MAP 1
#define LPFC_HBA_CPU_MAP 1
-#define LPFC_DRIVER_CPU_MAP 2 /* Default */
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e1129260ed18..7fcdaed3fa94 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -37,6 +37,7 @@
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
+#include <linux/irq.h>
#include <linux/bitops.h>
#include <scsi/scsi.h>
@@ -71,7 +72,6 @@ unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;
/* Used when mapping IRQ vectors in a driver centric manner */
-uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
@@ -93,6 +93,8 @@ static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
+static uint16_t lpfc_find_eq_handle(struct lpfc_hba *, uint16_t);
+static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1037,14 +1039,14 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
- struct lpfc_scsi_buf *psb, *psb_next;
+ struct lpfc_io_buf *psb, *psb_next;
struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
+ struct lpfc_sli4_hdw_queue *qp;
LIST_HEAD(aborts);
LIST_HEAD(nvme_aborts);
LIST_HEAD(nvmet_aborts);
- unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
- int cnt;
+ int cnt, idx;
lpfc_sli_hbqbuf_free_all(phba);
@@ -1071,55 +1073,65 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_unlock(&phba->sli4_hba.sgl_list_lock);
- /* abts_scsi_buf_list_lock required because worker thread uses this
+
+ /* abts_xxxx_buf_list_lock required because worker thread uses this
* list.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
- list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
- &aborts);
- spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
- }
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
- list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
- &nvme_aborts);
- list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
- &nvmet_aborts);
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
- }
+ cnt = 0;
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ qp = &phba->sli4_hba.hdwq[idx];
- spin_unlock_irq(&phba->hbalock);
-
- list_for_each_entry_safe(psb, psb_next, &aborts, list) {
- psb->pCmd = NULL;
- psb->status = IOSTAT_SUCCESS;
- }
- spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
- list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
- spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+ spin_lock(&qp->abts_scsi_buf_list_lock);
+ list_splice_init(&qp->lpfc_abts_scsi_buf_list,
+ &aborts);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- cnt = 0;
- list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
+ list_for_each_entry_safe(psb, psb_next, &aborts, list) {
psb->pCmd = NULL;
psb->status = IOSTAT_SUCCESS;
cnt++;
}
- spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
- phba->put_nvme_bufs += cnt;
- list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
- spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+ spin_lock(&qp->io_buf_list_put_lock);
+ list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
+ qp->put_io_bufs += qp->abts_scsi_io_bufs;
+ qp->abts_scsi_io_bufs = 0;
+ spin_unlock(&qp->io_buf_list_put_lock);
+ spin_unlock(&qp->abts_scsi_buf_list_lock);
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ spin_lock(&qp->abts_nvme_buf_list_lock);
+ list_splice_init(&qp->lpfc_abts_nvme_buf_list,
+ &nvme_aborts);
+ list_for_each_entry_safe(psb, psb_next, &nvme_aborts,
+ list) {
+ psb->pCmd = NULL;
+ psb->status = IOSTAT_SUCCESS;
+ cnt++;
+ }
+ spin_lock(&qp->io_buf_list_put_lock);
+ qp->put_io_bufs += qp->abts_nvme_io_bufs;
+ qp->abts_nvme_io_bufs = 0;
+ list_splice_init(&nvme_aborts,
+ &qp->lpfc_io_buf_list_put);
+ spin_unlock(&qp->io_buf_list_put_lock);
+ spin_unlock(&qp->abts_nvme_buf_list_lock);
+
+ }
+ }
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+ &nvmet_aborts);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
}
+ spin_unlock_irq(&phba->hbalock);
lpfc_sli4_free_sp_events(phba);
- return 0;
+ return cnt;
}
/**
@@ -1239,6 +1251,96 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
return;
}
+static void
+lpfc_hb_eq_delay_work(struct work_struct *work)
+{
+ struct lpfc_hba *phba = container_of(to_delayed_work(work),
+ struct lpfc_hba, eq_delay_work);
+ struct lpfc_eq_intr_info *eqi, *eqi_new;
+ struct lpfc_queue *eq, *eq_next;
+ unsigned char *eqcnt = NULL;
+ uint32_t usdelay;
+ int i;
+
+ if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
+ return;
+
+ if (phba->link_state == LPFC_HBA_ERROR ||
+ phba->pport->fc_flag & FC_OFFLINE_MODE)
+ goto requeue;
+
+ eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
+ GFP_KERNEL);
+ if (!eqcnt)
+ goto requeue;
+
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ eq = phba->sli4_hba.hdwq[i].hba_eq;
+ if (eq && eqcnt[eq->last_cpu] < 2)
+ eqcnt[eq->last_cpu]++;
+ continue;
+ }
+
+ for_each_present_cpu(i) {
+ if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2)
+ continue;
+
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+
+ usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
+ LPFC_EQ_DELAY_STEP;
+ if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+ usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+
+ eqi->icnt = 0;
+
+ list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
+ if (eq->last_cpu != i) {
+ eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
+ eq->last_cpu);
+ list_move_tail(&eq->cpu_list, &eqi_new->list);
+ continue;
+ }
+ if (usdelay != eq->q_mode)
+ lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
+ usdelay);
+ }
+ }
+
+ kfree(eqcnt);
+
+requeue:
+ queue_delayed_work(phba->wq, &phba->eq_delay_work,
+ msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
+}
+
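
The hunk above replaces the old per-heartbeat EQ-delay math with a sampled heuristic: once per LPFC_EQ_DELAY_MSECS window, each CPU's accumulated interrupt count (eqi->icnt) is converted into an EQ coalescing delay. A minimal userspace sketch of that conversion, with the constants mirrored from the lpfc_hw4.h hunk and sample counts chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Constants mirrored from the lpfc_hw4.h hunk in this patch. */
#define LPFC_IMAX_THRESHOLD    1000
#define LPFC_EQ_DELAY_STEP     15
#define LPFC_MAX_AUTO_EQ_DELAY 120

/* Map an EQ's interrupt count over the last 1s sample window to a
 * coalescing delay in microseconds, as lpfc_hb_eq_delay_work() does. */
static uint32_t eq_delay_from_icnt(uint32_t icnt)
{
	uint32_t usdelay = (icnt / LPFC_IMAX_THRESHOLD) * LPFC_EQ_DELAY_STEP;

	if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
		usdelay = LPFC_MAX_AUTO_EQ_DELAY;
	return usdelay;
}

int main(void)
{
	uint32_t samples[] = { 500, 1000, 5000, 20000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("icnt=%u -> %u us\n", samples[i],
		       eq_delay_from_icnt(samples[i]));
	return 0;
}

At 20000 interrupts per window the raw 300 us is clamped to LPFC_MAX_AUTO_EQ_DELAY (120 us), which bounds how much latency the auto-tuner will trade for fewer interrupts.
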
+/**
+ * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * For each heartbeat, this routine applies heuristics to adjust the XRI
+ * distribution, with the goal of fully utilizing free XRIs.
+ **/
+static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
+{
+ u32 i;
+ u32 hwq_count;
+
+ hwq_count = phba->cfg_hdw_queue;
+ for (i = 0; i < hwq_count; i++) {
+ /* Adjust XRIs in private pool */
+ lpfc_adjust_pvt_pool_count(phba, i);
+
+ /* Adjust high watermark */
+ lpfc_adjust_high_watermark(phba, i);
+
+#ifdef LPFC_MXP_STAT
+ /* Snapshot pbl, pvt and busy count */
+ lpfc_snapshot_mxp(phba, i);
+#endif
+ }
+}
+
/**
* lpfc_hb_timeout_handler - The HBA-timer timeout handler
* @phba: pointer to lpfc hba data structure.
@@ -1264,16 +1366,11 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
int retval, i;
struct lpfc_sli *psli = &phba->sli;
LIST_HEAD(completions);
- struct lpfc_queue *qp;
- unsigned long time_elapsed;
- uint32_t tick_cqe, max_cqe, val;
- uint64_t tot, data1, data2, data3;
- struct lpfc_nvmet_tgtport *tgtp;
- struct lpfc_register reg_data;
- struct nvme_fc_local_port *localport;
- struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_ctrl_stat *cstat;
- void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
+
+ if (phba->cfg_xri_rebalancing) {
+ /* Multi-XRI pools handler */
+ lpfc_hb_mxp_handler(phba);
+ }
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@@ -1288,107 +1385,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
(phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
- if (phba->cfg_auto_imax) {
- if (!phba->last_eqdelay_time) {
- phba->last_eqdelay_time = jiffies;
- goto skip_eqdelay;
- }
- time_elapsed = jiffies - phba->last_eqdelay_time;
- phba->last_eqdelay_time = jiffies;
-
- tot = 0xffff;
- /* Check outstanding IO count */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- if (phba->nvmet_support) {
- tgtp = phba->targetport->private;
- /* Calculate outstanding IOs */
- tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
- tot += atomic_read(&tgtp->xmt_fcp_release);
- tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
- } else {
- localport = phba->pport->localport;
- if (!localport || !localport->private)
- goto skip_eqdelay;
- lport = (struct lpfc_nvme_lport *)
- localport->private;
- tot = 0;
- for (i = 0;
- i < phba->cfg_nvme_io_channel; i++) {
- cstat = &lport->cstat[i];
- data1 = atomic_read(
- &cstat->fc4NvmeInputRequests);
- data2 = atomic_read(
- &cstat->fc4NvmeOutputRequests);
- data3 = atomic_read(
- &cstat->fc4NvmeControlRequests);
- tot += (data1 + data2 + data3);
- tot -= atomic_read(
- &cstat->fc4NvmeIoCmpls);
- }
- }
- }
-
- /* Interrupts per sec per EQ */
- val = phba->cfg_fcp_imax / phba->io_channel_irqs;
- tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
-
- /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
- max_cqe = time_elapsed * tick_cqe;
-
- for (i = 0; i < phba->io_channel_irqs; i++) {
- /* Fast-path EQ */
- qp = phba->sli4_hba.hba_eq[i];
- if (!qp)
- continue;
-
- /* Use no EQ delay if we don't have many outstanding
- * IOs, or if we are only processing 1 CQE/ISR or less.
- * Otherwise, assume we can process up to lpfc_fcp_imax
- * interrupts per HBA.
- */
- if (tot < LPFC_NODELAY_MAX_IO ||
- qp->EQ_cqe_cnt <= max_cqe)
- val = 0;
- else
- val = phba->cfg_fcp_imax;
-
- if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
- /* Use EQ Delay Register method */
-
- /* Convert for EQ Delay register */
- if (val) {
- /* First, interrupts per sec per EQ */
- val = phba->cfg_fcp_imax /
- phba->io_channel_irqs;
-
- /* us delay between each interrupt */
- val = LPFC_SEC_TO_USEC / val;
- }
- if (val != qp->q_mode) {
- reg_data.word0 = 0;
- bf_set(lpfc_sliport_eqdelay_id,
- &reg_data, qp->queue_id);
- bf_set(lpfc_sliport_eqdelay_delay,
- &reg_data, val);
- writel(reg_data.word0, eqdreg);
- }
- } else {
- /* Use mbox command method */
- if (val != qp->q_mode)
- lpfc_modify_hba_eq_delay(phba, i,
- 1, val);
- }
-
- /*
- * val is cfg_fcp_imax or 0 for mbox delay or us delay
- * between interrupts for EQDR.
- */
- qp->q_mode = val;
- qp->EQ_cqe_cnt = 0;
- }
- }
-
-skip_eqdelay:
spin_lock_irq(&phba->pport->work_port_lock);
if (time_after(phba->last_completion_time +
@@ -2943,7 +2939,9 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
- lpfc_stop_vport_timers(phba->pport);
+ if (phba->pport)
+ lpfc_stop_vport_timers(phba->pport);
+ cancel_delayed_work_sync(&phba->eq_delay_work);
del_timer_sync(&phba->sli.mbox_tmo);
del_timer_sync(&phba->fabric_block_timer);
del_timer_sync(&phba->eratt_poll);
@@ -3071,6 +3069,242 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
}
/**
+ * lpfc_create_expedite_pool - create expedite pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
+ * to the expedite pool and marks them as expedite.
+ **/
+void lpfc_create_expedite_pool(struct lpfc_hba *phba)
+{
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd_next;
+ struct lpfc_epd_pool *epd_pool;
+ unsigned long iflag;
+
+ epd_pool = &phba->epd_pool;
+ qp = &phba->sli4_hba.hdwq[0];
+
+ spin_lock_init(&epd_pool->lock);
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
+ spin_lock(&epd_pool->lock);
+ INIT_LIST_HEAD(&epd_pool->list);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &qp->lpfc_io_buf_list_put, list) {
+ list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
+ lpfc_ncmd->expedite = true;
+ qp->put_io_bufs--;
+ epd_pool->count++;
+ if (epd_pool->count >= XRI_BATCH)
+ break;
+ }
+ spin_unlock(&epd_pool->lock);
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
+}
+
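
lpfc_create_expedite_pool() above seeds the expedite pool by detaching up to XRI_BATCH buffers from HWQ 0's put list and marking them. A self-contained sketch of that batch-move pattern; the XRI_BATCH value of 8 is an assumption here (the driver's define lives elsewhere in lpfc.h), and plain pointers stand in for the locked kernel lists:

#include <stdio.h>

#define XRI_BATCH 8	/* assumed batch size, for illustration only */

struct buf {
	int expedite;
	struct buf *next;
};

/* Move up to XRI_BATCH buffers from @src to @dst and mark them expedite,
 * the same pattern lpfc_create_expedite_pool() applies to HWQ 0's put
 * list. Returns the number of buffers moved. */
static int move_batch(struct buf **src, struct buf **dst)
{
	int n = 0;

	while (*src && n < XRI_BATCH) {
		struct buf *b = *src;

		*src = b->next;
		b->expedite = 1;
		b->next = *dst;
		*dst = b;
		n++;
	}
	return n;
}

int main(void)
{
	struct buf pool[10] = { { 0 } }, *src = NULL, *dst = NULL;
	int i;

	for (i = 0; i < 10; i++) {
		pool[i].next = src;
		src = &pool[i];
	}
	printf("moved %d\n", move_batch(&src, &dst));	/* moved 8 */
	return 0;
}
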
+/**
+ * lpfc_destroy_expedite_pool - destroy expedite pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
+ * of HWQ 0 and clears the expedite mark.
+ **/
+void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
+{
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd_next;
+ struct lpfc_epd_pool *epd_pool;
+ unsigned long iflag;
+
+ epd_pool = &phba->epd_pool;
+ qp = &phba->sli4_hba.hdwq[0];
+
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
+ spin_lock(&epd_pool->lock);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &epd_pool->list, list) {
+ list_move_tail(&lpfc_ncmd->list,
+ &qp->lpfc_io_buf_list_put);
+ lpfc_ncmd->flags = false;
+ qp->put_io_bufs++;
+ epd_pool->count--;
+ }
+ spin_unlock(&epd_pool->lock);
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
+}
+
+/**
+ * lpfc_create_multixri_pools - create multi-XRI pools
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine initializes the public and private pools per HWQ, then moves
+ * XRIs from lpfc_io_buf_list_put to the public pool. High and low watermarks
+ * are also initialized.
+ **/
+void lpfc_create_multixri_pools(struct lpfc_hba *phba)
+{
+ u32 i, j;
+ u32 hwq_count;
+ u32 count_per_hwq;
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd_next;
+ unsigned long iflag;
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_multixri_pool *multixri_pool;
+ struct lpfc_pbl_pool *pbl_pool;
+ struct lpfc_pvt_pool *pvt_pool;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
+ phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
+ phba->sli4_hba.io_xri_cnt);
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_create_expedite_pool(phba);
+
+ hwq_count = phba->cfg_hdw_queue;
+ count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
+
+ for (i = 0; i < hwq_count; i++) {
+ multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
+
+ if (!multixri_pool) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1238 Failed to allocate memory for "
+ "multixri_pool\n");
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_destroy_expedite_pool(phba);
+
+ j = 0;
+ while (j < i) {
+ qp = &phba->sli4_hba.hdwq[j];
+ kfree(qp->p_multixri_pool);
+ j++;
+ }
+ phba->cfg_xri_rebalancing = 0;
+ return;
+ }
+
+ qp = &phba->sli4_hba.hdwq[i];
+ qp->p_multixri_pool = multixri_pool;
+
+ multixri_pool->xri_limit = count_per_hwq;
+ multixri_pool->rrb_next_hwqid = i;
+
+ /* Deal with public free xri pool */
+ pbl_pool = &multixri_pool->pbl_pool;
+ spin_lock_init(&pbl_pool->lock);
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
+ spin_lock(&pbl_pool->lock);
+ INIT_LIST_HEAD(&pbl_pool->list);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &qp->lpfc_io_buf_list_put, list) {
+ list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
+ qp->put_io_bufs--;
+ pbl_pool->count++;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
+ pbl_pool->count, i);
+ spin_unlock(&pbl_pool->lock);
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
+
+ /* Deal with private free xri pool */
+ pvt_pool = &multixri_pool->pvt_pool;
+ pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
+ pvt_pool->low_watermark = XRI_BATCH;
+ spin_lock_init(&pvt_pool->lock);
+ spin_lock_irqsave(&pvt_pool->lock, iflag);
+ INIT_LIST_HEAD(&pvt_pool->list);
+ pvt_pool->count = 0;
+ spin_unlock_irqrestore(&pvt_pool->lock, iflag);
+ }
+}
+
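
The per-HWQ sizing in lpfc_create_multixri_pools() is straightforward arithmetic: each hardware queue gets an equal share of the common XRIs, the private pool may grow to at most half that share, and refill triggers at one XRI batch. A sketch of the math (XRI_BATCH again assumed to be 8):

#include <stdio.h>
#include <stdint.h>

#define XRI_BATCH 8	/* assumed, as above */

struct pool_cfg {
	uint32_t xri_limit;		/* per-HWQ share of the common XRIs */
	uint32_t high_watermark;	/* pvt pool may hold half the share */
	uint32_t low_watermark;		/* refill threshold: one XRI batch */
};

/* Per-HWQ pool sizing as lpfc_create_multixri_pools() computes it. */
static struct pool_cfg pool_sizing(uint32_t io_xri_cnt, uint32_t hwq_count)
{
	struct pool_cfg cfg;

	cfg.xri_limit = io_xri_cnt / hwq_count;
	cfg.high_watermark = cfg.xri_limit / 2;
	cfg.low_watermark = XRI_BATCH;
	return cfg;
}

int main(void)
{
	struct pool_cfg cfg = pool_sizing(2048, 16);

	printf("limit=%u high=%u low=%u\n",	/* limit=128 high=64 low=8 */
	       cfg.xri_limit, cfg.high_watermark, cfg.low_watermark);
	return 0;
}
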
+/**
+ * lpfc_destroy_multixri_pools - destroy multi-XRI pools
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
+ **/
+void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
+{
+ u32 i;
+ u32 hwq_count;
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd_next;
+ unsigned long iflag;
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_multixri_pool *multixri_pool;
+ struct lpfc_pbl_pool *pbl_pool;
+ struct lpfc_pvt_pool *pvt_pool;
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_destroy_expedite_pool(phba);
+
+ hwq_count = phba->cfg_hdw_queue;
+
+ for (i = 0; i < hwq_count; i++) {
+ qp = &phba->sli4_hba.hdwq[i];
+ multixri_pool = qp->p_multixri_pool;
+ if (!multixri_pool)
+ continue;
+
+ qp->p_multixri_pool = NULL;
+
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
+
+ /* Deal with public free xri pool */
+ pbl_pool = &multixri_pool->pbl_pool;
+ spin_lock(&pbl_pool->lock);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
+ pbl_pool->count, i);
+
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &pbl_pool->list, list) {
+ list_move_tail(&lpfc_ncmd->list,
+ &qp->lpfc_io_buf_list_put);
+ qp->put_io_bufs++;
+ pbl_pool->count--;
+ }
+
+ INIT_LIST_HEAD(&pbl_pool->list);
+ pbl_pool->count = 0;
+
+ spin_unlock(&pbl_pool->lock);
+
+ /* Deal with private free xri pool */
+ pvt_pool = &multixri_pool->pvt_pool;
+ spin_lock(&pvt_pool->lock);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
+ pvt_pool->count, i);
+
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &pvt_pool->list, list) {
+ list_move_tail(&lpfc_ncmd->list,
+ &qp->lpfc_io_buf_list_put);
+ qp->put_io_bufs++;
+ pvt_pool->count--;
+ }
+
+ INIT_LIST_HEAD(&pvt_pool->list);
+ pvt_pool->count = 0;
+
+ spin_unlock(&pvt_pool->lock);
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
+
+ kfree(multixri_pool);
+ }
+}
+
+/**
* lpfc_online - Initialize and bring a HBA online
* @phba: pointer to lpfc hba data structure.
*
@@ -3152,6 +3386,9 @@ lpfc_online(struct lpfc_hba *phba)
}
lpfc_destroy_vport_work_array(phba, vports);
+ if (phba->cfg_xri_rebalancing)
+ lpfc_create_multixri_pools(phba);
+
lpfc_unblock_mgmt_io(phba);
return 0;
}
@@ -3310,6 +3547,9 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
+
+ if (phba->cfg_xri_rebalancing)
+ lpfc_destroy_multixri_pools(phba);
}
/**
@@ -3323,7 +3563,7 @@ lpfc_offline(struct lpfc_hba *phba)
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
- struct lpfc_scsi_buf *sb, *sb_next;
+ struct lpfc_io_buf *sb, *sb_next;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
@@ -3355,50 +3595,57 @@ lpfc_scsi_free(struct lpfc_hba *phba)
spin_unlock(&phba->scsi_buf_list_get_lock);
spin_unlock_irq(&phba->hbalock);
}
+
/**
- * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
+ * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
* @phba: pointer to lpfc hba data structure.
*
- * This routine is to free all the NVME buffers and IOCBs from the driver
+ * This routine frees all the IO buffers and IOCBs from the driver
* list back to kernel. It is called from lpfc_pci_remove_one to free
* the internal resources before the device is removed from the system.
**/
-static void
-lpfc_nvme_free(struct lpfc_hba *phba)
+void
+lpfc_io_free(struct lpfc_hba *phba)
{
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
-
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
- return;
+ struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
+ struct lpfc_sli4_hdw_queue *qp;
+ int idx;
spin_lock_irq(&phba->hbalock);
- /* Release all the lpfc_nvme_bufs maintained by this host. */
- spin_lock(&phba->nvme_buf_list_put_lock);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_put, list) {
- list_del(&lpfc_ncmd->list);
- phba->put_nvme_bufs--;
- dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
- lpfc_ncmd->dma_handle);
- kfree(lpfc_ncmd);
- phba->total_nvme_bufs--;
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ qp = &phba->sli4_hba.hdwq[idx];
+ /* Release all the lpfc_nvme_bufs maintained by this host. */
+ spin_lock(&qp->io_buf_list_put_lock);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &qp->lpfc_io_buf_list_put,
+ list) {
+ list_del(&lpfc_ncmd->list);
+ qp->put_io_bufs--;
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ qp->total_io_bufs--;
+ }
+ spin_unlock(&qp->io_buf_list_put_lock);
+
+ spin_lock(&qp->io_buf_list_get_lock);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &qp->lpfc_io_buf_list_get,
+ list) {
+ list_del(&lpfc_ncmd->list);
+ qp->get_io_bufs--;
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ qp->total_io_bufs--;
+ }
+ spin_unlock(&qp->io_buf_list_get_lock);
}
- spin_unlock(&phba->nvme_buf_list_put_lock);
- spin_lock(&phba->nvme_buf_list_get_lock);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del(&lpfc_ncmd->list);
- phba->get_nvme_bufs--;
- dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
- lpfc_ncmd->dma_handle);
- kfree(lpfc_ncmd);
- phba->total_nvme_bufs--;
- }
- spin_unlock(&phba->nvme_buf_list_get_lock);
spin_unlock_irq(&phba->hbalock);
}
+
/**
* lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
* @phba: pointer to lpfc hba data structure.
@@ -3640,8 +3887,102 @@ out_free_mem:
return rc;
}
+int
+lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
+{
+ LIST_HEAD(blist);
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_io_buf *lpfc_cmd;
+ struct lpfc_io_buf *iobufp, *prev_iobufp;
+ int idx, cnt, xri, inserted;
+
+ cnt = 0;
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ qp = &phba->sli4_hba.hdwq[idx];
+ spin_lock_irq(&qp->io_buf_list_get_lock);
+ spin_lock(&qp->io_buf_list_put_lock);
+
+ /* Take everything off the get and put lists */
+ list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
+ list_splice(&qp->lpfc_io_buf_list_put, &blist);
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+ cnt += qp->get_io_bufs + qp->put_io_bufs;
+ qp->get_io_bufs = 0;
+ qp->put_io_bufs = 0;
+ qp->total_io_bufs = 0;
+ spin_unlock(&qp->io_buf_list_put_lock);
+ spin_unlock_irq(&qp->io_buf_list_get_lock);
+ }
+
+ /*
+ * Take IO buffers off blist and put on cbuf sorted by XRI.
+ * This is because POST_SGL takes a sequential range of XRIs
+ * to post to the firmware.
+ */
+ for (idx = 0; idx < cnt; idx++) {
+ list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
+ if (!lpfc_cmd)
+ return cnt;
+ if (idx == 0) {
+ list_add_tail(&lpfc_cmd->list, cbuf);
+ continue;
+ }
+ xri = lpfc_cmd->cur_iocbq.sli4_xritag;
+ inserted = 0;
+ prev_iobufp = NULL;
+ list_for_each_entry(iobufp, cbuf, list) {
+ if (xri < iobufp->cur_iocbq.sli4_xritag) {
+ if (prev_iobufp)
+ list_add(&lpfc_cmd->list,
+ &prev_iobufp->list);
+ else
+ list_add(&lpfc_cmd->list, cbuf);
+ inserted = 1;
+ break;
+ }
+ prev_iobufp = iobufp;
+ }
+ if (!inserted)
+ list_add_tail(&lpfc_cmd->list, cbuf);
+ }
+ return cnt;
+}
+
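
lpfc_io_buf_flush() keeps cbuf sorted by XRI because POST_SGL posts a sequential XRI range to the firmware. A standalone sketch of that ordered insertion, using a plain singly linked list in place of the kernel's list_head (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for an IO buffer keyed by its XRI tag. */
struct io_buf {
	int xri;
	struct io_buf *next;
};

/* Insert @buf into *head keeping ascending XRI order, mirroring the
 * insertion loop in lpfc_io_buf_flush(). */
static void insert_by_xri(struct io_buf **head, struct io_buf *buf)
{
	struct io_buf **pp = head;

	while (*pp && (*pp)->xri < buf->xri)
		pp = &(*pp)->next;
	buf->next = *pp;
	*pp = buf;
}

int main(void)
{
	int xris[] = { 42, 7, 19, 3 };
	struct io_buf *head = NULL, *p;
	struct io_buf *bufs = calloc(4, sizeof(*bufs));
	int i;

	if (!bufs)
		return 1;
	for (i = 0; i < 4; i++) {
		bufs[i].xri = xris[i];
		insert_by_xri(&head, &bufs[i]);
	}
	for (p = head; p; p = p->next)
		printf("%d ", p->xri);	/* prints: 3 7 19 42 */
	printf("\n");
	free(bufs);
	return 0;
}
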
+int
+lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
+{
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_io_buf *lpfc_cmd;
+ int idx, cnt;
+
+ qp = phba->sli4_hba.hdwq;
+ cnt = 0;
+ while (!list_empty(cbuf)) {
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ list_remove_head(cbuf, lpfc_cmd,
+ struct lpfc_io_buf, list);
+ if (!lpfc_cmd)
+ return cnt;
+ cnt++;
+ qp = &phba->sli4_hba.hdwq[idx];
+ lpfc_cmd->hdwq_no = idx;
+ lpfc_cmd->hdwq = qp;
+ lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
+ lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+ spin_lock(&qp->io_buf_list_put_lock);
+ list_add_tail(&lpfc_cmd->list,
+ &qp->lpfc_io_buf_list_put);
+ qp->put_io_bufs++;
+ qp->total_io_bufs++;
+ spin_unlock(&qp->io_buf_list_put_lock);
+ }
+ }
+ return cnt;
+}
+
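
lpfc_io_buf_replenish() then deals the sorted buffers back out round-robin, so buffer k lands on hardware queue k modulo cfg_hdw_queue. A trivial sketch of that mapping:

#include <stdio.h>

/* Round-robin placement as lpfc_io_buf_replenish() walks the queues. */
static int hwq_for_buf(int buf_idx, int hwq_count)
{
	return buf_idx % hwq_count;
}

int main(void)
{
	int k;

	for (k = 0; k < 8; k++)
		printf("buf %d -> hwq %d\n", k, hwq_for_buf(k, 4));
	return 0;
}
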
/**
- * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
+ * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
* @phba: pointer to lpfc hba data structure.
*
* This routine first calculates the sizes of the current els and allocated
@@ -3653,94 +3994,192 @@ out_free_mem:
* 0 - successful (for now, it always returns 0)
**/
int
-lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
+lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
- struct lpfc_scsi_buf *psb, *psb_next;
- uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
- LIST_HEAD(scsi_sgl_list);
- int rc;
-
- /*
- * update on pci function's els xri-sgl list
- */
- els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
- phba->total_scsi_bufs = 0;
+ struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
+ uint16_t i, lxri, els_xri_cnt;
+ uint16_t io_xri_cnt, io_xri_max;
+ LIST_HEAD(io_sgl_list);
+ int rc, cnt;
/*
- * update on pci function's allocated scsi xri-sgl list
+ * update on pci function's allocated nvme xri-sgl list
*/
- /* maximum number of xris available for scsi buffers */
- phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
- els_xri_cnt;
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
- return 0;
+ /* maximum number of xris available for nvme buffers */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+ io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+ phba->sli4_hba.io_xri_max = io_xri_max;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- phba->sli4_hba.scsi_xri_max = /* Split them up */
- (phba->sli4_hba.scsi_xri_max *
- phba->cfg_xri_split) / 100;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6074 Current allocated XRI sgl count:%d, "
+ "maximum XRI count:%d\n",
+ phba->sli4_hba.io_xri_cnt,
+ phba->sli4_hba.io_xri_max);
- spin_lock_irq(&phba->scsi_buf_list_get_lock);
- spin_lock(&phba->scsi_buf_list_put_lock);
- list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
- list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
- spin_unlock(&phba->scsi_buf_list_put_lock);
- spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+ cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "6060 Current allocated SCSI xri-sgl count:%d, "
- "maximum SCSI xri count:%d (split:%d)\n",
- phba->sli4_hba.scsi_xri_cnt,
- phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
-
- if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
- /* max scsi xri shrinked below the allocated scsi buffers */
- scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
- phba->sli4_hba.scsi_xri_max;
- /* release the extra allocated scsi buffers */
- for (i = 0; i < scsi_xri_cnt; i++) {
- list_remove_head(&scsi_sgl_list, psb,
- struct lpfc_scsi_buf, list);
- if (psb) {
+ if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
+ /* max nvme xri shrunk below the allocated nvme buffers */
+ io_xri_cnt = phba->sli4_hba.io_xri_cnt -
+ phba->sli4_hba.io_xri_max;
+ /* release the extra allocated nvme buffers */
+ for (i = 0; i < io_xri_cnt; i++) {
+ list_remove_head(&io_sgl_list, lpfc_ncmd,
+ struct lpfc_io_buf, list);
+ if (lpfc_ncmd) {
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- psb->data, psb->dma_handle);
- kfree(psb);
+ lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
}
}
- spin_lock_irq(&phba->scsi_buf_list_get_lock);
- phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
- spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+ phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
}
- /* update xris associated to remaining allocated scsi buffers */
- psb = NULL;
- psb_next = NULL;
- list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
+ /* update xris associated to remaining allocated nvme buffers */
+ lpfc_ncmd = NULL;
+ lpfc_ncmd_next = NULL;
+ phba->sli4_hba.io_xri_cnt = cnt;
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &io_sgl_list, list) {
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2560 Failed to allocate xri for "
- "scsi buffer\n");
+ "6075 Failed to allocate xri for "
+ "nvme buffer\n");
rc = -ENOMEM;
goto out_free_mem;
}
- psb->cur_iocbq.sli4_lxritag = lxri;
- psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
+ lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
}
- spin_lock_irq(&phba->scsi_buf_list_get_lock);
- spin_lock(&phba->scsi_buf_list_put_lock);
- list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
- spin_unlock(&phba->scsi_buf_list_put_lock);
- spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+ cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
return 0;
out_free_mem:
- lpfc_scsi_free(phba);
+ lpfc_io_free(phba);
return rc;
}
+/**
+ * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
+ * @phba: pointer to lpfc hba data structure.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates IO buffers for a device with the SLI-4 interface
+ * spec; each buffer contains all the information needed to initiate an I/O.
+ * After allocating up to @num_to_alloc IO buffers and putting them on a
+ * list, it posts them to the port using an SGL block post.
+ *
+ * Return codes:
+ * int - number of IO buffers that were allocated and posted.
+ * 0 = failure, less than num_to_alloc is a partial failure.
+ **/
+int
+lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
+{
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_iocbq *pwqeq;
+ uint16_t iotag, lxri = 0;
+ int bcnt, num_posted;
+ LIST_HEAD(prep_nblist);
+ LIST_HEAD(post_nblist);
+ LIST_HEAD(nvme_nblist);
+
+ /* Sanity check to ensure our sizing is right for both SCSI and NVME */
+ if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "6426 Common buffer size %zd exceeds %d\n",
+ sizeof(struct lpfc_io_buf),
+ LPFC_COMMON_IO_BUF_SZ);
+ return 0;
+ }
+
+ phba->sli4_hba.io_xri_cnt = 0;
+ for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+ lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
+ if (!lpfc_ncmd)
+ break;
+ /*
+ * Get memory from the pci pool to map the virt space to
+ * pci bus space for an I/O. The DMA buffer includes the
+ * number of SGE's necessary to support the sg_tablesize.
+ */
+ lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_KERNEL,
+ &lpfc_ncmd->dma_handle);
+ if (!lpfc_ncmd->data) {
+ kfree(lpfc_ncmd);
+ break;
+ }
+ memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
+
+ /*
+ * 4K Page alignment is CRITICAL to BlockGuard, double check
+ * to be sure.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ (((unsigned long)(lpfc_ncmd->data) &
+ (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "3369 Memory alignment err: addr=%lx\n",
+ (unsigned long)lpfc_ncmd->data);
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ break;
+ }
+
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ break;
+ }
+ pwqeq = &lpfc_ncmd->cur_iocbq;
+
+ /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
+ iotag = lpfc_sli_next_iotag(phba, pwqeq);
+ if (iotag == 0) {
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6121 Failed to allocate IOTAG for"
+ " XRI:0x%x\n", lxri);
+ lpfc_sli4_free_xri(phba, lxri);
+ break;
+ }
+ pwqeq->sli4_lxritag = lxri;
+ pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ pwqeq->context1 = lpfc_ncmd;
+
+ /* Initialize local short-hand pointers. */
+ lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
+ lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
+ lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+ spin_lock_init(&lpfc_ncmd->buf_lock);
+
+ /* add the nvme buffer to a post list */
+ list_add_tail(&lpfc_ncmd->list, &post_nblist);
+ phba->sli4_hba.io_xri_cnt++;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+ "6114 Allocate %d out of %d requested new NVME "
+ "buffers\n", bcnt, num_to_alloc);
+
+ /* post the list of nvme buffer sgls to port if available */
+ if (!list_empty(&post_nblist))
+ num_posted = lpfc_sli4_post_io_sgl_list(
+ phba, &post_nblist, bcnt);
+ else
+ num_posted = 0;
+
+ return num_posted;
+}
+
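
A sketch of the SLI4 page-alignment test that guards BlockGuard in lpfc_new_io_buf() above; SLI4_PAGE_SIZE is assumed to be 4096 here, matching its use as a 4K page:

#include <stdio.h>
#include <stdint.h>

#define SLI4_PAGE_SIZE 4096	/* assumed 4K SLI4 page */

/* BlockGuard requires DMA buffers aligned to an SLI4 page; this mirrors
 * the mask test in lpfc_new_io_buf(). */
static int is_sli4_page_aligned(uintptr_t addr)
{
	return (addr & (SLI4_PAGE_SIZE - 1)) == 0;
}

int main(void)
{
	printf("%d %d\n",
	       is_sli4_page_aligned(0x10000),	/* 1: aligned */
	       is_sli4_page_aligned(0x10010));	/* 0: off by 0x10 */
	return 0;
}
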
static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
@@ -3777,111 +4216,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine first calculates the sizes of the current els and allocated
- * scsi sgl lists, and then goes through all sgls to updates the physical
- * XRIs assigned due to port function reset. During port initialization, the
- * current els and allocated scsi sgl lists are 0s.
- *
- * Return codes
- * 0 - successful (for now, it always returns 0)
- **/
-int
-lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
-{
- struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
- uint16_t i, lxri, els_xri_cnt;
- uint16_t nvme_xri_cnt, nvme_xri_max;
- LIST_HEAD(nvme_sgl_list);
- int rc, cnt;
-
- phba->total_nvme_bufs = 0;
- phba->get_nvme_bufs = 0;
- phba->put_nvme_bufs = 0;
-
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
- return 0;
- /*
- * update on pci function's allocated nvme xri-sgl list
- */
-
- /* maximum number of xris available for nvme buffers */
- els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
- nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
- phba->sli4_hba.nvme_xri_max = nvme_xri_max;
- phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "6074 Current allocated NVME xri-sgl count:%d, "
- "maximum NVME xri count:%d\n",
- phba->sli4_hba.nvme_xri_cnt,
- phba->sli4_hba.nvme_xri_max);
-
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
- spin_lock(&phba->nvme_buf_list_put_lock);
- list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
- list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
- cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
- phba->get_nvme_bufs = 0;
- phba->put_nvme_bufs = 0;
- spin_unlock(&phba->nvme_buf_list_put_lock);
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-
- if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
- /* max nvme xri shrunk below the allocated nvme buffers */
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
- nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
- phba->sli4_hba.nvme_xri_max;
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
- /* release the extra allocated nvme buffers */
- for (i = 0; i < nvme_xri_cnt; i++) {
- list_remove_head(&nvme_sgl_list, lpfc_ncmd,
- struct lpfc_nvme_buf, list);
- if (lpfc_ncmd) {
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- lpfc_ncmd->data,
- lpfc_ncmd->dma_handle);
- kfree(lpfc_ncmd);
- }
- }
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
- phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
- }
-
- /* update xris associated to remaining allocated nvme buffers */
- lpfc_ncmd = NULL;
- lpfc_ncmd_next = NULL;
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &nvme_sgl_list, list) {
- lxri = lpfc_sli4_next_xritag(phba);
- if (lxri == NO_XRI) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "6075 Failed to allocate xri for "
- "nvme buffer\n");
- rc = -ENOMEM;
- goto out_free_mem;
- }
- lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
- lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
- }
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
- spin_lock(&phba->nvme_buf_list_put_lock);
- list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
- phba->get_nvme_bufs = cnt;
- INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
- spin_unlock(&phba->nvme_buf_list_put_lock);
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
- return 0;
-
-out_free_mem:
- lpfc_nvme_free(phba);
- return rc;
-}
-
-/**
* lpfc_create_port - Create an FC port
* @phba: pointer to lpfc hba data structure.
* @instance: a unique integer ID to this FC port.
@@ -3956,17 +4290,29 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport->fc_rscn_flush = 0;
lpfc_get_vport_cfgparam(vport);
+ /* Adjust value in vport */
+ vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
+
shost->unique_id = instance;
shost->max_id = LPFC_MAX_TARGET;
shost->max_lun = vport->cfg_max_luns;
shost->this_id = -1;
shost->max_cmd_len = 16;
- shost->nr_hw_queues = phba->cfg_fcp_io_channel;
+
if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
+ shost->nr_hw_queues = phba->cfg_hdw_queue;
+ else
+ shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
- }
+ } else
+ /* SLI-3 has a limited number of hardware queues (3),
+ * thus there is only one for FCP processing.
+ */
+ shost->nr_hw_queues = 1;
/*
* Set initial can_queue value since 0 is no longer supported and
@@ -4220,7 +4566,8 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
{
/* Reset some HBA SLI4 setup states */
lpfc_stop_hba_timers(phba);
- phba->pport->work_port_events = 0;
+ if (phba->pport)
+ phba->pport->work_port_events = 0;
phba->sli4_hba.intr_enable = 0;
}
@@ -5819,24 +6166,11 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
"NVME" : " "),
(phba->nvmet_support ? "NVMET" : " "));
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- /* Initialize the scsi buffer list used by driver for scsi IO */
- spin_lock_init(&phba->scsi_buf_list_get_lock);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
- spin_lock_init(&phba->scsi_buf_list_put_lock);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
- }
-
- if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
- (phba->nvmet_support == 0)) {
- /* Initialize the NVME buffer list used by driver for NVME IO */
- spin_lock_init(&phba->nvme_buf_list_get_lock);
- INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
- phba->get_nvme_bufs = 0;
- spin_lock_init(&phba->nvme_buf_list_put_lock);
- INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
- phba->put_nvme_bufs = 0;
- }
+ /* Initialize the IO buffer list used by driver for SLI3 SCSI */
+ spin_lock_init(&phba->scsi_buf_list_get_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+ spin_lock_init(&phba->scsi_buf_list_put_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
/* Initialize the fabric iocb list */
INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -5860,6 +6194,8 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Heartbeat timer */
timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
+ INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
+
return 0;
}
@@ -5877,7 +6213,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
- int rc;
+ int rc, entry_sz;
/*
* Initialize timers used by driver
@@ -5922,6 +6258,11 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ entry_sz = sizeof(struct sli4_sge);
+ else
+ entry_sz = sizeof(struct ulp_bde64);
+
/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
if (phba->cfg_enable_bg) {
/*
@@ -5935,7 +6276,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+ (LPFC_MAX_SG_SEG_CNT * entry_sz);
if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
@@ -5950,7 +6291,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+ ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
/* Total BDEs in BPL for scsi_sg_list */
phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
@@ -6031,14 +6372,13 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
int longs;
- int fof_vectors = 0;
int extra;
uint64_t wwn;
u32 if_type;
u32 if_fam;
- phba->sli4_hba.num_online_cpu = num_online_cpus();
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+ phba->sli4_hba.num_possible_cpu = num_possible_cpus();
phba->sli4_hba.curr_disp_cpu = 0;
/* Get all the module params for configuring this host */
@@ -6200,8 +6540,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Initialize the Abort nvme buffer list used by driver */
- spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+ spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
}
@@ -6337,6 +6676,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
" NVME_TARGET_FC infrastructure"
" is not in kernel\n");
#endif
+ /* Not supported for NVMET */
+ phba->cfg_xri_rebalancing = 0;
break;
}
}
@@ -6405,8 +6746,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Verify OAS is supported */
lpfc_sli4_oas_verify(phba);
- if (phba->cfg_fof)
- fof_vectors = 1;
/* Verify RAS support on adapter */
lpfc_sli4_ras_init(phba);
@@ -6450,9 +6789,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_remove_rpi_hdrs;
}
- phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
- sizeof(struct lpfc_hba_eq_hdl),
- GFP_KERNEL);
+ phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
+ sizeof(struct lpfc_hba_eq_hdl),
+ GFP_KERNEL);
if (!phba->sli4_hba.hba_eq_hdl) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for "
@@ -6461,7 +6800,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_fcf_rr_bmask;
}
- phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
+ phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
sizeof(struct lpfc_vector_map_info),
GFP_KERNEL);
if (!phba->sli4_hba.cpu_map) {
@@ -6471,21 +6810,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_free_hba_eq_hdl;
}
- if (lpfc_used_cpu == NULL) {
- lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
- GFP_KERNEL);
- if (!lpfc_used_cpu) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3335 Failed allocate memory for msi-x "
- "interrupt vector mapping\n");
- kfree(phba->sli4_hba.cpu_map);
- rc = -ENOMEM;
- goto out_free_hba_eq_hdl;
- }
- for (i = 0; i < lpfc_present_cpu; i++)
- lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
- }
+ phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
+ if (!phba->sli4_hba.eq_info) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3321 Failed allocation for per_cpu stats\n");
+ rc = -ENOMEM;
+ goto out_free_hba_cpu_map;
+ }
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -6505,6 +6837,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
+out_free_hba_cpu_map:
+ kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
@@ -6534,10 +6868,12 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+ free_percpu(phba->sli4_hba.eq_info);
+
/* Free memory allocated for msi-x interrupt vector to CPU mapping */
kfree(phba->sli4_hba.cpu_map);
+ phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
- phba->sli4_hba.num_online_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
/* Free memory allocated for fast-path work queue handles */
@@ -6875,11 +7211,8 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
/* els xri-sgl book keeping */
phba->sli4_hba.els_xri_cnt = 0;
- /* scsi xri-buffer book keeping */
- phba->sli4_hba.scsi_xri_cnt = 0;
-
/* nvme xri-buffer book keeping */
- phba->sli4_hba.nvme_xri_cnt = 0;
+ phba->sli4_hba.io_xri_cnt = 0;
}
/**
@@ -7093,6 +7426,9 @@ lpfc_hba_alloc(struct pci_dev *pdev)
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ kfree(phba->sli4_hba.hdwq);
+
/* Release the driver assigned board number */
idr_remove(&lpfc_hba_index, phba->brd_no);
@@ -7128,10 +7464,6 @@ lpfc_create_shost(struct lpfc_hba *phba)
phba->fc_arbtov = FF_DEF_ARBTOV;
atomic_set(&phba->sdev_cnt, 0);
- atomic_set(&phba->fc4ScsiInputRequests, 0);
- atomic_set(&phba->fc4ScsiOutputRequests, 0);
- atomic_set(&phba->fc4ScsiControlRequests, 0);
- atomic_set(&phba->fc4ScsiIoCmpls, 0);
vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
if (!vport)
return -ENODEV;
@@ -7909,7 +8241,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
struct lpfc_rsrc_desc_fcfcoe *desc;
char *pdesc_0;
uint16_t forced_link_speed;
- uint32_t if_type;
+ uint32_t if_type, qmin;
int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -8014,38 +8346,44 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_rq);
/*
- * Calculate NVME queue resources based on how
- * many WQ/CQs are available.
+ * Calculate queue resources based on how
+ * many WQ/CQ/EQs are available.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- length = phba->sli4_hba.max_cfg_param.max_wq;
- if (phba->sli4_hba.max_cfg_param.max_cq <
- phba->sli4_hba.max_cfg_param.max_wq)
- length = phba->sli4_hba.max_cfg_param.max_cq;
+ qmin = phba->sli4_hba.max_cfg_param.max_wq;
+ if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
+ qmin = phba->sli4_hba.max_cfg_param.max_cq;
+ if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
+ qmin = phba->sli4_hba.max_cfg_param.max_eq;
+ /*
+ * What's left after this can go toward NVME / FCP.
+ * The minus 4 accounts for ELS, NVME LS, MBOX
+ * plus one extra. When configured for
+ * NVMET, FCP io channel WQs are not created.
+ */
+ qmin -= 4;
- /*
- * Whats left after this can go toward NVME.
- * The minus 6 accounts for ELS, NVME LS, MBOX
- * fof plus a couple extra. When configured for
- * NVMET, FCP io channel WQs are not created.
- */
- length -= 6;
- if (!phba->nvmet_support)
- length -= phba->cfg_fcp_io_channel;
-
- if (phba->cfg_nvme_io_channel > length) {
- lpfc_printf_log(
- phba, KERN_ERR, LOG_SLI,
- "2005 Reducing NVME IO channel to %d: "
- "WQ %d CQ %d NVMEIO %d FCPIO %d\n",
- length,
+ /* If NVME is configured, double the number of CQ/WQs needed */
+ if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+ !phba->nvmet_support)
+ qmin /= 2;
+
+ /* Check to see if there is enough for NVME */
+ if ((phba->cfg_irq_chann > qmin) ||
+ (phba->cfg_hdw_queue > qmin)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2005 Reducing Queues: "
+ "WQ %d CQ %d EQ %d: min %d: "
+ "IRQ %d HDWQ %d\n",
phba->sli4_hba.max_cfg_param.max_wq,
phba->sli4_hba.max_cfg_param.max_cq,
- phba->cfg_nvme_io_channel,
- phba->cfg_fcp_io_channel);
+ phba->sli4_hba.max_cfg_param.max_eq,
+ qmin, phba->cfg_irq_chann,
+ phba->cfg_hdw_queue);
- phba->cfg_nvme_io_channel = length;
- }
+ if (phba->cfg_irq_chann > qmin)
+ phba->cfg_irq_chann = qmin;
+ if (phba->cfg_hdw_queue > qmin)
+ phba->cfg_hdw_queue = qmin;
}
}
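The replacement logic above folds the old NVME-only sizing into one budget: take the smallest of the WQ/CQ/EQ limits, reserve four slots (ELS, NVME LS, MBOX, one spare), and halve what remains when FCP and NVME each need their own CQ/WQ pair. A standalone sketch of that arithmetic, with hypothetical limit values:

	#include <stdio.h>

	/* Hypothetical stand-in for the WQ/CQ/EQ limits read from the adapter. */
	struct cfg_limits {
		unsigned int max_wq;
		unsigned int max_cq;
		unsigned int max_eq;
	};

	/*
	 * Smallest of the three limits, minus 4 reserved slots (ELS,
	 * NVME LS, MBOX, one extra), halved when both protocols need
	 * their own CQ/WQ pairs (NVME initiator alongside FCP).
	 */
	static unsigned int queue_budget(const struct cfg_limits *lim, int nvme_and_fcp)
	{
		unsigned int qmin = lim->max_wq;

		if (lim->max_cq < qmin)
			qmin = lim->max_cq;
		if (lim->max_eq < qmin)
			qmin = lim->max_eq;

		qmin -= 4;
		if (nvme_and_fcp)
			qmin /= 2;
		return qmin;
	}

	int main(void)
	{
		struct cfg_limits lim = { .max_wq = 64, .max_cq = 96, .max_eq = 32 };

		printf("budget (FCP only):  %u\n", queue_budget(&lim, 0));
		printf("budget (FCP+NVME): %u\n", queue_budget(&lim, 1));
		return 0;
	}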
@@ -8257,53 +8595,22 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
- int io_channel;
- int fof_vectors = phba->cfg_fof ? 1 : 0;
-
/*
* Sanity check for configured queue parameters against the run-time
* device parameters
*/
- /* Sanity check on HBA EQ parameters */
- io_channel = phba->io_channel_irqs;
-
- if (phba->sli4_hba.num_online_cpu < io_channel) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "3188 Reducing IO channels to match number of "
- "online CPUs: from %d to %d\n",
- io_channel, phba->sli4_hba.num_online_cpu);
- io_channel = phba->sli4_hba.num_online_cpu;
- }
-
- if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2575 Reducing IO channels to match number of "
- "available EQs: from %d to %d\n",
- io_channel,
- phba->sli4_hba.max_cfg_param.max_eq);
- io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
- }
-
- /* The actual number of FCP / NVME event queues adopted */
- if (io_channel != phba->io_channel_irqs)
- phba->io_channel_irqs = io_channel;
- if (phba->cfg_fcp_io_channel > io_channel)
- phba->cfg_fcp_io_channel = io_channel;
- if (phba->cfg_nvme_io_channel > io_channel)
- phba->cfg_nvme_io_channel = io_channel;
if (phba->nvmet_support) {
- if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
}
if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
- phba->io_channel_irqs, phba->cfg_fcp_io_channel,
- phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
+ "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
+ phba->cfg_hdw_queue, phba->cfg_irq_chann,
+ phba->cfg_nvmet_mrq);
/* Get EQ depth from module parameter, fake the default for now */
phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -8330,7 +8637,9 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
return 1;
}
qdesc->qe_valid = 1;
- phba->sli4_hba.nvme_cq[wqidx] = qdesc;
+ qdesc->hdwq = wqidx;
+ qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
+ phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
@@ -8340,7 +8649,9 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
wqidx);
return 1;
}
- phba->sli4_hba.nvme_wq[wqidx] = qdesc;
+ qdesc->hdwq = wqidx;
+ qdesc->chann = wqidx;
+ phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
return 0;
}
@@ -8368,7 +8679,9 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
return 1;
}
qdesc->qe_valid = 1;
- phba->sli4_hba.fcp_cq[wqidx] = qdesc;
+ qdesc->hdwq = wqidx;
+ qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
+ phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
/* Create Fast Path FCP WQs */
if (phba->enab_exp_wqcq_pages) {
@@ -8389,7 +8702,9 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
wqidx);
return 1;
}
- phba->sli4_hba.fcp_wq[wqidx] = qdesc;
+ qdesc->hdwq = wqidx;
+ qdesc->chann = wqidx;
+ phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
return 0;
}
@@ -8412,16 +8727,14 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- int idx, io_channel;
+ int idx, eqidx;
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_eq_intr_info *eqi;
/*
* Create HBA Record arrays.
* Both NVME and FCP will share that same vectors / EQs
*/
- io_channel = phba->io_channel_irqs;
- if (!io_channel)
- return -ERANGE;
-
phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
@@ -8433,87 +8746,36 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
- phba->sli4_hba.hba_eq = kcalloc(io_channel,
- sizeof(struct lpfc_queue *),
- GFP_KERNEL);
- if (!phba->sli4_hba.hba_eq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2576 Failed allocate memory for "
- "fast-path EQ record array\n");
- goto out_error;
- }
-
- if (phba->cfg_fcp_io_channel) {
- phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
- sizeof(struct lpfc_queue *),
- GFP_KERNEL);
- if (!phba->sli4_hba.fcp_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2577 Failed allocate memory for "
- "fast-path CQ record array\n");
- goto out_error;
- }
- phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
- sizeof(struct lpfc_queue *),
- GFP_KERNEL);
- if (!phba->sli4_hba.fcp_wq) {
+ if (!phba->sli4_hba.hdwq) {
+ phba->sli4_hba.hdwq = kcalloc(
+ phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.hdwq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2578 Failed allocate memory for "
- "fast-path FCP WQ record array\n");
+ "6427 Failed allocate memory for "
+ "fast-path Hardware Queue array\n");
goto out_error;
}
- /*
- * Since the first EQ can have multiple CQs associated with it,
- * this array is used to quickly see if we have a FCP fast-path
- * CQ match.
- */
- phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
- sizeof(uint16_t),
- GFP_KERNEL);
- if (!phba->sli4_hba.fcp_cq_map) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2545 Failed allocate memory for "
- "fast-path CQ map\n");
- goto out_error;
+ /* Prepare hardware queues to take IO buffers */
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ qp = &phba->sli4_hba.hdwq[idx];
+ spin_lock_init(&qp->io_buf_list_get_lock);
+ spin_lock_init(&qp->io_buf_list_put_lock);
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+ qp->get_io_bufs = 0;
+ qp->put_io_bufs = 0;
+ qp->total_io_bufs = 0;
+ spin_lock_init(&qp->abts_scsi_buf_list_lock);
+ INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list);
+ qp->abts_scsi_io_bufs = 0;
+ spin_lock_init(&qp->abts_nvme_buf_list_lock);
+ INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list);
+ qp->abts_nvme_io_bufs = 0;
}
}
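Each hardware queue is initialised above with separate get/put buffer lists, each guarded by its own lock. A toy single-threaded sketch of that split-freelist idea: consumers drain the get list, completions refill the put list, and a swap happens only when the get side runs dry (locks omitted; every name here is invented for illustration):

	#include <stdio.h>

	/* Toy singly linked free list; stands in for the lpfc_io_buf lists. */
	struct node { struct node *next; };

	struct two_list {
		struct node *get;	/* consumers allocate from here */
		struct node *put;	/* completions return buffers here */
	};

	static struct node *two_list_get(struct two_list *tl)
	{
		if (!tl->get) {		/* refill: swap in the put list */
			tl->get = tl->put;
			tl->put = NULL;
		}
		if (!tl->get)
			return NULL;	/* genuinely empty */
		struct node *n = tl->get;
		tl->get = n->next;
		return n;
	}

	static void two_list_put(struct two_list *tl, struct node *n)
	{
		n->next = tl->put;
		tl->put = n;
	}

	int main(void)
	{
		struct node a, b;
		struct two_list tl = { 0 };

		two_list_put(&tl, &a);
		two_list_put(&tl, &b);
		printf("first  %s\n", two_list_get(&tl) == &b ? "b" : "?");
		printf("second %s\n", two_list_get(&tl) == &a ? "a" : "?");
		return 0;
	}

Because the allocation path and the completion path touch different lists under different locks, the two rarely contend; the swap is the only point where both are taken.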
- if (phba->cfg_nvme_io_channel) {
- phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
- sizeof(struct lpfc_queue *),
- GFP_KERNEL);
- if (!phba->sli4_hba.nvme_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6077 Failed allocate memory for "
- "fast-path CQ record array\n");
- goto out_error;
- }
-
- phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
- sizeof(struct lpfc_queue *),
- GFP_KERNEL);
- if (!phba->sli4_hba.nvme_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2581 Failed allocate memory for "
- "fast-path NVME WQ record array\n");
- goto out_error;
- }
-
- /*
- * Since the first EQ can have multiple CQs associated with it,
- * this array is used to quickly see if we have a NVME fast-path
- * CQ match.
- */
- phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
- sizeof(uint16_t),
- GFP_KERNEL);
- if (!phba->sli4_hba.nvme_cq_map) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6078 Failed allocate memory for "
- "fast-path CQ map\n");
- goto out_error;
- }
-
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
if (phba->nvmet_support) {
phba->sli4_hba.nvmet_cqset = kcalloc(
phba->cfg_nvmet_mrq,
@@ -8551,8 +8813,19 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
/* Create HBA Event Queues (EQs) */
- for (idx = 0; idx < io_channel; idx++) {
- /* Create EQs */
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ /*
+ * If there are more Hardware Queues than available
+ * EQs (IRQ vectors), multiple Hardware Queues may
+ * share a common EQ.
+ */
+ if (idx >= phba->cfg_irq_chann) {
+ /* Share an existing EQ */
+ eqidx = lpfc_find_eq_handle(phba, idx);
+ phba->sli4_hba.hdwq[idx].hba_eq =
+ phba->sli4_hba.hdwq[eqidx].hba_eq;
+ continue;
+ }
+ /* Create an EQ */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
@@ -8562,33 +8835,51 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
qdesc->qe_valid = 1;
- phba->sli4_hba.hba_eq[idx] = qdesc;
+ qdesc->hdwq = idx;
+
+ /* Save the CPU this EQ is affinitised to */
+ eqidx = lpfc_find_eq_handle(phba, idx);
+ qdesc->chann = lpfc_find_cpu_handle(phba, eqidx,
+ LPFC_FIND_BY_EQ);
+ phba->sli4_hba.hdwq[idx].hba_eq = qdesc;
+ qdesc->last_cpu = qdesc->chann;
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
+ list_add(&qdesc->cpu_list, &eqi->list);
}
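When cfg_hdw_queue exceeds cfg_irq_chann, the loop above makes the surplus hardware queues borrow an EQ that already exists instead of allocating a new one. The driver derives the pairing from its CPU-affinity map via lpfc_find_eq_handle(); the sketch below uses a plain modulo fold purely to make the sharing visible, and is not the driver's actual policy:

	#include <stdio.h>

	#define NUM_HDWQ 8	/* hypothetical hardware-queue count */
	#define NUM_IRQ  4	/* hypothetical interrupt-channel count */

	int main(void)
	{
		int eq_of_hdwq[NUM_HDWQ];

		for (int idx = 0; idx < NUM_HDWQ; idx++) {
			/* slots past NUM_IRQ reuse an existing EQ */
			eq_of_hdwq[idx] = (idx < NUM_IRQ) ? idx : idx % NUM_IRQ;
			printf("hdwq %d -> eq %d%s\n", idx, eq_of_hdwq[idx],
			       idx >= NUM_IRQ ? " (shared)" : "");
		}
		return 0;
	}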
- /* FCP and NVME io channels are not required to be balanced */
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+ /* Allocate SCSI SLI4 CQ/WQs */
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
if (lpfc_alloc_fcp_wq_cq(phba, idx))
goto out_error;
+ }
- for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
- if (lpfc_alloc_nvme_wq_cq(phba, idx))
- goto out_error;
+ /* Allocate NVME SLI4 CQ/WQs */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ if (lpfc_alloc_nvme_wq_cq(phba, idx))
+ goto out_error;
+ }
- if (phba->nvmet_support) {
- for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
- qdesc = lpfc_sli4_queue_alloc(phba,
+ if (phba->nvmet_support) {
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ qdesc = lpfc_sli4_queue_alloc(
+ phba,
LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3142 Failed allocate NVME "
- "CQ Set (%d)\n", idx);
- goto out_error;
+ if (!qdesc) {
+ lpfc_printf_log(
+ phba, KERN_ERR, LOG_INIT,
+ "3142 Failed allocate NVME "
+ "CQ Set (%d)\n", idx);
+ goto out_error;
+ }
+ qdesc->qe_valid = 1;
+ qdesc->hdwq = idx;
+ qdesc->chann = idx;
+ phba->sli4_hba.nvmet_cqset[idx] = qdesc;
}
- qdesc->qe_valid = 1;
- phba->sli4_hba.nvmet_cqset[idx] = qdesc;
}
}
@@ -8618,6 +8909,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
qdesc->qe_valid = 1;
+ qdesc->chann = 0;
phba->sli4_hba.els_cq = qdesc;
@@ -8635,6 +8927,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"0505 Failed allocate slow-path MQ\n");
goto out_error;
}
+ qdesc->chann = 0;
phba->sli4_hba.mbx_wq = qdesc;
/*
@@ -8650,6 +8943,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"0504 Failed allocate slow-path ELS WQ\n");
goto out_error;
}
+ qdesc->chann = 0;
phba->sli4_hba.els_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
@@ -8663,6 +8957,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"6079 Failed allocate NVME LS CQ\n");
goto out_error;
}
+ qdesc->chann = 0;
qdesc->qe_valid = 1;
phba->sli4_hba.nvmels_cq = qdesc;
@@ -8675,6 +8970,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"6080 Failed allocate NVME LS WQ\n");
goto out_error;
}
+ qdesc->chann = 0;
phba->sli4_hba.nvmels_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
}
@@ -8705,7 +9001,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
phba->sli4_hba.dat_rq = qdesc;
- if (phba->nvmet_support) {
+ if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+ phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
/* Create NVMET Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba,
@@ -8718,6 +9015,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"receive HRQ\n");
goto out_error;
}
+ qdesc->hdwq = idx;
phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
/* Only needed for header of RQ pair */
@@ -8744,13 +9042,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"receive DRQ\n");
goto out_error;
}
+ qdesc->hdwq = idx;
phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
}
}
- /* Create the Queues needed for Flash Optimized Fabric operations */
- if (phba->cfg_fof)
- lpfc_fof_queue_create(phba);
+#if defined(BUILD_NVME)
+ /* Clear NVME stats */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
+ sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
+ }
+ }
+#endif
+
+ /* Clear SCSI stats */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
+ sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
+ }
+ }
+
return 0;
out_error:
@@ -8783,11 +9097,25 @@ lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
}
static inline void
-lpfc_sli4_release_queue_map(uint16_t **qmap)
+lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
{
- if (*qmap != NULL) {
- kfree(*qmap);
- *qmap = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq;
+ uint32_t idx;
+
+ hdwq = phba->sli4_hba.hdwq;
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ if (idx < phba->cfg_irq_chann)
+ lpfc_sli4_queue_free(hdwq[idx].hba_eq);
+ hdwq[idx].hba_eq = NULL;
+
+ lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
+ lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
+ lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
+ lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
+ hdwq[idx].fcp_cq = NULL;
+ hdwq[idx].nvme_cq = NULL;
+ hdwq[idx].fcp_wq = NULL;
+ hdwq[idx].nvme_wq = NULL;
}
}
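Because of that borrowing, teardown must not free a shared EQ more than once; hence the idx < phba->cfg_irq_chann guard above, while every slot still clears its pointer. A toy illustration of the owner-frees-once rule, with malloc/free standing in for the queue allocator and hypothetical counts:

	#include <stdlib.h>

	#define NUM_HDWQ 8
	#define NUM_IRQ  4	/* hypothetical: only the first 4 slots own an EQ */

	int main(void)
	{
		void *eq[NUM_HDWQ];

		for (int idx = 0; idx < NUM_HDWQ; idx++)
			eq[idx] = (idx < NUM_IRQ) ? malloc(32) : eq[idx % NUM_IRQ];

		for (int idx = 0; idx < NUM_HDWQ; idx++) {
			if (idx < NUM_IRQ)
				free(eq[idx]);	/* the owning slot frees exactly once */
			eq[idx] = NULL;		/* borrowers only drop their alias */
		}
		return 0;
	}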
@@ -8806,33 +9134,9 @@ lpfc_sli4_release_queue_map(uint16_t **qmap)
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
- if (phba->cfg_fof)
- lpfc_fof_queue_destroy(phba);
-
/* Release HBA eqs */
- lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
-
- /* Release FCP cqs */
- lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
- phba->cfg_fcp_io_channel);
-
- /* Release FCP wqs */
- lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
- phba->cfg_fcp_io_channel);
-
- /* Release FCP CQ mapping array */
- lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
-
- /* Release NVME cqs */
- lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
- phba->cfg_nvme_io_channel);
-
- /* Release NVME wqs */
- lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
- phba->cfg_nvme_io_channel);
-
- /* Release NVME CQ mapping array */
- lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
+ if (phba->sli4_hba.hdwq)
+ lpfc_sli4_release_hdwq(phba);
if (phba->nvmet_support) {
lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
@@ -8913,10 +9217,9 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
qidx, (uint32_t)rc);
return rc;
}
- cq->chann = qidx;
if (qtype != LPFC_MBOX) {
- /* Setup nvme_cq_map for fast lookup */
+ /* Setup cq_map for fast lookup */
if (cq_map)
*cq_map = cq->queue_id;
@@ -8933,7 +9236,6 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
/* no need to tear down cq - caller will do so */
return rc;
}
- wq->chann = qidx;
/* Bind this CQ/WQ to the NVME ring */
pring = wq->pring;
@@ -8963,6 +9265,38 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
}
/**
+ * lpfc_setup_cq_lookup - Setup the CQ lookup table
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine populates the cq_lookup table with all
+ * available CQ queue_ids.
+ **/
+void
+lpfc_setup_cq_lookup(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *eq, *childq;
+ struct lpfc_sli4_hdw_queue *qp;
+ int qidx;
+
+ qp = phba->sli4_hba.hdwq;
+ memset(phba->sli4_hba.cq_lookup, 0,
+ (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+ eq = qp[qidx].hba_eq;
+ if (!eq)
+ continue;
+ list_for_each_entry(childq, &eq->child_list, list) {
+ if (childq->queue_id > phba->sli4_hba.cq_max)
+ continue;
+ if ((childq->subtype == LPFC_FCP) ||
+ (childq->subtype == LPFC_NVME))
+ phba->sli4_hba.cq_lookup[childq->queue_id] =
+ childq;
+ }
+ }
+}
+
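The design choice here is to spend (cq_max + 1) pointers of memory so the interrupt path can resolve a completion's CQ id in O(1) rather than walking each EQ's child list. A self-contained sketch of building such an id-indexed table, with invented toy types:

	#include <stdio.h>
	#include <stdlib.h>

	/* Invented toy type; queue_id plays the role of the SLI4 CQ id. */
	struct toy_queue {
		unsigned int queue_id;
		struct toy_queue *next;	/* sibling on the parent EQ's child list */
	};

	static struct toy_queue **build_cq_lookup(struct toy_queue **eq_children,
						  int num_eqs, unsigned int cq_max)
	{
		struct toy_queue **tbl = calloc(cq_max + 1, sizeof(*tbl));

		if (!tbl)
			return NULL;
		for (int i = 0; i < num_eqs; i++)
			for (struct toy_queue *q = eq_children[i]; q; q = q->next)
				if (q->queue_id <= cq_max)
					tbl[q->queue_id] = q;	/* O(1) id -> queue */
		return tbl;
	}

	int main(void)
	{
		struct toy_queue q1 = { .queue_id = 3, .next = NULL };
		struct toy_queue q2 = { .queue_id = 7, .next = &q1 };
		struct toy_queue *eqs[] = { &q2 };
		struct toy_queue **tbl = build_cq_lookup(eqs, 1, 16);

		if (tbl) {
			printf("queue_id 7 resolves to %p\n", (void *)tbl[7]);
			free(tbl);
		}
		return 0;
	}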
+/**
* lpfc_sli4_queue_setup - Set up all the SLI4 queues
* @phba: pointer to lpfc hba data structure.
*
@@ -8979,9 +9313,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_sli4_hdw_queue *qp;
LPFC_MBOXQ_t *mboxq;
int qidx;
- uint32_t length, io_channel;
+ uint32_t length, usdelay;
int rc = -ENOMEM;
/* Check for dual-ULP support */
@@ -9032,25 +9367,25 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
/*
* Set up HBA Event Queues (EQs)
*/
- io_channel = phba->io_channel_irqs;
+ qp = phba->sli4_hba.hdwq;
/* Set up HBA event queue */
- if (io_channel && !phba->sli4_hba.hba_eq) {
+ if (!qp) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3147 Fast-path EQs not allocated\n");
rc = -ENOMEM;
goto out_error;
}
- for (qidx = 0; qidx < io_channel; qidx++) {
- if (!phba->sli4_hba.hba_eq[qidx]) {
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+ if (!qp[qidx].hba_eq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0522 Fast-path EQ (%d) not "
"allocated\n", qidx);
rc = -ENOMEM;
goto out_destroy;
}
- rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
- phba->cfg_fcp_imax);
+ rc = lpfc_eq_create(phba, qp[qidx].hba_eq,
+ phba->cfg_fcp_imax);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0523 Failed setup of fast-path EQ "
@@ -9059,26 +9394,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
goto out_destroy;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2584 HBA EQ setup: queue[%d]-id=%d\n",
- qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
+ "2584 HBA EQ setup: queue[%d]-id=%d\n", qidx,
+ qp[qidx].hba_eq->queue_id);
}
- if (phba->cfg_nvme_io_channel) {
- if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6084 Fast-path NVME %s array not allocated\n",
- (phba->sli4_hba.nvme_cq) ? "CQ" : "WQ");
- rc = -ENOMEM;
- goto out_destroy;
- }
-
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
rc = lpfc_create_wq_cq(phba,
- phba->sli4_hba.hba_eq[
- qidx % io_channel],
- phba->sli4_hba.nvme_cq[qidx],
- phba->sli4_hba.nvme_wq[qidx],
- &phba->sli4_hba.nvme_cq_map[qidx],
+ qp[qidx].hba_eq,
+ qp[qidx].nvme_cq,
+ qp[qidx].nvme_wq,
+ &phba->sli4_hba.hdwq[qidx].nvme_cq_map,
qidx, LPFC_NVME);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9090,31 +9416,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
}
- if (phba->cfg_fcp_io_channel) {
- /* Set up fast-path FCP Response Complete Queue */
- if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ rc = lpfc_create_wq_cq(phba,
+ qp[qidx].hba_eq,
+ qp[qidx].fcp_cq,
+ qp[qidx].fcp_wq,
+ &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
+ qidx, LPFC_FCP);
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3148 Fast-path FCP %s array not allocated\n",
- phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
- rc = -ENOMEM;
- goto out_destroy;
- }
-
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
- rc = lpfc_create_wq_cq(phba,
- phba->sli4_hba.hba_eq[
- qidx % io_channel],
- phba->sli4_hba.fcp_cq[qidx],
- phba->sli4_hba.fcp_wq[qidx],
- &phba->sli4_hba.fcp_cq_map[qidx],
- qidx, LPFC_FCP);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0535 Failed to setup fastpath "
"FCP WQ/CQ (%d), rc = 0x%x\n",
qidx, (uint32_t)rc);
- goto out_destroy;
- }
+ goto out_destroy;
}
}
@@ -9133,7 +9447,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
goto out_destroy;
}
- rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+ rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
phba->sli4_hba.mbx_cq,
phba->sli4_hba.mbx_wq,
NULL, 0, LPFC_MBOX);
@@ -9154,7 +9468,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (phba->cfg_nvmet_mrq > 1) {
rc = lpfc_cq_create_set(phba,
phba->sli4_hba.nvmet_cqset,
- phba->sli4_hba.hba_eq,
+ qp,
LPFC_WCQ, LPFC_NVMET);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9166,7 +9480,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
} else {
/* Set up NVMET Receive Complete Queue */
rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
- phba->sli4_hba.hba_eq[0],
+ qp[0].hba_eq,
LPFC_WCQ, LPFC_NVMET);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9180,7 +9494,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"6090 NVMET CQ setup: cq-id=%d, "
"parent eq-id=%d\n",
phba->sli4_hba.nvmet_cqset[0]->queue_id,
- phba->sli4_hba.hba_eq[0]->queue_id);
+ qp[0].hba_eq->queue_id);
}
}
@@ -9192,14 +9506,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_destroy;
}
- rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
- phba->sli4_hba.els_cq,
- phba->sli4_hba.els_wq,
- NULL, 0, LPFC_ELS);
+ rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
+ phba->sli4_hba.els_cq,
+ phba->sli4_hba.els_wq,
+ NULL, 0, LPFC_ELS);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
- (uint32_t)rc);
+ "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
+ (uint32_t)rc);
goto out_destroy;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -9207,7 +9521,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.els_wq->queue_id,
phba->sli4_hba.els_cq->queue_id);
- if (phba->cfg_nvme_io_channel) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Set up NVME LS Complete Queue */
if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9216,14 +9530,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_destroy;
}
- rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
- phba->sli4_hba.nvmels_cq,
- phba->sli4_hba.nvmels_wq,
- NULL, 0, LPFC_NVME_LS);
+ rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
+ phba->sli4_hba.nvmels_cq,
+ phba->sli4_hba.nvmels_wq,
+ NULL, 0, LPFC_NVME_LS);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0529 Failed setup of NVVME LS WQ/CQ: "
- "rc = 0x%x\n", (uint32_t)rc);
+ "0526 Failed setup of NVVME LS WQ/CQ: "
+ "rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
}
@@ -9309,20 +9623,29 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.dat_rq->queue_id,
phba->sli4_hba.els_cq->queue_id);
- if (phba->cfg_fof) {
- rc = lpfc_fof_queue_setup(phba);
- if (rc) {
+ if (phba->cfg_fcp_imax)
+ usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
+ else
+ usdelay = 0;
+
+ for (qidx = 0; qidx < phba->cfg_irq_chann;
+ qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
+ lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
+ usdelay);
+
+ if (phba->sli4_hba.cq_max) {
+ kfree(phba->sli4_hba.cq_lookup);
+ phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
+ sizeof(struct lpfc_queue *), GFP_KERNEL);
+ if (!phba->sli4_hba.cq_lookup) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0549 Failed setup of FOF Queues: "
- "rc = 0x%x\n", rc);
+ "0549 Failed setup of CQ Lookup table: "
+ "size 0x%x\n", phba->sli4_hba.cq_max);
+ rc = -ENOMEM;
goto out_destroy;
}
+ lpfc_setup_cq_lookup(phba);
}
-
- for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
- lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
- phba->cfg_fcp_imax);
-
return 0;
out_destroy:
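The usdelay computation above converts the cfg_fcp_imax "interrupts per second" cap into a per-EQ coalescing delay in microseconds, with 0 meaning no delay. A one-function sketch of that conversion, assuming LPFC_SEC_TO_USEC is 1000000:

	#include <stdio.h>

	#define SEC_TO_USEC 1000000

	static unsigned int eq_delay_us(unsigned int imax)
	{
		/* an imax of 0 disables interrupt coalescing entirely */
		return imax ? SEC_TO_USEC / imax : 0;
	}

	int main(void)
	{
		printf("imax 20000 -> %u us\n", eq_delay_us(20000));	/* 50 us */
		printf("imax 0     -> %u us\n", eq_delay_us(0));	/* off   */
		return 0;
	}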
@@ -9346,12 +9669,9 @@ out_error:
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
+ struct lpfc_sli4_hdw_queue *qp;
int qidx;
- /* Unset the queues created for Flash Optimized Fabric operations */
- if (phba->cfg_fof)
- lpfc_fof_queue_destroy(phba);
-
/* Unset mailbox command work queue */
if (phba->sli4_hba.mbx_wq)
lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
@@ -9369,17 +9689,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
phba->sli4_hba.dat_rq);
- /* Unset FCP work queue */
- if (phba->sli4_hba.fcp_wq)
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
-
- /* Unset NVME work queue */
- if (phba->sli4_hba.nvme_wq) {
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
- lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
- }
-
/* Unset mailbox command complete queue */
if (phba->sli4_hba.mbx_cq)
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -9392,11 +9701,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
if (phba->sli4_hba.nvmels_cq)
lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
- /* Unset NVME response complete queue */
- if (phba->sli4_hba.nvme_cq)
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
- lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
-
if (phba->nvmet_support) {
/* Unset NVMET MRQ queue */
if (phba->sli4_hba.nvmet_mrq_hdr) {
@@ -9415,15 +9719,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
}
}
- /* Unset FCP response complete queue */
- if (phba->sli4_hba.fcp_cq)
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
+ /* Unset fast-path SLI4 queues */
+ if (phba->sli4_hba.hdwq) {
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ qp = &phba->sli4_hba.hdwq[qidx];
+ lpfc_wq_destroy(phba, qp->fcp_wq);
+ lpfc_wq_destroy(phba, qp->nvme_wq);
+ lpfc_cq_destroy(phba, qp->fcp_cq);
+ lpfc_cq_destroy(phba, qp->nvme_cq);
+ if (qidx < phba->cfg_irq_chann)
+ lpfc_eq_destroy(phba, qp->hba_eq);
+ }
+ }
- /* Unset fast-path event queue */
- if (phba->sli4_hba.hba_eq)
- for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
- lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
+ kfree(phba->sli4_hba.cq_lookup);
+ phba->sli4_hba.cq_lookup = NULL;
+ phba->sli4_hba.cq_max = 0;
}
/**
@@ -9741,7 +10052,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
struct pci_dev *pdev = phba->pcidev;
unsigned long bar0map_len, bar1map_len, bar2map_len;
- int error = -ENODEV;
+ int error;
uint32_t if_type;
if (!pdev)
@@ -9760,7 +10071,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
*/
if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
&phba->sli4_hba.sli_intf.word0)) {
- return error;
+ return -ENODEV;
}
/* There is no SLI3 failback for SLI4 devices. */
@@ -9770,7 +10081,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"2894 SLI_INTF reg contents invalid "
"sli_intf reg 0x%x\n",
phba->sli4_hba.sli_intf.word0);
- return error;
+ return -ENODEV;
}
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
@@ -9794,7 +10105,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
dev_printk(KERN_ERR, &pdev->dev,
"ioremap failed for SLI4 PCI config "
"registers.\n");
- goto out;
+ return -ENODEV;
}
phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
/* Set up BAR0 PCI config space register memory map */
@@ -9805,7 +10116,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
dev_printk(KERN_ERR, &pdev->dev,
"FATAL - No BAR0 mapping for SLI4, if_type 2\n");
- goto out;
+ return -ENODEV;
}
phba->sli4_hba.conf_regs_memmap_p =
ioremap(phba->pci_bar0_map, bar0map_len);
@@ -9813,7 +10124,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
dev_printk(KERN_ERR, &pdev->dev,
"ioremap failed for SLI4 PCI config "
"registers.\n");
- goto out;
+ return -ENODEV;
}
lpfc_sli4_bar0_register_memmap(phba, if_type);
}
@@ -9859,6 +10170,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.drbl_regs_memmap_p) {
dev_err(&pdev->dev,
"ioremap failed for SLI4 HBA doorbell registers.\n");
+ error = -ENOMEM;
goto out_iounmap_conf;
}
phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
@@ -9908,6 +10220,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.dpp_regs_memmap_p) {
dev_err(&pdev->dev,
"ioremap failed for SLI4 HBA dpp registers.\n");
+ error = -ENOMEM;
goto out_iounmap_ctrl;
}
phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
@@ -9918,13 +10231,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
case LPFC_SLI_INTF_IF_TYPE_0:
case LPFC_SLI_INTF_IF_TYPE_2:
phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
- phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release;
- phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release;
+ phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
+ phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
break;
case LPFC_SLI_INTF_IF_TYPE_6:
phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
- phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release;
- phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release;
+ phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
+ phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
break;
default:
break;
@@ -9938,7 +10251,7 @@ out_iounmap_ctrl:
iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
iounmap(phba->sli4_hba.conf_regs_memmap_p);
-out:
+
return error;
}
@@ -10205,25 +10518,97 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
}
/**
+ * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified EQ
+ * @phba: pointer to lpfc hba data structure.
+ * @id: EQ vector index or Hardware Queue index
+ * @match: LPFC_FIND_BY_EQ = match by EQ
+ * LPFC_FIND_BY_HDWQ = match by Hardware Queue
+ */
+static uint16_t
+lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
+{
+ struct lpfc_vector_map_info *cpup;
+ int cpu;
+
+ /* Find the CPU that services the specified EQ or Hardware Queue */
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if ((match == LPFC_FIND_BY_EQ) &&
+ (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+ (cpup->eq == id))
+ return cpu;
+ if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
+ return cpu;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_find_eq_handle - Find the EQ that corresponds to the specified
+ * Hardware Queue
+ * @phba: pointer to lpfc hba data structure.
+ * @hdwq: Hardware Queue index
+ */
+static uint16_t
+lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq)
+{
+ struct lpfc_vector_map_info *cpup;
+ int cpu;
+
+ /* Find the EQ that services the specified Hardware Queue */
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (cpup->hdwq == hdwq)
+ return cpup->eq;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_X86
+/**
+ * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: CPU map index
+ * @phys_id: CPU package physical id
+ * @core_id: CPU core id
+ */
+static int
+lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
+ uint16_t phys_id, uint16_t core_id)
+{
+ struct lpfc_vector_map_info *cpup;
+ int idx;
+
+ for_each_present_cpu(idx) {
+ cpup = &phba->sli4_hba.cpu_map[idx];
+ /* Does this cpup match the phys_id/core_id we are looking for? */
+ if ((cpup->phys_id == phys_id) &&
+ (cpup->core_id == core_id) &&
+ (cpu != idx))
+ return 1;
+ }
+ return 0;
+}
+#endif
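lpfc_find_hyper() flags a CPU as a hyper-thread sibling when any other CPU index reports the same (package, core) pair. A userspace analogue of the same test over a hypothetical four-CPU map:

	#include <stdbool.h>
	#include <stdio.h>

	struct cpu_ids { int phys_id, core_id; };

	/* A CPU is a hyper-thread if some *other* index shares its (package, core). */
	static bool is_hyper(const struct cpu_ids *map, int ncpu, int cpu)
	{
		for (int i = 0; i < ncpu; i++)
			if (i != cpu &&
			    map[i].phys_id == map[cpu].phys_id &&
			    map[i].core_id == map[cpu].core_id)
				return true;
		return false;
	}

	int main(void)
	{
		/* Hypothetical 4-CPU box: CPUs 0/2 and 1/3 are HT pairs. */
		struct cpu_ids map[] = { {0, 0}, {0, 1}, {0, 0}, {0, 1} };

		for (int c = 0; c < 4; c++)
			printf("cpu %d hyper=%d\n", c, is_hyper(map, 4, c));
		return 0;
	}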
+
+/**
* lpfc_cpu_affinity_check - Check vector CPU affinity mappings
* @phba: pointer to lpfc hba data structure.
* @vectors: number of msix vectors allocated.
*
* The routine will figure out the CPU affinity assignment for every
- * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated
- * with a pointer to the CPU mask that defines ALL the CPUs this vector
- * can be associated with. If the vector can be unquely associated with
- * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
+ * MSI-X vector allocated for the HBA.
* In addition, the CPU to IO channel mapping will be calculated
* and the phba->sli4_hba.cpu_map array will reflect this.
*/
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
+ int i, cpu, idx;
+ int max_phys_id, min_phys_id;
+ int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
- int index = 0;
- int vec = 0;
- int cpu;
+ const struct cpumask *maskp;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
@@ -10231,32 +10616,71 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
/* Init cpu_map array */
memset(phba->sli4_hba.cpu_map, 0xff,
(sizeof(struct lpfc_vector_map_info) *
- phba->sli4_hba.num_present_cpu));
+ phba->sli4_hba.num_possible_cpu));
+
+ max_phys_id = 0;
+ min_phys_id = 0xffff;
+ max_core_id = 0;
+ min_core_id = 0xffff;
/* Update CPU map with physical id and core id of each CPU */
- cpup = phba->sli4_hba.cpu_map;
- for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_X86
cpuinfo = &cpu_data(cpu);
cpup->phys_id = cpuinfo->phys_proc_id;
cpup->core_id = cpuinfo->cpu_core_id;
+ cpup->hyper = lpfc_find_hyper(phba, cpu,
+ cpup->phys_id, cpup->core_id);
#else
/* No distinction between CPUs for other platforms */
cpup->phys_id = 0;
- cpup->core_id = 0;
+ cpup->core_id = cpu;
+ cpup->hyper = 0;
#endif
- cpup->channel_id = index; /* For now round robin */
- cpup->irq = pci_irq_vector(phba->pcidev, vec);
- vec++;
- if (vec >= vectors)
- vec = 0;
- index++;
- if (index >= phba->cfg_fcp_io_channel)
- index = 0;
- cpup++;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3328 CPU physid %d coreid %d\n",
+ cpup->phys_id, cpup->core_id);
+
+ if (cpup->phys_id > max_phys_id)
+ max_phys_id = cpup->phys_id;
+ if (cpup->phys_id < min_phys_id)
+ min_phys_id = cpup->phys_id;
+
+ if (cpup->core_id > max_core_id)
+ max_core_id = cpup->core_id;
+ if (cpup->core_id < min_core_id)
+ min_core_id = cpup->core_id;
}
-}
+ for_each_possible_cpu(i) {
+ struct lpfc_eq_intr_info *eqi =
+ per_cpu_ptr(phba->sli4_hba.eq_info, i);
+
+ INIT_LIST_HEAD(&eqi->list);
+ eqi->icnt = 0;
+ }
+
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ maskp = pci_irq_get_affinity(phba->pcidev, idx);
+ if (!maskp)
+ continue;
+
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->eq = idx;
+ cpup->hdwq = idx;
+ cpup->irq = pci_irq_vector(phba->pcidev, idx);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3336 Set Affinity: CPU %d "
+ "hdwq %d irq %d\n",
+ cpu, cpup->hdwq, cpup->irq);
+ }
+ }
+ return;
+}
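The rewritten affinity check no longer round-robins channel ids by hand; it walks each vector's affinity mask (pci_irq_get_affinity()) and records the vector as that CPU's EQ and hardware-queue owner. A userspace analogue using plain bitmasks in place of struct cpumask; the mask values are hypothetical:

	#include <stdio.h>

	#define NCPUS 8
	#define NVECS 4

	int main(void)
	{
		/* Hypothetical affinity masks: bit c set => CPU c serves vector v. */
		unsigned int mask[NVECS] = { 0x03, 0x0c, 0x30, 0xc0 };
		int cpu_to_eq[NCPUS];

		for (int v = 0; v < NVECS; v++)
			for (int c = 0; c < NCPUS; c++)
				if (mask[v] & (1u << c))
					cpu_to_eq[c] = v;	/* record the owner */

		for (int c = 0; c < NCPUS; c++)
			printf("cpu %d -> eq %d\n", c, cpu_to_eq[c]);
		return 0;
	}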
/**
* lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
@@ -10276,12 +10700,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
char *name;
/* Set up MSI-X multi-message vectors */
- vectors = phba->io_channel_irqs;
- if (phba->cfg_fof)
- vectors++;
+ vectors = phba->cfg_irq_chann;
rc = pci_alloc_irq_vectors(phba->pcidev,
- (phba->nvmet_support) ? 1 : 2,
+ 1,
vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -10299,17 +10721,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
phba->sli4_hba.hba_eq_hdl[index].idx = index;
phba->sli4_hba.hba_eq_hdl[index].phba = phba;
- atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
- if (phba->cfg_fof && (index == (vectors - 1)))
- rc = request_irq(pci_irq_vector(phba->pcidev, index),
- &lpfc_sli4_fof_intr_handler, 0,
- name,
- &phba->sli4_hba.hba_eq_hdl[index]);
- else
- rc = request_irq(pci_irq_vector(phba->pcidev, index),
- &lpfc_sli4_hba_intr_handler, 0,
- name,
- &phba->sli4_hba.hba_eq_hdl[index]);
+ rc = request_irq(pci_irq_vector(phba->pcidev, index),
+ &lpfc_sli4_hba_intr_handler, 0,
+ name,
+ &phba->sli4_hba.hba_eq_hdl[index]);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -10318,24 +10733,16 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
}
}
- if (phba->cfg_fof)
- vectors--;
-
- if (vectors != phba->io_channel_irqs) {
+ if (vectors != phba->cfg_irq_chann) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3238 Reducing IO channels to match number of "
"MSI-X vectors, requested %d got %d\n",
- phba->io_channel_irqs, vectors);
- if (phba->cfg_fcp_io_channel > vectors)
- phba->cfg_fcp_io_channel = vectors;
- if (phba->cfg_nvme_io_channel > vectors)
- phba->cfg_nvme_io_channel = vectors;
- if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
- phba->io_channel_irqs = phba->cfg_fcp_io_channel;
- else
- phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+ phba->cfg_irq_chann, vectors);
+ if (phba->cfg_irq_chann > vectors)
+ phba->cfg_irq_chann = vectors;
+ if (phba->cfg_nvmet_mrq > vectors)
+ phba->cfg_nvmet_mrq = vectors;
}
- lpfc_cpu_affinity_check(phba, vectors);
return rc;
@@ -10390,15 +10797,11 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
return rc;
}
- for (index = 0; index < phba->io_channel_irqs; index++) {
+ for (index = 0; index < phba->cfg_irq_chann; index++) {
phba->sli4_hba.hba_eq_hdl[index].idx = index;
phba->sli4_hba.hba_eq_hdl[index].phba = phba;
}
- if (phba->cfg_fof) {
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
- }
return 0;
}
@@ -10459,17 +10862,10 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
phba->intr_type = INTx;
intr_mode = 0;
- for (idx = 0; idx < phba->io_channel_irqs; idx++) {
- eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
- eqhdl->idx = idx;
- eqhdl->phba = phba;
- atomic_set(&eqhdl->hba_eq_in_use, 1);
- }
- if (phba->cfg_fof) {
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
eqhdl->idx = idx;
eqhdl->phba = phba;
- atomic_set(&eqhdl->hba_eq_in_use, 1);
}
}
}
@@ -10493,13 +10889,13 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
int index;
/* Free up MSI-X multi-message vectors */
- for (index = 0; index < phba->io_channel_irqs; index++)
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
-
- if (phba->cfg_fof)
+ for (index = 0; index < phba->cfg_irq_chann; index++) {
+ irq_set_affinity_hint(
+ pci_irq_vector(phba->pcidev, index),
+ NULL);
free_irq(pci_irq_vector(phba->pcidev, index),
&phba->sli4_hba.hba_eq_hdl[index]);
+ }
} else {
free_irq(phba->pcidev->irq, phba);
}
@@ -10560,8 +10956,10 @@ lpfc_unset_hba(struct lpfc_hba *phba)
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
+ struct lpfc_sli4_hdw_queue *qp;
+ int idx, ccnt, fcnt;
int wait_time = 0;
- int nvme_xri_cmpl = 1;
+ int io_xri_cmpl = 1;
int nvmet_xri_cmpl = 1;
int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
@@ -10576,17 +10974,32 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
lpfc_nvme_wait_for_io_drain(phba);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
- fcp_xri_cmpl =
- list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ ccnt = 0;
+ fcnt = 0;
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ qp = &phba->sli4_hba.hdwq[idx];
+ fcp_xri_cmpl = list_empty(
+ &qp->lpfc_abts_scsi_buf_list);
+ if (!fcp_xri_cmpl) /* if list is NOT empty */
+ fcnt++;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ io_xri_cmpl = list_empty(
+ &qp->lpfc_abts_nvme_buf_list);
+ if (!io_xri_cmpl) /* if list is NOT empty */
+ ccnt++;
+ }
+ }
+ if (ccnt)
+ io_xri_cmpl = 0;
+ if (fcnt)
+ fcp_xri_cmpl = 0;
+
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- nvme_xri_cmpl =
- list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
nvmet_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
}
- while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
+ while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl ||
!nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
if (!nvmet_xri_cmpl)
@@ -10594,7 +11007,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
"6424 NVMET XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
- if (!nvme_xri_cmpl)
+ if (!io_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6100 NVME XRI exchange busy "
"wait time: %d seconds.\n",
@@ -10615,17 +11028,31 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
}
+
+ ccnt = 0;
+ fcnt = 0;
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ qp = &phba->sli4_hba.hdwq[idx];
+ fcp_xri_cmpl = list_empty(
+ &qp->lpfc_abts_scsi_buf_list);
+ if (!fcp_xri_cmpl) /* if list is NOT empty */
+ fcnt++;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ io_xri_cmpl = list_empty(
+ &qp->lpfc_abts_nvme_buf_list);
+ if (!io_xri_cmpl) /* if list is NOT empty */
+ ccnt++;
+ }
+ }
+ if (ccnt)
+ io_xri_cmpl = 0;
+ if (fcnt)
+ fcp_xri_cmpl = 0;
+
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- nvme_xri_cmpl = list_empty(
- &phba->sli4_hba.lpfc_abts_nvme_buf_list);
nvmet_xri_cmpl = list_empty(
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
}
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
- fcp_xri_cmpl = list_empty(
- &phba->sli4_hba.lpfc_abts_scsi_buf_list);
-
els_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
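The drain logic above is a bounded poll: re-check the abort lists on a fixed cadence, log louder once a threshold passes, and always sleep between probes. A minimal standalone sketch of that poll-with-timeout shape; the cadence, timeout, and probe are invented stand-ins:

	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	#define WAIT_STEP_MS  100
	#define WAIT_TMO_MS  1000

	/* Hypothetical probe for outstanding work; stands in for list_empty(). */
	static bool outstanding_drained(void)
	{
		static int remaining = 3;	/* pretend three polls are needed */
		return --remaining <= 0;
	}

	int main(void)
	{
		int waited_ms = 0;

		while (!outstanding_drained()) {
			if (waited_ms > WAIT_TMO_MS)
				fprintf(stderr, "still busy after %d ms\n", waited_ms);
			usleep(WAIT_STEP_MS * 1000);	/* never spin without sleeping */
			waited_ms += WAIT_STEP_MS;
		}
		printf("drained after %d ms\n", waited_ms);
		return 0;
	}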
@@ -10650,7 +11077,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
struct pci_dev *pdev = phba->pcidev;
lpfc_stop_hba_timers(phba);
- phba->sli4_hba.intr_enable = 0;
+ if (phba->pport)
+ phba->sli4_hba.intr_enable = 0;
/*
* Gracefully wait out the potential current outstanding asynchronous
@@ -10711,7 +11139,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
lpfc_sli4_ras_dma_free(phba);
/* Stop the SLI4 device port */
- phba->pport->work_port_events = 0;
+ if (phba->pport)
+ phba->pport->work_port_events = 0;
}
/**
@@ -10869,8 +11298,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->nvme_support = 0;
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
- phba->cfg_nvme_io_channel = 0;
- phba->io_channel_irqs = phba->cfg_fcp_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
"6101 Disabling NVME support: "
"Not supported by firmware: %d %d\n",
@@ -11195,6 +11622,8 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
* corresponding pools here.
*/
lpfc_scsi_free(phba);
+ lpfc_free_iocb_list(phba);
+
lpfc_mem_free_all(phba);
dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
@@ -11820,28 +12249,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Get the default values for Model Name and Description */
lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
- /* Create SCSI host to the physical port */
- error = lpfc_create_shost(phba);
- if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1415 Failed to create scsi host.\n");
- goto out_unset_driver_resource;
- }
-
- /* Configure sysfs attributes */
- vport = phba->pport;
- error = lpfc_alloc_sysfs_attr(vport);
- if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1416 Failed to allocate sysfs attr\n");
- goto out_destroy_shost;
- }
-
- shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
/* Now, trying to enable interrupt and bring up the device */
cfg_mode = phba->cfg_use_msi;
/* Put device to a known state before enabling interrupt */
+ phba->pport = NULL;
lpfc_stop_port(phba);
/* Configure and enable interrupt */
@@ -11850,18 +12262,34 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0426 Failed to enable interrupt.\n");
error = -ENODEV;
- goto out_free_sysfs_attr;
+ goto out_unset_driver_resource;
}
/* Default to single EQ for non-MSI-X */
if (phba->intr_type != MSIX) {
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
- phba->cfg_fcp_io_channel = 1;
+ phba->cfg_irq_chann = 1;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- phba->cfg_nvme_io_channel = 1;
if (phba->nvmet_support)
phba->cfg_nvmet_mrq = 1;
}
- phba->io_channel_irqs = 1;
+ }
+ lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
+
+ /* Create SCSI host to the physical port */
+ error = lpfc_create_shost(phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1415 Failed to create scsi host.\n");
+ goto out_disable_intr;
+ }
+ vport = phba->pport;
+ shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+
+ /* Configure sysfs attributes */
+ error = lpfc_alloc_sysfs_attr(vport);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1416 Failed to allocate sysfs attr\n");
+ goto out_destroy_shost;
}
/* Set up SLI-4 HBA */
@@ -11869,7 +12297,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1421 Failed to set up hba\n");
error = -ENODEV;
- goto out_disable_intr;
+ goto out_free_sysfs_attr;
}
/* Log the current active interrupt mode */
@@ -11882,19 +12310,20 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* NVME support in FW earlier in the driver load corrects the
* FC4 type making a check for nvme_support unnecessary.
*/
- if ((phba->nvmet_support == 0) &&
- (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
- /* Create NVME binding with nvme_fc_transport. This
- * ensures the vport is initialized. If the localport
- * create fails, it should not unload the driver to
- * support field issues.
- */
- error = lpfc_nvme_create_localport(vport);
- if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6004 NVME registration failed, "
- "error x%x\n",
- error);
+ if (phba->nvmet_support == 0) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Create NVME binding with nvme_fc_transport. This
+ * ensures the vport is initialized. If the localport
+ * create fails, it should not unload the driver to
+ * support field issues.
+ */
+ error = lpfc_nvme_create_localport(vport);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6004 NVME registration "
+ "failed, error x%x\n",
+ error);
+ }
}
}
@@ -11910,12 +12339,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
return 0;
-out_disable_intr:
- lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
lpfc_free_sysfs_attr(vport);
out_destroy_shost:
lpfc_destroy_shost(phba);
+out_disable_intr:
+ lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
@@ -11978,13 +12407,16 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
lpfc_nvmet_destroy_targetport(phba);
lpfc_nvme_destroy_localport(vport);
+ /* De-allocate multi-XRI pools */
+ if (phba->cfg_xri_rebalancing)
+ lpfc_destroy_multixri_pools(phba);
+
/*
* Bring down the SLI Layer. This step disables all interrupts,
* clears the rings, discards all mailbox commands, and resets
* the HBA FCoE function.
*/
lpfc_debugfs_terminate(vport);
- lpfc_sli4_hba_unset(phba);
lpfc_stop_hba_timers(phba);
spin_lock_irq(&phba->port_list_lock);
@@ -11994,9 +12426,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
/* Perform scsi free before driver resource_unset since scsi
* buffers are released to their corresponding pools here.
*/
- lpfc_scsi_free(phba);
- lpfc_nvme_free(phba);
+ lpfc_io_free(phba);
lpfc_free_iocb_list(phba);
+ lpfc_sli4_hba_unset(phba);
lpfc_unset_driver_resource_phase2(phba);
lpfc_sli4_driver_resource_unset(phba);
@@ -12658,165 +13090,6 @@ lpfc_sli4_ras_init(struct lpfc_hba *phba)
}
}
-/**
- * lpfc_fof_queue_setup - Set up all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to set up all the fof queues for the FC HBA
- * operation.
- *
- * Return codes
- * 0 - successful
- * -ENOMEM - No available memory
- **/
-int
-lpfc_fof_queue_setup(struct lpfc_hba *phba)
-{
- struct lpfc_sli_ring *pring;
- int rc;
-
- rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
- if (rc)
- return -ENOMEM;
-
- if (phba->cfg_fof) {
-
- rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
- phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
- if (rc)
- goto out_oas_cq;
-
- rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
- phba->sli4_hba.oas_cq, LPFC_FCP);
- if (rc)
- goto out_oas_wq;
-
- /* Bind this CQ/WQ to the NVME ring */
- pring = phba->sli4_hba.oas_wq->pring;
- pring->sli.sli4.wqp =
- (void *)phba->sli4_hba.oas_wq;
- phba->sli4_hba.oas_cq->pring = pring;
- }
-
- return 0;
-
-out_oas_wq:
- lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
-out_oas_cq:
- lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
- return rc;
-
-}
-
-/**
- * lpfc_fof_queue_create - Create all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to allocate all the fof queues for the FC HBA
- * operation. For each SLI4 queue type, the parameters such as queue entry
- * count (queue depth) shall be taken from the module parameter. For now,
- * we just use some constant number as place holder.
- *
- * Return codes
- * 0 - successful
- * -ENOMEM - No availble memory
- * -EIO - The mailbox failed to complete successfully.
- **/
-int
-lpfc_fof_queue_create(struct lpfc_hba *phba)
-{
- struct lpfc_queue *qdesc;
- uint32_t wqesize;
-
- /* Create FOF EQ */
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
- phba->sli4_hba.eq_esize,
- phba->sli4_hba.eq_ecount);
- if (!qdesc)
- goto out_error;
-
- qdesc->qe_valid = 1;
- phba->sli4_hba.fof_eq = qdesc;
-
- if (phba->cfg_fof) {
-
- /* Create OAS CQ */
- if (phba->enab_exp_wqcq_pages)
- qdesc = lpfc_sli4_queue_alloc(phba,
- LPFC_EXPANDED_PAGE_SIZE,
- phba->sli4_hba.cq_esize,
- LPFC_CQE_EXP_COUNT);
- else
- qdesc = lpfc_sli4_queue_alloc(phba,
- LPFC_DEFAULT_PAGE_SIZE,
- phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
- if (!qdesc)
- goto out_error;
-
- qdesc->qe_valid = 1;
- phba->sli4_hba.oas_cq = qdesc;
-
- /* Create OAS WQ */
- if (phba->enab_exp_wqcq_pages) {
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba,
- LPFC_EXPANDED_PAGE_SIZE,
- wqesize,
- LPFC_WQE_EXP_COUNT);
- } else
- qdesc = lpfc_sli4_queue_alloc(phba,
- LPFC_DEFAULT_PAGE_SIZE,
- phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
-
- if (!qdesc)
- goto out_error;
-
- phba->sli4_hba.oas_wq = qdesc;
- list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
-
- }
- return 0;
-
-out_error:
- lpfc_fof_queue_destroy(phba);
- return -ENOMEM;
-}
-
-/**
- * lpfc_fof_queue_destroy - Destroy all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to release all the SLI4 queues with the FC HBA
- * operation.
- *
- * Return codes
- * 0 - successful
- **/
-int
-lpfc_fof_queue_destroy(struct lpfc_hba *phba)
-{
- /* Release FOF Event queue */
- if (phba->sli4_hba.fof_eq != NULL) {
- lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
- phba->sli4_hba.fof_eq = NULL;
- }
-
- /* Release OAS Completion queue */
- if (phba->sli4_hba.oas_cq != NULL) {
- lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
- phba->sli4_hba.oas_cq = NULL;
- }
-
- /* Release OAS Work queue */
- if (phba->sli4_hba.oas_wq != NULL) {
- lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
- phba->sli4_hba.oas_wq = NULL;
- }
- return 0;
-}
MODULE_DEVICE_TABLE(pci, lpfc_id_table);
@@ -12888,7 +13161,6 @@ lpfc_init(void)
lpfc_nvmet_cmd_template();
/* Initialize in case vector mapping is needed */
- lpfc_used_cpu = NULL;
lpfc_present_cpu = num_present_cpus();
error = pci_register_driver(&lpfc_driver);
@@ -12927,7 +13199,6 @@ lpfc_exit(void)
(1L << _dump_buf_dif_order), _dump_buf_dif);
free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
}
- kfree(lpfc_used_cpu);
idr_destroy(&lpfc_hba_index);
}
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 4d3b94317515..8abe933bad09 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2095,8 +2095,8 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
if (phba->nvmet_support) {
bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
/* iaab/iaar NOT set for now */
- bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
- bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
+ bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
+ bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
}
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 96bc3789a166..6172682a24ba 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -825,7 +825,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"rport rolechg: role:x%x did:x%x flg:x%x",
roles, ndlp->nlp_DID, ndlp->nlp_flag);
- if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
fc_remote_port_rolechg(rport, roles);
}
}
@@ -1789,8 +1789,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
lpfc_nvme_update_localport(vport);
@@ -1804,7 +1804,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* should just issue PRLI for FCP. Otherwise issue
* GFT_ID to determine if remote port supports NVME.
*/
- if (phba->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
+ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
0, ndlp->nlp_DID);
return ndlp->nlp_state;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 8c9f79042228..1aa00d2c3f74 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -56,12 +56,12 @@
/* NVME initiator-based functions */
-static struct lpfc_nvme_buf *
+static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- int expedite);
+ int idx, int expedite);
static void
-lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
static struct nvme_fc_port_template lpfc_nvme_template;
@@ -239,7 +239,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
if (qidx) {
str = "IO "; /* IO queue */
qhandle->index = ((qidx - 1) %
- vport->phba->cfg_nvme_io_channel);
+ lpfc_nvme_template.max_hw_queues);
} else {
str = "ADM"; /* Admin queue */
qhandle->index = qidx;
@@ -247,7 +247,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6073 Binding %s HdwQueue %d (cpu %d) to "
- "io_channel %d qhandle %p\n", str,
+ "hdw_queue %d qhandle %p\n", str,
qidx, qhandle->cpu_id, qhandle->index, qhandle);
*handle = (void *)qhandle;
return 0;
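
The qhandle->index computed above distributes transport IO queues across the advertised hardware queues, with qidx 0 reserved for the admin queue. A standalone sketch of that mapping (names illustrative):

#include <stdio.h>

static unsigned int map_qidx(unsigned int qidx, unsigned int max_hw_queues)
{
	if (qidx == 0)
		return 0;			/* admin queue */
	return (qidx - 1) % max_hw_queues;	/* IO queues wrap */
}

int main(void)
{
	/* With 4 hardware queues, IO qidx 1..8 map to 0,1,2,3,0,1,2,3. */
	for (unsigned int q = 0; q <= 8; q++)
		printf("qidx %u -> index %u\n", q, map_qidx(q, 4));
	return 0;
}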
@@ -529,7 +529,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
- rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
+ rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
if (rc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"6045 Issue GEN REQ WQE to NPORT x%x "
@@ -761,7 +761,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
- struct lpfc_nvme_buf *lpfc_ncmd,
+ struct lpfc_io_buf *lpfc_ncmd,
struct nvmefc_fcp_req *nCmd)
{
struct lpfc_hba *phba = vport->phba;
@@ -784,7 +784,7 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
* rather than the virtual memory to ease the restore
* operation.
*/
- sgl = lpfc_ncmd->nvme_sgl;
+ sgl = lpfc_ncmd->dma_sgl;
sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
if (phba->cfg_nvme_embed_cmd) {
sgl->addr_hi = 0;
@@ -858,7 +858,7 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
- struct lpfc_nvme_buf *lpfc_ncmd)
+ struct lpfc_io_buf *lpfc_ncmd)
{
uint64_t seg1, seg2, seg3, seg4;
uint64_t segsum;
@@ -956,57 +956,54 @@ static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_wcqe_complete *wcqe)
{
- struct lpfc_nvme_buf *lpfc_ncmd =
- (struct lpfc_nvme_buf *)pwqeIn->context1;
+ struct lpfc_io_buf *lpfc_ncmd =
+ (struct lpfc_io_buf *)pwqeIn->context1;
struct lpfc_vport *vport = pwqeIn->vport;
struct nvmefc_fcp_req *nCmd;
struct nvme_fc_ersp_iu *ep;
struct nvme_fc_cmd_iu *cp;
- struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_ctrl_stat *cstat;
- unsigned long flags;
uint32_t code, status, idx;
uint16_t cid, sqhd, data;
uint32_t *ptr;
/* Sanity check on return of outstanding command */
- if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
- if (!lpfc_ncmd) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_NODE | LOG_NVME_IOERR,
- "6071 Null lpfc_ncmd pointer. No "
- "release, skip completion\n");
- return;
- }
+ if (!lpfc_ncmd) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NODE | LOG_NVME_IOERR,
+ "6071 Null lpfc_ncmd pointer. No "
+ "release, skip completion\n");
+ return;
+ }
+
+ /* Guard against abort handler being called at same time */
+ spin_lock(&lpfc_ncmd->buf_lock);
+ if (!lpfc_ncmd->nvmeCmd) {
+ spin_unlock(&lpfc_ncmd->buf_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
"6066 Missing cmpl ptrs: lpfc_ncmd %p, "
- "nvmeCmd %p nrport %p\n",
- lpfc_ncmd, lpfc_ncmd->nvmeCmd,
- lpfc_ncmd->nrport);
+ "nvmeCmd %p\n",
+ lpfc_ncmd, lpfc_ncmd->nvmeCmd);
/* Release the lpfc_ncmd regardless of the missing elements. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
nCmd = lpfc_ncmd->nvmeCmd;
- rport = lpfc_ncmd->nrport;
status = bf_get(lpfc_wcqe_c_status, wcqe);
+ idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+ phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
+
if (vport->localport) {
lport = (struct lpfc_nvme_lport *)vport->localport->private;
- if (lport) {
- idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
- cstat = &lport->cstat[idx];
- atomic_inc(&cstat->fc4NvmeIoCmpls);
- if (status) {
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
- atomic_inc(&lport->cmpl_fcp_xb);
- atomic_inc(&lport->cmpl_fcp_err);
- }
+ if (lport && status) {
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_fcp_xb);
+ atomic_inc(&lport->cmpl_fcp_err);
}
}
@@ -1017,18 +1014,11 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
* Catch race where our node has transitioned, but the
* transport is still transitioning.
*/
- ndlp = rport->ndlp;
+ ndlp = lpfc_ncmd->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6061 rport %p, DID x%06x node not ready.\n",
- rport, rport->remoteport->port_id);
-
- ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
- "6062 Ignoring NVME cmpl. No ndlp\n");
- goto out_err;
- }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6062 Ignoring NVME cmpl. No ndlp\n");
+ goto out_err;
}
code = bf_get(lpfc_wcqe_c_code, wcqe);
@@ -1148,13 +1138,18 @@ out_err:
lpfc_nvme_ktime(phba, lpfc_ncmd);
}
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
- if (lpfc_ncmd->cpu != smp_processor_id())
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
- "6701 CPU Check cmpl: "
- "cpu %d expect %d\n",
- smp_processor_id(), lpfc_ncmd->cpu);
- if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
- phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
+ uint32_t cpu;
+ idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+ cpu = smp_processor_id();
+ if (cpu < LPFC_CHECK_CPU_CNT) {
+ if (lpfc_ncmd->cpu != cpu)
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_NVME_IOERR,
+ "6701 CPU Check cmpl: "
+ "cpu %d expect %d\n",
+ cpu, lpfc_ncmd->cpu);
+ phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
+ }
}
#endif
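
The completion-side check above now bounds smp_processor_id() against LPFC_CHECK_CPU_CNT before touching the per-hardware-queue counter array, and the CPU-mismatch message drops to KERN_INFO since it is diagnostic rather than an error. A sketch of that bounds-then-count shape (array size illustrative):

#define CHECK_CPU_CNT	128	/* stand-in for LPFC_CHECK_CPU_CNT */

struct hdwq_stats {
	unsigned int cpucheck_cmpl_io[CHECK_CPU_CNT];
};

static void count_cmpl(struct hdwq_stats *qp, unsigned int cpu)
{
	if (cpu >= CHECK_CPU_CNT)
		return;			/* never index past the array */
	qp->cpucheck_cmpl_io[cpu]++;	/* per-queue, per-CPU tally */
}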
@@ -1165,13 +1160,11 @@ out_err:
if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
freqpriv = nCmd->private;
freqpriv->nvme_buf = NULL;
- nCmd->done(nCmd);
lpfc_ncmd->nvmeCmd = NULL;
- }
-
- spin_lock_irqsave(&phba->hbalock, flags);
- lpfc_ncmd->nrport = NULL;
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&lpfc_ncmd->buf_lock);
+ nCmd->done(nCmd);
+ } else
+ spin_unlock(&lpfc_ncmd->buf_lock);
/* Call release with XB=1 to queue the IO into the abort list. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
@@ -1196,9 +1189,9 @@ out_err:
**/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
- struct lpfc_nvme_buf *lpfc_ncmd,
+ struct lpfc_io_buf *lpfc_ncmd,
struct lpfc_nodelist *pnode,
- struct lpfc_nvme_ctrl_stat *cstat)
+ struct lpfc_fc4_ctrl_stat *cstat)
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
@@ -1206,7 +1199,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ if (!NLP_CHK_NODE_ACT(pnode))
return -EINVAL;
/*
@@ -1236,7 +1229,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
} else {
wqe->fcp_iwrite.initial_xfer_len = 0;
}
- atomic_inc(&cstat->fc4NvmeOutputRequests);
+ cstat->output_requests++;
} else {
/* From the iread template, initialize words 7 - 11 */
memcpy(&wqe->words[7],
@@ -1249,13 +1242,13 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 5 */
wqe->fcp_iread.rsrvd5 = 0;
- atomic_inc(&cstat->fc4NvmeInputRequests);
+ cstat->input_requests++;
}
} else {
/* From the icmnd template, initialize words 4 - 11 */
memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
sizeof(uint32_t) * 8);
- atomic_inc(&cstat->fc4NvmeControlRequests);
+ cstat->control_requests++;
}
/*
* Finish initializing those WQE fields that are independent
@@ -1302,12 +1295,12 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
**/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
- struct lpfc_nvme_buf *lpfc_ncmd)
+ struct lpfc_io_buf *lpfc_ncmd)
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
- struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
+ struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
struct scatterlist *data_sg;
struct sli4_sge *first_data_sgl;
struct ulp_bde64 *bde;
@@ -1396,6 +1389,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
}
} else {
+ lpfc_ncmd->seg_cnt = 0;
+
/* For this clause to be valid, the payload_length
* and sg_cnt must be zero.
*/
@@ -1435,13 +1430,13 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
{
int ret = 0;
int expedite = 0;
- int idx;
+ int idx, cpu;
struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_ctrl_stat *cstat;
+ struct lpfc_fc4_ctrl_stat *cstat;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
struct lpfc_nodelist *ndlp;
- struct lpfc_nvme_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_qhandle *lpfc_queue_info;
struct lpfc_nvme_fcpreq_priv *freqpriv;
@@ -1559,7 +1554,15 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
}
}
- lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
+ /* Lookup Hardware Queue index based on fcp_io_sched module parameter */
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
+ idx = lpfc_queue_info->index;
+ } else {
+ cpu = smp_processor_id();
+ idx = phba->sli4_hba.cpu_map[cpu].hdwq;
+ }
+
+ lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
if (lpfc_ncmd == NULL) {
atomic_inc(&lport->xmt_fcp_noxri);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
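
Queue selection now has two modes: honor the index in the transport's queue handle (LPFC_FCP_SCHED_BY_HDWQ) or map the submitting CPU through the driver's CPU-to-hardware-queue table. A minimal sketch of that decision, with cpu_to_hdwq[] standing in for the phba->sli4_hba.cpu_map[].hdwq lookup:

enum sched_policy { SCHED_BY_HDWQ, SCHED_BY_CPU };

static int pick_hdwq(enum sched_policy policy, int handle_index,
		     int cpu, const int *cpu_to_hdwq)
{
	if (policy == SCHED_BY_HDWQ)
		return handle_index;	/* trust the transport's queue */
	return cpu_to_hdwq[cpu];	/* follow the submitting CPU */
}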
@@ -1586,9 +1589,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
*/
freqpriv->nvme_buf = lpfc_ncmd;
lpfc_ncmd->nvmeCmd = pnvme_fcreq;
- lpfc_ncmd->nrport = rport;
lpfc_ncmd->ndlp = ndlp;
- lpfc_ncmd->start_time = jiffies;
+ lpfc_ncmd->qidx = lpfc_queue_info->qidx;
/*
* Issue the IO on the WQ indicated by index in the hw_queue_handle.
@@ -1598,9 +1600,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* index to use and that they have affinitized a CPU to this hardware
* queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
*/
- idx = lpfc_queue_info->index;
lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
- cstat = &lport->cstat[idx];
+ cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
@@ -1618,7 +1619,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_queue_info->index, ndlp->nlp_DID);
- ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
+ ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_inc(&lport->xmt_fcp_wqerr);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
@@ -1629,26 +1630,26 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_free_nvme_buf;
}
+ if (phba->cfg_xri_rebalancing)
+ lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_ncmd->ts_cmd_start)
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
- lpfc_ncmd->cpu = smp_processor_id();
- if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
- /* Check for admin queue */
- if (lpfc_queue_info->qidx) {
+ cpu = smp_processor_id();
+ if (cpu < LPFC_CHECK_CPU_CNT) {
+ lpfc_ncmd->cpu = cpu;
+ if (idx != cpu)
lpfc_printf_vlog(vport,
- KERN_ERR, LOG_NVME_IOERR,
+ KERN_INFO, LOG_NVME_IOERR,
"6702 CPU Check cmd: "
"cpu %d wq %d\n",
lpfc_ncmd->cpu,
lpfc_queue_info->index);
- }
- lpfc_ncmd->cpu = lpfc_queue_info->index;
+ phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
}
- if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
- phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
}
#endif
return 0;
@@ -1656,11 +1657,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
out_free_nvme_buf:
if (lpfc_ncmd->nvmeCmd->sg_cnt) {
if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
- atomic_dec(&cstat->fc4NvmeOutputRequests);
+ cstat->output_requests--;
else
- atomic_dec(&cstat->fc4NvmeInputRequests);
+ cstat->input_requests--;
} else
- atomic_dec(&cstat->fc4NvmeControlRequests);
+ cstat->control_requests--;
lpfc_release_nvme_buf(phba, lpfc_ncmd);
out_fail:
return ret;
@@ -1720,7 +1721,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct lpfc_nvme_buf *lpfc_nbuf;
+ struct lpfc_io_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
@@ -1788,6 +1789,9 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
}
nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
+ /* Guard against IO completion being called at same time */
+ spin_lock(&lpfc_nbuf->buf_lock);
+
/*
* The lpfc_nbuf and the mapped nvme_fcreq in the driver's
* state must match the nvme_fcreq passed by the nvme
@@ -1796,24 +1800,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
* has not seen it yet.
*/
if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6143 NVME req mismatch: "
"lpfc_nbuf %p nvmeCmd %p, "
"pnvme_fcreq %p. Skipping Abort xri x%x\n",
lpfc_nbuf, lpfc_nbuf->nvmeCmd,
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
- return;
+ goto out_unlock;
}
/* Don't abort IOs no longer on the pending queue. */
if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6142 NVME IO req %p not queued - skipping "
"abort req xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
- return;
+ goto out_unlock;
}
atomic_inc(&lport->xmt_fcp_abort);
@@ -1823,24 +1825,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Outstanding abort is in progress */
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq %p, "
"lpfc_ncmd %p xri x%x\n",
pnvme_fcreq, lpfc_nbuf,
nvmereq_wqe->sli4_xritag);
- return;
+ goto out_unlock;
}
abts_buf = __lpfc_sli_get_iocbq(phba);
if (!abts_buf) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6136 No available abort wqes. Skipping "
"Abts req for nvme_fcreq %p xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
- return;
+ goto out_unlock;
}
/* Ready - mark outstanding as aborted by driver. */
@@ -1883,7 +1883,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
abts_buf->vport = vport;
abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
- ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
+ ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
+ spin_unlock(&lpfc_nbuf->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -1899,6 +1900,12 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
"ox_id x%x on reqtag x%x\n",
nvmereq_wqe->sli4_xritag,
abts_buf->iotag);
+ return;
+
+out_unlock:
+ spin_unlock(&lpfc_nbuf->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
}
/* Declare and initialize an instance of the FC NVME template. */
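
The abort path above now nests the buffer's buf_lock inside phba->hbalock so it cannot race lpfc_nvme_io_cmd_wqe_cmpl(), and every early exit funnels through one label that drops both locks in reverse acquisition order. A kernel-style sketch of that discipline (assumed shape, simplified from the driver's code):

#include <linux/spinlock.h>
#include <linux/types.h>

static void abort_sketch(spinlock_t *hbalock, spinlock_t *buf_lock,
			 bool abortable)
{
	unsigned long flags;

	spin_lock_irqsave(hbalock, flags);
	spin_lock(buf_lock);		/* serialize against completion */

	if (!abortable)
		goto out_unlock;

	/* ... validate the request and issue the abort WQE ... */

out_unlock:
	spin_unlock(buf_lock);		/* reverse order of acquisition */
	spin_unlock_irqrestore(hbalock, flags);
}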
@@ -1928,454 +1935,63 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
};
/**
- * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
- * @phba: pointer to lpfc hba data structure.
- * @nblist: pointer to nvme buffer list.
- * @count: number of scsi buffers on the list.
- *
- * This routine is invoked to post a block of @count scsi sgl pages from a
- * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
- * No Lock is held.
- *
- **/
-static int
-lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
- struct list_head *nblist,
- int count)
-{
- struct lpfc_nvme_buf *lpfc_ncmd;
- struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
- struct sgl_page_pairs *sgl_pg_pairs;
- void *viraddr;
- LPFC_MBOXQ_t *mbox;
- uint32_t reqlen, alloclen, pg_pairs;
- uint32_t mbox_tmo;
- uint16_t xritag_start = 0;
- int rc = 0;
- uint32_t shdr_status, shdr_add_status;
- dma_addr_t pdma_phys_bpl1;
- union lpfc_sli4_cfg_shdr *shdr;
-
- /* Calculate the requested length of the dma memory */
- reqlen = count * sizeof(struct sgl_page_pairs) +
- sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > SLI4_PAGE_SIZE) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "6118 Block sgl registration required DMA "
- "size (%d) great than a page\n", reqlen);
- return -ENOMEM;
- }
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6119 Failed to allocate mbox cmd memory\n");
- return -ENOMEM;
- }
-
- /* Allocate DMA memory and set up the non-embedded mailbox command */
- alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
- LPFC_SLI4_MBX_NEMBED);
-
- if (alloclen < reqlen) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6120 Allocated DMA memory size (%d) is "
- "less than the requested DMA memory "
- "size (%d)\n", alloclen, reqlen);
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- return -ENOMEM;
- }
-
- /* Get the first SGE entry from the non-embedded DMA memory */
- viraddr = mbox->sge_array->addr[0];
-
- /* Set up the SGL pages in the non-embedded DMA pages */
- sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
- sgl_pg_pairs = &sgl->sgl_pg_pairs;
-
- pg_pairs = 0;
- list_for_each_entry(lpfc_ncmd, nblist, list) {
- /* Set up the sge entry */
- sgl_pg_pairs->sgl_pg0_addr_lo =
- cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
- sgl_pg_pairs->sgl_pg0_addr_hi =
- cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
- if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
- pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
- SGL_PAGE_SIZE;
- else
- pdma_phys_bpl1 = 0;
- sgl_pg_pairs->sgl_pg1_addr_lo =
- cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
- sgl_pg_pairs->sgl_pg1_addr_hi =
- cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
- /* Keep the first xritag on the list */
- if (pg_pairs == 0)
- xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
- sgl_pg_pairs++;
- pg_pairs++;
- }
- bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
- bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
- /* Perform endian conversion if necessary */
- sgl->word0 = cpu_to_le32(sgl->word0);
-
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
- }
- shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
- lpfc_sli4_mbox_cmd_free(phba, mbox);
- if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "6125 POST_SGL_BLOCK mailbox command failed "
- "status x%x add_status x%x mbx status x%x\n",
- shdr_status, shdr_add_status, rc);
- rc = -ENXIO;
- }
- return rc;
-}
-
-/**
- * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
- * @phba: pointer to lpfc hba data structure.
- * @post_nblist: pointer to the nvme buffer list.
- *
- * This routine walks a list of nvme buffers that was passed in. It attempts
- * to construct blocks of nvme buffer sgls which contains contiguous xris and
- * uses the non-embedded SGL block post mailbox commands to post to the port.
- * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
- * embedded SGL post mailbox command for posting. The @post_nblist passed in
- * must be local list, thus no lock is needed when manipulate the list.
- *
- * Returns: 0 = failure, non-zero number of successfully posted buffers.
- **/
-static int
-lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
- struct list_head *post_nblist, int sb_count)
-{
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
- int status, sgl_size;
- int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
- dma_addr_t pdma_phys_sgl1;
- int last_xritag = NO_XRI;
- int cur_xritag;
- LIST_HEAD(prep_nblist);
- LIST_HEAD(blck_nblist);
- LIST_HEAD(nvme_nblist);
-
- /* sanity check */
- if (sb_count <= 0)
- return -EINVAL;
-
- sgl_size = phba->cfg_sg_dma_buf_size;
-
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
- list_del_init(&lpfc_ncmd->list);
- block_cnt++;
- if ((last_xritag != NO_XRI) &&
- (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
- /* a hole in xri block, form a sgl posting block */
- list_splice_init(&prep_nblist, &blck_nblist);
- post_cnt = block_cnt - 1;
- /* prepare list for next posting block */
- list_add_tail(&lpfc_ncmd->list, &prep_nblist);
- block_cnt = 1;
- } else {
- /* prepare list for next posting block */
- list_add_tail(&lpfc_ncmd->list, &prep_nblist);
- /* enough sgls for non-embed sgl mbox command */
- if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
- list_splice_init(&prep_nblist, &blck_nblist);
- post_cnt = block_cnt;
- block_cnt = 0;
- }
- }
- num_posting++;
- last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
-
- /* end of repost sgl list condition for NVME buffers */
- if (num_posting == sb_count) {
- if (post_cnt == 0) {
- /* last sgl posting block */
- list_splice_init(&prep_nblist, &blck_nblist);
- post_cnt = block_cnt;
- } else if (block_cnt == 1) {
- /* last single sgl with non-contiguous xri */
- if (sgl_size > SGL_PAGE_SIZE)
- pdma_phys_sgl1 =
- lpfc_ncmd->dma_phys_sgl +
- SGL_PAGE_SIZE;
- else
- pdma_phys_sgl1 = 0;
- cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
- status = lpfc_sli4_post_sgl(phba,
- lpfc_ncmd->dma_phys_sgl,
- pdma_phys_sgl1, cur_xritag);
- if (status) {
- /* failure, put on abort nvme list */
- lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
- } else {
- /* success, put on NVME buffer list */
- lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
- lpfc_ncmd->status = IOSTAT_SUCCESS;
- num_posted++;
- }
- /* success, put on NVME buffer sgl list */
- list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
- }
- }
-
- /* continue until a nembed page worth of sgls */
- if (post_cnt == 0)
- continue;
-
- /* post block of NVME buffer list sgls */
- status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
- post_cnt);
-
- /* don't reset xritag due to hole in xri block */
- if (block_cnt == 0)
- last_xritag = NO_XRI;
-
- /* reset NVME buffer post count for next round of posting */
- post_cnt = 0;
-
- /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
- while (!list_empty(&blck_nblist)) {
- list_remove_head(&blck_nblist, lpfc_ncmd,
- struct lpfc_nvme_buf, list);
- if (status) {
- /* failure, put on abort nvme list */
- lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
- } else {
- /* success, put on NVME buffer list */
- lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
- lpfc_ncmd->status = IOSTAT_SUCCESS;
- num_posted++;
- }
- list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
- }
- }
- /* Push NVME buffers with sgl posted to the available list */
- while (!list_empty(&nvme_nblist)) {
- list_remove_head(&nvme_nblist, lpfc_ncmd,
- struct lpfc_nvme_buf, list);
- lpfc_release_nvme_buf(phba, lpfc_ncmd);
- }
- return num_posted;
-}
-
-/**
- * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine walks the list of nvme buffers that have been allocated and
- * repost them to the port by using SGL block post. This is needed after a
- * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
- * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
- * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
- *
- * Returns: 0 = success, non-zero failure.
- **/
-int
-lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
-{
- LIST_HEAD(post_nblist);
- int num_posted, rc = 0;
-
- /* get all NVME buffers need to repost to a local list */
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
- spin_lock(&phba->nvme_buf_list_put_lock);
- list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
- list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
- phba->get_nvme_bufs = 0;
- phba->put_nvme_bufs = 0;
- spin_unlock(&phba->nvme_buf_list_put_lock);
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-
- /* post the list of nvme buffer sgls to port if available */
- if (!list_empty(&post_nblist)) {
- num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
- phba->sli4_hba.nvme_xri_cnt);
- /* failed to post any nvme buffer, return error */
- if (num_posted == 0)
- rc = -EIO;
- }
- return rc;
-}
-
-/**
- * lpfc_new_nvme_buf - Scsi buffer allocator for HBA with SLI4 IF spec
- * @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
+ * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
*
- * This routine allocates nvme buffers for device with SLI-4 interface spec,
- * the nvme buffer contains all the necessary information needed to initiate
- * a NVME I/O. After allocating up to @num_to_allocate NVME buffers and put
- * them on a list, it post them to the port by using SGL block post.
+ * This routine removes a nvme buffer from head of @hdwq io_buf_list
+ * and returns it to the caller.
*
* Return codes:
- * int - number of nvme buffers that were allocated and posted.
- * 0 = failure, less than num_to_alloc is a partial failure.
+ * NULL - Error
* Pointer to lpfc_io_buf - Success
**/
-static int
-lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
+static struct lpfc_io_buf *
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int idx, int expedite)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_nvme_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_sli4_hdw_queue *qp;
+ struct sli4_sge *sgl;
struct lpfc_iocbq *pwqeq;
union lpfc_wqe128 *wqe;
- struct sli4_sge *sgl;
- dma_addr_t pdma_phys_sgl;
- uint16_t iotag, lxri = 0;
- int bcnt, num_posted;
- LIST_HEAD(prep_nblist);
- LIST_HEAD(post_nblist);
- LIST_HEAD(nvme_nblist);
-
- for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
- lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
- if (!lpfc_ncmd)
- break;
- /*
- * Get memory from the pci pool to map the virt space to
- * pci bus space for an I/O. The DMA buffer includes the
- * number of SGE's necessary to support the sg_tablesize.
- */
- lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
- GFP_KERNEL,
- &lpfc_ncmd->dma_handle);
- if (!lpfc_ncmd->data) {
- kfree(lpfc_ncmd);
- break;
- }
- lxri = lpfc_sli4_next_xritag(phba);
- if (lxri == NO_XRI) {
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- lpfc_ncmd->data, lpfc_ncmd->dma_handle);
- kfree(lpfc_ncmd);
- break;
- }
+ lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
+
+ if (lpfc_ncmd) {
pwqeq = &(lpfc_ncmd->cur_iocbq);
wqe = &pwqeq->wqe;
- /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
- iotag = lpfc_sli_next_iotag(phba, pwqeq);
- if (iotag == 0) {
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- lpfc_ncmd->data, lpfc_ncmd->dma_handle);
- kfree(lpfc_ncmd);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6121 Failed to allocated IOTAG for"
- " XRI:0x%x\n", lxri);
- lpfc_sli4_free_xri(phba, lxri);
- break;
- }
- pwqeq->sli4_lxritag = lxri;
- pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
- pwqeq->iocb_flag |= LPFC_IO_NVME;
- pwqeq->context1 = lpfc_ncmd;
+ /* Setup key fields in buffer that may have been changed
+ * if other protocols used this buffer.
+ */
+ pwqeq->iocb_flag = LPFC_IO_NVME;
pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
-
- /* Initialize local short-hand pointers. */
- lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
- sgl = lpfc_ncmd->nvme_sgl;
- pdma_phys_sgl = lpfc_ncmd->dma_handle;
- lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
+ lpfc_ncmd->start_time = jiffies;
+ lpfc_ncmd->flags = 0;
/* Rsp SGE will be filled in when we rcv an IO
* from the NVME Layer to be sent.
* The cmd is going to be embedded so we need a SKIP SGE.
*/
+ sgl = lpfc_ncmd->dma_sgl;
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
/* Fill in word 3 / sgl_len during cmd submission */
- lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
-
/* Initialize WQE */
memset(wqe, 0, sizeof(union lpfc_wqe));
- /* add the nvme buffer to a post list */
- list_add_tail(&lpfc_ncmd->list, &post_nblist);
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
- phba->sli4_hba.nvme_xri_cnt++;
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
- "6114 Allocate %d out of %d requested new NVME "
- "buffers\n", bcnt, num_to_alloc);
-
- /* post the list of nvme buffer sgls to port if available */
- if (!list_empty(&post_nblist))
- num_posted = lpfc_post_nvme_sgl_list(phba,
- &post_nblist, bcnt);
- else
- num_posted = 0;
-
- return num_posted;
-}
-
-static inline struct lpfc_nvme_buf *
-lpfc_nvme_buf(struct lpfc_hba *phba)
-{
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
-
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- phba->get_nvme_bufs--;
- return lpfc_ncmd;
- }
- return NULL;
-}
-
-/**
- * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
- * @phba: The HBA for which this call is being executed.
- *
- * This routine removes a nvme buffer from head of @phba lpfc_nvme_buf_list list
- * and returns to caller.
- *
- * Return codes:
- * NULL - Error
- * Pointer to lpfc_nvme_buf - Success
- **/
-static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- int expedite)
-{
- struct lpfc_nvme_buf *lpfc_ncmd = NULL;
- unsigned long iflag = 0;
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+ }
- spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
- if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
- lpfc_ncmd = lpfc_nvme_buf(phba);
- if (!lpfc_ncmd) {
- spin_lock(&phba->nvme_buf_list_put_lock);
- list_splice(&phba->lpfc_nvme_buf_list_put,
- &phba->lpfc_nvme_buf_list_get);
- phba->get_nvme_bufs += phba->put_nvme_bufs;
- INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
- phba->put_nvme_bufs = 0;
- spin_unlock(&phba->nvme_buf_list_put_lock);
- if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
- lpfc_ncmd = lpfc_nvme_buf(phba);
+ } else {
+ qp = &phba->sli4_hba.hdwq[idx];
+ qp->empty_io_bufs++;
}
- spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
- if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
- atomic_inc(&ndlp->cmd_pending);
- lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
- }
return lpfc_ncmd;
}
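
lpfc_get_nvme_buf() is now a thin wrapper: the buffer comes from the shared per-hardware-queue io_buf pool, its protocol fields are re-stamped for NVMe, and exhaustion bumps the queue's empty_io_bufs counter. A hedged sketch of the pool-get shape this leans on; lpfc_get_io_buf()'s real internals differ (for example the private/public pool split behind cfg_xri_rebalancing):

#include <linux/list.h>
#include <linux/spinlock.h>

struct io_buf_sk {
	struct list_head list;
};

struct hdwq_pool {
	spinlock_t lock;
	struct list_head free_list;
	unsigned long empty_io_bufs;	/* allocation failures, per queue */
};

static struct io_buf_sk *pool_get(struct hdwq_pool *qp)
{
	struct io_buf_sk *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&qp->lock, flags);
	if (!list_empty(&qp->free_list)) {
		buf = list_first_entry(&qp->free_list, struct io_buf_sk, list);
		list_del_init(&buf->list);
	} else {
		qp->empty_io_bufs++;	/* pool ran dry on this queue */
	}
	spin_unlock_irqrestore(&qp->lock, flags);
	return buf;
}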
@@ -2385,22 +2001,23 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* @lpfc_ncmd: The nvme buffer which is being released.
*
* This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
- * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer
+ * lpfc_io_buf_list list. For SLI4, XRIs are tied to the nvme buffer
* and cannot be reused for at least RA_TOV amount of time if it was
* aborted.
**/
static void
-lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
+lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
+ struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
- if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
+ if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
- lpfc_ncmd->nonsg_phys = 0;
lpfc_ncmd->ndlp = NULL;
- lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
+ lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
+ qp = lpfc_ncmd->hdwq;
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
@@ -2408,20 +2025,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->cur_iocbq.iotag);
- spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
- iflag);
+ spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
list_add_tail(&lpfc_ncmd->list,
- &phba->sli4_hba.lpfc_abts_nvme_buf_list);
- spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
- iflag);
- } else {
- lpfc_ncmd->nvmeCmd = NULL;
- lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
- spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
- list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
- phba->put_nvme_bufs++;
- spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
- }
+ &qp->lpfc_abts_nvme_buf_list);
+ qp->abts_nvme_io_bufs++;
+ spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
+ } else
+ lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
/**
@@ -2448,8 +2058,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
struct nvme_fc_port_info nfcp_info;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_ctrl_stat *cstat;
- int len, i;
/* Initialize this localport instance. The vport wwn usage ensures
* that NPIV is accounted for.
@@ -2464,12 +2072,13 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
* allocate + 3, one for cmd, one for rsp and one for this alignment
*/
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
- lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
- cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
- phba->cfg_nvme_io_channel), GFP_KERNEL);
- if (!cstat)
- return -ENOMEM;
+ /* Advertise how many hw queues we support based on fcp_io_sched */
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
+ lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
+ else
+ lpfc_nvme_template.max_hw_queues =
+ phba->sli4_hba.num_present_cpu;
/* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area.
@@ -2493,7 +2102,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
vport->localport = localport;
lport->vport = vport;
- lport->cstat = cstat;
vport->nvmei_support = 1;
atomic_set(&lport->xmt_fcp_noxri, 0);
@@ -2510,25 +2118,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
atomic_set(&lport->cmpl_ls_err, 0);
atomic_set(&lport->fc4NvmeLsRequests, 0);
atomic_set(&lport->fc4NvmeLsCmpls, 0);
-
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
- cstat = &lport->cstat[i];
- atomic_set(&cstat->fc4NvmeInputRequests, 0);
- atomic_set(&cstat->fc4NvmeOutputRequests, 0);
- atomic_set(&cstat->fc4NvmeControlRequests, 0);
- atomic_set(&cstat->fc4NvmeIoCmpls, 0);
- }
-
- /* Don't post more new bufs if repost already recovered
- * the nvme sgls.
- */
- if (phba->sli4_hba.nvme_xri_cnt == 0) {
- len = lpfc_new_nvme_buf(vport,
- phba->sli4_hba.nvme_xri_max);
- vport->phba->total_nvme_bufs += len;
- }
- } else {
- kfree(cstat);
}
return ret;
@@ -2591,7 +2180,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_ctrl_stat *cstat;
int ret;
DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
@@ -2600,7 +2188,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
localport = vport->localport;
lport = (struct lpfc_nvme_lport *)localport->private;
- cstat = lport->cstat;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
@@ -2617,7 +2204,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
*/
lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
vport->localport = NULL;
- kfree(cstat);
/* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the
@@ -2909,27 +2495,28 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri)
+ struct sli4_wcqe_xri_aborted *axri, int idx)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
struct nvmefc_fcp_req *nvme_cmd = NULL;
struct lpfc_nodelist *ndlp;
+ struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
+ qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_lock(&qp->abts_nvme_buf_list_lock);
list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
- &phba->sli4_hba.lpfc_abts_nvme_buf_list,
- list) {
+ &qp->lpfc_abts_nvme_buf_list, list) {
if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
list_del_init(&lpfc_ncmd->list);
+ qp->abts_nvme_io_bufs--;
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
- spin_unlock(
- &phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock(&qp->abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_ncmd->ndlp;
@@ -2955,7 +2542,7 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
return;
}
}
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock(&qp->abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
@@ -2979,14 +2566,16 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
u32 i, wait_cnt = 0;
- if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
+ if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
return;
/* Cycle through all NVME rings and make sure all outstanding
* WQEs have been removed from the txcmplqs.
*/
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
- pring = phba->sli4_hba.nvme_wq[i]->pring;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ if (!phba->sli4_hba.hdwq[i].nvme_wq)
+ continue;
+ pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
if (!pring)
continue;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index b234d0298994..593c48ff634e 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -30,6 +30,9 @@
#define LPFC_NVME_FB_SHIFT 9
#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
+#define LPFC_MAX_NVME_INFO_TMP_LEN 100
+#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
+
#define lpfc_ndlp_get_nrport(ndlp) \
((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \
? NULL : ndlp->nrport)
@@ -40,19 +43,11 @@ struct lpfc_nvme_qhandle {
uint32_t cpu_id; /* current cpu id at time of create */
};
-struct lpfc_nvme_ctrl_stat {
- atomic_t fc4NvmeInputRequests;
- atomic_t fc4NvmeOutputRequests;
- atomic_t fc4NvmeControlRequests;
- atomic_t fc4NvmeIoCmpls;
-};
-
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
struct completion *lport_unreg_cmp;
/* Add stats counters here */
- struct lpfc_nvme_ctrl_stat *cstat;
atomic_t fc4NvmeLsRequests;
atomic_t fc4NvmeLsCmpls;
atomic_t xmt_fcp_noxri;
@@ -76,57 +71,6 @@ struct lpfc_nvme_rport {
struct completion rport_unreg_done;
};
-struct lpfc_nvme_buf {
- struct list_head list;
- struct nvmefc_fcp_req *nvmeCmd;
- struct lpfc_nvme_rport *nrport;
- struct lpfc_nodelist *ndlp;
-
- uint32_t timeout;
-
- uint16_t flags; /* TBD convert exch_busy to flags */
-#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
-#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
- uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
- uint16_t status; /* From IOCB Word 7- ulpStatus */
- uint16_t cpu;
- uint16_t qidx;
- uint16_t sqid;
- uint32_t result; /* From IOCB Word 4. */
-
- uint32_t seg_cnt; /* Number of scatter-gather segments returned by
- * dma_map_sg. The driver needs this for calls
- * to dma_unmap_sg.
- */
- dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
-
- /*
- * data and dma_handle are the kernel virtual and bus address of the
- * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
- * gather bde list that supports the sg_tablesize value.
- */
- void *data;
- dma_addr_t dma_handle;
-
- struct sli4_sge *nvme_sgl;
- dma_addr_t dma_phys_sgl;
-
- /* cur_iocbq has phys of the dma-able buffer.
- * Iotag is in here
- */
- struct lpfc_iocbq cur_iocbq;
-
- wait_queue_head_t *waitq;
- unsigned long start_time;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint64_t ts_cmd_start;
- uint64_t ts_last_cmd;
- uint64_t ts_cmd_wqput;
- uint64_t ts_isr_cmpl;
- uint64_t ts_data_nvme;
-#endif
-};
-
struct lpfc_nvme_fcpreq_priv {
- struct lpfc_nvme_buf *nvme_buf;
+ struct lpfc_io_buf *nvme_buf;
};
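
With struct lpfc_nvme_buf deleted, the SCSI and NVMe paths share one buffer type. A rough sketch of the field set that consolidation implies; this is an illustrative subset, not the actual struct lpfc_io_buf definition:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct sli4_sge;			/* opaque here */

struct io_buf_sketch {
	struct list_head list;
	void *protocol_cmd;		/* scsi_cmnd or nvmefc_fcp_req */
	struct sli4_sge *dma_sgl;	/* shared SGL, was nvme_sgl */
	spinlock_t buf_lock;		/* serializes cmpl vs. abort */
	u16 hdwq_no;			/* owning hardware queue index */
	u16 flags;			/* LPFC_SBUF_XBUSY and friends */
};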
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 95fee83090eb..361e2b103648 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -73,6 +73,9 @@ static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_nvmet_rcv_ctx *);
+static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
+
+static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
@@ -220,21 +223,19 @@ lpfc_nvmet_cmd_template(void)
void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
- unsigned long iflag;
+ lockdep_assert_held(&ctxp->ctxlock);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6313 NVMET Defer ctx release xri x%x flg x%x\n",
ctxp->oxid, ctxp->flag);
- spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
- if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
- spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
- iflag);
+ if (ctxp->flag & LPFC_NVMET_CTX_RLS)
return;
- }
+
ctxp->flag |= LPFC_NVMET_CTX_RLS;
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
- spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}
/**
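
lpfc_nvmet_defer_release() now requires the caller to hold ctxp->ctxlock and states that contract with lockdep_assert_held() rather than taking the lock itself. A minimal sketch of the pattern (struct and flag names illustrative):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct ctx_sk {
	spinlock_t ctxlock;
	unsigned int flag;
};

#define CTX_RLS_SK	0x1	/* already queued for release */

static void defer_release_sketch(struct ctx_sk *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);	/* caller holds ctxlock */

	if (ctxp->flag & CTX_RLS_SK)
		return;				/* idempotent */
	ctxp->flag |= CTX_RLS_SK;
	/* ... move the context to the abort list under its own lock ... */
}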
@@ -325,7 +326,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
struct rqb_dmabuf *nvmebuf;
struct lpfc_nvmet_ctx_info *infop;
uint32_t *payload;
- uint32_t size, oxid, sid, rc;
+ uint32_t size, oxid, sid;
int cpu;
unsigned long iflag;
@@ -341,6 +342,20 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
"6411 NVMET free, already free IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}
+
+ if (ctxp->rqb_buffer) {
+ nvmebuf = ctxp->rqb_buffer;
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ ctxp->rqb_buffer = NULL;
+ if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+ ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ } else {
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ }
+ }
ctxp->state = LPFC_NVMET_STE_FREE;
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
@@ -388,46 +403,30 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
}
#endif
atomic_inc(&tgtp->rcv_fcp_cmd_in);
- /*
- * The calling sequence should be:
- * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
- * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
- * When we return from nvmet_fc_rcv_fcp_req, all relevant info
- * the NVME command / FC header is stored.
- * A buffer has already been reposted for this IO, so just free
- * the nvmebuf.
- */
- rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
- payload, size);
- /* Process FCP command */
- if (rc == 0) {
- ctxp->rqb_buffer = NULL;
- atomic_inc(&tgtp->rcv_fcp_cmd_out);
- nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
- return;
- }
+ /* flag new work queued, replacement buffer has already
+ * been reposted
+ */
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
- /* Processing of FCP command is deferred */
- if (rc == -EOVERFLOW) {
- lpfc_nvmeio_data(phba,
- "NVMET RCV BUSY: xri x%x sz %d "
- "from %06x\n",
- oxid, size, sid);
- atomic_inc(&tgtp->rcv_fcp_cmd_out);
- return;
+ if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6181 Unable to queue deferred work "
+ "for oxid x%x. "
+ "FCP Drop IO [x%x x%x x%x]\n",
+ ctxp->oxid,
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out),
+ atomic_read(&tgtp->xmt_fcp_release));
+
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ lpfc_nvmet_defer_release(phba, ctxp);
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
}
- atomic_inc(&tgtp->rcv_fcp_cmd_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
- ctxp->oxid, rc,
- atomic_read(&tgtp->rcv_fcp_cmd_in),
- atomic_read(&tgtp->rcv_fcp_cmd_out),
- atomic_read(&tgtp->xmt_fcp_release));
-
- lpfc_nvmet_defer_release(phba, ctxp);
- lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
- nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
return;
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
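
Instead of calling nvmet_fc_rcv_fcp_req() inline, the receive path marks the context LPFC_NVMET_CTX_REUSE_WQ and defers processing to a workqueue; queue_work() returns false only when the work item is already pending, and that failure is treated as a drop-and-abort. A sketch of the defer-or-abort shape:

#include <linux/workqueue.h>

static void rcv_fcp_defer_sketch(struct workqueue_struct *wq,
				 struct work_struct *defer_work,
				 void (*drop_and_abort)(void *), void *ctx)
{
	/* false means the work was already queued; treat as failure */
	if (!queue_work(wq, defer_work))
		drop_and_abort(ctx);	/* defer release + issue abort */
}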
@@ -744,16 +743,6 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ktime_get_ns();
}
}
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- id = smp_processor_id();
- if (ctxp->cpu != id)
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6703 CPU Check cmpl: "
- "cpu %d expect %d\n",
- id, ctxp->cpu);
- if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
- phba->cpucheck_cmpl_io[id]++;
- }
#endif
rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -771,19 +760,22 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
ctxp->ts_data_nvme = ktime_get_ns();
}
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- id = smp_processor_id();
+#endif
+ rsp->done(rsp);
+ }
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+ id = smp_processor_id();
+ if (id < LPFC_CHECK_CPU_CNT) {
if (ctxp->cpu != id)
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6704 CPU Check cmdcmpl: "
"cpu %d expect %d\n",
id, ctxp->cpu);
- if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
- phba->cpucheck_ccmpl_io[id]++;
+ phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
}
-#endif
- rsp->done(rsp);
}
+#endif
}
static int
@@ -852,7 +844,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
- rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
if (rc == WQE_SUCCESS) {
/*
* Okay to repost buffer here, but wait till cmpl
@@ -908,18 +900,22 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
else
ctxp->ts_nvme_data = ktime_get_ns();
}
+
+ /* Setup the hdw queue if not already set */
+ if (!ctxp->hdwq)
+ ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
+
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
int id = smp_processor_id();
- ctxp->cpu = id;
- if (id < LPFC_CHECK_CPU_CNT)
- phba->cpucheck_xmt_io[id]++;
- if (rsp->hwqid != id) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6705 CPU Check OP: "
- "cpu %d expect %d\n",
- id, rsp->hwqid);
- ctxp->cpu = rsp->hwqid;
+ if (id < LPFC_CHECK_CPU_CNT) {
+ if (rsp->hwqid != id)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6705 CPU Check OP: "
+ "cpu %d expect %d\n",
+ id, rsp->hwqid);
+ phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
}
+ ctxp->cpu = id; /* Setup cpu for cmpl check */
}
#endif
@@ -954,7 +950,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->oxid, rsp->op, rsp->rsplen);
ctxp->flag |= LPFC_NVMET_IO_INP;
- rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (!ctxp->ts_cmd_nvme)
@@ -973,7 +969,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE
*/
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
- wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
+ wq = ctxp->hdwq->nvme_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1024,6 +1020,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
+ if (!ctxp->hdwq)
+ ctxp->hdwq = &phba->sli4_hba.hdwq[0];
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -1034,7 +1033,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
spin_lock_irqsave(&ctxp->ctxlock, flags);
- ctxp->state = LPFC_NVMET_STE_ABORT;
/* Since iaab/iaar are NOT set, we need to check
* if the firmware is in the process of aborting IO
@@ -1046,13 +1044,14 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
ctxp->flag |= LPFC_NVMET_ABORT_OP;
if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
- wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
- spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+ wq = ctxp->hdwq->nvme_wq;
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return;
}
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* A state of LPFC_NVMET_STE_RCV means we have just received
* the NVME command and have not started processing it.
@@ -1064,7 +1063,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
else
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
- spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}
static void
@@ -1078,14 +1076,18 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
unsigned long flags;
bool aborting = false;
- if (ctxp->state != LPFC_NVMET_STE_DONE &&
- ctxp->state != LPFC_NVMET_STE_ABORT) {
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
+ if (ctxp->flag & LPFC_NVMET_XBUSY)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6027 NVMET release with XBUSY flag x%x"
+ " oxid x%x\n",
+ ctxp->flag, ctxp->oxid);
+ else if (ctxp->state != LPFC_NVMET_STE_DONE &&
+ ctxp->state != LPFC_NVMET_STE_ABORT)
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6413 NVMET release bad state %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
- }
- spin_lock_irqsave(&ctxp->ctxlock, flags);
if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
(ctxp->flag & LPFC_NVMET_XBUSY)) {
aborting = true;
@@ -1114,6 +1116,8 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
struct lpfc_hba *phba = ctxp->phba;
+ unsigned long iflag;
+
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
ctxp->oxid, ctxp->size, smp_processor_id());
@@ -1132,6 +1136,9 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
/* Free the nvmebuf since a new buffer already replaced it */
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ ctxp->rqb_buffer = NULL;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1163,9 +1170,9 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
list_for_each_entry_safe(ctx_buf, next_ctx_buf,
&infop->nvmet_ctx_list, list) {
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del_init(&ctx_buf->list);
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
@@ -1195,9 +1202,9 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
/* Cycle through the entire CPU context list for every MRQ */
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
- for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+ for_each_present_cpu(j) {
+ infop = lpfc_get_ctx_list(phba, j, i);
__lpfc_nvmet_clean_io_for_cpu(phba, infop);
- infop++; /* next */
}
}
kfree(phba->sli4_hba.nvmet_ctx_info);
@@ -1212,14 +1219,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
union lpfc_wqe128 *wqe;
struct lpfc_nvmet_ctx_info *last_infop;
struct lpfc_nvmet_ctx_info *infop;
- int i, j, idx;
+ int i, j, idx, cpu;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6403 Allocate NVMET resources for %d XRIs\n",
phba->sli4_hba.nvmet_xri_cnt);
phba->sli4_hba.nvmet_ctx_info = kcalloc(
- phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+ phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
if (!phba->sli4_hba.nvmet_ctx_info) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1247,13 +1254,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
* of the IO completion. Thus a context that was allocated for MRQ A
* whose IO completed on CPU B will be freed to cpuB/mrqA.
*/
- infop = phba->sli4_hba.nvmet_ctx_info;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for_each_possible_cpu(i) {
for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+ infop = lpfc_get_ctx_list(phba, i, j);
INIT_LIST_HEAD(&infop->nvmet_ctx_list);
spin_lock_init(&infop->nvmet_ctx_list_lock);
infop->nvmet_ctx_list_cnt = 0;
- infop++;
}
}
@@ -1263,8 +1269,10 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
* MRQ 1 cycling thru CPUs 0 - X, and so on.
*/
for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
- last_infop = lpfc_get_ctx_list(phba, 0, j);
- for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
+ last_infop = lpfc_get_ctx_list(phba,
+ cpumask_first(cpu_present_mask),
+ j);
+ for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
infop = lpfc_get_ctx_list(phba, i, j);
infop->nvmet_ctx_next_cpu = last_infop;
last_infop = infop;
@@ -1275,6 +1283,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
* received command on a per xri basis.
*/
idx = 0;
+ cpu = cpumask_first(cpu_present_mask);
for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
if (!ctx_buf) {
@@ -1322,13 +1331,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
+ INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
/*
* Add ctx to MRQidx context list. Our initial assumption
* is MRQidx will be associated with CPUidx. This association
* can change on the fly.
*/
- infop = lpfc_get_ctx_list(phba, idx, idx);
+ infop = lpfc_get_ctx_list(phba, cpu, idx);
spin_lock(&infop->nvmet_ctx_list_lock);
list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
infop->nvmet_ctx_list_cnt++;
@@ -1336,11 +1346,18 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
/* Spread ctx structures evenly across all MRQs */
idx++;
- if (idx >= phba->cfg_nvmet_mrq)
+ if (idx >= phba->cfg_nvmet_mrq) {
idx = 0;
+ cpu = cpumask_first(cpu_present_mask);
+ continue;
+ }
+ cpu = cpumask_next(cpu, cpu_present_mask);
+ if (cpu == nr_cpu_ids)
+ cpu = cpumask_first(cpu_present_mask);
+
}
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for_each_present_cpu(i) {
for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
infop = lpfc_get_ctx_list(phba, i, j);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
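A minimal sketch of the cpumask round-robin the hunks above adopt in place
of bare 0..num_present_cpu indexing, assuming only <linux/cpumask.h>; the
helper name is illustrative, not part of the patch:

	static int example_next_present_cpu(int cpu)
	{
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu >= nr_cpu_ids)	/* ran past the last present CPU */
			cpu = cpumask_first(cpu_present_mask);
		return cpu;
	}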
@@ -1378,7 +1395,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
* allocate + 3, one for cmd, one for rsp and one for this alignment
*/
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
- lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
+ lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
@@ -1503,13 +1520,14 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
}
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
continue;
+ spin_lock(&ctxp->ctxlock);
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
@@ -1519,7 +1537,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
released = true;
}
ctxp->flag &= ~LPFC_NVMET_XBUSY;
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock(&ctxp->ctxlock);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -1543,14 +1562,13 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
lpfc_worker_wake_up(phba);
return;
}
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
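For reference, the lock nesting the two abort-handling functions above now
follow, outermost to innermost, restated flat from the hunks themselves:

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_lock(&ctxp->ctxlock);
	/* test LPFC_NVMET_CTX_RLS / clear LPFC_NVMET_XBUSY here */
	spin_unlock(&ctxp->ctxlock);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);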
int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr)
-
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_hba *phba = vport->phba;
@@ -1562,14 +1580,14 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
xri = be16_to_cpu(fc_hdr->fh_ox_id);
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
continue;
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
spin_lock_irqsave(&ctxp->ctxlock, iflag);
@@ -1590,7 +1608,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
return 0;
}
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
@@ -1658,6 +1676,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *nvmewqeq;
+ struct lpfc_nvmet_rcv_ctx *ctxp;
unsigned long iflags;
int rc;
@@ -1671,7 +1690,8 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
+ ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
if (rc == -EBUSY) {
/* WQ was full again, so put it back on the list */
@@ -1699,8 +1719,8 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
return;
if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
- wq = phba->sli4_hba.nvme_wq[qidx];
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
@@ -1775,6 +1795,7 @@ dropit:
ctxp->state = LPFC_NVMET_STE_LS_RCV;
ctxp->entry_cnt = 1;
ctxp->rqb_buffer = (void *)nvmebuf;
+ ctxp->hdwq = &phba->sli4_hba.hdwq[0];
lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
oxid, size, sid);
@@ -1814,6 +1835,86 @@ dropit:
#endif
}
+static void
+lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+ struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+ struct lpfc_hba *phba = ctxp->phba;
+ struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+ struct lpfc_nvmet_tgtport *tgtp;
+ uint32_t *payload;
+ uint32_t rc;
+ unsigned long iflags;
+
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6159 process_rcv_fcp_req, nvmebuf is NULL, "
+ "oxid: x%x flg: x%x state: x%x\n",
+ ctxp->oxid, ctxp->flag, ctxp->state);
+ spin_lock_irqsave(&ctxp->ctxlock, iflags);
+ lpfc_nvmet_defer_release(phba, ctxp);
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
+ return;
+ }
+
+ payload = (uint32_t *)(nvmebuf->dbuf.virt);
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ /*
+ * The calling sequence should be:
+ * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
+ * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+ * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
+ * the NVME command / FC header is stored.
+ * A buffer has already been reposted for this IO, so just free
+ * the nvmebuf.
+ */
+ rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+ payload, ctxp->size);
+ /* Process FCP command */
+ if (rc == 0) {
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
+
+ /* Processing of FCP command is deferred */
+ if (rc == -EOVERFLOW) {
+ lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
+ "from %06x\n",
+ ctxp->oxid, ctxp->size, ctxp->sid);
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ atomic_inc(&tgtp->defer_fod);
+ return;
+ }
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+ ctxp->oxid, rc,
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out),
+ atomic_read(&tgtp->xmt_fcp_release));
+ lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
+ ctxp->oxid, ctxp->size, ctxp->sid);
+ spin_lock_irqsave(&ctxp->ctxlock, iflags);
+ lpfc_nvmet_defer_release(phba, ctxp);
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
+#endif
+}
+
+static void
+lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+ struct lpfc_nvmet_ctxbuf *ctx_buf =
+ container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
+
+ lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+#endif
+}
+
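The defer_work item added above is armed once per context buffer by the
INIT_WORK() in lpfc_nvmet_setup_io_context(); a path that cannot process
the request inline then only needs to queue it. The queueing call site is
outside this hunk, so the line below is an assumption sketched from the
standard workqueue API rather than patch content:

	schedule_work(&ctx_buf->defer_work);

lpfc_nvmet_fcp_rqst_defer_work() then recovers the buffer via
container_of() and calls lpfc_nvmet_process_rcv_fcp_req() in process
context.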
static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
struct lpfc_nvmet_ctx_info *current_infop)
@@ -1838,7 +1939,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
else
get_infop = current_infop->nvmet_ctx_next_cpu;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
if (get_infop == current_infop) {
get_infop = get_infop->nvmet_ctx_next_cpu;
continue;
@@ -1896,12 +1997,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
struct lpfc_nvmet_ctxbuf *ctx_buf;
struct lpfc_nvmet_ctx_info *current_infop;
uint32_t *payload;
- uint32_t size, oxid, sid, rc, qno;
+ uint32_t size, oxid, sid, qno;
unsigned long iflag;
int current_cpu;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint32_t id;
-#endif
if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
return;
@@ -1910,11 +2008,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6157 NVMET FCP Drop IO\n");
- oxid = 0;
- size = 0;
- sid = 0;
- ctxp = NULL;
- goto dropit;
+ if (nvmebuf)
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
+ return;
}
/*
@@ -1942,9 +2038,14 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
- id = smp_processor_id();
- if (id < LPFC_CHECK_CPU_CNT)
- phba->cpucheck_rcv_io[id]++;
+ if (current_cpu < LPFC_CHECK_CPU_CNT) {
+ if (idx != current_cpu)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6703 CPU Check rcv: "
+ "cpu %d expect %d\n",
+ current_cpu, idx);
+ phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
+ }
}
#endif
@@ -1995,6 +2096,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
ctxp->rqb_buffer = (void *)nvmebuf;
+ ctxp->hdwq = NULL;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -2015,67 +2117,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
#endif
atomic_inc(&tgtp->rcv_fcp_cmd_in);
- /*
- * The calling sequence should be:
- * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
- * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
- * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
- * the NVME command / FC header is stored, so we are free to repost
- * the buffer.
- */
- rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
- payload, size);
-
- /* Process FCP command */
- if (rc == 0) {
- ctxp->rqb_buffer = NULL;
- atomic_inc(&tgtp->rcv_fcp_cmd_out);
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
- return;
- }
-
- /* Processing of FCP command is deferred */
- if (rc == -EOVERFLOW) {
- /*
- * Post a brand new DMA buffer to RQ and defer
- * freeing rcv buffer till .defer_rcv callback
- */
- qno = nvmebuf->idx;
- lpfc_post_rq_buffer(
- phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
- phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
-
- lpfc_nvmeio_data(phba,
- "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
- oxid, size, sid);
- atomic_inc(&tgtp->rcv_fcp_cmd_out);
- atomic_inc(&tgtp->defer_fod);
- return;
- }
- ctxp->rqb_buffer = nvmebuf;
-
- atomic_inc(&tgtp->rcv_fcp_cmd_drop);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
- ctxp->oxid, rc,
- atomic_read(&tgtp->rcv_fcp_cmd_in),
- atomic_read(&tgtp->rcv_fcp_cmd_out),
- atomic_read(&tgtp->xmt_fcp_release));
-dropit:
- lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
- oxid, size, sid);
- if (oxid) {
- lpfc_nvmet_defer_release(phba, ctxp);
- lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
- return;
- }
-
- if (ctx_buf)
- lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
-
- if (nvmebuf)
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
}
/**
@@ -2660,15 +2702,17 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (ctxp->flag & LPFC_NVMET_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
ctxp->state = LPFC_NVMET_STE_DONE;
/* Check if we already received a free context call
* and we have completed processing an abort situation.
*/
- spin_lock_irqsave(&ctxp->ctxlock, flags);
if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
!(ctxp->flag & LPFC_NVMET_XBUSY)) {
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del(&ctxp->list);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
@@ -2734,6 +2778,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
if (ctxp->flag & LPFC_NVMET_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
@@ -2748,10 +2793,11 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* and we have completed processing an abort situation.
*/
ctxp->state = LPFC_NVMET_STE_DONE;
- spin_lock_irqsave(&ctxp->ctxlock, flags);
if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
!(ctxp->flag & LPFC_NVMET_XBUSY)) {
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del(&ctxp->list);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
@@ -2957,12 +3003,15 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
/* No failure to an ABTS request. */
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
/* Issue ABTS for this WQE based on iotag */
ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
if (!ctxp->abort_wqeq) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2970,11 +3019,13 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
abts_wqe = &abts_wqeq->wqe;
ctxp->state = LPFC_NVMET_STE_ABORT;
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
@@ -2995,7 +3046,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
"NVME Req now. hba_flag x%x oxid x%x\n",
phba->hba_flag, ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
@@ -3008,7 +3061,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
"still pending on oxid x%x\n",
ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return 0;
}
@@ -3052,7 +3107,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
- rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
+ if (!ctxp->hdwq)
+ ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
+
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
atomic_inc(&tgtp->xmt_abort_sol);
@@ -3060,7 +3118,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
atomic_inc(&tgtp->xmt_abort_rsp_error);
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6166 Failed ABORT issue_wqe with status x%x "
@@ -3069,7 +3129,6 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
return 1;
}
-
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_nvmet_rcv_ctx *ctxp,
@@ -3078,6 +3137,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_iocbq *abts_wqeq;
unsigned long flags;
+ bool released = false;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3104,7 +3164,10 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
- rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
+ if (!ctxp->hdwq)
+ ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
+
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
return 0;
@@ -3112,8 +3175,12 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
aerr:
spin_lock_irqsave(&ctxp->ctxlock, flags);
- if (ctxp->flag & LPFC_NVMET_CTX_RLS)
+ if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
+ spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_del(&ctxp->list);
+ spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ released = true;
+ }
ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
@@ -3121,7 +3188,8 @@ aerr:
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
ctxp->oxid, rc);
- lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+ if (released)
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
return 1;
}
@@ -3173,7 +3241,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
abts_wqeq->iocb_cmpl = 0;
abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
- rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
+ rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
atomic_inc(&tgtp->xmt_abort_unsol);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 0ec1082ce7ef..368deea2bcf8 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -137,9 +137,11 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
+#define LPFC_NVMET_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */
#define LPFC_NVMET_DEFER_WQFULL 0x40 /* Waiting on a free WQE */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
+ struct lpfc_sli4_hdw_queue *hdwq;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t ts_isr_cmd;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b4f1a840b3b4..a497b2c0cb79 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -83,9 +83,9 @@ lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
}
static void
-lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
-lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
@@ -180,9 +180,9 @@ lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
**/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
- struct lpfc_scsi_buf *lpfc_cmd)
+ struct lpfc_io_buf *lpfc_cmd)
{
- struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
if (sgl) {
sgl += 1;
sgl->word2 = le32_to_cpu(sgl->word2);
@@ -200,7 +200,7 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
* function updates the statistical data for the command completion.
**/
static void
-lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
@@ -389,12 +389,12 @@ static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_scsi_buf *psb;
+ struct lpfc_io_buf *psb;
struct ulp_bde64 *bpl;
IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
- dma_addr_t pdma_phys_bpl;
+ dma_addr_t pdma_phys_sgl;
uint16_t iotag;
int bcnt, bpl_size;
@@ -408,7 +408,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
(int)sizeof(struct fcp_rsp), bpl_size);
for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
- psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+ psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
if (!psb)
break;
@@ -438,14 +438,14 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
psb->fcp_cmnd = psb->data;
psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
- psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+ psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp);
/* Initialize local short-hand pointers. */
- bpl = psb->fcp_bpl;
+ bpl = (struct ulp_bde64 *)psb->dma_sgl;
pdma_phys_fcp_cmd = psb->dma_handle;
pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
- pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+ pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp);
/*
@@ -496,9 +496,9 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
iocb->un.fcpi64.bdl.bdeSize =
(2 * sizeof(struct ulp_bde64));
iocb->un.fcpi64.bdl.addrLow =
- putPaddrLow(pdma_phys_bpl);
+ putPaddrLow(pdma_phys_sgl);
iocb->un.fcpi64.bdl.addrHigh =
- putPaddrHigh(pdma_phys_bpl);
+ putPaddrHigh(pdma_phys_sgl);
iocb->ulpBdeCount = 1;
iocb->ulpLe = 1;
}
@@ -506,6 +506,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
psb->status = IOSTAT_SUCCESS;
/* Put it back into the SCSI buffer list */
psb->cur_iocbq.context1 = psb;
+ spin_lock_init(&psb->buf_lock);
lpfc_release_scsi_buf_s3(phba, psb);
}
@@ -524,20 +525,27 @@ void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_scsi_buf *psb, *next_psb;
+ struct lpfc_io_buf *psb, *next_psb;
+ struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
+ int idx;
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
+
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
- list_for_each_entry_safe(psb, next_psb,
- &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
- if (psb->rdata && psb->rdata->pnode
- && psb->rdata->pnode->vport == vport)
- psb->rdata = NULL;
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ qp = &phba->sli4_hba.hdwq[idx];
+
+ spin_lock(&qp->abts_scsi_buf_list_lock);
+ list_for_each_entry_safe(psb, next_psb,
+ &qp->lpfc_abts_scsi_buf_list, list) {
+ if (psb->rdata && psb->rdata->pnode &&
+ psb->rdata->pnode->vport == vport)
+ psb->rdata = NULL;
+ }
+ spin_unlock(&qp->abts_scsi_buf_list_lock);
}
- spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
@@ -551,11 +559,12 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
**/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri)
+ struct sli4_wcqe_xri_aborted *axri, int idx)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
- struct lpfc_scsi_buf *psb, *next_psb;
+ struct lpfc_io_buf *psb, *next_psb;
+ struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
struct lpfc_iocbq *iocbq;
int i;
@@ -565,16 +574,19 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
+
+ qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_lock(&qp->abts_scsi_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
- &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+ &qp->lpfc_abts_scsi_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del(&psb->list);
+ qp->abts_scsi_io_bufs--;
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
spin_unlock(
- &phba->sli4_hba.abts_scsi_buf_list_lock);
+ &qp->abts_scsi_buf_list_lock);
if (psb->rdata && psb->rdata->pnode)
ndlp = psb->rdata->pnode;
else
@@ -593,7 +605,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
return;
}
}
- spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_unlock(&qp->abts_scsi_buf_list_lock);
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
@@ -602,7 +614,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
continue;
if (iocbq->sli4_xritag != xri)
continue;
- psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+ psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
psb->exch_busy = 0;
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (!list_empty(&pring->txq))
@@ -614,359 +626,6 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
- * @phba: pointer to lpfc hba data structure.
- * @post_sblist: pointer to the scsi buffer list.
- *
- * This routine walks a list of scsi buffers that was passed in. It attempts
- * to construct blocks of scsi buffer sgls which contain contiguous xris and
- * uses the non-embedded SGL block post mailbox commands to post to the port.
- * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
- * embedded SGL post mailbox command for posting. The @post_sblist passed in
- * must be a local list, thus no lock is needed when manipulating the list.
- *
- * Returns: 0 = failure, non-zero number of successfully posted buffers.
- **/
-static int
-lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
- struct list_head *post_sblist, int sb_count)
-{
- struct lpfc_scsi_buf *psb, *psb_next;
- int status, sgl_size;
- int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
- dma_addr_t pdma_phys_bpl1;
- int last_xritag = NO_XRI;
- LIST_HEAD(prep_sblist);
- LIST_HEAD(blck_sblist);
- LIST_HEAD(scsi_sblist);
-
- /* sanity check */
- if (sb_count <= 0)
- return -EINVAL;
-
- sgl_size = phba->cfg_sg_dma_buf_size -
- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
-
- list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
- list_del_init(&psb->list);
- block_cnt++;
- if ((last_xritag != NO_XRI) &&
- (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
- /* a hole in xri block, form a sgl posting block */
- list_splice_init(&prep_sblist, &blck_sblist);
- post_cnt = block_cnt - 1;
- /* prepare list for next posting block */
- list_add_tail(&psb->list, &prep_sblist);
- block_cnt = 1;
- } else {
- /* prepare list for next posting block */
- list_add_tail(&psb->list, &prep_sblist);
- /* enough sgls for non-embed sgl mbox command */
- if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
- list_splice_init(&prep_sblist, &blck_sblist);
- post_cnt = block_cnt;
- block_cnt = 0;
- }
- }
- num_posting++;
- last_xritag = psb->cur_iocbq.sli4_xritag;
-
- /* end of repost sgl list condition for SCSI buffers */
- if (num_posting == sb_count) {
- if (post_cnt == 0) {
- /* last sgl posting block */
- list_splice_init(&prep_sblist, &blck_sblist);
- post_cnt = block_cnt;
- } else if (block_cnt == 1) {
- /* last single sgl with non-contiguous xri */
- if (sgl_size > SGL_PAGE_SIZE)
- pdma_phys_bpl1 = psb->dma_phys_bpl +
- SGL_PAGE_SIZE;
- else
- pdma_phys_bpl1 = 0;
- status = lpfc_sli4_post_sgl(phba,
- psb->dma_phys_bpl,
- pdma_phys_bpl1,
- psb->cur_iocbq.sli4_xritag);
- if (status) {
- /* failure, put on abort scsi list */
- psb->exch_busy = 1;
- } else {
- /* success, put on SCSI buffer list */
- psb->exch_busy = 0;
- psb->status = IOSTAT_SUCCESS;
- num_posted++;
- }
- /* success, put on SCSI buffer sgl list */
- list_add_tail(&psb->list, &scsi_sblist);
- }
- }
-
- /* continue until a nembed page worth of sgls */
- if (post_cnt == 0)
- continue;
-
- /* post block of SCSI buffer list sgls */
- status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
- post_cnt);
-
- /* don't reset xritag due to hole in xri block */
- if (block_cnt == 0)
- last_xritag = NO_XRI;
-
- /* reset SCSI buffer post count for next round of posting */
- post_cnt = 0;
-
- /* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
- while (!list_empty(&blck_sblist)) {
- list_remove_head(&blck_sblist, psb,
- struct lpfc_scsi_buf, list);
- if (status) {
- /* failure, put on abort scsi list */
- psb->exch_busy = 1;
- } else {
- /* success, put on SCSI buffer list */
- psb->exch_busy = 0;
- psb->status = IOSTAT_SUCCESS;
- num_posted++;
- }
- list_add_tail(&psb->list, &scsi_sblist);
- }
- }
- /* Push SCSI buffers with sgl posted to the available list */
- while (!list_empty(&scsi_sblist)) {
- list_remove_head(&scsi_sblist, psb,
- struct lpfc_scsi_buf, list);
- lpfc_release_scsi_buf_s4(phba, psb);
- }
- return num_posted;
-}
-
-/**
- * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine walks the list of scsi buffers that have been allocated and
- * repost them to the port by using SGL block post. This is needed after a
- * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
- * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
- * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
- *
- * Returns: 0 = success, non-zero failure.
- **/
-int
-lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
-{
- LIST_HEAD(post_sblist);
- int num_posted, rc = 0;
-
- /* get all SCSI buffers need to repost to a local list */
- spin_lock_irq(&phba->scsi_buf_list_get_lock);
- spin_lock(&phba->scsi_buf_list_put_lock);
- list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
- list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
- spin_unlock(&phba->scsi_buf_list_put_lock);
- spin_unlock_irq(&phba->scsi_buf_list_get_lock);
-
- /* post the list of scsi buffer sgls to port if available */
- if (!list_empty(&post_sblist)) {
- num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
- phba->sli4_hba.scsi_xri_cnt);
- /* failed to post any scsi buffer, return error */
- if (num_posted == 0)
- rc = -EIO;
- }
- return rc;
-}
-
-/**
- * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
- * @vport: The virtual port for which this call is being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
- *
- * This routine allocates scsi buffers for a device with SLI-4 interface spec;
- * the scsi buffer contains all the necessary information needed to initiate
- * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and putting
- * them on a list, it posts them to the port by using SGL block post.
- *
- * Return codes:
- * int - number of scsi buffers that were allocated and posted.
- * 0 = failure, less than num_to_alloc is a partial failure.
- **/
-static int
-lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_scsi_buf *psb;
- struct sli4_sge *sgl;
- IOCB_t *iocb;
- dma_addr_t pdma_phys_fcp_cmd;
- dma_addr_t pdma_phys_fcp_rsp;
- dma_addr_t pdma_phys_bpl;
- uint16_t iotag, lxri = 0;
- int bcnt, num_posted, sgl_size;
- LIST_HEAD(prep_sblist);
- LIST_HEAD(post_sblist);
- LIST_HEAD(scsi_sblist);
-
- sgl_size = phba->cfg_sg_dma_buf_size -
- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
-
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
- num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
- (int)sizeof(struct fcp_cmnd),
- (int)sizeof(struct fcp_rsp));
-
- for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
- psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
- if (!psb)
- break;
- /*
- * Get memory from the pci pool to map the virt space to
- * pci bus space for an I/O. The DMA buffer includes space
- * for the struct fcp_cmnd, struct fcp_rsp and the number
- * of bde's necessary to support the sg_tablesize.
- */
- psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
- GFP_KERNEL, &psb->dma_handle);
- if (!psb->data) {
- kfree(psb);
- break;
- }
-
- /*
- * 4K Page alignment is CRITICAL to BlockGuard, double check
- * to be sure.
- */
- if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- (((unsigned long)(psb->data) &
- (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "3369 Memory alignment error "
- "addr=%lx\n",
- (unsigned long)psb->data);
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- psb->data, psb->dma_handle);
- kfree(psb);
- break;
- }
-
-
- lxri = lpfc_sli4_next_xritag(phba);
- if (lxri == NO_XRI) {
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- psb->data, psb->dma_handle);
- kfree(psb);
- break;
- }
-
- /* Allocate iotag for psb->cur_iocbq. */
- iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
- if (iotag == 0) {
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- psb->data, psb->dma_handle);
- kfree(psb);
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "3368 Failed to allocate IOTAG for"
- " XRI:0x%x\n", lxri);
- lpfc_sli4_free_xri(phba, lxri);
- break;
- }
- psb->cur_iocbq.sli4_lxritag = lxri;
- psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
- psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
- psb->fcp_bpl = psb->data;
- psb->fcp_cmnd = (psb->data + sgl_size);
- psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
- sizeof(struct fcp_cmnd));
-
- /* Initialize local short-hand pointers. */
- sgl = (struct sli4_sge *)psb->fcp_bpl;
- pdma_phys_bpl = psb->dma_handle;
- pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
- pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
-
- /*
- * The first two bdes are the FCP_CMD and FCP_RSP.
- * The balance are sg list bdes. Initialize the
- * first two and leave the rest for queuecommand.
- */
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
- sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
- sgl->word2 = le32_to_cpu(sgl->word2);
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
- sgl++;
-
- /* Setup the physical region for the FCP RSP */
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
- sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
- sgl->word2 = le32_to_cpu(sgl->word2);
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
-
- /*
- * Since the IOCB for the FCP I/O is built into this
- * lpfc_scsi_buf, initialize it with all known data now.
- */
- iocb = &psb->cur_iocbq.iocb;
- iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
- iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
- /* setting the BLP size to 2 * sizeof BDE may not be correct.
- * We are setting the bpl to point to our sgl. An sgl's
- * entries are 16 bytes, a bpl's entries are 12 bytes.
- */
- iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
- iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
- iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
- iocb->ulpBdeCount = 1;
- iocb->ulpLe = 1;
- iocb->ulpClass = CLASS3;
- psb->cur_iocbq.context1 = psb;
- psb->dma_phys_bpl = pdma_phys_bpl;
-
- /* add the scsi buffer to a post list */
- list_add_tail(&psb->list, &post_sblist);
- spin_lock_irq(&phba->scsi_buf_list_get_lock);
- phba->sli4_hba.scsi_xri_cnt++;
- spin_unlock_irq(&phba->scsi_buf_list_get_lock);
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
- "3021 Allocate %d out of %d requested new SCSI "
- "buffers\n", bcnt, num_to_alloc);
-
- /* post the list of scsi buffer sgls to port if available */
- if (!list_empty(&post_sblist))
- num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
- &post_sblist, bcnt);
- else
- num_posted = 0;
-
- return num_posted;
-}
-
-/**
- * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
- * @vport: The virtual port for which this call is being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
- *
- * This routine wraps the actual SCSI buffer allocator function pointer from
- * the lpfc_hba struct.
- *
- * Return codes:
- * int - number of scsi buffers that were allocated.
- * 0 = failure, less than num_to_alloc is a partial failure.
- **/
-static inline int
-lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
-{
- return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
-}
-
-/**
* lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
*
@@ -977,15 +636,16 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
* NULL - Error
* Pointer to lpfc_scsi_buf - Success
**/
-static struct lpfc_scsi_buf*
-lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+static struct lpfc_io_buf *
+lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ struct scsi_cmnd *cmnd)
{
- struct lpfc_scsi_buf * lpfc_cmd = NULL;
+ struct lpfc_io_buf *lpfc_cmd = NULL;
struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
unsigned long iflag = 0;
spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
- list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
+ list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
list);
if (!lpfc_cmd) {
spin_lock(&phba->scsi_buf_list_put_lock);
@@ -993,7 +653,7 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
&phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
list_remove_head(scsi_buf_list_get, lpfc_cmd,
- struct lpfc_scsi_buf, list);
+ struct lpfc_io_buf, list);
spin_unlock(&phba->scsi_buf_list_put_lock);
}
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
@@ -1005,52 +665,107 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
return lpfc_cmd;
}
/**
- * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
*
- * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
+ * This routine removes a scsi buffer from head of @hdwq io_buf_list
* and returns to caller.
*
* Return codes:
* NULL - Error
* Pointer to lpfc_scsi_buf - Success
**/
-static struct lpfc_scsi_buf*
-lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+static struct lpfc_io_buf *
+lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ struct scsi_cmnd *cmnd)
{
- struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
- unsigned long iflag = 0;
- int found = 0;
+ struct lpfc_io_buf *lpfc_cmd;
+ struct lpfc_sli4_hdw_queue *qp;
+ struct sli4_sge *sgl;
+ IOCB_t *iocb;
+ dma_addr_t pdma_phys_fcp_rsp;
+ dma_addr_t pdma_phys_fcp_cmd;
+ uint32_t sgl_size, cpu, idx;
+ int tag;
- spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
- list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
- &phba->lpfc_scsi_buf_list_get, list) {
- if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_cmd->cur_iocbq.sli4_lxritag))
- continue;
- list_del_init(&lpfc_cmd->list);
- found = 1;
- break;
- }
- if (!found) {
- spin_lock(&phba->scsi_buf_list_put_lock);
- list_splice(&phba->lpfc_scsi_buf_list_put,
- &phba->lpfc_scsi_buf_list_get);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
- spin_unlock(&phba->scsi_buf_list_put_lock);
- list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
- &phba->lpfc_scsi_buf_list_get, list) {
- if (lpfc_test_rrq_active(
- phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
- continue;
- list_del_init(&lpfc_cmd->list);
- found = 1;
- break;
- }
+ cpu = smp_processor_id();
+ if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
+ tag = blk_mq_unique_tag(cmnd->request);
+ idx = blk_mq_unique_tag_to_hwq(tag);
+ } else {
+ idx = phba->sli4_hba.cpu_map[cpu].hdwq;
}
- spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
- if (!found)
+
+ lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
+ !phba->cfg_xri_rebalancing);
+ if (!lpfc_cmd) {
+ qp = &phba->sli4_hba.hdwq[idx];
+ qp->empty_io_bufs++;
return NULL;
+ }
+
+ sgl_size = phba->cfg_sg_dma_buf_size -
+ (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+ /* Setup key fields in buffer that may have been changed
+ * if other protocols used this buffer.
+ */
+ lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ lpfc_cmd->prot_seg_cnt = 0;
+ lpfc_cmd->seg_cnt = 0;
+ lpfc_cmd->timeout = 0;
+ lpfc_cmd->flags = 0;
+ lpfc_cmd->start_time = jiffies;
+ lpfc_cmd->waitq = NULL;
+ lpfc_cmd->cpu = cpu;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ lpfc_cmd->prot_data_type = 0;
+#endif
+ lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size);
+ lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd +
+ sizeof(struct fcp_cmnd));
+
+ /*
+ * The first two SGEs are the FCP_CMD and FCP_RSP.
+ * The balance are sg list bdes. Initialize the
+ * first two and leave the rest for queuecommand.
+ */
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
+ pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size);
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
+ sgl++;
+
+ /* Setup the physical region for the FCP RSP */
+ pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
+
+ /*
+ * Since the IOCB for the FCP I/O is built into this
+ * lpfc_io_buf, initialize it with all known data now.
+ */
+ iocb = &lpfc_cmd->cur_iocbq.iocb;
+ iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+ /* setting the BLP size to 2 * sizeof BDE may not be correct.
+ * We are setting the bpl to point to our sgl. An sgl's
+ * entries are 16 bytes, a bpl's entries are 12 bytes.
+ */
+ iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+ iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
+ iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
+ iocb->ulpBdeCount = 1;
+ iocb->ulpLe = 1;
+ iocb->ulpClass = CLASS3;
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
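For reference, the two standard <linux/blk-mq.h> helpers the new buffer-get
path leans on when cfg_fcp_io_sched selects LPFC_FCP_SCHED_BY_HDWQ; the
block layer packs the hardware-queue index into the upper bits of the
request's unique tag:

	u32 tag = blk_mq_unique_tag(cmnd->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);	/* tag >> 16 */

Otherwise the per-CPU map (cpu_map[cpu].hdwq) picks the hardware queue.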
@@ -1069,10 +784,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* NULL - Error
* Pointer to lpfc_scsi_buf - Success
**/
-static struct lpfc_scsi_buf*
-lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+static struct lpfc_io_buf*
+lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ struct scsi_cmnd *cmnd)
{
- return phba->lpfc_get_scsi_buf(phba, ndlp);
+ return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}
/**
@@ -1084,12 +800,11 @@ lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* lpfc_scsi_buf_list list.
**/
static void
-lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
unsigned long iflag = 0;
psb->seg_cnt = 0;
- psb->nonsg_phys = 0;
psb->prot_seg_cnt = 0;
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
@@ -1104,34 +819,29 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
* @phba: The Hba for which this call is being executed.
* @psb: The scsi buffer which is being released.
*
- * This routine releases @psb scsi buffer by adding it to tail of @phba
- * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
+ * This routine releases @psb scsi buffer by adding it to tail of @hdwq
+ * io_buf_list list. For SLI4, XRIs are tied to the scsi buffer
* and cannot be reused for at least RA_TOV amount of time if it was
* aborted.
**/
static void
-lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
+ struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
psb->seg_cnt = 0;
- psb->nonsg_phys = 0;
psb->prot_seg_cnt = 0;
+ qp = psb->hdwq;
if (psb->exch_busy) {
- spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
- iflag);
+ spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
psb->pCmd = NULL;
- list_add_tail(&psb->list,
- &phba->sli4_hba.lpfc_abts_scsi_buf_list);
- spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
- iflag);
+ list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list);
+ qp->abts_scsi_io_bufs++;
+ spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
} else {
- psb->pCmd = NULL;
- psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
- spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
- list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
- spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+ lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
}
}
@@ -1144,7 +854,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
* lpfc_scsi_buf_list list.
**/
static void
-lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
atomic_dec(&psb->ndlp->cmd_pending);
@@ -1168,12 +878,12 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
* 0 - Success
**/
static int
-lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
- struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+ struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
@@ -1320,7 +1030,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
struct scatterlist *sgpe; /* s/g prot entry */
- struct lpfc_scsi_buf *lpfc_cmd = NULL;
+ struct lpfc_io_buf *lpfc_cmd = NULL;
struct scsi_dif_tuple *src = NULL;
struct lpfc_nodelist *ndlp;
struct lpfc_rport_data *rdata;
@@ -1379,7 +1089,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (sgpe) {
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
src += blockoff;
- lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
+ lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
}
/* Should we change the Reference Tag */
@@ -2684,7 +2394,7 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
**/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
- struct lpfc_scsi_buf *lpfc_cmd)
+ struct lpfc_io_buf *lpfc_cmd)
{
struct scsi_cmnd *sc = lpfc_cmd->pCmd;
int fcpdl;
@@ -2724,11 +2434,11 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
**/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
- struct lpfc_scsi_buf *lpfc_cmd)
+ struct lpfc_io_buf *lpfc_cmd)
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
- struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+ struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
uint32_t num_bde = 0;
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
@@ -2904,7 +2614,7 @@ lpfc_bg_csum(uint8_t *data, int count)
* what type of T10-DIF error occurred.
*/
static void
-lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
struct scatterlist *sgpe; /* s/g prot entry */
struct scatterlist *sgde; /* s/g data entry */
@@ -3089,8 +2799,8 @@ out:
* -1 - Internal error (bad profile, ...etc)
*/
static int
-lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
- struct lpfc_iocbq *pIocbOut)
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
+ struct lpfc_iocbq *pIocbOut)
{
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
@@ -3256,12 +2966,12 @@ out:
* 0 - Success
**/
static int
-lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
- struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
struct sli4_sge *first_data_sgl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
dma_addr_t physaddr;
@@ -3388,6 +3098,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->priority;
}
+
return 0;
}
@@ -3402,11 +3113,11 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
**/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
- struct lpfc_scsi_buf *lpfc_cmd)
+ struct lpfc_io_buf *lpfc_cmd)
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
- struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
+ struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
uint32_t num_sge = 0;
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
@@ -3578,7 +3289,7 @@ err:
* 0 - Success
**/
static inline int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
@@ -3597,7 +3308,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* 0 - Success
**/
static inline int
-lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}
@@ -3614,7 +3325,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
**/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
- struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+ struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
uint32_t resp_info = fcprsp->rspStatus2;
@@ -3706,7 +3417,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
* field of @lpfc_cmd for device with SLI-3 interface spec.
**/
static void
-lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
/*
* There are only two special cases to consider. (1) the scsi command
@@ -3725,7 +3436,7 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
/**
* lpfc_handle_fcp_err - FCP response handler
* @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
* @rsp_iocb: The response IOCB which contains FCP error.
*
* This routine is called to process response IOCB with status field
@@ -3733,7 +3444,7 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
* based upon SCSI and FCP error.
**/
static void
-lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
struct lpfc_iocbq *rsp_iocb)
{
struct lpfc_hba *phba = vport->phba;
@@ -3912,49 +3623,6 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
- * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
- * @phba: Pointer to HBA context object.
- *
- * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
- * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
- * held.
- * If scsi-mq is enabled, get the default block layer mapping of software queues
- * to hardware queues. This information is saved in request tag.
- *
- * Return: index into SLI4 fast-path FCP queue index.
- **/
-int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
- struct lpfc_scsi_buf *lpfc_cmd)
-{
- struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
- struct lpfc_vector_map_info *cpup;
- int chann, cpu;
- uint32_t tag;
- uint16_t hwq;
-
- if (cmnd) {
- tag = blk_mq_unique_tag(cmnd->request);
- hwq = blk_mq_unique_tag_to_hwq(tag);
-
- return hwq;
- }
-
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
- && phba->cfg_fcp_io_channel > 1) {
- cpu = smp_processor_id();
- if (cpu < phba->sli4_hba.num_present_cpu) {
- cpup = phba->sli4_hba.cpu_map;
- cpup += cpu;
- return cpup->channel_id;
- }
- }
- chann = atomic_add_return(1, &phba->fcp_qidx);
- chann = chann % phba->cfg_fcp_io_channel;
- return chann;
-}
-
-
-/**
* lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
* @phba: The Hba for which this call is being executed.
* @pIocbIn: The command IOCBQ for the scsi cmnd.
@@ -3968,8 +3636,8 @@ static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut)
{
- struct lpfc_scsi_buf *lpfc_cmd =
- (struct lpfc_scsi_buf *) pIocbIn->context1;
+ struct lpfc_io_buf *lpfc_cmd =
+ (struct lpfc_io_buf *) pIocbIn->context1;
struct lpfc_vport *vport = pIocbIn->vport;
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
@@ -3977,14 +3645,35 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
unsigned long flags;
struct lpfc_fast_path_event *fast_path_evt;
struct Scsi_Host *shost;
+ int idx;
uint32_t logit = LOG_FCP;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ int cpu;
+#endif
- atomic_inc(&phba->fc4ScsiIoCmpls);
+ /* Guard against abort handler being called at same time */
+ spin_lock(&lpfc_cmd->buf_lock);
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
- if (!cmd)
+ if (!cmd) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2621 IO completion: Not an active IO\n");
+ spin_unlock(&lpfc_cmd->buf_lock);
return;
+ }
+
+ idx = lpfc_cmd->cur_iocbq.hba_wqidx;
+ if (phba->sli4_hba.hdwq)
+ phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ cpu = smp_processor_id();
+ if (cpu < LPFC_CHECK_CPU_CNT)
+ phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
+ }
+#endif
shost = cmd->device->host;
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
@@ -4178,29 +3867,23 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
- /* If pCmd was set to NULL from abort path, do not call scsi_done */
- if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "5688 FCP cmd already NULL, sid: 0x%06x, "
- "did: 0x%06x, oxid: 0x%04x\n",
- vport->fc_myDID,
- (pnode) ? pnode->nlp_DID : 0,
- phba->sli_rev == LPFC_SLI_REV4 ?
- lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
- return;
- }
+ lpfc_cmd->pCmd = NULL;
+ spin_unlock(&lpfc_cmd->buf_lock);
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
/*
- * If there is a thread waiting for command completion
+ * If there is an abort thread waiting for command completion
* wake up the thread.
*/
- spin_lock_irqsave(shost->host_lock, flags);
- if (lpfc_cmd->waitq)
+ spin_lock(&lpfc_cmd->buf_lock);
+ lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ if (lpfc_cmd->waitq) {
wake_up(lpfc_cmd->waitq);
- spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_cmd->waitq = NULL;
+ }
+ spin_unlock(&lpfc_cmd->buf_lock);
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
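A condensed recap of the buf_lock handshake this completion hunk sets up
against the abort path, restating the hunk rather than adding to it: both
sides take the per-buffer lock, and completion clears pCmd under it so the
abort handler can detect an already-finished IO:

	spin_lock(&lpfc_cmd->buf_lock);
	cmd = lpfc_cmd->pCmd;
	if (!cmd) {			/* abort side got here first */
		spin_unlock(&lpfc_cmd->buf_lock);
		return;
	}
	/* ... complete the command ... */
	lpfc_cmd->pCmd = NULL;		/* hand-off happens under the lock */
	spin_unlock(&lpfc_cmd->buf_lock);
	cmd->scsi_done(cmd);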
@@ -4233,7 +3916,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
* to transfer for device with SLI3 interface spec.
**/
static void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
{
struct lpfc_hba *phba = vport->phba;
@@ -4241,7 +3924,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
+ struct lpfc_sli4_hdw_queue *hdwq = NULL;
int datadir = scsi_cmnd->sc_data_direction;
+ int idx;
uint8_t *ptr;
bool sli4;
uint32_t fcpdl;
@@ -4267,6 +3952,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
sli4 = (phba->sli_rev == LPFC_SLI_REV4);
piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
+ idx = lpfc_cmd->hdwq_no;
+ if (phba->sli4_hba.hdwq)
+ hdwq = &phba->sli4_hba.hdwq[idx];
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -4288,19 +3976,22 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
- atomic_inc(&phba->fc4ScsiOutputRequests);
+ if (hdwq)
+ hdwq->scsi_cstat.output_requests++;
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
- atomic_inc(&phba->fc4ScsiInputRequests);
+ if (hdwq)
+ hdwq->scsi_cstat.input_requests++;
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
- atomic_inc(&phba->fc4ScsiControlRequests);
+ if (hdwq)
+ hdwq->scsi_cstat.control_requests++;
}
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
@@ -4328,7 +4019,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
/**
* lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
* @lun: Logical unit number.
* @task_mgmt_cmd: SCSI task management command.
*
@@ -4341,7 +4032,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
**/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
- struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_io_buf *lpfc_cmd,
uint64_t lun,
uint8_t task_mgmt_cmd)
{
@@ -4413,14 +4104,12 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
- phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
break;
case LPFC_PCI_DEV_OC:
- phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
@@ -4452,8 +4141,8 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
- struct lpfc_scsi_buf *lpfc_cmd =
- (struct lpfc_scsi_buf *) cmdiocbq->context1;
+ struct lpfc_io_buf *lpfc_cmd =
+ (struct lpfc_io_buf *) cmdiocbq->context1;
if (lpfc_cmd)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
@@ -4652,9 +4341,12 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
- int err;
+ int err, idx;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ int cpu;
+#endif
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
@@ -4718,7 +4410,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
}
- lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
if (lpfc_cmd == NULL) {
lpfc_rampdown_queue_depth(phba);
@@ -4735,8 +4427,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->ndlp = ndlp;
- lpfc_cmd->timeout = 0;
- lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
@@ -4771,6 +4461,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ cpu = smp_processor_id();
+ if (cpu < LPFC_CHECK_CPU_CNT) {
+ struct lpfc_sli4_hdw_queue *hdwq =
+ &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
+ hdwq->cpucheck_xmt_io[cpu]++;
+ }
+ }
+#endif
err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err) {
@@ -4791,16 +4491,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
(uint32_t)
(cmnd->request->timeout / 1000));
- switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
- case WRITE_DATA:
- atomic_dec(&phba->fc4ScsiOutputRequests);
- break;
- case READ_DATA:
- atomic_dec(&phba->fc4ScsiInputRequests);
- break;
- default:
- atomic_dec(&phba->fc4ScsiControlRequests);
- }
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -4811,10 +4501,26 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_poll_rearm_timer(phba);
}
+ if (phba->cfg_xri_rebalancing)
+ lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
+
return 0;
out_host_busy_free_buf:
+ idx = lpfc_cmd->hdwq_no;
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+ if (phba->sli4_hba.hdwq) {
+ switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
+ case WRITE_DATA:
+ phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
+ break;
+ case READ_DATA:
+ phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
+ break;
+ default:
+ phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
+ }
+ }
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
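The global atomic_t FC4 counters give way to plain per-hardware-queue counters: each hdwq is effectively single-writer, so no atomics are needed, and a failed submit simply decrements what the prep path incremented (the hunk above). A hedged sketch of that accounting; EX_WRITE_DATA/EX_READ_DATA are illustrative stand-ins for the driver's fcpCntl3 flag values:

#include <linux/types.h>

#define EX_WRITE_DATA	0x01	/* stand-in for the driver's WRITE_DATA */
#define EX_READ_DATA	0x02	/* stand-in for the driver's READ_DATA */

/* Per-hdwq counters: plain u64, no atomics, because each hardware
 * queue is driven from one submission context at a time. */
struct scsi_cstat_sketch {
	u64 output_requests;
	u64 input_requests;
	u64 control_requests;
};

static void cstat_account_sketch(struct scsi_cstat_sketch *c, u8 cntl3,
				 int delta)
{
	switch (cntl3) {
	case EX_WRITE_DATA:
		c->output_requests += delta;
		break;
	case EX_READ_DATA:
		c->input_requests += delta;
		break;
	default:
		c->control_requests += delta;
	}
}

/* Prep path: cstat_account_sketch(c, cntl3, 1); a failed IOCB submit
 * rolls back with delta == -1, mirroring the hunk above. */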
@@ -4846,7 +4552,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocb;
struct lpfc_iocbq *abtsiocb;
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
struct lpfc_sli_ring *pring_s4 = NULL;
@@ -4858,65 +4564,59 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (status != 0 && status != SUCCESS)
return status;
+ lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
+ if (!lpfc_cmd)
+ return ret;
+
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
- return FAILED;
+ ret = FAILED;
+ goto out_unlock;
}
- lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
- if (!lpfc_cmd || !lpfc_cmd->pCmd) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* Guard against IO completion being called at same time */
+ spin_lock(&lpfc_cmd->buf_lock);
+
+ if (!lpfc_cmd->pCmd) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
"x%x ID %d LUN %llu\n",
SUCCESS, cmnd->device->id, cmnd->device->lun);
- return SUCCESS;
+ goto out_unlock_buf;
}
iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (!(phba->cfg_fof) ||
- (!(iocb->iocb_flag & LPFC_IO_FOF))) {
- pring_s4 =
- phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
- } else {
- iocb->hba_wqidx = 0;
- pring_s4 = phba->sli4_hba.oas_wq->pring;
- }
+ pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring;
if (!pring_s4) {
ret = FAILED;
- goto out_unlock;
+ goto out_unlock_buf;
}
spin_lock(&pring_s4->ring_lock);
}
/* the command is in process of being cancelled */
if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_unlock(&pring_s4->ring_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3169 SCSI Layer abort requested I/O has been "
"cancelled by LLD.\n");
- return FAILED;
+ ret = FAILED;
+ goto out_unlock_ring;
}
/*
- * If pCmd field of the corresponding lpfc_scsi_buf structure
+ * If pCmd field of the corresponding lpfc_io_buf structure
* points to a different SCSI command, then the driver has
* already completed this command, but the midlayer did not
* see the completion before the eh fired. Just return SUCCESS.
*/
if (lpfc_cmd->pCmd != cmnd) {
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_unlock(&pring_s4->ring_lock);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3170 SCSI Layer abort requested I/O has been "
"completed by LLD.\n");
- goto out_unlock;
+ goto out_unlock_ring;
}
BUG_ON(iocb->context1 != lpfc_cmd);
@@ -4927,6 +4627,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
"3389 SCSI Layer I/O Abort Request is pending\n");
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
+ spin_unlock(&lpfc_cmd->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
goto wait_for_cmpl;
}
@@ -4934,9 +4635,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb = __lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
ret = FAILED;
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_unlock(&pring_s4->ring_lock);
- goto out_unlock;
+ goto out_unlock_ring;
}
/* Indicate the IO is being aborted by the driver. */
@@ -4986,24 +4685,18 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
/* no longer need the lock after this point */
spin_unlock_irqrestore(&phba->hbalock, flags);
-
if (ret_val == IOCB_ERROR) {
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_lock_irqsave(&pring_s4->ring_lock, flags);
- else
- spin_lock_irqsave(&phba->hbalock, flags);
/* Indicate the IO is not being aborted by the driver. */
iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_unlock_irqrestore(&pring_s4->ring_lock, flags);
- else
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&lpfc_cmd->buf_lock);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
}
+ spin_unlock(&lpfc_cmd->buf_lock);
+
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
@@ -5014,9 +4707,7 @@ wait_for_cmpl:
(lpfc_cmd->pCmd != cmnd),
msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
- spin_lock_irqsave(shost->host_lock, flags);
- lpfc_cmd->waitq = NULL;
- spin_unlock_irqrestore(shost->host_lock, flags);
+ spin_lock(&lpfc_cmd->buf_lock);
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
@@ -5027,8 +4718,14 @@ wait_for_cmpl:
iocb->sli4_xritag, ret,
cmnd->device->id, cmnd->device->lun);
}
+ spin_unlock(&lpfc_cmd->buf_lock);
goto out;
+out_unlock_ring:
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
+out_unlock_buf:
+ spin_unlock(&lpfc_cmd->buf_lock);
out_unlock:
spin_unlock_irqrestore(&phba->hbalock, flags);
out:
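Note how the rewritten abort handler takes its locks in the order hbalock -> buf_lock -> ring_lock and unwinds through stacked goto labels, so every early exit releases exactly what it acquired. A self-contained sketch of the idiom (all names hypothetical):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct hba_sketch  { spinlock_t hbalock; bool flushing; };
struct ring_sketch { spinlock_t ring_lock; };
struct buf_sketch  { spinlock_t buf_lock; bool active; bool on_txcmplq; };

static int abort_one_sketch(struct hba_sketch *hba, struct ring_sketch *ring,
			    struct buf_sketch *buf, bool sli4)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hba->hbalock, flags);
	if (hba->flushing) {		/* adapter-wide IO flush in progress */
		ret = -EBUSY;
		goto out_unlock;
	}

	spin_lock(&buf->buf_lock);	/* serialize against completion */
	if (!buf->active)		/* already completed: report success */
		goto out_unlock_buf;

	if (sli4)
		spin_lock(&ring->ring_lock);
	if (!buf->on_txcmplq) {		/* already being cancelled */
		ret = -ENOENT;
		goto out_unlock_ring;
	}

	/* ... build and issue the ABTS here ... */

out_unlock_ring:
	if (sli4)
		spin_unlock(&ring->ring_lock);
out_unlock_buf:
	spin_unlock(&buf->buf_lock);
out_unlock:
	spin_unlock_irqrestore(&hba->hbalock, flags);
	return ret;
}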
@@ -5066,7 +4763,7 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
/**
* lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
* @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
*
* This routine checks the FCP RSP INFO to see if the tsk mgmt command succeeded
*
@@ -5075,7 +4772,7 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
* 0x2002 - Success
**/
static int
-lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
uint32_t rsp_info;
@@ -5150,7 +4847,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
uint8_t task_mgmt_cmd)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
struct lpfc_rport_data *rdata;
@@ -5163,7 +4860,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
return FAILED;
pnode = rdata->pnode;
- lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
@@ -5671,6 +5368,12 @@ lpfc_slave_alloc(struct scsi_device *sdev)
}
sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
+ /* For SLI4, all IO buffers are pre-allocated */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return 0;
+
+ /* This code path is now ONLY for SLI3 adapters */
+
/*
* Populate the cmds_per_lun count scsi_bufs into this host's globally
* available list of scsi buffers. Don't allocate more than the
@@ -5702,7 +5405,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
(phba->cfg_hba_queue_depth - total));
num_to_alloc = phba->cfg_hba_queue_depth - total;
}
- num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
+ num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
if (num_to_alloc != num_allocated) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0708 Allocation request of %d "
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index b759b089432c..f76667b7da7b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -130,62 +130,6 @@ struct lpfc_scsicmd_bkt {
uint32_t cmd_count;
};
-struct lpfc_scsi_buf {
- struct list_head list;
- struct scsi_cmnd *pCmd;
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *ndlp;
-
- uint32_t timeout;
-
- uint16_t flags; /* TBD convert exch_busy to flags */
-#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
-#define LPFC_SBUF_BUMP_QDEPTH 0x8 /* bumped queue depth counter */
- uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
- uint16_t status; /* From IOCB Word 7- ulpStatus */
- uint32_t result; /* From IOCB Word 4. */
-
- uint32_t seg_cnt; /* Number of scatter-gather segments returned by
- * dma_map_sg. The driver needs this for calls
- * to dma_unmap_sg. */
- uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */
-
- dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
-
- /*
- * data and dma_handle are the kernel virtual and bus address of the
- * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
- * gather bde list that supports the sg_tablesize value.
- */
- void *data;
- dma_addr_t dma_handle;
-
- struct fcp_cmnd *fcp_cmnd;
- struct fcp_rsp *fcp_rsp;
- struct ulp_bde64 *fcp_bpl;
-
- dma_addr_t dma_phys_bpl;
-
- /* cur_iocbq has phys of the dma-able buffer.
- * Iotag is in here
- */
- struct lpfc_iocbq cur_iocbq;
- uint16_t cpu;
-
- wait_queue_head_t *waitq;
- unsigned long start_time;
-
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- /* Used to restore any changes to protection data for error injection */
- void *prot_data_segment;
- uint32_t prot_data;
- uint32_t prot_data_type;
-#define LPFC_INJERR_REFTAG 1
-#define LPFC_INJERR_APPTAG 2
-#define LPFC_INJERR_GUARD 3
-#endif
-};
-
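For reference, the lpfc_scsi_buf removal above is paired (elsewhere in this series) with a unified I/O buffer shared by the SCSI and NVME paths. An abridged sketch of its shape, with the field list drawn only from uses visible in this patch — names and layout beyond those are assumptions:

struct lpfc_io_buf_sketch {
	struct list_head list;
	struct scsi_cmnd *pCmd;		/* NULL once completed or aborted */
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq cur_iocbq;	/* carries hba_wqidx and the iotag */
	struct ulp_bde64 *dma_sgl;	/* renamed from fcp_bpl */
	uint16_t hdwq_no;		/* owning hardware queue index */
	spinlock_t buf_lock;		/* completion vs. abort guard */
	wait_queue_head_t *waitq;	/* woken when an abort completes */
	/* ... common DMA, status and protection-data fields ... */
};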
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
@@ -200,5 +144,6 @@ struct lpfc_scsi_buf {
#define TXRDY_PAYLOAD_LEN 12
-int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
- struct lpfc_scsi_buf *lpfc_cmd);
+/* For sysfs/debugfs tmp string max len */
+#define LPFC_MAX_SCSI_INFO_TMP_LEN 79
+
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2242e9b3ca12..57b4a463b589 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -78,12 +78,13 @@ static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
struct hbq_dmabuf *dmabuf);
-static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_cqe *);
+static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
+ struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
- struct lpfc_eqe *eqe, uint32_t qidx);
+ struct lpfc_queue *eq,
+ struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
@@ -160,7 +161,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
}
q->WQ_posted++;
/* set consumption flag every once in a while */
- if (!((q->host_index + 1) % q->entry_repost))
+ if (!((q->host_index + 1) % q->notify_interval))
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
else
bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
@@ -325,29 +326,16 @@ lpfc_sli4_mq_release(struct lpfc_queue *q)
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
- struct lpfc_hba *phba;
struct lpfc_eqe *eqe;
- uint32_t idx;
/* sanity check on queue memory */
if (unlikely(!q))
return NULL;
- phba = q->phba;
- eqe = q->qe[q->hba_index].eqe;
+ eqe = q->qe[q->host_index].eqe;
/* If the next EQE is not valid then we are done */
if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
return NULL;
- /* If the host has not yet processed the next entry then we are done */
- idx = ((q->hba_index + 1) % q->entry_count);
- if (idx == q->host_index)
- return NULL;
-
- q->hba_index = idx;
- /* if the index wrapped around, toggle the valid bit */
- if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
- q->qe_valid = (q->qe_valid) ? 0 : 1;
-
/*
* insert barrier for instruction interlock : data from the hardware
@@ -397,44 +385,25 @@ lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
}
/**
- * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
+ * @phba: adapter with EQ
* @q: The Event Queue that the host has completed processing for.
+ * @count: Number of elements that have been consumed
* @arm: Indicates whether the host wants to arm this EQ.
*
- * This routine will mark all Event Queue Entries on @q, from the last
- * known completed entry to the last entry that was processed, as completed
- * by clearing the valid bit for each completion queue entry. Then it will
- * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
- * The internal host index in the @q will be updated by this routine to indicate
- * that the host has finished processing the entries. The @arm parameter
- * indicates that the queue should be rearmed when ringing the doorbell.
- *
- * This function will return the number of EQEs that were popped.
+ * This routine will notify the HBA, by ringing the doorbell, that @count
+ * EQEs have been processed. The @arm parameter indicates whether
+ * the queue should be rearmed when ringing the doorbell.
**/
-uint32_t
-lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+void
+lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+ uint32_t count, bool arm)
{
- uint32_t released = 0;
- struct lpfc_hba *phba;
- struct lpfc_eqe *temp_eqe;
struct lpfc_register doorbell;
/* sanity check on queue memory */
- if (unlikely(!q))
- return 0;
- phba = q->phba;
-
- /* while there are valid entries */
- while (q->hba_index != q->host_index) {
- if (!phba->sli4_hba.pc_sli4_params.eqav) {
- temp_eqe = q->qe[q->host_index].eqe;
- bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
- }
- released++;
- q->host_index = ((q->host_index + 1) % q->entry_count);
- }
- if (unlikely(released == 0 && !arm))
- return 0;
+ if (unlikely(!q || (count == 0 && !arm)))
+ return;
/* ring doorbell for number popped */
doorbell.word0 = 0;
@@ -442,7 +411,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
}
- bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
@@ -451,60 +420,112 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
/* PCI read to flush PCI pipeline on re-arming for INTx mode */
if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
readl(q->phba->sli4_hba.EQDBregaddr);
- return released;
}
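The replacement doorbell writers are used two ways by the new processing loops: mid-batch with LPFC_QUEUE_NOARM to hand entry credits back to the port early, and once at the end of a batch with LPFC_QUEUE_REARM; a zero count with no arm request is the only no-op. A small sketch of that call pattern through the sli4_write_eq_db function pointer this series wires up (names are the patch's own; the wrapper itself is illustrative):

static void eq_db_pattern_sketch(struct lpfc_hba *phba, struct lpfc_queue *eq,
				 uint32_t consumed, uint32_t remainder)
{
	/* Mid-batch: return 'consumed' credits, interrupts stay masked. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_NOARM);

	/* End of batch: return the remainder and rearm the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, remainder, LPFC_QUEUE_REARM);
}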
/**
- * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
+ * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
+ * @phba: adapter with EQ
* @q: The Event Queue that the host has completed processing for.
+ * @count: Number of elements that have been consumed
* @arm: Indicates whether the host wants to arm this EQ.
*
- * This routine will mark all Event Queue Entries on @q, from the last
- * known completed entry to the last entry that was processed, as completed
- * by clearing the valid bit for each completion queue entry. Then it will
- * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
- * The internal host index in the @q will be updated by this routine to indicate
- * that the host has finished processing the entries. The @arm parameter
- * indicates that the queue should be rearmed when ringing the doorbell.
- *
- * This function will return the number of EQEs that were popped.
+ * This routine will notify the HBA, by ringing the doorbell, that @count
+ * EQEs have been processed. The @arm parameter indicates whether
+ * the queue should be rearmed when ringing the doorbell.
**/
-uint32_t
-lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
+void
+lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+ uint32_t count, bool arm)
{
- uint32_t released = 0;
- struct lpfc_hba *phba;
- struct lpfc_eqe *temp_eqe;
struct lpfc_register doorbell;
/* sanity check on queue memory */
- if (unlikely(!q))
- return 0;
- phba = q->phba;
-
- /* while there are valid entries */
- while (q->hba_index != q->host_index) {
- if (!phba->sli4_hba.pc_sli4_params.eqav) {
- temp_eqe = q->qe[q->host_index].eqe;
- bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
- }
- released++;
- q->host_index = ((q->host_index + 1) % q->entry_count);
- }
- if (unlikely(released == 0 && !arm))
- return 0;
+ if (unlikely(!q || (count == 0 && !arm)))
+ return;
/* ring doorbell for number popped */
doorbell.word0 = 0;
if (arm)
bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
- bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
/* PCI read to flush PCI pipeline on re-arming for INTx mode */
if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
readl(q->phba->sli4_hba.EQDBregaddr);
- return released;
+}
+
+static void
+__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ struct lpfc_eqe *eqe)
+{
+ if (!phba->sli4_hba.pc_sli4_params.eqav)
+ bf_set_le32(lpfc_eqe_valid, eqe, 0);
+
+ eq->host_index = ((eq->host_index + 1) % eq->entry_count);
+
+ /* if the index wrapped around, toggle the valid bit */
+ if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
+ eq->qe_valid = (eq->qe_valid) ? 0 : 1;
+}
+
+static void
+lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+ struct lpfc_eqe *eqe;
+ uint32_t count = 0;
+
+ /* walk all the EQ entries and drop on the floor */
+ eqe = lpfc_sli4_eq_get(eq);
+ while (eqe) {
+ __lpfc_sli4_consume_eqe(phba, eq, eqe);
+ count++;
+ eqe = lpfc_sli4_eq_get(eq);
+ }
+
+ /* Clear and re-arm the EQ */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
+}
+
+static int
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+ struct lpfc_eqe *eqe;
+ int count = 0, consumed = 0;
+
+ if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
+ goto rearm_and_exit;
+
+ eqe = lpfc_sli4_eq_get(eq);
+ while (eqe) {
+ lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
+ __lpfc_sli4_consume_eqe(phba, eq, eqe);
+
+ consumed++;
+ if (!(++count % eq->max_proc_limit))
+ break;
+
+ if (!(count % eq->notify_interval)) {
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
+ LPFC_QUEUE_NOARM);
+ consumed = 0;
+ }
+
+ eqe = lpfc_sli4_eq_get(eq);
+ }
+ eq->EQ_processed += count;
+
+ /* Track the max number of EQEs processed in 1 intr */
+ if (count > eq->EQ_max_eqe)
+ eq->EQ_max_eqe = count;
+
+ eq->queue_claimed = 0;
+
+rearm_and_exit:
+ /* Always clear and re-arm the EQ */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
+
+ return count;
}
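lpfc_sli4_process_eq() above becomes the single EQ consumer path: cmpxchg() on queue_claimed keeps processing single-threaded per EQ, host_index is the lone cursor, max_proc_limit bounds the work done per call, and the trailing REARM doorbell always runs, even when the claim fails. A hypothetical caller shape, e.g. a per-EQ interrupt handler (sketch; anything not named in this patch, including the handler itself, is an assumption):

#include <linux/interrupt.h>

static irqreturn_t lpfc_eq_isr_sketch(int irq, void *data)
{
	struct lpfc_queue *eq = data;	/* per-EQ cookie from request_irq() */
	struct lpfc_hba *phba = eq->phba;

	/*
	 * Drains up to eq->max_proc_limit EQEs; credits are returned
	 * every eq->notify_interval entries via a NOARM doorbell, and
	 * the final REARM write re-enables the interrupt source.
	 */
	lpfc_sli4_process_eq(phba, eq);
	return IRQ_HANDLED;
}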
/**
@@ -519,28 +540,16 @@ lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
- struct lpfc_hba *phba;
struct lpfc_cqe *cqe;
- uint32_t idx;
/* sanity check on queue memory */
if (unlikely(!q))
return NULL;
- phba = q->phba;
- cqe = q->qe[q->hba_index].cqe;
+ cqe = q->qe[q->host_index].cqe;
/* If the next CQE is not valid then we are done */
if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
return NULL;
- /* If the host has not yet processed the next entry then we are done */
- idx = ((q->hba_index + 1) % q->entry_count);
- if (idx == q->host_index)
- return NULL;
-
- q->hba_index = idx;
- /* if the index wrapped around, toggle the valid bit */
- if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
- q->qe_valid = (q->qe_valid) ? 0 : 1;
/*
* insert barrier for instruction interlock : data from the hardware
@@ -554,107 +563,81 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
return cqe;
}
+static void
+__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe)
+{
+ if (!phba->sli4_hba.pc_sli4_params.cqav)
+ bf_set_le32(lpfc_cqe_valid, cqe, 0);
+
+ cq->host_index = ((cq->host_index + 1) % cq->entry_count);
+
+ /* if the index wrapped around, toggle the valid bit */
+ if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
+ cq->qe_valid = (cq->qe_valid) ? 0 : 1;
+}
+
/**
- * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
+ * @phba: the adapter with the CQ
* @q: The Completion Queue that the host has completed processing for.
+ * @count: the number of elements that were consumed
* @arm: Indicates whether the host wants to arm this CQ.
*
- * This routine will mark all Completion queue entries on @q, from the last
- * known completed entry to the last entry that was processed, as completed
- * by clearing the valid bit for each completion queue entry. Then it will
- * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
- * The internal host index in the @q will be updated by this routine to indicate
- * that the host has finished processing the entries. The @arm parameter
- * indicates that the queue should be rearmed when ringing the doorbell.
- *
- * This function will return the number of CQEs that were released.
+ * This routine will notify the HBA, by ringing the doorbell, that the
+ * CQEs have been processed. The @arm parameter specifies whether the
+ * queue should be rearmed when ringing the doorbell.
**/
-uint32_t
-lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+void
+lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+ uint32_t count, bool arm)
{
- uint32_t released = 0;
- struct lpfc_hba *phba;
- struct lpfc_cqe *temp_qe;
struct lpfc_register doorbell;
/* sanity check on queue memory */
- if (unlikely(!q))
- return 0;
- phba = q->phba;
-
- /* while there are valid entries */
- while (q->hba_index != q->host_index) {
- if (!phba->sli4_hba.pc_sli4_params.cqav) {
- temp_qe = q->qe[q->host_index].cqe;
- bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
- }
- released++;
- q->host_index = ((q->host_index + 1) % q->entry_count);
- }
- if (unlikely(released == 0 && !arm))
- return 0;
+ if (unlikely(!q || (count == 0 && !arm)))
+ return;
/* ring doorbell for number popped */
doorbell.word0 = 0;
if (arm)
bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
- bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
- return released;
}
/**
- * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
+ * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
+ * @phba: the adapter with the CQ
* @q: The Completion Queue that the host has completed processing for.
+ * @count: the number of elements that were consumed
* @arm: Indicates whether the host wants to arm this CQ.
*
- * This routine will mark all Completion queue entries on @q, from the last
- * known completed entry to the last entry that was processed, as completed
- * by clearing the valid bit for each completion queue entry. Then it will
- * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
- * The internal host index in the @q will be updated by this routine to indicate
- * that the host has finished processing the entries. The @arm parameter
- * indicates that the queue should be rearmed when ringing the doorbell.
- *
- * This function will return the number of CQEs that were released.
+ * This routine will notify the HBA, by ringing the doorbell, that the
+ * CQEs have been processed. The @arm parameter specifies whether the
+ * queue should be rearmed when ringing the doorbell.
**/
-uint32_t
-lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
+void
+lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+ uint32_t count, bool arm)
{
- uint32_t released = 0;
- struct lpfc_hba *phba;
- struct lpfc_cqe *temp_qe;
struct lpfc_register doorbell;
/* sanity check on queue memory */
- if (unlikely(!q))
- return 0;
- phba = q->phba;
-
- /* while there are valid entries */
- while (q->hba_index != q->host_index) {
- if (!phba->sli4_hba.pc_sli4_params.cqav) {
- temp_qe = q->qe[q->host_index].cqe;
- bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
- }
- released++;
- q->host_index = ((q->host_index + 1) % q->entry_count);
- }
- if (unlikely(released == 0 && !arm))
- return 0;
+ if (unlikely(!q || (count == 0 && !arm)))
+ return;
/* ring doorbell for number popped */
doorbell.word0 = 0;
if (arm)
bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
- bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
+ bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
- return released;
}
/**
@@ -703,15 +686,15 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
hq->RQ_buf_posted++;
/* Ring The Header Receive Queue Doorbell */
- if (!(hq->host_index % hq->entry_repost)) {
+ if (!(hq->host_index % hq->notify_interval)) {
doorbell.word0 = 0;
if (hq->db_format == LPFC_DB_RING_FORMAT) {
bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
- hq->entry_repost);
+ hq->notify_interval);
bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
- hq->entry_repost);
+ hq->notify_interval);
bf_set(lpfc_rq_db_list_fm_index, &doorbell,
hq->host_index);
bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
@@ -1025,7 +1008,7 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (!ndlp->active_rrqs_xri_bitmap)
return 0;
if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
- return 1;
+ return 1;
else
return 0;
}
@@ -1133,14 +1116,14 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
struct lpfc_sglq *sglq = NULL;
struct lpfc_sglq *start_sglq = NULL;
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
int found = 0;
lockdep_assert_held(&phba->hbalock);
if (piocbq->iocb_flag & LPFC_IO_FCP) {
- lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
+ lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
ndlp = lpfc_cmd->rdata->pnode;
} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
@@ -1596,6 +1579,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
+ pring->txcmplq_cnt++;
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3008,6 +2992,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ pring->txcmplq_cnt--;
return cmd_iocb;
}
}
@@ -3045,6 +3030,7 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ pring->txcmplq_cnt--;
return cmd_iocb;
}
}
@@ -3981,8 +3967,8 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
- for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
- pring = phba->sli4_hba.fcp_wq[i]->pring;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
lpfc_sli_abort_iocb_ring(phba, pring);
}
} else {
@@ -4006,12 +3992,13 @@ lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
uint32_t i;
- if (phba->sli_rev < LPFC_SLI_REV4)
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
/* Abort all IO on each NVME ring. */
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
- pring = phba->sli4_hba.nvme_wq[i]->pring;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
lpfc_sli_abort_wqe_ring(phba, pring);
}
}
@@ -4044,8 +4031,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
- for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
- pring = phba->sli4_hba.fcp_wq[i]->pring;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
spin_lock_irq(&pring->ring_lock);
/* Retrieve everything on txq */
@@ -4110,7 +4097,8 @@ lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
uint32_t i;
struct lpfc_iocbq *piocb, *next_iocb;
- if (phba->sli_rev < LPFC_SLI_REV4)
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
/* Hint to other driver operations that a flush is in progress. */
@@ -4122,8 +4110,8 @@ lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
* a local driver reason code. This is a flush so no
* abort exchange to FW.
*/
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
- pring = phba->sli4_hba.nvme_wq[i]->pring;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
spin_lock_irq(&pring->ring_lock);
list_for_each_entry_safe(piocb, next_iocb,
@@ -5564,41 +5552,35 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
int qidx;
struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
+ struct lpfc_sli4_hdw_queue *qp;
- sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
- sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
+ sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
+ sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
if (sli4_hba->nvmels_cq)
- sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
- LPFC_QUEUE_REARM);
+ sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
+ LPFC_QUEUE_REARM);
- if (sli4_hba->fcp_cq)
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
- sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
- LPFC_QUEUE_REARM);
-
- if (sli4_hba->nvme_cq)
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
- sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
- LPFC_QUEUE_REARM);
-
- if (phba->cfg_fof)
- sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);
+ qp = sli4_hba->hdwq;
+ if (sli4_hba->hdwq) {
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ sli4_hba->sli4_write_cq_db(phba, qp[qidx].fcp_cq, 0,
+ LPFC_QUEUE_REARM);
+ sli4_hba->sli4_write_cq_db(phba, qp[qidx].nvme_cq, 0,
+ LPFC_QUEUE_REARM);
+ }
- if (sli4_hba->hba_eq)
- for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
- sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
- LPFC_QUEUE_REARM);
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++)
+ sli4_hba->sli4_write_eq_db(phba, qp[qidx].hba_eq,
+ 0, LPFC_QUEUE_REARM);
+ }
if (phba->nvmet_support) {
for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
- sli4_hba->sli4_cq_release(
- sli4_hba->nvmet_cqset[qidx],
+ sli4_hba->sli4_write_cq_db(phba,
+ sli4_hba->nvmet_cqset[qidx], 0,
LPFC_QUEUE_REARM);
}
}
-
- if (phba->cfg_fof)
- sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
}
/**
@@ -6027,11 +6009,8 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
list_add_tail(&rsrc_blks->list, ext_blk_list);
rsrc_start = rsrc_id;
if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
- phba->sli4_hba.scsi_xri_start = rsrc_start +
+ phba->sli4_hba.io_xri_start = rsrc_start +
lpfc_sli4_get_iocb_cnt(phba);
- phba->sli4_hba.nvme_xri_start =
- phba->sli4_hba.scsi_xri_start +
- phba->sli4_hba.scsi_xri_max;
}
while (rsrc_id < (rsrc_start + rsrc_size)) {
@@ -7056,6 +7035,38 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
return total_cnt;
}
+/**
+ * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of nvme buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
+ * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
+{
+ LIST_HEAD(post_nblist);
+ int num_posted, rc = 0;
+
+ /* get all NVME buffers that need to be reposted onto a local list */
+ lpfc_io_buf_flush(phba, &post_nblist);
+
+ /* post the list of nvme buffer sgls to port if available */
+ if (!list_empty(&post_nblist)) {
+ num_posted = lpfc_sli4_post_io_sgl_list(
+ phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
+ /* failed to post any nvme buffer, return error */
+ if (num_posted == 0)
+ rc = -EIO;
+ }
+ return rc;
+}
+
void
lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
@@ -7144,7 +7155,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
- int rc, i, cnt;
+ int rc, i, cnt, len;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
@@ -7517,24 +7528,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* We need 1 iocbq for every SGL, for IO processing */
cnt += phba->sli4_hba.nvmet_xri_cnt;
} else {
- /* update host scsi xri-sgl sizes and mappings */
- rc = lpfc_sli4_scsi_sgl_update(phba);
+ /* update host common xri-sgl sizes and mappings */
+ rc = lpfc_sli4_io_sgl_update(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "6309 Failed to update scsi-sgl size "
+ "6082 Failed to update nvme-sgl size "
"and mapping: %d\n", rc);
goto out_destroy_queue;
}
- /* update host nvme xri-sgl sizes and mappings */
- rc = lpfc_sli4_nvme_sgl_update(phba);
+ /* register the allocated common sgl pool to the port */
+ rc = lpfc_sli4_repost_io_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "6082 Failed to update nvme-sgl size "
- "and mapping: %d\n", rc);
+ "6116 Error %d during nvme sgl post "
+ "operation\n", rc);
+ /* Some NVME buffers were moved to abort nvme list */
+ /* A pci function reset will repost them */
+ rc = -ENODEV;
goto out_destroy_queue;
}
-
cnt = phba->cfg_iocb_cnt * 1024;
}
@@ -7571,36 +7584,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- /* register the allocated scsi sgl pool to the port */
- rc = lpfc_sli4_repost_scsi_sgl_list(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0383 Error %d during scsi sgl post "
- "operation\n", rc);
- /* Some Scsi buffers were moved to abort scsi list */
- /* A pci function reset will repost them */
- rc = -ENODEV;
- goto out_destroy_queue;
- }
- }
-
- if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
- (phba->nvmet_support == 0)) {
-
- /* register the allocated nvme sgl pool to the port */
- rc = lpfc_repost_nvme_sgl_list(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "6116 Error %d during nvme sgl post "
- "operation\n", rc);
- /* Some NVME buffers were moved to abort nvme list */
- /* A pci function reset will repost them */
- rc = -ENODEV;
- goto out_destroy_queue;
- }
- }
-
/* Post the rpi header region to the device. */
rc = lpfc_sli4_post_all_rpi_hdrs(phba);
if (unlikely(rc)) {
@@ -7650,6 +7633,25 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
lpfc_sli_read_link_ste(phba);
}
+ /* Don't post more new bufs if repost already recovered
+ * the nvme sgls.
+ */
+ if (phba->nvmet_support == 0) {
+ if (phba->sli4_hba.io_xri_cnt == 0) {
+ len = lpfc_new_io_buf(
+ phba, phba->sli4_hba.io_xri_max);
+ if (len == 0) {
+ rc = -ENOMEM;
+ goto out_unset_queue;
+ }
+
+ if (phba->cfg_xri_rebalancing)
+ lpfc_create_multixri_pools(phba);
+ }
+ } else {
+ phba->cfg_xri_rebalancing = 0;
+ }
+
/* Arm the CQs and then EQs on device */
lpfc_sli4_arm_cqeq_intr(phba);
@@ -7678,6 +7680,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->hb_outstanding = 0;
phba->last_completion_time = jiffies;
+ /* start eq_delay heartbeat */
+ if (phba->cfg_auto_imax)
+ queue_delayed_work(phba->wq, &phba->eq_delay_work,
+ msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
+
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
@@ -7729,18 +7736,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
"3104 Adapter failed to issue "
"DOWN_LINK mbox cmd, rc:x%x\n", rc);
- goto out_unset_queue;
+ goto out_io_buff_free;
}
} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
/* don't perform init_link on SLI4 FC port loopback test */
if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
if (rc)
- goto out_unset_queue;
+ goto out_io_buff_free;
}
}
mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
+out_io_buff_free:
+ /* Free allocated IO Buffers */
+ lpfc_io_free(phba);
out_unset_queue:
/* Unset all the queues set up in this routine when error out */
lpfc_sli4_queue_unset(phba);
@@ -7846,7 +7856,6 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
uint32_t eqidx;
struct lpfc_queue *fpeq = NULL;
- struct lpfc_eqe *eqe;
bool mbox_pending;
if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
@@ -7854,11 +7863,11 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
/* Find the eq associated with the mcq */
- if (sli4_hba->hba_eq)
- for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
- if (sli4_hba->hba_eq[eqidx]->queue_id ==
+ if (sli4_hba->hdwq)
+ for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++)
+ if (sli4_hba->hdwq[eqidx].hba_eq->queue_id ==
sli4_hba->mbx_cq->assoc_qid) {
- fpeq = sli4_hba->hba_eq[eqidx];
+ fpeq = sli4_hba->hdwq[eqidx].hba_eq;
break;
}
if (!fpeq)
@@ -7880,14 +7889,11 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
*/
if (mbox_pending)
- while ((eqe = lpfc_sli4_eq_get(fpeq))) {
- lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
- fpeq->EQ_processed++;
- }
-
- /* Always clear and re-arm the EQ */
-
- sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+ /* process and rearm the EQ */
+ lpfc_sli4_process_eq(phba, fpeq);
+ else
+ /* Always clear and re-arm the EQ */
+ sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
return mbox_pending;
@@ -8557,7 +8563,6 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
if (rc)
goto exit;
-
/*
* Initialize the bootstrap memory region to avoid stale data areas
* in the mailbox post. Then copy the caller's mailbox contents to
@@ -9476,7 +9481,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
if (phba->fcp_embed_io) {
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
struct sli4_sge *sgl;
struct fcp_cmnd *fcp_cmnd;
uint32_t *ptr;
@@ -9484,7 +9489,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* 128 byte wqe support here */
lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
/* Word 0-2 - FCP_CMND */
@@ -9540,7 +9545,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
if (phba->fcp_embed_io) {
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
struct sli4_sge *sgl;
struct fcp_cmnd *fcp_cmnd;
uint32_t *ptr;
@@ -9548,7 +9553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* 128 byte wqe support here */
lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
/* Word 0-2 - FCP_CMND */
@@ -9597,7 +9602,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* Note, word 10 is already initialized to 0 */
if (phba->fcp_embed_io) {
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
struct sli4_sge *sgl;
struct fcp_cmnd *fcp_cmnd;
uint32_t *ptr;
@@ -9605,7 +9610,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* 128 byte wqe support here */
lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
/* Word 0-2 - FCP_CMND */
@@ -9864,10 +9869,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
/* Get the WQ */
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
- wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
- else
- wq = phba->sli4_hba.oas_wq;
+ wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
} else {
wq = phba->sli4_hba.els_wq;
}
@@ -9879,7 +9881,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
* The WQE can be either 64 or 128 bytes,
*/
- lockdep_assert_held(&phba->hbalock);
+ lockdep_assert_held(&pring->ring_lock);
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
@@ -10001,29 +10003,20 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
+ struct lpfc_io_buf *lpfc_cmd;
+
if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
- if (!(phba->cfg_fof) ||
- (!(piocb->iocb_flag & LPFC_IO_FOF))) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return NULL;
- /*
- * for abort iocb hba_wqidx should already
- * be setup based on what work queue we used.
- */
- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- piocb->hba_wqidx =
- lpfc_sli4_scmd_to_wqidx_distr(phba,
- piocb->context1);
- piocb->hba_wqidx = piocb->hba_wqidx %
- phba->cfg_fcp_io_channel;
- }
- return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
- } else {
- if (unlikely(!phba->sli4_hba.oas_wq))
- return NULL;
- piocb->hba_wqidx = 0;
- return phba->sli4_hba.oas_wq->pring;
+ if (unlikely(!phba->sli4_hba.hdwq))
+ return NULL;
+ /*
+ * for abort iocb hba_wqidx should already
+ * be setup based on what work queue we used.
+ */
+ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
+ piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
+ return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
return NULL;
@@ -10049,12 +10042,9 @@ int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
- struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_sli_ring *pring;
- struct lpfc_queue *fpeq;
- struct lpfc_eqe *eqe;
unsigned long iflags;
- int rc, idx;
+ int rc;
if (phba->sli_rev == LPFC_SLI_REV4) {
pring = lpfc_sli4_calc_ring(phba, piocb);
@@ -10064,34 +10054,6 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
-
- if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
- idx = piocb->hba_wqidx;
- hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
-
- if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
-
- /* Get associated EQ with this index */
- fpeq = phba->sli4_hba.hba_eq[idx];
-
- /* Turn off interrupts from this EQ */
- phba->sli4_hba.sli4_eq_clr_intr(fpeq);
-
- /*
- * Process all the events on FCP EQ
- */
- while ((eqe = lpfc_sli4_eq_get(fpeq))) {
- lpfc_sli4_hba_handle_eqe(phba,
- eqe, idx);
- fpeq->EQ_processed++;
- }
-
- /* Always clear and re-arm the EQ */
- phba->sli4_hba.sli4_eq_release(fpeq,
- LPFC_QUEUE_REARM);
- }
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
- }
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -10506,19 +10468,11 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
INIT_LIST_HEAD(&psli->mboxq);
INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
- for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
- pring = phba->sli4_hba.fcp_wq[i]->pring;
- pring->flag = 0;
- pring->ringno = LPFC_FCP_RING;
- INIT_LIST_HEAD(&pring->txq);
- INIT_LIST_HEAD(&pring->txcmplq);
- INIT_LIST_HEAD(&pring->iocb_continueq);
- spin_lock_init(&pring->ring_lock);
- }
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
- pring = phba->sli4_hba.nvme_wq[i]->pring;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_FCP_RING;
+ pring->txcmplq_cnt = 0;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -10527,25 +10481,27 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
pring = phba->sli4_hba.els_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_ELS_RING;
+ pring->txcmplq_cnt = 0;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
spin_lock_init(&pring->ring_lock);
- if (phba->cfg_nvme_io_channel) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
+ pring->flag = 0;
+ pring->ringno = LPFC_FCP_RING;
+ pring->txcmplq_cnt = 0;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
+ INIT_LIST_HEAD(&pring->iocb_continueq);
+ spin_lock_init(&pring->ring_lock);
+ }
pring = phba->sli4_hba.nvmels_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_ELS_RING;
- INIT_LIST_HEAD(&pring->txq);
- INIT_LIST_HEAD(&pring->txcmplq);
- INIT_LIST_HEAD(&pring->iocb_continueq);
- spin_lock_init(&pring->ring_lock);
- }
-
- if (phba->cfg_fof) {
- pring = phba->sli4_hba.oas_wq->pring;
- pring->flag = 0;
- pring->ringno = LPFC_FCP_RING;
+ pring->txcmplq_cnt = 0;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -11327,6 +11283,7 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *abtsiocbp;
union lpfc_wqe128 *abts_wqe;
int retval;
+ int idx = cmdiocb->hba_wqidx;
/*
* There are certain command types we don't want to abort. And we
@@ -11382,7 +11339,8 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp->iocb_flag |= LPFC_IO_NVME;
abtsiocbp->vport = vport;
abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
- retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
+ retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx],
+ abtsiocbp);
if (retval) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
"6147 Failed abts issue_wqe with status x%x "
@@ -11457,7 +11415,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
uint16_t tgt_id, uint64_t lun_id,
lpfc_ctx_cmd ctx_cmd)
{
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
int rc = 1;
if (iocbq->vport != vport)
@@ -11467,7 +11425,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
!(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
return rc;
- lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+ lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
if (lpfc_cmd->pCmd == NULL)
return rc;
@@ -11694,14 +11652,14 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
struct lpfc_iocbq *abtsiocbq;
struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *iocbq;
IOCB_t *icmd;
int sum, i, ret_val;
unsigned long iflags;
- struct lpfc_sli_ring *pring_s4;
+ struct lpfc_sli_ring *pring_s4 = NULL;
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -11719,17 +11677,46 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
cmd) != 0)
continue;
+ /* Guard against IO completion being called at the same time */
+ lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
+ spin_lock(&lpfc_cmd->buf_lock);
+
+ if (!lpfc_cmd->pCmd) {
+ spin_unlock(&lpfc_cmd->buf_lock);
+ continue;
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ pring_s4 =
+ phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
+ if (!pring_s4) {
+ spin_unlock(&lpfc_cmd->buf_lock);
+ continue;
+ }
+ /* Note: both hbalock and ring_lock must be set here */
+ spin_lock(&pring_s4->ring_lock);
+ }
+
/*
* If the iocbq is already being aborted, don't take a second
* action, but do count it.
*/
- if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
+ spin_unlock(&lpfc_cmd->buf_lock);
continue;
+ }
/* issue ABTS for this IOCB based on iotag */
abtsiocbq = __lpfc_sli_get_iocbq(phba);
- if (abtsiocbq == NULL)
+ if (!abtsiocbq) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
+ spin_unlock(&lpfc_cmd->buf_lock);
continue;
+ }
icmd = &iocbq->iocb;
abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
@@ -11750,7 +11737,6 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
if (iocbq->iocb_flag & LPFC_IO_FOF)
abtsiocbq->iocb_flag |= LPFC_IO_FOF;
- lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
ndlp = lpfc_cmd->rdata->pnode;
if (lpfc_is_link_up(phba) &&
@@ -11769,11 +11755,6 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
- if (!pring_s4)
- continue;
- /* Note: both hbalock and ring_lock must be set here */
- spin_lock(&pring_s4->ring_lock);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocbq, 0);
spin_unlock(&pring_s4->ring_lock);
@@ -11782,6 +11763,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocbq, 0);
}
+ spin_unlock(&lpfc_cmd->buf_lock);
if (ret_val == IOCB_ERROR)
__lpfc_sli_release_iocbq(phba, abtsiocbq);
@@ -11816,7 +11798,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
{
wait_queue_head_t *pdone_q;
unsigned long iflags;
- struct lpfc_scsi_buf *lpfc_cmd;
+ struct lpfc_io_buf *lpfc_cmd;
spin_lock_irqsave(&phba->hbalock, iflags);
if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
@@ -11845,7 +11827,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
/* Set the exchange busy flag for task management commands */
if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
- lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+ lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
cur_iocbq);
lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
}
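Every container_of() conversion in these hunks relies on cur_iocbq being embedded (not pointed to) in the renamed wrapper. A minimal layout sketch, with the surrounding fields elided:

struct lpfc_io_buf {
	/* ... */
	struct lpfc_iocbq cur_iocbq;	/* embedded member */
	/* ... */
};

/* Recover the enclosing buffer from its embedded iocbq: */
static struct lpfc_io_buf *to_io_buf(struct lpfc_iocbq *iocbq)
{
	return container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
}

The helper name to_io_buf is hypothetical; the driver open-codes the container_of() call at each site.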
@@ -12919,35 +12901,6 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
} /* lpfc_sli_intr_handler */
/**
- * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked by the worker thread to process all the pending
- * SLI4 FCP abort XRI events.
- **/
-void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
-{
- struct lpfc_cq_event *cq_event;
-
- /* First, declare the fcp xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
- /* Now, handle all the fcp xri abort events */
- while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
- list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
- cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
- /* Notify aborted XRI for FCP work queue */
- lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
- /* Free the event processed back to the free pool */
- lpfc_sli4_cq_event_release(phba, cq_event);
- }
-}
-
-/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -13320,11 +13273,14 @@ out_no_mqe_complete:
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
-lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe)
{
struct lpfc_mcqe mcqe;
bool workposted;
+ cq->CQ_mbox++;
+
/* Copy the mailbox MCQE and convert endian order as needed */
lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
@@ -13443,17 +13399,8 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
switch (cq->subtype) {
case LPFC_FCP:
- cq_event = lpfc_cq_event_setup(
- phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
- if (!cq_event)
- return false;
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_add_tail(&cq_event->list,
- &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
- /* Set the fcp xri abort event flag */
- phba->hba_flag |= FCP_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- workposted = true;
+ lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
+ workposted = false;
break;
case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
@@ -13461,6 +13408,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
if (!cq_event)
return false;
+ cq_event->hdwq = cq->hdwq;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -13474,7 +13422,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
if (phba->nvmet_support)
lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
else
- lpfc_sli4_nvme_xri_aborted(phba, wcqe);
+ lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
workposted = false;
break;
@@ -13592,7 +13540,7 @@ out:
* lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
* @phba: Pointer to HBA context object.
* @cq: Pointer to the completion queue.
- * @wcqe: Pointer to a completion queue entry.
+ * @cqe: Pointer to a completion queue entry.
*
* This routine process a slow-path work-queue or receive queue completion queue
* entry.
@@ -13684,7 +13632,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Save EQ associated with this CQ */
cq->assoc_qp = speq;
- if (!queue_work(phba->wq, &cq->spwork))
+ if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0390 Cannot schedule soft IRQ "
"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
@@ -13692,60 +13640,129 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
}
/**
- * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
+ * __lpfc_sli4_process_cq - Process elements of a CQ
* @phba: Pointer to HBA context object.
+ * @cq: Pointer to CQ to be processed
+ * @handler: Routine to process each cqe
+ * @delay: Pointer to usdelay to set if the handler is rescheduled
*
- * This routine process a event queue entry from the slow-path event queue.
- * It will check the MajorCode and MinorCode to determine this is for a
- * completion event on a completion queue, if not, an error shall be logged
- * and just return. Otherwise, it will get to the corresponding completion
- * queue and process all the entries on that completion queue, rearm the
- * completion queue, and then return.
+ * This routine processes completion queue entries in a CQ. The handler is
+ * called for each valid queue element found. During processing, checks are
+ * made for periodic doorbell writes to let the hardware know how many
+ * elements have been consumed.
*
+ * If the max limit on cqes to process is hit, or there are no more valid
+ * entries, the loop stops. If a sufficient number of elements was processed,
+ * meaning there is sufficient load, a cq rescheduling delay is set rather
+ * than rearming the CQ and generating another interrupt. A delay of 0
+ * indicates no rescheduling.
+ *
+ * Returns true if work was posted to the worker thread, otherwise false.
**/
-static void
-lpfc_sli4_sp_process_cq(struct work_struct *work)
+static bool
+__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_cqe *), unsigned long *delay)
{
- struct lpfc_queue *cq =
- container_of(work, struct lpfc_queue, spwork);
- struct lpfc_hba *phba = cq->phba;
struct lpfc_cqe *cqe;
bool workposted = false;
- int ccount = 0;
+ int count = 0, consumed = 0;
+ bool arm = true;
+
+ /* default - no reschedule */
+ *delay = 0;
+
+ if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
+ goto rearm_and_exit;
/* Process all the entries to the CQ */
+ cqe = lpfc_sli4_cq_get(cq);
+ while (cqe) {
+#if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME)
+ if (phba->ktime_on)
+ cq->isr_timestamp = ktime_get_ns();
+ else
+ cq->isr_timestamp = 0;
+#endif
+ workposted |= handler(phba, cq, cqe);
+ __lpfc_sli4_consume_cqe(phba, cq, cqe);
+
+ consumed++;
+ if (!(++count % cq->max_proc_limit))
+ break;
+
+ if (!(count % cq->notify_interval)) {
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
+ LPFC_QUEUE_NOARM);
+ consumed = 0;
+ }
+
+ cqe = lpfc_sli4_cq_get(cq);
+ }
+ if (count >= phba->cfg_cq_poll_threshold) {
+ *delay = 1;
+ arm = false;
+ }
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (count > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = count;
+
+ cq->assoc_qp->EQ_cqe_cnt += count;
+
+ /* Catch the no cq entry condition */
+ if (unlikely(count == 0))
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0369 No entry from completion queue "
+ "qid=%d\n", cq->queue_id);
+
+ cq->queue_claimed = 0;
+
+rearm_and_exit:
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
+ arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
+
+ return workposted;
+}
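To make the doorbell cadence concrete, here is a hedged walk-through of __lpfc_sli4_process_cq with assumed tunables; the real values are set at queue creation later in this patch:

/* Assume cq->notify_interval = 16, cq->max_proc_limit = 64 and
 * phba->cfg_cq_poll_threshold = 64:
 *
 *   CQEs 1..15   consumed, no doorbell write
 *   CQE  16      doorbell written NOARM with consumed = 16; consumed reset
 *   CQEs 17..48  same pattern repeats at counts 32 and 48
 *   CQE  64      count % max_proc_limit == 0, so the loop breaks with
 *                consumed = 16; count >= cfg_cq_poll_threshold, so
 *                *delay = 1 and arm = false
 *   exit         the final doorbell is written NOARM and the handler is
 *                rescheduled instead of the CQ being re-armed
 */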
+
+/**
+ * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
+ * @cq: Pointer to CQ to process
+ *
+ * This routine calls the cq processing routine with a handler specific
+ * to the type of queue bound to it.
+ *
+ * The CQ routine returns two values: the first is the calling status,
+ * which indicates whether work was queued to the background discovery
+ * thread. If true, the routine should wake up the discovery thread;
+ * the second is the delay parameter. If non-zero, rather than rearming
+ * the CQ and taking yet another interrupt, the CQ handler should be
+ * queued so that it is processed in a subsequent polling action. The
+ * value of the delay indicates when to reschedule it.
+ **/
+static void
+__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
+{
+ struct lpfc_hba *phba = cq->phba;
+ unsigned long delay;
+ bool workposted = false;
+
+ /* Process and rearm the CQ */
switch (cq->type) {
case LPFC_MCQ:
- while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
- if (!(++ccount % cq->entry_repost))
- break;
- cq->CQ_mbox++;
- }
+ workposted |= __lpfc_sli4_process_cq(phba, cq,
+ lpfc_sli4_sp_handle_mcqe,
+ &delay);
break;
case LPFC_WCQ:
- while ((cqe = lpfc_sli4_cq_get(cq))) {
- if (cq->subtype == LPFC_FCP ||
- cq->subtype == LPFC_NVME) {
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
- cq->isr_timestamp = ktime_get_ns();
- else
- cq->isr_timestamp = 0;
-#endif
- workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
- cqe);
- } else {
- workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
- cqe);
- }
- if (!(++ccount % cq->entry_repost))
- break;
- }
-
- /* Track the max number of CQEs processed in 1 EQ */
- if (ccount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ccount;
+ if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
+ workposted |= __lpfc_sli4_process_cq(phba, cq,
+ lpfc_sli4_fp_handle_cqe,
+ &delay);
+ else
+ workposted |= __lpfc_sli4_process_cq(phba, cq,
+ lpfc_sli4_sp_handle_cqe,
+ &delay);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -13754,14 +13771,14 @@ lpfc_sli4_sp_process_cq(struct work_struct *work)
return;
}
- /* Catch the no cq entry condition, log an error */
- if (unlikely(ccount == 0))
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0371 No entry from the CQ: identifier "
- "(x%x), type (%d)\n", cq->queue_id, cq->type);
-
- /* In any case, flash and re-arm the RCQ */
- phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
+ if (delay) {
+ if (!queue_delayed_work_on(cq->chann, phba->wq,
+ &cq->sched_spwork, delay))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0394 Cannot schedule soft IRQ "
+ "for cqid=%d on CPU %d\n",
+ cq->queue_id, cq->chann);
+ }
/* wake up worker thread if there are works to be done */
if (workposted)
@@ -13769,6 +13786,36 @@ lpfc_sli4_sp_process_cq(struct work_struct *work)
}
/**
+ * lpfc_sli4_sp_process_cq - slow-path work handler when started by
+ * interrupt
+ * @work: pointer to work element
+ *
+ * Translates from the work element and calls the slow-path handler.
+ **/
+static void
+lpfc_sli4_sp_process_cq(struct work_struct *work)
+{
+ struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
+
+ __lpfc_sli4_sp_process_cq(cq);
+}
+
+/**
+ * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
+ * @work: pointer to work element
+ *
+ * Translates from the work element and calls the slow-path handler.
+ **/
+static void
+lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
+{
+ struct lpfc_queue *cq = container_of(to_delayed_work(work),
+ struct lpfc_queue, sched_spwork);
+
+ __lpfc_sli4_sp_process_cq(cq);
+}
+
+/**
* lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
* @phba: Pointer to HBA context object.
* @cq: Pointer to associated CQ
@@ -13999,13 +14046,16 @@ out:
/**
* lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
+ * @phba: adapter with cq
* @cq: Pointer to the completion queue.
* @eqe: Pointer to fast-path completion queue entry.
*
* This routine process a fast-path work queue completion entry from fast-path
* event queue for FCP command response completion.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
**/
-static int
+static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_cqe *cqe)
{
@@ -14072,10 +14122,11 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* completion queue, and then return.
**/
static void
-lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
- uint32_t qidx)
+lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ struct lpfc_eqe *eqe)
{
struct lpfc_queue *cq = NULL;
+ uint32_t qidx = eq->hdwq;
uint16_t cqid, id;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
@@ -14090,6 +14141,14 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Get the reference to the corresponding CQ */
cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+ /* Use the fast lookup method first */
+ if (cqid <= phba->sli4_hba.cq_max) {
+ cq = phba->sli4_hba.cq_lookup[cqid];
+ if (cq)
+ goto work_cq;
+ }
+
+ /* Next check for NVMET completion */
if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
@@ -14099,20 +14158,6 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
}
}
- if (phba->sli4_hba.nvme_cq_map &&
- (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
- /* Process NVME / NVMET command completion */
- cq = phba->sli4_hba.nvme_cq[qidx];
- goto process_cq;
- }
-
- if (phba->sli4_hba.fcp_cq_map &&
- (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
- /* Process FCP command completion */
- cq = phba->sli4_hba.fcp_cq[qidx];
- goto process_cq;
- }
-
if (phba->sli4_hba.nvmels_cq &&
(cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
/* Process NVME unsol rcv */
@@ -14121,7 +14166,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Otherwise this is a Slow path event */
if (cq == NULL) {
- lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
+ lpfc_sli4_sp_handle_eqe(phba, eqe,
+ phba->sli4_hba.hdwq[qidx].hba_eq);
return;
}
@@ -14134,10 +14180,8 @@ process_cq:
return;
}
- /* Save EQ associated with this CQ */
- cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
-
- if (!queue_work(phba->wq, &cq->irqwork))
+work_cq:
+ if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0363 Cannot schedule soft IRQ "
"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
@@ -14145,219 +14189,73 @@ process_cq:
}
/**
- * lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
- * @phba: Pointer to HBA context object.
- * @eqe: Pointer to fast-path event queue entry.
+ * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
+ * @cq: Pointer to CQ to be processed
*
- * This routine process a event queue entry from the fast-path event queue.
- * It will check the MajorCode and MinorCode to determine this is for a
- * completion event on a completion queue, if not, an error shall be logged
- * and just return. Otherwise, it will get to the corresponding completion
- * queue and process all the entries on the completion queue, rearm the
- * completion queue, and then return.
+ * This routine calls the cq processing routine with the handler for
+ * fast path CQEs.
+ *
+ * The CQ routine returns two values: the first is the calling status,
+ * which indicates whether work was queued to the background discovery
+ * thread. If true, the routine should wake up the discovery thread;
+ * the second is the delay parameter. If non-zero, rather than rearming
+ * the CQ and taking yet another interrupt, the CQ handler should be
+ * queued so that it is processed in a subsequent polling action. The
+ * value of the delay indicates when to reschedule it.
**/
static void
-lpfc_sli4_hba_process_cq(struct work_struct *work)
+__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
- struct lpfc_queue *cq =
- container_of(work, struct lpfc_queue, irqwork);
struct lpfc_hba *phba = cq->phba;
- struct lpfc_cqe *cqe;
+ unsigned long delay;
bool workposted = false;
- int ccount = 0;
-
- /* Process all the entries to the CQ */
- while ((cqe = lpfc_sli4_cq_get(cq))) {
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
- cq->isr_timestamp = ktime_get_ns();
- else
- cq->isr_timestamp = 0;
-#endif
- workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
- if (!(++ccount % cq->entry_repost))
- break;
- }
- /* Track the max number of CQEs processed in 1 EQ */
- if (ccount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ccount;
- cq->assoc_qp->EQ_cqe_cnt += ccount;
+ /* process and rearm the CQ */
+ workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
+ &delay);
- /* Catch the no cq entry condition */
- if (unlikely(ccount == 0))
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0369 No entry from fast-path completion "
- "queue fcpcqid=%d\n", cq->queue_id);
-
- /* In any case, flash and re-arm the CQ */
- phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
+ if (delay) {
+ if (!queue_delayed_work_on(cq->chann, phba->wq,
+ &cq->sched_irqwork, delay))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0367 Cannot schedule soft IRQ "
+ "for cqid=%d on CPU %d\n",
+ cq->queue_id, cq->chann);
+ }
/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
}
-static void
-lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
-{
- struct lpfc_eqe *eqe;
-
- /* walk all the EQ entries and drop on the floor */
- while ((eqe = lpfc_sli4_eq_get(eq)))
- ;
-
- /* Clear and re-arm the EQ */
- phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
-}
-
-
/**
- * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
- * entry
- * @phba: Pointer to HBA context object.
- * @eqe: Pointer to fast-path event queue entry.
+ * lpfc_sli4_hba_process_cq - fast-path work handler when started by
+ * interrupt
+ * @work: pointer to work element
*
- * This routine process a event queue entry from the Flash Optimized Fabric
- * event queue. It will check the MajorCode and MinorCode to determine this
- * is for a completion event on a completion queue, if not, an error shall be
- * logged and just return. Otherwise, it will get to the corresponding
- * completion queue and process all the entries on the completion queue, rearm
- * the completion queue, and then return.
+ * Translates from the work element and calls the fast-path handler.
**/
static void
-lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+lpfc_sli4_hba_process_cq(struct work_struct *work)
{
- struct lpfc_queue *cq;
- uint16_t cqid;
-
- if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "9147 Not a valid completion "
- "event: majorcode=x%x, minorcode=x%x\n",
- bf_get_le32(lpfc_eqe_major_code, eqe),
- bf_get_le32(lpfc_eqe_minor_code, eqe));
- return;
- }
+ struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
- /* Get the reference to the corresponding CQ */
- cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
-
- /* Next check for OAS */
- cq = phba->sli4_hba.oas_cq;
- if (unlikely(!cq)) {
- if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "9148 OAS completion queue "
- "does not exist\n");
- return;
- }
-
- if (unlikely(cqid != cq->queue_id)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "9149 Miss-matched fast-path compl "
- "queue id: eqcqid=%d, fcpcqid=%d\n",
- cqid, cq->queue_id);
- return;
- }
-
- /* Save EQ associated with this CQ */
- cq->assoc_qp = phba->sli4_hba.fof_eq;
-
- /* CQ work will be processed on CPU affinitized to this IRQ */
- if (!queue_work(phba->wq, &cq->irqwork))
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0367 Cannot schedule soft IRQ "
- "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
- cqid, cq->queue_id, smp_processor_id());
+ __lpfc_sli4_hba_process_cq(cq);
}
/**
- * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
- * @irq: Interrupt number.
- * @dev_id: The device context pointer.
+ * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
+ * @work: pointer to work element
*
- * This function is directly called from the PCI layer as an interrupt
- * service routine when device with SLI-4 interface spec is enabled with
- * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
- * IOCB ring event in the HBA. However, when the device is enabled with either
- * MSI or Pin-IRQ interrupt mode, this function is called as part of the
- * device-level interrupt handler. When the PCI slot is in error recovery
- * or the HBA is undergoing initialization, the interrupt handler will not
- * process the interrupt. The Flash Optimized Fabric ring event are handled in
- * the intrrupt context. This function is called without any lock held.
- * It gets the hbalock to access and update SLI data structures. Note that,
- * the EQ to CQ are one-to-one map such that the EQ index is
- * equal to that of CQ index.
- *
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
+ * Translates from the work element and calls the fast-path handler.
**/
-irqreturn_t
-lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+static void
+lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
- struct lpfc_hba *phba;
- struct lpfc_hba_eq_hdl *hba_eq_hdl;
- struct lpfc_queue *eq;
- struct lpfc_eqe *eqe;
- unsigned long iflag;
- int ecount = 0;
-
- /* Get the driver's phba structure from the dev_id */
- hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
- phba = hba_eq_hdl->phba;
+ struct lpfc_queue *cq = container_of(to_delayed_work(work),
+ struct lpfc_queue, sched_irqwork);
- if (unlikely(!phba))
- return IRQ_NONE;
-
- /* Get to the EQ struct associated with this vector */
- eq = phba->sli4_hba.fof_eq;
- if (unlikely(!eq))
- return IRQ_NONE;
-
- /* Check device state for handling interrupt */
- if (unlikely(lpfc_intr_state_check(phba))) {
- /* Check again for link_state with lock held */
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (phba->link_state < LPFC_LINK_DOWN)
- /* Flush, clear interrupt, and rearm the EQ */
- lpfc_sli4_eq_flush(phba, eq);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- return IRQ_NONE;
- }
-
- /*
- * Process all the event on FCP fast-path EQ
- */
- while ((eqe = lpfc_sli4_eq_get(eq))) {
- lpfc_sli4_fof_handle_eqe(phba, eqe);
- if (!(++ecount % eq->entry_repost))
- break;
- eq->EQ_processed++;
- }
-
- /* Track the max number of EQEs processed in 1 intr */
- if (ecount > eq->EQ_max_eqe)
- eq->EQ_max_eqe = ecount;
-
-
- if (unlikely(ecount == 0)) {
- eq->EQ_no_entry++;
-
- if (phba->intr_type == MSIX)
- /* MSI-X treated interrupt served as no EQ share INT */
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "9145 MSI-X interrupt with no EQE\n");
- else {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "9146 ISR interrupt with no EQE\n");
- /* Non MSI-X treated on interrupt as EQ share INT */
- return IRQ_NONE;
- }
- }
- /* Always clear and re-arm the fast-path EQ */
- phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
- return IRQ_HANDLED;
+ __lpfc_sli4_hba_process_cq(cq);
}
/**
@@ -14392,10 +14290,11 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_hba *phba;
struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_queue *fpeq;
- struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
int hba_eqidx;
+ struct lpfc_eq_intr_info *eqi;
+ uint32_t icnt;
/* Get the driver's phba structure from the dev_id */
hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
@@ -14404,23 +14303,14 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
if (unlikely(!phba))
return IRQ_NONE;
- if (unlikely(!phba->sli4_hba.hba_eq))
+ if (unlikely(!phba->sli4_hba.hdwq))
return IRQ_NONE;
/* Get to the EQ struct associated with this vector */
- fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
+ fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq;
if (unlikely(!fpeq))
return IRQ_NONE;
- if (lpfc_fcp_look_ahead) {
- if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
- phba->sli4_hba.sli4_eq_clr_intr(fpeq);
- else {
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
- return IRQ_NONE;
- }
- }
-
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
/* Check again for link_state with lock held */
@@ -14429,36 +14319,25 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
/* Flush, clear interrupt, and rearm the EQ */
lpfc_sli4_eq_flush(phba, fpeq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (lpfc_fcp_look_ahead)
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
return IRQ_NONE;
}
- /*
- * Process all the event on FCP fast-path EQ
- */
- while ((eqe = lpfc_sli4_eq_get(fpeq))) {
- lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
- if (!(++ecount % fpeq->entry_repost))
- break;
- fpeq->EQ_processed++;
- }
+ eqi = phba->sli4_hba.eq_info;
+ icnt = this_cpu_inc_return(eqi->icnt);
+ fpeq->last_cpu = smp_processor_id();
- /* Track the max number of EQEs processed in 1 intr */
- if (ecount > fpeq->EQ_max_eqe)
- fpeq->EQ_max_eqe = ecount;
+ if (icnt > LPFC_EQD_ISR_TRIGGER &&
+ phba->cfg_irq_chann == 1 &&
+ phba->cfg_auto_imax &&
+ fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+ phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+ lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
- /* Always clear and re-arm the fast-path EQ */
- phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+ /* process and rearm the EQ */
+ ecount = lpfc_sli4_process_eq(phba, fpeq);
if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
-
- if (lpfc_fcp_look_ahead) {
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
- return IRQ_NONE;
- }
-
if (phba->intr_type == MSIX)
/* MSI-X treated interrupt served as no EQ share INT */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -14468,9 +14347,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- if (lpfc_fcp_look_ahead)
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
-
return IRQ_HANDLED;
} /* lpfc_sli4_fp_intr_handler */
@@ -14508,20 +14384,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
/*
* Invoke fast-path host attention interrupt handling as appropriate.
*/
- for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
&phba->sli4_hba.hba_eq_hdl[qidx]);
if (hba_irq_rc == IRQ_HANDLED)
hba_handled |= true;
}
- if (phba->cfg_fof) {
- hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
- &phba->sli4_hba.hba_eq_hdl[qidx]);
- if (hba_irq_rc == IRQ_HANDLED)
- hba_handled |= true;
- }
-
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
@@ -14553,6 +14422,9 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
kfree(queue->rqbp);
}
+ if (!list_empty(&queue->cpu_list))
+ list_del(&queue->cpu_list);
+
if (!list_empty(&queue->wq_list))
list_del(&queue->wq_list);
@@ -14601,6 +14473,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
INIT_LIST_HEAD(&queue->wqfull_list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
+ INIT_LIST_HEAD(&queue->cpu_list);
/* Set queue parameters now. If the system cannot provide memory
* resources, the free routine needs to know what was allocated.
@@ -14633,8 +14506,10 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
}
INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
+ INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
+ INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
- /* entry_repost will be set during q creation */
+ /* notify_interval will be set during q creation */
return queue;
out_fail:
@@ -14671,43 +14546,76 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
}
/**
- * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
- * @phba: HBA structure that indicates port to create a queue on.
- * @startq: The starting FCP EQ to modify
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
+ * @phba: HBA structure that EQs are on.
+ * @startq: The starting EQ index to modify
+ * @numq: The number of EQs (consecutive indexes) to modify
+ * @usdelay: amount of delay, in microseconds
*
- * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
- * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
- * updated in one mailbox command.
+ * This function revises the EQ delay on 1 or more EQs. The EQ delay
+ * is set either by writing to a register (if supported by the SLI Port)
+ * or by mailbox command. The mailbox command allows several EQs to be
+ * updated at once.
*
- * The @phba struct is used to send mailbox command to HBA. The @startq
- * is used to get the starting FCP EQ to change.
- * This function is asynchronous and will wait for the mailbox
- * command to finish before continuing.
+ * The @phba struct is used to send a mailbox command to the HBA. The
+ * @startq is used to get the starting EQ index to change. The @numq
+ * value specifies how many consecutive EQ indexes, starting at @startq,
+ * are to be changed. This function waits for any mailbox commands to
+ * finish before returning.
*
- * On success this function will return a zero. If unable to allocate enough
- * memory this function will return -ENOMEM. If the queue create mailbox command
- * fails this function will return -ENXIO.
+ * This function does not return a status; failures are logged. Note: if
+ * a mailbox command fails, some EQs may already have had their delay
+ * multiplier changed.
**/
-int
+void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
- uint32_t numq, uint32_t imax)
+ uint32_t numq, uint32_t usdelay)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
LPFC_MBOXQ_t *mbox;
struct lpfc_queue *eq;
- int cnt, rc, length, status = 0;
+ int cnt = 0, rc, length;
uint32_t shdr_status, shdr_add_status;
- uint32_t result, val;
+ uint32_t dmult;
int qidx;
union lpfc_sli4_cfg_shdr *shdr;
- uint16_t dmult;
- if (startq >= phba->io_channel_irqs)
- return 0;
+ if (startq >= phba->cfg_irq_chann)
+ return;
+
+ if (usdelay > 0xFFFF) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
+ "6429 usdelay %d too large. Scaled down to "
+ "0xFFFF.\n", usdelay);
+ usdelay = 0xFFFF;
+ }
+
+ /* set values by EQ_DELAY register if supported */
+ if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+ for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
+ eq = phba->sli4_hba.hdwq[qidx].hba_eq;
+ if (!eq)
+ continue;
+
+ lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
+
+ if (++cnt >= numq)
+ break;
+ }
+
+ return;
+ }
+
+ /* Otherwise, set values by mailbox cmd */
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
+ "6428 Failed allocating mailbox cmd buffer."
+ " EQ delay was not set.\n");
+ return;
+ }
length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
@@ -14716,45 +14624,22 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
eq_delay = &mbox->u.mqe.un.eq_delay;
/* Calculate delay multiplier from maximum interrupt per second */
- result = imax / phba->io_channel_irqs;
- if (result > LPFC_DMULT_CONST || result == 0)
- dmult = 0;
- else
- dmult = LPFC_DMULT_CONST/result - 1;
+ dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
+ if (dmult)
+ dmult--;
if (dmult > LPFC_DMULT_MAX)
dmult = LPFC_DMULT_MAX;
- cnt = 0;
- for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
- eq = phba->sli4_hba.hba_eq[qidx];
+ for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
+ eq = phba->sli4_hba.hdwq[qidx].hba_eq;
if (!eq)
continue;
- eq->q_mode = imax;
+ eq->q_mode = usdelay;
eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
eq_delay->u.request.eq[cnt].phase = 0;
eq_delay->u.request.eq[cnt].delay_multi = dmult;
- cnt++;
-
- /* q_mode is only used for auto_imax */
- if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
- /* Use EQ Delay Register method for q_mode */
-
- /* Convert for EQ Delay register */
- val = phba->cfg_fcp_imax;
- if (val) {
- /* First, interrupts per sec per EQ */
- val = phba->cfg_fcp_imax /
- phba->io_channel_irqs;
-
- /* us delay between each interrupt */
- val = LPFC_SEC_TO_USEC / val;
- }
- eq->q_mode = val;
- } else {
- eq->q_mode = imax;
- }
- if (cnt >= numq)
+ if (++cnt >= numq)
break;
}
eq_delay->u.request.num_eq = cnt;
@@ -14772,10 +14657,9 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
"2512 MODIFY_EQ_DELAY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
- status = -ENXIO;
}
mempool_free(mbox, phba->mbox_mem_pool);
- return status;
+ return;
}
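A worked example of the mailbox-path conversion above. The constant values are assumptions taken from the driver headers, not restated by this patch:

/* Assuming LPFC_DMULT_CONST = 651042, LPFC_SEC_TO_USEC = 1000000 and
 * LPFC_DMULT_MAX = 1023:
 *
 *   usdelay = 1000 -> dmult = (1000 * 651042) / 1000000 = 651, then
 *                     dmult-- gives 650, within LPFC_DMULT_MAX
 *   usdelay = 100  -> dmult = 65, then 64
 *
 * Larger results are clamped to LPFC_DMULT_MAX before being written into
 * each eq_delay->u.request.eq[] entry.
 */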
/**
@@ -14900,8 +14784,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
if (eq->queue_id == 0xFFFF)
status = -ENXIO;
eq->host_index = 0;
- eq->hba_index = 0;
- eq->entry_repost = LPFC_EQ_REPOST;
+ eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
+ eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
mempool_free(mbox, phba->mbox_mem_pool);
return status;
@@ -15039,10 +14923,13 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->subtype = subtype;
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
cq->assoc_qid = eq->queue_id;
+ cq->assoc_qp = eq;
cq->host_index = 0;
- cq->hba_index = 0;
- cq->entry_repost = LPFC_CQ_REPOST;
+ cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
+ cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
+ if (cq->queue_id > phba->sli4_hba.cq_max)
+ phba->sli4_hba.cq_max = cq->queue_id;
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
@@ -15052,7 +14939,7 @@ out:
* lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
* @phba: HBA structure that indicates port to create a queue on.
* @cqp: The queue structure array to use to create the completion queues.
- * @eqp: The event queue array to bind these completion queues to.
+ * @hdwq: The hardware queue array with the EQ to bind completion queues to.
*
* This function creates a set of completion queues to support MRQ
* as detailed in @cqp, on a port,
@@ -15072,7 +14959,8 @@ out:
**/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
- struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
+ struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
+ uint32_t subtype)
{
struct lpfc_queue *cq;
struct lpfc_queue *eq;
@@ -15087,7 +14975,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
/* sanity check on queue memory */
numcq = phba->cfg_nvmet_mrq;
- if (!cqp || !eqp || !numcq)
+ if (!cqp || !hdwq || !numcq)
return -ENODEV;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -15114,7 +15002,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
for (idx = 0; idx < numcq; idx++) {
cq = cqp[idx];
- eq = eqp[idx];
+ eq = hdwq[idx].hba_eq;
if (!cq || !eq) {
status = -ENOMEM;
goto out;
@@ -15247,9 +15135,11 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cq->type = type;
cq->subtype = subtype;
cq->assoc_qid = eq->queue_id;
+ cq->assoc_qp = eq;
cq->host_index = 0;
- cq->hba_index = 0;
- cq->entry_repost = LPFC_CQ_REPOST;
+ cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
+ cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
+ cq->entry_count);
cq->chann = idx;
rc = 0;
@@ -15287,6 +15177,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
for (idx = 0; idx < numcq; idx++) {
cq = cqp[idx];
cq->queue_id = rc + idx;
+ if (cq->queue_id > phba->sli4_hba.cq_max)
+ phba->sli4_hba.cq_max = cq->queue_id;
}
out:
@@ -15499,7 +15391,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
mq->subtype = subtype;
mq->host_index = 0;
mq->hba_index = 0;
- mq->entry_repost = LPFC_MQ_REPOST;
/* link the mq onto the parent cq child list */
list_add_tail(&mq->list, &cq->child_list);
@@ -15765,7 +15656,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->subtype = subtype;
wq->host_index = 0;
wq->hba_index = 0;
- wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
+ wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
@@ -15959,7 +15850,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
hrq->subtype = subtype;
hrq->host_index = 0;
hrq->hba_index = 0;
- hrq->entry_repost = LPFC_RQ_REPOST;
+ hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
/* now create the data queue */
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -16052,7 +15943,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
drq->subtype = subtype;
drq->host_index = 0;
drq->hba_index = 0;
- drq->entry_repost = LPFC_RQ_REPOST;
+ drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
/* link the header and data RQs onto the parent cq child list */
list_add_tail(&hrq->list, &cq->child_list);
@@ -16210,7 +16101,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
hrq->subtype = subtype;
hrq->host_index = 0;
hrq->hba_index = 0;
- hrq->entry_repost = LPFC_RQ_REPOST;
+ hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
drq->db_format = LPFC_DB_RING_FORMAT;
drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -16219,7 +16110,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
drq->subtype = subtype;
drq->host_index = 0;
drq->hba_index = 0;
- drq->entry_repost = LPFC_RQ_REPOST;
+ drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
list_add_tail(&hrq->list, &cq->child_list);
list_add_tail(&drq->list, &cq->child_list);
@@ -16279,6 +16170,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
/* sanity check on queue memory */
if (!eq)
return -ENODEV;
+
mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -16828,22 +16720,21 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
+ * lpfc_sli4_post_io_sgl_block - post a block of nvme sgls to firmware
* @phba: pointer to lpfc hba data structure.
- * @sblist: pointer to scsi buffer list.
+ * @nblist: pointer to nvme buffer list.
* @count: number of scsi buffers on the list.
*
* This routine is invoked to post a block of @count scsi sgl pages from a
- * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * buffer list @nblist to the HBA using a non-embedded mailbox command.
* No Lock is held.
*
**/
-int
-lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
- struct list_head *sblist,
- int count)
+static int
+lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
+ int count)
{
- struct lpfc_scsi_buf *psb;
+ struct lpfc_io_buf *lpfc_ncmd;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
struct sgl_page_pairs *sgl_pg_pairs;
void *viraddr;
@@ -16861,25 +16752,25 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0217 Block sgl registration required DMA "
+ "6118 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
return -ENOMEM;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0283 Failed to allocate mbox cmd memory\n");
+ "6119 Failed to allocate mbox cmd memory\n");
return -ENOMEM;
}
/* Allocate DMA memory and set up the non-embedded mailbox command */
alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
- LPFC_SLI4_MBX_NEMBED);
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen, LPFC_SLI4_MBX_NEMBED);
if (alloclen < reqlen) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2561 Allocated DMA memory size (%d) is "
+ "6120 Allocated DMA memory size (%d) is "
"less than the requested DMA memory "
"size (%d)\n", alloclen, reqlen);
lpfc_sli4_mbox_cmd_free(phba, mbox);
@@ -16894,14 +16785,15 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
sgl_pg_pairs = &sgl->sgl_pg_pairs;
pg_pairs = 0;
- list_for_each_entry(psb, sblist, list) {
+ list_for_each_entry(lpfc_ncmd, nblist, list) {
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
- cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+ cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
sgl_pg_pairs->sgl_pg0_addr_hi =
- cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+ cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
- pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
+ pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
+ SGL_PAGE_SIZE;
else
pdma_phys_bpl1 = 0;
sgl_pg_pairs->sgl_pg1_addr_lo =
@@ -16910,7 +16802,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
/* Keep the first xritag on the list */
if (pg_pairs == 0)
- xritag_start = psb->cur_iocbq.sli4_xritag;
+ xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
sgl_pg_pairs++;
pg_pairs++;
}
@@ -16919,20 +16811,20 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
/* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);
- if (!phba->sli4_hba.intr_enable)
+ if (!phba->sli4_hba.intr_enable) {
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else {
+ } else {
mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
- shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (rc != MBX_TIMEOUT)
lpfc_sli4_mbox_cmd_free(phba, mbox);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2564 POST_SGL_BLOCK mailbox command failed "
+ "6125 POST_SGL_BLOCK mailbox command failed "
"status x%x add_status x%x mbx status x%x\n",
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
@@ -16941,6 +16833,134 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
+ * @phba: pointer to lpfc hba data structure.
+ * @post_nblist: pointer to the nvme buffer list.
+ * @sb_count: number of nvme buffers on @post_nblist.
+ *
+ * This routine walks a list of nvme buffers that was passed in. It attempts
+ * to construct blocks of nvme buffer sgls which contain contiguous xris and
+ * uses the non-embedded SGL block post mailbox commands to post them to the
+ * port. For a single NVME buffer sgl with a non-contiguous xri, it uses the
+ * embedded SGL post mailbox command for posting. The @post_nblist passed in
+ * must be a local list, thus no lock is needed when manipulating the list.
+ *
+ * Returns: 0 on failure, otherwise the number of successfully posted buffers.
+ **/
+int
+lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
+ struct list_head *post_nblist, int sb_count)
+{
+ struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
+ int status, sgl_size;
+ int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
+ dma_addr_t pdma_phys_sgl1;
+ int last_xritag = NO_XRI;
+ int cur_xritag;
+ LIST_HEAD(prep_nblist);
+ LIST_HEAD(blck_nblist);
+ LIST_HEAD(nvme_nblist);
+
+ /* sanity check */
+ if (sb_count <= 0)
+ return -EINVAL;
+
+ sgl_size = phba->cfg_sg_dma_buf_size;
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
+ list_del_init(&lpfc_ncmd->list);
+ block_cnt++;
+ if ((last_xritag != NO_XRI) &&
+ (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
+ /* a hole in xri block, form a sgl posting block */
+ list_splice_init(&prep_nblist, &blck_nblist);
+ post_cnt = block_cnt - 1;
+ /* prepare list for next posting block */
+ list_add_tail(&lpfc_ncmd->list, &prep_nblist);
+ block_cnt = 1;
+ } else {
+ /* prepare list for next posting block */
+ list_add_tail(&lpfc_ncmd->list, &prep_nblist);
+ /* enough sgls for non-embed sgl mbox command */
+ if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+ list_splice_init(&prep_nblist, &blck_nblist);
+ post_cnt = block_cnt;
+ block_cnt = 0;
+ }
+ }
+ num_posting++;
+ last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
+
+ /* end of repost sgl list condition for NVME buffers */
+ if (num_posting == sb_count) {
+ if (post_cnt == 0) {
+ /* last sgl posting block */
+ list_splice_init(&prep_nblist, &blck_nblist);
+ post_cnt = block_cnt;
+ } else if (block_cnt == 1) {
+ /* last single sgl with non-contiguous xri */
+ if (sgl_size > SGL_PAGE_SIZE)
+ pdma_phys_sgl1 =
+ lpfc_ncmd->dma_phys_sgl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_sgl1 = 0;
+ cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
+ status = lpfc_sli4_post_sgl(
+ phba, lpfc_ncmd->dma_phys_sgl,
+ pdma_phys_sgl1, cur_xritag);
+ if (status) {
+ /* Post error. Buffer unavailable. */
+ lpfc_ncmd->flags |=
+ LPFC_SBUF_NOT_POSTED;
+ } else {
+ /* Post success. Buffer available. */
+ lpfc_ncmd->flags &=
+ ~LPFC_SBUF_NOT_POSTED;
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ num_posted++;
+ }
+ /* success, put on NVME buffer sgl list */
+ list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
+ }
+ }
+
+ /* continue until a non-embedded page's worth of sgls */
+ if (post_cnt == 0)
+ continue;
+
+ /* post block of NVME buffer list sgls */
+ status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
+ post_cnt);
+
+ /* don't reset xritag due to hole in xri block */
+ if (block_cnt == 0)
+ last_xritag = NO_XRI;
+
+ /* reset NVME buffer post count for next round of posting */
+ post_cnt = 0;
+
+ /* put block-posted NVME buffers on the NVME buffer sgl list */
+ while (!list_empty(&blck_nblist)) {
+ list_remove_head(&blck_nblist, lpfc_ncmd,
+ struct lpfc_io_buf, list);
+ if (status) {
+ /* Post error. Mark buffer unavailable. */
+ lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
+ } else {
+ /* Post success, Mark buffer available. */
+ lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ num_posted++;
+ }
+ list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
+ }
+ }
+ /* Push NVME buffers with sgl posted to the available list */
+ lpfc_io_buf_replenish(phba, &nvme_nblist);
+
+ return num_posted;
+}
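The batching rule above — start a new posting block whenever the next xri is not last_xritag + 1, or when a non-embedded mailbox is full — can be shown in isolation. This sketch is purely illustrative: it works on plain integers, and SGL_BLOCK_MAX is a hypothetical stand-in for LPFC_NEMBED_MBOX_SGL_CNT:

#include <stdio.h>

#define SGL_BLOCK_MAX 4	/* hypothetical stand-in */

/* Print maximal runs of consecutive tags, mirroring how the routine
 * above carves the buffer list into non-embedded posting blocks.
 */
static void group_xri_runs(const int *xri, int n)
{
	int start = 0;

	for (int i = 1; i <= n; i++) {
		int hole = (i == n) || (xri[i] != xri[i - 1] + 1);
		int full = (i - start) == SGL_BLOCK_MAX;

		if (hole || full) {
			printf("post block: xri %d..%d (%d sgls)\n",
			       xri[start], xri[i - 1], i - start);
			start = i;
		}
	}
}

With xri[] = {10, 11, 12, 20, 21}, this prints one block for 10..12 and one for 20..21, matching the hole-in-xri handling above.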
+
+/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -19500,7 +19520,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQE are posted only to first WQ*/
- wq = phba->sli4_hba.fcp_wq[0];
+ wq = phba->sli4_hba.hdwq[0].fcp_wq;
if (unlikely(!wq))
return 0;
pring = wq->pring;
@@ -19708,7 +19728,7 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
* @pwqe: Pointer to command WQE.
**/
int
-lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
+lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe)
{
union lpfc_wqe128 *wqe = &pwqe->wqe;
@@ -19722,7 +19742,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
/* NVME_LS and NVME_LS ABTS requests. */
if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
pring = phba->sli4_hba.nvmels_wq->pring;
- spin_lock_irqsave(&pring->ring_lock, iflags);
+ lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
+ qp, wq_access);
sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
if (!sglq) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -19750,12 +19771,13 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
/* NVME_FCREQ and NVME_ABTS requests */
if (pwqe->iocb_flag & LPFC_IO_NVME) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
- pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+ wq = qp->nvme_wq;
+ pring = wq->pring;
- spin_lock_irqsave(&pring->ring_lock, iflags);
- wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
- bf_set(wqe_cqid, &wqe->generic.wqe_com,
- phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
+
+ lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
+ qp, wq_access);
ret = lpfc_sli4_wq_put(wq, wqe);
if (ret) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -19769,9 +19791,9 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
/* NVMET requests */
if (pwqe->iocb_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
- pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+ wq = qp->nvme_wq;
+ pring = wq->pring;
- spin_lock_irqsave(&pring->ring_lock, iflags);
ctxp = pwqe->context2;
sglq = ctxp->ctxbuf->sglq;
if (pwqe->sli4_xritag == NO_XRI) {
@@ -19780,9 +19802,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
}
bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
pwqe->sli4_xritag);
- wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
- bf_set(wqe_cqid, &wqe->generic.wqe_com,
- phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
+
+ lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
+ qp, wq_access);
ret = lpfc_sli4_wq_put(wq, wqe);
if (ret) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -19794,3 +19817,647 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
}
return WQE_ERROR;
}
+
+#ifdef LPFC_MXP_STAT
+/**
+ * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the owning HWQ.
+ *
+ * The purpose of this routine is to take a snapshot of pbl, pvt and busy
+ * counts 15 seconds after a test case starts running.
+ *
+ * The user should call lpfc_debugfs_multixripools_write before running a test
+ * case to clear stat_snapshot_taken. Then the user starts a test case. While
+ * the test case is running, stat_snapshot_taken is incremented by 1 each time
+ * this routine is called from the heartbeat timer. When stat_snapshot_taken
+ * equals LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
+ **/
+void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
+{
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_multixri_pool *multixri_pool;
+ struct lpfc_pvt_pool *pvt_pool;
+ struct lpfc_pbl_pool *pbl_pool;
+ u32 txcmplq_cnt;
+
+ qp = &phba->sli4_hba.hdwq[hwqid];
+ multixri_pool = qp->p_multixri_pool;
+ if (!multixri_pool)
+ return;
+
+ if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
+ pvt_pool = &qp->p_multixri_pool->pvt_pool;
+ pbl_pool = &qp->p_multixri_pool->pbl_pool;
+ txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ if (qp->nvme_wq)
+ txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+
+ multixri_pool->stat_pbl_count = pbl_pool->count;
+ multixri_pool->stat_pvt_count = pvt_pool->count;
+ multixri_pool->stat_busy_count = txcmplq_cnt;
+ }
+
+ multixri_pool->stat_snapshot_taken++;
+}
+#endif
+
+/**
+ * lpfc_adjust_pvt_pool_count - Adjust private pool count
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the owning HWQ.
+ *
+ * This routine moves some XRIs from the private to the public pool when
+ * the private pool is not busy.
+ **/
+void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
+{
+ struct lpfc_multixri_pool *multixri_pool;
+ u32 io_req_count;
+ u32 prev_io_req_count;
+
+ multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
+ if (!multixri_pool)
+ return;
+ io_req_count = multixri_pool->io_req_count;
+ prev_io_req_count = multixri_pool->prev_io_req_count;
+
+ if (prev_io_req_count != io_req_count) {
+ /* Private pool is busy */
+ multixri_pool->prev_io_req_count = io_req_count;
+ } else {
+ /* Private pool is not busy.
+ * Move XRIs from private to public pool.
+ */
+ lpfc_move_xri_pvt_to_pbl(phba, hwqid);
+ }
+}
+
+/**
+ * lpfc_adjust_high_watermark - Adjust high watermark
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the owning HWQ.
+ *
+ * This routine sets the high watermark to the number of outstanding XRIs,
+ * but makes sure the new value stays between xri_limit/2 and xri_limit.
+ **/
+void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
+{
+ u32 new_watermark;
+ u32 watermark_max;
+ u32 watermark_min;
+ u32 xri_limit;
+ u32 txcmplq_cnt;
+ u32 abts_io_bufs;
+ struct lpfc_multixri_pool *multixri_pool;
+ struct lpfc_sli4_hdw_queue *qp;
+
+ qp = &phba->sli4_hba.hdwq[hwqid];
+ multixri_pool = qp->p_multixri_pool;
+ if (!multixri_pool)
+ return;
+ xri_limit = multixri_pool->xri_limit;
+
+ watermark_max = xri_limit;
+ watermark_min = xri_limit / 2;
+
+ txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ abts_io_bufs = qp->abts_scsi_io_bufs;
+ if (qp->nvme_wq) {
+ txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+ abts_io_bufs += qp->abts_nvme_io_bufs;
+ }
+
+ new_watermark = txcmplq_cnt + abts_io_bufs;
+ new_watermark = min(watermark_max, new_watermark);
+ new_watermark = max(watermark_min, new_watermark);
+ multixri_pool->pvt_pool.high_watermark = new_watermark;
+
+#ifdef LPFC_MXP_STAT
+ multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
+ new_watermark);
+#endif
+}
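A hedged numeric walk-through of the clamp above, with all values assumed:

/* Assume xri_limit = 512, so the watermark window is [256, 512]:
 *   txcmplq_cnt + abts_io_bufs = 100 -> watermark raised to 256
 *   txcmplq_cnt + abts_io_bufs = 400 -> watermark set to 400
 *   txcmplq_cnt + abts_io_bufs = 700 -> watermark capped at 512
 */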
+
+/**
+ * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the owning HWQ.
+ *
+ * This routine is called from the heartbeat timer when pvt_pool is idle.
+ * All free XRIs are moved from the private to the public pool on hwqid in
+ * two steps: the first step moves (all - low_watermark) XRIs; the second
+ * step moves the rest.
+ **/
+void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
+{
+ struct lpfc_pbl_pool *pbl_pool;
+ struct lpfc_pvt_pool *pvt_pool;
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd_next;
+ unsigned long iflag;
+ struct list_head tmp_list;
+ u32 tmp_count;
+
+ qp = &phba->sli4_hba.hdwq[hwqid];
+ pbl_pool = &qp->p_multixri_pool->pbl_pool;
+ pvt_pool = &qp->p_multixri_pool->pvt_pool;
+ tmp_count = 0;
+
+ lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
+ lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
+
+ if (pvt_pool->count > pvt_pool->low_watermark) {
+ /* Step 1: move (all - low_watermark) from pvt_pool
+ * to pbl_pool
+ */
+
+ /* Move low watermark of bufs from pvt_pool to tmp_list */
+ INIT_LIST_HEAD(&tmp_list);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &pvt_pool->list, list) {
+ list_move_tail(&lpfc_ncmd->list, &tmp_list);
+ tmp_count++;
+ if (tmp_count >= pvt_pool->low_watermark)
+ break;
+ }
+
+ /* Move all bufs from pvt_pool to pbl_pool */
+ list_splice_init(&pvt_pool->list, &pbl_pool->list);
+
+ /* Move all bufs from tmp_list to pvt_pool */
+ list_splice(&tmp_list, &pvt_pool->list);
+
+ pbl_pool->count += (pvt_pool->count - tmp_count);
+ pvt_pool->count = tmp_count;
+ } else {
+ /* Step 2: move the rest from pvt_pool to pbl_pool */
+ list_splice_init(&pvt_pool->list, &pbl_pool->list);
+ pbl_pool->count += pvt_pool->count;
+ pvt_pool->count = 0;
+ }
+
+ spin_unlock(&pvt_pool->lock);
+ spin_unlock_irqrestore(&pbl_pool->lock, iflag);
+}
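A hedged numeric example of the two-step drain above (values assumed):

/* Assume pvt_pool->count = 100 and low_watermark = 30.
 * First idle heartbeat tick: count > low_watermark, so 70 bufs
 * (all - low_watermark) move to pbl_pool and 30 stay private.
 * Next idle tick: count (30) is not above low_watermark, so the
 * remaining 30 move too, leaving the private pool empty.
 */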
+
+/**
+ * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
+ * @phba: pointer to lpfc hba data structure
+ * @qp: pointer to the HDW queue that owns the pools
+ * @pbl_pool: specified public free XRI pool
+ * @pvt_pool: specified private free XRI pool
+ * @count: number of XRIs to move
+ *
+ * This routine tries to move some free common bufs from the specified
+ * pbl_pool to the specified pvt_pool. It may move fewer than count XRIs
+ * if the public pool does not have enough.
+ *
+ * Return:
+ * true - if XRIs are successfully moved from the specified pbl_pool to the
+ * specified pvt_pool
+ * false - if the specified pbl_pool is empty or locked by someone else
+ **/
+static bool
+_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
+ struct lpfc_pbl_pool *pbl_pool,
+ struct lpfc_pvt_pool *pvt_pool, u32 count)
+{
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd_next;
+ unsigned long iflag;
+ int ret;
+
+ ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
+ if (ret) {
+ if (pbl_pool->count) {
+ /* Move a batch of XRIs from public to private pool */
+ lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
+ list_for_each_entry_safe(lpfc_ncmd,
+ lpfc_ncmd_next,
+ &pbl_pool->list,
+ list) {
+ list_move_tail(&lpfc_ncmd->list,
+ &pvt_pool->list);
+ pvt_pool->count++;
+ pbl_pool->count--;
+ count--;
+ if (count == 0)
+ break;
+ }
+
+ spin_unlock(&pvt_pool->lock);
+ spin_unlock_irqrestore(&pbl_pool->lock, iflag);
+ return true;
+ }
+ spin_unlock_irqrestore(&pbl_pool->lock, iflag);
+ }
+
+ return false;
+}
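The spin_trylock_irqsave() above is what makes the public pools cheap to probe from any CPU. A minimal sketch of the pattern, using a hypothetical helper:

/* Non-blocking take: if another CPU holds this pool's lock, give up
 * immediately so the caller can probe the next hardware queue instead
 * of spinning here.
 */
static bool try_take_batch(spinlock_t *lock, struct list_head *src,
			   struct list_head *dst)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(lock, flags))
		return false;
	list_splice_init(src, dst);	/* take whatever is available */
	spin_unlock_irqrestore(lock, flags);
	return true;
}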
+
+/**
+ * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the owning HWQ.
+ * @count: number of XRIs to move
+ *
+ * This routine tries to find some free common bufs in one of the public
+ * pools with a round-robin method. The search always starts from the local
+ * hwqid, then from the HWQ found last time (rrb_next_hwqid). Once a public
+ * pool with free bufs is found, a batch of them is moved to the private
+ * pool on hwqid. It may move fewer than count XRIs if the public pool does
+ * not have enough.
+ **/
+void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
+{
+ struct lpfc_multixri_pool *multixri_pool;
+ struct lpfc_multixri_pool *next_multixri_pool;
+ struct lpfc_pvt_pool *pvt_pool;
+ struct lpfc_pbl_pool *pbl_pool;
+ struct lpfc_sli4_hdw_queue *qp;
+ u32 next_hwqid;
+ u32 hwq_count;
+ int ret;
+
+ qp = &phba->sli4_hba.hdwq[hwqid];
+ multixri_pool = qp->p_multixri_pool;
+ pvt_pool = &multixri_pool->pvt_pool;
+ pbl_pool = &multixri_pool->pbl_pool;
+
+ /* Check if local pbl_pool is available */
+ ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
+ if (ret) {
+#ifdef LPFC_MXP_STAT
+ multixri_pool->local_pbl_hit_count++;
+#endif
+ return;
+ }
+
+ hwq_count = phba->cfg_hdw_queue;
+
+ /* Get the next hwqid which was found last time */
+ next_hwqid = multixri_pool->rrb_next_hwqid;
+
+ do {
+ /* Go to next hwq */
+ next_hwqid = (next_hwqid + 1) % hwq_count;
+
+ next_multixri_pool =
+ phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
+ pbl_pool = &next_multixri_pool->pbl_pool;
+
+ /* Check if the public free xri pool is available */
+ ret = _lpfc_move_xri_pbl_to_pvt(
+ phba, qp, pbl_pool, pvt_pool, count);
+
+ /* Exit the loop on success or once all hwqids have been checked */
+ } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
+
+ /* Starting point for the next time */
+ multixri_pool->rrb_next_hwqid = next_hwqid;
+
+ if (!ret) {
+ /* stats: all public pools are empty */
+ multixri_pool->pbl_empty_count++;
+ }
+
+#ifdef LPFC_MXP_STAT
+ if (ret) {
+ if (next_hwqid == hwqid)
+ multixri_pool->local_pbl_hit_count++;
+ else
+ multixri_pool->other_pbl_hit_count++;
+ }
+#endif
+}
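
The do/while above makes at most one full lap over the HWQ ring: it advances with modulo wraparound and stops on success or when it returns to the remembered starting point. A minimal standalone sketch of that termination logic, with invented pool states:

#include <stdbool.h>
#include <stdio.h>

#define HWQ_COUNT 4

static unsigned pbl_count[HWQ_COUNT] = { 0, 0, 3, 0 };	/* invented */

static bool try_take(unsigned hwq)
{
	if (pbl_count[hwq] == 0)
		return false;
	pbl_count[hwq]--;
	return true;
}

int main(void)
{
	unsigned start = 0, next = start;	/* rrb_next_hwqid analogue */
	bool ret;

	do {
		next = (next + 1) % HWQ_COUNT;	/* wrap around the ring */
		ret = try_take(next);
	} while (!ret && next != start);	/* at most one full lap */

	printf("ret=%d stopped at hwq %u\n", ret, next);	/* ret=1 ... hwq 2 */
	return 0;
}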
+
+/**
+ * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the HWQ whose private pool is checked.
+ *
+ * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool count is
+ * below the low watermark.
+ **/
+void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
+{
+ struct lpfc_multixri_pool *multixri_pool;
+ struct lpfc_pvt_pool *pvt_pool;
+
+ multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
+ pvt_pool = &multixri_pool->pvt_pool;
+
+ if (pvt_pool->count < pvt_pool->low_watermark)
+ lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
+}
+
+/**
+ * lpfc_release_io_buf - Return one IO buf back to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @lpfc_ncmd: IO buf to be returned.
+ * @qp: pointer to the HWQ the buf belongs to.
+ *
+ * This routine returns one IO buf back to the free pool. If
+ * cfg_xri_rebalancing==1, an urgent IO buf is returned to the expedite pool;
+ * otherwise the IO buf is returned to the pbl_pool or pvt_pool based on
+ * watermark and xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned
+ * to lpfc_io_buf_list_put.
+ **/
+void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
+ struct lpfc_sli4_hdw_queue *qp)
+{
+ unsigned long iflag;
+ struct lpfc_pbl_pool *pbl_pool;
+ struct lpfc_pvt_pool *pvt_pool;
+ struct lpfc_epd_pool *epd_pool;
+ u32 txcmplq_cnt;
+ u32 xri_owned;
+ u32 xri_limit;
+ u32 abts_io_bufs;
+
+ /* MUST zero fields if buffer is reused by another protocol */
+ lpfc_ncmd->nvmeCmd = NULL;
+ lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
+ lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+
+ if (phba->cfg_xri_rebalancing) {
+ if (lpfc_ncmd->expedite) {
+ /* Return to expedite pool */
+ epd_pool = &phba->epd_pool;
+ spin_lock_irqsave(&epd_pool->lock, iflag);
+ list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
+ epd_pool->count++;
+ spin_unlock_irqrestore(&epd_pool->lock, iflag);
+ return;
+ }
+
+ /* Avoid invalid access if an IO sneaks in and is being rejected
+ * just _after_ xri pools are destroyed in lpfc_offline.
+ * Nothing much can be done at this point.
+ */
+ if (!qp->p_multixri_pool)
+ return;
+
+ pbl_pool = &qp->p_multixri_pool->pbl_pool;
+ pvt_pool = &qp->p_multixri_pool->pvt_pool;
+
+ txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ abts_io_bufs = qp->abts_scsi_io_bufs;
+ if (qp->nvme_wq) {
+ txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+ abts_io_bufs += qp->abts_nvme_io_bufs;
+ }
+
+ xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
+ xri_limit = qp->p_multixri_pool->xri_limit;
+
+#ifdef LPFC_MXP_STAT
+ if (xri_owned <= xri_limit)
+ qp->p_multixri_pool->below_limit_count++;
+ else
+ qp->p_multixri_pool->above_limit_count++;
+#endif
+
+ /* XRI goes to either public or private free xri pool
+ * based on watermark and xri_limit
+ */
+ if ((pvt_pool->count < pvt_pool->low_watermark) ||
+ (xri_owned < xri_limit &&
+ pvt_pool->count < pvt_pool->high_watermark)) {
+ lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
+ qp, free_pvt_pool);
+ list_add_tail(&lpfc_ncmd->list,
+ &pvt_pool->list);
+ pvt_pool->count++;
+ spin_unlock_irqrestore(&pvt_pool->lock, iflag);
+ } else {
+ lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
+ qp, free_pub_pool);
+ list_add_tail(&lpfc_ncmd->list,
+ &pbl_pool->list);
+ pbl_pool->count++;
+ spin_unlock_irqrestore(&pbl_pool->lock, iflag);
+ }
+ } else {
+ lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
+ qp, free_xri);
+ list_add_tail(&lpfc_ncmd->list,
+ &qp->lpfc_io_buf_list_put);
+ qp->put_io_bufs++;
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
+ iflag);
+ }
+}
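
The pvt-vs-pbl decision above reduces to one predicate: keep the XRI private while the private pool is under its low watermark, or while the HWQ owns fewer XRIs than xri_limit and the private pool is under its high watermark. A self-contained sketch with invented numbers:

#include <stdbool.h>
#include <stdio.h>

struct pool_state {
	unsigned pvt_count, low_wm, high_wm;
	unsigned xri_owned, xri_limit;
};

/* true -> return the buf to the private pool, false -> to the public pool */
static bool route_to_private(const struct pool_state *s)
{
	return s->pvt_count < s->low_wm ||
	       (s->xri_owned < s->xri_limit && s->pvt_count < s->high_wm);
}

int main(void)
{
	struct pool_state s = {
		.pvt_count = 10, .low_wm = 8, .high_wm = 24,
		.xri_owned = 30, .xri_limit = 32,	/* invented values */
	};

	printf("%s\n", route_to_private(&s) ? "private" : "public"); /* private */
	s.xri_owned = 40;	/* over the limit and above the low watermark */
	printf("%s\n", route_to_private(&s) ? "private" : "public"); /* public */
	return 0;
}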
+
+/**
+ * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
+ * @phba: pointer to lpfc hba data structure.
+ * @qp: pointer to the HWQ owning the private pool.
+ * @pvt_pool: pointer to private pool data structure.
+ * @ndlp: pointer to lpfc nodelist data structure.
+ *
+ * This routine tries to get one free IO buf from private pool.
+ *
+ * Return:
+ * pointer to one free IO buf - if private pool is not empty
+ * NULL - if private pool is empty
+ **/
+static struct lpfc_io_buf *
+lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *qp,
+ struct lpfc_pvt_pool *pvt_pool,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd_next;
+ unsigned long iflag;
+
+ lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &pvt_pool->list, list) {
+ if (lpfc_test_rrq_active(
+ phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
+ continue;
+ list_del(&lpfc_ncmd->list);
+ pvt_pool->count--;
+ spin_unlock_irqrestore(&pvt_pool->lock, iflag);
+ return lpfc_ncmd;
+ }
+ spin_unlock_irqrestore(&pvt_pool->lock, iflag);
+
+ return NULL;
+}
+
+/**
+ * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine tries to get one free IO buf from expedite pool.
+ *
+ * Return:
+ * pointer to one free IO buf - if expedite pool is not empty
+ * NULL - if expedite pool is empty
+ **/
+static struct lpfc_io_buf *
+lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
+{
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd_next;
+ unsigned long iflag;
+ struct lpfc_epd_pool *epd_pool;
+
+ epd_pool = &phba->epd_pool;
+ lpfc_ncmd = NULL;
+
+ spin_lock_irqsave(&epd_pool->lock, iflag);
+ if (epd_pool->count > 0) {
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &epd_pool->list, list) {
+ list_del(&lpfc_ncmd->list);
+ epd_pool->count--;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&epd_pool->lock, iflag);
+
+ return lpfc_ncmd;
+}
+
+/**
+ * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
+ * @phba: pointer to lpfc hba data structure.
+ * @ndlp: pointer to lpfc nodelist data structure.
+ * @hwqid: index of the HWQ the request belongs to
+ * @expedite: 1 means this request is urgent.
+ *
+ * This routine will do the following actions and then return a pointer to
+ * one free IO buf.
+ *
+ * 1. If the private free xri pool is empty, move some XRIs from public to
+ * private pool.
+ * 2. Get one XRI from private free xri pool.
+ * 3. If we fail to get one from pvt_pool and this is an expedite request,
+ * get one free xri from expedite pool.
+ *
+ * Note: ndlp is only used on SCSI side for RRQ testing.
+ * The caller should pass NULL for ndlp on NVME side.
+ *
+ * Return:
+ * pointer to one free IO buf - if a free IO buf is available
+ * NULL - if no free IO buf is available
+ **/
+static struct lpfc_io_buf *
+lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp,
+ int hwqid, int expedite)
+{
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_multixri_pool *multixri_pool;
+ struct lpfc_pvt_pool *pvt_pool;
+ struct lpfc_io_buf *lpfc_ncmd;
+
+ qp = &phba->sli4_hba.hdwq[hwqid];
+ lpfc_ncmd = NULL;
+ multixri_pool = qp->p_multixri_pool;
+ pvt_pool = &multixri_pool->pvt_pool;
+ multixri_pool->io_req_count++;
+
+ /* If pvt_pool is empty, move some XRIs from public to private pool */
+ if (pvt_pool->count == 0)
+ lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
+
+ /* Get one XRI from private free xri pool */
+ lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
+
+ if (lpfc_ncmd) {
+ lpfc_ncmd->hdwq = qp;
+ lpfc_ncmd->hdwq_no = hwqid;
+ } else if (expedite) {
+ /* If we fail to get one from pvt_pool and this is an expedite
+ * request, get one free xri from expedite pool.
+ */
+ lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
+ }
+
+ return lpfc_ncmd;
+}
+
+static inline struct lpfc_io_buf *
+lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
+{
+ struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
+
+ qp = &phba->sli4_hba.hdwq[idx];
+ list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
+ &qp->lpfc_io_buf_list_get, list) {
+ if (lpfc_test_rrq_active(phba, ndlp,
+ lpfc_cmd->cur_iocbq.sli4_lxritag))
+ continue;
+
+ if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
+ continue;
+
+ list_del_init(&lpfc_cmd->list);
+ qp->get_io_bufs--;
+ lpfc_cmd->hdwq = qp;
+ lpfc_cmd->hdwq_no = idx;
+ return lpfc_cmd;
+ }
+ return NULL;
+}
+
+/**
+ * lpfc_get_io_buf - Get one IO buffer from free pool
+ * @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to lpfc nodelist data structure.
+ * @hwqid: index of the HWQ the request belongs to
+ * @expedite: 1 means this request is urgent.
+ *
+ * This routine gets one IO buffer from the free pool. If
+ * cfg_xri_rebalancing==1, it removes an IO buffer from the multiXRI pools. If
+ * cfg_xri_rebalancing==0, it removes an IO buffer from the head of the @hwqid
+ * io_buf_list and returns it to the caller.
+ *
+ * Note: ndlp is only used on SCSI side for RRQ testing.
+ * The caller should pass NULL for ndlp on NVME side.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_io_buf - Success
+ **/
+struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp,
+ u32 hwqid, int expedite)
+{
+ struct lpfc_sli4_hdw_queue *qp;
+ unsigned long iflag;
+ struct lpfc_io_buf *lpfc_cmd;
+
+ qp = &phba->sli4_hba.hdwq[hwqid];
+ lpfc_cmd = NULL;
+
+ if (phba->cfg_xri_rebalancing)
+ lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
+ phba, ndlp, hwqid, expedite);
+ else {
+ lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
+ qp, alloc_xri_get);
+ if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
+ if (!lpfc_cmd) {
+ lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
+ qp, alloc_xri_put);
+ list_splice(&qp->lpfc_io_buf_list_put,
+ &qp->lpfc_io_buf_list_get);
+ qp->get_io_bufs += qp->put_io_bufs;
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+ qp->put_io_bufs = 0;
+ spin_unlock(&qp->io_buf_list_put_lock);
+ if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
+ expedite)
+ lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
+ }
+ spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
+ }
+
+ return lpfc_cmd;
+}
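
The non-rebalancing path above drains lpfc_io_buf_list_get and, only when it runs dry, takes the put-side lock and splices the whole lpfc_io_buf_list_put over in O(1), so allocation and release mostly contend on different locks. A single-threaded sketch of that two-list pattern (locking elided):

#include <stdio.h>

struct buf { struct buf *next; int id; };

/* Allocs pop "get"; frees push "put"; the lists only meet when "get"
 * runs dry and the entire "put" list is spliced over. */
struct two_list_pool { struct buf *get, *put; };

static struct buf *pool_alloc(struct two_list_pool *p)
{
	struct buf *b;

	if (!p->get) {		/* refill: O(1) splice of the put list */
		p->get = p->put;
		p->put = NULL;
	}
	b = p->get;
	if (b)
		p->get = b->next;
	return b;
}

static void pool_free(struct two_list_pool *p, struct buf *b)
{
	b->next = p->put;
	p->put = b;
}

int main(void)
{
	struct two_list_pool p = { NULL, NULL };
	struct buf a = { NULL, 1 }, b = { NULL, 2 };

	pool_free(&p, &a);
	pool_free(&p, &b);
	printf("%d\n", pool_alloc(&p)->id);	/* 2: came via the splice */
	printf("%d\n", pool_alloc(&p)->id);	/* 1 */
	return 0;
}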
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7abb395bb64a..7a1a761efdd6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,6 +20,10 @@
* included with this package. *
*******************************************************************/
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
/* forward declaration for LPFC_IOCB_t's use */
struct lpfc_hba;
struct lpfc_vport;
@@ -33,6 +37,7 @@ typedef enum _lpfc_ctx_cmd {
struct lpfc_cq_event {
struct list_head list;
+ uint16_t hdwq;
union {
struct lpfc_mcqe mcqe_cmpl;
struct lpfc_acqe_link acqe_link;
@@ -351,3 +356,85 @@ struct lpfc_sli {
#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO 300
/* Timeout for other flash-based outstanding mbox command (Seconds) */
#define LPFC_MBOX_TMO_FLASH_CMD 300
+
+struct lpfc_io_buf {
+ /* Common fields */
+ struct list_head list;
+ void *data;
+ dma_addr_t dma_handle;
+ dma_addr_t dma_phys_sgl;
+ struct sli4_sge *dma_sgl;
+ struct lpfc_iocbq cur_iocbq;
+ struct lpfc_sli4_hdw_queue *hdwq;
+ uint16_t hdwq_no;
+ uint16_t cpu;
+
+ struct lpfc_nodelist *ndlp;
+ uint32_t timeout;
+ uint16_t flags; /* TBD convert exch_busy to flags */
+#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
+#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
+ /* External DIF device IO conversions */
+#define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */
+#define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */
+#define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */
+ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
+ uint16_t status; /* From IOCB Word 7- ulpStatus */
+ uint32_t result; /* From IOCB Word 4. */
+
+ uint32_t seg_cnt; /* Number of scatter-gather segments returned by
+ * dma_map_sg. The driver needs this for calls
+ * to dma_unmap_sg.
+ */
+ unsigned long start_time;
+ spinlock_t buf_lock; /* lock used in case of simultaneous abort */
+ bool expedite; /* this is an expedite io_buf */
+
+ union {
+ /* SCSI specific fields */
+ struct {
+ struct scsi_cmnd *pCmd;
+ struct lpfc_rport_data *rdata;
+ uint32_t prot_seg_cnt; /* seg_cnt's counterpart for
+ * protection data
+ */
+
+ /*
+ * data and dma_handle are the kernel virtual and bus
+ * address of the dma-able buffer containing the
+ * fcp_cmd, fcp_rsp and a scatter gather bde list that
+ * supports the sg_tablesize value.
+ */
+ struct fcp_cmnd *fcp_cmnd;
+ struct fcp_rsp *fcp_rsp;
+
+ wait_queue_head_t *waitq;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ /* Used to restore any changes to protection data for
+ * error injection
+ */
+ void *prot_data_segment;
+ uint32_t prot_data;
+ uint32_t prot_data_type;
+#define LPFC_INJERR_REFTAG 1
+#define LPFC_INJERR_APPTAG 2
+#define LPFC_INJERR_GUARD 3
+#endif
+ };
+
+ /* NVME specific fields */
+ struct {
+ struct nvmefc_fcp_req *nvmeCmd;
+ uint16_t qidx;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint64_t ts_cmd_start;
+ uint64_t ts_last_cmd;
+ uint64_t ts_cmd_wqput;
+ uint64_t ts_isr_cmpl;
+ uint64_t ts_data_nvme;
+#endif
+ };
+ };
+};
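
Since both protocol views overlay the same storage in the anonymous union, the release path must zero the NVME fields before the buf can be reused for SCSI (see lpfc_release_io_buf above). A minimal C11 sketch of the layout idea (field names invented):

#include <stdio.h>

/* Common head plus per-protocol tails overlaid in an anonymous union. */
struct io_buf {
	int tag;			/* common field */
	union {
		struct {		/* SCSI view */
			int scsi_lun;
		};
		struct {		/* NVME view */
			int nvme_qidx;
		};
	};
};

int main(void)
{
	struct io_buf b = { .tag = 7 };

	b.nvme_qidx = 3;	/* anonymous members need no union name */
	printf("tag=%d qidx=%d\n", b.tag, b.nvme_qidx);
	return 0;
}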
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 6b2d2350e2c6..40c85091c805 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,6 +20,10 @@
* included with this package. *
*******************************************************************/
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
@@ -36,14 +40,12 @@
#define LPFC_NEMBED_MBOX_SGL_CNT 254
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
-#define LPFC_HBA_IO_CHAN_MIN 0
-#define LPFC_HBA_IO_CHAN_MAX 32
-#define LPFC_FCP_IO_CHAN_DEF 4
-#define LPFC_NVME_IO_CHAN_DEF 0
-
-/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+#define LPFC_HBA_HDWQ_MIN 0
+#define LPFC_HBA_HDWQ_MAX 128
+#define LPFC_HBA_HDWQ_DEF 0
-#define LPFC_FOF_IO_CHAN_NUM 1
+/* Common buffer size to accommodate SCSI and NVME IO buffers */
+#define LPFC_COMMON_IO_BUF_SZ 768
/*
* Provide the default FCF Record attributes used by the driver
@@ -152,28 +154,58 @@ struct lpfc_queue {
struct list_head child_list;
struct list_head page_list;
struct list_head sgl_list;
+ struct list_head cpu_list;
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
- uint32_t entry_repost; /* Count of entries before doorbell is rung */
-#define LPFC_EQ_REPOST 8
-#define LPFC_MQ_REPOST 8
-#define LPFC_CQ_REPOST 64
-#define LPFC_RQ_REPOST 64
-#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
+ uint32_t notify_interval; /* Queue Notification Interval
+ * For chip->host queues (EQ, CQ, RQ):
+ * specifies the interval (number of
+ * entries) where the doorbell is rung to
+ * notify the chip of entry consumption.
+ * For host->chip queues (WQ):
+ * specifies the interval (number of
+ * entries) where consumption CQE is
+ * requested to indicate WQ entries
+ * consumed by the chip.
+ * Not used on an MQ.
+ */
+#define LPFC_EQ_NOTIFY_INTRVL 16
+#define LPFC_CQ_NOTIFY_INTRVL 16
+#define LPFC_WQ_NOTIFY_INTRVL 16
+#define LPFC_RQ_NOTIFY_INTRVL 16
+ uint32_t max_proc_limit; /* Queue Processing Limit
+ * For chip->host queues (EQ, CQ):
+ * specifies the maximum number of
+ * entries to be consumed in one
+ * processing iteration sequence. Queue
+ * will be rearmed after each iteration.
+ * Not used on an MQ, RQ or WQ.
+ */
+#define LPFC_EQ_MAX_PROC_LIMIT 256
+#define LPFC_CQ_MIN_PROC_LIMIT 64
+#define LPFC_CQ_MAX_PROC_LIMIT LPFC_CQE_EXP_COUNT // 4096
+#define LPFC_CQ_DEF_MAX_PROC_LIMIT LPFC_CQE_DEF_COUNT // 1024
+#define LPFC_CQ_MIN_THRESHOLD_TO_POLL 64
+#define LPFC_CQ_MAX_THRESHOLD_TO_POLL LPFC_CQ_DEF_MAX_PROC_LIMIT
+#define LPFC_CQ_DEF_THRESHOLD_TO_POLL LPFC_CQ_DEF_MAX_PROC_LIMIT
+ uint32_t queue_claimed; /* indicates queue is being processed */
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
+ uint32_t q_mode;
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
- uint32_t q_mode;
uint16_t page_count; /* Number of pages allocated for this queue */
uint16_t page_size; /* size of page allocated for this queue */
#define LPFC_EXPANDED_PAGE_SIZE 16384
#define LPFC_DEFAULT_PAGE_SIZE 4096
- uint16_t chann; /* IO channel this queue is associated with */
+ uint16_t chann; /* Hardware Queue association WQ/CQ */
+ /* CPU affinity for EQ */
+#define LPFC_FIND_BY_EQ 0
+#define LPFC_FIND_BY_HDWQ 1
uint8_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -212,10 +244,14 @@ struct lpfc_queue {
#define RQ_buf_posted q_cnt_3
#define RQ_rcv_buf q_cnt_4
- struct work_struct irqwork;
- struct work_struct spwork;
+ struct work_struct irqwork;
+ struct work_struct spwork;
+ struct delayed_work sched_irqwork;
+ struct delayed_work sched_spwork;
uint64_t isr_timestamp;
+ uint16_t hdwq;
+ uint16_t last_cpu; /* most recent cpu */
uint8_t qe_valid;
struct lpfc_queue *assoc_qp;
union sli4_qe qe[1]; /* array to index entries (must be last) */
@@ -428,11 +464,6 @@ struct lpfc_hba_eq_hdl {
uint32_t idx;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
- atomic_t hba_eq_in_use;
- struct cpumask *cpumask;
- /* CPU affinitsed to or 0xffffffff if multiple */
- uint32_t cpu;
-#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
};
/*BB Credit recovery value*/
@@ -526,11 +557,165 @@ struct lpfc_vector_map_info {
uint16_t phys_id;
uint16_t core_id;
uint16_t irq;
- uint16_t channel_id;
+ uint16_t eq;
+ uint16_t hdwq;
+ uint16_t hyper;
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
+/* Multi-XRI pool */
+#define XRI_BATCH 8
+
+struct lpfc_pbl_pool {
+ struct list_head list;
+ u32 count;
+ spinlock_t lock; /* lock for pbl_pool */
+};
+
+struct lpfc_pvt_pool {
+ u32 low_watermark;
+ u32 high_watermark;
+
+ struct list_head list;
+ u32 count;
+ spinlock_t lock; /* lock for pvt_pool */
+};
+
+struct lpfc_multixri_pool {
+ u32 xri_limit;
+
+ /* Starting point when searching a pbl_pool with round-robin method */
+ u32 rrb_next_hwqid;
+
+ /* Used by lpfc_adjust_pvt_pool_count.
+ * io_req_count is incremented by 1 during IO submission. The heartbeat
+ * handler uses these two variables to determine if pvt_pool is idle or
+ * busy.
+ */
+ u32 prev_io_req_count;
+ u32 io_req_count;
+
+ /* statistics */
+ u32 pbl_empty_count;
+#ifdef LPFC_MXP_STAT
+ u32 above_limit_count;
+ u32 below_limit_count;
+ u32 local_pbl_hit_count;
+ u32 other_pbl_hit_count;
+ u32 stat_max_hwm;
+
+#define LPFC_MXP_SNAPSHOT_TAKEN 3 /* snapshot is taken at the 3rd heartbeat */
+ u32 stat_pbl_count;
+ u32 stat_pvt_count;
+ u32 stat_busy_count;
+ u32 stat_snapshot_taken;
+#endif
+
+ /* TODO: Separate pvt_pool into get and put list */
+ struct lpfc_pbl_pool pbl_pool; /* Public free XRI pool */
+ struct lpfc_pvt_pool pvt_pool; /* Private free XRI pool */
+};
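
prev_io_req_count/io_req_count act as a cheap activity edge detector: each heartbeat compares the live counter against the snapshot taken on the previous beat, then refreshes the snapshot. A standalone sketch (the policy applied to the result is out of scope here):

#include <stdbool.h>
#include <stdio.h>

struct mxp { unsigned prev_io_req_count, io_req_count; };

/* Called from the periodic heartbeat: true if no IO was submitted
 * since the previous beat. */
static bool pool_was_idle(struct mxp *p)
{
	bool idle = (p->io_req_count == p->prev_io_req_count);

	p->prev_io_req_count = p->io_req_count;	/* take a new snapshot */
	return idle;
}

int main(void)
{
	struct mxp p = { 0, 0 };

	p.io_req_count++;			/* an IO submission */
	printf("%d\n", pool_was_idle(&p));	/* 0: interval was busy */
	printf("%d\n", pool_was_idle(&p));	/* 1: interval was idle */
	return 0;
}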
+
+struct lpfc_fc4_ctrl_stat {
+ u32 input_requests;
+ u32 output_requests;
+ u32 control_requests;
+ u32 io_cmpls;
+};
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+struct lpfc_lock_stat {
+ uint32_t alloc_xri_get;
+ uint32_t alloc_xri_put;
+ uint32_t free_xri;
+ uint32_t wq_access;
+ uint32_t alloc_pvt_pool;
+ uint32_t mv_from_pvt_pool;
+ uint32_t mv_to_pub_pool;
+ uint32_t mv_to_pvt_pool;
+ uint32_t free_pub_pool;
+ uint32_t free_pvt_pool;
+};
+#endif
+
+struct lpfc_eq_intr_info {
+ struct list_head list;
+ uint32_t icnt;
+};
+
/* SLI4 HBA data structure entries */
+struct lpfc_sli4_hdw_queue {
+ /* Pointers to the constructed SLI4 queues */
+ struct lpfc_queue *hba_eq; /* Event queues for HBA */
+ struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */
+ struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
+ struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */
+ struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
+ uint16_t fcp_cq_map;
+ uint16_t nvme_cq_map;
+
+ /* Keep track of IO buffers for this hardware queue */
+ spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */
+ struct list_head lpfc_io_buf_list_get;
+ spinlock_t io_buf_list_put_lock; /* Common buf free list lock */
+ struct list_head lpfc_io_buf_list_put;
+ spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+ struct list_head lpfc_abts_scsi_buf_list;
+ spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
+ struct list_head lpfc_abts_nvme_buf_list;
+ uint32_t total_io_bufs;
+ uint32_t get_io_bufs;
+ uint32_t put_io_bufs;
+ uint32_t empty_io_bufs;
+ uint32_t abts_scsi_io_bufs;
+ uint32_t abts_nvme_io_bufs;
+
+ /* Multi-XRI pool per HWQ */
+ struct lpfc_multixri_pool *p_multixri_pool;
+
+ /* FC-4 Stats counters */
+ struct lpfc_fc4_ctrl_stat nvme_cstat;
+ struct lpfc_fc4_ctrl_stat scsi_cstat;
+#ifdef LPFC_HDWQ_LOCK_STAT
+ struct lpfc_lock_stat lock_conflict;
+#endif
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+#define LPFC_CHECK_CPU_CNT 128
+ uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
+ uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
+ uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
+#endif
+};
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+/* compile time trylock stats */
+#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
+ { \
+ int only_once = 1; \
+ while (spin_trylock_irqsave(lock, flag) == 0) { \
+ if (only_once) { \
+ only_once = 0; \
+ qp->lock_conflict.lstat++; \
+ } \
+ } \
+ }
+#define lpfc_qp_spin_lock(lock, qp, lstat) \
+ { \
+ int only_once = 1; \
+ while (spin_trylock(lock) == 0) { \
+ if (only_once) { \
+ only_once = 0; \
+ qp->lock_conflict.lstat++; \
+ } \
+ } \
+ }
+#else
+#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
+ spin_lock_irqsave(lock, flag)
+#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
+#endif
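
The lstat argument is pasted into a member name of qp->lock_conflict, so each call site keeps its own contention counter, and only the first failed attempt per acquisition is counted. A userspace analogue with pthread spinlocks (note the inverted convention: pthread_spin_trylock returns 0 on success, while the kernel's spin_trylock returns nonzero):

#include <pthread.h>
#include <stdio.h>

struct lock_stat { unsigned alloc_pvt_pool, free_pvt_pool; };
struct hdw_queue { struct lock_stat lock_conflict; };

/* lstat is token-pasted into qp->lock_conflict; at most one increment
 * per contended acquisition. */
#define qp_spin_lock(lock, qp, lstat)					\
	do {								\
		int only_once = 1;					\
		while (pthread_spin_trylock(lock) != 0) {		\
			if (only_once) {				\
				only_once = 0;				\
				(qp)->lock_conflict.lstat++;		\
			}						\
		}							\
	} while (0)

int main(void)
{
	pthread_spinlock_t lock;
	struct hdw_queue qp = { { 0, 0 } };

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	qp_spin_lock(&lock, &qp, alloc_pvt_pool);	/* uncontended */
	pthread_spin_unlock(&lock);
	printf("conflicts: %u\n", qp.lock_conflict.alloc_pvt_pool);	/* 0 */
	pthread_spin_destroy(&lock);
	return 0;
}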
+
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
* config space registers
@@ -599,21 +784,19 @@ struct lpfc_sli4_hba {
struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
void (*sli4_eq_clr_intr)(struct lpfc_queue *q);
- uint32_t (*sli4_eq_release)(struct lpfc_queue *q, bool arm);
- uint32_t (*sli4_cq_release)(struct lpfc_queue *q, bool arm);
+ void (*sli4_write_eq_db)(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ uint32_t count, bool arm);
+ void (*sli4_write_cq_db)(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ uint32_t count, bool arm);
/* Pointers to the constructed SLI4 queues */
- struct lpfc_queue **hba_eq; /* Event queues for HBA */
- struct lpfc_queue **fcp_cq; /* Fast-path FCP compl queue */
- struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
+ struct lpfc_sli4_hdw_queue *hdwq;
+ struct list_head lpfc_wq_list;
+
+ /* Pointers to the constructed SLI4 queues for NVMET */
struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
- struct lpfc_queue **fcp_wq; /* Fast-path FCP work queue */
- struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
- uint16_t *fcp_cq_map;
- uint16_t *nvme_cq_map;
- struct list_head lpfc_wq_list;
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
@@ -631,13 +814,7 @@ struct lpfc_sli4_hba {
uint32_t ulp0_mode; /* ULP0 protocol mode */
uint32_t ulp1_mode; /* ULP1 protocol mode */
- struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
-
/* Optimized Access Storage specific queues/structures */
-
- struct lpfc_queue *oas_cq; /* OAS completion queue */
- struct lpfc_queue *oas_wq; /* OAS Work queue */
- struct lpfc_sli_ring *oas_ring;
uint64_t oas_next_lun;
uint8_t oas_next_tgt_wwpn[8];
uint8_t oas_next_vpt_wwpn[8];
@@ -663,22 +840,22 @@ struct lpfc_sli4_hba {
uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
- uint16_t nvme_xri_max;
- uint16_t nvme_xri_cnt;
- uint16_t nvme_xri_start;
- uint16_t scsi_xri_max;
- uint16_t scsi_xri_cnt;
- uint16_t scsi_xri_start;
+ uint16_t io_xri_max;
+ uint16_t io_xri_cnt;
+ uint16_t io_xri_start;
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
uint16_t nvmet_io_wait_cnt;
uint16_t nvmet_io_wait_total;
+ uint16_t cq_max;
+ struct lpfc_queue **cq_lookup;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
+ spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+ struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_nvmet_sgl_list;
+ spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
struct list_head lpfc_abts_nvmet_ctx_list;
- struct list_head lpfc_abts_scsi_buf_list;
- struct list_head lpfc_abts_nvme_buf_list;
struct list_head lpfc_nvmet_io_wait_list;
struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
struct lpfc_sglq **lpfc_sglq_active_list;
@@ -707,17 +884,16 @@ struct lpfc_sli4_hba {
#define LPFC_SLI4_PPNAME_NON 0
#define LPFC_SLI4_PPNAME_GET 1
struct lpfc_iov iov;
- spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
- spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t sgl_list_lock; /* list of aborted els IOs */
spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
uint32_t physical_port;
/* CPU to vector mapping information */
struct lpfc_vector_map_info *cpu_map;
- uint16_t num_online_cpu;
+ uint16_t num_possible_cpu;
uint16_t num_present_cpu;
uint16_t curr_disp_cpu;
+ struct lpfc_eq_intr_info __percpu *eq_info;
uint32_t conf_trunk;
#define lpfc_conf_trunk_port0_WORD conf_trunk
#define lpfc_conf_trunk_port0_SHIFT 0
@@ -818,12 +994,12 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t, uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
- uint32_t numq, uint32_t imax);
+void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
+ uint32_t numq, uint32_t usdelay);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
- struct lpfc_queue **eqp, uint32_t type,
+ struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
uint32_t subtype);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
@@ -843,12 +1019,10 @@ int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
int lpfc_sli4_queue_setup(struct lpfc_hba *);
void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
-int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
-int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
+int lpfc_repost_io_sgl_list(struct lpfc_hba *phba);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
-int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
@@ -868,9 +1042,9 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
- struct sli4_wcqe_xri_aborted *);
+ struct sli4_wcqe_xri_aborted *, int);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri);
+ struct sli4_wcqe_xri_aborted *axri, int idx);
void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
@@ -884,11 +1058,15 @@ int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
int lpfc_sli4_init_vpi(struct lpfc_vport *);
inline void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
-uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
-uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
+void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+ uint32_t count, bool arm);
+void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+ uint32_t count, bool arm);
inline void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
-uint32_t lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm);
-uint32_t lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm);
+void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+ uint32_t count, bool arm);
+void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+ uint32_t count, bool arm);
void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 3f4398ffb567..43fd693cf042 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.0.0.10"
+#define LPFC_DRIVER_VERSION "12.2.0.0"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 102a011ff6d4..343bc71d4615 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -313,11 +313,11 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
- /* NPIV is not supported if HBA has NVME enabled */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* NPIV is not supported if HBA has NVME Target enabled */
+ if (phba->nvmet_support) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"3189 Create VPORT failed: "
- "NPIV is not supported on NVME\n");
+ "NPIV is not supported on NVME Target\n");
rc = VPORT_INVAL;
goto error_out;
}
@@ -403,6 +403,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
/* Set the DFT_LUN_Q_DEPTH accordingly */
vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;
+ /* Only the physical port can support NVME for now */
+ vport->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+
*(struct lpfc_vport **)fc_vport->dd_data = vport;
vport->fc_vport = fc_vport;
@@ -415,22 +418,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
}
- if ((phba->nvmet_support == 0) &&
- ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) {
- /* Create NVME binding with nvme_fc_transport. This
- * ensures the vport is initialized.
- */
- rc = lpfc_nvme_create_localport(vport);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6003 %s status x%x\n",
- "NVME registration failed, ",
- rc);
- goto error_out;
- }
- }
-
/*
* In SLI4, the vpi must be activated before it can be used
* by the port.
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 16536c41f0c5..6fd57f7f0b1e 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,8 +33,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.707.50.00-rc1"
-#define MEGASAS_RELDATE "December 18, 2018"
+#define MEGASAS_VERSION "07.707.51.00-rc1"
+#define MEGASAS_RELDATE "February 7, 2019"
/*
* Device IDs
@@ -790,6 +790,38 @@ struct MR_LD_TARGETID_LIST {
u8 targetId[MAX_LOGICAL_DRIVES_EXT];
};
+struct MR_HOST_DEVICE_LIST_ENTRY {
+ struct {
+ union {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:7;
+ u8 is_sys_pd:1;
+#else
+ u8 is_sys_pd:1;
+ u8 reserved:7;
+#endif
+ } bits;
+ u8 byte;
+ } u;
+ } flags;
+ u8 scsi_type;
+ __le16 target_id;
+ u8 reserved[4];
+ __le64 sas_addr[2];
+} __packed;
+
+struct MR_HOST_DEVICE_LIST {
+ __le32 size;
+ __le32 count;
+ __le32 reserved[2];
+ struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[1];
+} __packed;
+
+#define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) + \
+ (sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) * \
+ (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1)))
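
host_device_list[1] is the pre-C99 flexible-array idiom: the struct already contains one entry, so the sizing macro adds space for (max entries - 1) more. A standalone check of that arithmetic (struct layout simplified, limits invented):

#include <stdint.h>
#include <stdio.h>

struct entry {
	uint8_t  flags, scsi_type;
	uint16_t target_id;
	uint8_t  reserved[4];
	uint64_t sas_addr[2];
};

struct dev_list {
	uint32_t size, count, reserved[2];
	struct entry list[1];		/* pre-C99 flexible array */
};

#define MAX_PD	256			/* invented limits */
#define MAX_LD	256

/* "- 1" because the header struct already holds one entry */
#define DEV_LIST_SZ (sizeof(struct dev_list) + \
		     sizeof(struct entry) * (MAX_PD + MAX_LD - 1))

int main(void)
{
	printf("%zu bytes for %d entries\n",
	       (size_t)DEV_LIST_SZ, MAX_PD + MAX_LD);
	return 0;
}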
+
/*
* SAS controller properties
@@ -870,13 +902,17 @@ struct megasas_ctrl_prop {
u8 viewSpace;
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u16 reserved2:11;
+ u16 reserved3:9;
+ u16 enable_fw_dev_list:1;
+ u16 reserved2:1;
u16 enable_snap_dump:1;
u16 reserved1:4;
#else
u16 reserved1:4;
u16 enable_snap_dump:1;
- u16 reserved2:11;
+ u16 reserved2:1;
+ u16 enable_fw_dev_list:1;
+ u16 reserved3:9;
#endif
} on_off_properties2;
};
@@ -1685,7 +1721,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:17;
+ u32 reserved:16;
+ u32 support_fw_exposed_dev_list:1;
u32 support_nvme_passthru:1;
u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
@@ -1717,7 +1754,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_pd_map_target_id:1;
u32 support_64bit_mode:1;
u32 support_nvme_passthru:1;
- u32 reserved:17;
+ u32 support_fw_exposed_dev_list:1;
+ u32 reserved:16;
#endif
} mfi_capabilities;
__le32 reg;
@@ -2202,6 +2240,9 @@ struct megasas_instance {
struct MR_LD_TARGETID_LIST *ld_targetid_list_buf;
dma_addr_t ld_targetid_list_buf_h;
+ struct MR_HOST_DEVICE_LIST *host_device_list_buf;
+ dma_addr_t host_device_list_buf_h;
+
struct MR_SNAPDUMP_PROPERTIES *snapdump_prop;
dma_addr_t snapdump_prop_h;
@@ -2337,6 +2378,7 @@ struct megasas_instance {
u8 task_abort_tmo;
u8 max_reset_tmo;
u8 snapdump_wait_time;
+ u8 enable_fw_dev_list;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index fcbff83c0097..59a6546fd602 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -815,7 +815,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
&(regs)->inbound_high_queue_port);
writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
&(regs)->inbound_low_queue_port);
- mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -3924,12 +3923,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
/*
* The cur_state should not last for more than max_wait secs
*/
- for (i = 0; i < max_wait; i++) {
+ for (i = 0; i < max_wait * 50; i++) {
curr_abs_state = instance->instancet->
read_fw_status_reg(instance);
if (abs_state == curr_abs_state) {
- msleep(1000);
+ msleep(20);
} else
break;
}
@@ -4188,6 +4187,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
if (megasas_create_frame_pool(instance)) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
megasas_free_cmds(instance);
+ return -ENOMEM;
}
return 0;
@@ -4634,6 +4634,123 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
return ret;
}
+/**
+ * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET
+ * dcmd.mbox - reserved
+ * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure
+ * Desc: This DCMD will return the combined device list
+ * Status: MFI_STAT_OK - List returned successfully
+ * MFI_STAT_INVALID_CMD - Firmware support for the feature has been
+ * disabled
+ * @instance: Adapter soft state
+ * @is_probe: Driver probe check
+ * Return: 0 if DCMD succeeded
+ * non-zero if failed
+ */
+int
+megasas_host_device_list_query(struct megasas_instance *instance,
+ bool is_probe)
+{
+ int ret, i, target_id;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_HOST_DEVICE_LIST *ci;
+ u32 count;
+ dma_addr_t ci_h;
+
+ ci = instance->host_device_list_buf;
+ ci_h = instance->host_device_list_buf_h;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_warn(&instance->pdev->dev,
+ "%s: failed to get cmd\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = is_probe ? 0 : 1;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
+
+ if (!instance->mask_interrupts) {
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MFI_IO_TIMEOUT_SECS);
+ } else {
+ ret = megasas_issue_polled(instance, cmd);
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ }
+
+ switch (ret) {
+ case DCMD_SUCCESS:
+ /* Fill the internal pd_list and ld_ids array based on
+ * targetIds returned by FW
+ */
+ count = le32_to_cpu(ci->count);
+
+ memset(instance->local_pd_list, 0,
+ MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+ memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
+ for (i = 0; i < count; i++) {
+ target_id = le16_to_cpu(ci->host_device_list[i].target_id);
+ if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
+ instance->local_pd_list[target_id].tid = target_id;
+ instance->local_pd_list[target_id].driveType =
+ ci->host_device_list[i].scsi_type;
+ instance->local_pd_list[target_id].driveState =
+ MR_PD_STATE_SYSTEM;
+ } else {
+ instance->ld_ids[target_id] = target_id;
+ }
+ }
+
+ memcpy(instance->pd_list, instance->local_pd_list,
+ sizeof(instance->pd_list));
+ break;
+
+ case DCMD_TIMEOUT:
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+ break;
+ case DCMD_FAILED:
+ dev_err(&instance->pdev->dev,
+ "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
+ __func__);
+ break;
+ }
+
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
/*
* megasas_update_ext_vd_details : Update details w.r.t Extended VD
* instance : Controller's instance
@@ -4861,6 +4978,9 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
(ci->properties.on_off_properties2.enable_snap_dump ?
MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
+ instance->enable_fw_dev_list =
+ ci->properties.on_off_properties2.enable_fw_dev_list;
+
dev_info(&instance->pdev->dev,
"controller type\t: %s(%dMB)\n",
instance->is_imr ? "iMR" : "MR",
@@ -5320,6 +5440,40 @@ fallback:
}
/**
+ * megasas_get_device_list - Get the PD and LD device list from FW.
+ * @instance: Adapter soft state
+ * @return: Success or failure
+ *
+ * Issue DCMDs to Firmware to get the PD and LD list.
+ * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
+ * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
+ */
+static
+int megasas_get_device_list(struct megasas_instance *instance)
+{
+ memset(instance->pd_list, 0,
+ (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+ if (instance->enable_fw_dev_list) {
+ if (megasas_host_device_list_query(instance, true))
+ return FAILED;
+ } else {
+ if (megasas_get_pd_list(instance) < 0) {
+ dev_err(&instance->pdev->dev, "failed to get PD list\n");
+ return FAILED;
+ }
+
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
+ dev_err(&instance->pdev->dev, "failed to get LD list\n");
+ return FAILED;
+ }
+ }
+
+ return SUCCESS;
+}
+/**
* megasas_init_fw - Initializes the FW
* @instance: Adapter soft state
*
@@ -5571,18 +5725,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
megasas_setup_jbod_map(instance);
- /** for passthrough
- * the following function will get the PD LIST.
- */
- memset(instance->pd_list, 0,
- (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
- if (megasas_get_pd_list(instance) < 0) {
- dev_err(&instance->pdev->dev, "failed to get PD list\n");
+ if (megasas_get_device_list(instance) != SUCCESS) {
+ dev_err(&instance->pdev->dev,
+ "%s: megasas_get_device_list failed\n",
+ __func__);
goto fail_get_ld_pd_list;
}
- memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-
/* stream detection initialization */
if (instance->adapter_type >= VENTURA_SERIES) {
fusion->stream_detect_by_ld =
@@ -5612,10 +5761,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
}
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- goto fail_get_ld_pd_list;
-
/*
* Compute the max allowed sectors per IO: The controller info has two
* limits on max sectors. Driver should use the minimum of these two.
@@ -6424,6 +6569,18 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
if (!instance->snapdump_prop)
dev_err(&pdev->dev,
"Failed to allocate snapdump properties buffer\n");
+
+ instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
+ HOST_DEVICE_LIST_SZ,
+ &instance->host_device_list_buf_h,
+ GFP_KERNEL);
+
+ if (!instance->host_device_list_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate targetid list buffer\n");
+ return -ENOMEM;
+ }
+
}
instance->pd_list_buf =
@@ -6573,6 +6730,13 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
sizeof(struct MR_SNAPDUMP_PROPERTIES),
instance->snapdump_prop,
instance->snapdump_prop_h);
+
+ if (instance->host_device_list_buf)
+ dma_free_coherent(&pdev->dev,
+ HOST_DEVICE_LIST_SZ,
+ instance->host_device_list_buf,
+ instance->host_device_list_buf_h);
+
}
/*
@@ -6746,7 +6910,9 @@ static int megasas_probe_one(struct pci_dev *pdev,
/*
* Trigger SCSI to scan our drives
*/
- scsi_scan_host(host);
+ if (!instance->enable_fw_dev_list ||
+ (instance->host_device_list_buf->count > 0))
+ scsi_scan_host(host);
/*
* Initiate AEN (Asynchronous Event Notification)
@@ -7866,6 +8032,139 @@ static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
scsi_device_put(sdev);
}
+/**
+ * megasas_update_device_list - Update the PD and LD device list from FW
+ * after an AEN event notification
+ * @instance: Adapter soft state
+ * @event_type: Indicates type of event (PD or LD event)
+ *
+ * @return: Success or failure
+ *
+ * Issue DCMDs to Firmware to update the internal device list in driver.
+ * Based on FW support, the driver sends either the HOST_DEVICE_LIST DCMD or
+ * the combination of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
+ */
+static
+int megasas_update_device_list(struct megasas_instance *instance,
+ int event_type)
+{
+ int dcmd_ret = DCMD_SUCCESS;
+
+ if (instance->enable_fw_dev_list) {
+ dcmd_ret = megasas_host_device_list_query(instance, false);
+ if (dcmd_ret != DCMD_SUCCESS)
+ goto out;
+ } else {
+ if (event_type & SCAN_PD_CHANNEL) {
+ dcmd_ret = megasas_get_pd_list(instance);
+
+ if (dcmd_ret != DCMD_SUCCESS)
+ goto out;
+ }
+
+ if (event_type & SCAN_VD_CHANNEL) {
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ dcmd_ret = megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+ if (dcmd_ret != DCMD_SUCCESS)
+ goto out;
+ }
+ }
+ }
+
+out:
+ return dcmd_ret;
+}
+
+/**
+ * megasas_add_remove_devices - Add/remove devices to/from the SCSI mid-layer
+ * after an AEN event notification
+ * @instance: Adapter soft state
+ * @scan_type: Indicates type of devices (PD/LD) to add
+ * @return void
+ */
+static
+void megasas_add_remove_devices(struct megasas_instance *instance,
+ int scan_type)
+{
+ int i, j;
+ u16 pd_index = 0;
+ u16 ld_index = 0;
+ u16 channel = 0, id = 0;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev1;
+ struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
+ struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
+
+ host = instance->host;
+
+ if (instance->enable_fw_dev_list) {
+ targetid_list = instance->host_device_list_buf;
+ for (i = 0; i < le32_to_cpu(targetid_list->count); i++) {
+ targetid_entry = &targetid_list->host_device_list[i];
+ if (targetid_entry->flags.u.bits.is_sys_pd) {
+ channel = le16_to_cpu(targetid_entry->target_id) /
+ MEGASAS_MAX_DEV_PER_CHANNEL;
+ id = le16_to_cpu(targetid_entry->target_id) %
+ MEGASAS_MAX_DEV_PER_CHANNEL;
+ } else {
+ channel = MEGASAS_MAX_PD_CHANNELS +
+ (le16_to_cpu(targetid_entry->target_id) /
+ MEGASAS_MAX_DEV_PER_CHANNEL);
+ id = le16_to_cpu(targetid_entry->target_id) %
+ MEGASAS_MAX_DEV_PER_CHANNEL;
+ }
+ sdev1 = scsi_device_lookup(host, channel, id, 0);
+ if (!sdev1) {
+ scsi_add_device(host, channel, id, 0);
+ } else {
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+
+ if (scan_type & SCAN_PD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1)
+ scsi_add_device(host, i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1)
+ megasas_remove_scsi_device(sdev1);
+ }
+ }
+ }
+ }
+
+ if (scan_type & SCAN_VD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+ sdev1 = scsi_device_lookup(host,
+ MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index] != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1)
+ megasas_remove_scsi_device(sdev1);
+ }
+ }
+ }
+ }
+
+}
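
The firmware list carries flat target IDs; the loops above fold them back into SCSI (channel, id) pairs, with system PDs on the low channels and LDs offset by MEGASAS_MAX_PD_CHANNELS. A sketch of the mapping with assumed constants (the real values live in megaraid_sas.h):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PD_CHANNELS		2	/* assumed for illustration */
#define MAX_DEV_PER_CHANNEL	128	/* assumed for illustration */

static void map_target(unsigned target_id, bool is_sys_pd,
		       unsigned *channel, unsigned *id)
{
	*channel = target_id / MAX_DEV_PER_CHANNEL;
	*id = target_id % MAX_DEV_PER_CHANNEL;
	if (!is_sys_pd)			/* LDs live above the PD channels */
		*channel += MAX_PD_CHANNELS;
}

int main(void)
{
	unsigned c, i;

	map_target(130, true, &c, &i);	/* PD: channel 1, id 2 */
	printf("PD -> %u:%u\n", c, i);
	map_target(130, false, &c, &i);	/* LD: channel 3, id 2 */
	printf("LD -> %u:%u\n", c, i);
	return 0;
}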
+
static void
megasas_aen_polling(struct work_struct *work)
{
@@ -7873,11 +8172,7 @@ megasas_aen_polling(struct work_struct *work)
container_of(work, struct megasas_aen_event, hotplug_work.work);
struct megasas_instance *instance = ev->instance;
union megasas_evt_class_locale class_locale;
- struct Scsi_Host *host;
- struct scsi_device *sdev1;
- u16 pd_index = 0;
- u16 ld_index = 0;
- int i, j, doscan = 0;
+ int event_type = 0;
u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
int error;
u8 dcmd_ret = DCMD_SUCCESS;
@@ -7896,7 +8191,6 @@ megasas_aen_polling(struct work_struct *work)
mutex_lock(&instance->reset_mutex);
instance->ev = NULL;
- host = instance->host;
if (instance->evt_detail) {
megasas_decode_evt(instance);
@@ -7904,40 +8198,20 @@ megasas_aen_polling(struct work_struct *work)
case MR_EVT_PD_INSERTED:
case MR_EVT_PD_REMOVED:
- dcmd_ret = megasas_get_pd_list(instance);
- if (dcmd_ret == DCMD_SUCCESS)
- doscan = SCAN_PD_CHANNEL;
+ event_type = SCAN_PD_CHANNEL;
break;
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
case MR_EVT_LD_CREATED:
- if (!instance->requestorId ||
- (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
- dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
-
- if (dcmd_ret == DCMD_SUCCESS)
- doscan = SCAN_VD_CHANNEL;
-
+ event_type = SCAN_VD_CHANNEL;
break;
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
- dcmd_ret = megasas_get_pd_list(instance);
-
- if (dcmd_ret != DCMD_SUCCESS)
- break;
-
- if (!instance->requestorId ||
- (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
- dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
-
- if (dcmd_ret != DCMD_SUCCESS)
- break;
-
- doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
+ event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
instance->host->host_no);
break;
@@ -7953,7 +8227,7 @@ megasas_aen_polling(struct work_struct *work)
}
break;
default:
- doscan = 0;
+ event_type = 0;
break;
}
} else {
@@ -7963,44 +8237,13 @@ megasas_aen_polling(struct work_struct *work)
return;
}
- mutex_unlock(&instance->reset_mutex);
+ if (event_type)
+ dcmd_ret = megasas_update_device_list(instance, event_type);
- if (doscan & SCAN_PD_CHANNEL) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
- sdev1 = scsi_device_lookup(host, i, j, 0);
- if (instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) {
- if (!sdev1)
- scsi_add_device(host, i, j, 0);
- else
- scsi_device_put(sdev1);
- } else {
- if (sdev1)
- megasas_remove_scsi_device(sdev1);
- }
- }
- }
- }
+ mutex_unlock(&instance->reset_mutex);
- if (doscan & SCAN_VD_CHANNEL) {
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- if (instance->ld_ids[ld_index] != 0xff) {
- if (!sdev1)
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- else
- scsi_device_put(sdev1);
- } else {
- if (sdev1)
- megasas_remove_scsi_device(sdev1);
- }
- }
- }
- }
+ if (event_type && dcmd_ret == DCMD_SUCCESS)
+ megasas_add_remove_devices(instance, event_type);
if (dcmd_ret == DCMD_SUCCESS)
seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 647f48a28f85..e35c2b64c145 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -242,7 +242,6 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
&instance->reg_set->inbound_low_queue_port);
writel(le32_to_cpu(req_desc->u.high),
&instance->reg_set->inbound_high_queue_port);
- mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
@@ -937,11 +936,9 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
{
int i;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
- struct fusion_context *fusion;
u32 msecs = seconds * 1000;
- fusion = instance->ctrl_context;
/*
* Wait for cmd_status to change
*/
@@ -1074,6 +1071,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
drv_ops->mfi_capabilities.support_nvme_passthru = 1;
+ drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1;
if (instance->consistent_mask_64bit)
drv_ops->mfi_capabilities.support_64bit_mode = 1;
@@ -1330,7 +1328,6 @@ megasas_sync_map_info(struct megasas_instance *instance)
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
u16 num_lds;
- u32 size_sync_info;
struct fusion_context *fusion;
struct MR_LD_TARGET_SYNC *ci = NULL;
struct MR_DRV_RAID_MAP_ALL *map;
@@ -1359,8 +1356,6 @@ megasas_sync_map_info(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
-
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
ci = (struct MR_LD_TARGET_SYNC *)
@@ -1639,15 +1634,12 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{
- struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
u32 scratch_pad_1;
int i = 0, count;
fusion = instance->ctrl_context;
- reg_set = instance->reg_set;
-
megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
/*
@@ -1926,7 +1918,6 @@ static bool
megasas_is_prp_possible(struct megasas_instance *instance,
struct scsi_cmnd *scmd, int sge_count)
{
- struct fusion_context *fusion;
int i;
u32 data_length = 0;
struct scatterlist *sg_scmd;
@@ -1935,7 +1926,6 @@ megasas_is_prp_possible(struct megasas_instance *instance,
mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
MR_DEFAULT_NVME_PAGE_SIZE);
- fusion = instance->ctrl_context;
data_length = scsi_bufflen(scmd);
sg_scmd = scsi_sglist(scmd);
@@ -2048,12 +2038,9 @@ megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
u32 first_prp_len;
bool build_prp = false;
int data_len = scsi_bufflen(scmd);
- struct fusion_context *fusion;
u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
MR_DEFAULT_NVME_PAGE_SIZE);
- fusion = instance->ctrl_context;
-
build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
if (!build_prp)
@@ -2621,7 +2608,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
u32 scsi_buff_len;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
- union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct IO_REQUEST_INFO io_info;
struct fusion_context *fusion;
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
@@ -2644,8 +2630,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
rctx->status = 0;
rctx->ex_status = 0;
- req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
-
start_lba_lo = 0;
start_lba_hi = 0;
fp_possible = false;
@@ -3246,9 +3230,6 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u32 index;
- struct fusion_context *fusion;
-
- fusion = instance->ctrl_context;
if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
instance->ldio_threshold &&
@@ -4401,14 +4382,11 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
{
struct megasas_instance *instance;
u16 smid, devhandle;
- struct fusion_context *fusion;
int ret;
struct MR_PRIV_DEVICE *mr_device_priv_data;
mr_device_priv_data = scmd->device->hostdata;
-
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- fusion = instance->ctrl_context;
scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd);
scsi_print_command(scmd);
@@ -4428,7 +4406,6 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
goto out;
}
-
if (!mr_device_priv_data->is_tm_capable) {
ret = FAILED;
goto out;
@@ -4487,12 +4464,10 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
struct megasas_instance *instance;
int ret = FAILED;
u16 devhandle;
- struct fusion_context *fusion;
struct MR_PRIV_DEVICE *mr_device_priv_data;
mr_device_priv_data = scmd->device->hostdata;
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- fusion = instance->ctrl_context;
sdev_printk(KERN_INFO, scmd->device,
"target reset called for scmd(%p)\n", scmd);
@@ -4512,7 +4487,6 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
goto out;
}
-
if (!mr_device_priv_data->is_tm_capable) {
ret = FAILED;
goto out;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index ca73c50fe723..1481bf029490 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -724,6 +724,7 @@ struct MPI2_IOC_INIT_REQUEST {
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100
+#define MR_DCMD_CTRL_DEVICE_LIST_GET 0x01190600
struct MR_DEV_HANDLE_INFO {
__le16 curDevHdl;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 398fa6fde960..a2f4a55c51be 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -548,7 +548,8 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086)
#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087)
#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E)
-#define MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP (0x02B0)
+#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP (0x02B0)
+#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1 (0x02B1)
/*MPI v2.5 SAS products */
#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 0a6cb8f0680c..f60b9e0a6ca6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
if (smid < ioc->hi_priority_smid) {
struct scsiio_tracker *st;
+ void *request;
st = _get_st_from_smid(ioc, smid);
if (!st) {
_base_recovery_check(ioc);
return;
}
+
+ /* Clear MPI request frame */
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(request, 0, ioc->request_sz);
+
mpt3sas_base_clear_st(ioc, st);
_base_recovery_check(ioc);
return;
@@ -3327,7 +3333,6 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
spin_lock_irqsave(writeq_lock, flags);
__raw_writel((u32)(b), addr);
__raw_writel((u32)(b >> 32), (addr + 4));
- mmiowb();
spin_unlock_irqrestore(writeq_lock, flags);
}
@@ -3563,6 +3568,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
ioc->pdev->subsystem_device);
break;
}
+ break;
case MPI2_MFGPAGE_DEVID_SAS2308_2:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_INTEL_RS25GB008_SSDID:
@@ -3598,6 +3604,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
ioc->pdev->subsystem_device);
break;
}
+ break;
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_INTEL_RMS3JC080_SSDID:
@@ -3742,6 +3749,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
ioc->pdev->subsystem_device);
break;
}
+ break;
case MPI2_MFGPAGE_DEVID_SAS2308_2:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_HP_2_4_INTERNAL_SSDID:
@@ -3765,6 +3773,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
ioc->pdev->subsystem_device);
break;
}
+ break;
default:
ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
ioc->pdev->subsystem_device);
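For context, each `+ break;` added in the hunks above closes a device-ID case whose inner switch previously fell through into the next case of the outer switch, so one adapter family could print the following family's branding as well. A minimal sketch of that fallthrough pattern; the enum values and function names below are illustrative, not the driver's:

	#include <stdio.h>

	enum { DEVID_A = 0x87, DEVID_B = 0x96 };	/* illustrative IDs */

	static void print_branding(int devid, int ssdid)
	{
		switch (devid) {
		case DEVID_A:
			switch (ssdid) {
			case 1:
				printf("OEM branding for DEVID_A/1\n");
				break;
			default:
				printf("generic DEVID_A, subsystem 0x%X\n", ssdid);
				break;
			}
			break;	/* the fix: without this, control falls into DEVID_B */
		case DEVID_B:
			printf("generic DEVID_B, subsystem 0x%X\n", ssdid);
			break;
		}
	}

	int main(void)
	{
		print_branding(DEVID_A, 2);	/* prints one line, not two */
		return 0;
	}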
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 800351932cc3..19158cb59101 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -75,9 +75,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "27.101.00.00"
+#define MPT3SAS_DRIVER_VERSION "27.102.00.00"
#define MPT3SAS_MAJOR_VERSION 27
-#define MPT3SAS_MINOR_VERSION 101
+#define MPT3SAS_MINOR_VERSION 102
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -193,6 +193,9 @@ struct mpt3sas_nvme_cmd {
#define SAS2_PCI_DEVICE_B0_REVISION (0x01)
#define SAS3_PCI_DEVICE_C0_REVISION (0x02)
+/* Atlas PCIe Switch Management Port */
+#define MPI26_ATLAS_PCIe_SWITCH_DEVID (0x00B2)
+
/*
* Intel HBA branding
*/
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6be39dc27103..1ccfbc7eebe0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *scmd = NULL;
struct scsiio_tracker *st;
+ Mpi25SCSIIORequest_t *mpi_request;
if (smid > 0 &&
smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
u32 unique_tag = smid - 1;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ /*
+	 * If a SCSI IO request is outstanding at the driver level, then
+	 * the DevHandle field must be non-zero. If DevHandle is zero,
+	 * then this smid is free at the driver level, so return NULL.
+ */
+ if (!mpi_request->DevHandle)
+ return scmd;
+
scmd = scsi_host_find_tag(ioc->shost, unique_tag);
if (scmd) {
st = scsi_cmd_priv(scmd);
@@ -10256,7 +10268,8 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI2_MFGPAGE_DEVID_SAS2308_1:
case MPI2_MFGPAGE_DEVID_SAS2308_2:
case MPI2_MFGPAGE_DEVID_SAS2308_3:
- case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
+ case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
+ case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
return MPI2_VERSION;
case MPI25_MFGPAGE_DEVID_SAS3004:
case MPI25_MFGPAGE_DEVID_SAS3008:
@@ -10282,6 +10295,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_SAS3516_1:
case MPI26_MFGPAGE_DEVID_SAS3416:
case MPI26_MFGPAGE_DEVID_SAS3616:
+ case MPI26_ATLAS_PCIe_SWITCH_DEVID:
case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
@@ -10343,7 +10357,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->is_warpdrive = 1;
ioc->hide_ir_msg = 1;
break;
- case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
+ case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
+ case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
ioc->is_mcpu_endpoint = 1;
break;
default:
@@ -10371,6 +10386,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI26_MFGPAGE_DEVID_SAS3516_1:
case MPI26_MFGPAGE_DEVID_SAS3416:
case MPI26_MFGPAGE_DEVID_SAS3616:
+ case MPI26_ATLAS_PCIe_SWITCH_DEVID:
ioc->is_gen35_ioc = 1;
break;
case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
@@ -10783,7 +10799,9 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP,
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
PCI_ANY_ID, PCI_ANY_ID },
/* SSS6200 */
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
@@ -10849,6 +10867,10 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
PCI_ANY_ID, PCI_ANY_ID },
+ /* Atlas PCIe Switch Management Port */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
+ PCI_ANY_ID, PCI_ANY_ID },
+
/* Sea SI 0x00E5 Configurable Secure
* 0x00E6 Hard Secure
*/
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 36f64205ecfa..3df02691a092 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -718,8 +718,8 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
- scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
- scmd->serial_number, scmd->cmnd[0], scmd->retries);
+ scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
+ scmd->request->tag, scmd->cmnd[0], scmd->retries);
return mhba->instancet->reset_host(mhba);
}
@@ -2104,7 +2104,6 @@ static int mvumi_queue_command(struct Scsi_Host *shost,
unsigned long irq_flags;
spin_lock_irqsave(shost->host_lock, irq_flags);
- scsi_cmd_get_serial(shost, scmd);
mhba = (struct mvumi_hba *) shost->hostdata;
scmd->result = 0;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 00e3cbee55b8..da4d6e1106c4 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -2441,7 +2441,6 @@ static void nsp32_set_sync_entry(nsp32_hw_data *data,
period = data->synct[entry].period_num;
ackwidth = data->synct[entry].ackwidth;
- offset = offset;
sample_rate = data->synct[entry].sample_rate;
target->syncreg = TO_SYNCREG(period, offset);
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
deleted file mode 100644
index 58cecd45b0f5..000000000000
--- a/drivers/scsi/osd/Kbuild
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Kbuild for the OSD modules
-#
-# Copyright (C) 2008 Panasas Inc. All rights reserved.
-#
-# Authors:
-# Boaz Harrosh <ooo@electrozaur.com>
-# Benny Halevy <bhalevy@panasas.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2
-#
-
-# libosd.ko - osd-initiator library
-libosd-y := osd_initiator.o
-obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
-
-# osd.ko - SCSI ULD and char-device
-osd-y := osd_uld.o
-obj-$(CONFIG_SCSI_OSD_ULD) += osd.o
diff --git a/drivers/scsi/osd/Kconfig b/drivers/scsi/osd/Kconfig
deleted file mode 100644
index 347cc5e33749..000000000000
--- a/drivers/scsi/osd/Kconfig
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-# Kernel configuration file for the OSD scsi protocol
-#
-# Copyright (C) 2008 Panasas Inc. All rights reserved.
-#
-# Authors:
-# Boaz Harrosh <ooo@electrozaur.com>
-# Benny Halevy <bhalevy@panasas.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public version 2 License as
-# published by the Free Software Foundation
-#
-config SCSI_OSD_INITIATOR
- tristate "OSD-Initiator library"
- depends on SCSI
- help
- Enable the OSD-Initiator library (libosd.ko).
- NOTE: You must also select CRYPTO_SHA1 + CRYPTO_HMAC and their
- dependencies
-
-config SCSI_OSD_ULD
- tristate "OSD Upper Level driver"
- depends on SCSI_OSD_INITIATOR
- help
- Build a SCSI upper layer driver that exports /dev/osdX devices
- to user-mode for testing and controlling OSD devices. It is also
- needed by exofs, for mounting an OSD based file system.
-
-config SCSI_OSD_DPRINT_SENSE
- int "(0-2) When sense is returned, DEBUG print all sense descriptors"
- default 1
- depends on SCSI_OSD_INITIATOR
- help
- When a CHECK_CONDITION status is returned from a target, and a
- sense-buffer is retrieved, turning this on will dump a full
- sense-decoding message. Setting to 2 will also print recoverable
- errors that might be regularly returned for some filesystem
- operations.
-
-config SCSI_OSD_DEBUG
- bool "Compile All OSD modules with lots of DEBUG prints"
- default n
- depends on SCSI_OSD_INITIATOR
- help
- OSD Code is populated with lots of OSD_DEBUG(..) printouts to
- dmesg. Enable this if you found a bug and you want to help us
- track the problem (see also MAINTAINERS). Setting this will also
- force SCSI_OSD_DPRINT_SENSE=2.
diff --git a/drivers/scsi/osd/osd_debug.h b/drivers/scsi/osd/osd_debug.h
deleted file mode 100644
index 26341261bb5c..000000000000
--- a/drivers/scsi/osd/osd_debug.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * osd_debug.h - Some kprintf macros
- *
- * Copyright (C) 2008 Panasas Inc. All rights reserved.
- *
- * Authors:
- * Boaz Harrosh <ooo@electrozaur.com>
- * Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- */
-#ifndef __OSD_DEBUG_H__
-#define __OSD_DEBUG_H__
-
-#define OSD_ERR(fmt, a...) printk(KERN_ERR "osd: " fmt, ##a)
-#define OSD_INFO(fmt, a...) printk(KERN_NOTICE "osd: " fmt, ##a)
-
-#ifdef CONFIG_SCSI_OSD_DEBUG
-#define OSD_DEBUG(fmt, a...) \
- printk(KERN_NOTICE "osd @%s:%d: " fmt, __func__, __LINE__, ##a)
-#else
-#define OSD_DEBUG(fmt, a...) do {} while (0)
-#endif
-
-/* u64 has problems with printk; this will cast it to unsigned long long */
-#define _LLU(x) (unsigned long long)(x)
-
-#endif /* ndef __OSD_DEBUG_H__ */
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
deleted file mode 100644
index 60cf7c5eb880..000000000000
--- a/drivers/scsi/osd/osd_initiator.c
+++ /dev/null
@@ -1,2076 +0,0 @@
-/*
- * osd_initiator - Main body of the osd initiator library.
- *
- * Note: The file does not contain the advanced security functionality which
- * is only needed by the security_manager's initiators.
- *
- * Copyright (C) 2008 Panasas Inc. All rights reserved.
- *
- * Authors:
- * Boaz Harrosh <ooo@electrozaur.com>
- * Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Panasas company nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include <scsi/osd_initiator.h>
-#include <scsi/osd_sec.h>
-#include <scsi/osd_attributes.h>
-#include <scsi/osd_sense.h>
-
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_request.h>
-
-#include "osd_debug.h"
-
-#ifndef __unused
-# define __unused __attribute__((unused))
-#endif
-
-enum { OSD_REQ_RETRIES = 1 };
-
-MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
-MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
-MODULE_LICENSE("GPL");
-
-static inline void build_test(void)
-{
- /* structures were not packed */
- BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
- BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
- BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
-}
-
-static const char *_osd_ver_desc(struct osd_request *or)
-{
- return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
-}
-
-#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
-
-static int _osd_get_print_system_info(struct osd_dev *od,
- void *caps, struct osd_dev_info *odi)
-{
- struct osd_request *or;
- struct osd_attr get_attrs[] = {
- ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
- ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
- ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
- ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
- ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
- ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
- ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
- ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
- ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
- ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
-		/* IBM-OSD-SIM has a bug with this one; put it last */
- ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
- };
- void *iter = NULL, *pFirst;
- int nelem = ARRAY_SIZE(get_attrs), a = 0;
- int ret;
-
- or = osd_start_request(od);
- if (!or)
- return -ENOMEM;
-
- /* get attrs */
- osd_req_get_attributes(or, &osd_root_object);
- osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));
-
- ret = osd_finalize_request(or, 0, caps, NULL);
- if (ret)
- goto out;
-
- ret = osd_execute_request(or);
- if (ret) {
- OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
- goto out;
- }
-
- osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);
-
- OSD_INFO("Detected %s device\n",
- _osd_ver_desc(or));
-
- pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
- (char *)pFirst);
-
- pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
- (char *)pFirst);
-
- pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("PRODUCT_MODEL [%s]\n",
- (char *)pFirst);
-
- pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
- pFirst ? get_unaligned_be32(pFirst) : ~0U);
-
- pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
- (char *)pFirst);
-
- odi->osdname_len = get_attrs[a].len;
-	/* Avoid NULL for memcmp optimization; 0-length is good enough */
- odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
- if (!odi->osdname) {
- ret = -ENOMEM;
- goto out;
- }
- if (odi->osdname_len)
- memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
- OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
- a++;
-
- pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
- pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
-
- pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("USED_CAPACITY [0x%llx]\n",
- pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
-
- pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
- pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
-
- if (a >= nelem)
- goto out;
-
- /* FIXME: Where are the time utilities */
- pFirst = get_attrs[a++].val_ptr;
- OSD_INFO("CLOCK [0x%6phN]\n", pFirst);
-
-	if (a < nelem) { /* IBM-OSD-SIM bug, might not have it */
- unsigned len = get_attrs[a].len;
- char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */
-
- hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
- sid_dump, sizeof(sid_dump), true);
- OSD_INFO("OSD_SYSTEM_ID(%d)\n"
- " [%s]\n", len, sid_dump);
-
- if (unlikely(len > sizeof(odi->systemid))) {
- OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
- "device identification might not work\n", len);
- len = sizeof(odi->systemid);
- }
- odi->systemid_len = len;
- memcpy(odi->systemid, get_attrs[a].val_ptr, len);
- a++;
- }
-out:
- osd_end_request(or);
- return ret;
-}
-
-int osd_auto_detect_ver(struct osd_dev *od,
- void *caps, struct osd_dev_info *odi)
-{
- int ret;
-
- /* Auto-detect the osd version */
- ret = _osd_get_print_system_info(od, caps, odi);
- if (ret) {
- osd_dev_set_ver(od, OSD_VER1);
- OSD_DEBUG("converting to OSD1\n");
- ret = _osd_get_print_system_info(od, caps, odi);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(osd_auto_detect_ver);
-
-static unsigned _osd_req_cdb_len(struct osd_request *or)
-{
- return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
-}
-
-static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
-{
- return osd_req_is_ver1(or) ?
- osdv1_attr_list_elem_size(len) :
- osdv2_attr_list_elem_size(len);
-}
-
-static void _osd_req_alist_elem_encode(struct osd_request *or,
- void *attr_last, const struct osd_attr *oa)
-{
- if (osd_req_is_ver1(or)) {
- struct osdv1_attributes_list_element *attr = attr_last;
-
- attr->attr_page = cpu_to_be32(oa->attr_page);
- attr->attr_id = cpu_to_be32(oa->attr_id);
- attr->attr_bytes = cpu_to_be16(oa->len);
- memcpy(attr->attr_val, oa->val_ptr, oa->len);
- } else {
- struct osdv2_attributes_list_element *attr = attr_last;
-
- attr->attr_page = cpu_to_be32(oa->attr_page);
- attr->attr_id = cpu_to_be32(oa->attr_id);
- attr->attr_bytes = cpu_to_be16(oa->len);
- memcpy(attr->attr_val, oa->val_ptr, oa->len);
- }
-}
-
-static int _osd_req_alist_elem_decode(struct osd_request *or,
- void *cur_p, struct osd_attr *oa, unsigned max_bytes)
-{
- unsigned inc;
- if (osd_req_is_ver1(or)) {
- struct osdv1_attributes_list_element *attr = cur_p;
-
- if (max_bytes < sizeof(*attr))
- return -1;
-
- oa->len = be16_to_cpu(attr->attr_bytes);
- inc = _osd_req_alist_elem_size(or, oa->len);
- if (inc > max_bytes)
- return -1;
-
- oa->attr_page = be32_to_cpu(attr->attr_page);
- oa->attr_id = be32_to_cpu(attr->attr_id);
-
- /* OSD1: On empty attributes we return a pointer to 2 bytes
-		 * of zeros. This keeps the behaviour similar to OSD2.
- * (See below)
- */
- oa->val_ptr = likely(oa->len) ? attr->attr_val :
- (u8 *)&attr->attr_bytes;
- } else {
- struct osdv2_attributes_list_element *attr = cur_p;
-
- if (max_bytes < sizeof(*attr))
- return -1;
-
- oa->len = be16_to_cpu(attr->attr_bytes);
- inc = _osd_req_alist_elem_size(or, oa->len);
- if (inc > max_bytes)
- return -1;
-
- oa->attr_page = be32_to_cpu(attr->attr_page);
- oa->attr_id = be32_to_cpu(attr->attr_id);
-
- /* OSD2: For convenience, on empty attributes, we return 8 bytes
-		 * of zeros here. This keeps the same behaviour as OSD2r04,
-		 * and is nice with null-terminated ASCII fields.
- * oa->val_ptr == NULL marks the end-of-list, or error.
- */
- oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
- }
- return inc;
-}
-
-static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
-{
- return osd_req_is_ver1(or) ?
- osdv1_list_size(list_head) :
- osdv2_list_size(list_head);
-}
-
-static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
-{
- return osd_req_is_ver1(or) ?
- sizeof(struct osdv1_attributes_list_header) :
- sizeof(struct osdv2_attributes_list_header);
-}
-
-static void _osd_req_set_alist_type(struct osd_request *or,
- void *list, int list_type)
-{
- if (osd_req_is_ver1(or)) {
- struct osdv1_attributes_list_header *attr_list = list;
-
- memset(attr_list, 0, sizeof(*attr_list));
- attr_list->type = list_type;
- } else {
- struct osdv2_attributes_list_header *attr_list = list;
-
- memset(attr_list, 0, sizeof(*attr_list));
- attr_list->type = list_type;
- }
-}
-
-static bool _osd_req_is_alist_type(struct osd_request *or,
- void *list, int list_type)
-{
- if (!list)
- return false;
-
- if (osd_req_is_ver1(or)) {
- struct osdv1_attributes_list_header *attr_list = list;
-
- return attr_list->type == list_type;
- } else {
- struct osdv2_attributes_list_header *attr_list = list;
-
- return attr_list->type == list_type;
- }
-}
-
-/* This is for List-objects not Attributes-Lists */
-static void _osd_req_encode_olist(struct osd_request *or,
- struct osd_obj_id_list *list)
-{
- struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
-
- if (osd_req_is_ver1(or)) {
- cdbh->v1.list_identifier = list->list_identifier;
- cdbh->v1.start_address = list->continuation_id;
- } else {
- cdbh->v2.list_identifier = list->list_identifier;
- cdbh->v2.start_address = list->continuation_id;
- }
-}
-
-static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
- u64 offset, unsigned *padding)
-{
- return __osd_encode_offset(offset, padding,
- osd_req_is_ver1(or) ?
- OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
- OSD_OFFSET_MAX_SHIFT);
-}
-
-static struct osd_security_parameters *
-_osd_req_sec_params(struct osd_request *or)
-{
- struct osd_cdb *ocdb = &or->cdb;
-
- if (osd_req_is_ver1(or))
- return (struct osd_security_parameters *)&ocdb->v1.sec_params;
- else
- return (struct osd_security_parameters *)&ocdb->v2.sec_params;
-}
-
-void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
-{
- memset(osdd, 0, sizeof(*osdd));
- osdd->scsi_device = scsi_device;
- osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
-#ifdef OSD_VER1_SUPPORT
- osdd->version = OSD_VER2;
-#endif
- /* TODO: Allocate pools for osd_request attributes ... */
-}
-EXPORT_SYMBOL(osd_dev_init);
-
-void osd_dev_fini(struct osd_dev *osdd)
-{
- /* TODO: De-allocate pools */
-
- osdd->scsi_device = NULL;
-}
-EXPORT_SYMBOL(osd_dev_fini);
-
-static struct osd_request *_osd_request_alloc(gfp_t gfp)
-{
- struct osd_request *or;
-
- /* TODO: Use mempool with one saved request */
- or = kzalloc(sizeof(*or), gfp);
- return or;
-}
-
-static void _osd_request_free(struct osd_request *or)
-{
- kfree(or);
-}
-
-struct osd_request *osd_start_request(struct osd_dev *dev)
-{
- struct osd_request *or;
-
- or = _osd_request_alloc(GFP_KERNEL);
- if (!or)
- return NULL;
-
- or->osd_dev = dev;
- or->timeout = dev->def_timeout;
- or->retries = OSD_REQ_RETRIES;
-
- return or;
-}
-EXPORT_SYMBOL(osd_start_request);
-
-static void _osd_free_seg(struct osd_request *or __unused,
- struct _osd_req_data_segment *seg)
-{
- if (!seg->buff || !seg->alloc_size)
- return;
-
- kfree(seg->buff);
- seg->buff = NULL;
- seg->alloc_size = 0;
-}
-
-static void _put_request(struct request *rq)
-{
- /*
- * If osd_finalize_request() was called but the request was not
- * executed through the block layer, then we must release BIOs.
- * TODO: Keep error code in or->async_error. Need to audit all
- * code paths.
- */
- if (unlikely(rq->bio))
- blk_mq_end_request(rq, BLK_STS_IOERR);
- else
- blk_put_request(rq);
-}
-
-void osd_end_request(struct osd_request *or)
-{
- struct request *rq = or->request;
-
- if (rq) {
- if (rq->next_rq) {
- _put_request(rq->next_rq);
- rq->next_rq = NULL;
- }
-
- _put_request(rq);
- }
-
- _osd_free_seg(or, &or->get_attr);
- _osd_free_seg(or, &or->enc_get_attr);
- _osd_free_seg(or, &or->set_attr);
- _osd_free_seg(or, &or->cdb_cont);
-
- _osd_request_free(or);
-}
-EXPORT_SYMBOL(osd_end_request);
-
-static void _set_error_resid(struct osd_request *or, struct request *req,
- blk_status_t error)
-{
- or->async_error = error;
- or->req_errors = scsi_req(req)->result;
- or->sense_len = scsi_req(req)->sense_len;
- if (or->sense_len)
- memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
- if (or->out.req)
- or->out.residual = scsi_req(or->out.req)->resid_len;
- if (or->in.req)
- or->in.residual = scsi_req(or->in.req)->resid_len;
-}
-
-int osd_execute_request(struct osd_request *or)
-{
- blk_execute_rq(or->request->q, NULL, or->request, 0);
-
- if (scsi_req(or->request)->result) {
- _set_error_resid(or, or->request, BLK_STS_IOERR);
- return -EIO;
- }
-
- _set_error_resid(or, or->request, BLK_STS_OK);
- return 0;
-}
-EXPORT_SYMBOL(osd_execute_request);
-
-static void osd_request_async_done(struct request *req, blk_status_t error)
-{
- struct osd_request *or = req->end_io_data;
-
- _set_error_resid(or, req, error);
- if (req->next_rq) {
- blk_put_request(req->next_rq);
- req->next_rq = NULL;
- }
-
- blk_put_request(req);
- or->request = NULL;
- or->in.req = NULL;
- or->out.req = NULL;
-
- if (or->async_done)
- or->async_done(or, or->async_private);
- else
- osd_end_request(or);
-}
-
-int osd_execute_request_async(struct osd_request *or,
- osd_req_done_fn *done, void *private)
-{
- or->request->end_io_data = or;
- or->async_private = private;
- or->async_done = done;
-
- blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
- osd_request_async_done);
- return 0;
-}
-EXPORT_SYMBOL(osd_execute_request_async);
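Because osd_request_async_done() above calls osd_end_request() only when no done callback was registered, an asynchronous caller owns the request inside its callback and must free it there. A hedged caller sketch, assuming a request already built against this API; the callback name, wrapper name, and the completion-based wait are illustrative:

	static void my_osd_done(struct osd_request *or, void *private)
	{
		struct completion *waiting = private;

		/* outcome is in or->async_error / or->req_errors */
		complete(waiting);
		osd_end_request(or);	/* callback owns the request */
	}

	static int execute_and_wait(struct osd_request *or,
				    const void *caps, const u8 *cap_key)
	{
		DECLARE_COMPLETION_ONSTACK(waiting);
		int ret;

		ret = osd_finalize_request(or, 0, caps, cap_key);
		if (ret)
			return ret;

		ret = osd_execute_request_async(or, my_osd_done, &waiting);
		if (ret)
			return ret;

		wait_for_completion(&waiting);
		return 0;
	}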
-
-u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
-u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
-
-static int _osd_realloc_seg(struct osd_request *or,
- struct _osd_req_data_segment *seg, unsigned max_bytes)
-{
- void *buff;
-
- if (seg->alloc_size >= max_bytes)
- return 0;
-
- buff = krealloc(seg->buff, max_bytes, GFP_KERNEL);
- if (!buff) {
- OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
- seg->alloc_size);
- return -ENOMEM;
- }
-
- memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
- seg->buff = buff;
- seg->alloc_size = max_bytes;
- return 0;
-}
-
-static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
-{
- OSD_DEBUG("total_bytes=%d\n", total_bytes);
- return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
-}
-
-static int _alloc_set_attr_list(struct osd_request *or,
- const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
-{
- unsigned total_bytes = add_bytes;
-
- for (; nelem; --nelem, ++oa)
- total_bytes += _osd_req_alist_elem_size(or, oa->len);
-
- OSD_DEBUG("total_bytes=%d\n", total_bytes);
- return _osd_realloc_seg(or, &or->set_attr, total_bytes);
-}
-
-static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
-{
- OSD_DEBUG("total_bytes=%d\n", max_bytes);
- return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
-}
-
-static int _alloc_get_attr_list(struct osd_request *or)
-{
- OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
- return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
-}
-
-/*
- * Common to all OSD commands
- */
-
-static void _osdv1_req_encode_common(struct osd_request *or,
- __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
-{
- struct osdv1_cdb *ocdb = &or->cdb.v1;
-
- /*
- * For speed, the commands
- * OSD_ACT_PERFORM_SCSI_COMMAND , V1 0x8F7E, V2 0x8F7C
- * OSD_ACT_SCSI_TASK_MANAGEMENT , V1 0x8F7F, V2 0x8F7D
-	 * are not supported here. Callers should pass zero and set it after the call.
- */
- act &= cpu_to_be16(~0x0080); /* V1 action code */
-
- OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));
-
- ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
- ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
- ocdb->h.varlen_cdb.service_action = act;
-
- ocdb->h.partition = cpu_to_be64(obj->partition);
- ocdb->h.object = cpu_to_be64(obj->id);
- ocdb->h.v1.length = cpu_to_be64(len);
- ocdb->h.v1.start_address = cpu_to_be64(offset);
-}
-
-static void _osdv2_req_encode_common(struct osd_request *or,
- __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
-{
- struct osdv2_cdb *ocdb = &or->cdb.v2;
-
- OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));
-
- ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
- ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
- ocdb->h.varlen_cdb.service_action = act;
-
- ocdb->h.partition = cpu_to_be64(obj->partition);
- ocdb->h.object = cpu_to_be64(obj->id);
- ocdb->h.v2.length = cpu_to_be64(len);
- ocdb->h.v2.start_address = cpu_to_be64(offset);
-}
-
-static void _osd_req_encode_common(struct osd_request *or,
- __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
-{
- if (osd_req_is_ver1(or))
- _osdv1_req_encode_common(or, act, obj, offset, len);
- else
- _osdv2_req_encode_common(or, act, obj, offset, len);
-}
-
-/*
- * Device commands
- */
-/*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
-/*TODO: void osd_req_set_master_key(struct osd_request *, ...); */
-
-void osd_req_format(struct osd_request *or, u64 tot_capacity)
-{
- _osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
- tot_capacity);
-}
-EXPORT_SYMBOL(osd_req_format);
-
-int osd_req_list_dev_partitions(struct osd_request *or,
- osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
-{
- return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
-}
-EXPORT_SYMBOL(osd_req_list_dev_partitions);
-
-static void _osd_req_encode_flush(struct osd_request *or,
- enum osd_options_flush_scope_values op)
-{
- struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);
-
- ocdb->command_specific_options = op;
-}
-
-void osd_req_flush_obsd(struct osd_request *or,
- enum osd_options_flush_scope_values op)
-{
- _osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
- _osd_req_encode_flush(or, op);
-}
-EXPORT_SYMBOL(osd_req_flush_obsd);
-
-/*TODO: void osd_req_perform_scsi_command(struct osd_request *,
- const u8 *cdb, ...); */
-/*TODO: void osd_req_task_management(struct osd_request *, ...); */
-
-/*
- * Partition commands
- */
-static void _osd_req_encode_partition(struct osd_request *or,
- __be16 act, osd_id partition)
-{
- struct osd_obj_id par = {
- .partition = partition,
- .id = 0,
- };
-
- _osd_req_encode_common(or, act, &par, 0, 0);
-}
-
-void osd_req_create_partition(struct osd_request *or, osd_id partition)
-{
- _osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
-}
-EXPORT_SYMBOL(osd_req_create_partition);
-
-void osd_req_remove_partition(struct osd_request *or, osd_id partition)
-{
- _osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
-}
-EXPORT_SYMBOL(osd_req_remove_partition);
-
-/*TODO: void osd_req_set_partition_key(struct osd_request *,
- osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
- u8 seed[OSD_CRYPTO_SEED_SIZE]); */
-
-static int _osd_req_list_objects(struct osd_request *or,
- __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
- struct osd_obj_id_list *list, unsigned nelem)
-{
- struct request_queue *q = osd_request_queue(or->osd_dev);
- u64 len = nelem * sizeof(osd_id) + sizeof(*list);
- struct bio *bio;
-
- _osd_req_encode_common(or, action, obj, (u64)initial_id, len);
-
- if (list->list_identifier)
- _osd_req_encode_olist(or, list);
-
- WARN_ON(or->in.bio);
- bio = bio_map_kern(q, list, len, GFP_KERNEL);
- if (IS_ERR(bio)) {
- OSD_ERR("!!! Failed to allocate list_objects BIO\n");
- return PTR_ERR(bio);
- }
-
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- or->in.bio = bio;
- or->in.total_bytes = bio->bi_iter.bi_size;
- return 0;
-}
-
-int osd_req_list_partition_collections(struct osd_request *or,
- osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
- unsigned nelem)
-{
- struct osd_obj_id par = {
- .partition = partition,
- .id = 0,
- };
-
- return osd_req_list_collection_objects(or, &par, initial_id, list,
- nelem);
-}
-EXPORT_SYMBOL(osd_req_list_partition_collections);
-
-int osd_req_list_partition_objects(struct osd_request *or,
- osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
- unsigned nelem)
-{
- struct osd_obj_id par = {
- .partition = partition,
- .id = 0,
- };
-
- return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
- nelem);
-}
-EXPORT_SYMBOL(osd_req_list_partition_objects);
-
-void osd_req_flush_partition(struct osd_request *or,
- osd_id partition, enum osd_options_flush_scope_values op)
-{
- _osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
- _osd_req_encode_flush(or, op);
-}
-EXPORT_SYMBOL(osd_req_flush_partition);
-
-/*
- * Collection commands
- */
-/*TODO: void osd_req_create_collection(struct osd_request *,
- const struct osd_obj_id *); */
-/*TODO: void osd_req_remove_collection(struct osd_request *,
- const struct osd_obj_id *); */
-
-int osd_req_list_collection_objects(struct osd_request *or,
- const struct osd_obj_id *obj, osd_id initial_id,
- struct osd_obj_id_list *list, unsigned nelem)
-{
- return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
- initial_id, list, nelem);
-}
-EXPORT_SYMBOL(osd_req_list_collection_objects);
-
-/*TODO: void query(struct osd_request *, ...); V2 */
-
-void osd_req_flush_collection(struct osd_request *or,
- const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
-{
- _osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
- _osd_req_encode_flush(or, op);
-}
-EXPORT_SYMBOL(osd_req_flush_collection);
-
-/*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
-/*TODO: void set_member_attrs(struct osd_request *, ...); V2 */
-
-/*
- * Object commands
- */
-void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
-{
- _osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
-}
-EXPORT_SYMBOL(osd_req_create_object);
-
-void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
-{
- _osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
-}
-EXPORT_SYMBOL(osd_req_remove_object);
-
-
-/*TODO: void osd_req_create_multi(struct osd_request *or,
- struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
-*/
-
-void osd_req_write(struct osd_request *or,
- const struct osd_obj_id *obj, u64 offset,
- struct bio *bio, u64 len)
-{
- _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
- WARN_ON(or->out.bio || or->out.total_bytes);
- WARN_ON(!op_is_write(bio_op(bio)));
- or->out.bio = bio;
- or->out.total_bytes = len;
-}
-EXPORT_SYMBOL(osd_req_write);
-
-int osd_req_write_kern(struct osd_request *or,
- const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
-{
- struct request_queue *req_q = osd_request_queue(or->osd_dev);
- struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
-
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- osd_req_write(or, obj, offset, bio, len);
- return 0;
-}
-EXPORT_SYMBOL(osd_req_write_kern);
-
-/*TODO: void osd_req_append(struct osd_request *,
- const struct osd_obj_id *, struct bio *data_out); */
-/*TODO: void osd_req_create_write(struct osd_request *,
- const struct osd_obj_id *, struct bio *data_out, u64 offset); */
-/*TODO: void osd_req_clear(struct osd_request *,
- const struct osd_obj_id *, u64 offset, u64 len); */
-/*TODO: void osd_req_punch(struct osd_request *,
- const struct osd_obj_id *, u64 offset, u64 len); V2 */
-
-void osd_req_flush_object(struct osd_request *or,
- const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
- /*V2*/ u64 offset, /*V2*/ u64 len)
-{
- if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
- OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
- offset = 0;
- len = 0;
- }
-
- _osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
- _osd_req_encode_flush(or, op);
-}
-EXPORT_SYMBOL(osd_req_flush_object);
-
-void osd_req_read(struct osd_request *or,
- const struct osd_obj_id *obj, u64 offset,
- struct bio *bio, u64 len)
-{
- _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
- WARN_ON(or->in.bio || or->in.total_bytes);
- WARN_ON(op_is_write(bio_op(bio)));
- or->in.bio = bio;
- or->in.total_bytes = len;
-}
-EXPORT_SYMBOL(osd_req_read);
-
-int osd_req_read_kern(struct osd_request *or,
- const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
-{
- struct request_queue *req_q = osd_request_queue(or->osd_dev);
- struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
-
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- osd_req_read(or, obj, offset, bio, len);
- return 0;
-}
-EXPORT_SYMBOL(osd_req_read_kern);
-
-static int _add_sg_continuation_descriptor(struct osd_request *or,
- const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
-{
- struct osd_sg_continuation_descriptor *oscd;
- u32 oscd_size;
- unsigned i;
- int ret;
-
- oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
-
- if (!or->cdb_cont.total_bytes) {
- /* First time, jump over the header, we will write to:
- * cdb_cont.buff + cdb_cont.total_bytes
- */
- or->cdb_cont.total_bytes =
- sizeof(struct osd_continuation_segment_header);
- }
-
- ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
- if (unlikely(ret))
- return ret;
-
- oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
- oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
- oscd->hdr.pad_length = 0;
- oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
-
- *len = 0;
- /* copy the sg entries and convert to network byte order */
- for (i = 0; i < numentries; i++) {
- oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
- oscd->entries[i].len = cpu_to_be64(sglist[i].len);
- *len += sglist[i].len;
- }
-
- or->cdb_cont.total_bytes += oscd_size;
- OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
- or->cdb_cont.total_bytes, oscd_size, numentries);
- return 0;
-}
-
-static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
-{
- struct request_queue *req_q = osd_request_queue(or->osd_dev);
- struct bio *bio;
- struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
- struct osd_continuation_segment_header *cont_seg_hdr;
-
- if (!or->cdb_cont.total_bytes)
- return 0;
-
- cont_seg_hdr = or->cdb_cont.buff;
- cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
- cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
-
- /* create a bio for continuation segment */
- bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
- GFP_KERNEL);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
- /* integrity check the continuation before the bio is linked
- * with the other data segments since the continuation
- * integrity is separate from the other data segments.
- */
- osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
-
- cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
-
- /* we can't use _req_append_segment, because we need to link in the
- * continuation bio to the head of the bio list - the
- * continuation segment (if it exists) is always the first segment in
- * the out data buffer.
- */
- bio->bi_next = or->out.bio;
- or->out.bio = bio;
- or->out.total_bytes += or->cdb_cont.total_bytes;
-
- return 0;
-}
-
-/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
- * @sglist that has the scatter gather entries. Scatter-gather enables a write
- * of multiple non-contiguous areas of an object in a single call. The extents
- * may overlap and/or be in any order. The only constraint is that:
- * total_bytes(sglist) >= total_bytes(bio)
- */
-int osd_req_write_sg(struct osd_request *or,
- const struct osd_obj_id *obj, struct bio *bio,
- const struct osd_sg_entry *sglist, unsigned numentries)
-{
- u64 len;
- int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
-
- if (ret)
- return ret;
- osd_req_write(or, obj, 0, bio, len);
-
- return 0;
-}
-EXPORT_SYMBOL(osd_req_write_sg);
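A hedged usage sketch of the scatter-gather write documented above: two disjoint 4 KiB extents of one object written in a single call. The wrapper name and extent geometry are illustrative; or, obj and bio are assumed to come from the usual osd_start_request()/bio-mapping path shown elsewhere in this file:

	static int write_two_extents(struct osd_request *or,
				     const struct osd_obj_id *obj, struct bio *bio)
	{
		/* extents may overlap and appear in any order; only
		 * total_bytes(sglist) >= total_bytes(bio) is required
		 */
		struct osd_sg_entry sglist[2] = {
			{ .offset = 0,       .len = 4096 },
			{ .offset = 1 << 20, .len = 4096 },
		};

		return osd_req_write_sg(or, obj, bio, sglist, 2);
	}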
-
-/* osd_req_read_sg: Read multiple extents of an object into @bio
- * See osd_req_write_sg
- */
-int osd_req_read_sg(struct osd_request *or,
- const struct osd_obj_id *obj, struct bio *bio,
- const struct osd_sg_entry *sglist, unsigned numentries)
-{
- u64 len;
- u64 off;
- int ret;
-
- if (numentries > 1) {
- off = 0;
- ret = _add_sg_continuation_descriptor(or, sglist, numentries,
- &len);
- if (ret)
- return ret;
- } else {
- /* Optimize the case of single segment, read_sg is a
- * bidi operation.
- */
- len = sglist->len;
- off = sglist->offset;
- }
- osd_req_read(or, obj, off, bio, len);
-
- return 0;
-}
-EXPORT_SYMBOL(osd_req_read_sg);
-
-/* SG-list write/read Kern API
- *
- * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
- * of sg_entries. @numentries indicates how many pointers and sg_entries there
- * are. By requiring an array of buff pointers, this allows a caller to do a
- * single write/read and scatter into multiple buffers.
- * NOTE: Each buffer + len should not cross a page boundary.
- */
-static struct bio *_create_sg_bios(struct osd_request *or,
- void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
-{
- struct request_queue *q = osd_request_queue(or->osd_dev);
- struct bio *bio;
- unsigned i;
-
- bio = bio_kmalloc(GFP_KERNEL, numentries);
- if (unlikely(!bio)) {
- OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
- return ERR_PTR(-ENOMEM);
- }
-
- for (i = 0; i < numentries; i++) {
- unsigned offset = offset_in_page(buff[i]);
- struct page *page = virt_to_page(buff[i]);
- unsigned len = sglist[i].len;
- unsigned added_len;
-
- BUG_ON(offset + len > PAGE_SIZE);
- added_len = bio_add_pc_page(q, bio, page, len, offset);
- if (unlikely(len != added_len)) {
- OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
- len, added_len);
- bio_put(bio);
- return ERR_PTR(-ENOMEM);
- }
- }
-
- return bio;
-}
-
-int osd_req_write_sg_kern(struct osd_request *or,
- const struct osd_obj_id *obj, void **buff,
- const struct osd_sg_entry *sglist, unsigned numentries)
-{
- struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- osd_req_write_sg(or, obj, bio, sglist, numentries);
-
- return 0;
-}
-EXPORT_SYMBOL(osd_req_write_sg_kern);
-
-int osd_req_read_sg_kern(struct osd_request *or,
- const struct osd_obj_id *obj, void **buff,
- const struct osd_sg_entry *sglist, unsigned numentries)
-{
- struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- osd_req_read_sg(or, obj, bio, sglist, numentries);
-
- return 0;
-}
-EXPORT_SYMBOL(osd_req_read_sg_kern);
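A hedged sketch of the kern variant under the page-boundary rule noted above: two 512-byte kmalloc buffers, each guaranteed to sit within a single page, filled by one OSD READ. The function name, lengths and object offsets are illustrative, and the buffers must of course stay allocated until the request completes:

	static int read_two_chunks(struct osd_request *or,
				   const struct osd_obj_id *obj)
	{
		struct osd_sg_entry sglist[2] = {
			{ .offset = 0,    .len = 512 },
			{ .offset = 8192, .len = 512 },
		};
		void *bufs[2];

		/* power-of-two slab objects never straddle a page boundary */
		bufs[0] = kmalloc(512, GFP_KERNEL);
		bufs[1] = kmalloc(512, GFP_KERNEL);
		if (!bufs[0] || !bufs[1]) {
			kfree(bufs[0]);
			kfree(bufs[1]);
			return -ENOMEM;
		}

		return osd_req_read_sg_kern(or, obj, bufs, sglist, 2);
	}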
-
-
-
-void osd_req_get_attributes(struct osd_request *or,
- const struct osd_obj_id *obj)
-{
- _osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
-}
-EXPORT_SYMBOL(osd_req_get_attributes);
-
-void osd_req_set_attributes(struct osd_request *or,
- const struct osd_obj_id *obj)
-{
- _osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
-}
-EXPORT_SYMBOL(osd_req_set_attributes);
-
-/*
- * Attributes List-mode
- */
-
-int osd_req_add_set_attr_list(struct osd_request *or,
- const struct osd_attr *oa, unsigned nelem)
-{
- unsigned total_bytes = or->set_attr.total_bytes;
- void *attr_last;
- int ret;
-
- if (or->attributes_mode &&
- or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
- WARN_ON(1);
- return -EINVAL;
- }
- or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
-
- if (!total_bytes) { /* first-time: allocate and put list header */
- total_bytes = _osd_req_sizeof_alist_header(or);
- ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
- if (ret)
- return ret;
- _osd_req_set_alist_type(or, or->set_attr.buff,
- OSD_ATTR_LIST_SET_RETRIEVE);
- }
- attr_last = or->set_attr.buff + total_bytes;
-
- for (; nelem; --nelem) {
- unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);
-
- total_bytes += elem_size;
- if (unlikely(or->set_attr.alloc_size < total_bytes)) {
- or->set_attr.total_bytes = total_bytes - elem_size;
- ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
- if (ret)
- return ret;
- attr_last =
- or->set_attr.buff + or->set_attr.total_bytes;
- }
-
- _osd_req_alist_elem_encode(or, attr_last, oa);
-
- attr_last += elem_size;
- ++oa;
- }
-
- or->set_attr.total_bytes = total_bytes;
- return 0;
-}
-EXPORT_SYMBOL(osd_req_add_set_attr_list);
-
-static int _req_append_segment(struct osd_request *or,
- unsigned padding, struct _osd_req_data_segment *seg,
- struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
-{
- void *pad_buff;
- int ret;
-
- if (padding) {
- /* check if we can just add it to last buffer */
- if (last_seg &&
- (padding <= last_seg->alloc_size - last_seg->total_bytes))
- pad_buff = last_seg->buff + last_seg->total_bytes;
- else
- pad_buff = io->pad_buff;
-
- ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
- GFP_KERNEL);
- if (ret)
- return ret;
- io->total_bytes += padding;
- }
-
- ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
- GFP_KERNEL);
- if (ret)
- return ret;
-
- io->total_bytes += seg->total_bytes;
- OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
- seg->total_bytes);
- return 0;
-}
-
-static int _osd_req_finalize_set_attr_list(struct osd_request *or)
-{
- struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
- unsigned padding;
- int ret;
-
- if (!or->set_attr.total_bytes) {
- cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
- return 0;
- }
-
- cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
- cdbh->attrs_list.set_attr_offset =
- osd_req_encode_offset(or, or->out.total_bytes, &padding);
-
- ret = _req_append_segment(or, padding, &or->set_attr,
- or->out.last_seg, &or->out);
- if (ret)
- return ret;
-
- or->out.last_seg = &or->set_attr;
- return 0;
-}
-
-int osd_req_add_get_attr_list(struct osd_request *or,
- const struct osd_attr *oa, unsigned nelem)
-{
- unsigned total_bytes = or->enc_get_attr.total_bytes;
- void *attr_last;
- int ret;
-
- if (or->attributes_mode &&
- or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
- WARN_ON(1);
- return -EINVAL;
- }
- or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
-
- /* first time calc data-in list header size */
- if (!or->get_attr.total_bytes)
- or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);
-
- /* calc data-out info */
- if (!total_bytes) { /* first-time: allocate and put list header */
- unsigned max_bytes;
-
- total_bytes = _osd_req_sizeof_alist_header(or);
- max_bytes = total_bytes +
- nelem * sizeof(struct osd_attributes_list_attrid);
- ret = _alloc_get_attr_desc(or, max_bytes);
- if (ret)
- return ret;
-
- _osd_req_set_alist_type(or, or->enc_get_attr.buff,
- OSD_ATTR_LIST_GET);
- }
- attr_last = or->enc_get_attr.buff + total_bytes;
-
- for (; nelem; --nelem) {
- struct osd_attributes_list_attrid *attrid;
- const unsigned cur_size = sizeof(*attrid);
-
- total_bytes += cur_size;
- if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
- or->enc_get_attr.total_bytes = total_bytes - cur_size;
- ret = _alloc_get_attr_desc(or,
- total_bytes + nelem * sizeof(*attrid));
- if (ret)
- return ret;
- attr_last = or->enc_get_attr.buff +
- or->enc_get_attr.total_bytes;
- }
-
- attrid = attr_last;
- attrid->attr_page = cpu_to_be32(oa->attr_page);
- attrid->attr_id = cpu_to_be32(oa->attr_id);
-
- attr_last += cur_size;
-
- /* calc data-in size */
- or->get_attr.total_bytes +=
- _osd_req_alist_elem_size(or, oa->len);
- ++oa;
- }
-
- or->enc_get_attr.total_bytes = total_bytes;
-
- OSD_DEBUG(
- "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%zu)\n",
- or->get_attr.total_bytes,
- or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
- or->enc_get_attr.total_bytes,
- (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
- / sizeof(struct osd_attributes_list_attrid));
-
- return 0;
-}
-EXPORT_SYMBOL(osd_req_add_get_attr_list);
-
-static int _osd_req_finalize_get_attr_list(struct osd_request *or)
-{
- struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
- unsigned out_padding;
- unsigned in_padding;
- int ret;
-
- if (!or->enc_get_attr.total_bytes) {
- cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
- cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
- return 0;
- }
-
- ret = _alloc_get_attr_list(or);
- if (ret)
- return ret;
-
- /* The out-going buffer info update */
- OSD_DEBUG("out-going\n");
- cdbh->attrs_list.get_attr_desc_bytes =
- cpu_to_be32(or->enc_get_attr.total_bytes);
-
- cdbh->attrs_list.get_attr_desc_offset =
- osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
-
- ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
- or->out.last_seg, &or->out);
- if (ret)
- return ret;
- or->out.last_seg = &or->enc_get_attr;
-
- /* The incoming buffer info update */
- OSD_DEBUG("in-coming\n");
- cdbh->attrs_list.get_attr_alloc_length =
- cpu_to_be32(or->get_attr.total_bytes);
-
- cdbh->attrs_list.get_attr_offset =
- osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
-
- ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
- &or->in);
- if (ret)
- return ret;
- or->in.last_seg = &or->get_attr;
-
- return 0;
-}
-
-int osd_req_decode_get_attr_list(struct osd_request *or,
- struct osd_attr *oa, int *nelem, void **iterator)
-{
- unsigned cur_bytes, returned_bytes;
- int n;
- const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
- void *cur_p;
-
- if (!_osd_req_is_alist_type(or, or->get_attr.buff,
- OSD_ATTR_LIST_SET_RETRIEVE)) {
- oa->attr_page = 0;
- oa->attr_id = 0;
- oa->val_ptr = NULL;
- oa->len = 0;
- *iterator = NULL;
- return 0;
- }
-
- if (*iterator) {
- BUG_ON((*iterator < or->get_attr.buff) ||
- (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
- cur_p = *iterator;
- cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
- returned_bytes = or->get_attr.total_bytes;
- } else { /* first time decode the list header */
- cur_bytes = sizeof_attr_list;
- returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
- sizeof_attr_list;
-
- cur_p = or->get_attr.buff + sizeof_attr_list;
-
- if (returned_bytes > or->get_attr.alloc_size) {
- OSD_DEBUG("target report: space was not big enough! "
- "Allocate=%u Needed=%u\n",
- or->get_attr.alloc_size,
- returned_bytes + sizeof_attr_list);
-
- returned_bytes =
- or->get_attr.alloc_size - sizeof_attr_list;
- }
- or->get_attr.total_bytes = returned_bytes;
- }
-
- for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
- int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
- returned_bytes - cur_bytes);
-
- if (inc < 0) {
-			OSD_ERR("BAD FOOD from target. list not valid! "
- "c=%d r=%d n=%d\n",
- cur_bytes, returned_bytes, n);
- oa->val_ptr = NULL;
- cur_bytes = returned_bytes; /* break the caller loop */
- break;
- }
-
- cur_bytes += inc;
- cur_p += inc;
- ++oa;
- }
-
- *iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
- *nelem = n;
- return returned_bytes - cur_bytes;
-}
-EXPORT_SYMBOL(osd_req_decode_get_attr_list);
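The decoder above follows an iterator protocol: pass *iterator as NULL on the first call, and it is set back to NULL once the returned list is exhausted, while *nelem is rewritten to the number of attributes actually decoded. A hedged walking sketch for a request that has already executed; the batch size and function name are illustrative, and the val_ptr check relies on the documented NULL end-of-list/error sentinel:

	static void walk_returned_attrs(struct osd_request *or)
	{
		struct osd_attr attrs[8];
		void *iter = NULL;
		int nelem, i;

		do {
			nelem = ARRAY_SIZE(attrs);
			osd_req_decode_get_attr_list(or, attrs, &nelem, &iter);
			for (i = 0; i < nelem; i++) {
				if (!attrs[i].val_ptr)
					break;	/* end-of-list, or error */
				OSD_INFO("page=0x%x id=0x%x len=%u\n",
					 attrs[i].attr_page, attrs[i].attr_id,
					 attrs[i].len);
			}
		} while (iter);
	}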
-
-/*
- * Attributes Page-mode
- */
-
-int osd_req_add_get_attr_page(struct osd_request *or,
- u32 page_id, void *attar_page, unsigned max_page_len,
- const struct osd_attr *set_one_attr)
-{
- struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
-
- if (or->attributes_mode &&
- or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
- WARN_ON(1);
- return -EINVAL;
- }
- or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;
-
- or->get_attr.buff = attar_page;
- or->get_attr.total_bytes = max_page_len;
-
- cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
- cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
-
- if (!set_one_attr || !set_one_attr->attr_page)
- return 0; /* The set is optional */
-
- or->set_attr.buff = set_one_attr->val_ptr;
- or->set_attr.total_bytes = set_one_attr->len;
-
- cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
- cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
- cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
- return 0;
-}
-EXPORT_SYMBOL(osd_req_add_get_attr_page);
-
-static int _osd_req_finalize_attr_page(struct osd_request *or)
-{
- struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
- unsigned in_padding, out_padding;
- int ret;
-
- /* returned page */
- cdbh->attrs_page.get_attr_offset =
- osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
-
- ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
- &or->in);
- if (ret)
- return ret;
-
- if (or->set_attr.total_bytes == 0)
- return 0;
-
- /* set one value */
- cdbh->attrs_page.set_attr_offset =
- osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
-
- ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
- &or->out);
- return ret;
-}
-
-static inline void osd_sec_parms_set_out_offset(bool is_v1,
- struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
-{
- if (is_v1)
- sec_parms->v1.data_out_integrity_check_offset = offset;
- else
- sec_parms->v2.data_out_integrity_check_offset = offset;
-}
-
-static inline void osd_sec_parms_set_in_offset(bool is_v1,
- struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
-{
- if (is_v1)
- sec_parms->v1.data_in_integrity_check_offset = offset;
- else
- sec_parms->v2.data_in_integrity_check_offset = offset;
-}
-
-static int _osd_req_finalize_data_integrity(struct osd_request *or,
- bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
- const u8 *cap_key)
-{
- struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
- int ret;
-
- if (!osd_is_sec_alldata(sec_parms))
- return 0;
-
- if (has_out) {
- struct _osd_req_data_segment seg = {
- .buff = &or->out_data_integ,
- .total_bytes = sizeof(or->out_data_integ),
- };
- unsigned pad;
-
- or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
- or->out_data_integ.set_attributes_bytes = cpu_to_be64(
- or->set_attr.total_bytes);
- or->out_data_integ.get_attributes_bytes = cpu_to_be64(
- or->enc_get_attr.total_bytes);
-
- osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
- osd_req_encode_offset(or, or->out.total_bytes, &pad));
-
- ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
- &or->out);
- if (ret)
- return ret;
- or->out.last_seg = NULL;
-
- /* they are now all chained to the request; sign them all together */
- osd_sec_sign_data(&or->out_data_integ, out_data_bio,
- cap_key);
- }
-
- if (has_in) {
- struct _osd_req_data_segment seg = {
- .buff = &or->in_data_integ,
- .total_bytes = sizeof(or->in_data_integ),
- };
- unsigned pad;
-
- osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
- osd_req_encode_offset(or, or->in.total_bytes, &pad));
-
- ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
- &or->in);
- if (ret)
- return ret;
-
- or->in.last_seg = NULL;
- }
-
- return 0;
-}
-
-/*
- * osd_finalize_request and helpers
- */
-static struct request *_make_request(struct request_queue *q, bool has_write,
- struct _osd_io_info *oii)
-{
- struct request *req;
- struct bio *bio = oii->bio;
- int ret;
-
- req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
- 0);
- if (IS_ERR(req))
- return req;
-
- for_each_bio(bio) {
- struct bio *bounce_bio = bio;
-
- ret = blk_rq_append_bio(req, &bounce_bio);
- if (ret)
- return ERR_PTR(ret);
- }
-
- return req;
-}
-
-static int _init_blk_request(struct osd_request *or,
- bool has_in, bool has_out)
-{
- struct scsi_device *scsi_device = or->osd_dev->scsi_device;
- struct request_queue *q = scsi_device->request_queue;
- struct request *req;
- int ret;
-
- req = _make_request(q, has_out, has_out ? &or->out : &or->in);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- goto out;
- }
-
- or->request = req;
- req->rq_flags |= RQF_QUIET;
-
- req->timeout = or->timeout;
- scsi_req(req)->retries = or->retries;
-
- if (has_out) {
- or->out.req = req;
- if (has_in) {
- /* allocate bidi request */
- req = _make_request(q, false, &or->in);
- if (IS_ERR(req)) {
- OSD_DEBUG("blk_get_request for bidi failed\n");
- ret = PTR_ERR(req);
- goto out;
- }
- or->in.req = or->request->next_rq = req;
- }
- } else if (has_in)
- or->in.req = req;
-
- ret = 0;
-out:
- OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
- or, has_in, has_out, ret, or->request);
- return ret;
-}
-
-int osd_finalize_request(struct osd_request *or,
- u8 options, const void *cap, const u8 *cap_key)
-{
- struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
- bool has_in, has_out;
- /* Save for data_integrity without the cdb_continuation */
- struct bio *out_data_bio = or->out.bio;
- u64 out_data_bytes = or->out.total_bytes;
- int ret;
-
- if (options & OSD_REQ_FUA)
- cdbh->options |= OSD_CDB_FUA;
-
- if (options & OSD_REQ_DPO)
- cdbh->options |= OSD_CDB_DPO;
-
- if (options & OSD_REQ_BYPASS_TIMESTAMPS)
- cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;
-
- osd_set_caps(&or->cdb, cap);
-
- has_in = or->in.bio || or->get_attr.total_bytes;
- has_out = or->out.bio || or->cdb_cont.total_bytes ||
- or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
-
- ret = _osd_req_finalize_cdb_cont(or, cap_key);
- if (ret) {
- OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
- return ret;
- }
- ret = _init_blk_request(or, has_in, has_out);
- if (ret) {
- OSD_DEBUG("_init_blk_request failed\n");
- return ret;
- }
-
- or->out.pad_buff = sg_out_pad_buffer;
- or->in.pad_buff = sg_in_pad_buffer;
-
- if (!or->attributes_mode)
- or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
- cdbh->command_specific_options |= or->attributes_mode;
- if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
- ret = _osd_req_finalize_attr_page(or);
- if (ret) {
- OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
- return ret;
- }
- } else {
- /* TODO: I think that for the GET_ATTR command these 2 should
- * be reversed to keep them in execution order (for embedded
- * targets with low memory footprint)
- */
- ret = _osd_req_finalize_set_attr_list(or);
- if (ret) {
- OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
- return ret;
- }
-
- ret = _osd_req_finalize_get_attr_list(or);
- if (ret) {
- OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
- return ret;
- }
- }
-
- ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
- out_data_bio, out_data_bytes,
- cap_key);
- if (ret)
- return ret;
-
- osd_sec_sign_cdb(&or->cdb, cap_key);
-
- scsi_req(or->request)->cmd = or->cdb.buff;
- scsi_req(or->request)->cmd_len = _osd_req_cdb_len(or);
-
- return 0;
-}
-EXPORT_SYMBOL(osd_finalize_request);
-
-static bool _is_osd_security_code(int code)
-{
- return (code == osd_security_audit_value_frozen) ||
- (code == osd_security_working_key_frozen) ||
- (code == osd_nonce_not_unique) ||
- (code == osd_nonce_timestamp_out_of_range) ||
- (code == osd_invalid_dataout_buffer_integrity_check_value);
-}
-
-#define OSD_SENSE_PRINT1(fmt, a...) \
- do { \
- if (__cur_sense_need_output) \
- OSD_ERR(fmt, ##a); \
- } while (0)
-
-#define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1(" " fmt, ##a)
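OSD_SENSE_PRINT1 uses the standard do { ... } while (0) wrapper so that the conditional print behaves as a single statement at every call site. A minimal illustration of why a brace-only form would break (none of this is from the driver itself):

    #define BAD_PRINT(x)  { if (dbg) pr_err(x); }          /* breaks below */
    #define GOOD_PRINT(x) do { if (dbg) pr_err(x); } while (0)

    if (cond)
        GOOD_PRINT("ok\n");	/* the trailing ';' completes one statement,
                                 * so a following 'else' still pairs with
                                 * 'if (cond)'; BAD_PRINT here would leave
                                 * a stray ';' and break the else */
    else
        do_other();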
-
-int osd_req_decode_sense_full(struct osd_request *or,
- struct osd_sense_info *osi, bool silent,
- struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
- struct osd_attr *bad_attr_list, int max_attr)
-{
- int sense_len, original_sense_len;
- struct osd_sense_info local_osi;
- struct scsi_sense_descriptor_based *ssdb;
- void *cur_descriptor;
-#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
- const bool __cur_sense_need_output = false;
-#else
- bool __cur_sense_need_output = !silent;
-#endif
- int ret;
-
- if (likely(!or->req_errors))
- return 0;
-
- osi = osi ? : &local_osi;
- memset(osi, 0, sizeof(*osi));
-
- ssdb = (typeof(ssdb))or->sense;
- sense_len = or->sense_len;
- if (sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key) {
- OSD_ERR("Block-layer returned error(0x%x) but "
- "sense_len(%u) || key(%d) is empty\n",
- or->req_errors, sense_len, ssdb->sense_key);
- goto analyze;
- }
-
- if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
- OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
- ssdb->response_code, sense_len);
- goto analyze;
- }
-
- osi->key = ssdb->sense_key;
- osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
- original_sense_len = ssdb->additional_sense_length + 8;
-
-#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
- if (__cur_sense_need_output)
- __cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
-#endif
- OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
- "additional_code=0x%x async_error=%d errors=0x%x\n",
- osi->key, original_sense_len, sense_len,
- osi->additional_code, or->async_error,
- or->req_errors);
-
- if (original_sense_len < sense_len)
- sense_len = original_sense_len;
-
- cur_descriptor = ssdb->ssd;
- sense_len -= sizeof(*ssdb);
- while (sense_len > 0) {
- struct scsi_sense_descriptor *ssd = cur_descriptor;
- int cur_len = ssd->additional_length + 2;
-
- sense_len -= cur_len;
-
- if (sense_len < 0)
- break; /* sense was truncated */
-
- switch (ssd->descriptor_type) {
- case scsi_sense_information:
- case scsi_sense_command_specific_information:
- {
- struct scsi_sense_command_specific_data_descriptor
- *sscd = cur_descriptor;
-
- osi->command_info =
- get_unaligned_be64(&sscd->information);
- OSD_SENSE_PRINT2(
- "command_specific_information 0x%llx \n",
- _LLU(osi->command_info));
- break;
- }
- case scsi_sense_key_specific:
- {
- struct scsi_sense_key_specific_data_descriptor
- *ssks = cur_descriptor;
-
- osi->sense_info = get_unaligned_be16(&ssks->value);
- OSD_SENSE_PRINT2(
- "sense_key_specific_information %u"
- "sksv_cd_bpv_bp (0x%x)\n",
- osi->sense_info, ssks->sksv_cd_bpv_bp);
- break;
- }
- case osd_sense_object_identification:
- { /* FIXME: keep the first, not the last; store in an array */
- struct osd_sense_identification_data_descriptor
- *osidd = cur_descriptor;
-
- osi->not_initiated_command_functions =
- le32_to_cpu(osidd->not_initiated_functions);
- osi->completed_command_functions =
- le32_to_cpu(osidd->completed_functions);
- osi->obj.partition = be64_to_cpu(osidd->partition_id);
- osi->obj.id = be64_to_cpu(osidd->object_id);
- OSD_SENSE_PRINT2(
- "object_identification pid=0x%llx oid=0x%llx\n",
- _LLU(osi->obj.partition), _LLU(osi->obj.id));
- OSD_SENSE_PRINT2(
- "not_initiated_bits(%x) "
- "completed_command_bits(%x)\n",
- osi->not_initiated_command_functions,
- osi->completed_command_functions);
- break;
- }
- case osd_sense_response_integrity_check:
- {
- struct osd_sense_response_integrity_check_descriptor
- *d = cur_descriptor;
- /* 2 nibbles + space + ASCII per byte */
- char dump[sizeof(d->integrity_check_value) * 4 + 2];
-
- hex_dump_to_buffer(d->integrity_check_value,
- sizeof(d->integrity_check_value),
- 32, 1, dump, sizeof(dump), true);
- OSD_SENSE_PRINT2("response_integrity [%s]\n", dump);
- }
- case osd_sense_attribute_identification:
- {
- struct osd_sense_attributes_data_descriptor
- *osadd = cur_descriptor;
- unsigned len = min(cur_len, sense_len);
- struct osd_sense_attr *pattr = osadd->sense_attrs;
-
- while (len >= sizeof(*pattr)) {
- u32 attr_page = be32_to_cpu(pattr->attr_page);
- u32 attr_id = be32_to_cpu(pattr->attr_id);
-
- if (!osi->attr.attr_page) {
- osi->attr.attr_page = attr_page;
- osi->attr.attr_id = attr_id;
- }
-
- if (bad_attr_list && max_attr) {
- bad_attr_list->attr_page = attr_page;
- bad_attr_list->attr_id = attr_id;
- bad_attr_list++;
- max_attr--;
- }
-
- len -= sizeof(*pattr);
- OSD_SENSE_PRINT2(
- "osd_sense_attribute_identification"
- "attr_page=0x%x attr_id=0x%x\n",
- attr_page, attr_id);
- }
- }
- break;
- /* These are not legal for OSD */
- case scsi_sense_field_replaceable_unit:
- OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
- break;
- case scsi_sense_stream_commands:
- OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
- break;
- case scsi_sense_block_commands:
- OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
- break;
- case scsi_sense_ata_return:
- OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
- break;
- default:
- if (ssd->descriptor_type <= scsi_sense_Reserved_last)
- OSD_SENSE_PRINT2(
- "scsi_sense Reserved descriptor (0x%x)",
- ssd->descriptor_type);
- else
- OSD_SENSE_PRINT2(
- "scsi_sense Vendor descriptor (0x%x)",
- ssd->descriptor_type);
- }
-
- cur_descriptor += cur_len;
- }
-
-analyze:
- if (!osi->key) {
- /* SCSI sense is empty; the request was never issued to the target.
- * The Linux return code might tell us what happened.
- */
- if (or->async_error == BLK_STS_RESOURCE)
- osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
- else
- osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
- ret = or->async_error;
- } else if (osi->key <= scsi_sk_recovered_error) {
- osi->osd_err_pri = 0;
- ret = 0;
- } else if (osi->additional_code == scsi_invalid_field_in_cdb) {
- if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
- osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
- ret = -EFAULT; /* caller should recover from this */
- } else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
- osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
- ret = -ENOENT;
- } else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
- osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
- ret = -EACCES;
- } else {
- osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
- ret = -EINVAL;
- }
- } else if (osi->additional_code == osd_quota_error) {
- osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
- ret = -ENOSPC;
- } else if (_is_osd_security_code(osi->additional_code)) {
- osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
- ret = -EINVAL;
- } else {
- osi->osd_err_pri = OSD_ERR_PRI_EIO;
- ret = -EIO;
- }
-
- if (!or->out.residual)
- or->out.residual = or->out.total_bytes;
- if (!or->in.residual)
- or->in.residual = or->in.total_bytes;
-
- return ret;
-}
-EXPORT_SYMBOL(osd_req_decode_sense_full);
-
-/*
- * Implementation of osd_sec.h API
- * TODO: Move to a separate osd_sec.c file at a later stage.
- */
-
-enum { OSD_SEC_CAP_V1_ALL_CAPS =
- OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE |
- OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
- OSD_SEC_CAP_WRITE | OSD_SEC_CAP_READ | OSD_SEC_CAP_POL_SEC |
- OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
-};
-
-enum { OSD_SEC_CAP_V2_ALL_CAPS =
- OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
-};
-
-void osd_sec_init_nosec_doall_caps(void *caps,
- const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
-{
- struct osd_capability *cap = caps;
- u8 type;
- u8 descriptor_type;
-
- if (likely(obj->id)) {
- if (unlikely(is_collection)) {
- type = OSD_SEC_OBJ_COLLECTION;
- descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
- OSD_SEC_OBJ_DESC_COL;
- } else {
- type = OSD_SEC_OBJ_USER;
- descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
- }
- WARN_ON(!obj->partition);
- } else {
- type = obj->partition ? OSD_SEC_OBJ_PARTITION :
- OSD_SEC_OBJ_ROOT;
- descriptor_type = OSD_SEC_OBJ_DESC_PAR;
- }
-
- memset(cap, 0, sizeof(*cap));
-
- cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
- cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
- cap->h.security_method = OSD_SEC_NOSEC;
-/* cap->expiration_time;
- cap->AUDIT[30-10];
- cap->discriminator[42-30];
- cap->object_created_time; */
- cap->h.object_type = type;
- osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
- cap->h.object_descriptor_type = descriptor_type;
- cap->od.obj_desc.policy_access_tag = 0;
- cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
- cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
-}
-EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
-
-/* FIXME: Extract version from caps pointer.
- * Also Pete's target only supports caps from OSDv1 for now
- */
-void osd_set_caps(struct osd_cdb *cdb, const void *caps)
-{
- /* NOTE: they start at the same address */
- memcpy(&cdb->v1.caps, caps, OSDv1_CAP_LEN);
-}
-
-bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
-{
- return false;
-}
-
-void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
-{
-}
-
-void osd_sec_sign_data(void *data_integ __unused,
- struct bio *bio __unused, const u8 *cap_key __unused)
-{
-}
-
-/*
- * Declared in osd_protocol.h
- * 4.12.5 Data-In and Data-Out buffer offsets
- * byte offset = mantissa * (2^(exponent+8))
- * Returns the smallest allowed encoded offset that contains the given @offset.
- * The offset actually encoded is @offset + *@padding.
- */
-osd_cdb_offset __osd_encode_offset(
- u64 offset, unsigned *padding, int min_shift, int max_shift)
-{
- u64 try_offset = -1, mod, align;
- osd_cdb_offset be32_offset;
- int shift;
-
- *padding = 0;
- if (!offset)
- return 0;
-
- for (shift = min_shift; shift < max_shift; ++shift) {
- try_offset = offset >> shift;
- if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
- break;
- }
-
- BUG_ON(shift == max_shift);
-
- align = 1 << shift;
- mod = offset & (align - 1);
- if (mod) {
- *padding = align - mod;
- try_offset += 1;
- }
-
- try_offset |= ((shift - 8) & 0xf) << 28;
- be32_offset = cpu_to_be32((u32)try_offset);
-
- OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
- _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
- be32_offset, *padding);
- return be32_offset;
-}
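A worked example of the encoding implemented above, assuming the caller passes min_shift = 8 as the "2^(exponent+8)" comment implies (the real shift limits live in the osd_protocol.h callers, which are not part of this hunk):

    /* __osd_encode_offset(3, &pad, 8, max_shift):
     *   shift = 8: try_offset = 3 >> 8 = 0, fits in OSD_OFFSET_MAX_BITS
     *   align = 1 << 8 = 256, mod = 3 & 255 = 3
     *   pad = 256 - 3 = 253, try_offset becomes 1
     *   encoded = ((8 - 8) & 0xf) << 28 | 1 = 0x00000001 (big-endian on wire)
     * Decoding per the formula: 1 * 2^(0 + 8) = 256 = offset(3) + pad(253).
     */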
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
deleted file mode 100644
index eaf36ccf58db..000000000000
--- a/drivers/scsi/osd/osd_uld.c
+++ /dev/null
@@ -1,571 +0,0 @@
-/*
- * osd_uld.c - OSD Upper Layer Driver
- *
- * A Linux driver module that registers as a SCSI ULD and probes
- * for OSD type SCSI devices.
- * Its main function is to export OSD devices to in-kernel users like
- * osdfs and pNFS-objects-LD. It also provides one ioctl for running
- * in Kernel tests.
- *
- * Copyright (C) 2008 Panasas Inc. All rights reserved.
- *
- * Authors:
- * Boaz Harrosh <ooo@electrozaur.com>
- * Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Panasas company nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/namei.h>
-#include <linux/cdev.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/idr.h>
-#include <linux/major.h>
-#include <linux/file.h>
-#include <linux/slab.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_driver.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_ioctl.h>
-
-#include <scsi/osd_initiator.h>
-#include <scsi/osd_sec.h>
-
-#include "osd_debug.h"
-
-#ifndef TYPE_OSD
-# define TYPE_OSD 0x11
-#endif
-
-#ifndef SCSI_OSD_MAJOR
-# define SCSI_OSD_MAJOR 260
-#endif
-#define SCSI_OSD_MAX_MINOR MINORMASK
-
-static const char osd_name[] = "osd";
-static const char *osd_version_string = "open-osd 0.2.1";
-
-MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
-MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR);
-MODULE_ALIAS_SCSI_DEVICE(TYPE_OSD);
-
-struct osd_uld_device {
- int minor;
- struct device class_dev;
- struct cdev cdev;
- struct osd_dev od;
- struct osd_dev_info odi;
- struct gendisk *disk;
-};
-
-struct osd_dev_handle {
- struct osd_dev od;
- struct file *file;
- struct osd_uld_device *oud;
-};
-
-static DEFINE_IDA(osd_minor_ida);
-
-/*
- * scsi sysfs attribute operations
- */
-static ssize_t osdname_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
- class_dev);
- return sprintf(buf, "%s\n", ould->odi.osdname);
-}
-static DEVICE_ATTR_RO(osdname);
-
-static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
- class_dev);
-
- memcpy(buf, ould->odi.systemid, ould->odi.systemid_len);
- return ould->odi.systemid_len;
-}
-static DEVICE_ATTR_RO(systemid);
-
-static struct attribute *osd_uld_attrs[] = {
- &dev_attr_osdname.attr,
- &dev_attr_systemid.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(osd_uld);
-
-static struct class osd_uld_class = {
- .owner = THIS_MODULE,
- .name = "scsi_osd",
- .dev_groups = osd_uld_groups,
-};
-
-/*
- * Char Device operations
- */
-
-static int osd_uld_open(struct inode *inode, struct file *file)
-{
- struct osd_uld_device *oud = container_of(inode->i_cdev,
- struct osd_uld_device, cdev);
-
- get_device(&oud->class_dev);
- /* cache osd_uld_device on file handle */
- file->private_data = oud;
- OSD_DEBUG("osd_uld_open %p\n", oud);
- return 0;
-}
-
-static int osd_uld_release(struct inode *inode, struct file *file)
-{
- struct osd_uld_device *oud = file->private_data;
-
- OSD_DEBUG("osd_uld_release %p\n", file->private_data);
- file->private_data = NULL;
- put_device(&oud->class_dev);
- return 0;
-}
-
-/* FIXME: Only one vector for now */
-unsigned g_test_ioctl;
-do_test_fn *g_do_test;
-
-int osduld_register_test(unsigned ioctl, do_test_fn *do_test)
-{
- if (g_test_ioctl)
- return -EINVAL;
-
- g_test_ioctl = ioctl;
- g_do_test = do_test;
- return 0;
-}
-EXPORT_SYMBOL(osduld_register_test);
-
-void osduld_unregister_test(unsigned ioctl)
-{
- if (ioctl == g_test_ioctl) {
- g_test_ioctl = 0;
- g_do_test = NULL;
- }
-}
-EXPORT_SYMBOL(osduld_unregister_test);
-
-static do_test_fn *_find_ioctl(unsigned cmd)
-{
- if (g_test_ioctl == cmd)
- return g_do_test;
- else
- return NULL;
-}
-
-static long osd_uld_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct osd_uld_device *oud = file->private_data;
- int ret;
- do_test_fn *do_test;
-
- do_test = _find_ioctl(cmd);
- if (do_test)
- ret = do_test(&oud->od, cmd, arg);
- else {
- OSD_ERR("Unknown ioctl %d: osd_uld_device=%p\n", cmd, oud);
- ret = -ENOIOCTLCMD;
- }
- return ret;
-}
-
-static const struct file_operations osd_fops = {
- .owner = THIS_MODULE,
- .open = osd_uld_open,
- .release = osd_uld_release,
- .unlocked_ioctl = osd_uld_ioctl,
- .llseek = noop_llseek,
-};
-
-struct osd_dev *osduld_path_lookup(const char *name)
-{
- struct osd_uld_device *oud;
- struct osd_dev_handle *odh;
- struct file *file;
- int error;
-
- if (!name || !*name) {
- OSD_ERR("Mount with !path || !*path\n");
- return ERR_PTR(-EINVAL);
- }
-
- odh = kzalloc(sizeof(*odh), GFP_KERNEL);
- if (unlikely(!odh))
- return ERR_PTR(-ENOMEM);
-
- file = filp_open(name, O_RDWR, 0);
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto free_od;
- }
-
- if (file->f_op != &osd_fops) {
- error = -EINVAL;
- goto close_file;
- }
-
- oud = file->private_data;
-
- odh->od = oud->od;
- odh->file = file;
- odh->oud = oud;
-
- return &odh->od;
-
-close_file:
- fput(file);
-free_od:
- kfree(odh);
- return ERR_PTR(error);
-}
-EXPORT_SYMBOL(osduld_path_lookup);
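The lookup/put pairing for in-kernel users (exofs was the main consumer) looks roughly like this sketch; the device path is illustrative:

    struct osd_dev *od = osduld_path_lookup("/dev/osd0");

    if (IS_ERR(od))
        return PTR_ERR(od);
    /* ... build and submit osd_requests against od ... */
    osduld_put_device(od);	/* drops the file and class_dev references */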
-
-static inline bool _the_same_or_null(const u8 *a1, unsigned a1_len,
- const u8 *a2, unsigned a2_len)
-{
- if (!a2_len) /* an empty user string means don't care */
- return true;
-
- if (a1_len != a2_len)
- return false;
-
- return 0 == memcmp(a1, a2, a1_len);
-}
-
-static int _match_odi(struct device *dev, const void *find_data)
-{
- struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
- class_dev);
- const struct osd_dev_info *odi = find_data;
-
- if (_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
- odi->systemid, odi->systemid_len) &&
- _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
- odi->osdname, odi->osdname_len)) {
- OSD_DEBUG("found device sysid_len=%d osdname=%d\n",
- odi->systemid_len, odi->osdname_len);
- return 1;
- } else {
- return 0;
- }
-}
-
-/* osduld_info_lookup - Loop through all devices, return the requested osd_dev.
- *
- * If @odi->systemid_len and/or @odi->osdname_len are zero, they act as a
- * don't-care, e.g. if both are zero /dev/osd0 is returned.
- */
-struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi)
-{
- struct device *dev = class_find_device(&osd_uld_class, NULL, odi, _match_odi);
- if (likely(dev)) {
- struct osd_dev_handle *odh = kzalloc(sizeof(*odh), GFP_KERNEL);
- struct osd_uld_device *oud = container_of(dev,
- struct osd_uld_device, class_dev);
-
- if (unlikely(!odh)) {
- put_device(dev);
- return ERR_PTR(-ENOMEM);
- }
-
- odh->od = oud->od;
- odh->oud = oud;
-
- return &odh->od;
- }
-
- return ERR_PTR(-ENODEV);
-}
-EXPORT_SYMBOL(osduld_info_lookup);
-
-void osduld_put_device(struct osd_dev *od)
-{
- if (od && !IS_ERR(od)) {
- struct osd_dev_handle *odh =
- container_of(od, struct osd_dev_handle, od);
- struct osd_uld_device *oud = odh->oud;
-
- BUG_ON(od->scsi_device != oud->od.scsi_device);
-
- /* If SCSI has released the device (logout) and exofs holds the
- * last reference on oud, it will be freed by osd_uld_release
- * above, from within the fput below. But that would oops in
- * cdev_release, which is called after fops->release. A get_/put_
- * pair makes sure we keep a cdev for the duration of the fput.
- */
- if (odh->file) {
- get_device(&oud->class_dev);
- fput(odh->file);
- }
- put_device(&oud->class_dev);
- kfree(odh);
- }
-}
-EXPORT_SYMBOL(osduld_put_device);
-
-const struct osd_dev_info *osduld_device_info(struct osd_dev *od)
-{
- struct osd_dev_handle *odh =
- container_of(od, struct osd_dev_handle, od);
- return &odh->oud->odi;
-}
-EXPORT_SYMBOL(osduld_device_info);
-
-bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi)
-{
- struct osd_dev_handle *odh =
- container_of(od, struct osd_dev_handle, od);
- struct osd_uld_device *oud = odh->oud;
-
- return (oud->odi.systemid_len == odi->systemid_len) &&
- _the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
- odi->systemid, odi->systemid_len) &&
- (oud->odi.osdname_len == odi->osdname_len) &&
- _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
- odi->osdname, odi->osdname_len);
-}
-EXPORT_SYMBOL(osduld_device_same);
-
-/*
- * Scsi Device operations
- */
-
-static int __detect_osd(struct osd_uld_device *oud)
-{
- struct scsi_device *scsi_device = oud->od.scsi_device;
- struct scsi_sense_hdr sense_hdr;
- char caps[OSD_CAP_LEN];
- int error;
-
- /* Sending a test_unit_ready as the first command seems to be
- * needed by some targets.
- */
- OSD_DEBUG("start scsi_test_unit_ready %p %p %p\n",
- oud, scsi_device, scsi_device->request_queue);
- error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, &sense_hdr);
- if (error)
- OSD_ERR("warning: scsi_test_unit_ready failed\n");
-
- osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true);
- if (osd_auto_detect_ver(&oud->od, caps, &oud->odi))
- return -ENODEV;
-
- return 0;
-}
-
-static void __remove(struct device *dev)
-{
- struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
- class_dev);
- struct scsi_device *scsi_device = oud->od.scsi_device;
-
- kfree(oud->odi.osdname);
-
- osd_dev_fini(&oud->od);
- scsi_device_put(scsi_device);
-
- OSD_INFO("osd_remove %s\n",
- oud->disk ? oud->disk->disk_name : NULL);
-
- if (oud->disk)
- put_disk(oud->disk);
-
- kfree(oud);
-}
-
-static int osd_probe(struct device *dev)
-{
- struct scsi_device *scsi_device = to_scsi_device(dev);
- struct gendisk *disk;
- struct osd_uld_device *oud;
- int minor;
- int error;
-
- if (scsi_device->type != TYPE_OSD)
- return -ENODEV;
-
- minor = ida_alloc_max(&osd_minor_ida, SCSI_OSD_MAX_MINOR, GFP_KERNEL);
- if (minor == -ENOSPC)
- return -EBUSY;
- if (minor < 0)
- return -ENODEV;
-
- error = -ENOMEM;
- oud = kzalloc(sizeof(*oud), GFP_KERNEL);
- if (!oud)
- goto err_retract_minor;
-
- /* class device member */
- device_initialize(&oud->class_dev);
- dev_set_drvdata(dev, oud);
- oud->minor = minor;
- oud->class_dev.devt = MKDEV(SCSI_OSD_MAJOR, oud->minor);
- oud->class_dev.class = &osd_uld_class;
- oud->class_dev.parent = dev;
- oud->class_dev.release = __remove;
-
- /* hold one more reference to the scsi_device; it will get released
- * in __remove above, in case a logout happens while an fs is mounted
- */
- if (scsi_device_get(scsi_device))
- goto err_retract_minor;
- osd_dev_init(&oud->od, scsi_device);
-
- /* allocate a disk and set it up */
- /* FIXME: do we need this, since sg has already done that? */
- disk = alloc_disk(1);
- if (!disk) {
- OSD_ERR("alloc_disk failed\n");
- goto err_free_osd;
- }
- disk->major = SCSI_OSD_MAJOR;
- disk->first_minor = oud->minor;
- sprintf(disk->disk_name, "osd%d", oud->minor);
- oud->disk = disk;
-
- /* Detect the OSD Version */
- error = __detect_osd(oud);
- if (error) {
- OSD_ERR("osd detection failed, non-compatible OSD device\n");
- goto err_free_osd;
- }
-
- /* init the char-device for communication with user-mode */
- cdev_init(&oud->cdev, &osd_fops);
- oud->cdev.owner = THIS_MODULE;
-
- error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
- if (error) {
- OSD_ERR("dev_set_name failed => %d\n", error);
- goto err_free_osd;
- }
-
- error = cdev_device_add(&oud->cdev, &oud->class_dev);
- if (error) {
- OSD_ERR("device_register failed => %d\n", error);
- goto err_free_osd;
- }
-
- OSD_INFO("osd_probe %s\n", disk->disk_name);
- return 0;
-
-err_free_osd:
- put_device(&oud->class_dev);
-err_retract_minor:
- ida_free(&osd_minor_ida, minor);
- return error;
-}
-
-static int osd_remove(struct device *dev)
-{
- struct scsi_device *scsi_device = to_scsi_device(dev);
- struct osd_uld_device *oud = dev_get_drvdata(dev);
-
- if (oud->od.scsi_device != scsi_device) {
- OSD_ERR("Half cooked osd-device %p, || %p!=%p",
- dev, oud->od.scsi_device, scsi_device);
- }
-
- cdev_device_del(&oud->cdev, &oud->class_dev);
- ida_free(&osd_minor_ida, oud->minor);
- put_device(&oud->class_dev);
-
- return 0;
-}
-
-/*
- * Global driver and scsi registration
- */
-
-static struct scsi_driver osd_driver = {
- .gendrv = {
- .name = osd_name,
- .owner = THIS_MODULE,
- .probe = osd_probe,
- .remove = osd_remove,
- }
-};
-
-static int __init osd_uld_init(void)
-{
- int err;
-
- err = class_register(&osd_uld_class);
- if (err) {
- OSD_ERR("Unable to register sysfs class => %d\n", err);
- return err;
- }
-
- err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0),
- SCSI_OSD_MAX_MINOR, osd_name);
- if (err) {
- OSD_ERR("Unable to register major %d for osd ULD => %d\n",
- SCSI_OSD_MAJOR, err);
- goto err_out;
- }
-
- err = scsi_register_driver(&osd_driver.gendrv);
- if (err) {
- OSD_ERR("scsi_register_driver failed => %d\n", err);
- goto err_out_chrdev;
- }
-
- OSD_INFO("LOADED %s\n", osd_version_string);
- return 0;
-
-err_out_chrdev:
- unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
-err_out:
- class_unregister(&osd_uld_class);
- return err;
-}
-
-static void __exit osd_uld_exit(void)
-{
- scsi_unregister_driver(&osd_driver.gendrv);
- unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
- class_unregister(&osd_uld_class);
- OSD_INFO("UNLOADED %s\n", osd_version_string);
-}
-
-module_init(osd_uld_init);
-module_exit(osd_uld_exit);
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 664c1238a87f..be3c73ebbfde 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -139,7 +139,7 @@ static int debugging = 1;
#define OSST_TIMEOUT (200 * HZ)
#define OSST_LONG_TIMEOUT (1800 * HZ)
-#define TAPE_NR(x) (iminor(x) & ~(-1 << ST_MODE_SHIFT))
+#define TAPE_NR(x) (iminor(x) & ((1 << ST_MODE_SHIFT)-1))
#define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT)
#define TAPE_REWIND(x) ((iminor(x) & 0x80) == 0)
#define TAPE_IS_RAW(x) (TAPE_MODE(x) & (ST_NBR_MODES >> 1))
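The TAPE_NR change above swaps two expressions for the same low-bit mask; the new form avoids left-shifting a negative value, which is undefined behavior in C. Assuming ST_MODE_SHIFT is 5, as defined in st.h:

    /* old: ~(-1 << 5)   -> ~0xffffffe0 -> 0x1f, but '-1 << 5' is UB
     * new: (1 << 5) - 1 -> 0x20 - 1    -> 0x1f, well defined
     */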
diff --git a/drivers/scsi/pcmcia/Makefile b/drivers/scsi/pcmcia/Makefile
index faa87a4b2d2b..a5a24dd44e7e 100644
--- a/drivers/scsi/pcmcia/Makefile
+++ b/drivers/scsi/pcmcia/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/scsi
+ccflags-y := -I $(srctree)/drivers/scsi
# 16-bit client drivers
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogic_cs.o
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 1bd6825a4f14..a81748e6e8fb 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1134,7 +1134,8 @@ static irqreturn_t nspintr(int irq, void *dev_id)
//*sync_neg = SYNC_NOT_YET;
- if ((tmpSC->SCp.Message == MSG_COMMAND_COMPLETE)) { /* all command complete and return status */
+ /* all commands complete; return the status */
+ if (tmpSC->SCp.Message == MSG_COMMAND_COMPLETE) {
tmpSC->result = (DID_OK << 16) |
((tmpSC->SCp.Message & 0xff) << 8) |
((tmpSC->SCp.Status & 0xff) << 0);
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index c29c162a494f..a32d8ee4666e 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -27,30 +27,19 @@ qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
const struct file_operations *fops)
{
char host_dirname[32];
- struct dentry *file_dentry = NULL;
QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n");
/* create pf dir */
sprintf(host_dirname, "host%u", qedf->host_no);
qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root);
- if (!qedf->bdf_dentry)
- return;
/* create debugfs files */
while (dops) {
if (!(dops->name))
break;
- file_dentry = debugfs_create_file(dops->name, 0600,
- qedf->bdf_dentry, qedf,
- fops);
- if (!file_dentry) {
- QEDF_INFO(qedf, QEDF_LOG_DEBUGFS,
- "Debugfs entry %s creation failed\n",
- dops->name);
- debugfs_remove_recursive(qedf->bdf_dentry);
- return;
- }
+ debugfs_create_file(dops->name, 0600, qedf->bdf_dentry, qedf,
+ fops);
dops++;
fops++;
}
@@ -80,9 +69,6 @@ qedf_dbg_init(char *drv_name)
/* create qed dir in root of debugfs. NULL means debugfs root */
qedf_dbg_root = debugfs_create_dir(drv_name, NULL);
- if (!qedf_dbg_root)
- QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Init of debugfs "
- "failed\n");
}
/**
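The qedf (and, below, qedi and qla2xxx) debugfs hunks follow the tree-wide convention that debugfs_create_dir()/debugfs_create_file() return values need not be checked: debugfs copes with failure internally, and code logic should never change based on whether debugfs works. The resulting idiom, with illustrative names:

    struct dentry *dir;

    dir = debugfs_create_dir("mydriver", NULL);
    debugfs_create_file("stats", 0400, dir, priv, &stats_fops);
    /* per the debugfs convention, no error handling: the driver
     * must behave the same whether or not the nodes were created */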
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 6bbc38b1b465..53e8221f6816 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -807,7 +807,6 @@ void qedf_ring_doorbell(struct qedf_rport *fcport)
writel(*(u32 *)&dbell, fcport->p_doorbell);
/* Make sure SQ index is updated so f/w processes requests in order */
wmb();
- mmiowb();
}
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
@@ -1128,12 +1127,6 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
return;
}
- if (!sc_cmd->request->special) {
- QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
- "request not valid, sc_cmd=%p.\n", sc_cmd);
- return;
- }
-
if (!sc_cmd->request->q) {
QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
"is not valid, sc_cmd=%p.\n", sc_cmd);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 9bbc19fc190b..9f9431a4cc0e 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1418,7 +1418,7 @@ static struct libfc_function_template qedf_lport_template = {
static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
{
- fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
+ fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
qedf->ctlr.send = qedf_fip_send;
qedf->ctlr.get_src_addr = qedf_get_src_mac;
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index fd914ca4149a..5667e4752e2e 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -23,27 +23,16 @@ qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
const struct file_operations *fops)
{
char host_dirname[32];
- struct dentry *file_dentry = NULL;
sprintf(host_dirname, "host%u", qedi->host_no);
qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root);
- if (!qedi->bdf_dentry)
- return;
while (dops) {
if (!(dops->name))
break;
- file_dentry = debugfs_create_file(dops->name, 0600,
- qedi->bdf_dentry, qedi,
- fops);
- if (!file_dentry) {
- QEDI_INFO(qedi, QEDI_LOG_DEBUGFS,
- "Debugfs entry %s creation failed\n",
- dops->name);
- debugfs_remove_recursive(qedi->bdf_dentry);
- return;
- }
+ debugfs_create_file(dops->name, 0600, qedi->bdf_dentry, qedi,
+ fops);
dops++;
fops++;
}
@@ -60,8 +49,6 @@ void
qedi_dbg_init(char *drv_name)
{
qedi_dbg_root = debugfs_create_dir(drv_name, NULL);
- if (!qedi_dbg_root)
- QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n");
}
void
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 25d763ae5d5a..f8f86774f77f 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -616,13 +616,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
goto error;
}
- if (!sc_cmd->request->special) {
- QEDI_WARN(&qedi->dbg_ctx,
- "request->special is NULL so request not valid, sc_cmd=%p.\n",
- sc_cmd);
- goto error;
- }
-
if (!sc_cmd->request->q) {
QEDI_WARN(&qedi->dbg_ctx,
"request->q is NULL so request is not valid, sc_cmd=%p.\n",
@@ -992,7 +985,6 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
* others they are two different assembly operations.
*/
wmb();
- mmiowb();
QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
"prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e74a62448ba4..e5db9a9954dc 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
- struct qedi_nvm_iscsi_image nvm_image;
-
qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(nvm_image),
+ sizeof(struct qedi_nvm_iscsi_image),
&qedi->nvm_buf_dma, GFP_KERNEL);
if (!qedi->iscsi_image) {
QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
@@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data)
static int qedi_get_boot_info(struct qedi_ctx *qedi)
{
int ret = 1;
- struct qedi_nvm_iscsi_image nvm_image;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Get NVM iSCSI CFG image\n");
ret = qedi_ops->common->nvm_get_image(qedi->cdev,
QED_NVM_IMAGE_ISCSI_CFG,
(char *)qedi->iscsi_image,
- sizeof(nvm_image));
+ sizeof(struct qedi_nvm_iscsi_image));
if (ret)
QEDI_ERR(&qedi->dbg_ctx,
"Could not get NVM image. ret = %d\n", ret);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 6856dfdfa473..327eff67a1ee 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -3004,8 +3004,6 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
sp->flags |= SRB_SENT;
ha->actthreads++;
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
- mmiowb();
out:
if (status)
@@ -3254,8 +3252,6 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
sp->flags |= SRB_SENT;
ha->actthreads++;
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
- mmiowb();
out:
if (status)
@@ -3367,19 +3363,8 @@ qla1280_isp_cmd(struct scsi_qla_host *ha)
/*
* Update request index to mailbox4 (Request Queue In).
- * The mmiowb() ensures that this write is ordered with writes by other
- * CPUs. Without the mmiowb(), it is possible for the following:
- * CPUA posts write of index 5 to mailbox4
- * CPUA releases host lock
- * CPUB acquires host lock
- * CPUB posts write of index 6 to mailbox4
- * On PCI bus, order reverses and write of 6 posts, then index 5,
- * causing chip to issue full queue of stale commands
- * The mmiowb() prevents future writes from crossing the barrier.
- * See Documentation/driver-api/device-io.rst for more information.
*/
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
- mmiowb();
LEAVE("qla1280_isp_cmd");
}
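The mmiowb() deletions in this file (and in qedf/qedi above) track the kernel-wide removal of that primitive: the ordering it provided is now supplied by spin_unlock() on the architectures that need it. A sketch of the resulting pattern, with lock and register names illustrative, based on the deleted comment's host-lock scenario:

    spin_lock_irqsave(&ha->lock, flags);
    /* ... queue the request ... */
    WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
    spin_unlock_irqrestore(&ha->lock, flags);
    /* the unlock now orders this posted MMIO write against writes
     * issued by the next holder of the lock, which is exactly what
     * the explicit mmiowb() used to guarantee */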
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ac504a1ff0ff..f928c4d3a1ef 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -543,6 +543,9 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -1002,7 +1005,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
/* Scsi_Host attributes. */
static ssize_t
-qla2x00_drvr_version_show(struct device *dev,
+qla2x00_driver_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
@@ -1632,6 +1635,94 @@ qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
ha->max_speed_sup ? "32Gbps" : "16Gbps");
}
+static ssize_t
+qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
+ ulong type, speed;
+ int oldspeed, rval;
+ int mode = QLA_SET_DATA_RATE_LR;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA27XX(vha->hw)) {
+ ql_log(ql_log_warn, vha, 0x70d8,
+ "Speed setting not supported \n");
+ return -EINVAL;
+ }
+
+ rval = kstrtol(buf, 10, &type);
+ if (rval)
+ return rval;
+ speed = type;
+ if (type == 40 || type == 80 || type == 160 ||
+ type == 320) {
+ ql_dbg(ql_dbg_user, vha, 0x70d9,
+ "Setting will be affected after a loss of sync\n");
+ type = type/10;
+ mode = QLA_SET_DATA_RATE_NOLR;
+ }
+
+ oldspeed = ha->set_data_rate;
+
+ switch (type) {
+ case 0:
+ ha->set_data_rate = PORT_SPEED_AUTO;
+ break;
+ case 4:
+ ha->set_data_rate = PORT_SPEED_4GB;
+ break;
+ case 8:
+ ha->set_data_rate = PORT_SPEED_8GB;
+ break;
+ case 16:
+ ha->set_data_rate = PORT_SPEED_16GB;
+ break;
+ case 32:
+ ha->set_data_rate = PORT_SPEED_32GB;
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x1199,
+ "Unrecognized speed setting:%lx. Setting Autoneg\n",
+ speed);
+ ha->set_data_rate = PORT_SPEED_AUTO;
+ }
+
+ if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
+ return -EINVAL;
+
+ ql_log(ql_log_info, vha, 0x70da,
+ "Setting speed to %lx Gbps \n", type);
+
+ rval = qla2x00_set_data_rate(vha, mode);
+ if (rval != QLA_SUCCESS)
+ return -EIO;
+
+ return strlen(buf);
+}
+
+static ssize_t
+qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ ssize_t rval;
+ char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
+
+ rval = qla2x00_get_data_rate(vha);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x70db,
+ "Unable to get port speed rval:%zd\n", rval);
+ return -EINVAL;
+ }
+
+ ql_log(ql_log_info, vha, 0x70d6,
+ "port speed:%d\n", ha->link_data_rate);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
+}
+
/* ----- */
static ssize_t
@@ -2059,7 +2150,21 @@ ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
return strlen(buf);
}
-static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
+static ssize_t
+qla2x00_dif_bundle_statistics_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ return scnprintf(buf, PAGE_SIZE,
+ "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
+ ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
+ ha->dif_bundle_writes, ha->dif_bundle_kallocs,
+ ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
+}
+
+static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
@@ -2112,6 +2217,10 @@ static DEVICE_ATTR(zio_threshold, 0644,
static DEVICE_ATTR_RW(qlini_mode);
static DEVICE_ATTR_RW(ql2xexchoffld);
static DEVICE_ATTR_RW(ql2xiniexchg);
+static DEVICE_ATTR(dif_bundle_statistics, 0444,
+ qla2x00_dif_bundle_statistics_show, NULL);
+static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
+ qla2x00_port_speed_store);
struct device_attribute *qla2x00_host_attrs[] = {
@@ -2150,6 +2259,8 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_min_link_speed,
&dev_attr_max_speed_sup,
&dev_attr_zio_threshold,
+ &dev_attr_dif_bundle_statistics,
+ &dev_attr_port_speed,
NULL, /* reserve for qlini_mode */
NULL, /* reserve for ql2xiniexchg */
NULL, /* reserve for ql2xexchoffld */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index d1fc4958222a..3d46975a5e5c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -314,6 +314,7 @@ struct srb_cmd {
#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */
#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
#define SRB_WAKEUP_ON_COMP BIT_6
+#define SRB_DIF_BUNDL_DMA_VALID BIT_7 /* DIF: DMA list valid */
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
@@ -1892,6 +1893,13 @@ struct crc_context {
/* List of DMA context transfers */
struct list_head dsd_list;
+ /* List of DIF Bundling context DMA address */
+ struct list_head ldif_dsd_list;
+ u8 no_ldif_dsd;
+
+ struct list_head ldif_dma_hndl_list;
+ u32 dif_bundl_len;
+ u8 no_dif_bundl;
/* This structure should not exceed 512 bytes */
};
@@ -2359,7 +2367,9 @@ typedef struct fc_port {
#define NVME_PRLI_SP_INITIATOR BIT_5
#define NVME_PRLI_SP_TARGET BIT_4
#define NVME_PRLI_SP_DISCOVERY BIT_3
+#define NVME_PRLI_SP_FIRST_BURST BIT_0
uint8_t nvme_flag;
+ uint32_t nvme_first_burst_size;
#define NVME_FLAG_REGISTERED 4
#define NVME_FLAG_DELETING 2
#define NVME_FLAG_RESETTING 1
@@ -3688,12 +3698,14 @@ struct qla_hw_data {
#define PORT_SPEED_UNKNOWN 0xFFFF
#define PORT_SPEED_1GB 0x00
#define PORT_SPEED_2GB 0x01
+#define PORT_SPEED_AUTO 0x02
#define PORT_SPEED_4GB 0x03
#define PORT_SPEED_8GB 0x04
#define PORT_SPEED_16GB 0x05
#define PORT_SPEED_32GB 0x06
#define PORT_SPEED_10GB 0x13
uint16_t link_data_rate; /* F/W operating speed */
+ uint16_t set_data_rate; /* Set by user */
uint8_t current_topology;
uint8_t prev_topology;
@@ -3958,6 +3970,10 @@ struct qla_hw_data {
uint16_t fw_subminor_version;
uint16_t fw_attributes;
uint16_t fw_attributes_h;
+#define FW_ATTR_H_NVME_FBURST BIT_1
+#define FW_ATTR_H_NVME BIT_10
+#define FW_ATTR_H_NVME_UPDATED BIT_14
+
uint16_t fw_attributes_ext[2];
uint32_t fw_memory_size;
uint32_t fw_transfer_size;
@@ -4184,12 +4200,32 @@ struct qla_hw_data {
uint16_t min_link_speed;
uint16_t max_speed_sup;
+ /* DMA pool for the DIF bundling buffers */
+ struct dma_pool *dif_bundl_pool;
+ #define DIF_BUNDLING_DMA_POOL_SIZE 1024
+ struct {
+ struct {
+ struct list_head head;
+ uint count;
+ } good;
+ struct {
+ struct list_head head;
+ uint count;
+ } unusable;
+ } pool;
+
+ unsigned long long dif_bundle_crossed_pages;
+ unsigned long long dif_bundle_reads;
+ unsigned long long dif_bundle_writes;
+ unsigned long long dif_bundle_kallocs;
+ unsigned long long dif_bundle_dma_allocs;
+
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
atomic_t zio_threshold;
uint16_t last_zio_threshold;
-#define DEFAULT_ZIO_THRESHOLD 64
+#define DEFAULT_ZIO_THRESHOLD 5
};
#define FW_ABILITY_MAX_SPEED_MASK 0xFUL
@@ -4198,6 +4234,10 @@ struct qla_hw_data {
#define FW_ABILITY_MAX_SPEED(ha) \
(ha->fw_ability_mask & FW_ABILITY_MAX_SPEED_MASK)
+#define QLA_GET_DATA_RATE 0
+#define QLA_SET_DATA_RATE_NOLR 1
+#define QLA_SET_DATA_RATE_LR 2 /* Set speed and initiate LR */
+
/*
* Qlogic scsi host structure
*/
@@ -4229,6 +4269,7 @@ typedef struct scsi_qla_host {
uint32_t qpairs_req_created:1;
uint32_t qpairs_rsp_created:1;
uint32_t nvme_enabled:1;
+ uint32_t nvme_first_burst:1;
} flags;
atomic_t loop_state;
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0b190082aa8d..5819a45ac5ef 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -193,6 +193,8 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
for (i = 0; i < vha->hw->max_qpairs; i++) {
qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
@@ -446,11 +448,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
atomic_set(&qla2x00_dfs_root_count, 0);
qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
- if (!qla2x00_dfs_root) {
- ql_log(ql_log_warn, vha, 0x00f7,
- "Unable to create debugfs root directory.\n");
- goto out;
- }
create_dir:
if (ha->dfs_dir)
@@ -458,64 +455,28 @@ create_dir:
mutex_init(&ha->fce_mutex);
ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
- if (!ha->dfs_dir) {
- ql_log(ql_log_warn, vha, 0x00f8,
- "Unable to create debugfs ha directory.\n");
- goto out;
- }
atomic_inc(&qla2x00_dfs_root_count);
create_nodes:
ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
- if (!ha->dfs_fw_resource_cnt) {
- ql_log(ql_log_warn, vha, 0x00fd,
- "Unable to create debugFS fw_resource_count node.\n");
- goto out;
- }
ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
ha->dfs_dir, vha, &dfs_tgt_counters_ops);
- if (!ha->dfs_tgt_counters) {
- ql_log(ql_log_warn, vha, 0xd301,
- "Unable to create debugFS tgt_counters node.\n");
- goto out;
- }
ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
- if (!ha->tgt.dfs_tgt_port_database) {
- ql_log(ql_log_warn, vha, 0xd03f,
- "Unable to create debugFS tgt_port_database node.\n");
- goto out;
- }
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
- if (!ha->dfs_fce) {
- ql_log(ql_log_warn, vha, 0x00f9,
- "Unable to create debugfs fce node.\n");
- goto out;
- }
ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
- if (!ha->tgt.dfs_tgt_sess) {
- ql_log(ql_log_warn, vha, 0xd040,
- "Unable to create debugFS tgt_sess node.\n");
- goto out;
- }
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
ha->tgt.dfs_naqp = debugfs_create_file("naqp",
0400, ha->dfs_dir, vha, &dfs_naqp_ops);
- if (!ha->tgt.dfs_naqp) {
- ql_log(ql_log_warn, vha, 0xd011,
- "Unable to create debugFS naqp node.\n");
- goto out;
- }
- }
out:
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3673fcdb033a..4eefe69ca807 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -160,6 +160,7 @@ extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
extern int ql2xexlogins;
+extern int ql2xdifbundlinginternalbuffers;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -269,8 +270,8 @@ extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
uint16_t, struct req_que *);
extern int qla2x00_start_scsi(srb_t *sp);
extern int qla24xx_start_scsi(srb_t *sp);
-int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
- uint16_t, uint64_t, uint8_t);
+int qla2x00_marker(struct scsi_qla_host *, struct qla_qpair *,
+ uint16_t, uint64_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
@@ -285,7 +286,7 @@ extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tc_param *);
+ uint32_t *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -898,5 +899,6 @@ void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
void qlt_remove_target_resources(struct qla_hw_data *);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
+int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index cbc3bc49d4d1..c6fdad12428e 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -657,15 +657,16 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_sns_sp_done;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x.\n",
+ sp->name, sp->handle, d_id->b24);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x2043,
"RFT_ID issue IOCB failed (%d).\n", rval);
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s - hdl=%x portid %06x.\n",
- sp->name, sp->handle, d_id->b24);
return rval;
done_free_sp:
sp->free(sp);
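A pattern worth noting, since it repeats through the qla_gs.c and qla_init.c hunks in this diff: the ql_dbg() that prints sp->handle is consistently moved to before qla2x00_start_sp(). The likely motivation (an assumption here, since the changelog is not shown) is that once start_sp() succeeds the srb can complete and be freed at any moment, so dereferencing sp afterwards risks a use-after-free:

    /* log while sp is guaranteed alive */
    ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s hdl=%x\n",
        sp->name, sp->handle);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;	/* never submitted: safe to free */
    return rval;		/* submitted: do not touch sp again */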
@@ -752,6 +753,10 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_sns_sp_done;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
+ sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x2047,
@@ -759,9 +764,6 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
- sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
return rval;
done_free_sp:
@@ -844,15 +846,16 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_sns_sp_done;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x\n",
+ sp->name, sp->handle, d_id->b24);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x204d,
"RNN_ID issue IOCB failed (%d).\n", rval);
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s - hdl=%x portid %06x\n",
- sp->name, sp->handle, d_id->b24);
return rval;
@@ -957,15 +960,16 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_sns_sp_done;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x.\n",
+ sp->name, sp->handle);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x2043,
"RFT_ID issue IOCB failed (%d).\n", rval);
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s - hdl=%x.\n",
- sp->name, sp->handle);
return rval;
@@ -3578,14 +3582,14 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->done = qla24xx_async_gffid_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0x2132,
"Async-%s hdl=%x %8phC.\n", sp->name,
sp->handle, fcport->port_name);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
return rval;
done_free_sp:
sp->free(sp);
@@ -4067,6 +4071,10 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
spin_lock_irqsave(&vha->work_lock, flags);
@@ -4075,9 +4083,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s hdl=%x FC4Type %x.\n", sp->name,
- sp->handle, ct_req->req.gpn_ft.port_type);
return rval;
done_free_sp:
@@ -4158,7 +4163,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
- goto done_free_sp;
+ qla2x00_rel_sp(sp);
+ return rval;
}
sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
@@ -4177,7 +4183,13 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
- goto done_free_sp;
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ qla2x00_rel_sp(sp);
+ return rval;
}
sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
@@ -4214,6 +4226,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
spin_lock_irqsave(&vha->work_lock, flags);
@@ -4222,9 +4238,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s hdl=%x FC4Type %x.\n", sp->name,
- sp->handle, ct_req->req.gpn_ft.port_type);
return rval;
done_free_sp:
@@ -4345,13 +4358,14 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->done = qla2x00_async_gnnid_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
sp->name, fcport->port_name,
sp->handle, fcport->loop_id, fcport->d_id.b24);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -4475,14 +4489,15 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->done = qla2x00_async_gfpnid_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
sp->name, fcport->port_name,
sp->handle, fcport->loop_id, fcport->d_id.b24);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
return rval;
done_free_sp:
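Note on the qla_gs.c hunks above: they all apply one pattern, moving the ql_dbg() trace that used to follow qla2x00_start_sp() to before the call. The likely motive (an assumption; the patch states none) is that once qla2x00_start_sp() queues the IOCB, the completion path may run and free sp, so dereferencing sp->name/sp->handle afterwards would race with completion. A minimal sketch of the resulting shape:

	/* Sketch of the trace-before-start pattern from the hunks above.
	 * Assumption: after qla2x00_start_sp() succeeds, sp may complete
	 * and be freed at any time, so it must not be dereferenced for
	 * logging once the call returns.
	 */
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - hdl=%x.\n", sp->name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;	/* queuing failed; sp is still ours to free */

	return rval;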
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8d1acc802a67..0c700b140ce7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -366,14 +366,16 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_prlo_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
return rval;
done_free_sp:
@@ -471,9 +473,11 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
{
srb_t *sp;
struct srb_iocb *lio;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- rval = QLA_FUNCTION_FAILED;
fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -644,11 +648,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
break;
case DSC_LS_PORT_UNAVAIL:
default:
- if (fcport->loop_id != FC_NO_LOOP_ID)
- qla2x00_clear_loop_id(fcport);
-
- fcport->loop_id = loop_id;
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ qla2x00_find_new_loop_id(vha, fcport);
+ fcport->fw_login_state =
+ DSC_LS_PORT_UNAVAIL;
+ }
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
+ "%s %d %8phC\n", __func__, __LINE__,
+ fcport->port_name);
qla24xx_fcport_handle_login(vha, fcport);
break;
}
@@ -931,14 +938,14 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->done = qla24xx_async_gnl_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0x20da,
"Async-%s - OUT WWPN %8phC hndl %x\n",
sp->name, fcport->port_name, sp->handle);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
return rval;
done_free_sp:
@@ -1072,6 +1079,11 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
if (fcport->fc4f_nvme)
lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
+ ql_dbg(ql_dbg_disc, vha, 0x211b,
+ "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
+ fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
+ fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
fcport->flags |= FCF_LOGIN_NEEDED;
@@ -1079,11 +1091,6 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0x211b,
- "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
- fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
- fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
-
return rval;
done_free_sp:
@@ -1471,29 +1478,6 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
-static
-void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
-{
- fcport->rscn_gen++;
-
- ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
- "%s %8phC DS %d LS %d\n",
- __func__, fcport->port_name, fcport->disc_state,
- fcport->fw_login_state);
-
- if (fcport->flags & FCF_ASYNC_SENT)
- return;
-
- switch (fcport->disc_state) {
- case DSC_DELETED:
- case DSC_LOGIN_COMPLETE:
- qla24xx_post_gpnid_work(fcport->vha, &ea->id);
- break;
- default:
- break;
- }
-}
-
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
@@ -1560,8 +1544,6 @@ static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *f, *tf;
- uint32_t id = 0, mask, rid;
fc_port_t *fcport;
switch (ea->event) {
@@ -1574,10 +1556,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_RSCN:
if (test_bit(UNLOADING, &vha->dpc_flags))
return;
- switch (ea->id.b.rsvd_1) {
- case RSCN_PORT_ADDR:
-#define BIGSCAN 1
-#if defined BIGSCAN & BIGSCAN > 0
{
unsigned long flags;
fcport = qla2x00_find_fcport_by_nportid
@@ -1596,59 +1574,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
}
spin_unlock_irqrestore(&vha->work_lock, flags);
}
-#else
- {
- int rc;
- fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
- if (!fcport) {
- /* cable moved */
- rc = qla24xx_post_gpnid_work(vha, &ea->id);
- if (rc) {
- ql_log(ql_log_warn, vha, 0xd044,
- "RSCN GPNID work failed %06x\n",
- ea->id.b24);
- }
- } else {
- ea->fcport = fcport;
- fcport->scan_needed = 1;
- qla24xx_handle_rscn_event(fcport, ea);
- }
- }
-#endif
- break;
- case RSCN_AREA_ADDR:
- case RSCN_DOM_ADDR:
- if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
- mask = 0xffff00;
- ql_dbg(ql_dbg_async, vha, 0x5044,
- "RSCN: Area 0x%06x was affected\n",
- ea->id.b24);
- } else {
- mask = 0xff0000;
- ql_dbg(ql_dbg_async, vha, 0x507a,
- "RSCN: Domain 0x%06x was affected\n",
- ea->id.b24);
- }
-
- rid = ea->id.b24 & mask;
- list_for_each_entry_safe(f, tf, &vha->vp_fcports,
- list) {
- id = f->d_id.b24 & mask;
- if (rid == id) {
- ea->fcport = f;
- qla24xx_handle_rscn_event(f, ea);
- }
- }
- break;
- case RSCN_FAB_ADDR:
- default:
- ql_log(ql_log_warn, vha, 0xd045,
- "RSCN: Fabric was affected. Addr format %d\n",
- ea->id.b.rsvd_1);
- qla2x00_mark_all_devices_lost(vha, 1);
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- }
break;
case FCME_GNL_DONE:
qla24xx_handle_gnl_done_event(vha, ea);
@@ -1709,11 +1634,7 @@ void qla_rscn_replay(fc_port_t *fcport)
ea.event = FCME_RSCN;
ea.id = fcport->d_id;
ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
-#if defined BIGSCAN & BIGSCAN > 0
qla2x00_fcport_event_handler(fcport->vha, &ea);
-#else
- qla24xx_post_gpnid_work(fcport->vha, &ea.id);
-#endif
}
}
@@ -1784,8 +1705,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
lun = (uint16_t)tm_iocb->u.tmf.lun;
/* Issue Marker IOCB */
- qla2x00_marker(vha, vha->hw->req_q_map[0],
- vha->hw->rsp_q_map[0], fcport->loop_id, lun,
+ qla2x00_marker(vha, vha->hw->base_qpair,
+ fcport->loop_id, lun,
flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
}
@@ -1829,7 +1750,7 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
int rval = QLA_FUNCTION_FAILED;
sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!sp)
goto done;
@@ -1912,6 +1833,12 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
+ ea->fcport->nvme_prli_service_param = ea->iop[0];
+ if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
+ ea->fcport->nvme_first_burst_size =
+ (ea->iop[1] & 0xffff) * 512;
+ else
+ ea->fcport->nvme_first_burst_size = 0;
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
default:
@@ -3955,8 +3882,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
+
qlt_24xx_config_rings(vha);
+ /* If the user has configured the speed, set it here */
+ if (ha->set_data_rate) {
+ ql_dbg(ql_dbg_init, vha, 0x00fd,
+ "Speed set by user : %s Gbps \n",
+ qla2x00_get_link_speed_str(ha, ha->set_data_rate));
+ icb->firmware_options_3 = (ha->set_data_rate << 13);
+ }
+
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
}
@@ -4755,6 +4691,16 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
if (!fcport)
return NULL;
+ fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
+ flags);
+ if (!fcport->ct_desc.ct_sns) {
+ ql_log(ql_log_warn, vha, 0xd049,
+ "Failed to allocate ct_sns request.\n");
+ kfree(fcport);
+ return NULL;
+ }
+
/* Setup fcport template structure. */
fcport->vha = vha;
fcport->port_type = FCT_UNKNOWN;
@@ -4763,13 +4709,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->supported_classes = FC_COS_UNSPECIFIED;
fcport->fp_speed = PORT_SPEED_UNKNOWN;
- fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
- flags);
fcport->disc_state = DSC_DELETED;
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
fcport->deleted = QLA_SESS_DELETED;
fcport->login_retry = vha->hw->login_retry_count;
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
fcport->logout_on_delete = 1;
if (!fcport->ct_desc.ct_sns) {
@@ -4778,6 +4722,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
kfree(fcport);
fcport = NULL;
}
+
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
@@ -5047,10 +4992,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
continue;
/* Bypass if not same domain and area of adapter. */
- if (area && domain &&
- (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
+ if (area && domain && ((area != vha->d_id.b.area) ||
+ (domain != vha->d_id.b.domain)) &&
+ (ha->current_topology == ISP_CFG_NL))
continue;
+
/* Bypass invalid local loop ID. */
if (loop_id > LAST_LOCAL_LOOP_ID)
continue;
@@ -6101,11 +6048,6 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
uint32_t wait_time;
- struct req_que *req;
- struct rsp_que *rsp;
-
- req = vha->req;
- rsp = req->rsp;
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
if (vha->flags.online) {
@@ -6118,8 +6060,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
* Issue a marker after FW becomes
* ready.
*/
- qla2x00_marker(vha, req, rsp, 0, 0,
- MK_SYNC_ALL);
+ qla2x00_marker(vha, vha->hw->base_qpair,
+ 0, 0, MK_SYNC_ALL);
vha->marker_needed = 0;
}
@@ -6857,8 +6799,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
{
int status = 0;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
- struct rsp_que *rsp = ha->rsp_q_map[0];
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(vha)) {
@@ -6878,7 +6818,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
status = qla2x00_fw_ready(vha);
if (!status) {
/* Issue a marker after FW becomes ready. */
- qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+ qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
@@ -7933,22 +7873,15 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- struct req_que *req;
- struct rsp_que *rsp;
if (!vha->vp_idx)
return -EINVAL;
rval = qla2x00_fw_ready(base_vha);
- if (vha->qpair)
- req = vha->qpair->req;
- else
- req = ha->req_q_map[0];
- rsp = req->rsp;
if (rval == QLA_SUCCESS) {
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
- qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+ qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
}
vha->flags.management_server_logged_in = 0;
@@ -8340,8 +8273,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
{
int status, rval;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
- struct rsp_que *rsp = ha->rsp_q_map[0];
struct scsi_qla_host *vp;
unsigned long flags;
@@ -8353,7 +8284,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
status = qla2x00_fw_ready(vha);
if (!status) {
/* Issue a marker after FW becomes ready. */
- qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+ qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
vha->flags.online = 1;
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
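Note on the qla2x00_marker() conversion repeated through qla_init.c (and continued in qla_iocb.c and qla_mbx.c below): the separate req/rsp queue arguments are replaced by a struct qla_qpair *, which carries both queues plus its own lock (the new qla2x00_marker() locks qpair->qp_lock_ptr rather than the global hardware_lock). Callers no longer need to dig req_q_map[0]/rsp_q_map[0] out of qla_hw_data; the new call shape, lifted from the hunks above:

	/* New qpair-based call shape (taken from the hunks above). */
	if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, MK_SYNC_ALL) !=
	    QLA_SUCCESS)
		return QLA_FUNCTION_FAILED;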
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 032635321ad6..456a41d2e2c6 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -336,7 +336,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
QLA_SUCCESS) {
return (QLA_FUNCTION_FAILED);
}
@@ -490,8 +490,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
/**
* qla2x00_marker() - Send a marker IOCB to the firmware.
* @vha: HA context
- * @req: request queue
- * @rsp: response queue
+ * @qpair: queue pair pointer
* @loop_id: loop ID
* @lun: LUN
* @type: marker modifier
@@ -501,18 +500,16 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
* Returns non-zero if a failure occurred, else zero.
*/
static int
-__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
- struct rsp_que *rsp, uint16_t loop_id,
- uint64_t lun, uint8_t type)
+__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+ uint16_t loop_id, uint64_t lun, uint8_t type)
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24 = NULL;
-
+ struct req_que *req = qpair->req;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- req = ha->req_q_map[0];
- mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
+ mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
if (mrk == NULL) {
ql_log(ql_log_warn, base_vha, 0x3026,
"Failed to allocate Marker IOCB.\n");
@@ -543,16 +540,15 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
}
int
-qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
- struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
- uint8_t type)
+qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+ uint16_t loop_id, uint64_t lun, uint8_t type)
{
int ret;
unsigned long flags = 0;
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return (ret);
}
@@ -567,11 +563,11 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
if (ha_locked) {
- if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
MK_SYNC_ALL) != QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
} else {
- if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
MK_SYNC_ALL) != QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
}
@@ -1098,88 +1094,300 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+ uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
- void *next_dsd;
- uint8_t avail_dsds = 0;
- uint32_t dsd_list_len;
- struct dsd_dma *dsd_ptr;
+ struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
struct scatterlist *sg, *sgl;
- int i;
- struct scsi_cmnd *cmd;
- uint32_t *cur_dsd = dsd;
- uint16_t used_dsds = tot_dsds;
+ struct crc_context *difctx = NULL;
struct scsi_qla_host *vha;
+ uint dsd_list_len;
+ uint avail_dsds = 0;
+ uint used_dsds = tot_dsds;
+ bool dif_local_dma_alloc = false;
+ bool direction_to_device = false;
+ int i;
if (sp) {
- cmd = GET_CMD_SP(sp);
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
sgl = scsi_prot_sglist(cmd);
vha = sp->vha;
+ difctx = sp->u.scmd.ctx;
+ direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
+ "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
+ __func__, cmd, difctx, sp);
} else if (tc) {
vha = tc->vha;
sgl = tc->prot_sg;
+ difctx = tc->ctx;
+ direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
} else {
BUG();
return 1;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe021,
- "%s: enter\n", __func__);
-
- for_each_sg(sgl, sg, tot_dsds, i) {
- dma_addr_t sle_dma;
-
- /* Allocate additional continuation packets? */
- if (avail_dsds == 0) {
- avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
- QLA_DSDS_PER_IOCB : used_dsds;
- dsd_list_len = (avail_dsds + 1) * 12;
- used_dsds -= avail_dsds;
-
- /* allocate tracking DS */
- dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
- if (!dsd_ptr)
- return 1;
-
- /* allocate new list */
- dsd_ptr->dsd_addr = next_dsd =
- dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
- &dsd_ptr->dsd_list_dma);
-
- if (!next_dsd) {
- /*
- * Need to cleanup only this dsd_ptr, rest
- * will be done by sp_free_dma()
- */
- kfree(dsd_ptr);
- return 1;
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
+ "%s: enter (write=%u)\n", __func__, direction_to_device);
+
+ /* if initiator doing write or target doing read */
+ if (direction_to_device) {
+ for_each_sg(sgl, sg, tot_dsds, i) {
+ u64 sle_phys = sg_phys(sg);
+
+ /* If SGE addr + len flips bits in upper 32-bits */
+ if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
+ "%s: page boundary crossing (phys=%llx len=%x)\n",
+ __func__, sle_phys, sg->length);
+
+ if (difctx) {
+ ha->dif_bundle_crossed_pages++;
+ dif_local_dma_alloc = true;
+ } else {
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
+ vha, 0xe022,
+ "%s: difctx pointer is NULL\n",
+ __func__);
+ }
+ break;
}
+ }
+ ha->dif_bundle_writes++;
+ } else {
+ ha->dif_bundle_reads++;
+ }
+
+ if (ql2xdifbundlinginternalbuffers)
+ dif_local_dma_alloc = direction_to_device;
+
+ if (dif_local_dma_alloc) {
+ u32 track_difbundl_buf = 0;
+ u32 ldma_sg_len = 0;
+ u8 ldma_needed = 1;
+
+ difctx->no_dif_bundl = 0;
+ difctx->dif_bundl_len = 0;
+
+ /* Track DSD buffers */
+ INIT_LIST_HEAD(&difctx->ldif_dsd_list);
+ /* Track local DMA buffers */
+ INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
+
+ for_each_sg(sgl, sg, tot_dsds, i) {
+ u32 sglen = sg_dma_len(sg);
+
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
+ "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
+ __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
+ difctx->dif_bundl_len, ldma_needed);
+
+ while (sglen) {
+ u32 xfrlen = 0;
+
+ if (ldma_needed) {
+ /*
+ * Allocate list item to store
+ * the DMA buffers
+ */
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr),
+ GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe024,
+ "%s: failed alloc dsd_ptr\n",
+ __func__);
+ return 1;
+ }
+ ha->dif_bundle_kallocs++;
+
+ /* allocate dma buffer */
+ dsd_ptr->dsd_addr = dma_pool_alloc
+ (ha->dif_bundl_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe024,
+ "%s: failed alloc ->dsd_ptr\n",
+ __func__);
+ /*
+ * need to cleanup only this
+ * dsd_ptr rest will be done
+ * by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ ha->dif_bundle_kallocs--;
+ return 1;
+ }
+ ha->dif_bundle_dma_allocs++;
+ ldma_needed = 0;
+ difctx->no_dif_bundl++;
+ list_add_tail(&dsd_ptr->list,
+ &difctx->ldif_dma_hndl_list);
+ }
+
+ /* xfrlen is min of dma pool size and sglen */
+ xfrlen = (sglen >
+ (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
+ DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
+ sglen;
+
+ /* replace with local allocated dma buffer */
+ sg_pcopy_to_buffer(sgl, sg_nents(sgl),
+ dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
+ difctx->dif_bundl_len);
+ difctx->dif_bundl_len += xfrlen;
+ sglen -= xfrlen;
+ ldma_sg_len += xfrlen;
+ if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
+ sg_is_last(sg)) {
+ ldma_needed = 1;
+ ldma_sg_len = 0;
+ }
+ }
+ }
- if (sp) {
- list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)
- sp->u.scmd.ctx)->dsd_list);
+ track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
+ "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
+ difctx->dif_bundl_len, difctx->no_dif_bundl,
+ track_difbundl_buf);
- sp->flags |= SRB_CRC_CTX_DSD_VALID;
- } else {
- list_add_tail(&dsd_ptr->list,
- &(tc->ctx->dsd_list));
- *tc->ctx_dsd_alloced = 1;
+ if (sp)
+ sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
+ else
+ tc->prot_flags = DIF_BUNDL_DMA_VALID;
+
+ list_for_each_entry_safe(dif_dsd, nxt_dsd,
+ &difctx->ldif_dma_hndl_list, list) {
+ u32 sglen = (difctx->dif_bundl_len >
+ DIF_BUNDLING_DMA_POOL_SIZE) ?
+ DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
+
+ BUG_ON(track_difbundl_buf == 0);
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
+ 0xe024,
+ "%s: adding continuation iocb's\n",
+ __func__);
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe026,
+ "%s: failed alloc dsd_ptr\n",
+ __func__);
+ return 1;
+ }
+ ha->dif_bundle_kallocs++;
+
+ difctx->no_ldif_dsd++;
+ /* allocate new list */
+ dsd_ptr->dsd_addr =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe026,
+ "%s: failed alloc ->dsd_addr\n",
+ __func__);
+ /*
+ * need to cleanup only this dsd_ptr
+ * rest will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ ha->dif_bundle_kallocs--;
+ return 1;
+ }
+ ha->dif_bundle_dma_allocs++;
+
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &difctx->ldif_dsd_list);
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &difctx->ldif_dsd_list);
+ tc->ctx_dsd_alloced = 1;
+ }
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ =
+ cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ =
+ cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = dsd_ptr->dsd_addr;
}
-
- /* add new list to cmd iocb or last list */
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = dsd_list_len;
- cur_dsd = (uint32_t *)next_dsd;
+ *cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(sglen);
+ avail_dsds--;
+ difctx->dif_bundl_len -= sglen;
+ track_difbundl_buf--;
}
- sle_dma = sg_dma_address(sg);
-
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
- avail_dsds--;
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
+ "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
+ difctx->no_ldif_dsd, difctx->no_dif_bundl);
+ } else {
+ for_each_sg(sgl, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
+ vha, 0xe027,
+ "%s: failed alloc dsd_dma...\n",
+ __func__);
+ return 1;
+ }
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ /* need to cleanup only this dsd_ptr */
+ /* rest will be done by sp_free_dma() */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &difctx->dsd_list);
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &difctx->dsd_list);
+ tc->ctx_dsd_alloced = 1;
+ }
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ =
+ cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ =
+ cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = dsd_ptr->dsd_addr;
+ }
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
}
/* Null termination */
*cur_dsd++ = 0;
@@ -1187,7 +1395,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
*cur_dsd++ = 0;
return 0;
}
-
/**
* qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
* Type 6 IOCB types.
@@ -1416,21 +1623,19 @@ qla24xx_start_scsi(srb_t *sp)
uint16_t req_cnt;
uint16_t tot_dsds;
struct req_que *req = NULL;
- struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
req = vha->req;
- rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
vha->marker_needed = 0;
@@ -1583,7 +1788,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
vha->marker_needed = 0;
@@ -1754,7 +1959,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
uint16_t req_cnt;
uint16_t tot_dsds;
struct req_que *req = NULL;
- struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
@@ -1764,7 +1968,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
spin_lock_irqsave(&qpair->qp_lock, flags);
/* Setup qpair pointers */
- rsp = qpair->rsp;
req = qpair->req;
/* So we know we haven't pci_map'ed anything yet */
@@ -1772,7 +1975,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
QLA_SUCCESS) {
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -1940,7 +2143,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
QLA_SUCCESS) {
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -2208,8 +2411,11 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
- if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
+ if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
logio->control_flags |= LCF_NVME_PRLI;
+ if (sp->vha->flags.nvme_first_burst)
+ logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
+ }
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2991,8 +3197,8 @@ qla82xx_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req,
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ if (qla2x00_marker(vha, ha->base_qpair,
+ 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x300c,
"qla2x00_marker failed for cmd=%p.\n", cmd);
return QLA_FUNCTION_FAILED;
@@ -3434,23 +3640,22 @@ qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
int
qla2x00_start_sp(srb_t *sp)
{
- int rval;
+ int rval = QLA_SUCCESS;
scsi_qla_host_t *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qp = sp->qpair;
void *pkt;
unsigned long flags;
- rval = QLA_FUNCTION_FAILED;
spin_lock_irqsave(qp->qp_lock_ptr, flags);
pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
if (!pkt) {
+ rval = EAGAIN;
ql_log(ql_log_warn, vha, 0x700c,
"qla2x00_alloc_iocbs failed.\n");
goto done;
}
- rval = QLA_SUCCESS;
switch (sp->type) {
case SRB_LOGIN_CMD:
IS_FWI2_CAPABLE(ha) ?
@@ -3646,8 +3851,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req,
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ if (qla2x00_marker(vha, ha->base_qpair,
+ 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
return EXT_STATUS_MAILBOX;
vha->marker_needed = 0;
}
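Note on the rewritten qla24xx_walk_and_build_prot_sglist() above: MSD() yields the upper 32 bits of a 64-bit address, so the XOR test flags any protection SG element whose start and end disagree in those bits, i.e. one that crosses a 4GB boundary. Such elements (or all writes, when ql2xdifbundlinginternalbuffers is set) are bounced through dif_bundl_pool DMA buffers instead of being mapped directly. The predicate in isolation:

	/* Boundary test from qla24xx_walk_and_build_prot_sglist() above:
	 * nonzero exactly when the element's start and end addresses
	 * disagree in their upper 32 bits (a 4GB boundary crossing).
	 */
	u64 sle_phys = sg_phys(sg);

	if (MSD(sle_phys + sg->length) ^ MSD(sle_phys))
		dif_local_dma_alloc = true;	/* bounce via dif_bundl_pool */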
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 8507c43b918c..69bbea9239cc 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -834,7 +834,8 @@ skip_rio:
* Restore for Physical Port only
*/
if (!vha->vp_idx) {
- if (ha->flags.fawwpn_enabled) {
+ if (ha->flags.fawwpn_enabled &&
+ (ha->current_topology == ISP_CFG_F)) {
void *wwpn = ha->init_cb->port_name;
memcpy(vha->port_name, wwpn, WWN_SIZE);
fc_host_port_name(vha->host) =
@@ -1714,6 +1715,15 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
vha->hw->exch_starvation = 0;
data[0] = MBS_COMMAND_COMPLETE;
+
+ if (sp->type == SRB_PRLI_CMD) {
+ lio->u.logio.iop[0] =
+ le32_to_cpu(logio->io_parameter[0]);
+ lio->u.logio.iop[1] =
+ le32_to_cpu(logio->io_parameter[1]);
+ goto logio_done;
+ }
+
if (sp->type != SRB_LOGIN_CMD)
goto logio_done;
@@ -2725,6 +2735,17 @@ check_scsi_status:
cp->device->vendor);
break;
+ case CS_DMA:
+ ql_log(ql_log_info, fcport->vha, 0x3022,
+ "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
+ comp_status, scsi_status, res, vha->host_no,
+ cp->device->id, cp->device->lun, fcport->d_id.b24,
+ ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
+ resid_len, fw_resid_len, sp, cp);
+ ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
+ pkt, sizeof(*sts24));
+ res = DID_ERROR << 16;
+ break;
default:
res = DID_ERROR << 16;
break;
@@ -3410,7 +3431,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
min_vecs++;
}
- if (USER_CTRL_IRQ(ha)) {
+ if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
/* user wants to control IRQ setting for target mode */
ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
ha->msix_count, PCI_IRQ_MSIX);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 191b6b7c8747..5400696e1f6b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1109,7 +1109,12 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
* FW supports nvme and driver load parameter requested nvme.
* BIT 26 of fw_attributes indicates NVMe support.
*/
- if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
+ if ((ha->fw_attributes_h &
+ (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
+ ql2xnvmeenable) {
+ if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
+ vha->flags.nvme_first_burst = 1;
+
vha->flags.nvme_enabled = 1;
ql_log(ql_log_info, vha, 0xd302,
"%s: FC-NVMe is Enabled (0x%x)\n",
@@ -1508,16 +1513,12 @@ qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
scsi_qla_host_t *vha;
- struct req_que *req;
- struct rsp_que *rsp;
vha = fcport->vha;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
"Entered %s.\n", __func__);
- req = vha->hw->req_q_map[0];
- rsp = req->rsp;
mcp->mb[0] = MBC_ABORT_TARGET;
mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -1540,7 +1541,7 @@ qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
}
/* Issue marker IOCB. */
- rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
+ rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
MK_SYNC_ID);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1040,
@@ -1560,16 +1561,12 @@ qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
scsi_qla_host_t *vha;
- struct req_que *req;
- struct rsp_que *rsp;
vha = fcport->vha;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
"Entered %s.\n", __func__);
- req = vha->hw->req_q_map[0];
- rsp = req->rsp;
mcp->mb[0] = MBC_LUN_RESET;
mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
if (HAS_EXTENDED_IDS(vha->hw))
@@ -1589,7 +1586,7 @@ qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
}
/* Issue marker IOCB. */
- rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
+ rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
MK_SYNC_ID_LUN);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1044,
@@ -2244,10 +2241,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mcp->out_mb = MBX_2|MBX_1|MBX_0;
} else if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
- if (N2N_TOPO(vha->hw))
- mcp->mb[1] = BIT_4; /* re-init */
- else
- mcp->mb[1] = BIT_6; /* LIP */
+ mcp->mb[1] = BIT_4;
mcp->mb[2] = 0;
mcp->mb[3] = vha->hw->loop_reset_delay;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -2757,7 +2751,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
"Entered %s.\n", __func__);
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
- mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
+ mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
mcp->mb[2] = 0;
mcp->mb[3] = 0;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3184,7 +3178,6 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
scsi_qla_host_t *vha;
struct qla_hw_data *ha;
struct req_que *req;
- struct rsp_que *rsp;
struct qla_qpair *qpair;
vha = fcport->vha;
@@ -3197,10 +3190,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
if (vha->vp_idx && vha->qpair) {
/* NPIV port */
qpair = vha->qpair;
- rsp = qpair->rsp;
req = qpair->req;
- } else {
- rsp = req->rsp;
}
tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
@@ -3257,7 +3247,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
}
/* Issue marker IOCB. */
- rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
+ rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1099,
@@ -5248,6 +5238,66 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
return rval;
}
+/* Set the specified data rate */
+int
+qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t val;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+ "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
+ mode);
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ memset(mcp, 0, sizeof(*mcp));
+ switch (ha->set_data_rate) {
+ case PORT_SPEED_AUTO:
+ case PORT_SPEED_4GB:
+ case PORT_SPEED_8GB:
+ case PORT_SPEED_16GB:
+ case PORT_SPEED_32GB:
+ val = ha->set_data_rate;
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x1199,
+ "Unrecognized speed setting:%d. Setting Autoneg\n",
+ ha->set_data_rate);
+ val = ha->set_data_rate = PORT_SPEED_AUTO;
+ break;
+ }
+
+ mcp->mb[0] = MBC_DATA_RATE;
+ mcp->mb[1] = mode;
+ mcp->mb[2] = val;
+
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ mcp->in_mb |= MBX_4|MBX_3;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1107,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ if (mcp->mb[1] != 0x7)
+ ql_dbg(ql_dbg_mbx, vha, 0x1179,
+ "Speed set:0x%x\n", mcp->mb[1]);
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
int
qla2x00_get_data_rate(scsi_qla_host_t *vha)
{
@@ -5263,7 +5313,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_DATA_RATE;
- mcp->mb[1] = 0;
+ mcp->mb[1] = QLA_GET_DATA_RATE;
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
@@ -6268,8 +6318,6 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.rsvd_1 = 0;
if (fcport->fc4f_nvme) {
- fcport->nvme_prli_service_param =
- pd->prli_nvme_svc_param_word_3;
fcport->port_type = FCT_NVME;
} else {
/* If not target must be initiator or unknown type. */
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 39d892bbd219..41c85da3ab32 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -185,6 +185,14 @@ static void qla_nvme_abort_work(struct work_struct *work)
struct qla_hw_data *ha = fcport->vha->hw;
int rval;
+ if (fcport)
+ ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
+ "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
+ __func__, sp, sp->handle, fcport, fcport->deleted);
+
+ if (!ha->flags.fw_started && (fcport && fcport->deleted))
+ return;
+
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
@@ -358,17 +366,24 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* No data transfer how do we check buffer len == 0?? */
if (fd->io_dir == NVMEFC_FCP_READ) {
- cmd_pkt->control_flags =
- cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
+ cmd_pkt->control_flags = CF_READ_DATA;
vha->qla_stats.input_bytes += fd->payload_length;
vha->qla_stats.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
- cmd_pkt->control_flags =
- cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
+ cmd_pkt->control_flags = CF_WRITE_DATA;
+ if ((vha->flags.nvme_first_burst) &&
+ (sp->fcport->nvme_prli_service_param &
+ NVME_PRLI_SP_FIRST_BURST)) {
+ if ((fd->payload_length <=
+ sp->fcport->nvme_first_burst_size) ||
+ (sp->fcport->nvme_first_burst_size == 0))
+ cmd_pkt->control_flags |=
+ CF_NVME_FIRST_BURST_ENABLE;
+ }
vha->qla_stats.output_bytes += fd->payload_length;
vha->qla_stats.output_requests++;
} else if (fd->io_dir == 0) {
- cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
+ cmd_pkt->control_flags = 0;
}
/* Set NPORT-ID */
@@ -600,6 +615,7 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work)
struct fc_port *fcport = container_of(work, struct fc_port,
nvme_del_work);
struct qla_nvme_rport *qla_rport, *trport;
+ scsi_qla_host_t *base_vha;
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
@@ -607,6 +623,15 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work)
ql_log(ql_log_warn, NULL, 0x2112,
"%s: unregister remoteport on %p\n",__func__, fcport);
+ base_vha = pci_get_drvdata(fcport->vha->hw->pdev);
+ if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2114,
+ "%s: Notify FC-NVMe transport, set devloss=0\n",
+ __func__);
+
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+ }
+
list_for_each_entry_safe(qla_rport, trport,
&fcport->vha->nvme_rport_list, list) {
if (qla_rport->fcport == fcport) {
@@ -623,23 +648,11 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work)
void qla_nvme_delete(struct scsi_qla_host *vha)
{
- struct qla_nvme_rport *qla_rport, *trport;
- fc_port_t *fcport;
int nv_ret;
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
- list_for_each_entry_safe(qla_rport, trport,
- &vha->nvme_rport_list, list) {
- fcport = qla_rport->fcport;
-
- ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
- __func__, fcport);
-
- nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
- }
-
if (vha->nvme_local_port) {
init_completion(&vha->nvme_del_done);
ql_log(ql_log_info, vha, 0x2116,
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index 4941d107fb1c..da8dad5ad693 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -57,7 +57,7 @@ struct cmd_nvme {
uint64_t rsvd;
uint16_t control_flags; /* Control Flags */
-#define CF_NVME_ENABLE BIT_9
+#define CF_NVME_FIRST_BURST_ENABLE BIT_11
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
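Note on the NVMe first-burst plumbing, which spans three files in this series: the PRLI completion path (qla_init.c) records the service parameters and derives the burst size from iop[1], qla2x00_get_fw_version() (qla_mbx.c) sets vha->flags.nvme_first_burst when firmware advertises FW_ATTR_H_NVME_FBURST, and qla2x00_start_nvme_mq() (qla_nvme.c) sets the new CF_NVME_FIRST_BURST_ENABLE flag on writes that fit. Condensed, under the field meanings shown in those hunks (iop[1] low 16 bits = burst size in 512-byte units; the check treats 0 as no limit):

	/* First-burst enable logic condensed from the hunks above. */
	fcport->nvme_first_burst_size = (ea->iop[1] & 0xffff) * 512;

	if (vha->flags.nvme_first_burst &&
	    (fcport->nvme_prli_service_param & NVME_PRLI_SP_FIRST_BURST) &&
	    (fd->payload_length <= fcport->nvme_first_burst_size ||
	     fcport->nvme_first_burst_size == 0))
		cmd_pkt->control_flags |= CF_NVME_FIRST_BURST_ENABLE;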
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c6ef83d0d99b..91f576d743fe 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -285,6 +285,27 @@ MODULE_PARM_DESC(qla2xuseresexchforels,
"Reserve 1/2 of emergency exchanges for ELS.\n"
" 0 (default): disabled");
+int ql2xprotmask;
+module_param(ql2xprotmask, int, 0644);
+MODULE_PARM_DESC(ql2xprotmask,
+ "Override DIF/DIX protection capabilities mask\n"
+ "Default is 0 which sets protection mask based on "
+ "capabilities reported by HBA firmware.\n");
+
+int ql2xprotguard;
+module_param(ql2xprotguard, int, 0644);
+MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
+ " 0 -- Let HBA firmware decide\n"
+ " 1 -- Force T10 CRC\n"
+ " 2 -- Force IP checksum\n");
+
+int ql2xdifbundlinginternalbuffers;
+module_param(ql2xdifbundlinginternalbuffers, int, 0644);
+MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
+ "Force using internal buffers for DIF information\n"
+ "0 (Default). Based on check.\n"
+ "1 Force using internal buffers\n");
+
/*
* SCSI host template entry points
*/
@@ -804,7 +825,44 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
mempool_free(ctx1, ha->ctx_mempool);
+ sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
+ }
+ if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
+ struct crc_context *difctx = sp->u.scmd.ctx;
+ struct dsd_dma *dif_dsd, *nxt_dsd;
+
+ list_for_each_entry_safe(dif_dsd, nxt_dsd,
+ &difctx->ldif_dma_hndl_list, list) {
+ list_del(&dif_dsd->list);
+ dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
+ dif_dsd->dsd_list_dma);
+ kfree(dif_dsd);
+ difctx->no_dif_bundl--;
+ }
+
+ list_for_each_entry_safe(dif_dsd, nxt_dsd,
+ &difctx->ldif_dsd_list, list) {
+ list_del(&dif_dsd->list);
+ dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
+ dif_dsd->dsd_list_dma);
+ kfree(dif_dsd);
+ difctx->no_ldif_dsd--;
+ }
+
+ if (difctx->no_ldif_dsd) {
+ ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
+ "%s: difctx->no_ldif_dsd=%x\n",
+ __func__, difctx->no_ldif_dsd);
+ }
+
+ if (difctx->no_dif_bundl) {
+ ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
+ "%s: difctx->no_dif_bundl=%x\n",
+ __func__, difctx->no_dif_bundl);
+ }
+ sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
}
+
end:
CMD_SP(cmd) = NULL;
qla2xxx_rel_qpair_sp(sp->qpair, sp);
@@ -1459,7 +1517,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
goto eh_reset_failed;
}
err = 2;
- if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
+ if (do_reset(fcport, cmd->device->lun, 1)
!= QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800c,
"do_reset failed for cmd=%p.\n", cmd);
@@ -3342,13 +3400,16 @@ skip_dpc:
"Registering for DIF/DIX type 1 and 3 protection.\n");
if (ql2xenabledif == 1)
prot = SHOST_DIX_TYPE0_PROTECTION;
- scsi_host_set_prot(host,
- prot | SHOST_DIF_TYPE1_PROTECTION
- | SHOST_DIF_TYPE2_PROTECTION
- | SHOST_DIF_TYPE3_PROTECTION
- | SHOST_DIX_TYPE1_PROTECTION
- | SHOST_DIX_TYPE2_PROTECTION
- | SHOST_DIX_TYPE3_PROTECTION);
+ if (ql2xprotmask)
+ scsi_host_set_prot(host, ql2xprotmask);
+ else
+ scsi_host_set_prot(host,
+ prot | SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE2_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION);
guard = SHOST_DIX_GUARD_CRC;
@@ -3356,7 +3417,10 @@ skip_dpc:
(ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
guard |= SHOST_DIX_GUARD_IP;
- scsi_host_set_guard(host, guard);
+ if (ql2xprotguard)
+ scsi_host_set_guard(host, ql2xprotguard);
+ else
+ scsi_host_set_guard(host, guard);
} else
base_vha->flags.difdix_supported = 0;
}
@@ -3997,9 +4061,86 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"Failed to allocate memory for fcp_cmnd_dma_pool.\n");
goto fail_dl_dma_pool;
}
+
+ if (ql2xenabledif) {
+ u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
+ struct dsd_dma *dsd, *nxt;
+ uint i;
+ /* Create a DMA pool of buffers for DIF bundling */
+ ha->dif_bundl_pool = dma_pool_create(name,
+ &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
+ if (!ha->dif_bundl_pool) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
+ "%s: failed create dif_bundl_pool\n",
+ __func__);
+ goto fail_dif_bundl_dma_pool;
+ }
+
+ INIT_LIST_HEAD(&ha->pool.good.head);
+ INIT_LIST_HEAD(&ha->pool.unusable.head);
+ ha->pool.good.count = 0;
+ ha->pool.unusable.count = 0;
+ for (i = 0; i < 128; i++) {
+ dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
+ if (!dsd) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev,
+ 0xe0ee, "%s: failed alloc dsd\n",
+ __func__);
+ return 1;
+ }
+ ha->dif_bundle_kallocs++;
+
+ dsd->dsd_addr = dma_pool_alloc(
+ ha->dif_bundl_pool, GFP_ATOMIC,
+ &dsd->dsd_list_dma);
+ if (!dsd->dsd_addr) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev,
+ 0xe0ee,
+ "%s: failed alloc ->dsd_addr\n",
+ __func__);
+ kfree(dsd);
+ ha->dif_bundle_kallocs--;
+ continue;
+ }
+ ha->dif_bundle_dma_allocs++;
+
+ /*
+ * if DMA buffer crosses 4G boundary,
+ * put it on bad list
+ */
+ if (MSD(dsd->dsd_list_dma) ^
+ MSD(dsd->dsd_list_dma + bufsize)) {
+ list_add_tail(&dsd->list,
+ &ha->pool.unusable.head);
+ ha->pool.unusable.count++;
+ } else {
+ list_add_tail(&dsd->list,
+ &ha->pool.good.head);
+ ha->pool.good.count++;
+ }
+ }
+
+ /* return the good ones back to the pool */
+ list_for_each_entry_safe(dsd, nxt,
+ &ha->pool.good.head, list) {
+ list_del(&dsd->list);
+ dma_pool_free(ha->dif_bundl_pool,
+ dsd->dsd_addr, dsd->dsd_list_dma);
+ ha->dif_bundle_dma_allocs--;
+ kfree(dsd);
+ ha->dif_bundle_kallocs--;
+ }
+
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
+ "%s: dif dma pool (good=%u unusable=%u)\n",
+ __func__, ha->pool.good.count,
+ ha->pool.unusable.count);
+ }
+
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
- "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
- ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
+ "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
+ ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
+ ha->dif_bundl_pool);
}
/* Allocate memory for SNS commands */
@@ -4164,6 +4305,24 @@ fail_free_ms_iocb:
dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
ha->sns_cmd, ha->sns_cmd_dma);
fail_dma_pool:
+ if (ql2xenabledif) {
+ struct dsd_dma *dsd, *nxt;
+
+ list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
+ list) {
+ list_del(&dsd->list);
+ dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+ dsd->dsd_list_dma);
+ ha->dif_bundle_dma_allocs--;
+ kfree(dsd);
+ ha->dif_bundle_kallocs--;
+ ha->pool.unusable.count--;
+ }
+ dma_pool_destroy(ha->dif_bundl_pool);
+ ha->dif_bundl_pool = NULL;
+ }
+
+fail_dif_bundl_dma_pool:
if (IS_QLA82XX(ha) || ql2xenabledif) {
dma_pool_destroy(ha->fcp_cmnd_dma_pool);
ha->fcp_cmnd_dma_pool = NULL;
@@ -4544,6 +4703,32 @@ qla2x00_mem_free(struct qla_hw_data *ha)
mempool_destroy(ha->ctx_mempool);
+ if (ql2xenabledif) {
+ struct dsd_dma *dsd, *nxt;
+
+ list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
+ list) {
+ list_del(&dsd->list);
+ dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+ dsd->dsd_list_dma);
+ ha->dif_bundle_dma_allocs--;
+ kfree(dsd);
+ ha->dif_bundle_kallocs--;
+ ha->pool.unusable.count--;
+ }
+ list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
+ list_del(&dsd->list);
+ dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+ dsd->dsd_list_dma);
+ ha->dif_bundle_dma_allocs--;
+ kfree(dsd);
+ ha->dif_bundle_kallocs--;
+ }
+ }
+
+ if (ha->dif_bundl_pool)
+ dma_pool_destroy(ha->dif_bundl_pool);
+
qlt_mem_free(ha);
if (ha->init_cb)
@@ -5019,14 +5204,14 @@ qla2x00_do_work(struct scsi_qla_host *vha)
struct qla_work_evt *e, *tmp;
unsigned long flags;
LIST_HEAD(work);
+ int rc;
spin_lock_irqsave(&vha->work_lock, flags);
list_splice_init(&vha->work_list, &work);
spin_unlock_irqrestore(&vha->work_lock, flags);
list_for_each_entry_safe(e, tmp, &work, list) {
- list_del_init(&e->list);
-
+ rc = QLA_SUCCESS;
switch (e->type) {
case QLA_EVT_AEN:
fc_host_post_event(vha->host, fc_get_event_number(),
@@ -5040,7 +5225,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
e->u.logio.data);
break;
case QLA_EVT_ASYNC_LOGOUT:
- qla2x00_async_logout(vha, e->u.logio.fcport);
+ rc = qla2x00_async_logout(vha, e->u.logio.fcport);
break;
case QLA_EVT_ASYNC_LOGOUT_DONE:
qla2x00_async_logout_done(vha, e->u.logio.fcport,
@@ -5085,7 +5270,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla24xx_do_nack_work(vha, e);
break;
case QLA_EVT_ASYNC_PRLO:
- qla2x00_async_prlo(vha, e->u.logio.fcport);
+ rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
break;
case QLA_EVT_ASYNC_PRLO_DONE:
qla2x00_async_prlo_done(vha, e->u.logio.fcport,
@@ -5118,6 +5303,15 @@ qla2x00_do_work(struct scsi_qla_host *vha)
e->u.fcport.fcport, false);
break;
}
+
+ if (rc == EAGAIN) {
+ /* put 'work' at head of 'vha->work_list' */
+ spin_lock_irqsave(&vha->work_lock, flags);
+ list_splice(&work, &vha->work_list);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ break;
+ }
+ list_del_init(&e->list);
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -6930,13 +7124,64 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
ha->flags.eeh_busy = 0;
}
+static void
+qla_pci_reset_prepare(struct pci_dev *pdev)
+{
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = base_vha->hw;
+ struct qla_qpair *qpair;
+
+ ql_log(ql_log_warn, base_vha, 0xffff,
+ "%s.\n", __func__);
+
+ /*
+ * PCI FLR/function reset is about to reset the
+ * slot. Stop the chip to stop all DMA access.
+ * It is assumed that pci_reset_done will be called
+ * after FLR to resume Chip operation.
+ */
+ ha->flags.eeh_busy = 1;
+ mutex_lock(&ha->mq_lock);
+ list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+ qpair->online = 0;
+ mutex_unlock(&ha->mq_lock);
+
+ set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ qla2x00_abort_isp_cleanup(base_vha);
+ qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
+}
+
+static void
+qla_pci_reset_done(struct pci_dev *pdev)
+{
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = base_vha->hw;
+ struct qla_qpair *qpair;
+
+ ql_log(ql_log_warn, base_vha, 0xffff,
+ "%s.\n", __func__);
+
+ /*
+ * FLR just completed by PCI layer. Resume adapter
+ */
+ ha->flags.eeh_busy = 0;
+ mutex_lock(&ha->mq_lock);
+ list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+ qpair->online = 1;
+ mutex_unlock(&ha->mq_lock);
+
+ base_vha->flags.online = 1;
+ ha->isp_ops->abort_isp(base_vha);
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+}
+
static int qla2xxx_map_queues(struct Scsi_Host *shost)
{
int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
- if (USER_CTRL_IRQ(vha->hw))
+ if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
rc = blk_mq_map_queues(qmap);
else
rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
@@ -6948,6 +7193,8 @@ static const struct pci_error_handlers qla2xxx_err_handler = {
.mmio_enabled = qla2xxx_pci_mmio_enabled,
.slot_reset = qla2xxx_pci_slot_reset,
.resume = qla2xxx_pci_resume,
+ .reset_prepare = qla_pci_reset_prepare,
+ .reset_done = qla_pci_reset_done,
};
static struct pci_device_id qla2xxx_pci_tbl[] = {
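Note on the qla2x00_do_work() hunks above: an event is now unlinked only after its handler runs, and when an async send reports EAGAIN (the new qla2x00_start_sp() return for IOCB allocation failure) the unprocessed tail of the local list, still including the failed event, is spliced back onto the head of vha->work_list for a later retry. A sketch of that loop, with the dispatch collapsed into a hypothetical handle_event():

	/* Requeue-on-EAGAIN flow from qla2x00_do_work() above;
	 * handle_event() is a hypothetical stand-in for the switch on
	 * e->type. The failed event was never list_del'ed, so the splice
	 * returns it to vha->work_list along with everything after it.
	 */
	list_for_each_entry_safe(e, tmp, &work, list) {
		rc = handle_event(vha, e);
		if (rc == EAGAIN) {
			spin_lock_irqsave(&vha->work_lock, flags);
			list_splice(&work, &vha->work_list);
			spin_unlock_irqrestore(&vha->work_lock, flags);
			break;
		}
		list_del_init(&e->list);
		if (e->flags & QLA_EVT_FLAG_FREE)
			kfree(e);
	}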
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 510337eac106..582d1663f971 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -660,14 +660,14 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
sp->done = qla2x00_async_nack_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0x20f4,
"Async-%s %8phC hndl %x %s\n",
sp->name, fcport->port_name, sp->handle, c);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
return rval;
done_free_sp:
@@ -684,6 +684,9 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
switch (e->u.nack.type) {
case SRB_NACK_PRLI:
+ t = e->u.nack.fcport;
+ flush_work(&t->del_work);
+ flush_work(&t->free_work);
mutex_lock(&vha->vha_tgt.tgt_mutex);
t = qlt_create_sess(vha, e->u.nack.fcport, 0);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
@@ -3230,7 +3233,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
- prm->prot_seg_cnt, &tc))
+ prm->prot_seg_cnt, cmd))
goto crc_queuing_error;
}
return QLA_SUCCESS;
@@ -3257,13 +3260,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
unsigned long flags = 0;
int res;
- if (cmd->sess && cmd->sess->deleted) {
+ if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
+ (cmd->sess && cmd->sess->deleted)) {
cmd->state = QLA_TGT_STATE_PROCESSED;
- if (cmd->sess->logout_completed)
- /* no need to terminate. FW already freed exchange. */
- qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
- else
- qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0);
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
return 0;
}
@@ -6343,7 +6343,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
struct atio_from_isp *a = &prm->tm_iocb2;
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess = NULL;
+ struct fc_port *sess;
unsigned long flags;
uint8_t *s_id = NULL; /* to hide compiler warnings */
int rc;
@@ -6369,7 +6369,6 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
goto out_term2;
} else {
if (sess->deleted) {
- sess = NULL;
goto out_term2;
}
@@ -6377,7 +6376,6 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
"%s: kref_get fail %8phC\n",
__func__, sess->port_name);
- sess = NULL;
goto out_term2;
}
}
@@ -6396,8 +6394,6 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
return;
out_term2:
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 577e1786a3f1..f3de75000a08 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -928,6 +928,8 @@ struct qla_tgt_cmd {
uint64_t lba;
uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
uint32_t a_ref_tag, e_ref_tag;
+#define DIF_BUNDL_DMA_VALID 1
+ uint16_t prot_flags;
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
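Note tying the qla_target.{c,h} hunks to qla_iocb.c: the last parameter of qla24xx_walk_and_build_prot_sglist() changed type from struct qla_tc_param * to struct qla_tgt_cmd *, which is why qlt_build_ctio_crc2_pkt() above now passes cmd instead of the stack-built &tc, and why the DIF_BUNDL_DMA_VALID marker lives directly in struct qla_tgt_cmd as prot_flags. The new prototype, from the qla_iocb.c hunk:

	/* New prototype (from the qla_iocb.c hunk above): target callers
	 * now hand over the qla_tgt_cmd itself rather than a temporary
	 * qla_tc_param.
	 */
	int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha,
	    srb_t *sp, uint32_t *cur_dsd, uint16_t tot_dsds,
	    struct qla_tgt_cmd *tc);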
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 0ccd06f11f12..9e52500caff0 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -221,7 +221,13 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}
-static int
+static inline struct qla27xx_fwdt_entry *
+qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
+{
+ return (void *)ent + ent->hdr.size;
+}
+
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -229,10 +235,10 @@ qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
"%s: nop [%lx]\n", __func__, *len);
qla27xx_skip_entry(ent, buf);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -241,10 +247,10 @@ qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
qla27xx_skip_entry(ent, buf);
/* terminate */
- return true;
+ return NULL;
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -255,10 +261,10 @@ qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
ent->t256.reg_count, ent->t256.reg_width, buf, len);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -269,10 +275,10 @@ qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -284,10 +290,10 @@ qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
ent->t258.reg_count, ent->t258.reg_width, buf, len);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -299,10 +305,10 @@ qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -313,10 +319,10 @@ qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
qla27xx_insert32(ent->t260.pci_offset, buf, len);
qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -326,10 +332,10 @@ qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
"%s: wrpci [%lx]\n", __func__, *len);
qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -362,6 +368,11 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
ent->t262.start_addr = start;
ent->t262.end_addr = end;
}
+ } else if (ent->t262.ram_area == T262_RAM_AREA_MISC) {
+ if (buf) {
+ ent->t262.start_addr = start;
+ ent->t262.end_addr = end;
+ }
} else {
ql_dbg(ql_dbg_misc, vha, 0xd022,
"%s: unknown area %x\n", __func__, ent->t262.ram_area);
@@ -384,10 +395,10 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
}
*len += dwords * sizeof(uint32_t);
done:
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -450,10 +461,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
qla27xx_skip_entry(ent, buf);
}
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -478,10 +489,10 @@ qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
qla27xx_skip_entry(ent, buf);
}
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -492,10 +503,10 @@ qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
if (buf)
qla24xx_pause_risc(reg, vha->hw);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -504,10 +515,10 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
if (buf)
qla24xx_soft_reset(vha->hw);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -517,10 +528,10 @@ qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
"%s: dis intr [%lx]\n", __func__, *len);
qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -587,10 +598,10 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
break;
}
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -604,10 +615,10 @@ qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
if (buf)
ent->t269.scratch_size = 5 * sizeof(uint32_t);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -625,10 +636,10 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
addr += sizeof(uint32_t);
}
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -642,10 +653,10 @@ qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
qla27xx_write_reg(reg, 0xc4, data, buf);
qla27xx_write_reg(reg, 0xc0, addr, buf);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -662,10 +673,10 @@ qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
}
*len += dwords * sizeof(uint32_t);
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -685,10 +696,10 @@ qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
addr += sizeof(uint32_t);
}
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -746,10 +757,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
qla27xx_skip_entry(ent, buf);
}
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
@@ -763,7 +774,7 @@ qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
qla27xx_skip_entry(ent, buf);
goto done;
}
- if (offset + ent->t275.length > ent->hdr.entry_size) {
+ if (offset + ent->t275.length > ent->hdr.size) {
ql_dbg(ql_dbg_misc, vha, 0xd030,
"%s: buffer overflow\n", __func__);
qla27xx_skip_entry(ent, buf);
@@ -772,59 +783,103 @@ qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
done:
- return false;
+ return qla27xx_next_entry(ent);
}
-static int
+static struct qla27xx_fwdt_entry *
+qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ uint type = vha->hw->pdev->device >> 4 & 0xf;
+ uint func = vha->hw->port_no & 0x3;
+
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
+ "%s: cond [%lx]\n", __func__, *len);
+
+ if (type != ent->t276.cond1 || func != ent->t276.cond2) {
+ ent = qla27xx_next_entry(ent);
+ qla27xx_skip_entry(ent, buf);
+ }
+
+ return qla27xx_next_entry(ent);
+}
+
+static struct qla27xx_fwdt_entry *
+qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
+ "%s: rdpep [%lx]\n", __func__, *len);
+ qla27xx_insert32(ent->t277.wr_cmd_data, buf, len);
+ qla27xx_write_reg(reg, ent->t277.cmd_addr, ent->t277.wr_cmd_data, buf);
+ qla27xx_read_reg(reg, ent->t277.data_addr, buf, len);
+
+ return qla27xx_next_entry(ent);
+}
+
+static struct qla27xx_fwdt_entry *
+qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
+ "%s: wrpep [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t278.data_addr, ent->t278.wr_data, buf);
+ qla27xx_write_reg(reg, ent->t278.cmd_addr, ent->t278.wr_cmd_data, buf);
+
+ return qla27xx_next_entry(ent);
+}
+
+static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
ql_dbg(ql_dbg_misc, vha, 0xd2ff,
- "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
+ "%s: type %x [%lx]\n", __func__, ent->hdr.type, *len);
qla27xx_skip_entry(ent, buf);
- return false;
+ return qla27xx_next_entry(ent);
}
-struct qla27xx_fwdt_entry_call {
+static struct {
uint type;
- int (*call)(
- struct scsi_qla_host *,
- struct qla27xx_fwdt_entry *,
- void *,
- ulong *);
-};
-
-static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
- { ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } ,
- { ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } ,
- { ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } ,
- { ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } ,
- { ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } ,
- { ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } ,
- { ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } ,
- { ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } ,
- { ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } ,
- { ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } ,
- { ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } ,
- { ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } ,
- { ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } ,
- { ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } ,
- { ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } ,
- { ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } ,
- { ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } ,
- { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
- { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
- { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
- { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } ,
- { ENTRY_TYPE_WRITE_BUF , qla27xx_fwdt_entry_t275 } ,
- { -1 , qla27xx_fwdt_entry_other }
+ typeof(qla27xx_fwdt_entry_other)(*call);
+} qla27xx_fwdt_entry_call[] = {
+ { ENTRY_TYPE_NOP, qla27xx_fwdt_entry_t0 },
+ { ENTRY_TYPE_TMP_END, qla27xx_fwdt_entry_t255 },
+ { ENTRY_TYPE_RD_IOB_T1, qla27xx_fwdt_entry_t256 },
+ { ENTRY_TYPE_WR_IOB_T1, qla27xx_fwdt_entry_t257 },
+ { ENTRY_TYPE_RD_IOB_T2, qla27xx_fwdt_entry_t258 },
+ { ENTRY_TYPE_WR_IOB_T2, qla27xx_fwdt_entry_t259 },
+ { ENTRY_TYPE_RD_PCI, qla27xx_fwdt_entry_t260 },
+ { ENTRY_TYPE_WR_PCI, qla27xx_fwdt_entry_t261 },
+ { ENTRY_TYPE_RD_RAM, qla27xx_fwdt_entry_t262 },
+ { ENTRY_TYPE_GET_QUEUE, qla27xx_fwdt_entry_t263 },
+ { ENTRY_TYPE_GET_FCE, qla27xx_fwdt_entry_t264 },
+ { ENTRY_TYPE_PSE_RISC, qla27xx_fwdt_entry_t265 },
+ { ENTRY_TYPE_RST_RISC, qla27xx_fwdt_entry_t266 },
+ { ENTRY_TYPE_DIS_INTR, qla27xx_fwdt_entry_t267 },
+ { ENTRY_TYPE_GET_HBUF, qla27xx_fwdt_entry_t268 },
+ { ENTRY_TYPE_SCRATCH, qla27xx_fwdt_entry_t269 },
+ { ENTRY_TYPE_RDREMREG, qla27xx_fwdt_entry_t270 },
+ { ENTRY_TYPE_WRREMREG, qla27xx_fwdt_entry_t271 },
+ { ENTRY_TYPE_RDREMRAM, qla27xx_fwdt_entry_t272 },
+ { ENTRY_TYPE_PCICFG, qla27xx_fwdt_entry_t273 },
+ { ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274 },
+ { ENTRY_TYPE_WRITE_BUF, qla27xx_fwdt_entry_t275 },
+ { ENTRY_TYPE_CONDITIONAL, qla27xx_fwdt_entry_t276 },
+ { ENTRY_TYPE_RDPEPREG, qla27xx_fwdt_entry_t277 },
+ { ENTRY_TYPE_WRPEPREG, qla27xx_fwdt_entry_t278 },
+ { -1, qla27xx_fwdt_entry_other }
};
-static inline int (*qla27xx_find_entry(uint type))
- (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
+static inline
+typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
{
- struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
+ typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;
while (list->type < type)
list++;
@@ -834,14 +889,6 @@ static inline int (*qla27xx_find_entry(uint type))
return qla27xx_fwdt_entry_other;
}
-static inline void *
-qla27xx_next_entry(void *p)
-{
- struct qla27xx_fwdt_entry *ent = p;
-
- return p + ent->hdr.entry_size;
-}
-
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
@@ -852,18 +899,16 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd01a,
"%s: entry count %lx\n", __func__, count);
while (count--) {
- if (buf && *len >= vha->hw->fw_dump_len)
+ ent = qla27xx_find_entry(ent->hdr.type)(vha, ent, buf, len);
+ if (!ent)
break;
- if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
- break;
- ent = qla27xx_next_entry(ent);
}
if (count)
ql_dbg(ql_dbg_misc, vha, 0xd018,
"%s: entry residual count (%lx)\n", __func__, count);
- if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
+ if (ent)
ql_dbg(ql_dbg_misc, vha, 0xd019,
"%s: missing end entry (%lx)\n", __func__, count);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index 141c1c5e73f4..5c2c2a8a19c4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -54,6 +54,9 @@ struct __packed qla27xx_fwdt_template {
#define ENTRY_TYPE_PCICFG 273
#define ENTRY_TYPE_GET_SHADOW 274
#define ENTRY_TYPE_WRITE_BUF 275
+#define ENTRY_TYPE_CONDITIONAL 276
+#define ENTRY_TYPE_RDPEPREG 277
+#define ENTRY_TYPE_WRPEPREG 278
#define CAPTURE_FLAG_PHYS_ONLY BIT_0
#define CAPTURE_FLAG_PHYS_VIRT BIT_1
@@ -62,8 +65,8 @@ struct __packed qla27xx_fwdt_template {
struct __packed qla27xx_fwdt_entry {
struct __packed {
- uint32_t entry_type;
- uint32_t entry_size;
+ uint32_t type;
+ uint32_t size;
uint32_t reserved_1;
uint8_t capture_flags;
@@ -199,6 +202,24 @@ struct __packed qla27xx_fwdt_entry {
uint32_t length;
uint8_t buffer[];
} t275;
+
+ struct __packed {
+ uint32_t cond1;
+ uint32_t cond2;
+ } t276;
+
+ struct __packed {
+ uint32_t cmd_addr;
+ uint32_t wr_cmd_data;
+ uint32_t data_addr;
+ } t277;
+
+ struct __packed {
+ uint32_t cmd_addr;
+ uint32_t wr_cmd_data;
+ uint32_t data_addr;
+ uint32_t wr_data;
+ } t278;
};
};
@@ -206,6 +227,7 @@ struct __packed qla27xx_fwdt_entry {
#define T262_RAM_AREA_EXTERNAL_RAM 2
#define T262_RAM_AREA_SHARED_RAM 3
#define T262_RAM_AREA_DDR_RAM 4
+#define T262_RAM_AREA_MISC 5
#define T263_QUEUE_TYPE_REQ 1
#define T263_QUEUE_TYPE_RSP 2
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ca7945cb959b..0690dac24081 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.12-k"
+#define QLA2XXX_VERSION "10.00.00.14-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 283e6b80abb5..8a3075d17c63 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -420,26 +420,6 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
return qlt_rdy_to_xfer(cmd);
}
-static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
-{
- unsigned long flags;
- /*
- * Check for WRITE_PENDING status to determine if we need to wait for
- * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
- */
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
- se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
- wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
- 50);
- return 0;
- }
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-
- return 0;
-}
-
static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
{
return;
@@ -537,15 +517,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
- /*
- * Check if se_cmd has already been aborted via LUN_RESET, and
- * waiting upon completion in tcm_qla2xxx_write_pending_status()
- */
- if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
- complete(&cmd->se_cmd.t_transport_stop_comp);
- return;
- }
-
switch (cmd->dif_err_code) {
case DIF_ERR_GRD:
cmd->se_cmd.pi_err =
@@ -1902,7 +1873,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = tcm_qla2xxx_write_pending,
- .write_pending_status = tcm_qla2xxx_write_pending_status,
.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
.get_cmd_state = tcm_qla2xxx_get_cmd_state,
.queue_data_in = tcm_qla2xxx_queue_data_in,
@@ -1943,7 +1913,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = tcm_qla2xxx_write_pending,
- .write_pending_status = tcm_qla2xxx_write_pending_status,
.set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
.get_cmd_state = tcm_qla2xxx_get_cmd_state,
.queue_data_in = tcm_qla2xxx_queue_data_in,
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a77bfb224248..6e4f4931ae17 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2875,7 +2875,7 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
chap_tbl.secret_len);
}
}
- /* allow fall-through */
+ /* fall through */
default:
return iscsi_session_get_param(cls_sess, param, buf);
}
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index e35ce762d454..0e22512bd3e4 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1314,8 +1314,7 @@ static int qpti_sbus_probe(struct platform_device *op)
qpti->qhost = host;
qpti->op = op;
qpti->qpti_id = nqptis;
- strcpy(qpti->prom_name, op->dev.of_node->name);
- qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
+ qpti->is_pti = !of_node_name_eq(op->dev.of_node, "QLGC,isp");
if (qpti_map_regs(qpti) < 0)
goto fail_unlink;
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index 884ad72ade57..2b6374e08a7d 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -364,7 +364,6 @@ struct qlogicpti {
int qpti_id;
int scsi_id;
int prom_node;
- char prom_name[64];
int irq;
char differential, ultra, clock;
unsigned char bursts;
@@ -379,7 +378,7 @@ struct qlogicpti {
#define SREG_IMASK 0x0c /* Interrupt level */
#define SREG_SPMASK 0x03 /* Mask for switch pack */
unsigned char swsreg;
- unsigned int
+ unsigned int
gotirq : 1, /* this instance got an irq */
is_pti : 1; /* Non-zero if this is a PTI board. */
};
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7675ff0ca2ea..99a7b9f520ae 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -175,22 +175,6 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
#endif
/**
- * scsi_cmd_get_serial - Assign a serial number to a command
- * @host: the scsi host
- * @cmd: command to assign serial number to
- *
- * Description: a serial number identifies a request for error recovery
- * and debugging purposes. Protected by the Host_Lock of host.
- */
-void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
-{
- cmd->serial_number = host->cmd_serial_number++;
- if (cmd->serial_number == 0)
- cmd->serial_number = host->cmd_serial_number++;
-}
-EXPORT_SYMBOL(scsi_cmd_get_serial);
-
-/**
* scsi_finish_command - cleanup and pass command back to upper layer
* @cmd: the command
*
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index e27f4df24021..2740a90501a0 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -76,6 +76,7 @@ static const char *sdebug_version_date = "20190125";
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
+#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
@@ -351,12 +352,11 @@ enum sdeb_opcode_index {
SDEB_I_ATA_PT = 22, /* 12, 16 */
SDEB_I_SEND_DIAG = 23,
SDEB_I_UNMAP = 24,
- SDEB_I_XDWRITEREAD = 25, /* 10 only */
- SDEB_I_WRITE_BUFFER = 26,
- SDEB_I_WRITE_SAME = 27, /* 10, 16 */
- SDEB_I_SYNC_CACHE = 28, /* 10, 16 */
- SDEB_I_COMP_WRITE = 29,
- SDEB_I_LAST_ELEMENT = 30, /* keep this last (previous + 1) */
+ SDEB_I_WRITE_BUFFER = 25,
+ SDEB_I_WRITE_SAME = 26, /* 10, 16 */
+ SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
+ SDEB_I_COMP_WRITE = 28,
+ SDEB_I_LAST_ELEMENT = 29, /* keep this last (previous + 1) */
};
@@ -377,7 +377,7 @@ static const unsigned char opcode_ind_arr[256] = {
/* 0x40; 0x40->0x5f: 10 byte cdbs */
0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
- 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
+ 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
SDEB_I_RELEASE,
0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
@@ -430,7 +430,6 @@ static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
-static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -600,9 +599,6 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
{10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
- {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
- NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */
{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* WRITE_BUFFER */
@@ -618,7 +614,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
-/* 30 */
+/* 29 */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
@@ -673,6 +669,7 @@ static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
+static bool sdebug_wp;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */
@@ -836,7 +833,8 @@ static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
-static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg)
{
if (sdebug_verbose) {
if (0x1261 == cmd)
@@ -1010,16 +1008,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
int arr_len)
{
int act_len;
- struct scsi_data_buffer *sdb = scsi_in(scp);
+ struct scsi_data_buffer *sdb = &scp->sdb;
if (!sdb->length)
return 0;
- if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
+ if (scp->sc_data_direction != DMA_FROM_DEVICE)
return DID_ERROR << 16;
act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
arr, arr_len);
- sdb->resid = scsi_bufflen(scp) - act_len;
+ scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
return 0;
}
@@ -1033,20 +1031,21 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
int arr_len, unsigned int off_dst)
{
int act_len, n;
- struct scsi_data_buffer *sdb = scsi_in(scp);
+ struct scsi_data_buffer *sdb = &scp->sdb;
off_t skip = off_dst;
if (sdb->length <= off_dst)
return 0;
- if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
+ if (scp->sc_data_direction != DMA_FROM_DEVICE)
return DID_ERROR << 16;
act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
arr, arr_len, skip);
pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
- __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
+ __func__, off_dst, scsi_bufflen(scp), act_len,
+ scsi_get_resid(scp));
n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
- sdb->resid = min(sdb->resid, n);
+ scsi_set_resid(scp, min(scsi_get_resid(scp), n));
return 0;
}
@@ -1058,7 +1057,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
{
if (!scsi_bufflen(scp))
return 0;
- if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
+ if (scp->sc_data_direction != DMA_TO_DEVICE)
return -1;
return scsi_sg_copy_to_buffer(scp, arr, arr_len);
@@ -2146,9 +2145,11 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
(devip->target * 1000) - 3;
/* for disks set DPOFUA bit and clear write protect (WP) bit */
- if (is_disk)
+ if (is_disk) {
dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
- else
+ if (sdebug_wp)
+ dev_spec |= 0x80;
+ } else
dev_spec = 0x0;
if (msense_6) {
arr[2] = dev_spec;
@@ -2331,6 +2332,10 @@ static int resp_mode_select(struct scsi_cmnd *scp,
if (ctrl_m_pg[1] == arr[off + 1]) {
memcpy(ctrl_m_pg + 2, arr + off + 2,
sizeof(ctrl_m_pg) - 2);
+ if (ctrl_m_pg[4] & 0x8)
+ sdebug_wp = true;
+ else
+ sdebug_wp = false;
sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
goto set_mode_changed_ua;
}
@@ -2455,8 +2460,8 @@ static int resp_log_sense(struct scsi_cmnd *scp,
min(len, SDEBUG_MAX_INQ_ARR_SZ));
}
-static int check_device_access_params(struct scsi_cmnd *scp,
- unsigned long long lba, unsigned int num)
+static inline int check_device_access_params(struct scsi_cmnd *scp,
+ unsigned long long lba, unsigned int num, bool write)
{
if (lba + num > sdebug_capacity) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
@@ -2468,6 +2473,10 @@ static int check_device_access_params(struct scsi_cmnd *scp,
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
+ if (write && unlikely(sdebug_wp)) {
+ mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
+ return check_condition_result;
+ }
return 0;
}
@@ -2477,21 +2486,19 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
{
int ret;
u64 block, rest = 0;
- struct scsi_data_buffer *sdb;
+ struct scsi_data_buffer *sdb = &scmd->sdb;
enum dma_data_direction dir;
if (do_write) {
- sdb = scsi_out(scmd);
dir = DMA_TO_DEVICE;
write_since_sync = true;
} else {
- sdb = scsi_in(scmd);
dir = DMA_FROM_DEVICE;
}
if (!sdb->length)
return 0;
- if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
+ if (scmd->sc_data_direction != dir)
return -1;
block = do_div(lba, sdebug_store_sectors);
@@ -2728,18 +2735,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else
sqcp = NULL;
- /* inline check_device_access_params() */
- if (unlikely(lba + num > sdebug_capacity)) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
- return check_condition_result;
- }
- /* transfer length excessive (tie in to block limits VPD page) */
- if (unlikely(num > sdebug_store_sectors)) {
- /* needs work to find which cdb byte 'num' comes from */
- mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
- return check_condition_result;
- }
-
+ ret = check_device_access_params(scp, lba, num, false);
+ if (ret)
+ return ret;
if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
(lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
((lba + num) > sdebug_medium_error_start))) {
@@ -2774,7 +2772,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (unlikely(ret == -1))
return DID_ERROR << 16;
- scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
+ scsi_set_resid(scp, scsi_bufflen(scp) - ret);
if (unlikely(sqcp)) {
if (sqcp->inj_recovered) {
@@ -3031,19 +3029,9 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
}
-
- /* inline check_device_access_params() */
- if (unlikely(lba + num > sdebug_capacity)) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
- return check_condition_result;
- }
- /* transfer length excessive (tie in to block limits VPD page) */
- if (unlikely(num > sdebug_store_sectors)) {
- /* needs work to find which cdb byte 'num' comes from */
- mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
- return check_condition_result;
- }
-
+ ret = check_device_access_params(scp, lba, num, true);
+ if (ret)
+ return ret;
write_lock_irqsave(&atomic_rw, iflags);
/* DIX + T10 DIF */
@@ -3182,7 +3170,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
my_name, __func__, k, lba, num, sg_off);
if (num == 0)
continue;
- ret = check_device_access_params(scp, lba, num);
+ ret = check_device_access_params(scp, lba, num, true);
if (ret)
goto err_out_unlock;
num_by = num * lb_size;
@@ -3268,7 +3256,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u64 block, lbaa;
u8 *fs1p;
- ret = check_device_access_params(scp, lba, num);
+ ret = check_device_access_params(scp, lba, num, true);
if (ret)
return ret;
@@ -3440,18 +3428,9 @@ static int resp_comp_write(struct scsi_cmnd *scp,
(cmd[1] & 0xe0) == 0)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
-
- /* inline check_device_access_params() */
- if (lba + num > sdebug_capacity) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
- return check_condition_result;
- }
- /* transfer length excessive (tie in to block limits VPD page) */
- if (num > sdebug_store_sectors) {
- /* needs work to find which cdb byte 'num' comes from */
- mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
- return check_condition_result;
- }
+ ret = check_device_access_params(scp, lba, num, false);
+ if (ret)
+ return ret;
dnum = 2 * num;
arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
if (NULL == arr) {
@@ -3534,7 +3513,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
unsigned int num = get_unaligned_be32(&desc[i].blocks);
- ret = check_device_access_params(scp, lba, num);
+ ret = check_device_access_params(scp, lba, num, true);
if (ret)
goto out;
@@ -3567,7 +3546,7 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
if (alloc_len < 24)
return 0;
- ret = check_device_access_params(scp, lba, 1);
+ ret = check_device_access_params(scp, lba, 1, false);
if (ret)
return ret;
@@ -3719,68 +3698,6 @@ static int resp_report_luns(struct scsi_cmnd *scp,
return res;
}
-static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
- unsigned int num, struct sdebug_dev_info *devip)
-{
- int j;
- unsigned char *kaddr, *buf;
- unsigned int offset;
- struct scsi_data_buffer *sdb = scsi_in(scp);
- struct sg_mapping_iter miter;
-
- /* better not to use temporary buffer. */
- buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
- if (!buf) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
- INSUFF_RES_ASCQ);
- return check_condition_result;
- }
-
- scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
-
- offset = 0;
- sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
- SG_MITER_ATOMIC | SG_MITER_TO_SG);
-
- while (sg_miter_next(&miter)) {
- kaddr = miter.addr;
- for (j = 0; j < miter.length; j++)
- *(kaddr + j) ^= *(buf + offset + j);
-
- offset += miter.length;
- }
- sg_miter_stop(&miter);
- kfree(buf);
-
- return 0;
-}
-
-static int resp_xdwriteread_10(struct scsi_cmnd *scp,
- struct sdebug_dev_info *devip)
-{
- u8 *cmd = scp->cmnd;
- u64 lba;
- u32 num;
- int errsts;
-
- if (!scsi_bidi_cmnd(scp)) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
- INSUFF_RES_ASCQ);
- return check_condition_result;
- }
- errsts = resp_read_dt0(scp, devip);
- if (errsts)
- return errsts;
- if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
- errsts = resp_write_dt0(scp, devip);
- if (errsts)
- return errsts;
- }
- lba = get_unaligned_be32(cmd + 2);
- num = get_unaligned_be16(cmd + 7);
- return resp_xdwriteread(scp, lba, num, devip);
-}
-
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
u32 tag = blk_mq_unique_tag(cmnd->request);
@@ -3954,7 +3871,6 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
if (sdebug_verbose)
pr_info("slave_alloc <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
- blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
return 0;
}
@@ -4554,6 +4470,7 @@ module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
S_IRUGO | S_IWUSR);
+module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
S_IRUGO | S_IWUSR);
@@ -4613,6 +4530,7 @@ MODULE_PARM_DESC(uuid_ctl,
"1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
#define SDEBUG_INFO_LEN 256
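The new scsi_debug wp parameter threads write protection through three spots visible above: MODE SENSE reports it (bit 7 of the device-specific byte), MODE SELECT's control page can toggle it, and the consolidated check_device_access_params() now fails writes with DATA PROTECT / WRITE PROTECTED (ASC 0x27, ASCQ 0x2). Loading the module with "modprobe scsi_debug wp=1" should therefore present a read-only disk. A userspace model of the consolidated gate (capacities and names are illustrative; the sense constants mirror the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LBA_OUT_OF_RANGE        0x21
    #define INVALID_FIELD_IN_CDB    0x24
    #define WRITE_PROTECTED         0x27

    static uint64_t capacity = 1 << 20;       /* blocks */
    static uint64_t store_sectors = 1 << 16;
    static bool wp = true;                    /* scsi_debug wp=1 */

    static int check_access(uint64_t lba, uint32_t num, bool write, int *asc)
    {
            if (lba + num > capacity) {
                    *asc = LBA_OUT_OF_RANGE;
                    return -1;
            }
            if (num > store_sectors) {
                    *asc = INVALID_FIELD_IN_CDB;
                    return -1;
            }
            if (write && wp) {
                    *asc = WRITE_PROTECTED;   /* sense key DATA PROTECT */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            int asc;

            if (check_access(0, 8, true, &asc))
                    printf("write rejected, ASC 0x%02x\n", asc);
            return 0;
    }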
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c4cbfd07b916..a08ff3bd6310 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -238,6 +238,7 @@ static struct {
{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 5a58cbf3a75d..c14006ac98f9 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"NETAPP", "INF-01-00", "rdac", },
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
+ {"LENOVO", "DE_Series", "rdac", },
{NULL, NULL, NULL },
};
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 16eef068e9e9..1b8378f36139 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -965,7 +965,6 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
ses->cmnd = scmd->cmnd;
ses->data_direction = scmd->sc_data_direction;
ses->sdb = scmd->sdb;
- ses->next_rq = scmd->request->next_rq;
ses->result = scmd->result;
ses->underflow = scmd->underflow;
ses->prot_op = scmd->prot_op;
@@ -976,7 +975,6 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
scmd->cmnd = ses->eh_cmnd;
memset(scmd->cmnd, 0, BLK_MAX_CDB);
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
- scmd->request->next_rq = NULL;
scmd->result = 0;
if (sense_bytes) {
@@ -1029,7 +1027,6 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
scmd->cmnd = ses->cmnd;
scmd->sc_data_direction = ses->data_direction;
scmd->sdb = ses->sdb;
- scmd->request->next_rq = ses->next_rq;
scmd->result = ses->result;
scmd->underflow = ses->underflow;
scmd->prot_op = ses->prot_op;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ca5fd3ae81f8..07dfc17d4824 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -316,7 +316,6 @@ EXPORT_SYMBOL(__scsi_execute);
*/
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
- cmd->serial_number = 0;
scsi_set_resid(cmd, 0);
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (cmd->cmd_len == 0)
@@ -556,15 +555,8 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
- struct scsi_data_buffer *sdb;
-
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table, true);
- if (cmd->request->next_rq) {
- sdb = cmd->request->next_rq->special;
- if (sdb)
- sg_free_table_chained(&sdb->table, true);
- }
if (scsi_prot_sg_count(cmd))
sg_free_table_chained(&cmd->prot_sdb->table, true);
}
@@ -578,7 +570,7 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
- unsigned int bytes, unsigned int bidi_bytes)
+ unsigned int bytes)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = cmd->device;
@@ -587,21 +579,23 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_update_request(req, error, bytes))
return true;
- /* Bidi request must be completed as a whole */
- if (unlikely(bidi_bytes) &&
- blk_update_request(req->next_rq, error, bidi_bytes))
- return true;
-
if (blk_queue_add_random(q))
add_disk_randomness(req->rq_disk);
if (!blk_rq_is_scsi(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
cmd->flags &= ~SCMD_INITIALIZED;
- destroy_rcu_head(&cmd->rcu);
}
/*
+ * Calling rcu_barrier() is not necessary here because the
+ * SCSI error handler guarantees that the function called by
+ * call_rcu() has been called before scsi_end_request() is
+ * called.
+ */
+ destroy_rcu_head(&cmd->rcu);
+
+ /*
* In the MQ case the command gets freed by __blk_mq_end_request,
* so we have to do all cleanup that depends on it earlier.
*
@@ -817,7 +811,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
scsi_print_command(cmd);
}
}
- if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req), 0))
+ if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
return;
/*FALLTHRU*/
case ACTION_REPREP:
@@ -951,30 +945,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* scsi_result_to_blk_status may have reset the host_byte
*/
scsi_req(req)->result = cmd->result;
- scsi_req(req)->resid_len = scsi_get_resid(cmd);
-
- if (unlikely(scsi_bidi_cmnd(cmd))) {
- /*
- * Bidi commands Must be complete as a whole,
- * both sides at once.
- */
- scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
- if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
- blk_rq_bytes(req->next_rq)))
- WARN_ONCE(true,
- "Bidi command with remaining bytes");
- return;
- }
- }
-
- /* no bidi support yet, other than in pass-through */
- if (unlikely(blk_bidi_rq(req))) {
- WARN_ONCE(true, "Only support bidi command in passthrough");
- scmd_printk(KERN_ERR, cmd, "Killing bidi command\n");
- if (scsi_end_request(req, BLK_STS_IOERR, blk_rq_bytes(req),
- blk_rq_bytes(req->next_rq)))
- WARN_ONCE(true, "Bidi command with remaining bytes");
- return;
}
/*
@@ -991,13 +961,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* to retry code. Fast path should return in this block.
*/
if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
- if (likely(!scsi_end_request(req, blk_stat, good_bytes, 0)))
+ if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
return; /* no bytes remaining */
}
/* Kill remainder if no retries. */
if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
- if (scsi_end_request(req, blk_stat, blk_rq_bytes(req), 0))
+ if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
WARN_ONCE(true,
"Bytes remaining after failed, no-retry command");
return;
@@ -1059,12 +1029,6 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
if (ret)
return ret;
- if (blk_bidi_rq(rq)) {
- ret = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
- if (ret)
- goto out_free_sgtables;
- }
-
if (blk_integrity_rq(rq)) {
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
int ivecs, count;
@@ -1608,10 +1572,7 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
scsi_init_command(sdev, cmd);
- req->special = cmd;
-
cmd->request = req;
-
cmd->tag = req->tag;
cmd->prot_op = SCSI_PROT_NORMAL;
@@ -1625,17 +1586,6 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
(struct scatterlist *)(cmd->prot_sdb + 1);
}
- if (blk_bidi_rq(req)) {
- struct request *next_rq = req->next_rq;
- struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
-
- memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
- bidi_sdb->table.sgl =
- (struct scatterlist *)(bidi_sdb + 1);
-
- next_rq->special = bidi_sdb;
- }
-
blk_mq_start_request(req);
return scsi_setup_cmnd(sdev, req);
@@ -1713,13 +1663,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
- clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = scsi_mq_prep_fn(req);
if (ret != BLK_STS_OK)
goto out_dec_host_busy;
req->rq_flags |= RQF_DONTPREP;
} else {
+ clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
blk_mq_start_request(req);
}
@@ -1756,8 +1706,12 @@ out_put_budget:
ret = BLK_STS_DEV_RESOURCE;
break;
default:
+ if (unlikely(!scsi_device_online(sdev)))
+ scsi_req(req)->result = DID_NO_CONNECT << 16;
+ else
+ scsi_req(req)->result = DID_ERROR << 16;
/*
- * Make sure to release all allocated ressources when
+ * Make sure to release all allocated resources when
* we hit an error, as we will never see this command
* again.
*/
@@ -2598,8 +2552,10 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- sdev->quiesced_by = NULL;
- blk_clear_pm_only(sdev->request_queue);
+ if (sdev->quiesced_by) {
+ sdev->quiesced_by = NULL;
+ blk_clear_pm_only(sdev->request_queue);
+ }
if (sdev->sdev_state == SDEV_QUIESCE)
scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index dd0d516f65e2..53380e07b40e 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -220,7 +220,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!sdev)
goto out;
@@ -788,7 +788,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
*/
sdev->inquiry = kmemdup(inq_result,
max_t(size_t, sdev->inquiry_len, 36),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (sdev->inquiry == NULL)
return SCSI_SCAN_NO_RESPONSE;
@@ -1079,7 +1079,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (!sdev)
goto out;
- result = kmalloc(result_len, GFP_ATOMIC |
+ result = kmalloc(result_len, GFP_KERNEL |
((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
if (!result)
goto out_free_sdev;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6a9040faed00..3b119ca0cc0c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, state);
+ /*
+ * If the device state changes to SDEV_RUNNING, we need to run
+ * the queue to avoid I/O hang.
+ */
+ if (ret == 0 && state == SDEV_RUNNING)
+ blk_mq_run_hw_queues(sdev->request_queue, true);
mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL;
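The store_state_field() hunk pairs the state change with blk_mq_run_hw_queues() because, as its comment says, requests that queued up while the device was not RUNNING sit idle, and flipping the state back does not by itself dispatch them. A toy model of why the explicit kick is needed:

    #include <stdio.h>

    enum state { BLOCKED, RUNNING };

    struct queue {
            enum state state;
            int pending;
    };

    static void run_queue(struct queue *q)
    {
            if (q->state != RUNNING)
                    return;
            while (q->pending) {
                    q->pending--;
                    puts("dispatched");
            }
    }

    static void submit(struct queue *q)
    {
            q->pending++;
            run_queue(q);           /* no-op while blocked */
    }

    int main(void)
    {
            struct queue q = { BLOCKED, 0 };

            submit(&q);             /* sits on the queue */
            q.state = RUNNING;      /* state change alone: still pending */
            run_queue(&q);          /* the blk_mq_run_hw_queues() analogue */
            return 0;
    }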
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0508831d6fb9..0a82e93566dc 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
/* flush running scans then delete devices */
flush_work(&session->scan_work);
+ /* flush running unbind operations */
+ flush_work(&session->unbind_work);
__iscsi_unbind_session(&session->unbind_work);
/* hw iscsi may not have removed all connections from session */
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 692b46937e52..60f1a81d2034 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -213,7 +213,6 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
to_sas_host_attrs(shost)->q = q;
}
- blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
return 0;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5464d467e23e..2b2bc4b49d78 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -665,6 +665,68 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
}
#endif /* CONFIG_BLK_SED_OPAL */
+/*
+ * Look up the DIX operation based on whether the command is read or
+ * write and whether dix and dif are enabled.
+ */
+static unsigned int sd_prot_op(bool write, bool dix, bool dif)
+{
+ /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
+ static const unsigned int ops[] = { /* wrt dix dif */
+ SCSI_PROT_NORMAL, /* 0 0 0 */
+ SCSI_PROT_READ_STRIP, /* 0 0 1 */
+ SCSI_PROT_READ_INSERT, /* 0 1 0 */
+ SCSI_PROT_READ_PASS, /* 0 1 1 */
+ SCSI_PROT_NORMAL, /* 1 0 0 */
+ SCSI_PROT_WRITE_INSERT, /* 1 0 1 */
+ SCSI_PROT_WRITE_STRIP, /* 1 1 0 */
+ SCSI_PROT_WRITE_PASS, /* 1 1 1 */
+ };
+
+ return ops[write << 2 | dix << 1 | dif];
+}
+
+/*
+ * Returns a mask of the protection flags that are valid for a given DIX
+ * operation.
+ */
+static unsigned int sd_prot_flag_mask(unsigned int prot_op)
+{
+ static const unsigned int flag_mask[] = {
+ [SCSI_PROT_NORMAL] = 0,
+
+ [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT,
+
+ [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+
+ [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+
+ [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_REF_INCREMENT,
+
+ [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+
+ [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI |
+ SCSI_PROT_GUARD_CHECK |
+ SCSI_PROT_REF_CHECK |
+ SCSI_PROT_REF_INCREMENT |
+ SCSI_PROT_IP_CHECKSUM,
+ };
+
+ return flag_mask[prot_op];
+}
+
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
@@ -761,8 +823,8 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
- u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
- u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+ u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+ u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
unsigned int data_len = 24;
char *buf;
@@ -781,13 +843,12 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
buf = page_address(rq->special_vec.bv_page);
put_unaligned_be16(6 + 16, &buf[0]);
put_unaligned_be16(16, &buf[2]);
- put_unaligned_be64(sector, &buf[8]);
- put_unaligned_be32(nr_sectors, &buf[16]);
+ put_unaligned_be64(lba, &buf[8]);
+ put_unaligned_be32(nr_blocks, &buf[16]);
cmd->allowed = SD_MAX_RETRIES;
cmd->transfersize = data_len;
rq->timeout = SD_TIMEOUT;
- scsi_req(rq)->resid_len = data_len;
return scsi_init_io(cmd);
}
@@ -797,8 +858,8 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
- u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
- u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+ u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+ u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
@@ -813,13 +874,12 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
cmd->cmnd[0] = WRITE_SAME_16;
if (unmap)
cmd->cmnd[1] = 0x8; /* UNMAP */
- put_unaligned_be64(sector, &cmd->cmnd[2]);
- put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
+ put_unaligned_be64(lba, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
cmd->allowed = SD_MAX_RETRIES;
cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
- scsi_req(rq)->resid_len = data_len;
return scsi_init_io(cmd);
}
@@ -829,8 +889,8 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
- u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
- u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+ u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+ u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
@@ -845,13 +905,12 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
cmd->cmnd[0] = WRITE_SAME;
if (unmap)
cmd->cmnd[1] = 0x8; /* UNMAP */
- put_unaligned_be32(sector, &cmd->cmnd[2]);
- put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
+ put_unaligned_be32(lba, &cmd->cmnd[2]);
+ put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
cmd->allowed = SD_MAX_RETRIES;
cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
- scsi_req(rq)->resid_len = data_len;
return scsi_init_io(cmd);
}
@@ -861,8 +920,8 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
struct request *rq = cmd->request;
struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
- u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+ u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+ u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
if (!(rq->cmd_flags & REQ_NOUNMAP)) {
switch (sdkp->zeroing_mode) {
@@ -876,7 +935,7 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
if (sdp->no_write_same)
return BLK_STS_TARGET;
- if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
+ if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
return sd_setup_write_same16_cmnd(cmd, false);
return sd_setup_write_same10_cmnd(cmd, false);
@@ -957,9 +1016,8 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
struct bio *bio = rq->bio;
- sector_t sector = blk_rq_pos(rq);
- unsigned int nr_sectors = blk_rq_sectors(rq);
- unsigned int nr_bytes = blk_rq_bytes(rq);
+ u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+ u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
blk_status_t ret;
if (sdkp->device->no_write_same)
@@ -967,21 +1025,18 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
- sector >>= ilog2(sdp->sector_size) - 9;
- nr_sectors >>= ilog2(sdp->sector_size) - 9;
-
rq->timeout = SD_WRITE_SAME_TIMEOUT;
- if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
+ if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
cmd->cmd_len = 16;
cmd->cmnd[0] = WRITE_SAME_16;
- put_unaligned_be64(sector, &cmd->cmnd[2]);
- put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
+ put_unaligned_be64(lba, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
} else {
cmd->cmd_len = 10;
cmd->cmnd[0] = WRITE_SAME;
- put_unaligned_be32(sector, &cmd->cmnd[2]);
- put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
+ put_unaligned_be32(lba, &cmd->cmnd[2]);
+ put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
}
cmd->transfersize = sdp->sector_size;
@@ -999,7 +1054,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
*/
rq->__data_len = sdp->sector_size;
ret = scsi_init_io(cmd);
- rq->__data_len = nr_bytes;
+ rq->__data_len = blk_rq_bytes(rq);
return ret;
}
@@ -1020,224 +1075,186 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
return BLK_STS_OK;
}
-static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
+static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
+ sector_t lba, unsigned int nr_blocks,
+ unsigned char flags)
{
- struct request *rq = SCpnt->request;
- struct scsi_device *sdp = SCpnt->device;
- struct gendisk *disk = rq->rq_disk;
- struct scsi_disk *sdkp = scsi_disk(disk);
- sector_t block = blk_rq_pos(rq);
+ cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
+ if (unlikely(cmd->cmnd == NULL))
+ return BLK_STS_RESOURCE;
+
+ cmd->cmd_len = SD_EXT_CDB_SIZE;
+ memset(cmd->cmnd, 0, cmd->cmd_len);
+
+ cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
+ cmd->cmnd[7] = 0x18; /* Additional CDB len */
+ cmd->cmnd[9] = write ? WRITE_32 : READ_32;
+ cmd->cmnd[10] = flags;
+ put_unaligned_be64(lba, &cmd->cmnd[12]);
+ put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
+ put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);
+
+ return BLK_STS_OK;
+}
+
+static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
+ sector_t lba, unsigned int nr_blocks,
+ unsigned char flags)
+{
+ cmd->cmd_len = 16;
+ cmd->cmnd[0] = write ? WRITE_16 : READ_16;
+ cmd->cmnd[1] = flags;
+ cmd->cmnd[14] = 0;
+ cmd->cmnd[15] = 0;
+ put_unaligned_be64(lba, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
+
+ return BLK_STS_OK;
+}
+
+static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
+ sector_t lba, unsigned int nr_blocks,
+ unsigned char flags)
+{
+ cmd->cmd_len = 10;
+ cmd->cmnd[0] = write ? WRITE_10 : READ_10;
+ cmd->cmnd[1] = flags;
+ cmd->cmnd[6] = 0;
+ cmd->cmnd[9] = 0;
+ put_unaligned_be32(lba, &cmd->cmnd[2]);
+ put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
+
+ return BLK_STS_OK;
+}
+
+static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
+ sector_t lba, unsigned int nr_blocks,
+ unsigned char flags)
+{
+ /* Avoid that 0 blocks gets translated into 256 blocks. */
+ if (WARN_ON_ONCE(nr_blocks == 0))
+ return BLK_STS_IOERR;
+
+ if (unlikely(flags & 0x8)) {
+ /*
+ * This happens only if this drive failed 10byte rw
+ * command with ILLEGAL_REQUEST during operation and
+ * thus turned off use_10_for_rw.
+ */
+ scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
+ return BLK_STS_IOERR;
+ }
+
+ cmd->cmd_len = 6;
+ cmd->cmnd[0] = write ? WRITE_6 : READ_6;
+ cmd->cmnd[1] = (lba >> 16) & 0x1f;
+ cmd->cmnd[2] = (lba >> 8) & 0xff;
+ cmd->cmnd[3] = lba & 0xff;
+ cmd->cmnd[4] = nr_blocks;
+ cmd->cmnd[5] = 0;
+
+ return BLK_STS_OK;
+}
+
+static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_device *sdp = cmd->device;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sector_t threshold;
- unsigned int this_count = blk_rq_sectors(rq);
- unsigned int dif, dix;
- unsigned char protect;
+ unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
+ bool dif, dix;
+ unsigned int mask = logical_to_sectors(sdp, 1) - 1;
+ bool write = rq_data_dir(rq) == WRITE;
+ unsigned char protect, fua;
blk_status_t ret;
- ret = scsi_init_io(SCpnt);
+ ret = scsi_init_io(cmd);
if (ret != BLK_STS_OK)
return ret;
- WARN_ON_ONCE(SCpnt != rq->special);
- SCSI_LOG_HLQUEUE(1,
- scmd_printk(KERN_INFO, SCpnt,
- "%s: block=%llu, count=%d\n",
- __func__, (unsigned long long)block, this_count));
-
- if (!sdp || !scsi_device_online(sdp) ||
- block + blk_rq_sectors(rq) > get_capacity(disk)) {
- SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
- "Finishing %u sectors\n",
- blk_rq_sectors(rq)));
- SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
- "Retry with 0x%p\n", SCpnt));
+ if (!scsi_device_online(sdp) || sdp->changed) {
+ scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
return BLK_STS_IOERR;
}
- if (sdp->changed) {
- /*
- * quietly refuse to do anything to a changed disc until
- * the changed bit has been reset
- */
- /* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
+ scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
+ return BLK_STS_IOERR;
+ }
+
+ if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
+ scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
return BLK_STS_IOERR;
}
/*
- * Some SD card readers can't handle multi-sector accesses which touch
- * the last one or two hardware sectors. Split accesses as needed.
+ * Some SD card readers can't handle accesses which touch the
+ * last one or two logical blocks. Split accesses as needed.
*/
- threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
- (sdp->sector_size / 512);
+ threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
- if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
- if (block < threshold) {
+ if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
+ if (lba < threshold) {
/* Access up to the threshold but not beyond */
- this_count = threshold - block;
+ nr_blocks = threshold - lba;
} else {
- /* Access only a single hardware sector */
- this_count = sdp->sector_size / 512;
+ /* Access only a single logical block */
+ nr_blocks = 1;
}
}
- SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
- (unsigned long long)block));
+ fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
+ dix = scsi_prot_sg_count(cmd);
+ dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
- /*
- * If we have a 1K hardware sectorsize, prevent access to single
- * 512 byte sectors. In theory we could handle this - in fact
- * the scsi cdrom driver must be able to handle this because
- * we typically use 1K blocksizes, and cdroms typically have
- * 2K hardware sectorsizes. Of course, things are simpler
- * with the cdrom, since it is read-only. For performance
- * reasons, the filesystems should be able to handle this
- * and not force the scsi disk driver to use bounce buffers
- * for this.
- */
- if (sdp->sector_size == 1024) {
- if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
- scmd_printk(KERN_ERR, SCpnt,
- "Bad block number requested\n");
- return BLK_STS_IOERR;
- }
- block = block >> 1;
- this_count = this_count >> 1;
- }
- if (sdp->sector_size == 2048) {
- if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
- scmd_printk(KERN_ERR, SCpnt,
- "Bad block number requested\n");
- return BLK_STS_IOERR;
- }
- block = block >> 2;
- this_count = this_count >> 2;
- }
- if (sdp->sector_size == 4096) {
- if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
- scmd_printk(KERN_ERR, SCpnt,
- "Bad block number requested\n");
- return BLK_STS_IOERR;
- }
- block = block >> 3;
- this_count = this_count >> 3;
- }
- if (rq_data_dir(rq) == WRITE) {
- SCpnt->cmnd[0] = WRITE_6;
-
- if (blk_integrity_rq(rq))
- t10_pi_prepare(SCpnt->request, sdkp->protection_type);
-
- } else if (rq_data_dir(rq) == READ) {
- SCpnt->cmnd[0] = READ_6;
- } else {
- scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq));
- return BLK_STS_IOERR;
- }
-
- SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
- "%s %d/%u 512 byte blocks.\n",
- (rq_data_dir(rq) == WRITE) ?
- "writing" : "reading", this_count,
- blk_rq_sectors(rq)));
-
- dix = scsi_prot_sg_count(SCpnt);
- dif = scsi_host_dif_capable(SCpnt->device->host, sdkp->protection_type);
+ if (write && dix)
+ t10_pi_prepare(cmd->request, sdkp->protection_type);
if (dif || dix)
- protect = sd_setup_protect_cmnd(SCpnt, dix, dif);
+ protect = sd_setup_protect_cmnd(cmd, dix, dif);
else
protect = 0;
if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
- SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
-
- if (unlikely(!SCpnt->cmnd))
- return BLK_STS_RESOURCE;
-
- SCpnt->cmd_len = SD_EXT_CDB_SIZE;
- memset(SCpnt->cmnd, 0, SCpnt->cmd_len);
- SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
- SCpnt->cmnd[7] = 0x18;
- SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
- SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
-
- /* LBA */
- SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
- SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
- SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
- SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
- SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff;
- SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff;
- SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff;
- SCpnt->cmnd[19] = (unsigned char) block & 0xff;
-
- /* Expected Indirect LBA */
- SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff;
- SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff;
- SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff;
- SCpnt->cmnd[23] = (unsigned char) block & 0xff;
-
- /* Transfer length */
- SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff;
- SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
- SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
- SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
- } else if (sdp->use_16_for_rw || (this_count > 0xffff)) {
- SCpnt->cmnd[0] += READ_16 - READ_6;
- SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
- SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
- SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
- SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
- SCpnt->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
- SCpnt->cmnd[6] = (unsigned char) (block >> 24) & 0xff;
- SCpnt->cmnd[7] = (unsigned char) (block >> 16) & 0xff;
- SCpnt->cmnd[8] = (unsigned char) (block >> 8) & 0xff;
- SCpnt->cmnd[9] = (unsigned char) block & 0xff;
- SCpnt->cmnd[10] = (unsigned char) (this_count >> 24) & 0xff;
- SCpnt->cmnd[11] = (unsigned char) (this_count >> 16) & 0xff;
- SCpnt->cmnd[12] = (unsigned char) (this_count >> 8) & 0xff;
- SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
- SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
- } else if ((this_count > 0xff) || (block > 0x1fffff) ||
- scsi_device_protection(SCpnt->device) ||
- SCpnt->device->use_10_for_rw) {
- SCpnt->cmnd[0] += READ_10 - READ_6;
- SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
- SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
- SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
- SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
- SCpnt->cmnd[5] = (unsigned char) block & 0xff;
- SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
- SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
- SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+ ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
+ protect | fua);
+ } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
+ ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
+ protect | fua);
+ } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
+ sdp->use_10_for_rw || protect) {
+ ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
+ protect | fua);
} else {
- if (unlikely(rq->cmd_flags & REQ_FUA)) {
- /*
- * This happens only if this drive failed
- * 10byte rw command with ILLEGAL_REQUEST
- * during operation and thus turned off
- * use_10_for_rw.
- */
- scmd_printk(KERN_ERR, SCpnt,
- "FUA write on READ/WRITE(6) drive\n");
- return BLK_STS_IOERR;
- }
-
- SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
- SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
- SCpnt->cmnd[3] = (unsigned char) block & 0xff;
- SCpnt->cmnd[4] = (unsigned char) this_count;
- SCpnt->cmnd[5] = 0;
+ ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
+ protect | fua);
}
- SCpnt->sdb.length = this_count * sdp->sector_size;
+
+ if (unlikely(ret != BLK_STS_OK))
+ return ret;
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
* host adapter, it's safe to assume that we can at least transfer
* this many bytes between each connect / disconnect.
*/
- SCpnt->transfersize = sdp->sector_size;
- SCpnt->underflow = this_count << 9;
- SCpnt->allowed = SD_MAX_RETRIES;
+ cmd->transfersize = sdp->sector_size;
+ cmd->underflow = nr_blocks << 9;
+ cmd->allowed = SD_MAX_RETRIES;
+ cmd->sdb.length = nr_blocks * sdp->sector_size;
+
+ SCSI_LOG_HLQUEUE(1,
+ scmd_printk(KERN_INFO, cmd,
+ "%s: block=%llu, count=%d\n", __func__,
+ (unsigned long long)blk_rq_pos(rq),
+ blk_rq_sectors(rq)));
+ SCSI_LOG_HLQUEUE(2,
+ scmd_printk(KERN_INFO, cmd,
+ "%s %d/%u 512 byte blocks.\n",
+ write ? "writing" : "reading", nr_blocks,
+ blk_rq_sectors(rq)));
/*
* This indicates that the command is ready from our end to be
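The refactor above replaces one long open-coded CDB builder with four small helpers, selected by LBA width and transfer length: RW(32) when T10 PI type 2 protection is active, RW(16) when use_16_for_rw is set or more than 0xffff blocks are requested, RW(10) for LBAs above 0x1fffff or when protection/use_10_for_rw demand it, and RW(6) otherwise. Only the tail of the RW(16) helper is visible at the top of this hunk; a minimal sketch of the full helper, reconstructed consistently with that tail and assuming the same put_unaligned_be* helpers from <asm/unaligned.h> used by the RW(10)/RW(32) variants:

    static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
                                           sector_t lba, unsigned int nr_blocks,
                                           unsigned char flags)
    {
            cmd->cmd_len  = 16;
            cmd->cmnd[0]  = write ? WRITE_16 : READ_16;
            cmd->cmnd[1]  = flags;
            cmd->cmnd[14] = 0;
            cmd->cmnd[15] = 0;
            put_unaligned_be64(lba, &cmd->cmnd[2]);        /* 64-bit LBA, bytes 2-9 */
            put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); /* length, bytes 10-13 */

            return BLK_STS_OK;
    }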
@@ -1398,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
- /*
- * XXX and what if there are packets in flight and this close()
- * XXX is followed by a "rmmod sd_mod"?
- */
-
scsi_disk_put(sdkp);
}
@@ -2549,25 +2561,25 @@ sd_print_capacity(struct scsi_disk *sdkp,
int sector_size = sdkp->device->sector_size;
char cap_str_2[10], cap_str_10[10];
+ if (!sdkp->first_scan && old_capacity == sdkp->capacity)
+ return;
+
string_get_size(sdkp->capacity, sector_size,
STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
string_get_size(sdkp->capacity, sector_size,
- STRING_UNITS_10, cap_str_10,
- sizeof(cap_str_10));
+ STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
- if (sdkp->first_scan || old_capacity != sdkp->capacity) {
- sd_printk(KERN_NOTICE, sdkp,
- "%llu %d-byte logical blocks: (%s/%s)\n",
- (unsigned long long)sdkp->capacity,
- sector_size, cap_str_10, cap_str_2);
+ sd_printk(KERN_NOTICE, sdkp,
+ "%llu %d-byte logical blocks: (%s/%s)\n",
+ (unsigned long long)sdkp->capacity,
+ sector_size, cap_str_10, cap_str_2);
- if (sdkp->physical_block_size != sector_size)
- sd_printk(KERN_NOTICE, sdkp,
- "%u-byte physical blocks\n",
- sdkp->physical_block_size);
+ if (sdkp->physical_block_size != sector_size)
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u-byte physical blocks\n",
+ sdkp->physical_block_size);
- sd_zbc_print_zones(sdkp);
- }
+ sd_zbc_print_zones(sdkp);
}
/* called with buffer of length 512 */
@@ -3047,6 +3059,58 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->security = 1;
}
+/*
+ * Determine the device's preferred I/O size for reads and writes
+ * unless the reported value is unreasonably small, large, not a
+ * multiple of the physical block size, or simply garbage.
+ */
+static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
+ unsigned int dev_max)
+{
+ struct scsi_device *sdp = sdkp->device;
+ unsigned int opt_xfer_bytes =
+ logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+
+ if (sdkp->opt_xfer_blocks == 0)
+ return false;
+
+ if (sdkp->opt_xfer_blocks > dev_max) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "Optimal transfer size %u logical blocks " \
+ "> dev_max (%u logical blocks)\n",
+ sdkp->opt_xfer_blocks, dev_max);
+ return false;
+ }
+
+ if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "Optimal transfer size %u logical blocks " \
+ "> sd driver limit (%u logical blocks)\n",
+ sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
+ return false;
+ }
+
+ if (opt_xfer_bytes < PAGE_SIZE) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "Optimal transfer size %u bytes < " \
+ "PAGE_SIZE (%u bytes)\n",
+ opt_xfer_bytes, (unsigned int)PAGE_SIZE);
+ return false;
+ }
+
+ if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "Optimal transfer size %u bytes not a " \
+ "multiple of physical block size (%u bytes)\n",
+ opt_xfer_bytes, sdkp->physical_block_size);
+ return false;
+ }
+
+ sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
+ opt_xfer_bytes);
+ return true;
+}
+
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -3125,15 +3189,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
- /*
- * Determine the device's preferred I/O size for reads and writes
- * unless the reported value is unreasonably small, large, or
- * garbage.
- */
- if (sdkp->opt_xfer_blocks &&
- sdkp->opt_xfer_blocks <= dev_max &&
- sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
- logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
+ if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
} else
@@ -3447,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
-
+ struct request_queue *q = disk->queue;
+
ida_free(&sd_index_ida, sdkp->index);
+ /*
+ * Wait until all requests that are in progress have completed.
+ * This is necessary to prevent e.g. scsi_end_request() from
+ * crashing once disk->private_data has been cleared. Wait from
+ * inside scsi_disk_release() instead of from sd_release() so that
+ * freezing and unfreezing the request queue does not disturb user
+ * space I/O when multiple processes open a /dev/sd... node
+ * concurrently.
+ blk_mq_freeze_queue(q);
+ blk_mq_unfreeze_queue(q);
+
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 7f43e6839bce..5796ace76225 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -132,7 +132,7 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
#define sd_first_printk(prefix, sdsk, fmt, a...) \
do { \
- if ((sdkp)->first_scan) \
+ if ((sdsk)->first_scan) \
sd_printk(prefix, sdsk, fmt, ##a); \
} while (0)
@@ -188,68 +188,6 @@ static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sec
return sector >> (ilog2(sdev->sector_size) - 9);
}
-/*
- * Look up the DIX operation based on whether the command is read or
- * write and whether dix and dif are enabled.
- */
-static inline unsigned int sd_prot_op(bool write, bool dix, bool dif)
-{
- /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
- const unsigned int ops[] = { /* wrt dix dif */
- SCSI_PROT_NORMAL, /* 0 0 0 */
- SCSI_PROT_READ_STRIP, /* 0 0 1 */
- SCSI_PROT_READ_INSERT, /* 0 1 0 */
- SCSI_PROT_READ_PASS, /* 0 1 1 */
- SCSI_PROT_NORMAL, /* 1 0 0 */
- SCSI_PROT_WRITE_INSERT, /* 1 0 1 */
- SCSI_PROT_WRITE_STRIP, /* 1 1 0 */
- SCSI_PROT_WRITE_PASS, /* 1 1 1 */
- };
-
- return ops[write << 2 | dix << 1 | dif];
-}
-
-/*
- * Returns a mask of the protection flags that are valid for a given DIX
- * operation.
- */
-static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
-{
- const unsigned int flag_mask[] = {
- [SCSI_PROT_NORMAL] = 0,
-
- [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI |
- SCSI_PROT_GUARD_CHECK |
- SCSI_PROT_REF_CHECK |
- SCSI_PROT_REF_INCREMENT,
-
- [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT |
- SCSI_PROT_IP_CHECKSUM,
-
- [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI |
- SCSI_PROT_GUARD_CHECK |
- SCSI_PROT_REF_CHECK |
- SCSI_PROT_REF_INCREMENT |
- SCSI_PROT_IP_CHECKSUM,
-
- [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI |
- SCSI_PROT_REF_INCREMENT,
-
- [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK |
- SCSI_PROT_REF_CHECK |
- SCSI_PROT_REF_INCREMENT |
- SCSI_PROT_IP_CHECKSUM,
-
- [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI |
- SCSI_PROT_GUARD_CHECK |
- SCSI_PROT_REF_CHECK |
- SCSI_PROT_REF_INCREMENT |
- SCSI_PROT_IP_CHECKSUM,
- };
-
- return flag_mask[prot_op];
-}
-
#ifdef CONFIG_BLK_DEV_INTEGRITY
extern void sd_dif_config_host(struct scsi_disk *);
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
index e6b779930230..a03a6edb0060 100644
--- a/drivers/scsi/smartpqi/Makefile
+++ b/drivers/scsi/smartpqi/Makefile
@@ -1,3 +1,2 @@
-ccflags-y += -I.
obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index f564af8949e8..75ec43aa8df3 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -2764,6 +2764,12 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
sshdr.sense_key == HARDWARE_ERROR &&
sshdr.asc == 0x3e &&
sshdr.ascq == 0x1) {
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
+ struct pqi_scsi_dev *device = scmd->device->hostdata;
+
+ if (printk_ratelimit())
+ scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
pqi_take_device_offline(scmd->device, "RAID");
host_byte = DID_NO_CONNECT;
}
@@ -6043,7 +6049,8 @@ out:
return rc;
}
-static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
+ void __user *arg)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 0abe17c1a73b..2b349365592f 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -30,33 +30,13 @@
 * snic directory and statistics directory for trace buffer and
* stats logging
*/
-
-int
-snic_debugfs_init(void)
+void snic_debugfs_init(void)
{
- int rc = -1;
- struct dentry *de = NULL;
-
- de = debugfs_create_dir("snic", NULL);
- if (!de) {
- SNIC_DBG("Cannot create debugfs root\n");
-
- return rc;
- }
- snic_glob->trc_root = de;
-
- de = debugfs_create_dir("statistics", snic_glob->trc_root);
- if (!de) {
- SNIC_DBG("Cannot create Statistics directory\n");
+ snic_glob->trc_root = debugfs_create_dir("snic", NULL);
- return rc;
- }
- snic_glob->stats_root = de;
-
- rc = 0;
-
- return rc;
-} /* end of snic_debugfs_init */
+ snic_glob->stats_root = debugfs_create_dir("statistics",
+ snic_glob->trc_root);
+}
/*
 * snic_debugfs_term - Tear down debugfs infrastructure
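The snic conversions in this file lean on the debugfs convention that creation calls need not be error-checked: debugfs_create_dir() hands back an ERR_PTR on failure, and passing that value as the parent of a later debugfs_create_*() call is safe, since the child call simply propagates the error. A minimal sketch of the resulting style (the names priv, stats_fops and enable_flag are placeholders, not part of this driver):

    struct dentry *root;

    /* no error handling needed: an ERR_PTR parent makes children no-ops */
    root = debugfs_create_dir("example", NULL);
    debugfs_create_file("stats", 0444, root, priv, &stats_fops);
    debugfs_create_bool("enable", 0644, root, &enable_flag);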
@@ -391,56 +371,23 @@ static const struct file_operations snic_reset_stats_fops = {
* It will create file stats and reset_stats under statistics/host# directory
* to log per snic stats
*/
-int
-snic_stats_debugfs_init(struct snic *snic)
+void snic_stats_debugfs_init(struct snic *snic)
{
- int rc = -1;
char name[16];
- struct dentry *de = NULL;
snprintf(name, sizeof(name), "host%d", snic->shost->host_no);
- if (!snic_glob->stats_root) {
- SNIC_DBG("snic_stats root doesn't exist\n");
-
- return rc;
- }
-
- de = debugfs_create_dir(name, snic_glob->stats_root);
- if (!de) {
- SNIC_DBG("Cannot create host directory\n");
-
- return rc;
- }
- snic->stats_host = de;
-
- de = debugfs_create_file("stats",
- S_IFREG|S_IRUGO,
- snic->stats_host,
- snic,
- &snic_stats_fops);
- if (!de) {
- SNIC_DBG("Cannot create host's stats file\n");
-
- return rc;
- }
- snic->stats_file = de;
-
- de = debugfs_create_file("reset_stats",
- S_IFREG|S_IRUGO|S_IWUSR,
- snic->stats_host,
- snic,
- &snic_reset_stats_fops);
- if (!de) {
- SNIC_DBG("Cannot create host's reset_stats file\n");
+ snic->stats_host = debugfs_create_dir(name, snic_glob->stats_root);
- return rc;
- }
- snic->reset_stats_file = de;
- rc = 0;
+ snic->stats_file = debugfs_create_file("stats", S_IFREG|S_IRUGO,
+ snic->stats_host, snic,
+ &snic_stats_fops);
- return rc;
-} /* end of snic_stats_debugfs_init */
+ snic->reset_stats_file = debugfs_create_file("reset_stats",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ snic->stats_host, snic,
+ &snic_reset_stats_fops);
+}
/*
* snic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
@@ -517,46 +464,18 @@ static const struct file_operations snic_trc_fops = {
* snic_trc_debugfs_init : creates trace/tracing_enable files for trace
* under debugfs
*/
-int
-snic_trc_debugfs_init(void)
+void snic_trc_debugfs_init(void)
{
- struct dentry *de = NULL;
- int ret = -1;
-
- if (!snic_glob->trc_root) {
- SNIC_ERR("Debugfs root directory for snic doesn't exist.\n");
-
- return ret;
- }
-
- de = debugfs_create_bool("tracing_enable",
- S_IFREG | S_IRUGO | S_IWUSR,
- snic_glob->trc_root,
- &snic_glob->trc.enable);
-
- if (!de) {
- SNIC_ERR("Can't create trace_enable file.\n");
-
- return ret;
- }
- snic_glob->trc.trc_enable = de;
-
- de = debugfs_create_file("trace",
- S_IFREG | S_IRUGO | S_IWUSR,
- snic_glob->trc_root,
- NULL,
- &snic_trc_fops);
-
- if (!de) {
- SNIC_ERR("Cannot create trace file.\n");
-
- return ret;
- }
- snic_glob->trc.trc_file = de;
- ret = 0;
-
- return ret;
-} /* end of snic_trc_debugfs_init */
+ snic_glob->trc.trc_enable = debugfs_create_bool("tracing_enable",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ snic_glob->trc_root,
+ &snic_glob->trc.enable);
+
+ snic_glob->trc.trc_file = debugfs_create_file("trace",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ snic_glob->trc_root, NULL,
+ &snic_trc_fops);
+}
/*
* snic_trc_debugfs_term : cleans up the files created for trace under debugfs
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 5e824fd6047a..14f4ce665e58 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -397,12 +397,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
/* Per snic debugfs init */
- ret = snic_stats_debugfs_init(snic);
- if (ret) {
- SNIC_HOST_ERR(snic->shost,
- "Failed to initialize debugfs stats\n");
- snic_stats_debugfs_remove(snic);
- }
+ snic_stats_debugfs_init(snic);
#endif
/* Setup PCI Resources */
@@ -850,12 +845,7 @@ snic_global_data_init(void)
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
/* Debugfs related Initialization */
/* Create debugfs entries for snic */
- ret = snic_debugfs_init();
- if (ret < 0) {
- SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n");
- snic_debugfs_term();
- /* continue even if it fails */
- }
+ snic_debugfs_init();
/* Trace related Initialization */
/* Allocate memory for trace buffer */
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
index fd1066b1cad5..faf0cb601954 100644
--- a/drivers/scsi/snic/snic_stats.h
+++ b/drivers/scsi/snic/snic_stats.h
@@ -99,7 +99,7 @@ struct snic_stats {
atomic64_t io_cmpl_skip;
};
-int snic_stats_debugfs_init(struct snic *);
+void snic_stats_debugfs_init(struct snic *);
void snic_stats_debugfs_remove(struct snic *);
/* Auxiliary function to update active IO counter */
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index 458eaba24c78..f23fe2f88438 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -138,12 +138,7 @@ snic_trc_init(void)
trc->buf = (struct snic_trc_data *) tbuf;
spin_lock_init(&trc->lock);
- ret = snic_trc_debugfs_init();
- if (ret) {
- SNIC_ERR("Failed to create Debugfs Files.\n");
-
- goto error;
- }
+ snic_trc_debugfs_init();
trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ);
trc->rd_idx = trc->wr_idx = 0;
@@ -153,11 +148,6 @@ snic_trc_init(void)
ret = 0;
return ret;
-
-error:
- snic_trc_free();
-
- return ret;
} /* end of snic_trc_init */
/*
diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h
index b37f8867bfde..87dcc7457d15 100644
--- a/drivers/scsi/snic/snic_trc.h
+++ b/drivers/scsi/snic/snic_trc.h
@@ -53,12 +53,12 @@ struct snic_trc {
int snic_trc_init(void);
void snic_trc_free(void);
-int snic_trc_debugfs_init(void);
+void snic_trc_debugfs_init(void);
void snic_trc_debugfs_term(void);
struct snic_trc_data *snic_get_trc_buf(void);
int snic_get_trc_data(char *buf, int buf_sz);
-int snic_debugfs_init(void);
+void snic_debugfs_init(void);
void snic_debugfs_term(void);
static inline void
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 38ddbbfe5f3c..039c27c2d7b3 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -394,7 +394,6 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
ret = scsi_init_io(SCpnt);
if (ret != BLK_STS_OK)
goto out;
- WARN_ON_ONCE(SCpnt != rq->special);
cd = scsi_cd(rq->rq_disk);
/* from here on until we're complete, any goto out
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7ff22d3f03e3..19c022e66d63 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -169,7 +169,7 @@ static int debugging = DEBUG;
/* Remove mode bits and auto-rewind bit (7) */
#define TAPE_NR(x) ( ((iminor(x) & ~255) >> (ST_NBR_MODE_BITS + 1)) | \
- (iminor(x) & ~(-1 << ST_MODE_SHIFT)) )
+ (iminor(x) & ((1 << ST_MODE_SHIFT)-1)))
#define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT)
/* Construct the minor number from the device (d), mode (m), and non-rewind (n) data */
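The TAPE_NR change swaps ~(-1 << ST_MODE_SHIFT) for (1 << ST_MODE_SHIFT) - 1. Both expressions produce the same low-bits mask, but the old form left-shifts a negative value, which is undefined behavior in C. A standalone sketch showing the equivalence, with a hypothetical shift width standing in for ST_MODE_SHIFT:

    #include <stdio.h>

    int main(void)
    {
            int n = 4;                            /* stand-in for ST_MODE_SHIFT */
            unsigned int old_mask = ~(-1 << n);   /* 0xf, but shifts a negative value (UB) */
            unsigned int new_mask = (1 << n) - 1; /* 0xf, well defined */

            printf("%#x %#x\n", old_mask, new_mask);
            return 0;
    }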
@@ -337,12 +337,14 @@ static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s)
switch (sense[0] & 0x7f) {
case 0x71:
s->deferred = 1;
+ /* fall through */
case 0x70:
s->fixed_format = 1;
s->flags = sense[2] & 0xe0;
break;
case 0x73:
s->deferred = 1;
+ /* fall through */
case 0x72:
s->fixed_format = 0;
ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
@@ -2721,6 +2723,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
switch (cmd_in) {
case MTFSFM:
chg_eof = 0; /* Changed from the FSF after this */
+ /* fall through */
case MTFSF:
cmd[0] = SPACE;
cmd[1] = 0x01; /* Space FileMarks */
@@ -2735,6 +2738,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
break;
case MTBSFM:
chg_eof = 0; /* Changed from the FSF after this */
+ /* fall through */
case MTBSF:
cmd[0] = SPACE;
cmd[1] = 0x01; /* Space FileMarks */
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 84380bae20f1..8472de1007ff 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -385,7 +385,7 @@ enum storvsc_request_type {
* This is the end of Protocol specific defines.
*/
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
static u32 max_outstanding_req_per_channel;
static int storvsc_vcpus_per_sub_channel = 4;
@@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
struct device *dev = &device->device;
struct storvsc_device *stor_device;
- int num_cpus = num_online_cpus();
int num_sc;
struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
- num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+ /*
+ * If the number of CPUs is artificially restricted, such as
+ * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+ * sub-channels >= the number of CPUs. These sub-channels
+ * should not be created. The primary channel is already created
+ * and assigned to one CPU, so check against # CPUs - 1.
+ */
+ num_sc = min((int)(num_online_cpus() - 1), max_chns);
+ if (!num_sc)
+ return;
+
stor_device = get_out_stor_device(device);
if (!stor_device)
return;
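The min() bound makes the maxcpus=1 case concrete: with one online CPU, num_sc = min(1 - 1, max_chns) = 0, so the function returns before any sub-channel is requested and the primary channel keeps its CPU. A sketch with hypothetical values (compute_subchannels is illustrative, not a function in this driver):

    /* sketch: 1 online CPU, controller offers 4 channels */
    static int compute_subchannels(int online_cpus, int max_chns)
    {
            /* the primary channel already occupies one CPU */
            return min(online_cpus - 1, max_chns);
    }
    /* compute_subchannels(1, 4) == 0 -> no sub-channels are created */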
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 2ddbb26d9c26..6db37cf306b0 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -99,7 +99,6 @@ config SCSI_UFS_DWC_TC_PLATFORM
config SCSI_UFS_QCOM
tristate "QCOM specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
- select PHY_QCOM_UFS
help
This selects the QCOM specific additions to UFSHCD platform driver.
UFS host on QCOM needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 452e19f8fb47..0e855b5afe82 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -66,7 +66,7 @@ static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
return err;
}
-static void ufs_hi3660_clk_init(struct ufs_hba *hba)
+static void ufs_hisi_clk_init(struct ufs_hba *hba)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
@@ -80,7 +80,7 @@ static void ufs_hi3660_clk_init(struct ufs_hba *hba)
ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
}
-static void ufs_hi3660_soc_init(struct ufs_hba *hba)
+static void ufs_hisi_soc_init(struct ufs_hba *hba)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
u32 reg;
@@ -139,6 +139,7 @@ static void ufs_hi3660_soc_init(struct ufs_hba *hba)
static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
int err;
uint32_t value;
uint32_t reg;
@@ -153,6 +154,14 @@ static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
/* MPHY CBOVRCTRL3 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+
+ if (host->caps & UFS_HISI_CAP_PHY10nm) {
+ /* MPHY CBOVRCTRL4 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
+ /* MPHY CBOVRCTRL5 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
+ }
+
/* Unipro VS_MphyCfgUpdt */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
/* MPHY RXOVRCTRL4 rx0 */
@@ -173,10 +182,21 @@ static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
- /* Tactive RX */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
- /* Tactive RX */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+ if (host->caps & UFS_HISI_CAP_PHY10nm) {
+ /* RX_Hibern8Time_Capability */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
+ /* RX_Hibern8Time_Capability */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
+ /* RX_Min_ActivateTime */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
+ /* RX_Min_ActivateTime */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
+ } else {
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+ }
/* Gear3 Synclength */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
@@ -208,7 +228,8 @@ static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
if (err)
dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
- ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
+ if (!(host->caps & UFS_HISI_CAP_PHY10nm))
+ ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
/* disable auto H8 */
reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
@@ -253,7 +274,7 @@ static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
return 0;
}
-static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba,
+static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
@@ -391,6 +412,28 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (host->caps & UFS_HISI_CAP_PHY10nm) {
+ /*
+ * The Boston platform needs SaveConfigTime set to 0x13,
+ * and the sync lengths changed to their maximum values
+ */
+ /* VS_DebugSaveConfigTime */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
+ /* g1 sync length */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
+ /* g2 sync length */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
+ /* g3 sync length */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
+ /* PA_Hibern8Time */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
+ /* PA_Tactivate */
+ ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
+ }
+
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
/* VS_DebugSaveConfigTime */
@@ -429,7 +472,7 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
}
-static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba,
+static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
@@ -567,25 +610,69 @@ static int ufs_hi3660_init(struct ufs_hba *hba)
return ret;
}
- ufs_hi3660_clk_init(hba);
+ ufs_hisi_clk_init(hba);
+
+ ufs_hisi_soc_init(hba);
+
+ return 0;
+}
+
+static int ufs_hi3670_init(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+ struct ufs_hisi_host *host;
+
+ ret = ufs_hisi_init_common(hba);
+ if (ret) {
+ dev_err(dev, "%s: ufs common init fail\n", __func__);
+ return ret;
+ }
+
+ ufs_hisi_clk_init(hba);
+
+ ufs_hisi_soc_init(hba);
- ufs_hi3660_soc_init(hba);
+ /* Add cap for 10nm PHY variant on HI3670 SoC */
+ host = ufshcd_get_variant(hba);
+ host->caps |= UFS_HISI_CAP_PHY10nm;
return 0;
}
-static struct ufs_hba_variant_ops ufs_hba_hisi_vops = {
+static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
.name = "hi3660",
.init = ufs_hi3660_init,
- .link_startup_notify = ufs_hi3660_link_startup_notify,
- .pwr_change_notify = ufs_hi3660_pwr_change_notify,
+ .link_startup_notify = ufs_hisi_link_startup_notify,
+ .pwr_change_notify = ufs_hisi_pwr_change_notify,
.suspend = ufs_hisi_suspend,
.resume = ufs_hisi_resume,
};
+static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
+ .name = "hi3670",
+ .init = ufs_hi3670_init,
+ .link_startup_notify = ufs_hisi_link_startup_notify,
+ .pwr_change_notify = ufs_hisi_pwr_change_notify,
+ .suspend = ufs_hisi_suspend,
+ .resume = ufs_hisi_resume,
+};
+
+static const struct of_device_id ufs_hisi_of_match[] = {
+ { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
+ { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
+
static int ufs_hisi_probe(struct platform_device *pdev)
{
- return ufshcd_pltfrm_init(pdev, &ufs_hba_hisi_vops);
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);
+
+ return ufshcd_pltfrm_init(pdev, of_id->data);
}
static int ufs_hisi_remove(struct platform_device *pdev)
@@ -596,13 +683,6 @@ static int ufs_hisi_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ufs_hisi_of_match[] = {
- { .compatible = "hisilicon,hi3660-ufs" },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
-
static const struct dev_pm_ops ufs_hisi_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
.resume = ufshcd_pltfrm_resume,
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h
index 3df9cd7acc29..667dfe39b57e 100644
--- a/drivers/scsi/ufs/ufs-hisi.h
+++ b/drivers/scsi/ufs/ufs-hisi.h
@@ -91,6 +91,9 @@ enum {
#define UFS_HISI_LIMIT_HS_RATE PA_HS_MODE_B
#define UFS_HISI_LIMIT_DESIRED_MODE FAST
+#define UFS_HISI_CAP_RESERVED BIT(0)
+#define UFS_HISI_CAP_PHY10nm BIT(1)
+
struct ufs_hisi_host {
struct ufs_hba *hba;
void __iomem *ufs_sys_ctrl;
@@ -112,4 +115,5 @@ struct ufs_hisi_host {
ufs_sys_ctrl_writel((host), \
((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
(reg))
+
#endif /* UFS_HISI_H_ */
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 6d176815e6ce..21e4ccb5ba6e 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -514,7 +514,6 @@ struct ufs_vreg {
struct regulator *reg;
const char *name;
bool enabled;
- bool unused;
int min_uV;
int max_uV;
int min_uA;
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index 775bb4e5e36e..869e71f861d6 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -27,15 +27,11 @@ static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
unsigned int request_len,
- unsigned int reply_len,
- int desc_len, enum query_opcode desc_op)
+ unsigned int reply_len)
{
int min_req_len = sizeof(struct ufs_bsg_request);
int min_rsp_len = sizeof(struct ufs_bsg_reply);
- if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
- min_req_len += desc_len;
-
if (min_req_len > request_len || min_rsp_len > reply_len) {
dev_err(hba->dev, "not enough space assigned\n");
return -EINVAL;
@@ -44,21 +40,16 @@ static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
return 0;
}
-static int ufs_bsg_verify_query_params(struct ufs_hba *hba,
- struct ufs_bsg_request *bsg_request,
- unsigned int request_len,
- unsigned int reply_len,
- uint8_t *desc_buff, int *desc_len,
- enum query_opcode desc_op)
+static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
+ uint8_t **desc_buff, int *desc_len,
+ enum query_opcode desc_op)
{
+ struct ufs_bsg_request *bsg_request = job->request;
struct utp_upiu_query *qr;
+ u8 *descp;
- if (desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
- dev_err(hba->dev, "unsupported opcode %d\n", desc_op);
- return -ENOTSUPP;
- }
-
- if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC)
+ if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC &&
+ desc_op != UPIU_QUERY_OPCODE_READ_DESC)
goto out;
qr = &bsg_request->upiu_req.qr;
@@ -67,11 +58,21 @@ static int ufs_bsg_verify_query_params(struct ufs_hba *hba,
return -EINVAL;
}
- if (ufs_bsg_verify_query_size(hba, request_len, reply_len, *desc_len,
- desc_op))
+ if (*desc_len > job->request_payload.payload_len) {
+ dev_err(hba->dev, "Illegal desc size\n");
return -EINVAL;
+ }
- desc_buff = (uint8_t *)(bsg_request + 1);
+ descp = kzalloc(*desc_len, GFP_KERNEL);
+ if (!descp)
+ return -ENOMEM;
+
+ if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, descp,
+ *desc_len);
+
+ *desc_buff = descp;
out:
return 0;
@@ -91,7 +92,7 @@ static int ufs_bsg_request(struct bsg_job *job)
enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
int ret;
- ret = ufs_bsg_verify_query_size(hba, req_len, reply_len, 0, desc_op);
+ ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
if (ret)
goto out;
@@ -101,9 +102,8 @@ static int ufs_bsg_request(struct bsg_job *job)
switch (msgcode) {
case UPIU_TRANSACTION_QUERY_REQ:
desc_op = bsg_request->upiu_req.qr.opcode;
- ret = ufs_bsg_verify_query_params(hba, bsg_request, req_len,
- reply_len, desc_buff,
- &desc_len, desc_op);
+ ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
+ &desc_len, desc_op);
if (ret)
goto out;
@@ -135,11 +135,20 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
+ if (!desc_buff)
+ goto out;
+
+ if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ desc_buff, desc_len);
+
+ kfree(desc_buff);
+
out:
bsg_reply->result = ret;
- job->reply_len = sizeof(struct ufs_bsg_reply) +
- bsg_reply->reply_payload_rcv_len;
-
+ job->reply_len = sizeof(struct ufs_bsg_reply);
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
return ret;
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 5d2dfdb41a6f..a9bbd34d6b16 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -45,21 +45,6 @@ struct ufs_dev_fix {
}
/*
- * If UFS device is having issue in processing LCC (Line Control
- * Command) coming from UFS host controller then enable this quirk.
- * When this quirk is enabled, host controller driver should disable
- * the LCC transmission on UFS host controller (by clearing
- * TX_LCC_ENABLE attribute of host to 0).
- */
-#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0)
-
-/*
- * Some UFS devices don't need VCCQ rail for device operations. Enabling this
- * quirk for such devices will make sure that VCCQ rail is not voted.
- */
-#define UFS_DEVICE_NO_VCCQ (1 << 1)
-
-/*
 * Some vendors' UFS devices send back-to-back NACs for the DL data frames
* causing the host controller to raise the DFES error status. Sometimes
* such UFS devices send back to back NAC without waiting for new
@@ -85,13 +70,6 @@ struct ufs_dev_fix {
#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
/*
- * Some UFS devices may not work properly after resume if the link was kept
- * in off state during suspend. Enabling this quirk will not allow the
- * link to be kept in off state during suspend.
- */
-#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3)
-
-/*
 * A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
* 600us which may not be enough for reliable hibern8 exit hardware sequence
* from UFS device.
@@ -101,13 +79,6 @@ struct ufs_dev_fix {
#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
/*
- * Some UFS memory devices may have really low read/write throughput in
- * FAST AUTO mode, enable this quirk to make sure that FAST AUTO mode is
- * never enabled for such devices.
- */
-#define UFS_DEVICE_NO_FASTAUTO (1 << 5)
-
-/*
* It seems some UFS devices may keep drawing more than sleep current
 * (at least for 500us) from UFS rails (especially from VCCQ rail).
* To avoid this situation, add 2ms delay before putting these UFS
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
index 5fd16c72207f..977b21871a5d 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ b/drivers/scsi/ufs/ufshcd-dwc.c
@@ -50,7 +50,7 @@ static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
/**
* ufshcd_dwc_link_is_up()
* Check if link is up
- * @hba: private structure poitner
+ * @hba: private structure pointer
*
* Returns 0 on success, non-zero value on failure
*/
@@ -110,7 +110,7 @@ static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
/**
* ufshcd_dwc_link_startup_notify()
* UFS Host DWC specific link startup sequence
- * @hba: private structure poitner
+ * @hba: private structure pointer
* @status: Callback notify status
*
* Returns 0 on success, non-zero value on failure
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 895a9b5ac989..27213676329c 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -297,7 +297,7 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
* Returns 0 on success, non-zero value on failure
*/
int ufshcd_pltfrm_init(struct platform_device *pdev,
- struct ufs_hba_variant_ops *vops)
+ const struct ufs_hba_variant_ops *vops)
{
struct ufs_hba *hba;
void __iomem *mmio_base;
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
index df64c4180340..1f29e1fd6d52 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.h
@@ -17,7 +17,7 @@
#include "ufshcd.h"
int ufshcd_pltfrm_init(struct platform_device *pdev,
- struct ufs_hba_variant_ops *vops);
+ const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
#ifdef CONFIG_PM
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2ddf24466a62..e040f9dd9ff3 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -219,12 +219,9 @@ static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_NO_FASTAUTO),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
@@ -232,7 +229,6 @@ static struct ufs_dev_fix ufs_fixups[] = {
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
@@ -251,7 +247,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -399,15 +394,20 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
struct ufs_uic_err_reg_hist *err_hist, char *err_name)
{
int i;
+ bool found = false;
for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
- int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+ int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH;
if (err_hist->reg[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+ found = true;
}
+
+ if (!found)
+ dev_err(hba->dev, "No record of %s uic errors\n", err_name);
}
static void ufshcd_print_host_regs(struct ufs_hba *hba)
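The index change above also fixes the dump order: with err_hist->pos pointing at the next slot to be overwritten, the oldest surviving entry sits at pos itself, so starting the walk at (i + pos) rather than (i + pos - 1) prints the history oldest-first. A minimal standalone sketch of the traversal, with a hypothetical ring length standing in for UIC_ERR_REG_HIST_LENGTH (kernel context assumed for u32 and pr_err):

    #define HIST_LEN 8  /* stand-in for UIC_ERR_REG_HIST_LENGTH */

    static void dump_hist(const u32 *reg, int pos)
    {
            int i;

            for (i = 0; i < HIST_LEN; i++) {
                    int p = (i + pos) % HIST_LEN;  /* i == 0 -> oldest slot */

                    if (reg[p])
                            pr_err("hist[%d] = 0x%x\n", i, reg[p]);
            }
    }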
@@ -5775,6 +5775,20 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+ if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
+ u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
+ u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+ MASK_QUERY_DATA_SEG_LEN;
+
+ if (*buff_len >= resp_len) {
+ memcpy(desc_buff, descp, resp_len);
+ *buff_len = resp_len;
+ } else {
+ dev_warn(hba->dev, "rsp size is bigger than buffer");
+ *buff_len = 0;
+ err = -EINVAL;
+ }
+ }
ufshcd_put_dev_cmd_tag(hba, tag);
wake_up(&hba->dev_cmd.tag_wq);
@@ -5810,11 +5824,6 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
int ocs_value;
u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
- if (desc_buff && desc_op != UPIU_QUERY_OPCODE_WRITE_DESC) {
- err = -ENOTSUPP;
- goto out;
- }
-
switch (msgcode) {
case UPIU_TRANSACTION_NOP_OUT:
cmd_type = DEV_CMD_TYPE_NOP;
@@ -5855,7 +5864,6 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
break;
}
-out:
return err;
}
@@ -6825,11 +6833,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufs_fixup_device_setup(hba, &card);
ufshcd_tune_unipro_params(hba);
- ret = ufshcd_set_vccq_rail_unused(hba,
- (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
- if (ret)
- goto out;
-
/* UFS device is also active now */
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
@@ -7013,24 +7016,13 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
- if (!vreg)
- return 0;
- else if (vreg->unused)
- return 0;
- else
- return ufshcd_config_vreg_load(hba->dev, vreg,
- UFS_VREG_LPM_LOAD_UA);
+ return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
}
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
- if (!vreg)
- return 0;
- else if (vreg->unused)
- return 0;
- else
- return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
@@ -7068,9 +7060,7 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg)
- goto out;
- else if (vreg->enabled || vreg->unused)
+ if (!vreg || vreg->enabled)
goto out;
ret = ufshcd_config_vreg(dev, vreg, true);
@@ -7090,9 +7080,7 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg)
- goto out;
- else if (!vreg->enabled || vreg->unused)
+ if (!vreg || !vreg->enabled)
goto out;
ret = regulator_disable(vreg->reg);
@@ -7198,36 +7186,6 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
return 0;
}
-static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
-{
- int ret = 0;
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- if (!info)
- goto out;
- else if (!info->vccq)
- goto out;
-
- if (unused) {
- /* shut off the rail here */
- ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
- /*
- * Mark this rail as no longer used, so it doesn't get enabled
- * later by mistake
- */
- if (!ret)
- info->vccq->unused = true;
- } else {
- /*
- * rail should have been already enabled hence just make sure
- * that unused flag is cleared.
- */
- info->vccq->unused = false;
- }
-out:
- return ret;
-}
-
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
bool skip_ref_clk)
{
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 69ba7445d2b3..ecfa898b9ccc 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -546,7 +546,7 @@ struct ufs_hba {
int nutrs;
int nutmrs;
u32 ufs_version;
- struct ufs_hba_variant_ops *vops;
+ const struct ufs_hba_variant_ops *vops;
void *priv;
unsigned int irq;
bool is_irq_enabled;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 772b976e4ee4..f8cb7c23305b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -100,16 +100,8 @@ static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
- if (!resid)
- return;
-
- if (!scsi_bidi_cmnd(sc)) {
+ if (resid)
scsi_set_resid(sc, resid);
- return;
- }
-
- scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
- scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
}
/**
@@ -403,9 +395,9 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
if (sc && sc->sc_data_direction != DMA_NONE) {
if (sc->sc_data_direction != DMA_FROM_DEVICE)
- out = &scsi_out(sc)->table;
+ out = &sc->sdb.table;
if (sc->sc_data_direction != DMA_TO_DEVICE)
- in = &scsi_in(sc)->table;
+ in = &sc->sdb.table;
}
/* Request header. */
@@ -594,7 +586,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
- cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
.subtype = cpu_to_virtio32(vscsi->vdev,
@@ -653,7 +644,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
- cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
@@ -803,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
/* We need to know how many queues before we allocate. */
num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+ num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
num_targets = virtscsi_config_get(vdev, max_target) + 1;
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
index 9351349cf0a9..1e0041ec8132 100644
--- a/drivers/soc/bcm/bcm2835-power.c
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -150,7 +150,12 @@ struct bcm2835_power {
static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
{
- u64 start = ktime_get_ns();
+ u64 start;
+
+ if (!reg)
+ return 0;
+
+ start = ktime_get_ns();
/* Enable the module's async AXI bridges. */
ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
{
- u64 start = ktime_get_ns();
+ u64 start;
+
+ if (!reg)
+ return 0;
+
+ start = ktime_get_ns();
 /* Disable the module's async AXI bridges. */
ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
}
}
-static void
+static int
bcm2835_init_power_domain(struct bcm2835_power *power,
int pd_xlate_index, const char *name)
{
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
dom->clk = devm_clk_get(dev->parent, name);
+ if (IS_ERR(dom->clk)) {
+ int ret = PTR_ERR(dom->clk);
+
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /* Some domains don't have a clk, so make sure that we
+ * don't deref an error pointer later.
+ */
+ dom->clk = NULL;
+ }
dom->base.name = name;
dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
pm_genpd_init(&dom->base, NULL, true);
power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+
+ return 0;
}
/** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
};
- int ret, i;
+ int ret = 0, i;
u32 id;
power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
- for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
- bcm2835_init_power_domain(power, i, power_domain_names[i]);
+ for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+ ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
+ if (ret)
+ goto fail;
+ }
for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
ret = devm_reset_controller_register(dev, &power->reset);
if (ret)
- return ret;
+ goto fail;
of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
dev_info(dev, "Broadcom BCM2835 power domains driver");
return 0;
+
+fail:
+ for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+ struct generic_pm_domain *dom = &power->domains[i].base;
+
+ if (dom->name)
+ pm_genpd_remove(dom);
+ }
+ return ret;
}
static int bcm2835_power_remove(struct platform_device *pdev)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f761655e2a36..0fba8f400c59 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -426,6 +426,12 @@ config SPI_MT65XX
 say Y or M here. If you are not sure, say N.
SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
+config SPI_MT7621
+ tristate "MediaTek MT7621 SPI Controller"
+ depends on RALINK || COMPILE_TEST
+ help
+ This selects a driver for the MediaTek MT7621 SPI Controller.
+
config SPI_NPCM_PSPI
tristate "Nuvoton NPCM PSPI Controller"
depends on ARCH_NPCM || COMPILE_TEST
@@ -842,9 +848,17 @@ config SPI_XTENSA_XTFPGA
16 bit words in SPI mode 0, automatically asserting CS on transfer
start and deasserting on end.
+config SPI_ZYNQ_QSPI
+ tristate "Xilinx Zynq QSPI controller"
+ depends on ARCH_ZYNQ || COMPILE_TEST
+ help
+ This enables support for the Zynq Quad SPI controller
+ in master mode.
+	  This controller only supports the SPI memory interface.
+
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
- depends on SPI_MASTER && HAS_DMA
+ depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index d8fc03c9faa2..f2f78d03dc28 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
+obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
@@ -118,6 +119,7 @@ obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
obj-$(CONFIG_SPI_XLP) += spi-xlp.o
obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
+obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o
obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
# SPI slave protocol handlers
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index fffc21cd5f79..9f24d5f0b431 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -366,7 +366,7 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
return err;
}
-const char *atmel_qspi_get_name(struct spi_mem *spimem)
+static const char *atmel_qspi_get_name(struct spi_mem *spimem)
{
return dev_name(spimem->spi->dev.parent);
}
@@ -570,7 +570,8 @@ static int atmel_qspi_remove(struct platform_device *pdev)
static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
- struct atmel_qspi *aq = dev_get_drvdata(dev);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
clk_disable_unprepare(aq->qspick);
clk_disable_unprepare(aq->pclk);
@@ -580,7 +581,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
- struct atmel_qspi *aq = dev_get_drvdata(dev);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
clk_prepare_enable(aq->pclk);
clk_prepare_enable(aq->qspick);
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index a694d702e574..f763e14bdf12 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -178,12 +178,6 @@ static int at91_usart_spi_setup(struct spi_device *spi)
struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
u32 *ausd = spi->controller_state;
unsigned int mr = at91_usart_spi_readl(aus, MR);
- u8 bits = spi->bits_per_word;
-
- if (bits != 8) {
- dev_dbg(&spi->dev, "Only 8 bits per word are supported\n");
- return -EINVAL;
- }
if (spi->mode & SPI_CPOL)
mr |= US_MR_CPOL;
@@ -212,7 +206,7 @@ static int at91_usart_spi_setup(struct spi_device *spi)
dev_dbg(&spi->dev,
"setup: bpw %u mode 0x%x -> mr %d %08x\n",
- bits, spi->mode, spi->chip_select, mr);
+ spi->bits_per_word, spi->mode, spi->chip_select, mr);
return 0;
}
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 35aebdfd3b4e..8aa22713c483 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -335,20 +335,6 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
return 1;
}
-/*
- * DMA support
- *
- * this implementation has currently a few issues in so far as it does
- * not work arrount limitations of the HW.
- *
- * the main one being that DMA transfers are limited to 16 bit
- * (so 0 to 65535 bytes) by the SPI HW due to BCM2835_SPI_DLEN
- *
- * there may be a few more border-cases we may need to address as well
- * but unfortunately this would mean splitting up the scatter-gather
- * list making it slightly unpractical...
- */
-
/**
* bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
* @master: SPI master
@@ -630,19 +616,6 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
return false;
- /* BCM2835_SPI_DLEN has defined a max transfer size as
- * 16 bit, so max is 65535
- * we can revisit this by using an alternative transfer
- * method - ideally this would get done without any more
- * interaction...
- */
- if (tfr->len > 65535) {
- dev_warn_once(&spi->dev,
- "transfer size of %d too big for dma-transfer\n",
- tfr->len);
- return false;
- }
-
/* return OK */
return true;
}
@@ -707,7 +680,6 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
/* all went well, so set can_dma */
master->can_dma = bcm2835_spi_can_dma;
- master->max_dma_len = 65535; /* limitation by BCM2835_SPI_DLEN */
/* need to do TX AND RX DMA, so we need dummy buffers */
master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
@@ -844,6 +816,17 @@ static int bcm2835_spi_prepare_message(struct spi_master *master,
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ int ret;
+
+ /*
+ * DMA transfers are limited to 16 bit (0 to 65535 bytes) by the SPI HW
+ * due to DLEN. Split up transfers (32-bit FIFO aligned) if the limit is
+ * exceeded.
+ */
+ ret = spi_split_transfers_maxsize(master, msg, 65532,
+ GFP_KERNEL | GFP_DMA);
+ if (ret)
+ return ret;
cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
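
Note: the 65532 above is not arbitrary: it is the 16-bit DLEN ceiling
(65535) rounded down to a multiple of 4, so every chunk the core splits off
stays aligned to the 32-bit FIFO accesses this driver relies on. As a
sketch (the macro names are mine, not the driver's):

    #define BCM2835_SPI_DLEN_MAX    65535U  /* 16-bit hardware counter */
    /* largest split size that keeps chunks 32-bit aligned */
    #define BCM2835_SPI_MAX_XFER    (BCM2835_SPI_DLEN_MAX & ~3U)    /* 65532 */
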
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index f7e054848ca5..bbf87adb3ff8 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -21,6 +21,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -36,6 +37,12 @@
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
+/* define polling limits */
+static unsigned int polling_limit_us = 30;
+module_param(polling_limit_us, uint, 0664);
+MODULE_PARM_DESC(polling_limit_us,
+ "time in us to run a transfer in polling mode - if zero no polling is used\n");
+
/*
* spi register defines
*
@@ -88,10 +95,6 @@
#define BCM2835_AUX_SPI_STAT_BUSY 0x00000040
#define BCM2835_AUX_SPI_STAT_BITCOUNT 0x0000003F
-/* timeout values */
-#define BCM2835_AUX_SPI_POLLING_LIMIT_US 30
-#define BCM2835_AUX_SPI_POLLING_JIFFIES 2
-
struct bcm2835aux_spi {
void __iomem *regs;
struct clk *clk;
@@ -102,8 +105,53 @@ struct bcm2835aux_spi {
int tx_len;
int rx_len;
int pending;
+
+ u64 count_transfer_polling;
+ u64 count_transfer_irq;
+ u64 count_transfer_irq_after_poll;
+
+ struct dentry *debugfs_dir;
};
+#if defined(CONFIG_DEBUG_FS)
+static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
+ const char *dname)
+{
+ char name[64];
+ struct dentry *dir;
+
+ /* get full name */
+ snprintf(name, sizeof(name), "spi-bcm2835aux-%s", dname);
+
+ /* the base directory */
+ dir = debugfs_create_dir(name, NULL);
+ bs->debugfs_dir = dir;
+
+ /* the counters */
+ debugfs_create_u64("count_transfer_polling", 0444, dir,
+ &bs->count_transfer_polling);
+ debugfs_create_u64("count_transfer_irq", 0444, dir,
+ &bs->count_transfer_irq);
+ debugfs_create_u64("count_transfer_irq_after_poll", 0444, dir,
+ &bs->count_transfer_irq_after_poll);
+}
+
+static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
+{
+ debugfs_remove_recursive(bs->debugfs_dir);
+ bs->debugfs_dir = NULL;
+}
+#else
+static void bcm2835aux_debugfs_create(struct bcm2835aux_spi *bs,
+ const char *dname)
+{
+}
+
+static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
{
return readl(bs->regs + reg);
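
Note: the counters land under debugfs, so they can be inspected while the
driver runs. A small userspace sketch; the mount point and the device name
in the path are assumptions (the directory is named after dev_name() of the
platform device, e.g. 3f215080.spi on a Raspberry Pi 3):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/debug/spi-bcm2835aux-3f215080.spi/"
                        "count_transfer_polling", "r");
        unsigned long long n;

        if (!f)
            return 1;
        if (fscanf(f, "%llu", &n) == 1)
            printf("transfers completed by polling: %llu\n", n);
        fclose(f);
        return 0;
    }
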
@@ -123,9 +171,6 @@ static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs)
data = bcm2835aux_rd(bs, BCM2835_AUX_SPI_IO);
if (bs->rx_buf) {
switch (count) {
- case 4:
- *bs->rx_buf++ = (data >> 24) & 0xff;
- /* fallthrough */
case 3:
*bs->rx_buf++ = (data >> 16) & 0xff;
/* fallthrough */
@@ -178,24 +223,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
}
-static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
{
- struct spi_master *master = dev_id;
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
- irqreturn_t ret = IRQ_NONE;
-
- /* IRQ may be shared, so return if our interrupts are disabled */
- if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
- (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
- return ret;
+ u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
/* check if we have data to read */
- while (bs->rx_len &&
- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
- BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
+ for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
+ stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
bcm2835aux_rd_fifo(bs);
- ret = IRQ_HANDLED;
- }
/* check if we have data to write */
while (bs->tx_len &&
@@ -203,16 +238,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
BCM2835_AUX_SPI_STAT_TX_FULL))) {
bcm2835aux_wr_fifo(bs);
- ret = IRQ_HANDLED;
}
+}
- /* and check if we have reached "done" */
- while (bs->rx_len &&
- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
- BCM2835_AUX_SPI_STAT_BUSY))) {
- bcm2835aux_rd_fifo(bs);
- ret = IRQ_HANDLED;
- }
+static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* IRQ may be shared, so return if our interrupts are disabled */
+ if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+ (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+ return IRQ_NONE;
+
+ /* do common fifo handling */
+ bcm2835aux_spi_transfer_helper(bs);
if (!bs->tx_len) {
/* disable tx fifo empty interrupt */
@@ -226,8 +266,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
complete(&master->xfer_completion);
}
- /* and return */
- return ret;
+ return IRQ_HANDLED;
}
static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
@@ -251,6 +290,9 @@ static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ /* update statistics */
+ bs->count_transfer_irq++;
+
/* fill in registers and fifos before enabling interrupts */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
@@ -273,35 +315,22 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
unsigned long timeout;
- u32 stat;
+
+ /* update statistics */
+ bs->count_transfer_polling++;
/* configure spi */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
- /* set the timeout */
- timeout = jiffies + BCM2835_AUX_SPI_POLLING_JIFFIES;
+ /* set the timeout to at least 2 jiffies */
+ timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
/* loop until finished the transfer */
while (bs->rx_len) {
- /* read status */
- stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
- /* fill in tx fifo with remaining data */
- if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
- bcm2835aux_wr_fifo(bs);
- continue;
- }
-
- /* read data from fifo for both cases */
- if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
- bcm2835aux_rd_fifo(bs);
- continue;
- }
- if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
- bcm2835aux_rd_fifo(bs);
- continue;
- }
+ /* do common fifo handling */
+ bcm2835aux_spi_transfer_helper(bs);
/* there is still data pending to read check the timeout */
if (bs->rx_len && time_after(jiffies, timeout)) {
@@ -310,6 +339,7 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
jiffies - timeout,
bs->tx_len, bs->rx_len);
/* forward to interrupt handler */
+ bs->count_transfer_irq_after_poll++;
return __bcm2835aux_spi_transfer_one_irq(master,
spi, tfr);
}
@@ -324,8 +354,8 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
struct spi_transfer *tfr)
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
- unsigned long spi_hz, clk_hz, speed;
- unsigned long spi_used_hz;
+ unsigned long spi_hz, clk_hz, speed, spi_used_hz;
+ unsigned long hz_per_byte, byte_limit;
/* calculate the registers to handle
*
@@ -369,14 +399,15 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
* of Hz per byte per polling limit. E.g., we can transfer 1 byte in
* 30 µs per 300,000 Hz of bus clock.
*/
-#define HZ_PER_BYTE ((9 * 1000000) / BCM2835_AUX_SPI_POLLING_LIMIT_US)
+ hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
+ byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
+
/* run in polling mode for short transfers */
- if (tfr->len < spi_used_hz / HZ_PER_BYTE)
+ if (tfr->len < byte_limit)
return bcm2835aux_spi_transfer_one_poll(master, spi, tfr);
/* run in interrupt mode for all others */
return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
-#undef HZ_PER_BYTE
}
static int bcm2835aux_spi_prepare_message(struct spi_master *master,
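
Note: the polling-vs-interrupt cutover deserves a worked number. With the
default polling_limit_us = 30, the driver budgets roughly 9 clock cycles
per byte, so hz_per_byte = 9,000,000 / 30 = 300,000, and a 5 MHz bus clock
gives byte_limit = 5,000,000 / 300,000 = 16: transfers shorter than 16
bytes are polled, longer ones take the interrupt path. A standalone rerun
of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long polling_limit_us = 30;    /* module parameter default */
        unsigned long spi_used_hz = 5000000;    /* effective bus clock */
        unsigned long hz_per_byte, byte_limit;

        /* ~9 clock cycles per byte must fit inside the polling budget */
        hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
        byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;

        printf("poll transfers shorter than %lu bytes\n", byte_limit);  /* 16 */
        return 0;
    }
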
@@ -421,6 +452,50 @@ static void bcm2835aux_spi_handle_err(struct spi_master *master,
bcm2835aux_spi_reset_hw(bs);
}
+static int bcm2835aux_spi_setup(struct spi_device *spi)
+{
+ int ret;
+
+ /* sanity check for native cs */
+ if (spi->mode & SPI_NO_CS)
+ return 0;
+ if (gpio_is_valid(spi->cs_gpio)) {
+ /* with gpio-cs, set the GPIO to the correct level and as
+ * output (in case the DT has the GPIO configured as native
+ * CS rather than as output)
+ */
+ ret = gpio_direction_output(spi->cs_gpio,
+ (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+ if (ret)
+ dev_err(&spi->dev,
+ "could not set gpio %i as output: %i\n",
+ spi->cs_gpio, ret);
+
+ return ret;
+ }
+
+ /* for dt-backwards compatibility: only support native on CS0
+ * known things not supported with broken native CS:
+ * * multiple chip-selects: cs0-cs2 are all
+ * simultaneously asserted whenever there is a transfer
+ * this even includes SPI_NO_CS
+ * * SPI_CS_HIGH: cs are always asserted low
+ * * cs_change: cs is deasserted after each spi_transfer
+ * * cs_delay_usec: cs is always deasserted one SCK cycle
+ * after the last transfer
+ * probably more...
+ */
+ dev_warn(&spi->dev,
+ "Native CS is not supported - please configure cs-gpio in device-tree\n");
+
+ if (spi->chip_select == 0)
+ return 0;
+
+ dev_warn(&spi->dev, "Native CS is not working for cs > 0\n");
+
+ return -EINVAL;
+}
+
static int bcm2835aux_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -438,7 +513,19 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->num_chipselect = -1;
+ /* even though the driver never officially supported native CS
+ * allow a single native CS for legacy DT support purposes when
+ * no cs-gpio is configured.
+ * Known limitations for native cs are:
+ * * multiple chip-selects: cs0-cs2 are all simultaneously asserted
+ * whenever there is a transfer - this even includes SPI_NO_CS
+ * * SPI_CS_HIGH: is ignored - cs is always asserted low
+ * * cs_change: cs is deasserted after each spi_transfer
+ * * cs_delay_usec: cs is always deasserted one SCK cycle after
+ * a spi_transfer
+ */
+ master->num_chipselect = 1;
+ master->setup = bcm2835aux_spi_setup;
master->transfer_one = bcm2835aux_spi_transfer_one;
master->handle_err = bcm2835aux_spi_handle_err;
master->prepare_message = bcm2835aux_spi_prepare_message;
@@ -502,6 +589,8 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
+ bcm2835aux_debugfs_create(bs, dev_name(&pdev->dev));
+
return 0;
out_clk_disable:
@@ -516,6 +605,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ bcm2835aux_debugfs_remove(bs);
+
bcm2835aux_spi_reset_hw(bs);
/* disable the HW block by releasing the clock */
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index dd9a8c54a693..4243e53f9f7b 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -335,6 +335,42 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
/*----------------------------------------------------------------------*/
+int spi_bitbang_init(struct spi_bitbang *bitbang)
+{
+ struct spi_master *master = bitbang->master;
+
+ if (!master || !bitbang->chipselect)
+ return -EINVAL;
+
+ mutex_init(&bitbang->lock);
+
+ if (!master->mode_bits)
+ master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
+
+ if (master->transfer || master->transfer_one_message)
+ return -EINVAL;
+
+ master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
+ master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
+ master->transfer_one = spi_bitbang_transfer_one;
+ master->set_cs = spi_bitbang_set_cs;
+
+ if (!bitbang->txrx_bufs) {
+ bitbang->use_dma = 0;
+ bitbang->txrx_bufs = spi_bitbang_bufs;
+ if (!master->setup) {
+ if (!bitbang->setup_transfer)
+ bitbang->setup_transfer =
+ spi_bitbang_setup_transfer;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_init);
+
/**
* spi_bitbang_start - start up a polled/bitbanging SPI master driver
* @bitbang: driver handle
@@ -368,33 +404,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
struct spi_master *master = bitbang->master;
int ret;
- if (!master || !bitbang->chipselect)
- return -EINVAL;
-
- mutex_init(&bitbang->lock);
-
- if (!master->mode_bits)
- master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
-
- if (master->transfer || master->transfer_one_message)
- return -EINVAL;
-
- master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
- master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
- master->transfer_one = spi_bitbang_transfer_one;
- master->set_cs = spi_bitbang_set_cs;
-
- if (!bitbang->txrx_bufs) {
- bitbang->use_dma = 0;
- bitbang->txrx_bufs = spi_bitbang_bufs;
- if (!master->setup) {
- if (!bitbang->setup_transfer)
- bitbang->setup_transfer =
- spi_bitbang_setup_transfer;
- master->setup = spi_bitbang_setup;
- master->cleanup = spi_bitbang_cleanup;
- }
- }
+ ret = spi_bitbang_init(bitbang);
+ if (ret)
+ return ret;
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
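
Note: splitting spi_bitbang_init() out of spi_bitbang_start() lets a driver
fill in the bitbang callbacks and then hand registration to the
device-managed API, removing the need for an explicit remove path. A hedged
sketch of the intended call sequence (the callback name is illustrative):

    /* inside probe(), after allocating the master */
    bitbang->master = master;
    bitbang->chipselect = my_chipselect;

    status = spi_bitbang_init(bitbang); /* wires up transfer_one etc. */
    if (status)
        return status;

    /* devm unregisters on unbind; no spi_bitbang_stop() required */
    return devm_spi_register_master(&pdev->dev, spi_master_get(master));
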
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 4bd59a93d988..de952b17bc10 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -30,6 +30,7 @@
struct dw_spi_mmio {
struct dw_spi dws;
struct clk *clk;
+ struct clk *pclk;
void *priv;
};
@@ -172,6 +173,14 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Optional clock needed to access the registers */
+ dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
+ if (IS_ERR(dwsmmio->pclk))
+ return PTR_ERR(dwsmmio->pclk);
+ ret = clk_prepare_enable(dwsmmio->pclk);
+ if (ret)
+ goto out_clk;
+
dws->bus_num = pdev->id;
dws->max_freq = clk_get_rate(dwsmmio->clk);
@@ -199,6 +208,8 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
return 0;
out:
+ clk_disable_unprepare(dwsmmio->pclk);
+out_clk:
clk_disable_unprepare(dwsmmio->clk);
return ret;
}
@@ -208,6 +219,7 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
dw_spi_remove_host(&dwsmmio->dws);
+ clk_disable_unprepare(dwsmmio->pclk);
clk_disable_unprepare(dwsmmio->clk);
return 0;
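
Note: devm_clk_get_optional() returns NULL, not an error pointer, when the
clock is simply absent, and clk_prepare_enable(NULL) is a no-op returning 0,
so the probe path above works unchanged whether or not a "pclk" is wired up
in the device tree. The pattern, annotated:

    dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
    if (IS_ERR(dwsmmio->pclk))  /* a real error, e.g. -EPROBE_DEFER */
        return PTR_ERR(dwsmmio->pclk);

    ret = clk_prepare_enable(dwsmmio->pclk);    /* NULL clk: no-op, returns 0 */
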
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 79fc3940245a..81889389280b 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -28,7 +28,6 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
-#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/dma-ep93xx.h>
@@ -652,7 +651,6 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
struct resource *res;
int irq;
int error;
- int i;
info = dev_get_platdata(&pdev->dev);
if (!info) {
@@ -676,6 +674,7 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ master->use_gpio_descriptors = true;
master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
master->prepare_message = ep93xx_spi_prepare_message;
@@ -683,31 +682,11 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
-
- master->num_chipselect = info->num_chipselect;
- master->cs_gpios = devm_kcalloc(&master->dev,
- master->num_chipselect, sizeof(int),
- GFP_KERNEL);
- if (!master->cs_gpios) {
- error = -ENOMEM;
- goto fail_release_master;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- master->cs_gpios[i] = info->chipselect[i];
-
- if (!gpio_is_valid(master->cs_gpios[i]))
- continue;
-
- error = devm_gpio_request_one(&pdev->dev, master->cs_gpios[i],
- GPIOF_OUT_INIT_HIGH,
- "ep93xx-spi");
- if (error) {
- dev_err(&pdev->dev, "could not request cs gpio %d\n",
- master->cs_gpios[i]);
- goto fail_release_master;
- }
- }
+ /*
+ * The SPI core will count the number of GPIO descriptors to figure
+ * out the number of chip selects available on the platform.
+ */
+ master->num_chipselect = 0;
platform_set_drvdata(pdev, master);
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index f303f306b38e..483734bc1b1e 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -95,8 +95,10 @@ static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
struct mpc8xxx_spi_probe_info {
struct fsl_spi_platform_data pdata;
+ int ngpios;
int *gpios;
bool *alow_flags;
+ __be32 __iomem *immr_spi_cs;
};
extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi);
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 391863914043..d08e9324140e 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -8,7 +8,10 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -16,7 +19,12 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/dma-imx.h>
+#include <linux/platform_data/spi-imx.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -24,6 +32,11 @@
#define DRIVER_NAME "fsl_lpspi"
+#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */
+
+/* The maximum number of bytes that eDMA can transfer at once. */
+#define FSL_LPSPI_MAX_EDMA_BYTES ((1 << 15) - 1)
+
/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID 0x0
#define IMX7ULP_PARAM 0x4
@@ -57,12 +70,14 @@
#define IER_FCIE BIT(9)
#define IER_RDIE BIT(1)
#define IER_TDIE BIT(0)
+#define DER_RDDE BIT(1)
+#define DER_TDDE BIT(0)
#define CFGR1_PCSCFG BIT(27)
#define CFGR1_PINCFG (BIT(24)|BIT(25))
#define CFGR1_PCSPOL BIT(8)
#define CFGR1_NOSTALL BIT(3)
#define CFGR1_MASTER BIT(0)
-#define FSR_RXCOUNT (BIT(16)|BIT(17)|BIT(18))
+#define FSR_TXCOUNT (0xFF)
#define RSR_RXEMPTY BIT(1)
#define TCR_CPOL BIT(31)
#define TCR_CPHA BIT(30)
@@ -84,8 +99,11 @@ struct lpspi_config {
struct fsl_lpspi_data {
struct device *dev;
void __iomem *base;
- struct clk *clk;
+ unsigned long base_phys;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
bool is_slave;
+ bool is_first_byte;
void *rx_buf;
const void *tx_buf;
@@ -101,6 +119,13 @@ struct fsl_lpspi_data {
struct completion xfer_done;
bool slave_aborted;
+
+ /* DMA */
+ bool usedma;
+ struct completion dma_rx_completion;
+ struct completion dma_tx_completion;
+
+ int chipselect[0];
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
@@ -147,12 +172,48 @@ static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}
+static int fsl_lpspi_bytes_per_word(const int bpw)
+{
+ return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
+}
+
+static bool fsl_lpspi_can_dma(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ unsigned int bytes_per_word;
+
+ if (!controller->dma_rx)
+ return false;
+
+ bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);
+
+ switch (bytes_per_word) {
+ case 1:
+ case 2:
+ case 4:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
+ int ret;
+
+ ret = pm_runtime_get_sync(fsl_lpspi->dev);
+ if (ret < 0) {
+ dev_err(fsl_lpspi->dev, "failed to enable clock\n");
+ return ret;
+ }
- return clk_prepare_enable(fsl_lpspi->clk);
+ return 0;
}
static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
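
Note: the eDMA setup later in this file only handles 1-, 2- and 4-byte bus
widths, which is why fsl_lpspi_can_dma() must reject word sizes that round
up to 3 bytes: DIV_ROUND_UP(24, 8) = 3, so 24-bit transfers fall back to
PIO while 8-, 16- and 32-bit transfers may use DMA. A standalone check of
the rounding:

    #include <stdio.h>

    #define BITS_PER_BYTE 8
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int bpw;

        for (bpw = 8; bpw <= 32; bpw += 8)  /* prints 1, 2, 3, 4 */
            printf("bpw %2d -> %d byte(s) per word\n",
                   bpw, DIV_ROUND_UP(bpw, BITS_PER_BYTE));
        return 0;
    }
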
@@ -160,7 +221,22 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
- clk_disable_unprepare(fsl_lpspi->clk);
+ pm_runtime_mark_last_busy(fsl_lpspi->dev);
+ pm_runtime_put_autosuspend(fsl_lpspi->dev);
+
+ return 0;
+}
+
+static int fsl_lpspi_prepare_message(struct spi_controller *controller,
+ struct spi_message *msg)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+ struct spi_device *spi = msg->spi;
+ int gpio = fsl_lpspi->chipselect[spi->chip_select];
+
+ if (gpio_is_valid(gpio))
+ gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
return 0;
}
@@ -197,8 +273,7 @@ static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
fsl_lpspi->rx(fsl_lpspi);
}
-static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
- bool is_first_xfer)
+static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp = 0;
@@ -213,11 +288,13 @@ static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
* For the first transfer, clear TCR_CONTC to assert SS.
* For subsequent transfer, set TCR_CONTC to keep SS asserted.
*/
- temp |= TCR_CONT;
- if (is_first_xfer)
- temp &= ~TCR_CONTC;
- else
- temp |= TCR_CONTC;
+ if (!fsl_lpspi->usedma) {
+ temp |= TCR_CONT;
+ if (fsl_lpspi->is_first_byte)
+ temp &= ~TCR_CONTC;
+ else
+ temp |= TCR_CONTC;
+ }
}
writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
@@ -228,7 +305,11 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
- temp = fsl_lpspi->watermark >> 1 | (fsl_lpspi->watermark >> 1) << 16;
+ if (!fsl_lpspi->usedma)
+ temp = fsl_lpspi->watermark >> 1 |
+ (fsl_lpspi->watermark >> 1) << 16;
+ else
+ temp = fsl_lpspi->watermark >> 1;
writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
@@ -241,7 +322,14 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
unsigned int perclk_rate, scldiv;
u8 prescale;
- perclk_rate = clk_get_rate(fsl_lpspi->clk);
+ perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
+
+ if (config.speed_hz > perclk_rate / 2) {
+ dev_err(fsl_lpspi->dev,
+ "per-clk should be at least two times of transfer speed");
+ return -EINVAL;
+ }
+
for (prescale = 0; prescale < 8; prescale++) {
scldiv = perclk_rate /
(clkdivs[prescale] * config.speed_hz) - 2;
@@ -257,12 +345,59 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
fsl_lpspi->base + IMX7ULP_CCR);
- dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale =%d, scldiv=%d\n",
+ dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n",
perclk_rate, config.speed_hz, prescale, scldiv);
return 0;
}
+static int fsl_lpspi_dma_configure(struct spi_controller *controller)
+{
+ int ret;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config rx = {}, tx = {};
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+
+ switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
+ case 4:
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ case 2:
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 1:
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tx.direction = DMA_MEM_TO_DEV;
+ tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
+ tx.dst_addr_width = buswidth;
+ tx.dst_maxburst = 1;
+ ret = dmaengine_slave_config(controller->dma_tx, &tx);
+ if (ret) {
+ dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
+ ret);
+ return ret;
+ }
+
+ rx.direction = DMA_DEV_TO_MEM;
+ rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
+ rx.src_addr_width = buswidth;
+ rx.src_maxburst = 1;
+ ret = dmaengine_slave_config(controller->dma_rx, &rx);
+ if (ret) {
+ dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
@@ -288,18 +423,27 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
temp |= CR_RRF | CR_RTF | CR_MEN;
writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+ temp = 0;
+ if (fsl_lpspi->usedma)
+ temp = DER_TDDE | DER_RDDE;
+ writel(temp, fsl_lpspi->base + IMX7ULP_DER);
+
return 0;
}
-static void fsl_lpspi_setup_transfer(struct spi_device *spi,
+static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
+ struct spi_device *spi,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(spi->controller);
+ if (t == NULL)
+ return -EINVAL;
+
fsl_lpspi->config.mode = spi->mode;
- fsl_lpspi->config.bpw = t ? t->bits_per_word : spi->bits_per_word;
- fsl_lpspi->config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
+ fsl_lpspi->config.bpw = t->bits_per_word;
+ fsl_lpspi->config.speed_hz = t->speed_hz;
fsl_lpspi->config.chip_select = spi->chip_select;
if (!fsl_lpspi->config.speed_hz)
@@ -324,7 +468,12 @@ static void fsl_lpspi_setup_transfer(struct spi_device *spi,
else
fsl_lpspi->watermark = fsl_lpspi->txfifosize;
- fsl_lpspi_config(fsl_lpspi);
+ if (fsl_lpspi_can_dma(controller, spi, t))
+ fsl_lpspi->usedma = 1;
+ else
+ fsl_lpspi->usedma = 0;
+
+ return fsl_lpspi_config(fsl_lpspi);
}
static int fsl_lpspi_slave_abort(struct spi_controller *controller)
@@ -333,7 +482,13 @@ static int fsl_lpspi_slave_abort(struct spi_controller *controller)
spi_controller_get_devdata(controller);
fsl_lpspi->slave_aborted = true;
- complete(&fsl_lpspi->xfer_done);
+ if (!fsl_lpspi->usedma)
+ complete(&fsl_lpspi->xfer_done);
+ else {
+ complete(&fsl_lpspi->dma_tx_completion);
+ complete(&fsl_lpspi->dma_rx_completion);
+ }
+
return 0;
}
@@ -362,8 +517,10 @@ static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
- /* Disable all interrupt */
- fsl_lpspi_intctrl(fsl_lpspi, 0);
+ if (!fsl_lpspi->usedma) {
+ /* Disable all interrupt */
+ fsl_lpspi_intctrl(fsl_lpspi, 0);
+ }
/* W1C for all flags in SR */
temp = 0x3F << 8;
@@ -376,8 +533,177 @@ static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
return 0;
}
-static int fsl_lpspi_transfer_one(struct spi_controller *controller,
- struct spi_device *spi,
+static void fsl_lpspi_dma_rx_callback(void *cookie)
+{
+ struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
+
+ complete(&fsl_lpspi->dma_rx_completion);
+}
+
+static void fsl_lpspi_dma_tx_callback(void *cookie)
+{
+ struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
+
+ complete(&fsl_lpspi->dma_tx_completion);
+}
+
+static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
+ int size)
+{
+ unsigned long timeout = 0;
+
+ /* Time with actual data transfer and CS change delay related to HW */
+ timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;
+
+ /* Add extra second for scheduler related activities */
+ timeout += 1;
+
+ /* Double calculated timeout */
+ return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+}
+
+static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
+ struct fsl_lpspi_data *fsl_lpspi,
+ struct spi_transfer *transfer)
+{
+ struct dma_async_tx_descriptor *desc_tx, *desc_rx;
+ unsigned long transfer_timeout;
+ unsigned long timeout;
+ struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
+ int ret;
+
+ ret = fsl_lpspi_dma_configure(controller);
+ if (ret)
+ return ret;
+
+ desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx)
+ return -EINVAL;
+
+ desc_rx->callback = fsl_lpspi_dma_rx_callback;
+ desc_rx->callback_param = (void *)fsl_lpspi;
+ dmaengine_submit(desc_rx);
+ reinit_completion(&fsl_lpspi->dma_rx_completion);
+ dma_async_issue_pending(controller->dma_rx);
+
+ desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
+ tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ dmaengine_terminate_all(controller->dma_tx);
+ return -EINVAL;
+ }
+
+ desc_tx->callback = fsl_lpspi_dma_tx_callback;
+ desc_tx->callback_param = (void *)fsl_lpspi;
+ dmaengine_submit(desc_tx);
+ reinit_completion(&fsl_lpspi->dma_tx_completion);
+ dma_async_issue_pending(controller->dma_tx);
+
+ fsl_lpspi->slave_aborted = false;
+
+ if (!fsl_lpspi->is_slave) {
+ transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
+ transfer->len);
+
+ /* Wait for eDMA to finish the data transfer. */
+ timeout = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -ETIMEDOUT;
+ }
+
+ timeout = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -ETIMEDOUT;
+ }
+ } else {
+ if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
+ fsl_lpspi->slave_aborted) {
+ dev_dbg(fsl_lpspi->dev,
+ "I/O Error in DMA TX interrupted\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -EINTR;
+ }
+
+ if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
+ fsl_lpspi->slave_aborted) {
+ dev_dbg(fsl_lpspi->dev,
+ "I/O Error in DMA RX interrupted\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ fsl_lpspi_reset(fsl_lpspi);
+ return -EINTR;
+ }
+ }
+
+ fsl_lpspi_reset(fsl_lpspi);
+
+ return 0;
+}
+
+static void fsl_lpspi_dma_exit(struct spi_controller *controller)
+{
+ if (controller->dma_rx) {
+ dma_release_channel(controller->dma_rx);
+ controller->dma_rx = NULL;
+ }
+
+ if (controller->dma_tx) {
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
+ }
+}
+
+static int fsl_lpspi_dma_init(struct device *dev,
+ struct fsl_lpspi_data *fsl_lpspi,
+ struct spi_controller *controller)
+{
+ int ret;
+
+ /* Prepare for TX DMA: */
+ controller->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(controller->dma_tx)) {
+ ret = PTR_ERR(controller->dma_tx);
+ dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
+ controller->dma_tx = NULL;
+ goto err;
+ }
+
+ /* Prepare for RX DMA: */
+ controller->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(controller->dma_rx)) {
+ ret = PTR_ERR(controller->dma_rx);
+ dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
+ controller->dma_rx = NULL;
+ goto err;
+ }
+
+ init_completion(&fsl_lpspi->dma_rx_completion);
+ init_completion(&fsl_lpspi->dma_tx_completion);
+ controller->can_dma = fsl_lpspi_can_dma;
+ controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;
+
+ return 0;
+err:
+ fsl_lpspi_dma_exit(controller);
+ return ret;
+}
+
+static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
@@ -402,37 +728,30 @@ static int fsl_lpspi_transfer_one(struct spi_controller *controller,
return 0;
}
-static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller,
- struct spi_message *msg)
+static int fsl_lpspi_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
struct fsl_lpspi_data *fsl_lpspi =
- spi_controller_get_devdata(controller);
- struct spi_device *spi = msg->spi;
- struct spi_transfer *xfer;
- bool is_first_xfer = true;
- int ret = 0;
-
- msg->status = 0;
- msg->actual_length = 0;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- fsl_lpspi_setup_transfer(spi, xfer);
- fsl_lpspi_set_cmd(fsl_lpspi, is_first_xfer);
-
- is_first_xfer = false;
+ spi_controller_get_devdata(controller);
+ int ret;
- ret = fsl_lpspi_transfer_one(controller, spi, xfer);
- if (ret < 0)
- goto complete;
+ fsl_lpspi->is_first_byte = true;
+ ret = fsl_lpspi_setup_transfer(controller, spi, t);
+ if (ret < 0)
+ return ret;
- msg->actual_length += xfer->len;
- }
+ fsl_lpspi_set_cmd(fsl_lpspi);
+ fsl_lpspi->is_first_byte = false;
-complete:
- msg->status = ret;
- spi_finalize_current_message(controller);
+ if (fsl_lpspi->usedma)
+ ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
+ else
+ ret = fsl_lpspi_pio_transfer(controller, t);
+ if (ret < 0)
+ return ret;
- return ret;
+ return 0;
}
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
@@ -452,7 +771,7 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
}
if (temp_SR & SR_MBF ||
- readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_RXCOUNT) {
+ readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
return IRQ_HANDLED;
@@ -467,15 +786,67 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
return IRQ_NONE;
}
+#ifdef CONFIG_PM
+static int fsl_lpspi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+ struct fsl_lpspi_data *fsl_lpspi;
+ int ret;
+
+ fsl_lpspi = spi_controller_get_devdata(controller);
+
+ ret = clk_prepare_enable(fsl_lpspi->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
+ if (ret) {
+ clk_disable_unprepare(fsl_lpspi->clk_per);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_lpspi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *controller = dev_get_drvdata(dev);
+ struct fsl_lpspi_data *fsl_lpspi;
+
+ fsl_lpspi = spi_controller_get_devdata(controller);
+
+ clk_disable_unprepare(fsl_lpspi->clk_per);
+ clk_disable_unprepare(fsl_lpspi->clk_ipg);
+
+ return 0;
+}
+#endif
+
+static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
+{
+ struct device *dev = fsl_lpspi->dev;
+
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+
+ return 0;
+}
+
static int fsl_lpspi_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct fsl_lpspi_data *fsl_lpspi;
struct spi_controller *controller;
+ struct spi_imx_master *lpspi_platform_info =
+ dev_get_platdata(&pdev->dev);
struct resource *res;
- int ret, irq;
+ int i, ret, irq;
u32 temp;
+ bool is_slave;
- if (of_property_read_bool((&pdev->dev)->of_node, "spi-slave"))
+ is_slave = of_property_read_bool(np, "spi-slave");
+ if (is_slave)
controller = spi_alloc_slave(&pdev->dev,
sizeof(struct fsl_lpspi_data));
else
@@ -487,15 +858,35 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, controller);
- controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- controller->bus_num = pdev->id;
-
fsl_lpspi = spi_controller_get_devdata(controller);
fsl_lpspi->dev = &pdev->dev;
- fsl_lpspi->is_slave = of_property_read_bool((&pdev->dev)->of_node,
- "spi-slave");
+ fsl_lpspi->is_slave = is_slave;
- controller->transfer_one_message = fsl_lpspi_transfer_one_msg;
+ if (!fsl_lpspi->is_slave) {
+ for (i = 0; i < controller->num_chipselect; i++) {
+ int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+
+ if (!gpio_is_valid(cs_gpio) && lpspi_platform_info)
+ cs_gpio = lpspi_platform_info->chipselect[i];
+
+ fsl_lpspi->chipselect[i] = cs_gpio;
+ if (!gpio_is_valid(cs_gpio))
+ continue;
+
+ ret = devm_gpio_request(&pdev->dev,
+ fsl_lpspi->chipselect[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get cs gpios\n");
+ goto out_controller_put;
+ }
+ }
+ controller->cs_gpios = fsl_lpspi->chipselect;
+ controller->prepare_message = fsl_lpspi_prepare_message;
+ }
+
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ controller->transfer_one = fsl_lpspi_transfer_one;
controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
@@ -512,6 +903,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = PTR_ERR(fsl_lpspi->base);
goto out_controller_put;
}
+ fsl_lpspi->base_phys = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -526,23 +918,39 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
goto out_controller_put;
}
- fsl_lpspi->clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(fsl_lpspi->clk)) {
- ret = PTR_ERR(fsl_lpspi->clk);
+ fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(fsl_lpspi->clk_per)) {
+ ret = PTR_ERR(fsl_lpspi->clk_per);
goto out_controller_put;
}
- ret = clk_prepare_enable(fsl_lpspi->clk);
- if (ret) {
- dev_err(&pdev->dev, "can't enable lpspi clock, ret=%d\n", ret);
+ fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fsl_lpspi->clk_ipg)) {
+ ret = PTR_ERR(fsl_lpspi->clk_ipg);
+ goto out_controller_put;
+ }
+
+ /* enable the clock */
+ ret = fsl_lpspi_init_rpm(fsl_lpspi);
+ if (ret)
goto out_controller_put;
+
+ ret = pm_runtime_get_sync(fsl_lpspi->dev);
+ if (ret < 0) {
+ dev_err(fsl_lpspi->dev, "failed to enable clock\n");
+ return ret;
}
temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
- clk_disable_unprepare(fsl_lpspi->clk);
+ ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
+ if (ret == -EPROBE_DEFER)
+ goto out_controller_put;
+
+ if (ret < 0)
+ dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
@@ -564,15 +972,50 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
- clk_disable_unprepare(fsl_lpspi->clk);
+ pm_runtime_disable(fsl_lpspi->dev);
+
+ spi_master_put(controller);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int fsl_lpspi_suspend(struct device *dev)
+{
+ int ret;
+
+ pinctrl_pm_select_sleep_state(dev);
+ ret = pm_runtime_force_suspend(dev);
+ return ret;
+}
+
+static int fsl_lpspi_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(dev, "Error in resume: %d\n", ret);
+ return ret;
+ }
+
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops fsl_lpspi_pm_ops = {
+ SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
+ fsl_lpspi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
+};
+
static struct platform_driver fsl_lpspi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = fsl_lpspi_dt_ids,
+ .pm = &fsl_lpspi_pm_ops,
},
.probe = fsl_lpspi_probe,
.remove = fsl_lpspi_remove,
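
Note: fsl_lpspi_calculate_timeout() above works in whole seconds, so for
realistic transfers the integer division yields 0 and the one-second
scheduler allowance dominates: 4096 bytes at 1 MHz gives
(8 + 4) * 4096 / 1000000 = 0, plus 1 = 1 s, doubled to 2000 ms before the
conversion to jiffies. A standalone rerun of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long speed_hz = 1000000;
        int size = 4096;
        unsigned long timeout;

        /* data time plus CS-change delay, in whole seconds */
        timeout = (8 + 4) * size / speed_hz;    /* 49152 / 1000000 -> 0 */
        timeout += 1;                           /* scheduler slack */

        printf("timeout: %lu ms\n", 2 * timeout * 1000);    /* -> 2000 */
        return 0;
    }
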
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 6a713f78a62e..41a49b93ca60 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -882,7 +882,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = np;
- ret = spi_register_controller(ctlr);
+ ret = devm_spi_register_controller(dev, ctlr);
if (ret)
goto err_destroy_mutex;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 8f2e97857e8b..b36ac6aa3b1f 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -39,6 +39,14 @@
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
+#ifdef CONFIG_FSL_SOC
+#include <sysdev/fsl_soc.h>
+#endif
+
+/* Specific to the MPC8306/MPC8309 */
+#define IMMR_SPI_CS_OFFSET 0x14c
+#define SPI_BOOT_SEL_BIT 0x80000000
+
#include "spi-fsl-lib.h"
#include "spi-fsl-cpm.h"
#include "spi-fsl-spi.h"
@@ -355,33 +363,50 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
static int fsl_spi_do_one_msg(struct spi_master *master,
struct spi_message *m)
{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
struct spi_device *spi = m->spi;
struct spi_transfer *t, *first;
unsigned int cs_change;
const int nsecs = 50;
- int status;
+ int status, last_bpw;
+
+ /*
+ * In CPU mode, optimize large byte transfers to use larger
+ * bits_per_word values to reduce number of interrupts taken.
+ */
+ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->len < 256 || t->bits_per_word != 8)
+ continue;
+ if ((t->len & 3) == 0)
+ t->bits_per_word = 32;
+ else if ((t->len & 1) == 0)
+ t->bits_per_word = 16;
+ }
+ }
/* Don't allow changes if CS is active */
- first = list_first_entry(&m->transfers, struct spi_transfer,
- transfer_list);
+ cs_change = 1;
list_for_each_entry(t, &m->transfers, transfer_list) {
- if ((first->bits_per_word != t->bits_per_word) ||
- (first->speed_hz != t->speed_hz)) {
+ if (cs_change)
+ first = t;
+ cs_change = t->cs_change;
+ if (first->speed_hz != t->speed_hz) {
dev_err(&spi->dev,
- "bits_per_word/speed_hz should be same for the same SPI transfer\n");
+ "speed_hz cannot change while CS is active\n");
return -EINVAL;
}
}
+ last_bpw = -1;
cs_change = 1;
status = -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- if (cs_change)
- status = fsl_spi_setup_transfer(spi, t);
- if (status < 0)
- break;
- }
+ if (cs_change || last_bpw != t->bits_per_word)
+ status = fsl_spi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ last_bpw = t->bits_per_word;
if (cs_change) {
fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
@@ -701,10 +726,17 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
u16 cs = spi->chip_select;
- int gpio = pinfo->gpios[cs];
- bool alow = pinfo->alow_flags[cs];
- gpio_set_value(gpio, on ^ alow);
+ if (cs < pinfo->ngpios) {
+ int gpio = pinfo->gpios[cs];
+ bool alow = pinfo->alow_flags[cs];
+
+ gpio_set_value(gpio, on ^ alow);
+ } else {
+ if (WARN_ON_ONCE(cs > pinfo->ngpios || !pinfo->immr_spi_cs))
+ return;
+ iowrite32be(on ? SPI_BOOT_SEL_BIT : 0, pinfo->immr_spi_cs);
+ }
}
static int of_fsl_spi_get_chipselects(struct device *dev)
@@ -712,12 +744,15 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
struct device_node *np = dev->of_node;
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+ bool spisel_boot = IS_ENABLED(CONFIG_FSL_SOC) &&
+ of_property_read_bool(np, "fsl,spisel_boot");
int ngpios;
int i = 0;
int ret;
ngpios = of_gpio_count(np);
- if (ngpios <= 0) {
+ ngpios = max(ngpios, 0);
+ if (ngpios == 0 && !spisel_boot) {
/*
* SPI w/o chip-select line. One SPI device is still permitted
* though.
@@ -726,6 +761,7 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
return 0;
}
+ pinfo->ngpios = ngpios;
pinfo->gpios = kmalloc_array(ngpios, sizeof(*pinfo->gpios),
GFP_KERNEL);
if (!pinfo->gpios)
@@ -769,7 +805,18 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
}
}
- pdata->max_chipselect = ngpios;
+#if IS_ENABLED(CONFIG_FSL_SOC)
+ if (spisel_boot) {
+ pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
+ if (!pinfo->immr_spi_cs) {
+ ret = -ENOMEM;
+ i = ngpios - 1;
+ goto err_loop;
+ }
+ }
+#endif
+
+ pdata->max_chipselect = ngpios + spisel_boot;
pdata->cs_control = fsl_spi_cs_control;
return 0;
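
Note: the CPU-mode optimization at the top of this file's diff promotes
large 8-bit transfers to wider words purely from length alignment: a
4096-byte buffer ((len & 3) == 0) is clocked as 32-bit words, a 258-byte
buffer as 16-bit words, and a 255-byte buffer stays at 8 bits. The
selection rule, as a standalone sketch:

    #include <stdio.h>

    static int pick_bpw(int len, int bpw)
    {
        /* only large 8-bit transfers are worth coalescing */
        if (len < 256 || bpw != 8)
            return bpw;
        if ((len & 3) == 0)
            return 32;
        if ((len & 1) == 0)
            return 16;
        return 8;
    }

    int main(void)
    {
        printf("%d %d %d\n", pick_bpw(4096, 8), pick_bpw(258, 8),
               pick_bpw(255, 8));   /* -> 32 16 8 */
        return 0;
    }
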
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 53b35c56a557..487ee55d26f7 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -35,20 +35,16 @@
* platform_device->driver_data ... points to spi_gpio
*
* spi->controller_state ... reserved for bitbang framework code
- * spi->controller_data ... holds chipselect GPIO
*
* spi->master->dev.driver_data ... points to spi_gpio->bitbang
*/
struct spi_gpio {
struct spi_bitbang bitbang;
- struct spi_gpio_platform_data pdata;
- struct platform_device *pdev;
struct gpio_desc *sck;
struct gpio_desc *miso;
struct gpio_desc *mosi;
struct gpio_desc **cs_gpios;
- bool has_cs;
};
/*----------------------------------------------------------------------*/
@@ -96,12 +92,6 @@ spi_to_spi_gpio(const struct spi_device *spi)
return spi_gpio;
}
-static inline struct spi_gpio_platform_data *__pure
-spi_to_pdata(const struct spi_device *spi)
-{
- return &spi_to_spi_gpio(spi)->pdata;
-}
-
/* These helpers are in turn called by the bitbang inlines */
static inline void setsck(const struct spi_device *spi, int is_on)
{
@@ -224,7 +214,7 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
gpiod_set_value_cansleep(spi_gpio->sck, spi->mode & SPI_CPOL);
/* Drive chip select line, if we have one */
- if (spi_gpio->has_cs) {
+ if (spi_gpio->cs_gpios) {
struct gpio_desc *cs = spi_gpio->cs_gpios[spi->chip_select];
/* SPI chip selects are normally active-low */
@@ -242,10 +232,12 @@ static int spi_gpio_setup(struct spi_device *spi)
* The CS GPIOs have already been
* initialized from the descriptor lookup.
*/
- cs = spi_gpio->cs_gpios[spi->chip_select];
- if (!spi->controller_state && cs)
- status = gpiod_direction_output(cs,
- !(spi->mode & SPI_CS_HIGH));
+ if (spi_gpio->cs_gpios) {
+ cs = spi_gpio->cs_gpios[spi->chip_select];
+ if (!spi->controller_state && cs)
+ status = gpiod_direction_output(cs,
+ !(spi->mode & SPI_CS_HIGH));
+ }
if (!status)
status = spi_bitbang_setup(spi);
@@ -296,40 +288,20 @@ static void spi_gpio_cleanup(struct spi_device *spi)
* floating signals. (A weak pulldown would save power too, but many
* drivers expect to see all-ones data as the no slave "response".)
*/
-static int spi_gpio_request(struct device *dev,
- struct spi_gpio *spi_gpio,
- unsigned int num_chipselects,
- u16 *mflags)
+static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
{
- int i;
-
spi_gpio->mosi = devm_gpiod_get_optional(dev, "mosi", GPIOD_OUT_LOW);
if (IS_ERR(spi_gpio->mosi))
return PTR_ERR(spi_gpio->mosi);
- if (!spi_gpio->mosi)
- /* HW configuration without MOSI pin */
- *mflags |= SPI_MASTER_NO_TX;
spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN);
if (IS_ERR(spi_gpio->miso))
return PTR_ERR(spi_gpio->miso);
- /*
- * No setting SPI_MASTER_NO_RX here - if there is only a MOSI
- * pin connected the host can still do RX by changing the
- * direction of the line.
- */
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
if (IS_ERR(spi_gpio->sck))
return PTR_ERR(spi_gpio->sck);
- for (i = 0; i < num_chipselects; i++) {
- spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
- i, GPIOD_OUT_HIGH);
- if (IS_ERR(spi_gpio->cs_gpios[i]))
- return PTR_ERR(spi_gpio->cs_gpios[i]);
- }
-
return 0;
}
@@ -340,142 +312,134 @@ static const struct of_device_id spi_gpio_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
-static int spi_gpio_probe_dt(struct platform_device *pdev)
+static int spi_gpio_probe_dt(struct platform_device *pdev,
+ struct spi_master *master)
{
- int ret;
- u32 tmp;
- struct spi_gpio_platform_data *pdata;
- struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(spi_gpio_dt_ids, &pdev->dev);
-
- if (!of_id)
- return 0;
+ master->dev.of_node = pdev->dev.of_node;
+ master->use_gpio_descriptors = true;
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ return 0;
+}
+#else
+static inline int spi_gpio_probe_dt(struct platform_device *pdev,
+ struct spi_master *master)
+{
+ return 0;
+}
+#endif
+static int spi_gpio_probe_pdata(struct platform_device *pdev,
+ struct spi_master *master)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_gpio_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_gpio *spi_gpio = spi_master_get_devdata(master);
+ int i;
- ret = of_property_read_u32(np, "num-chipselects", &tmp);
- if (ret < 0) {
- dev_err(&pdev->dev, "num-chipselects property not found\n");
- goto error_free;
- }
+#ifdef GENERIC_BITBANG
+ if (!pdata || !pdata->num_chipselect)
+ return -ENODEV;
+#endif
+ /*
+ * The master needs to think there is a chipselect even if not
+ * connected
+ */
+ master->num_chipselect = pdata->num_chipselect ?: 1;
- pdata->num_chipselect = tmp;
- pdev->dev.platform_data = pdata;
+ spi_gpio->cs_gpios = devm_kcalloc(dev, master->num_chipselect,
+ sizeof(*spi_gpio->cs_gpios),
+ GFP_KERNEL);
+ if (!spi_gpio->cs_gpios)
+ return -ENOMEM;
- return 1;
+ for (i = 0; i < master->num_chipselect; i++) {
+ spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", i,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(spi_gpio->cs_gpios[i]))
+ return PTR_ERR(spi_gpio->cs_gpios[i]);
+ }
-error_free:
- devm_kfree(&pdev->dev, pdata);
- return ret;
+ return 0;
}
-#else
-static inline int spi_gpio_probe_dt(struct platform_device *pdev)
+
+static void spi_gpio_put(void *data)
{
- return 0;
+ spi_master_put(data);
}
-#endif
static int spi_gpio_probe(struct platform_device *pdev)
{
int status;
struct spi_master *master;
struct spi_gpio *spi_gpio;
- struct spi_gpio_platform_data *pdata;
- u16 master_flags = 0;
- bool use_of = 0;
+ struct device *dev = &pdev->dev;
+ struct spi_bitbang *bb;
+ const struct of_device_id *of_id;
- status = spi_gpio_probe_dt(pdev);
- if (status < 0)
- return status;
- if (status > 0)
- use_of = 1;
-
- pdata = dev_get_platdata(&pdev->dev);
-#ifdef GENERIC_BITBANG
- if (!pdata || (!use_of && !pdata->num_chipselect))
- return -ENODEV;
-#endif
+ of_id = of_match_device(spi_gpio_dt_ids, &pdev->dev);
- master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio));
+ master = spi_alloc_master(dev, sizeof(*spi_gpio));
if (!master)
return -ENOMEM;
- spi_gpio = spi_master_get_devdata(master);
-
- spi_gpio->cs_gpios = devm_kcalloc(&pdev->dev,
- pdata->num_chipselect,
- sizeof(*spi_gpio->cs_gpios),
- GFP_KERNEL);
- if (!spi_gpio->cs_gpios)
- return -ENOMEM;
+ status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master);
+ if (status)
+ return status;
- platform_set_drvdata(pdev, spi_gpio);
+ if (of_id)
+ status = spi_gpio_probe_dt(pdev, master);
+ else
+ status = spi_gpio_probe_pdata(pdev, master);
- /* Determine if we have chip selects connected */
- spi_gpio->has_cs = !!pdata->num_chipselect;
+ if (status)
+ return status;
- spi_gpio->pdev = pdev;
- if (pdata)
- spi_gpio->pdata = *pdata;
+ spi_gpio = spi_master_get_devdata(master);
- status = spi_gpio_request(&pdev->dev, spi_gpio,
- pdata->num_chipselect, &master_flags);
+ status = spi_gpio_request(dev, spi_gpio);
if (status)
return status;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
SPI_CS_HIGH;
- master->flags = master_flags;
+ if (!spi_gpio->mosi) {
+ /* HW configuration without MOSI pin
+ *
+ * No setting SPI_MASTER_NO_RX here - if there is only
+ * a MOSI pin connected the host can still do RX by
+ * changing the direction of the line.
+ */
+ master->flags = SPI_MASTER_NO_TX;
+ }
+
master->bus_num = pdev->id;
- /* The master needs to think there is a chipselect even if not connected */
- master->num_chipselect = spi_gpio->has_cs ? pdata->num_chipselect : 1;
master->setup = spi_gpio_setup;
master->cleanup = spi_gpio_cleanup;
-#ifdef CONFIG_OF
- master->dev.of_node = pdev->dev.of_node;
-#endif
- spi_gpio->bitbang.master = master;
- spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
- spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction;
+ bb = &spi_gpio->bitbang;
+ bb->master = master;
+ bb->chipselect = spi_gpio_chipselect;
+ bb->set_line_direction = spi_gpio_set_direction;
- if ((master_flags & SPI_MASTER_NO_TX) == 0) {
- spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
- spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
- spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
- spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
+ if (master->flags & SPI_MASTER_NO_TX) {
+ bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
+ bb->txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
+ bb->txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
+ bb->txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
} else {
- spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
- spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
- spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
- spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
+ bb->txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
+ bb->txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
+ bb->txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
+ bb->txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
}
- spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
+ bb->setup_transfer = spi_bitbang_setup_transfer;
- status = spi_bitbang_start(&spi_gpio->bitbang);
+ status = spi_bitbang_init(&spi_gpio->bitbang);
if (status)
- spi_master_put(master);
-
- return status;
-}
-
-static int spi_gpio_remove(struct platform_device *pdev)
-{
- struct spi_gpio *spi_gpio;
-
- spi_gpio = platform_get_drvdata(pdev);
-
- /* stop() unregisters child devices too */
- spi_bitbang_stop(&spi_gpio->bitbang);
-
- spi_master_put(spi_gpio->bitbang.master);
+ return status;
- return 0;
+ return devm_spi_register_master(&pdev->dev, spi_master_get(master));
}
MODULE_ALIAS("platform:" DRIVER_NAME);
@@ -486,7 +450,6 @@ static struct platform_driver spi_gpio_driver = {
.of_match_table = of_match_ptr(spi_gpio_dt_ids),
},
.probe = spi_gpio_probe,
- .remove = spi_gpio_remove,
};
module_platform_driver(spi_gpio_driver);
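
Note: the probe rewrite above leans on devm_add_action_or_reset(), which
guarantees spi_master_put() runs on every later error path and on unbind;
that is what allowed spi_gpio_remove() to be deleted outright. The core of
the pattern:

    static void spi_gpio_put(void *data)
    {
        spi_master_put(data);
    }

    /* in probe(), immediately after spi_alloc_master() succeeds */
    status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master);
    if (status)
        return status;  /* on failure the action already dropped the ref */

The final devm_spi_register_master(&pdev->dev, spi_master_get(master)) call
takes an extra reference, presumably to balance the one dropped at
unregistration, leaving the original for the devm action to release last.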
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 6ec647bbba77..09c9a1edb2c6 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -28,6 +28,10 @@
#define DRIVER_NAME "spi_imx"
+static bool use_dma = true;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
+
#define MXC_CSPIRXDATA 0x00
#define MXC_CSPITXDATA 0x04
#define MXC_CSPICTRL 0x08
@@ -219,6 +223,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ if (!use_dma)
+ return false;
+
if (!master->dma_rx)
return false;
@@ -1494,7 +1501,7 @@ static int spi_imx_transfer(struct spi_device *spi,
/* flush rxfifo before transfer */
while (spi_imx->devtype_data->rx_available(spi_imx))
- spi_imx->rx(spi_imx);
+ readl(spi_imx->base + MXC_CSPIRXDATA);
if (spi_imx->slave_mode)
return spi_imx_pio_transfer_slave(spi, transfer);
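
Note: the flush fix above is subtle: spi_imx->rx() pops a FIFO word and,
when a receive buffer is set, stores it through the driver's rx pointer, so
draining stale words with it risks scribbling leftover data through a stale
buffer pointer. Reading MXC_CSPIRXDATA directly pops the word and throws
the value away:

    /* drain stale words: pop the RX register, discard the value */
    while (spi_imx->devtype_data->rx_available(spi_imx))
        readl(spi_imx->base + MXC_CSPIRXDATA);
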
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index a4d8d19ecff9..9f0fa9f3116d 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -135,8 +135,8 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
return -ENOTSUPP;
}
-static bool spi_mem_default_supports_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
{
if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
return false;
@@ -622,7 +622,7 @@ void devm_spi_mem_dirmap_destroy(struct device *dev,
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
/**
- * spi_mem_dirmap_dirmap_read() - Read data through a direct mapping
+ * spi_mem_dirmap_read() - Read data through a direct mapping
* @desc: direct mapping descriptor
* @offs: offset to start reading from. Note that this is not an absolute
* offset, but the offset within the direct mapping which already has
@@ -668,7 +668,7 @@ ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
/**
- * spi_mem_dirmap_dirmap_write() - Write data through a direct mapping
+ * spi_mem_dirmap_write() - Write data through a direct mapping
* @desc: direct mapping descriptor
* @offs: offset to start writing from. Note that this is not an absolute
* offset, but the offset within the direct mapping which already has
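Dropping the static keyword from spi_mem_default_supports_op() (the full patch also exports it) lets controller drivers layer their own restrictions on top of the generic buswidth checks. A sketch, where the 16-byte data limit is a made-up example constraint:

#include <linux/spi/spi-mem.h>

static bool my_ctlr_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	/* hypothetical controller limit on the data phase */
	if (op->data.nbytes > 16)
		return false;

	/* then defer to the generic buswidth checks */
	return spi_mem_default_supports_op(mem, op);
}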
diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index b509f9fe3346..ae836114ee3d 100644
--- a/drivers/staging/mt7621-spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -1,15 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * spi-mt7621.c -- MediaTek MT7621 SPI controller driver
- *
- * Copyright (C) 2011 Sergiy <piratfm@gmail.com>
- * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
- *
- * Some parts are based on spi-orion.c:
- * Author: Shadi Ammouri <shadi@marvell.com>
- * Copyright (C) 2007-2008 Marvell Ltd.
- */
+//
+// spi-mt7621.c -- MediaTek MT7621 SPI controller driver
+//
+// Copyright (C) 2011 Sergiy <piratfm@gmail.com>
+// Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+// Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
+//
+// Some parts are based on spi-orion.c:
+// Author: Shadi Ammouri <shadi@marvell.com>
+// Copyright (C) 2007-2008 Marvell Ltd.
#include <linux/clk.h>
#include <linux/delay.h>
@@ -52,7 +51,7 @@
#define MT7621_LSB_FIRST BIT(3)
struct mt7621_spi {
- struct spi_master *master;
+ struct spi_controller *master;
void __iomem *base;
unsigned int sys_freq;
unsigned int speed;
@@ -64,7 +63,7 @@ struct mt7621_spi {
static inline struct mt7621_spi *spidev_to_mt7621_spi(struct spi_device *spi)
{
- return spi_master_get_devdata(spi->master);
+ return spi_controller_get_devdata(spi->master);
}
static inline u32 mt7621_spi_read(struct mt7621_spi *rs, u32 reg)
@@ -77,29 +76,25 @@ static inline void mt7621_spi_write(struct mt7621_spi *rs, u32 reg, u32 val)
iowrite32(val, rs->base + reg);
}
-static void mt7621_spi_reset(struct mt7621_spi *rs)
+static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
{
- u32 master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
+ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
+ int cs = spi->chip_select;
+ u32 polar = 0;
+ u32 master;
/*
* Select SPI device 7, enable "more buffer mode" and disable
* full-duplex (only half-duplex works reliably on this chip)
*/
+ master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
master |= MASTER_RS_SLAVE_SEL | MASTER_MORE_BUFMODE;
master &= ~MASTER_FULL_DUPLEX;
-
mt7621_spi_write(rs, MT7621_SPI_MASTER, master);
- rs->pending_write = 0;
-}
-static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
-{
- struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
- int cs = spi->chip_select;
- u32 polar = 0;
+ rs->pending_write = 0;
- mt7621_spi_reset(rs);
if (enable)
polar = BIT(cs);
mt7621_spi_write(rs, MT7621_SPI_POLAR, polar);
@@ -163,13 +158,14 @@ static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs)
static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs,
int rx_len, u8 *buf)
{
+ int tx_len;
+
/*
* Combine with any pending write, and perform one or more half-duplex
* transactions reading 'len' bytes. Data to be written is already in
* MT7621_SPI_DATA.
*/
- int tx_len = rs->pending_write;
-
+ tx_len = rs->pending_write;
rs->pending_write = 0;
while (rx_len || tx_len) {
@@ -209,8 +205,8 @@ static inline void mt7621_spi_flush(struct mt7621_spi *rs)
static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
int tx_len, const u8 *buf)
{
- int val = 0;
int len = rs->pending_write;
+ int val = 0;
if (len & 3) {
val = mt7621_spi_read(rs, MT7621_SPI_OPCODE + (len & ~3));
@@ -238,6 +234,7 @@ static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
}
tx_len -= 1;
}
+
if (len & 3) {
if (len < 4) {
val = swab32(val);
@@ -245,13 +242,14 @@ static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
}
mt7621_spi_write(rs, MT7621_SPI_OPCODE + (len & ~3), val);
}
+
rs->pending_write = len;
}
-static int mt7621_spi_transfer_one_message(struct spi_master *master,
+static int mt7621_spi_transfer_one_message(struct spi_controller *master,
struct spi_message *m)
{
- struct mt7621_spi *rs = spi_master_get_devdata(master);
+ struct mt7621_spi *rs = spi_controller_get_devdata(master);
struct spi_device *spi = m->spi;
unsigned int speed = spi->max_speed_hz;
struct spi_transfer *t = NULL;
@@ -268,11 +266,14 @@ static int mt7621_spi_transfer_one_message(struct spi_master *master,
goto msg_done;
}
+ /* Assert CS */
mt7621_spi_set_cs(spi, 1);
+
m->actual_length = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
if ((t->rx_buf) && (t->tx_buf)) {
- /* This controller will shift some extra data out
+ /*
+ * This controller will shift some extra data out
* of spi_opcode if (mosi_bit_cnt > 0) &&
* (cmd_bit_cnt == 0). So the claimed full-duplex
* support is broken since we have no way to read
@@ -287,8 +288,9 @@ static int mt7621_spi_transfer_one_message(struct spi_master *master,
}
m->actual_length += t->len;
}
- mt7621_spi_flush(rs);
+ /* Flush data and deassert CS */
+ mt7621_spi_flush(rs);
mt7621_spi_set_cs(spi, 0);
msg_done:
@@ -303,7 +305,7 @@ static int mt7621_spi_setup(struct spi_device *spi)
struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
if ((spi->max_speed_hz == 0) ||
- (spi->max_speed_hz > (rs->sys_freq / 2)))
+ (spi->max_speed_hz > (rs->sys_freq / 2)))
spi->max_speed_hz = (rs->sys_freq / 2);
if (spi->max_speed_hz < (rs->sys_freq / 4097)) {
@@ -324,7 +326,7 @@ MODULE_DEVICE_TABLE(of, mt7621_spi_match);
static int mt7621_spi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- struct spi_master *master;
+ struct spi_controller *master;
struct mt7621_spi *rs;
void __iomem *base;
struct resource *r;
@@ -361,7 +363,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
}
master->mode_bits = SPI_LSB_FIRST;
-
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->setup = mt7621_spi_setup;
master->transfer_one_message = mt7621_spi_transfer_one_message;
master->bits_per_word_mask = SPI_BPW_MASK(8);
@@ -370,7 +372,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, master);
- rs = spi_master_get_devdata(master);
+ rs = spi_controller_get_devdata(master);
rs->base = base;
rs->clk = clk;
rs->master = master;
@@ -385,21 +387,18 @@ static int mt7621_spi_probe(struct platform_device *pdev)
return ret;
}
- mt7621_spi_reset(rs);
-
- return spi_register_master(master);
+ return devm_spi_register_controller(&pdev->dev, master);
}
static int mt7621_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *master;
struct mt7621_spi *rs;
master = dev_get_drvdata(&pdev->dev);
- rs = spi_master_get_devdata(master);
+ rs = spi_controller_get_devdata(master);
- clk_disable(rs->clk);
- spi_unregister_master(master);
+ clk_disable_unprepare(rs->clk);
return 0;
}
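Besides graduating out of staging and switching to the spi_controller naming, the driver now advertises SPI_CONTROLLER_HALF_DUPLEX, so the SPI core rejects full-duplex messages before transfer_one_message() ever runs; the driver's own (t->rx_buf && t->tx_buf) check becomes a backstop. A client-side sketch of what remains legal:

#include <linux/spi/spi.h>

/* Sketch: half-duplex transfers (tx-only or rx-only) are still fine */
static int send_cmd(struct spi_device *spi, const u8 *cmd, size_t len)
{
	struct spi_transfer t = {
		.tx_buf	= cmd,	/* tx only; rx_buf left NULL */
		.len	= len,
	};

	return spi_sync_transfer(spi, &t, 1);
}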
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index e41ae6ef0f8a..f48563c09b97 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -492,8 +492,7 @@ static int mxic_spi_transfer_one(struct spi_master *master,
static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct mxic_spi *mxic = spi_master_get_devdata(master);
mxic_spi_clk_disable(mxic);
@@ -504,8 +503,7 @@ static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
static int __maybe_unused mxic_spi_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct mxic_spi *mxic = spi_master_get_devdata(master);
int ret;
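The spi-mxic cleanup relies on platform_set_drvdata() storing the pointer in the underlying struct device, so PM callbacks that only receive a struct device can fetch it back with dev_get_drvdata() instead of round-tripping through to_platform_device(). A sketch of the shortened callback:

/* Sketch: dev_get_drvdata() is the inverse of platform_set_drvdata() */
static int __maybe_unused my_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mxic_spi *mxic = spi_master_get_devdata(master);

	mxic_spi_clk_disable(mxic);	/* driver helper, per the hunk above */
	return 0;
}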
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 7f280567093e..25ea4a9e0dbc 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -470,6 +470,8 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
goto out;
count--;
+ if (xfer->word_delay_usecs)
+ udelay(xfer->word_delay_usecs);
} while (count);
} else if (word_len == 16) {
const u16 *tx = xfer->tx_buf;
@@ -479,6 +481,8 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
goto out;
count -= 2;
+ if (xfer->word_delay_usecs)
+ udelay(xfer->word_delay_usecs);
} while (count);
}
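The spi-orion hunks honour spi_transfer::word_delay_usecs by inserting a udelay() after each 8- or 16-bit word. A client sketch requesting an inter-word gap (field name as used in the hunk; availability depends on the core supporting per-word delays):

#include <linux/spi/spi.h>

static int send_slow(struct spi_device *spi, const u8 *buf, size_t len)
{
	struct spi_transfer t = {
		.tx_buf			= buf,
		.len			= len,
		.word_delay_usecs	= 5,	/* 5 us pause between words */
	};

	return spi_sync_transfer(spi, &t, 1);
}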
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index 131849adc570..d9f374c8b709 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -559,7 +559,7 @@ static int pic32_spi_one_transfer(struct spi_master *master,
dev_err(&spi->dev, "wait error/timedout\n");
if (dma_issued) {
dmaengine_terminate_all(master->dma_rx);
- dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_all(master->dma_tx);
}
ret = -ETIMEDOUT;
} else {
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 15592598273e..e5c26c1779ab 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -239,13 +239,15 @@ int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
u32 *threshold)
{
struct pxa2xx_spi_chip *chip_info = spi->controller_data;
+ struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
+ u32 dma_burst_size = drv_data->controller_info->dma_burst_size;
/*
* If the DMA burst size is given in chip_info we use that,
* otherwise we use the default. Also we use the default FIFO
* thresholds for now.
*/
- *burst_code = chip_info ? chip_info->dma_burst_size : 1;
+ *burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
| SSCR1_TxTresh(TX_THRESH_DFLT);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 1727fdfbac28..d456c5251b5d 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -5,7 +5,6 @@
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -35,6 +34,8 @@ struct pxa_spi_info {
void *tx_param;
void *rx_param;
+ int dma_burst_size;
+
int (*setup)(struct pci_dev *pdev, struct pxa_spi_info *c);
};
@@ -133,6 +134,7 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
rx->dma_dev = &dma_dev->dev;
c->dma_filter = lpss_dma_filter;
+ c->dma_burst_size = 8;
return 0;
}
@@ -223,6 +225,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
spi_pdata.tx_param = c->tx_param;
spi_pdata.rx_param = c->rx_param;
spi_pdata.enable_dma = c->rx_param && c->tx_param;
+ spi_pdata.dma_burst_size = c->dma_burst_size ? c->dma_burst_size : 1;
ssp = &spi_pdata.ssp;
ssp->phys_base = pci_resource_start(dev, 0);
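The two pxa2xx hunks above plumb a per-board DMA burst size from the PCI glue (8 on Intel Merrifield, 1 elsewhere) through the platform data down to the burst/threshold helper, which previously hard-coded 1. The fallback chain, sketched:

/* Sketch: chip_info (board file) wins, else the platform-data default */
static u32 pick_dma_burst(const struct pxa2xx_spi_chip *chip_info,
			  u32 pdata_burst)
{
	return chip_info ? chip_info->dma_burst_size : pdata_burst;
}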
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index b6ddba833d02..e59c8b27b155 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -884,10 +884,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
rate = min_t(int, ssp_clk, rate);
+ /*
+ * Calculate the divisor for the SCR (Serial Clock Rate), avoiding
+ * an SSP transmission rate greater than the device's requested rate
+ */
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
- return (ssp_clk / (2 * rate) - 1) & 0xff;
+ return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
else
- return (ssp_clk / rate - 1) & 0xfff;
+ return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
}
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
@@ -925,7 +929,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
struct spi_message *message = controller->cur_msg;
- struct chip_data *chip = spi_get_ctldata(message->spi);
+ struct chip_data *chip = spi_get_ctldata(spi);
u32 dma_thresh = chip->dma_threshold;
u32 dma_burst = chip->dma_burst_size;
u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
@@ -943,21 +947,21 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
- dev_err(&drv_data->pdev->dev,
+ dev_err(&spi->dev,
"Mapped transfer length of %u is greater than %d\n",
transfer->len, MAX_DMA_LEN);
return -EINVAL;
}
/* warn ... we force this to PIO mode */
- dev_warn_ratelimited(&message->spi->dev,
+ dev_warn_ratelimited(&spi->dev,
"DMA disabled for transfer length %ld greater than %d\n",
(long)transfer->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
if (pxa2xx_spi_flush(drv_data) == 0) {
- dev_err(&drv_data->pdev->dev, "Flush failed\n");
+ dev_err(&spi->dev, "Flush failed\n");
return -EIO;
}
drv_data->n_bytes = chip->n_bytes;
@@ -999,15 +1003,15 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
*/
if (chip->enable_dma) {
if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
- message->spi,
+ spi,
bits, &dma_burst,
&dma_thresh))
- dev_warn_ratelimited(&message->spi->dev,
+ dev_warn_ratelimited(&spi->dev,
"DMA burst size reduced to match bits_per_word\n");
}
dma_mapped = controller->can_dma &&
- controller->can_dma(controller, message->spi, transfer) &&
+ controller->can_dma(controller, spi, transfer) &&
controller->cur_msg_mapped;
if (dma_mapped) {
@@ -1035,12 +1039,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
if (!pxa25x_ssp_comp(drv_data))
- dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+ dev_dbg(&spi->dev, "%u Hz actual, %s\n",
controller->max_speed_hz
/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
else
- dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+ dev_dbg(&spi->dev, "%u Hz actual, %s\n",
controller->max_speed_hz / 2
/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
@@ -1333,6 +1337,9 @@ static int setup(struct spi_device *spi)
dev_warn(&spi->dev,
"in setup: DMA burst size reduced to match bits_per_word\n");
}
+ dev_dbg(&spi->dev,
+ "in setup: DMA burst size set to %u\n",
+ chip->dma_burst_size);
}
switch (drv_data->ssp_type) {
@@ -1451,6 +1458,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0xa32a), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0xa32b), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0xa37b), LPSS_CNL_SSP },
+ /* CML-LP */
+ { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
{ },
};
@@ -1564,6 +1575,7 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
pdata->is_slave = of_property_read_bool(pdev->dev.of_node, "spi-slave");
pdata->num_chipselect = 1;
pdata->enable_dma = true;
+ pdata->dma_burst_size = 1;
return pdata;
}
@@ -1692,7 +1704,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (platform_info->enable_dma) {
status = pxa2xx_spi_dma_setup(drv_data);
if (status) {
- dev_dbg(dev, "no DMA channels available, using PIO\n");
+ dev_warn(dev, "no DMA channels available, using PIO\n");
platform_info->enable_dma = false;
} else {
controller->can_dma = pxa2xx_spi_can_dma;
@@ -1953,3 +1965,5 @@ static void __exit pxa2xx_spi_exit(void)
platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);
+
+MODULE_SOFTDEP("pre: dw_dmac");
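The ssp_get_clk_div() change above matters because plain integer division rounds the divisor down, which rounds the resulting bit clock up past the requested rate. Worked numbers (hypothetical clocks; PXA25x formula: bit clock = ssp_clk / (2 * (SCR + 1))): with ssp_clk = 48 MHz and a requested 10 MHz, the old code computed SCR = 48000000 / 20000000 - 1 = 1, giving 12 MHz, faster than asked; DIV_ROUND_UP gives SCR = 3 - 1 = 2, i.e. 8 MHz, the fastest rate that does not exceed the request.

#include <linux/kernel.h>

/* Sketch: round the divisor up so the resulting clock is rounded down */
static u32 scr_for_rate(u32 ssp_clk, u32 rate)
{
	return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
}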
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 556870dcdf79..15f5723d9f95 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -271,7 +271,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Sets parity, interrupt mask */
rspi_write8(rspi, 0x00, RSPI_SPCR2);
- /* Sets SPCMD */
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
@@ -315,7 +316,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
rspi_write8(rspi, 0x00, RSPI_SSLND);
rspi_write8(rspi, 0x00, RSPI_SPND);
- /* Sets SPCMD */
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
@@ -366,7 +368,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Sets buffer to allow normal operation */
rspi_write8(rspi, 0x00, QSPI_SPBFCR);
- /* Sets SPCMD */
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Sets RSPI mode */
@@ -736,27 +739,22 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
while (len > 0) {
n = qspi_set_send_trigger(rspi, len);
qspi_set_receive_trigger(rspi, len);
- if (n == QSPI_BUFFER_SIZE) {
- ret = rspi_wait_for_tx_empty(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "transmit timeout\n");
- return ret;
- }
- for (i = 0; i < n; i++)
- rspi_write_data(rspi, *tx++);
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
+ return ret;
+ }
+ for (i = 0; i < n; i++)
+ rspi_write_data(rspi, *tx++);
- ret = rspi_wait_for_rx_full(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "receive timeout\n");
- return ret;
- }
- for (i = 0; i < n; i++)
- *rx++ = rspi_read_data(rspi);
- } else {
- ret = rspi_pio_transfer(rspi, tx, rx, n);
- if (ret < 0)
- return ret;
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
+ return ret;
}
+ for (i = 0; i < n; i++)
+ *rx++ = rspi_read_data(rspi);
+
len -= n;
}
@@ -793,19 +791,14 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
while (n > 0) {
len = qspi_set_send_trigger(rspi, n);
- if (len == QSPI_BUFFER_SIZE) {
- ret = rspi_wait_for_tx_empty(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "transmit timeout\n");
- return ret;
- }
- for (i = 0; i < len; i++)
- rspi_write_data(rspi, *tx++);
- } else {
- ret = rspi_pio_transfer(rspi, tx, NULL, len);
- if (ret < 0)
- return ret;
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
+ return ret;
}
+ for (i = 0; i < len; i++)
+ rspi_write_data(rspi, *tx++);
+
n -= len;
}
@@ -830,19 +823,14 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
while (n > 0) {
len = qspi_set_receive_trigger(rspi, n);
- if (len == QSPI_BUFFER_SIZE) {
- ret = rspi_wait_for_rx_full(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "receive timeout\n");
- return ret;
- }
- for (i = 0; i < len; i++)
- *rx++ = rspi_read_data(rspi);
- } else {
- ret = rspi_pio_transfer(rspi, NULL, rx, len);
- if (ret < 0)
- return ret;
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
+ return ret;
}
+ for (i = 0; i < len; i++)
+ *rx++ = rspi_read_data(rspi);
+
n -= len;
}
@@ -868,28 +856,6 @@ static int qspi_transfer_one(struct spi_controller *ctlr,
}
}
-static int rspi_setup(struct spi_device *spi)
-{
- struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
-
- rspi->max_speed_hz = spi->max_speed_hz;
-
- rspi->spcmd = SPCMD_SSLKP;
- if (spi->mode & SPI_CPOL)
- rspi->spcmd |= SPCMD_CPOL;
- if (spi->mode & SPI_CPHA)
- rspi->spcmd |= SPCMD_CPHA;
-
- /* CMOS output mode and MOSI signal from previous transfer */
- rspi->sppcr = 0;
- if (spi->mode & SPI_LOOP)
- rspi->sppcr |= SPPCR_SPLP;
-
- set_config_register(rspi, 8);
-
- return 0;
-}
-
static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
{
if (xfer->tx_buf)
@@ -959,8 +925,24 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = msg->spi;
int ret;
+ rspi->max_speed_hz = spi->max_speed_hz;
+
+ rspi->spcmd = SPCMD_SSLKP;
+ if (spi->mode & SPI_CPOL)
+ rspi->spcmd |= SPCMD_CPOL;
+ if (spi->mode & SPI_CPHA)
+ rspi->spcmd |= SPCMD_CPHA;
+
+ /* CMOS output mode and MOSI signal from previous transfer */
+ rspi->sppcr = 0;
+ if (spi->mode & SPI_LOOP)
+ rspi->sppcr |= SPPCR_SPLP;
+
+ set_config_register(rspi, 8);
+
if (msg->spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
/* Setup sequencer for messages with multiple transfer modes */
@@ -1267,7 +1249,6 @@ static int rspi_probe(struct platform_device *pdev)
init_waitqueue_head(&rspi->wait);
ctlr->bus_num = pdev->id;
- ctlr->setup = rspi_setup;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = ops->transfer_one;
ctlr->prepare_message = rspi_prepare_message;
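Folding rspi_setup() into rspi_prepare_message() moves all per-device register state (SPCMD, SPPCR, speed) to a point where the message queue is serialized, so it can no longer race a transfer in flight, and every message now also passes through the sequencer reset added to the set_config_register() variants. The resulting shape, sketched:

/* Sketch: per-device config derived from msg->spi inside prepare_message() */
static int my_prepare_message(struct spi_controller *ctlr,
			      struct spi_message *msg)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = msg->spi;

	rspi->spcmd = SPCMD_SSLKP;
	if (spi->mode & SPI_CPOL)
		rspi->spcmd |= SPCMD_CPOL;
	if (spi->mode & SPI_CPHA)
		rspi->spcmd |= SPCMD_CPHA;

	set_config_register(rspi, 8);	/* now also resets the sequencer */
	return 0;
}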
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index e2eb466db10a..6aab7b2136db 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -18,6 +18,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -82,111 +83,113 @@ struct sh_msiof_spi_priv {
#define RFDR 0x60 /* Receive FIFO Data Register */
/* TMDR1 and RMDR1 */
-#define MDR1_TRMD 0x80000000 /* Transfer Mode (1 = Master mode) */
-#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
-#define MDR1_SYNCMD_SPI 0x20000000 /* Level mode/SPI */
-#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
-#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
-#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
-#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
-#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
-#define MDR1_FLD_SHIFT 2
-#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
+#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
+#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */
+#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */
+#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
+#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_SHIFT 2
+#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
-#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
-#define TMDR1_SYNCCH_MASK 0xc000000 /* Synchronization Signal Channel Select */
-#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
+#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
-#define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
+#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
/* TSCR and RSCR */
-#define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
+#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
#define SCR_BRPS(i) (((i) - 1) << 8)
-#define SCR_BRDV_MASK 0x0007 /* Baud Rate Generator's Division Ratio */
-#define SCR_BRDV_DIV_2 0x0000
-#define SCR_BRDV_DIV_4 0x0001
-#define SCR_BRDV_DIV_8 0x0002
-#define SCR_BRDV_DIV_16 0x0003
-#define SCR_BRDV_DIV_32 0x0004
-#define SCR_BRDV_DIV_1 0x0007
+#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+#define SCR_BRDV_DIV_2 0
+#define SCR_BRDV_DIV_4 1
+#define SCR_BRDV_DIV_8 2
+#define SCR_BRDV_DIV_16 3
+#define SCR_BRDV_DIV_32 4
+#define SCR_BRDV_DIV_1 7
/* CTR */
-#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
-#define CTR_TSCKIZ_SCK 0x80000000 /* Disable SCK when TX disabled */
-#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
-#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
-#define CTR_RSCKIZ_SCK 0x20000000 /* Must match CTR_TSCKIZ_SCK */
-#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
-#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
-#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
-#define CTR_TXDIZ_MASK 0x00c00000 /* Pin Output When TX is Disabled */
-#define CTR_TXDIZ_LOW 0x00000000 /* 0 */
-#define CTR_TXDIZ_HIGH 0x00400000 /* 1 */
-#define CTR_TXDIZ_HIZ 0x00800000 /* High-impedance */
-#define CTR_TSCKE 0x00008000 /* Transmit Serial Clock Output Enable */
-#define CTR_TFSE 0x00004000 /* Transmit Frame Sync Signal Output Enable */
-#define CTR_TXE 0x00000200 /* Transmit Enable */
-#define CTR_RXE 0x00000100 /* Receive Enable */
+#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
+#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define CTR_TXDIZ_LOW (0 << 22) /* 0 */
+#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */
+#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
+#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define CTR_TXE BIT(9) /* Transmit Enable */
+#define CTR_RXE BIT(8) /* Receive Enable */
+#define CTR_TXRST BIT(1) /* Transmit Reset */
+#define CTR_RXRST BIT(0) /* Receive Reset */
/* FCTR */
-#define FCTR_TFWM_MASK 0xe0000000 /* Transmit FIFO Watermark */
-#define FCTR_TFWM_64 0x00000000 /* Transfer Request when 64 empty stages */
-#define FCTR_TFWM_32 0x20000000 /* Transfer Request when 32 empty stages */
-#define FCTR_TFWM_24 0x40000000 /* Transfer Request when 24 empty stages */
-#define FCTR_TFWM_16 0x60000000 /* Transfer Request when 16 empty stages */
-#define FCTR_TFWM_12 0x80000000 /* Transfer Request when 12 empty stages */
-#define FCTR_TFWM_8 0xa0000000 /* Transfer Request when 8 empty stages */
-#define FCTR_TFWM_4 0xc0000000 /* Transfer Request when 4 empty stages */
-#define FCTR_TFWM_1 0xe0000000 /* Transfer Request when 1 empty stage */
-#define FCTR_TFUA_MASK 0x07f00000 /* Transmit FIFO Usable Area */
-#define FCTR_TFUA_SHIFT 20
+#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
+#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
+#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
+#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
+#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
+#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
+#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
+#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
+#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
+#define FCTR_TFUA_SHIFT 20
#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT)
-#define FCTR_RFWM_MASK 0x0000e000 /* Receive FIFO Watermark */
-#define FCTR_RFWM_1 0x00000000 /* Transfer Request when 1 valid stages */
-#define FCTR_RFWM_4 0x00002000 /* Transfer Request when 4 valid stages */
-#define FCTR_RFWM_8 0x00004000 /* Transfer Request when 8 valid stages */
-#define FCTR_RFWM_16 0x00006000 /* Transfer Request when 16 valid stages */
-#define FCTR_RFWM_32 0x00008000 /* Transfer Request when 32 valid stages */
-#define FCTR_RFWM_64 0x0000a000 /* Transfer Request when 64 valid stages */
-#define FCTR_RFWM_128 0x0000c000 /* Transfer Request when 128 valid stages */
-#define FCTR_RFWM_256 0x0000e000 /* Transfer Request when 256 valid stages */
-#define FCTR_RFUA_MASK 0x00001ff0 /* Receive FIFO Usable Area (0x40 = full) */
-#define FCTR_RFUA_SHIFT 4
+#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
+#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stage */
+#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
+#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
+#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
+#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
+#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
+#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
+#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
+#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+#define FCTR_RFUA_SHIFT 4
#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT)
/* STR */
-#define STR_TFEMP 0x20000000 /* Transmit FIFO Empty */
-#define STR_TDREQ 0x10000000 /* Transmit Data Transfer Request */
-#define STR_TEOF 0x00800000 /* Frame Transmission End */
-#define STR_TFSERR 0x00200000 /* Transmit Frame Synchronization Error */
-#define STR_TFOVF 0x00100000 /* Transmit FIFO Overflow */
-#define STR_TFUDF 0x00080000 /* Transmit FIFO Underflow */
-#define STR_RFFUL 0x00002000 /* Receive FIFO Full */
-#define STR_RDREQ 0x00001000 /* Receive Data Transfer Request */
-#define STR_REOF 0x00000080 /* Frame Reception End */
-#define STR_RFSERR 0x00000020 /* Receive Frame Synchronization Error */
-#define STR_RFUDF 0x00000010 /* Receive FIFO Underflow */
-#define STR_RFOVF 0x00000008 /* Receive FIFO Overflow */
+#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define STR_TEOF BIT(23) /* Frame Transmission End */
+#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define STR_RFFUL BIT(13) /* Receive FIFO Full */
+#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define STR_REOF BIT(7) /* Frame Reception End */
+#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */
/* IER */
-#define IER_TDMAE 0x80000000 /* Transmit Data DMA Transfer Req. Enable */
-#define IER_TFEMPE 0x20000000 /* Transmit FIFO Empty Enable */
-#define IER_TDREQE 0x10000000 /* Transmit Data Transfer Request Enable */
-#define IER_TEOFE 0x00800000 /* Frame Transmission End Enable */
-#define IER_TFSERRE 0x00200000 /* Transmit Frame Sync Error Enable */
-#define IER_TFOVFE 0x00100000 /* Transmit FIFO Overflow Enable */
-#define IER_TFUDFE 0x00080000 /* Transmit FIFO Underflow Enable */
-#define IER_RDMAE 0x00008000 /* Receive Data DMA Transfer Req. Enable */
-#define IER_RFFULE 0x00002000 /* Receive FIFO Full Enable */
-#define IER_RDREQE 0x00001000 /* Receive Data Transfer Request Enable */
-#define IER_REOFE 0x00000080 /* Frame Reception End Enable */
-#define IER_RFSERRE 0x00000020 /* Receive Frame Sync Error Enable */
-#define IER_RFUDFE 0x00000010 /* Receive FIFO Underflow Enable */
-#define IER_RFOVFE 0x00000008 /* Receive FIFO Overflow Enable */
+#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define IER_REOFE BIT(7) /* Frame Reception End Enable */
+#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
@@ -219,21 +222,14 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
{
u32 mask = clr | set;
u32 data;
- int k;
data = sh_msiof_read(p, CTR);
data &= ~clr;
data |= set;
sh_msiof_write(p, CTR, data);
- for (k = 100; k > 0; k--) {
- if ((sh_msiof_read(p, CTR) & mask) == set)
- break;
-
- udelay(10);
- }
-
- return k > 0 ? 0 : -ETIMEDOUT;
+ return readl_poll_timeout_atomic(p->mapbase + CTR, data,
+ (data & mask) == set, 10, 1000);
}
static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
@@ -247,6 +243,19 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
+{
+ u32 mask = CTR_TXRST | CTR_RXRST;
+ u32 data;
+
+ data = sh_msiof_read(p, CTR);
+ data |= mask;
+ sh_msiof_write(p, CTR, data);
+
+ readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
+ 100);
+}
+
static const u32 sh_msiof_spi_div_array[] = {
SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
@@ -540,25 +549,11 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
static int sh_msiof_spi_setup(struct spi_device *spi)
{
- struct device_node *np = spi->controller->dev.of_node;
struct sh_msiof_spi_priv *p =
spi_controller_get_devdata(spi->controller);
u32 clr, set, tmp;
- if (!np) {
- /*
- * Use spi->controller_data for CS (same strategy as spi_gpio),
- * if any. otherwise let HW control CS
- */
- spi->cs_gpio = (uintptr_t)spi->controller_data;
- }
-
- if (gpio_is_valid(spi->cs_gpio)) {
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- return 0;
- }
-
- if (spi_controller_is_slave(p->ctlr))
+ if (spi->cs_gpiod || spi_controller_is_slave(p->ctlr))
return 0;
if (p->native_cs_inited &&
@@ -591,7 +586,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
u32 ss, cs_high;
/* Configure pins before asserting CS */
- if (gpio_is_valid(spi->cs_gpio)) {
+ if (spi->cs_gpiod) {
ss = p->unused_ss;
cs_high = p->native_cs_high;
} else {
@@ -926,6 +921,9 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
bool swab;
int ret;
+ /* reset registers */
+ sh_msiof_spi_reset_regs(p);
+
/* setup clocks (clock already enabled in chipselect()) */
if (!spi_controller_is_slave(p->ctlr))
sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
@@ -1144,6 +1142,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
if (!IS_ERR(gpiod)) {
+ devm_gpiod_put(dev, gpiod);
cs_gpios++;
continue;
}
@@ -1395,6 +1394,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
+ ctlr->use_gpio_descriptors = true;
ret = sh_msiof_request_dma(p);
if (ret < 0)
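sh_msiof_modify_ctr_wait() now uses the iopoll helper instead of an open-coded retry loop. readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) busy-polls addr, evaluating cond after each read, and returns 0 on success or -ETIMEDOUT; the sketch below restates the call from the hunk:

#include <linux/iopoll.h>

static int wait_ctr_bits(void __iomem *ctr, u32 mask, u32 set)
{
	u32 data;

	/* poll every 10 us, give up after 1000 us */
	return readl_poll_timeout_atomic(ctr, data, (data & mask) == set,
					 10, 1000);
}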
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 3b2a9a6b990d..42f8e3c6aa1f 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -5,6 +5,8 @@
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -13,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -76,7 +79,6 @@
#define QSPI_PSMAR 0x28
#define QSPI_PIR 0x2c
#define QSPI_LPTR 0x30
-#define LPTR_DFT_TIMEOUT 0x10
#define STM32_QSPI_MAX_MMAP_SZ SZ_256M
#define STM32_QSPI_MAX_NORCHIP 2
@@ -84,6 +86,7 @@
#define STM32_FIFO_TIMEOUT_US 30000
#define STM32_BUSY_TIMEOUT_US 100000
#define STM32_ABT_TIMEOUT_US 100000
+#define STM32_COMP_TIMEOUT_MS 1000
struct stm32_qspi_flash {
struct stm32_qspi *qspi;
@@ -93,6 +96,8 @@ struct stm32_qspi_flash {
struct stm32_qspi {
struct device *dev;
+ struct spi_controller *ctrl;
+ phys_addr_t phys_base;
void __iomem *io_base;
void __iomem *mm_base;
resource_size_t mm_size;
@@ -102,6 +107,13 @@ struct stm32_qspi {
struct completion data_completion;
u32 fmode;
+ struct dma_chan *dma_chtx;
+ struct dma_chan *dma_chrx;
+ struct completion dma_completion;
+
+ u32 cr_reg;
+ u32 dcr_reg;
+
/*
* to protect device configuration, could be different between
* 2 flash access (bk1, bk2)
@@ -177,6 +189,81 @@ static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
return 0;
}
+static void stm32_qspi_dma_callback(void *arg)
+{
+ struct completion *dma_completion = arg;
+
+ complete(dma_completion);
+}
+
+static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction dma_dir;
+ struct dma_chan *dma_ch;
+ struct sg_table sgt;
+ dma_cookie_t cookie;
+ u32 cr, t_out;
+ int err;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_dir = DMA_DEV_TO_MEM;
+ dma_ch = qspi->dma_chrx;
+ } else {
+ dma_dir = DMA_MEM_TO_DEV;
+ dma_ch = qspi->dma_chtx;
+ }
+
+ /*
+ * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
+ * (DMA-able: vmalloc'ed, kmap'ed, or virt_addr_valid() memory)
+ */
+ err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
+ if (err)
+ return err;
+
+ desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
+ dma_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+
+ reinit_completion(&qspi->dma_completion);
+ desc->callback = stm32_qspi_dma_callback;
+ desc->callback_param = &qspi->dma_completion;
+ cookie = dmaengine_submit(desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ goto out;
+
+ dma_async_issue_pending(dma_ch);
+
+ writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);
+
+ t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
+ if (!wait_for_completion_interruptible_timeout(&qspi->dma_completion,
+ msecs_to_jiffies(t_out)))
+ err = -ETIMEDOUT;
+
+ if (dma_async_is_tx_complete(dma_ch, cookie,
+ NULL, NULL) != DMA_COMPLETE)
+ err = -ETIMEDOUT;
+
+ if (err)
+ dmaengine_terminate_all(dma_ch);
+
+out:
+ writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
+out_unmap:
+ spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);
+
+ return err;
+}
+
static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
{
if (!op->data.nbytes)
@@ -184,6 +271,10 @@ static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
if (qspi->fmode == CCR_FMODE_MM)
return stm32_qspi_tx_mm(qspi, op);
+ else if ((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
+ (op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx))
+ if (!stm32_qspi_tx_dma(qspi, op))
+ return 0;
return stm32_qspi_tx_poll(qspi, op);
}
@@ -214,7 +305,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);
if (!wait_for_completion_interruptible_timeout(&qspi->data_completion,
- msecs_to_jiffies(1000))) {
+ msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
err = -ETIMEDOUT;
} else {
sr = readl_relaxed(qspi->io_base + QSPI_SR);
@@ -356,7 +447,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
struct spi_controller *ctrl = spi->master;
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
- u32 cr, presc;
+ u32 presc;
if (ctrl->busy)
return -EBUSY;
@@ -372,17 +463,60 @@ static int stm32_qspi_setup(struct spi_device *spi)
flash->presc = presc;
mutex_lock(&qspi->lock);
- writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QSPI_LPTR);
- cr = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_TCEN | CR_SSHIFT | CR_EN;
- writel_relaxed(cr, qspi->io_base + QSPI_CR);
+ qspi->cr_reg = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_SSHIFT | CR_EN;
+ writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
- writel_relaxed(DCR_FSIZE_MASK, qspi->io_base + QSPI_DCR);
+ qspi->dcr_reg = DCR_FSIZE_MASK;
+ writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
mutex_unlock(&qspi->lock);
return 0;
}
+static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
+{
+ struct dma_slave_config dma_cfg;
+ struct device *dev = qspi->dev;
+
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
+ dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
+ dma_cfg.src_maxburst = 4;
+ dma_cfg.dst_maxburst = 4;
+
+ qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
+ if (qspi->dma_chrx) {
+ if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
+ dev_err(dev, "dma rx config failed\n");
+ dma_release_channel(qspi->dma_chrx);
+ qspi->dma_chrx = NULL;
+ }
+ }
+
+ qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
+ if (qspi->dma_chtx) {
+ if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
+ dev_err(dev, "dma tx config failed\n");
+ dma_release_channel(qspi->dma_chtx);
+ qspi->dma_chtx = NULL;
+ }
+ }
+
+ init_completion(&qspi->dma_completion);
+}
+
+static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
+{
+ if (qspi->dma_chtx)
+ dma_release_channel(qspi->dma_chtx);
+ if (qspi->dma_chrx)
+ dma_release_channel(qspi->dma_chrx);
+}
+
/*
* No special host constraint, so use the default spi_mem_default_supports_op
* to check the supported modes.
@@ -395,8 +529,10 @@ static void stm32_qspi_release(struct stm32_qspi *qspi)
{
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
+ stm32_qspi_dma_free(qspi);
mutex_destroy(&qspi->lock);
clk_disable_unprepare(qspi->clk);
+ spi_master_put(qspi->ctrl);
}
static int stm32_qspi_probe(struct platform_device *pdev)
@@ -413,43 +549,62 @@ static int stm32_qspi_probe(struct platform_device *pdev)
return -ENOMEM;
qspi = spi_controller_get_devdata(ctrl);
+ qspi->ctrl = ctrl;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
qspi->io_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->io_base))
- return PTR_ERR(qspi->io_base);
+ if (IS_ERR(qspi->io_base)) {
+ ret = PTR_ERR(qspi->io_base);
+ goto err;
+ }
+
+ qspi->phys_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
qspi->mm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->mm_base))
- return PTR_ERR(qspi->mm_base);
+ if (IS_ERR(qspi->mm_base)) {
+ ret = PTR_ERR(qspi->mm_base);
+ goto err;
+ }
qspi->mm_size = resource_size(res);
- if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
- return -EINVAL;
+ if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
+ ret = -EINVAL;
+ goto err;
+ }
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(dev, "IRQ error missing or invalid\n");
+ return irq;
+ }
+
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
- return ret;
+ goto err;
}
init_completion(&qspi->data_completion);
qspi->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(qspi->clk))
- return PTR_ERR(qspi->clk);
+ if (IS_ERR(qspi->clk)) {
+ ret = PTR_ERR(qspi->clk);
+ goto err;
+ }
qspi->clk_rate = clk_get_rate(qspi->clk);
- if (!qspi->clk_rate)
- return -EINVAL;
+ if (!qspi->clk_rate) {
+ ret = -EINVAL;
+ goto err;
+ }
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
- return ret;
+ goto err;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
@@ -461,6 +616,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
+ stm32_qspi_dma_setup(qspi);
mutex_init(&qspi->lock);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
@@ -472,14 +628,11 @@ static int stm32_qspi_probe(struct platform_device *pdev)
ctrl->dev.of_node = dev->of_node;
ret = devm_spi_register_master(dev, ctrl);
- if (ret)
- goto err_spi_register;
-
- return 0;
+ if (!ret)
+ return 0;
-err_spi_register:
+err:
stm32_qspi_release(qspi);
-
return ret;
}
@@ -491,6 +644,31 @@ static int stm32_qspi_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused stm32_qspi_suspend(struct device *dev)
+{
+ struct stm32_qspi *qspi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(qspi->clk);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_qspi_resume(struct device *dev)
+{
+ struct stm32_qspi *qspi = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+ clk_prepare_enable(qspi->clk);
+
+ writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
+ writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_qspi_pm_ops, stm32_qspi_suspend, stm32_qspi_resume);
+
static const struct of_device_id stm32_qspi_match[] = {
{.compatible = "st,stm32f469-qspi"},
{}
@@ -503,6 +681,7 @@ static struct platform_driver stm32_qspi_driver = {
.driver = {
.name = "stm32-qspi",
.of_match_table = stm32_qspi_match,
+ .pm = &stm32_qspi_pm_ops,
},
};
module_platform_driver(stm32_qspi_driver);
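Two details of the stm32-qspi DMA path are worth noting: CR_DMAEN is only set after the descriptor has been submitted and issued, and the completion timeout scales with the scatterlist size, so large vmalloc'ed buffers (one entry per page) get proportionally more time. A sketch of the scaled wait, with names from the hunk:

#include <linux/completion.h>
#include <linux/jiffies.h>

static int wait_dma_done(struct completion *done, unsigned int nents)
{
	/* one STM32_COMP_TIMEOUT_MS (1 s) budget per sg entry */
	unsigned long t_out = nents * STM32_COMP_TIMEOUT_MS;

	if (!wait_for_completion_interruptible_timeout(done,
						       msecs_to_jiffies(t_out)))
		return -ETIMEDOUT;

	return 0;
}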
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 4186ed20d796..b222ce8d083e 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1839,8 +1839,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
spi->irq = platform_get_irq(pdev, 0);
if (spi->irq <= 0) {
- dev_err(&pdev->dev, "no irq: %d\n", spi->irq);
- ret = -ENOENT;
+ ret = spi->irq;
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
goto err_master_put;
}
ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
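The spi-stm32 probe fix propagates the platform_get_irq() error instead of flattening it to -ENOENT, and stays quiet on -EPROBE_DEFER so the log is not spammed while an interrupt controller is still probing. The canonical shape, sketched (the irq == 0 guard is an extra defensive assumption):

static int get_spi_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq <= 0) {
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get irq: %d\n", irq);
		return irq ? irq : -ENXIO;	/* 0 should not happen */
	}

	return irq;
}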
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index a76acedd7e2f..b1f31bb16659 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -149,6 +149,8 @@
#define SPI_TX_FIFO 0x108
#define SPI_RX_FIFO 0x188
+#define SPI_INTR_MASK 0x18c
+#define SPI_INTR_ALL_MASK (0x1fUL << 25)
#define MAX_CHIP_SELECT 4
#define SPI_FIFO_DEPTH 64
#define DATA_DIR_TX (1 << 0)
@@ -161,6 +163,10 @@
#define MAX_HOLD_CYCLES 16
#define SPI_DEFAULT_SPEED 25000000
+struct tegra_spi_soc_data {
+ bool has_intr_mask_reg;
+};
+
struct tegra_spi_data {
struct device *dev;
struct spi_master *master;
@@ -211,6 +217,7 @@ struct tegra_spi_data {
u32 *tx_dma_buf;
dma_addr_t tx_dma_phys;
struct dma_async_tx_descriptor *tx_dma_desc;
+ const struct tegra_spi_soc_data *soc_data;
};
static int tegra_spi_runtime_suspend(struct device *dev);
@@ -259,7 +266,8 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
- if (bits_per_word == 8 || bits_per_word == 16) {
+ if ((bits_per_word == 8 || bits_per_word == 16 ||
+ bits_per_word == 32) && t->len > 3) {
tspi->is_packed = 1;
tspi->words_per_32bit = 32/bits_per_word;
} else {
@@ -307,10 +315,16 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
+
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
} else {
+ unsigned int write_bytes;
max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
+ if (nbytes > t->len - tspi->cur_pos)
+ nbytes = t->len - tspi->cur_pos;
+ write_bytes = nbytes;
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
@@ -319,8 +333,10 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
+
+ tspi->cur_tx_pos += write_bytes;
}
- tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+
return written_words;
}
@@ -344,20 +360,27 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
- tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
read_words += tspi->curr_dma_words;
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ u8 bytes_per_word = tspi->bytes_per_word;
+ unsigned int read_bytes;
+ len = rx_full_count * bytes_per_word;
+ if (len > t->len - tspi->cur_pos)
+ len = t->len - tspi->cur_pos;
+ read_bytes = len;
for (count = 0; count < rx_full_count; count++) {
u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
- for (i = 0; (i < tspi->bytes_per_word); i++)
+ for (i = 0; len && (i < bytes_per_word); i++, len--)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
- tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
read_words += rx_full_count;
+ tspi->cur_rx_pos += read_bytes;
}
+
return read_words;
}
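The unpacked-FIFO paths in spi-tegra114 now clamp the byte count to what is left in the transfer. Example: bits_per_word = 32 and t->len = 10 means the FIFO holds three words, but only ten bytes belong to the client buffer; the old code copied twelve bytes and advanced cur_rx_pos past the end of the transfer. The clamp, sketched as a helper:

#include <linux/kernel.h>

/* Sketch: never copy more FIFO bytes than the transfer has left */
static unsigned int clamp_fifo_bytes(unsigned int fifo_words,
				     unsigned int bytes_per_word,
				     unsigned int remaining)
{
	return min(fifo_words * bytes_per_word, remaining);
}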
@@ -372,12 +395,17 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int write_bytes;
+ if (consume > t->len - tspi->cur_pos)
+ consume = t->len - tspi->cur_pos;
+ write_bytes = consume;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = 0;
@@ -386,8 +414,9 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
+
+ tspi->cur_tx_pos += write_bytes;
}
- tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
/* Make the DMA buffer visible to the DMA engine */
dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
@@ -405,20 +434,28 @@ static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int read_bytes;
+ if (consume > t->len - tspi->cur_pos)
+ consume = t->len - tspi->cur_pos;
+ read_bytes = consume;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = tspi->rx_dma_buf[count] & rx_mask;
- for (i = 0; (i < tspi->bytes_per_word); i++)
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
+
+ tspi->cur_rx_pos += read_bytes;
}
- tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
/* Make the DMA buffer visible to the DMA engine */
dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
@@ -470,22 +507,39 @@ static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
return 0;
}
-static int tegra_spi_start_dma_based_transfer(
- struct tegra_spi_data *tspi, struct spi_transfer *t)
+static int tegra_spi_flush_fifos(struct tegra_spi_data *tspi)
{
- u32 val;
- unsigned int len;
- int ret = 0;
+ unsigned long timeout = jiffies + HZ;
u32 status;
- /* Make sure that Rx and Tx fifo are empty */
status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
- dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
- (unsigned)status);
- return -EIO;
+ status |= SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH;
+ tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
+ while ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
+ status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (time_after(jiffies, timeout)) {
+ dev_err(tspi->dev,
+ "timeout waiting for fifo flush\n");
+ return -EIO;
+ }
+
+ udelay(1);
+ }
}
+ return 0;
+}
+
+static int tegra_spi_start_dma_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int len;
+ int ret = 0;
+ u8 dma_burst;
+ struct dma_slave_config dma_sconfig = {0};
+
val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
tegra_spi_writel(tspi, val, SPI_DMA_BLK);
@@ -496,23 +550,40 @@ static int tegra_spi_start_dma_based_transfer(
len = tspi->curr_dma_words * 4;
/* Set attention level based on length of transfer */
- if (len & 0xF)
+ if (len & 0xF) {
val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
- else if (((len) >> 4) & 0x1)
+ dma_burst = 1;
+ } else if (((len) >> 4) & 0x1) {
val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
- else
+ dma_burst = 4;
+ } else {
val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
+ dma_burst = 8;
+ }
- if (tspi->cur_direction & DATA_DIR_TX)
- val |= SPI_IE_TX;
+ if (!tspi->soc_data->has_intr_mask_reg) {
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
- if (tspi->cur_direction & DATA_DIR_RX)
- val |= SPI_IE_RX;
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+ }
tegra_spi_writel(tspi, val, SPI_DMA_CTL);
tspi->dma_control_reg = val;
+ dma_sconfig.device_fc = true;
if (tspi->cur_direction & DATA_DIR_TX) {
+ dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "DMA slave config failed: %d\n", ret);
+ return ret;
+ }
+
tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
ret = tegra_spi_start_tx_dma(tspi, len);
if (ret < 0) {
@@ -523,6 +594,16 @@ static int tegra_spi_start_dma_based_transfer(
}
if (tspi->cur_direction & DATA_DIR_RX) {
+ dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "DMA slave config failed: %d\n", ret);
+ return ret;
+ }
+
/* Make the DMA buffer visible to the DMA engine */
dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
@@ -570,8 +651,9 @@ static int tegra_spi_start_cpu_based_transfer(
tspi->is_curr_dma_xfer = false;
- val |= SPI_DMA_EN;
- tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ val = tspi->command1_reg;
+ val |= SPI_PIO;
+ tegra_spi_writel(tspi, val, SPI_COMMAND1);
return 0;
}
@@ -582,7 +664,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
u32 *dma_buf;
dma_addr_t dma_phys;
int ret;
- struct dma_slave_config dma_sconfig;
dma_chan = dma_request_slave_channel_reason(tspi->dev,
dma_to_memory ? "rx" : "tx");
@@ -603,19 +684,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
}
if (dma_to_memory) {
- dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
- dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.src_maxburst = 0;
- } else {
- dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
- dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.dst_maxburst = 0;
- }
-
- ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
- if (ret)
- goto scrub;
- if (dma_to_memory) {
tspi->rx_dma_chan = dma_chan;
tspi->rx_dma_buf = dma_buf;
tspi->rx_dma_phys = dma_phys;
@@ -625,11 +693,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
tspi->tx_dma_phys = dma_phys;
}
return 0;
-
-scrub:
- dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
- dma_release_channel(dma_chan);
- return ret;
}
static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
@@ -696,6 +759,16 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
else if (req_mode == SPI_MODE_3)
command1 |= SPI_CONTROL_MODE_3;
+ if (spi->mode & SPI_LSB_FIRST)
+ command1 |= SPI_LSBIT_FE;
+ else
+ command1 &= ~SPI_LSBIT_FE;
+
+ if (spi->mode & SPI_3WIRE)
+ command1 |= SPI_BIDIROE;
+ else
+ command1 &= ~SPI_BIDIROE;
+
if (tspi->cs_control) {
if (tspi->cs_control != spi)
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
@@ -728,8 +801,15 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
+ if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
+ command1 |= SPI_BOTH_EN_BIT;
+ else
+ command1 &= ~SPI_BOTH_EN_BIT;
+
if (tspi->is_packed)
command1 |= SPI_PACKED;
+ else
+ command1 &= ~SPI_PACKED;
command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
tspi->cur_direction = 0;
@@ -748,6 +828,9 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
tspi->def_command1_reg, (unsigned)command1);
+ ret = tegra_spi_flush_fifos(tspi);
+ if (ret < 0)
+ return ret;
if (total_fifo_words > SPI_FIFO_DEPTH)
ret = tegra_spi_start_dma_based_transfer(tspi, t);
else
@@ -774,6 +857,12 @@ static int tegra_spi_setup(struct spi_device *spi)
return ret;
}
+ if (tspi->soc_data->has_intr_mask_reg) {
+ val = tegra_spi_readl(tspi, SPI_INTR_MASK);
+ val &= ~SPI_INTR_ALL_MASK;
+ tegra_spi_writel(tspi, val, SPI_INTR_MASK);
+ }
+
spin_lock_irqsave(&tspi->lock, flags);
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
@@ -799,6 +888,33 @@ static void tegra_spi_transfer_delay(int delay)
udelay(delay % 1000);
}
+static void tegra_spi_transfer_end(struct spi_device *spi)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
+
+ if (cs_val)
+ tspi->command1_reg |= SPI_CS_SW_VAL;
+ else
+ tspi->command1_reg &= ~SPI_CS_SW_VAL;
+ tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+}
+
+static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
+{
+ dev_dbg(tspi->dev, "============ SPI REGISTER DUMP ============\n");
+ dev_dbg(tspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
+ tegra_spi_readl(tspi, SPI_COMMAND1),
+ tegra_spi_readl(tspi, SPI_COMMAND2));
+ dev_dbg(tspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
+ tegra_spi_readl(tspi, SPI_DMA_CTL),
+ tegra_spi_readl(tspi, SPI_DMA_BLK));
+ dev_dbg(tspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
+ tegra_spi_readl(tspi, SPI_TRANS_STATUS),
+ tegra_spi_readl(tspi, SPI_FIFO_STATUS));
+}
+
static int tegra_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -838,21 +954,32 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
if (WARN_ON(ret == 0)) {
dev_err(tspi->dev,
"spi transfer timeout, err %d\n", ret);
+ if (tspi->is_curr_dma_xfer &&
+ (tspi->cur_direction & DATA_DIR_TX))
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ if (tspi->is_curr_dma_xfer &&
+ (tspi->cur_direction & DATA_DIR_RX))
+ dmaengine_terminate_all(tspi->rx_dma_chan);
ret = -EIO;
+ tegra_spi_dump_regs(tspi);
+ tegra_spi_flush_fifos(tspi);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
goto complete_xfer;
}
if (tspi->tx_status || tspi->rx_status) {
dev_err(tspi->dev, "Error in Transfer\n");
ret = -EIO;
+ tegra_spi_dump_regs(tspi);
goto complete_xfer;
}
msg->actual_length += xfer->len;
complete_xfer:
if (ret < 0 || skip) {
- tegra_spi_writel(tspi, tspi->def_command1_reg,
- SPI_COMMAND1);
+ tegra_spi_transfer_end(spi);
tegra_spi_transfer_delay(xfer->delay_usecs);
goto exit;
} else if (list_is_last(&xfer->transfer_list,
@@ -860,13 +987,11 @@ complete_xfer:
if (xfer->cs_change)
tspi->cs_control = spi;
else {
- tegra_spi_writel(tspi, tspi->def_command1_reg,
- SPI_COMMAND1);
+ tegra_spi_transfer_end(spi);
tegra_spi_transfer_delay(xfer->delay_usecs);
}
} else if (xfer->cs_change) {
- tegra_spi_writel(tspi, tspi->def_command1_reg,
- SPI_COMMAND1);
+ tegra_spi_transfer_end(spi);
tegra_spi_transfer_delay(xfer->delay_usecs);
}
@@ -889,11 +1014,14 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
+ tegra_spi_dump_regs(tspi);
+ tegra_spi_flush_fifos(tspi);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
- complete(&tspi->xfer_completion);
- goto exit;
+ return IRQ_HANDLED;
}
if (tspi->cur_direction & DATA_DIR_RX)
@@ -961,11 +1089,13 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
+ tegra_spi_dump_regs(tspi);
+ tegra_spi_flush_fifos(tspi);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
- complete(&tspi->xfer_completion);
- spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
}
@@ -1021,8 +1151,29 @@ static irqreturn_t tegra_spi_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
+static struct tegra_spi_soc_data tegra114_spi_soc_data = {
+ .has_intr_mask_reg = false,
+};
+
+static struct tegra_spi_soc_data tegra124_spi_soc_data = {
+ .has_intr_mask_reg = false,
+};
+
+static struct tegra_spi_soc_data tegra210_spi_soc_data = {
+ .has_intr_mask_reg = true,
+};
+
static const struct of_device_id tegra_spi_of_match[] = {
- { .compatible = "nvidia,tegra114-spi", },
+ {
+ .compatible = "nvidia,tegra114-spi",
+ .data = &tegra114_spi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra124-spi",
+ .data = &tegra124_spi_soc_data,
+ }, {
+ .compatible = "nvidia,tegra210-spi",
+ .data = &tegra210_spi_soc_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
@@ -1033,6 +1184,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
struct tegra_spi_data *tspi;
struct resource *r;
int ret, spi_irq;
+ int bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
if (!master) {
@@ -1047,16 +1199,28 @@ static int tegra_spi_probe(struct platform_device *pdev)
master->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = tegra_spi_setup;
master->transfer_one_message = tegra_spi_transfer_one_message;
master->num_chipselect = MAX_CHIP_SELECT;
master->auto_runtime_pm = true;
+ bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (bus_num >= 0)
+ master->bus_num = bus_num;
tspi->master = master;
tspi->dev = &pdev->dev;
spin_lock_init(&tspi->lock);
+ tspi->soc_data = of_device_get_match_data(&pdev->dev);
+ if (!tspi->soc_data) {
+ dev_err(&pdev->dev, "unsupported tegra\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(tspi->base)) {
@@ -1067,27 +1231,19 @@ static int tegra_spi_probe(struct platform_device *pdev)
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
- ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
- tegra_spi_isr_thread, IRQF_ONESHOT,
- dev_name(&pdev->dev), tspi);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
- tspi->irq);
- goto exit_free_master;
- }
tspi->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
- goto exit_free_irq;
+ goto exit_free_master;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
- goto exit_free_irq;
+ goto exit_free_master;
}
tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
@@ -1095,7 +1251,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
ret = tegra_spi_init_dma_param(tspi, true);
if (ret < 0)
- goto exit_free_irq;
+ goto exit_free_master;
ret = tegra_spi_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
@@ -1117,18 +1273,32 @@ static int tegra_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
goto exit_pm_disable;
}
+
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
tspi->def_command1_reg = SPI_M_S;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
pm_runtime_put(&pdev->dev);
+ ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
+ tegra_spi_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_pm_disable;
+ }
master->dev.of_node = pdev->dev.of_node;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
- goto exit_pm_disable;
+ goto exit_free_irq;
}
return ret;
+exit_free_irq:
+ free_irq(spi_irq, tspi);
exit_pm_disable:
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
@@ -1136,8 +1306,6 @@ exit_pm_disable:
tegra_spi_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_spi_deinit_dma_param(tspi, true);
-exit_free_irq:
- free_irq(spi_irq, tspi);
exit_free_master:
spi_master_put(master);
return ret;
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 1427f343b39a..6d4679126213 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -717,9 +717,6 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
command2 = tspi->command2_reg;
command2 &= ~(SLINK_RXEN | SLINK_TXEN);
- tegra_slink_writel(tspi, command, SLINK_COMMAND);
- tspi->command_reg = command;
-
tspi->cur_direction = 0;
if (t->rx_buf) {
command2 |= SLINK_RXEN;
@@ -729,9 +726,18 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
command2 |= SLINK_TXEN;
tspi->cur_direction |= DATA_DIR_TX;
}
+
+	/*
+	 * Writing to the command2 register before the command register
+	 * prevents a spike on chip_select line 0, because the chip_select
+	 * line is configured before its value is changed.
+	 */
tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
tspi->command2_reg = command2;
+ tegra_slink_writel(tspi, command, SLINK_COMMAND);
+ tspi->command_reg = command;
+
if (total_fifo_words > SLINK_FIFO_DEPTH)
ret = tegra_slink_start_dma_based_transfer(tspi, t);
else
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index e7e8ea1edcce..8a5966963834 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -92,7 +92,6 @@
#define PCH_MAX_SPBR 1023
/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
-#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_SPI 0x802c
#define PCI_DEVICE_ID_ML7223_SPI 0x800F
#define PCI_DEVICE_ID_ML7831_SPI 0x8816
@@ -1300,18 +1299,27 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
dma->rx_buf_virt, dma->rx_buf_dma);
}
-static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
+ int ret;
dma = &data->dma;
+ ret = 0;
/* Get Consistent memory for Tx DMA */
dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
+ if (!dma->tx_buf_virt)
+ ret = -ENOMEM;
+
/* Get Consistent memory for Rx DMA */
dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
+ if (!dma->rx_buf_virt)
+ ret = -ENOMEM;
+
+ return ret;
}
static int pch_spi_pd_probe(struct platform_device *plat_dev)
@@ -1388,7 +1396,9 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
if (use_dma) {
dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
- pch_alloc_dma_buf(board_dat, data);
+ ret = pch_alloc_dma_buf(board_dat, data);
+ if (ret)
+ goto err_spi_register_master;
}
ret = spi_register_master(master);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
new file mode 100644
index 000000000000..c6bee67decb5
--- /dev/null
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -0,0 +1,761 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi-mem.h>
+
+/* Register offset definitions */
+#define ZYNQ_QSPI_CONFIG_OFFSET 0x00 /* Configuration Register, RW */
+#define ZYNQ_QSPI_STATUS_OFFSET 0x04 /* Interrupt Status Register, RO */
+#define ZYNQ_QSPI_IEN_OFFSET 0x08 /* Interrupt Enable Register, WO */
+#define ZYNQ_QSPI_IDIS_OFFSET 0x0C /* Interrupt Disable Reg, WO */
+#define ZYNQ_QSPI_IMASK_OFFSET 0x10 /* Interrupt Enabled Mask Reg,RO */
+#define ZYNQ_QSPI_ENABLE_OFFSET 0x14 /* Enable/Disable Register, RW */
+#define ZYNQ_QSPI_DELAY_OFFSET 0x18 /* Delay Register, RW */
+#define ZYNQ_QSPI_TXD_00_00_OFFSET 0x1C /* Transmit 4-byte inst, WO */
+#define ZYNQ_QSPI_TXD_00_01_OFFSET 0x80 /* Transmit 1-byte inst, WO */
+#define ZYNQ_QSPI_TXD_00_10_OFFSET 0x84 /* Transmit 2-byte inst, WO */
+#define ZYNQ_QSPI_TXD_00_11_OFFSET 0x88 /* Transmit 3-byte inst, WO */
+#define ZYNQ_QSPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
+#define ZYNQ_QSPI_SIC_OFFSET 0x24 /* Slave Idle Count Register, RW */
+#define ZYNQ_QSPI_TX_THRESH_OFFSET 0x28 /* TX FIFO Watermark Reg, RW */
+#define ZYNQ_QSPI_RX_THRESH_OFFSET 0x2C /* RX FIFO Watermark Reg, RW */
+#define ZYNQ_QSPI_GPIO_OFFSET 0x30 /* GPIO Register, RW */
+#define ZYNQ_QSPI_LINEAR_CFG_OFFSET 0xA0 /* Linear Adapter Config Reg, RW */
+#define ZYNQ_QSPI_MOD_ID_OFFSET 0xFC /* Module ID Register, RO */
+
+/*
+ * QSPI Configuration Register bit Masks
+ *
+ * This register contains various control bits that affect the operation
+ * of the QSPI controller.
+ */
+#define ZYNQ_QSPI_CONFIG_IFMODE_MASK BIT(31) /* Flash Memory Interface */
+#define ZYNQ_QSPI_CONFIG_MANSRT_MASK BIT(16) /* Manual TX Start */
+#define ZYNQ_QSPI_CONFIG_MANSRTEN_MASK BIT(15) /* Enable Manual TX Mode */
+#define ZYNQ_QSPI_CONFIG_SSFORCE_MASK BIT(14) /* Manual Chip Select */
+#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3) /* Baud Rate Mask */
+#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2) /* Clock Phase Control */
+#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1) /* Clock Polarity Control */
+#define ZYNQ_QSPI_CONFIG_SSCTRL_MASK BIT(10) /* Slave Select Mask */
+#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6) /* FIFO width */
+#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
+
+/*
+ * QSPI Configuration Register - Baud rate and slave select
+ *
+ * These are the values used in the calculation of baud rate divisor and
+ * setting the slave select.
+ */
+#define ZYNQ_QSPI_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
+#define ZYNQ_QSPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
+#define ZYNQ_QSPI_SS_SHIFT 10 /* Slave Select field shift in CR */
+
+/*
+ * QSPI Interrupt Registers bit Masks
+ *
+ * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
+ * bit definitions.
+ */
+#define ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK BIT(0) /* QSPI RX FIFO Overflow */
+#define ZYNQ_QSPI_IXR_TXNFULL_MASK BIT(2) /* QSPI TX FIFO not full */
+#define ZYNQ_QSPI_IXR_TXFULL_MASK BIT(3) /* QSPI TX FIFO is full */
+#define ZYNQ_QSPI_IXR_RXNEMTY_MASK BIT(4) /* QSPI RX FIFO Not Empty */
+#define ZYNQ_QSPI_IXR_RXF_FULL_MASK BIT(5) /* QSPI RX FIFO is full */
+#define ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK BIT(6) /* QSPI TX FIFO Underflow */
+#define ZYNQ_QSPI_IXR_ALL_MASK (ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK | \
+ ZYNQ_QSPI_IXR_TXNFULL_MASK | \
+ ZYNQ_QSPI_IXR_TXFULL_MASK | \
+ ZYNQ_QSPI_IXR_RXNEMTY_MASK | \
+ ZYNQ_QSPI_IXR_RXF_FULL_MASK | \
+ ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK)
+#define ZYNQ_QSPI_IXR_RXTX_MASK (ZYNQ_QSPI_IXR_TXNFULL_MASK | \
+ ZYNQ_QSPI_IXR_RXNEMTY_MASK)
+
+/*
+ * QSPI Enable Register bit Masks
+ *
+ * This register is used to enable or disable the QSPI controller
+ */
+#define ZYNQ_QSPI_ENABLE_ENABLE_MASK BIT(0) /* QSPI Enable Bit Mask */
+
+/*
+ * QSPI Linear Configuration Register
+ *
+ * Although named the Linear Configuration register, it also controls the
+ * controller's behaviour outside of linear mode.
+ */
+#define ZYNQ_QSPI_LCFG_TWO_MEM_MASK BIT(30) /* LQSPI Two memories Mask */
+#define ZYNQ_QSPI_LCFG_SEP_BUS_MASK BIT(29) /* LQSPI Separate bus Mask */
+#define ZYNQ_QSPI_LCFG_U_PAGE_MASK BIT(28) /* LQSPI Upper Page Mask */
+
+#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8
+
+#define ZYNQ_QSPI_FAST_READ_QOUT_CODE 0x6B /* read instruction code */
+#define ZYNQ_QSPI_FIFO_DEPTH 63 /* FIFO depth in words */
+#define ZYNQ_QSPI_RX_THRESHOLD 32 /* Rx FIFO threshold level */
+#define ZYNQ_QSPI_TX_THRESHOLD 1 /* Tx FIFO threshold level */
+
+/*
+ * The modebits configurable by the driver to make the SPI support different
+ * data formats
+ */
+#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)
+
+/* Default number of chip selects */
+#define ZYNQ_QSPI_DEFAULT_NUM_CS 1
+
+/**
+ * struct zynq_qspi - Defines qspi driver instance
+ * @dev: Pointer to the SPI controller device
+ * @regs: Virtual address of the QSPI controller registers
+ * @refclk: Pointer to the peripheral clock
+ * @pclk: Pointer to the APB clock
+ * @irq: IRQ number
+ * @txbuf: Pointer to the TX buffer
+ * @rxbuf: Pointer to the RX buffer
+ * @tx_bytes: Number of bytes left to transfer
+ * @rx_bytes: Number of bytes left to receive
+ * @data_completion: completion signalled when the current transfer finishes
+ */
+struct zynq_qspi {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *refclk;
+ struct clk *pclk;
+ int irq;
+ u8 *txbuf;
+ u8 *rxbuf;
+ int tx_bytes;
+ int rx_bytes;
+ struct completion data_completion;
+};
+
+/*
+ * Inline functions for the QSPI controller read/write
+ */
+static inline u32 zynq_qspi_read(struct zynq_qspi *xqspi, u32 offset)
+{
+ return readl_relaxed(xqspi->regs + offset);
+}
+
+static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
+ u32 val)
+{
+ writel_relaxed(val, xqspi->regs + offset);
+}
+
+/**
+ * zynq_qspi_init_hw - Initialize the hardware
+ * @xqspi: Pointer to the zynq_qspi structure
+ *
+ * The default settings of the QSPI controller's configurable parameters on
+ * reset are
+ * - Master mode
+ * - Baud rate divisor is set to 2
+ * - Tx threshold set to 1, Rx threshold set to 32
+ * - Flash memory interface mode enabled
+ * - Size of the word to be transferred as 8 bit
+ * This function performs the following actions
+ * - Disable and clear all the interrupts
+ * - Enable manual slave select
+ * - Enable manual start
+ * - Deselect all the chip select lines
+ * - Set the size of the word to be transferred as 32 bit
+ * - Set the little endian mode of TX FIFO and
+ * - Enable the QSPI controller
+ */
+static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
+{
+ u32 config_reg;
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
+
+ /* Disable linear mode as the boot loader may have used it */
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, 0);
+
+ /* Clear the RX FIFO */
+ while (zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET) &
+ ZYNQ_QSPI_IXR_RXNEMTY_MASK)
+ zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+ config_reg &= ~(ZYNQ_QSPI_CONFIG_MSTREN_MASK |
+ ZYNQ_QSPI_CONFIG_CPOL_MASK |
+ ZYNQ_QSPI_CONFIG_CPHA_MASK |
+ ZYNQ_QSPI_CONFIG_BDRATE_MASK |
+ ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
+ ZYNQ_QSPI_CONFIG_MANSRTEN_MASK |
+ ZYNQ_QSPI_CONFIG_MANSRT_MASK);
+ config_reg |= (ZYNQ_QSPI_CONFIG_MSTREN_MASK |
+ ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
+ ZYNQ_QSPI_CONFIG_FWIDTH_MASK |
+ ZYNQ_QSPI_CONFIG_IFMODE_MASK);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_RX_THRESH_OFFSET,
+ ZYNQ_QSPI_RX_THRESHOLD);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
+ ZYNQ_QSPI_TX_THRESHOLD);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+}
+
+static bool zynq_qspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+	/* The number of address bytes must not exceed 3 */
+ if (op->addr.nbytes > 3)
+ return false;
+
+ return true;
+}
+
+/**
+ * zynq_qspi_rxfifo_op - Read 1..4 bytes from RxFIFO to RX buffer
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @size: Number of bytes to be read (1..4)
+ */
+static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
+{
+ u32 data;
+
+ data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
+
+ if (xqspi->rxbuf) {
+ memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
+ xqspi->rxbuf += size;
+ }
+
+ xqspi->rx_bytes -= size;
+ if (xqspi->rx_bytes < 0)
+ xqspi->rx_bytes = 0;
+}
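The (4 - size) offset above relies on the valid bytes of a short word
arriving in the most-significant end of the 32-bit RXD value, which on the
little-endian Zynq are the last `size` bytes in memory. A self-contained
sketch of the same extraction with portable types (function name is ours):

#include <stdint.h>
#include <string.h>

/* Copy the valid `size` bytes (1..4) out of a 32-bit RXD word. */
static void rxd_extract(uint32_t rxd, uint8_t *buf, unsigned int size)
{
	memcpy(buf, (uint8_t *)&rxd + 4 - size, size);
}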
+
+/**
+ * zynq_qspi_txfifo_op - Write 1..4 bytes from TX buffer to TxFIFO
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @size: Number of bytes to be written (1..4)
+ */
+static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
+{
+ static const unsigned int offset[4] = {
+ ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
+ ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
+ u32 data;
+
+ if (xqspi->txbuf) {
+ data = 0xffffffff;
+ memcpy(&data, xqspi->txbuf, size);
+ xqspi->txbuf += size;
+ } else {
+ data = 0;
+ }
+
+ xqspi->tx_bytes -= size;
+ zynq_qspi_write(xqspi, offset[size - 1], data);
+}
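On the TX side the complementary packing pads the unused byte lanes with
0xff before the word is written to the size-specific TXD register. A
self-contained sketch (function name is ours):

#include <stdint.h>
#include <string.h>

/* Pack 1..3 valid bytes into a TXD word, padding the rest with 0xff. */
static uint32_t txd_pack(const uint8_t *buf, unsigned int size)
{
	uint32_t data = 0xffffffff;

	memcpy(&data, buf, size);
	return data;
}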
+
+/**
+ * zynq_qspi_chipselect - Select or deselect the chip select line
+ * @spi: Pointer to the spi_device structure
+ * @assert: true to select the chip select line, false to deselect it
+ */
+static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(ctrl);
+ u32 config_reg;
+
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+ if (assert) {
+ /* Select the slave */
+ config_reg &= ~ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
+ config_reg |= (((~(BIT(spi->chip_select))) <<
+ ZYNQ_QSPI_SS_SHIFT) &
+ ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
+ } else {
+ config_reg |= ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
+ }
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
+}
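The slave-select field is active low: the bit for the addressed chip is
cleared while the rest of the field stays set. A sketch of the encoding,
with the mask and shift values copied from the defines above (helper name
is ours):

#include <stdint.h>

#define SS_SHIFT	10		/* ZYNQ_QSPI_SS_SHIFT */
#define SSCTRL_MASK	(1u << 10)	/* ZYNQ_QSPI_CONFIG_SSCTRL_MASK */

/* CONFIG register SS field that asserts chip select @cs (active low). */
static uint32_t ss_field(unsigned int cs)
{
	return (~(1u << cs) << SS_SHIFT) & SSCTRL_MASK;
}
/* ss_field(0) == 0: bit 10 cleared, so chip select 0 is asserted. */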
+
+/**
+ * zynq_qspi_config_op - Configure QSPI controller for specified transfer
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @spi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of the QSPI controller for the next QSPI
+ * transfer and sets the requested clock frequency.
+ *
+ * Return: 0 on success and -EINVAL on invalid input parameter
+ *
+ * Note: If the requested frequency cannot be produced exactly by one of the
+ * available prescaler values, the driver picks the closest supported
+ * frequency below the requested one for the transfer. If the requested
+ * frequency lies outside the range the QSPI controller supports, the driver
+ * uses the highest or lowest supported frequency, respectively.
+ */
+static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
+{
+ u32 config_reg, baud_rate_val = 0;
+
+ /*
+ * Set the clock frequency
+ * The baud rate divisor is not a direct mapping to the value written
+ * into the configuration register (config_reg[5:3])
+ * i.e. 000 - divide by 2
+ * 001 - divide by 4
+ * ----------------
+ * 111 - divide by 256
+ */
+ while ((baud_rate_val < ZYNQ_QSPI_BAUD_DIV_MAX) &&
+ (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
+ spi->max_speed_hz)
+ baud_rate_val++;
+
+ config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
+
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= (~ZYNQ_QSPI_CONFIG_CPHA_MASK) &
+ (~ZYNQ_QSPI_CONFIG_CPOL_MASK);
+ if (spi->mode & SPI_CPHA)
+ config_reg |= ZYNQ_QSPI_CONFIG_CPHA_MASK;
+ if (spi->mode & SPI_CPOL)
+ config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;
+
+ config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
+ config_reg |= (baud_rate_val << ZYNQ_QSPI_BAUD_DIV_SHIFT);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
+
+ return 0;
+}
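As a worked example of the divisor search: with a 200 MHz ref_clk and a
device limited to 30 MHz, the loop stops at baud_rate_val = 2, i.e.
200 MHz / (2 << 2) = 25 MHz, the closest rate not above the limit. The same
search as a standalone sketch (function name is ours):

#include <stdint.h>

/* Smallest divisor field (0 => /2 ... 7 => /256) not exceeding max_hz. */
static uint32_t qspi_baud_div(uint32_t refclk_hz, uint32_t max_hz)
{
	uint32_t val = 0;

	while (val < 7 && refclk_hz / (2u << val) > max_hz)
		val++;
	return val;
}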
+
+/**
+ * zynq_qspi_setup_op - Configure the QSPI controller
+ * @spi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of the QSPI controller for the next QSPI
+ * transfer and the baud rate divisor for the requested QSPI clock.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_qspi_setup_op(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct zynq_qspi *qspi = spi_controller_get_devdata(ctrl);
+
+ if (ctrl->busy)
+ return -EBUSY;
+
+ clk_enable(qspi->refclk);
+ clk_enable(qspi->pclk);
+ zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+
+ return 0;
+}
+
+/**
+ * zynq_qspi_write_op - Fills the TX FIFO with as many bytes as possible
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @txcount: Maximum number of words to write
+ * @txempty: Indicates that TxFIFO is empty
+ */
+static void zynq_qspi_write_op(struct zynq_qspi *xqspi, int txcount,
+ bool txempty)
+{
+ int count, len, k;
+
+ len = xqspi->tx_bytes;
+ if (len && len < 4) {
+ /*
+ * We must empty the TxFIFO between accesses to TXD0,
+ * TXD1, TXD2, TXD3.
+ */
+ if (txempty)
+ zynq_qspi_txfifo_op(xqspi, len);
+
+ return;
+ }
+
+ count = len / 4;
+ if (count > txcount)
+ count = txcount;
+
+ if (xqspi->txbuf) {
+ iowrite32_rep(xqspi->regs + ZYNQ_QSPI_TXD_00_00_OFFSET,
+ xqspi->txbuf, count);
+ xqspi->txbuf += count * 4;
+ } else {
+ for (k = 0; k < count; k++)
+ writel_relaxed(0, xqspi->regs +
+ ZYNQ_QSPI_TXD_00_00_OFFSET);
+ }
+
+ xqspi->tx_bytes -= count * 4;
+}
+
+/**
+ * zynq_qspi_read_op - Drains the RX FIFO by as many bytes as possible
+ * @xqspi: Pointer to the zynq_qspi structure
+ * @rxcount: Maximum number of words to read
+ */
+static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
+{
+ int count, len, k;
+
+ len = xqspi->rx_bytes - xqspi->tx_bytes;
+ count = len / 4;
+ if (count > rxcount)
+ count = rxcount;
+ if (xqspi->rxbuf) {
+ ioread32_rep(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET,
+ xqspi->rxbuf, count);
+ xqspi->rxbuf += count * 4;
+ } else {
+ for (k = 0; k < count; k++)
+ readl_relaxed(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET);
+ }
+ xqspi->rx_bytes -= count * 4;
+ len -= count * 4;
+
+ if (len && len < 4 && count < rxcount)
+ zynq_qspi_rxfifo_op(xqspi, len);
+}
+
+/**
+ * zynq_qspi_irq - Interrupt service routine of the QSPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the xqspi structure
+ *
+ * This function handles TX empty only.
+ * On TX empty interrupt this function reads the received data from RX FIFO and
+ * fills the TX FIFO if there is any data remaining to be transferred.
+ *
+ * Return: IRQ_HANDLED when interrupt is handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
+{
+ u32 intr_status;
+ bool txempty;
+ struct zynq_qspi *xqspi = (struct zynq_qspi *)dev_id;
+
+ intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);
+
+ if ((intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK) ||
+ (intr_status & ZYNQ_QSPI_IXR_RXNEMTY_MASK)) {
+ /*
+ * This bit is set when Tx FIFO has < THRESHOLD entries.
+ * We have the THRESHOLD value set to 1,
+ * so this bit indicates Tx FIFO is empty.
+ */
+ txempty = !!(intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK);
+ /* Read out the data from the RX FIFO */
+ zynq_qspi_read_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD);
+ if (xqspi->tx_bytes) {
+ /* There is more data to send */
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD,
+ txempty);
+ } else {
+			/*
+			 * Signal completion only once both the transmit
+			 * and receive sides have finished.
+			 */
+ if (!xqspi->rx_bytes) {
+ zynq_qspi_write(xqspi,
+ ZYNQ_QSPI_IDIS_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ complete(&xqspi->data_completion);
+ }
+ }
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/**
+ * zynq_qspi_exec_mem_op() - Initiates the QSPI transfer
+ * @mem: the SPI memory
+ * @op: the memory operation to execute
+ *
+ * Executes a memory operation.
+ *
+ * This function first selects the chip and starts the memory operation.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
+ int err = 0, i;
+ u8 *tmpbuf;
+
+ dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth);
+
+ zynq_qspi_chipselect(mem->spi, true);
+ zynq_qspi_config_op(xqspi, mem->spi);
+
+ if (op->cmd.opcode) {
+ reinit_completion(&xqspi->data_completion);
+ xqspi->txbuf = (u8 *)&op->cmd.opcode;
+ xqspi->rxbuf = NULL;
+ xqspi->tx_bytes = sizeof(op->cmd.opcode);
+ xqspi->rx_bytes = sizeof(op->cmd.opcode);
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++) {
+ xqspi->txbuf[i] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+ }
+
+ reinit_completion(&xqspi->data_completion);
+ xqspi->rxbuf = NULL;
+ xqspi->tx_bytes = op->addr.nbytes;
+ xqspi->rx_bytes = op->addr.nbytes;
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+
+ if (op->dummy.nbytes) {
+		tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
+		if (!tmpbuf)
+			return -ENOMEM;
+		memset(tmpbuf, 0xff, op->dummy.nbytes);
+ reinit_completion(&xqspi->data_completion);
+ xqspi->txbuf = tmpbuf;
+ xqspi->rxbuf = NULL;
+ xqspi->tx_bytes = op->dummy.nbytes;
+ xqspi->rx_bytes = op->dummy.nbytes;
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+ kfree(tmpbuf);
+ }
+
+ if (op->data.nbytes) {
+ reinit_completion(&xqspi->data_completion);
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ xqspi->txbuf = (u8 *)op->data.buf.out;
+ xqspi->tx_bytes = op->data.nbytes;
+ xqspi->rxbuf = NULL;
+ xqspi->rx_bytes = op->data.nbytes;
+ } else {
+ xqspi->txbuf = NULL;
+ xqspi->rxbuf = (u8 *)op->data.buf.in;
+ xqspi->rx_bytes = op->data.nbytes;
+ xqspi->tx_bytes = op->data.nbytes;
+ }
+
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+ if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+ zynq_qspi_chipselect(mem->spi, false);
+
+ return err;
+}
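For context, a spi-mem user (such as an SPI-NOR layer) would describe one of
these transactions with the op macros from <linux/spi/spi-mem.h>; the
controller then sequences the cmd, addr, dummy and data phases exactly as
above. An illustrative 1-1-4 fast read, not taken from this patch:

/* Hypothetical caller: quad-output fast read (0x6b), 3-byte address,
 * one dummy byte, data returned on four lines.
 */
static int example_quad_read(struct spi_mem *mem, u32 addr, void *buf,
			     size_t len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, addr, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 4));

	return spi_mem_exec_op(mem, &op);
}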
+
+static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
+ .supports_op = zynq_qspi_supports_op,
+ .exec_op = zynq_qspi_exec_mem_op,
+};
+
+/**
+ * zynq_qspi_probe - Probe method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_qspi_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct zynq_qspi *xqspi;
+ struct resource *res;
+ u32 num_cs;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ xqspi = spi_controller_get_devdata(ctlr);
+ xqspi->dev = dev;
+ platform_set_drvdata(pdev, xqspi);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xqspi->regs)) {
+ ret = PTR_ERR(xqspi->regs);
+ goto remove_master;
+ }
+
+ xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(xqspi->pclk)) {
+ dev_err(&pdev->dev, "pclk clock not found.\n");
+ ret = PTR_ERR(xqspi->pclk);
+ goto remove_master;
+ }
+
+ init_completion(&xqspi->data_completion);
+
+ xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(xqspi->refclk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xqspi->refclk);
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable APB clock.\n");
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto clk_dis_pclk;
+ }
+
+ /* QSPI controller initializations */
+ zynq_qspi_init_hw(xqspi);
+
+ xqspi->irq = platform_get_irq(pdev, 0);
+ if (xqspi->irq <= 0) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "irq resource not found\n");
+		goto clk_dis_all;
+ }
+ ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
+ 0, pdev->name, xqspi);
+ if (ret != 0) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "request_irq failed\n");
+		goto clk_dis_all;
+ }
+
+ ret = of_property_read_u32(np, "num-cs",
+ &num_cs);
+ if (ret < 0)
+ ctlr->num_chipselect = ZYNQ_QSPI_DEFAULT_NUM_CS;
+ else
+ ctlr->num_chipselect = num_cs;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->mem_ops = &zynq_qspi_mem_ops;
+ ctlr->setup = zynq_qspi_setup_op;
+ ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
+ ctlr->dev.of_node = np;
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+		dev_err(&pdev->dev, "spi_register_controller failed\n");
+ goto clk_dis_all;
+ }
+
+ return ret;
+
+clk_dis_all:
+ clk_disable_unprepare(xqspi->refclk);
+clk_dis_pclk:
+ clk_disable_unprepare(xqspi->pclk);
+remove_master:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+/**
+ * zynq_qspi_remove - Remove method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_qspi_remove(struct platform_device *pdev)
+{
+ struct zynq_qspi *xqspi = platform_get_drvdata(pdev);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
+
+ clk_disable_unprepare(xqspi->refclk);
+ clk_disable_unprepare(xqspi->pclk);
+
+ return 0;
+}
+
+static const struct of_device_id zynq_qspi_of_match[] = {
+ { .compatible = "xlnx,zynq-qspi-1.0", },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynq_qspi_of_match);
+
+/*
+ * zynq_qspi_driver - This structure defines the QSPI platform driver
+ */
+static struct platform_driver zynq_qspi_driver = {
+ .probe = zynq_qspi_probe,
+ .remove = zynq_qspi_remove,
+ .driver = {
+ .name = "zynq-qspi",
+ .of_match_table = zynq_qspi_of_match,
+ },
+};
+
+module_platform_driver(zynq_qspi_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx Zynq QSPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 93986f879b09..5e75944ad5d1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -36,6 +36,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
+EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
@@ -1039,6 +1041,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (max_tx || max_rx) {
list_for_each_entry(xfer, &msg->transfers,
transfer_list) {
+ if (!xfer->len)
+ continue;
if (!xfer->tx_buf)
xfer->tx_buf = ctlr->dummy_tx;
if (!xfer->rx_buf)
@@ -1177,10 +1181,10 @@ out:
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
- spi_res_release(ctlr, msg);
-
spi_finalize_current_message(ctlr);
+ spi_res_release(ctlr, msg);
+
return ret;
}
@@ -2195,6 +2199,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
*/
cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
GPIOD_OUT_LOW);
+ if (IS_ERR(cs[i]))
+ return PTR_ERR(cs[i]);
if (cs[i]) {
/*
@@ -2261,7 +2267,7 @@ int spi_register_controller(struct spi_controller *ctlr)
{
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
- int status = -ENODEV;
+ int status;
int id, first_dynamic;
if (!dev)
@@ -2275,24 +2281,6 @@ int spi_register_controller(struct spi_controller *ctlr)
if (status)
return status;
- if (!spi_controller_is_slave(ctlr)) {
- if (ctlr->use_gpio_descriptors) {
- status = spi_get_gpio_descs(ctlr);
- if (status)
- return status;
- /*
- * A controller using GPIO descriptors always
- * supports SPI_CS_HIGH if need be.
- */
- ctlr->mode_bits |= SPI_CS_HIGH;
- } else {
- /* Legacy code path for GPIOs from DT */
- status = of_spi_register_master(ctlr);
- if (status)
- return status;
- }
- }
-
/* even if it's just one always-selected device, there must
* be at least one chipselect
*/
@@ -2349,6 +2337,25 @@ int spi_register_controller(struct spi_controller *ctlr)
* registration fails if the bus ID is in use.
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
+
+ if (!spi_controller_is_slave(ctlr)) {
+ if (ctlr->use_gpio_descriptors) {
+ status = spi_get_gpio_descs(ctlr);
+ if (status)
+ return status;
+ /*
+ * A controller using GPIO descriptors always
+ * supports SPI_CS_HIGH if need be.
+ */
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ } else {
+ /* Legacy code path for GPIOs from DT */
+ status = of_spi_register_master(ctlr);
+ if (status)
+ return status;
+ }
+ }
+
status = device_add(&ctlr->dev);
if (status < 0) {
/* free bus id */
@@ -2781,11 +2788,6 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
size_t offset;
size_t count, i;
- /* warn once about this fact that we are splitting a transfer */
- dev_warn_once(&msg->spi->dev,
- "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
- xfer->len, maxsize);
-
/* calculate how many we have to replace */
count = DIV_ROUND_UP(xfer->len, maxsize);
@@ -2943,6 +2945,11 @@ int spi_setup(struct spi_device *spi)
* so it is ignored here.
*/
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
+	/* Nothing prevents us from working with an active-high CS when it
+	 * is driven by a GPIO.
+	 */
+ if (gpio_is_valid(spi->cs_gpio))
+ bad_bits &= ~SPI_CS_HIGH;
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
@@ -2988,6 +2995,21 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
+/**
+ * spi_set_cs_timing - configure CS setup, hold, and inactive delays
+ * @spi: the device that requires specific CS timing configuration
+ * @setup: CS setup time in terms of clock count
+ * @hold: CS hold time in terms of clock count
+ * @inactive_dly: CS inactive delay between transfers in terms of clock count
+ */
+void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
+ u8 inactive_dly)
+{
+ if (spi->controller->set_cs_timing)
+ spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
+}
+EXPORT_SYMBOL_GPL(spi_set_cs_timing);
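A hypothetical client would call this once at setup time; the call is a
no-op on controllers that do not implement ->set_cs_timing. Illustrative
values only:

static void example_configure_cs(struct spi_device *spi)
{
	/* 2 clocks CS setup, 2 clocks hold, 4 clocks inactive delay */
	spi_set_cs_timing(spi, 2, 2, 4);
}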
+
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
@@ -3062,8 +3084,6 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
- if (!xfer->speed_hz)
- xfer->speed_hz = ctlr->max_speed_hz;
if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
xfer->speed_hz = ctlr->max_speed_hz;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index b0c76e2626ce..70966e10be7e 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -276,17 +276,19 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
+ k_tmp->word_delay_usecs = u_tmp->word_delay_usecs;
if (!k_tmp->speed_hz)
k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
dev_dbg(&spidev->spi->dev,
- " xfer len %u %s%s%s%dbits %u usec %uHz\n",
+ " xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
u_tmp->len,
u_tmp->rx_buf ? "rx " : "",
u_tmp->tx_buf ? "tx " : "",
u_tmp->cs_change ? "cs " : "",
u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
u_tmp->delay_usecs,
+ u_tmp->word_delay_usecs,
u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
spi_message_add_tail(k_tmp, &msg);
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 84807a9b4b13..da2d2ab8104d 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -305,7 +305,6 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
else if (i % 2)
pr_cont(".");
writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
- mmiowb();
msleep(20);
}
err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl);
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index 567013f8a8be..d7d730c245c5 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -338,7 +338,6 @@ static void ssb_pcmcia_write8(struct ssb_device *dev, u16 offset, u8 value)
err = select_core_and_segment(dev, &offset);
if (likely(!err))
writeb(value, bus->mmio + offset);
- mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
@@ -352,7 +351,6 @@ static void ssb_pcmcia_write16(struct ssb_device *dev, u16 offset, u16 value)
err = select_core_and_segment(dev, &offset);
if (likely(!err))
writew(value, bus->mmio + offset);
- mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
@@ -368,7 +366,6 @@ static void ssb_pcmcia_write32(struct ssb_device *dev, u16 offset, u32 value)
writew((value & 0x0000FFFF), bus->mmio + offset);
writew(((value & 0xFFFF0000) >> 16), bus->mmio + offset + 2);
}
- mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
@@ -424,7 +421,6 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer,
WARN_ON(1);
}
unlock:
- mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
#endif /* CONFIG_SSB_BLOCKIO */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index c0901b96cfe4..c7d7dc89deba 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -106,16 +106,12 @@ source "drivers/staging/mt7621-pci-phy/Kconfig"
source "drivers/staging/mt7621-pinctrl/Kconfig"
-source "drivers/staging/mt7621-spi/Kconfig"
-
source "drivers/staging/mt7621-dma/Kconfig"
source "drivers/staging/ralink-gdma/Kconfig"
source "drivers/staging/mt7621-mmc/Kconfig"
-source "drivers/staging/mt7621-eth/Kconfig"
-
source "drivers/staging/mt7621-dts/Kconfig"
source "drivers/staging/gasket/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 57c6bce13ff4..19c0a25ff98b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -43,11 +43,9 @@ obj-$(CONFIG_PI433) += pi433/
obj-$(CONFIG_PCI_MT7621) += mt7621-pci/
obj-$(CONFIG_PCI_MT7621_PHY) += mt7621-pci-phy/
obj-$(CONFIG_PINCTRL_RT2880) += mt7621-pinctrl/
-obj-$(CONFIG_SPI_MT7621) += mt7621-spi/
obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_MTK_MMC) += mt7621-mmc/
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index 687537203d9c..d9725888af6f 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -3,6 +3,7 @@
#
config XIL_AXIS_FIFO
tristate "Xilinx AXI-Stream FIFO IP core driver"
+ depends on OF
default n
help
This adds support for the Xilinx AXI-Stream
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index a7d569cfca5d..0dff1ac057cd 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
unsigned int mask);
unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
unsigned int comedi_nscans_left(struct comedi_subdevice *s,
unsigned int nscans);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index eefa62f42c0f..5a32b8fc000e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
EXPORT_SYMBOL_GPL(comedi_dio_update_state);
/**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
+ * bytes
* @s: COMEDI subdevice.
+ * @cmd: COMEDI command.
*
* Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
+ * number of channels in the scan for the specified command.
*
* For digital input, output or input/output subdevices, samples for
* multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
*
* Returns the overall scan length in bytes.
*/
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
- struct comedi_cmd *cmd = &s->async->cmd;
unsigned int num_samples;
unsigned int bits_per_sample;
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
}
return comedi_samples_to_bytes(s, num_samples);
}
+EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+
+/**
+ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * @s: COMEDI subdevice.
+ *
+ * Determines the overall scan length according to the subdevice type and the
+ * number of channels in the scan for the current command.
+ *
+ * For digital input, output or input/output subdevices, samples for
+ * multiple channels are assumed to be packed into one or more unsigned
+ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
+ * flag. For other types of subdevice, samples are assumed to occupy a
+ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
+ *
+ * Returns the overall scan length in bytes.
+ */
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ return comedi_bytes_per_scan_cmd(s, cmd);
+}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
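For digital subdevices the kernel-doc above means channels are bit-packed
into whole samples. A worked sketch under that reading (function name is
ours): with scan_end_arg = 24 channels and 32-bit samples (%SDF_LSAMPL set),
the scan needs DIV_ROUND_UP(24, 32) = 1 sample, i.e. 4 bytes.

static unsigned int dio_bytes_per_scan(unsigned int scan_end_arg,
				       unsigned int bytes_per_sample)
{
	unsigned int bits_per_sample = 8 * bytes_per_sample;

	return ((scan_end_arg + bits_per_sample - 1) / bits_per_sample) *
	       bytes_per_sample;
}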
static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 61e03ad84123..639ec1586976 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -371,7 +371,6 @@ static unsigned int mite_get_status(struct mite_channel *mite_chan)
writel(CHOR_CLRDONE,
mite->mmio + MITE_CHOR(mite_chan->channel));
}
- mmiowb();
spin_unlock_irqrestore(&mite->lock, flags);
return status;
}
@@ -451,7 +450,6 @@ void mite_dma_arm(struct mite_channel *mite_chan)
mite_chan->done = 0;
/* arm */
writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
- mmiowb();
spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_dma_arm);
@@ -638,7 +636,6 @@ void mite_release_channel(struct mite_channel *mite_chan)
CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
mite->mmio + MITE_CHCR(mite_chan->channel));
mite_chan->ring = NULL;
- mmiowb();
}
spin_unlock_irqrestore(&mite->lock, flags);
}
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 405573e927cf..4ee9b260eab0 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -320,7 +320,6 @@ static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
ni_660x_write(dev, chip, devpriv->dma_cfg[chip] |
NI660X_DMA_CFG_RESET(mite_channel),
NI660X_DMA_CFG);
- mmiowb();
}
static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
@@ -333,7 +332,6 @@ static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(mite_channel);
ni_660x_write(dev, chip, devpriv->dma_cfg[chip], NI660X_DMA_CFG);
- mmiowb();
}
static int ni_660x_request_mite_channel(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5edf59ac6706..668f2aa16baa 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -547,7 +547,6 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
reg);
break;
}
- mmiowb();
spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
@@ -3545,6 +3544,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
struct ni_private *devpriv = dev->private;
+ unsigned int bytes_per_scan;
int err = 0;
/* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3579,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
- err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
- s->async->prealloc_bufsz /
- comedi_bytes_per_scan(s));
+ bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
+ if (bytes_per_scan) {
+ err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+ s->async->prealloc_bufsz /
+ bytes_per_scan);
+ }
if (err)
return 3;
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 4bdef87d5dd7..8f3864799c19 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -310,7 +310,6 @@ static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) |
secondary_DMAChannel_bits(devpriv->di_mite_chan->channel),
dev->mmio + DMA_LINE_CONTROL_GROUP1);
- mmiowb();
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
@@ -327,7 +326,6 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
writeb(primary_DMAChannel_bits(0) |
secondary_DMAChannel_bits(0),
dev->mmio + DMA_LINE_CONTROL_GROUP1);
- mmiowb();
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 048cb35723ad..c1131a1622c0 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -234,7 +234,6 @@ static void ni_tio_set_bits_transient(struct ni_gpct *counter,
regs[reg] &= ~mask;
regs[reg] |= (value & mask);
ni_tio_write(counter, regs[reg] | transient, reg);
- mmiowb();
spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
}
}
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 808ed92ed66f..1bb1cb651349 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
size = usb_endpoint_maxp(devpriv->ep_tx);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_tx_buf) {
- kfree(devpriv->usb_rx_buf);
+ if (!devpriv->usb_tx_buf)
return -ENOMEM;
- }
return 0;
}
@@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
if (!devpriv)
return -ENOMEM;
+ mutex_init(&devpriv->mut);
+ usb_set_intfdata(intf, devpriv);
+
ret = ni6501_find_endpoints(dev);
if (ret)
return ret;
@@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- mutex_init(&devpriv->mut);
- usb_set_intfdata(intf, devpriv);
-
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index f5af6f4069dc..39049d3c56d7 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -108,7 +108,6 @@ static void s626_mc_enable(struct comedi_device *dev,
{
unsigned int val = (cmd << 16) | cmd;
- mmiowb();
writel(val, dev->mmio + reg);
}
@@ -116,7 +115,6 @@ static void s626_mc_disable(struct comedi_device *dev,
unsigned int cmd, unsigned int reg)
{
writel(cmd << 16, dev->mmio + reg);
- mmiowb();
}
static bool s626_mc_test(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 6234b649d887..65dc6c51037e 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
size = usb_endpoint_maxp(devpriv->ep_tx);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_tx_buf) {
- kfree(devpriv->usb_rx_buf);
+ if (!devpriv->usb_tx_buf)
return -ENOMEM;
- }
return 0;
}
@@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
devpriv->model = board->model;
+ sema_init(&devpriv->limit_sem, 8);
+
ret = vmk80xx_find_usb_endpoints(dev);
if (ret)
return ret;
@@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- sema_init(&devpriv->limit_sem, 8);
-
usb_set_intfdata(intf, devpriv);
if (devpriv->model == VMK8055_MODEL)
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index 526e0dbea5b5..81af768e7248 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -298,7 +298,7 @@ submit_bio_retry:
*last_block = current_block;
/* shift in advance in case of it followed by too many gaps */
- if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
+ if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
/* err should reassign to 0 after submitting */
err = 0;
goto submit_bio_out;
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 829f7b12e0dc..9bbc68729c11 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
[EROFS_FT_SYMLINK] = DT_LNK,
};
+static void debug_one_dentry(unsigned char d_type, const char *de_name,
+ unsigned int de_namelen)
+{
+#ifdef CONFIG_EROFS_FS_DEBUG
+	/* the on-disk name is not necessarily NUL-terminated */
+ unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
+
+ memcpy(dbg_namebuf, de_name, de_namelen);
+ dbg_namebuf[de_namelen] = '\0';
+
+ debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
+ de_namelen, d_type);
+#endif
+}
+
static int erofs_fill_dentries(struct dir_context *ctx,
void *dentry_blk, unsigned int *ofs,
unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
de = dentry_blk + *ofs;
while (de < end) {
const char *de_name;
- int de_namelen;
+ unsigned int de_namelen;
unsigned char d_type;
-#ifdef CONFIG_EROFS_FS_DEBUG
- unsigned int dbg_namelen;
- unsigned char dbg_namebuf[EROFS_NAME_LEN];
-#endif
- if (unlikely(de->file_type < EROFS_FT_MAX))
+ if (de->file_type < EROFS_FT_MAX)
d_type = erofs_filetype_table[de->file_type];
else
d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
nameoff = le16_to_cpu(de->nameoff);
de_name = (char *)dentry_blk + nameoff;
- de_namelen = unlikely(de + 1 >= end) ?
- /* last directory entry */
- strnlen(de_name, maxsize - nameoff) :
- le16_to_cpu(de[1].nameoff) - nameoff;
+ /* the last dirent in the block? */
+ if (de + 1 >= end)
+ de_namelen = strnlen(de_name, maxsize - nameoff);
+ else
+ de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
/* a corrupted entry is found */
- if (unlikely(de_namelen < 0)) {
+ if (unlikely(nameoff + de_namelen > maxsize ||
+ de_namelen > EROFS_NAME_LEN)) {
DBG_BUGON(1);
return -EIO;
}
-#ifdef CONFIG_EROFS_FS_DEBUG
- dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
- memcpy(dbg_namebuf, de_name, dbg_namelen);
- dbg_namebuf[dbg_namelen] = '\0';
-
- debugln("%s, found de_name %s de_len %d d_type %d", __func__,
- dbg_namebuf, de_namelen, d_type);
-#endif
-
+ debug_one_dentry(d_type, de_name, de_namelen);
if (!dir_emit(ctx, de_name, de_namelen,
le64_to_cpu(de->nid), d_type))
/* stopped by some reason */
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 8715bc50e09c..31eef8395774 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -972,6 +972,7 @@ repeat:
overlapped = false;
compressed_pages = grp->compressed_pages;
+ err = 0;
for (i = 0; i < clusterpages; ++i) {
unsigned int pagenr;
@@ -981,26 +982,39 @@ repeat:
DBG_BUGON(!page);
DBG_BUGON(!page->mapping);
- if (z_erofs_is_stagingpage(page))
- continue;
+ if (!z_erofs_is_stagingpage(page)) {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (page->mapping == MNGD_MAPPING(sbi)) {
- DBG_BUGON(!PageUptodate(page));
- continue;
- }
+ if (page->mapping == MNGD_MAPPING(sbi)) {
+ if (unlikely(!PageUptodate(page)))
+ err = -EIO;
+ continue;
+ }
#endif
- /* only non-head page could be reused as a compressed page */
- pagenr = z_erofs_onlinepage_index(page);
+ /*
+ * only a non-head page can be selected
+ * for in-place decompression
+ */
+ pagenr = z_erofs_onlinepage_index(page);
- DBG_BUGON(pagenr >= nr_pages);
- DBG_BUGON(pages[pagenr]);
- ++sparsemem_pages;
- pages[pagenr] = page;
+ DBG_BUGON(pagenr >= nr_pages);
+ DBG_BUGON(pages[pagenr]);
+ ++sparsemem_pages;
+ pages[pagenr] = page;
- overlapped = true;
+ overlapped = true;
+ }
+
+ /* PG_error needs checking for in-place and staging pages */
+ if (unlikely(PageError(page))) {
+ DBG_BUGON(PageUptodate(page));
+ err = -EIO;
+ }
}
+ if (unlikely(err))
+ goto out;
+
llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ repeat:
skip_allocpage:
vout = erofs_vmap(pages, nr_pages);
+ if (!vout) {
+ err = -ENOMEM;
+ goto out;
+ }
err = z_erofs_vle_unzip_vmap(compressed_pages,
clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ repeat:
if (page->mapping == mc) {
WRITE_ONCE(grp->compressed_pages[nr], page);
+ ClearPageError(page);
if (!PagePrivate(page)) {
/*
* impossible to be !PagePrivate(page) for
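
Rather than aborting on the first bad page, the reworked loop records the failure and keeps iterating so the per-page accounting stays consistent, bailing out only after the whole cluster has been scanned. A compact sketch of that record-then-bail pattern; page_has_error() is a hypothetical stand-in for the PageError() test:

#include <errno.h>
#include <stdio.h>

/* hypothetical stand-in: pretend page 3 of the cluster failed I/O */
static int page_has_error(int idx) { return idx == 3; }

static int scan_cluster(int clusterpages)
{
	int i, err = 0;

	for (i = 0; i < clusterpages; i++) {
		/* ... per-page bookkeeping would happen here ... */
		if (page_has_error(i))
			err = -EIO;	/* record, but keep scanning */
	}
	return err;	/* bail only after the full scan */
}

int main(void)
{
	printf("%d\n", scan_cluster(8));	/* -5 (-EIO) */
	return 0;
}
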
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 48b263a2731a..0daac9b984a8 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
- if (clusterpages == 1)
+ if (clusterpages == 1) {
vin = kmap_atomic(compressed_pages[0]);
- else
+ } else {
vin = erofs_vmap(compressed_pages, clusterpages);
+ if (!vin)
+ return -ENOMEM;
+ }
preempt_disable();
vout = erofs_pcpubuf[smp_processor_id()].data;
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index acdbc07fd259..2fc8bc22b57b 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -109,10 +109,10 @@
#define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */
#define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */
-#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */
-#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */
-#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */
-#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */
#define AD7193_CH_TEMP 0x100 /* Temp sensor */
#define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */
#define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 029c3bf42d4d..07774c000c5a 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
static IIO_DEV_ATTR_IPEAK(0644,
ade7854_read_32bit,
ade7854_write_32bit,
- ADE7854_VPEAK);
+ ADE7854_IPEAK);
static IIO_DEV_ATTR_APHCAL(0644,
ade7854_read_16bit,
ade7854_write_16bit,
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 06ebea0be118..122d4c0af363 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -219,7 +219,6 @@ michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result)
}
desc->tfm = tfm;
- desc->flags = 0;
ret = crypto_shash_init(desc);
if (ret < 0)
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 19cadd17e542..1da5c20d65c0 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -25,10 +25,6 @@ source "drivers/staging/media/davinci_vpfe/Kconfig"
source "drivers/staging/media/imx/Kconfig"
-source "drivers/staging/media/imx074/Kconfig"
-
-source "drivers/staging/media/mt9t031/Kconfig"
-
source "drivers/staging/media/omap4iss/Kconfig"
source "drivers/staging/media/rockchip/vpu/Kconfig"
@@ -41,4 +37,6 @@ source "drivers/staging/media/zoran/Kconfig"
source "drivers/staging/media/ipu3/Kconfig"
+source "drivers/staging/media/soc_camera/Kconfig"
+
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index edde1960b030..0355e3030504 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
-obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074/
-obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
@@ -10,3 +8,4 @@ obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_ZORAN) += zoran/
obj-$(CONFIG_VIDEO_ROCKCHIP_VPU) += rockchip/vpu/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
+obj-$(CONFIG_SOC_CAMERA) += soc_camera/
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index 5618c804c7e4..565a3dc5bed1 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -781,7 +781,7 @@ ipipe_set_3d_lut_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
if (!lut_3d->en)
return;
- /* valied table */
+ /* valid table */
tbl = lut_3d->table;
for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
/*
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 625d0aa8367f..0a6d038fcec9 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -675,7 +675,7 @@ static void isif_config_bclamp(struct vpfe_isif_device *isif,
val = (bc->bc_mode_color & ISIF_BC_MODE_COLOR_MASK) <<
ISIF_BC_MODE_COLOR_SHIFT;
- /* Enable BC and horizontal clamp calculation paramaters */
+ /* Enable BC and horizontal clamp calculation parameters */
val = val | 1 | ((bc->horz.mode & ISIF_HORZ_BC_MODE_MASK) <<
ISIF_HORZ_BC_MODE_SHIFT);
@@ -712,7 +712,7 @@ static void isif_config_bclamp(struct vpfe_isif_device *isif,
isif_write(isif->isif_cfg.base_addr, val, CLHWIN2);
}
- /* vertical clamp calculation paramaters */
+ /* vertical clamp calculation parameters */
/* OB H Valid */
val = bc->vert.ob_h_sz_calc & ISIF_VERT_BC_OB_H_SZ_MASK;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 6098f43ac51b..9d726298b406 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -1284,7 +1284,7 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
* @cfg: V4L2 subdev pad config
* @pad: pad number.
* @which: wanted subdev format.
- * Retun wanted mbus frame format.
+ * Return wanted mbus frame format.
*/
static struct v4l2_mbus_framefmt *
__resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
@@ -1785,7 +1785,7 @@ void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz)
/*
* vpfe_resizer_register_entities() - Register entity
- * @resizer - pointer to resizer devive.
+ * @resizer - pointer to resizer device.
* @vdev: pointer to v4l2 device structure.
*/
int vpfe_resizer_register_entities(struct vpfe_resizer_device *resizer,
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index 34d63c2e9199..57b93605bc58 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -528,7 +528,7 @@ static void vpfe_cleanup_modules(struct vpfe_device *vpfe_dev,
* @vpfe_dev - ptr to vpfe capture device
* @pdev - pointer to platform device
*
- * intialize all v4l2 subdevs and media entities
+ * initialize all v4l2 subdevs and media entities
*/
static int vpfe_initialize_modules(struct vpfe_device *vpfe_dev,
struct platform_device *pdev)
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
index bfc17de56b17..36b276ea2ecc 100644
--- a/drivers/staging/media/imx/Kconfig
+++ b/drivers/staging/media/imx/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_IMX_MEDIA
driver for the i.MX5/6 SOC.
if VIDEO_IMX_MEDIA
-menu "i.MX5/6 Media Sub devices"
+menu "i.MX5/6/7 Media Sub devices"
config VIDEO_IMX_CSI
tristate "i.MX5/6 Camera Sensor Interface driver"
@@ -20,5 +20,12 @@ config VIDEO_IMX_CSI
---help---
A video4linux camera sensor interface driver for i.MX5/6.
+config VIDEO_IMX7_CSI
+ tristate "i.MX7 Camera Sensor Interface driver"
+ depends on VIDEO_IMX_MEDIA && VIDEO_DEV && I2C
+ default y
+ help
+ Enable support for the video4linux camera sensor interface driver
+ for i.MX7.
endmenu
endif
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index 698a4210316e..d2d909a36239 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
imx-media-objs := imx-media-dev.o imx-media-internal-sd.o imx-media-of.o
+imx-media-objs += imx-media-dev-common.o
imx-media-common-objs := imx-media-utils.o imx-media-fim.o
imx-media-ic-objs := imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o
@@ -11,3 +12,6 @@ obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-ic.o
obj-$(CONFIG_VIDEO_IMX_CSI) += imx-media-csi.o
obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-mipi-csi2.o
+
+obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-media-csi.o
+obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-mipi-csis.o
diff --git a/drivers/staging/media/imx/TODO b/drivers/staging/media/imx/TODO
index aeeb15494a49..6f29b5ca5324 100644
--- a/drivers/staging/media/imx/TODO
+++ b/drivers/staging/media/imx/TODO
@@ -45,3 +45,12 @@
Which means a port must not contain mixed-use endpoints, they
must all refer to media links between V4L2 subdevices.
+
+- i.MX7: all of the above, since it uses the imx media core
+
+- i.MX7: use Frame Interval Monitor
+
+- i.MX7: runtime testing with parallel sensor, links setup and streaming
+
+- i.MX7: runtime testing with different formats; for the time being, only
+  10-bit Bayer is tested
diff --git a/drivers/staging/media/imx/imx-ic-common.c b/drivers/staging/media/imx/imx-ic-common.c
index cfdd4900a3be..765919487a73 100644
--- a/drivers/staging/media/imx/imx-ic-common.c
+++ b/drivers/staging/media/imx/imx-ic-common.c
@@ -41,13 +41,13 @@ static int imx_ic_probe(struct platform_device *pdev)
pdata = priv->dev->platform_data;
priv->ipu_id = pdata->ipu_id;
switch (pdata->grp_id) {
- case IMX_MEDIA_GRP_ID_IC_PRP:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
priv->task_id = IC_TASK_PRP;
break;
- case IMX_MEDIA_GRP_ID_IC_PRPENC:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
priv->task_id = IC_TASK_ENCODER;
break;
- case IMX_MEDIA_GRP_ID_IC_PRPVF:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
priv->task_id = IC_TASK_VIEWFINDER;
break;
default:
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 98923fc844ce..3d43cdcb4bb9 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -77,7 +77,7 @@ static int prp_start(struct prp_priv *priv)
priv->ipu = priv->md->ipu[ic_priv->ipu_id];
/* set IC to receive from CSI or VDI depending on source */
- src_is_vdic = !!(priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_VDIC);
+ src_is_vdic = !!(priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC);
ipu_set_ic_src_mux(priv->ipu, priv->csi_id, src_is_vdic);
@@ -237,8 +237,8 @@ static int prp_link_setup(struct media_entity *entity,
ret = -EBUSY;
goto out;
}
- if (priv->sink_sd_prpenc && (remote_sd->grp_id &
- IMX_MEDIA_GRP_ID_VDIC)) {
+ if (priv->sink_sd_prpenc &&
+ (remote_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC)) {
ret = -EINVAL;
goto out;
}
@@ -259,7 +259,7 @@ static int prp_link_setup(struct media_entity *entity,
goto out;
}
if (priv->src_sd && (priv->src_sd->grp_id &
- IMX_MEDIA_GRP_ID_VDIC)) {
+ IMX_MEDIA_GRP_ID_IPU_VDIC)) {
ret = -EINVAL;
goto out;
}
@@ -309,13 +309,13 @@ static int prp_link_validate(struct v4l2_subdev *sd,
return ret;
csi = imx_media_find_upstream_subdev(priv->md, &ic_priv->sd.entity,
- IMX_MEDIA_GRP_ID_CSI);
+ IMX_MEDIA_GRP_ID_IPU_CSI);
if (IS_ERR(csi))
csi = NULL;
mutex_lock(&priv->lock);
- if (priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_VDIC) {
+ if (priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC) {
/*
* the ->PRPENC link cannot be enabled if the source
* is the VDIC
@@ -334,10 +334,10 @@ static int prp_link_validate(struct v4l2_subdev *sd,
if (csi) {
switch (csi->grp_id) {
- case IMX_MEDIA_GRP_ID_CSI0:
+ case IMX_MEDIA_GRP_ID_IPU_CSI0:
priv->csi_id = 0;
break;
- case IMX_MEDIA_GRP_ID_CSI1:
+ case IMX_MEDIA_GRP_ID_IPU_CSI1:
priv->csi_id = 1;
break;
default:
@@ -422,9 +422,14 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
if (fi->pad >= PRP_NUM_PADS)
return -EINVAL;
- /* No limits on frame interval */
mutex_lock(&priv->lock);
- priv->frame_interval = fi->interval;
+
+ /* No limits on valid frame intervals */
+ if (fi->interval.numerator == 0 || fi->interval.denominator == 0)
+ fi->interval = priv->frame_interval;
+ else
+ priv->frame_interval = fi->interval;
+
mutex_unlock(&priv->lock);
return 0;
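
Both s_frame_interval hunks in this patch apply the same rule: a requested interval with a zero numerator or denominator is not a representable rate, so the active interval is echoed back instead of being stored. A sketch of the rule, with struct fract as a local stand-in for struct v4l2_fract:

#include <stdio.h>

/* local stand-in for struct v4l2_fract */
struct fract { unsigned int numerator, denominator; };

static void set_frame_interval(struct fract *active, struct fract *req)
{
	if (req->numerator == 0 || req->denominator == 0)
		*req = *active;		/* reject: echo the active interval */
	else
		*active = *req;		/* accept the new interval */
}

int main(void)
{
	struct fract active = { 1, 30 }, req = { 0, 0 };

	set_frame_interval(&active, &req);
	printf("%u/%u\n", req.numerator, req.denominator);	/* 1/30 */
	return 0;
}
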
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 28f41caba05d..5c8e6ad8c025 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -48,7 +48,7 @@
#define MAX_W_SRC 1024
#define MAX_H_SRC 1024
-#define W_ALIGN_SRC 4 /* multiple of 16 pixels */
+#define W_ALIGN_SRC 1 /* multiple of 2 pixels */
#define H_ALIGN_SRC 1 /* multiple of 2 lines */
#define S_ALIGN 1 /* multiple of 2 */
@@ -106,6 +106,7 @@ struct prp_priv {
u32 frame_sequence; /* frame sequence counter */
bool last_eof; /* waiting for last EOF at stream off */
bool nfb4eof; /* NFB4EOF encountered during streaming */
+ bool interweave_swap; /* swap top/bottom lines when interweaving */
struct completion last_eof_comp;
};
@@ -235,6 +236,9 @@ static void prp_vb2_buf_done(struct prp_priv *priv, struct ipuv3_channel *ch)
if (ipu_idmac_buffer_is_ready(ch, priv->ipu_buf_num))
ipu_idmac_clear_buffer(ch, priv->ipu_buf_num);
+ if (priv->interweave_swap && ch == priv->out_ch)
+ phys += vdev->fmt.fmt.pix.bytesperline;
+
ipu_cpmem_set_buffer(ch, priv->ipu_buf_num, phys);
}
@@ -354,20 +358,30 @@ static int prp_setup_channel(struct prp_priv *priv,
{
struct imx_media_video_dev *vdev = priv->vdev;
const struct imx_media_pixfmt *outcc;
- struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_mbus_framefmt *outfmt;
unsigned int burst_size;
struct ipu_image image;
+ bool interweave;
int ret;
- infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
+ outfmt = &priv->format_mbus[PRPENCVF_SRC_PAD];
outcc = vdev->cc;
ipu_cpmem_zero(channel);
memset(&image, 0, sizeof(image));
image.pix = vdev->fmt.fmt.pix;
- image.rect.width = image.pix.width;
- image.rect.height = image.pix.height;
+ image.rect = vdev->compose;
+
+ /*
+ * If the field type at the capture interface is interlaced, and
+ * the output IDMAC pad is sequential, enable interweave at
+ * the IDMAC output channel.
+ */
+ interweave = V4L2_FIELD_IS_INTERLACED(image.pix.field) &&
+ V4L2_FIELD_IS_SEQUENTIAL(outfmt->field);
+ priv->interweave_swap = interweave &&
+ image.pix.field == V4L2_FIELD_INTERLACED_BT;
if (rot_swap_width_height) {
swap(image.pix.width, image.pix.height);
@@ -378,15 +392,25 @@ static int prp_setup_channel(struct prp_priv *priv,
(image.pix.width * outcc->bpp) >> 3;
}
+ if (priv->interweave_swap && channel == priv->out_ch) {
+ /* start interweave scan at 1st top line (2nd line) */
+ image.rect.top = 1;
+ }
+
image.phys0 = addr0;
image.phys1 = addr1;
- if (channel == priv->out_ch || channel == priv->rot_out_ch) {
+ /*
+ * Skip writing U and V components to odd rows in the output
+ * channels for planar 4:2:0 (but not when IDMAC interweaving
+ * is enabled; the two are incompatible).
+ */
+ if ((channel == priv->out_ch && !interweave) ||
+ channel == priv->rot_out_ch) {
switch (image.pix.pixelformat) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_NV12:
- /* Skip writing U and V components to odd rows */
ipu_cpmem_skip_odd_chroma_rows(channel);
break;
}
@@ -409,10 +433,12 @@ static int prp_setup_channel(struct prp_priv *priv,
if (rot_mode)
ipu_cpmem_set_rotation(channel, rot_mode);
- if (image.pix.field == V4L2_FIELD_NONE &&
- V4L2_FIELD_HAS_BOTH(infmt->field) &&
- channel == priv->out_ch)
- ipu_cpmem_interlaced_scan(channel, image.pix.bytesperline);
+ if (interweave && channel == priv->out_ch)
+ ipu_cpmem_interlaced_scan(channel,
+ priv->interweave_swap ?
+ -image.pix.bytesperline :
+ image.pix.bytesperline,
+ image.pix.pixelformat);
ret = ipu_ic_task_idma_init(priv->ic, channel,
image.pix.width, image.pix.height,
@@ -680,12 +706,23 @@ static int prp_start(struct prp_priv *priv)
goto out_free_nfb4eof_irq;
}
+ /* start upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "upstream stream on failed: %d\n", ret);
+ goto out_free_eof_irq;
+ }
+
/* start the EOF timeout timer */
mod_timer(&priv->eof_timeout_timer,
jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
return 0;
+out_free_eof_irq:
+ devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
out_free_nfb4eof_irq:
devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
out_unsetup:
@@ -717,6 +754,12 @@ static void prp_stop(struct prp_priv *priv)
if (ret == 0)
v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
+ /* stop upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ v4l2_warn(&ic_priv->sd,
+ "upstream stream off failed: %d\n", ret);
+
devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
@@ -838,8 +881,7 @@ static void prp_try_fmt(struct prp_priv *priv,
infmt = __prp_get_fmt(priv, cfg, PRPENCVF_SINK_PAD, sdformat->which);
if (sdformat->pad == PRPENCVF_SRC_PAD) {
- if (sdformat->format.field != V4L2_FIELD_NONE)
- sdformat->format.field = infmt->field;
+ sdformat->format.field = infmt->field;
prp_bound_align_output(&sdformat->format, infmt,
priv->rot_mode);
@@ -870,6 +912,7 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
const struct imx_media_pixfmt *cc;
struct v4l2_pix_format vdev_fmt;
struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect vdev_compose;
int ret = 0;
if (sdformat->pad >= PRPENCVF_NUM_PADS)
@@ -911,11 +954,11 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
priv->cc[sdformat->pad] = cc;
/* propagate output pad format to capture device */
- imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt,
+ imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt, &vdev_compose,
&priv->format_mbus[PRPENCVF_SRC_PAD],
priv->cc[PRPENCVF_SRC_PAD]);
mutex_unlock(&priv->lock);
- imx_media_capture_device_set_format(vdev, &vdev_fmt);
+ imx_media_capture_device_set_format(vdev, &vdev_fmt, &vdev_compose);
return 0;
out:
@@ -1148,15 +1191,6 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable)
if (ret)
goto out;
- /* start/stop upstream */
- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
- if (ret) {
- if (enable)
- prp_stop(priv);
- goto out;
- }
-
update_count:
priv->stream_count += enable ? 1 : -1;
if (priv->stream_count < 0)
@@ -1189,9 +1223,14 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
if (fi->pad >= PRPENCVF_NUM_PADS)
return -EINVAL;
- /* No limits on frame interval */
mutex_lock(&priv->lock);
- priv->frame_interval = fi->interval;
+
+ /* No limits on valid frame intervals */
+ if (fi->interval.numerator == 0 || fi->interval.denominator == 0)
+ fi->interval = priv->frame_interval;
+ else
+ priv->frame_interval = fi->interval;
+
mutex_unlock(&priv->lock);
return 0;
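
The interweave logic above reduces to three small predicates: interweave when the capture format is interlaced while the IDMAC source pad carries sequential fields, swap line order for bottom-field-first material, and express the swap as a negative line stride with the scan start shifted down one line. A self-contained sketch with local stand-ins for the V4L2 field enum and test macros:

#include <stdbool.h>
#include <stdio.h>

/* local stand-ins for the V4L2 field enum and test macros */
enum field { FIELD_NONE, FIELD_SEQ_TB, FIELD_SEQ_BT,
	     FIELD_INTERLACED, FIELD_INTERLACED_TB, FIELD_INTERLACED_BT };

static bool is_interlaced(enum field f)
{
	return f == FIELD_INTERLACED || f == FIELD_INTERLACED_TB ||
	       f == FIELD_INTERLACED_BT;
}

static bool is_sequential(enum field f)
{
	return f == FIELD_SEQ_TB || f == FIELD_SEQ_BT;
}

int main(void)
{
	enum field capture = FIELD_INTERLACED_BT;	/* bottom field first */
	enum field src_pad = FIELD_SEQ_BT;
	int bytesperline = 1920 * 2;

	bool interweave = is_interlaced(capture) && is_sequential(src_pad);
	bool swap = interweave && capture == FIELD_INTERLACED_BT;
	int stride = swap ? -bytesperline : bytesperline;

	printf("interweave=%d swap=%d stride=%d\n", interweave, swap, stride);
	return 0;
}
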
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index b37e1186eb2f..9703c85b19c4 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -203,21 +203,14 @@ static int capture_g_fmt_vid_cap(struct file *file, void *fh,
return 0;
}
-static int capture_try_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *f)
+static int __capture_try_fmt_vid_cap(struct capture_priv *priv,
+ struct v4l2_subdev_format *fmt_src,
+ struct v4l2_format *f,
+ struct v4l2_rect *compose)
{
- struct capture_priv *priv = video_drvdata(file);
- struct v4l2_subdev_format fmt_src;
const struct imx_media_pixfmt *cc, *cc_src;
- int ret;
-
- fmt_src.pad = priv->src_sd_pad;
- fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
- if (ret)
- return ret;
- cc_src = imx_media_find_ipu_format(fmt_src.format.code, CS_SEL_ANY);
+ cc_src = imx_media_find_ipu_format(fmt_src->format.code, CS_SEL_ANY);
if (cc_src) {
u32 fourcc, cs_sel;
@@ -231,7 +224,7 @@ static int capture_try_fmt_vid_cap(struct file *file, void *fh,
cc = imx_media_find_format(fourcc, cs_sel, false);
}
} else {
- cc_src = imx_media_find_mbus_format(fmt_src.format.code,
+ cc_src = imx_media_find_mbus_format(fmt_src->format.code,
CS_SEL_ANY, true);
if (WARN_ON(!cc_src))
return -EINVAL;
@@ -239,15 +232,48 @@ static int capture_try_fmt_vid_cap(struct file *file, void *fh,
cc = cc_src;
}
- imx_media_mbus_fmt_to_pix_fmt(&f->fmt.pix, &fmt_src.format, cc);
+ /* allow IDMAC interweave but enforce field order from source */
+ if (V4L2_FIELD_IS_INTERLACED(f->fmt.pix.field)) {
+ switch (fmt_src->format.field) {
+ case V4L2_FIELD_SEQ_TB:
+ fmt_src->format.field = V4L2_FIELD_INTERLACED_TB;
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ fmt_src->format.field = V4L2_FIELD_INTERLACED_BT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ imx_media_mbus_fmt_to_pix_fmt(&f->fmt.pix, compose,
+ &fmt_src->format, cc);
return 0;
}
+static int capture_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_format fmt_src;
+ int ret;
+
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ return __capture_try_fmt_vid_cap(priv, &fmt_src, f, NULL);
+}
+
static int capture_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *f)
{
struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_format fmt_src;
+ struct v4l2_rect compose;
int ret;
if (vb2_is_busy(&priv->q)) {
@@ -255,13 +281,20 @@ static int capture_s_fmt_vid_cap(struct file *file, void *fh,
return -EBUSY;
}
- ret = capture_try_fmt_vid_cap(file, priv, f);
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ ret = __capture_try_fmt_vid_cap(priv, &fmt_src, f, &compose);
if (ret)
return ret;
priv->vdev.fmt.fmt.pix = f->fmt.pix;
priv->vdev.cc = imx_media_find_format(f->fmt.pix.pixelformat,
CS_SEL_ANY, true);
+ priv->vdev.compose = compose;
return 0;
}
@@ -290,6 +323,36 @@ static int capture_s_std(struct file *file, void *fh, v4l2_std_id std)
return v4l2_subdev_call(priv->src_sd, video, s_std, std);
}
+static int capture_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* The compose rectangle is fixed to the source format. */
+ s->r = priv->vdev.compose;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ /*
+ * The hardware writes with a configurable but fixed DMA burst
+ * size. If the source format width is not burst size aligned,
+ * the written frame contains padding to the right.
+ */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = priv->vdev.fmt.fmt.pix.width;
+ s->r.height = priv->vdev.fmt.fmt.pix.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int capture_g_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
@@ -335,6 +398,21 @@ static int capture_s_parm(struct file *file, void *fh,
return 0;
}
+static int capture_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct v4l2_ioctl_ops capture_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
@@ -350,6 +428,8 @@ static const struct v4l2_ioctl_ops capture_ioctl_ops = {
.vidioc_g_std = capture_g_std,
.vidioc_s_std = capture_s_std,
+ .vidioc_g_selection = capture_g_selection,
+
.vidioc_g_parm = capture_g_parm,
.vidioc_s_parm = capture_s_parm,
@@ -362,6 +442,9 @@ static const struct v4l2_ioctl_ops capture_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_subscribe_event = capture_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*
@@ -572,7 +655,8 @@ static struct video_device capture_videodev = {
};
void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
- struct v4l2_pix_format *pix)
+ const struct v4l2_pix_format *pix,
+ const struct v4l2_rect *compose)
{
struct capture_priv *priv = to_capture_priv(vdev);
@@ -580,6 +664,7 @@ void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
priv->vdev.fmt.fmt.pix = *pix;
priv->vdev.cc = imx_media_find_format(pix->pixelformat, CS_SEL_ANY,
true);
+ priv->vdev.compose = *compose;
mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_set_format);
@@ -685,7 +770,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
}
vdev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt.fmt.pix,
+ imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt.fmt.pix, &vdev->compose,
&fmt_src.format, NULL);
vdev->cc = imx_media_find_format(vdev->fmt.fmt.pix.pixelformat,
CS_SEL_ANY, false);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 4223f8d418ae..3b7517348666 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -41,7 +41,7 @@
#define MIN_H 144
#define MAX_W 4096
#define MAX_H 4096
-#define W_ALIGN 4 /* multiple of 16 pixels */
+#define W_ALIGN 1 /* multiple of 2 pixels */
#define H_ALIGN 1 /* multiple of 2 lines */
#define S_ALIGN 1 /* multiple of 2 */
@@ -114,6 +114,7 @@ struct csi_priv {
u32 frame_sequence; /* frame sequence counter */
bool last_eof; /* waiting for last EOF at stream off */
bool nfb4eof; /* NFB4EOF encountered during streaming */
+ bool interweave_swap; /* swap top/bottom lines when interweaving */
struct completion last_eof_comp;
};
@@ -286,6 +287,9 @@ static void csi_vb2_buf_done(struct csi_priv *priv)
if (ipu_idmac_buffer_is_ready(priv->idmac_ch, priv->ipu_buf_num))
ipu_idmac_clear_buffer(priv->idmac_ch, priv->ipu_buf_num);
+ if (priv->interweave_swap)
+ phys += vdev->fmt.fmt.pix.bytesperline;
+
ipu_cpmem_set_buffer(priv->idmac_ch, priv->ipu_buf_num, phys);
}
@@ -398,23 +402,24 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
struct imx_media_video_dev *vdev = priv->vdev;
const struct imx_media_pixfmt *incc;
struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_mbus_framefmt *outfmt;
+ bool passthrough, interweave;
struct ipu_image image;
u32 passthrough_bits;
u32 passthrough_cycles;
dma_addr_t phys[2];
- bool passthrough;
u32 burst_size;
int ret;
infmt = &priv->format_mbus[CSI_SINK_PAD];
incc = priv->cc[CSI_SINK_PAD];
+ outfmt = &priv->format_mbus[CSI_SRC_PAD_IDMAC];
ipu_cpmem_zero(priv->idmac_ch);
memset(&image, 0, sizeof(image));
image.pix = vdev->fmt.fmt.pix;
- image.rect.width = image.pix.width;
- image.rect.height = image.pix.height;
+ image.rect = vdev->compose;
csi_idmac_setup_vb2_buf(priv, phys);
@@ -424,6 +429,16 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
passthrough = requires_passthrough(&priv->upstream_ep, infmt, incc);
passthrough_cycles = 1;
+ /*
+ * If the field type at the capture interface is interlaced, and
+ * the output IDMAC pad is sequential, enable interweave at
+ * the IDMAC output channel.
+ */
+ interweave = V4L2_FIELD_IS_INTERLACED(image.pix.field) &&
+ V4L2_FIELD_IS_SEQUENTIAL(outfmt->field);
+ priv->interweave_swap = interweave &&
+ image.pix.field == V4L2_FIELD_INTERLACED_BT;
+
switch (image.pix.pixelformat) {
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
@@ -442,13 +457,18 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
passthrough_bits = 16;
break;
case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_NV12:
burst_size = (image.pix.width & 0x3f) ?
((image.pix.width & 0x1f) ?
((image.pix.width & 0xf) ? 8 : 16) : 32) : 64;
passthrough_bits = 16;
- /* Skip writing U and V components to odd rows */
- ipu_cpmem_skip_odd_chroma_rows(priv->idmac_ch);
+ /*
+ * Skip writing U and V components to odd rows (but not
+ * when IDMAC interweaving is enabled; the two are incompatible).
+ */
+ if (!interweave)
+ ipu_cpmem_skip_odd_chroma_rows(priv->idmac_ch);
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
@@ -471,6 +491,12 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
}
if (passthrough) {
+ if (priv->interweave_swap) {
+ /* start interweave scan at 1st top line (2nd line) */
+ image.phys0 += image.pix.bytesperline;
+ image.phys1 += image.pix.bytesperline;
+ }
+
ipu_cpmem_set_resolution(priv->idmac_ch,
image.rect.width * passthrough_cycles,
image.rect.height);
@@ -480,6 +506,11 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
ipu_cpmem_set_format_passthrough(priv->idmac_ch,
passthrough_bits);
} else {
+ if (priv->interweave_swap) {
+ /* start interweave scan at 1st top line (2nd line) */
+ image.rect.top = 1;
+ }
+
ret = ipu_cpmem_set_image(priv->idmac_ch, &image);
if (ret)
goto unsetup_vb2;
@@ -509,10 +540,12 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
ipu_smfc_set_burstsize(priv->smfc, burst_size);
- if (image.pix.field == V4L2_FIELD_NONE &&
- V4L2_FIELD_HAS_BOTH(infmt->field))
+ if (interweave)
ipu_cpmem_interlaced_scan(priv->idmac_ch,
- image.pix.bytesperline);
+ priv->interweave_swap ?
+ -image.pix.bytesperline :
+ image.pix.bytesperline,
+ image.pix.pixelformat);
ipu_idmac_set_double_buffer(priv->idmac_ch, true);
@@ -629,7 +662,7 @@ out_put_ipu:
return ret;
}
-static void csi_idmac_stop(struct csi_priv *priv)
+static void csi_idmac_wait_last_eof(struct csi_priv *priv)
{
unsigned long flags;
int ret;
@@ -646,7 +679,10 @@ static void csi_idmac_stop(struct csi_priv *priv)
&priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
if (ret == 0)
v4l2_warn(&priv->sd, "wait last EOF timeout\n");
+}
+static void csi_idmac_stop(struct csi_priv *priv)
+{
devm_free_irq(priv->dev, priv->eof_irq, priv);
devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
@@ -679,12 +715,7 @@ static int csi_setup(struct csi_priv *priv)
priv->upstream_ep.bus.parallel.flags :
priv->upstream_ep.bus.mipi_csi2.flags;
- /*
- * we need to pass input frame to CSI interface, but
- * with translated field type from output format
- */
if_fmt = *infmt;
- if_fmt.field = outfmt->field;
crop = priv->crop;
/*
@@ -702,7 +733,7 @@ static int csi_setup(struct csi_priv *priv)
priv->crop.width == 2 * priv->compose.width,
priv->crop.height == 2 * priv->compose.height);
- ipu_csi_init_interface(priv->csi, &mbus_cfg, &if_fmt);
+ ipu_csi_init_interface(priv->csi, &mbus_cfg, &if_fmt, outfmt);
ipu_csi_set_dest(priv->csi, priv->dest);
@@ -722,10 +753,16 @@ static int csi_start(struct csi_priv *priv)
output_fi = &priv->frame_interval[priv->active_output_pad];
+ /* start upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret)
+ return ret;
+
if (priv->dest == IPU_CSI_DEST_IDMAC) {
ret = csi_idmac_start(priv);
if (ret)
- return ret;
+ goto stop_upstream;
}
ret = csi_setup(priv);
@@ -753,11 +790,26 @@ fim_off:
idmac_stop:
if (priv->dest == IPU_CSI_DEST_IDMAC)
csi_idmac_stop(priv);
+stop_upstream:
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
return ret;
}
static void csi_stop(struct csi_priv *priv)
{
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ csi_idmac_wait_last_eof(priv);
+
+ /*
+ * Disable the CSI asap, after syncing with the last EOF.
+ * Doing so after the IDMA channel is disabled has been shown
+ * to create hard system-wide hangs.
+ */
+ ipu_csi_disable(priv->csi);
+
+ /* stop upstream */
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+
if (priv->dest == IPU_CSI_DEST_IDMAC) {
csi_idmac_stop(priv);
@@ -765,8 +817,6 @@ static void csi_stop(struct csi_priv *priv)
if (priv->fim)
imx_media_fim_set_stream(priv->fim, NULL, false);
}
-
- ipu_csi_disable(priv->csi);
}
static const struct csi_skip_desc csi_skip[12] = {
@@ -876,7 +926,10 @@ static int csi_s_frame_interval(struct v4l2_subdev *sd,
switch (fi->pad) {
case CSI_SINK_PAD:
- /* No limits on input frame interval */
+ /* No limits on valid input frame intervals */
+ if (fi->interval.numerator == 0 ||
+ fi->interval.denominator == 0)
+ fi->interval = *input_fi;
/* Reset output intervals and frame skipping ratio to 1:1 */
priv->frame_interval[CSI_SRC_PAD_IDMAC] = fi->interval;
priv->frame_interval[CSI_SRC_PAD_DIRECT] = fi->interval;
@@ -927,23 +980,13 @@ static int csi_s_stream(struct v4l2_subdev *sd, int enable)
goto update_count;
if (enable) {
- /* upstream must be started first, before starting CSI */
- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
- if (ret)
- goto out;
-
dev_dbg(priv->dev, "stream ON\n");
ret = csi_start(priv);
- if (ret) {
- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ if (ret)
goto out;
- }
} else {
dev_dbg(priv->dev, "stream OFF\n");
- /* CSI must be stopped first, then stop upstream */
csi_stop(priv);
- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
}
update_count:
@@ -1001,6 +1044,8 @@ static int csi_link_setup(struct media_entity *entity,
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
priv->sink = NULL;
+ /* do not apply IC burst alignment in csi_try_crop */
+ priv->active_output_pad = CSI_SRC_PAD_IDMAC;
goto out;
}
@@ -1029,10 +1074,10 @@ static int csi_link_setup(struct media_entity *entity,
remote_sd = media_entity_to_v4l2_subdev(remote->entity);
switch (remote_sd->grp_id) {
- case IMX_MEDIA_GRP_ID_VDIC:
+ case IMX_MEDIA_GRP_ID_IPU_VDIC:
priv->dest = IPU_CSI_DEST_VDIC;
break;
- case IMX_MEDIA_GRP_ID_IC_PRP:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
priv->dest = IPU_CSI_DEST_IC;
break;
default:
@@ -1137,12 +1182,21 @@ static void csi_try_crop(struct csi_priv *priv,
struct v4l2_mbus_framefmt *infmt,
struct v4l2_fwnode_endpoint *upstream_ep)
{
+ u32 in_height;
+
crop->width = min_t(__u32, infmt->width, crop->width);
if (crop->left + crop->width > infmt->width)
crop->left = infmt->width - crop->width;
/* adjust crop left/width to h/w alignment restrictions */
crop->left &= ~0x3;
- crop->width &= ~0x7;
+ if (priv->active_output_pad == CSI_SRC_PAD_DIRECT)
+ crop->width &= ~0x7; /* multiple of 8 pixels (IC burst) */
+ else
+ crop->width &= ~0x1; /* multiple of 2 pixels */
+
+ in_height = infmt->height;
+ if (infmt->field == V4L2_FIELD_ALTERNATE)
+ in_height *= 2;
/*
* FIXME: not sure why yet, but on interlaced bt.656,
@@ -1153,12 +1207,12 @@ static void csi_try_crop(struct csi_priv *priv,
if (upstream_ep->bus_type == V4L2_MBUS_BT656 &&
(V4L2_FIELD_HAS_BOTH(infmt->field) ||
infmt->field == V4L2_FIELD_ALTERNATE)) {
- crop->height = infmt->height;
- crop->top = (infmt->height == 480) ? 2 : 0;
+ crop->height = in_height;
+ crop->top = (in_height == 480) ? 2 : 0;
} else {
- crop->height = min_t(__u32, infmt->height, crop->height);
- if (crop->top + crop->height > infmt->height)
- crop->top = infmt->height - crop->height;
+ crop->height = min_t(__u32, in_height, crop->height);
+ if (crop->top + crop->height > in_height)
+ crop->top = in_height - crop->height;
}
}
@@ -1308,6 +1362,49 @@ out:
return ret;
}
+static void csi_try_field(struct csi_priv *priv,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct v4l2_mbus_framefmt *infmt =
+ __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sdformat->which);
+
+ /* no restrictions on sink pad field type */
+ if (sdformat->pad == CSI_SINK_PAD)
+ return;
+
+ switch (infmt->field) {
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ /*
+ * If the user requests sequential at the source pad,
+ * allow it (along with possibly inverting field order).
+ * Otherwise pass the field type through.
+ */
+ if (!V4L2_FIELD_IS_SEQUENTIAL(sdformat->format.field))
+ sdformat->format.field = infmt->field;
+ break;
+ case V4L2_FIELD_ALTERNATE:
+ /*
+ * This driver does not support alternate field mode, and
+ * the CSI captures a whole frame, so the CSI never presents
+ * alternate mode at its source pads. If user has not
+ * already requested sequential, translate ALTERNATE at
+ * sink pad to SEQ_TB or SEQ_BT at the source pad depending
+ * on input height (assume NTSC BT order if 480 total active
+ * frame lines, otherwise PAL TB order).
+ */
+ if (!V4L2_FIELD_IS_SEQUENTIAL(sdformat->format.field))
+ sdformat->format.field = (infmt->height == 480 / 2) ?
+ V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB;
+ break;
+ default:
+ /* Passthrough for all other input field types */
+ sdformat->format.field = infmt->field;
+ break;
+ }
+}
+
static void csi_try_fmt(struct csi_priv *priv,
struct v4l2_fwnode_endpoint *upstream_ep,
struct v4l2_subdev_pad_config *cfg,
@@ -1347,42 +1444,20 @@ static void csi_try_fmt(struct csi_priv *priv,
}
}
- if (sdformat->pad == CSI_SRC_PAD_DIRECT ||
- sdformat->format.field != V4L2_FIELD_NONE)
- sdformat->format.field = infmt->field;
-
- /*
- * translate V4L2_FIELD_ALTERNATE to SEQ_TB or SEQ_BT
- * depending on input height (assume NTSC top-bottom
- * order if 480 lines, otherwise PAL bottom-top order).
- */
- if (sdformat->format.field == V4L2_FIELD_ALTERNATE) {
- sdformat->format.field = (infmt->height == 480) ?
- V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT;
- }
+ csi_try_field(priv, cfg, sdformat);
/* propagate colorimetry from sink */
sdformat->format.colorspace = infmt->colorspace;
sdformat->format.xfer_func = infmt->xfer_func;
sdformat->format.quantization = infmt->quantization;
sdformat->format.ycbcr_enc = infmt->ycbcr_enc;
+
break;
case CSI_SINK_PAD:
v4l_bound_align_image(&sdformat->format.width, MIN_W, MAX_W,
W_ALIGN, &sdformat->format.height,
MIN_H, MAX_H, H_ALIGN, S_ALIGN);
- /* Reset crop and compose rectangles */
- crop->left = 0;
- crop->top = 0;
- crop->width = sdformat->format.width;
- crop->height = sdformat->format.height;
- csi_try_crop(priv, crop, cfg, &sdformat->format, upstream_ep);
- compose->left = 0;
- compose->top = 0;
- compose->width = crop->width;
- compose->height = crop->height;
-
*cc = imx_media_find_mbus_format(sdformat->format.code,
CS_SEL_ANY, true);
if (!*cc) {
@@ -1393,9 +1468,25 @@ static void csi_try_fmt(struct csi_priv *priv,
sdformat->format.code = (*cc)->codes[0];
}
+ csi_try_field(priv, cfg, sdformat);
+
imx_media_fill_default_mbus_fields(
&sdformat->format, infmt,
priv->active_output_pad == CSI_SRC_PAD_DIRECT);
+
+ /* Reset crop and compose rectangles */
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = sdformat->format.width;
+ crop->height = sdformat->format.height;
+ if (sdformat->format.field == V4L2_FIELD_ALTERNATE)
+ crop->height *= 2;
+ csi_try_crop(priv, crop, cfg, &sdformat->format, upstream_ep);
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = crop->width;
+ compose->height = crop->height;
+
break;
}
}
@@ -1411,6 +1502,7 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_pix_format vdev_fmt;
struct v4l2_mbus_framefmt *fmt;
struct v4l2_rect *crop, *compose;
+ struct v4l2_rect vdev_compose;
int ret;
if (sdformat->pad >= CSI_NUM_PADS)
@@ -1466,11 +1558,11 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
priv->cc[sdformat->pad] = cc;
/* propagate IDMAC output pad format to capture device */
- imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt,
+ imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt, &vdev_compose,
&priv->format_mbus[CSI_SRC_PAD_IDMAC],
priv->cc[CSI_SRC_PAD_IDMAC]);
mutex_unlock(&priv->lock);
- imx_media_capture_device_set_format(vdev, &vdev_fmt);
+ imx_media_capture_device_set_format(vdev, &vdev_fmt, &vdev_compose);
return 0;
out:
@@ -1502,6 +1594,8 @@ static int csi_get_selection(struct v4l2_subdev *sd,
sel->r.top = 0;
sel->r.width = infmt->width;
sel->r.height = infmt->height;
+ if (infmt->field == V4L2_FIELD_ALTERNATE)
+ sel->r.height *= 2;
break;
case V4L2_SEL_TGT_CROP:
sel->r = *crop;
@@ -1787,7 +1881,7 @@ static int imx_csi_parse_endpoint(struct device *dev,
struct v4l2_fwnode_endpoint *vep,
struct v4l2_async_subdev *asd)
{
- return fwnode_device_is_available(asd->match.fwnode) ? 0 : -EINVAL;
+ return fwnode_device_is_available(asd->match.fwnode) ? 0 : -ENOTCONN;
}
static int imx_csi_async_register(struct csi_priv *priv)
@@ -1864,6 +1958,8 @@ static int imx_csi_probe(struct platform_device *pdev)
priv->csi_id = pdata->csi;
priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
+ priv->active_output_pad = CSI_SRC_PAD_IDMAC;
+
timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
spin_lock_init(&priv->irqlock);
@@ -1877,7 +1973,7 @@ static int imx_csi_probe(struct platform_device *pdev)
priv->sd.owner = THIS_MODULE;
priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
priv->sd.grp_id = priv->csi_id ?
- IMX_MEDIA_GRP_ID_CSI1 : IMX_MEDIA_GRP_ID_CSI0;
+ IMX_MEDIA_GRP_ID_IPU_CSI1 : IMX_MEDIA_GRP_ID_IPU_CSI0;
imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
priv->sd.grp_id, ipu_get_num(priv->ipu));
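
csi_try_field() above never lets V4L2_FIELD_ALTERNATE escape through a source pad: unless the user already asked for a sequential type, it becomes SEQ_BT for NTSC-height input (480 active lines, 240 per field) and SEQ_TB otherwise. A sketch of just that mapping, again with local enum stand-ins:

#include <stdio.h>

enum field { FIELD_NONE, FIELD_SEQ_TB, FIELD_SEQ_BT, FIELD_ALTERNATE };

static enum field translate_alternate(enum field requested,
				       unsigned int sink_height)
{
	/* honour an explicit sequential request */
	if (requested == FIELD_SEQ_TB || requested == FIELD_SEQ_BT)
		return requested;
	/* 240 lines per field means NTSC, assume bottom-field-first */
	return (sink_height == 480 / 2) ? FIELD_SEQ_BT : FIELD_SEQ_TB;
}

int main(void)
{
	printf("%d\n", translate_alternate(FIELD_NONE, 240));	/* FIELD_SEQ_BT */
	printf("%d\n", translate_alternate(FIELD_NONE, 288));	/* FIELD_SEQ_TB */
	return 0;
}
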
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
new file mode 100644
index 000000000000..910594125889
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Media Controller driver: common code for Freescale i.MX5/6/7 SoCs
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include "imx-media.h"
+
+static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx_media_probe_complete,
+};
+
+static const struct media_device_ops imx_media_md_ops = {
+ .link_notify = imx_media_link_notify,
+};
+
+struct imx_media_dev *imx_media_dev_init(struct device *dev)
+{
+ struct imx_media_dev *imxmd;
+ int ret;
+
+ imxmd = devm_kzalloc(dev, sizeof(*imxmd), GFP_KERNEL);
+ if (!imxmd)
+ return ERR_PTR(-ENOMEM);
+
+ dev_set_drvdata(dev, imxmd);
+
+ strlcpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
+ imxmd->md.ops = &imx_media_md_ops;
+ imxmd->md.dev = dev;
+
+ mutex_init(&imxmd->mutex);
+
+ imxmd->v4l2_dev.mdev = &imxmd->md;
+ imxmd->v4l2_dev.notify = imx_media_notify;
+ strlcpy(imxmd->v4l2_dev.name, "imx-media",
+ sizeof(imxmd->v4l2_dev.name));
+
+ media_device_init(&imxmd->md);
+
+ ret = v4l2_device_register(dev, &imxmd->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "Failed to register v4l2_device: %d\n", ret);
+ goto cleanup;
+ }
+
+ dev_set_drvdata(imxmd->v4l2_dev.dev, imxmd);
+
+ INIT_LIST_HEAD(&imxmd->vdev_list);
+
+ v4l2_async_notifier_init(&imxmd->notifier);
+
+ return imxmd;
+
+cleanup:
+ media_device_cleanup(&imxmd->md);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(imx_media_dev_init);
+
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd)
+{
+ int ret;
+
+ /* no subdevs? just bail */
+ if (list_empty(&imxmd->notifier.asd_list)) {
+ v4l2_err(&imxmd->v4l2_dev, "no subdevs\n");
+ return -ENODEV;
+ }
+
+ /* prepare the async subdev notifier and register it */
+ imxmd->notifier.ops = &imx_media_subdev_ops;
+ ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
+ &imxmd->notifier);
+ if (ret) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "v4l2_async_notifier_register failed with %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_dev_notifier_register);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 4b344a4a3706..28a3d23aad5b 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -116,16 +116,16 @@ static int imx_media_get_ipu(struct imx_media_dev *imxmd,
}
/* async subdev bound notifier */
-static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
{
struct imx_media_dev *imxmd = notifier2dev(notifier);
int ret = 0;
mutex_lock(&imxmd->mutex);
- if (sd->grp_id & IMX_MEDIA_GRP_ID_CSI) {
+ if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) {
ret = imx_media_get_ipu(imxmd, sd);
if (ret)
goto out;
@@ -149,13 +149,13 @@ static int imx_media_create_links(struct v4l2_async_notifier *notifier)
list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
switch (sd->grp_id) {
- case IMX_MEDIA_GRP_ID_VDIC:
- case IMX_MEDIA_GRP_ID_IC_PRP:
- case IMX_MEDIA_GRP_ID_IC_PRPENC:
- case IMX_MEDIA_GRP_ID_IC_PRPVF:
- case IMX_MEDIA_GRP_ID_CSI0:
- case IMX_MEDIA_GRP_ID_CSI1:
- ret = imx_media_create_internal_links(imxmd, sd);
+ case IMX_MEDIA_GRP_ID_IPU_VDIC:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
+ case IMX_MEDIA_GRP_ID_IPU_CSI0:
+ case IMX_MEDIA_GRP_ID_IPU_CSI1:
+ ret = imx_media_create_ipu_internal_links(imxmd, sd);
if (ret)
return ret;
/*
@@ -163,9 +163,13 @@ static int imx_media_create_links(struct v4l2_async_notifier *notifier)
* internal entities, so create the external links
* to the CSI sink pads.
*/
- if (sd->grp_id & IMX_MEDIA_GRP_ID_CSI)
+ if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI)
imx_media_create_csi_of_links(imxmd, sd);
break;
+ case IMX_MEDIA_GRP_ID_CSI:
+ imx_media_create_csi_of_links(imxmd, sd);
+
+ break;
default:
/*
* if this subdev has fwnode links, create media
@@ -302,7 +306,7 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
}
/* async subdev complete notifier */
-static int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
+int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
{
struct imx_media_dev *imxmd = notifier2dev(notifier);
int ret;
@@ -326,11 +330,6 @@ unlock:
return media_device_register(&imxmd->md);
}
-static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
- .bound = imx_media_subdev_bound,
- .complete = imx_media_probe_complete,
-};
-
/*
* adds controls to a video device from an entity subdevice.
* Continues upstream from the entity's sink pads.
@@ -374,8 +373,8 @@ static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
return ret;
}
-static int imx_media_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
+int imx_media_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
{
struct media_entity *source = link->source->entity;
struct imx_media_pad_vdev *pad_vdev;
@@ -438,9 +437,27 @@ static int imx_media_link_notify(struct media_link *link, u32 flags,
return ret;
}
-static const struct media_device_ops imx_media_md_ops = {
- .link_notify = imx_media_link_notify,
-};
+void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg)
+{
+ struct media_entity *entity = &sd->entity;
+ int i;
+
+ if (notification != V4L2_DEVICE_NOTIFY_EVENT)
+ return;
+
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+
+ pad_vdev_list = to_pad_vdev_list(sd, pad->index);
+ if (!pad_vdev_list)
+ continue;
+ list_for_each_entry(pad_vdev, pad_vdev_list, list)
+ v4l2_event_queue(pad_vdev->vdev->vfd, arg);
+ }
+}
static int imx_media_probe(struct platform_device *pdev)
{
@@ -449,76 +466,37 @@ static int imx_media_probe(struct platform_device *pdev)
struct imx_media_dev *imxmd;
int ret;
- imxmd = devm_kzalloc(dev, sizeof(*imxmd), GFP_KERNEL);
- if (!imxmd)
- return -ENOMEM;
-
- dev_set_drvdata(dev, imxmd);
-
- strscpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
- imxmd->md.ops = &imx_media_md_ops;
- imxmd->md.dev = dev;
-
- mutex_init(&imxmd->mutex);
-
- imxmd->v4l2_dev.mdev = &imxmd->md;
- strscpy(imxmd->v4l2_dev.name, "imx-media",
- sizeof(imxmd->v4l2_dev.name));
-
- media_device_init(&imxmd->md);
-
- ret = v4l2_device_register(dev, &imxmd->v4l2_dev);
- if (ret < 0) {
- v4l2_err(&imxmd->v4l2_dev,
- "Failed to register v4l2_device: %d\n", ret);
- goto cleanup;
- }
-
- dev_set_drvdata(imxmd->v4l2_dev.dev, imxmd);
-
- INIT_LIST_HEAD(&imxmd->vdev_list);
-
- v4l2_async_notifier_init(&imxmd->notifier);
+ imxmd = imx_media_dev_init(dev);
+ if (IS_ERR(imxmd))
+ return PTR_ERR(imxmd);
ret = imx_media_add_of_subdevs(imxmd, node);
if (ret) {
v4l2_err(&imxmd->v4l2_dev,
"add_of_subdevs failed with %d\n", ret);
- goto notifier_cleanup;
+ goto cleanup;
}
ret = imx_media_add_internal_subdevs(imxmd);
if (ret) {
v4l2_err(&imxmd->v4l2_dev,
"add_internal_subdevs failed with %d\n", ret);
- goto notifier_cleanup;
- }
-
- /* no subdevs? just bail */
- if (list_empty(&imxmd->notifier.asd_list)) {
- ret = -ENODEV;
- goto notifier_cleanup;
+ goto cleanup;
}
- /* prepare the async subdev notifier and register it */
- imxmd->notifier.ops = &imx_media_subdev_ops;
- ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
- &imxmd->notifier);
- if (ret) {
- v4l2_err(&imxmd->v4l2_dev,
- "v4l2_async_notifier_register failed with %d\n", ret);
+ ret = imx_media_dev_notifier_register(imxmd);
+ if (ret)
goto del_int;
- }
return 0;
del_int:
imx_media_remove_internal_subdevs(imxmd);
-notifier_cleanup:
+cleanup:
v4l2_async_notifier_cleanup(&imxmd->notifier);
v4l2_device_unregister(&imxmd->v4l2_dev);
-cleanup:
media_device_cleanup(&imxmd->md);
+
return ret;
}
@@ -532,8 +510,8 @@ static int imx_media_remove(struct platform_device *pdev)
v4l2_async_notifier_unregister(&imxmd->notifier);
imx_media_remove_internal_subdevs(imxmd);
v4l2_async_notifier_cleanup(&imxmd->notifier);
- v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_unregister(&imxmd->md);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
return 0;
diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
index 0fdc45dbfb76..5e10d95e5529 100644
--- a/drivers/staging/media/imx/imx-media-internal-sd.c
+++ b/drivers/staging/media/imx/imx-media-internal-sd.c
@@ -30,32 +30,32 @@ static const struct internal_subdev_id {
} isd_id[num_isd] = {
[isd_csi0] = {
.index = isd_csi0,
- .grp_id = IMX_MEDIA_GRP_ID_CSI0,
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI0,
.name = "imx-ipuv3-csi",
},
[isd_csi1] = {
.index = isd_csi1,
- .grp_id = IMX_MEDIA_GRP_ID_CSI1,
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI1,
.name = "imx-ipuv3-csi",
},
[isd_vdic] = {
.index = isd_vdic,
- .grp_id = IMX_MEDIA_GRP_ID_VDIC,
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_VDIC,
.name = "imx-ipuv3-vdic",
},
[isd_ic_prp] = {
.index = isd_ic_prp,
- .grp_id = IMX_MEDIA_GRP_ID_IC_PRP,
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRP,
.name = "imx-ipuv3-ic",
},
[isd_ic_prpenc] = {
.index = isd_ic_prpenc,
- .grp_id = IMX_MEDIA_GRP_ID_IC_PRPENC,
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
.name = "imx-ipuv3-ic",
},
[isd_ic_prpvf] = {
.index = isd_ic_prpvf,
- .grp_id = IMX_MEDIA_GRP_ID_IC_PRPVF,
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
.name = "imx-ipuv3-ic",
},
};
@@ -229,8 +229,8 @@ static int create_ipu_internal_link(struct imx_media_dev *imxmd,
return ret;
}
-int imx_media_create_internal_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *sd)
+int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *sd)
{
const struct internal_subdev *intsd;
const struct internal_pad *intpad;
@@ -312,8 +312,8 @@ static int add_ipu_internal_subdevs(struct imx_media_dev *imxmd, int ipu_id)
* of_parse_subdev().
*/
switch (isd->id->grp_id) {
- case IMX_MEDIA_GRP_ID_CSI0:
- case IMX_MEDIA_GRP_ID_CSI1:
+ case IMX_MEDIA_GRP_ID_IPU_CSI0:
+ case IMX_MEDIA_GRP_ID_IPU_CSI1:
ret = 0;
break;
default:
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index a01327f6e045..03446335ac03 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -20,7 +20,8 @@
#include <video/imx-ipu-v3.h>
#include "imx-media.h"
-static int of_add_csi(struct imx_media_dev *imxmd, struct device_node *csi_np)
+int imx_media_of_add_csi(struct imx_media_dev *imxmd,
+ struct device_node *csi_np)
{
int ret;
@@ -45,6 +46,7 @@ static int of_add_csi(struct imx_media_dev *imxmd, struct device_node *csi_np)
return 0;
}
+EXPORT_SYMBOL_GPL(imx_media_of_add_csi);
int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
struct device_node *np)
@@ -57,7 +59,7 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
if (!csi_np)
break;
- ret = of_add_csi(imxmd, csi_np);
+ ret = imx_media_of_add_csi(imxmd, csi_np);
of_node_put(csi_np);
if (ret)
return ret;
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 0eaa353d5cb3..1c63a2765a81 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -577,9 +577,11 @@ void imx_media_fill_default_mbus_fields(struct v4l2_mbus_framefmt *tryfmt,
EXPORT_SYMBOL_GPL(imx_media_fill_default_mbus_fields);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
- struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_rect *compose,
+ const struct v4l2_mbus_framefmt *mbus,
const struct imx_media_pixfmt *cc)
{
+ u32 width;
u32 stride;
if (!cc) {
@@ -602,9 +604,16 @@ int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
cc = imx_media_find_mbus_format(code, CS_SEL_YUV, false);
}
- stride = cc->planar ? mbus->width : (mbus->width * cc->bpp) >> 3;
+ /* Round up width for minimum burst size */
+ width = round_up(mbus->width, 8);
- pix->width = mbus->width;
+ /* Round up stride for IDMAC line start address alignment */
+ if (cc->planar)
+ stride = round_up(width, 16);
+ else
+ stride = round_up((width * cc->bpp) >> 3, 8);
+
+ pix->width = width;
pix->height = mbus->height;
pix->pixelformat = cc->fourcc;
pix->colorspace = mbus->colorspace;
@@ -613,7 +622,19 @@ int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
pix->quantization = mbus->quantization;
pix->field = mbus->field;
pix->bytesperline = stride;
- pix->sizeimage = (pix->width * pix->height * cc->bpp) >> 3;
+ pix->sizeimage = cc->planar ? ((stride * pix->height * cc->bpp) >> 3) :
+ stride * pix->height;
+
+ /*
+ * set capture compose rectangle, which is fixed to the
+ * source subdevice mbus format.
+ */
+ if (compose) {
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = mbus->width;
+ compose->height = mbus->height;
+ }
return 0;
}
@@ -626,13 +647,11 @@ int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
memset(image, 0, sizeof(*image));
- ret = imx_media_mbus_fmt_to_pix_fmt(&image->pix, mbus, NULL);
+ ret = imx_media_mbus_fmt_to_pix_fmt(&image->pix, &image->rect,
+ mbus, NULL);
if (ret)
return ret;
- image->rect.width = mbus->width;
- image->rect.height = mbus->height;
-
return 0;
}
EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_ipu_image);
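
The reworked imx_media_mbus_fmt_to_pix_fmt() derives everything from an aligned stride: the width rounds up to the 8-pixel minimum burst, the line stride aligns to 16 pixels for planar formats or 8 bytes for packed ones, and sizeimage is computed from the stride rather than the raw width. A worked sketch of that arithmetic for a hypothetical 722-pixel-wide source (example numbers, not taken from the patch):

#include <stdio.h>

/* round x up to a power-of-two alignment a */
static unsigned int round_up_p2(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int mbus_w = 722, height = 480;
	unsigned int width = round_up_p2(mbus_w, 8);		/* 728 */

	/* planar 4:2:0 (e.g. YUV420), 12 bits per pixel overall */
	unsigned int stride_p = round_up_p2(width, 16);		/* 736 */
	unsigned int size_p = (stride_p * height * 12) >> 3;	/* 529920 */

	/* packed (e.g. YUYV), 16 bits per pixel */
	unsigned int stride_i = round_up_p2((width * 16) >> 3, 8); /* 1456 */
	unsigned int size_i = stride_i * height;		   /* 698880 */

	printf("planar: stride=%u size=%u\n", stride_p, size_p);
	printf("packed: stride=%u size=%u\n", stride_i, size_i);
	return 0;
}
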
@@ -696,20 +715,20 @@ void imx_media_grp_id_to_sd_name(char *sd_name, int sz, u32 grp_id, int ipu_id)
int id;
switch (grp_id) {
- case IMX_MEDIA_GRP_ID_CSI0...IMX_MEDIA_GRP_ID_CSI1:
- id = (grp_id >> IMX_MEDIA_GRP_ID_CSI_BIT) - 1;
+ case IMX_MEDIA_GRP_ID_IPU_CSI0...IMX_MEDIA_GRP_ID_IPU_CSI1:
+ id = (grp_id >> IMX_MEDIA_GRP_ID_IPU_CSI_BIT) - 1;
snprintf(sd_name, sz, "ipu%d_csi%d", ipu_id + 1, id);
break;
- case IMX_MEDIA_GRP_ID_VDIC:
+ case IMX_MEDIA_GRP_ID_IPU_VDIC:
snprintf(sd_name, sz, "ipu%d_vdic", ipu_id + 1);
break;
- case IMX_MEDIA_GRP_ID_IC_PRP:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
snprintf(sd_name, sz, "ipu%d_ic_prp", ipu_id + 1);
break;
- case IMX_MEDIA_GRP_ID_IC_PRPENC:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
snprintf(sd_name, sz, "ipu%d_ic_prpenc", ipu_id + 1);
break;
- case IMX_MEDIA_GRP_ID_IC_PRPVF:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
snprintf(sd_name, sz, "ipu%d_ic_prpvf", ipu_id + 1);
break;
default:
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index 482250d47e7c..2808662e2597 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -219,26 +219,18 @@ static void __maybe_unused prepare_vdi_in_buffers(struct vdic_priv *priv,
switch (priv->fieldtype) {
case V4L2_FIELD_SEQ_TB:
- prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0);
- curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + fs;
- next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
- break;
case V4L2_FIELD_SEQ_BT:
prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + fs;
curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + fs;
break;
+ case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + is;
curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
break;
- default:
- /* assume V4L2_FIELD_INTERLACED_TB */
- prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0);
- curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
- next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
- break;
}
ipu_cpmem_set_buffer(priv->vdi_in_ch_p, 0, prev_phys);
@@ -263,10 +255,10 @@ static int setup_vdi_channel(struct vdic_priv *priv,
memset(&image, 0, sizeof(image));
image.pix = vdev->fmt.fmt.pix;
+ image.rect = vdev->compose;
/* one field to VDIC channels */
image.pix.height /= 2;
- image.rect.width = image.pix.width;
- image.rect.height = image.pix.height;
+ image.rect.height /= 2;
image.phys0 = phys0;
image.phys1 = phys1;
@@ -826,7 +818,10 @@ static int vdic_s_frame_interval(struct v4l2_subdev *sd,
switch (fi->pad) {
case VDIC_SINK_PAD_DIRECT:
case VDIC_SINK_PAD_IDMAC:
- /* No limits on input frame interval */
+ /* Accept any valid (non-zero) input frame interval */
+ if (fi->interval.numerator == 0 ||
+ fi->interval.denominator == 0)
+ fi->interval = priv->frame_interval[fi->pad];
/* Reset output interval */
*output_fi = fi->interval;
if (priv->csi_direct)
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index bc7feb81937c..ae964c8d5be1 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -80,6 +80,8 @@ struct imx_media_video_dev {
/* the user format */
struct v4l2_format fmt;
+ /* the compose rectangle */
+ struct v4l2_rect compose;
const struct imx_media_pixfmt *cc;
/* links this vdev to master list */
@@ -178,7 +180,8 @@ void imx_media_fill_default_mbus_fields(struct v4l2_mbus_framefmt *tryfmt,
struct v4l2_mbus_framefmt *fmt,
bool ic_route);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
- struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_rect *compose,
+ const struct v4l2_mbus_framefmt *mbus,
const struct imx_media_pixfmt *cc);
int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
struct v4l2_mbus_framefmt *mbus);
@@ -226,6 +229,18 @@ int imx_media_add_async_subdev(struct imx_media_dev *imxmd,
struct fwnode_handle *fwnode,
struct platform_device *pdev);
+int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd);
+int imx_media_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification);
+void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg);
+int imx_media_probe_complete(struct v4l2_async_notifier *notifier);
+
+struct imx_media_dev *imx_media_dev_init(struct device *dev);
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd);
+
/* imx-media-fim.c */
struct imx_media_fim;
void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp);
@@ -238,8 +253,8 @@ void imx_media_fim_free(struct imx_media_fim *fim);
/* imx-media-internal-sd.c */
int imx_media_add_internal_subdevs(struct imx_media_dev *imxmd);
-int imx_media_create_internal_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *sd);
+int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *sd);
void imx_media_remove_internal_subdevs(struct imx_media_dev *imxmd);
/* imx-media-of.c */
@@ -249,6 +264,8 @@ int imx_media_create_of_links(struct imx_media_dev *imxmd,
struct v4l2_subdev *sd);
int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
struct v4l2_subdev *csi);
+int imx_media_of_add_csi(struct imx_media_dev *imxmd,
+ struct device_node *csi_np);
/* imx-media-capture.c */
struct imx_media_video_dev *
@@ -259,18 +276,20 @@ void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev);
struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
- struct v4l2_pix_format *pix);
+ const struct v4l2_pix_format *pix,
+ const struct v4l2_rect *compose);
void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
/* subdev group ids */
-#define IMX_MEDIA_GRP_ID_CSI2 BIT(8)
-#define IMX_MEDIA_GRP_ID_CSI_BIT 9
-#define IMX_MEDIA_GRP_ID_CSI (0x3 << IMX_MEDIA_GRP_ID_CSI_BIT)
-#define IMX_MEDIA_GRP_ID_CSI0 BIT(IMX_MEDIA_GRP_ID_CSI_BIT)
-#define IMX_MEDIA_GRP_ID_CSI1 (2 << IMX_MEDIA_GRP_ID_CSI_BIT)
-#define IMX_MEDIA_GRP_ID_VDIC BIT(11)
-#define IMX_MEDIA_GRP_ID_IC_PRP BIT(12)
-#define IMX_MEDIA_GRP_ID_IC_PRPENC BIT(13)
-#define IMX_MEDIA_GRP_ID_IC_PRPVF BIT(14)
+#define IMX_MEDIA_GRP_ID_CSI2 BIT(8)
+#define IMX_MEDIA_GRP_ID_CSI BIT(9)
+#define IMX_MEDIA_GRP_ID_IPU_CSI_BIT 10
+#define IMX_MEDIA_GRP_ID_IPU_CSI (0x3 << IMX_MEDIA_GRP_ID_IPU_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_IPU_CSI0 BIT(IMX_MEDIA_GRP_ID_IPU_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_IPU_CSI1 (2 << IMX_MEDIA_GRP_ID_IPU_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_IPU_VDIC BIT(12)
+#define IMX_MEDIA_GRP_ID_IPU_IC_PRP BIT(13)
+#define IMX_MEDIA_GRP_ID_IPU_IC_PRPENC BIT(14)
+#define IMX_MEDIA_GRP_ID_IPU_IC_PRPVF BIT(15)
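+
+/*
+ * Example of the resulting bit layout (illustrative): with
+ * IMX_MEDIA_GRP_ID_IPU_CSI_BIT = 10, IPU_CSI0 = BIT(10) = 0x400 and
+ * IPU_CSI1 = 2 << 10 = 0x800, both covered by the two-bit IPU_CSI
+ * mask (0x3 << 10 = 0xc00). imx_media_grp_id_to_sd_name() recovers
+ * the CSI index as (grp_id >> IMX_MEDIA_GRP_ID_IPU_CSI_BIT) - 1.
+ */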
#endif
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
new file mode 100644
index 000000000000..3fba7c27c0ec
--- /dev/null
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -0,0 +1,1369 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Capture CSI Subdev for Freescale i.MX7 SOC
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gcd.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <media/imx.h>
+#include "imx-media.h"
+
+#define IMX7_CSI_PAD_SINK 0
+#define IMX7_CSI_PAD_SRC 1
+#define IMX7_CSI_PADS_NUM 2
+
+/* reset values */
+#define CSICR1_RESET_VAL 0x40000800
+#define CSICR2_RESET_VAL 0x0
+#define CSICR3_RESET_VAL 0x0
+
+/* csi control reg 1 */
+#define BIT_SWAP16_EN BIT(31)
+#define BIT_EXT_VSYNC BIT(30)
+#define BIT_EOF_INT_EN BIT(29)
+#define BIT_PRP_IF_EN BIT(28)
+#define BIT_CCIR_MODE BIT(27)
+#define BIT_COF_INT_EN BIT(26)
+#define BIT_SF_OR_INTEN BIT(25)
+#define BIT_RF_OR_INTEN BIT(24)
+#define BIT_SFF_DMA_DONE_INTEN BIT(22)
+#define BIT_STATFF_INTEN BIT(21)
+#define BIT_FB2_DMA_DONE_INTEN BIT(20)
+#define BIT_FB1_DMA_DONE_INTEN BIT(19)
+#define BIT_RXFF_INTEN BIT(18)
+#define BIT_SOF_POL BIT(17)
+#define BIT_SOF_INTEN BIT(16)
+#define BIT_MCLKDIV (0xF << 12)
+#define BIT_HSYNC_POL BIT(11)
+#define BIT_CCIR_EN BIT(10)
+#define BIT_MCLKEN BIT(9)
+#define BIT_FCC BIT(8)
+#define BIT_PACK_DIR BIT(7)
+#define BIT_CLR_STATFIFO BIT(6)
+#define BIT_CLR_RXFIFO BIT(5)
+#define BIT_GCLK_MODE BIT(4)
+#define BIT_INV_DATA BIT(3)
+#define BIT_INV_PCLK BIT(2)
+#define BIT_REDGE BIT(1)
+#define BIT_PIXEL_BIT BIT(0)
+
+#define SHIFT_MCLKDIV 12
+
+/* control reg 3 */
+#define BIT_FRMCNT (0xFFFF << 16)
+#define BIT_FRMCNT_RST BIT(15)
+#define BIT_DMA_REFLASH_RFF BIT(14)
+#define BIT_DMA_REFLASH_SFF BIT(13)
+#define BIT_DMA_REQ_EN_RFF BIT(12)
+#define BIT_DMA_REQ_EN_SFF BIT(11)
+#define BIT_STATFF_LEVEL (0x7 << 8)
+#define BIT_HRESP_ERR_EN BIT(7)
+#define BIT_RXFF_LEVEL (0x7 << 4)
+#define BIT_TWO_8BIT_SENSOR BIT(3)
+#define BIT_ZERO_PACK_EN BIT(2)
+#define BIT_ECC_INT_EN BIT(1)
+#define BIT_ECC_AUTO_EN BIT(0)
+
+#define SHIFT_FRMCNT 16
+#define SHIFT_RXFIFO_LEVEL 4
+
+/* csi status reg */
+#define BIT_ADDR_CH_ERR_INT BIT(28)
+#define BIT_FIELD0_INT BIT(27)
+#define BIT_FIELD1_INT BIT(26)
+#define BIT_SFF_OR_INT BIT(25)
+#define BIT_RFF_OR_INT BIT(24)
+#define BIT_DMA_TSF_DONE_SFF BIT(22)
+#define BIT_STATFF_INT BIT(21)
+#define BIT_DMA_TSF_DONE_FB2 BIT(20)
+#define BIT_DMA_TSF_DONE_FB1 BIT(19)
+#define BIT_RXFF_INT BIT(18)
+#define BIT_EOF_INT BIT(17)
+#define BIT_SOF_INT BIT(16)
+#define BIT_F2_INT BIT(15)
+#define BIT_F1_INT BIT(14)
+#define BIT_COF_INT BIT(13)
+#define BIT_HRESP_ERR_INT BIT(7)
+#define BIT_ECC_INT BIT(1)
+#define BIT_DRDY BIT(0)
+
+/* csi control reg 18 */
+#define BIT_CSI_HW_ENABLE BIT(31)
+#define BIT_MIPI_DATA_FORMAT_RAW8 (0x2a << 25)
+#define BIT_MIPI_DATA_FORMAT_RAW10 (0x2b << 25)
+#define BIT_MIPI_DATA_FORMAT_RAW12 (0x2c << 25)
+#define BIT_MIPI_DATA_FORMAT_RAW14 (0x2d << 25)
+#define BIT_MIPI_DATA_FORMAT_YUV422_8B (0x1e << 25)
+#define BIT_MIPI_DATA_FORMAT_MASK (0x3F << 25)
+#define BIT_MIPI_DATA_FORMAT_OFFSET 25
+#define BIT_DATA_FROM_MIPI BIT(22)
+#define BIT_MIPI_YU_SWAP BIT(21)
+#define BIT_MIPI_DOUBLE_CMPNT BIT(20)
+#define BIT_BASEADDR_CHG_ERR_EN BIT(9)
+#define BIT_BASEADDR_SWITCH_SEL BIT(5)
+#define BIT_BASEADDR_SWITCH_EN BIT(4)
+#define BIT_PARALLEL24_EN BIT(3)
+#define BIT_DEINTERLACE_EN BIT(2)
+#define BIT_TVDECODER_IN_EN BIT(1)
+#define BIT_NTSC_EN BIT(0)
+
+#define CSI_MCLK_VF 1
+#define CSI_MCLK_ENC 2
+#define CSI_MCLK_RAW 4
+#define CSI_MCLK_I2C 8
+
+#define CSI_CSICR1 0x0
+#define CSI_CSICR2 0x4
+#define CSI_CSICR3 0x8
+#define CSI_STATFIFO 0xC
+#define CSI_CSIRXFIFO 0x10
+#define CSI_CSIRXCNT 0x14
+#define CSI_CSISR 0x18
+
+#define CSI_CSIDBG 0x1C
+#define CSI_CSIDMASA_STATFIFO 0x20
+#define CSI_CSIDMATS_STATFIFO 0x24
+#define CSI_CSIDMASA_FB1 0x28
+#define CSI_CSIDMASA_FB2 0x2C
+#define CSI_CSIFBUF_PARA 0x30
+#define CSI_CSIIMAG_PARA 0x34
+
+#define CSI_CSICR18 0x48
+#define CSI_CSICR19 0x4c
+
+static const char * const imx7_csi_clk_id[] = {"axi", "dcic", "mclk"};
+
+struct imx7_csi {
+ struct device *dev;
+ struct v4l2_subdev sd;
+ struct imx_media_video_dev *vdev;
+ struct imx_media_dev *imxmd;
+ struct media_pad pad[IMX7_CSI_PADS_NUM];
+
+ /* lock to protect members below */
+ struct mutex lock;
+ /* lock to protect irq handler when stop streaming */
+ spinlock_t irqlock;
+
+ struct v4l2_subdev *src_sd;
+
+ struct media_entity *sink;
+
+ struct v4l2_fwnode_endpoint upstream_ep;
+
+ struct v4l2_mbus_framefmt format_mbus[IMX7_CSI_PADS_NUM];
+ const struct imx_media_pixfmt *cc[IMX7_CSI_PADS_NUM];
+ struct v4l2_fract frame_interval[IMX7_CSI_PADS_NUM];
+
+ struct v4l2_ctrl_handler ctrl_hdlr;
+
+ void __iomem *regbase;
+ int irq;
+
+ int num_clks;
+ struct clk_bulk_data *clks;
+
+ /* active vb2 buffers to send to video dev sink */
+ struct imx_media_buffer *active_vb2_buf[2];
+ struct imx_media_dma_buf underrun_buf;
+
+ int buf_num;
+ u32 frame_sequence;
+
+ bool last_eof;
+ bool is_init;
+ bool is_streaming;
+ bool is_csi2;
+
+ struct completion last_eof_completion;
+};
+
+#define imx7_csi_reg_read(_csi, _offset) \
+ __raw_readl((_csi)->regbase + (_offset))
+#define imx7_csi_reg_write(_csi, _val, _offset) \
+ __raw_writel(_val, (_csi)->regbase + (_offset))
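+
+/*
+ * Note that these helpers use __raw_readl()/__raw_writel(), i.e.
+ * native byte order and no memory barriers; all CSI register
+ * accesses below go through them.
+ */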
+
+static void imx7_csi_clk_enable(struct imx7_csi *csi)
+{
+ int ret;
+
+ ret = clk_bulk_prepare_enable(csi->num_clks, csi->clks);
+ if (ret < 0)
+ dev_err(csi->dev, "failed to enable clocks\n");
+}
+
+static void imx7_csi_clk_disable(struct imx7_csi *csi)
+{
+ clk_bulk_disable_unprepare(csi->num_clks, csi->clks);
+}
+
+static void imx7_csi_hw_reset(struct imx7_csi *csi)
+{
+ imx7_csi_reg_write(csi,
+ imx7_csi_reg_read(csi, CSI_CSICR3) | BIT_FRMCNT_RST,
+ CSI_CSICR3);
+
+ imx7_csi_reg_write(csi, CSICR1_RESET_VAL, CSI_CSICR1);
+ imx7_csi_reg_write(csi, CSICR2_RESET_VAL, CSI_CSICR2);
+ imx7_csi_reg_write(csi, CSICR3_RESET_VAL, CSI_CSICR3);
+}
+
+static unsigned long imx7_csi_irq_clear(struct imx7_csi *csi)
+{
+ unsigned long isr;
+
+ isr = imx7_csi_reg_read(csi, CSI_CSISR);
+ imx7_csi_reg_write(csi, isr, CSI_CSISR);
+
+ return isr;
+}
+
+static void imx7_csi_init_interface(struct imx7_csi *csi)
+{
+ unsigned int val = 0;
+ unsigned int imag_para;
+
+ val = BIT_SOF_POL | BIT_REDGE | BIT_GCLK_MODE | BIT_HSYNC_POL |
+ BIT_FCC | 1 << SHIFT_MCLKDIV | BIT_MCLKEN;
+ imx7_csi_reg_write(csi, val, CSI_CSICR1);
+
+ imag_para = (800 << 16) | 600;
+ imx7_csi_reg_write(csi, imag_para, CSI_CSIIMAG_PARA);
+
+ val = BIT_DMA_REFLASH_RFF;
+ imx7_csi_reg_write(csi, val, CSI_CSICR3);
+}
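+
+/*
+ * The 800x600 written to CSI_CSIIMAG_PARA above is only a placeholder
+ * matching the default mbus format set in imx7_csi_registered();
+ * imx7_csi_set_imagpara() reprograms it with the real frame size
+ * before streaming starts.
+ */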
+
+static void imx7_csi_hw_enable_irq(struct imx7_csi *csi)
+{
+ unsigned long cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+
+ cr1 |= BIT_SOF_INTEN;
+ cr1 |= BIT_RFF_OR_INT;
+
+ /* still capture needs DMA interrupt */
+ cr1 |= BIT_FB1_DMA_DONE_INTEN;
+ cr1 |= BIT_FB2_DMA_DONE_INTEN;
+
+ cr1 |= BIT_EOF_INT_EN;
+
+ imx7_csi_reg_write(csi, cr1, CSI_CSICR1);
+}
+
+static void imx7_csi_hw_disable_irq(struct imx7_csi *csi)
+{
+ unsigned long cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+
+ cr1 &= ~BIT_SOF_INTEN;
+ cr1 &= ~BIT_RFF_OR_INT;
+ cr1 &= ~BIT_FB1_DMA_DONE_INTEN;
+ cr1 &= ~BIT_FB2_DMA_DONE_INTEN;
+ cr1 &= ~BIT_EOF_INT_EN;
+
+ imx7_csi_reg_write(csi, cr1, CSI_CSICR1);
+}
+
+static void imx7_csi_hw_enable(struct imx7_csi *csi)
+{
+ unsigned long cr = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ cr |= BIT_CSI_HW_ENABLE;
+
+ imx7_csi_reg_write(csi, cr, CSI_CSICR18);
+}
+
+static void imx7_csi_hw_disable(struct imx7_csi *csi)
+{
+ unsigned long cr = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ cr &= ~BIT_CSI_HW_ENABLE;
+
+ imx7_csi_reg_write(csi, cr, CSI_CSICR18);
+}
+
+static void imx7_csi_dma_reflash(struct imx7_csi *csi)
+{
+ unsigned long cr3;
+
+ cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+ cr3 |= BIT_DMA_REFLASH_RFF;
+ imx7_csi_reg_write(csi, cr3, CSI_CSICR3);
+}
+
+static void imx7_csi_rx_fifo_clear(struct imx7_csi *csi)
+{
+ unsigned long cr1;
+
+ cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+ imx7_csi_reg_write(csi, cr1 & ~BIT_FCC, CSI_CSICR1);
+ cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+ imx7_csi_reg_write(csi, cr1 | BIT_CLR_RXFIFO, CSI_CSICR1);
+
+ cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+ imx7_csi_reg_write(csi, cr1 | BIT_FCC, CSI_CSICR1);
+}
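+
+/*
+ * The RxFIFO is cleared with the FIFO clear control bit (BIT_FCC)
+ * temporarily de-asserted: FCC off, self-clearing BIT_CLR_RXFIFO set,
+ * FCC back on (presumably the asynchronous clear only takes effect
+ * while FCC is 0).
+ */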
+
+static void imx7_csi_buf_stride_set(struct imx7_csi *csi, u32 stride)
+{
+ imx7_csi_reg_write(csi, stride, CSI_CSIFBUF_PARA);
+}
+
+static void imx7_csi_deinterlace_enable(struct imx7_csi *csi, bool enable)
+{
+ unsigned long cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ if (enable)
+ cr18 |= BIT_DEINTERLACE_EN;
+ else
+ cr18 &= ~BIT_DEINTERLACE_EN;
+
+ imx7_csi_reg_write(csi, cr18, CSI_CSICR18);
+}
+
+static void imx7_csi_dmareq_rff_enable(struct imx7_csi *csi)
+{
+ unsigned long cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+ unsigned long cr2 = imx7_csi_reg_read(csi, CSI_CSICR2);
+
+ /* Burst Type of DMA Transfer from RxFIFO. INCR16 */
+ cr2 |= 0xC0000000;
+
+ cr3 |= BIT_DMA_REQ_EN_RFF;
+ cr3 |= BIT_HRESP_ERR_EN;
+ cr3 &= ~BIT_RXFF_LEVEL;
+ cr3 |= 0x2 << 4;
+
+ imx7_csi_reg_write(csi, cr3, CSI_CSICR3);
+ imx7_csi_reg_write(csi, cr2, CSI_CSICR2);
+}
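+
+/*
+ * Rough summary of the magic values above (illustrative): the
+ * 0xC0000000 write selects the INCR16 burst type for RxFIFO DMA
+ * transfers in CSICR2, and the RXFF_LEVEL field (cleared, then set to
+ * 0x2 << 4) selects the RxFIFO fill level that triggers a DMA request.
+ */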
+
+static void imx7_csi_dmareq_rff_disable(struct imx7_csi *csi)
+{
+ unsigned long cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+
+ cr3 &= ~BIT_DMA_REQ_EN_RFF;
+ cr3 &= ~BIT_HRESP_ERR_EN;
+ imx7_csi_reg_write(csi, cr3, CSI_CSICR3);
+}
+
+static void imx7_csi_set_imagpara(struct imx7_csi *csi, int width, int height)
+{
+ int imag_para;
+ int rx_count;
+
+ rx_count = (width * height) >> 2;
+ imx7_csi_reg_write(csi, rx_count, CSI_CSIRXCNT);
+
+ imag_para = (width << 16) | height;
+ imx7_csi_reg_write(csi, imag_para, CSI_CSIIMAG_PARA);
+
+ /* reflash the embedded DMA controller */
+ imx7_csi_dma_reflash(csi);
+}
+
+static void imx7_csi_sw_reset(struct imx7_csi *csi)
+{
+ imx7_csi_hw_disable(csi);
+
+ imx7_csi_rx_fifo_clear(csi);
+
+ imx7_csi_dma_reflash(csi);
+
+ usleep_range(2000, 3000);
+
+ imx7_csi_irq_clear(csi);
+
+ imx7_csi_hw_enable(csi);
+}
+
+static void imx7_csi_error_recovery(struct imx7_csi *csi)
+{
+ imx7_csi_hw_disable(csi);
+
+ imx7_csi_rx_fifo_clear(csi);
+
+ imx7_csi_dma_reflash(csi);
+
+ imx7_csi_hw_enable(csi);
+}
+
+static void imx7_csi_init(struct imx7_csi *csi)
+{
+ if (csi->is_init)
+ return;
+
+ imx7_csi_clk_enable(csi);
+ imx7_csi_hw_reset(csi);
+ imx7_csi_init_interface(csi);
+ imx7_csi_dmareq_rff_enable(csi);
+
+ csi->is_init = true;
+}
+
+static void imx7_csi_deinit(struct imx7_csi *csi)
+{
+ if (!csi->is_init)
+ return;
+
+ imx7_csi_hw_reset(csi);
+ imx7_csi_init_interface(csi);
+ imx7_csi_dmareq_rff_disable(csi);
+ imx7_csi_clk_disable(csi);
+
+ csi->is_init = false;
+}
+
+static int imx7_csi_get_upstream_endpoint(struct imx7_csi *csi,
+ struct v4l2_fwnode_endpoint *ep,
+ bool skip_mux)
+{
+ struct device_node *endpoint, *port;
+ struct media_entity *src;
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+
+ if (!csi->src_sd)
+ return -EPIPE;
+
+ src = &csi->src_sd->entity;
+
+skip_video_mux:
+ /* get source pad of entity directly upstream from src */
+ pad = imx_media_find_upstream_pad(csi->imxmd, src, 0);
+ if (IS_ERR(pad))
+ return PTR_ERR(pad);
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ /* To get bus type we may need to skip video mux */
+ if (skip_mux && src->function == MEDIA_ENT_F_VID_MUX) {
+ src = &sd->entity;
+ goto skip_video_mux;
+ }
+
+ /*
+ * NOTE: this assumes an OF-graph port id is the same as a
+ * media pad index.
+ */
+ port = of_graph_get_port_by_id(sd->dev->of_node, pad->index);
+ if (!port)
+ return -ENODEV;
+
+ endpoint = of_get_next_child(port, NULL);
+ of_node_put(port);
+ if (!endpoint)
+ return -ENODEV;
+
+ v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), ep);
+ of_node_put(endpoint);
+
+ return 0;
+}
+
+static int imx7_csi_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(csi->dev, "link setup %s -> %s\n", remote->entity->name,
+ local->entity->name);
+
+ mutex_lock(&csi->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi->src_sd) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ csi->src_sd = remote_sd;
+ } else {
+ csi->src_sd = NULL;
+ }
+
+ goto init;
+ }
+
+ /* source pad */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi->sink) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ csi->sink = remote->entity;
+ } else {
+ v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
+ v4l2_ctrl_handler_init(&csi->ctrl_hdlr, 0);
+ csi->sink = NULL;
+ }
+
+init:
+ if (csi->sink || csi->src_sd)
+ imx7_csi_init(csi);
+ else
+ imx7_csi_deinit(csi);
+
+unlock:
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct v4l2_fwnode_endpoint upstream_ep = {};
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link, source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ ret = imx7_csi_get_upstream_endpoint(csi, &upstream_ep, true);
+ if (ret) {
+ v4l2_err(&csi->sd, "failed to find upstream endpoint\n");
+ return ret;
+ }
+
+ mutex_lock(&csi->lock);
+
+ csi->upstream_ep = upstream_ep;
+ csi->is_csi2 = (upstream_ep.bus_type == V4L2_MBUS_CSI2_DPHY);
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+static void imx7_csi_update_buf(struct imx7_csi *csi, dma_addr_t phys,
+ int buf_num)
+{
+ if (buf_num == 1)
+ imx7_csi_reg_write(csi, phys, CSI_CSIDMASA_FB2);
+ else
+ imx7_csi_reg_write(csi, phys, CSI_CSIDMASA_FB1);
+}
+
+static void imx7_csi_setup_vb2_buf(struct imx7_csi *csi)
+{
+ struct imx_media_video_dev *vdev = csi->vdev;
+ struct imx_media_buffer *buf;
+ struct vb2_buffer *vb2_buf;
+ dma_addr_t phys[2];
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ buf = imx_media_capture_device_next_buf(vdev);
+ if (buf) {
+ csi->active_vb2_buf[i] = buf;
+ vb2_buf = &buf->vbuf.vb2_buf;
+ phys[i] = vb2_dma_contig_plane_dma_addr(vb2_buf, 0);
+ } else {
+ csi->active_vb2_buf[i] = NULL;
+ phys[i] = csi->underrun_buf.phys;
+ }
+
+ imx7_csi_update_buf(csi, phys[i], i);
+ }
+}
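+
+/*
+ * The two DMA framebuffer slots (FB1/FB2) form a ping-pong pair: each
+ * slot takes the next queued vb2 buffer, or falls back to the internal
+ * underrun_buf so capture keeps running when the queue underruns.
+ */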
+
+static void imx7_csi_dma_unsetup_vb2_buf(struct imx7_csi *csi,
+ enum vb2_buffer_state return_status)
+{
+ struct imx_media_buffer *buf;
+ int i;
+
+ /* return any remaining active frames with return_status */
+ for (i = 0; i < 2; i++) {
+ buf = csi->active_vb2_buf[i];
+ if (buf) {
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, return_status);
+ }
+ }
+}
+
+static void imx7_csi_vb2_buf_done(struct imx7_csi *csi)
+{
+ struct imx_media_video_dev *vdev = csi->vdev;
+ struct imx_media_buffer *done, *next;
+ struct vb2_buffer *vb;
+ dma_addr_t phys;
+
+ done = csi->active_vb2_buf[csi->buf_num];
+ if (done) {
+ done->vbuf.field = vdev->fmt.fmt.pix.field;
+ done->vbuf.sequence = csi->frame_sequence;
+ vb = &done->vbuf.vb2_buf;
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+ csi->frame_sequence++;
+
+ /* get next queued buffer */
+ next = imx_media_capture_device_next_buf(vdev);
+ if (next) {
+ phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
+ csi->active_vb2_buf[csi->buf_num] = next;
+ } else {
+ phys = csi->underrun_buf.phys;
+ csi->active_vb2_buf[csi->buf_num] = NULL;
+ }
+
+ imx7_csi_update_buf(csi, phys, csi->buf_num);
+}
+
+static irqreturn_t imx7_csi_irq_handler(int irq, void *data)
+{
+ struct imx7_csi *csi = data;
+ unsigned long status;
+
+ spin_lock(&csi->irqlock);
+
+ status = imx7_csi_irq_clear(csi);
+
+ if (status & BIT_RFF_OR_INT) {
+ dev_warn(csi->dev, "Rx fifo overflow\n");
+ imx7_csi_error_recovery(csi);
+ }
+
+ if (status & BIT_HRESP_ERR_INT) {
+ dev_warn(csi->dev, "Hresponse error detected\n");
+ imx7_csi_error_recovery(csi);
+ }
+
+ if (status & BIT_ADDR_CH_ERR_INT) {
+ imx7_csi_hw_disable(csi);
+
+ imx7_csi_dma_reflash(csi);
+
+ imx7_csi_hw_enable(csi);
+ }
+
+ if ((status & BIT_DMA_TSF_DONE_FB1) &&
+ (status & BIT_DMA_TSF_DONE_FB2)) {
+ /*
+ * If both the FB1 and FB2 interrupt bits are set, the CSI DMA
+ * is writing to one of the two buffers, but software cannot
+ * tell which. Skip the buffer update in that case so that a
+ * new base address is not programmed for a buffer the CSI is
+ * still writing to.
+ */
+ } else if (status & BIT_DMA_TSF_DONE_FB1) {
+ csi->buf_num = 0;
+ } else if (status & BIT_DMA_TSF_DONE_FB2) {
+ csi->buf_num = 1;
+ }
+
+ if ((status & BIT_DMA_TSF_DONE_FB1) ||
+ (status & BIT_DMA_TSF_DONE_FB2)) {
+ imx7_csi_vb2_buf_done(csi);
+
+ if (csi->last_eof) {
+ complete(&csi->last_eof_completion);
+ csi->last_eof = false;
+ }
+ }
+
+ spin_unlock(&csi->irqlock);
+
+ return IRQ_HANDLED;
+}
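+
+/*
+ * buf_num above records which slot (0 = FB1, 1 = FB2) completed its
+ * transfer; imx7_csi_vb2_buf_done() returns that slot's buffer and
+ * imx7_csi_update_buf() rearms the matching CSI_CSIDMASA_FB1/FB2 base
+ * address register.
+ */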
+
+static int imx7_csi_dma_start(struct imx7_csi *csi)
+{
+ struct imx_media_video_dev *vdev = csi->vdev;
+ struct v4l2_pix_format *out_pix = &vdev->fmt.fmt.pix;
+ int ret;
+
+ ret = imx_media_alloc_dma_buf(csi->imxmd, &csi->underrun_buf,
+ out_pix->sizeimage);
+ if (ret < 0) {
+ v4l2_warn(&csi->sd, "consider increasing the CMA area\n");
+ return ret;
+ }
+
+ csi->frame_sequence = 0;
+ csi->last_eof = false;
+ init_completion(&csi->last_eof_completion);
+
+ imx7_csi_setup_vb2_buf(csi);
+
+ return 0;
+}
+
+static void imx7_csi_dma_stop(struct imx7_csi *csi)
+{
+ unsigned long timeout_jiffies;
+ unsigned long flags;
+ int ret;
+
+ /* mark next EOF interrupt as the last before stream off */
+ spin_lock_irqsave(&csi->irqlock, flags);
+ csi->last_eof = true;
+ spin_unlock_irqrestore(&csi->irqlock, flags);
+
+ /* then wait for the interrupt handler to mark it completed */
+ timeout_jiffies = msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT);
+ ret = wait_for_completion_timeout(&csi->last_eof_completion,
+ timeout_jiffies);
+ if (ret == 0)
+ v4l2_warn(&csi->sd, "timed out waiting for last EOF\n");
+
+ imx7_csi_hw_disable_irq(csi);
+
+ imx7_csi_dma_unsetup_vb2_buf(csi, VB2_BUF_STATE_ERROR);
+
+ imx_media_free_dma_buf(csi->imxmd, &csi->underrun_buf);
+}
+
+static int imx7_csi_configure(struct imx7_csi *csi)
+{
+ struct imx_media_video_dev *vdev = csi->vdev;
+ struct v4l2_pix_format *out_pix = &vdev->fmt.fmt.pix;
+ u32 in_code = csi->format_mbus[IMX7_CSI_PAD_SINK].code;
+ u32 cr1, cr18;
+
+ if (out_pix->field == V4L2_FIELD_INTERLACED) {
+ imx7_csi_deinterlace_enable(csi, true);
+ imx7_csi_buf_stride_set(csi, out_pix->width);
+ } else {
+ imx7_csi_deinterlace_enable(csi, false);
+ imx7_csi_buf_stride_set(csi, 0);
+ }
+
+ imx7_csi_set_imagpara(csi, out_pix->width, out_pix->height);
+
+ if (!csi->is_csi2)
+ return 0;
+
+ cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+ cr1 &= ~BIT_GCLK_MODE;
+
+ cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+ cr18 &= ~BIT_MIPI_DATA_FORMAT_MASK;
+ cr18 |= BIT_DATA_FROM_MIPI;
+
+ switch (out_pix->pixelformat) {
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUYV:
+ cr18 |= BIT_MIPI_DATA_FORMAT_YUV422_8B;
+ break;
+ case V4L2_PIX_FMT_SBGGR8:
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW8;
+ break;
+ case V4L2_PIX_FMT_SBGGR16:
+ if (in_code == MEDIA_BUS_FMT_SBGGR10_1X10)
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW10;
+ else if (in_code == MEDIA_BUS_FMT_SBGGR12_1X12)
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW12;
+ else if (in_code == MEDIA_BUS_FMT_SBGGR14_1X14)
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW14;
+ cr1 |= BIT_PIXEL_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ imx7_csi_reg_write(csi, cr1, CSI_CSICR1);
+ imx7_csi_reg_write(csi, cr18, CSI_CSICR18);
+
+ return 0;
+}
+
+static int imx7_csi_enable(struct imx7_csi *csi)
+{
+ imx7_csi_sw_reset(csi);
+
+ if (csi->is_csi2) {
+ imx7_csi_dmareq_rff_enable(csi);
+ imx7_csi_hw_enable_irq(csi);
+ imx7_csi_hw_enable(csi);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void imx7_csi_disable(struct imx7_csi *csi)
+{
+ imx7_csi_dmareq_rff_disable(csi);
+
+ imx7_csi_hw_disable_irq(csi);
+
+ imx7_csi_buf_stride_set(csi, 0);
+
+ imx7_csi_hw_disable(csi);
+}
+
+static int imx7_csi_streaming_start(struct imx7_csi *csi)
+{
+ int ret;
+
+ ret = imx7_csi_dma_start(csi);
+ if (ret < 0)
+ return ret;
+
+ ret = imx7_csi_configure(csi);
+ if (ret < 0)
+ goto dma_stop;
+
+ imx7_csi_enable(csi);
+
+ return 0;
+
+dma_stop:
+ imx7_csi_dma_stop(csi);
+
+ return ret;
+}
+
+static int imx7_csi_streaming_stop(struct imx7_csi *csi)
+{
+ imx7_csi_dma_stop(csi);
+
+ imx7_csi_disable(csi);
+
+ return 0;
+}
+
+static int imx7_csi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&csi->lock);
+
+ if (!csi->src_sd || !csi->sink) {
+ ret = -EPIPE;
+ goto out_unlock;
+ }
+
+ if (csi->is_streaming == !!enable)
+ goto out_unlock;
+
+ if (enable) {
+ ret = v4l2_subdev_call(csi->src_sd, video, s_stream, 1);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = imx7_csi_streaming_start(csi);
+ if (ret < 0) {
+ v4l2_subdev_call(csi->src_sd, video, s_stream, 0);
+ goto out_unlock;
+ }
+ } else {
+ imx7_csi_streaming_stop(csi);
+
+ v4l2_subdev_call(csi->src_sd, video, s_stream, 0);
+ }
+
+ csi->is_streaming = !!enable;
+
+out_unlock:
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+imx7_csi_get_format(struct imx7_csi *csi,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csi->sd, cfg, pad);
+
+ return &csi->format_mbus[pad];
+}
+
+static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *in_fmt;
+ int ret = 0;
+
+ mutex_lock(&csi->lock);
+
+ in_fmt = imx7_csi_get_format(csi, cfg, IMX7_CSI_PAD_SINK, code->which);
+
+ switch (code->pad) {
+ case IMX7_CSI_PAD_SINK:
+ ret = imx_media_enum_mbus_format(&code->code, code->index,
+ CS_SEL_ANY, true);
+ break;
+ case IMX7_CSI_PAD_SRC:
+ if (code->index != 0) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ code->code = in_fmt->code;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+out_unlock:
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+static int imx7_csi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ mutex_lock(&csi->lock);
+
+ fmt = imx7_csi_get_format(csi, cfg, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ sdformat->format = *fmt;
+
+out_unlock:
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+static int imx7_csi_try_fmt(struct imx7_csi *csi,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat,
+ const struct imx_media_pixfmt **cc)
+{
+ const struct imx_media_pixfmt *in_cc;
+ struct v4l2_mbus_framefmt *in_fmt;
+ u32 code;
+
+ in_fmt = imx7_csi_get_format(csi, cfg, IMX7_CSI_PAD_SINK,
+ sdformat->which);
+ if (!in_fmt)
+ return -EINVAL;
+
+ switch (sdformat->pad) {
+ case IMX7_CSI_PAD_SRC:
+ in_cc = imx_media_find_mbus_format(in_fmt->code, CS_SEL_ANY,
+ true);
+
+ sdformat->format.width = in_fmt->width;
+ sdformat->format.height = in_fmt->height;
+ sdformat->format.code = in_fmt->code;
+ *cc = in_cc;
+
+ sdformat->format.colorspace = in_fmt->colorspace;
+ sdformat->format.xfer_func = in_fmt->xfer_func;
+ sdformat->format.quantization = in_fmt->quantization;
+ sdformat->format.ycbcr_enc = in_fmt->ycbcr_enc;
+ break;
+ case IMX7_CSI_PAD_SINK:
+ *cc = imx_media_find_mbus_format(sdformat->format.code,
+ CS_SEL_ANY, true);
+ if (!*cc) {
+ imx_media_enum_mbus_format(&code, 0, CS_SEL_ANY, false);
+ *cc = imx_media_find_mbus_format(code, CS_SEL_ANY,
+ false);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ imx_media_fill_default_mbus_fields(&sdformat->format, in_fmt,
+ false);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct imx_media_video_dev *vdev = csi->vdev;
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_pix_format vdev_fmt;
+ struct v4l2_rect vdev_compose;
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_format format;
+ int ret = 0;
+
+ if (sdformat->pad >= IMX7_CSI_PADS_NUM)
+ return -EINVAL;
+
+ mutex_lock(&csi->lock);
+
+ if (csi->is_streaming) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ imx7_csi_try_fmt(csi, cfg, sdformat, &cc);
+
+ fmt = imx7_csi_get_format(csi, cfg, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ *fmt = sdformat->format;
+
+ if (sdformat->pad == IMX7_CSI_PAD_SINK) {
+ /* propagate format to source pads */
+ format.pad = IMX7_CSI_PAD_SRC;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ if (imx7_csi_try_fmt(csi, cfg, &format, &outcc)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ outfmt = imx7_csi_get_format(csi, cfg, IMX7_CSI_PAD_SRC,
+ sdformat->which);
+ *outfmt = format.format;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ csi->cc[IMX7_CSI_PAD_SRC] = outcc;
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
+ goto out_unlock;
+
+ csi->cc[sdformat->pad] = cc;
+
+ /* propagate output pad format to capture device */
+ imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt, &vdev_compose,
+ &csi->format_mbus[IMX7_CSI_PAD_SRC],
+ csi->cc[IMX7_CSI_PAD_SRC]);
+ mutex_unlock(&csi->lock);
+ imx_media_capture_device_set_format(vdev, &vdev_fmt, &vdev_compose);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+static int imx7_csi_registered(struct v4l2_subdev *sd)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
+ csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ /* set a default mbus format */
+ ret = imx_media_init_mbus_fmt(&csi->format_mbus[i],
+ 800, 600, 0, V4L2_FIELD_NONE,
+ &csi->cc[i]);
+ if (ret < 0)
+ return ret;
+
+ /* init default frame interval */
+ csi->frame_interval[i].numerator = 1;
+ csi->frame_interval[i].denominator = 30;
+ }
+
+ ret = media_entity_pads_init(&sd->entity, IMX7_CSI_PADS_NUM, csi->pad);
+ if (ret < 0)
+ return ret;
+
+ ret = imx_media_capture_device_register(csi->vdev);
+ if (ret < 0)
+ return ret;
+
+ ret = imx_media_add_video_device(csi->imxmd, csi->vdev);
+ if (ret < 0) {
+ imx_media_capture_device_unregister(csi->vdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void imx7_csi_unregistered(struct v4l2_subdev *sd)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+
+ imx_media_capture_device_unregister(csi->vdev);
+}
+
+static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+ int ret;
+ int i;
+
+ for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, i);
+
+ ret = imx_media_init_mbus_fmt(mf, 800, 600, 0, V4L2_FIELD_NONE,
+ &csi->cc[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations imx7_csi_entity_ops = {
+ .link_setup = imx7_csi_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops imx7_csi_video_ops = {
+ .s_stream = imx7_csi_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx7_csi_pad_ops = {
+ .init_cfg = imx7_csi_init_cfg,
+ .enum_mbus_code = imx7_csi_enum_mbus_code,
+ .get_fmt = imx7_csi_get_fmt,
+ .set_fmt = imx7_csi_set_fmt,
+ .link_validate = imx7_csi_pad_link_validate,
+};
+
+static const struct v4l2_subdev_ops imx7_csi_subdev_ops = {
+ .video = &imx7_csi_video_ops,
+ .pad = &imx7_csi_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops imx7_csi_internal_ops = {
+ .registered = imx7_csi_registered,
+ .unregistered = imx7_csi_unregistered,
+};
+
+static int imx7_csi_parse_endpoint(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
+{
+ return fwnode_device_is_available(asd->match.fwnode) ? 0 : -EINVAL;
+}
+
+static int imx7_csi_clocks_get(struct imx7_csi *csi)
+{
+ struct device *dev = csi->dev;
+ int i;
+
+ csi->num_clks = ARRAY_SIZE(imx7_csi_clk_id);
+ csi->clks = devm_kcalloc(dev, csi->num_clks, sizeof(*csi->clks),
+ GFP_KERNEL);
+
+ if (!csi->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < csi->num_clks; i++)
+ csi->clks[i].id = imx7_csi_clk_id[i];
+
+ return devm_clk_bulk_get(dev, csi->num_clks, csi->clks);
+}
+
+static int imx7_csi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct imx_media_dev *imxmd;
+ struct imx7_csi *csi;
+ struct resource *res;
+ int ret;
+
+ csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->dev = dev;
+
+ ret = imx7_csi_clocks_get(csi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get clocks");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csi->irq = platform_get_irq(pdev, 0);
+ if (!res || csi->irq < 0) {
+ dev_err(dev, "Missing platform resources data\n");
+ return -ENODEV;
+ }
+
+ csi->regbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(csi->regbase)) {
+ dev_err(dev, "Failed platform resources map\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&csi->irqlock);
+ mutex_init(&csi->lock);
+
+ /* install interrupt handler */
+ ret = devm_request_irq(dev, csi->irq, imx7_csi_irq_handler, 0, "csi",
+ (void *)csi);
+ if (ret < 0) {
+ dev_err(dev, "Request CSI IRQ failed.\n");
+ ret = -ENODEV;
+ goto destroy_mutex;
+ }
+
+ /* add media device */
+ imxmd = imx_media_dev_init(dev);
+ if (IS_ERR(imxmd)) {
+ ret = PTR_ERR(imxmd);
+ goto destroy_mutex;
+ }
+ platform_set_drvdata(pdev, &csi->sd);
+
+ ret = imx_media_of_add_csi(imxmd, node);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = imx_media_dev_notifier_register(imxmd);
+ if (ret < 0)
+ goto cleanup;
+
+ csi->imxmd = imxmd;
+ v4l2_subdev_init(&csi->sd, &imx7_csi_subdev_ops);
+ v4l2_set_subdevdata(&csi->sd, csi);
+ csi->sd.internal_ops = &imx7_csi_internal_ops;
+ csi->sd.entity.ops = &imx7_csi_entity_ops;
+ csi->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi->sd.dev = &pdev->dev;
+ csi->sd.owner = THIS_MODULE;
+ csi->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ csi->sd.grp_id = IMX_MEDIA_GRP_ID_CSI;
+ snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
+
+ csi->vdev = imx_media_capture_device_init(&csi->sd, IMX7_CSI_PAD_SRC);
+ if (IS_ERR(csi->vdev)) {
+ ret = PTR_ERR(csi->vdev);
+ goto cleanup;
+ }
+
+ v4l2_ctrl_handler_init(&csi->ctrl_hdlr, 0);
+ csi->sd.ctrl_handler = &csi->ctrl_hdlr;
+
+ ret = v4l2_async_register_fwnode_subdev(&csi->sd,
+ sizeof(struct v4l2_async_subdev),
+ NULL, 0,
+ imx7_csi_parse_endpoint);
+ if (ret)
+ goto free;
+
+ return 0;
+
+free:
+ imx_media_capture_device_unregister(csi->vdev);
+ imx_media_capture_device_remove(csi->vdev);
+ v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
+
+cleanup:
+ v4l2_async_notifier_cleanup(&imxmd->notifier);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+ media_device_unregister(&imxmd->md);
+ media_device_cleanup(&imxmd->md);
+
+destroy_mutex:
+ mutex_destroy(&csi->lock);
+
+ return ret;
+}
+
+static int imx7_csi_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct imx_media_dev *imxmd = csi->imxmd;
+
+ v4l2_async_notifier_unregister(&imxmd->notifier);
+ v4l2_async_notifier_cleanup(&imxmd->notifier);
+
+ media_device_unregister(&imxmd->md);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+ media_device_cleanup(&imxmd->md);
+
+ imx_media_capture_device_unregister(csi->vdev);
+ imx_media_capture_device_remove(csi->vdev);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
+
+ mutex_destroy(&csi->lock);
+
+ return 0;
+}
+
+static const struct of_device_id imx7_csi_of_match[] = {
+ { .compatible = "fsl,imx7-csi" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx7_csi_of_match);
+
+static struct platform_driver imx7_csi_driver = {
+ .probe = imx7_csi_probe,
+ .remove = imx7_csi_remove,
+ .driver = {
+ .of_match_table = imx7_csi_of_match,
+ .name = "imx7-csi",
+ },
+};
+module_platform_driver(imx7_csi_driver);
+
+MODULE_DESCRIPTION("i.MX7 CSI subdev driver");
+MODULE_AUTHOR("Rui Miguel Silva <rui.silva@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx7-csi");
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
new file mode 100644
index 000000000000..2ddcc42ab8ff
--- /dev/null
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -0,0 +1,1160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale i.MX7 SoC series MIPI-CSI V3.3 receiver driver
+ *
+ * Copyright (C) 2019 Linaro Ltd
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#include "imx-media.h"
+
+#define CSIS_DRIVER_NAME "imx7-mipi-csis"
+#define CSIS_SUBDEV_NAME CSIS_DRIVER_NAME
+
+#define CSIS_PAD_SINK 0
+#define CSIS_PAD_SOURCE 1
+#define CSIS_PADS_NUM 2
+
+#define MIPI_CSIS_DEF_PIX_WIDTH 640
+#define MIPI_CSIS_DEF_PIX_HEIGHT 480
+
+/* Register map definition */
+
+/* CSIS common control */
+#define MIPI_CSIS_CMN_CTRL 0x04
+#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW BIT(16)
+#define MIPI_CSIS_CMN_CTRL_INTER_MODE BIT(10)
+#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL BIT(2)
+#define MIPI_CSIS_CMN_CTRL_RESET BIT(1)
+#define MIPI_CSIS_CMN_CTRL_ENABLE BIT(0)
+
+#define MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET 8
+#define MIPI_CSIS_CMN_CTRL_LANE_NR_MASK (3 << 8)
+
+/* CSIS clock control */
+#define MIPI_CSIS_CLK_CTRL 0x08
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH3(x) ((x) << 28)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH2(x) ((x) << 24)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH1(x) ((x) << 20)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH0(x) ((x) << 16)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MSK (0xf << 4)
+#define MIPI_CSIS_CLK_CTRL_WCLK_SRC BIT(0)
+
+/* CSIS Interrupt mask */
+#define MIPI_CSIS_INTMSK 0x10
+#define MIPI_CSIS_INTMSK_EVEN_BEFORE BIT(31)
+#define MIPI_CSIS_INTMSK_EVEN_AFTER BIT(30)
+#define MIPI_CSIS_INTMSK_ODD_BEFORE BIT(29)
+#define MIPI_CSIS_INTMSK_ODD_AFTER BIT(28)
+#define MIPI_CSIS_INTMSK_FRAME_START BIT(24)
+#define MIPI_CSIS_INTMSK_FRAME_END BIT(20)
+#define MIPI_CSIS_INTMSK_ERR_SOT_HS BIT(16)
+#define MIPI_CSIS_INTMSK_ERR_LOST_FS BIT(12)
+#define MIPI_CSIS_INTMSK_ERR_LOST_FE BIT(8)
+#define MIPI_CSIS_INTMSK_ERR_OVER BIT(4)
+#define MIPI_CSIS_INTMSK_ERR_WRONG_CFG BIT(3)
+#define MIPI_CSIS_INTMSK_ERR_ECC BIT(2)
+#define MIPI_CSIS_INTMSK_ERR_CRC BIT(1)
+#define MIPI_CSIS_INTMSK_ERR_UNKNOWN BIT(0)
+
+/* CSIS Interrupt source */
+#define MIPI_CSIS_INTSRC 0x14
+#define MIPI_CSIS_INTSRC_EVEN_BEFORE BIT(31)
+#define MIPI_CSIS_INTSRC_EVEN_AFTER BIT(30)
+#define MIPI_CSIS_INTSRC_EVEN BIT(30)
+#define MIPI_CSIS_INTSRC_ODD_BEFORE BIT(29)
+#define MIPI_CSIS_INTSRC_ODD_AFTER BIT(28)
+#define MIPI_CSIS_INTSRC_ODD (0x3 << 28)
+#define MIPI_CSIS_INTSRC_NON_IMAGE_DATA (0xf << 28)
+#define MIPI_CSIS_INTSRC_FRAME_START BIT(24)
+#define MIPI_CSIS_INTSRC_FRAME_END BIT(20)
+#define MIPI_CSIS_INTSRC_ERR_SOT_HS BIT(16)
+#define MIPI_CSIS_INTSRC_ERR_LOST_FS BIT(12)
+#define MIPI_CSIS_INTSRC_ERR_LOST_FE BIT(8)
+#define MIPI_CSIS_INTSRC_ERR_OVER BIT(4)
+#define MIPI_CSIS_INTSRC_ERR_WRONG_CFG BIT(3)
+#define MIPI_CSIS_INTSRC_ERR_ECC BIT(2)
+#define MIPI_CSIS_INTSRC_ERR_CRC BIT(1)
+#define MIPI_CSIS_INTSRC_ERR_UNKNOWN BIT(0)
+#define MIPI_CSIS_INTSRC_ERRORS 0xfffff
+
+/* D-PHY status control */
+#define MIPI_CSIS_DPHYSTATUS 0x20
+#define MIPI_CSIS_DPHYSTATUS_ULPS_DAT BIT(8)
+#define MIPI_CSIS_DPHYSTATUS_STOPSTATE_DAT BIT(4)
+#define MIPI_CSIS_DPHYSTATUS_ULPS_CLK BIT(1)
+#define MIPI_CSIS_DPHYSTATUS_STOPSTATE_CLK BIT(0)
+
+/* D-PHY common control */
+#define MIPI_CSIS_DPHYCTRL 0x24
+#define MIPI_CSIS_DPHYCTRL_HSS_MASK (0xff << 24)
+#define MIPI_CSIS_DPHYCTRL_HSS_OFFSET 24
+#define MIPI_CSIS_DPHYCTRL_SCLKS_MASK (0x3 << 22)
+#define MIPI_CSIS_DPHYCTRL_SCLKS_OFFSET 22
+#define MIPI_CSIS_DPHYCTRL_DPDN_SWAP_CLK BIT(6)
+#define MIPI_CSIS_DPHYCTRL_DPDN_SWAP_DAT BIT(5)
+#define MIPI_CSIS_DPHYCTRL_ENABLE_DAT BIT(1)
+#define MIPI_CSIS_DPHYCTRL_ENABLE_CLK BIT(0)
+#define MIPI_CSIS_DPHYCTRL_ENABLE (0x1f << 0)
+
+/* D-PHY Master and Slave Control register Low */
+#define MIPI_CSIS_DPHYBCTRL_L 0x30
+/* D-PHY Master and Slave Control register High */
+#define MIPI_CSIS_DPHYBCTRL_H 0x34
+/* D-PHY Slave Control register Low */
+#define MIPI_CSIS_DPHYSCTRL_L 0x38
+/* D-PHY Slave Control register High */
+#define MIPI_CSIS_DPHYSCTRL_H 0x3c
+
+/* ISP Configuration register */
+#define MIPI_CSIS_ISPCONFIG_CH0 0x40
+#define MIPI_CSIS_ISPCONFIG_CH1 0x50
+#define MIPI_CSIS_ISPCONFIG_CH2 0x60
+#define MIPI_CSIS_ISPCONFIG_CH3 0x70
+
+#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP_MSK (0xff << 24)
+#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP(x) ((x) << 24)
+#define MIPI_CSIS_ISPCFG_DOUBLE_CMPNT BIT(12)
+#define MIPI_CSIS_ISPCFG_ALIGN_32BIT BIT(11)
+#define MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT (0x1e << 2)
+#define MIPI_CSIS_ISPCFG_FMT_RAW8 (0x2a << 2)
+#define MIPI_CSIS_ISPCFG_FMT_RAW10 (0x2b << 2)
+#define MIPI_CSIS_ISPCFG_FMT_RAW12 (0x2c << 2)
+
+/* User defined formats, x = 1...4 */
+#define MIPI_CSIS_ISPCFG_FMT_USER(x) ((0x30 + (x) - 1) << 2)
+#define MIPI_CSIS_ISPCFG_FMT_MASK (0x3f << 2)
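+
+/* e.g. MIPI_CSIS_ISPCFG_FMT_USER(1) = 0x30 << 2 = 0xc0 (illustrative) */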
+
+/* ISP Image Resolution register */
+#define MIPI_CSIS_ISPRESOL_CH0 0x44
+#define MIPI_CSIS_ISPRESOL_CH1 0x54
+#define MIPI_CSIS_ISPRESOL_CH2 0x64
+#define MIPI_CSIS_ISPRESOL_CH3 0x74
+#define CSIS_MAX_PIX_WIDTH 0xffff
+#define CSIS_MAX_PIX_HEIGHT 0xffff
+
+/* ISP SYNC register */
+#define MIPI_CSIS_ISPSYNC_CH0 0x48
+#define MIPI_CSIS_ISPSYNC_CH1 0x58
+#define MIPI_CSIS_ISPSYNC_CH2 0x68
+#define MIPI_CSIS_ISPSYNC_CH3 0x78
+
+#define MIPI_CSIS_ISPSYNC_HSYNC_LINTV_OFFSET 18
+#define MIPI_CSIS_ISPSYNC_VSYNC_SINTV_OFFSET 12
+#define MIPI_CSIS_ISPSYNC_VSYNC_EINTV_OFFSET 0
+
+/* Non-image packet data buffers */
+#define MIPI_CSIS_PKTDATA_ODD 0x2000
+#define MIPI_CSIS_PKTDATA_EVEN 0x3000
+#define MIPI_CSIS_PKTDATA_SIZE SZ_4K
+
+#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
+
+enum {
+ ST_POWERED = 1,
+ ST_STREAMING = 2,
+ ST_SUSPENDED = 4,
+};
+
+struct mipi_csis_event {
+ u32 mask;
+ const char * const name;
+ unsigned int counter;
+};
+
+static const struct mipi_csis_event mipi_csis_events[] = {
+ /* Errors */
+ { MIPI_CSIS_INTSRC_ERR_SOT_HS, "SOT Error" },
+ { MIPI_CSIS_INTSRC_ERR_LOST_FS, "Lost Frame Start Error" },
+ { MIPI_CSIS_INTSRC_ERR_LOST_FE, "Lost Frame End Error" },
+ { MIPI_CSIS_INTSRC_ERR_OVER, "FIFO Overflow Error" },
+ { MIPI_CSIS_INTSRC_ERR_WRONG_CFG, "Wrong Configuration Error" },
+ { MIPI_CSIS_INTSRC_ERR_ECC, "ECC Error" },
+ { MIPI_CSIS_INTSRC_ERR_CRC, "CRC Error" },
+ { MIPI_CSIS_INTSRC_ERR_UNKNOWN, "Unknown Error" },
+ /* Non-image data receive events */
+ { MIPI_CSIS_INTSRC_EVEN_BEFORE, "Non-image data before even frame" },
+ { MIPI_CSIS_INTSRC_EVEN_AFTER, "Non-image data after even frame" },
+ { MIPI_CSIS_INTSRC_ODD_BEFORE, "Non-image data before odd frame" },
+ { MIPI_CSIS_INTSRC_ODD_AFTER, "Non-image data after odd frame" },
+ /* Frame start/end */
+ { MIPI_CSIS_INTSRC_FRAME_START, "Frame Start" },
+ { MIPI_CSIS_INTSRC_FRAME_END, "Frame End" },
+};
+
+#define MIPI_CSIS_NUM_EVENTS ARRAY_SIZE(mipi_csis_events)
+
+static const char * const mipi_csis_clk_id[] = {"pclk", "wrap", "phy"};
+
+struct csis_hw_reset {
+ struct regmap *src;
+ u8 req_src;
+ u8 rst_bit;
+};
+
+struct csi_state {
+ /* lock elements below */
+ struct mutex lock;
+ /* lock for event handler */
+ spinlock_t slock;
+ struct device *dev;
+ struct media_pad pads[CSIS_PADS_NUM];
+ struct v4l2_subdev mipi_sd;
+ struct v4l2_subdev *src_sd;
+
+ u8 index;
+ struct platform_device *pdev;
+ struct phy *phy;
+ void __iomem *regs;
+ struct clk *wrap_clk;
+ int irq;
+ u32 flags;
+
+ struct dentry *debugfs_root;
+ bool debug;
+
+ int num_clks;
+ struct clk_bulk_data *clks;
+
+ u32 clk_frequency;
+ u32 hs_settle;
+
+ struct reset_control *mrst;
+
+ const struct csis_pix_format *csis_fmt;
+ struct v4l2_mbus_framefmt format_mbus;
+
+ struct v4l2_fwnode_bus_mipi_csi2 bus;
+
+ struct mipi_csis_event events[MIPI_CSIS_NUM_EVENTS];
+
+ struct v4l2_async_notifier subdev_notifier;
+
+ struct csis_hw_reset hw_reset;
+ struct regulator *mipi_phy_regulator;
+ bool sink_linked;
+};
+
+struct csis_pix_format {
+ unsigned int pix_width_alignment;
+ u32 code;
+ u32 fmt_reg;
+ u8 data_alignment;
+};
+
+static const struct csis_pix_format mipi_csis_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .data_alignment = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
+ .data_alignment = 16,
+ }
+};
+
+#define mipi_csis_write(__csis, __r, __v) writel(__v, (__csis)->regs + (__r))
+#define mipi_csis_read(__csis, __r) readl((__csis)->regs + (__r))
+
+static int mipi_csis_dump_regs(struct csi_state *state)
+{
+ struct device *dev = &state->pdev->dev;
+ unsigned int i;
+ u32 cfg;
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x04, "CTRL" },
+ { 0x24, "DPHYCTRL" },
+ { 0x08, "CLKCTRL" },
+ { 0x20, "DPHYSTS" },
+ { 0x10, "INTMSK" },
+ { 0x40, "CONFIG_CH0" },
+ { 0xC0, "DBG_CONFIG" },
+ { 0x38, "DPHYSLAVE_L" },
+ { 0x3C, "DPHYSLAVE_H" },
+ };
+
+ dev_info(dev, "--- REGISTERS ---\n");
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ cfg = mipi_csis_read(state, registers[i].offset);
+ dev_info(dev, "%12s: 0x%08x\n", registers[i].name, cfg);
+ }
+
+ return 0;
+}
+
+static struct csi_state *mipi_sd_to_csis_state(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csi_state, mipi_sd);
+}
+
+static const struct csis_pix_format *find_csis_format(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mipi_csis_formats); i++)
+ if (code == mipi_csis_formats[i].code)
+ return &mipi_csis_formats[i];
+ return NULL;
+}
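+
+/*
+ * An unknown media bus code returns NULL here; mipi_csis_try_format()
+ * then falls back to mipi_csis_formats[0] (SBGGR10).
+ */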
+
+static void mipi_csis_enable_interrupts(struct csi_state *state, bool on)
+{
+ mipi_csis_write(state, MIPI_CSIS_INTMSK, on ? 0xffffffff : 0);
+}
+
+static void mipi_csis_sw_reset(struct csi_state *state)
+{
+ u32 val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
+
+ mipi_csis_write(state, MIPI_CSIS_CMN_CTRL,
+ val | MIPI_CSIS_CMN_CTRL_RESET);
+ usleep_range(10, 20);
+}
+
+static int mipi_csis_phy_init(struct csi_state *state)
+{
+ state->mipi_phy_regulator = devm_regulator_get(state->dev, "phy");
+
+ return regulator_set_voltage(state->mipi_phy_regulator, 1000000,
+ 1000000);
+}
+
+static void mipi_csis_phy_reset(struct csi_state *state)
+{
+ reset_control_assert(state->mrst);
+
+ msleep(20);
+
+ reset_control_deassert(state->mrst);
+}
+
+static void mipi_csis_system_enable(struct csi_state *state, int on)
+{
+ u32 val, mask;
+
+ val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
+ if (on)
+ val |= MIPI_CSIS_CMN_CTRL_ENABLE;
+ else
+ val &= ~MIPI_CSIS_CMN_CTRL_ENABLE;
+ mipi_csis_write(state, MIPI_CSIS_CMN_CTRL, val);
+
+ val = mipi_csis_read(state, MIPI_CSIS_DPHYCTRL);
+ val &= ~MIPI_CSIS_DPHYCTRL_ENABLE;
+ if (on) {
+ mask = (1 << (state->bus.num_data_lanes + 1)) - 1;
+ val |= (mask & MIPI_CSIS_DPHYCTRL_ENABLE);
+ }
+ mipi_csis_write(state, MIPI_CSIS_DPHYCTRL, val);
+}
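+
+/*
+ * Worked example for the D-PHY enable mask above (illustrative): with
+ * 2 data lanes, mask = (1 << 3) - 1 = 0x7, enabling the clock lane
+ * (bit 0) and two data lanes within MIPI_CSIS_DPHYCTRL_ENABLE.
+ */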
+
+/* Called with the state.lock mutex held */
+static void __mipi_csis_set_format(struct csi_state *state)
+{
+ struct v4l2_mbus_framefmt *mf = &state->format_mbus;
+ u32 val;
+
+ /* Color format */
+ val = mipi_csis_read(state, MIPI_CSIS_ISPCONFIG_CH0);
+ val = (val & ~MIPI_CSIS_ISPCFG_FMT_MASK) | state->csis_fmt->fmt_reg;
+ mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH0, val);
+
+ /* Pixel resolution */
+ val = mf->width | (mf->height << 16);
+ mipi_csis_write(state, MIPI_CSIS_ISPRESOL_CH0, val);
+}
+
+static void mipi_csis_set_hsync_settle(struct csi_state *state, int hs_settle)
+{
+ u32 val = mipi_csis_read(state, MIPI_CSIS_DPHYCTRL);
+
+ val = ((val & ~MIPI_CSIS_DPHYCTRL_HSS_MASK) | (hs_settle << 24));
+
+ mipi_csis_write(state, MIPI_CSIS_DPHYCTRL, val);
+}
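+
+/*
+ * The open-coded << 24 above matches MIPI_CSIS_DPHYCTRL_HSS_OFFSET:
+ * hs_settle lands in the DPHYCTRL HSS field (bits 31:24).
+ */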
+
+static void mipi_csis_set_params(struct csi_state *state)
+{
+ int lanes = state->bus.num_data_lanes;
+ u32 val;
+
+ val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
+ val &= ~MIPI_CSIS_CMN_CTRL_LANE_NR_MASK;
+ val |= (lanes - 1) << MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET;
+ mipi_csis_write(state, MIPI_CSIS_CMN_CTRL, val);
+
+ __mipi_csis_set_format(state);
+
+ mipi_csis_set_hsync_settle(state, state->hs_settle);
+
+ val = mipi_csis_read(state, MIPI_CSIS_ISPCONFIG_CH0);
+ if (state->csis_fmt->data_alignment == 32)
+ val |= MIPI_CSIS_ISPCFG_ALIGN_32BIT;
+ else
+ val &= ~MIPI_CSIS_ISPCFG_ALIGN_32BIT;
+ mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH0, val);
+
+ val = (0 << MIPI_CSIS_ISPSYNC_HSYNC_LINTV_OFFSET) |
+ (0 << MIPI_CSIS_ISPSYNC_VSYNC_SINTV_OFFSET) |
+ (0 << MIPI_CSIS_ISPSYNC_VSYNC_EINTV_OFFSET);
+ mipi_csis_write(state, MIPI_CSIS_ISPSYNC_CH0, val);
+
+ val = mipi_csis_read(state, MIPI_CSIS_CLK_CTRL);
+ val &= ~MIPI_CSIS_CLK_CTRL_WCLK_SRC;
+ if (state->wrap_clk)
+ val |= MIPI_CSIS_CLK_CTRL_WCLK_SRC;
+ else
+ val &= ~MIPI_CSIS_CLK_CTRL_WCLK_SRC;
+
+ val |= MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH0(15);
+ val &= ~MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MSK;
+ mipi_csis_write(state, MIPI_CSIS_CLK_CTRL, val);
+
+ mipi_csis_write(state, MIPI_CSIS_DPHYBCTRL_L, 0x1f4);
+ mipi_csis_write(state, MIPI_CSIS_DPHYBCTRL_H, 0);
+
+ /* Update the shadow register. */
+ val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
+ mipi_csis_write(state, MIPI_CSIS_CMN_CTRL,
+ val | MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW |
+ MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL);
+}
+
+static void mipi_csis_clk_enable(struct csi_state *state)
+{
+ int ret;
+
+ ret = clk_bulk_prepare_enable(state->num_clks, state->clks);
+ if (ret < 0)
+ dev_err(state->dev, "failed to enable clocks\n");
+}
+
+static void mipi_csis_clk_disable(struct csi_state *state)
+{
+ clk_bulk_disable_unprepare(state->num_clks, state->clks);
+}
+
+static int mipi_csis_clk_get(struct csi_state *state)
+{
+ struct device *dev = &state->pdev->dev;
+ unsigned int i;
+ int ret;
+
+ state->num_clks = ARRAY_SIZE(mipi_csis_clk_id);
+ state->clks = devm_kcalloc(dev, state->num_clks, sizeof(*state->clks),
+ GFP_KERNEL);
+
+ if (!state->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < state->num_clks; i++)
+ state->clks[i].id = mipi_csis_clk_id[i];
+
+ ret = devm_clk_bulk_get(dev, state->num_clks, state->clks);
+ if (ret < 0)
+ return ret;
+
+ state->wrap_clk = devm_clk_get(dev, "wrap");
+ if (IS_ERR(state->wrap_clk))
+ return PTR_ERR(state->wrap_clk);
+
+ /* Set clock rate */
+ ret = clk_set_rate(state->wrap_clk, state->clk_frequency);
+ if (ret < 0)
+ dev_err(dev, "set rate=%d failed: %d\n", state->clk_frequency,
+ ret);
+
+ return ret;
+}
+
+static void mipi_csis_start_stream(struct csi_state *state)
+{
+ mipi_csis_sw_reset(state);
+ mipi_csis_set_params(state);
+ mipi_csis_system_enable(state, true);
+ mipi_csis_enable_interrupts(state, true);
+}
+
+static void mipi_csis_stop_stream(struct csi_state *state)
+{
+ mipi_csis_enable_interrupts(state, false);
+ mipi_csis_system_enable(state, false);
+}
+
+static void mipi_csis_clear_counters(struct csi_state *state)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&state->slock, flags);
+ for (i = 0; i < MIPI_CSIS_NUM_EVENTS; i++)
+ state->events[i].counter = 0;
+ spin_unlock_irqrestore(&state->slock, flags);
+}
+
+static void mipi_csis_log_counters(struct csi_state *state, bool non_errors)
+{
+ int i = non_errors ? MIPI_CSIS_NUM_EVENTS : MIPI_CSIS_NUM_EVENTS - 4;
+ struct device *dev = &state->pdev->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->slock, flags);
+
+ for (i--; i >= 0; i--) {
+ if (state->events[i].counter > 0 || state->debug)
+ dev_info(dev, "%s events: %d\n", state->events[i].name,
+ state->events[i].counter);
+ }
+ spin_unlock_irqrestore(&state->slock, flags);
+}
+
+/*
+ * V4L2 subdev operations
+ */
+static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ int ret = 0;
+
+ if (enable) {
+ mipi_csis_clear_counters(state);
+ ret = pm_runtime_get_sync(&state->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&state->pdev->dev);
+ return ret;
+ }
+ ret = v4l2_subdev_call(state->src_sd, core, s_power, 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ mutex_lock(&state->lock);
+ if (enable) {
+ if (state->flags & ST_SUSPENDED) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ mipi_csis_start_stream(state);
+ ret = v4l2_subdev_call(state->src_sd, video, s_stream, 1);
+ if (ret < 0)
+ goto unlock;
+
+ mipi_csis_log_counters(state, true);
+
+ state->flags |= ST_STREAMING;
+ } else {
+ v4l2_subdev_call(state->src_sd, video, s_stream, 0);
+ ret = v4l2_subdev_call(state->src_sd, core, s_power, 0);
+ mipi_csis_stop_stream(state);
+ state->flags &= ~ST_STREAMING;
+ if (state->debug)
+ mipi_csis_log_counters(state, true);
+ }
+
+unlock:
+ mutex_unlock(&state->lock);
+ if (!enable)
+ pm_runtime_put(&state->pdev->dev);
+
+ return ret;
+}
+
+static int mipi_csis_link_setup(struct media_entity *entity,
+ const struct media_pad *local_pad,
+ const struct media_pad *remote_pad, u32 flags)
+{
+ struct v4l2_subdev *mipi_sd = media_entity_to_v4l2_subdev(entity);
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(state->dev, "link setup %s -> %s\n", remote_pad->entity->name,
+ local_pad->entity->name);
+
+ remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+
+ mutex_lock(&state->lock);
+
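+ /* Allow at most one enabled link on either the sink or source side. */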
+ if (local_pad->flags & MEDIA_PAD_FL_SOURCE) {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (state->sink_linked) {
+ ret = -EBUSY;
+ goto out;
+ }
+ state->sink_linked = true;
+ } else {
+ state->sink_linked = false;
+ }
+ } else {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (state->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ state->src_sd = remote_sd;
+ } else {
+ state->src_sd = NULL;
+ }
+ }
+
+out:
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_mbus_framefmt *mf;
+ unsigned int i;
+ int ret;
+
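+ /* Initialize the try format on each pad to the default frame size. */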
+ for (i = 0; i < CSIS_PADS_NUM; i++) {
+ mf = v4l2_subdev_get_try_format(mipi_sd, cfg, i);
+
+ ret = imx_media_init_mbus_fmt(mf, MIPI_CSIS_DEF_PIX_WIDTH,
+ MIPI_CSIS_DEF_PIX_HEIGHT, 0,
+ V4L2_FIELD_NONE, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct csis_pix_format const *
+mipi_csis_try_format(struct v4l2_subdev *mipi_sd, struct v4l2_mbus_framefmt *mf)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct csis_pix_format const *csis_fmt;
+
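+ /* Fall back to the first supported format and clamp the frame size. */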
+ csis_fmt = find_csis_format(mf->code);
+ if (!csis_fmt)
+ csis_fmt = &mipi_csis_formats[0];
+
+ v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH,
+ csis_fmt->pix_width_alignment,
+ &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1,
+ 0);
+
+ state->format_mbus.code = csis_fmt->code;
+ state->format_mbus.width = mf->width;
+ state->format_mbus.height = mf->height;
+
+ return csis_fmt;
+}
+
+static struct v4l2_mbus_framefmt *
+mipi_csis_get_format(struct csi_state *state,
+ struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which,
+ unsigned int pad)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&state->mipi_sd, cfg, pad);
+
+ return &state->format_mbus;
+}
+
+static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct csis_pix_format const *csis_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (sdformat->pad >= CSIS_PADS_NUM)
+ return -EINVAL;
+
+ fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
+
+ mutex_lock(&state->lock);
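+ /* The source pad format mirrors the sink pad and is not settable. */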
+ if (sdformat->pad == CSIS_PAD_SOURCE) {
+ sdformat->format = *fmt;
+ goto unlock;
+ }
+
+ csis_fmt = mipi_csis_try_format(mipi_sd, &sdformat->format);
+
+ sdformat->format = *fmt;
+
+ if (csis_fmt && sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ state->csis_fmt = csis_fmt;
+ else
+ cfg->try_fmt = sdformat->format;
+
+unlock:
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ mutex_lock(&state->lock);
+
+ fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
+
+ sdformat->format = *fmt;
+
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static int mipi_csis_log_status(struct v4l2_subdev *mipi_sd)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+
+ mutex_lock(&state->lock);
+ mipi_csis_log_counters(state, true);
+ if (state->debug && (state->flags & ST_POWERED))
+ mipi_csis_dump_regs(state);
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
+{
+ struct csi_state *state = dev_id;
+ unsigned long flags;
+ unsigned int i;
+ u32 status;
+
+ status = mipi_csis_read(state, MIPI_CSIS_INTSRC);
+
+ spin_lock_irqsave(&state->slock, flags);
+
+ /* Update the event/error counters */
+ if ((status & MIPI_CSIS_INTSRC_ERRORS) || state->debug) {
+ for (i = 0; i < MIPI_CSIS_NUM_EVENTS; i++) {
+ if (!(status & state->events[i].mask))
+ continue;
+ state->events[i].counter++;
+ }
+ }
+ spin_unlock_irqrestore(&state->slock, flags);
+
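+ /* Acknowledge the handled interrupts by writing the status back. */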
+ mipi_csis_write(state, MIPI_CSIS_INTSRC, status);
+
+ return IRQ_HANDLED;
+}
+
+static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
+ .log_status = mipi_csis_log_status,
+};
+
+static const struct media_entity_operations mipi_csis_entity_ops = {
+ .link_setup = mipi_csis_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops mipi_csis_video_ops = {
+ .s_stream = mipi_csis_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops mipi_csis_pad_ops = {
+ .init_cfg = mipi_csis_init_cfg,
+ .get_fmt = mipi_csis_get_fmt,
+ .set_fmt = mipi_csis_set_fmt,
+};
+
+static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
+ .core = &mipi_csis_core_ops,
+ .video = &mipi_csis_video_ops,
+ .pad = &mipi_csis_pad_ops,
+};
+
+static int mipi_csis_parse_dt(struct platform_device *pdev,
+ struct csi_state *state)
+{
+ struct device_node *node = pdev->dev.of_node;
+
+ if (of_property_read_u32(node, "clock-frequency",
+ &state->clk_frequency))
+ state->clk_frequency = DEFAULT_SCLK_CSIS_FREQ;
+
+ /* Get the MIPI PHY reset control. */
+ state->mrst = devm_reset_control_get_exclusive(&pdev->dev, "mrst");
+ if (IS_ERR(state->mrst))
+ return PTR_ERR(state->mrst);
+
+ /* Get the optional HS settle time from the device node. */
+ of_property_read_u32(node, "fsl,csis-hs-settle", &state->hs_settle);
+
+ return 0;
+}
+
+static int mipi_csis_pm_resume(struct device *dev, bool runtime);
+
+static int mipi_csis_parse_endpoint(struct device *dev,
+ struct v4l2_fwnode_endpoint *ep,
+ struct v4l2_async_subdev *asd)
+{
+ struct v4l2_subdev *mipi_sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+
+ if (ep->bus_type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(dev, "invalid bus type, must be MIPI CSI2\n");
+ return -EINVAL;
+ }
+
+ state->bus = ep->bus.mipi_csi2;
+
+ dev_dbg(state->dev, "data lanes: %d\n", state->bus.num_data_lanes);
+ dev_dbg(state->dev, "flags: 0x%08x\n", state->bus.flags);
+
+ return 0;
+}
+
+static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
+ struct platform_device *pdev,
+ const struct v4l2_subdev_ops *ops)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ unsigned int sink_port = 0;
+ int ret;
+
+ v4l2_subdev_init(mipi_sd, ops);
+ mipi_sd->owner = THIS_MODULE;
+ snprintf(mipi_sd->name, sizeof(mipi_sd->name), "%s.%d",
+ CSIS_SUBDEV_NAME, state->index);
+
+ mipi_sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ mipi_sd->ctrl_handler = NULL;
+
+ mipi_sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ mipi_sd->entity.ops = &mipi_csis_entity_ops;
+
+ mipi_sd->dev = &pdev->dev;
+
+ state->csis_fmt = &mipi_csis_formats[0];
+ state->format_mbus.code = mipi_csis_formats[0].code;
+ state->format_mbus.width = MIPI_CSIS_DEF_PIX_WIDTH;
+ state->format_mbus.height = MIPI_CSIS_DEF_PIX_HEIGHT;
+ state->format_mbus.field = V4L2_FIELD_NONE;
+
+ v4l2_set_subdevdata(mipi_sd, &pdev->dev);
+
+ ret = v4l2_async_register_fwnode_subdev(mipi_sd,
+ sizeof(struct v4l2_async_subdev),
+ &sink_port, 1,
+ mipi_csis_parse_endpoint);
+ if (ret < 0)
+ dev_err(&pdev->dev, "async fwnode register failed: %d\n", ret);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static int mipi_csis_dump_regs_show(struct seq_file *m, void *private)
+{
+ struct csi_state *state = m->private;
+
+ return mipi_csis_dump_regs(state);
+}
+DEFINE_SHOW_ATTRIBUTE(mipi_csis_dump_regs);
+
+static int __init_or_module mipi_csis_debugfs_init(struct csi_state *state)
+{
+ struct dentry *d;
+
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ state->debugfs_root = debugfs_create_dir(dev_name(state->dev), NULL);
+ if (!state->debugfs_root)
+ return -ENOMEM;
+
+ d = debugfs_create_bool("debug_enable", 0600, state->debugfs_root,
+ &state->debug);
+ if (!d)
+ goto remove_debugfs;
+
+ d = debugfs_create_file("dump_regs", 0600, state->debugfs_root,
+ state, &mipi_csis_dump_regs_fops);
+ if (!d)
+ goto remove_debugfs;
+
+ return 0;
+
+remove_debugfs:
+ debugfs_remove_recursive(state->debugfs_root);
+
+ return -ENOMEM;
+}
+
+static void mipi_csis_debugfs_exit(struct csi_state *state)
+{
+ debugfs_remove_recursive(state->debugfs_root);
+}
+
+#else
+static int mipi_csis_debugfs_init(struct csi_state *state __maybe_unused)
+{
+ return 0;
+}
+
+static void mipi_csis_debugfs_exit(struct csi_state *state __maybe_unused)
+{
+}
+#endif
+
+static int mipi_csis_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem_res;
+ struct csi_state *state;
+ int ret = -ENOMEM;
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ spin_lock_init(&state->slock);
+
+ state->pdev = pdev;
+ state->dev = dev;
+
+ ret = mipi_csis_parse_dt(pdev, state);
+ if (ret < 0) {
+ dev_err(dev, "Failed to parse device tree: %d\n", ret);
+ return ret;
+ }
+
+ mipi_csis_phy_init(state);
+ mipi_csis_phy_reset(state);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ state->regs = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(state->regs))
+ return PTR_ERR(state->regs);
+
+ state->irq = platform_get_irq(pdev, 0);
+ if (state->irq < 0) {
+ dev_err(dev, "Failed to get irq\n");
+ return state->irq;
+ }
+
+ ret = mipi_csis_clk_get(state);
+ if (ret < 0)
+ return ret;
+
+ mipi_csis_clk_enable(state);
+
+ ret = devm_request_irq(dev, state->irq, mipi_csis_irq_handler,
+ 0, dev_name(dev), state);
+ if (ret) {
+ dev_err(dev, "Interrupt request failed\n");
+ goto disable_clock;
+ }
+
+ platform_set_drvdata(pdev, &state->mipi_sd);
+
+ mutex_init(&state->lock);
+ ret = mipi_csis_subdev_init(&state->mipi_sd, pdev,
+ &mipi_csis_subdev_ops);
+ if (ret < 0)
+ goto disable_clock;
+
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&state->mipi_sd.entity, CSIS_PADS_NUM,
+ state->pads);
+ if (ret < 0)
+ goto unregister_subdev;
+
+ memcpy(state->events, mipi_csis_events, sizeof(state->events));
+
+ mipi_csis_debugfs_init(state);
+ pm_runtime_enable(dev);
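+ /* Resume the device manually when runtime PM is not available. */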
+ if (!pm_runtime_enabled(dev)) {
+ ret = mipi_csis_pm_resume(dev, true);
+ if (ret < 0)
+ goto unregister_all;
+ }
+
+ dev_info(&pdev->dev, "lanes: %d, hs_settle: %u, wclk: %d, freq: %u\n",
+ state->bus.num_data_lanes, state->hs_settle,
+ state->wrap_clk ? 1 : 0, state->clk_frequency);
+
+ return 0;
+
+unregister_all:
+ mipi_csis_debugfs_exit(state);
+ media_entity_cleanup(&state->mipi_sd.entity);
+unregister_subdev:
+ v4l2_async_unregister_subdev(&state->mipi_sd);
+disable_clock:
+ mipi_csis_clk_disable(state);
+ mutex_destroy(&state->lock);
+
+ return ret;
+}
+
+static int mipi_csis_pm_suspend(struct device *dev, bool runtime)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct v4l2_subdev *mipi_sd = platform_get_drvdata(pdev);
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+ if (state->flags & ST_POWERED) {
+ mipi_csis_stop_stream(state);
+ ret = regulator_disable(state->mipi_phy_regulator);
+ if (ret)
+ goto unlock;
+ mipi_csis_clk_disable(state);
+ state->flags &= ~ST_POWERED;
+ if (!runtime)
+ state->flags |= ST_SUSPENDED;
+ }
+
+unlock:
+ mutex_unlock(&state->lock);
+
+ return ret ? -EAGAIN : 0;
+}
+
+static int mipi_csis_pm_resume(struct device *dev, bool runtime)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct v4l2_subdev *mipi_sd = platform_get_drvdata(pdev);
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+ int ret = 0;
+
+ mutex_lock(&state->lock);
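+ /* For a system resume, do nothing unless we were actually suspended. */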
+ if (!runtime && !(state->flags & ST_SUSPENDED))
+ goto unlock;
+
+ if (!(state->flags & ST_POWERED)) {
+ ret = regulator_enable(state->mipi_phy_regulator);
+ if (ret)
+ goto unlock;
+
+ state->flags |= ST_POWERED;
+ mipi_csis_clk_enable(state);
+ }
+ if (state->flags & ST_STREAMING)
+ mipi_csis_start_stream(state);
+
+ state->flags &= ~ST_SUSPENDED;
+
+unlock:
+ mutex_unlock(&state->lock);
+
+ return ret ? -EAGAIN : 0;
+}
+
+static int __maybe_unused mipi_csis_suspend(struct device *dev)
+{
+ return mipi_csis_pm_suspend(dev, false);
+}
+
+static int __maybe_unused mipi_csis_resume(struct device *dev)
+{
+ return mipi_csis_pm_resume(dev, false);
+}
+
+static int __maybe_unused mipi_csis_runtime_suspend(struct device *dev)
+{
+ return mipi_csis_pm_suspend(dev, true);
+}
+
+static int __maybe_unused mipi_csis_runtime_resume(struct device *dev)
+{
+ return mipi_csis_pm_resume(dev, true);
+}
+
+static int mipi_csis_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *mipi_sd = platform_get_drvdata(pdev);
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+
+ mipi_csis_debugfs_exit(state);
+ v4l2_async_unregister_subdev(&state->mipi_sd);
+ v4l2_async_notifier_unregister(&state->subdev_notifier);
+
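+ /* Runtime PM is being disabled, so power the hardware down manually. */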
+ pm_runtime_disable(&pdev->dev);
+ mipi_csis_pm_suspend(&pdev->dev, true);
+ mipi_csis_clk_disable(state);
+ media_entity_cleanup(&state->mipi_sd.entity);
+ mutex_destroy(&state->lock);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mipi_csis_pm_ops = {
+ SET_RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mipi_csis_suspend, mipi_csis_resume)
+};
+
+static const struct of_device_id mipi_csis_of_match[] = {
+ { .compatible = "fsl,imx7-mipi-csi2", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mipi_csis_of_match);
+
+static struct platform_driver mipi_csis_driver = {
+ .probe = mipi_csis_probe,
+ .remove = mipi_csis_remove,
+ .driver = {
+ .of_match_table = mipi_csis_of_match,
+ .name = CSIS_DRIVER_NAME,
+ .pm = &mipi_csis_pm_ops,
+ },
+};
+
+module_platform_driver(mipi_csis_driver);
+
+MODULE_DESCRIPTION("i.MX7 MIPI CSI-2 Receiver driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx7-mipi-csi2");
diff --git a/drivers/staging/media/imx074/Kconfig b/drivers/staging/media/imx074/Kconfig
deleted file mode 100644
index 229cbeea580b..000000000000
--- a/drivers/staging/media/imx074/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-config SOC_CAMERA_IMX074
- tristate "imx074 support (DEPRECATED)"
- depends on SOC_CAMERA && I2C
- help
- This driver supports IMX074 cameras from Sony
diff --git a/drivers/staging/media/imx074/Makefile b/drivers/staging/media/imx074/Makefile
deleted file mode 100644
index 7d183574aa84..000000000000
--- a/drivers/staging/media/imx074/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
diff --git a/drivers/staging/media/imx074/TODO b/drivers/staging/media/imx074/TODO
deleted file mode 100644
index 15580a4f950c..000000000000
--- a/drivers/staging/media/imx074/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-This sensor driver needs to be converted to a regular
-v4l2 subdev driver. The soc_camera framework is deprecated and
-will be removed in the future. Unless someone does this work this
-sensor driver will be deleted when the soc_camera framework is
-deleted.
diff --git a/drivers/staging/media/ipu3/Makefile b/drivers/staging/media/ipu3/Makefile
index fb146d178bd4..fa7fa3372bcb 100644
--- a/drivers/staging/media/ipu3/Makefile
+++ b/drivers/staging/media/ipu3/Makefile
@@ -9,3 +9,9 @@ ipu3-imgu-objs += \
ipu3-css.o ipu3-v4l2.o ipu3.o
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3-imgu.o
+
+# HACK! While this driver is in bad shape, don't enable several warnings
+# that would otherwise be enabled with W=1
+ccflags-y += $(call cc-disable-warning, packed-not-aligned)
+ccflags-y += $(call cc-disable-warning, type-limits)
+ccflags-y += $(call cc-disable-warning, unused-const-variable)
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
index 905bbb190217..5e55baeaea1a 100644
--- a/drivers/staging/media/ipu3/TODO
+++ b/drivers/staging/media/ipu3/TODO
@@ -8,11 +8,6 @@ staging directory.
- Using ENABLED and IMMUTABLE link flags for the links where those are
relevant. (Sakari)
-- Prefix imgu for all public APIs, i.e. change ipu3_v4l2_register() to
- imgu_v4l2_register(). (Sakari)
-
-- Use V4L2_CTRL_TYPE_MENU for dual-pipe mode control. (Sakari)
-
- IPU3 driver documentation (Laurent)
Add diagram in driver rst to describe output capability.
Comments on configuring v4l2 subdevs for CIO2 and ImgU.
@@ -32,3 +27,5 @@ staging directory.
- Document different operation modes, and which buffer queues are relevant
in each mode. To process an image, which queues require a buffer an in
which ones is it optional?
+
+- Make sure it builds fine with no warnings with W=1
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index ec0b74829351..1e7184e4311d 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -16,12 +16,6 @@
#define V4L2_CID_INTEL_IPU3_BASE (V4L2_CID_USER_BASE + 0x10c0)
#define V4L2_CID_INTEL_IPU3_MODE (V4L2_CID_INTEL_IPU3_BASE + 1)
-/* custom ctrl to set pipe mode */
-enum ipu3_running_mode {
- IPU3_RUNNING_MODE_VIDEO = 0,
- IPU3_RUNNING_MODE_STILL = 1,
-};
-
/******************* ipu3_uapi_stats_3a *******************/
#define IPU3_UAPI_MAX_STRIPES 2
@@ -438,11 +432,11 @@ struct ipu3_uapi_awb_fr_raw_buffer {
*
* @grid_cfg: grid config, default 16x16.
* @bayer_coeff: 1D Filter 1x11 center symmetry/anti-symmetry.
- * coeffcients defaults { 0, 0, 0, 0, 0, 128 }.
+ * coefficients defaults { 0, 0, 0, 0, 0, 128 }.
* Applied on whole image for each Bayer channel separately
* by a weighted sum of its 11x1 neighbors.
* @reserved1: reserved
- * @bayer_sign: sign of filter coeffcients, default 0.
+ * @bayer_sign: sign of filter coefficients, default 0.
* @bayer_nf: normalization factor for the convolution coeffs, to make sure
* total memory needed is within pre-determined range.
* NF should be the log2 of the sum of the abs values of the
diff --git a/drivers/staging/media/ipu3/ipu3-abi.h b/drivers/staging/media/ipu3/ipu3-abi.h
index 25be56ff01c8..e1185602c7fd 100644
--- a/drivers/staging/media/ipu3/ipu3-abi.h
+++ b/drivers/staging/media/ipu3/ipu3-abi.h
@@ -1510,7 +1510,7 @@ struct imgu_abi_blob_info {
/* offset wrt hdr in bytes */
u32 prog_name_offset; /* offset wrt hdr in bytes */
u32 size; /* Size of blob */
- u32 padding_size; /* total cummulative of bytes added
+ u32 padding_size; /* cumulative total of bytes added
* due to section alignment
*/
u32 icache_source; /* Position of icache in blob */
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.c b/drivers/staging/media/ipu3/ipu3-css-fw.c
index 55861aa8fb03..4122d4e42db6 100644
--- a/drivers/staging/media/ipu3/ipu3-css-fw.c
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.c
@@ -10,7 +10,7 @@
#include "ipu3-css-fw.h"
#include "ipu3-dmamap.h"
-static void ipu3_css_fw_show_binary(struct device *dev, struct imgu_fw_info *bi,
+static void imgu_css_fw_show_binary(struct device *dev, struct imgu_fw_info *bi,
const char *name)
{
unsigned int i;
@@ -54,7 +54,7 @@ static void ipu3_css_fw_show_binary(struct device *dev, struct imgu_fw_info *bi,
dev_dbg(dev, "\n");
}
-unsigned int ipu3_css_fw_obgrid_size(const struct imgu_fw_info *bi)
+unsigned int imgu_css_fw_obgrid_size(const struct imgu_fw_info *bi)
{
unsigned int width = DIV_ROUND_UP(bi->info.isp.sp.internal.max_width,
IMGU_OBGRID_TILE_SIZE * 2) + 1;
@@ -69,7 +69,7 @@ unsigned int ipu3_css_fw_obgrid_size(const struct imgu_fw_info *bi)
return obgrid_size;
}
-void *ipu3_css_fw_pipeline_params(struct ipu3_css *css, unsigned int pipe,
+void *imgu_css_fw_pipeline_params(struct imgu_css *css, unsigned int pipe,
enum imgu_abi_param_class cls,
enum imgu_abi_memories mem,
struct imgu_fw_isp_parameter *par,
@@ -91,7 +91,7 @@ void *ipu3_css_fw_pipeline_params(struct ipu3_css *css, unsigned int pipe,
return binary_params + par->offset;
}
-void ipu3_css_fw_cleanup(struct ipu3_css *css)
+void imgu_css_fw_cleanup(struct imgu_css *css)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
@@ -99,7 +99,7 @@ void ipu3_css_fw_cleanup(struct ipu3_css *css)
unsigned int i;
for (i = 0; i < css->fwp->file_header.binary_nr; i++)
- ipu3_dmamap_free(imgu, &css->binary[i]);
+ imgu_dmamap_free(imgu, &css->binary[i]);
kfree(css->binary);
}
if (css->fw)
@@ -109,7 +109,7 @@ void ipu3_css_fw_cleanup(struct ipu3_css *css)
css->fw = NULL;
}
-int ipu3_css_fw_init(struct ipu3_css *css)
+int imgu_css_fw_init(struct imgu_css *css)
{
static const u32 BLOCK_MAX = 65536;
struct imgu_device *imgu = dev_get_drvdata(css->dev);
@@ -227,7 +227,7 @@ int ipu3_css_fw_init(struct ipu3_css *css)
css->fw->size)
goto bad_fw;
- ipu3_css_fw_show_binary(dev, bi, name);
+ imgu_css_fw_show_binary(dev, bi, name);
}
if (css->fw_bl == -1 || css->fw_sp[0] == -1 || css->fw_sp[1] == -1)
@@ -246,7 +246,7 @@ int ipu3_css_fw_init(struct ipu3_css *css)
void *blob = (void *)css->fwp + bi->blob.offset;
size_t size = bi->blob.size;
- if (!ipu3_dmamap_alloc(imgu, &css->binary[i], size)) {
+ if (!imgu_dmamap_alloc(imgu, &css->binary[i], size)) {
r = -ENOMEM;
goto error_out;
}
@@ -260,6 +260,6 @@ bad_fw:
r = -ENODEV;
error_out:
- ipu3_css_fw_cleanup(css);
+ imgu_css_fw_cleanup(css);
return r;
}
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.h b/drivers/staging/media/ipu3/ipu3-css-fw.h
index 07d8bb8b25f3..79ffa7045139 100644
--- a/drivers/staging/media/ipu3/ipu3-css-fw.h
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.h
@@ -175,11 +175,11 @@ struct imgu_fw_header {
/******************* Firmware functions *******************/
-int ipu3_css_fw_init(struct ipu3_css *css);
-void ipu3_css_fw_cleanup(struct ipu3_css *css);
+int imgu_css_fw_init(struct imgu_css *css);
+void imgu_css_fw_cleanup(struct imgu_css *css);
-unsigned int ipu3_css_fw_obgrid_size(const struct imgu_fw_info *bi);
-void *ipu3_css_fw_pipeline_params(struct ipu3_css *css, unsigned int pipe,
+unsigned int imgu_css_fw_obgrid_size(const struct imgu_fw_info *bi);
+void *imgu_css_fw_pipeline_params(struct imgu_css *css, unsigned int pipe,
enum imgu_abi_param_class cls,
enum imgu_abi_memories mem,
struct imgu_fw_isp_parameter *par,
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
index 776206ded83b..4533dacad4be 100644
--- a/drivers/staging/media/ipu3/ipu3-css-params.c
+++ b/drivers/staging/media/ipu3/ipu3-css-params.c
@@ -6,6 +6,7 @@
#include "ipu3-css.h"
#include "ipu3-css-fw.h"
#include "ipu3-tables.h"
+#include "ipu3-css-params.h"
#define DIV_ROUND_CLOSEST_DOWN(a, b) (((a) + ((b) / 2) - 1) / (b))
#define roundclosest_down(a, b) (DIV_ROUND_CLOSEST_DOWN(a, b) * (b))
@@ -13,7 +14,7 @@
#define IPU3_UAPI_ANR_MAX_RESET ((1 << 12) - 1)
#define IPU3_UAPI_ANR_MIN_RESET (((-1) << 12) + 1)
-struct ipu3_css_scaler_info {
+struct imgu_css_scaler_info {
unsigned int phase_step; /* Same for luma/chroma */
int exp_shift;
@@ -24,7 +25,7 @@ struct ipu3_css_scaler_info {
int crop_top;
};
-static unsigned int ipu3_css_scaler_get_exp(unsigned int counter,
+static unsigned int imgu_css_scaler_get_exp(unsigned int counter,
unsigned int divider)
{
int i = fls(divider) - fls(counter);
@@ -40,13 +41,13 @@ static unsigned int ipu3_css_scaler_get_exp(unsigned int counter,
/* Set up the CSS scaler look up table */
static void
-ipu3_css_scaler_setup_lut(unsigned int taps, unsigned int input_width,
+imgu_css_scaler_setup_lut(unsigned int taps, unsigned int input_width,
unsigned int output_width, int phase_step_correction,
const int *coeffs, unsigned int coeffs_size,
- s8 coeff_lut[], struct ipu3_css_scaler_info *info)
+ s8 coeff_lut[], struct imgu_css_scaler_info *info)
{
int tap, phase, phase_sum_left, phase_sum_right;
- int exponent = ipu3_css_scaler_get_exp(output_width, input_width);
+ int exponent = imgu_css_scaler_get_exp(output_width, input_width);
int mantissa = (1 << exponent) * output_width;
unsigned int phase_step;
@@ -113,8 +114,8 @@ ipu3_css_scaler_setup_lut(unsigned int taps, unsigned int input_width,
* (must be perfectly aligned with hardware).
*/
static unsigned int
-ipu3_css_scaler_calc_scaled_output(unsigned int input,
- struct ipu3_css_scaler_info *info)
+imgu_css_scaler_calc_scaled_output(unsigned int input,
+ struct imgu_css_scaler_info *info)
{
unsigned int arg1 = input * info->phase_step +
(1 - IMGU_SCALER_TAPS_Y / 2) * IMGU_SCALER_FIR_PHASES -
@@ -132,10 +133,10 @@ ipu3_css_scaler_calc_scaled_output(unsigned int input,
* and chroma details of a scaler
*/
static void
-ipu3_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
+imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
u32 target_height, struct imgu_abi_osys_config *cfg,
- struct ipu3_css_scaler_info *info_luma,
- struct ipu3_css_scaler_info *info_chroma,
+ struct imgu_css_scaler_info *info_luma,
+ struct imgu_css_scaler_info *info_chroma,
unsigned int *output_width, unsigned int *output_height,
unsigned int *procmode)
{
@@ -164,24 +165,24 @@ ipu3_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
do {
phase_step_correction++;
- ipu3_css_scaler_setup_lut(IMGU_SCALER_TAPS_Y,
+ imgu_css_scaler_setup_lut(IMGU_SCALER_TAPS_Y,
input_width, target_width,
phase_step_correction,
- ipu3_css_downscale_4taps,
+ imgu_css_downscale_4taps,
IMGU_SCALER_DOWNSCALE_4TAPS_LEN,
cfg->scaler_coeffs_luma, info_luma);
- ipu3_css_scaler_setup_lut(IMGU_SCALER_TAPS_UV,
+ imgu_css_scaler_setup_lut(IMGU_SCALER_TAPS_UV,
input_width, target_width,
phase_step_correction,
- ipu3_css_downscale_2taps,
+ imgu_css_downscale_2taps,
IMGU_SCALER_DOWNSCALE_2TAPS_LEN,
cfg->scaler_coeffs_chroma,
info_chroma);
- out_width = ipu3_css_scaler_calc_scaled_output(input_width,
+ out_width = imgu_css_scaler_calc_scaled_output(input_width,
info_luma);
- out_height = ipu3_css_scaler_calc_scaled_output(input_height,
+ out_height = imgu_css_scaler_calc_scaled_output(input_height,
info_luma);
} while ((out_width < target_width || out_height < target_height ||
!IS_ALIGNED(out_height, height_alignment)) &&
@@ -193,7 +194,7 @@ ipu3_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
/********************** Osys routines for scaler****************************/
-static void ipu3_css_osys_set_format(enum imgu_abi_frame_format host_format,
+static void imgu_css_osys_set_format(enum imgu_abi_frame_format host_format,
unsigned int *osys_format,
unsigned int *osys_tiling)
{
@@ -230,7 +231,7 @@ static void ipu3_css_osys_set_format(enum imgu_abi_frame_format host_format,
* Function calculates input frame stripe offset, based
* on output frame stripe offset and filter parameters.
*/
-static int ipu3_css_osys_calc_stripe_offset(int stripe_offset_out,
+static int imgu_css_osys_calc_stripe_offset(int stripe_offset_out,
int fir_phases, int phase_init,
int phase_step, int pad_left)
{
@@ -244,12 +245,12 @@ static int ipu3_css_osys_calc_stripe_offset(int stripe_offset_out,
* Calculate input frame phase, given the output frame
* stripe offset and filter parameters
*/
-static int ipu3_css_osys_calc_stripe_phase_init(int stripe_offset_out,
+static int imgu_css_osys_calc_stripe_phase_init(int stripe_offset_out,
int fir_phases, int phase_init,
int phase_step, int pad_left)
{
int stripe_offset_inp =
- ipu3_css_osys_calc_stripe_offset(stripe_offset_out,
+ imgu_css_osys_calc_stripe_offset(stripe_offset_out,
fir_phases, phase_init,
phase_step, pad_left);
@@ -261,7 +262,7 @@ static int ipu3_css_osys_calc_stripe_phase_init(int stripe_offset_out,
* This function calculates input frame stripe width,
* based on output frame stripe offset and filter parameters
*/
-static int ipu3_css_osys_calc_inp_stripe_width(int stripe_width_out,
+static int imgu_css_osys_calc_inp_stripe_width(int stripe_width_out,
int fir_phases, int phase_init,
int phase_step, int fir_taps,
int pad_left, int pad_right)
@@ -278,7 +279,7 @@ static int ipu3_css_osys_calc_inp_stripe_width(int stripe_width_out,
* This function calculates output frame stripe width, basedi
* on output frame stripe offset and filter parameters
*/
-static int ipu3_css_osys_out_stripe_width(int stripe_width_inp, int fir_phases,
+static int imgu_css_osys_out_stripe_width(int stripe_width_inp, int fir_phases,
int phase_init, int phase_step,
int fir_taps, int pad_left,
int pad_right, int column_offset)
@@ -291,7 +292,7 @@ static int ipu3_css_osys_out_stripe_width(int stripe_width_inp, int fir_phases,
return stripe_width_out - (fir_taps - 1);
}
-struct ipu3_css_reso {
+struct imgu_css_reso {
unsigned int input_width;
unsigned int input_height;
enum imgu_abi_frame_format input_format;
@@ -305,7 +306,7 @@ struct ipu3_css_reso {
int block_width;
};
-struct ipu3_css_frame_params {
+struct imgu_css_frame_params {
/* Output pins */
unsigned int enable;
unsigned int format;
@@ -321,7 +322,7 @@ struct ipu3_css_frame_params {
unsigned int crop_top;
};
-struct ipu3_css_stripe_params {
+struct imgu_css_stripe_params {
unsigned int processing_mode;
unsigned int phase_step;
unsigned int exp_shift;
@@ -358,20 +359,20 @@ struct ipu3_css_stripe_params {
* frame_params - size IMGU_ABI_OSYS_PINS
* stripe_params - size IPU3_UAPI_MAX_STRIPES
*/
-static int ipu3_css_osys_calc_frame_and_stripe_params(
- struct ipu3_css *css, unsigned int stripes,
+static int imgu_css_osys_calc_frame_and_stripe_params(
+ struct imgu_css *css, unsigned int stripes,
struct imgu_abi_osys_config *osys,
- struct ipu3_css_scaler_info *scaler_luma,
- struct ipu3_css_scaler_info *scaler_chroma,
- struct ipu3_css_frame_params frame_params[],
- struct ipu3_css_stripe_params stripe_params[],
+ struct imgu_css_scaler_info *scaler_luma,
+ struct imgu_css_scaler_info *scaler_chroma,
+ struct imgu_css_frame_params frame_params[],
+ struct imgu_css_stripe_params stripe_params[],
unsigned int pipe)
{
- struct ipu3_css_reso reso;
+ struct imgu_css_reso reso;
unsigned int output_width, pin, s;
u32 input_width, input_height, target_width, target_height;
unsigned int procmode = 0;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
input_width = css_pipe->rect[IPU3_CSS_RECT_GDC].width;
input_height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
@@ -463,7 +464,7 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
scaled = 1;
}
}
- ipu3_css_osys_set_format(reso.pin_format[pin], &format,
+ imgu_css_osys_set_format(reso.pin_format[pin], &format,
&tiling);
} else {
enable = 0;
@@ -475,7 +476,7 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
frame_params[pin].scaled = scaled;
}
- ipu3_css_scaler_calc(input_width, input_height, target_width,
+ imgu_css_scaler_calc(input_width, input_height, target_width,
target_height, osys, scaler_luma, scaler_chroma,
&reso.pin_width[IMGU_ABI_OSYS_PIN_VF],
&reso.pin_height[IMGU_ABI_OSYS_PIN_VF], &procmode);
@@ -580,14 +581,14 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
stripe_offset_out_uv = stripe_offset_out_y /
IMGU_LUMA_TO_CHROMA_RATIO;
stripe_offset_inp_y =
- ipu3_css_osys_calc_stripe_offset(
+ imgu_css_osys_calc_stripe_offset(
stripe_offset_out_y,
IMGU_OSYS_FIR_PHASES,
scaler_luma->phase_init,
scaler_luma->phase_step,
scaler_luma->pad_left);
stripe_offset_inp_uv =
- ipu3_css_osys_calc_stripe_offset(
+ imgu_css_osys_calc_stripe_offset(
stripe_offset_out_uv,
IMGU_OSYS_FIR_PHASES,
scaler_chroma->phase_init,
@@ -596,14 +597,14 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
/* Calculate stripe phase init */
stripe_phase_init_y =
- ipu3_css_osys_calc_stripe_phase_init(
+ imgu_css_osys_calc_stripe_phase_init(
stripe_offset_out_y,
IMGU_OSYS_FIR_PHASES,
scaler_luma->phase_init,
scaler_luma->phase_step,
scaler_luma->pad_left);
stripe_phase_init_uv =
- ipu3_css_osys_calc_stripe_phase_init(
+ imgu_css_osys_calc_stripe_phase_init(
stripe_offset_out_uv,
IMGU_OSYS_FIR_PHASES,
scaler_chroma->phase_init,
@@ -707,7 +708,7 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
IMGU_LUMA_TO_CHROMA_RATIO;
/* Calculate input stripe width */
stripe_input_width_y = stripe_offset_col_y +
- ipu3_css_osys_calc_inp_stripe_width(
+ imgu_css_osys_calc_inp_stripe_width(
stripe_output_width_y,
IMGU_OSYS_FIR_PHASES,
stripe_phase_init_y,
@@ -717,7 +718,7 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
stripe_pad_right_y);
stripe_input_width_uv = stripe_offset_col_uv +
- ipu3_css_osys_calc_inp_stripe_width(
+ imgu_css_osys_calc_inp_stripe_width(
stripe_output_width_uv,
IMGU_OSYS_FIR_PHASES,
stripe_phase_init_uv,
@@ -752,7 +753,7 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
*/
stripe_input_width_y = ALIGN(stripe_input_width_y, 8);
stripe_output_width_y =
- ipu3_css_osys_out_stripe_width(
+ imgu_css_osys_out_stripe_width(
stripe_input_width_y,
IMGU_OSYS_FIR_PHASES,
stripe_phase_init_y,
@@ -846,23 +847,23 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
* This function configures the Output Formatter System, given the number of
* stripes, scaler luma and chrome parameters
*/
-static int ipu3_css_osys_calc(struct ipu3_css *css, unsigned int pipe,
+static int imgu_css_osys_calc(struct imgu_css *css, unsigned int pipe,
unsigned int stripes,
struct imgu_abi_osys_config *osys,
- struct ipu3_css_scaler_info *scaler_luma,
- struct ipu3_css_scaler_info *scaler_chroma,
+ struct imgu_css_scaler_info *scaler_luma,
+ struct imgu_css_scaler_info *scaler_chroma,
struct imgu_abi_stripes block_stripes[])
{
- struct ipu3_css_frame_params frame_params[IMGU_ABI_OSYS_PINS];
- struct ipu3_css_stripe_params stripe_params[IPU3_UAPI_MAX_STRIPES];
+ struct imgu_css_frame_params frame_params[IMGU_ABI_OSYS_PINS];
+ struct imgu_css_stripe_params stripe_params[IPU3_UAPI_MAX_STRIPES];
struct imgu_abi_osys_formatter_params *param;
unsigned int pin, s;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
memset(osys, 0, sizeof(*osys));
/* Compute the frame and stripe params */
- if (ipu3_css_osys_calc_frame_and_stripe_params(css, stripes, osys,
+ if (imgu_css_osys_calc_frame_and_stripe_params(css, stripes, osys,
scaler_luma,
scaler_chroma,
frame_params,
@@ -1251,7 +1252,7 @@ static int ipu3_css_osys_calc(struct ipu3_css *css, unsigned int pipe,
*/
static int
-ipu3_css_shd_ops_calc(struct imgu_abi_shd_intra_frame_operations_data *ops,
+imgu_css_shd_ops_calc(struct imgu_abi_shd_intra_frame_operations_data *ops,
const struct ipu3_uapi_shd_grid_config *grid,
unsigned int image_height)
{
@@ -1495,7 +1496,7 @@ struct process_lines {
/* Helper to config intra_frame_operations_data. */
static int
-ipu3_css_acc_process_lines(const struct process_lines *pl,
+imgu_css_acc_process_lines(const struct process_lines *pl,
struct imgu_abi_acc_operation *p_op,
struct imgu_abi_acc_process_lines_cmd_data *p_pl,
struct imgu_abi_acc_transfer_op_data *p_tr)
@@ -1632,12 +1633,12 @@ ipu3_css_acc_process_lines(const struct process_lines *pl,
return 0;
}
-static int ipu3_css_af_ops_calc(struct ipu3_css *css, unsigned int pipe,
+static int imgu_css_af_ops_calc(struct imgu_css *css, unsigned int pipe,
struct imgu_abi_af_config *af_config)
{
struct imgu_abi_af_intra_frame_operations_data *to =
&af_config->operations_data;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
@@ -1655,17 +1656,17 @@ static int ipu3_css_af_ops_calc(struct ipu3_css *css, unsigned int pipe,
.acc_enable = bi->info.isp.sp.enable.af,
};
- return ipu3_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
+ return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
NULL);
}
static int
-ipu3_css_awb_fr_ops_calc(struct ipu3_css *css, unsigned int pipe,
+imgu_css_awb_fr_ops_calc(struct imgu_css *css, unsigned int pipe,
struct imgu_abi_awb_fr_config *awb_fr_config)
{
struct imgu_abi_awb_fr_intra_frame_operations_data *to =
&awb_fr_config->operations_data;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
struct process_lines pl = {
@@ -1682,16 +1683,16 @@ ipu3_css_awb_fr_ops_calc(struct ipu3_css *css, unsigned int pipe,
.acc_enable = bi->info.isp.sp.enable.awb_fr_acc,
};
- return ipu3_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
+ return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
NULL);
}
-static int ipu3_css_awb_ops_calc(struct ipu3_css *css, unsigned int pipe,
+static int imgu_css_awb_ops_calc(struct imgu_css *css, unsigned int pipe,
struct imgu_abi_awb_config *awb_config)
{
struct imgu_abi_awb_intra_frame_operations_data *to =
&awb_config->operations_data;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
@@ -1708,33 +1709,33 @@ static int ipu3_css_awb_ops_calc(struct ipu3_css *css, unsigned int pipe,
.acc_enable = bi->info.isp.sp.enable.awb_acc,
};
- return ipu3_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
+ return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
to->transfer_data);
}
-static u16 ipu3_css_grid_end(u16 start, u8 width, u8 block_width_log2)
+static u16 imgu_css_grid_end(u16 start, u8 width, u8 block_width_log2)
{
return (start & IPU3_UAPI_GRID_START_MASK) +
(width << block_width_log2) - 1;
}
-static void ipu3_css_grid_end_calc(struct ipu3_uapi_grid_config *grid_cfg)
+static void imgu_css_grid_end_calc(struct ipu3_uapi_grid_config *grid_cfg)
{
- grid_cfg->x_end = ipu3_css_grid_end(grid_cfg->x_start, grid_cfg->width,
+ grid_cfg->x_end = imgu_css_grid_end(grid_cfg->x_start, grid_cfg->width,
grid_cfg->block_width_log2);
- grid_cfg->y_end = ipu3_css_grid_end(grid_cfg->y_start, grid_cfg->height,
+ grid_cfg->y_end = imgu_css_grid_end(grid_cfg->y_start, grid_cfg->height,
grid_cfg->block_height_log2);
}
/****************** config computation *****************************/
-static int ipu3_css_cfg_acc_stripe(struct ipu3_css *css, unsigned int pipe,
+static int imgu_css_cfg_acc_stripe(struct imgu_css *css, unsigned int pipe,
struct imgu_abi_acc_param *acc)
{
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
- struct ipu3_css_scaler_info scaler_luma, scaler_chroma;
+ struct imgu_css_scaler_info scaler_luma, scaler_chroma;
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
const unsigned int f = IPU3_UAPI_ISP_VEC_ELEMS * 2;
unsigned int bds_ds, i;
@@ -1743,7 +1744,7 @@ static int ipu3_css_cfg_acc_stripe(struct ipu3_css *css, unsigned int pipe,
/* acc_param: osys_config */
- if (ipu3_css_osys_calc(css, pipe, stripes, &acc->osys, &scaler_luma,
+ if (imgu_css_osys_calc(css, pipe, stripes, &acc->osys, &scaler_luma,
&scaler_chroma, acc->stripe.block_stripes))
return -EINVAL;
@@ -1900,12 +1901,12 @@ static int ipu3_css_cfg_acc_stripe(struct ipu3_css *css, unsigned int pipe,
return 0;
}
-static void ipu3_css_cfg_acc_dvs(struct ipu3_css *css,
+static void imgu_css_cfg_acc_dvs(struct imgu_css *css,
struct imgu_abi_acc_param *acc,
unsigned int pipe)
{
unsigned int i;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
/* Disable DVS statistics */
acc->dvs_stat.operations_data.process_lines_data[0].lines =
@@ -1919,11 +1920,11 @@ static void ipu3_css_cfg_acc_dvs(struct ipu3_css *css,
acc->dvs_stat.cfg.grd_config[i].enable = 0;
}
-static void acc_bds_per_stripe_data(struct ipu3_css *css,
+static void acc_bds_per_stripe_data(struct imgu_css *css,
struct imgu_abi_acc_param *acc,
const int i, unsigned int pipe)
{
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_en = 0;
acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_start = 0;
@@ -1944,13 +1945,13 @@ static void acc_bds_per_stripe_data(struct ipu3_css *css,
* telling which fields to take from the old values (or generate if it is NULL)
* and which to take from the new user values.
*/
-int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
+int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_flags *use,
struct imgu_abi_acc_param *acc,
struct imgu_abi_acc_param *acc_old,
struct ipu3_uapi_acc_param *acc_user)
{
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
@@ -1959,7 +1960,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
const unsigned int min_overlap = 10;
const struct v4l2_pix_format_mplane *pixm =
&css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
- const struct ipu3_css_bds_config *cfg_bds;
+ const struct imgu_css_bds_config *cfg_bds;
struct imgu_abi_input_feeder_data *feeder_data;
unsigned int bds_ds, ofs_x, ofs_y, i, width, height;
@@ -1967,7 +1968,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
/* Update stripe using chroma and luma */
- if (ipu3_css_cfg_acc_stripe(css, pipe, acc))
+ if (imgu_css_cfg_acc_stripe(css, pipe, acc))
return -EINVAL;
/* acc_param: input_feeder_config */
@@ -2021,7 +2022,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->bnr = acc_old->bnr;
} else {
/* Calculate from scratch */
- acc->bnr = ipu3_css_bnr_defaults;
+ acc->bnr = imgu_css_bnr_defaults;
}
acc->bnr.column_size = tnr_frame_width;
@@ -2049,7 +2050,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->dm = acc_old->dm;
} else {
/* Calculate from scratch */
- acc->dm = ipu3_css_dm_defaults;
+ acc->dm = imgu_css_dm_defaults;
}
acc->dm.frame_width = tnr_frame_width;
@@ -2064,7 +2065,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->ccm = acc_old->ccm;
} else {
/* Calculate from scratch */
- acc->ccm = ipu3_css_ccm_defaults;
+ acc->ccm = imgu_css_ccm_defaults;
}
/* acc_param: gamma_config */
@@ -2078,7 +2079,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
} else {
/* Calculate from scratch */
acc->gamma.gc_ctrl.enable = 1;
- acc->gamma.gc_lut = ipu3_css_gamma_lut;
+ acc->gamma.gc_lut = imgu_css_gamma_lut;
}
/* acc_param: csc_mat_config */
@@ -2091,7 +2092,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->csc = acc_old->csc;
} else {
/* Calculate from scratch */
- acc->csc = ipu3_css_csc_defaults;
+ acc->csc = imgu_css_csc_defaults;
}
/* acc_param: cds_params */
@@ -2104,7 +2105,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->cds = acc_old->cds;
} else {
/* Calculate from scratch */
- acc->cds = ipu3_css_cds_defaults;
+ acc->cds = imgu_css_cds_defaults;
}
/* acc_param: shd_config */
@@ -2119,7 +2120,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->shd.shd_lut = acc_old->shd.shd_lut;
} else {
/* Calculate from scratch */
- acc->shd.shd = ipu3_css_shd_defaults;
+ acc->shd.shd = imgu_css_shd_defaults;
memset(&acc->shd.shd_lut, 0, sizeof(acc->shd.shd_lut));
}
@@ -2137,12 +2138,12 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->shd.shd.grid.block_height_log2) %
acc->shd.shd.grid.grid_height_per_slice;
- if (ipu3_css_shd_ops_calc(&acc->shd.shd_ops, &acc->shd.shd.grid,
+ if (imgu_css_shd_ops_calc(&acc->shd.shd_ops, &acc->shd.shd.grid,
css_pipe->rect[IPU3_CSS_RECT_BDS].height))
return -EINVAL;
/* acc_param: dvs_stat_config */
- ipu3_css_cfg_acc_dvs(css, acc, pipe);
+ imgu_css_cfg_acc_dvs(css, acc, pipe);
/* acc_param: yuvp1_iefd_config */
@@ -2154,7 +2155,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->iefd = acc_old->iefd;
} else {
/* Calculate from scratch */
- acc->iefd = ipu3_css_iefd_defaults;
+ acc->iefd = imgu_css_iefd_defaults;
}
/* acc_param: yuvp1_yds_config yds_c0 */
@@ -2167,7 +2168,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->yds_c0 = acc_old->yds_c0;
} else {
/* Calculate from scratch */
- acc->yds_c0 = ipu3_css_yds_defaults;
+ acc->yds_c0 = imgu_css_yds_defaults;
}
/* acc_param: yuvp1_chnr_config chnr_c0 */
@@ -2180,7 +2181,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->chnr_c0 = acc_old->chnr_c0;
} else {
/* Calculate from scratch */
- acc->chnr_c0 = ipu3_css_chnr_defaults;
+ acc->chnr_c0 = imgu_css_chnr_defaults;
}
/* acc_param: yuvp1_y_ee_nr_config */
@@ -2193,7 +2194,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->y_ee_nr = acc_old->y_ee_nr;
} else {
/* Calculate from scratch */
- acc->y_ee_nr = ipu3_css_y_ee_nr_defaults;
+ acc->y_ee_nr = imgu_css_y_ee_nr_defaults;
}
/* acc_param: yuvp1_yds_config yds */
@@ -2206,7 +2207,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->yds = acc_old->yds;
} else {
/* Calculate from scratch */
- acc->yds = ipu3_css_yds_defaults;
+ acc->yds = imgu_css_yds_defaults;
}
/* acc_param: yuvp1_chnr_config chnr */
@@ -2219,7 +2220,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->chnr = acc_old->chnr;
} else {
/* Calculate from scratch */
- acc->chnr = ipu3_css_chnr_defaults;
+ acc->chnr = imgu_css_chnr_defaults;
}
/* acc_param: yuvp2_y_tm_lut_static_config */
@@ -2238,7 +2239,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->yds2 = acc_old->yds2;
} else {
/* Calculate from scratch */
- acc->yds2 = ipu3_css_yds_defaults;
+ acc->yds2 = imgu_css_yds_defaults;
}
/* acc_param: yuvp2_tcc_static_config */
@@ -2270,8 +2271,8 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
for (i = 7; i < IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS; i++)
acc->tcc.inv_y_lut.entries[i] = 1024 >> (i - 6);
- acc->tcc.gain_pcwl = ipu3_css_tcc_gain_pcwl_lut;
- acc->tcc.r_sqr_lut = ipu3_css_tcc_r_sqr_lut;
+ acc->tcc.gain_pcwl = imgu_css_tcc_gain_pcwl_lut;
+ acc->tcc.r_sqr_lut = imgu_css_tcc_r_sqr_lut;
}
/* acc_param: dpc_config */
@@ -2287,10 +2288,10 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
bds_ds = (css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height *
IMGU_BDS_GRANULARITY) / css_pipe->rect[IPU3_CSS_RECT_BDS].height;
if (bds_ds < IMGU_BDS_MIN_SF_INV ||
- bds_ds - IMGU_BDS_MIN_SF_INV >= ARRAY_SIZE(ipu3_css_bds_configs))
+ bds_ds - IMGU_BDS_MIN_SF_INV >= ARRAY_SIZE(imgu_css_bds_configs))
return -EINVAL;
- cfg_bds = &ipu3_css_bds_configs[bds_ds - IMGU_BDS_MIN_SF_INV];
+ cfg_bds = &imgu_css_bds_configs[bds_ds - IMGU_BDS_MIN_SF_INV];
acc->bds.hor.hor_ctrl1.hor_crop_en = 0;
acc->bds.hor.hor_ctrl1.hor_crop_start = 0;
acc->bds.hor.hor_ctrl1.hor_crop_end = 0;
@@ -2339,7 +2340,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
sizeof(acc->anr.stitch.pyramid));
} else {
/* Calculate from scratch */
- acc->anr = ipu3_css_anr_defaults;
+ acc->anr = imgu_css_anr_defaults;
}
/* Always enabled */
@@ -2377,10 +2378,10 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->awb_fr.config = acc_old->awb_fr.config;
} else {
/* Set from scratch */
- acc->awb_fr.config = ipu3_css_awb_fr_defaults;
+ acc->awb_fr.config = imgu_css_awb_fr_defaults;
}
- ipu3_css_grid_end_calc(&acc->awb_fr.config.grid_cfg);
+ imgu_css_grid_end_calc(&acc->awb_fr.config.grid_cfg);
if (acc->awb_fr.config.grid_cfg.width <= 0)
return -EINVAL;
@@ -2415,7 +2416,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->awb_fr.stripes[0].grid_cfg.width;
b_w_log2 = acc->awb_fr.stripes[0].grid_cfg.block_width_log2;
- end = ipu3_css_grid_end(acc->awb_fr.stripes[0].grid_cfg.x_start,
+ end = imgu_css_grid_end(acc->awb_fr.stripes[0].grid_cfg.x_start,
acc->awb_fr.stripes[0].grid_cfg.width,
b_w_log2);
acc->awb_fr.stripes[0].grid_cfg.x_end = end;
@@ -2425,7 +2426,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->stripe.down_scaled_stripes[1].offset) &
IPU3_UAPI_GRID_START_MASK;
b_w_log2 = acc->awb_fr.stripes[1].grid_cfg.block_width_log2;
- end = ipu3_css_grid_end(acc->awb_fr.stripes[1].grid_cfg.x_start,
+ end = imgu_css_grid_end(acc->awb_fr.stripes[1].grid_cfg.x_start,
acc->awb_fr.stripes[1].grid_cfg.width,
b_w_log2);
acc->awb_fr.stripes[1].grid_cfg.x_end = end;
@@ -2439,7 +2440,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->awb_fr.stripes[i].grid_cfg.height_per_slice = 1;
}
- if (ipu3_css_awb_fr_ops_calc(css, pipe, &acc->awb_fr))
+ if (imgu_css_awb_fr_ops_calc(css, pipe, &acc->awb_fr))
return -EINVAL;
/* acc_param: ae_config */
@@ -2461,18 +2462,18 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
static const struct ipu3_uapi_ae_weight_elem
weight_def = { 1, 1, 1, 1, 1, 1, 1, 1 };
- acc->ae.grid_cfg = ipu3_css_ae_grid_defaults;
- acc->ae.ae_ccm = ipu3_css_ae_ccm_defaults;
+ acc->ae.grid_cfg = imgu_css_ae_grid_defaults;
+ acc->ae.ae_ccm = imgu_css_ae_ccm_defaults;
for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
acc->ae.weights[i] = weight_def;
}
b_w_log2 = acc->ae.grid_cfg.block_width_log2;
- acc->ae.grid_cfg.x_end = ipu3_css_grid_end(acc->ae.grid_cfg.x_start,
+ acc->ae.grid_cfg.x_end = imgu_css_grid_end(acc->ae.grid_cfg.x_start,
acc->ae.grid_cfg.width,
b_w_log2);
b_w_log2 = acc->ae.grid_cfg.block_height_log2;
- acc->ae.grid_cfg.y_end = ipu3_css_grid_end(acc->ae.grid_cfg.y_start,
+ acc->ae.grid_cfg.y_end = imgu_css_grid_end(acc->ae.grid_cfg.y_start,
acc->ae.grid_cfg.height,
b_w_log2);
@@ -2501,7 +2502,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
b_w_log2 = acc->ae.stripes[0].grid.block_width_log2;
acc->ae.stripes[0].grid.x_end =
- ipu3_css_grid_end(acc->ae.stripes[0].grid.x_start,
+ imgu_css_grid_end(acc->ae.stripes[0].grid.x_start,
acc->ae.stripes[0].grid.width,
b_w_log2);
@@ -2511,7 +2512,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
IPU3_UAPI_GRID_START_MASK;
b_w_log2 = acc->ae.stripes[1].grid.block_width_log2;
acc->ae.stripes[1].grid.x_end =
- ipu3_css_grid_end(acc->ae.stripes[1].grid.x_start,
+ imgu_css_grid_end(acc->ae.stripes[1].grid.x_start,
acc->ae.stripes[1].grid.width,
b_w_log2);
}
@@ -2528,11 +2529,11 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
} else {
/* Set from scratch */
acc->af.config.filter_config =
- ipu3_css_af_defaults.filter_config;
- acc->af.config.grid_cfg = ipu3_css_af_defaults.grid_cfg;
+ imgu_css_af_defaults.filter_config;
+ acc->af.config.grid_cfg = imgu_css_af_defaults.grid_cfg;
}
- ipu3_css_grid_end_calc(&acc->af.config.grid_cfg);
+ imgu_css_grid_end_calc(&acc->af.config.grid_cfg);
if (acc->af.config.grid_cfg.width <= 0)
return -EINVAL;
@@ -2578,7 +2579,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
b_w_log2 = acc->af.stripes[0].grid_cfg.block_width_log2;
acc->af.stripes[0].grid_cfg.x_end =
- ipu3_css_grid_end(acc->af.stripes[0].grid_cfg.x_start,
+ imgu_css_grid_end(acc->af.stripes[0].grid_cfg.x_start,
acc->af.stripes[0].grid_cfg.width,
b_w_log2);
@@ -2589,7 +2590,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
b_w_log2 = acc->af.stripes[1].grid_cfg.block_width_log2;
acc->af.stripes[1].grid_cfg.x_end =
- ipu3_css_grid_end(acc->af.stripes[1].grid_cfg.x_start,
+ imgu_css_grid_end(acc->af.stripes[1].grid_cfg.x_start,
acc->af.stripes[1].grid_cfg.width,
b_w_log2);
@@ -2601,7 +2602,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->af.stripes[i].grid_cfg.height_per_slice = 1;
}
- if (ipu3_css_af_ops_calc(css, pipe, &acc->af))
+ if (imgu_css_af_ops_calc(css, pipe, &acc->af))
return -EINVAL;
/* acc_param: awb_config */
@@ -2614,7 +2615,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->awb.config = acc_old->awb.config;
} else {
/* Set from scratch */
- acc->awb.config = ipu3_css_awb_defaults;
+ acc->awb.config = imgu_css_awb_defaults;
}
if (acc->awb.config.grid.width <= 0)
@@ -2622,7 +2623,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->awb.config.grid.height_per_slice =
IMGU_ABI_AWB_MAX_CELLS_PER_SET / acc->awb.config.grid.width,
- ipu3_css_grid_end_calc(&acc->awb.config.grid);
+ imgu_css_grid_end_calc(&acc->awb.config.grid);
for (i = 0; i < stripes; i++)
acc->awb.stripes[i] = acc->awb.config;
@@ -2647,7 +2648,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
b_w_log2 = acc->awb.stripes[0].grid.block_width_log2;
acc->awb.stripes[0].grid.x_end =
- ipu3_css_grid_end(acc->awb.stripes[0].grid.x_start,
+ imgu_css_grid_end(acc->awb.stripes[0].grid.x_start,
acc->awb.stripes[0].grid.width,
b_w_log2);
@@ -2658,7 +2659,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
b_w_log2 = acc->awb.stripes[1].grid.block_width_log2;
acc->awb.stripes[1].grid.x_end =
- ipu3_css_grid_end(acc->awb.stripes[1].grid.x_start,
+ imgu_css_grid_end(acc->awb.stripes[1].grid.x_start,
acc->awb.stripes[1].grid.width,
b_w_log2);
@@ -2670,7 +2671,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
acc->awb.stripes[i].grid.height_per_slice = 1;
}
- if (ipu3_css_awb_ops_calc(css, pipe, &acc->awb))
+ if (imgu_css_awb_ops_calc(css, pipe, &acc->awb))
return -EINVAL;
return 0;
@@ -2685,7 +2686,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
* to the structure inside `new_binary_params'. In that case the caller
* should calculate and fill the structure from scratch.
*/
-static void *ipu3_css_cfg_copy(struct ipu3_css *css,
+static void *imgu_css_cfg_copy(struct imgu_css *css,
unsigned int pipe, bool use_user,
void *user_setting, void *old_binary_params,
void *new_binary_params,
@@ -2696,7 +2697,7 @@ static void *ipu3_css_cfg_copy(struct ipu3_css *css,
const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
void *new_setting, *old_setting;
- new_setting = ipu3_css_fw_pipeline_params(css, pipe, c, m, par,
+ new_setting = imgu_css_fw_pipeline_params(css, pipe, c, m, par,
par_size, new_binary_params);
if (!new_setting)
return ERR_PTR(-EPROTO); /* Corrupted firmware */
@@ -2706,7 +2707,7 @@ static void *ipu3_css_cfg_copy(struct ipu3_css *css,
memcpy(new_setting, user_setting, par_size);
} else if (old_binary_params) {
/* Take previous value */
- old_setting = ipu3_css_fw_pipeline_params(css, pipe, c, m, par,
+ old_setting = imgu_css_fw_pipeline_params(css, pipe, c, m, par,
par_size,
old_binary_params);
if (!old_setting)
@@ -2722,7 +2723,7 @@ static void *ipu3_css_cfg_copy(struct ipu3_css *css,
/*
* Configure VMEM0 parameters (late binding parameters).
*/
-int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
+int imgu_css_cfg_vmem0(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_flags *use,
void *vmem0, void *vmem0_old,
struct ipu3_uapi_params *user)
@@ -2744,7 +2745,7 @@ int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
/* Configure Linearization VMEM0 parameters */
- lin_vmem = ipu3_css_cfg_copy(css, pipe, use && use->lin_vmem_params,
+ lin_vmem = imgu_css_cfg_copy(css, pipe, use && use->lin_vmem_params,
&user->lin_vmem_params, vmem0_old, vmem0,
m, &pofs->vmem.lin, sizeof(*lin_vmem));
if (!IS_ERR_OR_NULL(lin_vmem)) {
@@ -2764,7 +2765,7 @@ int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
/* Configure TNR3 VMEM parameters */
if (css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
- tnr_vmem = ipu3_css_cfg_copy(css, pipe,
+ tnr_vmem = imgu_css_cfg_copy(css, pipe,
use && use->tnr3_vmem_params,
&user->tnr3_vmem_params,
vmem0_old, vmem0, m,
@@ -2780,17 +2781,17 @@ int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
/* Configure XNR3 VMEM parameters */
- xnr_vmem = ipu3_css_cfg_copy(css, pipe, use && use->xnr3_vmem_params,
+ xnr_vmem = imgu_css_cfg_copy(css, pipe, use && use->xnr3_vmem_params,
&user->xnr3_vmem_params, vmem0_old, vmem0,
m, &pofs->vmem.xnr3, sizeof(*xnr_vmem));
if (!IS_ERR_OR_NULL(xnr_vmem)) {
- xnr_vmem->x[i] = ipu3_css_xnr3_vmem_defaults.x
+ xnr_vmem->x[i] = imgu_css_xnr3_vmem_defaults.x
[i % IMGU_XNR3_VMEM_LUT_LEN];
- xnr_vmem->a[i] = ipu3_css_xnr3_vmem_defaults.a
+ xnr_vmem->a[i] = imgu_css_xnr3_vmem_defaults.a
[i % IMGU_XNR3_VMEM_LUT_LEN];
- xnr_vmem->b[i] = ipu3_css_xnr3_vmem_defaults.b
+ xnr_vmem->b[i] = imgu_css_xnr3_vmem_defaults.b
[i % IMGU_XNR3_VMEM_LUT_LEN];
- xnr_vmem->c[i] = ipu3_css_xnr3_vmem_defaults.c
+ xnr_vmem->c[i] = imgu_css_xnr3_vmem_defaults.c
[i % IMGU_XNR3_VMEM_LUT_LEN];
}
@@ -2801,12 +2802,12 @@ int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
/*
* Configure DMEM0 parameters (late binding parameters).
*/
-int ipu3_css_cfg_dmem0(struct ipu3_css *css, unsigned int pipe,
+int imgu_css_cfg_dmem0(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_flags *use,
void *dmem0, void *dmem0_old,
struct ipu3_uapi_params *user)
{
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
@@ -2824,7 +2825,7 @@ int ipu3_css_cfg_dmem0(struct ipu3_css *css, unsigned int pipe,
/* Configure TNR3 DMEM0 parameters */
if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
- tnr_dmem = ipu3_css_cfg_copy(css, pipe,
+ tnr_dmem = imgu_css_cfg_copy(css, pipe,
use && use->tnr3_dmem_params,
&user->tnr3_dmem_params,
dmem0_old, dmem0, m,
@@ -2839,7 +2840,7 @@ int ipu3_css_cfg_dmem0(struct ipu3_css *css, unsigned int pipe,
/* Configure XNR3 DMEM0 parameters */
- xnr_dmem = ipu3_css_cfg_copy(css, pipe, use && use->xnr3_dmem_params,
+ xnr_dmem = imgu_css_cfg_copy(css, pipe, use && use->xnr3_dmem_params,
&user->xnr3_dmem_params, dmem0_old, dmem0,
m, &pofs->dmem.xnr3, sizeof(*xnr_dmem));
if (!IS_ERR_OR_NULL(xnr_dmem)) {
@@ -2853,7 +2854,7 @@ int ipu3_css_cfg_dmem0(struct ipu3_css *css, unsigned int pipe,
}
/* Generate unity morphing table without morphing effect */
-void ipu3_css_cfg_gdc_table(struct imgu_abi_gdc_warp_param *gdc,
+void imgu_css_cfg_gdc_table(struct imgu_abi_gdc_warp_param *gdc,
int frame_in_x, int frame_in_y,
int frame_out_x, int frame_out_y,
int env_w, int env_h)
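Per the comment above, imgu_css_cfg_gdc_table emits a warp grid with no morphing effect: every output block fetches from its own position in the input, padded by the filter envelope. The function body falls outside this hunk, so the following is only a sketch of what an identity warp grid looks like, with invented field names and block sizes:

	#include <stdio.h>

	/* Hypothetical, simplified warp entry: input coordinates that the
	 * top-left of each output block is fetched from. Not the real
	 * struct imgu_abi_gdc_warp_param layout. */
	struct warp_param {
		int in_x;
		int in_y;
	};

	#define BLOCK_W 64
	#define BLOCK_H 32

	/* Identity mapping: block (bx, by) reads the same place in the
	 * input, shifted by half the envelope for filter margin. */
	static void fill_identity_warp(struct warp_param *gdc,
				       int blocks_x, int blocks_y,
				       int env_w, int env_h)
	{
		int bx, by;

		for (by = 0; by < blocks_y; by++)
			for (bx = 0; bx < blocks_x; bx++) {
				struct warp_param *p = &gdc[by * blocks_x + bx];

				p->in_x = bx * BLOCK_W + env_w / 2;
				p->in_y = by * BLOCK_H + env_h / 2;
			}
	}

	int main(void)
	{
		struct warp_param gdc[4 * 2];

		fill_identity_warp(gdc, 4, 2, 16, 8);
		printf("block(1,1) fetches from (%d,%d)\n",
		       gdc[1 * 4 + 1].in_x, gdc[1 * 4 + 1].in_y);
		return 0;
	}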
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.h b/drivers/staging/media/ipu3/ipu3-css-params.h
index f3a0a47117a4..ffaec6b7d5cc 100644
--- a/drivers/staging/media/ipu3/ipu3-css-params.h
+++ b/drivers/staging/media/ipu3/ipu3-css-params.h
@@ -4,23 +4,23 @@
#ifndef __IPU3_PARAMS_H
#define __IPU3_PARAMS_H
-int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
+int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_flags *use,
struct imgu_abi_acc_param *acc,
struct imgu_abi_acc_param *acc_old,
struct ipu3_uapi_acc_param *acc_user);
-int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
+int imgu_css_cfg_vmem0(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_flags *use,
void *vmem0, void *vmem0_old,
struct ipu3_uapi_params *user);
-int ipu3_css_cfg_dmem0(struct ipu3_css *css, unsigned int pipe,
+int imgu_css_cfg_dmem0(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_flags *use,
void *dmem0, void *dmem0_old,
struct ipu3_uapi_params *user);
-void ipu3_css_cfg_gdc_table(struct imgu_abi_gdc_warp_param *gdc,
+void imgu_css_cfg_gdc_table(struct imgu_abi_gdc_warp_param *gdc,
int frame_in_x, int frame_in_y,
int frame_out_x, int frame_out_y,
int env_w, int env_h);
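All of the imgu_css_cfg_* entry points declared above rely on the precedence rule implemented by imgu_css_cfg_copy earlier in this diff: copy the user-supplied setting when its use flag is set, otherwise inherit the value from the previous binary-parameter buffer, and only when neither source exists tell the caller to build the structure from scratch. A self-contained model of that three-way fallback (names illustrative):

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* Returns dst when filled from user or old data, NULL when the
	 * caller must compute the setting from scratch. */
	static void *cfg_copy(void *dst, size_t len,
			      int use_user, const void *user_setting,
			      const void *old_setting)
	{
		if (use_user && user_setting)
			memcpy(dst, user_setting, len);	/* new value */
		else if (old_setting)
			memcpy(dst, old_setting, len);	/* keep previous */
		else
			return NULL;			/* fill defaults */
		return dst;
	}

	int main(void)
	{
		int user = 7, old = 3, out;

		if (cfg_copy(&out, sizeof(out), 1, &user, &old))
			printf("took user value %d\n", out);
		if (cfg_copy(&out, sizeof(out), 0, &user, &old))
			printf("kept old value %d\n", out);
		if (!cfg_copy(&out, sizeof(out), 0, &user, NULL))
			printf("must compute from scratch\n");
		return 0;
	}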
diff --git a/drivers/staging/media/ipu3/ipu3-css-pool.c b/drivers/staging/media/ipu3/ipu3-css-pool.c
index 6f271f81669b..fa5b7d3acef2 100644
--- a/drivers/staging/media/ipu3/ipu3-css-pool.c
+++ b/drivers/staging/media/ipu3/ipu3-css-pool.c
@@ -7,30 +7,30 @@
#include "ipu3-css-pool.h"
#include "ipu3-dmamap.h"
-int ipu3_css_dma_buffer_resize(struct imgu_device *imgu,
- struct ipu3_css_map *map, size_t size)
+int imgu_css_dma_buffer_resize(struct imgu_device *imgu,
+ struct imgu_css_map *map, size_t size)
{
if (map->size < size && map->vaddr) {
		dev_warn(&imgu->pci_dev->dev, "dma buf resized from %zu to %zu\n",
map->size, size);
- ipu3_dmamap_free(imgu, map);
- if (!ipu3_dmamap_alloc(imgu, map, size))
+ imgu_dmamap_free(imgu, map);
+ if (!imgu_dmamap_alloc(imgu, map, size))
return -ENOMEM;
}
return 0;
}
-void ipu3_css_pool_cleanup(struct imgu_device *imgu, struct ipu3_css_pool *pool)
+void imgu_css_pool_cleanup(struct imgu_device *imgu, struct imgu_css_pool *pool)
{
unsigned int i;
for (i = 0; i < IPU3_CSS_POOL_SIZE; i++)
- ipu3_dmamap_free(imgu, &pool->entry[i].param);
+ imgu_dmamap_free(imgu, &pool->entry[i].param);
}
-int ipu3_css_pool_init(struct imgu_device *imgu, struct ipu3_css_pool *pool,
+int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
size_t size)
{
unsigned int i;
@@ -42,7 +42,7 @@ int ipu3_css_pool_init(struct imgu_device *imgu, struct ipu3_css_pool *pool,
continue;
}
- if (!ipu3_dmamap_alloc(imgu, &pool->entry[i].param, size))
+ if (!imgu_dmamap_alloc(imgu, &pool->entry[i].param, size))
goto fail;
}
@@ -51,14 +51,14 @@ int ipu3_css_pool_init(struct imgu_device *imgu, struct ipu3_css_pool *pool,
return 0;
fail:
- ipu3_css_pool_cleanup(imgu, pool);
+ imgu_css_pool_cleanup(imgu, pool);
return -ENOMEM;
}
/*
 * Allocate a new parameter by recycling the oldest entry in the pool.
*/
-void ipu3_css_pool_get(struct ipu3_css_pool *pool)
+void imgu_css_pool_get(struct imgu_css_pool *pool)
{
/* Get the oldest entry */
u32 n = (pool->last + 1) % IPU3_CSS_POOL_SIZE;
@@ -70,25 +70,25 @@ void ipu3_css_pool_get(struct ipu3_css_pool *pool)
/*
* Undo, for all practical purposes, the effect of pool_get().
*/
-void ipu3_css_pool_put(struct ipu3_css_pool *pool)
+void imgu_css_pool_put(struct imgu_css_pool *pool)
{
pool->entry[pool->last].valid = false;
pool->last = (pool->last + IPU3_CSS_POOL_SIZE - 1) % IPU3_CSS_POOL_SIZE;
}
/**
- * ipu3_css_pool_last - Retrieve the nth pool entry from last
+ * imgu_css_pool_last - Retrieve the nth pool entry from last
*
- * @pool: a pointer to &struct ipu3_css_pool.
+ * @pool: a pointer to &struct imgu_css_pool.
* @n: the distance to the last index.
*
* Returns:
 * The nth entry from the last, or a null map if no frame is stored.
*/
-const struct ipu3_css_map *
-ipu3_css_pool_last(struct ipu3_css_pool *pool, unsigned int n)
+const struct imgu_css_map *
+imgu_css_pool_last(struct imgu_css_pool *pool, unsigned int n)
{
- static const struct ipu3_css_map null_map = { 0 };
+ static const struct imgu_css_map null_map = { 0 };
int i = (pool->last + IPU3_CSS_POOL_SIZE - n) % IPU3_CSS_POOL_SIZE;
WARN_ON(n >= IPU3_CSS_POOL_SIZE);
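The pool above is a fixed ring of IPU3_CSS_POOL_SIZE entries: get advances the write pointer onto the oldest slot, put steps it back to undo a failed get, and last(n) walks n slots behind the write pointer. All three use modulo arithmetic that adds the pool size before taking the remainder, which keeps the operand non-negative on unsigned types. The indexing on its own, as a runnable model:

	#include <assert.h>
	#include <stdio.h>

	#define POOL_SIZE 4

	struct ring {
		int valid[POOL_SIZE];
		unsigned int last;	/* write pointer */
	};

	static void ring_get(struct ring *r)
	{
		r->last = (r->last + 1) % POOL_SIZE; /* recycle oldest */
		r->valid[r->last] = 1;
	}

	static void ring_put(struct ring *r)
	{
		r->valid[r->last] = 0;
		/* + POOL_SIZE keeps the left operand non-negative */
		r->last = (r->last + POOL_SIZE - 1) % POOL_SIZE;
	}

	static unsigned int ring_last(const struct ring *r, unsigned int n)
	{
		assert(n < POOL_SIZE);
		return (r->last + POOL_SIZE - n) % POOL_SIZE;
	}

	int main(void)
	{
		/* start past the end, as the pool header documents */
		struct ring r = { .last = POOL_SIZE };

		ring_get(&r);
		ring_get(&r);
		printf("newest=%u previous=%u\n",
		       ring_last(&r, 0), ring_last(&r, 1));
		ring_put(&r);	/* roll back the second get */
		printf("newest=%u\n", ring_last(&r, 0));
		return 0;
	}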
diff --git a/drivers/staging/media/ipu3/ipu3-css-pool.h b/drivers/staging/media/ipu3/ipu3-css-pool.h
index 2657c39a4d71..f4a60b41401b 100644
--- a/drivers/staging/media/ipu3/ipu3-css-pool.h
+++ b/drivers/staging/media/ipu3/ipu3-css-pool.h
@@ -10,15 +10,15 @@ struct imgu_device;
#define IPU3_CSS_POOL_SIZE 4
/**
- * ipu3_css_map - store DMA mapping info for buffer
+ * imgu_css_map - store DMA mapping info for buffer
*
* @size: size of the buffer in bytes.
* @vaddr: kernel virtual address.
* @daddr: iova dma address to access IPU3.
* @vma: private, a pointer to &struct vm_struct,
- * used for ipu3_dmamap_free.
+ * used for imgu_dmamap_free.
*/
-struct ipu3_css_map {
+struct imgu_css_map {
size_t size;
void *vaddr;
dma_addr_t daddr;
@@ -26,30 +26,30 @@ struct ipu3_css_map {
};
/**
- * ipu3_css_pool - circular buffer pool definition
+ * imgu_css_pool - circular buffer pool definition
*
* @entry: array with IPU3_CSS_POOL_SIZE elements.
- * @entry.param: a &struct ipu3_css_map for storing the mem mapping.
+ * @entry.param: a &struct imgu_css_map for storing the mem mapping.
* @entry.valid: used to mark if the entry has valid data.
* @last: write pointer, initialized to IPU3_CSS_POOL_SIZE.
*/
-struct ipu3_css_pool {
+struct imgu_css_pool {
struct {
- struct ipu3_css_map param;
+ struct imgu_css_map param;
bool valid;
} entry[IPU3_CSS_POOL_SIZE];
u32 last;
};
-int ipu3_css_dma_buffer_resize(struct imgu_device *imgu,
- struct ipu3_css_map *map, size_t size);
-void ipu3_css_pool_cleanup(struct imgu_device *imgu,
- struct ipu3_css_pool *pool);
-int ipu3_css_pool_init(struct imgu_device *imgu, struct ipu3_css_pool *pool,
+int imgu_css_dma_buffer_resize(struct imgu_device *imgu,
+ struct imgu_css_map *map, size_t size);
+void imgu_css_pool_cleanup(struct imgu_device *imgu,
+ struct imgu_css_pool *pool);
+int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
size_t size);
-void ipu3_css_pool_get(struct ipu3_css_pool *pool);
-void ipu3_css_pool_put(struct ipu3_css_pool *pool);
-const struct ipu3_css_map *ipu3_css_pool_last(struct ipu3_css_pool *pool,
+void imgu_css_pool_get(struct imgu_css_pool *pool);
+void imgu_css_pool_put(struct imgu_css_pool *pool);
+const struct imgu_css_map *imgu_css_pool_last(struct imgu_css_pool *pool,
u32 last);
#endif
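imgu_css_dma_buffer_resize, declared above and defined in the pool.c hunks, is a grow-only helper: the mapping is freed and reallocated only when the requested size exceeds the current one, and the common already-fits case costs nothing. A user-space model of the same shape, using calloc/free where the driver uses imgu_dmamap_alloc/free:

	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified stand-in for struct imgu_css_map. */
	struct buf_map {
		size_t size;
		void *vaddr;
	};

	static int buf_resize(struct buf_map *map, size_t size)
	{
		if (map->size < size && map->vaddr) {
			fprintf(stderr, "buf resized from %zu to %zu\n",
				map->size, size);
			free(map->vaddr);
			map->vaddr = calloc(1, size);
			if (!map->vaddr)
				return -1;	/* -ENOMEM in the driver */
			map->size = size;
		}
		return 0;
	}

	int main(void)
	{
		struct buf_map map = { .size = 64, .vaddr = calloc(1, 64) };

		buf_resize(&map, 32);	/* fits: no reallocation */
		buf_resize(&map, 128);	/* grows: free then realloc */
		free(map.vaddr);
		return 0;
	}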
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 44c55639389a..15ab77e4b766 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -46,7 +46,7 @@
IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_VF)
/* Formats supported by IPU3 Camera Sub System */
-static const struct ipu3_css_format ipu3_css_formats[] = {
+static const struct imgu_css_format imgu_css_formats[] = {
{
.pixelformat = V4L2_PIX_FMT_NV12,
.colorspace = V4L2_COLORSPACE_SRGB,
@@ -100,7 +100,7 @@ static const struct ipu3_css_format ipu3_css_formats[] = {
static const struct {
enum imgu_abi_queue_id qid;
size_t ptr_ofs;
-} ipu3_css_queues[IPU3_CSS_QUEUES] = {
+} imgu_css_queues[IPU3_CSS_QUEUES] = {
[IPU3_CSS_QUEUE_IN] = {
IMGU_ABI_QUEUE_C_ID,
offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
@@ -120,7 +120,7 @@ static const struct {
};
/* Initialize queue based on given format, adjust format as needed */
-static int ipu3_css_queue_init(struct ipu3_css_queue *queue,
+static int imgu_css_queue_init(struct imgu_css_queue *queue,
struct v4l2_pix_format_mplane *fmt, u32 flags)
{
struct v4l2_pix_format_mplane *const f = &queue->fmt.mpix;
@@ -133,11 +133,11 @@ static int ipu3_css_queue_init(struct ipu3_css_queue *queue,
if (!fmt)
return 0;
- for (i = 0; i < ARRAY_SIZE(ipu3_css_formats); i++) {
- if (!(ipu3_css_formats[i].flags & flags))
+ for (i = 0; i < ARRAY_SIZE(imgu_css_formats); i++) {
+ if (!(imgu_css_formats[i].flags & flags))
continue;
- queue->css_fmt = &ipu3_css_formats[i];
- if (ipu3_css_formats[i].pixelformat == fmt->pixelformat)
+ queue->css_fmt = &imgu_css_formats[i];
+ if (imgu_css_formats[i].pixelformat == fmt->pixelformat)
break;
}
if (!queue->css_fmt)
@@ -178,7 +178,7 @@ static int ipu3_css_queue_init(struct ipu3_css_queue *queue,
return 0;
}
-static bool ipu3_css_queue_enabled(struct ipu3_css_queue *q)
+static bool imgu_css_queue_enabled(struct imgu_css_queue *q)
{
return q->css_fmt;
}
@@ -200,7 +200,7 @@ static inline void writes(const void *mem, ssize_t count, void __iomem *addr)
}
/* Wait until register `reg', masked with `mask', becomes `cmp' */
-static int ipu3_hw_wait(void __iomem *base, int reg, u32 mask, u32 cmp)
+static int imgu_hw_wait(void __iomem *base, int reg, u32 mask, u32 cmp)
{
u32 val;
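imgu_hw_wait, introduced above, polls a register until the masked value equals the expected pattern. The loop body and timeout policy fall outside this hunk, so the sketch below shows only the canonical mask-and-compare poll over an abstract register reader; a real driver would read through readl() and sleep between attempts:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t (*reg_read_fn)(void *ctx, int reg);

	static int hw_wait(void *ctx, reg_read_fn rd, int reg,
			   uint32_t mask, uint32_t cmp, int max_polls)
	{
		while (max_polls--) {
			if ((rd(ctx, reg) & mask) == cmp)
				return 0;
			/* a real loop would usleep_range() here */
		}
		return -1;	/* timeout, -EIO in the callers above */
	}

	/* Test double: the "register" raises its idle bit on read 3. */
	static uint32_t fake_read(void *ctx, int reg)
	{
		(void)reg;
		return --*(int *)ctx ? 0x0 : 0x1;
	}

	int main(void)
	{
		int reads_left = 3;

		printf("wait -> %d\n",
		       hw_wait(&reads_left, fake_read, 0, 0x1, 0x1, 10));
		return 0;
	}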
@@ -210,7 +210,7 @@ static int ipu3_hw_wait(void __iomem *base, int reg, u32 mask, u32 cmp)
/* Initialize the IPU3 CSS hardware and associated h/w blocks */
-int ipu3_css_set_powerup(struct device *dev, void __iomem *base)
+int imgu_css_set_powerup(struct device *dev, void __iomem *base)
{
static const unsigned int freq = 450;
u32 pm_ctrl, state, val;
@@ -221,7 +221,7 @@ int ipu3_css_set_powerup(struct device *dev, void __iomem *base)
writel(0, base + IMGU_REG_GP_BUSY);
/* Wait for idle signal */
- if (ipu3_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
+ if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
IMGU_STATE_IDLE_STS)) {
dev_err(dev, "failed to set CSS idle\n");
goto fail;
@@ -245,7 +245,7 @@ int ipu3_css_set_powerup(struct device *dev, void __iomem *base)
if (state & IMGU_STATE_POWER_DOWN) {
writel(IMGU_PM_CTRL_RACE_TO_HALT | IMGU_PM_CTRL_START,
base + IMGU_REG_PM_CTRL);
- if (ipu3_hw_wait(base, IMGU_REG_PM_CTRL,
+ if (imgu_hw_wait(base, IMGU_REG_PM_CTRL,
IMGU_PM_CTRL_START, 0)) {
dev_err(dev, "failed to power up CSS\n");
goto fail;
@@ -263,7 +263,7 @@ int ipu3_css_set_powerup(struct device *dev, void __iomem *base)
val = pm_ctrl & ~(IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
writel(val, base + IMGU_REG_PM_CTRL);
writel(0, base + IMGU_REG_GP_BUSY);
- if (ipu3_hw_wait(base, IMGU_REG_STATE,
+ if (imgu_hw_wait(base, IMGU_REG_STATE,
IMGU_STATE_PWRDNM_FSM_MASK, 0)) {
dev_err(dev, "failed to pwrdn CSS\n");
goto fail;
@@ -273,7 +273,7 @@ int ipu3_css_set_powerup(struct device *dev, void __iomem *base)
writel(1, base + IMGU_REG_GP_BUSY);
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_HALT,
base + IMGU_REG_PM_CTRL);
- if (ipu3_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
+ if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
IMGU_STATE_HALT_STS)) {
dev_err(dev, "failed to halt CSS\n");
goto fail;
@@ -281,7 +281,7 @@ int ipu3_css_set_powerup(struct device *dev, void __iomem *base)
writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_START,
base + IMGU_REG_PM_CTRL);
- if (ipu3_hw_wait(base, IMGU_REG_PM_CTRL, IMGU_PM_CTRL_START, 0)) {
+ if (imgu_hw_wait(base, IMGU_REG_PM_CTRL, IMGU_PM_CTRL_START, 0)) {
dev_err(dev, "failed to start CSS\n");
goto fail;
}
@@ -296,26 +296,26 @@ int ipu3_css_set_powerup(struct device *dev, void __iomem *base)
return 0;
fail:
- ipu3_css_set_powerdown(dev, base);
+ imgu_css_set_powerdown(dev, base);
return -EIO;
}
-void ipu3_css_set_powerdown(struct device *dev, void __iomem *base)
+void imgu_css_set_powerdown(struct device *dev, void __iomem *base)
{
dev_dbg(dev, "%s\n", __func__);
/* wait for cio idle signal */
- if (ipu3_hw_wait(base, IMGU_REG_CIO_GATE_BURST_STATE,
+ if (imgu_hw_wait(base, IMGU_REG_CIO_GATE_BURST_STATE,
IMGU_CIO_GATE_BURST_MASK, 0))
dev_warn(dev, "wait cio gate idle timeout");
/* wait for css idle signal */
- if (ipu3_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
+ if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
IMGU_STATE_IDLE_STS))
dev_warn(dev, "wait css idle timeout\n");
/* do halt-halted handshake with css */
writel(1, base + IMGU_REG_GP_HALT);
- if (ipu3_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
+ if (imgu_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
IMGU_STATE_HALT_STS))
dev_warn(dev, "failed to halt css");
@@ -323,7 +323,7 @@ void ipu3_css_set_powerdown(struct device *dev, void __iomem *base)
writel(0, base + IMGU_REG_GP_BUSY);
}
-static void ipu3_css_hw_enable_irq(struct ipu3_css *css)
+static void imgu_css_hw_enable_irq(struct imgu_css *css)
{
void __iomem *const base = css->base;
u32 val, i;
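Both power-up and power-down above follow one discipline: write a control bit, then imgu_hw_wait for the state machine to acknowledge, and on any timeout jump to a single rollback point (the power-up path ends in goto fail, which calls imgu_css_set_powerdown). A compressed, runnable illustration of that staged bring-up with one unwind label:

	#include <stdio.h>

	static int step(const char *name, int ok)
	{
		printf("%s: %s\n", name, ok ? "ok" : "failed");
		return ok ? 0 : -1;
	}

	static void powerdown(void)
	{
		printf("powerdown: reverting partial bring-up\n");
	}

	static int powerup(void)
	{
		if (step("wait idle", 1))
			goto fail;
		if (step("set frequency", 1))
			goto fail;
		if (step("start state machine", 0)) /* simulated timeout */
			goto fail;
		return 0;
	fail:
		powerdown();
		return -1;	/* -EIO in the driver */
	}

	int main(void)
	{
		return powerup() ? 1 : 0;
	}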
@@ -371,7 +371,7 @@ static void ipu3_css_hw_enable_irq(struct ipu3_css *css)
}
}
-static int ipu3_css_hw_init(struct ipu3_css *css)
+static int imgu_css_hw_init(struct imgu_css *css)
{
/* For checking that streaming monitor statuses are valid */
static const struct {
@@ -463,11 +463,11 @@ static int ipu3_css_hw_init(struct ipu3_css *css)
/* Initialize GDC with default values */
- for (i = 0; i < ARRAY_SIZE(ipu3_css_gdc_lut[0]); i++) {
- u32 val0 = ipu3_css_gdc_lut[0][i] & IMGU_GDC_LUT_MASK;
- u32 val1 = ipu3_css_gdc_lut[1][i] & IMGU_GDC_LUT_MASK;
- u32 val2 = ipu3_css_gdc_lut[2][i] & IMGU_GDC_LUT_MASK;
- u32 val3 = ipu3_css_gdc_lut[3][i] & IMGU_GDC_LUT_MASK;
+ for (i = 0; i < ARRAY_SIZE(imgu_css_gdc_lut[0]); i++) {
+ u32 val0 = imgu_css_gdc_lut[0][i] & IMGU_GDC_LUT_MASK;
+ u32 val1 = imgu_css_gdc_lut[1][i] & IMGU_GDC_LUT_MASK;
+ u32 val2 = imgu_css_gdc_lut[2][i] & IMGU_GDC_LUT_MASK;
+ u32 val3 = imgu_css_gdc_lut[3][i] & IMGU_GDC_LUT_MASK;
writel(val0 | (val1 << 16),
base + IMGU_REG_GDC_LUT_BASE + i * 8);
@@ -479,7 +479,7 @@ static int ipu3_css_hw_init(struct ipu3_css *css)
}
/* Boot the given IPU3 CSS SP */
-static int ipu3_css_hw_start_sp(struct ipu3_css *css, int sp)
+static int imgu_css_hw_start_sp(struct imgu_css *css, int sp)
{
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
@@ -501,7 +501,7 @@ static int ipu3_css_hw_start_sp(struct ipu3_css *css, int sp)
writel(readl(base + IMGU_REG_SP_CTRL(sp))
| IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_SP_CTRL(sp));
- if (ipu3_hw_wait(css->base, IMGU_REG_SP_DMEM_BASE(sp)
+ if (imgu_hw_wait(css->base, IMGU_REG_SP_DMEM_BASE(sp)
+ bi->info.sp.sw_state,
~0, IMGU_ABI_SP_SWSTATE_INITIALIZED))
return -EIO;
@@ -510,7 +510,7 @@ static int ipu3_css_hw_start_sp(struct ipu3_css *css, int sp)
}
/* Start the IPU3 CSS ImgU (Imaging Unit) and all the SPs */
-static int ipu3_css_hw_start(struct ipu3_css *css)
+static int imgu_css_hw_start(struct imgu_css *css)
{
static const u32 event_mask =
((1 << IMGU_ABI_EVTTYPE_OUT_FRAME_DONE) |
@@ -560,7 +560,7 @@ static int ipu3_css_hw_start(struct ipu3_css *css)
writel(readl(base + IMGU_REG_ISP_CTRL)
| IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_ISP_CTRL);
- if (ipu3_hw_wait(css->base, IMGU_REG_ISP_DMEM_BASE
+ if (imgu_hw_wait(css->base, IMGU_REG_ISP_DMEM_BASE
+ bl->info.bl.sw_state, ~0,
IMGU_ABI_BL_SWSTATE_OK)) {
dev_err(css->dev, "failed to start bootloader\n");
@@ -581,7 +581,7 @@ static int ipu3_css_hw_start(struct ipu3_css *css)
base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state);
writel(1, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.invalidate_tlb);
- if (ipu3_css_hw_start_sp(css, 0))
+ if (imgu_css_hw_start_sp(css, 0))
return -EIO;
writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.isp_started);
@@ -608,7 +608,7 @@ static int ipu3_css_hw_start(struct ipu3_css *css)
writel(IMGU_ABI_SP_SWSTATE_TERMINATED,
base + IMGU_REG_SP_DMEM_BASE(1) + bi->info.sp.sw_state);
- if (ipu3_css_hw_start_sp(css, 1))
+ if (imgu_css_hw_start_sp(css, 1))
return -EIO;
writel(IMGU_ABI_SP_COMM_COMMAND_READY, base + IMGU_REG_SP_DMEM_BASE(1)
@@ -617,7 +617,7 @@ static int ipu3_css_hw_start(struct ipu3_css *css)
return 0;
}
-static void ipu3_css_hw_stop(struct ipu3_css *css)
+static void imgu_css_hw_stop(struct imgu_css *css)
{
void __iomem *const base = css->base;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
@@ -626,18 +626,18 @@ static void ipu3_css_hw_stop(struct ipu3_css *css)
writel(IMGU_ABI_SP_COMM_COMMAND_TERMINATE,
base + IMGU_REG_SP_DMEM_BASE(0) +
bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
- if (ipu3_hw_wait(css->base, IMGU_REG_SP_CTRL(0),
+ if (imgu_hw_wait(css->base, IMGU_REG_SP_CTRL(0),
IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
dev_err(css->dev, "wait sp0 idle timeout.\n");
if (readl(base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state) !=
IMGU_ABI_SP_SWSTATE_TERMINATED)
dev_err(css->dev, "sp0 is not terminated.\n");
- if (ipu3_hw_wait(css->base, IMGU_REG_ISP_CTRL,
+ if (imgu_hw_wait(css->base, IMGU_REG_ISP_CTRL,
IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
dev_err(css->dev, "wait isp idle timeout\n");
}
-static void ipu3_css_hw_cleanup(struct ipu3_css *css)
+static void imgu_css_hw_cleanup(struct imgu_css *css)
{
void __iomem *const base = css->base;
@@ -648,7 +648,7 @@ static void ipu3_css_hw_cleanup(struct ipu3_css *css)
writel(0, base + IMGU_REG_GP_BUSY);
/* Wait for idle signal */
- if (ipu3_hw_wait(css->base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
+ if (imgu_hw_wait(css->base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
IMGU_STATE_IDLE_STS))
dev_err(css->dev, "failed to shut down hw cleanly\n");
@@ -659,19 +659,19 @@ static void ipu3_css_hw_cleanup(struct ipu3_css *css)
usleep_range(200, 300);
}
-static void ipu3_css_pipeline_cleanup(struct ipu3_css *css, unsigned int pipe)
+static void imgu_css_pipeline_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i;
- ipu3_css_pool_cleanup(imgu,
+ imgu_css_pool_cleanup(imgu,
&css->pipes[pipe].pool.parameter_set_info);
- ipu3_css_pool_cleanup(imgu, &css->pipes[pipe].pool.acc);
- ipu3_css_pool_cleanup(imgu, &css->pipes[pipe].pool.gdc);
- ipu3_css_pool_cleanup(imgu, &css->pipes[pipe].pool.obgrid);
+ imgu_css_pool_cleanup(imgu, &css->pipes[pipe].pool.acc);
+ imgu_css_pool_cleanup(imgu, &css->pipes[pipe].pool.gdc);
+ imgu_css_pool_cleanup(imgu, &css->pipes[pipe].pool.obgrid);
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
- ipu3_css_pool_cleanup(imgu,
+ imgu_css_pool_cleanup(imgu,
&css->pipes[pipe].pool.binary_params_p[i]);
}
@@ -679,7 +679,7 @@ static void ipu3_css_pipeline_cleanup(struct ipu3_css *css, unsigned int pipe)
* This function initializes various stages of the
* IPU3 CSS ISP pipeline
*/
-static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
+static int imgu_css_pipeline_init(struct imgu_css *css, unsigned int pipe)
{
static const int BYPC = 2; /* Bytes per component */
static const struct imgu_abi_buffer_sp buffer_sp_init = {
@@ -697,7 +697,7 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
const int stage = 0;
unsigned int i, j;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
@@ -725,7 +725,7 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
/* Configure iterator */
- cfg_iter = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ cfg_iter = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.iterator,
sizeof(*cfg_iter), vaddr);
if (!cfg_iter)
@@ -791,7 +791,7 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
/* Configure reference (delay) frames */
- cfg_ref = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ cfg_ref = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.ref,
sizeof(*cfg_ref), vaddr);
if (!cfg_ref)
@@ -821,7 +821,7 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
/* Configure DVS (digital video stabilization) */
- cfg_dvs = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ cfg_dvs = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.dvs, sizeof(*cfg_dvs),
vaddr);
if (!cfg_dvs)
@@ -837,7 +837,7 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
/* Configure TNR (temporal noise reduction) */
if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
- cfg_tnr = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ cfg_tnr = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.tnr3,
sizeof(*cfg_tnr),
vaddr);
@@ -868,7 +868,7 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
cfg = IMGU_ABI_PARAM_CLASS_STATE;
vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
- cfg_ref_state = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ cfg_ref_state = imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&sofs->dmem.ref,
sizeof(*cfg_ref_state),
vaddr);
@@ -881,7 +881,7 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
/* Configure tnr dmem state parameters */
if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
cfg_tnr_state =
- ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ imgu_css_fw_pipeline_params(css, pipe, cfg, m0,
&sofs->dmem.tnr3,
sizeof(*cfg_tnr_state),
vaddr);
@@ -1068,21 +1068,21 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
/* Initialize parameter pools */
- if (ipu3_css_pool_init(imgu, &css_pipe->pool.parameter_set_info,
+ if (imgu_css_pool_init(imgu, &css_pipe->pool.parameter_set_info,
sizeof(struct imgu_abi_parameter_set_info)) ||
- ipu3_css_pool_init(imgu, &css_pipe->pool.acc,
+ imgu_css_pool_init(imgu, &css_pipe->pool.acc,
sizeof(struct imgu_abi_acc_param)) ||
- ipu3_css_pool_init(imgu, &css_pipe->pool.gdc,
+ imgu_css_pool_init(imgu, &css_pipe->pool.gdc,
sizeof(struct imgu_abi_gdc_warp_param) *
3 * cfg_dvs->num_horizontal_blocks / 2 *
cfg_dvs->num_vertical_blocks) ||
- ipu3_css_pool_init(imgu, &css_pipe->pool.obgrid,
- ipu3_css_fw_obgrid_size(
+ imgu_css_pool_init(imgu, &css_pipe->pool.obgrid,
+ imgu_css_fw_obgrid_size(
&css->fwp->binary_header[css_pipe->bindex])))
goto out_of_memory;
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
- if (ipu3_css_pool_init(imgu,
+ if (imgu_css_pool_init(imgu,
&css_pipe->pool.binary_params_p[i],
bi->info.isp.sp.mem_initializers.params
[IMGU_ABI_PARAM_CLASS_PARAM][i].size))
@@ -1091,15 +1091,15 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
return 0;
bad_firmware:
- ipu3_css_pipeline_cleanup(css, pipe);
+ imgu_css_pipeline_cleanup(css, pipe);
return -EPROTO;
out_of_memory:
- ipu3_css_pipeline_cleanup(css, pipe);
+ imgu_css_pipeline_cleanup(css, pipe);
return -ENOMEM;
}
-static u8 ipu3_css_queue_pos(struct ipu3_css *css, int queue, int thread)
+static u8 imgu_css_queue_pos(struct imgu_css *css, int queue, int thread)
{
static const unsigned int sp;
void __iomem *const base = css->base;
@@ -1112,7 +1112,7 @@ static u8 ipu3_css_queue_pos(struct ipu3_css *css, int queue, int thread)
}
/* Send data to sp using given buffer queue, or if queue < 0, event queue. */
-static int ipu3_css_queue_data(struct ipu3_css *css,
+static int imgu_css_queue_data(struct imgu_css *css,
int queue, int thread, u32 data)
{
static const unsigned int sp;
@@ -1151,7 +1151,7 @@ static int ipu3_css_queue_data(struct ipu3_css *css,
}
/* Receive data using given buffer queue, or if queue < 0, event queue. */
-static int ipu3_css_dequeue_data(struct ipu3_css *css, int queue, u32 *data)
+static int imgu_css_dequeue_data(struct imgu_css *css, int queue, u32 *data)
{
static const unsigned int sp;
void __iomem *const base = css->base;
@@ -1188,7 +1188,7 @@ static int ipu3_css_dequeue_data(struct ipu3_css *css, int queue, u32 *data)
writeb(start2, &q->sp2host_evtq_info.start);
/* Acknowledge events dequeued from event queue */
- r = ipu3_css_queue_data(css, queue, 0,
+ r = imgu_css_queue_data(css, queue, 0,
IMGU_ABI_EVENT_EVENT_DEQUEUED);
if (r < 0)
return r;
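Dequeueing above is a two-step protocol: pop the payload from the sp2host circular queue by advancing its start pointer, then post an EVENT_DEQUEUED token back through imgu_css_queue_data so the firmware learns that a slot was freed. A standalone model of that consume-then-acknowledge pairing (no firmware involved; names invented):

	#include <stdio.h>

	#define QSZ 8

	struct ring_q {
		unsigned int start, end;  /* consumer / producer cursors */
		unsigned int data[QSZ];
	};

	static int q_pop(struct ring_q *q, unsigned int *out)
	{
		if (q->start == q->end)
			return -1;	/* -EBUSY: nothing queued */
		*out = q->data[q->start % QSZ];
		q->start++;
		return 0;
	}

	static void q_push(struct ring_q *q, unsigned int v)
	{
		q->data[q->end % QSZ] = v;
		q->end++;
	}

	int main(void)
	{
		struct ring_q from_fw = { 0 }, to_fw = { 0 };
		unsigned int v;

		q_push(&from_fw, 0xabcd);
		if (!q_pop(&from_fw, &v)) {
			/* ack so the producer can reuse the slot */
			q_push(&to_fw, 0x1 /* "event dequeued" token */);
			printf("got 0x%x, acked\n", v);
		}
		return 0;
	}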
@@ -1198,52 +1198,52 @@ static int ipu3_css_dequeue_data(struct ipu3_css *css, int queue, u32 *data)
}
/* Free binary-specific resources */
-static void ipu3_css_binary_cleanup(struct ipu3_css *css, unsigned int pipe)
+static void imgu_css_binary_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i, j;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
for (j = 0; j < IMGU_ABI_PARAM_CLASS_NUM - 1; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
- ipu3_dmamap_free(imgu,
+ imgu_dmamap_free(imgu,
&css_pipe->binary_params_cs[j][i]);
j = IPU3_CSS_AUX_FRAME_REF;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- ipu3_dmamap_free(imgu,
+ imgu_dmamap_free(imgu,
&css_pipe->aux_frames[j].mem[i]);
j = IPU3_CSS_AUX_FRAME_TNR;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- ipu3_dmamap_free(imgu,
+ imgu_dmamap_free(imgu,
&css_pipe->aux_frames[j].mem[i]);
}
-static int ipu3_css_binary_preallocate(struct ipu3_css *css, unsigned int pipe)
+static int imgu_css_binary_preallocate(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i, j;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
for (j = IMGU_ABI_PARAM_CLASS_CONFIG;
j < IMGU_ABI_PARAM_CLASS_NUM; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
- if (!ipu3_dmamap_alloc(imgu,
+ if (!imgu_dmamap_alloc(imgu,
&css_pipe->binary_params_cs[j - 1][i],
CSS_ABI_SIZE))
goto out_of_memory;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- if (!ipu3_dmamap_alloc(imgu,
+ if (!imgu_dmamap_alloc(imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].
mem[i], CSS_BDS_SIZE))
goto out_of_memory;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- if (!ipu3_dmamap_alloc(imgu,
+ if (!imgu_dmamap_alloc(imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].
mem[i], CSS_GDC_SIZE))
goto out_of_memory;
@@ -1251,14 +1251,14 @@ static int ipu3_css_binary_preallocate(struct ipu3_css *css, unsigned int pipe)
return 0;
out_of_memory:
- ipu3_css_binary_cleanup(css, pipe);
+ imgu_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
/* allocate binary-specific resources */
-static int ipu3_css_binary_setup(struct ipu3_css *css, unsigned int pipe)
+static int imgu_css_binary_setup(struct imgu_css *css, unsigned int pipe)
{
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
struct imgu_fw_info *bi = &css->fwp->binary_header[css_pipe->bindex];
struct imgu_device *imgu = dev_get_drvdata(css->dev);
int i, j, size;
@@ -1269,7 +1269,7 @@ static int ipu3_css_binary_setup(struct ipu3_css *css, unsigned int pipe)
for (j = IMGU_ABI_PARAM_CLASS_CONFIG; j < IMGU_ABI_PARAM_CLASS_NUM; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++) {
- if (ipu3_css_dma_buffer_resize(
+ if (imgu_css_dma_buffer_resize(
imgu,
&css_pipe->binary_params_cs[j - 1][i],
bi->info.isp.sp.mem_initializers.params[j][i].size))
@@ -1292,7 +1292,7 @@ static int ipu3_css_binary_setup(struct ipu3_css *css, unsigned int pipe)
css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel * w;
size = w * h * BYPC + (w / 2) * (h / 2) * BYPC * 2;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- if (ipu3_css_dma_buffer_resize(
+ if (imgu_css_dma_buffer_resize(
imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
size))
@@ -1313,7 +1313,7 @@ static int ipu3_css_binary_setup(struct ipu3_css *css, unsigned int pipe)
h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
size = w * ALIGN(h * 3 / 2 + 3, 2); /* +3 for vf_pp prefetch */
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- if (ipu3_css_dma_buffer_resize(
+ if (imgu_css_dma_buffer_resize(
imgu,
&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
size))
@@ -1322,11 +1322,11 @@ static int ipu3_css_binary_setup(struct ipu3_css *css, unsigned int pipe)
return 0;
out_of_memory:
- ipu3_css_binary_cleanup(css, pipe);
+ imgu_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
-int ipu3_css_start_streaming(struct ipu3_css *css)
+int imgu_css_start_streaming(struct imgu_css *css)
{
u32 data;
int r, pipe;
@@ -1335,48 +1335,48 @@ int ipu3_css_start_streaming(struct ipu3_css *css)
return -EPROTO;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
- r = ipu3_css_binary_setup(css, pipe);
+ r = imgu_css_binary_setup(css, pipe);
if (r < 0)
return r;
}
- r = ipu3_css_hw_init(css);
+ r = imgu_css_hw_init(css);
if (r < 0)
return r;
- r = ipu3_css_hw_start(css);
+ r = imgu_css_hw_start(css);
if (r < 0)
goto fail;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
- r = ipu3_css_pipeline_init(css, pipe);
+ r = imgu_css_pipeline_init(css, pipe);
if (r < 0)
goto fail;
}
css->streaming = true;
- ipu3_css_hw_enable_irq(css);
+ imgu_css_hw_enable_irq(css);
/* Initialize parameters to default */
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
- r = ipu3_css_set_parameters(css, pipe, NULL);
+ r = imgu_css_set_parameters(css, pipe, NULL);
if (r < 0)
goto fail;
}
- while (!(r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_A_ID, &data)))
+ while (!(r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_A_ID, &data)))
;
if (r != -EBUSY)
goto fail;
- while (!(r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_B_ID, &data)))
+ while (!(r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_B_ID, &data)))
;
if (r != -EBUSY)
goto fail;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_START_STREAM |
pipe << 16);
if (r < 0)
@@ -1387,22 +1387,22 @@ int ipu3_css_start_streaming(struct ipu3_css *css)
fail:
css->streaming = false;
- ipu3_css_hw_cleanup(css);
+ imgu_css_hw_cleanup(css);
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
- ipu3_css_pipeline_cleanup(css, pipe);
- ipu3_css_binary_cleanup(css, pipe);
+ imgu_css_pipeline_cleanup(css, pipe);
+ imgu_css_binary_cleanup(css, pipe);
}
return r;
}
-void ipu3_css_stop_streaming(struct ipu3_css *css)
+void imgu_css_stop_streaming(struct imgu_css *css)
{
- struct ipu3_css_buffer *b, *b0;
+ struct imgu_css_buffer *b, *b0;
int q, r, pipe;
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_STOP_STREAM);
if (r < 0)
dev_warn(css->dev, "failed on stop stream event\n");
@@ -1411,14 +1411,14 @@ void ipu3_css_stop_streaming(struct ipu3_css *css)
if (!css->streaming)
return;
- ipu3_css_hw_stop(css);
+ imgu_css_hw_stop(css);
- ipu3_css_hw_cleanup(css);
+ imgu_css_hw_cleanup(css);
for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
- ipu3_css_pipeline_cleanup(css, pipe);
+ imgu_css_pipeline_cleanup(css, pipe);
spin_lock(&css_pipe->qlock);
for (q = 0; q < IPU3_CSS_QUEUES; q++)
@@ -1434,10 +1434,10 @@ void ipu3_css_stop_streaming(struct ipu3_css *css)
css->streaming = false;
}
-bool ipu3_css_pipe_queue_empty(struct ipu3_css *css, unsigned int pipe)
+bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe)
{
int q;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
spin_lock(&css_pipe->qlock);
for (q = 0; q < IPU3_CSS_QUEUES; q++)
@@ -1447,44 +1447,44 @@ bool ipu3_css_pipe_queue_empty(struct ipu3_css *css, unsigned int pipe)
return (q == IPU3_CSS_QUEUES);
}
-bool ipu3_css_queue_empty(struct ipu3_css *css)
+bool imgu_css_queue_empty(struct imgu_css *css)
{
unsigned int pipe;
	bool ret = true;
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
- ret &= ipu3_css_pipe_queue_empty(css, pipe);
+ ret &= imgu_css_pipe_queue_empty(css, pipe);
return ret;
}
-bool ipu3_css_is_streaming(struct ipu3_css *css)
+bool imgu_css_is_streaming(struct imgu_css *css)
{
return css->streaming;
}
-static int ipu3_css_map_init(struct ipu3_css *css, unsigned int pipe)
+static int imgu_css_map_init(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int p, q, i;
/* Allocate and map common structures with imgu hardware */
for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
- if (!ipu3_dmamap_alloc(imgu,
+ if (!imgu_dmamap_alloc(imgu,
&css_pipe->
xmem_sp_stage_ptrs[p][i],
sizeof(struct imgu_abi_sp_stage)))
return -ENOMEM;
- if (!ipu3_dmamap_alloc(imgu,
+ if (!imgu_dmamap_alloc(imgu,
&css_pipe->
xmem_isp_stage_ptrs[p][i],
sizeof(struct imgu_abi_isp_stage)))
return -ENOMEM;
}
- if (!ipu3_dmamap_alloc(imgu, &css_pipe->sp_ddr_ptrs,
+ if (!imgu_dmamap_alloc(imgu, &css_pipe->sp_ddr_ptrs,
ALIGN(sizeof(struct imgu_abi_ddr_address_map),
IMGU_ABI_ISP_DDR_WORD_BYTES)))
return -ENOMEM;
@@ -1493,58 +1493,58 @@ static int ipu3_css_map_init(struct ipu3_css *css, unsigned int pipe)
unsigned int abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
for (i = 0; i < abi_buf_num; i++)
- if (!ipu3_dmamap_alloc(imgu,
+ if (!imgu_dmamap_alloc(imgu,
&css_pipe->abi_buffers[q][i],
sizeof(struct imgu_abi_buffer)))
return -ENOMEM;
}
- if (ipu3_css_binary_preallocate(css, pipe)) {
- ipu3_css_binary_cleanup(css, pipe);
+ if (imgu_css_binary_preallocate(css, pipe)) {
+ imgu_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
return 0;
}
-static void ipu3_css_pipe_cleanup(struct ipu3_css *css, unsigned int pipe)
+static void imgu_css_pipe_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int p, q, i, abi_buf_num;
- ipu3_css_binary_cleanup(css, pipe);
+ imgu_css_binary_cleanup(css, pipe);
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
for (i = 0; i < abi_buf_num; i++)
- ipu3_dmamap_free(imgu, &css_pipe->abi_buffers[q][i]);
+ imgu_dmamap_free(imgu, &css_pipe->abi_buffers[q][i]);
}
for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
- ipu3_dmamap_free(imgu,
+ imgu_dmamap_free(imgu,
&css_pipe->xmem_sp_stage_ptrs[p][i]);
- ipu3_dmamap_free(imgu,
+ imgu_dmamap_free(imgu,
&css_pipe->xmem_isp_stage_ptrs[p][i]);
}
- ipu3_dmamap_free(imgu, &css_pipe->sp_ddr_ptrs);
+ imgu_dmamap_free(imgu, &css_pipe->sp_ddr_ptrs);
}
-void ipu3_css_cleanup(struct ipu3_css *css)
+void imgu_css_cleanup(struct imgu_css *css)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int pipe;
- ipu3_css_stop_streaming(css);
+ imgu_css_stop_streaming(css);
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
- ipu3_css_pipe_cleanup(css, pipe);
- ipu3_dmamap_free(imgu, &css->xmem_sp_group_ptrs);
- ipu3_css_fw_cleanup(css);
+ imgu_css_pipe_cleanup(css, pipe);
+ imgu_dmamap_free(imgu, &css->xmem_sp_group_ptrs);
+ imgu_css_fw_cleanup(css);
}
-int ipu3_css_init(struct device *dev, struct ipu3_css *css,
+int imgu_css_init(struct device *dev, struct imgu_css *css,
void __iomem *base, int length)
{
struct imgu_device *imgu = dev_get_drvdata(dev);
@@ -1556,35 +1556,35 @@ int ipu3_css_init(struct device *dev, struct ipu3_css *css,
css->iomem_length = length;
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++) {
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
css_pipe->vf_output_en = false;
spin_lock_init(&css_pipe->qlock);
css_pipe->bindex = IPU3_CSS_DEFAULT_BINARY;
css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
- r = ipu3_css_queue_init(&css_pipe->queue[q], NULL, 0);
+ r = imgu_css_queue_init(&css_pipe->queue[q], NULL, 0);
if (r)
return r;
}
- r = ipu3_css_map_init(css, pipe);
+ r = imgu_css_map_init(css, pipe);
if (r) {
- ipu3_css_cleanup(css);
+ imgu_css_cleanup(css);
return r;
}
}
- if (!ipu3_dmamap_alloc(imgu, &css->xmem_sp_group_ptrs,
+ if (!imgu_dmamap_alloc(imgu, &css->xmem_sp_group_ptrs,
sizeof(struct imgu_abi_sp_group)))
return -ENOMEM;
- r = ipu3_css_fw_init(css);
+ r = imgu_css_fw_init(css);
if (r)
return r;
return 0;
}
-static u32 ipu3_css_adjust(u32 res, u32 align)
+static u32 imgu_css_adjust(u32 res, u32 align)
{
u32 val = max_t(u32, IPU3_CSS_MIN_RES, res);
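imgu_css_adjust first clamps the requested dimension up to IPU3_CSS_MIN_RES; the return statement lies outside this hunk, but a helper of this shape typically then rounds the clamped value to the required alignment. A self-contained version under that assumption (round-up chosen here; the driver may round differently):

	#include <stdio.h>

	#define MIN_RES 32	/* stands in for IPU3_CSS_MIN_RES */

	static unsigned int adjust(unsigned int res, unsigned int align)
	{
		unsigned int val = res < MIN_RES ? MIN_RES : res;

		/* round up to the next multiple of align */
		return (val + align - 1) / align * align;
	}

	int main(void)
	{
		/* prints: 32 40 40 */
		printf("%u %u %u\n",
		       adjust(1, 8), adjust(33, 8), adjust(40, 8));
		return 0;
	}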
@@ -1592,9 +1592,9 @@ static u32 ipu3_css_adjust(u32 res, u32 align)
}
/* Select a binary matching the required resolutions and formats */
-static int ipu3_css_find_binary(struct ipu3_css *css,
+static int imgu_css_find_binary(struct imgu_css *css,
unsigned int pipe,
- struct ipu3_css_queue queue[IPU3_CSS_QUEUES],
+ struct imgu_css_queue queue[IPU3_CSS_QUEUES],
struct v4l2_rect rects[IPU3_CSS_RECTS])
{
const int binary_nr = css->fwp->file_header.binary_nr;
@@ -1611,7 +1611,7 @@ static int ipu3_css_find_binary(struct ipu3_css *css,
const char *name;
int i, j;
- if (!ipu3_css_queue_enabled(&queue[IPU3_CSS_QUEUE_IN]))
+ if (!imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_IN]))
return -EINVAL;
/* Find out the strip size boundary */
@@ -1659,7 +1659,7 @@ static int ipu3_css_find_binary(struct ipu3_css *css,
in->height > bi->info.isp.sp.input.max_height)
continue;
- if (ipu3_css_queue_enabled(&queue[IPU3_CSS_QUEUE_OUT])) {
+ if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_OUT])) {
if (bi->info.isp.num_output_pins <= 0)
continue;
@@ -1681,7 +1681,7 @@ static int ipu3_css_find_binary(struct ipu3_css *css,
continue;
}
- if (ipu3_css_queue_enabled(&queue[IPU3_CSS_QUEUE_VF])) {
+ if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_VF])) {
if (bi->info.isp.num_output_pins <= 1)
continue;
@@ -1716,7 +1716,7 @@ static int ipu3_css_find_binary(struct ipu3_css *css,
 * found binary number. May modify the given parameters if no exact match
 * is found.
*/
-int ipu3_css_fmt_try(struct ipu3_css *css,
+int imgu_css_fmt_try(struct imgu_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
struct v4l2_rect *rects[IPU3_CSS_RECTS],
unsigned int pipe)
@@ -1744,14 +1744,14 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
struct v4l2_rect *const bds = &r[IPU3_CSS_RECT_BDS];
struct v4l2_rect *const env = &r[IPU3_CSS_RECT_ENVELOPE];
struct v4l2_rect *const gdc = &r[IPU3_CSS_RECT_GDC];
- struct ipu3_css_queue q[IPU3_CSS_QUEUES];
+ struct imgu_css_queue q[IPU3_CSS_QUEUES];
struct v4l2_pix_format_mplane *const in =
&q[IPU3_CSS_QUEUE_IN].fmt.mpix;
struct v4l2_pix_format_mplane *const out =
&q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
struct v4l2_pix_format_mplane *const vf =
&q[IPU3_CSS_QUEUE_VF].fmt.mpix;
- int i, s;
+ int i, s, ret;
/* Adjust all formats, get statistics buffer sizes and formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
@@ -1762,7 +1762,7 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
else
dev_dbg(css->dev, "%s %s: (not set)\n", __func__,
qnames[i]);
- if (ipu3_css_queue_init(&q[i], fmts[i],
+ if (imgu_css_queue_init(&q[i], fmts[i],
IPU3_CSS_QUEUE_TO_FLAGS(i))) {
			dev_notice(css->dev, "cannot initialize queue %s\n",
qnames[i]);
@@ -1785,13 +1785,13 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
}
/* Always require one input; allow vf only if out is also enabled */
- if (!ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
- !ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
+ if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
+ !imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
dev_warn(css->dev, "required queues are disabled\n");
return -EINVAL;
}
- if (!ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
+ if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
out->width = in->width;
out->height = in->height;
}
@@ -1808,30 +1808,30 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
gdc->height = out->height;
}
- in->width = ipu3_css_adjust(in->width, 1);
- in->height = ipu3_css_adjust(in->height, 1);
- eff->width = ipu3_css_adjust(eff->width, EFF_ALIGN_W);
- eff->height = ipu3_css_adjust(eff->height, 1);
- bds->width = ipu3_css_adjust(bds->width, BDS_ALIGN_W);
- bds->height = ipu3_css_adjust(bds->height, 1);
- gdc->width = ipu3_css_adjust(gdc->width, OUT_ALIGN_W);
- gdc->height = ipu3_css_adjust(gdc->height, OUT_ALIGN_H);
- out->width = ipu3_css_adjust(out->width, OUT_ALIGN_W);
- out->height = ipu3_css_adjust(out->height, OUT_ALIGN_H);
- vf->width = ipu3_css_adjust(vf->width, VF_ALIGN_W);
- vf->height = ipu3_css_adjust(vf->height, 1);
+ in->width = imgu_css_adjust(in->width, 1);
+ in->height = imgu_css_adjust(in->height, 1);
+ eff->width = imgu_css_adjust(eff->width, EFF_ALIGN_W);
+ eff->height = imgu_css_adjust(eff->height, 1);
+ bds->width = imgu_css_adjust(bds->width, BDS_ALIGN_W);
+ bds->height = imgu_css_adjust(bds->height, 1);
+ gdc->width = imgu_css_adjust(gdc->width, OUT_ALIGN_W);
+ gdc->height = imgu_css_adjust(gdc->height, OUT_ALIGN_H);
+ out->width = imgu_css_adjust(out->width, OUT_ALIGN_W);
+ out->height = imgu_css_adjust(out->height, OUT_ALIGN_H);
+ vf->width = imgu_css_adjust(vf->width, VF_ALIGN_W);
+ vf->height = imgu_css_adjust(vf->height, 1);
s = (bds->width - gdc->width) / 2 - FILTER_SIZE;
env->width = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
s = (bds->height - gdc->height) / 2 - FILTER_SIZE;
env->height = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
- css->pipes[pipe].bindex =
- ipu3_css_find_binary(css, pipe, q, r);
- if (css->pipes[pipe].bindex < 0) {
+ ret = imgu_css_find_binary(css, pipe, q, r);
+ if (ret < 0) {
dev_err(css->dev, "failed to find suitable binary\n");
return -EINVAL;
}
+ css->pipes[pipe].bindex = ret;
dev_dbg(css->dev, "Binary index %d for pipe %d found.",
css->pipes[pipe].bindex, pipe);
@@ -1839,7 +1839,7 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
/* Final adjustment and set back the queried formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
if (fmts[i]) {
- if (ipu3_css_queue_init(&q[i], &q[i].fmt.mpix,
+ if (imgu_css_queue_init(&q[i], &q[i].fmt.mpix,
IPU3_CSS_QUEUE_TO_FLAGS(i))) {
dev_err(css->dev,
"final resolution adjustment failed\n");
@@ -1862,7 +1862,7 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
return 0;
}
-int ipu3_css_fmt_set(struct ipu3_css *css,
+int imgu_css_fmt_set(struct imgu_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
struct v4l2_rect *rects[IPU3_CSS_RECTS],
unsigned int pipe)
@@ -1870,7 +1870,7 @@ int ipu3_css_fmt_set(struct ipu3_css *css,
struct v4l2_rect rect_data[IPU3_CSS_RECTS];
struct v4l2_rect *all_rects[IPU3_CSS_RECTS];
int i, r;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
for (i = 0; i < IPU3_CSS_RECTS; i++) {
if (rects[i])
@@ -1879,12 +1879,12 @@ int ipu3_css_fmt_set(struct ipu3_css *css,
memset(&rect_data[i], 0, sizeof(rect_data[i]));
all_rects[i] = &rect_data[i];
}
- r = ipu3_css_fmt_try(css, fmts, all_rects, pipe);
+ r = imgu_css_fmt_try(css, fmts, all_rects, pipe);
if (r < 0)
return r;
for (i = 0; i < IPU3_CSS_QUEUES; i++)
- if (ipu3_css_queue_init(&css_pipe->queue[i], fmts[i],
+ if (imgu_css_queue_init(&css_pipe->queue[i], fmts[i],
IPU3_CSS_QUEUE_TO_FLAGS(i)))
return -EINVAL;
for (i = 0; i < IPU3_CSS_RECTS; i++) {
@@ -1896,7 +1896,7 @@ int ipu3_css_fmt_set(struct ipu3_css *css,
return 0;
}
-int ipu3_css_meta_fmt_set(struct v4l2_meta_format *fmt)
+int imgu_css_meta_fmt_set(struct v4l2_meta_format *fmt)
{
switch (fmt->dataformat) {
case V4L2_META_FMT_IPU3_PARAMS:
@@ -1913,27 +1913,27 @@ int ipu3_css_meta_fmt_set(struct v4l2_meta_format *fmt)
}
/*
- * Queue given buffer to CSS. ipu3_css_buf_prepare() must have been first
+ * Queue given buffer to CSS. imgu_css_buf_prepare() must have been first
* called for the buffer. May be called from interrupt context.
 * Returns 0 on success, -EBUSY if the buffer queue is full, or another
 * negative error code on other failures.
*/
-int ipu3_css_buf_queue(struct ipu3_css *css, unsigned int pipe,
- struct ipu3_css_buffer *b)
+int imgu_css_buf_queue(struct imgu_css *css, unsigned int pipe,
+ struct imgu_css_buffer *b)
{
struct imgu_abi_buffer *abi_buf;
struct imgu_addr_t *buf_addr;
u32 data;
int r;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
if (!css->streaming)
return -EPROTO; /* CSS or buffer in wrong state */
- if (b->queue >= IPU3_CSS_QUEUES || !ipu3_css_queues[b->queue].qid)
+ if (b->queue >= IPU3_CSS_QUEUES || !imgu_css_queues[b->queue].qid)
return -EINVAL;
- b->queue_pos = ipu3_css_queue_pos(css, ipu3_css_queues[b->queue].qid,
+ b->queue_pos = imgu_css_queue_pos(css, imgu_css_queues[b->queue].qid,
pipe);
if (b->queue_pos >= ARRAY_SIZE(css->pipes[pipe].abi_buffers[b->queue]))
@@ -1943,7 +1943,7 @@ int ipu3_css_buf_queue(struct ipu3_css *css, unsigned int pipe,
/* Fill struct abi_buffer for firmware */
memset(abi_buf, 0, sizeof(*abi_buf));
- buf_addr = (void *)abi_buf + ipu3_css_queues[b->queue].ptr_ofs;
+ buf_addr = (void *)abi_buf + imgu_css_queues[b->queue].ptr_ofs;
*(imgu_addr_t *)buf_addr = b->daddr;
if (b->queue == IPU3_CSS_QUEUE_STAT_3A)
@@ -1963,14 +1963,14 @@ int ipu3_css_buf_queue(struct ipu3_css *css, unsigned int pipe,
b->state = IPU3_CSS_BUFFER_QUEUED;
data = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].daddr;
- r = ipu3_css_queue_data(css, ipu3_css_queues[b->queue].qid,
+ r = imgu_css_queue_data(css, imgu_css_queues[b->queue].qid,
pipe, data);
if (r < 0)
goto queueing_failed;
data = IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
- ipu3_css_queues[b->queue].qid);
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe, data);
+ imgu_css_queues[b->queue].qid);
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe, data);
if (r < 0)
goto queueing_failed;
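Queueing a buffer above is likewise two writes: the DMA address of the ABI descriptor goes into the per-queue ring, then a BUFFER_ENQUEUED event word carrying the pipe and queue id is posted on the event queue. Packing several fields into one event word is easy to get wrong, so here is a small encode/decode model; the field layout below is invented, not the IMGU ABI's:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical packing: event type in the low byte, queue id in
	 * bits 8..15, pipe in bits 16..23. */
	#define EVT_BUF_ENQUEUED 0x42u

	static uint32_t pack_enqueued(unsigned int pipe, unsigned int qid)
	{
		return EVT_BUF_ENQUEUED | (qid << 8) | (pipe << 16);
	}

	int main(void)
	{
		uint32_t ev = pack_enqueued(1, 3);

		printf("event=0x%06x pipe=%u qid=%u\n", (unsigned int)ev,
		       (unsigned int)((ev >> 16) & 0xff),
		       (unsigned int)((ev >> 8) & 0xff));
		return 0;
	}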
@@ -1992,7 +1992,7 @@ queueing_failed:
* should be called again, or -EBUSY which means that there are no more
* buffers available. May be called from interrupt context.
*/
-struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
+struct imgu_css_buffer *imgu_css_buf_dequeue(struct imgu_css *css)
{
static const unsigned char evtype_to_queue[] = {
[IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE] = IPU3_CSS_QUEUE_IN,
@@ -2000,15 +2000,15 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
[IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_VF,
[IMGU_ABI_EVTTYPE_3A_STATS_DONE] = IPU3_CSS_QUEUE_STAT_3A,
};
- struct ipu3_css_buffer *b = ERR_PTR(-EAGAIN);
+ struct imgu_css_buffer *b = ERR_PTR(-EAGAIN);
u32 event, daddr;
int evtype, pipe, pipeid, queue, qid, r;
- struct ipu3_css_pipe *css_pipe;
+ struct imgu_css_pipe *css_pipe;
if (!css->streaming)
return ERR_PTR(-EPROTO);
- r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
+ r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
if (r < 0)
return ERR_PTR(r);
@@ -2025,7 +2025,7 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
pipeid = (event & IMGU_ABI_EVTTYPE_PIPEID_MASK) >>
IMGU_ABI_EVTTYPE_PIPEID_SHIFT;
queue = evtype_to_queue[evtype];
- qid = ipu3_css_queues[queue].qid;
+ qid = imgu_css_queues[queue].qid;
if (pipe >= IMGU_MAX_PIPE_NUM) {
dev_err(css->dev, "Invalid pipe: %i\n", pipe);
@@ -2041,14 +2041,14 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
"event: buffer done 0x%x queue %i pipe %i pipeid %i\n",
event, queue, pipe, pipeid);
- r = ipu3_css_dequeue_data(css, qid, &daddr);
+ r = imgu_css_dequeue_data(css, qid, &daddr);
if (r < 0) {
dev_err(css->dev, "failed to dequeue buffer\n");
/* Force real error, not -EBUSY */
return ERR_PTR(-EIO);
}
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_DEQUEUED(qid));
if (r < 0) {
dev_err(css->dev, "failed to queue event\n");
@@ -2062,7 +2062,7 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
return ERR_PTR(-EIO);
}
b = list_first_entry(&css_pipe->queue[queue].bufs,
- struct ipu3_css_buffer, list);
+ struct imgu_css_buffer, list);
if (queue != b->queue ||
daddr != css_pipe->abi_buffers
[b->queue][b->queue_pos].daddr) {
@@ -2090,7 +2090,7 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
event, pipe);
break;
case IMGU_ABI_EVTTYPE_TIMER:
- r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
+ r = imgu_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
if (r < 0)
return ERR_PTR(r);
@@ -2128,11 +2128,11 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
 * Return the index into css->parameter_set_info that holds the newly
 * created parameters, or a negative value on error.
*/
-int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
+int imgu_css_set_parameters(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_params *set_params)
{
static const unsigned int queue_id = IMGU_ABI_QUEUE_A_ID;
- struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
const int stage = 0;
const struct imgu_fw_info *bi;
int obgrid_size;
@@ -2144,7 +2144,7 @@ int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
struct imgu_abi_acc_param *acc = NULL;
struct imgu_abi_gdc_warp_param *gdc = NULL;
struct ipu3_uapi_obgrid_param *obgrid = NULL;
- const struct ipu3_css_map *map;
+ const struct imgu_css_map *map;
void *vmem0 = NULL;
void *dmem0 = NULL;
@@ -2157,7 +2157,7 @@ int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
dev_dbg(css->dev, "%s for pipe %d", __func__, pipe);
bi = &css->fwp->binary_header[css_pipe->bindex];
- obgrid_size = ipu3_css_fw_obgrid_size(bi);
+ obgrid_size = imgu_css_fw_obgrid_size(bi);
stripes = bi->info.isp.sp.iterator.num_stripes ? : 1;
/*
@@ -2165,45 +2165,45 @@ int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
* parameters from previous buffers will be overwritten. Fix the driver
* not to allow this.
*/
- ipu3_css_pool_get(&css_pipe->pool.parameter_set_info);
- param_set = ipu3_css_pool_last(&css_pipe->pool.parameter_set_info,
+ imgu_css_pool_get(&css_pipe->pool.parameter_set_info);
+ param_set = imgu_css_pool_last(&css_pipe->pool.parameter_set_info,
0)->vaddr;
/* Get a new acc only if new parameters given, or none yet */
- map = ipu3_css_pool_last(&css_pipe->pool.acc, 0);
+ map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
if (set_params || !map->vaddr) {
- ipu3_css_pool_get(&css_pipe->pool.acc);
- map = ipu3_css_pool_last(&css_pipe->pool.acc, 0);
+ imgu_css_pool_get(&css_pipe->pool.acc);
+ map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
acc = map->vaddr;
}
/* Get new VMEM0 only if needed, or none yet */
m = IMGU_ABI_MEM_ISP_VMEM0;
- map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params ||
set_params->use.tnr3_vmem_params ||
set_params->use.xnr3_vmem_params))) {
- ipu3_css_pool_get(&css_pipe->pool.binary_params_p[m]);
- map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
vmem0 = map->vaddr;
}
/* Get new DMEM0 only if needed, or none yet */
m = IMGU_ABI_MEM_ISP_DMEM0;
- map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params ||
set_params->use.xnr3_dmem_params))) {
- ipu3_css_pool_get(&css_pipe->pool.binary_params_p[m]);
- map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ imgu_css_pool_get(&css_pipe->pool.binary_params_p[m]);
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
dmem0 = map->vaddr;
}
/* Configure acc parameter cluster */
if (acc) {
/* get acc_old */
- map = ipu3_css_pool_last(&css_pipe->pool.acc, 1);
+ map = imgu_css_pool_last(&css_pipe->pool.acc, 1);
/* user acc */
- r = ipu3_css_cfg_acc(css, pipe, use, acc, map->vaddr,
+ r = imgu_css_cfg_acc(css, pipe, use, acc, map->vaddr,
set_params ? &set_params->acc_param : NULL);
if (r < 0)
goto fail;
@@ -2212,8 +2212,8 @@ int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
/* Configure late binding parameters */
if (vmem0) {
m = IMGU_ABI_MEM_ISP_VMEM0;
- map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
- r = ipu3_css_cfg_vmem0(css, pipe, use, vmem0,
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
+ r = imgu_css_cfg_vmem0(css, pipe, use, vmem0,
map->vaddr, set_params);
if (r < 0)
goto fail;
@@ -2221,8 +2221,8 @@ int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
if (dmem0) {
m = IMGU_ABI_MEM_ISP_DMEM0;
- map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
- r = ipu3_css_cfg_dmem0(css, pipe, use, dmem0,
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
+ r = imgu_css_cfg_dmem0(css, pipe, use, dmem0,
map->vaddr, set_params);
if (r < 0)
goto fail;
@@ -2234,12 +2234,12 @@ int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
unsigned int g = IPU3_CSS_RECT_GDC;
unsigned int e = IPU3_CSS_RECT_ENVELOPE;
- map = ipu3_css_pool_last(&css_pipe->pool.gdc, 0);
+ map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
if (!map->vaddr) {
- ipu3_css_pool_get(&css_pipe->pool.gdc);
- map = ipu3_css_pool_last(&css_pipe->pool.gdc, 0);
+ imgu_css_pool_get(&css_pipe->pool.gdc);
+ map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
gdc = map->vaddr;
- ipu3_css_cfg_gdc_table(map->vaddr,
+ imgu_css_cfg_gdc_table(map->vaddr,
css_pipe->aux_frames[a].bytesperline /
css_pipe->aux_frames[a].bytesperpixel,
css_pipe->aux_frames[a].height,
@@ -2252,10 +2252,10 @@ int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
}
/* Get a new obgrid only if a new obgrid is given, or none yet */
- map = ipu3_css_pool_last(&css_pipe->pool.obgrid, 0);
+ map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
if (!map->vaddr || (set_params && set_params->use.obgrid_param)) {
- ipu3_css_pool_get(&css_pipe->pool.obgrid);
- map = ipu3_css_pool_last(&css_pipe->pool.obgrid, 0);
+ imgu_css_pool_get(&css_pipe->pool.obgrid);
+ map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
obgrid = map->vaddr;
/* Configure optical black level grid (obgrid) */
@@ -2269,30 +2269,30 @@ int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
/* Configure parameter set info, queued to `queue_id' */
memset(param_set, 0, sizeof(*param_set));
- map = ipu3_css_pool_last(&css_pipe->pool.acc, 0);
+ map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
param_set->mem_map.acc_cluster_params_for_sp = map->daddr;
- map = ipu3_css_pool_last(&css_pipe->pool.gdc, 0);
+ map = imgu_css_pool_last(&css_pipe->pool.gdc, 0);
param_set->mem_map.dvs_6axis_params_y = map->daddr;
for (i = 0; i < stripes; i++) {
- map = ipu3_css_pool_last(&css_pipe->pool.obgrid, 0);
+ map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0);
param_set->mem_map.obgrid_tbl[i] =
map->daddr + (obgrid_size / stripes) * i;
}
for (m = 0; m < IMGU_ABI_NUM_MEMORIES; m++) {
- map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
param_set->mem_map.isp_mem_param[stage][m] = map->daddr;
}
/* Then queue the new parameter buffer */
- map = ipu3_css_pool_last(&css_pipe->pool.parameter_set_info, 0);
- r = ipu3_css_queue_data(css, queue_id, pipe, map->daddr);
+ map = imgu_css_pool_last(&css_pipe->pool.parameter_set_info, 0);
+ r = imgu_css_queue_data(css, queue_id, pipe, map->daddr);
if (r < 0)
goto fail;
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
queue_id));
if (r < 0)
@@ -2303,12 +2303,12 @@ int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
do {
u32 daddr;
- r = ipu3_css_dequeue_data(css, queue_id, &daddr);
+ r = imgu_css_dequeue_data(css, queue_id, &daddr);
if (r == -EBUSY)
break;
if (r)
goto fail_no_put;
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ r = imgu_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_DEQUEUED
(queue_id));
if (r < 0) {
@@ -2326,19 +2326,19 @@ fail:
* parameters again later.
*/
- ipu3_css_pool_put(&css_pipe->pool.parameter_set_info);
+ imgu_css_pool_put(&css_pipe->pool.parameter_set_info);
if (acc)
- ipu3_css_pool_put(&css_pipe->pool.acc);
+ imgu_css_pool_put(&css_pipe->pool.acc);
if (gdc)
- ipu3_css_pool_put(&css_pipe->pool.gdc);
+ imgu_css_pool_put(&css_pipe->pool.gdc);
if (obgrid)
- ipu3_css_pool_put(&css_pipe->pool.obgrid);
+ imgu_css_pool_put(&css_pipe->pool.obgrid);
if (vmem0)
- ipu3_css_pool_put(
+ imgu_css_pool_put(
&css_pipe->pool.binary_params_p
[IMGU_ABI_MEM_ISP_VMEM0]);
if (dmem0)
- ipu3_css_pool_put(
+ imgu_css_pool_put(
&css_pipe->pool.binary_params_p
[IMGU_ABI_MEM_ISP_DMEM0]);
@@ -2346,7 +2346,7 @@ fail_no_put:
return r;
}
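/*
 * Editorial note, not part of the patch: imgu_css_set_parameters()
 * above relies on a pool rotation idiom -- imgu_css_pool_get()
 * advances a pool to a fresh entry, imgu_css_pool_last(pool, 0)
 * returns the newest entry (an argument of 1 returns the previous
 * one), and imgu_css_pool_put() rolls the advance back, which is why
 * the fail path puts every pool that was touched: the firmware keeps
 * seeing the last good parameters. A hedged sketch, with fill_entry()
 * as a hypothetical stand-in for the imgu_css_cfg_*() helpers:
 *
 *	imgu_css_pool_get(&css_pipe->pool.acc);
 *	map = imgu_css_pool_last(&css_pipe->pool.acc, 0);
 *	if (fill_entry(map->vaddr) < 0)
 *		imgu_css_pool_put(&css_pipe->pool.acc);
 */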
-int ipu3_css_irq_ack(struct ipu3_css *css)
+int imgu_css_irq_ack(struct imgu_css *css)
{
static const int NUM_SWIRQS = 3;
struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
diff --git a/drivers/staging/media/ipu3/ipu3-css.h b/drivers/staging/media/ipu3/ipu3-css.h
index e88d60f1a0c3..6b8bab27ab1f 100644
--- a/drivers/staging/media/ipu3/ipu3-css.h
+++ b/drivers/staging/media/ipu3/ipu3-css.h
@@ -43,7 +43,7 @@
* The pipe id type distinguishes the kinds of pipes that
* can be run in parallel.
*/
-enum ipu3_css_pipe_id {
+enum imgu_css_pipe_id {
IPU3_CSS_PIPE_ID_PREVIEW,
IPU3_CSS_PIPE_ID_COPY,
IPU3_CSS_PIPE_ID_VIDEO,
@@ -53,29 +53,29 @@ enum ipu3_css_pipe_id {
IPU3_CSS_PIPE_ID_NUM
};
-struct ipu3_css_resolution {
+struct imgu_css_resolution {
u32 w;
u32 h;
};
-enum ipu3_css_buffer_state {
+enum imgu_css_buffer_state {
IPU3_CSS_BUFFER_NEW, /* Not yet queued */
IPU3_CSS_BUFFER_QUEUED, /* Queued, waiting to be filled */
IPU3_CSS_BUFFER_DONE, /* Finished processing, removed from queue */
IPU3_CSS_BUFFER_FAILED, /* Was not processed, removed from queue */
};
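/*
 * Editorial note, not part of the patch: the states above form the
 * buffer lifecycle implied by the queueing API declared later in this
 * header -- imgu_css_buf_init() leaves a buffer in IPU3_CSS_BUFFER_NEW,
 * imgu_css_buf_queue() moves it to IPU3_CSS_BUFFER_QUEUED, and
 * imgu_css_buf_dequeue() hands it back as IPU3_CSS_BUFFER_DONE or, if
 * it was removed from the queue without processing,
 * IPU3_CSS_BUFFER_FAILED.
 */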
-struct ipu3_css_buffer {
+struct imgu_css_buffer {
/* Private fields: user doesn't touch */
dma_addr_t daddr;
unsigned int queue;
- enum ipu3_css_buffer_state state;
+ enum imgu_css_buffer_state state;
struct list_head list;
u8 queue_pos;
unsigned int pipe;
};
-struct ipu3_css_format {
+struct imgu_css_format {
u32 pixelformat;
enum v4l2_colorspace colorspace;
enum imgu_abi_frame_format frame_format;
@@ -89,22 +89,22 @@ struct ipu3_css_format {
u8 flags;
};
-struct ipu3_css_queue {
+struct imgu_css_queue {
union {
struct v4l2_pix_format_mplane mpix;
struct v4l2_meta_format meta;
} fmt;
- const struct ipu3_css_format *css_fmt;
+ const struct imgu_css_format *css_fmt;
unsigned int width_pad;
struct list_head bufs;
};
-struct ipu3_css_pipe {
- enum ipu3_css_pipe_id pipe_id;
+struct imgu_css_pipe {
+ enum imgu_css_pipe_id pipe_id;
unsigned int bindex;
- struct ipu3_css_queue queue[IPU3_CSS_QUEUES];
+ struct imgu_css_queue queue[IPU3_CSS_QUEUES];
struct v4l2_rect rect[IPU3_CSS_RECTS];
bool vf_output_en;
@@ -112,21 +112,21 @@ struct ipu3_css_pipe {
spinlock_t qlock;
/* Data structures shared with IMGU and driver, always allocated */
- struct ipu3_css_map sp_ddr_ptrs;
- struct ipu3_css_map xmem_sp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
+ struct imgu_css_map sp_ddr_ptrs;
+ struct imgu_css_map xmem_sp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
[IMGU_ABI_MAX_STAGES];
- struct ipu3_css_map xmem_isp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
+ struct imgu_css_map xmem_isp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
[IMGU_ABI_MAX_STAGES];
/*
* Data structures shared with IMGU and driver, binary specific.
* PARAM_CLASS_CONFIG and PARAM_CLASS_STATE parameters.
*/
- struct ipu3_css_map binary_params_cs[IMGU_ABI_PARAM_CLASS_NUM - 1]
+ struct imgu_css_map binary_params_cs[IMGU_ABI_PARAM_CLASS_NUM - 1]
[IMGU_ABI_NUM_MEMORIES];
struct {
- struct ipu3_css_map mem[IPU3_CSS_AUX_FRAMES];
+ struct imgu_css_map mem[IPU3_CSS_AUX_FRAMES];
unsigned int width;
unsigned int height;
unsigned int bytesperline;
@@ -134,76 +134,76 @@ struct ipu3_css_pipe {
} aux_frames[IPU3_CSS_AUX_FRAME_TYPES];
struct {
- struct ipu3_css_pool parameter_set_info;
- struct ipu3_css_pool acc;
- struct ipu3_css_pool gdc;
- struct ipu3_css_pool obgrid;
+ struct imgu_css_pool parameter_set_info;
+ struct imgu_css_pool acc;
+ struct imgu_css_pool gdc;
+ struct imgu_css_pool obgrid;
/* PARAM_CLASS_PARAM parameters for binding while streaming */
- struct ipu3_css_pool binary_params_p[IMGU_ABI_NUM_MEMORIES];
+ struct imgu_css_pool binary_params_p[IMGU_ABI_NUM_MEMORIES];
} pool;
- struct ipu3_css_map abi_buffers[IPU3_CSS_QUEUES]
+ struct imgu_css_map abi_buffers[IPU3_CSS_QUEUES]
[IMGU_ABI_HOST2SP_BUFQ_SIZE];
};
/* IPU3 Camera Sub System structure */
-struct ipu3_css {
+struct imgu_css {
struct device *dev;
void __iomem *base;
const struct firmware *fw;
struct imgu_fw_header *fwp;
int iomem_length;
int fw_bl, fw_sp[IMGU_NUM_SP]; /* Indices of bl and SP binaries */
- struct ipu3_css_map *binary; /* fw binaries mapped to device */
+ struct imgu_css_map *binary; /* fw binaries mapped to device */
bool streaming; /* true when streaming is enabled */
- struct ipu3_css_pipe pipes[IMGU_MAX_PIPE_NUM];
- struct ipu3_css_map xmem_sp_group_ptrs;
+ struct imgu_css_pipe pipes[IMGU_MAX_PIPE_NUM];
+ struct imgu_css_map xmem_sp_group_ptrs;
/* enabled pipe(s) */
DECLARE_BITMAP(enabled_pipes, IMGU_MAX_PIPE_NUM);
};
/******************* css v4l *******************/
-int ipu3_css_init(struct device *dev, struct ipu3_css *css,
+int imgu_css_init(struct device *dev, struct imgu_css *css,
void __iomem *base, int length);
-void ipu3_css_cleanup(struct ipu3_css *css);
-int ipu3_css_fmt_try(struct ipu3_css *css,
+void imgu_css_cleanup(struct imgu_css *css);
+int imgu_css_fmt_try(struct imgu_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
struct v4l2_rect *rects[IPU3_CSS_RECTS],
unsigned int pipe);
-int ipu3_css_fmt_set(struct ipu3_css *css,
+int imgu_css_fmt_set(struct imgu_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
struct v4l2_rect *rects[IPU3_CSS_RECTS],
unsigned int pipe);
-int ipu3_css_meta_fmt_set(struct v4l2_meta_format *fmt);
-int ipu3_css_buf_queue(struct ipu3_css *css, unsigned int pipe,
- struct ipu3_css_buffer *b);
-struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css);
-int ipu3_css_start_streaming(struct ipu3_css *css);
-void ipu3_css_stop_streaming(struct ipu3_css *css);
-bool ipu3_css_queue_empty(struct ipu3_css *css);
-bool ipu3_css_is_streaming(struct ipu3_css *css);
-bool ipu3_css_pipe_queue_empty(struct ipu3_css *css, unsigned int pipe);
+int imgu_css_meta_fmt_set(struct v4l2_meta_format *fmt);
+int imgu_css_buf_queue(struct imgu_css *css, unsigned int pipe,
+ struct imgu_css_buffer *b);
+struct imgu_css_buffer *imgu_css_buf_dequeue(struct imgu_css *css);
+int imgu_css_start_streaming(struct imgu_css *css);
+void imgu_css_stop_streaming(struct imgu_css *css);
+bool imgu_css_queue_empty(struct imgu_css *css);
+bool imgu_css_is_streaming(struct imgu_css *css);
+bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe);
/******************* css hw *******************/
-int ipu3_css_set_powerup(struct device *dev, void __iomem *base);
-void ipu3_css_set_powerdown(struct device *dev, void __iomem *base);
-int ipu3_css_irq_ack(struct ipu3_css *css);
+int imgu_css_set_powerup(struct device *dev, void __iomem *base);
+void imgu_css_set_powerdown(struct device *dev, void __iomem *base);
+int imgu_css_irq_ack(struct imgu_css *css);
/******************* set parameters ************/
-int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
+int imgu_css_set_parameters(struct imgu_css *css, unsigned int pipe,
struct ipu3_uapi_params *set_params);
/******************* auxiliary helpers *******************/
-static inline enum ipu3_css_buffer_state
-ipu3_css_buf_state(struct ipu3_css_buffer *b)
+static inline enum imgu_css_buffer_state
+imgu_css_buf_state(struct imgu_css_buffer *b)
{
return b->state;
}
/* Initialize given buffer. May be called several times. */
-static inline void ipu3_css_buf_init(struct ipu3_css_buffer *b,
+static inline void imgu_css_buf_init(struct imgu_css_buffer *b,
unsigned int queue, dma_addr_t daddr)
{
b->state = IPU3_CSS_BUFFER_NEW;
diff --git a/drivers/staging/media/ipu3/ipu3-dmamap.c b/drivers/staging/media/ipu3/ipu3-dmamap.c
index 93a393d4e15e..d978a00e1e0b 100644
--- a/drivers/staging/media/ipu3/ipu3-dmamap.c
+++ b/drivers/staging/media/ipu3/ipu3-dmamap.c
@@ -12,11 +12,12 @@
#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-mmu.h"
+#include "ipu3-dmamap.h"
/*
- * Free a buffer allocated by ipu3_dmamap_alloc_buffer()
+ * Free a buffer allocated by imgu_dmamap_alloc_buffer()
*/
-static void ipu3_dmamap_free_buffer(struct page **pages,
+static void imgu_dmamap_free_buffer(struct page **pages,
size_t size)
{
int count = size >> PAGE_SHIFT;
@@ -30,7 +31,7 @@ static void ipu3_dmamap_free_buffer(struct page **pages,
* Based on the implementation of __iommu_dma_alloc_pages()
* defined in drivers/iommu/dma-iommu.c
*/
-static struct page **ipu3_dmamap_alloc_buffer(size_t size,
+static struct page **imgu_dmamap_alloc_buffer(size_t size,
unsigned long order_mask,
gfp_t gfp)
{
@@ -73,7 +74,7 @@ static struct page **ipu3_dmamap_alloc_buffer(size_t size,
__free_pages(page, order);
}
if (!page) {
- ipu3_dmamap_free_buffer(pages, i << PAGE_SHIFT);
+ imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
return NULL;
}
count -= order_size;
@@ -85,7 +86,7 @@ static struct page **ipu3_dmamap_alloc_buffer(size_t size,
}
/**
- * ipu3_dmamap_alloc - allocate and map a buffer into KVA
+ * imgu_dmamap_alloc - allocate and map a buffer into KVA
* @imgu: struct imgu_device pointer
* @map: struct to store mapping variables
* @len: size required
@@ -94,7 +95,7 @@ static struct page **ipu3_dmamap_alloc_buffer(size_t size,
* KVA on success
* %NULL on failure
*/
-void *ipu3_dmamap_alloc(struct imgu_device *imgu, struct ipu3_css_map *map,
+void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
size_t len)
{
unsigned long shift = iova_shift(&imgu->iova_domain);
@@ -113,7 +114,7 @@ void *ipu3_dmamap_alloc(struct imgu_device *imgu, struct ipu3_css_map *map,
if (!iova)
return NULL;
- pages = ipu3_dmamap_alloc_buffer(size, alloc_sizes >> PAGE_SHIFT,
+ pages = imgu_dmamap_alloc_buffer(size, alloc_sizes >> PAGE_SHIFT,
GFP_KERNEL);
if (!pages)
goto out_free_iova;
@@ -121,7 +122,7 @@ void *ipu3_dmamap_alloc(struct imgu_device *imgu, struct ipu3_css_map *map,
/* Call IOMMU driver to setup pgt */
iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
for (i = 0; i < size / PAGE_SIZE; ++i) {
- rval = ipu3_mmu_map(imgu->mmu, iovaddr,
+ rval = imgu_mmu_map(imgu->mmu, iovaddr,
page_to_phys(pages[i]), PAGE_SIZE);
if (rval)
goto out_unmap;
@@ -152,8 +153,8 @@ out_vunmap:
vunmap(map->vma->addr);
out_unmap:
- ipu3_dmamap_free_buffer(pages, size);
- ipu3_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
+ imgu_dmamap_free_buffer(pages, size);
+ imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
i * PAGE_SIZE);
map->vma = NULL;
@@ -163,7 +164,7 @@ out_free_iova:
return NULL;
}
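/*
 * Editorial sketch, not part of the patch: a caller's view of the
 * allocator above. "imgu" is assumed to be a probed struct
 * imgu_device; the map records both the CPU view (map->vaddr) and the
 * device view (map->daddr) of the buffer.
 */
static int example_alloc_roundtrip(struct imgu_device *imgu)
{
	struct imgu_css_map map = {};
	void *vaddr = imgu_dmamap_alloc(imgu, &map, PAGE_SIZE);

	if (!vaddr)
		return -ENOMEM;
	/* CPU fills the buffer via vaddr, the device reads map->daddr */
	imgu_dmamap_free(imgu, &map);
	return 0;
}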
-void ipu3_dmamap_unmap(struct imgu_device *imgu, struct ipu3_css_map *map)
+void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
{
struct iova *iova;
@@ -172,16 +173,16 @@ void ipu3_dmamap_unmap(struct imgu_device *imgu, struct ipu3_css_map *map)
if (WARN_ON(!iova))
return;
- ipu3_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
+ imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
iova_size(iova) << iova_shift(&imgu->iova_domain));
__free_iova(&imgu->iova_domain, iova);
}
/*
- * Counterpart of ipu3_dmamap_alloc
+ * Counterpart of imgu_dmamap_alloc
*/
-void ipu3_dmamap_free(struct imgu_device *imgu, struct ipu3_css_map *map)
+void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
{
struct vm_struct *area = map->vma;
@@ -191,18 +192,18 @@ void ipu3_dmamap_free(struct imgu_device *imgu, struct ipu3_css_map *map)
if (!map->vaddr)
return;
- ipu3_dmamap_unmap(imgu, map);
+ imgu_dmamap_unmap(imgu, map);
if (WARN_ON(!area) || WARN_ON(!area->pages))
return;
- ipu3_dmamap_free_buffer(area->pages, map->size);
+ imgu_dmamap_free_buffer(area->pages, map->size);
vunmap(map->vaddr);
map->vaddr = NULL;
}
-int ipu3_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
- int nents, struct ipu3_css_map *map)
+int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
+ int nents, struct imgu_css_map *map)
{
unsigned long shift = iova_shift(&imgu->iova_domain);
struct scatterlist *sg;
@@ -232,7 +233,7 @@ int ipu3_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
iova->pfn_lo, iova->pfn_hi);
- if (ipu3_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
+ if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
sglist, nents) < size)
goto out_fail;
@@ -248,7 +249,7 @@ out_fail:
return -EFAULT;
}
-int ipu3_dmamap_init(struct imgu_device *imgu)
+int imgu_dmamap_init(struct imgu_device *imgu)
{
unsigned long order, base_pfn;
int ret = iova_cache_get();
@@ -263,7 +264,7 @@ int ipu3_dmamap_init(struct imgu_device *imgu)
return 0;
}
-void ipu3_dmamap_exit(struct imgu_device *imgu)
+void imgu_dmamap_exit(struct imgu_device *imgu)
{
put_iova_domain(&imgu->iova_domain);
iova_cache_put();
diff --git a/drivers/staging/media/ipu3/ipu3-dmamap.h b/drivers/staging/media/ipu3/ipu3-dmamap.h
index b9d224a33273..9db513b3d603 100644
--- a/drivers/staging/media/ipu3/ipu3-dmamap.h
+++ b/drivers/staging/media/ipu3/ipu3-dmamap.h
@@ -8,15 +8,15 @@
struct imgu_device;
struct scatterlist;
-void *ipu3_dmamap_alloc(struct imgu_device *imgu, struct ipu3_css_map *map,
+void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
size_t len);
-void ipu3_dmamap_free(struct imgu_device *imgu, struct ipu3_css_map *map);
+void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map);
-int ipu3_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
- int nents, struct ipu3_css_map *map);
-void ipu3_dmamap_unmap(struct imgu_device *imgu, struct ipu3_css_map *map);
+int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
+ int nents, struct imgu_css_map *map);
+void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map);
-int ipu3_dmamap_init(struct imgu_device *imgu);
-void ipu3_dmamap_exit(struct imgu_device *imgu);
+int imgu_dmamap_init(struct imgu_device *imgu);
+void imgu_dmamap_exit(struct imgu_device *imgu);
#endif
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c
index b9f209541f78..cfc2bdfb14b3 100644
--- a/drivers/staging/media/ipu3/ipu3-mmu.c
+++ b/drivers/staging/media/ipu3/ipu3-mmu.c
@@ -48,7 +48,7 @@
#define REG_GP_HALT (IMGU_REG_BASE + 0x5dc)
#define REG_GP_HALTED (IMGU_REG_BASE + 0x5e0)
-struct ipu3_mmu {
+struct imgu_mmu {
struct device *dev;
void __iomem *base;
/* protect access to l2pts, l1pt */
@@ -63,28 +63,28 @@ struct ipu3_mmu {
u32 **l2pts;
u32 *l1pt;
- struct ipu3_mmu_info geometry;
+ struct imgu_mmu_info geometry;
};
-static inline struct ipu3_mmu *to_ipu3_mmu(struct ipu3_mmu_info *info)
+static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info)
{
- return container_of(info, struct ipu3_mmu, geometry);
+ return container_of(info, struct imgu_mmu, geometry);
}
/**
- * ipu3_mmu_tlb_invalidate - invalidate translation look-aside buffer
+ * imgu_mmu_tlb_invalidate - invalidate translation look-aside buffer
* @mmu: MMU to perform the invalidate operation on
*
* This function invalidates the whole TLB. Must be called when the hardware
* is powered on.
*/
-static void ipu3_mmu_tlb_invalidate(struct ipu3_mmu *mmu)
+static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
{
writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
}
-static void call_if_ipu3_is_powered(struct ipu3_mmu *mmu,
- void (*func)(struct ipu3_mmu *mmu))
+static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
+ void (*func)(struct imgu_mmu *mmu))
{
if (!pm_runtime_get_if_in_use(mmu->dev))
return;
@@ -94,14 +94,14 @@ static void call_if_ipu3_is_powered(struct ipu3_mmu *mmu,
}
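/*
 * Editorial note, not part of the patch: the hunk elides the rest of
 * call_if_imgu_is_powered(). Given the pm_runtime_get_if_in_use()
 * check above, the body presumably invokes func(mmu) and then drops
 * the reference with pm_runtime_put(mmu->dev), so the callback only
 * ever runs against powered-up hardware.
 */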
/**
- * ipu3_mmu_set_halt - set CIO gate halt bit
+ * imgu_mmu_set_halt - set CIO gate halt bit
* @mmu: MMU to set the CIO gate bit in.
* @halt: Desired state of the gate bit.
*
* This function sets the CIO gate bit that controls whether external memory
* accesses are allowed. Must be called when the hardware is powered on.
*/
-static void ipu3_mmu_set_halt(struct ipu3_mmu *mmu, bool halt)
+static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
{
int ret;
u32 val;
@@ -116,12 +116,12 @@ static void ipu3_mmu_set_halt(struct ipu3_mmu *mmu, bool halt)
}
/**
- * ipu3_mmu_alloc_page_table - allocate a pre-filled page table
+ * imgu_mmu_alloc_page_table - allocate a pre-filled page table
* @pteval: Value to initialize the page table entries with.
*
* Return: Pointer to allocated page table or NULL on failure.
*/
-static u32 *ipu3_mmu_alloc_page_table(u32 pteval)
+static u32 *imgu_mmu_alloc_page_table(u32 pteval)
{
u32 *pt;
int pte;
@@ -139,10 +139,10 @@ static u32 *ipu3_mmu_alloc_page_table(u32 pteval)
}
/**
- * ipu3_mmu_free_page_table - free page table
+ * imgu_mmu_free_page_table - free page table
* @pt: Page table to free.
*/
-static void ipu3_mmu_free_page_table(u32 *pt)
+static void imgu_mmu_free_page_table(u32 *pt)
{
set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
free_page((unsigned long)pt);
@@ -168,7 +168,7 @@ static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
*l1pt_idx = iova & IPU3_L1PT_MASK;
}
-static u32 *ipu3_mmu_get_l2pt(struct ipu3_mmu *mmu, u32 l1pt_idx)
+static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
unsigned long flags;
u32 *l2pt, *new_l2pt;
@@ -182,7 +182,7 @@ static u32 *ipu3_mmu_get_l2pt(struct ipu3_mmu *mmu, u32 l1pt_idx)
spin_unlock_irqrestore(&mmu->lock, flags);
- new_l2pt = ipu3_mmu_alloc_page_table(mmu->dummy_page_pteval);
+ new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
if (!new_l2pt)
return NULL;
@@ -193,7 +193,7 @@ static u32 *ipu3_mmu_get_l2pt(struct ipu3_mmu *mmu, u32 l1pt_idx)
l2pt = mmu->l2pts[l1pt_idx];
if (l2pt) {
- ipu3_mmu_free_page_table(new_l2pt);
+ imgu_mmu_free_page_table(new_l2pt);
goto done;
}
@@ -208,7 +208,7 @@ done:
return l2pt;
}
-static int __ipu3_mmu_map(struct ipu3_mmu *mmu, unsigned long iova,
+static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
phys_addr_t paddr)
{
u32 l1pt_idx, l2pt_idx;
@@ -220,7 +220,7 @@ static int __ipu3_mmu_map(struct ipu3_mmu *mmu, unsigned long iova,
address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);
- l2pt = ipu3_mmu_get_l2pt(mmu, l1pt_idx);
+ l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
if (!l2pt)
return -ENOMEM;
@@ -238,11 +238,11 @@ static int __ipu3_mmu_map(struct ipu3_mmu *mmu, unsigned long iova,
return 0;
}
-/**
+/*
* The following four functions are implemented based on iommu.c
* drivers/iommu/iommu.c/iommu_pgsize().
*/
-static size_t ipu3_mmu_pgsize(unsigned long pgsize_bitmap,
+static size_t imgu_mmu_pgsize(unsigned long pgsize_bitmap,
unsigned long addr_merge, size_t size)
{
unsigned int pgsize_idx;
@@ -276,10 +276,10 @@ static size_t ipu3_mmu_pgsize(unsigned long pgsize_bitmap,
}
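/*
 * Editorial sketch, not part of the patch: the hunk above elides the
 * body of imgu_mmu_pgsize(). Following iommu_pgsize() in
 * drivers/iommu/iommu.c, which the comment names as its model, the
 * selection logic is assumed to look roughly like this -- pick the
 * largest supported page size that fits both the remaining size and
 * the alignment of the merged address:
 */
static size_t example_mmu_pgsize(unsigned long pgsize_bitmap,
				 unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	unsigned long pgsizes;

	/* Largest page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* Address alignment may force a smaller page size */
	if (addr_merge)
		pgsize_idx = min(pgsize_idx,
				 (unsigned int)__ffs(addr_merge));

	/* Keep only hardware-supported sizes up to that limit */
	pgsizes = GENMASK(pgsize_idx, 0) & pgsize_bitmap;

	/* Pick the biggest remaining page size */
	return pgsizes ? BIT(__fls(pgsizes)) : 0;
}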
/* drivers/iommu/iommu.c/iommu_map() */
-int ipu3_mmu_map(struct ipu3_mmu_info *info, unsigned long iova,
+int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
phys_addr_t paddr, size_t size)
{
- struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
unsigned int min_pagesz;
int ret = 0;
@@ -301,13 +301,13 @@ int ipu3_mmu_map(struct ipu3_mmu_info *info, unsigned long iova,
iova, &paddr, size);
while (size) {
- size_t pgsize = ipu3_mmu_pgsize(mmu->geometry.pgsize_bitmap,
+ size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
iova | paddr, size);
dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
- ret = __ipu3_mmu_map(mmu, iova, paddr);
+ ret = __imgu_mmu_map(mmu, iova, paddr);
if (ret)
break;
@@ -316,16 +316,16 @@ int ipu3_mmu_map(struct ipu3_mmu_info *info, unsigned long iova,
size -= pgsize;
}
- call_if_ipu3_is_powered(mmu, ipu3_mmu_tlb_invalidate);
+ call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
return ret;
}
/* drivers/iommu/iommu.c/default_iommu_map_sg() */
-size_t ipu3_mmu_map_sg(struct ipu3_mmu_info *info, unsigned long iova,
+size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
struct scatterlist *sg, unsigned int nents)
{
- struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
struct scatterlist *s;
size_t s_length, mapped = 0;
unsigned int i, min_pagesz;
@@ -345,25 +345,25 @@ size_t ipu3_mmu_map_sg(struct ipu3_mmu_info *info, unsigned long iova,
if (i == nents - 1 && !IS_ALIGNED(s->length, min_pagesz))
s_length = PAGE_ALIGN(s->length);
- ret = ipu3_mmu_map(info, iova + mapped, phys, s_length);
+ ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
if (ret)
goto out_err;
mapped += s_length;
}
- call_if_ipu3_is_powered(mmu, ipu3_mmu_tlb_invalidate);
+ call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
return mapped;
out_err:
/* undo mappings already done */
- ipu3_mmu_unmap(info, iova, mapped);
+ imgu_mmu_unmap(info, iova, mapped);
return 0;
}
-static size_t __ipu3_mmu_unmap(struct ipu3_mmu *mmu,
+static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
unsigned long iova, size_t size)
{
u32 l1pt_idx, l2pt_idx;
@@ -395,10 +395,10 @@ static size_t __ipu3_mmu_unmap(struct ipu3_mmu *mmu,
}
/* drivers/iommu/iommu.c/iommu_unmap() */
-size_t ipu3_mmu_unmap(struct ipu3_mmu_info *info, unsigned long iova,
+size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
size_t size)
{
- struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
size_t unmapped_page, unmapped = 0;
unsigned int min_pagesz;
@@ -423,10 +423,10 @@ size_t ipu3_mmu_unmap(struct ipu3_mmu_info *info, unsigned long iova,
* or we hit an area that isn't mapped.
*/
while (unmapped < size) {
- size_t pgsize = ipu3_mmu_pgsize(mmu->geometry.pgsize_bitmap,
+ size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
iova, size - unmapped);
- unmapped_page = __ipu3_mmu_unmap(mmu, iova, pgsize);
+ unmapped_page = __imgu_mmu_unmap(mmu, iova, pgsize);
if (!unmapped_page)
break;
@@ -437,20 +437,21 @@ size_t ipu3_mmu_unmap(struct ipu3_mmu_info *info, unsigned long iova,
unmapped += unmapped_page;
}
- call_if_ipu3_is_powered(mmu, ipu3_mmu_tlb_invalidate);
+ call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
return unmapped;
}
/**
- * ipu3_mmu_init() - initialize IPU3 MMU block
+ * imgu_mmu_init() - initialize IPU3 MMU block
+ * @parent: struct device parent
* @base: IOMEM base of hardware registers.
*
* Return: Pointer to IPU3 MMU private data pointer or ERR_PTR() on error.
*/
-struct ipu3_mmu_info *ipu3_mmu_init(struct device *parent, void __iomem *base)
+struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
{
- struct ipu3_mmu *mmu;
+ struct imgu_mmu *mmu;
u32 pteval;
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
@@ -462,7 +463,7 @@ struct ipu3_mmu_info *ipu3_mmu_init(struct device *parent, void __iomem *base)
spin_lock_init(&mmu->lock);
/* Disallow external memory access while there are no valid page tables. */
- ipu3_mmu_set_halt(mmu, true);
+ imgu_mmu_set_halt(mmu, true);
/*
* The MMU does not have a "valid" bit, so we have to use a dummy
@@ -478,7 +479,7 @@ struct ipu3_mmu_info *ipu3_mmu_init(struct device *parent, void __iomem *base)
* Allocate a dummy L2 page table with all entries pointing to
* the dummy page.
*/
- mmu->dummy_l2pt = ipu3_mmu_alloc_page_table(pteval);
+ mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
if (!mmu->dummy_l2pt)
goto fail_dummy_page;
pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
@@ -493,14 +494,14 @@ struct ipu3_mmu_info *ipu3_mmu_init(struct device *parent, void __iomem *base)
goto fail_l2pt;
/* Allocate the L1 page table. */
- mmu->l1pt = ipu3_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
+ mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
if (!mmu->l1pt)
goto fail_l2pts;
pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
writel(pteval, mmu->base + REG_L1_PHYS);
- ipu3_mmu_tlb_invalidate(mmu);
- ipu3_mmu_set_halt(mmu, false);
+ imgu_mmu_tlb_invalidate(mmu);
+ imgu_mmu_set_halt(mmu, false);
mmu->geometry.aperture_start = 0;
mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);
@@ -511,7 +512,7 @@ struct ipu3_mmu_info *ipu3_mmu_init(struct device *parent, void __iomem *base)
fail_l2pts:
vfree(mmu->l2pts);
fail_l2pt:
- ipu3_mmu_free_page_table(mmu->dummy_l2pt);
+ imgu_mmu_free_page_table(mmu->dummy_l2pt);
fail_dummy_page:
free_page((unsigned long)mmu->dummy_page);
fail_group:
@@ -521,41 +522,41 @@ fail_group:
}
/**
- * ipu3_mmu_exit() - clean up IPU3 MMU block
- * @mmu: IPU3 MMU private data
+ * imgu_mmu_exit() - clean up IPU3 MMU block
+ * @info: IPU3 MMU private data
*/
-void ipu3_mmu_exit(struct ipu3_mmu_info *info)
+void imgu_mmu_exit(struct imgu_mmu_info *info)
{
- struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
/* We are going to free our page tables, no more memory access. */
- ipu3_mmu_set_halt(mmu, true);
- ipu3_mmu_tlb_invalidate(mmu);
+ imgu_mmu_set_halt(mmu, true);
+ imgu_mmu_tlb_invalidate(mmu);
- ipu3_mmu_free_page_table(mmu->l1pt);
+ imgu_mmu_free_page_table(mmu->l1pt);
vfree(mmu->l2pts);
- ipu3_mmu_free_page_table(mmu->dummy_l2pt);
+ imgu_mmu_free_page_table(mmu->dummy_l2pt);
free_page((unsigned long)mmu->dummy_page);
kfree(mmu);
}
-void ipu3_mmu_suspend(struct ipu3_mmu_info *info)
+void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
- struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
- ipu3_mmu_set_halt(mmu, true);
+ imgu_mmu_set_halt(mmu, true);
}
-void ipu3_mmu_resume(struct ipu3_mmu_info *info)
+void imgu_mmu_resume(struct imgu_mmu_info *info)
{
- struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ struct imgu_mmu *mmu = to_imgu_mmu(info);
u32 pteval;
- ipu3_mmu_set_halt(mmu, true);
+ imgu_mmu_set_halt(mmu, true);
pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
writel(pteval, mmu->base + REG_L1_PHYS);
- ipu3_mmu_tlb_invalidate(mmu);
- ipu3_mmu_set_halt(mmu, false);
+ imgu_mmu_tlb_invalidate(mmu);
+ imgu_mmu_set_halt(mmu, false);
}
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.h b/drivers/staging/media/ipu3/ipu3-mmu.h
index 8fe63b4c6e1c..fa58827eb19c 100644
--- a/drivers/staging/media/ipu3/ipu3-mmu.h
+++ b/drivers/staging/media/ipu3/ipu3-mmu.h
@@ -6,13 +6,13 @@
#define __IPU3_MMU_H
/**
- * struct ipu3_mmu_info - Describes mmu geometry
+ * struct imgu_mmu_info - Describes mmu geometry
*
* @aperture_start: First address that can be mapped
* @aperture_end: Last address that can be mapped
* @pgsize_bitmap: Bitmap of page sizes in use
*/
-struct ipu3_mmu_info {
+struct imgu_mmu_info {
dma_addr_t aperture_start;
dma_addr_t aperture_end;
unsigned long pgsize_bitmap;
@@ -21,15 +21,15 @@ struct ipu3_mmu_info {
struct device;
struct scatterlist;
-struct ipu3_mmu_info *ipu3_mmu_init(struct device *parent, void __iomem *base);
-void ipu3_mmu_exit(struct ipu3_mmu_info *info);
-void ipu3_mmu_suspend(struct ipu3_mmu_info *info);
-void ipu3_mmu_resume(struct ipu3_mmu_info *info);
+struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base);
+void imgu_mmu_exit(struct imgu_mmu_info *info);
+void imgu_mmu_suspend(struct imgu_mmu_info *info);
+void imgu_mmu_resume(struct imgu_mmu_info *info);
-int ipu3_mmu_map(struct ipu3_mmu_info *info, unsigned long iova,
+int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
phys_addr_t paddr, size_t size);
-size_t ipu3_mmu_unmap(struct ipu3_mmu_info *info, unsigned long iova,
+size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
size_t size);
-size_t ipu3_mmu_map_sg(struct ipu3_mmu_info *info, unsigned long iova,
+size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
struct scatterlist *sg, unsigned int nents);
#endif
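/*
 * Editorial sketch, not part of the patch: the minimal map/unmap round
 * trip the API above supports. "parent", "base", "iova" and "paddr"
 * are assumptions; alignment against geometry.pgsize_bitmap is the
 * caller's job, as imgu_mmu_map() rejects unaligned requests.
 */
static int example_mmu_roundtrip(struct device *parent, void __iomem *base,
				 unsigned long iova, phys_addr_t paddr)
{
	struct imgu_mmu_info *info = imgu_mmu_init(parent, base);
	int ret;

	if (IS_ERR(info))
		return PTR_ERR(info);
	ret = imgu_mmu_map(info, iova, paddr, PAGE_SIZE);
	if (!ret) {
		/* ... device DMA through the IOVA window ... */
		imgu_mmu_unmap(info, iova, PAGE_SIZE);
	}
	imgu_mmu_exit(info);
	return ret;
}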
diff --git a/drivers/staging/media/ipu3/ipu3-tables.c b/drivers/staging/media/ipu3/ipu3-tables.c
index 334517987eba..3a3730bd4395 100644
--- a/drivers/staging/media/ipu3/ipu3-tables.c
+++ b/drivers/staging/media/ipu3/ipu3-tables.c
@@ -5,8 +5,8 @@
#define X 0 /* Don't care value */
-const struct ipu3_css_bds_config
- ipu3_css_bds_configs[IMGU_BDS_CONFIG_LEN] = { {
+const struct imgu_css_bds_config
+ imgu_css_bds_configs[IMGU_BDS_CONFIG_LEN] = { {
/* Scale factor 32 / (32 + 0) = 1 */
.hor_phase_arr = {
.even = { { 0, 0, 64, 6, 0, 0, 0 } },
@@ -9015,7 +9015,7 @@ const struct ipu3_css_bds_config
.ver_ds_en = 1
} };
-const s32 ipu3_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN] = {
+const s32 imgu_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN] = {
IMGU_SCALER_FP * -0.000000000000000,
IMGU_SCALER_FP * -0.000249009327023,
IMGU_SCALER_FP * -0.001022241683322,
@@ -9146,7 +9146,7 @@ const s32 ipu3_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN] = {
IMGU_SCALER_FP * -0.000249009327023
};
-const s32 ipu3_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN] = {
+const s32 imgu_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN] = {
IMGU_SCALER_FP * 0.074300676367033,
IMGU_SCALER_FP * 0.094030234498392,
IMGU_SCALER_FP * 0.115522859526596,
@@ -9214,7 +9214,7 @@ const s32 ipu3_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN] = {
};
/* settings for Geometric Distortion Correction */
-const s16 ipu3_css_gdc_lut[4][256] = { {
+const s16 imgu_css_gdc_lut[4][256] = { {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -2, -2, -2,
-2, -3, -3, -3, -4, -4, -4, -5, -5, -5, -6, -6, -7, -7, -7, -8, -8,
-9, -9, -10, -10, -11, -11, -12, -12, -13, -13, -14, -14, -15, -15,
@@ -9292,7 +9292,7 @@ const s16 ipu3_css_gdc_lut[4][256] = { {
-1, 0, 1, 0, 0, 0, 0, 0, 0, 0
} };
-const struct ipu3_css_xnr3_vmem_defaults ipu3_css_xnr3_vmem_defaults = {
+const struct imgu_css_xnr3_vmem_defaults imgu_css_xnr3_vmem_defaults = {
.x = {
1024, 1164, 1320, 1492, 1680, 1884, 2108, 2352,
2616, 2900, 3208, 3540, 3896, 4276, 4684, 5120
@@ -9311,7 +9311,7 @@ const struct ipu3_css_xnr3_vmem_defaults ipu3_css_xnr3_vmem_defaults = {
};
/* settings for Bayer Noise Reduction */
-const struct ipu3_uapi_bnr_static_config ipu3_css_bnr_defaults = {
+const struct ipu3_uapi_bnr_static_config imgu_css_bnr_defaults = {
{ 16, 16, 16, 16 }, /* wb_gains */
{ 16, 16, 16, 16 }, /* wb_gains_thr */
{ 0, X, 8, 6, X, 14 }, /* thr_coeffs */
@@ -9327,18 +9327,18 @@ const struct ipu3_uapi_bnr_static_config ipu3_css_bnr_defaults = {
{ 8, 4, 4, X, 8, X, 1, 1, 1, 1 }, /* dn_detect_ctrl */
};
-const struct ipu3_uapi_dm_config ipu3_css_dm_defaults = {
+const struct ipu3_uapi_dm_config imgu_css_dm_defaults = {
1, 1, 1, X, X, 8, X, 7, X, 8, X, 8, X, 4, X
};
-const struct ipu3_uapi_ccm_mat_config ipu3_css_ccm_defaults = {
+const struct ipu3_uapi_ccm_mat_config imgu_css_ccm_defaults = {
9775, -2671, 1087, 0,
-1071, 8303, 815, 0,
-23, -7887, 16103, 0
};
/* settings for Gamma correction */
-const struct ipu3_uapi_gamma_corr_lut ipu3_css_gamma_lut = { {
+const struct ipu3_uapi_gamma_corr_lut imgu_css_gamma_lut = { {
63, 79, 95, 111, 127, 143, 159, 175, 191, 207, 223, 239, 255, 271, 287,
303, 319, 335, 351, 367, 383, 399, 415, 431, 447, 463, 479, 495, 511,
527, 543, 559, 575, 591, 607, 623, 639, 655, 671, 687, 703, 719, 735,
@@ -9362,13 +9362,13 @@ const struct ipu3_uapi_gamma_corr_lut ipu3_css_gamma_lut = { {
7807, 7871, 7935, 7999, 8063, 8127, 8191
} };
-const struct ipu3_uapi_csc_mat_config ipu3_css_csc_defaults = {
+const struct ipu3_uapi_csc_mat_config imgu_css_csc_defaults = {
4898, 9617, 1867, 0,
-2410, -4732, 7143, 0,
10076, -8437, -1638, 0
};
-const struct ipu3_uapi_cds_params ipu3_css_cds_defaults = {
+const struct ipu3_uapi_cds_params imgu_css_cds_defaults = {
1, 3, 3, 1,
1, 3, 3, 1,
4, X, /* ds_nf */
@@ -9376,7 +9376,7 @@ const struct ipu3_uapi_cds_params ipu3_css_cds_defaults = {
0, X /* uv_bin_output */
};
-const struct ipu3_uapi_shd_config_static ipu3_css_shd_defaults = {
+const struct ipu3_uapi_shd_config_static imgu_css_shd_defaults = {
.grid = {
.width = 73,
.height = 55,
@@ -9397,7 +9397,7 @@ const struct ipu3_uapi_shd_config_static ipu3_css_shd_defaults = {
},
};
-const struct ipu3_uapi_yuvp1_iefd_config ipu3_css_iefd_defaults = {
+const struct ipu3_uapi_yuvp1_iefd_config imgu_css_iefd_defaults = {
.units = {
.cu_1 = { 0, 150, 7, 0 },
.cu_ed = { 7, 110, 244, X, 307, 409, 511, X,
@@ -9436,17 +9436,17 @@ const struct ipu3_uapi_yuvp1_iefd_config ipu3_css_iefd_defaults = {
{ 1, X, 2, X, 8, X } },
};
-const struct ipu3_uapi_yuvp1_yds_config ipu3_css_yds_defaults = {
+const struct ipu3_uapi_yuvp1_yds_config imgu_css_yds_defaults = {
0, 1, 1, 0, 0, 1, 1, 0, 2, X, 0, X
};
-const struct ipu3_uapi_yuvp1_chnr_config ipu3_css_chnr_defaults = {
+const struct ipu3_uapi_yuvp1_chnr_config imgu_css_chnr_defaults = {
.coring = { 0, X, 0, X },
.sense_gain = { 6, 6, 6, X, 4, 4, 4, X },
.iir_fir = { 8, X, 12, X, 0, 256 - 127, X },
};
-const struct ipu3_uapi_yuvp1_y_ee_nr_config ipu3_css_y_ee_nr_defaults = {
+const struct ipu3_uapi_yuvp1_y_ee_nr_config imgu_css_y_ee_nr_defaults = {
.lpf = { 4, X, 8, X, 16, X, 0 },
.sense = { 8191, X, 0, X, 8191, X, 0, X },
.gain = { 8, X, 0, X, 8, X, 0, X },
@@ -9457,7 +9457,7 @@ const struct ipu3_uapi_yuvp1_y_ee_nr_config ipu3_css_y_ee_nr_defaults = {
};
const struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config
- ipu3_css_tcc_gain_pcwl_lut = { {
+ imgu_css_tcc_gain_pcwl_lut = { {
1024, 1032, 1040, 1048, 1057, 1065, 1073, 1081, 1089, 1097, 1105, 1113,
1122, 1130, 1138, 1146, 1154, 1162, 1170, 1178, 1187, 1195, 1203, 1211,
1219, 1227, 1235, 1243, 1252, 1260, 1268, 1276, 1284, 1292, 1300, 1308,
@@ -9483,12 +9483,12 @@ const struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config
} };
const struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config
- ipu3_css_tcc_r_sqr_lut = { {
+ imgu_css_tcc_r_sqr_lut = { {
32, 44, 64, 92, 128, 180, 256, 364, 512, 628, 724, 808, 888,
956, 1024, 1088, 1144, 1200, 1256, 1304, 1356, 1404, 1448
} };
-const struct imgu_abi_anr_config ipu3_css_anr_defaults = {
+const struct imgu_abi_anr_config imgu_css_anr_defaults = {
.transform = {
.adaptive_treshhold_en = 1,
.alpha = { { 13, 13, 13, 13, 0, 0, 0, 0},
@@ -9545,7 +9545,7 @@ const struct imgu_abi_anr_config ipu3_css_anr_defaults = {
};
/* frame settings for Auto White Balance */
-const struct ipu3_uapi_awb_fr_config_s ipu3_css_awb_fr_defaults = {
+const struct ipu3_uapi_awb_fr_config_s imgu_css_awb_fr_defaults = {
.grid_cfg = {
.width = 16,
.height = 16,
@@ -9560,7 +9560,7 @@ const struct ipu3_uapi_awb_fr_config_s ipu3_css_awb_fr_defaults = {
};
/* settings for Auto Exposure */
-const struct ipu3_uapi_ae_grid_config ipu3_css_ae_grid_defaults = {
+const struct ipu3_uapi_ae_grid_config imgu_css_ae_grid_defaults = {
.width = 16,
.height = 16,
.block_width_log2 = 3,
@@ -9571,13 +9571,13 @@ const struct ipu3_uapi_ae_grid_config ipu3_css_ae_grid_defaults = {
};
/* settings for Auto Exposure color correction matrix */
-const struct ipu3_uapi_ae_ccm ipu3_css_ae_ccm_defaults = {
+const struct ipu3_uapi_ae_ccm imgu_css_ae_ccm_defaults = {
256, 256, 256, 256, /* gain_gr/r/b/gb */
.mat = { 128, 0, 0, 0, 0, 128, 0, 0, 0, 0, 128, 0, 0, 0, 0, 128 },
};
/* settings for Auto Focus */
-const struct ipu3_uapi_af_config_s ipu3_css_af_defaults = {
+const struct ipu3_uapi_af_config_s imgu_css_af_defaults = {
.filter_config = {
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 128 }, 0,
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 128 }, 0,
@@ -9595,7 +9595,7 @@ const struct ipu3_uapi_af_config_s ipu3_css_af_defaults = {
};
/* settings for Auto White Balance */
-const struct ipu3_uapi_awb_config_s ipu3_css_awb_defaults = {
+const struct ipu3_uapi_awb_config_s imgu_css_awb_defaults = {
8191, 8191, 8191, 8191 | /* rgbs_thr_gr/r/gb/b */
IPU3_UAPI_AWB_RGBS_THR_B_EN | IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT,
.grid = {
diff --git a/drivers/staging/media/ipu3/ipu3-tables.h b/drivers/staging/media/ipu3/ipu3-tables.h
index 6563782cbd22..a1bf3286f380 100644
--- a/drivers/staging/media/ipu3/ipu3-tables.h
+++ b/drivers/staging/media/ipu3/ipu3-tables.h
@@ -19,7 +19,7 @@
#define IMGU_GDC_LUT_UNIT 4
#define IMGU_GDC_LUT_LEN 256
-struct ipu3_css_bds_config {
+struct imgu_css_bds_config {
struct imgu_abi_bds_phase_arr hor_phase_arr;
struct imgu_abi_bds_phase_arr ver_phase_arr;
struct imgu_abi_bds_ptrn_arr ptrn_arr;
@@ -28,39 +28,39 @@ struct ipu3_css_bds_config {
u8 ver_ds_en;
};
-struct ipu3_css_xnr3_vmem_defaults {
+struct imgu_css_xnr3_vmem_defaults {
s16 x[IMGU_XNR3_VMEM_LUT_LEN];
s16 a[IMGU_XNR3_VMEM_LUT_LEN];
s16 b[IMGU_XNR3_VMEM_LUT_LEN];
s16 c[IMGU_XNR3_VMEM_LUT_LEN];
};
-extern const struct ipu3_css_bds_config
- ipu3_css_bds_configs[IMGU_BDS_CONFIG_LEN];
-extern const s32 ipu3_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN];
-extern const s32 ipu3_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN];
-extern const s16 ipu3_css_gdc_lut[IMGU_GDC_LUT_UNIT][IMGU_GDC_LUT_LEN];
-extern const struct ipu3_css_xnr3_vmem_defaults ipu3_css_xnr3_vmem_defaults;
-extern const struct ipu3_uapi_bnr_static_config ipu3_css_bnr_defaults;
-extern const struct ipu3_uapi_dm_config ipu3_css_dm_defaults;
-extern const struct ipu3_uapi_ccm_mat_config ipu3_css_ccm_defaults;
-extern const struct ipu3_uapi_gamma_corr_lut ipu3_css_gamma_lut;
-extern const struct ipu3_uapi_csc_mat_config ipu3_css_csc_defaults;
-extern const struct ipu3_uapi_cds_params ipu3_css_cds_defaults;
-extern const struct ipu3_uapi_shd_config_static ipu3_css_shd_defaults;
-extern const struct ipu3_uapi_yuvp1_iefd_config ipu3_css_iefd_defaults;
-extern const struct ipu3_uapi_yuvp1_yds_config ipu3_css_yds_defaults;
-extern const struct ipu3_uapi_yuvp1_chnr_config ipu3_css_chnr_defaults;
-extern const struct ipu3_uapi_yuvp1_y_ee_nr_config ipu3_css_y_ee_nr_defaults;
+extern const struct imgu_css_bds_config
+ imgu_css_bds_configs[IMGU_BDS_CONFIG_LEN];
+extern const s32 imgu_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN];
+extern const s32 imgu_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN];
+extern const s16 imgu_css_gdc_lut[IMGU_GDC_LUT_UNIT][IMGU_GDC_LUT_LEN];
+extern const struct imgu_css_xnr3_vmem_defaults imgu_css_xnr3_vmem_defaults;
+extern const struct ipu3_uapi_bnr_static_config imgu_css_bnr_defaults;
+extern const struct ipu3_uapi_dm_config imgu_css_dm_defaults;
+extern const struct ipu3_uapi_ccm_mat_config imgu_css_ccm_defaults;
+extern const struct ipu3_uapi_gamma_corr_lut imgu_css_gamma_lut;
+extern const struct ipu3_uapi_csc_mat_config imgu_css_csc_defaults;
+extern const struct ipu3_uapi_cds_params imgu_css_cds_defaults;
+extern const struct ipu3_uapi_shd_config_static imgu_css_shd_defaults;
+extern const struct ipu3_uapi_yuvp1_iefd_config imgu_css_iefd_defaults;
+extern const struct ipu3_uapi_yuvp1_yds_config imgu_css_yds_defaults;
+extern const struct ipu3_uapi_yuvp1_chnr_config imgu_css_chnr_defaults;
+extern const struct ipu3_uapi_yuvp1_y_ee_nr_config imgu_css_y_ee_nr_defaults;
extern const struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config
- ipu3_css_tcc_gain_pcwl_lut;
+ imgu_css_tcc_gain_pcwl_lut;
extern const struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config
- ipu3_css_tcc_r_sqr_lut;
-extern const struct imgu_abi_anr_config ipu3_css_anr_defaults;
-extern const struct ipu3_uapi_awb_fr_config_s ipu3_css_awb_fr_defaults;
-extern const struct ipu3_uapi_ae_grid_config ipu3_css_ae_grid_defaults;
-extern const struct ipu3_uapi_ae_ccm ipu3_css_ae_ccm_defaults;
-extern const struct ipu3_uapi_af_config_s ipu3_css_af_defaults;
-extern const struct ipu3_uapi_awb_config_s ipu3_css_awb_defaults;
+ imgu_css_tcc_r_sqr_lut;
+extern const struct imgu_abi_anr_config imgu_css_anr_defaults;
+extern const struct ipu3_uapi_awb_fr_config_s imgu_css_awb_fr_defaults;
+extern const struct ipu3_uapi_ae_grid_config imgu_css_ae_grid_defaults;
+extern const struct ipu3_uapi_ae_ccm imgu_css_ae_ccm_defaults;
+extern const struct ipu3_uapi_af_config_s imgu_css_af_defaults;
+extern const struct ipu3_uapi_awb_config_s imgu_css_awb_defaults;
#endif
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index c7936032beb9..9c0352b193a7 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -12,7 +12,10 @@
/******************** v4l2_subdev_ops ********************/
-static int ipu3_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+#define IPU3_RUNNING_MODE_VIDEO 0
+#define IPU3_RUNNING_MODE_STILL 1
+
+static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
struct imgu_v4l2_subdev,
@@ -47,7 +50,7 @@ static int ipu3_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
return 0;
}
-static int ipu3_subdev_s_stream(struct v4l2_subdev *sd, int enable)
+static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
{
int i;
unsigned int node;
@@ -60,7 +63,7 @@ static int ipu3_subdev_s_stream(struct v4l2_subdev *sd, int enable)
struct device *dev = &imgu->pci_dev->dev;
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
- struct ipu3_css_pipe *css_pipe = &imgu->css.pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
dev_dbg(dev, "%s %d for pipe %d", __func__, enable, pipe);
@@ -104,7 +107,7 @@ static int ipu3_subdev_s_stream(struct v4l2_subdev *sd, int enable)
rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
- r = ipu3_css_fmt_set(&imgu->css, fmts, rects, pipe);
+ r = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
if (r) {
dev_err(dev, "failed to set initial formats pipe %d with (%d)",
pipe, r);
@@ -116,7 +119,7 @@ static int ipu3_subdev_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int ipu3_subdev_get_fmt(struct v4l2_subdev *sd,
+static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
@@ -140,7 +143,7 @@ static int ipu3_subdev_get_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int ipu3_subdev_set_fmt(struct v4l2_subdev *sd,
+static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
@@ -186,7 +189,7 @@ static int ipu3_subdev_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int ipu3_subdev_get_selection(struct v4l2_subdev *sd,
+static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
@@ -219,7 +222,7 @@ static int ipu3_subdev_get_selection(struct v4l2_subdev *sd,
return 0;
}
-static int ipu3_subdev_set_selection(struct v4l2_subdev *sd,
+static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
@@ -260,7 +263,7 @@ static int ipu3_subdev_set_selection(struct v4l2_subdev *sd,
/******************** media_entity_operations ********************/
-static int ipu3_link_setup(struct media_entity *entity,
+static int imgu_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
@@ -299,7 +302,7 @@ static int ipu3_link_setup(struct media_entity *entity,
/******************** vb2_ops ********************/
-static int ipu3_vb2_buf_init(struct vb2_buffer *vb)
+static int imgu_vb2_buf_init(struct vb2_buffer *vb)
{
struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
@@ -312,11 +315,11 @@ static int ipu3_vb2_buf_init(struct vb2_buffer *vb)
if (queue == IPU3_CSS_QUEUE_PARAMS)
return 0;
- return ipu3_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
+ return imgu_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
}
/* Called when each buffer is freed */
-static void ipu3_vb2_buf_cleanup(struct vb2_buffer *vb)
+static void imgu_vb2_buf_cleanup(struct vb2_buffer *vb)
{
struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
struct imgu_buffer *buf = container_of(vb,
@@ -328,11 +331,11 @@ static void ipu3_vb2_buf_cleanup(struct vb2_buffer *vb)
if (queue == IPU3_CSS_QUEUE_PARAMS)
return;
- ipu3_dmamap_unmap(imgu, &buf->map);
+ imgu_dmamap_unmap(imgu, &buf->map);
}
/* Transfer buffer ownership to me */
-static void ipu3_vb2_buf_queue(struct vb2_buffer *vb)
+static void imgu_vb2_buf_queue(struct vb2_buffer *vb)
{
struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
struct imgu_video_device *node =
@@ -358,7 +361,7 @@ static void ipu3_vb2_buf_queue(struct vb2_buffer *vb)
vb2_set_plane_payload(vb, 0, payload);
}
if (payload >= need_bytes)
- r = ipu3_css_set_parameters(&imgu->css, pipe,
+ r = imgu_css_set_parameters(&imgu->css, pipe,
vb2_plane_vaddr(vb, 0));
buf->flags = V4L2_BUF_FLAG_DONE;
vb2_buffer_done(vb, r == 0 ? VB2_BUF_STATE_DONE
@@ -369,7 +372,7 @@ static void ipu3_vb2_buf_queue(struct vb2_buffer *vb)
vid_buf.vbb.vb2_buf);
mutex_lock(&imgu->lock);
- ipu3_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
+ imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
list_add_tail(&buf->vid_buf.list,
&node->buffers);
mutex_unlock(&imgu->lock);
@@ -385,7 +388,7 @@ static void ipu3_vb2_buf_queue(struct vb2_buffer *vb)
}
-static int ipu3_vb2_queue_setup(struct vb2_queue *vq,
+static int imgu_vb2_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers,
unsigned int *num_planes,
unsigned int sizes[],
@@ -422,7 +425,7 @@ static int ipu3_vb2_queue_setup(struct vb2_queue *vq,
}
/* Check if all enabled video nodes are streaming; the 'except' node is ignored */
-static bool ipu3_all_nodes_streaming(struct imgu_device *imgu,
+static bool imgu_all_nodes_streaming(struct imgu_device *imgu,
struct imgu_video_device *except)
{
unsigned int i, pipe, p;
@@ -451,11 +454,11 @@ static bool ipu3_all_nodes_streaming(struct imgu_device *imgu,
return true;
}
-static void ipu3_return_all_buffers(struct imgu_device *imgu,
+static void imgu_return_all_buffers(struct imgu_device *imgu,
struct imgu_video_device *node,
enum vb2_buffer_state state)
{
- struct ipu3_vb2_buffer *b, *b0;
+ struct imgu_vb2_buffer *b, *b0;
/* Return all buffers */
mutex_lock(&imgu->lock);
@@ -466,7 +469,7 @@ static void ipu3_return_all_buffers(struct imgu_device *imgu,
mutex_unlock(&imgu->lock);
}
-static int ipu3_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct imgu_media_pipe *imgu_pipe;
struct imgu_device *imgu = vb2_get_drv_priv(vq);
@@ -497,7 +500,7 @@ static int ipu3_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
goto fail_return_bufs;
- if (!ipu3_all_nodes_streaming(imgu, node))
+ if (!imgu_all_nodes_streaming(imgu, node))
return 0;
for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
@@ -518,12 +521,12 @@ static int ipu3_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
fail_stop_pipeline:
media_pipeline_stop(&node->vdev.entity);
fail_return_bufs:
- ipu3_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED);
+ imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED);
return r;
}
-static void ipu3_vb2_stop_streaming(struct vb2_queue *vq)
+static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
{
struct imgu_media_pipe *imgu_pipe;
struct imgu_device *imgu = vb2_get_drv_priv(vq);
@@ -544,7 +547,7 @@ static void ipu3_vb2_stop_streaming(struct vb2_queue *vq)
"failed to stop subdev streaming\n");
/* Was this the first node with streaming disabled? */
- if (imgu->streaming && ipu3_all_nodes_streaming(imgu, node)) {
+ if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) {
/* Yes, really stop streaming now */
dev_dbg(dev, "IMGU streaming is ready to stop");
r = imgu_s_stream(imgu, false);
@@ -552,7 +555,7 @@ static void ipu3_vb2_stop_streaming(struct vb2_queue *vq)
imgu->streaming = false;
}
- ipu3_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
+ imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
media_pipeline_stop(&node->vdev.entity);
}
@@ -563,13 +566,13 @@ static void ipu3_vb2_stop_streaming(struct vb2_queue *vq)
#define DEF_VID_CAPTURE 0
#define DEF_VID_OUTPUT 1
-struct ipu3_fmt {
+struct imgu_fmt {
u32 fourcc;
u16 type; /* VID_CAPTURE or VID_OUTPUT, not both */
};
/* format descriptions for capture and preview */
-static const struct ipu3_fmt formats[] = {
+static const struct imgu_fmt formats[] = {
{ V4L2_PIX_FMT_NV12, VID_CAPTURE },
{ V4L2_PIX_FMT_IPU3_SGRBG10, VID_OUTPUT },
{ V4L2_PIX_FMT_IPU3_SBGGR10, VID_OUTPUT },
@@ -578,7 +581,7 @@ static const struct ipu3_fmt formats[] = {
};
/* Find the first matching format; return the default if none is found */
-static const struct ipu3_fmt *find_format(struct v4l2_format *f, u32 type)
+static const struct imgu_fmt *find_format(struct v4l2_format *f, u32 type)
{
unsigned int i;
@@ -592,10 +595,10 @@ static const struct ipu3_fmt *find_format(struct v4l2_format *f, u32 type)
&formats[DEF_VID_OUTPUT];
}
-static int ipu3_vidioc_querycap(struct file *file, void *fh,
+static int imgu_vidioc_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
strscpy(cap->driver, IMGU_NAME, sizeof(cap->driver));
strscpy(cap->card, IMGU_NAME, sizeof(cap->card));
@@ -643,10 +646,10 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
}
/* Always propagate the format from the CIO2 subdev forward */
-static int ipu3_vidioc_g_fmt(struct file *file, void *fh,
+static int imgu_vidioc_g_fmt(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
f->fmt = node->vdev_fmt.fmt;
@@ -667,7 +670,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
struct v4l2_mbus_framefmt pad_fmt;
unsigned int i, css_q;
int r;
- struct ipu3_css_pipe *css_pipe = &imgu->css.pipes[pipe];
+ struct imgu_css_pipe *css_pipe = &imgu->css.pipes[pipe];
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
@@ -733,9 +736,9 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
return -EINVAL;
if (try)
- r = ipu3_css_fmt_try(&imgu->css, fmts, rects, pipe);
+ r = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
else
- r = ipu3_css_fmt_set(&imgu->css, fmts, rects, pipe);
+ r = imgu_css_fmt_set(&imgu->css, fmts, rects, pipe);
/* r is the index of the binary in the firmware blob */
if (r < 0)
@@ -749,10 +752,10 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
return 0;
}
-static int ipu3_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+static int imgu_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
- const struct ipu3_fmt *fmt;
+ const struct imgu_fmt *fmt;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
fmt = find_format(f, VID_CAPTURE);
@@ -769,58 +772,58 @@ static int ipu3_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
return 0;
}
-static int ipu3_vidioc_try_fmt(struct file *file, void *fh,
+static int imgu_vidioc_try_fmt(struct file *file, void *fh,
struct v4l2_format *f)
{
struct imgu_device *imgu = video_drvdata(file);
struct device *dev = &imgu->pci_dev->dev;
- struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
int r;
dev_dbg(dev, "%s [%ux%u] for node %d\n", __func__,
pix_mp->width, pix_mp->height, node->id);
- r = ipu3_try_fmt(file, fh, f);
+ r = imgu_try_fmt(file, fh, f);
if (r)
return r;
return imgu_fmt(imgu, node->pipe, node->id, f, true);
}
-static int ipu3_vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+static int imgu_vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct imgu_device *imgu = video_drvdata(file);
struct device *dev = &imgu->pci_dev->dev;
- struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
int r;
dev_dbg(dev, "%s [%ux%u] for node %d\n", __func__,
pix_mp->width, pix_mp->height, node->id);
- r = ipu3_try_fmt(file, fh, f);
+ r = imgu_try_fmt(file, fh, f);
if (r)
return r;
return imgu_fmt(imgu, node->pipe, node->id, f, false);
}
-struct ipu3_meta_fmt {
+struct imgu_meta_fmt {
__u32 fourcc;
char *name;
};
/* From drivers/media/v4l2-core/v4l2-ioctl.c */
-static const struct ipu3_meta_fmt meta_fmts[] = {
+static const struct imgu_meta_fmt meta_fmts[] = {
{ V4L2_META_FMT_IPU3_PARAMS, "IPU3 processing parameters" },
{ V4L2_META_FMT_IPU3_STAT_3A, "IPU3 3A statistics" },
};
-static int ipu3_meta_enum_format(struct file *file, void *fh,
+static int imgu_meta_enum_format(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
- struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
unsigned int i = fmt->type == V4L2_BUF_TYPE_META_OUTPUT ? 0 : 1;
/* Each node is dedicated to only one meta format */
@@ -833,10 +836,10 @@ static int ipu3_meta_enum_format(struct file *file, void *fh,
return 0;
}
-static int ipu3_vidioc_g_meta_fmt(struct file *file, void *fh,
+static int imgu_vidioc_g_meta_fmt(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct imgu_video_device *node = file_to_intel_imgu_node(file);
if (f->type != node->vbq.type)
return -EINVAL;
@@ -846,7 +849,7 @@ static int ipu3_vidioc_g_meta_fmt(struct file *file, void *fh,
return 0;
}
-static int ipu3_vidioc_enum_input(struct file *file, void *fh,
+static int imgu_vidioc_enum_input(struct file *file, void *fh,
struct v4l2_input *input)
{
if (input->index > 0)
@@ -857,19 +860,19 @@ static int ipu3_vidioc_enum_input(struct file *file, void *fh,
return 0;
}
-static int ipu3_vidioc_g_input(struct file *file, void *fh, unsigned int *input)
+static int imgu_vidioc_g_input(struct file *file, void *fh, unsigned int *input)
{
*input = 0;
return 0;
}
-static int ipu3_vidioc_s_input(struct file *file, void *fh, unsigned int input)
+static int imgu_vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
return input == 0 ? 0 : -EINVAL;
}
-static int ipu3_vidioc_enum_output(struct file *file, void *fh,
+static int imgu_vidioc_enum_output(struct file *file, void *fh,
struct v4l2_output *output)
{
if (output->index > 0)
@@ -880,7 +883,7 @@ static int ipu3_vidioc_enum_output(struct file *file, void *fh,
return 0;
}
-static int ipu3_vidioc_g_output(struct file *file, void *fh,
+static int imgu_vidioc_g_output(struct file *file, void *fh,
unsigned int *output)
{
*output = 0;
@@ -888,7 +891,7 @@ static int ipu3_vidioc_g_output(struct file *file, void *fh,
return 0;
}
-static int ipu3_vidioc_s_output(struct file *file, void *fh,
+static int imgu_vidioc_s_output(struct file *file, void *fh,
unsigned int output)
{
return output == 0 ? 0 : -EINVAL;
@@ -896,54 +899,54 @@ static int ipu3_vidioc_s_output(struct file *file, void *fh,
/******************** function pointers ********************/
-static struct v4l2_subdev_internal_ops ipu3_subdev_internal_ops = {
- .open = ipu3_subdev_open,
+static struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
+ .open = imgu_subdev_open,
};
-static const struct v4l2_subdev_core_ops ipu3_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops imgu_subdev_core_ops = {
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
-static const struct v4l2_subdev_video_ops ipu3_subdev_video_ops = {
- .s_stream = ipu3_subdev_s_stream,
+static const struct v4l2_subdev_video_ops imgu_subdev_video_ops = {
+ .s_stream = imgu_subdev_s_stream,
};
-static const struct v4l2_subdev_pad_ops ipu3_subdev_pad_ops = {
+static const struct v4l2_subdev_pad_ops imgu_subdev_pad_ops = {
.link_validate = v4l2_subdev_link_validate_default,
- .get_fmt = ipu3_subdev_get_fmt,
- .set_fmt = ipu3_subdev_set_fmt,
- .get_selection = ipu3_subdev_get_selection,
- .set_selection = ipu3_subdev_set_selection,
+ .get_fmt = imgu_subdev_get_fmt,
+ .set_fmt = imgu_subdev_set_fmt,
+ .get_selection = imgu_subdev_get_selection,
+ .set_selection = imgu_subdev_set_selection,
};
-static const struct v4l2_subdev_ops ipu3_subdev_ops = {
- .core = &ipu3_subdev_core_ops,
- .video = &ipu3_subdev_video_ops,
- .pad = &ipu3_subdev_pad_ops,
+static const struct v4l2_subdev_ops imgu_subdev_ops = {
+ .core = &imgu_subdev_core_ops,
+ .video = &imgu_subdev_video_ops,
+ .pad = &imgu_subdev_pad_ops,
};
-static const struct media_entity_operations ipu3_media_ops = {
- .link_setup = ipu3_link_setup,
+static const struct media_entity_operations imgu_media_ops = {
+ .link_setup = imgu_link_setup,
.link_validate = v4l2_subdev_link_validate,
};
/****************** vb2_ops of the Q ********************/
-static const struct vb2_ops ipu3_vb2_ops = {
- .buf_init = ipu3_vb2_buf_init,
- .buf_cleanup = ipu3_vb2_buf_cleanup,
- .buf_queue = ipu3_vb2_buf_queue,
- .queue_setup = ipu3_vb2_queue_setup,
- .start_streaming = ipu3_vb2_start_streaming,
- .stop_streaming = ipu3_vb2_stop_streaming,
+static const struct vb2_ops imgu_vb2_ops = {
+ .buf_init = imgu_vb2_buf_init,
+ .buf_cleanup = imgu_vb2_buf_cleanup,
+ .buf_queue = imgu_vb2_buf_queue,
+ .queue_setup = imgu_vb2_queue_setup,
+ .start_streaming = imgu_vb2_start_streaming,
+ .stop_streaming = imgu_vb2_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
/****************** v4l2_file_operations *****************/
-static const struct v4l2_file_operations ipu3_v4l2_fops = {
+static const struct v4l2_file_operations imgu_v4l2_fops = {
.unlocked_ioctl = video_ioctl2,
.open = v4l2_fh_open,
.release = vb2_fop_release,
@@ -953,26 +956,26 @@ static const struct v4l2_file_operations ipu3_v4l2_fops = {
/******************** v4l2_ioctl_ops ********************/
-static const struct v4l2_ioctl_ops ipu3_v4l2_ioctl_ops = {
- .vidioc_querycap = ipu3_vidioc_querycap,
+static const struct v4l2_ioctl_ops imgu_v4l2_ioctl_ops = {
+ .vidioc_querycap = imgu_vidioc_querycap,
.vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap_mplane = ipu3_vidioc_g_fmt,
- .vidioc_s_fmt_vid_cap_mplane = ipu3_vidioc_s_fmt,
- .vidioc_try_fmt_vid_cap_mplane = ipu3_vidioc_try_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = imgu_vidioc_g_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = imgu_vidioc_s_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = imgu_vidioc_try_fmt,
.vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out,
- .vidioc_g_fmt_vid_out_mplane = ipu3_vidioc_g_fmt,
- .vidioc_s_fmt_vid_out_mplane = ipu3_vidioc_s_fmt,
- .vidioc_try_fmt_vid_out_mplane = ipu3_vidioc_try_fmt,
+ .vidioc_g_fmt_vid_out_mplane = imgu_vidioc_g_fmt,
+ .vidioc_s_fmt_vid_out_mplane = imgu_vidioc_s_fmt,
+ .vidioc_try_fmt_vid_out_mplane = imgu_vidioc_try_fmt,
- .vidioc_enum_output = ipu3_vidioc_enum_output,
- .vidioc_g_output = ipu3_vidioc_g_output,
- .vidioc_s_output = ipu3_vidioc_s_output,
+ .vidioc_enum_output = imgu_vidioc_enum_output,
+ .vidioc_g_output = imgu_vidioc_g_output,
+ .vidioc_s_output = imgu_vidioc_s_output,
- .vidioc_enum_input = ipu3_vidioc_enum_input,
- .vidioc_g_input = ipu3_vidioc_g_input,
- .vidioc_s_input = ipu3_vidioc_s_input,
+ .vidioc_enum_input = imgu_vidioc_enum_input,
+ .vidioc_g_input = imgu_vidioc_g_input,
+ .vidioc_s_input = imgu_vidioc_s_input,
/* buffer queue management */
.vidioc_reqbufs = vb2_ioctl_reqbufs,
@@ -986,20 +989,20 @@ static const struct v4l2_ioctl_ops ipu3_v4l2_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
};
-static const struct v4l2_ioctl_ops ipu3_v4l2_meta_ioctl_ops = {
- .vidioc_querycap = ipu3_vidioc_querycap,
+static const struct v4l2_ioctl_ops imgu_v4l2_meta_ioctl_ops = {
+ .vidioc_querycap = imgu_vidioc_querycap,
/* meta capture */
- .vidioc_enum_fmt_meta_cap = ipu3_meta_enum_format,
- .vidioc_g_fmt_meta_cap = ipu3_vidioc_g_meta_fmt,
- .vidioc_s_fmt_meta_cap = ipu3_vidioc_g_meta_fmt,
- .vidioc_try_fmt_meta_cap = ipu3_vidioc_g_meta_fmt,
+ .vidioc_enum_fmt_meta_cap = imgu_meta_enum_format,
+ .vidioc_g_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
+ .vidioc_s_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
+ .vidioc_try_fmt_meta_cap = imgu_vidioc_g_meta_fmt,
/* meta output */
- .vidioc_enum_fmt_meta_out = ipu3_meta_enum_format,
- .vidioc_g_fmt_meta_out = ipu3_vidioc_g_meta_fmt,
- .vidioc_s_fmt_meta_out = ipu3_vidioc_g_meta_fmt,
- .vidioc_try_fmt_meta_out = ipu3_vidioc_g_meta_fmt,
+ .vidioc_enum_fmt_meta_out = imgu_meta_enum_format,
+ .vidioc_g_fmt_meta_out = imgu_vidioc_g_meta_fmt,
+ .vidioc_s_fmt_meta_out = imgu_vidioc_g_meta_fmt,
+ .vidioc_try_fmt_meta_out = imgu_vidioc_g_meta_fmt,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
@@ -1012,7 +1015,7 @@ static const struct v4l2_ioctl_ops ipu3_v4l2_meta_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
};
-static int ipu3_sd_s_ctrl(struct v4l2_ctrl *ctrl)
+static int imgu_sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct imgu_v4l2_subdev *imgu_sd =
container_of(ctrl->handler, struct imgu_v4l2_subdev, ctrl_handler);
@@ -1031,25 +1034,29 @@ static int ipu3_sd_s_ctrl(struct v4l2_ctrl *ctrl)
}
}
-static const struct v4l2_ctrl_ops ipu3_subdev_ctrl_ops = {
- .s_ctrl = ipu3_sd_s_ctrl,
+static const struct v4l2_ctrl_ops imgu_subdev_ctrl_ops = {
+ .s_ctrl = imgu_sd_s_ctrl,
+};
+
+static const char * const imgu_ctrl_mode_strings[] = {
+ "Video mode",
+ "Still mode",
};
-static const struct v4l2_ctrl_config ipu3_subdev_ctrl_mode = {
- .ops = &ipu3_subdev_ctrl_ops,
+static const struct v4l2_ctrl_config imgu_subdev_ctrl_mode = {
+ .ops = &imgu_subdev_ctrl_ops,
.id = V4L2_CID_INTEL_IPU3_MODE,
.name = "IPU3 Pipe Mode",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = IPU3_RUNNING_MODE_VIDEO,
- .max = IPU3_RUNNING_MODE_STILL,
- .step = 1,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(imgu_ctrl_mode_strings) - 1,
.def = IPU3_RUNNING_MODE_VIDEO,
+ .qmenu = imgu_ctrl_mode_strings,
};
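The hunk above converts the pipe-mode control from a raw integer to a menu control: with V4L2_CTRL_TYPE_MENU, .min implicitly stays 0, .max indexes the last entry of .qmenu, and .step is meaningless, which is why those fields disappear. A minimal sketch of the same pattern, using a hypothetical control id and ops:

static const char * const example_mode_strings[] = {
	"Video mode",
	"Still mode",
};

static const struct v4l2_ctrl_config example_ctrl_mode = {
	.ops	= &example_ctrl_ops,		/* hypothetical, supplies .s_ctrl */
	.id	= V4L2_CID_EXAMPLE_MODE,	/* hypothetical private CID */
	.name	= "Example Pipe Mode",
	.type	= V4L2_CTRL_TYPE_MENU,
	.max	= ARRAY_SIZE(example_mode_strings) - 1,
	.def	= 0,				/* defaults to the first entry */
	.qmenu	= example_mode_strings,
};

/* registered later with v4l2_ctrl_new_custom(hdl, &example_ctrl_mode, NULL) */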
/******************** Framework registration ********************/
/* helper function to config node's video properties */
-static void ipu3_node_to_v4l2(u32 node, struct video_device *vdev,
+static void imgu_node_to_v4l2(u32 node, struct video_device *vdev,
struct v4l2_format *f)
{
u32 cap;
@@ -1061,32 +1068,32 @@ static void ipu3_node_to_v4l2(u32 node, struct video_device *vdev,
case IMGU_NODE_IN:
cap = V4L2_CAP_VIDEO_OUTPUT_MPLANE;
f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- vdev->ioctl_ops = &ipu3_v4l2_ioctl_ops;
+ vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
break;
case IMGU_NODE_PARAMS:
cap = V4L2_CAP_META_OUTPUT;
f->type = V4L2_BUF_TYPE_META_OUTPUT;
f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_PARAMS;
- vdev->ioctl_ops = &ipu3_v4l2_meta_ioctl_ops;
- ipu3_css_meta_fmt_set(&f->fmt.meta);
+ vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
+ imgu_css_meta_fmt_set(&f->fmt.meta);
break;
case IMGU_NODE_STAT_3A:
cap = V4L2_CAP_META_CAPTURE;
f->type = V4L2_BUF_TYPE_META_CAPTURE;
f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_STAT_3A;
- vdev->ioctl_ops = &ipu3_v4l2_meta_ioctl_ops;
- ipu3_css_meta_fmt_set(&f->fmt.meta);
+ vdev->ioctl_ops = &imgu_v4l2_meta_ioctl_ops;
+ imgu_css_meta_fmt_set(&f->fmt.meta);
break;
default:
cap = V4L2_CAP_VIDEO_CAPTURE_MPLANE;
f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- vdev->ioctl_ops = &ipu3_v4l2_ioctl_ops;
+ vdev->ioctl_ops = &imgu_v4l2_ioctl_ops;
}
vdev->device_caps = V4L2_CAP_STREAMING | cap;
}
-static int ipu3_v4l2_subdev_register(struct imgu_device *imgu,
+static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
struct imgu_v4l2_subdev *imgu_sd,
unsigned int pipe)
{
@@ -1102,16 +1109,16 @@ static int ipu3_v4l2_subdev_register(struct imgu_device *imgu,
"failed initialize subdev media entity (%d)\n", r);
return r;
}
- imgu_sd->subdev.entity.ops = &ipu3_media_ops;
+ imgu_sd->subdev.entity.ops = &imgu_media_ops;
for (i = 0; i < IMGU_NODE_NUM; i++) {
imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
}
/* Initialize subdev */
- v4l2_subdev_init(&imgu_sd->subdev, &ipu3_subdev_ops);
+ v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
imgu_sd->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_STATISTICS;
- imgu_sd->subdev.internal_ops = &ipu3_subdev_internal_ops;
+ imgu_sd->subdev.internal_ops = &imgu_subdev_internal_ops;
imgu_sd->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
snprintf(imgu_sd->subdev.name, sizeof(imgu_sd->subdev.name),
@@ -1120,7 +1127,7 @@ static int ipu3_v4l2_subdev_register(struct imgu_device *imgu,
atomic_set(&imgu_sd->running_mode, IPU3_RUNNING_MODE_VIDEO);
v4l2_ctrl_handler_init(hdl, 1);
imgu_sd->subdev.ctrl_handler = hdl;
- imgu_sd->ctrl = v4l2_ctrl_new_custom(hdl, &ipu3_subdev_ctrl_mode, NULL);
+ imgu_sd->ctrl = v4l2_ctrl_new_custom(hdl, &imgu_subdev_ctrl_mode, NULL);
if (hdl->error) {
r = hdl->error;
dev_err(&imgu->pci_dev->dev,
@@ -1144,7 +1151,7 @@ fail_subdev:
return r;
}
-static int ipu3_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
+static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
int node_num)
{
int r;
@@ -1189,7 +1196,7 @@ static int ipu3_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
node->pad_fmt = def_bus_fmt;
node->id = node_num;
node->pipe = pipe;
- ipu3_node_to_v4l2(node_num, vdev, &node->vdev_fmt);
+ imgu_node_to_v4l2(node_num, vdev, &node->vdev_fmt);
if (node->vdev_fmt.type ==
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
node->vdev_fmt.type ==
@@ -1214,11 +1221,11 @@ static int ipu3_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
/* Initialize vbq */
vbq->type = node->vdev_fmt.type;
vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
- vbq->ops = &ipu3_vb2_ops;
+ vbq->ops = &imgu_vb2_ops;
vbq->mem_ops = &vb2_dma_sg_memops;
if (imgu->buf_struct_size <= 0)
imgu->buf_struct_size =
- sizeof(struct ipu3_vb2_buffer);
+ sizeof(struct imgu_vb2_buffer);
vbq->buf_struct_size = imgu->buf_struct_size;
vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
/* can streamon w/o buffers */
@@ -1236,7 +1243,7 @@ static int ipu3_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
snprintf(vdev->name, sizeof(vdev->name), "%s %d %s",
IMGU_NAME, pipe, node->name);
vdev->release = video_device_release_empty;
- vdev->fops = &ipu3_v4l2_fops;
+ vdev->fops = &imgu_v4l2_fops;
vdev->lock = &node->lock;
vdev->v4l2_dev = &imgu->v4l2_dev;
vdev->queue = &node->vbq;
@@ -1269,7 +1276,7 @@ static int ipu3_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
return 0;
}
-static void ipu3_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu,
+static void imgu_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu,
unsigned int pipe, int node)
{
int i;
@@ -1282,12 +1289,12 @@ static void ipu3_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu,
}
}
-static int ipu3_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe)
+static int imgu_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe)
{
int i, r;
for (i = 0; i < IMGU_NODE_NUM; i++) {
- r = ipu3_v4l2_node_setup(imgu, pipe, i);
+ r = imgu_v4l2_node_setup(imgu, pipe, i);
if (r)
goto cleanup;
}
@@ -1295,11 +1302,11 @@ static int ipu3_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe)
return 0;
cleanup:
- ipu3_v4l2_nodes_cleanup_pipe(imgu, pipe, i);
+ imgu_v4l2_nodes_cleanup_pipe(imgu, pipe, i);
return r;
}
-static void ipu3_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i)
+static void imgu_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i)
{
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[i];
@@ -1308,13 +1315,13 @@ static void ipu3_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i)
media_entity_cleanup(&imgu_pipe->imgu_sd.subdev.entity);
}
-static void ipu3_v4l2_cleanup_pipes(struct imgu_device *imgu, unsigned int pipe)
+static void imgu_v4l2_cleanup_pipes(struct imgu_device *imgu, unsigned int pipe)
{
int i;
for (i = 0; i < pipe; i++) {
- ipu3_v4l2_nodes_cleanup_pipe(imgu, i, IMGU_NODE_NUM);
- ipu3_v4l2_subdev_cleanup(imgu, i);
+ imgu_v4l2_nodes_cleanup_pipe(imgu, i, IMGU_NODE_NUM);
+ imgu_v4l2_subdev_cleanup(imgu, i);
}
}
@@ -1325,15 +1332,15 @@ static int imgu_v4l2_register_pipes(struct imgu_device *imgu)
for (i = 0; i < IMGU_MAX_PIPE_NUM; i++) {
imgu_pipe = &imgu->imgu_pipe[i];
- r = ipu3_v4l2_subdev_register(imgu, &imgu_pipe->imgu_sd, i);
+ r = imgu_v4l2_subdev_register(imgu, &imgu_pipe->imgu_sd, i);
if (r) {
dev_err(&imgu->pci_dev->dev,
"failed to register subdev%d ret (%d)\n", i, r);
goto pipes_cleanup;
}
- r = ipu3_v4l2_nodes_setup_pipe(imgu, i);
+ r = imgu_v4l2_nodes_setup_pipe(imgu, i);
if (r) {
- ipu3_v4l2_subdev_cleanup(imgu, i);
+ imgu_v4l2_subdev_cleanup(imgu, i);
goto pipes_cleanup;
}
}
@@ -1341,7 +1348,7 @@ static int imgu_v4l2_register_pipes(struct imgu_device *imgu)
return 0;
pipes_cleanup:
- ipu3_v4l2_cleanup_pipes(imgu, i);
+ imgu_v4l2_cleanup_pipes(imgu, i);
return r;
}
@@ -1389,7 +1396,7 @@ int imgu_v4l2_register(struct imgu_device *imgu)
return 0;
fail_subdevs:
- ipu3_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
+ imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
fail_v4l2_pipes:
v4l2_device_unregister(&imgu->v4l2_dev);
fail_v4l2_dev:
@@ -1401,7 +1408,7 @@ fail_v4l2_dev:
int imgu_v4l2_unregister(struct imgu_device *imgu)
{
media_device_unregister(&imgu->media_dev);
- ipu3_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
+ imgu_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
v4l2_device_unregister(&imgu->v4l2_dev);
media_device_cleanup(&imgu->media_dev);
@@ -1411,8 +1418,8 @@ int imgu_v4l2_unregister(struct imgu_device *imgu)
void imgu_v4l2_buffer_done(struct vb2_buffer *vb,
enum vb2_buffer_state state)
{
- struct ipu3_vb2_buffer *b =
- container_of(vb, struct ipu3_vb2_buffer, vbb.vb2_buf);
+ struct imgu_vb2_buffer *b =
+ container_of(vb, struct imgu_vb2_buffer, vbb.vb2_buf);
list_del(&b->list);
vb2_buffer_done(&b->vbb.vb2_buf, state);
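imgu_v4l2_buffer_done() works because struct imgu_vb2_buffer embeds its struct vb2_v4l2_buffer as the first field, so the wrapper can be recovered from the vb2 core's buffer pointer with container_of(). A sketch of the idiom, with hypothetical names:

struct example_vb2_buffer {
	struct vb2_v4l2_buffer vbb;	/* must stay the first field */
	struct list_head list;		/* driver-private queue linkage */
};

static struct example_vb2_buffer *to_example_buf(struct vb2_buffer *vb)
{
	/* vb is &vbb.vb2_buf, allocated by vb2 using buf_struct_size */
	return container_of(vb, struct example_vb2_buffer, vbb.vb2_buf);
}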
diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
index d521b3afb8b1..d575ac78c8f0 100644
--- a/drivers/staging/media/ipu3/ipu3.c
+++ b/drivers/staging/media/ipu3/ipu3.c
@@ -72,7 +72,7 @@ static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
for (i = 0; i < IPU3_CSS_QUEUES; i++)
- ipu3_dmamap_free(imgu,
+ imgu_dmamap_free(imgu,
&imgu_pipe->queues[i].dmap);
}
@@ -93,7 +93,7 @@ static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
if (i == IMGU_QUEUE_MASTER || size == 0)
continue;
- if (!ipu3_dmamap_alloc(imgu,
+ if (!imgu_dmamap_alloc(imgu,
&imgu_pipe->queues[i].dmap, size)) {
imgu_dummybufs_cleanup(imgu, pipe);
return -ENOMEM;
@@ -133,7 +133,7 @@ static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
else
size = mpix->plane_fmt[0].sizeimage;
- if (ipu3_css_dma_buffer_resize(imgu,
+ if (imgu_css_dma_buffer_resize(imgu,
&imgu_pipe->queues[i].dmap,
size)) {
imgu_dummybufs_cleanup(imgu, pipe);
@@ -141,7 +141,7 @@ static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
}
for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
- ipu3_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
+ imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
imgu_pipe->queues[i].dmap.daddr);
}
@@ -149,7 +149,7 @@ static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
}
/* May be called from atomic context */
-static struct ipu3_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
+static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
int queue, unsigned int pipe)
{
unsigned int i;
@@ -164,14 +164,14 @@ static struct ipu3_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
return NULL;
for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
- if (ipu3_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
+ if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
IPU3_CSS_BUFFER_QUEUED)
break;
if (i == IMGU_MAX_QUEUE_DEPTH)
return NULL;
- ipu3_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
+ imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
imgu_pipe->queues[queue].dmap.daddr);
return &imgu_pipe->queues[queue].dummybufs[i];
@@ -179,7 +179,7 @@ static struct ipu3_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
/* Check if given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
- struct ipu3_css_buffer *buf,
+ struct imgu_css_buffer *buf,
unsigned int pipe)
{
unsigned int i;
@@ -200,7 +200,7 @@ static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
mutex_unlock(&imgu->lock);
}
-static struct ipu3_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
+static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
unsigned int node,
unsigned int pipe)
{
@@ -212,7 +212,7 @@ static struct ipu3_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
/* Find first free buffer from the node */
list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
- if (ipu3_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
+ if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
return &buf->css_buf;
}
@@ -230,7 +230,7 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe
int r = 0;
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
- if (!ipu3_css_is_streaming(&imgu->css))
+ if (!imgu_css_is_streaming(&imgu->css))
return 0;
dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
@@ -247,7 +247,7 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe
"Vf not enabled, ignore queue");
continue;
} else if (imgu_pipe->queue_enabled[node]) {
- struct ipu3_css_buffer *buf =
+ struct imgu_css_buffer *buf =
imgu_queue_getbuf(imgu, node, pipe);
struct imgu_buffer *ibuf = NULL;
bool dummy;
@@ -255,7 +255,7 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe
if (!buf)
break;
- r = ipu3_css_buf_queue(&imgu->css, pipe, buf);
+ r = imgu_css_buf_queue(&imgu->css, pipe, buf);
if (r)
break;
dummy = imgu_dummybufs_check(imgu, buf, pipe);
@@ -300,7 +300,7 @@ failed:
list_for_each_entry_safe(buf, buf0,
&imgu_pipe->nodes[node].buffers,
vid_buf.list) {
- if (ipu3_css_buf_state(&buf->css_buf) ==
+ if (imgu_css_buf_state(&buf->css_buf) ==
IPU3_CSS_BUFFER_QUEUED)
continue; /* Was already queued, skip */
@@ -317,18 +317,18 @@ static int imgu_powerup(struct imgu_device *imgu)
{
int r;
- r = ipu3_css_set_powerup(&imgu->pci_dev->dev, imgu->base);
+ r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base);
if (r)
return r;
- ipu3_mmu_resume(imgu->mmu);
+ imgu_mmu_resume(imgu->mmu);
return 0;
}
static void imgu_powerdown(struct imgu_device *imgu)
{
- ipu3_mmu_suspend(imgu->mmu);
- ipu3_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
+ imgu_mmu_suspend(imgu->mmu);
+ imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
}
int imgu_s_stream(struct imgu_device *imgu, int enable)
@@ -341,7 +341,7 @@ int imgu_s_stream(struct imgu_device *imgu, int enable)
dev_dbg(dev, "stream off\n");
/* Block new buffers to be queued to CSS. */
atomic_set(&imgu->qbuf_barrier, 1);
- ipu3_css_stop_streaming(&imgu->css);
+ imgu_css_stop_streaming(&imgu->css);
synchronize_irq(imgu->pci_dev->irq);
atomic_set(&imgu->qbuf_barrier, 0);
imgu_powerdown(imgu);
@@ -366,7 +366,7 @@ int imgu_s_stream(struct imgu_device *imgu, int enable)
}
/* Start CSS streaming */
- r = ipu3_css_start_streaming(&imgu->css);
+ r = imgu_css_start_streaming(&imgu->css);
if (r) {
dev_err(dev, "failed to start css streaming (%d)", r);
goto fail_start_streaming;
@@ -393,7 +393,7 @@ fail_queueing:
for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
- ipu3_css_stop_streaming(&imgu->css);
+ imgu_css_stop_streaming(&imgu->css);
fail_start_streaming:
pm_runtime_put(dev);
@@ -435,7 +435,7 @@ static int imgu_video_nodes_init(struct imgu_device *imgu)
rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
- ipu3_css_fmt_set(&imgu->css, fmts, rects, j);
+ imgu_css_fmt_set(&imgu->css, fmts, rects, j);
/* Pre-allocate dummy buffers */
r = imgu_dummybufs_preallocate(imgu, j);
@@ -478,23 +478,22 @@ static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
/* Dequeue / queue buffers */
do {
u64 ns = ktime_get_ns();
- struct ipu3_css_buffer *b;
+ struct imgu_css_buffer *b;
struct imgu_buffer *buf = NULL;
unsigned int node, pipe;
bool dummy;
do {
mutex_lock(&imgu->lock);
- b = ipu3_css_buf_dequeue(&imgu->css);
+ b = imgu_css_buf_dequeue(&imgu->css);
mutex_unlock(&imgu->lock);
} while (PTR_ERR(b) == -EAGAIN);
- if (IS_ERR_OR_NULL(b)) {
- if (!b || PTR_ERR(b) == -EBUSY) /* All done */
- break;
- dev_err(&imgu->pci_dev->dev,
- "failed to dequeue buffers (%ld)\n",
- PTR_ERR(b));
+ if (IS_ERR(b)) {
+ if (PTR_ERR(b) != -EBUSY) /* All done */
+ dev_err(&imgu->pci_dev->dev,
+ "failed to dequeue buffers (%ld)\n",
+ PTR_ERR(b));
break;
}
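The rewritten loop assumes imgu_css_buf_dequeue() reports every outcome as an ERR_PTR()-encoded pointer and never returns NULL, so the old IS_ERR_OR_NULL() check plus NULL special case collapses into a single IS_ERR() test, with -EBUSY meaning "queue drained" and anything else a real failure. A sketch of that return-value convention, with hypothetical helpers:

static struct example_buf *example_dequeue(struct example_queue *q)
{
	if (example_queue_empty(q))	/* hypothetical: nothing left */
		return ERR_PTR(-EBUSY);
	if (example_hw_busy(q))		/* hypothetical: caller retries */
		return ERR_PTR(-EAGAIN);
	return q->next;			/* a valid buffer pointer */
}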
@@ -526,12 +525,12 @@ static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
buf->vid_buf.vbb.sequence);
}
imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
- ipu3_css_buf_state(&buf->css_buf) ==
+ imgu_css_buf_state(&buf->css_buf) ==
IPU3_CSS_BUFFER_DONE ?
VB2_BUF_STATE_DONE :
VB2_BUF_STATE_ERROR);
mutex_lock(&imgu->lock);
- if (ipu3_css_queue_empty(&imgu->css))
+ if (imgu_css_queue_empty(&imgu->css))
wake_up_all(&imgu->buf_drain_wq);
mutex_unlock(&imgu->lock);
} while (1);
@@ -553,7 +552,7 @@ static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
struct imgu_device *imgu = imgu_ptr;
/* acknowledge interruption */
- if (ipu3_css_irq_ack(&imgu->css) < 0)
+ if (imgu_css_irq_ack(&imgu->css) < 0)
return IRQ_NONE;
return IRQ_WAKE_THREAD;
@@ -638,21 +637,21 @@ static int imgu_pci_probe(struct pci_dev *pci_dev,
atomic_set(&imgu->qbuf_barrier, 0);
init_waitqueue_head(&imgu->buf_drain_wq);
- r = ipu3_css_set_powerup(&pci_dev->dev, imgu->base);
+ r = imgu_css_set_powerup(&pci_dev->dev, imgu->base);
if (r) {
dev_err(&pci_dev->dev,
"failed to power up CSS (%d)\n", r);
goto out_mutex_destroy;
}
- imgu->mmu = ipu3_mmu_init(&pci_dev->dev, imgu->base);
+ imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
if (IS_ERR(imgu->mmu)) {
r = PTR_ERR(imgu->mmu);
dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
goto out_css_powerdown;
}
- r = ipu3_dmamap_init(imgu);
+ r = imgu_dmamap_init(imgu);
if (r) {
dev_err(&pci_dev->dev,
"failed to initialize DMA mapping (%d)\n", r);
@@ -660,7 +659,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev,
}
/* ISP programming */
- r = ipu3_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
+ r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
if (r) {
dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
goto out_dmamap_exit;
@@ -690,13 +689,13 @@ static int imgu_pci_probe(struct pci_dev *pci_dev,
out_video_exit:
imgu_video_nodes_exit(imgu);
out_css_cleanup:
- ipu3_css_cleanup(&imgu->css);
+ imgu_css_cleanup(&imgu->css);
out_dmamap_exit:
- ipu3_dmamap_exit(imgu);
+ imgu_dmamap_exit(imgu);
out_mmu_exit:
- ipu3_mmu_exit(imgu->mmu);
+ imgu_mmu_exit(imgu->mmu);
out_css_powerdown:
- ipu3_css_set_powerdown(&pci_dev->dev, imgu->base);
+ imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
mutex_destroy(&imgu->lock);
@@ -711,10 +710,10 @@ static void imgu_pci_remove(struct pci_dev *pci_dev)
pm_runtime_get_noresume(&pci_dev->dev);
imgu_video_nodes_exit(imgu);
- ipu3_css_cleanup(&imgu->css);
- ipu3_css_set_powerdown(&pci_dev->dev, imgu->base);
- ipu3_dmamap_exit(imgu);
- ipu3_mmu_exit(imgu->mmu);
+ imgu_css_cleanup(&imgu->css);
+ imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
+ imgu_dmamap_exit(imgu);
+ imgu_mmu_exit(imgu->mmu);
mutex_destroy(&imgu->lock);
}
@@ -724,7 +723,7 @@ static int __maybe_unused imgu_suspend(struct device *dev)
struct imgu_device *imgu = pci_get_drvdata(pci_dev);
dev_dbg(dev, "enter %s\n", __func__);
- imgu->suspend_in_stream = ipu3_css_is_streaming(&imgu->css);
+ imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
if (!imgu->suspend_in_stream)
goto out;
/* Block new buffers to be queued to CSS. */
@@ -736,10 +735,10 @@ static int __maybe_unused imgu_suspend(struct device *dev)
synchronize_irq(pci_dev->irq);
/* Wait until all buffers in CSS are done. */
if (!wait_event_timeout(imgu->buf_drain_wq,
- ipu3_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
+ imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
dev_err(dev, "wait buffer drain timeout.\n");
- ipu3_css_stop_streaming(&imgu->css);
+ imgu_css_stop_streaming(&imgu->css);
atomic_set(&imgu->qbuf_barrier, 0);
imgu_powerdown(imgu);
pm_runtime_force_suspend(dev);
@@ -769,7 +768,7 @@ static int __maybe_unused imgu_resume(struct device *dev)
}
/* Start CSS streaming */
- r = ipu3_css_start_streaming(&imgu->css);
+ r = imgu_css_start_streaming(&imgu->css);
if (r) {
dev_err(dev, "failed to resume css streaming (%d)", r);
goto out;
diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h
index 04fc99f47ebb..73b123b2b8a2 100644
--- a/drivers/staging/media/ipu3/ipu3.h
+++ b/drivers/staging/media/ipu3/ipu3.h
@@ -32,7 +32,7 @@
#define IMGU_NODE_STAT_3A 4 /* 3A statistics */
#define IMGU_NODE_NUM 5
-#define file_to_intel_ipu3_node(__file) \
+#define file_to_intel_imgu_node(__file) \
container_of(video_devdata(__file), struct imgu_video_device, vdev)
#define IPU3_INPUT_MIN_WIDTH 0U
@@ -44,7 +44,7 @@
#define IPU3_OUTPUT_MAX_WIDTH 4480U
#define IPU3_OUTPUT_MAX_HEIGHT 34004U
-struct ipu3_vb2_buffer {
+struct imgu_vb2_buffer {
/* Public fields */
struct vb2_v4l2_buffer vbb; /* Must be the first field */
@@ -53,9 +53,9 @@ struct ipu3_vb2_buffer {
};
struct imgu_buffer {
- struct ipu3_vb2_buffer vid_buf; /* Must be the first field */
- struct ipu3_css_buffer css_buf;
- struct ipu3_css_map map;
+ struct imgu_vb2_buffer vid_buf; /* Must be the first field */
+ struct imgu_css_buffer css_buf;
+ struct imgu_css_map map;
};
struct imgu_node_mapping {
@@ -107,8 +107,8 @@ struct imgu_media_pipe {
/* Internally enabled queues */
struct {
- struct ipu3_css_map dmap;
- struct ipu3_css_buffer dummybufs[IMGU_MAX_QUEUE_DEPTH];
+ struct imgu_css_map dmap;
+ struct imgu_css_buffer dummybufs[IMGU_MAX_QUEUE_DEPTH];
} queues[IPU3_CSS_QUEUES];
struct imgu_video_device nodes[IMGU_NODE_NUM];
bool queue_enabled[IMGU_NODE_NUM];
@@ -135,18 +135,18 @@ struct imgu_device {
struct v4l2_file_operations v4l2_file_ops;
/* MMU driver for css */
- struct ipu3_mmu_info *mmu;
+ struct imgu_mmu_info *mmu;
struct iova_domain iova_domain;
/* css - Camera Sub-System */
- struct ipu3_css css;
+ struct imgu_css css;
/*
* Coarse-grained lock to protect
* vid_buf.list and css->queue
*/
struct mutex lock;
- /* Forbit streaming and buffer queuing during system suspend. */
+ /* Forbid streaming and buffer queuing during system suspend. */
atomic_t qbuf_barrier;
/* Indicates whether system suspend takes place while imgu is streaming. */
bool suspend_in_stream;
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index 059cf5bd3c36..a6dc2d2b1228 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -712,7 +712,7 @@ static void csi2_isr_ctx(struct iss_csi2_device *csi2,
/* Skip interrupts until we reach the frame skip count. The CSI2 will be
* automatically disabled, as the frame skip count has been programmed
- * in the CSI2_CTx_CTRL1::COUNT field, so reenable it.
+ * in the CSI2_CTx_CTRL1::COUNT field, so re-enable it.
*
* It would have been nice to rely on the FRAME_NUMBER interrupt instead
* but it turned out that the interrupt is only generated when the CSI2
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
index 5282236d1bb1..06daea66fb49 100644
--- a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
@@ -80,7 +80,7 @@ rk3288_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
{
struct rockchip_vpu_dev *vpu = ctx->dev;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct rockchip_vpu_jpeg_ctx jpeg_ctx;
u32 reg;
@@ -88,7 +88,7 @@ void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
- jpeg_ctx.buffer = vb2_plane_vaddr(dst_buf, 0);
+ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
jpeg_ctx.width = ctx->dst_fmt.width;
jpeg_ctx.height = ctx->dst_fmt.height;
jpeg_ctx.quality = ctx->jpeg_quality;
@@ -99,7 +99,7 @@ void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
VEPU_REG_ENC_CTRL);
rk3288_vpu_set_src_img_ctrl(vpu, ctx);
- rk3288_vpu_jpeg_enc_set_buffers(vpu, ctx, src_buf);
+ rk3288_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
rk3288_vpu_jpeg_enc_set_qtable(vpu,
rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
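The v4l2-mem2mem helpers v4l2_m2m_next_src_buf() and v4l2_m2m_next_dst_buf() hand back struct vb2_v4l2_buffer pointers, so callers that need the raw vb2_buffer for plane helpers must take the embedded vb2_buf, as the hunks above now do. A short sketch of the corrected usage:

static void *example_dst_vaddr(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *dst = v4l2_m2m_next_dst_buf(m2m_ctx);

	/* plane accessors operate on the embedded vb2_buf */
	return dst ? vb2_plane_vaddr(&dst->vb2_buf, 0) : NULL;
}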
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
index dbc86d95fe3b..3d438797692e 100644
--- a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
@@ -111,7 +111,7 @@ rk3399_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
{
struct rockchip_vpu_dev *vpu = ctx->dev;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct rockchip_vpu_jpeg_ctx jpeg_ctx;
u32 reg;
@@ -119,7 +119,7 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
- jpeg_ctx.buffer = vb2_plane_vaddr(dst_buf, 0);
+ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
jpeg_ctx.width = ctx->dst_fmt.width;
jpeg_ctx.height = ctx->dst_fmt.height;
jpeg_ctx.quality = ctx->jpeg_quality;
@@ -130,7 +130,7 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
VEPU_REG_ENCODE_START);
rk3399_vpu_set_src_img_ctrl(vpu, ctx);
- rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, src_buf);
+ rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
rk3399_vpu_jpeg_enc_set_qtable(vpu,
rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
diff --git a/drivers/media/i2c/soc_camera/Kconfig b/drivers/staging/media/soc_camera/Kconfig
index 7c2aabc8a3f6..bacd30f0348d 100644
--- a/drivers/media/i2c/soc_camera/Kconfig
+++ b/drivers/staging/media/soc_camera/Kconfig
@@ -1,11 +1,13 @@
-comment "soc_camera sensor drivers"
-
-config SOC_CAMERA_MT9M001
- tristate "mt9m001 support"
- depends on SOC_CAMERA && I2C
+config SOC_CAMERA
+ tristate "SoC camera support"
+ depends on VIDEO_V4L2 && HAS_DMA && I2C && BROKEN
+ select VIDEOBUF2_CORE
help
- This driver supports MT9M001 cameras from Micron, monochrome
- and colour models.
+	  SoC Camera is a common API for several cameras that do not
+	  connect over a bus like PCI or USB, for example an I2C camera
+	  connected directly to the data bus of an SoC.

+
+comment "soc_camera sensor drivers"
config SOC_CAMERA_MT9M111
tristate "legacy soc_camera mt9m111, mt9m112 and mt9m131 support"
@@ -17,12 +19,6 @@ config SOC_CAMERA_MT9M111
This is the legacy configuration which shouldn't be used anymore,
while VIDEO_MT9M111 should be used instead.
-config SOC_CAMERA_MT9T112
- tristate "mt9t112 support"
- depends on SOC_CAMERA && I2C
- help
- This driver supports MT9T112 cameras from Aptina.
-
config SOC_CAMERA_MT9V022
tristate "mt9v022 and mt9v024 support"
depends on SOC_CAMERA && I2C
@@ -35,32 +31,20 @@ config SOC_CAMERA_OV5642
help
This is a V4L2 camera driver for the OmniVision OV5642 sensor
-config SOC_CAMERA_OV772X
- tristate "ov772x camera support"
- depends on SOC_CAMERA && I2C
- help
- This is a ov772x camera driver
-
-config SOC_CAMERA_OV9640
- tristate "ov9640 camera support"
- depends on SOC_CAMERA && I2C
- help
- This is a ov9640 camera driver
-
config SOC_CAMERA_OV9740
tristate "ov9740 camera support"
depends on SOC_CAMERA && I2C
help
This is a ov9740 camera driver
-config SOC_CAMERA_RJ54N1
- tristate "rj54n1cb0c support"
+config SOC_CAMERA_IMX074
+ tristate "imx074 support (DEPRECATED)"
depends on SOC_CAMERA && I2C
help
- This is a rj54n1cb0c video driver
+	  This driver supports IMX074 cameras from Sony.
-config SOC_CAMERA_TW9910
- tristate "tw9910 support"
+config SOC_CAMERA_MT9T031
+ tristate "mt9t031 support (DEPRECATED)"
depends on SOC_CAMERA && I2C
help
- This is a tw9910 video driver
+ This driver supports MT9T031 cameras from Micron.
diff --git a/drivers/staging/media/soc_camera/Makefile b/drivers/staging/media/soc_camera/Makefile
new file mode 100644
index 000000000000..3a351bd629f5
--- /dev/null
+++ b/drivers/staging/media/soc_camera/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
+obj-$(CONFIG_SOC_CAMERA_MT9V022) += soc_mt9v022.o
+obj-$(CONFIG_SOC_CAMERA_OV5642) += soc_ov5642.o
+obj-$(CONFIG_SOC_CAMERA_OV9740) += soc_ov9740.o
+obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
+obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
diff --git a/drivers/staging/media/imx074/imx074.c b/drivers/staging/media/soc_camera/imx074.c
index 1676c166dc83..1676c166dc83 100644
--- a/drivers/staging/media/imx074/imx074.c
+++ b/drivers/staging/media/soc_camera/imx074.c
diff --git a/drivers/staging/media/mt9t031/mt9t031.c b/drivers/staging/media/soc_camera/mt9t031.c
index 4ff179302b4f..4ff179302b4f 100644
--- a/drivers/staging/media/mt9t031/mt9t031.c
+++ b/drivers/staging/media/soc_camera/mt9t031.c
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/staging/media/soc_camera/soc_camera.c
index 21034339cdcb..1ab86a7499b9 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/staging/media/soc_camera/soc_camera.c
@@ -4,11 +4,11 @@
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
*
* This driver provides an interface between platform-specific camera
- * busses and camera devices. It should be used if the camera is
+ * buses and camera devices. It should be used if the camera is
* connected not over a "proper" bus like PCI or USB, but over a
* special bus, like, for example, the Quick Capture interface on PXA270
* SoCs. Later it should also be used for i.MX31 SoCs from Freescale.
- * It can handle multiple cameras and / or multiple busses, which can
+ * It can handle multiple cameras and / or multiple buses, which can
* be used, e.g., in stereo-vision applications.
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/staging/media/soc_camera/soc_mediabus.c
index be74008ec0ca..be74008ec0ca 100644
--- a/drivers/media/platform/soc_camera/soc_mediabus.c
+++ b/drivers/staging/media/soc_camera/soc_mediabus.c
diff --git a/drivers/media/i2c/soc_camera/soc_mt9v022.c b/drivers/staging/media/soc_camera/soc_mt9v022.c
index 6d922b17ea94..6d922b17ea94 100644
--- a/drivers/media/i2c/soc_camera/soc_mt9v022.c
+++ b/drivers/staging/media/soc_camera/soc_mt9v022.c
diff --git a/drivers/media/i2c/soc_camera/soc_ov5642.c b/drivers/staging/media/soc_camera/soc_ov5642.c
index 0931898c79dd..0931898c79dd 100644
--- a/drivers/media/i2c/soc_camera/soc_ov5642.c
+++ b/drivers/staging/media/soc_camera/soc_ov5642.c
diff --git a/drivers/media/i2c/soc_camera/soc_ov9740.c b/drivers/staging/media/soc_camera/soc_ov9740.c
index a07d3145d1b4..a07d3145d1b4 100644
--- a/drivers/media/i2c/soc_camera/soc_ov9740.c
+++ b/drivers/staging/media/soc_camera/soc_ov9740.c
diff --git a/drivers/staging/media/sunxi/cedrus/TODO b/drivers/staging/media/sunxi/cedrus/TODO
index a951b3fd1ea1..ec277ece47af 100644
--- a/drivers/staging/media/sunxi/cedrus/TODO
+++ b/drivers/staging/media/sunxi/cedrus/TODO
@@ -5,8 +5,3 @@ Before this stateless decoder driver can leave the staging area:
* Userspace support for the Request API needs to be reviewed;
* Another stateless decoder driver should be submitted;
* At least one stateless encoder driver should be submitted.
-* When queueing a request containing references to I frames, the
- refcount of the memory for those I frames needs to be incremented
- and decremented when the request is completed. This will likely
- require some help from vb2. The driver should fail the request
- if the memory/buffer is gone.
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 3acfdcf83691..4aedd24a9848 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -140,11 +140,14 @@ static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
}
static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
- unsigned int index,
- unsigned int plane)
+ int index, unsigned int plane)
{
- struct vb2_buffer *buf = ctx->dst_bufs[index];
+ struct vb2_buffer *buf;
+ if (index < 0)
+ return 0;
+
+ buf = ctx->dst_bufs[index];
return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
}
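Making the index signed lets a negative value mean "no reference frame", which the function now maps to a 0 DMA address instead of indexing dst_bufs[] out of bounds; this pairs with the timestamp-based reference lookup in the mpeg2 hunk further down. A sketch of the guard, with hypothetical names:

static dma_addr_t example_ref_addr(struct example_ctx *ctx, int index,
				   unsigned int plane)
{
	if (index < 0)
		return 0;	/* hardware reads 0 as "no reference" */

	return example_buf_addr(ctx->dst_bufs[index], plane);	/* hypothetical */
}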
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 591d191d4286..4d6d602cdde6 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -50,6 +50,8 @@ void cedrus_device_run(void *priv)
break;
}
+ v4l2_m2m_buf_copy_metadata(run.src, run.dst, true);
+
dev->dec_ops[ctx->current_codec]->setup(ctx, &run);
/* Complete request(s) controls if needed. */
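Stateless decoders must propagate the source buffer's timestamp (and frame flags) to the decoded capture buffer, otherwise later requests could not name reference frames by timestamp; v4l2_m2m_buf_copy_metadata() performs exactly that copy. A sketch of where the call sits in a device_run, assuming hypothetical context names:

static void example_device_run(void *priv)
{
	struct example_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	struct vb2_v4l2_buffer *dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* copy timestamp and frame flags before programming the hardware */
	v4l2_m2m_buf_copy_metadata(src, dst, true);

	/* ... codec-specific setup and trigger ... */
}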
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.h b/drivers/staging/media/sunxi/cedrus/cedrus_dec.h
index 4f423d3a1cad..d1ae7903677b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.h
@@ -16,12 +16,6 @@
#ifndef _CEDRUS_DEC_H_
#define _CEDRUS_DEC_H_
-extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
-
-void cedrus_device_work(struct work_struct *work);
void cedrus_device_run(void *priv);
-int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
- struct vb2_queue *dst_vq);
-
#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 300339fee1bc..0acf219a8c91 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -157,14 +157,14 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
irq_dec = platform_get_irq(dev->pdev, 0);
if (irq_dec <= 0) {
- v4l2_err(&dev->v4l2_dev, "Failed to get IRQ\n");
+ dev_err(dev->dev, "Failed to get IRQ\n");
return irq_dec;
}
ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
0, dev_name(dev->dev), dev);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to request IRQ\n");
+ dev_err(dev->dev, "Failed to request IRQ\n");
return ret;
}
@@ -182,21 +182,21 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
ret = of_reserved_mem_device_init(dev->dev);
if (ret && ret != -ENODEV) {
- v4l2_err(&dev->v4l2_dev, "Failed to reserve memory\n");
+ dev_err(dev->dev, "Failed to reserve memory\n");
return ret;
}
ret = sunxi_sram_claim(dev->dev);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to claim SRAM\n");
+ dev_err(dev->dev, "Failed to claim SRAM\n");
goto err_mem;
}
dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
if (IS_ERR(dev->ahb_clk)) {
- v4l2_err(&dev->v4l2_dev, "Failed to get AHB clock\n");
+ dev_err(dev->dev, "Failed to get AHB clock\n");
ret = PTR_ERR(dev->ahb_clk);
goto err_sram;
@@ -204,7 +204,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
dev->mod_clk = devm_clk_get(dev->dev, "mod");
if (IS_ERR(dev->mod_clk)) {
- v4l2_err(&dev->v4l2_dev, "Failed to get MOD clock\n");
+ dev_err(dev->dev, "Failed to get MOD clock\n");
ret = PTR_ERR(dev->mod_clk);
goto err_sram;
@@ -212,7 +212,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
dev->ram_clk = devm_clk_get(dev->dev, "ram");
if (IS_ERR(dev->ram_clk)) {
- v4l2_err(&dev->v4l2_dev, "Failed to get RAM clock\n");
+ dev_err(dev->dev, "Failed to get RAM clock\n");
ret = PTR_ERR(dev->ram_clk);
goto err_sram;
@@ -220,7 +220,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
dev->rstc = devm_reset_control_get(dev->dev, NULL);
if (IS_ERR(dev->rstc)) {
- v4l2_err(&dev->v4l2_dev, "Failed to get reset control\n");
+ dev_err(dev->dev, "Failed to get reset control\n");
ret = PTR_ERR(dev->rstc);
goto err_sram;
@@ -229,7 +229,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
dev->base = devm_ioremap_resource(dev->dev, res);
if (IS_ERR(dev->base)) {
- v4l2_err(&dev->v4l2_dev, "Failed to map registers\n");
+ dev_err(dev->dev, "Failed to map registers\n");
ret = PTR_ERR(dev->base);
goto err_sram;
@@ -237,35 +237,35 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
ret = clk_set_rate(dev->mod_clk, CEDRUS_CLOCK_RATE_DEFAULT);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to set clock rate\n");
+ dev_err(dev->dev, "Failed to set clock rate\n");
goto err_sram;
}
ret = clk_prepare_enable(dev->ahb_clk);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to enable AHB clock\n");
+ dev_err(dev->dev, "Failed to enable AHB clock\n");
goto err_sram;
}
ret = clk_prepare_enable(dev->mod_clk);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to enable MOD clock\n");
+ dev_err(dev->dev, "Failed to enable MOD clock\n");
goto err_ahb_clk;
}
ret = clk_prepare_enable(dev->ram_clk);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to enable RAM clock\n");
+ dev_err(dev->dev, "Failed to enable RAM clock\n");
goto err_mod_clk;
}
ret = reset_control_reset(dev->rstc);
if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to apply reset\n");
+ dev_err(dev->dev, "Failed to apply reset\n");
goto err_ram_clk;
}
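All of these probe-time messages switch from v4l2_err() to dev_err() because they fire before v4l2_device_register() has run, when printing through the not-yet-initialized v4l2_dev is misleading. A sketch of the preferred pattern during probe, with hypothetical names:

static int example_probe_clk(struct example_dev *dev)
{
	dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
	if (IS_ERR(dev->ahb_clk)) {
		/* v4l2_dev is not registered yet, so log via struct device */
		dev_err(dev->dev, "Failed to get AHB clock\n");
		return PTR_ERR(dev->ahb_clk);
	}

	return 0;
}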
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
index 9abd39cae38c..13c34927bad5 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -82,7 +82,10 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
dma_addr_t fwd_luma_addr, fwd_chroma_addr;
dma_addr_t bwd_luma_addr, bwd_chroma_addr;
struct cedrus_dev *dev = ctx->dev;
+ struct vb2_queue *vq;
const u8 *matrix;
+ int forward_idx;
+ int backward_idx;
unsigned int i;
u32 reg;
@@ -157,22 +160,18 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
/* Forward and backward prediction reference buffers. */
- fwd_luma_addr = cedrus_dst_buf_addr(ctx,
- slice_params->forward_ref_index,
- 0);
- fwd_chroma_addr = cedrus_dst_buf_addr(ctx,
- slice_params->forward_ref_index,
- 1);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ forward_idx = vb2_find_timestamp(vq, slice_params->forward_ref_ts, 0);
+ fwd_luma_addr = cedrus_dst_buf_addr(ctx, forward_idx, 0);
+ fwd_chroma_addr = cedrus_dst_buf_addr(ctx, forward_idx, 1);
cedrus_write(dev, VE_DEC_MPEG_FWD_REF_LUMA_ADDR, fwd_luma_addr);
cedrus_write(dev, VE_DEC_MPEG_FWD_REF_CHROMA_ADDR, fwd_chroma_addr);
- bwd_luma_addr = cedrus_dst_buf_addr(ctx,
- slice_params->backward_ref_index,
- 0);
- bwd_chroma_addr = cedrus_dst_buf_addr(ctx,
- slice_params->backward_ref_index,
- 1);
+ backward_idx = vb2_find_timestamp(vq, slice_params->backward_ref_ts, 0);
+ bwd_luma_addr = cedrus_dst_buf_addr(ctx, backward_idx, 0);
+ bwd_chroma_addr = cedrus_dst_buf_addr(ctx, backward_idx, 1);
cedrus_write(dev, VE_DEC_MPEG_BWD_REF_LUMA_ADDR, bwd_luma_addr);
cedrus_write(dev, VE_DEC_MPEG_BWD_REF_CHROMA_ADDR, bwd_chroma_addr);
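Instead of trusting a userspace-supplied buffer index, the reference frame is now located by its timestamp: vb2_find_timestamp() scans the CAPTURE queue and returns the matching buffer index, or a negative value when no buffer carries that timestamp, which cedrus_dst_buf_addr() above turns into a "no reference" address. A sketch of the lookup, with a hypothetical ref_ts argument:

static dma_addr_t example_ref_luma(struct example_ctx *ctx, u64 ref_ts)
{
	struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
					       V4L2_BUF_TYPE_VIDEO_CAPTURE);
	int idx = vb2_find_timestamp(vq, ref_ts, 0);	/* < 0 if absent */

	return example_dst_buf_addr(ctx, idx, 0);	/* hypothetical */
}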
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index 8721b4a7d496..b47854b3bce4 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -282,8 +282,13 @@ static int cedrus_s_fmt_vid_cap(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct cedrus_dev *dev = ctx->dev;
+ struct vb2_queue *vq;
int ret;
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
ret = cedrus_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
@@ -299,8 +304,13 @@ static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct vb2_queue *vq;
int ret;
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
ret = cedrus_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
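Both S_FMT handlers now refuse to change the format while the matching vb2 queue has buffers allocated; vb2_is_busy() is the standard check for that, and -EBUSY is the prescribed error. A sketch of the guard, with hypothetical names:

static int example_s_fmt(struct file *file, void *priv,
			 struct v4l2_format *f)
{
	struct example_ctx *ctx = example_file2ctx(file);	/* hypothetical */
	struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);

	if (vb2_is_busy(vq))
		return -EBUSY;	/* format is locked while buffers exist */

	/* ... try_fmt validation, then apply ... */
	return 0;
}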
@@ -416,6 +426,14 @@ static void cedrus_buf_cleanup(struct vb2_buffer *vb)
ctx->dst_bufs[vb->index] = NULL;
}
+static int cedrus_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
static int cedrus_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
@@ -493,6 +511,7 @@ static struct vb2_ops cedrus_qops = {
.buf_init = cedrus_buf_init,
.buf_cleanup = cedrus_buf_cleanup,
.buf_queue = cedrus_buf_queue,
+ .buf_out_validate = cedrus_buf_out_validate,
.buf_request_complete = cedrus_buf_request_complete,
.start_streaming = cedrus_start_streaming,
.stop_streaming = cedrus_stop_streaming,
diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h
index 9bb3c21aa275..e84fb604a689 100644
--- a/drivers/staging/media/zoran/zoran.h
+++ b/drivers/staging/media/zoran/zoran.h
@@ -35,7 +35,7 @@ struct zoran_sync {
unsigned long frame; /* number of buffer that has been free'd */
unsigned long length; /* number of code bytes in buffer (capture only) */
unsigned long seq; /* frame sequence number */
- struct timeval timestamp; /* timestamp */
+ u64 ts; /* timestamp */
};
diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c
index 94dadbba7cd5..ea10523194e8 100644
--- a/drivers/staging/media/zoran/zoran_card.c
+++ b/drivers/staging/media/zoran/zoran_card.c
@@ -1470,7 +1470,7 @@ static int __init zoran_init(void)
v4l_nbufs = 2;
if (v4l_nbufs > VIDEO_MAX_FRAME)
v4l_nbufs = VIDEO_MAX_FRAME;
- /* The user specfies the in KB, we want them in byte
+	/* The user specifies the size in KB, we want it in bytes
* (and page aligned) */
v4l_bufsize = PAGE_ALIGN(v4l_bufsize * 1024);
if (v4l_bufsize < 32768)
diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c
index 40adceebca7e..22b27632762d 100644
--- a/drivers/staging/media/zoran/zoran_device.c
+++ b/drivers/staging/media/zoran/zoran_device.c
@@ -612,7 +612,7 @@ zr36057_set_memgrab (struct zoran *zr,
zr->v4l_memgrab_active = 0;
zr->v4l_grab_frame = NO_GRAB_ACTIVE;
- /* reenable grabbing to screen if it was running */
+ /* re-enable grabbing to screen if it was running */
if (zr->v4l_overlay_active) {
zr36057_overlay(zr, 1);
} else {
@@ -1151,7 +1151,7 @@ zoran_reap_stat_com (struct zoran *zr)
}
frame = zr->jpg_pend[zr->jpg_dma_tail & BUZ_MASK_FRAME];
buffer = &zr->jpg_buffers.buffer[frame];
- v4l2_get_timestamp(&buffer->bs.timestamp);
+ buffer->bs.ts = ktime_get_ns();
if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
buffer->bs.length = (stat_com & 0x7fffff) >> 1;
@@ -1389,7 +1389,7 @@ zoran_irq (int irq,
zr->v4l_buffers.buffer[zr->v4l_grab_frame].state = BUZ_STATE_DONE;
zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.seq = zr->v4l_grab_seq;
- v4l2_get_timestamp(&zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.timestamp);
+ zr->v4l_buffers.buffer[zr->v4l_grab_frame].bs.ts = ktime_get_ns();
zr->v4l_grab_frame = NO_GRAB_ACTIVE;
zr->v4l_pend_tail++;
}
diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c
index 27c76e2eeb41..04f88f9d6bb4 100644
--- a/drivers/staging/media/zoran/zoran_driver.c
+++ b/drivers/staging/media/zoran/zoran_driver.c
@@ -1354,7 +1354,7 @@ static int zoran_v4l2_buffer_status(struct zoran_fh *fh,
fh->buffers.buffer[num].state == BUZ_STATE_USER) {
buf->sequence = fh->buffers.buffer[num].bs.seq;
buf->flags |= V4L2_BUF_FLAG_DONE;
- buf->timestamp = fh->buffers.buffer[num].bs.timestamp;
+ buf->timestamp = ns_to_timeval(fh->buffers.buffer[num].bs.ts);
} else {
buf->flags |= V4L2_BUF_FLAG_QUEUED;
}
@@ -1388,7 +1388,7 @@ static int zoran_v4l2_buffer_status(struct zoran_fh *fh,
if (fh->buffers.buffer[num].state == BUZ_STATE_DONE ||
fh->buffers.buffer[num].state == BUZ_STATE_USER) {
buf->sequence = fh->buffers.buffer[num].bs.seq;
- buf->timestamp = fh->buffers.buffer[num].bs.timestamp;
+ buf->timestamp = ns_to_timeval(fh->buffers.buffer[num].bs.ts);
buf->bytesused = fh->buffers.buffer[num].bs.length;
buf->flags |= V4L2_BUF_FLAG_DONE;
} else {
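Storing the capture time as a u64 nanosecond count from ktime_get_ns() removes struct timeval from the zoran driver's internals (part of the y2038 cleanup); the legacy timeval in struct v4l2_buffer is produced only at the UAPI boundary. A sketch of that boundary conversion:

static void example_fill_v4l2_buffer(struct v4l2_buffer *buf, u64 ts_ns)
{
	/* the legacy UAPI still exposes a struct timeval field */
	buf->timestamp = ns_to_timeval(ts_ns);
}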
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 18936cdb1083..956daf8c3bd2 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -1431,7 +1431,7 @@ int most_register_interface(struct most_interface *iface)
INIT_LIST_HEAD(&iface->p->channel_list);
iface->p->dev_id = id;
- snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
+ strcpy(iface->p->name, iface->description);
iface->dev.init_name = iface->p->name;
iface->dev.bus = &mc.bus;
iface->dev.parent = &mc.dev;
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index b73385540216..250c15ace2a7 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -117,22 +117,6 @@
status = "okay";
};
-&ethernet {
- //mtd-mac-address = <&factory 0xe000>;
- gmac1: mac@0 {
- compatible = "mediatek,eth-mac";
- reg = <0>;
- phy-handle = <&phy1>;
- };
-
- mdio-bus {
- phy1: ethernet-phy@1 {
- reg = <1>;
- phy-mode = "rgmii";
- };
- };
-};
-
&pinctrl {
state_default: pinctrl0 {
gpio {
@@ -141,3 +125,16 @@
};
};
};
+
+&switch0 {
+ ports {
+ port@0 {
+ label = "ethblack";
+ status = "ok";
+ };
+ port@4 {
+ label = "ethblue";
+ status = "ok";
+ };
+ };
+};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 6aff3680ce4b..17020e24abd2 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -372,16 +372,83 @@
mediatek,ethsys = <&ethsys>;
- mediatek,switch = <&gsw>;
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ gmac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ reg = <1>;
+ status = "off";
+ phy-mode = "rgmii";
+ phy-handle = <&phy5>;
+ };
mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
- phy1f: ethernet-phy@1f {
- reg = <0x1f>;
+ phy5: ethernet-phy@5 {
+ reg = <5>;
phy-mode = "rgmii";
};
+
+ switch0: switch0@0 {
+ compatible = "mediatek,mt7621";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ mediatek,mcm;
+ resets = <&rstctrl 2>;
+ reset-names = "mcm";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ port@0 {
+ status = "off";
+ reg = <0>;
+ label = "lan0";
+ };
+ port@1 {
+ status = "off";
+ reg = <1>;
+ label = "lan1";
+ };
+ port@2 {
+ status = "off";
+ reg = <2>;
+ label = "lan2";
+ };
+ port@3 {
+ status = "off";
+ reg = <3>;
+ label = "lan3";
+ };
+ port@4 {
+ status = "off";
+ reg = <4>;
+ label = "lan4";
+ };
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "trgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
};
};
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644
index 596b38552697..000000000000
--- a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Mediatek Gigabit Switch
-=======================
-
-The mediatek gigabit switch can be found on Mediatek SoCs.
-
-Required properties:
-- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
- "mediatek,mt7623-gsw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the gigabit switches interrupt
-
-
-Additional required properties for ARM based SoCs:
-- mediatek,reset-pin: phandle describing the reset GPIO
-- clocks: the clocks used by the switch
-- clock-names: the names of the clocks listed in the clocks property
- these should be "trgpll", "esw", "gp2", "gp1"
-- mt7530-supply: the phandle of the regulator used to power the switch
-- mediatek,pctl-regmap: phandle to the port control regmap. this is used to
- setup the drive current
-
-
-Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
-
-Example:
-
-gsw: switch@1b100000 {
- compatible = "mediatek,mt7623-gsw";
- reg = <0 0x1b110000 0 0x300000>;
-
- interrupt-parent = <&pio>;
- interrupts = <168 IRQ_TYPE_EDGE_RISING>;
-
- clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
- <&ethsys CLK_ETHSYS_ESW>,
- <&ethsys CLK_ETHSYS_GP2>,
- <&ethsys CLK_ETHSYS_GP1>;
- clock-names = "trgpll", "esw", "gp2", "gp1";
-
- mt7530-supply = <&mt6323_vpa_reg>;
-
- mediatek,pctl-regmap = <&syscfg_pctl_a>;
- mediatek,reset-pin = <&pio 15 0>;
-
- status = "okay";
-};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644
index 44ea86c7a96c..000000000000
--- a/drivers/staging/mt7621-eth/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-config NET_VENDOR_MEDIATEK_STAGING
- bool "MediaTek ethernet driver - staging version"
- depends on RALINK
- ---help---
- If you have an MT7621 Mediatek SoC with ethernet, say Y.
-
-if NET_VENDOR_MEDIATEK_STAGING
-choice
- prompt "MAC type"
-
-config NET_MEDIATEK_MT7621
- bool "MT7621"
- depends on MIPS && SOC_MT7621
-
-endchoice
-
-config NET_MEDIATEK_SOC_STAGING
- tristate "MediaTek SoC Gigabit Ethernet support"
- depends on NET_VENDOR_MEDIATEK_STAGING
- select PHYLIB
- ---help---
- This driver supports the gigabit ethernet MACs in the
- MediaTek SoC family.
-
-config NET_MEDIATEK_MDIO
- def_bool NET_MEDIATEK_SOC_STAGING
- depends on NET_MEDIATEK_MT7621
- select PHYLIB
-
-config NET_MEDIATEK_MDIO_MT7620
- def_bool NET_MEDIATEK_SOC_STAGING
- depends on NET_MEDIATEK_MT7621
- select NET_MEDIATEK_MDIO
-
-config NET_MEDIATEK_GSW_MT7621
- def_tristate NET_MEDIATEK_SOC_STAGING
- depends on NET_MEDIATEK_MT7621
-
-endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644
index 018bcc3596b3..000000000000
--- a/drivers/staging/mt7621-eth/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for the Ralink SoCs built-in ethernet macs
-#
-
-mtk-eth-soc-y += mtk_eth_soc.o ethtool.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO) += mdio.o
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621) += soc_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621) += gsw_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644
index f9e47d4b4cd4..000000000000
--- a/drivers/staging/mt7621-eth/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-
-- verify devicetree documentation is consistent with code
-- fix ethtool - currently doesn't return valid data.
-- general code review and clean up
-- add support for second MAC on mt7621
-- convert gsw code to use switchdev interfaces
-- md7620_mmi_write etc should probably be wrapped
- in a regmap abstraction.
-- Get soc_mt7621 to work with QDMA TX if possible.
-- Ensure phys are correctly configured when a cable
- is plugged in.
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644
index 8c4228e2c987..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include "mtk_eth_soc.h"
-#include "ethtool.h"
-
-struct mtk_stat {
- char name[ETH_GSTRING_LEN];
- unsigned int idx;
-};
-
-#define MTK_HW_STAT(stat) { \
- .name = #stat, \
- .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
-}
-
-static const struct mtk_stat mtk_ethtool_hw_stats[] = {
- MTK_HW_STAT(tx_bytes),
- MTK_HW_STAT(tx_packets),
- MTK_HW_STAT(tx_skip),
- MTK_HW_STAT(tx_collisions),
- MTK_HW_STAT(rx_bytes),
- MTK_HW_STAT(rx_packets),
- MTK_HW_STAT(rx_overflow),
- MTK_HW_STAT(rx_fcs_errors),
- MTK_HW_STAT(rx_short_errors),
- MTK_HW_STAT(rx_long_errors),
- MTK_HW_STAT(rx_checksum_errors),
- MTK_HW_STAT(rx_flow_control_packets),
-};
-
-#define MTK_HW_STATS_LEN ARRAY_SIZE(mtk_ethtool_hw_stats)
-
-static int mtk_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
-
- if (!mac->phy_dev)
- return -ENODEV;
-
- if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
- err = phy_read_status(mac->phy_dev);
- if (err)
- return -ENODEV;
- }
-
- phy_ethtool_ksettings_get(mac->phy_dev, cmd);
- return 0;
-}
-
-static int mtk_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if (!mac->phy_dev)
- return -ENODEV;
-
- if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
- if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
- mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
- mac->phy_flags = MTK_PHY_FLAG_PORT;
- } else if (mac->hw->mii_bus) {
- mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
- cmd->base.phy_address);
- if (!mac->phy_dev)
- return -ENODEV;
- mac->phy_flags = MTK_PHY_FLAG_ATTACH;
- } else {
- return -ENODEV;
- }
- }
-
- return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
-}
-
-static void mtk_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_soc_data *soc = mac->hw->soc;
-
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
-
- if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
- info->n_stats = MTK_HW_STATS_LEN;
-}
-
-static u32 mtk_get_msglevel(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- return mac->hw->msg_enable;
-}
-
-static void mtk_set_msglevel(struct net_device *dev, u32 value)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- mac->hw->msg_enable = value;
-}
-
-static int mtk_nway_reset(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if (!mac->phy_dev)
- return -EOPNOTSUPP;
-
- return genphy_restart_aneg(mac->phy_dev);
-}
-
-static u32 mtk_get_link(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
-
- if (!mac->phy_dev)
- goto out_get_link;
-
- if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
- err = genphy_update_link(mac->phy_dev);
- if (err)
- goto out_get_link;
- }
-
- return mac->phy_dev->link;
-
-out_get_link:
- return ethtool_op_get_link(dev);
-}
-
-static int mtk_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if ((ring->tx_pending < 2) ||
- (ring->rx_pending < 2) ||
- (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
- (ring->tx_pending > mac->hw->soc->dma_ring_size))
- return -EINVAL;
-
- dev->netdev_ops->ndo_stop(dev);
-
- mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
- mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
-
- return dev->netdev_ops->ndo_open(dev);
-}
-
-static void mtk_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- ring->rx_max_pending = mac->hw->soc->dma_ring_size;
- ring->tx_max_pending = mac->hw->soc->dma_ring_size;
- ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
- ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
-}
-
-static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
- int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < MTK_HW_STATS_LEN; i++) {
- memcpy(data, mtk_ethtool_hw_stats[i].name,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int mtk_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return MTK_HW_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void mtk_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_hw_stats *hwstats = mac->hw_stats;
- unsigned int start;
- int i;
-
- if (netif_running(dev) && netif_device_present(dev)) {
- if (spin_trylock(&hwstats->stats_lock)) {
- mtk_stats_update_mac(mac);
- spin_unlock(&hwstats->stats_lock);
- }
- }
-
- do {
- start = u64_stats_fetch_begin_irq(&hwstats->syncp);
- for (i = 0; i < MTK_HW_STATS_LEN; i++)
- data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
-
- } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
-}
-
-static struct ethtool_ops mtk_ethtool_ops = {
- .get_link_ksettings = mtk_get_link_ksettings,
- .set_link_ksettings = mtk_set_link_ksettings,
- .get_drvinfo = mtk_get_drvinfo,
- .get_msglevel = mtk_get_msglevel,
- .set_msglevel = mtk_set_msglevel,
- .nway_reset = mtk_nway_reset,
- .get_link = mtk_get_link,
- .set_ringparam = mtk_set_ringparam,
- .get_ringparam = mtk_get_ringparam,
-};
-
-void mtk_set_ethtool_ops(struct net_device *netdev)
-{
- struct mtk_mac *mac = netdev_priv(netdev);
- struct mtk_soc_data *soc = mac->hw->soc;
-
- if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
- mtk_ethtool_ops.get_strings = mtk_get_strings;
- mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
- mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
- }
-
- netdev->ethtool_ops = &mtk_ethtool_ops;
-}
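A note on mtk_set_ringparam() above: it rounds the requested counts down to a power of two via BIT(fls(n) - 1), which is what keeps the NEXT_TX_DESP_IDX()/NEXT_RX_DESP_IDX() mask arithmetic in mtk_eth_soc.c valid. A minimal standalone sketch of that rounding, with fls() reimplemented here since the kernel helper is assumed:

#include <stdio.h>

/* round down to the nearest power of two, as BIT(fls(n) - 1) does */
static unsigned int round_down_pow2(unsigned int n)
{
	unsigned int msb = 0;

	while (n >> msb)		/* find the last set bit, 1-based */
		msb++;
	return 1u << (msb - 1);		/* caller must ensure n != 0, as the driver does */
}

int main(void)
{
	unsigned int req[] = { 2, 100, 256, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(req) / sizeof(req[0]); i++)
		printf("%u -> %u\n", req[i], round_down_pow2(req[i]));
	return 0;
}

So a request for 1000 descriptors silently becomes a 512-entry ring, which is why get_ringparam reports the ring sizes back rather than the values the user passed in.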
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644
index 0071469aea6c..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETHTOOL_H
-#define MTK_ETHTOOL_H
-
-#include <linux/ethtool.h>
-
-void mtk_set_ethtool_ops(struct net_device *netdev);
-
-#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644
index 70f7e5481952..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7620.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_GSW_MT7620_H__
-#define _RALINK_GSW_MT7620_H__
-
-#define GSW_REG_PHY_TIMEOUT (5 * HZ)
-
-#define MT7620_GSW_REG_PIAC 0x0004
-
-#define GSW_NUM_VLANS 16
-#define GSW_NUM_VIDS 4096
-#define GSW_NUM_PORTS 7
-#define GSW_PORT6 6
-
-#define GSW_MDIO_ACCESS BIT(31)
-#define GSW_MDIO_READ BIT(19)
-#define GSW_MDIO_WRITE BIT(18)
-#define GSW_MDIO_START BIT(16)
-#define GSW_MDIO_ADDR_SHIFT 20
-#define GSW_MDIO_REG_SHIFT 25
-
-#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100))
-#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100))
-#define GSW_REG_SMACCR0 0x3fE4
-#define GSW_REG_SMACCR1 0x3fE8
-#define GSW_REG_CKGCR 0x3ff0
-
-#define GSW_REG_IMR 0x7008
-#define GSW_REG_ISR 0x700c
-#define GSW_REG_GPC1 0x7014
-
-#define SYSC_REG_CHIP_REV_ID 0x0c
-#define SYSC_REG_CFG 0x10
-#define SYSC_REG_CFG1 0x14
-#define RST_CTRL_MCM BIT(2)
-#define SYSC_PAD_RGMII2_MDIO 0x58
-#define SYSC_GPIO_MODE 0x60
-
-#define PORT_IRQ_ST_CHG 0x7f
-
-#define MT7621_ESW_PHY_POLLING 0x0000
-#define MT7620_ESW_PHY_POLLING 0x7000
-
-#define PMCR_IPG BIT(18)
-#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
-#define PMCR_BACKOFF BIT(9)
-#define PMCR_BACKPRES BIT(8)
-#define PMCR_RX_FC BIT(5)
-#define PMCR_TX_FC BIT(4)
-#define PMCR_SPEED(_x) (_x << 2)
-#define PMCR_DUPLEX BIT(1)
-#define PMCR_LINK BIT(0)
-
-#define PHY_AN_EN BIT(31)
-#define PHY_PRE_EN BIT(30)
-#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24)
-
-/* ethernet subsystem config register */
-#define ETHSYS_SYSCFG0 0x14
-/* ethernet subsystem clock register */
-#define ETHSYS_CLKCFG0 0x2c
-#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
-
-/* p5 RGMII wrapper TX clock control register */
-#define MT7530_P5RGMIITXCR 0x7b04
-/* p5 RGMII wrapper RX clock control register */
-#define MT7530_P5RGMIIRXCR 0x7b00
-/* TRGMII TDX ODT registers */
-#define MT7530_TRGMII_TD0_ODT 0x7a54
-#define MT7530_TRGMII_TD1_ODT 0x7a5c
-#define MT7530_TRGMII_TD2_ODT 0x7a64
-#define MT7530_TRGMII_TD3_ODT 0x7a6c
-#define MT7530_TRGMII_TD4_ODT 0x7a74
-#define MT7530_TRGMII_TD5_ODT 0x7a7c
-/* TRGMII TCK ctrl register */
-#define MT7530_TRGMII_TCK_CTRL 0x7a78
-/* TRGMII Tx ctrl register */
-#define MT7530_TRGMII_TXCTRL 0x7a40
-/* port 6 extended control register */
-#define MT7530_P6ECR 0x7830
-/* IO driver control register */
-#define MT7530_IO_DRV_CR 0x7810
-/* top signal control register */
-#define MT7530_TOP_SIG_CTRL 0x7808
-/* modified hwtrap register */
-#define MT7530_MHWTRAP 0x7804
-/* hwtrap status register */
-#define MT7530_HWTRAP 0x7800
-/* status interrupt register */
-#define MT7530_SYS_INT_STS 0x700c
-/* system interrupt enable register */
-#define MT7530_SYS_INT_EN 0x7008
-/* system control register */
-#define MT7530_SYS_CTRL 0x7000
-/* port MAC status register */
-#define MT7530_PMSR_P(x) (0x3008 + (x * 0x100))
-/* port MAC control register */
-#define MT7530_PMCR_P(x) (0x3000 + (x * 0x100))
-
-#define MT7621_XTAL_SHIFT 6
-#define MT7621_XTAL_MASK 0x7
-#define MT7621_XTAL_25 6
-#define MT7621_XTAL_40 3
-#define MT7621_MDIO_DRV_MASK (3 << 4)
-#define MT7621_GE1_MODE_MASK (3 << 12)
-
-#define TRGMII_TXCTRL_TXC_INV BIT(30)
-#define P6ECR_INTF_MODE_RGMII BIT(1)
-#define P5RGMIIRXCR_C_ALIGN BIT(8)
-#define P5RGMIIRXCR_DELAY_2 BIT(1)
-#define P5RGMIITXCR_DELAY_2 (BIT(8) | BIT(2))
-
-/* TOP_SIG_CTRL bits */
-#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
-
-/* MHWTRAP bits */
-#define MHWTRAP_MANUAL BIT(16)
-#define MHWTRAP_P5_MAC_SEL BIT(13)
-#define MHWTRAP_P6_DIS BIT(8)
-#define MHWTRAP_P5_RGMII_MODE BIT(7)
-#define MHWTRAP_P5_DIS BIT(6)
-#define MHWTRAP_PHY_ACCESS BIT(5)
-
-/* HWTRAP bits */
-#define HWTRAP_XTAL_SHIFT 9
-#define HWTRAP_XTAL_MASK 0x3
-
-/* SYS_CTRL bits */
-#define SYS_CTRL_SW_RST BIT(1)
-#define SYS_CTRL_REG_RST BIT(0)
-
-/* PMCR bits */
-#define PMCR_IFG_XMIT_96 BIT(18)
-#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE_MODE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
-#define PMCR_BACK_PRES_EN BIT(9)
-#define PMCR_BACKOFF_EN BIT(8)
-#define PMCR_TX_FC_EN BIT(5)
-#define PMCR_RX_FC_EN BIT(4)
-#define PMCR_FORCE_SPEED_1000 BIT(3)
-#define PMCR_FORCE_FDX BIT(1)
-#define PMCR_FORCE_LNK BIT(0)
-#define PMCR_FIXED_LINK (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
- PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
- PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
- PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
- PMCR_FORCE_LNK)
-
-#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN)
-
-/* TRGMII control registers */
-#define GSW_INTF_MODE 0x390
-#define GSW_TRGMII_TD0_ODT 0x354
-#define GSW_TRGMII_TD1_ODT 0x35c
-#define GSW_TRGMII_TD2_ODT 0x364
-#define GSW_TRGMII_TD3_ODT 0x36c
-#define GSW_TRGMII_TXCTL_ODT 0x374
-#define GSW_TRGMII_TCK_ODT 0x37c
-#define GSW_TRGMII_RCK_CTRL 0x300
-
-#define INTF_MODE_TRGMII BIT(1)
-#define TRGMII_RCK_CTRL_RX_RST BIT(31)
-
-/* MAC control registers */
-#define MTK_MAC_P2_MCR 0x200
-#define MTK_MAC_P1_MCR 0x100
-
-#define MAC_MCR_MAX_RX_2K BIT(29)
-#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
-#define MAC_MCR_FORCE_MODE BIT(15)
-#define MAC_MCR_TX_EN BIT(14)
-#define MAC_MCR_RX_EN BIT(13)
-#define MAC_MCR_BACKOFF_EN BIT(9)
-#define MAC_MCR_BACKPR_EN BIT(8)
-#define MAC_MCR_FORCE_RX_FC BIT(5)
-#define MAC_MCR_FORCE_TX_FC BIT(4)
-#define MAC_MCR_SPEED_1000 BIT(3)
-#define MAC_MCR_FORCE_DPX BIT(1)
-#define MAC_MCR_FORCE_LINK BIT(0)
-#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
- MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
- MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
- MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
- MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
- MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
-#define MAC_MCR_FIXED_LINK_FC (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
- MAC_MCR_FIXED_LINK)
-
-/* possible XTAL speed */
-#define MT7623_XTAL_40 0
-#define MT7623_XTAL_20 1
-#define MT7623_XTAL_25 3
-
-/* GPIO port control registers */
-#define GPIO_OD33_CTRL8 0x4c0
-#define GPIO_BIAS_CTRL 0xed0
-#define GPIO_DRV_SEL10 0xf00
-
-/* on MT7620 the function of port 4 can be software configured */
-enum {
- PORT4_EPHY = 0,
- PORT4_EXT,
-};
-
-/* struct mt7620_gsw - the structure that holds the SoC-specific data
- * @dev: The Device struct
- * @base: The base address
- * @piac_offset: The PIAC base may change depending on SoC
- * @irq: The IRQ we are using
- * @port4: The port4 mode on MT7620
- * @autopoll: Is MDIO autopolling enabled
- * @ethsys: The ethsys register map
- * @pctl: The pin control register map
- * @clk_gsw: The switch clock
- * @clk_gp1: The gmac1 clock
- * @clk_gp2: The gmac2 clock
- * @clk_trgpll: The trgmii pll clock
- */
-struct mt7620_gsw {
- struct device *dev;
- void __iomem *base;
- u32 piac_offset;
- int irq;
- int port4;
- unsigned long int autopoll;
-
- struct regmap *ethsys;
- struct regmap *pctl;
-
- struct clk *clk_gsw;
- struct clk *clk_gp1;
- struct clk *clk_gp2;
- struct clk *clk_trgpll;
-};
-
-/* switch register I/O wrappers */
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
-
-/* the callback used by the driver core to bring up the switch */
-int mtk_gsw_init(struct mtk_eth *eth);
-
-/* MDIO access wrappers */
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
-int mt7620_has_carrier(struct mtk_eth *eth);
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
- int speed, int duplex);
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
- u32 phy_register, u32 write_data);
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
-void mt7620_handle_carrier(struct mtk_eth *eth);
-
-#endif
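For reference, the GSW_MDIO_* bits above describe the PIAC command word that the MDIO wrappers in mdio_mt7620.c assemble. A hedged standalone illustration of how a write command is composed (bit values copied from the header above; not an authoritative register description):

#include <stdio.h>
#include <stdint.h>

#define GSW_MDIO_ACCESS		(1u << 31)
#define GSW_MDIO_WRITE		(1u << 18)
#define GSW_MDIO_START		(1u << 16)
#define GSW_MDIO_ADDR_SHIFT	20
#define GSW_MDIO_REG_SHIFT	25

/* compose a PIAC write command the way _mt7620_mii_write() does */
static uint32_t piac_write_cmd(uint32_t phy, uint32_t reg, uint32_t data)
{
	return GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
	       (reg << GSW_MDIO_REG_SHIFT) |
	       (phy << GSW_MDIO_ADDR_SHIFT) |
	       (data & 0xffff);	/* only 16 data bits per transaction */
}

int main(void)
{
	/* page-select write used by mt7530_mdio_w32(): phy 0x1f, reg 0x1f */
	printf("0x%08x\n", piac_write_cmd(0x1f, 0x1f, 0x3ff));
	return 0;
}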
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644
index 53767b17bad9..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7621.c
+++ /dev/null
@@ -1,297 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-
-#include <ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
-{
- iowrite32(val, gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_w32);
-
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
-{
- return ioread32(gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_r32);
-
-static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
-{
- struct mtk_eth *eth = (struct mtk_eth *)_eth;
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
- u32 reg, i;
-
- reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
-
- for (i = 0; i < 5; i++) {
- unsigned int link;
-
- if ((reg & BIT(i)) == 0)
- continue;
-
- link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
-
- if (link == eth->link[i])
- continue;
-
- eth->link[i] = link;
- if (link)
- netdev_info(*eth->netdev,
- "port %d link up\n", i);
- else
- netdev_info(*eth->netdev,
- "port %d link down\n", i);
- }
-
- mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
-
- return IRQ_HANDLED;
-}
-
-static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
- struct device_node *np)
-{
- u32 i;
- u32 val;
-
- /* hardware reset the switch */
- mtk_reset(eth, RST_CTRL_MCM);
- mdelay(10);
-
- /* reduce RGMII2 PAD driving strength */
- rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);
-
- /* gpio mux - RGMII1=Normal mode */
- rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);
-
- /* set GMAC1 RGMII mode */
- rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);
-
- /* enable MDIO to control MT7530 */
- rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);
-
- /* turn off all PHYs */
- for (i = 0; i <= 4; i++) {
- val = _mt7620_mii_read(gsw, i, 0x0);
- val |= BIT(11);
- _mt7620_mii_write(gsw, i, 0x0, val);
- }
-
- /* reset the switch */
- mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
- SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
- usleep_range(10, 20);
-
- if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
- /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
- mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
- mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
- } else {
- /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
- mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
- mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
- }
-
- /* GE2, Link down */
- mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);
-
- /* Enable Port 6, P5 as GMAC5, P5 disable */
- val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
- /* Enable Port 6 */
- val &= ~MHWTRAP_P6_DIS;
- /* Disable Port 5 */
- val |= MHWTRAP_P5_DIS;
- /* manual override of HW-Trap */
- val |= MHWTRAP_MANUAL;
- mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);
-
- val = rt_sysc_r32(SYSC_REG_CFG);
- val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
- if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
- /* 40MHz */
-
- /* disable MT7530 core clock */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x410);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x0);
-
- /* disable MT7530 PLL */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x40d);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x2020);
-
- /* for MT7530 core clock = 500MHz */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x40e);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x119);
-
- /* enable MT7530 PLL */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x40d);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x2820);
-
- usleep_range(20, 40);
-
- /* enable MT7530 core clock */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x410);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- }
-
- /* RGMII */
- _mt7620_mii_write(gsw, 0, 14, 0x1);
-
- /* set MT7530 central align */
- mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
- mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
- MT7530_TRGMII_TXCTRL);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);
-
- /* delay setting for 10/1000M */
- mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
- P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
- mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);
-
- /* lower Tx driving */
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);
-
- /* turn on all PHYs */
- for (i = 0; i <= 4; i++) {
- val = _mt7620_mii_read(gsw, i, 0);
- val &= ~BIT(11);
- _mt7620_mii_write(gsw, i, 0, val);
- }
-
-#define MT7530_NUM_PORTS 8
-#define REG_ESW_PORT_PCR(x) (0x2004 | ((x) << 8))
-#define REG_ESW_PORT_PVC(x) (0x2010 | ((x) << 8))
-#define REG_ESW_PORT_PPBV1(x) (0x2014 | ((x) << 8))
-#define MT7530_CPU_PORT 6
-
- /* This is copied from mt7530_apply_config() in the libreCMC driver */
- {
- int i;
-
- for (i = 0; i < MT7530_NUM_PORTS; i++)
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
-
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
- 0x00ff0000);
-
- for (i = 0; i < MT7530_NUM_PORTS; i++)
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
- }
-
- /* enable irq */
- mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
- mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
-}
-
-static const struct of_device_id mediatek_gsw_match[] = {
- { .compatible = "mediatek,mt7621-gsw" },
- {},
-};
-MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
-
-int mtk_gsw_init(struct mtk_eth *eth)
-{
- struct device_node *np = eth->switch_np;
- struct platform_device *pdev = of_find_device_by_node(np);
- struct mt7620_gsw *gsw;
-
- if (!pdev)
- return -ENODEV;
-
- if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
- return -EINVAL;
-
- gsw = platform_get_drvdata(pdev);
- eth->sw_priv = gsw;
-
- if (!gsw->irq)
- return -EINVAL;
-
- request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
- "gsw", eth);
- disable_irq(gsw->irq);
-
- mt7621_hw_init(eth, gsw, np);
-
- enable_irq(gsw->irq);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mtk_gsw_init);
-
-static int mt7621_gsw_probe(struct platform_device *pdev)
-{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct mt7620_gsw *gsw;
-
- gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
- if (!gsw)
- return -ENOMEM;
-
- gsw->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(gsw->base))
- return PTR_ERR(gsw->base);
-
- gsw->dev = &pdev->dev;
- gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-
- platform_set_drvdata(pdev, gsw);
-
- return 0;
-}
-
-static int mt7621_gsw_remove(struct platform_device *pdev)
-{
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static struct platform_driver gsw_driver = {
- .probe = mt7621_gsw_probe,
- .remove = mt7621_gsw_remove,
- .driver = {
- .name = "mt7621-gsw",
- .of_match_table = mediatek_gsw_match,
- },
-};
-
-module_platform_driver(gsw_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644
index 5fea6a447eed..000000000000
--- a/drivers/staging/mt7621-eth/mdio.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/phy.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-
-static int mtk_mdio_reset(struct mii_bus *bus)
-{
- /* TODO */
- return 0;
-}
-
-static void mtk_phy_link_adjust(struct net_device *dev)
-{
- struct mtk_eth *eth = netdev_priv(dev);
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&eth->phy->lock, flags);
- for (i = 0; i < 8; i++) {
- if (eth->phy->phy_node[i]) {
- struct phy_device *phydev = eth->phy->phy[i];
- int status_change = 0;
-
- if (phydev->link)
- if (eth->phy->duplex[i] != phydev->duplex ||
- eth->phy->speed[i] != phydev->speed)
- status_change = 1;
-
- if (phydev->link != eth->link[i])
- status_change = 1;
-
- switch (phydev->speed) {
- case SPEED_1000:
- case SPEED_100:
- case SPEED_10:
- eth->link[i] = phydev->link;
- eth->phy->duplex[i] = phydev->duplex;
- eth->phy->speed[i] = phydev->speed;
-
- if (status_change &&
- eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- break;
- }
- }
- }
- spin_unlock_irqrestore(&eth->phy->lock, flags);
-}
-
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *phy_node)
-{
- const __be32 *_port = NULL;
- struct phy_device *phydev;
- int phy_mode, port;
-
- _port = of_get_property(phy_node, "reg", NULL);
-
- if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
- pr_err("%pOFn: invalid port id\n", phy_node);
- return -EINVAL;
- }
- port = be32_to_cpu(*_port);
- phy_mode = of_get_phy_mode(phy_node);
- if (phy_mode < 0) {
- dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
- eth->phy->phy_node[port] = NULL;
- return -EINVAL;
- }
-
- phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
- mtk_phy_link_adjust, 0, phy_mode);
- if (!phydev) {
- dev_err(eth->dev, "could not connect to PHY\n");
- eth->phy->phy_node[port] = NULL;
- return -ENODEV;
- }
-
- phydev->supported &= PHY_1000BT_FEATURES;
- phydev->advertising = phydev->supported;
-
- dev_info(eth->dev,
- "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
- port, phydev_name(phydev), phydev->phy_id,
- phydev->drv->name);
-
- eth->phy->phy[port] = phydev;
- eth->link[port] = 0;
-
- return 0;
-}
-
-static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
- struct phy_device *phy)
-{
- phy_attach(eth->netdev[mac->id], phydev_name(phy),
- PHY_INTERFACE_MODE_MII);
-
- phy->autoneg = AUTONEG_ENABLE;
- phy->speed = 0;
- phy->duplex = 0;
- phy_set_max_speed(phy, SPEED_100);
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
-
- phy_start_aneg(phy);
-}
-
-static int mtk_phy_connect(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- int i;
-
- for (i = 0; i < 8; i++) {
- if (eth->phy->phy_node[i]) {
- if (!mac->phy_dev) {
- mac->phy_dev = eth->phy->phy[i];
- mac->phy_flags = MTK_PHY_FLAG_PORT;
- }
- } else if (eth->mii_bus) {
- struct phy_device *phy;
-
- phy = mdiobus_get_phy(eth->mii_bus, i);
- if (phy) {
- phy_init(eth, mac, phy);
- if (!mac->phy_dev) {
- mac->phy_dev = phy;
- mac->phy_flags = MTK_PHY_FLAG_ATTACH;
- }
- }
- }
- }
-
- return 0;
-}
-
-static void mtk_phy_disconnect(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- unsigned long flags;
- int i;
-
- for (i = 0; i < 8; i++)
- if (eth->phy->phy_fixed[i]) {
- spin_lock_irqsave(&eth->phy->lock, flags);
- eth->link[i] = 0;
- if (eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- spin_unlock_irqrestore(&eth->phy->lock, flags);
- } else if (eth->phy->phy[i]) {
- phy_disconnect(eth->phy->phy[i]);
- } else if (eth->mii_bus) {
- struct phy_device *phy =
- mdiobus_get_phy(eth->mii_bus, i);
-
- if (phy)
- phy_detach(phy);
- }
-}
-
-static void mtk_phy_start(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- unsigned long flags;
- int i;
-
- for (i = 0; i < 8; i++) {
- if (eth->phy->phy_fixed[i]) {
- spin_lock_irqsave(&eth->phy->lock, flags);
- eth->link[i] = 1;
- if (eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- spin_unlock_irqrestore(&eth->phy->lock, flags);
- } else if (eth->phy->phy[i]) {
- phy_start(eth->phy->phy[i]);
- }
- }
-}
-
-static void mtk_phy_stop(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- unsigned long flags;
- int i;
-
- for (i = 0; i < 8; i++)
- if (eth->phy->phy_fixed[i]) {
- spin_lock_irqsave(&eth->phy->lock, flags);
- eth->link[i] = 0;
- if (eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- spin_unlock_irqrestore(&eth->phy->lock, flags);
- } else if (eth->phy->phy[i]) {
- phy_stop(eth->phy->phy[i]);
- }
-}
-
-static struct mtk_phy phy_ralink = {
- .connect = mtk_phy_connect,
- .disconnect = mtk_phy_disconnect,
- .start = mtk_phy_start,
- .stop = mtk_phy_stop,
-};
-
-int mtk_mdio_init(struct mtk_eth *eth)
-{
- struct device_node *mii_np;
- int err;
-
- if (!eth->soc->mdio_read || !eth->soc->mdio_write)
- return 0;
-
- spin_lock_init(&phy_ralink.lock);
- eth->phy = &phy_ralink;
-
- mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
- if (!mii_np) {
- dev_err(eth->dev, "no %s child node found", "mdio-bus");
- return -ENODEV;
- }
-
- if (!of_device_is_available(mii_np)) {
- err = 0;
- goto err_put_node;
- }
-
- eth->mii_bus = mdiobus_alloc();
- if (!eth->mii_bus) {
- err = -ENOMEM;
- goto err_put_node;
- }
-
- eth->mii_bus->name = "mdio";
- eth->mii_bus->read = eth->soc->mdio_read;
- eth->mii_bus->write = eth->soc->mdio_write;
- eth->mii_bus->reset = mtk_mdio_reset;
- eth->mii_bus->priv = eth;
- eth->mii_bus->parent = eth->dev;
-
- snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
- err = of_mdiobus_register(eth->mii_bus, mii_np);
- if (err)
- goto err_free_bus;
-
- return 0;
-
-err_free_bus:
- kfree(eth->mii_bus);
-err_put_node:
- of_node_put(mii_np);
- eth->mii_bus = NULL;
- return err;
-}
-
-void mtk_mdio_cleanup(struct mtk_eth *eth)
-{
- if (!eth->mii_bus)
- return;
-
- mdiobus_unregister(eth->mii_bus);
- of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
-}
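mtk_mdio_init() above follows the kernel's goto-unwind convention: each failure path jumps to a label that releases exactly what was acquired so far (free the bus, put the node), in reverse order. A minimal standalone sketch of the same shape, using hypothetical resources rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

/* acquire two resources; on failure release exactly what was taken */
static int init_two(char **a, char **b)
{
	*a = malloc(16);
	if (!*a)
		return -1;

	*b = malloc(16);
	if (!*b)
		goto err_free_a;	/* unwind only what we already own */

	return 0;

err_free_a:
	free(*a);
	*a = NULL;
	return -1;
}

int main(void)
{
	char *a, *b;

	if (init_two(&a, &b))
		return EXIT_FAILURE;
	free(b);
	free(a);
	return EXIT_SUCCESS;
}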
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644
index b14e23842a01..000000000000
--- a/drivers/staging/mt7621-eth/mdio.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_MDIO_H__
-#define _RALINK_MDIO_H__
-
-#ifdef CONFIG_NET_MEDIATEK_MDIO
-int mtk_mdio_init(struct mtk_eth *eth);
-void mtk_mdio_cleanup(struct mtk_eth *eth);
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *phy_node);
-#else
-static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
-static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
-#endif
-#endif
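mdio.h above uses the usual compile-out idiom: when CONFIG_NET_MEDIATEK_MDIO is unset, callers still compile and link because the entry points collapse to static inline no-ops. A self-contained sketch of the same pattern, with a hypothetical CONFIG_FOO feature standing in for the driver's option:

#include <stdio.h>

/* #define CONFIG_FOO 1	-- define to build against the real implementation */

#ifdef CONFIG_FOO
int foo_init(void);		/* real version, defined elsewhere */
#else
static inline int foo_init(void) { return 0; }	/* stub: always succeeds */
#endif

int main(void)
{
	return foo_init();	/* callers need no #ifdef of their own */
}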
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644
index ced605c2914e..000000000000
--- a/drivers/staging/mt7621-eth/mdio_mt7620.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
-{
- unsigned long t_start = jiffies;
-
- while (1) {
- if (!(mtk_switch_r32(gsw,
- gsw->piac_offset + MT7620_GSW_REG_PIAC) &
- GSW_MDIO_ACCESS))
- return 0;
- if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
- break;
- }
-
- dev_err(gsw->dev, "mdio: MDIO timeout\n");
- return -1;
-}
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
- u32 phy_register, u32 write_data)
-{
- if (mt7620_mii_busy_wait(gsw))
- return -1;
-
- write_data &= 0xffff;
-
- mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
- (phy_register << GSW_MDIO_REG_SHIFT) |
- (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
- MT7620_GSW_REG_PIAC);
-
- if (mt7620_mii_busy_wait(gsw))
- return -1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_write);
-
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
-{
- u32 d;
-
- if (mt7620_mii_busy_wait(gsw))
- return 0xffff;
-
- mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
- (phy_reg << GSW_MDIO_REG_SHIFT) |
- (phy_addr << GSW_MDIO_ADDR_SHIFT),
- MT7620_GSW_REG_PIAC);
-
- if (mt7620_mii_busy_wait(gsw))
- return 0xffff;
-
- d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
-
- return d;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_read);
-
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
-{
- struct mtk_eth *eth = bus->priv;
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
- return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
-}
-
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
-{
- struct mtk_eth *eth = bus->priv;
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
- return _mt7620_mii_read(gsw, phy_addr, phy_reg);
-}
-
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
-{
- _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
- _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
- _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
-
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
-{
- u16 high, low;
-
- _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
- low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
- high = _mt7620_mii_read(gsw, 0x1f, 0x10);
-
- return (high << 16) | (low & 0xffff);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
-
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
-{
- u32 val = mt7530_mdio_r32(gsw, reg);
-
- val &= ~mask;
- val |= set;
- mt7530_mdio_w32(gsw, reg, val);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
-
-static const char *mtk_speed_str(int speed)
-{
- switch (speed) {
- case 2:
- case SPEED_1000:
- return "1000";
- case 1:
- case SPEED_100:
- return "100";
- case 0:
- case SPEED_10:
- return "10";
- }
-
- return "? ";
-}
-
-int mt7620_has_carrier(struct mtk_eth *eth)
-{
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
- int i;
-
- for (i = 0; i < GSW_PORT6; i++)
- if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
- return 1;
- return 0;
-}
-
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
- int speed, int duplex)
-{
- struct mt7620_gsw *gsw = eth->sw_priv;
-
- if (link)
- dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
- port, mtk_speed_str(speed),
- (duplex) ? "Full" : "Half");
- else
- dev_info(gsw->dev, "port %d link down\n", port);
-}
-
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
-{
- mt7620_print_link_state(eth, port, eth->link[port],
- eth->phy->speed[port],
- (eth->phy->duplex[port] == DUPLEX_FULL));
-}
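The mt7530_mdio_w32()/mt7530_mdio_r32() pair above tunnels 32-bit switch registers through 16-bit MDIO transactions: one write to pseudo-PHY 0x1f selects a 10-bit page (reg >> 6), then the low and high halves move through register offsets (reg >> 2) & 0xf and 0x10. A standalone sketch of just the address/data split, with the MDIO transfers stubbed out as prints:

#include <stdio.h>
#include <stdint.h>

/* stand-in for _mt7620_mii_write(); the real code talks to the PIAC block */
static void mii_write(uint32_t phy, uint32_t reg, uint32_t val)
{
	printf("mii write phy=0x%02x reg=0x%02x val=0x%04x\n", phy, reg, val);
}

/* split a 32-bit MT7530 register write into three 16-bit MDIO writes */
static void mt7530_w32(uint32_t reg, uint32_t val)
{
	mii_write(0x1f, 0x1f, (reg >> 6) & 0x3ff);	/* page select */
	mii_write(0x1f, (reg >> 2) & 0xf, val & 0xffff);/* low half */
	mii_write(0x1f, 0x10, val >> 16);		/* high half */
}

int main(void)
{
	mt7530_w32(0x7804, 0x00012340);	/* MT7530_MHWTRAP, example value only */
	return 0;
}

The read path is the mirror image: page select, read the low half, read the high half, then recombine as (high << 16) | low, exactly as mt7530_mdio_r32() does.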
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644
index 6027b19f7bc2..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ /dev/null
@@ -1,2176 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/clk.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/if_vlan.h>
-#include <linux/reset.h>
-#include <linux/tcp.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-#include <linux/regmap.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-#include "ethtool.h"
-
-#define MAX_RX_LENGTH 1536
-#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
-#define DMA_DUMMY_DESC 0xffffffff
-#define MTK_DEFAULT_MSG_ENABLE \
- (NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR)
-
-#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
-#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1))
-#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1))
-
-#define SYSC_REG_RSTCTRL 0x34
-
-static int mtk_msg_level = -1;
-module_param_named(msg_level, mtk_msg_level, int, 0);
-MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
-
-static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
- [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
- [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
- [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
- [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
- [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
- [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
- [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
- [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
- [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
- [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
- [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
- [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
- [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
- [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
- [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
- [MTK_REG_MTK_RST_GL] = MTK_RST_GL,
-};
-
-static const u16 *mtk_reg_table = mtk_reg_table_default;
-
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
-{
- __raw_writel(val, eth->base + reg);
-}
-
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
-{
- return __raw_readl(eth->base + reg);
-}
-
-static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
-{
- mtk_w32(eth, val, mtk_reg_table[reg]);
-}
-
-static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
-{
- return mtk_r32(eth, mtk_reg_table[reg]);
-}
-
-/* These bits are also exposed via the reset-controller API. However, the
- * switch and FE need to be brought out of reset at exactly the same moment,
- * and the reset-controller API does not provide this feature yet. Do the
- * reset manually until the reset-controller API is able to do this.
- */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
-{
- u32 val;
-
- regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
- val |= reset_bits;
- regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
- usleep_range(10, 20);
- val &= ~reset_bits;
- regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
- usleep_range(10, 20);
-}
-EXPORT_SYMBOL(mtk_reset);
-
-static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
-{
- if (eth->soc->dma_type & MTK_PDMA)
- mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
-}
-
-static inline u32 mtk_irq_pending(struct mtk_eth *eth)
-{
- u32 status = 0;
-
- if (eth->soc->dma_type & MTK_PDMA)
- status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
- if (eth->soc->dma_type & MTK_QDMA)
- status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
-
- return status;
-}
-
-static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
-{
- u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
- if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
- status_reg = MTK_REG_MTK_INT_STATUS2;
-
- mtk_reg_w32(eth, mask, status_reg);
-}
-
-static u32 mtk_irq_pending_status(struct mtk_eth *eth)
-{
- u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
- if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
- status_reg = MTK_REG_MTK_INT_STATUS2;
-
- return mtk_reg_r32(eth, status_reg);
-}
-
-static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
-{
- u32 val;
-
- if (eth->soc->dma_type & MTK_PDMA) {
- val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
- /* flush write */
- mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- }
- if (eth->soc->dma_type & MTK_QDMA) {
- val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
- /* flush write */
- mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- }
-}
-
-static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
-{
- u32 val;
-
- if (eth->soc->dma_type & MTK_PDMA) {
- val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
- /* flush write */
- mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- }
- if (eth->soc->dma_type & MTK_QDMA) {
- val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
- /* flush write */
- mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- }
-}
-
-static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
-{
- u32 enabled = 0;
-
- if (eth->soc->dma_type & MTK_PDMA)
- enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- if (eth->soc->dma_type & MTK_QDMA)
- enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-
- return enabled;
-}
-
-static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
- unsigned char *macaddr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mac->hw->page_lock, flags);
- mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
- mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
- (macaddr[4] << 8) | macaddr[5],
- MTK_GDMA1_MAC_ADRL);
- spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static int mtk_set_mac_address(struct net_device *dev, void *p)
-{
- int ret = eth_mac_addr(dev, p);
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- if (ret)
- return ret;
-
- if (eth->soc->set_mac)
- eth->soc->set_mac(mac, dev->dev_addr);
- else
- mtk_hw_set_macaddr(mac, p);
-
- return 0;
-}
-
-static inline int mtk_max_frag_size(int mtu)
-{
- /* make sure buf_size will be at least MAX_RX_LENGTH */
- if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
- mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
-
- return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
-static inline int mtk_max_buf_size(int frag_size)
-{
- int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- WARN_ON(buf_size < MAX_RX_LENGTH);
-
- return buf_size;
-}
-
-static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
- struct mtk_rx_dma *dma_rxd)
-{
- rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
- rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
- rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
- rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
-}
-
-static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
- struct mtk_tx_dma *dma_txd)
-{
- WRITE_ONCE(dma_txd->txd1, txd->txd1);
- WRITE_ONCE(dma_txd->txd3, txd->txd3);
- WRITE_ONCE(dma_txd->txd4, txd->txd4);
- /* clean dma done flag last */
- WRITE_ONCE(dma_txd->txd2, txd->txd2);
-}
-
-static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
- int i;
-
- if (ring->rx_data && ring->rx_dma) {
- for (i = 0; i < ring->rx_ring_size; i++) {
- if (!ring->rx_data[i])
- continue;
- if (!ring->rx_dma[i].rxd1)
- continue;
- dma_unmap_single(eth->dev,
- ring->rx_dma[i].rxd1,
- ring->rx_buf_size,
- DMA_FROM_DEVICE);
- skb_free_frag(ring->rx_data[i]);
- }
- kfree(ring->rx_data);
- ring->rx_data = NULL;
- }
-
- if (ring->rx_dma) {
- dma_free_coherent(eth->dev,
- ring->rx_ring_size * sizeof(*ring->rx_dma),
- ring->rx_dma,
- ring->rx_phys);
- ring->rx_dma = NULL;
- }
-}
-
-static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
- int i, pad = 0;
-
- ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
- ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
- ring->rx_ring_size = eth->soc->dma_ring_size;
- ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
- GFP_KERNEL);
- if (!ring->rx_data)
- goto no_rx_mem;
-
- for (i = 0; i < ring->rx_ring_size; i++) {
- ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
- if (!ring->rx_data[i])
- goto no_rx_mem;
- }
-
- ring->rx_dma =
- dma_alloc_coherent(eth->dev,
- ring->rx_ring_size * sizeof(*ring->rx_dma),
- &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
- if (!ring->rx_dma)
- goto no_rx_mem;
-
- if (!eth->soc->rx_2b_offset)
- pad = NET_IP_ALIGN;
-
- for (i = 0; i < ring->rx_ring_size; i++) {
- dma_addr_t dma_addr = dma_map_single(eth->dev,
- ring->rx_data[i] + NET_SKB_PAD + pad,
- ring->rx_buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
- goto no_rx_mem;
- ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
-
- if (eth->soc->rx_sg_dma)
- ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
- else
- ring->rx_dma[i].rxd2 = RX_DMA_LSO;
- }
- ring->rx_calc_idx = ring->rx_ring_size - 1;
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- return 0;
-
-no_rx_mem:
- return -ENOMEM;
-}
-
-static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
-{
- if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- }
- if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
- dma_unmap_page(dev,
- dma_unmap_addr(tx_buf, dma_addr1),
- dma_unmap_len(tx_buf, dma_len1),
- DMA_TO_DEVICE);
-
- tx_buf->flags = 0;
- if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
- dev_kfree_skb_any(tx_buf->skb);
- tx_buf->skb = NULL;
-}
-
-static void mtk_pdma_tx_clean(struct mtk_eth *eth)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- int i;
-
- if (ring->tx_buf) {
- for (i = 0; i < ring->tx_ring_size; i++)
- mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
- kfree(ring->tx_buf);
- ring->tx_buf = NULL;
- }
-
- if (ring->tx_dma) {
- dma_free_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- ring->tx_dma,
- ring->tx_phys);
- ring->tx_dma = NULL;
- }
-}
-
-static void mtk_qdma_tx_clean(struct mtk_eth *eth)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- int i;
-
- if (ring->tx_buf) {
- for (i = 0; i < ring->tx_ring_size; i++)
- mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
- kfree(ring->tx_buf);
- ring->tx_buf = NULL;
- }
-
- if (ring->tx_dma) {
- dma_free_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- ring->tx_dma,
- ring->tx_phys);
- ring->tx_dma = NULL;
- }
-}
-
-void mtk_stats_update_mac(struct mtk_mac *mac)
-{
- struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
- u64 stats;
-
- base += hw_stats->reg_offset;
-
- u64_stats_update_begin(&hw_stats->syncp);
-
- if (mac->hw->soc->new_stats) {
- hw_stats->rx_bytes += mtk_r32(mac->hw, base);
- stats = mtk_r32(mac->hw, base + 0x04);
- if (stats)
- hw_stats->rx_bytes += (stats << 32);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x24);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
- stats = mtk_r32(mac->hw, base + 0x34);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
- } else {
- hw_stats->tx_bytes += mtk_r32(mac->hw, base);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
- hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x3c);
- }
-
- u64_stats_update_end(&hw_stats->syncp);
-}
-
-static void mtk_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *storage)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
- unsigned int start;
-
- if (!base) {
- netdev_stats_to_stats64(storage, &dev->stats);
- return;
- }
-
- if (netif_running(dev) && netif_device_present(dev)) {
- if (spin_trylock(&hw_stats->stats_lock)) {
- mtk_stats_update_mac(mac);
- spin_unlock(&hw_stats->stats_lock);
- }
- }
-
- do {
- start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
- storage->rx_packets = hw_stats->rx_packets;
- storage->tx_packets = hw_stats->tx_packets;
- storage->rx_bytes = hw_stats->rx_bytes;
- storage->tx_bytes = hw_stats->tx_bytes;
- storage->collisions = hw_stats->tx_collisions;
- storage->rx_length_errors = hw_stats->rx_short_errors +
- hw_stats->rx_long_errors;
- storage->rx_over_errors = hw_stats->rx_overflow;
- storage->rx_crc_errors = hw_stats->rx_fcs_errors;
- storage->rx_errors = hw_stats->rx_checksum_errors;
- storage->tx_aborted_errors = hw_stats->tx_skip;
- } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
-
- storage->tx_errors = dev->stats.tx_errors;
- storage->rx_dropped = dev->stats.rx_dropped;
- storage->tx_dropped = dev->stats.tx_dropped;
-}
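mtk_get_stats64() above uses the u64_stats seqcount pattern: the reader snapshots the counters and retries if u64_stats_fetch_retry_irq() reports that a writer ran concurrently. A minimal single-threaded sketch of the retry shape, with plain-C stand-ins for the kernel helpers (the real primitives also handle SMP memory ordering, which this sketch does not):

#include <stdio.h>

static unsigned int seq;		/* even = stable, odd = writer active */
static unsigned long long counter;

static unsigned int fetch_begin(void)
{
	return seq;
}

static int fetch_retry(unsigned int start)
{
	/* retry if the sequence moved, or if we started mid-update */
	return seq != start || (start & 1);
}

int main(void)
{
	unsigned long long snap;
	unsigned int start;

	counter = 12345;
	do {				/* same shape as the driver's reader loop */
		start = fetch_begin();
		snap = counter;
	} while (fetch_retry(start));

	printf("%llu\n", snap);
	return 0;
}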
-
-static int mtk_vlan_rx_add_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- u32 idx = (vid & 0xf);
- u32 vlan_cfg;
-
- if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
- return 0;
-
- if (test_bit(idx, &eth->vlan_map)) {
- netdev_warn(dev, "disable tx vlan offload\n");
- dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
- netdev_update_features(dev);
- } else {
- vlan_cfg = mtk_r32(eth,
- mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
- ((idx >> 1) << 2));
- if (idx & 0x1) {
- vlan_cfg &= 0xffff;
- vlan_cfg |= (vid << 16);
- } else {
- vlan_cfg &= 0xffff0000;
- vlan_cfg |= vid;
- }
- mtk_w32(eth,
- vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
- ((idx >> 1) << 2));
- set_bit(idx, &eth->vlan_map);
- }
-
- return 0;
-}
-
-static int mtk_vlan_rx_kill_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- u32 idx = (vid & 0xf);
-
- if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
- return 0;
-
- clear_bit(idx, &eth->vlan_map);
-
- return 0;
-}
-
-static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
-{
- barrier();
- return (u32)(ring->tx_ring_size -
- ((ring->tx_next_idx - ring->tx_free_idx) &
- (ring->tx_ring_size - 1)));
-}
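mtk_pdma_empty_txd() above relies on the ring size being a power of two: (next - free) & (size - 1) yields the occupancy even after the indices wrap, and size minus that is the free count. A standalone sketch of the same arithmetic:

#include <stdio.h>

/* free descriptors in a power-of-two ring, wraparound-safe */
static unsigned int ring_free(unsigned int size, unsigned int next,
			      unsigned int freed)
{
	return size - ((next - freed) & (size - 1));
}

int main(void)
{
	/* next_idx has wrapped past the end while free_idx has not */
	printf("%u\n", ring_free(256, 2, 250));	/* occupancy 8 -> prints 248 */
	return 0;
}

Unsigned subtraction makes the wrapped case work without any special handling, which is the reason the driver rounds ring sizes down to powers of two in the first place.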
-
-static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
-{
- unsigned int len;
- int ret;
-
- if (unlikely(skb->len >= VLAN_ETH_ZLEN))
- return 0;
-
- if (eth->soc->padding_64b && !eth->soc->padding_bug)
- return 0;
-
- if (skb_vlan_tag_present(skb))
- len = ETH_ZLEN;
- else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- len = VLAN_ETH_ZLEN;
- else if (!eth->soc->padding_64b)
- len = ETH_ZLEN;
- else
- return 0;
-
- if (skb->len >= len)
- return 0;
-
- ret = skb_pad(skb, len - skb->len);
- if (ret < 0)
- return ret;
- skb->len = len;
- skb_set_tail_pointer(skb, len);
-
- return ret;
-}
-
-static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
- int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct skb_frag_struct *frag;
- struct mtk_tx_dma txd, *ptxd;
- struct mtk_tx_buf *tx_buf;
- int i, j, k, frag_size, frag_map_size, offset;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
- u32 def_txd4;
-
- if (mtk_skb_padto(skb, eth)) {
- netif_warn(eth, tx_err, dev, "tx padding failed!\n");
- return -1;
- }
-
- tx_buf = &ring->tx_buf[ring->tx_next_idx];
- memset(tx_buf, 0, sizeof(*tx_buf));
- memset(&txd, 0, sizeof(txd));
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- /* init tx descriptor */
- def_txd4 = eth->soc->txd4;
- txd.txd4 = def_txd4;
-
- if (eth->soc->mac_count > 1)
- txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
- if (gso)
- txd.txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd.txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb)) {
- u16 tag = skb_vlan_tag_get(skb);
-
- txd.txd4 |= TX_DMA_INS_VLAN |
- ((tag >> VLAN_PRIO_SHIFT) << 4) |
- (tag & 0xF);
- }
-
- mapped_addr = dma_map_single(&dev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- return -1;
-
- txd.txd1 = mapped_addr;
- txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
-
- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
- /* TX SG offload */
- j = ring->tx_next_idx;
- k = 0;
- for (i = 0; i < nr_frags; i++) {
- offset = 0;
- frag = &skb_shinfo(skb)->frags[i];
- frag_size = skb_frag_size(frag);
-
- while (frag_size > 0) {
- frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- goto err_dma;
-
- if (k & 0x1) {
- j = NEXT_TX_DESP_IDX(j);
- txd.txd1 = mapped_addr;
- txd.txd2 = TX_DMA_PLEN0(frag_map_size);
- txd.txd4 = def_txd4;
-
- tx_buf = &ring->tx_buf[j];
- memset(tx_buf, 0, sizeof(*tx_buf));
-
- tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
- dma_unmap_addr_set(tx_buf, dma_addr0,
- mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0,
- frag_map_size);
- } else {
- txd.txd3 = mapped_addr;
- txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
-
- tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
- tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
- dma_unmap_addr_set(tx_buf, dma_addr1,
- mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len1,
- frag_map_size);
-
- if (!((i == (nr_frags - 1)) &&
- (frag_map_size == frag_size))) {
- mtk_set_txd_pdma(&txd,
- &ring->tx_dma[j]);
- memset(&txd, 0, sizeof(txd));
- }
- }
- frag_size -= frag_map_size;
- offset += frag_map_size;
- k++;
- }
- }
-
- /* set last segment */
- if (k & 0x1)
- txd.txd2 |= TX_DMA_LS1;
- else
- txd.txd2 |= TX_DMA_LS0;
- mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);
-
- /* store skb to cleanup */
- tx_buf->skb = skb;
-
- netdev_sent_queue(dev, skb->len);
- skb_tx_timestamp(skb);
-
- ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
- atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
- mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
-
- return 0;
-
-err_dma:
- j = ring->tx_next_idx;
- for (i = 0; i < tx_num; i++) {
- ptxd = &ring->tx_dma[j];
- tx_buf = &ring->tx_buf[j];
-
- /* unmap dma */
- mtk_txd_unmap(&dev->dev, tx_buf);
-
- ptxd->txd2 = TX_DMA_DESP2_DEF;
- j = NEXT_TX_DESP_IDX(j);
- }
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
- return -1;
-}
-
-/* the qdma core needs scratch memory to be set up */
-static int mtk_init_fq_dma(struct mtk_eth *eth)
-{
- dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
- int cnt = eth->soc->dma_ring_size;
- int i;
-
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- &phy_ring_head,
- GFP_ATOMIC | __GFP_ZERO);
- if (unlikely(!eth->scratch_ring))
- return -ENOMEM;
-
- eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
- GFP_KERNEL);
- dma_addr = dma_map_single(eth->dev,
- eth->scratch_head, cnt * QDMA_PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
- return -ENOMEM;
-
- memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
- phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
-
- for (i = 0; i < cnt; i++) {
- eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
- if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (phy_ring_head +
- ((i + 1) * sizeof(struct mtk_tx_dma)));
- eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
- }
-
- mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
- mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
-
- return 0;
-}
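-
-/* the scratch ring built above forms a singly linked free queue: txd1 of
- * descriptor i points at its QDMA_PAGE_SIZE payload page and txd2 holds the
- * physical address of descriptor i + 1. The last descriptor is left
- * unlinked; the hardware learns about it through MTK_QDMA_FQ_TAIL instead.
- */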
-
-static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
-{
- void *ret = ring->tx_dma;
-
- return ret + (desc - ring->tx_phys);
-}
-
-static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
-{
- return mtk_qdma_phys_to_virt(ring, txd->txd2);
-}
-
-static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
-{
- int idx = txd - ring->tx_dma;
-
- return &ring->tx_buf[idx];
-}
-
-static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
- int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_dma *itxd, *txd;
- struct mtk_tx_buf *tx_buf;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
- int i, n_desc = 1;
- u32 txd4 = eth->soc->txd4;
-
- itxd = ring->tx_next_free;
- if (itxd == ring->tx_last_free)
- return -ENOMEM;
-
- if (eth->soc->mac_count > 1)
- txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
- tx_buf = mtk_desc_to_tx_buf(ring, itxd);
- memset(tx_buf, 0, sizeof(*tx_buf));
-
- if (gso)
- txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb))
- txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);
-
- mapped_addr = dma_map_single(&dev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- return -ENOMEM;
-
- WRITE_ONCE(itxd->txd1, mapped_addr);
- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
- /* TX SG offload */
- txd = itxd;
- nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- unsigned int offset = 0;
- int frag_size = skb_frag_size(frag);
-
- while (frag_size) {
- bool last_frag = false;
- unsigned int frag_map_size;
-
- txd = mtk_tx_next_qdma(ring, txd);
- if (txd == ring->tx_last_free)
- goto err_dma;
-
- n_desc++;
- frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- goto err_dma;
-
- if (i == nr_frags - 1 &&
- (frag_size - frag_map_size) == 0)
- last_frag = true;
-
- WRITE_ONCE(txd->txd1, mapped_addr);
- WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
- TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0) |
- mac->id);
- WRITE_ONCE(txd->txd4, 0);
-
- tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
- memset(tx_buf, 0, sizeof(*tx_buf));
-
- tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
- frag_size -= frag_map_size;
- offset += frag_map_size;
- }
- }
-
- /* store skb for cleanup */
- tx_buf->skb = skb;
-
- WRITE_ONCE(itxd->txd4, txd4);
- WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
- (!nr_frags * TX_DMA_LS0)));
-
- netdev_sent_queue(dev, skb->len);
- skb_tx_timestamp(skb);
-
- ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
- atomic_sub(n_desc, &ring->tx_free_count);
-
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
-
- return 0;
-
-err_dma:
- do {
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
-
- /* unmap dma */
- mtk_txd_unmap(&dev->dev, tx_buf);
-
- itxd->txd3 = TX_DMA_DESP2_DEF;
- itxd = mtk_tx_next_qdma(ring, itxd);
- } while (itxd != txd);
-
- return -ENOMEM;
-}
-
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
-{
- int i, nfrags;
- struct skb_frag_struct *frag;
-
- nfrags = 1;
- if (skb_is_gso(skb)) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
- }
- } else {
- nfrags += skb_shinfo(skb)->nr_frags;
- }
-
- return DIV_ROUND_UP(nfrags, 2);
-}
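-
-/* e.g. a non-GSO skb with a linear head and three page fragments gives
- * nfrags = 4 and DIV_ROUND_UP(4, 2) = 2 PDMA descriptors; with GSO, a
- * 20000 byte fragment alone counts as DIV_ROUND_UP(20000, 16383) = 2
- * segments, since one descriptor slot holds at most TX_DMA_BUF_LEN bytes.
- */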
-
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_ring *ring = &eth->tx_ring;
- struct net_device_stats *stats = &dev->stats;
- int tx_num;
- int len = skb->len;
- bool gso = false;
-
- tx_num = mtk_cal_txd_req(skb);
- if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
- netif_stop_queue(dev);
- netif_err(eth, tx_queued, dev,
- "Tx Ring full when queue awake!\n");
- return NETDEV_TX_BUSY;
- }
-
- /* TSO: fill MSS info in tcp checksum field */
- if (skb_is_gso(skb)) {
- if (skb_cow_head(skb, 0)) {
- netif_warn(eth, tx_err, dev,
- "GSO expand head fail.\n");
- goto drop;
- }
-
- if (skb_shinfo(skb)->gso_type &
- (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- gso = true;
- tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
- }
- }
-
- if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
- goto drop;
-
- stats->tx_packets++;
- stats->tx_bytes += len;
-
- if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
- netif_stop_queue(dev);
- smp_mb();
- if (unlikely(atomic_read(&ring->tx_free_count) >
- ring->tx_thresh))
- netif_wake_queue(dev);
- }
-
- return NETDEV_TX_OK;
-
-drop:
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
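-
-/* the stop/wake sequence above pairs with mtk_poll_tx(): after stopping the
- * queue we re-check tx_free_count behind smp_mb(), so a completion that
- * freed descriptors in the meantime cannot leave the queue stopped forever
- * (the classic lost-wakeup race on a lockless tx path).
- */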
-
-static int mtk_poll_rx(struct napi_struct *napi, int budget,
- struct mtk_eth *eth, u32 rx_intr)
-{
- struct mtk_soc_data *soc = eth->soc;
- struct mtk_rx_ring *ring = &eth->rx_ring[0];
- int idx = ring->rx_calc_idx;
- u32 checksum_bit;
- struct sk_buff *skb;
- u8 *data, *new_data;
- struct mtk_rx_dma *rxd, trxd;
- int done = 0, pad;
-
- if (eth->soc->hw_features & NETIF_F_RXCSUM)
- checksum_bit = soc->checksum_bit;
- else
- checksum_bit = 0;
-
- if (eth->soc->rx_2b_offset)
- pad = 0;
- else
- pad = NET_IP_ALIGN;
-
- while (done < budget) {
- struct net_device *netdev;
- unsigned int pktlen;
- dma_addr_t dma_addr;
- int mac = 0;
-
- idx = NEXT_RX_DESP_IDX(idx);
- rxd = &ring->rx_dma[idx];
- data = ring->rx_data[idx];
-
- mtk_get_rxd(&trxd, rxd);
- if (!(trxd.rxd2 & RX_DMA_DONE))
- break;
-
- /* find out which mac the packet comes from. values start at 1 */
- if (eth->soc->mac_count > 1) {
- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK;
- mac--;
- if (mac < 0 || mac >= eth->soc->mac_count)
- goto release_desc;
- }
-
- netdev = eth->netdev[mac];
-
- if (unlikely(!netdev))
- goto release_desc;
-
- /* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(&netdev->dev,
- new_data + NET_SKB_PAD + pad,
- ring->rx_buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
- skb_free_frag(new_data);
- goto release_desc;
- }
-
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- put_page(virt_to_head_page(new_data));
- goto release_desc;
- }
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
- dma_unmap_single(&netdev->dev, trxd.rxd1,
- ring->rx_buf_size, DMA_FROM_DEVICE);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
- skb->dev = netdev;
- skb_put(skb, pktlen);
- if (trxd.rxd4 & checksum_bit)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
- skb->protocol = eth_type_trans(skb, netdev);
-
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += pktlen;
-
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- RX_DMA_VID(trxd.rxd3))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- RX_DMA_VID(trxd.rxd3));
- napi_gro_receive(napi, skb);
-
- ring->rx_data[idx] = new_data;
- rxd->rxd1 = (unsigned int)dma_addr;
-
-release_desc:
- if (eth->soc->rx_sg_dma)
- rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
- else
- rxd->rxd2 = RX_DMA_LSO;
-
- ring->rx_calc_idx = idx;
- /* make sure that all changes to the dma ring are flushed before
- * we continue
- */
- wmb();
- if (eth->soc->dma_type == MTK_QDMA)
- mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
- else
- mtk_reg_w32(eth, ring->rx_calc_idx,
- MTK_REG_RX_CALC_IDX0);
- done++;
- }
-
- if (done < budget)
- mtk_irq_ack(eth, rx_intr);
-
- return done;
-}
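-
-/* whatever happens to a frame, the release_desc path above always returns
- * the descriptor to the hardware: on success rxd1 already points at the
- * freshly mapped replacement buffer, while on an allocation or mapping
- * failure the old buffer is recycled and the frame is dropped.
- */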
-
-static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
- struct sk_buff *skb;
- struct mtk_tx_buf *tx_buf;
- int done = 0;
- u32 idx, hwidx;
- struct mtk_tx_ring *ring = &eth->tx_ring;
- unsigned int bytes = 0;
-
- idx = ring->tx_free_idx;
- hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);
-
- while ((idx != hwidx) && budget) {
- tx_buf = &ring->tx_buf[idx];
- skb = tx_buf->skb;
-
- if (!skb)
- break;
-
- if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
- bytes += skb->len;
- done++;
- budget--;
- }
- mtk_txd_unmap(eth->dev, tx_buf);
- idx = NEXT_TX_DESP_IDX(idx);
- }
- ring->tx_free_idx = idx;
- atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
- /* read the hw index again to make sure there are no new tx packets */
- if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
- *tx_again = true;
-
- if (done)
- netdev_completed_queue(*eth->netdev, done, bytes);
-
- return done;
-}
-
-static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
- struct mtk_tx_buf *tx_buf;
- int total = 0, done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
- u32 cpu, dma;
- int i;
-
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
-
- cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
- dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
-
- desc = mtk_qdma_phys_to_virt(ring, cpu);
-
- while ((cpu != dma) && budget) {
- u32 next_cpu = desc->txd2;
- int mac;
-
- desc = mtk_tx_next_qdma(ring, desc);
- if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
- break;
-
- mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
- TX_DMA_FPORT_MASK;
- mac--;
-
- tx_buf = mtk_desc_to_tx_buf(ring, desc);
- skb = tx_buf->skb;
- if (!skb)
- break;
-
- if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
- bytes[mac] += skb->len;
- done[mac]++;
- budget--;
- }
- mtk_txd_unmap(eth->dev, tx_buf);
-
- ring->tx_last_free->txd2 = next_cpu;
- ring->tx_last_free = desc;
- atomic_inc(&ring->tx_free_count);
-
- cpu = next_cpu;
- }
-
- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
-
- /* read the hw index again to make sure there are no new tx packets */
- if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
- *tx_again = true;
-
- for (i = 0; i < eth->soc->mac_count; i++) {
- if (!done[i])
- continue;
- netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
- total += done[i];
- }
-
- return total;
-}
-
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
- bool *tx_again)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- struct net_device *netdev = eth->netdev[0];
- int done;
-
- done = eth->tx_ring.tx_poll(eth, budget, tx_again);
- if (!*tx_again)
- mtk_irq_ack(eth, tx_intr);
-
- if (!done)
- return 0;
-
- smp_mb();
- if (unlikely(!netif_queue_stopped(netdev)))
- return done;
-
- if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
- netif_wake_queue(netdev);
-
- return done;
-}
-
-static void mtk_stats_update(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < eth->soc->mac_count; i++) {
- if (!eth->mac[i] || !eth->mac[i]->hw_stats)
- continue;
- if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
- mtk_stats_update_mac(eth->mac[i]);
- spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
- }
- }
-}
-
-static int mtk_poll(struct napi_struct *napi, int budget)
-{
- struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
- u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
- int tx_done, rx_done;
- bool tx_again = false;
-
- status = mtk_irq_pending(eth);
- mtk_status = mtk_irq_pending_status(eth);
- tx_intr = eth->soc->tx_int;
- rx_intr = eth->soc->rx_int;
- status_intr = eth->soc->status_int;
- tx_done = 0;
- rx_done = 0;
- tx_again = 0;
-
- if (status & tx_intr)
- tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);
-
- if (status & rx_intr)
- rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
-
- if (unlikely(mtk_status & status_intr)) {
- mtk_stats_update(eth);
- mtk_irq_ack_status(eth, status_intr);
- }
-
- if (unlikely(netif_msg_intr(eth))) {
- mask = mtk_irq_enabled(eth);
- netdev_info(eth->netdev[0],
- "done tx %d, rx %d, intr 0x%08x/0x%x\n",
- tx_done, rx_done, status, mask);
- }
-
- if (tx_again || rx_done == budget)
- return budget;
-
- status = mtk_irq_pending(eth);
- if (status & (tx_intr | rx_intr))
- return budget;
-
- napi_complete(napi);
- mtk_irq_enable(eth, tx_intr | rx_intr);
-
- return rx_done;
-}
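-
-/* note that mtk_poll() re-reads the pending irq status right before
- * napi_complete(): any tx/rx interrupt that fired while we were polling
- * forces another NAPI round via the budget return instead of racing with
- * the mtk_irq_enable() call.
- */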
-
-static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
-{
- int i;
- struct mtk_tx_ring *ring = &eth->tx_ring;
-
- ring->tx_ring_size = eth->soc->dma_ring_size;
- ring->tx_free_idx = 0;
- ring->tx_next_idx = 0;
- ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
- MAX_SKB_FRAGS);
-
- ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
- GFP_KERNEL);
- if (!ring->tx_buf)
- goto no_tx_mem;
-
- ring->tx_dma =
- dma_alloc_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
- if (!ring->tx_dma)
- goto no_tx_mem;
-
- for (i = 0; i < ring->tx_ring_size; i++) {
- ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
- ring->tx_dma[i].txd4 = eth->soc->txd4;
- }
-
- atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
- ring->tx_map = mtk_pdma_tx_map;
- ring->tx_poll = mtk_pdma_tx_poll;
- ring->tx_clean = mtk_pdma_tx_clean;
-
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
- mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
- mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
- mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
-
- return 0;
-
-no_tx_mem:
- return -ENOMEM;
-}
-
-static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = sizeof(*ring->tx_dma);
-
- ring->tx_ring_size = eth->soc->dma_ring_size;
- ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
- GFP_KERNEL);
- if (!ring->tx_buf)
- goto no_tx_mem;
-
- ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
- &ring->tx_phys,
- GFP_ATOMIC | __GFP_ZERO);
- if (!ring->tx_dma)
- goto no_tx_mem;
-
- for (i = 0; i < ring->tx_ring_size; i++) {
- int next = (i + 1) % ring->tx_ring_size;
- u32 next_ptr = ring->tx_phys + next * sz;
-
- ring->tx_dma[i].txd2 = next_ptr;
- ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
- }
-
- atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
- ring->tx_next_free = &ring->tx_dma[0];
- ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
- ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
- MAX_SKB_FRAGS);
-
- ring->tx_map = mtk_qdma_tx_map;
- ring->tx_poll = mtk_qdma_tx_poll;
- ring->tx_clean = mtk_qdma_tx_clean;
-
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
- mtk_w32(eth,
- ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth,
- ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
- MTK_QTX_DRX_PTR);
-
- return 0;
-
-no_tx_mem:
- return -ENOMEM;
-}
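-
-/* the ring built above is a circular singly linked list: txd2 of descriptor
- * i holds the physical address of descriptor (i + 1) % tx_ring_size.
- * tx_next_free and tx_last_free chase each other around this list; two
- * descriptors are kept out of the advertised free count, presumably so that
- * the two pointers can never collide.
- */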
-
-static int mtk_qdma_init(struct mtk_eth *eth, int ring)
-{
- int err;
-
- err = mtk_init_fq_dma(eth);
- if (err)
- return err;
-
- err = mtk_qdma_tx_alloc_tx(eth);
- if (err)
- return err;
-
- err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
- if (err)
- return err;
-
- mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
- mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
- mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
- mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
-
- /* Enable random early drop and set drop threshold automatically */
- mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
-
- return 0;
-}
-
-static int mtk_pdma_qdma_init(struct mtk_eth *eth)
-{
- int err = mtk_qdma_init(eth, 1);
-
- if (err)
- return err;
-
- err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
- if (err)
- return err;
-
- mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
- mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
- mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
- mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
- return 0;
-}
-
-static int mtk_pdma_init(struct mtk_eth *eth)
-{
- struct mtk_rx_ring *ring = &eth->rx_ring[0];
- int err;
-
- err = mtk_pdma_tx_alloc(eth);
- if (err)
- return err;
-
- err = mtk_dma_rx_alloc(eth, ring);
- if (err)
- return err;
-
- mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
- mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
- mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
- mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
- return 0;
-}
-
-static void mtk_dma_free(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < eth->soc->mac_count; i++)
- if (eth->netdev[i])
- netdev_reset_queue(eth->netdev[i]);
- eth->tx_ring.tx_clean(eth);
- mtk_clean_rx(eth, &eth->rx_ring[0]);
- mtk_clean_rx(eth, &eth->rx_ring[1]);
- kfree(eth->scratch_head);
-}
-
-static void mtk_tx_timeout(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_ring *ring = &eth->tx_ring;
-
- eth->netdev[mac->id]->stats.tx_errors++;
- netif_err(eth, tx_err, dev,
- "transmit timed out\n");
- if (eth->soc->dma_type & MTK_PDMA) {
- netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
- mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
- netif_info(eth, drv, dev,
- "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
- 0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
- mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
- mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
- mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
- ring->tx_free_idx,
- ring->tx_next_idx);
- }
- if (eth->soc->dma_type & MTK_QDMA) {
- netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
- mtk_r32(eth, MTK_QDMA_GLO_CFG));
- netif_info(eth, drv, dev,
- "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
- 0, mtk_r32(eth, MTK_QTX_CTX_PTR),
- mtk_r32(eth, MTK_QTX_DTX_PTR),
- mtk_r32(eth, MTK_QTX_CRX_PTR),
- mtk_r32(eth, MTK_QTX_DRX_PTR),
- atomic_read(&ring->tx_free_count));
- }
- netif_info(eth, drv, dev,
- "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
- 0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
- mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
- mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
- mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));
-
- schedule_work(&mac->pending_work);
-}
-
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
-{
- struct mtk_eth *eth = _eth;
- u32 status, int_mask;
-
- status = mtk_irq_pending(eth);
- if (unlikely(!status))
- return IRQ_NONE;
-
- int_mask = (eth->soc->rx_int | eth->soc->tx_int);
- if (likely(status & int_mask)) {
- if (likely(napi_schedule_prep(&eth->rx_napi)))
- __napi_schedule(&eth->rx_napi);
- } else {
- mtk_irq_ack(eth, status);
- }
- mtk_irq_disable(eth, int_mask);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mtk_poll_controller(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;
-
- mtk_irq_disable(eth, int_mask);
- mtk_handle_irq(dev->irq, dev);
- mtk_irq_enable(eth, int_mask);
-}
-#endif
-
-int mtk_set_clock_cycle(struct mtk_eth *eth)
-{
- unsigned long sysclk = eth->sysclk;
-
- sysclk /= MTK_US_CYC_CNT_DIVISOR;
- sysclk <<= MTK_US_CYC_CNT_SHIFT;
-
- mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
- ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
- sysclk,
- MTK_GLO_CFG);
- return 0;
-}
-
-void mtk_fwd_config(struct mtk_eth *eth)
-{
- u32 fwd_cfg;
-
- fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-
- /* disable jumbo frame */
- if (eth->soc->jumbo_frame)
- fwd_cfg &= ~MTK_GDM1_JMB_EN;
-
- /* set unicast/multicast/broadcast frame to cpu */
- fwd_cfg &= ~0xffff;
-
- mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-}
-
-void mtk_csum_config(struct mtk_eth *eth)
-{
- if (eth->soc->hw_features & NETIF_F_RXCSUM)
- mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
- (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
- MTK_GDMA1_FWD_CFG);
- else
- mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
- ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
- MTK_GDMA1_FWD_CFG);
- if (eth->soc->hw_features & NETIF_F_IP_CSUM)
- mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
- (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
- MTK_CDMA_CSG_CFG);
- else
- mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
- ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
- MTK_CDMA_CSG_CFG);
-}
-
-static int mtk_start_dma(struct mtk_eth *eth)
-{
- unsigned long flags;
- u32 val;
- int err;
-
- if (eth->soc->dma_type == MTK_PDMA)
- err = mtk_pdma_init(eth);
- else if (eth->soc->dma_type == MTK_QDMA)
- err = mtk_qdma_init(eth, 0);
- else
- err = mtk_pdma_qdma_init(eth);
- if (err) {
- mtk_dma_free(eth);
- return err;
- }
-
- spin_lock_irqsave(&eth->page_lock, flags);
-
- val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
- if (eth->soc->rx_2b_offset)
- val |= MTK_RX_2B_OFFSET;
- val |= eth->soc->pdma_glo_cfg;
-
- if (eth->soc->dma_type & MTK_PDMA)
- mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);
-
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_w32(eth, val, MTK_QDMA_GLO_CFG);
-
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
- return 0;
-}
-
-static int mtk_open(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
-
- if (!atomic_read(&eth->dma_refcnt)) {
- int err = mtk_start_dma(eth);
-
- if (err)
- return err;
-
- napi_enable(&eth->rx_napi);
- mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
- }
- atomic_inc(&eth->dma_refcnt);
-
- if (eth->phy)
- eth->phy->start(mac);
-
- if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
- netif_carrier_on(dev);
-
- netif_start_queue(dev);
- eth->soc->fwd_config(eth);
-
- return 0;
-}
-
-static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
-{
- unsigned long flags;
- u32 val;
- int i;
-
- /* stop the dma engine */
- spin_lock_irqsave(&eth->page_lock, flags);
- val = mtk_r32(eth, glo_cfg);
- mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
- glo_cfg);
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
- /* wait for dma stop */
- for (i = 0; i < 10; i++) {
- val = mtk_r32(eth, glo_cfg);
- if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
- msleep(20);
- continue;
- }
- break;
- }
-}
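-
-/* the loop above polls the busy bits up to 10 times with a 20ms sleep,
- * i.e. the engine gets roughly 200ms to drain in-flight descriptors before
- * the caller tears the rings down.
- */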
-
-static int mtk_stop(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- netif_tx_disable(dev);
- if (eth->phy)
- eth->phy->stop(mac);
-
- if (!atomic_dec_and_test(&eth->dma_refcnt))
- return 0;
-
- mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
- napi_disable(&eth->rx_napi);
-
- if (eth->soc->dma_type & MTK_PDMA)
- mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);
-
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
-
- mtk_dma_free(eth);
-
- return 0;
-}
-
-static int mtk_init_hw(struct mtk_eth *eth)
-{
- int i, err;
-
- eth->soc->reset_fe(eth);
-
- if (eth->soc->switch_init)
- if (eth->soc->switch_init(eth)) {
- dev_err(eth->dev, "failed to initialize switch core\n");
- return -ENODEV;
- }
-
- err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
- dev_name(eth->dev), eth);
- if (err)
- return err;
-
- err = mtk_mdio_init(eth);
- if (err)
- return err;
-
- /* disable delay and normal interrupt */
- mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-
- /* frame engine will push the VLAN tag according to the VIDX field in the Tx desc */
- if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
- for (i = 0; i < 16; i += 2)
- mtk_w32(eth, ((i + 1) << 16) + i,
- mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
- (i * 2));
-
- if (eth->soc->fwd_config(eth))
- dev_err(eth->dev, "unable to get clock\n");
-
- if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
- mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
- mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
- }
-
- return 0;
-}
-
-static int mtk_init(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct device_node *port;
- const char *mac_addr;
- int err;
-
- mac_addr = of_get_mac_address(mac->of_node);
- if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
-
- /* If the mac address is invalid, use random mac address */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- eth_hw_addr_random(dev);
- dev_err(eth->dev, "generated random MAC address %pM\n",
- dev->dev_addr);
- }
- mac->hw->soc->set_mac(mac, dev->dev_addr);
-
- if (eth->soc->port_init)
- for_each_child_of_node(mac->of_node, port)
- if (of_device_is_compatible(port,
- "mediatek,eth-port") &&
- of_device_is_available(port))
- eth->soc->port_init(eth, mac, port);
-
- if (eth->phy) {
- err = eth->phy->connect(mac);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static void mtk_uninit(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- if (eth->phy)
- eth->phy->disconnect(mac);
- mtk_mdio_cleanup(eth);
-
- mtk_irq_disable(eth, ~0);
- free_irq(dev->irq, dev);
-}
-
-static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if (!mac->phy_dev)
- return -ENODEV;
-
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
- default:
- break;
- }
-
- return -EOPNOTSUPP;
-}
-
-static int mtk_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- int frag_size, old_mtu;
- u32 fwd_cfg;
-
- if (!eth->soc->jumbo_frame)
- return eth_change_mtu(dev, new_mtu);
-
- frag_size = mtk_max_frag_size(new_mtu);
- if (new_mtu < 68 || frag_size > PAGE_SIZE)
- return -EINVAL;
-
- old_mtu = dev->mtu;
- dev->mtu = new_mtu;
-
- /* return early if the buffer sizes will not change */
- if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
- return 0;
- if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
- return 0;
-
- if (new_mtu <= ETH_DATA_LEN)
- eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
- else
- eth->rx_ring[0].frag_size = PAGE_SIZE;
- eth->rx_ring[0].rx_buf_size =
- mtk_max_buf_size(eth->rx_ring[0].frag_size);
-
- if (!netif_running(dev))
- return 0;
-
- mtk_stop(dev);
- fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
- if (new_mtu <= ETH_DATA_LEN) {
- fwd_cfg &= ~MTK_GDM1_JMB_EN;
- } else {
- fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
- fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
- MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
- }
- mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-
- return mtk_open(dev);
-}
-
-static void mtk_pending_work(struct work_struct *work)
-{
- struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
- struct mtk_eth *eth = mac->hw;
- struct net_device *dev = eth->netdev[mac->id];
- int err;
-
- rtnl_lock();
- mtk_stop(dev);
-
- err = mtk_open(dev);
- if (err) {
- netif_alert(eth, ifup, dev,
- "Driver up/down cycle failed, closing device.\n");
- dev_close(dev);
- }
- rtnl_unlock();
-}
-
-static int mtk_cleanup(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < eth->soc->mac_count; i++) {
- struct mtk_mac *mac;
-
- if (!eth->netdev[i])
- continue;
- mac = netdev_priv(eth->netdev[i]);
-
- unregister_netdev(eth->netdev[i]);
- /* the pending work references the netdev private data, so flush
- * it before the netdev is freed
- */
- cancel_work_sync(&mac->pending_work);
- free_netdev(eth->netdev[i]);
- }
-
- return 0;
-}
-
-static const struct net_device_ops mtk_netdev_ops = {
- .ndo_init = mtk_init,
- .ndo_uninit = mtk_uninit,
- .ndo_open = mtk_open,
- .ndo_stop = mtk_stop,
- .ndo_start_xmit = mtk_start_xmit,
- .ndo_set_mac_address = mtk_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mtk_do_ioctl,
- .ndo_change_mtu = mtk_change_mtu,
- .ndo_tx_timeout = mtk_tx_timeout,
- .ndo_get_stats64 = mtk_get_stats64,
- .ndo_vlan_rx_add_vid = mtk_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = mtk_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mtk_poll_controller,
-#endif
-};
-
-static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
-{
- struct mtk_mac *mac;
- const __be32 *_id = of_get_property(np, "reg", NULL);
- int id, err;
-
- if (!_id) {
- dev_err(eth->dev, "missing mac id\n");
- return -EINVAL;
- }
- id = be32_to_cpup(_id);
- if (id >= eth->soc->mac_count || eth->netdev[id]) {
- dev_err(eth->dev, "%d is not a valid mac id\n", id);
- return -EINVAL;
- }
-
- eth->netdev[id] = alloc_etherdev(sizeof(*mac));
- if (!eth->netdev[id]) {
- dev_err(eth->dev, "alloc_etherdev failed\n");
- return -ENOMEM;
- }
- mac = netdev_priv(eth->netdev[id]);
- eth->mac[id] = mac;
- mac->id = id;
- mac->hw = eth;
- mac->of_node = np;
- INIT_WORK(&mac->pending_work, mtk_pending_work);
-
- if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
- mac->hw_stats = devm_kzalloc(eth->dev,
- sizeof(*mac->hw_stats),
- GFP_KERNEL);
- if (!mac->hw_stats) {
- err = -ENOMEM;
- goto free_netdev;
- }
- spin_lock_init(&mac->hw_stats->stats_lock);
- mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
- }
-
- SET_NETDEV_DEV(eth->netdev[id], eth->dev);
- eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
- eth->netdev[id]->base_addr = (unsigned long)eth->base;
-
- if (eth->soc->init_data)
- eth->soc->init_data(eth->soc, eth->netdev[id]);
-
- eth->netdev[id]->vlan_features = eth->soc->hw_features &
- ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
- eth->netdev[id]->features |= eth->soc->hw_features;
-
- if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
- eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
- mtk_set_ethtool_ops(eth->netdev[id]);
-
- err = register_netdev(eth->netdev[id]);
- if (err) {
- dev_err(eth->dev, "error bringing up device\n");
- goto free_netdev;
- }
- eth->netdev[id]->irq = eth->irq;
- netif_info(eth, probe, eth->netdev[id],
- "mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[id]->base_addr, eth->netdev[id]->irq);
-
- return 0;
-
-free_netdev:
- free_netdev(eth->netdev[id]);
- return err;
-}
-
-static int mtk_probe(struct platform_device *pdev)
-{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- const struct of_device_id *match;
- struct device_node *mac_np;
- struct mtk_soc_data *soc;
- struct mtk_eth *eth;
- struct clk *sysclk;
- int err;
-
- device_reset(&pdev->dev);
-
- match = of_match_device(of_mtk_match, &pdev->dev);
- soc = (struct mtk_soc_data *)match->data;
-
- if (soc->reg_table)
- mtk_reg_table = soc->reg_table;
-
- eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
- if (!eth)
- return -ENOMEM;
-
- eth->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(eth->base))
- return PTR_ERR(eth->base);
-
- spin_lock_init(&eth->page_lock);
-
- eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,ethsys");
- if (IS_ERR(eth->ethsys))
- return PTR_ERR(eth->ethsys);
-
- eth->irq = platform_get_irq(pdev, 0);
- if (eth->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENXIO;
- }
-
- sysclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sysclk)) {
- dev_err(&pdev->dev,
- "the clock is not defined in the devicetree\n");
- return -ENXIO;
- }
- eth->sysclk = clk_get_rate(sysclk);
-
- eth->switch_np = of_parse_phandle(pdev->dev.of_node,
- "mediatek,switch", 0);
- if (soc->has_switch && !eth->switch_np) {
- dev_err(&pdev->dev, "failed to read switch phandle\n");
- return -ENODEV;
- }
-
- eth->dev = &pdev->dev;
- eth->soc = soc;
- eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
-
- err = mtk_init_hw(eth);
- if (err)
- return err;
-
- if (eth->soc->mac_count > 1) {
- for_each_child_of_node(pdev->dev.of_node, mac_np) {
- if (!of_device_is_compatible(mac_np,
- "mediatek,eth-mac"))
- continue;
-
- if (!of_device_is_available(mac_np))
- continue;
-
- err = mtk_add_mac(eth, mac_np);
- if (err)
- goto err_free_dev;
- }
-
- init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
- soc->napi_weight);
- } else {
- err = mtk_add_mac(eth, pdev->dev.of_node);
- if (err)
- goto err_free_dev;
- netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
- soc->napi_weight);
- }
-
- platform_set_drvdata(pdev, eth);
-
- return 0;
-
-err_free_dev:
- mtk_cleanup(eth);
- return err;
-}
-
-static int mtk_remove(struct platform_device *pdev)
-{
- struct mtk_eth *eth = platform_get_drvdata(pdev);
-
- netif_napi_del(&eth->rx_napi);
- mtk_cleanup(eth);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static struct platform_driver mtk_driver = {
- .probe = mtk_probe,
- .remove = mtk_remove,
- .driver = {
- .name = "mtk_soc_eth",
- .of_match_table = of_mtk_match,
- },
-};
-
-module_platform_driver(mtk_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644
index e6ed80433f49..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.h
+++ /dev/null
@@ -1,716 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETH_H
-#define MTK_ETH_H
-
-#include <linux/mii.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/ethtool.h>
-#include <linux/version.h>
-#include <linux/atomic.h>
-
-/* these registers have different offsets depending on the SoC. we use a lookup
- * table for these
- */
-enum mtk_reg {
- MTK_REG_PDMA_GLO_CFG = 0,
- MTK_REG_PDMA_RST_CFG,
- MTK_REG_DLY_INT_CFG,
- MTK_REG_TX_BASE_PTR0,
- MTK_REG_TX_MAX_CNT0,
- MTK_REG_TX_CTX_IDX0,
- MTK_REG_TX_DTX_IDX0,
- MTK_REG_RX_BASE_PTR0,
- MTK_REG_RX_MAX_CNT0,
- MTK_REG_RX_CALC_IDX0,
- MTK_REG_RX_DRX_IDX0,
- MTK_REG_MTK_INT_ENABLE,
- MTK_REG_MTK_INT_STATUS,
- MTK_REG_MTK_DMA_VID_BASE,
- MTK_REG_MTK_COUNTER_BASE,
- MTK_REG_MTK_RST_GL,
- MTK_REG_MTK_INT_STATUS2,
- MTK_REG_COUNT
-};
-
-/* delayed interrupt bits */
-#define MTK_DELAY_EN_INT 0x80
-#define MTK_DELAY_MAX_INT 0x04
-#define MTK_DELAY_MAX_TOUT 0x04
-#define MTK_DELAY_TIME 20
-#define MTK_DELAY_CHAN (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
- | MTK_DELAY_MAX_TOUT)
-#define MTK_DELAY_INIT ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
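-
-/* with the values above, MTK_DELAY_CHAN evaluates to 0x8404 and
- * MTK_DELAY_INIT to 0x84048404, i.e. the same enable/max-pending/timeout
- * settings are programmed into both 16 bit halves of the delay register.
- */
-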
-#define MTK_PSE_FQFC_CFG_INIT 0x80504000
-#define MTK_PSE_FQFC_CFG_256Q 0xff908000
-
-/* interrupt bits */
-#define MTK_CNT_PPE_AF BIT(31)
-#define MTK_CNT_GDM_AF BIT(29)
-#define MTK_PSE_P2_FC BIT(26)
-#define MTK_PSE_BUF_DROP BIT(24)
-#define MTK_GDM_OTHER_DROP BIT(23)
-#define MTK_PSE_P1_FC BIT(22)
-#define MTK_PSE_P0_FC BIT(21)
-#define MTK_PSE_FQ_EMPTY BIT(20)
-#define MTK_GE1_STA_CHG BIT(18)
-#define MTK_TX_COHERENT BIT(17)
-#define MTK_RX_COHERENT BIT(16)
-#define MTK_TX_DONE_INT3 BIT(11)
-#define MTK_TX_DONE_INT2 BIT(10)
-#define MTK_TX_DONE_INT1 BIT(9)
-#define MTK_TX_DONE_INT0 BIT(8)
-#define MTK_RX_DONE_INT0 BIT(2)
-#define MTK_TX_DLY_INT BIT(1)
-#define MTK_RX_DLY_INT BIT(0)
-
-#define MTK_RX_DONE_INT MTK_RX_DONE_INT0
-#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
- MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
-
-#define RT5350_RX_DLY_INT BIT(30)
-#define RT5350_TX_DLY_INT BIT(28)
-#define RT5350_RX_DONE_INT1 BIT(17)
-#define RT5350_RX_DONE_INT0 BIT(16)
-#define RT5350_TX_DONE_INT3 BIT(3)
-#define RT5350_TX_DONE_INT2 BIT(2)
-#define RT5350_TX_DONE_INT1 BIT(1)
-#define RT5350_TX_DONE_INT0 BIT(0)
-
-#define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
-#define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
- RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
-
-/* registers */
-#define MTK_GDMA_OFFSET 0x0020
-#define MTK_PSE_OFFSET 0x0040
-#define MTK_GDMA2_OFFSET 0x0060
-#define MTK_CDMA_OFFSET 0x0080
-#define MTK_DMA_VID0 0x00a8
-#define MTK_PDMA_OFFSET 0x0100
-#define MTK_PPE_OFFSET 0x0200
-#define MTK_CMTABLE_OFFSET 0x0400
-#define MTK_POLICYTABLE_OFFSET 0x1000
-
-#define MT7621_GDMA_OFFSET 0x0500
-#define MT7620_GDMA_OFFSET 0x0600
-
-#define RT5350_PDMA_OFFSET 0x0800
-#define RT5350_SDM_OFFSET 0x0c00
-
-#define MTK_MDIO_ACCESS 0x00
-#define MTK_MDIO_CFG 0x04
-#define MTK_GLO_CFG 0x08
-#define MTK_RST_GL 0x0C
-#define MTK_INT_STATUS 0x10
-#define MTK_INT_ENABLE 0x14
-#define MTK_MDIO_CFG2 0x18
-#define MTK_FOC_TS_T 0x1C
-
-#define MTK_GDMA1_FWD_CFG (MTK_GDMA_OFFSET + 0x00)
-#define MTK_GDMA1_SCH_CFG (MTK_GDMA_OFFSET + 0x04)
-#define MTK_GDMA1_SHPR_CFG (MTK_GDMA_OFFSET + 0x08)
-#define MTK_GDMA1_MAC_ADRL (MTK_GDMA_OFFSET + 0x0C)
-#define MTK_GDMA1_MAC_ADRH (MTK_GDMA_OFFSET + 0x10)
-
-#define MTK_GDMA2_FWD_CFG (MTK_GDMA2_OFFSET + 0x00)
-#define MTK_GDMA2_SCH_CFG (MTK_GDMA2_OFFSET + 0x04)
-#define MTK_GDMA2_SHPR_CFG (MTK_GDMA2_OFFSET + 0x08)
-#define MTK_GDMA2_MAC_ADRL (MTK_GDMA2_OFFSET + 0x0C)
-#define MTK_GDMA2_MAC_ADRH (MTK_GDMA2_OFFSET + 0x10)
-
-#define MTK_PSE_FQ_CFG (MTK_PSE_OFFSET + 0x00)
-#define MTK_CDMA_FC_CFG (MTK_PSE_OFFSET + 0x04)
-#define MTK_GDMA1_FC_CFG (MTK_PSE_OFFSET + 0x08)
-#define MTK_GDMA2_FC_CFG (MTK_PSE_OFFSET + 0x0C)
-
-#define MTK_CDMA_CSG_CFG (MTK_CDMA_OFFSET + 0x00)
-#define MTK_CDMA_SCH_CFG (MTK_CDMA_OFFSET + 0x04)
-
-#define MT7621_GDMA_FWD_CFG(x) (MT7621_GDMA_OFFSET + (x * 0x1000))
-
-/* FIXME this might be different for different SOCs */
-#define MT7620_GDMA1_FWD_CFG (MT7621_GDMA_OFFSET + 0x00)
-
-#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00)
-#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04)
-#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08)
-#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C)
-#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10)
-#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14)
-#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18)
-#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C)
-#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20)
-#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24)
-#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28)
-#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C)
-#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30)
-#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34)
-#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38)
-#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C)
-#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100)
-#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104)
-#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108)
-#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C)
-#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110)
-#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114)
-#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118)
-#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C)
-#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204)
-#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208)
-#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c)
-#define RT5350_MTK_INT_STATUS (RT5350_PDMA_OFFSET + 0x220)
-#define RT5350_MTK_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228)
-#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280)
-
-#define MTK_PDMA_GLO_CFG (MTK_PDMA_OFFSET + 0x00)
-#define MTK_PDMA_RST_CFG (MTK_PDMA_OFFSET + 0x04)
-#define MTK_PDMA_SCH_CFG (MTK_PDMA_OFFSET + 0x08)
-#define MTK_DLY_INT_CFG (MTK_PDMA_OFFSET + 0x0C)
-#define MTK_TX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x10)
-#define MTK_TX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x14)
-#define MTK_TX_CTX_IDX0 (MTK_PDMA_OFFSET + 0x18)
-#define MTK_TX_DTX_IDX0 (MTK_PDMA_OFFSET + 0x1C)
-#define MTK_TX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x20)
-#define MTK_TX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x24)
-#define MTK_TX_CTX_IDX1 (MTK_PDMA_OFFSET + 0x28)
-#define MTK_TX_DTX_IDX1 (MTK_PDMA_OFFSET + 0x2C)
-#define MTK_RX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x30)
-#define MTK_RX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x34)
-#define MTK_RX_CALC_IDX0 (MTK_PDMA_OFFSET + 0x38)
-#define MTK_RX_DRX_IDX0 (MTK_PDMA_OFFSET + 0x3C)
-#define MTK_TX_BASE_PTR2 (MTK_PDMA_OFFSET + 0x40)
-#define MTK_TX_MAX_CNT2 (MTK_PDMA_OFFSET + 0x44)
-#define MTK_TX_CTX_IDX2 (MTK_PDMA_OFFSET + 0x48)
-#define MTK_TX_DTX_IDX2 (MTK_PDMA_OFFSET + 0x4C)
-#define MTK_TX_BASE_PTR3 (MTK_PDMA_OFFSET + 0x50)
-#define MTK_TX_MAX_CNT3 (MTK_PDMA_OFFSET + 0x54)
-#define MTK_TX_CTX_IDX3 (MTK_PDMA_OFFSET + 0x58)
-#define MTK_TX_DTX_IDX3 (MTK_PDMA_OFFSET + 0x5C)
-#define MTK_RX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x60)
-#define MTK_RX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x64)
-#define MTK_RX_CALC_IDX1 (MTK_PDMA_OFFSET + 0x68)
-#define MTK_RX_DRX_IDX1 (MTK_PDMA_OFFSET + 0x6C)
-
-/* Switch DMA configuration */
-#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00)
-#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04)
-#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08)
-#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C)
-#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10)
-#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100)
-#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104)
-#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108)
-#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C)
-#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110)
-
-#define RT5350_SDM_ICS_EN BIT(16)
-#define RT5350_SDM_TCS_EN BIT(17)
-#define RT5350_SDM_UCS_EN BIT(18)
-
-/* QDMA registers */
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
-#define MTK_QRX_BASE_PTR0 0x1900
-#define MTK_QRX_MAX_CNT0 0x1904
-#define MTK_QRX_CRX_IDX0 0x1908
-#define MTK_QRX_DRX_IDX0 0x190C
-#define MTK_QDMA_GLO_CFG 0x1A04
-#define MTK_QDMA_RST_IDX 0x1A08
-#define MTK_QDMA_DELAY_INT 0x1A0C
-#define MTK_QDMA_FC_THRES 0x1A10
-#define MTK_QMTK_INT_STATUS 0x1A18
-#define MTK_QMTK_INT_ENABLE 0x1A1C
-#define MTK_QDMA_HRED2 0x1A44
-
-#define MTK_QTX_CTX_PTR 0x1B00
-#define MTK_QTX_DTX_PTR 0x1B04
-
-#define MTK_QTX_CRX_PTR 0x1B10
-#define MTK_QTX_DRX_PTR 0x1B14
-
-#define MTK_QDMA_FQ_HEAD 0x1B20
-#define MTK_QDMA_FQ_TAIL 0x1B24
-#define MTK_QDMA_FQ_CNT 0x1B28
-#define MTK_QDMA_FQ_BLEN 0x1B2C
-
-#define QDMA_PAGE_SIZE 2048
-#define QDMA_TX_OWNER_CPU BIT(31)
-#define QDMA_TX_SWC BIT(14)
-#define TX_QDMA_SDL(_x) (((_x) & 0x3fff) << 16)
-#define QDMA_RES_THRES 4
-
-/* MDIO_CFG register bits */
-#define MTK_MDIO_CFG_AUTO_POLL_EN BIT(29)
-#define MTK_MDIO_CFG_GP1_BP_EN BIT(16)
-#define MTK_MDIO_CFG_GP1_FRC_EN BIT(15)
-#define MTK_MDIO_CFG_GP1_SPEED_10 (0 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_100 (1 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_1000 (2 << 13)
-#define MTK_MDIO_CFG_GP1_DUPLEX BIT(12)
-#define MTK_MDIO_CFG_GP1_FC_TX BIT(11)
-#define MTK_MDIO_CFG_GP1_FC_RX BIT(10)
-#define MTK_MDIO_CFG_GP1_LNK_DWN BIT(9)
-#define MTK_MDIO_CFG_GP1_AN_FAIL BIT(8)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6)
-#define MTK_MDIO_CFG_TURBO_MII_FREQ BIT(5)
-#define MTK_MDIO_CFG_TURBO_MII_MODE BIT(4)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2)
-#define MTK_MDIO_CFG_TX_CLK_SKEW_0 0
-#define MTK_MDIO_CFG_TX_CLK_SKEW_200 1
-#define MTK_MDIO_CFG_TX_CLK_SKEW_400 2
-#define MTK_MDIO_CFG_TX_CLK_SKEW_INV 3
-
-/* uni-cast port */
-#define MTK_GDM1_JMB_LEN_MASK 0xf
-#define MTK_GDM1_JMB_LEN_SHIFT 28
-#define MTK_GDM1_ICS_EN BIT(22)
-#define MTK_GDM1_TCS_EN BIT(21)
-#define MTK_GDM1_UCS_EN BIT(20)
-#define MTK_GDM1_JMB_EN BIT(19)
-#define MTK_GDM1_STRPCRC BIT(16)
-#define MTK_GDM1_UFRC_P_CPU (0 << 12)
-#define MTK_GDM1_UFRC_P_GDMA1 (1 << 12)
-#define MTK_GDM1_UFRC_P_PPE (6 << 12)
-
-/* checksums */
-#define MTK_ICS_GEN_EN BIT(2)
-#define MTK_UCS_GEN_EN BIT(1)
-#define MTK_TCS_GEN_EN BIT(0)
-
-/* dma mode */
-#define MTK_PDMA BIT(0)
-#define MTK_QDMA BIT(1)
-#define MTK_PDMA_RX_QDMA_TX (MTK_PDMA | MTK_QDMA)
-
-/* dma ring */
-#define MTK_PST_DRX_IDX0 BIT(16)
-#define MTK_PST_DTX_IDX3 BIT(3)
-#define MTK_PST_DTX_IDX2 BIT(2)
-#define MTK_PST_DTX_IDX1 BIT(1)
-#define MTK_PST_DTX_IDX0 BIT(0)
-
-#define MTK_RX_2B_OFFSET BIT(31)
-#define MTK_TX_WB_DDONE BIT(6)
-#define MTK_RX_DMA_BUSY BIT(3)
-#define MTK_TX_DMA_BUSY BIT(1)
-#define MTK_RX_DMA_EN BIT(2)
-#define MTK_TX_DMA_EN BIT(0)
-
-#define MTK_PDMA_SIZE_4DWORDS (0 << 4)
-#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
-#define MTK_PDMA_SIZE_16DWORDS (2 << 4)
-
-#define MTK_US_CYC_CNT_MASK 0xff
-#define MTK_US_CYC_CNT_SHIFT 0x8
-#define MTK_US_CYC_CNT_DIVISOR 1000000
-
-/* PDMA descriptor rxd2 */
-#define RX_DMA_DONE BIT(31)
-#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
-#define RX_DMA_TAG BIT(15)
-
-/* PDMA descriptor rxd3 */
-#define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff)
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
-
-/* PDMA descriptor rxd4 */
-#define RX_DMA_L4VALID BIT(30)
-#define RX_DMA_FPORT_SHIFT 19
-#define RX_DMA_FPORT_MASK 0x7
-
-struct mtk_rx_dma {
- unsigned int rxd1;
- unsigned int rxd2;
- unsigned int rxd3;
- unsigned int rxd4;
-} __packed __aligned(4);
-
-/* PDMA tx descriptor bits */
-#define TX_DMA_BUF_LEN 0x3fff
-#define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16)
-#define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_LS1 BIT(14)
-#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_DONE BIT(31)
-#define TX_DMA_FPORT_SHIFT 25
-#define TX_DMA_FPORT_MASK 0x7
-#define TX_DMA_INS_VLAN_MT7621 BIT(16)
-#define TX_DMA_INS_VLAN BIT(7)
-#define TX_DMA_INS_PPPOE BIT(12)
-#define TX_DMA_TAG BIT(15)
-#define TX_DMA_TAG_MASK BIT(15)
-#define TX_DMA_QN(_x) ((_x) << 16)
-#define TX_DMA_PN(_x) ((_x) << 24)
-#define TX_DMA_QN_MASK TX_DMA_QN(0x7)
-#define TX_DMA_PN_MASK TX_DMA_PN(0x7)
-#define TX_DMA_UDF BIT(20)
-#define TX_DMA_CHKSUM (0x7 << 29)
-#define TX_DMA_TSO BIT(28)
-#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1))
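-
-/* for example, a descriptor carrying a 1500 byte segment in slot 0 and a
- * 60 byte segment in slot 1 encodes
- * txd2 = TX_DMA_PLEN0(1500) | TX_DMA_PLEN1(60) = 0x05dc003c
- */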
-
-/* frame engine counters */
-#define MTK_PPE_AC_BCNT0 (MTK_CMTABLE_OFFSET + 0x00)
-#define MTK_GDMA1_TX_GBCNT (MTK_CMTABLE_OFFSET + 0x300)
-#define MTK_GDMA2_TX_GBCNT (MTK_GDMA1_TX_GBCNT + 0x40)
-
-/* phy device flags */
-#define MTK_PHY_FLAG_PORT BIT(0)
-#define MTK_PHY_FLAG_ATTACH BIT(1)
-
-struct mtk_tx_dma {
- unsigned int txd1;
- unsigned int txd2;
- unsigned int txd3;
- unsigned int txd4;
-} __packed __aligned(4);
-
-struct mtk_eth;
-struct mtk_mac;
-
-/* manage the attached phys */
-struct mtk_phy {
- spinlock_t lock;
-
- struct phy_device *phy[8];
- struct device_node *phy_node[8];
- const __be32 *phy_fixed[8];
- int duplex[8];
- int speed[8];
- int tx_fc[8];
- int rx_fc[8];
- int (*connect)(struct mtk_mac *mac);
- void (*disconnect)(struct mtk_mac *mac);
- void (*start)(struct mtk_mac *mac);
- void (*stop)(struct mtk_mac *mac);
-};
-
-/* struct mtk_soc_data - the structure that holds the SoC specific data
- * @reg_table: Some of the legacy registers changed their location
- * over time. Their offsets are stored in this table
- *
- * @init_data: Some features depend on the silicon revision. This
- * callback allows runtime modification of the content of
- * this struct
- * @reset_fe: This callback is used to trigger the reset of the frame
- * engine
- * @set_mac: This callback is used to set the unicast mac address
- * filter
- * @fwd_config: This callback is used to setup the forward config
- * register of the MAC
- * @switch_init: This callback is used to bring up the switch core
- * @port_init: Some SoCs have ports that can be routed to a switch port
- * or an external PHY. This callback is used to setup these
- * ports.
- * @has_carrier: This callback allows the driver to check if there is a cable
- * attached.
- * @mdio_init: This callback is used to setup the MDIO bus if one is
- * present
- * @mdio_cleanup: This callback is used to cleanup the MDIO state.
- * @mdio_write: This callback is used to write data to the MDIO bus.
- * @mdio_read: This callback is used to read data from the MDIO bus.
- * @mdio_adjust_link: This callback is used to apply the PHY settings.
- * @piac_offset: the PIAC register has a different base offset
- * @hw_features: feature set depends on the SoC type
- * @dma_ring_size: allow GBit SoCs to set bigger rings than FE SoCs
- * @napi_weight: allow GBit SoCs to set bigger napi weight than FE SoCs
- * @dma_type: whether the SoC uses PDMA, QDMA or a mix of the two
- * @pdma_glo_cfg: the default DMA configuration
- * @rx_int: the RX interrupt bits used by the SoC
- * @tx_int: the TX interrupt bits used by the SoC
- * @status_int: the Status interrupt bits used by the SoC
- * @checksum_bit: the bits used to turn on HW checksumming
- * @txd4: default value of the TXD4 descriptor
- * @mac_count: the number of MACs that the SoC has
- * @new_stats: there is an old and a new way to read the hardware stats
- * registers
- * @jumbo_frame: does the SoC support jumbo frames?
- * @rx_2b_offset: tell the rx dma to offset the data by 2 bytes
- * @rx_sg_dma: scatter gather support
- * @padding_64b: enable 64 bit padding
- * @padding_bug: rt2880 has a padding bug
- * @has_switch: does the SoC have a built-in switch
- *
- * Although all of the supported SoCs share the same basic functionality, there
- * are several SoC specific functions and features that we need to support. This
- * struct holds the SoC specific data so that the common core can figure out
- * how to setup and use these differences.
- */
-struct mtk_soc_data {
- const u16 *reg_table;
-
- void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
- void (*reset_fe)(struct mtk_eth *eth);
- void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
- int (*fwd_config)(struct mtk_eth *eth);
- int (*switch_init)(struct mtk_eth *eth);
- void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *port);
- int (*has_carrier)(struct mtk_eth *eth);
- int (*mdio_init)(struct mtk_eth *eth);
- void (*mdio_cleanup)(struct mtk_eth *eth);
- int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
- u16 val);
- int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
- void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
- u32 piac_offset;
- netdev_features_t hw_features;
- u32 dma_ring_size;
- u32 napi_weight;
- u32 dma_type;
- u32 pdma_glo_cfg;
- u32 rx_int;
- u32 tx_int;
- u32 status_int;
- u32 checksum_bit;
- u32 txd4;
- u32 mac_count;
-
- u32 new_stats:1;
- u32 jumbo_frame:1;
- u32 rx_2b_offset:1;
- u32 rx_sg_dma:1;
- u32 padding_64b:1;
- u32 padding_bug:1;
- u32 has_switch:1;
-};
-
-#define MTK_STAT_OFFSET 0x40
-
-/* struct mtk_hw_stats - the structure that holds the traffic statistics.
- * @stats_lock: make sure that stats operations are atomic
- * @reg_offset: the status register offset of the SoC
- * @syncp: the u64 stats sync used to snapshot the counters
- *
- * All of the supported SoCs have hardware counters for traffic statistics.
- * Whenever the status IRQ triggers we can read the latest stats from these
- * counters and store them in this struct.
- */
-struct mtk_hw_stats {
- spinlock_t stats_lock;
- u32 reg_offset;
- struct u64_stats_sync syncp;
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_skip;
- u64 tx_collisions;
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_overflow;
- u64 rx_fcs_errors;
- u64 rx_short_errors;
- u64 rx_long_errors;
- u64 rx_checksum_errors;
- u64 rx_flow_control_packets;
-};
-
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
-enum mtk_tx_flags {
- MTK_TX_FLAGS_SINGLE0 = 0x01,
- MTK_TX_FLAGS_PAGE0 = 0x02,
- MTK_TX_FLAGS_PAGE1 = 0x04,
-};
-
-/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
- * by the TX descriptors
- * @skb: The SKB pointer of the packet being sent
- * @dma_addr0: The base addr of the first segment
- * @dma_len0: The length of the first segment
- * @dma_addr1: The base addr of the second segment
- * @dma_len1: The length of the second segment
- */
-struct mtk_tx_buf {
- struct sk_buff *skb;
- u32 flags;
- DEFINE_DMA_UNMAP_ADDR(dma_addr0);
- DEFINE_DMA_UNMAP_LEN(dma_len0);
- DEFINE_DMA_UNMAP_ADDR(dma_addr1);
- DEFINE_DMA_UNMAP_LEN(dma_len1);
-};
-
-/* struct mtk_tx_ring - This struct holds info describing a TX ring
- * @tx_dma: The descriptor ring
- * @tx_buf: The memory pointed at by the ring
- * @tx_phys: The physical addr of tx_buf
- * @tx_next_free: Pointer to the next free descriptor
- * @tx_last_free: Pointer to the last free descriptor
- * @tx_thresh: The threshold of minimum amount of free descriptors
- * @tx_map: Callback to map a new packet into the ring
- * @tx_poll: Callback for the housekeeping function
- * @tx_clean: Callback for the cleanup function
- * @tx_ring_size: How many descriptors are in the ring
- * @tx_free_idx: PDMA only. The index of the next descriptor to reclaim
- * @tx_next_idx: PDMA only. The index of the next free descriptor
- * @tx_free_count: Track how many free descriptors are present
- */
-struct mtk_tx_ring {
- struct mtk_tx_dma *tx_dma;
- struct mtk_tx_buf *tx_buf;
- dma_addr_t tx_phys;
- struct mtk_tx_dma *tx_next_free;
- struct mtk_tx_dma *tx_last_free;
- u16 tx_thresh;
- int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
- struct mtk_tx_ring *ring, bool gso);
- int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
- void (*tx_clean)(struct mtk_eth *eth);
-
- u16 tx_ring_size;
-
- /* PDMA only */
- u16 tx_free_idx;
- u16 tx_next_idx;
-
- atomic_t tx_free_count;
-};
-
-/* struct mtk_rx_ring - This struct holds info describing a RX ring
- * @rx_dma: The descriptor ring
- * @rx_data: The memory pointed at by the ring
- * @rx_phys: The physical addr of rx_dma
- * @rx_ring_size: How many descriptors are in the ring
- * @frag_size: The allocation size of each rx fragment
- * @rx_buf_size: The size of each packet buffer
- * @rx_calc_idx: The current head of the ring
- */
-struct mtk_rx_ring {
- struct mtk_rx_dma *rx_dma;
- u8 **rx_data;
- dma_addr_t rx_phys;
- u16 rx_ring_size;
- u16 frag_size;
- u16 rx_buf_size;
- u16 rx_calc_idx;
-};
-
-/* currently no SoC has more than 2 macs */
-#define MTK_MAX_DEVS 2
-
-/* struct mtk_eth - This is the main data structure for holding the state
- * of the driver
- * @dev: The device pointer
- * @base: The mapped register i/o base
- * @page_lock: Make sure that register operations are atomic
- * @soc: pointer to our SoC specific data
- * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
- * dummy for NAPI to work
- * @netdev: The netdev instances
- * @mac: Each netdev is linked to a physical MAC
- * @switch_np: The phandle for the switch
- * @irq: The IRQ that we are using
- * @msg_enable: Ethtool msg level
- * @sysclk: The sysclk rate - needed for calibration
- * @ethsys: The register map pointing at the range used to setup
- * MII modes
- * @dma_refcnt: track how many netdevs are using the DMA engine
- * @tx_ring: Pointer to the memory holding info about the TX ring
- * @rx_ring: Pointer to the memory holding info about the RX ring
- * @rx_napi: The NAPI struct
- * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
- * @scratch_head: The scratch memory that scratch_ring points to.
- * @phy: Info about the attached PHYs
- * @mii_bus: If there is a bus we need to create an instance for it
- * @link: Track if the ports have a physical link
- * @sw_priv: Pointer to the switch's private data
- * @vlan_map: RX VID tracking
- */
-
-struct mtk_eth {
- struct device *dev;
- void __iomem *base;
- spinlock_t page_lock;
- struct mtk_soc_data *soc;
- struct net_device dummy_dev;
- struct net_device *netdev[MTK_MAX_DEVS];
- struct mtk_mac *mac[MTK_MAX_DEVS];
- struct device_node *switch_np;
- int irq;
- u32 msg_enable;
- unsigned long sysclk;
- struct regmap *ethsys;
- atomic_t dma_refcnt;
- struct mtk_tx_ring tx_ring;
- struct mtk_rx_ring rx_ring[2];
- struct napi_struct rx_napi;
- struct mtk_tx_dma *scratch_ring;
- void *scratch_head;
- struct mtk_phy *phy;
- struct mii_bus *mii_bus;
- int link[8];
- void *sw_priv;
- unsigned long vlan_map;
-};
-
-/* struct mtk_mac - the structure that holds the info about the MACs of the
- * SoC
- * @id: The number of the MAC
- * @of_node: Our devicetree node
- * @hw: Backpointer to our main data structure
- * @hw_stats: Packet statistics counter
- * @phy_dev: The attached PHY if available
- * @phy_flags: The PHYs flags
- * @pending_work: The workqueue used to reset the dma ring
- */
-struct mtk_mac {
- int id;
- struct device_node *of_node;
- struct mtk_eth *hw;
- struct mtk_hw_stats *hw_stats;
- struct phy_device *phy_dev;
- u32 phy_flags;
- struct work_struct pending_work;
-};
-
-/* the struct describing the SoC. These are declared in the soc_xyz.c files */
-extern const struct of_device_id of_mtk_match[];
-
-/* update the hardware statistics counters */
-void mtk_stats_update_mac(struct mtk_mac *mac);
-
-/* hardware reset handler */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
-
-/* register i/o wrappers */
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
-
-/* default clock calibration handler */
-int mtk_set_clock_cycle(struct mtk_eth *eth);
-
-/* default checksum setup handler */
-void mtk_csum_config(struct mtk_eth *eth);
-
-/* default forward config handler */
-void mtk_fwd_config(struct mtk_eth *eth);
-
-#endif /* MTK_ETH_H */
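
A note on the ring bookkeeping documented above: PDMA tracks free descriptors with an index into a fixed-size ring, while QDMA walks a linked list and keeps an atomic free count. As a hypothetical sketch (not code from this driver), an index-based ring with a power-of-two size yields its free-slot count like so:

/* Hypothetical helper, not from the driver above: free slots in an
 * index-based ring. One slot is kept empty so that the full and empty
 * states differ; assumes ring_size is a power of two.
 */
static inline u16 ring_free_slots(u16 head, u16 tail, u16 ring_size)
{
	return (tail - head - 1) & (ring_size - 1);
}
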
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644
index 5d63b5d96f6b..000000000000
--- a/drivers/staging/mt7621-eth/soc_mt7621.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/if_vlan.h>
-#include <linux/of_net.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-#define MT7620_CDMA_CSG_CFG 0x400
-#define MT7621_CDMP_IG_CTRL (MT7620_CDMA_CSG_CFG + 0x00)
-#define MT7621_CDMP_EG_CTRL (MT7620_CDMA_CSG_CFG + 0x04)
-#define MT7621_RESET_FE BIT(6)
-#define MT7621_L4_VALID BIT(24)
-
-#define MT7621_TX_DMA_UDF BIT(19)
-
-#define CDMA_ICS_EN BIT(2)
-#define CDMA_UCS_EN BIT(1)
-#define CDMA_TCS_EN BIT(0)
-
-#define GDMA_ICS_EN BIT(22)
-#define GDMA_TCS_EN BIT(21)
-#define GDMA_UCS_EN BIT(20)
-
-/* frame engine counters */
-#define MT7621_REG_MIB_OFFSET 0x2000
-#define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00)
-#define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400)
-#define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40)
-
-#define GSW_REG_GDMA1_MAC_ADRL 0x508
-#define GSW_REG_GDMA1_MAC_ADRH 0x50C
-#define GSW_REG_GDMA2_MAC_ADRL 0x1508
-#define GSW_REG_GDMA2_MAC_ADRH 0x150C
-
-#define MT7621_MTK_RST_GL 0x04
-#define MT7620_MTK_INT_STATUS2 0x08
-
-/* The MTK_INT_STATUS reg on mt7620 defines CNT_GDM1_AF as BIT(29),
- * but testing shows it should be BIT(13).
- */
-#define MT7621_MTK_GDM1_AF BIT(28)
-#define MT7621_MTK_GDM2_AF BIT(29)
-
-static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
- [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
- [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
- [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
- [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
- [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
- [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
- [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
- [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
- [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
- [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
- [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
- [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
- [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
- [MTK_REG_MTK_DMA_VID_BASE] = 0,
- [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
- [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
- [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
-};
-
-static void mt7621_mtk_reset(struct mtk_eth *eth)
-{
- mtk_reset(eth, MT7621_RESET_FE);
-}
-
-static int mt7621_fwd_config(struct mtk_eth *eth)
-{
- /* Set up GMAC1 only; there is no support for GMAC2 yet */
- mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
- MT7620_GDMA1_FWD_CFG);
-
- /* Enable RX checksum */
- mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
- GDMA_TCS_EN | GDMA_UCS_EN),
- MT7620_GDMA1_FWD_CFG);
-
- /* Enable RX VLAN offloading */
- mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
-
- return 0;
-}
-
-static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mac->hw->page_lock, flags);
- if (mac->id == 0) {
- mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
- GSW_REG_GDMA1_MAC_ADRH);
- mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
- (hwaddr[4] << 8) | hwaddr[5],
- GSW_REG_GDMA1_MAC_ADRL);
- }
- if (mac->id == 1) {
- mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
- GSW_REG_GDMA2_MAC_ADRH);
- mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
- (hwaddr[4] << 8) | hwaddr[5],
- GSW_REG_GDMA2_MAC_ADRL);
- }
- spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static struct mtk_soc_data mt7621_data = {
- .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_IPV6_CSUM,
- .dma_type = MTK_PDMA,
- .dma_ring_size = 256,
- .napi_weight = 64,
- .new_stats = 1,
- .padding_64b = 1,
- .rx_2b_offset = 1,
- .rx_sg_dma = 1,
- .has_switch = 1,
- .mac_count = 2,
- .reset_fe = mt7621_mtk_reset,
- .set_mac = mt7621_set_mac,
- .fwd_config = mt7621_fwd_config,
- .switch_init = mtk_gsw_init,
- .reg_table = mt7621_reg_table,
- .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
- .rx_int = RT5350_RX_DONE_INT,
- .tx_int = RT5350_TX_DONE_INT,
- .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
- .checksum_bit = MT7621_L4_VALID,
- .has_carrier = mt7620_has_carrier,
- .mdio_read = mt7620_mdio_read,
- .mdio_write = mt7620_mdio_write,
- .mdio_adjust_link = mt7620_mdio_link_adjust,
-};
-
-const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, of_mtk_match);
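
For reference, the mt7621_set_mac() helper deleted above splits the six MAC-address bytes big-endian across a high and a low register. The same packing, as standalone sketch helpers:

/* Same byte packing as mt7621_set_mac() above: bytes 0-1 land in the
 * ADRH register, bytes 2-5 in ADRL.
 */
static u32 mac_to_adrh(const u8 *a)
{
	return (a[0] << 8) | a[1];
}

static u32 mac_to_adrl(const u8 *a)
{
	return ((u32)a[2] << 24) | (a[3] << 16) | (a[4] << 8) | a[5];
}
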
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index d33533872a16..c8fa17cfa807 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -1,6 +1,7 @@
config PCI_MT7621
tristate "MediaTek MT7621 PCI Controller"
depends on RALINK
+ depends on PCI
select PCI_DRIVERS_GENERIC
help
This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/mt7621-spi/Kconfig b/drivers/staging/mt7621-spi/Kconfig
deleted file mode 100644
index 0b90f4cfa426..000000000000
--- a/drivers/staging/mt7621-spi/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config SPI_MT7621
- tristate "MediaTek MT7621 SPI Controller"
- depends on RALINK
- help
- This selects a driver for the MediaTek MT7621 SPI Controller.
-
diff --git a/drivers/staging/mt7621-spi/Makefile b/drivers/staging/mt7621-spi/Makefile
deleted file mode 100644
index 3be508f63bac..000000000000
--- a/drivers/staging/mt7621-spi/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
diff --git a/drivers/staging/mt7621-spi/TODO b/drivers/staging/mt7621-spi/TODO
deleted file mode 100644
index fdbc5002c32a..000000000000
--- a/drivers/staging/mt7621-spi/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-
-- general code review and clean up
-- ensure device-tree requirements are documented
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index d6248eecf123..2aee64fdaec5 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
goto no_phy;
phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ priv->phy_mode);
of_node_put(phy_node);
if (!phydev)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ce61c5670ef6..986db76705cc 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
return np;
}
-static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
+static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
+ int port)
{
+ struct device_node *np = priv->of_node;
u32 delay_value;
+ bool rx_delay;
+ bool tx_delay;
- if (!of_property_read_u32(np, "rx-delay", &delay_value))
+ /* By default, both RX and TX delays are enabled in
+ * __cvmx_helper_rgmii_enable().
+ */
+ rx_delay = true;
+ tx_delay = true;
+
+ if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
- if (!of_property_read_u32(np, "tx-delay", &delay_value))
+ rx_delay = delay_value > 0;
+ }
+ if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+ tx_delay = delay_value > 0;
+ }
+
+ if (!rx_delay && !tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+ else if (!rx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+ else if (!tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
+ else
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
}
static int cvm_oct_probe(struct platform_device *pdev)
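
The mapping built in cvm_set_rgmii_delay() above follows the usual RGMII convention: an "ID" (internal delay) phy-mode asks the PHY to add whichever delay the MAC does not provide. Condensed into one hypothetical helper:

/* Condensed restatement of the mapping above: a MAC-side delay of
 * zero means the PHY must supply that delay instead.
 */
static phy_interface_t rgmii_phy_mode(bool mac_rx_delay, bool mac_tx_delay)
{
	if (!mac_rx_delay && !mac_tx_delay)
		return PHY_INTERFACE_MODE_RGMII_ID;	/* PHY adds both */
	if (!mac_rx_delay)
		return PHY_INTERFACE_MODE_RGMII_RXID;	/* PHY adds RX */
	if (!mac_tx_delay)
		return PHY_INTERFACE_MODE_RGMII_TXID;	/* PHY adds TX */
	return PHY_INTERFACE_MODE_RGMII;		/* MAC adds both */
}
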
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
priv->port = port;
priv->queue = cvmx_pko_get_base_queue(priv->port);
priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+ priv->phy_mode = PHY_INTERFACE_MODE_NA;
for (qos = 0; qos < 16; qos++)
skb_queue_head_init(&priv->tx_free_list[qos]);
for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
strcpy(dev->name, "eth%d");
break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
strcpy(dev->name, "spi%d");
break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_GMII;
+ dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+ strcpy(dev->name, "eth%d");
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
strcpy(dev->name, "eth%d");
- cvm_set_rgmii_delay(priv->of_node, interface,
+ cvm_set_rgmii_delay(priv, interface,
port_index);
break;
}
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 4a07e7f43d12..be570d33685a 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -12,7 +12,7 @@
#define OCTEON_ETHERNET_H
#include <linux/of.h>
-
+#include <linux/phy.h>
#include <asm/octeon/cvmx-helper-board.h>
/**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
* cvmx_helper_interface_mode_t
*/
int imode;
+ /* PHY mode */
+ phy_interface_t phy_mode;
/* List of outstanding tx buffers per queue */
struct sk_buff_head tx_free_list[16];
unsigned int last_speed;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 80b8d4153414..a54286498a47 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
{
unsigned char lob;
int ret, i;
- struct dcon_gpio *pin = &gpios_asis[0];
+ const struct dcon_gpio *pin = &gpios_asis[0];
for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 1723a47a96b4..952f2ab51347 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
- rtw_alloc_hwxmits(padapter);
+ res = rtw_alloc_hwxmits(padapter);
+ if (res == _FAIL)
+ goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ exit:
return res;
}
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
sizeof(struct hw_xmit), GFP_KERNEL);
+ if (!pxmitpriv->hwxmits)
+ return _FAIL;
hwxmits = pxmitpriv->hwxmits;
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+ return _SUCCESS;
}
void rtw_free_hwxmits(struct adapter *padapter)
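
The hunks above convert rtw_alloc_hwxmits() from void to s32 so a kcalloc() failure can propagate instead of leaving a NULL array behind. The resulting call-site shape, shown in isolation:

	/* Caller pattern introduced above: bail out on allocation failure. */
	res = rtw_alloc_hwxmits(padapter);
	if (res == _FAIL)
		goto exit;	/* unwind rather than dereference NULL later */
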
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 788f59c74ea1..ba7e15fbde72 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 55da8c9dfe50..a084e1501f9d 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -507,7 +507,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
int err;
desc->tfm = tfm_michael;
- desc->flags = 0;
if (crypto_shash_setkey(tfm_michael, key, 8))
return -1;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 829fa4bd253c..d67bb57994c4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -503,7 +503,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
int err;
desc->tfm = tfm_michael;
- desc->flags = 0;
if (crypto_shash_setkey(tfm_michael, key, 8))
return -1;
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 1920d02f7c9f..8c36acedf507 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
- u32 val;
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- if (pcmd->rsp && pcmd->rspsz > 0)
- memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
+ r8712_free_cmd_obj(pcmd);
return H2C_SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 92fb77666d44..1ef86b8c592f 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
static struct _cmd_callback cmd_callback[] = {
{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
{GEN_CMD_CODE(_Write_MACREG), NULL},
- {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Read_BBREG), NULL},
{GEN_CMD_CODE(_Write_BBREG), NULL},
{GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
{GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 094d61bcb469..b87f13a0b563 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
}
}
- rtw_alloc_hwxmits(padapter);
+ res = rtw_alloc_hwxmits(padapter);
+ if (res == _FAIL)
+ goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ exit:
return res;
}
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
- if (pxmitpriv->hwxmits == NULL) {
- DBG_871X("alloc hwxmits fail!...\n");
- return;
- }
+ if (!pxmitpriv->hwxmits)
+ return _FAIL;
hwxmits = pxmitpriv->hwxmits;
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
}
-
+ return _SUCCESS;
}
void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index 1b38b9182b31..37f42b2f22f1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
index 9930ed954abb..4cc77b2016e1 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
rtlpriv->phydm.internal =
kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
+ if (!rtlpriv->phydm.internal)
+ return 0;
_rtl_phydm_init_com_info(rtlpriv, ic, params);
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index f061dd1382aa..cf6b7a80b753 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1_rsvd_page_loc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
totalpacketlen);
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index edff6ce85655..9d85a3a1af4c 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
return -EINVAL;
spin_lock_irqsave(&speakup_info.spinlock, flags);
+ synth_soft.alive = 1;
while (1) {
prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
- if (!unicode)
- synth_buffer_skip_nonlatin1();
- if (!synth_buffer_empty() || speakup_info.flushing)
- break;
+ if (synth_current() == &synth_soft) {
+ if (!unicode)
+ synth_buffer_skip_nonlatin1();
+ if (!synth_buffer_empty() || speakup_info.flushing)
+ break;
+ }
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (fp->f_flags & O_NONBLOCK) {
finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
/* Keep 3 bytes available for a 16bit UTF-8-encoded character */
while (chars_sent <= count - bytes_per_ch) {
+ if (synth_current() != &synth_soft)
+ break;
if (speakup_info.flushing) {
speakup_info.flushing = 0;
ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
poll_wait(fp, &speakup_event, wait);
spin_lock_irqsave(&speakup_info.spinlock, flags);
- if (!synth_buffer_empty() || speakup_info.flushing)
+ if (synth_current() == &synth_soft &&
+ (!synth_buffer_empty() || speakup_info.flushing))
ret = EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return ret;
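
The poll change above reports readiness only while the soft synth is the active synthesizer, so a reader blocks rather than spinning on data destined for another synth. The gate, in isolation:

	/* Readable only when this synth is current and data (or a flush)
	 * is pending; otherwise poll() keeps the reader asleep.
	 */
	if (synth_current() == &synth_soft &&
	    (!synth_buffer_empty() || speakup_info.flushing))
		ret = EPOLLIN | EPOLLRDNORM;
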
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index c8e688878fc7..ac6a74883af4 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
int synth_release_region(unsigned long start, unsigned long n);
int synth_add(struct spk_synth *in_synth);
void synth_remove(struct spk_synth *in_synth);
+struct spk_synth *synth_current(void);
extern struct speakup_info_t speakup_info;
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 25f259ee4ffc..3568bfb89912 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
}
EXPORT_SYMBOL_GPL(synth_remove);
+struct spk_synth *synth_current(void)
+{
+ return synth;
+}
+EXPORT_SYMBOL_GPL(synth_current);
+
short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 611a6ee2943a..7c6cf41645eb 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1370,10 +1370,6 @@ static int vidioc_g_parm(struct file *file, void *priv,
return 0;
}
-#define FRACT_CMP(a, OP, b) \
- ((u64)(a).numerator * (b).denominator OP \
- (u64)(b).numerator * (a).denominator)
-
static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
@@ -1387,8 +1383,8 @@ static int vidioc_s_parm(struct file *file, void *priv,
/* tpf: {*, 0} resets timing; clip to [min, max]*/
tpf = tpf.denominator ? tpf : tpf_default;
- tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
- tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
+ tpf = V4L2_FRACT_COMPARE(tpf, <, tpf_min) ? tpf_min : tpf;
+ tpf = V4L2_FRACT_COMPARE(tpf, >, tpf_max) ? tpf_max : tpf;
dev->capture.timeperframe = tpf;
parm->parm.capture.timeperframe = tpf;
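
V4L2_FRACT_COMPARE() takes over from the driver-local FRACT_CMP() deleted above; both compare two fractions by cross-multiplying in 64 bits, avoiding division and intermediate overflow. The deleted macro captures the idea:

/* Cross-multiplied fraction compare, as the removed local macro did:
 * a/b OP c/d  <=>  a*d OP c*b, valid for positive denominators.
 */
#define FRACT_CMP(a, OP, b) \
	((u64)(a).numerator * (b).denominator OP \
	 (u64)(b).numerator * (a).denominator)
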
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 804daf83be35..064d0db4c51e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
struct device_node *fw_node;
const struct of_device_id *of_id;
struct vchiq_drvdata *drvdata;
+ struct device *vchiq_dev;
int err;
of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
goto failed_platform_init;
}
- if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid,
- NULL, "vchiq")))
+ vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
+ "vchiq");
+ if (IS_ERR(vchiq_dev)) {
+ err = PTR_ERR(vchiq_dev);
goto failed_device_create;
+ }
vchiq_debugfs_init();
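
device_create() returns an ERR_PTR()-encoded pointer on failure, and the fix above now recovers the real errno rather than jumping to the error path with a stale value. The general shape, with illustrative names:

	/* ERR_PTR pattern used in the fix above (names are hypothetical). */
	struct device *dev = device_create(cls, parent, devt, NULL, "name");
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);	/* recover the negative errno */
		goto fail;
	}
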
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b370985b58a1..c6bb4aaf9bd0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
return;
}
- MACvIntDisable(priv->PortOffset);
-
spin_lock_irqsave(&priv->lock, flags);
/* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
if (priv->vif)
vnt_interrupt_process(priv);
+
+ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static irqreturn_t vnt_interrupt(int irq, void *arg)
{
struct vnt_private *priv = arg;
- if (priv->vif)
- schedule_work(&priv->interrupt_work);
+ schedule_work(&priv->interrupt_work);
+
+ MACvIntDisable(priv->PortOffset);
return IRQ_HANDLED;
}
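
The vt6655 rework above masks the device in the hard IRQ handler and unmasks it only after the deferred work has run, so the device stays quiet in between. The resulting top-half/bottom-half split:

/* Shape of the split adopted above: the ISR defers all processing. */
static irqreturn_t example_isr(int irq, void *arg)
{
	struct vnt_private *priv = arg;

	schedule_work(&priv->interrupt_work);	/* bottom half does the work */
	MACvIntDisable(priv->PortOffset);	/* no more IRQs until it runs */
	return IRQ_HANDLED;
}
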
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
index 417b9e66b0cd..3cca22e19964 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit.h
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -345,7 +345,7 @@ struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);
int cxgbit_ddp_init(struct cxgbit_device *);
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
-void cxgbit_release_cmd(struct iscsi_conn *, struct iscsi_cmd *);
+void cxgbit_unmap_cmd(struct iscsi_conn *, struct iscsi_cmd *);
static inline
struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 76a262674c8d..d57fd3ed3fa5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -263,7 +263,7 @@ out:
r2t->targ_xfer_tag = ttinfo->tag;
}
-void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index c011c826fc26..4a7bb0b49d17 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -678,7 +678,7 @@ static struct iscsit_transport cxgbit_transport = {
.iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt,
.iscsit_get_rx_pdu = cxgbit_get_rx_pdu,
.iscsit_validate_params = cxgbit_validate_params,
- .iscsit_release_cmd = cxgbit_release_cmd,
+ .iscsit_unmap_cmd = cxgbit_unmap_cmd,
.iscsit_aborted_task = iscsit_aborted_task,
.iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 25eb3891e34b..29b350a0b58f 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -960,7 +960,7 @@ after_immediate_data:
target_put_sess_cmd(&cmd->se_cmd);
return 0;
} else if (cmd->unsolicited_data) {
- iscsit_set_unsoliticed_dataout(cmd);
+ iscsit_set_unsolicited_dataout(cmd);
}
} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index bd15a564fe24..5ce6e2a40e00 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -308,9 +308,6 @@ bool iscsit_check_np_match(
return false;
}
-/*
- * Called with mutex np_lock held
- */
static struct iscsi_np *iscsit_get_np(
struct sockaddr_storage *sockaddr,
int network_transport)
@@ -318,6 +315,8 @@ static struct iscsi_np *iscsit_get_np(
struct iscsi_np *np;
bool match;
+ lockdep_assert_held(&np_lock);
+
list_for_each_entry(np, &g_np_list, np_list) {
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
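
A conversion that recurs throughout the target hunks below: a "Called with X held" comment becomes a lockdep_assert_held(), turning the documented locking rule into a check that splats under CONFIG_PROVE_LOCKING. The shape, with a hypothetical helper:

/* Hypothetical example of the comment-to-assertion conversion used
 * throughout this series.
 */
static void example_locked_helper(struct iscsi_session *sess)
{
	lockdep_assert_held(&sess->cmdsn_mutex);	/* was: a comment */
	/* ... body that relies on cmdsn_mutex being held ... */
}
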
@@ -1195,7 +1194,7 @@ attach_cmd:
}
EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
-void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
+void iscsit_set_unsolicited_dataout(struct iscsi_cmd *cmd)
{
iscsit_set_dataout_sequence_values(cmd);
@@ -1203,7 +1202,7 @@ void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
iscsit_start_dataout_timer(cmd, cmd->conn);
spin_unlock_bh(&cmd->dataout_timeout_lock);
}
-EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout);
+EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct iscsi_scsi_req *hdr)
@@ -1237,7 +1236,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
*/
if (!cmd->immediate_data) {
if (!cmd->sense_reason && cmd->unsolicited_data)
- iscsit_set_unsoliticed_dataout(cmd);
+ iscsit_set_unsolicited_dataout(cmd);
if (!cmd->sense_reason)
return 0;
@@ -1309,7 +1308,7 @@ after_immediate_data:
target_put_sess_cmd(&cmd->se_cmd);
return rc;
} else if (cmd->unsolicited_data)
- iscsit_set_unsoliticed_dataout(cmd);
+ iscsit_set_unsolicited_dataout(cmd);
} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
/*
@@ -2241,28 +2240,25 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
rx_size = payload_length;
if (payload_length) {
u32 checksum = 0, data_crc = 0;
- u32 padding = 0, pad_bytes = 0;
+ u32 padding = 0;
int niov = 0, rx_got;
- struct kvec iov[3];
+ struct kvec iov[2];
- text_in = kzalloc(payload_length, GFP_KERNEL);
+ rx_size = ALIGN(payload_length, 4);
+ text_in = kzalloc(rx_size, GFP_KERNEL);
if (!text_in)
goto reject;
cmd->text_in_ptr = text_in;
- memset(iov, 0, 3 * sizeof(struct kvec));
+ memset(iov, 0, sizeof(iov));
iov[niov].iov_base = text_in;
- iov[niov++].iov_len = payload_length;
+ iov[niov++].iov_len = rx_size;
- padding = ((-payload_length) & 3);
- if (padding != 0) {
- iov[niov].iov_base = &pad_bytes;
- iov[niov++].iov_len = padding;
- rx_size += padding;
+ padding = rx_size - payload_length;
+ if (padding)
pr_debug("Receiving %u additional bytes"
" for padding.\n", padding);
- }
if (conn->conn_ops->DataDigest) {
iov[niov].iov_base = &checksum;
iov[niov++].iov_len = ISCSI_CRC_LEN;
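
The text-command hunk above folds the pad bytes into the payload buffer: rounding the receive size up with ALIGN() lets payload and padding arrive through a single kvec, and the digest is then computed over the padded buffer in one call. In isolation:

	/* Padding fold-in used above: one zeroed buffer covers payload
	 * plus the 0..3 pad bytes, so no separate pad iovec is needed.
	 */
	rx_size = ALIGN(payload_length, 4);
	text_in = kzalloc(rx_size, GFP_KERNEL);
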
@@ -2274,9 +2270,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
- payload_length, padding,
- &pad_bytes, &data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
+ text_in, rx_size, 0, NULL,
+ &data_crc);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
@@ -2655,9 +2651,6 @@ static int iscsit_handle_immediate_data(
return IMMEDIATE_DATA_NORMAL_OPERATION;
}
-/*
- * Called with sess->conn_lock held.
- */
/* #warning iscsi_build_conn_drop_async_message() only sends out on connections
with active network interface */
static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
@@ -2666,6 +2659,8 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
struct iscsi_conn *conn_p;
bool found = false;
+ lockdep_assert_held(&conn->sess->conn_lock);
+
/*
* Only send an Asynchronous Message on connections whose network
* interface is still functional.
@@ -4040,9 +4035,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
struct se_cmd *se_cmd = &cmd->se_cmd;
if (se_cmd->se_tfo != NULL) {
- spin_lock(&se_cmd->t_state_lock);
+ spin_lock_irq(&se_cmd->t_state_lock);
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- spin_unlock(&se_cmd->t_state_lock);
+ spin_unlock_irq(&se_cmd->t_state_lock);
}
}
spin_unlock_bh(&conn->cmd_lock);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 48bac0acf8c7..c95f56a3ce31 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -31,7 +31,7 @@ extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
struct iscsi_portal_group *, bool);
extern int iscsit_del_np(struct iscsi_np *);
extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
-extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
+extern void iscsit_set_unsolicited_dataout(struct iscsi_cmd *);
extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 4e680d753941..ca7d7e8aecc0 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -252,7 +252,6 @@ static int chap_server_compute_md5(
}
desc->tfm = tfm;
- desc->flags = 0;
ret = crypto_shash_init(desc);
if (ret < 0) {
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index a5481dfeae8d..cac94c94ef5d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1389,18 +1389,6 @@ static int lio_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int lio_write_pending_status(struct se_cmd *se_cmd)
-{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
- int ret;
-
- spin_lock_bh(&cmd->istate_lock);
- ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
- spin_unlock_bh(&cmd->istate_lock);
-
- return ret;
-}
-
static int lio_queue_status(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
@@ -1564,7 +1552,6 @@ const struct target_core_fabric_ops iscsi_ops = {
.sess_get_index = lio_sess_get_index,
.sess_get_initiator_sid = lio_sess_get_initiator_sid,
.write_pending = lio_write_pending,
- .write_pending_status = lio_write_pending_status,
.set_default_node_attributes = lio_set_default_node_attributes,
.get_cmd_state = iscsi_get_cmd_state,
.queue_data_in = lio_queue_data_in,
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 1193cf884a28..8890c0721053 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -802,14 +802,13 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
jiffies + sess->sess_ops->DefaultTime2Retain * HZ);
}
-/*
- * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
- */
int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
{
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ lockdep_assert_held(&se_tpg->session_lock);
+
if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
return -1;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 1b54a9c70851..e02e1aaf63c5 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -48,14 +48,20 @@ int iscsit_dump_data_payload(
u32 buf_len,
int dump_padding_digest)
{
- char *buf, pad_bytes[4];
+ char *buf;
int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
- u32 length, padding, offset = 0, size;
+ u32 length, offset = 0, size;
struct kvec iov;
if (conn->sess->sess_ops->RDMAExtensions)
return 0;
+ if (dump_padding_digest) {
+ buf_len = ALIGN(buf_len, 4);
+ if (conn->conn_ops->DataDigest)
+ buf_len += ISCSI_CRC_LEN;
+ }
+
length = min(buf_len, OFFLOAD_BUF_SIZE);
buf = kzalloc(length, GFP_ATOMIC);
@@ -75,41 +81,12 @@ int iscsit_dump_data_payload(
rx_got = rx_data(conn, &iov, 1, size);
if (rx_got != size) {
ret = DATAOUT_CANNOT_RECOVER;
- goto out;
+ break;
}
offset += size;
}
- if (!dump_padding_digest)
- goto out;
-
- padding = ((-buf_len) & 3);
- if (padding != 0) {
- iov.iov_len = padding;
- iov.iov_base = pad_bytes;
-
- rx_got = rx_data(conn, &iov, 1, padding);
- if (rx_got != padding) {
- ret = DATAOUT_CANNOT_RECOVER;
- goto out;
- }
- }
-
- if (conn->conn_ops->DataDigest) {
- u32 data_crc;
-
- iov.iov_len = ISCSI_CRC_LEN;
- iov.iov_base = &data_crc;
-
- rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
- if (rx_got != ISCSI_CRC_LEN) {
- ret = DATAOUT_CANNOT_RECOVER;
- goto out;
- }
- }
-
-out:
kfree(buf);
return ret;
}
@@ -797,14 +774,14 @@ static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
return ooo_cmdsn;
}
-/*
- * Called with sess->cmdsn_mutex held.
- */
static int iscsit_attach_ooo_cmdsn(
struct iscsi_session *sess,
struct iscsi_ooo_cmdsn *ooo_cmdsn)
{
struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
+
+ lockdep_assert_held(&sess->cmdsn_mutex);
+
/*
* We attach the struct iscsi_ooo_cmdsn entry to the out of order
* list in increasing CmdSN order.
@@ -871,15 +848,14 @@ void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
mutex_unlock(&sess->cmdsn_mutex);
}
-/*
- * Called with sess->cmdsn_mutex held.
- */
int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
{
int ooo_count = 0;
struct iscsi_cmd *cmd = NULL;
struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+ lockdep_assert_held(&sess->cmdsn_mutex);
+
list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
&sess->sess_ooo_cmdsn_list, ooo_list) {
if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
@@ -980,7 +956,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
- iscsit_set_unsoliticed_dataout(cmd);
+ iscsit_set_unsolicited_dataout(cmd);
}
return transport_handle_cdb_direct(&cmd->se_cmd);
@@ -1232,9 +1208,6 @@ void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
spin_unlock_bh(&cmd->dataout_timeout_lock);
}
-/*
- * Called with cmd->dataout_timeout_lock held.
- */
void iscsit_start_dataout_timer(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
@@ -1242,6 +1215,8 @@ void iscsit_start_dataout_timer(
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+ lockdep_assert_held(&cmd->dataout_timeout_lock);
+
if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
return;
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 86987da86dd6..3ac494f63a0b 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -56,9 +56,6 @@
extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;
-/*
- * Called with cmd->r2t_lock held.
- */
int iscsit_add_r2t_to_list(
struct iscsi_cmd *cmd,
u32 offset,
@@ -68,6 +65,8 @@ int iscsit_add_r2t_to_list(
{
struct iscsi_r2t *r2t;
+ lockdep_assert_held(&cmd->r2t_lock);
+
r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
if (!r2t) {
pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
@@ -128,11 +127,10 @@ struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
return NULL;
}
-/*
- * Called with cmd->r2t_lock held.
- */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
+ lockdep_assert_held(&cmd->r2t_lock);
+
list_del(&r2t->r2t_list);
kmem_cache_free(lio_r2t_cache, r2t);
}
@@ -762,8 +760,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
iscsit_remove_cmd_from_response_queue(cmd, conn);
}
- if (conn && conn->conn_transport->iscsit_release_cmd)
- conn->conn_transport->iscsit_release_cmd(conn, cmd);
+ if (conn && conn->conn_transport->iscsit_unmap_cmd)
+ conn->conn_transport->iscsit_unmap_cmd(conn, cmd);
}
void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
@@ -956,9 +954,6 @@ void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
spin_unlock_bh(&conn->nopin_timer_lock);
}
-/*
- * Called with conn->nopin_timer_lock held.
- */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
@@ -1016,13 +1011,13 @@ void iscsit_handle_nopin_timeout(struct timer_list *t)
iscsit_dec_conn_usage_count(conn);
}
-/*
- * Called with conn->nopin_timer_lock held.
- */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ lockdep_assert_held(&conn->nopin_timer_lock);
+
/*
* NOPIN timeout is disabled.
*/
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 7bd7c0c0db6f..3305b47fdf53 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -128,14 +128,6 @@ static void tcm_loop_submission_work(struct work_struct *work)
set_host_byte(sc, DID_ERROR);
goto out_done;
}
- if (scsi_bidi_cmnd(sc)) {
- struct scsi_data_buffer *sdb = scsi_in(sc);
-
- sgl_bidi = sdb->table.sgl;
- sgl_bidi_count = sdb->table.nents;
- se_cmd->se_cmd_flags |= SCF_BIDI;
-
- }
transfer_length = scsi_transfer_length(sc);
if (!scsi_prot_sg_count(sc) &&
@@ -304,12 +296,6 @@ static int tcm_loop_target_reset(struct scsi_cmnd *sc)
return FAILED;
}
-static int tcm_loop_slave_alloc(struct scsi_device *sd)
-{
- blk_queue_flag_set(QUEUE_FLAG_BIDI, sd->request_queue);
- return 0;
-}
-
static struct scsi_host_template tcm_loop_driver_template = {
.show_info = tcm_loop_show_info,
.proc_name = "tcm_loopback",
@@ -325,7 +311,6 @@ static struct scsi_host_template tcm_loop_driver_template = {
.cmd_per_lun = 1024,
.max_sectors = 0xFFFF,
.dma_boundary = PAGE_SIZE - 1,
- .slave_alloc = tcm_loop_slave_alloc,
.module = THIS_MODULE,
.track_queue_depth = 1,
};
@@ -560,11 +545,6 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
@@ -1159,7 +1139,6 @@ static const struct target_core_fabric_ops loop_ops = {
.release_cmd = tcm_loop_release_cmd,
.sess_get_index = tcm_loop_sess_get_index,
.write_pending = tcm_loop_write_pending,
- .write_pending_status = tcm_loop_write_pending_status,
.set_default_node_attributes = tcm_loop_set_default_node_attributes,
.get_cmd_state = tcm_loop_get_cmd_state,
.queue_data_in = tcm_loop_queue_data_in,
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 08cee13dfb9a..b0d3583998f0 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1749,11 +1749,6 @@ static int sbp_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int sbp_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
return;
@@ -2329,7 +2324,6 @@ static const struct target_core_fabric_ops sbp_ops = {
.release_cmd = sbp_release_cmd,
.sess_get_index = sbp_sess_get_index,
.write_pending = sbp_write_pending,
- .write_pending_status = sbp_write_pending_status,
.set_default_node_attributes = sbp_set_default_node_attrs,
.get_cmd_state = sbp_get_cmd_state,
.queue_data_in = sbp_queue_data_in,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 6b0d9beacf90..e09f0cf86bed 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -910,9 +910,6 @@ static int core_alua_write_tpg_metadata(
return (ret < 0) ? -EIO : 0;
}
-/*
- * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
- */
static int core_alua_update_tpg_primary_metadata(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
@@ -921,6 +918,8 @@ static int core_alua_update_tpg_primary_metadata(
char *path;
int len, rc;
+ lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);
+
md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
if (!md_buf) {
pr_err("Unable to allocate buf for ALUA metadata\n");
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 8e7fffbb8802..fc5ef31f5ba8 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -401,10 +401,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->write_pending()\n");
return -EINVAL;
}
- if (!tfo->write_pending_status) {
- pr_err("Missing tfo->write_pending_status()\n");
- return -EINVAL;
- }
if (!tfo->set_default_node_attributes) {
pr_err("Missing tfo->set_default_node_attributes()\n");
return -EINVAL;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 93c56f4a9911..1f8482b6473b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -404,9 +404,6 @@ int core_enable_device_list_for_node(
return 0;
}
-/*
- * Called with se_node_acl->lun_entry_mutex held.
- */
void core_disable_device_list_for_node(
struct se_lun *lun,
struct se_dev_entry *orig,
@@ -418,6 +415,9 @@ void core_disable_device_list_for_node(
* reference to se_device->dev_group.
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+
+ lockdep_assert_held(&nacl->lun_entry_mutex);
+
/*
* If the MappedLUN entry is being disabled, the entry in
* lun->lun_deve_list must be removed now before clearing the
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 397f38cb7f4e..1597a9ebadca 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1290,9 +1290,6 @@ static int core_scsi3_check_implicit_release(
return ret;
}
-/*
- * Called with struct t10_reservation->registration_lock held.
- */
static void __core_scsi3_free_registration(
struct se_device *dev,
struct t10_pr_registration *pr_reg,
@@ -1308,6 +1305,8 @@ static void __core_scsi3_free_registration(
struct se_dev_entry *deve;
char i_buf[PR_REG_ISID_ID_LEN];
+ lockdep_assert_held(&pr_tmpl->registration_lock);
+
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
@@ -2450,9 +2449,6 @@ core_scsi3_emulate_pro_reserve(struct se_cmd *cmd, int type, int scope,
}
}
-/*
- * Called with struct se_device->dev_reservation_lock held.
- */
static void __core_scsi3_complete_pro_release(
struct se_device *dev,
struct se_node_acl *se_nacl,
@@ -2464,6 +2460,8 @@ static void __core_scsi3_complete_pro_release(
char i_buf[PR_REG_ISID_ID_LEN];
int pr_res_type = 0, pr_res_scope = 0;
+ lockdep_assert_held(&dev->dev_reservation_lock);
+
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
@@ -2760,9 +2758,6 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
return 0;
}
-/*
- * Called with struct se_device->dev_reservation_lock held.
- */
static void __core_scsi3_complete_pro_preempt(
struct se_device *dev,
struct t10_pr_registration *pr_reg,
@@ -2775,6 +2770,8 @@ static void __core_scsi3_complete_pro_preempt(
const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
+ lockdep_assert_held(&dev->dev_reservation_lock);
+
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index ad0061e09d4c..3a1bb799a9ab 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -114,21 +114,6 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
spin_unlock(&se_cmd->t_state_lock);
return false;
}
- if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
- if (se_cmd->scsi_status) {
- pr_debug("Attempted to abort io tag: %llu early failure"
- " status: 0x%02x\n", se_cmd->tag,
- se_cmd->scsi_status);
- spin_unlock(&se_cmd->t_state_lock);
- return false;
- }
- }
- if (sess->sess_tearing_down) {
- pr_debug("Attempted to abort io tag: %llu already shutdown,"
- " skipping\n", se_cmd->tag);
- spin_unlock(&se_cmd->t_state_lock);
- return false;
- }
se_cmd->transport_state |= CMD_T_ABORTED;
if ((tmr_sess != se_cmd->se_sess) && tas)
@@ -232,33 +217,13 @@ static void core_tmr_drain_tmr_list(
continue;
spin_lock(&sess->sess_cmd_lock);
- spin_lock(&cmd->t_state_lock);
- if (!(cmd->transport_state & CMD_T_ACTIVE) ||
- (cmd->transport_state & CMD_T_FABRIC_STOP)) {
- spin_unlock(&cmd->t_state_lock);
- spin_unlock(&sess->sess_cmd_lock);
- continue;
- }
- if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
- spin_unlock(&cmd->t_state_lock);
- spin_unlock(&sess->sess_cmd_lock);
- continue;
- }
- if (sess->sess_tearing_down) {
- spin_unlock(&cmd->t_state_lock);
- spin_unlock(&sess->sess_cmd_lock);
- continue;
- }
- cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock(&cmd->t_state_lock);
+ rc = __target_check_io_state(cmd, sess, 0);
+ spin_unlock(&sess->sess_cmd_lock);
- rc = kref_get_unless_zero(&cmd->cmd_kref);
if (!rc) {
printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
- spin_unlock(&sess->sess_cmd_lock);
continue;
}
- spin_unlock(&sess->sess_cmd_lock);
list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ef9e75b359d4..e3f7e21e6614 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -664,11 +664,6 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
target_remove_from_state_list(cmd);
- /*
- * Clear struct se_cmd->se_lun before the handoff to FE.
- */
- cmd->se_lun = NULL;
-
spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* Determine if frontend context caller is requesting the stopping of
@@ -696,17 +691,6 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
return cmd->se_tfo->check_stop_free(cmd);
}
-static void transport_lun_remove_cmd(struct se_cmd *cmd)
-{
- struct se_lun *lun = cmd->se_lun;
-
- if (!lun)
- return;
-
- if (cmpxchg(&cmd->lun_ref_active, true, false))
- percpu_ref_put(&lun->lun_ref);
-}
-
static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -797,8 +781,6 @@ static void target_handle_abort(struct se_cmd *cmd)
WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
- transport_lun_remove_cmd(cmd);
-
transport_cmd_check_stop_to_fabric(cmd);
}
@@ -1711,7 +1693,6 @@ static void target_complete_tmr_failure(struct work_struct *work)
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
- transport_lun_remove_cmd(se_cmd);
transport_cmd_check_stop_to_fabric(se_cmd);
}
@@ -1902,7 +1883,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
goto queue_full;
check_stop:
- transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
@@ -2056,7 +2036,6 @@ void target_execute_cmd(struct se_cmd *cmd)
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_PROCESSING;
- cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
@@ -2201,7 +2180,6 @@ queue_status:
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
return;
}
- transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
}
@@ -2296,7 +2274,6 @@ static void target_complete_ok_work(struct work_struct *work)
if (ret)
goto queue_full;
- transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
}
@@ -2322,7 +2299,6 @@ static void target_complete_ok_work(struct work_struct *work)
if (ret)
goto queue_full;
- transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
}
@@ -2358,7 +2334,6 @@ queue_rsp:
if (ret)
goto queue_full;
- transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
}
@@ -2394,7 +2369,6 @@ queue_status:
break;
}
- transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
@@ -2721,9 +2695,6 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
*/
if (cmd->state_active)
target_remove_from_state_list(cmd);
-
- if (cmd->se_lun)
- transport_lun_remove_cmd(cmd);
}
if (aborted)
cmd->free_compl = &compl;
@@ -2765,7 +2736,6 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
ret = -ESHUTDOWN;
goto out;
}
- se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
percpu_ref_get(&se_sess->cmd_count);
out:
@@ -2796,6 +2766,9 @@ static void target_release_cmd_kref(struct kref *kref)
struct completion *abrt_compl = se_cmd->abrt_compl;
unsigned long flags;
+ if (se_cmd->lun_ref_active)
+ percpu_ref_put(&se_cmd->se_lun->lun_ref);
+
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_del_init(&se_cmd->se_cmd_list);
@@ -3273,6 +3246,22 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+/**
+ * target_send_busy - Send SCSI BUSY status back to the initiator
+ * @cmd: SCSI command for which to send a BUSY reply.
+ *
+ * Note: Only call this function if target_submit_cmd*() failed.
+ */
+int target_send_busy(struct se_cmd *cmd)
+{
+ WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
+
+ cmd->scsi_status = SAM_STAT_BUSY;
+ trace_target_cmd_complete(cmd);
+ return cmd->se_tfo->queue_status(cmd);
+}
+EXPORT_SYMBOL(target_send_busy);
+
static void target_tmr_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 5831e0eecea1..9704b135a7bc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1663,7 +1663,7 @@ static void tcmu_dev_kref_release(struct kref *kref)
WARN_ON(!all_expired);
tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
- kfree(udev->data_bitmap);
+ bitmap_free(udev->data_bitmap);
mutex_unlock(&udev->cmdr_lock);
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
@@ -1794,11 +1794,12 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
- /* We don't care if no one is listening */
- if (ret == -ESRCH)
- ret = 0;
- if (!ret)
- ret = tcmu_wait_genl_cmd_reply(udev);
+
+ /* Wait during an add as the listener may not be up yet */
+ if (ret == 0 ||
+ (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
+ return tcmu_wait_genl_cmd_reply(udev);
+
return ret;
}
@@ -1870,9 +1871,7 @@ static int tcmu_configure_device(struct se_device *dev)
info = &udev->uio_info;
mutex_lock(&udev->cmdr_lock);
- udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
- sizeof(unsigned long),
- GFP_KERNEL);
+ udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
mutex_unlock(&udev->cmdr_lock);
if (!udev->data_bitmap) {
ret = -ENOMEM;
@@ -1959,7 +1958,7 @@ err_register:
vfree(udev->mb_addr);
udev->mb_addr = NULL;
err_vzalloc:
- kfree(udev->data_bitmap);
+ bitmap_free(udev->data_bitmap);
udev->data_bitmap = NULL;
err_bitmap_alloc:
kfree(info->name);
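
The tcmu hunks above swap the open-coded bitmap allocation for the dedicated helpers, which pair up naturally and hide the BITS_TO_LONGS() arithmetic. Minimal shape:

	/* Helper pair adopted above, replacing
	 * kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), GFP_KERNEL).
	 */
	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	/* ... use map ... */
	bitmap_free(map);
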
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index c2e1fc927fdf..9be1418e919f 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -442,11 +442,6 @@ static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
return 0;
@@ -463,7 +458,6 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
.release_cmd = xcopy_pt_release_cmd,
.check_stop_free = xcopy_pt_check_stop_free,
.write_pending = xcopy_pt_write_pending,
- .write_pending_status = xcopy_pt_write_pending_status,
.queue_data_in = xcopy_pt_queue_data_in,
.queue_status = xcopy_pt_queue_status,
};
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 11d27b93b413..b8ced4458118 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -158,7 +158,6 @@ void ft_release_cmd(struct se_cmd *);
int ft_queue_status(struct se_cmd *);
int ft_queue_data_in(struct se_cmd *);
int ft_write_pending(struct se_cmd *);
-int ft_write_pending_status(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
void ft_queue_tm_resp(struct se_cmd *);
void ft_aborted_task(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index a183d4da7db2..f0529ba58f4c 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -184,13 +184,6 @@ int ft_queue_status(struct se_cmd *se_cmd)
return 0;
}
-int ft_write_pending_status(struct se_cmd *se_cmd)
-{
- struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
- return cmd->write_data_len != se_cmd->data_length;
-}
-
/*
* Send TX_RDY (transfer ready).
*/
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 1ce49518d440..c873a052fcb0 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -437,7 +437,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
.sess_get_index = ft_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = ft_write_pending,
- .write_pending_status = ft_write_pending_status,
.set_default_node_attributes = ft_set_default_node_attr,
.get_cmd_state = ft_get_cmd_state,
.queue_data_in = ft_queue_data_in,
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 720760cd493f..ba39647a690c 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -119,8 +119,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
static void bcm2835_thermal_debugfs(struct platform_device *pdev)
{
- struct thermal_zone_device *tz = platform_get_drvdata(pdev);
- struct bcm2835_thermal_data *data = tz->devdata;
+ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
struct debugfs_regset32 *regset;
data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
@@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
data->tz = tz;
- platform_set_drvdata(pdev, tz);
+ platform_set_drvdata(pdev, data);
/*
* Thermal_zone doesn't enable hwmon as default,
@@ -290,8 +289,8 @@ err_clk:
static int bcm2835_thermal_remove(struct platform_device *pdev)
{
- struct thermal_zone_device *tz = platform_get_drvdata(pdev);
- struct bcm2835_thermal_data *data = tz->devdata;
+ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
+ struct thermal_zone_device *tz = data->tz;
debugfs_remove_recursive(data->debugfsdir);
thermal_zone_of_sensor_unregister(&pdev->dev, tz);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6fff16113628..f7c1f49ec87f 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -536,12 +536,11 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
struct thermal_zone_device *tz, u32 power,
unsigned long *state)
{
- unsigned int cur_freq, target_freq;
+ unsigned int target_freq;
u32 last_load, normalised_power;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
- cur_freq = cpufreq_quick_get(policy->cpu);
power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
normalised_power = (power * 100) / last_load;
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index 45e7e5cbdffb..7c71ffb733a1 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -230,7 +230,7 @@ static void get_single_name(acpi_handle handle, char *name)
if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)))
pr_warn("Failed to get device name from acpi handle\n");
else {
- memcpy(name, buffer.pointer, ACPI_NAME_SIZE);
+ memcpy(name, buffer.pointer, ACPI_NAMESEG_SIZE);
kfree(buffer.pointer);
}
}
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 61ca7ce3624e..5f3ed24e26ec 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
INT3400_THERMAL_PASSIVE_1,
INT3400_THERMAL_ACTIVE,
INT3400_THERMAL_CRITICAL,
+ INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
+ INT3400_THERMAL_EMERGENCY_CALL_MODE,
+ INT3400_THERMAL_PASSIVE_2,
+ INT3400_THERMAL_POWER_BOSS,
+ INT3400_THERMAL_VIRTUAL_SENSOR,
+ INT3400_THERMAL_COOLING_MODE,
+ INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
INT3400_THERMAL_MAXIMUM_UUID,
};
@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
"42A441D6-AE6A-462b-A84B-4A8CE79027D3",
"3A95C389-E4B8-4629-A526-C52C88626BAE",
"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
+ "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
+ "5349962F-71E6-431D-9AE8-0A635B710AEE",
+ "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
+ "F5A35014-C209-46A4-993A-EB56DE7530A1",
+ "6ED722A7-9240-48A5-B479-31EEF723D7CF",
+ "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
+ "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
};
struct int3400_thermal_priv {
@@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
- }
+ int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
+ int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
+
priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
priv, &int3400_thermal_ops,
&int3400_thermal_params, 0, 0);
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 7571f7c2e7c9..ac7256b5f020 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -101,7 +101,7 @@ struct powerclamp_worker_data {
bool clamping;
};
-static struct powerclamp_worker_data * __percpu worker_data;
+static struct powerclamp_worker_data __percpu *worker_data;
static struct thermal_cooling_device *cooling_dev;
static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
* clamping kthread worker
@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
struct kthread_worker *worker;
- worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
+ worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
if (IS_ERR(worker))
return;
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 5c07a61447d3..e4ea7f6aef20 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -199,6 +199,9 @@ enum {
#define MT7622_TS1 0
#define MT7622_NUM_CONTROLLER 1
+/* The maximum number of banks */
+#define MAX_NUM_ZONES 8
+
/* The calibration coefficient of sensor */
#define MT7622_CALIBRATION 165
@@ -249,7 +252,7 @@ struct mtk_thermal_data {
const int num_controller;
const int *controller_offset;
bool need_switch_bank;
- struct thermal_bank_cfg bank_data[];
+ struct thermal_bank_cfg bank_data[MAX_NUM_ZONES];
};
struct mtk_thermal {
@@ -268,7 +271,7 @@ struct mtk_thermal {
s32 vts[MAX_NUM_VTS];
const struct mtk_thermal_data *conf;
- struct mtk_thermal_bank banks[];
+ struct mtk_thermal_bank banks[MAX_NUM_ZONES];
};
/* MT8183 thermal sensor data */
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 48eef552cba4..fc9399d9c082 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
struct exynos_tmu_data *data = p;
int value, ret = 0;
- if (!data || !data->tmu_read || !data->enabled)
+ if (!data || !data->tmu_read)
return -EINVAL;
+ else if (!data->enabled)
+ /*
+ * Called too early, probably
+ * from thermal_zone_of_sensor_register().
+ */
+ return -EAGAIN;
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 7416bdbd8576..b7980c856898 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -678,7 +678,6 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
}
shash->tfm = tfm;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
memset(hmac, 0, sizeof(hmac));
ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index b121d8f8f3d7..27aeca30eeae 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -266,7 +266,7 @@ MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc1
module_param_array(pc104_4, ulong, NULL, 0);
MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,...");
-static int rp_init(void);
+static int __init rp_init(void);
static void rp_cleanup_module(void);
module_init(rp_init);
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 98dbc796353f..53ca9ba6ab4b 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -153,7 +153,6 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
#ifdef CONFIG_SERIAL_8250_DMA
static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
.nr_channels = 2,
- .is_private = true,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 4095,
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index db5df3d54818..3bdd56a1021b 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
struct clk *clk;
};
-static inline bool ar933x_uart_console_enabled(void)
-{
- return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
int offset)
{
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
static struct ar933x_uart_port *
ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
.index = -1,
.data = &ar933x_uart_driver,
};
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
- if (!ar933x_uart_console_enabled())
- return;
-
- ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
static struct uart_driver ar933x_uart_driver = {
.owner = THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
- ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+ ar933x_console_ports[up->port.line] = up;
+#endif
ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
{
int ret;
- if (ar933x_uart_console_enabled())
- ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+ ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
ret = uart_register_driver(&ar933x_uart_driver);
if (ret)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 05147fe24343..0b4f36905321 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,6 +166,8 @@ struct atmel_uart_port {
unsigned int pending_status;
spinlock_t lock_suspended;
+ bool hd_start_rx; /* can start RX during half-duplex operation */
+
/* ISO7816 */
unsigned int fidi_min;
unsigned int fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
__raw_writeb(value, port->membase + ATMEL_US_THR);
}
+static inline int atmel_uart_is_half_duplex(struct uart_port *port)
+{
+ return ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
+ (port->iso7816.flags & SER_ISO7816_ENABLED);
+}
+
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
- if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED)
+ if (atmel_uart_is_half_duplex(port))
atmel_start_rx(port);
+
}
/*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
return;
if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
- if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED)
+ if (atmel_uart_is_half_duplex(port))
atmel_stop_rx(port);
if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
*/
if (!uart_circ_empty(xmit))
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
- else if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED) {
- /* DMA done, stop TX, start RX for RS485 */
- atmel_start_rx(port);
+ else if (atmel_uart_is_half_duplex(port)) {
+ /*
+ * DMA done, re-enable TXEMPTY and signal that we can stop
+ * TX and start RX for RS485
+ */
+ atmel_port->hd_start_rx = true;
+ atmel_uart_writel(port, ATMEL_US_IER,
+ atmel_port->tx_done_mask);
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
sg_dma_len(&atmel_port->sg_rx)/2,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(port->dev, "Preparing DMA cyclic failed\n");
+ goto chan_err;
+ }
desc->callback = atmel_complete_rx_dma;
desc->callback_param = port;
atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & atmel_port->tx_done_mask) {
- /* Either PDC or interrupt transmission */
atmel_uart_writel(port, ATMEL_US_IDR,
atmel_port->tx_done_mask);
+
+ /* Start RX if flag was set and FIFO is empty */
+ if (atmel_port->hd_start_rx) {
+ if (!(atmel_uart_readl(port, ATMEL_US_CSR)
+ & ATMEL_US_TXEMPTY))
+ dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
+
+ atmel_port->hd_start_rx = false;
+ atmel_start_rx(port);
+ return;
+ }
+
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
}
}
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
atmel_uart_writel(port, ATMEL_US_IER,
atmel_port->tx_done_mask);
} else {
- if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED) {
+ if (atmel_uart_is_half_duplex(port)) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 6fb312e7af71..bfe5e9e034ec 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
char *cptr = config;
struct console *cons;
- if (!strlen(config) || isspace(config[0]))
+ if (!strlen(config) || isspace(config[0])) {
+ err = 0;
goto noconfig;
+ }
kgdboc_io_ops.is_console = 0;
kgdb_tty_driver = NULL;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index f5bdde405627..450ba6d7996c 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
if (spi->dev.of_node) {
const struct of_device_id *of_id =
of_match_device(max310x_dt_ids, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
devtype = (struct max310x_devtype *)of_id->data;
} else {
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index ef89534dd760..e5d3ebab6dae 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -353,7 +353,6 @@ static void men_z135_handle_tx(struct men_z135_port *uart)
memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n);
xmit->tail = (xmit->tail + n) & (UART_XMIT_SIZE - 1);
- mmiowb();
iowrite32(n & 0x3ff, port->membase + MEN_Z135_TX_CTRL);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 231f751d1ef4..7e7b1559fa36 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (!match)
+ return -ENODEV;
+
/* Assume that all UART ports have a DT alias or none has */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 27235a526cce..4c188f4079b3 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.mapbase = r->start;
s->port.membase = ioremap(r->start, resource_size(r));
+ if (!s->port.membase) {
+ ret = -ENOMEM;
+ goto out_disable_clks;
+ }
s->port.ops = &mxs_auart_ops;
s->port.iotype = UPIO_MEM;
s->port.fifosize = MXS_AUART_FIFO_SIZE;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 9ed121f08a54..6157213a8359 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -192,8 +192,6 @@ enum {
#define PCH_UART_HAL_LOOP (PCH_UART_MCR_LOOP)
#define PCH_UART_HAL_AFE (PCH_UART_MCR_AFE)
-#define PCI_VENDOR_ID_ROHM 0x10DB
-
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
#define DEFAULT_UARTCLK 1843200 /* 1.8432 MHz */
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 3bcec1c20219..35e5f9c5d5be 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
- int baud;
+ int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 635178cf3eed..a31db15cd7c0 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
- return ret;
+ goto err_i2c;
}
#endif
@@ -1515,10 +1515,20 @@ static int __init sc16is7xx_init(void)
ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx spi --> %d\n", ret);
- return ret;
+ goto err_spi;
}
#endif
return ret;
+
+#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
+err_spi:
+#endif
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+ i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+err_i2c:
+#endif
+ uart_unregister_driver(&sc16is7xx_uart);
+ return ret;
}
module_init(sc16is7xx_init);
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 1b4008d022bf..d22ccb32aa9b 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -248,7 +248,6 @@ static void serial_txx9_initialize(struct uart_port *port)
sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
/* TX4925 BUG WORKAROUND. Accessing SIOC register
* immediately after soft reset causes bus error. */
- mmiowb();
udelay(1);
while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout)
udelay(1);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 060fcd42b6d5..3cd139752d3f 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit)) {
+ if (uart_circ_empty(xmit))
sci_stop_tx(port);
- } else {
- ctrl = serial_port_in(port, SCSCR);
-
- if (port->type != PORT_SCI) {
- serial_port_in(port, SCxSR); /* Dummy read */
- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
- }
- ctrl |= SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
- }
}
/* On SH3, SCIF may read end-of-break as a space->mark char */
@@ -2522,14 +2512,16 @@ done:
* center of the last stop bit in sampling clocks.
*/
int last_stop = bits * 2 - 1;
- int deviation = min_err * srr * last_stop / 2 / baud;
+ int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
+ (int)(srr + 1),
+ 2 * (int)baud);
if (abs(deviation) >= 2) {
/* At least two sampling clocks off at the
* last stop bit; we can increase the error
* margin by shifting the sampling point.
*/
- int shift = min(-8, max(7, deviation / 2));
+ int shift = clamp(deviation / 2, -8, 7);
hssrr |= (shift << HSCIF_SRHP_SHIFT) &
HSCIF_SRHP_MASK;
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 044c3cbdcfa4..a9e12b3bc31d 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
if (tty && C_HUPCL(tty))
tty_port_lower_dtr_rts(port);
- if (port->ops->shutdown)
+ if (port->ops && port->ops->shutdown)
port->ops->shutdown(port);
}
out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
*/
int tty_port_carrier_raised(struct tty_port *port)
{
- if (port->ops->carrier_raised == NULL)
+ if (!port->ops || !port->ops->carrier_raised)
return 1;
return port->ops->carrier_raised(port);
}
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
*/
void tty_port_raise_dtr_rts(struct tty_port *port)
{
- if (port->ops->dtr_rts)
+ if (port->ops && port->ops->dtr_rts)
port->ops->dtr_rts(port, 1);
}
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
*/
void tty_port_lower_dtr_rts(struct tty_port *port)
{
- if (port->ops->dtr_rts)
+ if (port->ops && port->ops->dtr_rts)
port->ops->dtr_rts(port, 0);
}
EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
if (!tty_port_initialized(port)) {
clear_bit(TTY_IO_ERROR, &tty->flags);
- if (port->ops->activate) {
+ if (port->ops && port->ops->activate) {
int retval = port->ops->activate(port, tty);
if (retval) {
mutex_unlock(&port->mutex);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index d34984aa646d..650c66886c80 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1520,7 +1520,8 @@ static void csi_J(struct vc_data *vc, int vpar)
return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
- update_region(vc, (unsigned long) start, count);
+ if (con_should_update(vc))
+ do_update_region(vc, (unsigned long) start, count);
vc->vc_need_wrap = 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 739f8960811a..ec666eb4b7b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
clear_bit(EVENT_RX_STALL, &acm->flags);
}
- if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+ if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
tty_port_tty_wakeup(&acm->port);
- clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
- }
}
/*
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 48277bbc15e4..73c8e6591746 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
do {
controller = of_find_node_with_property(controller, "phys");
+ if (!of_device_is_available(controller))
+ continue;
index = 0;
do {
if (arg0 == -1) {
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 8987cec9549d..ebcadaad89d1 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- /* Undo any residual pm_autopm_get_interface_* calls */
- for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
- usb_autopm_put_interface_no_suspend(intf);
- atomic_set(&intf->pm_usage_cnt, 0);
-
if (!error)
usb_autosuspend_device(udev);
@@ -1633,7 +1628,6 @@ void usb_autopm_put_interface(struct usb_interface *intf)
int status;
usb_mark_last_busy(udev);
- atomic_dec(&intf->pm_usage_cnt);
status = pm_runtime_put_sync(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1662,7 +1656,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
int status;
usb_mark_last_busy(udev);
- atomic_dec(&intf->pm_usage_cnt);
status = pm_runtime_put(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1684,7 +1677,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
- atomic_dec(&intf->pm_usage_cnt);
pm_runtime_put_noidle(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
@@ -1715,8 +1707,6 @@ int usb_autopm_get_interface(struct usb_interface *intf)
status = pm_runtime_get_sync(&intf->dev);
if (status < 0)
pm_runtime_put_sync(&intf->dev);
- else
- atomic_inc(&intf->pm_usage_cnt);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
@@ -1750,8 +1740,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
status = pm_runtime_get(&intf->dev);
if (status < 0 && status != -EINPROGRESS)
pm_runtime_put_noidle(&intf->dev);
- else
- atomic_inc(&intf->pm_usage_cnt);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
@@ -1775,7 +1763,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
- atomic_inc(&intf->pm_usage_cnt);
pm_runtime_get_noresume(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3189181bb628..975d7c1288e3 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2742,6 +2742,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
PHY_MODE_USB_HOST_SS);
if (retval)
+ retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
+ PHY_MODE_USB_HOST);
+ if (retval)
goto err_usb_phy_roothub_power_on;
retval = usb_phy_roothub_power_on(hcd->phy_roothub);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 82239f27c4cc..e844bb7b5676 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -820,9 +820,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
- if (size <= 0 || !buf || !index)
+ if (size <= 0 || !buf)
return -EINVAL;
buf[0] = 0;
+ if (index <= 0 || index >= 256)
+ return -EINVAL;
tbuf = kmalloc(256, GFP_NOIO);
if (!tbuf)
return -ENOMEM;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index fdc6e4e403e8..8cced3609e24 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
+#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
(kernel_ulong_t) &dwc3_pci_mrfld_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index d2652dccc699..cac991173ac0 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -94,7 +94,7 @@ static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
void *virt;
- virt = memblock_alloc_nopanic(PAGE_SIZE, PAGE_SIZE);
+ virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!virt)
return NULL;
@@ -533,8 +533,6 @@ static int xdbc_handle_external_reset(void)
xdbc_mem_init();
- mmiowb();
-
ret = xdbc_start();
if (ret < 0)
goto reset_out;
@@ -587,8 +585,6 @@ static int __init xdbc_early_setup(void)
xdbc_mem_init();
- mmiowb();
-
ret = xdbc_start();
if (ret < 0) {
writel(0, &xdbc.xdbc_reg->control);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 75b113a5b25c..f3816a5c861e 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -391,20 +391,20 @@ try_again:
req->complete = f_hidg_req_complete;
req->context = hidg;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
- goto release_write_pending_unlocked;
+ goto release_write_pending;
} else {
status = count;
}
- spin_unlock_irqrestore(&hidg->write_spinlock, flags);
return status;
release_write_pending:
spin_lock_irqsave(&hidg->write_spinlock, flags);
-release_write_pending_unlocked:
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 34f5982cab78..7f01f78b1d23 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1292,14 +1292,6 @@ static u32 usbg_sess_get_index(struct se_session *se_sess)
return 0;
}
-/*
- * XXX Error recovery: return != 0 if we expect writes. Dunno when that could be
- */
-static int usbg_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static void usbg_set_default_node_attrs(struct se_node_acl *nacl)
{
}
@@ -1725,7 +1717,6 @@ static const struct target_core_fabric_ops usbg_ops = {
.sess_get_index = usbg_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = usbg_send_write_request,
- .write_pending_status = usbg_write_pending_status,
.set_default_node_attributes = usbg_set_default_node_attrs,
.get_cmd_state = usbg_get_cmd_state,
.queue_data_in = usbg_send_read_response,
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index baf72f95f0f1..213b52508621 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -979,8 +979,18 @@ static int dummy_udc_start(struct usb_gadget *g,
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
- if (driver->max_speed == USB_SPEED_UNKNOWN)
+ switch (g->speed) {
+ /* All the speeds we support */
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ break;
+ default:
+ dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
+ driver->max_speed);
return -EINVAL;
+ }
/*
* SLAVE side init ... the layer above hardware, which
@@ -1784,9 +1794,10 @@ static void dummy_timer(struct timer_list *t)
/* Bus speed is 500000 bytes/ms, so use a little less */
total = 490000;
break;
- default:
+ default: /* Can't happen */
dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
- return;
+ total = 0;
+ break;
}
/* FIXME if HZ != 1000 this will probably misbehave ... */
@@ -1828,7 +1839,7 @@ restart:
/* Used up this frame's bandwidth? */
if (total <= 0)
- break;
+ continue;
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index b77f3126580e..c2011cd7df8c 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
break;
}
if (&req->req != _req) {
+ ep->stopped = stopped;
spin_unlock_irqrestore(&ep->dev->lock, flags);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f63f82450bf4..898339e5df10 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
(void) readl(&ep->dev->pci->pcimstctl);
writel(BIT(DMA_START), &dma->dmastat);
-
- if (!ep->is_in)
- stop_out_naking(ep);
}
static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
writel(BIT(DMA_START), &dma->dmastat);
return;
}
+ stop_out_naking(ep);
}
tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
break;
}
if (&req->req != _req) {
+ ep->stopped = stopped;
spin_unlock_irqrestore(&ep->dev->lock, flags);
- dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
- __func__);
+ ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 55c8c8abeacd..cded51f36fc1 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -368,7 +368,6 @@ struct pch_udc_dev {
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
-#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index c9233cddf9a2..c26228c25f99 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -126,8 +126,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
retval = dma_declare_coherent_memory(dev, mem->start,
mem->start - mem->parent->start,
- resource_size(mem),
- DMA_MEMORY_EXCLUSIVE);
+ resource_size(mem));
if (retval) {
dev_err(dev, "cannot declare coherent memory\n");
goto err1;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index a631dbb369d7..f88a0370659f 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -225,7 +225,7 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
}
ret = dma_declare_coherent_memory(&dev->dev, sram->start, sram->start,
- resource_size(sram), DMA_MEMORY_EXCLUSIVE);
+ resource_size(sram));
if (ret)
goto err_dma_declare;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 934584f0a20a..6343fbacd244 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
printk(KERN_INFO "driver %s\n", hcd_name);
workqueue = create_singlethread_workqueue("u132");
retval = platform_driver_register(&u132_platform_driver);
+ if (retval)
+ destroy_workqueue(workqueue);
+
return retval;
}
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index c78be578abb0..52e32644a4b2 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -421,8 +421,6 @@ static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
string_length = xhci_dbc_populate_strings(dbc->string);
xhci_dbc_init_contexts(xhci, string_length);
- mmiowb();
-
xhci_dbc_eps_init(xhci);
dbc->state = DS_INITIALIZED;
@@ -516,7 +514,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
return -1;
writel(0, &dbc->regs->control);
- xhci_dbc_mem_cleanup(xhci);
dbc->state = DS_DISABLED;
return 0;
@@ -562,8 +559,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
ret = xhci_do_dbc_stop(xhci);
spin_unlock_irqrestore(&dbc->lock, flags);
- if (!ret)
+ if (!ret) {
+ xhci_dbc_mem_cleanup(xhci);
pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+ }
}
static void
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e2eece693655..96a740543183 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
port_index = max_ports;
while (port_index--) {
u32 t1, t2;
-
+ int retries = 10;
+retry:
t1 = readl(ports[port_index]->addr);
t2 = xhci_port_state_to_neutral(t1);
portsc_buf[port_index] = 0;
- /* Bail out if a USB3 port has a new device in link training */
- if ((hcd->speed >= HCD_USB3) &&
+ /*
+ * Give a USB3 port in link training time to finish, but don't
+ * prevent suspend, as the port might be stuck
+ */
+ if ((hcd->speed >= HCD_USB3) && retries-- &&
(t1 & PORT_PLS_MASK) == XDEV_POLLING) {
- bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
- return -EBUSY;
+ msleep(XHCI_PORT_POLLING_LFPS_TIME);
+ spin_lock_irqsave(&xhci->lock, flags);
+ xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
+ port_index);
+ goto retry;
}
-
/* suspend ports in U0, or bail out for new connect changes */
if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
if ((t1 & PORT_CSC) && wake_enabled) {
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index a6e463715779..671bce18782c 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
if (!xhci_rcar_wait_for_pll_active(hcd))
return -ETIMEDOUT;
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
return xhci_rcar_download_firmware(hcd);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 40fa25c4d041..9215a28dad40 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
}
- if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
- DEV_SUPERSPEED_ANY(portsc)) {
+ if ((portsc & PORT_PLC) &&
+ DEV_SUPERSPEED_ANY(portsc) &&
+ ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
+ (portsc & PORT_PLS_MASK) == XDEV_U1 ||
+ (portsc & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
- /* We've just brought the device into U0 through either the
+ /* We've just brought the device into U0/1/2 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
* initiated remote wake, don't pass up the link state change,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 652dc36e3012..9334cdee382a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -452,6 +452,14 @@ struct xhci_op_regs {
*/
#define XHCI_DEFAULT_BESL 4
+/*
+ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check status 10 times with a 36ms sleep in places where we need to
+ * wait for polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME 36
+
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d72b7d1d383..04684849d683 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
*/
hub->port_swap = USB251XB_DEF_PORT_SWAP;
of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
- if ((port >= 0) && (port <= data->port_cnt))
+ if (port <= data->port_cnt)
hub->port_swap |= BIT(port);
}
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
dev);
int err;
- if (np) {
+ if (np && of_id) {
err = usb251xb_get_ofdata(hub,
(struct usb251xb_data *)of_id->data);
if (err) {
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 6d9fd5f64903..7b306aa22d25 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -314,6 +314,7 @@ static void yurex_disconnect(struct usb_interface *interface)
usb_deregister_dev(interface, &yurex_class);
/* prevent more I/O from starting */
+ usb_poison_urb(dev->urb);
mutex_lock(&dev->io_mutex);
dev->interface = NULL;
mutex_unlock(&dev->io_mutex);
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index bcc23486c4ed..928c2cd6fc00 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -6,6 +6,7 @@ config USB_MTU3
tristate "MediaTek USB3 Dual Role controller"
depends on USB || USB_GADGET
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on EXTCON || !EXTCON
select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
help
Say Y or M here if your system runs on MediaTek SoCs with
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fffe23ab0189..979bef9bfb6b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8f5b17471759..1d8461ae2c34 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index b863bedb55a1..5755f0df0025 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -567,7 +567,9 @@
/*
* NovaTech product ids (FTDI_VID)
*/
-#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
/*
* Synapse Wireless product ids (FTDI_VID)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index fc52ac75fbf6..18110225d506 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
if (!urbtrack)
return -ENOMEM;
- kref_get(&mos_parport->ref_count);
- urbtrack->mos_parport = mos_parport;
urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urbtrack->urb) {
kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
usb_sndctrlpipe(usbdev, 0),
(unsigned char *)urbtrack->setup,
NULL, 0, async_complete, urbtrack);
+ kref_get(&mos_parport->ref_count);
+ urbtrack->mos_parport = mos_parport;
kref_init(&urbtrack->ref_count);
INIT_LIST_HEAD(&urbtrack->urblist_entry);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 11b21d9410f3..83869065b802 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
+#define QUECTEL_PRODUCT_EM12 0x0512
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
- { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 31b024441938..cc794e25a0b6 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -763,18 +763,16 @@ static void rts51x_suspend_timer_fn(struct timer_list *t)
break;
case RTS51X_STAT_IDLE:
case RTS51X_STAT_SS:
- usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
- atomic_read(&us->pusb_intf->pm_usage_cnt),
+ usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
- if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
+ if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
usb_stor_dbg(us, "Ready to enter SS state\n");
rts51x_set_stat(chip, RTS51X_STAT_SS);
/* ignore mass storage interface's children */
pm_suspend_ignore_children(&us->pusb_intf->dev, true);
usb_autopm_put_interface_async(us->pusb_intf);
- usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
- atomic_read(&us->pusb_intf->pm_usage_cnt),
+ usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
}
break;
@@ -807,11 +805,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
int ret;
if (working_scsi(srb)) {
- usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
- atomic_read(&us->pusb_intf->pm_usage_cnt),
+ usb_stor_dbg(us, "working scsi, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
- if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
+ if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
ret = usb_autopm_get_interface(us->pusb_intf);
usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 36742e8e7edc..a6d68191c861 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -368,25 +368,19 @@ static void uas_data_cmplt(struct urb *urb)
struct scsi_cmnd *cmnd = urb->context;
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
- struct scsi_data_buffer *sdb = NULL;
+ struct scsi_data_buffer *sdb = &cmnd->sdb;
unsigned long flags;
int status = urb->status;
spin_lock_irqsave(&devinfo->lock, flags);
if (cmdinfo->data_in_urb == urb) {
- sdb = scsi_in(cmnd);
cmdinfo->state &= ~DATA_IN_URB_INFLIGHT;
cmdinfo->data_in_urb = NULL;
} else if (cmdinfo->data_out_urb == urb) {
- sdb = scsi_out(cmnd);
cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT;
cmdinfo->data_out_urb = NULL;
}
- if (sdb == NULL) {
- WARN_ON_ONCE(1);
- goto out;
- }
if (devinfo->resetting)
goto out;
@@ -401,9 +395,9 @@ static void uas_data_cmplt(struct urb *urb)
if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
uas_log_cmd_state(cmnd, "data cmplt err", status);
/* error: no data transfered */
- sdb->resid = sdb->length;
+ scsi_set_resid(cmnd, sdb->length);
} else {
- sdb->resid = sdb->length - urb->actual_length;
+ scsi_set_resid(cmnd, sdb->length - urb->actual_length);
}
uas_try_complete(cmnd, __func__);
out:
@@ -426,8 +420,7 @@ static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
struct usb_device *udev = devinfo->udev;
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct urb *urb = usb_alloc_urb(0, gfp);
- struct scsi_data_buffer *sdb = (dir == DMA_FROM_DEVICE)
- ? scsi_in(cmnd) : scsi_out(cmnd);
+ struct scsi_data_buffer *sdb = &cmnd->sdb;
unsigned int pipe = (dir == DMA_FROM_DEVICE)
? devinfo->data_in_pipe : devinfo->data_out_pipe;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 0f62db091d8d..a2233d72ae7c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -37,6 +37,7 @@
S(SRC_ATTACHED), \
S(SRC_STARTUP), \
S(SRC_SEND_CAPABILITIES), \
+ S(SRC_SEND_CAPABILITIES_TIMEOUT), \
S(SRC_NEGOTIATE_CAPABILITIES), \
S(SRC_TRANSITION_SUPPLY), \
S(SRC_READY), \
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
/* port->hard_reset_count = 0; */
port->caps_count = 0;
port->pd_capable = true;
- tcpm_set_state_cond(port, hard_reset_state(port),
+ tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
PD_T_SEND_SOURCE_CAP);
}
break;
+ case SRC_SEND_CAPABILITIES_TIMEOUT:
+ /*
+ * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
+ *
+ * PD 2.0 sinks are supposed to accept src-capabilities with a
+ * 3.0 header and simply ignore any src PDOs which the sink does
+ * not understand, such as PPS, but some 2.0 sinks instead ignore
+ * the entire PD_DATA_SOURCE_CAP message, causing contract
+ * negotiation to fail.
+ *
+ * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
+ * sending src-capabilities with a lower PD revision to
+ * make these broken sinks work.
+ */
+ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
+ tcpm_set_state(port, HARD_RESET_SEND, 0);
+ } else if (port->negotiated_rev > PD_REV20) {
+ port->negotiated_rev--;
+ port->hard_reset_count = 0;
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ } else {
+ tcpm_set_state(port, hard_reset_state(port), 0);
+ }
+ break;
case SRC_NEGOTIATE_CAPABILITIES:
ret = tcpm_pd_check_request(port);
if (ret < 0) {
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e19383..6770afd40765 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
wcove->dev = &pdev->dev;
wcove->regmap = pmic->regmap;
- irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
- platform_get_irq(pdev, 0));
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
+ }
+
+ irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
if (irq < 0)
return irq;
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 97b09a42a10c..dbfb2f24d71e 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -361,16 +361,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
}
if (usb_endpoint_xfer_isoc(epd)) {
- /* validate packet size and number of packets */
- unsigned int maxp, packets, bytes;
-
- maxp = usb_endpoint_maxp(epd);
- maxp *= usb_endpoint_maxp_mult(epd);
- bytes = pdu->u.cmd_submit.transfer_buffer_length;
- packets = DIV_ROUND_UP(bytes, maxp);
-
+ /* validate number of packets */
if (pdu->u.cmd_submit.number_of_packets < 0 ||
- pdu->u.cmd_submit.number_of_packets > packets) {
+ pdu->u.cmd_submit.number_of_packets >
+ USBIP_MAX_ISO_PACKETS) {
dev_err(&sdev->udev->dev,
"CMD_SUBMIT: isoc invalid num packets %d\n",
pdu->u.cmd_submit.number_of_packets);
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index bf8afe9b5883..8be857a4fa13 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -121,6 +121,13 @@ extern struct device_attribute dev_attr_usbip_debug;
#define USBIP_DIR_OUT 0x00
#define USBIP_DIR_IN 0x01
+/*
+ * Arbitrary limit for the maximum number of isochronous packets in an URB;
+ * compare, for example, the uhci_submit_isochronous function in
+ * drivers/usb/host/uhci-q.c
+ */
+#define USBIP_MAX_ISO_PACKETS 1024
+
/**
* struct usbip_header_basic - data pertinent to every request
* @command: the usbip request type
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index a25659b5a5d1..3fa20e95a6bb 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
- pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+ pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
vendor, device, subvendor, subdevice,
class, class_mask, rc);
else
- pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+ pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
vendor, device, subvendor, subdevice,
class, class_mask);
}
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 8dbb270998f4..6b64e45a5269 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1398,7 +1398,7 @@ unlock_exit:
mutex_unlock(&container->lock);
}
-const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
+static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
.name = "iommu-vfio-powerpc",
.owner = THIS_MODULE,
.open = tce_iommu_open,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 73652e21efec..d0f731c9920a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
MODULE_PARM_DESC(disable_hugepages,
"Disable VFIO IOMMU support for IOMMU hugepages.");
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+ "Maximum number of user DMA mappings per container (65535).");
+
struct vfio_iommu {
struct list_head domain_list;
struct vfio_domain *external_domain; /* domain for external user */
struct mutex lock;
struct rb_root dma_list;
struct blocking_notifier_head notifier;
+ unsigned int dma_avail;
bool v2;
bool nesting;
};
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
vfio_unlink_dma(iommu, dma);
put_task_struct(dma->task);
kfree(dma);
+ iommu->dma_avail++;
}
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
goto out_unlock;
}
+ if (!iommu->dma_avail) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma) {
ret = -ENOMEM;
goto out_unlock;
}
+ iommu->dma_avail--;
dma->iova = iova;
dma->vaddr = vaddr;
dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
INIT_LIST_HEAD(&iommu->domain_list);
iommu->dma_list = RB_ROOT;
+ iommu->dma_avail = dma_entry_limit;
mutex_init(&iommu->lock);
BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 23593cb23dd0..618fb6461017 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -346,11 +346,6 @@ static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
return;
@@ -2302,7 +2297,6 @@ static const struct target_core_fabric_ops vhost_scsi_ops = {
.sess_get_index = vhost_scsi_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = vhost_scsi_write_pending,
- .write_pending_status = vhost_scsi_write_pending_status,
.set_default_node_attributes = vhost_scsi_set_default_node_attrs,
.get_cmd_state = vhost_scsi_get_cmd_state,
.queue_data_in = vhost_scsi_queue_data_in,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a2e5dc7716e2..351af88231ad 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
u64 start, u64 size, u64 end,
u64 userspace_addr, int perm)
{
- struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+ struct vhost_umem_node *tmp, *node;
+ if (!size)
+ return -EFAULT;
+
+ node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
@@ -1188,7 +1192,7 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
struct vring_used __user *used)
{
- size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ size_t s __maybe_unused = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return access_ok(desc, num * sizeof *desc) &&
access_ok(avail,
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index e695adb0e573..2dc5703eac51 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -2844,8 +2844,8 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis
* in some desktop G4s), Via (M9+ chip on iBook G4) and
* Snowy (M11 chip on iBook G4 manufactured after July 2005)
*/
- if (!strcmp(rinfo->of_node->name, "ATY,JasperParent") ||
- !strcmp(rinfo->of_node->name, "ATY,SnowyParent")) {
+ if (of_node_name_eq(rinfo->of_node, "ATY,JasperParent") ||
+ of_node_name_eq(rinfo->of_node, "ATY,SnowyParent")) {
rinfo->reinit_func = radeon_reinitialize_M10;
rinfo->pm_mode |= radeon_pm_off;
}
@@ -2855,7 +2855,7 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis
rinfo->pm_mode |= radeon_pm_off;
}
#endif
- if (!strcmp(rinfo->of_node->name, "ATY,ViaParent")) {
+ if (of_node_name_eq(rinfo->of_node, "ATY,ViaParent")) {
rinfo->reinit_func = radeon_reinitialize_M9P;
rinfo->pm_mode |= radeon_pm_off;
}
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index 9af54c2368fd..a6dce1a78490 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -486,8 +486,8 @@ static int cg14_probe(struct platform_device *op)
info->var.xres);
info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
- if (!strcmp(dp->parent->name, "sbus") ||
- !strcmp(dp->parent->name, "sbi")) {
+ if (of_node_name_eq(dp->parent, "sbus") ||
+ of_node_name_eq(dp->parent, "sbi")) {
info->fix.smem_start = op->resource[0].start;
par->iospace = op->resource[0].flags & IORESOURCE_BITS;
} else {
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index 1bd95b02f3aa..6d42def8436b 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -369,7 +369,7 @@ static int cg3_probe(struct platform_device *op)
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
- if (!strcmp(dp->name, "cgRDI"))
+ if (of_node_name_eq(dp, "cgRDI"))
par->flags |= CG3_FLAG_RDI;
if (par->flags & CG3_FLAG_RDI)
cg3_rdi_maybe_fixup_var(&info->var, dp);
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 40182ed85648..ca549e1532e6 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -349,7 +349,7 @@ static void init_chips(struct fb_info *p, unsigned long addr)
static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
{
struct fb_info *p;
- unsigned long addr, size;
+ unsigned long addr;
unsigned short cmd;
int rc = -ENODEV;
@@ -361,7 +361,6 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
goto err_disable;
addr = pci_resource_start(dp, 0);
- size = pci_resource_len(dp, 0);
if (addr == 0)
goto err_disable;
diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c
index 39509ccd92f1..3b5bd666b952 100644
--- a/drivers/video/fbdev/core/fb_cmdline.c
+++ b/drivers/video/fbdev/core/fb_cmdline.c
@@ -75,36 +75,33 @@ EXPORT_SYMBOL(fb_get_options);
* NOTE: This function is a __setup and __init function.
* It only stores the options. Drivers have to call
* fb_get_options() as necessary.
- *
- * Returns zero.
- *
*/
static int __init video_setup(char *options)
{
- int i, global = 0;
-
if (!options || !*options)
- global = 1;
+ goto out;
- if (!global && !strncmp(options, "ofonly", 6)) {
+ if (!strncmp(options, "ofonly", 6)) {
ofonly = 1;
- global = 1;
+ goto out;
}
- if (!global && !strchr(options, ':')) {
- fb_mode_option = options;
- global = 1;
- }
+ if (strchr(options, ':')) {
+ /* named */
+ int i;
- if (!global) {
for (i = 0; i < FB_MAX; i++) {
if (video_options[i] == NULL) {
video_options[i] = options;
break;
}
}
+ } else {
+ /* global */
+ fb_mode_option = options;
}
+out:
return 1;
}
__setup("video=", video_setup);
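The rewritten parser sorts every option into exactly two buckets, which a few illustrative command lines make concrete (the mode strings are examples only):

	video=ofonly            only use the OpenFirmware framebuffer
	video=1024x768@60       no ':'  -> stored globally in fb_mode_option
	video=vesafb:ywrap      has ':' -> queued per-driver in video_options[]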
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index bfa1360ec750..cd059a801662 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -656,11 +656,14 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
kfree(save);
}
+ if (logo_shown == FBCON_LOGO_DONTSHOW)
+ return;
+
if (logo_lines > vc->vc_bottom) {
logo_shown = FBCON_LOGO_CANSHOW;
printk(KERN_INFO
"fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
- } else if (logo_shown != FBCON_LOGO_DONTSHOW) {
+ } else {
logo_shown = FBCON_LOGO_DRAW;
vc->vc_top = logo_lines;
}
@@ -999,7 +1002,7 @@ static const char *fbcon_startup(void)
if (!softback_buf) {
softback_buf =
(unsigned long)
- kmalloc(fbcon_softback_size,
+ kvmalloc(fbcon_softback_size,
GFP_KERNEL);
if (!softback_buf) {
fbcon_softback_size = 0;
@@ -1008,7 +1011,7 @@ static const char *fbcon_startup(void)
}
} else {
if (softback_buf) {
- kfree((void *) softback_buf);
+ kvfree((void *) softback_buf);
softback_buf = 0;
softback_top = 0;
}
@@ -1066,6 +1069,9 @@ static void fbcon_init(struct vc_data *vc, int init)
cap = info->flags;
+ if (console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
(info->fix.type == FB_TYPE_TEXT))
logo = 0;
@@ -3672,7 +3678,7 @@ static void fbcon_exit(void)
}
#endif
- kfree((void *)softback_buf);
+ kvfree((void *)softback_buf);
softback_buf = 0UL;
for_each_registered_fb(i) {
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index cb43a2258c51..4721491e6c8c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -431,6 +431,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
{
unsigned int x;
+ if (image->width > info->var.xres || image->height > info->var.yres)
+ return;
+
if (rotate == FB_ROTATE_UR) {
for (x = 0;
x < num && image->dx + image->width <= info->var.xres;
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index dd3128990776..3558a70a6664 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -978,6 +978,8 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
get_monspecs(edid, specs);
specs->modedb = fb_create_modedb(edid, &specs->modedb_len, specs);
+ if (!specs->modedb)
+ return;
/*
* Workaround for buggy EDIDs that sets that the first
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ba906876cc45..9e529cc2b4ff 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -464,7 +464,8 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+ if (efi_enabled(EFI_BOOT) &&
+ !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
if ((efifb_fix.smem_start + efifb_fix.smem_len) >
(md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index 6b1915872af1..b7aee0c427a8 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -944,7 +944,7 @@ static int ffb_probe(struct platform_device *op)
info->var.accel_flags = FB_ACCELF_TEXT;
- if (!strcmp(dp->name, "SUNW,afb"))
+ if (of_node_name_eq(dp, "SUNW,afb"))
par->flags |= FFB_FLAG_AFB;
par->board_type = of_getintprop_default(dp, "board_type", 0);
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index f4f76373b2a8..b1906cf5a8f0 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -33,6 +33,8 @@
#include <linux/pci.h>
#include <linux/cs5535.h>
+#include <asm/olpc.h>
+
#include "gxfb.h"
static char *mode_option;
@@ -107,9 +109,6 @@ static struct fb_videomode gx_modedb[] = {
FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
};
-#ifdef CONFIG_OLPC
-#include <asm/olpc.h>
-
static struct fb_videomode gx_dcon_modedb[] = {
/* The only mode the DCON has is 1200x900 */
{ NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
@@ -128,14 +127,6 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
}
}
-#else
-static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
-{
- *modedb = (struct fb_videomode *) gx_modedb;
- *size = ARRAY_SIZE(gx_modedb);
-}
-#endif
-
static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
if (var->xres > 1600 || var->yres > 1200)
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index 138da6cb6cbc..17ab905811b1 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -23,6 +23,8 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
+#include <asm/olpc.h>
+
#include "lxfb.h"
static char *mode_option;
@@ -216,9 +218,6 @@ static struct fb_videomode geode_modedb[] = {
0, FB_VMODE_NONINTERLACED, 0 },
};
-#ifdef CONFIG_OLPC
-#include <asm/olpc.h>
-
static struct fb_videomode olpc_dcon_modedb[] = {
/* The only mode the DCON has is 1200x900 */
{ NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
@@ -237,14 +236,6 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
}
}
-#else
-static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
-{
- *modedb = (struct fb_videomode *) geode_modedb;
- *size = ARRAY_SIZE(geode_modedb);
-}
-#endif
-
static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
if (var->xres > 1920 || var->yres > 1440)
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 5d9670daf60e..4b9615e4ce74 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1497,8 +1497,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (pdev->device) {
case PCI_DEVICE_ID_IMS_TT128: /* IMS,tt128mbA */
par->ramdac = IBM;
- if (dp && ((strcmp(dp->name, "IMS,tt128mb8") == 0) ||
- (strcmp(dp->name, "IMS,tt128mb8A") == 0)))
+ if (of_node_name_eq(dp, "IMS,tt128mb8") ||
+ of_node_name_eq(dp, "IMS,tt128mb8A"))
par->ramdac = TVP;
break;
case PCI_DEVICE_ID_IMS_TT3D: /* IMS,tt3d */
diff --git a/drivers/video/fbdev/mbx/mbxdebugfs.c b/drivers/video/fbdev/mbx/mbxdebugfs.c
index 2bd328883178..09af721638fb 100644
--- a/drivers/video/fbdev/mbx/mbxdebugfs.c
+++ b/drivers/video/fbdev/mbx/mbxdebugfs.c
@@ -211,36 +211,22 @@ static const struct file_operations misc_fops = {
static void mbxfb_debugfs_init(struct fb_info *fbi)
{
struct mbxfb_info *mfbi = fbi->par;
- struct mbxfb_debugfs_data *dbg;
-
- dbg = kzalloc(sizeof(struct mbxfb_debugfs_data), GFP_KERNEL);
- mfbi->debugfs_data = dbg;
-
- dbg->dir = debugfs_create_dir("mbxfb", NULL);
- dbg->sysconf = debugfs_create_file("sysconf", 0444, dbg->dir,
- fbi, &sysconf_fops);
- dbg->clock = debugfs_create_file("clock", 0444, dbg->dir,
- fbi, &clock_fops);
- dbg->display = debugfs_create_file("display", 0444, dbg->dir,
- fbi, &display_fops);
- dbg->gsctl = debugfs_create_file("gsctl", 0444, dbg->dir,
- fbi, &gsctl_fops);
- dbg->sdram = debugfs_create_file("sdram", 0444, dbg->dir,
- fbi, &sdram_fops);
- dbg->misc = debugfs_create_file("misc", 0444, dbg->dir,
- fbi, &misc_fops);
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mbxfb", NULL);
+ mfbi->debugfs_dir = dir;
+
+ debugfs_create_file("sysconf", 0444, dir, fbi, &sysconf_fops);
+ debugfs_create_file("clock", 0444, dir, fbi, &clock_fops);
+ debugfs_create_file("display", 0444, dir, fbi, &display_fops);
+ debugfs_create_file("gsctl", 0444, dir, fbi, &gsctl_fops);
+ debugfs_create_file("sdram", 0444, dir, fbi, &sdram_fops);
+ debugfs_create_file("misc", 0444, dir, fbi, &misc_fops);
}
static void mbxfb_debugfs_remove(struct fb_info *fbi)
{
struct mbxfb_info *mfbi = fbi->par;
- struct mbxfb_debugfs_data *dbg = mfbi->debugfs_data;
-
- debugfs_remove(dbg->misc);
- debugfs_remove(dbg->sdram);
- debugfs_remove(dbg->gsctl);
- debugfs_remove(dbg->display);
- debugfs_remove(dbg->clock);
- debugfs_remove(dbg->sysconf);
- debugfs_remove(dbg->dir);
+
+ debugfs_remove_recursive(mfbi->debugfs_dir);
}
diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
index 539b85da0897..6ded480a69b4 100644
--- a/drivers/video/fbdev/mbx/mbxfb.c
+++ b/drivers/video/fbdev/mbx/mbxfb.c
@@ -74,7 +74,7 @@ struct mbxfb_info {
u32 pseudo_palette[MAX_PALETTES];
#ifdef CONFIG_FB_MBX_DEBUG
- void *debugfs_data;
+ struct dentry *debugfs_dir;
#endif
};
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 057d3cdef92e..fbc6eafb63c7 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -141,6 +141,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
out_le32(par->cmap_adr + 0x58,
in_le32(par->cmap_adr + 0x58) & ~0x20);
+ /* fall through */
case cmap_r128:
/* Set palette index & data */
out_8(par->cmap_adr + 0xb0, regno);
@@ -210,6 +211,7 @@ static int offb_blank(int blank, struct fb_info *info)
/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
out_le32(par->cmap_adr + 0x58,
in_le32(par->cmap_adr + 0x58) & ~0x20);
+ /* fall through */
case cmap_r128:
/* Set palette index & data */
out_8(par->cmap_adr + 0xb0, i);
@@ -646,7 +648,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
}
#endif
/* kludge for valkyrie */
- if (strcmp(dp->name, "valkyrie") == 0)
+ if (of_node_name_eq(dp, "valkyrie"))
address += 0x1000;
offb_init_fb(no_real_node ? "bootx" : NULL,
width, height, depth, pitch, address,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index b4bcf3a4a647..b5956a1a30d4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -110,19 +110,12 @@ DEFINE_SHOW_ATTRIBUTE(dss);
static struct dentry *dss_debugfs_dir;
-static int dss_initialize_debugfs(void)
+static void dss_initialize_debugfs(void)
{
dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
- if (IS_ERR(dss_debugfs_dir)) {
- int err = PTR_ERR(dss_debugfs_dir);
- dss_debugfs_dir = NULL;
- return err;
- }
debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
&dss_debug_dump_clocks, &dss_fops);
-
- return 0;
}
static void dss_uninitialize_debugfs(void)
@@ -130,26 +123,19 @@ static void dss_uninitialize_debugfs(void)
debugfs_remove_recursive(dss_debugfs_dir);
}
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+void dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
{
- struct dentry *d;
-
- d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
- write, &dss_fops);
-
- return PTR_ERR_OR_ZERO(d);
+ debugfs_create_file(name, S_IRUGO, dss_debugfs_dir, write, &dss_fops);
}
#else /* CONFIG_FB_OMAP2_DSS_DEBUGFS */
-static inline int dss_initialize_debugfs(void)
+static inline void dss_initialize_debugfs(void)
{
- return 0;
}
static inline void dss_uninitialize_debugfs(void)
{
}
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+void dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
{
- return 0;
}
#endif /* CONFIG_FB_OMAP2_DSS_DEBUGFS */
@@ -182,15 +168,11 @@ static struct notifier_block omap_dss_pm_notif_block = {
static int __init omap_dss_probe(struct platform_device *pdev)
{
- int r;
-
core.pdev = pdev;
dss_features_init(omapdss_get_version());
- r = dss_initialize_debugfs();
- if (r)
- goto err_debugfs;
+ dss_initialize_debugfs();
if (def_disp_name)
core.default_display_name = def_disp_name;
@@ -198,10 +180,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
register_pm_notifier(&omap_dss_pm_notif_block);
return 0;
-
-err_debugfs:
-
- return r;
}
static int omap_dss_remove(struct platform_device *pdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index f1eb8b0f8a2a..5ce893c1923d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -60,7 +60,7 @@ omapdss_of_get_next_port(const struct device_node *parent,
return NULL;
}
prev = port;
- } while (of_node_cmp(port->name, "port") != 0);
+ } while (!of_node_name_eq(port, "port"));
of_node_put(ports);
}
@@ -83,7 +83,7 @@ omapdss_of_get_next_endpoint(const struct device_node *parent,
if (!ep)
return NULL;
prev = ep;
- } while (of_node_cmp(ep->name, "endpoint") != 0);
+ } while (!of_node_name_eq(ep, "endpoint"));
return ep;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index a3cc0ca8f9d2..b1a354494144 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -214,7 +214,7 @@ struct platform_device *dss_get_core_pdev(void);
int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+void dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
/* display */
int dss_suspend_all_devices(void);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
index fa72e735dad2..d146793dd044 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
@@ -712,7 +712,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
else
acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
/*
- * The I2S input word length is twice the lenght given in the IEC-60958
+ * The I2S input word length is twice the length given in the IEC-60958
* status word. If the word size is greater than
* 20 bits, increment by one.
*/
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 4061a20cfe24..3b361bc9feb8 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -667,10 +667,10 @@ static int ssd1307fb_probe(struct i2c_client *client,
if (par->reset) {
/* Reset the screen */
- gpiod_set_value_cansleep(par->reset, 0);
- udelay(4);
gpiod_set_value_cansleep(par->reset, 1);
udelay(4);
+ gpiod_set_value_cansleep(par->reset, 0);
+ udelay(4);
}
if (par->vbat_reg) {
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index 7bb7e90b8f00..bdf5a0ea876d 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -2110,7 +2110,7 @@ MODULE_PARM_DESC(viafb_lcd_panel_id,
module_param(viafb_lcd_dsp_method, int, S_IRUSR);
MODULE_PARM_DESC(viafb_lcd_dsp_method,
- "Set Flat Panel display scaling method.(Default=Expandsion)");
+ "Set Flat Panel display scaling method.(Default=Expansion)");
module_param(viafb_SAMM_ON, int, S_IRUSR);
MODULE_PARM_DESC(viafb_SAMM_ON,
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index df7d09409efe..8ca333f21292 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -27,6 +27,10 @@
#define GUEST_MAPPINGS_TRIES 5
+#define VBG_KERNEL_REQUEST \
+ (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
+ VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
/**
* Reserves memory in which the VMM can relocate any guest mappings
* that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
int i, rc;
/* Query the required space. */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
+ VBG_KERNEL_REQUEST);
if (!req)
return;
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
* Tell the host that we're going to free the memory we reserved for
* it, then free it up. (Leak the memory if anything goes wrong here.)
*/
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
+ VBG_KERNEL_REQUEST);
if (!req)
return;
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
struct vmmdev_guest_info2 *req2 = NULL;
int rc, ret = -ENOMEM;
- req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
- req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+ req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
+ VBG_KERNEL_REQUEST);
+ req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
+ VBG_KERNEL_REQUEST);
if (!req1 || !req2)
goto out_free;
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
req2->additions_minor = VBG_VERSION_MINOR;
req2->additions_build = VBG_VERSION_BUILD;
req2->additions_revision = VBG_SVN_REV;
- /* (no features defined yet) */
- req2->additions_features = 0;
+ req2->additions_features =
+ VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
strlcpy(req2->name, VBG_VERSION_STRING,
sizeof(req2->name));
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
struct vmmdev_guest_status *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
struct vmmdev_heartbeat *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
gdev->guest_heartbeat_req = vbg_req_alloc(
sizeof(*gdev->guest_heartbeat_req),
- VMMDEVREQ_GUEST_HEARTBEAT);
+ VMMDEVREQ_GUEST_HEARTBEAT,
+ VBG_KERNEL_REQUEST);
if (!gdev->guest_heartbeat_req)
return -ENOMEM;
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
struct vmmdev_mask *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
u32 changed, previous;
int rc, ret = 0;
- /* Allocate a request buffer before taking the spinlock */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+ /*
+ * Allocate a request buffer before taking the spinlock; when
+ * the session is being terminated the requestor is the kernel,
+ * as we're cleaning up.
+ */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+ session_termination ? VBG_KERNEL_REQUEST :
+ session->requestor);
if (!req) {
if (!session_termination)
return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
struct vmmdev_mask *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
u32 changed, previous;
int rc, ret = 0;
- /* Allocate a request buffer before taking the spinlock */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ /*
+ * Allocate a request buffer before taking the spinlock; when
+ * the session is being terminated the requestor is the kernel,
+ * as we're cleaning up.
+ */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+ session_termination ? VBG_KERNEL_REQUEST :
+ session->requestor);
if (!req) {
if (!session_termination)
return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
struct vmmdev_host_version *req;
int rc, ret;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
gdev->mem_balloon.get_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
- VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+ VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+ VBG_KERNEL_REQUEST);
gdev->mem_balloon.change_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
- VMMDEVREQ_CHANGE_MEMBALLOON);
+ VMMDEVREQ_CHANGE_MEMBALLOON,
+ VBG_KERNEL_REQUEST);
gdev->cancel_req =
vbg_req_alloc(sizeof(*(gdev->cancel_req)),
- VMMDEVREQ_HGCM_CANCEL2);
+ VMMDEVREQ_HGCM_CANCEL2,
+ VBG_KERNEL_REQUEST);
gdev->ack_events_req =
vbg_req_alloc(sizeof(*gdev->ack_events_req),
- VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+ VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+ VBG_KERNEL_REQUEST);
gdev->mouse_status_req =
vbg_req_alloc(sizeof(*gdev->mouse_status_req),
- VMMDEVREQ_GET_MOUSE_STATUS);
+ VMMDEVREQ_GET_MOUSE_STATUS,
+ VBG_KERNEL_REQUEST);
if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
!gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
* vboxguest_linux.c calls this when userspace opens the char-device.
* Return: A pointer to the new session or an ERR_PTR on error.
* @gdev: The Guest extension device.
- * @user: Set if this is a session for the vboxuser device.
+ * @requestor: VMMDEV_REQUESTOR_* flags
*/
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
struct vbg_session *session;
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
return ERR_PTR(-ENOMEM);
session->gdev = gdev;
- session->user_session = user;
+ session->requestor = requestor;
return session;
}
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
if (!session->hgcm_client_ids[i])
continue;
- vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+ /* requestor is kernel here, as we're cleaning up. */
+ vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+ session->hgcm_client_ids[i], &rc);
}
kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
return -EPERM;
}
- if (trusted_apps_only && session->user_session) {
+ if (trusted_apps_only &&
+ (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
req->request_type);
return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EMFILE;
- ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
- &conn->hdr.rc);
+ ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+ &client_id, &conn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EINVAL;
- ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+ ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+ &disconn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
}
if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
- ret = vbg_hgcm_call32(gdev, client_id,
+ ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS32(call),
call->parm_count, &call->hdr.rc);
else
- ret = vbg_hgcm_call(gdev, client_id,
+ ret = vbg_hgcm_call(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS(call),
call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
}
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+ struct vbg_session *session,
struct vbg_ioctl_write_coredump *dump)
{
struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
return -EINVAL;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+ session->requestor);
if (!req)
return -ENOMEM;
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
case VBG_IOCTL_CHECK_BALLOON:
return vbg_ioctl_check_balloon(gdev, data);
case VBG_IOCTL_WRITE_CORE_DUMP:
- return vbg_ioctl_write_core_dump(gdev, data);
+ return vbg_ioctl_write_core_dump(gdev, session, data);
}
/* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
struct vmmdev_mouse_status *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 7ad9ec45bfa9..4188c12b839f 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -154,15 +154,15 @@ struct vbg_session {
* host. Protected by vbg_gdev.session_mutex.
*/
u32 guest_caps;
- /** Does this session belong to a root process or a user one? */
- bool user_session;
+ /** VMMDEV_REQUESTOR_* flags */
+ u32 requestor;
/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
bool cancel_waiters;
};
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
void vbg_core_close_session(struct vbg_session *session);
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
void vbg_linux_mouse_event(struct vbg_dev *gdev);
/* Private (non exported) functions from vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+ u32 requestor);
void vbg_req_free(void *req, size_t len);
int vbg_req_perform(struct vbg_dev *gdev, void *req);
int vbg_hgcm_call32(
- struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
- struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
- int *vbox_status);
+ struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+ u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+ u32 parm_count, int *vbox_status);
#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e2a9619192d..6e8c0f1c1056 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -5,6 +5,7 @@
* Copyright (C) 2006-2016 Oracle Corporation
*/
+#include <linux/cred.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
static struct vbg_dev *vbg_gdev;
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+ u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+ VMMDEV_REQUESTOR_CON_DONT_KNOW |
+ VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+ if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+ requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+ else
+ requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+ if (in_egroup_p(inode->i_gid))
+ requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+ return requestor;
+}
+
static int vbg_misc_device_open(struct inode *inode, struct file *filp)
{
struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
/* misc_open sets filp->private_data to our misc device */
gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
- session = vbg_core_open_session(gdev, false);
+ session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
if (IS_ERR(session))
return PTR_ERR(session);
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
gdev = container_of(filp->private_data, struct vbg_dev,
misc_device_user);
- session = vbg_core_open_session(gdev, false);
+ session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+ VMMDEV_REQUESTOR_USER_DEVICE);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
if (is_vmmdev_req)
- buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+ buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+ session->requestor);
else
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index bf4474214b4d..75fd140b02ff 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
VBG_LOG(vbg_debug, pr_debug);
#endif
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+ u32 requestor)
{
struct vmmdev_request_header *req;
int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
req->request_type = req_type;
req->rc = VERR_GENERAL_FAILURE;
req->reserved1 = 0;
- req->reserved2 = 0;
+ req->requestor = requestor;
return req;
}
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
return done;
}
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status)
{
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
int rc;
hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
- VMMDEVREQ_HGCM_CONNECT);
+ VMMDEVREQ_HGCM_CONNECT, requestor);
if (!hgcm_connect)
return -ENOMEM;
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
}
EXPORT_SYMBOL(vbg_hgcm_connect);
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+ u32 client_id, int *vbox_status)
{
struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
int rc;
hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
- VMMDEVREQ_HGCM_DISCONNECT);
+ VMMDEVREQ_HGCM_DISCONNECT,
+ requestor);
if (!hgcm_disconnect)
return -ENOMEM;
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
return 0;
}
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
- u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
- u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+ u32 function, u32 timeout_ms,
+ struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+ int *vbox_status)
{
struct vmmdev_hgcm_call *call;
void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
goto free_bounce_bufs;
}
- call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+ call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
if (!call) {
ret = -ENOMEM;
goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
- struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
- struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
- int *vbox_status)
+ struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+ u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+ u32 parm_count, int *vbox_status)
{
struct vmmdev_hgcm_function_parameter *parm64 = NULL;
u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
goto out_free;
}
- ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+ ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
parm64, parm_count, vbox_status);
if (ret < 0)
goto out_free;
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
index 77f0c8f8a231..84834dad38d5 100644
--- a/drivers/virt/vboxguest/vboxguest_version.h
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -9,11 +9,10 @@
#ifndef __VBOX_VERSION_H__
#define __VBOX_VERSION_H__
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
#define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
#endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 5e2ae978935d..6337b8d75d96 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
s32 rc;
/** Reserved field no.1. MBZ. */
u32 reserved1;
- /** Reserved field no.2. MBZ. */
- u32 reserved2;
+ /** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+ u32 requestor;
};
VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
};
VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO BIT(0)
+
/** struct vmmdev_guestinfo2 - Guest information report, version 2. */
struct vmmdev_guest_info2 {
/** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
u32 additions_build;
/** SVN revision. */
u32 additions_revision;
- /** Feature mask, currently unused. */
+ /** Feature mask. */
u32 additions_features;
/**
* The intentional meaning of this field was:
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 59e36ef4920f..98b30f54342c 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -161,6 +161,7 @@ EXPORT_SYMBOL_GPL(virtio_config_enable);
void virtio_add_status(struct virtio_device *dev, unsigned int status)
{
+ might_sleep();
dev->config->set_status(dev, dev->config->get_status(dev) | status);
}
EXPORT_SYMBOL_GPL(virtio_add_status);
@@ -170,6 +171,7 @@ int virtio_finalize_features(struct virtio_device *dev)
int ret = dev->config->finalize_features(dev);
unsigned status;
+ might_sleep();
if (ret)
return ret;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index fb12fe205f86..f19061b585a4 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -457,9 +457,12 @@ static void update_balloon_size_func(struct work_struct *work)
update_balloon_size_work);
diff = towards_target(vb);
+ if (!diff)
+ return;
+
if (diff > 0)
diff -= fill_balloon(vb, diff);
- else if (diff < 0)
+ else
diff += leak_balloon(vb, -diff);
update_balloon_size(vb);
@@ -922,7 +925,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
VIRTIO_BALLOON_CMD_ID_STOP);
vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
VIRTIO_BALLOON_CMD_ID_STOP);
- vb->num_free_page_blocks = 0;
spin_lock_init(&vb->free_page_list_lock);
INIT_LIST_HEAD(&vb->free_page_list);
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index d0584c040c60..7a0398bb84f7 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev)
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
- for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ if (vp_dev->msix_affinity_masks) {
+ for (i = 0; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->msix_affinity_masks[i])
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ }
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a0b07c331255..5df92c308286 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -271,6 +271,17 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
return false;
}
+size_t virtio_max_dma_size(struct virtio_device *vdev)
+{
+ size_t max_segment_size = SIZE_MAX;
+
+ if (vring_use_dma_api(vdev))
+ max_segment_size = dma_max_mapping_size(&vdev->dev);
+
+ return max_segment_size;
+}
+EXPORT_SYMBOL_GPL(virtio_max_dma_size);
+
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
@@ -871,6 +882,8 @@ static struct virtqueue *vring_create_virtqueue_split(
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (queue)
break;
+ if (!may_reduce_num)
+ return NULL;
}
if (!num)
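A consumer clamps its own segment limit with the new helper; a sketch of how a block-style virtio driver might apply it (the request queue q is assumed to exist):

	/* Respect swiotlb/IOMMU mapping limits whenever the DMA API is in use. */
	u32 max_seg = min_t(size_t, virtio_max_dma_size(vdev), U32_MAX);

	blk_queue_max_segment_size(q, max_seg);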
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 0f4ecfcdb549..a9fb77585272 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1016,15 +1016,15 @@ static int ds_probe(struct usb_interface *intf,
/* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
alt = 3;
err = usb_set_interface(dev->udev,
- intf->altsetting[alt].desc.bInterfaceNumber, alt);
+ intf->cur_altsetting->desc.bInterfaceNumber, alt);
if (err) {
dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
"for %d interface: err=%d.\n", alt,
- intf->altsetting[alt].desc.bInterfaceNumber, err);
+ intf->cur_altsetting->desc.bInterfaceNumber, err);
goto err_out_clear;
}
- iface_desc = &intf->altsetting[alt];
+ iface_desc = intf->cur_altsetting;
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
pr_info("Num endpoints=%d. It is not DS9490R.\n",
iface_desc->desc.bNumEndpoints);
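The underlying bug: intf->altsetting[] is ordered by enumeration, not indexed by bAlternateSetting, so altsetting[alt] can name the wrong descriptor. Two sketches of the safe lookups the USB core provides:

	/* The active alternate setting, valid after usb_set_interface(). */
	struct usb_host_interface *cur = intf->cur_altsetting;

	/* Or resolve a specific bAlternateSetting value explicitly. */
	struct usb_host_interface *alt3 = usb_altnum_to_altsetting(intf, 3);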
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 65c3c42b5e7c..242eea859637 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -241,6 +241,22 @@ config RAVE_SP_WATCHDOG
help
Support for the watchdog on RAVE SP device.
+config MLX_WDT
+ tristate "Mellanox Watchdog"
+ depends on MELLANOX_PLATFORM
+ select WATCHDOG_CORE
+ select REGMAP
+ help
+ This is the driver for the hardware watchdog on Mellanox systems.
+ If you are going to use it, say Y here, otherwise N.
+ This driver can be used together with the watchdog daemon.
+ It can also watch your kernel to make sure it doesn't freeze,
+ and if it does, it reboots your system after a certain amount of
+ time.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlx-wdt.
+
# ALPHA Architecture
# ARM Architecture
@@ -1157,7 +1173,7 @@ config HP_WATCHDOG
select WATCHDOG_CORE
depends on X86 && PCI
help
- A software monitoring watchdog and NMI sourcing driver. This driver
+ A software monitoring watchdog and NMI handling driver. This driver
will detect lockups and provide a stack trace. This is a driver that
will only load on an HP ProLiant system with a minimum of iLO2 support.
To compile this driver as a module, choose M here: the module will be
@@ -1175,12 +1191,13 @@ config KEMPLD_WDT
called kempld_wdt.
config HPWDT_NMI_DECODING
- bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
+ bool "NMI support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on HP_WATCHDOG
default y
help
- When an NMI occurs this feature will make the necessary BIOS calls to
- log the cause of the NMI.
+ Enables the NMI handler for the watchdog pretimeout NMI and the iLO
+ "Generate NMI to System" virtual button. When an NMI is claimed
+ by the driver, panic is called.
config SC1200_WDT
tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog"
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 4e78a8c73f0c..ba930e464657 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -142,6 +142,7 @@ obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o
obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o
obj-$(CONFIG_NIC7018_WDT) += nic7018_wdt.o
+obj-$(CONFIG_MLX_WDT) += mlx_wdt.o
# M68K Architecture
obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 501aebb5b81f..aa95f57cc1c3 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -16,8 +16,6 @@
* heartbeat requests after the watchdog device has been closed.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/watchdog/mlx_wdt.c b/drivers/watchdog/mlx_wdt.c
new file mode 100644
index 000000000000..70c2cbf9c993
--- /dev/null
+++ b/drivers/watchdog/mlx_wdt.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox watchdog driver
+ *
+ * Copyright (C) 2019 Mellanox Technologies
+ * Copyright (C) 2019 Michael Shych <mshych@mellanox.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define MLXREG_WDT_CLOCK_SCALE 1000
+#define MLXREG_WDT_MAX_TIMEOUT_TYPE1 32
+#define MLXREG_WDT_MAX_TIMEOUT_TYPE2 255
+#define MLXREG_WDT_MIN_TIMEOUT 1
+#define MLXREG_WDT_OPTIONS_BASE (WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | \
+ WDIOF_SETTIMEOUT)
+
+/**
+ * struct mlxreg_wdt - watchdog private data:
+ *
+ * @wdd: watchdog device;
+ * @pdata: data received from platform driver;
+ * @regmap: register map of parent device;
+ * @action_idx: index for direct access to action register;
+ * @timeout_idx: index for direct access to timeout register;
+ * @tleft_idx: index for direct access to time left register;
+ * @ping_idx: index for direct access to ping register;
+ * @reset_idx: index for direct access to reset cause register;
+ * @wdt_type: watchdog HW type;
+ */
+struct mlxreg_wdt {
+ struct watchdog_device wdd;
+ struct mlxreg_core_platform_data *pdata;
+ void *regmap;
+ int action_idx;
+ int timeout_idx;
+ int tleft_idx;
+ int ping_idx;
+ int reset_idx;
+ enum mlxreg_wdt_type wdt_type;
+};
+
+static void mlxreg_wdt_check_card_reset(struct mlxreg_wdt *wdt)
+{
+ struct mlxreg_core_data *reg_data;
+ u32 regval;
+ int rc;
+
+ if (wdt->reset_idx == -EINVAL)
+ return;
+
+ if (!(wdt->wdd.info->options & WDIOF_CARDRESET))
+ return;
+
+ reg_data = &wdt->pdata->data[wdt->reset_idx];
+ rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
+ if (!rc) {
+ if (regval & ~reg_data->mask) {
+ wdt->wdd.bootstatus = WDIOF_CARDRESET;
+ dev_info(wdt->wdd.parent,
+ "watchdog previously reset the CPU\n");
+ }
+ }
+}
+
+static int mlxreg_wdt_start(struct watchdog_device *wdd)
+{
+ struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
+ struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->action_idx];
+
+ return regmap_update_bits(wdt->regmap, reg_data->reg, ~reg_data->mask,
+ BIT(reg_data->bit));
+}
+
+static int mlxreg_wdt_stop(struct watchdog_device *wdd)
+{
+ struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
+ struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->action_idx];
+
+ return regmap_update_bits(wdt->regmap, reg_data->reg, ~reg_data->mask,
+ ~BIT(reg_data->bit));
+}
+
+static int mlxreg_wdt_ping(struct watchdog_device *wdd)
+{
+ struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
+ struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->ping_idx];
+
+ return regmap_update_bits_base(wdt->regmap, reg_data->reg,
+ ~reg_data->mask, BIT(reg_data->bit),
+ NULL, false, true);
+}
+
+static int mlxreg_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
+ struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->timeout_idx];
+ u32 regval, set_time, hw_timeout;
+ int rc;
+
+ if (wdt->wdt_type == MLX_WDT_TYPE1) {
+ rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
+ if (rc)
+ return rc;
+
+ hw_timeout = order_base_2(timeout * MLXREG_WDT_CLOCK_SCALE);
+ regval = (regval & reg_data->mask) | hw_timeout;
+ /* Round down to the closest actual number of seconds. */
+ set_time = BIT(hw_timeout) / MLXREG_WDT_CLOCK_SCALE;
+ } else {
+ set_time = timeout;
+ regval = timeout;
+ }
+
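+ /*
+ * Worked example (type 1, illustrative): timeout = 10s gives
+ * order_base_2(10 * 1000) = 14, the hardware then counts BIT(14) =
+ * 16384 ticks (~16.4s), and wdd->timeout is reported rounded down
+ * as 16384 / 1000 = 16 seconds.
+ */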
+ wdd->timeout = set_time;
+ rc = regmap_write(wdt->regmap, reg_data->reg, regval);
+
+ if (!rc) {
+ /*
+ * Restart watchdog with new timeout period
+ * if watchdog is already started.
+ */
+ if (watchdog_active(wdd)) {
+ rc = mlxreg_wdt_stop(wdd);
+ if (!rc)
+ rc = mlxreg_wdt_start(wdd);
+ }
+ }
+
+ return rc;
+}
+
+static unsigned int mlxreg_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
+ struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->tleft_idx];
+ u32 regval;
+ int rc;
+
+ rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
+ /* Return 0 time left if the register read fails. */
+ return rc == 0 ? regval : 0;
+}
+
+static const struct watchdog_ops mlxreg_wdt_ops_type1 = {
+ .start = mlxreg_wdt_start,
+ .stop = mlxreg_wdt_stop,
+ .ping = mlxreg_wdt_ping,
+ .set_timeout = mlxreg_wdt_set_timeout,
+ .owner = THIS_MODULE,
+};
+
+static const struct watchdog_ops mlxreg_wdt_ops_type2 = {
+ .start = mlxreg_wdt_start,
+ .stop = mlxreg_wdt_stop,
+ .ping = mlxreg_wdt_ping,
+ .set_timeout = mlxreg_wdt_set_timeout,
+ .get_timeleft = mlxreg_wdt_get_timeleft,
+ .owner = THIS_MODULE,
+};
+
+static const struct watchdog_info mlxreg_wdt_main_info = {
+ .options = MLXREG_WDT_OPTIONS_BASE
+ | WDIOF_CARDRESET,
+ .identity = "mlx-wdt-main",
+};
+
+static const struct watchdog_info mlxreg_wdt_aux_info = {
+ .options = MLXREG_WDT_OPTIONS_BASE
+ | WDIOF_ALARMONLY,
+ .identity = "mlx-wdt-aux",
+};
+
+static void mlxreg_wdt_config(struct mlxreg_wdt *wdt,
+ struct mlxreg_core_platform_data *pdata)
+{
+ struct mlxreg_core_data *data = pdata->data;
+ int i;
+
+ wdt->reset_idx = -EINVAL;
+ for (i = 0; i < pdata->counter; i++, data++) {
+ if (strnstr(data->label, "action", sizeof(data->label)))
+ wdt->action_idx = i;
+ else if (strnstr(data->label, "timeout", sizeof(data->label)))
+ wdt->timeout_idx = i;
+ else if (strnstr(data->label, "timeleft", sizeof(data->label)))
+ wdt->tleft_idx = i;
+ else if (strnstr(data->label, "ping", sizeof(data->label)))
+ wdt->ping_idx = i;
+ else if (strnstr(data->label, "reset", sizeof(data->label)))
+ wdt->reset_idx = i;
+ }
+
+ wdt->pdata = pdata;
+ if (strnstr(pdata->identity, mlxreg_wdt_main_info.identity,
+ sizeof(mlxreg_wdt_main_info.identity)))
+ wdt->wdd.info = &mlxreg_wdt_main_info;
+ else
+ wdt->wdd.info = &mlxreg_wdt_aux_info;
+
+ wdt->wdt_type = pdata->version;
+ if (wdt->wdt_type == MLX_WDT_TYPE2) {
+ wdt->wdd.ops = &mlxreg_wdt_ops_type2;
+ wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE2;
+ } else {
+ wdt->wdd.ops = &mlxreg_wdt_ops_type1;
+ wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE1;
+ }
+ wdt->wdd.min_timeout = MLXREG_WDT_MIN_TIMEOUT;
+}
+
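+/*
+ * Illustrative platform data (register addresses, bits and the 30s
+ * timeout are assumptions, not values from a real system), showing the
+ * labels mlxreg_wdt_config() matches on:
+ *
+ *	static struct mlxreg_core_data example_wd_data[] = {
+ *		{ .label = "action",   .reg = 0xc9, .bit = 0 },
+ *		{ .label = "timeout",  .reg = 0xcb, .health_cntr = 30 },
+ *		{ .label = "timeleft", .reg = 0xcc },
+ *		{ .label = "ping",     .reg = 0xc9, .bit = 1 },
+ *		{ .label = "reset",    .reg = 0xc7, .bit = 6 },
+ *	};
+ */
+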
+static int mlxreg_wdt_init_timeout(struct mlxreg_wdt *wdt,
+ struct mlxreg_core_platform_data *pdata)
+{
+ u32 timeout;
+
+ timeout = pdata->data[wdt->timeout_idx].health_cntr;
+ return mlxreg_wdt_set_timeout(&wdt->wdd, timeout);
+}
+
+static int mlxreg_wdt_probe(struct platform_device *pdev)
+{
+ struct mlxreg_core_platform_data *pdata;
+ struct mlxreg_wdt *wdt;
+ int rc;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->wdd.parent = &pdev->dev;
+ wdt->regmap = pdata->regmap;
+ mlxreg_wdt_config(wdt, pdata);
+
+ if ((pdata->features & MLXREG_CORE_WD_FEATURE_NOWAYOUT))
+ watchdog_set_nowayout(&wdt->wdd, WATCHDOG_NOWAYOUT);
+ watchdog_stop_on_reboot(&wdt->wdd);
+ watchdog_stop_on_unregister(&wdt->wdd);
+ watchdog_set_drvdata(&wdt->wdd, wdt);
+ rc = mlxreg_wdt_init_timeout(wdt, pdata);
+ if (rc)
+ goto register_error;
+
+ if ((pdata->features & MLXREG_CORE_WD_FEATURE_START_AT_BOOT)) {
+ rc = mlxreg_wdt_start(&wdt->wdd);
+ if (rc)
+ goto register_error;
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+ }
+ mlxreg_wdt_check_card_reset(wdt);
+ rc = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
+
+register_error:
+ if (rc)
+ dev_err(&pdev->dev,
+ "Cannot register watchdog device (err=%d)\n", rc);
+ return rc;
+}
+
+static struct platform_driver mlxreg_wdt_driver = {
+ .probe = mlxreg_wdt_probe,
+ .driver = {
+ .name = "mlx-wdt",
+ },
+};
+
+module_platform_driver(mlxreg_wdt_driver);
+
+MODULE_AUTHOR("Michael Shych <michaelsh@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox watchdog driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mlx-wdt");
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 06a892e36a8d..2ffa39b46970 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -437,7 +437,7 @@ static long pc87413_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
timeout = new_timeout;
pc87413_refresh();
- /* fall through and return the new timeout... */
+ /* fall through - and return the new timeout... */
case WDIOC_GETTIMEOUT:
new_timeout = timeout * 60;
return put_user(new_timeout, uarg.i);
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index e0a6f8c0f03c..bb97f5b2f7eb 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -225,7 +225,7 @@ static int __init pikawdt_init(void)
{
struct device_node *np;
void __iomem *fpga;
- static u32 post1;
+ u32 post1;
int ret;
np = of_find_compatible_node(NULL, NULL, "pika,fpga");
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 780971318810..5dfd604477a4 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -245,6 +245,28 @@ static int qcom_wdt_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused qcom_wdt_suspend(struct device *dev)
+{
+ struct qcom_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ qcom_wdt_stop(&wdt->wdd);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_wdt_resume(struct device *dev)
+{
+ struct qcom_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ qcom_wdt_start(&wdt->wdd);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(qcom_wdt_pm_ops, qcom_wdt_suspend, qcom_wdt_resume);
+
static const struct of_device_id qcom_wdt_of_table[] = {
{ .compatible = "qcom,kpss-timer", .data = reg_offset_data_apcs_tmr },
{ .compatible = "qcom,scss-timer", .data = reg_offset_data_apcs_tmr },
@@ -259,6 +281,7 @@ static struct platform_driver qcom_watchdog_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qcom_wdt_of_table,
+ .pm = &qcom_wdt_pm_ops,
},
};
module_platform_driver(qcom_watchdog_driver);
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index 87333a41f753..72d15fd1f183 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -270,8 +270,8 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
timeout = new_timeout;
wdt_keepalive();
- /* Fall through */
}
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index e8bd9887c566..e221e47396ab 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -161,7 +161,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
- arch_counter_get_cntvct();
+ arch_timer_read_counter();
do_div(timeleft, gwdt->clk);
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 8e4e2fc13f87..e035a4d4b299 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -239,7 +239,7 @@ static long sc1200wdt_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
timeout = new_timeout;
sc1200wdt_write_data(WDTO, timeout);
- /* fall through and return the new timeout */
+ /* fall through - and return the new timeout */
case WDIOC_GETTIMEOUT:
return put_user(timeout * 60, p);
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 6aadb56e7faa..403542f9ed8d 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -324,8 +324,8 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
wdt_keepalive();
- /* Fall through */
}
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 445ea1ad1fa9..c768dcd53034 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -478,7 +478,7 @@ static long wb_smsc_wdt_ioctl(struct file *file,
return -EINVAL;
timeout = new_timeout;
wb_smsc_wdt_set_timeout(timeout);
- /* fall through and return the new timeout... */
+ /* fall through - and return the new timeout... */
case WDIOC_GETTIMEOUT:
new_timeout = timeout;
if (unit == UNIT_MINUTE)
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index 05658ecc0aa4..db9b6488e388 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -292,8 +292,8 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
timeout = new_timeout;
wdt_keepalive();
- /* Fall through */
}
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index c48927a58e10..ad3844d9f876 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
-obj-$(CONFIG_X86) += fallback.o
obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o
obj-y += mem-reservation.o
obj-y += events/
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 39b229f9e256..d37dd5bb7a8f 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -604,6 +604,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
while (pgno < nr_pages) {
page = balloon_retrieve(true);
if (page) {
+ __ClearPageOffline(page);
pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
@@ -645,8 +646,10 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
mutex_lock(&balloon_mutex);
for (i = 0; i < nr_pages; i++) {
- if (pages[i])
+ if (pages[i]) {
+ __SetPageOffline(pages[i]);
balloon_append(pages[i]);
+ }
}
balloon_stats.target_unpopulated -= nr_pages;
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index b1357aa4bc55..f192b6f42da9 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -54,7 +54,7 @@ static int vcpu_online(unsigned int cpu)
}
static void vcpu_hotplug(unsigned int cpu)
{
- if (!cpu_possible(cpu))
+ if (cpu >= nr_cpu_ids || !cpu_possible(cpu))
return;
switch (vcpu_online(cpu)) {
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 117e76b2f939..084e45882c73 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1687,7 +1687,6 @@ void __init xen_init_IRQ(void)
#ifdef CONFIG_X86
if (xen_pv_domain()) {
- irq_ctx_init(smp_processor_id());
if (xen_initial_domain())
pci_xen_initial_domain();
}
diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c
deleted file mode 100644
index b04fb64c5a91..000000000000
--- a/drivers/xen/fallback.c
+++ /dev/null
@@ -1,81 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/bug.h>
-#include <linux/export.h>
-#include <asm/hypervisor.h>
-#include <asm/xen/hypercall.h>
-
-int xen_event_channel_op_compat(int cmd, void *arg)
-{
- struct evtchn_op op;
- int rc;
-
- op.cmd = cmd;
- memcpy(&op.u, arg, sizeof(op.u));
- rc = _hypercall1(int, event_channel_op_compat, &op);
-
- switch (cmd) {
- case EVTCHNOP_close:
- case EVTCHNOP_send:
- case EVTCHNOP_bind_vcpu:
- case EVTCHNOP_unmask:
- /* no output */
- break;
-
-#define COPY_BACK(eop) \
- case EVTCHNOP_##eop: \
- memcpy(arg, &op.u.eop, sizeof(op.u.eop)); \
- break
-
- COPY_BACK(bind_interdomain);
- COPY_BACK(bind_virq);
- COPY_BACK(bind_pirq);
- COPY_BACK(status);
- COPY_BACK(alloc_unbound);
- COPY_BACK(bind_ipi);
-#undef COPY_BACK
-
- default:
- WARN_ON(rc != -ENOSYS);
- break;
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
-
-int xen_physdev_op_compat(int cmd, void *arg)
-{
- struct physdev_op op;
- int rc;
-
- op.cmd = cmd;
- memcpy(&op.u, arg, sizeof(op.u));
- rc = _hypercall1(int, physdev_op_compat, &op);
-
- switch (cmd) {
- case PHYSDEVOP_IRQ_UNMASK_NOTIFY:
- case PHYSDEVOP_set_iopl:
- case PHYSDEVOP_set_iobitmap:
- case PHYSDEVOP_apic_write:
- /* no output */
- break;
-
-#define COPY_BACK(pop, fld) \
- case PHYSDEVOP_##pop: \
- memcpy(arg, &op.u.fld, sizeof(op.u.fld)); \
- break
-
- COPY_BACK(irq_status_query, irq_status_query);
- COPY_BACK(apic_read, apic_op);
- COPY_BACK(ASSIGN_VECTOR, irq_op);
-#undef COPY_BACK
-
- default:
- WARN_ON(rc != -ENOSYS);
- break;
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(xen_physdev_op_compat);
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index cba6b586bfbd..2c4f324f8626 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -80,6 +80,12 @@ struct gntdev_dmabuf_priv {
struct list_head imp_list;
/* This is the lock which protects dma_buf_xxx lists. */
struct mutex lock;
+ /*
+ * We reference this file while exporting dma-bufs, so
+ * the grant device context is not destroyed while there are
+ * external users alive.
+ */
+ struct file *filp;
};
/* DMA buffer export support. */
@@ -311,6 +317,7 @@ static void dmabuf_exp_release(struct kref *kref)
dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
list_del(&gntdev_dmabuf->next);
+ fput(gntdev_dmabuf->priv->filp);
kfree(gntdev_dmabuf);
}
@@ -423,6 +430,7 @@ static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
mutex_lock(&args->dmabuf_priv->lock);
list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
mutex_unlock(&args->dmabuf_priv->lock);
+ get_file(gntdev_dmabuf->priv->filp);
return 0;
fail:
@@ -737,6 +745,14 @@ static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
return 0;
}
+static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
+{
+ struct gntdev_dmabuf *q, *gntdev_dmabuf;
+
+ list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
+ dmabuf_imp_release(priv, gntdev_dmabuf->fd);
+}
+
/* DMA buffer IOCTL support. */
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
@@ -834,7 +850,7 @@ long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}
-struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
+struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
struct gntdev_dmabuf_priv *priv;
@@ -847,10 +863,13 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
INIT_LIST_HEAD(&priv->exp_wait_list);
INIT_LIST_HEAD(&priv->imp_list);
+ priv->filp = filp;
+
return priv;
}
void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
+ dmabuf_imp_release_all(priv);
kfree(priv);
}
diff --git a/drivers/xen/gntdev-dmabuf.h b/drivers/xen/gntdev-dmabuf.h
index 7220a53d0fc5..3d9b9cf9d5a1 100644
--- a/drivers/xen/gntdev-dmabuf.h
+++ b/drivers/xen/gntdev-dmabuf.h
@@ -14,7 +14,7 @@
struct gntdev_dmabuf_priv;
struct gntdev_priv;
-struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void);
+struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp);
void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 5efc5eee9544..7cf9c51318aa 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -600,7 +600,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
mutex_init(&priv->lock);
#ifdef CONFIG_XEN_GNTDEV_DMABUF
- priv->dmabuf_priv = gntdev_dmabuf_init();
+ priv->dmabuf_priv = gntdev_dmabuf_init(flip);
if (IS_ERR(priv->dmabuf_priv)) {
ret = PTR_ERR(priv->dmabuf_priv);
kfree(priv);
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index de01a6d0059d..a1c61e351d3f 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
- GFP_KERNEL);
+ vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
if (!vma_priv)
return -ENOMEM;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index bb7888429be6..877baf2a94f4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -214,10 +214,13 @@ retry:
/*
* Get IO TLB memory from any location.
*/
- if (early)
+ if (early) {
xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
PAGE_SIZE);
- else {
+ if (!xen_io_tlb_start)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
+ } else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index fbb9137c7d02..98e35644fda7 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -410,21 +410,21 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
/* All online CPUs have been processed at this stage. Now verify
* whether in fact "online CPUs" == physical CPUs.
*/
- acpi_id_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+ acpi_id_present = bitmap_zalloc(nr_acpi_bits, GFP_KERNEL);
if (!acpi_id_present)
return -ENOMEM;
- acpi_id_cst_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+ acpi_id_cst_present = bitmap_zalloc(nr_acpi_bits, GFP_KERNEL);
if (!acpi_id_cst_present) {
- kfree(acpi_id_present);
+ bitmap_free(acpi_id_present);
return -ENOMEM;
}
acpi_psd = kcalloc(nr_acpi_bits, sizeof(struct acpi_psd_package),
GFP_KERNEL);
if (!acpi_psd) {
- kfree(acpi_id_present);
- kfree(acpi_id_cst_present);
+ bitmap_free(acpi_id_present);
+ bitmap_free(acpi_id_cst_present);
return -ENOMEM;
}
@@ -533,14 +533,14 @@ static int __init xen_acpi_processor_init(void)
return -ENODEV;
nr_acpi_bits = get_max_acpi_id() + 1;
- acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+ acpi_ids_done = bitmap_zalloc(nr_acpi_bits, GFP_KERNEL);
if (!acpi_ids_done)
return -ENOMEM;
acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
if (!acpi_perf_data) {
pr_debug("Memory allocation error for acpi_perf_data\n");
- kfree(acpi_ids_done);
+ bitmap_free(acpi_ids_done);
return -ENOMEM;
}
for_each_possible_cpu(i) {
@@ -584,7 +584,7 @@ err_unregister:
err_out:
/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
free_acpi_perf_data();
- kfree(acpi_ids_done);
+ bitmap_free(acpi_ids_done);
return rc;
}
static void __exit xen_acpi_processor_exit(void)
@@ -592,9 +592,9 @@ static void __exit xen_acpi_processor_exit(void)
int i;
unregister_syscore_ops(&xap_syscore_ops);
- kfree(acpi_ids_done);
- kfree(acpi_id_present);
- kfree(acpi_id_cst_present);
+ bitmap_free(acpi_ids_done);
+ bitmap_free(acpi_id_present);
+ bitmap_free(acpi_id_cst_present);
kfree(acpi_psd);
for_each_possible_cpu(i)
acpi_processor_unregister_performance(i);
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 2acbfe104e46..a67236b02452 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -37,6 +37,7 @@
#include <linux/mm_types.h>
#include <linux/init.h>
#include <linux/capability.h>
+#include <linux/memory_hotplug.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -50,6 +51,10 @@
#define BALLOON_CLASS_NAME "xen_memory"
+#ifdef CONFIG_MEMORY_HOTPLUG
+u64 xen_saved_max_mem_size = 0;
+#endif
+
static struct device balloon_dev;
static int register_balloon(struct device *dev);
@@ -63,6 +68,12 @@ static void watch_target(struct xenbus_watch *watch,
static bool watch_fired;
static long target_diff;
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /* The balloon driver will take care of adding memory now. */
+ if (xen_saved_max_mem_size)
+ max_mem_size = xen_saved_max_mem_size;
+#endif
+
err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
if (err != 1) {
/* This is ok (for domain0 at least) - so just return */
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index ea4a08b83fa0..787966f44589 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -127,8 +127,6 @@ void xen_pcibk_reset_device(struct pci_dev *dev)
if (pci_is_enabled(dev))
pci_disable_device(dev);
- pci_write_config_word(dev, PCI_COMMAND, 0);
-
dev->is_busmaster = 0;
} else {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 581c4e1a8b82..23f7f6ec7d1f 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -544,7 +544,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
xenbus_switch_state(xdev, XenbusStateClosed);
if (xenbus_dev_is_online(xdev))
break;
- /* fall through if not online */
+ /* fall through - if not online */
case XenbusStateUnknown:
dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
device_unregister(&xdev->dev);
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index c9e23a126218..ba0942e481bc 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1184,7 +1184,7 @@ static void scsiback_frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through if not online */
+ /* fall through - if not online */
case XenbusStateUnknown:
device_unregister(&dev->dev);
break;
@@ -1404,11 +1404,6 @@ static int scsiback_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int scsiback_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static void scsiback_set_default_node_attrs(struct se_node_acl *nacl)
{
}
@@ -1818,7 +1813,6 @@ static const struct target_core_fabric_ops scsiback_ops = {
.sess_get_index = scsiback_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = scsiback_write_pending,
- .write_pending_status = scsiback_write_pending_status,
.set_default_node_attributes = scsiback_set_default_node_attrs,
.get_cmd_state = scsiback_get_cmd_state,
.queue_data_in = scsiback_queue_data_in,
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c3e201025ef0..0782ff3c2273 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
if (xen_store_evtchn == 0)
return -ENOENT;
- nonseekable_open(inode, filp);
-
- filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+ stream_open(inode, filp);
u = kzalloc(sizeof(*u), GFP_KERNEL);
if (u == NULL)
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 5a0db6dec8d1..aaee1e6584e6 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -40,6 +40,9 @@
*/
#define P9_LOCK_TIMEOUT (30*HZ)
+/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */
+#define V9FS_STAT2INODE_KEEP_ISIZE 1
+
extern struct file_system_type v9fs_fs_type;
extern const struct address_space_operations v9fs_addr_operations;
extern const struct file_operations v9fs_file_operations;
@@ -61,8 +64,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
struct inode *inode, umode_t mode, dev_t);
void v9fs_evict_inode(struct inode *inode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
-void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
-void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
+void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
+ struct super_block *sb, unsigned int flags);
+void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
+ unsigned int flags);
int v9fs_dir_release(struct inode *inode, struct file *filp);
int v9fs_file_open(struct inode *inode, struct file *file);
void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
@@ -83,4 +88,18 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
}
int v9fs_open_to_dotl_flags(int flags);
+
+static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
+{
+ /*
+ * 32-bit needs the lock: concurrent updates could break the
+ * sequence count and make i_size_read() loop forever.
+ * 64-bit updates are atomic and can skip the locking.
+ */
+ if (sizeof(i_size) > sizeof(long))
+ spin_lock(&inode->i_lock);
+ i_size_write(inode, i_size);
+ if (sizeof(i_size) > sizeof(long))
+ spin_unlock(&inode->i_lock);
+}
#endif
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index a25efa782fcc..9a1125305d84 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -446,7 +446,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
i_size = i_size_read(inode);
if (iocb->ki_pos > i_size) {
inode_add_bytes(inode, iocb->ki_pos - i_size);
- i_size_write(inode, iocb->ki_pos);
+ /*
+ * Need to serialize against i_size_write() in
+ * v9fs_stat2inode()
+ */
+ v9fs_i_size_write(inode, iocb->ki_pos);
}
return retval;
}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 85ff859d3af5..72b779bc0942 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -538,7 +538,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
if (retval)
goto error;
- v9fs_stat2inode(st, inode, sb);
+ v9fs_stat2inode(st, inode, sb, 0);
v9fs_cache_inode_get_cookie(inode);
unlock_new_inode(inode);
return inode;
@@ -1092,7 +1092,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
if (IS_ERR(st))
return PTR_ERR(st);
- v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb);
+ v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
generic_fillattr(d_inode(dentry), stat);
p9stat_free(st);
@@ -1170,12 +1170,13 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
* @stat: Plan 9 metadata (mistat) structure
* @inode: inode to populate
* @sb: superblock of filesystem
+ * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
*
*/
void
v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
- struct super_block *sb)
+ struct super_block *sb, unsigned int flags)
{
umode_t mode;
char ext[32];
@@ -1216,10 +1217,11 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
mode = p9mode2perm(v9ses, stat);
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
- i_size_write(inode, stat->length);
+ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
+ v9fs_i_size_write(inode, stat->length);
/* not real number of blocks, but 512 byte ones ... */
- inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
+ inode->i_blocks = (stat->length + 512 - 1) >> 9;
v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
@@ -1416,9 +1418,9 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
{
int umode;
dev_t rdev;
- loff_t i_size;
struct p9_wstat *st;
struct v9fs_session_info *v9ses;
+ unsigned int flags;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_stat(fid);
@@ -1431,16 +1433,13 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
goto out;
- spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
- i_size = inode->i_size;
- v9fs_stat2inode(st, inode, inode->i_sb);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- inode->i_size = i_size;
- spin_unlock(&inode->i_lock);
+ flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+ V9FS_STAT2INODE_KEEP_ISIZE : 0;
+ v9fs_stat2inode(st, inode, inode->i_sb, flags);
out:
p9stat_free(st);
kfree(st);
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 4823e1c46999..a950a927a626 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -143,7 +143,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
if (retval)
goto error;
- v9fs_stat2inode_dotl(st, inode);
+ v9fs_stat2inode_dotl(st, inode, 0);
v9fs_cache_inode_get_cookie(inode);
retval = v9fs_get_acl(inode, fid);
if (retval)
@@ -496,7 +496,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
if (IS_ERR(st))
return PTR_ERR(st);
- v9fs_stat2inode_dotl(st, d_inode(dentry));
+ v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
generic_fillattr(d_inode(dentry), stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
@@ -607,11 +607,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
* v9fs_stat2inode_dotl - populate an inode structure with stat info
* @stat: stat structure
* @inode: inode to populate
+ * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
*
*/
void
-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
+v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
+ unsigned int flags)
{
umode_t mode;
struct v9fs_inode *v9inode = V9FS_I(inode);
@@ -631,7 +633,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
- i_size_write(inode, stat->st_size);
+ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
+ v9fs_i_size_write(inode, stat->st_size);
inode->i_blocks = stat->st_blocks;
} else {
if (stat->st_result_mask & P9_STATS_ATIME) {
@@ -661,8 +664,9 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
}
if (stat->st_result_mask & P9_STATS_RDEV)
inode->i_rdev = new_decode_dev(stat->st_rdev);
- if (stat->st_result_mask & P9_STATS_SIZE)
- i_size_write(inode, stat->st_size);
+ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
+ stat->st_result_mask & P9_STATS_SIZE)
+ v9fs_i_size_write(inode, stat->st_size);
if (stat->st_result_mask & P9_STATS_BLOCKS)
inode->i_blocks = stat->st_blocks;
}
@@ -928,9 +932,9 @@ v9fs_vfs_get_link_dotl(struct dentry *dentry,
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
{
- loff_t i_size;
struct p9_stat_dotl *st;
struct v9fs_session_info *v9ses;
+ unsigned int flags;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
@@ -942,16 +946,13 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
goto out;
- spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
- i_size = inode->i_size;
- v9fs_stat2inode_dotl(st, inode);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- inode->i_size = i_size;
- spin_unlock(&inode->i_lock);
+ flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+ V9FS_STAT2INODE_KEEP_ISIZE : 0;
+ v9fs_stat2inode_dotl(st, inode, flags);
out:
kfree(st);
return 0;
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 48ce50484e80..d13d35cf69c0 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -92,7 +92,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
return ret;
if (v9ses->cache)
- sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
+ sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
sb->s_flags |= SB_ACTIVE | SB_DIRSYNC;
if (!v9ses->cache)
@@ -172,7 +172,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
goto release_sb;
}
d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
- v9fs_stat2inode_dotl(st, d_inode(root));
+ v9fs_stat2inode_dotl(st, d_inode(root), 0);
kfree(st);
} else {
struct p9_wstat *st = NULL;
@@ -183,7 +183,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
}
d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
- v9fs_stat2inode(st, d_inode(root), sb);
+ v9fs_stat2inode(st, d_inode(root), sb, 0);
p9stat_free(st);
kfree(st);
diff --git a/fs/Kconfig b/fs/Kconfig
index ac474a61be37..3e6d3101f3ff 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -8,6 +8,13 @@ menu "File systems"
config DCACHE_WORD_ACCESS
bool
+config VALIDATE_FS_PARSER
+ bool "Validate filesystem parameter description"
+ default y
+ help
+ Enable this to perform validation of the parameter description for a
+ filesystem when it is registered.
+
if BLOCK
config FS_IOMAP
@@ -254,12 +261,9 @@ source "fs/romfs/Kconfig"
source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
-source "fs/exofs/Kconfig"
endif # MISC_FILESYSTEMS
-source "fs/exofs/Kconfig.ore"
-
menuconfig NETWORK_FILESYSTEMS
bool "Network File Systems"
default y
diff --git a/fs/Makefile b/fs/Makefile
index ffeaa6632ab4..427fec226fae 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -13,7 +13,7 @@ obj-y := open.o read_write.o file_table.o super.o \
seq_file.o xattr.o libfs.o fs-writeback.o \
pnode.o splice.o sync.o utimes.o d_path.o \
stack.o fs_struct.o statfs.o fs_pin.o nsfs.o \
- fs_types.o
+ fs_types.o fs_context.o fs_parser.o
ifeq ($(CONFIG_BLOCK),y)
obj-y += buffer.o block_dev.o direct-io.o mpage.o
@@ -126,7 +126,6 @@ obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_BTRFS_FS) += btrfs/
obj-$(CONFIG_GFS2_FS) += gfs2/
obj-$(CONFIG_F2FS_FS) += f2fs/
-obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 1c7955f5cdaf..128f2dbe256a 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -203,8 +203,7 @@ void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi)
*/
void afs_init_callback_state(struct afs_server *server)
{
- if (!test_and_clear_bit(AFS_SERVER_FL_NEW, &server->flags))
- server->cb_s_break++;
+ server->cb_s_break++;
}
/*
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 8ee5972893ed..2f8acb4c556d 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -34,7 +34,7 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
static int afs_deliver_yfs_cb_callback(struct afs_call *);
#define CM_NAME(name) \
- const char afs_SRXCB##name##_name[] __tracepoint_string = \
+ char afs_SRXCB##name##_name[] __tracepoint_string = \
"CB." #name
/*
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index ca08c83168f5..0b37867b5c20 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
xdr_encode_AFS_StoreStatus(&bp, attr);
- *bp++ = 0; /* position of start of write */
- *bp++ = 0;
+ *bp++ = htonl(attr->ia_size >> 32); /* position of start of write */
+ *bp++ = htonl((u32) attr->ia_size);
*bp++ = 0; /* size of write */
*bp++ = 0;
*bp++ = htonl(attr->ia_size >> 32); /* new file length */
@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
xdr_encode_AFS_StoreStatus(&bp, attr);
- *bp++ = 0; /* position of start of write */
+ *bp++ = htonl(attr->ia_size); /* position of start of write */
*bp++ = 0; /* size of write */
*bp++ = htonl(attr->ia_size); /* new file length */
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 1a4ce07fb406..9cedc3fc1b77 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -216,9 +216,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
set_nlink(inode, 2);
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_ctime.tv_sec = get_seconds();
- inode->i_ctime.tv_nsec = 0;
- inode->i_atime = inode->i_mtime = inode->i_ctime;
+ inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
inode->i_blocks = 0;
inode_set_iversion_raw(inode, 0);
inode->i_generation = 0;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 8871b9e8645f..3904ab0b9563 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -36,15 +36,14 @@
struct pagevec;
struct afs_call;
-struct afs_mount_params {
- bool rwpath; /* T if the parent should be considered R/W */
+struct afs_fs_context {
bool force; /* T to force cell type */
bool autocell; /* T if set auto mount operation */
bool dyn_root; /* T if dynamic root */
+ bool no_cell; /* T if the source is "none" (for dynroot) */
afs_voltype_t type; /* type of volume requested */
- int volnamesz; /* size of volume name */
+ unsigned int volnamesz; /* size of volume name */
const char *volname; /* name of volume to mount */
- struct net *net_ns; /* Network namespace in effect */
struct afs_net *net; /* the AFS net namespace stuff */
struct afs_cell *cell; /* cell in which to find volume */
struct afs_volume *volume; /* volume record */
@@ -475,7 +474,6 @@ struct afs_server {
time64_t put_time; /* Time at which last put */
time64_t update_at; /* Time at which to next update the record */
unsigned long flags;
-#define AFS_SERVER_FL_NEW 0 /* New server, don't inc cb_s_break */
#define AFS_SERVER_FL_NOT_READY 1 /* The record is not ready for use */
#define AFS_SERVER_FL_NOT_FOUND 2 /* VL server says no such server */
#define AFS_SERVER_FL_VL_FAIL 3 /* Failed to access VL server */
@@ -828,7 +826,7 @@ static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest
static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
{
- return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break;
+ return vnode->cb_break + vnode->cb_v_break;
}
static inline bool afs_cb_is_broken(unsigned int cb_break,
@@ -836,7 +834,6 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
const struct afs_cb_interest *cbi)
{
return !cbi || cb_break != (vnode->cb_break +
- cbi->server->cb_s_break +
vnode->volume->cb_v_break);
}
@@ -1274,7 +1271,7 @@ static inline struct afs_volume *__afs_get_volume(struct afs_volume *volume)
return volume;
}
-extern struct afs_volume *afs_create_volume(struct afs_mount_params *);
+extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
extern void afs_activate_volume(struct afs_volume *);
extern void afs_deactivate_volume(struct afs_volume *);
extern void afs_put_volume(struct afs_cell *, struct afs_volume *);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 2e51c6994148..eecd8b699186 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -17,6 +17,7 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/gfp.h>
+#include <linux/fs_context.h>
#include "internal.h"
@@ -47,6 +48,8 @@ static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
static unsigned long afs_mntpt_expiry_timeout = 10 * 60;
+static const char afs_root_volume[] = "root.cell";
+
/*
* no valid lookup procedure on this sort of dir
*/
@@ -68,108 +71,112 @@ static int afs_mntpt_open(struct inode *inode, struct file *file)
}
/*
- * create a vfsmount to be automounted
+ * Set the parameters for the proposed superblock.
*/
-static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
+static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
{
- struct afs_super_info *as;
- struct vfsmount *mnt;
- struct afs_vnode *vnode;
- struct page *page;
- char *devname, *options;
- bool rwpath = false;
+ struct afs_fs_context *ctx = fc->fs_private;
+ struct afs_super_info *src_as = AFS_FS_S(mntpt->d_sb);
+ struct afs_vnode *vnode = AFS_FS_I(d_inode(mntpt));
+ struct afs_cell *cell;
+ const char *p;
int ret;
- _enter("{%pd}", mntpt);
-
- BUG_ON(!d_inode(mntpt));
-
- ret = -ENOMEM;
- devname = (char *) get_zeroed_page(GFP_KERNEL);
- if (!devname)
- goto error_no_devname;
-
- options = (char *) get_zeroed_page(GFP_KERNEL);
- if (!options)
- goto error_no_options;
+ if (fc->net_ns != src_as->net_ns) {
+ put_net(fc->net_ns);
+ fc->net_ns = get_net(src_as->net_ns);
+ }
- vnode = AFS_FS_I(d_inode(mntpt));
+ if (src_as->volume && src_as->volume->type == AFSVL_RWVOL) {
+ ctx->type = AFSVL_RWVOL;
+ ctx->force = true;
+ }
+ if (ctx->cell) {
+ afs_put_cell(ctx->net, ctx->cell);
+ ctx->cell = NULL;
+ }
if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
/* if the directory is a pseudo directory, use the d_name */
- static const char afs_root_cell[] = ":root.cell.";
unsigned size = mntpt->d_name.len;
- ret = -ENOENT;
- if (size < 2 || size > AFS_MAXCELLNAME)
- goto error_no_page;
+ if (size < 2)
+ return -ENOENT;
+ p = mntpt->d_name.name;
if (mntpt->d_name.name[0] == '.') {
- devname[0] = '%';
- memcpy(devname + 1, mntpt->d_name.name + 1, size - 1);
- memcpy(devname + size, afs_root_cell,
- sizeof(afs_root_cell));
- rwpath = true;
- } else {
- devname[0] = '#';
- memcpy(devname + 1, mntpt->d_name.name, size);
- memcpy(devname + size + 1, afs_root_cell,
- sizeof(afs_root_cell));
+ size--;
+ p++;
+ ctx->type = AFSVL_RWVOL;
+ ctx->force = true;
}
+ if (size > AFS_MAXCELLNAME)
+ return -ENAMETOOLONG;
+
+ cell = afs_lookup_cell(ctx->net, p, size, NULL, false);
+ if (IS_ERR(cell)) {
+ pr_err("kAFS: unable to lookup cell '%pd'\n", mntpt);
+ return PTR_ERR(cell);
+ }
+ ctx->cell = cell;
+
+ ctx->volname = afs_root_volume;
+ ctx->volnamesz = sizeof(afs_root_volume) - 1;
} else {
/* read the contents of the AFS special symlink */
+ struct page *page;
loff_t size = i_size_read(d_inode(mntpt));
char *buf;
- ret = -EINVAL;
+ if (src_as->cell)
+ ctx->cell = afs_get_cell(src_as->cell);
+
if (size > PAGE_SIZE - 1)
- goto error_no_page;
+ return -EINVAL;
page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto error_no_page;
- }
+ if (IS_ERR(page))
+ return PTR_ERR(page);
if (PageError(page)) {
ret = afs_bad(AFS_FS_I(d_inode(mntpt)), afs_file_error_mntpt);
- goto error;
+ put_page(page);
+ return ret;
}
- buf = kmap_atomic(page);
- memcpy(devname, buf, size);
- kunmap_atomic(buf);
+ buf = kmap(page);
+ ret = vfs_parse_fs_string(fc, "source", buf, size);
+ kunmap(page);
put_page(page);
- page = NULL;
+ if (ret < 0)
+ return ret;
}
- /* work out what options we want */
- as = AFS_FS_S(mntpt->d_sb);
- if (as->cell) {
- memcpy(options, "cell=", 5);
- strcpy(options + 5, as->cell->name);
- if ((as->volume && as->volume->type == AFSVL_RWVOL) || rwpath)
- strcat(options, ",rwpath");
- }
+ return 0;
+}
- /* try and do the mount */
- _debug("--- attempting mount %s -o %s ---", devname, options);
- mnt = vfs_submount(mntpt, &afs_fs_type, devname, options);
- _debug("--- mount result %p ---", mnt);
+/*
+ * create a vfsmount to be automounted
+ */
+static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
+{
+ struct fs_context *fc;
+ struct vfsmount *mnt;
+ int ret;
- free_page((unsigned long) devname);
- free_page((unsigned long) options);
- _leave(" = %p", mnt);
- return mnt;
+ BUG_ON(!d_inode(mntpt));
-error:
- put_page(page);
-error_no_page:
- free_page((unsigned long) options);
-error_no_options:
- free_page((unsigned long) devname);
-error_no_devname:
- _leave(" = %d", ret);
- return ERR_PTR(ret);
+ fc = fs_context_for_submount(&afs_fs_type, mntpt);
+ if (IS_ERR(fc))
+ return ERR_CAST(fc);
+
+ ret = afs_mntpt_set_params(fc, mntpt);
+ if (!ret)
+ mnt = fc_mount(fc);
+ else
+ mnt = ERR_PTR(ret);
+
+ put_fs_context(fc);
+ return mnt;
}
/*
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 2c588f9bbbda..15c7e82d80cb 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -572,13 +572,17 @@ static void afs_deliver_to_call(struct afs_call *call)
case -ENODATA:
case -EBADMSG:
case -EMSGSIZE:
- default:
abort_code = RXGEN_CC_UNMARSHAL;
if (state != AFS_CALL_CL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret, "KUM");
goto local_abort;
+ default:
+ abort_code = RX_USER_ABORT;
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+ abort_code, ret, "KER");
+ goto local_abort;
}
}
@@ -610,6 +614,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
bool stalled = false;
u64 rtt;
u32 life, last_life;
+ bool rxrpc_complete = false;
DECLARE_WAITQUEUE(myself, current);
@@ -621,7 +626,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
rtt2 = 2;
timeout = rtt2;
- last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+ rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
add_wait_queue(&call->waitq, &myself);
for (;;) {
@@ -639,7 +644,12 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
if (afs_check_call_state(call, AFS_CALL_COMPLETE))
break;
- life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+ if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
+ /* rxrpc terminated the call. */
+ rxrpc_complete = true;
+ break;
+ }
+
if (timeout == 0 &&
life == last_life && signal_pending(current)) {
if (stalled)
@@ -663,12 +673,16 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
remove_wait_queue(&call->waitq, &myself);
__set_current_state(TASK_RUNNING);
- /* Kill off the call if it's still live. */
if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
- _debug("call interrupted");
- if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
- RX_USER_ABORT, -EINTR, "KWI"))
- afs_set_call_complete(call, -EINTR, 0);
+ if (rxrpc_complete) {
+ afs_set_call_complete(call, call->error, call->abort_code);
+ } else {
+ /* Kill off the call if it's still live. */
+ _debug("call interrupted");
+ if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+ RX_USER_ABORT, -EINTR, "KWI"))
+ afs_set_call_complete(call, -EINTR, 0);
+ }
}
spin_lock_bh(&call->state_lock);
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 642afa2e9783..65b33b6da48b 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -226,7 +226,6 @@ static struct afs_server *afs_alloc_server(struct afs_net *net,
RCU_INIT_POINTER(server->addresses, alist);
server->addr_version = alist->version;
server->uuid = *uuid;
- server->flags = (1UL << AFS_SERVER_FL_NEW);
server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
rwlock_init(&server->fs_lock);
INIT_HLIST_HEAD(&server->cb_volumes);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index dcd07fe99871..5adf012b8e27 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -1,6 +1,6 @@
/* AFS superblock handling
*
- * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2002, 2007, 2018 Red Hat, Inc. All rights reserved.
*
* This software may be freely redistributed under the terms of the
* GNU General Public License.
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
#include <linux/statfs.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
@@ -30,21 +30,22 @@
#include "internal.h"
static void afs_i_init_once(void *foo);
-static struct dentry *afs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data);
static void afs_kill_super(struct super_block *sb);
static struct inode *afs_alloc_inode(struct super_block *sb);
static void afs_destroy_inode(struct inode *inode);
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int afs_show_devname(struct seq_file *m, struct dentry *root);
static int afs_show_options(struct seq_file *m, struct dentry *root);
+static int afs_init_fs_context(struct fs_context *fc);
+static const struct fs_parameter_description afs_fs_parameters;
struct file_system_type afs_fs_type = {
- .owner = THIS_MODULE,
- .name = "afs",
- .mount = afs_mount,
- .kill_sb = afs_kill_super,
- .fs_flags = 0,
+ .owner = THIS_MODULE,
+ .name = "afs",
+ .init_fs_context = afs_init_fs_context,
+ .parameters = &afs_fs_parameters,
+ .kill_sb = afs_kill_super,
+ .fs_flags = 0,
};
MODULE_ALIAS_FS("afs");
@@ -63,22 +64,22 @@ static const struct super_operations afs_super_ops = {
static struct kmem_cache *afs_inode_cachep;
static atomic_t afs_count_active_inodes;
-enum {
- afs_no_opt,
- afs_opt_cell,
- afs_opt_dyn,
- afs_opt_rwpath,
- afs_opt_vol,
- afs_opt_autocell,
+enum afs_param {
+ Opt_autocell,
+ Opt_dyn,
+ Opt_source,
};
-static const match_table_t afs_options_list = {
- { afs_opt_cell, "cell=%s" },
- { afs_opt_dyn, "dyn" },
- { afs_opt_rwpath, "rwpath" },
- { afs_opt_vol, "vol=%s" },
- { afs_opt_autocell, "autocell" },
- { afs_no_opt, NULL },
+static const struct fs_parameter_spec afs_param_specs[] = {
+ fsparam_flag ("autocell", Opt_autocell),
+ fsparam_flag ("dyn", Opt_dyn),
+ fsparam_string("source", Opt_source),
+ {}
+};
+
+static const struct fs_parameter_description afs_fs_parameters = {
+ .name = "kAFS",
+ .specs = afs_param_specs,
};
/*
@@ -190,84 +191,23 @@ static int afs_show_options(struct seq_file *m, struct dentry *root)
}
/*
- * parse the mount options
- * - this function has been shamelessly adapted from the ext3 fs which
- * shamelessly adapted it from the msdos fs
- */
-static int afs_parse_options(struct afs_mount_params *params,
- char *options, const char **devname)
-{
- struct afs_cell *cell;
- substring_t args[MAX_OPT_ARGS];
- char *p;
- int token;
-
- _enter("%s", options);
-
- options[PAGE_SIZE - 1] = 0;
-
- while ((p = strsep(&options, ","))) {
- if (!*p)
- continue;
-
- token = match_token(p, afs_options_list, args);
- switch (token) {
- case afs_opt_cell:
- rcu_read_lock();
- cell = afs_lookup_cell_rcu(params->net,
- args[0].from,
- args[0].to - args[0].from);
- rcu_read_unlock();
- if (IS_ERR(cell))
- return PTR_ERR(cell);
- afs_put_cell(params->net, params->cell);
- params->cell = cell;
- break;
-
- case afs_opt_rwpath:
- params->rwpath = true;
- break;
-
- case afs_opt_vol:
- *devname = args[0].from;
- break;
-
- case afs_opt_autocell:
- params->autocell = true;
- break;
-
- case afs_opt_dyn:
- params->dyn_root = true;
- break;
-
- default:
- printk(KERN_ERR "kAFS:"
- " Unknown or invalid mount option: '%s'\n", p);
- return -EINVAL;
- }
- }
-
- _leave(" = 0");
- return 0;
-}
-
-/*
- * parse a device name to get cell name, volume name, volume type and R/W
- * selector
- * - this can be one of the following:
+ * Parse the source name to get cell name, volume name, volume type and R/W
+ * selector.
+ *
+ * This can be one of the following:
* "%[cell:]volume[.]" R/W volume
- * "#[cell:]volume[.]" R/O or R/W volume (rwpath=0),
- * or R/W (rwpath=1) volume
+ * "#[cell:]volume[.]" R/O or R/W volume (R/O parent),
+ * or R/W (R/W parent) volume
* "%[cell:]volume.readonly" R/O volume
* "#[cell:]volume.readonly" R/O volume
* "%[cell:]volume.backup" Backup volume
* "#[cell:]volume.backup" Backup volume
*/
-static int afs_parse_device_name(struct afs_mount_params *params,
- const char *name)
+static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
{
+ struct afs_fs_context *ctx = fc->fs_private;
struct afs_cell *cell;
- const char *cellname, *suffix;
+ const char *cellname, *suffix, *name = param->string;
int cellnamesz;
_enter(",%s", name);
@@ -278,69 +218,149 @@ static int afs_parse_device_name(struct afs_mount_params *params,
}
if ((name[0] != '%' && name[0] != '#') || !name[1]) {
+ /* To use dynroot, we don't want to have to provide a source */
+ if (strcmp(name, "none") == 0) {
+ ctx->no_cell = true;
+ return 0;
+ }
printk(KERN_ERR "kAFS: unparsable volume name\n");
return -EINVAL;
}
/* determine the type of volume we're looking for */
- params->type = AFSVL_ROVOL;
- params->force = false;
- if (params->rwpath || name[0] == '%') {
- params->type = AFSVL_RWVOL;
- params->force = true;
+ if (name[0] == '%') {
+ ctx->type = AFSVL_RWVOL;
+ ctx->force = true;
}
name++;
/* split the cell name out if there is one */
- params->volname = strchr(name, ':');
- if (params->volname) {
+ ctx->volname = strchr(name, ':');
+ if (ctx->volname) {
cellname = name;
- cellnamesz = params->volname - name;
- params->volname++;
+ cellnamesz = ctx->volname - name;
+ ctx->volname++;
} else {
- params->volname = name;
+ ctx->volname = name;
cellname = NULL;
cellnamesz = 0;
}
/* the volume type is further affected by a possible suffix */
- suffix = strrchr(params->volname, '.');
+ suffix = strrchr(ctx->volname, '.');
if (suffix) {
if (strcmp(suffix, ".readonly") == 0) {
- params->type = AFSVL_ROVOL;
- params->force = true;
+ ctx->type = AFSVL_ROVOL;
+ ctx->force = true;
} else if (strcmp(suffix, ".backup") == 0) {
- params->type = AFSVL_BACKVOL;
- params->force = true;
+ ctx->type = AFSVL_BACKVOL;
+ ctx->force = true;
} else if (suffix[1] == 0) {
} else {
suffix = NULL;
}
}
- params->volnamesz = suffix ?
- suffix - params->volname : strlen(params->volname);
+ ctx->volnamesz = suffix ?
+ suffix - ctx->volname : strlen(ctx->volname);
_debug("cell %*.*s [%p]",
- cellnamesz, cellnamesz, cellname ?: "", params->cell);
+ cellnamesz, cellnamesz, cellname ?: "", ctx->cell);
/* lookup the cell record */
- if (cellname || !params->cell) {
- cell = afs_lookup_cell(params->net, cellname, cellnamesz,
+ if (cellname) {
+ cell = afs_lookup_cell(ctx->net, cellname, cellnamesz,
NULL, false);
if (IS_ERR(cell)) {
- printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n",
+ pr_err("kAFS: unable to lookup cell '%*.*s'\n",
cellnamesz, cellnamesz, cellname ?: "");
return PTR_ERR(cell);
}
- afs_put_cell(params->net, params->cell);
- params->cell = cell;
+ afs_put_cell(ctx->net, ctx->cell);
+ ctx->cell = cell;
}
_debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
- params->cell->name, params->cell,
- params->volnamesz, params->volnamesz, params->volname,
- suffix ?: "-", params->type, params->force ? " FORCE" : "");
+ ctx->cell->name, ctx->cell,
+ ctx->volnamesz, ctx->volnamesz, ctx->volname,
+ suffix ?: "-", ctx->type, ctx->force ? " FORCE" : "");
+
+ fc->source = param->string;
+ param->string = NULL;
+ return 0;
+}
+
+/*
+ * Parse a single mount parameter.
+ */
+static int afs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct fs_parse_result result;
+ struct afs_fs_context *ctx = fc->fs_private;
+ int opt;
+
+ opt = fs_parse(fc, &afs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_source:
+ return afs_parse_source(fc, param);
+
+ case Opt_autocell:
+ ctx->autocell = true;
+ break;
+
+ case Opt_dyn:
+ ctx->dyn_root = true;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ _leave(" = 0");
+ return 0;
+}
+
+/*
+ * Validate the options, get the cell key and look up the volume.
+ */
+static int afs_validate_fc(struct fs_context *fc)
+{
+ struct afs_fs_context *ctx = fc->fs_private;
+ struct afs_volume *volume;
+ struct key *key;
+
+ if (!ctx->dyn_root) {
+ if (ctx->no_cell) {
+ pr_warn("kAFS: Can only specify source 'none' with -o dyn\n");
+ return -EINVAL;
+ }
+
+ if (!ctx->cell) {
+ pr_warn("kAFS: No cell specified\n");
+ return -EDESTADDRREQ;
+ }
+
+ /* We try to do the mount securely. */
+ key = afs_request_key(ctx->cell);
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ ctx->key = key;
+
+ if (ctx->volume) {
+ afs_put_volume(ctx->cell, ctx->volume);
+ ctx->volume = NULL;
+ }
+
+ volume = afs_create_volume(ctx);
+ if (IS_ERR(volume))
+ return PTR_ERR(volume);
+
+ ctx->volume = volume;
+ }
return 0;
}
@@ -348,39 +368,34 @@ static int afs_parse_device_name(struct afs_mount_params *params,
/*
* check a superblock to see if it's the one we're looking for
*/
-static int afs_test_super(struct super_block *sb, void *data)
+static int afs_test_super(struct super_block *sb, struct fs_context *fc)
{
- struct afs_super_info *as1 = data;
+ struct afs_fs_context *ctx = fc->fs_private;
struct afs_super_info *as = AFS_FS_S(sb);
- return (as->net_ns == as1->net_ns &&
+ return (as->net_ns == fc->net_ns &&
as->volume &&
- as->volume->vid == as1->volume->vid &&
+ as->volume->vid == ctx->volume->vid &&
!as->dyn_root);
}
-static int afs_dynroot_test_super(struct super_block *sb, void *data)
+static int afs_dynroot_test_super(struct super_block *sb, struct fs_context *fc)
{
- struct afs_super_info *as1 = data;
struct afs_super_info *as = AFS_FS_S(sb);
- return (as->net_ns == as1->net_ns &&
+ return (as->net_ns == fc->net_ns &&
as->dyn_root);
}
-static int afs_set_super(struct super_block *sb, void *data)
+static int afs_set_super(struct super_block *sb, struct fs_context *fc)
{
- struct afs_super_info *as = data;
-
- sb->s_fs_info = as;
return set_anon_super(sb, NULL);
}
/*
* fill in the superblock
*/
-static int afs_fill_super(struct super_block *sb,
- struct afs_mount_params *params)
+static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
{
struct afs_super_info *as = AFS_FS_S(sb);
struct afs_fid fid;
@@ -399,7 +414,7 @@ static int afs_fill_super(struct super_block *sb,
ret = super_setup_bdi(sb);
if (ret)
return ret;
- sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
+ sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
/* allocate the root inode and dentry */
if (as->dyn_root) {
@@ -412,13 +427,13 @@ static int afs_fill_super(struct super_block *sb,
fid.vnode = 1;
fid.vnode_hi = 0;
fid.unique = 1;
- inode = afs_iget(sb, params->key, &fid, NULL, NULL, NULL);
+ inode = afs_iget(sb, ctx->key, &fid, NULL, NULL, NULL);
}
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (params->autocell || params->dyn_root)
+ if (ctx->autocell || as->dyn_root)
set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
ret = -ENOMEM;
@@ -443,17 +458,20 @@ error:
return ret;
}
-static struct afs_super_info *afs_alloc_sbi(struct afs_mount_params *params)
+static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
{
+ struct afs_fs_context *ctx = fc->fs_private;
struct afs_super_info *as;
as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
if (as) {
- as->net_ns = get_net(params->net_ns);
- if (params->dyn_root)
+ as->net_ns = get_net(fc->net_ns);
+ if (ctx->dyn_root) {
as->dyn_root = true;
- else
- as->cell = afs_get_cell(params->cell);
+ } else {
+ as->cell = afs_get_cell(ctx->cell);
+ as->volume = __afs_get_volume(ctx->volume);
+ }
}
return as;
}
@@ -475,7 +493,7 @@ static void afs_kill_super(struct super_block *sb)
if (as->dyn_root)
afs_dynroot_depopulate(sb);
-
+
/* Clear the callback interests (which will do ilookup5) before
* deactivating the superblock.
*/
@@ -488,111 +506,103 @@ static void afs_kill_super(struct super_block *sb)
}
/*
- * get an AFS superblock
+ * Get an AFS superblock and root directory.
*/
-static struct dentry *afs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *options)
+static int afs_get_tree(struct fs_context *fc)
{
- struct afs_mount_params params;
+ struct afs_fs_context *ctx = fc->fs_private;
struct super_block *sb;
- struct afs_volume *candidate;
- struct key *key;
struct afs_super_info *as;
int ret;
- _enter(",,%s,%p", dev_name, options);
-
- memset(&params, 0, sizeof(params));
-
- ret = -EINVAL;
- if (current->nsproxy->net_ns != &init_net)
+ ret = afs_validate_fc(fc);
+ if (ret)
goto error;
- params.net_ns = current->nsproxy->net_ns;
- params.net = afs_net(params.net_ns);
-
- /* parse the options and device name */
- if (options) {
- ret = afs_parse_options(&params, options, &dev_name);
- if (ret < 0)
- goto error;
- }
-
- if (!params.dyn_root) {
- ret = afs_parse_device_name(&params, dev_name);
- if (ret < 0)
- goto error;
- /* try and do the mount securely */
- key = afs_request_key(params.cell);
- if (IS_ERR(key)) {
- _leave(" = %ld [key]", PTR_ERR(key));
- ret = PTR_ERR(key);
- goto error;
- }
- params.key = key;
- }
+ _enter("");
/* allocate a superblock info record */
ret = -ENOMEM;
- as = afs_alloc_sbi(&params);
+ as = afs_alloc_sbi(fc);
if (!as)
- goto error_key;
-
- if (!params.dyn_root) {
- /* Assume we're going to need a volume record; at the very
- * least we can use it to update the volume record if we have
- * one already. This checks that the volume exists within the
- * cell.
- */
- candidate = afs_create_volume(&params);
- if (IS_ERR(candidate)) {
- ret = PTR_ERR(candidate);
- goto error_as;
- }
-
- as->volume = candidate;
- }
+ goto error;
+ fc->s_fs_info = as;
/* allocate a deviceless superblock */
- sb = sget(fs_type,
- as->dyn_root ? afs_dynroot_test_super : afs_test_super,
- afs_set_super, flags, as);
+ sb = sget_fc(fc,
+ as->dyn_root ? afs_dynroot_test_super : afs_test_super,
+ afs_set_super);
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
- goto error_as;
+ goto error;
}
if (!sb->s_root) {
/* initial superblock/root creation */
_debug("create");
- ret = afs_fill_super(sb, &params);
+ ret = afs_fill_super(sb, ctx);
if (ret < 0)
goto error_sb;
- as = NULL;
sb->s_flags |= SB_ACTIVE;
} else {
_debug("reuse");
ASSERTCMP(sb->s_flags, &, SB_ACTIVE);
- afs_destroy_sbi(as);
- as = NULL;
}
- afs_put_cell(params.net, params.cell);
- key_put(params.key);
+ fc->root = dget(sb->s_root);
_leave(" = 0 [%p]", sb);
- return dget(sb->s_root);
+ return 0;
error_sb:
deactivate_locked_super(sb);
- goto error_key;
-error_as:
- afs_destroy_sbi(as);
-error_key:
- key_put(params.key);
error:
- afs_put_cell(params.net, params.cell);
_leave(" = %d", ret);
- return ERR_PTR(ret);
+ return ret;
+}
+
+static void afs_free_fc(struct fs_context *fc)
+{
+ struct afs_fs_context *ctx = fc->fs_private;
+
+ afs_destroy_sbi(fc->s_fs_info);
+ afs_put_volume(ctx->cell, ctx->volume);
+ afs_put_cell(ctx->net, ctx->cell);
+ key_put(ctx->key);
+ kfree(ctx);
+}
+
+static const struct fs_context_operations afs_context_ops = {
+ .free = afs_free_fc,
+ .parse_param = afs_parse_param,
+ .get_tree = afs_get_tree,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int afs_init_fs_context(struct fs_context *fc)
+{
+ struct afs_fs_context *ctx;
+ struct afs_cell *cell;
+
+ ctx = kzalloc(sizeof(struct afs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->type = AFSVL_ROVOL;
+ ctx->net = afs_net(fc->net_ns);
+
+ /* Default to the workstation cell. */
+ rcu_read_lock();
+ cell = afs_lookup_cell_rcu(ctx->net, NULL, 0);
+ rcu_read_unlock();
+ if (IS_ERR(cell))
+ cell = NULL;
+ ctx->cell = cell;
+
+ fc->fs_private = ctx;
+ fc->ops = &afs_context_ops;
+ return 0;
}
/*
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 00975ed3640f..f6eba2def0a1 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -21,7 +21,7 @@ static const char *const afs_voltypes[] = { "R/W", "R/O", "BAK" };
/*
* Allocate a volume record and load it up from a vldb record.
*/
-static struct afs_volume *afs_alloc_volume(struct afs_mount_params *params,
+static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
struct afs_vldb_entry *vldb,
unsigned long type_mask)
{
@@ -113,7 +113,7 @@ static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell,
* - Rule 3: If parent volume is R/W, then only mount R/W volume unless
* explicitly told otherwise
*/
-struct afs_volume *afs_create_volume(struct afs_mount_params *params)
+struct afs_volume *afs_create_volume(struct afs_fs_context *params)
{
struct afs_vldb_entry *vldb;
struct afs_volume *volume;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 72efcfcf9f95..0122d7445fba 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -264,6 +264,7 @@ static void afs_kill_pages(struct address_space *mapping,
first = page->index + 1;
lock_page(page);
generic_error_remove_page(mapping, page);
+ unlock_page(page);
}
__pagevec_release(&pv);
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 5aa57929e8c2..6e97a42d24d1 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
bp = xdr_encode_u32(bp, 0); /* RPC flags */
bp = xdr_encode_YFSFid(bp, &vnode->fid);
bp = xdr_encode_YFS_StoreStatus(bp, attr);
- bp = xdr_encode_u64(bp, 0); /* position of start of write */
+ bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */
bp = xdr_encode_u64(bp, 0); /* size of write */
bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
yfs_check_req(call, bp);
diff --git a/fs/aio.c b/fs/aio.c
index 38b741aef0bf..3490d1fa0e16 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,7 +181,7 @@ struct poll_iocb {
struct file *file;
struct wait_queue_head *head;
__poll_t events;
- bool woken;
+ bool done;
bool cancelled;
struct wait_queue_entry wait;
struct work_struct work;
@@ -204,8 +204,7 @@ struct aio_kiocb {
struct kioctx *ki_ctx;
kiocb_cancel_fn *ki_cancel;
- struct iocb __user *ki_user_iocb; /* user's aiocb */
- __u64 ki_user_data; /* user's data for completion */
+ struct io_event ki_res;
struct list_head ki_list; /* the aio core uses this
* for cancellation */
@@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
/* aio_get_req
* Allocate a slot for an aio request.
* Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
*/
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
@@ -1031,10 +1033,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
if (unlikely(!req))
return NULL;
+ if (unlikely(!get_reqs_available(ctx))) {
+ kmem_cache_free(kiocb_cachep, req);
+ return NULL;
+ }
+
percpu_ref_get(&ctx->reqs);
req->ki_ctx = ctx;
INIT_LIST_HEAD(&req->ki_list);
- refcount_set(&req->ki_refcnt, 0);
+ refcount_set(&req->ki_refcnt, 2);
req->ki_eventfd = NULL;
return req;
}
@@ -1067,30 +1074,20 @@ out:
return ret;
}
-static inline void iocb_put(struct aio_kiocb *iocb)
-{
- if (refcount_read(&iocb->ki_refcnt) == 0 ||
- refcount_dec_and_test(&iocb->ki_refcnt)) {
- if (iocb->ki_filp)
- fput(iocb->ki_filp);
- percpu_ref_put(&iocb->ki_ctx->reqs);
- kmem_cache_free(kiocb_cachep, iocb);
- }
-}
-
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
- long res, long res2)
+static inline void iocb_destroy(struct aio_kiocb *iocb)
{
- ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
- ev->data = iocb->ki_user_data;
- ev->res = res;
- ev->res2 = res2;
+ if (iocb->ki_eventfd)
+ eventfd_ctx_put(iocb->ki_eventfd);
+ if (iocb->ki_filp)
+ fput(iocb->ki_filp);
+ percpu_ref_put(&iocb->ki_ctx->reqs);
+ kmem_cache_free(kiocb_cachep, iocb);
}
/* aio_complete
* Called when the io request on the given iocb is complete.
*/
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
{
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring *ring;
@@ -1114,14 +1111,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
event = ev_page + pos % AIO_EVENTS_PER_PAGE;
- aio_fill_event(event, iocb, res, res2);
+ *event = iocb->ki_res;
kunmap_atomic(ev_page);
flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
- pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
- ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
- res, res2);
+ pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+ (void __user *)(unsigned long)iocb->ki_res.obj,
+ iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
/* after flagging the request as done, we
* must never even look at it again
@@ -1148,10 +1145,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
* eventfd. The eventfd_signal() function is safe to be called
* from IRQ context.
*/
- if (iocb->ki_eventfd) {
+ if (iocb->ki_eventfd)
eventfd_signal(iocb->ki_eventfd, 1);
- eventfd_ctx_put(iocb->ki_eventfd);
- }
/*
* We have to order our ring_info tail store above and test
@@ -1163,7 +1158,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
- iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+ if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+ aio_complete(iocb);
+ iocb_destroy(iocb);
+ }
}
/* aio_read_events_ring
@@ -1437,7 +1439,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
file_end_write(kiocb->ki_filp);
}
- aio_complete(iocb, res, res2);
+ iocb->ki_res.res = res;
+ iocb->ki_res.res2 = res2;
+ iocb_put(iocb);
}
static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
@@ -1514,13 +1518,13 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
}
}
-static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+static int aio_read(struct kiocb *req, const struct iocb *iocb,
bool vectored, bool compat)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter iter;
struct file *file;
- ssize_t ret;
+ int ret;
ret = aio_prep_rw(req, iocb);
if (ret)
@@ -1542,13 +1546,13 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
return ret;
}
-static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+static int aio_write(struct kiocb *req, const struct iocb *iocb,
bool vectored, bool compat)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter iter;
struct file *file;
- ssize_t ret;
+ int ret;
ret = aio_prep_rw(req, iocb);
if (ret)
@@ -1585,11 +1589,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
static void aio_fsync_work(struct work_struct *work)
{
- struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
- int ret;
+ struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
- ret = vfs_fsync(req->file, req->datasync);
- aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+ iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+ iocb_put(iocb);
}
static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
@@ -1608,11 +1611,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
return 0;
}
-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
- aio_complete(iocb, mangle_poll(mask), 0);
-}
-
static void aio_poll_complete_work(struct work_struct *work)
{
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,9 +1636,11 @@ static void aio_poll_complete_work(struct work_struct *work)
return;
}
list_del_init(&iocb->ki_list);
+ iocb->ki_res.res = mangle_poll(mask);
+ req->done = true;
spin_unlock_irq(&ctx->ctx_lock);
- aio_poll_complete(iocb, mask);
+ iocb_put(iocb);
}
/* assumes we are called with irqs disabled */
@@ -1668,31 +1668,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
__poll_t mask = key_to_poll(key);
unsigned long flags;
- req->woken = true;
-
/* for instances that support it check for an event match first: */
- if (mask) {
- if (!(mask & req->events))
- return 0;
+ if (mask && !(mask & req->events))
+ return 0;
+
+ list_del_init(&req->wait.entry);
+ if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
/*
* Try to complete the iocb inline if we can. Use
* irqsave/irqrestore because not all filesystems (e.g. fuse)
* call this function with IRQs disabled and because IRQs
* have to be disabled before ctx_lock is obtained.
*/
- if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
- list_del(&iocb->ki_list);
- spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
- list_del_init(&req->wait.entry);
- aio_poll_complete(iocb, mask);
- return 1;
- }
+ list_del(&iocb->ki_list);
+ iocb->ki_res.res = mangle_poll(mask);
+ req->done = true;
+ spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+ iocb_put(iocb);
+ } else {
+ schedule_work(&req->work);
}
-
- list_del_init(&req->wait.entry);
- schedule_work(&req->work);
return 1;
}
@@ -1719,11 +1715,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
add_wait_queue(head, &pt->iocb->poll.wait);
}
-static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
{
struct kioctx *ctx = aiocb->ki_ctx;
struct poll_iocb *req = &aiocb->poll;
struct aio_poll_table apt;
+ bool cancel = false;
__poll_t mask;
/* reject any unknown events outside the normal event mask. */
@@ -1737,7 +1734,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
req->head = NULL;
- req->woken = false;
+ req->done = false;
req->cancelled = false;
apt.pt._qproc = aio_poll_queue_proc;
@@ -1749,156 +1746,135 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
INIT_LIST_HEAD(&req->wait.entry);
init_waitqueue_func_entry(&req->wait, aio_poll_wake);
- /* one for removal from waitqueue, one for this function */
- refcount_set(&aiocb->ki_refcnt, 2);
-
mask = vfs_poll(req->file, &apt.pt) & req->events;
- if (unlikely(!req->head)) {
- /* we did not manage to set up a waitqueue, done */
- goto out;
- }
-
spin_lock_irq(&ctx->ctx_lock);
- spin_lock(&req->head->lock);
- if (req->woken) {
- /* wake_up context handles the rest */
- mask = 0;
+ if (likely(req->head)) {
+ spin_lock(&req->head->lock);
+ if (unlikely(list_empty(&req->wait.entry))) {
+ if (apt.error)
+ cancel = true;
+ apt.error = 0;
+ mask = 0;
+ }
+ if (mask || apt.error) {
+ list_del_init(&req->wait.entry);
+ } else if (cancel) {
+ WRITE_ONCE(req->cancelled, true);
+ } else if (!req->done) { /* actually waiting for an event */
+ list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+ aiocb->ki_cancel = aio_poll_cancel;
+ }
+ spin_unlock(&req->head->lock);
+ }
+ if (mask) { /* no async, we'd stolen it */
+ aiocb->ki_res.res = mangle_poll(mask);
apt.error = 0;
- } else if (mask || apt.error) {
- /* if we get an error or a mask we are done */
- WARN_ON_ONCE(list_empty(&req->wait.entry));
- list_del_init(&req->wait.entry);
- } else {
- /* actually waiting for an event */
- list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
- aiocb->ki_cancel = aio_poll_cancel;
}
- spin_unlock(&req->head->lock);
spin_unlock_irq(&ctx->ctx_lock);
-
-out:
- if (unlikely(apt.error))
- return apt.error;
-
if (mask)
- aio_poll_complete(aiocb, mask);
- iocb_put(aiocb);
- return 0;
+ iocb_put(aiocb);
+ return apt.error;
}
static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
- struct iocb __user *user_iocb, bool compat)
+ struct iocb __user *user_iocb, struct aio_kiocb *req,
+ bool compat)
{
- struct aio_kiocb *req;
- ssize_t ret;
-
- /* enforce forwards compatibility on users */
- if (unlikely(iocb->aio_reserved2)) {
- pr_debug("EINVAL: reserve field set\n");
- return -EINVAL;
- }
-
- /* prevent overflows */
- if (unlikely(
- (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
- (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
- ((ssize_t)iocb->aio_nbytes < 0)
- )) {
- pr_debug("EINVAL: overflow check\n");
- return -EINVAL;
- }
-
- if (!get_reqs_available(ctx))
- return -EAGAIN;
-
- ret = -EAGAIN;
- req = aio_get_req(ctx);
- if (unlikely(!req))
- goto out_put_reqs_available;
-
req->ki_filp = fget(iocb->aio_fildes);
- ret = -EBADF;
if (unlikely(!req->ki_filp))
- goto out_put_req;
+ return -EBADF;
if (iocb->aio_flags & IOCB_FLAG_RESFD) {
+ struct eventfd_ctx *eventfd;
/*
* If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
* instance of the file* now. The file descriptor must be
* an eventfd() fd, and will be signaled for each completed
* event using the eventfd_signal() function.
*/
- req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
- if (IS_ERR(req->ki_eventfd)) {
- ret = PTR_ERR(req->ki_eventfd);
- req->ki_eventfd = NULL;
- goto out_put_req;
- }
+ eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ req->ki_eventfd = eventfd;
}
- ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
- if (unlikely(ret)) {
+ if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
pr_debug("EFAULT: aio_key\n");
- goto out_put_req;
+ return -EFAULT;
}
- req->ki_user_iocb = user_iocb;
- req->ki_user_data = iocb->aio_data;
+ req->ki_res.obj = (u64)(unsigned long)user_iocb;
+ req->ki_res.data = iocb->aio_data;
+ req->ki_res.res = 0;
+ req->ki_res.res2 = 0;
switch (iocb->aio_lio_opcode) {
case IOCB_CMD_PREAD:
- ret = aio_read(&req->rw, iocb, false, compat);
- break;
+ return aio_read(&req->rw, iocb, false, compat);
case IOCB_CMD_PWRITE:
- ret = aio_write(&req->rw, iocb, false, compat);
- break;
+ return aio_write(&req->rw, iocb, false, compat);
case IOCB_CMD_PREADV:
- ret = aio_read(&req->rw, iocb, true, compat);
- break;
+ return aio_read(&req->rw, iocb, true, compat);
case IOCB_CMD_PWRITEV:
- ret = aio_write(&req->rw, iocb, true, compat);
- break;
+ return aio_write(&req->rw, iocb, true, compat);
case IOCB_CMD_FSYNC:
- ret = aio_fsync(&req->fsync, iocb, false);
- break;
+ return aio_fsync(&req->fsync, iocb, false);
case IOCB_CMD_FDSYNC:
- ret = aio_fsync(&req->fsync, iocb, true);
- break;
+ return aio_fsync(&req->fsync, iocb, true);
case IOCB_CMD_POLL:
- ret = aio_poll(req, iocb);
- break;
+ return aio_poll(req, iocb);
default:
pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
- /*
- * If ret is 0, we'd either done aio_complete() ourselves or have
- * arranged for that to be done asynchronously. Anything non-zero
- * means that we need to destroy req ourselves.
- */
- if (ret)
- goto out_put_req;
- return 0;
-out_put_req:
- if (req->ki_eventfd)
- eventfd_ctx_put(req->ki_eventfd);
- iocb_put(req);
-out_put_reqs_available:
- put_reqs_available(ctx, 1);
- return ret;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
bool compat)
{
+ struct aio_kiocb *req;
struct iocb iocb;
+ int err;
if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
return -EFAULT;
- return __io_submit_one(ctx, &iocb, user_iocb, compat);
+ /* enforce forwards compatibility on users */
+ if (unlikely(iocb.aio_reserved2)) {
+ pr_debug("EINVAL: reserve field set\n");
+ return -EINVAL;
+ }
+
+ /* prevent overflows */
+ if (unlikely(
+ (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
+ (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
+ ((ssize_t)iocb.aio_nbytes < 0)
+ )) {
+ pr_debug("EINVAL: overflow check\n");
+ return -EINVAL;
+ }
+
+ req = aio_get_req(ctx);
+ if (unlikely(!req))
+ return -EAGAIN;
+
+ err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
+
+ /* Done with the synchronous reference */
+ iocb_put(req);
+
+ /*
+	 * If err is 0, we've either done aio_complete() ourselves or have
+ * arranged for that to be done asynchronously. Anything non-zero
+ * means that we need to destroy req ourselves.
+ */
+ if (unlikely(err)) {
+ iocb_destroy(req);
+ put_reqs_available(ctx, 1);
+ }
+ return err;
}
/* sys_io_submit:
@@ -1997,24 +1973,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
}
#endif
-/* lookup_kiocb
- * Finds a given iocb for cancellation.
- */
-static struct aio_kiocb *
-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
-{
- struct aio_kiocb *kiocb;
-
- assert_spin_locked(&ctx->ctx_lock);
-
- /* TODO: use a hash or array, this sucks. */
- list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
- if (kiocb->ki_user_iocb == iocb)
- return kiocb;
- }
- return NULL;
-}
-
/* sys_io_cancel:
* Attempts to cancel an iocb previously passed to io_submit. If
* the operation is successfully cancelled, the resulting event is
@@ -2032,6 +1990,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
struct aio_kiocb *kiocb;
int ret = -EINVAL;
u32 key;
+ u64 obj = (u64)(unsigned long)iocb;
if (unlikely(get_user(key, &iocb->aio_key)))
return -EFAULT;
@@ -2043,10 +2002,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
return -EINVAL;
spin_lock_irq(&ctx->ctx_lock);
- kiocb = lookup_kiocb(ctx, iocb);
- if (kiocb) {
- ret = kiocb->ki_cancel(&kiocb->rw);
- list_del_init(&kiocb->ki_list);
+ /* TODO: use a hash or array, this sucks. */
+ list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+ if (kiocb->ki_res.obj == obj) {
+ ret = kiocb->ki_cancel(&kiocb->rw);
+ list_del_init(&kiocb->ki_list);
+ break;
+ }
}
spin_unlock_irq(&ctx->ctx_lock);
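/*
 * Illustrative sketch (not part of the patch): the two-reference request
 * lifecycle the aio changes above introduce. A request is born with two
 * references, one owned by the submitting task and one by the async
 * completion; the event is only published once the last reference drops,
 * so neither side can observe a freed request. All sketch_* names are
 * hypothetical.
 */
#include <linux/refcount.h>

struct sketch_req {
	refcount_t ref;
};

static void sketch_publish_event(struct sketch_req *req) { }	/* ring write */
static void sketch_destroy(struct sketch_req *req) { }		/* fput() etc. */

static void sketch_put(struct sketch_req *req)
{
	if (refcount_dec_and_test(&req->ref)) {
		sketch_publish_event(req);
		sketch_destroy(req);
	}
}

static void sketch_submit(struct sketch_req *req)
{
	refcount_set(&req->ref, 2);	/* submitter + completion */
	/* start the async operation; its completion path calls sketch_put() */
	sketch_put(req);		/* drop the submitter's reference */
}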
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e9faa52bb489..bb28e2ead679 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -264,7 +264,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
bio_for_each_segment_all(bvec, &bio, i, iter_all) {
if (should_dirty && !PageCompound(bvec->bv_page))
set_page_dirty_lock(bvec->bv_page);
- put_page(bvec->bv_page);
+ if (!bio_flagged(&bio, BIO_NO_PAGE_REF))
+ put_page(bvec->bv_page);
}
if (unlikely(bio.bi_status))
@@ -307,10 +308,10 @@ static void blkdev_bio_end_io(struct bio *bio)
struct blkdev_dio *dio = bio->bi_private;
bool should_dirty = dio->should_dirty;
- if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
- if (bio->bi_status && !dio->bio.bi_status)
- dio->bio.bi_status = bio->bi_status;
- } else {
+ if (bio->bi_status && !dio->bio.bi_status)
+ dio->bio.bi_status = bio->bi_status;
+
+ if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
if (!dio->is_sync) {
struct kiocb *iocb = dio->iocb;
ssize_t ret;
@@ -336,12 +337,14 @@ static void blkdev_bio_end_io(struct bio *bio)
if (should_dirty) {
bio_check_pages_dirty(bio);
} else {
- struct bio_vec *bvec;
- int i;
- struct bvec_iter_all iter_all;
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+ struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
+ int i;
- bio_for_each_segment_all(bvec, bio, i, iter_all)
- put_page(bvec->bv_page);
+ bio_for_each_segment_all(bvec, bio, i, iter_all)
+ put_page(bvec->bv_page);
+ }
bio_put(bio);
}
}
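/*
 * Illustrative sketch (not part of the patch): completion paths now skip
 * the per-page reference drop when BIO_NO_PAGE_REF is set, i.e. when the
 * pages came from an iov_iter that did not take page references.
 */
static void sketch_release_bio_pages(struct bio *bio)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;
	int i;

	if (bio_flagged(bio, BIO_NO_PAGE_REF))
		return;		/* pages were never pinned, nothing to put */

	bio_for_each_segment_all(bvec, bio, i, iter_all)
		put_page(bvec->bv_page);
}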
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 129d26226e70..b3642367a595 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1210,6 +1210,8 @@ enum {
* Set for the subvolume tree owning the reloc tree.
*/
BTRFS_ROOT_DEAD_RELOC_TREE,
+ /* Mark dead root stored on device whose cleanup needs to be resumed */
+ BTRFS_ROOT_DEAD_TREE,
};
/*
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f0cdb53f3e2d..6fe9197f6ee4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2958,7 +2958,7 @@ int open_ctree(struct super_block *sb,
sb->s_bdi->congested_fn = btrfs_congested_fn;
sb->s_bdi->congested_data = fs_info;
sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
- sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE;
+ sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 994f0cc41799..c5880329ae37 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6174,7 +6174,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
*
* This is overestimating in most cases.
*/
- qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
+ qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
spin_lock(&block_rsv->lock);
block_rsv->size = reserve_size;
@@ -8764,6 +8764,8 @@ struct walk_control {
u64 refs[BTRFS_MAX_LEVEL];
u64 flags[BTRFS_MAX_LEVEL];
struct btrfs_key update_progress;
+ struct btrfs_key drop_progress;
+ int drop_level;
int stage;
int level;
int shared_level;
@@ -8771,6 +8773,7 @@ struct walk_control {
int keep_locks;
int reada_slot;
int reada_count;
+ int restarted;
};
#define DROP_REFERENCE 1
@@ -8934,6 +8937,33 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
}
/*
+ * This is used to verify a ref exists for this root to deal with a bug where we
+ * would have a drop_progress key that hadn't been updated properly.
+ */
+static int check_ref_exists(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 bytenr, u64 parent,
+ int level)
+{
+ struct btrfs_path *path;
+ struct btrfs_extent_inline_ref *iref;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = lookup_extent_backref(trans, path, &iref, bytenr,
+ root->fs_info->nodesize, parent,
+ root->root_key.objectid, level, 0);
+ btrfs_free_path(path);
+ if (ret == -ENOENT)
+ return 0;
+ if (ret < 0)
+ return ret;
+ return 1;
+}
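/*
 * Illustrative sketch (not part of the patch): how a restarted walk can
 * use a ref-existence check like check_ref_exists() above before dropping
 * a reference it may already have dropped. sketch_drop_one() and the
 * restarted flag handling are hypothetical simplifications.
 */
static int sketch_drop_one(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytenr,
			   u64 parent, int level, bool *restarted)
{
	if (*restarted) {
		int ret = check_ref_exists(trans, root, bytenr, parent, level);

		if (ret < 0)
			return ret;
		if (ret == 0)
			return 0;	/* already dropped before the restart */
		*restarted = false;	/* refs are consistent from here on */
	}
	return btrfs_free_extent(trans, root, bytenr,
				 root->fs_info->nodesize, parent,
				 root->root_key.objectid, level, 0);
}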
+
+/*
* helper to process tree block pointer.
*
* when wc->stage == DROP_REFERENCE, this function checks
@@ -9088,6 +9118,23 @@ skip:
}
/*
+ * If we had a drop_progress we need to verify the refs are set
+ * as expected. If we find our ref then we know that from here
+ * on out everything should be correct, and we can clear the
+ * ->restarted flag.
+ */
+ if (wc->restarted) {
+ ret = check_ref_exists(trans, root, bytenr, parent,
+ level - 1);
+ if (ret < 0)
+ goto out_unlock;
+ if (ret == 0)
+ goto no_delete;
+ ret = 0;
+ wc->restarted = 0;
+ }
+
+ /*
* Reloc tree doesn't contribute to qgroup numbers, and we have
* already accounted them at merge time (replace_path),
* thus we could skip expensive subtree trace here.
@@ -9102,13 +9149,23 @@ skip:
ret);
}
}
+
+ /*
+ * We need to update the next key in our walk control so we can
+ * update the drop_progress key accordingly. We don't care if
+ * find_next_key doesn't find a key because that means we're at
+ * the end and are going to clean up now.
+ */
+ wc->drop_level = level;
+ find_next_key(path, level, &wc->drop_progress);
+
ret = btrfs_free_extent(trans, root, bytenr, fs_info->nodesize,
parent, root->root_key.objectid,
level - 1, 0);
if (ret)
goto out_unlock;
}
-
+no_delete:
*lookup_info = 1;
ret = 1;
@@ -9425,6 +9482,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
}
}
+ wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
wc->level = level;
wc->shared_level = -1;
wc->stage = DROP_REFERENCE;
@@ -9452,12 +9510,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
}
if (wc->stage == DROP_REFERENCE) {
- level = wc->level;
- btrfs_node_key(path->nodes[level],
- &root_item->drop_progress,
- path->slots[level]);
- root_item->drop_level = level;
- }
+ wc->drop_level = wc->level;
+ btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
+ &wc->drop_progress,
+ path->slots[wc->drop_level]);
+ }
+ btrfs_cpu_key_to_disk(&root_item->drop_progress,
+ &wc->drop_progress);
+ root_item->drop_level = wc->drop_level;
BUG_ON(wc->level == 0);
if (btrfs_should_end_transaction(trans) ||
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ab705183d749..ca8b8e785cf3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2995,11 +2995,11 @@ static int __do_readpage(struct extent_io_tree *tree,
*/
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
prev_em_start && *prev_em_start != (u64)-1 &&
- *prev_em_start != em->orig_start)
+ *prev_em_start != em->start)
force_bio_submit = true;
if (prev_em_start)
- *prev_em_start = em->orig_start;
+ *prev_em_start = em->start;
free_extent_map(em);
em = NULL;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 920bf3b4b0ef..cccc75d15970 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
+#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -427,9 +428,13 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
unsigned long this_sum_bytes = 0;
int i;
u64 offset;
+ unsigned nofs_flag;
+
+ nofs_flag = memalloc_nofs_save();
+ sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
+ GFP_KERNEL);
+ memalloc_nofs_restore(nofs_flag);
- sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
- GFP_NOFS);
if (!sums)
return BLK_STS_RESOURCE;
@@ -472,8 +477,10 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
bytes_left = bio->bi_iter.bi_size - total_bytes;
- sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
- GFP_NOFS);
+ nofs_flag = memalloc_nofs_save();
+ sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
+ bytes_left), GFP_KERNEL);
+ memalloc_nofs_restore(nofs_flag);
BUG_ON(!sums); /* -ENOMEM */
sums->len = bytes_left;
ordered = btrfs_lookup_ordered_extent(inode,
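/*
 * Illustrative sketch (not part of the patch): the scoped-NOFS allocation
 * pattern the csum hunks above switch to. Marking the task with
 * memalloc_nofs_save() and then allocating with GFP_KERNEL gives NOFS
 * semantics while still letting kvzalloc() fall back to vmalloc() for
 * large checksum arrays, which a plain GFP_NOFS kzalloc() cannot do.
 */
#include <linux/sched/mm.h>
#include <linux/mm.h>

static void *sketch_nofs_kvzalloc(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();	/* enter scoped NOFS context */
	p = kvzalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	return p;				/* release with kvfree() */
}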
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 82fdda8ff5ab..2973608824ec 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6783,7 +6783,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
u64 extent_start = 0;
u64 extent_end = 0;
u64 objectid = btrfs_ino(inode);
- u8 extent_type;
+ int extent_type = -1;
struct btrfs_path *path = NULL;
struct btrfs_root *root = inode->root;
struct btrfs_file_extent_item *item;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 494f0f10d70e..cd4e693406a0 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -501,6 +501,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ /*
+ * If the fs is mounted with nologreplay, which requires it to be
+	 * mounted in RO mode as well, we cannot allow discard on free space
+ * inside block groups, because log trees refer to extents that are not
+ * pinned in a block group's free space cache (pinning the extents is
+ * precisely the first phase of replaying a log tree).
+ */
+ if (btrfs_test_opt(fs_info, NOLOGREPLAY))
+ return -EROFS;
+
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
dev_list) {
@@ -3207,21 +3217,6 @@ out:
return ret;
}
-static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
-{
- inode_unlock(inode1);
- inode_unlock(inode2);
-}
-
-static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
-{
- if (inode1 < inode2)
- swap(inode1, inode2);
-
- inode_lock_nested(inode1, I_MUTEX_PARENT);
- inode_lock_nested(inode2, I_MUTEX_CHILD);
-}
-
static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
struct inode *inode2, u64 loff2, u64 len)
{
@@ -3956,7 +3951,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
if (same_inode)
inode_lock(inode_in);
else
- btrfs_double_inode_lock(inode_in, inode_out);
+ lock_two_nondirectories(inode_in, inode_out);
/* don't make the dst file partly checksummed */
if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
@@ -4013,7 +4008,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
if (same_inode)
inode_unlock(inode_in);
else
- btrfs_double_inode_unlock(inode_in, inode_out);
+ unlock_two_nondirectories(inode_in, inode_out);
return ret;
}
@@ -4043,7 +4038,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
if (same_inode)
inode_unlock(src_inode);
else
- btrfs_double_inode_unlock(src_inode, dst_inode);
+ unlock_two_nondirectories(src_inode, dst_inode);
return ret < 0 ? ret : len;
}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 6fde2b2741ef..45e3cfd1198b 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
+#include <linux/sched/mm.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
@@ -442,7 +443,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
cur = entry->list.next;
sum = list_entry(cur, struct btrfs_ordered_sum, list);
list_del(&sum->list);
- kfree(sum);
+ kvfree(sum);
}
kmem_cache_free(btrfs_ordered_extent_cache, entry);
}
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index dc6140013ae8..61d22a56c0ba 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
static int prop_compression_validate(const char *value, size_t len)
{
- if (!strncmp("lzo", value, len))
+ if (!strncmp("lzo", value, 3))
return 0;
- else if (!strncmp("zlib", value, len))
+ else if (!strncmp("zlib", value, 4))
return 0;
- else if (!strncmp("zstd", value, len))
+ else if (!strncmp("zstd", value, 4))
return 0;
return -EINVAL;
@@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
} else if (!strncmp("zlib", value, 4)) {
type = BTRFS_COMPRESS_ZLIB;
- } else if (!strncmp("zstd", value, len)) {
+ } else if (!strncmp("zstd", value, 4)) {
type = BTRFS_COMPRESS_ZSTD;
btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
} else {
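/*
 * Illustrative sketch (not part of the patch): why the validate fix above
 * compares against each keyword's own length rather than the caller's
 * len. With the old code, strncmp("zstd", "zst", 3) == 0, so the bogus
 * value "zst" passed validation. A minimal corrected check:
 */
static int sketch_compression_validate(const char *value)
{
	if (!strncmp("lzo", value, 3) ||
	    !strncmp("zlib", value, 4) ||
	    !strncmp("zstd", value, 4))
		return 0;

	return -EINVAL;
}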
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index c1cd5558a646..e659d9d61107 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -894,6 +894,12 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
if (fs_info->quota_root)
goto out;
+ fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
+ if (!fs_info->qgroup_ulist) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/*
* 1 for quota root item
* 1 for BTRFS_QGROUP_STATUS item
@@ -909,13 +915,6 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
goto out;
}
- fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
- if (!fs_info->qgroup_ulist) {
- ret = -ENOMEM;
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
-
/*
* initially create the quota tree
*/
@@ -1923,8 +1922,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
int i;
/* Level sanity check */
- if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
- root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
+ if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
+ root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
root_level < cur_level) {
btrfs_err_rl(fs_info,
"%s: bad levels, cur_level=%d root_level=%d",
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 1869ba8e5981..67a6f7d47402 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2430,8 +2430,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
bitmap_clear(rbio->dbitmap, pagenr, 1);
kunmap(p);
- for (stripe = 0; stripe < rbio->real_stripes; stripe++)
+ for (stripe = 0; stripe < nr_data; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+ kunmap(p_page);
}
__free_page(p_page);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index d09b6cdb785a..b283d3a6e837 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -205,28 +205,17 @@ static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
#ifdef CONFIG_STACKTRACE
static void __save_stack_trace(struct ref_action *ra)
{
- struct stack_trace stack_trace;
-
- stack_trace.max_entries = MAX_TRACE;
- stack_trace.nr_entries = 0;
- stack_trace.entries = ra->trace;
- stack_trace.skip = 2;
- save_stack_trace(&stack_trace);
- ra->trace_len = stack_trace.nr_entries;
+ ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
}
static void __print_stack_trace(struct btrfs_fs_info *fs_info,
struct ref_action *ra)
{
- struct stack_trace trace;
-
if (ra->trace_len == 0) {
btrfs_err(fs_info, " ref-verify: no stacktrace");
return;
}
- trace.nr_entries = ra->trace_len;
- trace.entries = ra->trace;
- print_stack_trace(&trace, 2);
+ stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
static void inline __save_stack_trace(struct ref_action *ra)
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 0d2b957ca3a3..893d12fbfda0 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -263,8 +263,10 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
if (root) {
WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
&root->state));
- if (btrfs_root_refs(&root->root_item) == 0)
+ if (btrfs_root_refs(&root->root_item) == 0) {
+ set_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
btrfs_add_dead_root(root);
+ }
continue;
}
@@ -310,8 +312,10 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
break;
}
- if (btrfs_root_refs(&root->root_item) == 0)
+ if (btrfs_root_refs(&root->root_item) == 0) {
+ set_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
btrfs_add_dead_root(root);
+ }
}
btrfs_free_path(path);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index acdad6d658f5..e4e665f422fc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1886,8 +1886,10 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
}
}
-static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
+static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+
/*
* We use writeback_inodes_sb here because if we used
* btrfs_start_delalloc_roots we would deadlock with fs freeze.
@@ -1897,15 +1899,50 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
* from already being in a transaction and our join_transaction doesn't
* have to re-take the fs freeze lock.
*/
- if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
+ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+ } else {
+ struct btrfs_pending_snapshot *pending;
+ struct list_head *head = &trans->transaction->pending_snapshots;
+
+ /*
+		 * Flush delalloc for any root that is going to be snapshotted.
+		 * This is done to avoid a corrupted version of files in the
+		 * snapshots that had both buffered and direct IO writes (even
+ * if they were done sequentially) due to an unordered update of
+ * the inode's size on disk.
+ */
+ list_for_each_entry(pending, head, list) {
+ int ret;
+
+ ret = btrfs_start_delalloc_snapshot(pending->root);
+ if (ret)
+ return ret;
+ }
+ }
return 0;
}
-static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
+static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
{
- if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+
+ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ } else {
+ struct btrfs_pending_snapshot *pending;
+ struct list_head *head = &trans->transaction->pending_snapshots;
+
+ /*
+		 * Wait for any delalloc that we started previously for the roots
+ * that are going to be snapshotted. This is to avoid a corrupted
+ * version of files in the snapshots that had both buffered and
+ * direct IO writes (even if they were done sequentially).
+ */
+ list_for_each_entry(pending, head, list)
+ btrfs_wait_ordered_extents(pending->root,
+ U64_MAX, 0, U64_MAX);
+ }
}
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
@@ -2023,7 +2060,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
extwriter_counter_dec(cur_trans, trans->type);
- ret = btrfs_start_delalloc_flush(fs_info);
+ ret = btrfs_start_delalloc_flush(trans);
if (ret)
goto cleanup_transaction;
@@ -2039,7 +2076,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (ret)
goto cleanup_transaction;
- btrfs_wait_delalloc_flush(fs_info);
+ btrfs_wait_delalloc_flush(trans);
btrfs_scrub_pause(fs_info);
/*
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f06454a55e00..561884f60d35 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3578,9 +3578,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- /* find the first key from this transaction again */
+ /*
+ * Find the first key from this transaction again. See the note for
+	 * log_new_dir_dentries: if we're logging a directory recursively we
+ * won't be holding its i_mutex, which means we can modify the directory
+ * while we're logging it. If we remove an entry between our first
+ * search and this search we'll not find the key again and can just
+ * bail.
+ */
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
- if (WARN_ON(ret != 0))
+ if (ret != 0)
goto done;
/*
@@ -4544,6 +4551,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
*size_ret = btrfs_inode_size(path->nodes[0], item);
+ /*
+		 * If the in-memory inode's i_size is smaller than the inode
+		 * size stored in the btree, return the inode's i_size, so
+		 * that we get a correct inode size after replaying the log
+		 * when, before a power failure, we had a shrinking truncate
+		 * followed by the addition of a new name (rename / new hard link).
+ * Otherwise return the inode size from the btree, to avoid
+ * data loss when replaying a log due to previously doing a
+ * write that expands the inode's size and logging a new name
+ * immediately after.
+ */
+ if (*size_ret > inode->vfs_inode.i_size)
+ *size_ret = inode->vfs_inode.i_size;
}
btrfs_release_path(path);
@@ -4705,15 +4725,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, extent) ==
- BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_ram_bytes(leaf, extent);
- ASSERT(len == i_size ||
- (len == fs_info->sectorsize &&
- btrfs_file_extent_compression(leaf, extent) !=
- BTRFS_COMPRESS_NONE) ||
- (len < i_size && i_size < fs_info->sectorsize));
+ BTRFS_FILE_EXTENT_INLINE)
return 0;
- }
len = btrfs_file_extent_num_bytes(leaf, extent);
/* Last extent goes beyond i_size, no need to log a hole. */
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9024eee889b9..db934ceae9c1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6407,7 +6407,7 @@ static void btrfs_end_bio(struct bio *bio)
if (bio_op(bio) == REQ_OP_WRITE)
btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_WRITE_ERRS);
- else
+ else if (!(bio->bi_opf & REQ_RAHEAD))
btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_READ_ERRS);
if (bio->bi_opf & REQ_PREFLUSH)
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 3e418a3aeb11..6b9e29d050f3 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -195,8 +195,7 @@ static void zstd_cleanup_workspace_manager(void)
struct workspace *workspace;
int i;
- del_timer(&wsm.timer);
-
+ spin_lock(&wsm.lock);
for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
while (!list_empty(&wsm.idle_ws[i])) {
workspace = container_of(wsm.idle_ws[i].next,
@@ -206,6 +205,9 @@ static void zstd_cleanup_workspace_manager(void)
wsm.ops->free_workspace(&workspace->list);
}
}
+ spin_unlock(&wsm.lock);
+
+ del_timer_sync(&wsm.timer);
}
/*
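/*
 * Illustrative sketch (not part of the patch): the teardown ordering the
 * zstd hunk above establishes. Detach everything under the lock first,
 * then call del_timer_sync(), which waits for a concurrently-running
 * timer handler to finish; plain del_timer() does not wait. All foo_*
 * names are hypothetical.
 */
static DEFINE_SPINLOCK(foo_lock);
static LIST_HEAD(foo_items);
static struct timer_list foo_timer;

static void foo_teardown(void)
{
	spin_lock(&foo_lock);
	while (!list_empty(&foo_items))
		list_del_init(foo_items.next);	/* free each entry here */
	spin_unlock(&foo_lock);

	del_timer_sync(&foo_timer);	/* wait out any running handler */
}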
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index bba28a5034ba..36a8dc699448 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -148,11 +148,17 @@ void ceph_caps_finalize(struct ceph_mds_client *mdsc)
spin_unlock(&mdsc->caps_list_lock);
}
-void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
+void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc,
+ struct ceph_mount_options *fsopt)
{
spin_lock(&mdsc->caps_list_lock);
- mdsc->caps_min_count += delta;
- BUG_ON(mdsc->caps_min_count < 0);
+ mdsc->caps_min_count = fsopt->max_readdir;
+ if (mdsc->caps_min_count < 1024)
+ mdsc->caps_min_count = 1024;
+ mdsc->caps_use_max = fsopt->caps_max;
+ if (mdsc->caps_use_max > 0 &&
+ mdsc->caps_use_max < mdsc->caps_min_count)
+ mdsc->caps_use_max = mdsc->caps_min_count;
spin_unlock(&mdsc->caps_list_lock);
}
@@ -272,6 +278,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
if (!err) {
BUG_ON(have + alloc != need);
ctx->count = need;
+ ctx->used = 0;
}
spin_lock(&mdsc->caps_list_lock);
@@ -295,13 +302,24 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
}
void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
- struct ceph_cap_reservation *ctx)
+ struct ceph_cap_reservation *ctx)
{
+	bool reclaim = false;
+
+	if (!ctx->count)
+		return;
+
dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
spin_lock(&mdsc->caps_list_lock);
__ceph_unreserve_caps(mdsc, ctx->count);
ctx->count = 0;
+
+ if (mdsc->caps_use_max > 0 &&
+ mdsc->caps_use_count > mdsc->caps_use_max)
+ reclaim = true;
spin_unlock(&mdsc->caps_list_lock);
+
+ if (reclaim)
+ ceph_reclaim_caps_nr(mdsc, ctx->used);
}
struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
@@ -346,6 +364,7 @@ struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
BUG_ON(list_empty(&mdsc->caps_list));
ctx->count--;
+ ctx->used++;
mdsc->caps_reserve_count--;
mdsc->caps_use_count++;
@@ -500,12 +519,12 @@ static void __insert_cap_node(struct ceph_inode_info *ci,
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- struct ceph_mount_options *ma = mdsc->fsc->mount_options;
+ struct ceph_mount_options *opt = mdsc->fsc->mount_options;
ci->i_hold_caps_min = round_jiffies(jiffies +
- ma->caps_wanted_delay_min * HZ);
+ opt->caps_wanted_delay_min * HZ);
ci->i_hold_caps_max = round_jiffies(jiffies +
- ma->caps_wanted_delay_max * HZ);
+ opt->caps_wanted_delay_max * HZ);
dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
@@ -657,6 +676,10 @@ void ceph_add_cap(struct inode *inode,
session->s_nr_caps++;
spin_unlock(&session->s_cap_lock);
} else {
+ spin_lock(&session->s_cap_lock);
+ list_move_tail(&cap->session_caps, &session->s_caps);
+ spin_unlock(&session->s_cap_lock);
+
if (cap->cap_gen < session->s_cap_gen)
cap->issued = cap->implemented = CEPH_CAP_PIN;
@@ -1081,9 +1104,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
(!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
cap->queue_release = 1;
if (removed) {
- list_add_tail(&cap->session_caps,
- &session->s_cap_releases);
- session->s_num_cap_releases++;
+ __ceph_queue_cap_release(session, cap);
removed = 0;
}
} else {
@@ -1245,7 +1266,7 @@ static int send_cap_msg(struct cap_msg_args *arg)
* Queue cap releases when an inode is dropped from our cache. Since
* inode is about to be destroyed, there is no need for i_ceph_lock.
*/
-void ceph_queue_caps_release(struct inode *inode)
+void __ceph_remove_caps(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct rb_node *p;
@@ -2393,6 +2414,12 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
if ((cap->issued & ci->i_flushing_caps) !=
ci->i_flushing_caps) {
ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+			/* encode_caps_cb() will also reset these sequence
+			 * numbers. Make sure the sequence numbers in the cap
+			 * flush message match the later reconnect message. */
+ cap->seq = 0;
+ cap->issue_seq = 0;
+ cap->mseq = 0;
__kick_flushing_caps(mdsc, session, ci,
oldest_flush_tid);
} else {
@@ -3880,12 +3907,10 @@ void ceph_handle_caps(struct ceph_mds_session *session,
cap->seq = seq;
cap->issue_seq = seq;
spin_lock(&session->s_cap_lock);
- list_add_tail(&cap->session_caps,
- &session->s_cap_releases);
- session->s_num_cap_releases++;
+ __ceph_queue_cap_release(session, cap);
spin_unlock(&session->s_cap_lock);
}
- goto flush_cap_releases;
+ goto done;
}
/* these will work even if we don't have a cap yet */
@@ -3955,7 +3980,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
ceph_cap_op_name(op));
}
- goto done;
+done:
+ mutex_unlock(&session->s_mutex);
+done_unlocked:
+ iput(inode);
+ ceph_put_string(extra_info.pool_ns);
+ return;
flush_cap_releases:
/*
@@ -3963,14 +3993,8 @@ flush_cap_releases:
* along for the mds (who clearly thinks we still have this
* cap).
*/
- ceph_send_cap_releases(mdsc, session);
-
-done:
- mutex_unlock(&session->s_mutex);
-done_unlocked:
- iput(inode);
- ceph_put_string(extra_info.pool_ns);
- return;
+ ceph_flush_cap_releases(mdsc, session);
+ goto done;
bad:
pr_err("ceph_handle_caps: corrupt message\n");
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index abdf98deeec4..98365e74cb4a 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -139,23 +139,6 @@ static int caps_show(struct seq_file *s, void *p)
return 0;
}
-static int dentry_lru_show(struct seq_file *s, void *ptr)
-{
- struct ceph_fs_client *fsc = s->private;
- struct ceph_mds_client *mdsc = fsc->mdsc;
- struct ceph_dentry_info *di;
-
- spin_lock(&mdsc->dentry_lru_lock);
- list_for_each_entry(di, &mdsc->dentry_lru, lru) {
- struct dentry *dentry = di->dentry;
- seq_printf(s, "%p %p\t%pd\n",
- di, dentry, dentry);
- }
- spin_unlock(&mdsc->dentry_lru_lock);
-
- return 0;
-}
-
static int mds_sessions_show(struct seq_file *s, void *ptr)
{
struct ceph_fs_client *fsc = s->private;
@@ -195,7 +178,6 @@ static int mds_sessions_show(struct seq_file *s, void *ptr)
CEPH_DEFINE_SHOW_FUNC(mdsmap_show)
CEPH_DEFINE_SHOW_FUNC(mdsc_show)
CEPH_DEFINE_SHOW_FUNC(caps_show)
-CEPH_DEFINE_SHOW_FUNC(dentry_lru_show)
CEPH_DEFINE_SHOW_FUNC(mds_sessions_show)
@@ -231,7 +213,6 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
debugfs_remove(fsc->debugfs_mds_sessions);
debugfs_remove(fsc->debugfs_caps);
debugfs_remove(fsc->debugfs_mdsc);
- debugfs_remove(fsc->debugfs_dentry_lru);
}
int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
@@ -291,14 +272,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
if (!fsc->debugfs_caps)
goto out;
- fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
- 0400,
- fsc->client->debugfs_dir,
- fsc,
- &dentry_lru_show_fops);
- if (!fsc->debugfs_dentry_lru)
- goto out;
-
return 0;
out:
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 82928cea0209..0637149fb9f9 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -29,6 +29,9 @@
const struct dentry_operations ceph_dentry_ops;
+static bool __dentry_lease_is_valid(struct ceph_dentry_info *di);
+static int __dir_lease_try_check(const struct dentry *dentry);
+
/*
* Initialize ceph dentry state.
*/
@@ -44,7 +47,7 @@ static int ceph_d_init(struct dentry *dentry)
di->lease_session = NULL;
di->time = jiffies;
dentry->d_fsdata = di;
- ceph_dentry_lru_add(dentry);
+ INIT_LIST_HEAD(&di->lease_list);
return 0;
}
@@ -241,6 +244,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
goto out;
}
if (fpos_cmp(ctx->pos, di->offset) <= 0) {
+ __ceph_dentry_dir_lease_touch(di);
emit_dentry = true;
}
spin_unlock(&dentry->d_lock);
@@ -1125,13 +1129,277 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
}
/*
+ * Move dentry to tail of mdsc->dentry_leases list when lease is updated.
+ * Leases at front of the list will expire first. (Assume all leases have
+ * similar duration)
+ *
+ * Called under dentry->d_lock.
+ */
+void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
+{
+ struct dentry *dn = di->dentry;
+ struct ceph_mds_client *mdsc;
+
+ dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn);
+
+ di->flags |= CEPH_DENTRY_LEASE_LIST;
+ if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
+ di->flags |= CEPH_DENTRY_REFERENCED;
+ return;
+ }
+
+ mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_list_lock);
+ list_move_tail(&di->lease_list, &mdsc->dentry_leases);
+ spin_unlock(&mdsc->dentry_list_lock);
+}
+
+static void __dentry_dir_lease_touch(struct ceph_mds_client *mdsc,
+ struct ceph_dentry_info *di)
+{
+ di->flags &= ~(CEPH_DENTRY_LEASE_LIST | CEPH_DENTRY_REFERENCED);
+ di->lease_gen = 0;
+ di->time = jiffies;
+ list_move_tail(&di->lease_list, &mdsc->dentry_dir_leases);
+}
+
+/*
+ * When dir lease is used, add dentry to tail of mdsc->dentry_dir_leases
+ * list if it's not in the list, otherwise set 'referenced' flag.
+ *
+ * Called under dentry->d_lock.
+ */
+void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
+{
+ struct dentry *dn = di->dentry;
+ struct ceph_mds_client *mdsc;
+
+ dout("dentry_dir_lease_touch %p %p '%pd' (offset %lld)\n",
+ di, dn, dn, di->offset);
+
+ if (!list_empty(&di->lease_list)) {
+ if (di->flags & CEPH_DENTRY_LEASE_LIST) {
+ /* don't remove dentry from dentry lease list
+ * if its lease is valid */
+ if (__dentry_lease_is_valid(di))
+ return;
+ } else {
+ di->flags |= CEPH_DENTRY_REFERENCED;
+ return;
+ }
+ }
+
+ if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
+ di->flags |= CEPH_DENTRY_REFERENCED;
+ di->flags &= ~CEPH_DENTRY_LEASE_LIST;
+ return;
+ }
+
+ mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_list_lock);
+ __dentry_dir_lease_touch(mdsc, di),
+ spin_unlock(&mdsc->dentry_list_lock);
+}
+
+static void __dentry_lease_unlist(struct ceph_dentry_info *di)
+{
+ struct ceph_mds_client *mdsc;
+ if (di->flags & CEPH_DENTRY_SHRINK_LIST)
+ return;
+ if (list_empty(&di->lease_list))
+ return;
+
+ mdsc = ceph_sb_to_client(di->dentry->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_list_lock);
+ list_del_init(&di->lease_list);
+ spin_unlock(&mdsc->dentry_list_lock);
+}
+
+enum {
+ KEEP = 0,
+ DELETE = 1,
+ TOUCH = 2,
+ STOP = 4,
+};
+
+struct ceph_lease_walk_control {
+ bool dir_lease;
+ bool expire_dir_lease;
+ unsigned long nr_to_scan;
+ unsigned long dir_lease_ttl;
+};
+
+static unsigned long
+__dentry_leases_walk(struct ceph_mds_client *mdsc,
+ struct ceph_lease_walk_control *lwc,
+ int (*check)(struct dentry*, void*))
+{
+ struct ceph_dentry_info *di, *tmp;
+ struct dentry *dentry, *last = NULL;
+	struct list_head *list;
+ LIST_HEAD(dispose);
+ unsigned long freed = 0;
+ int ret = 0;
+
+ list = lwc->dir_lease ? &mdsc->dentry_dir_leases : &mdsc->dentry_leases;
+ spin_lock(&mdsc->dentry_list_lock);
+ list_for_each_entry_safe(di, tmp, list, lease_list) {
+ if (!lwc->nr_to_scan)
+ break;
+ --lwc->nr_to_scan;
+
+ dentry = di->dentry;
+ if (last == dentry)
+ break;
+
+ if (!spin_trylock(&dentry->d_lock))
+ continue;
+
+ if (dentry->d_lockref.count < 0) {
+ list_del_init(&di->lease_list);
+ goto next;
+ }
+
+ ret = check(dentry, lwc);
+ if (ret & TOUCH) {
+ /* move it into tail of dir lease list */
+ __dentry_dir_lease_touch(mdsc, di);
+ if (!last)
+ last = dentry;
+ }
+ if (ret & DELETE) {
+ /* stale lease */
+ di->flags &= ~CEPH_DENTRY_REFERENCED;
+ if (dentry->d_lockref.count > 0) {
+ /* update_dentry_lease() will re-add
+ * it to lease list, or
+ * ceph_d_delete() will return 1 when
+ * last reference is dropped */
+ list_del_init(&di->lease_list);
+ } else {
+ di->flags |= CEPH_DENTRY_SHRINK_LIST;
+ list_move_tail(&di->lease_list, &dispose);
+ dget_dlock(dentry);
+ }
+ }
+next:
+ spin_unlock(&dentry->d_lock);
+ if (ret & STOP)
+ break;
+ }
+ spin_unlock(&mdsc->dentry_list_lock);
+
+ while (!list_empty(&dispose)) {
+ di = list_first_entry(&dispose, struct ceph_dentry_info,
+ lease_list);
+ dentry = di->dentry;
+ spin_lock(&dentry->d_lock);
+
+ list_del_init(&di->lease_list);
+ di->flags &= ~CEPH_DENTRY_SHRINK_LIST;
+ if (di->flags & CEPH_DENTRY_REFERENCED) {
+ spin_lock(&mdsc->dentry_list_lock);
+ if (di->flags & CEPH_DENTRY_LEASE_LIST) {
+ list_add_tail(&di->lease_list,
+ &mdsc->dentry_leases);
+ } else {
+ __dentry_dir_lease_touch(mdsc, di);
+ }
+ spin_unlock(&mdsc->dentry_list_lock);
+ } else {
+ freed++;
+ }
+
+ spin_unlock(&dentry->d_lock);
+ /* ceph_d_delete() does the trick */
+ dput(dentry);
+ }
+ return freed;
+}
+
+static int __dentry_lease_check(struct dentry *dentry, void *arg)
+{
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+ int ret;
+
+ if (__dentry_lease_is_valid(di))
+ return STOP;
+ ret = __dir_lease_try_check(dentry);
+ if (ret == -EBUSY)
+ return KEEP;
+ if (ret > 0)
+ return TOUCH;
+ return DELETE;
+}
+
+static int __dir_lease_check(struct dentry *dentry, void *arg)
+{
+ struct ceph_lease_walk_control *lwc = arg;
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+
+ int ret = __dir_lease_try_check(dentry);
+ if (ret == -EBUSY)
+ return KEEP;
+ if (ret > 0) {
+ if (time_before(jiffies, di->time + lwc->dir_lease_ttl))
+ return STOP;
+		/* Move dentry to tail of dir lease list if we don't want
+		 * to delete it, so that dentries in the list are checked
+		 * in a round-robin manner. */
+ if (!lwc->expire_dir_lease)
+ return TOUCH;
+ if (dentry->d_lockref.count > 0 ||
+ (di->flags & CEPH_DENTRY_REFERENCED))
+ return TOUCH;
+ /* invalidate dir lease */
+ di->lease_shared_gen = 0;
+ }
+ return DELETE;
+}
+
+int ceph_trim_dentries(struct ceph_mds_client *mdsc)
+{
+ struct ceph_lease_walk_control lwc;
+ unsigned long count;
+ unsigned long freed;
+
+ spin_lock(&mdsc->caps_list_lock);
+ if (mdsc->caps_use_max > 0 &&
+ mdsc->caps_use_count > mdsc->caps_use_max)
+ count = mdsc->caps_use_count - mdsc->caps_use_max;
+ else
+ count = 0;
+ spin_unlock(&mdsc->caps_list_lock);
+
+ lwc.dir_lease = false;
+ lwc.nr_to_scan = CEPH_CAPS_PER_RELEASE * 2;
+ freed = __dentry_leases_walk(mdsc, &lwc, __dentry_lease_check);
+ if (!lwc.nr_to_scan) /* more invalid leases */
+ return -EAGAIN;
+
+ if (lwc.nr_to_scan < CEPH_CAPS_PER_RELEASE)
+ lwc.nr_to_scan = CEPH_CAPS_PER_RELEASE;
+
+ lwc.dir_lease = true;
+ lwc.expire_dir_lease = freed < count;
+ lwc.dir_lease_ttl = mdsc->fsc->mount_options->caps_wanted_delay_max * HZ;
+	freed += __dentry_leases_walk(mdsc, &lwc, __dir_lease_check);
+ if (!lwc.nr_to_scan) /* more to check */
+ return -EAGAIN;
+
+ return freed > 0 ? 1 : 0;
+}
+
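/*
 * Illustrative sketch (not part of the patch): the shape of a callback
 * for __dentry_leases_walk() above. KEEP/DELETE/TOUCH/STOP are bit
 * flags, so a callback may combine them (e.g. DELETE | STOP). The
 * sketch_* predicates are hypothetical.
 */
static int sketch_lease_check(struct dentry *dentry, void *arg)
{
	if (sketch_lease_valid(dentry))
		return STOP;	/* list is age-ordered: the rest are newer */
	if (sketch_lease_busy(dentry))
		return TOUCH;	/* re-queue at the tail, check again later */
	return DELETE;		/* stale and unused: move to dispose list */
}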
+/*
* Ensure a dentry lease will no longer revalidate.
*/
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
spin_lock(&dentry->d_lock);
- ceph_dentry(dentry)->time = jiffies;
- ceph_dentry(dentry)->lease_shared_gen = 0;
+ di->time = jiffies;
+ di->lease_shared_gen = 0;
+ __dentry_lease_unlist(di);
spin_unlock(&dentry->d_lock);
}
@@ -1139,45 +1407,59 @@ void ceph_invalidate_dentry_lease(struct dentry *dentry)
* Check if dentry lease is valid. If not, delete the lease. Try to
 * renew if the lease is more than half up.
*/
+static bool __dentry_lease_is_valid(struct ceph_dentry_info *di)
+{
+ struct ceph_mds_session *session;
+
+ if (!di->lease_gen)
+ return false;
+
+ session = di->lease_session;
+ if (session) {
+ u32 gen;
+ unsigned long ttl;
+
+ spin_lock(&session->s_gen_ttl_lock);
+ gen = session->s_cap_gen;
+ ttl = session->s_cap_ttl;
+ spin_unlock(&session->s_gen_ttl_lock);
+
+ if (di->lease_gen == gen &&
+ time_before(jiffies, ttl) &&
+ time_before(jiffies, di->time))
+ return true;
+ }
+ di->lease_gen = 0;
+ return false;
+}
+
static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags,
struct inode *dir)
{
struct ceph_dentry_info *di;
- struct ceph_mds_session *s;
- int valid = 0;
- u32 gen;
- unsigned long ttl;
struct ceph_mds_session *session = NULL;
u32 seq = 0;
+ int valid = 0;
spin_lock(&dentry->d_lock);
di = ceph_dentry(dentry);
- if (di && di->lease_session) {
- s = di->lease_session;
- spin_lock(&s->s_gen_ttl_lock);
- gen = s->s_cap_gen;
- ttl = s->s_cap_ttl;
- spin_unlock(&s->s_gen_ttl_lock);
+ if (di && __dentry_lease_is_valid(di)) {
+ valid = 1;
- if (di->lease_gen == gen &&
- time_before(jiffies, di->time) &&
- time_before(jiffies, ttl)) {
- valid = 1;
- if (di->lease_renew_after &&
- time_after(jiffies, di->lease_renew_after)) {
- /*
- * We should renew. If we're in RCU walk mode
- * though, we can't do that so just return
- * -ECHILD.
- */
- if (flags & LOOKUP_RCU) {
- valid = -ECHILD;
- } else {
- session = ceph_get_mds_session(s);
- seq = di->lease_seq;
- di->lease_renew_after = 0;
- di->lease_renew_from = jiffies;
- }
+ if (di->lease_renew_after &&
+ time_after(jiffies, di->lease_renew_after)) {
+ /*
+ * We should renew. If we're in RCU walk mode
+ * though, we can't do that so just return
+ * -ECHILD.
+ */
+ if (flags & LOOKUP_RCU) {
+ valid = -ECHILD;
+ } else {
+ session = ceph_get_mds_session(di->lease_session);
+ seq = di->lease_seq;
+ di->lease_renew_after = 0;
+ di->lease_renew_from = jiffies;
}
}
}
@@ -1193,6 +1475,38 @@ static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags,
}
/*
+ * Called under dentry->d_lock.
+ */
+static int __dir_lease_try_check(const struct dentry *dentry)
+{
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+ struct inode *dir;
+ struct ceph_inode_info *ci;
+ int valid = 0;
+
+ if (!di->lease_shared_gen)
+ return 0;
+ if (IS_ROOT(dentry))
+ return 0;
+
+ dir = d_inode(dentry->d_parent);
+ ci = ceph_inode(dir);
+
+ if (spin_trylock(&ci->i_ceph_lock)) {
+ if (atomic_read(&ci->i_shared_gen) == di->lease_shared_gen &&
+ __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 0))
+ valid = 1;
+ spin_unlock(&ci->i_ceph_lock);
+ } else {
+ valid = -EBUSY;
+ }
+
+ if (!valid)
+ di->lease_shared_gen = 0;
+ return valid;
+}
+
+/*
* Check if directory-wide content lease/cap is valid.
*/
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
@@ -1205,6 +1519,8 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
if (atomic_read(&ci->i_shared_gen) == di->lease_shared_gen)
valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
spin_unlock(&ci->i_ceph_lock);
+ if (valid)
+ __ceph_dentry_dir_lease_touch(di);
dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
dir, (unsigned)atomic_read(&ci->i_shared_gen),
dentry, (unsigned)di->lease_shared_gen, valid);
@@ -1297,11 +1613,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
}
dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
- if (valid) {
- ceph_dentry_lru_touch(dentry);
- } else {
+ if (!valid)
ceph_dir_clear_complete(dir);
- }
if (!(flags & LOOKUP_RCU))
dput(parent);
@@ -1309,6 +1622,31 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
}
/*
+ * Delete an unused dentry that doesn't have a valid lease
+ *
+ * Called under dentry->d_lock.
+ */
+static int ceph_d_delete(const struct dentry *dentry)
+{
+ struct ceph_dentry_info *di;
+
+ /* won't release caps */
+ if (d_really_is_negative(dentry))
+ return 0;
+ if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
+ return 0;
+	/* valid lease? */
+ di = ceph_dentry(dentry);
+ if (di) {
+ if (__dentry_lease_is_valid(di))
+ return 0;
+ if (__dir_lease_try_check(dentry))
+ return 0;
+ }
+ return 1;
+}
+
+/*
* Release our ceph_dentry_info.
*/
static void ceph_d_release(struct dentry *dentry)
@@ -1316,9 +1654,9 @@ static void ceph_d_release(struct dentry *dentry)
struct ceph_dentry_info *di = ceph_dentry(dentry);
dout("d_release %p\n", dentry);
- ceph_dentry_lru_del(dentry);
spin_lock(&dentry->d_lock);
+ __dentry_lease_unlist(di);
dentry->d_fsdata = NULL;
spin_unlock(&dentry->d_lock);
@@ -1419,49 +1757,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
return size - left;
}
-/*
- * We maintain a private dentry LRU.
- *
- * FIXME: this needs to be changed to a per-mds lru to be useful.
- */
-void ceph_dentry_lru_add(struct dentry *dn)
-{
- struct ceph_dentry_info *di = ceph_dentry(dn);
- struct ceph_mds_client *mdsc;
-
- dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
- spin_lock(&mdsc->dentry_lru_lock);
- list_add_tail(&di->lru, &mdsc->dentry_lru);
- mdsc->num_dentry++;
- spin_unlock(&mdsc->dentry_lru_lock);
-}
-
-void ceph_dentry_lru_touch(struct dentry *dn)
-{
- struct ceph_dentry_info *di = ceph_dentry(dn);
- struct ceph_mds_client *mdsc;
-
- dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
- di->offset);
- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
- spin_lock(&mdsc->dentry_lru_lock);
- list_move_tail(&di->lru, &mdsc->dentry_lru);
- spin_unlock(&mdsc->dentry_lru_lock);
-}
-void ceph_dentry_lru_del(struct dentry *dn)
-{
- struct ceph_dentry_info *di = ceph_dentry(dn);
- struct ceph_mds_client *mdsc;
-
- dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
- spin_lock(&mdsc->dentry_lru_lock);
- list_del_init(&di->lru);
- mdsc->num_dentry--;
- spin_unlock(&mdsc->dentry_lru_lock);
-}
/*
* Return name hash for a given dentry. This is dependent on
@@ -1470,6 +1766,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
struct ceph_inode_info *dci = ceph_inode(dir);
+ unsigned hash;
switch (dci->i_dir_layout.dl_dir_hash) {
case 0: /* for backward compat */
@@ -1477,8 +1774,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
return dn->d_name.hash;
default:
- return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+ spin_lock(&dn->d_lock);
+ hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
dn->d_name.name, dn->d_name.len);
+ spin_unlock(&dn->d_lock);
+ return hash;
}
}
@@ -1531,6 +1831,7 @@ const struct inode_operations ceph_snapdir_iops = {
const struct dentry_operations ceph_dentry_ops = {
.d_revalidate = ceph_d_revalidate,
+ .d_delete = ceph_d_delete,
.d_release = ceph_d_release,
.d_prune = ceph_d_prune,
.d_init = ceph_d_init,
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 189df668b6a0..9f53c3d99304 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -590,7 +590,8 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
* but it will at least behave sensibly when they are
* in sequence.
*/
- ret = filemap_write_and_wait_range(inode->i_mapping, off, off + len);
+ ret = filemap_write_and_wait_range(inode->i_mapping,
+ off, off + len - 1);
if (ret < 0)
return ret;
@@ -929,14 +930,15 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
(write ? "write" : "read"), file, pos, (unsigned)count,
snapc, snapc->seq);
- ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
+ ret = filemap_write_and_wait_range(inode->i_mapping,
+ pos, pos + count - 1);
if (ret < 0)
return ret;
if (write) {
int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_SHIFT,
- (pos + count) >> PAGE_SHIFT);
+ (pos + count - 1) >> PAGE_SHIFT);
if (ret2 < 0)
dout("invalidate_inode_pages2_range returned %d\n", ret2);
@@ -1132,13 +1134,14 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
file, pos, (unsigned)count, snapc, snapc->seq);
- ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
+ ret = filemap_write_and_wait_range(inode->i_mapping,
+ pos, pos + count - 1);
if (ret < 0)
return ret;
ret = invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_SHIFT,
- (pos + count) >> PAGE_SHIFT);
+ (pos + count - 1) >> PAGE_SHIFT);
if (ret < 0)
dout("invalidate_inode_pages2_range returned %d\n", ret);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9d1f34d46627..c2feb310ac1e 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -497,7 +497,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_wrbuffer_ref = 0;
ci->i_wrbuffer_ref_head = 0;
atomic_set(&ci->i_filelock_ref, 0);
- atomic_set(&ci->i_shared_gen, 0);
+ atomic_set(&ci->i_shared_gen, 1);
ci->i_rdcache_gen = 0;
ci->i_rdcache_revoking = 0;
@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
struct inode *inode = container_of(head, struct inode, i_rcu);
struct ceph_inode_info *ci = ceph_inode(inode);
+ kfree(ci->i_symlink);
kmem_cache_free(ceph_inode_cachep, ci);
}
@@ -537,7 +538,7 @@ void ceph_destroy_inode(struct inode *inode)
ceph_fscache_unregister_inode_cookie(ci);
- ceph_queue_caps_release(inode);
+ __ceph_remove_caps(inode);
if (__ceph_has_any_quota(ci))
ceph_adjust_quota_realms_count(inode, false);
@@ -548,20 +549,24 @@ void ceph_destroy_inode(struct inode *inode)
*/
if (ci->i_snap_realm) {
struct ceph_mds_client *mdsc =
- ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
- struct ceph_snap_realm *realm = ci->i_snap_realm;
-
- dout(" dropping residual ref to snap realm %p\n", realm);
- spin_lock(&realm->inodes_with_caps_lock);
- list_del_init(&ci->i_snap_realm_item);
- ci->i_snap_realm = NULL;
- if (realm->ino == ci->i_vino.ino)
- realm->inode = NULL;
- spin_unlock(&realm->inodes_with_caps_lock);
- ceph_put_snap_realm(mdsc, realm);
+ ceph_inode_to_client(inode)->mdsc;
+ if (ceph_snap(inode) == CEPH_NOSNAP) {
+ struct ceph_snap_realm *realm = ci->i_snap_realm;
+ dout(" dropping residual ref to snap realm %p\n",
+ realm);
+ spin_lock(&realm->inodes_with_caps_lock);
+ list_del_init(&ci->i_snap_realm_item);
+ ci->i_snap_realm = NULL;
+ if (realm->ino == ci->i_vino.ino)
+ realm->inode = NULL;
+ spin_unlock(&realm->inodes_with_caps_lock);
+ ceph_put_snap_realm(mdsc, realm);
+ } else {
+ ceph_put_snapid_map(mdsc, ci->i_snapid_map);
+ ci->i_snap_realm = NULL;
+ }
}
- kfree(ci->i_symlink);
while ((n = rb_first(&ci->i_fragtree)) != NULL) {
frag = rb_entry(n, struct ceph_inode_frag, node);
rb_erase(n, &ci->i_fragtree);
@@ -776,6 +781,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
iinfo->pool_ns_len);
+ if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
+ ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
+
spin_lock(&ci->i_ceph_lock);
/*
@@ -869,6 +877,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
ci->i_rbytes = le64_to_cpu(info->rbytes);
ci->i_rfiles = le64_to_cpu(info->rfiles);
ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
+ ci->i_dir_pin = iinfo->dir_pin;
ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
}
}
@@ -899,6 +908,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
case S_IFBLK:
case S_IFCHR:
case S_IFSOCK:
+ inode->i_blkbits = PAGE_SHIFT;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
inode->i_op = &ceph_file_iops;
break;
@@ -1066,9 +1076,10 @@ static void update_dentry_lease(struct dentry *dentry,
goto out_unlock;
di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
-
- if (duration == 0)
+ if (duration == 0) {
+ __ceph_dentry_dir_lease_touch(di);
goto out_unlock;
+ }
if (di->lease_gen == session->s_cap_gen &&
time_before(ttl, di->time))
@@ -1079,8 +1090,6 @@ static void update_dentry_lease(struct dentry *dentry,
di->lease_session = NULL;
}
- ceph_dentry_lru_touch(dentry);
-
if (!di->lease_session)
di->lease_session = ceph_get_mds_session(session);
di->lease_gen = session->s_cap_gen;
@@ -1088,6 +1097,8 @@ static void update_dentry_lease(struct dentry *dentry,
di->lease_renew_after = half_ttl;
di->lease_renew_from = 0;
di->time = ttl;
+
+ __ceph_dentry_lease_touch(di);
out_unlock:
spin_unlock(&dentry->d_lock);
if (old_lease_session)
@@ -1152,6 +1163,19 @@ static int splice_dentry(struct dentry **pdn, struct inode *in)
return 0;
}
+static int d_name_cmp(struct dentry *dentry, const char *name, size_t len)
+{
+ int ret;
+
+ /* take d_lock to ensure dentry->d_name stability */
+ spin_lock(&dentry->d_lock);
+ ret = dentry->d_name.len - len;
+ if (!ret)
+ ret = memcmp(dentry->d_name.name, name, len);
+ spin_unlock(&dentry->d_lock);
+ return ret;
+}
+
/*
* Incorporate results into the local cache. This is either just
* one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
@@ -1401,7 +1425,8 @@ retry_lookup:
err = splice_dentry(&req->r_dentry, in);
if (err < 0)
goto done;
- } else if (rinfo->head->is_dentry) {
+ } else if (rinfo->head->is_dentry &&
+ !d_name_cmp(req->r_dentry, rinfo->dname, rinfo->dname_len)) {
struct ceph_vino *ptvino = NULL;
if ((le32_to_cpu(rinfo->diri.in->cap.caps) & CEPH_CAP_FILE_SHARED) ||
@@ -2259,10 +2284,11 @@ int ceph_getattr(const struct path *path, struct kstat *stat,
if (!err) {
generic_fillattr(inode, stat);
stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
- if (ceph_snap(inode) != CEPH_NOSNAP)
- stat->dev = ceph_snap(inode);
+ if (ceph_snap(inode) == CEPH_NOSNAP)
+ stat->dev = inode->i_sb->s_dev;
else
- stat->dev = 0;
+ stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
+
if (S_ISDIR(inode->i_mode)) {
if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
RBYTES))
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 163fc74bf221..9049c2a3e972 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -20,6 +20,8 @@
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
+#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
+
/*
* A cluster of MDS (metadata server) daemons is responsible for
* managing the file system namespace (the directory hierarchy and
@@ -46,13 +48,17 @@
*/
struct ceph_reconnect_state {
- int nr_caps;
+ struct ceph_mds_session *session;
+ int nr_caps, nr_realms;
struct ceph_pagelist *pagelist;
unsigned msg_version;
+ bool allow_multi;
};
static void __wake_requests(struct ceph_mds_client *mdsc,
struct list_head *head);
+static void ceph_cap_release_work(struct work_struct *work);
+static void ceph_cap_reclaim_work(struct work_struct *work);
static const struct ceph_connection_operations mds_con_ops;
@@ -61,6 +67,29 @@ static const struct ceph_connection_operations mds_con_ops;
* mds reply parsing
*/
+static int parse_reply_info_quota(void **p, void *end,
+ struct ceph_mds_reply_info_in *info)
+{
+ u8 struct_v, struct_compat;
+ u32 struct_len;
+
+ ceph_decode_8_safe(p, end, struct_v, bad);
+ ceph_decode_8_safe(p, end, struct_compat, bad);
+ /* struct_v is expected to be >= 1. we only
+ * understand encoding with struct_compat == 1. */
+ if (!struct_v || struct_compat != 1)
+ goto bad;
+ ceph_decode_32_safe(p, end, struct_len, bad);
+ ceph_decode_need(p, end, struct_len, bad);
+ end = *p + struct_len;
+ ceph_decode_64_safe(p, end, info->max_bytes, bad);
+ ceph_decode_64_safe(p, end, info->max_files, bad);
+ *p = end;
+ return 0;
+bad:
+ return -EIO;
+}
+
/*
* parse individual inode info
*/
@@ -68,8 +97,24 @@ static int parse_reply_info_in(void **p, void *end,
struct ceph_mds_reply_info_in *info,
u64 features)
{
- int err = -EIO;
+ int err = 0;
+ u8 struct_v = 0;
+ if (features == (u64)-1) {
+ u32 struct_len;
+ u8 struct_compat;
+ ceph_decode_8_safe(p, end, struct_v, bad);
+ ceph_decode_8_safe(p, end, struct_compat, bad);
+ /* struct_v is expected to be >= 1. we only understand
+ * encoding with struct_compat == 1. */
+ if (!struct_v || struct_compat != 1)
+ goto bad;
+ ceph_decode_32_safe(p, end, struct_len, bad);
+ ceph_decode_need(p, end, struct_len, bad);
+ end = *p + struct_len;
+ }
+
+ ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
info->in = *p;
*p += sizeof(struct ceph_mds_reply_inode) +
sizeof(*info->in->fragtree.splits) *
@@ -87,49 +132,136 @@ static int parse_reply_info_in(void **p, void *end,
info->xattr_data = *p;
*p += info->xattr_len;
- if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
+ if (features == (u64)-1) {
+ /* inline data */
ceph_decode_64_safe(p, end, info->inline_version, bad);
ceph_decode_32_safe(p, end, info->inline_len, bad);
ceph_decode_need(p, end, info->inline_len, bad);
info->inline_data = *p;
*p += info->inline_len;
- } else
- info->inline_version = CEPH_INLINE_NONE;
+ /* quota */
+ err = parse_reply_info_quota(p, end, info);
+ if (err < 0)
+ goto out_bad;
+ /* pool namespace */
+ ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
+ if (info->pool_ns_len > 0) {
+ ceph_decode_need(p, end, info->pool_ns_len, bad);
+ info->pool_ns_data = *p;
+ *p += info->pool_ns_len;
+ }
+ /* btime, change_attr */
+ {
+ struct ceph_timespec btime;
+ u64 change_attr;
+ ceph_decode_need(p, end, sizeof(btime), bad);
+ ceph_decode_copy(p, &btime, sizeof(btime));
+ ceph_decode_64_safe(p, end, change_attr, bad);
+ }
+
+ /* dir pin */
+ if (struct_v >= 2) {
+ ceph_decode_32_safe(p, end, info->dir_pin, bad);
+ } else {
+ info->dir_pin = -ENODATA;
+ }
+
+ *p = end;
+ } else {
+ if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
+ ceph_decode_64_safe(p, end, info->inline_version, bad);
+ ceph_decode_32_safe(p, end, info->inline_len, bad);
+ ceph_decode_need(p, end, info->inline_len, bad);
+ info->inline_data = *p;
+ *p += info->inline_len;
+ } else
+ info->inline_version = CEPH_INLINE_NONE;
+
+ if (features & CEPH_FEATURE_MDS_QUOTA) {
+ err = parse_reply_info_quota(p, end, info);
+ if (err < 0)
+ goto out_bad;
+ } else {
+ info->max_bytes = 0;
+ info->max_files = 0;
+ }
+
+ info->pool_ns_len = 0;
+ info->pool_ns_data = NULL;
+ if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
+ ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
+ if (info->pool_ns_len > 0) {
+ ceph_decode_need(p, end, info->pool_ns_len, bad);
+ info->pool_ns_data = *p;
+ *p += info->pool_ns_len;
+ }
+ }
+
+ info->dir_pin = -ENODATA;
+ }
+ return 0;
+bad:
+ err = -EIO;
+out_bad:
+ return err;
+}
- if (features & CEPH_FEATURE_MDS_QUOTA) {
+static int parse_reply_info_dir(void **p, void *end,
+ struct ceph_mds_reply_dirfrag **dirfrag,
+ u64 features)
+{
+ if (features == (u64)-1) {
u8 struct_v, struct_compat;
u32 struct_len;
-
- /*
- * both struct_v and struct_compat are expected to be >= 1
- */
ceph_decode_8_safe(p, end, struct_v, bad);
ceph_decode_8_safe(p, end, struct_compat, bad);
- if (!struct_v || !struct_compat)
+ /* struct_v is expected to be >= 1. we only understand
+ * encoding with struct_compat == 1. */
+ if (!struct_v || struct_compat != 1)
goto bad;
ceph_decode_32_safe(p, end, struct_len, bad);
ceph_decode_need(p, end, struct_len, bad);
- ceph_decode_64_safe(p, end, info->max_bytes, bad);
- ceph_decode_64_safe(p, end, info->max_files, bad);
- } else {
- info->max_bytes = 0;
- info->max_files = 0;
+ end = *p + struct_len;
}
- info->pool_ns_len = 0;
- info->pool_ns_data = NULL;
- if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
- ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
- if (info->pool_ns_len > 0) {
- ceph_decode_need(p, end, info->pool_ns_len, bad);
- info->pool_ns_data = *p;
- *p += info->pool_ns_len;
- }
+ ceph_decode_need(p, end, sizeof(**dirfrag), bad);
+ *dirfrag = *p;
+ *p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
+ if (unlikely(*p > end))
+ goto bad;
+ if (features == (u64)-1)
+ *p = end;
+ return 0;
+bad:
+ return -EIO;
+}
+
+static int parse_reply_info_lease(void **p, void *end,
+ struct ceph_mds_reply_lease **lease,
+ u64 features)
+{
+ if (features == (u64)-1) {
+ u8 struct_v, struct_compat;
+ u32 struct_len;
+ ceph_decode_8_safe(p, end, struct_v, bad);
+ ceph_decode_8_safe(p, end, struct_compat, bad);
+ /* struct_v is expected to be >= 1. we only understand
+ * encoding with struct_compat == 1. */
+ if (!struct_v || struct_compat != 1)
+ goto bad;
+ ceph_decode_32_safe(p, end, struct_len, bad);
+ ceph_decode_need(p, end, struct_len, bad);
+ end = *p + struct_len;
}
+ ceph_decode_need(p, end, sizeof(**lease), bad);
+ *lease = *p;
+ *p += sizeof(**lease);
+ if (features == (u64)-1)
+ *p = end;
return 0;
bad:
- return err;
+ return -EIO;
}
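
Passing features == (u64)-1 selects the new reply encoding, in which each
sub-structure is framed as (struct_v, struct_compat, struct_len, body).
Bounding end to the frame and finishing with *p = end lets the client skip
trailing fields it does not understand, which is what makes the encoding
extensible. The framing logic shared by parse_reply_info_in(),
parse_reply_info_dir() and parse_reply_info_lease(), pulled out here as a
hypothetical helper only (the ceph_decode_*_safe() macros jump to the bad
label on truncated input):

    static int decode_versioned_block(void **p, void *end,
                                      int (*body)(void **p, void *end, void *arg),
                                      void *arg)
    {
            u8 struct_v, struct_compat;
            u32 struct_len;
            void *frame_end;
            int err;

            ceph_decode_8_safe(p, end, struct_v, bad);
            ceph_decode_8_safe(p, end, struct_compat, bad);
            /* struct_v >= 1; we only understand compat encoding 1 */
            if (!struct_v || struct_compat != 1)
                    goto bad;
            ceph_decode_32_safe(p, end, struct_len, bad);
            ceph_decode_need(p, end, struct_len, bad);
            frame_end = *p + struct_len;

            err = body(p, frame_end, arg);
            if (err < 0)
                    return err;

            *p = frame_end;         /* skip fields newer than this client */
            return 0;
    bad:
            return -EIO;
    }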
/*
@@ -147,20 +279,18 @@ static int parse_reply_info_trace(void **p, void *end,
if (err < 0)
goto out_bad;
- if (unlikely(*p + sizeof(*info->dirfrag) > end))
- goto bad;
- info->dirfrag = *p;
- *p += sizeof(*info->dirfrag) +
- sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
- if (unlikely(*p > end))
- goto bad;
+ err = parse_reply_info_dir(p, end, &info->dirfrag, features);
+ if (err < 0)
+ goto out_bad;
ceph_decode_32_safe(p, end, info->dname_len, bad);
ceph_decode_need(p, end, info->dname_len, bad);
info->dname = *p;
*p += info->dname_len;
- info->dlease = *p;
- *p += sizeof(*info->dlease);
+
+ err = parse_reply_info_lease(p, end, &info->dlease, features);
+ if (err < 0)
+ goto out_bad;
}
if (info->head->is_target) {
@@ -183,20 +313,16 @@ out_bad:
/*
* parse readdir results
*/
-static int parse_reply_info_dir(void **p, void *end,
+static int parse_reply_info_readdir(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
u64 features)
{
u32 num, i = 0;
int err;
- info->dir_dir = *p;
- if (*p + sizeof(*info->dir_dir) > end)
- goto bad;
- *p += sizeof(*info->dir_dir) +
- sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
- if (*p > end)
- goto bad;
+ err = parse_reply_info_dir(p, end, &info->dir_dir, features);
+ if (err < 0)
+ goto out_bad;
ceph_decode_need(p, end, sizeof(num) + 2, bad);
num = ceph_decode_32(p);
@@ -222,15 +348,16 @@ static int parse_reply_info_dir(void **p, void *end,
while (num) {
struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
/* dentry */
- ceph_decode_need(p, end, sizeof(u32)*2, bad);
- rde->name_len = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, rde->name_len, bad);
ceph_decode_need(p, end, rde->name_len, bad);
rde->name = *p;
*p += rde->name_len;
dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
- rde->lease = *p;
- *p += sizeof(struct ceph_mds_reply_lease);
+ /* dentry lease */
+ err = parse_reply_info_lease(p, end, &rde->lease, features);
+ if (err)
+ goto out_bad;
/* inode */
err = parse_reply_info_in(p, end, &rde->inode, features);
if (err < 0)
@@ -281,7 +408,8 @@ static int parse_reply_info_create(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
u64 features)
{
- if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
+ if (features == (u64)-1 ||
+ (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
if (*p == end) {
info->has_create_ino = false;
} else {
@@ -310,7 +438,7 @@ static int parse_reply_info_extra(void **p, void *end,
if (op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
- return parse_reply_info_dir(p, end, info, features);
+ return parse_reply_info_readdir(p, end, info, features);
else if (op == CEPH_MDS_OP_CREATE)
return parse_reply_info_create(p, end, info, features);
else
@@ -494,7 +622,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
spin_lock_init(&s->s_gen_ttl_lock);
- s->s_cap_gen = 0;
+ s->s_cap_gen = 1;
s->s_cap_ttl = jiffies - 1;
spin_lock_init(&s->s_cap_lock);
@@ -510,6 +638,8 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
s->s_cap_reconnect = 0;
s->s_cap_iterator = NULL;
INIT_LIST_HEAD(&s->s_cap_releases);
+ INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
+
INIT_LIST_HEAD(&s->s_cap_flushing);
mdsc->sessions[mds] = s;
@@ -535,6 +665,7 @@ static void __unregister_session(struct ceph_mds_client *mdsc,
dout("__unregister_session mds%d %p\n", s->s_mds, s);
BUG_ON(mdsc->sessions[s->s_mds] != s);
mdsc->sessions[s->s_mds] = NULL;
+ s->s_state = 0;
ceph_con_close(&s->s_con);
ceph_put_mds_session(s);
atomic_dec(&mdsc->num_sessions);
@@ -1197,13 +1328,10 @@ static int iterate_session_caps(struct ceph_mds_session *session,
cap->session = NULL;
list_del_init(&cap->session_caps);
session->s_nr_caps--;
- if (cap->queue_release) {
- list_add_tail(&cap->session_caps,
- &session->s_cap_releases);
- session->s_num_cap_releases++;
- } else {
+ if (cap->queue_release)
+ __ceph_queue_cap_release(session, cap);
+ else
old_cap = cap; /* put_cap it w/o locks held */
- }
}
if (ret < 0)
goto out;
@@ -1286,6 +1414,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
ci->i_prealloc_cap_flush = NULL;
}
+
+ if (drop &&
+ ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ }
}
spin_unlock(&ci->i_ceph_lock);
while (!list_empty(&to_remove)) {
@@ -1638,7 +1775,7 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
session->s_trim_caps = 0;
}
- ceph_send_cap_releases(mdsc, session);
+ ceph_flush_cap_releases(mdsc, session);
return 0;
}
@@ -1681,8 +1818,8 @@ static void wait_caps_flush(struct ceph_mds_client *mdsc,
/*
* called under s_mutex
*/
-void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session)
+static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
{
struct ceph_msg *msg = NULL;
struct ceph_mds_cap_release *head;
@@ -1774,6 +1911,81 @@ out_err:
spin_unlock(&session->s_cap_lock);
}
+static void ceph_cap_release_work(struct work_struct *work)
+{
+ struct ceph_mds_session *session =
+ container_of(work, struct ceph_mds_session, s_cap_release_work);
+
+ mutex_lock(&session->s_mutex);
+ if (session->s_state == CEPH_MDS_SESSION_OPEN ||
+ session->s_state == CEPH_MDS_SESSION_HUNG)
+ ceph_send_cap_releases(session->s_mdsc, session);
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+}
+
+void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ if (mdsc->stopping)
+ return;
+
+ get_session(session);
+ if (queue_work(mdsc->fsc->cap_wq,
+ &session->s_cap_release_work)) {
+ dout("cap release work queued\n");
+ } else {
+ ceph_put_mds_session(session);
+ dout("failed to queue cap release work\n");
+ }
+}
+
+/*
+ * caller holds session->s_cap_lock
+ */
+void __ceph_queue_cap_release(struct ceph_mds_session *session,
+ struct ceph_cap *cap)
+{
+ list_add_tail(&cap->session_caps, &session->s_cap_releases);
+ session->s_num_cap_releases++;
+
+ if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
+ ceph_flush_cap_releases(session->s_mdsc, session);
+}
+
+static void ceph_cap_reclaim_work(struct work_struct *work)
+{
+ struct ceph_mds_client *mdsc =
+ container_of(work, struct ceph_mds_client, cap_reclaim_work);
+ int ret = ceph_trim_dentries(mdsc);
+ if (ret == -EAGAIN)
+ ceph_queue_cap_reclaim_work(mdsc);
+}
+
+void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
+{
+ if (mdsc->stopping)
+ return;
+
+ if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
+ dout("caps reclaim work queued\n");
+ } else {
+ dout("failed to queue caps release work\n");
+ }
+}
+
+void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
+{
+ int val;
+ if (!nr)
+ return;
+ val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
+ if (!(val % CEPH_CAPS_PER_RELEASE)) {
+ atomic_set(&mdsc->cap_reclaim_pending, 0);
+ ceph_queue_cap_reclaim_work(mdsc);
+ }
+}
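
ceph_reclaim_caps_nr() batches the trigger: rather than queueing reclaim for
every released item, it accumulates a pending count and only schedules the
work when the total crosses a multiple of CEPH_CAPS_PER_RELEASE, so
workqueue traffic scales with batches, not items. The same trigger in
portable C11, assuming a hypothetical queue_reclaim_work():

    #include <stdatomic.h>
    #include <stdio.h>

    #define BATCH 128                 /* stand-in for CEPH_CAPS_PER_RELEASE */

    static atomic_int pending;

    static void queue_reclaim_work(void)
    {
            puts("reclaim work queued");      /* placeholder work item */
    }

    static void reclaim_nr(int nr)
    {
            int val;

            if (!nr)
                    return;
            val = atomic_fetch_add(&pending, nr) + nr;
            if (!(val % BATCH)) {             /* crossed a batch boundary */
                    atomic_store(&pending, 0);
                    queue_reclaim_work();
            }
    }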
+
/*
* requests
*/
@@ -1958,10 +2170,39 @@ retry:
return path;
}
+/* Duplicate the dentry->d_name.name safely */
+static int clone_dentry_name(struct dentry *dentry, const char **ppath,
+ int *ppathlen)
+{
+ u32 len;
+ char *name;
+
+retry:
+ len = READ_ONCE(dentry->d_name.len);
+ name = kmalloc(len + 1, GFP_NOFS);
+ if (!name)
+ return -ENOMEM;
+
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_name.len != len) {
+ spin_unlock(&dentry->d_lock);
+ kfree(name);
+ goto retry;
+ }
+ memcpy(name, dentry->d_name.name, len);
+ spin_unlock(&dentry->d_lock);
+
+ name[len] = '\0';
+ *ppath = name;
+ *ppathlen = len;
+ return 0;
+}
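
clone_dentry_name() samples d_name.len without the lock (kmalloc may sleep,
so the allocation cannot sit under d_lock), then re-checks the length under
d_lock and retries if a concurrent rename changed the name in between. A
user-space sketch of that optimistic-allocation loop, pthreads in place of
the spinlock:

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct name {
            pthread_mutex_t lock;             /* stands in for d_lock */
            size_t len;
            const char *s;
    };

    static char *clone_name(struct name *n)
    {
            for (;;) {
                    /* racy read, the READ_ONCE() equivalent */
                    size_t len = *(volatile size_t *)&n->len;
                    char *copy = malloc(len + 1);
                    if (!copy)
                            return NULL;

                    pthread_mutex_lock(&n->lock);
                    if (n->len != len) {      /* raced with a rename */
                            pthread_mutex_unlock(&n->lock);
                            free(copy);
                            continue;
                    }
                    memcpy(copy, n->s, len);
                    pthread_mutex_unlock(&n->lock);

                    copy[len] = '\0';
                    return copy;
            }
    }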
+
static int build_dentry_path(struct dentry *dentry, struct inode *dir,
const char **ppath, int *ppathlen, u64 *pino,
- int *pfreepath)
+ bool *pfreepath, bool parent_locked)
{
+ int ret;
char *path;
rcu_read_lock();
@@ -1970,8 +2211,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
*pino = ceph_ino(dir);
rcu_read_unlock();
- *ppath = dentry->d_name.name;
- *ppathlen = dentry->d_name.len;
+ if (parent_locked) {
+ *ppath = dentry->d_name.name;
+ *ppathlen = dentry->d_name.len;
+ } else {
+ ret = clone_dentry_name(dentry, ppath, ppathlen);
+ if (ret)
+ return ret;
+ *pfreepath = true;
+ }
return 0;
}
rcu_read_unlock();
@@ -1979,13 +2227,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
if (IS_ERR(path))
return PTR_ERR(path);
*ppath = path;
- *pfreepath = 1;
+ *pfreepath = true;
return 0;
}
static int build_inode_path(struct inode *inode,
const char **ppath, int *ppathlen, u64 *pino,
- int *pfreepath)
+ bool *pfreepath)
{
struct dentry *dentry;
char *path;
@@ -2001,7 +2249,7 @@ static int build_inode_path(struct inode *inode,
if (IS_ERR(path))
return PTR_ERR(path);
*ppath = path;
- *pfreepath = 1;
+ *pfreepath = true;
return 0;
}
@@ -2012,7 +2260,7 @@ static int build_inode_path(struct inode *inode,
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
struct inode *rdiri, const char *rpath,
u64 rino, const char **ppath, int *pathlen,
- u64 *ino, int *freepath)
+ u64 *ino, bool *freepath, bool parent_locked)
{
int r = 0;
@@ -2022,7 +2270,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
ceph_snap(rinode));
} else if (rdentry) {
r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
- freepath);
+ freepath, parent_locked);
dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
*ppath);
} else if (rpath || rino) {
@@ -2048,7 +2296,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
const char *path2 = NULL;
u64 ino1 = 0, ino2 = 0;
int pathlen1 = 0, pathlen2 = 0;
- int freepath1 = 0, freepath2 = 0;
+ bool freepath1 = false, freepath2 = false;
int len;
u16 releases;
void *p, *end;
@@ -2056,16 +2304,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
ret = set_request_path_attr(req->r_inode, req->r_dentry,
req->r_parent, req->r_path1, req->r_ino1.ino,
- &path1, &pathlen1, &ino1, &freepath1);
+ &path1, &pathlen1, &ino1, &freepath1,
+ test_bit(CEPH_MDS_R_PARENT_LOCKED,
+ &req->r_req_flags));
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
+ /* If r_old_dentry is set, then assume that its parent is locked */
ret = set_request_path_attr(NULL, req->r_old_dentry,
req->r_old_dentry_dir,
req->r_path2, req->r_ino2.ino,
- &path2, &pathlen2, &ino2, &freepath2);
+ &path2, &pathlen2, &ino2, &freepath2, true);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out_free1;
@@ -2653,7 +2904,10 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
dout("handle_reply tid %lld result %d\n", tid, result);
rinfo = &req->r_reply_info;
- err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
+ if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
+ err = parse_reply_info(msg, rinfo, (u64)-1);
+ else
+ err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
mutex_unlock(&mdsc->mutex);
mutex_lock(&session->s_mutex);
@@ -2684,7 +2938,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
req->r_op == CEPH_MDS_OP_LSSNAP))
ceph_readdir_prepopulate(req, req->r_session);
- ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
}
current->journal_info = NULL;
mutex_unlock(&req->r_fill_mutex);
@@ -2693,12 +2946,18 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
if (realm)
ceph_put_snap_realm(mdsc, realm);
- if (err == 0 && req->r_target_inode &&
- test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
- struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
- spin_lock(&ci->i_unsafe_lock);
- list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
- spin_unlock(&ci->i_unsafe_lock);
+ if (err == 0) {
+ if (req->r_target_inode &&
+ test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
+ struct ceph_inode_info *ci =
+ ceph_inode(req->r_target_inode);
+ spin_lock(&ci->i_unsafe_lock);
+ list_add_tail(&req->r_unsafe_target_item,
+ &ci->i_unsafe_iops);
+ spin_unlock(&ci->i_unsafe_lock);
+ }
+
+ ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
}
out_err:
mutex_lock(&mdsc->mutex);
@@ -2777,6 +3036,25 @@ bad:
pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
+static int __decode_and_drop_session_metadata(void **p, void *end)
+{
+ /* map<string,string> */
+ u32 n;
+ ceph_decode_32_safe(p, end, n, bad);
+ while (n-- > 0) {
+ u32 len;
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_need(p, end, len, bad);
+ *p += len;
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_need(p, end, len, bad);
+ *p += len;
+ }
+ return 0;
+bad:
+ return -1;
+}
+
/*
* handle a mds session control message
*/
@@ -2784,18 +3062,36 @@ static void handle_session(struct ceph_mds_session *session,
struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
+ int mds = session->s_mds;
+ int msg_version = le16_to_cpu(msg->hdr.version);
+ void *p = msg->front.iov_base;
+ void *end = p + msg->front.iov_len;
+ struct ceph_mds_session_head *h;
u32 op;
u64 seq;
- int mds = session->s_mds;
- struct ceph_mds_session_head *h = msg->front.iov_base;
+ unsigned long features = 0;
int wake = 0;
/* decode */
- if (msg->front.iov_len < sizeof(*h))
- goto bad;
+ ceph_decode_need(&p, end, sizeof(*h), bad);
+ h = p;
+ p += sizeof(*h);
+
op = le32_to_cpu(h->op);
seq = le64_to_cpu(h->seq);
+ if (msg_version >= 3) {
+ u32 len;
+ /* version >= 2, metadata */
+ if (__decode_and_drop_session_metadata(&p, end) < 0)
+ goto bad;
+ /* version >= 3, feature bits */
+ ceph_decode_32_safe(&p, end, len, bad);
+ ceph_decode_need(&p, end, len, bad);
+ memcpy(&features, p, min_t(size_t, len, sizeof(features)));
+ p += len;
+ }
+
mutex_lock(&mdsc->mutex);
if (op == CEPH_SESSION_CLOSE) {
get_session(session);
@@ -2821,6 +3117,7 @@ static void handle_session(struct ceph_mds_session *session,
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
pr_info("mds%d reconnect success\n", session->s_mds);
session->s_state = CEPH_MDS_SESSION_OPEN;
+ session->s_features = features;
renewed_caps(mdsc, session, 0);
wake = 1;
if (mdsc->stopping)
@@ -2947,6 +3244,82 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
mutex_unlock(&mdsc->mutex);
}
+static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
+{
+ struct ceph_msg *reply;
+ struct ceph_pagelist *_pagelist;
+ struct page *page;
+ __le32 *addr;
+ int err = -ENOMEM;
+
+ if (!recon_state->allow_multi)
+ return -ENOSPC;
+
+ /* can't handle a message that contains both caps and realms */
+ BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
+
+ /* pre-allocate new pagelist */
+ _pagelist = ceph_pagelist_alloc(GFP_NOFS);
+ if (!_pagelist)
+ return -ENOMEM;
+
+ reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
+ if (!reply)
+ goto fail_msg;
+
+ /* placeholder for nr_caps */
+ err = ceph_pagelist_encode_32(_pagelist, 0);
+ if (err < 0)
+ goto fail;
+
+ if (recon_state->nr_caps) {
+ /* currently encoding caps */
+ err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
+ if (err)
+ goto fail;
+ } else {
+ /* placeholder for nr_realms (currently encoding realms) */
+ err = ceph_pagelist_encode_32(_pagelist, 0);
+ if (err < 0)
+ goto fail;
+ }
+
+ err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
+ if (err)
+ goto fail;
+
+ page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
+ addr = kmap_atomic(page);
+ if (recon_state->nr_caps) {
+ /* currently encoding caps */
+ *addr = cpu_to_le32(recon_state->nr_caps);
+ } else {
+ /* currently encoding realms */
+ *(addr + 1) = cpu_to_le32(recon_state->nr_realms);
+ }
+ kunmap_atomic(addr);
+
+ reply->hdr.version = cpu_to_le16(5);
+ reply->hdr.compat_version = cpu_to_le16(4);
+
+ reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
+ ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
+
+ ceph_con_send(&recon_state->session->s_con, reply);
+ ceph_pagelist_release(recon_state->pagelist);
+
+ recon_state->pagelist = _pagelist;
+ recon_state->nr_caps = 0;
+ recon_state->nr_realms = 0;
+ recon_state->msg_version = 5;
+ return 0;
+fail:
+ ceph_msg_put(reply);
+fail_msg:
+ ceph_pagelist_release(_pagelist);
+ return err;
+}
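
send_reconnect_partial() is what makes the v5 multi-message reconnect work:
once the pagelist approaches RECONNECT_MAX_SIZE, the data encoded so far is
flushed in its own message and a fresh pagelist is started. Since the
cap/realm counts are only known at flush time, each pagelist opens with
zeroed 32-bit placeholders that are patched in place (kmap_atomic() on the
first page) just before sending. The placeholder-then-patch idea on a flat
buffer, as a sketch with hypothetical names (a real encoder would also
convert to little-endian):

    #include <stdint.h>
    #include <string.h>

    /* Reserve a zeroed 32-bit count at buf[off]; return the offset past it. */
    static size_t reserve_count(uint8_t *buf, size_t off)
    {
            uint32_t zero = 0;

            memcpy(buf + off, &zero, sizeof(zero));
            return off + sizeof(zero);
    }

    /* Patch in the real count once all entries have been appended. */
    static void patch_count(uint8_t *buf, size_t off, uint32_t n)
    {
            memcpy(buf + off, &n, sizeof(n));
    }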
+
/*
* Encode information about a cap for a reconnect with the MDS.
*/
@@ -2966,9 +3339,6 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
inode, ceph_vinop(inode), cap, cap->cap_id,
ceph_cap_string(cap->issued));
- err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
- if (err)
- return err;
spin_lock(&ci->i_ceph_lock);
cap->seq = 0; /* reset cap seq */
@@ -3008,7 +3378,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
if (recon_state->msg_version >= 2) {
int num_fcntl_locks, num_flock_locks;
struct ceph_filelock *flocks = NULL;
- size_t struct_len, total_len = 0;
+ size_t struct_len, total_len = sizeof(u64);
u8 struct_v = 0;
encode_again:
@@ -3043,7 +3413,7 @@ encode_again:
if (recon_state->msg_version >= 3) {
/* version, compat_version and struct_len */
- total_len = 2 * sizeof(u8) + sizeof(u32);
+ total_len += 2 * sizeof(u8) + sizeof(u32);
struct_v = 2;
}
/*
@@ -3060,12 +3430,19 @@ encode_again:
struct_len += sizeof(u64); /* snap_follows */
total_len += struct_len;
- err = ceph_pagelist_reserve(pagelist, total_len);
- if (err) {
- kfree(flocks);
- goto out_err;
+
+ if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
+ err = send_reconnect_partial(recon_state);
+ if (err)
+ goto out_freeflocks;
+ pagelist = recon_state->pagelist;
}
+ err = ceph_pagelist_reserve(pagelist, total_len);
+ if (err)
+ goto out_freeflocks;
+
+ ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
if (recon_state->msg_version >= 3) {
ceph_pagelist_encode_8(pagelist, struct_v);
ceph_pagelist_encode_8(pagelist, 1);
@@ -3077,7 +3454,7 @@ encode_again:
num_fcntl_locks, num_flock_locks);
if (struct_v >= 2)
ceph_pagelist_encode_64(pagelist, snap_follows);
-
+out_freeflocks:
kfree(flocks);
} else {
u64 pathbase = 0;
@@ -3098,20 +3475,81 @@ encode_again:
}
err = ceph_pagelist_reserve(pagelist,
- pathlen + sizeof(u32) + sizeof(rec.v1));
+ sizeof(u64) + sizeof(u32) +
+ pathlen + sizeof(rec.v1));
if (err) {
- kfree(path);
- goto out_err;
+ goto out_freepath;
}
+ ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
ceph_pagelist_encode_string(pagelist, path, pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
-
+out_freepath:
kfree(path);
}
- recon_state->nr_caps++;
out_err:
+ if (err >= 0)
+ recon_state->nr_caps++;
+ return err;
+}
+
+static int encode_snap_realms(struct ceph_mds_client *mdsc,
+ struct ceph_reconnect_state *recon_state)
+{
+ struct rb_node *p;
+ struct ceph_pagelist *pagelist = recon_state->pagelist;
+ int err = 0;
+
+ if (recon_state->msg_version >= 4) {
+ err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
+ if (err < 0)
+ goto fail;
+ }
+
+ /*
+ * snaprealms. we provide mds with the ino, seq (version), and
+ * parent for all of our realms. If the mds has any newer info,
+ * it will tell us.
+ */
+ for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
+ struct ceph_snap_realm *realm =
+ rb_entry(p, struct ceph_snap_realm, node);
+ struct ceph_mds_snaprealm_reconnect sr_rec;
+
+ if (recon_state->msg_version >= 4) {
+ size_t need = sizeof(u8) * 2 + sizeof(u32) +
+ sizeof(sr_rec);
+
+ if (pagelist->length + need > RECONNECT_MAX_SIZE) {
+ err = send_reconnect_partial(recon_state);
+ if (err)
+ goto fail;
+ pagelist = recon_state->pagelist;
+ }
+
+ err = ceph_pagelist_reserve(pagelist, need);
+ if (err)
+ goto fail;
+
+ ceph_pagelist_encode_8(pagelist, 1);
+ ceph_pagelist_encode_8(pagelist, 1);
+ ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
+ }
+
+ dout(" adding snap realm %llx seq %lld parent %llx\n",
+ realm->ino, realm->seq, realm->parent_ino);
+ sr_rec.ino = cpu_to_le64(realm->ino);
+ sr_rec.seq = cpu_to_le64(realm->seq);
+ sr_rec.parent = cpu_to_le64(realm->parent_ino);
+
+ err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
+ if (err)
+ goto fail;
+
+ recon_state->nr_realms++;
+ }
+fail:
return err;
}
@@ -3132,18 +3570,17 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_msg *reply;
- struct rb_node *p;
int mds = session->s_mds;
int err = -ENOMEM;
- int s_nr_caps;
- struct ceph_pagelist *pagelist;
- struct ceph_reconnect_state recon_state;
+ struct ceph_reconnect_state recon_state = {
+ .session = session,
+ };
LIST_HEAD(dispose);
pr_info("mds%d reconnect start\n", mds);
- pagelist = ceph_pagelist_alloc(GFP_NOFS);
- if (!pagelist)
+ recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
+ if (!recon_state.pagelist)
goto fail_nopagelist;
reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
@@ -3187,63 +3624,90 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
/* replay unsafe requests */
replay_unsafe_requests(mdsc, session);
+ ceph_early_kick_flushing_caps(mdsc, session);
+
down_read(&mdsc->snap_rwsem);
- /* traverse this session's caps */
- s_nr_caps = session->s_nr_caps;
- err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
+ /* placeholder for nr_caps */
+ err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
if (err)
goto fail;
- recon_state.nr_caps = 0;
- recon_state.pagelist = pagelist;
- if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
+ if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
recon_state.msg_version = 3;
- else
+ recon_state.allow_multi = true;
+ } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
+ recon_state.msg_version = 3;
+ } else {
recon_state.msg_version = 2;
+ }
+ /* traverse this session's caps */
err = iterate_session_caps(session, encode_caps_cb, &recon_state);
- if (err < 0)
- goto fail;
spin_lock(&session->s_cap_lock);
session->s_cap_reconnect = 0;
spin_unlock(&session->s_cap_lock);
- /*
- * snaprealms. we provide mds with the ino, seq (version), and
- * parent for all of our realms. If the mds has any newer info,
- * it will tell us.
- */
- for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
- struct ceph_snap_realm *realm =
- rb_entry(p, struct ceph_snap_realm, node);
- struct ceph_mds_snaprealm_reconnect sr_rec;
+ if (err < 0)
+ goto fail;
- dout(" adding snap realm %llx seq %lld parent %llx\n",
- realm->ino, realm->seq, realm->parent_ino);
- sr_rec.ino = cpu_to_le64(realm->ino);
- sr_rec.seq = cpu_to_le64(realm->seq);
- sr_rec.parent = cpu_to_le64(realm->parent_ino);
- err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
- if (err)
- goto fail;
+ /* check if all realms can be encoded into current message */
+ if (mdsc->num_snap_realms) {
+ size_t total_len =
+ recon_state.pagelist->length +
+ mdsc->num_snap_realms *
+ sizeof(struct ceph_mds_snaprealm_reconnect);
+ if (recon_state.msg_version >= 4) {
+ /* number of realms */
+ total_len += sizeof(u32);
+ /* version, compat_version and struct_len */
+ total_len += mdsc->num_snap_realms *
+ (2 * sizeof(u8) + sizeof(u32));
+ }
+ if (total_len > RECONNECT_MAX_SIZE) {
+ if (!recon_state.allow_multi) {
+ err = -ENOSPC;
+ goto fail;
+ }
+ if (recon_state.nr_caps) {
+ err = send_reconnect_partial(&recon_state);
+ if (err)
+ goto fail;
+ }
+ recon_state.msg_version = 5;
+ }
}
- reply->hdr.version = cpu_to_le16(recon_state.msg_version);
+ err = encode_snap_realms(mdsc, &recon_state);
+ if (err < 0)
+ goto fail;
- /* raced with cap release? */
- if (s_nr_caps != recon_state.nr_caps) {
- struct page *page = list_first_entry(&pagelist->head,
- struct page, lru);
+ if (recon_state.msg_version >= 5) {
+ err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
+ if (err < 0)
+ goto fail;
+ }
+
+ if (recon_state.nr_caps || recon_state.nr_realms) {
+ struct page *page =
+ list_first_entry(&recon_state.pagelist->head,
+ struct page, lru);
__le32 *addr = kmap_atomic(page);
- *addr = cpu_to_le32(recon_state.nr_caps);
+ if (recon_state.nr_caps) {
+ WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
+ *addr = cpu_to_le32(recon_state.nr_caps);
+ } else if (recon_state.msg_version >= 4) {
+ *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
+ }
kunmap_atomic(addr);
}
- reply->hdr.data_len = cpu_to_le32(pagelist->length);
- ceph_msg_data_add_pagelist(reply, pagelist);
+ reply->hdr.version = cpu_to_le16(recon_state.msg_version);
+ if (recon_state.msg_version >= 4)
+ reply->hdr.compat_version = cpu_to_le16(4);
- ceph_early_kick_flushing_caps(mdsc, session);
+ reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
+ ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
ceph_con_send(&session->s_con, reply);
@@ -3254,7 +3718,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
mutex_unlock(&mdsc->mutex);
up_read(&mdsc->snap_rwsem);
- ceph_pagelist_release(pagelist);
+ ceph_pagelist_release(recon_state.pagelist);
return;
fail:
@@ -3262,7 +3726,7 @@ fail:
up_read(&mdsc->snap_rwsem);
mutex_unlock(&session->s_mutex);
fail_nomsg:
- ceph_pagelist_release(pagelist);
+ ceph_pagelist_release(recon_state.pagelist);
fail_nopagelist:
pr_err("error %d preparing reconnect for mds%d\n", err, mds);
return;
@@ -3580,7 +4044,6 @@ static void delayed_work(struct work_struct *work)
int renew_caps;
dout("mdsc delayed_work\n");
- ceph_check_delayed_caps(mdsc);
mutex_lock(&mdsc->mutex);
renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
@@ -3628,6 +4091,12 @@ static void delayed_work(struct work_struct *work)
}
mutex_unlock(&mdsc->mutex);
+ ceph_check_delayed_caps(mdsc);
+
+ ceph_queue_cap_reclaim_work(mdsc);
+
+ ceph_trim_snapid_map(mdsc);
+
schedule_delayed(mdsc);
}
@@ -3660,6 +4129,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
init_rwsem(&mdsc->snap_rwsem);
mdsc->snap_realms = RB_ROOT;
INIT_LIST_HEAD(&mdsc->snap_empty);
+ mdsc->num_snap_realms = 0;
spin_lock_init(&mdsc->snap_empty_lock);
mdsc->last_tid = 0;
mdsc->oldest_tid = 0;
@@ -3677,11 +4147,19 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
mdsc->num_cap_flushing = 0;
spin_lock_init(&mdsc->cap_dirty_lock);
init_waitqueue_head(&mdsc->cap_flushing_wq);
- spin_lock_init(&mdsc->dentry_lru_lock);
- INIT_LIST_HEAD(&mdsc->dentry_lru);
+ INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
+ atomic_set(&mdsc->cap_reclaim_pending, 0);
+
+ spin_lock_init(&mdsc->dentry_list_lock);
+ INIT_LIST_HEAD(&mdsc->dentry_leases);
+ INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
ceph_caps_init(mdsc);
- ceph_adjust_min_caps(mdsc, fsc->min_caps);
+ ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
+
+ spin_lock_init(&mdsc->snapid_map_lock);
+ mdsc->snapid_map_tree = RB_ROOT;
+ INIT_LIST_HEAD(&mdsc->snapid_map_lru);
init_rwsem(&mdsc->pool_perm_rwsem);
mdsc->pool_perm_tree = RB_ROOT;
@@ -3876,8 +4354,10 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
WARN_ON(!list_empty(&mdsc->cap_delay_list));
mutex_unlock(&mdsc->mutex);
+ ceph_cleanup_snapid_map(mdsc);
ceph_cleanup_empty_realms(mdsc);
+ cancel_work_sync(&mdsc->cap_reclaim_work);
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
dout("stopped\n");
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 729da155ebf0..50385a481fdb 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -21,11 +21,14 @@
#define CEPHFS_FEATURE_REPLY_ENCODING 9
#define CEPHFS_FEATURE_RECLAIM_CLIENT 10
#define CEPHFS_FEATURE_LAZY_CAP_WANTED 11
+#define CEPHFS_FEATURE_MULTI_RECONNECT 12
#define CEPHFS_FEATURES_CLIENT_SUPPORTED { \
0, 1, 2, 3, 4, 5, 6, 7, \
CEPHFS_FEATURE_MIMIC, \
+ CEPHFS_FEATURE_REPLY_ENCODING, \
CEPHFS_FEATURE_LAZY_CAP_WANTED, \
+ CEPHFS_FEATURE_MULTI_RECONNECT, \
}
#define CEPHFS_FEATURES_CLIENT_REQUIRED {}
@@ -65,6 +68,7 @@ struct ceph_mds_reply_info_in {
char *pool_ns_data;
u64 max_bytes;
u64 max_files;
+ s32 dir_pin;
};
struct ceph_mds_reply_dir_entry {
@@ -152,6 +156,7 @@ struct ceph_mds_session {
int s_mds;
int s_state;
unsigned long s_ttl; /* time until mds kills us */
+ unsigned long s_features;
u64 s_seq; /* incoming msg seq # */
struct mutex s_mutex; /* serialize session messages */
@@ -167,19 +172,20 @@ struct ceph_mds_session {
/* protected by s_cap_lock */
spinlock_t s_cap_lock;
struct list_head s_caps; /* all caps issued by this session */
+ struct ceph_cap *s_cap_iterator;
int s_nr_caps, s_trim_caps;
int s_num_cap_releases;
int s_cap_reconnect;
int s_readonly;
struct list_head s_cap_releases; /* waiting cap_release messages */
- struct ceph_cap *s_cap_iterator;
+ struct work_struct s_cap_release_work;
/* protected by mutex */
struct list_head s_cap_flushing; /* inodes w/ flushing caps */
unsigned long s_renew_requested; /* last time we sent a renew req */
u64 s_renew_seq;
- refcount_t s_ref;
+ refcount_t s_ref;
struct list_head s_waiting; /* waiting requests */
struct list_head s_unsafe; /* unsafe requests */
};
@@ -310,6 +316,15 @@ struct ceph_pool_perm {
char pool_ns[];
};
+struct ceph_snapid_map {
+ struct rb_node node;
+ struct list_head lru;
+ atomic_t ref;
+ u64 snap;
+ dev_t dev;
+ unsigned long last_used;
+};
+
/*
* mds client state
*/
@@ -341,6 +356,7 @@ struct ceph_mds_client {
struct rw_semaphore snap_rwsem;
struct rb_root snap_realms;
struct list_head snap_empty;
+ int num_snap_realms;
spinlock_t snap_empty_lock; /* protect snap_empty */
u64 last_tid; /* most recent mds request */
@@ -362,6 +378,9 @@ struct ceph_mds_client {
spinlock_t cap_dirty_lock; /* protects above items */
wait_queue_head_t cap_flushing_wq;
+ struct work_struct cap_reclaim_work;
+ atomic_t cap_reclaim_pending;
+
/*
* Cap reservations
*
@@ -378,13 +397,18 @@ struct ceph_mds_client {
unreserved) */
int caps_total_count; /* total caps allocated */
int caps_use_count; /* in use */
+ int caps_use_max; /* max used caps */
int caps_reserve_count; /* unused, reserved */
int caps_avail_count; /* unused, unreserved */
int caps_min_count; /* keep at least this many
(unreserved) */
- spinlock_t dentry_lru_lock;
- struct list_head dentry_lru;
- int num_dentry;
+ spinlock_t dentry_list_lock;
+ struct list_head dentry_leases; /* fifo list */
+ struct list_head dentry_dir_leases; /* lru list */
+
+ spinlock_t snapid_map_lock;
+ struct rb_root snapid_map_tree;
+ struct list_head snapid_map_lru;
struct rw_semaphore pool_perm_rwsem;
struct rb_root pool_perm_tree;
@@ -438,9 +462,12 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
kref_put(&req->r_kref, ceph_mdsc_release_request);
}
-extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session);
-
+extern void __ceph_queue_cap_release(struct ceph_mds_session *session,
+ struct ceph_cap *cap);
+extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
+extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
+extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f74193da0e09..b26e12cd8ec3 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -3,12 +3,13 @@
#include <linux/sort.h>
#include <linux/slab.h>
-
#include "super.h"
#include "mds_client.h"
-
#include <linux/ceph/decode.h>
+/* unused map expires after 5 minutes */
+#define CEPH_SNAPID_MAP_TIMEOUT (5 * 60 * HZ)
+
/*
* Snapshots in ceph are driven in large part by cooperation from the
* client. In contrast to local file systems or file servers that
@@ -124,6 +125,8 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
INIT_LIST_HEAD(&realm->inodes_with_caps);
spin_lock_init(&realm->inodes_with_caps_lock);
__insert_snap_realm(&mdsc->snap_realms, realm);
+ mdsc->num_snap_realms++;
+
dout("create_snap_realm %llx %p\n", realm->ino, realm);
return realm;
}
@@ -175,6 +178,7 @@ static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);
rb_erase(&realm->node, &mdsc->snap_realms);
+ mdsc->num_snap_realms--;
if (realm->parent) {
list_del_init(&realm->child_item);
@@ -568,7 +572,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
old_snapc = NULL;
update_snapc:
- if (ci->i_head_snapc) {
+ if (ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ci->i_head_snapc = NULL;
+ } else {
ci->i_head_snapc = ceph_get_snap_context(new_snapc);
dout(" new snapc is %p\n", new_snapc);
}
@@ -986,3 +995,154 @@ out:
up_write(&mdsc->snap_rwsem);
return;
}
+
+struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
+ u64 snap)
+{
+ struct ceph_snapid_map *sm, *exist;
+ struct rb_node **p, *parent;
+ int ret;
+
+ exist = NULL;
+ spin_lock(&mdsc->snapid_map_lock);
+ p = &mdsc->snapid_map_tree.rb_node;
+ while (*p) {
+ exist = rb_entry(*p, struct ceph_snapid_map, node);
+ if (snap > exist->snap) {
+ p = &(*p)->rb_left;
+ } else if (snap < exist->snap) {
+ p = &(*p)->rb_right;
+ } else {
+ if (atomic_inc_return(&exist->ref) == 1)
+ list_del_init(&exist->lru);
+ break;
+ }
+ exist = NULL;
+ }
+ spin_unlock(&mdsc->snapid_map_lock);
+ if (exist) {
+ dout("found snapid map %llx -> %x\n", exist->snap, exist->dev);
+ return exist;
+ }
+
+ sm = kmalloc(sizeof(*sm), GFP_NOFS);
+ if (!sm)
+ return NULL;
+
+ ret = get_anon_bdev(&sm->dev);
+ if (ret < 0) {
+ kfree(sm);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&sm->lru);
+ atomic_set(&sm->ref, 1);
+ sm->snap = snap;
+
+ exist = NULL;
+ parent = NULL;
+ p = &mdsc->snapid_map_tree.rb_node;
+ spin_lock(&mdsc->snapid_map_lock);
+ while (*p) {
+ parent = *p;
+ exist = rb_entry(*p, struct ceph_snapid_map, node);
+ if (snap > exist->snap)
+ p = &(*p)->rb_left;
+ else if (snap < exist->snap)
+ p = &(*p)->rb_right;
+ else
+ break;
+ exist = NULL;
+ }
+ if (exist) {
+ if (atomic_inc_return(&exist->ref) == 1)
+ list_del_init(&exist->lru);
+ } else {
+ rb_link_node(&sm->node, parent, p);
+ rb_insert_color(&sm->node, &mdsc->snapid_map_tree);
+ }
+ spin_unlock(&mdsc->snapid_map_lock);
+ if (exist) {
+ free_anon_bdev(sm->dev);
+ kfree(sm);
+ dout("found snapid map %llx -> %x\n", exist->snap, exist->dev);
+ return exist;
+ }
+
+ dout("create snapid map %llx -> %x\n", sm->snap, sm->dev);
+ return sm;
+}
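
ceph_get_snapid_map() looks the key up twice because kmalloc(GFP_NOFS) and
get_anon_bdev() may sleep and therefore cannot run under snapid_map_lock:
search under the lock, drop it, allocate, retake the lock and search again;
if another thread inserted the same snap id in between, reference the winner
and destroy the local copy. The shape of that race-safe insert, sketched
over a hypothetical keyed map (map_lookup/map_insert/obj_* are stand-ins):

    static struct obj *get_or_create(struct keyed_map *m, u64 key)
    {
            struct obj *o, *exist;

            spin_lock(&m->lock);
            exist = map_lookup(m, key);
            if (exist)
                    refcount_inc(&exist->ref);
            spin_unlock(&m->lock);
            if (exist)
                    return exist;

            o = kmalloc(sizeof(*o), GFP_NOFS);      /* may sleep */
            if (!o)
                    return NULL;
            obj_init(o, key);

            spin_lock(&m->lock);
            exist = map_lookup(m, key);             /* second lookup */
            if (exist)
                    refcount_inc(&exist->ref);      /* another thread won */
            else
                    map_insert(m, o);
            spin_unlock(&m->lock);

            if (exist) {
                    obj_destroy(o);                 /* discard the loser */
                    return exist;
            }
            return o;
    }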
+
+void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
+ struct ceph_snapid_map *sm)
+{
+ if (!sm)
+ return;
+ if (atomic_dec_and_lock(&sm->ref, &mdsc->snapid_map_lock)) {
+ if (!RB_EMPTY_NODE(&sm->node)) {
+ sm->last_used = jiffies;
+ list_add_tail(&sm->lru, &mdsc->snapid_map_lru);
+ spin_unlock(&mdsc->snapid_map_lock);
+ } else {
+ /* already cleaned up by
+ * ceph_cleanup_snapid_map() */
+ spin_unlock(&mdsc->snapid_map_lock);
+ kfree(sm);
+ }
+ }
+}
+
+void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
+{
+ struct ceph_snapid_map *sm;
+ unsigned long now;
+ LIST_HEAD(to_free);
+
+ spin_lock(&mdsc->snapid_map_lock);
+ now = jiffies;
+
+ while (!list_empty(&mdsc->snapid_map_lru)) {
+ sm = list_first_entry(&mdsc->snapid_map_lru,
+ struct ceph_snapid_map, lru);
+ if (time_after(sm->last_used + CEPH_SNAPID_MAP_TIMEOUT, now))
+ break;
+
+ rb_erase(&sm->node, &mdsc->snapid_map_tree);
+ list_move(&sm->lru, &to_free);
+ }
+ spin_unlock(&mdsc->snapid_map_lock);
+
+ while (!list_empty(&to_free)) {
+ sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
+ list_del(&sm->lru);
+ dout("trim snapid map %llx -> %x\n", sm->snap, sm->dev);
+ free_anon_bdev(sm->dev);
+ kfree(sm);
+ }
+}
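
ceph_trim_snapid_map() and ceph_cleanup_snapid_map() below share the
detach-then-free split: victims are moved to a private list while
snapid_map_lock is held, and free_anon_bdev()/kfree() run only after the
lock is dropped, keeping the spinlock hold time short. A sketch of the
pattern, assuming the kernel list API and a hypothetical item_expired():

    struct item {
            struct list_head lru;
            unsigned long last_used;
    };

    static void trim_expired(struct list_head *lru, spinlock_t *lock)
    {
            struct item *it;
            LIST_HEAD(to_free);

            spin_lock(lock);
            while (!list_empty(lru)) {
                    it = list_first_entry(lru, struct item, lru);
                    if (!item_expired(it))
                            break;          /* LRU order: the rest are newer */
                    list_move(&it->lru, &to_free);  /* detach under the lock */
            }
            spin_unlock(lock);

            while (!list_empty(&to_free)) {         /* free, lock dropped */
                    it = list_first_entry(&to_free, struct item, lru);
                    list_del(&it->lru);
                    kfree(it);
            }
    }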
+
+void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
+{
+ struct ceph_snapid_map *sm;
+ struct rb_node *p;
+ LIST_HEAD(to_free);
+
+ spin_lock(&mdsc->snapid_map_lock);
+ while ((p = rb_first(&mdsc->snapid_map_tree))) {
+ sm = rb_entry(p, struct ceph_snapid_map, node);
+ rb_erase(p, &mdsc->snapid_map_tree);
+ RB_CLEAR_NODE(p);
+ list_move(&sm->lru, &to_free);
+ }
+ spin_unlock(&mdsc->snapid_map_lock);
+
+ while (!list_empty(&to_free)) {
+ sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
+ list_del(&sm->lru);
+ free_anon_bdev(sm->dev);
+ if (WARN_ON_ONCE(atomic_read(&sm->ref))) {
+ pr_err("snapid map %llx -> %x still in use\n",
+ sm->snap, sm->dev);
+ }
+ }
+}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index da2cd8e89062..6d5bb2f74612 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -133,6 +133,7 @@ enum {
Opt_rasize,
Opt_caps_wanted_delay_min,
Opt_caps_wanted_delay_max,
+ Opt_caps_max,
Opt_readdir_max_entries,
Opt_readdir_max_bytes,
Opt_congestion_kb,
@@ -175,6 +176,7 @@ static match_table_t fsopt_tokens = {
{Opt_rasize, "rasize=%d"},
{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
+ {Opt_caps_max, "caps_max=%d"},
{Opt_readdir_max_entries, "readdir_max_entries=%d"},
{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
{Opt_congestion_kb, "write_congestion_kb=%d"},
@@ -286,6 +288,11 @@ static int parse_fsopt_token(char *c, void *private)
return -EINVAL;
fsopt->caps_wanted_delay_max = intval;
break;
+ case Opt_caps_max:
+ if (intval < 0)
+ return -EINVAL;
+ fsopt->caps_max = intval;
+ break;
case Opt_readdir_max_entries:
if (intval < 1)
return -EINVAL;
@@ -576,6 +583,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_printf(m, ",rasize=%d", fsopt->rasize);
if (fsopt->congestion_kb != default_congestion_kb())
seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
+ if (fsopt->caps_max)
+ seq_printf(m, ",caps_max=%d", fsopt->caps_max);
if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
seq_printf(m, ",caps_wanted_delay_min=%d",
fsopt->caps_wanted_delay_min);
@@ -671,6 +680,9 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
if (!fsc->trunc_wq)
goto fail_pg_inv_wq;
+ fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
+ if (!fsc->cap_wq)
+ goto fail_trunc_wq;
/* set up mempools */
err = -ENOMEM;
@@ -678,13 +690,12 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
size = sizeof (struct page *) * (page_count ? page_count : 1);
fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
if (!fsc->wb_pagevec_pool)
- goto fail_trunc_wq;
-
- /* caps */
- fsc->min_caps = fsopt->max_readdir;
+ goto fail_cap_wq;
return fsc;
+fail_cap_wq:
+ destroy_workqueue(fsc->cap_wq);
fail_trunc_wq:
destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
@@ -706,6 +717,7 @@ static void flush_fs_workqueues(struct ceph_fs_client *fsc)
flush_workqueue(fsc->wb_wq);
flush_workqueue(fsc->pg_inv_wq);
flush_workqueue(fsc->trunc_wq);
+ flush_workqueue(fsc->cap_wq);
}
static void destroy_fs_client(struct ceph_fs_client *fsc)
@@ -715,6 +727,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
destroy_workqueue(fsc->wb_wq);
destroy_workqueue(fsc->pg_inv_wq);
destroy_workqueue(fsc->trunc_wq);
+ destroy_workqueue(fsc->cap_wq);
mempool_destroy(fsc->wb_pagevec_pool);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index dfb64a5211b6..16c03188578e 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -79,6 +79,7 @@ struct ceph_mount_options {
int rasize; /* max readahead */
int congestion_kb; /* max writeback in flight */
int caps_wanted_delay_min, caps_wanted_delay_max;
+ int caps_max;
int max_readdir; /* max readdir result (entries) */
int max_readdir_bytes; /* max readdir result (bytes) */
@@ -100,17 +101,18 @@ struct ceph_fs_client {
struct ceph_client *client;
unsigned long mount_state;
- int min_caps; /* min caps i added */
loff_t max_file_size;
struct ceph_mds_client *mdsc;
/* writeback */
mempool_t *wb_pagevec_pool;
+ atomic_long_t writeback_count;
+
struct workqueue_struct *wb_wq;
struct workqueue_struct *pg_inv_wq;
struct workqueue_struct *trunc_wq;
- atomic_long_t writeback_count;
+ struct workqueue_struct *cap_wq;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_dentry_lru, *debugfs_caps;
@@ -260,17 +262,22 @@ struct ceph_inode_xattr {
* Ceph dentry state
*/
struct ceph_dentry_info {
+ struct dentry *dentry;
struct ceph_mds_session *lease_session;
+ struct list_head lease_list;
+ unsigned flags;
int lease_shared_gen;
u32 lease_gen;
u32 lease_seq;
unsigned long lease_renew_after, lease_renew_from;
- struct list_head lru;
- struct dentry *dentry;
unsigned long time;
u64 offset;
};
+#define CEPH_DENTRY_REFERENCED 1
+#define CEPH_DENTRY_LEASE_LIST 2
+#define CEPH_DENTRY_SHRINK_LIST 4
+
struct ceph_inode_xattrs_info {
/*
* (still encoded) xattr blob. we avoid the overhead of parsing
@@ -318,6 +325,8 @@ struct ceph_inode_info {
/* quotas */
u64 i_max_bytes, i_max_files;
+ s32 i_dir_pin;
+
struct rb_root i_fragtree;
int i_fragtree_nsplits;
struct mutex i_fragtree_mutex;
@@ -370,7 +379,10 @@ struct ceph_inode_info {
struct list_head i_unsafe_iops; /* uncommitted mds inode ops */
spinlock_t i_unsafe_lock;
- struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */
+ union {
+ struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */
+ struct ceph_snapid_map *i_snapid_map; /* snapid -> dev_t */
+ };
int i_snap_realm_counter; /* snap realm (if caps) */
struct list_head i_snap_realm_item;
struct list_head i_snap_flush_item;
@@ -587,7 +599,7 @@ extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
struct ceph_inode_frag *pfrag,
int *found);
-static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
+static inline struct ceph_dentry_info *ceph_dentry(const struct dentry *dentry)
{
return (struct ceph_dentry_info *)dentry->d_fsdata;
}
@@ -656,7 +668,8 @@ extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check);
extern void ceph_caps_init(struct ceph_mds_client *mdsc);
extern void ceph_caps_finalize(struct ceph_mds_client *mdsc);
-extern void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta);
+extern void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc,
+ struct ceph_mount_options *fsopt);
extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx, int need);
extern void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
@@ -837,6 +850,14 @@ extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap);
extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
+extern struct ceph_snapid_map *ceph_get_snapid_map(struct ceph_mds_client *mdsc,
+ u64 snap);
+extern void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
+ struct ceph_snapid_map *sm);
+extern void ceph_trim_snapid_map(struct ceph_mds_client *mdsc);
+extern void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc);
+
+
/*
* a cap_snap is "pending" if it is still awaiting an in-progress
* sync write (that may/may not still update size, mtime, etc.).
@@ -975,11 +996,11 @@ extern void ceph_add_cap(struct inode *inode,
unsigned cap, unsigned seq, u64 realmino, int flags,
struct ceph_cap **new_cap);
extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
+extern void __ceph_remove_caps(struct inode* inode);
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
extern int ceph_is_any_caps(struct inode *inode);
-extern void ceph_queue_caps_release(struct inode *inode);
extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
extern int ceph_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
@@ -1049,10 +1070,10 @@ extern int ceph_handle_snapdir(struct ceph_mds_request *req,
extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err);
-extern void ceph_dentry_lru_add(struct dentry *dn);
-extern void ceph_dentry_lru_touch(struct dentry *dn);
-extern void ceph_dentry_lru_del(struct dentry *dn);
+extern void __ceph_dentry_lease_touch(struct ceph_dentry_info *di);
+extern void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di);
extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
+extern int ceph_trim_dentries(struct ceph_mds_client *mdsc);
extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 316f6ad10644..0cc42c8879e9 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -228,8 +228,19 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
ci->i_rctime.tv_nsec);
}
-/* quotas */
+/* dir pin */
+static bool ceph_vxattrcb_dir_pin_exists(struct ceph_inode_info *ci)
+{
+ return ci->i_dir_pin != -ENODATA;
+}
+
+static size_t ceph_vxattrcb_dir_pin(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ return snprintf(val, size, "%d", (int)ci->i_dir_pin);
+}
+/* quotas */
static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
{
bool ret = false;
@@ -315,6 +326,13 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
XATTR_RSTAT_FIELD(dir, rbytes),
XATTR_RSTAT_FIELD(dir, rctime),
{
+ .name = "ceph.dir.pin",
+ .name_size = sizeof("ceph.dir.pin"),
+ .getxattr_cb = ceph_vxattrcb_dir_pin,
+ .exists_cb = ceph_vxattrcb_dir_pin_exists,
+ .flags = VXATTR_FLAG_HIDDEN,
+ },
+ {
.name = "ceph.quota",
.name_size = sizeof("ceph.quota"),
.getxattr_cb = ceph_vxattrcb_quota,
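
The new ceph.dir.pin entry is flagged VXATTR_FLAG_HIDDEN and gated by an
exists_cb, so it is only reported when the MDS has actually pinned the directory
(i_dir_pin != -ENODATA). A hedged sketch of how such a table entry could be
consulted; the demo structs only mirror the fields used above and
get_one_vxattr() is hypothetical:

	#include <errno.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <sys/types.h>

	struct demo_inode { int i_dir_pin; };

	struct demo_vxattr {
		const char *name;
		ssize_t (*getxattr_cb)(struct demo_inode *ci, char *val, size_t size);
		bool (*exists_cb)(struct demo_inode *ci);	/* may be NULL */
	};

	static ssize_t get_one_vxattr(struct demo_vxattr *vx, struct demo_inode *ci,
				      char *val, size_t size)
	{
		/* hidden vxattrs only materialize when exists_cb says so */
		if (vx->exists_cb && !vx->exists_cb(ci))
			return -ENODATA;
		return vx->getxattr_cb(ci, val, size);
	}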
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index f1ddc9d03c10..76724efc831c 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -117,25 +117,25 @@ config CIFS_UPCALL
secure Kerberos authentication is required). If unsure, say Y.
config CIFS_XATTR
- bool "CIFS extended attributes"
- depends on CIFS
- help
- Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page for details).
- CIFS maps the name of extended attributes beginning with the user
- namespace prefix to SMB/CIFS EAs. EAs are stored on Windows
- servers without the user namespace prefix, but their names are
- seen by Linux cifs clients prefaced by the user namespace prefix.
- The system namespace (used by some filesystems to store ACLs) is
- not supported at this time.
-
- If unsure, say Y.
+ bool "CIFS extended attributes"
+ depends on CIFS
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page for details).
+ CIFS maps the name of extended attributes beginning with the user
+ namespace prefix to SMB/CIFS EAs. EAs are stored on Windows
+ servers without the user namespace prefix, but their names are
+ seen by Linux cifs clients prefaced by the user namespace prefix.
+ The system namespace (used by some filesystems to store ACLs) is
+ not supported at this time.
+
+ If unsure, say Y.
config CIFS_POSIX
- bool "CIFS POSIX Extensions"
- depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
- help
- Enabling this option will cause the cifs client to attempt to
+ bool "CIFS POSIX Extensions"
+ depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
+ help
+ Enabling this option will cause the cifs client to attempt to
negotiate a newer dialect with servers, such as Samba 3.0.5
or later, that optionally can handle more POSIX like (rather
than Windows like) file behavior. It also enables
@@ -144,61 +144,62 @@ config CIFS_POSIX
CIFS POSIX ACL support. If unsure, say N.
config CIFS_ACL
- bool "Provide CIFS ACL support"
- depends on CIFS_XATTR && KEYS
- help
- Allows fetching CIFS/NTFS ACL from the server. The DACL blob
- is handed over to the application/caller. See the man
- page for getcifsacl for more information. If unsure, say Y.
+ bool "Provide CIFS ACL support"
+ depends on CIFS_XATTR && KEYS
+ help
+ Allows fetching CIFS/NTFS ACL from the server. The DACL blob
+ is handed over to the application/caller. See the man
+ page for getcifsacl for more information. If unsure, say Y.
config CIFS_DEBUG
bool "Enable CIFS debugging routines"
default y
depends on CIFS
help
- Enabling this option adds helpful debugging messages to
- the cifs code which increases the size of the cifs module.
- If unsure, say Y.
+ Enabling this option adds helpful debugging messages to
+ the cifs code which increases the size of the cifs module.
+ If unsure, say Y.
+
config CIFS_DEBUG2
bool "Enable additional CIFS debugging routines"
depends on CIFS_DEBUG
help
- Enabling this option adds a few more debugging routines
- to the cifs code which slightly increases the size of
- the cifs module and can cause additional logging of debug
- messages in some error paths, slowing performance. This
- option can be turned off unless you are debugging
- cifs problems. If unsure, say N.
+ Enabling this option adds a few more debugging routines
+ to the cifs code which slightly increases the size of
+ the cifs module and can cause additional logging of debug
+ messages in some error paths, slowing performance. This
+ option can be turned off unless you are debugging
+ cifs problems. If unsure, say N.
config CIFS_DEBUG_DUMP_KEYS
bool "Dump encryption keys for offline decryption (Unsafe)"
depends on CIFS_DEBUG
help
- Enabling this will dump the encryption and decryption keys
- used to communicate on an encrypted share connection on the
- console. This allows Wireshark to decrypt and dissect
- encrypted network captures. Enable this carefully.
- If unsure, say N.
+ Enabling this will dump the encryption and decryption keys
+ used to communicate on an encrypted share connection on the
+ console. This allows Wireshark to decrypt and dissect
+ encrypted network captures. Enable this carefully.
+ If unsure, say N.
config CIFS_DFS_UPCALL
- bool "DFS feature support"
- depends on CIFS && KEYS
- select DNS_RESOLVER
- help
- Distributed File System (DFS) support is used to access shares
- transparently in an enterprise name space, even if the share
- moves to a different server. This feature also enables
- an upcall mechanism for CIFS which contacts userspace helper
- utilities to provide server name resolution (host names to
- IP addresses) which is needed in order to reconnect to
- servers if their addresses change or for implicit mounts of
- DFS junction points. If unsure, say Y.
+ bool "DFS feature support"
+ depends on CIFS && KEYS
+ select DNS_RESOLVER
+ help
+ Distributed File System (DFS) support is used to access shares
+ transparently in an enterprise name space, even if the share
+ moves to a different server. This feature also enables
+ an upcall mechanism for CIFS which contacts userspace helper
+ utilities to provide server name resolution (host names to
+ IP addresses) which is needed in order to reconnect to
+ servers if their addresses change or for implicit mounts of
+ DFS junction points. If unsure, say Y.
config CIFS_NFSD_EXPORT
- bool "Allow nfsd to export CIFS file system"
- depends on CIFS && BROKEN
- help
- Allows NFS server to export a CIFS mounted share (nfsd over cifs)
+ bool "Allow nfsd to export CIFS file system"
+ depends on CIFS && BROKEN
+ help
+ Allows NFS server to export a CIFS mounted share (nfsd over cifs)
config CIFS_SMB_DIRECT
bool "SMB Direct support (Experimental)"
@@ -209,10 +210,9 @@ config CIFS_SMB_DIRECT
say N.
config CIFS_FSCACHE
- bool "Provide CIFS client caching support"
- depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
- help
- Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
- to be cached locally on disk through the general filesystem cache
- manager. If unsure, say N.
-
+ bool "Provide CIFS client caching support"
+ depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
+ help
+ Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
+ to be cached locally on disk through the general filesystem cache
+ manager. If unsure, say N.
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index e92a2fee3c57..13c1288b04a7 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -115,7 +115,12 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
seq_puts(m, " type: CDROM ");
else
seq_printf(m, " type: %d ", dev_type);
- if (tcon->seal)
+
+ seq_printf(m, "Serial Number: 0x%x", tcon->vol_serial_number);
+
+ if ((tcon->seal) ||
+ (tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
+ (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
seq_printf(m, " Encrypted");
if (tcon->nocase)
seq_printf(m, " nocase");
@@ -371,6 +376,10 @@ skip_rdma:
atomic_read(&server->in_send),
atomic_read(&server->num_waiters));
#endif
+ if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
+ seq_puts(m, " encrypted");
+ if (ses->sign)
+ seq_puts(m, " signed");
seq_puts(m, "\n\tShares:");
j = 0;
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index d9b99abe1243..5d83c924cc47 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -285,9 +285,9 @@ static void dump_referral(const struct dfs_info3_param *ref)
{
cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
- cifs_dbg(FYI, "DFS: fl: %hd, srv_type: %hd\n",
+ cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
ref->flags, ref->server_type);
- cifs_dbg(FYI, "DFS: ref_flags: %hd, path_consumed: %hd\n",
+ cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
ref->ref_flag, ref->path_consumed);
}
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 42f0d67f1054..ed49222abecb 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -58,6 +58,7 @@ struct cifs_sb_info {
spinlock_t tlink_tree_lock;
struct tcon_link *master_tlink;
struct nls_table *local_nls;
+ unsigned int bsize;
unsigned int rsize;
unsigned int wsize;
unsigned long actimeo; /* attribute cache timeout (jiffies) */
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index d8bce2f862de..086ddc5108af 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -43,6 +43,9 @@ struct smb_snapshot_array {
/* snapshots[]; */
} __packed;
+/* query_info flags */
+#define PASSTHRU_QUERY_INFO 0x00000000
+#define PASSTHRU_FSCTL 0x00000001
struct smb_query_info {
__u32 info_type;
__u32 file_info_class;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 62d48d486d8f..a05bf1d6e1d0 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -381,7 +381,7 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
seq_puts(s, "ntlm");
break;
case Kerberos:
- seq_puts(s, "krb5");
+ seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
break;
case RawNTLMSSP:
seq_puts(s, "ntlmssp");
@@ -554,10 +554,13 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",rsize=%u", cifs_sb->rsize);
seq_printf(s, ",wsize=%u", cifs_sb->wsize);
+ seq_printf(s, ",bsize=%u", cifs_sb->bsize);
seq_printf(s, ",echo_interval=%lu",
tcon->ses->server->echo_interval / HZ);
if (tcon->snapshot_time)
seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+ if (tcon->handle_timeout)
+ seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
/* convert actimeo and display it in seconds */
seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
@@ -1007,7 +1010,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
unsigned int xid;
int rc;
- if (remap_flags & ~REMAP_FILE_ADVISORY)
+ if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
return -EINVAL;
cifs_dbg(FYI, "clone range\n");
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 7652551a1fc4..5c0298b9998f 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.17"
+#define CIFS_VERSION "2.19"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 94dbdbe5be34..585ad3207cb1 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -60,6 +60,12 @@
#define CIFS_MAX_ACTIMEO (1 << 30)
/*
+ * Max persistent and resilient handle timeout (milliseconds).
+ * Windows durable max was 960000 (16 minutes)
+ */
+#define SMB3_MAX_HANDLE_TIMEOUT 960000
+
+/*
* MAX_REQ is the maximum number of requests that WE will send
* on one socket concurrently.
*/
@@ -216,6 +222,7 @@ struct cifs_io_parms;
struct cifs_search_info;
struct cifsInodeInfo;
struct cifs_open_parms;
+struct cifs_credits;
struct smb_version_operations {
int (*send_cancel)(struct TCP_Server_Info *, struct smb_rqst *,
@@ -230,12 +237,15 @@ struct smb_version_operations {
/* check response: verify signature, map error */
int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
bool);
- void (*add_credits)(struct TCP_Server_Info *, const unsigned int,
- const int);
+ void (*add_credits)(struct TCP_Server_Info *server,
+ const struct cifs_credits *credits,
+ const int optype);
void (*set_credits)(struct TCP_Server_Info *, const int);
int * (*get_credits_field)(struct TCP_Server_Info *, const int);
unsigned int (*get_credits)(struct mid_q_entry *);
__u64 (*get_next_mid)(struct TCP_Server_Info *);
+ void (*revert_current_mid)(struct TCP_Server_Info *server,
+ const unsigned int val);
/* data offset from read response message */
unsigned int (*read_data_offset)(char *);
/*
@@ -383,8 +393,8 @@ struct smb_version_operations {
struct cifs_fid *);
/* calculate a size of SMB message */
unsigned int (*calc_smb_size)(void *buf, struct TCP_Server_Info *ptcpi);
- /* check for STATUS_PENDING and process it in a positive case */
- bool (*is_status_pending)(char *, struct TCP_Server_Info *, int);
+ /* check for STATUS_PENDING and process the response if yes */
+ bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server);
/* check for STATUS_NETWORK_SESSION_EXPIRED */
bool (*is_session_expired)(char *);
/* send oplock break response */
@@ -452,7 +462,11 @@ struct smb_version_operations {
unsigned int (*wp_retry_size)(struct inode *);
/* get mtu credits */
int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int,
- unsigned int *, unsigned int *);
+ unsigned int *, struct cifs_credits *);
+ /* adjust previously taken mtu credits to request size */
+ int (*adjust_credits)(struct TCP_Server_Info *server,
+ struct cifs_credits *credits,
+ const unsigned int payload_size);
/* check if we need to issue closedir */
bool (*dir_needs_close)(struct cifsFileInfo *);
long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
@@ -471,6 +485,14 @@ struct smb_version_operations {
struct cifs_tcon *tcon,
__le16 *path, int is_dir,
unsigned long p);
+ /* make unix special files (block, char, fifo, socket) */
+ int (*make_node)(unsigned int xid,
+ struct inode *inode,
+ struct dentry *dentry,
+ struct cifs_tcon *tcon,
+ char *full_path,
+ umode_t mode,
+ dev_t device_number);
};
struct smb_version_values {
@@ -557,6 +579,7 @@ struct smb_vol {
bool resilient:1; /* noresilient not required since not forced for CA */
bool domainauto:1;
bool rdma:1;
+ unsigned int bsize;
unsigned int rsize;
unsigned int wsize;
bool sockopt_tcp_nodelay:1;
@@ -569,6 +592,7 @@ struct smb_vol {
struct nls_table *local_nls;
unsigned int echo_interval; /* echo interval in secs */
__u64 snapshot_time; /* needed for timewarp tokens */
+ __u32 handle_timeout; /* persistent and durable handle timeout in ms */
unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
};
@@ -710,6 +734,11 @@ struct TCP_Server_Info {
int nr_targets;
};
+struct cifs_credits {
+ unsigned int value;
+ unsigned int instance;
+};
+
static inline unsigned int
in_flight(struct TCP_Server_Info *server)
{
@@ -721,28 +750,28 @@ in_flight(struct TCP_Server_Info *server)
}
static inline bool
-has_credits(struct TCP_Server_Info *server, int *credits)
+has_credits(struct TCP_Server_Info *server, int *credits, int num_credits)
{
int num;
spin_lock(&server->req_lock);
num = *credits;
spin_unlock(&server->req_lock);
- return num > 0;
+ return num >= num_credits;
}
static inline void
-add_credits(struct TCP_Server_Info *server, const unsigned int add,
+add_credits(struct TCP_Server_Info *server, const struct cifs_credits *credits,
const int optype)
{
- server->ops->add_credits(server, add, optype);
+ server->ops->add_credits(server, credits, optype);
}
static inline void
-add_credits_and_wake_if(struct TCP_Server_Info *server, const unsigned int add,
- const int optype)
+add_credits_and_wake_if(struct TCP_Server_Info *server,
+ const struct cifs_credits *credits, const int optype)
{
- if (add) {
- server->ops->add_credits(server, add, optype);
+ if (credits->value) {
+ server->ops->add_credits(server, credits, optype);
wake_up(&server->request_q);
}
}
@@ -753,6 +782,14 @@ set_credits(struct TCP_Server_Info *server, const int val)
server->ops->set_credits(server, val);
}
+static inline int
+adjust_credits(struct TCP_Server_Info *server, struct cifs_credits *credits,
+ const unsigned int payload_size)
+{
+ return server->ops->adjust_credits ?
+ server->ops->adjust_credits(server, credits, payload_size) : 0;
+}
+
static inline __le64
get_next_mid64(struct TCP_Server_Info *server)
{
@@ -770,6 +807,22 @@ get_next_mid(struct TCP_Server_Info *server)
return cpu_to_le16(mid);
}
+static inline void
+revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
+{
+ if (server->ops->revert_current_mid)
+ server->ops->revert_current_mid(server, val);
+}
+
+static inline void
+revert_current_mid_from_hdr(struct TCP_Server_Info *server,
+ const struct smb2_sync_hdr *shdr)
+{
+ unsigned int num = le16_to_cpu(shdr->CreditCharge);
+
+ revert_current_mid(server, num > 0 ? num : 1);
+}
+
static inline __u16
get_mid(const struct smb_hdr *smb)
{
@@ -924,11 +977,14 @@ cap_unix(struct cifs_ses *ses)
struct cached_fid {
bool is_valid:1; /* Do we have a usable root fid */
+ bool file_all_info_is_valid:1;
+
struct kref refcount;
struct cifs_fid *fid;
struct mutex fid_mutex;
struct cifs_tcon *tcon;
struct work_struct lease_break;
+ struct smb2_file_all_info file_all_info;
};
/*
@@ -1009,6 +1065,7 @@ struct cifs_tcon {
__u32 vol_serial_number;
__le64 vol_create_time;
__u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
+ __u32 handle_timeout; /* persistent and durable handle timeout in ms */
__u32 ss_flags; /* sector size flags */
__u32 perf_sector_size; /* best sector size for perf */
__u32 max_chunks;
@@ -1234,7 +1291,7 @@ struct cifs_readdata {
unsigned int pagesz;
unsigned int page_offset;
unsigned int tailsz;
- unsigned int credits;
+ struct cifs_credits credits;
unsigned int nr_pages;
struct page **pages;
};
@@ -1260,7 +1317,7 @@ struct cifs_writedata {
unsigned int pagesz;
unsigned int page_offset;
unsigned int tailsz;
- unsigned int credits;
+ struct cifs_credits credits;
unsigned int nr_pages;
struct page **pages;
};
@@ -1276,6 +1333,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
}
struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler);
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
#define CIFS_CACHE_READ_FLG 1
@@ -1422,6 +1480,7 @@ struct mid_q_entry {
struct kref refcount;
struct TCP_Server_Info *server; /* server corresponding to this mid */
__u64 mid; /* multiplex id */
+ __u16 credits; /* number of credits consumed by this mid */
__u32 pid; /* process id */
__u32 sequence_number; /* for CIFS signing */
unsigned long when_alloc; /* when mid was created */
@@ -1696,6 +1755,7 @@ require use of the stronger protocol */
* GlobalMid_Lock protects:
* list operations on pending_mid_q and oplockQ
* updates to XID counters, multiplex id and SMB sequence numbers
+ * list operations on global DnotifyReqList
* tcp_ses_lock protects:
* list operations on tcp and SMB session lists
* tcon->open_file_lock protects the list of open files hanging off the tcon
@@ -1796,6 +1856,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
#endif /* CONFIG_CIFS_ACL */
void cifs_oplock_break(struct work_struct *work);
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
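
The central change in cifsglob.h is that a bare credit count becomes struct
cifs_credits, pairing the value with the reconnect instance it was granted
under, so credits left over from a previous connection can be detected and
dropped. A simplified sketch of the take/adjust/roll-back flow the new helpers
enable; the demo server type and the stub bodies are hypothetical stand-ins for
the real server ops:

	#include <errno.h>

	struct cifs_credits {
		unsigned int value;	/* credits held for this request */
		unsigned int instance;	/* reconnect instance they belong to */
	};

	struct demo_server { unsigned int reconnect_instance; };

	/* hypothetical stand-ins for server->ops */
	static int wait_credits(struct demo_server *s, unsigned int size,
				struct cifs_credits *c)
	{
		c->value = 1;
		c->instance = s->reconnect_instance;
		return 0;
	}
	static int adjust_credits(struct demo_server *s, struct cifs_credits *c,
				  unsigned int payload) { return 0; }
	static int submit_request(struct demo_server *s, struct cifs_credits *c) { return 0; }
	static void add_credits_and_wake_if(struct demo_server *s,
					    struct cifs_credits *c, int optype) { }

	static int send_with_credits(struct demo_server *server, unsigned int payload)
	{
		struct cifs_credits credits = { 0, 0 };
		int rc = wait_credits(server, payload, &credits);

		if (rc)
			return rc;
		/* shrink the grant if the payload turned out smaller */
		rc = adjust_credits(server, &credits, payload);
		if (!rc)
			rc = submit_request(server, &credits);
		if (rc)	/* roll back: return unused credits, wake waiters */
			add_credits_and_wake_if(server, &credits, 0);
		return rc;
	}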
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 336c116995d7..4f96b3b00a7a 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -93,7 +93,8 @@ extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
extern int cifs_call_async(struct TCP_Server_Info *server,
struct smb_rqst *rqst,
mid_receive_t *receive, mid_callback_t *callback,
- mid_handle_t *handle, void *cbdata, const int flags);
+ mid_handle_t *handle, void *cbdata, const int flags,
+ const struct cifs_credits *exist_credits);
extern int cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
struct smb_rqst *rqst, int *resp_buf_type,
const int flags, struct kvec *resp_iov);
@@ -115,7 +116,7 @@ extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server,
unsigned int size, unsigned int *num,
- unsigned int *credits);
+ struct cifs_credits *credits);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */, const int flags,
@@ -133,6 +134,9 @@ extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written);
extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
+extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
+ bool fsuid_only,
+ struct cifsFileInfo **ret_file);
extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
extern unsigned int smbCalcSize(void *buf, struct TCP_Server_Info *server);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index bb54ccf8481c..f43747c062a7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -139,8 +139,8 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
return -ENOMEM;
if (tcon->ipc) {
- snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
- tcon->ses->server->hostname);
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
+ tcon->ses->server->hostname);
rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
goto out;
}
@@ -172,7 +172,7 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
continue;
}
- snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
+ scnprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
if (!rc)
@@ -822,9 +822,10 @@ static void
cifs_echo_callback(struct mid_q_entry *mid)
{
struct TCP_Server_Info *server = mid->callback_data;
+ struct cifs_credits credits = { .value = 1, .instance = 0 };
DeleteMidQEntry(mid);
- add_credits(server, 1, CIFS_ECHO_OP);
+ add_credits(server, &credits, CIFS_ECHO_OP);
}
int
@@ -859,7 +860,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
iov[1].iov_base = (char *)smb + 4;
rc = cifs_call_async(server, &rqst, NULL, cifs_echo_callback, NULL,
- server, CIFS_ASYNC_OP | CIFS_ECHO_OP);
+ server, CIFS_ASYNC_OP | CIFS_ECHO_OP, NULL);
if (rc)
cifs_dbg(FYI, "Echo request failed: %d\n", rc);
@@ -1605,16 +1606,17 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
if (server->ops->is_status_pending &&
- server->ops->is_status_pending(buf, server, 0)) {
+ server->ops->is_status_pending(buf, server)) {
cifs_discard_remaining_data(server);
return -1;
}
/* set up first two iov for signature check and to get credits */
rdata->iov[0].iov_base = buf;
- rdata->iov[0].iov_len = 4;
- rdata->iov[1].iov_base = buf + 4;
- rdata->iov[1].iov_len = server->total_read - 4;
+ rdata->iov[0].iov_len = server->vals->header_preamble_size;
+ rdata->iov[1].iov_base = buf + server->vals->header_preamble_size;
+ rdata->iov[1].iov_len =
+ server->total_read - server->vals->header_preamble_size;
cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
@@ -1713,6 +1715,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
.rq_npages = rdata->nr_pages,
.rq_pagesz = rdata->pagesz,
.rq_tailsz = rdata->tailsz };
+ struct cifs_credits credits = { .value = 1, .instance = 0 };
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
__func__, mid->mid, mid->mid_state, rdata->result,
@@ -1750,7 +1753,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
- add_credits(server, 1, 0);
+ add_credits(server, &credits, 0);
}
/* cifs_async_readv - send an async read, and set up mid to handle result */
@@ -1809,7 +1812,7 @@ cifs_async_readv(struct cifs_readdata *rdata)
kref_get(&rdata->refcount);
rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive,
- cifs_readv_callback, NULL, rdata, 0);
+ cifs_readv_callback, NULL, rdata, 0, NULL);
if (rc == 0)
cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
@@ -2123,14 +2126,18 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
wdata2->tailsz = tailsz;
wdata2->bytes = cur_len;
- wdata2->cfile = find_writable_file(CIFS_I(inode), false);
+ rc = cifs_get_writable_file(CIFS_I(inode), false,
+ &wdata2->cfile);
if (!wdata2->cfile) {
- cifs_dbg(VFS, "No writable handles for inode\n");
- rc = -EBADF;
- break;
+ cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
+ rc);
+ if (!is_retryable_error(rc))
+ rc = -EBADF;
+ } else {
+ wdata2->pid = wdata2->cfile->pid;
+ rc = server->ops->async_writev(wdata2,
+ cifs_writedata_release);
}
- wdata2->pid = wdata2->cfile->pid;
- rc = server->ops->async_writev(wdata2, cifs_writedata_release);
for (j = 0; j < nr_pages; j++) {
unlock_page(wdata2->pages[j]);
@@ -2145,6 +2152,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
kref_put(&wdata2->refcount, cifs_writedata_release);
if (is_retryable_error(rc))
continue;
+ i += nr_pages;
break;
}
@@ -2152,6 +2160,13 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
i += nr_pages;
} while (i < wdata->nr_pages);
+ /* cleanup remaining pages from the original wdata */
+ for (; i < wdata->nr_pages; i++) {
+ SetPageError(wdata->pages[i]);
+ end_page_writeback(wdata->pages[i]);
+ put_page(wdata->pages[i]);
+ }
+
if (rc != 0 && !is_retryable_error(rc))
mapping_set_error(inode->i_mapping, rc);
kref_put(&wdata->refcount, cifs_writedata_release);
@@ -2226,6 +2241,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
unsigned int written;
WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
+ struct cifs_credits credits = { .value = 1, .instance = 0 };
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
@@ -2261,7 +2277,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
- add_credits(tcon->ses->server, 1, 0);
+ add_credits(tcon->ses->server, &credits, 0);
}
/* cifs_async_writev - send an async write, and set up mid to handle result */
@@ -2339,7 +2355,7 @@ cifs_async_writev(struct cifs_writedata *wdata,
kref_get(&wdata->refcount);
rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
- cifs_writev_callback, NULL, wdata, 0);
+ cifs_writev_callback, NULL, wdata, 0, NULL);
if (rc == 0)
cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 8463c940e0e5..4c0e44489f21 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -102,8 +102,8 @@ enum {
Opt_backupuid, Opt_backupgid, Opt_uid,
Opt_cruid, Opt_gid, Opt_file_mode,
Opt_dirmode, Opt_port,
- Opt_rsize, Opt_wsize, Opt_actimeo,
- Opt_echo_interval, Opt_max_credits,
+ Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
+ Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
Opt_snapshot,
/* Mount options which take string value */
@@ -204,9 +204,11 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_dirmode, "dirmode=%s" },
{ Opt_dirmode, "dir_mode=%s" },
{ Opt_port, "port=%s" },
+ { Opt_blocksize, "bsize=%s" },
{ Opt_rsize, "rsize=%s" },
{ Opt_wsize, "wsize=%s" },
{ Opt_actimeo, "actimeo=%s" },
+ { Opt_handletimeout, "handletimeout=%s" },
{ Opt_echo_interval, "echo_interval=%s" },
{ Opt_max_credits, "max_credits=%s" },
{ Opt_snapshot, "snapshot=%s" },
@@ -348,7 +350,7 @@ static int reconn_set_ipaddr(struct TCP_Server_Info *server)
cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
return -ENOMEM;
}
- snprintf(unc, len, "\\\\%s", server->hostname);
+ scnprintf(unc, len, "\\\\%s", server->hostname);
rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
kfree(unc);
@@ -592,6 +594,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
msleep(3000);
} else {
atomic_inc(&tcpSesReconnectCount);
+ set_credits(server, 1);
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedNegotiate;
@@ -1053,7 +1056,7 @@ cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
if (server->ops->is_status_pending &&
- server->ops->is_status_pending(buf, server, length))
+ server->ops->is_status_pending(buf, server))
return -1;
if (!mid)
@@ -1063,6 +1066,26 @@ cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return 0;
}
+static void
+smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
+{
+ struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buffer;
+
+ /*
+ * SMB1 does not use credits.
+ */
+ if (server->vals->header_preamble_size)
+ return;
+
+ if (shdr->CreditRequest) {
+ spin_lock(&server->req_lock);
+ server->credits += le16_to_cpu(shdr->CreditRequest);
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
+ }
+}
+
+
static int
cifs_demultiplex_thread(void *p)
{
@@ -1169,10 +1192,6 @@ next_pdu:
continue;
}
- if (server->large_buf)
- buf = server->bigbuf;
-
-
server->lstrp = jiffies;
for (i = 0; i < num_mids; i++) {
@@ -1192,6 +1211,7 @@ next_pdu:
} else if (server->ops->is_oplock_break &&
server->ops->is_oplock_break(bufs[i],
server)) {
+ smb2_add_credits_from_hdr(bufs[i], server);
cifs_dbg(FYI, "Received oplock break\n");
} else {
cifs_dbg(VFS, "No task to wake, unknown frame "
@@ -1203,6 +1223,7 @@ next_pdu:
if (server->ops->dump_detail)
server->ops->dump_detail(bufs[i],
server);
+ smb2_add_credits_from_hdr(bufs[i], server);
cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
}
@@ -1486,6 +1507,11 @@ cifs_parse_devname(const char *devname, struct smb_vol *vol)
const char *delims = "/\\";
size_t len;
+ if (unlikely(!devname || !*devname)) {
+ cifs_dbg(VFS, "Device name not specified.\n");
+ return -EINVAL;
+ }
+
/* make sure we have a valid UNC double delimiter prefix */
len = strspn(devname, delims);
if (len != 2)
@@ -1571,7 +1597,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->cred_uid = current_uid();
vol->linux_uid = current_uid();
vol->linux_gid = current_gid();
-
+ vol->bsize = 1024 * 1024; /* can improve cp performance significantly */
/*
* default to SFM style remapping of seven reserved characters
* unless user overrides it or we negotiate CIFS POSIX where
@@ -1594,6 +1620,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->actimeo = CIFS_DEF_ACTIMEO;
+ /* Most clients set timeout to 0, which allows the server to use its default */
+ vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+
/* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
vol->ops = &smb30_operations;
vol->vals = &smbdefault_values;
@@ -1944,6 +1973,26 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
port = (unsigned short)option;
break;
+ case Opt_blocksize:
+ if (get_option_ul(args, &option)) {
+ cifs_dbg(VFS, "%s: Invalid blocksize value\n",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ /*
+ * The inode blocksize realistically never needs to be less than
+ * 16K or greater than 16M; the default is 1MB. Note that small
+ * inode block sizes (e.g. 64K) can lead to very poor performance
+ * of common tools like cp and scp.
+ */
+ if ((option < CIFS_MAX_MSGSIZE) ||
+ (option > (4 * SMB3_DEFAULT_IOSIZE))) {
+ cifs_dbg(VFS, "%s: Invalid blocksize\n",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->bsize = option;
+ break;
case Opt_rsize:
if (get_option_ul(args, &option)) {
cifs_dbg(VFS, "%s: Invalid rsize value\n",
@@ -1972,6 +2021,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
goto cifs_parse_mount_err;
}
break;
+ case Opt_handletimeout:
+ if (get_option_ul(args, &option)) {
+ cifs_dbg(VFS, "%s: Invalid handletimeout value\n",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->handle_timeout = option;
+ if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
+ cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n");
+ goto cifs_parse_mount_err;
+ }
+ break;
case Opt_echo_interval:
if (get_option_ul(args, &option)) {
cifs_dbg(VFS, "%s: Invalid echo interval value\n",
@@ -2609,7 +2670,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
- tcp_ses->reconnect_instance = 0;
+ tcp_ses->reconnect_instance = 1;
tcp_ses->lstrp = jiffies;
spin_lock_init(&tcp_ses->req_lock);
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
@@ -2770,7 +2831,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
if (tcon == NULL)
return -ENOMEM;
- snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
+ scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
/* cannot fail */
nls_codepage = load_nls_default();
@@ -3138,6 +3199,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
return 0;
if (tcon->snapshot_time != volume_info->snapshot_time)
return 0;
+ if (tcon->handle_timeout != volume_info->handle_timeout)
+ return 0;
return 1;
}
@@ -3252,6 +3315,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
tcon->snapshot_time = volume_info->snapshot_time;
}
+ if (volume_info->handle_timeout) {
+ if (ses->server->vals->protocol_id == 0) {
+ cifs_dbg(VFS,
+ "Use SMB2.1 or later for handle timeout option\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
+ } else
+ tcon->handle_timeout = volume_info->handle_timeout;
+ }
+
tcon->ses = ses;
if (volume_info->password) {
tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
@@ -3839,6 +3912,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
spin_lock_init(&cifs_sb->tlink_tree_lock);
cifs_sb->tlink_tree = RB_ROOT;
+ cifs_sb->bsize = pvolume_info->bsize;
/*
* Temporarily set r/wsize for matching superblock. If we end up using
* new sb then client will later negotiate it downward if needed.
@@ -4198,7 +4272,7 @@ static int update_vol_info(const struct dfs_cache_tgt_iterator *tgt_it,
new_unc = kmalloc(len, GFP_KERNEL);
if (!new_unc)
return -ENOMEM;
- snprintf(new_unc, len, "\\%s", tgt);
+ scnprintf(new_unc, len, "\\%s", tgt);
kfree(vol->UNC);
vol->UNC = new_unc;
@@ -4902,8 +4976,6 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses)
if (!server->ops->need_neg(server))
return 0;
- set_credits(server, 1);
-
rc = server->ops->negotiate(xid, ses);
if (rc == 0) {
spin_lock(&GlobalMid_Lock);
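
The new bsize and handletimeout parsers above share one shape: parse the number,
then reject out-of-range values before touching any volume state. A small sketch
of that bounds-check idiom with generic names; in the real code the limits are
CIFS_MAX_MSGSIZE, 4 * SMB3_DEFAULT_IOSIZE and SMB3_MAX_HANDLE_TIMEOUT:

	#include <errno.h>

	/* validate then commit: the output is only written on success */
	static int set_bounded_option(unsigned long option,
				      unsigned long low, unsigned long high,
				      unsigned int *out)
	{
		if (option < low || option > high)
			return -EINVAL;
		*out = option;
		return 0;
	}

	/* e.g. set_bounded_option(option, 16384, 16UL * 1024 * 1024, &vol_bsize) */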
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 907e85d65bb4..f26a48dd2e39 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -621,20 +621,10 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
{
int rc = -EPERM;
unsigned int xid;
- int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *tcon;
- struct cifs_io_parms io_parms;
char *full_path = NULL;
- struct inode *newinode = NULL;
- __u32 oplock = 0;
- struct cifs_fid fid;
- struct cifs_open_parms oparms;
- FILE_ALL_INFO *buf = NULL;
- unsigned int bytes_written;
- struct win_dev *pdev;
- struct kvec iov[2];
if (!old_valid_dev(device_number))
return -EINVAL;
@@ -654,103 +644,12 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
goto mknod_out;
}
- if (tcon->unix_ext) {
- struct cifs_unix_set_info_args args = {
- .mode = mode & ~current_umask(),
- .ctime = NO_CHANGE_64,
- .atime = NO_CHANGE_64,
- .mtime = NO_CHANGE_64,
- .device = device_number,
- };
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- args.uid = current_fsuid();
- args.gid = current_fsgid();
- } else {
- args.uid = INVALID_UID; /* no change */
- args.gid = INVALID_GID; /* no change */
- }
- rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
- cifs_sb->local_nls,
- cifs_remap(cifs_sb));
- if (rc)
- goto mknod_out;
-
- rc = cifs_get_inode_info_unix(&newinode, full_path,
- inode->i_sb, xid);
-
- if (rc == 0)
- d_instantiate(direntry, newinode);
- goto mknod_out;
- }
-
- if (!S_ISCHR(mode) && !S_ISBLK(mode))
- goto mknod_out;
-
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
- goto mknod_out;
-
-
- cifs_dbg(FYI, "sfu compat create special file\n");
-
- buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (buf == NULL) {
- rc = -ENOMEM;
- goto mknod_out;
- }
-
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
- oparms.tcon = tcon;
- oparms.cifs_sb = cifs_sb;
- oparms.desired_access = GENERIC_WRITE;
- oparms.create_options = create_options;
- oparms.disposition = FILE_CREATE;
- oparms.path = full_path;
- oparms.fid = &fid;
- oparms.reconnect = false;
-
- if (tcon->ses->server->oplocks)
- oplock = REQ_OPLOCK;
- else
- oplock = 0;
- rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
- if (rc)
- goto mknod_out;
-
- /*
- * BB Do not bother to decode buf since no local inode yet to put
- * timestamps in, but we can reuse it safely.
- */
-
- pdev = (struct win_dev *)buf;
- io_parms.pid = current->tgid;
- io_parms.tcon = tcon;
- io_parms.offset = 0;
- io_parms.length = sizeof(struct win_dev);
- iov[1].iov_base = buf;
- iov[1].iov_len = sizeof(struct win_dev);
- if (S_ISCHR(mode)) {
- memcpy(pdev->type, "IntxCHR", 8);
- pdev->major = cpu_to_le64(MAJOR(device_number));
- pdev->minor = cpu_to_le64(MINOR(device_number));
- rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
- &bytes_written, iov, 1);
- } else if (S_ISBLK(mode)) {
- memcpy(pdev->type, "IntxBLK", 8);
- pdev->major = cpu_to_le64(MAJOR(device_number));
- pdev->minor = cpu_to_le64(MINOR(device_number));
- rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
- &bytes_written, iov, 1);
- }
- tcon->ses->server->ops->close(xid, tcon, &fid);
- d_drop(direntry);
-
- /* FIXME: add code here to set EAs */
+ rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon,
+ full_path, mode,
+ device_number);
mknod_out:
kfree(full_path);
- kfree(buf);
free_xid(xid);
cifs_put_tlink(tlink);
return rc;
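
cifs_mknod() above shrinks to path setup plus one call through the new make_node
op, so each dialect (SMB1 unix extensions, SFU emulation, SMB2+) keeps its
special-file creation in its own ops table. A hedged sketch of that dispatch
shape with hypothetical types; note the real caller invokes the op
unconditionally, the NULL check here is only a defensive assumption:

	#include <errno.h>
	#include <stddef.h>

	typedef unsigned int demo_mode_t;
	typedef unsigned long demo_dev_t;

	struct demo_ops {
		int (*make_node)(const char *path, demo_mode_t mode, demo_dev_t dev);
	};

	static int demo_mknod(const struct demo_ops *ops, const char *path,
			      demo_mode_t mode, demo_dev_t dev)
	{
		if (!ops->make_node)	/* dialect cannot create special files */
			return -EPERM;
		return ops->make_node(path, mode, dev);
	}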
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 659ce1b92c44..7037a137fa53 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -360,13 +360,31 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
return cifs_file;
}
-/*
- * Release a reference on the file private data. This may involve closing
- * the filehandle out on the server. Must be called without holding
- * tcon->open_file_lock and cifs_file->file_info_lock.
+/**
+ * cifsFileInfo_put - release a reference to file private data
+ *
+ * Always waits for any running oplock break handler; see _cifsFileInfo_put().
*/
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
+ _cifsFileInfo_put(cifs_file, true);
+}
+
+/**
+ * _cifsFileInfo_put - release a reference to file private data
+ *
+ * This may involve closing the filehandle @cifs_file out on the
+ * server. Must be called without holding tcon->open_file_lock and
+ * cifs_file->file_info_lock.
+ *
+ * If @wait_oplock_handler is true and we are releasing the last
+ * reference, wait for any running oplock break handler of the file
+ * and cancel any pending one. If calling this function from the
+ * oplock break handler, you need to pass false.
+ *
+ */
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+{
struct inode *inode = d_inode(cifs_file->dentry);
struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
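
The split above exists so the oplock break handler can drop its file reference
without cancel_work_sync() deadlocking on the very work item it is running in.
A compilable sketch of the wrapper pattern, with stubbed types and names that
only mirror the patch:

	#include <stdbool.h>

	struct demo_file {
		int refcount;
		bool oplock_work_running;
	};

	static void cancel_oplock_work_sync(struct demo_file *f)
	{
		/* stub: would wait for and cancel the oplock break work */
		f->oplock_work_running = false;
	}

	static void _demo_file_put(struct demo_file *f, bool wait_oplock_handler)
	{
		if (--f->refcount > 0)
			return;
		/*
		 * Only the regular path may wait here; the oplock break
		 * handler itself must pass false or it would wait on its
		 * own work item.
		 */
		if (wait_oplock_handler)
			cancel_oplock_work_sync(f);
		/* ... close handle on the server, free f ... */
	}

	static void demo_file_put(struct demo_file *f)
	{
		_demo_file_put(f, true);	/* public API always waits */
	}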
@@ -414,7 +432,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
spin_unlock(&tcon->open_file_lock);
- oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+ oplock_break_cancelled = wait_oplock_handler ?
+ cancel_work_sync(&cifs_file->oplock_break) : false;
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
struct TCP_Server_Info *server = tcon->ses->server;
@@ -1645,8 +1664,20 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
rc = server->ops->mand_unlock_range(cfile, flock, xid);
out:
- if (flock->fl_flags & FL_POSIX && !rc)
+ if (flock->fl_flags & FL_POSIX) {
+ /*
+ * If this is a request to remove all locks because we
+ * are closing the file, it doesn't matter if the
+ * unlocking failed as both cifs.ko and the SMB server
+ * remove the lock on file close
+ */
+ if (rc) {
+ cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
+ if (!(flock->fl_flags & FL_CLOSE))
+ return rc;
+ }
rc = locks_lock_file_wait(file, flock);
+ }
return rc;
}
@@ -1842,24 +1873,30 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
return NULL;
}
-struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
- bool fsuid_only)
+/* Return -EBADF if no handle is found and general rc otherwise */
+int
+cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
+ struct cifsFileInfo **ret_file)
{
struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
bool any_available = false;
- int rc;
+ int rc = -EBADF;
unsigned int refind = 0;
- /* Having a null inode here (because mapping->host was set to zero by
- the VFS or MM) should not happen but we had reports of on oops (due to
- it being zero) during stress testcases so we need to check for it */
+ *ret_file = NULL;
+
+ /*
+ * Having a null inode here (because mapping->host was set to zero by
+ * the VFS or MM) should not happen, but we had reports of an oops (due
+ * to it being zero) during stress testcases, so we need to check for it.
+ */
if (cifs_inode == NULL) {
cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
dump_stack();
- return NULL;
+ return rc;
}
cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
@@ -1873,7 +1910,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
refind_writable:
if (refind > MAX_REOPEN_ATT) {
spin_unlock(&tcon->open_file_lock);
- return NULL;
+ return rc;
}
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (!any_available && open_file->pid != current->tgid)
@@ -1885,7 +1922,8 @@ refind_writable:
/* found a good writable file */
cifsFileInfo_get(open_file);
spin_unlock(&tcon->open_file_lock);
- return open_file;
+ *ret_file = open_file;
+ return 0;
} else {
if (!inv_file)
inv_file = open_file;
@@ -1907,22 +1945,35 @@ refind_writable:
if (inv_file) {
rc = cifs_reopen_file(inv_file, false);
- if (!rc)
- return inv_file;
- else {
- spin_lock(&tcon->open_file_lock);
- list_move_tail(&inv_file->flist,
- &cifs_inode->openFileList);
- spin_unlock(&tcon->open_file_lock);
- cifsFileInfo_put(inv_file);
- ++refind;
- inv_file = NULL;
- spin_lock(&tcon->open_file_lock);
- goto refind_writable;
+ if (!rc) {
+ *ret_file = inv_file;
+ return 0;
}
+
+ spin_lock(&tcon->open_file_lock);
+ list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
+ spin_unlock(&tcon->open_file_lock);
+ cifsFileInfo_put(inv_file);
+ ++refind;
+ inv_file = NULL;
+ spin_lock(&tcon->open_file_lock);
+ goto refind_writable;
}
- return NULL;
+ return rc;
+}
+
+struct cifsFileInfo *
+find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
+{
+ struct cifsFileInfo *cfile;
+ int rc;
+
+ rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
+ if (rc)
+ cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
+
+ return cfile;
}
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
@@ -1959,8 +2010,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
if (mapping->host->i_size - offset < (loff_t)to)
to = (unsigned)(mapping->host->i_size - offset);
- open_file = find_writable_file(CIFS_I(mapping->host), false);
- if (open_file) {
+ rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
+ if (!rc) {
bytes_written = cifs_write(open_file, open_file->pid,
write_data, to - from, &offset);
cifsFileInfo_put(open_file);
@@ -1970,9 +2021,12 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
rc = 0;
else if (bytes_written < 0)
rc = bytes_written;
+ else
+ rc = -EFAULT;
} else {
- cifs_dbg(FYI, "No writeable filehandles for inode\n");
- rc = -EIO;
+ cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
+ if (!is_retryable_error(rc))
+ rc = -EIO;
}
kunmap(page);
@@ -2079,9 +2133,9 @@ static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
struct address_space *mapping, struct writeback_control *wbc)
{
- int rc = 0;
- struct TCP_Server_Info *server;
- unsigned int i;
+ int rc;
+ struct TCP_Server_Info *server =
+ tlink_tcon(wdata->cfile->tlink)->ses->server;
wdata->sync_mode = wbc->sync_mode;
wdata->nr_pages = nr_pages;
@@ -2091,21 +2145,16 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
page_offset(wdata->pages[nr_pages - 1]),
(loff_t)PAGE_SIZE);
wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
+ wdata->pid = wdata->cfile->pid;
- if (wdata->cfile != NULL)
- cifsFileInfo_put(wdata->cfile);
- wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
- if (!wdata->cfile) {
- cifs_dbg(VFS, "No writable handles for inode\n");
- rc = -EBADF;
- } else {
- wdata->pid = wdata->cfile->pid;
- server = tlink_tcon(wdata->cfile->tlink)->ses->server;
- rc = server->ops->async_writev(wdata, cifs_writedata_release);
- }
+ rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+ if (rc)
+ return rc;
- for (i = 0; i < nr_pages; ++i)
- unlock_page(wdata->pages[i]);
+ if (wdata->cfile->invalidHandle)
+ rc = -EAGAIN;
+ else
+ rc = server->ops->async_writev(wdata, cifs_writedata_release);
return rc;
}
@@ -2113,11 +2162,13 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
static int cifs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
+ struct inode *inode = mapping->host;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct TCP_Server_Info *server;
bool done = false, scanned = false, range_whole = false;
pgoff_t end, index;
struct cifs_writedata *wdata;
+ struct cifsFileInfo *cfile = NULL;
int rc = 0;
int saved_rc = 0;
unsigned int xid;
@@ -2143,11 +2194,23 @@ static int cifs_writepages(struct address_space *mapping,
server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
while (!done && index <= end) {
- unsigned int i, nr_pages, found_pages, wsize, credits;
+ unsigned int i, nr_pages, found_pages, wsize;
pgoff_t next = 0, tofind, saved_index = index;
+ struct cifs_credits credits_on_stack;
+ struct cifs_credits *credits = &credits_on_stack;
+ int get_file_rc = 0;
+
+ if (cfile)
+ cifsFileInfo_put(cfile);
+
+ rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
+
+ /* in case of an error store it to return later */
+ if (rc)
+ get_file_rc = rc;
rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
- &wsize, &credits);
+ &wsize, credits);
if (rc != 0) {
done = true;
break;
@@ -2180,13 +2243,26 @@ retry:
continue;
}
- wdata->credits = credits;
+ wdata->credits = credits_on_stack;
+ wdata->cfile = cfile;
+ cfile = NULL;
+
+ if (!wdata->cfile) {
+ cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
+ get_file_rc);
+ if (is_retryable_error(get_file_rc))
+ rc = get_file_rc;
+ else
+ rc = -EBADF;
+ } else
+ rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
- rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
+ for (i = 0; i < nr_pages; ++i)
+ unlock_page(wdata->pages[i]);
/* send failure -- clean up the mess */
if (rc != 0) {
- add_credits_and_wake_if(server, wdata->credits, 0);
+ add_credits_and_wake_if(server, &wdata->credits, 0);
for (i = 0; i < nr_pages; ++i) {
if (is_retryable_error(rc))
redirty_page_for_writepage(wbc,
@@ -2238,6 +2314,8 @@ retry:
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
+ if (cfile)
+ cifsFileInfo_put(cfile);
free_xid(xid);
return rc;
}
@@ -2567,47 +2645,62 @@ static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
struct cifs_aio_ctx *ctx)
{
- unsigned int wsize, credits;
+ unsigned int wsize;
+ struct cifs_credits credits;
int rc;
struct TCP_Server_Info *server =
tlink_tcon(wdata->cfile->tlink)->ses->server;
- /*
- * Wait for credits to resend this wdata.
- * Note: we are attempting to resend the whole wdata not in segments
- */
do {
- rc = server->ops->wait_mtu_credits(
- server, wdata->bytes, &wsize, &credits);
+ if (wdata->cfile->invalidHandle) {
+ rc = cifs_reopen_file(wdata->cfile, false);
+ if (rc == -EAGAIN)
+ continue;
+ else if (rc)
+ break;
+ }
- if (rc)
- goto out;
- if (wsize < wdata->bytes) {
- add_credits_and_wake_if(server, credits, 0);
- msleep(1000);
- }
- } while (wsize < wdata->bytes);
+ /*
+ * Wait for credits to resend this wdata.
+ * Note: we are attempting to resend the whole wdata not in
+ * segments
+ */
+ do {
+ rc = server->ops->wait_mtu_credits(server, wdata->bytes,
+ &wsize, &credits);
+ if (rc)
+ goto fail;
+
+ if (wsize < wdata->bytes) {
+ add_credits_and_wake_if(server, &credits, 0);
+ msleep(1000);
+ }
+ } while (wsize < wdata->bytes);
+ wdata->credits = credits;
- rc = -EAGAIN;
- while (rc == -EAGAIN) {
- rc = 0;
- if (wdata->cfile->invalidHandle)
- rc = cifs_reopen_file(wdata->cfile, false);
- if (!rc)
- rc = server->ops->async_writev(wdata,
+ rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+
+ if (!rc) {
+ if (wdata->cfile->invalidHandle)
+ rc = -EAGAIN;
+ else
+ rc = server->ops->async_writev(wdata,
cifs_uncached_writedata_release);
- }
+ }
- if (!rc) {
- list_add_tail(&wdata->list, wdata_list);
- return 0;
- }
+ /* If the write was successfully sent, we are done */
+ if (!rc) {
+ list_add_tail(&wdata->list, wdata_list);
+ return 0;
+ }
- add_credits_and_wake_if(server, wdata->credits, 0);
-out:
- kref_put(&wdata->refcount, cifs_uncached_writedata_release);
+ /* Roll back credits and retry if needed */
+ add_credits_and_wake_if(server, &wdata->credits, 0);
+ } while (rc == -EAGAIN);
+fail:
+ kref_put(&wdata->refcount, cifs_uncached_writedata_release);
return rc;
}
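
cifs_resend_wdata() above (and cifs_resend_rdata() further below) now wrap the
whole sequence in one retry loop: reopen an invalidated handle, wait for enough
credits, adjust, send, and on failure roll the credits back and retry only on
-EAGAIN. A compilable skeleton of that loop; the helpers are stubs standing in
for the real reopen, credit, and send calls:

	#include <errno.h>
	#include <stdbool.h>

	static bool handle_invalid;
	static int do_reopen(void)         { handle_invalid = false; return 0; }
	static int wait_for_credits(void)  { return 0; }	/* blocks until enough */
	static int do_send(void)           { return 0; }
	static void rollback_credits(void) { }

	static int demo_resend(void)
	{
		int rc;

		do {
			if (handle_invalid) {
				rc = do_reopen();
				if (rc == -EAGAIN)
					continue;
				else if (rc)
					break;
			}

			rc = wait_for_credits();
			if (rc)
				break;

			rc = do_send();
			if (!rc)
				return 0;	/* request queued, done */

			rollback_credits();	/* undo; retry only on -EAGAIN */
		} while (rc == -EAGAIN);

		return rc;	/* caller drops its reference on failure */
	}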
@@ -2627,6 +2720,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
struct TCP_Server_Info *server;
struct page **pagevec;
size_t start;
+ unsigned int xid;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -2634,12 +2728,23 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
pid = current->tgid;
server = tlink_tcon(open_file->tlink)->ses->server;
+ xid = get_xid();
do {
- unsigned int wsize, credits;
+ unsigned int wsize;
+ struct cifs_credits credits_on_stack;
+ struct cifs_credits *credits = &credits_on_stack;
+
+ if (open_file->invalidHandle) {
+ rc = cifs_reopen_file(open_file, false);
+ if (rc == -EAGAIN)
+ continue;
+ else if (rc)
+ break;
+ }
rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
- &wsize, &credits);
+ &wsize, credits);
if (rc)
break;
@@ -2731,16 +2836,22 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
wdata->pid = pid;
wdata->bytes = cur_len;
wdata->pagesz = PAGE_SIZE;
- wdata->credits = credits;
+ wdata->credits = credits_on_stack;
wdata->ctx = ctx;
kref_get(&ctx->refcount);
- if (!wdata->cfile->invalidHandle ||
- !(rc = cifs_reopen_file(wdata->cfile, false)))
- rc = server->ops->async_writev(wdata,
+ rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+
+ if (!rc) {
+ if (wdata->cfile->invalidHandle)
+ rc = -EAGAIN;
+ else
+ rc = server->ops->async_writev(wdata,
cifs_uncached_writedata_release);
+ }
+
if (rc) {
- add_credits_and_wake_if(server, wdata->credits, 0);
+ add_credits_and_wake_if(server, &wdata->credits, 0);
kref_put(&wdata->refcount,
cifs_uncached_writedata_release);
if (rc == -EAGAIN) {
@@ -2756,6 +2867,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
len -= cur_len;
} while (len > 0);
+ free_xid(xid);
return rc;
}
@@ -2765,7 +2877,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
struct dentry *dentry = ctx->cfile->dentry;
- unsigned int i;
int rc;
tcon = tlink_tcon(ctx->cfile->tlink);
@@ -2816,12 +2927,12 @@ restart_loop:
wdata->bytes, &tmp_from,
ctx->cfile, cifs_sb, &tmp_list,
ctx);
+
+ kref_put(&wdata->refcount,
+ cifs_uncached_writedata_release);
}
list_splice(&tmp_list, &ctx->list);
-
- kref_put(&wdata->refcount,
- cifs_uncached_writedata_release);
goto restart_loop;
}
}
@@ -2829,10 +2940,6 @@ restart_loop:
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
- if (!ctx->direct_io)
- for (i = 0; i < ctx->npages; i++)
- put_page(ctx->bv[i].bv_page);
-
cifs_stats_bytes_written(tcon, ctx->total_len);
set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
@@ -3028,14 +3135,16 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
* these pages but not on the region from pos to ppos+len-1.
*/
written = cifs_user_writev(iocb, from);
- if (written > 0 && CIFS_CACHE_READ(cinode)) {
+ if (CIFS_CACHE_READ(cinode)) {
/*
- * Windows 7 server can delay breaking level2 oplock if a write
- * request comes - break it on the client to prevent reading
- * an old data.
+ * We have read level caching and we have just sent a write
+ * request to the server thus making data in the cache stale.
+ * Zap the cache and set oplock/lease level to NONE to avoid
+ * reading stale data from the cache. All subsequent read
+ * operations will read new data from the server.
*/
cifs_zap_mapping(inode);
- cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
+ cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
inode);
cinode->oplock = 0;
}
@@ -3260,48 +3369,61 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
struct list_head *rdata_list,
struct cifs_aio_ctx *ctx)
{
- unsigned int rsize, credits;
+ unsigned int rsize;
+ struct cifs_credits credits;
int rc;
struct TCP_Server_Info *server =
tlink_tcon(rdata->cfile->tlink)->ses->server;
- /*
- * Wait for credits to resend this rdata.
- * Note: we are attempting to resend the whole rdata not in segments
- */
do {
- rc = server->ops->wait_mtu_credits(server, rdata->bytes,
+ if (rdata->cfile->invalidHandle) {
+ rc = cifs_reopen_file(rdata->cfile, true);
+ if (rc == -EAGAIN)
+ continue;
+ else if (rc)
+ break;
+ }
+
+ /*
+ * Wait for credits to resend this rdata.
+ * Note: we are attempting to resend the whole rdata not in
+ * segments
+ */
+ do {
+ rc = server->ops->wait_mtu_credits(server, rdata->bytes,
&rsize, &credits);
- if (rc)
- goto out;
+ if (rc)
+ goto fail;
- if (rsize < rdata->bytes) {
- add_credits_and_wake_if(server, credits, 0);
- msleep(1000);
- }
- } while (rsize < rdata->bytes);
+ if (rsize < rdata->bytes) {
+ add_credits_and_wake_if(server, &credits, 0);
+ msleep(1000);
+ }
+ } while (rsize < rdata->bytes);
+ rdata->credits = credits;
- rc = -EAGAIN;
- while (rc == -EAGAIN) {
- rc = 0;
- if (rdata->cfile->invalidHandle)
- rc = cifs_reopen_file(rdata->cfile, true);
- if (!rc)
- rc = server->ops->async_readv(rdata);
- }
+ rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+ if (!rc) {
+ if (rdata->cfile->invalidHandle)
+ rc = -EAGAIN;
+ else
+ rc = server->ops->async_readv(rdata);
+ }
- if (!rc) {
- /* Add to aio pending list */
- list_add_tail(&rdata->list, rdata_list);
- return 0;
- }
+ /* If the read was successfully sent, we are done */
+ if (!rc) {
+ /* Add to aio pending list */
+ list_add_tail(&rdata->list, rdata_list);
+ return 0;
+ }
- add_credits_and_wake_if(server, rdata->credits, 0);
-out:
- kref_put(&rdata->refcount,
- cifs_uncached_readdata_release);
+ /* Roll back credits and retry if needed */
+ add_credits_and_wake_if(server, &rdata->credits, 0);
+ } while (rc == -EAGAIN);
+fail:
+ kref_put(&rdata->refcount, cifs_uncached_readdata_release);
return rc;
}
@@ -3311,7 +3433,9 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
struct cifs_aio_ctx *ctx)
{
struct cifs_readdata *rdata;
- unsigned int npages, rsize, credits;
+ unsigned int npages, rsize;
+ struct cifs_credits credits_on_stack;
+ struct cifs_credits *credits = &credits_on_stack;
size_t cur_len;
int rc;
pid_t pid;
@@ -3331,8 +3455,16 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
iov_iter_advance(&direct_iov, offset - ctx->pos);
do {
+ if (open_file->invalidHandle) {
+ rc = cifs_reopen_file(open_file, true);
+ if (rc == -EAGAIN)
+ continue;
+ else if (rc)
+ break;
+ }
+
rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
- &rsize, &credits);
+ &rsize, credits);
if (rc)
break;
@@ -3406,15 +3538,21 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
rdata->pagesz = PAGE_SIZE;
rdata->read_into_pages = cifs_uncached_read_into_pages;
rdata->copy_into_pages = cifs_uncached_copy_into_pages;
- rdata->credits = credits;
+ rdata->credits = credits_on_stack;
rdata->ctx = ctx;
kref_get(&ctx->refcount);
- if (!rdata->cfile->invalidHandle ||
- !(rc = cifs_reopen_file(rdata->cfile, true)))
- rc = server->ops->async_readv(rdata);
+ rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+
+ if (!rc) {
+ if (rdata->cfile->invalidHandle)
+ rc = -EAGAIN;
+ else
+ rc = server->ops->async_readv(rdata);
+ }
+
if (rc) {
- add_credits_and_wake_if(server, rdata->credits, 0);
+ add_credits_and_wake_if(server, &rdata->credits, 0);
kref_put(&rdata->refcount,
cifs_uncached_readdata_release);
if (rc == -EAGAIN) {
@@ -3439,7 +3577,6 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
struct iov_iter *to = &ctx->iter;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
- unsigned int i;
int rc;
tcon = tlink_tcon(ctx->cfile->tlink);
@@ -3523,17 +3660,8 @@ again:
kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
- if (!ctx->direct_io) {
- for (i = 0; i < ctx->npages; i++) {
- if (ctx->should_dirty)
- set_page_dirty(ctx->bv[i].bv_page);
- put_page(ctx->bv[i].bv_page);
- }
-
+ if (!ctx->direct_io)
ctx->total_len = ctx->len - iov_iter_count(to);
- }
-
- cifs_stats_bytes_read(tcon, ctx->total_len);
/* mask nodata case */
if (rc == -ENODATA)
@@ -4095,10 +4223,19 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
loff_t offset;
struct page *page, *tpage;
struct cifs_readdata *rdata;
- unsigned credits;
+ struct cifs_credits credits_on_stack;
+ struct cifs_credits *credits = &credits_on_stack;
+
+ if (open_file->invalidHandle) {
+ rc = cifs_reopen_file(open_file, true);
+ if (rc == -EAGAIN)
+ continue;
+ else if (rc)
+ break;
+ }
rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
- &rsize, &credits);
+ &rsize, credits);
if (rc)
break;
@@ -4144,18 +4281,24 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
rdata->tailsz = PAGE_SIZE;
rdata->read_into_pages = cifs_readpages_read_into_pages;
rdata->copy_into_pages = cifs_readpages_copy_into_pages;
- rdata->credits = credits;
+ rdata->credits = credits_on_stack;
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
list_del(&page->lru);
rdata->pages[rdata->nr_pages++] = page;
}
- if (!rdata->cfile->invalidHandle ||
- !(rc = cifs_reopen_file(rdata->cfile, true)))
- rc = server->ops->async_readv(rdata);
+ rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+
+ if (!rc) {
+ if (rdata->cfile->invalidHandle)
+ rc = -EAGAIN;
+ else
+ rc = server->ops->async_readv(rdata);
+ }
+
if (rc) {
- add_credits_and_wake_if(server, rdata->credits, 0);
+ add_credits_and_wake_if(server, &rdata->credits, 0);
for (i = 0; i < rdata->nr_pages; i++) {
page = rdata->pages[i];
lru_cache_add_file(page);
@@ -4466,6 +4609,7 @@ void cifs_oplock_break(struct work_struct *work)
cinode);
cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
}
+ _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
cifs_done_oplock_break(cinode);
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 478003644916..538fd7d807e4 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
if (rc == 0 || rc != -EBUSY)
goto do_rename_exit;
+ /* Don't fall back to the SMB1 open-file rename on an SMB2+ mount */
+ if (server->vals->protocol_id != 0)
+ goto do_rename_exit;
+
/* open-file renames don't work across directories */
if (to_dentry->d_parent != from_dentry->d_parent)
goto do_rename_exit;
@@ -2080,7 +2084,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
return rc;
generic_fillattr(inode, stat);
- stat->blksize = CIFS_MAX_MSGSIZE;
+ stat->blksize = cifs_sb->bsize;
stat->ino = CIFS_I(inode)->uniqueid;
/* old CIFS Unix Extensions doesn't return create time */
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 2148b0f60e5e..62216dc8f9f5 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -103,9 +103,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
return rc;
}
- snprintf(md5_str2, sizeof(md5_str2),
- CIFS_MF_SYMLINK_MD5_FORMAT,
- CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
+ scnprintf(md5_str2, sizeof(md5_str2),
+ CIFS_MF_SYMLINK_MD5_FORMAT,
+ CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
if (strncmp(md5_str1, md5_str2, 17) != 0)
return -EINVAL;
@@ -142,10 +142,10 @@ format_mf_symlink(u8 *buf, unsigned int buf_len, const char *link_str)
return rc;
}
- snprintf(buf, buf_len,
- CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
- link_len,
- CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
+ scnprintf(buf, buf_len,
+ CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
+ link_len,
+ CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
ofs = CIFS_MF_SYMLINK_LINK_OFFSET;
memcpy(buf + ofs, link_str, link_len);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index bee203055b30..b1a696a73f7c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -501,8 +501,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
&pCifsInode->flags);
- queue_work(cifsoplockd_wq,
- &netfile->oplock_break);
+ cifs_queue_oplock_break(netfile);
netfile->oplock_break_cancelled = false;
spin_unlock(&tcon->open_file_lock);
@@ -607,6 +606,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
spin_unlock(&cinode->writers_lock);
}
+/**
+ * cifs_queue_oplock_break - queue the oplock break handler for cfile
+ * @cfile: the file that received the oplock break
+ *
+ * This function is called from the demultiplex thread when it
+ * receives an oplock break for @cfile.
+ *
+ * Assumes the tcon->open_file_lock is held.
+ * Assumes cfile->file_info_lock is NOT held.
+ */
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
+{
+ /*
+ * Bump the handle refcount now while we hold the
+ * open_file_lock to enforce the validity of it for the oplock
+ * break handler. The matching put is done at the end of the
+ * handler.
+ */
+ cifsFileInfo_get(cfile);
+
+ queue_work(cifsoplockd_wq, &cfile->oplock_break);
+}
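
The reference taken here is what keeps cfile alive until the queued work runs, even if every other user drops theirs first. How the two halves pair up, using the handler change earlier in this diff:

	/* demultiplex thread, with tcon->open_file_lock held */
	cifs_queue_oplock_break(cfile);		/* does cifsFileInfo_get(cfile) */

	/* ... and at the end of the cifs_oplock_break() work handler ... */
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */);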
+
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
@@ -768,6 +789,11 @@ cifs_aio_ctx_alloc(void)
{
struct cifs_aio_ctx *ctx;
+ /*
+ * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
+ * to false so that we know when we have to unreference pages within
+ * cifs_aio_ctx_release()
+ */
ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
if (!ctx)
return NULL;
@@ -786,7 +812,23 @@ cifs_aio_ctx_release(struct kref *refcount)
struct cifs_aio_ctx, refcount);
cifsFileInfo_put(ctx->cfile);
- kvfree(ctx->bv);
+
+ /*
+ * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
+ * which means that iov_iter_get_pages() succeeded and we have taken
+ * references on the pages.
+ */
+ if (ctx->bv) {
+ unsigned i;
+
+ for (i = 0; i < ctx->npages; i++) {
+ if (ctx->should_dirty)
+ set_page_dirty(ctx->bv[i].bv_page);
+ put_page(ctx->bv[i].bv_page);
+ }
+ kvfree(ctx->bv);
+ }
+
kfree(ctx);
}
@@ -917,7 +959,6 @@ cifs_alloc_hash(const char *name,
}
(*sdesc)->shash.tfm = *shash;
- (*sdesc)->shash.flags = 0x0;
return 0;
}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 32a6c020478f..c711f1f39bf2 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -117,11 +117,11 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
}
static void
-cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add,
- const int optype)
+cifs_add_credits(struct TCP_Server_Info *server,
+ const struct cifs_credits *credits, const int optype)
{
spin_lock(&server->req_lock);
- server->credits += add;
+ server->credits += credits->value;
server->in_flight--;
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
@@ -308,7 +308,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
remaining = tgt_total_cnt - total_in_tgt;
if (remaining < 0) {
- cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%hu\n",
+ cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%u\n",
tgt_total_cnt, total_in_tgt);
return -EPROTO;
}
@@ -1027,6 +1027,131 @@ cifs_can_echo(struct TCP_Server_Info *server)
return false;
}
+static int
+cifs_make_node(unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ char *full_path, umode_t mode, dev_t dev)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct inode *newinode = NULL;
+ int rc = -EPERM;
+ int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
+ FILE_ALL_INFO *buf = NULL;
+ struct cifs_io_parms io_parms;
+ __u32 oplock = 0;
+ struct cifs_fid fid;
+ struct cifs_open_parms oparms;
+ unsigned int bytes_written;
+ struct win_dev *pdev;
+ struct kvec iov[2];
+
+ if (tcon->unix_ext) {
+ /*
+ * SMB1 Unix Extensions: requires server support but
+ * works with all special files
+ */
+ struct cifs_unix_set_info_args args = {
+ .mode = mode & ~current_umask(),
+ .ctime = NO_CHANGE_64,
+ .atime = NO_CHANGE_64,
+ .mtime = NO_CHANGE_64,
+ .device = dev,
+ };
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ args.uid = current_fsuid();
+ args.gid = current_fsgid();
+ } else {
+ args.uid = INVALID_UID; /* no change */
+ args.gid = INVALID_GID; /* no change */
+ }
+ rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
+ cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ if (rc)
+ goto out;
+
+ rc = cifs_get_inode_info_unix(&newinode, full_path,
+ inode->i_sb, xid);
+
+ if (rc == 0)
+ d_instantiate(dentry, newinode);
+ goto out;
+ }
+
+ /*
+ * SMB1 SFU emulation: should work with all servers, but only
+ * supports block and char devices (no socket & fifo)
+ */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+ goto out;
+
+ if (!S_ISCHR(mode) && !S_ISBLK(mode))
+ goto out;
+
+ cifs_dbg(FYI, "sfu compat create special file\n");
+
+ buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ if (buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (backup_cred(cifs_sb))
+ create_options |= CREATE_OPEN_BACKUP_INTENT;
+
+ oparms.tcon = tcon;
+ oparms.cifs_sb = cifs_sb;
+ oparms.desired_access = GENERIC_WRITE;
+ oparms.create_options = create_options;
+ oparms.disposition = FILE_CREATE;
+ oparms.path = full_path;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ if (tcon->ses->server->oplocks)
+ oplock = REQ_OPLOCK;
+ else
+ oplock = 0;
+ rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
+ if (rc)
+ goto out;
+
+ /*
+ * BB Do not bother to decode buf since no local inode yet to put
+ * timestamps in, but we can reuse it safely.
+ */
+
+ pdev = (struct win_dev *)buf;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = tcon;
+ io_parms.offset = 0;
+ io_parms.length = sizeof(struct win_dev);
+ iov[1].iov_base = buf;
+ iov[1].iov_len = sizeof(struct win_dev);
+ if (S_ISCHR(mode)) {
+ memcpy(pdev->type, "IntxCHR", 8);
+ pdev->major = cpu_to_le64(MAJOR(dev));
+ pdev->minor = cpu_to_le64(MINOR(dev));
+ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written, iov, 1);
+ } else if (S_ISBLK(mode)) {
+ memcpy(pdev->type, "IntxBLK", 8);
+ pdev->major = cpu_to_le64(MAJOR(dev));
+ pdev->minor = cpu_to_le64(MINOR(dev));
+ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written, iov, 1);
+ }
+ tcon->ses->server->ops->close(xid, tcon, &fid);
+ d_drop(dentry);
+
+ /* FIXME: add code here to set EAs */
+out:
+ kfree(buf);
+ return rc;
+}
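
For reference, the blob written above is the Services-for-UNIX device marker: an 8-byte type tag followed by little-endian major/minor numbers. A sketch of the layout, assuming it matches the packed struct win_dev in cifspdu.h:

struct win_dev {
	unsigned char type[8];	/* "IntxCHR" or "IntxBLK", NUL padded */
	__le64 major;
	__le64 minor;
} __attribute__((packed));	/* 24 bytes on the wire */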
+
+
+
struct smb_version_operations smb1_operations = {
.send_cancel = send_nt_cancel,
.compare_fids = cifs_compare_fids,
@@ -1110,6 +1235,7 @@ struct smb_version_operations smb1_operations = {
.get_acl_by_fid = get_cifs_acl_by_fid,
.set_acl = set_cifs_acl,
#endif /* CIFS_ACL */
+ .make_node = cifs_make_node,
};
struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index b204e84b87fb..54bffb2a1786 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -68,13 +68,15 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
if (oparms->tcon->use_resilient) {
- nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */
+ /* a timeout of 0 means the server picks its default (120 seconds) */
+ nr_ioctl_req.Timeout =
+ cpu_to_le32(oparms->tcon->handle_timeout);
nr_ioctl_req.Reserved = 0;
rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
true /* is_fsctl */,
(char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
- NULL, NULL /* no return info */);
+ CIFSMaxBufSize, NULL, NULL /* no return info */);
if (rc == -EOPNOTSUPP) {
cifs_dbg(VFS,
"resiliency not supported by server, disabling\n");
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 01a76bccdb8d..278405d26c47 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -37,6 +37,16 @@
#include "smb2pdu.h"
#include "smb2proto.h"
+static void
+free_set_inf_compound(struct smb_rqst *rqst)
+{
+ if (rqst[1].rq_iov)
+ SMB2_set_info_free(&rqst[1]);
+ if (rqst[2].rq_iov)
+ SMB2_close_free(&rqst[2]);
+}
+
+
static int
smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path,
@@ -112,14 +122,18 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
PATH_MAX * 2, 0, NULL);
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid,
+ full_path);
break;
case SMB2_OP_DELETE:
+ trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_MKDIR:
/*
* Directories are created through parameters in the
* SMB2_open() call.
*/
+ trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_RMDIR:
memset(&si_iov, 0, sizeof(si_iov));
@@ -135,6 +149,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_O_INFO_FILE, 0, data, size);
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_SET_EOF:
memset(&si_iov, 0, sizeof(si_iov));
@@ -150,6 +165,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_O_INFO_FILE, 0, data, size);
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_SET_INFO:
memset(&si_iov, 0, sizeof(si_iov));
@@ -166,6 +182,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_O_INFO_FILE, 0, data, size);
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid,
+ full_path);
break;
case SMB2_OP_RENAME:
memset(&si_iov, 0, sizeof(si_iov));
@@ -190,6 +208,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_O_INFO_FILE, 0, data, size);
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_HARDLINK:
memset(&si_iov, 0, sizeof(si_iov));
@@ -214,6 +233,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_O_INFO_FILE, 0, data, size);
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
break;
default:
cifs_dbg(VFS, "Invalid command\n");
@@ -252,21 +272,65 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_query_info_free(&rqst[1]);
if (rqst[2].rq_iov)
SMB2_close_free(&rqst[2]);
+ if (rc)
+ trace_smb3_query_info_compound_err(xid, ses->Suid,
+ tcon->tid, rc);
+ else
+ trace_smb3_query_info_compound_done(xid, ses->Suid,
+ tcon->tid);
break;
case SMB2_OP_DELETE:
+ if (rc)
+ trace_smb3_delete_err(xid, ses->Suid, tcon->tid, rc);
+ else
+ trace_smb3_delete_done(xid, ses->Suid, tcon->tid);
+ if (rqst[1].rq_iov)
+ SMB2_close_free(&rqst[1]);
+ break;
case SMB2_OP_MKDIR:
+ if (rc)
+ trace_smb3_mkdir_err(xid, ses->Suid, tcon->tid, rc);
+ else
+ trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid);
if (rqst[1].rq_iov)
SMB2_close_free(&rqst[1]);
break;
case SMB2_OP_HARDLINK:
+ if (rc)
+ trace_smb3_hardlink_err(xid, ses->Suid, tcon->tid, rc);
+ else
+ trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid);
+ free_set_inf_compound(rqst);
+ break;
case SMB2_OP_RENAME:
+ if (rc)
+ trace_smb3_rename_err(xid, ses->Suid, tcon->tid, rc);
+ else
+ trace_smb3_rename_done(xid, ses->Suid, tcon->tid);
+ free_set_inf_compound(rqst);
+ break;
case SMB2_OP_RMDIR:
+ if (rc)
+ trace_smb3_rmdir_err(xid, ses->Suid, tcon->tid, rc);
+ else
+ trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid);
+ free_set_inf_compound(rqst);
+ break;
case SMB2_OP_SET_EOF:
+ if (rc)
+ trace_smb3_set_eof_err(xid, ses->Suid, tcon->tid, rc);
+ else
+ trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid);
+ free_set_inf_compound(rqst);
+ break;
case SMB2_OP_SET_INFO:
- if (rqst[1].rq_iov)
- SMB2_set_info_free(&rqst[1]);
- if (rqst[2].rq_iov)
- SMB2_close_free(&rqst[2]);
+ if (rc)
+ trace_smb3_set_info_compound_err(xid, ses->Suid,
+ tcon->tid, rc);
+ else
+ trace_smb3_set_info_compound_done(xid, ses->Suid,
+ tcon->tid);
+ free_set_inf_compound(rqst);
break;
}
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
@@ -309,12 +373,17 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
rc = open_shroot(xid, tcon, &fid);
if (rc)
goto out;
- rc = SMB2_query_info(xid, tcon, fid.persistent_fid,
- fid.volatile_fid, smb2_data);
+
+ if (tcon->crfid.file_all_info_is_valid) {
+ move_smb2_info_to_cifs(data,
+ &tcon->crfid.file_all_info);
+ } else {
+ rc = SMB2_query_info(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid, smb2_data);
+ if (!rc)
+ move_smb2_info_to_cifs(data, smb2_data);
+ }
close_shroot(&tcon->crfid);
- if (rc)
- goto out;
- move_smb2_info_to_cifs(data, smb2_data);
goto out;
}
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 924269cec135..e32c264e3adb 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -1036,7 +1036,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
"STATUS_UNFINISHED_CONTEXT_DELETED"},
{STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
- {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
+ /* Note that ENOATTR and ENODATA are the same errno */
+ {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
{STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
{STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
"STATUS_WRONG_CREDENTIAL_HANDLE"},
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 7b8b58fb4d3f..e311f58dc1c8 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -517,7 +517,6 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
__u8 lease_state;
struct list_head *tmp;
struct cifsFileInfo *cfile;
- struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_pending_open *open;
struct cifsInodeInfo *cinode;
int ack_req = le32_to_cpu(rsp->Flags &
@@ -537,14 +536,26 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
le32_to_cpu(rsp->NewLeaseState));
- server->ops->set_oplock_level(cinode, lease_state, 0, NULL);
-
if (ack_req)
cfile->oplock_break_cancelled = false;
else
cfile->oplock_break_cancelled = true;
- queue_work(cifsoplockd_wq, &cfile->oplock_break);
+ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
+
+ /*
+ * Set or clear flags depending on whether the lease grants READ caching.
+ * HANDLE caching flag should be added when the client starts
+ * to defer closing remote file handles with HANDLE leases.
+ */
+ if (lease_state & SMB2_LEASE_READ_CACHING_HE)
+ set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ &cinode->flags);
+ else
+ clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ &cinode->flags);
+
+ cifs_queue_oplock_break(cfile);
kfree(lw);
return true;
}
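
The flag manipulation replaces the old direct set_oplock_level() call so that the downgrade is applied by the work handler rather than the demultiplex thread. A sketch of the mapping, as the code above implements it:

/*
 * NewLeaseState includes SMB2_LEASE_READ_CACHING_HE
 *     -> set CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (downgrade to read/level II)
 * otherwise
 *     -> clear the bit (downgrade to none)
 *
 * cifs_queue_oplock_break() then pins the handle and queues
 * cifs_oplock_break(), which consumes these bits.
 */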
@@ -648,13 +659,6 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
return false;
- if (rsp->sync_hdr.CreditRequest) {
- spin_lock(&server->req_lock);
- server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
- spin_unlock(&server->req_lock);
- wake_up(&server->request_q);
- }
-
if (rsp->StructureSize !=
smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
if (le16_to_cpu(rsp->StructureSize) == 44)
@@ -708,8 +712,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
&cinode->flags);
spin_unlock(&cfile->file_info_lock);
- queue_work(cifsoplockd_wq,
- &cfile->oplock_break);
+
+ cifs_queue_oplock_break(cfile);
spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 6f96e2292856..c36ff0d1fe2a 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -67,10 +67,13 @@ change_conf(struct TCP_Server_Info *server)
}
static void
-smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
- const int optype)
+smb2_add_credits(struct TCP_Server_Info *server,
+ const struct cifs_credits *credits, const int optype)
{
int *val, rc = -1;
+ unsigned int add = credits->value;
+ unsigned int instance = credits->instance;
+ bool reconnect_detected = false;
spin_lock(&server->req_lock);
val = server->ops->get_credits_field(server, optype);
@@ -79,8 +82,11 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
server->hostname, *val);
+ if ((instance == 0) || (instance == server->reconnect_instance))
+ *val += add;
+ else
+ reconnect_detected = true;
- *val += add;
if (*val > 65000) {
*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
@@ -102,7 +108,12 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
- if (server->tcpStatus == CifsNeedReconnect)
+ if (reconnect_detected)
+ cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
+ add, instance);
+
+ if (server->tcpStatus == CifsNeedReconnect
+ || server->tcpStatus == CifsExiting)
return;
switch (rc) {
@@ -163,7 +174,7 @@ smb2_get_credits(struct mid_q_entry *mid)
static int
smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
- unsigned int *num, unsigned int *credits)
+ unsigned int *num, struct cifs_credits *credits)
{
int rc = 0;
unsigned int scredits;
@@ -174,7 +185,7 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
spin_unlock(&server->req_lock);
cifs_num_waiters_inc(server);
rc = wait_event_killable(server->request_q,
- has_credits(server, &server->credits));
+ has_credits(server, &server->credits, 1));
cifs_num_waiters_dec(server);
if (rc)
return rc;
@@ -189,7 +200,8 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
/* can deadlock with reopen */
if (scredits <= 8) {
*num = SMB2_MAX_BUFFER_SIZE;
- *credits = 0;
+ credits->value = 0;
+ credits->instance = 0;
break;
}
@@ -198,8 +210,10 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
*num = min_t(unsigned int, size,
scredits * SMB2_MAX_BUFFER_SIZE);
- *credits = DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
- server->credits -= *credits;
+ credits->value =
+ DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
+ credits->instance = server->reconnect_instance;
+ server->credits -= credits->value;
server->in_flight++;
break;
}
@@ -208,6 +222,38 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
return rc;
}
+static int
+smb2_adjust_credits(struct TCP_Server_Info *server,
+ struct cifs_credits *credits,
+ const unsigned int payload_size)
+{
+ int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
+
+ if (!credits->value || credits->value == new_val)
+ return 0;
+
+ if (credits->value < new_val) {
+ WARN_ONCE(1, "request has less credits (%d) than required (%d)",
+ credits->value, new_val);
+ return -ENOTSUPP;
+ }
+
+ spin_lock(&server->req_lock);
+
+ if (server->reconnect_instance != credits->instance) {
+ spin_unlock(&server->req_lock);
+ cifs_dbg(VFS, "trying to return %d credits to old session\n",
+ credits->value - new_val);
+ return -EAGAIN;
+ }
+
+ server->credits += credits->value - new_val;
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
+ credits->value = new_val;
+ return 0;
+}
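
A worked example of what adjust_credits buys, as a sketch (SMB2_MAX_BUFFER_SIZE is 64KiB):

/*
 * Suppose wait_mtu_credits reserved 16 credits for a 1 MiB write but
 * the request ends up carrying only 192 KiB of data:
 *
 *	new_val = DIV_ROUND_UP(196608, 65536) = 3
 *
 * smb2_adjust_credits() shrinks credits->value to 3, returns the
 * other 13 to server->credits and wakes waiters -- unless the server
 * reconnected in the meantime, in which case the stale credits are
 * dropped and -EAGAIN forces the caller to resend.
 */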
+
static __u64
smb2_get_next_mid(struct TCP_Server_Info *server)
{
@@ -219,6 +265,15 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
return mid;
}
+static void
+smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
+{
+ spin_lock(&GlobalMid_Lock);
+ if (server->CurrentMid >= val)
+ server->CurrentMid -= val;
+ spin_unlock(&GlobalMid_Lock);
+}
+
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
@@ -526,7 +581,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
NULL /* no data input */, 0 /* no data input */,
- (char **)&out_buf, &ret_data_len);
+ CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
if (rc == -EOPNOTSUPP) {
cifs_dbg(FYI,
"server does not support query network interfaces\n");
@@ -564,6 +619,7 @@ smb2_close_cached_fid(struct kref *ref)
SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
cfid->fid->volatile_fid);
cfid->is_valid = false;
+ cfid->file_all_info_is_valid = false;
}
}
@@ -588,9 +644,18 @@ smb2_cached_lease_break(struct work_struct *work)
*/
int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
{
- struct cifs_open_parms oparams;
- int rc;
- __le16 srch_path = 0; /* Null - since an open of top of share */
+ struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = ses->server;
+ struct cifs_open_parms oparms;
+ struct smb2_create_rsp *o_rsp = NULL;
+ struct smb2_query_info_rsp *qi_rsp = NULL;
+ int resp_buftype[2];
+ struct smb_rqst rqst[2];
+ struct kvec rsp_iov[2];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov[1];
+ int rc, flags = 0;
+ __le16 utf16_path = 0; /* Null - since an open of top of share */
u8 oplock = SMB2_OPLOCK_LEVEL_II;
mutex_lock(&tcon->crfid.fid_mutex);
@@ -602,22 +667,85 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
return 0;
}
- oparams.tcon = tcon;
- oparams.create_options = 0;
- oparams.desired_access = FILE_READ_ATTRIBUTES;
- oparams.disposition = FILE_OPEN;
- oparams.fid = pfid;
- oparams.reconnect = false;
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(rqst, 0, sizeof(rqst));
+ resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ /* Open */
+ memset(&open_iov, 0, sizeof(open_iov));
+ rqst[0].rq_iov = open_iov;
+ rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+
+ oparms.tcon = tcon;
+ oparms.create_options = 0;
+ oparms.desired_access = FILE_READ_ATTRIBUTES;
+ oparms.disposition = FILE_OPEN;
+ oparms.fid = pfid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
+ if (rc)
+ goto oshr_exit;
+ smb2_set_next_command(tcon, &rqst[0]);
+
+ memset(&qi_iov, 0, sizeof(qi_iov));
+ rqst[1].rq_iov = qi_iov;
+ rqst[1].rq_nvec = 1;
+
+ rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
+ COMPOUND_FID, FILE_ALL_INFORMATION,
+ SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb2_file_all_info) +
+ PATH_MAX * 2, 0, NULL);
+ if (rc)
+ goto oshr_exit;
- rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
- if (rc == 0) {
- memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
- tcon->crfid.tcon = tcon;
- tcon->crfid.is_valid = true;
- kref_init(&tcon->crfid.refcount);
+ smb2_set_related(&rqst[1]);
+
+ rc = compound_send_recv(xid, ses, flags, 2, rqst,
+ resp_buftype, rsp_iov);
+ if (rc)
+ goto oshr_exit;
+
+ o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+ oparms.fid->volatile_fid = o_rsp->VolatileFileId;
+#ifdef CONFIG_CIFS_DEBUG2
+ oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
+#endif /* CIFS_DEBUG2 */
+
+ memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
+ tcon->crfid.tcon = tcon;
+ tcon->crfid.is_valid = true;
+ kref_init(&tcon->crfid.refcount);
+
+ if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
kref_get(&tcon->crfid.refcount);
- }
+ oplock = smb2_parse_lease_state(server, o_rsp,
+ &oparms.fid->epoch,
+ oparms.fid->lease_key);
+ } else
+ goto oshr_exit;
+
+ qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
+ goto oshr_exit;
+ if (!smb2_validate_and_copy_iov(
+ le16_to_cpu(qi_rsp->OutputBufferOffset),
+ sizeof(struct smb2_file_all_info),
+ &rsp_iov[1], sizeof(struct smb2_file_all_info),
+ (char *)&tcon->crfid.file_all_info))
+ tcon->crfid.file_all_info_is_valid = 1;
+
+ oshr_exit:
mutex_unlock(&tcon->crfid.fid_mutex);
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
return rc;
}
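
The cached-root open above uses the same compounding pattern seen throughout this diff. A sketch of the chain it builds:

/*
 * rqst[0]: SMB2_CREATE ""		-- smb2_set_next_command()
 * rqst[1]: SMB2_QUERY_INFO		-- smb2_set_related(), issued on
 *	    COMPOUND_FID (all-0xFF ids), i.e. the handle produced
 *	    by rqst[0]
 *
 * One compound_send_recv() round trip yields both the root handle
 * and FILE_ALL_INFORMATION, which is cached in tcon->crfid so that
 * smb2_query_path_info() can answer later lookups without a wire call.
 */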
@@ -940,6 +1068,16 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
+ if (ses->server->ops->query_all_EAs) {
+ if (!ea_value) {
+ rc = ses->server->ops->query_all_EAs(xid, tcon, path,
+ ea_name, NULL, 0,
+ cifs_sb);
+ if (rc == -ENODATA)
+ goto sea_exit;
+ }
+ }
+
/* Open */
memset(&open_iov, 0, sizeof(open_iov));
rqst[0].rq_iov = open_iov;
@@ -1157,7 +1295,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
- NULL, 0 /* no input */,
+ NULL, 0 /* no input */, CIFSMaxBufSize,
(char **)&res_key, &ret_data_len);
if (rc) {
@@ -1188,7 +1326,8 @@ smb2_ioctl_query_info(const unsigned int xid,
struct smb_query_info __user *pqi;
int rc = 0;
int flags = 0;
- struct smb2_query_info_rsp *rsp = NULL;
+ struct smb2_query_info_rsp *qi_rsp = NULL;
+ struct smb2_ioctl_rsp *io_rsp = NULL;
void *buffer = NULL;
struct smb_rqst rqst[3];
int resp_buftype[3];
@@ -1198,6 +1337,7 @@ smb2_ioctl_query_info(const unsigned int xid,
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_fid fid;
struct kvec qi_iov[1];
+ struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
struct kvec close_iov[1];
memset(rqst, 0, sizeof(rqst));
@@ -1248,15 +1388,35 @@ smb2_ioctl_query_info(const unsigned int xid,
smb2_set_next_command(tcon, &rqst[0]);
/* Query */
- memset(&qi_iov, 0, sizeof(qi_iov));
- rqst[1].rq_iov = qi_iov;
- rqst[1].rq_nvec = 1;
-
- rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
- qi.file_info_class, qi.info_type,
- qi.additional_information,
+ if (qi.flags & PASSTHRU_FSCTL) {
+ /* Can eventually relax perm check since server enforces too */
+ if (!capable(CAP_SYS_ADMIN))
+ rc = -EPERM;
+ else {
+ memset(&io_iov, 0, sizeof(io_iov));
+ rqst[1].rq_iov = io_iov;
+ rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+
+ rc = SMB2_ioctl_init(tcon, &rqst[1],
+ COMPOUND_FID, COMPOUND_FID,
+ qi.info_type, true, NULL,
+ 0, CIFSMaxBufSize);
+ }
+ } else if (qi.flags == PASSTHRU_QUERY_INFO) {
+ memset(&qi_iov, 0, sizeof(qi_iov));
+ rqst[1].rq_iov = qi_iov;
+ rqst[1].rq_nvec = 1;
+
+ rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
+ COMPOUND_FID, qi.file_info_class,
+ qi.info_type, qi.additional_information,
qi.input_buffer_length,
qi.output_buffer_length, buffer);
+ } else { /* unknown flags */
+ cifs_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
+ rc = -EINVAL;
+ }
+
if (rc)
goto iqinf_exit;
smb2_set_next_command(tcon, &rqst[1]);
@@ -1276,24 +1436,44 @@ smb2_ioctl_query_info(const unsigned int xid,
resp_buftype, rsp_iov);
if (rc)
goto iqinf_exit;
- pqi = (struct smb_query_info __user *)arg;
- rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
- if (le32_to_cpu(rsp->OutputBufferLength) < qi.input_buffer_length)
- qi.input_buffer_length = le32_to_cpu(rsp->OutputBufferLength);
- if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
- sizeof(qi.input_buffer_length))) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
- if (copy_to_user(pqi + 1, rsp->Buffer, qi.input_buffer_length)) {
- rc = -EFAULT;
- goto iqinf_exit;
+ if (qi.flags & PASSTHRU_FSCTL) {
+ pqi = (struct smb_query_info __user *)arg;
+ io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
+ if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
+ qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
+ if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
+ sizeof(qi.input_buffer_length))) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
+ if (copy_to_user(pqi + 1, &io_rsp[1], qi.input_buffer_length)) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
+ } else {
+ pqi = (struct smb_query_info __user *)arg;
+ qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
+ qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
+ if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
+ sizeof(qi.input_buffer_length))) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
+ if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
}
iqinf_exit:
kfree(buffer);
SMB2_open_free(&rqst[0]);
- SMB2_query_info_free(&rqst[1]);
+ if (qi.flags & PASSTHRU_FSCTL)
+ SMB2_ioctl_free(&rqst[1]);
+ else
+ SMB2_query_info_free(&rqst[1]);
+
SMB2_close_free(&rqst[2]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
@@ -1348,8 +1528,8 @@ smb2_copychunk_range(const unsigned int xid,
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
true /* is_fsctl */, (char *)pcchunk,
- sizeof(struct copychunk_ioctl), (char **)&retbuf,
- &ret_data_len);
+ sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
+ (char **)&retbuf, &ret_data_len);
if (rc == 0) {
if (ret_data_len !=
sizeof(struct copychunk_ioctl_rsp)) {
@@ -1509,7 +1689,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
true /* is_fctl */,
- &setsparse, 1, NULL, NULL);
+ &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
if (rc) {
tcon->broken_sparse_sup = true;
cifs_dbg(FYI, "set sparse rc = %d\n", rc);
@@ -1582,7 +1762,7 @@ smb2_duplicate_extents(const unsigned int xid,
true /* is_fsctl */,
(char *)&dup_ext_buf,
sizeof(struct duplicate_extents_to_file),
- NULL,
+ CIFSMaxBufSize, NULL,
&ret_data_len);
if (ret_data_len > 0)
@@ -1617,7 +1797,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
true /* is_fsctl */,
(char *)&integr_info,
sizeof(struct fsctl_set_integrity_information_req),
- NULL,
+ CIFSMaxBufSize, NULL,
&ret_data_len);
}
@@ -1625,6 +1805,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
#define GMT_TOKEN_SIZE 50
+#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
+
/*
* Input buffer contains (empty) struct smb_snapshot array with size filled in
* For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
@@ -1636,13 +1818,29 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
char *retbuf = NULL;
unsigned int ret_data_len = 0;
int rc;
+ u32 max_response_size;
struct smb_snapshot_array snapshot_in;
+ if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
+ return -EFAULT;
+
+ /*
+ * Note that for snapshot queries, servers like Azure expect the
+ * first query to be minimal size (and just used to get the
+ * number/size of previous versions), so the response size must be
+ * specified as EXACTLY sizeof(struct snapshot_array), which is 16
+ * when rounded up to a multiple of eight bytes.
+ */
+ if (ret_data_len == 0)
+ max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
+ else
+ max_response_size = CIFSMaxBufSize;
+
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SRV_ENUMERATE_SNAPSHOTS,
true /* is_fsctl */,
- NULL, 0 /* no input data */,
+ NULL, 0 /* no input data */, max_response_size,
(char **)&retbuf,
&ret_data_len);
cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
@@ -1753,14 +1951,14 @@ smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
* the number of credits and return true. Otherwise - return false.
*/
static bool
-smb2_is_status_pending(char *buf, struct TCP_Server_Info *server, int length)
+smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
{
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
if (shdr->Status != STATUS_PENDING)
return false;
- if (!length) {
+ if (shdr->CreditRequest) {
spin_lock(&server->req_lock);
server->credits += le16_to_cpu(shdr->CreditRequest);
spin_unlock(&server->req_lock);
@@ -2120,7 +2318,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_DFS_GET_REFERRALS,
true /* is_fsctl */,
- (char *)dfs_req, dfs_req_size,
+ (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
(char **)&dfs_rsp, &dfs_rsp_size);
} while (rc == -EAGAIN);
@@ -2191,6 +2389,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
&resp_buftype);
+ if (!rc)
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
if (!rc || !err_iov.iov_base) {
rc = -ENOENT;
goto free_path;
@@ -2407,22 +2607,38 @@ get_smb2_acl(struct cifs_sb_info *cifs_sb,
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
loff_t offset, loff_t len, bool keep_size)
{
+ struct cifs_ses *ses = tcon->ses;
struct inode *inode;
struct cifsInodeInfo *cifsi;
struct cifsFileInfo *cfile = file->private_data;
struct file_zero_data_information fsctl_buf;
+ struct smb_rqst rqst[2];
+ int resp_buftype[2];
+ struct kvec rsp_iov[2];
+ struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
+ struct kvec si_iov[1];
+ unsigned int size[1];
+ void *data[1];
long rc;
unsigned int xid;
+ int num = 0, flags = 0;
+ __le64 eof;
xid = get_xid();
inode = d_inode(cfile->dentry);
cifsi = CIFS_I(inode);
+ trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
+ ses->Suid, offset, len);
+
+
/* if file not oplocked can't be sure whether asking to extend size */
if (!CIFS_CACHE_READ(cifsi))
if (keep_size == false) {
rc = -EOPNOTSUPP;
+ trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
+ tcon->tid, ses->Suid, offset, len, rc);
free_xid(xid);
return rc;
}
@@ -2433,33 +2649,74 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
*/
if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
rc = -EOPNOTSUPP;
+ trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
+ ses->Suid, offset, len, rc);
free_xid(xid);
return rc;
}
- /*
- * need to make sure we are not asked to extend the file since the SMB3
- * fsctl does not change the file size. In the future we could change
- * this to zero the first part of the range then set the file size
- * which for a non sparse file would zero the newly extended range
- */
- if (keep_size == false)
- if (i_size_read(inode) < offset + len) {
- rc = -EOPNOTSUPP;
- free_xid(xid);
- return rc;
- }
-
cifs_dbg(FYI, "offset %lld len %lld", offset, len);
fsctl_buf.FileOffset = cpu_to_le64(offset);
fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
- rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
- cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
- true /* is_fctl */, (char *)&fsctl_buf,
- sizeof(struct file_zero_data_information), NULL, NULL);
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(rqst, 0, sizeof(rqst));
+ resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+
+ memset(&io_iov, 0, sizeof(io_iov));
+ rqst[num].rq_iov = io_iov;
+ rqst[num].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+ rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+ true /* is_fctl */, (char *)&fsctl_buf,
+ sizeof(struct file_zero_data_information),
+ CIFSMaxBufSize);
+ if (rc)
+ goto zero_range_exit;
+
+ /*
+ * do we also need to change the size of the file?
+ */
+ if (keep_size == false && i_size_read(inode) < offset + len) {
+ smb2_set_next_command(tcon, &rqst[0]);
+
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num].rq_iov = si_iov;
+ rqst[num].rq_nvec = 1;
+
+ eof = cpu_to_le64(offset + len);
+ size[0] = 8; /* sizeof __le64 */
+ data[0] = &eof;
+
+ rc = SMB2_set_info_init(tcon, &rqst[num++],
+ cfile->fid.persistent_fid,
+ cfile->fid.persistent_fid,
+ current->tgid,
+ FILE_END_OF_FILE_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_related(&rqst[1]);
+ }
+
+ rc = compound_send_recv(xid, ses, flags, num, rqst,
+ resp_buftype, rsp_iov);
+
+ zero_range_exit:
+ SMB2_ioctl_free(&rqst[0]);
+ SMB2_set_info_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
free_xid(xid);
+ if (rc)
+ trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
+ ses->Suid, offset, len, rc);
+ else
+ trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
+ ses->Suid, offset, len);
return rc;
}
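
smb3_zero_range now follows the same compounding recipe. A sketch of the chain it may send:

/*
 * rqst[0]: SMB2_IOCTL FSCTL_SET_ZERO_DATA	(always)
 * rqst[1]: SMB2_SET_INFO FILE_END_OF_FILE	(only when keep_size is
 *	    false and the zeroed range extends past i_size)
 *
 * Zeroing and the size change thus reach the server in a single
 * round trip instead of two.
 */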
@@ -2495,7 +2752,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
true /* is_fctl */, (char *)&fsctl_buf,
- sizeof(struct file_zero_data_information), NULL, NULL);
+ sizeof(struct file_zero_data_information),
+ CIFSMaxBufSize, NULL, NULL);
free_xid(xid);
return rc;
}
@@ -2508,15 +2766,20 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
struct cifsFileInfo *cfile = file->private_data;
long rc = -EOPNOTSUPP;
unsigned int xid;
+ __le64 eof;
xid = get_xid();
inode = d_inode(cfile->dentry);
cifsi = CIFS_I(inode);
+ trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
+ tcon->ses->Suid, off, len);
/* if file not oplocked can't be sure whether asking to extend size */
if (!CIFS_CACHE_READ(cifsi))
if (keep_size == false) {
+ trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid, off, len, rc);
free_xid(xid);
return rc;
}
@@ -2536,6 +2799,12 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
/* BB: in future add else clause to extend file */
else
rc = -EOPNOTSUPP;
+ if (rc)
+ trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid, off, len, rc);
+ else
+ trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid, off, len);
free_xid(xid);
return rc;
}
@@ -2551,14 +2820,31 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
*/
if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
rc = -EOPNOTSUPP;
+ trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid, off, len, rc);
free_xid(xid);
return rc;
}
- rc = smb2_set_sparse(xid, tcon, cfile, inode, false);
+ smb2_set_sparse(xid, tcon, cfile, inode, false);
+ rc = 0;
+ } else {
+ smb2_set_sparse(xid, tcon, cfile, inode, false);
+ rc = 0;
+ if (i_size_read(inode) < off + len) {
+ eof = cpu_to_le64(off + len);
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, cfile->pid,
+ &eof);
+ }
}
- /* BB: else ... in future add code to extend file and set sparse */
+ if (rc)
+ trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
+ tcon->ses->Suid, off, len, rc);
+ else
+ trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
+ tcon->ses->Suid, off, len);
free_xid(xid);
return rc;
@@ -2595,6 +2881,15 @@ smb2_downgrade_oplock(struct TCP_Server_Info *server,
}
static void
+smb21_downgrade_oplock(struct TCP_Server_Info *server,
+ struct cifsInodeInfo *cinode, bool set_level2)
+{
+ server->ops->set_oplock_level(cinode,
+ set_level2 ? SMB2_LEASE_READ_CACHING_HE :
+ 0, 0, NULL);
+}
+
+static void
smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int epoch, bool *purge_cache)
{
@@ -3210,15 +3505,15 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
}
if (server->ops->is_status_pending &&
- server->ops->is_status_pending(buf, server, 0))
+ server->ops->is_status_pending(buf, server))
return -1;
/* set up first two iov to get credits */
rdata->iov[0].iov_base = buf;
- rdata->iov[0].iov_len = 4;
- rdata->iov[1].iov_base = buf + 4;
+ rdata->iov[0].iov_len = 0;
+ rdata->iov[1].iov_base = buf;
rdata->iov[1].iov_len =
- min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
+ min_t(unsigned int, buf_len, server->vals->read_rsp_size);
cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
@@ -3530,6 +3825,104 @@ smb2_next_header(char *buf)
return le32_to_cpu(hdr->NextCommand);
}
+static int
+smb2_make_node(unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ char *full_path, umode_t mode, dev_t dev)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ int rc = -EPERM;
+ int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
+ FILE_ALL_INFO *buf = NULL;
+ struct cifs_io_parms io_parms;
+ __u32 oplock = 0;
+ struct cifs_fid fid;
+ struct cifs_open_parms oparms;
+ unsigned int bytes_written;
+ struct win_dev *pdev;
+ struct kvec iov[2];
+
+ /*
+ * Check if mounted with the 'sfu' mount parm.
+ * SFU emulation should work with all servers, but only
+ * supports block and char devices (no socket & fifo),
+ * and was used by default in earlier versions of Windows.
+ */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+ goto out;
+
+ /*
+ * TODO: Add ability to create instead via reparse point. Windows (e.g.
+ * their current NFS server) uses this approach to expose special files
+ * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
+ */
+
+ if (!S_ISCHR(mode) && !S_ISBLK(mode))
+ goto out;
+
+ cifs_dbg(FYI, "sfu compat create special file\n");
+
+ buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ if (buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (backup_cred(cifs_sb))
+ create_options |= CREATE_OPEN_BACKUP_INTENT;
+
+ oparms.tcon = tcon;
+ oparms.cifs_sb = cifs_sb;
+ oparms.desired_access = GENERIC_WRITE;
+ oparms.create_options = create_options;
+ oparms.disposition = FILE_CREATE;
+ oparms.path = full_path;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ if (tcon->ses->server->oplocks)
+ oplock = REQ_OPLOCK;
+ else
+ oplock = 0;
+ rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
+ if (rc)
+ goto out;
+
+ /*
+ * BB Do not bother to decode buf since no local inode yet to put
+ * timestamps in, but we can reuse it safely.
+ */
+
+ pdev = (struct win_dev *)buf;
+ io_parms.pid = current->tgid;
+ io_parms.tcon = tcon;
+ io_parms.offset = 0;
+ io_parms.length = sizeof(struct win_dev);
+ iov[1].iov_base = buf;
+ iov[1].iov_len = sizeof(struct win_dev);
+ if (S_ISCHR(mode)) {
+ memcpy(pdev->type, "IntxCHR", 8);
+ pdev->major = cpu_to_le64(MAJOR(dev));
+ pdev->minor = cpu_to_le64(MINOR(dev));
+ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written, iov, 1);
+ } else if (S_ISBLK(mode)) {
+ memcpy(pdev->type, "IntxBLK", 8);
+ pdev->major = cpu_to_le64(MAJOR(dev));
+ pdev->minor = cpu_to_le64(MINOR(dev));
+ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written, iov, 1);
+ }
+ tcon->ses->server->ops->close(xid, tcon, &fid);
+ d_drop(dentry);
+
+ /* FIXME: add code here to set EAs */
+out:
+ kfree(buf);
+ return rc;
+}
+
+
struct smb_version_operations smb20_operations = {
.compare_fids = smb2_compare_fids,
.setup_request = smb2_setup_request,
@@ -3541,6 +3934,7 @@ struct smb_version_operations smb20_operations = {
.get_credits = smb2_get_credits,
.wait_mtu_credits = cifs_wait_mtu_credits,
.get_next_mid = smb2_get_next_mid,
+ .revert_current_mid = smb2_revert_current_mid,
.read_data_offset = smb2_read_data_offset,
.read_data_length = smb2_read_data_length,
.map_error = map_smb2_to_linux_error,
@@ -3623,6 +4017,7 @@ struct smb_version_operations smb20_operations = {
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
.ioctl_query_info = smb2_ioctl_query_info,
+ .make_node = smb2_make_node,
};
struct smb_version_operations smb21_operations = {
@@ -3635,7 +4030,9 @@ struct smb_version_operations smb21_operations = {
.get_credits_field = smb2_get_credits_field,
.get_credits = smb2_get_credits,
.wait_mtu_credits = smb2_wait_mtu_credits,
+ .adjust_credits = smb2_adjust_credits,
.get_next_mid = smb2_get_next_mid,
+ .revert_current_mid = smb2_revert_current_mid,
.read_data_offset = smb2_read_data_offset,
.read_data_length = smb2_read_data_length,
.map_error = map_smb2_to_linux_error,
@@ -3646,7 +4043,7 @@ struct smb_version_operations smb21_operations = {
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb2_downgrade_oplock,
+ .downgrade_oplock = smb21_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb2_negotiate_wsize,
@@ -3719,6 +4116,7 @@ struct smb_version_operations smb21_operations = {
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
.ioctl_query_info = smb2_ioctl_query_info,
+ .make_node = smb2_make_node,
};
struct smb_version_operations smb30_operations = {
@@ -3731,7 +4129,9 @@ struct smb_version_operations smb30_operations = {
.get_credits_field = smb2_get_credits_field,
.get_credits = smb2_get_credits,
.wait_mtu_credits = smb2_wait_mtu_credits,
+ .adjust_credits = smb2_adjust_credits,
.get_next_mid = smb2_get_next_mid,
+ .revert_current_mid = smb2_revert_current_mid,
.read_data_offset = smb2_read_data_offset,
.read_data_length = smb2_read_data_length,
.map_error = map_smb2_to_linux_error,
@@ -3743,7 +4143,7 @@ struct smb_version_operations smb30_operations = {
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb2_downgrade_oplock,
+ .downgrade_oplock = smb21_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb3_negotiate_wsize,
@@ -3824,6 +4224,7 @@ struct smb_version_operations smb30_operations = {
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
.ioctl_query_info = smb2_ioctl_query_info,
+ .make_node = smb2_make_node,
};
struct smb_version_operations smb311_operations = {
@@ -3836,7 +4237,9 @@ struct smb_version_operations smb311_operations = {
.get_credits_field = smb2_get_credits_field,
.get_credits = smb2_get_credits,
.wait_mtu_credits = smb2_wait_mtu_credits,
+ .adjust_credits = smb2_adjust_credits,
.get_next_mid = smb2_get_next_mid,
+ .revert_current_mid = smb2_revert_current_mid,
.read_data_offset = smb2_read_data_offset,
.read_data_length = smb2_read_data_length,
.map_error = map_smb2_to_linux_error,
@@ -3848,7 +4251,7 @@ struct smb_version_operations smb311_operations = {
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb2_downgrade_oplock,
+ .downgrade_oplock = smb21_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb3_negotiate_wsize,
@@ -3930,6 +4333,7 @@ struct smb_version_operations smb311_operations = {
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
.ioctl_query_info = smb2_ioctl_query_info,
+ .make_node = smb2_make_node,
};
struct smb_version_values smb20_values = {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 77b3aaa39b35..a37774a55f3a 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -107,13 +107,13 @@ smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
struct TCP_Server_Info *server = tcon->ses->server;
spin_lock(&server->req_lock);
- /* Request up to 2 credits but don't go over the limit. */
+ /* Request up to 10 credits but don't go over the limit. */
if (server->credits >= server->max_credits)
shdr->CreditRequest = cpu_to_le16(0);
else
shdr->CreditRequest = cpu_to_le16(
min_t(int, server->max_credits -
- server->credits, 2));
+ server->credits, 10));
spin_unlock(&server->req_lock);
} else {
shdr->CreditRequest = cpu_to_le16(2);
@@ -173,8 +173,8 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
return -ENOMEM;
if (tcon->ipc) {
- snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
- tcon->ses->server->hostname);
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
+ tcon->ses->server->hostname);
rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
goto out;
}
@@ -206,7 +206,7 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
continue;
}
- snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
+ scnprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
if (!rc)
@@ -490,6 +490,23 @@ build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
{
pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
+ /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
+ pneg_ctxt->Name[0] = 0x93;
+ pneg_ctxt->Name[1] = 0xAD;
+ pneg_ctxt->Name[2] = 0x25;
+ pneg_ctxt->Name[3] = 0x50;
+ pneg_ctxt->Name[4] = 0x9C;
+ pneg_ctxt->Name[5] = 0xB4;
+ pneg_ctxt->Name[6] = 0x11;
+ pneg_ctxt->Name[7] = 0xE7;
+ pneg_ctxt->Name[8] = 0xB4;
+ pneg_ctxt->Name[9] = 0x23;
+ pneg_ctxt->Name[10] = 0x83;
+ pneg_ctxt->Name[11] = 0xDE;
+ pneg_ctxt->Name[12] = 0x96;
+ pneg_ctxt->Name[13] = 0x8B;
+ pneg_ctxt->Name[14] = 0xCD;
+ pneg_ctxt->Name[15] = 0x7C;
}
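The sixteen assignments above spell out SMB2_CREATE_TAG_POSIX byte by byte. An equivalent table-driven sketch (a hypothetical alternative, not what the patch does):

static const __u8 smb2_posix_guid[16] = {
	0x93, 0xAD, 0x25, 0x50, 0x9C, 0xB4, 0x11, 0xE7,
	0xB4, 0x23, 0x83, 0xDE, 0x96, 0x8B, 0xCD, 0x7C
};

/* One memcpy() instead of sixteen single-byte stores. */
static void fill_posix_guid(struct smb2_posix_neg_context *pneg_ctxt)
{
	memcpy(pneg_ctxt->Name, smb2_posix_guid, sizeof(smb2_posix_guid));
}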
static void
@@ -815,8 +832,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
/* ops set to 3.0 by default so update them */
ses->server->ops = &smb21_operations;
- } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
+ ses->server->vals = &smb21_values;
+ } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
ses->server->ops = &smb311_operations;
+ ses->server->vals = &smb311_values;
+ }
} else if (le16_to_cpu(rsp->DialectRevision) !=
ses->server->vals->protocol_id) {
/* if requested single dialect ensure returned dialect matched */
@@ -985,9 +1005,16 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
- (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
-
- if (rc != 0) {
+ (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
+ (char **)&pneg_rsp, &rsplen);
+ if (rc == -EOPNOTSUPP) {
+ /*
+ * Old Windows versions or a Netapp SMB server can return
+ * a "not supported" error. The client should accept it.
+ */
+ cifs_dbg(VFS, "Server does not support validate negotiate\n");
+ return 0;
+ } else if (rc != 0) {
cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
rc = -EIO;
goto out_free_inbuf;
@@ -1605,15 +1632,25 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
iov[1].iov_base = unc_path;
iov[1].iov_len = unc_path_len;
- /* 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 */
+ /*
+ * A 3.11 tcon request must be signed if not encrypted (see MS-SMB2
+ * 3.2.4.1.1), unless it is a guest or anonymous user (see MS-SMB2
+ * 3.2.5.3.1). Samba servers don't always set the flag, so also check
+ * for a null user.
+ */
if ((ses->server->dialect == SMB311_PROT_ID) &&
- !smb3_encryption_required(tcon))
+ !smb3_encryption_required(tcon) &&
+ !(ses->session_flags &
+ (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
+ ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
+ /* Need 64 credits for a max size write, so ask for more in case not there yet */
+ req->sync_hdr.CreditRequest = cpu_to_le16(64);
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
@@ -1771,9 +1808,10 @@ create_reconnect_durable_buf(struct cifs_fid *fid)
return buf;
}
-static __u8
-parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
- unsigned int *epoch, char *lease_key)
+__u8
+smb2_parse_lease_state(struct TCP_Server_Info *server,
+ struct smb2_create_rsp *rsp,
+ unsigned int *epoch, char *lease_key)
{
char *data_offset;
struct create_context *cc;
@@ -1824,8 +1862,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
}
static struct create_durable_v2 *
-create_durable_v2_buf(struct cifs_fid *pfid)
+create_durable_v2_buf(struct cifs_open_parms *oparms)
{
+ struct cifs_fid *pfid = oparms->fid;
struct create_durable_v2 *buf;
buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
@@ -1839,7 +1878,14 @@ create_durable_v2_buf(struct cifs_fid *pfid)
(struct create_durable_v2, Name));
buf->ccontext.NameLength = cpu_to_le16(4);
- buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
+ /*
+ * NB: Handle timeout defaults to 0, which allows the server to choose
+ * (most servers default to 120 seconds) and most clients default to 0.
+ * This can be overridden at mount ("handletimeout=") if the user wants
+ * a different persistent (or resilient) handle timeout for all opens
+ * on a particular SMB3 mount.
+ */
+ buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
generate_random_uuid(buf->dcontext.CreateGuid);
memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
@@ -1892,7 +1938,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
- iov[num].iov_base = create_durable_v2_buf(oparms->fid);
+ iov[num].iov_base = create_durable_v2_buf(oparms);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_durable_v2);
@@ -2170,6 +2216,8 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
rqst.rq_iov = iov;
rqst.rq_nvec = n_iov;
+ trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
+ FILE_WRITE_ATTRIBUTES);
/* resource #4: response buffer */
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
if (rc) {
@@ -2388,6 +2436,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
if (rc)
goto creat_exit;
+ trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
+ oparms->create_options, oparms->desired_access);
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2425,8 +2476,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
}
if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
- *oplock = parse_lease_state(server, rsp, &oparms->fid->epoch,
- oparms->fid->lease_key);
+ *oplock = smb2_parse_lease_state(server, rsp,
+ &oparms->fid->epoch,
+ oparms->fid->lease_key);
else
*oplock = rsp->OplockLevel;
creat_exit:
@@ -2435,113 +2487,138 @@ creat_exit:
return rc;
}
-/*
- * SMB2 IOCTL is used for both IOCTLs and FSCTLs
- */
int
-SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
- u64 volatile_fid, u32 opcode, bool is_fsctl,
- char *in_data, u32 indatalen,
- char **out_data, u32 *plen /* returned data len */)
+SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid, u32 opcode,
+ bool is_fsctl, char *in_data, u32 indatalen,
+ __u32 max_response_size)
{
- struct smb_rqst rqst;
struct smb2_ioctl_req *req;
- struct smb2_ioctl_rsp *rsp;
- struct cifs_ses *ses;
- struct kvec iov[2];
- struct kvec rsp_iov;
- int resp_buftype;
- int n_iov;
- int rc = 0;
- int flags = 0;
+ struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
-
- cifs_dbg(FYI, "SMB2 IOCTL\n");
-
- if (out_data != NULL)
- *out_data = NULL;
-
- /* zero out returned data len, in case of error */
- if (plen)
- *plen = 0;
-
- if (tcon)
- ses = tcon->ses;
- else
- return -EIO;
-
- if (!ses || !(ses->server))
- return -EIO;
+ int rc;
rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len);
if (rc)
return rc;
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
-
req->CtlCode = cpu_to_le32(opcode);
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
+ iov[0].iov_base = (char *)req;
+ /*
+ * If no input data, the size of ioctl struct in
+ * protocol spec still includes a 1 byte data buffer,
+ * but if input data is passed to the ioctl, we do not
+ * want to double count this, so we do not send
+ * the dummy one byte of data in iovec[0] if sending
+ * input data (in iovec[1]).
+ */
if (indatalen) {
req->InputCount = cpu_to_le32(indatalen);
/* do not set InputOffset if no input data */
req->InputOffset =
cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
+ rqst->rq_nvec = 2;
+ iov[0].iov_len = total_len - 1;
iov[1].iov_base = in_data;
iov[1].iov_len = indatalen;
- n_iov = 2;
- } else
- n_iov = 1;
+ } else {
+ rqst->rq_nvec = 1;
+ iov[0].iov_len = total_len;
+ }
req->OutputOffset = 0;
req->OutputCount = 0; /* MBZ */
/*
- * Could increase MaxOutputResponse, but that would require more
- * than one credit. Windows typically sets this smaller, but for some
+ * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
+ * We could increase the default MaxOutputResponse, but that could require
+ * more credits. Windows typically sets this smaller, but for some
* ioctls it may be useful to allow server to send more. No point
* limiting what the server can send as long as fits in one credit
- * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE
- * (by default, note that it can be overridden to make max larger)
- * in responses (except for read responses which can be bigger.
- * We may want to bump this limit up
+ * We cannot handle more than CIFS_MAX_BUF_SIZE yet, but may want
+ * to increase this limit in the future.
+ * Note that for snapshot queries, servers like Azure expect that
+ * the first query be minimal size (and just used to get the number/size
+ * of previous versions) so the response size must be specified as EXACTLY
+ * sizeof(struct snapshot_array), which is 16 when rounded up to a multiple
+ * of eight bytes. Currently that is the only case where we set the max
+ * response size smaller.
*/
- req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
+ req->MaxOutputResponse = cpu_to_le32(max_response_size);
if (is_fsctl)
req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
else
req->Flags = 0;
- iov[0].iov_base = (char *)req;
-
- /*
- * If no input data, the size of ioctl struct in
- * protocol spec still includes a 1 byte data buffer,
- * but if input data passed to ioctl, we do not
- * want to double count this, so we do not send
- * the dummy one byte of data in iovec[0] if sending
- * input data (in iovec[1]).
- */
-
- if (indatalen) {
- iov[0].iov_len = total_len - 1;
- } else
- iov[0].iov_len = total_len;
-
/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+ return 0;
+}
+
+void
+SMB2_ioctl_free(struct smb_rqst *rqst)
+{
+ if (rqst && rqst->rq_iov)
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+}
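The new SMB2_ioctl_init()/SMB2_ioctl_free() split lets callers build an ioctl request without sending it, which is what compound chains need. The rewritten SMB2_ioctl() below is itself the canonical caller; condensed to its essentials (a sketch, with error handling elided):

	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = SMB2_IOCTL_IOV_SIZE };

	rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid,
			     opcode, is_fsctl, in_data, indatalen,
			     max_out_data_len);
	if (!rc)
		rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
				    &rsp_iov);
	SMB2_ioctl_free(&rqst);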
+
+
+/*
+ * SMB2 IOCTL is used for both IOCTLs and FSCTLs
+ */
+int
+SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ u64 volatile_fid, u32 opcode, bool is_fsctl,
+ char *in_data, u32 indatalen, u32 max_out_data_len,
+ char **out_data, u32 *plen /* returned data len */)
+{
+ struct smb_rqst rqst;
+ struct smb2_ioctl_rsp *rsp = NULL;
+ struct cifs_ses *ses;
+ struct kvec iov[SMB2_IOCTL_IOV_SIZE];
+ struct kvec rsp_iov = {NULL, 0};
+ int resp_buftype = CIFS_NO_BUFFER;
+ int rc = 0;
+ int flags = 0;
+
+ cifs_dbg(FYI, "SMB2 IOCTL\n");
+
+ if (out_data != NULL)
+ *out_data = NULL;
+
+ /* zero out returned data len, in case of error */
+ if (plen)
+ *plen = 0;
+
+ if (tcon)
+ ses = tcon->ses;
+ else
+ return -EIO;
+
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
- rqst.rq_nvec = n_iov;
+ rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
+
+ rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
+ is_fsctl, in_data, indatalen, max_out_data_len);
+ if (rc)
+ goto ioctl_exit;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
- cifs_small_buf_release(req);
rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
if (rc != 0)
@@ -2591,6 +2668,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
}
ioctl_exit:
+ SMB2_ioctl_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
@@ -2613,7 +2691,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SET_COMPRESSION, true /* is_fsctl */,
(char *)&fsctl_input /* data input */,
- 2 /* in data len */, &ret_data /* out data */, NULL);
+ 2 /* in data len */, CIFSMaxBufSize /* max out data */,
+ &ret_data /* out data */, NULL);
cifs_dbg(FYI, "set compression rc %d\n", rc);
@@ -2837,6 +2916,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
if (rc)
goto qinf_exit;
+ trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
+ ses->Suid, info_class, (__u32)info_type);
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
@@ -2847,6 +2929,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
goto qinf_exit;
}
+ trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
+ ses->Suid, info_class, (__u32)info_type);
+
if (dlen) {
*dlen = le32_to_cpu(rsp->OutputBufferLength);
if (!*data) {
@@ -2924,14 +3009,16 @@ smb2_echo_callback(struct mid_q_entry *mid)
{
struct TCP_Server_Info *server = mid->callback_data;
struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
- unsigned int credits_received = 0;
+ struct cifs_credits credits = { .value = 0, .instance = 0 };
if (mid->mid_state == MID_RESPONSE_RECEIVED
- || mid->mid_state == MID_RESPONSE_MALFORMED)
- credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+ || mid->mid_state == MID_RESPONSE_MALFORMED) {
+ credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+ credits.instance = server->reconnect_instance;
+ }
DeleteMidQEntry(mid);
- add_credits(server, credits_received, CIFS_ECHO_OP);
+ add_credits(server, &credits, CIFS_ECHO_OP);
}
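The callbacks in this patch replace the bare credits_received counter with a struct cifs_credits. The struct itself is defined in cifsglob.h and not shown in this section; its assumed shape, based on the .value/.instance users here, is:

/* Assumed definition (lives in cifsglob.h, outside this diff):
 * credits are tagged with the reconnect instance that granted them,
 * so stale credits from a previous session can be detected and
 * dropped. */
struct cifs_credits {
	unsigned int value;	/* number of credits held */
	unsigned int instance;	/* server->reconnect_instance at grant time */
};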
void smb2_reconnect_server(struct work_struct *work)
@@ -3023,7 +3110,7 @@ SMB2_echo(struct TCP_Server_Info *server)
iov[0].iov_base = (char *)req;
rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
- server, CIFS_ECHO_OP);
+ server, CIFS_ECHO_OP, NULL);
if (rc)
cifs_dbg(FYI, "Echo request failed: %d\n", rc);
@@ -3114,6 +3201,11 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
req->MinimumCount = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
+
+ trace_smb3_read_enter(0 /* xid */,
+ io_parms->persistent_fid,
+ io_parms->tcon->tid, io_parms->tcon->ses->Suid,
+ io_parms->offset, io_parms->length);
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a RDMA write, fill in and append
@@ -3184,7 +3276,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_sync_hdr *shdr =
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
- unsigned int credits_received = 0;
+ struct cifs_credits credits = { .value = 0, .instance = 0 };
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2,
.rq_pages = rdata->pages,
@@ -3199,7 +3291,8 @@ smb2_readv_callback(struct mid_q_entry *mid)
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
- credits_received = le16_to_cpu(shdr->CreditRequest);
+ credits.value = le16_to_cpu(shdr->CreditRequest);
+ credits.instance = server->reconnect_instance;
/* result already set, check signature */
if (server->sign && !mid->decrypted) {
int rc;
@@ -3224,11 +3317,11 @@ smb2_readv_callback(struct mid_q_entry *mid)
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
case MID_RESPONSE_MALFORMED:
- credits_received = le16_to_cpu(shdr->CreditRequest);
+ credits.value = le16_to_cpu(shdr->CreditRequest);
+ credits.instance = server->reconnect_instance;
/* fall through */
default:
- if (rdata->result != -ENODATA)
- rdata->result = -EIO;
+ rdata->result = -EIO;
}
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
@@ -3255,7 +3348,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
- add_credits(server, credits_received, 0);
+ add_credits(server, &credits, 0);
}
/* smb2_async_readv - send an async read, and set up mid to handle result */
@@ -3285,17 +3378,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
rc = smb2_new_read_req(
(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
- if (rc) {
- if (rc == -EAGAIN && rdata->credits) {
- /* credits was reset by reconnect */
- rdata->credits = 0;
- /* reduce in_flight value since we won't send the req */
- spin_lock(&server->req_lock);
- server->in_flight--;
- spin_unlock(&server->req_lock);
- }
+ if (rc)
return rc;
- }
if (smb3_encryption_required(io_parms.tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -3305,24 +3389,24 @@ smb2_async_readv(struct cifs_readdata *rdata)
shdr = (struct smb2_sync_hdr *)buf;
- if (rdata->credits) {
+ if (rdata->credits.value > 0) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
SMB2_MAX_BUFFER_SIZE));
shdr->CreditRequest =
cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
- spin_lock(&server->req_lock);
- server->credits += rdata->credits -
- le16_to_cpu(shdr->CreditCharge);
- spin_unlock(&server->req_lock);
- wake_up(&server->request_q);
- rdata->credits = le16_to_cpu(shdr->CreditCharge);
+
+ rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+ if (rc)
+ goto async_readv_out;
+
flags |= CIFS_HAS_CREDITS;
}
kref_get(&rdata->refcount);
rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
cifs_readv_receive, smb2_readv_callback,
- smb3_handle_read_data, rdata, flags);
+ smb3_handle_read_data, rdata, flags,
+ &rdata->credits);
if (rc) {
kref_put(&rdata->refcount, cifs_readdata_release);
cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
@@ -3332,6 +3416,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
io_parms.offset, io_parms.length, rc);
}
+async_readv_out:
cifs_small_buf_release(buf);
return rc;
}
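Both the read and write paths now call adjust_credits() instead of open-coding the req_lock arithmetic shown in the removed lines. The operation is wired up as smb2_adjust_credits in the ops tables earlier in this patch; a hedged sketch of the dispatch wrapper the callers rely on (the real helper lives in cifsglob.h, not shown here):

/* Sketch: rescale the held credits to the charge needed for a
 * 'size'-byte I/O, returning any surplus to the server pool. */
static inline int
adjust_credits(struct TCP_Server_Info *server, struct cifs_credits *credits,
	       const unsigned int size)
{
	return server->ops->adjust_credits ?
		server->ops->adjust_credits(server, credits, size) : 0;
}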
@@ -3366,8 +3451,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
rqst.rq_nvec = 1;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(req);
-
rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -3378,14 +3461,20 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
io_parms->tcon->tid, ses->Suid,
io_parms->offset, io_parms->length,
rc);
- }
+ } else
+ trace_smb3_read_done(xid, req->PersistentFileId,
+ io_parms->tcon->tid, ses->Suid,
+ io_parms->offset, 0);
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+ cifs_small_buf_release(req);
return rc == -ENODATA ? 0 : rc;
} else
trace_smb3_read_done(xid, req->PersistentFileId,
io_parms->tcon->tid, ses->Suid,
io_parms->offset, io_parms->length);
+ cifs_small_buf_release(req);
+
*nbytes = le32_to_cpu(rsp->DataLength);
if ((*nbytes > CIFS_MAX_MSGSIZE) ||
(*nbytes > io_parms->length)) {
@@ -3417,14 +3506,16 @@ smb2_writev_callback(struct mid_q_entry *mid)
{
struct cifs_writedata *wdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ struct TCP_Server_Info *server = tcon->ses->server;
unsigned int written;
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
- unsigned int credits_received = 0;
+ struct cifs_credits credits = { .value = 0, .instance = 0 };
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
- credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
- wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
+ credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+ credits.instance = server->reconnect_instance;
+ wdata->result = smb2_check_receive(mid, server, 0);
if (wdata->result != 0)
break;
@@ -3448,7 +3539,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
wdata->result = -EAGAIN;
break;
case MID_RESPONSE_MALFORMED:
- credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+ credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+ credits.instance = server->reconnect_instance;
/* fall through */
default:
wdata->result = -EIO;
@@ -3481,7 +3573,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
- add_credits(tcon->ses->server, credits_received, 0);
+ add_credits(server, &credits, 0);
}
/* smb2_async_writev - send an async write, and set up mid to handle result */
@@ -3499,17 +3591,8 @@ smb2_async_writev(struct cifs_writedata *wdata,
unsigned int total_len;
rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
- if (rc) {
- if (rc == -EAGAIN && wdata->credits) {
- /* credits was reset by reconnect */
- wdata->credits = 0;
- /* reduce in_flight value since we won't send the req */
- spin_lock(&server->req_lock);
- server->in_flight--;
- spin_unlock(&server->req_lock);
- }
- goto async_writev_out;
- }
+ if (rc)
+ return rc;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -3526,6 +3609,9 @@ smb2_async_writev(struct cifs_writedata *wdata,
req->DataOffset = cpu_to_le16(
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
+
+ trace_smb3_write_enter(0 /* xid */, wdata->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes);
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a server RDMA read, fill in and append
@@ -3595,23 +3681,22 @@ smb2_async_writev(struct cifs_writedata *wdata,
req->Length = cpu_to_le32(wdata->bytes);
#endif
- if (wdata->credits) {
+ if (wdata->credits.value > 0) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
SMB2_MAX_BUFFER_SIZE));
shdr->CreditRequest =
cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
- spin_lock(&server->req_lock);
- server->credits += wdata->credits -
- le16_to_cpu(shdr->CreditCharge);
- spin_unlock(&server->req_lock);
- wake_up(&server->request_q);
- wdata->credits = le16_to_cpu(shdr->CreditCharge);
+
+ rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+ if (rc)
+ goto async_writev_out;
+
flags |= CIFS_HAS_CREDITS;
}
kref_get(&wdata->refcount);
rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
- wdata, flags);
+ wdata, flags, &wdata->credits);
if (rc) {
trace_smb3_write_err(0 /* no xid */, req->PersistentFileId,
@@ -3674,6 +3759,10 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
+ trace_smb3_write_enter(xid, io_parms->persistent_fid,
+ io_parms->tcon->tid, io_parms->tcon->ses->Suid,
+ io_parms->offset, io_parms->length);
+
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
@@ -3684,7 +3773,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
&resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(req);
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -3702,6 +3790,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
io_parms->offset, *nbytes);
}
+ cifs_small_buf_release(req);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
@@ -3836,6 +3925,9 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
+ trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
+ tcon->ses->Suid, index, output_size);
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
@@ -3843,18 +3935,26 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
if (rc) {
if (rc == -ENODATA &&
rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
+ trace_smb3_query_dir_done(xid, persistent_fid,
+ tcon->tid, tcon->ses->Suid, index, 0);
srch_inf->endOfSearch = true;
rc = 0;
- } else
+ } else {
+ trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
+ tcon->ses->Suid, index, 0, rc);
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+ }
goto qdir_exit;
}
rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
info_buf_size);
- if (rc)
+ if (rc) {
+ trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
+ tcon->ses->Suid, index, 0, rc);
goto qdir_exit;
+ }
srch_inf->unicode = true;
@@ -3882,6 +3982,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
else
cifs_dbg(VFS, "illegal search buffer type\n");
+ trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
+ tcon->ses->Suid, index, srch_inf->entries_in_buffer);
return rc;
qdir_exit:
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 538e2299805f..ee8977688e21 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -288,12 +288,12 @@ struct smb2_encryption_neg_context {
__le16 Ciphers[1]; /* Ciphers[0] since only one used now */
} __packed;
-#define POSIX_CTXT_DATA_LEN 8
+#define POSIX_CTXT_DATA_LEN 16
struct smb2_posix_neg_context {
__le16 ContextType; /* 0x100 */
__le16 DataLength;
__le32 Reserved;
- __le64 Reserved1; /* In case needed for future (eg version or caps) */
+ __u8 Name[16]; /* POSIX ctxt GUID 93AD25509CB411E7B42383DE968BCD7C */
} __packed;
struct smb2_negotiate_rsp {
@@ -959,6 +959,13 @@ struct duplicate_extents_to_file {
__le64 ByteCount; /* Bytes to be copied */
} __packed;
+/*
+ * Maximum number of iovs we need for an ioctl request.
+ * [0] : struct smb2_ioctl_req
+ * [1] : in_data
+ */
+#define SMB2_IOCTL_IOV_SIZE 2
+
struct smb2_ioctl_req {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 57 */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 87733b27a65f..52df125e9189 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -142,8 +142,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
extern void SMB2_open_free(struct smb_rqst *rqst);
extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
- bool is_fsctl, char *in_data, u32 indatalen,
+ bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
char **out_data, u32 *plen /* returned data len */);
+extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid, u32 opcode,
+ bool is_fsctl, char *in_data, u32 indatalen,
+ __u32 max_response_size);
+extern void SMB2_ioctl_free(struct smb_rqst *rqst);
extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
@@ -223,6 +228,9 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
+extern __u8 smb2_parse_lease_state(struct TCP_Server_Info *server,
+ struct smb2_create_rsp *rsp,
+ unsigned int *epoch, char *lease_key);
extern int smb3_encryption_required(const struct cifs_tcon *tcon);
extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
struct kvec *iov, unsigned int min_buf_size);
diff --git a/fs/cifs/smb2status.h b/fs/cifs/smb2status.h
index 3d5f62150de4..447c0c6e4c64 100644
--- a/fs/cifs/smb2status.h
+++ b/fs/cifs/smb2status.h
@@ -30,9 +30,9 @@
*/
#define STATUS_SEVERITY_SUCCESS __constant_cpu_to_le32(0x0000)
-#define STATUS_SEVERITY_INFORMATIONAL __constanst_cpu_to_le32(0x0001)
-#define STATUS_SEVERITY_WARNING __constanst_cpu_to_le32(0x0002)
-#define STATUS_SEVERITY_ERROR __constanst_cpu_to_le32(0x0003)
+#define STATUS_SEVERITY_INFORMATIONAL cpu_to_le32(0x0001)
+#define STATUS_SEVERITY_WARNING cpu_to_le32(0x0002)
+#define STATUS_SEVERITY_ERROR cpu_to_le32(0x0003)
struct ntstatus {
/* Facility is the high 12 bits of the following field */
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 7b351c65ee46..d1181572758b 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -576,6 +576,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
struct TCP_Server_Info *server)
{
struct mid_q_entry *temp;
+ unsigned int credits = le16_to_cpu(shdr->CreditCharge);
if (server == NULL) {
cifs_dbg(VFS, "Null TCP session in smb2_mid_entry_alloc\n");
@@ -586,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
memset(temp, 0, sizeof(struct mid_q_entry));
kref_init(&temp->refcount);
temp->mid = le64_to_cpu(shdr->MessageId);
+ temp->credits = credits > 0 ? credits : 1;
temp->pid = current->pid;
temp->command = shdr->Command; /* Always LE */
temp->when_alloc = jiffies;
@@ -600,6 +602,8 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
atomic_inc(&midCount);
temp->mid_state = MID_REQUEST_ALLOCATED;
+ trace_smb3_cmd_enter(shdr->TreeId, shdr->SessionId,
+ le16_to_cpu(shdr->Command), temp->mid);
return temp;
}
@@ -615,6 +619,10 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_sync_hdr *shdr,
return -EAGAIN;
}
+ if (ses->server->tcpStatus == CifsNeedNegotiate &&
+ shdr->Command != SMB2_NEGOTIATE)
+ return -EAGAIN;
+
if (ses->status == CifsNew) {
if ((shdr->Command != SMB2_SESSION_SETUP) &&
(shdr->Command != SMB2_NEGOTIATE))
@@ -634,6 +642,7 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_sync_hdr *shdr,
spin_lock(&GlobalMid_Lock);
list_add_tail(&(*mid)->qhead, &ses->server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
+
return 0;
}
@@ -674,13 +683,18 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
smb2_seq_num_into_buf(ses->server, shdr);
rc = smb2_get_mid_entry(ses, shdr, &mid);
- if (rc)
+ if (rc) {
+ revert_current_mid_from_hdr(ses->server, shdr);
return ERR_PTR(rc);
+ }
+
rc = smb2_sign_rqst(rqst, ses->server);
if (rc) {
+ revert_current_mid_from_hdr(ses->server, shdr);
cifs_delete_mid(mid);
return ERR_PTR(rc);
}
+
return mid;
}
@@ -692,14 +706,21 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
(struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
+ if (server->tcpStatus == CifsNeedNegotiate &&
+ shdr->Command != SMB2_NEGOTIATE)
+ return ERR_PTR(-EAGAIN);
+
smb2_seq_num_into_buf(server, shdr);
mid = smb2_mid_entry_alloc(shdr, server);
- if (mid == NULL)
+ if (mid == NULL) {
+ revert_current_mid_from_hdr(server, shdr);
return ERR_PTR(-ENOMEM);
+ }
rc = smb2_sign_rqst(rqst, server);
if (rc) {
+ revert_current_mid_from_hdr(server, shdr);
DeleteMidQEntry(mid);
return ERR_PTR(rc);
}
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index a568dac7b3a1..b943b74cd246 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -1550,7 +1550,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
char name[MAX_NAME_LEN];
int rc;
- snprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
+ scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
info->request_cache =
kmem_cache_create(
name,
@@ -1566,7 +1566,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
if (!info->request_mempool)
goto out1;
- snprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
+ scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
info->response_cache =
kmem_cache_create(
name,
@@ -1582,7 +1582,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
if (!info->response_mempool)
goto out3;
- snprintf(name, MAX_NAME_LEN, "smbd_%p", info);
+ scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
info->workqueue = create_workqueue(name);
if (!info->workqueue)
goto out4;
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index 59be48206932..99c4d799c24b 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -58,6 +58,9 @@ DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \
DEFINE_SMB3_RW_ERR_EVENT(write_err);
DEFINE_SMB3_RW_ERR_EVENT(read_err);
+DEFINE_SMB3_RW_ERR_EVENT(query_dir_err);
+DEFINE_SMB3_RW_ERR_EVENT(zero_err);
+DEFINE_SMB3_RW_ERR_EVENT(falloc_err);
/* For logging successful read or write */
@@ -100,8 +103,16 @@ DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \
__u32 len), \
TP_ARGS(xid, fid, tid, sesid, offset, len))
+DEFINE_SMB3_RW_DONE_EVENT(write_enter);
+DEFINE_SMB3_RW_DONE_EVENT(read_enter);
+DEFINE_SMB3_RW_DONE_EVENT(query_dir_enter);
+DEFINE_SMB3_RW_DONE_EVENT(zero_enter);
+DEFINE_SMB3_RW_DONE_EVENT(falloc_enter);
DEFINE_SMB3_RW_DONE_EVENT(write_done);
DEFINE_SMB3_RW_DONE_EVENT(read_done);
+DEFINE_SMB3_RW_DONE_EVENT(query_dir_done);
+DEFINE_SMB3_RW_DONE_EVENT(zero_done);
+DEFINE_SMB3_RW_DONE_EVENT(falloc_done);
/*
* For handle based calls other than read and write, and get/set info
@@ -148,6 +159,48 @@ DEFINE_SMB3_FD_ERR_EVENT(close_err);
/*
* For handle based query/set info calls
*/
+DECLARE_EVENT_CLASS(smb3_inf_enter_class,
+ TP_PROTO(unsigned int xid,
+ __u64 fid,
+ __u32 tid,
+ __u64 sesid,
+ __u8 infclass,
+ __u32 type),
+ TP_ARGS(xid, fid, tid, sesid, infclass, type),
+ TP_STRUCT__entry(
+ __field(unsigned int, xid)
+ __field(__u64, fid)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(__u8, infclass)
+ __field(__u32, type)
+ ),
+ TP_fast_assign(
+ __entry->xid = xid;
+ __entry->fid = fid;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->infclass = infclass;
+ __entry->type = type;
+ ),
+ TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx class=%u type=0x%x",
+ __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+ __entry->infclass, __entry->type)
+)
+
+#define DEFINE_SMB3_INF_ENTER_EVENT(name) \
+DEFINE_EVENT(smb3_inf_enter_class, smb3_##name, \
+ TP_PROTO(unsigned int xid, \
+ __u64 fid, \
+ __u32 tid, \
+ __u64 sesid, \
+ __u8 infclass, \
+ __u32 type), \
+ TP_ARGS(xid, fid, tid, sesid, infclass, type))
+
+DEFINE_SMB3_INF_ENTER_EVENT(query_info_enter);
+DEFINE_SMB3_INF_ENTER_EVENT(query_info_done);
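Each DEFINE_SMB3_INF_ENTER_EVENT(name) above expands to a trace_smb3_<name>() helper. For reference, the matching call sites appear in the query_info() hunk earlier in this patch, bracketing the request:

	trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
				    ses->Suid, info_class, (__u32)info_type);
	/* ... send request and validate response ... */
	trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
				   ses->Suid, info_class, (__u32)info_type);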
+
DECLARE_EVENT_CLASS(smb3_inf_err_class,
TP_PROTO(unsigned int xid,
__u64 fid,
@@ -195,6 +248,123 @@ DEFINE_SMB3_INF_ERR_EVENT(query_info_err);
DEFINE_SMB3_INF_ERR_EVENT(set_info_err);
DEFINE_SMB3_INF_ERR_EVENT(fsctl_err);
+DECLARE_EVENT_CLASS(smb3_inf_compound_enter_class,
+ TP_PROTO(unsigned int xid,
+ __u32 tid,
+ __u64 sesid,
+ const char *full_path),
+ TP_ARGS(xid, tid, sesid, full_path),
+ TP_STRUCT__entry(
+ __field(unsigned int, xid)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __string(path, full_path)
+ ),
+ TP_fast_assign(
+ __entry->xid = xid;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __assign_str(path, full_path);
+ ),
+ TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s",
+ __entry->xid, __entry->sesid, __entry->tid,
+ __get_str(path))
+)
+
+#define DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(name) \
+DEFINE_EVENT(smb3_inf_compound_enter_class, smb3_##name, \
+ TP_PROTO(unsigned int xid, \
+ __u32 tid, \
+ __u64 sesid, \
+ const char *full_path), \
+ TP_ARGS(xid, tid, sesid, full_path))
+
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(query_info_compound_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(hardlink_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rename_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rmdir_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_eof_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_info_compound_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
+
+
+DECLARE_EVENT_CLASS(smb3_inf_compound_done_class,
+ TP_PROTO(unsigned int xid,
+ __u32 tid,
+ __u64 sesid),
+ TP_ARGS(xid, tid, sesid),
+ TP_STRUCT__entry(
+ __field(unsigned int, xid)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ ),
+ TP_fast_assign(
+ __entry->xid = xid;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ ),
+ TP_printk("xid=%u sid=0x%llx tid=0x%x",
+ __entry->xid, __entry->sesid, __entry->tid)
+)
+
+#define DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(name) \
+DEFINE_EVENT(smb3_inf_compound_done_class, smb3_##name, \
+ TP_PROTO(unsigned int xid, \
+ __u32 tid, \
+ __u64 sesid), \
+ TP_ARGS(xid, tid, sesid))
+
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_info_compound_done);
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(hardlink_done);
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rename_done);
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rmdir_done);
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_eof_done);
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_info_compound_done);
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);
+
+
+DECLARE_EVENT_CLASS(smb3_inf_compound_err_class,
+ TP_PROTO(unsigned int xid,
+ __u32 tid,
+ __u64 sesid,
+ int rc),
+ TP_ARGS(xid, tid, sesid, rc),
+ TP_STRUCT__entry(
+ __field(unsigned int, xid)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(int, rc)
+ ),
+ TP_fast_assign(
+ __entry->xid = xid;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->rc = rc;
+ ),
+ TP_printk("xid=%u sid=0x%llx tid=0x%x rc=%d",
+ __entry->xid, __entry->sesid, __entry->tid,
+ __entry->rc)
+)
+
+#define DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(name) \
+DEFINE_EVENT(smb3_inf_compound_err_class, smb3_##name, \
+ TP_PROTO(unsigned int xid, \
+ __u32 tid, \
+ __u64 sesid, \
+ int rc), \
+ TP_ARGS(xid, tid, sesid, rc))
+
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_info_compound_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(hardlink_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rename_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rmdir_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_eof_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_info_compound_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);
+
/*
* For logging SMB3 Status code and Command for responses which return errors
*/
@@ -270,6 +440,7 @@ DEFINE_EVENT(smb3_cmd_done_class, smb3_##name, \
__u64 mid), \
TP_ARGS(tid, sesid, cmd, mid))
+DEFINE_SMB3_CMD_DONE_EVENT(cmd_enter);
DEFINE_SMB3_CMD_DONE_EVENT(cmd_done);
DEFINE_SMB3_CMD_DONE_EVENT(ses_expired);
@@ -378,19 +549,19 @@ DECLARE_EVENT_CLASS(smb3_tcon_class,
__field(unsigned int, xid)
__field(__u32, tid)
__field(__u64, sesid)
- __field(const char *, unc_name)
+ __string(name, unc_name)
__field(int, rc)
),
TP_fast_assign(
__entry->xid = xid;
__entry->tid = tid;
__entry->sesid = sesid;
- __entry->unc_name = unc_name;
+ __assign_str(name, unc_name);
__entry->rc = rc;
),
TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
__entry->xid, __entry->sesid, __entry->tid,
- __entry->unc_name, __entry->rc)
+ __get_str(name), __entry->rc)
)
#define DEFINE_SMB3_TCON_EVENT(name) \
@@ -406,8 +577,47 @@ DEFINE_SMB3_TCON_EVENT(tcon);
/*
- * For smb2/smb3 open call
+ * For smb2/smb3 open (including create and mkdir) calls
*/
+
+DECLARE_EVENT_CLASS(smb3_open_enter_class,
+ TP_PROTO(unsigned int xid,
+ __u32 tid,
+ __u64 sesid,
+ int create_options,
+ int desired_access),
+ TP_ARGS(xid, tid, sesid, create_options, desired_access),
+ TP_STRUCT__entry(
+ __field(unsigned int, xid)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(int, create_options)
+ __field(int, desired_access)
+ ),
+ TP_fast_assign(
+ __entry->xid = xid;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->create_options = create_options;
+ __entry->desired_access = desired_access;
+ ),
+ TP_printk("xid=%u sid=0x%llx tid=0x%x cr_opts=0x%x des_access=0x%x",
+ __entry->xid, __entry->sesid, __entry->tid,
+ __entry->create_options, __entry->desired_access)
+)
+
+#define DEFINE_SMB3_OPEN_ENTER_EVENT(name) \
+DEFINE_EVENT(smb3_open_enter_class, smb3_##name, \
+ TP_PROTO(unsigned int xid, \
+ __u32 tid, \
+ __u64 sesid, \
+ int create_options, \
+ int desired_access), \
+ TP_ARGS(xid, tid, sesid, create_options, desired_access))
+
+DEFINE_SMB3_OPEN_ENTER_EVENT(open_enter);
+DEFINE_SMB3_OPEN_ENTER_EVENT(posix_mkdir_enter);
+
DECLARE_EVENT_CLASS(smb3_open_err_class,
TP_PROTO(unsigned int xid,
__u32 tid,
@@ -626,6 +836,7 @@ DEFINE_EVENT(smb3_credit_class, smb3_##name, \
TP_ARGS(currmid, hostname, credits))
DEFINE_SMB3_CREDIT_EVENT(reconnect_with_invalid_credits);
+DEFINE_SMB3_CREDIT_EVENT(credit_timeout);
#endif /* _CIFS_TRACE_H */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 53532bd3f50d..1de8e996e566 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -33,6 +33,7 @@
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
+#include <linux/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
@@ -291,6 +292,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
int n_vec;
unsigned int send_length = 0;
unsigned int i, j;
+ sigset_t mask, oldmask;
size_t total_len = 0, sent, size;
struct socket *ssocket = server->ssocket;
struct msghdr smb_msg;
@@ -301,8 +303,14 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
rc = smbd_send(server, rqst);
goto smbd_done;
}
+
if (ssocket == NULL)
- return -ENOTSOCK;
+ return -EAGAIN;
+
+ if (signal_pending(current)) {
+ cifs_dbg(FYI, "signal is pending before sending any data\n");
+ return -EINTR;
+ }
/* cork the socket */
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
@@ -312,6 +320,16 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
send_length += smb_rqst_len(server, &rqst[j]);
rfc1002_marker = cpu_to_be32(send_length);
+ /*
+ * We should not allow signals to interrupt the network send because
+ * any partial send will cause session reconnects, thus increasing the
+ * latency of system calls and overloading the server with unnecessary
+ * requests.
+ */
+
+ sigfillset(&mask);
+ sigprocmask(SIG_BLOCK, &mask, &oldmask);
+
/* Generate a rfc1002 marker for SMB2+ */
if (server->vals->header_preamble_size == 0) {
struct kvec hiov = {
@@ -321,7 +339,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
- goto uncork;
+ goto unmask;
total_len += sent;
send_length += 4;
@@ -343,7 +361,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
- goto uncork;
+ goto unmask;
total_len += sent;
@@ -365,7 +383,25 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
}
}
-uncork:
+unmask:
+ sigprocmask(SIG_SETMASK, &oldmask, NULL);
+
+ /*
+ * If a signal is pending but we have already sent the whole packet to
+ * the server, we need to return success status to allow a corresponding
+ * mid entry to be kept in the pending requests queue, thus allowing
+ * the client to handle responses from the server.
+ *
+ * If only part of the packet has been sent, there is no need to hide
+ * the interrupt because the session will be reconnected anyway, so there
+ * won't be any response from the server to handle.
+ */
+
+ if (signal_pending(current) && (total_len != send_length)) {
+ cifs_dbg(FYI, "signal is pending after attempt to send\n");
+ rc = -EINTR;
+ }
+
/* uncork it */
val = 0;
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
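Taken together, the hunks above implement a block-signals-around-send pattern. A condensed sketch of its shape (mirroring the patch, not a new API):

	sigset_t mask, oldmask;

	/* Block everything so a signal cannot truncate the SMB frame
	 * mid-send; a partial frame would force a session reconnect. */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* ... smb_send_kvec() loop sending header and payload ... */

	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/* Only report -EINTR if the frame went out incomplete; a fully
	 * sent request still has a response worth waiting for. */
	if (signal_pending(current) && total_len != send_length)
		rc = -EINTR;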
@@ -450,29 +486,55 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
}
static int
-wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
- int *credits)
+wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
+ const int timeout, const int flags,
+ unsigned int *instance)
{
int rc;
+ int *credits;
+ int optype;
+ long int t;
+
+ if (timeout < 0)
+ t = MAX_JIFFY_OFFSET;
+ else
+ t = msecs_to_jiffies(timeout);
+
+ optype = flags & CIFS_OP_MASK;
+
+ *instance = 0;
+
+ credits = server->ops->get_credits_field(server, optype);
+ /* Since an echo is already inflight, no need to wait to send another */
+ if (*credits <= 0 && optype == CIFS_ECHO_OP)
+ return -EAGAIN;
spin_lock(&server->req_lock);
- if (timeout == CIFS_ASYNC_OP) {
+ if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP) {
/* oplock breaks must not be held up */
server->in_flight++;
*credits -= 1;
+ *instance = server->reconnect_instance;
spin_unlock(&server->req_lock);
return 0;
}
while (1) {
- if (*credits <= 0) {
+ if (*credits < num_credits) {
spin_unlock(&server->req_lock);
cifs_num_waiters_inc(server);
- rc = wait_event_killable(server->request_q,
- has_credits(server, credits));
+ rc = wait_event_killable_timeout(server->request_q,
+ has_credits(server, credits, num_credits), t);
cifs_num_waiters_dec(server);
- if (rc)
- return rc;
+ if (!rc) {
+ trace_smb3_credit_timeout(server->CurrentMid,
+ server->hostname, num_credits);
+ cifs_dbg(VFS, "wait timed out after %d ms\n",
+ timeout);
+ return -ENOTSUPP;
+ }
+ if (rc == -ERESTARTSYS)
+ return -ERESTARTSYS;
spin_lock(&server->req_lock);
} else {
if (server->tcpStatus == CifsExiting) {
@@ -481,14 +543,53 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
}
/*
+ * For normal commands, reserve the last MAX_COMPOUND
+ * credits for compound requests.
+ * Otherwise these compounds could be permanently
+ * starved for credits by single-credit requests.
+ *
+ * To prevent the CPU from spinning, block this thread until
+ * there are >MAX_COMPOUND credits available.
+ * But only do this if we already have a lot of
+ * credits in flight to avoid triggering this check
+ * for servers that are slow to hand out credits on
+ * new sessions.
+ */
+ if (!optype && num_credits == 1 &&
+ server->in_flight > 2 * MAX_COMPOUND &&
+ *credits <= MAX_COMPOUND) {
+ spin_unlock(&server->req_lock);
+ cifs_num_waiters_inc(server);
+ rc = wait_event_killable_timeout(
+ server->request_q,
+ has_credits(server, credits,
+ MAX_COMPOUND + 1),
+ t);
+ cifs_num_waiters_dec(server);
+ if (!rc) {
+ trace_smb3_credit_timeout(
+ server->CurrentMid,
+ server->hostname, num_credits);
+ cifs_dbg(VFS, "wait timed out after %d ms\n",
+ timeout);
+ return -ENOTSUPP;
+ }
+ if (rc == -ERESTARTSYS)
+ return -ERESTARTSYS;
+ spin_lock(&server->req_lock);
+ continue;
+ }
+
+ /*
* Can not count locking commands against total
* as they are allowed to block on server.
*/
/* update # of requests on the wire to server */
- if (timeout != CIFS_BLOCKING_OP) {
- *credits -= 1;
- server->in_flight++;
+ if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
+ *credits -= num_credits;
+ server->in_flight += num_credits;
+ *instance = server->reconnect_instance;
}
spin_unlock(&server->req_lock);
break;
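The reservation added above can be restated as a predicate (a sketch; the names match the function above, but the helper itself is hypothetical):

/* A plain single-credit request must yield when the connection is
 * busy (more than 2 * MAX_COMPOUND requests in flight) and the pool
 * has shrunk to the MAX_COMPOUND credits reserved for compounds. */
static bool must_yield_to_compounds(int optype, int num_credits,
				    int in_flight, int credits)
{
	return optype == 0 && num_credits == 1 &&
	       in_flight > 2 * MAX_COMPOUND &&
	       credits <= MAX_COMPOUND;
}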
@@ -498,24 +599,45 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
}
static int
-wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
- const int optype)
+wait_for_free_request(struct TCP_Server_Info *server, const int flags,
+ unsigned int *instance)
{
- int *val;
+ return wait_for_free_credits(server, 1, -1, flags,
+ instance);
+}
- val = server->ops->get_credits_field(server, optype);
- /* Since an echo is already inflight, no need to wait to send another */
- if (*val <= 0 && optype == CIFS_ECHO_OP)
- return -EAGAIN;
- return wait_for_free_credits(server, timeout, val);
+static int
+wait_for_compound_request(struct TCP_Server_Info *server, int num,
+ const int flags, unsigned int *instance)
+{
+ int *credits;
+
+ credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
+
+ spin_lock(&server->req_lock);
+ if (*credits < num) {
+ /*
+ * Return immediately if not enough requests are in flight, since
+ * we would likely be stuck waiting for credits.
+ */
+ if (server->in_flight < num - *credits) {
+ spin_unlock(&server->req_lock);
+ return -ENOTSUPP;
+ }
+ }
+ spin_unlock(&server->req_lock);
+
+ return wait_for_free_credits(server, num, 60000, flags,
+ instance);
}
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
- unsigned int *num, unsigned int *credits)
+ unsigned int *num, struct cifs_credits *credits)
{
*num = size;
- *credits = 0;
+ credits->value = 0;
+ credits->instance = server->reconnect_instance;
return 0;
}
@@ -602,27 +724,43 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
mid_receive_t *receive, mid_callback_t *callback,
- mid_handle_t *handle, void *cbdata, const int flags)
+ mid_handle_t *handle, void *cbdata, const int flags,
+ const struct cifs_credits *exist_credits)
{
- int rc, timeout, optype;
+ int rc;
struct mid_q_entry *mid;
- unsigned int credits = 0;
+ struct cifs_credits credits = { .value = 0, .instance = 0 };
+ unsigned int instance;
+ int optype;
- timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
if ((flags & CIFS_HAS_CREDITS) == 0) {
- rc = wait_for_free_request(server, timeout, optype);
+ rc = wait_for_free_request(server, flags, &instance);
if (rc)
return rc;
- credits = 1;
- }
+ credits.value = 1;
+ credits.instance = instance;
+ } else
+ instance = exist_credits->instance;
mutex_lock(&server->srv_mutex);
+
+ /*
+ * We can't use credits obtained from the previous session to send this
+ * request. Check if there were reconnects after we obtained credits and
+ * return -EAGAIN in such cases to let callers handle it.
+ */
+ if (instance != server->reconnect_instance) {
+ mutex_unlock(&server->srv_mutex);
+ add_credits_and_wake_if(server, &credits, optype);
+ return -EAGAIN;
+ }
+
mid = server->ops->setup_async_request(server, rqst);
if (IS_ERR(mid)) {
mutex_unlock(&server->srv_mutex);
- add_credits_and_wake_if(server, credits, optype);
+ add_credits_and_wake_if(server, &credits, optype);
return PTR_ERR(mid);
}
@@ -647,6 +785,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
cifs_in_send_dec(server);
if (rc < 0) {
+ revert_current_mid(server, mid->credits);
server->sequence_number -= 2;
cifs_delete_mid(mid);
}
@@ -656,7 +795,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
if (rc == 0)
return 0;
- add_credits_and_wake_if(server, credits, optype);
+ add_credits_and_wake_if(server, &credits, optype);
return rc;
}
@@ -786,8 +925,12 @@ static void
cifs_compound_callback(struct mid_q_entry *mid)
{
struct TCP_Server_Info *server = mid->server;
+ struct cifs_credits credits;
+
+ credits.value = server->ops->get_credits(mid);
+ credits.instance = server->reconnect_instance;
- add_credits(server, server->ops->get_credits(mid), mid->optype);
+ add_credits(server, &credits, mid->optype);
}
static void
@@ -809,14 +952,15 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
const int flags, const int num_rqst, struct smb_rqst *rqst,
int *resp_buf_type, struct kvec *resp_iov)
{
- int i, j, rc = 0;
- int timeout, optype;
+ int i, j, optype, rc = 0;
struct mid_q_entry *midQ[MAX_COMPOUND];
bool cancelled_mid[MAX_COMPOUND] = {false};
- unsigned int credits[MAX_COMPOUND] = {0};
+ struct cifs_credits credits[MAX_COMPOUND] = {
+ { .value = 0, .instance = 0 }
+ };
+ unsigned int instance;
char *buf;
- timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
for (i = 0; i < num_rqst; i++)
@@ -831,30 +975,21 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
return -ENOENT;
/*
- * Ensure we obtain 1 credit per request in the compound chain.
- * It can be optimized further by waiting for all the credits
- * at once but this can wait long enough if we don't have enough
- * credits due to some heavy operations in progress or the server
- * not granting us much, so a fallback to the current approach is
- * needed anyway.
+ * Wait for credits for all the requests to become available.
+ * This approach still leaves the possibility of being stuck waiting for
+ * credits if the server doesn't grant credits to the outstanding
+ * requests and if the client is completely idle, not generating any
+ * other requests.
+ * This can be handled by the eventual session reconnect.
*/
+ rc = wait_for_compound_request(ses->server, num_rqst, flags,
+ &instance);
+ if (rc)
+ return rc;
+
for (i = 0; i < num_rqst; i++) {
- rc = wait_for_free_request(ses->server, timeout, optype);
- if (rc) {
- /*
- * We haven't sent an SMB packet to the server yet but
- * we already obtained credits for i requests in the
- * compound chain - need to return those credits back
- * for future use. Note that we need to call add_credits
- * multiple times to match the way we obtained credits
- * in the first place and to account for in flight
- * requests correctly.
- */
- for (j = 0; j < i; j++)
- add_credits(ses->server, 1, optype);
- return rc;
- }
- credits[i] = 1;
+ credits[i].value = 1;
+ credits[i].instance = instance;
}
/*
@@ -865,16 +1000,31 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
mutex_lock(&ses->server->srv_mutex);
+ /*
+ * All the parts of the compound chain must use credits obtained from
+ * the same session. We can not use credits obtained from the previous
+ * session to send this request. Check if there were reconnects after
+ * we obtained credits and return -EAGAIN in such cases to let callers
+ * handle it.
+ */
+ if (instance != ses->server->reconnect_instance) {
+ mutex_unlock(&ses->server->srv_mutex);
+ for (j = 0; j < num_rqst; j++)
+ add_credits(ses->server, &credits[j], optype);
+ return -EAGAIN;
+ }
+
for (i = 0; i < num_rqst; i++) {
midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
if (IS_ERR(midQ[i])) {
+ revert_current_mid(ses->server, i);
for (j = 0; j < i; j++)
cifs_delete_mid(midQ[j]);
mutex_unlock(&ses->server->srv_mutex);
/* Update # of requests on wire to server */
for (j = 0; j < num_rqst; j++)
- add_credits(ses->server, credits[j], optype);
+ add_credits(ses->server, &credits[j], optype);
return PTR_ERR(midQ[i]);
}
@@ -897,15 +1047,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
for (i = 0; i < num_rqst; i++)
cifs_save_when_sent(midQ[i]);
- if (rc < 0)
+ if (rc < 0) {
+ revert_current_mid(ses->server, num_rqst);
ses->server->sequence_number -= 2;
+ }
mutex_unlock(&ses->server->srv_mutex);
if (rc < 0) {
/* Sending failed for some reason - return credits back */
for (i = 0; i < num_rqst; i++)
- add_credits(ses->server, credits[i], optype);
+ add_credits(ses->server, &credits[i], optype);
goto out;
}
@@ -924,7 +1076,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
smb311_update_preauth_hash(ses, rqst[0].rq_iov,
rqst[0].rq_nvec);
- if (timeout == CIFS_ASYNC_OP)
+ if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
goto out;
for (i = 0; i < num_rqst; i++) {
@@ -942,7 +1094,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true;
- credits[i] = 0;
+ credits[i].value = 0;
}
spin_unlock(&GlobalMid_Lock);
}
@@ -1061,13 +1213,14 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
- int *pbytes_returned, const int timeout)
+ int *pbytes_returned, const int flags)
{
int rc = 0;
struct mid_q_entry *midQ;
unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
struct kvec iov = { .iov_base = in_buf, .iov_len = len };
struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
+ struct cifs_credits credits = { .value = 1, .instance = 0 };
if (ses == NULL) {
cifs_dbg(VFS, "Null smb session\n");
@@ -1091,7 +1244,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
- rc = wait_for_free_request(ses->server, timeout, 0);
+ rc = wait_for_free_request(ses->server, flags, &credits.instance);
if (rc)
return rc;
@@ -1105,7 +1258,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
/* Update # of requests on wire to server */
- add_credits(ses->server, 1, 0);
+ add_credits(ses->server, &credits, 0);
return rc;
}
@@ -1130,7 +1283,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc < 0)
goto out;
- if (timeout == CIFS_ASYNC_OP)
+ if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
goto out;
rc = wait_for_response(ses->server, midQ);
@@ -1141,7 +1294,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
- add_credits(ses->server, 1, 0);
+ add_credits(ses->server, &credits, 0);
return rc;
}
spin_unlock(&GlobalMid_Lock);
@@ -1149,7 +1302,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
- add_credits(ses->server, 1, 0);
+ add_credits(ses->server, &credits, 0);
return rc;
}
@@ -1165,7 +1318,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_check_receive(midQ, ses->server, 0);
out:
cifs_delete_mid(midQ);
- add_credits(ses->server, 1, 0);
+ add_credits(ses->server, &credits, 0);
return rc;
}
@@ -1207,6 +1360,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
struct kvec iov = { .iov_base = in_buf, .iov_len = len };
struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
+ unsigned int instance;
if (tcon == NULL || tcon->ses == NULL) {
cifs_dbg(VFS, "Null smb session\n");
@@ -1232,7 +1386,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return -EIO;
}
- rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
+ rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
if (rc)
return rc;
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index 02b7d91c9231..f0de238000c0 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -1,16 +1,16 @@
config FS_ENCRYPTION
- tristate "FS Encryption (Per-file encryption)"
+ bool "FS Encryption (Per-file encryption)"
select CRYPTO
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_XTS
select CRYPTO_CTS
- select CRYPTO_CTR
select CRYPTO_SHA256
select KEYS
help
Enable encryption of files and directories. This
feature is similar to ecryptfs, but it is more memory
efficient since it avoids caching the encrypted and
- decrypted pages in the page cache.
+ decrypted pages in the page cache. Currently Ext4,
+ F2FS and UBIFS make use of this feature.
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 7424f851eb5c..7da276159593 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -12,7 +12,6 @@
#ifndef _FSCRYPT_PRIVATE_H
#define _FSCRYPT_PRIVATE_H
-#define __FS_HAS_ENCRYPTION 1
#include <linux/fscrypt.h>
#include <crypto/hash.h>
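
Two related fscrypt cleanups land here: FS_ENCRYPTION becomes a bool, so fs/crypto/ can no longer be built as a module, and the per-filesystem __FS_HAS_ENCRYPTION define goes away because a single kernel-wide CONFIG_FS_ENCRYPTION now decides whether the real API or inline stubs are visible. A hedged sketch of that header shape (the function is illustrative, not the actual <linux/fscrypt.h> contents):

#ifdef CONFIG_FS_ENCRYPTION
/* real declaration, backed by fs/crypto/ built into the kernel */
int example_fscrypt_op(struct inode *inode);
#else
/* inline stub so callers still compile with encryption configured out */
static inline int example_fscrypt_op(struct inode *inode)
{
	return -EOPNOTSUPP;
}
#endif
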
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 926e5df20ec3..56debb1fcf5e 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -58,7 +58,7 @@ int __fscrypt_prepare_link(struct inode *inode, struct inode *dir)
return err;
if (!fscrypt_has_permitted_context(dir, inode))
- return -EPERM;
+ return -EXDEV;
return 0;
}
@@ -82,13 +82,13 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_ENCRYPTED(new_dir) &&
!fscrypt_has_permitted_context(new_dir,
d_inode(old_dentry)))
- return -EPERM;
+ return -EXDEV;
if ((flags & RENAME_EXCHANGE) &&
IS_ENCRYPTED(old_dir) &&
!fscrypt_has_permitted_context(old_dir,
d_inode(new_dentry)))
- return -EPERM;
+ return -EXDEV;
}
return 0;
}
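
Returning -EXDEV instead of -EPERM reuses the cross-device convention: tools such as mv(1) already react to EXDEV by falling back to copy-and-delete, which is exactly what a user wants when moving files into or out of an encrypted directory. Reduced to its core, the check looks like this (helper name hypothetical):

static int example_prepare_link(struct inode *inode, struct inode *dir)
{
	/* cross-context links now fail the way cross-mount links do */
	if (!fscrypt_has_permitted_context(dir, inode))
		return -EXDEV;	/* previously -EPERM */
	return 0;
}
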
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 322ce9686bdb..2cb4956f8511 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -402,7 +402,6 @@ static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
{
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
- desc->flags = 0;
return crypto_shash_digest(desc, key, keysize, salt);
}
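
The desc->flags assignment disappears here (and in the ecryptfs hunks below) because the shash API dropped the field kernel-wide; synchronous hash implementations never consulted it. A stack descriptor now only needs its tfm set, roughly:

static int example_shash_digest(struct crypto_shash *tfm, const u8 *data,
				unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;	/* the only field callers still initialize */
	return crypto_shash_digest(desc, data, len, out);
}
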
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index f490de921ce8..bd7eaf9b3f00 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -151,8 +151,7 @@ EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
* malicious offline violations of this constraint, while the link and rename
* checks are needed to prevent online violations of this constraint.
*
- * Return: 1 if permitted, 0 if forbidden. If forbidden, the caller must fail
- * the filesystem operation with EPERM.
+ * Return: 1 if permitted, 0 if forbidden.
*/
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
{
diff --git a/fs/dax.c b/fs/dax.c
index 6959837cc465..e5e54da1715f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -33,6 +33,7 @@
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
+#include <asm/pgalloc.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
@@ -788,7 +789,7 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
address = pgoff_address(index, vma);
/*
- * Note because we provide start/end to follow_pte_pmd it will
+ * Note because we provide range to follow_pte_pmd it will
* call mmu_notifier_invalidate_range_start() on our behalf
* before taking any lock.
*/
@@ -843,9 +844,8 @@ unlock_pte:
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
struct address_space *mapping, void *entry)
{
- unsigned long pfn;
+ unsigned long pfn, index, count;
long ret = 0;
- size_t size;
/*
* A page got tagged dirty in DAX mapping? Something is seriously
@@ -894,17 +894,18 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
xas_unlock_irq(xas);
/*
- * Even if dax_writeback_mapping_range() was given a wbc->range_start
- * in the middle of a PMD, the 'index' we are given will be aligned to
- * the start index of the PMD, as will the pfn we pull from 'entry'.
+ * If dax_writeback_mapping_range() was given a wbc->range_start
+ * in the middle of a PMD, the 'index' we use needs to be
+ * aligned to the start of the PMD.
* This allows us to flush for PMD_SIZE and not have to worry about
* partial PMD writebacks.
*/
pfn = dax_to_pfn(entry);
- size = PAGE_SIZE << dax_entry_order(entry);
+ count = 1UL << dax_entry_order(entry);
+ index = xas->xa_index & ~(count - 1);
- dax_entry_mkclean(mapping, xas->xa_index, pfn);
- dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
+ dax_entry_mkclean(mapping, index, pfn);
+ dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
/*
* After we have flushed the cache, we can clear the dirty tag. There
* cannot be new dirty data in the pfn after the flush has completed as
@@ -917,8 +918,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
dax_wake_entry(xas, entry, false);
- trace_dax_writeback_one(mapping->host, xas->xa_index,
- size >> PAGE_SHIFT);
+ trace_dax_writeback_one(mapping->host, index, count);
return ret;
put_unlocked:
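
The writeback fix above stops trusting the caller-supplied index and recomputes it from the entry itself: an order-N entry covers 1 << N pages, and masking with ~(count - 1) rounds any index inside the entry down to its start. A standalone demonstration of the arithmetic, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int order = 9;			/* PMD order on x86-64 */
	unsigned long count = 1UL << order;	/* 512 pages per entry */
	unsigned long xa_index = 0x12345;	/* an index mid-PMD */
	unsigned long index = xa_index & ~(count - 1);

	printf("count=%lu aligned=0x%lx\n", count, index); /* 512, 0x12200 */
	return 0;
}
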
@@ -1220,9 +1220,7 @@ static vm_fault_t dax_fault_return(int error)
{
if (error == 0)
return VM_FAULT_NOPAGE;
- if (error == -ENOMEM)
- return VM_FAULT_OOM;
- return VM_FAULT_SIGBUS;
+ return vmf_error(error);
}
/*
@@ -1410,7 +1408,9 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
unsigned long pmd_addr = vmf->address & PMD_MASK;
+ struct vm_area_struct *vma = vmf->vma;
struct inode *inode = mapping->host;
+ pgtable_t pgtable = NULL;
struct page *zero_page;
spinlock_t *ptl;
pmd_t pmd_entry;
@@ -1425,12 +1425,22 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
DAX_PMD | DAX_ZERO_PAGE, false);
+ if (arch_needs_pgtable_deposit()) {
+ pgtable = pte_alloc_one(vma->vm_mm);
+ if (!pgtable)
+ return VM_FAULT_OOM;
+ }
+
ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (!pmd_none(*(vmf->pmd))) {
spin_unlock(ptl);
goto fallback;
}
+ if (pgtable) {
+ pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+ mm_inc_nr_ptes(vma->vm_mm);
+ }
pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
pmd_entry = pmd_mkhuge(pmd_entry);
set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
@@ -1439,6 +1449,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
return VM_FAULT_NOPAGE;
fallback:
+ if (pgtable)
+ pte_free(vma->vm_mm, pgtable);
trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
return VM_FAULT_FALLBACK;
}
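
The dax_pmd_load_hole() change matters on architectures where arch_needs_pgtable_deposit() returns true (powerpc being the usual example): a PTE page must be deposited alongside any huge PMD so that a later split of the mapping never has to allocate, and therefore cannot fail. Stripped of the entry bookkeeping, the pattern is a sketch like this (zero-page lookup, xarray setup, and the fallback path's pte_free() are elided):

static vm_fault_t example_install_huge_zero(struct vm_fault *vmf,
					    struct vm_area_struct *vma)
{
	pgtable_t pgtable = NULL;
	spinlock_t *ptl;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm); /* before taking the lock */
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (pgtable) {
		/* stash a PTE page so splitting this PMD cannot fail */
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	/* ... build and set_pmd_at() the huge zero-page entry here ... */
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}
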
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 95b5e78c22b1..f25daa207421 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
-static void debugfs_evict_inode(struct inode *inode)
+static void debugfs_i_callback(struct rcu_head *head)
{
- truncate_inode_pages_final(&inode->i_data);
- clear_inode(inode);
+ struct inode *inode = container_of(head, struct inode, i_rcu);
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
+ free_inode_nonrcu(inode);
+}
+
+static void debugfs_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, debugfs_i_callback);
}
static const struct super_operations debugfs_super_operations = {
.statfs = simple_statfs,
.remount_fs = debugfs_remount,
.show_options = debugfs_show_options,
- .evict_inode = debugfs_evict_inode,
+ .destroy_inode = debugfs_destroy_inode,
};
static void debugfs_release_dentry(struct dentry *dentry)
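
Switching debugfs from .evict_inode to an RCU-deferred .destroy_inode closes a race with lockless path walks: an RCU-walk lookup may still be reading the inode (its i_link, for instance) after the last reference is dropped, so the actual free has to wait out a grace period. The generic shape of the idiom:

static void example_i_callback(struct rcu_head *head)
{
	/* recover the inode from its embedded rcu_head */
	struct inode *inode = container_of(head, struct inode, i_rcu);

	free_inode_nonrcu(inode);
}

static void example_destroy_inode(struct inode *inode)
{
	/* defer the free until all current RCU readers are done */
	call_rcu(&inode->i_rcu, example_i_callback);
}
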
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index c53814539070..553a3f3300ae 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -455,6 +455,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
s->s_blocksize_bits = 10;
s->s_magic = DEVPTS_SUPER_MAGIC;
s->s_op = &devpts_sops;
+ s->s_d_op = &simple_dentry_operations;
s->s_time_gran = 1;
error = -ENOMEM;
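
The one-line devpts change installs simple_dentry_operations, whose only method is a d_delete hook that always says yes; pty dentries are then dropped as soon as their last reference goes away instead of lingering in the dcache after the terminal is closed. For reference, the helper amounts to this (an illustrative restatement, not a verbatim copy of fs/libfs.c):

static const struct dentry_operations example_dops = {
	.d_delete = always_delete_dentry,
};
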
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f664da55234e..491cf5baa8c2 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -68,7 +68,6 @@ static int ecryptfs_hash_digest(struct crypto_shash *tfm,
int err;
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_digest(desc, src, len, dst);
shash_desc_zero(desc);
return err;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index e74fe84d0886..90fbac5d485b 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -769,7 +769,6 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
}
s->hash_desc->tfm = s->hash_tfm;
- s->hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
rc = crypto_shash_digest(s->hash_desc,
(u8 *)s->auth_tok->token.password.session_key_encryption_key,
diff --git a/fs/exofs/BUGS b/fs/exofs/BUGS
deleted file mode 100644
index 1b2d4c63a579..000000000000
--- a/fs/exofs/BUGS
+++ /dev/null
@@ -1,3 +0,0 @@
-- Out-of-space may cause a severe problem if the object (and directory entry)
- were written, but the inode attributes failed. Then if the filesystem was
- unmounted and mounted the kernel can get into an endless loop doing a readdir.
diff --git a/fs/exofs/Kbuild b/fs/exofs/Kbuild
deleted file mode 100644
index a364fd0965ec..000000000000
--- a/fs/exofs/Kbuild
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Kbuild for the EXOFS module
-#
-# Copyright (C) 2008 Panasas Inc. All rights reserved.
-#
-# Authors:
-# Boaz Harrosh <ooo@electrozaur.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2
-#
-# Kbuild - Gets included from the Kernel's Makefile and build system
-#
-
-# ore module library
-libore-y := ore.o ore_raid.o
-obj-$(CONFIG_ORE) += libore.o
-
-exofs-y := inode.o file.o namei.o dir.o super.o sys.o
-obj-$(CONFIG_EXOFS_FS) += exofs.o
diff --git a/fs/exofs/Kconfig b/fs/exofs/Kconfig
deleted file mode 100644
index 86194b2f799d..000000000000
--- a/fs/exofs/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-config EXOFS_FS
- tristate "exofs: OSD based file system support"
- depends on SCSI_OSD_ULD
- help
- EXOFS is a file system that uses an OSD storage device,
- as its backing storage.
-
-# Debugging-related stuff
-config EXOFS_DEBUG
- bool "Enable debugging"
- depends on EXOFS_FS
- help
- This option enables EXOFS debug prints.
diff --git a/fs/exofs/Kconfig.ore b/fs/exofs/Kconfig.ore
deleted file mode 100644
index 2daf2329c28d..000000000000
--- a/fs/exofs/Kconfig.ore
+++ /dev/null
@@ -1,14 +0,0 @@
-# ORE - Objects Raid Engine (libore.ko)
-#
-# Note ORE needs to "select ASYNC_XOR". So Not to force multiple selects
-# for every ORE user we do it like this. Any user should add itself here
-# at the "depends on EXOFS_FS || ..." with an ||. The dependencies are
-# selected here, and we default to "ON". So in effect it is like been
-# selected by any of the users.
-config ORE
- tristate
- depends on EXOFS_FS || PNFS_OBJLAYOUT
- select ASYNC_XOR
- select RAID6_PQ
- select ASYNC_PQ
- default SCSI_OSD_ULD
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
deleted file mode 100644
index 7d88ef566213..000000000000
--- a/fs/exofs/common.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * common.h - Common definitions for both Kernel and user-mode utilities
- *
- * Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com)
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * Copyrights for code taken from ext2:
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- * from
- * linux/fs/minix/inode.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation. Since it is based on ext2, and the only
- * valid version of GPL for the Linux kernel is version 2, the only valid
- * version of GPL for exofs is version 2.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef __EXOFS_COM_H__
-#define __EXOFS_COM_H__
-
-#include <linux/types.h>
-
-#include <scsi/osd_attributes.h>
-#include <scsi/osd_initiator.h>
-#include <scsi/osd_sec.h>
-
-/****************************************************************************
- * Object ID related defines
- * NOTE: inode# = object ID - EXOFS_OBJ_OFF
- ****************************************************************************/
-#define EXOFS_MIN_PID 0x10000 /* Smallest partition ID */
-#define EXOFS_OBJ_OFF 0x10000 /* offset for objects */
-#define EXOFS_SUPER_ID 0x10000 /* object ID for on-disk superblock */
-#define EXOFS_DEVTABLE_ID 0x10001 /* object ID for on-disk device table */
-#define EXOFS_ROOT_ID 0x10002 /* object ID for root directory */
-
-/* exofs Application specific page/attribute */
-/* Inode attrs */
-# define EXOFS_APAGE_FS_DATA (OSD_APAGE_APP_DEFINED_FIRST + 3)
-# define EXOFS_ATTR_INODE_DATA 1
-# define EXOFS_ATTR_INODE_FILE_LAYOUT 2
-# define EXOFS_ATTR_INODE_DIR_LAYOUT 3
-/* Partition attrs */
-# define EXOFS_APAGE_SB_DATA (0xF0000000U + 3)
-# define EXOFS_ATTR_SB_STATS 1
-
-/*
- * The maximum number of files we can have is limited by the size of the
- * inode number. This is the largest object ID that the file system supports.
- * Object IDs 0, 1, and 2 are always in use (see above defines).
- */
-enum {
- EXOFS_MAX_INO_ID = (sizeof(ino_t) * 8 == 64) ? ULLONG_MAX :
- (1ULL << (sizeof(ino_t) * 8ULL - 1ULL)),
- EXOFS_MAX_ID = (EXOFS_MAX_INO_ID - 1 - EXOFS_OBJ_OFF),
-};
-
-/****************************************************************************
- * Misc.
- ****************************************************************************/
-#define EXOFS_BLKSHIFT 12
-#define EXOFS_BLKSIZE (1UL << EXOFS_BLKSHIFT)
-
-/****************************************************************************
- * superblock-related things
- ****************************************************************************/
-#define EXOFS_SUPER_MAGIC 0x5DF5
-
-/*
- * The file system control block - stored in object EXOFS_SUPER_ID's data.
- * This is where the in-memory superblock is stored on disk.
- */
-enum {EXOFS_FSCB_VER = 1, EXOFS_DT_VER = 1};
-struct exofs_fscb {
- __le64 s_nextid; /* Only used after mkfs */
- __le64 s_numfiles; /* Only used after mkfs */
- __le32 s_version; /* == EXOFS_FSCB_VER */
- __le16 s_magic; /* Magic signature */
- __le16 s_newfs; /* Non-zero if this is a new fs */
-
- /* From here on it's a static part, only written by mkexofs */
- __le64 s_dev_table_oid; /* Reserved, not used */
- __le64 s_dev_table_count; /* == 0 means no dev_table */
-} __packed;
-
-/*
- * This struct is set on the FS partition's attributes.
- * [EXOFS_APAGE_SB_DATA, EXOFS_ATTR_SB_STATS] and is written together
- * with the create command, to atomically persist the sb writeable information.
- */
-struct exofs_sb_stats {
- __le64 s_nextid; /* Highest object ID used */
- __le64 s_numfiles; /* Number of files on fs */
-} __packed;
-
-/*
- * Describes the raid used in the FS. It is part of the device table.
- * This here is taken from the pNFS-objects definition. In exofs we
- * use one raid policy throughout the filesystem. (NOTE: the funny
- * alignment at the beginning. We take care of it in exofs_device_table.)
- */
-struct exofs_dt_data_map {
- __le32 cb_num_comps;
- __le64 cb_stripe_unit;
- __le32 cb_group_width;
- __le32 cb_group_depth;
- __le32 cb_mirror_cnt;
- __le32 cb_raid_algorithm;
-} __packed;
-
-/*
- * This is an osd device information descriptor. It is a single entry in
- * the exofs device table. It describes an osd target lun which
- * contains data belonging to this FS. (Same partition_id on all devices)
- */
-struct exofs_dt_device_info {
- __le32 systemid_len;
- u8 systemid[OSD_SYSTEMID_LEN];
- __le64 long_name_offset; /* If !0 then offset-in-file */
- __le32 osdname_len; /* */
- u8 osdname[44]; /* Embedded, usually an ASCII uuid */
-} __packed;
-
-/*
- * The EXOFS device table - stored in object EXOFS_DEVTABLE_ID's data.
- * It contains the raid used for this multi-device FS and an array of
- * participating devices.
- */
-struct exofs_device_table {
- __le32 dt_version; /* == EXOFS_DT_VER */
- struct exofs_dt_data_map dt_data_map; /* Raid policy to use */
-
- /* Reserved space for future use. Total including this:
- * (8 * sizeof(le64))
- */
- __le64 __Resurved[4];
-
- __le64 dt_num_devices; /* Array size */
- struct exofs_dt_device_info dt_dev_table[]; /* Array of devices */
-} __packed;
-
-/****************************************************************************
- * inode-related things
- ****************************************************************************/
-#define EXOFS_IDATA 5
-
-/*
- * The file control block - stored in an object's attributes. This is where
- * the in-memory inode is stored on disk.
- */
-struct exofs_fcb {
- __le64 i_size; /* Size of the file */
- __le16 i_mode; /* File mode */
- __le16 i_links_count; /* Links count */
- __le32 i_uid; /* Owner Uid */
- __le32 i_gid; /* Group Id */
- __le32 i_atime; /* Access time */
- __le32 i_ctime; /* Creation time */
- __le32 i_mtime; /* Modification time */
- __le32 i_flags; /* File flags (unused for now)*/
- __le32 i_generation; /* File version (for NFS) */
- __le32 i_data[EXOFS_IDATA]; /* Short symlink names and device #s */
-};
-
-#define EXOFS_INO_ATTR_SIZE sizeof(struct exofs_fcb)
-
-/* This is the Attribute the fcb is stored in */
-static const struct __weak osd_attr g_attr_inode_data = ATTR_DEF(
- EXOFS_APAGE_FS_DATA,
- EXOFS_ATTR_INODE_DATA,
- EXOFS_INO_ATTR_SIZE);
-
-/****************************************************************************
- * dentry-related things
- ****************************************************************************/
-#define EXOFS_NAME_LEN 255
-
-/*
- * The on-disk directory entry
- */
-struct exofs_dir_entry {
- __le64 inode_no; /* inode number */
- __le16 rec_len; /* directory entry length */
- u8 name_len; /* name length */
- u8 file_type; /* umm...file type */
- char name[EXOFS_NAME_LEN]; /* file name */
-};
-
-enum {
- EXOFS_FT_UNKNOWN,
- EXOFS_FT_REG_FILE,
- EXOFS_FT_DIR,
- EXOFS_FT_CHRDEV,
- EXOFS_FT_BLKDEV,
- EXOFS_FT_FIFO,
- EXOFS_FT_SOCK,
- EXOFS_FT_SYMLINK,
- EXOFS_FT_MAX
-};
-
-#define EXOFS_DIR_PAD 4
-#define EXOFS_DIR_ROUND (EXOFS_DIR_PAD - 1)
-#define EXOFS_DIR_REC_LEN(name_len) \
- (((name_len) + offsetof(struct exofs_dir_entry, name) + \
- EXOFS_DIR_ROUND) & ~EXOFS_DIR_ROUND)
-
-/*
- * The on-disk (optional) layout structure.
- * sits in an EXOFS_ATTR_INODE_FILE_LAYOUT or EXOFS_ATTR_INODE_DIR_LAYOUT
- * attribute, attached to any inode, usually to a directory.
- */
-
-enum exofs_inode_layout_gen_functions {
- LAYOUT_MOVING_WINDOW = 0,
- LAYOUT_IMPLICT = 1,
-};
-
-struct exofs_on_disk_inode_layout {
- __le16 gen_func; /* One of enum exofs_inode_layout_gen_functions */
- __le16 pad;
- union {
- /* gen_func == LAYOUT_MOVING_WINDOW (default) */
- struct exofs_layout_sliding_window {
- __le32 num_devices; /* first n devices in global-table*/
- } sliding_window __packed;
-
- /* gen_func == LAYOUT_IMPLICT */
- struct exofs_layout_implict_list {
- struct exofs_dt_data_map data_map;
- /* Variable array of size data_map.cb_num_comps. These
- * are device indexes of the devices in the global table
- */
- __le32 dev_indexes[];
- } implict __packed;
- };
-} __packed;
-
-static inline size_t exofs_on_disk_inode_layout_size(unsigned max_devs)
-{
- return sizeof(struct exofs_on_disk_inode_layout) +
- max_devs * sizeof(__le32);
-}
-
-#endif /*ifndef __EXOFS_COM_H__*/
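
Although exofs itself is being deleted, the EXOFS_DIR_REC_LEN macro above shows the ext2-family idiom it was built on: directory records are padded to 4-byte multiples by adding the rounding constant and masking it back off. A standalone demonstration with hypothetical names:

#include <stdio.h>
#include <stddef.h>

struct dir_entry_hdr {
	unsigned long long inode_no;	/* 8 bytes */
	unsigned short rec_len;		/* 2 bytes */
	unsigned char name_len;		/* 1 byte */
	unsigned char file_type;	/* 1 byte: header totals 12 */
	char name[];
};

#define DIR_PAD 4
#define DIR_ROUND (DIR_PAD - 1)
#define DIR_REC_LEN(name_len) \
	(((name_len) + offsetof(struct dir_entry_hdr, name) + DIR_ROUND) \
	 & ~DIR_ROUND)

int main(void)
{
	printf("%zu\n", (size_t)DIR_REC_LEN(1));	/* 13 rounds up to 16 */
	printf("%zu\n", (size_t)DIR_REC_LEN(5));	/* 17 rounds up to 20 */
	return 0;
}
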
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
deleted file mode 100644
index f0138674c1ed..000000000000
--- a/fs/exofs/dir.c
+++ /dev/null
@@ -1,661 +0,0 @@
-/*
- * Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com)
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * Copyrights for code taken from ext2:
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- * from
- * linux/fs/minix/inode.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation. Since it is based on ext2, and the only
- * valid version of GPL for the Linux kernel is version 2, the only valid
- * version of GPL for exofs is version 2.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/iversion.h>
-#include "exofs.h"
-
-static inline unsigned exofs_chunk_size(struct inode *inode)
-{
- return inode->i_sb->s_blocksize;
-}
-
-static inline void exofs_put_page(struct page *page)
-{
- kunmap(page);
- put_page(page);
-}
-
-static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr)
-{
- loff_t last_byte = inode->i_size;
-
- last_byte -= page_nr << PAGE_SHIFT;
- if (last_byte > PAGE_SIZE)
- last_byte = PAGE_SIZE;
- return last_byte;
-}
-
-static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len)
-{
- struct address_space *mapping = page->mapping;
- struct inode *dir = mapping->host;
- int err = 0;
-
- inode_inc_iversion(dir);
-
- if (!PageUptodate(page))
- SetPageUptodate(page);
-
- if (pos+len > dir->i_size) {
- i_size_write(dir, pos+len);
- mark_inode_dirty(dir);
- }
- set_page_dirty(page);
-
- if (IS_DIRSYNC(dir))
- err = write_one_page(page);
- else
- unlock_page(page);
-
- return err;
-}
-
-static bool exofs_check_page(struct page *page)
-{
- struct inode *dir = page->mapping->host;
- unsigned chunk_size = exofs_chunk_size(dir);
- char *kaddr = page_address(page);
- unsigned offs, rec_len;
- unsigned limit = PAGE_SIZE;
- struct exofs_dir_entry *p;
- char *error;
-
- /* if the page is the last one in the directory */
- if ((dir->i_size >> PAGE_SHIFT) == page->index) {
- limit = dir->i_size & ~PAGE_MASK;
- if (limit & (chunk_size - 1))
- goto Ebadsize;
- if (!limit)
- goto out;
- }
- for (offs = 0; offs <= limit - EXOFS_DIR_REC_LEN(1); offs += rec_len) {
- p = (struct exofs_dir_entry *)(kaddr + offs);
- rec_len = le16_to_cpu(p->rec_len);
-
- if (rec_len < EXOFS_DIR_REC_LEN(1))
- goto Eshort;
- if (rec_len & 3)
- goto Ealign;
- if (rec_len < EXOFS_DIR_REC_LEN(p->name_len))
- goto Enamelen;
- if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
- goto Espan;
- }
- if (offs != limit)
- goto Eend;
-out:
- SetPageChecked(page);
- return true;
-
-Ebadsize:
- EXOFS_ERR("ERROR [exofs_check_page]: "
- "size of directory(0x%lx) is not a multiple of chunk size\n",
- dir->i_ino
- );
- goto fail;
-Eshort:
- error = "rec_len is smaller than minimal";
- goto bad_entry;
-Ealign:
- error = "unaligned directory entry";
- goto bad_entry;
-Enamelen:
- error = "rec_len is too small for name_len";
- goto bad_entry;
-Espan:
- error = "directory entry across blocks";
- goto bad_entry;
-bad_entry:
- EXOFS_ERR(
- "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
- "offset=%lu, inode=0x%llx, rec_len=%d, name_len=%d\n",
- dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
- _LLU(le64_to_cpu(p->inode_no)),
- rec_len, p->name_len);
- goto fail;
-Eend:
- p = (struct exofs_dir_entry *)(kaddr + offs);
- EXOFS_ERR("ERROR [exofs_check_page]: "
- "entry in directory(0x%lx) spans the page boundary"
- "offset=%lu, inode=0x%llx\n",
- dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
- _LLU(le64_to_cpu(p->inode_no)));
-fail:
- SetPageError(page);
- return false;
-}
-
-static struct page *exofs_get_page(struct inode *dir, unsigned long n)
-{
- struct address_space *mapping = dir->i_mapping;
- struct page *page = read_mapping_page(mapping, n, NULL);
-
- if (!IS_ERR(page)) {
- kmap(page);
- if (unlikely(!PageChecked(page))) {
- if (PageError(page) || !exofs_check_page(page))
- goto fail;
- }
- }
- return page;
-
-fail:
- exofs_put_page(page);
- return ERR_PTR(-EIO);
-}
-
-static inline int exofs_match(int len, const unsigned char *name,
- struct exofs_dir_entry *de)
-{
- if (len != de->name_len)
- return 0;
- if (!de->inode_no)
- return 0;
- return !memcmp(name, de->name, len);
-}
-
-static inline
-struct exofs_dir_entry *exofs_next_entry(struct exofs_dir_entry *p)
-{
- return (struct exofs_dir_entry *)((char *)p + le16_to_cpu(p->rec_len));
-}
-
-static inline unsigned
-exofs_validate_entry(char *base, unsigned offset, unsigned mask)
-{
- struct exofs_dir_entry *de = (struct exofs_dir_entry *)(base + offset);
- struct exofs_dir_entry *p =
- (struct exofs_dir_entry *)(base + (offset&mask));
- while ((char *)p < (char *)de) {
- if (p->rec_len == 0)
- break;
- p = exofs_next_entry(p);
- }
- return (char *)p - base;
-}
-
-static unsigned char exofs_filetype_table[EXOFS_FT_MAX] = {
- [EXOFS_FT_UNKNOWN] = DT_UNKNOWN,
- [EXOFS_FT_REG_FILE] = DT_REG,
- [EXOFS_FT_DIR] = DT_DIR,
- [EXOFS_FT_CHRDEV] = DT_CHR,
- [EXOFS_FT_BLKDEV] = DT_BLK,
- [EXOFS_FT_FIFO] = DT_FIFO,
- [EXOFS_FT_SOCK] = DT_SOCK,
- [EXOFS_FT_SYMLINK] = DT_LNK,
-};
-
-#define S_SHIFT 12
-static unsigned char exofs_type_by_mode[S_IFMT >> S_SHIFT] = {
- [S_IFREG >> S_SHIFT] = EXOFS_FT_REG_FILE,
- [S_IFDIR >> S_SHIFT] = EXOFS_FT_DIR,
- [S_IFCHR >> S_SHIFT] = EXOFS_FT_CHRDEV,
- [S_IFBLK >> S_SHIFT] = EXOFS_FT_BLKDEV,
- [S_IFIFO >> S_SHIFT] = EXOFS_FT_FIFO,
- [S_IFSOCK >> S_SHIFT] = EXOFS_FT_SOCK,
- [S_IFLNK >> S_SHIFT] = EXOFS_FT_SYMLINK,
-};
-
-static inline
-void exofs_set_de_type(struct exofs_dir_entry *de, struct inode *inode)
-{
- umode_t mode = inode->i_mode;
- de->file_type = exofs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
-}
-
-static int
-exofs_readdir(struct file *file, struct dir_context *ctx)
-{
- loff_t pos = ctx->pos;
- struct inode *inode = file_inode(file);
- unsigned int offset = pos & ~PAGE_MASK;
- unsigned long n = pos >> PAGE_SHIFT;
- unsigned long npages = dir_pages(inode);
- unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
- bool need_revalidate = !inode_eq_iversion(inode, file->f_version);
-
- if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1))
- return 0;
-
- for ( ; n < npages; n++, offset = 0) {
- char *kaddr, *limit;
- struct exofs_dir_entry *de;
- struct page *page = exofs_get_page(inode, n);
-
- if (IS_ERR(page)) {
- EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
- inode->i_ino);
- ctx->pos += PAGE_SIZE - offset;
- return PTR_ERR(page);
- }
- kaddr = page_address(page);
- if (unlikely(need_revalidate)) {
- if (offset) {
- offset = exofs_validate_entry(kaddr, offset,
- chunk_mask);
- ctx->pos = (n<<PAGE_SHIFT) + offset;
- }
- file->f_version = inode_query_iversion(inode);
- need_revalidate = false;
- }
- de = (struct exofs_dir_entry *)(kaddr + offset);
- limit = kaddr + exofs_last_byte(inode, n) -
- EXOFS_DIR_REC_LEN(1);
- for (; (char *)de <= limit; de = exofs_next_entry(de)) {
- if (de->rec_len == 0) {
- EXOFS_ERR("ERROR: "
- "zero-length entry in directory(0x%lx)\n",
- inode->i_ino);
- exofs_put_page(page);
- return -EIO;
- }
- if (de->inode_no) {
- unsigned char t;
-
- if (de->file_type < EXOFS_FT_MAX)
- t = exofs_filetype_table[de->file_type];
- else
- t = DT_UNKNOWN;
-
- if (!dir_emit(ctx, de->name, de->name_len,
- le64_to_cpu(de->inode_no),
- t)) {
- exofs_put_page(page);
- return 0;
- }
- }
- ctx->pos += le16_to_cpu(de->rec_len);
- }
- exofs_put_page(page);
- }
- return 0;
-}
-
-struct exofs_dir_entry *exofs_find_entry(struct inode *dir,
- struct dentry *dentry, struct page **res_page)
-{
- const unsigned char *name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
- unsigned reclen = EXOFS_DIR_REC_LEN(namelen);
- unsigned long start, n;
- unsigned long npages = dir_pages(dir);
- struct page *page = NULL;
- struct exofs_i_info *oi = exofs_i(dir);
- struct exofs_dir_entry *de;
-
- if (npages == 0)
- goto out;
-
- *res_page = NULL;
-
- start = oi->i_dir_start_lookup;
- if (start >= npages)
- start = 0;
- n = start;
- do {
- char *kaddr;
- page = exofs_get_page(dir, n);
- if (!IS_ERR(page)) {
- kaddr = page_address(page);
- de = (struct exofs_dir_entry *) kaddr;
- kaddr += exofs_last_byte(dir, n) - reclen;
- while ((char *) de <= kaddr) {
- if (de->rec_len == 0) {
- EXOFS_ERR("ERROR: zero-length entry in "
- "directory(0x%lx)\n",
- dir->i_ino);
- exofs_put_page(page);
- goto out;
- }
- if (exofs_match(namelen, name, de))
- goto found;
- de = exofs_next_entry(de);
- }
- exofs_put_page(page);
- }
- if (++n >= npages)
- n = 0;
- } while (n != start);
-out:
- return NULL;
-
-found:
- *res_page = page;
- oi->i_dir_start_lookup = n;
- return de;
-}
-
-struct exofs_dir_entry *exofs_dotdot(struct inode *dir, struct page **p)
-{
- struct page *page = exofs_get_page(dir, 0);
- struct exofs_dir_entry *de = NULL;
-
- if (!IS_ERR(page)) {
- de = exofs_next_entry(
- (struct exofs_dir_entry *)page_address(page));
- *p = page;
- }
- return de;
-}
-
-ino_t exofs_parent_ino(struct dentry *child)
-{
- struct page *page;
- struct exofs_dir_entry *de;
- ino_t ino;
-
- de = exofs_dotdot(d_inode(child), &page);
- if (!de)
- return 0;
-
- ino = le64_to_cpu(de->inode_no);
- exofs_put_page(page);
- return ino;
-}
-
-ino_t exofs_inode_by_name(struct inode *dir, struct dentry *dentry)
-{
- ino_t res = 0;
- struct exofs_dir_entry *de;
- struct page *page;
-
- de = exofs_find_entry(dir, dentry, &page);
- if (de) {
- res = le64_to_cpu(de->inode_no);
- exofs_put_page(page);
- }
- return res;
-}
-
-int exofs_set_link(struct inode *dir, struct exofs_dir_entry *de,
- struct page *page, struct inode *inode)
-{
- loff_t pos = page_offset(page) +
- (char *) de - (char *) page_address(page);
- unsigned len = le16_to_cpu(de->rec_len);
- int err;
-
- lock_page(page);
- err = exofs_write_begin(NULL, page->mapping, pos, len, 0, &page, NULL);
- if (err)
- EXOFS_ERR("exofs_set_link: exofs_write_begin FAILED => %d\n",
- err);
-
- de->inode_no = cpu_to_le64(inode->i_ino);
- exofs_set_de_type(de, inode);
- if (likely(!err))
- err = exofs_commit_chunk(page, pos, len);
- exofs_put_page(page);
- dir->i_mtime = dir->i_ctime = current_time(dir);
- mark_inode_dirty(dir);
- return err;
-}
-
-int exofs_add_link(struct dentry *dentry, struct inode *inode)
-{
- struct inode *dir = d_inode(dentry->d_parent);
- const unsigned char *name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
- unsigned chunk_size = exofs_chunk_size(dir);
- unsigned reclen = EXOFS_DIR_REC_LEN(namelen);
- unsigned short rec_len, name_len;
- struct page *page = NULL;
- struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
- struct exofs_dir_entry *de;
- unsigned long npages = dir_pages(dir);
- unsigned long n;
- char *kaddr;
- loff_t pos;
- int err;
-
- for (n = 0; n <= npages; n++) {
- char *dir_end;
-
- page = exofs_get_page(dir, n);
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto out;
- lock_page(page);
- kaddr = page_address(page);
- dir_end = kaddr + exofs_last_byte(dir, n);
- de = (struct exofs_dir_entry *)kaddr;
- kaddr += PAGE_SIZE - reclen;
- while ((char *)de <= kaddr) {
- if ((char *)de == dir_end) {
- name_len = 0;
- rec_len = chunk_size;
- de->rec_len = cpu_to_le16(chunk_size);
- de->inode_no = 0;
- goto got_it;
- }
- if (de->rec_len == 0) {
- EXOFS_ERR("ERROR: exofs_add_link: "
- "zero-length entry in directory(0x%lx)\n",
- inode->i_ino);
- err = -EIO;
- goto out_unlock;
- }
- err = -EEXIST;
- if (exofs_match(namelen, name, de))
- goto out_unlock;
- name_len = EXOFS_DIR_REC_LEN(de->name_len);
- rec_len = le16_to_cpu(de->rec_len);
- if (!de->inode_no && rec_len >= reclen)
- goto got_it;
- if (rec_len >= name_len + reclen)
- goto got_it;
- de = (struct exofs_dir_entry *) ((char *) de + rec_len);
- }
- unlock_page(page);
- exofs_put_page(page);
- }
-
- EXOFS_ERR("exofs_add_link: BAD dentry=%p or inode=0x%lx\n",
- dentry, inode->i_ino);
- return -EINVAL;
-
-got_it:
- pos = page_offset(page) +
- (char *)de - (char *)page_address(page);
- err = exofs_write_begin(NULL, page->mapping, pos, rec_len, 0,
- &page, NULL);
- if (err)
- goto out_unlock;
- if (de->inode_no) {
- struct exofs_dir_entry *de1 =
- (struct exofs_dir_entry *)((char *)de + name_len);
- de1->rec_len = cpu_to_le16(rec_len - name_len);
- de->rec_len = cpu_to_le16(name_len);
- de = de1;
- }
- de->name_len = namelen;
- memcpy(de->name, name, namelen);
- de->inode_no = cpu_to_le64(inode->i_ino);
- exofs_set_de_type(de, inode);
- err = exofs_commit_chunk(page, pos, rec_len);
- dir->i_mtime = dir->i_ctime = current_time(dir);
- mark_inode_dirty(dir);
- sbi->s_numfiles++;
-
-out_put:
- exofs_put_page(page);
-out:
- return err;
-out_unlock:
- unlock_page(page);
- goto out_put;
-}
-
-int exofs_delete_entry(struct exofs_dir_entry *dir, struct page *page)
-{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
- char *kaddr = page_address(page);
- unsigned from = ((char *)dir - kaddr) & ~(exofs_chunk_size(inode)-1);
- unsigned to = ((char *)dir - kaddr) + le16_to_cpu(dir->rec_len);
- loff_t pos;
- struct exofs_dir_entry *pde = NULL;
- struct exofs_dir_entry *de = (struct exofs_dir_entry *) (kaddr + from);
- int err;
-
- while (de < dir) {
- if (de->rec_len == 0) {
- EXOFS_ERR("ERROR: exofs_delete_entry:"
- "zero-length entry in directory(0x%lx)\n",
- inode->i_ino);
- err = -EIO;
- goto out;
- }
- pde = de;
- de = exofs_next_entry(de);
- }
- if (pde)
- from = (char *)pde - (char *)page_address(page);
- pos = page_offset(page) + from;
- lock_page(page);
- err = exofs_write_begin(NULL, page->mapping, pos, to - from, 0,
- &page, NULL);
- if (err)
- EXOFS_ERR("exofs_delete_entry: exofs_write_begin FAILED => %d\n",
- err);
- if (pde)
- pde->rec_len = cpu_to_le16(to - from);
- dir->inode_no = 0;
- if (likely(!err))
- err = exofs_commit_chunk(page, pos, to - from);
- inode->i_ctime = inode->i_mtime = current_time(inode);
- mark_inode_dirty(inode);
- sbi->s_numfiles--;
-out:
- exofs_put_page(page);
- return err;
-}
-
-/* kept aligned on 4 bytes */
-#define THIS_DIR ".\0\0"
-#define PARENT_DIR "..\0"
-
-int exofs_make_empty(struct inode *inode, struct inode *parent)
-{
- struct address_space *mapping = inode->i_mapping;
- struct page *page = grab_cache_page(mapping, 0);
- unsigned chunk_size = exofs_chunk_size(inode);
- struct exofs_dir_entry *de;
- int err;
- void *kaddr;
-
- if (!page)
- return -ENOMEM;
-
- err = exofs_write_begin(NULL, page->mapping, 0, chunk_size, 0,
- &page, NULL);
- if (err) {
- unlock_page(page);
- goto fail;
- }
-
- kaddr = kmap_atomic(page);
- de = (struct exofs_dir_entry *)kaddr;
- de->name_len = 1;
- de->rec_len = cpu_to_le16(EXOFS_DIR_REC_LEN(1));
- memcpy(de->name, THIS_DIR, sizeof(THIS_DIR));
- de->inode_no = cpu_to_le64(inode->i_ino);
- exofs_set_de_type(de, inode);
-
- de = (struct exofs_dir_entry *)(kaddr + EXOFS_DIR_REC_LEN(1));
- de->name_len = 2;
- de->rec_len = cpu_to_le16(chunk_size - EXOFS_DIR_REC_LEN(1));
- de->inode_no = cpu_to_le64(parent->i_ino);
- memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
- exofs_set_de_type(de, inode);
- kunmap_atomic(kaddr);
- err = exofs_commit_chunk(page, 0, chunk_size);
-fail:
- put_page(page);
- return err;
-}
-
-int exofs_empty_dir(struct inode *inode)
-{
- struct page *page = NULL;
- unsigned long i, npages = dir_pages(inode);
-
- for (i = 0; i < npages; i++) {
- char *kaddr;
- struct exofs_dir_entry *de;
- page = exofs_get_page(inode, i);
-
- if (IS_ERR(page))
- continue;
-
- kaddr = page_address(page);
- de = (struct exofs_dir_entry *)kaddr;
- kaddr += exofs_last_byte(inode, i) - EXOFS_DIR_REC_LEN(1);
-
- while ((char *)de <= kaddr) {
- if (de->rec_len == 0) {
- EXOFS_ERR("ERROR: exofs_empty_dir: "
- "zero-length directory entry"
- "kaddr=%p, de=%p\n", kaddr, de);
- goto not_empty;
- }
- if (de->inode_no != 0) {
- /* check for . and .. */
- if (de->name[0] != '.')
- goto not_empty;
- if (de->name_len > 2)
- goto not_empty;
- if (de->name_len < 2) {
- if (le64_to_cpu(de->inode_no) !=
- inode->i_ino)
- goto not_empty;
- } else if (de->name[1] != '.')
- goto not_empty;
- }
- de = exofs_next_entry(de);
- }
- exofs_put_page(page);
- }
- return 1;
-
-not_empty:
- exofs_put_page(page);
- return 0;
-}
-
-const struct file_operations exofs_dir_operations = {
- .llseek = generic_file_llseek,
- .read = generic_read_dir,
- .iterate_shared = exofs_readdir,
-};
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
deleted file mode 100644
index 5dc392404559..000000000000
--- a/fs/exofs/exofs.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com)
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * Copyrights for code taken from ext2:
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- * from
- * linux/fs/minix/inode.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation. Since it is based on ext2, and the only
- * valid version of GPL for the Linux kernel is version 2, the only valid
- * version of GPL for exofs is version 2.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __EXOFS_H__
-#define __EXOFS_H__
-
-#include <linux/fs.h>
-#include <linux/time.h>
-#include <linux/backing-dev.h>
-#include <scsi/osd_ore.h>
-
-#include "common.h"
-
-#define EXOFS_ERR(fmt, a...) printk(KERN_ERR "exofs: " fmt, ##a)
-
-#ifdef CONFIG_EXOFS_DEBUG
-#define EXOFS_DBGMSG(fmt, a...) \
- printk(KERN_NOTICE "exofs @%s:%d: " fmt, __func__, __LINE__, ##a)
-#else
-#define EXOFS_DBGMSG(fmt, a...) \
- do { if (0) printk(fmt, ##a); } while (0)
-#endif
-
-/* u64 has problems with printk this will cast it to unsigned long long */
-#define _LLU(x) (unsigned long long)(x)
-
-struct exofs_dev {
- struct ore_dev ored;
- unsigned did;
- unsigned urilen;
- uint8_t *uri;
- struct kobject ed_kobj;
-};
-/*
- * our extension to the in-memory superblock
- */
-struct exofs_sb_info {
- struct exofs_sb_stats s_ess; /* Written often, pre-allocate*/
- int s_timeout; /* timeout for OSD operations */
- uint64_t s_nextid; /* highest object ID used */
- uint32_t s_numfiles; /* number of files on fs */
- spinlock_t s_next_gen_lock; /* spinlock for gen # update */
- u32 s_next_generation; /* next gen # to use */
- atomic_t s_curr_pending; /* number of pending commands */
-
- struct ore_layout layout; /* Default files layout */
- struct ore_comp one_comp; /* id & cred of partition id=0*/
- struct ore_components oc; /* comps for the partition */
- struct kobject s_kobj; /* holds per-sbi kobject */
-};
-
-/*
- * our extension to the in-memory inode
- */
-struct exofs_i_info {
- struct inode vfs_inode; /* normal in-memory inode */
- wait_queue_head_t i_wq; /* wait queue for inode */
- unsigned long i_flags; /* various atomic flags */
- uint32_t i_data[EXOFS_IDATA];/*short symlink names and device #s*/
- uint32_t i_dir_start_lookup; /* which page to start lookup */
- uint64_t i_commit_size; /* the object's written length */
- struct ore_comp one_comp; /* same component for all devices */
- struct ore_components oc; /* inode view of the device table */
-};
-
-static inline osd_id exofs_oi_objno(struct exofs_i_info *oi)
-{
- return oi->vfs_inode.i_ino + EXOFS_OBJ_OFF;
-}
-
-/*
- * our inode flags
- */
-#define OBJ_2BCREATED 0 /* object will be created soon*/
-#define OBJ_CREATED 1 /* object has been created on the osd*/
-
-static inline int obj_2bcreated(struct exofs_i_info *oi)
-{
- return test_bit(OBJ_2BCREATED, &oi->i_flags);
-}
-
-static inline void set_obj_2bcreated(struct exofs_i_info *oi)
-{
- set_bit(OBJ_2BCREATED, &oi->i_flags);
-}
-
-static inline int obj_created(struct exofs_i_info *oi)
-{
- return test_bit(OBJ_CREATED, &oi->i_flags);
-}
-
-static inline void set_obj_created(struct exofs_i_info *oi)
-{
- set_bit(OBJ_CREATED, &oi->i_flags);
-}
-
-int __exofs_wait_obj_created(struct exofs_i_info *oi);
-static inline int wait_obj_created(struct exofs_i_info *oi)
-{
- if (likely(obj_created(oi)))
- return 0;
-
- return __exofs_wait_obj_created(oi);
-}
-
-/*
- * get to our inode from the vfs inode
- */
-static inline struct exofs_i_info *exofs_i(struct inode *inode)
-{
- return container_of(inode, struct exofs_i_info, vfs_inode);
-}
-
-/*
- * Maximum count of links to a file
- */
-#define EXOFS_LINK_MAX 32000
-
-/*************************
- * function declarations *
- *************************/
-
-/* inode.c */
-unsigned exofs_max_io_pages(struct ore_layout *layout,
- unsigned expected_pages);
-int exofs_setattr(struct dentry *, struct iattr *);
-int exofs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-extern struct inode *exofs_iget(struct super_block *, unsigned long);
-struct inode *exofs_new_inode(struct inode *, umode_t);
-extern int exofs_write_inode(struct inode *, struct writeback_control *wbc);
-extern void exofs_evict_inode(struct inode *);
-
-/* dir.c: */
-int exofs_add_link(struct dentry *, struct inode *);
-ino_t exofs_inode_by_name(struct inode *, struct dentry *);
-int exofs_delete_entry(struct exofs_dir_entry *, struct page *);
-int exofs_make_empty(struct inode *, struct inode *);
-struct exofs_dir_entry *exofs_find_entry(struct inode *, struct dentry *,
- struct page **);
-int exofs_empty_dir(struct inode *);
-struct exofs_dir_entry *exofs_dotdot(struct inode *, struct page **);
-ino_t exofs_parent_ino(struct dentry *child);
-int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *,
- struct inode *);
-
-/* super.c */
-void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
- const struct osd_obj_id *obj);
-int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
-
-/* sys.c */
-int exofs_sysfs_init(void);
-void exofs_sysfs_uninit(void);
-int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
- struct exofs_dt_device_info *dt_dev);
-void exofs_sysfs_sb_del(struct exofs_sb_info *sbi);
-int exofs_sysfs_odev_add(struct exofs_dev *edev,
- struct exofs_sb_info *sbi);
-void exofs_sysfs_dbg_print(void);
-
-/*********************
- * operation vectors *
- *********************/
-/* dir.c: */
-extern const struct file_operations exofs_dir_operations;
-
-/* file.c */
-extern const struct inode_operations exofs_file_inode_operations;
-extern const struct file_operations exofs_file_operations;
-
-/* inode.c */
-extern const struct address_space_operations exofs_aops;
-
-/* namei.c */
-extern const struct inode_operations exofs_dir_inode_operations;
-extern const struct inode_operations exofs_special_inode_operations;
-
-/* exofs_init_comps will initialize an ore_components device array
- * pointing to a single ore_comp struct, and a round-robin view
- * of the device table.
- * The first device of each inode is the [inode->ino % num_devices]
- * and the rest of the devices sequentially following where the
- * first device is after the last device.
- * It is assumed that the global device array at @sbi is twice
- * bigger and that the device table repeats twice.
- * See: exofs_read_lookup_dev_table()
- */
-static inline void exofs_init_comps(struct ore_components *oc,
- struct ore_comp *one_comp,
- struct exofs_sb_info *sbi, osd_id oid)
-{
- unsigned dev_mod = (unsigned)oid, first_dev;
-
- one_comp->obj.partition = sbi->one_comp.obj.partition;
- one_comp->obj.id = oid;
- exofs_make_credential(one_comp->cred, &one_comp->obj);
-
- oc->first_dev = 0;
- oc->numdevs = sbi->layout.group_width * sbi->layout.mirrors_p1 *
- sbi->layout.group_count;
- oc->single_comp = EC_SINGLE_COMP;
- oc->comps = one_comp;
-
- /* Round robin device view of the table */
- first_dev = (dev_mod * sbi->layout.mirrors_p1) % sbi->oc.numdevs;
- oc->ods = &sbi->oc.ods[first_dev];
-}
-
-#endif
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
deleted file mode 100644
index a94594ea2aa3..000000000000
--- a/fs/exofs/file.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com)
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * Copyrights for code taken from ext2:
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- * from
- * linux/fs/minix/inode.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation. Since it is based on ext2, and the only
- * valid version of GPL for the Linux kernel is version 2, the only valid
- * version of GPL for exofs is version 2.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "exofs.h"
-
-static int exofs_release_file(struct inode *inode, struct file *filp)
-{
- return 0;
-}
-
-/* exofs_file_fsync - flush the inode to disk
- *
- * Note, in exofs all metadata is written as part of inode, regardless.
- * The writeout is synchronous
- */
-static int exofs_file_fsync(struct file *filp, loff_t start, loff_t end,
- int datasync)
-{
- struct inode *inode = filp->f_mapping->host;
- int ret;
-
- ret = file_write_and_wait_range(filp, start, end);
- if (ret)
- return ret;
-
- inode_lock(inode);
- ret = sync_inode_metadata(filp->f_mapping->host, 1);
- inode_unlock(inode);
- return ret;
-}
-
-static int exofs_flush(struct file *file, fl_owner_t id)
-{
- int ret = vfs_fsync(file, 0);
- /* TODO: Flush the OSD target */
- return ret;
-}
-
-const struct file_operations exofs_file_operations = {
- .llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
- .open = generic_file_open,
- .release = exofs_release_file,
- .fsync = exofs_file_fsync,
- .flush = exofs_flush,
- .splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
-};
-
-const struct inode_operations exofs_file_inode_operations = {
- .setattr = exofs_setattr,
-};
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
deleted file mode 100644
index 5f81fcd383a4..000000000000
--- a/fs/exofs/inode.c
+++ /dev/null
@@ -1,1514 +0,0 @@
-/*
- * Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com)
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * Copyrights for code taken from ext2:
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- * from
- * linux/fs/minix/inode.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation. Since it is based on ext2, and the only
- * valid version of GPL for the Linux kernel is version 2, the only valid
- * version of GPL for exofs is version 2.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/slab.h>
-
-#include "exofs.h"
-
-#define EXOFS_DBGMSG2(M...) do {} while (0)
-
-unsigned exofs_max_io_pages(struct ore_layout *layout,
- unsigned expected_pages)
-{
- unsigned pages = min_t(unsigned, expected_pages,
- layout->max_io_length / PAGE_SIZE);
-
- return pages;
-}
-
-struct page_collect {
- struct exofs_sb_info *sbi;
- struct inode *inode;
- unsigned expected_pages;
- struct ore_io_state *ios;
-
- struct page **pages;
- unsigned alloc_pages;
- unsigned nr_pages;
- unsigned long length;
- loff_t pg_first; /* keep 64bit also in 32-arches */
- bool read_4_write; /* This means two things: that the read is sync
- * And the pages should not be unlocked.
- */
- struct page *that_locked_page;
-};
-
-static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
- struct inode *inode)
-{
- struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
-
- pcol->sbi = sbi;
- pcol->inode = inode;
- pcol->expected_pages = expected_pages;
-
- pcol->ios = NULL;
- pcol->pages = NULL;
- pcol->alloc_pages = 0;
- pcol->nr_pages = 0;
- pcol->length = 0;
- pcol->pg_first = -1;
- pcol->read_4_write = false;
- pcol->that_locked_page = NULL;
-}
-
-static void _pcol_reset(struct page_collect *pcol)
-{
- pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);
-
- pcol->pages = NULL;
- pcol->alloc_pages = 0;
- pcol->nr_pages = 0;
- pcol->length = 0;
- pcol->pg_first = -1;
- pcol->ios = NULL;
- pcol->that_locked_page = NULL;
-
- /* this is probably the end of the loop but in writes
- * it might not end here. don't be left with nothing
- */
- if (!pcol->expected_pages)
- pcol->expected_pages =
- exofs_max_io_pages(&pcol->sbi->layout, ~0);
-}
-
-static int pcol_try_alloc(struct page_collect *pcol)
-{
- unsigned pages;
-
- /* TODO: easily support bio chaining */
- pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);
-
- for (; pages; pages >>= 1) {
- pcol->pages = kmalloc_array(pages, sizeof(struct page *),
- GFP_KERNEL);
- if (likely(pcol->pages)) {
- pcol->alloc_pages = pages;
- return 0;
- }
- }
-
- EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
- pcol->expected_pages);
- return -ENOMEM;
-}
-
-static void pcol_free(struct page_collect *pcol)
-{
- kfree(pcol->pages);
- pcol->pages = NULL;
-
- if (pcol->ios) {
- ore_put_io_state(pcol->ios);
- pcol->ios = NULL;
- }
-}
-
-static int pcol_add_page(struct page_collect *pcol, struct page *page,
- unsigned len)
-{
- if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
- return -ENOMEM;
-
- pcol->pages[pcol->nr_pages++] = page;
- pcol->length += len;
- return 0;
-}
-
-enum {PAGE_WAS_NOT_IN_IO = 17};
-static int update_read_page(struct page *page, int ret)
-{
- switch (ret) {
- case 0:
- /* Everything is OK */
- SetPageUptodate(page);
- if (PageError(page))
- ClearPageError(page);
- break;
- case -EFAULT:
- /* In this case we were trying to read something that wasn't on
- * disk yet - return a page full of zeroes. This should be OK,
- * because the object should be empty (if there was a write
- * before this read, the read would be waiting with the page
- * locked */
- clear_highpage(page);
-
- SetPageUptodate(page);
- if (PageError(page))
- ClearPageError(page);
- EXOFS_DBGMSG("recovered read error\n");
- /* fall through */
- case PAGE_WAS_NOT_IN_IO:
- ret = 0; /* recovered error */
- break;
- default:
- SetPageError(page);
- }
- return ret;
-}
-
-static void update_write_page(struct page *page, int ret)
-{
- if (unlikely(ret == PAGE_WAS_NOT_IN_IO))
- return; /* don't pass start don't collect $200 */
-
- if (ret) {
- mapping_set_error(page->mapping, ret);
- SetPageError(page);
- }
- end_page_writeback(page);
-}
-
-/* Called at the end of reads, to optionally unlock pages and update their
- * status.
- */
-static int __readpages_done(struct page_collect *pcol)
-{
- int i;
- u64 good_bytes;
- u64 length = 0;
- int ret = ore_check_io(pcol->ios, NULL);
-
- if (likely(!ret)) {
- good_bytes = pcol->length;
- ret = PAGE_WAS_NOT_IN_IO;
- } else {
- good_bytes = 0;
- }
-
- EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
- " length=0x%lx nr_pages=%u\n",
- pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
- pcol->nr_pages);
-
- for (i = 0; i < pcol->nr_pages; i++) {
- struct page *page = pcol->pages[i];
- struct inode *inode = page->mapping->host;
- int page_stat;
-
- if (inode != pcol->inode)
- continue; /* osd might add more pages at end */
-
- if (likely(length < good_bytes))
- page_stat = 0;
- else
- page_stat = ret;
-
- EXOFS_DBGMSG2(" readpages_done(0x%lx, 0x%lx) %s\n",
- inode->i_ino, page->index,
- page_stat ? "bad_bytes" : "good_bytes");
-
- ret = update_read_page(page, page_stat);
- if (!pcol->read_4_write)
- unlock_page(page);
- length += PAGE_SIZE;
- }
-
- pcol_free(pcol);
- EXOFS_DBGMSG2("readpages_done END\n");
- return ret;
-}
-
-/* callback of async reads */
-static void readpages_done(struct ore_io_state *ios, void *p)
-{
- struct page_collect *pcol = p;
-
- __readpages_done(pcol);
- atomic_dec(&pcol->sbi->s_curr_pending);
- kfree(pcol);
-}
-
-static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
-{
- int i;
-
- for (i = 0; i < pcol->nr_pages; i++) {
- struct page *page = pcol->pages[i];
-
- if (rw == READ)
- update_read_page(page, ret);
- else
- update_write_page(page, ret);
-
- unlock_page(page);
- }
-}
-
-static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
- struct page_collect *pcol_src, struct page_collect *pcol)
-{
- /* length was wrong or offset was not page aligned */
- BUG_ON(pcol_src->nr_pages < ios->nr_pages);
-
- if (pcol_src->nr_pages > ios->nr_pages) {
- struct page **src_page;
- unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
- unsigned long len_less = pcol_src->length - ios->length;
- unsigned i;
- int ret;
-
- /* This IO was trimmed */
- pcol_src->nr_pages = ios->nr_pages;
- pcol_src->length = ios->length;
-
- /* Left over pages are passed to the next io */
- pcol->expected_pages += pages_less;
- pcol->nr_pages = pages_less;
- pcol->length = len_less;
- src_page = pcol_src->pages + pcol_src->nr_pages;
- pcol->pg_first = (*src_page)->index;
-
- ret = pcol_try_alloc(pcol);
- if (unlikely(ret))
- return ret;
-
- for (i = 0; i < pages_less; ++i)
- pcol->pages[i] = *src_page++;
-
- EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
- "pages_less=0x%x expected_pages=0x%x "
- "next_offset=0x%llx next_len=0x%lx\n",
- pcol_src->nr_pages, pages_less, pcol->expected_pages,
- pcol->pg_first * PAGE_SIZE, pcol->length);
- }
- return 0;
-}
-
-static int read_exec(struct page_collect *pcol)
-{
- struct exofs_i_info *oi = exofs_i(pcol->inode);
- struct ore_io_state *ios;
- struct page_collect *pcol_copy = NULL;
- int ret;
-
- if (!pcol->pages)
- return 0;
-
- if (!pcol->ios) {
- int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
- pcol->pg_first << PAGE_SHIFT,
- pcol->length, &pcol->ios);
-
- if (ret)
- return ret;
- }
-
- ios = pcol->ios;
- ios->pages = pcol->pages;
-
- if (pcol->read_4_write) {
- ore_read(pcol->ios);
- return __readpages_done(pcol);
- }
-
- pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
- if (!pcol_copy) {
- ret = -ENOMEM;
- goto err;
- }
-
- *pcol_copy = *pcol;
- ios->done = readpages_done;
- ios->private = pcol_copy;
-
- /* pages ownership was passed to pcol_copy */
- _pcol_reset(pcol);
-
- ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
- if (unlikely(ret))
- goto err;
-
- EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
- pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));
-
- ret = ore_read(ios);
- if (unlikely(ret))
- goto err;
-
- atomic_inc(&pcol->sbi->s_curr_pending);
-
- return 0;
-
-err:
- if (!pcol_copy) /* Failed before ownership transfer */
- pcol_copy = pcol;
- _unlock_pcol_pages(pcol_copy, ret, READ);
- pcol_free(pcol_copy);
- kfree(pcol_copy);
-
- return ret;
-}
-
-/* readpage_strip is called either directly from readpage() or by the VFS from
- * within read_cache_pages(), to add one more page to be read. It will try to
- * collect as many contiguous pages as possible. If a discontinuity is
- * encountered, or it runs out of resources, it will submit the previous segment
- * and will start a new collection. Eventually caller must submit the last
- * segment if present.
- */
-static int readpage_strip(void *data, struct page *page)
-{
- struct page_collect *pcol = data;
- struct inode *inode = pcol->inode;
- struct exofs_i_info *oi = exofs_i(inode);
- loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> PAGE_SHIFT;
- size_t len;
- int ret;
-
- BUG_ON(!PageLocked(page));
-
- /* FIXME: Just for debugging, will be removed */
- if (PageUptodate(page))
- EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
- page->index);
-
- pcol->that_locked_page = page;
-
- if (page->index < end_index)
- len = PAGE_SIZE;
- else if (page->index == end_index)
- len = i_size & ~PAGE_MASK;
- else
- len = 0;
-
- if (!len || !obj_created(oi)) {
- /* this will be out of bounds, or doesn't exist yet.
- * Current page is cleared and the request is split
- */
- clear_highpage(page);
-
- SetPageUptodate(page);
- if (PageError(page))
- ClearPageError(page);
-
- if (!pcol->read_4_write)
- unlock_page(page);
- EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
- "read_4_write=%d index=0x%lx end_index=0x%lx "
- "splitting\n", inode->i_ino, len,
- pcol->read_4_write, page->index, end_index);
-
- return read_exec(pcol);
- }
-
-try_again:
-
- if (unlikely(pcol->pg_first == -1)) {
- pcol->pg_first = page->index;
- } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
- page->index)) {
- /* Discontinuity detected, split the request */
- ret = read_exec(pcol);
- if (unlikely(ret))
- goto fail;
- goto try_again;
- }
-
- if (!pcol->pages) {
- ret = pcol_try_alloc(pcol);
- if (unlikely(ret))
- goto fail;
- }
-
- if (len != PAGE_SIZE)
- zero_user(page, len, PAGE_SIZE - len);
-
- EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
- inode->i_ino, page->index, len);
-
- ret = pcol_add_page(pcol, page, len);
- if (ret) {
- EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
- "this_len=0x%zx nr_pages=%u length=0x%lx\n",
- page, len, pcol->nr_pages, pcol->length);
-
- /* split the request, and start again with current page */
- ret = read_exec(pcol);
- if (unlikely(ret))
- goto fail;
-
- goto try_again;
- }
-
- return 0;
-
-fail:
- /* SetPageError(page); ??? */
- unlock_page(page);
- return ret;
-}
-
-static int exofs_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
-{
- struct page_collect pcol;
- int ret;
-
- _pcol_init(&pcol, nr_pages, mapping->host);
-
- ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
- if (ret) {
- EXOFS_ERR("read_cache_pages => %d\n", ret);
- return ret;
- }
-
- ret = read_exec(&pcol);
- if (unlikely(ret))
- return ret;
-
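- /* read_exec() may have re-armed pcol with leftover pages trimmed from
-  * the first IO (see _maybe_not_all_in_one_io()), so call it once more
-  * to submit the remainder.
-  */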
- return read_exec(&pcol);
-}
-
-static int _readpage(struct page *page, bool read_4_write)
-{
- struct page_collect pcol;
- int ret;
-
- _pcol_init(&pcol, 1, page->mapping->host);
-
- pcol.read_4_write = read_4_write;
- ret = readpage_strip(&pcol, page);
- if (ret) {
- EXOFS_ERR("_readpage => %d\n", ret);
- return ret;
- }
-
- return read_exec(&pcol);
-}
-
-/*
- * We don't need the file
- */
-static int exofs_readpage(struct file *file, struct page *page)
-{
- return _readpage(page, false);
-}
-
-/* Callback for osd_write. All writes are asynchronous */
-static void writepages_done(struct ore_io_state *ios, void *p)
-{
- struct page_collect *pcol = p;
- int i;
- u64 good_bytes;
- u64 length = 0;
- int ret = ore_check_io(ios, NULL);
-
- atomic_dec(&pcol->sbi->s_curr_pending);
-
- if (likely(!ret)) {
- good_bytes = pcol->length;
- ret = PAGE_WAS_NOT_IN_IO;
- } else {
- good_bytes = 0;
- }
-
- EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
- " length=0x%lx nr_pages=%u\n",
- pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
- pcol->nr_pages);
-
- for (i = 0; i < pcol->nr_pages; i++) {
- struct page *page = pcol->pages[i];
- struct inode *inode = page->mapping->host;
- int page_stat;
-
- if (inode != pcol->inode)
- continue; /* osd might add more pages to a bio */
-
- if (likely(length < good_bytes))
- page_stat = 0;
- else
- page_stat = ret;
-
- update_write_page(page, page_stat);
- unlock_page(page);
- EXOFS_DBGMSG2(" writepages_done(0x%lx, 0x%lx) status=%d\n",
- inode->i_ino, page->index, page_stat);
-
- length += PAGE_SIZE;
- }
-
- pcol_free(pcol);
- kfree(pcol);
- EXOFS_DBGMSG2("writepages_done END\n");
-}
-
-static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
-{
- struct page_collect *pcol = priv;
- pgoff_t index = offset / PAGE_SIZE;
-
- if (!pcol->that_locked_page ||
- (pcol->that_locked_page->index != index)) {
- struct page *page;
- loff_t i_size = i_size_read(pcol->inode);
-
- if (offset >= i_size) {
- *uptodate = true;
- EXOFS_DBGMSG2("offset >= i_size index=0x%lx\n", index);
- return ZERO_PAGE(0);
- }
-
- page = find_get_page(pcol->inode->i_mapping, index);
- if (!page) {
- page = find_or_create_page(pcol->inode->i_mapping,
- index, GFP_NOFS);
- if (unlikely(!page)) {
- EXOFS_DBGMSG("grab_cache_page Failed "
- "index=0x%llx\n", _LLU(index));
- return NULL;
- }
- unlock_page(page);
- }
- *uptodate = PageUptodate(page);
- EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate);
- return page;
- } else {
- EXOFS_DBGMSG2("YES that_locked_page index=0x%lx\n",
- pcol->that_locked_page->index);
- *uptodate = true;
- return pcol->that_locked_page;
- }
-}
-
-static void __r4w_put_page(void *priv, struct page *page)
-{
- struct page_collect *pcol = priv;
-
- if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
- EXOFS_DBGMSG2("index=0x%lx\n", page->index);
- put_page(page);
- return;
- }
- EXOFS_DBGMSG2("that_locked_page index=0x%lx\n",
- ZERO_PAGE(0) == page ? -1 : page->index);
-}
-
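-/* ore calls these hooks to borrow (and later return) page-cache pages
- * when a write needs surrounding data read in first (read-4-write),
- * e.g. for parity computation in RAID layouts.
- */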
-static const struct _ore_r4w_op _r4w_op = {
- .get_page = &__r4w_get_page,
- .put_page = &__r4w_put_page,
-};
-
-static int write_exec(struct page_collect *pcol)
-{
- struct exofs_i_info *oi = exofs_i(pcol->inode);
- struct ore_io_state *ios;
- struct page_collect *pcol_copy = NULL;
- int ret;
-
- if (!pcol->pages)
- return 0;
-
- BUG_ON(pcol->ios);
- ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
- pcol->pg_first << PAGE_SHIFT,
- pcol->length, &pcol->ios);
- if (unlikely(ret))
- goto err;
-
- pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
- if (!pcol_copy) {
- EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
- ret = -ENOMEM;
- goto err;
- }
-
- *pcol_copy = *pcol;
-
- ios = pcol->ios;
- ios->pages = pcol_copy->pages;
- ios->done = writepages_done;
- ios->r4w = &_r4w_op;
- ios->private = pcol_copy;
-
- /* pages ownership was passed to pcol_copy */
- _pcol_reset(pcol);
-
- ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
- if (unlikely(ret))
- goto err;
-
- EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
- pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));
-
- ret = ore_write(ios);
- if (unlikely(ret)) {
- EXOFS_ERR("write_exec: ore_write() Failed\n");
- goto err;
- }
-
- atomic_inc(&pcol->sbi->s_curr_pending);
- return 0;
-
-err:
- if (!pcol_copy) /* Failed before ownership transfer */
- pcol_copy = pcol;
- _unlock_pcol_pages(pcol_copy, ret, WRITE);
- pcol_free(pcol_copy);
- kfree(pcol_copy);
-
- return ret;
-}
-
-/* writepage_strip is called either directly from writepage() or by the VFS from
- * within write_cache_pages(), to add one more page to be written to storage.
- * It will try to collect as many contiguous pages as possible. If a
- * discontinuity is encountered or it runs out of resources it will submit the
- * previous segment and will start a new collection.
- * Eventually the caller must submit the last segment, if present.
- */
-static int writepage_strip(struct page *page,
- struct writeback_control *wbc_unused, void *data)
-{
- struct page_collect *pcol = data;
- struct inode *inode = pcol->inode;
- struct exofs_i_info *oi = exofs_i(inode);
- loff_t i_size = i_size_read(inode);
- pgoff_t end_index = i_size >> PAGE_SHIFT;
- size_t len;
- int ret;
-
- BUG_ON(!PageLocked(page));
-
- ret = wait_obj_created(oi);
- if (unlikely(ret))
- goto fail;
-
- if (page->index < end_index)
- /* in this case, the page is within the limits of the file */
- len = PAGE_SIZE;
- else {
- len = i_size & ~PAGE_MASK;
-
- if (page->index > end_index || !len) {
- /* in this case, the page is outside the limits
- * (truncate in progress)
- */
- ret = write_exec(pcol);
- if (unlikely(ret))
- goto fail;
- if (PageError(page))
- ClearPageError(page);
- unlock_page(page);
- EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
- "outside the limits\n",
- inode->i_ino, page->index);
- return 0;
- }
- }
-
-try_again:
-
- if (unlikely(pcol->pg_first == -1)) {
- pcol->pg_first = page->index;
- } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
- page->index)) {
- /* Discontinuity detected, split the request */
- ret = write_exec(pcol);
- if (unlikely(ret))
- goto fail;
-
- EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
- inode->i_ino, page->index);
- goto try_again;
- }
-
- if (!pcol->pages) {
- ret = pcol_try_alloc(pcol);
- if (unlikely(ret))
- goto fail;
- }
-
- EXOFS_DBGMSG2(" writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
- inode->i_ino, page->index, len);
-
- ret = pcol_add_page(pcol, page, len);
- if (unlikely(ret)) {
- EXOFS_DBGMSG2("Failed pcol_add_page "
- "nr_pages=%u total_length=0x%lx\n",
- pcol->nr_pages, pcol->length);
-
- /* split the request, next loop will start again */
- ret = write_exec(pcol);
- if (unlikely(ret)) {
- EXOFS_DBGMSG("write_exec failed => %d", ret);
- goto fail;
- }
-
- goto try_again;
- }
-
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
-
- return 0;
-
-fail:
- EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
- inode->i_ino, page->index, ret);
- mapping_set_error(page->mapping, -EIO);
- unlock_page(page);
- return ret;
-}
-
-static int exofs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- struct page_collect pcol;
- long start, end, expected_pages;
- int ret;
-
- start = wbc->range_start >> PAGE_SHIFT;
- end = (wbc->range_end == LLONG_MAX) ?
- start + mapping->nrpages :
- wbc->range_end >> PAGE_SHIFT;
-
- if (start || end)
- expected_pages = end - start + 1;
- else
- expected_pages = mapping->nrpages;
-
- if (expected_pages < 32L)
- expected_pages = 32L;
-
- EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
- "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
- mapping->host->i_ino, wbc->range_start, wbc->range_end,
- mapping->nrpages, start, end, expected_pages);
-
- _pcol_init(&pcol, expected_pages, mapping->host);
-
- ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
- if (unlikely(ret)) {
- EXOFS_ERR("write_cache_pages => %d\n", ret);
- return ret;
- }
-
- ret = write_exec(&pcol);
- if (unlikely(ret))
- return ret;
-
- if (wbc->sync_mode == WB_SYNC_ALL) {
-  return write_exec(&pcol); /* pump the last remainder */
- } else if (pcol.nr_pages) {
-  /* not SYNC, let the remainder join the next writeout */
- unsigned i;
-
- for (i = 0; i < pcol.nr_pages; i++) {
- struct page *page = pcol.pages[i];
-
- end_page_writeback(page);
- set_page_dirty(page);
- unlock_page(page);
- }
- }
- return 0;
-}
-
-/*
-static int exofs_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct page_collect pcol;
- int ret;
-
- _pcol_init(&pcol, 1, page->mapping->host);
-
- ret = writepage_strip(page, NULL, &pcol);
- if (ret) {
- EXOFS_ERR("exofs_writepage => %d\n", ret);
- return ret;
- }
-
- return write_exec(&pcol);
-}
-*/
-/* i_mutex held using inode->i_size directly */
-static void _write_failed(struct inode *inode, loff_t to)
-{
- if (to > inode->i_size)
- truncate_pagecache(inode, inode->i_size);
-}
-
-int exofs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- int ret = 0;
- struct page *page;
-
- page = *pagep;
- if (page == NULL) {
- page = grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT,
- flags);
- if (!page) {
- EXOFS_DBGMSG("grab_cache_page_write_begin failed\n");
- return -ENOMEM;
- }
- *pagep = page;
- }
-
- /* read modify write */
- if (!PageUptodate(page) && (len != PAGE_SIZE)) {
- loff_t i_size = i_size_read(mapping->host);
- pgoff_t end_index = i_size >> PAGE_SHIFT;
-
- if (page->index > end_index) {
- clear_highpage(page);
- SetPageUptodate(page);
- } else {
- ret = _readpage(page, true);
- if (ret) {
- unlock_page(page);
-    EXOFS_DBGMSG("_readpage failed\n");
- }
- }
- }
- return ret;
-}
-
-static int exofs_write_begin_export(struct file *file,
- struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- *pagep = NULL;
-
- return exofs_write_begin(file, mapping, pos, len, flags, pagep,
- fsdata);
-}
-
-static int exofs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = mapping->host;
- loff_t last_pos = pos + copied;
-
- if (!PageUptodate(page)) {
- if (copied < len) {
- _write_failed(inode, pos + len);
- copied = 0;
- goto out;
- }
- SetPageUptodate(page);
- }
- if (last_pos > inode->i_size) {
- i_size_write(inode, last_pos);
- mark_inode_dirty(inode);
- }
- set_page_dirty(page);
-out:
- unlock_page(page);
- put_page(page);
- return copied;
-}
-
-static int exofs_releasepage(struct page *page, gfp_t gfp)
-{
- EXOFS_DBGMSG("page 0x%lx\n", page->index);
- WARN_ON(1);
- return 0;
-}
-
-static void exofs_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
-{
- EXOFS_DBGMSG("page 0x%lx offset 0x%x length 0x%x\n",
- page->index, offset, length);
- WARN_ON(1);
-}
-
-
- /* TODO: Should be easy enough to do properly */
-static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- return 0;
-}
-
-const struct address_space_operations exofs_aops = {
- .readpage = exofs_readpage,
- .readpages = exofs_readpages,
- .writepage = NULL,
- .writepages = exofs_writepages,
- .write_begin = exofs_write_begin_export,
- .write_end = exofs_write_end,
- .releasepage = exofs_releasepage,
- .set_page_dirty = __set_page_dirty_nobuffers,
- .invalidatepage = exofs_invalidatepage,
-
- /* Not implemented Yet */
- .bmap = NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
- .direct_IO = exofs_direct_IO,
-
- /* With these NULL has special meaning or default is not exported */
- .migratepage = NULL,
- .launder_page = NULL,
- .is_partially_uptodate = NULL,
- .error_remove_page = NULL,
-};
-
-/******************************************************************************
- * INODE OPERATIONS
- *****************************************************************************/
-
-/*
- * Test whether an inode is a fast symlink.
- */
-static inline int exofs_inode_is_fast_symlink(struct inode *inode)
-{
- struct exofs_i_info *oi = exofs_i(inode);
-
- return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
-}
-
-static int _do_truncate(struct inode *inode, loff_t newsize)
-{
- struct exofs_i_info *oi = exofs_i(inode);
- struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
- int ret;
-
- inode->i_mtime = inode->i_ctime = current_time(inode);
-
- ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
- if (likely(!ret))
- truncate_setsize(inode, newsize);
-
- EXOFS_DBGMSG2("(0x%lx) size=0x%llx ret=>%d\n",
- inode->i_ino, newsize, ret);
- return ret;
-}
-
-/*
- * Set inode attributes - update size attribute on OSD if needed,
- * otherwise just call generic functions.
- */
-int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
-{
- struct inode *inode = d_inode(dentry);
- int error;
-
- /* if we are about to modify an object, and it hasn't been
- * created yet, wait
- */
- error = wait_obj_created(exofs_i(inode));
- if (unlikely(error))
- return error;
-
- error = setattr_prepare(dentry, iattr);
- if (unlikely(error))
- return error;
-
- if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(inode)) {
- error = _do_truncate(inode, iattr->ia_size);
- if (unlikely(error))
- return error;
- }
-
- setattr_copy(inode, iattr);
- mark_inode_dirty(inode);
- return 0;
-}
-
-static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
- EXOFS_APAGE_FS_DATA,
- EXOFS_ATTR_INODE_FILE_LAYOUT,
- 0);
-static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
- EXOFS_APAGE_FS_DATA,
- EXOFS_ATTR_INODE_DIR_LAYOUT,
- 0);
-
-/*
- * Read the Linux inode info from the OSD, and return it as is. In exofs the
- * inode info is in an application specific page/attribute of the osd-object.
- */
-static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
- struct exofs_fcb *inode)
-{
- struct exofs_sb_info *sbi = sb->s_fs_info;
- struct osd_attr attrs[] = {
- [0] = g_attr_inode_data,
- [1] = g_attr_inode_file_layout,
- [2] = g_attr_inode_dir_layout,
- };
- struct ore_io_state *ios;
- struct exofs_on_disk_inode_layout *layout;
- int ret;
-
- ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
- if (unlikely(ret)) {
- EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
- return ret;
- }
-
- attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
- attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
-
- ios->in_attr = attrs;
- ios->in_attr_len = ARRAY_SIZE(attrs);
-
- ret = ore_read(ios);
- if (unlikely(ret)) {
- EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
- _LLU(oi->one_comp.obj.id), ret);
- memset(inode, 0, sizeof(*inode));
- inode->i_mode = 0040000 | (0777 & ~022);
-  /* If the object is lost on the target we might as well enable its
-   * deletion.
-   */
- ret = 0;
- goto out;
- }
-
- ret = extract_attr_from_ios(ios, &attrs[0]);
- if (ret) {
- EXOFS_ERR("%s: extract_attr 0 of inode failed\n", __func__);
- goto out;
- }
- WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
- memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);
-
- ret = extract_attr_from_ios(ios, &attrs[1]);
- if (ret) {
- EXOFS_ERR("%s: extract_attr 1 of inode failed\n", __func__);
- goto out;
- }
- if (attrs[1].len) {
- layout = attrs[1].val_ptr;
- if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
- EXOFS_ERR("%s: unsupported files layout %d\n",
- __func__, layout->gen_func);
- ret = -ENOTSUPP;
- goto out;
- }
- }
-
- ret = extract_attr_from_ios(ios, &attrs[2]);
- if (ret) {
- EXOFS_ERR("%s: extract_attr 2 of inode failed\n", __func__);
- goto out;
- }
- if (attrs[2].len) {
- layout = attrs[2].val_ptr;
- if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
- EXOFS_ERR("%s: unsupported meta-data layout %d\n",
- __func__, layout->gen_func);
- ret = -ENOTSUPP;
- goto out;
- }
- }
-
-out:
- ore_put_io_state(ios);
- return ret;
-}
-
-static void __oi_init(struct exofs_i_info *oi)
-{
- init_waitqueue_head(&oi->i_wq);
- oi->i_flags = 0;
-}
-/*
- * Fill in an inode read from the OSD and set it up for use
- */
-struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
-{
- struct exofs_i_info *oi;
- struct exofs_fcb fcb;
- struct inode *inode;
- int ret;
-
- inode = iget_locked(sb, ino);
- if (!inode)
- return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
- oi = exofs_i(inode);
- __oi_init(oi);
- exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
- exofs_oi_objno(oi));
-
- /* read the inode from the osd */
- ret = exofs_get_inode(sb, oi, &fcb);
- if (ret)
- goto bad_inode;
-
- set_obj_created(oi);
-
- /* copy stuff from on-disk struct to in-memory struct */
- inode->i_mode = le16_to_cpu(fcb.i_mode);
- i_uid_write(inode, le32_to_cpu(fcb.i_uid));
- i_gid_write(inode, le32_to_cpu(fcb.i_gid));
- set_nlink(inode, le16_to_cpu(fcb.i_links_count));
- inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
- inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
- inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
- inode->i_ctime.tv_nsec =
- inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
- oi->i_commit_size = le64_to_cpu(fcb.i_size);
- i_size_write(inode, oi->i_commit_size);
- inode->i_blkbits = EXOFS_BLKSHIFT;
- inode->i_generation = le32_to_cpu(fcb.i_generation);
-
- oi->i_dir_start_lookup = 0;
-
- if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
- ret = -ESTALE;
- goto bad_inode;
- }
-
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
- if (fcb.i_data[0])
- inode->i_rdev =
- old_decode_dev(le32_to_cpu(fcb.i_data[0]));
- else
- inode->i_rdev =
- new_decode_dev(le32_to_cpu(fcb.i_data[1]));
- } else {
- memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
- }
-
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &exofs_file_inode_operations;
- inode->i_fop = &exofs_file_operations;
- inode->i_mapping->a_ops = &exofs_aops;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &exofs_dir_inode_operations;
- inode->i_fop = &exofs_dir_operations;
- inode->i_mapping->a_ops = &exofs_aops;
- } else if (S_ISLNK(inode->i_mode)) {
- if (exofs_inode_is_fast_symlink(inode)) {
- inode->i_op = &simple_symlink_inode_operations;
- inode->i_link = (char *)oi->i_data;
- } else {
- inode->i_op = &page_symlink_inode_operations;
- inode_nohighmem(inode);
- inode->i_mapping->a_ops = &exofs_aops;
- }
- } else {
- inode->i_op = &exofs_special_inode_operations;
- if (fcb.i_data[0])
- init_special_inode(inode, inode->i_mode,
- old_decode_dev(le32_to_cpu(fcb.i_data[0])));
- else
- init_special_inode(inode, inode->i_mode,
- new_decode_dev(le32_to_cpu(fcb.i_data[1])));
- }
-
- unlock_new_inode(inode);
- return inode;
-
-bad_inode:
- iget_failed(inode);
- return ERR_PTR(ret);
-}
-
-int __exofs_wait_obj_created(struct exofs_i_info *oi)
-{
- if (!obj_created(oi)) {
- EXOFS_DBGMSG("!obj_created\n");
- BUG_ON(!obj_2bcreated(oi));
- wait_event(oi->i_wq, obj_created(oi));
- EXOFS_DBGMSG("wait_event done\n");
- }
- return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
-}
-
-/*
- * Callback function from exofs_new_inode(). The important thing is that we
- * set the obj_created flag so that other methods know that the object exists on
- * the OSD.
- */
-static void create_done(struct ore_io_state *ios, void *p)
-{
- struct inode *inode = p;
- struct exofs_i_info *oi = exofs_i(inode);
- struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
- int ret;
-
- ret = ore_check_io(ios, NULL);
- ore_put_io_state(ios);
-
- atomic_dec(&sbi->s_curr_pending);
-
- if (unlikely(ret)) {
- EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
- _LLU(exofs_oi_objno(oi)),
- _LLU(oi->one_comp.obj.partition));
-  /* TODO: When the FS is corrupted, creation can fail because the
-   * object already exists. Get rid of this asynchronous creation;
-   * if the object exists, increment the object counter and try the
-   * next object until we succeed. All these dangling objects will
-   * be made into lost files by chkfs.exofs
-   */
- }
-
- set_obj_created(oi);
-
- wake_up(&oi->i_wq);
-}
-
-/*
- * Set up a new inode and create an object for it on the OSD
- */
-struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
-{
- struct super_block *sb = dir->i_sb;
- struct exofs_sb_info *sbi = sb->s_fs_info;
- struct inode *inode;
- struct exofs_i_info *oi;
- struct ore_io_state *ios;
- int ret;
-
- inode = new_inode(sb);
- if (!inode)
- return ERR_PTR(-ENOMEM);
-
- oi = exofs_i(inode);
- __oi_init(oi);
-
- set_obj_2bcreated(oi);
-
- inode_init_owner(inode, dir, mode);
- inode->i_ino = sbi->s_nextid++;
- inode->i_blkbits = EXOFS_BLKSHIFT;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
- oi->i_commit_size = inode->i_size = 0;
- spin_lock(&sbi->s_next_gen_lock);
- inode->i_generation = sbi->s_next_generation++;
- spin_unlock(&sbi->s_next_gen_lock);
- insert_inode_hash(inode);
-
- exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
- exofs_oi_objno(oi));
- exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */
-
- mark_inode_dirty(inode);
-
- ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
- if (unlikely(ret)) {
- EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
- return ERR_PTR(ret);
- }
-
- ios->done = create_done;
- ios->private = inode;
-
- ret = ore_create(ios);
- if (ret) {
- ore_put_io_state(ios);
- return ERR_PTR(ret);
- }
- atomic_inc(&sbi->s_curr_pending);
-
- return inode;
-}
-
-/*
- * struct to pass two arguments to update_inode's callback
- */
-struct updatei_args {
- struct exofs_sb_info *sbi;
- struct exofs_fcb fcb;
-};
-
-/*
- * Callback function from exofs_update_inode().
- */
-static void updatei_done(struct ore_io_state *ios, void *p)
-{
- struct updatei_args *args = p;
-
- ore_put_io_state(ios);
-
- atomic_dec(&args->sbi->s_curr_pending);
-
- kfree(args);
-}
-
-/*
- * Write the inode to the OSD. Just fill up the struct, and set the attribute
- * synchronously or asynchronously depending on the do_sync flag.
- */
-static int exofs_update_inode(struct inode *inode, int do_sync)
-{
- struct exofs_i_info *oi = exofs_i(inode);
- struct super_block *sb = inode->i_sb;
- struct exofs_sb_info *sbi = sb->s_fs_info;
- struct ore_io_state *ios;
- struct osd_attr attr;
- struct exofs_fcb *fcb;
- struct updatei_args *args;
- int ret;
-
- args = kzalloc(sizeof(*args), GFP_KERNEL);
- if (!args) {
- EXOFS_DBGMSG("Failed kzalloc of args\n");
- return -ENOMEM;
- }
-
- fcb = &args->fcb;
-
- fcb->i_mode = cpu_to_le16(inode->i_mode);
- fcb->i_uid = cpu_to_le32(i_uid_read(inode));
- fcb->i_gid = cpu_to_le32(i_gid_read(inode));
- fcb->i_links_count = cpu_to_le16(inode->i_nlink);
- fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
- fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
- oi->i_commit_size = i_size_read(inode);
- fcb->i_size = cpu_to_le64(oi->i_commit_size);
- fcb->i_generation = cpu_to_le32(inode->i_generation);
-
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
- if (old_valid_dev(inode->i_rdev)) {
- fcb->i_data[0] =
- cpu_to_le32(old_encode_dev(inode->i_rdev));
- fcb->i_data[1] = 0;
- } else {
- fcb->i_data[0] = 0;
- fcb->i_data[1] =
- cpu_to_le32(new_encode_dev(inode->i_rdev));
- fcb->i_data[2] = 0;
- }
- } else
- memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));
-
- ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
- if (unlikely(ret)) {
- EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
- goto free_args;
- }
-
- attr = g_attr_inode_data;
- attr.val_ptr = fcb;
- ios->out_attr_len = 1;
- ios->out_attr = &attr;
-
- wait_obj_created(oi);
-
- if (!do_sync) {
- args->sbi = sbi;
- ios->done = updatei_done;
- ios->private = args;
- }
-
- ret = ore_write(ios);
- if (!do_sync && !ret) {
- atomic_inc(&sbi->s_curr_pending);
- goto out; /* deallocation in updatei_done */
- }
-
- ore_put_io_state(ios);
-free_args:
- kfree(args);
-out:
- EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
- inode->i_ino, do_sync, ret);
- return ret;
-}
-
-int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
- /* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
- return exofs_update_inode(inode, 1);
-}
-
-/*
- * Callback function from exofs_delete_inode() - don't have much cleaning up to
- * do.
- */
-static void delete_done(struct ore_io_state *ios, void *p)
-{
- struct exofs_sb_info *sbi = p;
-
- ore_put_io_state(ios);
-
- atomic_dec(&sbi->s_curr_pending);
-}
-
-/*
- * Called when the refcount of an inode reaches zero. We remove the object
- * from the OSD here. We make sure the object was created before we try to
- * delete it.
- */
-void exofs_evict_inode(struct inode *inode)
-{
- struct exofs_i_info *oi = exofs_i(inode);
- struct super_block *sb = inode->i_sb;
- struct exofs_sb_info *sbi = sb->s_fs_info;
- struct ore_io_state *ios;
- int ret;
-
- truncate_inode_pages_final(&inode->i_data);
-
- /* TODO: should do better here */
- if (inode->i_nlink || is_bad_inode(inode))
- goto no_delete;
-
- inode->i_size = 0;
- clear_inode(inode);
-
- /* if we are deleting an obj that hasn't been created yet, wait.
- * This also makes sure that create_done cannot be called with an
- * already evicted inode.
- */
- wait_obj_created(oi);
- /* ignore the error, attempt a remove anyway */
-
- /* Now Remove the OSD objects */
- ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
- if (unlikely(ret)) {
- EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
- return;
- }
-
- ios->done = delete_done;
- ios->private = sbi;
-
- ret = ore_remove(ios);
- if (ret) {
- EXOFS_ERR("%s: ore_remove failed\n", __func__);
- ore_put_io_state(ios);
- return;
- }
- atomic_inc(&sbi->s_curr_pending);
-
- return;
-
-no_delete:
- clear_inode(inode);
-}
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
deleted file mode 100644
index 7295cd722770..000000000000
--- a/fs/exofs/namei.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com)
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * Copyrights for code taken from ext2:
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- * from
- * linux/fs/minix/inode.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation. Since it is based on ext2, and the only
- * valid version of GPL for the Linux kernel is version 2, the only valid
- * version of GPL for exofs is version 2.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "exofs.h"
-
-static inline int exofs_add_nondir(struct dentry *dentry, struct inode *inode)
-{
- int err = exofs_add_link(dentry, inode);
- if (!err) {
- d_instantiate(dentry, inode);
- return 0;
- }
- inode_dec_link_count(inode);
- iput(inode);
- return err;
-}
-
-static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
-{
- struct inode *inode;
- ino_t ino;
-
- if (dentry->d_name.len > EXOFS_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
-
- ino = exofs_inode_by_name(dir, dentry);
- inode = ino ? exofs_iget(dir->i_sb, ino) : NULL;
- return d_splice_alias(inode, dentry);
-}
-
-static int exofs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
-{
- struct inode *inode = exofs_new_inode(dir, mode);
- int err = PTR_ERR(inode);
- if (!IS_ERR(inode)) {
- inode->i_op = &exofs_file_inode_operations;
- inode->i_fop = &exofs_file_operations;
- inode->i_mapping->a_ops = &exofs_aops;
- mark_inode_dirty(inode);
- err = exofs_add_nondir(dentry, inode);
- }
- return err;
-}
-
-static int exofs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t rdev)
-{
- struct inode *inode;
- int err;
-
- inode = exofs_new_inode(dir, mode);
- err = PTR_ERR(inode);
- if (!IS_ERR(inode)) {
- init_special_inode(inode, inode->i_mode, rdev);
- mark_inode_dirty(inode);
- err = exofs_add_nondir(dentry, inode);
- }
- return err;
-}
-
-static int exofs_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
-{
- struct super_block *sb = dir->i_sb;
- int err = -ENAMETOOLONG;
- unsigned l = strlen(symname)+1;
- struct inode *inode;
- struct exofs_i_info *oi;
-
- if (l > sb->s_blocksize)
- goto out;
-
- inode = exofs_new_inode(dir, S_IFLNK | S_IRWXUGO);
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out;
-
- oi = exofs_i(inode);
- if (l > sizeof(oi->i_data)) {
- /* slow symlink */
- inode->i_op = &page_symlink_inode_operations;
- inode_nohighmem(inode);
- inode->i_mapping->a_ops = &exofs_aops;
- memset(oi->i_data, 0, sizeof(oi->i_data));
-
- err = page_symlink(inode, symname, l);
- if (err)
- goto out_fail;
- } else {
- /* fast symlink */
- inode->i_op = &simple_symlink_inode_operations;
- inode->i_link = (char *)oi->i_data;
- memcpy(oi->i_data, symname, l);
- inode->i_size = l-1;
- }
- mark_inode_dirty(inode);
-
- err = exofs_add_nondir(dentry, inode);
-out:
- return err;
-
-out_fail:
- inode_dec_link_count(inode);
- iput(inode);
- goto out;
-}
-
-static int exofs_link(struct dentry *old_dentry, struct inode *dir,
- struct dentry *dentry)
-{
- struct inode *inode = d_inode(old_dentry);
-
- inode->i_ctime = current_time(inode);
- inode_inc_link_count(inode);
- ihold(inode);
-
- return exofs_add_nondir(dentry, inode);
-}
-
-static int exofs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
-{
- struct inode *inode;
- int err;
-
- inode_inc_link_count(dir);
-
- inode = exofs_new_inode(dir, S_IFDIR | mode);
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out_dir;
-
- inode->i_op = &exofs_dir_inode_operations;
- inode->i_fop = &exofs_dir_operations;
- inode->i_mapping->a_ops = &exofs_aops;
-
- inode_inc_link_count(inode);
-
- err = exofs_make_empty(inode, dir);
- if (err)
- goto out_fail;
-
- err = exofs_add_link(dentry, inode);
- if (err)
- goto out_fail;
-
- d_instantiate(dentry, inode);
-out:
- return err;
-
-out_fail:
- inode_dec_link_count(inode);
- inode_dec_link_count(inode);
- iput(inode);
-out_dir:
- inode_dec_link_count(dir);
- goto out;
-}
-
-static int exofs_unlink(struct inode *dir, struct dentry *dentry)
-{
- struct inode *inode = d_inode(dentry);
- struct exofs_dir_entry *de;
- struct page *page;
- int err = -ENOENT;
-
- de = exofs_find_entry(dir, dentry, &page);
- if (!de)
- goto out;
-
- err = exofs_delete_entry(de, page);
- if (err)
- goto out;
-
- inode->i_ctime = dir->i_ctime;
- inode_dec_link_count(inode);
- err = 0;
-out:
- return err;
-}
-
-static int exofs_rmdir(struct inode *dir, struct dentry *dentry)
-{
- struct inode *inode = d_inode(dentry);
- int err = -ENOTEMPTY;
-
- if (exofs_empty_dir(inode)) {
- err = exofs_unlink(dir, dentry);
- if (!err) {
- inode->i_size = 0;
- inode_dec_link_count(inode);
- inode_dec_link_count(dir);
- }
- }
- return err;
-}
-
-static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
-{
- struct inode *old_inode = d_inode(old_dentry);
- struct inode *new_inode = d_inode(new_dentry);
- struct page *dir_page = NULL;
- struct exofs_dir_entry *dir_de = NULL;
- struct page *old_page;
- struct exofs_dir_entry *old_de;
- int err = -ENOENT;
-
- if (flags & ~RENAME_NOREPLACE)
- return -EINVAL;
-
- old_de = exofs_find_entry(old_dir, old_dentry, &old_page);
- if (!old_de)
- goto out;
-
- if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
- dir_de = exofs_dotdot(old_inode, &dir_page);
- if (!dir_de)
- goto out_old;
- }
-
- if (new_inode) {
- struct page *new_page;
- struct exofs_dir_entry *new_de;
-
- err = -ENOTEMPTY;
- if (dir_de && !exofs_empty_dir(new_inode))
- goto out_dir;
-
- err = -ENOENT;
- new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
- if (!new_de)
- goto out_dir;
- err = exofs_set_link(new_dir, new_de, new_page, old_inode);
- new_inode->i_ctime = current_time(new_inode);
- if (dir_de)
- drop_nlink(new_inode);
- inode_dec_link_count(new_inode);
- if (err)
- goto out_dir;
- } else {
- err = exofs_add_link(new_dentry, old_inode);
- if (err)
- goto out_dir;
- if (dir_de)
- inode_inc_link_count(new_dir);
- }
-
- old_inode->i_ctime = current_time(old_inode);
-
- exofs_delete_entry(old_de, old_page);
- mark_inode_dirty(old_inode);
-
- if (dir_de) {
- err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
- inode_dec_link_count(old_dir);
- if (err)
- goto out_dir;
- }
- return 0;
-
-
-out_dir:
- if (dir_de) {
- kunmap(dir_page);
- put_page(dir_page);
- }
-out_old:
- kunmap(old_page);
- put_page(old_page);
-out:
- return err;
-}
-
-const struct inode_operations exofs_dir_inode_operations = {
- .create = exofs_create,
- .lookup = exofs_lookup,
- .link = exofs_link,
- .unlink = exofs_unlink,
- .symlink = exofs_symlink,
- .mkdir = exofs_mkdir,
- .rmdir = exofs_rmdir,
- .mknod = exofs_mknod,
- .rename = exofs_rename,
- .setattr = exofs_setattr,
-};
-
-const struct inode_operations exofs_special_inode_operations = {
- .setattr = exofs_setattr,
-};
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
deleted file mode 100644
index 24a8e34882e9..000000000000
--- a/fs/exofs/ore.c
+++ /dev/null
@@ -1,1179 +0,0 @@
-/*
- * Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com)
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation. Since it is based on ext2, and the only
- * valid version of GPL for the Linux kernel is version 2, the only valid
- * version of GPL for exofs is version 2.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/div64.h>
-#include <linux/lcm.h>
-
-#include "ore_raid.h"
-
-MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
-MODULE_DESCRIPTION("Objects Raid Engine ore.ko");
-MODULE_LICENSE("GPL");
-
-/* ore_verify_layout does a couple of things:
- * 1. Given the minimum number of needed parameters, fixes up the rest of the
- *    members to be operational for the ore. The needed parameters are those
- *    defined by the pnfs-objects layout STD.
- * 2. Checks whether the current ore code actually supports these parameters;
- *    for example, stripe_unit must be a multiple of the system PAGE_SIZE,
- *    etc.
- * 3. Caches some heavily used calculations that will be needed by users.
- */
-
-enum { BIO_MAX_PAGES_KMALLOC =
- (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),};
-
-int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
-{
- u64 stripe_length;
-
- switch (layout->raid_algorithm) {
- case PNFS_OSD_RAID_0:
- layout->parity = 0;
- break;
- case PNFS_OSD_RAID_5:
- layout->parity = 1;
- break;
- case PNFS_OSD_RAID_PQ:
- layout->parity = 2;
- break;
- case PNFS_OSD_RAID_4:
- default:
- ORE_ERR("Only RAID_0/5/6 for now received-enum=%d\n",
- layout->raid_algorithm);
- return -EINVAL;
- }
- if (0 != (layout->stripe_unit & ~PAGE_MASK)) {
- ORE_ERR("Stripe Unit(0x%llx)"
-   " must be a multiple of PAGE_SIZE(0x%lx)\n",
- _LLU(layout->stripe_unit), PAGE_SIZE);
- return -EINVAL;
- }
- if (layout->group_width) {
- if (!layout->group_depth) {
- ORE_ERR("group_depth == 0 && group_width != 0\n");
- return -EINVAL;
- }
- if (total_comps < (layout->group_width * layout->mirrors_p1)) {
- ORE_ERR("Data Map wrong, "
- "numdevs=%d < group_width=%d * mirrors=%d\n",
- total_comps, layout->group_width,
- layout->mirrors_p1);
- return -EINVAL;
- }
- layout->group_count = total_comps / layout->mirrors_p1 /
- layout->group_width;
- } else {
- if (layout->group_depth) {
- printk(KERN_NOTICE "Warning: group_depth ignored "
- "group_width == 0 && group_depth == %lld\n",
- _LLU(layout->group_depth));
- }
- layout->group_width = total_comps / layout->mirrors_p1;
- layout->group_depth = -1;
- layout->group_count = 1;
- }
-
- stripe_length = (u64)layout->group_width * layout->stripe_unit;
- if (stripe_length >= (1ULL << 32)) {
- ORE_ERR("Stripe_length(0x%llx) >= 32bit is not supported\n",
- _LLU(stripe_length));
- return -EINVAL;
- }
-
- layout->max_io_length =
- (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
- (layout->group_width - layout->parity);
- if (layout->parity) {
- unsigned stripe_length =
- (layout->group_width - layout->parity) *
- layout->stripe_unit;
-
- layout->max_io_length /= stripe_length;
- layout->max_io_length *= stripe_length;
- }
- ORE_DBGMSG("max_io_length=0x%lx\n", layout->max_io_length);
-
- return 0;
-}
-EXPORT_SYMBOL(ore_verify_layout);
-
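-/* A minimal, hypothetical setup sketch (not part of the original exofs
- * code) of the contract described above: fill in only the pnfs-defined
- * parameters and let ore_verify_layout() derive and validate the rest.
- * The chosen values are illustrative.
- */
-static int __maybe_unused example_layout_init(struct ore_layout *layout,
-        unsigned total_comps)
-{
- memset(layout, 0, sizeof(*layout));
- layout->stripe_unit = 16 * PAGE_SIZE; /* must be a multiple of PAGE_SIZE */
- layout->mirrors_p1 = 1;  /* number of mirrors + 1; 1 => no mirroring */
- layout->group_width = 0;  /* 0 => use all of total_comps, one group */
- layout->raid_algorithm = PNFS_OSD_RAID_0; /* verify sets parity = 0 */
- return ore_verify_layout(total_comps, layout);
-}
-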
-static u8 *_ios_cred(struct ore_io_state *ios, unsigned index)
-{
- return ios->oc->comps[index & ios->oc->single_comp].cred;
-}
-
-static struct osd_obj_id *_ios_obj(struct ore_io_state *ios, unsigned index)
-{
- return &ios->oc->comps[index & ios->oc->single_comp].obj;
-}
-
-static struct osd_dev *_ios_od(struct ore_io_state *ios, unsigned index)
-{
- ORE_DBGMSG2("oc->first_dev=%d oc->numdevs=%d i=%d oc->ods=%p\n",
- ios->oc->first_dev, ios->oc->numdevs, index,
- ios->oc->ods);
-
- return ore_comp_dev(ios->oc, index);
-}
-
-int _ore_get_io_state(struct ore_layout *layout,
- struct ore_components *oc, unsigned numdevs,
- unsigned sgs_per_dev, unsigned num_par_pages,
- struct ore_io_state **pios)
-{
- struct ore_io_state *ios;
- size_t size_ios, size_extra, size_total;
- void *ios_extra;
-
- /*
- * The desired layout looks like this, with the extra_allocation
- * items pointed at from fields within ios or per_dev:
-
- struct __alloc_all_io_state {
- struct ore_io_state ios;
- struct ore_per_dev_state per_dev[numdevs];
- union {
- struct osd_sg_entry sglist[sgs_per_dev * numdevs];
- struct page *pages[num_par_pages];
- } extra_allocation;
- } whole_allocation;
-
- */
-
- /* This should never happen, so abort early if it ever does. */
- if (sgs_per_dev && num_par_pages) {
- ORE_DBGMSG("Tried to use both pages and sglist\n");
- *pios = NULL;
- return -EINVAL;
- }
-
- if (numdevs > (INT_MAX - sizeof(*ios)) /
- sizeof(struct ore_per_dev_state))
- return -ENOMEM;
- size_ios = sizeof(*ios) + sizeof(struct ore_per_dev_state) * numdevs;
-
- if (sgs_per_dev * numdevs > INT_MAX / sizeof(struct osd_sg_entry))
- return -ENOMEM;
- if (num_par_pages > INT_MAX / sizeof(struct page *))
- return -ENOMEM;
- size_extra = max(sizeof(struct osd_sg_entry) * (sgs_per_dev * numdevs),
- sizeof(struct page *) * num_par_pages);
-
- size_total = size_ios + size_extra;
-
- if (likely(size_total <= PAGE_SIZE)) {
- ios = kzalloc(size_total, GFP_KERNEL);
- if (unlikely(!ios)) {
- ORE_DBGMSG("Failed kzalloc bytes=%zd\n", size_total);
- *pios = NULL;
- return -ENOMEM;
- }
- ios_extra = (char *)ios + size_ios;
- } else {
- ios = kzalloc(size_ios, GFP_KERNEL);
- if (unlikely(!ios)) {
- ORE_DBGMSG("Failed alloc first part bytes=%zd\n",
- size_ios);
- *pios = NULL;
- return -ENOMEM;
- }
- ios_extra = kzalloc(size_extra, GFP_KERNEL);
- if (unlikely(!ios_extra)) {
- ORE_DBGMSG("Failed alloc second part bytes=%zd\n",
- size_extra);
- kfree(ios);
- *pios = NULL;
- return -ENOMEM;
- }
-
-  /* In this case per_dev[0].sglist holds the pointer to
- * be freed
- */
- ios->extra_part_alloc = true;
- }
-
- if (num_par_pages) {
- ios->parity_pages = ios_extra;
- ios->max_par_pages = num_par_pages;
- }
- if (sgs_per_dev) {
- struct osd_sg_entry *sgilist = ios_extra;
- unsigned d;
-
- for (d = 0; d < numdevs; ++d) {
- ios->per_dev[d].sglist = sgilist;
- sgilist += sgs_per_dev;
- }
- ios->sgs_per_dev = sgs_per_dev;
- }
-
- ios->layout = layout;
- ios->oc = oc;
- *pios = ios;
- return 0;
-}
-
-/* Allocate an io_state for only a single group of devices
- *
- * If a user needs to call ore_read/write() this version must be used because
- * it allocates the extra state needed for striping and raid.
- * The ore might decide to IO less than @length bytes due to alignments
- * and constraints, as follows:
- * - The IO cannot cross a group boundary.
- * - In raid5/6 the end of the IO must align with the end of a stripe, e.g.
- *   (@offset + @length) % stripe_size == 0, or the complete range is within
- *   a single stripe.
- * - Memory conditions only permitted a shorter IO. (A user can use
- *   @length=~0 and check the returned ios->length for max_io_size.)
- *
- * The caller must check the returned ios->length (and/or ios->nr_pages) and
- * re-issue the pages that fall outside of ios->length.
- */
-int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
- bool is_reading, u64 offset, u64 length,
- struct ore_io_state **pios)
-{
- struct ore_io_state *ios;
- unsigned numdevs = layout->group_width * layout->mirrors_p1;
- unsigned sgs_per_dev = 0, max_par_pages = 0;
- int ret;
-
- if (layout->parity && length) {
- unsigned data_devs = layout->group_width - layout->parity;
- unsigned stripe_size = layout->stripe_unit * data_devs;
- unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
- u32 remainder;
- u64 num_stripes;
- u64 num_raid_units;
-
- num_stripes = div_u64_rem(length, stripe_size, &remainder);
- if (remainder)
- ++num_stripes;
-
- num_raid_units = num_stripes * layout->parity;
-
- if (is_reading) {
- /* For reads add per_dev sglist array */
-   /* TODO: for RAID-6 we need twice as many. Actually:
- * num_stripes / LCMdP(W,P);
- * if (W%P != 0) num_stripes *= parity;
- */
-
- /* first/last seg is split */
- num_raid_units += layout->group_width;
- sgs_per_dev = div_u64(num_raid_units, data_devs) + 2;
- } else {
- /* For Writes add parity pages array. */
- max_par_pages = num_raid_units * pages_in_unit *
- sizeof(struct page *);
- }
- }
-
- ret = _ore_get_io_state(layout, oc, numdevs, sgs_per_dev, max_par_pages,
- pios);
- if (unlikely(ret))
- return ret;
-
- ios = *pios;
- ios->reading = is_reading;
- ios->offset = offset;
-
- if (length) {
- ore_calc_stripe_info(layout, offset, length, &ios->si);
- ios->length = ios->si.length;
- ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
- ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
- if (layout->parity)
- _ore_post_alloc_raid_stuff(ios);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ore_get_rw_state);
-
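-/* A minimal, hypothetical caller sketch (not part of the original exofs
- * code) of the contract above: the ore may trim the IO at a group or
- * stripe boundary, so re-check ios->length and loop over the remainder.
- * Attaching ios->pages for each piece is elided here.
- */
-static int __maybe_unused example_read_in_pieces(struct ore_layout *layout,
-        struct ore_components *oc, u64 offset, u64 length)
-{
- while (length) {
-  struct ore_io_state *ios;
-  u64 done;
-  int ret;
-
-  ret = ore_get_rw_state(layout, oc, true, offset, length, &ios);
-  if (unlikely(ret))
-   return ret;
-
-  /* ... attach ios->pages covering ios->length bytes here ... */
-
-  done = ios->length; /* what this IO will actually cover */
-  ret = ore_read(ios); /* synchronous, since ios->done is NULL */
-  ore_put_io_state(ios);
-  if (unlikely(ret))
-   return ret;
-
-  offset += done;
-  length -= done;
- }
- return 0;
-}
-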
-/* Allocate an io_state for all the devices in the comps array
- *
- * This version of io_state allocation is used mostly by create/remove
- * and trunc, where we currently need all the devices. The only wasteful
- * bit is the read/write_attributes with no IO. Those sites should
- * be converted to use ore_get_rw_state() with length=0.
- */
-int ore_get_io_state(struct ore_layout *layout, struct ore_components *oc,
- struct ore_io_state **pios)
-{
- return _ore_get_io_state(layout, oc, oc->numdevs, 0, 0, pios);
-}
-EXPORT_SYMBOL(ore_get_io_state);
-
-void ore_put_io_state(struct ore_io_state *ios)
-{
- if (ios) {
- unsigned i;
-
- for (i = 0; i < ios->numdevs; i++) {
- struct ore_per_dev_state *per_dev = &ios->per_dev[i];
-
- if (per_dev->or)
- osd_end_request(per_dev->or);
- if (per_dev->bio)
- bio_put(per_dev->bio);
- }
-
- _ore_free_raid_stuff(ios);
- kfree(ios);
- }
-}
-EXPORT_SYMBOL(ore_put_io_state);
-
-static void _sync_done(struct ore_io_state *ios, void *p)
-{
- struct completion *waiting = p;
-
- complete(waiting);
-}
-
-static void _last_io(struct kref *kref)
-{
- struct ore_io_state *ios = container_of(
- kref, struct ore_io_state, kref);
-
- ios->done(ios, ios->private);
-}
-
-static void _done_io(struct osd_request *or, void *p)
-{
- struct ore_io_state *ios = p;
-
- kref_put(&ios->kref, _last_io);
-}
-
-int ore_io_execute(struct ore_io_state *ios)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- bool sync = (ios->done == NULL);
- int i, ret;
-
- if (sync) {
- ios->done = _sync_done;
- ios->private = &wait;
- }
-
- for (i = 0; i < ios->numdevs; i++) {
- struct osd_request *or = ios->per_dev[i].or;
- if (unlikely(!or))
- continue;
-
- ret = osd_finalize_request(or, 0, _ios_cred(ios, i), NULL);
- if (unlikely(ret)) {
- ORE_DBGMSG("Failed to osd_finalize_request() => %d\n",
- ret);
- return ret;
- }
- }
-
- kref_init(&ios->kref);
-
- for (i = 0; i < ios->numdevs; i++) {
- struct osd_request *or = ios->per_dev[i].or;
- if (unlikely(!or))
- continue;
-
- kref_get(&ios->kref);
- osd_execute_request_async(or, _done_io, ios);
- }
-
- kref_put(&ios->kref, _last_io);
- ret = 0;
-
- if (sync) {
- wait_for_completion(&wait);
- ret = ore_check_io(ios, NULL);
- }
- return ret;
-}
-
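-/* A small sketch of the convention above (hypothetical caller): leaving
- * ios->done NULL makes ore_io_execute() wait internally and return the
- * result of ore_check_io(); setting a done callback makes it async.
- */
-static int __maybe_unused example_sync_execute(struct ore_io_state *ios)
-{
- ios->done = NULL; /* NULL done => synchronous execution */
- return ore_io_execute(ios);
-}
-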
-static void _clear_bio(struct bio *bio)
-{
- struct bio_vec *bv;
- unsigned i;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bv, bio, i, iter_all) {
- unsigned this_count = bv->bv_len;
-
- if (likely(PAGE_SIZE == this_count))
- clear_highpage(bv->bv_page);
- else
- zero_user(bv->bv_page, bv->bv_offset, this_count);
- }
-}
-
-int ore_check_io(struct ore_io_state *ios, ore_on_dev_error on_dev_error)
-{
- enum osd_err_priority acumulated_osd_err = 0;
- int acumulated_lin_err = 0;
- int i;
-
- for (i = 0; i < ios->numdevs; i++) {
- struct osd_sense_info osi;
- struct ore_per_dev_state *per_dev = &ios->per_dev[i];
- struct osd_request *or = per_dev->or;
- int ret;
-
- if (unlikely(!or))
- continue;
-
- ret = osd_req_decode_sense(or, &osi);
- if (likely(!ret))
- continue;
-
- if ((OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) &&
- per_dev->bio) {
-   /* start read offset is past the end of file.
-    * Note: if we do not have a bio it means read-attributes;
-    * in this case we should return the error to the caller.
- */
- _clear_bio(per_dev->bio);
- ORE_DBGMSG("start read offset passed end of file "
- "offset=0x%llx, length=0x%llx\n",
- _LLU(per_dev->offset),
- _LLU(per_dev->length));
-
- continue; /* we recovered */
- }
-
- if (on_dev_error) {
- u64 residual = ios->reading ?
- or->in.residual : or->out.residual;
- u64 offset = (ios->offset + ios->length) - residual;
- unsigned dev = per_dev->dev - ios->oc->first_dev;
- struct ore_dev *od = ios->oc->ods[dev];
-
- on_dev_error(ios, od, dev, osi.osd_err_pri,
- offset, residual);
- }
- if (osi.osd_err_pri >= acumulated_osd_err) {
- acumulated_osd_err = osi.osd_err_pri;
- acumulated_lin_err = ret;
- }
- }
-
- return acumulated_lin_err;
-}
-EXPORT_SYMBOL(ore_check_io);
-
-/*
- * L - logical offset into the file
- *
- * D - number of Data devices
- * D = group_width - parity
- *
- * U - The number of bytes in a stripe within a group
- * U = stripe_unit * D
- *
- * T - The number of bytes striped within a group of component objects
- * (before advancing to the next group)
- * T = U * group_depth
- *
- * S - The number of bytes striped across all component objects
- * before the pattern repeats
- * S = T * group_count
- *
- * M - The "major" (i.e., across all components) cycle number
- * M = L / S
- *
- * G - Counts the groups from the beginning of the major cycle
- * G = (L - (M * S)) / T [or (L % S) / T]
- *
- * H - The byte offset within the group
- * H = (L - (M * S)) % T [or (L % S) % T]
- *
- * N - The "minor" (i.e., across the group) stripe number
- * N = H / U
- *
- * C - The component index corresponding to L
- *
- * C = (H - (N * U)) / stripe_unit + G * D
- * [or (L % U) / stripe_unit + G * D]
- *
- * O - The component offset corresponding to L
- * O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
- *
- * LCMdP - Parity cycle: Lowest Common Multiple of group_width, parity
- *   divided by parity
- * LCMdP = lcm(group_width, parity) / parity
- *
- * R - The parity Rotation stripe
- * (Note parity cycle always starts at a group's boundary)
- * R = N % LCMdP
- *
- * I = the first parity device index
- * I = (group_width + group_width - R*parity - parity) % group_width
- *
- * Craid - The component index Rotated
- * Craid = (group_width + C - R*parity) % group_width
- * (We add the group_width to avoid negative numbers modulo math)
- */
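-/* A worked example of the above (illustrative numbers, no parity):
- * with stripe_unit=64K, group_width=4, group_depth=2, group_count=2 we
- * get D=4, U=256K, T=512K, S=1M. For a logical offset L=700K:
- *
- * M = 700K / 1M = 0
- * G = (700K % 1M) / 512K = 1
- * H = (700K % 1M) % 512K = 188K
- * N = 188K / 256K = 0
- * C = (188K - 0 * 256K) / 64K + 1 * 4 = 6
- * O = 700K % 64K + 0 + 0 = 60K
- *
- * i.e. the IO starts 60K into the object of component 6, the third
- * device of the second group.
- */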
-void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
- u64 length, struct ore_striping_info *si)
-{
- u32 stripe_unit = layout->stripe_unit;
- u32 group_width = layout->group_width;
- u64 group_depth = layout->group_depth;
- u32 parity = layout->parity;
-
- u32 D = group_width - parity;
- u32 U = D * stripe_unit;
- u64 T = U * group_depth;
- u64 S = T * layout->group_count;
- u64 M = div64_u64(file_offset, S);
-
- /*
- G = (L - (M * S)) / T
- H = (L - (M * S)) % T
- */
- u64 LmodS = file_offset - M * S;
- u32 G = div64_u64(LmodS, T);
- u64 H = LmodS - G * T;
-
- u32 N = div_u64(H, U);
- u32 Nlast;
-
- /* "H - (N * U)" is just "H % U" so it's bound to u32 */
- u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;
- u32 first_dev = C - C % group_width;
-
- div_u64_rem(file_offset, stripe_unit, &si->unit_off);
-
- si->obj_offset = si->unit_off + (N * stripe_unit) +
- (M * group_depth * stripe_unit);
- si->cur_comp = C - first_dev;
- si->cur_pg = si->unit_off / PAGE_SIZE;
-
- if (parity) {
- u32 LCMdP = lcm(group_width, parity) / parity;
- /* R = N % LCMdP; */
- u32 RxP = (N % LCMdP) * parity;
-
- si->par_dev = (group_width + group_width - parity - RxP) %
- group_width + first_dev;
- si->dev = (group_width + group_width + C - RxP) %
- group_width + first_dev;
- si->bytes_in_stripe = U;
- si->first_stripe_start = M * S + G * T + N * U;
- } else {
-  /* Make the math correct; see _prepare_one_group */
- si->par_dev = group_width;
- si->dev = C;
- }
-
- si->dev *= layout->mirrors_p1;
- si->par_dev *= layout->mirrors_p1;
- si->offset = file_offset;
- si->length = T - H;
- if (si->length > length)
- si->length = length;
-
- Nlast = div_u64(H + si->length + U - 1, U);
- si->maxdevUnits = Nlast - N;
-
- si->M = M;
-}
-EXPORT_SYMBOL(ore_calc_stripe_info);
-
-int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
- unsigned pgbase, struct page **pages,
- struct ore_per_dev_state *per_dev, int cur_len)
-{
- unsigned pg = *cur_pg;
- struct request_queue *q =
- osd_request_queue(_ios_od(ios, per_dev->dev));
- unsigned len = cur_len;
- int ret;
-
- if (per_dev->bio == NULL) {
- unsigned bio_size;
-
- if (!ios->reading) {
- bio_size = ios->si.maxdevUnits;
- } else {
- bio_size = (ios->si.maxdevUnits + 1) *
- (ios->layout->group_width - ios->layout->parity) /
- ios->layout->group_width;
- }
- bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);
-
- per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
- if (unlikely(!per_dev->bio)) {
- ORE_DBGMSG("Failed to allocate BIO size=%u\n",
- bio_size);
- ret = -ENOMEM;
- goto out;
- }
- }
-
- while (cur_len > 0) {
- unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
- unsigned added_len;
-
- cur_len -= pglen;
-
- added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
- pglen, pgbase);
- if (unlikely(pglen != added_len)) {
- /* If bi_vcnt == bi_max then this is a SW BUG */
- ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
- "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
- per_dev->bio->bi_vcnt,
- per_dev->bio->bi_max_vecs,
- BIO_MAX_PAGES_KMALLOC, cur_len);
- ret = -ENOMEM;
- goto out;
- }
- _add_stripe_page(ios->sp2d, &ios->si, pages[pg]);
-
- pgbase = 0;
- ++pg;
- }
- BUG_ON(cur_len);
-
- per_dev->length += len;
- *cur_pg = pg;
- ret = 0;
-out: /* we fail the complete unit on an error, e.g. don't advance
-  * per_dev->length and cur_pg. This means that we might have a bigger
-  * bio than the CDB requested length (per_dev->length). That's fine;
-  * only the opposite is fatal.
- */
- return ret;
-}
-
-static int _add_parity_units(struct ore_io_state *ios,
- struct ore_striping_info *si,
- unsigned dev, unsigned first_dev,
- unsigned mirrors_p1, unsigned devs_in_group,
- unsigned cur_len)
-{
- unsigned do_parity;
- int ret = 0;
-
- for (do_parity = ios->layout->parity; do_parity; --do_parity) {
- struct ore_per_dev_state *per_dev;
-
- per_dev = &ios->per_dev[dev - first_dev];
- if (!per_dev->length && !per_dev->offset) {
- /* Only/always the parity unit of the first
- * stripe will be empty. So this is a chance to
- * initialize the per_dev info.
- */
- per_dev->dev = dev;
- per_dev->offset = si->obj_offset - si->unit_off;
- }
-
- ret = _ore_add_parity_unit(ios, si, per_dev, cur_len,
- do_parity == 1);
- if (unlikely(ret))
- break;
-
- if (do_parity != 1) {
- dev = ((dev + mirrors_p1) % devs_in_group) + first_dev;
- si->cur_comp = (si->cur_comp + 1) %
- ios->layout->group_width;
- }
- }
-
- return ret;
-}
-
-static int _prepare_for_striping(struct ore_io_state *ios)
-{
- struct ore_striping_info *si = &ios->si;
- unsigned stripe_unit = ios->layout->stripe_unit;
- unsigned mirrors_p1 = ios->layout->mirrors_p1;
- unsigned group_width = ios->layout->group_width;
- unsigned devs_in_group = group_width * mirrors_p1;
- unsigned dev = si->dev;
- unsigned first_dev = dev - (dev % devs_in_group);
- unsigned cur_pg = ios->pages_consumed;
- u64 length = ios->length;
- int ret = 0;
-
- if (!ios->pages) {
- ios->numdevs = ios->layout->mirrors_p1;
- return 0;
- }
-
- BUG_ON(length > si->length);
-
- while (length) {
- struct ore_per_dev_state *per_dev =
- &ios->per_dev[dev - first_dev];
- unsigned cur_len, page_off = 0;
-
- if (!per_dev->length && !per_dev->offset) {
- /* First time initialize the per_dev info. */
- per_dev->dev = dev;
- if (dev == si->dev) {
- WARN_ON(dev == si->par_dev);
- per_dev->offset = si->obj_offset;
- cur_len = stripe_unit - si->unit_off;
- page_off = si->unit_off & ~PAGE_MASK;
- BUG_ON(page_off && (page_off != ios->pgbase));
- } else {
- per_dev->offset = si->obj_offset - si->unit_off;
- cur_len = stripe_unit;
- }
- } else {
- cur_len = stripe_unit;
- }
- if (cur_len >= length)
- cur_len = length;
-
- ret = _ore_add_stripe_unit(ios, &cur_pg, page_off, ios->pages,
- per_dev, cur_len);
- if (unlikely(ret))
- goto out;
-
- length -= cur_len;
-
- dev = ((dev + mirrors_p1) % devs_in_group) + first_dev;
- si->cur_comp = (si->cur_comp + 1) % group_width;
- if (unlikely((dev == si->par_dev) || (!length && ios->sp2d))) {
- if (!length && ios->sp2d) {
- /* If we are writing and this is the very last
- * stripe, then operate on the parity dev.
- */
- dev = si->par_dev;
- /* If last stripe operate on parity comp */
- si->cur_comp = group_width - ios->layout->parity;
- }
-
- /* In writes cur_len only indicates whether this is
- * the last one. See _ore_add_parity_unit.
- */
- ret = _add_parity_units(ios, si, dev, first_dev,
- mirrors_p1, devs_in_group,
- ios->sp2d ? length : cur_len);
- if (unlikely(ret))
- goto out;
-
- /* Rotate next par_dev backwards, with wrapping */
- si->par_dev = (devs_in_group + si->par_dev -
- ios->layout->parity * mirrors_p1) %
- devs_in_group + first_dev;
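- /* e.g. group_width=5, parity=1, mirrors_p1=1: par_dev moves
- * one slot back per stripe, wrapping within the group:
- * 4, 3, 2, 1, 0, 4, ...
- */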
- /* Next stripe, start fresh */
- si->cur_comp = 0;
- si->cur_pg = 0;
- si->obj_offset += cur_len;
- si->unit_off = 0;
- }
- }
-out:
- ios->numdevs = devs_in_group;
- ios->pages_consumed = cur_pg;
- return ret;
-}
-
-int ore_create(struct ore_io_state *ios)
-{
- int i, ret;
-
- for (i = 0; i < ios->oc->numdevs; i++) {
- struct osd_request *or;
-
- or = osd_start_request(_ios_od(ios, i));
- if (unlikely(!or)) {
- ORE_ERR("%s: osd_start_request failed\n", __func__);
- ret = -ENOMEM;
- goto out;
- }
- ios->per_dev[i].or = or;
- ios->numdevs++;
-
- osd_req_create_object(or, _ios_obj(ios, i));
- }
- ret = ore_io_execute(ios);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(ore_create);
-
-int ore_remove(struct ore_io_state *ios)
-{
- int i, ret;
-
- for (i = 0; i < ios->oc->numdevs; i++) {
- struct osd_request *or;
-
- or = osd_start_request(_ios_od(ios, i));
- if (unlikely(!or)) {
- ORE_ERR("%s: osd_start_request failed\n", __func__);
- ret = -ENOMEM;
- goto out;
- }
- ios->per_dev[i].or = or;
- ios->numdevs++;
-
- osd_req_remove_object(or, _ios_obj(ios, i));
- }
- ret = ore_io_execute(ios);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(ore_remove);
-
-static int _write_mirror(struct ore_io_state *ios, int cur_comp)
-{
- struct ore_per_dev_state *master_dev = &ios->per_dev[cur_comp];
- unsigned dev = ios->per_dev[cur_comp].dev;
- unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
- int ret = 0;
-
- if (ios->pages && !master_dev->length)
- return 0; /* Just an empty slot */
-
- for (; cur_comp < last_comp; ++cur_comp, ++dev) {
- struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
- struct osd_request *or;
-
- or = osd_start_request(_ios_od(ios, dev));
- if (unlikely(!or)) {
- ORE_ERR("%s: osd_start_request failed\n", __func__);
- ret = -ENOMEM;
- goto out;
- }
- per_dev->or = or;
-
- if (ios->pages) {
- struct bio *bio;
-
- if (per_dev != master_dev) {
- bio = bio_clone_fast(master_dev->bio,
- GFP_KERNEL, NULL);
- if (unlikely(!bio)) {
- ORE_DBGMSG(
- "Failed to allocate BIO size=%u\n",
- master_dev->bio->bi_max_vecs);
- ret = -ENOMEM;
- goto out;
- }
-
- bio->bi_disk = NULL;
- bio->bi_next = NULL;
- per_dev->offset = master_dev->offset;
- per_dev->length = master_dev->length;
- per_dev->bio = bio;
- per_dev->dev = dev;
- } else {
- bio = master_dev->bio;
- /* FIXME: bio_set_dir() */
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- }
-
- osd_req_write(or, _ios_obj(ios, cur_comp),
- per_dev->offset, bio, per_dev->length);
- ORE_DBGMSG("write(0x%llx) offset=0x%llx "
- "length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, cur_comp)->id),
- _LLU(per_dev->offset),
- _LLU(per_dev->length), dev);
- } else if (ios->kern_buff) {
- per_dev->offset = ios->si.obj_offset;
- per_dev->dev = ios->si.dev + dev;
-
- /* no crossing devices without a page array */
- BUG_ON((ios->layout->group_width > 1) &&
- (ios->si.unit_off + ios->length >
- ios->layout->stripe_unit));
-
- ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
- per_dev->offset,
- ios->kern_buff, ios->length);
- if (unlikely(ret))
- goto out;
- ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
- "length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, cur_comp)->id),
- _LLU(per_dev->offset),
- _LLU(ios->length), per_dev->dev);
- } else {
- osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
- ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
- _LLU(_ios_obj(ios, cur_comp)->id),
- ios->out_attr_len, dev);
- }
-
- if (ios->out_attr)
- osd_req_add_set_attr_list(or, ios->out_attr,
- ios->out_attr_len);
-
- if (ios->in_attr)
- osd_req_add_get_attr_list(or, ios->in_attr,
- ios->in_attr_len);
- }
-
-out:
- return ret;
-}
-
-int ore_write(struct ore_io_state *ios)
-{
- int i;
- int ret;
-
- if (unlikely(ios->sp2d && !ios->r4w)) {
- /* A library is attempting a RAID-write without providing
- * a pages lock interface.
- */
- WARN_ON_ONCE(1);
- return -ENOTSUPP;
- }
-
- ret = _prepare_for_striping(ios);
- if (unlikely(ret))
- return ret;
-
- for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
- ret = _write_mirror(ios, i);
- if (unlikely(ret))
- return ret;
- }
-
- ret = ore_io_execute(ios);
- return ret;
-}
-EXPORT_SYMBOL(ore_write);
-
-int _ore_read_mirror(struct ore_io_state *ios, unsigned cur_comp)
-{
- struct osd_request *or;
- struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
- struct osd_obj_id *obj = _ios_obj(ios, cur_comp);
- unsigned first_dev = (unsigned)obj->id;
-
- if (ios->pages && !per_dev->length)
- return 0; /* Just an empty slot */
-
- first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
- or = osd_start_request(_ios_od(ios, first_dev));
- if (unlikely(!or)) {
- ORE_ERR("%s: osd_start_request failed\n", __func__);
- return -ENOMEM;
- }
- per_dev->or = or;
-
- if (ios->pages) {
- if (per_dev->cur_sg) {
- /* finalize the last sg_entry */
- _ore_add_sg_seg(per_dev, 0, false);
- if (unlikely(!per_dev->cur_sg))
- return 0; /* Skip parity only device */
-
- osd_req_read_sg(or, obj, per_dev->bio,
- per_dev->sglist, per_dev->cur_sg);
- } else {
- /* The no-raid case */
- osd_req_read(or, obj, per_dev->offset,
- per_dev->bio, per_dev->length);
- }
-
- ORE_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
- " dev=%d sg_len=%d\n", _LLU(obj->id),
- _LLU(per_dev->offset), _LLU(per_dev->length),
- first_dev, per_dev->cur_sg);
- } else {
- BUG_ON(ios->kern_buff);
-
- osd_req_get_attributes(or, obj);
- ORE_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
- _LLU(obj->id),
- ios->in_attr_len, first_dev);
- }
- if (ios->out_attr)
- osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);
-
- if (ios->in_attr)
- osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);
-
- return 0;
-}
-
-int ore_read(struct ore_io_state *ios)
-{
- int i;
- int ret;
-
- ret = _prepare_for_striping(ios);
- if (unlikely(ret))
- return ret;
-
- for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
- ret = _ore_read_mirror(ios, i);
- if (unlikely(ret))
- return ret;
- }
-
- ret = ore_io_execute(ios);
- return ret;
-}
-EXPORT_SYMBOL(ore_read);
-
-int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr)
-{
- struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
- void *iter = NULL;
- int nelem;
-
- do {
- nelem = 1;
- osd_req_decode_get_attr_list(ios->per_dev[0].or,
- &cur_attr, &nelem, &iter);
- if ((cur_attr.attr_page == attr->attr_page) &&
- (cur_attr.attr_id == attr->attr_id)) {
- attr->len = cur_attr.len;
- attr->val_ptr = cur_attr.val_ptr;
- return 0;
- }
- } while (iter);
-
- return -EIO;
-}
-EXPORT_SYMBOL(extract_attr_from_ios);
-
-static int _truncate_mirrors(struct ore_io_state *ios, unsigned cur_comp,
- struct osd_attr *attr)
-{
- int last_comp = cur_comp + ios->layout->mirrors_p1;
-
- for (; cur_comp < last_comp; ++cur_comp) {
- struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
- struct osd_request *or;
-
- or = osd_start_request(_ios_od(ios, cur_comp));
- if (unlikely(!or)) {
- ORE_ERR("%s: osd_start_request failed\n", __func__);
- return -ENOMEM;
- }
- per_dev->or = or;
-
- osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
- osd_req_add_set_attr_list(or, attr, 1);
- }
-
- return 0;
-}
-
-struct _trunc_info {
- struct ore_striping_info si;
- u64 prev_group_obj_off;
- u64 next_group_obj_off;
-
- unsigned first_group_dev;
- unsigned nex_group_dev;
-};
-
-static void _calc_trunk_info(struct ore_layout *layout, u64 file_offset,
- struct _trunc_info *ti)
-{
- unsigned stripe_unit = layout->stripe_unit;
-
- ore_calc_stripe_info(layout, file_offset, 0, &ti->si);
-
- ti->prev_group_obj_off = ti->si.M * stripe_unit;
- ti->next_group_obj_off = ti->si.M ? (ti->si.M - 1) * stripe_unit : 0;
-
- ti->first_group_dev = ti->si.dev - (ti->si.dev % layout->group_width);
- ti->nex_group_dev = ti->first_group_dev + layout->group_width;
-}
-
-int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
- u64 size)
-{
- struct ore_io_state *ios;
- struct exofs_trunc_attr {
- struct osd_attr attr;
- __be64 newsize;
- } *size_attrs;
- struct _trunc_info ti;
- int i, ret;
-
- ret = ore_get_io_state(layout, oc, &ios);
- if (unlikely(ret))
- return ret;
-
- _calc_trunk_info(ios->layout, size, &ti);
-
- size_attrs = kcalloc(ios->oc->numdevs, sizeof(*size_attrs),
- GFP_KERNEL);
- if (unlikely(!size_attrs)) {
- ret = -ENOMEM;
- goto out;
- }
-
- ios->numdevs = ios->oc->numdevs;
-
- for (i = 0; i < ios->numdevs; ++i) {
- struct exofs_trunc_attr *size_attr = &size_attrs[i];
- u64 obj_size;
-
- if (i < ti.first_group_dev)
- obj_size = ti.prev_group_obj_off;
- else if (i >= ti.nex_group_dev)
- obj_size = ti.next_group_obj_off;
- else if (i < ti.si.dev) /* dev within this group */
- obj_size = ti.si.obj_offset +
- ios->layout->stripe_unit - ti.si.unit_off;
- else if (i == ti.si.dev)
- obj_size = ti.si.obj_offset;
- else /* i > ti.dev */
- obj_size = ti.si.obj_offset - ti.si.unit_off;
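- /* Devices the truncation row has already passed keep up to
- * the end of their current unit, devices not yet reached keep
- * only up to its start; si.dev itself is cut mid-unit.
- */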
-
- size_attr->newsize = cpu_to_be64(obj_size);
- size_attr->attr = g_attr_logical_length;
- size_attr->attr.val_ptr = &size_attr->newsize;
-
- ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
- _LLU(oc->comps->obj.id), _LLU(obj_size), i);
- ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
- &size_attr->attr);
- if (unlikely(ret))
- goto out;
- }
- ret = ore_io_execute(ios);
-
-out:
- kfree(size_attrs);
- ore_put_io_state(ios);
- return ret;
-}
-EXPORT_SYMBOL(ore_truncate);
-
-const struct osd_attr g_attr_logical_length = ATTR_DEF(
- OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
-EXPORT_SYMBOL(g_attr_logical_length);
diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
deleted file mode 100644
index e83bab54b03e..000000000000
--- a/fs/exofs/ore_raid.c
+++ /dev/null
@@ -1,757 +0,0 @@
-/*
- * Copyright (C) 2011
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * This file is part of the objects raid engine (ore).
- *
- * It is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with "ore". If not, write to the Free Software Foundation, Inc:
- * "Free Software Foundation <info@fsf.org>"
- */
-
-#include <linux/gfp.h>
-#include <linux/async_tx.h>
-
-#include "ore_raid.h"
-
-#undef ORE_DBGMSG2
-#define ORE_DBGMSG2 ORE_DBGMSG
-
-static struct page *_raid_page_alloc(void)
-{
- return alloc_page(GFP_KERNEL);
-}
-
-static void _raid_page_free(struct page *p)
-{
- __free_page(p);
-}
-
-/* This struct is forward declared in ore_io_state, but is private to here.
- * It is put on ios->sp2d for RAID5/6 writes only. See _gen_xor_unit.
- *
- * __stripe_pages_2d is a 2d array of pages, and it is also a corner turn.
- * Ascending page index access is sp2d(p-minor, c-major). But storage is
- * sp2d[p-minor][c-major], so it can be properly presented to the async-xor
- * API.
- */
-struct __stripe_pages_2d {
- /* Cache some hot path repeated calculations */
- unsigned parity;
- unsigned data_devs;
- unsigned pages_in_unit;
-
- bool needed;
-
- /* Array size is pages_in_unit (layout->stripe_unit / PAGE_SIZE) */
- struct __1_page_stripe {
- bool alloc;
- unsigned write_count;
- struct async_submit_ctl submit;
- struct dma_async_tx_descriptor *tx;
-
- /* The size of this array is data_devs + parity */
- struct page **pages;
- struct page **scribble;
- /* bool array, size of this array is data_devs */
- char *page_is_read;
- } _1p_stripes[];
-};
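-/* Example of the corner turn, with group_width=4 and parity=1 (so
- * data_devs=3): _1p_stripes[p].pages[0..2] hold the data pages of
- * page-row p, and _1p_stripes[p].pages[3] receives the computed parity
- * page (see _gen_xor_unit, which XORs into pages[data_devs]).
- */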
-
-/* This can get bigger than a page, so support multiple page allocations.
- * _sp2d_free should be called even if _sp2d_alloc fails (by returning
- * non-zero).
- */
-static int _sp2d_alloc(unsigned pages_in_unit, unsigned group_width,
- unsigned parity, struct __stripe_pages_2d **psp2d)
-{
- struct __stripe_pages_2d *sp2d;
- unsigned data_devs = group_width - parity;
-
- /*
- * Desired allocation layout is, though when larger than PAGE_SIZE,
- * each struct __alloc_1p_arrays is separately allocated:
-
- struct _alloc_all_bytes {
- struct __alloc_stripe_pages_2d {
- struct __stripe_pages_2d sp2d;
- struct __1_page_stripe _1p_stripes[pages_in_unit];
- } __asp2d;
- struct __alloc_1p_arrays {
- struct page *pages[group_width];
- struct page *scribble[group_width];
- char page_is_read[data_devs];
- } __a1pa[pages_in_unit];
- } *_aab;
-
- struct __alloc_1p_arrays *__a1pa;
- struct __alloc_1p_arrays *__a1pa_end;
-
- */
-
- char *__a1pa;
- char *__a1pa_end;
-
- const size_t sizeof_stripe_pages_2d =
- sizeof(struct __stripe_pages_2d) +
- sizeof(struct __1_page_stripe) * pages_in_unit;
- const size_t sizeof__a1pa =
- ALIGN(sizeof(struct page *) * (2 * group_width) + data_devs,
- sizeof(void *));
- const size_t sizeof__a1pa_arrays = sizeof__a1pa * pages_in_unit;
- const size_t alloc_total = sizeof_stripe_pages_2d +
- sizeof__a1pa_arrays;
-
- unsigned num_a1pa, alloc_size, i;
-
- /* FIXME: check these numbers in ore_verify_layout */
- BUG_ON(sizeof_stripe_pages_2d > PAGE_SIZE);
- BUG_ON(sizeof__a1pa > PAGE_SIZE);
-
- /*
- * If alloc_total would be larger than PAGE_SIZE, only allocate
- * as many a1pa items as would fill the rest of the page, instead
- * of the full pages_in_unit count.
- */
- if (alloc_total > PAGE_SIZE) {
- num_a1pa = (PAGE_SIZE - sizeof_stripe_pages_2d) / sizeof__a1pa;
- alloc_size = sizeof_stripe_pages_2d + sizeof__a1pa * num_a1pa;
- } else {
- num_a1pa = pages_in_unit;
- alloc_size = alloc_total;
- }
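- /* Illustrative sizing, assuming 64-bit pointers: group_width=10,
- * parity=1, pages_in_unit=16 gives sizeof__a1pa =
- * ALIGN(8 * 20 + 9, 8) = 176, so the a1pa arrays alone need
- * 16 * 176 = 2816 bytes and may spill past the first allocation
- * into the per-loop kzalloc()s below.
- */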
-
- *psp2d = sp2d = kzalloc(alloc_size, GFP_KERNEL);
- if (unlikely(!sp2d)) {
- ORE_DBGMSG("!! Failed to alloc sp2d size=%d\n", alloc_size);
- return -ENOMEM;
- }
- /* From here on, just call _sp2d_free on any error */
-
- /* Find start of a1pa area. */
- __a1pa = (char *)sp2d + sizeof_stripe_pages_2d;
- /* Find end of the _allocated_ a1pa area. */
- __a1pa_end = __a1pa + alloc_size;
-
- /* Allocate additionally needed a1pa items in PAGE_SIZE chunks. */
- for (i = 0; i < pages_in_unit; ++i) {
- struct __1_page_stripe *stripe = &sp2d->_1p_stripes[i];
-
- if (unlikely(__a1pa >= __a1pa_end)) {
- num_a1pa = min_t(unsigned, PAGE_SIZE / sizeof__a1pa,
- pages_in_unit - i);
- alloc_size = sizeof__a1pa * num_a1pa;
-
- __a1pa = kzalloc(alloc_size, GFP_KERNEL);
- if (unlikely(!__a1pa)) {
- ORE_DBGMSG("!! Failed to _alloc_1p_arrays=%d\n",
- num_a1pa);
- return -ENOMEM;
- }
- __a1pa_end = __a1pa + alloc_size;
- /* The first stripe marks this buffer so _sp2d_free will kfree it */
- stripe->alloc = true;
- }
-
- /*
- * Attach all _1p_stripes pointers to their slice of the
- * allocation, which was either part of the original PAGE_SIZE
- * allocation or of a subsequent allocation in this loop.
- */
- stripe->pages = (void *)__a1pa;
- stripe->scribble = stripe->pages + group_width;
- stripe->page_is_read = (char *)stripe->scribble + group_width;
- __a1pa += sizeof__a1pa;
- }
-
- sp2d->parity = parity;
- sp2d->data_devs = data_devs;
- sp2d->pages_in_unit = pages_in_unit;
- return 0;
-}
-
-static void _sp2d_reset(struct __stripe_pages_2d *sp2d,
- const struct _ore_r4w_op *r4w, void *priv)
-{
- unsigned data_devs = sp2d->data_devs;
- unsigned group_width = data_devs + sp2d->parity;
- int p, c;
-
- if (!sp2d->needed)
- return;
-
- for (c = data_devs - 1; c >= 0; --c)
- for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
- struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
- if (_1ps->page_is_read[c]) {
- struct page *page = _1ps->pages[c];
-
- r4w->put_page(priv, page);
- _1ps->page_is_read[c] = false;
- }
- }
-
- for (p = 0; p < sp2d->pages_in_unit; p++) {
- struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
- memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages));
- _1ps->write_count = 0;
- _1ps->tx = NULL;
- }
-
- sp2d->needed = false;
-}
-
-static void _sp2d_free(struct __stripe_pages_2d *sp2d)
-{
- unsigned i;
-
- if (!sp2d)
- return;
-
- for (i = 0; i < sp2d->pages_in_unit; ++i) {
- if (sp2d->_1p_stripes[i].alloc)
- kfree(sp2d->_1p_stripes[i].pages);
- }
-
- kfree(sp2d);
-}
-
-static unsigned _sp2d_min_pg(struct __stripe_pages_2d *sp2d)
-{
- unsigned p;
-
- for (p = 0; p < sp2d->pages_in_unit; p++) {
- struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
- if (_1ps->write_count)
- return p;
- }
-
- return ~0;
-}
-
-static unsigned _sp2d_max_pg(struct __stripe_pages_2d *sp2d)
-{
- int p;
-
- for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
- struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
- if (_1ps->write_count)
- return p;
- }
-
- return ~0;
-}
-
-static void _gen_xor_unit(struct __stripe_pages_2d *sp2d)
-{
- unsigned p;
- unsigned tx_flags = ASYNC_TX_ACK;
-
- if (sp2d->parity == 1)
- tx_flags |= ASYNC_TX_XOR_ZERO_DST;
-
- for (p = 0; p < sp2d->pages_in_unit; p++) {
- struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
- if (!_1ps->write_count)
- continue;
-
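- /* RAID5 (parity == 1): async_xor() XORs the data_devs source
- * pages into the parity page at pages[data_devs]. RAID6
- * (parity == 2): async_gen_syndrome() generates the P and Q
- * pages, the last two entries of the array.
- */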
- init_async_submit(&_1ps->submit, tx_flags,
- NULL, NULL, NULL, (addr_conv_t *)_1ps->scribble);
-
- if (sp2d->parity == 1)
- _1ps->tx = async_xor(_1ps->pages[sp2d->data_devs],
- _1ps->pages, 0, sp2d->data_devs,
- PAGE_SIZE, &_1ps->submit);
- else /* parity == 2 */
- _1ps->tx = async_gen_syndrome(_1ps->pages, 0,
- sp2d->data_devs + sp2d->parity,
- PAGE_SIZE, &_1ps->submit);
- }
-
- for (p = 0; p < sp2d->pages_in_unit; p++) {
- struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
- /* NOTE: We wait for HW synchronously (I don't have such HW
- * to test with.) Is parallelism needed with today's
- * multi-core CPUs?
- */
- async_tx_issue_pending(_1ps->tx);
- }
-}
-
-void _ore_add_stripe_page(struct __stripe_pages_2d *sp2d,
- struct ore_striping_info *si, struct page *page)
-{
- struct __1_page_stripe *_1ps;
-
- sp2d->needed = true;
-
- _1ps = &sp2d->_1p_stripes[si->cur_pg];
- _1ps->pages[si->cur_comp] = page;
- ++_1ps->write_count;
-
- si->cur_pg = (si->cur_pg + 1) % sp2d->pages_in_unit;
- /* si->cur_comp is advanced outside, in the main loop */
-}
-
-void _ore_add_sg_seg(struct ore_per_dev_state *per_dev, unsigned cur_len,
- bool not_last)
-{
- struct osd_sg_entry *sge;
-
- ORE_DBGMSG("dev=%d cur_len=0x%x not_last=%d cur_sg=%d "
- "offset=0x%llx length=0x%x last_sgs_total=0x%x\n",
- per_dev->dev, cur_len, not_last, per_dev->cur_sg,
- _LLU(per_dev->offset), per_dev->length,
- per_dev->last_sgs_total);
-
- if (!per_dev->cur_sg) {
- sge = per_dev->sglist;
-
- /* First time we prepare two entries */
- if (per_dev->length) {
- ++per_dev->cur_sg;
- sge->offset = per_dev->offset;
- sge->len = per_dev->length;
- } else {
- /* Here the parity is the first unit of this object.
- * This happens every time we reach a parity device on
- * the same stripe as the per_dev->offset. We need to
- * just skip this unit.
- */
- per_dev->offset += cur_len;
- return;
- }
- } else {
- /* finalize the last one */
- sge = &per_dev->sglist[per_dev->cur_sg - 1];
- sge->len = per_dev->length - per_dev->last_sgs_total;
- }
-
- if (not_last) {
- /* Partly prepare the next one */
- struct osd_sg_entry *next_sge = sge + 1;
-
- ++per_dev->cur_sg;
- next_sge->offset = sge->offset + sge->len + cur_len;
- /* Save cur len so we know how much was added next time */
- per_dev->last_sgs_total = per_dev->length;
- next_sge->len = 0;
- } else if (!sge->len) {
- /* Optimize for when the last unit is a parity */
- --per_dev->cur_sg;
- }
-}
-
-static int _alloc_read_4_write(struct ore_io_state *ios)
-{
- struct ore_layout *layout = ios->layout;
- int ret;
- /* We only want to read those pages not in cache, so the worst case
- * is a stripe populated with every other page.
- */
- unsigned sgs_per_dev = ios->sp2d->pages_in_unit + 2;
-
- ret = _ore_get_io_state(layout, ios->oc,
- layout->group_width * layout->mirrors_p1,
- sgs_per_dev, 0, &ios->ios_read_4_write);
- return ret;
-}
-
-/* @si contains info of the to-be-inserted page. Updating @si is the
- * caller's responsibility, specifically si->dev, si->obj_offset, ...
- */
-static int _add_to_r4w(struct ore_io_state *ios, struct ore_striping_info *si,
- struct page *page, unsigned pg_len)
-{
- struct request_queue *q;
- struct ore_per_dev_state *per_dev;
- struct ore_io_state *read_ios;
- unsigned first_dev = si->dev - (si->dev %
- (ios->layout->group_width * ios->layout->mirrors_p1));
- unsigned comp = si->dev - first_dev;
- unsigned added_len;
-
- if (!ios->ios_read_4_write) {
- int ret = _alloc_read_4_write(ios);
-
- if (unlikely(ret))
- return ret;
- }
-
- read_ios = ios->ios_read_4_write;
- read_ios->numdevs = ios->layout->group_width * ios->layout->mirrors_p1;
-
- per_dev = &read_ios->per_dev[comp];
- if (!per_dev->length) {
- per_dev->bio = bio_kmalloc(GFP_KERNEL,
- ios->sp2d->pages_in_unit);
- if (unlikely(!per_dev->bio)) {
- ORE_DBGMSG("Failed to allocate BIO size=%u\n",
- ios->sp2d->pages_in_unit);
- return -ENOMEM;
- }
- per_dev->offset = si->obj_offset;
- per_dev->dev = si->dev;
- } else if (si->obj_offset != (per_dev->offset + per_dev->length)) {
- u64 gap = si->obj_offset - (per_dev->offset + per_dev->length);
-
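- /* The pages in the gap need no read (they are already up to
- * date or about to be written); record an sg hole instead of
- * reading them again.
- */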
- _ore_add_sg_seg(per_dev, gap, true);
- }
- q = osd_request_queue(ore_comp_dev(read_ios->oc, per_dev->dev));
- added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len,
- si->obj_offset % PAGE_SIZE);
- if (unlikely(added_len != pg_len)) {
- ORE_DBGMSG("Failed to bio_add_pc_page bi_vcnt=%d\n",
- per_dev->bio->bi_vcnt);
- return -ENOMEM;
- }
-
- per_dev->length += pg_len;
- return 0;
-}
-
-/* read the beginning of an unaligned first page */
-static int _add_to_r4w_first_page(struct ore_io_state *ios, struct page *page)
-{
- struct ore_striping_info si;
- unsigned pg_len;
-
- ore_calc_stripe_info(ios->layout, ios->offset, 0, &si);
-
- pg_len = si.obj_offset % PAGE_SIZE;
- si.obj_offset -= pg_len;
-
- ORE_DBGMSG("offset=0x%llx len=0x%x index=0x%lx dev=%x\n",
- _LLU(si.obj_offset), pg_len, page->index, si.dev);
-
- return _add_to_r4w(ios, &si, page, pg_len);
-}
-
-/* read the end of an incomplete last page */
-static int _add_to_r4w_last_page(struct ore_io_state *ios, u64 *offset)
-{
- struct ore_striping_info si;
- struct page *page;
- unsigned pg_len, p, c;
-
- ore_calc_stripe_info(ios->layout, *offset, 0, &si);
-
- p = si.cur_pg;
- c = si.cur_comp;
- page = ios->sp2d->_1p_stripes[p].pages[c];
-
- pg_len = PAGE_SIZE - (si.unit_off % PAGE_SIZE);
- *offset += pg_len;
-
- ORE_DBGMSG("p=%d, c=%d next-offset=0x%llx len=0x%x dev=%x par_dev=%d\n",
- p, c, _LLU(*offset), pg_len, si.dev, si.par_dev);
-
- BUG_ON(!page);
-
- return _add_to_r4w(ios, &si, page, pg_len);
-}
-
-static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
-{
- struct bio_vec *bv;
- unsigned i, d;
-
- /* loop on all devices all pages */
- for (d = 0; d < ios->numdevs; d++) {
- struct bio *bio = ios->per_dev[d].bio;
- struct bvec_iter_all iter_all;
-
- if (!bio)
- continue;
-
- bio_for_each_segment_all(bv, bio, i, iter_all) {
- struct page *page = bv->bv_page;
-
- SetPageUptodate(page);
- if (PageError(page))
- ClearPageError(page);
- }
- }
-}
-
-/* read_4_write is hacked to read the start of the first stripe and/or
- * the end of the last stripe. If needed, with an sg-gap at each device/page.
- * It is assumed to be called after the to_be_written pages of the first
- * stripe are populating ios->sp2d[][].
- *
- * NOTE: We call ios->r4w->get_page for all pages needed for parity
- * calculations. These pages are held at sp2d[p].pages[c] but with
- * sp2d[p].page_is_read[c] = true. At _sp2d_reset these pages are released
- * via ios->r4w->put_page. The ios->r4w->get_page might signal that the page
- * is @uptodate=true, so we don't need to read it, only unlock, after IO.
- *
- * TODO: read_4_write should calc a need_to_read_pages_count; if bigger than
- * the to-be-written count, we should consider the xor-in-place mode.
- * need_to_read_pages_count is the actual number of pages not present in
- * cache. Maybe "devs_in_group - ios->sp2d[p].write_count" is a good enough
- * approximation? In this mode the read pages are put in the empty places of
- * ios->sp2d[p][*], and the xor is calculated the same way. These pages are
- * allocated/freed and don't go through cache.
- */
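-/* Example: a write that starts and ends mid-page needs the head of its
- * first page (_add_to_r4w_first_page) and the tail of its last page
- * (_add_to_r4w_last_page) read in before parity can be computed over
- * full pages.
- */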
-static int _read_4_write_first_stripe(struct ore_io_state *ios)
-{
- struct ore_striping_info read_si;
- struct __stripe_pages_2d *sp2d = ios->sp2d;
- u64 offset = ios->si.first_stripe_start;
- unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
-
- if (offset == ios->offset) /* Go to start collect $200 */
- goto read_last_stripe;
-
- min_p = _sp2d_min_pg(sp2d);
- max_p = _sp2d_max_pg(sp2d);
-
- ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
- offset, ios->offset, min_p, max_p);
-
- for (c = 0; ; c++) {
- ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
- read_si.obj_offset += min_p * PAGE_SIZE;
- offset += min_p * PAGE_SIZE;
- for (p = min_p; p <= max_p; p++) {
- struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
- struct page **pp = &_1ps->pages[c];
- bool uptodate;
-
- if (*pp) {
- if (ios->offset % PAGE_SIZE)
- /* Read the remainder of the page */
- _add_to_r4w_first_page(ios, *pp);
- /* to-be-written pages start here */
- goto read_last_stripe;
- }
-
- *pp = ios->r4w->get_page(ios->private, offset,
- &uptodate);
- if (unlikely(!*pp))
- return -ENOMEM;
-
- if (!uptodate)
- _add_to_r4w(ios, &read_si, *pp, PAGE_SIZE);
-
- /* Mark read-pages to be cache_released */
- _1ps->page_is_read[c] = true;
- read_si.obj_offset += PAGE_SIZE;
- offset += PAGE_SIZE;
- }
- offset += (sp2d->pages_in_unit - p) * PAGE_SIZE;
- }
-
-read_last_stripe:
- return 0;
-}
-
-static int _read_4_write_last_stripe(struct ore_io_state *ios)
-{
- struct ore_striping_info read_si;
- struct __stripe_pages_2d *sp2d = ios->sp2d;
- u64 offset;
- u64 last_stripe_end;
- unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
- unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
-
- offset = ios->offset + ios->length;
- if (offset % PAGE_SIZE)
- _add_to_r4w_last_page(ios, &offset);
- /* offset will be aligned to next page */
-
- last_stripe_end = div_u64(offset + bytes_in_stripe - 1, bytes_in_stripe)
- * bytes_in_stripe;
- if (offset == last_stripe_end) /* Optimize for the aligned case */
- goto read_it;
-
- ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
- p = read_si.cur_pg;
- c = read_si.cur_comp;
-
- if (min_p == sp2d->pages_in_unit) {
- /* Didn't do it yet */
- min_p = _sp2d_min_pg(sp2d);
- max_p = _sp2d_max_pg(sp2d);
- }
-
- ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
- offset, last_stripe_end, min_p, max_p);
-
- while (offset < last_stripe_end) {
- struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
- if ((min_p <= p) && (p <= max_p)) {
- struct page *page;
- bool uptodate;
-
- BUG_ON(_1ps->pages[c]);
- page = ios->r4w->get_page(ios->private, offset,
- &uptodate);
- if (unlikely(!page))
- return -ENOMEM;
-
- _1ps->pages[c] = page;
- /* Mark read-pages to be cache_released */
- _1ps->page_is_read[c] = true;
- if (!uptodate)
- _add_to_r4w(ios, &read_si, page, PAGE_SIZE);
- }
-
- offset += PAGE_SIZE;
- if (p == (sp2d->pages_in_unit - 1)) {
- ++c;
- p = 0;
- ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
- } else {
- read_si.obj_offset += PAGE_SIZE;
- ++p;
- }
- }
-
-read_it:
- return 0;
-}
-
-static int _read_4_write_execute(struct ore_io_state *ios)
-{
- struct ore_io_state *ios_read;
- unsigned i;
- int ret;
-
- ios_read = ios->ios_read_4_write;
- if (!ios_read)
- return 0;
-
- /* FIXME: Ugly to signal _sbi_read_mirror that we have bio(s). Change
- * to check for per_dev->bio
- */
- ios_read->pages = ios->pages;
-
- /* Now read these devices */
- for (i = 0; i < ios_read->numdevs; i += ios_read->layout->mirrors_p1) {
- ret = _ore_read_mirror(ios_read, i);
- if (unlikely(ret))
- return ret;
- }
-
- ret = ore_io_execute(ios_read); /* Synchronous execution */
- if (unlikely(ret)) {
- ORE_DBGMSG("!! ore_io_execute => %d\n", ret);
- return ret;
- }
-
- _mark_read4write_pages_uptodate(ios_read, ret);
- ore_put_io_state(ios_read);
- ios->ios_read_4_write = NULL; /* Might be reused at the last stripe */
- return 0;
-}
-
-/* In writes @cur_len means length left, i.e. cur_len==0 is the last parity unit */
-int _ore_add_parity_unit(struct ore_io_state *ios,
- struct ore_striping_info *si,
- struct ore_per_dev_state *per_dev,
- unsigned cur_len, bool do_xor)
-{
- if (ios->reading) {
- if (per_dev->cur_sg >= ios->sgs_per_dev) {
- ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n",
- per_dev->cur_sg, ios->sgs_per_dev);
- return -ENOMEM;
- }
- _ore_add_sg_seg(per_dev, cur_len, true);
- } else {
- struct __stripe_pages_2d *sp2d = ios->sp2d;
- struct page **pages = ios->parity_pages + ios->cur_par_page;
- unsigned num_pages;
- unsigned array_start = 0;
- unsigned i;
- int ret;
-
- si->cur_pg = _sp2d_min_pg(sp2d);
- num_pages = _sp2d_max_pg(sp2d) + 1 - si->cur_pg;
-
- if (!per_dev->length) {
- per_dev->offset += si->cur_pg * PAGE_SIZE;
- /* If first stripe, read in all read4write pages
- * (if needed) before we calculate the first parity.
- */
- if (do_xor)
- _read_4_write_first_stripe(ios);
- }
- if (!cur_len && do_xor)
- /* If last stripe, read in the r4w pages of the last stripe */
- _read_4_write_last_stripe(ios);
- _read_4_write_execute(ios);
-
- for (i = 0; i < num_pages; i++) {
- pages[i] = _raid_page_alloc();
- if (unlikely(!pages[i]))
- return -ENOMEM;
-
- ++(ios->cur_par_page);
- }
-
- BUG_ON(si->cur_comp < sp2d->data_devs);
- BUG_ON(si->cur_pg + num_pages > sp2d->pages_in_unit);
-
- ret = _ore_add_stripe_unit(ios, &array_start, 0, pages,
- per_dev, num_pages * PAGE_SIZE);
- if (unlikely(ret))
- return ret;
-
- if (do_xor) {
- _gen_xor_unit(sp2d);
- _sp2d_reset(sp2d, ios->r4w, ios->private);
- }
- }
- return 0;
-}
-
-int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
-{
- if (ios->parity_pages) {
- struct ore_layout *layout = ios->layout;
- unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
-
- if (_sp2d_alloc(pages_in_unit, layout->group_width,
- layout->parity, &ios->sp2d)) {
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-void _ore_free_raid_stuff(struct ore_io_state *ios)
-{
- if (ios->sp2d) { /* writing and raid */
- unsigned i;
-
- for (i = 0; i < ios->cur_par_page; i++) {
- struct page *page = ios->parity_pages[i];
-
- if (page)
- _raid_page_free(page);
- }
- if (ios->extra_part_alloc)
- kfree(ios->parity_pages);
- /* If IO returned an error, pages might need unlocking */
- _sp2d_reset(ios->sp2d, ios->r4w, ios->private);
- _sp2d_free(ios->sp2d);
- } else {
- /* Will only be set if raid reading && sglist is big */
- if (ios->extra_part_alloc)
- kfree(ios->per_dev[0].sglist);
- }
- if (ios->ios_read_4_write)
- ore_put_io_state(ios->ios_read_4_write);
-}
diff --git a/fs/exofs/ore_raid.h b/fs/exofs/ore_raid.h
deleted file mode 100644
index a6e746775570..000000000000
--- a/fs/exofs/ore_raid.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) from 2011
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * This file is part of the objects raid engine (ore).
- *
- * It is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with "ore". If not, write to the Free Software Foundation, Inc:
- * "Free Software Foundation <info@fsf.org>"
- */
-
-#include <scsi/osd_ore.h>
-
-#define ORE_ERR(fmt, a...) printk(KERN_ERR "ore: " fmt, ##a)
-
-#ifdef CONFIG_EXOFS_DEBUG
-#define ORE_DBGMSG(fmt, a...) \
- printk(KERN_NOTICE "ore @%s:%d: " fmt, __func__, __LINE__, ##a)
-#else
-#define ORE_DBGMSG(fmt, a...) \
- do { if (0) printk(fmt, ##a); } while (0)
-#endif
-
-/* u64 has problems with printk; this will cast it to unsigned long long */
-#define _LLU(x) (unsigned long long)(x)
-
-#define ORE_DBGMSG2(M...) do {} while (0)
-/* #define ORE_DBGMSG2 ORE_DBGMSG */
-
-/* ore_raid.c stuff needed by ore.c */
-int _ore_post_alloc_raid_stuff(struct ore_io_state *ios);
-void _ore_free_raid_stuff(struct ore_io_state *ios);
-
-void _ore_add_sg_seg(struct ore_per_dev_state *per_dev, unsigned cur_len,
- bool not_last);
-int _ore_add_parity_unit(struct ore_io_state *ios, struct ore_striping_info *si,
- struct ore_per_dev_state *per_dev, unsigned cur_len,
- bool do_xor);
-void _ore_add_stripe_page(struct __stripe_pages_2d *sp2d,
- struct ore_striping_info *si, struct page *page);
-static inline void _add_stripe_page(struct __stripe_pages_2d *sp2d,
- struct ore_striping_info *si, struct page *page)
-{
- if (!sp2d) /* Inline the fast path */
- return; /* There is no raid stuff */
- _ore_add_stripe_page(sp2d, si, page);
-}
-
-/* ore.c stuff needed by ore_raid.c */
-int _ore_get_io_state(struct ore_layout *layout,
- struct ore_components *oc, unsigned numdevs,
- unsigned sgs_per_dev, unsigned num_par_pages,
- struct ore_io_state **pios);
-int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
- unsigned pgbase, struct page **pages,
- struct ore_per_dev_state *per_dev, int cur_len);
-int _ore_read_mirror(struct ore_io_state *ios, unsigned cur_comp);
-int ore_io_execute(struct ore_io_state *ios);
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
deleted file mode 100644
index fc80c7233fa5..000000000000
--- a/fs/exofs/super.c
+++ /dev/null
@@ -1,1071 +0,0 @@
-/*
- * Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com)
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * Copyrights for code taken from ext2:
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- * from
- * linux/fs/minix/inode.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation. Since it is based on ext2, and the only
- * valid version of GPL for the Linux kernel is version 2, the only valid
- * version of GPL for exofs is version 2.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/string.h>
-#include <linux/parser.h>
-#include <linux/vfs.h>
-#include <linux/random.h>
-#include <linux/module.h>
-#include <linux/exportfs.h>
-#include <linux/slab.h>
-#include <linux/iversion.h>
-
-#include "exofs.h"
-
-#define EXOFS_DBGMSG2(M...) do {} while (0)
-
-/******************************************************************************
- * MOUNT OPTIONS
- *****************************************************************************/
-
-/*
- * struct to hold what we get from mount options
- */
-struct exofs_mountopt {
- bool is_osdname;
- const char *dev_name;
- uint64_t pid;
- int timeout;
-};
-
-/*
- * exofs-specific mount-time options.
- */
-enum { Opt_name, Opt_pid, Opt_to, Opt_err };
-
-/*
- * Our mount-time options. These should ideally be 64-bit unsigned, but the
- * kernel's parsing functions do not currently support that. 32-bit should be
- * sufficient for most applications now.
- */
-static match_table_t tokens = {
- {Opt_name, "osdname=%s"},
- {Opt_pid, "pid=%u"},
- {Opt_to, "to=%u"},
- {Opt_err, NULL}
-};
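-/* Illustrative usage (device path and values below are examples, not
- * mandated by this file):
- *   mount -t exofs -o pid=0x10000,to=20 /dev/osd0 /mnt/exofs
- * or, looking the device up by OSD name instead of path:
- *   mount -t exofs -o osdname=my_osd,pid=0x10000 none /mnt/exofs
- */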
-
-/*
- * The main option parsing method. Also makes sure that all of the mandatory
- * mount options were set.
- */
-static int parse_options(char *options, struct exofs_mountopt *opts)
-{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- bool s_pid = false;
-
- EXOFS_DBGMSG("parse_options %s\n", options);
- /* defaults */
- memset(opts, 0, sizeof(*opts));
- opts->timeout = BLK_DEFAULT_SG_TIMEOUT;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- char str[32];
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_name:
- kfree(opts->dev_name);
- opts->dev_name = match_strdup(&args[0]);
- if (unlikely(!opts->dev_name)) {
- EXOFS_ERR("Error allocating dev_name");
- return -ENOMEM;
- }
- opts->is_osdname = true;
- break;
- case Opt_pid:
- if (0 == match_strlcpy(str, &args[0], sizeof(str)))
- return -EINVAL;
- opts->pid = simple_strtoull(str, NULL, 0);
- if (opts->pid < EXOFS_MIN_PID) {
- EXOFS_ERR("Partition ID must be >= %u",
- EXOFS_MIN_PID);
- return -EINVAL;
- }
- s_pid = true;
- break;
- case Opt_to:
- if (match_int(&args[0], &option))
- return -EINVAL;
- if (option <= 0) {
- EXOFS_ERR("Timeout must be > 0");
- return -EINVAL;
- }
- opts->timeout = option * HZ;
- break;
- }
- }
-
- if (!s_pid) {
- EXOFS_ERR("Need to specify the following options:\n");
- EXOFS_ERR(" -o pid=pid_no_to_use\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/******************************************************************************
- * INODE CACHE
- *****************************************************************************/
-
-/*
- * Our inode cache. Isn't it pretty?
- */
-static struct kmem_cache *exofs_inode_cachep;
-
-/*
- * Allocate an inode in the cache
- */
-static struct inode *exofs_alloc_inode(struct super_block *sb)
-{
- struct exofs_i_info *oi;
-
- oi = kmem_cache_alloc(exofs_inode_cachep, GFP_KERNEL);
- if (!oi)
- return NULL;
-
- inode_set_iversion(&oi->vfs_inode, 1);
- return &oi->vfs_inode;
-}
-
-static void exofs_i_callback(struct rcu_head *head)
-{
- struct inode *inode = container_of(head, struct inode, i_rcu);
- kmem_cache_free(exofs_inode_cachep, exofs_i(inode));
-}
-
-/*
- * Remove an inode from the cache
- */
-static void exofs_destroy_inode(struct inode *inode)
-{
- call_rcu(&inode->i_rcu, exofs_i_callback);
-}
-
-/*
- * Initialize the inode
- */
-static void exofs_init_once(void *foo)
-{
- struct exofs_i_info *oi = foo;
-
- inode_init_once(&oi->vfs_inode);
-}
-
-/*
- * Create and initialize the inode cache
- */
-static int init_inodecache(void)
-{
- exofs_inode_cachep = kmem_cache_create_usercopy("exofs_inode_cache",
- sizeof(struct exofs_i_info), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
- SLAB_ACCOUNT,
- offsetof(struct exofs_i_info, i_data),
- sizeof_field(struct exofs_i_info, i_data),
- exofs_init_once);
- if (exofs_inode_cachep == NULL)
- return -ENOMEM;
- return 0;
-}
-
-/*
- * Destroy the inode cache
- */
-static void destroy_inodecache(void)
-{
- /*
- * Make sure all delayed rcu free inodes are flushed before we
- * destroy cache.
- */
- rcu_barrier();
- kmem_cache_destroy(exofs_inode_cachep);
-}
-
-/******************************************************************************
- * Some osd helpers
- *****************************************************************************/
-void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
-{
- osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
-}
-
-static int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
- u64 offset, void *p, unsigned length)
-{
- struct osd_request *or = osd_start_request(od);
-/* struct osd_sense_info osi = {.key = 0};*/
- int ret;
-
- if (unlikely(!or)) {
- EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
- return -ENOMEM;
- }
- ret = osd_req_read_kern(or, obj, offset, p, length);
- if (unlikely(ret)) {
- EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
- goto out;
- }
-
- ret = osd_finalize_request(or, 0, cred, NULL);
- if (unlikely(ret)) {
- EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
- goto out;
- }
-
- ret = osd_execute_request(or);
- if (unlikely(ret))
- EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
- /* osd_req_decode_sense(or, ret); */
-
-out:
- osd_end_request(or);
- EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
- "length=0x%llx dev=%p ret=>%d\n",
- _LLU(obj->id), _LLU(offset), _LLU(length), od, ret);
- return ret;
-}
-
-static const struct osd_attr g_attr_sb_stats = ATTR_DEF(
- EXOFS_APAGE_SB_DATA,
- EXOFS_ATTR_SB_STATS,
- sizeof(struct exofs_sb_stats));
-
-static int __sbi_read_stats(struct exofs_sb_info *sbi)
-{
- struct osd_attr attrs[] = {
- [0] = g_attr_sb_stats,
- };
- struct ore_io_state *ios;
- int ret;
-
- ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios);
- if (unlikely(ret)) {
- EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
- return ret;
- }
-
- ios->in_attr = attrs;
- ios->in_attr_len = ARRAY_SIZE(attrs);
-
- ret = ore_read(ios);
- if (unlikely(ret)) {
- EXOFS_ERR("Error reading super_block stats => %d\n", ret);
- goto out;
- }
-
- ret = extract_attr_from_ios(ios, &attrs[0]);
- if (ret) {
- EXOFS_ERR("%s: extract_attr of sb_stats failed\n", __func__);
- goto out;
- }
- if (attrs[0].len) {
- struct exofs_sb_stats *ess;
-
- if (unlikely(attrs[0].len != sizeof(*ess))) {
- EXOFS_ERR("%s: Wrong version of exofs_sb_stats "
- "size(%d) != expected(%zd)\n",
- __func__, attrs[0].len, sizeof(*ess));
- goto out;
- }
-
- ess = attrs[0].val_ptr;
- sbi->s_nextid = le64_to_cpu(ess->s_nextid);
- sbi->s_numfiles = le32_to_cpu(ess->s_numfiles);
- }
-
-out:
- ore_put_io_state(ios);
- return ret;
-}
-
-static void stats_done(struct ore_io_state *ios, void *p)
-{
- ore_put_io_state(ios);
- /* Good, thanks. Nothing to do anymore */
-}
-
-/* Asynchronously write the stats attribute */
-int exofs_sbi_write_stats(struct exofs_sb_info *sbi)
-{
- struct osd_attr attrs[] = {
- [0] = g_attr_sb_stats,
- };
- struct ore_io_state *ios;
- int ret;
-
- ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios);
- if (unlikely(ret)) {
- EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
- return ret;
- }
-
- sbi->s_ess.s_nextid = cpu_to_le64(sbi->s_nextid);
- sbi->s_ess.s_numfiles = cpu_to_le64(sbi->s_numfiles);
- attrs[0].val_ptr = &sbi->s_ess;
-
- ios->done = stats_done;
- ios->private = sbi;
- ios->out_attr = attrs;
- ios->out_attr_len = ARRAY_SIZE(attrs);
-
- ret = ore_write(ios);
- if (unlikely(ret)) {
- EXOFS_ERR("%s: ore_write failed.\n", __func__);
- ore_put_io_state(ios);
- }
-
- return ret;
-}
-
-/******************************************************************************
- * SUPERBLOCK FUNCTIONS
- *****************************************************************************/
-static const struct super_operations exofs_sops;
-static const struct export_operations exofs_export_ops;
-
-/*
- * Write the superblock to the OSD
- */
-static int exofs_sync_fs(struct super_block *sb, int wait)
-{
- struct exofs_sb_info *sbi;
- struct exofs_fscb *fscb;
- struct ore_comp one_comp;
- struct ore_components oc;
- struct ore_io_state *ios;
- int ret = -ENOMEM;
-
- fscb = kmalloc(sizeof(*fscb), GFP_KERNEL);
- if (unlikely(!fscb))
- return -ENOMEM;
-
- sbi = sb->s_fs_info;
-
- /* NOTE: We no longer dirty the super_block anywhere in exofs. The
- * reason we write the fscb here on unmount is so we can stay backwards
- * compatible with fscb->s_version == 1. (What we are not compatible
- * with is a new-version FS that crashed and is then mounted by an old
- * version.) Otherwise the exofs_fscb is read-only from mkfs time. All
- * the writeable info is set in exofs_sbi_write_stats() above.
- */
-
- exofs_init_comps(&oc, &one_comp, sbi, EXOFS_SUPER_ID);
-
- ret = ore_get_io_state(&sbi->layout, &oc, &ios);
- if (unlikely(ret))
- goto out;
-
- ios->length = offsetof(struct exofs_fscb, s_dev_table_oid);
- memset(fscb, 0, ios->length);
- fscb->s_nextid = cpu_to_le64(sbi->s_nextid);
- fscb->s_numfiles = cpu_to_le64(sbi->s_numfiles);
- fscb->s_magic = cpu_to_le16(sb->s_magic);
- fscb->s_newfs = 0;
- fscb->s_version = EXOFS_FSCB_VER;
-
- ios->offset = 0;
- ios->kern_buff = fscb;
-
- ret = ore_write(ios);
- if (unlikely(ret))
- EXOFS_ERR("%s: ore_write failed.\n", __func__);
-
-out:
- EXOFS_DBGMSG("s_nextid=0x%llx ret=%d\n", _LLU(sbi->s_nextid), ret);
- ore_put_io_state(ios);
- kfree(fscb);
- return ret;
-}
-
-static void _exofs_print_device(const char *msg, const char *dev_path,
- struct osd_dev *od, u64 pid)
-{
- const struct osd_dev_info *odi = osduld_device_info(od);
-
- printk(KERN_NOTICE "exofs: %s %s osd_name-%s pid-0x%llx\n",
- msg, dev_path ?: "", odi->osdname, _LLU(pid));
-}
-
-static void exofs_free_sbi(struct exofs_sb_info *sbi)
-{
- unsigned numdevs = sbi->oc.numdevs;
-
- while (numdevs) {
- unsigned i = --numdevs;
- struct osd_dev *od = ore_comp_dev(&sbi->oc, i);
-
- if (od) {
- ore_comp_set_dev(&sbi->oc, i, NULL);
- osduld_put_device(od);
- }
- }
- kfree(sbi->oc.ods);
- kfree(sbi);
-}
-
-/*
- * This function is called when the vfs is freeing the superblock. We just
- * need to free our own part.
- */
-static void exofs_put_super(struct super_block *sb)
-{
- int num_pend;
- struct exofs_sb_info *sbi = sb->s_fs_info;
-
- /* make sure there are no pending commands */
- for (num_pend = atomic_read(&sbi->s_curr_pending); num_pend > 0;
- num_pend = atomic_read(&sbi->s_curr_pending)) {
- wait_queue_head_t wq;
-
- printk(KERN_NOTICE "%s: !!Pending operations in flight. "
- "This is a BUG. Please report to osd-dev@open-osd.org\n",
- __func__);
- init_waitqueue_head(&wq);
- wait_event_timeout(wq,
- (atomic_read(&sbi->s_curr_pending) == 0),
- msecs_to_jiffies(100));
- }
-
- _exofs_print_device("Unmounting", NULL, ore_comp_dev(&sbi->oc, 0),
- sbi->one_comp.obj.partition);
-
- exofs_sysfs_sb_del(sbi);
- exofs_free_sbi(sbi);
- sb->s_fs_info = NULL;
-}
-
-static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
- struct exofs_device_table *dt)
-{
- int ret;
-
- sbi->layout.stripe_unit =
- le64_to_cpu(dt->dt_data_map.cb_stripe_unit);
- sbi->layout.group_width =
- le32_to_cpu(dt->dt_data_map.cb_group_width);
- sbi->layout.group_depth =
- le32_to_cpu(dt->dt_data_map.cb_group_depth);
- sbi->layout.mirrors_p1 =
- le32_to_cpu(dt->dt_data_map.cb_mirror_cnt) + 1;
- sbi->layout.raid_algorithm =
- le32_to_cpu(dt->dt_data_map.cb_raid_algorithm);
-
- ret = ore_verify_layout(numdevs, &sbi->layout);
-
- EXOFS_DBGMSG("exofs: layout: "
- "num_comps=%u stripe_unit=0x%x group_width=%u "
- "group_depth=0x%llx mirrors_p1=%u raid_algorithm=%u\n",
- numdevs,
- sbi->layout.stripe_unit,
- sbi->layout.group_width,
- _LLU(sbi->layout.group_depth),
- sbi->layout.mirrors_p1,
- sbi->layout.raid_algorithm);
- return ret;
-}
-
-static unsigned __ra_pages(struct ore_layout *layout)
-{
- const unsigned _MIN_RA = 32; /* min 128K read-ahead */
- unsigned ra_pages = layout->group_width * layout->stripe_unit /
- PAGE_SIZE;
- unsigned max_io_pages = exofs_max_io_pages(layout, ~0);
-
- ra_pages *= 2; /* two stripes */
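- /* e.g. group_width=4, stripe_unit=64K, 4K pages:
- * 4 * 16 * 2 = 128 pages (512K) of read-ahead, before the
- * clamping below.
- */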
- if (ra_pages < _MIN_RA)
- ra_pages = roundup(_MIN_RA, ra_pages / 2);
-
- if (ra_pages > max_io_pages)
- ra_pages = max_io_pages;
-
- return ra_pages;
-}
-
-/* @odi is valid only as long as @dt_dev is valid */
-static int exofs_devs_2_odi(struct exofs_dt_device_info *dt_dev,
- struct osd_dev_info *odi)
-{
- odi->systemid_len = le32_to_cpu(dt_dev->systemid_len);
- if (likely(odi->systemid_len))
- memcpy(odi->systemid, dt_dev->systemid, OSD_SYSTEMID_LEN);
-
- odi->osdname_len = le32_to_cpu(dt_dev->osdname_len);
- odi->osdname = dt_dev->osdname;
-
- /* FIXME support long names. Will need a _put function */
- if (dt_dev->long_name_offset)
- return -EINVAL;
-
- /* Make sure osdname is printable!
- * mkexofs should give us space for a null-terminator; otherwise the
- * device-table is invalid.
- */
- if (unlikely(odi->osdname_len >= sizeof(dt_dev->osdname)))
- odi->osdname_len = sizeof(dt_dev->osdname) - 1;
- dt_dev->osdname[odi->osdname_len] = 0;
-
- /* If it's all zeros something is bad: we read past end-of-obj */
- return !(odi->systemid_len || odi->osdname_len);
-}
-
-static int __alloc_dev_table(struct exofs_sb_info *sbi, unsigned numdevs,
- struct exofs_dev **peds)
-{
- /* A twice-bigger table: see exofs_init_comps() and the comment at
- * exofs_read_lookup_dev_table()
- */
- const size_t numores = numdevs * 2 - 1;
- struct exofs_dev *eds;
- unsigned i;
-
- sbi->oc.ods = kzalloc(numores * sizeof(struct ore_dev *) +
- numdevs * sizeof(struct exofs_dev), GFP_KERNEL);
- if (unlikely(!sbi->oc.ods)) {
- EXOFS_ERR("ERROR: failed allocating Device array[%d]\n",
- numdevs);
- return -ENOMEM;
- }
-
- /* Start of allocated struct exofs_dev entries */
- *peds = eds = (void *)&sbi->oc.ods[numores];
- /* Initialize pointers into struct exofs_dev */
- for (i = 0; i < numdevs; ++i)
- sbi->oc.ods[i] = &eds[i].ored;
- return 0;
-}
-
-static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
- struct osd_dev *fscb_od,
- unsigned table_count)
-{
- struct ore_comp comp;
- struct exofs_device_table *dt;
- struct exofs_dev *eds;
- unsigned table_bytes = table_count * sizeof(dt->dt_dev_table[0]) +
- sizeof(*dt);
- unsigned numdevs, i;
- int ret;
-
- dt = kmalloc(table_bytes, GFP_KERNEL);
- if (unlikely(!dt)) {
- EXOFS_ERR("ERROR: allocating %x bytes for device table\n",
- table_bytes);
- return -ENOMEM;
- }
-
- sbi->oc.numdevs = 0;
-
- comp.obj.partition = sbi->one_comp.obj.partition;
- comp.obj.id = EXOFS_DEVTABLE_ID;
- exofs_make_credential(comp.cred, &comp.obj);
-
- ret = exofs_read_kern(fscb_od, comp.cred, &comp.obj, 0, dt,
- table_bytes);
- if (unlikely(ret)) {
- EXOFS_ERR("ERROR: reading device table\n");
- goto out;
- }
-
- numdevs = le64_to_cpu(dt->dt_num_devices);
- if (unlikely(!numdevs)) {
- ret = -EINVAL;
- goto out;
- }
- WARN_ON(table_count != numdevs);
-
- ret = _read_and_match_data_map(sbi, numdevs, dt);
- if (unlikely(ret))
- goto out;
-
- ret = __alloc_dev_table(sbi, numdevs, &eds);
- if (unlikely(ret))
- goto out;
- /* exofs round-robins the device table view according to inode
- * number. We hold a twice-bigger table, hence inodes can point
- * to any device and have a sequential view of the table
- * starting at this device. See exofs_init_comps()
- */
- memcpy(&sbi->oc.ods[numdevs], &sbi->oc.ods[0],
- (numdevs - 1) * sizeof(sbi->oc.ods[0]));
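- /* e.g. numdevs=4: ods[] = { D0, D1, D2, D3, D0, D1, D2 }, so a
- * window of numdevs entries starting at any device index sees all
- * devices sequentially.
- */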
-
- /* Create a sysfs subdir under which we put the device table
- * and cluster layout. A superblock is identified by the string:
- * "dev[0].osdname"_"pid"
- */
- exofs_sysfs_sb_add(sbi, &dt->dt_dev_table[0]);
-
- for (i = 0; i < numdevs; i++) {
- struct exofs_fscb fscb;
- struct osd_dev_info odi;
- struct osd_dev *od;
-
- if (exofs_devs_2_odi(&dt->dt_dev_table[i], &odi)) {
- EXOFS_ERR("ERROR: Read all-zeros device entry\n");
- ret = -EINVAL;
- goto out;
- }
-
- printk(KERN_NOTICE "Add device[%d]: osd_name-%s\n",
- i, odi.osdname);
-
- /* the exofs id is currently the table index */
- eds[i].did = i;
-
- /* On all devices the device table is identical. The user can
- * specify any one of the participating devices on the command
- * line. We always keep them in device-table order.
- */
- if (fscb_od && osduld_device_same(fscb_od, &odi)) {
- eds[i].ored.od = fscb_od;
- ++sbi->oc.numdevs;
- fscb_od = NULL;
- exofs_sysfs_odev_add(&eds[i], sbi);
- continue;
- }
-
- od = osduld_info_lookup(&odi);
- if (IS_ERR(od)) {
- ret = PTR_ERR(od);
- EXOFS_ERR("ERROR: device requested is not found "
- "osd_name-%s =>%d\n", odi.osdname, ret);
- goto out;
- }
-
- eds[i].ored.od = od;
- ++sbi->oc.numdevs;
-
- /* Read the fscb of the other devices to make sure the FS
- * partition is there.
- */
- ret = exofs_read_kern(od, comp.cred, &comp.obj, 0, &fscb,
- sizeof(fscb));
- if (unlikely(ret)) {
- EXOFS_ERR("ERROR: Malformed participating device "
- "error reading fscb osd_name-%s\n",
- odi.osdname);
- goto out;
- }
- exofs_sysfs_odev_add(&eds[i], sbi);
-
- /* TODO: verify other information is correct and FS-uuid
- * matches. Benny, what did you say about device table
- * generation and old devices?
- */
- }
-
-out:
- kfree(dt);
- if (unlikely(fscb_od && !ret)) {
- EXOFS_ERR("ERROR: Bad device-table container device not present\n");
- osduld_put_device(fscb_od);
- return -EINVAL;
- }
- return ret;
-}
-
-/*
- * Read the superblock from the OSD and fill in the fields
- */
-static int exofs_fill_super(struct super_block *sb,
- struct exofs_mountopt *opts,
- struct exofs_sb_info *sbi,
- int silent)
-{
- struct inode *root;
- struct osd_dev *od; /* Master device */
- struct exofs_fscb fscb; /* on-disk superblock info */
- struct ore_comp comp;
- unsigned table_count;
- int ret;
-
- /* use mount options to fill superblock */
- if (opts->is_osdname) {
- struct osd_dev_info odi = {.systemid_len = 0};
-
- odi.osdname_len = strlen(opts->dev_name);
- odi.osdname = (u8 *)opts->dev_name;
- od = osduld_info_lookup(&odi);
- kfree(opts->dev_name);
- opts->dev_name = NULL;
- } else {
- od = osduld_path_lookup(opts->dev_name);
- }
- if (IS_ERR(od)) {
- ret = -EINVAL;
- goto free_sbi;
- }
-
- /* Default layout in case we do not have a device-table */
- sbi->layout.stripe_unit = PAGE_SIZE;
- sbi->layout.mirrors_p1 = 1;
- sbi->layout.group_width = 1;
- sbi->layout.group_depth = -1;
- sbi->layout.group_count = 1;
- sbi->s_timeout = opts->timeout;
-
- sbi->one_comp.obj.partition = opts->pid;
- sbi->one_comp.obj.id = 0;
- exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
- sbi->oc.single_comp = EC_SINGLE_COMP;
- sbi->oc.comps = &sbi->one_comp;
-
- /* fill in some other data by hand */
- memset(sb->s_id, 0, sizeof(sb->s_id));
- strcpy(sb->s_id, "exofs");
- sb->s_blocksize = EXOFS_BLKSIZE;
- sb->s_blocksize_bits = EXOFS_BLKSHIFT;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_max_links = EXOFS_LINK_MAX;
- atomic_set(&sbi->s_curr_pending, 0);
- sb->s_bdev = NULL;
- sb->s_dev = 0;
-
- comp.obj.partition = sbi->one_comp.obj.partition;
- comp.obj.id = EXOFS_SUPER_ID;
- exofs_make_credential(comp.cred, &comp.obj);
-
- ret = exofs_read_kern(od, comp.cred, &comp.obj, 0, &fscb, sizeof(fscb));
- if (unlikely(ret))
- goto free_sbi;
-
- sb->s_magic = le16_to_cpu(fscb.s_magic);
- /* NOTE: we read below to be backward compatible with old versions */
- sbi->s_nextid = le64_to_cpu(fscb.s_nextid);
- sbi->s_numfiles = le32_to_cpu(fscb.s_numfiles);
-
- /* make sure what we read from the object store is correct */
- if (sb->s_magic != EXOFS_SUPER_MAGIC) {
- if (!silent)
- EXOFS_ERR("ERROR: Bad magic value\n");
- ret = -EINVAL;
- goto free_sbi;
- }
- if (le32_to_cpu(fscb.s_version) > EXOFS_FSCB_VER) {
- EXOFS_ERR("ERROR: Bad FSCB version expected-%d got-%d\n",
- EXOFS_FSCB_VER, le32_to_cpu(fscb.s_version));
- ret = -EINVAL;
- goto free_sbi;
- }
-
- /* start generation numbers from a random point */
- get_random_bytes(&sbi->s_next_generation, sizeof(u32));
- spin_lock_init(&sbi->s_next_gen_lock);
-
- table_count = le64_to_cpu(fscb.s_dev_table_count);
- if (table_count) {
- ret = exofs_read_lookup_dev_table(sbi, od, table_count);
- if (unlikely(ret))
- goto free_sbi;
- } else {
- struct exofs_dev *eds;
-
- ret = __alloc_dev_table(sbi, 1, &eds);
- if (unlikely(ret))
- goto free_sbi;
-
- ore_comp_set_dev(&sbi->oc, 0, od);
- sbi->oc.numdevs = 1;
- }
-
- __sbi_read_stats(sbi);
-
- /* set up operation vectors */
- ret = super_setup_bdi(sb);
- if (ret) {
- EXOFS_DBGMSG("Failed to super_setup_bdi\n");
- goto free_sbi;
- }
- sb->s_bdi->ra_pages = __ra_pages(&sbi->layout);
- sb->s_fs_info = sbi;
- sb->s_op = &exofs_sops;
- sb->s_export_op = &exofs_export_ops;
- root = exofs_iget(sb, EXOFS_ROOT_ID - EXOFS_OBJ_OFF);
- if (IS_ERR(root)) {
- EXOFS_ERR("ERROR: exofs_iget failed\n");
- ret = PTR_ERR(root);
- goto free_sbi;
- }
- sb->s_root = d_make_root(root);
- if (!sb->s_root) {
- EXOFS_ERR("ERROR: get root inode failed\n");
- ret = -ENOMEM;
- goto free_sbi;
- }
-
- if (!S_ISDIR(root->i_mode)) {
- dput(sb->s_root);
- sb->s_root = NULL;
- EXOFS_ERR("ERROR: corrupt root inode (mode = %hd)\n",
- root->i_mode);
- ret = -EINVAL;
- goto free_sbi;
- }
-
- exofs_sysfs_dbg_print();
- _exofs_print_device("Mounting", opts->dev_name,
- ore_comp_dev(&sbi->oc, 0),
- sbi->one_comp.obj.partition);
- return 0;
-
-free_sbi:
- EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
- opts->dev_name, sbi->one_comp.obj.partition, ret);
- exofs_free_sbi(sbi);
- return ret;
-}
-
-/*
- * Set up the superblock (calls exofs_fill_super eventually)
- */
-static struct dentry *exofs_mount(struct file_system_type *type,
- int flags, const char *dev_name,
- void *data)
-{
- struct super_block *s;
- struct exofs_mountopt opts;
- struct exofs_sb_info *sbi;
- int ret;
-
- ret = parse_options(data, &opts);
- if (ret) {
- kfree(opts.dev_name);
- return ERR_PTR(ret);
- }
-
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi) {
- kfree(opts.dev_name);
- return ERR_PTR(-ENOMEM);
- }
-
- s = sget(type, NULL, set_anon_super, flags, NULL);
-
- if (IS_ERR(s)) {
- kfree(opts.dev_name);
- kfree(sbi);
- return ERR_CAST(s);
- }
-
- if (!opts.dev_name)
- opts.dev_name = dev_name;
-
-
- ret = exofs_fill_super(s, &opts, sbi, flags & SB_SILENT ? 1 : 0);
- if (ret) {
- deactivate_locked_super(s);
- return ERR_PTR(ret);
- }
- s->s_flags |= SB_ACTIVE;
- return dget(s->s_root);
-}
-
-/*
- * Return information about the file system state in the buffer. This is used
- * by the 'df' command, for example.
- */
-static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
-{
- struct super_block *sb = dentry->d_sb;
- struct exofs_sb_info *sbi = sb->s_fs_info;
- struct ore_io_state *ios;
- struct osd_attr attrs[] = {
- ATTR_DEF(OSD_APAGE_PARTITION_QUOTAS,
- OSD_ATTR_PQ_CAPACITY_QUOTA, sizeof(__be64)),
- ATTR_DEF(OSD_APAGE_PARTITION_INFORMATION,
- OSD_ATTR_PI_USED_CAPACITY, sizeof(__be64)),
- };
- uint64_t capacity = ULLONG_MAX;
- uint64_t used = ULLONG_MAX;
- int ret;
-
- ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios);
- if (ret) {
- EXOFS_DBGMSG("ore_get_io_state failed.\n");
- return ret;
- }
-
- ios->in_attr = attrs;
- ios->in_attr_len = ARRAY_SIZE(attrs);
-
- ret = ore_read(ios);
- if (unlikely(ret))
- goto out;
-
- ret = extract_attr_from_ios(ios, &attrs[0]);
- if (likely(!ret)) {
- capacity = get_unaligned_be64(attrs[0].val_ptr);
- if (unlikely(!capacity))
- capacity = ULLONG_MAX;
- } else
- EXOFS_DBGMSG("exofs_statfs: get capacity failed.\n");
-
- ret = extract_attr_from_ios(ios, &attrs[1]);
- if (likely(!ret))
- used = get_unaligned_be64(attrs[1].val_ptr);
- else
- EXOFS_DBGMSG("exofs_statfs: get used-space failed.\n");
-
- /* fill in the stats buffer */
- buf->f_type = EXOFS_SUPER_MAGIC;
- buf->f_bsize = EXOFS_BLKSIZE;
- buf->f_blocks = capacity >> 9;
- buf->f_bfree = (capacity - used) >> 9;
- buf->f_bavail = buf->f_bfree;
- buf->f_files = sbi->s_numfiles;
- buf->f_ffree = EXOFS_MAX_ID - sbi->s_numfiles;
- buf->f_namelen = EXOFS_NAME_LEN;
-
-out:
- ore_put_io_state(ios);
- return ret;
-}
-
-static const struct super_operations exofs_sops = {
- .alloc_inode = exofs_alloc_inode,
- .destroy_inode = exofs_destroy_inode,
- .write_inode = exofs_write_inode,
- .evict_inode = exofs_evict_inode,
- .put_super = exofs_put_super,
- .sync_fs = exofs_sync_fs,
- .statfs = exofs_statfs,
-};
-
-/******************************************************************************
- * EXPORT OPERATIONS
- *****************************************************************************/
-
-static struct dentry *exofs_get_parent(struct dentry *child)
-{
- unsigned long ino = exofs_parent_ino(child);
-
- if (!ino)
- return ERR_PTR(-ESTALE);
-
- return d_obtain_alias(exofs_iget(child->d_sb, ino));
-}
-
-static struct inode *exofs_nfs_get_inode(struct super_block *sb,
- u64 ino, u32 generation)
-{
- struct inode *inode;
-
- inode = exofs_iget(sb, ino);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
- if (generation && inode->i_generation != generation) {
- /* we didn't find the right inode.. */
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
- return inode;
-}
-
-static struct dentry *exofs_fh_to_dentry(struct super_block *sb,
- struct fid *fid, int fh_len, int fh_type)
-{
- return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
- exofs_nfs_get_inode);
-}
-
-static struct dentry *exofs_fh_to_parent(struct super_block *sb,
- struct fid *fid, int fh_len, int fh_type)
-{
- return generic_fh_to_parent(sb, fid, fh_len, fh_type,
- exofs_nfs_get_inode);
-}
-
-static const struct export_operations exofs_export_ops = {
- .fh_to_dentry = exofs_fh_to_dentry,
- .fh_to_parent = exofs_fh_to_parent,
- .get_parent = exofs_get_parent,
-};
-
-/******************************************************************************
- * INSMOD/RMMOD
- *****************************************************************************/
-
-/*
- * struct that describes this file system
- */
-static struct file_system_type exofs_type = {
- .owner = THIS_MODULE,
- .name = "exofs",
- .mount = exofs_mount,
- .kill_sb = generic_shutdown_super,
-};
-MODULE_ALIAS_FS("exofs");
-
-static int __init init_exofs(void)
-{
- int err;
-
- err = init_inodecache();
- if (err)
- goto out;
-
- err = register_filesystem(&exofs_type);
- if (err)
- goto out_d;
-
-	/* We don't fail if sysfs creation fails */
- exofs_sysfs_init();
-
- return 0;
-out_d:
- destroy_inodecache();
-out:
- return err;
-}
-
-static void __exit exit_exofs(void)
-{
- exofs_sysfs_uninit();
- unregister_filesystem(&exofs_type);
- destroy_inodecache();
-}
-
-MODULE_AUTHOR("Avishay Traeger <avishay@gmail.com>");
-MODULE_DESCRIPTION("exofs");
-MODULE_LICENSE("GPL");
-
-module_init(init_exofs)
-module_exit(exit_exofs)
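The deleted exofs_mount() above is a textbook instance of the pre-fs_context mount path: parse options, allocate the private sb_info, obtain an anonymous superblock with sget(), and let the fill routine own cleanup of its allocations on failure. A hedged sketch of that shape follows; example_mount and example_fill_super are illustrative stand-ins, not real kernel symbols, and this is kernel-style code that is not compilable standalone.

```c
/* Hedged sketch of the legacy .mount flow removed above. */
static struct dentry *example_mount(struct file_system_type *type,
				    int flags, const char *dev_name,
				    void *data)
{
	struct super_block *s;
	int ret;

	s = sget(type, NULL, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);

	ret = example_fill_super(s, data, flags & SB_SILENT);
	if (ret) {
		deactivate_locked_super(s);	/* tears down the half-built sb */
		return ERR_PTR(ret);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);			/* the mount pins the root dentry */
}
```

Note how the error label in the original fill routine frees the sb_info itself, so the mount wrapper only has to deactivate the superblock.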
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c
deleted file mode 100644
index 1f7d5e46cdda..000000000000
--- a/fs/exofs/sys.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright (C) 2012
- * Sachin Bhamare <sbhamare@panasas.com>
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License 2 as published by
- * the Free Software Foundation.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the:
- * Free Software Foundation <licensing@fsf.org>
- */
-
-#include <linux/kobject.h>
-#include <linux/device.h>
-
-#include "exofs.h"
-
-struct odev_attr {
- struct attribute attr;
- ssize_t (*show)(struct exofs_dev *, char *);
- ssize_t (*store)(struct exofs_dev *, const char *, size_t);
-};
-
-static ssize_t odev_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
- struct odev_attr *a = container_of(attr, struct odev_attr, attr);
-
- return a->show ? a->show(edp, buf) : 0;
-}
-
-static ssize_t odev_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len)
-{
- struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
- struct odev_attr *a = container_of(attr, struct odev_attr, attr);
-
- return a->store ? a->store(edp, buf, len) : len;
-}
-
-static const struct sysfs_ops odev_attr_ops = {
- .show = odev_attr_show,
- .store = odev_attr_store,
-};
-
-
-static struct kset *exofs_kset;
-
-static ssize_t osdname_show(struct exofs_dev *edp, char *buf)
-{
- struct osd_dev *odev = edp->ored.od;
- const struct osd_dev_info *odi = osduld_device_info(odev);
-
- return snprintf(buf, odi->osdname_len + 1, "%s", odi->osdname);
-}
-
-static ssize_t systemid_show(struct exofs_dev *edp, char *buf)
-{
- struct osd_dev *odev = edp->ored.od;
- const struct osd_dev_info *odi = osduld_device_info(odev);
-
- memcpy(buf, odi->systemid, odi->systemid_len);
- return odi->systemid_len;
-}
-
-static ssize_t uri_show(struct exofs_dev *edp, char *buf)
-{
- return snprintf(buf, edp->urilen, "%s", edp->uri);
-}
-
-static ssize_t uri_store(struct exofs_dev *edp, const char *buf, size_t len)
-{
- uint8_t *new_uri;
-
- edp->urilen = strlen(buf) + 1;
- new_uri = krealloc(edp->uri, edp->urilen, GFP_KERNEL);
- if (new_uri == NULL)
- return -ENOMEM;
- edp->uri = new_uri;
- strncpy(edp->uri, buf, edp->urilen);
- return edp->urilen;
-}
-
-#define OSD_ATTR(name, mode, show, store) \
- static struct odev_attr odev_attr_##name = \
- __ATTR(name, mode, show, store)
-
-OSD_ATTR(osdname, S_IRUGO, osdname_show, NULL);
-OSD_ATTR(systemid, S_IRUGO, systemid_show, NULL);
-OSD_ATTR(uri, S_IRWXU, uri_show, uri_store);
-
-static struct attribute *odev_attrs[] = {
- &odev_attr_osdname.attr,
- &odev_attr_systemid.attr,
- &odev_attr_uri.attr,
- NULL,
-};
-
-static struct kobj_type odev_ktype = {
- .default_attrs = odev_attrs,
- .sysfs_ops = &odev_attr_ops,
-};
-
-static struct kobj_type uuid_ktype = {
-};
-
-void exofs_sysfs_dbg_print(void)
-{
-#ifdef CONFIG_EXOFS_DEBUG
- struct kobject *k_name, *k_tmp;
-
- list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
- printk(KERN_INFO "%s: name %s ref %d\n",
- __func__, kobject_name(k_name),
- (int)kref_read(&k_name->kref));
- }
-#endif
-}
-/*
- * This function removes all kobjects under exofs_kset.
- * When it returns, the exofs_kset kobject holds a refcount
- * of 1, which is dropped only on exofs module unload.
- */
-void exofs_sysfs_sb_del(struct exofs_sb_info *sbi)
-{
- struct kobject *k_name, *k_tmp;
- struct kobject *s_kobj = &sbi->s_kobj;
-
- list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
- /* Remove all that are children of this SBI */
- if (k_name->parent == s_kobj)
- kobject_put(k_name);
- }
- kobject_put(s_kobj);
-}
-
-/*
- * This function creates sysfs entries to hold the current exofs cluster
- * instance (uniquely identified by the (osdname, pid) tuple).
- * It is called once per exofs mount instance.
- */
-int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
- struct exofs_dt_device_info *dt_dev)
-{
- struct kobject *s_kobj;
- int retval = 0;
- uint64_t pid = sbi->one_comp.obj.partition;
-
- /* allocate new uuid dirent */
- s_kobj = &sbi->s_kobj;
- s_kobj->kset = exofs_kset;
- retval = kobject_init_and_add(s_kobj, &uuid_ktype,
- &exofs_kset->kobj, "%s_%llx", dt_dev->osdname, pid);
- if (retval) {
- EXOFS_ERR("ERROR: Failed to create sysfs entry for "
- "uuid-%s_%llx => %d\n", dt_dev->osdname, pid, retval);
- return -ENOMEM;
- }
- return 0;
-}
-
-int exofs_sysfs_odev_add(struct exofs_dev *edev, struct exofs_sb_info *sbi)
-{
- struct kobject *d_kobj;
- int retval = 0;
-
-	/* create the osd device group, which contains the following
-	 * attributes: osdname, systemid and uri
-	 */
- d_kobj = &edev->ed_kobj;
- d_kobj->kset = exofs_kset;
- retval = kobject_init_and_add(d_kobj, &odev_ktype,
- &sbi->s_kobj, "dev%u", edev->did);
- if (retval) {
- EXOFS_ERR("ERROR: Failed to create sysfs entry for "
- "device dev%u\n", edev->did);
- return retval;
- }
- return 0;
-}
-
-int exofs_sysfs_init(void)
-{
- exofs_kset = kset_create_and_add("exofs", NULL, fs_kobj);
- if (!exofs_kset) {
- EXOFS_ERR("ERROR: kset_create_and_add exofs failed\n");
- return -ENOMEM;
- }
- return 0;
-}
-
-void exofs_sysfs_uninit(void)
-{
- kset_unregister(exofs_kset);
-}
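The deleted sys.c above dispatches sysfs show/store through container_of(): a generic struct attribute is embedded in a typed odev_attr, and the wrapper is recovered from the generic pointer the sysfs core hands back. A minimal, runnable userspace illustration of that dispatch pattern (names mirror the deleted code but everything here is illustrative):

```c
#include <stddef.h>
#include <stdio.h>

/* Recover the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };

struct odev_attr {
	struct attribute attr;
	void (*show)(void);
};

static void osdname_show(void) { puts("osdname"); }

static struct odev_attr odev_attr_osdname = {
	.attr = { .name = "osdname" },
	.show = osdname_show,
};

/* The "core" only sees struct attribute; the driver recovers its type. */
static void dispatch(struct attribute *attr)
{
	struct odev_attr *a = container_of(attr, struct odev_attr, attr);

	if (a->show)
		a->show();
}

int main(void)
{
	dispatch(&odev_attr_osdname.attr);	/* prints "osdname" */
	return 0;
}
```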
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index a453cc87082b..06f77ca7f36e 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -96,23 +96,8 @@ config EXT4_FS_SECURITY
If you are not using a security module that requires using
extended attributes for file security labels, say N.
-config EXT4_ENCRYPTION
- bool "Ext4 Encryption"
- depends on EXT4_FS
- select FS_ENCRYPTION
- help
- Enable encryption of ext4 files and directories. This
- feature is similar to ecryptfs, but it is more memory
- efficient since it avoids caching the encrypted and
- decrypted pages in the page cache.
-
-config EXT4_FS_ENCRYPTION
- bool
- default y
- depends on EXT4_ENCRYPTION
-
config EXT4_DEBUG
- bool "EXT4 debugging support"
+ bool "Ext4 debugging support"
depends on EXT4_FS
help
Enables run-time debugging support for the ext4 filesystem.
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index f93f9881ec18..0ccd51f72048 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -111,7 +111,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
int dir_has_error = 0;
struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
- if (ext4_encrypted_inode(inode)) {
+ if (IS_ENCRYPTED(inode)) {
err = fscrypt_get_encryption_info(inode);
if (err && err != -ENOKEY)
return err;
@@ -138,7 +138,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
return err;
}
- if (ext4_encrypted_inode(inode)) {
+ if (IS_ENCRYPTED(inode)) {
err = fscrypt_fname_alloc_buffer(inode, EXT4_NAME_LEN, &fstr);
if (err < 0)
return err;
@@ -245,7 +245,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
offset += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
if (le32_to_cpu(de->inode)) {
- if (!ext4_encrypted_inode(inode)) {
+ if (!IS_ENCRYPTED(inode)) {
if (!dir_emit(ctx, de->name,
de->name_len,
le32_to_cpu(de->inode),
@@ -283,9 +283,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
done:
err = 0;
errout:
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
fscrypt_fname_free_buffer(&fstr);
-#endif
brelse(bh);
return err;
}
@@ -613,7 +611,7 @@ finished:
static int ext4_dir_open(struct inode * inode, struct file * filp)
{
- if (ext4_encrypted_inode(inode))
+ if (IS_ENCRYPTED(inode))
return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
return 0;
}
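The dir.c hunks above are part of a tree-wide predicate swap: ext4_encrypted_inode() read an ext4-specific on-disk inode flag, while IS_ENCRYPTED() tests a generic i_flags bit that fscrypt keeps in sync, so fs/crypto and every filesystem share one check. A tiny runnable sketch of the generic predicate (the flag value matches fs.h at the time, but treat it as illustrative):

```c
#include <stdio.h>

#define S_ENCRYPTED 0x4000	/* generic VFS inode flag, illustrative */

struct inode { unsigned int i_flags; };

/* The VFS-level predicate the hunks above switch to. */
#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED)

int main(void)
{
	struct inode plain = { 0 }, enc = { S_ENCRYPTED };

	printf("plain: %d, enc: %d\n",
	       !!IS_ENCRYPTED(&plain), !!IS_ENCRYPTED(&enc));
	return 0;
}
```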
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 185a05d3257e..0833b5fc0668 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -40,7 +40,6 @@
#include <linux/compat.h>
#endif
-#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
#include <linux/fscrypt.h>
#include <linux/compiler.h>
@@ -426,6 +425,9 @@ struct flex_groups {
/* Flags that are appropriate for non-directories/regular files. */
#define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
+/* The only flags that should be swapped */
+#define EXT4_FL_SHOULD_SWAP (EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL)
+
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
{
@@ -1326,7 +1328,7 @@ struct ext4_super_block {
#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
#define EXT4_MF_TEST_DUMMY_ENCRYPTION 0x0004
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \
EXT4_MF_TEST_DUMMY_ENCRYPTION))
#else
@@ -1662,6 +1664,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */
#define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000
+extern void ext4_update_dynamic_rev(struct super_block *sb);
+
#define EXT4_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline bool ext4_has_feature_##name(struct super_block *sb) \
{ \
@@ -1670,6 +1674,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
} \
static inline void ext4_set_feature_##name(struct super_block *sb) \
{ \
+ ext4_update_dynamic_rev(sb); \
EXT4_SB(sb)->s_es->s_feature_compat |= \
cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname); \
} \
@@ -1687,6 +1692,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
} \
static inline void ext4_set_feature_##name(struct super_block *sb) \
{ \
+ ext4_update_dynamic_rev(sb); \
EXT4_SB(sb)->s_es->s_feature_ro_compat |= \
cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname); \
} \
@@ -1704,6 +1710,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
} \
static inline void ext4_set_feature_##name(struct super_block *sb) \
{ \
+ ext4_update_dynamic_rev(sb); \
EXT4_SB(sb)->s_es->s_feature_incompat |= \
cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname); \
} \
@@ -2017,7 +2024,6 @@ static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx));
desc.shash.tfm = sbi->s_chksum_driver;
- desc.shash.flags = 0;
*(u32 *)desc.ctx = crc;
BUG_ON(crypto_shash_update(&desc.shash, address, length));
@@ -2051,7 +2057,7 @@ struct ext4_filename {
const struct qstr *usr_fname;
struct fscrypt_str disk_name;
struct dx_hash_info hinfo;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
struct fscrypt_str crypto_buf;
#endif
};
@@ -2279,12 +2285,7 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
-static inline bool ext4_encrypted_inode(struct inode *inode)
-{
- return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
-}
-
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
static inline int ext4_fname_setup_filename(struct inode *dir,
const struct qstr *iname,
int lookup, struct ext4_filename *fname)
@@ -2672,7 +2673,6 @@ do { \
#endif
-extern void ext4_update_dynamic_rev(struct super_block *sb);
extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
__u32 compat);
extern int ext4_update_rocompat_feature(handle_t *handle,
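The ext4.h hunks above fold ext4_update_dynamic_rev() into every macro-generated ext4_set_feature_*() helper, so the superblock revision is bumped whenever any feature bit is set instead of relying on scattered call sites (two such sites are removed later in this diff). A standalone, runnable sketch of that macro shape; struct sb, the flag value, and the feature name are illustrative:

```c
#include <stdio.h>

struct sb { unsigned rev; unsigned feature_compat; };

static void update_dynamic_rev(struct sb *sb)
{
	if (sb->rev < 1) {
		sb->rev = 1;
		printf("bumped revision to %u\n", sb->rev);
	}
}

/* Generate a has/set pair; the setter can never forget the rev bump. */
#define FEATURE_COMPAT_FUNCS(name, flag)			\
static int has_feature_##name(struct sb *sb)			\
{ return (sb->feature_compat & (flag)) != 0; }			\
static void set_feature_##name(struct sb *sb)			\
{ update_dynamic_rev(sb); sb->feature_compat |= (flag); }

FEATURE_COMPAT_FUNCS(dir_index, 0x0020)

int main(void)
{
	struct sb sb = { 0, 0 };

	set_feature_dir_index(&sb);	/* bumps rev, then sets the bit */
	printf("has dir_index: %d, rev: %u\n",
	       has_feature_dir_index(&sb), sb.rev);
	return 0;
}
```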
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 15b6dd733780..75a5309f2231 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -384,7 +384,7 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
{
struct ext4_inode_info *ei = EXT4_I(inode);
- if (ext4_handle_valid(handle)) {
+ if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
ei->i_sync_tid = handle->h_transaction->t_tid;
if (datasync)
ei->i_datasync_tid = handle->h_transaction->t_tid;
@@ -411,7 +411,7 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
(ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
!test_opt(inode->i_sb, DELALLOC))) {
/* We do not support data journalling for encrypted data */
- if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode))
+ if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 240b6dea5441..0f89f5190cd7 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2956,14 +2956,17 @@ again:
if (err < 0)
goto out;
- } else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
+ } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
+ partial.state == initial) {
/*
- * If there's an extent to the right its first cluster
- * contains the immediate right boundary of the
- * truncated/punched region. Set partial_cluster to
- * its negative value so it won't be freed if shared
- * with the current extent. The end < ee_block case
- * is handled in ext4_ext_rm_leaf().
+ * If we're punching, there's an extent to the right.
+ * If the partial cluster hasn't been set, set it to
+ * that extent's first cluster and its state to nofree
+ * so it won't be freed should it contain blocks to be
+ * removed. If it's already set (tofree/nofree), we're
+ * retrying and keep the original partial cluster info
+ * so a cluster marked tofree as a result of earlier
+ * extent removal is not lost.
*/
lblk = ex_end + 1;
err = ext4_ext_search_right(inode, path, &lblk, &pblk,
@@ -3631,7 +3634,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
max_zeroout = sbi->s_extent_max_zeroout_kb >>
(inode->i_sb->s_blocksize_bits - 10);
- if (ext4_encrypted_inode(inode))
+ if (IS_ENCRYPTED(inode))
max_zeroout = 0;
/*
@@ -4048,18 +4051,8 @@ out:
} else
allocated = ret;
map->m_flags |= EXT4_MAP_NEW;
- /*
- * if we allocated more blocks than requested
- * we need to make sure we unmap the extra block
- * allocated. The actual needed block will get
- * unmapped later when we find the buffer_head marked
- * new.
- */
- if (allocated > map->m_len) {
- clean_bdev_aliases(inode->i_sb->s_bdev, newblock + map->m_len,
- allocated - map->m_len);
+ if (allocated > map->m_len)
allocated = map->m_len;
- }
map->m_len = allocated;
map_out:
@@ -4818,7 +4811,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
* leave it disabled for encrypted inodes for now. This is a
* bug we should fix....
*/
- if (ext4_encrypted_inode(inode) &&
+ if (IS_ENCRYPTED(inode) &&
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
FALLOC_FL_ZERO_RANGE)))
return -EOPNOTSUPP;
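The rewritten comment in the extents.c hunk above describes tri-state partial-cluster bookkeeping: the cluster of interest starts out unset (initial), and once marked tofree or nofree a retry must not clobber that decision. A runnable sketch of that state machine under assumed, illustrative types and names:

```c
#include <stdio.h>

enum pclu_state { initial, tofree, nofree };

struct partial_cluster {
	long long pclu;		/* physical cluster of interest */
	enum pclu_state state;
};

/* Record the right neighbor's first cluster, but only on the first pass. */
static void note_right_neighbor(struct partial_cluster *p, long long clu)
{
	if (p->state != initial)
		return;		/* retrying: keep earlier tofree/nofree info */
	p->pclu = clu;
	p->state = nofree;	/* shared with the right extent: never free */
}

int main(void)
{
	struct partial_cluster p = { 0, initial };

	note_right_neighbor(&p, 42);	/* first pass: records cluster 42 */
	p.state = tofree;		/* later found to contain only our blocks */
	note_right_neighbor(&p, 99);	/* retry: must not clobber tofree */
	printf("pclu=%lld state=%d\n", p.pclu, p.state);
	return 0;
}
```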
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 69d65d49837b..98ec11f69cd4 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -125,7 +125,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
struct super_block *sb = inode->i_sb;
int blockmask = sb->s_blocksize - 1;
- if (pos >= i_size_read(inode))
+ if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
return 0;
if ((pos | iov_iter_alignment(from)) & blockmask)
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index e22dcfab308b..46b24da33a28 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -231,6 +231,7 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
break;
case DX_HASH_HALF_MD4_UNSIGNED:
str2hashbuf = str2hashbuf_unsigned;
+ /* fall through */
case DX_HASH_HALF_MD4:
p = name;
while (len > 0) {
@@ -244,6 +245,7 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
break;
case DX_HASH_TEA_UNSIGNED:
str2hashbuf = str2hashbuf_unsigned;
+ /* fall through */
case DX_HASH_TEA:
p = name;
while (len > 0) {
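The two hash.c hunks above annotate deliberate case fall-through: each DX_HASH_*_UNSIGNED case only swaps in the unsigned string-to-hash helper, then continues into its signed twin's shared loop. The comment documents the intent and silences -Wimplicit-fallthrough. A minimal runnable illustration of the same switch shape:

```c
#include <stdio.h>

enum hash { HALF_MD4, HALF_MD4_UNSIGNED };

static const char *pick(enum hash h)
{
	const char *helper = "signed";

	switch (h) {
	case HALF_MD4_UNSIGNED:
		helper = "unsigned";
		/* fall through */
	case HALF_MD4:
		return helper;	/* shared tail for both cases */
	}
	return "none";
}

int main(void)
{
	printf("%s %s\n", pick(HALF_MD4), pick(HALF_MD4_UNSIGNED));
	return 0;
}
```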
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 7ff14a1adba3..f3e17a8c84b4 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -771,7 +771,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
if (unlikely(ext4_forced_shutdown(sbi)))
return ERR_PTR(-EIO);
- if ((ext4_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
+ if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) &&
!(i_flags & EXT4_EA_INODE_FL)) {
err = fscrypt_get_encryption_info(dir);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index bf7fa1507e81..2024d3fa5504 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1183,18 +1183,21 @@ do_indirects:
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
i_data[EXT4_IND_BLOCK] = 0;
}
+ /* fall through */
case EXT4_IND_BLOCK:
nr = i_data[EXT4_DIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
i_data[EXT4_DIND_BLOCK] = 0;
}
+ /* fall through */
case EXT4_DIND_BLOCK:
nr = i_data[EXT4_TIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
i_data[EXT4_TIND_BLOCK] = 0;
}
+ /* fall through */
case EXT4_TIND_BLOCK:
;
}
@@ -1219,6 +1222,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
ext4_lblk_t offsets[4], offsets2[4];
Indirect chain[4], chain2[4];
Indirect *partial, *partial2;
+ Indirect *p = NULL, *p2 = NULL;
ext4_lblk_t max_block;
__le32 nr = 0, nr2 = 0;
int n = 0, n2 = 0;
@@ -1260,7 +1264,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
}
- partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+ partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
if (nr) {
if (partial == chain) {
/* Shared branch grows from the inode */
@@ -1285,13 +1289,11 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
partial->p + 1,
(__le32 *)partial->bh->b_data+addr_per_block,
(chain+n-1) - partial);
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
partial--;
}
end_range:
- partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+ partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
if (nr2) {
if (partial2 == chain2) {
/*
@@ -1321,16 +1323,14 @@ end_range:
(__le32 *)partial2->bh->b_data,
partial2->p,
(chain2+n2-1) - partial2);
- BUFFER_TRACE(partial2->bh, "call brelse");
- brelse(partial2->bh);
partial2--;
}
goto do_indirects;
}
/* Punch happened within the same level (n == n2) */
- partial = ext4_find_shared(inode, n, offsets, chain, &nr);
- partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+ partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
+ partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
/* Free top, but only if partial2 isn't its subtree. */
if (nr) {
@@ -1387,11 +1387,7 @@ end_range:
partial->p + 1,
partial2->p,
(chain+n-1) - partial);
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
- BUFFER_TRACE(partial2->bh, "call brelse");
- brelse(partial2->bh);
- return 0;
+ goto cleanup;
}
/*
@@ -1406,8 +1402,6 @@ end_range:
partial->p + 1,
(__le32 *)partial->bh->b_data+addr_per_block,
(chain+n-1) - partial);
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
partial--;
}
if (partial2 > chain2 && depth2 <= depth) {
@@ -1415,11 +1409,21 @@ end_range:
(__le32 *)partial2->bh->b_data,
partial2->p,
(chain2+n2-1) - partial2);
- BUFFER_TRACE(partial2->bh, "call brelse");
- brelse(partial2->bh);
partial2--;
}
}
+
+cleanup:
+ while (p && p > chain) {
+ BUFFER_TRACE(p->bh, "call brelse");
+ brelse(p->bh);
+ p--;
+ }
+ while (p2 && p2 > chain2) {
+ BUFFER_TRACE(p2->bh, "call brelse");
+ brelse(p2->bh);
+ p2--;
+ }
return 0;
do_indirects:
@@ -1427,30 +1431,33 @@ do_indirects:
switch (offsets[0]) {
default:
if (++n >= n2)
- return 0;
+ break;
nr = i_data[EXT4_IND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
i_data[EXT4_IND_BLOCK] = 0;
}
+ /* fall through */
case EXT4_IND_BLOCK:
if (++n >= n2)
- return 0;
+ break;
nr = i_data[EXT4_DIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
i_data[EXT4_DIND_BLOCK] = 0;
}
+ /* fall through */
case EXT4_DIND_BLOCK:
if (++n >= n2)
- return 0;
+ break;
nr = i_data[EXT4_TIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
i_data[EXT4_TIND_BLOCK] = 0;
}
+ /* fall through */
case EXT4_TIND_BLOCK:
;
}
- return 0;
+ goto cleanup;
}
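The indirect.c changes above replace per-branch brelse() calls with one cleanup label: p and p2 remember the deepest chain element obtained from ext4_find_shared(), and every exit path walks them back toward the chain base, releasing each buffer head exactly once. A runnable userspace analog of that "remember the high-water mark, release in one place" shape (all names illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

struct indirect { void *bh; };

static void release(struct indirect *i)
{
	free(i->bh);
	i->bh = NULL;
}

int main(void)
{
	struct indirect chain[4] = { { NULL } };
	struct indirect *p = NULL;
	int depth, err = 0;

	/* "find shared": acquire one buffer per level below the root */
	for (depth = 1; depth < 4; depth++) {
		chain[depth].bh = malloc(16);
		if (!chain[depth].bh) {
			err = -1;
			break;
		}
		p = &chain[depth];
	}

	/* single cleanup path: walk back toward, but not into, chain[0] */
	while (p && p > chain) {
		release(p);
		p--;
	}
	printf("done, err=%d\n", err);
	return 0;
}
```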
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 34d7e0703cc6..b32a57bc5d5d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -391,7 +391,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
* inode's preallocations.
*/
if ((ei->i_reserved_data_blocks == 0) &&
- (atomic_read(&inode->i_writecount) == 0))
+ !inode_is_open_for_write(inode))
ext4_discard_preallocations(inode);
}
@@ -415,7 +415,7 @@ int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
{
int ret;
- if (ext4_encrypted_inode(inode))
+ if (IS_ENCRYPTED(inode))
return fscrypt_zeroout_range(inode, lblk, pblk, len);
ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
@@ -678,8 +678,6 @@ found:
if (flags & EXT4_GET_BLOCKS_ZERO &&
map->m_flags & EXT4_MAP_MAPPED &&
map->m_flags & EXT4_MAP_NEW) {
- clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
- map->m_len);
ret = ext4_issue_zeroout(inode, map->m_lblk,
map->m_pblk, map->m_len);
if (ret) {
@@ -1150,7 +1148,7 @@ int do_journal_get_write_access(handle_t *handle,
return ret;
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
@@ -1194,7 +1192,6 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (err)
break;
if (buffer_new(bh)) {
- clean_bdev_bh_alias(bh);
if (PageUptodate(page)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
@@ -1217,8 +1214,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
(block_start < from || block_end > to)) {
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
- decrypt = ext4_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode);
+ decrypt = IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}
}
/*
@@ -1303,7 +1299,7 @@ retry_journal:
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
if (ext4_should_dioread_nolock(inode))
ret = ext4_block_write_begin(page, pos, len,
ext4_get_block_unwritten);
@@ -2490,10 +2486,6 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
}
BUG_ON(map->m_len == 0);
- if (map->m_flags & EXT4_MAP_NEW) {
- clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
- map->m_len);
- }
return 0;
}
@@ -2836,12 +2828,12 @@ retry:
goto unplug;
}
ret = mpage_prepare_extent_to_map(&mpd);
+ /* Unlock pages we didn't use */
+ mpage_release_unused_pages(&mpd, false);
/* Submit prepared bio */
ext4_io_submit(&mpd.io_submit);
ext4_put_io_end_defer(mpd.io_submit.io_end);
mpd.io_submit.io_end = NULL;
- /* Unlock pages we didn't use */
- mpage_release_unused_pages(&mpd, false);
if (ret < 0)
goto unplug;
@@ -2909,10 +2901,11 @@ retry:
handle = NULL;
mpd.do_map = 0;
}
- /* Submit prepared bio */
- ext4_io_submit(&mpd.io_submit);
/* Unlock pages we didn't use */
mpage_release_unused_pages(&mpd, give_up_on_write);
+ /* Submit prepared bio */
+ ext4_io_submit(&mpd.io_submit);
+
/*
* Drop our io_end reference we got from init. We have
* to be careful and use deferred io_end finishing if
@@ -3105,7 +3098,7 @@ retry_journal:
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
ret = ext4_block_write_begin(page, pos, len,
ext4_da_get_block_prep);
#else
@@ -3880,8 +3873,8 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
loff_t offset = iocb->ki_pos;
ssize_t ret;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+#ifdef CONFIG_FS_ENCRYPTION
+ if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
return 0;
#endif
@@ -4065,8 +4058,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
- if (S_ISREG(inode->i_mode) &&
- ext4_encrypted_inode(inode)) {
+ if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
@@ -4142,7 +4134,7 @@ static int ext4_block_truncate_page(handle_t *handle,
struct inode *inode = mapping->host;
/* If we are processing an encrypted inode during orphan list handling */
- if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
+ if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
return 0;
blocksize = inode->i_sb->s_blocksize;
@@ -4722,7 +4714,7 @@ static bool ext4_should_use_dax(struct inode *inode)
return false;
if (ext4_has_inline_data(inode))
return false;
- if (ext4_encrypted_inode(inode))
+ if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
return false;
return true;
}
@@ -5072,7 +5064,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ret = -EFSCORRUPTED;
goto bad_inode;
}
- if (ext4_encrypted_inode(inode)) {
+ if (IS_ENCRYPTED(inode)) {
inode->i_op = &ext4_encrypted_symlink_inode_operations;
ext4_set_aops(inode);
} else if (ext4_inode_is_fast_symlink(inode)) {
@@ -5351,7 +5343,6 @@ static int ext4_do_update_inode(handle_t *handle,
err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
if (err)
goto out_brelse;
- ext4_update_dynamic_rev(sb);
ext4_set_feature_large_file(sb);
ext4_handle_sync(handle);
err = ext4_handle_dirty_super(handle, sb);
@@ -6002,7 +5993,7 @@ int ext4_expand_extra_isize(struct inode *inode,
ext4_write_lock_xattr(inode, &no_expand);
- BUFFER_TRACE(iloc.bh, "get_write_access");
+ BUFFER_TRACE(iloc->bh, "get_write_access");
error = ext4_journal_get_write_access(handle, iloc->bh);
if (error) {
brelse(iloc->bh);
@@ -6089,36 +6080,6 @@ out:
return;
}
-#if 0
-/*
- * Bind an inode's backing buffer_head into this transaction, to prevent
- * it from being flushed to disk early. Unlike
- * ext4_reserve_inode_write, this leaves behind no bh reference and
- * returns no iloc structure, so the caller needs to repeat the iloc
- * lookup to mark the inode dirty later.
- */
-static int ext4_pin_inode(handle_t *handle, struct inode *inode)
-{
- struct ext4_iloc iloc;
-
- int err = 0;
- if (handle) {
- err = ext4_get_inode_loc(inode, &iloc);
- if (!err) {
- BUFFER_TRACE(iloc.bh, "get_write_access");
- err = jbd2_journal_get_write_access(handle, iloc.bh);
- if (!err)
- err = ext4_handle_dirty_metadata(handle,
- NULL,
- iloc.bh);
- brelse(iloc.bh);
- }
- }
- ext4_std_error(inode->i_sb, err);
- return err;
-}
-#endif
-
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
journal_t *journal;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index d37dafa1d133..bab3da4f1e0d 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -63,18 +63,20 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
loff_t isize;
struct ext4_inode_info *ei1;
struct ext4_inode_info *ei2;
+ unsigned long tmp;
ei1 = EXT4_I(inode1);
ei2 = EXT4_I(inode2);
swap(inode1->i_version, inode2->i_version);
- swap(inode1->i_blocks, inode2->i_blocks);
- swap(inode1->i_bytes, inode2->i_bytes);
swap(inode1->i_atime, inode2->i_atime);
swap(inode1->i_mtime, inode2->i_mtime);
memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
- swap(ei1->i_flags, ei2->i_flags);
+ tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
+ ei1->i_flags = (ei2->i_flags & EXT4_FL_SHOULD_SWAP) |
+ (ei1->i_flags & ~EXT4_FL_SHOULD_SWAP);
+ ei2->i_flags = tmp | (ei2->i_flags & ~EXT4_FL_SHOULD_SWAP);
swap(ei1->i_disksize, ei2->i_disksize);
ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
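The swap_inode_data() hunk above stops swapping the whole i_flags word and instead exchanges only the bits in EXT4_FL_SHOULD_SWAP, so flags like journal-data or encryption stay with their owning inode. A standalone, runnable sketch of that masked exchange (flag values are illustrative):

```c
#include <stdio.h>

#define HUGE_FILE_FL 0x00040000UL
#define EXTENTS_FL   0x00080000UL
#define SHOULD_SWAP  (HUGE_FILE_FL | EXTENTS_FL)

/* Exchange only the whitelisted bits between two flag words. */
static void swap_masked(unsigned long *f1, unsigned long *f2)
{
	unsigned long tmp = *f1 & SHOULD_SWAP;

	*f1 = (*f2 & SHOULD_SWAP) | (*f1 & ~SHOULD_SWAP);
	*f2 = tmp | (*f2 & ~SHOULD_SWAP);
}

int main(void)
{
	unsigned long a = EXTENTS_FL | 0x1;	/* 0x1 must stay with a */
	unsigned long b = HUGE_FILE_FL | 0x2;	/* 0x2 must stay with b */

	swap_masked(&a, &b);
	printf("a=%#lx b=%#lx\n", a, b);	/* a=0x40001 b=0x80002 */
	return 0;
}
```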
@@ -115,28 +117,42 @@ static long swap_inode_boot_loader(struct super_block *sb,
int err;
struct inode *inode_bl;
struct ext4_inode_info *ei_bl;
-
- if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
- IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
- ext4_has_inline_data(inode))
- return -EINVAL;
-
- if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
- !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ qsize_t size, size_bl, diff;
+ blkcnt_t blocks;
+ unsigned short bytes;
inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
if (IS_ERR(inode_bl))
return PTR_ERR(inode_bl);
ei_bl = EXT4_I(inode_bl);
- filemap_flush(inode->i_mapping);
- filemap_flush(inode_bl->i_mapping);
-
/* Protect orig inodes against a truncate and make sure,
* that only 1 swap_inode_boot_loader is running. */
lock_two_nondirectories(inode, inode_bl);
+ if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
+ IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
+ (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) ||
+ ext4_has_inline_data(inode)) {
+ err = -EINVAL;
+ goto journal_err_out;
+ }
+
+ if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
+ !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
+ err = -EPERM;
+ goto journal_err_out;
+ }
+
+ down_write(&EXT4_I(inode)->i_mmap_sem);
+ err = filemap_write_and_wait(inode->i_mapping);
+ if (err)
+ goto err_out;
+
+ err = filemap_write_and_wait(inode_bl->i_mapping);
+ if (err)
+ goto err_out;
+
/* Wait for all existing dio workers */
inode_dio_wait(inode);
inode_dio_wait(inode_bl);
@@ -147,7 +163,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
if (IS_ERR(handle)) {
err = -EINVAL;
- goto journal_err_out;
+ goto err_out;
}
/* Protect extent tree against block allocations via delalloc */
@@ -170,6 +186,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
}
+ err = dquot_initialize(inode);
+ if (err)
+ goto err_out1;
+
+ size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
+ size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
+ diff = size - size_bl;
swap_inode_data(inode, inode_bl);
inode->i_ctime = inode_bl->i_ctime = current_time(inode);
@@ -183,34 +206,58 @@ static long swap_inode_boot_loader(struct super_block *sb,
err = ext4_mark_inode_dirty(handle, inode);
if (err < 0) {
+ /* No need to update quota information. */
ext4_warning(inode->i_sb,
"couldn't mark inode #%lu dirty (err %d)",
inode->i_ino, err);
/* Revert all changes: */
swap_inode_data(inode, inode_bl);
ext4_mark_inode_dirty(handle, inode);
- } else {
- err = ext4_mark_inode_dirty(handle, inode_bl);
- if (err < 0) {
- ext4_warning(inode_bl->i_sb,
- "couldn't mark inode #%lu dirty (err %d)",
- inode_bl->i_ino, err);
- /* Revert all changes: */
- swap_inode_data(inode, inode_bl);
- ext4_mark_inode_dirty(handle, inode);
- ext4_mark_inode_dirty(handle, inode_bl);
- }
+ goto err_out1;
}
+
+ blocks = inode_bl->i_blocks;
+ bytes = inode_bl->i_bytes;
+ inode_bl->i_blocks = inode->i_blocks;
+ inode_bl->i_bytes = inode->i_bytes;
+ err = ext4_mark_inode_dirty(handle, inode_bl);
+ if (err < 0) {
+ /* No need to update quota information. */
+ ext4_warning(inode_bl->i_sb,
+ "couldn't mark inode #%lu dirty (err %d)",
+ inode_bl->i_ino, err);
+ goto revert;
+ }
+
+ /* Bootloader inode should not be counted into quota information. */
+ if (diff > 0)
+ dquot_free_space(inode, diff);
+ else
+ err = dquot_alloc_space(inode, -1 * diff);
+
+ if (err < 0) {
+revert:
+ /* Revert all changes: */
+ inode_bl->i_blocks = blocks;
+ inode_bl->i_bytes = bytes;
+ swap_inode_data(inode, inode_bl);
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_mark_inode_dirty(handle, inode_bl);
+ }
+
+err_out1:
ext4_journal_stop(handle);
ext4_double_up_write_data_sem(inode, inode_bl);
+err_out:
+ up_write(&EXT4_I(inode)->i_mmap_sem);
journal_err_out:
unlock_two_nondirectories(inode, inode_bl);
iput(inode_bl);
return err;
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
static int uuid_is_zero(__u8 u[16])
{
int i;
@@ -953,6 +1000,13 @@ resizefs_out:
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ /*
+ * We haven't replayed the journal, so we cannot use our
+ * block-bitmap-guided storage zapping commands.
+ */
+ if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
+ return -EROFS;
+
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
sizeof(range)))
return -EFAULT;
@@ -978,7 +1032,7 @@ resizefs_out:
return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
int err, err2;
struct ext4_sb_info *sbi = EXT4_SB(sb);
handle_t *handle;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index e2248083cdca..6fb76d408093 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4176,9 +4176,8 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
>> bsbits;
- if ((size == isize) &&
- !ext4_fs_is_busy(sbi) &&
- (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
+ if ((size == isize) && !ext4_fs_is_busy(sbi) &&
+ !inode_is_open_for_write(ac->ac_inode)) {
ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
return;
}
@@ -4258,7 +4257,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
(unsigned) ar->lleft, (unsigned) ar->pleft,
(unsigned) ar->lright, (unsigned) ar->pright,
- atomic_read(&ar->inode->i_writecount) ? "" : "non-");
+ inode_is_open_for_write(ar->inode) ? "" : "non-");
return 0;
}
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 2f5be02fc6f6..1083a9f3f16a 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -592,8 +592,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
return -EOPNOTSUPP;
}
- if (ext4_encrypted_inode(orig_inode) ||
- ext4_encrypted_inode(donor_inode)) {
+ if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
ext4_msg(orig_inode->i_sb, KERN_ERR,
"Online defrag not supported for encrypted files");
return -EOPNOTSUPP;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2b928eb07fa2..980166a8122a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -612,7 +612,7 @@ static struct stats dx_show_leaf(struct inode *dir,
{
if (show_names)
{
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
int len;
char *name;
struct fscrypt_str fname_crypto_str =
@@ -621,7 +621,7 @@ static struct stats dx_show_leaf(struct inode *dir,
name = de->name;
len = de->name_len;
- if (ext4_encrypted_inode(dir))
+ if (IS_ENCRYPTED(dir))
res = fscrypt_get_encryption_info(dir);
if (res) {
printk(KERN_WARNING "Error setting up"
@@ -984,9 +984,9 @@ static int htree_dirblock_to_tree(struct file *dir_file,
top = (struct ext4_dir_entry_2 *) ((char *) de +
dir->i_sb->s_blocksize -
EXT4_DIR_REC_LEN(0));
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
/* Check if the directory is encrypted */
- if (ext4_encrypted_inode(dir)) {
+ if (IS_ENCRYPTED(dir)) {
err = fscrypt_get_encryption_info(dir);
if (err < 0) {
brelse(bh);
@@ -1015,7 +1015,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
continue;
if (de->inode == 0)
continue;
- if (!ext4_encrypted_inode(dir)) {
+ if (!IS_ENCRYPTED(dir)) {
tmp_str.name = de->name;
tmp_str.len = de->name_len;
err = ext4_htree_store_dirent(dir_file,
@@ -1047,7 +1047,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
}
errout:
brelse(bh);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
fscrypt_fname_free_buffer(&fname_crypto_str);
#endif
return count;
@@ -1267,7 +1267,7 @@ static inline bool ext4_match(const struct ext4_filename *fname,
f.usr_fname = fname->usr_fname;
f.disk_name = fname->disk_name;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
f.crypto_buf = fname->crypto_buf;
#endif
return fscrypt_match_name(&f, de->name, de->name_len);
@@ -1498,7 +1498,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
ext4_lblk_t block;
int retval;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
*res_dir = NULL;
#endif
frame = dx_probe(fname, dir, NULL, frames);
@@ -1578,7 +1578,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
ino);
return ERR_PTR(-EFSCORRUPTED);
}
- if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
+ if (!IS_ERR(inode) && IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
ext4_warning(inode->i_sb,
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index cff4c4aa7a9c..3e9298e6a705 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -67,7 +67,7 @@ static void ext4_finish_bio(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i, iter_all) {
struct page *page = bvec->bv_page;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
struct page *data_page = NULL;
#endif
struct buffer_head *bh, *head;
@@ -79,7 +79,7 @@ static void ext4_finish_bio(struct bio *bio)
if (!page)
continue;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
if (!page->mapping) {
/* The bounce data pages are unmapped. */
data_page = page;
@@ -112,7 +112,7 @@ static void ext4_finish_bio(struct bio *bio)
bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
local_irq_restore(flags);
if (!under_io) {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
if (data_page)
fscrypt_restore_control_page(data_page);
#endif
@@ -468,18 +468,15 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
ext4_io_submit(io);
continue;
}
- if (buffer_new(bh)) {
+ if (buffer_new(bh))
clear_buffer_new(bh);
- clean_bdev_bh_alias(bh);
- }
set_buffer_async_write(bh);
nr_to_submit++;
} while ((bh = bh->b_this_page) != head);
bh = head = page_buffers(page);
- if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
- nr_to_submit) {
+ if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) {
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index e53639784892..3adadf461825 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -49,7 +49,7 @@
static inline bool ext4_bio_encrypted(struct bio *bio)
{
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
return unlikely(bio->bi_private != NULL);
#else
return false;
@@ -243,8 +243,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (bio == NULL) {
struct fscrypt_ctx *ctx = NULL;
- if (ext4_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode)) {
+ if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
goto set_error_page;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 48421de803b7..e7ae26e36c9c 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -932,11 +932,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
memcpy(n_group_desc, o_group_desc,
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
n_group_desc[gdb_num] = gdb_bh;
+
+ BUFFER_TRACE(gdb_bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, gdb_bh);
+ if (err) {
+ kvfree(n_group_desc);
+ brelse(gdb_bh);
+ return err;
+ }
+
EXT4_SB(sb)->s_group_desc = n_group_desc;
EXT4_SB(sb)->s_gdb_count++;
kvfree(o_group_desc);
- BUFFER_TRACE(gdb_bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, gdb_bh);
return err;
}
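The resize.c hunk above moves ext4_journal_get_write_access() ahead of installing the new group-descriptor array: the fallible step runs first, and the new table is published only on success, so a failure leaves the old s_group_desc fully intact. A runnable sketch of that prepare-then-publish ordering, with illustrative names:

```c
#include <stdio.h>
#include <stdlib.h>

struct table { int ready; };

static int prepare(struct table *t) { t->ready = 1; return 0; }

/* Run the fallible step first; publish only after it succeeds. */
static int publish_new_table(struct table **live, struct table *candidate)
{
	int err = prepare(candidate);	/* previously done after publishing */

	if (err) {
		free(candidate);	/* *live is untouched on failure */
		return err;
	}
	*live = candidate;		/* publish last */
	return 0;
}

int main(void)
{
	struct table *live = NULL;
	struct table *cand = calloc(1, sizeof(*cand));
	int err;

	if (!cand)
		return 1;
	err = publish_new_table(&live, cand);
	printf("publish: %d, live ready: %d\n", err, live ? live->ready : -1);
	free(live);
	return 0;
}
```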
@@ -1960,7 +1967,8 @@ retry:
le16_to_cpu(es->s_reserved_gdt_blocks);
n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
n_blocks_count = (ext4_fsblk_t)n_group *
- EXT4_BLOCKS_PER_GROUP(sb);
+ EXT4_BLOCKS_PER_GROUP(sb) +
+ le32_to_cpu(es->s_first_data_block);
n_group--; /* set to last group number */
}
@@ -2072,6 +2080,10 @@ out:
free_flex_gd(flex_gd);
if (resize_inode != NULL)
iput(resize_inode);
- ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
+ if (err)
+ ext4_warning(sb, "error (%d) occurred during "
+ "file system resize", err);
+ ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
+ ext4_blocks_count(es));
return err;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index fb12d3c17c1b..6ed4eb81e674 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -430,6 +430,12 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
spin_unlock(&sbi->s_md_lock);
}
+static bool system_going_down(void)
+{
+ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+ || system_state == SYSTEM_RESTART;
+}
+
/* Deal with the reporting of failure conditions on a filesystem such as
* inconsistencies detected or read IO failures.
*
@@ -460,7 +466,12 @@ static void ext4_handle_error(struct super_block *sb)
if (journal)
jbd2_journal_abort(journal, -EIO);
}
- if (test_opt(sb, ERRORS_RO)) {
+ /*
+ * We force ERRORS_RO behavior when system is rebooting. Otherwise we
+ * could panic during 'reboot -f' as the underlying device got already
+ * disabled.
+ */
+ if (test_opt(sb, ERRORS_RO) || system_going_down()) {
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
/*
* Make sure updated value of ->s_mount_flags will be visible
@@ -468,8 +479,7 @@ static void ext4_handle_error(struct super_block *sb)
*/
smp_wmb();
sb->s_flags |= SB_RDONLY;
- }
- if (test_opt(sb, ERRORS_PANIC)) {
+ } else if (test_opt(sb, ERRORS_PANIC)) {
if (EXT4_SB(sb)->s_journal &&
!(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
return;
@@ -1232,7 +1242,7 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
return try_to_free_buffers(page);
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
@@ -1922,7 +1932,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
*journal_ioprio =
IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
} else if (token == Opt_test_dummy_encryption) {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
ext4_msg(sb, KERN_WARNING,
"Test dummy encryption mode enabled");
@@ -2249,7 +2259,6 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
ext4_update_tstamp(es, s_mtime);
- ext4_update_dynamic_rev(sb);
if (sbi->s_journal)
ext4_set_feature_journal_needs_recovery(sb);
@@ -4167,7 +4176,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &ext4_sops;
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_QUOTA
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 9212a026a1f1..616c075da062 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -30,6 +30,7 @@ typedef enum {
attr_feature,
attr_pointer_ui,
attr_pointer_atomic,
+ attr_journal_task,
} attr_id_t;
typedef enum {
@@ -125,6 +126,14 @@ static ssize_t trigger_test_error(struct ext4_sb_info *sbi,
return count;
}
+static ssize_t journal_task_show(struct ext4_sb_info *sbi, char *buf)
+{
+ if (!sbi->s_journal)
+ return snprintf(buf, PAGE_SIZE, "<none>\n");
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ task_pid_vnr(sbi->s_journal->j_task));
+}
+
#define EXT4_ATTR(_name,_mode,_id) \
static struct ext4_attr ext4_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
@@ -188,6 +197,7 @@ EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
EXT4_ATTR(first_error_time, 0444, first_error_time);
EXT4_ATTR(last_error_time, 0444, last_error_time);
+EXT4_ATTR(journal_task, 0444, journal_task);
static unsigned int old_bump_val = 128;
EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);
@@ -217,6 +227,7 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(errors_count),
ATTR_LIST(first_error_time),
ATTR_LIST(last_error_time),
+ ATTR_LIST(journal_task),
NULL,
};
@@ -224,7 +235,7 @@ static struct attribute *ext4_attrs[] = {
EXT4_ATTR_FEATURE(lazy_itable_init);
EXT4_ATTR_FEATURE(batched_discard);
EXT4_ATTR_FEATURE(meta_bg_resize);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
EXT4_ATTR_FEATURE(encryption);
#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
@@ -233,7 +244,7 @@ static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
ATTR_LIST(meta_bg_resize),
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
ATTR_LIST(encryption),
#endif
ATTR_LIST(metadata_csum_seed),
@@ -304,6 +315,8 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
return print_tstamp(buf, sbi->s_es, s_first_error_time);
case attr_last_error_time:
return print_tstamp(buf, sbi->s_es, s_last_error_time);
+ case attr_journal_task:
+ return journal_task_show(sbi, buf);
}
return 0;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 86ed9c686249..dc82e7757f67 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -829,6 +829,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
if (IS_ERR(bh)) {
ret = PTR_ERR(bh);
+ bh = NULL;
goto out;
}
@@ -2903,6 +2904,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
if (error == -EIO)
EXT4_ERROR_INODE(inode, "block %llu read error",
EXT4_I(inode)->i_file_acl);
+ bh = NULL;
goto cleanup;
}
error = ext4_xattr_check_block(inode, bh);
@@ -3059,6 +3061,7 @@ ext4_xattr_block_cache_find(struct inode *inode,
if (IS_ERR(bh)) {
if (PTR_ERR(bh) == -ENOMEM)
return NULL;
+ bh = NULL;
EXT4_ERROR_INODE(inode, "block %lu read error",
(unsigned long)ce->e_value);
} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
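All three xattr.c hunks above apply the same one-line fix: after ext4_sb_bread() fails, bh holds an ERR_PTR-encoded error, and it must be reset to NULL so the shared cleanup path's brelse(bh) never dereferences the encoded error. A runnable userspace sketch of the pattern (the IS_ERR/PTR_ERR macros are simplified copies, everything else illustrative):

```c
#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))

static void *read_block(int fail)
{
	return fail ? (void *)(long)-5 : (void *)"data";	/* -EIO or a buffer */
}

static void release(void *bh)
{
	if (bh)
		printf("released %p\n", bh);
}

int main(void)
{
	void *bh = read_block(1);
	long ret = 0;

	if (IS_ERR(bh)) {
		ret = PTR_ERR(bh);
		bh = NULL;	/* the fix: cleanup must not see an ERR_PTR */
	}

	release(bh);		/* safe on both the success and error paths */
	printf("ret=%ld\n", ret);
	return 0;
}
```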
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 9a20ef42fadd..e57cc754d543 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -3,6 +3,7 @@ config F2FS_FS
depends on BLOCK
select CRYPTO
select CRYPTO_CRC32
+ select F2FS_FS_XATTR if FS_ENCRYPTION
help
F2FS is based on Log-structured File System (LFS), which supports
versatile "flash-friendly" features. The design has been focused on
@@ -70,17 +71,6 @@ config F2FS_CHECK_FS
If you want to improve the performance, say N.
-config F2FS_FS_ENCRYPTION
- bool "F2FS Encryption"
- depends on F2FS_FS
- depends on F2FS_FS_XATTR
- select FS_ENCRYPTION
- help
- Enable encryption of f2fs files and directories. This
- feature is similar to ecryptfs, but it is more memory
- efficient since it avoids caching the encrypted and
- decrypted pages in the page cache.
-
config F2FS_IO_TRACE
bool "F2FS IO tracer"
depends on F2FS_FS
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f955cd3e0677..a98e1b02279e 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -306,8 +306,9 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
goto skip_write;
/* collect a number of dirty meta pages and write together */
- if (wbc->for_kupdate ||
- get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
+ if (wbc->sync_mode != WB_SYNC_ALL &&
+ get_pages(sbi, F2FS_DIRTY_META) <
+ nr_pages_to_skip(sbi, META))
goto skip_write;
/* if locked failed, cp will flush dirty pages instead */
@@ -405,7 +406,7 @@ static int f2fs_set_meta_page_dirty(struct page *page)
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
- SetPagePrivate(page);
+ f2fs_set_page_private(page, 0);
f2fs_trace_pid(page);
return 1;
}
@@ -956,7 +957,7 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page)
inode_inc_dirty_pages(inode);
spin_unlock(&sbi->inode_lock[type]);
- SetPagePrivate(page);
+ f2fs_set_page_private(page, 0);
f2fs_trace_pid(page);
}
@@ -1259,10 +1260,17 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
else
__clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);
+ if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
+ __set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
+
if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
- else
- __clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+ /*
+ * TODO: we count on fsck.f2fs to clear this flag until we figure out
+ * missing cases which clear it incorrectly.
+ */
if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
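The checkpoint.c hunks above (and the later data.c ones) replace open-coded SetPagePrivate()/set_page_private() with f2fs_set_page_private()/f2fs_clear_page_private() helpers, pairing the Private flag with a page reference in one place. A hedged, kernel-style sketch of what that pair plausibly looks like; the exact definitions live in f2fs.h in this series, so treat this as a reading aid, not the authoritative code:

```c
/* Sketch only: take a reference when marking a page Private, drop it on clear. */
static inline void f2fs_set_page_private(struct page *page, unsigned long data)
{
	if (PagePrivate(page))
		return;

	get_page(page);			/* hold the page while Private is set */
	SetPagePrivate(page);
	set_page_private(page, data);
}

static inline void f2fs_clear_page_private(struct page *page)
{
	if (!PagePrivate(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	f2fs_put_page(page, 0);		/* drop the reference taken on set */
}
```

This also explains the f2fs_migrate_page() hunk later in the diff: with the reference now always held for Private pages, the extra_count bookkeeping no longer needs a special case.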
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index da060b77f64d..9727944139f2 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -301,9 +301,10 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
for (; start < F2FS_IO_SIZE(sbi); start++) {
struct page *page =
mempool_alloc(sbi->write_io_dummy,
- GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
+ GFP_NOIO | __GFP_NOFAIL);
f2fs_bug_on(sbi, !page);
+ zero_user_segment(page, 0, PAGE_SIZE);
SetPagePrivate(page);
set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
lock_page(page);
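The __submit_bio() hunk above drops __GFP_ZERO from mempool_alloc() and zeroes the page explicitly: a mempool can hand back a recycled element whose old contents are intact, and allocation-time zeroing only applies when the pool falls back to the underlying allocator. A runnable userspace analog of why the explicit zeroing is needed (the one-slot pool is purely illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pool { char *slot; };

static char *pool_alloc(struct pool *p)
{
	if (p->slot) {		/* recycled element: contents are stale */
		char *e = p->slot;
		p->slot = NULL;
		return e;
	}
	return calloc(1, 64);	/* fresh element happens to be zeroed */
}

static void pool_free(struct pool *p, char *e) { p->slot = e; }

int main(void)
{
	struct pool p = { NULL };
	char *e = pool_alloc(&p);

	strcpy(e, "stale");
	pool_free(&p, e);

	e = pool_alloc(&p);	/* recycled: still says "stale" */
	printf("before: '%s'\n", e);
	memset(e, 0, 64);	/* the explicit zeroing the fix adds */
	printf("after: '%s'\n", e);
	free(e);
	return 0;
}
```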
@@ -1468,7 +1469,7 @@ next:
}
if (size) {
- if (f2fs_encrypted_inode(inode))
+ if (IS_ENCRYPTED(inode))
flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
ret = fiemap_fill_next_extent(fieinfo, logical,
@@ -1553,6 +1554,9 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
if (last_block > last_block_in_file)
last_block = last_block_in_file;
+ /* just zeroing out page which is beyond EOF */
+ if (block_in_file >= last_block)
+ goto zero_out;
/*
* Map blocks using the previous result first.
*/
@@ -1565,16 +1569,11 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
* Then do more f2fs_map_blocks() calls until we are
* done with this page.
*/
- map.m_flags = 0;
-
- if (block_in_file < last_block) {
- map.m_lblk = block_in_file;
- map.m_len = last_block - block_in_file;
+ map.m_lblk = block_in_file;
+ map.m_len = last_block - block_in_file;
- if (f2fs_map_blocks(inode, &map, 0,
- F2FS_GET_BLOCK_DEFAULT))
- goto set_error_page;
- }
+ if (f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT))
+ goto set_error_page;
got_it:
if ((map.m_flags & F2FS_MAP_MAPPED)) {
block_nr = map.m_pblk + block_in_file - map.m_lblk;
@@ -1589,6 +1588,7 @@ got_it:
DATA_GENERIC))
goto set_error_page;
} else {
+zero_out:
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
SetPageUptodate(page);
@@ -1739,7 +1739,7 @@ static inline bool check_inplace_update_policy(struct inode *inode,
if (policy & (0x1 << F2FS_IPU_ASYNC) &&
fio && fio->op == REQ_OP_WRITE &&
!(fio->op_flags & REQ_SYNC) &&
- !f2fs_encrypted_inode(inode))
+ !IS_ENCRYPTED(inode))
return true;
/* this is only set during fdatasync */
@@ -1863,8 +1863,13 @@ got_it:
if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
err = f2fs_inplace_write_data(fio);
- if (err && PageWriteback(page))
- end_page_writeback(page);
+ if (err) {
+ if (f2fs_encrypted_file(inode))
+ fscrypt_pullback_bio_page(&fio->encrypted_page,
+ true);
+ if (PageWriteback(page))
+ end_page_writeback(page);
+ }
trace_f2fs_do_write_data_page(fio->page, IPU);
set_inode_flag(inode, FI_UPDATE_WRITE);
return err;
@@ -2315,7 +2320,8 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_pagecache(inode, i_size);
- f2fs_truncate_blocks(inode, i_size, true, true);
+ if (!IS_NOQUOTA(inode))
+ f2fs_truncate_blocks(inode, i_size, true);
up_write(&F2FS_I(inode)->i_mmap_sem);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -2585,14 +2591,11 @@ static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
{
struct f2fs_private_dio *dio;
bool write = (bio_op(bio) == REQ_OP_WRITE);
- int err;
dio = f2fs_kzalloc(F2FS_I_SB(inode),
sizeof(struct f2fs_private_dio), GFP_NOFS);
- if (!dio) {
- err = -ENOMEM;
+ if (!dio)
goto out;
- }
dio->inode = inode;
dio->orig_end_io = bio->bi_end_io;
@@ -2710,12 +2713,10 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
clear_cold_data(page);
- /* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return f2fs_drop_inmem_page(inode, page);
- set_page_private(page, 0);
- ClearPagePrivate(page);
+ f2fs_clear_page_private(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
@@ -2729,8 +2730,7 @@ int f2fs_release_page(struct page *page, gfp_t wait)
return 0;
clear_cold_data(page);
- set_page_private(page, 0);
- ClearPagePrivate(page);
+ f2fs_clear_page_private(page);
return 1;
}
@@ -2798,12 +2798,8 @@ int f2fs_migrate_page(struct address_space *mapping,
return -EAGAIN;
}
- /*
- * A reference is expected if PagePrivate set when move mapping,
- * however F2FS breaks this for maintaining dirty page counts when
- * truncating pages. So here adjusting the 'extra_count' make it work.
- */
- extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
+ /* one extra reference was held for atomic_write page */
+ extra_count = atomic_written ? 1 : 0;
rc = migrate_page_move_mapping(mapping, newpage,
page, mode, extra_count);
if (rc != MIGRATEPAGE_SUCCESS) {
@@ -2824,9 +2820,10 @@ int f2fs_migrate_page(struct address_space *mapping,
get_page(newpage);
}
- if (PagePrivate(page))
- SetPagePrivate(newpage);
- set_page_private(newpage, page_private(page));
+ if (PagePrivate(page)) {
+ f2fs_set_page_private(newpage, page_private(page));
+ f2fs_clear_page_private(page);
+ }
if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index fd7f170e2f2d..99e9a5c37b71 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -96,8 +96,10 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->free_secs = free_sections(sbi);
si->prefree_count = prefree_segments(sbi);
si->dirty_count = dirty_segments(sbi);
- si->node_pages = NODE_MAPPING(sbi)->nrpages;
- si->meta_pages = META_MAPPING(sbi)->nrpages;
+ if (sbi->node_inode)
+ si->node_pages = NODE_MAPPING(sbi)->nrpages;
+ if (sbi->meta_inode)
+ si->meta_pages = META_MAPPING(sbi)->nrpages;
si->nats = NM_I(sbi)->nat_cnt;
si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
si->sits = MAIN_SEGS(sbi);
@@ -175,7 +177,6 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
static void update_mem_info(struct f2fs_sb_info *sbi)
{
struct f2fs_stat_info *si = F2FS_STAT(sbi);
- unsigned npages;
int i;
if (si->base_mem)
@@ -258,10 +259,14 @@ get_cache:
sizeof(struct extent_node);
si->page_mem = 0;
- npages = NODE_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
- npages = META_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+ if (sbi->node_inode) {
+ unsigned npages = NODE_MAPPING(sbi)->nrpages;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+ }
+ if (sbi->meta_inode) {
+ unsigned npages = META_MAPPING(sbi)->nrpages;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+ }
}
static int stat_show(struct seq_file *s, void *v)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 50d0d36280fa..59bc46017855 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -385,7 +385,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
if (err)
goto put_error;
- if ((f2fs_encrypted_inode(dir) || dummy_encrypt) &&
+ if ((IS_ENCRYPTED(dir) || dummy_encrypt) &&
f2fs_may_encrypt(inode)) {
err = fscrypt_inherit_context(dir, inode, page, false);
if (err)
@@ -399,7 +399,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
if (new_name) {
init_dent_inode(new_name, page);
- if (f2fs_encrypted_inode(dir))
+ if (IS_ENCRYPTED(dir))
file_set_enc_name(inode);
}
@@ -728,7 +728,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
!f2fs_truncate_hole(dir, page->index, page->index + 1)) {
f2fs_clear_page_cache_dirty_tag(page);
clear_page_dirty_for_io(page);
- ClearPagePrivate(page);
+ f2fs_clear_page_private(page);
ClearPageUptodate(page);
clear_cold_data(page);
inode_dec_dirty_pages(dir);
@@ -800,6 +800,10 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
if (de->name_len == 0) {
bit_pos++;
ctx->pos = start_pos + bit_pos;
+ printk_ratelimited(
+ "%s, invalid namelen(0), ino:%u, run fsck to fix.",
+ KERN_WARNING, le32_to_cpu(de->ino));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
continue;
}
@@ -810,7 +814,8 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
/* check memory boundary before moving forward */
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
- if (unlikely(bit_pos > d->max)) {
+ if (unlikely(bit_pos > d->max ||
+ le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) {
f2fs_msg(sbi->sb, KERN_WARNING,
"%s: corrupted namelen=%d, run fsck to fix.",
__func__, le16_to_cpu(de->name_len));
@@ -819,7 +824,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
goto out;
}
- if (f2fs_encrypted_inode(d->inode)) {
+ if (IS_ENCRYPTED(d->inode)) {
int save_len = fstr->len;
err = fscrypt_fname_disk_to_usr(d->inode,
@@ -862,7 +867,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
int err = 0;
- if (f2fs_encrypted_inode(inode)) {
+ if (IS_ENCRYPTED(inode)) {
err = fscrypt_get_encryption_info(inode);
if (err && err != -ENOKEY)
goto out;
@@ -891,7 +896,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
page_cache_sync_readahead(inode->i_mapping, ra, file, n,
min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
- dentry_page = f2fs_get_lock_data_page(inode, n, false);
+ dentry_page = f2fs_find_data_page(inode, n);
if (IS_ERR(dentry_page)) {
err = PTR_ERR(dentry_page);
if (err == -ENOENT) {
@@ -909,11 +914,11 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
err = f2fs_fill_dentries(ctx, &d,
n * NR_DENTRY_IN_BLOCK, &fstr);
if (err) {
- f2fs_put_page(dentry_page, 1);
+ f2fs_put_page(dentry_page, 0);
break;
}
- f2fs_put_page(dentry_page, 1);
+ f2fs_put_page(dentry_page, 0);
}
out_free:
fscrypt_fname_free_buffer(&fstr);
@@ -924,7 +929,7 @@ out:
static int f2fs_dir_open(struct inode *inode, struct file *filp)
{
- if (f2fs_encrypted_inode(inode))
+ if (IS_ENCRYPTED(inode))
return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
return 0;
}
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 1cb0fcc67d2d..caf77fe8ac07 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -506,7 +506,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
unsigned int end = fofs + len;
unsigned int pos = (unsigned int)fofs;
bool updated = false;
- bool leftmost;
+ bool leftmost = false;
if (!et)
return;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 02d0cd199828..bacf5c2a8850 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -24,7 +24,6 @@
#include <linux/quotaops.h>
#include <crypto/hash.h>
-#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
#include <linux/fscrypt.h>
#ifdef CONFIG_F2FS_CHECK_FS
@@ -191,6 +190,8 @@ enum {
#define DEF_CP_INTERVAL 60 /* 60 secs */
#define DEF_IDLE_INTERVAL 5 /* 5 secs */
#define DEF_DISABLE_INTERVAL 5 /* 5 secs */
+#define DEF_DISABLE_QUICK_INTERVAL 1 /* 1 sec */
+#define DEF_UMOUNT_DISCARD_TIMEOUT 5 /* 5 secs */
struct cp_control {
int reason;
@@ -254,7 +255,7 @@ struct discard_entry {
/* max discard pend list number */
#define MAX_PLIST_NUM 512
#define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? \
- (MAX_PLIST_NUM - 1) : (blk_num - 1))
+ (MAX_PLIST_NUM - 1) : ((blk_num) - 1))
enum {
D_PREP, /* initial */
@@ -310,6 +311,7 @@ struct discard_policy {
bool sync; /* submit discard with REQ_SYNC flag */
bool ordered; /* issue discard by lba order */
unsigned int granularity; /* discard granularity */
+ int timeout; /* discard timeout for put_super */
};
struct discard_cmd_control {
@@ -456,7 +458,6 @@ struct f2fs_flush_device {
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE 1
-#define DEF_MIN_INLINE_SIZE 1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
@@ -1099,6 +1100,7 @@ enum {
SBI_IS_SHUTDOWN, /* shutdown by ioctl */
SBI_IS_RECOVERED, /* recovered orphan/data */
SBI_CP_DISABLED, /* CP was disabled last mount */
+ SBI_CP_DISABLED_QUICK, /* CP was disabled quickly */
SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */
SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
@@ -1110,6 +1112,7 @@ enum {
DISCARD_TIME,
GC_TIME,
DISABLE_TIME,
+ UMOUNT_DISCARD_TIMEOUT,
MAX_TIME,
};
@@ -1137,7 +1140,7 @@ enum fsync_mode {
FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */
};
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
#define DUMMY_ENCRYPTION_ENABLED(sbi) \
(unlikely(F2FS_OPTION(sbi).test_dummy_encryption))
#else
@@ -1238,8 +1241,6 @@ struct f2fs_sb_info {
unsigned int nquota_files; /* # of quota sysfile */
- u32 s_next_generation; /* for NFS support */
-
/* # of pages, see count_type */
atomic_t nr_pages[NR_COUNT_TYPE];
/* # of allocated blocks */
@@ -1421,7 +1422,6 @@ static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
desc.shash.tfm = sbi->s_chksum_driver;
- desc.shash.flags = 0;
*(u32 *)desc.ctx = crc;
err = crypto_shash_update(&desc.shash, address, length);
@@ -1799,13 +1799,12 @@ static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
atomic_inc(&sbi->nr_pages[count_type]);
- if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
- count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA ||
- count_type == F2FS_RD_DATA || count_type == F2FS_RD_NODE ||
- count_type == F2FS_RD_META)
- return;
-
- set_sbi_flag(sbi, SBI_IS_DIRTY);
+ if (count_type == F2FS_DIRTY_DENTS ||
+ count_type == F2FS_DIRTY_NODES ||
+ count_type == F2FS_DIRTY_META ||
+ count_type == F2FS_DIRTY_QDATA ||
+ count_type == F2FS_DIRTY_IMETA)
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
}
static inline void inode_inc_dirty_pages(struct inode *inode)
@@ -2157,10 +2156,17 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
get_pages(sbi, F2FS_WB_CP_DATA) ||
get_pages(sbi, F2FS_DIO_READ) ||
- get_pages(sbi, F2FS_DIO_WRITE) ||
- atomic_read(&SM_I(sbi)->dcc_info->queued_discard) ||
- atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
+ get_pages(sbi, F2FS_DIO_WRITE))
+ return false;
+
+ if (SM_I(sbi) && SM_I(sbi)->dcc_info &&
+ atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
+ return false;
+
+ if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
+ atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
return false;
+
return f2fs_time_over(sbi, type);
}
@@ -2301,11 +2307,12 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
#define F2FS_EXTENTS_FL 0x00080000 /* Inode uses extents */
#define F2FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */
#define F2FS_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
+#define F2FS_NOCOW_FL 0x00800000 /* Do not cow file */
#define F2FS_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
#define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
#define F2FS_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
-#define F2FS_FL_USER_VISIBLE 0x304BDFFF /* User visible flags */
+#define F2FS_FL_USER_VISIBLE 0x30CBDFFF /* User visible flags */
#define F2FS_FL_USER_MODIFIABLE 0x204BC0FF /* User modifiable flags */
/* Flags we can manipulate with through F2FS_IOC_FSSETXATTR */
@@ -2762,9 +2769,9 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
#define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr))
#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \
- ((offsetof(typeof(*f2fs_inode), field) + \
+ ((offsetof(typeof(*(f2fs_inode)), field) + \
sizeof((f2fs_inode)->field)) \
- <= (F2FS_OLD_ATTRIBUTE_SIZE + extra_isize)) \
+ <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
@@ -2793,8 +2800,8 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
#define __is_large_section(sbi) ((sbi)->segs_per_sec > 1)
-#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META && \
- (!is_read_io(fio->op) || fio->is_meta))
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META && \
+ (!is_read_io((fio)->op) || (fio)->is_meta))
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type);
@@ -2826,13 +2833,33 @@ static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
return true;
}
+static inline void f2fs_set_page_private(struct page *page,
+ unsigned long data)
+{
+ if (PagePrivate(page))
+ return;
+
+ get_page(page);
+ SetPagePrivate(page);
+ set_page_private(page, data);
+}
+
+static inline void f2fs_clear_page_private(struct page *page)
+{
+ if (!PagePrivate(page))
+ return;
+
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+ f2fs_put_page(page, 0);
+}
+
/*
* file.c
*/
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
- bool buf_write);
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
@@ -3006,7 +3033,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
-bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
+bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
@@ -3463,19 +3490,14 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
/*
* crypto support
*/
-static inline bool f2fs_encrypted_inode(struct inode *inode)
-{
- return file_is_encrypt(inode);
-}
-
static inline bool f2fs_encrypted_file(struct inode *inode)
{
- return f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode);
+ return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}
static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
file_set_encrypt(inode);
f2fs_set_inode_flags(inode);
#endif
@@ -3554,7 +3576,7 @@ static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
static inline bool f2fs_may_encrypt(struct inode *inode)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
umode_t mode = inode->i_mode;
return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
@@ -3616,8 +3638,6 @@ extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
#define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
#endif
-#endif
-
static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
@@ -3630,3 +3650,5 @@ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
#endif
return false;
}
+
+#endif
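
For reference, the f2fs_set_page_private()/f2fs_clear_page_private() helpers added above pair an extra page reference (get_page()/f2fs_put_page()) with the PagePrivate flag, so every call site converted in this patch attaches and detaches private data the same way. A minimal sketch of the intended pairing, using only names from the diff (illustrative, not a new call site):

    /* attach: takes a reference and sets PagePrivate in one step */
    f2fs_set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);

    /* ... page is in use ... */

    /* detach: clears the flag and drops the reference, but only if
     * PagePrivate was actually set, so it is safe on any page */
    f2fs_clear_page_private(page);
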
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index bba56b39dcc5..5742ab8b57dc 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -582,15 +582,14 @@ truncate_out:
zero_user(page, offset, PAGE_SIZE - offset);
/* An encrypted inode should have a key and truncate the last page. */
- f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
+ f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
if (!cache_only)
set_page_dirty(page);
f2fs_put_page(page, 1);
return 0;
}
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
- bool buf_write)
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
@@ -598,7 +597,6 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
int count = 0, err = 0;
struct page *ipage;
bool truncate_page = false;
- int flag = buf_write ? F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO;
trace_f2fs_truncate_blocks_enter(inode, from);
@@ -608,7 +606,7 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
goto free_partial;
if (lock)
- __do_map_lock(sbi, flag, true);
+ f2fs_lock_op(sbi);
ipage = f2fs_get_node_page(sbi, inode->i_ino);
if (IS_ERR(ipage)) {
@@ -646,7 +644,7 @@ free_next:
err = f2fs_truncate_inode_blocks(inode, free_from);
out:
if (lock)
- __do_map_lock(sbi, flag, false);
+ f2fs_unlock_op(sbi);
free_partial:
/* lastly zero out the first data page */
if (!err)
@@ -681,7 +679,7 @@ int f2fs_truncate(struct inode *inode)
return err;
}
- err = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
+ err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
if (err)
return err;
@@ -711,7 +709,7 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
stat->attributes |= STATX_ATTR_APPEND;
if (flags & F2FS_COMPR_FL)
stat->attributes |= STATX_ATTR_COMPRESSED;
- if (f2fs_encrypted_inode(inode))
+ if (IS_ENCRYPTED(inode))
stat->attributes |= STATX_ATTR_ENCRYPTED;
if (flags & F2FS_IMMUTABLE_FL)
stat->attributes |= STATX_ATTR_IMMUTABLE;
@@ -768,7 +766,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int err;
- bool size_changed = false;
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
@@ -843,8 +840,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
down_write(&F2FS_I(inode)->i_sem);
F2FS_I(inode)->last_disk_size = i_size_read(inode);
up_write(&F2FS_I(inode)->i_sem);
-
- size_changed = true;
}
__setattr_copy(inode, attr);
@@ -858,7 +853,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
}
/* file size may changed here */
- f2fs_mark_inode_dirty_sync(inode, size_changed);
+ f2fs_mark_inode_dirty_sync(inode, true);
/* inode change will produce dirty node pages flushed by checkpoint */
f2fs_balance_fs(F2FS_I_SB(inode), true);
@@ -1262,7 +1257,7 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
new_size = i_size_read(inode) - len;
truncate_pagecache(inode, new_size);
- ret = f2fs_truncate_blocks(inode, new_size, true, false);
+ ret = f2fs_truncate_blocks(inode, new_size, true);
up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
@@ -1447,7 +1442,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
f2fs_balance_fs(sbi, true);
down_write(&F2FS_I(inode)->i_mmap_sem);
- ret = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
+ ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
up_write(&F2FS_I(inode)->i_mmap_sem);
if (ret)
return ret;
@@ -1563,7 +1558,7 @@ static long f2fs_fallocate(struct file *file, int mode,
if (!S_ISREG(inode->i_mode))
return -EINVAL;
- if (f2fs_encrypted_inode(inode) &&
+ if (IS_ENCRYPTED(inode) &&
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
@@ -1647,10 +1642,12 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int flags = fi->i_flags;
- if (f2fs_encrypted_inode(inode))
+ if (IS_ENCRYPTED(inode))
flags |= F2FS_ENCRYPT_FL;
if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
flags |= F2FS_INLINE_DATA_FL;
+ if (is_inode_flag_set(inode, FI_PIN_FILE))
+ flags |= F2FS_NOCOW_FL;
flags &= F2FS_FL_USER_VISIBLE;
@@ -1750,10 +1747,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- if (!get_dirty_pages(inode))
- goto skip_flush;
-
- f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+ /*
+ * Should wait end_io to count F2FS_WB_CP_DATA correctly by
+ * f2fs_is_atomic_file.
+ */
+ if (get_dirty_pages(inode))
+ f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
"Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
@@ -1761,7 +1760,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
}
-skip_flush:
+
set_inode_flag(inode, FI_ATOMIC_FILE);
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1968,11 +1967,11 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
break;
case F2FS_GOING_DOWN_NEED_FSCK:
set_sbi_flag(sbi, SBI_NEED_FSCK);
+ set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
/* do checkpoint only */
ret = f2fs_sync_fs(sb, 1);
- if (ret)
- goto out;
- break;
+ goto out;
default:
ret = -EINVAL;
goto out;
@@ -1988,6 +1987,9 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
out:
if (in != F2FS_GOING_DOWN_FULLSYNC)
mnt_drop_write_file(filp);
+
+ trace_f2fs_shutdown(sbi, in, ret);
+
return ret;
}
@@ -2414,7 +2416,7 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
return -EINVAL;
- if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
+ if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
return -EOPNOTSUPP;
if (src == dst) {
@@ -2871,8 +2873,8 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
__u32 pin;
int ret = 0;
- if (!inode_owner_or_capable(inode))
- return -EACCES;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
if (get_user(pin, (__u32 __user *)arg))
return -EFAULT;
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index d636cbcf68f2..bb6a152310ef 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -298,7 +298,7 @@ process_inline:
clear_inode_flag(inode, FI_INLINE_DATA);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
- if (f2fs_truncate_blocks(inode, 0, false, false))
+ if (f2fs_truncate_blocks(inode, 0, false))
return false;
goto process_inline;
}
@@ -470,7 +470,7 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
return 0;
punch_dentry_pages:
truncate_inode_pages(&dir->i_data, 0);
- f2fs_truncate_blocks(dir, 0, false, false);
+ f2fs_truncate_blocks(dir, 0, false);
f2fs_remove_dirty_inode(dir);
return err;
}
@@ -659,6 +659,12 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
if (IS_ERR(ipage))
return PTR_ERR(ipage);
+ /*
+ * f2fs_readdir is protected by inode.i_rwsem, so it is safe to access
+ * ipage without holding the page lock.
+ */
+ unlock_page(ipage);
+
inline_dentry = inline_data_addr(inode, ipage);
make_dentry_ptr_inline(inode, &d, inline_dentry);
@@ -667,7 +673,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
if (!err)
ctx->pos = d.max;
- f2fs_put_page(ipage, 1);
+ f2fs_put_page(ipage, 0);
return err < 0 ? err : 0;
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index bec52961630b..e7f2e8759315 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -14,6 +14,7 @@
#include "f2fs.h"
#include "node.h"
#include "segment.h"
+#include "xattr.h"
#include <trace/events/f2fs.h>
@@ -43,7 +44,7 @@ void f2fs_set_inode_flags(struct inode *inode)
new_fl |= S_NOATIME;
if (flags & F2FS_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
- if (f2fs_encrypted_inode(inode))
+ if (file_is_encrypt(inode))
new_fl |= S_ENCRYPTED;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
@@ -248,6 +249,20 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return false;
}
+ if (f2fs_has_extra_attr(inode) &&
+ f2fs_sb_has_flexible_inline_xattr(sbi) &&
+ f2fs_has_inline_xattr(inode) &&
+ (!fi->i_inline_xattr_size ||
+ fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) has corrupted "
+ "i_inline_xattr_size: %d, max: %zu",
+ __func__, inode->i_ino, fi->i_inline_xattr_size,
+ MAX_INLINE_XATTR_SIZE);
+ return false;
+ }
+
if (F2FS_I(inode)->extent_tree) {
struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
@@ -453,7 +468,7 @@ make_now:
inode->i_mapping->a_ops = &f2fs_dblock_aops;
inode_nohighmem(inode);
} else if (S_ISLNK(inode->i_mode)) {
- if (f2fs_encrypted_inode(inode))
+ if (file_is_encrypt(inode))
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 62d9829f3a6a..f5e34e467003 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -10,6 +10,7 @@
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/ctype.h>
+#include <linux/random.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
@@ -50,7 +51,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
F2FS_I(inode)->i_crtime = inode->i_mtime;
- inode->i_generation = sbi->s_next_generation++;
+ inode->i_generation = prandom_u32();
if (S_ISDIR(inode->i_mode))
F2FS_I(inode)->i_current_depth = 1;
@@ -75,7 +76,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
set_inode_flag(inode, FI_NEW_INODE);
/* If the directory encrypted, then we should encrypt the inode. */
- if ((f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
+ if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
@@ -476,7 +477,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
if (err)
goto out_iput;
}
- if (f2fs_encrypted_inode(dir) &&
+ if (IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
f2fs_msg(inode->i_sb, KERN_WARNING,
@@ -803,7 +804,7 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if (f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
+ if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
int err = fscrypt_get_encryption_info(dir);
if (err)
return err;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 4f450e573312..3f99ab288695 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1920,7 +1920,9 @@ static int f2fs_write_node_pages(struct address_space *mapping,
f2fs_balance_fs_bg(sbi);
/* collect a number of dirty node pages and write together */
- if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
+ if (wbc->sync_mode != WB_SYNC_ALL &&
+ get_pages(sbi, F2FS_DIRTY_NODES) <
+ nr_pages_to_skip(sbi, NODE))
goto skip_write;
if (wbc->sync_mode == WB_SYNC_ALL)
@@ -1959,7 +1961,7 @@ static int f2fs_set_node_page_dirty(struct page *page)
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
- SetPagePrivate(page);
+ f2fs_set_page_private(page, 0);
f2fs_trace_pid(page);
return 1;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 9b79056d705d..aa7fe79b62b2 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -191,8 +191,7 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
f2fs_trace_pid(page);
- set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
- SetPagePrivate(page);
+ f2fs_set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
@@ -215,7 +214,8 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
}
static int __revoke_inmem_pages(struct inode *inode,
- struct list_head *head, bool drop, bool recover)
+ struct list_head *head, bool drop, bool recover,
+ bool trylock)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct inmem_pages *cur, *tmp;
@@ -227,7 +227,16 @@ static int __revoke_inmem_pages(struct inode *inode,
if (drop)
trace_f2fs_commit_inmem_page(page, INMEM_DROP);
- lock_page(page);
+ if (trylock) {
+ /*
+ * to avoid deadlock between the page lock and
+ * inmem_lock.
+ */
+ if (!trylock_page(page))
+ continue;
+ } else {
+ lock_page(page);
+ }
f2fs_wait_on_page_writeback(page, DATA, true, true);
@@ -270,8 +279,7 @@ next:
ClearPageUptodate(page);
clear_cold_data(page);
}
- set_page_private(page, 0);
- ClearPagePrivate(page);
+ f2fs_clear_page_private(page);
f2fs_put_page(page, 1);
list_del(&cur->list);
@@ -318,13 +326,19 @@ void f2fs_drop_inmem_pages(struct inode *inode)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- mutex_lock(&fi->inmem_lock);
- __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (!list_empty(&fi->inmem_ilist))
- list_del_init(&fi->inmem_ilist);
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
- mutex_unlock(&fi->inmem_lock);
+ while (!list_empty(&fi->inmem_pages)) {
+ mutex_lock(&fi->inmem_lock);
+ __revoke_inmem_pages(inode, &fi->inmem_pages,
+ true, false, true);
+
+ if (list_empty(&fi->inmem_pages)) {
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ if (!list_empty(&fi->inmem_ilist))
+ list_del_init(&fi->inmem_ilist);
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+ }
+ mutex_unlock(&fi->inmem_lock);
+ }
clear_inode_flag(inode, FI_ATOMIC_FILE);
fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
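
The rewritten f2fs_drop_inmem_pages() above avoids an ABBA deadlock: it holds inmem_lock while only trylocking each page, and loops until the list drains instead of blocking on a page someone else has locked. A self-contained userspace sketch of the same trylock-and-retry idea, assuming pthreads (all names below are illustrative, not kernel code):

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* inmem_lock analogue */
    static pthread_mutex_t item_lock = PTHREAD_MUTEX_INITIALIZER; /* page lock analogue */
    static bool list_drained;                  /* stands in for list_empty() */

    static void drop_all(void)
    {
        while (!list_drained) {
            pthread_mutex_lock(&list_lock);
            /* Trylock the inner lock; on contention, skip and retry the
             * whole pass rather than sleeping while holding list_lock. */
            if (pthread_mutex_trylock(&item_lock) == 0) {
                list_drained = true;           /* "revoke" the last item */
                pthread_mutex_unlock(&item_lock);
            }
            pthread_mutex_unlock(&list_lock);
            sched_yield();                     /* let the other lock order finish */
        }
    }
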
@@ -354,8 +368,7 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
kmem_cache_free(inmem_entry_slab, cur);
ClearPageUptodate(page);
- set_page_private(page, 0);
- ClearPagePrivate(page);
+ f2fs_clear_page_private(page);
f2fs_put_page(page, 0);
trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
@@ -429,12 +442,15 @@ retry:
* recovery or rewrite & commit last transaction. For other
* error number, revoking was done by filesystem itself.
*/
- err = __revoke_inmem_pages(inode, &revoke_list, false, true);
+ err = __revoke_inmem_pages(inode, &revoke_list,
+ false, true, false);
/* drop all uncommitted pages */
- __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+ __revoke_inmem_pages(inode, &fi->inmem_pages,
+ true, false, false);
} else {
- __revoke_inmem_pages(inode, &revoke_list, false, false);
+ __revoke_inmem_pages(inode, &revoke_list,
+ false, false, false);
}
return err;
@@ -542,9 +558,13 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
struct block_device *bdev)
{
- struct bio *bio = f2fs_bio_alloc(sbi, 0, true);
+ struct bio *bio;
int ret;
+ bio = f2fs_bio_alloc(sbi, 0, false);
+ if (!bio)
+ return -ENOMEM;
+
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
bio_set_dev(bio, bdev);
ret = submit_bio_wait(bio);
@@ -868,6 +888,9 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
if (holes[DATA] > ovp || holes[NODE] > ovp)
return -EAGAIN;
+ if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
+ dirty_segments(sbi) > overprovision_segments(sbi))
+ return -EAGAIN;
return 0;
}
@@ -1037,6 +1060,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
dpolicy->io_aware_gran = MAX_PLIST_NUM;
+ dpolicy->timeout = 0;
if (discard_type == DPOLICY_BG) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
@@ -1059,6 +1083,8 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
} else if (discard_type == DPOLICY_UMOUNT) {
dpolicy->max_requests = UINT_MAX;
dpolicy->io_aware = false;
+ /* we need to issue all to keep CP_TRIMMED_FLAG */
+ dpolicy->granularity = 1;
}
}
@@ -1424,7 +1450,14 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
int i, issued = 0;
bool io_interrupted = false;
+ if (dpolicy->timeout != 0)
+ f2fs_update_time(sbi, dpolicy->timeout);
+
for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
+ if (dpolicy->timeout != 0 &&
+ f2fs_time_over(sbi, dpolicy->timeout))
+ break;
+
if (i + 1 < dpolicy->granularity)
break;
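
With dpolicy->timeout set, __issue_discard_cmd() stamps the start time via f2fs_update_time() and abandons the plist walk once f2fs_time_over() reports the budget spent, which is what bounds umount latency. The same deadline pattern as a runnable userspace sketch (POSIX monotonic clock; the per-list work is elided):

    #include <time.h>

    static void issue_with_timeout(void)
    {
        struct timespec start, now;
        const time_t budget = 5;                /* DEF_UMOUNT_DISCARD_TIMEOUT */

        clock_gettime(CLOCK_MONOTONIC, &start); /* f2fs_update_time() analogue */
        for (int i = 511; i >= 0; i--) {        /* MAX_PLIST_NUM - 1 .. 0 */
            clock_gettime(CLOCK_MONOTONIC, &now);
            if (now.tv_sec - start.tv_sec >= budget) /* f2fs_time_over() */
                break;
            /* ... issue all pending discards on list i ... */
        }
    }
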
@@ -1611,7 +1644,7 @@ void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
}
/* This comes from f2fs_put_super */
-bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
+bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_policy dpolicy;
@@ -1619,6 +1652,7 @@ bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
dcc->discard_granularity);
+ dpolicy.timeout = UMOUNT_DISCARD_TIMEOUT;
__issue_discard_cmd(sbi, &dpolicy);
dropped = __drop_discard_cmd(sbi);
@@ -3164,10 +3198,10 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
stat_inc_inplace_blocks(fio->sbi);
err = f2fs_submit_page_bio(fio);
- if (!err)
+ if (!err) {
update_device_state(fio);
-
- f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
+ f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
+ }
return err;
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index a77f76f528b6..5c7ed0442d6e 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -865,7 +865,7 @@ static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
}
}
mutex_unlock(&dcc->cmd_lock);
- if (!wakeup)
+ if (!wakeup || !is_idle(sbi, DISCARD_TIME))
return;
wake_up:
dcc->discard_wake = 1;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 3d3ce9eb6d13..f2aaa2cc6b3e 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -269,7 +269,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
if (!qname) {
f2fs_msg(sb, KERN_ERR,
"Not enough memory for storing quotafile name");
- return -EINVAL;
+ return -ENOMEM;
}
if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
@@ -586,7 +586,7 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_io_size_bits:
if (args->from && match_int(args, &arg))
return -EINVAL;
- if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
+ if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
f2fs_msg(sb, KERN_WARNING,
"Not support %d, larger than %d",
1 << arg, BIO_MAX_PAGES);
@@ -757,7 +757,7 @@ static int parse_options(struct super_block *sb, char *options)
kvfree(name);
break;
case Opt_test_dummy_encryption:
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
if (!f2fs_sb_has_encrypt(sbi)) {
f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
return -EINVAL;
@@ -821,6 +821,8 @@ static int parse_options(struct super_block *sb, char *options)
}
if (test_opt(sbi, INLINE_XATTR_SIZE)) {
+ int min_size, max_size;
+
if (!f2fs_sb_has_extra_attr(sbi) ||
!f2fs_sb_has_flexible_inline_xattr(sbi)) {
f2fs_msg(sb, KERN_ERR,
@@ -834,14 +836,15 @@ static int parse_options(struct super_block *sb, char *options)
"set with inline_xattr option");
return -EINVAL;
}
- if (!F2FS_OPTION(sbi).inline_xattr_size ||
- F2FS_OPTION(sbi).inline_xattr_size >=
- DEF_ADDRS_PER_INODE -
- F2FS_TOTAL_EXTRA_ATTR_SIZE -
- DEF_INLINE_RESERVED_SIZE -
- DEF_MIN_INLINE_SIZE) {
+
+ min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
+ max_size = MAX_INLINE_XATTR_SIZE;
+
+ if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
+ F2FS_OPTION(sbi).inline_xattr_size > max_size) {
f2fs_msg(sb, KERN_ERR,
- "inline xattr size is out of range");
+ "inline xattr size is out of range: %d ~ %d",
+ min_size, max_size);
return -EINVAL;
}
}
@@ -915,6 +918,10 @@ static int f2fs_drop_inode(struct inode *inode)
sb_start_intwrite(inode->i_sb);
f2fs_i_size_write(inode, 0);
+ f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
+ inode, NULL, 0, DATA);
+ truncate_inode_pages_final(inode->i_mapping);
+
if (F2FS_HAS_BLOCKS(inode))
f2fs_truncate(inode);
@@ -1048,7 +1055,7 @@ static void f2fs_put_super(struct super_block *sb)
}
/* be sure to wait for any on-going discard commands */
- dropped = f2fs_wait_discard_bios(sbi);
+ dropped = f2fs_issue_discard_timeout(sbi);
if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
!sbi->discard_blks && !dropped) {
@@ -1075,7 +1082,10 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_bug_on(sbi, sbi->fsync_node_num);
iput(sbi->node_inode);
+ sbi->node_inode = NULL;
+
iput(sbi->meta_inode);
+ sbi->meta_inode = NULL;
/*
* iput() can update stat information, if f2fs_write_checkpoint()
@@ -1390,7 +1400,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_printf(seq, ",whint_mode=%s", "user-based");
else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
seq_printf(seq, ",whint_mode=%s", "fs-based");
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
if (F2FS_OPTION(sbi).test_dummy_encryption)
seq_puts(seq, ",test_dummy_encryption");
#endif
@@ -1455,9 +1465,16 @@ static int f2fs_enable_quotas(struct super_block *sb);
static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
+ unsigned int s_flags = sbi->sb->s_flags;
struct cp_control cpc;
- int err;
+ int err = 0;
+ int ret;
+ if (s_flags & SB_RDONLY) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "checkpoint=disable on readonly fs");
+ return -EINVAL;
+ }
sbi->sb->s_flags |= SB_ACTIVE;
f2fs_update_time(sbi, DISABLE_TIME);
@@ -1465,18 +1482,24 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
while (!f2fs_time_over(sbi, DISABLE_TIME)) {
mutex_lock(&sbi->gc_mutex);
err = f2fs_gc(sbi, true, false, NULL_SEGNO);
- if (err == -ENODATA)
+ if (err == -ENODATA) {
+ err = 0;
break;
+ }
if (err && err != -EAGAIN)
- return err;
+ break;
}
- err = sync_filesystem(sbi->sb);
- if (err)
- return err;
+ ret = sync_filesystem(sbi->sb);
+ if (ret || err) {
+ err = ret ? ret : err;
+ goto restore_flag;
+ }
- if (f2fs_disable_cp_again(sbi))
- return -EAGAIN;
+ if (f2fs_disable_cp_again(sbi)) {
+ err = -EAGAIN;
+ goto restore_flag;
+ }
mutex_lock(&sbi->gc_mutex);
cpc.reason = CP_PAUSE;
@@ -1485,7 +1508,9 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
sbi->unusable_block_count = 0;
mutex_unlock(&sbi->gc_mutex);
- return 0;
+restore_flag:
+ sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+ return err;
}
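
f2fs_disable_checkpoint() now snapshots s_flags up front and funnels every failure through restore_flag, so a temporary SB_ACTIVE cannot leak past an error. A compact sketch of that error-handling shape (all names illustrative):

    struct ctx { unsigned int flags; };        /* stands in for sb->s_flags */
    #define ACTIVE_FLAG 0x1u                   /* stands in for SB_ACTIVE */

    static int step_one(struct ctx *c) { (void)c; return 0; }
    static int step_two(struct ctx *c) { (void)c; return -1; }

    static int do_setup(struct ctx *c)
    {
        unsigned int saved = c->flags;         /* snapshot before mutating */
        int err;

        c->flags |= ACTIVE_FLAG;

        err = step_one(c);
        if (err)
            goto restore;
        err = step_two(c);
        if (err)
            goto restore;
        return 0;

    restore:
        c->flags = saved;                      /* every failure restores state */
        return err;
    }
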
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
@@ -2023,6 +2048,12 @@ void f2fs_quota_off_umount(struct super_block *sb)
set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
}
}
+ /*
+ * In case of checkpoint=disable, we must flush quota blocks.
+ * Otherwise end_io may hit a NULL pointer dereference on node_inode,
+ * since put_super has already dropped it.
+ */
+ sync_filesystem(sb);
}
static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
@@ -2154,7 +2185,7 @@ static const struct super_operations f2fs_sops = {
.remount_fs = f2fs_remount,
};
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
@@ -2703,6 +2734,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
+ sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
+ DEF_UMOUNT_DISCARD_TIMEOUT;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
for (i = 0; i < NR_COUNT_TYPE; i++)
@@ -3022,10 +3055,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
struct f2fs_super_block *raw_super;
struct inode *root;
int err;
- bool retry = true, need_fsck = false;
+ bool skip_recovery = false, need_fsck = false;
char *options = NULL;
int recovery, i, valid_super_block;
struct curseg_info *seg_i;
+ int retry_cnt = 1;
try_onemore:
err = -EINVAL;
@@ -3097,7 +3131,6 @@ try_onemore:
sb->s_maxbytes = sbi->max_file_blocks <<
le32_to_cpu(raw_super->log_blocksize);
sb->s_max_links = F2FS_LINK_MAX;
- get_random_bytes(&sbi->s_next_generation, sizeof(u32));
#ifdef CONFIG_QUOTA
sb->dq_op = &f2fs_quota_operations;
@@ -3116,7 +3149,7 @@ try_onemore:
#endif
sb->s_op = &f2fs_sops;
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
sb->s_cop = &f2fs_cryptops;
#endif
sb->s_xattr = f2fs_xattr_handlers;
@@ -3200,6 +3233,10 @@ try_onemore:
if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
+ set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
+ sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
+ }
/* Initialize device list */
err = f2fs_scan_devices(sbi);
@@ -3288,7 +3325,7 @@ try_onemore:
sb->s_root = d_make_root(root); /* allocate root dentry */
if (!sb->s_root) {
err = -ENOMEM;
- goto free_root_inode;
+ goto free_node_inode;
}
err = f2fs_register_sysfs(sbi);
@@ -3310,7 +3347,7 @@ try_onemore:
goto free_meta;
if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
- goto skip_recovery;
+ goto reset_checkpoint;
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
@@ -3327,11 +3364,13 @@ try_onemore:
if (need_fsck)
set_sbi_flag(sbi, SBI_NEED_FSCK);
- if (!retry)
- goto skip_recovery;
+ if (skip_recovery)
+ goto reset_checkpoint;
err = f2fs_recover_fsync_data(sbi, false);
if (err < 0) {
+ if (err != -ENOMEM)
+ skip_recovery = true;
need_fsck = true;
f2fs_msg(sb, KERN_ERR,
"Cannot recover all fsync data errno=%d", err);
@@ -3347,14 +3386,14 @@ try_onemore:
goto free_meta;
}
}
-skip_recovery:
+reset_checkpoint:
/* f2fs_recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
if (test_opt(sbi, DISABLE_CHECKPOINT)) {
err = f2fs_disable_checkpoint(sbi);
if (err)
- goto free_meta;
+ goto sync_free_meta;
} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
f2fs_enable_checkpoint(sbi);
}
@@ -3367,7 +3406,7 @@ skip_recovery:
/* After POR, we can run background GC thread.*/
err = f2fs_start_gc_thread(sbi);
if (err)
- goto free_meta;
+ goto sync_free_meta;
}
kvfree(options);
@@ -3387,8 +3426,14 @@ skip_recovery:
cur_cp_version(F2FS_CKPT(sbi)));
f2fs_update_time(sbi, CP_TIME);
f2fs_update_time(sbi, REQ_TIME);
+ clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
return 0;
+sync_free_meta:
+ /* safe to flush all the data */
+ sync_filesystem(sbi->sb);
+ retry_cnt = 0;
+
free_meta:
#ifdef CONFIG_QUOTA
f2fs_truncate_quota_inode_pages(sb);
@@ -3402,6 +3447,8 @@ free_meta:
* falls into an infinite loop in f2fs_sync_meta_pages().
*/
truncate_inode_pages_final(META_MAPPING(sbi));
+ /* evict some inodes being cached by GC */
+ evict_inodes(sb);
f2fs_unregister_sysfs(sbi);
free_root_inode:
dput(sb->s_root);
@@ -3410,6 +3457,7 @@ free_node_inode:
f2fs_release_ino_entry(sbi, true);
truncate_inode_pages_final(NODE_MAPPING(sbi));
iput(sbi->node_inode);
+ sbi->node_inode = NULL;
free_stats:
f2fs_destroy_stats(sbi);
free_nm:
@@ -3422,6 +3470,7 @@ free_devices:
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
+ sbi->meta_inode = NULL;
free_io_dummy:
mempool_destroy(sbi->write_io_dummy);
free_percpu:
@@ -3443,8 +3492,8 @@ free_sbi:
kvfree(sbi);
/* give only one another chance */
- if (retry) {
- retry = false;
+ if (retry_cnt > 0 && skip_recovery) {
+ retry_cnt--;
shrink_dcache_sb(sb);
goto try_onemore;
}
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 0575edbe3ed6..729f46a3c9ee 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -222,6 +222,8 @@ out:
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
return -EINVAL;
+ if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
+ return -EINVAL;
#endif
if (a->struct_type == RESERVED_BLOCKS) {
spin_lock(&sbi->stat_lock);
@@ -278,10 +280,16 @@ out:
return count;
}
- *ui = t;
- if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
- f2fs_reset_iostat(sbi);
+ if (!strcmp(a->attr.name, "iostat_enable")) {
+ sbi->iostat_enable = !!t;
+ if (!sbi->iostat_enable)
+ f2fs_reset_iostat(sbi);
+ return count;
+ }
+
+ *ui = (unsigned int)t;
+
return count;
}
@@ -418,6 +426,8 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, discard_idle_interval,
interval_time[DISCARD_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info,
+ umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
@@ -431,7 +441,7 @@ F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
F2FS_GENERAL_RO_ATTR(features);
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
#endif
#ifdef CONFIG_BLK_DEV_ZONED
@@ -475,6 +485,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(idle_interval),
ATTR_LIST(discard_idle_interval),
ATTR_LIST(gc_idle_interval),
+ ATTR_LIST(umount_discard_timeout),
ATTR_LIST(iostat_enable),
ATTR_LIST(readdir_ra),
ATTR_LIST(gc_pin_file_thresh),
@@ -492,7 +503,7 @@ static struct attribute *f2fs_attrs[] = {
};
static struct attribute *f2fs_feat_attrs[] = {
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
ATTR_LIST(encryption),
#endif
#ifdef CONFIG_BLK_DEV_ZONED
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index ce2a5eb210b6..d0ab533a9ce8 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -14,7 +14,7 @@
#include "trace.h"
static RADIX_TREE(pids, GFP_ATOMIC);
-static struct mutex pids_lock;
+static spinlock_t pids_lock;
static struct last_io_info last_io;
static inline void __print_last_io(void)
@@ -58,23 +58,29 @@ void f2fs_trace_pid(struct page *page)
set_page_private(page, (unsigned long)pid);
+retry:
if (radix_tree_preload(GFP_NOFS))
return;
- mutex_lock(&pids_lock);
+ spin_lock(&pids_lock);
p = radix_tree_lookup(&pids, pid);
if (p == current)
goto out;
if (p)
radix_tree_delete(&pids, pid);
- f2fs_radix_tree_insert(&pids, pid, current);
+ if (radix_tree_insert(&pids, pid, current)) {
+ spin_unlock(&pids_lock);
+ radix_tree_preload_end();
+ cond_resched();
+ goto retry;
+ }
trace_printk("%3x:%3x %4x %-16s\n",
MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
pid, current->comm);
out:
- mutex_unlock(&pids_lock);
+ spin_unlock(&pids_lock);
radix_tree_preload_end();
}
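
Because pids_lock is now a spinlock, f2fs_trace_pid() cannot sleep inside it; the patch preloads the radix tree outside the lock and, if radix_tree_insert() still fails, backs all the way out before rescheduling and retrying. The pattern condensed into one place (names from the diff above; the lookup/delete step is elided):

    static void track_pid(pid_t pid)
    {
    retry:
        if (radix_tree_preload(GFP_NOFS))   /* may sleep: done unlocked */
            return;
        spin_lock(&pids_lock);
        if (radix_tree_insert(&pids, pid, current)) {
            spin_unlock(&pids_lock);        /* back out of the atomic section, */
            radix_tree_preload_end();       /* release the preload, */
            cond_resched();                 /* breathe, then try again */
            goto retry;
        }
        spin_unlock(&pids_lock);
        radix_tree_preload_end();
    }
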
@@ -119,7 +125,7 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
void f2fs_build_trace_ios(void)
{
- mutex_init(&pids_lock);
+ spin_lock_init(&pids_lock);
}
#define PIDVEC_SIZE 128
@@ -147,7 +153,7 @@ void f2fs_destroy_trace_ios(void)
pid_t next_pid = 0;
unsigned int found;
- mutex_lock(&pids_lock);
+ spin_lock(&pids_lock);
while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
unsigned idx;
@@ -155,5 +161,5 @@ void f2fs_destroy_trace_ios(void)
for (idx = 0; idx < found; idx++)
radix_tree_delete(&pids, pid[idx]);
}
- mutex_unlock(&pids_lock);
+ spin_unlock(&pids_lock);
}
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 18d5ffbc5e8c..848a785abe25 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -224,11 +224,11 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
{
struct f2fs_xattr_entry *entry;
unsigned int inline_size = inline_xattr_size(inode);
+ void *max_addr = base_addr + inline_size;
list_for_each_xattr(entry, base_addr) {
- if ((void *)entry + sizeof(__u32) > base_addr + inline_size ||
- (void *)XATTR_NEXT_ENTRY(entry) + sizeof(__u32) >
- base_addr + inline_size) {
+ if ((void *)entry + sizeof(__u32) > max_addr ||
+ (void *)XATTR_NEXT_ENTRY(entry) > max_addr) {
*last_addr = entry;
return NULL;
}
@@ -239,6 +239,13 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
if (!memcmp(entry->e_name, name, len))
break;
}
+
+ /* inline xattr header or entry across max inline xattr size */
+ if (IS_XATTR_LAST_ENTRY(entry) &&
+ (void *)entry + sizeof(__u32) > max_addr) {
+ *last_addr = entry;
+ return NULL;
+ }
return entry;
}
@@ -340,7 +347,7 @@ check:
*base_addr = txattr_addr;
return 0;
out:
- kzfree(txattr_addr);
+ kvfree(txattr_addr);
return err;
}
@@ -383,7 +390,7 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
*base_addr = txattr_addr;
return 0;
fail:
- kzfree(txattr_addr);
+ kvfree(txattr_addr);
return err;
}
@@ -510,7 +517,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
}
error = size;
out:
- kzfree(base_addr);
+ kvfree(base_addr);
return error;
}
@@ -538,7 +545,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
if (!handler || (handler->list && !handler->list(dentry)))
continue;
- prefix = handler->prefix ?: handler->name;
+ prefix = xattr_prefix(handler);
prefix_len = strlen(prefix);
size = prefix_len + entry->e_name_len + 1;
if (buffer) {
@@ -556,7 +563,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
}
error = buffer_size - rest;
cleanup:
- kzfree(base_addr);
+ kvfree(base_addr);
return error;
}
@@ -687,7 +694,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (!error && S_ISDIR(inode->i_mode))
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
exit:
- kzfree(base_addr);
+ kvfree(base_addr);
return error;
}
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 67db134da0f5..9172ee082ca8 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -78,6 +78,12 @@ struct f2fs_xattr_entry {
sizeof(struct f2fs_xattr_header) - \
sizeof(struct f2fs_xattr_entry))
+#define MAX_INLINE_XATTR_SIZE \
+ (DEF_ADDRS_PER_INODE - \
+ F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) - \
+ DEF_INLINE_RESERVED_SIZE - \
+ MIN_INLINE_DENTRY_SIZE / sizeof(__le32))
+
/*
* On-disk structure of f2fs_xattr
* We use inline xattrs space + 1 block for xattr.
diff --git a/fs/filesystems.c b/fs/filesystems.c
index b03f57b1105b..9135646e41ac 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/fs_parser.h>
/*
* Handling of filesystem drivers list.
@@ -73,6 +74,9 @@ int register_filesystem(struct file_system_type * fs)
int res = 0;
struct file_system_type ** p;
+ if (fs->parameters && !fs_validate_description(fs->parameters))
+ return -EINVAL;
+
BUG_ON(strchr(fs->name, '.'));
if (fs->next)
return -EBUSY;
diff --git a/fs/fs_context.c b/fs/fs_context.c
new file mode 100644
index 000000000000..87e3546b9a52
--- /dev/null
+++ b/fs/fs_context.c
@@ -0,0 +1,642 @@
+/* Provide a way to create a superblock configuration context within the kernel
+ * that allows a superblock to be set up prior to mounting.
+ *
+ * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/nsproxy.h>
+#include <linux/slab.h>
+#include <linux/magic.h>
+#include <linux/security.h>
+#include <linux/mnt_namespace.h>
+#include <linux/pid_namespace.h>
+#include <linux/user_namespace.h>
+#include <net/net_namespace.h>
+#include "mount.h"
+#include "internal.h"
+
+enum legacy_fs_param {
+ LEGACY_FS_UNSET_PARAMS,
+ LEGACY_FS_MONOLITHIC_PARAMS,
+ LEGACY_FS_INDIVIDUAL_PARAMS,
+};
+
+struct legacy_fs_context {
+ char *legacy_data; /* Data page for legacy filesystems */
+ size_t data_size;
+ enum legacy_fs_param param_type;
+};
+
+static int legacy_init_fs_context(struct fs_context *fc);
+
+static const struct constant_table common_set_sb_flag[] = {
+ { "dirsync", SB_DIRSYNC },
+ { "lazytime", SB_LAZYTIME },
+ { "mand", SB_MANDLOCK },
+ { "posixacl", SB_POSIXACL },
+ { "ro", SB_RDONLY },
+ { "sync", SB_SYNCHRONOUS },
+};
+
+static const struct constant_table common_clear_sb_flag[] = {
+ { "async", SB_SYNCHRONOUS },
+ { "nolazytime", SB_LAZYTIME },
+ { "nomand", SB_MANDLOCK },
+ { "rw", SB_RDONLY },
+ { "silent", SB_SILENT },
+};
+
+static const char *const forbidden_sb_flag[] = {
+ "bind",
+ "dev",
+ "exec",
+ "move",
+ "noatime",
+ "nodev",
+ "nodiratime",
+ "noexec",
+ "norelatime",
+ "nostrictatime",
+ "nosuid",
+ "private",
+ "rec",
+ "relatime",
+ "remount",
+ "shared",
+ "slave",
+ "strictatime",
+ "suid",
+ "unbindable",
+};
+
+/*
+ * Check for a common mount option that manipulates s_flags.
+ */
+static int vfs_parse_sb_flag(struct fs_context *fc, const char *key)
+{
+ unsigned int token;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(forbidden_sb_flag); i++)
+ if (strcmp(key, forbidden_sb_flag[i]) == 0)
+ return -EINVAL;
+
+ token = lookup_constant(common_set_sb_flag, key, 0);
+ if (token) {
+ fc->sb_flags |= token;
+ fc->sb_flags_mask |= token;
+ return 0;
+ }
+
+ token = lookup_constant(common_clear_sb_flag, key, 0);
+ if (token) {
+ fc->sb_flags &= ~token;
+ fc->sb_flags_mask |= token;
+ return 0;
+ }
+
+ return -ENOPARAM;
+}
+
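
The set/clear tables above let vfs_parse_sb_flag() turn option names into SB_* bit operations by table lookup rather than a chain of strcmp()s. A standalone sketch of the same table-driven approach (the flag values and the lookup helper's signature here are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    struct constant_table { const char *name; int value; };

    static const struct constant_table set_flags[] = {
        { "ro",   0x01 },
        { "sync", 0x10 },
    };

    static int lookup(const struct constant_table *tbl, size_t n,
                      const char *name, int not_found)
    {
        for (size_t i = 0; i < n; i++)
            if (strcmp(tbl[i].name, name) == 0)
                return tbl[i].value;
        return not_found;
    }

    int main(void)
    {
        int sb_flags = 0, mask = 0;
        int token = lookup(set_flags, 2, "ro", 0);

        if (token) {                /* same shape as vfs_parse_sb_flag() */
            sb_flags |= token;
            mask |= token;
        }
        printf("flags=%#x mask=%#x\n", sb_flags, mask); /* flags=0x1 mask=0x1 */
        return 0;
    }
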
+/**
+ * vfs_parse_fs_param - Add a single parameter to a superblock config
+ * @fc: The filesystem context to modify
+ * @param: The parameter
+ *
+ * A single mount option in string form is applied to the filesystem context
+ * being set up. Certain standard options (for example "ro") are translated
+ * into flag bits without going to the filesystem. The active security module
+ * is allowed to observe and poach options. Any other options are passed over
+ * to the filesystem to parse.
+ *
+ * This may be called multiple times for a context.
+ *
+ * Returns 0 on success and a negative error code on failure. In the event of
+ * failure, supplementary error information may have been set.
+ */
+int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ int ret;
+
+ if (!param->key)
+ return invalf(fc, "Unnamed parameter\n");
+
+ ret = vfs_parse_sb_flag(fc, param->key);
+ if (ret != -ENOPARAM)
+ return ret;
+
+ ret = security_fs_context_parse_param(fc, param);
+ if (ret != -ENOPARAM)
+ /* Param belongs to the LSM or is disallowed by the LSM; so
+ * don't pass to the FS.
+ */
+ return ret;
+
+ if (fc->ops->parse_param) {
+ ret = fc->ops->parse_param(fc, param);
+ if (ret != -ENOPARAM)
+ return ret;
+ }
+
+ /* If the filesystem doesn't take any arguments, give it the
+ * default handling of source.
+ */
+ if (strcmp(param->key, "source") == 0) {
+ if (param->type != fs_value_is_string)
+ return invalf(fc, "VFS: Non-string source");
+ if (fc->source)
+ return invalf(fc, "VFS: Multiple sources");
+ fc->source = param->string;
+ param->string = NULL;
+ return 0;
+ }
+
+ return invalf(fc, "%s: Unknown parameter '%s'",
+ fc->fs_type->name, param->key);
+}
+EXPORT_SYMBOL(vfs_parse_fs_param);
+
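
vfs_parse_fs_string() just below is the canonical caller, but a filesystem can also feed vfs_parse_fs_param() a pre-built parameter directly. A hedged sketch, using only the struct fs_parameter fields this file itself uses:

    static int set_readonly(struct fs_context *fc)
    {
        struct fs_parameter param = {
            .key  = "ro",
            .type = fs_value_is_string,
            .size = 0,              /* no value, like vfs_parse_fs_string() */
        };

        /* "ro" is consumed by vfs_parse_sb_flag() and never reaches the
         * filesystem's ->parse_param() hook. */
        return vfs_parse_fs_param(fc, &param);
    }
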
+/**
+ * vfs_parse_fs_string - Convenience function to just parse a string.
+ * @fc: The filesystem context to modify
+ * @key: The parameter key
+ * @value: The parameter value (may be NULL if @v_size is 0)
+ * @v_size: The size of @value in bytes
+ */
+int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ const char *value, size_t v_size)
+{
+ int ret;
+
+ struct fs_parameter param = {
+ .key = key,
+ .type = fs_value_is_string,
+ .size = v_size,
+ };
+
+ if (v_size > 0) {
+ param.string = kmemdup_nul(value, v_size, GFP_KERNEL);
+ if (!param.string)
+ return -ENOMEM;
+ }
+
+ ret = vfs_parse_fs_param(fc, &param);
+ kfree(param.string);
+ return ret;
+}
+EXPORT_SYMBOL(vfs_parse_fs_string);
+
+/**
+ * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+ * @fc: The filesystem context to fill in.
+ * @data: The data to parse
+ *
+ * Parse a blob of data that's in key[=val][,key[=val]]* form. This can be
+ * called from the ->parse_monolithic() fs_context operation.
+ *
+ * Returns 0 on success or the error returned by the ->parse_param() fs_context
+ * operation on failure.
+ */
+int generic_parse_monolithic(struct fs_context *fc, void *data)
+{
+ char *options = data, *key;
+ int ret = 0;
+
+ if (!options)
+ return 0;
+
+ ret = security_sb_eat_lsm_opts(options, &fc->security);
+ if (ret)
+ return ret;
+
+ while ((key = strsep(&options, ",")) != NULL) {
+ if (*key) {
+ size_t v_len = 0;
+ char *value = strchr(key, '=');
+
+ if (value) {
+ if (value == key)
+ continue;
+ *value++ = 0;
+ v_len = strlen(value);
+ }
+ ret = vfs_parse_fs_string(fc, key, value, v_len);
+ if (ret < 0)
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(generic_parse_monolithic);
+
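
generic_parse_monolithic() splits the classic mount data blob with strsep() and a single '=' scan per segment. The core splitting logic, extracted into a runnable userspace demo:

    #define _GNU_SOURCE             /* for strsep() */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char data[] = "ro,data=ordered,nouser_xattr";
        char *options = data, *key;

        while ((key = strsep(&options, ",")) != NULL) {
            char *value;

            if (!*key)
                continue;               /* skip empty segments, as above */
            value = strchr(key, '=');
            if (value) {
                if (value == key)       /* "=foo": no key, ignored */
                    continue;
                *value++ = '\0';
            }
            printf("key=%s value=%s\n", key, value ? value : "(none)");
        }
        return 0;
    }

Running it prints "key=ro value=(none)", "key=data value=ordered", "key=nouser_xattr value=(none)", mirroring how each segment reaches vfs_parse_fs_string().
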
+/**
+ * alloc_fs_context - Create a filesystem context.
+ * @fs_type: The filesystem type.
+ * @reference: The dentry from which this one derives (or NULL)
+ * @sb_flags: Filesystem/superblock flags (SB_*)
+ * @sb_flags_mask: Applicable members of @sb_flags
+ * @purpose: The purpose that this configuration shall be used for.
+ *
+ * Open a filesystem and create a mount context. The mount context is
+ * initialised with the supplied flags and, if a submount/automount from
+ * another superblock (referred to by @reference) is supplied, may have
+ * parameters such as namespaces copied across from that superblock.
+ */
+static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
+ struct dentry *reference,
+ unsigned int sb_flags,
+ unsigned int sb_flags_mask,
+ enum fs_context_purpose purpose)
+{
+ int (*init_fs_context)(struct fs_context *);
+ struct fs_context *fc;
+ int ret = -ENOMEM;
+
+ fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
+ if (!fc)
+ return ERR_PTR(-ENOMEM);
+
+ fc->purpose = purpose;
+ fc->sb_flags = sb_flags;
+ fc->sb_flags_mask = sb_flags_mask;
+ fc->fs_type = get_filesystem(fs_type);
+ fc->cred = get_current_cred();
+ fc->net_ns = get_net(current->nsproxy->net_ns);
+
+ switch (purpose) {
+ case FS_CONTEXT_FOR_MOUNT:
+ fc->user_ns = get_user_ns(fc->cred->user_ns);
+ break;
+ case FS_CONTEXT_FOR_SUBMOUNT:
+ fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
+ break;
+ case FS_CONTEXT_FOR_RECONFIGURE:
+ /* We don't pin any namespaces as the superblock's
+ * subscriptions cannot be changed at this point.
+ */
+ atomic_inc(&reference->d_sb->s_active);
+ fc->root = dget(reference);
+ break;
+ }
+
+ /* TODO: Make all filesystems support this unconditionally */
+ init_fs_context = fc->fs_type->init_fs_context;
+ if (!init_fs_context)
+ init_fs_context = legacy_init_fs_context;
+
+ ret = init_fs_context(fc);
+ if (ret < 0)
+ goto err_fc;
+ fc->need_free = true;
+ return fc;
+
+err_fc:
+ put_fs_context(fc);
+ return ERR_PTR(ret);
+}
+
+struct fs_context *fs_context_for_mount(struct file_system_type *fs_type,
+ unsigned int sb_flags)
+{
+ return alloc_fs_context(fs_type, NULL, sb_flags, 0,
+ FS_CONTEXT_FOR_MOUNT);
+}
+EXPORT_SYMBOL(fs_context_for_mount);
+
+struct fs_context *fs_context_for_reconfigure(struct dentry *dentry,
+ unsigned int sb_flags,
+ unsigned int sb_flags_mask)
+{
+ return alloc_fs_context(dentry->d_sb->s_type, dentry, sb_flags,
+ sb_flags_mask, FS_CONTEXT_FOR_RECONFIGURE);
+}
+EXPORT_SYMBOL(fs_context_for_reconfigure);
+
+struct fs_context *fs_context_for_submount(struct file_system_type *type,
+ struct dentry *reference)
+{
+ return alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
+}
+EXPORT_SYMBOL(fs_context_for_submount);
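A remount becomes just another context with a different purpose. A minimal
sketch (the flag choice is illustrative):

	struct fs_context *fc;

	fc = fs_context_for_reconfigure(dentry, SB_RDONLY, SB_RDONLY);
	if (IS_ERR(fc))
		return PTR_ERR(fc);
	/* ...parse any new options, then commit the reconfiguration... */
	put_fs_context(fc);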
+
+void fc_drop_locked(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ dput(fc->root);
+ fc->root = NULL;
+ deactivate_locked_super(sb);
+}
+
+static void legacy_fs_context_free(struct fs_context *fc);
+
+/**
+ * vfs_dup_fs_context - Duplicate a filesystem context.
+ * @src_fc: The context to copy.
+ */
+struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc)
+{
+ struct fs_context *fc;
+ int ret;
+
+ if (!src_fc->ops->dup)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ fc = kmemdup(src_fc, sizeof(struct fs_context), GFP_KERNEL);
+ if (!fc)
+ return ERR_PTR(-ENOMEM);
+
+ fc->fs_private = NULL;
+ fc->s_fs_info = NULL;
+ fc->source = NULL;
+ fc->security = NULL;
+ get_filesystem(fc->fs_type);
+ get_net(fc->net_ns);
+ get_user_ns(fc->user_ns);
+ get_cred(fc->cred);
+
+ /* Can't call put until we've called ->dup */
+ ret = fc->ops->dup(fc, src_fc);
+ if (ret < 0)
+ goto err_fc;
+
+ ret = security_fs_context_dup(fc, src_fc);
+ if (ret < 0)
+ goto err_fc;
+ return fc;
+
+err_fc:
+ put_fs_context(fc);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(vfs_dup_fs_context);
+
+#ifdef CONFIG_PRINTK
+/**
+ * logfc - Log a message to a filesystem context
+ * @fc: The filesystem context to log to.
+ * @fmt: The format of the message; a leading 'w' or 'e' selects the
+ * warning or error loglevel (default is notice).
+ */
+void logfc(struct fs_context *fc, const char *fmt, ...)
+{
+ va_list va;
+
+ va_start(va, fmt);
+
+ switch (fmt[0]) {
+ case 'w':
+ vprintk_emit(0, LOGLEVEL_WARNING, NULL, 0, fmt, va);
+ break;
+ case 'e':
+ vprintk_emit(0, LOGLEVEL_ERR, NULL, 0, fmt, va);
+ break;
+ default:
+ vprintk_emit(0, LOGLEVEL_NOTICE, NULL, 0, fmt, va);
+ break;
+ }
+
+ pr_cont("\n");
+ va_end(va);
+}
+EXPORT_SYMBOL(logfc);
+#endif
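The severity prefix is normally added by wrapper macros rather than written by
hand; judging by the call sites elsewhere in this patch, warnf(), errorf() and
invalf() presumably prepend 'w' or 'e' to the format, with invalf()
additionally returning -EINVAL:

	warnf(fc, "%s: Deprecated parameter '%s'", desc->name, param->key);
	errorf(fc, "%s: Lookup failure for '%s'", param->key, f->name);
	return invalf(fc, "%s: Bad value for '%s'", desc->name, param->key);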
+
+/**
+ * put_fs_context - Dispose of a superblock configuration context.
+ * @fc: The context to dispose of.
+ */
+void put_fs_context(struct fs_context *fc)
+{
+ struct super_block *sb;
+
+ if (fc->root) {
+ sb = fc->root->d_sb;
+ dput(fc->root);
+ fc->root = NULL;
+ deactivate_super(sb);
+ }
+
+ if (fc->need_free && fc->ops && fc->ops->free)
+ fc->ops->free(fc);
+
+ security_free_mnt_opts(&fc->security);
+ put_net(fc->net_ns);
+ put_user_ns(fc->user_ns);
+ put_cred(fc->cred);
+ kfree(fc->subtype);
+ put_filesystem(fc->fs_type);
+ kfree(fc->source);
+ kfree(fc);
+}
+EXPORT_SYMBOL(put_fs_context);
+
+/*
+ * Free the config for a filesystem that doesn't support fs_context.
+ */
+static void legacy_fs_context_free(struct fs_context *fc)
+{
+ struct legacy_fs_context *ctx = fc->fs_private;
+
+ if (ctx) {
+ if (ctx->param_type == LEGACY_FS_INDIVIDUAL_PARAMS)
+ kfree(ctx->legacy_data);
+ kfree(ctx);
+ }
+}
+
+/*
+ * Duplicate a legacy config.
+ */
+static int legacy_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
+{
+ struct legacy_fs_context *ctx;
+ struct legacy_fs_context *src_ctx = src_fc->fs_private;
+
+ ctx = kmemdup(src_ctx, sizeof(*src_ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (ctx->param_type == LEGACY_FS_INDIVIDUAL_PARAMS) {
+ ctx->legacy_data = kmemdup(src_ctx->legacy_data,
+ src_ctx->data_size, GFP_KERNEL);
+ if (!ctx->legacy_data) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ }
+
+ fc->fs_private = ctx;
+ return 0;
+}
+
+/*
+ * Add a parameter to a legacy config. We build up a comma-separated list of
+ * options.
+ */
+static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct legacy_fs_context *ctx = fc->fs_private;
+ unsigned int size = ctx->data_size;
+ size_t len = 0;
+
+ if (strcmp(param->key, "source") == 0) {
+ if (param->type != fs_value_is_string)
+ return invalf(fc, "VFS: Legacy: Non-string source");
+ if (fc->source)
+ return invalf(fc, "VFS: Legacy: Multiple sources");
+ fc->source = param->string;
+ param->string = NULL;
+ return 0;
+ }
+
+ if ((fc->fs_type->fs_flags & FS_HAS_SUBTYPE) &&
+ strcmp(param->key, "subtype") == 0) {
+ if (param->type != fs_value_is_string)
+ return invalf(fc, "VFS: Legacy: Non-string subtype");
+ if (fc->subtype)
+ return invalf(fc, "VFS: Legacy: Multiple subtype");
+ fc->subtype = param->string;
+ param->string = NULL;
+ return 0;
+ }
+
+ if (ctx->param_type == LEGACY_FS_MONOLITHIC_PARAMS)
+ return invalf(fc, "VFS: Legacy: Can't mix monolithic and individual options");
+
+ switch (param->type) {
+ case fs_value_is_string:
+ len = 1 + param->size;
+ /* Fall through */
+ case fs_value_is_flag:
+ len += strlen(param->key);
+ break;
+ default:
+ return invalf(fc, "VFS: Legacy: Parameter type for '%s' not supported",
+ param->key);
+ }
+
+ if (len > PAGE_SIZE - 2 - size)
+ return invalf(fc, "VFS: Legacy: Cumulative options too large");
+ if (strchr(param->key, ',') ||
+ (param->type == fs_value_is_string &&
+ memchr(param->string, ',', param->size)))
+ return invalf(fc, "VFS: Legacy: Option '%s' contained comma",
+ param->key);
+ if (!ctx->legacy_data) {
+ ctx->legacy_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!ctx->legacy_data)
+ return -ENOMEM;
+ }
+
+ ctx->legacy_data[size++] = ',';
+ len = strlen(param->key);
+ memcpy(ctx->legacy_data + size, param->key, len);
+ size += len;
+ if (param->type == fs_value_is_string) {
+ ctx->legacy_data[size++] = '=';
+ memcpy(ctx->legacy_data + size, param->string, param->size);
+ size += param->size;
+ }
+ ctx->legacy_data[size] = '\0';
+ ctx->data_size = size;
+ ctx->param_type = LEGACY_FS_INDIVIDUAL_PARAMS;
+ return 0;
+}
+
+/*
+ * Add monolithic mount data.
+ */
+static int legacy_parse_monolithic(struct fs_context *fc, void *data)
+{
+ struct legacy_fs_context *ctx = fc->fs_private;
+
+ if (ctx->param_type != LEGACY_FS_UNSET_PARAMS) {
+ pr_warn("VFS: Can't mix monolithic and individual options\n");
+ return -EINVAL;
+ }
+
+ ctx->legacy_data = data;
+ ctx->param_type = LEGACY_FS_MONOLITHIC_PARAMS;
+ if (!ctx->legacy_data)
+ return 0;
+
+ if (fc->fs_type->fs_flags & FS_BINARY_MOUNTDATA)
+ return 0;
+ return security_sb_eat_lsm_opts(ctx->legacy_data, &fc->security);
+}
+
+/*
+ * Get a mountable root with the legacy mount command.
+ */
+static int legacy_get_tree(struct fs_context *fc)
+{
+ struct legacy_fs_context *ctx = fc->fs_private;
+ struct super_block *sb;
+ struct dentry *root;
+
+ root = fc->fs_type->mount(fc->fs_type, fc->sb_flags,
+ fc->source, ctx->legacy_data);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ sb = root->d_sb;
+ BUG_ON(!sb);
+
+ fc->root = root;
+ return 0;
+}
+
+/*
+ * Handle remount.
+ */
+static int legacy_reconfigure(struct fs_context *fc)
+{
+ struct legacy_fs_context *ctx = fc->fs_private;
+ struct super_block *sb = fc->root->d_sb;
+
+ if (!sb->s_op->remount_fs)
+ return 0;
+
+ return sb->s_op->remount_fs(sb, &fc->sb_flags,
+ ctx ? ctx->legacy_data : NULL);
+}
+
+const struct fs_context_operations legacy_fs_context_ops = {
+ .free = legacy_fs_context_free,
+ .dup = legacy_fs_context_dup,
+ .parse_param = legacy_parse_param,
+ .parse_monolithic = legacy_parse_monolithic,
+ .get_tree = legacy_get_tree,
+ .reconfigure = legacy_reconfigure,
+};
+
+/*
+ * Initialise a legacy context for a filesystem that doesn't support
+ * fs_context.
+ */
+static int legacy_init_fs_context(struct fs_context *fc)
+{
+ fc->fs_private = kzalloc(sizeof(struct legacy_fs_context), GFP_KERNEL);
+ if (!fc->fs_private)
+ return -ENOMEM;
+ fc->ops = &legacy_fs_context_ops;
+ return 0;
+}
+
+int parse_monolithic_mount_data(struct fs_context *fc, void *data)
+{
+ int (*monolithic_mount_data)(struct fs_context *, void *);
+
+ monolithic_mount_data = fc->ops->parse_monolithic;
+ if (!monolithic_mount_data)
+ monolithic_mount_data = generic_parse_monolithic;
+
+ return monolithic_mount_data(fc, data);
+}
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
new file mode 100644
index 000000000000..570d71043acf
--- /dev/null
+++ b/fs/fs_parser.c
@@ -0,0 +1,447 @@
+/* Filesystem parameter parser.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/export.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/namei.h>
+#include "internal.h"
+
+static const struct constant_table bool_names[] = {
+ { "0", false },
+ { "1", true },
+ { "false", false },
+ { "no", false },
+ { "true", true },
+ { "yes", true },
+};
+
+/**
+ * lookup_constant - Look up a constant by name in an ordered table
+ * @tbl: The table of constants to search.
+ * @tbl_size: The size of the table.
+ * @name: The name to look up.
+ * @not_found: The value to return if the name is not found.
+ */
+int __lookup_constant(const struct constant_table *tbl, size_t tbl_size,
+ const char *name, int not_found)
+{
+ unsigned int i;
+
+ for (i = 0; i < tbl_size; i++)
+ if (strcmp(name, tbl[i].name) == 0)
+ return tbl[i].value;
+
+ return not_found;
+}
+EXPORT_SYMBOL(__lookup_constant);
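Lookup itself is a linear scan, but the CONFIG_VALIDATE_FS_PARSER checks
further down warn if a table is unsorted, so new tables should be declared in
strcmp() order like bool_names above. An illustrative table (hypothetical
names) using the lookup_constant() wrapper seen later in fs_parse():

	static const struct constant_table colours[] = {
		{ "blue",  2 },
		{ "green", 1 },
		{ "red",   0 },
	};

	int c = lookup_constant(colours, name, -1);
	if (c == -1)
		return -EINVAL;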
+
+static const struct fs_parameter_spec *fs_lookup_key(
+ const struct fs_parameter_description *desc,
+ const char *name)
+{
+ const struct fs_parameter_spec *p;
+
+ if (!desc->specs)
+ return NULL;
+
+ for (p = desc->specs; p->name; p++)
+ if (strcmp(p->name, name) == 0)
+ return p;
+
+ return NULL;
+}
+
+/**
+ * fs_parse - Parse a filesystem configuration parameter
+ * @fc: The filesystem context to log errors through.
+ * @desc: The parameter description to use.
+ * @param: The parameter.
+ * @result: Where to place the result of the parse
+ *
+ * Parse a filesystem configuration parameter and attempt a conversion for a
+ * simple parameter for which this is requested. If successful, any converted
+ * value is placed into the appropriate member of the union in @result and
+ * @result->negated records whether a "no"-prefixed negation was matched.
+ *
+ * The function returns the parameter's option number if the parameter was
+ * matched, -ENOPARAM if it wasn't matched and -EINVAL if there was a
+ * conversion issue.
+ */
+int fs_parse(struct fs_context *fc,
+ const struct fs_parameter_description *desc,
+ struct fs_parameter *param,
+ struct fs_parse_result *result)
+{
+ const struct fs_parameter_spec *p;
+ const struct fs_parameter_enum *e;
+ int ret = -ENOPARAM, b;
+
+ result->has_value = !!param->string;
+ result->negated = false;
+ result->uint_64 = 0;
+
+ p = fs_lookup_key(desc, param->key);
+ if (!p) {
+ /* If the key wasn't matched directly, see if it is of the
+ * form "noxxx" and "xxx" accepts "no"-prefixed negation - but
+ * only if there wasn't a value.
+ */
+ if (result->has_value)
+ goto unknown_parameter;
+ if (param->key[0] != 'n' || param->key[1] != 'o' || !param->key[2])
+ goto unknown_parameter;
+
+ p = fs_lookup_key(desc, param->key + 2);
+ if (!p)
+ goto unknown_parameter;
+ if (!(p->flags & fs_param_neg_with_no))
+ goto unknown_parameter;
+ result->boolean = false;
+ result->negated = true;
+ }
+
+ if (p->flags & fs_param_deprecated)
+ warnf(fc, "%s: Deprecated parameter '%s'",
+ desc->name, param->key);
+
+ if (result->negated)
+ goto okay;
+
+ /* Certain parameter types only take a string and convert it. */
+ switch (p->type) {
+ case __fs_param_wasnt_defined:
+ return -EINVAL;
+ case fs_param_is_u32:
+ case fs_param_is_u32_octal:
+ case fs_param_is_u32_hex:
+ case fs_param_is_s32:
+ case fs_param_is_u64:
+ case fs_param_is_enum:
+ case fs_param_is_string:
+ if (param->type != fs_value_is_string)
+ goto bad_value;
+ if (!result->has_value) {
+ if (p->flags & fs_param_v_optional)
+ goto okay;
+ goto bad_value;
+ }
+ /* Fall through */
+ default:
+ break;
+ }
+
+ /* Try to turn the type we were given into the type desired by the
+ * parameter and give an error if we can't.
+ */
+ switch (p->type) {
+ case fs_param_is_flag:
+ if (param->type != fs_value_is_flag &&
+ (param->type != fs_value_is_string || result->has_value))
+ return invalf(fc, "%s: Unexpected value for '%s'",
+ desc->name, param->key);
+ result->boolean = true;
+ goto okay;
+
+ case fs_param_is_bool:
+ switch (param->type) {
+ case fs_value_is_flag:
+ result->boolean = true;
+ goto okay;
+ case fs_value_is_string:
+ if (param->size == 0) {
+ result->boolean = true;
+ goto okay;
+ }
+ b = lookup_constant(bool_names, param->string, -1);
+ if (b == -1)
+ goto bad_value;
+ result->boolean = b;
+ goto okay;
+ default:
+ goto bad_value;
+ }
+
+ case fs_param_is_u32:
+ ret = kstrtouint(param->string, 0, &result->uint_32);
+ goto maybe_okay;
+ case fs_param_is_u32_octal:
+ ret = kstrtouint(param->string, 8, &result->uint_32);
+ goto maybe_okay;
+ case fs_param_is_u32_hex:
+ ret = kstrtouint(param->string, 16, &result->uint_32);
+ goto maybe_okay;
+ case fs_param_is_s32:
+ ret = kstrtoint(param->string, 0, &result->int_32);
+ goto maybe_okay;
+ case fs_param_is_u64:
+ ret = kstrtoull(param->string, 0, &result->uint_64);
+ goto maybe_okay;
+
+ case fs_param_is_enum:
+ for (e = desc->enums; e->name[0]; e++) {
+ if (e->opt == p->opt &&
+ strcmp(e->name, param->string) == 0) {
+ result->uint_32 = e->value;
+ goto okay;
+ }
+ }
+ goto bad_value;
+
+ case fs_param_is_string:
+ goto okay;
+ case fs_param_is_blob:
+ if (param->type != fs_value_is_blob)
+ goto bad_value;
+ goto okay;
+
+ case fs_param_is_fd: {
+ if (param->type != fs_value_is_file)
+ goto bad_value;
+ goto okay;
+ }
+
+ case fs_param_is_blockdev:
+ case fs_param_is_path:
+ goto okay;
+ default:
+ BUG();
+ }
+
+maybe_okay:
+ if (ret < 0)
+ goto bad_value;
+okay:
+ return p->opt;
+
+bad_value:
+ return invalf(fc, "%s: Bad value for '%s'", desc->name, param->key);
+unknown_parameter:
+ return -ENOPARAM;
+}
+EXPORT_SYMBOL(fs_parse);
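Putting it together, a filesystem's ->parse_param() would declare a description
and switch on the option number fs_parse() returns. A hedged sketch (the
"myfs" names are hypothetical; only fields exercised by the code above are
used):

	enum { Opt_mode, Opt_ro };

	static const struct fs_parameter_spec myfs_specs[] = {
		{ .name = "mode", .opt = Opt_mode, .type = fs_param_is_u32_octal },
		{ .name = "ro",   .opt = Opt_ro,   .type = fs_param_is_flag,
		  .flags = fs_param_neg_with_no },
		{}
	};

	static const struct fs_parameter_description myfs_desc = {
		.name	= "myfs",
		.specs	= myfs_specs,
	};

	static int myfs_parse_param(struct fs_context *fc,
				    struct fs_parameter *param)
	{
		struct fs_parse_result result;
		int opt;

		opt = fs_parse(fc, &myfs_desc, param, &result);
		if (opt < 0)
			return opt;	/* -ENOPARAM, -EINVAL, ... */

		switch (opt) {
		case Opt_mode:
			/* result.uint_32 holds the octal-converted value */
			break;
		case Opt_ro:
			/* result.negated is true if "noro" was given */
			break;
		}
		return 0;
	}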
+
+/**
+ * fs_lookup_param - Look up a path referred to by a parameter
+ * @fc: The filesystem context to log errors through.
+ * @param: The parameter.
+ * @want_bdev: True if a block device is wanted
+ * @_path: The result of the lookup
+ */
+int fs_lookup_param(struct fs_context *fc,
+ struct fs_parameter *param,
+ bool want_bdev,
+ struct path *_path)
+{
+ struct filename *f;
+ unsigned int flags = 0;
+ bool put_f;
+ int ret;
+
+ switch (param->type) {
+ case fs_value_is_string:
+ f = getname_kernel(param->string);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ put_f = true;
+ break;
+ case fs_value_is_filename_empty:
+ flags = LOOKUP_EMPTY;
+ /* Fall through */
+ case fs_value_is_filename:
+ f = param->name;
+ put_f = false;
+ break;
+ default:
+ return invalf(fc, "%s: not usable as path", param->key);
+ }
+
+ ret = filename_lookup(param->dirfd, f, flags, _path, NULL);
+ if (ret < 0) {
+ errorf(fc, "%s: Lookup failure for '%s'", param->key, f->name);
+ goto out;
+ }
+
+ if (want_bdev &&
+ !S_ISBLK(d_backing_inode(_path->dentry)->i_mode)) {
+ path_put(_path);
+ _path->dentry = NULL;
+ _path->mnt = NULL;
+ errorf(fc, "%s: Non-blockdev passed as '%s'",
+ param->key, f->name);
+ ret = -ENOTBLK;
+ }
+
+out:
+ if (put_f)
+ putname(f);
+ return ret;
+}
+EXPORT_SYMBOL(fs_lookup_param);
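A typical caller that needs a block device (sketch; error handling
abbreviated):

	struct path path;
	int err;

	err = fs_lookup_param(fc, param, true, &path);
	if (err)
		return err;
	/* use path.dentry / path.mnt, then drop the reference */
	path_put(&path);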
+
+#ifdef CONFIG_VALIDATE_FS_PARSER
+/**
+ * validate_constant_table - Validate a constant table
+ * @tbl: The constant table to validate.
+ * @tbl_size: The size of the table.
+ * @low: The lowest permissible value.
+ * @high: The highest permissible value.
+ * @special: One special permissible value outside of the range.
+ */
+bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
+ int low, int high, int special)
+{
+ size_t i;
+ bool good = true;
+
+ if (tbl_size == 0) {
+ pr_warn("VALIDATE C-TBL: Empty\n");
+ return true;
+ }
+
+ for (i = 0; i < tbl_size; i++) {
+ if (!tbl[i].name) {
+ pr_err("VALIDATE C-TBL[%zu]: Null\n", i);
+ good = false;
+ } else if (i > 0 && tbl[i - 1].name) {
+ int c = strcmp(tbl[i-1].name, tbl[i].name);
+
+ if (c == 0) {
+ pr_err("VALIDATE C-TBL[%zu]: Duplicate %s\n",
+ i, tbl[i].name);
+ good = false;
+ }
+ if (c > 0) {
+ pr_err("VALIDATE C-TBL[%zu]: Missorted %s>=%s\n",
+ i, tbl[i-1].name, tbl[i].name);
+ good = false;
+ }
+ }
+
+ if (tbl[i].value != special &&
+ (tbl[i].value < low || tbl[i].value > high)) {
+ pr_err("VALIDATE C-TBL[%zu]: %s->%d const out of range (%d-%d)\n",
+ i, tbl[i].name, tbl[i].value, low, high);
+ good = false;
+ }
+ }
+
+ return good;
+}
+
+/**
+ * fs_validate_description - Validate a parameter description
+ * @desc: The parameter description to validate.
+ */
+bool fs_validate_description(const struct fs_parameter_description *desc)
+{
+ const struct fs_parameter_spec *param, *p2;
+ const struct fs_parameter_enum *e;
+ const char *name = desc->name;
+ unsigned int nr_params = 0;
+ bool good = true, enums = false;
+
+ pr_notice("*** VALIDATE %s ***\n", name);
+
+ if (!name[0]) {
+ pr_err("VALIDATE Parser: No name\n");
+ name = "Unknown";
+ good = false;
+ }
+
+ if (desc->specs) {
+ for (param = desc->specs; param->name; param++) {
+ enum fs_parameter_type t = param->type;
+
+ /* Check that the type is in range */
+ if (t == __fs_param_wasnt_defined ||
+ t >= nr__fs_parameter_type) {
+ pr_err("VALIDATE %s: PARAM[%s] Bad type %u\n",
+ name, param->name, t);
+ good = false;
+ } else if (t == fs_param_is_enum) {
+ enums = true;
+ }
+
+ /* Check for duplicate parameter names */
+ for (p2 = desc->specs; p2 < param; p2++) {
+ if (strcmp(param->name, p2->name) == 0) {
+ pr_err("VALIDATE %s: PARAM[%s]: Duplicate\n",
+ name, param->name);
+ good = false;
+ }
+ }
+ }
+
+ nr_params = param - desc->specs;
+ }
+
+ if (desc->enums) {
+ if (!nr_params) {
+ pr_err("VALIDATE %s: Enum table but no parameters\n",
+ name);
+ good = false;
+ goto no_enums;
+ }
+ if (!enums) {
+ pr_err("VALIDATE %s: Enum table but no enum-type values\n",
+ name);
+ good = false;
+ goto no_enums;
+ }
+
+ for (e = desc->enums; e->name[0]; e++) {
+ /* Check that any parameter sharing an option ID with
+ * an enum table entry is itself of enum type.
+ */
+ for (param = desc->specs; param->name; param++) {
+ if (param->opt == e->opt &&
+ param->type != fs_param_is_enum) {
+ pr_err("VALIDATE %s: e[%tu] enum val for %s\n",
+ name, e - desc->enums, param->name);
+ good = false;
+ }
+ }
+ }
+
+ /* Check that all enum-type parameters have at least one enum
+ * value in the enum table.
+ */
+ for (param = desc->specs; param->name; param++) {
+ if (param->type != fs_param_is_enum)
+ continue;
+ for (e = desc->enums; e->name[0]; e++)
+ if (e->opt == param->opt)
+ break;
+ if (!e->name[0]) {
+ pr_err("VALIDATE %s: PARAM[%s] enum with no values\n",
+ name, param->name);
+ good = false;
+ }
+ }
+ } else {
+ if (enums) {
+ pr_err("VALIDATE %s: enum-type values, but no enum table\n",
+ name);
+ good = false;
+ goto no_enums;
+ }
+ }
+
+no_enums:
+ return good;
+}
+#endif /* CONFIG_VALIDATE_FS_PARSER */
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 989df5accaee..fe80bea4ad89 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -35,7 +35,9 @@ static ssize_t fuse_conn_abort_write(struct file *file, const char __user *buf,
{
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (fc) {
- fuse_abort_conn(fc, true);
+ if (fc->abort_err)
+ fc->aborted = true;
+ fuse_abort_conn(fc);
fuse_conn_put(fc);
}
return count;
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 8f68181256c0..55a26f351467 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -141,10 +141,11 @@ static int cuse_open(struct inode *inode, struct file *file)
static int cuse_release(struct inode *inode, struct file *file)
{
+ struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- fuse_sync_release(ff, file->f_flags);
+ fuse_sync_release(fi, ff, file->f_flags);
fuse_conn_put(fc);
return 0;
@@ -407,7 +408,7 @@ err_unlock:
err_region:
unregister_chrdev_region(devt, 1);
err:
- fuse_abort_conn(fc, false);
+ fuse_abort_conn(fc);
goto out;
}
@@ -586,7 +587,7 @@ static ssize_t cuse_class_abort_store(struct device *dev,
{
struct cuse_conn *cc = dev_get_drvdata(dev);
- fuse_abort_conn(&cc->fc, false);
+ fuse_abort_conn(&cc->fc);
return count;
}
static DEVICE_ATTR(abort, 0200, NULL, cuse_class_abort_store);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 809c0f2f9942..9971a35cf1ef 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -251,17 +251,18 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
struct file *file)
{
struct fuse_req *req = NULL;
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
struct fuse_file *ff = file->private_data;
do {
wait_event(fc->reserved_req_waitq, ff->reserved_req);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
if (ff->reserved_req) {
req = ff->reserved_req;
ff->reserved_req = NULL;
req->stolen_file = get_file(file);
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
} while (!req);
return req;
@@ -273,16 +274,17 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
struct file *file = req->stolen_file;
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
struct fuse_file *ff = file->private_data;
WARN_ON(req->max_pages);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
memset(req, 0, sizeof(*req));
fuse_request_init(req, NULL, NULL, 0);
BUG_ON(ff->reserved_req);
ff->reserved_req = req;
wake_up_all(&fc->reserved_req_waitq);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fput(file);
}
@@ -431,10 +433,16 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
-
- spin_lock(&fiq->waitq.lock);
- list_del_init(&req->intr_entry);
- spin_unlock(&fiq->waitq.lock);
+ /*
+ * test_and_set_bit() implies smp_mb() between bit
+ * changing and below intr_entry check. Pairs with
+ * smp_mb() from queue_interrupt().
+ */
+ if (!list_empty(&req->intr_entry)) {
+ spin_lock(&fiq->waitq.lock);
+ list_del_init(&req->intr_entry);
+ spin_unlock(&fiq->waitq.lock);
+ }
WARN_ON(test_bit(FR_PENDING, &req->flags));
WARN_ON(test_bit(FR_SENT, &req->flags));
if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -462,27 +470,43 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
fc->active_background--;
flush_bg_queue(fc);
spin_unlock(&fc->bg_lock);
+ } else {
+ /* Wake up waiter sleeping in request_wait_answer() */
+ wake_up(&req->waitq);
}
- wake_up(&req->waitq);
+
if (req->end)
req->end(fc, req);
put_request:
fuse_put_request(fc, req);
}
-static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->waitq.lock);
- if (test_bit(FR_FINISHED, &req->flags)) {
+ /* Check that an interrupt has actually been requested for this req */
+ if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
spin_unlock(&fiq->waitq.lock);
- return;
+ return -EINVAL;
}
+
if (list_empty(&req->intr_entry)) {
list_add_tail(&req->intr_entry, &fiq->interrupts);
+ /*
+ * Pairs with smp_mb() implied by test_and_set_bit()
+ * from request_end().
+ */
+ smp_mb();
+ if (test_bit(FR_FINISHED, &req->flags)) {
+ list_del_init(&req->intr_entry);
+ spin_unlock(&fiq->waitq.lock);
+ return 0;
+ }
wake_up_locked(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}
spin_unlock(&fiq->waitq.lock);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ return 0;
}
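The barrier pairing above is the classic store/mb/load pattern; spelled out as
a sketch (not fuse code):

	/*
	 *	CPU A (request_end)		CPU B (queue_interrupt)
	 *	set FR_FINISHED			list_add_tail(intr_entry)
	 *	smp_mb() [test_and_set_bit]	smp_mb()
	 *	read intr_entry			read FR_FINISHED
	 *
	 * At least one CPU must observe the other's store, so the
	 * interrupt entry is removed either in queue_interrupt() (it
	 * sees FR_FINISHED) or in request_end() (it sees a non-empty
	 * intr_entry); the entry cannot be leaked.
	 */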
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
@@ -1306,7 +1330,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
goto err_unlock;
if (!fiq->connected) {
- err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
+ err = fc->aborted ? -ECONNABORTED : -ENODEV;
goto err_unlock;
}
@@ -1353,7 +1377,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
spin_lock(&fpq->lock);
clear_bit(FR_LOCKED, &req->flags);
if (!fpq->connected) {
- err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
+ err = fc->aborted ? -ECONNABORTED : -ENODEV;
goto out_end;
}
if (err) {
@@ -1900,16 +1924,17 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
struct fuse_req *req;
struct fuse_out_header oh;
+ err = -EINVAL;
if (nbytes < sizeof(struct fuse_out_header))
- return -EINVAL;
+ goto out;
err = fuse_copy_one(cs, &oh, sizeof(oh));
if (err)
- goto err_finish;
+ goto copy_finish;
err = -EINVAL;
if (oh.len != nbytes)
- goto err_finish;
+ goto copy_finish;
/*
* Zero oh.unique indicates unsolicited notification message
@@ -1917,41 +1942,40 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
*/
if (!oh.unique) {
err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
- return err ? err : nbytes;
+ goto out;
}
err = -EINVAL;
if (oh.error <= -1000 || oh.error > 0)
- goto err_finish;
+ goto copy_finish;
spin_lock(&fpq->lock);
- err = -ENOENT;
- if (!fpq->connected)
- goto err_unlock_pq;
+ req = NULL;
+ if (fpq->connected)
+ req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
- req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
- if (!req)
- goto err_unlock_pq;
+ err = -ENOENT;
+ if (!req) {
+ spin_unlock(&fpq->lock);
+ goto copy_finish;
+ }
/* Is it an interrupt reply ID? */
if (oh.unique & FUSE_INT_REQ_BIT) {
__fuse_get_request(req);
spin_unlock(&fpq->lock);
- err = -EINVAL;
- if (nbytes != sizeof(struct fuse_out_header)) {
- fuse_put_request(fc, req);
- goto err_finish;
- }
-
- if (oh.error == -ENOSYS)
+ err = 0;
+ if (nbytes != sizeof(struct fuse_out_header))
+ err = -EINVAL;
+ else if (oh.error == -ENOSYS)
fc->no_interrupt = 1;
else if (oh.error == -EAGAIN)
- queue_interrupt(&fc->iq, req);
+ err = queue_interrupt(&fc->iq, req);
+
fuse_put_request(fc, req);
- fuse_copy_finish(cs);
- return nbytes;
+ goto copy_finish;
}
clear_bit(FR_SENT, &req->flags);
@@ -1977,14 +2001,12 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
spin_unlock(&fpq->lock);
request_end(fc, req);
-
+out:
return err ? err : nbytes;
- err_unlock_pq:
- spin_unlock(&fpq->lock);
- err_finish:
+copy_finish:
fuse_copy_finish(cs);
- return err;
+ goto out;
}
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
@@ -2034,10 +2056,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
ret = -EINVAL;
- if (rem < len) {
- pipe_unlock(pipe);
- goto out;
- }
+ if (rem < len)
+ goto out_free;
rem = len;
while (rem) {
@@ -2055,7 +2075,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
} else {
- pipe_buf_get(pipe, ibuf);
+ if (!pipe_buf_get(pipe, ibuf))
+ goto out_free;
+
*obuf = *ibuf;
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->len = rem;
@@ -2078,11 +2100,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
ret = fuse_dev_do_write(fud, &cs, len);
pipe_lock(pipe);
+out_free:
for (idx = 0; idx < nbuf; idx++)
pipe_buf_release(pipe, &bufs[idx]);
pipe_unlock(pipe);
-out:
kvfree(bufs);
return ret;
}
@@ -2109,11 +2131,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
return mask;
}
-/*
- * Abort all requests on the given list (pending or processing)
- *
- * This function releases and reacquires fc->lock
- */
+/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
while (!list_empty(head)) {
@@ -2159,7 +2177,7 @@ static void end_polls(struct fuse_conn *fc)
* is OK, the request will in that case be removed from the list before we touch
* it.
*/
-void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
+void fuse_abort_conn(struct fuse_conn *fc)
{
struct fuse_iqueue *fiq = &fc->iq;
@@ -2175,7 +2193,6 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
fc->connected = 0;
spin_unlock(&fc->bg_lock);
- fc->aborted = is_abort;
fuse_set_initialized(fc);
list_for_each_entry(fud, &fc->devices, entry) {
struct fuse_pqueue *fpq = &fud->pq;
@@ -2253,7 +2270,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
/* Are we the last open device? */
if (atomic_dec_and_test(&fc->dev_count)) {
WARN_ON(fc->iq.fasync != NULL);
- fuse_abort_conn(fc, false);
+ fuse_abort_conn(fc);
}
fuse_dev_free(fud);
}
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index e909678afa2d..dd0f64f7bc06 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -149,21 +149,6 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
args->out.args[0].value = outarg;
}
-u64 fuse_get_attr_version(struct fuse_conn *fc)
-{
- u64 curr_version;
-
- /*
- * The spin lock isn't actually needed on 64bit archs, but we
- * don't yet care too much about such optimizations.
- */
- spin_lock(&fc->lock);
- curr_version = fc->attr_version;
- spin_unlock(&fc->lock);
-
- return curr_version;
-}
-
/*
* Check whether the dentry is still valid
*
@@ -222,9 +207,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
fuse_queue_forget(fc, forget, outarg.nodeid, 1);
goto invalid;
}
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
fi->nlookup++;
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
}
kfree(forget);
if (ret == -ENOMEM)
@@ -400,6 +385,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct fuse_create_in inarg;
struct fuse_open_out outopen;
struct fuse_entry_out outentry;
+ struct fuse_inode *fi;
struct fuse_file *ff;
/* Userspace expects S_IFREG in create mode */
@@ -451,7 +437,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
&outentry.attr, entry_attr_timeout(&outentry), 0);
if (!inode) {
flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
- fuse_sync_release(ff, flags);
+ fuse_sync_release(NULL, ff, flags);
fuse_queue_forget(fc, forget, outentry.nodeid, 1);
err = -ENOMEM;
goto out_err;
@@ -462,7 +448,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
fuse_dir_changed(dir);
err = finish_open(file, entry, generic_file_open);
if (err) {
- fuse_sync_release(ff, flags);
+ fi = get_fuse_inode(inode);
+ fuse_sync_release(fi, ff, flags);
} else {
file->private_data = ff;
fuse_finish_open(inode, file);
@@ -671,8 +658,8 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
struct inode *inode = d_inode(entry);
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
/*
* If i_nlink == 0 then unlink doesn't make sense, yet this can
* happen if userspace filesystem is careless. It would be
@@ -681,7 +668,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
*/
if (inode->i_nlink > 0)
drop_nlink(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
fuse_dir_changed(dir);
fuse_invalidate_entry_cache(entry);
@@ -825,10 +812,10 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
if (!err) {
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
inc_nlink(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
fuse_update_ctime(inode);
} else if (err == -EINTR) {
@@ -1356,15 +1343,14 @@ static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
*/
void fuse_set_nowrite(struct inode *inode)
{
- struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
BUG_ON(!inode_is_locked(inode));
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
BUG_ON(fi->writectr < 0);
fi->writectr += FUSE_NOWRITE;
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
}
@@ -1385,11 +1371,11 @@ static void __fuse_release_nowrite(struct inode *inode)
void fuse_release_nowrite(struct inode *inode)
{
- struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
__fuse_release_nowrite(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
}
static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
@@ -1524,7 +1510,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
goto error;
}
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
/* the kernel maintains i_mtime locally */
if (trust_local_cmtime) {
if (attr->ia_valid & ATTR_MTIME)
@@ -1542,10 +1528,10 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
i_size_write(inode, outarg.attr.size);
if (is_truncate) {
- /* NOTE: this may release/reacquire fc->lock */
+ /* NOTE: this may release/reacquire fi->lock */
__fuse_release_nowrite(inode);
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
/*
* Only call invalidate_inode_pages2() after removing
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a59c16bd90ac..06096b60f1df 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -19,8 +19,6 @@
#include <linux/falloc.h>
#include <linux/uio.h>
-static const struct file_operations fuse_direct_io_file_operations;
-
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
int opcode, struct fuse_open_out *outargp)
{
@@ -64,9 +62,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
- spin_lock(&fc->lock);
- ff->kh = ++fc->khctr;
- spin_unlock(&fc->lock);
+ ff->kh = atomic64_inc_return(&fc->khctr);
return ff;
}
@@ -94,7 +90,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
if (refcount_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
- if (ff->fc->no_open && !isdir) {
+ if (isdir ? ff->fc->no_opendir : ff->fc->no_open) {
/*
* Drop the release request when client does not
* implement 'open'
@@ -128,8 +124,9 @@ int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
return -ENOMEM;
ff->fh = 0;
- ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
- if (!fc->no_open || isdir) {
+ /* Default for no-open */
+ ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
+ if (isdir ? !fc->no_opendir : !fc->no_open) {
struct fuse_open_out outarg;
int err;
@@ -138,11 +135,14 @@ int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
ff->fh = outarg.fh;
ff->open_flags = outarg.open_flags;
- } else if (err != -ENOSYS || isdir) {
+ } else if (err != -ENOSYS) {
fuse_file_free(ff);
return err;
} else {
- fc->no_open = 1;
+ if (isdir)
+ fc->no_opendir = 1;
+ else
+ fc->no_open = 1;
}
}
@@ -159,17 +159,16 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
static void fuse_link_write_file(struct file *file)
{
struct inode *inode = file_inode(file);
- struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_file *ff = file->private_data;
/*
* file may be written through mmap, so chain it onto the
* inodes's write_file list
*/
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
if (list_empty(&ff->write_entry))
list_add(&ff->write_entry, &fi->write_files);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
}
void fuse_finish_open(struct inode *inode, struct file *file)
@@ -177,8 +176,6 @@ void fuse_finish_open(struct inode *inode, struct file *file)
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
- if (ff->open_flags & FOPEN_DIRECT_IO)
- file->f_op = &fuse_direct_io_file_operations;
if (!(ff->open_flags & FOPEN_KEEP_CACHE))
invalidate_inode_pages2(inode->i_mapping);
if (ff->open_flags & FOPEN_NONSEEKABLE)
@@ -186,10 +183,10 @@ void fuse_finish_open(struct inode *inode, struct file *file)
if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
i_size_write(inode, 0);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fuse_invalidate_attr(inode);
if (fc->writeback_cache)
file_update_time(file);
@@ -224,14 +221,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
return err;
}
-static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
+static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
+ int flags, int opcode)
{
struct fuse_conn *fc = ff->fc;
struct fuse_req *req = ff->reserved_req;
struct fuse_release_in *inarg = &req->misc.release.in;
+ /* Inode is NULL on error path of fuse_create_open() */
+ if (likely(fi)) {
+ spin_lock(&fi->lock);
+ list_del(&ff->write_entry);
+ spin_unlock(&fi->lock);
+ }
spin_lock(&fc->lock);
- list_del(&ff->write_entry);
if (!RB_EMPTY_NODE(&ff->polled_node))
rb_erase(&ff->polled_node, &fc->polled_files);
spin_unlock(&fc->lock);
@@ -249,11 +252,12 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
void fuse_release_common(struct file *file, bool isdir)
{
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
struct fuse_file *ff = file->private_data;
struct fuse_req *req = ff->reserved_req;
int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
- fuse_prepare_release(ff, file->f_flags, opcode);
+ fuse_prepare_release(fi, ff, file->f_flags, opcode);
if (ff->flock) {
struct fuse_release_in *inarg = &req->misc.release.in;
@@ -295,10 +299,10 @@ static int fuse_release(struct inode *inode, struct file *file)
return 0;
}
-void fuse_sync_release(struct fuse_file *ff, int flags)
+void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags)
{
WARN_ON(refcount_read(&ff->count) > 1);
- fuse_prepare_release(ff, flags, FUSE_RELEASE);
+ fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
/*
* iput(NULL) is a no-op and since the refcount is 1 and everything's
* synchronous, we are fine with not doing igrab() here
@@ -329,33 +333,39 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
return (u64) v0 + ((u64) v1 << 32);
}
-/*
- * Check if any page in a range is under writeback
- *
- * This is currently done by walking the list of writepage requests
- * for the inode, which can be pretty inefficient.
- */
-static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
- pgoff_t idx_to)
+static struct fuse_req *fuse_find_writeback(struct fuse_inode *fi,
+ pgoff_t idx_from, pgoff_t idx_to)
{
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_req *req;
- bool found = false;
- spin_lock(&fc->lock);
list_for_each_entry(req, &fi->writepages, writepages_entry) {
pgoff_t curr_index;
- BUG_ON(req->inode != inode);
+ WARN_ON(get_fuse_inode(req->inode) != fi);
curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
if (idx_from < curr_index + req->num_pages &&
curr_index <= idx_to) {
- found = true;
- break;
+ return req;
}
}
- spin_unlock(&fc->lock);
+ return NULL;
+}
+
+/*
+ * Check if any page in a range is under writeback
+ *
+ * This is currently done by walking the list of writepage requests
+ * for the inode, which can be pretty inefficient.
+ */
+static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
+ pgoff_t idx_to)
+{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+ bool found;
+
+ spin_lock(&fi->lock);
+ found = fuse_find_writeback(fi, idx_from, idx_to);
+ spin_unlock(&fi->lock);
return found;
}
@@ -598,9 +608,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
- spin_unlock(&fc->lock);
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
+ spin_unlock(&fi->lock);
}
io->iocb->ki_complete(io->iocb, res, 0);
@@ -675,13 +685,13 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
if (attr_ver == fi->attr_version && size < inode->i_size &&
!test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
- fi->attr_version = ++fc->attr_version;
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
i_size_write(inode, size);
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
}
static void fuse_short_read(struct fuse_req *req, struct inode *inode,
@@ -919,7 +929,7 @@ out:
return err;
}
-static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -996,13 +1006,13 @@ bool fuse_write_update_size(struct inode *inode, loff_t pos)
struct fuse_inode *fi = get_fuse_inode(inode);
bool ret = false;
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
if (pos > inode->i_size) {
i_size_write(inode, pos);
ret = true;
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
return ret;
}
@@ -1125,9 +1135,6 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
int err = 0;
ssize_t res = 0;
- if (is_bad_inode(inode))
- return -EIO;
-
if (inode->i_size < pos + iov_iter_count(ii))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
@@ -1173,7 +1180,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
return res > 0 ? res : err;
}
-static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -1416,9 +1423,6 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
ssize_t res;
struct inode *inode = file_inode(io->iocb->ki_filp);
- if (is_bad_inode(inode))
- return -EIO;
-
res = fuse_direct_io(io, iter, ppos, 0);
fuse_invalidate_atime(inode);
@@ -1426,10 +1430,21 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
return res;
}
+static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
+
static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
- return __fuse_direct_read(&io, to, &iocb->ki_pos);
+ ssize_t res;
+
+ if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
+ res = fuse_direct_IO(iocb, to);
+ } else {
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
+
+ res = __fuse_direct_read(&io, to, &iocb->ki_pos);
+ }
+
+ return res;
}
static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
@@ -1438,14 +1453,17 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
ssize_t res;
- if (is_bad_inode(inode))
- return -EIO;
-
/* Don't allow parallel writes to the same file */
inode_lock(inode);
res = generic_write_checks(iocb, from);
- if (res > 0)
- res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
+ if (res > 0) {
+ if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
+ res = fuse_direct_IO(iocb, from);
+ } else {
+ res = fuse_direct_io(&io, from, &iocb->ki_pos,
+ FUSE_DIO_WRITE);
+ }
+ }
fuse_invalidate_attr(inode);
if (res > 0)
fuse_write_update_size(inode, iocb->ki_pos);
@@ -1454,6 +1472,34 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
return res;
}
+static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct fuse_file *ff = file->private_data;
+
+ if (is_bad_inode(file_inode(file)))
+ return -EIO;
+
+ if (!(ff->open_flags & FOPEN_DIRECT_IO))
+ return fuse_cache_read_iter(iocb, to);
+ else
+ return fuse_direct_read_iter(iocb, to);
+}
+
+static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct fuse_file *ff = file->private_data;
+
+ if (is_bad_inode(file_inode(file)))
+ return -EIO;
+
+ if (!(ff->open_flags & FOPEN_DIRECT_IO))
+ return fuse_cache_write_iter(iocb, from);
+ else
+ return fuse_direct_write_iter(iocb, from);
+}
+
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
int i;
@@ -1481,20 +1527,18 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&fi->page_waitq);
}
-/* Called under fc->lock, may release and reacquire it */
+/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
loff_t size)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(fi->lock)
+__acquires(fi->lock)
{
+ struct fuse_req *aux, *next;
struct fuse_inode *fi = get_fuse_inode(req->inode);
struct fuse_write_in *inarg = &req->misc.write.in;
__u64 data_size = req->num_pages * PAGE_SIZE;
bool queued;
- if (!fc->connected)
- goto out_free;
-
if (inarg->offset + data_size <= size) {
inarg->size = data_size;
} else if (inarg->offset < size) {
@@ -1505,28 +1549,40 @@ __acquires(fc->lock)
}
req->in.args[1].size = inarg->size;
- fi->writectr++;
queued = fuse_request_queue_background(fc, req);
- WARN_ON(!queued);
+ /* Fails on broken connection only */
+ if (unlikely(!queued))
+ goto out_free;
+
+ fi->writectr++;
return;
out_free:
fuse_writepage_finish(fc, req);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
+
+ /* After fuse_writepage_finish() aux request list is private */
+ for (aux = req->misc.write.next; aux; aux = next) {
+ next = aux->misc.write.next;
+ aux->misc.write.next = NULL;
+ fuse_writepage_free(fc, aux);
+ fuse_put_request(fc, aux);
+ }
+
fuse_writepage_free(fc, req);
fuse_put_request(fc, req);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
}
/*
* If fi->writectr is positive (no truncate or fsync going on) send
* all queued writepage requests.
*
- * Called with fc->lock
+ * Called with fi->lock
*/
void fuse_flush_writepages(struct inode *inode)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(fi->lock)
+__acquires(fi->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -1546,7 +1602,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_inode *fi = get_fuse_inode(inode);
mapping_set_error(inode->i_mapping, req->out.h.error);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
while (req->misc.write.next) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_write_in *inarg = &req->misc.write.in;
@@ -1583,7 +1639,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
}
fi->writectr--;
fuse_writepage_finish(fc, req);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fuse_writepage_free(fc, req);
}
@@ -1592,13 +1648,13 @@ static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
{
struct fuse_file *ff = NULL;
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
if (!list_empty(&fi->write_files)) {
ff = list_entry(fi->write_files.next, struct fuse_file,
write_entry);
fuse_file_get(ff);
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
return ff;
}
@@ -1669,11 +1725,11 @@ static int fuse_writepage_locked(struct page *page)
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
list_add(&req->writepages_entry, &fi->writepages);
list_add_tail(&req->list, &fi->queued_writes);
fuse_flush_writepages(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
end_page_writeback(page);
@@ -1722,21 +1778,27 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
struct fuse_req *req = data->req;
struct inode *inode = data->inode;
- struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
int num_pages = req->num_pages;
int i;
req->ff = fuse_file_get(data->ff);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
list_add_tail(&req->list, &fi->queued_writes);
fuse_flush_writepages(inode);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
for (i = 0; i < num_pages; i++)
end_page_writeback(data->orig_pages[i]);
}
+/*
+ * First recheck under fi->lock if the offending offset is still under
+ * writeback. If yes, then iterate auxiliary write requests, to see if there's
+ * one already added for a page at this offset. If there's none, then insert
+ * this new request onto the auxiliary list, otherwise reuse the existing one by
+ * copying the new page contents over to the old temporary page.
+ */
static bool fuse_writepage_in_flight(struct fuse_req *new_req,
struct page *page)
{
@@ -1744,57 +1806,50 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
struct fuse_inode *fi = get_fuse_inode(new_req->inode);
struct fuse_req *tmp;
struct fuse_req *old_req;
- bool found = false;
- pgoff_t curr_index;
- BUG_ON(new_req->num_pages != 0);
+ WARN_ON(new_req->num_pages != 0);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
list_del(&new_req->writepages_entry);
- list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
- BUG_ON(old_req->inode != new_req->inode);
- curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
- if (curr_index <= page->index &&
- page->index < curr_index + old_req->num_pages) {
- found = true;
- break;
- }
- }
- if (!found) {
+ old_req = fuse_find_writeback(fi, page->index, page->index);
+ if (!old_req) {
list_add(&new_req->writepages_entry, &fi->writepages);
- goto out_unlock;
+ spin_unlock(&fi->lock);
+ return false;
}
new_req->num_pages = 1;
- for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
- BUG_ON(tmp->inode != new_req->inode);
+ for (tmp = old_req->misc.write.next; tmp; tmp = tmp->misc.write.next) {
+ pgoff_t curr_index;
+
+ WARN_ON(tmp->inode != new_req->inode);
curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
- if (tmp->num_pages == 1 &&
- curr_index == page->index) {
- old_req = tmp;
+ if (curr_index == page->index) {
+ WARN_ON(tmp->num_pages != 1);
+ WARN_ON(!test_bit(FR_PENDING, &tmp->flags));
+ swap(tmp->pages[0], new_req->pages[0]);
+ break;
}
}
- if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) {
- struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);
+ if (!tmp) {
+ new_req->misc.write.next = old_req->misc.write.next;
+ old_req->misc.write.next = new_req;
+ }
+
+ spin_unlock(&fi->lock);
- copy_highpage(old_req->pages[0], page);
- spin_unlock(&fc->lock);
+ if (tmp) {
+ struct backing_dev_info *bdi = inode_to_bdi(new_req->inode);
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
fuse_writepage_free(fc, new_req);
fuse_request_free(new_req);
- goto out;
- } else {
- new_req->misc.write.next = old_req->misc.write.next;
- old_req->misc.write.next = new_req;
}
-out_unlock:
- spin_unlock(&fc->lock);
-out:
- return found;
+
+ return true;
}
static int fuse_writepages_fill(struct page *page,
@@ -1803,6 +1858,7 @@ static int fuse_writepages_fill(struct page *page,
struct fuse_fill_wb_data *data = _data;
struct fuse_req *req = data->req;
struct inode *inode = data->inode;
+ struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
struct page *tmp_page;
bool is_writeback;
@@ -1873,9 +1929,9 @@ static int fuse_writepages_fill(struct page *page,
req->end = fuse_writepage_end;
req->inode = inode;
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
list_add(&req->writepages_entry, &fi->writepages);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
data->req = req;
}
@@ -1898,12 +1954,12 @@ static int fuse_writepages_fill(struct page *page,
data->orig_pages[req->num_pages] = page;
/*
- * Protected by fc->lock against concurrent access by
+ * Protected by fi->lock against concurrent access by
* fuse_page_is_writeback().
*/
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
req->num_pages++;
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
out_unlock:
unlock_page(page);
@@ -2087,6 +2143,18 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct fuse_file *ff = file->private_data;
+
+ if (ff->open_flags & FOPEN_DIRECT_IO) {
+ /* Can't provide the coherency needed for MAP_SHARED */
+ if (vma->vm_flags & VM_MAYSHARE)
+ return -ENODEV;
+
+ invalidate_inode_pages2(file->f_mapping);
+
+ return generic_file_mmap(file, vma);
+ }
+
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
fuse_link_write_file(file);
@@ -2095,17 +2163,6 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
-{
- /* Can't provide the coherency needed for MAP_SHARED */
- if (vma->vm_flags & VM_MAYSHARE)
- return -ENODEV;
-
- invalidate_inode_pages2(file->f_mapping);
-
- return generic_file_mmap(file, vma);
-}
-
static int convert_fuse_file_lock(struct fuse_conn *fc,
const struct fuse_file_lock *ffl,
struct file_lock *fl)
@@ -3114,6 +3171,7 @@ static const struct file_operations fuse_file_operations = {
.lock = fuse_file_lock,
.flock = fuse_file_flock,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.unlocked_ioctl = fuse_file_ioctl,
.compat_ioctl = fuse_file_compat_ioctl,
.poll = fuse_file_poll,
@@ -3121,24 +3179,6 @@ static const struct file_operations fuse_file_operations = {
.copy_file_range = fuse_copy_file_range,
};
-static const struct file_operations fuse_direct_io_file_operations = {
- .llseek = fuse_file_llseek,
- .read_iter = fuse_direct_read_iter,
- .write_iter = fuse_direct_write_iter,
- .mmap = fuse_direct_mmap,
- .open = fuse_open,
- .flush = fuse_flush,
- .release = fuse_release,
- .fsync = fuse_fsync,
- .lock = fuse_file_lock,
- .flock = fuse_file_flock,
- .unlocked_ioctl = fuse_file_ioctl,
- .compat_ioctl = fuse_file_compat_ioctl,
- .poll = fuse_file_poll,
- .fallocate = fuse_file_fallocate,
- /* no splice_read */
-};
-
static const struct address_space_operations fuse_file_aops = {
.readpage = fuse_readpage,
.writepage = fuse_writepage,
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 2f2c92e6f8cb..0920c0c032a0 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -96,7 +96,7 @@ struct fuse_inode {
union {
/* Write related fields (regular file only) */
struct {
- /* Files usable in writepage. Protected by fc->lock */
+ /* Files usable in writepage. Protected by fi->lock */
struct list_head write_files;
/* Writepages pending on truncate or fsync */
@@ -144,6 +144,9 @@ struct fuse_inode {
/** Lock for serializing lookup and readdir for back compatibility*/
struct mutex mutex;
+
+ /** Lock to protect write related fields */
+ spinlock_t lock;
};
/** FUSE inode state bits */
@@ -163,7 +166,10 @@ struct fuse_file {
/** Fuse connection for this file */
struct fuse_conn *fc;
- /** Request reserved for flush and release */
+ /*
+ * Request reserved for flush and release.
+ * Modified under the corresponding fuse_inode::lock.
+ */
struct fuse_req *reserved_req;
/** Kernel file handle guaranteed to be unique */
@@ -538,7 +544,7 @@ struct fuse_conn {
struct fuse_iqueue iq;
/** The next unique kernel file handle */
- u64 khctr;
+ atomic64_t khctr;
/** rbtree of fuse_files waiting for poll events indexed by ph */
struct rb_root polled_files;
@@ -624,6 +630,9 @@ struct fuse_conn {
/** Is open/release not implemented by fs? */
unsigned no_open:1;
+ /** Is opendir/releasedir not implemented by fs? */
+ unsigned no_opendir:1;
+
/** Is fsync not implemented by fs? */
unsigned no_fsync:1;
@@ -730,7 +739,7 @@ struct fuse_conn {
struct fuse_req *destroy_req;
/** Version counter for attribute changes */
- u64 attr_version;
+ atomic64_t attr_version;
/** Called on final put */
void (*release)(struct fuse_conn *);
@@ -770,6 +779,11 @@ static inline int invalid_nodeid(u64 nodeid)
return !nodeid || nodeid == FUSE_ROOT_ID;
}
+static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
+{
+ return atomic64_read(&fc->attr_version);
+}
+
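Making khctr and attr_version atomic64_t is what lets these hot counters drop out from under fc->lock: readers need no lock at all, and writers get a fully ordered increment. A minimal sketch of the pattern (generic names, not the fuse code itself):

	/* sketch: lock-free version counter, assuming <linux/atomic.h> */
	static atomic64_t version = ATOMIC64_INIT(1);

	static u64 bump_version(void)
	{
		return atomic64_inc_return(&version);	/* ordered RMW, returns new value */
	}

	static u64 read_version(void)
	{
		return atomic64_read(&version);		/* no lock needed */
	}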
/** Device operations */
extern const struct file_operations fuse_dev_operations;
@@ -817,7 +831,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc);
void fuse_file_free(struct fuse_file *ff);
void fuse_finish_open(struct inode *inode, struct file *file);
-void fuse_sync_release(struct fuse_file *ff, int flags);
+void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags);
/**
* Send RELEASE or RELEASEDIR request
@@ -936,7 +950,7 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req);
/* Abort all requests */
-void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
+void fuse_abort_conn(struct fuse_conn *fc);
void fuse_wait_aborted(struct fuse_conn *fc);
/**
@@ -1000,8 +1014,6 @@ void fuse_flush_writepages(struct inode *inode);
void fuse_set_nowrite(struct inode *inode);
void fuse_release_nowrite(struct inode *inode);
-u64 fuse_get_attr_version(struct fuse_conn *fc);
-
/**
* File-system tells the kernel to invalidate cache for the given node id.
*/
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index c2d4099429be..ec5d9953dfb6 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -97,6 +97,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
fi->orig_ino = 0;
fi->state = 0;
mutex_init(&fi->mutex);
+ spin_lock_init(&fi->lock);
fi->forget = fuse_alloc_forget();
if (!fi->forget) {
kmem_cache_free(fuse_inode_cachep, inode);
@@ -163,7 +164,9 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- fi->attr_version = ++fc->attr_version;
+ lockdep_assert_held(&fi->lock);
+
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
fi->i_time = attr_valid;
WRITE_ONCE(fi->inval_mask, 0);
@@ -209,10 +212,10 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
loff_t oldsize;
struct timespec64 old_mtime;
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
if ((attr_version != 0 && fi->attr_version > attr_version) ||
test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
return;
}
@@ -227,7 +230,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
*/
if (!is_wb || !S_ISREG(inode->i_mode))
i_size_write(inode, attr->size);
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
if (!is_wb && S_ISREG(inode->i_mode)) {
bool inval = false;
@@ -322,9 +325,9 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
}
fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
fi->nlookup++;
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
fuse_change_attributes(inode, attr, attr_valid, attr_version);
return inode;
@@ -376,7 +379,7 @@ void fuse_unlock_inode(struct inode *inode, bool locked)
static void fuse_umount_begin(struct super_block *sb)
{
- fuse_abort_conn(get_fuse_conn_super(sb), false);
+ fuse_abort_conn(get_fuse_conn_super(sb));
}
static void fuse_send_destroy(struct fuse_conn *fc)
@@ -619,12 +622,12 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
atomic_set(&fc->num_waiting, 0);
fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
- fc->khctr = 0;
+ atomic64_set(&fc->khctr, 0);
fc->polled_files = RB_ROOT;
fc->blocked = 0;
fc->initialized = 0;
fc->connected = 1;
- fc->attr_version = 1;
+ atomic64_set(&fc->attr_version, 1);
get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
fc->user_ns = get_user_ns(user_ns);
@@ -969,7 +972,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
- FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS;
+ FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
+ FUSE_NO_OPENDIR_SUPPORT;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
@@ -1010,7 +1014,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
if (err)
return err;
- sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+ sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
/* fuse does its own writeback accounting */
sb->s_bdi->capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
@@ -1242,7 +1246,7 @@ static void fuse_sb_destroy(struct super_block *sb)
if (fc) {
fuse_send_destroy(fc);
- fuse_abort_conn(fc, false);
+ fuse_abort_conn(fc);
fuse_wait_aborted(fc);
down_write(&fc->killsb);
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index ab18b78f4755..574d03f8a573 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -213,9 +213,9 @@ retry:
}
fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
+ spin_lock(&fi->lock);
fi->nlookup++;
- spin_unlock(&fc->lock);
+ spin_unlock(&fi->lock);
forget_all_cached_acls(inode);
fuse_change_attributes(inode, &o->attr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b92740edc416..d32964cd1117 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -107,7 +107,7 @@ static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
- u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
+ u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}
@@ -2131,71 +2131,29 @@ static const struct file_operations gfs2_sbstats_fops = {
.release = seq_release,
};
-int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
-{
- struct dentry *dent;
-
- dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
- if (IS_ERR_OR_NULL(dent))
- goto fail;
- sdp->debugfs_dir = dent;
-
- dent = debugfs_create_file("glocks",
- S_IFREG | S_IRUGO,
- sdp->debugfs_dir, sdp,
- &gfs2_glocks_fops);
- if (IS_ERR_OR_NULL(dent))
- goto fail;
- sdp->debugfs_dentry_glocks = dent;
-
- dent = debugfs_create_file("glstats",
- S_IFREG | S_IRUGO,
- sdp->debugfs_dir, sdp,
- &gfs2_glstats_fops);
- if (IS_ERR_OR_NULL(dent))
- goto fail;
- sdp->debugfs_dentry_glstats = dent;
-
- dent = debugfs_create_file("sbstats",
- S_IFREG | S_IRUGO,
- sdp->debugfs_dir, sdp,
- &gfs2_sbstats_fops);
- if (IS_ERR_OR_NULL(dent))
- goto fail;
- sdp->debugfs_dentry_sbstats = dent;
+void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
+{
+ sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
- return 0;
-fail:
- gfs2_delete_debugfs_file(sdp);
- return dent ? PTR_ERR(dent) : -ENOMEM;
+ debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
+ &gfs2_glocks_fops);
+
+ debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
+ &gfs2_glstats_fops);
+
+ debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
+ &gfs2_sbstats_fops);
}
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
- if (sdp->debugfs_dir) {
- if (sdp->debugfs_dentry_glocks) {
- debugfs_remove(sdp->debugfs_dentry_glocks);
- sdp->debugfs_dentry_glocks = NULL;
- }
- if (sdp->debugfs_dentry_glstats) {
- debugfs_remove(sdp->debugfs_dentry_glstats);
- sdp->debugfs_dentry_glstats = NULL;
- }
- if (sdp->debugfs_dentry_sbstats) {
- debugfs_remove(sdp->debugfs_dentry_sbstats);
- sdp->debugfs_dentry_sbstats = NULL;
- }
- debugfs_remove(sdp->debugfs_dir);
- sdp->debugfs_dir = NULL;
- }
+ debugfs_remove_recursive(sdp->debugfs_dir);
+ sdp->debugfs_dir = NULL;
}
-int gfs2_register_debugfs(void)
+void gfs2_register_debugfs(void)
{
gfs2_root = debugfs_create_dir("gfs2", NULL);
- if (IS_ERR(gfs2_root))
- return PTR_ERR(gfs2_root);
- return gfs2_root ? 0 : -ENOMEM;
}
void gfs2_unregister_debugfs(void)
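The conversion above relies on the debugfs API being safe to use unchecked: the creation helpers tolerate error-pointer parents, and debugfs_remove_recursive(NULL) is a no-op. A sketch of the resulting idiom ("mydir", "stats" and stats_fops are made-up names):

	static struct dentry *dir;

	static void my_debugfs_init(void)
	{
		dir = debugfs_create_dir("mydir", NULL);
		/* no error check: a failed dir makes the next call a no-op */
		debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
	}

	static void my_debugfs_exit(void)
	{
		debugfs_remove_recursive(dir);	/* NULL/error-pointer safe */
		dir = NULL;
	}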
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 8949bf28b249..936b3295839c 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -243,9 +243,9 @@ extern void gfs2_glock_free(struct gfs2_glock *gl);
extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);
-extern int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
+extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
-extern int gfs2_register_debugfs(void);
+extern void gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);
extern const struct lm_lockops gfs2_dlm_ops;
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index e10e0b0a7cd5..cdf07b408f54 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -853,9 +853,6 @@ struct gfs2_sbd {
unsigned long sd_last_warning;
struct dentry *debugfs_dir; /* debugfs directory */
- struct dentry *debugfs_dentry_glocks;
- struct dentry *debugfs_dentry_glstats;
- struct dentry *debugfs_dentry_sbstats;
};
static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 793808263c6d..18d4af7417fa 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -59,8 +59,8 @@ static inline u64 gfs2_get_inode_blocks(const struct inode *inode)
static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change)
{
- gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks > -change));
- change *= (GFS2_SB(inode)->sd_sb.sb_bsize/GFS2_BASIC_BLOCK);
+ change <<= inode->i_blkbits - GFS2_BASIC_BLOCK_SHIFT;
+ gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks >= -change));
inode->i_blocks += change;
}
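The rewritten helper converts filesystem blocks to 512-byte basic blocks with a shift instead of reading the superblock, and does so before the assertion so the comparison against i_blocks is in the same units (the comparison was also relaxed from > to >=, since removing exactly i_blocks blocks is legal). A worked example with assumed values:

	/* 4 KiB fs blocks: i_blkbits = 12, GFS2_BASIC_BLOCK_SHIFT = 9 */
	s64 change = 3;
	change <<= 12 - 9;	/* 3 << 3 == 24 == 3 * (4096 / 512) */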
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index c7603063f861..136484ef35d3 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -178,16 +178,12 @@ static int __init init_gfs2_fs(void)
if (!gfs2_page_pool)
goto fail_mempool;
- error = gfs2_register_debugfs();
- if (error)
- goto fail_debugfs;
+ gfs2_register_debugfs();
pr_info("GFS2 installed\n");
return 0;
-fail_debugfs:
- mempool_destroy(gfs2_page_pool);
fail_mempool:
destroy_workqueue(gfs2_freeze_wq);
fail_wq3:
diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h
index 823a328791c0..302f45101a96 100644
--- a/fs/hpfs/hpfs.h
+++ b/fs/hpfs/hpfs.h
@@ -120,11 +120,11 @@ struct hpfs_spare_block
u8 bad_sector: 1; /* bad sector, corrupted disk (???) */
u8 bad_bitmap: 1; /* bad bitmap */
u8 fast: 1; /* partition was fast formatted */
- u8 old_wrote: 1; /* old version wrote to partion */
- u8 old_wrote_1: 1; /* old version wrote to partion (?) */
+ u8 old_wrote: 1; /* old version wrote to partition */
+ u8 old_wrote_1: 1; /* old version wrote to partition (?) */
#else
- u8 old_wrote_1: 1; /* old version wrote to partion (?) */
- u8 old_wrote: 1; /* old version wrote to partion */
+ u8 old_wrote_1: 1; /* old version wrote to partition (?) */
+ u8 old_wrote: 1; /* old version wrote to partition */
u8 fast: 1; /* partition was fast formatted */
u8 bad_bitmap: 1; /* bad bitmap */
u8 bad_sector: 1; /* bad sector, corrupted disk (???) */
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index b0eef008de67..9285dd4f4b1c 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -27,7 +27,7 @@
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
@@ -45,11 +45,17 @@ const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
-struct hugetlbfs_config {
+enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
+
+struct hugetlbfs_fs_context {
struct hstate *hstate;
+ unsigned long long max_size_opt;
+ unsigned long long min_size_opt;
long max_hpages;
long nr_inodes;
long min_hpages;
+ enum hugetlbfs_size_type max_val_type;
+ enum hugetlbfs_size_type min_val_type;
kuid_t uid;
kgid_t gid;
umode_t mode;
@@ -57,22 +63,30 @@ struct hugetlbfs_config {
int sysctl_hugetlb_shm_group;
-enum {
- Opt_size, Opt_nr_inodes,
- Opt_mode, Opt_uid, Opt_gid,
- Opt_pagesize, Opt_min_size,
- Opt_err,
+enum hugetlb_param {
+ Opt_gid,
+ Opt_min_size,
+ Opt_mode,
+ Opt_nr_inodes,
+ Opt_pagesize,
+ Opt_size,
+ Opt_uid,
};
-static const match_table_t tokens = {
- {Opt_size, "size=%s"},
- {Opt_nr_inodes, "nr_inodes=%s"},
- {Opt_mode, "mode=%o"},
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_pagesize, "pagesize=%s"},
- {Opt_min_size, "min_size=%s"},
- {Opt_err, NULL},
+static const struct fs_parameter_spec hugetlb_param_specs[] = {
+ fsparam_u32 ("gid", Opt_gid),
+ fsparam_string("min_size", Opt_min_size),
+ fsparam_u32 ("mode", Opt_mode),
+ fsparam_string("nr_inodes", Opt_nr_inodes),
+ fsparam_string("pagesize", Opt_pagesize),
+ fsparam_string("size", Opt_size),
+ fsparam_u32 ("uid", Opt_uid),
+ {}
+};
+
+static const struct fs_parameter_description hugetlb_fs_parameters = {
+ .name = "hugetlbfs",
+ .specs = hugetlb_param_specs,
};
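With this table a mount such as "mount -t hugetlbfs -o size=1G,min_size=50%,uid=1000 none /mnt" is parsed one parameter at a time: the fsparam_u32 entries (uid, gid, mode) are converted by the fs_parse() core, while size, min_size, nr_inodes and pagesize stay fsparam_string because they allow K/M/G suffixes or a trailing '%' and are decoded by hugetlbfs_parse_param() below.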
#ifdef CONFIG_NUMA
@@ -708,16 +722,16 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
}
static struct inode *hugetlbfs_get_root(struct super_block *sb,
- struct hugetlbfs_config *config)
+ struct hugetlbfs_fs_context *ctx)
{
struct inode *inode;
inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_mode = S_IFDIR | config->mode;
- inode->i_uid = config->uid;
- inode->i_gid = config->gid;
+ inode->i_mode = S_IFDIR | ctx->mode;
+ inode->i_uid = ctx->uid;
+ inode->i_gid = ctx->gid;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
@@ -741,11 +755,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
umode_t mode, dev_t dev)
{
struct inode *inode;
- struct resv_map *resv_map;
+ struct resv_map *resv_map = NULL;
- resv_map = resv_map_alloc();
- if (!resv_map)
- return NULL;
+ /*
+ * Reserve maps are only needed for inodes that can have associated
+ * page allocations.
+ */
+ if (S_ISREG(mode) || S_ISLNK(mode)) {
+ resv_map = resv_map_alloc();
+ if (!resv_map)
+ return NULL;
+ }
inode = new_inode(sb);
if (inode) {
@@ -780,8 +800,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
break;
}
lockdep_annotate_inode_mutex_key(inode);
- } else
- kref_put(&resv_map->refs, resv_map_release);
+ } else {
+ if (resv_map)
+ kref_put(&resv_map->refs, resv_map_release);
+ }
return inode;
}
@@ -1093,8 +1115,6 @@ static const struct super_operations hugetlbfs_ops = {
.show_options = hugetlbfs_show_options,
};
-enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
-
/*
* Convert size option passed from command line to number of huge pages
* in the pool specified by hstate. Size option could be in bytes
@@ -1117,170 +1137,151 @@ hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
return size_opt;
}
-static int
-hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
+/*
+ * Parse one mount parameter.
+ */
+static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p, *rest;
- substring_t args[MAX_OPT_ARGS];
- int option;
- unsigned long long max_size_opt = 0, min_size_opt = 0;
- enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;
-
- if (!options)
+ struct hugetlbfs_fs_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
+ char *rest;
+ unsigned long ps;
+ int opt;
+
+ opt = fs_parse(fc, &hugetlb_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ ctx->uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(ctx->uid))
+ goto bad_val;
return 0;
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
+ case Opt_gid:
+ ctx->gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(ctx->gid))
+ goto bad_val;
+ return 0;
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- goto bad_val;
- pconfig->uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(pconfig->uid))
- goto bad_val;
- break;
+ case Opt_mode:
+ ctx->mode = result.uint_32 & 01777U;
+ return 0;
- case Opt_gid:
- if (match_int(&args[0], &option))
- goto bad_val;
- pconfig->gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(pconfig->gid))
- goto bad_val;
- break;
+ case Opt_size:
+ /* memparse() will accept a K/M/G without a digit */
+ if (!isdigit(param->string[0]))
+ goto bad_val;
+ ctx->max_size_opt = memparse(param->string, &rest);
+ ctx->max_val_type = SIZE_STD;
+ if (*rest == '%')
+ ctx->max_val_type = SIZE_PERCENT;
+ return 0;
- case Opt_mode:
- if (match_octal(&args[0], &option))
- goto bad_val;
- pconfig->mode = option & 01777U;
- break;
+ case Opt_nr_inodes:
+ /* memparse() will accept a K/M/G without a digit */
+ if (!isdigit(param->string[0]))
+ goto bad_val;
+ ctx->nr_inodes = memparse(param->string, &rest);
+ return 0;
- case Opt_size: {
- /* memparse() will accept a K/M/G without a digit */
- if (!isdigit(*args[0].from))
- goto bad_val;
- max_size_opt = memparse(args[0].from, &rest);
- max_val_type = SIZE_STD;
- if (*rest == '%')
- max_val_type = SIZE_PERCENT;
- break;
+ case Opt_pagesize:
+ ps = memparse(param->string, &rest);
+ ctx->hstate = size_to_hstate(ps);
+ if (!ctx->hstate) {
+ pr_err("Unsupported page size %lu MB\n", ps >> 20);
+ return -EINVAL;
}
+ return 0;
- case Opt_nr_inodes:
- /* memparse() will accept a K/M/G without a digit */
- if (!isdigit(*args[0].from))
- goto bad_val;
- pconfig->nr_inodes = memparse(args[0].from, &rest);
- break;
+ case Opt_min_size:
+ /* memparse() will accept a K/M/G without a digit */
+ if (!isdigit(param->string[0]))
+ goto bad_val;
+ ctx->min_size_opt = memparse(param->string, &rest);
+ ctx->min_val_type = SIZE_STD;
+ if (*rest == '%')
+ ctx->min_val_type = SIZE_PERCENT;
+ return 0;
- case Opt_pagesize: {
- unsigned long ps;
- ps = memparse(args[0].from, &rest);
- pconfig->hstate = size_to_hstate(ps);
- if (!pconfig->hstate) {
- pr_err("Unsupported page size %lu MB\n",
- ps >> 20);
- return -EINVAL;
- }
- break;
- }
+ default:
+ return -EINVAL;
+ }
- case Opt_min_size: {
- /* memparse() will accept a K/M/G without a digit */
- if (!isdigit(*args[0].from))
- goto bad_val;
- min_size_opt = memparse(args[0].from, &rest);
- min_val_type = SIZE_STD;
- if (*rest == '%')
- min_val_type = SIZE_PERCENT;
- break;
- }
+bad_val:
+ return invalf(fc, "hugetlbfs: Bad value '%s' for mount option '%s'\n",
+ param->string, param->key);
+}
- default:
- pr_err("Bad mount option: \"%s\"\n", p);
- return -EINVAL;
- break;
- }
- }
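memparse() consumes digits plus an optional K/M/G (etc.) suffix and sets its second argument to the first unconsumed character, which is why the '%' checks above look at *rest, and why the isdigit() guards are needed: without them a bare "size=K" would silently parse as zero. A small sketch of the behavior:

	char *rest;
	unsigned long long v;

	v = memparse("50%", &rest);	/* v == 50, *rest == '%': percentage */
	v = memparse("1G", &rest);	/* v == 1073741824, *rest == '\0' */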
+/*
+ * Validate the parsed options.
+ */
+static int hugetlbfs_validate(struct fs_context *fc)
+{
+ struct hugetlbfs_fs_context *ctx = fc->fs_private;
/*
* Use huge page pool size (in hstate) to convert the size
* options to number of huge pages. If NO_SIZE, -1 is returned.
*/
- pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
- max_size_opt, max_val_type);
- pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
- min_size_opt, min_val_type);
+ ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
+ ctx->max_size_opt,
+ ctx->max_val_type);
+ ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
+ ctx->min_size_opt,
+ ctx->min_val_type);
/*
* If max_size was specified, then min_size must be smaller
*/
- if (max_val_type > NO_SIZE &&
- pconfig->min_hpages > pconfig->max_hpages) {
- pr_err("minimum size can not be greater than maximum size\n");
+ if (ctx->max_val_type > NO_SIZE &&
+ ctx->min_hpages > ctx->max_hpages) {
+ pr_err("Minimum size can not be greater than maximum size\n");
return -EINVAL;
}
return 0;
-
-bad_val:
- pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
- return -EINVAL;
}
static int
-hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
- int ret;
- struct hugetlbfs_config config;
+ struct hugetlbfs_fs_context *ctx = fc->fs_private;
struct hugetlbfs_sb_info *sbinfo;
- config.max_hpages = -1; /* No limit on size by default */
- config.nr_inodes = -1; /* No limit on number of inodes by default */
- config.uid = current_fsuid();
- config.gid = current_fsgid();
- config.mode = 0755;
- config.hstate = &default_hstate;
- config.min_hpages = -1; /* No default minimum size */
- ret = hugetlbfs_parse_options(data, &config);
- if (ret)
- return ret;
-
sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
if (!sbinfo)
return -ENOMEM;
sb->s_fs_info = sbinfo;
- sbinfo->hstate = config.hstate;
spin_lock_init(&sbinfo->stat_lock);
- sbinfo->max_inodes = config.nr_inodes;
- sbinfo->free_inodes = config.nr_inodes;
- sbinfo->spool = NULL;
- sbinfo->uid = config.uid;
- sbinfo->gid = config.gid;
- sbinfo->mode = config.mode;
+ sbinfo->hstate = ctx->hstate;
+ sbinfo->max_inodes = ctx->nr_inodes;
+ sbinfo->free_inodes = ctx->nr_inodes;
+ sbinfo->spool = NULL;
+ sbinfo->uid = ctx->uid;
+ sbinfo->gid = ctx->gid;
+ sbinfo->mode = ctx->mode;
/*
* Allocate and initialize subpool if maximum or minimum size is
* specified. Any needed reservations (for minimum size) are taken
* when the subpool is created.
*/
- if (config.max_hpages != -1 || config.min_hpages != -1) {
- sbinfo->spool = hugepage_new_subpool(config.hstate,
- config.max_hpages,
- config.min_hpages);
+ if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
+ sbinfo->spool = hugepage_new_subpool(ctx->hstate,
+ ctx->max_hpages,
+ ctx->min_hpages);
if (!sbinfo->spool)
goto out_free;
}
sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_blocksize = huge_page_size(config.hstate);
- sb->s_blocksize_bits = huge_page_shift(config.hstate);
+ sb->s_blocksize = huge_page_size(ctx->hstate);
+ sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
sb->s_magic = HUGETLBFS_MAGIC;
sb->s_op = &hugetlbfs_ops;
sb->s_time_gran = 1;
- sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
+ sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
if (!sb->s_root)
goto out_free;
return 0;
@@ -1290,16 +1291,52 @@ out_free:
return -ENOMEM;
}
-static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hugetlbfs_get_tree(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
+ int err = hugetlbfs_validate(fc);
+ if (err)
+ return err;
+ return vfs_get_super(fc, vfs_get_independent_super, hugetlbfs_fill_super);
+}
+
+static void hugetlbfs_fs_context_free(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations hugetlbfs_fs_context_ops = {
+ .free = hugetlbfs_fs_context_free,
+ .parse_param = hugetlbfs_parse_param,
+ .get_tree = hugetlbfs_get_tree,
+};
+
+static int hugetlbfs_init_fs_context(struct fs_context *fc)
+{
+ struct hugetlbfs_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max_hpages = -1; /* No limit on size by default */
+ ctx->nr_inodes = -1; /* No limit on number of inodes by default */
+ ctx->uid = current_fsuid();
+ ctx->gid = current_fsgid();
+ ctx->mode = 0755;
+ ctx->hstate = &default_hstate;
+ ctx->min_hpages = -1; /* No default minimum size */
+ ctx->max_val_type = NO_SIZE;
+ ctx->min_val_type = NO_SIZE;
+ fc->fs_private = ctx;
+ fc->ops = &hugetlbfs_fs_context_ops;
+ return 0;
}
static struct file_system_type hugetlbfs_fs_type = {
- .name = "hugetlbfs",
- .mount = hugetlbfs_mount,
- .kill_sb = kill_litter_super,
+ .name = "hugetlbfs",
+ .init_fs_context = hugetlbfs_init_fs_context,
+ .parameters = &hugetlb_fs_parameters,
+ .kill_sb = kill_litter_super,
};
static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
@@ -1384,8 +1421,29 @@ out:
return file;
}
+static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
+{
+ struct fs_context *fc;
+ struct vfsmount *mnt;
+
+ fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
+ if (IS_ERR(fc)) {
+ mnt = ERR_CAST(fc);
+ } else {
+ struct hugetlbfs_fs_context *ctx = fc->fs_private;
+ ctx->hstate = h;
+ mnt = fc_mount(fc);
+ put_fs_context(fc);
+ }
+ if (IS_ERR(mnt))
+ pr_err("Cannot mount internal hugetlbfs for page size %uK",
+ 1U << (h->order + PAGE_SHIFT - 10));
+ return mnt;
+}
+
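mount_one_hugetlbfs() shows the new in-kernel mount pattern that replaces kern_mount_data() plus a formatted option string: allocate a context, set the typed fs-private field directly, then fc_mount() and put_fs_context(). A hedged generic sketch (my_fs_type is assumed):

	struct fs_context *fc = fs_context_for_mount(&my_fs_type, SB_KERNMOUNT);
	struct vfsmount *mnt;

	if (IS_ERR(fc))
		return ERR_CAST(fc);
	/* set typed options via fc->fs_private instead of a string */
	mnt = fc_mount(fc);		/* validate, fill super, mount */
	put_fs_context(fc);
	return mnt;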
static int __init init_hugetlbfs_fs(void)
{
+ struct vfsmount *mnt;
struct hstate *h;
int error;
int i;
@@ -1408,24 +1466,16 @@ static int __init init_hugetlbfs_fs(void)
i = 0;
for_each_hstate(h) {
- char buf[50];
- unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);
-
- snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
- hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
- buf);
-
- if (IS_ERR(hugetlbfs_vfsmount[i])) {
- pr_err("Cannot mount internal hugetlbfs for "
- "page size %uK", ps_kb);
- error = PTR_ERR(hugetlbfs_vfsmount[i]);
- hugetlbfs_vfsmount[i] = NULL;
+ mnt = mount_one_hugetlbfs(h);
+ if (IS_ERR(mnt) && i == 0) {
+ error = PTR_ERR(mnt);
+ goto out;
}
+ hugetlbfs_vfsmount[i] = mnt;
i++;
}
- /* Non default hstates are optional */
- if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
- return 0;
+
+ return 0;
out:
kmem_cache_destroy(hugetlbfs_inode_cachep);
diff --git a/fs/inode.c b/fs/inode.c
index e9d97add2b36..9a453f3637f8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file)
int kill;
int error = 0;
- /* Fast path for nothing security related */
- if (IS_NOSEC(inode))
+ /*
+ * Fast path for nothing security related.
+ * As well for non-regular files, e.g. blkdev inodes.
+ * For example, blkdev_write_iter() might get here
+ * trying to remove privs which it is not allowed to.
+ */
+ if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
return 0;
kill = dentry_needs_remove_privs(dentry);
diff --git a/fs/internal.h b/fs/internal.h
index d410186bc369..6a8b71643af4 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -17,6 +17,7 @@ struct linux_binprm;
struct path;
struct mount;
struct shrink_control;
+struct fs_context;
/*
* block_dev.c
@@ -52,8 +53,16 @@ int __generic_write_end(struct inode *inode, loff_t pos, unsigned copied,
extern void __init chrdev_init(void);
/*
+ * fs_context.c
+ */
+extern int parse_monolithic_mount_data(struct fs_context *, void *);
+extern void fc_drop_locked(struct fs_context *);
+
+/*
* namei.c
*/
+extern int filename_lookup(int dfd, struct filename *name, unsigned flags,
+ struct path *path, struct path *root);
extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *);
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
const char *, unsigned int, struct path *);
@@ -99,10 +108,8 @@ extern struct file *alloc_empty_file_noaccount(int, const struct cred *);
/*
* super.c
*/
-extern int do_remount_sb(struct super_block *, int, void *, int);
+extern int reconfigure_super(struct fs_context *);
extern bool trylock_super(struct super_block *sb);
-extern struct dentry *mount_fs(struct file_system_type *,
- int, const char *, void *);
extern struct super_block *user_get_super(dev_t);
/*
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5d99376d2369..84efb8956734 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4,15 +4,28 @@
* supporting fast/efficient IO.
*
* A note on the read/write ordering memory barriers that are matched between
- * the application and kernel side. When the application reads the CQ ring
- * tail, it must use an appropriate smp_rmb() to order with the smp_wmb()
- * the kernel uses after writing the tail. Failure to do so could cause a
- * delay in when the application notices that completion events available.
- * This isn't a fatal condition. Likewise, the application must use an
- * appropriate smp_wmb() both before writing the SQ tail, and after writing
- * the SQ tail. The first one orders the sqe writes with the tail write, and
- * the latter is paired with the smp_rmb() the kernel will issue before
- * reading the SQ tail on submission.
+ * the application and kernel side.
+ *
+ * After the application reads the CQ ring tail, it must use an
+ * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
+ * before writing the tail (using smp_load_acquire to read the tail will
+ * do). It also needs a smp_mb() before updating CQ head (ordering the
+ * entry load(s) with the head store), pairing with an implicit barrier
+ * through a control-dependency in io_get_cqring (smp_store_release to
+ * store head will do). Failure to do so could lead to reading invalid
+ * CQ entries.
+ *
+ * Likewise, the application must use an appropriate smp_wmb() before
+ * writing the SQ tail (ordering SQ entry stores with the tail store),
+ * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
+ * to store the tail will do). And it needs a barrier ordering the SQ
+ * head load before writing new SQ entries (smp_load_acquire to read
+ * head will do).
+ *
+ * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
+ * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
+ * updating the SQ tail; a full memory barrier smp_mb() is needed
+ * in between.
*
* Also see the examples in the liburing library:
*
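A sketch of the application-side CQ protocol the comment above describes, using C11-style builtins in place of the kernel's smp_* macros (the mapped-field names cq_head/cq_tail/cq_mask/cqes and handle_cqe() are assumptions):

	unsigned head = *cq_head;	/* app-owned index */
	unsigned tail = __atomic_load_n(cq_tail, __ATOMIC_ACQUIRE);

	while (head != tail) {
		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
		handle_cqe(cqe);	/* entry reads happen... */
		head++;
	}
	/* ...before the head store publishes the slots back */
	__atomic_store_n(cq_head, head, __ATOMIC_RELEASE);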
@@ -70,20 +83,108 @@ struct io_uring {
u32 tail ____cacheline_aligned_in_smp;
};
+/*
+ * This data is shared with the application through the mmap at offset
+ * IORING_OFF_SQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_sqring_offsets when calling io_uring_setup.
+ */
struct io_sq_ring {
+ /*
+ * Head and tail offsets into the ring; the offsets need to be
+ * masked to get valid indices.
+ *
+ * The kernel controls head and the application controls tail.
+ */
struct io_uring r;
+ /*
+ * Bitmask to apply to head and tail offsets (constant, equals
+ * ring_entries - 1)
+ */
u32 ring_mask;
+ /* Ring size (constant, power of 2) */
u32 ring_entries;
+ /*
+ * Number of invalid entries dropped by the kernel due to
+ * invalid index stored in array
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * After a new SQ head value was read by the application this
+ * counter includes all submissions that were dropped reaching
+ * the new SQ head (and possibly more).
+ */
u32 dropped;
+ /*
+ * Runtime flags
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application.
+ *
+ * The application needs a full memory barrier before checking
+ * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
+ */
u32 flags;
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+ * mmapped by the application using the IORING_OFF_SQES offset.
+ *
+ * This indirection could e.g. be used to assign fixed
+ * io_uring_sqe entries to operations and only submit them to
+ * the queue when needed.
+ *
+ * The kernel modifies neither the indices array nor the entries
+ * array.
+ */
u32 array[];
};
+/*
+ * This data is shared with the application through the mmap at offset
+ * IORING_OFF_CQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_cqring_offsets when calling io_uring_setup.
+ */
struct io_cq_ring {
+ /*
+ * Head and tail offsets into the ring; the offsets need to be
+ * masked to get valid indices.
+ *
+ * The application controls head and the kernel tail.
+ */
struct io_uring r;
+ /*
+ * Bitmask to apply to head and tail offsets (constant, equals
+ * ring_entries - 1)
+ */
u32 ring_mask;
+ /* Ring size (constant, power of 2) */
u32 ring_entries;
+ /*
+ * Number of completion events lost because the queue was full;
+ * this should be avoided by the application by making sure
+ * there are no more requests pending than there is space in
+ * the completion queue.
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * As completion events come in out of order this counter is not
+ * ordered with any other data.
+ */
u32 overflow;
+ /*
+ * Ring buffer of completion events.
+ *
+ * The kernel writes completion events fresh every time they are
+ * produced, so the application is allowed to modify pending
+ * entries.
+ */
struct io_uring_cqe cqes[];
};
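Since ring_entries is a power of two and ring_mask equals ring_entries - 1, head and tail are free-running counters: a slot index is obtained by masking, and occupancy by subtraction, both of which remain correct across 32-bit wraparound:

	unsigned index = tail & ring->ring_mask;	/* slot to use */
	unsigned used  = tail - head;			/* wrap-safe count */
	bool full      = (used == ring->ring_entries);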
@@ -189,17 +290,28 @@ struct sqe_submit {
bool needs_fixed_file;
};
+/*
+ * First field must be the file pointer in all the
+ * iocb unions! See also 'struct kiocb' in <linux/fs.h>
+ */
struct io_poll_iocb {
struct file *file;
struct wait_queue_head *head;
__poll_t events;
- bool woken;
+ bool done;
bool canceled;
struct wait_queue_entry wait;
};
+/*
+ * NOTE! Each of the iocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'ki_filp' in this struct.
+ */
struct io_kiocb {
union {
+ struct file *file;
struct kiocb rw;
struct io_poll_iocb poll;
};
@@ -210,10 +322,11 @@ struct io_kiocb {
struct list_head list;
unsigned int flags;
refcount_t refs;
-#define REQ_F_FORCE_NONBLOCK 1 /* inline submission attempt */
+#define REQ_F_NOWAIT 1 /* must not punt to workers */
#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
#define REQ_F_FIXED_FILE 4 /* ctx owns file */
#define REQ_F_SEQ_PREV 8 /* sequential with previous */
+#define REQ_F_PREPPED 16 /* prep already done */
u64 user_data;
u64 error;
@@ -305,12 +418,6 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
/* order cqe stores with ring update */
smp_store_release(&ring->r.tail, ctx->cached_cq_tail);
- /*
- * Write sider barrier of tail update, app has read side. See
- * comment at the top of this file.
- */
- smp_wmb();
-
if (wq_has_sleeper(&ctx->cq_wait)) {
wake_up_interruptible(&ctx->cq_wait);
kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
@@ -324,9 +431,12 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
unsigned tail;
tail = ctx->cached_cq_tail;
- /* See comment at the top of the file */
- smp_rmb();
- if (tail + 1 == READ_ONCE(ring->r.head))
+ /*
+ * writes to the cq entry need to come after reading head; the
+ * control dependency is enough as we're using WRITE_ONCE to
+ * fill the cq entry
+ */
+ if (tail - READ_ONCE(ring->r.head) == ring->ring_entries)
return NULL;
ctx->cached_cq_tail++;
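The replaced check (tail + 1 == head) only caught a full ring before the counters wrapped; tail - READ_ONCE(head) == ring_entries uses unsigned subtraction, so it stays correct across wraparound: e.g. tail == 2 and head == 0xfffffffe still yields 4 entries in flight.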
@@ -355,20 +465,25 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
}
}
-static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 ki_user_data,
+static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+ if (waitqueue_active(&ctx->sqo_wait))
+ wake_up(&ctx->sqo_wait);
+}
+
+static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
long res, unsigned ev_flags)
{
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
- io_cqring_fill_event(ctx, ki_user_data, res, ev_flags);
+ io_cqring_fill_event(ctx, user_data, res, ev_flags);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
- if (waitqueue_active(&ctx->wait))
- wake_up(&ctx->wait);
- if (waitqueue_active(&ctx->sqo_wait))
- wake_up(&ctx->sqo_wait);
+ io_cqring_ev_posted(ctx);
}
static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
@@ -382,13 +497,14 @@ static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
struct io_submit_state *state)
{
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct io_kiocb *req;
if (!percpu_ref_tryget(&ctx->refs))
return NULL;
if (!state) {
- req = kmem_cache_alloc(req_cachep, __GFP_NOWARN);
+ req = kmem_cache_alloc(req_cachep, gfp);
if (unlikely(!req))
goto out;
} else if (!state->free_reqs) {
@@ -396,10 +512,18 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
int ret;
sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
- ret = kmem_cache_alloc_bulk(req_cachep, __GFP_NOWARN, sz,
- state->reqs);
- if (unlikely(ret <= 0))
- goto out;
+ ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
+
+ /*
+ * Bulk alloc is all-or-nothing. If we fail to get a batch,
+ * retry single alloc to be on the safe side.
+ */
+ if (unlikely(ret <= 0)) {
+ state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+ if (!state->reqs[0])
+ goto out;
+ ret = 1;
+ }
state->free_reqs = ret - 1;
state->cur_req = 1;
req = state->reqs[0];
@@ -411,7 +535,8 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
req->ctx = ctx;
req->flags = 0;
- refcount_set(&req->refs, 0);
+ /* one is dropped after submission, the other at completion */
+ refcount_set(&req->refs, 2);
return req;
out:
io_ring_drop_ctx_refs(ctx, 1);
@@ -429,10 +554,16 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
static void io_free_req(struct io_kiocb *req)
{
- if (!refcount_read(&req->refs) || refcount_dec_and_test(&req->refs)) {
- io_ring_drop_ctx_refs(req->ctx, 1);
- kmem_cache_free(req_cachep, req);
- }
+ if (req->file && !(req->flags & REQ_F_FIXED_FILE))
+ fput(req->file);
+ io_ring_drop_ctx_refs(req->ctx, 1);
+ kmem_cache_free(req_cachep, req);
+}
+
+static void io_put_req(struct io_kiocb *req)
+{
+ if (refcount_dec_and_test(&req->refs))
+ io_free_req(req);
}
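Every request now carries two references: the submission path holds one until it has finished touching the request (possibly handing it to a worker), and the completion path holds the other; io_put_req() frees on the final drop, so neither side can see the request disappear under it. A minimal sketch of the scheme:

	refcount_set(&req->refs, 2);	/* submitter + completer */

	/* submission side, once req is handed off: */
	io_put_req(req);

	/* completion side (io_complete_rw() etc.): */
	io_put_req(req);		/* last drop -> io_free_req() */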
/*
@@ -442,44 +573,34 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
struct list_head *done)
{
void *reqs[IO_IOPOLL_BATCH];
- int file_count, to_free;
- struct file *file = NULL;
struct io_kiocb *req;
+ int to_free;
- file_count = to_free = 0;
+ to_free = 0;
while (!list_empty(done)) {
req = list_first_entry(done, struct io_kiocb, list);
list_del(&req->list);
io_cqring_fill_event(ctx, req->user_data, req->error, 0);
-
- reqs[to_free++] = req;
(*nr_events)++;
- /*
- * Batched puts of the same file, to avoid dirtying the
- * file usage count multiple times, if avoidable.
- */
- if (!(req->flags & REQ_F_FIXED_FILE)) {
- if (!file) {
- file = req->rw.ki_filp;
- file_count = 1;
- } else if (file == req->rw.ki_filp) {
- file_count++;
+ if (refcount_dec_and_test(&req->refs)) {
+ /* If we're not using fixed files, we have to pair the
+ * completion part with the file put. Use regular
+ * completions for those, only batch free for fixed
+ * file.
+ */
+ if (req->flags & REQ_F_FIXED_FILE) {
+ reqs[to_free++] = req;
+ if (to_free == ARRAY_SIZE(reqs))
+ io_free_req_many(ctx, reqs, &to_free);
} else {
- fput_many(file, file_count);
- file = req->rw.ki_filp;
- file_count = 1;
+ io_free_req(req);
}
}
-
- if (to_free == ARRAY_SIZE(reqs))
- io_free_req_many(ctx, reqs, &to_free);
}
- io_commit_cqring(ctx);
- if (file)
- fput_many(file, file_count);
+ io_commit_cqring(ctx);
io_free_req_many(ctx, reqs, &to_free);
}
@@ -602,21 +723,14 @@ static void kiocb_end_write(struct kiocb *kiocb)
}
}
-static void io_fput(struct io_kiocb *req)
-{
- if (!(req->flags & REQ_F_FIXED_FILE))
- fput(req->rw.ki_filp);
-}
-
static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
kiocb_end_write(kiocb);
- io_fput(req);
io_cqring_add_event(req->ctx, req->user_data, res, 0);
- io_free_req(req);
+ io_put_req(req);
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -666,11 +780,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
list_add_tail(&req->list, &ctx->poll_list);
}
-static void io_file_put(struct io_submit_state *state, struct file *file)
+static void io_file_put(struct io_submit_state *state)
{
- if (!state) {
- fput(file);
- } else if (state->file) {
+ if (state->file) {
int diff = state->has_refs - state->used_refs;
if (diff)
@@ -695,7 +807,7 @@ static struct file *io_file_get(struct io_submit_state *state, int fd)
state->ios_left--;
return state->file;
}
- io_file_put(state, NULL);
+ io_file_put(state);
}
state->file = fget_many(fd, state->ios_left);
if (!state->file)
@@ -726,36 +838,23 @@ static bool io_file_supports_async(struct file *file)
}
static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
- bool force_nonblock, struct io_submit_state *state)
+ bool force_nonblock)
{
const struct io_uring_sqe *sqe = s->sqe;
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw;
- unsigned ioprio, flags;
- int fd, ret;
+ unsigned ioprio;
+ int ret;
+ if (!req->file)
+ return -EBADF;
/* For -EAGAIN retry, everything is already prepped */
- if (kiocb->ki_filp)
+ if (req->flags & REQ_F_PREPPED)
return 0;
- flags = READ_ONCE(sqe->flags);
- fd = READ_ONCE(sqe->fd);
+ if (force_nonblock && !io_file_supports_async(req->file))
+ force_nonblock = false;
- if (flags & IOSQE_FIXED_FILE) {
- if (unlikely(!ctx->user_files ||
- (unsigned) fd >= ctx->nr_user_files))
- return -EBADF;
- kiocb->ki_filp = ctx->user_files[fd];
- req->flags |= REQ_F_FIXED_FILE;
- } else {
- if (s->needs_fixed_file)
- return -EBADF;
- kiocb->ki_filp = io_file_get(state, fd);
- if (unlikely(!kiocb->ki_filp))
- return -EBADF;
- if (force_nonblock && !io_file_supports_async(kiocb->ki_filp))
- force_nonblock = false;
- }
kiocb->ki_pos = READ_ONCE(sqe->off);
kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
@@ -764,7 +863,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
if (ioprio) {
ret = ioprio_check_cap(ioprio);
if (ret)
- goto out_fput;
+ return ret;
kiocb->ki_ioprio = ioprio;
} else
@@ -772,38 +871,30 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
if (unlikely(ret))
- goto out_fput;
- if (force_nonblock) {
+ return ret;
+
+ /* don't allow async punt if RWF_NOWAIT was requested */
+ if (kiocb->ki_flags & IOCB_NOWAIT)
+ req->flags |= REQ_F_NOWAIT;
+
+ if (force_nonblock)
kiocb->ki_flags |= IOCB_NOWAIT;
- req->flags |= REQ_F_FORCE_NONBLOCK;
- }
+
if (ctx->flags & IORING_SETUP_IOPOLL) {
- ret = -EOPNOTSUPP;
if (!(kiocb->ki_flags & IOCB_DIRECT) ||
!kiocb->ki_filp->f_op->iopoll)
- goto out_fput;
+ return -EOPNOTSUPP;
req->error = 0;
kiocb->ki_flags |= IOCB_HIPRI;
kiocb->ki_complete = io_complete_rw_iopoll;
} else {
- if (kiocb->ki_flags & IOCB_HIPRI) {
- ret = -EINVAL;
- goto out_fput;
- }
+ if (kiocb->ki_flags & IOCB_HIPRI)
+ return -EINVAL;
kiocb->ki_complete = io_complete_rw;
}
+ req->flags |= REQ_F_PREPPED;
return 0;
-out_fput:
- if (!(flags & IOSQE_FIXED_FILE)) {
- /*
- * in case of error, we didn't use this file reference. drop it.
- */
- if (state)
- state->used_refs--;
- io_file_put(state, kiocb->ki_filp);
- }
- return ret;
}
static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
@@ -864,6 +955,9 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
if (offset)
iov_iter_advance(iter, offset);
+
+ /* don't drop a reference to these pages */
+ iter->type |= ITER_BVEC_FLAG_NO_REF;
return 0;
}
@@ -887,7 +981,7 @@ static int io_import_iovec(struct io_ring_ctx *ctx, int rw,
opcode = READ_ONCE(sqe->opcode);
if (opcode == IORING_OP_READ_FIXED ||
opcode == IORING_OP_WRITE_FIXED) {
- ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
+ int ret = io_import_fixed(ctx, rw, sqe, iter);
*iovec = NULL;
return ret;
}
@@ -923,7 +1017,7 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
/* Use 8x RA size as a decent limiter for both reads/writes */
max_pages = filp->f_ra.ra_pages;
if (!max_pages)
- max_pages = VM_MAX_READAHEAD >> (PAGE_SHIFT - 10);
+ max_pages = VM_READAHEAD_PAGES;
max_pages *= 8;
/* If max pages are exceeded, reset the state */
@@ -945,31 +1039,29 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
async_list->io_end = io_end;
}
-static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s,
- bool force_nonblock, struct io_submit_state *state)
+static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
+ bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw;
struct iov_iter iter;
struct file *file;
size_t iov_count;
- ssize_t ret;
+ int ret;
- ret = io_prep_rw(req, s, force_nonblock, state);
+ ret = io_prep_rw(req, s, force_nonblock);
if (ret)
return ret;
file = kiocb->ki_filp;
- ret = -EBADF;
if (unlikely(!(file->f_mode & FMODE_READ)))
- goto out_fput;
- ret = -EINVAL;
+ return -EBADF;
if (unlikely(!file->f_op->read_iter))
- goto out_fput;
+ return -EINVAL;
ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
if (ret)
- goto out_fput;
+ return ret;
iov_count = iov_iter_count(&iter);
ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
@@ -991,38 +1083,32 @@ static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s,
}
}
kfree(iovec);
-out_fput:
- /* Hold on to the file for -EAGAIN */
- if (unlikely(ret && ret != -EAGAIN))
- io_fput(req);
return ret;
}
-static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
- bool force_nonblock, struct io_submit_state *state)
+static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
+ bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw;
struct iov_iter iter;
struct file *file;
size_t iov_count;
- ssize_t ret;
+ int ret;
- ret = io_prep_rw(req, s, force_nonblock, state);
+ ret = io_prep_rw(req, s, force_nonblock);
if (ret)
return ret;
- ret = -EBADF;
file = kiocb->ki_filp;
if (unlikely(!(file->f_mode & FMODE_WRITE)))
- goto out_fput;
- ret = -EINVAL;
+ return -EBADF;
if (unlikely(!file->f_op->write_iter))
- goto out_fput;
+ return -EINVAL;
ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
if (ret)
- goto out_fput;
+ return ret;
iov_count = iov_iter_count(&iter);
@@ -1036,6 +1122,8 @@ static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
if (!ret) {
+ ssize_t ret2;
+
/*
* Open-code file_start_write here to grab freeze protection,
* which will be released by another thread in
@@ -1050,14 +1138,22 @@ static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
SB_FREEZE_WRITE);
}
kiocb->ki_flags |= IOCB_WRITE;
- io_rw_done(kiocb, call_write_iter(file, kiocb, &iter));
+
+ ret2 = call_write_iter(file, kiocb, &iter);
+ if (!force_nonblock || ret2 != -EAGAIN) {
+ io_rw_done(kiocb, ret2);
+ } else {
+ /*
+ * If ->needs_lock is true, we're already in async
+ * context.
+ */
+ if (!s->needs_lock)
+ io_async_list_note(WRITE, req, iov_count);
+ ret = -EAGAIN;
+ }
}
out_free:
kfree(iovec);
-out_fput:
- /* Hold on to the file for -EAGAIN */
- if (unlikely(ret && ret != -EAGAIN))
- io_fput(req);
return ret;
}
@@ -1072,29 +1168,19 @@ static int io_nop(struct io_kiocb *req, u64 user_data)
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- /*
- * Twilight zone - it's possible that someone issued an opcode that
- * has a file attached, then got -EAGAIN on submission, and changed
- * the sqe before we retried it from async context. Avoid dropping
- * a file reference for this malicious case, and flag the error.
- */
- if (req->rw.ki_filp) {
- err = -EBADF;
- io_fput(req);
- }
io_cqring_add_event(ctx, user_data, err, 0);
- io_free_req(req);
+ io_put_req(req);
return 0;
}
static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
- unsigned flags;
- int fd;
- /* Prep already done */
- if (req->rw.ki_filp)
+ if (!req->file)
+ return -EBADF;
+ /* Prep already done (EAGAIN retry) */
+ if (req->flags & REQ_F_PREPPED)
return 0;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
@@ -1102,20 +1188,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
return -EINVAL;
- fd = READ_ONCE(sqe->fd);
- flags = READ_ONCE(sqe->flags);
-
- if (flags & IOSQE_FIXED_FILE) {
- if (unlikely(!ctx->user_files || fd >= ctx->nr_user_files))
- return -EBADF;
- req->rw.ki_filp = ctx->user_files[fd];
- req->flags |= REQ_F_FIXED_FILE;
- } else {
- req->rw.ki_filp = fget(fd);
- if (unlikely(!req->rw.ki_filp))
- return -EBADF;
- }
-
+ req->flags |= REQ_F_PREPPED;
return 0;
}
@@ -1144,9 +1217,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
end > 0 ? end : LLONG_MAX,
fsync_flags & IORING_FSYNC_DATASYNC);
- io_fput(req);
io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
- io_free_req(req);
+ io_put_req(req);
return 0;
}
@@ -1204,15 +1276,16 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
spin_unlock_irq(&ctx->completion_lock);
io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
- io_free_req(req);
+ io_put_req(req);
return 0;
}
-static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
+static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ __poll_t mask)
{
- io_cqring_add_event(req->ctx, req->user_data, mangle_poll(mask), 0);
- io_fput(req);
- io_free_req(req);
+ req->poll.done = true;
+ io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask), 0);
+ io_commit_cqring(ctx);
}
static void io_poll_complete_work(struct work_struct *work)
@@ -1240,9 +1313,11 @@ static void io_poll_complete_work(struct work_struct *work)
return;
}
list_del_init(&req->list);
+ io_poll_complete(ctx, req, mask);
spin_unlock_irq(&ctx->completion_lock);
- io_poll_complete(req, mask);
+ io_cqring_ev_posted(ctx);
+ io_put_req(req);
}
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -1253,29 +1328,25 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
struct io_ring_ctx *ctx = req->ctx;
__poll_t mask = key_to_poll(key);
-
- poll->woken = true;
+ unsigned long flags;
/* for instances that support it check for an event match first: */
- if (mask) {
- unsigned long flags;
+ if (mask && !(mask & poll->events))
+ return 0;
- if (!(mask & poll->events))
- return 0;
+ list_del_init(&poll->wait.entry);
- /* try to complete the iocb inline if we can: */
- if (spin_trylock_irqsave(&ctx->completion_lock, flags)) {
- list_del(&req->list);
- spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
+ list_del(&req->list);
+ io_poll_complete(ctx, req, mask);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
- list_del_init(&poll->wait.entry);
- io_poll_complete(req, mask);
- return 1;
- }
+ io_cqring_ev_posted(ctx);
+ io_put_req(req);
+ } else {
+ queue_work(ctx->sqo_wq, &req->work);
}
- list_del_init(&poll->wait.entry);
- queue_work(ctx->sqo_wq, &req->work);
return 1;
}
@@ -1305,36 +1376,23 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx;
struct io_poll_table ipt;
- unsigned flags;
+ bool cancel = false;
__poll_t mask;
u16 events;
- int fd;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
return -EINVAL;
+ if (!poll->file)
+ return -EBADF;
INIT_WORK(&req->work, io_poll_complete_work);
events = READ_ONCE(sqe->poll_events);
poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
- flags = READ_ONCE(sqe->flags);
- fd = READ_ONCE(sqe->fd);
-
- if (flags & IOSQE_FIXED_FILE) {
- if (unlikely(!ctx->user_files || fd >= ctx->nr_user_files))
- return -EBADF;
- poll->file = ctx->user_files[fd];
- req->flags |= REQ_F_FIXED_FILE;
- } else {
- poll->file = fget(fd);
- }
- if (unlikely(!poll->file))
- return -EBADF;
-
poll->head = NULL;
- poll->woken = false;
+ poll->done = false;
poll->canceled = false;
ipt.pt._qproc = io_poll_queue_proc;
@@ -1346,56 +1404,43 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
INIT_LIST_HEAD(&poll->wait.entry);
init_waitqueue_func_entry(&poll->wait, io_poll_wake);
- /* one for removal from waitqueue, one for this function */
- refcount_set(&req->refs, 2);
-
mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
- if (unlikely(!poll->head)) {
- /* we did not manage to set up a waitqueue, done */
- goto out;
- }
spin_lock_irq(&ctx->completion_lock);
- spin_lock(&poll->head->lock);
- if (poll->woken) {
- /* wake_up context handles the rest */
- mask = 0;
+ if (likely(poll->head)) {
+ spin_lock(&poll->head->lock);
+ if (unlikely(list_empty(&poll->wait.entry))) {
+ if (ipt.error)
+ cancel = true;
+ ipt.error = 0;
+ mask = 0;
+ }
+ if (mask || ipt.error)
+ list_del_init(&poll->wait.entry);
+ else if (cancel)
+ WRITE_ONCE(poll->canceled, true);
+ else if (!poll->done) /* actually waiting for an event */
+ list_add_tail(&req->list, &ctx->cancel_list);
+ spin_unlock(&poll->head->lock);
+ }
+ if (mask) { /* no async, we'd stolen it */
+ req->error = mangle_poll(mask);
ipt.error = 0;
- } else if (mask || ipt.error) {
- /* if we get an error or a mask we are done */
- WARN_ON_ONCE(list_empty(&poll->wait.entry));
- list_del_init(&poll->wait.entry);
- } else {
- /* actually waiting for an event */
- list_add_tail(&req->list, &ctx->cancel_list);
+ io_poll_complete(ctx, req, mask);
}
- spin_unlock(&poll->head->lock);
spin_unlock_irq(&ctx->completion_lock);
-out:
- if (unlikely(ipt.error)) {
- if (!(flags & IOSQE_FIXED_FILE))
- fput(poll->file);
- /*
- * Drop one of our refs to this req, __io_submit_sqe() will
- * drop the other one since we're returning an error.
- */
- io_free_req(req);
- return ipt.error;
+ if (mask) {
+ io_cqring_ev_posted(ctx);
+ io_put_req(req);
}
-
- if (mask)
- io_poll_complete(req, mask);
- io_free_req(req);
- return 0;
+ return ipt.error;
}
static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct sqe_submit *s, bool force_nonblock,
- struct io_submit_state *state)
+ const struct sqe_submit *s, bool force_nonblock)
{
- ssize_t ret;
- int opcode;
+ int ret, opcode;
if (unlikely(s->index >= ctx->sq_entries))
return -EINVAL;
@@ -1409,18 +1454,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
case IORING_OP_READV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_read(req, s, force_nonblock, state);
+ ret = io_read(req, s, force_nonblock);
break;
case IORING_OP_WRITEV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_write(req, s, force_nonblock, state);
+ ret = io_write(req, s, force_nonblock);
break;
case IORING_OP_READ_FIXED:
- ret = io_read(req, s, force_nonblock, state);
+ ret = io_read(req, s, force_nonblock);
break;
case IORING_OP_WRITE_FIXED:
- ret = io_write(req, s, force_nonblock, state);
+ ret = io_write(req, s, force_nonblock);
break;
case IORING_OP_FSYNC:
ret = io_fsync(req, s->sqe, force_nonblock);
@@ -1493,8 +1538,7 @@ restart:
struct sqe_submit *s = &req->submit;
const struct io_uring_sqe *sqe = s->sqe;
- /* Ensure we clear previously set forced non-block flag */
- req->flags &= ~REQ_F_FORCE_NONBLOCK;
+ /* Ensure we clear previously set non-block flag */
req->rw.ki_flags &= ~IOCB_NOWAIT;
ret = 0;
@@ -1513,7 +1557,7 @@ restart:
s->has_user = cur_mm != NULL;
s->needs_lock = true;
do {
- ret = __io_submit_sqe(ctx, req, s, false, NULL);
+ ret = __io_submit_sqe(ctx, req, s, false);
/*
* We can get EAGAIN for polled IO even though
* we're forcing a sync submission from here,
@@ -1525,9 +1569,13 @@ restart:
cond_resched();
} while (1);
}
+
+ /* drop submission reference */
+ io_put_req(req);
+
if (ret) {
io_cqring_add_event(ctx, sqe->user_data, ret, 0);
- io_free_req(req);
+ io_put_req(req);
}
/* async context always use a copy of the sqe */
@@ -1614,11 +1662,55 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
return ret;
}
+static bool io_op_needs_file(const struct io_uring_sqe *sqe)
+{
+ int op = READ_ONCE(sqe->opcode);
+
+ switch (op) {
+ case IORING_OP_NOP:
+ case IORING_OP_POLL_REMOVE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
+ struct io_submit_state *state, struct io_kiocb *req)
+{
+ unsigned flags;
+ int fd;
+
+ flags = READ_ONCE(s->sqe->flags);
+ fd = READ_ONCE(s->sqe->fd);
+
+ if (!io_op_needs_file(s->sqe)) {
+ req->file = NULL;
+ return 0;
+ }
+
+ if (flags & IOSQE_FIXED_FILE) {
+ if (unlikely(!ctx->user_files ||
+ (unsigned) fd >= ctx->nr_user_files))
+ return -EBADF;
+ req->file = ctx->user_files[fd];
+ req->flags |= REQ_F_FIXED_FILE;
+ } else {
+ if (s->needs_fixed_file)
+ return -EBADF;
+ req->file = io_file_get(state, fd);
+ if (unlikely(!req->file))
+ return -EBADF;
+ }
+
+ return 0;
+}
+
static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
struct io_submit_state *state)
{
struct io_kiocb *req;
- ssize_t ret;
+ int ret;
/* enforce forwards compatibility on users */
if (unlikely(s->sqe->flags & ~IOSQE_FIXED_FILE))
@@ -1628,10 +1720,12 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
if (unlikely(!req))
return -EAGAIN;
- req->rw.ki_filp = NULL;
+ ret = io_req_set_file(ctx, s, state, req);
+ if (unlikely(ret))
+ goto out;
- ret = __io_submit_sqe(ctx, req, s, true, state);
- if (ret == -EAGAIN) {
+ ret = __io_submit_sqe(ctx, req, s, true);
+ if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
struct io_uring_sqe *sqe_copy;
sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -1649,11 +1743,23 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
INIT_WORK(&req->work, io_sq_wq_submit_work);
queue_work(ctx->sqo_wq, &req->work);
}
- ret = 0;
+
+ /*
+ * Queued up for async execution, worker will release
+ * submit reference when the iocb is actually
+ * submitted.
+ */
+ return 0;
}
}
+
+out:
+ /* drop submission reference */
+ io_put_req(req);
+
+ /* and drop final reference, if we failed */
if (ret)
- io_free_req(req);
+ io_put_req(req);
return ret;
}
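/*
 * A sketch (not part of the patch) of the request lifetime the hunks
 * above introduce: each io_kiocb now carries two references, one for
 * submission and one for completion, both dropped via io_put_req():
 */
	ret = __io_submit_sqe(ctx, req, s, true);
	io_put_req(req);		/* drop submission reference */
	if (ret)
		io_put_req(req);	/* on failure, drop the completion ref too */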
@@ -1664,7 +1770,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
static void io_submit_state_end(struct io_submit_state *state)
{
blk_finish_plug(&state->plug);
- io_file_put(state, NULL);
+ io_file_put(state);
if (state->free_reqs)
kmem_cache_free_bulk(req_cachep, state->free_reqs,
&state->reqs[state->cur_req]);
@@ -1693,24 +1799,10 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
* write new data to them.
*/
smp_store_release(&ring->r.head, ctx->cached_sq_head);
-
- /*
- * write side barrier of head update, app has read side. See
- * comment at the top of this file
- */
- smp_wmb();
}
}
/*
- * Undo last io_get_sqring()
- */
-static void io_drop_sqring(struct io_ring_ctx *ctx)
-{
- ctx->cached_sq_head--;
-}
-
-/*
* Fetch an sqe, if one is available. Note that s->sqe will point to memory
* that is mapped by userspace. This means that care needs to be taken to
* ensure that reads are stable, as we cannot rely on userspace always
@@ -1732,9 +1824,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
* though the application is the one updating it.
*/
head = ctx->cached_sq_head;
- /* See comment at the top of this file */
- smp_rmb();
- if (head == READ_ONCE(ring->r.tail))
+ /* make sure SQ entry isn't read before tail */
+ if (head == smp_load_acquire(&ring->r.tail))
return false;
head = READ_ONCE(ring->array[head & ctx->sq_mask]);
@@ -1748,8 +1839,6 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
/* drop invalid entries */
ctx->cached_sq_head++;
ring->dropped++;
- /* See comment at the top of this file */
- smp_wmb();
return false;
}
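/*
 * A sketch (not part of the patch) of the pairing assumed by the
 * smp_load_acquire() above: the producer (conceptually, userspace)
 * must publish the SQ entry with release semantics before advancing
 * the tail, so the consumer's acquire load of the tail also sees the
 * entry:
 */
	ring->array[tail & ring_mask] = sqe_index;	/* fill entry first */
	smp_store_release(&ring->r.tail, tail + 1);	/* then advance tail */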
@@ -1859,7 +1948,8 @@ static int io_sq_thread(void *data)
/* Tell userspace we may need a wakeup call */
ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP;
- smp_wmb();
+ /* make sure to read SQ tail after writing flags */
+ smp_mb();
if (!io_get_sqring(ctx, &sqes[0])) {
if (kthread_should_stop()) {
@@ -1872,13 +1962,11 @@ static int io_sq_thread(void *data)
finish_wait(&ctx->sqo_wait, &wait);
ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
- smp_wmb();
continue;
}
finish_wait(&ctx->sqo_wait, &wait);
ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
- smp_wmb();
}
i = 0;
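/*
 * Note on the smp_mb() above (a sketch of the reasoning, not stated in
 * the patch): setting IORING_SQ_NEED_WAKEUP is a store, and the
 * io_get_sqring() that follows begins with a load of the SQ tail.
 * Ordering a store before a later load needs a full barrier; a write
 * barrier such as the old smp_wmb() does not provide that.
 */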
@@ -1913,13 +2001,17 @@ static int io_sq_thread(void *data)
unuse_mm(cur_mm);
mmput(cur_mm);
}
+
+ if (kthread_should_park())
+ kthread_parkme();
+
return 0;
}
static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
{
struct io_submit_state state, *statep = NULL;
- int i, ret = 0, submit = 0;
+ int i, submit = 0;
if (to_submit > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, ctx, to_submit);
@@ -1928,6 +2020,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
for (i = 0; i < to_submit; i++) {
struct sqe_submit s;
+ int ret;
if (!io_get_sqring(ctx, &s))
break;
@@ -1935,21 +2028,18 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
s.has_user = true;
s.needs_lock = false;
s.needs_fixed_file = false;
+ submit++;
ret = io_submit_sqe(ctx, &s, statep);
- if (ret) {
- io_drop_sqring(ctx);
- break;
- }
-
- submit++;
+ if (ret)
+ io_cqring_add_event(ctx, s.sqe->user_data, ret, 0);
}
io_commit_sqring(ctx);
if (statep)
io_submit_state_end(statep);
- return submit ? submit : ret;
+ return submit;
}
static unsigned io_cqring_events(struct io_cq_ring *ring)
@@ -1975,7 +2065,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return 0;
if (sig) {
- ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz);
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+ &ksigmask, &sigsaved, sigsz);
+ else
+#endif
+ ret = set_user_sigmask(sig, &ksigmask,
+ &sigsaved, sigsz);
+
if (ret)
return ret;
}
@@ -2039,6 +2137,7 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
if (ctx->sqo_thread) {
ctx->sqo_stop = 1;
mb();
+ kthread_park(ctx->sqo_thread);
kthread_stop(ctx->sqo_thread);
ctx->sqo_thread = NULL;
}
@@ -2200,6 +2299,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
fput(ctx->user_files[i]);
kfree(ctx->user_files);
+ ctx->user_files = NULL;
ctx->nr_user_files = 0;
return ret;
}
@@ -2220,19 +2320,23 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
mmgrab(current->mm);
ctx->sqo_mm = current->mm;
- ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
- if (!ctx->sq_thread_idle)
- ctx->sq_thread_idle = HZ;
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ ret = -EPERM;
+ if (!capable(CAP_SYS_ADMIN))
+ goto err;
- ret = -EINVAL;
- if (!cpu_possible(p->sq_thread_cpu))
- goto err;
+ ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
+ if (!ctx->sq_thread_idle)
+ ctx->sq_thread_idle = HZ;
- if (ctx->flags & IORING_SETUP_SQPOLL) {
if (p->flags & IORING_SETUP_SQ_AFF) {
- int cpu;
+ int cpu = array_index_nospec(p->sq_thread_cpu,
+ nr_cpu_ids);
+
+ ret = -EINVAL;
+ if (!cpu_possible(cpu))
+ goto err;
- cpu = array_index_nospec(p->sq_thread_cpu, NR_CPUS);
ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
ctx, cpu,
"io_uring-sq");
@@ -2293,8 +2397,12 @@ static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
static void io_mem_free(void *ptr)
{
- struct page *page = virt_to_head_page(ptr);
+ struct page *page;
+
+ if (!ptr)
+ return;
+ page = virt_to_head_page(ptr);
if (put_page_testzero(page))
free_compound_page(page);
}
@@ -2335,7 +2443,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
if (ctx->account_mem)
io_unaccount_mem(ctx->user, imu->nr_bvecs);
- kfree(imu->bvec);
+ kvfree(imu->bvec);
imu->nr_bvecs = 0;
}
@@ -2427,9 +2535,9 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
if (!pages || nr_pages > got_pages) {
kfree(vmas);
kfree(pages);
- pages = kmalloc_array(nr_pages, sizeof(struct page *),
+ pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL);
- vmas = kmalloc_array(nr_pages,
+ vmas = kvmalloc_array(nr_pages,
sizeof(struct vm_area_struct *),
GFP_KERNEL);
if (!pages || !vmas) {
@@ -2441,7 +2549,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
got_pages = nr_pages;
}
- imu->bvec = kmalloc_array(nr_pages, sizeof(struct bio_vec),
+ imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
GFP_KERNEL);
ret = -ENOMEM;
if (!imu->bvec) {
@@ -2480,6 +2588,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
}
if (ctx->account_mem)
io_unaccount_mem(ctx->user, nr_pages);
+ kvfree(imu->bvec);
goto err;
}
@@ -2502,12 +2611,12 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
ctx->nr_user_bufs++;
}
- kfree(pages);
- kfree(vmas);
+ kvfree(pages);
+ kvfree(vmas);
return 0;
err:
- kfree(pages);
- kfree(vmas);
+ kvfree(pages);
+ kvfree(vmas);
io_sqe_buffer_unregister(ctx);
return ret;
}
@@ -2545,9 +2654,13 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
__poll_t mask = 0;
poll_wait(file, &ctx->cq_wait, wait);
- /* See comment at the top of this file */
+ /*
+ * synchronizes with barrier from wq_has_sleeper call in
+ * io_commit_cqring
+ */
smp_rmb();
- if (READ_ONCE(ctx->sq_ring->r.tail) + 1 != ctx->cached_sq_head)
+ if (READ_ONCE(ctx->sq_ring->r.tail) - ctx->cached_sq_head !=
+ ctx->sq_ring->ring_entries)
mask |= EPOLLOUT | EPOLLWRNORM;
if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
mask |= EPOLLIN | EPOLLRDNORM;
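/*
 * Worked example for the EPOLLOUT test above (not from the patch):
 * with free-running unsigned counters, the SQ ring is full exactly
 * when tail - head == ring_entries. E.g. ring_entries == 8,
 * cached_sq_head == 0xfffffffe, tail == 0x00000006: tail - head == 8,
 * so the ring is full and EPOLLOUT is withheld. The old
 * "tail + 1 != head" test mishandled this wraparound case.
 */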
@@ -2658,24 +2771,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
mutex_lock(&ctx->uring_lock);
submitted = io_ring_submit(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
-
- if (submitted < 0)
- goto out_ctx;
}
if (flags & IORING_ENTER_GETEVENTS) {
unsigned nr_events = 0;
min_complete = min(min_complete, ctx->cq_entries);
- /*
- * The application could have included the 'to_submit' count
- * in how many events it wanted to wait for. If we failed to
- * submit the desired count, we may need to adjust the number
- * of events to poll/wait for.
- */
- if (submitted < to_submit)
- min_complete = min_t(unsigned, submitted, min_complete);
-
if (ctx->flags & IORING_SETUP_IOPOLL) {
mutex_lock(&ctx->uring_lock);
ret = io_iopoll_check(ctx, &nr_events, min_complete);
@@ -2721,17 +2822,12 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
return -EOVERFLOW;
ctx->sq_sqes = io_mem_alloc(size);
- if (!ctx->sq_sqes) {
- io_mem_free(ctx->sq_ring);
+ if (!ctx->sq_sqes)
return -ENOMEM;
- }
cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
- if (!cq_ring) {
- io_mem_free(ctx->sq_ring);
- io_mem_free(ctx->sq_sqes);
+ if (!cq_ring)
return -ENOMEM;
- }
ctx->cq_ring = cq_ring;
cq_ring->ring_mask = p->cq_entries - 1;
@@ -2902,11 +2998,31 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries,
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
void __user *arg, unsigned nr_args)
+ __releases(ctx->uring_lock)
+ __acquires(ctx->uring_lock)
{
int ret;
+ /*
+ * We're inside the ring mutex; if the ref is already dying, then
+ * someone else killed the ctx or is already going through
+ * io_uring_register().
+ */
+ if (percpu_ref_is_dying(&ctx->refs))
+ return -ENXIO;
+
percpu_ref_kill(&ctx->refs);
+
+ /*
+ * Drop uring mutex before waiting for references to exit. If another
+ * thread is currently inside io_uring_enter() it might need to grab
+ * the uring_lock to make progress. If we hold it here across the drain
+ * wait, then we can deadlock. It's safe to drop the mutex here, since
+ * no new references will come in after we've killed the percpu ref.
+ */
+ mutex_unlock(&ctx->uring_lock);
wait_for_completion(&ctx->ctx_done);
+ mutex_lock(&ctx->uring_lock);
switch (opcode) {
case IORING_REGISTER_BUFFERS:
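/*
 * A consolidated sketch (not part of the patch) of the quiesce pattern
 * described above, assuming the ctx's percpu_ref release callback
 * completes ctx_done once the last reference is gone:
 */
	percpu_ref_kill(&ctx->refs);		/* no new references after this */
	mutex_unlock(&ctx->uring_lock);		/* let io_uring_enter() progress */
	wait_for_completion(&ctx->ctx_done);	/* drain existing references */
	mutex_lock(&ctx->uring_lock);		/* safe: ref can no longer revive */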
diff --git a/fs/iomap.c b/fs/iomap.c
index 97cb9d486a7d..abdd18e404f8 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1589,12 +1589,14 @@ static void iomap_dio_bio_end_io(struct bio *bio)
if (should_dirty) {
bio_check_pages_dirty(bio);
} else {
- struct bio_vec *bvec;
- int i;
- struct bvec_iter_all iter_all;
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+ struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
+ int i;
- bio_for_each_segment_all(bvec, bio, i, iter_all)
- put_page(bvec->bv_page);
+ bio_for_each_segment_all(bvec, bio, i, iter_all)
+ put_page(bvec->bv_page);
+ }
bio_put(bio);
}
}
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 26f8d7e46462..02e0b79753e7 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -113,7 +113,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
nblocks = jbd2_space_needed(journal);
while (jbd2_log_space_left(journal) < nblocks) {
write_unlock(&journal->j_state_lock);
- mutex_lock(&journal->j_checkpoint_mutex);
+ mutex_lock_io(&journal->j_checkpoint_mutex);
/*
* Test again, another process may have checkpointed while we
@@ -276,9 +276,22 @@ restart:
"JBD2: %s: Waiting for Godot: block %llu\n",
journal->j_devname, (unsigned long long) bh->b_blocknr);
+ if (batch_count)
+ __flush_batch(journal, &batch_count);
jbd2_log_start_commit(journal, tid);
+ /*
+ * jbd2_journal_commit_transaction() may want
+ * to take the checkpoint_mutex if JBD2_FLUSHED
+ * is set; jbd2_update_log_tail(), called by
+ * jbd2_journal_commit_transaction(), may also take
+ * the checkpoint_mutex. So we need to temporarily
+ * drop it.
+ */
+ mutex_unlock(&journal->j_checkpoint_mutex);
jbd2_log_wait_commit(journal, tid);
- goto retry;
+ mutex_lock_io(&journal->j_checkpoint_mutex);
+ spin_lock(&journal->j_list_lock);
+ goto restart;
}
if (!buffer_dirty(bh)) {
if (unlikely(buffer_write_io_error(bh)) && !result)
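/*
 * Note on mutex_lock_io() above (an assumption, not stated in the
 * patch): it behaves like mutex_lock() but accounts the sleep time as
 * I/O wait, which fits here because the mutex holder is typically
 * busy flushing buffers to disk.
 */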
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 2eb55c3361a8..efd0ce9489ae 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -694,9 +694,11 @@ void jbd2_journal_commit_transaction(journal_t *journal)
the last tag we set up. */
tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
-
- jbd2_descriptor_block_csum_set(journal, descriptor);
start_journal_io:
+ if (descriptor)
+ jbd2_descriptor_block_csum_set(journal,
+ descriptor);
+
for (i = 0; i < bufs; i++) {
struct buffer_head *bh = wbuf[i];
/*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 8ef6b6daaa7a..382c030cc78b 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -142,22 +142,6 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
return cpu_to_be32(csum);
}
-static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
-{
- if (!jbd2_journal_has_csum_v2or3(j))
- return 1;
-
- return sb->s_checksum == jbd2_superblock_csum(j, sb);
-}
-
-static void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
-{
- if (!jbd2_journal_has_csum_v2or3(j))
- return;
-
- sb->s_checksum = jbd2_superblock_csum(j, sb);
-}
-
/*
* Helper function used to manage commit timeouts
*/
@@ -1356,6 +1340,10 @@ static int journal_reset(journal_t *journal)
return jbd2_journal_start_thread(journal);
}
+/*
+ * This function expects that the caller will have locked the journal
+ * buffer head, and will return with it unlocked
+ */
static int jbd2_write_superblock(journal_t *journal, int write_flags)
{
struct buffer_head *bh = journal->j_sb_buffer;
@@ -1365,7 +1353,6 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
trace_jbd2_write_superblock(journal, write_flags);
if (!(journal->j_flags & JBD2_BARRIER))
write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
- lock_buffer(bh);
if (buffer_write_io_error(bh)) {
/*
* Oh, dear. A previous attempt to write the journal
@@ -1381,7 +1368,8 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
clear_buffer_write_io_error(bh);
set_buffer_uptodate(bh);
}
- jbd2_superblock_csum_set(journal, sb);
+ if (jbd2_journal_has_csum_v2or3(journal))
+ sb->s_checksum = jbd2_superblock_csum(journal, sb);
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
ret = submit_bh(REQ_OP_WRITE, write_flags, bh);
@@ -1424,6 +1412,7 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
tail_block, tail_tid);
+ lock_buffer(journal->j_sb_buffer);
sb->s_sequence = cpu_to_be32(tail_tid);
sb->s_start = cpu_to_be32(tail_block);
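/*
 * A sketch (not from the patch) of the calling convention the comment
 * above establishes: the caller locks the superblock buffer, mutates
 * the superblock fields under that lock, and jbd2_write_superblock()
 * returns with the buffer unlocked:
 */
	lock_buffer(journal->j_sb_buffer);
	sb->s_sequence = cpu_to_be32(tail_tid);		/* mutate sb fields */
	sb->s_start = cpu_to_be32(tail_block);
	jbd2_write_superblock(journal, write_flags);	/* unlocks the buffer */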
@@ -1454,18 +1443,17 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
journal_superblock_t *sb = journal->j_superblock;
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
- read_lock(&journal->j_state_lock);
- /* Is it already empty? */
- if (sb->s_start == 0) {
- read_unlock(&journal->j_state_lock);
+ lock_buffer(journal->j_sb_buffer);
+ if (sb->s_start == 0) { /* Is it already empty? */
+ unlock_buffer(journal->j_sb_buffer);
return;
}
+
jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
journal->j_tail_sequence);
sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
sb->s_start = cpu_to_be32(0);
- read_unlock(&journal->j_state_lock);
jbd2_write_superblock(journal, write_op);
@@ -1488,9 +1476,8 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
journal_superblock_t *sb = journal->j_superblock;
int errcode;
- read_lock(&journal->j_state_lock);
+ lock_buffer(journal->j_sb_buffer);
errcode = journal->j_errno;
- read_unlock(&journal->j_state_lock);
if (errcode == -ESHUTDOWN)
errcode = 0;
jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
@@ -1595,17 +1582,18 @@ static int journal_get_superblock(journal_t *journal)
}
}
- /* Check superblock checksum */
- if (!jbd2_superblock_csum_verify(journal, sb)) {
- printk(KERN_ERR "JBD2: journal checksum error\n");
- err = -EFSBADCRC;
- goto out;
- }
+ if (jbd2_journal_has_csum_v2or3(journal)) {
+ /* Check superblock checksum */
+ if (sb->s_checksum != jbd2_superblock_csum(journal, sb)) {
+ printk(KERN_ERR "JBD2: journal checksum error\n");
+ err = -EFSBADCRC;
+ goto out;
+ }
- /* Precompute checksum seed for all metadata */
- if (jbd2_journal_has_csum_v2or3(journal))
+ /* Precompute checksum seed for all metadata */
journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
sizeof(sb->s_uuid));
+ }
set_buffer_verified(bh);
@@ -1894,28 +1882,27 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
sb = journal->j_superblock;
+ /* Load the checksum driver if necessary */
+ if ((journal->j_chksum_driver == NULL) &&
+ INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
+ journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+ if (IS_ERR(journal->j_chksum_driver)) {
+ printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
+ journal->j_chksum_driver = NULL;
+ return 0;
+ }
+ /* Precompute checksum seed for all metadata */
+ journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+ sizeof(sb->s_uuid));
+ }
+
+ lock_buffer(journal->j_sb_buffer);
+
/* If enabling v3 checksums, update superblock */
if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
sb->s_feature_compat &=
~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
-
- /* Load the checksum driver */
- if (journal->j_chksum_driver == NULL) {
- journal->j_chksum_driver = crypto_alloc_shash("crc32c",
- 0, 0);
- if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD2: Cannot load crc32c "
- "driver.\n");
- journal->j_chksum_driver = NULL;
- return 0;
- }
-
- /* Precompute checksum seed for all metadata */
- journal->j_csum_seed = jbd2_chksum(journal, ~0,
- sb->s_uuid,
- sizeof(sb->s_uuid));
- }
}
/* If enabling v1 checksums, downgrade superblock */
@@ -1927,6 +1914,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
sb->s_feature_compat |= cpu_to_be32(compat);
sb->s_feature_ro_compat |= cpu_to_be32(ro);
sb->s_feature_incompat |= cpu_to_be32(incompat);
+ unlock_buffer(journal->j_sb_buffer);
return 1;
#undef COMPAT_FEATURE_ON
@@ -2067,7 +2055,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
err = jbd2_journal_skip_recovery(journal);
if (write) {
/* Lock to make assertions happy... */
- mutex_lock(&journal->j_checkpoint_mutex);
+ mutex_lock_io(&journal->j_checkpoint_mutex);
jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index cc35537232f2..f940d31c2adc 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -63,7 +63,7 @@ void jbd2_journal_free_transaction(transaction_t *transaction)
/*
* jbd2_get_transaction: obtain a new transaction_t object.
*
- * Simply allocate and initialise a new transaction. Create it in
+ * Simply initialise a new transaction. Set it up in
* RUNNING state and add it to the current journal (which should not
* have an existing running transaction: we only make a new transaction
* once we have started to commit the old one).
@@ -75,8 +75,8 @@ void jbd2_journal_free_transaction(transaction_t *transaction)
*
*/
-static transaction_t *
-jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
+static void jbd2_get_transaction(journal_t *journal,
+ transaction_t *transaction)
{
transaction->t_journal = journal;
transaction->t_state = T_RUNNING;
@@ -100,8 +100,6 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
transaction->t_max_wait = 0;
transaction->t_start = jiffies;
transaction->t_requested = 0;
-
- return transaction;
}
/*
@@ -1252,11 +1250,12 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
char *committed_data = NULL;
- JBUFFER_TRACE(jh, "entry");
if (jbd2_write_access_granted(handle, bh, true))
return 0;
jh = jbd2_journal_add_journal_head(bh);
+ JBUFFER_TRACE(jh, "entry");
+
/*
* Do this first --- it can drop the journal lock, so we want to
* make sure that obtaining the committed_data is done
@@ -1367,15 +1366,17 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
if (is_handle_aborted(handle))
return -EROFS;
- if (!buffer_jbd(bh)) {
- ret = -EUCLEAN;
- goto out;
- }
+ if (!buffer_jbd(bh))
+ return -EUCLEAN;
+
/*
* We don't grab jh reference here since the buffer must be part
* of the running transaction.
*/
jh = bh2jh(bh);
+ jbd_debug(5, "journal_head %p\n", jh);
+ JBUFFER_TRACE(jh, "entry");
+
/*
* This and the following assertions are unreliable since we may see jh
* in inconsistent state unless we grab bh_state lock. But this is
@@ -1409,9 +1410,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
}
journal = transaction->t_journal;
- jbd_debug(5, "journal_head %p\n", jh);
- JBUFFER_TRACE(jh, "entry");
-
jbd_lock_bh_state(bh);
if (jh->b_modified == 0) {
@@ -1597,9 +1595,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
__jbd2_journal_unfile_buffer(jh);
if (!buffer_jbd(bh)) {
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
- __bforget(bh);
- goto drop;
+ goto not_jbd;
}
}
spin_unlock(&journal->j_list_lock);
@@ -1609,14 +1605,21 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
/* However, if the buffer is still owned by a prior
* (committing) transaction, we can't drop it yet... */
JBUFFER_TRACE(jh, "belongs to older transaction");
- /* ... but we CAN drop it from the new transaction if we
- * have also modified it since the original commit. */
+ /* ... but we CAN drop it from the new transaction by
+ * marking the buffer as freed and setting j_next_transaction
+ * to the new transaction, so that not only does the commit code
+ * know it should clear dirty bits when it is done with the
+ * buffer, but the buffer can also be checkpointed only
+ * after the new transaction commits. */
- if (jh->b_next_transaction) {
- J_ASSERT(jh->b_next_transaction == transaction);
+ set_buffer_freed(bh);
+
+ if (!jh->b_next_transaction) {
spin_lock(&journal->j_list_lock);
- jh->b_next_transaction = NULL;
+ jh->b_next_transaction = transaction;
spin_unlock(&journal->j_list_lock);
+ } else {
+ J_ASSERT(jh->b_next_transaction == transaction);
/*
* only drop a reference if this transaction modified
@@ -1625,9 +1628,40 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
if (was_modified)
drop_reserve = 1;
}
+ } else {
+ /*
+ * Finally, if the buffer does not belong to any
+ * transaction, we can just drop it now if it has no
+ * checkpoint.
+ */
+ spin_lock(&journal->j_list_lock);
+ if (!jh->b_cp_transaction) {
+ JBUFFER_TRACE(jh, "belongs to none transaction");
+ spin_unlock(&journal->j_list_lock);
+ goto not_jbd;
+ }
+
+ /*
+ * Otherwise, if the buffer has been written to disk,
+ * it is safe to remove the checkpoint and drop it.
+ */
+ if (!buffer_dirty(bh)) {
+ __jbd2_journal_remove_checkpoint(jh);
+ spin_unlock(&journal->j_list_lock);
+ goto not_jbd;
+ }
+
+ /*
+ * The buffer has not yet been written to disk; we should
+ * attach this buffer to the current transaction so that the
+ * buffer can be checkpointed only after the current
+ * transaction commits.
+ */
+ clear_buffer_dirty(bh);
+ __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
+ spin_unlock(&journal->j_list_lock);
}
-not_jbd:
jbd_unlock_bh_state(bh);
__brelse(bh);
drop:
@@ -1636,6 +1670,11 @@ drop:
handle->h_buffer_credits++;
}
return err;
+
+not_jbd:
+ jbd_unlock_bh_state(bh);
+ __bforget(bh);
+ goto drop;
}
/**
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 389ea53ea487..bccfc40b3a74 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
- if (f->target) {
- kfree(f->target);
- f->target = NULL;
- }
-
fds = f->dents;
while(fds) {
fd = fds;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index bb6ae387469f..05d892c79339 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
static void jffs2_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+ kfree(f->target);
+ kmem_cache_free(jffs2_inode_cachep, f);
}
static void jffs2_destroy_inode(struct inode *inode)
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index dba810cd83b1..0b7d197a904c 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -17,6 +17,7 @@
#include <linux/xattr.h>
#include <linux/kernfs.h>
+#include <linux/fs_context.h>
struct kernfs_iattrs {
struct iattr ia_iattr;
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index f3ac352699cf..9a4646eecb71 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -22,16 +22,6 @@
struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
-static int kernfs_sop_remount_fs(struct super_block *sb, int *flags, char *data)
-{
- struct kernfs_root *root = kernfs_info(sb)->root;
- struct kernfs_syscall_ops *scops = root->syscall_ops;
-
- if (scops && scops->remount_fs)
- return scops->remount_fs(root, flags, data);
- return 0;
-}
-
static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry)
{
struct kernfs_root *root = kernfs_root(kernfs_dentry_node(dentry));
@@ -60,7 +50,6 @@ const struct super_operations kernfs_sops = {
.drop_inode = generic_delete_inode,
.evict_inode = kernfs_evict_inode,
- .remount_fs = kernfs_sop_remount_fs,
.show_options = kernfs_sop_show_options,
.show_path = kernfs_sop_show_path,
};
@@ -222,7 +211,7 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
} while (true);
}
-static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
+static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *kfc)
{
struct kernfs_super_info *info = kernfs_info(sb);
struct inode *inode;
@@ -233,7 +222,7 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
- sb->s_magic = magic;
+ sb->s_magic = kfc->magic;
sb->s_op = &kernfs_sops;
sb->s_xattr = kernfs_xattr_handlers;
if (info->root->flags & KERNFS_ROOT_SUPPORT_EXPORTOP)
@@ -263,21 +252,20 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
return 0;
}
-static int kernfs_test_super(struct super_block *sb, void *data)
+static int kernfs_test_super(struct super_block *sb, struct fs_context *fc)
{
struct kernfs_super_info *sb_info = kernfs_info(sb);
- struct kernfs_super_info *info = data;
+ struct kernfs_super_info *info = fc->s_fs_info;
return sb_info->root == info->root && sb_info->ns == info->ns;
}
-static int kernfs_set_super(struct super_block *sb, void *data)
+static int kernfs_set_super(struct super_block *sb, struct fs_context *fc)
{
- int error;
- error = set_anon_super(sb, data);
- if (!error)
- sb->s_fs_info = data;
- return error;
+ struct kernfs_fs_context *kfc = fc->fs_private;
+
+ kfc->ns_tag = NULL;
+ return set_anon_super_fc(sb, fc);
}
/**
@@ -294,63 +282,60 @@ const void *kernfs_super_ns(struct super_block *sb)
}
/**
- * kernfs_mount_ns - kernfs mount helper
- * @fs_type: file_system_type of the fs being mounted
- * @flags: mount flags specified for the mount
- * @root: kernfs_root of the hierarchy being mounted
- * @magic: file system specific magic number
- * @new_sb_created: tell the caller if we allocated a new superblock
- * @ns: optional namespace tag of the mount
+ * kernfs_get_tree - kernfs filesystem access/retrieval helper
+ * @fc: The filesystem context.
*
- * This is to be called from each kernfs user's file_system_type->mount()
- * implementation, which should pass through the specified @fs_type and
- * @flags, and specify the hierarchy and namespace tag to mount via @root
- * and @ns, respectively.
- *
- * The return value can be passed to the vfs layer verbatim.
+ * This is to be called from each kernfs user's fs_context->ops->get_tree()
+ * implementation, which should set the specified ->@fs_type and ->@flags, and
+ * specify the hierarchy and namespace tag to mount via ->@root and ->@ns_tag,
+ * respectively.
*/
-struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created, const void *ns)
+int kernfs_get_tree(struct fs_context *fc)
{
+ struct kernfs_fs_context *kfc = fc->fs_private;
struct super_block *sb;
struct kernfs_super_info *info;
int error;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- info->root = root;
- info->ns = ns;
+ info->root = kfc->root;
+ info->ns = kfc->ns_tag;
INIT_LIST_HEAD(&info->node);
- sb = sget_userns(fs_type, kernfs_test_super, kernfs_set_super, flags,
- &init_user_ns, info);
- if (IS_ERR(sb) || sb->s_fs_info != info)
- kfree(info);
+ fc->s_fs_info = info;
+ sb = sget_fc(fc, kernfs_test_super, kernfs_set_super);
if (IS_ERR(sb))
- return ERR_CAST(sb);
-
- if (new_sb_created)
- *new_sb_created = !sb->s_root;
+ return PTR_ERR(sb);
if (!sb->s_root) {
struct kernfs_super_info *info = kernfs_info(sb);
- error = kernfs_fill_super(sb, magic);
+ kfc->new_sb_created = true;
+
+ error = kernfs_fill_super(sb, kfc);
if (error) {
deactivate_locked_super(sb);
- return ERR_PTR(error);
+ return error;
}
sb->s_flags |= SB_ACTIVE;
mutex_lock(&kernfs_mutex);
- list_add(&info->node, &root->supers);
+ list_add(&info->node, &info->root->supers);
mutex_unlock(&kernfs_mutex);
}
- return dget(sb->s_root);
+ fc->root = dget(sb->s_root);
+ return 0;
+}
+
+void kernfs_free_fs_context(struct fs_context *fc)
+{
+ /* Note that we don't deal with kfc->ns_tag here. */
+ kfree(fc->s_fs_info);
+ fc->s_fs_info = NULL;
}
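/*
 * A hypothetical kernfs user's ->get_tree() under the new API (a
 * sketch, not part of the patch; "foofs", foofs_root and FOOFS_MAGIC
 * are made-up names):
 */
static int foofs_get_tree(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	kfc->root = foofs_root;		/* hierarchy to mount */
	kfc->ns_tag = NULL;		/* no namespace tag */
	kfc->magic = FOOFS_MAGIC;	/* becomes sb->s_magic */
	return kernfs_get_tree(fc);
}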
/**
@@ -377,36 +362,6 @@ void kernfs_kill_sb(struct super_block *sb)
kfree(info);
}
-/**
- * kernfs_pin_sb: try to pin the superblock associated with a kernfs_root
- * @kernfs_root: the kernfs_root in question
- * @ns: the namespace tag
- *
- * Pin the superblock so the superblock won't be destroyed in subsequent
- * operations. This can be used to block ->kill_sb() which may be useful
- * for kernfs users which dynamically manage superblocks.
- *
- * Returns NULL if there's no superblock associated to this kernfs_root, or
- * -EINVAL if the superblock is being freed.
- */
-struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns)
-{
- struct kernfs_super_info *info;
- struct super_block *sb = NULL;
-
- mutex_lock(&kernfs_mutex);
- list_for_each_entry(info, &root->supers, node) {
- if (info->ns == ns) {
- sb = info->sb;
- if (!atomic_inc_not_zero(&info->sb->s_active))
- sb = ERR_PTR(-EINVAL);
- break;
- }
- }
- mutex_unlock(&kernfs_mutex);
- return sb;
-}
-
void __init kernfs_init(void)
{
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
index 214a2fa1f1e3..7df6324ccb8a 100644
--- a/fs/lockd/clnt4xdr.c
+++ b/fs/lockd/clnt4xdr.c
@@ -75,17 +75,6 @@ static void nlm4_compute_offsets(const struct nlm_lock *lock,
}
/*
- * Handle decode buffer overflows out-of-line.
- */
-static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
-{
- dprintk("lockd: %s prematurely hit the end of our receive buffer. "
- "Remaining buffer length is %tu words.\n",
- func, xdr->end - xdr->p);
-}
-
-
-/*
* Encode/decode NLMv4 basic data types
*
* Basic NLMv4 data types are defined in Appendix II, section 6.1.4
@@ -176,7 +165,6 @@ out_size:
dprintk("NFS: returned cookie was too long: %u\n", length);
return -EIO;
out_overflow:
- print_overflow_msg(__func__, xdr);
return -EIO;
}
@@ -236,7 +224,6 @@ out_bad_xdr:
__func__, be32_to_cpup(p));
return -EIO;
out_overflow:
- print_overflow_msg(__func__, xdr);
return -EIO;
}
@@ -309,7 +296,6 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
out:
return error;
out_overflow:
- print_overflow_msg(__func__, xdr);
return -EIO;
}
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
index 747b9c8c940a..4df62f635529 100644
--- a/fs/lockd/clntxdr.c
+++ b/fs/lockd/clntxdr.c
@@ -71,17 +71,6 @@ static void nlm_compute_offsets(const struct nlm_lock *lock,
}
/*
- * Handle decode buffer overflows out-of-line.
- */
-static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
-{
- dprintk("lockd: %s prematurely hit the end of our receive buffer. "
- "Remaining buffer length is %tu words.\n",
- func, xdr->end - xdr->p);
-}
-
-
-/*
* Encode/decode NLMv3 basic data types
*
* Basic NLMv3 data types are not defined in an IETF standards
@@ -173,7 +162,6 @@ out_size:
dprintk("NFS: returned cookie was too long: %u\n", length);
return -EIO;
out_overflow:
- print_overflow_msg(__func__, xdr);
return -EIO;
}
@@ -231,7 +219,6 @@ out_enum:
__func__, be32_to_cpup(p));
return -EIO;
out_overflow:
- print_overflow_msg(__func__, xdr);
return -EIO;
}
@@ -303,7 +290,6 @@ static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result)
out:
return error;
out_overflow:
- print_overflow_msg(__func__, xdr);
return -EIO;
}
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 93fb7cf0b92b..f0b5c987d6ae 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host)
WARN_ON_ONCE(host->h_server);
- if (refcount_dec_and_test(&host->h_count)) {
+ if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) {
WARN_ON_ONCE(!list_empty(&host->h_lockowners));
WARN_ON_ONCE(!list_empty(&host->h_granted));
WARN_ON_ONCE(!list_empty(&host->h_reclaim));
- mutex_lock(&nlm_host_mutex);
nlm_destroy_host_locked(host);
mutex_unlock(&nlm_host_mutex);
}
diff --git a/fs/locks.c b/fs/locks.c
index eaa1cfaf73b0..71d0c6c2aac5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1160,6 +1160,11 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
*/
error = -EDEADLK;
spin_lock(&blocked_lock_lock);
+ /*
+ * Ensure that we don't find any locks blocked on this
+ * request during deadlock detection.
+ */
+ __locks_wake_up_blocks(request);
if (likely(!posix_locks_deadlock(request, fl))) {
error = FILE_LOCK_DEFERRED;
__locks_insert_block(fl, request,
diff --git a/fs/mount.h b/fs/mount.h
index f39bc9da4d73..6250de544760 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -146,3 +146,8 @@ static inline bool is_local_mountpoint(struct dentry *dentry)
return __is_local_mountpoint(dentry);
}
+
+static inline bool is_anon_ns(struct mnt_namespace *ns)
+{
+ return ns->seq == 0;
+}
diff --git a/fs/namei.c b/fs/namei.c
index 0a8c5c27f90e..dede0147b3f6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2331,8 +2331,8 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
return err;
}
-static int filename_lookup(int dfd, struct filename *name, unsigned flags,
- struct path *path, struct path *root)
+int filename_lookup(int dfd, struct filename *name, unsigned flags,
+ struct path *path, struct path *root)
{
int retval;
struct nameidata nd;
@@ -3460,6 +3460,7 @@ struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
inode->i_state |= I_LINKABLE;
spin_unlock(&inode->i_lock);
}
+ ima_post_create_tmpfile(inode);
return child;
out_err:
diff --git a/fs/namespace.c b/fs/namespace.c
index 98a8c182af4f..c9cab307fa77 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -27,6 +27,7 @@
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
+#include <linux/fs_context.h>
#include "pnode.h"
#include "internal.h"
@@ -940,38 +941,81 @@ static struct mount *skip_mnt_tree(struct mount *p)
return p;
}
-struct vfsmount *
-vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
+/**
+ * vfs_create_mount - Create a mount for a configured superblock
+ * @fc: The configuration context with the superblock attached
+ *
+ * Create a mount to an already configured superblock. If necessary, the
+ * caller should invoke vfs_get_tree() before calling this.
+ *
+ * Note that this does not attach the mount to anything.
+ */
+struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
struct mount *mnt;
- struct dentry *root;
- if (!type)
- return ERR_PTR(-ENODEV);
+ if (!fc->root)
+ return ERR_PTR(-EINVAL);
- mnt = alloc_vfsmnt(name);
+ mnt = alloc_vfsmnt(fc->source ?: "none");
if (!mnt)
return ERR_PTR(-ENOMEM);
- if (flags & SB_KERNMOUNT)
+ if (fc->sb_flags & SB_KERNMOUNT)
mnt->mnt.mnt_flags = MNT_INTERNAL;
- root = mount_fs(type, flags, name, data);
- if (IS_ERR(root)) {
- mnt_free_id(mnt);
- free_vfsmnt(mnt);
- return ERR_CAST(root);
- }
+ atomic_inc(&fc->root->d_sb->s_active);
+ mnt->mnt.mnt_sb = fc->root->d_sb;
+ mnt->mnt.mnt_root = dget(fc->root);
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
+ mnt->mnt_parent = mnt;
- mnt->mnt.mnt_root = root;
- mnt->mnt.mnt_sb = root->d_sb;
- mnt->mnt_mountpoint = mnt->mnt.mnt_root;
- mnt->mnt_parent = mnt;
lock_mount_hash();
- list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
+ list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
unlock_mount_hash();
return &mnt->mnt;
}
+EXPORT_SYMBOL(vfs_create_mount);
+
+struct vfsmount *fc_mount(struct fs_context *fc)
+{
+ int err = vfs_get_tree(fc);
+ if (!err) {
+ up_write(&fc->root->d_sb->s_umount);
+ return vfs_create_mount(fc);
+ }
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(fc_mount);
+
+struct vfsmount *vfs_kern_mount(struct file_system_type *type,
+ int flags, const char *name,
+ void *data)
+{
+ struct fs_context *fc;
+ struct vfsmount *mnt;
+ int ret = 0;
+
+ if (!type)
+ return ERR_PTR(-EINVAL);
+
+ fc = fs_context_for_mount(type, flags);
+ if (IS_ERR(fc))
+ return ERR_CAST(fc);
+
+ if (name)
+ ret = vfs_parse_fs_string(fc, "source",
+ name, strlen(name));
+ if (!ret)
+ ret = parse_monolithic_mount_data(fc, data);
+ if (!ret)
+ mnt = fc_mount(fc);
+ else
+ mnt = ERR_PTR(ret);
+
+ put_fs_context(fc);
+ return mnt;
+}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
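/*
 * Usage sketch (not part of the patch): a kernel-internal mount with
 * the fs_context API decomposes into the steps vfs_kern_mount()
 * performs above:
 */
	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);
	ret = vfs_parse_fs_string(fc, "source", name, strlen(name));
	mnt = ret ? ERR_PTR(ret) : fc_mount(fc);	/* get_tree + create mount */
	put_fs_context(fc);
	return mnt;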
struct vfsmount *
@@ -1013,27 +1057,6 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
mnt->mnt.mnt_flags = old->mnt.mnt_flags;
mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
- /* Don't allow unprivileged users to change mount flags */
- if (flag & CL_UNPRIVILEGED) {
- mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
-
- if (mnt->mnt.mnt_flags & MNT_READONLY)
- mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
-
- if (mnt->mnt.mnt_flags & MNT_NODEV)
- mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
-
- if (mnt->mnt.mnt_flags & MNT_NOSUID)
- mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
-
- if (mnt->mnt.mnt_flags & MNT_NOEXEC)
- mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
- }
-
- /* Don't allow unprivileged users to reveal what is under a mount */
- if ((flag & CL_UNPRIVILEGED) &&
- (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
- mnt->mnt.mnt_flags |= MNT_LOCKED;
atomic_inc(&sb->s_active);
mnt->mnt.mnt_sb = sb;
@@ -1464,6 +1487,29 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
static void shrink_submounts(struct mount *mnt);
+static int do_umount_root(struct super_block *sb)
+{
+ int ret = 0;
+
+ down_write(&sb->s_umount);
+ if (!sb_rdonly(sb)) {
+ struct fs_context *fc;
+
+ fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
+ SB_RDONLY);
+ if (IS_ERR(fc)) {
+ ret = PTR_ERR(fc);
+ } else {
+ ret = parse_monolithic_mount_data(fc, NULL);
+ if (!ret)
+ ret = reconfigure_super(fc);
+ put_fs_context(fc);
+ }
+ }
+ up_write(&sb->s_umount);
+ return ret;
+}
+
static int do_umount(struct mount *mnt, int flags)
{
struct super_block *sb = mnt->mnt.mnt_sb;
@@ -1529,11 +1575,7 @@ static int do_umount(struct mount *mnt, int flags)
*/
if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
return -EPERM;
- down_write(&sb->s_umount);
- if (!sb_rdonly(sb))
- retval = do_remount_sb(sb, SB_RDONLY, NULL, 0);
- up_write(&sb->s_umount);
- return retval;
+ return do_umount_root(sb);
}
namespace_lock();
@@ -1839,6 +1881,33 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
return 0;
}
+static void lock_mnt_tree(struct mount *mnt)
+{
+ struct mount *p;
+
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
+ int flags = p->mnt.mnt_flags;
+ /* Don't allow unprivileged users to change mount flags */
+ flags |= MNT_LOCK_ATIME;
+
+ if (flags & MNT_READONLY)
+ flags |= MNT_LOCK_READONLY;
+
+ if (flags & MNT_NODEV)
+ flags |= MNT_LOCK_NODEV;
+
+ if (flags & MNT_NOSUID)
+ flags |= MNT_LOCK_NOSUID;
+
+ if (flags & MNT_NOEXEC)
+ flags |= MNT_LOCK_NOEXEC;
+ /* Don't allow unprivileged users to reveal what is under a mount */
+ if (list_empty(&p->mnt_expire))
+ flags |= MNT_LOCKED;
+ p->mnt.mnt_flags = flags;
+ }
+}
+
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
struct mount *p;
@@ -1956,6 +2025,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
struct mountpoint *dest_mp,
struct path *parent_path)
{
+ struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
HLIST_HEAD(tree_list);
struct mnt_namespace *ns = dest_mnt->mnt_ns;
struct mountpoint *smp;
@@ -2006,6 +2076,9 @@ static int attach_recursive_mnt(struct mount *source_mnt,
child->mnt_mountpoint);
if (q)
mnt_change_mountpoint(child, smp, q);
+ /* Notice when we are propagating across user namespaces */
+ if (child->mnt_parent->mnt_ns->user_ns != user_ns)
+ lock_mnt_tree(child);
commit_tree(child);
}
put_mountpoint(smp);
@@ -2313,7 +2386,7 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
int err;
struct super_block *sb = path->mnt->mnt_sb;
struct mount *mnt = real_mount(path->mnt);
- void *sec_opts = NULL;
+ struct fs_context *fc;
if (!check_mnt(mnt))
return -EINVAL;
@@ -2324,24 +2397,22 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
if (!can_change_locked_flags(mnt, mnt_flags))
return -EPERM;
- if (data && !(sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)) {
- err = security_sb_eat_lsm_opts(data, &sec_opts);
- if (err)
- return err;
- }
- err = security_sb_remount(sb, sec_opts);
- security_free_mnt_opts(&sec_opts);
- if (err)
- return err;
+ fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
- down_write(&sb->s_umount);
- err = -EPERM;
- if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
- err = do_remount_sb(sb, sb_flags, data, 0);
- if (!err)
- set_mount_attributes(mnt, mnt_flags);
+ err = parse_monolithic_mount_data(fc, data);
+ if (!err) {
+ down_write(&sb->s_umount);
+ err = -EPERM;
+ if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
+ err = reconfigure_super(fc);
+ if (!err)
+ set_mount_attributes(mnt, mnt_flags);
+ }
+ up_write(&sb->s_umount);
}
- up_write(&sb->s_umount);
+ put_fs_context(fc);
return err;
}
@@ -2425,29 +2496,6 @@ out:
return err;
}
-static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
-{
- int err;
- const char *subtype = strchr(fstype, '.');
- if (subtype) {
- subtype++;
- err = -EINVAL;
- if (!subtype[0])
- goto err;
- } else
- subtype = "";
-
- mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
- err = -ENOMEM;
- if (!mnt->mnt_sb->s_subtype)
- goto err;
- return mnt;
-
- err:
- mntput(mnt);
- return ERR_PTR(err);
-}
-
/*
* add a mount into a namespace's mount tree
*/
@@ -2492,7 +2540,39 @@ unlock:
return err;
}
-static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags);
+static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
+
+/*
+ * Create a new mount using a superblock configuration and request it
+ * be added to the namespace tree.
+ */
+static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
+ unsigned int mnt_flags)
+{
+ struct vfsmount *mnt;
+ struct super_block *sb = fc->root->d_sb;
+ int error;
+
+ error = security_sb_kern_mount(sb);
+ if (!error && mount_too_revealing(sb, &mnt_flags))
+ error = -EPERM;
+
+ if (unlikely(error)) {
+ fc_drop_locked(fc);
+ return error;
+ }
+
+ up_write(&sb->s_umount);
+
+ mnt = vfs_create_mount(fc);
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
+
+ error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
+ if (error < 0)
+ mntput(mnt);
+ return error;
+}
/*
* create a new mount for userspace and request it to be added into the
@@ -2502,8 +2582,9 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
int mnt_flags, const char *name, void *data)
{
struct file_system_type *type;
- struct vfsmount *mnt;
- int err;
+ struct fs_context *fc;
+ const char *subtype = NULL;
+ int err = 0;
if (!fstype)
return -EINVAL;
@@ -2512,23 +2593,37 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
if (!type)
return -ENODEV;
- mnt = vfs_kern_mount(type, sb_flags, name, data);
- if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
- !mnt->mnt_sb->s_subtype)
- mnt = fs_set_subtype(mnt, fstype);
+ if (type->fs_flags & FS_HAS_SUBTYPE) {
+ subtype = strchr(fstype, '.');
+ if (subtype) {
+ subtype++;
+ if (!*subtype) {
+ put_filesystem(type);
+ return -EINVAL;
+ }
+ } else {
+ subtype = "";
+ }
+ }
+ fc = fs_context_for_mount(type, sb_flags);
put_filesystem(type);
- if (IS_ERR(mnt))
- return PTR_ERR(mnt);
-
- if (mount_too_revealing(mnt, &mnt_flags)) {
- mntput(mnt);
- return -EPERM;
- }
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ if (subtype)
+ err = vfs_parse_fs_string(fc, "subtype",
+ subtype, strlen(subtype));
+ if (!err && name)
+ err = vfs_parse_fs_string(fc, "source", name, strlen(name));
+ if (!err)
+ err = parse_monolithic_mount_data(fc, data);
+ if (!err)
+ err = vfs_get_tree(fc);
+ if (!err)
+ err = do_new_mount_fc(fc, path, mnt_flags);
- err = do_add_mount(real_mount(mnt), path, mnt_flags);
- if (err)
- mntput(mnt);
+ put_fs_context(fc);
return err;
}
@@ -2863,7 +2958,8 @@ static void dec_mnt_namespaces(struct ucounts *ucounts)
static void free_mnt_ns(struct mnt_namespace *ns)
{
- ns_free_inum(&ns->ns);
+ if (!is_anon_ns(ns))
+ ns_free_inum(&ns->ns);
dec_mnt_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
kfree(ns);
@@ -2878,7 +2974,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
*/
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
-static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
{
struct mnt_namespace *new_ns;
struct ucounts *ucounts;
@@ -2888,28 +2984,27 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
if (!ucounts)
return ERR_PTR(-ENOSPC);
- new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
+ new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
if (!new_ns) {
dec_mnt_namespaces(ucounts);
return ERR_PTR(-ENOMEM);
}
- ret = ns_alloc_inum(&new_ns->ns);
- if (ret) {
- kfree(new_ns);
- dec_mnt_namespaces(ucounts);
- return ERR_PTR(ret);
+ if (!anon) {
+ ret = ns_alloc_inum(&new_ns->ns);
+ if (ret) {
+ kfree(new_ns);
+ dec_mnt_namespaces(ucounts);
+ return ERR_PTR(ret);
+ }
}
new_ns->ns.ops = &mntns_operations;
- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
+ if (!anon)
+ new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
atomic_set(&new_ns->count, 1);
- new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
init_waitqueue_head(&new_ns->poll);
- new_ns->event = 0;
new_ns->user_ns = get_user_ns(user_ns);
new_ns->ucounts = ucounts;
- new_ns->mounts = 0;
- new_ns->pending_mounts = 0;
return new_ns;
}
@@ -2933,7 +3028,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
old = ns->root;
- new_ns = alloc_mnt_ns(user_ns);
+ new_ns = alloc_mnt_ns(user_ns, false);
if (IS_ERR(new_ns))
return new_ns;
@@ -2941,13 +3036,18 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
/* First pass: copy the tree topology */
copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
if (user_ns != ns->user_ns)
- copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
+ copy_flags |= CL_SHARED_TO_SLAVE;
new = copy_tree(old, old->mnt.mnt_root, copy_flags);
if (IS_ERR(new)) {
namespace_unlock();
free_mnt_ns(new_ns);
return ERR_CAST(new);
}
+ if (user_ns != ns->user_ns) {
+ lock_mount_hash();
+ lock_mnt_tree(new);
+ unlock_mount_hash();
+ }
new_ns->root = new;
list_add_tail(&new_ns->list, &new->mnt_list);
@@ -2988,37 +3088,25 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
return new_ns;
}
-/**
- * create_mnt_ns - creates a private namespace and adds a root filesystem
- * @mnt: pointer to the new root filesystem mountpoint
- */
-static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
-{
- struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
- if (!IS_ERR(new_ns)) {
- struct mount *mnt = real_mount(m);
- mnt->mnt_ns = new_ns;
- new_ns->root = mnt;
- new_ns->mounts++;
- list_add(&mnt->mnt_list, &new_ns->list);
- } else {
- mntput(m);
- }
- return new_ns;
-}
-
-struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+struct dentry *mount_subtree(struct vfsmount *m, const char *name)
{
+ struct mount *mnt = real_mount(m);
struct mnt_namespace *ns;
struct super_block *s;
struct path path;
int err;
- ns = create_mnt_ns(mnt);
- if (IS_ERR(ns))
+ ns = alloc_mnt_ns(&init_user_ns, true);
+ if (IS_ERR(ns)) {
+ mntput(m);
return ERR_CAST(ns);
+ }
+ mnt->mnt_ns = ns;
+ ns->root = mnt;
+ ns->mounts++;
+ list_add(&mnt->mnt_list, &ns->list);
- err = vfs_path_lookup(mnt->mnt_root, mnt,
+ err = vfs_path_lookup(m->mnt_root, m,
name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
put_mnt_ns(ns);
@@ -3228,6 +3316,7 @@ out0:
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
+ struct mount *m;
struct mnt_namespace *ns;
struct path root;
struct file_system_type *type;
@@ -3240,10 +3329,14 @@ static void __init init_mount_tree(void)
if (IS_ERR(mnt))
panic("Can't create rootfs");
- ns = create_mnt_ns(mnt);
+ ns = alloc_mnt_ns(&init_user_ns, false);
if (IS_ERR(ns))
panic("Can't allocate initial namespace");
-
+ m = real_mount(mnt);
+ m->mnt_ns = ns;
+ ns->root = m;
+ ns->mounts = 1;
+ list_add(&m->mnt_list, &ns->list);
init_task.nsproxy->mnt_ns = ns;
get_mnt_ns(ns);
@@ -3297,10 +3390,10 @@ void put_mnt_ns(struct mnt_namespace *ns)
free_mnt_ns(ns);
}
-struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
+struct vfsmount *kern_mount(struct file_system_type *type)
{
struct vfsmount *mnt;
- mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, data);
+ mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
if (!IS_ERR(mnt)) {
/*
* it is a longterm mount, don't release mnt until
@@ -3310,7 +3403,7 @@ struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
}
return mnt;
}
-EXPORT_SYMBOL_GPL(kern_mount_data);
+EXPORT_SYMBOL_GPL(kern_mount);
void kern_unmount(struct vfsmount *mnt)
{
@@ -3352,7 +3445,8 @@ bool current_chrooted(void)
return chrooted;
}
-static bool mnt_already_visible(struct mnt_namespace *ns, struct vfsmount *new,
+static bool mnt_already_visible(struct mnt_namespace *ns,
+ const struct super_block *sb,
int *new_mnt_flags)
{
int new_flags = *new_mnt_flags;
@@ -3364,7 +3458,7 @@ static bool mnt_already_visible(struct mnt_namespace *ns, struct vfsmount *new,
struct mount *child;
int mnt_flags;
- if (mnt->mnt.mnt_sb->s_type != new->mnt_sb->s_type)
+ if (mnt->mnt.mnt_sb->s_type != sb->s_type)
continue;
/* This mount is not fully visible if it's root directory
@@ -3415,7 +3509,7 @@ found:
return visible;
}
-static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags)
+static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
struct mnt_namespace *ns = current->nsproxy->mnt_ns;
@@ -3425,7 +3519,7 @@ static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags)
return false;
/* Can this filesystem be too revealing? */
- s_iflags = mnt->mnt_sb->s_iflags;
+ s_iflags = sb->s_iflags;
if (!(s_iflags & SB_I_USERNS_VISIBLE))
return false;
@@ -3435,7 +3529,7 @@ static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags)
return true;
}
- return !mnt_already_visible(ns, mnt, new_mnt_flags);
+ return !mnt_already_visible(ns, sb, new_mnt_flags);
}
bool mnt_may_suid(struct vfsmount *mnt)
@@ -3484,6 +3578,9 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
+ if (is_anon_ns(mnt_ns))
+ return -EINVAL;
+
if (fs->users != 1)
return -EINVAL;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index a87a56273407..06233bfa6d73 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -72,16 +72,6 @@ static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p)
return xdr_ressize_check(rqstp, p);
}
-static __be32 *read_buf(struct xdr_stream *xdr, size_t nbytes)
-{
- __be32 *p;
-
- p = xdr_inline_decode(xdr, nbytes);
- if (unlikely(p == NULL))
- printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
- return p;
-}
-
static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len,
const char **str, size_t maxlen)
{
@@ -98,13 +88,13 @@ static __be32 decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
__be32 *p;
- p = read_buf(xdr, 4);
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
fh->size = ntohl(*p);
if (fh->size > NFS4_FHSIZE)
return htonl(NFS4ERR_BADHANDLE);
- p = read_buf(xdr, fh->size);
+ p = xdr_inline_decode(xdr, fh->size);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
memcpy(&fh->data[0], p, fh->size);
@@ -117,11 +107,11 @@ static __be32 decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
__be32 *p;
unsigned int attrlen;
- p = read_buf(xdr, 4);
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
attrlen = ntohl(*p);
- p = read_buf(xdr, attrlen << 2);
+ p = xdr_inline_decode(xdr, attrlen << 2);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
if (likely(attrlen > 0))
@@ -135,7 +125,7 @@ static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
__be32 *p;
- p = read_buf(xdr, NFS4_STATEID_SIZE);
+ p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
memcpy(stateid->data, p, NFS4_STATEID_SIZE);
@@ -156,7 +146,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
status = decode_string(xdr, &hdr->taglen, &hdr->tag, CB_OP_TAGLEN_MAXSZ);
if (unlikely(status != 0))
return status;
- p = read_buf(xdr, 12);
+ p = xdr_inline_decode(xdr, 12);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
hdr->minorversion = ntohl(*p++);
@@ -176,7 +166,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
{
__be32 *p;
- p = read_buf(xdr, 4);
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE_HDR);
*op = ntohl(*p);
@@ -205,7 +195,7 @@ static __be32 decode_recall_args(struct svc_rqst *rqstp,
status = decode_delegation_stateid(xdr, &args->stateid);
if (unlikely(status != 0))
return status;
- p = read_buf(xdr, 4);
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
args->truncate = ntohl(*p);
@@ -227,7 +217,7 @@ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
__be32 status = 0;
uint32_t iomode;
- p = read_buf(xdr, 4 * sizeof(uint32_t));
+ p = xdr_inline_decode(xdr, 4 * sizeof(uint32_t));
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
@@ -245,14 +235,14 @@ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
if (unlikely(status != 0))
return status;
- p = read_buf(xdr, 2 * sizeof(uint64_t));
+ p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
p = xdr_decode_hyper(p, &args->cbl_range.offset);
p = xdr_decode_hyper(p, &args->cbl_range.length);
return decode_layout_stateid(xdr, &args->cbl_stateid);
} else if (args->cbl_recall_type == RETURN_FSID) {
- p = read_buf(xdr, 2 * sizeof(uint64_t));
+ p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
p = xdr_decode_hyper(p, &args->cbl_fsid.major);
@@ -275,7 +265,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
args->ndevs = 0;
/* Num of device notifications */
- p = read_buf(xdr, sizeof(uint32_t));
+ p = xdr_inline_decode(xdr, sizeof(uint32_t));
if (unlikely(p == NULL)) {
status = htonl(NFS4ERR_BADXDR);
goto out;
@@ -298,7 +288,8 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
for (i = 0; i < n; i++) {
struct cb_devicenotifyitem *dev = &args->devs[i];
- p = read_buf(xdr, (4 * sizeof(uint32_t)) + NFS4_DEVICEID4_SIZE);
+ p = xdr_inline_decode(xdr, (4 * sizeof(uint32_t)) +
+ NFS4_DEVICEID4_SIZE);
if (unlikely(p == NULL)) {
status = htonl(NFS4ERR_BADXDR);
goto err;
@@ -329,7 +320,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
if (dev->cbd_layout_type == NOTIFY_DEVICEID4_CHANGE) {
- p = read_buf(xdr, sizeof(uint32_t));
+ p = xdr_inline_decode(xdr, sizeof(uint32_t));
if (unlikely(p == NULL)) {
status = htonl(NFS4ERR_BADXDR);
goto err;
@@ -359,7 +350,7 @@ static __be32 decode_sessionid(struct xdr_stream *xdr,
{
__be32 *p;
- p = read_buf(xdr, NFS4_MAX_SESSIONID_LEN);
+ p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
@@ -379,13 +370,13 @@ static __be32 decode_rc_list(struct xdr_stream *xdr,
goto out;
status = htonl(NFS4ERR_RESOURCE);
- p = read_buf(xdr, sizeof(uint32_t));
+ p = xdr_inline_decode(xdr, sizeof(uint32_t));
if (unlikely(p == NULL))
goto out;
rc_list->rcl_nrefcalls = ntohl(*p++);
if (rc_list->rcl_nrefcalls) {
- p = read_buf(xdr,
+ p = xdr_inline_decode(xdr,
rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
if (unlikely(p == NULL))
goto out;
@@ -418,7 +409,7 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
if (status)
return status;
- p = read_buf(xdr, 5 * sizeof(uint32_t));
+ p = xdr_inline_decode(xdr, 5 * sizeof(uint32_t));
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
@@ -461,7 +452,7 @@ static __be32 decode_recallany_args(struct svc_rqst *rqstp,
uint32_t bitmap[2];
__be32 *p, status;
- p = read_buf(xdr, 4);
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
args->craa_objs_to_keep = ntohl(*p++);
@@ -480,7 +471,7 @@ static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
struct cb_recallslotargs *args = argp;
__be32 *p;
- p = read_buf(xdr, 4);
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
args->crsa_target_highest_slotid = ntohl(*p++);
@@ -492,14 +483,14 @@ static __be32 decode_lockowner(struct xdr_stream *xdr, struct cb_notify_lock_arg
__be32 *p;
unsigned int len;
- p = read_buf(xdr, 12);
+ p = xdr_inline_decode(xdr, 12);
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
p = xdr_decode_hyper(p, &args->cbnl_owner.clientid);
len = be32_to_cpu(*p);
- p = read_buf(xdr, len);
+ p = xdr_inline_decode(xdr, len);
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
@@ -537,7 +528,7 @@ static __be32 decode_write_response(struct xdr_stream *xdr,
__be32 *p;
/* skip the always zero field */
- p = read_buf(xdr, 4);
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out;
p++;
@@ -577,7 +568,7 @@ static __be32 decode_offload_args(struct svc_rqst *rqstp,
return status;
/* decode status */
- p = read_buf(xdr, 4);
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out;
args->error = ntohl(*p++);
@@ -943,10 +934,11 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
};
unsigned int nops = 0;
- xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
+ xdr_init_decode(&xdr_in, &rqstp->rq_arg,
+ rqstp->rq_arg.head[0].iov_base, NULL);
p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
- xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
+ xdr_init_encode(&xdr_out, &rqstp->rq_res, p, NULL);
status = decode_compound_hdr_arg(&xdr_in, &hdr_arg);
if (status == htonl(NFS4ERR_RESOURCE))
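For reference, the pattern that replaces the removed read_buf() wrapper throughout this file is a direct xdr_inline_decode() call, with each caller mapping a NULL return to the appropriate NFS4ERR_* code; the noisy printk is simply dropped. A minimal sketch (the helper name decode_u32 is hypothetical):

    static __be32 decode_u32(struct xdr_stream *xdr, uint32_t *value)
    {
        __be32 *p = xdr_inline_decode(xdr, 4);  /* reserve 4 bytes */

        if (unlikely(p == NULL))                /* buffer exhausted */
            return htonl(NFS4ERR_RESOURCE);     /* caller picks the code */
        *value = ntohl(*p);
        return 0;
    }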
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index fb1cf1a4bda2..90d71fda65ce 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -453,7 +453,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
case XPRT_TRANSPORT_RDMA:
if (retrans == NFS_UNSPEC_RETRANS)
to->to_retries = NFS_DEF_TCP_RETRANS;
- if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
+ if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
to->to_initval = NFS_MAX_TCP_TIMEOUT;
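The one-line client.c change fixes the default: the old test looked at to_retries, so a zero to_initval could slip through uninitialized. The intended default-then-clamp shape, as a sketch (the helper name is hypothetical; the constants are the ones used above):

    static void nfs_init_tcp_timeo(struct rpc_timeout *to, int timeo)
    {
        /* default only when the caller left the timeout unset */
        if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
            to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
        /* then clamp to the protocol maximum */
        if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
            to->to_initval = NFS_MAX_TCP_TIMEOUT;
    }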
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 885363ca8569..2f6b447cdd82 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -229,6 +229,8 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
spin_lock(&delegation->lock);
if (delegation->inode != NULL)
inode = igrab(delegation->inode);
+ if (!inode)
+ set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
spin_unlock(&delegation->lock);
return inode;
}
@@ -681,7 +683,7 @@ void nfs_expire_all_delegations(struct nfs_client *clp)
/**
* nfs_super_return_all_delegations - return delegations for one superblock
- * @sb: sb to process
+ * @server: pointer to nfs_server to process
*
*/
void nfs_server_return_all_delegations(struct nfs_server *server)
@@ -944,10 +946,11 @@ restart:
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
list_for_each_entry_rcu(delegation, &server->delegations,
super_list) {
- if (test_bit(NFS_DELEGATION_RETURNING,
- &delegation->flags))
- continue;
- if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURNING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_NEED_RECLAIM,
&delegation->flags) == 0)
continue;
if (!nfs_sb_active(server->super))
@@ -1053,10 +1056,11 @@ restart:
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
list_for_each_entry_rcu(delegation, &server->delegations,
super_list) {
- if (test_bit(NFS_DELEGATION_RETURNING,
- &delegation->flags))
- continue;
- if (test_bit(NFS_DELEGATION_TEST_EXPIRED,
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURNING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_TEST_EXPIRED,
&delegation->flags) == 0)
continue;
if (!nfs_sb_active(server->super))
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index dcbf3394ba0e..35b4b02c1ae0 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -34,6 +34,7 @@ enum {
NFS_DELEGATION_RETURNING,
NFS_DELEGATION_REVOKED,
NFS_DELEGATION_TEST_EXPIRED,
+ NFS_DELEGATION_INODE_FREEING,
};
int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
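The new NFS_DELEGATION_INODE_FREEING flag is set when igrab() fails in nfs_delegation_grab_inode(), and the reclaim/expiry walkers then skip such delegations instead of retrying against an inode that is being freed. The skip test, condensed into one hypothetical helper:

    /* Sketch of the skip test the walkers now apply (simplified; the
     * real code iterates the delegation lists under RCU). */
    static bool nfs_delegation_skip(struct nfs_delegation *d, int work_bit)
    {
        return test_bit(NFS_DELEGATION_INODE_FREEING, &d->flags) ||
               test_bit(NFS_DELEGATION_RETURNING, &d->flags) ||
               !test_bit(work_bit, &d->flags);
    }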
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 6bf4471850c8..a71d0b42d160 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -139,12 +139,19 @@ struct nfs_cache_array {
struct nfs_cache_array_entry array[0];
};
+struct readdirvec {
+ unsigned long nr;
+ unsigned long index;
+ struct page *pages[NFS_MAX_READDIR_RAPAGES];
+};
+
typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool);
typedef struct {
struct file *file;
struct page *page;
struct dir_context *ctx;
unsigned long page_index;
+ struct readdirvec pvec;
u64 *dir_cookie;
u64 last_cookie;
loff_t current_index;
@@ -524,6 +531,10 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
struct nfs_cache_array *array;
unsigned int count = 0;
int status;
+ int max_rapages = NFS_MAX_READDIR_RAPAGES;
+
+ desc->pvec.index = desc->page_index;
+ desc->pvec.nr = 0;
scratch = alloc_page(GFP_KERNEL);
if (scratch == NULL)
@@ -548,20 +559,40 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
if (desc->plus)
nfs_prime_dcache(file_dentry(desc->file), entry);
- status = nfs_readdir_add_to_array(entry, page);
+ status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
+ if (status == -ENOSPC) {
+ desc->pvec.nr++;
+ if (desc->pvec.nr == max_rapages)
+ break;
+ status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
+ }
if (status != 0)
break;
} while (!entry->eof);
+ /*
+ * page and desc->pvec.pages[0] are known to be valid here, so there
+ * is no need to check them for NULL.
+ */
+ copy_highpage(page, desc->pvec.pages[0]);
+
out_nopages:
if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
- array = kmap(page);
+ array = kmap_atomic(desc->pvec.pages[desc->pvec.nr]);
array->eof_index = array->size;
status = 0;
- kunmap(page);
+ kunmap_atomic(array);
}
put_page(scratch);
+
+ /*
+ * desc->pvec.nr > 0 means at least one page was completely filled,
+ * so we must return -ENOSPC; otherwise nfs_readdir_xdr_to_array()
+ * would enter an infinite loop.
+ */
+ if (desc->pvec.nr > 0)
+ return -ENOSPC;
return status;
}
@@ -574,8 +605,8 @@ void nfs_readdir_free_pages(struct page **pages, unsigned int npages)
}
/*
- * nfs_readdir_large_page will allocate pages that must be freed with a call
- * to nfs_readdir_free_pagearray
+ * nfs_readdir_alloc_pages() will allocate pages that must be freed with a call
+ * to nfs_readdir_free_pages()
*/
static
int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages)
@@ -595,6 +626,24 @@ out_freepages:
return -ENOMEM;
}
+/*
+ * nfs_readdir_rapages_init() initializes the readahead pages with empty
+ * nfs_cache_array structures.
+ */
+static
+void nfs_readdir_rapages_init(nfs_readdir_descriptor_t *desc)
+{
+ struct nfs_cache_array *array;
+ int max_rapages = NFS_MAX_READDIR_RAPAGES;
+ int index;
+
+ for (index = 0; index < max_rapages; index++) {
+ array = kmap_atomic(desc->pvec.pages[index]);
+ memset(array, 0, sizeof(struct nfs_cache_array));
+ array->eof_index = -1;
+ kunmap_atomic(array);
+ }
+}
+
static
int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
{
@@ -605,6 +654,12 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
int status = -ENOMEM;
unsigned int array_size = ARRAY_SIZE(pages);
+ /*
+ * Reaching this point means the readahead pages missed; their
+ * contents are stale, so reinitialize them before refilling.
+ */
+ nfs_readdir_rapages_init(desc);
+
entry.prev_cookie = 0;
entry.cookie = desc->last_cookie;
entry.eof = 0;
@@ -664,9 +719,24 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
struct inode *inode = file_inode(desc->file);
int ret;
- ret = nfs_readdir_xdr_to_array(desc, page, inode);
- if (ret < 0)
- goto error;
+ /*
+ * If desc->page_index falls within the range [desc->pvec.index,
+ * desc->pvec.index + desc->pvec.nr), we have a readdir cache hit.
+ */
+ if (desc->page_index >= desc->pvec.index &&
+ desc->page_index < (desc->pvec.index + desc->pvec.nr)) {
+ /*
+ * page and desc->pvec.pages[x] are known to be valid here, so
+ * there is no need to check them for NULL.
+ */
+ copy_highpage(page, desc->pvec.pages[desc->page_index - desc->pvec.index]);
+ ret = 0;
+ } else {
+ ret = nfs_readdir_xdr_to_array(desc, page, inode);
+ if (ret < 0)
+ goto error;
+ }
+
SetPageUptodate(page);
if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
@@ -831,6 +901,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
*desc = &my_desc;
struct nfs_open_dir_context *dir_ctx = file->private_data;
int res = 0;
+ int max_rapages = NFS_MAX_READDIR_RAPAGES;
dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
file, (long long)ctx->pos);
@@ -850,6 +921,12 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
desc->decode = NFS_PROTO(inode)->decode_dirent;
desc->plus = nfs_use_readdirplus(inode, ctx);
+ res = nfs_readdir_alloc_pages(desc->pvec.pages, max_rapages);
+ if (res < 0)
+ return -ENOMEM;
+
+ nfs_readdir_rapages_init(desc);
+
if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
res = nfs_revalidate_mapping(inode, file->f_mapping);
if (res < 0)
@@ -885,6 +962,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
break;
} while (!desc->eof);
out:
+ nfs_readdir_free_pages(desc->pvec.pages, max_rapages);
if (res > 0)
res = 0;
dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
@@ -945,7 +1023,7 @@ static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
/**
* nfs_force_lookup_revalidate - Mark the directory as having changed
- * @dir - pointer to directory inode
+ * @dir: pointer to directory inode
*
* This forces the revalidation code in nfs_lookup_revalidate() to do a
* full lookup on all child dentries of 'dir' whenever a change occurs
@@ -1649,7 +1727,7 @@ nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
reval_dentry:
if (flags & LOOKUP_RCU)
return -ECHILD;
- return nfs_lookup_revalidate_dentry(dir, dentry, inode);;
+ return nfs_lookup_revalidate_dentry(dir, dentry, inode);
full_reval:
return nfs_do_lookup_revalidate(dir, dentry, flags);
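The dir.c changes add a small readahead vector (NFS_MAX_READDIR_RAPAGES pages) that a single READDIR reply can fill, so later nfs_readdir_filler() calls may be satisfied by copy_highpage() instead of another RPC. The cache-hit test, written out as a hypothetical helper:

    static bool nfs_readdir_rapages_hit(nfs_readdir_descriptor_t *desc)
    {
        /* hit when the wanted page index lies inside the filled window */
        return desc->page_index >= desc->pvec.index &&
               desc->page_index < desc->pvec.index + desc->pvec.nr;
    }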
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 33824a0a57bf..0fd811ac08b5 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -428,7 +428,7 @@ out_put:
hdr->release(hdr);
}
-static void nfs_read_sync_pgio_error(struct list_head *head)
+static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
struct nfs_page *req;
@@ -664,8 +664,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
if (!nfs_pageio_add_request(&desc, req)) {
- nfs_list_remove_request(req);
- nfs_list_add_request(req, &failed);
+ nfs_list_move_request(req, &failed);
spin_lock(&cinfo.inode->i_lock);
dreq->flags = 0;
if (desc.pg_error < 0)
@@ -821,7 +820,7 @@ out_put:
hdr->release(hdr);
}
-static void nfs_write_sync_pgio_error(struct list_head *head)
+static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
struct nfs_page *req;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 29553fdba8af..4899b85f9b3c 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -89,8 +89,8 @@ EXPORT_SYMBOL_GPL(nfs_file_release);
/**
* nfs_revalidate_size - Revalidate the file size
- * @inode - pointer to inode struct
- * @file - pointer to struct file
+ * @inode: pointer to inode struct
+ * @filp: pointer to struct file
*
* Revalidates the file length. This is basically a wrapper around
* nfs_revalidate_inode() that takes into account the fact that we may
@@ -276,6 +276,12 @@ EXPORT_SYMBOL_GPL(nfs_file_fsync);
* then a modify/write/read cycle when writing to a page in the
* page cache.
*
+ * Some pNFS layout drivers can only read/write at a certain block
+ * granularity, as all block devices do, and therefore we must perform
+ * read/modify/write whenever a page has not been read yet and the data
+ * to be written there is not aligned to a block boundary and/or is
+ * smaller than the block size.
+ *
* The modify/write/read cycle may occur if a page is read before
* being completely filled by the writer. In this situation, the
* page must be completely written to stable storage on the server
@@ -291,26 +297,32 @@ EXPORT_SYMBOL_GPL(nfs_file_fsync);
* and that the new data won't completely replace the old data in
* that range of the file.
*/
-static int nfs_want_read_modify_write(struct file *file, struct page *page,
- loff_t pos, unsigned len)
+static bool nfs_full_page_write(struct page *page, loff_t pos, unsigned int len)
{
unsigned int pglen = nfs_page_length(page);
unsigned int offset = pos & (PAGE_SIZE - 1);
unsigned int end = offset + len;
- if (pnfs_ld_read_whole_page(file->f_mapping->host)) {
- if (!PageUptodate(page))
- return 1;
- return 0;
- }
+ return !pglen || (end >= pglen && !offset);
+}
- if ((file->f_mode & FMODE_READ) && /* open for read? */
- !PageUptodate(page) && /* Uptodate? */
- !PagePrivate(page) && /* i/o request already? */
- pglen && /* valid bytes of file? */
- (end < pglen || offset)) /* replace all valid bytes? */
- return 1;
- return 0;
+static bool nfs_want_read_modify_write(struct file *file, struct page *page,
+ loff_t pos, unsigned int len)
+{
+ /*
+ * Up-to-date pages, pages with an I/O request already pending,
+ * and full-page writes don't need read/modify/write.
+ */
+ if (PageUptodate(page) || PagePrivate(page) ||
+ nfs_full_page_write(page, pos, len))
+ return false;
+
+ if (pnfs_ld_read_whole_page(file->f_mapping->host))
+ return true;
+ /* Open for reading too? */
+ if (file->f_mode & FMODE_READ)
+ return true;
+ return false;
}
/*
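The file.c refactor splits the old predicate: pages that are up to date, already have an I/O request, or will be fully overwritten never need read/modify/write; only after those early-outs do the pNFS whole-page and FMODE_READ cases apply. The full-page test in isolation, as a sketch (helper name hypothetical):

    static bool covers_full_page(struct page *page, loff_t pos, unsigned int len)
    {
        unsigned int pglen = nfs_page_length(page);   /* valid bytes */
        unsigned int offset = pos & (PAGE_SIZE - 1);

        /* empty page, or a write from the head past all valid data */
        return !pglen || (offset == 0 && offset + len >= pglen);
    }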
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 63abe705f4ca..6673d4ff5a2a 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -410,7 +410,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
for (i = 0; i < fls->mirror_array_cnt; i++) {
struct nfs4_ff_layout_mirror *mirror;
struct cred *kcred;
- const struct cred *cred;
+ const struct cred __rcu *cred;
kuid_t uid;
kgid_t gid;
u32 ds_count, fh_count, id;
@@ -501,7 +501,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
goto out_err_free;
kcred->fsuid = uid;
kcred->fsgid = gid;
- cred = kcred;
+ cred = RCU_INITIALIZER(kcred);
if (lgr->range.iomode == IOMODE_READ)
rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
@@ -788,30 +788,82 @@ ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
}
}
+static void
+ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx)
+{
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+
+ if (devid)
+ nfs4_mark_deviceid_unavailable(devid);
+}
+
+static void
+ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx)
+{
+ struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
+
+ if (devid)
+ nfs4_mark_deviceid_available(devid);
+}
+
static struct nfs4_pnfs_ds *
-ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
- int start_idx,
- int *best_idx)
+ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+ int start_idx, int *best_idx,
+ bool check_device)
{
struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+ struct nfs4_ff_layout_mirror *mirror;
struct nfs4_pnfs_ds *ds;
bool fail_return = false;
int idx;
- /* mirrors are sorted by efficiency */
+ /* mirrors are initially sorted by efficiency */
for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
if (idx+1 == fls->mirror_array_cnt)
- fail_return = true;
- ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
- if (ds) {
- *best_idx = idx;
- return ds;
- }
+ fail_return = !check_device;
+
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
+ if (!ds)
+ continue;
+
+ if (check_device &&
+ nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
+ continue;
+
+ *best_idx = idx;
+ return ds;
}
return NULL;
}
+static struct nfs4_pnfs_ds *
+ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
+ int start_idx, int *best_idx)
+{
+ return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
+}
+
+static struct nfs4_pnfs_ds *
+ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
+ int start_idx, int *best_idx)
+{
+ return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
+}
+
+static struct nfs4_pnfs_ds *
+ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
+ int start_idx, int *best_idx)
+{
+ struct nfs4_pnfs_ds *ds;
+
+ ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
+ if (ds)
+ return ds;
+ return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
+}
+
static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req,
@@ -925,7 +977,8 @@ retry:
goto out_mds;
for (i = 0; i < pgio->pg_mirror_count; i++) {
- ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
+ mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
if (!ds) {
if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
goto out_mds;
@@ -936,7 +989,6 @@ retry:
goto retry;
}
pgm = &pgio->pg_mirrors[i];
- mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
}
@@ -1071,6 +1123,8 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
break;
+ case -EAGAIN:
+ return -NFS4ERR_RESET_TO_PNFS;
/* Invalidate Layout errors */
case -NFS4ERR_PNFS_NO_LAYOUT:
case -ESTALE: /* mapped NFS4ERR_STALE */
@@ -1131,6 +1185,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
case -EBADHANDLE:
case -ELOOP:
case -ENOSPC:
+ case -EAGAIN:
break;
case -EJUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
@@ -1158,8 +1213,10 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
{
int vers = clp->cl_nfs_mod->rpc_vers->number;
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
+ ff_layout_mark_ds_reachable(lseg, idx);
return 0;
+ }
/* Handle the case of an invalid layout segment */
if (!pnfs_is_valid_lseg(lseg))
@@ -1222,6 +1279,8 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
mirror, offset, length, status, opnum,
GFP_NOIO);
+ if (status == NFS4ERR_NXIO)
+ ff_layout_mark_ds_unreachable(lseg, idx);
pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}
@@ -1230,6 +1289,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
static int ff_layout_read_done_cb(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ int new_idx = hdr->pgio_mirror_idx;
int err;
trace_nfs4_pnfs_read(hdr, task->tk_status);
@@ -1248,8 +1308,8 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
case -NFS4ERR_RESET_TO_PNFS:
if (ff_layout_choose_best_ds_for_read(hdr->lseg,
hdr->pgio_mirror_idx + 1,
- &hdr->pgio_mirror_idx))
- goto out_eagain;
+ &new_idx))
+ goto out_layouterror;
set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
return task->tk_status;
case -NFS4ERR_RESET_TO_MDS:
@@ -1260,6 +1320,10 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
}
return 0;
+out_layouterror:
+ ff_layout_read_record_layoutstats_done(task, hdr);
+ ff_layout_send_layouterror(hdr->lseg);
+ hdr->pgio_mirror_idx = new_idx;
out_eagain:
rpc_restart_call_prepare(task);
return -EAGAIN;
@@ -1293,15 +1357,6 @@ ff_layout_set_layoutcommit(struct inode *inode,
(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}
-static bool
-ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
-{
- /* No mirroring for now */
- struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
-
- return ff_layout_test_devid_unavailable(node);
-}
-
static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
@@ -1332,10 +1387,6 @@ static int ff_layout_read_prepare_common(struct rpc_task *task,
rpc_exit(task, -EIO);
return -EIO;
}
- if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
- rpc_exit(task, -EHOSTDOWN);
- return -EAGAIN;
- }
ff_layout_read_record_layoutstats_start(task, hdr);
return 0;
@@ -1369,6 +1420,16 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
ff_layout_read_prepare_common(task, hdr);
}
+static void
+ff_layout_io_prepare_transmit(struct rpc_task *task,
+ void *data)
+{
+ struct nfs_pgio_header *hdr = data;
+
+ if (!pnfs_is_valid_lseg(hdr->lseg))
+ rpc_exit(task, -EAGAIN);
+}
+
static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
struct nfs_pgio_header *hdr = data;
@@ -1399,9 +1460,10 @@ static void ff_layout_read_release(void *data)
struct nfs_pgio_header *hdr = data;
ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
- if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+ if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
+ ff_layout_send_layouterror(hdr->lseg);
pnfs_read_resend_pnfs(hdr);
- else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+ } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
ff_layout_reset_read(hdr);
pnfs_generic_rw_release(data);
}
@@ -1513,11 +1575,6 @@ static int ff_layout_write_prepare_common(struct rpc_task *task,
return -EIO;
}
- if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
- rpc_exit(task, -EHOSTDOWN);
- return -EAGAIN;
- }
-
ff_layout_write_record_layoutstats_start(task, hdr);
return 0;
}
@@ -1573,9 +1630,10 @@ static void ff_layout_write_release(void *data)
struct nfs_pgio_header *hdr = data;
ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
- if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+ if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
+ ff_layout_send_layouterror(hdr->lseg);
ff_layout_reset_write(hdr, true);
- else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+ } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
ff_layout_reset_write(hdr, false);
pnfs_generic_rw_release(data);
}
@@ -1657,6 +1715,7 @@ static void ff_layout_commit_release(void *data)
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
.rpc_call_prepare = ff_layout_read_prepare_v3,
+ .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_read_call_done,
.rpc_count_stats = ff_layout_read_count_stats,
.rpc_release = ff_layout_read_release,
@@ -1664,6 +1723,7 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
.rpc_call_prepare = ff_layout_read_prepare_v4,
+ .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_read_call_done,
.rpc_count_stats = ff_layout_read_count_stats,
.rpc_release = ff_layout_read_release,
@@ -1671,6 +1731,7 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
.rpc_call_prepare = ff_layout_write_prepare_v3,
+ .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_write_call_done,
.rpc_count_stats = ff_layout_write_count_stats,
.rpc_release = ff_layout_write_release,
@@ -1678,6 +1739,7 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
.rpc_call_prepare = ff_layout_write_prepare_v4,
+ .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_write_call_done,
.rpc_count_stats = ff_layout_write_count_stats,
.rpc_release = ff_layout_write_release,
@@ -1703,6 +1765,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
u32 idx = hdr->pgio_mirror_idx;
@@ -1713,20 +1776,21 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
__func__, hdr->inode->i_ino,
hdr->args.pgbase, (size_t)hdr->args.count, offset);
- ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
if (!ds)
goto out_failed;
- ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
+ ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
hdr->inode);
if (IS_ERR(ds_clnt))
goto out_failed;
- ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
+ ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
if (!ds_cred)
goto out_failed;
- vers = nfs4_ff_layout_ds_version(lseg, idx);
+ vers = nfs4_ff_layout_ds_version(mirror);
dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
@@ -1734,13 +1798,11 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->pgio_done_cb = ff_layout_read_done_cb;
refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
- fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
+ fh = nfs4_ff_layout_select_ds_fh(mirror);
if (fh)
hdr->args.fh = fh;
- if (vers == 4 &&
- !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
- goto out_failed;
+ nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
/*
* Note that if we ever decide to split across DSes,
@@ -1770,26 +1832,28 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
int vers;
struct nfs_fh *fh;
int idx = hdr->pgio_mirror_idx;
- ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
if (!ds)
goto out_failed;
- ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
+ ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
hdr->inode);
if (IS_ERR(ds_clnt))
goto out_failed;
- ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
+ ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
if (!ds_cred)
goto out_failed;
- vers = nfs4_ff_layout_ds_version(lseg, idx);
+ vers = nfs4_ff_layout_ds_version(mirror);
dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
@@ -1800,13 +1864,11 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
hdr->ds_commit_idx = idx;
- fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
+ fh = nfs4_ff_layout_select_ds_fh(mirror);
if (fh)
hdr->args.fh = fh;
- if (vers == 4 &&
- !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
- goto out_failed;
+ nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
/*
* Note that if we ever decide to split across DSes,
@@ -1849,6 +1911,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
u32 idx;
int vers, ret;
@@ -1859,20 +1922,21 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
goto out_err;
idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
- ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
if (!ds)
goto out_err;
- ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
+ ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
data->inode);
if (IS_ERR(ds_clnt))
goto out_err;
- ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
+ ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
if (!ds_cred)
goto out_err;
- vers = nfs4_ff_layout_ds_version(lseg, idx);
+ vers = nfs4_ff_layout_ds_version(mirror);
dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
@@ -2036,7 +2100,7 @@ ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
dprintk("%s: Begin\n", __func__);
- xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);
+ xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
@@ -2102,6 +2166,52 @@ out_nomem:
return -ENOMEM;
}
+#ifdef CONFIG_NFS_V4_2
+void
+ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_layout_hdr *lo = lseg->pls_layout;
+ struct nfs42_layout_error *errors;
+ LIST_HEAD(head);
+
+ if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
+ return;
+ ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
+ if (list_empty(&head))
+ return;
+
+ errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
+ sizeof(*errors), GFP_NOFS);
+ if (errors != NULL) {
+ const struct nfs4_ff_layout_ds_err *pos;
+ size_t n = 0;
+
+ list_for_each_entry(pos, &head, list) {
+ errors[n].offset = pos->offset;
+ errors[n].length = pos->length;
+ nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
+ errors[n].errors[0].dev_id = pos->deviceid;
+ errors[n].errors[0].status = pos->status;
+ errors[n].errors[0].opnum = pos->opnum;
+ n++;
+ if (!list_is_last(&pos->list, &head) &&
+ n < NFS42_LAYOUTERROR_MAX)
+ continue;
+ if (nfs42_proc_layouterror(lseg, errors, n) < 0)
+ break;
+ n = 0;
+ }
+ kfree(errors);
+ }
+ ff_layout_free_ds_ioerr(&head);
+}
+#else
+void
+ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
+{
+}
+#endif
+
static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
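ff_layout_send_layouterror() drains the tracked DS errors and reports them to the MDS in batches of at most NFS42_LAYOUTERROR_MAX entries via nfs42_proc_layouterror(). The batching skeleton, reduced to its shape (make_error(), send_batch() and BATCH_MAX are placeholders, not real kernel symbols):

    size_t n = 0;

    list_for_each_entry(pos, &head, list) {
        batch[n++] = make_error(pos);
        /* keep filling until the batch is full or the list ends */
        if (n < BATCH_MAX && !list_is_last(&pos->list, &head))
            continue;
        if (send_batch(batch, n) < 0)   /* flush */
            break;
        n = 0;
    }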
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index c2626bad466b..2f369966abf7 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -132,16 +132,6 @@ FF_LAYOUT_LSEG(struct pnfs_layout_segment *lseg)
generic_hdr);
}
-static inline struct nfs4_deviceid_node *
-FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)
-{
- if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt ||
- FF_LAYOUT_LSEG(lseg)->mirror_array[idx] == NULL ||
- FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds == NULL)
- return NULL;
- return &FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds->id_node;
-}
-
static inline struct nfs4_ff_layout_ds *
FF_LAYOUT_MIRROR_DS(struct nfs4_deviceid_node *node)
{
@@ -151,9 +141,25 @@ FF_LAYOUT_MIRROR_DS(struct nfs4_deviceid_node *node)
static inline struct nfs4_ff_layout_mirror *
FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx)
{
- if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt)
- return NULL;
- return FF_LAYOUT_LSEG(lseg)->mirror_array[idx];
+ struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+
+ if (idx < fls->mirror_array_cnt)
+ return fls->mirror_array[idx];
+ return NULL;
+}
+
+static inline struct nfs4_deviceid_node *
+FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)
+{
+ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, idx);
+
+ if (mirror != NULL) {
+ struct nfs4_ff_layout_ds *mirror_ds = mirror->mirror_ds;
+
+ if (!IS_ERR_OR_NULL(mirror_ds))
+ return &mirror_ds->id_node;
+ }
+ return NULL;
}
static inline u32
@@ -174,28 +180,10 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
return FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_READ_IO;
}
-static inline bool
-ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
-{
- /*
- * Flexfiles should never mark a DS unavailable, but if it does
- * print a (ratelimited) warning as this can affect performance.
- */
- if (nfs4_test_deviceid_unavailable(node)) {
- u32 *p = (u32 *)node->deviceid.data;
-
- pr_warn_ratelimited("NFS: flexfiles layout referencing an "
- "unavailable device [%x%x%x%x]\n",
- p[0], p[1], p[2], p[3]);
- return true;
- }
- return false;
-}
-
static inline int
-nfs4_ff_layout_ds_version(struct pnfs_layout_segment *lseg, u32 ds_idx)
+nfs4_ff_layout_ds_version(const struct nfs4_ff_layout_mirror *mirror)
{
- return FF_LAYOUT_COMP(lseg, ds_idx)->mirror_ds->ds_versions[0].version;
+ return mirror->mirror_ds->ds_versions[0].version;
}
struct nfs4_ff_layout_ds *
@@ -207,6 +195,7 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
struct nfs4_ff_layout_mirror *mirror, u64 offset,
u64 length, int status, enum nfs_opnum4 opnum,
gfp_t gfp_flags);
+void ff_layout_send_layouterror(struct pnfs_layout_segment *lseg);
int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head);
void ff_layout_free_ds_ioerr(struct list_head *head);
unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
@@ -214,23 +203,23 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
struct list_head *head,
unsigned int maxnum);
struct nfs_fh *
-nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
-int
-nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
- u32 mirror_idx,
- nfs4_stateid *stateid);
+nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror);
+void
+nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
+ nfs4_stateid *stateid);
struct nfs4_pnfs_ds *
-nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
+ struct nfs4_ff_layout_mirror *mirror,
bool fail_return);
struct rpc_clnt *
-nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg,
- u32 ds_idx,
+nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
struct nfs_client *ds_clp,
struct inode *inode);
-const struct cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg,
- u32 ds_idx, const struct cred *mdscred);
+const struct cred *ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
+ const struct pnfs_layout_range *range,
+ const struct cred *mdscred);
bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg);
bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg);
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 11766a74216d..a809989807d6 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -183,56 +183,6 @@ out_err:
return NULL;
}
-static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
- struct nfs4_deviceid_node *devid)
-{
- nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid);
- if (!ff_layout_has_available_ds(lseg))
- pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
- lseg);
-}
-
-static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
- struct nfs4_ff_layout_mirror *mirror,
- bool create)
-{
- if (mirror == NULL || IS_ERR(mirror->mirror_ds))
- goto outerr;
- if (mirror->mirror_ds == NULL) {
- if (create) {
- struct nfs4_deviceid_node *node;
- struct pnfs_layout_hdr *lh = lseg->pls_layout;
- struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);
-
- node = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
- &mirror->devid, lh->plh_lc_cred,
- GFP_KERNEL);
- if (node)
- mirror_ds = FF_LAYOUT_MIRROR_DS(node);
-
- /* check for race with another call to this function */
- if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
- mirror_ds != ERR_PTR(-ENODEV))
- nfs4_put_deviceid_node(node);
- } else
- goto outerr;
- }
-
- if (IS_ERR(mirror->mirror_ds))
- goto outerr;
-
- if (mirror->mirror_ds->ds == NULL) {
- struct nfs4_deviceid_node *devid;
- devid = &mirror->mirror_ds->id_node;
- ff_layout_mark_devid_invalid(lseg, devid);
- return false;
- }
- return true;
-outerr:
- pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
- return false;
-}
-
static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
u64 offset, u64 length)
{
@@ -326,7 +276,6 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
spin_lock(&flo->generic_hdr.plh_inode->i_lock);
ff_layout_add_ds_error_locked(flo, dserr);
spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
-
return 0;
}
@@ -353,46 +302,54 @@ ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
}
struct nfs_fh *
-nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx)
+nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror)
{
- struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
- struct nfs_fh *fh = NULL;
-
- if (!ff_layout_mirror_valid(lseg, mirror, false)) {
- pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
- __func__, mirror_idx);
- goto out;
- }
-
/* FIXME: For now assume there is only 1 version available for the DS */
- fh = &mirror->fh_versions[0];
-out:
- return fh;
+ return &mirror->fh_versions[0];
}
-int
-nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
- u32 mirror_idx,
- nfs4_stateid *stateid)
+void
+nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
+ nfs4_stateid *stateid)
{
- struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
+ if (nfs4_ff_layout_ds_version(mirror) == 4)
+ nfs4_stateid_copy(stateid, &mirror->stateid);
+}
- if (!ff_layout_mirror_valid(lseg, mirror, false)) {
- pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
- __func__, mirror_idx);
- goto out;
+static bool
+ff_layout_init_mirror_ds(struct pnfs_layout_hdr *lo,
+ struct nfs4_ff_layout_mirror *mirror)
+{
+ if (mirror == NULL)
+ goto outerr;
+ if (mirror->mirror_ds == NULL) {
+ struct nfs4_deviceid_node *node;
+ struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);
+
+ node = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode),
+ &mirror->devid, lo->plh_lc_cred,
+ GFP_KERNEL);
+ if (node)
+ mirror_ds = FF_LAYOUT_MIRROR_DS(node);
+
+ /* check for race with another call to this function */
+ if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
+ mirror_ds != ERR_PTR(-ENODEV))
+ nfs4_put_deviceid_node(node);
}
- nfs4_stateid_copy(stateid, &mirror->stateid);
- return 1;
-out:
- return 0;
+ if (IS_ERR(mirror->mirror_ds))
+ goto outerr;
+
+ return true;
+outerr:
+ return false;
}
/**
* nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
* @lseg: the layout segment we're operating on
- * @ds_idx: index of the DS to use
+ * @mirror: layout mirror describing the DS to use
* @fail_return: return layout on connect failure?
*
* Try to prepare a DS connection to accept an RPC call. This involves
@@ -407,26 +364,18 @@ out:
* Returns a pointer to a connected DS object on success or NULL on failure.
*/
struct nfs4_pnfs_ds *
-nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
+ struct nfs4_ff_layout_mirror *mirror,
bool fail_return)
{
- struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
struct nfs4_pnfs_ds *ds = NULL;
- struct nfs4_deviceid_node *devid;
struct inode *ino = lseg->pls_layout->plh_inode;
struct nfs_server *s = NFS_SERVER(ino);
unsigned int max_payload;
int status;
- if (!ff_layout_mirror_valid(lseg, mirror, true)) {
- pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
- __func__, ds_idx);
- goto out;
- }
-
- devid = &mirror->mirror_ds->id_node;
- if (ff_layout_test_devid_unavailable(devid))
- goto out_fail;
+ if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
+ goto noconnect;
ds = mirror->mirror_ds->ds;
/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
@@ -437,8 +386,8 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
/* FIXME: For now we assume the server sent only one version of NFS
* to use for the DS.
*/
- status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
- dataserver_retrans,
+ status = nfs4_pnfs_ds_connect(s, ds, &mirror->mirror_ds->id_node,
+ dataserver_timeo, dataserver_retrans,
mirror->mirror_ds->ds_versions[0].version,
mirror->mirror_ds->ds_versions[0].minor_version);
@@ -453,11 +402,12 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
mirror->mirror_ds->ds_versions[0].wsize = max_payload;
goto out;
}
-out_fail:
+noconnect:
ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
mirror, lseg->pls_range.offset,
lseg->pls_range.length, NFS4ERR_NXIO,
OP_ILLEGAL, GFP_NOIO);
+ ff_layout_send_layouterror(lseg);
if (fail_return || !ff_layout_has_available_ds(lseg))
pnfs_error_mark_layout_for_return(ino, lseg);
ds = NULL;
@@ -466,14 +416,14 @@ out:
}
const struct cred *
-ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
+ const struct pnfs_layout_range *range,
const struct cred *mdscred)
{
- struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
const struct cred *cred;
if (mirror && !mirror->mirror_ds->ds_versions[0].tightly_coupled) {
- cred = ff_layout_get_mirror_cred(mirror, lseg->pls_range.iomode);
+ cred = ff_layout_get_mirror_cred(mirror, range->iomode);
if (!cred)
cred = get_cred(mdscred);
} else {
@@ -483,15 +433,18 @@ ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
}
/**
-* Find or create a DS rpc client with th MDS server rpc client auth flavor
-* in the nfs_client cl_ds_clients list.
-*/
+ * nfs4_ff_find_or_create_ds_client - Find or create a DS rpc client
+ * @mirror: pointer to the mirror
+ * @ds_clp: nfs_client for the DS
+ * @inode: pointer to inode
+ *
+ * Find or create a DS rpc client with the MDS server rpc client auth flavor
+ * in the nfs_client cl_ds_clients list.
+ */
struct rpc_clnt *
-nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx,
+nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
struct nfs_client *ds_clp, struct inode *inode)
{
- struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
-
switch (mirror->mirror_ds->ds_versions[0].version) {
case 3:
/* For NFSv3 DS, flavor is set when creating DS connections */
@@ -608,7 +561,7 @@ static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
if (IS_ERR(mirror->mirror_ds))
continue;
devid = &mirror->mirror_ds->id_node;
- if (!ff_layout_test_devid_unavailable(devid))
+ if (!nfs4_test_deviceid_unavailable(devid))
return true;
}
}
@@ -629,7 +582,7 @@ static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
if (!mirror->mirror_ds)
continue;
devid = &mirror->mirror_ds->id_node;
- if (ff_layout_test_devid_unavailable(devid))
+ if (nfs4_test_deviceid_unavailable(devid))
return false;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 094775ea0781..414a90d48493 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -143,6 +143,7 @@ EXPORT_SYMBOL_GPL(nfs_sync_inode);
/**
* nfs_sync_mapping - helper to flush all mmapped dirty data to disk
+ * @mapping: pointer to struct address_space
*/
int nfs_sync_mapping(struct address_space *mapping)
{
@@ -1184,8 +1185,8 @@ int nfs_attribute_cache_expired(struct inode *inode)
/**
* nfs_revalidate_inode - Revalidate the inode attributes
- * @server - pointer to nfs_server struct
- * @inode - pointer to inode struct
+ * @server: pointer to nfs_server struct
+ * @inode: pointer to inode struct
*
* Updates inode attribute information by retrieving the data from the server.
*/
@@ -1255,8 +1256,8 @@ out:
/**
* nfs_revalidate_mapping - Revalidate the pagecache
- * @inode - pointer to host inode
- * @mapping - pointer to mapping
+ * @inode: pointer to host inode
+ * @mapping: pointer to mapping
*/
int nfs_revalidate_mapping(struct inode *inode,
struct address_space *mapping)
@@ -1371,8 +1372,8 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
/**
* nfs_check_inode_attributes - verify consistency of the inode attribute cache
- * @inode - pointer to inode
- * @fattr - updated attributes
+ * @inode: pointer to inode
+ * @fattr: updated attributes
*
* Verifies the attribute cache. If we have just changed the attributes,
* so that fattr carries weak cache consistency data, then it may
@@ -1572,8 +1573,8 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
/**
* nfs_inode_attrs_need_update - check if the inode attributes need updating
- * @inode - pointer to inode
- * @fattr - attributes
+ * @inode: pointer to inode
+ * @fattr: attributes
*
* Attempt to divine whether or not an RPC call reply carrying stale
* attributes got scheduled after another call carrying updated ones.
@@ -1614,8 +1615,8 @@ static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr
/**
* nfs_refresh_inode - try to update the inode attribute cache
- * @inode - pointer to inode
- * @fattr - updated attributes
+ * @inode: pointer to inode
+ * @fattr: updated attributes
*
* Check that an RPC call that returned attributes has not overlapped with
* other recent updates of the inode metadata, then decide whether it is
@@ -1649,8 +1650,8 @@ static int nfs_post_op_update_inode_locked(struct inode *inode,
/**
* nfs_post_op_update_inode - try to update the inode attribute cache
- * @inode - pointer to inode
- * @fattr - updated attributes
+ * @inode: pointer to inode
+ * @fattr: updated attributes
*
* After an operation that has changed the inode metadata, mark the
* attribute cache as being invalid, then try to update it.
@@ -1679,8 +1680,8 @@ EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);
/**
* nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache
- * @inode - pointer to inode
- * @fattr - updated attributes
+ * @inode: pointer to inode
+ * @fattr: updated attributes
*
* After an operation that has changed the inode metadata, mark the
* attribute cache as being invalid, then try to update it. Fake up
@@ -1731,8 +1732,8 @@ out_noforce:
/**
* nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
- * @inode - pointer to inode
- * @fattr - updated attributes
+ * @inode: pointer to inode
+ * @fattr: updated attributes
*
* After an operation that has changed the inode metadata, mark the
* attribute cache as being invalid, then try to update it. Fake up
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b1e577302518..c7cf23ae6597 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -69,7 +69,8 @@ struct nfs_clone_mount {
* Maximum number of pages that readdir can use for creating
* a vmapped array of pages.
*/
-#define NFS_MAX_READDIR_PAGES 8
+#define NFS_MAX_READDIR_PAGES 64
+#define NFS_MAX_READDIR_RAPAGES 8
struct nfs_client_initdata {
unsigned long init_flags;
@@ -755,6 +756,7 @@ static inline bool nfs_error_is_fatal(int err)
{
switch (err) {
case -ERESTARTSYS:
+ case -EINTR:
case -EACCES:
case -EDQUOT:
case -EFBIG:
@@ -763,6 +765,7 @@ static inline bool nfs_error_is_fatal(int err)
case -EROFS:
case -ESTALE:
case -E2BIG:
+ case -ENOMEM:
return true;
default:
return false;
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
index 9034b4926909..5088fda9b453 100644
--- a/fs/nfs/io.c
+++ b/fs/nfs/io.c
@@ -25,7 +25,7 @@ static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
/**
* nfs_start_io_read - declare the file is being used for buffered reads
- * @inode - file inode
+ * @inode: file inode
*
* Declare that a buffered read operation is about to start, and ensure
* that we block all direct I/O.
@@ -56,7 +56,7 @@ nfs_start_io_read(struct inode *inode)
/**
* nfs_end_io_read - declare that the buffered read operation is done
- * @inode - file inode
+ * @inode: file inode
*
* Declare that a buffered read operation is done, and release the shared
* lock on inode->i_rwsem.
@@ -69,7 +69,7 @@ nfs_end_io_read(struct inode *inode)
/**
* nfs_start_io_write - declare the file is being used for buffered writes
- * @inode - file inode
+ * @inode: file inode
*
* Declare that a buffered write operation is about to start, and ensure
* that we block all direct I/O.
@@ -83,7 +83,7 @@ nfs_start_io_write(struct inode *inode)
/**
* nfs_end_io_write - declare that the buffered write operation is done
- * @inode - file inode
+ * @inode: file inode
*
* Declare that a buffered write operation is done, and release the
* lock on inode->i_rwsem.
@@ -105,7 +105,7 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
/**
* nfs_start_io_direct - declare the file is being used for direct i/o
- * @inode - file inode
+ * @inode: file inode
*
* Declare that a direct I/O operation is about to start, and ensure
* that we block all buffered I/O.
@@ -136,7 +136,7 @@ nfs_start_io_direct(struct inode *inode)
/**
* nfs_end_io_direct - declare that the direct i/o operation is done
- * @inode - file inode
+ * @inode: file inode
*
* Declare that a direct I/O operation is done, and release the shared
* lock on inode->i_rwsem.
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index e5686be67be8..15f099a24c29 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -221,10 +221,10 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
/**
* nfs_do_submount - set up mountpoint when crossing a filesystem boundary
- * @dentry - parent directory
- * @fh - filehandle for new root dentry
- * @fattr - attributes for new root inode
- * @authflavor - security flavor to use when performing the mount
+ * @dentry: parent directory
+ * @fh: filehandle for new root dentry
+ * @fattr: attributes for new root inode
+ * @authflavor: security flavor to use when performing the mount
*
*/
struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 350675e3ed47..a7ed29de0a40 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -22,6 +22,7 @@
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
+#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@@ -55,42 +56,16 @@
#define NFS_attrstat_sz (1+NFS_fattr_sz)
#define NFS_diropres_sz (1+NFS_fhandle_sz+NFS_fattr_sz)
-#define NFS_readlinkres_sz (2)
-#define NFS_readres_sz (1+NFS_fattr_sz+1)
+#define NFS_readlinkres_sz (2+1)
+#define NFS_readres_sz (1+NFS_fattr_sz+1+1)
#define NFS_writeres_sz (NFS_attrstat_sz)
#define NFS_stat_sz (1)
-#define NFS_readdirres_sz (1)
+#define NFS_readdirres_sz (1+1)
#define NFS_statfsres_sz (1+NFS_info_sz)
static int nfs_stat_to_errno(enum nfs_stat);
/*
- * While encoding arguments, set up the reply buffer in advance to
- * receive reply data directly into the page cache.
- */
-static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
- unsigned int base, unsigned int len,
- unsigned int bufsize)
-{
- struct rpc_auth *auth = req->rq_cred->cr_auth;
- unsigned int replen;
-
- replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
- xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
-}
-
-/*
- * Handle decode buffer overflows out-of-line.
- */
-static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
-{
- dprintk("NFS: %s prematurely hit the end of our receive buffer. "
- "Remaining buffer length is %tu words.\n",
- func, xdr->end - xdr->p);
-}
-
-
-/*
* Encode/decode NFSv2 basic data types
*
* Basic NFSv2 data types are defined in section 2.3 of RFC 1094:
@@ -110,8 +85,8 @@ static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_pgio_res *result)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
count = be32_to_cpup(p);
recvd = xdr_read_pages(xdr, count);
if (unlikely(count > recvd))
@@ -125,9 +100,6 @@ out_cheating:
"count %u > recvd %u\n", count, recvd);
count = recvd;
goto out;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -157,13 +129,16 @@ static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
+ if (unlikely(*p != cpu_to_be32(NFS_OK)))
+ goto out_status;
+ *status = 0;
+ return 0;
+out_status:
*status = be32_to_cpup(p);
+ trace_nfs_xdr_status((int)*status);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -205,14 +180,11 @@ static int decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
__be32 *p;
p = xdr_inline_decode(xdr, NFS2_FHSIZE);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
fh->size = NFS2_FHSIZE;
memcpy(fh->data, p, NFS2_FHSIZE);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -282,8 +254,8 @@ static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, NFS_fattr_sz << 2);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
fattr->valid |= NFS_ATTR_FATTR_V2;
@@ -325,9 +297,6 @@ out_uid:
out_gid:
dprintk("NFS: returned invalid gid\n");
return -EINVAL;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -416,23 +385,20 @@ static int decode_filename_inline(struct xdr_stream *xdr,
u32 count;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
count = be32_to_cpup(p);
if (count > NFS3_MAXNAMLEN)
goto out_nametoolong;
p = xdr_inline_decode(xdr, count);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
*name = (const char *)p;
*length = count;
return 0;
out_nametoolong:
dprintk("NFS: returned filename too long: %u\n", count);
return -ENAMETOOLONG;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -455,8 +421,8 @@ static int decode_path(struct xdr_stream *xdr)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
length = be32_to_cpup(p);
if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN))
goto out_size;
@@ -472,9 +438,6 @@ out_cheating:
dprintk("NFS: server cheating in pathname result: "
"length %u > received %u\n", length, recvd);
return -EIO;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -615,8 +578,8 @@ static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
const struct nfs_readlinkargs *args = data;
encode_fhandle(xdr, args->fh);
- prepare_reply_buffer(req, args->pages, args->pgbase,
- args->pglen, NFS_readlinkres_sz);
+ rpc_prepare_reply_pages(req, args->pages, args->pgbase,
+ args->pglen, NFS_readlinkres_sz);
}
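prepare_reply_buffer() is deleted here (and again in nfs3xdr.c and nfs4xdr.c below) in favour of one shared SUNRPC helper. A minimal sketch of what rpc_prepare_reply_pages() has to do, assuming it keeps the logic of the deleted per-protocol helpers (the real SUNRPC implementation may account for the auth layer differently):

/* Point the receive buffer's page vector at the caller's pages so the
 * payload lands directly in the page cache; hdrsize counts the expected
 * RPC + NFS reply header in 32-bit XDR words.
 */
static void rpc_prepare_reply_pages_sketch(struct rpc_rqst *req,
					   struct page **pages,
					   unsigned int base,
					   unsigned int len,
					   unsigned int hdrsize)
{
	struct rpc_auth *auth = req->rq_cred->cr_auth;
	unsigned int replen = RPC_REPHDRSIZE + auth->au_rslack + hdrsize;

	/* replen is in XDR words; xdr_inline_pages() takes bytes */
	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
}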
/*
@@ -651,8 +614,8 @@ static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
const struct nfs_pgio_args *args = data;
encode_readargs(xdr, args);
- prepare_reply_buffer(req, args->pages, args->pgbase,
- args->count, NFS_readres_sz);
+ rpc_prepare_reply_pages(req, args->pages, args->pgbase,
+ args->count, NFS_readres_sz);
req->rq_rcv_buf.flags |= XDRBUF_READ;
}
@@ -809,8 +772,8 @@ static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
const struct nfs_readdirargs *args = data;
encode_readdirargs(xdr, args);
- prepare_reply_buffer(req, args->pages, 0,
- args->count, NFS_readdirres_sz);
+ rpc_prepare_reply_pages(req, args->pages, 0,
+ args->count, NFS_readdirres_sz);
}
/*
@@ -951,12 +914,12 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
int error;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EAGAIN;
if (*p++ == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EAGAIN;
if (*p++ == xdr_zero)
return -EAGAIN;
entry->eof = 1;
@@ -964,8 +927,8 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
}
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EAGAIN;
entry->ino = be32_to_cpup(p);
error = decode_filename_inline(xdr, &entry->name, &entry->len);
@@ -978,17 +941,13 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
*/
entry->prev_cookie = entry->cookie;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EAGAIN;
entry->cookie = be32_to_cpup(p);
entry->d_type = DT_UNKNOWN;
return 0;
-
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EAGAIN;
}
/*
@@ -1052,17 +1011,14 @@ static int decode_info(struct xdr_stream *xdr, struct nfs2_fsstat *result)
__be32 *p;
p = xdr_inline_decode(xdr, NFS_info_sz << 2);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
result->tsize = be32_to_cpup(p++);
result->bsize = be32_to_cpup(p++);
result->blocks = be32_to_cpup(p++);
result->bfree = be32_to_cpup(p++);
result->bavail = be32_to_cpup(p);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 9fce18548f7e..c5c3fc6e6c60 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -222,8 +222,6 @@ static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
switch (status) {
case 0:
status = nfs_refresh_inode(inode, fattr);
- set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
- set_cached_acl(inode, ACL_TYPE_DEFAULT, dfacl);
break;
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 78df4eb60f85..110358f4986d 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -21,6 +21,7 @@
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
+#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@@ -68,13 +69,13 @@
#define NFS3_removeres_sz (NFS3_setattrres_sz)
#define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
#define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1)
-#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
-#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3)
+#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1+1)
+#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3+1)
#define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
#define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
-#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2)
+#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2+1)
#define NFS3_fsstatres_sz (1+NFS3_post_op_attr_sz+13)
#define NFS3_fsinfores_sz (1+NFS3_post_op_attr_sz+12)
#define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6)
@@ -84,7 +85,7 @@
#define ACL3_setaclargs_sz (NFS3_fh_sz+1+ \
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
#define ACL3_getaclres_sz (1+NFS3_post_op_attr_sz+1+ \
- XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
+ XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE)+1)
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
static int nfs3_stat_to_errno(enum nfs_stat);
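Each "+1" added to the reply-size macros above reserves one extra XDR word for the pad that a variable-length opaque result may carry, since XDR rounds opaque data up to a 4-byte boundary. A standalone illustration of the word accounting; the macro body below matches the kernel's XDR_QUADLEN:

#include <stdio.h>

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* bytes -> XDR words, rounded up */

int main(void)
{
	/* A 5-byte opaque body occupies two words on the wire:
	 * five data bytes plus three bytes of zero padding. */
	printf("5 bytes -> %d words\n", XDR_QUADLEN(5));	/* 2 */
	printf("8 bytes -> %d words\n", XDR_QUADLEN(8));	/* 2 */
	return 0;
}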
@@ -104,32 +105,6 @@ static const umode_t nfs_type2fmt[] = {
};
/*
- * While encoding arguments, set up the reply buffer in advance to
- * receive reply data directly into the page cache.
- */
-static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
- unsigned int base, unsigned int len,
- unsigned int bufsize)
-{
- struct rpc_auth *auth = req->rq_cred->cr_auth;
- unsigned int replen;
-
- replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
- xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
-}
-
-/*
- * Handle decode buffer overflows out-of-line.
- */
-static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
-{
- dprintk("NFS: %s prematurely hit the end of our receive buffer. "
- "Remaining buffer length is %tu words.\n",
- func, xdr->end - xdr->p);
-}
-
-
-/*
* Encode/decode NFSv3 basic data types
*
* Basic NFSv3 data types are defined in section 2.5 of RFC 1813:
@@ -151,13 +126,10 @@ static int decode_uint32(struct xdr_stream *xdr, u32 *value)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
*value = be32_to_cpup(p);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_uint64(struct xdr_stream *xdr, u64 *value)
@@ -165,13 +137,10 @@ static int decode_uint64(struct xdr_stream *xdr, u64 *value)
__be32 *p;
p = xdr_inline_decode(xdr, 8);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
xdr_decode_hyper(p, value);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -211,14 +180,14 @@ static int decode_inline_filename3(struct xdr_stream *xdr,
u32 count;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
count = be32_to_cpup(p);
if (count > NFS3_MAXNAMLEN)
goto out_nametoolong;
p = xdr_inline_decode(xdr, count);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
*name = (const char *)p;
*length = count;
return 0;
@@ -226,9 +195,6 @@ static int decode_inline_filename3(struct xdr_stream *xdr,
out_nametoolong:
dprintk("NFS: returned filename too long: %u\n", count);
return -ENAMETOOLONG;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -249,8 +215,8 @@ static int decode_nfspath3(struct xdr_stream *xdr)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
count = be32_to_cpup(p);
if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
goto out_nametoolong;
@@ -267,9 +233,6 @@ out_cheating:
dprintk("NFS: server cheating in pathname result: "
"count %u > recvd %u\n", count, recvd);
return -EIO;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -303,13 +266,10 @@ static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -330,13 +290,10 @@ static int decode_writeverf3(struct xdr_stream *xdr, struct nfs_write_verifier *
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
memcpy(verifier->data, p, NFS3_WRITEVERFSIZE);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -364,13 +321,16 @@ static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
+ if (unlikely(*p != cpu_to_be32(NFS3_OK)))
+ goto out_status;
+ *status = 0;
+ return 0;
+out_status:
*status = be32_to_cpup(p);
+ trace_nfs_xdr_status((int)*status);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -453,23 +413,20 @@ static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
length = be32_to_cpup(p++);
if (unlikely(length > NFS3_FHSIZE))
goto out_toobig;
p = xdr_inline_decode(xdr, length);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
fh->size = length;
memcpy(fh->data, p, length);
return 0;
out_toobig:
dprintk("NFS: file handle size (%u) too big\n", length);
return -E2BIG;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static void zero_nfs_fh3(struct nfs_fh *fh)
@@ -655,8 +612,8 @@ static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
p = xdr_decode_ftype3(p, &fmode);
@@ -690,9 +647,6 @@ out_uid:
out_gid:
dprintk("NFS: returned invalid gid\n");
return -EINVAL;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -710,14 +664,11 @@ static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
if (*p != xdr_zero)
return decode_fattr3(xdr, fattr);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -733,8 +684,8 @@ static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
fattr->valid |= NFS_ATTR_FATTR_PRESIZE
| NFS_ATTR_FATTR_PRECHANGE
@@ -747,9 +698,6 @@ static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
fattr->pre_change_attr = nfs_timespec_to_change_attr(&fattr->pre_ctime);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -773,14 +721,11 @@ static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
if (*p != xdr_zero)
return decode_wcc_attr(xdr, fattr);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
@@ -808,15 +753,12 @@ out:
static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
{
__be32 *p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
if (*p != xdr_zero)
return decode_nfs_fh3(xdr, fh);
zero_nfs_fh3(fh);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -953,8 +895,8 @@ static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
const struct nfs3_readlinkargs *args = data;
encode_nfs_fh3(xdr, args->fh);
- prepare_reply_buffer(req, args->pages, args->pgbase,
- args->pglen, NFS3_readlinkres_sz);
+ rpc_prepare_reply_pages(req, args->pages, args->pgbase,
+ args->pglen, NFS3_readlinkres_sz);
}
/*
@@ -986,8 +928,8 @@ static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
unsigned int replen = args->replen ? args->replen : NFS3_readres_sz;
encode_read3args(xdr, args);
- prepare_reply_buffer(req, args->pages, args->pgbase,
- args->count, replen);
+ rpc_prepare_reply_pages(req, args->pages, args->pgbase,
+ args->count, replen);
req->rq_rcv_buf.flags |= XDRBUF_READ;
}
@@ -1279,7 +1221,7 @@ static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
const struct nfs3_readdirargs *args = data;
encode_readdir3args(xdr, args);
- prepare_reply_buffer(req, args->pages, 0,
+ rpc_prepare_reply_pages(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
}
@@ -1321,7 +1263,7 @@ static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
const struct nfs3_readdirargs *args = data;
encode_readdirplus3args(xdr, args);
- prepare_reply_buffer(req, args->pages, 0,
+ rpc_prepare_reply_pages(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
}
@@ -1366,7 +1308,7 @@ static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
encode_nfs_fh3(xdr, args->fh);
encode_uint32(xdr, args->mask);
if (args->mask & (NFS_ACL | NFS_DFACL)) {
- prepare_reply_buffer(req, args->pages, 0,
+ rpc_prepare_reply_pages(req, args->pages, 0,
NFSACL_MAXPAGES << PAGE_SHIFT,
ACL3_getaclres_sz);
req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
@@ -1643,8 +1585,8 @@ static int decode_read3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4 + 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
count = be32_to_cpup(p++);
eof = be32_to_cpup(p++);
ocount = be32_to_cpup(p++);
@@ -1667,9 +1609,6 @@ out_cheating:
count = recvd;
eof = 0;
goto out;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -1690,7 +1629,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
result->op_status = status;
if (status != NFS3_OK)
goto out_status;
- result->replen = 3 + ((xdr_stream_pos(xdr) - pos) >> 2);
+ result->replen = 4 + ((xdr_stream_pos(xdr) - pos) >> 2);
error = decode_read3resok(xdr, result);
out:
return error;
@@ -1731,22 +1670,18 @@ static int decode_write3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
result->count = be32_to_cpup(p++);
result->verf->committed = be32_to_cpup(p++);
if (unlikely(result->verf->committed > NFS_FILE_SYNC))
goto out_badvalue;
if (decode_writeverf3(xdr, &result->verf->verifier))
- goto out_eio;
+ return -EIO;
return result->count;
out_badvalue:
dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
return -EIO;
-out_overflow:
- print_overflow_msg(__func__, xdr);
-out_eio:
- return -EIO;
}
static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -2010,12 +1945,12 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
u64 new_cookie;
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EAGAIN;
if (*p == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EAGAIN;
if (*p == xdr_zero)
return -EAGAIN;
entry->eof = 1;
@@ -2051,8 +1986,8 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
/* In fact, a post_op_fh3: */
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EAGAIN;
if (*p != xdr_zero) {
error = decode_nfs_fh3(xdr, entry->fh);
if (unlikely(error)) {
@@ -2069,9 +2004,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EAGAIN;
out_truncated:
dprintk("NFS: directory entry contains invalid file handle\n");
*entry = old;
@@ -2183,8 +2115,8 @@ static int decode_fsstat3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 8 * 6 + 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
p = xdr_decode_size3(p, &result->tbytes);
p = xdr_decode_size3(p, &result->fbytes);
p = xdr_decode_size3(p, &result->abytes);
@@ -2193,9 +2125,6 @@ static int decode_fsstat3resok(struct xdr_stream *xdr,
xdr_decode_size3(p, &result->afiles);
/* ignore invarsec */
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
@@ -2255,8 +2184,8 @@ static int decode_fsinfo3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
result->rtmax = be32_to_cpup(p++);
result->rtpref = be32_to_cpup(p++);
result->rtmult = be32_to_cpup(p++);
@@ -2270,9 +2199,6 @@ static int decode_fsinfo3resok(struct xdr_stream *xdr,
/* ignore properties */
result->lease_time = 0;
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
@@ -2328,15 +2254,12 @@ static int decode_pathconf3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 4 * 6);
- if (unlikely(p == NULL))
- goto out_overflow;
+ if (unlikely(!p))
+ return -EIO;
result->max_link = be32_to_cpup(p++);
result->max_namelen = be32_to_cpup(p);
/* ignore remaining fields */
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
index 19ec38f85ce0..901cca7542f9 100644
--- a/fs/nfs/nfs42.h
+++ b/fs/nfs/nfs42.h
@@ -20,5 +20,8 @@ loff_t nfs42_proc_llseek(struct file *, loff_t, int);
int nfs42_proc_layoutstats_generic(struct nfs_server *,
struct nfs42_layoutstat_data *);
int nfs42_proc_clone(struct file *, struct file *, loff_t, loff_t, loff_t);
+int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
+ const struct nfs42_layout_error *errors,
+ size_t n);
#endif /* __LINUX_FS_NFS_NFS4_2_H */
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index fed06fd9998d..5196bfa7894d 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -329,9 +329,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
};
ssize_t err, err2;
- if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
- return -EOPNOTSUPP;
-
src_lock = nfs_get_lock_context(nfs_file_open_context(src));
if (IS_ERR(src_lock))
return PTR_ERR(src_lock);
@@ -672,6 +669,170 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
return 0;
}
+static struct nfs42_layouterror_data *
+nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
+{
+ struct nfs42_layouterror_data *data;
+ struct inode *inode = lseg->pls_layout->plh_inode;
+
+ data = kzalloc(sizeof(*data), gfp_flags);
+ if (data) {
+ data->args.inode = data->inode = nfs_igrab_and_active(inode);
+ if (data->inode) {
+ data->lseg = pnfs_get_lseg(lseg);
+ if (data->lseg)
+ return data;
+ nfs_iput_and_deactive(data->inode);
+ }
+ kfree(data);
+ }
+ return NULL;
+}
+
+static void
+nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
+{
+ pnfs_put_lseg(data->lseg);
+ nfs_iput_and_deactive(data->inode);
+ kfree(data);
+}
+
+static void
+nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs42_layouterror_data *data = calldata;
+ struct inode *inode = data->inode;
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
+ unsigned i;
+
+ spin_lock(&inode->i_lock);
+ if (!pnfs_layout_is_valid(lo)) {
+ spin_unlock(&inode->i_lock);
+ rpc_exit(task, 0);
+ return;
+ }
+ for (i = 0; i < data->args.num_errors; i++)
+ nfs4_stateid_copy(&data->args.errors[i].stateid,
+ &lo->plh_stateid);
+ spin_unlock(&inode->i_lock);
+ nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
+ &data->res.seq_res, task);
+}
+
+static void
+nfs42_layouterror_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs42_layouterror_data *data = calldata;
+ struct inode *inode = data->inode;
+ struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
+
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return;
+
+ switch (task->tk_status) {
+ case 0:
+ break;
+ case -NFS4ERR_BADHANDLE:
+ case -ESTALE:
+ pnfs_destroy_layout(NFS_I(inode));
+ break;
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ spin_lock(&inode->i_lock);
+ if (pnfs_layout_is_valid(lo) &&
+ nfs4_stateid_match(&data->args.errors[0].stateid,
+ &lo->plh_stateid)) {
+ LIST_HEAD(head);
+
+ /*
+ * Mark the bad layout state as invalid, then retry
+ * with the current stateid.
+ */
+ pnfs_mark_layout_stateid_invalid(lo, &head);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&head);
+ nfs_commit_inode(inode, 0);
+ } else
+ spin_unlock(&inode->i_lock);
+ break;
+ case -NFS4ERR_OLD_STATEID:
+ spin_lock(&inode->i_lock);
+ if (pnfs_layout_is_valid(lo) &&
+ nfs4_stateid_match_other(&data->args.errors[0].stateid,
+ &lo->plh_stateid)) {
+ /* Do we need to delay before resending? */
+ if (!nfs4_stateid_is_newer(&lo->plh_stateid,
+ &data->args.errors[0].stateid))
+ rpc_delay(task, HZ);
+ rpc_restart_call_prepare(task);
+ }
+ spin_unlock(&inode->i_lock);
+ break;
+ case -ENOTSUPP:
+ case -EOPNOTSUPP:
+ NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
+ }
+}
+
+static void
+nfs42_layouterror_release(void *calldata)
+{
+ struct nfs42_layouterror_data *data = calldata;
+
+ nfs42_free_layouterror_data(data);
+}
+
+static const struct rpc_call_ops nfs42_layouterror_ops = {
+ .rpc_call_prepare = nfs42_layouterror_prepare,
+ .rpc_call_done = nfs42_layouterror_done,
+ .rpc_release = nfs42_layouterror_release,
+};
+
+int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
+ const struct nfs42_layout_error *errors, size_t n)
+{
+ struct inode *inode = lseg->pls_layout->plh_inode;
+ struct nfs42_layouterror_data *data;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_message = &msg,
+ .callback_ops = &nfs42_layouterror_ops,
+ .flags = RPC_TASK_ASYNC,
+ };
+ unsigned int i;
+
+ if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
+ return -EOPNOTSUPP;
+ if (n > NFS42_LAYOUTERROR_MAX)
+ return -EINVAL;
+ data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
+ if (!data)
+ return -ENOMEM;
+ for (i = 0; i < n; i++) {
+ data->args.errors[i] = errors[i];
+ data->args.num_errors++;
+ data->res.num_errors++;
+ }
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ task_setup.callback_data = data;
+ task_setup.rpc_client = NFS_SERVER(inode)->client;
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
+ task = rpc_run_task(&task_setup);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ rpc_put_task(task);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);
+
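A hypothetical caller sketch, not part of this patch: a pNFS layout driver reporting a single failed READ through the new API. The field names follow the nfs42_layout_error/nfs42_device_error layout encoded by nfs42xdr.c below; the surrounding driver context (lseg, dev_id, offset, length) is assumed, and the stateid is filled in later by nfs42_layouterror_prepare():

static void report_read_error_sketch(struct pnfs_layout_segment *lseg,
				     const struct nfs4_deviceid *dev_id,
				     u64 offset, u64 length, int nfs_status)
{
	struct nfs42_layout_error error = {
		.offset		= offset,
		.length		= length,
		.errors[0]	= {
			.dev_id	= *dev_id,
			.status	= nfs_status,
			.opnum	= OP_READ,
		},
	};

	/* Fire and forget: the RPC is async, and the call degrades to
	 * -EOPNOTSUPP once the server is known to lack LAYOUTERROR. */
	nfs42_proc_layouterror(lseg, &error, 1);
}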
static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
struct file *dst_f, struct nfs_lock_context *src_lock,
struct nfs_lock_context *dst_lock, loff_t src_offset,
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 69f72ed2bf87..aed865a84629 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -51,6 +51,15 @@
1 /* opaque devaddr4 length */ + \
XDR_QUADLEN(PNFS_LAYOUTSTATS_MAXSIZE))
#define decode_layoutstats_maxsz (op_decode_hdr_maxsz)
+#define encode_device_error_maxsz (XDR_QUADLEN(NFS4_DEVICEID4_SIZE) + \
+ 1 /* status */ + 1 /* opnum */)
+#define encode_layouterror_maxsz (op_decode_hdr_maxsz + \
+ 2 /* offset */ + \
+ 2 /* length */ + \
+ encode_stateid_maxsz + \
+ 1 /* Array size */ + \
+ encode_device_error_maxsz)
+#define decode_layouterror_maxsz (op_decode_hdr_maxsz)
#define encode_clone_maxsz (encode_stateid_maxsz + \
encode_stateid_maxsz + \
2 /* src offset */ + \
@@ -59,43 +68,53 @@
#define decode_clone_maxsz (op_decode_hdr_maxsz)
#define NFS4_enc_allocate_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_allocate_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_allocate_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_allocate_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_copy_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
encode_copy_maxsz + \
encode_commit_maxsz)
#define NFS4_dec_copy_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
decode_copy_maxsz + \
decode_commit_maxsz)
#define NFS4_enc_offload_cancel_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_offload_cancel_maxsz)
#define NFS4_dec_offload_cancel_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_offload_cancel_maxsz)
#define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_deallocate_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_deallocate_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_deallocate_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_seek_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_seek_maxsz)
#define NFS4_dec_seek_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_seek_maxsz)
#define NFS4_enc_layoutstats_sz (compound_encode_hdr_maxsz + \
@@ -106,6 +125,16 @@
decode_sequence_maxsz + \
decode_putfh_maxsz + \
PNFS_LAYOUTSTATS_MAXDEV * decode_layoutstats_maxsz)
+#define NFS4_enc_layouterror_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ NFS42_LAYOUTERROR_MAX * \
+ encode_layouterror_maxsz)
+#define NFS4_dec_layouterror_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ NFS42_LAYOUTERROR_MAX * \
+ decode_layouterror_maxsz)
#define NFS4_enc_clone_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -223,6 +252,34 @@ static void encode_clone(struct xdr_stream *xdr,
xdr_encode_hyper(p, args->count);
}
+static void encode_device_error(struct xdr_stream *xdr,
+ const struct nfs42_device_error *error)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, NFS4_DEVICEID4_SIZE + 2*4);
+ p = xdr_encode_opaque_fixed(p, error->dev_id.data,
+ NFS4_DEVICEID4_SIZE);
+ *p++ = cpu_to_be32(error->status);
+ *p = cpu_to_be32(error->opnum);
+}
+
+static void encode_layouterror(struct xdr_stream *xdr,
+ const struct nfs42_layout_error *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_LAYOUTERROR, decode_layouterror_maxsz, hdr);
+ p = reserve_space(xdr, 8 + 8);
+ p = xdr_encode_hyper(p, args->offset);
+ p = xdr_encode_hyper(p, args->length);
+ encode_nfs4_stateid(xdr, &args->stateid);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(1);
+ encode_device_error(xdr, &args->errors[0]);
+}
+
/*
* Encode ALLOCATE request
*/
@@ -381,6 +438,27 @@ static void nfs4_xdr_enc_clone(struct rpc_rqst *req,
encode_nops(&hdr);
}
+/*
+ * Encode LAYOUTERROR request
+ */
+static void nfs4_xdr_enc_layouterror(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_layouterror_args *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+ int i;
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+ for (i = 0; i < args->num_errors; i++)
+ encode_layouterror(xdr, &args->errors[i], &hdr);
+ encode_nops(&hdr);
+}
+
static int decode_allocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
{
return decode_op_hdr(xdr, OP_ALLOCATE);
@@ -394,7 +472,7 @@ static int decode_write_response(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
count = be32_to_cpup(p);
if (count > 1)
return -EREMOTEIO;
@@ -402,18 +480,14 @@ static int decode_write_response(struct xdr_stream *xdr,
status = decode_opaque_fixed(xdr, &res->stateid,
NFS4_STATEID_SIZE);
if (unlikely(status))
- goto out_overflow;
+ return -EIO;
}
p = xdr_inline_decode(xdr, 8 + 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
p = xdr_decode_hyper(p, &res->count);
res->verifier.committed = be32_to_cpup(p);
return decode_verifier(xdr, &res->verifier.verifier);
-
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_copy_requirements(struct xdr_stream *xdr,
@@ -422,14 +496,11 @@ static int decode_copy_requirements(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 4 + 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->consecutive = be32_to_cpup(p++);
res->synchronous = be32_to_cpup(p++);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_copy(struct xdr_stream *xdr, struct nfs42_copy_res *res)
@@ -474,15 +545,11 @@ static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res)
p = xdr_inline_decode(xdr, 4 + 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->sr_eof = be32_to_cpup(p++);
p = xdr_decode_hyper(p, &res->sr_offset);
return 0;
-
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_layoutstats(struct xdr_stream *xdr)
@@ -495,6 +562,11 @@ static int decode_clone(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_CLONE);
}
+static int decode_layouterror(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_LAYOUTERROR);
+}
+
/*
* Decode ALLOCATE request
*/
@@ -704,4 +776,30 @@ out:
return status;
}
+/*
+ * Decode LAYOUTERROR request
+ */
+static int nfs4_xdr_dec_layouterror(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs42_layouterror_res *res = data;
+ struct compound_hdr hdr;
+ int status, i;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+
+ for (i = 0; i < res->num_errors && status == 0; i++)
+ status = decode_layouterror(xdr);
+out:
+ res->rpc_status = status;
+ return status;
+}
+
#endif /* __LINUX_FS_NFS_NFS4_2XDR_H */
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 2548405da1f7..1339ede979af 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -42,7 +42,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
}
#ifdef CONFIG_NFS_V4_1
-/**
+/*
* Per auth flavor data server rpc clients
*/
struct nfs4_ds_server {
@@ -51,7 +51,9 @@ struct nfs4_ds_server {
};
/**
- * Common lookup case for DS I/O
+ * nfs4_find_ds_client - Common lookup case for DS I/O
+ * @ds_clp: pointer to the DS's nfs_client
+ * @flavor: rpc auth flavour to match
*/
static struct nfs4_ds_server *
nfs4_find_ds_client(struct nfs_client *ds_clp, rpc_authflavor_t flavor)
@@ -118,9 +120,13 @@ nfs4_free_ds_server(struct nfs4_ds_server *dss)
}
/**
-* Find or create a DS rpc client with th MDS server rpc client auth flavor
-* in the nfs_client cl_ds_clients list.
-*/
+ * nfs4_find_or_create_ds_client - Find or create a DS rpc client
+ * @ds_clp: pointer to the DS's nfs_client
+ * @inode: pointer to the inode
+ *
+ * Find or create a DS rpc client with the MDS server rpc client auth flavor
+ * in the nfs_client cl_ds_clients list.
+ */
struct rpc_clnt *
nfs4_find_or_create_ds_client(struct nfs_client *ds_clp, struct inode *inode)
{
@@ -145,7 +151,6 @@ static void
nfs4_shutdown_ds_clients(struct nfs_client *clp)
{
struct nfs4_ds_server *dss;
- LIST_HEAD(shutdown_list);
while (!list_empty(&clp->cl_ds_clients)) {
dss = list_entry(clp->cl_ds_clients.next,
@@ -284,7 +289,7 @@ static int nfs4_init_callback(struct nfs_client *clp)
/**
* nfs40_init_client - nfs_client initialization tasks for NFSv4.0
- * @clp - nfs_client to initialize
+ * @clp: nfs_client to initialize
*
* Returns zero on success, or a negative errno if some error occurred.
*/
@@ -312,7 +317,7 @@ int nfs40_init_client(struct nfs_client *clp)
/**
* nfs41_init_client - nfs_client initialization tasks for NFSv4.1+
- * @clp - nfs_client to initialize
+ * @clp: nfs_client to initialize
*
* Returns zero on success, or a negative errno if some error occurred.
*/
@@ -360,9 +365,7 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
* nfs4_init_client - Initialise an NFS4 client record
*
* @clp: nfs_client to initialise
- * @timeparms: timeout parameters for underlying RPC transport
- * @ip_addr: callback IP address in presentation format
- * @authflavor: authentication flavor for underlying RPC transport
+ * @cl_init: pointer to nfs_client_initdata
*
* Returns pointer to an NFS client, or an ERR_PTR value.
*/
@@ -649,13 +652,13 @@ nfs4_check_server_scope(struct nfs41_server_scope *s1,
/**
* nfs4_detect_session_trunking - Checks for session trunking.
- *
- * Called after a successful EXCHANGE_ID on a multi-addr connection.
- * Upon success, add the transport.
- *
* @clp: original mount nfs_client
* @res: result structure from an exchange_id using the original mount
* nfs_client with a new multi_addr transport
+ * @xprt: pointer to the transport to add.
+ *
+ * Called after a successful EXCHANGE_ID on a multi-addr connection.
+ * Upon success, add the transport.
*
* Returns zero on success, otherwise -EINVAL
*
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 45b2322e092d..00d17198ee12 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -133,8 +133,10 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t count, unsigned int flags)
{
+ if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY))
+ return -EOPNOTSUPP;
if (file_inode(file_in) == file_inode(file_out))
- return -EINVAL;
+ return -EOPNOTSUPP;
return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
}
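Returning -EOPNOTSUPP for the same-inode case (instead of -EINVAL) matters because of how the VFS reacts to it. A simplified sketch of the vfs_copy_file_range() control flow, not the verbatim kernel function: -EOPNOTSUPP from the filesystem falls back to an ordinary local copy, while other errors propagate to userspace.

static ssize_t copy_file_range_fallback_sketch(struct file *in, loff_t pos_in,
					       struct file *out, loff_t pos_out,
					       size_t len, unsigned int flags)
{
	ssize_t ret = -EOPNOTSUPP;

	if (out->f_op->copy_file_range)
		ret = out->f_op->copy_file_range(in, pos_in, out, pos_out,
						 len, flags);
	if (ret == -EOPNOTSUPP)
		/* No server-side copy: splice the data locally instead. */
		ret = do_splice_direct(in, &pos_in, out, &pos_out, len, 0);
	return ret;
}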
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 24f06dcc2b08..2e460c33ae48 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -137,6 +137,7 @@ static size_t nfs_parse_server_name(char *string, size_t len,
/**
* nfs_find_best_sec - Find a security mechanism supported locally
+ * @clnt: pointer to rpc_clnt
* @server: NFS server struct
* @flavors: List of security tuples returned by SECINFO procedure
*
@@ -288,8 +289,8 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
/**
* nfs_follow_referral - set up mountpoint when hitting a referral on moved error
- * @dentry - parent directory
- * @locations - array of NFSv4 server location information
+ * @dentry: parent directory
+ * @locations: array of NFSv4 server location information
*
*/
static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 557a5d636183..741ff8c9c6ed 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -730,33 +730,41 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
res->sr_slot = NULL;
}
+static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
+ u32 seqnr)
+{
+ if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
+ slot->seq_nr_highest_sent = seqnr;
+}
+
+static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
+ u32 seqnr)
+{
+ slot->seq_nr_highest_sent = seqnr;
+ slot->seq_nr_last_acked = seqnr;
+}
+
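Both helpers rely on serial-number arithmetic: casting the unsigned difference to s32 treats the 32-bit sequence space as a circle, so comparisons stay correct when seq_nr wraps past UINT_MAX. A standalone demonstration, with userspace stand-ins for the kernel types:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the (s32)(a - b) > 0 test in nfs4_slot_sequence_record_sent(). */
static int seq_is_newer(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", seq_is_newer(5, 3));		/* 1: ordinary case */
	printf("%d\n", seq_is_newer(2, 0xfffffffeU));	/* 1: across wraparound */
	printf("%d\n", seq_is_newer(7, 7));		/* 0: equal is not newer */
	return 0;
}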
static int nfs41_sequence_process(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
struct nfs4_session *session;
struct nfs4_slot *slot = res->sr_slot;
struct nfs_client *clp;
- bool interrupted = false;
int ret = 1;
if (slot == NULL)
goto out_noaction;
/* don't increment the sequence number if the task wasn't sent */
- if (!RPC_WAS_SENT(task))
+ if (!RPC_WAS_SENT(task) || slot->seq_done)
goto out;
session = slot->table->session;
- if (slot->interrupted) {
- if (res->sr_status != -NFS4ERR_DELAY)
- slot->interrupted = 0;
- interrupted = true;
- }
-
trace_nfs4_sequence_done(session, res);
/* Check the SEQUENCE operation status */
switch (res->sr_status) {
case 0:
+ /* Mark this sequence number as having been acked */
+ nfs4_slot_sequence_acked(slot, slot->seq_nr);
/* Update the slot's sequence and clientid lease timer */
slot->seq_done = 1;
clp = session->clp;
@@ -771,9 +779,9 @@ static int nfs41_sequence_process(struct rpc_task *task,
* sr_status remains 1 if an RPC level error occurred.
* The server may or may not have processed the sequence
* operation.
- * Mark the slot as having hosted an interrupted RPC call.
*/
- slot->interrupted = 1;
+ nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
+ slot->seq_done = 1;
goto out;
case -NFS4ERR_DELAY:
/* The server detected a resend of the RPC call and
@@ -784,6 +792,7 @@ static int nfs41_sequence_process(struct rpc_task *task,
__func__,
slot->slot_nr,
slot->seq_nr);
+ nfs4_slot_sequence_acked(slot, slot->seq_nr);
goto out_retry;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_SEQ_FALSE_RETRY:
@@ -791,6 +800,7 @@ static int nfs41_sequence_process(struct rpc_task *task,
* The server thinks we tried to replay a request.
* Retry the call after bumping the sequence ID.
*/
+ nfs4_slot_sequence_acked(slot, slot->seq_nr);
goto retry_new_seq;
case -NFS4ERR_BADSLOT:
/*
@@ -801,21 +811,28 @@ static int nfs41_sequence_process(struct rpc_task *task,
goto session_recover;
goto retry_nowait;
case -NFS4ERR_SEQ_MISORDERED:
+ nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
/*
- * Was the last operation on this sequence interrupted?
- * If so, retry after bumping the sequence number.
- */
- if (interrupted)
- goto retry_new_seq;
- /*
- * Could this slot have been previously retired?
- * If so, then the server may be expecting seq_nr = 1!
+ * Were one or more calls using this slot interrupted?
+ * If the server never received the request, then our
+ * transmitted slot sequence number may be too high.
*/
- if (slot->seq_nr != 1) {
- slot->seq_nr = 1;
+ if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
+ slot->seq_nr--;
goto retry_nowait;
}
- goto session_recover;
+ /*
+ * RFC5661:
+ * A retry might be sent while the original request is
+ * still in progress on the replier. The replier SHOULD
+ * deal with the issue by returning NFS4ERR_DELAY as the
+ * reply to SEQUENCE or CB_SEQUENCE operation, but
+ * implementations MAY return NFS4ERR_SEQ_MISORDERED.
+ *
+ * Restart the search after a delay.
+ */
+ slot->seq_nr = slot->seq_nr_highest_sent;
+ goto out_retry;
default:
/* Just update the slot sequence no. */
slot->seq_done = 1;
@@ -906,17 +923,6 @@ static const struct rpc_call_ops nfs41_call_sync_ops = {
.rpc_call_done = nfs41_call_sync_done,
};
-static void
-nfs4_sequence_process_interrupted(struct nfs_client *client,
- struct nfs4_slot *slot, const struct cred *cred)
-{
- struct rpc_task *task;
-
- task = _nfs41_proc_sequence(client, cred, slot, true);
- if (!IS_ERR(task))
- rpc_put_task_async(task);
-}
-
#else /* !CONFIG_NFS_V4_1 */
static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
@@ -937,16 +943,15 @@ int nfs4_sequence_done(struct rpc_task *task,
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);
-static void
-nfs4_sequence_process_interrupted(struct nfs_client *client,
- struct nfs4_slot *slot, const struct cred *cred)
+#endif /* !CONFIG_NFS_V4_1 */
+
+static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
{
- WARN_ON_ONCE(1);
- slot->interrupted = 0;
+ res->sr_timestamp = jiffies;
+ res->sr_status_flags = 0;
+ res->sr_status = 1;
}
-#endif /* !CONFIG_NFS_V4_1 */
-
static
void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -958,10 +963,6 @@ void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
args->sa_slot = slot;
res->sr_slot = slot;
- res->sr_timestamp = jiffies;
- res->sr_status_flags = 0;
- res->sr_status = 1;
-
}
int nfs4_setup_sequence(struct nfs_client *client,
@@ -982,31 +983,25 @@ int nfs4_setup_sequence(struct nfs_client *client,
task->tk_timeout = 0;
}
- for (;;) {
- spin_lock(&tbl->slot_tbl_lock);
- /* The state manager will wait until the slot table is empty */
- if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
- goto out_sleep;
-
- slot = nfs4_alloc_slot(tbl);
- if (IS_ERR(slot)) {
- /* Try again in 1/4 second */
- if (slot == ERR_PTR(-ENOMEM))
- task->tk_timeout = HZ >> 2;
- goto out_sleep;
- }
- spin_unlock(&tbl->slot_tbl_lock);
+ spin_lock(&tbl->slot_tbl_lock);
+ /* The state manager will wait until the slot table is empty */
+ if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
+ goto out_sleep;
- if (likely(!slot->interrupted))
- break;
- nfs4_sequence_process_interrupted(client,
- slot, task->tk_msg.rpc_cred);
+ slot = nfs4_alloc_slot(tbl);
+ if (IS_ERR(slot)) {
+ /* Try again in 1/4 second */
+ if (slot == ERR_PTR(-ENOMEM))
+ task->tk_timeout = HZ >> 2;
+ goto out_sleep;
}
+ spin_unlock(&tbl->slot_tbl_lock);
nfs4_sequence_attach_slot(args, res, slot);
trace_nfs4_setup_sequence(session, args);
out_start:
+ nfs41_sequence_res_init(res);
rpc_call_start(task);
return 0;
@@ -1555,6 +1550,10 @@ static void nfs_clear_open_stateid(struct nfs4_state *state,
static void nfs_set_open_stateid_locked(struct nfs4_state *state,
const nfs4_stateid *stateid, nfs4_stateid *freeme)
+ __must_hold(&state->owner->so_lock)
+ __must_hold(&state->seqlock)
+ __must_hold(RCU)
{
DEFINE_WAIT(wait);
int status = 0;
@@ -2934,7 +2933,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
}
out:
- nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+ if (!opendata->cancelled)
+ nfs4_sequence_free_slot(&opendata->o_res.seq_res);
return ret;
}
@@ -5963,7 +5963,7 @@ out:
/**
* nfs4_proc_setclientid_confirm - Confirm client ID
* @clp: state data structure
- * @res: result of a previous SETCLIENTID
+ * @arg: result of a previous SETCLIENTID
* @cred: credential to use for this call
*
* Returns zero, a negative errno, or a negative NFS4ERR status code.
@@ -6302,7 +6302,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->arg.seqid = seqid;
p->res.seqid = seqid;
p->lsp = lsp;
- refcount_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
p->l_ctx = nfs_get_lock_context(ctx);
@@ -6527,7 +6526,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
p->server = server;
- refcount_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
locks_init_lock(&p->fl);
locks_copy_lock(&p->fl, fl);
@@ -7527,7 +7525,7 @@ int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
return status;
}
-/**
+/*
* If 'use_integrity' is true and the state management nfs_client
* cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
* and the machine credential as per RFC3530bis and RFC5661 Security
@@ -8937,10 +8935,12 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
if (status != 0)
goto out;
- /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
- if (task->tk_status < 0 || lgp->res.layoutp->len == 0) {
+ if (task->tk_status < 0) {
status = nfs4_layoutget_handle_exception(task, lgp, &exception);
*timeout = exception.timeout;
+ } else if (lgp->res.layoutp->len == 0) {
+ status = -EAGAIN;
+ *timeout = nfs4_update_delay(&exception.timeout);
} else
lseg = pnfs_layout_process(lgp);
out:
@@ -9219,7 +9219,7 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
return status;
}
-/**
+/*
* Use the state management nfs_client cl_rpcclient, which uses krb5i (if
* possible) as per RFC3530bis and RFC5661 Security Considerations sections
*/
@@ -9484,7 +9484,7 @@ static const struct rpc_call_ops nfs41_free_stateid_ops = {
* @server: server / transport on which to perform the operation
* @stateid: state ID to release
* @cred: credential
- * @is_recovery: set to true if this call needs to be privileged
+ * @privileged: set to true if this call needs to be privileged
*
* Note: this function is always asynchronous.
*/
@@ -9691,7 +9691,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
| NFS_CAP_DEALLOCATE
| NFS_CAP_SEEK
| NFS_CAP_LAYOUTSTATS
- | NFS_CAP_CLONE,
+ | NFS_CAP_CLONE
+ | NFS_CAP_LAYOUTERROR,
.init_client = nfs41_init_client,
.shutdown_client = nfs41_shutdown_client,
.match_stateid = nfs41_match_stateid,
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index a5489d70a724..bcb532def9e2 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -55,7 +55,7 @@ static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)
/**
* nfs4_slot_tbl_drain_complete - wake waiters when drain is complete
- * @tbl - controlling slot table
+ * @tbl: controlling slot table
*
*/
void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
@@ -110,6 +110,8 @@ static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl,
slot->table = tbl;
slot->slot_nr = slotid;
slot->seq_nr = seq_init;
+ slot->seq_nr_highest_sent = seq_init;
+ slot->seq_nr_last_acked = seq_init - 1;
}
return slot;
}
@@ -276,7 +278,8 @@ static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl,
p = &tbl->slots;
while (*p) {
(*p)->seq_nr = ivalue;
- (*p)->interrupted = 0;
+ (*p)->seq_nr_highest_sent = ivalue;
+ (*p)->seq_nr_last_acked = ivalue - 1;
p = &(*p)->next;
}
tbl->highest_used_slotid = NFS4_NO_SLOT;
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 3c550f297561..b996ee23f1ba 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -10,7 +10,7 @@
/* maximum number of slots to use */
#define NFS4_DEF_SLOT_TABLE_SIZE (64U)
-#define NFS4_DEF_CB_SLOT_TABLE_SIZE (1U)
+#define NFS4_DEF_CB_SLOT_TABLE_SIZE (16U)
#define NFS4_MAX_SLOT_TABLE (1024U)
#define NFS4_NO_SLOT ((u32)-1)
@@ -23,8 +23,9 @@ struct nfs4_slot {
unsigned long generation;
u32 slot_nr;
u32 seq_nr;
- unsigned int interrupted : 1,
- privileged : 1,
+ u32 seq_nr_last_acked;
+ u32 seq_nr_highest_sent;
+ unsigned int privileged : 1,
seq_done : 1;
};
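Initializing seq_nr_last_acked to one below the initial sequence number (as nfs4_new_slot() and nfs4_reset_slot_table() do above) encodes "nothing acked yet". A standalone model of the bookkeeping and of the NFS4ERR_SEQ_MISORDERED test in nfs4proc.c, with stand-in types:

#include <stdint.h>
#include <stdio.h>

struct slot_model {
	uint32_t seq_nr;
	uint32_t seq_nr_highest_sent;
	uint32_t seq_nr_last_acked;
};

static void slot_reset(struct slot_model *s, uint32_t ivalue)
{
	s->seq_nr = ivalue;
	s->seq_nr_highest_sent = ivalue;
	s->seq_nr_last_acked = ivalue - 1;	/* nothing acked yet */
}

/* Mirrors the SEQ_MISORDERED test: more than one sequence number
 * outstanding beyond the last ack means a call was interrupted. */
static int slot_was_interrupted(const struct slot_model *s)
{
	return (int32_t)(s->seq_nr - s->seq_nr_last_acked) > 1;
}

int main(void)
{
	struct slot_model s;

	slot_reset(&s, 1);
	printf("fresh slot interrupted? %d\n", slot_was_interrupted(&s)); /* 0 */
	s.seq_nr = 3;	/* two requests sent, replies never seen */
	printf("after lost replies?    %d\n", slot_was_interrupted(&s)); /* 1 */
	return 0;
}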
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 02488b50534a..3de36479ed7a 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -563,6 +563,7 @@ static void nfs4_gc_state_owners(struct nfs_server *server)
* nfs4_get_state_owner - Look up a state owner given a credential
* @server: nfs_server to search
* @cred: RPC credential to match
+ * @gfp_flags: allocation mode
*
* Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
*/
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index b4557cf685fb..cd1a5c08da9a 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -524,6 +524,31 @@ TRACE_EVENT(nfs4_setup_sequence,
)
);
+TRACE_EVENT(nfs4_xdr_status,
+ TP_PROTO(
+ u32 op,
+ int error
+ ),
+
+ TP_ARGS(op, error),
+
+ TP_STRUCT__entry(
+ __field(u32, op)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op;
+ __entry->error = -error;
+ ),
+
+ TP_printk(
+ "operation %d: nfs status %d (%s)",
+ __entry->op,
+ __entry->error, show_nfsv4_errors(__entry->error)
+ )
+);
+
DECLARE_EVENT_CLASS(nfs4_open_event,
TP_PROTO(
const struct nfs_open_context *ctx,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 2fc8f6fa25e4..602446158bfb 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -54,6 +54,7 @@
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
+#include "nfs4trace.h"
#include "internal.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
@@ -214,14 +215,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
nfs4_fattr_bitmap_maxsz)
#define encode_read_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + 3)
-#define decode_read_maxsz (op_decode_hdr_maxsz + 2)
+#define decode_read_maxsz (op_decode_hdr_maxsz + 2 + 1)
#define encode_readdir_maxsz (op_encode_hdr_maxsz + \
2 + encode_verifier_maxsz + 5 + \
nfs4_label_maxsz)
#define decode_readdir_maxsz (op_decode_hdr_maxsz + \
- decode_verifier_maxsz)
+ decode_verifier_maxsz + 1)
#define encode_readlink_maxsz (op_encode_hdr_maxsz)
-#define decode_readlink_maxsz (op_decode_hdr_maxsz + 1)
+#define decode_readlink_maxsz (op_decode_hdr_maxsz + 1 + 1)
#define encode_write_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + 4)
#define decode_write_maxsz (op_decode_hdr_maxsz + \
@@ -283,14 +284,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define decode_delegreturn_maxsz (op_decode_hdr_maxsz)
#define encode_getacl_maxsz (encode_getattr_maxsz)
#define decode_getacl_maxsz (op_decode_hdr_maxsz + \
- nfs4_fattr_bitmap_maxsz + 1)
+ nfs4_fattr_bitmap_maxsz + 1 + 1)
#define encode_setacl_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + 3)
#define decode_setacl_maxsz (decode_setattr_maxsz)
#define encode_fs_locations_maxsz \
(encode_getattr_maxsz)
#define decode_fs_locations_maxsz \
- (0)
+ (1)
#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4))
@@ -391,12 +392,13 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
1 /* opaque devaddr4 length */ + \
/* devaddr4 payload is read into page */ \
1 /* notification bitmap length */ + \
- 1 /* notification bitmap, word 0 */)
+ 1 /* notification bitmap, word 0 */ + \
+ 1 /* possible XDR padding */)
#define encode_layoutget_maxsz (op_encode_hdr_maxsz + 10 + \
encode_stateid_maxsz)
#define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \
decode_stateid_maxsz + \
- XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE))
+ XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE) + 1)
#define encode_layoutcommit_maxsz (op_encode_hdr_maxsz + \
2 /* offset */ + \
2 /* length */ + \
@@ -1015,12 +1017,11 @@ static void encode_compound_hdr(struct xdr_stream *xdr,
struct compound_hdr *hdr)
{
__be32 *p;
- struct rpc_auth *auth = req->rq_cred->cr_auth;
/* initialize running count of expected bytes in reply.
* NOTE: the replied tag SHOULD be the same as the one sent,
* but this is not required as a MUST for the server to do so. */
- hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen;
+ hdr->replen = 3 + hdr->taglen;
WARN_ON_ONCE(hdr->taglen > NFS4_MAXTAGLEN);
encode_string(xdr, hdr->taglen, hdr->tag);
@@ -2340,9 +2341,9 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr);
if (args->lg_args) {
encode_layoutget(xdr, args->lg_args, &hdr);
- xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
- args->lg_args->layout.pages,
- 0, args->lg_args->layout.pglen);
+ rpc_prepare_reply_pages(req, args->lg_args->layout.pages, 0,
+ args->lg_args->layout.pglen,
+ hdr.replen);
}
encode_nops(&hdr);
}
@@ -2386,9 +2387,9 @@ static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr);
if (args->lg_args) {
encode_layoutget(xdr, args->lg_args, &hdr);
- xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
- args->lg_args->layout.pages,
- 0, args->lg_args->layout.pglen);
+ rpc_prepare_reply_pages(req, args->lg_args->layout.pages, 0,
+ args->lg_args->layout.pglen,
+ hdr.replen);
}
encode_nops(&hdr);
}
@@ -2498,8 +2499,8 @@ static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_putfh(xdr, args->fh, &hdr);
encode_readlink(xdr, args, req, &hdr);
- xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
- args->pgbase, args->pglen);
+ rpc_prepare_reply_pages(req, args->pages, args->pgbase,
+ args->pglen, hdr.replen);
encode_nops(&hdr);
}
@@ -2519,11 +2520,8 @@ static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_putfh(xdr, args->fh, &hdr);
encode_readdir(xdr, args, req, &hdr);
- xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
- args->pgbase, args->count);
- dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
- __func__, hdr.replen << 2, args->pages,
- args->pgbase, args->count);
+ rpc_prepare_reply_pages(req, args->pages, args->pgbase,
+ args->count, hdr.replen);
encode_nops(&hdr);
}
@@ -2543,8 +2541,8 @@ static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_putfh(xdr, args->fh, &hdr);
encode_read(xdr, args, &hdr);
- xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
- args->pages, args->pgbase, args->count);
+ rpc_prepare_reply_pages(req, args->pages, args->pgbase,
+ args->count, hdr.replen);
req->rq_rcv_buf.flags |= XDRBUF_READ;
encode_nops(&hdr);
}
@@ -2590,9 +2588,8 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_getattr(xdr, nfs4_acl_bitmap, NULL,
ARRAY_SIZE(nfs4_acl_bitmap), &hdr);
- xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
- args->acl_pages, 0, args->acl_len);
-
+ rpc_prepare_reply_pages(req, args->acl_pages, 0,
+ args->acl_len, replen + 1);
encode_nops(&hdr);
}
@@ -2813,9 +2810,8 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
encode_fs_locations(xdr, args->bitmask, &hdr);
}
- /* Set up reply kvec to capture returned fs_locations array. */
- xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
- (struct page **)&args->page, 0, PAGE_SIZE);
+ rpc_prepare_reply_pages(req, (struct page **)&args->page, 0,
+ PAGE_SIZE, replen + 1);
encode_nops(&hdr);
}
@@ -3017,10 +3013,8 @@ static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
/* set up reply kvec. Subtract notification bitmap max size (2)
* so that notification bitmap is put in xdr_buf tail */
- xdr_inline_pages(&req->rq_rcv_buf, (hdr.replen - 2) << 2,
- args->pdev->pages, args->pdev->pgbase,
- args->pdev->pglen);
-
+ rpc_prepare_reply_pages(req, args->pdev->pages, args->pdev->pgbase,
+ args->pdev->pglen, hdr.replen - 2);
encode_nops(&hdr);
}
@@ -3041,9 +3035,8 @@ static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
encode_putfh(xdr, NFS_FH(args->inode), &hdr);
encode_layoutget(xdr, args, &hdr);
- xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
- args->layout.pages, 0, args->layout.pglen);
-
+ rpc_prepare_reply_pages(req, args->layout.pages, 0,
+ args->layout.pglen, hdr.replen);
encode_nops(&hdr);
}
@@ -3144,22 +3137,12 @@ static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req,
}
#endif /* CONFIG_NFS_V4_1 */
-static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
-{
- dprintk("nfs: %s: prematurely hit end of receive buffer. "
- "Remaining buffer length is %tu words.\n",
- func, xdr->end - xdr->p);
-}
-
static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string)
{
ssize_t ret = xdr_stream_decode_opaque_inline(xdr, (void **)string,
NFS4_OPAQUE_LIMIT);
- if (unlikely(ret < 0)) {
- if (ret == -EBADMSG)
- print_overflow_msg(__func__, xdr);
+ if (unlikely(ret < 0))
return -EIO;
- }
*len = ret;
return 0;
}
@@ -3170,22 +3153,19 @@ static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
hdr->status = be32_to_cpup(p++);
hdr->taglen = be32_to_cpup(p);
p = xdr_inline_decode(xdr, hdr->taglen + 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
hdr->tag = (char *)p;
p += XDR_QUADLEN(hdr->taglen);
hdr->nops = be32_to_cpup(p);
if (unlikely(hdr->nops < 1))
return nfs4_stat_to_errno(hdr->status);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
@@ -3201,11 +3181,14 @@ static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
opnum = be32_to_cpup(p++);
if (unlikely(opnum != expected))
goto out_bad_operation;
+ if (unlikely(*p != cpu_to_be32(NFS_OK)))
+ goto out_status;
+ *nfs_retval = 0;
+ return true;
+out_status:
nfserr = be32_to_cpup(p);
- if (nfserr == NFS_OK)
- *nfs_retval = 0;
- else
- *nfs_retval = nfs4_stat_to_errno(nfserr);
+ trace_nfs4_xdr_status(opnum, nfserr);
+ *nfs_retval = nfs4_stat_to_errno(nfserr);
return true;
out_bad_operation:
dprintk("nfs: Server returned operation"
@@ -3214,7 +3197,6 @@ out_bad_operation:
*nfs_retval = -EREMOTEIO;
return false;
out_overflow:
- print_overflow_msg(__func__, xdr);
*nfs_retval = -EIO;
return false;
}
@@ -3235,10 +3217,9 @@ static int decode_ace(struct xdr_stream *xdr, void *ace)
char *str;
p = xdr_inline_decode(xdr, 12);
- if (likely(p))
- return decode_opaque_inline(xdr, &strlen, &str);
- print_overflow_msg(__func__, xdr);
- return -EIO;
+ if (unlikely(!p))
+ return -EIO;
+ return decode_opaque_inline(xdr, &strlen, &str);
}
static ssize_t
@@ -3249,10 +3230,9 @@ decode_bitmap4(struct xdr_stream *xdr, uint32_t *bitmap, size_t sz)
ret = xdr_stream_decode_uint32_array(xdr, bitmap, sz);
if (likely(ret >= 0))
return ret;
- if (ret == -EMSGSIZE)
- return sz;
- print_overflow_msg(__func__, xdr);
- return -EIO;
+ if (ret != -EMSGSIZE)
+ return -EIO;
+ return sz;
}
static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
@@ -3268,13 +3248,10 @@ static int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, unsigne
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*attrlen = be32_to_cpup(p);
*savep = xdr_stream_pos(xdr);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask)
@@ -3303,7 +3280,7 @@ static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*type = be32_to_cpup(p);
if (*type < NF4REG || *type > NF4NAMEDATTR) {
dprintk("%s: bad type %d\n", __func__, *type);
@@ -3314,9 +3291,6 @@ static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
}
dprintk("%s: type=0%o\n", __func__, nfs_type2fmt[*type]);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_fh_expire_type(struct xdr_stream *xdr,
@@ -3330,15 +3304,12 @@ static int decode_attr_fh_expire_type(struct xdr_stream *xdr,
if (likely(bitmap[0] & FATTR4_WORD0_FH_EXPIRE_TYPE)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*type = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_FH_EXPIRE_TYPE;
}
dprintk("%s: expire type=0x%x\n", __func__, *type);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change)
@@ -3352,7 +3323,7 @@ static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, change);
bitmap[0] &= ~FATTR4_WORD0_CHANGE;
ret = NFS_ATTR_FATTR_CHANGE;
@@ -3360,9 +3331,6 @@ static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
dprintk("%s: change attribute=%Lu\n", __func__,
(unsigned long long)*change);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size)
@@ -3376,16 +3344,13 @@ static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *
if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, size);
bitmap[0] &= ~FATTR4_WORD0_SIZE;
ret = NFS_ATTR_FATTR_SIZE;
}
dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3398,15 +3363,12 @@ static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, ui
if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
}
dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3419,15 +3381,12 @@ static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap,
if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
}
dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fsid *fsid)
@@ -3442,7 +3401,7 @@ static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs
if (likely(bitmap[0] & FATTR4_WORD0_FSID)) {
p = xdr_inline_decode(xdr, 16);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
p = xdr_decode_hyper(p, &fsid->major);
xdr_decode_hyper(p, &fsid->minor);
bitmap[0] &= ~FATTR4_WORD0_FSID;
@@ -3452,9 +3411,6 @@ static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs
(unsigned long long)fsid->major,
(unsigned long long)fsid->minor);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3467,15 +3423,12 @@ static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
}
dprintk("%s: file size=%u\n", __func__, (unsigned int)*res);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res)
@@ -3487,14 +3440,11 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *
if (likely(bitmap[0] & FATTR4_WORD0_RDATTR_ERROR)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
*res = -be32_to_cpup(p);
}
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_exclcreat_supported(struct xdr_stream *xdr,
@@ -3526,13 +3476,13 @@ static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, stru
if (likely(bitmap[0] & FATTR4_WORD0_FILEHANDLE)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
len = be32_to_cpup(p);
if (len > NFS4_FHSIZE)
return -EIO;
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
if (fh != NULL) {
memcpy(fh->data, p, len);
fh->size = len;
@@ -3540,9 +3490,6 @@ static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, stru
bitmap[0] &= ~FATTR4_WORD0_FILEHANDLE;
}
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3555,15 +3502,12 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT;
}
dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
@@ -3577,16 +3521,13 @@ static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, fileid);
bitmap[0] &= ~FATTR4_WORD0_FILEID;
ret = NFS_ATTR_FATTR_FILEID;
}
dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
@@ -3600,16 +3541,13 @@ static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitma
if (likely(bitmap[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, fileid);
bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
ret = NFS_ATTR_FATTR_MOUNTED_ON_FILEID;
}
dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3623,15 +3561,12 @@ static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL;
}
dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3645,15 +3580,12 @@ static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_FREE;
}
dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3667,15 +3599,12 @@ static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL;
}
dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
@@ -3686,7 +3615,7 @@ static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
n = be32_to_cpup(p);
if (n == 0)
goto root_path;
@@ -3718,9 +3647,6 @@ out_eio:
dprintk(" status %d", status);
status = -EIO;
goto out;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res)
@@ -3745,7 +3671,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
goto out;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ goto out_eio;
n = be32_to_cpup(p);
if (n <= 0)
goto out_eio;
@@ -3758,7 +3684,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
loc = &res->locations[res->nlocations];
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ goto out_eio;
m = be32_to_cpup(p);
dprintk("%s: servers:\n", __func__);
@@ -3796,8 +3722,6 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
out:
dprintk("%s: fs_locations done, error = %d\n", __func__, status);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
out_eio:
status = -EIO;
goto out;
@@ -3814,15 +3738,12 @@ static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE;
}
dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink)
@@ -3836,15 +3757,12 @@ static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*maxlink = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_MAXLINK;
}
dprintk("%s: maxlink=%u\n", __func__, *maxlink);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname)
@@ -3858,15 +3776,12 @@ static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*maxname = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_MAXNAME;
}
dprintk("%s: maxname=%u\n", __func__, *maxname);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3881,7 +3796,7 @@ static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
uint64_t maxread;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, &maxread);
if (maxread > 0x7FFFFFFF)
maxread = 0x7FFFFFFF;
@@ -3890,9 +3805,6 @@ static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
}
dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3907,7 +3819,7 @@ static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32
uint64_t maxwrite;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, &maxwrite);
if (maxwrite > 0x7FFFFFFF)
maxwrite = 0x7FFFFFFF;
@@ -3916,9 +3828,6 @@ static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32
}
dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode)
@@ -3933,7 +3842,7 @@ static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *m
if (likely(bitmap[1] & FATTR4_WORD1_MODE)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
tmp = be32_to_cpup(p);
*mode = tmp & ~S_IFMT;
bitmap[1] &= ~FATTR4_WORD1_MODE;
@@ -3941,9 +3850,6 @@ static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *m
}
dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink)
@@ -3957,16 +3863,13 @@ static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t
if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
*nlink = be32_to_cpup(p);
bitmap[1] &= ~FATTR4_WORD1_NUMLINKS;
ret = NFS_ATTR_FATTR_NLINK;
}
dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static ssize_t decode_nfs4_string(struct xdr_stream *xdr,
@@ -4011,10 +3914,9 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
return NFS_ATTR_FATTR_OWNER;
}
out:
- if (len != -EBADMSG)
- return 0;
- print_overflow_msg(__func__, xdr);
- return -EIO;
+ if (len == -EBADMSG)
+ return -EIO;
+ return 0;
}
static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
@@ -4046,10 +3948,9 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
return NFS_ATTR_FATTR_GROUP;
}
out:
- if (len != -EBADMSG)
- return 0;
- print_overflow_msg(__func__, xdr);
- return -EIO;
+ if (len == -EBADMSG)
+ return -EIO;
+ return 0;
}
static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev)
@@ -4066,7 +3967,7 @@ static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rde
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
major = be32_to_cpup(p++);
minor = be32_to_cpup(p);
tmp = MKDEV(major, minor);
@@ -4077,9 +3978,6 @@ static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rde
}
dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -4093,15 +3991,12 @@ static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL;
}
dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -4115,15 +4010,12 @@ static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE;
}
dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -4137,15 +4029,12 @@ static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL;
}
dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used)
@@ -4159,7 +4048,7 @@ static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, used);
bitmap[1] &= ~FATTR4_WORD1_SPACE_USED;
ret = NFS_ATTR_FATTR_SPACE_USED;
@@ -4167,9 +4056,6 @@ static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint
dprintk("%s: space used=%Lu\n", __func__,
(unsigned long long)*used);
return ret;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static __be32 *
@@ -4189,12 +4075,9 @@ static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
p = xdr_inline_decode(xdr, nfstime4_maxsz << 2);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_nfstime4(p, time);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
@@ -4265,19 +4148,19 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
if (likely(bitmap[2] & FATTR4_WORD2_SECURITY_LABEL)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
lfs = be32_to_cpup(p++);
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
pi = be32_to_cpup(p++);
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
len = be32_to_cpup(p++);
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
if (len < NFS4_MAXLABELLEN) {
if (label) {
memcpy(label->label, p, len);
@@ -4295,10 +4178,6 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
dprintk("%s: label=%s, len=%d, PI=%d, LFS=%d\n", __func__,
(char *)label->label, label->len, label->pi, label->lfs);
return status;
-
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
@@ -4342,14 +4221,11 @@ static int decode_change_info(struct xdr_stream *xdr, struct nfs4_change_info *c
p = xdr_inline_decode(xdr, 20);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
cinfo->atomic = be32_to_cpup(p++);
p = xdr_decode_hyper(p, &cinfo->before);
xdr_decode_hyper(p, &cinfo->after);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_access(struct xdr_stream *xdr, u32 *supported, u32 *access)
@@ -4363,24 +4239,19 @@ static int decode_access(struct xdr_stream *xdr, u32 *supported, u32 *access)
return status;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
supp = be32_to_cpup(p++);
acc = be32_to_cpup(p);
*supported = supp;
*access = acc;
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
{
ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
- if (unlikely(ret < 0)) {
- print_overflow_msg(__func__, xdr);
+ if (unlikely(ret < 0))
return -EIO;
- }
return 0;
}
@@ -4460,13 +4331,11 @@ static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
bmlen = be32_to_cpup(p);
p = xdr_inline_decode(xdr, bmlen << 2);
if (likely(p))
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
return -EIO;
}
@@ -4574,13 +4443,10 @@ static int decode_threshold_hint(struct xdr_stream *xdr,
if (likely(bitmap[0] & hint_bit)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, res);
}
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_first_threshold_item4(struct xdr_stream *xdr,
@@ -4593,10 +4459,8 @@ static int decode_first_threshold_item4(struct xdr_stream *xdr,
/* layout type */
p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p)) {
- print_overflow_msg(__func__, xdr);
+ if (unlikely(!p))
return -EIO;
- }
res->l_type = be32_to_cpup(p);
/* thi_hintset bitmap */
@@ -4654,7 +4518,7 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
return -EREMOTEIO;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
num = be32_to_cpup(p);
if (num == 0)
return 0;
@@ -4667,9 +4531,6 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD;
}
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
@@ -4857,7 +4718,7 @@ static int decode_pnfs_layout_types(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
fsinfo->nlayouttypes = be32_to_cpup(p);
/* pNFS is not supported by the underlying file system */
@@ -4867,7 +4728,7 @@ static int decode_pnfs_layout_types(struct xdr_stream *xdr,
/* Decode and set first layout type, move xdr->p past unused types */
p = xdr_inline_decode(xdr, fsinfo->nlayouttypes * 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
/* If we get too many, then just cap it at the max */
if (fsinfo->nlayouttypes > NFS_MAX_LAYOUT_TYPES) {
@@ -4879,9 +4740,6 @@ static int decode_pnfs_layout_types(struct xdr_stream *xdr,
for(i = 0; i < fsinfo->nlayouttypes; ++i)
fsinfo->layouttype[i] = be32_to_cpup(p++);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
/*
@@ -4915,10 +4773,8 @@ static int decode_attr_layout_blksize(struct xdr_stream *xdr, uint32_t *bitmap,
*res = 0;
if (bitmap[2] & FATTR4_WORD2_LAYOUT_BLKSIZE) {
p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p)) {
- print_overflow_msg(__func__, xdr);
+ if (unlikely(!p))
return -EIO;
- }
*res = be32_to_cpup(p);
bitmap[2] &= ~FATTR4_WORD2_LAYOUT_BLKSIZE;
}
@@ -4937,10 +4793,8 @@ static int decode_attr_clone_blksize(struct xdr_stream *xdr, uint32_t *bitmap,
*res = 0;
if (bitmap[2] & FATTR4_WORD2_CLONE_BLKSIZE) {
p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p)) {
- print_overflow_msg(__func__, xdr);
+ if (unlikely(!p))
return -EIO;
- }
*res = be32_to_cpup(p);
bitmap[2] &= ~FATTR4_WORD2_CLONE_BLKSIZE;
}
@@ -5016,19 +4870,16 @@ static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh)
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
len = be32_to_cpup(p);
if (len > NFS4_FHSIZE)
return -EIO;
fh->size = len;
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
memcpy(fh->data, p, len);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -5052,7 +4903,7 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
p = xdr_inline_decode(xdr, 32); /* read 32 bytes */
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
p = xdr_decode_hyper(p, &offset); /* read 2 8-byte long words */
p = xdr_decode_hyper(p, &length);
type = be32_to_cpup(p++); /* 4 byte read */
@@ -5069,11 +4920,9 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */
namelen = be32_to_cpup(p); /* read 4 bytes */ /* have read all 32 bytes now */
p = xdr_inline_decode(xdr, namelen); /* variable size field */
- if (likely(p))
- return -NFS4ERR_DENIED;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
+ if (likely(!p))
+ return -EIO;
+ return -NFS4ERR_DENIED;
}
static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
@@ -5142,7 +4991,7 @@ static int decode_space_limit(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
limit_type = be32_to_cpup(p++);
switch (limit_type) {
case NFS4_LIMIT_SIZE:
@@ -5156,9 +5005,6 @@ static int decode_space_limit(struct xdr_stream *xdr,
maxsize >>= PAGE_SHIFT;
*pagemod_limit = min_t(u64, maxsize, ULONG_MAX);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_rw_delegation(struct xdr_stream *xdr,
@@ -5173,7 +5019,7 @@ static int decode_rw_delegation(struct xdr_stream *xdr,
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->do_recall = be32_to_cpup(p);
switch (delegation_type) {
@@ -5186,9 +5032,6 @@ static int decode_rw_delegation(struct xdr_stream *xdr,
return -EIO;
}
return decode_ace(xdr, NULL);
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_no_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
@@ -5198,7 +5041,7 @@ static int decode_no_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
why_no_delegation = be32_to_cpup(p);
switch (why_no_delegation) {
case WND4_CONTENTION:
@@ -5207,9 +5050,6 @@ static int decode_no_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
/* Ignore for now */
}
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
@@ -5219,7 +5059,7 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
delegation_type = be32_to_cpup(p);
res->delegation_type = 0;
switch (delegation_type) {
@@ -5232,9 +5072,6 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
return decode_no_delegation(xdr, res);
}
return -EIO;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
@@ -5256,7 +5093,7 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->rflags = be32_to_cpup(p++);
bmlen = be32_to_cpup(p);
if (bmlen > 10)
@@ -5264,7 +5101,7 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
p = xdr_inline_decode(xdr, bmlen << 2);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE);
for (i = 0; i < savewords; ++i)
res->attrset[i] = be32_to_cpup(p++);
@@ -5275,9 +5112,6 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
xdr_error:
dprintk("%s: Bitmap too large! Length = %u\n", __func__, bmlen);
return -EIO;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res)
@@ -5326,7 +5160,7 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req,
return status;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
eof = be32_to_cpup(p++);
count = be32_to_cpup(p);
recvd = xdr_read_pages(xdr, count);
@@ -5339,9 +5173,6 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req,
res->eof = eof;
res->count = count;
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
@@ -5374,7 +5205,7 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
/* Convert length of symlink */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
len = be32_to_cpup(p);
if (len >= rcvbuf->page_len || len <= 0) {
dprintk("nfs: server returned giant symlink!\n");
@@ -5395,9 +5226,6 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
*/
xdr_terminate_string(rcvbuf, len);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -5500,7 +5328,6 @@ static int decode_setattr(struct xdr_stream *xdr)
return status;
if (decode_bitmap4(xdr, NULL, 0) >= 0)
return 0;
- print_overflow_msg(__func__, xdr);
return -EIO;
}
@@ -5512,7 +5339,7 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_re
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
opnum = be32_to_cpup(p++);
if (opnum != OP_SETCLIENTID) {
dprintk("nfs: decode_setclientid: Server returned operation"
@@ -5523,7 +5350,7 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_re
if (nfserr == NFS_OK) {
p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
p = xdr_decode_hyper(p, &res->clientid);
memcpy(res->confirm.data, p, NFS4_VERIFIER_SIZE);
} else if (nfserr == NFSERR_CLID_INUSE) {
@@ -5532,28 +5359,25 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_re
/* skip netid string */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
/* skip uaddr string */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
return -NFSERR_CLID_INUSE;
} else
return nfs4_stat_to_errno(nfserr);
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_setclientid_confirm(struct xdr_stream *xdr)
@@ -5572,13 +5396,10 @@ static int decode_write(struct xdr_stream *xdr, struct nfs_pgio_res *res)
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->count = be32_to_cpup(p++);
res->verf->committed = be32_to_cpup(p++);
return decode_write_verifier(xdr, &res->verf->verifier);
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_delegreturn(struct xdr_stream *xdr)
@@ -5594,30 +5415,24 @@ static int decode_secinfo_gss(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
oid_len = be32_to_cpup(p);
if (oid_len > GSS_OID_MAX_LEN)
- goto out_err;
+ return -EINVAL;
p = xdr_inline_decode(xdr, oid_len);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
memcpy(flavor->flavor_info.oid.data, p, oid_len);
flavor->flavor_info.oid.len = oid_len;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
flavor->flavor_info.qop = be32_to_cpup(p++);
flavor->flavor_info.service = be32_to_cpup(p);
return 0;
-
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
-out_err:
- return -EINVAL;
}
static int decode_secinfo_common(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
@@ -5629,7 +5444,7 @@ static int decode_secinfo_common(struct xdr_stream *xdr, struct nfs4_secinfo_res
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->flavors->num_flavors = 0;
num_flavors = be32_to_cpup(p);
@@ -5641,7 +5456,7 @@ static int decode_secinfo_common(struct xdr_stream *xdr, struct nfs4_secinfo_res
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
sec_flavor->flavor = be32_to_cpup(p);
if (sec_flavor->flavor == RPC_AUTH_GSS) {
@@ -5655,9 +5470,6 @@ static int decode_secinfo_common(struct xdr_stream *xdr, struct nfs4_secinfo_res
status = 0;
out:
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
@@ -5711,11 +5523,11 @@ static int decode_exchange_id(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
xdr_decode_hyper(p, &res->clientid);
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->seqid = be32_to_cpup(p++);
res->flags = be32_to_cpup(p++);
@@ -5739,7 +5551,7 @@ static int decode_exchange_id(struct xdr_stream *xdr,
/* server_owner4.so_minor_id */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
p = xdr_decode_hyper(p, &res->server_owner->minor_id);
/* server_owner4.so_major_id */
@@ -5759,7 +5571,7 @@ static int decode_exchange_id(struct xdr_stream *xdr,
/* Implementation Id */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
impl_id_count = be32_to_cpup(p++);
if (impl_id_count) {
@@ -5778,16 +5590,13 @@ static int decode_exchange_id(struct xdr_stream *xdr,
/* nii_date */
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
p = xdr_decode_hyper(p, &res->impl_id->date.seconds);
res->impl_id->date.nseconds = be32_to_cpup(p);
/* if there's more than one entry, ignore the rest */
}
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_chan_attrs(struct xdr_stream *xdr,
@@ -5798,7 +5607,7 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 28);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
val = be32_to_cpup(p++); /* headerpadsz */
if (val)
return -EINVAL; /* no support for header padding yet */
@@ -5816,12 +5625,9 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
if (nr_attrs == 1) {
p = xdr_inline_decode(xdr, 4); /* skip rdma_attrs */
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
}
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
@@ -5844,7 +5650,7 @@ static int decode_bind_conn_to_session(struct xdr_stream *xdr,
/* dir flags, rdma mode bool */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->dir = be32_to_cpup(p++);
if (res->dir == 0 || res->dir > NFS4_CDFS4_BOTH)
@@ -5855,9 +5661,6 @@ static int decode_bind_conn_to_session(struct xdr_stream *xdr,
res->use_conn_in_rdma_mode = true;
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_create_session(struct xdr_stream *xdr,
@@ -5875,7 +5678,7 @@ static int decode_create_session(struct xdr_stream *xdr,
/* seqid, flags */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->seqid = be32_to_cpup(p++);
res->flags = be32_to_cpup(p);
@@ -5884,9 +5687,6 @@ static int decode_create_session(struct xdr_stream *xdr,
if (!status)
status = decode_chan_attrs(xdr, &res->bc_attrs);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
@@ -5967,7 +5767,6 @@ out_err:
res->sr_status = status;
return status;
out_overflow:
- print_overflow_msg(__func__, xdr);
status = -EIO;
goto out_err;
#else /* CONFIG_NFS_V4_1 */
@@ -5995,7 +5794,7 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr,
if (status == -ETOOSMALL) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
pdev->mincount = be32_to_cpup(p);
dprintk("%s: Min count too small. mincnt = %u\n",
__func__, pdev->mincount);
@@ -6005,7 +5804,7 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
type = be32_to_cpup(p++);
if (type != pdev->layout_type) {
dprintk("%s: layout mismatch req: %u pdev: %u\n",
@@ -6019,19 +5818,19 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr,
*/
pdev->mincount = be32_to_cpup(p);
if (xdr_read_pages(xdr, pdev->mincount) != pdev->mincount)
- goto out_overflow;
+ return -EIO;
/* Parse notification bitmap, verifying that it is zero. */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
len = be32_to_cpup(p);
if (len) {
uint32_t i;
p = xdr_inline_decode(xdr, 4 * len);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->notification = be32_to_cpup(p++);
for (i = 1; i < len; i++) {
@@ -6043,9 +5842,6 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr,
}
}
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
@@ -6115,7 +5911,6 @@ out:
res->status = status;
return status;
out_overflow:
- print_overflow_msg(__func__, xdr);
status = -EIO;
goto out;
}
@@ -6131,16 +5926,13 @@ static int decode_layoutreturn(struct xdr_stream *xdr,
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->lrs_present = be32_to_cpup(p);
if (res->lrs_present)
status = decode_layout_stateid(xdr, &res->stateid);
else
nfs4_stateid_copy(&res->stateid, &invalid_stateid);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_layoutcommit(struct xdr_stream *xdr,
@@ -6158,19 +5950,16 @@ static int decode_layoutcommit(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
sizechanged = be32_to_cpup(p);
if (sizechanged) {
/* throw away new size */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
}
return 0;
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EIO;
}
static int decode_test_stateid(struct xdr_stream *xdr,
@@ -6186,21 +5975,17 @@ static int decode_test_stateid(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
num_res = be32_to_cpup(p++);
if (num_res != 1)
- goto out;
+ return -EIO;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EIO;
res->status = be32_to_cpup(p++);
return status;
-out_overflow:
- print_overflow_msg(__func__, xdr);
-out:
- return -EIO;
}
static int decode_free_stateid(struct xdr_stream *xdr,
@@ -7570,11 +7355,11 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
uint64_t new_cookie;
__be32 *p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EAGAIN;
if (*p == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
- goto out_overflow;
+ return -EAGAIN;
if (*p == xdr_zero)
return -EAGAIN;
entry->eof = 1;
@@ -7583,13 +7368,13 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
- goto out_overflow;
+ return -EAGAIN;
p = xdr_decode_hyper(p, &new_cookie);
entry->len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, entry->len);
if (unlikely(!p))
- goto out_overflow;
+ return -EAGAIN;
entry->name = (const char *) p;
/*
@@ -7601,14 +7386,14 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
entry->fattr->valid = 0;
if (decode_attr_bitmap(xdr, bitmap) < 0)
- goto out_overflow;
+ return -EAGAIN;
if (decode_attr_length(xdr, &len, &savep) < 0)
- goto out_overflow;
+ return -EAGAIN;
if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
NULL, entry->label, entry->server) < 0)
- goto out_overflow;
+ return -EAGAIN;
if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
entry->ino = entry->fattr->mounted_on_fileid;
else if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
@@ -7622,10 +7407,6 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
entry->cookie = new_cookie;
return 0;
-
-out_overflow:
- print_overflow_msg(__func__, xdr);
- return -EAGAIN;
}
/*
@@ -7791,6 +7572,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(COPY, enc_copy, dec_copy),
PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel),
PROC(LOOKUPP, enc_lookupp, dec_lookupp),
+ PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror),
};
static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
diff --git a/fs/nfs/nfstrace.c b/fs/nfs/nfstrace.c
index b60d5fbd7727..a90b363500c2 100644
--- a/fs/nfs/nfstrace.c
+++ b/fs/nfs/nfstrace.c
@@ -11,3 +11,4 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_fsync_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_fsync_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_xdr_status);
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index bd60f8d1e181..a0d6910aa03a 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -969,6 +969,91 @@ TRACE_EVENT(nfs_commit_done,
)
);
+TRACE_DEFINE_ENUM(NFS_OK);
+TRACE_DEFINE_ENUM(NFSERR_PERM);
+TRACE_DEFINE_ENUM(NFSERR_NOENT);
+TRACE_DEFINE_ENUM(NFSERR_IO);
+TRACE_DEFINE_ENUM(NFSERR_NXIO);
+TRACE_DEFINE_ENUM(NFSERR_ACCES);
+TRACE_DEFINE_ENUM(NFSERR_EXIST);
+TRACE_DEFINE_ENUM(NFSERR_XDEV);
+TRACE_DEFINE_ENUM(NFSERR_NODEV);
+TRACE_DEFINE_ENUM(NFSERR_NOTDIR);
+TRACE_DEFINE_ENUM(NFSERR_ISDIR);
+TRACE_DEFINE_ENUM(NFSERR_INVAL);
+TRACE_DEFINE_ENUM(NFSERR_FBIG);
+TRACE_DEFINE_ENUM(NFSERR_NOSPC);
+TRACE_DEFINE_ENUM(NFSERR_ROFS);
+TRACE_DEFINE_ENUM(NFSERR_MLINK);
+TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
+TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
+TRACE_DEFINE_ENUM(NFSERR_DQUOT);
+TRACE_DEFINE_ENUM(NFSERR_STALE);
+TRACE_DEFINE_ENUM(NFSERR_REMOTE);
+TRACE_DEFINE_ENUM(NFSERR_WFLUSH);
+TRACE_DEFINE_ENUM(NFSERR_BADHANDLE);
+TRACE_DEFINE_ENUM(NFSERR_NOT_SYNC);
+TRACE_DEFINE_ENUM(NFSERR_BAD_COOKIE);
+TRACE_DEFINE_ENUM(NFSERR_NOTSUPP);
+TRACE_DEFINE_ENUM(NFSERR_TOOSMALL);
+TRACE_DEFINE_ENUM(NFSERR_SERVERFAULT);
+TRACE_DEFINE_ENUM(NFSERR_BADTYPE);
+TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
+
+#define nfs_show_status(x) \
+ __print_symbolic(x, \
+ { NFS_OK, "OK" }, \
+ { NFSERR_PERM, "PERM" }, \
+ { NFSERR_NOENT, "NOENT" }, \
+ { NFSERR_IO, "IO" }, \
+ { NFSERR_NXIO, "NXIO" }, \
+ { NFSERR_ACCES, "ACCES" }, \
+ { NFSERR_EXIST, "EXIST" }, \
+ { NFSERR_XDEV, "XDEV" }, \
+ { NFSERR_NODEV, "NODEV" }, \
+ { NFSERR_NOTDIR, "NOTDIR" }, \
+ { NFSERR_ISDIR, "ISDIR" }, \
+ { NFSERR_INVAL, "INVAL" }, \
+ { NFSERR_FBIG, "FBIG" }, \
+ { NFSERR_NOSPC, "NOSPC" }, \
+ { NFSERR_ROFS, "ROFS" }, \
+ { NFSERR_MLINK, "MLINK" }, \
+ { NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \
+ { NFSERR_NOTEMPTY, "NOTEMPTY" }, \
+ { NFSERR_DQUOT, "DQUOT" }, \
+ { NFSERR_STALE, "STALE" }, \
+ { NFSERR_REMOTE, "REMOTE" }, \
+ { NFSERR_WFLUSH, "WFLUSH" }, \
+ { NFSERR_BADHANDLE, "BADHANDLE" }, \
+ { NFSERR_NOT_SYNC, "NOTSYNC" }, \
+ { NFSERR_BAD_COOKIE, "BADCOOKIE" }, \
+ { NFSERR_NOTSUPP, "NOTSUPP" }, \
+ { NFSERR_TOOSMALL, "TOOSMALL" }, \
+ { NFSERR_SERVERFAULT, "REMOTEIO" }, \
+ { NFSERR_BADTYPE, "BADTYPE" }, \
+ { NFSERR_JUKEBOX, "JUKEBOX" })
+
+TRACE_EVENT(nfs_xdr_status,
+ TP_PROTO(
+ int error
+ ),
+
+ TP_ARGS(error),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error;
+ ),
+
+ TP_printk(
+ "error=%d (%s)",
+ __entry->error, nfs_show_status(__entry->error)
+ )
+);
+
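[Editor's note: the TRACE_EVENT above defines the generic nfs_xdr_status event used by the v2/v3 XDR decoders. A hedged sketch of a call site; the wrapper name is hypothetical, and the actual call sites are in fs/nfs/nfs2xdr.c and nfs3xdr.c, outside this excerpt:]

/*
 * Sketch: fire the event whenever a reply carries a non-zero status,
 * then fall through to the usual status-to-errno translation.
 */
static int nfs_stat_to_errno_traced(enum nfs_stat status)
{
	if (status != NFS_OK)
		trace_nfs_xdr_status((int)status);
	return nfs_stat_to_errno(status);
}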
#endif /* _TRACE_NFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e54d899c1848..e9f39fa5964b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -350,7 +350,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
/**
* nfs_unlock_request - Unlock request and wake up sleepers.
- * @req:
+ * @req: pointer to request
*/
void nfs_unlock_request(struct nfs_page *req)
{
@@ -368,7 +368,7 @@ void nfs_unlock_request(struct nfs_page *req)
/**
* nfs_unlock_and_release_request - Unlock request and release the nfs_page
- * @req:
+ * @req: pointer to request
*/
void nfs_unlock_and_release_request(struct nfs_page *req)
{
@@ -531,7 +531,6 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
* nfs_pgio_rpcsetup - Set up arguments for a pageio call
* @hdr: The pageio hdr
* @count: Number of bytes to read
- * @offset: Initial offset
* @how: How to commit data (writes only)
* @cinfo: Commit information for the call (writes only)
*/
@@ -634,7 +633,6 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
/**
* nfs_pgio_error - Clean up from a pageio error
- * @desc: IO descriptor
* @hdr: pageio header
*/
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
@@ -768,8 +766,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
pageused = 0;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
- nfs_list_remove_request(req);
- nfs_list_add_request(req, &hdr->pages);
+ nfs_list_move_request(req, &hdr->pages);
if (!last_page || last_page != req->wb_page) {
pageused++;
@@ -893,6 +890,7 @@ static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
* nfs_can_coalesce_requests - test two requests for compatibility
* @prev: pointer to nfs_page
* @req: pointer to nfs_page
+ * @pgio: pointer to nfs_pageio_descriptor
*
* The nfs_page structures 'prev' and 'req' are compared to ensure that the
* page data area they describe is contiguous, and that their RPC
@@ -961,8 +959,7 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
}
if (!nfs_can_coalesce_requests(prev, req, desc))
return 0;
- nfs_list_remove_request(req);
- nfs_list_add_request(req, &mirror->pg_list);
+ nfs_list_move_request(req, &mirror->pg_list);
mirror->pg_count += req->wb_bytes;
return 1;
}
@@ -988,6 +985,16 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
}
}
+static void
+nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *req)
+{
+ LIST_HEAD(head);
+
+ nfs_list_move_request(req, &head);
+ desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
+}
+
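[Editor's note on the helper just added: moving the request onto a private list before calling ->error_cleanup() matches the cleanup contract elsewhere in this file, where error_cleanup() consumes a whole list. The calling pattern, as used in the out_cleanup_subreq hunks below:]

/* Only clean up a split-off subrequest; the original head request is
 * still owned by the caller and is released on the normal error path,
 * so cleaning it here would be a double free. */
if (req != subreq)
	nfs_pageio_cleanup_request(desc, subreq);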
/**
* nfs_pageio_add_request - Attempt to coalesce a request into a page list.
* @desc: destination io descriptor
@@ -1025,10 +1032,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
nfs_page_group_unlock(req);
desc->pg_moreio = 1;
nfs_pageio_doio(desc);
- if (desc->pg_error < 0)
- return 0;
- if (mirror->pg_recoalesce)
- return 0;
+ if (desc->pg_error < 0 || mirror->pg_recoalesce)
+ goto out_cleanup_subreq;
/* retry add_request for this subreq */
nfs_page_group_lock(req);
continue;
@@ -1061,6 +1066,10 @@ err_ptr:
desc->pg_error = PTR_ERR(subreq);
nfs_page_group_unlock(req);
return 0;
+out_cleanup_subreq:
+ if (req != subreq)
+ nfs_pageio_cleanup_request(desc, subreq);
+ return 0;
}
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
@@ -1079,7 +1088,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
struct nfs_page *req;
req = list_first_entry(&head, struct nfs_page, wb_list);
- nfs_list_remove_request(req);
if (__nfs_pageio_add_request(desc, req))
continue;
if (desc->pg_error < 0) {
@@ -1120,7 +1128,8 @@ static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
mirror = &desc->pg_mirrors[midx];
- desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
+ desc->pg_error);
}
}
@@ -1168,11 +1177,14 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
if (nfs_pgio_has_mirroring(desc))
desc->pg_mirror_idx = midx;
if (!nfs_pageio_add_request_mirror(desc, dupreq))
- goto out_failed;
+ goto out_cleanup_subreq;
}
return 1;
+out_cleanup_subreq:
+ if (req != dupreq)
+ nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
nfs_pageio_error_cleanup(desc);
return 0;
@@ -1194,7 +1206,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
desc->pg_mirror_idx = mirror_idx;
for (;;) {
nfs_pageio_doio(desc);
- if (!mirror->pg_recoalesce)
+ if (desc->pg_error < 0 || !mirror->pg_recoalesce)
break;
if (!nfs_do_recoalesce(desc))
break;
@@ -1222,9 +1234,8 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
while (!list_empty(&hdr->pages)) {
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
- nfs_list_remove_request(req);
if (!nfs_pageio_add_request(desc, req))
- nfs_list_add_request(req, &failed);
+ nfs_list_move_request(req, &failed);
}
nfs_pageio_complete(desc);
if (!list_empty(&failed)) {
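
[Editor's note: several hunks in pagelist.c replace nfs_list_remove_request() plus nfs_list_add_request() pairs with one nfs_list_move_request() call. A hedged sketch of that helper, presumably living in include/linux/nfs_page.h, which is not part of this excerpt:]

/*
 * Sketch only: one plausible definition, shown to make the pagelist.c
 * hunks above self-explanatory.
 */
static inline void
nfs_list_move_request(struct nfs_page *req, struct list_head *head)
{
	/* list_move_tail() performs the delete and add-to-tail in one step */
	list_move_tail(&req->wb_list, head);
}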
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 53726da5c010..7066cd7c7aff 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -758,22 +758,35 @@ static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
struct nfs_server *server,
struct list_head *layout_list)
+ __must_hold(&clp->cl_lock)
+ __must_hold(RCU)
{
struct pnfs_layout_hdr *lo, *next;
struct inode *inode;
list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
- if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
+ if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
+ test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) ||
+ !list_empty(&lo->plh_bulk_destroy))
continue;
+ /* If the sb is being destroyed, just bail */
+ if (!nfs_sb_active(server->super))
+ break;
inode = igrab(lo->plh_inode);
- if (inode == NULL)
- continue;
- list_del_init(&lo->plh_layouts);
- if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
- continue;
- rcu_read_unlock();
- spin_unlock(&clp->cl_lock);
- iput(inode);
+ if (inode != NULL) {
+ list_del_init(&lo->plh_layouts);
+ if (pnfs_layout_add_bulk_destroy_list(inode,
+ layout_list))
+ continue;
+ rcu_read_unlock();
+ spin_unlock(&clp->cl_lock);
+ iput(inode);
+ } else {
+ rcu_read_unlock();
+ spin_unlock(&clp->cl_lock);
+ set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
+ }
+ nfs_sb_deactive(server->super);
spin_lock(&clp->cl_lock);
rcu_read_lock();
return -EAGAIN;
@@ -811,7 +824,7 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
/* Free all lsegs that are attached to commit buckets */
nfs_commit_inode(inode, 0);
pnfs_put_layout_hdr(lo);
- iput(inode);
+ nfs_iput_and_deactive(inode);
}
return ret;
}
@@ -1876,7 +1889,7 @@ lookup_again:
atomic_read(&lo->plh_outstanding) != 0) {
spin_unlock(&ino->i_lock);
lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
- atomic_read(&lo->plh_outstanding)));
+ !atomic_read(&lo->plh_outstanding)));
if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
goto out_put_layout_hdr;
pnfs_put_layout_hdr(lo);
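
[Editor's note: the last pnfs.c hunk fixes an inverted wait predicate. wait_var_event_killable() sleeps until its condition is true, so waiting for outstanding layoutgets to drain requires the negated read:]

/* Correct form, as in the hunk above: the condition becomes true only
 * when the counter reaches zero. */
lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
				!atomic_read(&lo->plh_outstanding)));
/* The old code passed atomic_read(...) directly, so the wait returned
 * immediately while requests were still outstanding. */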
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 5e80a07b7bea..c0420b979d88 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -104,6 +104,7 @@ enum {
NFS_LAYOUT_RETURN_REQUESTED, /* Return this layout ASAP */
NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */
NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */
+ NFS_LAYOUT_INODE_FREEING, /* The inode is being freed */
};
enum layoutdriver_policy_flags {
@@ -349,6 +350,7 @@ void nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *, const struct nf
void nfs4_init_deviceid_node(struct nfs4_deviceid_node *, struct nfs_server *,
const struct nfs4_deviceid *);
bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *);
+void nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node);
void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node);
bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node);
void nfs4_deviceid_purge_client(const struct nfs_client *);
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index 7fb59487ee90..537b80d693f1 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -284,10 +284,22 @@ nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
void
+nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
+{
+ if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
+ clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
+ smp_mb__after_atomic();
+ }
+}
+EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_available);
+
+void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
node->timestamp_unavailable = jiffies;
+ smp_mb__before_atomic();
set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
+ smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);
@@ -302,6 +314,7 @@ nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
if (time_in_range(node->timestamp_unavailable, start, end))
return true;
clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
+ smp_mb__after_atomic();
}
return false;
}
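
[Editor's note: a sketch of the ordering contract the new barriers in pnfs_dev.c establish. Assumption: the intended reader is nfs4_test_deviceid_unavailable(), partially shown above, which tests the flag and then consults the timestamp:]

/*
 *   writer (mark unavailable)           reader (test)
 *   -------------------------           -------------
 *   timestamp_unavailable = jiffies;
 *   smp_mb__before_atomic();
 *   set_bit(UNAVAILABLE, &flags); --->  test_bit(UNAVAILABLE, &flags)
 *   smp_mb__after_atomic();             a reader that observes the bit
 *                                       also observes the timestamp
 *                                       written before it
 */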
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index f9f19784db82..1d95a60b2586 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -205,7 +205,7 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr,
}
static void
-nfs_async_read_error(struct list_head *head)
+nfs_async_read_error(struct list_head *head, int error)
{
struct nfs_page *req;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 0570391eaa16..c27ac96a95bd 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1919,7 +1919,7 @@ static int nfs_parse_devname(const char *dev_name,
/* kill possible hostname list: not supported */
comma = strchr(dev_name, ',');
if (comma != NULL && comma < end)
- *comma = 0;
+ len = comma - dev_name;
}
if (len > maxnamlen)
@@ -2041,7 +2041,8 @@ static int nfs23_validate_mount_data(void *options,
memcpy(sap, &data->addr, sizeof(data->addr));
args->nfs_server.addrlen = sizeof(data->addr);
args->nfs_server.port = ntohs(data->addr.sin_port);
- if (!nfs_verify_server_address(sap))
+ if (sap->sa_family != AF_INET ||
+ !nfs_verify_server_address(sap))
goto out_no_address;
if (!(data->flags & NFS_MOUNT_TCP))
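
[Editor's note: the nfs_parse_devname() change is subtle. The old code wrote a NUL byte into the caller's dev_name string to cut off an unsupported hostname list, mutating a buffer the parser does not own; clamping the copied length has the same effect without the side effect:]

/* Before (mutates the caller's string): */
if (comma != NULL && comma < end)
	*comma = 0;

/* After (input left intact; only the hostname length is clamped): */
if (comma != NULL && comma < end)
	len = comma - dev_name;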
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 79b97b3c4427..52d533967485 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -39,6 +39,7 @@ nfs_free_unlinkdata(struct nfs_unlinkdata *data)
/**
* nfs_async_unlink_done - Sillydelete post-processing
* @task: rpc_task of the sillydelete
+ * @calldata: pointer to nfs_unlinkdata
*
* Do the directory attribute update.
*/
@@ -54,7 +55,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
/**
* nfs_async_unlink_release - Release the sillydelete data.
- * @task: rpc_task of the sillydelete
+ * @calldata: struct nfs_unlinkdata to release
*
* We need to call nfs_put_unlinkdata as a 'tk_release' task since the
* rpc_task would be freed too.
@@ -159,8 +160,8 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf
/**
* nfs_async_unlink - asynchronous unlinking of a file
- * @dir: parent directory of dentry
- * @dentry: dentry to unlink
+ * @dentry: parent directory of dentry
+ * @name: name of dentry to unlink
*/
static int
nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
@@ -324,6 +325,7 @@ static const struct rpc_call_ops nfs_rename_ops = {
* @new_dir: target directory for the rename
* @old_dentry: original dentry to be renamed
* @new_dentry: dentry to which the old_dentry should be renamed
+ * @complete: Function to run on successful completion
*
* It's expected that valid references to the dentries and inodes are held
*/
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d09c9f878141..f3ebabaa291d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -26,6 +26,7 @@
#include <linux/iversion.h>
#include <linux/uaccess.h>
+#include <linux/sched/mm.h>
#include "delegation.h"
#include "internal.h"
@@ -712,11 +713,13 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
struct nfs_pageio_descriptor pgio;
- struct nfs_io_completion *ioc = nfs_io_completion_alloc(GFP_NOFS);
+ struct nfs_io_completion *ioc;
+ unsigned int pflags = memalloc_nofs_save();
int err;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
+ ioc = nfs_io_completion_alloc(GFP_NOFS);
if (ioc)
nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
@@ -727,6 +730,8 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
nfs_pageio_complete(&pgio);
nfs_io_completion_put(ioc);
+ memalloc_nofs_restore(pflags);
+
if (err < 0)
goto out_err;
err = pgio.pg_error;
@@ -865,7 +870,6 @@ EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
/**
* nfs_request_add_commit_list - add request to a commit list
* @req: pointer to a struct nfs_page
- * @dst: commit list head
* @cinfo: holds list lock and accounting info
*
* This sets the PG_CLEAN bit, updates the cinfo count of
@@ -1412,20 +1416,27 @@ static void nfs_redirty_request(struct nfs_page *req)
nfs_release_request(req);
}
-static void nfs_async_write_error(struct list_head *head)
+static void nfs_async_write_error(struct list_head *head, int error)
{
struct nfs_page *req;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
+ if (nfs_error_is_fatal(error)) {
+ nfs_context_set_write_error(req->wb_context, error);
+ if (nfs_error_is_fatal_on_server(error)) {
+ nfs_write_error_remove_page(req);
+ continue;
+ }
+ }
nfs_redirty_request(req);
}
}
static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
- nfs_async_write_error(&hdr->pages);
+ nfs_async_write_error(&hdr->pages, 0);
filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
hdr->args.offset + hdr->args.count - 1);
}
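
The write.c hunks switch nfs_writepages() to the scoped-allocation API: memalloc_nofs_save() marks the whole writeback section as GFP_NOFS, so any allocation inside it, however deep, cannot recurse into the filesystem, and memalloc_nofs_restore() re-establishes the previous state so the sections nest. A userspace analogue of the save/restore idiom (flag values illustrative):

#include <stdio.h>

static _Thread_local unsigned int alloc_flags;
#define NOFS 0x1u

static unsigned int nofs_save(void)
{
	unsigned int old = alloc_flags;

	alloc_flags |= NOFS;
	return old;
}

static void nofs_restore(unsigned int old)
{
	alloc_flags = old;	/* nests correctly: restore, don't clear */
}

static void do_allocation(void)
{
	if (alloc_flags & NOFS)
		printf("allocation may not recurse into the filesystem\n");
}

int main(void)
{
	unsigned int pflags = nofs_save();

	do_allocation();	/* everything below is implicitly NOFS */
	nofs_restore(pflags);
	return 0;
}
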
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 9eb8086ea841..9bc32af4e2da 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -442,7 +442,9 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
struct nfsd3_readdirargs *argp = rqstp->rq_argp;
struct nfsd3_readdirres *resp = rqstp->rq_resp;
__be32 nfserr;
- int count;
+ int count = 0;
+ struct page **p;
+ caddr_t page_addr = NULL;
dprintk("nfsd: READDIR(3) %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
@@ -462,9 +464,31 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
nfserr = nfsd_readdir(rqstp, &resp->fh, (loff_t*) &argp->cookie,
&resp->common, nfs3svc_encode_entry);
memcpy(resp->verf, argp->verf, 8);
- resp->count = resp->buffer - argp->buffer;
- if (resp->offset)
- xdr_encode_hyper(resp->offset, argp->cookie);
+ count = 0;
+ for (p = rqstp->rq_respages + 1; p < rqstp->rq_next_page; p++) {
+ page_addr = page_address(*p);
+
+ if (((caddr_t)resp->buffer >= page_addr) &&
+ ((caddr_t)resp->buffer < page_addr + PAGE_SIZE)) {
+ count += (caddr_t)resp->buffer - page_addr;
+ break;
+ }
+ count += PAGE_SIZE;
+ }
+ resp->count = count >> 2;
+ if (resp->offset) {
+ loff_t offset = argp->cookie;
+
+ if (unlikely(resp->offset1)) {
+ /* we ended up with offset on a page boundary */
+ *resp->offset = htonl(offset >> 32);
+ *resp->offset1 = htonl(offset & 0xffffffff);
+ resp->offset1 = NULL;
+ } else {
+ xdr_encode_hyper(resp->offset, offset);
+ }
+ resp->offset = NULL;
+ }
RETURN_STATUS(nfserr);
}
@@ -533,6 +557,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
} else {
xdr_encode_hyper(resp->offset, offset);
}
+ resp->offset = NULL;
}
RETURN_STATUS(nfserr);
@@ -576,7 +601,7 @@ nfsd3_proc_fsinfo(struct svc_rqst *rqstp)
resp->f_wtmax = max_blocksize;
resp->f_wtpref = max_blocksize;
resp->f_wtmult = PAGE_SIZE;
- resp->f_dtpref = PAGE_SIZE;
+ resp->f_dtpref = max_blocksize;
resp->f_maxfilesize = ~(u32) 0;
resp->f_properties = NFS3_FSF_DEFAULT;
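
With readdir replies now allowed to span several pages, the reply length can no longer be read off a single buffer pointer; the loop above walks rq_respages until it finds the page containing the final cursor and sums whole pages before it. The same walk in a self-contained form (types simplified):

#include <stddef.h>

#define PAGE_SIZE 4096u

/*
 * Count bytes from the start of pages[] up to cursor, which points
 * somewhere inside one of the pages. Returns 0 if cursor is not found.
 */
static size_t bytes_up_to(char *const *pages, size_t npages, const char *cursor)
{
	size_t count = 0;

	for (size_t i = 0; i < npages; i++) {
		const char *base = pages[i];

		if (cursor >= base && cursor < base + PAGE_SIZE)
			return count + (size_t)(cursor - base);
		count += PAGE_SIZE;
	}
	return 0;
}
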
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 9b973f4f7d01..8d789124ed3c 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -573,6 +573,9 @@ int
nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
{
struct nfsd3_readdirargs *args = rqstp->rq_argp;
+ int len;
+ u32 max_blocksize = svc_max_payload(rqstp);
+
p = decode_fh(p, &args->fh);
if (!p)
return 0;
@@ -580,8 +583,14 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
args->verf = p; p += 2;
args->dircount = ~0;
args->count = ntohl(*p++);
- args->count = min_t(u32, args->count, PAGE_SIZE);
- args->buffer = page_address(*(rqstp->rq_next_page++));
+ len = args->count = min_t(u32, args->count, max_blocksize);
+
+ while (len > 0) {
+ struct page *p = *(rqstp->rq_next_page++);
+ if (!args->buffer)
+ args->buffer = page_address(p);
+ len -= PAGE_SIZE;
+ }
return xdr_argsize_check(rqstp, p);
}
@@ -921,6 +930,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
} else {
xdr_encode_hyper(cd->offset, offset64);
}
+ cd->offset = NULL;
}
/*
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index c74e4538d0eb..7caa3801ce72 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -60,16 +60,6 @@ struct nfs4_cb_compound_hdr {
int status;
};
-/*
- * Handle decode buffer overflows out-of-line.
- */
-static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
-{
- dprintk("NFS: %s prematurely hit the end of our receive buffer. "
- "Remaining buffer length is %tu words.\n",
- func, xdr->end - xdr->p);
-}
-
static __be32 *xdr_encode_empty_array(__be32 *p)
{
*p++ = xdr_zero;
@@ -240,7 +230,6 @@ static int decode_cb_op_status(struct xdr_stream *xdr,
*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
return 0;
out_overflow:
- print_overflow_msg(__func__, xdr);
return -EIO;
out_unexpected:
dprintk("NFSD: Callback server returned operation %d but "
@@ -309,7 +298,6 @@ static int decode_cb_compound4res(struct xdr_stream *xdr,
hdr->nops = be32_to_cpup(p);
return 0;
out_overflow:
- print_overflow_msg(__func__, xdr);
return -EIO;
}
@@ -437,7 +425,6 @@ out:
cb->cb_seq_status = status;
return status;
out_overflow:
- print_overflow_msg(__func__, xdr);
status = -EIO;
goto out;
}
@@ -913,9 +900,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
return PTR_ERR(client);
}
cred = get_backchannel_cred(clp, client, ses);
- if (IS_ERR(cred)) {
+ if (!cred) {
rpc_shutdown_client(client);
- return PTR_ERR(cred);
+ return -ENOMEM;
}
clp->cl_cb_client = client;
clp->cl_cb_cred = cred;
@@ -1023,8 +1010,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
cb->cb_seq_status = 1;
cb->cb_status = 0;
if (minorversion) {
- if (!nfsd41_cb_get_slot(clp, task))
+ if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
return;
+ cb->cb_holds_slot = true;
}
rpc_call_start(task);
}
@@ -1051,6 +1039,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
return true;
}
+ if (!cb->cb_holds_slot)
+ goto need_restart;
+
switch (cb->cb_seq_status) {
case 0:
/*
@@ -1089,6 +1080,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
cb->cb_seq_status);
}
+ cb->cb_holds_slot = false;
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_wake_up_next(&clp->cl_cb_waitq);
dprintk("%s: freed slot, new seqid=%d\n", __func__,
@@ -1296,6 +1288,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_seq_status = 1;
cb->cb_status = 0;
cb->cb_need_restart = false;
+ cb->cb_holds_slot = false;
}
void nfsd4_run_cb(struct nfsd4_callback *cb)
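
cb_holds_slot makes backchannel slot ownership explicit: a restarted callback neither tries to take the session's single slot twice in nfsd4_cb_prepare() nor frees a slot it never held in nfsd4_cb_sequence_done(). A minimal sketch of the guard, with one global slot and illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

struct cb {
	bool holds_slot;
};

static atomic_flag slot_busy = ATOMIC_FLAG_INIT;

static bool cb_prepare(struct cb *cb)
{
	/* Only try to take the slot if we don't already own it. */
	if (!cb->holds_slot) {
		if (atomic_flag_test_and_set(&slot_busy))
			return false;	/* busy: caller must wait and retry */
		cb->holds_slot = true;
	}
	return true;
}

static void cb_done(struct cb *cb)
{
	if (!cb->holds_slot)
		return;			/* never release a slot we don't hold */
	cb->holds_slot = false;
	atomic_flag_clear(&slot_busy);
}
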
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 5188f9f70c78..8c8563441208 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -126,7 +126,6 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
status = crypto_shash_digest(desc, clname->data, clname->len,
cksum.data);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index fb3c9844c82a..f056b1d3fecd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -265,6 +265,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
+ locks_delete_block(&nbl->nbl_lock);
locks_release_private(&nbl->nbl_lock);
kfree(nbl);
}
@@ -293,11 +294,18 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
nbl_lru);
list_del_init(&nbl->nbl_lru);
- locks_delete_block(&nbl->nbl_lock);
free_blocked_lock(nbl);
}
}
+static void
+nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
+{
+ struct nfsd4_blocked_lock *nbl = container_of(cb,
+ struct nfsd4_blocked_lock, nbl_cb);
+ locks_delete_block(&nbl->nbl_lock);
+}
+
static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
@@ -325,6 +333,7 @@ nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
}
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
+ .prepare = nfsd4_cb_notify_lock_prepare,
.done = nfsd4_cb_notify_lock_done,
.release = nfsd4_cb_notify_lock_release,
};
@@ -1544,16 +1553,16 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
u32 slotsize = slot_bytes(ca);
u32 num = ca->maxreqs;
- int avail;
+ unsigned long avail, total_avail;
spin_lock(&nfsd_drc_lock);
- avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
- nfsd_drc_max_mem - nfsd_drc_mem_used);
+ total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
+ avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
/*
* Never use more than a third of the remaining memory,
* unless it's the only way to give this client a slot:
*/
- avail = clamp_t(int, avail, slotsize, avail/3);
+ avail = clamp_t(int, avail, slotsize, total_avail/3);
num = min_t(int, num, avail / slotsize);
nfsd_drc_mem_used += num * slotsize;
spin_unlock(&nfsd_drc_lock);
@@ -4863,7 +4872,6 @@ nfs4_laundromat(struct nfsd_net *nn)
nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
- locks_delete_block(&nbl->nbl_lock);
free_blocked_lock(nbl);
}
out:
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 72a7681f4046..f2feb2d11bae 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1126,7 +1126,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
case 'Y':
case 'y':
case '1':
- if (nn->nfsd_serv)
+ if (!nn->nfsd_serv)
return -EBUSY;
nfsd4_end_grace(nn);
break;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 396c76755b03..9d6cb246c6c5 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -70,6 +70,7 @@ struct nfsd4_callback {
int cb_seq_status;
int cb_status;
bool cb_need_restart;
+ bool cb_holds_slot;
};
struct nfsd4_callback_ops {
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index f2129a5d9f23..4391fd3abd8f 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -189,7 +189,7 @@ retry:
*/
if (!err)
return 0;
- else if (err != -EEXIST)
+ else if (err != -EBUSY)
goto failed_unlock;
err = invalidate_inode_pages2_range(btnc, newkey, newkey);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 6b9c27548997..63c6bb1f8c4d 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -346,10 +346,16 @@ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
__kernel_fsid_t fsid = {};
fsnotify_foreach_obj_type(type) {
+ struct fsnotify_mark_connector *conn;
+
if (!fsnotify_iter_should_report_type(iter_info, type))
continue;
- fsid = iter_info->marks[type]->connector->fsid;
+ conn = READ_ONCE(iter_info->marks[type]->connector);
+ /* Mark is just getting destroyed or created? */
+ if (!conn)
+ continue;
+ fsid = conn->fsid;
if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
continue;
return fsid;
@@ -408,8 +414,12 @@ static int fanotify_handle_event(struct fsnotify_group *group,
return 0;
}
- if (FAN_GROUP_FLAG(group, FAN_REPORT_FID))
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
fsid = fanotify_get_fsid(iter_info);
+ /* Racing with mark destruction or creation? */
+ if (!fsid.val[0] && !fsid.val[1])
+ return 0;
+ }
event = fanotify_alloc_event(group, inode, mask, data, data_type,
&fsid);
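
Both fanotify hunks cope with a mark whose connector is concurrently being attached or detached: the pointer is sampled once with READ_ONCE() and a NULL snapshot is treated as "no fsid yet". The snapshot-then-check shape in C11 form (in the kernel, SRCU additionally keeps the connector alive after the load; that part is omitted here):

#include <stdatomic.h>

struct connector { int fsid; };
struct mark { _Atomic(struct connector *) connector; };

/* Returns the fsid, or -1 if the mark is mid-attach/mid-detach. */
static int mark_fsid(struct mark *m)
{
	/* one snapshot: never dereference m->connector twice */
	struct connector *conn =
		atomic_load_explicit(&m->connector, memory_order_relaxed);

	if (!conn)
		return -1;	/* just getting destroyed or created */
	return conn->fsid;
}
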
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 56992b32c6bb..a90bb19dcfa2 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -208,6 +208,7 @@ static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
{
struct fanotify_event_info_fid info = { };
struct file_handle handle = { };
+ unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh;
size_t fh_len = event->fh_len;
size_t len = fanotify_event_info_len(event);
@@ -233,7 +234,16 @@ static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
buf += sizeof(handle);
len -= sizeof(handle);
- if (copy_to_user(buf, fanotify_event_fh(event), fh_len))
+ /*
+ * For an inline fh, copy through stack to exclude the copy from
+ * usercopy hardening protections.
+ */
+ fh = fanotify_event_fh(event);
+ if (fh_len <= FANOTIFY_INLINE_FH_LEN) {
+ memcpy(bounce, fh, fh_len);
+ fh = bounce;
+ }
+ if (copy_to_user(buf, fh, fh_len))
return -EFAULT;
/* Pad with 0's */
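
The bounce buffer exists because the inline file handle lives inside a slab object next to other fields; hardened usercopy would flag a copy_to_user() sourced from it. Staging small handles through the stack sidesteps the check without copying large handles twice. A userspace analogue, with write(2) standing in for copy_to_user() and an illustrative inline-handle size:

#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define INLINE_FH_LEN 12

/* Copy fh out through a stack bounce when it is small enough. */
static ssize_t emit_fh(int fd, const unsigned char *fh, size_t fh_len)
{
	unsigned char bounce[INLINE_FH_LEN];

	if (fh_len <= sizeof(bounce)) {
		memcpy(bounce, fh, fh_len);
		fh = bounce;	/* source is now the stack, not the slab */
	}
	return write(fd, fh, fh_len);
}
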
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index e2901fbb9f76..7b53598c8804 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -519,8 +519,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
if (!fsn_mark)
return -ENOENT;
- else if (create)
- return -EEXIST;
+ else if (create) {
+ ret = -EEXIST;
+ goto out;
+ }
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
@@ -548,6 +550,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
/* return the wd */
ret = i_mark->wd;
+out:
/* match the get from fsnotify_find_mark() */
fsnotify_put_mark(fsn_mark);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index d593d4269561..22acb0a79b53 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -239,13 +239,13 @@ static void fsnotify_drop_object(unsigned int type, void *objp)
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
- struct fsnotify_mark_connector *conn;
+ struct fsnotify_mark_connector *conn = READ_ONCE(mark->connector);
void *objp = NULL;
unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED;
bool free_conn = false;
/* Catch marks that were actually never attached to object */
- if (!mark->connector) {
+ if (!conn) {
if (refcount_dec_and_test(&mark->refcnt))
fsnotify_final_mark_destroy(mark);
return;
@@ -255,10 +255,9 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
* We have to be careful so that traversals of obj_list under lock can
* safely grab mark reference.
*/
- if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+ if (!refcount_dec_and_lock(&mark->refcnt, &conn->lock))
return;
- conn = mark->connector;
hlist_del_init_rcu(&mark->obj_list);
if (hlist_empty(&conn->list)) {
objp = fsnotify_detach_connector_from_object(conn, &type);
@@ -266,7 +265,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
} else {
__fsnotify_recalc_mask(conn);
}
- mark->connector = NULL;
+ WRITE_ONCE(mark->connector, NULL);
spin_unlock(&conn->lock);
fsnotify_drop_object(type, objp);
@@ -620,7 +619,7 @@ restart:
/* mark should be the last entry. last is the current last entry */
hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
added:
- mark->connector = conn;
+ WRITE_ONCE(mark->connector, conn);
out_err:
spin_unlock(&conn->lock);
spin_unlock(&mark->lock);
@@ -808,6 +807,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
refcount_set(&mark->refcnt, 1);
fsnotify_get_group(group);
mark->group = group;
+ WRITE_ONCE(mark->connector, NULL);
}
/*
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index a35259eebc56..1dc9a08e8bdc 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4719,22 +4719,23 @@ out:
/* Lock an inode and grab a bh pointing to the inode. */
int ocfs2_reflink_inodes_lock(struct inode *s_inode,
- struct buffer_head **bh1,
+ struct buffer_head **bh_s,
struct inode *t_inode,
- struct buffer_head **bh2)
+ struct buffer_head **bh_t)
{
- struct inode *inode1;
- struct inode *inode2;
+ struct inode *inode1 = s_inode;
+ struct inode *inode2 = t_inode;
struct ocfs2_inode_info *oi1;
struct ocfs2_inode_info *oi2;
+ struct buffer_head *bh1 = NULL;
+ struct buffer_head *bh2 = NULL;
bool same_inode = (s_inode == t_inode);
+ bool need_swap = (inode1->i_ino > inode2->i_ino);
int status;
/* First grab the VFS and rw locks. */
lock_two_nondirectories(s_inode, t_inode);
- inode1 = s_inode;
- inode2 = t_inode;
- if (inode1->i_ino > inode2->i_ino)
+ if (need_swap)
swap(inode1, inode2);
status = ocfs2_rw_lock(inode1, 1);
@@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
(unsigned long long)oi2->ip_blkno);
- if (*bh1)
- *bh1 = NULL;
- if (*bh2)
- *bh2 = NULL;
-
/* We always want to lock the one with the lower lockid first. */
if (oi1->ip_blkno > oi2->ip_blkno)
mlog_errno(-ENOLCK);
/* lock id1 */
- status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
+ status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
+ OI_LS_REFLINK_TARGET);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
@@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
/* lock id2 */
if (!same_inode) {
- status = ocfs2_inode_lock_nested(inode2, bh2, 1,
+ status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
OI_LS_REFLINK_TARGET);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto out_cl1;
}
- } else
- *bh2 = *bh1;
+ } else {
+ bh2 = bh1;
+ }
+
+ /*
+ * If we swapped inode order above, we have to swap the buffer heads
+ * before passing them back to the caller.
+ */
+ if (need_swap)
+ swap(bh1, bh2);
+ *bh_s = bh1;
+ *bh_t = bh2;
trace_ocfs2_double_lock_end(
(unsigned long long)oi1->ip_blkno,
@@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
out_cl1:
ocfs2_inode_unlock(inode1, 1);
- brelse(*bh1);
- *bh1 = NULL;
+ brelse(bh1);
out_rw2:
ocfs2_rw_unlock(inode2, 1);
out_i2:
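
ocfs2_reflink_inodes_lock() always takes the two cluster locks in ascending inode-number order to avoid ABBA deadlocks, then (the fixed part) swaps the buffer heads back so *bh_s/*bh_t match the caller's source/target order rather than the locking order. The same order-then-swap-back trick with pthread mutexes and toy types:

#include <pthread.h>

struct res { pthread_mutex_t lock; int data; };

/*
 * Lock a and b in address order, but return their data in the
 * caller's (a, b) order -- the same "swap back" step as above.
 */
static void lock_pair(struct res *a, struct res *b, int *out_a, int *out_b)
{
	int swapped = a > b;
	struct res *first = swapped ? b : a;
	struct res *second = swapped ? a : b;
	int d1, d2;

	pthread_mutex_lock(&first->lock);
	if (first != second)
		pthread_mutex_lock(&second->lock);

	d1 = first->data;
	d2 = second->data;
	if (swapped) { int t = d1; d1 = d2; d2 = t; }
	*out_a = d1;
	*out_b = d2;

	if (first != second)
		pthread_mutex_unlock(&second->lock);
	pthread_mutex_unlock(&first->lock);
}
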
diff --git a/fs/open.c b/fs/open.c
index 0285ce7dbd51..a00350018a47 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
return 0;
}
+ /* Any file opened for execve()/uselib() has to be a regular file. */
+ if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
+ error = -EACCES;
+ goto cleanup_file;
+ }
+
if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
error = get_write_access(inode);
if (unlikely(error))
@@ -1209,3 +1215,21 @@ int nonseekable_open(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(nonseekable_open);
+
+/*
+ * stream_open is used by subsystems that want stream-like file descriptors.
+ * Such file descriptors are not seekable and have no notion of position
+ * (file.f_pos is always 0). Unlike for regular files, .read() and
+ * .write() can run simultaneously.
+ *
+ * stream_open never fails and returns int so that it can be used
+ * directly as file_operations.open.
+ */
+int stream_open(struct inode *inode, struct file *filp)
+{
+ filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
+ filp->f_mode |= FMODE_STREAM;
+ return 0;
+}
+
+EXPORT_SYMBOL(stream_open);
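
Stream semantics are already observable from userspace on pipe descriptors, which is the behaviour stream_open() generalizes: no file position, and seeking fails. A small runnable demonstration:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Pipes are the classic stream fd: no position, lseek() fails. */
int main(void)
{
	int fds[2];

	if (pipe(fds))
		return 1;
	if (lseek(fds[0], 0, SEEK_SET) == -1 && errno == ESPIPE)
		printf("pipe fd is a stream: lseek -> ESPIPE, pos stays 0\n");
	close(fds[0]);
	close(fds[1]);
	return 0;
}
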
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index f038235c64bd..c3334eca18c7 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -261,11 +261,8 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
generic_fillattr(inode, stat);
/* override block size reported to stat */
- if (request_mask & STATX_SIZE)
- stat->result_mask = STATX_BASIC_STATS;
- else
- stat->result_mask = STATX_BASIC_STATS &
- ~STATX_SIZE;
+ if (!(request_mask & STATX_SIZE))
+ stat->result_mask &= ~STATX_SIZE;
stat->attributes_mask = STATX_ATTR_IMMUTABLE |
STATX_ATTR_APPEND;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 9e62dcf06fc4..68b3303e4b46 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -443,6 +443,24 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
{
int err;
+ /*
+ * Copy up data first and then xattrs. Writing data after
+ * xattrs will remove the security.capability xattr automatically.
+ */
+ if (S_ISREG(c->stat.mode) && !c->metacopy) {
+ struct path upperpath, datapath;
+
+ ovl_path_upper(c->dentry, &upperpath);
+ if (WARN_ON(upperpath.dentry != NULL))
+ return -EIO;
+ upperpath.dentry = temp;
+
+ ovl_path_lowerdata(c->dentry, &datapath);
+ err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
+ if (err)
+ return err;
+ }
+
err = ovl_copy_xattr(c->lowerpath.dentry, temp);
if (err)
return err;
@@ -460,19 +478,6 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
return err;
}
- if (S_ISREG(c->stat.mode) && !c->metacopy) {
- struct path upperpath, datapath;
-
- ovl_path_upper(c->dentry, &upperpath);
- BUG_ON(upperpath.dentry != NULL);
- upperpath.dentry = temp;
-
- ovl_path_lowerdata(c->dentry, &datapath);
- err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
- if (err)
- return err;
- }
-
if (c->metacopy) {
err = ovl_check_setxattr(c->dentry, temp, OVL_XATTR_METACOPY,
NULL, 0, -EOPNOTSUPP);
@@ -737,6 +742,8 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
{
struct path upperpath, datapath;
int err;
+ char *capability = NULL;
+ ssize_t uninitialized_var(cap_size);
ovl_path_upper(c->dentry, &upperpath);
if (WARN_ON(upperpath.dentry == NULL))
@@ -746,15 +753,37 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
if (WARN_ON(datapath.dentry == NULL))
return -EIO;
+ if (c->stat.size) {
+ err = cap_size = ovl_getxattr(upperpath.dentry, XATTR_NAME_CAPS,
+ &capability, 0);
+ if (err < 0 && err != -ENODATA)
+ goto out;
+ }
+
err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
if (err)
- return err;
+ goto out_free;
+
+ /*
+ * Writing to the upper file will clear the security.capability xattr.
+ * We don't want that to happen during a normal copy-up operation.
+ */
+ if (capability) {
+ err = ovl_do_setxattr(upperpath.dentry, XATTR_NAME_CAPS,
+ capability, cap_size, 0);
+ if (err)
+ goto out_free;
+ }
+
err = vfs_removexattr(upperpath.dentry, OVL_XATTR_METACOPY);
if (err)
- return err;
+ goto out_free;
ovl_set_upperdata(d_inode(c->dentry));
+out_free:
+ kfree(capability);
+out:
return err;
}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 5e45cb3630a0..9c6018287d57 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -277,6 +277,8 @@ int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
int ovl_check_metacopy_xattr(struct dentry *dentry);
bool ovl_is_metacopy_dentry(struct dentry *dentry);
char *ovl_get_redirect_xattr(struct dentry *dentry, int padding);
+ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
+ size_t padding);
static inline bool ovl_is_impuredir(struct dentry *dentry)
{
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 7c01327b1852..4035e640f402 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -863,28 +863,49 @@ bool ovl_is_metacopy_dentry(struct dentry *dentry)
return (oe->numlower > 1);
}
-char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
+ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
+ size_t padding)
{
- int res;
- char *s, *next, *buf = NULL;
+ ssize_t res;
+ char *buf = NULL;
- res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, NULL, 0);
+ res = vfs_getxattr(dentry, name, NULL, 0);
if (res < 0) {
if (res == -ENODATA || res == -EOPNOTSUPP)
- return NULL;
+ return -ENODATA;
goto fail;
}
- buf = kzalloc(res + padding + 1, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
+ if (res != 0) {
+ buf = kzalloc(res + padding, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (res == 0)
- goto invalid;
+ res = vfs_getxattr(dentry, name, buf, res);
+ if (res < 0)
+ goto fail;
+ }
+ *value = buf;
+
+ return res;
+
+fail:
+ pr_warn_ratelimited("overlayfs: failed to get xattr %s: err=%zi\n",
+ name, res);
+ kfree(buf);
+ return res;
+}
- res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, buf, res);
+char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
+{
+ int res;
+ char *s, *next, *buf = NULL;
+
+ res = ovl_getxattr(dentry, OVL_XATTR_REDIRECT, &buf, padding + 1);
+ if (res == -ENODATA)
+ return NULL;
if (res < 0)
- goto fail;
+ return ERR_PTR(res);
if (res == 0)
goto invalid;
@@ -900,15 +921,9 @@ char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
}
return buf;
-
-err_free:
- kfree(buf);
- return ERR_PTR(res);
-fail:
- pr_warn_ratelimited("overlayfs: failed to get redirect (%i)\n", res);
- goto err_free;
invalid:
pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
res = -EINVAL;
- goto err_free;
+ kfree(buf);
+ return ERR_PTR(res);
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 51d5fd8840ab..41065901106b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -188,9 +188,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
* in the tee() system call, when we duplicate the buffers in one
* pipe into another.
*/
-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
- get_page(buf->page);
+ return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
@@ -225,8 +225,15 @@ void generic_pipe_buf_release(struct pipe_inode_info *pipe,
}
EXPORT_SYMBOL(generic_pipe_buf_release);
+/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
- .can_merge = 1,
+ .confirm = generic_pipe_buf_confirm,
+ .release = anon_pipe_buf_release,
+ .steal = anon_pipe_buf_steal,
+ .get = generic_pipe_buf_get,
+};
+
+static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = anon_pipe_buf_steal,
@@ -234,13 +241,32 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
};
static const struct pipe_buf_operations packet_pipe_buf_ops = {
- .can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = anon_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
+/**
+ * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
+ * @buf: the buffer to mark
+ *
+ * Description:
+ * This function ensures that no future writes will be merged into the
+ * given &struct pipe_buffer. This is necessary when multiple pipe buffers
+ * share the same backing page.
+ */
+void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
+{
+ if (buf->ops == &anon_pipe_buf_ops)
+ buf->ops = &anon_pipe_buf_nomerge_ops;
+}
+
+static bool pipe_buf_can_merge(struct pipe_buffer *buf)
+{
+ return buf->ops == &anon_pipe_buf_ops;
+}
+
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
@@ -378,7 +404,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
struct pipe_buffer *buf = pipe->bufs + lastbuf;
int offset = buf->offset + buf->len;
- if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
+ if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
ret = pipe_buf_confirm(pipe, buf);
if (ret)
goto out;
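
With .can_merge deleted, mergeability is encoded in which ops table a buffer points at: pipe_buf_mark_unmergeable() swaps an anon buffer to an identical table with a different identity, and the merge test becomes a pointer comparison. The pattern in isolation, with toy ops tables:

#include <stdbool.h>
#include <stdio.h>

struct buf_ops { const char *name; };

static const struct buf_ops merge_ops   = { "anon" };
static const struct buf_ops nomerge_ops = { "anon-nomerge" };

struct buf { const struct buf_ops *ops; };

static void mark_unmergeable(struct buf *b)
{
	if (b->ops == &merge_ops)
		b->ops = &nomerge_ops;	/* same ops, different identity */
}

static bool can_merge(const struct buf *b)
{
	return b->ops == &merge_ops;
}

int main(void)
{
	struct buf b = { &merge_ops };

	mark_unmergeable(&b);
	printf("mergeable after mark: %d\n", can_merge(&b));
	return 0;
}
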
diff --git a/fs/pnode.c b/fs/pnode.c
index 1100e810d855..7ea6cfb65077 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -214,7 +214,6 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
}
/* all accesses are serialized by namespace_sem */
-static struct user_namespace *user_ns;
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;
@@ -260,9 +259,6 @@ static int propagate_one(struct mount *m)
type |= CL_MAKE_SHARED;
}
- /* Notice when we are propagating across user namespaces */
- if (m->mnt_ns->user_ns != user_ns)
- type |= CL_UNPRIVILEGED;
child = copy_tree(last_source, last_source->mnt.mnt_root, type);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -303,7 +299,6 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
* propagate_one(); everything is serialized by namespace_sem,
* so globals will do just fine.
*/
- user_ns = current->nsproxy->mnt_ns->user_ns;
last_dest = dest_mnt;
first_source = source_mnt;
last_source = source_mnt;
diff --git a/fs/pnode.h b/fs/pnode.h
index dc87e65becd2..3960a83666cf 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -27,8 +27,7 @@
#define CL_MAKE_SHARED 0x08
#define CL_PRIVATE 0x10
#define CL_SHARED_TO_SLAVE 0x20
-#define CL_UNPRIVILEGED 0x40
-#define CL_COPY_MNT_NS_FILE 0x80
+#define CL_COPY_MNT_NS_FILE 0x40
#define CL_COPY_ALL (CL_COPY_UNBINDABLE | CL_COPY_MNT_NS_FILE)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5ab1849971b4..f179568b4c76 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -59,6 +59,7 @@
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/generic-radix-tree.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/namei.h>
@@ -92,7 +93,6 @@
#include <linux/sched/coredump.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
-#include <linux/flex_array.h>
#include <linux/posix-timers.h>
#include <trace/events/oom.h>
#include "internal.h"
@@ -407,7 +407,6 @@ static void unlock_trace(struct task_struct *task)
static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
- struct stack_trace trace;
unsigned long *entries;
int err;
@@ -430,20 +429,17 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
if (!entries)
return -ENOMEM;
- trace.nr_entries = 0;
- trace.max_entries = MAX_STACK_TRACE_DEPTH;
- trace.entries = entries;
- trace.skip = 0;
-
err = lock_trace(task);
if (!err) {
- unsigned int i;
+ unsigned int i, nr_entries;
- save_stack_trace_tsk(task, &trace);
+ nr_entries = stack_trace_save_tsk(task, entries,
+ MAX_STACK_TRACE_DEPTH, 0);
- for (i = 0; i < trace.nr_entries; i++) {
+ for (i = 0; i < nr_entries; i++) {
seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
}
+
unlock_trace(task);
}
kfree(entries);
@@ -489,10 +485,9 @@ static int lstats_show_proc(struct seq_file *m, void *v)
lr->count, lr->time, lr->max);
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
unsigned long bt = lr->backtrace[q];
+
if (!bt)
break;
- if (bt == ULONG_MAX)
- break;
seq_printf(m, " %ps", (void *)bt);
}
seq_putc(m, '\n');
@@ -616,24 +611,25 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
- long nr;
- unsigned long args[6], sp, pc;
+ struct syscall_info info;
+ u64 *args = &info.data.args[0];
int res;
res = lock_trace(task);
if (res)
return res;
- if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
+ if (task_current_syscall(task, &info))
seq_puts(m, "running\n");
- else if (nr < 0)
- seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
+ else if (info.data.nr < 0)
+ seq_printf(m, "%d 0x%llx 0x%llx\n",
+ info.data.nr, info.sp, info.data.instruction_pointer);
else
seq_printf(m,
- "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
- nr,
+ "%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
+ info.data.nr,
args[0], args[1], args[2], args[3], args[4], args[5],
- sp, pc);
+ info.sp, info.data.instruction_pointer);
unlock_trace(task);
return 0;
@@ -2142,11 +2138,12 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
struct task_struct *task;
struct mm_struct *mm;
unsigned long nr_files, pos, i;
- struct flex_array *fa = NULL;
- struct map_files_info info;
+ GENRADIX(struct map_files_info) fa;
struct map_files_info *p;
int ret;
+ genradix_init(&fa);
+
ret = -ENOENT;
task = get_proc_task(file_inode(file));
if (!task)
@@ -2178,35 +2175,22 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
*/
for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
- if (vma->vm_file && ++pos > ctx->pos)
- nr_files++;
- }
+ if (!vma->vm_file)
+ continue;
+ if (++pos <= ctx->pos)
+ continue;
- if (nr_files) {
- fa = flex_array_alloc(sizeof(info), nr_files,
- GFP_KERNEL);
- if (!fa || flex_array_prealloc(fa, 0, nr_files,
- GFP_KERNEL)) {
+ p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
+ if (!p) {
ret = -ENOMEM;
- if (fa)
- flex_array_free(fa);
up_read(&mm->mmap_sem);
mmput(mm);
goto out_put_task;
}
- for (i = 0, vma = mm->mmap, pos = 2; vma;
- vma = vma->vm_next) {
- if (!vma->vm_file)
- continue;
- if (++pos <= ctx->pos)
- continue;
- info.start = vma->vm_start;
- info.end = vma->vm_end;
- info.mode = vma->vm_file->f_mode;
- if (flex_array_put(fa, i++, &info, GFP_KERNEL))
- BUG();
- }
+ p->start = vma->vm_start;
+ p->end = vma->vm_end;
+ p->mode = vma->vm_file->f_mode;
}
up_read(&mm->mmap_sem);
mmput(mm);
@@ -2215,7 +2199,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
char buf[4 * sizeof(long) + 2]; /* max: %lx-%lx\0 */
unsigned int len;
- p = flex_array_get(fa, i);
+ p = genradix_ptr(&fa, i);
len = snprintf(buf, sizeof(buf), "%lx-%lx", p->start, p->end);
if (!proc_fill_cache(file, ctx,
buf, len,
@@ -2225,12 +2209,11 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
break;
ctx->pos++;
}
- if (fa)
- flex_array_free(fa);
out_put_task:
put_task_struct(task);
out:
+ genradix_free(&fa);
return ret;
}
@@ -2459,11 +2442,10 @@ static struct dentry *proc_pident_instantiate(struct dentry *dentry,
static struct dentry *proc_pident_lookup(struct inode *dir,
struct dentry *dentry,
- const struct pid_entry *ents,
- unsigned int nents)
+ const struct pid_entry *p,
+ const struct pid_entry *end)
{
struct task_struct *task = get_proc_task(dir);
- const struct pid_entry *p, *last;
struct dentry *res = ERR_PTR(-ENOENT);
if (!task)
@@ -2473,8 +2455,7 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
* Yes, it does not scale. And it should not. Don't add
* new entries into /proc/<tgid>/ without very good reasons.
*/
- last = &ents[nents];
- for (p = ents; p < last; p++) {
+ for (; p < end; p++) {
if (p->len != dentry->d_name.len)
continue;
if (!memcmp(dentry->d_name.name, p->name, p->len)) {
@@ -2610,7 +2591,7 @@ static struct dentry *proc_##LSM##_attr_dir_lookup(struct inode *dir, \
{ \
return proc_pident_lookup(dir, dentry, \
LSM##_attr_dir_stuff, \
- ARRAY_SIZE(LSM##_attr_dir_stuff)); \
+ LSM##_attr_dir_stuff + ARRAY_SIZE(LSM##_attr_dir_stuff)); \
} \
\
static const struct inode_operations proc_##LSM##_attr_dir_inode_ops = { \
@@ -2655,7 +2636,8 @@ static struct dentry *proc_attr_dir_lookup(struct inode *dir,
struct dentry *dentry, unsigned int flags)
{
return proc_pident_lookup(dir, dentry,
- attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
+ attr_dir_stuff,
+ attr_dir_stuff + ARRAY_SIZE(attr_dir_stuff));
}
static const struct inode_operations proc_attr_dir_inode_operations = {
@@ -3088,10 +3070,20 @@ static const struct file_operations proc_tgid_base_operations = {
.llseek = generic_file_llseek,
};
+struct pid *tgid_pidfd_to_pid(const struct file *file)
+{
+ if (!d_is_dir(file->f_path.dentry) ||
+ (file->f_op != &proc_tgid_base_operations))
+ return ERR_PTR(-EBADF);
+
+ return proc_pid(file_inode(file));
+}
+
static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
return proc_pident_lookup(dir, dentry,
- tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
+ tgid_base_stuff,
+ tgid_base_stuff + ARRAY_SIZE(tgid_base_stuff));
}
static const struct inode_operations proc_tgid_base_inode_operations = {
@@ -3463,7 +3455,8 @@ static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
return proc_pident_lookup(dir, dentry,
- tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
+ tid_base_stuff,
+ tid_base_stuff + ARRAY_SIZE(tid_base_stuff));
}
static const struct file_operations proc_tid_base_operations = {
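
proc_pident_lookup() now takes a half-open [p, end) range instead of a base pointer plus count, so callers pass `table + ARRAY_SIZE(table)` and the walk is a plain pointer loop with no separate length to keep in sync. The same interface in miniature:

#include <stdio.h>
#include <string.h>

struct entry { const char *name; int value; };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Half-open range [p, end): no separate count to get wrong. */
static const struct entry *lookup(const struct entry *p,
				  const struct entry *end,
				  const char *name)
{
	for (; p < end; p++)
		if (!strcmp(p->name, name))
			return p;
	return NULL;
}

int main(void)
{
	static const struct entry tbl[] = { {"stack", 1}, {"syscall", 2} };
	const struct entry *e = lookup(tbl, tbl + ARRAY_SIZE(tbl), "syscall");

	printf("%d\n", e ? e->value : -1);
	return 0;
}
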
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index da649ccd6804..fc7e38def174 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -24,7 +24,6 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>
-#include <linux/magic.h>
#include <linux/uaccess.h>
@@ -122,13 +121,12 @@ static int proc_show_options(struct seq_file *seq, struct dentry *root)
return 0;
}
-static const struct super_operations proc_sops = {
+const struct super_operations proc_sops = {
.alloc_inode = proc_alloc_inode,
.destroy_inode = proc_destroy_inode,
.drop_inode = generic_delete_inode,
.evict_inode = proc_evict_inode,
.statfs = simple_statfs,
- .remount_fs = proc_remount,
.show_options = proc_show_options,
};
@@ -488,51 +486,3 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
pde_put(de);
return inode;
}
-
-int proc_fill_super(struct super_block *s, void *data, int silent)
-{
- struct pid_namespace *ns = get_pid_ns(s->s_fs_info);
- struct inode *root_inode;
- int ret;
-
- if (!proc_parse_options(data, ns))
- return -EINVAL;
-
- /* User space would break if executables or devices appear on proc */
- s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
- s->s_flags |= SB_NODIRATIME | SB_NOSUID | SB_NOEXEC;
- s->s_blocksize = 1024;
- s->s_blocksize_bits = 10;
- s->s_magic = PROC_SUPER_MAGIC;
- s->s_op = &proc_sops;
- s->s_time_gran = 1;
-
- /*
- * procfs isn't actually a stacking filesystem; however, there is
- * too much magic going on inside it to permit stacking things on
- * top of it
- */
- s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
-
- /* procfs dentries and inodes don't require IO to create */
- s->s_shrink.seeks = 0;
-
- pde_get(&proc_root);
- root_inode = proc_get_inode(s, &proc_root);
- if (!root_inode) {
- pr_err("proc_fill_super: get root inode failed\n");
- return -ENOMEM;
- }
-
- s->s_root = d_make_root(root_inode);
- if (!s->s_root) {
- pr_err("proc_fill_super: allocate dentry failed\n");
- return -ENOMEM;
- }
-
- ret = proc_setup_self(s);
- if (ret) {
- return ret;
- }
- return proc_setup_thread_self(s);
-}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index ea575375f210..d1671e97f7fe 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -207,13 +207,12 @@ struct pde_opener {
struct completion *c;
} __randomize_layout;
extern const struct inode_operations proc_link_inode_operations;
-
extern const struct inode_operations proc_pid_link_inode_operations;
+extern const struct super_operations proc_sops;
void proc_init_kmemcache(void);
void set_proc_pid_nlink(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
-extern int proc_fill_super(struct super_block *, void *data, int flags);
extern void proc_entry_rundown(struct proc_dir_entry *);
/*
@@ -271,10 +270,8 @@ static inline void proc_tty_init(void) {}
* root.c
*/
extern struct proc_dir_entry proc_root;
-extern int proc_parse_options(char *options, struct pid_namespace *pid);
extern void proc_self_init(void);
-extern int proc_remount(struct super_block *, int *, char *);
/*
* task_[no]mmu.c
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index bbcc185062bb..f5834488b67d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;
+/*
+ * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
+ * Same as oldmem_pfn_is_ram in vmcore
+ */
+static int (*mem_pfn_is_ram)(unsigned long pfn);
+
+int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
+{
+ if (mem_pfn_is_ram)
+ return -EBUSY;
+ mem_pfn_is_ram = fn;
+ return 0;
+}
+
+static int pfn_is_ram(unsigned long pfn)
+{
+ if (mem_pfn_is_ram)
+ return mem_pfn_is_ram(pfn);
+ else
+ return 1;
+}
+
/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
int type)
@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
goto out;
}
m = NULL; /* skip the list anchor */
+ } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
+ if (clear_user(buffer, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else if (m->type == KCORE_VMALLOC) {
vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
@@ -588,7 +615,7 @@ static void __init proc_kcore_text_init(void)
/*
* MODULES_VADDR has no intersection with VMALLOC_ADDR.
*/
-struct kcore_list kcore_modules;
+static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 4d598a399bbf..7325baa8f9d4 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1626,8 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
if (--header->nreg)
return;
- put_links(header);
- start_unregistering(header);
+ if (parent) {
+ put_links(header);
+ start_unregistering(header);
+ }
+
if (!--header->count)
kfree_rcu(header, rcu);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 621e6ec322ca..8b145e7b9661 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -19,86 +19,178 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/user_namespace.h>
+#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/pid_namespace.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
#include <linux/cred.h>
+#include <linux/magic.h>
+#include <linux/slab.h>
#include "internal.h"
-enum {
- Opt_gid, Opt_hidepid, Opt_err,
+struct proc_fs_context {
+ struct pid_namespace *pid_ns;
+ unsigned int mask;
+ int hidepid;
+ int gid;
};
-static const match_table_t tokens = {
- {Opt_hidepid, "hidepid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_err, NULL},
+enum proc_param {
+ Opt_gid,
+ Opt_hidepid,
};
-int proc_parse_options(char *options, struct pid_namespace *pid)
+static const struct fs_parameter_spec proc_param_specs[] = {
+ fsparam_u32("gid", Opt_gid),
+ fsparam_u32("hidepid", Opt_hidepid),
+ {}
+};
+
+static const struct fs_parameter_description proc_fs_parameters = {
+ .name = "proc",
+ .specs = proc_param_specs,
+};
+
+static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
-
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
-
- args[0].to = args[0].from = NULL;
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_gid:
- if (match_int(&args[0], &option))
- return 0;
- pid->pid_gid = make_kgid(current_user_ns(), option);
- break;
- case Opt_hidepid:
- if (match_int(&args[0], &option))
- return 0;
- if (option < HIDEPID_OFF ||
- option > HIDEPID_INVISIBLE) {
- pr_err("proc: hidepid value must be between 0 and 2.\n");
- return 0;
- }
- pid->hide_pid = option;
- break;
- default:
- pr_err("proc: unrecognized mount option \"%s\" "
- "or missing value\n", p);
- return 0;
- }
+ struct proc_fs_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, &proc_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_gid:
+ ctx->gid = result.uint_32;
+ break;
+
+ case Opt_hidepid:
+ ctx->hidepid = result.uint_32;
+ if (ctx->hidepid < HIDEPID_OFF ||
+ ctx->hidepid > HIDEPID_INVISIBLE)
+ return invalf(fc, "proc: hidepid value must be between 0 and 2.\n");
+ break;
+
+ default:
+ return -EINVAL;
}
- return 1;
+ ctx->mask |= 1 << opt;
+ return 0;
}
-int proc_remount(struct super_block *sb, int *flags, char *data)
+static void proc_apply_options(struct super_block *s,
+ struct fs_context *fc,
+ struct pid_namespace *pid_ns,
+ struct user_namespace *user_ns)
{
+ struct proc_fs_context *ctx = fc->fs_private;
+
+ if (ctx->mask & (1 << Opt_gid))
+ pid_ns->pid_gid = make_kgid(user_ns, ctx->gid);
+ if (ctx->mask & (1 << Opt_hidepid))
+ pid_ns->hide_pid = ctx->hidepid;
+}
+
+static int proc_fill_super(struct super_block *s, struct fs_context *fc)
+{
+ struct pid_namespace *pid_ns = get_pid_ns(s->s_fs_info);
+ struct inode *root_inode;
+ int ret;
+
+ proc_apply_options(s, fc, pid_ns, current_user_ns());
+
+ /* User space would break if executables or devices appear on proc */
+ s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
+ s->s_flags |= SB_NODIRATIME | SB_NOSUID | SB_NOEXEC;
+ s->s_blocksize = 1024;
+ s->s_blocksize_bits = 10;
+ s->s_magic = PROC_SUPER_MAGIC;
+ s->s_op = &proc_sops;
+ s->s_time_gran = 1;
+
+ /*
+ * procfs isn't actually a stacking filesystem; however, there is
+ * too much magic going on inside it to permit stacking things on
+ * top of it
+ */
+ s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+
+ /* procfs dentries and inodes don't require IO to create */
+ s->s_shrink.seeks = 0;
+
+ pde_get(&proc_root);
+ root_inode = proc_get_inode(s, &proc_root);
+ if (!root_inode) {
+ pr_err("proc_fill_super: get root inode failed\n");
+ return -ENOMEM;
+ }
+
+ s->s_root = d_make_root(root_inode);
+ if (!s->s_root) {
+ pr_err("proc_fill_super: allocate dentry failed\n");
+ return -ENOMEM;
+ }
+
+ ret = proc_setup_self(s);
+ if (ret) {
+ return ret;
+ }
+ return proc_setup_thread_self(s);
+}
+
+static int proc_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
struct pid_namespace *pid = sb->s_fs_info;
sync_filesystem(sb);
- return !proc_parse_options(data, pid);
+
+ proc_apply_options(sb, fc, pid, current_user_ns());
+ return 0;
}
-static struct dentry *proc_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int proc_get_tree(struct fs_context *fc)
{
- struct pid_namespace *ns;
+ struct proc_fs_context *ctx = fc->fs_private;
- if (flags & SB_KERNMOUNT) {
- ns = data;
- data = NULL;
- } else {
- ns = task_active_pid_ns(current);
- }
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(ctx->pid_ns->user_ns);
+ fc->s_fs_info = ctx->pid_ns;
+ return vfs_get_super(fc, vfs_get_keyed_super, proc_fill_super);
+}
- return mount_ns(fs_type, flags, data, ns, ns->user_ns, proc_fill_super);
+static void proc_fs_context_free(struct fs_context *fc)
+{
+ struct proc_fs_context *ctx = fc->fs_private;
+
+ if (ctx->pid_ns)
+ put_pid_ns(ctx->pid_ns);
+ kfree(ctx);
+}
+
+static const struct fs_context_operations proc_fs_context_ops = {
+ .free = proc_fs_context_free,
+ .parse_param = proc_parse_param,
+ .get_tree = proc_get_tree,
+ .reconfigure = proc_reconfigure,
+};
+
+static int proc_init_fs_context(struct fs_context *fc)
+{
+ struct proc_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct proc_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->pid_ns = get_pid_ns(task_active_pid_ns(current));
+ fc->fs_private = ctx;
+ fc->ops = &proc_fs_context_ops;
+ return 0;
}
static void proc_kill_sb(struct super_block *sb)
@@ -115,10 +207,11 @@ static void proc_kill_sb(struct super_block *sb)
}
static struct file_system_type proc_fs_type = {
- .name = "proc",
- .mount = proc_mount,
- .kill_sb = proc_kill_sb,
- .fs_flags = FS_USERNS_MOUNT,
+ .name = "proc",
+ .init_fs_context = proc_init_fs_context,
+ .parameters = &proc_fs_parameters,
+ .kill_sb = proc_kill_sb,
+ .fs_flags = FS_USERNS_MOUNT,
};
void __init proc_root_init(void)
@@ -156,7 +249,7 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
{
if (!proc_pid_lookup(dentry, flags))
return NULL;
-
+
return proc_lookup(dir, dentry, flags);
}
@@ -209,9 +302,28 @@ struct proc_dir_entry proc_root = {
int pid_ns_prepare_proc(struct pid_namespace *ns)
{
+ struct proc_fs_context *ctx;
+ struct fs_context *fc;
struct vfsmount *mnt;
- mnt = kern_mount_data(&proc_fs_type, ns);
+ fc = fs_context_for_mount(&proc_fs_type, SB_KERNMOUNT);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ if (fc->user_ns != ns->user_ns) {
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(ns->user_ns);
+ }
+
+ ctx = fc->fs_private;
+ if (ctx->pid_ns != ns) {
+ put_pid_ns(ctx->pid_ns);
+ get_pid_ns(ns);
+ ctx->pid_ns = ns;
+ }
+
+ mnt = fc_mount(fc);
+ put_fs_context(fc);
if (IS_ERR(mnt))
return PTR_ERR(mnt);
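
The fs_context conversion splits mounting into phases: allocate a private context, parse each parameter into it (recording which options were seen in a mask), then apply only the recorded options when the superblock is created or reconfigured. A compact userspace sketch of that parse/apply split (names and option set illustrative):

#include <stdlib.h>
#include <string.h>

struct ctx { unsigned int mask; int gid, hidepid; };

enum { OPT_GID, OPT_HIDEPID };

static struct ctx *ctx_init(void)
{
	return calloc(1, sizeof(struct ctx));
}

static int ctx_parse(struct ctx *c, const char *key, int val)
{
	if (!strcmp(key, "gid")) {
		c->gid = val;
		c->mask |= 1u << OPT_GID;
	} else if (!strcmp(key, "hidepid")) {
		if (val < 0 || val > 2)
			return -1;	/* like invalf(): reject early */
		c->hidepid = val;
		c->mask |= 1u << OPT_HIDEPID;
	} else {
		return -1;
	}
	return 0;
}

/* Apply only the options that were set, as proc_apply_options() does. */
static void ctx_apply(const struct ctx *c, int *gid, int *hidepid)
{
	if (c->mask & (1u << OPT_GID))
		*gid = c->gid;
	if (c->mask & (1u << OPT_HIDEPID))
		*hidepid = c->hidepid;
}
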
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index beccb0b1d57c..95ca1fe7283c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -59,7 +59,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
- SEQ_PUT_DEC(" kB\nVmPin:\t", mm->pinned_vm);
+ SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
@@ -1143,6 +1143,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
count = -EINTR;
goto out_mm;
}
+ /*
+ * Avoid modifying vma->vm_flags
+ * without locked ops while the
+ * coredump reads the vm_flags.
+ */
+ if (!mmget_still_valid(mm)) {
+ /*
+ * Silently return "count" as
+ * if get_task_mm() had
+ * failed. FIXME: should this
+ * function have returned
+ * -ESRCH if get_task_mm()
+ * failed, as when
+ * get_proc_task() fails?
+ */
+ up_write(&mm->mmap_sem);
+ goto out_mm;
+ }
for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma->vm_flags &= ~VM_SOFTDIRTY;
vma_set_page_prot(vma);
diff --git a/fs/read_write.c b/fs/read_write.c
index 30df848b7451..61b43ad7608e 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -478,8 +478,8 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
return ret;
}
-ssize_t __vfs_write(struct file *file, const char __user *p, size_t count,
- loff_t *pos)
+static ssize_t __vfs_write(struct file *file, const char __user *p,
+ size_t count, loff_t *pos)
{
if (file->f_op->write)
return file->f_op->write(file, p, count, pos);
@@ -560,12 +560,13 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
static inline loff_t file_pos_read(struct file *file)
{
- return file->f_pos;
+ return file->f_mode & FMODE_STREAM ? 0 : file->f_pos;
}
static inline void file_pos_write(struct file *file, loff_t pos)
{
- file->f_pos = pos;
+ if ((file->f_mode & FMODE_STREAM) == 0)
+ file->f_pos = pos;
}
ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
@@ -1238,6 +1239,9 @@ COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
const struct compat_iovec __user *,vec,
unsigned long, vlen, loff_t, pos, rwf_t, flags)
{
+ if (pos == -1)
+ return do_compat_readv(fd, vec, vlen, flags);
+
return do_compat_preadv64(fd, vec, vlen, pos, flags);
}
#endif
@@ -1344,6 +1348,9 @@ COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
const struct compat_iovec __user *,vec,
unsigned long, vlen, loff_t, pos, rwf_t, flags)
{
+ if (pos == -1)
+ return do_compat_writev(fd, vec, vlen, flags);
+
return do_compat_pwritev64(fd, vec, vlen, pos, flags);
}
#endif
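
FMODE_STREAM threads through the read/write path as shown above: a stream descriptor always reads position 0 and never writes one back, while pos == -1 in preadv2()/pwritev2() means "use the current file position", which is why the compat wrappers now divert to the plain readv/writev helpers. The position helpers in standalone form (flag value illustrative):

#include <sys/types.h>

#define FMODE_STREAM 0x1u

struct file { unsigned int f_mode; off_t f_pos; };

/* Streams always read "position 0" and never store one back. */
static off_t file_pos_read(const struct file *f)
{
	return (f->f_mode & FMODE_STREAM) ? 0 : f->f_pos;
}

static void file_pos_write(struct file *f, off_t pos)
{
	if (!(f->f_mode & FMODE_STREAM))
		f->f_pos = pos;
}
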
diff --git a/fs/splice.c b/fs/splice.c
index 6489fb9436e4..25212dcca2df 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -138,7 +138,6 @@ error:
}
const struct pipe_buf_operations page_cache_pipe_buf_ops = {
- .can_merge = 0,
.confirm = page_cache_pipe_buf_confirm,
.release = page_cache_pipe_buf_release,
.steal = page_cache_pipe_buf_steal,
@@ -156,7 +155,6 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
}
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
- .can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = page_cache_pipe_buf_release,
.steal = user_page_pipe_buf_steal,
@@ -326,22 +324,20 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
EXPORT_SYMBOL(generic_file_splice_read);
const struct pipe_buf_operations default_pipe_buf_ops = {
- .can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = generic_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
return 1;
}
/* Pipe buffer operations for a socket and similar. */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
- .can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = generic_pipe_buf_release,
.steal = generic_pipe_buf_nosteal,
@@ -1597,7 +1593,11 @@ retry:
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
- pipe_buf_get(ipipe, ibuf);
+ if (!pipe_buf_get(ipipe, ibuf)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
*obuf = *ibuf;
/*
@@ -1606,6 +1606,8 @@ retry:
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+ pipe_buf_mark_unmergeable(obuf);
+
obuf->len = len;
opipe->nrbufs++;
ibuf->offset += obuf->len;
@@ -1669,7 +1671,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
- pipe_buf_get(ipipe, ibuf);
+ if (!pipe_buf_get(ipipe, ibuf)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
@@ -1680,6 +1686,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+ pipe_buf_mark_unmergeable(obuf);
+
if (obuf->len > len)
obuf->len = len;
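
generic_pipe_buf_get() returning bool reflects that try_get_page() can refuse to pin a page whose refcount is saturated; tee()/splice() must now bail out with -EFAULT instead of silently taking a reference that can never be dropped safely. A toy version of a get that can fail:

#include <limits.h>
#include <stdbool.h>

/*
 * A get that can fail: refuse to pin an object whose refcount is
 * saturated or already dead -- the case splice now has to handle.
 */
static bool try_get(int *refcount)
{
	if (*refcount <= 0 || *refcount == INT_MAX)
		return false;	/* caller maps this to -EFAULT mid-splice */
	(*refcount)++;
	return true;
}
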
diff --git a/fs/stat.c b/fs/stat.c
index adbfcd86c81b..c38e4c2e1221 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -45,11 +45,6 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
stat->ctime = inode->i_ctime;
stat->blksize = i_blocksize(inode);
stat->blocks = inode->i_blocks;
-
- if (IS_NOATIME(inode))
- stat->result_mask &= ~STATX_ATIME;
- if (IS_AUTOMOUNT(inode))
- stat->attributes |= STATX_ATTR_AUTOMOUNT;
}
EXPORT_SYMBOL(generic_fillattr);
@@ -75,6 +70,13 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
stat->result_mask |= STATX_BASIC_STATS;
request_mask &= STATX_ALL;
query_flags &= KSTAT_QUERY_FLAGS;
+
+ /* allow the fs to override these if it really wants to */
+ if (IS_NOATIME(inode))
+ stat->result_mask &= ~STATX_ATIME;
+ if (IS_AUTOMOUNT(inode))
+ stat->attributes |= STATX_ATTR_AUTOMOUNT;
+
if (inode->i_op->getattr)
return inode->i_op->getattr(path, stat, request_mask,
query_flags);
diff --git a/fs/super.c b/fs/super.c
index 48e25eba8465..2739f57515f8 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -35,6 +35,7 @@
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
+#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"
@@ -476,6 +477,94 @@ void generic_shutdown_super(struct super_block *sb)
EXPORT_SYMBOL(generic_shutdown_super);
/**
+ * sget_fc - Find or create a superblock
+ * @fc: Filesystem context.
+ * @test: Comparison callback
+ * @set: Setup callback
+ *
+ * Find or create a superblock using the parameters stored in the filesystem
+ * context and the two callback functions.
+ *
+ * If an extant superblock is matched, then that will be returned with an
+ * elevated reference count that the caller must transfer or discard.
+ *
+ * If no match is made, a new superblock will be allocated and basic
+ * initialisation will be performed (s_type, s_fs_info and s_id will be set and
+ * the set() callback will be invoked), the superblock will be published and it
+ * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
+ * as yet unset.
+ */
+struct super_block *sget_fc(struct fs_context *fc,
+ int (*test)(struct super_block *, struct fs_context *),
+ int (*set)(struct super_block *, struct fs_context *))
+{
+ struct super_block *s = NULL;
+ struct super_block *old;
+ struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
+ int err;
+
+ if (!(fc->sb_flags & SB_KERNMOUNT) &&
+ fc->purpose != FS_CONTEXT_FOR_SUBMOUNT) {
+ /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
+ * over the namespace.
+ */
+ if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+ } else {
+ if (!ns_capable(fc->user_ns, CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+ }
+ }
+
+retry:
+ spin_lock(&sb_lock);
+ if (test) {
+ hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
+ if (test(old, fc))
+ goto share_extant_sb;
+ }
+ }
+ if (!s) {
+ spin_unlock(&sb_lock);
+ s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
+ goto retry;
+ }
+
+ s->s_fs_info = fc->s_fs_info;
+ err = set(s, fc);
+ if (err) {
+ s->s_fs_info = NULL;
+ spin_unlock(&sb_lock);
+ destroy_unused_super(s);
+ return ERR_PTR(err);
+ }
+ fc->s_fs_info = NULL;
+ s->s_type = fc->fs_type;
+ strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
+ list_add_tail(&s->s_list, &super_blocks);
+ hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
+ spin_unlock(&sb_lock);
+ get_filesystem(s->s_type);
+ register_shrinker_prepared(&s->s_shrink);
+ return s;
+
+share_extant_sb:
+ if (user_ns != old->s_user_ns) {
+ spin_unlock(&sb_lock);
+ destroy_unused_super(s);
+ return ERR_PTR(-EBUSY);
+ }
+ if (!grab_super(old))
+ goto retry;
+ destroy_unused_super(s);
+ return old;
+}
+EXPORT_SYMBOL(sget_fc);
+
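
The locking dance in sget_fc() is the classic find-or-create with optimistic
allocation: search under the lock, drop the lock to allocate, then re-run the
search because a racing mount may have published a match meanwhile. A rough
userspace sketch of just that pattern, with a pthread mutex standing in for
sb_lock and toy objects in place of superblocks:

#include <pthread.h>
#include <stdlib.h>

struct obj { int key; struct obj *next; };

static struct obj *list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *find_or_create(int key)
{
	struct obj *new = NULL, *o;

retry:
	pthread_mutex_lock(&list_lock);
	for (o = list; o; o = o->next) {
		if (o->key == key) {		/* like share_extant_sb */
			pthread_mutex_unlock(&list_lock);
			free(new);		/* destroy_unused_super() */
			return o;
		}
	}
	if (!new) {
		/* Can't allocate under the lock: drop it, allocate,
		 * and retry the search from scratch. */
		pthread_mutex_unlock(&list_lock);
		new = calloc(1, sizeof(*new));
		if (!new)
			return NULL;
		goto retry;
	}
	new->key = key;				/* the set() callback step */
	new->next = list;
	list = new;				/* publish */
	pthread_mutex_unlock(&list_lock);
	return new;
}

int main(void)
{
	return find_or_create(1) == find_or_create(1) ? 0 : 1;
}
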
+/**
* sget_userns - find or create a superblock
* @type: filesystem type superblock should belong to
* @test: comparison callback
@@ -835,28 +924,35 @@ rescan:
}
/**
- * do_remount_sb - asks filesystem to change mount options.
- * @sb: superblock in question
- * @sb_flags: revised superblock flags
- * @data: the rest of options
- * @force: whether or not to force the change
+ * reconfigure_super - asks filesystem to change superblock parameters
+ * @fc: The superblock and configuration
*
- * Alters the mount options of a mounted file system.
+ * Alters the configuration parameters of a live superblock.
*/
-int do_remount_sb(struct super_block *sb, int sb_flags, void *data, int force)
+int reconfigure_super(struct fs_context *fc)
{
+ struct super_block *sb = fc->root->d_sb;
int retval;
- int remount_ro;
+ bool remount_ro = false;
+ bool force = fc->sb_flags & SB_FORCE;
+ if (fc->sb_flags_mask & ~MS_RMT_MASK)
+ return -EINVAL;
if (sb->s_writers.frozen != SB_UNFROZEN)
return -EBUSY;
+ retval = security_sb_remount(sb, fc->security);
+ if (retval)
+ return retval;
+
+ if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
- if (!(sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev))
- return -EACCES;
+ if (!(fc->sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev))
+ return -EACCES;
#endif
- remount_ro = (sb_flags & SB_RDONLY) && !sb_rdonly(sb);
+ remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
+ }
if (remount_ro) {
if (!hlist_empty(&sb->s_pins)) {
@@ -867,13 +963,14 @@ int do_remount_sb(struct super_block *sb, int sb_flags, void *data, int force)
return 0;
if (sb->s_writers.frozen != SB_UNFROZEN)
return -EBUSY;
- remount_ro = (sb_flags & SB_RDONLY) && !sb_rdonly(sb);
+ remount_ro = !sb_rdonly(sb);
}
}
shrink_dcache_sb(sb);
- /* If we are remounting RDONLY and current sb is read/write,
- make sure there are no rw files opened */
+ /* If we are reconfiguring to RDONLY and current sb is read/write,
+ * make sure there are no files open for writing.
+ */
if (remount_ro) {
if (force) {
sb->s_readonly_remount = 1;
@@ -885,8 +982,8 @@ int do_remount_sb(struct super_block *sb, int sb_flags, void *data, int force)
}
}
- if (sb->s_op->remount_fs) {
- retval = sb->s_op->remount_fs(sb, &sb_flags, data);
+ if (fc->ops->reconfigure) {
+ retval = fc->ops->reconfigure(fc);
if (retval) {
if (!force)
goto cancel_readonly;
@@ -895,7 +992,9 @@ int do_remount_sb(struct super_block *sb, int sb_flags, void *data, int force)
sb->s_type->name, retval);
}
}
- sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (sb_flags & MS_RMT_MASK);
+
+ WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
+ (fc->sb_flags & fc->sb_flags_mask)));
/* Needs to be ordered wrt mnt_is_readonly() */
smp_wmb();
sb->s_readonly_remount = 0;
@@ -922,10 +1021,15 @@ static void do_emergency_remount_callback(struct super_block *sb)
down_write(&sb->s_umount);
if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
!sb_rdonly(sb)) {
- /*
- * What lock protects sb->s_flags??
- */
- do_remount_sb(sb, SB_RDONLY, NULL, 1);
+ struct fs_context *fc;
+
+ fc = fs_context_for_reconfigure(sb->s_root,
+ SB_RDONLY | SB_FORCE, SB_RDONLY);
+ if (!IS_ERR(fc)) {
+ if (parse_monolithic_mount_data(fc, NULL) == 0)
+ (void)reconfigure_super(fc);
+ put_fs_context(fc);
+ }
}
up_write(&sb->s_umount);
}
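
The WRITE_ONCE() in reconfigure_super() computes the new flag word as
(old & ~mask) | (new & mask), so only the bits named in fc->sb_flags_mask
can change. In isolation (the two flag values below are stand-ins chosen
for the demo, not authoritative kernel definitions):

#include <stdio.h>

#define DEMO_RDONLY	0x01
#define DEMO_SYNC	0x10

static unsigned int apply_masked(unsigned int old, unsigned int new,
				 unsigned int mask)
{
	/* Bits outside 'mask' are preserved from 'old'. */
	return (old & ~mask) | (new & mask);
}

int main(void)
{
	unsigned int flags = DEMO_SYNC;		/* rw + sync */

	/* Remount read-only without disturbing the sync bit. */
	flags = apply_masked(flags, DEMO_RDONLY, DEMO_RDONLY);
	printf("rdonly=%d sync=%d\n",
	       !!(flags & DEMO_RDONLY), !!(flags & DEMO_SYNC));
	return 0;
}
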
@@ -1087,6 +1191,89 @@ struct dentry *mount_ns(struct file_system_type *fs_type,
EXPORT_SYMBOL(mount_ns);
+int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
+{
+ return set_anon_super(sb, NULL);
+}
+EXPORT_SYMBOL(set_anon_super_fc);
+
+static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
+{
+ return sb->s_fs_info == fc->s_fs_info;
+}
+
+static int test_single_super(struct super_block *s, struct fs_context *fc)
+{
+ return 1;
+}
+
+/**
+ * vfs_get_super - Get a superblock with a search key set in s_fs_info.
+ * @fc: The filesystem context holding the parameters
+ * @keying: How to distinguish superblocks
+ * @fill_super: Helper to initialise a new superblock
+ *
+ * Search for a superblock and create a new one if not found. The search
+ * criterion is controlled by @keying. If the search fails, a new superblock
+ * is created and @fill_super() is called to initialise it.
+ *
+ * @keying can take one of a number of values:
+ *
+ * (1) vfs_get_single_super - Only one superblock of this type may exist on the
+ * system. This is typically used for special system filesystems.
+ *
+ * (2) vfs_get_keyed_super - Multiple superblocks may exist, but they must have
+ * distinct keys (where the key is in s_fs_info). Searching for the same
+ * key again will turn up the superblock for that key.
+ *
+ * (3) vfs_get_independent_super - Multiple superblocks may exist and are
+ * unkeyed. Each call will get a new superblock.
+ *
+ * A permissions check is made by sget_fc() unless we're getting a superblock
+ * for a kernel-internal mount or a submount.
+ */
+int vfs_get_super(struct fs_context *fc,
+ enum vfs_get_super_keying keying,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc))
+{
+ int (*test)(struct super_block *, struct fs_context *);
+ struct super_block *sb;
+
+ switch (keying) {
+ case vfs_get_single_super:
+ test = test_single_super;
+ break;
+ case vfs_get_keyed_super:
+ test = test_keyed_super;
+ break;
+ case vfs_get_independent_super:
+ test = NULL;
+ break;
+ default:
+ BUG();
+ }
+
+ sb = sget_fc(fc, test, set_anon_super_fc);
+ if (IS_ERR(sb))
+ return PTR_ERR(sb);
+
+ if (!sb->s_root) {
+ int err = fill_super(sb, fc);
+ if (err) {
+ deactivate_locked_super(sb);
+ return err;
+ }
+
+ sb->s_flags |= SB_ACTIVE;
+ }
+
+ BUG_ON(fc->root);
+ fc->root = dget(sb->s_root);
+ return 0;
+}
+EXPORT_SYMBOL(vfs_get_super);
+
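
How vfs_get_super() maps @keying onto a comparison callback, reduced to a
userspace sketch (the struct and enum names below are illustrative
stand-ins, not the kernel types):

#include <stdio.h>
#include <stddef.h>

struct ctx { void *key; };
struct sb  { void *key; };

static int test_single(struct sb *sb, struct ctx *c) { return 1; }
static int test_keyed(struct sb *sb, struct ctx *c)
{
	return sb->key == c->key;	/* s_fs_info comparison */
}

enum keying { KEY_SINGLE, KEY_KEYED, KEY_INDEPENDENT };

static int (*pick_test(enum keying k))(struct sb *, struct ctx *)
{
	switch (k) {
	case KEY_SINGLE:	return test_single;	/* at most one sb */
	case KEY_KEYED:		return test_keyed;	/* share on key match */
	case KEY_INDEPENDENT:	return NULL;		/* never share */
	}
	return NULL;
}

int main(void)
{
	struct ctx c = { .key = (void *)1 };
	struct sb  s = { .key = (void *)1 };
	int (*test)(struct sb *, struct ctx *) = pick_test(KEY_KEYED);

	printf("match: %d\n", test ? test(&s, &c) : 0);
	return 0;
}
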
#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
@@ -1212,6 +1399,31 @@ struct dentry *mount_nodev(struct file_system_type *fs_type,
}
EXPORT_SYMBOL(mount_nodev);
+static int reconfigure_single(struct super_block *s,
+ int flags, void *data)
+{
+ struct fs_context *fc;
+ int ret;
+
+ /* The caller really need to be passing fc down into mount_single(),
+ /* The caller really needs to be passing fc down into mount_single(),
+ * then a chunk of this can be removed. [Bollocks -- AV]
+ * Better yet, reconfiguration shouldn't happen, but rather the second
+ * mount should be rejected if the parameters are not compatible.
+ */
+ fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ ret = parse_monolithic_mount_data(fc, data);
+ if (ret < 0)
+ goto out;
+
+ ret = reconfigure_super(fc);
+out:
+ put_fs_context(fc);
+ return ret;
+}
+
static int compare_single(struct super_block *s, void *p)
{
return 1;
@@ -1229,41 +1441,59 @@ struct dentry *mount_single(struct file_system_type *fs_type,
return ERR_CAST(s);
if (!s->s_root) {
error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
- if (error) {
- deactivate_locked_super(s);
- return ERR_PTR(error);
- }
- s->s_flags |= SB_ACTIVE;
+ if (!error)
+ s->s_flags |= SB_ACTIVE;
} else {
- do_remount_sb(s, flags, data, 0);
+ error = reconfigure_single(s, flags, data);
+ }
+ if (unlikely(error)) {
+ deactivate_locked_super(s);
+ return ERR_PTR(error);
}
return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
-struct dentry *
-mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
+/**
+ * vfs_get_tree - Get the mountable root
+ * @fc: The superblock configuration context.
+ *
+ * The filesystem is invoked to get or create a superblock which can then later
+ * be used for mounting. The filesystem places a pointer to the root to be
+ * used for mounting in @fc->root.
+ */
+int vfs_get_tree(struct fs_context *fc)
{
- struct dentry *root;
struct super_block *sb;
- int error = -ENOMEM;
- void *sec_opts = NULL;
+ int error;
- if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
- error = security_sb_eat_lsm_opts(data, &sec_opts);
- if (error)
- return ERR_PTR(error);
- }
+ if (fc->root)
+ return -EBUSY;
- root = type->mount(type, flags, name, data);
- if (IS_ERR(root)) {
- error = PTR_ERR(root);
- goto out_free_secdata;
+ /* Get the mountable root in fc->root, with a ref on the root and a ref
+ * on the superblock.
+ */
+ error = fc->ops->get_tree(fc);
+ if (error < 0)
+ return error;
+
+ if (!fc->root) {
+ pr_err("Filesystem %s get_tree() didn't set fc->root\n",
+ fc->fs_type->name);
+ /* We don't know what the locking state of the superblock is -
+ * if there is a superblock.
+ */
+ BUG();
}
- sb = root->d_sb;
- BUG_ON(!sb);
+
+ sb = fc->root->d_sb;
WARN_ON(!sb->s_bdi);
+ if (fc->subtype && !sb->s_subtype) {
+ sb->s_subtype = fc->subtype;
+ fc->subtype = NULL;
+ }
+
/*
* Write barrier is for super_cache_count(). We place it before setting
* SB_BORN as the data dependency between the two functions is the
@@ -1273,14 +1503,10 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
smp_wmb();
sb->s_flags |= SB_BORN;
- error = security_sb_set_mnt_opts(sb, sec_opts, 0, NULL);
- if (error)
- goto out_sb;
-
- if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT))) {
- error = security_sb_kern_mount(sb);
- if (error)
- goto out_sb;
+ error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
+ if (unlikely(error)) {
+ fc_drop_locked(fc);
+ return error;
}
/*
@@ -1290,18 +1516,11 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
* violate this rule.
*/
WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
- "negative value (%lld)\n", type->name, sb->s_maxbytes);
+ "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
- up_write(&sb->s_umount);
- security_free_mnt_opts(&sec_opts);
- return root;
-out_sb:
- dput(root);
- deactivate_locked_super(sb);
-out_free_secdata:
- security_free_mnt_opts(&sec_opts);
- return ERR_PTR(error);
+ return 0;
}
+EXPORT_SYMBOL(vfs_get_tree);
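
The contract vfs_get_tree() enforces, shrunk to a hedged userspace sketch
(stand-in types throughout; where this toy returns -EINVAL the kernel
BUG()s instead, since the superblock's lock state is unknown at that point):

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct fc;
struct fc_ops { int (*get_tree)(struct fc *fc); };
struct fc {
	const struct fc_ops *ops;
	void *root;			/* stands in for fc->root */
};

static int demo_get_tree(struct fc *fc)
{
	static int root_object;

	fc->root = &root_object;	/* filesystem publishes its root */
	return 0;
}

static int get_tree(struct fc *fc)
{
	int err;

	if (fc->root)
		return -EBUSY;		/* already instantiated */
	err = fc->ops->get_tree(fc);
	if (err < 0)
		return err;
	if (!fc->root)
		return -EINVAL;		/* broken filesystem */
	return 0;
}

int main(void)
{
	const struct fc_ops ops = { .get_tree = demo_get_tree };
	struct fc fc = { .ops = &ops };

	printf("get_tree: %d, root set: %d\n",
	       get_tree(&fc), fc.root != NULL);
	return 0;
}
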
/*
* Setup private BDI for given superblock. It gets automatically cleaned up
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 92682fcc41f6..1b56686ab178 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -13,34 +13,71 @@
#include <linux/magic.h>
#include <linux/mount.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/user_namespace.h>
+#include <linux/fs_context.h>
+#include <net/net_namespace.h>
#include "sysfs.h"
static struct kernfs_root *sysfs_root;
struct kernfs_node *sysfs_root_kn;
-static struct dentry *sysfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sysfs_get_tree(struct fs_context *fc)
{
- struct dentry *root;
- void *ns;
- bool new_sb = false;
+ struct kernfs_fs_context *kfc = fc->fs_private;
+ int ret;
- if (!(flags & SB_KERNMOUNT)) {
+ ret = kernfs_get_tree(fc);
+ if (ret)
+ return ret;
+
+ if (kfc->new_sb_created)
+ fc->root->d_sb->s_iflags |= SB_I_USERNS_VISIBLE;
+ return 0;
+}
+
+static void sysfs_fs_context_free(struct fs_context *fc)
+{
+ struct kernfs_fs_context *kfc = fc->fs_private;
+
+ if (kfc->ns_tag)
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, kfc->ns_tag);
+ kernfs_free_fs_context(fc);
+ kfree(kfc);
+}
+
+static const struct fs_context_operations sysfs_fs_context_ops = {
+ .free = sysfs_fs_context_free,
+ .get_tree = sysfs_get_tree,
+};
+
+static int sysfs_init_fs_context(struct fs_context *fc)
+{
+ struct kernfs_fs_context *kfc;
+ struct net *netns;
+
+ if (!(fc->sb_flags & SB_KERNMOUNT)) {
if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
- return ERR_PTR(-EPERM);
+ return -EPERM;
}
- ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
- root = kernfs_mount_ns(fs_type, flags, sysfs_root,
- SYSFS_MAGIC, &new_sb, ns);
- if (!new_sb)
- kobj_ns_drop(KOBJ_NS_TYPE_NET, ns);
- else if (!IS_ERR(root))
- root->d_sb->s_iflags |= SB_I_USERNS_VISIBLE;
+ kfc = kzalloc(sizeof(struct kernfs_fs_context), GFP_KERNEL);
+ if (!kfc)
+ return -ENOMEM;
- return root;
+ kfc->ns_tag = netns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
+ kfc->root = sysfs_root;
+ kfc->magic = SYSFS_MAGIC;
+ fc->fs_private = kfc;
+ fc->ops = &sysfs_fs_context_ops;
+ if (netns) {
+ if (fc->user_ns)
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(netns->user_ns);
+ }
+ fc->global = true;
+ return 0;
}
static void sysfs_kill_sb(struct super_block *sb)
@@ -52,10 +89,10 @@ static void sysfs_kill_sb(struct super_block *sb)
}
static struct file_system_type sysfs_fs_type = {
- .name = "sysfs",
- .mount = sysfs_mount,
- .kill_sb = sysfs_kill_sb,
- .fs_flags = FS_USERNS_MOUNT,
+ .name = "sysfs",
+ .init_fs_context = sysfs_init_fs_context,
+ .kill_sb = sysfs_kill_sb,
+ .fs_flags = FS_USERNS_MOUNT,
};
int __init sysfs_init(void)
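
The shape of the sysfs conversion in miniature: the old one-shot ->mount()
splits into an init step that allocates per-mount state and installs an ops
table, and a separate get_tree step that builds the tree. Everything below
is an illustrative stand-in, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct fc;
struct fc_ops {
	void (*free)(struct fc *fc);
	int  (*get_tree)(struct fc *fc);
};
struct fc {
	const struct fc_ops *ops;
	void *fs_private;		/* like kernfs_fs_context */
};

static int demo_get_tree(struct fc *fc)
{
	printf("building tree, private state %p\n", fc->fs_private);
	return 0;
}

static void demo_free(struct fc *fc)
{
	free(fc->fs_private);		/* like sysfs_fs_context_free() */
}

static const struct fc_ops demo_ops = {
	.free	  = demo_free,
	.get_tree = demo_get_tree,
};

static int demo_init_fs_context(struct fc *fc)
{
	fc->fs_private = calloc(1, 16);	/* per-mount state */
	if (!fc->fs_private)
		return -1;
	fc->ops = &demo_ops;
	return 0;
}

int main(void)
{
	struct fc fc = { 0 };

	if (demo_init_fs_context(&fc) == 0) {
		fc.ops->get_tree(&fc);
		fc.ops->free(&fc);
	}
	return 0;
}
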
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index bc1e082d921d..9da2f135121b 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -8,6 +8,7 @@ config UBIFS_FS
select CRYPTO_LZO if UBIFS_FS_LZO
select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
select CRYPTO_HASH_INFO
+ select UBIFS_FS_XATTR if FS_ENCRYPTION
depends on MTD_UBI
help
UBIFS is a file system for flash devices which works on top of UBI.
@@ -60,17 +61,6 @@ config UBIFS_FS_XATTR
If unsure, say Y.
-config UBIFS_FS_ENCRYPTION
- bool "UBIFS Encryption"
- depends on UBIFS_FS_XATTR && BLOCK
- select FS_ENCRYPTION
- default n
- help
- Enable encryption of UBIFS files and directories. This
- feature is similar to ecryptfs, but it is more memory
- efficient since it avoids caching the encrypted and
- decrypted pages in the page cache.
-
config UBIFS_FS_SECURITY
bool "UBIFS Security Labels"
depends on UBIFS_FS_XATTR
diff --git a/fs/ubifs/Makefile b/fs/ubifs/Makefile
index 5f838319c8d5..5c4b845754a7 100644
--- a/fs/ubifs/Makefile
+++ b/fs/ubifs/Makefile
@@ -6,6 +6,6 @@ ubifs-y += tnc.o master.o scan.o replay.o log.o commit.o gc.o orphan.o
ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o
ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o debug.o
ubifs-y += misc.o
-ubifs-$(CONFIG_UBIFS_FS_ENCRYPTION) += crypto.o
+ubifs-$(CONFIG_FS_ENCRYPTION) += crypto.o
ubifs-$(CONFIG_UBIFS_FS_XATTR) += xattr.o
ubifs-$(CONFIG_UBIFS_FS_AUTHENTICATION) += auth.o
diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
index 5bf5fd08879e..b758004085c4 100644
--- a/fs/ubifs/auth.c
+++ b/fs/ubifs/auth.c
@@ -33,7 +33,6 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node,
int err;
shash->tfm = c->hash_tfm;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash);
if (err < 0)
@@ -56,7 +55,6 @@ static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash,
int err;
shash->tfm = c->hmac_tfm;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_digest(shash, hash, c->hash_len, hmac);
if (err < 0)
@@ -88,7 +86,6 @@ int ubifs_prepare_auth_node(struct ubifs_info *c, void *node,
return -ENOMEM;
hash_desc->tfm = c->hash_tfm;
- hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ubifs_shash_copy_state(c, inhash, hash_desc);
err = crypto_shash_final(hash_desc, hash);
@@ -123,7 +120,6 @@ static struct shash_desc *ubifs_get_desc(const struct ubifs_info *c,
return ERR_PTR(-ENOMEM);
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_init(desc);
if (err) {
@@ -364,7 +360,6 @@ static int ubifs_node_calc_hmac(const struct ubifs_info *c, const void *node,
ubifs_assert(c, ofs_hmac + hmac_len < len);
shash->tfm = c->hmac_tfm;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_init(shash);
if (err)
@@ -483,7 +478,6 @@ int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac)
return 0;
shash->tfm = c->hmac_tfm;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_init(shash);
if (err)
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 0164bcc827f8..82e4e6a30b04 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -28,6 +28,11 @@
#include <linux/mount.h>
#include "ubifs.h"
+/* Needs to be kept consistent with checked flags in ioctl2ubifs() */
+#define UBIFS_SUPPORTED_IOCTL_FLAGS \
+ (FS_COMPR_FL | FS_SYNC_FL | FS_APPEND_FL | \
+ FS_IMMUTABLE_FL | FS_DIRSYNC_FL)
+
/**
* ubifs_set_inode_flags - set VFS inode flags.
* @inode: VFS inode to set flags for
@@ -169,6 +174,9 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (get_user(flags, (int __user *) arg))
return -EFAULT;
+ if (flags & ~UBIFS_SUPPORTED_IOCTL_FLAGS)
+ return -EOPNOTSUPP;
+
if (!S_ISDIR(inode->i_mode))
flags &= ~FS_DIRSYNC_FL;
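
The new guard is the usual mask-then-reject idiom: any bit outside the
advertised set fails fast with EOPNOTSUPP instead of being silently
dropped. In miniature (the flag bits are made up for the demo):

#include <stdio.h>
#include <errno.h>

#define DEMO_SUPPORTED_FLAGS	(0x1 | 0x2 | 0x8)

static int set_flags(unsigned int flags)
{
	if (flags & ~DEMO_SUPPORTED_FLAGS)
		return -EOPNOTSUPP;
	/* ... apply the flags ... */
	return 0;
}

int main(void)
{
	printf("0x3 -> %d\n", set_flags(0x3));	/* accepted */
	printf("0x4 -> %d\n", set_flags(0x4));	/* rejected */
	return 0;
}
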
@@ -185,7 +193,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return err;
}
case FS_IOC_SET_ENCRYPTION_POLICY: {
-#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
struct ubifs_info *c = inode->i_sb->s_fs_info;
err = ubifs_enable_encryption(c);
@@ -198,7 +206,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#endif
}
case FS_IOC_GET_ENCRYPTION_POLICY: {
-#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
return fscrypt_ioctl_get_policy(file, (void __user *)arg);
#else
return -EOPNOTSUPP;
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 0a0e65c07c6d..5c8a81a019a4 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -576,7 +576,6 @@ static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_h
SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
hash_desc->tfm = c->hash_tfm;
- hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ubifs_shash_copy_state(c, log_hash, hash_desc);
return crypto_shash_final(hash_desc, hash);
@@ -587,7 +586,6 @@ static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac)
SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
hmac_desc->tfm = c->hmac_tfm;
- hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac);
}
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 3da90c951c23..67fac1e8adfb 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -748,7 +748,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
goto out;
}
-#ifndef CONFIG_UBIFS_FS_ENCRYPTION
+#ifndef CONFIG_FS_ENCRYPTION
if (c->encrypted) {
ubifs_err(c, "file system contains encrypted files but UBIFS"
" was built without crypto support.");
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1fac1133dadd..12628184772c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -276,14 +276,12 @@ static void ubifs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
struct ubifs_inode *ui = ubifs_inode(inode);
+ kfree(ui->data);
kmem_cache_free(ubifs_inode_slab, ui);
}
static void ubifs_destroy_inode(struct inode *inode)
{
- struct ubifs_inode *ui = ubifs_inode(inode);
-
- kfree(ui->data);
call_rcu(&inode->i_rcu, ubifs_i_callback);
}
@@ -2146,7 +2144,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_UBIFS_FS_XATTR
sb->s_xattr = ubifs_xattr_handlers;
#endif
-#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
sb->s_cop = &ubifs_crypt_operations;
#endif
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 38401adaa00d..1ae12900e01d 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -43,7 +43,6 @@
#include <crypto/hash.h>
#include <crypto/algapi.h>
-#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_UBIFS_FS_ENCRYPTION)
#include <linux/fscrypt.h>
#include "ubifs-media.h"
@@ -142,7 +141,7 @@
*/
#define WORST_COMPR_FACTOR 2
-#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
#define UBIFS_CIPHER_BLOCK_SIZE FS_CRYPTO_BLOCK_SIZE
#else
#define UBIFS_CIPHER_BLOCK_SIZE 0
@@ -2072,7 +2071,7 @@ int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len,
#include "misc.h"
#include "key.h"
-#ifndef CONFIG_UBIFS_FS_ENCRYPTION
+#ifndef CONFIG_FS_ENCRYPTION
static inline int ubifs_encrypt(const struct inode *inode,
struct ubifs_data_node *dn,
unsigned int in_len, unsigned int *out_len,
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index ae796e10f68b..e7276932e433 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1242,8 +1242,10 @@ set_size:
truncate_setsize(inode, newsize);
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
- udf_truncate_extents(inode);
+ err = udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
+ if (err)
+ return err;
}
update_time:
inode->i_mtime = inode->i_ctime = current_time(inode);
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index b647f0bd150c..63a47f1e1d52 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -199,7 +199,7 @@ static void udf_update_alloc_ext_desc(struct inode *inode,
* for making file shorter. For making file longer, udf_extend_file() has to
* be used.
*/
-void udf_truncate_extents(struct inode *inode)
+int udf_truncate_extents(struct inode *inode)
{
struct extent_position epos;
struct kernel_lb_addr eloc, neloc = {};
@@ -224,7 +224,7 @@ void udf_truncate_extents(struct inode *inode)
if (etype == -1) {
/* We should extend the file? */
WARN_ON(byte_offset);
- return;
+ return 0;
}
epos.offset -= adsize;
extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
@@ -260,6 +260,9 @@ void udf_truncate_extents(struct inode *inode)
epos.block = eloc;
epos.bh = udf_tread(sb,
udf_get_lb_pblock(sb, &eloc, 0));
+ /* Error reading indirect block? */
+ if (!epos.bh)
+ return -EIO;
if (elen)
indirect_ext_len =
(elen + sb->s_blocksize - 1) >>
@@ -283,4 +286,5 @@ void udf_truncate_extents(struct inode *inode)
iinfo->i_lenExtents = inode->i_size;
brelse(epos.bh);
+ return 0;
}
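
The udf_truncate_extents() change is a void-to-int conversion with the
error checked at every call site. The same shape as a self-contained toy
(the helper names and the fake I/O failure are illustrative only):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static bool read_indirect_block(int blk)
{
	return blk != 3;		/* pretend block 3 is unreadable */
}

static int truncate_extents(int blocks)
{
	for (int blk = 0; blk < blocks; blk++) {
		if (!read_indirect_block(blk))
			return -EIO;	/* previously lost silently */
	}
	return 0;
}

static int do_truncate(int blocks)
{
	int err = truncate_extents(blocks);

	if (err)
		return err;		/* caller now propagates it */
	return 0;
}

int main(void)
{
	printf("2 blocks: %d\n", do_truncate(2));
	printf("5 blocks: %d\n", do_truncate(5));
	return 0;
}
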
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index ee246769dee4..d89ef71887fc 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -235,7 +235,7 @@ extern struct inode *udf_new_inode(struct inode *, umode_t);
/* truncate.c */
extern void udf_truncate_tail_extent(struct inode *);
extern void udf_discard_prealloc(struct inode *);
-extern void udf_truncate_extents(struct inode *);
+extern int udf_truncate_extents(struct inode *);
/* balloc.c */
extern void udf_free_blocks(struct super_block *, struct inode *,
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 1fd3011ea623..7fd4802222b8 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -229,7 +229,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
case UFS_UID_44BSD:
return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
case UFS_UID_EFT:
- if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
+ if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
/* Fall through */
default:
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 89800fc7dc9d..f5de1e726356 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -629,6 +629,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
/* the various vma->vm_userfaultfd_ctx still points to it */
down_write(&mm->mmap_sem);
+ /* no task can run (and in turn coredump) yet */
+ VM_WARN_ON(!mmget_still_valid(mm));
for (vma = mm->mmap; vma; vma = vma->vm_next)
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
@@ -883,6 +885,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* taking the mmap_sem for writing.
*/
down_write(&mm->mmap_sem);
+ if (!mmget_still_valid(mm))
+ goto skip_mm;
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
cond_resched();
@@ -905,6 +909,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
+skip_mm:
up_write(&mm->mmap_sem);
mmput(mm);
wakeup:
@@ -1333,6 +1338,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
goto out;
down_write(&mm->mmap_sem);
+ if (!mmget_still_valid(mm))
+ goto out_unlock;
vma = find_vma_prev(mm, start, &prev);
if (!vma)
goto out_unlock;
@@ -1520,6 +1527,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
goto out;
down_write(&mm->mmap_sem);
+ if (!mmget_still_valid(mm))
+ goto out_unlock;
vma = find_vma_prev(mm, start, &prev);
if (!vma)
goto out_unlock;
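
All four mmget_still_valid() hunks share one shape: take the lock, re-check
that the mm is still in a state worth modifying, and bail out without
touching it otherwise. A userspace stand-in, with a plain flag playing the
role of the coredump check:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mm {
	pthread_mutex_t lock;
	bool core_dumping;	/* stands in for !mmget_still_valid() */
	int vmas_cleared;
};

static void release_ctx(struct mm *mm)
{
	pthread_mutex_lock(&mm->lock);
	if (mm->core_dumping)
		goto skip_mm;	/* don't race with the dumper */
	mm->vmas_cleared++;	/* the real code walks and edits VMAs */
skip_mm:
	pthread_mutex_unlock(&mm->lock);
}

int main(void)
{
	struct mm mm = { .lock = PTHREAD_MUTEX_INITIALIZER };

	release_ctx(&mm);
	mm.core_dumping = true;
	release_ctx(&mm);
	printf("vmas_cleared = %d\n", mm.vmas_cleared);	/* 1 */
	return 0;
}
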
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 48502cb9990f..4637ae1ae91c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1191,7 +1191,10 @@ xfs_iread_extents(
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
*/
level = be16_to_cpu(block->bb_level);
- ASSERT(level > 0);
+ if (unlikely(level == 0)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ return -EFSCORRUPTED;
+ }
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
bno = be64_to_cpu(*pp);
@@ -4249,9 +4252,13 @@ xfs_bmapi_write(
struct xfs_bmbt_irec *mval, /* output: map values */
int *nmap) /* i/o: mval size/count */
{
+ struct xfs_bmalloca bma = {
+ .tp = tp,
+ .ip = ip,
+ .total = total,
+ };
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
- struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
xfs_fileoff_t end; /* end of mapped file region */
bool eof = false; /* after the end of extents */
int error; /* error return */
@@ -4319,10 +4326,6 @@ xfs_bmapi_write(
eof = true;
if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
bma.prev.br_startoff = NULLFILEOFF;
- bma.tp = tp;
- bma.ip = ip;
- bma.total = total;
- bma.datatype = 0;
bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
n = 0;
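
The xfs_bmapi_write() cleanup leans on a C99 guarantee: a designated
initializer zeroes every member it does not name, so the trailing
"bma.datatype = 0" style assignments become redundant. A tiny proof:

#include <stdio.h>

struct bmalloca {
	void *tp;
	void *ip;
	int total;
	int datatype;	/* never named below, still guaranteed zero */
};

int main(void)
{
	int token;
	struct bmalloca bma = {
		.tp = &token,
		.ip = &token,
		.total = 16,
	};

	printf("total=%d datatype=%d\n", bma.total, bma.datatype);
	return 0;
}
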
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index 9a3767818c50..9c2a0a13ed61 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -563,43 +563,40 @@ xfs_dir3_leaf_find_entry(
*/
int /* error */
xfs_dir2_leaf_addname(
- xfs_da_args_t *args) /* operation arguments */
+ struct xfs_da_args *args) /* operation arguments */
{
+ struct xfs_dir3_icleaf_hdr leafhdr;
+ struct xfs_trans *tp = args->trans;
__be16 *bestsp; /* freespace table in leaf */
- int compact; /* need to compact leaves */
- xfs_dir2_data_hdr_t *hdr; /* data block header */
+ __be16 *tagp; /* end of data entry */
struct xfs_buf *dbp; /* data block buffer */
- xfs_dir2_data_entry_t *dep; /* data block entry */
- xfs_inode_t *dp; /* incore directory inode */
- xfs_dir2_data_unused_t *dup; /* data unused entry */
+ struct xfs_buf *lbp; /* leaf's buffer */
+ struct xfs_dir2_leaf *leaf; /* leaf structure */
+ struct xfs_inode *dp = args->dp; /* incore directory inode */
+ struct xfs_dir2_data_hdr *hdr; /* data block header */
+ struct xfs_dir2_data_entry *dep; /* data block entry */
+ struct xfs_dir2_leaf_entry *lep; /* leaf entry table pointer */
+ struct xfs_dir2_leaf_entry *ents;
+ struct xfs_dir2_data_unused *dup; /* data unused entry */
+ struct xfs_dir2_leaf_tail *ltp; /* leaf tail pointer */
+ struct xfs_dir2_data_free *bf; /* bestfree table */
+ int compact; /* need to compact leaves */
int error; /* error return value */
int grown; /* allocated new data block */
- int highstale; /* index of next stale leaf */
+ int highstale = 0; /* index of next stale leaf */
int i; /* temporary, index */
int index; /* leaf table position */
- struct xfs_buf *lbp; /* leaf's buffer */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
int length; /* length of new entry */
- xfs_dir2_leaf_entry_t *lep; /* leaf entry table pointer */
int lfloglow; /* low leaf logging index */
int lfloghigh; /* high leaf logging index */
- int lowstale; /* index of prev stale leaf */
- xfs_dir2_leaf_tail_t *ltp; /* leaf tail pointer */
+ int lowstale = 0; /* index of prev stale leaf */
int needbytes; /* leaf block bytes needed */
int needlog; /* need to log data header */
int needscan; /* need to rescan data free */
- __be16 *tagp; /* end of data entry */
- xfs_trans_t *tp; /* transaction pointer */
xfs_dir2_db_t use_block; /* data block number */
- struct xfs_dir2_data_free *bf; /* bestfree table */
- struct xfs_dir2_leaf_entry *ents;
- struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_leaf_addname(args);
- dp = args->dp;
- tp = args->trans;
-
error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, -1, &lbp);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 3b03703c5c3d..16731d2d684b 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -426,24 +426,22 @@ xfs_dir2_leaf_to_node(
static int /* error */
xfs_dir2_leafn_add(
struct xfs_buf *bp, /* leaf buffer */
- xfs_da_args_t *args, /* operation arguments */
+ struct xfs_da_args *args, /* operation arguments */
int index) /* insertion pt for new entry */
{
+ struct xfs_dir3_icleaf_hdr leafhdr;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_dir2_leaf *leaf = bp->b_addr;
+ struct xfs_dir2_leaf_entry *lep;
+ struct xfs_dir2_leaf_entry *ents;
int compact; /* compacting stale leaves */
- xfs_inode_t *dp; /* incore directory inode */
- int highstale; /* next stale entry */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
- xfs_dir2_leaf_entry_t *lep; /* leaf entry */
+ int highstale = 0; /* next stale entry */
int lfloghigh; /* high leaf entry logging */
int lfloglow; /* low leaf entry logging */
- int lowstale; /* previous stale entry */
- struct xfs_dir3_icleaf_hdr leafhdr;
- struct xfs_dir2_leaf_entry *ents;
+ int lowstale = 0; /* previous stale entry */
trace_xfs_dir2_leafn_add(args, index);
- dp = args->dp;
- leaf = bp->b_addr;
dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
ents = dp->d_ops->leaf_ents_p(leaf);
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 6f94d1f7322d..117910db51b8 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -415,8 +415,17 @@ xchk_btree_check_owner(
struct xfs_btree_cur *cur = bs->cur;
struct check_owner *co;
- if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
+ /*
+ * In theory, xfs_btree_get_block should only give us a null buffer
+ * pointer for the root of a root-in-inode btree type, but we need
+ * to check defensively here in case the cursor state is also screwed
+ * up.
+ */
+ if (bp == NULL) {
+ if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
+ xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return 0;
+ }
/*
* We want to cross-reference each btree block with the bnobt
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index f1260b4bfdee..90527b094878 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -574,6 +574,11 @@ xchk_da_btree(
/* Drill another level deeper. */
blkno = be32_to_cpu(key->before);
level++;
+ if (level >= XFS_DA_NODE_MAXDEPTH) {
+ /* Too deep! */
+ xchk_da_set_corrupt(&ds, level - 1);
+ break;
+ }
ds.tree_level--;
error = xchk_da_btree_block(&ds, level, blkno);
if (error)
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 93f07edafd81..9ee2a7d02e70 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -161,6 +161,14 @@ xfs_ioc_trim(
return -EPERM;
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+
+ /*
+ * We haven't recovered the log, so we cannot use our bnobt-guided
+ * storage zapping commands.
+ */
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+ return -EROFS;
+
if (copy_from_user(&range, urange, sizeof(range)))
return -EFAULT;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1f2e2845eb76..a7ceae90110e 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -529,18 +529,17 @@ xfs_file_dio_aio_write(
count = iov_iter_count(from);
/*
- * If we are doing unaligned IO, wait for all other IO to drain,
- * otherwise demote the lock if we had to take the exclusive lock
- * for other reasons in xfs_file_aio_write_checks.
+ * If we are doing unaligned IO, we can't allow any other overlapping IO
+ * in-flight at the same time or we risk data corruption. Wait for all
+ * other IO to drain before we submit. If the IO is aligned, demote the
+ * iolock if we had to take the exclusive lock in
+ * xfs_file_aio_write_checks() for other reasons.
*/
if (unaligned_io) {
- /* If we are going to wait for other DIO to finish, bail */
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (atomic_read(&inode->i_dio_count))
- return -EAGAIN;
- } else {
- inode_dio_wait(inode);
- }
+ /* unaligned dio always waits, bail */
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return -EAGAIN;
+ inode_dio_wait(inode);
} else if (iolock == XFS_IOLOCK_EXCL) {
xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+
+ /*
+ * If unaligned, this is the only IO in-flight. If it has not yet
+ * completed, wait on it before we release the iolock to prevent
+ * subsequent overlapping IO.
+ */
+ if (ret == -EIOCBQUEUED && unaligned_io)
+ inode_dio_wait(inode);
out:
xfs_iunlock(ip, iolock);
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 30b1ae53689f..c50542dc71e0 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -150,7 +150,10 @@
/* Defaults for debug_level, debug and normal */
+#ifndef ACPI_DEBUG_DEFAULT
#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
+#endif
+
#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 0300374101cd..2a462cf4eaa9 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -91,8 +91,8 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
-const char *
-acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv);
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
#ifdef CONFIG_ACPI
@@ -687,6 +687,10 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
+static inline void acpi_dev_put(struct acpi_device *adev)
+{
+ put_device(&adev->dev);
+}
#else /* CONFIG_ACPI */
static inline int register_acpi_bus_type(void *bus) { return 0; }
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 24dbb4e742a6..3b1b1d0e4c33 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20190215
+#define ACPI_CA_VERSION 0x20190405
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 65cc9cbf1141..d568128025df 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -66,14 +66,14 @@
******************************************************************************/
struct acpi_table_header {
- char signature[ACPI_NAME_SIZE]; /* ASCII table signature */
+ char signature[ACPI_NAMESEG_SIZE]; /* ASCII table signature */
u32 length; /* Length of table in bytes, including this header */
u8 revision; /* ACPI Specification minor version number */
u8 checksum; /* To make sum of entire table == 0 */
char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
u32 oem_revision; /* OEM revision number */
- char asl_compiler_id[ACPI_NAME_SIZE]; /* ASCII ASL compiler vendor ID */
+ char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
u32 asl_compiler_revision; /* ASL compiler version */
};
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index f73382e82c26..ad6892a24015 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -375,7 +375,7 @@ typedef u64 acpi_physical_address;
/* Names within the namespace are 4 bytes long */
-#define ACPI_NAME_SIZE 4
+#define ACPI_NAMESEG_SIZE 4 /* Fixed by ACPI spec */
#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */
#define ACPI_PATH_SEPARATOR '.'
@@ -515,11 +515,11 @@ typedef u64 acpi_integer;
/* Optimizations for 4-character (32-bit) acpi_name manipulation */
#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
-#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
-#define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
+#define ACPI_COMPARE_NAMESEG(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
+#define ACPI_COPY_NAMESEG(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
#else
-#define ACPI_COMPARE_NAME(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
-#define ACPI_MOVE_NAME(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
+#define ACPI_COMPARE_NAMESEG(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAMESEG_SIZE))
+#define ACPI_COPY_NAMESEG(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE))
#endif
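
The two spellings of ACPI_COMPARE_NAMESEG come down to one trade-off: a
single 32-bit compare where misaligned loads are safe, strncmp() over
exactly ACPI_NAMESEG_SIZE bytes where they are not. A portable userspace
rendering (memcpy expresses the possibly-misaligned load without
strict-aliasing trouble):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NAMESEG_SIZE 4

static int compare_nameseg(const char *a, const char *b)
{
	uint32_t ua, ub;

	memcpy(&ua, a, NAMESEG_SIZE);
	memcpy(&ub, b, NAMESEG_SIZE);
	return ua == ub;	/* same as !strncmp(a, b, NAMESEG_SIZE) */
}

int main(void)
{
	printf("%d %d\n",
	       compare_nameseg("_SB_", "_SB_"),
	       compare_nameseg("_SB_", "_TZ_"));
	return 0;
}
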
/* Support for the special RSDP signature (8 characters) */
@@ -529,7 +529,7 @@ typedef u64 acpi_integer;
/* Support for OEMx signature (x can be any character) */
#define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\
- strnlen (a, ACPI_NAME_SIZE) == ACPI_NAME_SIZE)
+ strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE)
/*
* Algorithm to obtain access bit width.
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 9ff328fd946a..624b90b34085 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -82,6 +82,11 @@
#define ACPI_NO_ERROR_MESSAGES
#undef ACPI_DEBUG_OUTPUT
+/* Use a specific debugging default separate from ACPICA */
+
+#undef ACPI_DEBUG_DEFAULT
+#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR)
+
/* External interface for __KERNEL__, stub is needed */
#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
new file mode 100644
index 000000000000..6f4536d70b8e
--- /dev/null
+++ b/include/asm-generic/Kbuild
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# asm headers that all architectures except um should have
+# (This file is not included when SRCARCH=um since UML borrows several
+# asm headers from the host architecture.)
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index fcb61b4659b3..8666fe7f35d7 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -23,7 +23,9 @@
*
* Return:
* 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
*/
static inline int
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
@@ -85,7 +87,9 @@ out_pagefault_enable:
*
* Return:
* 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
*/
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d356f802945a..8f3bf95a36d1 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -19,12 +19,9 @@
#include <asm-generic/iomap.h>
#endif
+#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>
-#ifndef mmiowb
-#define mmiowb() do {} while (0)
-#endif
-
#ifndef __io_br
#define __io_br() barrier()
#endif
@@ -32,9 +29,9 @@
/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
-#define __io_ar() rmb()
+#define __io_ar(v) rmb()
#else
-#define __io_ar() barrier()
+#define __io_ar(v) barrier()
#endif
#endif
@@ -49,7 +46,7 @@
/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
-#define __io_aw() barrier()
+#define __io_aw() mmiowb_set_pending()
#endif
#ifndef __io_pbw
@@ -65,7 +62,7 @@
#endif
#ifndef __io_par
-#define __io_par() __io_ar()
+#define __io_par(v) __io_ar(v)
#endif
@@ -158,7 +155,7 @@ static inline u8 readb(const volatile void __iomem *addr)
__io_br();
val = __raw_readb(addr);
- __io_ar();
+ __io_ar(val);
return val;
}
#endif
@@ -171,7 +168,7 @@ static inline u16 readw(const volatile void __iomem *addr)
__io_br();
val = __le16_to_cpu(__raw_readw(addr));
- __io_ar();
+ __io_ar(val);
return val;
}
#endif
@@ -184,7 +181,7 @@ static inline u32 readl(const volatile void __iomem *addr)
__io_br();
val = __le32_to_cpu(__raw_readl(addr));
- __io_ar();
+ __io_ar(val);
return val;
}
#endif
@@ -198,7 +195,7 @@ static inline u64 readq(const volatile void __iomem *addr)
__io_br();
val = __le64_to_cpu(__raw_readq(addr));
- __io_ar();
+ __io_ar(val);
return val;
}
#endif
@@ -471,7 +468,7 @@ static inline u8 inb(unsigned long addr)
__io_pbr();
val = __raw_readb(PCI_IOBASE + addr);
- __io_par();
+ __io_par(val);
return val;
}
#endif
@@ -484,7 +481,7 @@ static inline u16 inw(unsigned long addr)
__io_pbr();
val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
- __io_par();
+ __io_par(val);
return val;
}
#endif
@@ -497,7 +494,7 @@ static inline u32 inl(unsigned long addr)
__io_pbr();
val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
- __io_par();
+ __io_par(val);
return val;
}
#endif
diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
new file mode 100644
index 000000000000..9439ff037b2d
--- /dev/null
+++ b/include/asm-generic/mmiowb.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_H
+#define __ASM_GENERIC_MMIOWB_H
+
+/*
+ * Generic implementation of mmiowb() tracking for spinlocks.
+ *
+ * If your architecture doesn't ensure that writes to an I/O peripheral
+ * within two spinlocked sections on two different CPUs are seen by the
+ * peripheral in the order corresponding to the lock handover, then you
+ * need to follow these FIVE easy steps:
+ *
+ * 1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy)
+ * in asm/mmiowb.h, then #include this file
+ * 2. Ensure your I/O write accessors call mmiowb_set_pending()
+ * 3. Select ARCH_HAS_MMIOWB
+ * 4. Untangle the resulting mess of header files
+ * 5. Complain to your architects
+ */
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm-generic/mmiowb_types.h>
+
+#ifndef arch_mmiowb_state
+#include <asm/percpu.h>
+#include <asm/smp.h>
+
+DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state)
+#else
+#define __mmiowb_state() arch_mmiowb_state()
+#endif /* arch_mmiowb_state */
+
+static inline void mmiowb_set_pending(void)
+{
+ struct mmiowb_state *ms = __mmiowb_state();
+ ms->mmiowb_pending = ms->nesting_count;
+}
+
+static inline void mmiowb_spin_lock(void)
+{
+ struct mmiowb_state *ms = __mmiowb_state();
+ ms->nesting_count++;
+}
+
+static inline void mmiowb_spin_unlock(void)
+{
+ struct mmiowb_state *ms = __mmiowb_state();
+
+ if (unlikely(ms->mmiowb_pending)) {
+ ms->mmiowb_pending = 0;
+ mmiowb();
+ }
+
+ ms->nesting_count--;
+}
+#else
+#define mmiowb_set_pending() do { } while (0)
+#define mmiowb_spin_lock() do { } while (0)
+#define mmiowb_spin_unlock() do { } while (0)
+#endif /* CONFIG_MMIOWB */
+#endif /* __ASM_GENERIC_MMIOWB_H */
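
The pending/nesting logic above, modelled in plain userspace C so the
barrier count can be observed: an MMIO write records the current nesting
depth, the next unlock that sees a non-zero record issues the barrier, and
a write outside any lock records zero so no barrier is forced. mmiowb()
here is just a counter standing in for the real barrier:

#include <stdio.h>

static struct {
	unsigned short nesting_count;
	unsigned short mmiowb_pending;
} state;

static int barriers;

static void mmiowb(void)	{ barriers++; }
static void io_write(void)	{ state.mmiowb_pending = state.nesting_count; }
static void spin_lock(void)	{ state.nesting_count++; }

static void spin_unlock(void)
{
	if (state.mmiowb_pending) {
		state.mmiowb_pending = 0;
		mmiowb();
	}
	state.nesting_count--;
}

int main(void)
{
	spin_lock();
	spin_lock();		/* nested critical section */
	io_write();
	spin_unlock();		/* pending != 0: barrier fires here */
	spin_unlock();		/* nothing pending: no extra barrier */
	printf("barriers issued: %d\n", barriers);	/* 1 */
	return 0;
}
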
diff --git a/include/asm-generic/mmiowb_types.h b/include/asm-generic/mmiowb_types.h
new file mode 100644
index 000000000000..8eb0095655e7
--- /dev/null
+++ b/include/asm-generic/mmiowb_types.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_TYPES_H
+#define __ASM_GENERIC_MMIOWB_TYPES_H
+
+#include <linux/types.h>
+
+struct mmiowb_state {
+ u16 nesting_count;
+ u16 mmiowb_pending;
+};
+
+#endif /* __ASM_GENERIC_MMIOWB_TYPES_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index fa782fba51ee..75d9d68a6de7 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1126,6 +1126,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
static inline void init_espfix_bsp(void) { }
#endif
+extern void __init pgd_cache_init(void);
+
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
deleted file mode 100644
index 93e67a055a4d..000000000000
--- a/include/asm-generic/rwsem.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_RWSEM_H
-#define _ASM_GENERIC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_64BIT
-# define RWSEM_ACTIVE_MASK 0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK 0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000L
-#define RWSEM_ACTIVE_BIAS 0x00000001L
-#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
- rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
- if (IS_ERR(rwsem_down_read_failed_killable(sem)))
- return -EINTR;
- }
-
- return 0;
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- long tmp;
-
- while ((tmp = atomic_long_read(&sem->count)) >= 0) {
- if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
- tmp + RWSEM_ACTIVE_READ_BIAS)) {
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
- &sem->count);
- if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
- rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
- &sem->count);
- if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
- return -EINTR;
- return 0;
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
- return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_dec_return_release(&sem->count);
- if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
- &sem->count) < 0))
- rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- long tmp;
-
- /*
- * When downgrading from exclusive to shared ownership,
- * anything inside the write-locked region cannot leak
- * into the read side. In contrast, anything in the
- * read-locked region is ok to be re-ordered into the
- * write side. As such, rely on RELEASE semantics.
- */
- tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
- if (tmp < 0)
- rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_GENERIC_RWSEM_H */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d79abca81a52..d1779d442aa5 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -77,6 +77,20 @@ static inline int arch_is_kernel_data(unsigned long addr)
}
#endif
+/*
+ * Check if an address is part of freed initmem. This is needed on architectures
+ * with virt == phys kernel mapping, for code that wants to check if an address
+ * is part of a static object within [_stext, _end]. After initmem is freed,
+ * memory can be allocated from it, and such allocations would then have
+ * addresses within the range [_stext, _end].
+ */
+#ifndef arch_is_kernel_initmem_freed
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+ return 0;
+}
+#endif
+
/**
* memory_contains - checks if an object is contained within a memory region
* @begin: virtual address of the beginning of the memory region
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 0c938a4354f6..b88239e9efe4 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -105,41 +105,30 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
* syscall_get_arguments - extract system call parameter values
* @task: task of interest, must be blocked
* @regs: task_pt_regs() of @task
- * @i: argument index [0,5]
- * @n: number of arguments; n+i must be [1,6].
* @args: array filled with argument values
*
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5). Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches all 6 arguments to the system call. The first argument is
+ * stored in @args[0], and so on.
*
* It's only valid to call this when @task is stopped for tracing on
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
*/
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, unsigned long *args);
+ unsigned long *args);
/**
* syscall_set_arguments - change system call parameter value
* @task: task of interest, must be in system call entry tracing
* @regs: task_pt_regs() of @task
- * @i: argument index [0,5]
- * @n: number of arguments; n+i must be [1,6].
* @args: array of argument values to store
*
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call.
+ * The first argument gets value @args[0], and so on.
*
* It's only valid to call this when @task is stopped for tracing on
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
*/
void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args);
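
The interface change drops the (index, count) window; tracers now always
receive all six argument slots. In toy form (demo_regs stands in for
task_pt_regs(); the real function reads the tracee's registers):

#include <stdio.h>

static const unsigned long demo_regs[6] = { 10, 20, 30, 40, 50, 60 };

static void get_arguments(unsigned long *args)
{
	for (int i = 0; i < 6; i++)
		args[i] = demo_regs[i];	/* argument i lands in args[i] */
}

int main(void)
{
	unsigned long args[6];

	get_arguments(args);
	printf("arg0=%lu arg5=%lu\n", args[0], args[5]);
	return 0;
}
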
/**
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 6be86c1c5c58..480e5b2a5748 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -19,9 +19,140 @@
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or switching
+ * the loaded mm.
+ */
+#ifndef nmi_uaccess_okay
+# define nmi_uaccess_okay() true
+#endif
#ifdef CONFIG_MMU
+/*
+ * Generic MMU-gather implementation.
+ *
+ * The mmu_gather data structure is used by the mm code to implement the
+ * correct and efficient ordering of freeing pages and TLB invalidations.
+ *
+ * This correct ordering is:
+ *
+ * 1) unhook page
+ * 2) TLB invalidate page
+ * 3) free page
+ *
+ * That is, we must never free a page before we have ensured there are no live
+ * translations left to it. Otherwise it might be possible to observe (or
+ * worse, change) the page content after it has been reused.
+ *
+ * The mmu_gather API consists of:
+ *
+ * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ *
+ * Finish in particular will issue a (final) TLB invalidate and free
+ * all (remaining) queued pages.
+ *
+ * - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
+ *
+ * Defaults to flushing at tlb_end_vma() to reset the range; helps when
+ * there are large holes between the VMAs.
+ *
+ * - tlb_remove_page() / __tlb_remove_page()
+ * - tlb_remove_page_size() / __tlb_remove_page_size()
+ *
+ * __tlb_remove_page_size() is the basic primitive that queues a page for
+ * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
+ * boolean indicating if the queue is (now) full and a call to
+ * tlb_flush_mmu() is required.
+ *
+ * tlb_remove_page() and tlb_remove_page_size() imply the call to
+ * tlb_flush_mmu() when required and have no return value.
+ *
+ * - tlb_change_page_size()
+ *
+ * call before __tlb_remove_page*() to set the current page-size; implies a
+ * possible tlb_flush_mmu() call.
+ *
+ * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
+ *
+ * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
+ * related state, like the range)
+ *
+ * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ * whatever pages are still batched.
+ *
+ * - mmu_gather::fullmm
+ *
+ * A flag set by tlb_gather_mmu() to indicate we're going to free
+ * the entire mm; this allows a number of optimizations.
+ *
+ * - We can ignore tlb_{start,end}_vma(); because we don't
+ * care about ranges. Everything will be shot down.
+ *
+ * - (RISC) architectures that use ASIDs can cycle to a new ASID
+ * and delay the invalidation until ASID space runs out.
+ *
+ * - mmu_gather::need_flush_all
+ *
+ * A flag that can be set by the arch code if it wants to force
+ * flush the entire TLB irrespective of the range. For instance
+ * x86-PAE needs this when changing top-level entries.
+ *
+ * And allows the architecture to provide and implement tlb_flush():
+ *
+ * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
+ * use of:
+ *
+ * - mmu_gather::start / mmu_gather::end
+ *
+ * which provides the range that needs to be flushed to cover the pages to
+ * be freed.
+ *
+ * - mmu_gather::freed_tables
+ *
+ * set when we freed page table pages
+ *
+ * - tlb_get_unmap_shift() / tlb_get_unmap_size()
+ *
+ * returns the smallest TLB entry size unmapped in this range.
+ *
+ * If an architecture does not provide tlb_flush() a default implementation
+ * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
+ * specified, in which case we'll default to flush_tlb_mm().
+ *
+ * Additionally there are a few opt-in features:
+ *
+ * HAVE_MMU_GATHER_PAGE_SIZE
+ *
+ * This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ * changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
+ * HAVE_RCU_TABLE_FREE
+ *
+ * This provides tlb_remove_table(), to be used instead of tlb_remove_page()
+ * for page directories (__p*_free_tlb()). This provides separate freeing of
+ * the page-table pages themselves in a semi-RCU fashion (see comment below).
+ * Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ * and therefore doesn't naturally serialize with software page-table walkers.
+ *
+ * When used, an architecture is expected to provide __tlb_remove_table()
+ * which does the actual freeing of these pages.
+ *
+ * HAVE_RCU_TABLE_NO_INVALIDATE
+ *
+ * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
+ * freeing the page-table pages. Skipping that invalidate is only safe if
+ * you use HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
+ * page-tables natively.
+ *
+ * MMU_GATHER_NO_RANGE
+ *
+ * Use this if your architecture lacks an efficient flush_tlb_range().
+ */
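
The ordering the comment insists on (unhook, invalidate, only then free),
as a runnable userspace toy where malloc/free stand in for pages and a
printf stands in for the TLB invalidate:

#include <stdio.h>
#include <stdlib.h>

#define BATCH 4

struct gather {
	void *pages[BATCH];
	int nr;
};

static void tlb_invalidate(void)
{
	printf("flush TLB\n");		/* step 2: invalidate */
}

static void flush(struct gather *g)
{
	if (!g->nr)
		return;
	tlb_invalidate();
	for (int i = 0; i < g->nr; i++)
		free(g->pages[i]);	/* step 3: free only after flush */
	g->nr = 0;
}

static void remove_page(struct gather *g, void *page)
{
	g->pages[g->nr++] = page;	/* step 1: unhook and queue */
	if (g->nr == BATCH)
		flush(g);		/* queue full, like tlb_flush_mmu() */
}

int main(void)
{
	struct gather g = { .nr = 0 };

	for (int i = 0; i < 6; i++)
		remove_page(&g, malloc(64));
	flush(&g);			/* the final flush, tlb_finish_mmu() */
	return 0;
}
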
+
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
* Semi RCU freeing of the page directories.
@@ -60,11 +191,11 @@ struct mmu_table_batch {
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#endif
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
@@ -89,14 +220,21 @@ struct mmu_gather_batch {
*/
#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
-/* struct mmu_gather is an opaque type used by the mm code for passing around
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+ int page_size);
+#endif
+
+/*
+ * struct mmu_gather is an opaque type used by the mm code for passing around
* any data needed by arch specific code for tlb_remove_page.
*/
struct mmu_gather {
struct mm_struct *mm;
+
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch *batch;
#endif
+
unsigned long start;
unsigned long end;
/*
@@ -124,23 +262,30 @@ struct mmu_gather {
unsigned int cleared_puds : 1;
unsigned int cleared_p4ds : 1;
+ /*
+ * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
+ */
+ unsigned int vma_exec : 1;
+ unsigned int vma_huge : 1;
+
+ unsigned int batch_count;
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
- unsigned int batch_count;
- int page_size;
-};
-#define HAVE_GENERIC_MMU_GATHER
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+ unsigned int page_size;
+#endif
+#endif
+};
void arch_tlb_gather_mmu(struct mmu_gather *tlb,
struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force);
-void tlb_flush_mmu_free(struct mmu_gather *tlb);
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
- int page_size);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address,
@@ -163,8 +308,94 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->cleared_pmds = 0;
tlb->cleared_puds = 0;
tlb->cleared_p4ds = 0;
+ /*
+ * Do not reset mmu_gather::vma_* fields here, we do not
+ * call into tlb_start_vma() again to set them if there is an
+ * intermediate flush.
+ */
+}
+
+#ifdef CONFIG_MMU_GATHER_NO_RANGE
+
+#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture lacks an efficient means of range-flushing TLBs,
+ * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
+ * range small. We equally don't have to worry about page granularity or other
+ * things.
+ *
+ * All we need to do is issue a full flush for any !0 range.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (tlb->end)
+ flush_tlb_mm(tlb->mm);
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#define tlb_end_vma tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#else /* CONFIG_MMU_GATHER_NO_RANGE */
+
+#ifndef tlb_flush
+
+#if defined(tlb_start_vma) || defined(tlb_end_vma)
+#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not provide its own tlb_flush() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation
+ * use that.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (tlb->fullmm || tlb->need_flush_all) {
+ flush_tlb_mm(tlb->mm);
+ } else if (tlb->end) {
+ struct vm_area_struct vma = {
+ .vm_mm = tlb->mm,
+ .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
+ (tlb->vma_huge ? VM_HUGETLB : 0),
+ };
+
+ flush_tlb_range(&vma, tlb->start, tlb->end);
+ }
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ /*
+ * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
+ * mips-4k) flush only large pages.
+ *
+ * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
+ * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
+ * range.
+ *
+ * We rely on tlb_end_vma() to issue a flush, such that when we reset
+ * these values the batch is empty.
+ */
+ tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
+ tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}
+#else
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
+
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
if (!tlb->end)
@@ -196,21 +427,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
-#ifndef tlb_remove_check_page_size_change
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
- /*
- * We don't care about page size change, just update
- * mmu_gather page size here so that debug checks
- * doesn't throw false warning.
- */
-#ifdef CONFIG_DEBUG_VM
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+ if (tlb->page_size && tlb->page_size != page_size) {
+ if (!tlb->fullmm)
+ tlb_flush_mmu(tlb);
+ }
+
tlb->page_size = page_size;
#endif
}
-#endif
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
@@ -237,17 +465,30 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
* the vmas are adjusted to only cover the region to be torn down.
*/
#ifndef tlb_start_vma
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#endif
+static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (tlb->fullmm)
+ return;
-#define __tlb_end_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- tlb_flush_mmu_tlbonly(tlb); \
- } while (0)
+ tlb_update_vma_flags(tlb, vma);
+ flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
+#endif
#ifndef tlb_end_vma
-#define tlb_end_vma __tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (tlb->fullmm)
+ return;
+
+ /*
+ * Do a TLB flush and reset the range at VMA boundaries; this avoids
+ * the range growing to span the unused space between consecutive VMAs.
+ * The mmu_gather::vma_* flags set by tlb_start_vma() also rely on
+ * this flush.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+}
#endif
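For orientation, the mm core drives these hooks roughly as follows at this point in the series. This is a simplified sketch of the tear-down sequence, not code from the patch:

    void example_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                       unsigned long start, unsigned long end)
    {
            struct mmu_gather tlb;

            tlb_gather_mmu(&tlb, mm, start, end);
            tlb_start_vma(&tlb, vma);       /* cache flush + record vma_* flags */
            /* ... clear PTEs; tlb_remove_tlb_entry() / tlb_remove_page() ... */
            tlb_end_vma(&tlb, vma);         /* flush TLB, reset the range */
            tlb_finish_mmu(&tlb, start, end);
    }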
#ifndef __tlb_remove_tlb_entry
@@ -372,6 +613,4 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
#endif /* CONFIG_MMU */
-#define tlb_migrate_finish(mm) do {} while (0)
-
#endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 3d7a6a9c2370..f8f6f04c4453 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -733,7 +733,7 @@
KEEP(*(.orc_unwind_ip)) \
__stop_orc_unwind_ip = .; \
} \
- . = ALIGN(6); \
+ . = ALIGN(2); \
.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
__start_orc_unwind = .; \
KEEP(*(.orc_unwind)) \
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 349e5957c949..702967d996bb 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -74,6 +74,7 @@ enum arch_timer_spi_nr {
struct arch_timer_kvm_info {
struct timecounter timecounter;
int virtual_irq;
+ int physical_irq;
};
struct arch_timer_mem_frame {
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 852eaa9cd4db..0fdb542c70cd 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -28,10 +28,10 @@ struct crypto_aes_ctx {
u32 key_length;
};
-extern const u32 crypto_ft_tab[4][256];
-extern const u32 crypto_fl_tab[4][256];
-extern const u32 crypto_it_tab[4][256];
-extern const u32 crypto_il_tab[4][256];
+extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned;
+extern const u32 crypto_fl_tab[4][256] ____cacheline_aligned;
+extern const u32 crypto_it_tab[4][256] ____cacheline_aligned;
+extern const u32 crypto_il_tab[4][256] ____cacheline_aligned;
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len);
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 2d690494568c..8884046659a0 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -19,14 +19,20 @@
*
* @base: Common attributes for async crypto requests
* @src: Source data
- * @dst: Destination data
+ * For verify op this is the signature + digest; in that case the
+ * total size of @src is @src_len + @dst_len.
+ * @dst: Destination data (Should be NULL for verify op)
* @src_len: Size of the input buffer
- * @dst_len: Size of the output buffer. It needs to be at least
- * as big as the expected result depending on the operation
+ * For verify op this is the size of the signature part of @src,
+ * which is the part operated on by the cipher.
+ * @dst_len: Size of @dst buffer (for all ops except verify).
+ * It needs to be at least as big as the expected result
+ * depending on the operation.
* After operation it will be updated with the actual size of the
* result.
* In case of error where the dst sgl size was insufficient,
* it will be updated to the size required for the operation.
+ * For verify op this is the size of the digest part in @src.
* @__ctx: Start of private context data
*/
struct akcipher_request {
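Under the new convention a verify caller concatenates the signature and digest in @src and passes a NULL @dst. A minimal sketch (buffer names invented; a synchronous tfm assumed):

    static int example_verify(struct akcipher_request *req,
                              u8 *sig, unsigned int sig_len,
                              u8 *digest, unsigned int digest_len)
    {
            struct scatterlist src_sg[2];

            sg_init_table(src_sg, 2);
            sg_set_buf(&src_sg[0], sig, sig_len);           /* signature part */
            sg_set_buf(&src_sg[1], digest, digest_len);     /* appended digest */

            /* dst is NULL; src_len = signature size, dst_len = digest size */
            akcipher_request_set_crypt(req, src_sg, NULL, sig_len, digest_len);
            return crypto_akcipher_verify(req);             /* 0 == verified */
    }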
@@ -55,10 +61,9 @@ struct crypto_akcipher {
* algorithm. In case of error, where the dst_len was insufficient,
* the req->dst_len will be updated to the size required for the
* operation
- * @verify: Function performs a sign operation as defined by public key
- * algorithm. In case of error, where the dst_len was insufficient,
- * the req->dst_len will be updated to the size required for the
- * operation
+ * @verify: Function performs a complete verify operation as defined by
+ * the public key algorithm, returning the verification status.
+ * Requires the digest value as an input parameter.
* @encrypt: Function performs an encrypt operation as defined by public key
* algorithm. In case of error, where the dst_len was insufficient,
* the req->dst_len will be updated to the size required for the
@@ -69,10 +74,10 @@ struct crypto_akcipher {
* operation
* @set_pub_key: Function invokes the algorithm specific set public key
* function, which knows how to decode and interpret
- * the BER encoded public key
+ * the BER encoded public key and parameters
* @set_priv_key: Function invokes the algorithm specific set private key
* function, which knows how to decode and interpret
- * the BER encoded private key
+ * the BER encoded private key and parameters
* @max_size: Function returns dest buffer size required for a given key.
* @init: Initialize the cryptographic transformation object.
* This function is used to initialize the cryptographic
@@ -238,9 +243,10 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req,
*
* @req: public key request
* @src: ptr to input scatter list
- * @dst: ptr to output scatter list
+ * @dst: ptr to output scatter list or NULL for verify op
* @src_len: size of the src input scatter list to be processed
- * @dst_len: size of the dst output scatter list
+ * @dst_len: size of the dst output scatter list or size of the digest
+ * portion in @src for verify op
*/
static inline void akcipher_request_set_crypt(struct akcipher_request *req,
struct scatterlist *src,
@@ -343,14 +349,18 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
}
/**
- * crypto_akcipher_verify() - Invoke public key verify operation
+ * crypto_akcipher_verify() - Invoke public key signature verification
*
- * Function invokes the specific public key verify operation for a given
- * public key algorithm
+ * Function invokes the specific public key signature verification operation
+ * for a given public key algorithm.
*
* @req: asymmetric key request
*
- * Return: zero on success; error code in case of error
+ * Note: req->dst should be NULL, req->src should point to SG of size
+ * (req->src_len + req->dst_len), containing the signature (of req->src_len
+ * bytes) with the digest (of req->dst_len bytes) appended.
+ *
+ * Return: zero on verification success; error code in case of error.
*/
static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
@@ -369,11 +379,12 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
* crypto_akcipher_set_pub_key() - Invoke set public key operation
*
* Function invokes the algorithm specific set key function, which knows
- * how to decode and interpret the encoded key
+ * how to decode and interpret the encoded key and parameters
*
* @tfm: tfm handle
- * @key: BER encoded public key
- * @keylen: length of the key
+ * @key: BER encoded public key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
*
* Return: zero on success; error code in case of error
*/
@@ -390,11 +401,12 @@ static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm,
* crypto_akcipher_set_priv_key() - Invoke set private key operation
*
* Function invokes the algorithm specific set key function, which knows
- * how to decode and interpret the encoded key
+ * how to decode and interpret the encoded key and parameters
*
* @tfm: tfm handle
- * @key: BER encoded private key
- * @keylen: length of the key
+ * @key: BER encoded private key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
*
* Return: zero on success; error code in case of error
*/
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1e64f354c2b8..23169f4d87e6 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -18,27 +18,11 @@
#include <crypto/hash.h>
#include <crypto/skcipher.h>
-struct cryptd_ablkcipher {
- struct crypto_ablkcipher base;
-};
-
-static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
- struct crypto_ablkcipher *tfm)
-{
- return (struct cryptd_ablkcipher *)tfm;
-}
-
-/* alg_name should be algorithm to be cryptd-ed */
-struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
-bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
-void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
-
struct cryptd_skcipher {
struct crypto_skcipher base;
};
+/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
u32 type, u32 mask);
struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
diff --git a/include/crypto/des.h b/include/crypto/des.h
index d4094d58ac54..72c7c8e5a5a7 100644
--- a/include/crypto/des.h
+++ b/include/crypto/des.h
@@ -6,6 +6,11 @@
#ifndef __CRYPTO_DES_H
#define __CRYPTO_DES_H
+#include <crypto/skcipher.h>
+#include <linux/compiler.h>
+#include <linux/fips.h>
+#include <linux/string.h>
+
#define DES_KEY_SIZE 8
#define DES_EXPKEY_WORDS 32
#define DES_BLOCK_SIZE 8
@@ -14,6 +19,44 @@
#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS)
#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE
+static inline int __des3_verify_key(u32 *flags, const u8 *key)
+{
+ int err = -EINVAL;
+ u32 K[6];
+
+ memcpy(K, key, DES3_EDE_KEY_SIZE);
+
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (fips_enabled ||
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)))
+ goto bad;
+
+ if (unlikely(!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled)
+ goto bad;
+
+ err = 0;
+
+out:
+ memzero_explicit(K, DES3_EDE_KEY_SIZE);
+
+ return err;
+
+bad:
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ goto out;
+}
+
+static inline int des3_verify_key(struct crypto_skcipher *tfm, const u8 *key)
+{
+ u32 flags;
+ int err;
+
+ flags = crypto_skcipher_get_flags(tfm);
+ err = __des3_verify_key(&flags, key);
+ crypto_skcipher_set_flags(tfm, flags);
+ return err;
+}
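A typical consumer of this helper is a 3DES setkey callback that rejects weak keys before expanding the schedule or programming hardware. A hedged sketch, with the context structure and copy step invented for illustration:

    static int example_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                   unsigned int keylen)
    {
            struct example_ctx *ctx = crypto_skcipher_ctx(tfm);  /* hypothetical ctx */
            int err;

            if (keylen != DES3_EDE_KEY_SIZE)
                    return -EINVAL;

            err = des3_verify_key(tfm, key);        /* flags weak K1 == K2 etc. */
            if (err)
                    return err;

            memcpy(ctx->key, key, keylen);          /* expand/program as needed */
            return 0;
    }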
extern unsigned long des_ekey(u32 *pe, const u8 *k);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 3b31c1b349ae..d21bea2c4382 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -146,8 +146,6 @@ struct ahash_alg {
struct shash_desc {
struct crypto_shash *tfm;
- u32 flags;
-
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -819,6 +817,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
* cipher handle must point to a keyed message digest cipher in order for this
* function to succeed.
*
+ * Context: Any context.
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -835,6 +834,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
+ * Context: Any context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -850,6 +850,7 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
* caller-allocated output buffer out which must have sufficient size (e.g. by
* calling crypto_shash_descsize).
*
+ * Context: Any context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
static inline int crypto_shash_export(struct shash_desc *desc, void *out)
@@ -866,6 +867,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
* the input buffer. That buffer should have been generated with the
* crypto_ahash_export function.
*
+ * Context: Any context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
@@ -886,6 +888,7 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
* operational state handle. Any potentially existing state created by
* previous operations is discarded.
*
+ * Context: Any context.
* Return: 0 if the message digest initialization was successful; < 0 if an
* error occurred
*/
@@ -907,6 +910,7 @@ static inline int crypto_shash_init(struct shash_desc *desc)
*
* Updates the message digest state of the operational state handle.
*
+ * Context: Any context.
* Return: 0 if the message digest update was successful; < 0 if an error
* occurred
*/
@@ -923,6 +927,7 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
* into the output buffer. The caller must ensure that the output buffer is
* large enough by using crypto_shash_digestsize.
*
+ * Context: Any context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -939,6 +944,7 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out);
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate functions.
*
+ * Context: Any context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index f18344518e32..d2316242a988 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,11 @@
#ifndef _CRYPTO_INTERNAL_SIMD_H
#define _CRYPTO_INTERNAL_SIMD_H
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+/* skcipher support */
+
struct simd_skcipher_alg;
struct skcipher_alg;
@@ -22,4 +27,43 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_skcipher_alg **simd_algs);
+/* AEAD support */
+
+struct simd_aead_alg;
+struct aead_alg;
+
+struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+ const char *drvname,
+ const char *basename);
+struct simd_aead_alg *simd_aead_create(const char *algname,
+ const char *basename);
+void simd_aead_free(struct simd_aead_alg *alg);
+
+int simd_register_aeads_compat(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs);
+
+void simd_unregister_aeads(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs);
+
+/*
+ * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or
+ * access the SIMD register file?
+ *
+ * This delegates to may_use_simd(), except that this also returns false if SIMD
+ * in crypto code has been temporarily disabled on this CPU by the crypto
+ * self-tests, in order to test the no-SIMD fallback code. This override is
+ * currently limited to configurations where the extra self-tests are enabled,
+ * because it might be a bit too invasive to be part of the regular self-tests.
+ *
+ * This is a macro so that <asm/simd.h>, which some architectures don't have,
+ * doesn't have to be included directly here.
+ */
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
+#define crypto_simd_usable() \
+ (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
+#else
+#define crypto_simd_usable() may_use_simd()
+#endif
+
#endif /* _CRYPTO_INTERNAL_SIMD_H */
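The intended call pattern for a SIMD-accelerated driver, sketched for x86 (the example_* symbols are placeholders, not from this patch):

    static void example_crypt(struct example_ctx *ctx, u8 *dst, const u8 *src)
    {
            if (!crypto_simd_usable()) {
                    example_crypt_generic(ctx, dst, src);   /* no-SIMD fallback */
                    return;
            }

            kernel_fpu_begin();     /* x86: claim the SIMD register file */
            example_crypt_simd(ctx, dst, src);
            kernel_fpu_end();
    }

With CONFIG_CRYPTO_MANAGER_EXTRA_TESTS enabled, the self-tests can flip crypto_simd_disabled_for_test and force the generic path, which is exactly what the override above exists to exercise.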
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index ad2aa743dd99..5cefddb1991f 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -47,16 +47,7 @@ int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
int crypto_morus1280_glue_encrypt(struct aead_request *req);
int crypto_morus1280_glue_decrypt(struct aead_request *req);
-int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen);
-int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
- unsigned int authsize);
-int cryptd_morus1280_glue_encrypt(struct aead_request *req);
-int cryptd_morus1280_glue_decrypt(struct aead_request *req);
-int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead);
-void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
-
-#define MORUS1280_DECLARE_ALGS(id, driver_name, priority) \
+#define MORUS1280_DECLARE_ALG(id, driver_name, priority) \
static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\
.init = crypto_morus1280_##id##_init, \
.ad = crypto_morus1280_##id##_ad, \
@@ -77,55 +68,29 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- static struct aead_alg crypto_morus1280_##id##_algs[] = {\
- { \
- .setkey = crypto_morus1280_glue_setkey, \
- .setauthsize = crypto_morus1280_glue_setauthsize, \
- .encrypt = crypto_morus1280_glue_encrypt, \
- .decrypt = crypto_morus1280_glue_decrypt, \
- .init = crypto_morus1280_##id##_init_tfm, \
- .exit = crypto_morus1280_##id##_exit_tfm, \
- \
- .ivsize = MORUS_NONCE_SIZE, \
- .maxauthsize = MORUS_MAX_AUTH_SIZE, \
- .chunksize = MORUS1280_BLOCK_SIZE, \
- \
- .base = { \
- .cra_flags = CRYPTO_ALG_INTERNAL, \
- .cra_blocksize = 1, \
- .cra_ctxsize = sizeof(struct morus1280_ctx), \
- .cra_alignmask = 0, \
- \
- .cra_name = "__morus1280", \
- .cra_driver_name = "__"driver_name, \
- \
- .cra_module = THIS_MODULE, \
- } \
- }, { \
- .setkey = cryptd_morus1280_glue_setkey, \
- .setauthsize = cryptd_morus1280_glue_setauthsize, \
- .encrypt = cryptd_morus1280_glue_encrypt, \
- .decrypt = cryptd_morus1280_glue_decrypt, \
- .init = cryptd_morus1280_glue_init_tfm, \
- .exit = cryptd_morus1280_glue_exit_tfm, \
+ static struct aead_alg crypto_morus1280_##id##_alg = { \
+ .setkey = crypto_morus1280_glue_setkey, \
+ .setauthsize = crypto_morus1280_glue_setauthsize, \
+ .encrypt = crypto_morus1280_glue_encrypt, \
+ .decrypt = crypto_morus1280_glue_decrypt, \
+ .init = crypto_morus1280_##id##_init_tfm, \
+ .exit = crypto_morus1280_##id##_exit_tfm, \
+ \
+ .ivsize = MORUS_NONCE_SIZE, \
+ .maxauthsize = MORUS_MAX_AUTH_SIZE, \
+ .chunksize = MORUS1280_BLOCK_SIZE, \
+ \
+ .base = { \
+ .cra_flags = CRYPTO_ALG_INTERNAL, \
+ .cra_blocksize = 1, \
+ .cra_ctxsize = sizeof(struct morus1280_ctx), \
+ .cra_alignmask = 0, \
+ .cra_priority = priority, \
\
- .ivsize = MORUS_NONCE_SIZE, \
- .maxauthsize = MORUS_MAX_AUTH_SIZE, \
- .chunksize = MORUS1280_BLOCK_SIZE, \
+ .cra_name = "__morus1280", \
+ .cra_driver_name = "__"driver_name, \
\
- .base = { \
- .cra_flags = CRYPTO_ALG_ASYNC, \
- .cra_blocksize = 1, \
- .cra_ctxsize = sizeof(struct crypto_aead *), \
- .cra_alignmask = 0, \
- \
- .cra_priority = priority, \
- \
- .cra_name = "morus1280", \
- .cra_driver_name = driver_name, \
- \
- .cra_module = THIS_MODULE, \
- } \
+ .cra_module = THIS_MODULE, \
} \
}
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index df8e1103ff94..0ee6266cb26c 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -47,16 +47,7 @@ int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
int crypto_morus640_glue_encrypt(struct aead_request *req);
int crypto_morus640_glue_decrypt(struct aead_request *req);
-int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen);
-int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
- unsigned int authsize);
-int cryptd_morus640_glue_encrypt(struct aead_request *req);
-int cryptd_morus640_glue_decrypt(struct aead_request *req);
-int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead);
-void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
-
-#define MORUS640_DECLARE_ALGS(id, driver_name, priority) \
+#define MORUS640_DECLARE_ALG(id, driver_name, priority) \
static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\
.init = crypto_morus640_##id##_init, \
.ad = crypto_morus640_##id##_ad, \
@@ -77,55 +68,29 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- static struct aead_alg crypto_morus640_##id##_algs[] = {\
- { \
- .setkey = crypto_morus640_glue_setkey, \
- .setauthsize = crypto_morus640_glue_setauthsize, \
- .encrypt = crypto_morus640_glue_encrypt, \
- .decrypt = crypto_morus640_glue_decrypt, \
- .init = crypto_morus640_##id##_init_tfm, \
- .exit = crypto_morus640_##id##_exit_tfm, \
- \
- .ivsize = MORUS_NONCE_SIZE, \
- .maxauthsize = MORUS_MAX_AUTH_SIZE, \
- .chunksize = MORUS640_BLOCK_SIZE, \
- \
- .base = { \
- .cra_flags = CRYPTO_ALG_INTERNAL, \
- .cra_blocksize = 1, \
- .cra_ctxsize = sizeof(struct morus640_ctx), \
- .cra_alignmask = 0, \
- \
- .cra_name = "__morus640", \
- .cra_driver_name = "__"driver_name, \
- \
- .cra_module = THIS_MODULE, \
- } \
- }, { \
- .setkey = cryptd_morus640_glue_setkey, \
- .setauthsize = cryptd_morus640_glue_setauthsize, \
- .encrypt = cryptd_morus640_glue_encrypt, \
- .decrypt = cryptd_morus640_glue_decrypt, \
- .init = cryptd_morus640_glue_init_tfm, \
- .exit = cryptd_morus640_glue_exit_tfm, \
+ static struct aead_alg crypto_morus640_##id##_alg = {\
+ .setkey = crypto_morus640_glue_setkey, \
+ .setauthsize = crypto_morus640_glue_setauthsize, \
+ .encrypt = crypto_morus640_glue_encrypt, \
+ .decrypt = crypto_morus640_glue_decrypt, \
+ .init = crypto_morus640_##id##_init_tfm, \
+ .exit = crypto_morus640_##id##_exit_tfm, \
+ \
+ .ivsize = MORUS_NONCE_SIZE, \
+ .maxauthsize = MORUS_MAX_AUTH_SIZE, \
+ .chunksize = MORUS640_BLOCK_SIZE, \
+ \
+ .base = { \
+ .cra_flags = CRYPTO_ALG_INTERNAL, \
+ .cra_blocksize = 1, \
+ .cra_ctxsize = sizeof(struct morus640_ctx), \
+ .cra_alignmask = 0, \
+ .cra_priority = priority, \
\
- .ivsize = MORUS_NONCE_SIZE, \
- .maxauthsize = MORUS_MAX_AUTH_SIZE, \
- .chunksize = MORUS640_BLOCK_SIZE, \
+ .cra_name = "__morus640", \
+ .cra_driver_name = "__"driver_name, \
\
- .base = { \
- .cra_flags = CRYPTO_ALG_ASYNC, \
- .cra_blocksize = 1, \
- .cra_ctxsize = sizeof(struct crypto_aead *), \
- .cra_alignmask = 0, \
- \
- .cra_priority = priority, \
- \
- .cra_name = "morus640", \
- .cra_driver_name = driver_name, \
- \
- .cra_module = THIS_MODULE, \
- } \
+ .cra_module = THIS_MODULE, \
} \
}
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index be626eac9113..712fe1214b5f 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
#define _LINUX_PUBLIC_KEY_H
#include <linux/keyctl.h>
+#include <linux/oid_registry.h>
/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
@@ -25,6 +26,9 @@
struct public_key {
void *key;
u32 keylen;
+ enum OID algo;
+ void *params;
+ u32 paramlen;
bool key_is_private;
const char *id_type;
const char *pkey_algo;
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index 856e32af8657..cae1b4a01971 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -23,7 +23,10 @@ struct streebog_uint512 {
};
struct streebog_state {
- u8 buffer[STREEBOG_BLOCK_SIZE];
+ union {
+ u8 buffer[STREEBOG_BLOCK_SIZE];
+ struct streebog_uint512 m;
+ };
struct streebog_uint512 hash;
struct streebog_uint512 h;
struct streebog_uint512 N;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index bb9acea61369..286d58efed5d 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -36,6 +36,7 @@ struct drm_fb_helper;
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <linux/kgdb.h>
+#include <linux/vgaarb.h>
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
@@ -642,11 +643,18 @@ drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
int resource_id,
const char *name)
{
+ int ret = 0;
+
+ /*
+ * WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over.
+ */
#if IS_REACHABLE(CONFIG_FB)
- return remove_conflicting_pci_framebuffers(pdev, resource_id, name);
-#else
- return 0;
+ ret = remove_conflicting_pci_framebuffers(pdev, resource_id, name);
#endif
+ if (ret == 0)
+ ret = vga_remove_vgacon(pdev);
+ return ret;
}
#endif
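Call sites do not change: a PCI DRM driver keeps making the single probe-time call and now gets the vgacon removal as well. Roughly (driver symbols are placeholders):

    static int example_pci_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
    {
            int ret;

            /* Kick out conflicting fbdev drivers first, then vgacon. */
            ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0,
                                                                    "exampledrmfb");
            if (ret)
                    return ret;

            /* ... allocate and register the drm_device ... */
            return 0;
    }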
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index cfb7be40bed7..ce4de6b1e444 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -418,6 +418,8 @@ struct drm_crtc_helper_funcs {
* Drivers can use the @old_crtc_state input parameter if the operations
* needed to enable the CRTC don't depend solely on the new state but
* also on the transition between the old state and the new state.
+ *
+ * This function is optional.
*/
void (*atomic_enable)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
@@ -441,6 +443,8 @@ struct drm_crtc_helper_funcs {
* parameter @old_crtc_state which could be used to access the old
* state. Atomic drivers should consider to use this one instead
* of @disable.
+ *
+ * This function is optional.
*/
void (*atomic_disable)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index cbf3180cb612..668ad971cd7b 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -420,7 +420,6 @@ extern struct ttm_bo_global {
/**
* Protected by ttm_global_mutex.
*/
- unsigned int use_count;
struct list_head device_list;
/**
diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h
new file mode 100644
index 000000000000..030981cd2d56
--- /dev/null
+++ b/include/dt-bindings/clock/actions,s500-cmu.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Device Tree binding constants for Actions Semi S500 Clock Management Unit
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Copyright (c) 2018 LSI-TEC - Caninos Loucos
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_S500_CMU_H
+#define __DT_BINDINGS_CLOCK_S500_CMU_H
+
+#define CLK_NONE 0
+
+/* fixed rate clocks */
+#define CLK_LOSC 1
+#define CLK_HOSC 2
+
+/* pll clocks */
+#define CLK_CORE_PLL 3
+#define CLK_DEV_PLL 4
+#define CLK_DDR_PLL 5
+#define CLK_NAND_PLL 6
+#define CLK_DISPLAY_PLL 7
+#define CLK_ETHERNET_PLL 8
+#define CLK_AUDIO_PLL 9
+
+/* system clock */
+#define CLK_DEV 10
+#define CLK_H 11
+#define CLK_AHBPREDIV 12
+#define CLK_AHB 13
+#define CLK_DE 14
+#define CLK_BISP 15
+#define CLK_VCE 16
+#define CLK_VDE 17
+
+/* peripheral device clock */
+#define CLK_TIMER 18
+#define CLK_I2C0 19
+#define CLK_I2C1 20
+#define CLK_I2C2 21
+#define CLK_I2C3 22
+#define CLK_PWM0 23
+#define CLK_PWM1 24
+#define CLK_PWM2 25
+#define CLK_PWM3 26
+#define CLK_PWM4 27
+#define CLK_PWM5 28
+#define CLK_SD0 29
+#define CLK_SD1 30
+#define CLK_SD2 31
+#define CLK_SENSOR0 32
+#define CLK_SENSOR1 33
+#define CLK_SPI0 34
+#define CLK_SPI1 35
+#define CLK_SPI2 36
+#define CLK_SPI3 37
+#define CLK_UART0 38
+#define CLK_UART1 39
+#define CLK_UART2 40
+#define CLK_UART3 41
+#define CLK_UART4 42
+#define CLK_UART5 43
+#define CLK_UART6 44
+#define CLK_DE1 45
+#define CLK_DE2 46
+#define CLK_I2SRX 47
+#define CLK_I2STX 48
+#define CLK_HDMI_AUDIO 49
+#define CLK_HDMI 50
+#define CLK_SPDIF 51
+#define CLK_NAND 52
+#define CLK_ECC 53
+#define CLK_RMII_REF 54
+
+#define CLK_NR_CLKS (CLK_RMII_REF + 1)
+
+#endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */
diff --git a/include/dt-bindings/clock/axg-aoclkc.h b/include/dt-bindings/clock/axg-aoclkc.h
index 61955016a55b..8ec4a269c7a6 100644
--- a/include/dt-bindings/clock/axg-aoclkc.h
+++ b/include/dt-bindings/clock/axg-aoclkc.h
@@ -21,6 +21,11 @@
#define CLKID_AO_SAR_ADC_SEL 8
#define CLKID_AO_SAR_ADC_DIV 9
#define CLKID_AO_SAR_ADC_CLK 10
-#define CLKID_AO_ALT_XTAL 11
+#define CLKID_AO_CTS_OSCIN 11
+#define CLKID_AO_32K_PRE 12
+#define CLKID_AO_32K_DIV 13
+#define CLKID_AO_32K_SEL 14
+#define CLKID_AO_32K 15
+#define CLKID_AO_CTS_RTC_OSCIN 16
#endif
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 98bd85ce1e45..25ffa53573a5 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -156,7 +156,7 @@
#define CLK_ACLK_G2D_266 220
#define CLK_ACLK_G2D_400 221
#define CLK_ACLK_G3D_400 222
-#define CLK_ACLK_IMEM_SSX_266 223
+#define CLK_ACLK_IMEM_SSSX_266 223
#define CLK_ACLK_BUS0_400 224
#define CLK_ACLK_BUS1_400 225
#define CLK_ACLK_IMEM_200 226
@@ -1406,4 +1406,10 @@
#define CAM1_NR_CLK 113
+/* CMU_IMEM */
+#define CLK_ACLK_SLIMSSS 2
+#define CLK_PCLK_SLIMSSS 35
+
+#define IMEM_NR_CLK 36
+
#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/g12a-aoclkc.h b/include/dt-bindings/clock/g12a-aoclkc.h
new file mode 100644
index 000000000000..8db01ffbeb06
--- /dev/null
+++ b/include/dt-bindings/clock/g12a-aoclkc.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai <qiufang.dai@amlogic.com>
+ */
+
+#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_G12A_AOCLK
+#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_G12A_AOCLK
+
+#define CLKID_AO_AHB 0
+#define CLKID_AO_IR_IN 1
+#define CLKID_AO_I2C_M0 2
+#define CLKID_AO_I2C_S0 3
+#define CLKID_AO_UART 4
+#define CLKID_AO_PROD_I2C 5
+#define CLKID_AO_UART2 6
+#define CLKID_AO_IR_OUT 7
+#define CLKID_AO_SAR_ADC 8
+#define CLKID_AO_MAILBOX 9
+#define CLKID_AO_M3 10
+#define CLKID_AO_AHB_SRAM 11
+#define CLKID_AO_RTI 12
+#define CLKID_AO_M4_FCLK 13
+#define CLKID_AO_M4_HCLK 14
+#define CLKID_AO_CLK81 15
+#define CLKID_AO_SAR_ADC_CLK 18
+#define CLKID_AO_32K 23
+#define CLKID_AO_CEC 27
+#define CLKID_AO_CTS_RTC_OSCIN 28
+
+#endif
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
new file mode 100644
index 000000000000..83b657038d1e
--- /dev/null
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Meson-G12A clock tree IDs
+ *
+ * Copyright (c) 2018 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __G12A_CLKC_H
+#define __G12A_CLKC_H
+
+#define CLKID_SYS_PLL 0
+#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2 2
+#define CLKID_FCLK_DIV3 3
+#define CLKID_FCLK_DIV4 4
+#define CLKID_FCLK_DIV5 5
+#define CLKID_FCLK_DIV7 6
+#define CLKID_GP0_PLL 7
+#define CLKID_CLK81 10
+#define CLKID_MPLL0 11
+#define CLKID_MPLL1 12
+#define CLKID_MPLL2 13
+#define CLKID_MPLL3 14
+#define CLKID_DDR 15
+#define CLKID_DOS 16
+#define CLKID_AUDIO_LOCKER 17
+#define CLKID_MIPI_DSI_HOST 18
+#define CLKID_ETH_PHY 19
+#define CLKID_ISA 20
+#define CLKID_PL301 21
+#define CLKID_PERIPHS 22
+#define CLKID_SPICC0 23
+#define CLKID_I2C 24
+#define CLKID_SANA 25
+#define CLKID_SD 26
+#define CLKID_RNG0 27
+#define CLKID_UART0 28
+#define CLKID_SPICC1 29
+#define CLKID_HIU_IFACE 30
+#define CLKID_MIPI_DSI_PHY 31
+#define CLKID_ASSIST_MISC 32
+#define CLKID_SD_EMMC_A 33
+#define CLKID_SD_EMMC_B 34
+#define CLKID_SD_EMMC_C 35
+#define CLKID_AUDIO_CODEC 36
+#define CLKID_AUDIO 37
+#define CLKID_ETH 38
+#define CLKID_DEMUX 39
+#define CLKID_AUDIO_IFIFO 40
+#define CLKID_ADC 41
+#define CLKID_UART1 42
+#define CLKID_G2D 43
+#define CLKID_RESET 44
+#define CLKID_PCIE_COMB 45
+#define CLKID_PARSER 46
+#define CLKID_USB 47
+#define CLKID_PCIE_PHY 48
+#define CLKID_AHB_ARB0 49
+#define CLKID_AHB_DATA_BUS 50
+#define CLKID_AHB_CTRL_BUS 51
+#define CLKID_HTX_HDCP22 52
+#define CLKID_HTX_PCLK 53
+#define CLKID_BT656 54
+#define CLKID_USB1_DDR_BRIDGE 55
+#define CLKID_MMC_PCLK 56
+#define CLKID_UART2 57
+#define CLKID_VPU_INTR 58
+#define CLKID_GIC 59
+#define CLKID_SD_EMMC_A_CLK0 60
+#define CLKID_SD_EMMC_B_CLK0 61
+#define CLKID_SD_EMMC_C_CLK0 62
+#define CLKID_HIFI_PLL 74
+#define CLKID_VCLK2_VENCI0 80
+#define CLKID_VCLK2_VENCI1 81
+#define CLKID_VCLK2_VENCP0 82
+#define CLKID_VCLK2_VENCP1 83
+#define CLKID_VCLK2_VENCT0 84
+#define CLKID_VCLK2_VENCT1 85
+#define CLKID_VCLK2_OTHER 86
+#define CLKID_VCLK2_ENCI 87
+#define CLKID_VCLK2_ENCP 88
+#define CLKID_DAC_CLK 89
+#define CLKID_AOCLK 90
+#define CLKID_IEC958 91
+#define CLKID_ENC480P 92
+#define CLKID_RNG1 93
+#define CLKID_VCLK2_ENCT 94
+#define CLKID_VCLK2_ENCL 95
+#define CLKID_VCLK2_VENCLMMC 96
+#define CLKID_VCLK2_VENCL 97
+#define CLKID_VCLK2_OTHER1 98
+#define CLKID_FCLK_DIV2P5 99
+#define CLKID_DMA 105
+#define CLKID_EFUSE 106
+#define CLKID_ROM_BOOT 107
+#define CLKID_RESET_SEC 108
+#define CLKID_SEC_AHB_APB3 109
+#define CLKID_VPU_0_SEL 110
+#define CLKID_VPU_0 112
+#define CLKID_VPU_1_SEL 113
+#define CLKID_VPU_1 115
+#define CLKID_VPU 116
+#define CLKID_VAPB_0_SEL 117
+#define CLKID_VAPB_0 119
+#define CLKID_VAPB_1_SEL 120
+#define CLKID_VAPB_1 122
+#define CLKID_VAPB_SEL 123
+#define CLKID_VAPB 124
+#define CLKID_HDMI_PLL 128
+#define CLKID_VID_PLL 129
+#define CLKID_VCLK 138
+#define CLKID_VCLK2 139
+#define CLKID_VCLK_DIV1 148
+#define CLKID_VCLK_DIV2 149
+#define CLKID_VCLK_DIV4 150
+#define CLKID_VCLK_DIV6 151
+#define CLKID_VCLK_DIV12 152
+#define CLKID_VCLK2_DIV1 153
+#define CLKID_VCLK2_DIV2 154
+#define CLKID_VCLK2_DIV4 155
+#define CLKID_VCLK2_DIV6 156
+#define CLKID_VCLK2_DIV12 157
+#define CLKID_CTS_ENCI 162
+#define CLKID_CTS_ENCP 163
+#define CLKID_CTS_VDAC 164
+#define CLKID_HDMI_TX 165
+#define CLKID_HDMI 168
+#define CLKID_MALI_0_SEL 169
+#define CLKID_MALI_0 171
+#define CLKID_MALI_1_SEL 172
+#define CLKID_MALI_1 174
+#define CLKID_MALI 175
+#define CLKID_MPLL_5OM 177
+
+#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/gxbb-aoclkc.h b/include/dt-bindings/clock/gxbb-aoclkc.h
index 9d15e2221fdb..ec3b26319fc4 100644
--- a/include/dt-bindings/clock/gxbb-aoclkc.h
+++ b/include/dt-bindings/clock/gxbb-aoclkc.h
@@ -63,5 +63,12 @@
#define CLKID_AO_UART2 4
#define CLKID_AO_IR_BLASTER 5
#define CLKID_AO_CEC_32K 6
+#define CLKID_AO_CTS_OSCIN 7
+#define CLKID_AO_32K_PRE 8
+#define CLKID_AO_32K_DIV 9
+#define CLKID_AO_32K_SEL 10
+#define CLKID_AO_32K 11
+#define CLKID_AO_CTS_RTC_OSCIN 12
+#define CLKID_AO_CLK81 13
#endif
diff --git a/include/dt-bindings/clock/imx5-clock.h b/include/dt-bindings/clock/imx5-clock.h
index d382fc71aa83..a81be5be6700 100644
--- a/include/dt-bindings/clock/imx5-clock.h
+++ b/include/dt-bindings/clock/imx5-clock.h
@@ -214,6 +214,7 @@
#define IMX5_CLK_IEEE1588_SEL 202
#define IMX5_CLK_IEEE1588_PODF 203
#define IMX5_CLK_IEEE1588_GATE 204
-#define IMX5_CLK_END 205
+#define IMX5_CLK_SCC2_IPG_GATE 205
+#define IMX5_CLK_END 206
#endif /* __DT_BINDINGS_CLOCK_IMX5_H */
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
new file mode 100644
index 000000000000..1b4353e7b486
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8MM_H
+#define __DT_BINDINGS_CLOCK_IMX8MM_H
+
+#define IMX8MM_CLK_DUMMY 0
+#define IMX8MM_CLK_32K 1
+#define IMX8MM_CLK_24M 2
+#define IMX8MM_OSC_HDMI_CLK 3
+#define IMX8MM_CLK_EXT1 4
+#define IMX8MM_CLK_EXT2 5
+#define IMX8MM_CLK_EXT3 6
+#define IMX8MM_CLK_EXT4 7
+#define IMX8MM_AUDIO_PLL1_REF_SEL 8
+#define IMX8MM_AUDIO_PLL2_REF_SEL 9
+#define IMX8MM_VIDEO_PLL1_REF_SEL 10
+#define IMX8MM_DRAM_PLL_REF_SEL 11
+#define IMX8MM_GPU_PLL_REF_SEL 12
+#define IMX8MM_VPU_PLL_REF_SEL 13
+#define IMX8MM_ARM_PLL_REF_SEL 14
+#define IMX8MM_SYS_PLL1_REF_SEL 15
+#define IMX8MM_SYS_PLL2_REF_SEL 16
+#define IMX8MM_SYS_PLL3_REF_SEL 17
+#define IMX8MM_AUDIO_PLL1 18
+#define IMX8MM_AUDIO_PLL2 19
+#define IMX8MM_VIDEO_PLL1 20
+#define IMX8MM_DRAM_PLL 21
+#define IMX8MM_GPU_PLL 22
+#define IMX8MM_VPU_PLL 23
+#define IMX8MM_ARM_PLL 24
+#define IMX8MM_SYS_PLL1 25
+#define IMX8MM_SYS_PLL2 26
+#define IMX8MM_SYS_PLL3 27
+#define IMX8MM_AUDIO_PLL1_BYPASS 28
+#define IMX8MM_AUDIO_PLL2_BYPASS 29
+#define IMX8MM_VIDEO_PLL1_BYPASS 30
+#define IMX8MM_DRAM_PLL_BYPASS 31
+#define IMX8MM_GPU_PLL_BYPASS 32
+#define IMX8MM_VPU_PLL_BYPASS 33
+#define IMX8MM_ARM_PLL_BYPASS 34
+#define IMX8MM_SYS_PLL1_BYPASS 35
+#define IMX8MM_SYS_PLL2_BYPASS 36
+#define IMX8MM_SYS_PLL3_BYPASS 37
+#define IMX8MM_AUDIO_PLL1_OUT 38
+#define IMX8MM_AUDIO_PLL2_OUT 39
+#define IMX8MM_VIDEO_PLL1_OUT 40
+#define IMX8MM_DRAM_PLL_OUT 41
+#define IMX8MM_GPU_PLL_OUT 42
+#define IMX8MM_VPU_PLL_OUT 43
+#define IMX8MM_ARM_PLL_OUT 44
+#define IMX8MM_SYS_PLL1_OUT 45
+#define IMX8MM_SYS_PLL2_OUT 46
+#define IMX8MM_SYS_PLL3_OUT 47
+#define IMX8MM_SYS_PLL1_40M 48
+#define IMX8MM_SYS_PLL1_80M 49
+#define IMX8MM_SYS_PLL1_100M 50
+#define IMX8MM_SYS_PLL1_133M 51
+#define IMX8MM_SYS_PLL1_160M 52
+#define IMX8MM_SYS_PLL1_200M 53
+#define IMX8MM_SYS_PLL1_266M 54
+#define IMX8MM_SYS_PLL1_400M 55
+#define IMX8MM_SYS_PLL1_800M 56
+#define IMX8MM_SYS_PLL2_50M 57
+#define IMX8MM_SYS_PLL2_100M 58
+#define IMX8MM_SYS_PLL2_125M 59
+#define IMX8MM_SYS_PLL2_166M 60
+#define IMX8MM_SYS_PLL2_200M 61
+#define IMX8MM_SYS_PLL2_250M 62
+#define IMX8MM_SYS_PLL2_333M 63
+#define IMX8MM_SYS_PLL2_500M 64
+#define IMX8MM_SYS_PLL2_1000M 65
+
+/* core */
+#define IMX8MM_CLK_A53_SRC 66
+#define IMX8MM_CLK_M4_SRC 67
+#define IMX8MM_CLK_VPU_SRC 68
+#define IMX8MM_CLK_GPU3D_SRC 69
+#define IMX8MM_CLK_GPU2D_SRC 70
+#define IMX8MM_CLK_A53_CG 71
+#define IMX8MM_CLK_M4_CG 72
+#define IMX8MM_CLK_VPU_CG 73
+#define IMX8MM_CLK_GPU3D_CG 74
+#define IMX8MM_CLK_GPU2D_CG 75
+#define IMX8MM_CLK_A53_DIV 76
+#define IMX8MM_CLK_M4_DIV 77
+#define IMX8MM_CLK_VPU_DIV 78
+#define IMX8MM_CLK_GPU3D_DIV 79
+#define IMX8MM_CLK_GPU2D_DIV 80
+
+/* bus */
+#define IMX8MM_CLK_MAIN_AXI 81
+#define IMX8MM_CLK_ENET_AXI 82
+#define IMX8MM_CLK_NAND_USDHC_BUS 83
+#define IMX8MM_CLK_VPU_BUS 84
+#define IMX8MM_CLK_DISP_AXI 85
+#define IMX8MM_CLK_DISP_APB 86
+#define IMX8MM_CLK_DISP_RTRM 87
+#define IMX8MM_CLK_USB_BUS 88
+#define IMX8MM_CLK_GPU_AXI 89
+#define IMX8MM_CLK_GPU_AHB 90
+#define IMX8MM_CLK_NOC 91
+#define IMX8MM_CLK_NOC_APB 92
+
+#define IMX8MM_CLK_AHB 93
+#define IMX8MM_CLK_AUDIO_AHB 94
+#define IMX8MM_CLK_IPG_ROOT 95
+#define IMX8MM_CLK_IPG_AUDIO_ROOT 96
+
+#define IMX8MM_CLK_DRAM_ALT 97
+#define IMX8MM_CLK_DRAM_APB 98
+#define IMX8MM_CLK_VPU_G1 99
+#define IMX8MM_CLK_VPU_G2 100
+#define IMX8MM_CLK_DISP_DTRC 101
+#define IMX8MM_CLK_DISP_DC8000 102
+#define IMX8MM_CLK_PCIE1_CTRL 103
+#define IMX8MM_CLK_PCIE1_PHY 104
+#define IMX8MM_CLK_PCIE1_AUX 105
+#define IMX8MM_CLK_DC_PIXEL 106
+#define IMX8MM_CLK_LCDIF_PIXEL 107
+#define IMX8MM_CLK_SAI1 108
+#define IMX8MM_CLK_SAI2 109
+#define IMX8MM_CLK_SAI3 110
+#define IMX8MM_CLK_SAI4 111
+#define IMX8MM_CLK_SAI5 112
+#define IMX8MM_CLK_SAI6 113
+#define IMX8MM_CLK_SPDIF1 114
+#define IMX8MM_CLK_SPDIF2 115
+#define IMX8MM_CLK_ENET_REF 116
+#define IMX8MM_CLK_ENET_TIMER 117
+#define IMX8MM_CLK_ENET_PHY_REF 118
+#define IMX8MM_CLK_NAND 119
+#define IMX8MM_CLK_QSPI 120
+#define IMX8MM_CLK_USDHC1 121
+#define IMX8MM_CLK_USDHC2 122
+#define IMX8MM_CLK_I2C1 123
+#define IMX8MM_CLK_I2C2 124
+#define IMX8MM_CLK_I2C3 125
+#define IMX8MM_CLK_I2C4 126
+#define IMX8MM_CLK_UART1 127
+#define IMX8MM_CLK_UART2 128
+#define IMX8MM_CLK_UART3 129
+#define IMX8MM_CLK_UART4 130
+#define IMX8MM_CLK_USB_CORE_REF 131
+#define IMX8MM_CLK_USB_PHY_REF 132
+#define IMX8MM_CLK_ECSPI1 133
+#define IMX8MM_CLK_ECSPI2 134
+#define IMX8MM_CLK_PWM1 135
+#define IMX8MM_CLK_PWM2 136
+#define IMX8MM_CLK_PWM3 137
+#define IMX8MM_CLK_PWM4 138
+#define IMX8MM_CLK_GPT1 139
+#define IMX8MM_CLK_WDOG 140
+#define IMX8MM_CLK_WRCLK 141
+#define IMX8MM_CLK_DSI_CORE 142
+#define IMX8MM_CLK_DSI_PHY_REF 143
+#define IMX8MM_CLK_DSI_DBI 144
+#define IMX8MM_CLK_USDHC3 145
+#define IMX8MM_CLK_CSI1_CORE 146
+#define IMX8MM_CLK_CSI1_PHY_REF 147
+#define IMX8MM_CLK_CSI1_ESC 148
+#define IMX8MM_CLK_CSI2_CORE 149
+#define IMX8MM_CLK_CSI2_PHY_REF 150
+#define IMX8MM_CLK_CSI2_ESC 151
+#define IMX8MM_CLK_PCIE2_CTRL 152
+#define IMX8MM_CLK_PCIE2_PHY 153
+#define IMX8MM_CLK_PCIE2_AUX 154
+#define IMX8MM_CLK_ECSPI3 155
+#define IMX8MM_CLK_PDM 156
+#define IMX8MM_CLK_VPU_H1 157
+#define IMX8MM_CLK_CLKO1 158
+
+#define IMX8MM_CLK_ECSPI1_ROOT 159
+#define IMX8MM_CLK_ECSPI2_ROOT 160
+#define IMX8MM_CLK_ECSPI3_ROOT 161
+#define IMX8MM_CLK_ENET1_ROOT 162
+#define IMX8MM_CLK_GPT1_ROOT 163
+#define IMX8MM_CLK_I2C1_ROOT 164
+#define IMX8MM_CLK_I2C2_ROOT 165
+#define IMX8MM_CLK_I2C3_ROOT 166
+#define IMX8MM_CLK_I2C4_ROOT 167
+#define IMX8MM_CLK_OCOTP_ROOT 168
+#define IMX8MM_CLK_PCIE1_ROOT 169
+#define IMX8MM_CLK_PWM1_ROOT 170
+#define IMX8MM_CLK_PWM2_ROOT 171
+#define IMX8MM_CLK_PWM3_ROOT 172
+#define IMX8MM_CLK_PWM4_ROOT 173
+#define IMX8MM_CLK_QSPI_ROOT 174
+#define IMX8MM_CLK_NAND_ROOT 175
+#define IMX8MM_CLK_SAI1_ROOT 176
+#define IMX8MM_CLK_SAI1_IPG 177
+#define IMX8MM_CLK_SAI2_ROOT 178
+#define IMX8MM_CLK_SAI2_IPG 179
+#define IMX8MM_CLK_SAI3_ROOT 180
+#define IMX8MM_CLK_SAI3_IPG 181
+#define IMX8MM_CLK_SAI4_ROOT 182
+#define IMX8MM_CLK_SAI4_IPG 183
+#define IMX8MM_CLK_SAI5_ROOT 184
+#define IMX8MM_CLK_SAI5_IPG 185
+#define IMX8MM_CLK_SAI6_ROOT 186
+#define IMX8MM_CLK_SAI6_IPG 187
+#define IMX8MM_CLK_UART1_ROOT 188
+#define IMX8MM_CLK_UART2_ROOT 189
+#define IMX8MM_CLK_UART3_ROOT 190
+#define IMX8MM_CLK_UART4_ROOT 191
+#define IMX8MM_CLK_USB1_CTRL_ROOT 192
+#define IMX8MM_CLK_GPU3D_ROOT 193
+#define IMX8MM_CLK_USDHC1_ROOT 194
+#define IMX8MM_CLK_USDHC2_ROOT 195
+#define IMX8MM_CLK_WDOG1_ROOT 196
+#define IMX8MM_CLK_WDOG2_ROOT 197
+#define IMX8MM_CLK_WDOG3_ROOT 198
+#define IMX8MM_CLK_VPU_G1_ROOT 199
+#define IMX8MM_CLK_GPU_BUS_ROOT 200
+#define IMX8MM_CLK_VPU_H1_ROOT 201
+#define IMX8MM_CLK_VPU_G2_ROOT 202
+#define IMX8MM_CLK_PDM_ROOT 203
+#define IMX8MM_CLK_DISP_ROOT 204
+#define IMX8MM_CLK_DISP_AXI_ROOT 205
+#define IMX8MM_CLK_DISP_APB_ROOT 206
+#define IMX8MM_CLK_DISP_RTRM_ROOT 207
+#define IMX8MM_CLK_USDHC3_ROOT 208
+#define IMX8MM_CLK_TMU_ROOT 209
+#define IMX8MM_CLK_VPU_DEC_ROOT 210
+#define IMX8MM_CLK_SDMA1_ROOT 211
+#define IMX8MM_CLK_SDMA2_ROOT 212
+#define IMX8MM_CLK_SDMA3_ROOT 213
+#define IMX8MM_CLK_GPT_3M 214
+#define IMX8MM_CLK_ARM 215
+#define IMX8MM_CLK_PDM_IPG 216
+#define IMX8MM_CLK_GPU2D_ROOT 217
+#define IMX8MM_CLK_MU_ROOT 218
+#define IMX8MM_CLK_CSI1_ROOT 219
+
+#define IMX8MM_CLK_DRAM_CORE 220
+#define IMX8MM_CLK_DRAM_ALT_ROOT 221
+
+#define IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK 222
+
+#define IMX8MM_CLK_END 223
+
+#endif
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index 04f7ac345984..6677e920dc2d 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -245,151 +245,160 @@
/* USB_CORE_REF */
#define IMX8MQ_CLK_USB_CORE_REF 152
/* USB_PHY_REF */
-#define IMX8MQ_CLK_USB_PHY_REF 163
+#define IMX8MQ_CLK_USB_PHY_REF 153
/* ECSPI1 */
-#define IMX8MQ_CLK_ECSPI1 164
+#define IMX8MQ_CLK_ECSPI1 154
/* ECSPI2 */
-#define IMX8MQ_CLK_ECSPI2 165
+#define IMX8MQ_CLK_ECSPI2 155
/* PWM1 */
-#define IMX8MQ_CLK_PWM1 166
+#define IMX8MQ_CLK_PWM1 156
/* PWM2 */
-#define IMX8MQ_CLK_PWM2 167
+#define IMX8MQ_CLK_PWM2 157
/* PWM3 */
-#define IMX8MQ_CLK_PWM3 168
+#define IMX8MQ_CLK_PWM3 158
/* PWM4 */
-#define IMX8MQ_CLK_PWM4 169
+#define IMX8MQ_CLK_PWM4 159
/* GPT1 */
-#define IMX8MQ_CLK_GPT1 170
+#define IMX8MQ_CLK_GPT1 160
/* WDOG */
-#define IMX8MQ_CLK_WDOG 171
+#define IMX8MQ_CLK_WDOG 161
/* WRCLK */
-#define IMX8MQ_CLK_WRCLK 172
+#define IMX8MQ_CLK_WRCLK 162
/* DSI_CORE */
-#define IMX8MQ_CLK_DSI_CORE 173
+#define IMX8MQ_CLK_DSI_CORE 163
/* DSI_PHY */
-#define IMX8MQ_CLK_DSI_PHY_REF 174
+#define IMX8MQ_CLK_DSI_PHY_REF 164
/* DSI_DBI */
-#define IMX8MQ_CLK_DSI_DBI 175
+#define IMX8MQ_CLK_DSI_DBI 165
/*DSI_ESC */
-#define IMX8MQ_CLK_DSI_ESC 176
+#define IMX8MQ_CLK_DSI_ESC 166
/* CSI1_CORE */
-#define IMX8MQ_CLK_CSI1_CORE 177
+#define IMX8MQ_CLK_CSI1_CORE 167
/* CSI1_PHY */
-#define IMX8MQ_CLK_CSI1_PHY_REF 178
+#define IMX8MQ_CLK_CSI1_PHY_REF 168
/* CSI_ESC */
-#define IMX8MQ_CLK_CSI1_ESC 179
+#define IMX8MQ_CLK_CSI1_ESC 169
/* CSI2_CORE */
#define IMX8MQ_CLK_CSI2_CORE 170
/* CSI2_PHY */
-#define IMX8MQ_CLK_CSI2_PHY_REF 181
+#define IMX8MQ_CLK_CSI2_PHY_REF 171
/* CSI2_ESC */
-#define IMX8MQ_CLK_CSI2_ESC 182
+#define IMX8MQ_CLK_CSI2_ESC 172
/* PCIE2_CTRL */
-#define IMX8MQ_CLK_PCIE2_CTRL 183
+#define IMX8MQ_CLK_PCIE2_CTRL 173
/* PCIE2_PHY */
-#define IMX8MQ_CLK_PCIE2_PHY 184
+#define IMX8MQ_CLK_PCIE2_PHY 174
/* PCIE2_AUX */
-#define IMX8MQ_CLK_PCIE2_AUX 185
+#define IMX8MQ_CLK_PCIE2_AUX 175
/* ECSPI3 */
-#define IMX8MQ_CLK_ECSPI3 186
+#define IMX8MQ_CLK_ECSPI3 176
/* CCGR clocks */
-#define IMX8MQ_CLK_A53_ROOT 187
-#define IMX8MQ_CLK_DRAM_ROOT 188
-#define IMX8MQ_CLK_ECSPI1_ROOT 189
+#define IMX8MQ_CLK_A53_ROOT 177
+#define IMX8MQ_CLK_DRAM_ROOT 178
+#define IMX8MQ_CLK_ECSPI1_ROOT 179
#define IMX8MQ_CLK_ECSPI2_ROOT 180
#define IMX8MQ_CLK_ECSPI3_ROOT 181
#define IMX8MQ_CLK_ENET1_ROOT 182
-#define IMX8MQ_CLK_GPT1_ROOT 193
-#define IMX8MQ_CLK_I2C1_ROOT 194
-#define IMX8MQ_CLK_I2C2_ROOT 195
-#define IMX8MQ_CLK_I2C3_ROOT 196
-#define IMX8MQ_CLK_I2C4_ROOT 197
-#define IMX8MQ_CLK_M4_ROOT 198
-#define IMX8MQ_CLK_PCIE1_ROOT 199
-#define IMX8MQ_CLK_PCIE2_ROOT 200
-#define IMX8MQ_CLK_PWM1_ROOT 201
-#define IMX8MQ_CLK_PWM2_ROOT 202
-#define IMX8MQ_CLK_PWM3_ROOT 203
-#define IMX8MQ_CLK_PWM4_ROOT 204
-#define IMX8MQ_CLK_QSPI_ROOT 205
-#define IMX8MQ_CLK_SAI1_ROOT 206
-#define IMX8MQ_CLK_SAI2_ROOT 207
-#define IMX8MQ_CLK_SAI3_ROOT 208
-#define IMX8MQ_CLK_SAI4_ROOT 209
-#define IMX8MQ_CLK_SAI5_ROOT 210
-#define IMX8MQ_CLK_SAI6_ROOT 212
-#define IMX8MQ_CLK_UART1_ROOT 213
-#define IMX8MQ_CLK_UART2_ROOT 214
-#define IMX8MQ_CLK_UART3_ROOT 215
-#define IMX8MQ_CLK_UART4_ROOT 216
-#define IMX8MQ_CLK_USB1_CTRL_ROOT 217
-#define IMX8MQ_CLK_USB2_CTRL_ROOT 218
-#define IMX8MQ_CLK_USB1_PHY_ROOT 219
-#define IMX8MQ_CLK_USB2_PHY_ROOT 220
-#define IMX8MQ_CLK_USDHC1_ROOT 221
-#define IMX8MQ_CLK_USDHC2_ROOT 222
-#define IMX8MQ_CLK_WDOG1_ROOT 223
-#define IMX8MQ_CLK_WDOG2_ROOT 224
-#define IMX8MQ_CLK_WDOG3_ROOT 225
-#define IMX8MQ_CLK_GPU_ROOT 226
-#define IMX8MQ_CLK_HEVC_ROOT 227
-#define IMX8MQ_CLK_AVC_ROOT 228
-#define IMX8MQ_CLK_VP9_ROOT 229
-#define IMX8MQ_CLK_HEVC_INTER_ROOT 230
-#define IMX8MQ_CLK_DISP_ROOT 231
-#define IMX8MQ_CLK_HDMI_ROOT 232
-#define IMX8MQ_CLK_HDMI_PHY_ROOT 233
-#define IMX8MQ_CLK_VPU_DEC_ROOT 234
-#define IMX8MQ_CLK_CSI1_ROOT 235
-#define IMX8MQ_CLK_CSI2_ROOT 236
-#define IMX8MQ_CLK_RAWNAND_ROOT 237
-#define IMX8MQ_CLK_SDMA1_ROOT 238
-#define IMX8MQ_CLK_SDMA2_ROOT 239
-#define IMX8MQ_CLK_VPU_G1_ROOT 240
-#define IMX8MQ_CLK_VPU_G2_ROOT 241
+#define IMX8MQ_CLK_GPT1_ROOT 183
+#define IMX8MQ_CLK_I2C1_ROOT 184
+#define IMX8MQ_CLK_I2C2_ROOT 185
+#define IMX8MQ_CLK_I2C3_ROOT 186
+#define IMX8MQ_CLK_I2C4_ROOT 187
+#define IMX8MQ_CLK_M4_ROOT 188
+#define IMX8MQ_CLK_PCIE1_ROOT 189
+#define IMX8MQ_CLK_PCIE2_ROOT 190
+#define IMX8MQ_CLK_PWM1_ROOT 191
+#define IMX8MQ_CLK_PWM2_ROOT 192
+#define IMX8MQ_CLK_PWM3_ROOT 193
+#define IMX8MQ_CLK_PWM4_ROOT 194
+#define IMX8MQ_CLK_QSPI_ROOT 195
+#define IMX8MQ_CLK_SAI1_ROOT 196
+#define IMX8MQ_CLK_SAI2_ROOT 197
+#define IMX8MQ_CLK_SAI3_ROOT 198
+#define IMX8MQ_CLK_SAI4_ROOT 199
+#define IMX8MQ_CLK_SAI5_ROOT 200
+#define IMX8MQ_CLK_SAI6_ROOT 201
+#define IMX8MQ_CLK_UART1_ROOT 202
+#define IMX8MQ_CLK_UART2_ROOT 203
+#define IMX8MQ_CLK_UART3_ROOT 204
+#define IMX8MQ_CLK_UART4_ROOT 205
+#define IMX8MQ_CLK_USB1_CTRL_ROOT 206
+#define IMX8MQ_CLK_USB2_CTRL_ROOT 207
+#define IMX8MQ_CLK_USB1_PHY_ROOT 208
+#define IMX8MQ_CLK_USB2_PHY_ROOT 209
+#define IMX8MQ_CLK_USDHC1_ROOT 210
+#define IMX8MQ_CLK_USDHC2_ROOT 211
+#define IMX8MQ_CLK_WDOG1_ROOT 212
+#define IMX8MQ_CLK_WDOG2_ROOT 213
+#define IMX8MQ_CLK_WDOG3_ROOT 214
+#define IMX8MQ_CLK_GPU_ROOT 215
+#define IMX8MQ_CLK_HEVC_ROOT 216
+#define IMX8MQ_CLK_AVC_ROOT 217
+#define IMX8MQ_CLK_VP9_ROOT 218
+#define IMX8MQ_CLK_HEVC_INTER_ROOT 219
+#define IMX8MQ_CLK_DISP_ROOT 220
+#define IMX8MQ_CLK_HDMI_ROOT 221
+#define IMX8MQ_CLK_HDMI_PHY_ROOT 222
+#define IMX8MQ_CLK_VPU_DEC_ROOT 223
+#define IMX8MQ_CLK_CSI1_ROOT 224
+#define IMX8MQ_CLK_CSI2_ROOT 225
+#define IMX8MQ_CLK_RAWNAND_ROOT 226
+#define IMX8MQ_CLK_SDMA1_ROOT 227
+#define IMX8MQ_CLK_SDMA2_ROOT 228
+#define IMX8MQ_CLK_VPU_G1_ROOT 229
+#define IMX8MQ_CLK_VPU_G2_ROOT 230
/* SCCG PLL GATE */
-#define IMX8MQ_SYS1_PLL_OUT 242
-#define IMX8MQ_SYS2_PLL_OUT 243
-#define IMX8MQ_SYS3_PLL_OUT 244
-#define IMX8MQ_DRAM_PLL_OUT 245
-
-#define IMX8MQ_GPT_3M_CLK 246
-
-#define IMX8MQ_CLK_IPG_ROOT 247
-#define IMX8MQ_CLK_IPG_AUDIO_ROOT 248
-#define IMX8MQ_CLK_SAI1_IPG 249
-#define IMX8MQ_CLK_SAI2_IPG 250
-#define IMX8MQ_CLK_SAI3_IPG 251
-#define IMX8MQ_CLK_SAI4_IPG 252
-#define IMX8MQ_CLK_SAI5_IPG 253
-#define IMX8MQ_CLK_SAI6_IPG 254
+#define IMX8MQ_SYS1_PLL_OUT 231
+#define IMX8MQ_SYS2_PLL_OUT 232
+#define IMX8MQ_SYS3_PLL_OUT 233
+#define IMX8MQ_DRAM_PLL_OUT 234
+
+#define IMX8MQ_GPT_3M_CLK 235
+
+#define IMX8MQ_CLK_IPG_ROOT 236
+#define IMX8MQ_CLK_IPG_AUDIO_ROOT 237
+#define IMX8MQ_CLK_SAI1_IPG 238
+#define IMX8MQ_CLK_SAI2_IPG 239
+#define IMX8MQ_CLK_SAI3_IPG 240
+#define IMX8MQ_CLK_SAI4_IPG 241
+#define IMX8MQ_CLK_SAI5_IPG 242
+#define IMX8MQ_CLK_SAI6_IPG 243
/* DSI AHB/IPG clocks */
/* rxesc clock */
-#define IMX8MQ_CLK_DSI_AHB 255
+#define IMX8MQ_CLK_DSI_AHB 244
/* txesc clock */
-#define IMX8MQ_CLK_DSI_IPG_DIV 256
+#define IMX8MQ_CLK_DSI_IPG_DIV 245
-#define IMX8MQ_CLK_TMU_ROOT 257
+#define IMX8MQ_CLK_TMU_ROOT 246
/* Display root clocks */
-#define IMX8MQ_CLK_DISP_AXI_ROOT 258
-#define IMX8MQ_CLK_DISP_APB_ROOT 259
-#define IMX8MQ_CLK_DISP_RTRM_ROOT 260
+#define IMX8MQ_CLK_DISP_AXI_ROOT 247
+#define IMX8MQ_CLK_DISP_APB_ROOT 248
+#define IMX8MQ_CLK_DISP_RTRM_ROOT 249
-#define IMX8MQ_CLK_OCOTP_ROOT 261
+#define IMX8MQ_CLK_OCOTP_ROOT 250
-#define IMX8MQ_CLK_DRAM_ALT_ROOT 262
-#define IMX8MQ_CLK_DRAM_CORE 263
+#define IMX8MQ_CLK_DRAM_ALT_ROOT 251
+#define IMX8MQ_CLK_DRAM_CORE 252
-#define IMX8MQ_CLK_MU_ROOT 264
-#define IMX8MQ_VIDEO2_PLL_OUT 265
+#define IMX8MQ_CLK_MU_ROOT 253
+#define IMX8MQ_VIDEO2_PLL_OUT 254
-#define IMX8MQ_CLK_CLKO2 266
+#define IMX8MQ_CLK_CLKO2 255
-#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 267
+#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 256
-#define IMX8MQ_CLK_END 268
+#define IMX8MQ_CLK_CLKO1 257
+#define IMX8MQ_CLK_ARM 258
+
+#define IMX8MQ_CLK_GPIO1_ROOT 259
+#define IMX8MQ_CLK_GPIO2_ROOT 260
+#define IMX8MQ_CLK_GPIO3_ROOT 261
+#define IMX8MQ_CLK_GPIO4_ROOT 262
+#define IMX8MQ_CLK_GPIO5_ROOT 263
+
+#define IMX8MQ_CLK_END 264
#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 228a5e234af0..e785c6eb3561 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -71,6 +71,7 @@
#define MMP2_CLK_CCIC1_MIX 117
#define MMP2_CLK_CCIC1_PHY 118
#define MMP2_CLK_CCIC1_SPHY 119
+#define MMP2_CLK_DISP0_LCDC 120
#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 5fe2923382d0..8067077a62ca 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -104,6 +104,7 @@
#define CLKID_MPLL2 95
#define CLKID_NAND_CLK 112
#define CLKID_ABP 124
+#define CLKID_APB 124
#define CLKID_PERIPH 126
#define CLKID_AXI 128
#define CLKID_L2_DRAM 130
diff --git a/include/dt-bindings/clock/mt2712-clk.h b/include/dt-bindings/clock/mt2712-clk.h
index 76265836a1e1..c3b29dff9c0e 100644
--- a/include/dt-bindings/clock/mt2712-clk.h
+++ b/include/dt-bindings/clock/mt2712-clk.h
@@ -228,7 +228,8 @@
#define CLK_TOP_NFI2X_EN 189
#define CLK_TOP_NFIECC_EN 190
#define CLK_TOP_NFI1X_CK_EN 191
-#define CLK_TOP_NR_CLK 192
+#define CLK_TOP_APLL2_D3 192
+#define CLK_TOP_NR_CLK 193
/* INFRACFG */
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
index 8aea623dd518..76e4e5b65353 100644
--- a/include/dt-bindings/clock/mt8173-clk.h
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -194,7 +194,8 @@
#define CLK_INFRA_PMICWRAP 11
#define CLK_INFRA_CLK_13M 12
#define CLK_INFRA_CA53SEL 13
-#define CLK_INFRA_CA57SEL 14
+#define CLK_INFRA_CA57SEL 14 /* Deprecated. Don't use it. */
+#define CLK_INFRA_CA72SEL 14
#define CLK_INFRA_NR_CLK 15
/* PERI_SYS */
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index 3658b0c14966..ede93a0ca156 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -127,5 +127,15 @@
#define RPM_SMD_BIMC_GPU_A_CLK 77
#define RPM_SMD_QPIC_CLK 78
#define RPM_SMD_QPIC_CLK_A 79
+#define RPM_SMD_LN_BB_CLK1 80
+#define RPM_SMD_LN_BB_CLK1_A 81
+#define RPM_SMD_LN_BB_CLK2 82
+#define RPM_SMD_LN_BB_CLK2_A 83
+#define RPM_SMD_LN_BB_CLK3_PIN 84
+#define RPM_SMD_LN_BB_CLK3_A_PIN 85
+#define RPM_SMD_RF_CLK3 86
+#define RPM_SMD_RF_CLK3_A 87
+#define RPM_SMD_RF_CLK3_PIN 88
+#define RPM_SMD_RF_CLK3_A_PIN 89
#endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index f48fbd6f2095..edcab3f7b7d3 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -18,5 +18,6 @@
#define RPMH_RF_CLK2_A 9
#define RPMH_RF_CLK3 10
#define RPMH_RF_CLK3_A 11
+#define RPMH_IPA_CLK 12
#endif
diff --git a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
index 9bc5d45ff4b5..e355363f40c2 100644
--- a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
@@ -54,5 +54,6 @@
#define R8A774A1_CLK_CPEX 43
#define R8A774A1_CLK_R 44
#define R8A774A1_CLK_OSC 45
+#define R8A774A1_CLK_CANFD 46
#endif /* __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
index 8fe51b6aca28..8ad9cd6be8e9 100644
--- a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
@@ -56,5 +56,6 @@
#define R8A774C0_CLK_CSI0 45
#define R8A774C0_CLK_CP 46
#define R8A774C0_CLK_CPEX 47
+#define R8A774C0_CLK_CANFD 48
#endif /* __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
new file mode 100644
index 000000000000..6a0b70a37d78
--- /dev/null
+++ b/include/dt-bindings/clock/sifive-fu540-prci.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+#define __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+
+/* Clock indexes for use by Device Tree data and the PRCI driver */
+
+#define PRCI_CLK_COREPLL 0
+#define PRCI_CLK_DDRPLL 1
+#define PRCI_CLK_GEMGXLPLL 2
+#define PRCI_CLK_TLCLK 3
+
+#endif
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 90ec780bfc68..4cdaf135829c 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -248,7 +248,4 @@
#define STM32MP1_LAST_CLK 232
-#define LTDC_K LTDC_PX
-#define ETHMAC_K ETHCK_K
-
#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index eb81867eac77..8dc10e00c627 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -17,6 +17,7 @@
#define AT91_PINCTRL_DIS_SCHMIT (1 << 4)
#define AT91_PINCTRL_OUTPUT (1 << 7)
#define AT91_PINCTRL_OUTPUT_VAL(x) ((x & 0x1) << 8)
+#define AT91_PINCTRL_SLEWRATE (1 << 9)
#define AT91_PINCTRL_DEBOUNCE (1 << 16)
#define AT91_PINCTRL_DEBOUNCE_VAL(x) (x << 17)
@@ -27,6 +28,9 @@
#define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5)
#define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5)
+#define AT91_PINCTRL_SLEWRATE_DIS (0x0 << 9)
+#define AT91_PINCTRL_SLEWRATE_ENA (0x1 << 9)
+
#define AT91_PIOA 0
#define AT91_PIOB 1
#define AT91_PIOC 2
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
index 8063e8314eef..6d487c5eba2c 100644
--- a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
@@ -51,7 +51,10 @@
#define RESET_SD_EMMC_A 44
#define RESET_SD_EMMC_B 45
#define RESET_SD_EMMC_C 46
-/* 47-60 */
+/* 47 */
+#define RESET_USB_PHY20 48
+#define RESET_USB_PHY21 49
+/* 50-60 */
#define RESET_AUDIO_CODEC 61
/* 62-63 */
/* RESET2 */
diff --git a/include/dt-bindings/reset/g12a-aoclkc.h b/include/dt-bindings/reset/g12a-aoclkc.h
new file mode 100644
index 000000000000..bd2e2337135c
--- /dev/null
+++ b/include/dt-bindings/reset/g12a-aoclkc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK
+#define DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK
+
+#define RESET_AO_IR_IN 0
+#define RESET_AO_UART 1
+#define RESET_AO_I2C_M 2
+#define RESET_AO_I2C_S 3
+#define RESET_AO_SAR_ADC 4
+#define RESET_AO_UART2 5
+#define RESET_AO_IR_OUT 6
+
+#endif
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 359c2f936004..42a93eda331c 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -61,5 +61,13 @@ static inline struct key *get_ima_blacklist_keyring(void)
}
#endif /* CONFIG_IMA_BLACKLIST_KEYRING */
+#if defined(CONFIG_INTEGRITY_PLATFORM_KEYRING) && \
+ defined(CONFIG_SYSTEM_TRUSTED_KEYRING)
+extern void __init set_platform_trusted_keys(struct key *keyring);
+#else
+static inline void set_platform_trusted_keys(struct key *keyring)
+{
+}
+#endif
#endif /* _KEYS_SYSTEM_KEYRING_H */
diff --git a/include/keys/trusted.h b/include/keys/trusted.h
index adbcb6817826..0071298b9b28 100644
--- a/include/keys/trusted.h
+++ b/include/keys/trusted.h
@@ -38,7 +38,7 @@ enum {
int TSS_authhmac(unsigned char *digest, const unsigned char *key,
unsigned int keylen, unsigned char *h1,
- unsigned char *h2, unsigned char h3, ...);
+ unsigned char *h2, unsigned int h3, ...);
int TSS_checkhmac1(unsigned char *buffer,
const uint32_t command,
const unsigned char *ononce,
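
[Editor's aside: the prototype fix above matters because of C's default argument promotions — a variadic callee can never receive a bare unsigned char, so the declaration must match the unsigned int the definition actually reads. A standalone sketch of the same rule, not kernel code:]

    #include <stdarg.h>
    #include <stdio.h>

    /* In a variadic call, an unsigned char argument is promoted to int,
     * so it must be declared and read back at the promoted width --
     * mirroring why TSS_authhmac() now takes "unsigned int h3". */
    static unsigned int first_vararg(int dummy, ...)
    {
            va_list ap;
            unsigned int v;

            va_start(ap, dummy);
            v = va_arg(ap, unsigned int);   /* not "unsigned char" */
            va_end(ap);
            return v;
    }

    int main(void)
    {
            unsigned char h3 = 0xA5;

            printf("%u\n", first_vararg(0, h3));    /* prints 165 */
            return 0;
    }
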
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 33771352dcd6..05a18dd265b5 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -22,7 +22,22 @@
#include <linux/clocksource.h>
#include <linux/hrtimer.h>
+enum kvm_arch_timers {
+ TIMER_PTIMER,
+ TIMER_VTIMER,
+ NR_KVM_TIMERS
+};
+
+enum kvm_arch_timer_regs {
+ TIMER_REG_CNT,
+ TIMER_REG_CVAL,
+ TIMER_REG_TVAL,
+ TIMER_REG_CTL,
+};
+
struct arch_timer_context {
+ struct kvm_vcpu *vcpu;
+
/* Registers: control register, timer value */
u32 cnt_ctl;
u64 cnt_cval;
@@ -30,30 +45,36 @@ struct arch_timer_context {
/* Timer IRQ */
struct kvm_irq_level irq;
+ /* Virtual offset */
+ u64 cntvoff;
+
+ /* Emulated Timer (may be unused) */
+ struct hrtimer hrtimer;
+
/*
- * We have multiple paths which can save/restore the timer state
- * onto the hardware, so we need some way of keeping track of
- * where the latest state is.
- *
- * loaded == true: State is loaded on the hardware registers.
- * loaded == false: State is stored in memory.
+ * We have multiple paths which can save/restore the timer state onto
+ * the hardware, so we need some way of keeping track of where the
+ * latest state is.
*/
- bool loaded;
+ bool loaded;
- /* Virtual offset */
- u64 cntvoff;
+ /* Duplicated state from arch_timer.c for convenience */
+ u32 host_timer_irq;
+ u32 host_timer_irq_flags;
+};
+
+struct timer_map {
+ struct arch_timer_context *direct_vtimer;
+ struct arch_timer_context *direct_ptimer;
+ struct arch_timer_context *emul_ptimer;
};
struct arch_timer_cpu {
- struct arch_timer_context vtimer;
- struct arch_timer_context ptimer;
+ struct arch_timer_context timers[NR_KVM_TIMERS];
/* Background timer used when the guest is not running */
struct hrtimer bg_timer;
- /* Physical timer emulation */
- struct hrtimer phys_timer;
-
/* Is the timer enabled */
bool enabled;
};
@@ -76,9 +97,6 @@ int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
-void kvm_timer_schedule(struct kvm_vcpu *vcpu);
-void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
-
u64 kvm_phys_timer_read(void);
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
@@ -88,7 +106,19 @@ void kvm_timer_init_vhe(void);
bool kvm_arch_timer_get_input_level(int vintid);
-#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer)
-#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer)
+#define vcpu_timer(v) (&(v)->arch.timer_cpu)
+#define vcpu_get_timer(v,t) (&vcpu_timer(v)->timers[(t)])
+#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
+#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
+
+#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
+
+u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
+ enum kvm_arch_timers tmr,
+ enum kvm_arch_timer_regs treg);
+void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
+ enum kvm_arch_timers tmr,
+ enum kvm_arch_timer_regs treg,
+ u64 val);
#endif
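
[Editor's aside: a minimal sketch of how the reworked accessors compose. The helper below is hypothetical and assumes each context's ->vcpu back-pointer has been initialized, as the new struct field implies:]

    /* Iterate both timers of a vCPU and recover each context's index
     * from its pointer alone, via the new arch_timer_ctx_index(). */
    static void walk_vcpu_timers(struct kvm_vcpu *vcpu)
    {
            int i;

            for (i = 0; i < NR_KVM_TIMERS; i++) {
                    struct arch_timer_context *ctx = vcpu_get_timer(vcpu, i);

                    WARN_ON(arch_timer_ctx_index(ctx) != i);
            }
    }
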
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6ac47f5ea514..ca55ae00f8c9 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -400,12 +400,17 @@ extern bool acpi_osi_is_win8(void);
#ifdef CONFIG_ACPI_NUMA
int acpi_map_pxm_to_online_node(int pxm);
+int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
#else
static inline int acpi_map_pxm_to_online_node(int pxm)
{
return 0;
}
+static inline int acpi_map_pxm_to_node(int pxm)
+{
+ return 0;
+}
static inline int acpi_get_node(acpi_handle handle)
{
return 0;
@@ -664,12 +669,14 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
-static inline const char *
-acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+static inline struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
return NULL;
}
+static inline void acpi_dev_put(struct acpi_device *adev) {}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 38cd77b39a64..723e4dfa1c14 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -26,6 +26,14 @@
#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
+/*
+ * PMCG model identifiers for use in the SMMU PMU driver. Please note
+ * that these are purely for the use of software and have nothing to
+ * do with hardware or with the IORT specification.
+ */
+#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
+
int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index d143c13bed26..f99b74a6e4ca 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -25,6 +25,43 @@
#define AMBA_CID 0xb105f00d
#define CORESIGHT_CID 0xb105900d
+/*
+ * CoreSight Architecture specification updates the ID specification
+ * for components on the AMBA bus. (ARM IHI 0029E)
+ *
+ * Bits 15:12 of the CID are the device class.
+ *
+ * Class 0xF remains for PrimeCell and legacy components. (AMBA_CID above)
+ * Class 0x9 defines the component as CoreSight (CORESIGHT_CID above)
+ * Class 0x0, 0x1, 0xB, 0xE define components that do not have driver support
+ * at present.
+ * Class 0x2-0x8, 0xA and 0xC-0xD are presently reserved.
+ *
+ * Remaining CID bits stay as 0xb105-00d
+ */
+
+/**
+ * Class 0x9 components use additional values to form a Unique Component
+ * Identifier (UCI), where peripheral ID values are identical for different
+ * components. Passed to the amba bus code from the component driver via
+ * the amba_id->data pointer.
+ * @devarch : coresight devarch register value
+ * @devarch_mask: mask bits used for matching. 0 indicates UCI not used.
+ * @devtype : coresight device type value
+ * @data : additional driver data. As we have usurped the original
+ * pointer, some devices may still need additional data.
+ */
+struct amba_cs_uci_id {
+ unsigned int devarch;
+ unsigned int devarch_mask;
+ unsigned int devtype;
+ void *data;
+};
+
+/* define offsets for registers used by UCI */
+#define UCI_REG_DEVTYPE_OFFSET 0xFCC
+#define UCI_REG_DEVARCH_OFFSET 0xFBC
+
struct clk;
struct amba_device {
@@ -32,6 +69,8 @@ struct amba_device {
struct resource res;
struct clk *pclk;
unsigned int periphid;
+ unsigned int cid;
+ struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
char *driver_override;
};
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 5a90f28d5ff2..f6034ba774be 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
#define AARP_RESOLVE_TIME (10 * HZ)
extern struct datalink_proto *ddp_dl, *aarp_dl;
-extern void aarp_proto_init(void);
+extern int aarp_proto_init(void);
/* Inter module exports */
@@ -161,16 +161,26 @@ extern int sysctl_aarp_resolve_time;
extern int atalk_register_sysctl(void);
extern void atalk_unregister_sysctl(void);
#else
-#define atalk_register_sysctl() do { } while(0)
-#define atalk_unregister_sysctl() do { } while(0)
+static inline int atalk_register_sysctl(void)
+{
+ return 0;
+}
+static inline void atalk_unregister_sysctl(void)
+{
+}
#endif
#ifdef CONFIG_PROC_FS
extern int atalk_proc_init(void);
extern void atalk_proc_exit(void);
#else
-#define atalk_proc_init() ({ 0; })
-#define atalk_proc_exit() do { } while(0)
+static inline int atalk_proc_init(void)
+{
+ return 0;
+}
+static inline void atalk_proc_exit(void)
+{
+}
#endif /* CONFIG_PROC_FS */
#endif /* __LINUX_ATALK_H__ */
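
[Editor's aside: the macro-to-inline conversion above is not just cosmetic — an empty static inline is type-checked and behaves like a real function even when the config option is off. Illustrative fragment, names hypothetical:]

    /* A do { } while (0) macro can do neither of these; the inline
     * stub can, at the same zero runtime cost. */
    static void teardown(void)
    {
            atalk_proc_exit();                      /* arguments and context checked */
    }

    void (*exit_hook)(void) = atalk_proc_exit;      /* address can be taken */
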
diff --git a/include/linux/bio.h b/include/linux/bio.h
index bb6090aa165d..e584673c1881 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -120,19 +120,23 @@ static inline bool bio_full(struct bio *bio)
return bio->bi_vcnt >= bio->bi_max_vecs;
}
-#define mp_bvec_for_each_segment(bv, bvl, i, iter_all) \
- for (bv = bvec_init_iter_all(&iter_all); \
- (iter_all.done < (bvl)->bv_len) && \
- (mp_bvec_next_segment((bvl), &iter_all), 1); \
- iter_all.done += bv->bv_len, i += 1)
+static inline bool bio_next_segment(const struct bio *bio,
+ struct bvec_iter_all *iter)
+{
+ if (iter->idx >= bio->bi_vcnt)
+ return false;
+
+ bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+ return true;
+}
/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
-#define bio_for_each_segment_all(bvl, bio, i, iter_all) \
- for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++) \
- mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all)
+#define bio_for_each_segment_all(bvl, bio, i, iter) \
+ for (i = 0, bvl = bvec_init_iter_all(&iter); \
+ bio_next_segment((bio), &iter); i++)
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
unsigned bytes)
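
[Editor's aside: the caller-visible shape of the reworked iterator, as a sketch — the helper name is illustrative. The macro keeps its four-argument form but now advances through bio_next_segment() rather than the removed mp_bvec_for_each_segment():]

    static void put_bio_pages(struct bio *bio)
    {
            struct bio_vec *bvec;
            struct bvec_iter_all iter_all;
            int i;

            /* Visits each single-page segment exactly once. */
            bio_for_each_segment_all(bvec, bio, i, iter_all)
                    put_page(bvec->bv_page);
    }
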
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index 50fb0dee23e8..d35b8ec1c485 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
#define __constant_bitrev32(x) \
({ \
- u32 __x = x; \
- __x = (__x >> 16) | (__x << 16); \
- __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
- __x; \
+ u32 ___x = x; \
+ ___x = (___x >> 16) | (___x << 16); \
+ ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
+ ___x; \
})
#define __constant_bitrev16(x) \
({ \
- u16 __x = x; \
- __x = (__x >> 8) | (__x << 8); \
- __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
- __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
- __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
- __x; \
+ u16 ___x = x; \
+ ___x = (___x >> 8) | (___x << 8); \
+ ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
+ ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
+ ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
+ ___x; \
})
#define __constant_bitrev8x4(x) \
({ \
- u32 __x = x; \
- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
- __x; \
+ u32 ___x = x; \
+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
+ ___x; \
})
#define __constant_bitrev8(x) \
({ \
- u8 __x = x; \
- __x = (__x >> 4) | (__x << 4); \
- __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
- __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
- __x; \
+ u8 ___x = x; \
+ ___x = (___x >> 4) | (___x << 4); \
+ ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
+ ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
+ ___x; \
})
#define bitrev32(x) \
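
[Editor's aside: the extra underscore is a real fix, not churn. With the old name, an argument expression that itself mentioned __x made the macro expand to a self-initialization. A minimal reproducer, illustrative only:]

    static u32 demo(void)
    {
            u32 __x = 0x12345678;   /* caller happens to use the name __x */

            /* The old macro expanded to "u32 __x = __x;" -- reading an
             * uninitialized variable. With ___x the expansion becomes
             * "u32 ___x = __x;" and the caller's value is captured. */
            return __constant_bitrev32(__x);
    }
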
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b0c814bcc7e3..db29928de467 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -57,7 +57,6 @@ struct blk_mq_hw_ctx {
unsigned int queue_num;
atomic_t nr_active;
- unsigned int nr_expired;
struct hlist_node cpuhp_dead;
struct kobject kobj;
@@ -300,11 +299,10 @@ void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
- bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
bool blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request_sync(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d66bf5f32610..791fee35df88 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -215,6 +215,7 @@ struct bio {
/*
* bio flags
*/
+#define BIO_NO_PAGE_REF 0 /* don't put/release bvec pages */
#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
#define BIO_CLONED 2 /* doesn't own data */
#define BIO_BOUNCED 3 /* bio is a bounce bio */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index faed9d9eb84c..317ab30d2904 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -50,6 +50,9 @@ struct blk_stat_callback;
/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16
+/* Doing classic polling */
+#define BLK_MQ_POLL_CLASSIC -1
+
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
@@ -216,8 +219,6 @@ struct request {
unsigned short write_hint;
unsigned short ioprio;
- void *special; /* opaque pointer available for LLD use */
-
unsigned int extra_len; /* length of alignment and padding */
enum mq_rq_state state;
@@ -236,9 +237,6 @@ struct request {
*/
rq_end_io_fn *end_io;
void *end_io_data;
-
- /* for bidi */
- struct request *next_rq;
};
static inline bool blk_op_is_scsi(unsigned int op)
@@ -550,7 +548,6 @@ struct request_queue {
struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
- struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
@@ -574,7 +571,6 @@ struct request_queue {
#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
#define QUEUE_FLAG_DYING 1 /* queue being torn down */
-#define QUEUE_FLAG_BIDI 2 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
@@ -640,8 +636,6 @@ static inline bool blk_account_rq(struct request *rq)
return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}
-#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 695b2a880d9a..a4c644c1c091 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -292,7 +292,7 @@ static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
- struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; }
+ struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a2132e09dc1c..944ccc310201 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -193,7 +193,6 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
- ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
};
@@ -511,7 +510,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
} \
_out: \
rcu_read_unlock(); \
- preempt_enable_no_resched(); \
+ preempt_enable(); \
_ret; \
})
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 69f7a3449eda..7d8228d1c898 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -66,6 +66,46 @@ struct bpf_reg_state {
* same reference to the socket, to determine proper reference freeing.
*/
u32 id;
+ /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
+ * from a pointer-cast helper, bpf_sk_fullsock() and
+ * bpf_tcp_sock().
+ *
+ * Consider the following where "sk" is a reference counted
+ * pointer returned from "sk = bpf_sk_lookup_tcp();":
+ *
+ * 1: sk = bpf_sk_lookup_tcp();
+ * 2: if (!sk) { return 0; }
+ * 3: fullsock = bpf_sk_fullsock(sk);
+ * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
+ * 5: tp = bpf_tcp_sock(fullsock);
+ * 6: if (!tp) { bpf_sk_release(sk); return 0; }
+ * 7: bpf_sk_release(sk);
+ * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
+ *
+ * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
+ * "tp" ptr should be invalidated also. In order to do that,
+ * the reg holding "fullsock" and "sk" need to remember
+ * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
+ * such that the verifier can reset all regs which have
+ * ref_obj_id matching the sk_reg->id.
+ *
+ * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
+ * sk_reg->id will stay as NULL-marking purpose only.
+ * After NULL-marking is done, sk_reg->id can be reset to 0.
+ *
+ * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
+ * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
+ *
+ * After "tp = bpf_tcp_sock(fullsock);" at line 5,
+ * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
+ * which is the same as sk_reg->ref_obj_id.
+ *
+ * From the verifier perspective, if sk, fullsock and tp
+ * are not NULL, they are the same ptr with different
+ * reg->type. In particular, bpf_sk_release(tp) is also
+ * allowed and has the same effect as bpf_sk_release(sk).
+ */
+ u32 ref_obj_id;
/* For scalar types (SCALAR_VALUE), this represents our knowledge of
* the actual value.
* For pointer types, this represents the variable part of the offset
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 9cd00a37b8d3..6db2d9a6e503 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -148,6 +148,22 @@
#define BCM_LED_SRC_OFF 0xe /* Tied high */
#define BCM_LED_SRC_ON 0xf /* Tied low */
+/*
+ * Broadcom Multicolor LED configurations (expansion register 4)
+ */
+#define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04)
+#define BCM_LED_MULTICOLOR_IN_PHASE BIT(8)
+#define BCM_LED_MULTICOLOR_LINK_ACT 0x0
+#define BCM_LED_MULTICOLOR_SPEED 0x1
+#define BCM_LED_MULTICOLOR_ACT_FLASH 0x2
+#define BCM_LED_MULTICOLOR_FDX 0x3
+#define BCM_LED_MULTICOLOR_OFF 0x4
+#define BCM_LED_MULTICOLOR_ON 0x5
+#define BCM_LED_MULTICOLOR_ALT 0x6
+#define BCM_LED_MULTICOLOR_FLASH 0x7
+#define BCM_LED_MULTICOLOR_LINK 0x8
+#define BCM_LED_MULTICOLOR_ACT 0x9
+#define BCM_LED_MULTICOLOR_PROGRAM 0xa
/*
* BCM5482: Shadow registers
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index b356e0006731..7f14517a559b 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -69,6 +69,10 @@ struct bsg_job {
int result;
unsigned int reply_payload_rcv_len;
+ /* BIDI support */
+ struct request *bidi_rq;
+ struct bio *bidi_bio;
+
void *dd_data; /* Used for driver-specific storage */
};
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index f6275c4da13a..ff13cbc1887d 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -145,26 +145,33 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
{
- iter_all->bv.bv_page = NULL;
iter_all->done = 0;
+ iter_all->idx = 0;
return &iter_all->bv;
}
-static inline void mp_bvec_next_segment(const struct bio_vec *bvec,
- struct bvec_iter_all *iter_all)
+static inline void bvec_advance(const struct bio_vec *bvec,
+ struct bvec_iter_all *iter_all)
{
struct bio_vec *bv = &iter_all->bv;
- if (bv->bv_page) {
+ if (iter_all->done) {
bv->bv_page = nth_page(bv->bv_page, 1);
bv->bv_offset = 0;
} else {
- bv->bv_page = bvec->bv_page;
- bv->bv_offset = bvec->bv_offset;
+ bv->bv_page = bvec_nth_page(bvec->bv_page, bvec->bv_offset /
+ PAGE_SIZE);
+ bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
}
bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
bvec->bv_len - iter_all->done);
+ iter_all->done += bv->bv_len;
+
+ if (iter_all->done == bvec->bv_len) {
+ iter_all->idx++;
+ iter_all->done = 0;
+ }
}
/*
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index a420c07904bc..337d5049ff93 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -294,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client);
extern int __ceph_open_session(struct ceph_client *client,
unsigned long started);
extern int ceph_open_session(struct ceph_client *client);
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+ unsigned long timeout);
/* pagevec.c */
extern void ceph_release_page_vector(struct page **pages, int num_pages);
diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h
index 27cd973d3881..bd3d532902d7 100644
--- a/include/linux/ceph/types.h
+++ b/include/linux/ceph/types.h
@@ -24,6 +24,7 @@ struct ceph_vino {
/* context for the caps reservation mechanism */
struct ceph_cap_reservation {
int count;
+ int used;
};
diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h
index e94290b29e99..ef1bae2983f3 100644
--- a/include/linux/cgroup_rdma.h
+++ b/include/linux/cgroup_rdma.h
@@ -39,7 +39,7 @@ struct rdmacg_device {
* APIs for RDMA/IB stack to publish when a device wants to
* participate in resource accounting
*/
-int rdmacg_register_device(struct rdmacg_device *device);
+void rdmacg_register_device(struct rdmacg_device *device);
void rdmacg_unregister_device(struct rdmacg_device *device);
/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index e443fa9fa859..b7cf80a71293 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -792,6 +792,9 @@ unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
unsigned long clk_hw_get_flags(const struct clk_hw *hw);
+#define clk_hw_can_set_rate_parent(hw) \
+ (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
+
bool clk_hw_is_prepared(const struct clk_hw *hw);
bool clk_hw_rate_is_protected(const struct clk_hw *hw);
bool clk_hw_is_enabled(const struct clk_hw *hw);
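
[Editor's aside: a sketch of where the new clk_hw_can_set_rate_parent() helper is typically consulted. The callback and field use are illustrative, assuming the common clk_rate_request layout:]

    static int foo_determine_rate(struct clk_hw *hw,
                                  struct clk_rate_request *req)
    {
            /* Only ask the parent to change rate when CLK_SET_RATE_PARENT
             * allows the request to propagate upward. */
            if (clk_hw_can_set_rate_parent(hw))
                    req->best_parent_rate = req->rate;

            return 0;
    }
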
diff --git a/include/linux/clk.h b/include/linux/clk.h
index a7773b5c0b9f..f689fc58d7be 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -384,6 +384,17 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
+ * devm_clk_get_optional - lookup and obtain a managed reference to an optional
+ * clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Behaves the same as devm_clk_get() except where there is no clock producer.
+ * In this case, instead of returning -ENOENT, the function returns NULL.
+ */
+struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
+/**
* devm_get_clk_from_child - lookup and obtain a managed reference to a
* clock producer from child node.
* @dev: device for clock "consumer"
@@ -718,6 +729,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
+static inline struct clk *devm_clk_get_optional(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
@@ -794,6 +811,22 @@ static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
return true;
}
+static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
+ unsigned long max)
+{
+ return 0;
+}
+
+static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
return 0;
@@ -862,6 +895,25 @@ static inline void clk_bulk_disable_unprepare(int num_clks,
clk_bulk_unprepare(num_clks, clks);
}
+/**
+ * clk_get_optional - lookup and obtain a reference to an optional clock
+ * producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Behaves the same as clk_get() except where there is no clock producer. In
+ * this case, instead of returning -ENOENT, the function returns NULL.
+ */
+static inline struct clk *clk_get_optional(struct device *dev, const char *id)
+{
+ struct clk *clk = clk_get(dev, id);
+
+ if (clk == ERR_PTR(-ENOENT))
+ return NULL;
+
+ return clk;
+}
+
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
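
[Editor's aside: the consumer-side effect of the new *_optional variants, as a hedged sketch with hypothetical driver names — a genuinely absent clock is no longer an error path:]

    static int foo_probe(struct device *dev)
    {
            struct clk *bus_clk = devm_clk_get_optional(dev, "bus");

            if (IS_ERR(bus_clk))
                    return PTR_ERR(bus_clk);        /* real lookup failure */

            /* NULL simply means "no such clock"; clk_prepare_enable(NULL)
             * is a no-op, so no special casing is needed. */
            return clk_prepare_enable(bus_clk);
    }
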
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index eacc5df57b99..78872efc7be0 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -160,6 +160,7 @@ struct clk_hw_omap {
struct clockdomain *clkdm;
const struct clk_hw_omap_ops *ops;
u32 context;
+ int autoidle_count;
};
/*
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 4890ff033220..ccb32af5848b 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -52,4 +52,8 @@ int clk_add_alias(const char *, const char *, const char *, struct device *);
int clk_register_clkdev(struct clk *, const char *, const char *);
int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
+int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
+ const char *con_id, const char *dev_id);
+void devm_clk_release_clkdev(struct device *dev, const char *con_id,
+ const char *dev_id);
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 445348facea9..d58aa0db05f9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -67,7 +67,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
.line = __LINE__, \
}; \
______r = !!(cond); \
- ______f.miss_hit[______r]++; \
+ ______r ? ______f.miss_hit[1]++ : ______f.miss_hit[0]++;\
______r; \
}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 5041357d0297..732745f865b7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -137,9 +137,26 @@ static inline int disable_nonboot_cpus(void)
return freeze_secondary_cpus(0);
}
extern void enable_nonboot_cpus(void);
+
+static inline int suspend_disable_secondary_cpus(void)
+{
+ int cpu = 0;
+
+ if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
+ cpu = -1;
+
+ return freeze_secondary_cpus(cpu);
+}
+static inline void suspend_enable_secondary_cpus(void)
+{
+ return enable_nonboot_cpus();
+}
+
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
+static inline int suspend_disable_secondary_cpus(void) { return 0; }
+static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */
void cpu_startup_entry(enum cpuhp_state state);
@@ -175,6 +192,7 @@ enum cpuhp_smt_control {
CPU_SMT_DISABLED,
CPU_SMT_FORCE_DISABLED,
CPU_SMT_NOT_SUPPORTED,
+ CPU_SMT_NOT_IMPLEMENTED,
};
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
@@ -182,9 +200,33 @@ extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology(void);
#else
-# define cpu_smt_control (CPU_SMT_ENABLED)
+# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology(void) { }
#endif
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+ CPU_MITIGATIONS_OFF,
+ CPU_MITIGATIONS_AUTO,
+ CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+extern enum cpu_mitigations cpu_mitigations;
+
+/* mitigations=off */
+static inline bool cpu_mitigations_off(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+
+/* mitigations=auto,nosmt */
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+
#endif /* _LINUX_CPU_H_ */
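
[Editor's aside: how an architecture might consume the new global "mitigations=" switch, sketched with a hypothetical mitigation-selection function:]

    static void foo_select_mitigation(void)
    {
            /* mitigations=off: leave the CPU vulnerable by request. */
            if (cpu_mitigations_off())
                    return;

            /* mitigations=auto,nosmt: also disable SMT. */
            if (cpu_mitigations_auto_nosmt())
                    cpu_smt_disable(false);

            /* ... select and enable the default mitigation here ... */
    }
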
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index b160e98076e3..684caf067003 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+ return cpumask_empty(policy->cpus);
+}
+
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
@@ -193,8 +198,14 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu);
void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
+void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy);
void cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
@@ -322,6 +333,9 @@ struct cpufreq_driver {
/* should be defined, if possible */
unsigned int (*get)(unsigned int cpu);
+ /* Called to update policy limits on firmware notifications. */
+ void (*update_limits)(unsigned int cpu);
+
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e78281d07b70..dbfdd0fadbef 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -147,6 +147,7 @@ enum cpuhp_state {
CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
+ CPUHP_AP_X86_INTEL_EPB_ONLINE,
CPUHP_AP_PERF_ONLINE,
CPUHP_AP_PERF_X86_ONLINE,
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 3b39472324a3..bb9a0db89f1a 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -83,6 +83,7 @@ struct cpuidle_device {
unsigned int use_deepest_state:1;
unsigned int poll_time_limit:1;
unsigned int cpu;
+ ktime_t next_hrtimer;
int last_residency;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
diff --git a/include/linux/cred.h b/include/linux/cred.h
index ddd45bb74887..efb6edf32de7 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -138,7 +138,7 @@ struct cred {
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
- struct key __rcu *session_keyring; /* keyring inherited over fork */
+ struct key *session_keyring; /* keyring inherited over fork */
struct key *process_keyring; /* keyring private to this process */
struct key *thread_keyring; /* keyring private to this thread */
struct key *request_key_auth; /* assumed request_key authority */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e528baebad69..b0672756d056 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -10,6 +10,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
@@ -315,12 +316,6 @@ struct dm_target {
* whether or not its underlying devices have support.
*/
bool discards_supported:1;
-
- /*
- * Set if the target required discard bios to be split
- * on max_io_len boundary.
- */
- bool split_discard_bios:1;
};
/* Each target can link one of these into the table */
@@ -431,6 +426,14 @@ void dm_remap_zone_report(struct dm_target *ti, sector_t start,
struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);
+/*
+ * Device mapper functions to parse and create devices specified by the
+ * parameter "dm-mod.create="
+ */
+int __init dm_early_create(struct dm_ioctl *dmi,
+ struct dm_target_spec **spec_array,
+ char **target_params_array);
+
struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
/*
@@ -609,7 +612,7 @@ do { \
*/
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
-static inline sector_t to_sector(unsigned long n)
+static inline sector_t to_sector(unsigned long long n)
{
return (n >> SECTOR_SHIFT);
}
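
[Editor's aside: the widening of to_sector() matters on 32-bit builds, where unsigned long is 32 bits and byte counts of 4 GiB or more were truncated before the shift. Illustrative arithmetic:]

    unsigned long long bytes = 5ULL << 30;  /* 5 GiB */
    sector_t nr = to_sector(bytes);         /* 10485760 sectors, no truncation */
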
diff --git a/include/linux/device.h b/include/linux/device.h
index f40f6064ba05..4e6987e11f68 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -49,8 +49,6 @@ struct bus_attribute {
ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
};
-#define BUS_ATTR(_name, _mode, _show, _store) \
- struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
#define BUS_ATTR_RW(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
#define BUS_ATTR_RO(_name) \
@@ -1028,8 +1026,10 @@ struct device {
struct list_head dma_pools; /* dma pools (if dma'ble) */
+#ifdef CONFIG_DMA_DECLARE_COHERENT
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
+#endif
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f6ded992c183..75e60be91e5f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -130,6 +130,7 @@ struct dma_map_ops {
enum dma_data_direction direction);
int (*dma_supported)(struct device *dev, u64 mask);
u64 (*get_required_mask)(struct device *dev);
+ size_t (*max_mapping_size)(struct device *dev);
};
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
@@ -153,7 +154,7 @@ static inline int is_device_dma_capable(struct device *dev)
return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
* These three functions are only for dma allocator.
* Don't use them in device drivers.
@@ -192,7 +193,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
{
return 0;
}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
@@ -208,6 +209,8 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_SWIOTLB)
@@ -257,6 +260,8 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
}
#endif
+size_t dma_direct_max_mapping_size(struct device *dev);
+
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
@@ -346,19 +351,20 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
+ dma_addr_t addr = DMA_MAPPING_ERROR;
BUG_ON(!valid_dma_direction(dir));
/* Don't allow RAM to be mapped */
- BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
+ if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
+ return DMA_MAPPING_ERROR;
- addr = phys_addr;
- if (ops && ops->map_resource)
+ if (dma_is_direct(ops))
+ addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+ else if (ops->map_resource)
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
debug_dma_map_resource(dev, phys_addr, size, dir, addr);
-
return addr;
}
@@ -369,7 +375,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops && ops->unmap_resource)
+ if (!dma_is_direct(ops) && ops->unmap_resource)
ops->unmap_resource(dev, addr, size, dir, attrs);
debug_dma_unmap_resource(dev, addr, size, dir);
}
@@ -460,6 +466,7 @@ int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
+size_t dma_max_mapping_size(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct page *page, size_t offset, size_t size,
@@ -561,6 +568,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
{
return 0;
}
+static inline size_t dma_max_mapping_size(struct device *dev)
+{
+ return 0;
+}
#endif /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
@@ -668,15 +679,23 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
-#ifndef arch_setup_dma_ops
+#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent);
+#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu,
- bool coherent) { }
-#endif
+ u64 size, const struct iommu_ops *iommu, bool coherent)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
-#ifndef arch_teardown_dma_ops
-static inline void arch_teardown_dma_ops(struct device *dev) { }
-#endif
+#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
+void arch_teardown_dma_ops(struct device *dev);
+#else
+static inline void arch_teardown_dma_ops(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
@@ -725,19 +744,14 @@ static inline int dma_get_cache_alignment(void)
return 1;
}
-/* flags for the coherent memory api */
-#define DMA_MEMORY_EXCLUSIVE 0x01
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags);
+ dma_addr_t device_addr, size_t size);
void dma_release_declared_memory(struct device *dev);
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags)
+ dma_addr_t device_addr, size_t size)
{
return -ENOSYS;
}
@@ -746,14 +760,7 @@ static inline void
dma_release_declared_memory(struct device *dev)
{
}
-
-static inline void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
- return ERR_PTR(-EBUSY);
-}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
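
[Editor's aside: a sketch of how a driver might use the new dma_max_mapping_size() query. The helper name is hypothetical; the !CONFIG_HAS_DMA stub returns 0, treated here as "no information":]

    static size_t foo_clamp_xfer(struct device *dev, size_t len)
    {
            size_t max = dma_max_mapping_size(dev);

            /* Clamp a transfer to what the mapping layer (e.g. swiotlb)
             * can cover in a single mapping. */
            return max ? min(len, max) : len;
    }
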
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index e166cac8e870..9752f3745f76 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
* Copyright (C) 2014 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DMA_DW_H
#define _DMA_DW_H
@@ -45,9 +42,13 @@ struct dw_dma_chip {
#if IS_ENABLED(CONFIG_DW_DMAC_CORE)
int dw_dma_probe(struct dw_dma_chip *chip);
int dw_dma_remove(struct dw_dma_chip *chip);
+int idma32_dma_probe(struct dw_dma_chip *chip);
+int idma32_dma_remove(struct dw_dma_chip *chip);
#else
static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
+static inline int idma32_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
+static inline int idma32_dma_remove(struct dw_dma_chip *chip) { return 0; }
#endif /* CONFIG_DW_DMAC_CORE */
#endif /* _DMA_DW_H */
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index c46fdb36700b..8de8c4f15163 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -102,9 +102,7 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
extern const char * dmi_get_system_info(int field);
extern const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from);
-extern void dmi_scan_machine(void);
-extern void dmi_memdev_walk(void);
-extern void dmi_set_dump_stack_arch_desc(void);
+extern void dmi_setup(void);
extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
extern int dmi_get_bios_year(void);
extern int dmi_name_in_vendors(const char *str);
@@ -122,9 +120,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0;
static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
-static inline void dmi_scan_machine(void) { return; }
-static inline void dmi_memdev_walk(void) { }
-static inline void dmi_set_dump_stack_arch_desc(void) { }
+static inline void dmi_setup(void) { }
static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
if (yearp)
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ae96ea145ae3..6ebc2098cfe1 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1611,8 +1611,14 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
struct screen_info *si, efi_guid_t *proto,
unsigned long size);
-bool efi_runtime_disabled(void);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
+
extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+extern unsigned long efi_call_virt_save_flags(void);
enum efi_secureboot_mode {
efi_secureboot_mode_unset,
@@ -1658,7 +1664,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
\
arch_efi_call_virt_setup(); \
\
- local_save_flags(__flags); \
+ __flags = efi_call_virt_save_flags(); \
__s = arch_efi_call_virt(p, f, args); \
efi_call_virt_check_flags(__flags, __stringify(f)); \
\
@@ -1673,7 +1679,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
\
arch_efi_call_virt_setup(); \
\
- local_save_flags(__flags); \
+ __flags = efi_call_virt_save_flags(); \
arch_efi_call_virt(p, f, args); \
efi_call_virt_check_flags(__flags, __stringify(f)); \
\
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2e9e2763bf47..6e8bc53740f0 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -31,6 +31,7 @@ struct elevator_mq_ops {
void (*exit_sched)(struct elevator_queue *);
int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*depth_updated)(struct blk_mq_hw_ctx *);
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
diff --git a/include/linux/errno.h b/include/linux/errno.h
index 3cba627577d6..d73f597a2484 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -18,6 +18,7 @@
#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
#define EPROBE_DEFER 517 /* Driver requests probe retry */
#define EOPENSTALE 518 /* open found a stale dentry */
+#define ENOPARAM 519 /* Parameter not supported */
/* Defined for the NFSv3 protocol */
#define EBADHANDLE 521 /* Illegal NFS file handle */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index e2f3b21cd72a..aa8bfd6f738c 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -449,6 +449,18 @@ static inline void eth_addr_dec(u8 *addr)
}
/**
+ * eth_addr_inc() - Increment the given MAC address.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_inc(u8 *addr)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u++;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
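
[Editor's aside: the new eth_addr_inc() mirrors the existing eth_addr_dec(); the round trip through u64 makes the carry propagate across bytes. Illustrative use:]

    static void demo(void)
    {
            u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xff };

            eth_addr_inc(mac);      /* becomes 02:00:00:00:01:00 */
    }
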
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index d7711048ef93..f5740423b002 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -116,6 +116,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_DISABLED_QUICK_FLAG 0x00002000
#define CP_DISABLED_FLAG 0x00001000
#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
@@ -186,7 +187,7 @@ struct f2fs_orphan_block {
struct f2fs_extent {
__le32 fofs; /* start file offset of the extent */
__le32 blk; /* start block address of the extent */
- __le32 len; /* lengh of the extent */
+ __le32 len; /* length of the extent */
} __packed;
#define F2FS_NAME_LEN 255
@@ -284,7 +285,7 @@ enum {
struct node_footer {
__le32 nid; /* node id */
- __le32 ino; /* inode nunmber */
+ __le32 ino; /* inode number */
__le32 flag; /* include cold/fsync/dentry marks and offset */
__le64 cp_ver; /* checkpoint version */
__le32 next_blkaddr; /* next node page block address */
@@ -489,12 +490,12 @@ typedef __le32 f2fs_hash_t;
/*
* space utilization of regular dentry and inline dentry (w/o extra reservation)
- * regular dentry inline dentry
- * bitmap 1 * 27 = 27 1 * 23 = 23
- * reserved 1 * 3 = 3 1 * 7 = 7
- * dentry 11 * 214 = 2354 11 * 182 = 2002
- * filename 8 * 214 = 1712 8 * 182 = 1456
- * total 4096 3488
+ * regular dentry inline dentry (def) inline dentry (min)
+ * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1
+ * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1
+ * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22
+ * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16
+ * total 4096 3488 40
*
* Note: there are more reserved space in inline dentry than in regular
* dentry, when converting inline dentry we should handle this carefully.
@@ -506,12 +507,13 @@ typedef __le32 f2fs_hash_t;
#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
+#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
/* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
struct f2fs_dir_entry {
__le32 hash_code; /* hash code of file name */
__le32 ino; /* inode number */
- __le16 name_len; /* lengh of file name */
+ __le16 name_len; /* length of file name */
__u8 file_type; /* file type */
} __packed;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6074aa064b54..7d3abde3f183 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -20,6 +20,7 @@
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
#include <net/sch_generic.h>
@@ -503,7 +504,6 @@ struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
jit_requested:1,/* archs need to JIT the prog */
- undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1, /* Do we need dst entry? */
@@ -733,24 +733,15 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
- fp->undo_set_mem = 1;
+ set_vm_flush_reset_perms(fp);
set_memory_ro((unsigned long)fp, fp->pages);
}
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
- if (fp->undo_set_mem)
- set_memory_rw((unsigned long)fp, fp->pages);
-}
-
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
+ set_vm_flush_reset_perms(hdr);
set_memory_ro((unsigned long)hdr, hdr->pages);
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
- set_memory_rw((unsigned long)hdr, hdr->pages);
+ set_memory_x((unsigned long)hdr, hdr->pages);
}
static inline struct bpf_binary_header *
@@ -788,7 +779,6 @@ void __bpf_prog_free(struct bpf_prog *fp);
static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
- bpf_prog_unlock_ro(fp);
__bpf_prog_free(fp);
}
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index 5be5dab50b13..01684d935580 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -309,4 +309,23 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12
#define INTEL_SIP_SMC_RSU_UPDATE \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE)
+
+/*
+ * Request INTEL_SIP_SMC_ECC_DBE
+ *
+ * Sync call used by the service driver at EL1 to alert EL3 that a Double
+ * Bit ECC error has occurred.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ECC_DBE
+ * a1 SysManager Double Bit Error value
+ * a2-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ */
+#define INTEL_SIP_SMC_FUNCID_ECC_DBE 13
+#define INTEL_SIP_SMC_ECC_DBE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)
+
#endif
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
deleted file mode 100644
index b94fa61b51fb..000000000000
--- a/include/linux/flex_array.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _FLEX_ARRAY_H
-#define _FLEX_ARRAY_H
-
-#include <linux/types.h>
-#include <linux/reciprocal_div.h>
-#include <asm/page.h>
-
-#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
-#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE
-
-struct flex_array_part;
-
-/*
- * This is meant to replace cases where an array-like
- * structure has gotten too big to fit into kmalloc()
- * and the developer is getting tempted to use
- * vmalloc().
- */
-
-struct flex_array {
- union {
- struct {
- int element_size;
- int total_nr_elements;
- int elems_per_part;
- struct reciprocal_value reciprocal_elems;
- struct flex_array_part *parts[];
- };
- /*
- * This little trick makes sure that
- * sizeof(flex_array) == PAGE_SIZE
- */
- char padding[FLEX_ARRAY_BASE_SIZE];
- };
-};
-
-/* Number of bytes left in base struct flex_array, excluding metadata */
-#define FLEX_ARRAY_BASE_BYTES_LEFT \
- (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts))
-
-/* Number of pointers in base to struct flex_array_part pages */
-#define FLEX_ARRAY_NR_BASE_PTRS \
- (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *))
-
-/* Number of elements of size that fit in struct flex_array_part */
-#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \
- (FLEX_ARRAY_PART_SIZE / size)
-
-/*
- * Defines a statically allocated flex array and ensures its parameters are
- * valid.
- */
-#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \
- struct flex_array __arrayname = { { { \
- .element_size = (__element_size), \
- .total_nr_elements = (__total), \
- } } }; \
- static inline void __arrayname##_invalid_parameter(void) \
- { \
- BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \
- FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
- }
-
-/**
- * flex_array_alloc() - Creates a flexible array.
- * @element_size: individual object size.
- * @total: maximum number of objects which can be stored.
- * @flags: GFP flags
- *
- * Return: Returns an object of structure flex_array.
- */
-struct flex_array *flex_array_alloc(int element_size, unsigned int total,
- gfp_t flags);
-
-/**
- * flex_array_prealloc() - Ensures that memory for the elements indexed in the
- * range defined by start and nr_elements has been allocated.
- * @fa: array to allocate memory to.
- * @start: start address
- * @nr_elements: number of elements to be allocated.
- * @flags: GFP flags
- *
- */
-int flex_array_prealloc(struct flex_array *fa, unsigned int start,
- unsigned int nr_elements, gfp_t flags);
-
-/**
- * flex_array_free() - Removes all elements of a flexible array.
- * @fa: array to be freed.
- */
-void flex_array_free(struct flex_array *fa);
-
-/**
- * flex_array_free_parts() - Removes all elements of a flexible array, but
- * leaves the array itself in place.
- * @fa: array to be emptied.
- */
-void flex_array_free_parts(struct flex_array *fa);
-
-/**
- * flex_array_put() - Stores data into a flexible array.
- * @fa: array where element is to be stored.
- * @element_nr: position to copy, must be less than the maximum specified when
- * the array was created.
- * @src: data source to be copied into the array.
- * @flags: GFP flags
- *
- * Return: Returns zero on success, a negative error code otherwise.
- */
-int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
- gfp_t flags);
-
-/**
- * flex_array_clear() - Clears an individual element in the array, sets the
- * given element to FLEX_ARRAY_FREE.
- * @element_nr: element position to clear.
- * @fa: array to which element to be cleared belongs.
- *
- * Return: Returns zero on success, -EINVAL otherwise.
- */
-int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
-
-/**
- * flex_array_get() - Retrieves data into a flexible array.
- *
- * @element_nr: Element position to retrieve data from.
- * @fa: array from which data is to be retrieved.
- *
- * Return: Returns a pointer to the data element, or NULL if that
- * particular element has never been allocated.
- */
-void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
-
-/**
- * flex_array_shrink() - Reduces the allocated size of an array.
- * @fa: array to shrink.
- *
- * Return: Returns number of pages of memory actually freed.
- *
- */
-int flex_array_shrink(struct flex_array *fa);
-
-#define flex_array_put_ptr(fa, nr, src, gfp) \
- flex_array_put(fa, nr, (void *)&(src), gfp)
-
-void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr);
-
-#endif /* _FLEX_ARRAY_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0a257d89208e..dd28e7679089 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -64,6 +64,8 @@ struct workqueue_struct;
struct iov_iter;
struct fscrypt_info;
struct fscrypt_operations;
+struct fs_context;
+struct fs_parameter_description;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -156,6 +158,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_OPENED ((__force fmode_t)0x80000)
#define FMODE_CREATED ((__force fmode_t)0x100000)
+/* File is stream-like */
+#define FMODE_STREAM ((__force fmode_t)0x200000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -708,7 +713,7 @@ struct inode {
struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+#ifdef CONFIG_FS_ENCRYPTION
struct fscrypt_info *i_crypt_info;
#endif
@@ -1349,6 +1354,7 @@ extern int send_sigurg(struct fown_struct *fown);
/* These sb flags are internal to the kernel */
#define SB_SUBMOUNT (1<<26)
+#define SB_FORCE (1<<27)
#define SB_NOSEC (1<<28)
#define SB_BORN (1<<29)
#define SB_ACTIVE (1<<30)
@@ -1415,7 +1421,7 @@ struct super_block {
void *s_security;
#endif
const struct xattr_handler **s_xattr;
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
@@ -1459,7 +1465,7 @@ struct super_block {
* Filesystem subtype. If non-empty the filesystem type field
* in /proc/mounts will be "type.subtype"
*/
- char *s_subtype;
+ const char *s_subtype;
const struct dentry_operations *s_d_op; /* default d_op for dentries */
@@ -2170,6 +2176,8 @@ struct file_system_type {
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
+ int (*init_fs_context)(struct fs_context *);
+ const struct fs_parameter_description *parameters;
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
@@ -2225,8 +2233,12 @@ void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
+int set_anon_super_fc(struct super_block *s, struct fs_context *fc);
int get_anon_bdev(dev_t *);
void free_anon_bdev(dev_t);
+struct super_block *sget_fc(struct fs_context *fc,
+ int (*test)(struct super_block *, struct fs_context *),
+ int (*set)(struct super_block *, struct fs_context *));
struct super_block *sget_userns(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
@@ -2269,8 +2281,7 @@ mount_pseudo(struct file_system_type *fs_type, char *name,
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
-#define kern_mount(type) kern_mount_data(type, NULL)
+extern struct vfsmount *kern_mount(struct file_system_type *);
extern void kern_unmount(struct vfsmount *mnt);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
@@ -3066,6 +3077,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
+extern int stream_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
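
stream_open() is the intended pairing for the new FMODE_STREAM flag: it marks the file as stream-like so the VFS stops maintaining a file position for it, where nonseekable_open() merely forbids seeking. A minimal sketch for a hypothetical character device whose read/write paths ignore *ppos:

    static int mydev_open(struct inode *inode, struct file *filp)
    {
            /* Sets FMODE_STREAM; reads and writes will not touch f_pos. */
            return stream_open(inode, filp);
    }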
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
new file mode 100644
index 000000000000..eaca452088fa
--- /dev/null
+++ b/include/linux/fs_context.h
@@ -0,0 +1,188 @@
+/* Filesystem superblock creation and reconfiguration context.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_FS_CONTEXT_H
+#define _LINUX_FS_CONTEXT_H
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/security.h>
+
+struct cred;
+struct dentry;
+struct file_operations;
+struct file_system_type;
+struct mnt_namespace;
+struct net;
+struct pid_namespace;
+struct super_block;
+struct user_namespace;
+struct vfsmount;
+struct path;
+
+enum fs_context_purpose {
+ FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */
+ FS_CONTEXT_FOR_SUBMOUNT, /* New superblock for automatic submount */
+ FS_CONTEXT_FOR_RECONFIGURE, /* Superblock reconfiguration (remount) */
+};
+
+/*
+ * Type of parameter value.
+ */
+enum fs_value_type {
+ fs_value_is_undefined,
+ fs_value_is_flag, /* Parameter was not given a value */
+ fs_value_is_string, /* Value is a string */
+ fs_value_is_blob, /* Value is a binary blob */
+ fs_value_is_filename, /* Value is a filename* + dirfd */
+ fs_value_is_filename_empty, /* Value is a filename* + dirfd + AT_EMPTY_PATH */
+ fs_value_is_file, /* Value is a file* */
+};
+
+/*
+ * Configuration parameter.
+ */
+struct fs_parameter {
+ const char *key; /* Parameter name */
+ enum fs_value_type type:8; /* The type of value here */
+ union {
+ char *string;
+ void *blob;
+ struct filename *name;
+ struct file *file;
+ };
+ size_t size;
+ int dirfd;
+};
+
+/*
+ * Filesystem context for holding the parameters used in the creation or
+ * reconfiguration of a superblock.
+ *
+ * Superblock creation fills in ->root whereas reconfiguration begins with this
+ * already set.
+ *
+ * See Documentation/filesystems/mounting.txt
+ */
+struct fs_context {
+ const struct fs_context_operations *ops;
+ struct file_system_type *fs_type;
+ void *fs_private; /* The filesystem's context */
+ struct dentry *root; /* The root and superblock */
+ struct user_namespace *user_ns; /* The user namespace for this mount */
+ struct net *net_ns; /* The network namespace for this mount */
+ const struct cred *cred; /* The mounter's credentials */
+ const char *source; /* The source name (e.g. device path) */
+ const char *subtype; /* The subtype to set on the superblock */
+ void *security; /* LSM options */
+ void *s_fs_info; /* Proposed s_fs_info */
+ unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
+ unsigned int sb_flags_mask; /* Superblock flags that were changed */
+ unsigned int lsm_flags; /* Information flags from the fs to the LSM */
+ enum fs_context_purpose purpose:8;
+ bool need_free:1; /* Need to call ops->free() */
+ bool global:1; /* Goes into &init_user_ns */
+};
+
+struct fs_context_operations {
+ void (*free)(struct fs_context *fc);
+ int (*dup)(struct fs_context *fc, struct fs_context *src_fc);
+ int (*parse_param)(struct fs_context *fc, struct fs_parameter *param);
+ int (*parse_monolithic)(struct fs_context *fc, void *data);
+ int (*get_tree)(struct fs_context *fc);
+ int (*reconfigure)(struct fs_context *fc);
+};
+
+/*
+ * fs_context manipulation functions.
+ */
+extern struct fs_context *fs_context_for_mount(struct file_system_type *fs_type,
+ unsigned int sb_flags);
+extern struct fs_context *fs_context_for_reconfigure(struct dentry *dentry,
+ unsigned int sb_flags,
+ unsigned int sb_flags_mask);
+extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_type,
+ struct dentry *reference);
+
+extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
+extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
+extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ const char *value, size_t v_size);
+extern int generic_parse_monolithic(struct fs_context *fc, void *data);
+extern int vfs_get_tree(struct fs_context *fc);
+extern void put_fs_context(struct fs_context *fc);
+
+/*
+ * sget() wrapper to be called from the ->get_tree() op.
+ */
+enum vfs_get_super_keying {
+ vfs_get_single_super, /* Only one such superblock may exist */
+ vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */
+ vfs_get_independent_super, /* Multiple independent superblocks may exist */
+};
+extern int vfs_get_super(struct fs_context *fc,
+ enum vfs_get_super_keying keying,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
+
+extern const struct file_operations fscontext_fops;
+
+#ifdef CONFIG_PRINTK
+extern __attribute__((format(printf, 2, 3)))
+void logfc(struct fs_context *fc, const char *fmt, ...);
+#else
+static inline __attribute__((format(printf, 2, 3)))
+void logfc(struct fs_context *fc, const char *fmt, ...)
+{
+}
+#endif
+
+/**
+ * infof - Store supplementary informational message
+ * @fc: The context in which to log the informational message
+ * @fmt: The format string
+ *
+ * Store the supplementary informational message for the process if the process
+ * has enabled the facility.
+ */
+#define infof(fc, fmt, ...) ({ logfc(fc, "i "fmt, ## __VA_ARGS__); })
+
+/**
+ * warnf - Store supplementary warning message
+ * @fc: The context in which to log the error message
+ * @fmt: The format string
+ *
+ * Store the supplementary warning message for the process if the process has
+ * enabled the facility.
+ */
+#define warnf(fc, fmt, ...) ({ logfc(fc, "w "fmt, ## __VA_ARGS__); })
+
+/**
+ * errorf - Store supplementary error message
+ * @fc: The context in which to log the error message
+ * @fmt: The format string
+ *
+ * Store the supplementary error message for the process if the process has
+ * enabled the facility.
+ */
+#define errorf(fc, fmt, ...) ({ logfc(fc, "e "fmt, ## __VA_ARGS__); })
+
+/**
+ * invalf - Store supplementary invalid argument error message
+ * @fc: The context in which to log the error message
+ * @fmt: The format string
+ *
+ * Store the supplementary error message for the process if the process has
+ * enabled the facility and return -EINVAL.
+ */
+#define invalf(fc, fmt, ...) ({ errorf(fc, fmt, ## __VA_ARGS__); -EINVAL; })
+
+#endif /* _LINUX_FS_CONTEXT_H */
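
To show how the pieces fit together, a hedged sketch of the filesystem side; everything prefixed myfs_ is hypothetical, and myfs_fill_super() is assumed to exist elsewhere:

    /* Hypothetical glue for a filesystem "myfs" using the new context API. */
    static int myfs_get_tree(struct fs_context *fc)
    {
            return vfs_get_super(fc, vfs_get_independent_super,
                                 myfs_fill_super);
    }

    static const struct fs_context_operations myfs_context_ops = {
            .get_tree = myfs_get_tree,
    };

    static int myfs_init_fs_context(struct fs_context *fc)
    {
            fc->ops = &myfs_context_ops;
            return 0;
    }

    /* Wired up via file_system_type: .init_fs_context = myfs_init_fs_context */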
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
new file mode 100644
index 000000000000..d966f96ffe62
--- /dev/null
+++ b/include/linux/fs_parser.h
@@ -0,0 +1,151 @@
+/* Filesystem parameter description and parser
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_FS_PARSER_H
+#define _LINUX_FS_PARSER_H
+
+#include <linux/fs_context.h>
+
+struct path;
+
+struct constant_table {
+ const char *name;
+ int value;
+};
+
+/*
+ * The type of parameter expected.
+ */
+enum fs_parameter_type {
+ __fs_param_wasnt_defined,
+ fs_param_is_flag,
+ fs_param_is_bool,
+ fs_param_is_u32,
+ fs_param_is_u32_octal,
+ fs_param_is_u32_hex,
+ fs_param_is_s32,
+ fs_param_is_u64,
+ fs_param_is_enum,
+ fs_param_is_string,
+ fs_param_is_blob,
+ fs_param_is_blockdev,
+ fs_param_is_path,
+ fs_param_is_fd,
+ nr__fs_parameter_type,
+};
+
+/*
+ * Specification of the type of value a parameter wants.
+ *
+ * Note that the fsparam_flag(), fsparam_string(), fsparam_u32(), ... macros
+ * should be used to generate elements of this type.
+ */
+struct fs_parameter_spec {
+ const char *name;
+ u8 opt; /* Option number (returned by fs_parse()) */
+ enum fs_parameter_type type:8; /* The desired parameter type */
+ unsigned short flags;
+#define fs_param_v_optional 0x0001 /* The value is optional */
+#define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */
+#define fs_param_neg_with_empty 0x0004 /* "xxx=" is negative param */
+#define fs_param_deprecated 0x0008 /* The param is deprecated */
+};
+
+struct fs_parameter_enum {
+ u8 opt; /* Option number (as fs_parameter_spec::opt) */
+ char name[14];
+ u8 value;
+};
+
+struct fs_parameter_description {
+ const char name[16]; /* Name for logging purposes */
+ const struct fs_parameter_spec *specs; /* List of param specifications */
+ const struct fs_parameter_enum *enums; /* Enum values */
+};
+
+/*
+ * Result of parse.
+ */
+struct fs_parse_result {
+ bool negated; /* T if param was "noxxx" */
+ bool has_value; /* T if value supplied to param */
+ union {
+ bool boolean; /* For spec_bool */
+ int int_32; /* For spec_s32/spec_enum */
+ unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */
+ u64 uint_64; /* For spec_u64 */
+ };
+};
+
+extern int fs_parse(struct fs_context *fc,
+ const struct fs_parameter_description *desc,
+ struct fs_parameter *value,
+ struct fs_parse_result *result);
+extern int fs_lookup_param(struct fs_context *fc,
+ struct fs_parameter *param,
+ bool want_bdev,
+ struct path *_path);
+
+extern int __lookup_constant(const struct constant_table tbl[], size_t tbl_size,
+ const char *name, int not_found);
+#define lookup_constant(t, n, nf) __lookup_constant(t, ARRAY_SIZE(t), (n), (nf))
+
+#ifdef CONFIG_VALIDATE_FS_PARSER
+extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
+ int low, int high, int special);
+extern bool fs_validate_description(const struct fs_parameter_description *desc);
+#else
+static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
+ int low, int high, int special)
+{ return true; }
+static inline bool fs_validate_description(const struct fs_parameter_description *desc)
+{ return true; }
+#endif
+
+/*
+ * Parameter type, name, index and flags element constructors. Use as:
+ *
+ * fsparam_xxxx("foo", Opt_foo)
+ *
+ * If the existing helpers are not enough, direct use of __fsparam() would
+ * work, but any such case is probably a sign that a new helper is needed.
+ * The helpers will remain stable; the low-level implementation may change.
+ */
+#define __fsparam(TYPE, NAME, OPT, FLAGS) \
+ { \
+ .name = NAME, \
+ .opt = OPT, \
+ .type = TYPE, \
+ .flags = FLAGS \
+ }
+
+#define fsparam_flag(NAME, OPT) __fsparam(fs_param_is_flag, NAME, OPT, 0)
+#define fsparam_flag_no(NAME, OPT) \
+ __fsparam(fs_param_is_flag, NAME, OPT, \
+ fs_param_neg_with_no)
+#define fsparam_bool(NAME, OPT) __fsparam(fs_param_is_bool, NAME, OPT, 0)
+#define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0)
+#define fsparam_u32oct(NAME, OPT) \
+ __fsparam(fs_param_is_u32_octal, NAME, OPT, 0)
+#define fsparam_u32hex(NAME, OPT) \
+ __fsparam(fs_param_is_u32_hex, NAME, OPT, 0)
+#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0)
+#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0)
+#define fsparam_enum(NAME, OPT) __fsparam(fs_param_is_enum, NAME, OPT, 0)
+#define fsparam_string(NAME, OPT) \
+ __fsparam(fs_param_is_string, NAME, OPT, 0)
+#define fsparam_blob(NAME, OPT) __fsparam(fs_param_is_blob, NAME, OPT, 0)
+#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0)
+#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0)
+#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0)
+
+
+#endif /* _LINUX_FS_PARSER_H */
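
As a hedged sketch of typical use, a parameter table and ->parse_param() hook for the same hypothetical "myfs"; fs_parse() returns the matched opt number or a negative error, and invalf() comes from fs_context.h:

    enum { Opt_ro_fallback, Opt_nblocks };

    static const struct fs_parameter_spec myfs_param_specs[] = {
            fsparam_flag_no("ro_fallback", Opt_ro_fallback),
            fsparam_u32("nblocks", Opt_nblocks),
            {}
    };

    static const struct fs_parameter_description myfs_parameters = {
            .name  = "myfs",
            .specs = myfs_param_specs,
    };

    static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
    {
            struct fs_parse_result result;
            int opt = fs_parse(fc, &myfs_parameters, param, &result);

            if (opt < 0)
                    return opt;

            switch (opt) {
            case Opt_ro_fallback:
                    /* result.negated is true for "noro_fallback" */
                    break;
            case Opt_nblocks:
                    if (!result.uint_32)
                            return invalf(fc, "myfs: nblocks must be non-zero");
                    break;
            }
            return 0;
    }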
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 952ab97af325..e5194fc3983e 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -2,9 +2,8 @@
/*
* fscrypt.h: declarations for per-file encryption
*
- * Filesystems that implement per-file encryption include this header
- * file with the __FS_HAS_ENCRYPTION set according to whether that filesystem
- * is being built with encryption support or not.
+ * Filesystems that implement per-file encryption must include this header
+ * file.
*
* Copyright (C) 2015, Google, Inc.
*
@@ -15,6 +14,8 @@
#define _LINUX_FSCRYPT_H
#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
#define FS_CRYPTO_BLOCK_SIZE 16
@@ -42,11 +43,410 @@ struct fscrypt_name {
/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28
-#if __FS_HAS_ENCRYPTION
-#include <linux/fscrypt_supp.h>
-#else
-#include <linux/fscrypt_notsupp.h>
-#endif
+#ifdef CONFIG_FS_ENCRYPTION
+/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ unsigned int flags;
+ const char *key_prefix;
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ bool (*dummy_context)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned int max_namelen;
+};
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+};
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return (inode->i_crypt_info != NULL);
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ return inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode);
+}
+
+/* crypto.c */
+extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
+extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
+ unsigned int, unsigned int,
+ u64, gfp_t);
+extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
+ unsigned int, u64);
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+}
+
+extern void fscrypt_restore_control_page(struct page *);
+
+/* policy.c */
+extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
+extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+ void *, bool);
+/* keyinfo.c */
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+ int lookup, struct fscrypt_name *);
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ kfree(fname->crypto_buf.name);
+}
+
+extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
+ struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+ const struct fscrypt_str *, struct fscrypt_str *);
+
+#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
+
+/* Extracts the second-to-last ciphertext block; see explanation below */
+#define FSCRYPT_FNAME_DIGEST(name, len) \
+ ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
+ FS_CRYPTO_BLOCK_SIZE))
+
+#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
+
+/**
+ * fscrypt_digested_name - alternate identifier for an on-disk filename
+ *
+ * When userspace lists an encrypted directory without access to the key,
+ * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
+ * bytes are shown in this abbreviated form (base64-encoded) rather than as the
+ * full ciphertext (base64-encoded). This is necessary to allow supporting
+ * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
+ *
+ * To make it possible for filesystems to still find the correct directory entry
+ * despite not knowing the full on-disk name, we encode any filesystem-specific
+ * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
+ * followed by the second-to-last ciphertext block of the filename. Due to the
+ * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
+ * depends on the full plaintext. (Note that ciphertext stealing causes the
+ * last two blocks to appear "flipped".) This makes accidental collisions very
+ * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
+ * share the same filesystem-specific hashes.
+ *
+ * However, this scheme isn't immune to intentional collisions, which can be
+ * created by anyone able to create arbitrary plaintext filenames and view them
+ * without the key. Making the "digest" be a real cryptographic hash like
+ * SHA-256 over the full ciphertext would prevent this, although it would be
+ * less efficient and harder to implement, especially since the filesystem would
+ * need to calculate it for each directory entry examined during a search.
+ */
+struct fscrypt_digested_name {
+ u32 hash;
+ u32 minor_hash;
+ u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
+};
+
+/**
+ * fscrypt_match_name() - test whether the given name matches a directory entry
+ * @fname: the name being searched for
+ * @de_name: the name from the directory entry
+ * @de_name_len: the length of @de_name in bytes
+ *
+ * Normally @fname->disk_name will be set, and in that case we simply compare
+ * that to the name stored in the directory entry. The only exception is that
+ * if we don't have the key for an encrypted directory and a filename in it is
+ * very long, then we won't have the full disk_name and we'll instead need to
+ * match against the fscrypt_digested_name.
+ *
+ * Return: %true if the name matches, otherwise %false.
+ */
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ if (unlikely(!fname->disk_name.name)) {
+ const struct fscrypt_digested_name *n =
+ (const void *)fname->crypto_buf.name;
+ if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
+ return false;
+ if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
+ return false;
+ return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
+ n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
+ }
+
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+/* bio.c */
+extern void fscrypt_decrypt_bio(struct bio *);
+extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+ struct bio *bio);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
+ unsigned int);
+
+/* hooks.c */
+extern int fscrypt_file_open(struct inode *inode, struct file *filp);
+extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
+extern int __fscrypt_prepare_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags);
+extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
+extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link);
+extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link);
+extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done);
+#else /* !CONFIG_FS_ENCRYPTION */
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return false;
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ return false;
+}
+
+/* crypto.c */
+static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+}
+
+static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
+ gfp_t gfp_flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+ return;
+}
+
+static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
+ struct page *page,
+ unsigned int len,
+ unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_decrypt_page(const struct inode *inode,
+ struct page *page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void fscrypt_restore_control_page(struct page *page)
+{
+ return;
+}
+
+/* policy.c */
+static inline int fscrypt_ioctl_set_policy(struct file *filp,
+ const void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_has_permitted_context(struct inode *parent,
+ struct inode *child)
+{
+ return 0;
+}
+
+static inline int fscrypt_inherit_context(struct inode *parent,
+ struct inode *child,
+ void *fs_data, bool preload)
+{
+ return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_get_encryption_info(struct inode *inode)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_put_encryption_info(struct inode *inode)
+{
+ return;
+}
+
+/* fname.c */
+static inline int fscrypt_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+}
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ return;
+}
+
+static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
+ u32 max_encrypted_len,
+ struct fscrypt_str *crypto_str)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+ return;
+}
+
+static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ /* Encryption support disabled; use standard comparison */
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+/* bio.c */
+static inline void fscrypt_decrypt_bio(struct bio *bio)
+{
+}
+
+static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+ struct bio *bio)
+{
+}
+
+static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
+{
+ return;
+}
+
+static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len)
+{
+ return -EOPNOTSUPP;
+}
+
+/* hooks.c */
+
+static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
+{
+ if (IS_ENCRYPTED(inode))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline int __fscrypt_prepare_link(struct inode *inode,
+ struct inode *dir)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_lookup(struct inode *dir,
+ struct dentry *dentry)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_symlink(struct inode *dir,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
+{
+ return -EOPNOTSUPP;
+}
+
+
+static inline int __fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline const char *fscrypt_get_symlink(struct inode *inode,
+ const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* !CONFIG_FS_ENCRYPTION */
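
A hedged sketch of a directory-entry scan built on the helper above; myfs_dirent is hypothetical. The same call works with and without the key, falling back to the digested-name comparison when the full disk name is unavailable:

    struct myfs_dirent {
            __le16 name_len;
            char name[];
    };

    static bool myfs_entry_matches(const struct fscrypt_name *fname,
                                   const struct myfs_dirent *de)
    {
            return fscrypt_match_name(fname, (const u8 *)de->name,
                                      le16_to_cpu(de->name_len));
    }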
/**
* fscrypt_require_key - require an inode's encryption key
@@ -89,7 +489,7 @@ static inline int fscrypt_require_key(struct inode *inode)
* in an encrypted directory tree use the same encryption policy.
*
* Return: 0 on success, -ENOKEY if the directory's encryption key is missing,
- * -EPERM if the link would result in an inconsistent encryption policy, or
+ * -EXDEV if the link would result in an inconsistent encryption policy, or
* another -errno code.
*/
static inline int fscrypt_prepare_link(struct dentry *old_dentry,
@@ -119,7 +519,7 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry,
* We also verify that the rename will not violate the constraint that all files
* in an encrypted directory tree use the same encryption policy.
*
- * Return: 0 on success, -ENOKEY if an encryption key is missing, -EPERM if the
+ * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the
* rename would cause inconsistent encryption policies, or another -errno code.
*/
static inline int fscrypt_prepare_rename(struct inode *old_dir,
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
deleted file mode 100644
index ee8b43e4c15a..000000000000
--- a/include/linux/fscrypt_notsupp.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * fscrypt_notsupp.h
- *
- * This stubs out the fscrypt functions for filesystems configured without
- * encryption support.
- *
- * Do not include this file directly. Use fscrypt.h instead!
- */
-#ifndef _LINUX_FSCRYPT_H
-#error "Incorrect include of linux/fscrypt_notsupp.h!"
-#endif
-
-#ifndef _LINUX_FSCRYPT_NOTSUPP_H
-#define _LINUX_FSCRYPT_NOTSUPP_H
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
- return false;
-}
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
- return false;
-}
-
-/* crypto.c */
-static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
-{
-}
-
-static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
- gfp_t gfp_flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- return;
-}
-
-static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
- struct page *page,
- unsigned int len,
- unsigned int offs,
- u64 lblk_num, gfp_t gfp_flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline int fscrypt_decrypt_page(const struct inode *inode,
- struct page *page,
- unsigned int len, unsigned int offs,
- u64 lblk_num)
-{
- return -EOPNOTSUPP;
-}
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
- WARN_ON_ONCE(1);
- return ERR_PTR(-EINVAL);
-}
-
-static inline void fscrypt_restore_control_page(struct page *page)
-{
- return;
-}
-
-/* policy.c */
-static inline int fscrypt_ioctl_set_policy(struct file *filp,
- const void __user *arg)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_has_permitted_context(struct inode *parent,
- struct inode *child)
-{
- return 0;
-}
-
-static inline int fscrypt_inherit_context(struct inode *parent,
- struct inode *child,
- void *fs_data, bool preload)
-{
- return -EOPNOTSUPP;
-}
-
-/* keyinfo.c */
-static inline int fscrypt_get_encryption_info(struct inode *inode)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_put_encryption_info(struct inode *inode)
-{
- return;
-}
-
- /* fname.c */
-static inline int fscrypt_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct fscrypt_name *fname)
-{
- if (IS_ENCRYPTED(dir))
- return -EOPNOTSUPP;
-
- memset(fname, 0, sizeof(struct fscrypt_name));
- fname->usr_fname = iname;
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
- return 0;
-}
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
- return;
-}
-
-static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
- u32 max_encrypted_len,
- struct fscrypt_str *crypto_str)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
-{
- return;
-}
-
-static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
- u32 hash, u32 minor_hash,
- const struct fscrypt_str *iname,
- struct fscrypt_str *oname)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
- const u8 *de_name, u32 de_name_len)
-{
- /* Encryption support disabled; use standard comparison */
- if (de_name_len != fname->disk_name.len)
- return false;
- return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-static inline void fscrypt_decrypt_bio(struct bio *bio)
-{
-}
-
-static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
- struct bio *bio)
-{
-}
-
-static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
- return;
-}
-
-static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
- sector_t pblk, unsigned int len)
-{
- return -EOPNOTSUPP;
-}
-
-/* hooks.c */
-
-static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
-{
- if (IS_ENCRYPTED(inode))
- return -EOPNOTSUPP;
- return 0;
-}
-
-static inline int __fscrypt_prepare_link(struct inode *inode,
- struct inode *dir)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_prepare_rename(struct inode *old_dir,
- struct dentry *old_dentry,
- struct inode *new_dir,
- struct dentry *new_dentry,
- unsigned int flags)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_prepare_lookup(struct inode *dir,
- struct dentry *dentry)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_prepare_symlink(struct inode *dir,
- unsigned int len,
- unsigned int max_len,
- struct fscrypt_str *disk_link)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_encrypt_symlink(struct inode *inode,
- const char *target,
- unsigned int len,
- struct fscrypt_str *disk_link)
-{
- return -EOPNOTSUPP;
-}
-
-static inline const char *fscrypt_get_symlink(struct inode *inode,
- const void *caddr,
- unsigned int max_size,
- struct delayed_call *done)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
deleted file mode 100644
index 6456c6b2005f..000000000000
--- a/include/linux/fscrypt_supp.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * fscrypt_supp.h
- *
- * Do not include this file directly. Use fscrypt.h instead!
- */
-#ifndef _LINUX_FSCRYPT_H
-#error "Incorrect include of linux/fscrypt_supp.h!"
-#endif
-
-#ifndef _LINUX_FSCRYPT_SUPP_H
-#define _LINUX_FSCRYPT_SUPP_H
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto operations for filesystems
- */
-struct fscrypt_operations {
- unsigned int flags;
- const char *key_prefix;
- int (*get_context)(struct inode *, void *, size_t);
- int (*set_context)(struct inode *, const void *, size_t, void *);
- bool (*dummy_context)(struct inode *);
- bool (*empty_dir)(struct inode *);
- unsigned int max_namelen;
-};
-
-struct fscrypt_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
-};
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
- return (inode->i_crypt_info != NULL);
-}
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
- return inode->i_sb->s_cop->dummy_context &&
- inode->i_sb->s_cop->dummy_context(inode);
-}
-
-/* crypto.c */
-extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
-extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
- unsigned int, unsigned int,
- u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
- unsigned int, u64);
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
- return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-}
-
-extern void fscrypt_restore_control_page(struct page *);
-
-/* policy.c */
-extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
-extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
-extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
-extern int fscrypt_inherit_context(struct inode *, struct inode *,
- void *, bool);
-/* keyinfo.c */
-extern int fscrypt_get_encryption_info(struct inode *);
-extern void fscrypt_put_encryption_info(struct inode *);
-
-/* fname.c */
-extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
- int lookup, struct fscrypt_name *);
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
- kfree(fname->crypto_buf.name);
-}
-
-extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
- struct fscrypt_str *);
-extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
-extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
- const struct fscrypt_str *, struct fscrypt_str *);
-
-#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
-
-/* Extracts the second-to-last ciphertext block; see explanation below */
-#define FSCRYPT_FNAME_DIGEST(name, len) \
- ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
- FS_CRYPTO_BLOCK_SIZE))
-
-#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
-
-/**
- * fscrypt_digested_name - alternate identifier for an on-disk filename
- *
- * When userspace lists an encrypted directory without access to the key,
- * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
- * bytes are shown in this abbreviated form (base64-encoded) rather than as the
- * full ciphertext (base64-encoded). This is necessary to allow supporting
- * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
- *
- * To make it possible for filesystems to still find the correct directory entry
- * despite not knowing the full on-disk name, we encode any filesystem-specific
- * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
- * followed by the second-to-last ciphertext block of the filename. Due to the
- * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
- * depends on the full plaintext. (Note that ciphertext stealing causes the
- * last two blocks to appear "flipped".) This makes accidental collisions very
- * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
- * share the same filesystem-specific hashes.
- *
- * However, this scheme isn't immune to intentional collisions, which can be
- * created by anyone able to create arbitrary plaintext filenames and view them
- * without the key. Making the "digest" be a real cryptographic hash like
- * SHA-256 over the full ciphertext would prevent this, although it would be
- * less efficient and harder to implement, especially since the filesystem would
- * need to calculate it for each directory entry examined during a search.
- */
-struct fscrypt_digested_name {
- u32 hash;
- u32 minor_hash;
- u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
-};
-
-/**
- * fscrypt_match_name() - test whether the given name matches a directory entry
- * @fname: the name being searched for
- * @de_name: the name from the directory entry
- * @de_name_len: the length of @de_name in bytes
- *
- * Normally @fname->disk_name will be set, and in that case we simply compare
- * that to the name stored in the directory entry. The only exception is that
- * if we don't have the key for an encrypted directory and a filename in it is
- * very long, then we won't have the full disk_name and we'll instead need to
- * match against the fscrypt_digested_name.
- *
- * Return: %true if the name matches, otherwise %false.
- */
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
- const u8 *de_name, u32 de_name_len)
-{
- if (unlikely(!fname->disk_name.name)) {
- const struct fscrypt_digested_name *n =
- (const void *)fname->crypto_buf.name;
- if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
- return false;
- if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
- return false;
- return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
- n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
- }
-
- if (de_name_len != fname->disk_name.len)
- return false;
- return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-extern void fscrypt_decrypt_bio(struct bio *);
-extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
- struct bio *bio);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
-extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
- unsigned int);
-
-/* hooks.c */
-extern int fscrypt_file_open(struct inode *inode, struct file *filp);
-extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
-extern int __fscrypt_prepare_rename(struct inode *old_dir,
- struct dentry *old_dentry,
- struct inode *new_dir,
- struct dentry *new_dentry,
- unsigned int flags);
-extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
-extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
- unsigned int max_len,
- struct fscrypt_str *disk_link);
-extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
- unsigned int len,
- struct fscrypt_str *disk_link);
-extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
- unsigned int max_size,
- struct delayed_call *done);
-
-#endif /* _LINUX_FSCRYPT_SUPP_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 730876187344..20899919ead8 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -241,21 +241,11 @@ static inline void ftrace_free_mem(struct module *mod, void *start, void *end) {
#ifdef CONFIG_STACK_TRACER
-#define STACK_TRACE_ENTRIES 500
-
-struct stack_trace;
-
-extern unsigned stack_trace_index[];
-extern struct stack_trace stack_trace_max;
-extern unsigned long stack_trace_max_size;
-extern arch_spinlock_t stack_trace_max_lock;
-
extern int stack_tracer_enabled;
-void stack_trace_print(void);
-int
-stack_trace_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+
+int stack_trace_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
new file mode 100644
index 000000000000..3a91130a4fbd
--- /dev/null
+++ b/include/linux/generic-radix-tree.h
@@ -0,0 +1,231 @@
+#ifndef _LINUX_GENERIC_RADIX_TREE_H
+#define _LINUX_GENERIC_RADIX_TREE_H
+
+/**
+ * DOC: Generic radix trees/sparse arrays
+ *
+ * Very simple and minimalistic, supporting arbitrary size entries up to
+ * PAGE_SIZE.
+ *
+ * A genradix is defined with the type it will store, like so:
+ *
+ * static GENRADIX(struct foo) foo_genradix;
+ *
+ * The main operations are:
+ *
+ * - genradix_init(radix) - initialize an empty genradix
+ *
+ * - genradix_free(radix) - free all memory owned by the genradix and
+ * reinitialize it
+ *
+ * - genradix_ptr(radix, idx) - gets a pointer to the entry at idx, returning
+ * NULL if that entry does not exist
+ *
+ * - genradix_ptr_alloc(radix, idx, gfp) - gets a pointer to an entry,
+ * allocating it if necessary
+ *
+ * - genradix_for_each(radix, iter, p) - iterate over each entry in a genradix
+ *
+ * The radix tree allocates one page of entries at a time, so entries may exist
+ * that were never explicitly allocated - they will be initialized to all
+ * zeroes.
+ *
+ * Internally, a genradix is just a radix tree of pages, and indexing works in
+ * terms of byte offsets. The wrappers in this header file use sizeof on the
+ * type the radix contains to calculate a byte offset from the index - see
+ * __idx_to_offset.
+ */
+
+#include <asm/page.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+
+struct genradix_root;
+
+struct __genradix {
+ struct genradix_root __rcu *root;
+};
+
+/*
+ * NOTE: currently, sizeof(_type) must not be larger than PAGE_SIZE.
+ */
+
+#define __GENRADIX_INITIALIZER \
+ { \
+ .tree = { \
+ .root = NULL, \
+ } \
+ }
+
+/*
+ * We use a 0 size array to stash the type we're storing without taking any
+ * space at runtime - then the various accessor macros can use typeof() to get
+ * to it for casts/sizeof - we also force the alignment so that storing a type
+ * with a ridiculous alignment doesn't blow up the alignment or size of the
+ * genradix.
+ */
+
+#define GENRADIX(_type) \
+struct { \
+ struct __genradix tree; \
+ _type type[0] __aligned(1); \
+}
+
+#define DEFINE_GENRADIX(_name, _type) \
+ GENRADIX(_type) _name = __GENRADIX_INITIALIZER
+
+/**
+ * genradix_init - initialize a genradix
+ * @_radix: genradix to initialize
+ *
+ * Does not fail
+ */
+#define genradix_init(_radix) \
+do { \
+ *(_radix) = (typeof(*_radix)) __GENRADIX_INITIALIZER; \
+} while (0)
+
+void __genradix_free(struct __genradix *);
+
+/**
+ * genradix_free - free all memory owned by a genradix
+ * @_radix: the genradix to free
+ *
+ * After freeing, @_radix will be reinitialized and empty
+ */
+#define genradix_free(_radix) __genradix_free(&(_radix)->tree)
+
+static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
+{
+ if (__builtin_constant_p(obj_size))
+ BUILD_BUG_ON(obj_size > PAGE_SIZE);
+ else
+ BUG_ON(obj_size > PAGE_SIZE);
+
+ if (!is_power_of_2(obj_size)) {
+ size_t objs_per_page = PAGE_SIZE / obj_size;
+
+ return (idx / objs_per_page) * PAGE_SIZE +
+ (idx % objs_per_page) * obj_size;
+ } else {
+ return idx * obj_size;
+ }
+}
+
+#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *)
+#define __genradix_obj_size(_radix) sizeof((_radix)->type[0])
+#define __genradix_idx_to_offset(_radix, _idx) \
+ __idx_to_offset(_idx, __genradix_obj_size(_radix))
+
+void *__genradix_ptr(struct __genradix *, size_t);
+
+/**
+ * genradix_ptr - get a pointer to a genradix entry
+ * @_radix: genradix to access
+ * @_idx: index to fetch
+ *
+ * Returns a pointer to the entry at @_idx, or NULL if that entry does not exist.
+ */
+#define genradix_ptr(_radix, _idx) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)))
+
+void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
+
+/**
+ * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it
+ * if necessary
+ * @_radix: genradix to access
+ * @_idx: index to fetch
+ * @_gfp: gfp mask
+ *
+ * Returns a pointer to the entry at @_idx, or NULL on allocation failure.
+ */
+#define genradix_ptr_alloc(_radix, _idx, _gfp) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _gfp))
+
+struct genradix_iter {
+ size_t offset;
+ size_t pos;
+};
+
+/**
+ * genradix_iter_init - initialize a genradix_iter
+ * @_radix: genradix that will be iterated over
+ * @_idx: index to start iterating from
+ */
+#define genradix_iter_init(_radix, _idx) \
+ ((struct genradix_iter) { \
+ .pos = (_idx), \
+ .offset = __genradix_idx_to_offset((_radix), (_idx)),\
+ })
+
+void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
+
+/**
+ * genradix_iter_peek - get first entry at or above iterator's current
+ * position
+ * @_iter: a genradix_iter
+ * @_radix: genradix being iterated over
+ *
+ * If no more entries exist at or above @_iter's current position, returns NULL
+ */
+#define genradix_iter_peek(_iter, _radix) \
+ (__genradix_cast(_radix) \
+ __genradix_iter_peek(_iter, &(_radix)->tree, \
+ PAGE_SIZE / __genradix_obj_size(_radix)))
+
+static inline void __genradix_iter_advance(struct genradix_iter *iter,
+ size_t obj_size)
+{
+ iter->offset += obj_size;
+
+ if (!is_power_of_2(obj_size) &&
+ (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE)
+ iter->offset = round_up(iter->offset, PAGE_SIZE);
+
+ iter->pos++;
+}
+
+#define genradix_iter_advance(_iter, _radix) \
+ __genradix_iter_advance(_iter, __genradix_obj_size(_radix))
+
+#define genradix_for_each_from(_radix, _iter, _p, _start) \
+ for (_iter = genradix_iter_init(_radix, _start); \
+ (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \
+ genradix_iter_advance(&_iter, _radix))
+
+/**
+ * genradix_for_each - iterate over each entry in a genradix
+ * @_radix: genradix to iterate over
+ * @_iter: a genradix_iter to track current position
+ * @_p: pointer to genradix entry type
+ *
+ * On every iteration, @_p will point to the current entry, and @_iter.pos
+ * will be the current entry's index.
+ */
+#define genradix_for_each(_radix, _iter, _p) \
+ genradix_for_each_from(_radix, _iter, _p, 0)
+
+int __genradix_prealloc(struct __genradix *, size_t, gfp_t);
+
+/**
+ * genradix_prealloc - preallocate entries in a generic radix tree
+ * @_radix: genradix to preallocate
+ * @_nr: number of entries to preallocate
+ * @_gfp: gfp mask
+ *
+ * Returns 0 on success, -ENOMEM on failure
+ */
+#define genradix_prealloc(_radix, _nr, _gfp) \
+ __genradix_prealloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _nr + 1),\
+ _gfp)
+
+
+#endif /* _LINUX_GENERIC_RADIX_TREE_H */
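
A hedged usage sketch tying the operations above together; struct foo and the helper functions are hypothetical:

    struct foo {
            u64 key;
    };

    static GENRADIX(struct foo) foo_genradix;

    static int store_foo(size_t idx, const struct foo *src)
    {
            struct foo *p = genradix_ptr_alloc(&foo_genradix, idx, GFP_KERNEL);

            if (!p)
                    return -ENOMEM;
            *p = *src;              /* never-written entries read back as zeroes */
            return 0;
    }

    static void walk_foos(void)
    {
            struct genradix_iter iter;
            struct foo *p;

            genradix_for_each(&foo_genradix, iter, p)
                    pr_info("entry at index %zu\n", iter.pos);
    }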
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 0fbbcdf0c178..da0af631ded5 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -60,8 +60,14 @@ extern void irq_enter(void);
*/
extern void irq_exit(void);
+#ifndef arch_nmi_enter
+#define arch_nmi_enter() do { } while (0)
+#define arch_nmi_exit() do { } while (0)
+#endif
+
#define nmi_enter() \
do { \
+ arch_nmi_enter(); \
printk_nmi_enter(); \
lockdep_off(); \
ftrace_nmi_enter(); \
@@ -80,6 +86,7 @@ extern void irq_exit(void);
ftrace_nmi_exit(); \
lockdep_on(); \
printk_nmi_exit(); \
+ arch_nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
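
Architectures that need to track NMI state can now override the hooks; everyone else inherits the empty defaults above. A hedged sketch of what an arch's asm/hardirq.h might provide (the per-CPU counter is hypothetical):

    /* In a hypothetical arch header: count NMI nesting per CPU. */
    DECLARE_PER_CPU(unsigned int, arch_nmi_depth);

    #define arch_nmi_enter()        __this_cpu_inc(arch_nmi_depth)
    #define arch_nmi_exit()         __this_cpu_dec(arch_nmi_depth)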
diff --git a/include/linux/hid.h b/include/linux/hid.h
index f9707d1dcb58..ae9da674b749 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -382,6 +382,7 @@ struct hid_item {
#define HID_GROUP_WACOM 0x0101
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
#define HID_GROUP_STEAM 0x0103
+#define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104
/*
* HID protocol status
@@ -417,6 +418,7 @@ struct hid_global {
struct hid_local {
unsigned usage[HID_MAX_USAGES]; /* usage array */
+ u8 usage_size[HID_MAX_USAGES]; /* usage size array */
unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
unsigned usage_index;
unsigned usage_minimum;
@@ -893,7 +895,7 @@ struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
-void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 66f9ebbb1df3..ad50b7b4f141 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -468,7 +468,7 @@ struct hmm_devmem_ops {
* Note that mmap semaphore is held in read mode at least when this
* callback occurs, hence the vma is valid upon callback entry.
*/
- int (*fault)(struct hmm_devmem *devmem,
+ vm_fault_t (*fault)(struct hmm_devmem *devmem,
struct vm_area_struct *vma,
unsigned long addr,
const struct page *page,
@@ -511,7 +511,7 @@ struct hmm_devmem_ops {
* chunk, as an optimization. It must, however, prioritize the faulting address
* over all the others.
*/
-typedef int (*dev_page_fault_t)(struct vm_area_struct *vma,
+typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
unsigned long addr,
const struct page *page,
unsigned int flags,
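
With the typedef switched to vm_fault_t, handlers return VM_FAULT_* codes instead of raw integers. A hedged sketch of a ->fault hook under the new signature, assuming the trailing parameters (flags, pmdp) follow the surrounding definition; the body is elided:

    static vm_fault_t mydev_devmem_fault(struct hmm_devmem *devmem,
                                         struct vm_area_struct *vma,
                                         unsigned long addr,
                                         const struct page *page,
                                         unsigned int flags,
                                         pmd_t *pmdp)
    {
            /* ... migrate the device page back to system memory ... */
            return VM_FAULT_SIGBUS;         /* on failure; 0 on success */
    }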
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ea35263eb76b..11943b60f208 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -203,7 +203,6 @@ static inline void hugetlb_show_meminfo(void)
#define pud_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
src_addr, pagep) ({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz) 0
@@ -234,6 +233,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
{
BUG();
}
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags)
+{
+ BUG();
+ return 0;
+}
#endif /* !CONFIG_HUGETLB_PAGE */
/*
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 99e0c1b0b5fb..2b949fa501e1 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -40,6 +40,11 @@ enum hwmon_chip_attributes {
hwmon_chip_register_tz,
hwmon_chip_update_interval,
hwmon_chip_alarms,
+ hwmon_chip_samples,
+ hwmon_chip_curr_samples,
+ hwmon_chip_in_samples,
+ hwmon_chip_power_samples,
+ hwmon_chip_temp_samples,
};
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
@@ -49,6 +54,11 @@ enum hwmon_chip_attributes {
#define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz)
#define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval)
#define HWMON_C_ALARMS BIT(hwmon_chip_alarms)
+#define HWMON_C_SAMPLES BIT(hwmon_chip_samples)
+#define HWMON_C_CURR_SAMPLES BIT(hwmon_chip_curr_samples)
+#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples)
+#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
+#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
enum hwmon_temp_attributes {
hwmon_temp_input = 0,
@@ -365,6 +375,14 @@ struct hwmon_channel_info {
const u32 *config;
};
+#define HWMON_CHANNEL_INFO(stype, ...) \
+ (&(struct hwmon_channel_info) { \
+ .type = hwmon_##stype, \
+ .config = (u32 []) { \
+ __VA_ARGS__, 0 \
+ } \
+ })
+
/**
* Chip configuration
* @ops: Pointer to hwmon operations.
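The HWMON_CHANNEL_INFO() macro replaces the open-coded pattern of one named u32 config array plus one named hwmon_channel_info per channel type. A typical (illustrative) driver table using it:

static const struct hwmon_channel_info *my_hwmon_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
	HWMON_CHANNEL_INFO(temp,
			   HWMON_T_INPUT | HWMON_T_MAX,	/* channel 0 */
			   HWMON_T_INPUT),		/* channel 1 */
	NULL
};

Each invocation expands to a compound literal whose config array is terminated with a trailing 0 by the macro itself, so drivers no longer need to remember the sentinel.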
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index cc85f4524dbf..9c94b2ea789c 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -110,7 +110,7 @@ struct ip_mc_list {
static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len)
{
if (skb_transport_offset(skb) + ip_transport_len(skb) < len)
- return -EINVAL;
+ return 0;
return pskb_may_pull(skb, len);
}
diff --git a/include/linux/ima.h b/include/linux/ima.h
index b5e16b8c50b7..fd9f7cf4cdf5 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -18,6 +18,7 @@ struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_file_check(struct file *file, int mask);
+extern void ima_post_create_tmpfile(struct inode *inode);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern int ima_load_data(enum kernel_load_data_id id);
@@ -30,7 +31,7 @@ extern void ima_post_path_mknod(struct dentry *dentry);
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
-#if defined(CONFIG_X86) && defined(CONFIG_EFI)
+#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390)
extern bool arch_ima_get_secureboot(void);
extern const char * const *arch_get_ima_policy(void);
#else
@@ -56,6 +57,10 @@ static inline int ima_file_check(struct file *file, int mask)
return 0;
}
+static inline void ima_post_create_tmpfile(struct inode *inode)
+{
+}
+
static inline void ima_file_free(struct file *file)
{
return;
diff --git a/include/linux/input/ili210x.h b/include/linux/input/ili210x.h
deleted file mode 100644
index b76e7c1404cd..000000000000
--- a/include/linux/input/ili210x.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ILI210X_H
-#define _ILI210X_H
-
-struct ili210x_platform_data {
- unsigned long irq_flags;
- unsigned int poll_period;
- bool (*get_pendown_state)(void);
-};
-
-#endif
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 0605f3bf6e79..fa364de9db18 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -374,20 +374,17 @@ enum {
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
-#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
-#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32)
-#define QI_PGRP_RESP_CODE(res) ((u64)(res))
-#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
-#define QI_PGRP_DID(did) (((u64)(did)) << 16)
+/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
+#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
+#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
+#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
+#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
+
+/* Page group response descriptor QW1 */
+#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
+#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
-#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK)
-#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4)
-#define QI_PSTRM_RESP_CODE(res) ((u64)(res))
-#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55)
-#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32)
-#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24)
-#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4)
#define QI_RESP_SUCCESS 0x0
#define QI_RESP_INVALID 0x1
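With the reworked layout, a page group response is composed per quadword instead of through the removed QI_PSTRM_* stream-response macros. A hedged sketch of building the two quadwords from only the fields shown above (real descriptors also carry type bits not visible in this hunk):

#include <linux/intel-iommu.h>
#include <linux/types.h>

static void my_build_pgrp_resp(u64 *qw0, u64 *qw1, u32 pasid, u16 rid,
			       bool last_page, u16 prg_index)
{
	*qw0 = QI_PGRP_PASID(pasid) | QI_PGRP_DID(rid) |
	       QI_PGRP_PASID_P(1) | QI_PGRP_RESP_CODE(QI_RESP_SUCCESS);
	*qw1 = QI_PGRP_LPIG(last_page) | QI_PGRP_IDX(prg_index);
}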
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
new file mode 100644
index 000000000000..16255c2ca2f4
--- /dev/null
+++ b/include/linux/intel-ish-client-if.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel ISH client Interface definitions
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef _INTEL_ISH_CLIENT_IF_H_
+#define _INTEL_ISH_CLIENT_IF_H_
+
+struct ishtp_cl_device;
+struct ishtp_device;
+struct ishtp_cl;
+struct ishtp_fw_client;
+
+/* Client state */
+enum cl_state {
+ ISHTP_CL_INITIALIZING = 0,
+ ISHTP_CL_CONNECTING,
+ ISHTP_CL_CONNECTED,
+ ISHTP_CL_DISCONNECTING,
+ ISHTP_CL_DISCONNECTED
+};
+
+/**
+ * struct ishtp_cl_driver - ISHTP client driver handle
+ * @driver: driver instance on a bus
+ * @name: Name of the device for probe
+ * @probe: driver callback for device probe
+ * @remove: driver callback on device removal
+ *
+ * Client drivers define these callbacks to get probed/removed for an
+ * ISHTP client device.
+ */
+struct ishtp_cl_driver {
+ struct device_driver driver;
+ const char *name;
+ const guid_t *guid;
+ int (*probe)(struct ishtp_cl_device *dev);
+ int (*remove)(struct ishtp_cl_device *dev);
+ int (*reset)(struct ishtp_cl_device *dev);
+ const struct dev_pm_ops *pm;
+};
+
+/**
+ * struct ishtp_msg_data - ISHTP message data struct
+ * @size: Size of the data pointed to by @data
+ * @data: Pointer to data
+ */
+struct ishtp_msg_data {
+ uint32_t size;
+ unsigned char *data;
+};
+
+/**
+ * struct ishtp_cl_rb - request block structure
+ * @list: Link to list members
+ * @cl: ISHTP client instance
+ * @buffer: message header
+ * @buf_idx: Index into buffer
+ * @read_time: unused at this time
+ */
+struct ishtp_cl_rb {
+ struct list_head list;
+ struct ishtp_cl *cl;
+ struct ishtp_msg_data buffer;
+ unsigned long buf_idx;
+ unsigned long read_time;
+};
+
+int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner);
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*read_cb)(struct ishtp_cl_device *));
+
+/* Get the device * from ishtp device instance */
+struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* Trace interface for clients */
+void *ishtp_trace_callback(struct ishtp_cl_device *cl_device);
+/* Get device pointer of PCI device for DMA access */
+struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
+
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device);
+void ishtp_cl_free(struct ishtp_cl *cl);
+int ishtp_cl_link(struct ishtp_cl *cl);
+void ishtp_cl_unlink(struct ishtp_cl *cl);
+int ishtp_cl_disconnect(struct ishtp_cl *cl);
+int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
+int ishtp_cl_flush_queues(struct ishtp_cl *cl);
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
+void *ishtp_get_client_data(struct ishtp_cl *cl);
+void ishtp_set_client_data(struct ishtp_cl *cl, void *data);
+struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl);
+void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size);
+void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size);
+void ishtp_set_connection_state(struct ishtp_cl *cl, int state);
+void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id);
+
+void ishtp_put_device(struct ishtp_cl_device *cl_dev);
+void ishtp_get_device(struct ishtp_cl_device *cl_dev);
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const guid_t *uuid);
+int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client);
+int ish_hw_reset(struct ishtp_device *dev);
+#endif /* _INTEL_ISH_CLIENT_IF_H_ */
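As a rough sketch of how a client is expected to consume this new interface (the driver and function names are illustrative and error handling is abbreviated):

#include <linux/intel-ish-client-if.h>
#include <linux/module.h>

static int my_ishtp_probe(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl = ishtp_cl_allocate(cl_device);

	if (!cl)
		return -ENOMEM;
	/* ishtp_cl_link(), ishtp_cl_connect(), ring sizing, etc. follow. */
	ishtp_set_drvdata(cl_device, cl);
	return 0;
}

static int my_ishtp_remove(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);

	ishtp_cl_free(cl);
	return 0;
}

static struct ishtp_cl_driver my_ishtp_driver = {
	.name	= "my-ishtp-client",	/* hypothetical */
	.probe	= my_ishtp_probe,
	.remove	= my_ishtp_remove,
};

/* Registered at module init with:
 *	ishtp_cl_driver_register(&my_ishtp_driver, THIS_MODULE);
 */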
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index 99bc5b3ae26e..e3f76315ca4d 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -20,7 +20,7 @@ struct device;
struct svm_dev_ops {
void (*fault_cb)(struct device *dev, int pasid, u64 address,
- u32 private, int rwxp, int response);
+ void *private, int rwxp, int response);
};
/* Values for rxwp in fault_cb callback */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 690b238a44d5..c7eef32e7739 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -668,31 +668,6 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
-struct tasklet_hrtimer {
- struct hrtimer timer;
- struct tasklet_struct tasklet;
- enum hrtimer_restart (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
- enum hrtimer_restart (*function)(struct hrtimer *),
- clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
-{
- hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
- hrtimer_cancel(&ttimer->timer);
- tasklet_kill(&ttimer->tasklet);
-}
-
/*
* Autoprobing for irqs:
*
diff --git a/drivers/iommu/io-pgtable.h b/include/linux/io-pgtable.h
index 47d5ae559329..47d5ae559329 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/include/linux/io-pgtable.h
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e90da6b6f3d1..ffbbc7e39cee 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -167,8 +167,9 @@ struct iommu_resv_region {
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
- * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
* @iotlb_range_add: Add a given iova range to the flush queue for this domain
+ * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
* @iova_to_phys: translate iova to physical address
@@ -183,6 +184,8 @@ struct iommu_resv_region {
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
* @of_xlate: add OF master IDs to iommu grouping
+ * @is_attach_deferred: Check if domain attach should be deferred from iommu
+ * driver init to device driver init (default no)
* @pgsize_bitmap: bitmap of all possible supported page sizes
*/
struct iommu_ops {
@@ -201,6 +204,7 @@ struct iommu_ops {
void (*flush_iotlb_all)(struct iommu_domain *domain);
void (*iotlb_range_add)(struct iommu_domain *domain,
unsigned long iova, size_t size);
+ void (*iotlb_sync_map)(struct iommu_domain *domain);
void (*iotlb_sync)(struct iommu_domain *domain);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d6160d479b14..7ae8de5ad0f2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -195,7 +195,7 @@ struct irq_data {
* IRQD_LEVEL - Interrupt is level triggered
* IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
* from suspend
- * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process
+ * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
* context
* IRQD_IRQ_DISABLED - Disabled state of the interrupt
* IRQD_IRQ_MASKED - Masked state of the interrupt
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 626179077bb0..0f049b384ccd 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -158,8 +158,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
* Legacy platforms not converted to DT yet must use this to init
* their GIC
*/
-void gic_init(unsigned int nr, int start,
- void __iomem *dist , void __iomem *cpu);
+void gic_init(void __iomem *dist, void __iomem *cpu);
int gicv2m_init(struct fwnode_handle *parent_handle,
struct irq_domain *parent);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0f919d5fe84f..c2ffff5f9ae2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1606,7 +1606,6 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
JBD_MAX_CHECKSUM_SIZE);
desc.shash.tfm = journal->j_chksum_driver;
- desc.shash.flags = 0;
*(u32 *)desc.ctx = crc;
err = crypto_shash_update(&desc.shash, address, length);
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index a49f2b45b3f0..42710d5949ba 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -12,21 +12,79 @@ struct static_key_deferred {
struct delayed_work work;
};
-extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
-extern void static_key_deferred_flush(struct static_key_deferred *key);
+struct static_key_true_deferred {
+ struct static_key_true key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
+struct static_key_false_deferred {
+ struct static_key_false key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
+#define static_key_slow_dec_deferred(x) \
+ __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
+#define static_branch_slow_dec_deferred(x) \
+ __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
+
+#define static_key_deferred_flush(x) \
+ __static_key_deferred_flush((x), &(x)->work)
+
+extern void
+__static_key_slow_dec_deferred(struct static_key *key,
+ struct delayed_work *work,
+ unsigned long timeout);
+extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+extern void jump_label_update_timeout(struct work_struct *work);
+
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \
+ struct static_key_true_deferred name = { \
+ .key = { STATIC_KEY_INIT_TRUE }, \
+ .timeout = (rl), \
+ .work = __DELAYED_WORK_INITIALIZER((name).work, \
+ jump_label_update_timeout, \
+ 0), \
+ }
+
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \
+ struct static_key_false_deferred name = { \
+ .key = { STATIC_KEY_INIT_FALSE }, \
+ .timeout = (rl), \
+ .work = __DELAYED_WORK_INITIALIZER((name).work, \
+ jump_label_update_timeout, \
+ 0), \
+ }
+
+#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key)
+
#else /* !CONFIG_JUMP_LABEL */
struct static_key_deferred {
struct static_key key;
};
+struct static_key_true_deferred {
+ struct static_key_true key;
+};
+struct static_key_false_deferred {
+ struct static_key_false key;
+};
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \
+ struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT }
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \
+ struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT }
+
+#define static_branch_slow_dec_deferred(x) static_branch_dec(&(x)->key)
+
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
STATIC_KEY_CHECK_USE(key);
static_key_slow_dec(&key->key);
}
-static inline void static_key_deferred_flush(struct static_key_deferred *key)
+static inline void static_key_deferred_flush(void *key)
{
STATIC_KEY_CHECK_USE(key);
}
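A minimal sketch of the deferred-key API added above, assuming a feature toggled often enough that immediate slow-path decrements would thrash text patching; the key name and the HZ timeout are illustrative:

#include <linux/jump_label_ratelimit.h>

/* False by default; stays enabled for at least HZ jiffies after the
 * last put, so rapid get/put cycles avoid repeated repatching. */
static DEFINE_STATIC_KEY_DEFERRED_FALSE(my_feature_key, HZ);

static void my_feature_get(void)
{
	static_branch_deferred_inc(&my_feature_key);
}

static void my_feature_put(void)
{
	static_branch_slow_dec_deferred(&my_feature_key);
}

/* Fast path test: static_branch_unlikely(&my_feature_key.key) */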
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 8c3f8c14eeaa..da676cdbd727 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -38,22 +38,13 @@ struct vmcoredd_node {
#ifdef CONFIG_PROC_KCORE
void __init kclist_add(struct kcore_list *, void *, size_t, int type);
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
- m->vaddr = (unsigned long)vaddr;
- kclist_add(m, addr, sz, KCORE_REMAP);
-}
+
+extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
#else
static inline
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
}
-
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-}
#endif
#endif /* _LINUX_KCORE_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 34a5036debd3..2d14e21c16c0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -47,8 +47,8 @@
#define u64_to_user_ptr(x) ( \
{ \
- typecheck(u64, x); \
- (void __user *)(uintptr_t)x; \
+ typecheck(u64, (x)); \
+ (void __user *)(uintptr_t)(x); \
} \
)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 0cac1207bb00..c8893f663470 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -26,7 +26,9 @@ struct vm_area_struct;
struct super_block;
struct file_system_type;
struct poll_table_struct;
+struct fs_context;
+struct kernfs_fs_context;
struct kernfs_open_node;
struct kernfs_iattrs;
@@ -168,7 +170,6 @@ struct kernfs_node {
* kernfs_node parameter.
*/
struct kernfs_syscall_ops {
- int (*remount_fs)(struct kernfs_root *root, int *flags, char *data);
int (*show_options)(struct seq_file *sf, struct kernfs_root *root);
int (*mkdir)(struct kernfs_node *parent, const char *name,
@@ -272,6 +273,18 @@ struct kernfs_ops {
#endif
};
+/*
+ * The kernfs superblock creation/mount parameter context.
+ */
+struct kernfs_fs_context {
+ struct kernfs_root *root; /* Root of the hierarchy being mounted */
+ void *ns_tag; /* Namespace tag of the mount (or NULL) */
+ unsigned long magic; /* File system specific magic number */
+
+ /* The following are set/used by kernfs_mount() */
+	bool new_sb_created;	/* Set to true if we allocated a new sb */
+};
+
#ifdef CONFIG_KERNFS
static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
@@ -359,11 +372,9 @@ __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
void kernfs_notify(struct kernfs_node *kn);
const void *kernfs_super_ns(struct super_block *sb);
-struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created, const void *ns);
+int kernfs_get_tree(struct fs_context *fc);
+void kernfs_free_fs_context(struct fs_context *fc);
void kernfs_kill_sb(struct super_block *sb);
-struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
void kernfs_init(void);
@@ -465,11 +476,10 @@ static inline void kernfs_notify(struct kernfs_node *kn) { }
static inline const void *kernfs_super_ns(struct super_block *sb)
{ return NULL; }
-static inline struct dentry *
-kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created, const void *ns)
-{ return ERR_PTR(-ENOSYS); }
+static inline int kernfs_get_tree(struct fs_context *fc)
+{ return -ENOSYS; }
+
+static inline void kernfs_free_fs_context(struct fs_context *fc) { }
static inline void kernfs_kill_sb(struct super_block *sb) { }
@@ -552,13 +562,4 @@ static inline int kernfs_rename(struct kernfs_node *kn,
return kernfs_rename_ns(kn, new_parent, new_name, NULL);
}
-static inline struct dentry *
-kernfs_mount(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created)
-{
- return kernfs_mount_ns(fs_type, flags, root,
- magic, new_sb_created, NULL);
-}
-
#endif /* __LINUX_KERNFS_H */
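A hedged sketch of a filesystem switching to the new API: its ->get_tree() fills a kernfs_fs_context (assumed to have been stashed in fc->fs_private by the filesystem's init_fs_context routine) and hands off to kernfs_get_tree(). All names prefixed my/MYFS are illustrative:

static struct kernfs_root *my_kernfs_root;	/* hypothetical, set up elsewhere */
#define MYFS_SUPER_MAGIC 0x12345678		/* hypothetical */

static int myfs_get_tree(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;
	int ret;

	kfc->root = my_kernfs_root;
	kfc->magic = MYFS_SUPER_MAGIC;
	kfc->ns_tag = NULL;

	ret = kernfs_get_tree(fc);
	if (!ret && kfc->new_sb_created)
		pr_debug("myfs: created a new superblock\n");
	return ret;
}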
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 201f0f2683f2..9a897256e481 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -173,6 +173,7 @@ struct kretprobe_instance {
struct kretprobe *rp;
kprobe_opcode_t *ret_addr;
struct task_struct *task;
+ void *fp;
char data[0];
};
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c38cc5eb7e73..640a03642766 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -28,6 +28,7 @@
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <linux/refcount.h>
+#include <linux/nospec.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -48,6 +49,27 @@
*/
#define KVM_MEMSLOT_INVALID (1UL << 16)
+/*
+ * Bit 63 of the memslot generation number is an "update in-progress flag",
+ * e.g. is temporarily set for the duration of install_new_memslots().
+ * This flag effectively creates a unique generation number that is used to
+ * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
+ * i.e. may (or may not) have come from the previous memslots generation.
+ *
+ * This is necessary because the actual memslots update is not atomic with
+ * respect to the generation number update. Updating the generation number
+ * first would allow a vCPU to cache a spte from the old memslots using the
+ * new generation number, and updating the generation number after switching
+ * to the new memslots would allow cache hits using the old generation number
+ * to reference the defunct memslots.
+ *
+ * This mechanism is used to prevent getting hits in KVM's caches while a
+ * memslot update is in-progress, and to prevent cache hits *after* updating
+ * the actual generation number against accesses that were inserted into the
+ * cache *before* the memslots were updated.
+ */
+#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)
+
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
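To make the flag's intended use concrete, here is a hedged consumer-side sketch of a staleness check (illustrative only; the actual MMIO cache code differs):

/* Caller must hold kvm->srcu for kvm_memslots(). */
static bool my_cached_gen_is_stale(struct kvm *kvm, u64 cached_gen)
{
	u64 gen = kvm_memslots(kvm)->generation;

	/* Entries tagged, or lookups made, while an update was in
	 * progress must be treated as misses. */
	if ((gen | cached_gen) & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)
		return true;
	return cached_gen != gen;
}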
@@ -492,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
- /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
- * the caller has read kvm->online_vcpus before (as is the case
- * for kvm_for_each_vcpu, for example).
- */
+ int num_vcpus = atomic_read(&kvm->online_vcpus);
+ i = array_index_nospec(i, num_vcpus);
+
+ /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb();
return kvm->vcpus[i];
}
@@ -579,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
+ as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock) ||
!refcount_read(&kvm->users_count));
@@ -634,7 +657,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages);
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
const struct kvm_userspace_memory_region *mem,
@@ -1182,6 +1205,7 @@ extern bool kvm_rebooting;
extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
+extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;
struct kvm_device {
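The kvm_get_vcpu() and __kvm_memslots() changes both follow the standard Spectre-v1 hardening pattern: bounds-check the index, then clamp it with array_index_nospec() so mis-speculation cannot dereference an attacker-chosen out-of-range value. The general shape, for a hypothetical table:

#include <linux/nospec.h>

struct my_entry;

static struct my_entry *my_lookup(struct my_entry **table, int i, int n)
{
	if (i < 0 || i >= n)
		return NULL;
	/* Under speculation, an out-of-range i is forced to 0. */
	i = array_index_nospec(i, n);
	return table[i];
}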
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 68133842e6d7..c9419c05a90a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1122,10 +1122,11 @@ extern int ata_host_activate(struct ata_host *host, int irq,
extern void ata_host_detach(struct ata_host *host);
extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
extern int ata_scsi_detect(struct scsi_host_template *sht);
-extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
- int cmd, void __user *arg);
+ unsigned int cmd, void __user *arg);
extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
struct ata_port_info *, struct Scsi_Host *);
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index ad609617aeb8..feb342d026f2 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -42,6 +42,8 @@ enum {
NDD_SECURITY_OVERWRITE = 3,
/* tracking whether or not there is a pending device reference */
NDD_WORK_PENDING = 4,
+ /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */
+ NDD_NOBLK = 5,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -128,6 +130,7 @@ struct nd_region_desc {
void *provider_data;
int num_lanes;
int numa_node;
+ int target_node;
unsigned long flags;
struct device_node *of_node;
};
diff --git a/include/linux/list.h b/include/linux/list.h
index 79626b5ab36c..58aa3adf94e6 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -207,7 +207,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
}
/**
- * list_is_first -- tests whether @ list is the first entry in list @head
+ * list_is_first -- tests whether @list is the first entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 53551f470722..a14bab1a0a3e 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -86,7 +86,6 @@ struct klp_func {
struct list_head node;
struct list_head stack_node;
unsigned long old_size, new_size;
- bool kobj_added;
bool nop;
bool patched;
bool transition;
@@ -141,7 +140,6 @@ struct klp_object {
struct list_head func_list;
struct list_head node;
struct module *mod;
- bool kobj_added;
bool dynamic;
bool patched;
};
@@ -170,7 +168,6 @@ struct klp_patch {
struct list_head list;
struct kobject kobj;
struct list_head obj_list;
- bool kobj_added;
bool enabled;
bool forced;
struct work_struct free_work;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 79c3873d58ac..6e2377e6c1d6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,6 +66,11 @@ struct lock_class_key {
extern struct lock_class_key __lockdep_no_validate__;
+struct lock_trace {
+ unsigned int nr_entries;
+ unsigned int offset;
+};
+
#define LOCKSTAT_POINTS 4
/*
@@ -100,7 +105,7 @@ struct lock_class {
* IRQ/softirq usage tracking bits:
*/
unsigned long usage_mask;
- struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
+ struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES];
/*
* Generation counter, when doing certain classes of graph walking,
@@ -188,7 +193,7 @@ struct lock_list {
struct list_head entry;
struct lock_class *class;
struct lock_class *links_to;
- struct stack_trace trace;
+ struct lock_trace trace;
int distance;
/*
@@ -471,7 +476,7 @@ struct pin_cookie { };
#define NIL_COOKIE (struct pin_cookie){ }
-#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; })
+#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 85a301632cf1..a240a3fc5fc4 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -76,6 +76,22 @@
* changes on the process such as clearing out non-inheritable signal
* state. This is called immediately after commit_creds().
*
+ * Security hooks for mount using fs_context.
+ * [See also Documentation/filesystems/mounting.txt]
+ *
+ * @fs_context_dup:
+ * Allocate and attach a security structure to sc->security. This pointer
+ * is initialised to NULL by the caller.
+ * @fc indicates the new filesystem context.
+ * @src_fc indicates the original filesystem context.
+ * @fs_context_parse_param:
+ * Userspace provided a parameter to configure a superblock. The LSM can
+ * reject it with an error, or consume it for itself (in which case it
+ * should return 0); otherwise it should return -ENOPARAM to pass it on
+ * to the filesystem.
+ * @fc indicates the filesystem context.
+ * @param contains the parameter being parsed.
+ *
* Security hooks for filesystem operations.
*
* @sb_alloc_security:
@@ -111,7 +127,6 @@
* options cleanly (a filesystem may modify the data e.g. with strsep()).
* This also allows the original mount data to be stripped of security-
* specific options to avoid having to make filesystems aware of them.
- * @type the type of filesystem being mounted.
* @orig the original mount data copied from userspace.
* @copy copied data which will be passed to the security module.
* Returns 0 if the copy was successful.
@@ -304,10 +319,11 @@
* @new_dentry contains the dentry structure of the new link.
* Return 0 if permission is granted.
* @path_chmod:
- * Check for permission to change DAC's permission of a file or directory.
- * @dentry contains the dentry structure.
- * @mnt contains the vfsmnt structure.
- * @mode contains DAC's mode.
+ * Check for permission to change a mode of the file @path. The new
+ * mode is specified in @mode.
+ * @path contains the path structure of the file to change the mode.
+ * @mode contains the new DAC's permission, which is a bitmask of
+ * constants from <include/uapi/linux/stat.h>
* Return 0 if permission is granted.
* @path_chown:
* Check for permission to change owner/group of a file or directory.
@@ -486,7 +502,7 @@
* Return 0 if permission is granted.
* @file_lock:
* Check permission before performing file locking operations.
- * Note: this hook mediates both flock and fcntl style locks.
+ * Note the hook mediates both flock and fcntl style locks.
* @file contains the file structure.
* @cmd contains the posix-translated lock operation to perform
* (e.g. F_RDLCK, F_WRLCK).
@@ -629,12 +645,12 @@
* @p contains the task_struct of process.
* @nice contains the new nice value.
* Return 0 if permission is granted.
- * @task_setioprio
+ * @task_setioprio:
* Check permission before setting the ioprio value of @p to @ioprio.
* @p contains the task_struct of process.
* @ioprio contains the new ioprio value
* Return 0 if permission is granted.
- * @task_getioprio
+ * @task_getioprio:
* Check permission before getting the ioprio value of @p.
* @p contains the task_struct of process.
* Return 0 if permission is granted.
@@ -656,17 +672,15 @@
* Return 0 if permission is granted.
* @task_setscheduler:
* Check permission before setting scheduling policy and/or parameters of
- * process @p based on @policy and @lp.
+ * process @p.
* @p contains the task_struct for process.
- * @policy contains the scheduling policy.
- * @lp contains the scheduling parameters.
* Return 0 if permission is granted.
* @task_getscheduler:
* Check permission before obtaining scheduling information for process
* @p.
* @p contains the task_struct for process.
* Return 0 if permission is granted.
- * @task_movememory
+ * @task_movememory:
* Check permission before moving memory owned by process @p.
* @p contains the task_struct for process.
* Return 0 if permission is granted.
@@ -753,9 +767,9 @@
* socket structure, but rather, the socket security information is stored
* in the associated inode. Typically, the inode alloc_security hook will
 * allocate and attach security information to
- * sock->inode->i_security. This hook may be used to update the
- * sock->inode->i_security field with additional information that wasn't
- * available when the inode was allocated.
+ * SOCK_INODE(sock)->i_security. This hook may be used to update the
+ * SOCK_INODE(sock)->i_security field with additional information that
+ * wasn't available when the inode was allocated.
* @sock contains the newly created socket structure.
* @family contains the requested protocol family.
* @type contains the requested communications type.
@@ -860,13 +874,13 @@
* @socket_getpeersec_dgram:
* This hook allows the security module to provide peer socket security
* state for udp sockets on a per-packet basis to userspace via
- * getsockopt SO_GETPEERSEC. The application must first have indicated
- * the IP_PASSSEC option via getsockopt. It can then retrieve the
+ * getsockopt SO_GETPEERSEC. The application must first have indicated
+ * the IP_PASSSEC option via getsockopt. It can then retrieve the
* security state returned by this hook for a packet via the SCM_SECURITY
* ancillary message type.
- * @skb is the skbuff for the packet being queried
- * @secdata is a pointer to a buffer in which to copy the security data
- * @seclen is the maximum length for @secdata
+ * @sock contains the peer socket. May be NULL.
+ * @skb is the sk_buff for the packet being queried. May be NULL.
+ * @secid pointer to store the secid of the packet.
* Return 0 on success, error on failure.
* @sk_alloc_security:
* Allocate and attach a security structure to the sk->sk_security field,
@@ -890,9 +904,9 @@
* @secmark_relabel_packet:
* check if the process should be allowed to relabel packets to
* the given secid
- * @security_secmark_refcount_inc
+ * @secmark_refcount_inc:
* tells the LSM to increment the number of secmark labeling rules loaded
- * @security_secmark_refcount_dec
+ * @secmark_refcount_dec:
* tells the LSM to decrement the number of secmark labeling rules loaded
* @req_classify_flow:
* Sets the flow's sid to the openreq sid.
@@ -1097,41 +1111,41 @@
*
* @msg_queue_alloc_security:
* Allocate and attach a security structure to the
- * msq->q_perm.security field. The security field is initialized to
+ * @perm->security field. The security field is initialized to
* NULL when the structure is first created.
- * @msq contains the message queue structure to be modified.
+ * @perm contains the IPC permissions of the message queue.
* Return 0 if operation was successful and permission is granted.
* @msg_queue_free_security:
- * Deallocate security structure for this message queue.
- * @msq contains the message queue structure to be modified.
+ * Deallocate security field @perm->security for the message queue.
+ * @perm contains the IPC permissions of the message queue.
* @msg_queue_associate:
* Check permission when a message queue is requested through the
- * msgget system call. This hook is only called when returning the
+ * msgget system call. This hook is only called when returning the
* message queue identifier for an existing message queue, not when a
* new message queue is created.
- * @msq contains the message queue to act upon.
+ * @perm contains the IPC permissions of the message queue.
* @msqflg contains the operation control flags.
* Return 0 if permission is granted.
* @msg_queue_msgctl:
* Check permission when a message control operation specified by @cmd
- * is to be performed on the message queue @msq.
- * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
- * @msq contains the message queue to act upon. May be NULL.
+ * is to be performed on the message queue with permissions @perm.
+ * The @perm may be NULL, e.g. for IPC_INFO or MSG_INFO.
+ * @perm contains the IPC permissions of the msg queue. May be NULL.
* @cmd contains the operation to be performed.
* Return 0 if permission is granted.
* @msg_queue_msgsnd:
* Check permission before a message, @msg, is enqueued on the message
- * queue, @msq.
- * @msq contains the message queue to send message to.
+ * queue with permissions @perm.
+ * @perm contains the IPC permissions of the message queue.
* @msg contains the message to be enqueued.
* @msqflg contains operational flags.
* Return 0 if permission is granted.
* @msg_queue_msgrcv:
* Check permission before a message, @msg, is removed from the message
- * queue, @msq. The @target task structure contains a pointer to the
+ * queue. The @target task structure contains a pointer to the
* process that will be receiving the message (not equal to the current
* process when inline receives are being performed).
- * @msq contains the message queue to retrieve message from.
+ * @perm contains the IPC permissions of the message queue.
* @msg contains the message destination.
* @target contains the task structure for recipient process.
* @type contains the type of message requested.
@@ -1141,34 +1155,34 @@
* Security hooks for System V Shared Memory Segments
*
* @shm_alloc_security:
- * Allocate and attach a security structure to the shp->shm_perm.security
- * field. The security field is initialized to NULL when the structure is
+ * Allocate and attach a security structure to the @perm->security
+ * field. The security field is initialized to NULL when the structure is
* first created.
- * @shp contains the shared memory structure to be modified.
+ * @perm contains the IPC permissions of the shared memory structure.
* Return 0 if operation was successful and permission is granted.
* @shm_free_security:
- * Deallocate the security struct for this memory segment.
- * @shp contains the shared memory structure to be modified.
+ * Deallocate the security structure @perm->security for the memory segment.
+ * @perm contains the IPC permissions of the shared memory structure.
* @shm_associate:
* Check permission when a shared memory region is requested through the
- * shmget system call. This hook is only called when returning the shared
+ * shmget system call. This hook is only called when returning the shared
* memory region identifier for an existing region, not when a new shared
* memory region is created.
- * @shp contains the shared memory structure to be modified.
+ * @perm contains the IPC permissions of the shared memory structure.
* @shmflg contains the operation control flags.
* Return 0 if permission is granted.
* @shm_shmctl:
* Check permission when a shared memory control operation specified by
- * @cmd is to be performed on the shared memory region @shp.
- * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
- * @shp contains shared memory structure to be modified.
+ * @cmd is to be performed on the shared memory region with permissions @perm.
+ * The @perm may be NULL, e.g. for IPC_INFO or SHM_INFO.
+ * @perm contains the IPC permissions of the shared memory structure.
* @cmd contains the operation to be performed.
* Return 0 if permission is granted.
* @shm_shmat:
* Check permissions prior to allowing the shmat system call to attach the
- * shared memory segment @shp to the data segment of the calling process.
- * The attaching address is specified by @shmaddr.
- * @shp contains the shared memory structure to be modified.
+ * shared memory segment with permissions @perm to the data segment of the
+ * calling process. The attaching address is specified by @shmaddr.
+ * @perm contains the IPC permissions of the shared memory structure.
* @shmaddr contains the address to attach memory region to.
* @shmflg contains the operational flags.
* Return 0 if permission is granted.
@@ -1176,34 +1190,34 @@
* Security hooks for System V Semaphores
*
* @sem_alloc_security:
- * Allocate and attach a security structure to the sma->sem_perm.security
- * field. The security field is initialized to NULL when the structure is
+ * Allocate and attach a security structure to the @perm->security
+ * field. The security field is initialized to NULL when the structure is
* first created.
- * @sma contains the semaphore structure
+ * @perm contains the IPC permissions of the semaphore.
* Return 0 if operation was successful and permission is granted.
* @sem_free_security:
- * deallocate security struct for this semaphore
- * @sma contains the semaphore structure.
+ * Deallocate security structure @perm->security for the semaphore.
+ * @perm contains the IPC permissions of the semaphore.
* @sem_associate:
* Check permission when a semaphore is requested through the semget
- * system call. This hook is only called when returning the semaphore
+ * system call. This hook is only called when returning the semaphore
* identifier for an existing semaphore, not when a new one must be
* created.
- * @sma contains the semaphore structure.
+ * @perm contains the IPC permissions of the semaphore.
* @semflg contains the operation control flags.
* Return 0 if permission is granted.
* @sem_semctl:
* Check permission when a semaphore operation specified by @cmd is to be
- * performed on the semaphore @sma. The @sma may be NULL, e.g. for
+ * performed on the semaphore. The @perm may be NULL, e.g. for
* IPC_INFO or SEM_INFO.
- * @sma contains the semaphore structure. May be NULL.
+ * @perm contains the IPC permissions of the semaphore. May be NULL.
* @cmd contains the operation to be performed.
* Return 0 if permission is granted.
* @sem_semop:
* Check permissions before performing operations on members of the
- * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
+ * semaphore set. If the @alter flag is nonzero, the semaphore set
* may be modified.
- * @sma contains the semaphore structure.
+ * @perm contains the IPC permissions of the semaphore.
* @sops contains the operations to perform.
* @nsops contains the number of operations to perform.
* @alter contains the flag indicating whether changes are to be made.
@@ -1276,13 +1290,12 @@
* Check permission before accessing the kernel message ring or changing
* logging to the console.
* See the syslog(2) manual page for an explanation of the @type values.
- * @type contains the type of action.
- * @from_file indicates the context of action (if it came from /proc).
+ * @type contains the SYSLOG_ACTION_* constant from <include/linux/syslog.h>
* Return 0 if permission is granted.
* @settime:
* Check permission to change the system time.
- * struct timespec64 is defined in include/linux/time64.h and timezone
- * is defined in include/linux/time.h
+ * struct timespec64 is defined in <include/linux/time64.h> and timezone
+ * is defined in <include/linux/time.h>
* @ts contains new time
* @tz contains new timezone
* Return 0 if permission is granted.
@@ -1324,7 +1337,7 @@
* @audit_rule_init:
* Allocate and initialize an LSM audit rule structure.
* @field contains the required Audit action.
- * Fields flags are defined in include/linux/audit.h
+ * Fields flags are defined in <include/linux/audit.h>
* @op contains the operator the rule uses.
* @rulestr contains the context where the rule will be applied to.
* @lsmrule contains a pointer to receive the result.
@@ -1332,9 +1345,9 @@
* -EINVAL in case of an invalid rule.
*
* @audit_rule_known:
- * Specifies whether given @rule contains any fields related to
+ * Specifies whether given @krule contains any fields related to
* current LSM.
- * @rule contains the audit rule of interest.
+ * @krule contains the audit rule of interest.
* Return 1 in case of relation found, 0 otherwise.
*
* @audit_rule_match:
@@ -1343,13 +1356,13 @@
* @secid contains the security id in question.
* @field contains the field which relates to current LSM.
* @op contains the operator that will be used for matching.
- * @rule points to the audit rule that will be checked against.
+ * @lrule points to the audit rule that will be checked against.
* Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
*
* @audit_rule_free:
* Deallocate the LSM audit rule structure previously allocated by
* audit_rule_init.
- * @rule contains the allocated rule
+ * @lsmrule contains the allocated rule
*
* @inode_invalidate_secctx:
* Notify the security module that it must revalidate the security context
@@ -1362,9 +1375,7 @@
* this hook to initialize the security context in its incore inode to the
* value provided by the server for the file when the server returned the
* file's attributes to the client.
- *
* Must be called with inode->i_mutex locked.
- *
* @inode we wish to set the security context of.
* @ctx contains the string which we wish to set in the inode.
* @ctxlen contains the length of @ctx.
@@ -1377,9 +1388,7 @@
* this hook to change the security context in its incore inode and on the
* backing filesystem to a value provided by the client on a SETATTR
* operation.
- *
* Must be called with inode->i_mutex locked.
- *
* @dentry contains the inode we wish to set the security context of.
* @ctx contains the string which we wish to set in the inode.
* @ctxlen contains the length of @ctx.
@@ -1387,7 +1396,6 @@
* @inode_getsecctx:
* On success, returns 0 and fills out @ctx and @ctxlen with the security
* context for the given @inode.
- *
* @inode we wish to get the security context of.
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
@@ -1460,6 +1468,9 @@ union security_list_options {
void (*bprm_committing_creds)(struct linux_binprm *bprm);
void (*bprm_committed_creds)(struct linux_binprm *bprm);
+ int (*fs_context_dup)(struct fs_context *fc, struct fs_context *src_sc);
+ int (*fs_context_parse_param)(struct fs_context *fc, struct fs_parameter *param);
+
int (*sb_alloc_security)(struct super_block *sb);
void (*sb_free_security)(struct super_block *sb);
void (*sb_free_mnt_opts)(void *mnt_opts);
@@ -1621,28 +1632,28 @@ union security_list_options {
int (*msg_msg_alloc_security)(struct msg_msg *msg);
void (*msg_msg_free_security)(struct msg_msg *msg);
- int (*msg_queue_alloc_security)(struct kern_ipc_perm *msq);
- void (*msg_queue_free_security)(struct kern_ipc_perm *msq);
- int (*msg_queue_associate)(struct kern_ipc_perm *msq, int msqflg);
- int (*msg_queue_msgctl)(struct kern_ipc_perm *msq, int cmd);
- int (*msg_queue_msgsnd)(struct kern_ipc_perm *msq, struct msg_msg *msg,
+ int (*msg_queue_alloc_security)(struct kern_ipc_perm *perm);
+ void (*msg_queue_free_security)(struct kern_ipc_perm *perm);
+ int (*msg_queue_associate)(struct kern_ipc_perm *perm, int msqflg);
+ int (*msg_queue_msgctl)(struct kern_ipc_perm *perm, int cmd);
+ int (*msg_queue_msgsnd)(struct kern_ipc_perm *perm, struct msg_msg *msg,
int msqflg);
- int (*msg_queue_msgrcv)(struct kern_ipc_perm *msq, struct msg_msg *msg,
+ int (*msg_queue_msgrcv)(struct kern_ipc_perm *perm, struct msg_msg *msg,
struct task_struct *target, long type,
int mode);
- int (*shm_alloc_security)(struct kern_ipc_perm *shp);
- void (*shm_free_security)(struct kern_ipc_perm *shp);
- int (*shm_associate)(struct kern_ipc_perm *shp, int shmflg);
- int (*shm_shmctl)(struct kern_ipc_perm *shp, int cmd);
- int (*shm_shmat)(struct kern_ipc_perm *shp, char __user *shmaddr,
+ int (*shm_alloc_security)(struct kern_ipc_perm *perm);
+ void (*shm_free_security)(struct kern_ipc_perm *perm);
+ int (*shm_associate)(struct kern_ipc_perm *perm, int shmflg);
+ int (*shm_shmctl)(struct kern_ipc_perm *perm, int cmd);
+ int (*shm_shmat)(struct kern_ipc_perm *perm, char __user *shmaddr,
int shmflg);
- int (*sem_alloc_security)(struct kern_ipc_perm *sma);
- void (*sem_free_security)(struct kern_ipc_perm *sma);
- int (*sem_associate)(struct kern_ipc_perm *sma, int semflg);
- int (*sem_semctl)(struct kern_ipc_perm *sma, int cmd);
- int (*sem_semop)(struct kern_ipc_perm *sma, struct sembuf *sops,
+ int (*sem_alloc_security)(struct kern_ipc_perm *perm);
+ void (*sem_free_security)(struct kern_ipc_perm *perm);
+ int (*sem_associate)(struct kern_ipc_perm *perm, int semflg);
+ int (*sem_semctl)(struct kern_ipc_perm *perm, int cmd);
+ int (*sem_semop)(struct kern_ipc_perm *perm, struct sembuf *sops,
unsigned nsops, int alter);
int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
@@ -1800,6 +1811,8 @@ struct security_hook_heads {
struct hlist_head bprm_check_security;
struct hlist_head bprm_committing_creds;
struct hlist_head bprm_committed_creds;
+ struct hlist_head fs_context_dup;
+ struct hlist_head fs_context_parse_param;
struct hlist_head sb_alloc_security;
struct hlist_head sb_free_security;
struct hlist_head sb_free_mnt_opts;
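A hedged sketch of an LSM wiring up the new fs_context_parse_param hook per the contract documented earlier in this file (the LSM name and the option key are illustrative):

#include <linux/fs_context.h>
#include <linux/lsm_hooks.h>
#include <linux/string.h>

static int mylsm_fs_context_parse_param(struct fs_context *fc,
					struct fs_parameter *param)
{
	if (strcmp(param->key, "mylsmcontext") != 0)
		return -ENOPARAM;	/* not ours; let the fs see it */

	/* ... record the value in this LSM's portion of fc->security ... */
	return 0;
}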
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
new file mode 100644
index 000000000000..9542b41eacfd
--- /dev/null
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ZYNQMP_IPI_MESSAGE_H_
+#define _LINUX_ZYNQMP_IPI_MESSAGE_H_
+
+/**
+ * struct zynqmp_ipi_message - ZynqMP IPI message structure
+ * @len: Length of message
+ * @data: message payload
+ *
+ * This is the structure for data used in mbox_send_message();
+ * the maximum length of the data buffer is fixed to 12 bytes.
+ * Clients are expected to be aware of this limit.
+ */
+struct zynqmp_ipi_message {
+ size_t len;
+ u8 data[0];
+};
+
+#endif /* _LINUX_ZYNQMP_IPI_MESSAGE_H_ */
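A minimal sketch of a mailbox client filling this structure for mbox_send_message(); the 12-byte payload cap noted above is enforced explicitly, and the channel is assumed to have been requested elsewhere:

#include <linux/mailbox_client.h>
#include <linux/mailbox/zynqmp-ipi-message.h>
#include <linux/slab.h>
#include <linux/string.h>

static int my_ipi_send(struct mbox_chan *chan, const u8 *buf, size_t len)
{
	struct zynqmp_ipi_message *msg;
	int ret;

	if (len > 12)			/* payload is capped at 12 bytes */
		return -EINVAL;

	msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	msg->len = len;
	memcpy(msg->data, buf, len);

	ret = mbox_send_message(chan, msg);
	kfree(msg);
	return ret < 0 ? ret : 0;
}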
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 859b55b66db2..294d5d80e150 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -108,9 +108,6 @@ void memblock_discard(void);
#define memblock_dbg(fmt, ...) \
if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end,
- int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
@@ -127,7 +124,6 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-enum memblock_flags choose_memblock_flags(void);
unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
@@ -277,18 +273,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
nid, flags, p_start, p_end, p_nid)
-static inline void memblock_set_region_flags(struct memblock_region *r,
- enum memblock_flags flags)
-{
- r->flags |= flags;
-}
-
-static inline void memblock_clear_region_flags(struct memblock_region *r,
- enum memblock_flags flags)
-{
- r->flags &= ~flags;
-}
-
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
struct memblock_type *type, int nid);
@@ -325,17 +309,20 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
-phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
+ phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);
+static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_phys_alloc_range(size, align, 0,
+ MEMBLOCK_ALLOC_ACCESSIBLE);
+}
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
-void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr,
- int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
@@ -362,36 +349,12 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
- phys_addr_t align)
-{
- return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
- MEMBLOCK_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
static inline void * __init memblock_alloc_low(phys_addr_t size,
phys_addr_t align)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}
-static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
- phys_addr_t align)
-{
- return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
- phys_addr_t align,
- phys_addr_t min_addr)
-{
- return memblock_alloc_try_nid_nopanic(size, align, min_addr,
- MEMBLOCK_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
static inline void * __init memblock_alloc_node(phys_addr_t size,
phys_addr_t align, int nid)
@@ -400,14 +363,6 @@ static inline void * __init memblock_alloc_node(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
-static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
- int nid)
-{
- return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
- MEMBLOCK_LOW_LIMIT,
- MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-}
-
static inline void __init memblock_free_early(phys_addr_t base,
phys_addr_t size)
{
@@ -443,16 +398,6 @@ static inline bool memblock_bottom_up(void)
return memblock.bottom_up;
}
-phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end,
- enum memblock_flags flags);
-phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
- phys_addr_t align, phys_addr_t max_addr,
- int nid, enum memblock_flags flags);
-phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
- phys_addr_t max_addr);
-phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
- phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
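After this consolidation, memblock_phys_alloc() is a thin wrapper over memblock_phys_alloc_range(), and callers that used the removed *_base/_range variants pass an explicit window instead. An illustrative early-boot sketch:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

static phys_addr_t __init my_early_alloc(void)
{
	/* Anywhere accessible, as with the old memblock_phys_alloc(): */
	phys_addr_t p = memblock_phys_alloc(SZ_4K, SZ_4K);

	/* Or constrained to an explicit [start, end) physical window: */
	if (!p)
		p = memblock_phys_alloc_range(SZ_4K, SZ_4K, 0, SZ_1M);
	return p;	/* 0 means the allocation failed */
}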
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1f3d880b7ca1..dbb6118370c1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -566,7 +566,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
-/* idx can be of type enum memcg_stat_item or node_stat_item */
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page_state().
+ */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
int idx)
{
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 52869d6d38b3..8ade08c50d26 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -100,6 +100,8 @@ extern void __online_page_free(struct page *page);
extern int try_online_node(int nid);
+extern u64 max_mem_size;
+
extern bool memhp_auto_online;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 75e5c8ff85fc..c34d5f0d34d7 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -553,7 +553,6 @@ struct palmas_pmic {
struct palmas *palmas;
struct device *dev;
struct regulator_desc desc[PALMAS_NUM_REGS];
- struct regulator_dev *rdev[PALMAS_NUM_REGS];
struct mutex mutex;
int smps123;
diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h
index 955d30fc6a27..30c587a0624c 100644
--- a/include/linux/mfd/wm831x/regulator.h
+++ b/include/linux/mfd/wm831x/regulator.h
@@ -1213,6 +1213,6 @@
#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */
#define WM831X_ISINK_MAX_ISEL 55
-extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
+extern const unsigned int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
#endif
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 4ee908f5b834..43d0d307e2e3 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -923,12 +923,4 @@ struct wm8400 {
#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */
#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */
-int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
-
-static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
- u16 mask, u16 val)
-{
- return regmap_update_bits(wm8400->regmap, reg, mask, val);
-}
-
#endif
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 6fee8b1a4400..5cd824c1c0ca 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
advertising))
lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
advertising))
lcl_adv |= ADVERTISE_PAUSE_ASYM;
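
For context, the hunk above fixes a copy-paste bug: ADVERTISE_PAUSE_ASYM was previously derived from ETHTOOL_LINK_MODE_Pause_BIT instead of the Asym_Pause bit. A minimal sketch of the corrected helper in use, assuming a caller that advertises both pause modes (the function name is illustrative):

    #include <linux/ethtool.h>
    #include <linux/linkmode.h>
    #include <linux/mii.h>

    /* Illustrative only: advertise both pause modes and translate the
     * linkmode bitmap into an MII advertisement register value. With the
     * fix above this yields ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
     * previously the Asym_Pause bit was never honoured.
     */
    static u32 example_pause_lcl_adv(void)
    {
            __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };

            linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
            linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);

            return linkmode_adv_to_lcl_adv_t(advertising);
    }
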
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5ffb5df1a2c2..0d0729648844 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -365,6 +365,7 @@ struct mlx5_core_sig_ctx {
enum {
MLX5_MKEY_MR = 1,
MLX5_MKEY_MW,
+ MLX5_MKEY_INDIRECT_DEVX,
};
struct mlx5_core_mkey {
@@ -593,6 +594,8 @@ enum mlx5_pagefault_type_flags {
};
struct mlx5_td {
+	/* protects tirs list changes while tirs are refreshed */
+ struct mutex list_lock;
struct list_head tirs_list;
u32 tdn;
};
@@ -960,10 +963,6 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
u8 port_num, void *out, size_t sz);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
- u32 wq_num, u8 type, int error);
-#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index b26ea9077384..0343c81d4c5f 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -557,7 +557,8 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *qp,
- u32 *in, int inlen);
+ u32 *in, int inlen,
+ u32 *out, int outlen);
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5801ee849f36..083d7b4863ed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -26,6 +26,7 @@
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
+#include <linux/sizes.h>
struct mempolicy;
struct anon_vma;
@@ -965,6 +966,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
+/* 127: arbitrary random number, small enough to assemble well */
+#define page_ref_zero_or_close_to_overflow(page) \
+ ((unsigned int) page_ref_count(page) + 127u <= 127u)
+
static inline void get_page(struct page *page)
{
page = compound_head(page);
@@ -972,10 +977,19 @@ static inline void get_page(struct page *page)
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_refcount.
*/
- VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
page_ref_inc(page);
}
+static inline __must_check bool try_get_page(struct page *page)
+{
+ page = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+ return false;
+ page_ref_inc(page);
+ return true;
+}
+
static inline void put_page(struct page *page)
{
page = compound_head(page);
@@ -2402,8 +2416,7 @@ int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);
/* readahead.c */
-#define VM_MAX_READAHEAD 128 /* kbytes */
-#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
+#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read);
@@ -2597,37 +2610,31 @@ static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);
static inline bool debug_pagealloc_enabled(void)
{
- return _debug_pagealloc_enabled;
+ return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
}
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
- if (!debug_pagealloc_enabled())
- return;
-
__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
-#else /* CONFIG_DEBUG_PAGEALLOC */
+#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
-static inline bool debug_pagealloc_enabled(void)
-{
- return false;
-}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
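
The get_page()/try_get_page() hunks above harden the page refcount against overflow. A minimal sketch of the intended calling convention, assuming a pin path where userspace controls how often a page gets referenced (names are illustrative):

    #include <linux/mm.h>

    /* Sketch: on paths where the refcount may already have dropped to
     * zero, try_get_page() fails gracefully where get_page() would trip
     * VM_BUG_ON_PAGE().
     */
    static int example_pin_page(struct page *page)
    {
            if (!try_get_page(page))
                    return -ENOMEM;         /* could not take a reference */

            /* ... access the page contents ... */

            put_page(page);
            return 0;
    }
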
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 86e7a7a46353..4ef4bbe78a1d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -411,7 +411,7 @@ struct mm_struct {
unsigned long total_vm; /* Total pages mapped */
unsigned long locked_vm; /* Pages that have PG_mlocked set */
- unsigned long pinned_vm; /* Refcount permanently increased */
+ atomic64_t pinned_vm; /* Refcount permanently increased */
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
@@ -671,7 +671,7 @@ enum vm_fault_reason {
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
-#define VM_FAULT_GET_HINDEX(x) (((x) >> 16) & 0xf)
+#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \
VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \
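
With pinned_vm converted to atomic64_t above, callers account pinned pages with atomic ops instead of updating a plain counter under mmap_sem. A hedged sketch (function names illustrative):

    #include <linux/atomic.h>
    #include <linux/mm_types.h>

    /* Sketch: pinned-page accounting after the atomic64_t conversion. */
    static void example_account_pinned(struct mm_struct *mm, long npages)
    {
            atomic64_add(npages, &mm->pinned_vm);
    }

    static long long example_read_pinned(struct mm_struct *mm)
    {
            return atomic64_read(&mm->pinned_vm);
    }
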
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 14eaeeb46f41..448621c32e4d 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -788,4 +788,16 @@ struct tee_client_device_id {
uuid_t uuid;
};
+/* WMI */
+
+#define WMI_MODULE_PREFIX "wmi:"
+
+/**
+ * struct wmi_device_id - WMI device identifier
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ */
+struct wmi_device_id {
+ const char guid_string[UUID_STRING_LEN+1];
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
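
The new wmi_device_id type lets WMI drivers declare module alias tables. A hedged sketch of a match table, reusing the documentation GUID from the kernel-doc above rather than a real device:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    /* Illustrative match table; the GUID is the kernel-doc example
     * string, not an actual device GUID.
     */
    static const struct wmi_device_id example_wmi_id_table[] = {
            { .guid_string = "fa50ff2b-f2e8-45de-83fa-65417f2f49ba" },
            { }
    };
    MODULE_DEVICE_TABLE(wmi, example_wmi_id_table);
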
diff --git a/include/linux/module.h b/include/linux/module.h
index f5bc4c046461..5bf5dcd91009 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -172,7 +172,7 @@ extern void cleanup_module(void);
* The following license idents are currently accepted as indicating free
* software modules
*
- * "GPL" [GNU Public License v2 or later]
+ * "GPL" [GNU Public License v2]
* "GPL v2" [GNU Public License v2]
* "GPL and additional rights" [GNU Public License v2 rights and more]
* "Dual BSD/GPL" [GNU Public License v2
@@ -186,6 +186,22 @@ extern void cleanup_module(void);
*
* "Proprietary" [Non free products]
*
+ * Both "GPL v2" and "GPL" (the latter also in dual licensed strings) are
+ * merely stating that the module is licensed under the GPL v2, but are not
+ * telling whether "GPL v2 only" or "GPL v2 or later". The reason why there
+ * are two variants is a historic and failed attempt to convey more
+ * information in the MODULE_LICENSE string. For module loading the
+ * "only/or later" distinction is completely irrelevant and does neither
+ * replace the proper license identifiers in the corresponding source file
+ * nor amends them in any way. The sole purpose is to make the
+ * 'Proprietary' flagging work and to refuse to bind symbols which are
+ * exported with EXPORT_SYMBOL_GPL when a non free module is loaded.
+ *
+ * In the same way "BSD" is not a clear license information. It merely
+ * states, that the module is licensed under one of the compatible BSD
+ * license variants. The detailed and correct license information is again
+ * to be found in the corresponding source files.
+ *
* There are dual licensed components, but when running with Linux it is the
* GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL
* is a GPL combined work.
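
To make the clarified semantics concrete: the authoritative license lives in the per-file SPDX identifier, while MODULE_LICENSE() only classifies the module as free or proprietary for symbol binding. A minimal sketch:

    // SPDX-License-Identifier: GPL-2.0-only
    #include <linux/module.h>

    /* The SPDX tag above carries the precise license ("v2 only" here);
     * the string below merely tells the loader this is free software so
     * that EXPORT_SYMBOL_GPL symbols may be bound.
     */
    MODULE_LICENSE("GPL v2");
    MODULE_DESCRIPTION("MODULE_LICENSE usage sketch");
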
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 037eed52164b..9197ddbf35fb 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -21,6 +21,7 @@ struct super_block;
struct vfsmount;
struct dentry;
struct mnt_namespace;
+struct fs_context;
#define MNT_NOSUID 0x01
#define MNT_NODEV 0x02
@@ -88,6 +89,8 @@ struct path;
extern struct vfsmount *clone_private_mount(const struct path *path);
struct file_system_type;
+extern struct vfsmount *fc_mount(struct fs_context *fc);
+extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
void *data);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 784fb52b9900..7e9b81c3b50d 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -83,12 +83,12 @@ struct msi_desc {
struct {
u32 masked;
struct {
- __u8 is_msix : 1;
- __u8 multiple : 3;
- __u8 multi_cap : 3;
- __u8 maskbit : 1;
- __u8 is_64 : 1;
- __u16 entry_nr;
+ u8 is_msix : 1;
+ u8 multiple : 3;
+ u8 multi_cap : 3;
+ u8 maskbit : 1;
+ u8 is_64 : 1;
+ u16 entry_nr;
unsigned default_irq;
} msi_attrib;
union {
diff --git a/include/linux/net.h b/include/linux/net.h
index 651fca72286c..c606c72311d0 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -83,6 +83,12 @@ enum sock_type {
#endif /* ARCH_HAS_SOCKET_TYPES */
+/**
+ * enum sock_shutdown_cmd - Shutdown types
+ * @SHUT_RD: shutdown receptions
+ * @SHUT_WR: shutdown transmissions
+ * @SHUT_RDWR: shutdown receptions/transmissions
+ */
enum sock_shutdown_cmd {
SHUT_RD,
SHUT_WR,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 26f69cf763f4..324e872c91d1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1500,6 +1500,7 @@ struct net_device_ops {
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1532,6 +1533,7 @@ enum netdev_priv_flags {
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
+ IFF_LIVE_RENAME_OK = 1<<30,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1563,6 +1565,7 @@ enum netdev_priv_flags {
#define IFF_FAILOVER IFF_FAILOVER
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
+#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
/**
* struct net_device - The DEVICE structure.
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 1b06f0b28453..22494d170619 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -538,6 +538,7 @@ enum {
NFSPROC4_CLNT_OFFLOAD_CANCEL,
NFSPROC4_CLNT_LOOKUPP,
+ NFSPROC4_CLNT_LAYOUTERROR,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 6aa8cc83c3b6..c827d31298cc 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -261,5 +261,6 @@ struct nfs_server {
#define NFS_CAP_CLONE (1U << 23)
#define NFS_CAP_COPY (1U << 24)
#define NFS_CAP_OFFLOAD_CANCEL (1U << 25)
+#define NFS_CAP_LAYOUTERROR (1U << 26)
#endif
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index e27572d30d97..ad69430fd0eb 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -164,6 +164,16 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head)
list_add_tail(&req->wb_list, head);
}
+/**
+ * nfs_list_move_request - Move a request to a new list
+ * @req: request
+ * @head: head of list into which to insert the request.
+ */
+static inline void
+nfs_list_move_request(struct nfs_page *req, struct list_head *head)
+{
+ list_move_tail(&req->wb_list, head);
+}
/**
* nfs_list_remove_request - Remove a request from its wb_list
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 441a93ebcac0..9b8324ec08f3 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -383,6 +383,41 @@ struct nfs42_layoutstat_data {
struct nfs42_layoutstat_res res;
};
+struct nfs42_device_error {
+ struct nfs4_deviceid dev_id;
+ int status;
+ enum nfs_opnum4 opnum;
+};
+
+struct nfs42_layout_error {
+ __u64 offset;
+ __u64 length;
+ nfs4_stateid stateid;
+ struct nfs42_device_error errors[1];
+};
+
+#define NFS42_LAYOUTERROR_MAX 5
+
+struct nfs42_layouterror_args {
+ struct nfs4_sequence_args seq_args;
+ struct inode *inode;
+ unsigned int num_errors;
+ struct nfs42_layout_error errors[NFS42_LAYOUTERROR_MAX];
+};
+
+struct nfs42_layouterror_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int num_errors;
+ int rpc_status;
+};
+
+struct nfs42_layouterror_data {
+ struct nfs42_layouterror_args args;
+ struct nfs42_layouterror_res res;
+ struct inode *inode;
+ struct pnfs_layout_segment *lseg;
+};
+
struct nfs42_clone_args {
struct nfs4_sequence_args seq_args;
struct nfs_fh *src_fh;
@@ -1549,7 +1584,7 @@ struct nfs_commit_data {
};
struct nfs_pgio_completion_ops {
- void (*error_cleanup)(struct list_head *head);
+ void (*error_cleanup)(struct list_head *head, int);
void (*init_hdr)(struct nfs_pgio_header *hdr);
void (*completion)(struct nfs_pgio_header *hdr);
void (*reschedule_io)(struct nfs_pgio_header *hdr);
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 181d16601dd9..56a92e3ae3ae 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -296,7 +296,8 @@ struct ntb_dev_ops {
int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
int (*peer_db_addr)(struct ntb_dev *ntb,
- phys_addr_t *db_addr, resource_size_t *db_size);
+ phys_addr_t *db_addr, resource_size_t *db_size,
+ u64 *db_data, int db_bit);
u64 (*peer_db_read)(struct ntb_dev *ntb);
int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);
@@ -1078,6 +1079,8 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
* @ntb: NTB device context.
* @db_addr: OUT - The address of the peer doorbell register.
* @db_size: OUT - The number of bytes to write the peer doorbell register.
+ * @db_data: OUT - The data to write to the peer doorbell register
+ * @db_bit: doorbell bit number
*
* Return the address of the peer doorbell register. This may be used, for
* example, by drivers that offload memory copy operations to a dma engine.
@@ -1091,12 +1094,13 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
*/
static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
phys_addr_t *db_addr,
- resource_size_t *db_size)
+ resource_size_t *db_size,
+ u64 *db_data, int db_bit)
{
if (!ntb->ops->peer_db_addr)
return -EINVAL;
- return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
+ return ntb->ops->peer_db_addr(ntb, db_addr, db_size, db_data, db_bit);
}
/**
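
Given the widened ntb_peer_db_addr() above, a hedged sketch of a client fetching both the register address and the value to write for one doorbell bit, e.g. to hand off to a DMA engine (names illustrative):

    #include <linux/ntb.h>

    /* Sketch: obtain address + data for ringing a specific peer
     * doorbell bit without a CPU MMIO write.
     */
    static int example_peer_db_lookup(struct ntb_dev *ntb, int db_bit)
    {
            phys_addr_t db_addr;
            resource_size_t db_size;
            u64 db_data;
            int rc;

            rc = ntb_peer_db_addr(ntb, &db_addr, &db_size, &db_data, db_bit);
            if (rc)
                    return rc;

            /* program a dmaengine immediate-data write of db_data to
             * db_addr here */
            return 0;
    }
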
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index baa49e6a23cc..c40720cb59ac 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -967,8 +967,13 @@ struct nvme_get_log_page_command {
__le16 numdl;
__le16 numdu;
__u16 rsvd11;
- __le32 lpol;
- __le32 lpou;
+ union {
+ struct {
+ __le32 lpol;
+ __le32 lpou;
+ };
+ __le64 lpo;
+ };
__u32 rsvd14[2];
};
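
The anonymous union above lets a 64-bit log page offset be stored in one assignment. A hedged sketch (helper name illustrative):

    #include <asm/byteorder.h>
    #include <linux/nvme.h>

    /* Sketch: lpo aliases the existing lpol (low) / lpou (high) pair. */
    static void example_set_log_page_offset(struct nvme_get_log_page_command *c,
                                            u64 offset)
    {
            c->lpo = cpu_to_le64(offset);
    }
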
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 67ab8d271df3..60f541912ccf 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -35,13 +35,6 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
void of_reserved_mem_device_release(struct device *dev);
-int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
- phys_addr_t align,
- phys_addr_t start,
- phys_addr_t end,
- bool nomap,
- phys_addr_t *res_base);
-
void fdt_init_reserved_mem(void);
void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index d2fa9ca42e9a..7f30446348c4 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -93,6 +93,24 @@ enum OID {
OID_authorityKeyIdentifier, /* 2.5.29.35 */
OID_extKeyUsage, /* 2.5.29.37 */
+ /* EC-RDSA */
+ OID_gostCPSignA, /* 1.2.643.2.2.35.1 */
+ OID_gostCPSignB, /* 1.2.643.2.2.35.2 */
+ OID_gostCPSignC, /* 1.2.643.2.2.35.3 */
+ OID_gost2012PKey256, /* 1.2.643.7.1.1.1.1 */
+ OID_gost2012PKey512, /* 1.2.643.7.1.1.1.2 */
+ OID_gost2012Digest256, /* 1.2.643.7.1.1.2.2 */
+ OID_gost2012Digest512, /* 1.2.643.7.1.1.2.3 */
+ OID_gost2012Signature256, /* 1.2.643.7.1.1.3.2 */
+ OID_gost2012Signature512, /* 1.2.643.7.1.1.3.3 */
+ OID_gostTC26Sign256A, /* 1.2.643.7.1.2.1.1.1 */
+ OID_gostTC26Sign256B, /* 1.2.643.7.1.2.1.1.2 */
+ OID_gostTC26Sign256C, /* 1.2.643.7.1.2.1.1.3 */
+ OID_gostTC26Sign256D, /* 1.2.643.7.1.2.1.1.4 */
+ OID_gostTC26Sign512A, /* 1.2.643.7.1.2.1.2.1 */
+ OID_gostTC26Sign512B, /* 1.2.643.7.1.2.1.2.2 */
+ OID_gostTC26Sign512C, /* 1.2.643.7.1.2.1.2.3 */
+
OID__NR
};
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 4eb26d278046..280ae96dc4c3 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
/*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE or CMA,
- * this will fail with -EBUSY.
- *
- * For isolating all pages in the range finally, the caller have to
- * free all pages in the range. test_page_isolated() can be used for
- * test it.
- *
- * The following flags are allowed (they can be combined in a bit mask)
- * SKIP_HWPOISON - ignore hwpoison pages
- * REPORT_FAILURE - report details about the failure to isolate the range
*/
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b477a70cc2e4..bcf909d0de5f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -239,6 +239,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
#define FGP_WRITE 0x00000008
#define FGP_NOFS 0x00000010
#define FGP_NOWAIT 0x00000020
+#define FGP_FOR_MMAP 0x00000040
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
int fgp_flags, gfp_t cache_gfp_mask);
diff --git a/include/linux/parport.h b/include/linux/parport.h
index f41f1d041e2c..397607a0c0eb 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -460,7 +460,6 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
void *, size_t, int);
/* IEEE1284.3 functions */
-#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
@@ -469,18 +468,6 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
extern void parport_daisy_deselect_all (struct parport *port);
extern int parport_daisy_select (struct parport *port, int daisy, int mode);
-#ifdef CONFIG_PARPORT_1284
-extern int daisy_drv_init(void);
-extern void daisy_drv_exit(void);
-#else
-static inline int daisy_drv_init(void)
-{
- return 0;
-}
-
-static inline void daisy_drv_exit(void) {}
-#endif
-
/* Lowlevel drivers _can_ call this support function to handle irqs. */
static inline void parport_generic_irq(struct parport *port)
{
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 7c4b8e27268c..1ebb88e7c184 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -40,6 +40,7 @@ void pci_disable_pasid(struct pci_dev *pdev);
void pci_restore_pasid_state(struct pci_dev *pdev);
int pci_pasid_features(struct pci_dev *pdev);
int pci_max_pasids(struct pci_dev *pdev);
+int pci_prg_resp_pasid_required(struct pci_dev *pdev);
#else /* CONFIG_PCI_PASID */
@@ -66,6 +67,10 @@ static inline int pci_max_pasids(struct pci_dev *pdev)
return -EINVAL;
}
+static inline int pci_prg_resp_pasid_required(struct pci_dev *pdev)
+{
+ return 0;
+}
#endif /* CONFIG_PCI_PASID */
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 37dab8116901..c3ffa3917f88 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -59,6 +59,8 @@ struct pci_epc_ops {
enum pci_epc_irq_type type, u16 interrupt_num);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
+ const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
+ u8 func_no);
struct module *owner;
};
@@ -97,16 +99,25 @@ struct pci_epc {
struct config_group *group;
/* spinlock to protect against concurrent access of EP controller */
spinlock_t lock;
- unsigned int features;
};
-#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
-#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
-#define EPC_FEATURE_MSIX_AVAILABLE BIT(4)
-#define EPC_FEATURE_SET_BAR(features, bar) \
- (features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
-#define EPC_FEATURE_GET_BAR(features) \
- ((features & EPC_FEATURE_BAR_MASK) >> 1)
+/**
+ * struct pci_epc_features - features supported by a EPC device per function
+ * @linkup_notifier: indicate if the EPC device can notify EPF driver on link up
+ * @msi_capable: indicate if the endpoint function has MSI capability
+ * @msix_capable: indicate if the endpoint function has MSI-X capability
+ * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver
+ * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs
+ * @bar_fixed_size: Array specifying the size supported by each BAR
+ */
+struct pci_epc_features {
+ unsigned int linkup_notifier : 1;
+ unsigned int msi_capable : 1;
+ unsigned int msix_capable : 1;
+ u8 reserved_bar;
+ u8 bar_fixed_64bit;
+ u64 bar_fixed_size[BAR_5 + 1];
+};
#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
@@ -158,6 +169,10 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
+const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
+ u8 func_no);
+unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
+ *epc_features);
struct pci_epc *pci_epc_get(const char *epc_name);
void pci_epc_put(struct pci_epc *epc);
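
The struct above replaces the packed epc->features word with a per-function capability query. A hedged sketch of an EPC driver's ->get_features() hook, assuming the pci_barno enum (BAR_0..BAR_5, as referenced by bar_fixed_size above) is visible; all values are illustrative:

    #include <linux/pci-epc.h>

    /* Illustrative capabilities: MSI only, BAR0 reserved for the
     * controller itself.
     */
    static const struct pci_epc_features example_epc_features = {
            .linkup_notifier = false,
            .msi_capable     = true,
            .msix_capable    = false,
            .reserved_bar    = 1 << BAR_0,
    };

    static const struct pci_epc_features *
    example_epc_get_features(struct pci_epc *epc, u8 func_no)
    {
            return &example_epc_features;
    }
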
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e7c51b00cdfe..77448215ef5b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -373,6 +373,9 @@ struct pci_dev {
bool match_driver; /* Skip attaching driver */
unsigned int transparent:1; /* Subtractive decode bridge */
+ unsigned int io_window:1; /* Bridge has I/O window */
+ unsigned int pref_window:1; /* Bridge has pref mem window */
+ unsigned int pref_64_window:1; /* Pref mem window is 64-bit */
unsigned int multifunction:1; /* Multi-function device */
unsigned int is_busmaster:1; /* Is busmaster */
@@ -1524,11 +1527,13 @@ void pci_ats_init(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
#else
static inline void pci_ats_init(struct pci_dev *d) { }
static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
#endif
#ifdef CONFIG_PCIE_PTM
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 5eaf39dbc388..70e86148cb1e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1140,6 +1140,8 @@
#define PCI_VENDOR_ID_TCONRAD 0x10da
#define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508
+#define PCI_VENDOR_ID_ROHM 0x10db
+
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_DEVICE_ID_NVIDIA_TNT 0x0020
#define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028
@@ -2573,6 +2575,8 @@
#define PCI_VENDOR_ID_HYGON 0x1d94
+#define PCI_VENDOR_ID_HXT 0x1dbf
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e47ef764f613..15a82ff0aefe 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -240,7 +240,6 @@ struct perf_event;
#define PERF_PMU_CAP_NO_INTERRUPT 0x01
#define PERF_PMU_CAP_NO_NMI 0x02
#define PERF_PMU_CAP_AUX_NO_SG 0x04
-#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
#define PERF_PMU_CAP_EXCLUSIVE 0x10
#define PERF_PMU_CAP_ITRACE 0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
@@ -464,7 +463,7 @@ enum perf_addr_filter_action_t {
/**
* struct perf_addr_filter - address range filter definition
* @entry: event's filter list linkage
- * @inode: object file's inode for file-based filters
+ * @path: object file's path for file-based filters
* @offset: filter range offset
* @size: filter range size (size==0 means single address trigger)
* @action: filter/start/stop
@@ -888,6 +887,9 @@ extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
+
+extern void perf_pmu_resched(struct pmu *pmu);
+
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
@@ -1055,12 +1057,18 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo
#endif
/*
- * Take a snapshot of the regs. Skip ip and frame pointer to
- * the nth caller. We only need a few of the regs:
+ * When generating a perf sample in-line, instead of from an interrupt /
+ * exception, we lack a pt_regs. This is typically used from software events
+ * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
+ *
+ * We typically don't need a full set, but (for x86) do require:
* - ip for PERF_SAMPLE_IP
* - cs for user_mode() tests
- * - bp for callchains
- * - eflags, for future purposes, just in case
+ * - sp for PERF_SAMPLE_CALLCHAIN
+ * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
+ *
+ * NOTE: assumes @regs is otherwise already 0 filled; this is important for
+ * things like PERF_SAMPLE_REGS_INTR.
*/
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index 8dd85d302b90..93c9dd133e9d 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -14,8 +14,6 @@
#ifdef CONFIG_PINCONF
-#include <linux/pinctrl/machine.h>
-
struct pinctrl_dev;
struct seq_file;
@@ -31,7 +29,6 @@ struct seq_file;
* @pin_config_group_get: get configurations for an entire pin group; should
* return -ENOTSUPP and -EINVAL using the same rules as pin_config_get.
* @pin_config_group_set: configure all pins in a group
- * @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration
* @pin_config_dbg_show: optional debugfs display hook that will provide
* per-device info for a certain pin in debugfs
* @pin_config_group_dbg_show: optional debugfs display hook that will provide
@@ -57,9 +54,6 @@ struct pinconf_ops {
unsigned selector,
unsigned long *configs,
unsigned num_configs);
- int (*pin_config_dbg_parse_modify) (struct pinctrl_dev *pctldev,
- const char *arg,
- unsigned long *config);
void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned offset);
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 5a3bb3b7c9ad..5c626fdc10db 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -74,13 +74,6 @@ struct pipe_inode_info {
*/
struct pipe_buf_operations {
/*
- * This is set to 1, if the generic pipe read/write may coalesce
- * data into an existing buffer. If this is set to 0, a new pipe
- * page segment is always used for new data.
- */
- int can_merge;
-
- /*
* ->confirm() verifies that the data in the pipe buffer is there
* and that the contents are good. If the pages in the pipe belong
* to a file system, we may need to wait for IO completion in this
@@ -108,18 +101,20 @@ struct pipe_buf_operations {
/*
* Get a reference to the pipe buffer.
*/
- void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
+ *
+ * Return: %true if the reference was successfully obtained.
*/
-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- buf->ops->get(pipe, buf);
+ return buf->ops->get(pipe, buf);
}
/**
@@ -178,10 +173,12 @@ struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);
/* Generic pipe buffer ops functions */
-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
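
Since pipe_buf_get() can now fail, duplicating a buffer reference (as the splice/tee paths do) must check the result. A hedged sketch (the error code is illustrative):

    #include <linux/pipe_fs_i.h>

    /* Sketch: take an extra reference on a pipe buffer, bailing out if
     * the underlying refcount could not be raised.
     */
    static int example_dup_pipe_buf(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
    {
            if (!pipe_buf_get(pipe, buf))
                    return -EFAULT;         /* illustrative error code */

            /* ... hand the duplicated reference to the consumer ... */
            return 0;
    }
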
diff --git a/include/linux/platform_data/ads7828.h b/include/linux/platform_data/ads7828.h
index 3245f45f9d77..a3370a007702 100644
--- a/include/linux/platform_data/ads7828.h
+++ b/include/linux/platform_data/ads7828.h
@@ -4,7 +4,7 @@
* Copyright (c) 2012 Savoir-faire Linux Inc.
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
- * For further information, see the Documentation/hwmon/ads7828 file.
+ * For further information, see the Documentation/hwmon/ads7828.rst file.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 1a1d58ebffbf..f3eaf9ec00a1 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _PLATFORM_DATA_DMA_DW_H
#define _PLATFORM_DATA_DMA_DW_H
@@ -38,10 +35,6 @@ struct dw_dma_slave {
/**
* struct dw_dma_platform_data - Controller configuration parameters
* @nr_channels: Number of channels supported by hardware (max 8)
- * @is_private: The device channels should be marked as private and not for
- * by the general purpose DMA channel allocator.
- * @is_memcpy: The device channels do support memory-to-memory transfers.
- * @is_idma32: The type of the DMA controller is iDMA32
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
@@ -53,9 +46,6 @@ struct dw_dma_slave {
*/
struct dw_dma_platform_data {
unsigned int nr_channels;
- bool is_private;
- bool is_memcpy;
- bool is_idma32;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
unsigned char chan_allocation_order;
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index 7d964e787299..9daea8d42a10 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -55,6 +55,7 @@ struct imx_dma_data {
int dma_request2; /* secondary DMA request line */
enum sdma_peripheral_type peripheral_type;
int priority;
+ struct device_node *of_node;
};
static inline int imx_dma_is_ipu(struct dma_chan *chan)
diff --git a/include/linux/platform_data/ds620.h b/include/linux/platform_data/ds620.h
index 6ef58bb77e46..f0ce22a78bb8 100644
--- a/include/linux/platform_data/ds620.h
+++ b/include/linux/platform_data/ds620.h
@@ -14,7 +14,7 @@ struct ds620_platform_data {
* 1 = PO_LOW
* 2 = PO_HIGH
*
- * (see Documentation/hwmon/ds620)
+ * (see Documentation/hwmon/ds620.rst)
*/
int pomode;
};
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h
index a867637e172d..9e46678edb2a 100644
--- a/include/linux/platform_data/gpio/gpio-amd-fch.h
+++ b/include/linux/platform_data/gpio/gpio-amd-fch.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL+ */
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* AMD FCH gpio driver platform-data
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9f0aa1b48c78..dde59fd3590f 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * For further information, see the Documentation/hwmon/ina2xx file.
+ * For further information, see the Documentation/hwmon/ina2xx.rst file.
*/
/**
diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h
index 8da8f94ee15c..2bbd0919bc89 100644
--- a/include/linux/platform_data/max197.h
+++ b/include/linux/platform_data/max197.h
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * For further information, see the Documentation/hwmon/max197 file.
+ * For further information, see the Documentation/hwmon/max197.rst file.
*/
#ifndef _PDATA_MAX197_H
diff --git a/include/linux/platform_data/media/si4713.h b/include/linux/platform_data/media/si4713.h
index 932668ad54f7..13b3eb7a9059 100644
--- a/include/linux/platform_data/media/si4713.h
+++ b/include/linux/platform_data/media/si4713.h
@@ -31,7 +31,7 @@ struct si4713_platform_data {
*/
struct si4713_rnl {
__u32 index; /* modulator index */
- __u32 frequency; /* frequency to peform rnl measurement */
+ __u32 frequency; /* frequency to perform rnl measurement */
__s32 rnl; /* result of measurement in dBuV */
__u32 reserved[4]; /* drivers and apps must init this to 0 */
};
@@ -40,7 +40,7 @@ struct si4713_rnl {
* This is the ioctl number to query for rnl. Users must pass a
* struct si4713_rnl pointer specifying desired frequency in 'frequency' field
* following driver capabilities (i.e V4L2_TUNER_CAP_LOW).
- * Driver must return measured value in the same struture, filling 'rnl' field.
+ * Driver must return measured value in the same structure, filling 'rnl' field.
*/
#define SI4713_IOC_MEASURE_RNL _IOWR('V', BASE_VIDIOC_PRIVATE + 0, \
struct si4713_rnl)
diff --git a/include/linux/platform_data/media/soc_camera_platform.h b/include/linux/platform_data/media/soc_camera_platform.h
deleted file mode 100644
index 1e5065dab430..000000000000
--- a/include/linux/platform_data/media/soc_camera_platform.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Generic Platform Camera Driver Header
- *
- * Copyright (C) 2008 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __SOC_CAMERA_H__
-#define __SOC_CAMERA_H__
-
-#include <linux/videodev2.h>
-#include <media/soc_camera.h>
-#include <media/v4l2-mediabus.h>
-
-struct device;
-
-struct soc_camera_platform_info {
- const char *format_name;
- unsigned long format_depth;
- struct v4l2_mbus_framefmt format;
- unsigned long mbus_param;
- enum v4l2_mbus_type mbus_type;
- struct soc_camera_device *icd;
- int (*set_capture)(struct soc_camera_platform_info *info, int enable);
-};
-
-static inline void soc_camera_platform_release(struct platform_device **pdev)
-{
- *pdev = NULL;
-}
-
-static inline int soc_camera_platform_add(struct soc_camera_device *icd,
- struct platform_device **pdev,
- struct soc_camera_link *plink,
- void (*release)(struct device *dev),
- int id)
-{
- struct soc_camera_subdev_desc *ssdd =
- (struct soc_camera_subdev_desc *)plink;
- struct soc_camera_platform_info *info = ssdd->drv_priv;
- int ret;
-
- if (&icd->sdesc->subdev_desc != ssdd)
- return -ENODEV;
-
- if (*pdev)
- return -EBUSY;
-
- *pdev = platform_device_alloc("soc_camera_platform", id);
- if (!*pdev)
- return -ENOMEM;
-
- info->icd = icd;
-
- (*pdev)->dev.platform_data = info;
- (*pdev)->dev.release = release;
-
- ret = platform_device_add(*pdev);
- if (ret < 0) {
- platform_device_put(*pdev);
- *pdev = NULL;
- info->icd = NULL;
- }
-
- return ret;
-}
-
-static inline void soc_camera_platform_del(const struct soc_camera_device *icd,
- struct platform_device *pdev,
- const struct soc_camera_link *plink)
-{
- const struct soc_camera_subdev_desc *ssdd =
- (const struct soc_camera_subdev_desc *)plink;
- if (&icd->sdesc->subdev_desc != ssdd || !pdev)
- return;
-
- platform_device_unregister(pdev);
-}
-
-#endif /* __SOC_CAMERA_H__ */
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index 19f5cb618c55..6d54fe3bcac9 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -35,6 +35,19 @@
#define __LINUX_PLATFORM_DATA_MLXREG_H
#define MLXREG_CORE_LABEL_MAX_SIZE 32
+#define MLXREG_CORE_WD_FEATURE_NOWAYOUT BIT(0)
+#define MLXREG_CORE_WD_FEATURE_START_AT_BOOT BIT(1)
+
+/**
+ * enum mlxreg_wdt_type - type of HW watchdog
+ *
+ * The TYPE1 HW watchdog implementation exists in old systems.
+ * All new systems have the TYPE2 HW watchdog.
+ */
+enum mlxreg_wdt_type {
+ MLX_WDT_TYPE1,
+ MLX_WDT_TYPE2,
+};
/**
* struct mlxreg_hotplug_device - I2C device data:
@@ -61,6 +74,7 @@ struct mlxreg_hotplug_device {
* @reg: attribute register;
* @mask: attribute access mask;
* @bit: attribute effective bit;
+ * @capability: attribute capability register;
* @mode: access mode;
* @np - pointer to node platform associated with attribute;
* @hpdev - hotplug device data;
@@ -72,6 +86,7 @@ struct mlxreg_core_data {
u32 reg;
u32 mask;
u32 bit;
+ u32 capability;
umode_t mode;
struct device_node *np;
struct mlxreg_hotplug_device hpdev;
@@ -107,14 +122,20 @@ struct mlxreg_core_item {
/**
* struct mlxreg_core_platform_data - platform data:
*
- * @led_data: led private data;
+ * @data: instance private data;
* @regmap: register map of parent device;
- * @counter: number of led instances;
+ * @counter: number of instances;
+ * @features: supported features of device;
+ * @version: implementation version;
+ * @identity: device identity name;
*/
struct mlxreg_core_platform_data {
struct mlxreg_core_data *data;
void *regmap;
int counter;
+ u32 features;
+ u32 version;
+ char identity[MLXREG_CORE_LABEL_MAX_SIZE];
};
/**
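
With the generalized platform data above (features/version/identity), a watchdog instance could be described as in the hedged sketch below; register offsets and labels are placeholders, not a real Mellanox register map:

    #include <linux/bits.h>
    #include <linux/kernel.h>
    #include <linux/platform_data/mlxreg.h>

    /* All offsets/labels below are hypothetical. */
    static struct mlxreg_core_data example_wd_data[] = {
            {
                    .label = "timeout",
                    .reg = 0xc9,
                    .mask = GENMASK(7, 0),
            },
    };

    static struct mlxreg_core_platform_data example_wd_pdata = {
            .data     = example_wd_data,
            .counter  = ARRAY_SIZE(example_wd_data),
            .features = MLXREG_CORE_WD_FEATURE_NOWAYOUT,
            .version  = MLX_WDT_TYPE2,
            .identity = "mlx-wdt-main",
    };
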
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index ee03d429742b..5fa115d3ea4b 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -42,7 +42,7 @@ struct ntc_thermistor_platform_data {
* read_uV()
*
* How to setup pullup_ohm, pulldown_ohm, and connect is
- * described at Documentation/hwmon/ntc_thermistor
+ * described at Documentation/hwmon/ntc_thermistor.rst
*
* pullup/down_ohm: 0 for infinite / not-connected
*
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
index eb16c6739ac2..b439f2a896e0 100644
--- a/include/linux/platform_data/spi-ep93xx.h
+++ b/include/linux/platform_data/spi-ep93xx.h
@@ -6,13 +6,9 @@ struct spi_device;
/**
* struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @chipselect: array of gpio numbers to use as chip selects
- * @num_chipselect: ARRAY_SIZE(chipselect)
* @use_dma: use DMA for the transfers
*/
struct ep93xx_spi_info {
- int *chipselect;
- int num_chipselect;
bool use_dma;
};
diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h
new file mode 100644
index 000000000000..446473a46b88
--- /dev/null
+++ b/include/linux/platform_data/wilco-ec.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ChromeOS Wilco Embedded Controller
+ *
+ * Copyright 2018 Google LLC
+ */
+
+#ifndef WILCO_EC_H
+#define WILCO_EC_H
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+/* Message flags for using the mailbox() interface */
+#define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */
+#define WILCO_EC_FLAG_EXTENDED_DATA BIT(1) /* EC returns 256 data bytes */
+#define WILCO_EC_FLAG_RAW_REQUEST BIT(2) /* Do not trim request data */
+#define WILCO_EC_FLAG_RAW_RESPONSE BIT(3) /* Do not trim response data */
+#define WILCO_EC_FLAG_RAW (WILCO_EC_FLAG_RAW_REQUEST | \
+ WILCO_EC_FLAG_RAW_RESPONSE)
+
+/* Normal commands have a maximum 32 bytes of data */
+#define EC_MAILBOX_DATA_SIZE 32
+/* Extended commands have 256 bytes of response data */
+#define EC_MAILBOX_DATA_SIZE_EXTENDED 256
+
+/**
+ * struct wilco_ec_device - Wilco Embedded Controller handle.
+ * @dev: Device handle.
+ * @mailbox_lock: Mutex to ensure one mailbox command at a time.
+ * @io_command: I/O port for mailbox command. Provided by ACPI.
+ * @io_data: I/O port for mailbox data. Provided by ACPI.
+ * @io_packet: I/O port for mailbox packet data. Provided by ACPI.
+ * @data_buffer: Buffer used for EC communication. The same buffer
+ * is used to hold the request and the response.
+ * @data_size: Size of the data buffer used for EC communication.
+ * @debugfs_pdev: The child platform_device used by the debugfs sub-driver.
+ * @rtc_pdev: The child platform_device used by the RTC sub-driver.
+ */
+struct wilco_ec_device {
+ struct device *dev;
+ struct mutex mailbox_lock;
+ struct resource *io_command;
+ struct resource *io_data;
+ struct resource *io_packet;
+ void *data_buffer;
+ size_t data_size;
+ struct platform_device *debugfs_pdev;
+ struct platform_device *rtc_pdev;
+};
+
+/**
+ * struct wilco_ec_request - Mailbox request message format.
+ * @struct_version: Should be %EC_MAILBOX_PROTO_VERSION
+ * @checksum: Sum of all bytes must be 0.
+ * @mailbox_id: Mailbox identifier, specifies the command set.
+ * @mailbox_version: Mailbox interface version %EC_MAILBOX_VERSION
+ * @reserved: Set to zero.
+ * @data_size: Length of request, data + last 2 bytes of the header.
+ * @command: Mailbox command code, unique for each mailbox_id set.
+ * @reserved_raw: Set to zero for most commands, but is used by
+ * some command types and for raw commands.
+ */
+struct wilco_ec_request {
+ u8 struct_version;
+ u8 checksum;
+ u16 mailbox_id;
+ u8 mailbox_version;
+ u8 reserved;
+ u16 data_size;
+ u8 command;
+ u8 reserved_raw;
+} __packed;
+
+/**
+ * struct wilco_ec_response - Mailbox response message format.
+ * @struct_version: Should be %EC_MAILBOX_PROTO_VERSION
+ * @checksum: Sum of all bytes must be 0.
+ * @result: Result code from the EC. Non-zero indicates an error.
+ * @data_size: Length of the response data buffer.
+ * @reserved: Set to zero.
+ * @mbox0: EC returned data at offset 0 is unused (always 0) so this byte
+ * is treated as part of the header instead of the data.
+ * @data: Response data buffer. Max size is %EC_MAILBOX_DATA_SIZE_EXTENDED.
+ */
+struct wilco_ec_response {
+ u8 struct_version;
+ u8 checksum;
+ u16 result;
+ u16 data_size;
+ u8 reserved[2];
+ u8 mbox0;
+ u8 data[0];
+} __packed;
+
+/**
+ * enum wilco_ec_msg_type - Message type to select a set of command codes.
+ * @WILCO_EC_MSG_LEGACY: Legacy EC messages for standard EC behavior.
+ * @WILCO_EC_MSG_PROPERTY: Get/Set/Sync EC controlled NVRAM property.
+ * @WILCO_EC_MSG_TELEMETRY_SHORT: 32 bytes of telemetry data provided by the EC.
+ * @WILCO_EC_MSG_TELEMETRY_LONG: 256 bytes of telemetry data provided by the EC.
+ */
+enum wilco_ec_msg_type {
+ WILCO_EC_MSG_LEGACY = 0x00f0,
+ WILCO_EC_MSG_PROPERTY = 0x00f2,
+ WILCO_EC_MSG_TELEMETRY_SHORT = 0x00f5,
+ WILCO_EC_MSG_TELEMETRY_LONG = 0x00f6,
+};
+
+/**
+ * struct wilco_ec_message - Request and response message.
+ * @type: Mailbox message type.
+ * @flags: Message flags, e.g. %WILCO_EC_FLAG_NO_RESPONSE.
+ * @command: Mailbox command code.
+ * @result: Result code from the EC. Non-zero indicates an error.
+ * @request_size: Number of bytes to send to the EC.
+ * @request_data: Buffer containing the request data.
+ * @response_size: Number of bytes expected from the EC.
+ * This is 32 by default and 256 if the flag
+ * is set for %WILCO_EC_FLAG_EXTENDED_DATA
+ * @response_data: Buffer containing the response data, should be
+ * response_size bytes and allocated by caller.
+ */
+struct wilco_ec_message {
+ enum wilco_ec_msg_type type;
+ u8 flags;
+ u8 command;
+ u8 result;
+ size_t request_size;
+ void *request_data;
+ size_t response_size;
+ void *response_data;
+};
+
+/**
+ * wilco_ec_mailbox() - Send request to the EC and receive the response.
+ * @ec: Wilco EC device.
+ * @msg: Wilco EC message.
+ *
+ * Return: Number of bytes received or negative error code on failure.
+ */
+int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg);
+
+#endif /* WILCO_EC_H */
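
A hedged sketch of the mailbox API above in use; the command byte is a placeholder, not a documented EC opcode:

    #include <linux/platform_data/wilco-ec.h>

    /* Sketch: send one legacy command and receive up to 32 bytes back. */
    static int example_ec_query(struct wilco_ec_device *ec)
    {
            u8 request = 0x01;                      /* hypothetical payload */
            u8 response[EC_MAILBOX_DATA_SIZE];
            struct wilco_ec_message msg = {
                    .type = WILCO_EC_MSG_LEGACY,
                    .command = 0x01,                /* hypothetical opcode */
                    .request_size = sizeof(request),
                    .request_data = &request,
                    .response_size = sizeof(response),
                    .response_data = response,
            };
            int ret;

            ret = wilco_ec_mailbox(ec, &msg);       /* bytes received or -errno */
            return ret < 0 ? ret : 0;
    }
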
diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h
index 23901992b9dd..23901992b9dd 100644
--- a/include/linux/platform_data/clk-lpss.h
+++ b/include/linux/platform_data/x86/clk-lpss.h
diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h
index 3ab892208343..7a37ac27d0fb 100644
--- a/include/linux/platform_data/x86/clk-pmc-atom.h
+++ b/include/linux/platform_data/x86/clk-pmc-atom.h
@@ -35,10 +35,13 @@ struct pmc_clk {
*
* @base: PMC clock register base offset
* @clks: pointer to set of registered clocks, typically 0..5
+ * @critical:	flag to indicate if firmware-enabled pmc_plt_clks
+ *		should be marked as critical or not
*/
struct pmc_clk_data {
void __iomem *base;
const struct pmc_clk *clks;
+ bool critical;
};
#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 06f7ed893928..66c19a65a514 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -643,7 +643,6 @@ struct dev_pm_info {
struct dev_pm_qos *qos;
};
-extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern void dev_pm_put_subsys_data(struct device *dev);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 1ed5874bcee0..0e8e356bed6a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
+#include <linux/cpumask.h>
/*
* Flags to control the behaviour of a genpd.
@@ -42,11 +43,22 @@
* GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered
* on, in case any of its attached devices is used
* in the wakeup path to serve system wakeups.
+ *
+ * GENPD_FLAG_CPU_DOMAIN: Instructs genpd that it should expect to get
+ * devices attached, which may belong to CPUs or
+ * possibly have subdomains with CPUs attached.
+ * This flag enables the genpd backend driver to
+ * deploy idle power management support for CPUs
+ * and groups of CPUs. Note that the backend
+ * driver must then comply with the so-called
+ * last-man-standing algorithm for the CPUs in the
+ * PM domain.
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
#define GENPD_FLAG_ALWAYS_ON (1U << 2)
#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
+#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -69,6 +81,7 @@ struct genpd_power_state {
s64 residency_ns;
struct fwnode_handle *fwnode;
ktime_t idle_time;
+ void *data;
};
struct genpd_lock_ops;
@@ -93,6 +106,7 @@ struct generic_pm_domain {
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
unsigned int performance_state; /* Aggregated max performance state */
+ cpumask_var_t cpus; /* A cpumask of the attached CPUs */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
struct opp_table *opp_table; /* OPP table of the genpd */
@@ -104,15 +118,17 @@ struct generic_pm_domain {
s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
bool max_off_time_changed;
bool cached_power_down_ok;
+ bool cached_power_down_state_idx;
int (*attach_dev)(struct generic_pm_domain *domain,
struct device *dev);
void (*detach_dev)(struct generic_pm_domain *domain,
struct device *dev);
unsigned int flags; /* Bit field of configs for genpd */
struct genpd_power_state *states;
+ void (*free_states)(struct genpd_power_state *states,
+ unsigned int state_count);
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
- void *free; /* Free the state that was allocated for default */
ktime_t on_time;
ktime_t accounting_time;
const struct genpd_lock_ops *lock_ops;
@@ -159,6 +175,7 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
+ int cpu;
unsigned int performance_state;
void *data;
};
@@ -187,6 +204,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
+#ifdef CONFIG_CPU_IDLE
+extern struct dev_power_governor pm_domain_cpu_gov;
+#endif
#else
static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 24c757a32a7b..b150fe97ce5a 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -102,6 +102,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+ unsigned long u_volt);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq);
@@ -207,6 +209,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
return ERR_PTR(-ENOTSUPP);
}
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+ unsigned long u_volt)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 4238dde0aaf0..0ff134d6575a 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -96,7 +96,6 @@ static inline void device_set_wakeup_path(struct device *dev)
/* drivers/base/power/wakeup.c */
extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
extern struct wakeup_source *wakeup_source_create(const char *name);
-extern void wakeup_source_drop(struct wakeup_source *ws);
extern void wakeup_source_destroy(struct wakeup_source *ws);
extern void wakeup_source_add(struct wakeup_source *ws);
extern void wakeup_source_remove(struct wakeup_source *ws);
@@ -134,8 +133,6 @@ static inline struct wakeup_source *wakeup_source_create(const char *name)
return NULL;
}
-static inline void wakeup_source_drop(struct wakeup_source *ws) {}
-
static inline void wakeup_source_destroy(struct wakeup_source *ws) {}
static inline void wakeup_source_add(struct wakeup_source *ws) {}
@@ -204,12 +201,6 @@ static inline void wakeup_source_init(struct wakeup_source *ws,
wakeup_source_add(ws);
}
-static inline void wakeup_source_trash(struct wakeup_source *ws)
-{
- wakeup_source_remove(ws);
- wakeup_source_drop(ws);
-}
-
static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
return pm_wakeup_ws_event(ws, msec, false);
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 5046bad0c1c5..d6d980a681c7 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -83,9 +83,6 @@
#define MUTEX_DEBUG_FREE 0x22
#define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA)
-/********** lib/flex_array.c **********/
-#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
-
/********** security/ **********/
#define KEY_DESTROY 0xbd
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index d0e1f1522a78..52a283ba0465 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -73,6 +73,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
int (*show)(struct seq_file *, void *),
proc_write_t write,
void *data);
+extern struct pid *tgid_pidfd_to_pid(const struct file *file);
#else /* CONFIG_PROC_FS */
@@ -114,6 +115,11 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
#define proc_create_net(name, mode, parent, state_size, ops) ({NULL;})
#define proc_create_net_single(name, mode, parent, show, data) ({NULL;})
+static inline struct pid *tgid_pidfd_to_pid(const struct file *file)
+{
+ return ERR_PTR(-EBADF);
+}
+
#endif /* CONFIG_PROC_FS */
struct net;
diff --git a/include/linux/property.h b/include/linux/property.h
index 65d3420dd5d1..a29369c89e6e 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -13,6 +13,7 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/bits.h>
#include <linux/fwnode.h>
#include <linux/types.h>
@@ -304,6 +305,23 @@ struct fwnode_handle *
fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
u32 endpoint);
+/*
+ * Fwnode lookup flags
+ *
+ * @FWNODE_GRAPH_ENDPOINT_NEXT: In the case of no exact match, look for the
+ * closest endpoint ID greater than the specified
+ * one.
+ * @FWNODE_GRAPH_DEVICE_DISABLED: The device to which the remote endpoint
+ *				  of the given endpoint belongs may be
+ *				  disabled.
+ */
+#define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0)
+#define FWNODE_GRAPH_DEVICE_DISABLED BIT(1)
+
+struct fwnode_handle *
+fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
+ u32 port, u32 endpoint, unsigned long flags);
+
#define fwnode_graph_for_each_endpoint(fwnode, child) \
for (child = NULL; \
(child = fwnode_graph_get_next_endpoint(fwnode, child)); )
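
A hedged sketch of the new lookup with both flags above combined (port/endpoint numbers illustrative):

    #include <linux/property.h>

    /* Sketch: find port 0 / endpoint 0, falling back to the next higher
     * endpoint ID, and accept endpoints on disabled remote devices.
     */
    static struct fwnode_handle *
    example_get_endpoint(struct fwnode_handle *fwnode)
    {
            return fwnode_graph_get_endpoint_by_id(fwnode, 0, 0,
                                                   FWNODE_GRAPH_ENDPOINT_NEXT |
                                                   FWNODE_GRAPH_DEVICE_DISABLED);
    }
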
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 827c601841c4..6f89fc8d4b8e 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -5,8 +5,7 @@
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
- * SEV spec 0.14 is available at:
- * http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf
+ * SEV API spec is available at https://developer.amd.com/sev
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index edb9b040c94c..d5084ebd9f03 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -9,6 +9,13 @@
#include <linux/bug.h> /* For BUG_ON. */
#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
#include <uapi/linux/ptrace.h>
+#include <linux/seccomp.h>
+
+/* Add sp to seccomp_data, as seccomp is user API, we don't want to modify it */
+struct syscall_info {
+ __u64 sp;
+ struct seccomp_data data;
+};
extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
@@ -407,9 +414,7 @@ static inline void user_single_step_report(struct pt_regs *regs)
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
-extern int task_current_syscall(struct task_struct *target, long *callno,
- unsigned long args[6], unsigned int maxargs,
- unsigned long *sp, unsigned long *pc);
+extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
#endif
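
With task_current_syscall() reworked above to fill a struct syscall_info, a hedged sketch of a caller (field access per the seccomp_data layout; names illustrative):

    #include <linux/printk.h>
    #include <linux/ptrace.h>

    /* Sketch: report the syscall a stopped task is blocked in. */
    static void example_report_syscall(struct task_struct *task)
    {
            struct syscall_info info;

            if (task_current_syscall(task, &info))
                    return;         /* task is not stopped in a syscall */

            pr_info("nr=%d sp=0x%llx ip=0x%llx\n",
                    info.data.nr, info.sp, info.data.instruction_pointer);
    }
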
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index d5199b507d79..b628abfffacc 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -242,11 +242,7 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
- * @config: configure duty cycles and period length for this PWM
- * @set_polarity: configure the polarity of this PWM
* @capture: capture and report PWM signal
- * @enable: enable PWM output toggling
- * @disable: disable PWM output toggling
* @apply: atomically apply a new PWM config. The state argument
* should be adjusted with the real hardware config (if the
* hardware must approximate the period or duty_cycle value, state should
@@ -254,53 +250,56 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
* @get_state: get the current PWM state. This function is only
* called once per PWM device when the PWM chip is
* registered.
- * @dbg_show: optional routine to show contents in debugfs
* @owner: helps prevent removal of modules exporting active PWMs
+ * @config: configure duty cycles and period length for this PWM
+ * @set_polarity: configure the polarity of this PWM
+ * @enable: enable PWM output toggling
+ * @disable: disable PWM output toggling
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
- int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns);
- int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity);
int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_capture *result, unsigned long timeout);
- int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
- void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state);
void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state);
-#ifdef CONFIG_DEBUG_FS
- void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
-#endif
struct module *owner;
+
+ /* Only used by legacy drivers */
+ int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns);
+ int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity);
+ int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
+ void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
};
/**
* struct pwm_chip - abstract a PWM controller
* @dev: device providing the PWMs
- * @list: list node for internal use
* @ops: callbacks for this PWM controller
* @base: number of first PWM controlled by this chip
* @npwm: number of PWMs controlled by this chip
- * @pwms: array of PWM devices allocated by the framework
* @of_xlate: request a PWM device given a device tree PWM specifier
* @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
+ * @list: list node for internal use
+ * @pwms: array of PWM devices allocated by the framework
*/
struct pwm_chip {
struct device *dev;
- struct list_head list;
const struct pwm_ops *ops;
int base;
unsigned int npwm;
- struct pwm_device *pwms;
-
struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
const struct of_phandle_args *args);
unsigned int of_pwm_n_cells;
+
+ /* only used internally by the PWM framework */
+ struct list_head list;
+ struct pwm_device *pwms;
};
/**
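
With the legacy callbacks grouped at the tail, a new-style driver implements only the atomic hooks; a minimal sketch (the foo_* names are hypothetical):

	static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
				 struct pwm_state *state)
	{
		/* Program period, duty cycle, polarity and enable state in
		 * one shot; legacy .config/.enable/.disable are not needed. */
		return 0;
	}

	static const struct pwm_ops foo_pwm_ops = {
		.apply = foo_pwm_apply,
		.owner = THIS_MODULE,
	};
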
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f6165d304b4d..48841e5dab90 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -1338,7 +1338,6 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
}
/* Let SB update */
- mmiowb();
return rc;
}
@@ -1374,7 +1373,6 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info,
/* Both segments (interrupts & acks) are written to the same address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
- mmiowb();
barrier();
}
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 6cdb1db776cf..922bb6848813 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -878,9 +878,11 @@ static inline void rcu_head_init(struct rcu_head *rhp)
static inline bool
rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
{
- if (READ_ONCE(rhp->func) == f)
+ rcu_callback_t func = READ_ONCE(rhp->func);
+
+ if (func == f)
return true;
- WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
+ WARN_ON_ONCE(func != (rcu_callback_t)~0L);
return false;
}
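
The semantics are unchanged (a single snapshot of ->func instead of two reads); a hedged usage sketch of this debug helper, assuming the rcu_head was initialized with rcu_head_init() at allocation (obj and my_reclaim are hypothetical):

	if (rcu_head_after_call_rcu(&obj->rh, my_reclaim))
		pr_warn("obj %p already queued for reclaim\n", obj);
	else
		call_rcu(&obj->rh, my_reclaim);
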
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 90bfa3279a01..563290fc194f 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -18,7 +18,7 @@
* awoken.
*/
struct rcuwait {
- struct task_struct *task;
+ struct task_struct __rcu *task;
};
#define __RCUWAIT_INITIALIZER(name) \
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f3f76051e8b0..aaf3cee70439 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -478,6 +478,11 @@ static inline int regulator_is_supported_voltage(struct regulator *regulator,
return 0;
}
+static inline unsigned int regulator_get_linear_step(struct regulator *regulator)
+{
+ return 0;
+}
+
static inline int regulator_set_current_limit(struct regulator *regulator,
int min_uA, int max_uA)
{
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 507a2b524208..04d04709f2bd 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -345,9 +345,9 @@ struct firmware;
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
* @da_to_va: optional platform hook to perform address translations
- * @load_rsc_table: load resource table from firmware image
+ * @parse_fw: parse firmware to extract information (e.g. resource table)
* @find_loaded_rsc_table: find the loaded resource table
- * @load: load firmeware to memory, where the remote processor
+ * @load: load firmware to memory, where the remote processor
* expects to find it
* @sanity_check: sanity check the fw image
* @get_boot_addr: get boot address to entry point specified in firmware
@@ -554,11 +554,11 @@ struct rproc_vdev {
struct kref refcount;
struct rproc_subdev subdev;
+ struct device dev;
unsigned int id;
struct list_head node;
struct rproc *rproc;
- struct virtio_device vdev;
struct rproc_vring vring[RVDEV_NUM_VRINGS];
u32 rsc_offset;
u32 index;
@@ -601,7 +601,7 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc,
static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
- return container_of(vdev, struct rproc_vdev, vdev);
+ return container_of(vdev->dev.parent, struct rproc_vdev, dev);
}
static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 5b9ae62272bb..1a40277b512c 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
@@ -187,8 +187,6 @@ void ring_buffer_set_clock(struct ring_buffer *buffer,
void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
-size_t ring_buffer_page_len(void *page);
-
size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
deleted file mode 100644
index e47568363e5e..000000000000
--- a/include/linux/rwsem-spinlock.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem-spinlock.h: fallback C implementation
- *
- * Copyright (c) 2001 David Howells (dhowells@redhat.com).
- * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- */
-
-#ifndef _LINUX_RWSEM_SPINLOCK_H
-#define _LINUX_RWSEM_SPINLOCK_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-/*
- * the rw-semaphore definition
- * - if count is 0 then there are no active readers or writers
- * - if count is +ve then that is the number of active readers
- * - if count is -1 then there is one active writer
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct rw_semaphore {
- __s32 count;
- raw_spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-
-extern void __down_read(struct rw_semaphore *sem);
-extern int __must_check __down_read_killable(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __must_check __down_write_killable(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
-extern int rwsem_is_locked(struct rw_semaphore *sem);
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 67dbb57508b1..2ea18a3def04 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -20,25 +20,30 @@
#include <linux/osq_lock.h>
#endif
-struct rw_semaphore;
-
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-#include <linux/rwsem-spinlock.h> /* use a generic implementation */
-#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE
-#else
-/* All arch specific implementations share the same struct */
+/*
+ * For an uncontended rwsem, count and owner are the only fields a task
+ * needs to touch when acquiring the rwsem. So they are put next to each
+ * other to increase the chance that they will share the same cacheline.
+ *
+ * In a contended rwsem, the owner is likely the most frequently accessed
+ * field in the structure as the optimistic waiter that holds the osq lock
+ * will spin on owner. For an embedded rwsem, other hot fields in the
+ * containing structure should be moved further away from the rwsem to
+ * reduce the chance that they will share the same cacheline, causing
+ * a cacheline bouncing problem.
+ */
struct rw_semaphore {
atomic_long_t count;
- struct list_head wait_list;
- raw_spinlock_t wait_lock;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- struct optimistic_spin_queue osq; /* spinner MCS lock */
/*
* Write owner. Used as a speculative check to see
* if the owner is running on the cpu.
*/
struct task_struct *owner;
+ struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
+ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
@@ -50,24 +55,14 @@ struct rw_semaphore {
*/
#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L)
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/* Include the arch specific part */
-#include <asm/rwsem.h>
-
/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return atomic_long_read(&sem->count) != 0;
}
+#define RWSEM_UNLOCKED_VALUE 0L
#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
-#endif
/* Common initializer macros and functions */
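
The layout comment above implies a placement rule for structures embedding an rwsem; a hypothetical illustration:

	struct foo {
		struct rw_semaphore sem;	/* count and owner share a line */
		unsigned long cold_config;	/* rarely written: fine nearby */
		/* keep frequently written fields off the rwsem's cacheline so
		 * optimistic spinners polling sem->owner do not bounce it */
		atomic_long_t hot_counter ____cacheline_aligned;
	};
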
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 14d558146aea..20f3e3f029b9 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -330,7 +330,7 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
/*
* This one is special, since it doesn't actually clear the bit, rather it
* sets the corresponding bit in the ->cleared mask instead. Paired with
- * the caller doing sbitmap_batch_clear() if a given index is full, which
+ * the caller doing sbitmap_deferred_clear() if a given index is full, which
* will clear the previously freed entries in the corresponding ->word.
*/
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b96f0d0b5b8f..b4be960c7e5d 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -339,12 +339,12 @@ int sg_alloc_table_chained(struct sg_table *table, int nents,
/*
* sg page iterator
*
- * Iterates over sg entries page-by-page. On each successful iteration,
- * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
- * to get the current page and its dma address. @piter->sg will point to the
- * sg holding this page and @piter->sg_pgoffset to the page's page offset
- * within the sg. The iteration will stop either when a maximum number of sg
- * entries was reached or a terminating sg (sg_last(sg) == true) was reached.
+ * Iterates over sg entries page-by-page. On each successful iteration, you
+ * can call sg_page_iter_page(@piter) to get the current page.
+ * @piter->sg will point to the sg holding this page and
+ * @piter->sg_pgoffset to the page's page offset within the sg. The iteration
+ * will stop either when a maximum number of sg entries was reached or a
+ * terminating sg (sg_last(sg) == true) was reached.
*/
struct sg_page_iter {
struct scatterlist *sg; /* sg holding the page */
@@ -356,7 +356,19 @@ struct sg_page_iter {
* next step */
};
+/*
+ * sg page iterator for DMA addresses
+ *
+ * This is the same as sg_page_iter however you can call
+ * sg_page_iter_dma_address(@dma_iter) to get the page's DMA
+ * address. sg_page_iter_page() cannot be called on this iterator.
+ */
+struct sg_dma_page_iter {
+ struct sg_page_iter base;
+};
+
bool __sg_page_iter_next(struct sg_page_iter *piter);
+bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter);
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset);
@@ -372,11 +384,13 @@ static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
/**
* sg_page_iter_dma_address - get the dma address of the current page held by
* the page iterator.
- * @piter: page iterator holding the page
+ * @dma_iter: page iterator holding the page
*/
-static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
+static inline dma_addr_t
+sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
{
- return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
+ return sg_dma_address(dma_iter->base.sg) +
+ (dma_iter->base.sg_pgoffset << PAGE_SHIFT);
}
/**
@@ -385,11 +399,28 @@ static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
* @piter: page iterator to hold current page, sg, sg_pgoffset
* @nents: maximum number of sg entries to iterate over
* @pgoffset: starting page offset
+ *
+ * Callers may use sg_page_iter_page() to get each page pointer.
*/
#define for_each_sg_page(sglist, piter, nents, pgoffset) \
for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
__sg_page_iter_next(piter);)
+/**
+ * for_each_sg_dma_page - iterate over the pages of the given sg list
+ * @sglist: sglist to iterate over
+ * @dma_iter: page iterator to hold current page
+ * @dma_nents: maximum number of sg entries to iterate over; this is the
+ *             value returned from dma_map_sg
+ * @pgoffset: starting page offset
+ *
+ * Callers may use sg_page_iter_dma_address() to get each page's DMA address.
+ */
+#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
+ for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
+ pgoffset); \
+ __sg_page_iter_dma_next(dma_iter);)
+
/*
* Mapping sg iterator
*
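
A hedged caller sketch of the new DMA page iterator; note that the count handed to for_each_sg_dma_page() is the dma_map_sg() return value, not the original nents:

	struct sg_dma_page_iter dma_iter;
	int count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return -EIO;

	for_each_sg_dma_page(sgl, &dma_iter, count, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);

		/* program addr into the device's DMA engine */
	}
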
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1549584a1538..50606a6e73d6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1057,7 +1057,6 @@ struct task_struct {
#ifdef CONFIG_RSEQ
struct rseq __user *rseq;
- u32 rseq_len;
u32 rseq_sig;
/*
* RmW on rseq_event_mask must be performed atomically
@@ -1855,12 +1854,10 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
if (clone_flags & CLONE_THREAD) {
t->rseq = NULL;
- t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
} else {
t->rseq = current->rseq;
- t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
}
@@ -1869,7 +1866,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
static inline void rseq_execve(struct task_struct *t)
{
t->rseq = NULL;
- t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
}
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 0cd9f10423fb..a3fda9f024c3 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+/*
+ * This has to be called after a get_task_mm()/mmget_not_zero()
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+ * the only case that holds the mmap_sem for reading that must call
+ * this function. Generally if the mmap_sem is hold for reading
+ * there's no need of this check after get_task_mm()/mmget_not_zero().
+ *
+ * This function can be obsoleted and the check can be removed once
+ * the coredump code holds the mmap_sem for writing before invoking
+ * the ->core_dump methods.
+ */
+static inline bool mmget_still_valid(struct mm_struct *mm)
+{
+ return likely(!mm->core_state);
+}
+
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
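
The pattern the comment above describes, as a hedged sketch:

	mm = get_task_mm(task);
	if (!mm)
		return -ESRCH;

	down_write(&mm->mmap_sem);
	if (!mmget_still_valid(mm))
		goto unlock;	/* coredump in flight: leave the vmas alone */
	/* safe to modify vmas here */
unlock:
	up_write(&mm->mmap_sem);
	mmput(mm);
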
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index ae5655197698..e412c092c1e8 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -418,10 +418,20 @@ static inline void set_restore_sigmask(void)
set_thread_flag(TIF_RESTORE_SIGMASK);
WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
+
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
+
static inline void clear_restore_sigmask(void)
{
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
static inline bool test_restore_sigmask(void)
{
return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -439,6 +449,10 @@ static inline void set_restore_sigmask(void)
current->restore_sigmask = true;
WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ tsk->restore_sigmask = false;
+}
static inline void clear_restore_sigmask(void)
{
current->restore_sigmask = false;
@@ -447,6 +461,10 @@ static inline bool test_restore_sigmask(void)
{
return current->restore_sigmask;
}
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ return tsk->restore_sigmask;
+}
static inline bool test_and_clear_restore_sigmask(void)
{
if (!current->restore_sigmask)
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 2e97a2227045..f1227f2c38a4 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -76,6 +76,7 @@ extern void exit_itimers(struct signal_struct *);
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
+struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 57c7ed3fe465..cfc0a89a7159 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -76,8 +76,8 @@ struct sched_domain_shared {
struct sched_domain {
/* These fields must be setup */
- struct sched_domain *parent; /* top domain must be null terminated */
- struct sched_domain *child; /* bottom domain must be null terminated */
+ struct sched_domain __rcu *parent; /* top domain must be null terminated */
+ struct sched_domain __rcu *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index c7b5f86b91a1..468d2565a9fe 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -31,6 +31,13 @@ struct user_struct {
atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
+ /*
+ * These pointers can only change from NULL to a non-NULL value once.
+ * Writes are protected by key_user_keyring_mutex.
+ * Unlocked readers should use READ_ONCE() unless they know that
+ * install_user_keyrings() has been called successfully (which sets
+ * these members to non-NULL values, preventing further modifications).
+ */
struct key *uid_keyring; /* UID specific keyring */
struct key *session_keyring; /* UID's default session keyring */
#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 2b35a43d11d6..49f2685324b0 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -53,6 +53,9 @@ struct msg_msg;
struct xattr;
struct xfrm_sec_ctx;
struct mm_struct;
+struct fs_context;
+struct fs_parameter;
+enum fs_value_type;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -61,7 +64,7 @@ struct mm_struct;
/* If capable is being called by a setid function */
#define CAP_OPT_INSETID BIT(2)
-/* LSM Agnostic defines for sb_set_mnt_opts */
+/* LSM Agnostic defines for fs_context::lsm_flags */
#define SECURITY_LSM_NATIVE_LABELS 1
struct ctl_table;
@@ -223,6 +226,8 @@ int security_bprm_set_creds(struct linux_binprm *bprm);
int security_bprm_check(struct linux_binprm *bprm);
void security_bprm_committing_creds(struct linux_binprm *bprm);
void security_bprm_committed_creds(struct linux_binprm *bprm);
+int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
+int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
int security_sb_alloc(struct super_block *sb);
void security_sb_free(struct super_block *sb);
void security_free_mnt_opts(void **mnt_opts);
@@ -519,6 +524,17 @@ static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
{
}
+static inline int security_fs_context_dup(struct fs_context *fc,
+ struct fs_context *src_fc)
+{
+ return 0;
+}
+static inline int security_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ return -ENOPARAM;
+}
+
static inline int security_sb_alloc(struct super_block *sb)
{
return 0;
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 2a986d282a97..b5071497b8cb 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -17,6 +17,17 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
+#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
+static inline int set_direct_map_invalid_noflush(struct page *page)
+{
+ return 0;
+}
+static inline int set_direct_map_default_noflush(struct page *page)
+{
+ return 0;
+}
+#endif
+
#ifndef set_mce_nospec
static inline int set_mce_nospec(unsigned long pfn)
{
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index f3fb1edb3526..20d815a33145 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -21,6 +21,7 @@ struct shmem_inode_info {
struct list_head swaplist; /* chain of maybes on swap */
struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
+ atomic_t stop_eviction; /* hold when working on inode */
struct inode vfs_inode;
};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 27beb549ffbe..9027a8c4219f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -327,26 +327,49 @@ struct skb_frag_struct {
#endif
};
+/**
+ * skb_frag_size - Returns the size of a skb fragment
+ * @frag: skb fragment
+ */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
return frag->size;
}
+/**
+ * skb_frag_size_set - Sets the size of a skb fragment
+ * @frag: skb fragment
+ * @size: size of fragment
+ */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
frag->size = size;
}
+/**
+ * skb_frag_size_add - Increments the size of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to add
+ */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
frag->size += delta;
}
+/**
+ * skb_frag_size_sub - Decrements the size of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to subtract
+ */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
frag->size -= delta;
}
+/**
+ * skb_frag_must_loop - Test if @p is a high memory page
+ * @p: fragment's page
+ */
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
@@ -590,7 +613,7 @@ typedef unsigned int sk_buff_data_t;
typedef unsigned char *sk_buff_data_t;
#endif
-/**
+/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
* @prev: Previous buffer in list
@@ -648,7 +671,7 @@ typedef unsigned char *sk_buff_data_t;
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
- * @napi_id: id of the NAPI struct this skb came from
+ * @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
* @vlan_proto: vlan encapsulation protocol
@@ -883,7 +906,10 @@ struct sk_buff {
#define SKB_ALLOC_RX 0x02
#define SKB_ALLOC_NAPI 0x04
-/* Returns true if the skb was allocated from PFMEMALLOC reserves */
+/**
+ * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
+ * @skb: buffer
+ */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
return unlikely(skb->pfmemalloc);
@@ -905,7 +931,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
*/
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
- /* If refdst was not refcounted, check we still are in a
+ /* If refdst was not refcounted, check we still are in a
* rcu_read_lock section
*/
WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
@@ -952,6 +978,10 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb)
return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}
+/**
+ * skb_rtable - Returns the skb &rtable
+ * @skb: buffer
+ */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
return (struct rtable *)skb_dst(skb);
@@ -966,6 +996,10 @@ static inline bool skb_pkt_type_ok(u32 ptype)
return ptype <= PACKET_OTHERHOST;
}
+/**
+ * skb_napi_id - Returns the skb's NAPI id
+ * @skb: buffer
+ */
static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -975,7 +1009,12 @@ static inline unsigned int skb_napi_id(const struct sk_buff *skb)
#endif
}
-/* decrement the reference count and return true if we can free the skb */
+/**
+ * skb_unref - decrement the skb's reference count
+ * @skb: buffer
+ *
+ * Returns true if we can free the skb.
+ */
static inline bool skb_unref(struct sk_buff *skb)
{
if (unlikely(!skb))
@@ -1005,6 +1044,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
+
+/**
+ * alloc_skb - allocate a network buffer
+ * @size: size to allocate
+ * @priority: allocation mask
+ *
+ * This function is a convenient wrapper around __alloc_skb().
+ */
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
{
@@ -1047,6 +1094,13 @@ static inline bool skb_fclone_busy(const struct sock *sk,
fclones->skb2.sk == sk;
}
+/**
+ * alloc_skb_fclone - allocate a network buffer from fclone cache
+ * @size: size to allocate
+ * @priority: allocation mask
+ *
+ * This function is a convenient wrapper around __alloc_skb().
+ */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
@@ -4232,10 +4286,10 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}
+/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
- return skb_is_gso(skb) &&
- skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
+ return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}
static inline void skb_gso_reset(struct sk_buff *skb)
@@ -4323,7 +4377,7 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
/* Local Checksum Offload.
* Compute outer checksum based on the assumption that the
* inner checksum will be offloaded later.
- * See Documentation/networking/checksum-offloads.txt for
+ * See Documentation/networking/checksum-offloads.rst for
* explanation of how this works.
* Fill in outer checksum adjustment (e.g. with sum of outer
* pseudo-header) before calling.
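
The skb_is_gso_tcp() change above moves the skb_is_gso() test to the caller; the resulting contract, sketched:

	/* skb_is_gso_tcp() may now be called only on GSO skbs; establish
	 * that first (handle_tso() is hypothetical): */
	if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
		handle_tso(skb);
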
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 11b45f7ae405..9449b19c5f10 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -32,6 +32,8 @@
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
+/* Use GFP_DMA32 memory */
+#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index d0884b525001..9d1bc65d226c 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -29,7 +29,7 @@ struct smpboot_thread_data;
* @thread_comm: The base name of the thread
*/
struct smp_hotplug_thread {
- struct task_struct __percpu **store;
+ struct task_struct * __percpu *store;
struct list_head list;
int (*thread_should_run)(unsigned int cpu);
void (*thread_fn)(unsigned int cpu);
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 6016daeecee4..b57cd8bf96e2 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -26,7 +26,7 @@ typedef __kernel_sa_family_t sa_family_t;
/*
* 1003.1g requires sa_family_t and that sa_data is char.
*/
-
+
struct sockaddr {
sa_family_t sa_family; /* address family, AF_xxx */
char sa_data[14]; /* 14 bytes of protocol address */
@@ -44,7 +44,7 @@ struct linger {
* system, not 4.3. Thus msg_accrights(len) are now missing. They
* belong in an obscure libc emulation or the bin.
*/
-
+
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
@@ -54,7 +54,7 @@ struct msghdr {
unsigned int msg_flags; /* flags on received message */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
};
-
+
struct user_msghdr {
void __user *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
@@ -122,7 +122,7 @@ struct cmsghdr {
* inside range, given by msg->msg_controllen before using
* ancillary object DATA. --ANK (980731)
*/
-
+
static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
struct cmsghdr *__cmsg)
{
@@ -264,10 +264,10 @@ struct ucred {
/* Maximum queue length specifiable by listen. */
#define SOMAXCONN 128
-/* Flags we can use with send/ and recv.
+/* Flags we can use with send/ and recv.
Added those for 1003.1g not all are supported yet
*/
-
+
#define MSG_OOB 1
#define MSG_PEEK 2
#define MSG_DONTROUTE 4
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index c1c59473cef9..6005f0126631 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -25,6 +25,7 @@ struct dma_chan;
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
+ u8 dma_burst_size;
bool is_slave;
/* DMA engine specific config */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 3703d0dcac2e..af9ff2f0f1b2 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -295,6 +295,10 @@ int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sg);
+
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op);
+
#else
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
@@ -310,6 +314,14 @@ spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
struct sg_table *sg)
{
}
+
+static inline
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ return false;
+}
+
#endif /* CONFIG_SPI_MEM */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 662b336aa2e4..053abd22ad31 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -143,7 +143,7 @@ struct spi_device {
u32 max_speed_hz;
u8 chip_select;
u8 bits_per_word;
- u16 mode;
+ u32 mode;
#define SPI_CPHA 0x01 /* clock phase */
#define SPI_CPOL 0x02 /* clock polarity */
#define SPI_MODE_0 (0|0) /* (original MicroWire) */
@@ -330,6 +330,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* must fail if an unrecognized or unsupported mode is requested.
* It's always safe to call this unless transfers are pending on
* the device whose settings are being modified.
+ * @set_cs_timing: optional hook for SPI devices to request that the SPI
+ * master controller configure a specific CS setup time, hold time and
+ * inactive delay, in terms of clock counts
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
@@ -363,6 +366,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @unprepare_transfer_hardware: there are currently no more messages on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
+ *
* @set_cs: set the logic level of the chip select line. May be called
* from interrupt context.
* @prepare_message: set up the controller to transfer a single message,
@@ -439,13 +443,12 @@ struct spi_controller {
u16 dma_alignment;
/* spi_device.mode flags understood by this controller driver */
- u16 mode_bits;
+ u32 mode_bits;
/* bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
-#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1))
-#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1))
+#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)
/* limits on transfer speed */
u32 min_speed_hz;
@@ -489,6 +492,17 @@ struct spi_controller {
*/
int (*setup)(struct spi_device *spi);
+ /*
+ * set_cs_timing() method is for SPI controllers that support
+ * configuring CS timing.
+ *
+ * This hook allows SPI client drivers to request SPI controllers
+ * to configure specific CS timing through spi_set_cs_timing() after
+ * spi_setup().
+ */
+ void (*set_cs_timing)(struct spi_device *spi, u8 setup_clk_cycles,
+ u8 hold_clk_cycles, u8 inactive_clk_cycles);
+
/* bidirectional bulk transfers
*
* + The transfer() method may not sleep; its main role is
@@ -1277,7 +1291,7 @@ struct spi_board_info {
/* mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
- u16 mode;
+ u32 mode;
/* ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
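
A controller-side sketch of the new optional hook (foo_* names are hypothetical; units are clock counts, as documented above):

	static void foo_set_cs_timing(struct spi_device *spi, u8 setup,
				      u8 hold, u8 inactive)
	{
		/* program CS setup/hold/inactive delays into controller regs */
	}

	/* in the controller's probe routine: */
	ctlr->set_cs_timing = foo_set_cs_timing;
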
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index b7e021b274dc..4444c2a992cb 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -44,6 +44,7 @@ extern int spi_bitbang_setup_transfer(struct spi_device *spi,
/* start or stop queue processing */
extern int spi_bitbang_start(struct spi_bitbang *spi);
+extern int spi_bitbang_init(struct spi_bitbang *spi);
extern void spi_bitbang_stop(struct spi_bitbang *spi);
#endif /* __SPI_BITBANG_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e089157dcf97..ed7c4d6b8235 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -57,6 +57,7 @@
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>
+#include <asm/mmiowb.h>
/*
@@ -178,6 +179,7 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
__acquire(lock);
arch_spin_lock(&lock->raw_lock);
+ mmiowb_spin_lock();
}
#ifndef arch_spin_lock_flags
@@ -189,15 +191,22 @@ do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lo
{
__acquire(lock);
arch_spin_lock_flags(&lock->raw_lock, *flags);
+ mmiowb_spin_lock();
}
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
- return arch_spin_trylock(&(lock)->raw_lock);
+ int ret = arch_spin_trylock(&(lock)->raw_lock);
+
+ if (ret)
+ mmiowb_spin_lock();
+
+ return ret;
}
static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
+ mmiowb_spin_unlock();
arch_spin_unlock(&lock->raw_lock);
__release(lock);
}
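
Hooking mmiowb into the spinlock paths is what lets drivers such as qed (above) drop explicit mmiowb() calls; a hedged driver-side sketch (hw and DOORBELL are hypothetical):

	spin_lock_irqsave(&hw->lock, flags);
	writel(val, hw->regs + DOORBELL);
	/* no mmiowb() needed: do_raw_spin_unlock() now orders the MMIO
	 * write before the lock release on architectures that require it */
	spin_unlock_irqrestore(&hw->lock, flags);
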
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index c495b2d51569..e432cc92c73d 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -56,45 +56,11 @@ struct srcu_struct { };
void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void (*func)(struct rcu_head *head));
-void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
+void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
-/**
- * cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @ssp: structure to clean up.
- *
- * Must invoke this after you are finished using a given srcu_struct that
- * was initialized via init_srcu_struct(), else you leak memory.
- */
-static inline void cleanup_srcu_struct(struct srcu_struct *ssp)
-{
- _cleanup_srcu_struct(ssp, false);
-}
-
-/**
- * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
- * @ssp: structure to clean up.
- *
- * Must invoke this after you are finished using a given srcu_struct that
- * was initialized via init_srcu_struct(), else you leak memory. Also,
- * all grace-period processing must have completed.
- *
- * "Completed" means that the last synchronize_srcu() and
- * synchronize_srcu_expedited() calls must have returned before the call
- * to cleanup_srcu_struct_quiesced(). It also means that the callback
- * from the last call_srcu() must have been invoked before the call to
- * cleanup_srcu_struct_quiesced(), but you can use srcu_barrier() to help
- * with this last. Violating these rules will get you a WARN_ON() splat
- * (with high probability, anyway), and will also cause the srcu_struct
- * to be leaked.
- */
-static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
-{
- _cleanup_srcu_struct(ssp, true);
-}
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
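
With the quiesced variant removed, there is a single teardown entry point; a minimal lifecycle sketch (my_srcu is hypothetical):

	struct srcu_struct my_srcu;

	init_srcu_struct(&my_srcu);
	/* readers: idx = srcu_read_lock(&my_srcu);
	 *          ...
	 *          srcu_read_unlock(&my_srcu, idx); */
	synchronize_srcu(&my_srcu);	/* or srcu_barrier() after call_srcu() */
	cleanup_srcu_struct(&my_srcu);	/* all readers and callbacks done */
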
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 7978b3e2c1e1..0805dee1b6b8 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -23,10 +23,10 @@
typedef u32 depot_stack_handle_t;
-struct stack_trace;
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries, gfp_t gfp_flags);
-depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
-
-void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries);
#endif
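
A hedged sketch of the new entries/nr_entries convention, pairing stack_depot_save() with the stack_trace_save() helper introduced in the stacktrace.h hunk below:

	unsigned long entries[16];
	unsigned long *saved;
	unsigned int nr;
	depot_stack_handle_t handle;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	handle = stack_depot_save(entries, nr, GFP_NOWAIT);	/* 0 on failure */

	nr = stack_depot_fetch(handle, &saved);
	stack_trace_print(saved, nr, 0);
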
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index ba29a0613e66..f0cfd12cb45e 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -3,11 +3,64 @@
#define __LINUX_STACKTRACE_H
#include <linux/types.h>
+#include <asm/errno.h>
struct task_struct;
struct pt_regs;
#ifdef CONFIG_STACKTRACE
+void stack_trace_print(unsigned long *trace, unsigned int nr_entries,
+ int spaces);
+int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+ unsigned int nr_entries, int spaces);
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+ unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+ unsigned int size, unsigned int skipnr);
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
+
+/* Internal interfaces. Do not use in generic code */
+#ifdef CONFIG_ARCH_STACKWALK
+
+/**
+ * stack_trace_consume_fn - Callback for arch_stack_walk()
+ * @cookie: Caller supplied pointer handed back by arch_stack_walk()
+ * @addr: The stack entry address to consume
+ * @reliable: True when the stack entry is reliable. Required by
+ * some printk based consumers.
+ *
+ * Return: True, if the entry was consumed or skipped
+ * False, if there is no space left to store
+ */
+typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
+ bool reliable);
+/**
+ * arch_stack_walk - Architecture specific function to walk the stack
+ * @consume_entry: Callback which is invoked by the architecture code for
+ * each entry.
+ * @cookie: Caller supplied pointer which is handed back to
+ * @consume_entry
+ * @task: Pointer to a task struct, can be NULL
+ * @regs: Pointer to registers, can be NULL
+ *
+ * ============ ======= ============================================
+ * task         regs
+ * ============ ======= ============================================
+ * task         NULL    Stack trace from task (can be current)
+ * current      regs    Stack trace starting on regs->stackpointer
+ * ============ ======= ============================================
+ */
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs);
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task);
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs);
+
+#else /* CONFIG_ARCH_STACKWALK */
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
@@ -21,24 +74,20 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
struct stack_trace *trace);
-
-extern void print_stack_trace(struct stack_trace *trace, int spaces);
-extern int snprint_stack_trace(char *buf, size_t size,
- struct stack_trace *trace, int spaces);
-
-#ifdef CONFIG_USER_STACKTRACE_SUPPORT
extern void save_stack_trace_user(struct stack_trace *trace);
+#endif /* !CONFIG_ARCH_STACKWALK */
+#endif /* CONFIG_STACKTRACE */
+
+#if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE)
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+ unsigned int size);
#else
-# define save_stack_trace_user(trace) do { } while (0)
+static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk,
+ unsigned long *store,
+ unsigned int size)
+{
+ return -ENOSYS;
+}
#endif
-#else /* !CONFIG_STACKTRACE */
-# define save_stack_trace(trace) do { } while (0)
-# define save_stack_trace_tsk(tsk, trace) do { } while (0)
-# define save_stack_trace_user(trace) do { } while (0)
-# define print_stack_trace(trace, spaces) do { } while (0)
-# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
-# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; })
-#endif /* CONFIG_STACKTRACE */
-
#endif /* __LINUX_STACKTRACE_H */
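
A hedged sketch of an arch_stack_walk() consumer that fills a fixed-size buffer, matching the stack_trace_consume_fn contract documented above (struct and function names are hypothetical):

	struct trace_fill {
		unsigned long *store;
		unsigned int size, len;
	};

	static bool fill_entry(void *cookie, unsigned long addr, bool reliable)
	{
		struct trace_fill *t = cookie;

		if (t->len >= t->size)
			return false;	/* no space left: stop the walk */
		t->store[t->len++] = addr;
		return true;		/* entry consumed, keep walking */
	}
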
diff --git a/include/linux/string.h b/include/linux/string.h
index 7927b875f80c..4deb11f7976b 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -31,6 +31,10 @@ size_t strlcpy(char *, const char *, size_t);
#ifndef __HAVE_ARCH_STRSCPY
ssize_t strscpy(char *, const char *, size_t);
#endif
+
+/* Wraps calls to strscpy()/memset(), no arch specific code required */
+ssize_t strscpy_pad(char *dest, const char *src, size_t count);
+
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
@@ -150,6 +154,9 @@ extern void * memscan(void *,int,__kernel_size_t);
#ifndef __HAVE_ARCH_MEMCMP
extern int memcmp(const void *,const void *,__kernel_size_t);
#endif
+#ifndef __HAVE_ARCH_BCMP
+extern int bcmp(const void *,const void *,__kernel_size_t);
+#endif
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
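
A hedged usage sketch of the new padded copy; like strscpy(), it returns -E2BIG on truncation:

	char name[16];
	ssize_t len;

	/* copy, NUL-terminate, and zero-fill the remainder of name so no
	 * stale bytes leak if the buffer is later copied out wholesale */
	len = strscpy_pad(name, src, sizeof(name));
	if (len < 0)
		return len;	/* -E2BIG: src did not fit */
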
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index eed3cb16ccf1..5f9076fdb090 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -74,14 +74,12 @@ struct rpc_cred_cache;
struct rpc_authops;
struct rpc_auth {
unsigned int au_cslack; /* call cred size estimate */
- /* guess at number of u32's auth adds before
- * reply data; normally the verifier size: */
- unsigned int au_rslack;
- /* for gss, used to calculate au_rslack: */
- unsigned int au_verfsize;
-
- unsigned int au_flags; /* various flags */
- const struct rpc_authops *au_ops; /* operations */
+ unsigned int au_rslack; /* reply cred size estimate */
+ unsigned int au_verfsize; /* size of reply verifier */
+ unsigned int au_ralign; /* words before UL header */
+
+ unsigned int au_flags;
+ const struct rpc_authops *au_ops;
rpc_authflavor_t au_flavor; /* pseudoflavor (note may
* differ from the flavor in
* au_ops->au_flavor in gss
@@ -131,13 +129,15 @@ struct rpc_credops {
void (*crdestroy)(struct rpc_cred *);
int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
- __be32 * (*crmarshal)(struct rpc_task *, __be32 *);
+ int (*crmarshal)(struct rpc_task *task,
+ struct xdr_stream *xdr);
int (*crrefresh)(struct rpc_task *);
- __be32 * (*crvalidate)(struct rpc_task *, __be32 *);
- int (*crwrap_req)(struct rpc_task *, kxdreproc_t,
- void *, __be32 *, void *);
- int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
- void *, __be32 *, void *);
+ int (*crvalidate)(struct rpc_task *task,
+ struct xdr_stream *xdr);
+ int (*crwrap_req)(struct rpc_task *task,
+ struct xdr_stream *xdr);
+ int (*crunwrap_resp)(struct rpc_task *task,
+ struct xdr_stream *xdr);
int (*crkey_timeout)(struct rpc_cred *);
char * (*crstringify_acceptor)(struct rpc_cred *);
bool (*crneed_reencode)(struct rpc_task *);
@@ -165,10 +165,18 @@ struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
void put_rpccred(struct rpc_cred *);
-__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
-__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
-int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
-int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
+int rpcauth_marshcred(struct rpc_task *task,
+ struct xdr_stream *xdr);
+int rpcauth_checkverf(struct rpc_task *task,
+ struct xdr_stream *xdr);
+int rpcauth_wrap_req_encode(struct rpc_task *task,
+ struct xdr_stream *xdr);
+int rpcauth_wrap_req(struct rpc_task *task,
+ struct xdr_stream *xdr);
+int rpcauth_unwrap_resp_decode(struct rpc_task *task,
+ struct xdr_stream *xdr);
+int rpcauth_unwrap_resp(struct rpc_task *task,
+ struct xdr_stream *xdr);
bool rpcauth_xmit_need_reencode(struct rpc_task *task);
int rpcauth_refreshcred(struct rpc_task *);
void rpcauth_invalcred(struct rpc_task *);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 1c441714d569..98bc9883b230 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -169,6 +169,9 @@ int rpcb_v4_register(struct net *net, const u32 program,
const char *netid);
void rpcb_getport_async(struct rpc_task *);
+void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
+ unsigned int base, unsigned int len,
+ unsigned int hdrsize);
void rpc_call_start(struct rpc_task *);
int rpc_call_async(struct rpc_clnt *clnt,
const struct rpc_message *msg, int flags,
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
index ec6234eee89c..981c89cef19d 100644
--- a/include/linux/sunrpc/gss_krb5_enctypes.h
+++ b/include/linux/sunrpc/gss_krb5_enctypes.h
@@ -1,4 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Dumb way to share this static piece of information with nfsd
+ * Define the string that exports the set of kernel-supported
+ * Kerberos enctypes. This list is sent via upcall to gssd, and
+ * is also exposed via the nfsd /proc API. The consumers generally
+ * treat this as an ordered list, where the first item in the list
+ * is the most preferred.
+ */
+
+#ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
+#define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
+
+#ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
+
+/*
+ * NB: This list includes encryption types that were deprecated
+ * by RFC 8429 (DES3_CBC_SHA1 and ARCFOUR_HMAC).
+ *
+ * ENCTYPE_AES256_CTS_HMAC_SHA1_96
+ * ENCTYPE_AES128_CTS_HMAC_SHA1_96
+ * ENCTYPE_DES3_CBC_SHA1
+ * ENCTYPE_ARCFOUR_HMAC
+ */
+#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23"
+
+#else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
+
+/*
+ * NB: This list includes encryption types that were deprecated
+ * by RFC 8429 and RFC 6649.
+ *
+ * ENCTYPE_AES256_CTS_HMAC_SHA1_96
+ * ENCTYPE_AES128_CTS_HMAC_SHA1_96
+ * ENCTYPE_DES3_CBC_SHA1
+ * ENCTYPE_ARCFOUR_HMAC
+ * ENCTYPE_DES_CBC_MD5
+ * ENCTYPE_DES_CBC_CRC
+ * ENCTYPE_DES_CBC_MD4
*/
#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
+
+#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
+
+#endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 219aa3910a0c..52d41d0c1ae1 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -97,6 +97,7 @@ typedef void (*rpc_action)(struct rpc_task *);
struct rpc_call_ops {
void (*rpc_call_prepare)(struct rpc_task *, void *);
+ void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
void (*rpc_call_done)(struct rpc_task *, void *);
void (*rpc_count_stats)(struct rpc_task *, void *);
void (*rpc_release)(void *);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 2ec128060239..9ee3970ba59c 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -87,6 +87,16 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define xdr_one cpu_to_be32(1)
#define xdr_two cpu_to_be32(2)
+#define rpc_auth_null cpu_to_be32(RPC_AUTH_NULL)
+#define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX)
+#define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT)
+#define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS)
+
+#define rpc_call cpu_to_be32(RPC_CALL)
+#define rpc_reply cpu_to_be32(RPC_REPLY)
+
+#define rpc_msg_accepted cpu_to_be32(RPC_MSG_ACCEPTED)
+
#define rpc_success cpu_to_be32(RPC_SUCCESS)
#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL)
#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH)
@@ -95,6 +105,9 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR)
#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY)
+#define rpc_mismatch cpu_to_be32(RPC_MISMATCH)
+#define rpc_auth_error cpu_to_be32(RPC_AUTH_ERROR)
+
#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK)
#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED)
#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED)
@@ -103,7 +116,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK)
#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM)
#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM)
-#define rpc_autherr_oldseqnum cpu_to_be32(101)
/*
* Miscellaneous XDR helper functions
@@ -167,7 +179,6 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
-extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
@@ -217,6 +228,8 @@ struct xdr_stream {
struct kvec scratch; /* Scratch buffer */
struct page **page_ptr; /* pointer to the current page */
unsigned int nwords; /* Remaining decode buffer length */
+
+ struct rpc_rqst *rqst; /* For debugging */
};
/*
@@ -227,7 +240,8 @@ typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
void *obj);
-extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
+extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
+ __be32 *p, struct rpc_rqst *rqst);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_commit_encode(struct xdr_stream *xdr);
extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
@@ -235,7 +249,8 @@ extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr);
-extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
+extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
+ __be32 *p, struct rpc_rqst *rqst);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len);
extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index ad7e910b119d..3a391544299e 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -196,8 +196,6 @@ struct rpc_xprt {
size_t max_payload; /* largest RPC payload size,
in bytes */
- unsigned int tsh_size; /* size of transport specific
- header */
struct rpc_wait_queue binding; /* requests waiting on rpcbind */
struct rpc_wait_queue sending; /* requests waiting to send */
@@ -362,11 +360,6 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
unsigned int max_req);
void xprt_free(struct rpc_xprt *);
-static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
-{
- return p + xprt->tsh_size;
-}
-
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
{
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 458bfe0137f5..b81d0b3e0799 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -26,6 +26,7 @@ struct sock_xprt {
*/
struct socket * sock;
struct sock * inet;
+ struct file * file;
/*
* State of TCP reply receive
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 3f529ad9a9d2..6b3ea9ea6a9e 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -425,6 +425,7 @@ void restore_processor_state(void);
/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
+extern void ksys_sync_helper(void);
#define pm_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
@@ -462,6 +463,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
return 0;
}
+static inline void ksys_sync_helper(void) {}
+
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
static inline bool pm_wakeup_pending(void) { return false; }
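A minimal sketch of the intended call site, assuming some suspend/hibernate entry path; the stub above lets the call compile away when CONFIG_PM_SLEEP is disabled:

static int example_enter_sleep(void)
{
	/* Sync filesystems once, through one shared helper, before the
	 * transition; a no-op without CONFIG_PM_SLEEP. */
	ksys_sync_helper();
	return 0;
}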
diff --git a/include/linux/swap.h b/include/linux/swap.h
index fc50e21b3b88..4bfb5c4ac108 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -157,9 +157,9 @@ struct swap_extent {
/*
 * Max bad pages in the new format.
*/
-#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
- ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
+ ((offsetof(union swap_header, magic.magic) - \
+ offsetof(union swap_header, info.badpages)) / sizeof(int))
enum {
SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 54254388899e..361f62bb4a8e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -73,6 +73,8 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
+size_t swiotlb_max_mapping_size(struct device *dev);
+bool is_swiotlb_active(void);
#else
#define swiotlb_force SWIOTLB_NO_FORCE
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
@@ -92,6 +94,15 @@ static inline unsigned int swiotlb_max_segment(void)
{
return 0;
}
+static inline size_t swiotlb_max_mapping_size(struct device *dev)
+{
+ return SIZE_MAX;
+}
+
+static inline bool is_swiotlb_active(void)
+{
+ return false;
+}
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
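A hedged sketch of how a driver might bound its DMA segment size with the new helpers (the 4 MiB cap is an arbitrary example value):

static size_t example_seg_limit(struct device *dev)
{
	/* swiotlb_max_mapping_size() is SIZE_MAX when SWIOTLB is compiled
	 * out, so min_t() only bites when bouncing is actually possible. */
	if (!is_swiotlb_active())
		return SZ_4M;
	return min_t(size_t, SZ_4M, swiotlb_max_mapping_size(dev));
}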
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index eee0412bdf4b..52a079b3a9a6 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -248,9 +248,13 @@ struct ntb_ctrl_regs {
u32 win_size;
u64 xlate_addr;
} bar_entry[6];
- u32 reserved2[216];
- u32 req_id_table[256];
- u32 reserved3[512];
+ struct {
+ u32 win_size;
+ u32 reserved[3];
+ } bar_ext_entry[6];
+ u32 reserved2[192];
+ u32 req_id_table[512];
+ u32 reserved3[256];
u64 lut_entry[512];
} __packed;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index c2962953bf11..e446806a561f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -985,6 +985,9 @@ asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
unsigned mask, struct statx __user *buffer);
asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
int flags, uint32_t sig);
+asmlinkage long sys_pidfd_send_signal(int pidfd, int sig,
+ siginfo_t __user *info,
+ unsigned int flags);
/*
* Architecture-specific system calls
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 55388ab45fd4..f92a10b5e112 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -68,6 +68,12 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode);
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU)
+extern void tick_offline_cpu(unsigned int cpu);
+#else
+static inline void tick_offline_cpu(unsigned int cpu) { }
+#endif
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
@@ -122,6 +128,7 @@ extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
+extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
@@ -145,7 +152,11 @@ static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
static inline bool tick_nohz_idle_got_tick(void) { return false; }
-
+static inline ktime_t tick_nohz_get_next_hrtimer(void)
+{
+ /* Next wakeup is one tick period away; assume it starts now */
+ return ktime_add(ktime_get(), TICK_NSEC);
+}
static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
*delta_next = TICK_NSEC;
diff --git a/include/linux/time64.h b/include/linux/time64.h
index f38d382ffec1..a620ee610b9f 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -33,6 +33,17 @@ struct itimerspec64 {
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+/*
+ * Limits for settimeofday():
+ *
+ * To prevent setting the time close to the wraparound point, time setting
+ * is limited so that a reasonable uptime can be accommodated. An uptime of
+ * 30 years should be really sufficient, which means the cutoff is the year
+ * 2232. At that point the cutoff is just a small part of the larger problem.
+ */
+#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 * 3600)
+#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)
+
static inline int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
@@ -100,6 +111,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts)
return true;
}
+static inline bool timespec64_valid_settod(const struct timespec64 *ts)
+{
+ if (!timespec64_valid(ts))
+ return false;
+ /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */
+ if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX)
+ return false;
+ return true;
+}
+
/**
* timespec64_to_ns - Convert timespec64 to nanoseconds
* @ts: pointer to the timespec64 variable to be converted
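For illustration, a hypothetical settimeofday-style caller of the new validator:

static int example_settod(const struct timespec64 *ts)
{
	/* Rejects malformed timespecs and anything within ~30 years of
	 * KTIME_MAX, keeping CLOCK_REALTIME arithmetic overflow-free. */
	if (!timespec64_valid_settod(ts))
		return -EINVAL;
	/* ... apply the new wall-clock time ... */
	return 0;
}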
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index b49a55cf775f..1b5436b213a2 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -22,12 +22,41 @@
#ifndef __LINUX_TPM_H__
#define __LINUX_TPM_H__
+#include <linux/hw_random.h>
+#include <linux/acpi.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <crypto/hash_info.h>
+
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
+#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
+enum tpm_algorithms {
+ TPM_ALG_ERROR = 0x0000,
+ TPM_ALG_SHA1 = 0x0004,
+ TPM_ALG_KEYEDHASH = 0x0008,
+ TPM_ALG_SHA256 = 0x000B,
+ TPM_ALG_SHA384 = 0x000C,
+ TPM_ALG_SHA512 = 0x000D,
+ TPM_ALG_NULL = 0x0010,
+ TPM_ALG_SM3_256 = 0x0012,
+};
+
+struct tpm_digest {
+ u16 alg_id;
+ u8 digest[TPM_MAX_DIGEST_SIZE];
+} __packed;
+
+struct tpm_bank_info {
+ u16 alg_id;
+ u16 digest_size;
+ u16 crypto_id;
+};
+
enum TPM_OPS_FLAGS {
TPM_OPS_AUTO_STARTUP = BIT(0),
};
@@ -41,7 +70,7 @@ struct tpm_class_ops {
int (*send) (struct tpm_chip *chip, u8 *buf, size_t len);
void (*cancel) (struct tpm_chip *chip);
u8 (*status) (struct tpm_chip *chip);
- bool (*update_timeouts)(struct tpm_chip *chip,
+ void (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
int (*go_idle)(struct tpm_chip *chip);
int (*cmd_ready)(struct tpm_chip *chip);
@@ -50,11 +79,100 @@ struct tpm_class_ops {
void (*clk_enable)(struct tpm_chip *chip, bool value);
};
+#define TPM_NUM_EVENT_LOG_FILES 3
+
+/* Indexes the duration array */
+enum tpm_duration {
+ TPM_SHORT = 0,
+ TPM_MEDIUM = 1,
+ TPM_LONG = 2,
+ TPM_LONG_LONG = 3,
+ TPM_UNDEFINED,
+ TPM_NUM_DURATIONS = TPM_UNDEFINED,
+};
+
+#define TPM_PPI_VERSION_LEN 3
+
+struct tpm_space {
+ u32 context_tbl[3];
+ u8 *context_buf;
+ u32 session_tbl[3];
+ u8 *session_buf;
+};
+
+struct tpm_bios_log {
+ void *bios_event_log;
+ void *bios_event_log_end;
+};
+
+struct tpm_chip_seqops {
+ struct tpm_chip *chip;
+ const struct seq_operations *seqops;
+};
+
+struct tpm_chip {
+ struct device dev;
+ struct device devs;
+ struct cdev cdev;
+ struct cdev cdevs;
+
+ /* A driver callback under ops cannot be run unless ops_sem is held
+ * (sometimes implicitly, e.g. for the sysfs code). ops becomes NULL
+ * when the driver is unregistered; see tpm_try_get_ops.
+ */
+ struct rw_semaphore ops_sem;
+ const struct tpm_class_ops *ops;
+
+ struct tpm_bios_log log;
+ struct tpm_chip_seqops bin_log_seqops;
+ struct tpm_chip_seqops ascii_log_seqops;
+
+ unsigned int flags;
+
+ int dev_num; /* /dev/tpm# */
+ unsigned long is_open; /* only one allowed */
+
+ char hwrng_name[64];
+ struct hwrng hwrng;
+
+ struct mutex tpm_mutex; /* tpm is processing */
+
+ unsigned long timeout_a; /* jiffies */
+ unsigned long timeout_b; /* jiffies */
+ unsigned long timeout_c; /* jiffies */
+ unsigned long timeout_d; /* jiffies */
+ bool timeout_adjusted;
+ unsigned long duration[TPM_NUM_DURATIONS]; /* jiffies */
+ bool duration_adjusted;
+
+ struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
+
+ const struct attribute_group *groups[3];
+ unsigned int groups_cnt;
+
+ u32 nr_allocated_banks;
+ struct tpm_bank_info *allocated_banks;
+#ifdef CONFIG_ACPI
+ acpi_handle acpi_dev_handle;
+ char ppi_version[TPM_PPI_VERSION_LEN + 1];
+#endif /* CONFIG_ACPI */
+
+ struct tpm_space work_space;
+ u32 last_cc;
+ u32 nr_commands;
+ u32 *cc_attrs_tbl;
+
+ /* active locality */
+ int locality;
+};
+
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
-extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
-extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash);
+extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest);
+extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests);
extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
extern int tpm_seal_trusted(struct tpm_chip *chip,
@@ -70,13 +188,14 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip)
return -ENODEV;
}
-static inline int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf)
+static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx,
+ struct tpm_digest *digest)
{
return -ENODEV;
}
static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
- const u8 *hash)
+ struct tpm_digest *digests)
{
return -ENODEV;
}
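A sketch of the reworked PCR interface, under the assumption that the chip's only allocated bank is SHA-1; real callers pass one tpm_digest per entry in allocated_banks:

static int example_extend_sha1(struct tpm_chip *chip, u32 pcr_idx,
			       const u8 *hash)	/* TPM_DIGEST_SIZE bytes */
{
	struct tpm_digest d = { .alg_id = TPM_ALG_SHA1 };

	memcpy(d.digest, hash, TPM_DIGEST_SIZE);
	return tpm_pcr_extend(chip, pcr_idx, &d);
}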
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 20d9da77fc11..81519f163211 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -3,12 +3,11 @@
#ifndef __LINUX_TPM_EVENTLOG_H__
#define __LINUX_TPM_EVENTLOG_H__
-#include <crypto/hash_info.h>
+#include <linux/tpm.h>
#define TCG_EVENT_NAME_LEN_MAX 255
#define MAX_TEXT_EVENT 1000 /* Max event string length */
#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
-#define TPM2_ACTIVE_PCR_BANKS 3
#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1
#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2
@@ -82,7 +81,7 @@ struct tcg_efi_specid_event_algs {
u16 digest_size;
} __packed;
-struct tcg_efi_specid_event {
+struct tcg_efi_specid_event_head {
u8 signature[16];
u32 platform_class;
u8 spec_version_minor;
@@ -90,9 +89,7 @@ struct tcg_efi_specid_event {
u8 spec_errata;
u8 uintnsize;
u32 num_algs;
- struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS];
- u8 vendor_info_size;
- u8 vendor_info[0];
+ struct tcg_efi_specid_event_algs digest_sizes[];
} __packed;
struct tcg_pcr_event {
@@ -108,17 +105,11 @@ struct tcg_event_field {
u8 event[0];
} __packed;
-struct tpm2_digest {
- u16 alg_id;
- u8 digest[SHA512_DIGEST_SIZE];
-} __packed;
-
-struct tcg_pcr_event2 {
+struct tcg_pcr_event2_head {
u32 pcr_idx;
u32 event_type;
u32 count;
- struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS];
- struct tcg_event_field event;
+ struct tpm_digest digests[];
} __packed;
#endif
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 37b226e8df13..2b70130af585 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+static inline unsigned long user_access_save(void) { return 0UL; }
+static inline void user_access_restore(unsigned long flags) { }
#endif
#ifdef CONFIG_HARDENED_USERCOPY
diff --git a/include/linux/uio.h b/include/linux/uio.h
index ecf584f6b82d..2d0131ad4604 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -23,14 +23,23 @@ struct kvec {
};
enum iter_type {
- ITER_IOVEC = 0,
- ITER_KVEC = 2,
- ITER_BVEC = 4,
- ITER_PIPE = 8,
- ITER_DISCARD = 16,
+ /* set if ITER_BVEC doesn't hold a bv_page ref */
+ ITER_BVEC_FLAG_NO_REF = 2,
+
+ /* iter types */
+ ITER_IOVEC = 4,
+ ITER_KVEC = 8,
+ ITER_BVEC = 16,
+ ITER_PIPE = 32,
+ ITER_DISCARD = 64,
};
struct iov_iter {
+ /*
+ * Bit 0 is the read/write bit, set if we're writing.
+ * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
+ * the caller isn't expecting to drop a page reference when done.
+ */
unsigned int type;
size_t iov_offset;
size_t count;
@@ -51,7 +60,7 @@ struct iov_iter {
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
- return i->type & ~(READ | WRITE);
+ return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF);
}
static inline bool iter_is_iovec(const struct iov_iter *i)
@@ -84,6 +93,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
return i->type & (READ | WRITE);
}
+static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i)
+{
+ return (i->type & ITER_BVEC_FLAG_NO_REF) != 0;
+}
+
/*
* Total number of bytes covered by an iovec.
*
@@ -110,14 +124,6 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
};
}
-#define iov_for_each(iov, iter, start) \
- if (iov_iter_type(start) == ITER_IOVEC || \
- iov_iter_type(start) == ITER_KVEC) \
- for (iter = (start); \
- (iter).count && \
- ((iov = iov_iter_iovec(&(iter))), 1); \
- iov_iter_advance(&(iter), (iov).iov_len))
-
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
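A hedged sketch of a completion path honoring the new no-ref flag (the function and its callers are hypothetical):

static void example_release_pages(struct iov_iter *i,
				  struct page **pages, int n)
{
	/* ITER_BVEC_FLAG_NO_REF means the iter holds no page references,
	 * so dropping one here would underflow the refcount. */
	if (iov_iter_bvec_no_ref(i))
		return;
	while (n--)
		put_page(pages[n]);
}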
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 103a48a48872..12bf0b68ed92 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -115,6 +115,7 @@ struct uprobes_state {
struct xol_area *xol_area;
};
+extern void __init uprobes_init(void);
extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool is_swbp_insn(uprobe_opcode_t *insn);
@@ -154,6 +155,10 @@ extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
struct uprobes_state {
};
+static inline void uprobes_init(void)
+{
+}
+
#define uprobe_get_trap_addr(regs) instruction_pointer(regs)
static inline int
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 5e49e82c4368..ff010d1fd1c7 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -200,7 +200,6 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
* @dev: driver model's view of this device
* @usb_dev: if an interface is bound to the USB major, this will point
* to the sysfs representation for that device.
- * @pm_usage_cnt: PM usage counter for this interface
* @reset_ws: Used for scheduling resets from atomic context.
* @resetting_device: USB core reset the device, so use alt setting 0 as
* current; needs bandwidth alloc after reset.
@@ -257,7 +256,6 @@ struct usb_interface {
struct device dev; /* interface specific device info */
struct device *usb_dev;
- atomic_t pm_usage_cnt; /* usage counter for autosuspend */
struct work_struct reset_ws; /* for resets in atomic context */
};
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index a240ed2a0372..ff56c443180c 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
#define vbg_debug pr_debug
#endif
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status);
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+ u32 client_id, int *vbox_status);
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
- u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
- u32 parm_count, int *vbox_status);
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+ u32 function, u32 timeout_ms,
+ struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+ int *vbox_status);
/**
* Convert a VirtualBox status code to a standard Linux kernel return value.
diff --git a/include/linux/verification.h b/include/linux/verification.h
index cfa4730d607a..018fb5f13d44 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -17,6 +17,7 @@
* should be used.
*/
#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
+#define VERIFY_USE_PLATFORM_KEYRING ((struct key *)2UL)
/*
* The use to which an asymmetric key is being put.
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index ee162e3e879b..553b34c8b5f7 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -125,9 +125,11 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
#ifdef CONFIG_VGA_ARB
extern struct pci_dev *vga_default_device(void);
extern void vga_set_default_device(struct pci_dev *pdev);
+extern int vga_remove_vgacon(struct pci_dev *pdev);
#else
static inline struct pci_dev *vga_default_device(void) { return NULL; };
static inline void vga_set_default_device(struct pci_dev *pdev) { };
+static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; };
#endif
/*
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index fa1b5da2804e..673fe3ef3607 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -157,6 +157,8 @@ int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
+size_t virtio_max_dma_size(struct virtio_device *vdev);
+
#define virtio_device_for_each_vq(vdev, vq) \
list_for_each_entry(vq, &vdev->vqs, list)
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 987b6491b946..bb4cc4910750 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -290,6 +290,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr) \
do { \
+ might_sleep(); \
/* Must match the member's type, and be integer */ \
if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
(*ptr) = 1; \
@@ -319,6 +320,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr) \
do { \
+ might_sleep(); \
/* Must match the member's type, and be integer */ \
if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
BUG_ON((*ptr) == 1); \
@@ -358,6 +360,7 @@ static inline void __virtio_cread_many(struct virtio_device *vdev,
vdev->config->generation(vdev) : 0;
int i;
+ might_sleep();
do {
old = gen;
@@ -380,6 +383,8 @@ static inline void virtio_cread_bytes(struct virtio_device *vdev,
static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
u8 ret;
+
+ might_sleep();
vdev->config->get(vdev, offset, &ret, sizeof(ret));
return ret;
}
@@ -387,6 +392,7 @@ static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
static inline void virtio_cwrite8(struct virtio_device *vdev,
unsigned int offset, u8 val)
{
+ might_sleep();
vdev->config->set(vdev, offset, &val, sizeof(val));
}
@@ -394,6 +400,8 @@ static inline u16 virtio_cread16(struct virtio_device *vdev,
unsigned int offset)
{
u16 ret;
+
+ might_sleep();
vdev->config->get(vdev, offset, &ret, sizeof(ret));
return virtio16_to_cpu(vdev, (__force __virtio16)ret);
}
@@ -401,6 +409,7 @@ static inline u16 virtio_cread16(struct virtio_device *vdev,
static inline void virtio_cwrite16(struct virtio_device *vdev,
unsigned int offset, u16 val)
{
+ might_sleep();
val = (__force u16)cpu_to_virtio16(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val));
}
@@ -409,6 +418,8 @@ static inline u32 virtio_cread32(struct virtio_device *vdev,
unsigned int offset)
{
u32 ret;
+
+ might_sleep();
vdev->config->get(vdev, offset, &ret, sizeof(ret));
return virtio32_to_cpu(vdev, (__force __virtio32)ret);
}
@@ -416,6 +427,7 @@ static inline u32 virtio_cread32(struct virtio_device *vdev,
static inline void virtio_cwrite32(struct virtio_device *vdev,
unsigned int offset, u32 val)
{
+ might_sleep();
val = (__force u32)cpu_to_virtio32(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val));
}
@@ -431,6 +443,7 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
static inline void virtio_cwrite64(struct virtio_device *vdev,
unsigned int offset, u64 val)
{
+ might_sleep();
val = (__force u64)cpu_to_virtio64(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val));
}
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index fab02133a919..3dc70adfe5f5 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -63,7 +63,7 @@ struct virtqueue;
/*
* Creates a virtqueue and allocates the descriptor ring. If
* may_reduce_num is set, then this may allocate a smaller ring than
- * expected. The caller should query virtqueue_get_ring_size to learn
+ * expected. The caller should query virtqueue_get_vring_size to learn
* the actual size of the ring.
*/
struct virtqueue *vring_create_virtqueue(unsigned int index,
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 398e9c95cd61..c6eebb839552 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -21,6 +21,11 @@ struct notifier_block; /* in notifier.h */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+/*
+ * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
+ * vfree_atomic().
+ */
+#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -142,6 +147,13 @@ extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+static inline void set_vm_flush_reset_perms(void *addr)
+{
+ struct vm_struct *vm = find_vm_area(addr);
+
+ if (vm)
+ vm->flags |= VM_FLUSH_RESET_PERMS;
+}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
@@ -157,6 +169,9 @@ static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
+static inline void set_vm_flush_reset_perms(void *addr)
+{
+}
#endif
/* Allocate/destroy a 'vmalloc' VM area. */
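For illustration, the intended ordering when allocating executable memory, assuming the three-argument __vmalloc() of this kernel generation:

static void *example_alloc_exec(unsigned long size)
{
	void *p = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);

	/* Set the flag before the memory is made executable so a later
	 * vfree() resets the direct map and flushes the TLB. */
	if (p)
		set_vm_flush_reset_perms(p);
	return p;
}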
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 4757cb5077e5..592f81afecbb 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -18,6 +18,7 @@
#include <linux/device.h>
#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <uapi/linux/wmi.h>
struct wmi_device {
@@ -39,10 +40,6 @@ extern union acpi_object *wmidev_block_query(struct wmi_device *wdev,
extern int set_required_buffer_size(struct wmi_device *wdev, u64 length);
-struct wmi_device_id {
- const char *guid_string;
-};
-
struct wmi_driver {
struct device_driver driver;
const struct wmi_device_id *id_table;
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 5d9d318bcf7a..0e01e6129145 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -131,6 +131,12 @@ static inline unsigned int xa_pointer_tag(void *entry)
* xa_mk_internal() - Create an internal entry.
* @v: Value to turn into an internal entry.
*
+ * Internal entries are used for a number of purposes. Entries 0-255 are
+ * used for sibling entries (only 0-62 are used by the current code). 256
+ * is used for the retry entry. 257 is used for the reserved / zero entry.
+ * Negative internal entries are used to represent errnos. Node pointers
+ * are also tagged as internal entries in some situations.
+ *
* Context: Any context.
* Return: An XArray internal entry corresponding to this value.
*/
@@ -163,6 +169,22 @@ static inline bool xa_is_internal(const void *entry)
return ((unsigned long)entry & 3) == 2;
}
+#define XA_ZERO_ENTRY xa_mk_internal(257)
+
+/**
+ * xa_is_zero() - Is the entry a zero entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * The normal API will return NULL as the contents of a slot containing
+ * a zero entry. You can only see zero entries by using the advanced API.
+ *
+ * Return: %true if the entry is a zero entry.
+ */
+static inline bool xa_is_zero(const void *entry)
+{
+ return unlikely(entry == XA_ZERO_ENTRY);
+}
+
/**
* xa_is_err() - Report whether an XArray operation returned an error
* @entry: Result from calling an XArray function
@@ -200,6 +222,27 @@ static inline int xa_err(void *entry)
return 0;
}
+/**
+ * struct xa_limit - Represents a range of IDs.
+ * @min: The lowest ID to allocate (inclusive).
+ * @max: The maximum ID to allocate (inclusive).
+ *
+ * This structure is used either directly or via the XA_LIMIT() macro
+ * to communicate the range of IDs that are valid for allocation.
+ * Two common ranges are predefined for you:
+ * * xa_limit_32b - [0 - UINT_MAX]
+ * * xa_limit_31b - [0 - INT_MAX]
+ */
+struct xa_limit {
+ u32 max;
+ u32 min;
+};
+
+#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }
+
+#define xa_limit_32b XA_LIMIT(0, UINT_MAX)
+#define xa_limit_31b XA_LIMIT(0, INT_MAX)
+
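A one-line illustration of a custom range (the bounds are hypothetical):

/* IDs 1..63 inclusive; written out, { .min = 1, .max = 63 }. */
struct xa_limit example_limit = XA_LIMIT(1, 63);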
typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0 ((__force xa_mark_t)0U)
#define XA_MARK_1 ((__force xa_mark_t)1U)
@@ -220,10 +263,14 @@ enum xa_lock_type {
#define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
+#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U)
+#define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U)
#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
(__force unsigned)(mark)))
+/* ALLOC is for a normal 0-based alloc. ALLOC1 is for a 1-based alloc */
#define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
+#define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)
/**
* struct xarray - The anchor of the XArray.
@@ -279,7 +326,7 @@ struct xarray {
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
/**
- * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs.
+ * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
* @name: A string that names your XArray.
*
* This is intended for file scope definitions of allocating XArrays.
@@ -287,6 +334,15 @@ struct xarray {
*/
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
+/**
+ * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
+ * @name: A string that names your XArray.
+ *
+ * This is intended for file scope definitions of allocating XArrays.
+ * See also DEFINE_XARRAY().
+ */
+#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)
+
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
@@ -463,9 +519,12 @@ void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
void *entry, gfp_t);
-int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
-int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
-int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
+int __must_check __xa_insert(struct xarray *, unsigned long index,
+ void *entry, gfp_t);
+int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
+ struct xa_limit, gfp_t);
+int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
+ struct xa_limit, u32 *next, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -526,9 +585,9 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
* @xa: XArray.
* @index: Index of entry.
*
- * This function is the equivalent of calling xa_store() with %NULL as
- * the third argument. The XArray does not need to allocate memory, so
- * the user does not need to provide GFP flags.
+ * After this function returns, loading from @index will return %NULL.
+ * If the index is part of a multi-index entry, all indices will be erased
+ * and none of the entries will be part of a multi-index entry.
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
@@ -550,9 +609,9 @@ static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
* @xa: XArray.
* @index: Index of entry.
*
- * This function is the equivalent of calling xa_store() with %NULL as
- * the third argument. The XArray does not need to allocate memory, so
- * the user does not need to provide GFP flags.
+ * After this function returns, loading from @index will return %NULL.
+ * If the index is part of a multi-index entry, all indices will be erased
+ * and none of the entries will be part of a multi-index entry.
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts.
@@ -664,11 +723,11 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
*
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * Return: 0 if the store succeeded. -EBUSY if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
-static inline int xa_insert(struct xarray *xa, unsigned long index,
- void *entry, gfp_t gfp)
+static inline int __must_check xa_insert(struct xarray *xa,
+ unsigned long index, void *entry, gfp_t gfp)
{
int err;
@@ -693,11 +752,11 @@ static inline int xa_insert(struct xarray *xa, unsigned long index,
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * Return: 0 if the store succeeded. -EBUSY if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
-static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
- void *entry, gfp_t gfp)
+static inline int __must_check xa_insert_bh(struct xarray *xa,
+ unsigned long index, void *entry, gfp_t gfp)
{
int err;
@@ -722,11 +781,11 @@ static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * Return: 0 if the store succeeded. -EBUSY if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
-static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
- void *entry, gfp_t gfp)
+static inline int __must_check xa_insert_irq(struct xarray *xa,
+ unsigned long index, void *entry, gfp_t gfp)
{
int err;
@@ -741,26 +800,26 @@ static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
* xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
* @entry: New entry.
+ * @limit: Range of ID to allocate.
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index. A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
*
- * Context: Process context. Takes and releases the xa_lock. May sleep if
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
*/
-static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
- gfp_t gfp)
+static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
+ void *entry, struct xa_limit limit, gfp_t gfp)
{
int err;
xa_lock(xa);
- err = __xa_alloc(xa, id, max, entry, gfp);
+ err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock(xa);
return err;
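A before/after sketch of a caller converting to the xa_limit form (xa, id and entry are assumed to exist in the caller):

/* Old form: err = xa_alloc(&xa, &id, UINT_MAX, entry, GFP_KERNEL); */
err = xa_alloc(&xa, &id, entry, xa_limit_32b, GFP_KERNEL);
if (err)	/* now __must_check: -ENOMEM or -EBUSY */
	return err;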
@@ -770,26 +829,26 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
* xa_alloc_bh() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
* @entry: New entry.
+ * @limit: Range of ID to allocate.
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index. A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
*/
-static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry,
- gfp_t gfp)
+static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
+ void *entry, struct xa_limit limit, gfp_t gfp)
{
int err;
xa_lock_bh(xa);
- err = __xa_alloc(xa, id, max, entry, gfp);
+ err = __xa_alloc(xa, id, entry, limit, gfp);
xa_unlock_bh(xa);
return err;
@@ -799,26 +858,125 @@ static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry,
* xa_alloc_irq() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
* @entry: New entry.
+ * @limit: Range of ID to allocate.
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index. A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
*/
-static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
- gfp_t gfp)
+static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
+ void *entry, struct xa_limit limit, gfp_t gfp)
{
int err;
xa_lock_irq(xa);
- err = __xa_alloc(xa, id, max, entry, gfp);
+ err = __xa_alloc(xa, id, entry, limit, gfp);
+ xa_unlock_irq(xa);
+
+ return err;
+}
+
+/**
+ * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
+ * the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+ int err;
+
+ xa_lock(xa);
+ err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+ xa_unlock(xa);
+
+ return err;
+}
+
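A sketch of cyclic allocation for monotonically advancing IDs (all names hypothetical):

static u32 example_next;	/* where the previous search left off */

static int example_new_id(struct xarray *xa, void *entry, u32 *id)
{
	int ret = xa_alloc_cyclic(xa, id, entry, xa_limit_31b,
				  &example_next, GFP_KERNEL);

	/* ret == 1 still means success, just that the search wrapped. */
	return ret < 0 ? ret : 0;
}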
+/**
+ * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+ int err;
+
+ xa_lock_bh(xa);
+ err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+ xa_unlock_bh(xa);
+
+ return err;
+}
+
+/**
+ * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+ int err;
+
+ xa_lock_irq(xa);
+ err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock_irq(xa);
return err;
@@ -842,16 +1000,10 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
* May sleep if the @gfp flags permit.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
-static inline
+static inline __must_check
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
- int ret;
-
- xa_lock(xa);
- ret = __xa_reserve(xa, index, gfp);
- xa_unlock(xa);
-
- return ret;
+ return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}
/**
@@ -866,16 +1018,10 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
* disabling softirqs.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
-static inline
+static inline __must_check
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
- int ret;
-
- xa_lock_bh(xa);
- ret = __xa_reserve(xa, index, gfp);
- xa_unlock_bh(xa);
-
- return ret;
+ return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}
/**
@@ -890,16 +1036,10 @@ int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
* disabling interrupts.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
-static inline
+static inline __must_check
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
- int ret;
-
- xa_lock_irq(xa);
- ret = __xa_reserve(xa, index, gfp);
- xa_unlock_irq(xa);
-
- return ret;
+ return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}
/**
@@ -913,7 +1053,7 @@ int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
*/
static inline void xa_release(struct xarray *xa, unsigned long index)
{
- xa_cmpxchg(xa, index, NULL, NULL, 0);
+ xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
}
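A hedged sketch of the reserve/publish pattern that these rewrites keep intact:

/* Reserve while sleeping is still allowed; publish or back out later. */
if (xa_reserve(&xa, index, GFP_KERNEL))
	return -ENOMEM;
/* ... later, with no further allocation needed ... */
if (success)
	xa_store(&xa, index, entry, 0);	/* overwrites the zero entry */
else
	xa_release(&xa, index);		/* frees the slot only if still reserved */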
/* Everything below here is the Advanced API. Proceed with caution. */
@@ -1073,18 +1213,6 @@ static inline bool xa_is_sibling(const void *entry)
}
#define XA_RETRY_ENTRY xa_mk_internal(256)
-#define XA_ZERO_ENTRY xa_mk_internal(257)
-
-/**
- * xa_is_zero() - Is the entry a zero entry?
- * @entry: Entry retrieved from the XArray
- *
- * Return: %true if the entry is a zero entry.
- */
-static inline bool xa_is_zero(const void *entry)
-{
- return unlikely(entry == XA_ZERO_ENTRY);
-}
/**
* xa_is_retry() - Is the entry a retry entry?
diff --git a/include/media/davinci/dm355_ccdc.h b/include/media/davinci/dm355_ccdc.h
index e6bc72f6b60f..1cba42d805fa 100644
--- a/include/media/davinci/dm355_ccdc.h
+++ b/include/media/davinci/dm355_ccdc.h
@@ -228,7 +228,7 @@ struct ccdc_config_params_raw {
/* Threshold of median filter */
int med_filt_thres;
/*
- * horz and vertical data offset. Appliable for defect correction
+ * horz and vertical data offset. Applicable for defect correction
* and lsc
*/
struct ccdc_data_offset data_offset;
@@ -238,7 +238,7 @@ struct ccdc_config_params_raw {
struct ccdc_black_clamp blk_clamp;
/* Structure for Black Compensation */
struct ccdc_black_compensation blk_comp;
- /* struture for vertical Defect Correction Module Configuration */
+ /* structure for vertical Defect Correction Module Configuration */
struct ccdc_vertical_dft vertical_dft;
/* structure for color space converter Module Configuration */
struct ccdc_csc csc;
diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h
index 6ea2ce241851..694fc8f6081f 100644
--- a/include/media/davinci/dm644x_ccdc.h
+++ b/include/media/davinci/dm644x_ccdc.h
@@ -152,7 +152,7 @@ struct ccdc_params_raw {
* order in memory(bottom to top)
*/
unsigned char image_invert_enable;
- /* configurable paramaters */
+ /* configurable parameters */
struct ccdc_config_params_raw config_params;
};
diff --git a/include/media/drv-intf/exynos-fimc.h b/include/media/drv-intf/exynos-fimc.h
index f9c64338841f..54c214737142 100644
--- a/include/media/drv-intf/exynos-fimc.h
+++ b/include/media/drv-intf/exynos-fimc.h
@@ -81,7 +81,7 @@ struct fimc_source_info {
* v4l2_device notification id. This is only for internal use in the kernel.
* Sensor subdevs should issue S5P_FIMC_TX_END_NOTIFY notification in single
* frame capture mode when there is only one VSYNC pulse issued by the sensor
- * at begining of the frame transmission.
+ * at the beginning of the frame transmission.
*/
#define S5P_FIMC_TX_END_NOTIFY _IO('e', 0)
diff --git a/include/media/drv-intf/saa7146.h b/include/media/drv-intf/saa7146.h
index a7bf2c4a2e4d..71ce63c99cb4 100644
--- a/include/media/drv-intf/saa7146.h
+++ b/include/media/drv-intf/saa7146.h
@@ -139,7 +139,7 @@ struct saa7146_dev
void *ext_priv; /* pointer for extension private use (most likely some private data) */
struct saa7146_ext_vv *ext_vv_data;
- /* per device video/vbi informations (if available) */
+ /* per device video/vbi information (if available) */
struct saa7146_vv *vv_data;
void (*vv_callback)(struct saa7146_dev *dev, unsigned long status);
diff --git a/include/media/drv-intf/saa7146_vv.h b/include/media/drv-intf/saa7146_vv.h
index 6f80fb7f31a5..b34d86bb0664 100644
--- a/include/media/drv-intf/saa7146_vv.h
+++ b/include/media/drv-intf/saa7146_vv.h
@@ -151,7 +151,7 @@ struct saa7146_vv
struct saa7146_ext_vv
{
- /* informations about the video capabilities of the device */
+ /* information about the video capabilities of the device */
int inputs;
int audios;
u32 capabilities;
@@ -241,7 +241,7 @@ void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits);
#define SAA7146_CLIPPING_MASK 0x6
#define SAA7146_CLIPPING_MASK_INVERTED 0x7
-/* output formats: each entry holds four informations */
+/* output formats: each entry holds four pieces of information */
#define RGB08_COMPOSED 0x0217 /* composed is used in the sense of "not-planar" */
/* this means: planar?=0, yuv2rgb-conversation-mode=2, dither=yes(=1), format-mode = 7 */
#define RGB15_COMPOSED 0x0213
diff --git a/include/media/drv-intf/sh_mobile_ceu.h b/include/media/drv-intf/sh_mobile_ceu.h
deleted file mode 100644
index 555f0ecc0fde..000000000000
--- a/include/media/drv-intf/sh_mobile_ceu.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_MOBILE_CEU_H__
-#define __ASM_SH_MOBILE_CEU_H__
-
-#define SH_CEU_FLAG_USE_8BIT_BUS (1 << 0) /* use 8bit bus width */
-#define SH_CEU_FLAG_USE_16BIT_BUS (1 << 1) /* use 16bit bus width */
-#define SH_CEU_FLAG_HSYNC_LOW (1 << 2) /* default High if possible */
-#define SH_CEU_FLAG_VSYNC_LOW (1 << 3) /* default High if possible */
-#define SH_CEU_FLAG_LOWER_8BIT (1 << 4) /* default upper 8bit */
-
-struct device;
-struct resource;
-
-struct sh_mobile_ceu_companion {
- u32 num_resources;
- struct resource *resource;
- int id;
- void *platform_data;
-};
-
-struct sh_mobile_ceu_info {
- unsigned long flags;
- int max_width;
- int max_height;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- unsigned int *asd_sizes; /* 0-terminated array pf asd group sizes */
-};
-
-#endif /* __ASM_SH_MOBILE_CEU_H__ */
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
index 6f7a85ab3541..f05cd7b94a2c 100644
--- a/include/media/dvb_frontend.h
+++ b/include/media/dvb_frontend.h
@@ -160,7 +160,7 @@ enum dvbfe_algo {
* The frontend search for a signal failed
*
* @DVBFE_ALGO_SEARCH_INVALID:
- * The frontend search algorith was probably supplied with invalid
+ * The frontend search algorithm was probably supplied with invalid
* parameters and the search is an invalid one
*
* @DVBFE_ALGO_SEARCH_ERROR:
@@ -204,7 +204,7 @@ enum dvbfe_search {
* @set_config: callback function used to send some tuner-specific
* parameters.
* @get_frequency: get the actual tuned frequency
- * @get_bandwidth: get the bandwitdh used by the low pass filters
+ * @get_bandwidth: get the bandwidth used by the low pass filters
* @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband,
* should return 0.
* @get_status: returns the frontend lock status
@@ -232,7 +232,7 @@ struct dvb_tuner_ops {
int (*suspend)(struct dvb_frontend *fe);
int (*resume)(struct dvb_frontend *fe);
- /* This is the recomended way to set the tuner */
+ /* This is the recommended way to set the tuner */
int (*set_params)(struct dvb_frontend *fe);
int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p);
@@ -358,7 +358,7 @@ struct dvb_frontend_internal_info {
* @release: callback function called when frontend is ready to be
* freed.
* drivers should free any allocated memory.
- * @release_sec: callback function requesting that the Satelite Equipment
+ * @release_sec: callback function requesting that the Satellite Equipment
* Control (SEC) driver to release and free any memory
* allocated by the driver.
* @init: callback function used to initialize the tuner device.
diff --git a/include/media/i2c/tw9910.h b/include/media/i2c/tw9910.h
index bec8f7bce745..2f93799d5a21 100644
--- a/include/media/i2c/tw9910.h
+++ b/include/media/i2c/tw9910.h
@@ -16,8 +16,6 @@
#ifndef __TW9910_H__
#define __TW9910_H__
-#include <media/soc_camera.h>
-
/**
* tw9910_mpout_pin - MPOUT (multi-purpose output) pin functions
*/
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
index d21f40edc09e..6601455b3d5e 100644
--- a/include/media/mpeg2-ctrls.h
+++ b/include/media/mpeg2-ctrls.h
@@ -30,10 +30,9 @@ struct v4l2_mpeg2_sequence {
__u32 vbv_buffer_size;
/* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
- __u8 profile_and_level_indication;
+ __u16 profile_and_level_indication;
__u8 progressive_sequence;
__u8 chroma_format;
- __u8 pad;
};
struct v4l2_mpeg2_picture {
@@ -51,23 +50,20 @@ struct v4l2_mpeg2_picture {
__u8 intra_vlc_format;
__u8 alternate_scan;
__u8 repeat_first_field;
- __u8 progressive_frame;
- __u8 pad;
+ __u16 progressive_frame;
};
struct v4l2_ctrl_mpeg2_slice_params {
__u32 bit_size;
__u32 data_bit_offset;
+ __u64 backward_ref_ts;
+ __u64 forward_ref_ts;
struct v4l2_mpeg2_sequence sequence;
struct v4l2_mpeg2_picture picture;
/* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
- __u8 quantiser_scale_code;
-
- __u8 backward_ref_index;
- __u8 forward_ref_index;
- __u8 pad;
+ __u32 quantiser_scale_code;
};
struct v4l2_ctrl_mpeg2_quantization {
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index d621acadfbf3..5e684bb0d64c 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -37,6 +37,9 @@
#define RC_PROTO_BIT_XMP BIT_ULL(RC_PROTO_XMP)
#define RC_PROTO_BIT_CEC BIT_ULL(RC_PROTO_CEC)
#define RC_PROTO_BIT_IMON BIT_ULL(RC_PROTO_IMON)
+#define RC_PROTO_BIT_RCMM12 BIT_ULL(RC_PROTO_RCMM12)
+#define RC_PROTO_BIT_RCMM24 BIT_ULL(RC_PROTO_RCMM24)
+#define RC_PROTO_BIT_RCMM32 BIT_ULL(RC_PROTO_RCMM32)
#define RC_PROTO_BIT_ALL \
(RC_PROTO_BIT_UNKNOWN | RC_PROTO_BIT_OTHER | \
@@ -51,7 +54,8 @@
RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \
RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_SHARP | \
RC_PROTO_BIT_XMP | RC_PROTO_BIT_CEC | \
- RC_PROTO_BIT_IMON)
+ RC_PROTO_BIT_IMON | RC_PROTO_BIT_RCMM12 | \
+ RC_PROTO_BIT_RCMM24 | RC_PROTO_BIT_RCMM32)
/* All rc protocols for which we have decoders */
#define RC_PROTO_BIT_ALL_IR_DECODER \
(RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \
@@ -64,7 +68,9 @@
RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \
RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \
RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_SHARP | \
- RC_PROTO_BIT_XMP | RC_PROTO_BIT_IMON)
+ RC_PROTO_BIT_XMP | RC_PROTO_BIT_IMON | \
+ RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | \
+ RC_PROTO_BIT_RCMM32)
#define RC_PROTO_BIT_ALL_IR_ENCODER \
(RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \
@@ -77,7 +83,9 @@
RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \
RC_PROTO_BIT_RC6_6A_24 | \
RC_PROTO_BIT_RC6_6A_32 | RC_PROTO_BIT_RC6_MCE | \
- RC_PROTO_BIT_SHARP | RC_PROTO_BIT_IMON)
+ RC_PROTO_BIT_SHARP | RC_PROTO_BIT_IMON | \
+ RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | \
+ RC_PROTO_BIT_RCMM32)
#define RC_SCANCODE_UNKNOWN(x) (x)
#define RC_SCANCODE_OTHER(x) (x)
@@ -136,14 +144,14 @@ struct rc_map_list {
/* Routines from rc-map.c */
/**
- * rc_map_register() - Registers a Remote Controler scancode map
+ * rc_map_register() - Registers a Remote Controller scancode map
*
* @map: pointer to struct rc_map_list
*/
int rc_map_register(struct rc_map_list *map);
/**
- * rc_map_unregister() - Unregisters a Remote Controler scancode map
+ * rc_map_unregister() - Unregisters a Remote Controller scancode map
*
* @map: pointer to struct rc_map_list
*/
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 0c511ed8ffb0..2b93cb281fa5 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -362,15 +362,6 @@ __v4l2_find_nearest_size(const void *array, size_t array_size,
size_t height_offset, s32 width, s32 height);
/**
- * v4l2_get_timestamp - helper routine to get a timestamp to be used when
- * filling streaming metadata. Internally, it uses ktime_get_ts(),
- * which is the recommended way to get it.
- *
- * @tv: pointer to &struct timeval to be filled.
- */
-void v4l2_get_timestamp(struct timeval *tv);
-
-/**
* v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by
* calling the g_frame_interval op of the given subdev. It only works
* for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index d63cf227b0ab..e5cae37ced2d 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -648,7 +648,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
* @def: The control's default value.
* @qmenu_int: The control's menu entries.
*
- * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionaly
+ * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionally
* takes as an argument an array of integers determining the menu items.
*
* If @id refers to a non-integer-menu control, then this function will
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 17833e886e11..c2b6cdc714d2 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -34,11 +34,13 @@ struct video_device;
* @list: List node for the v4l2_fh->available list.
* @sev: Pointer to parent v4l2_subscribed_event.
* @event: The event itself.
+ * @ts: The timestamp of the event.
*/
struct v4l2_kevent {
struct list_head list;
struct v4l2_subscribed_event *sev;
struct v4l2_event event;
+ u64 ts;
};
/**
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index 6d9d9f1839ac..6c07825e18b9 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -143,7 +143,7 @@ struct v4l2_fwnode_link {
* @vep.bus_type to V4L2_MBUS_UNKNOWN. The caller may not provide a default
* configuration in this case as the defaults are specific to a given bus type.
* This functionality is deprecated and should not be used in new drivers and it
- * is only supported for CSI-2 D-PHY, parallel and Bt.656 busses.
+ * is only supported for CSI-2 D-PHY, parallel and Bt.656 buses.
*
* The function does not change the V4L2 fwnode endpoint state if it fails.
*
@@ -186,7 +186,7 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
* @vep.bus_type to V4L2_MBUS_UNKNOWN. The caller may not provide a default
* configuration in this case as the defaults are specific to a given bus type.
* This functionality is deprecated and should not be used in new drivers and it
- * is only supported for CSI-2 D-PHY, parallel and Bt.656 busses.
+ * is only supported for CSI-2 D-PHY, parallel and Bt.656 buses.
*
* The function does not change the V4L2 fwnode endpoint state if it fails.
*
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 5467264771ec..bb3e63d6bd1a 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -425,7 +425,7 @@ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
*
* @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
*/
-void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
+struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
@@ -433,7 +433,8 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
-static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
+static inline struct vb2_v4l2_buffer *
+v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}
@@ -444,7 +445,8 @@ static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
-static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
+static inline struct vb2_v4l2_buffer *
+v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}
@@ -454,7 +456,7 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
*
* @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
*/
-void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
+struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
@@ -462,7 +464,8 @@ void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
-static inline void *v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
+static inline struct vb2_v4l2_buffer *
+v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}
@@ -473,7 +476,8 @@ static inline void *v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
-static inline void *v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
+static inline struct vb2_v4l2_buffer *
+v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}
@@ -547,7 +551,7 @@ struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
*
* @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
*/
-void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
+struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
@@ -555,7 +559,8 @@ void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
-static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
+static inline struct vb2_v4l2_buffer *
+v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}
@@ -566,7 +571,8 @@ static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
-static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
+static inline struct vb2_v4l2_buffer *
+v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}
@@ -622,6 +628,26 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}
+/**
+ * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
+ * the output buffer to the capture buffer
+ *
+ * @out_vb: the output buffer that is the source of the metadata.
+ * @cap_vb: the capture buffer that will receive the metadata.
+ * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
+ *
+ * This helper function copies the timestamp, timecode (if the TIMECODE
+ * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
+ * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
+ *
+ * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
+ * flags are not copied. This is typically needed for encoders that
+ * set these bits explicitly.
+ */
+void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
+ struct vb2_v4l2_buffer *cap_vb,
+ bool copy_frame_flags);
+
/* v4l2 request helper */
void v4l2_m2m_request_queue(struct media_request *req);
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 47af609dc8f1..349e1c18cf48 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -70,7 +70,7 @@ struct v4l2_decode_vbi_line {
* device. These devices are usually audio/video muxers/encoders/decoders or
* sensors and webcam controllers.
*
- * Usually these devices are controlled through an i2c bus, but other busses
+ * Usually these devices are controlled through an i2c bus, but other buses
* may also be used.
*
* The v4l2_subdev struct provides a way of accessing these devices in a
@@ -1093,13 +1093,14 @@ void v4l2_subdev_init(struct v4l2_subdev *sd,
*/
#define v4l2_subdev_call(sd, o, f, args...) \
({ \
+ struct v4l2_subdev *__sd = (sd); \
int __result; \
- if (!(sd)) \
+ if (!__sd) \
__result = -ENODEV; \
- else if (!((sd)->ops->o && (sd)->ops->o->f)) \
+ else if (!(__sd->ops->o && __sd->ops->o->f)) \
__result = -ENOIOCTLCMD; \
else \
- __result = (sd)->ops->o->f((sd), ##args); \
+ __result = __sd->ops->o->f(__sd, ##args); \
__result; \
})
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index 60a664febba0..2c4db97cd96f 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -43,7 +43,7 @@ struct videobuf_queue;
* (which v4l2 uses).
*
* If there is a valid mapping for a buffer, buffer->baddr/bsize holds
- * userspace address + size which can be feeded into the
+ * userspace address + size which can be fed into the
* videobuf_dma_init_user function listed above.
*
*/
@@ -80,7 +80,7 @@ struct videobuf_buffer {
struct list_head queue;
wait_queue_head_t done;
unsigned int field_count;
- struct timeval ts;
+ u64 ts;
/* Memory type */
enum v4l2_memory memory;
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 4a737b2c610b..910f3d469005 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -262,6 +262,8 @@ struct vb2_buffer {
* prepared: this buffer has been prepared, i.e. the
* buf_prepare op was called. It is cleared again
* after the 'buf_finish' op is called.
+ * copied_timestamp: the timestamp of this capture buffer was copied
+ * from an output buffer.
* queued_entry: entry on the queued buffers list, which holds
* all buffers queued from userspace
* done_entry: entry on the list that stores all buffers ready
@@ -269,8 +271,9 @@ struct vb2_buffer {
* vb2_plane: per-plane information; do not change
*/
enum vb2_buffer_state state;
- bool synced;
- bool prepared;
+ unsigned int synced:1;
+ unsigned int prepared:1;
+ unsigned int copied_timestamp:1;
struct vb2_plane planes[VB2_MAX_PLANES];
struct list_head queued_entry;
@@ -296,6 +299,7 @@ struct vb2_buffer {
u32 cnt_mem_num_users;
u32 cnt_mem_mmap;
+ u32 cnt_buf_out_validate;
u32 cnt_buf_init;
u32 cnt_buf_prepare;
u32 cnt_buf_finish;
@@ -342,6 +346,10 @@ struct vb2_buffer {
* @wait_finish: reacquire all locks released in the previous callback;
* required to continue operation after sleeping while
* waiting for a new buffer to arrive.
+ * @buf_out_validate: called when the output buffer is prepared or queued
+ * to a request; drivers can use this to validate
+ * userspace-provided information; this is required only
+ * for OUTPUT queues.
* @buf_init: called once after allocating a buffer (in MMAP case)
* or after acquiring a new USERPTR buffer; drivers may
* perform additional buffer-related initialization;
@@ -391,7 +399,7 @@ struct vb2_buffer {
* @buf_queue: passes buffer vb to the driver; driver may start
* hardware operation on this buffer; driver should give
* the buffer back by calling vb2_buffer_done() function;
- * it is allways called after calling VIDIOC_STREAMON()
+ * it is always called after calling VIDIOC_STREAMON()
* ioctl; might be called before @start_streaming callback
* if user pre-queued buffers before calling
* VIDIOC_STREAMON().
@@ -409,6 +417,7 @@ struct vb2_ops {
void (*wait_prepare)(struct vb2_queue *q);
void (*wait_finish)(struct vb2_queue *q);
+ int (*buf_out_validate)(struct vb2_buffer *vb);
int (*buf_init)(struct vb2_buffer *vb);
int (*buf_prepare)(struct vb2_buffer *vb);
void (*buf_finish)(struct vb2_buffer *vb);
diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h
index 52afa0e2bb17..f28fcb0cfac7 100644
--- a/include/media/videobuf2-dma-sg.h
+++ b/include/media/videobuf2-dma-sg.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2010 Samsung Electronics
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 727855463838..8a10889dc2fd 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -55,6 +55,22 @@ struct vb2_v4l2_buffer {
#define to_vb2_v4l2_buffer(vb) \
container_of(vb, struct vb2_v4l2_buffer, vb2_buf)
+/**
+ * vb2_find_timestamp() - Find buffer with given timestamp in the queue
+ *
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @timestamp: the timestamp to find.
+ * @start_idx: the start index (usually 0) in the buffer array to start
+ * searching from. Note that there may be multiple buffers
+ * with the same timestamp value, so you can restart the search
+ * by setting @start_idx to the previously found index + 1.
+ *
+ * Returns the buffer index of the buffer with the given @timestamp, or
+ * -1 if no buffer with @timestamp was found.
+ */
+int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
+ unsigned int start_idx);
+
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
/**
diff --git a/include/misc/charlcd.h b/include/misc/charlcd.h
index 23f61850f363..1832402324ce 100644
--- a/include/misc/charlcd.h
+++ b/include/misc/charlcd.h
@@ -35,6 +35,7 @@ struct charlcd_ops {
};
struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd);
int charlcd_unregister(struct charlcd *lcd);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c745e9ccfab2..c61a1bf4e3de 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -39,7 +39,7 @@ struct tc_action {
struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
- struct tcf_chain *goto_chain;
+ struct tcf_chain __rcu *goto_chain;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -90,7 +90,7 @@ struct tc_action_ops {
int (*lookup)(struct net *net, struct tc_action **a, u32 index);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
- int bind, bool rtnl_held,
+ int bind, bool rtnl_held, struct tcf_proto *tp,
struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
@@ -181,6 +181,11 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+ struct tcf_chain **handle,
+ struct netlink_ext_ack *newchain);
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+ struct tcf_chain *newchain);
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 2bfb87eb98ce..78c856cba4f5 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
+ u32 *);
void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
ktime_t *);
+bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bb307a11ee63..13bfeb712d36 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
#define wiphy_info(wiphy, format, args...) \
dev_info(&(wiphy)->dev, format, ##args)
+#define wiphy_err_ratelimited(wiphy, format, args...) \
+ dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...) \
+ dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
#define wiphy_debug(wiphy, format, args...) \
wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
diff --git a/include/net/ip.h b/include/net/ip.h
index be3cad9c2e4c..583526aad1d0 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
-int ip_options_rcv_srr(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
/*
* Functions provided by ip_sockglue.c
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index ac2ed8ec662b..112dc18c658f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @ac: AC number to return packets from.
*
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
* Returns the next txq if successful, %NULL if no queue is eligible. If a txq
* is returned, it should be returned with ieee80211_return_txq() after the
* driver has finished scheduling it.
@@ -6240,51 +6238,58 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
/**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @ac: AC number to acquire locks for
*
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
*/
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
- __acquires(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq, bool force);
/**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
+ * @txq: pointer obtained from station or virtual interface
*
- * Release locks previously acquired by ieee80211_txq_schedule_end().
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
+ *
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
*/
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
- __releases(txq_lock);
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ __ieee80211_schedule_txq(hw, txq, true);
+}
/**
- * ieee80211_schedule_txq - schedule a TXQ for transmission
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
*
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
*/
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
- __acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+ bool force)
+{
+ __ieee80211_schedule_txq(hw, txq, force);
+}
/**
* ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index a68ced28d8f4..12689ddfc24c 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -59,6 +59,7 @@ struct net {
*/
spinlock_t rules_mod_lock;
+ u32 hash_mix;
atomic64_t cookie_gen;
struct list_head list; /* list of network namespaces */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5ee7b30b4917..d2bc733a2ef1 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -316,6 +316,8 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
gfp_t flags);
void nf_ct_tmpl_free(struct nf_conn *tmpl);
+u32 nf_ct_get_id(const struct nf_conn *ct);
+
static inline void
nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
{
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 778087591983..a49edfdf47e8 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -75,6 +75,12 @@ bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig);
+int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state,
+ u8 l4proto,
+ union nf_inet_addr *outer_daddr);
+
int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index c331e96a713b..3e9ab643eedf 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -382,6 +382,7 @@ void nft_unregister_set(struct nft_set_type *type);
* @dtype: data type (verdict or numeric type defined by userspace)
* @objtype: object type (see NFT_OBJECT_* definitions)
* @size: maximum set size
+ * @use: number of rule references to this set
* @nelems: number of elements
* @ndeact: number of deactivated elements queued for removal
* @timeout: default timeout value in jiffies
@@ -407,6 +408,7 @@ struct nft_set {
u32 dtype;
u32 objtype;
u32 size;
+ u32 use;
atomic_t nelems;
u32 ndeact;
u64 timeout;
@@ -416,7 +418,8 @@ struct nft_set {
unsigned char *udata;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
- u16 flags:14,
+ u16 flags:13,
+ bound:1,
genmask:2;
u8 klen;
u8 dlen;
@@ -466,6 +469,10 @@ struct nft_set_binding {
u32 flags;
};
+enum nft_trans_phase;
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding,
+ enum nft_trans_phase phase);
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
@@ -1344,15 +1351,12 @@ struct nft_trans_rule {
struct nft_trans_set {
struct nft_set *set;
u32 set_id;
- bool bound;
};
#define nft_trans_set(trans) \
(((struct nft_trans_set *)trans->data)->set)
#define nft_trans_set_id(trans) \
(((struct nft_trans_set *)trans->data)->set_id)
-#define nft_trans_set_bound(trans) \
- (((struct nft_trans_set *)trans->data)->bound)
struct nft_trans_chain {
bool update;
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 16a842456189..d9b665151f3d 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -2,16 +2,10 @@
#ifndef __NET_NS_HASH_H__
#define __NET_NS_HASH_H__
-#include <asm/cache.h>
-
-struct net;
+#include <net/net_namespace.h>
static inline u32 net_hash_mix(const struct net *net)
{
-#ifdef CONFIG_NET_NS
- return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
-#else
- return 0;
-#endif
+ return net->hash_mix;
}
#endif
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 5a0714ff500f..80f15b1c1a48 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
int nr_t1timer_running(struct sock *);
/* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
void nr_unregister_sysctl(void);
#endif
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 87499b6b35d6..df5c69db68af 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -166,7 +166,7 @@ struct nci_conn_info {
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NCI_HCI_MAX_PIPES 127
+#define NCI_HCI_MAX_PIPES 128
struct nci_hci_gate {
u8 gate;
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 347015515a7d..21a5243fecd1 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -108,7 +108,6 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
static inline void reqsk_free(struct request_sock *req)
{
- /* temporary debugging */
WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
req->rsk_ops->destructor(req);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 31284c078d06..a2b38b3deeca 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -378,6 +378,7 @@ struct tcf_chain {
bool flushing;
const struct tcf_proto_ops *tmplt_ops;
void *tmplt_priv;
+ struct rcu_head rcu;
};
struct tcf_block {
@@ -922,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
sch->qstats.overlimits++;
}
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+ __u32 qlen = qdisc_qlen_sum(sch);
+
+ return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+ __u32 *backlog)
+{
+ struct gnet_stats_queue qstats = { 0 };
+ __u32 len = qdisc_qlen_sum(sch);
+
+ __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+ *qlen = qstats.qlen;
+ *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_reset(sch);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
qh->head = NULL;
@@ -1105,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
sch_tree_lock(sch);
old = *pold;
*pold = new;
- if (old != NULL) {
- unsigned int qlen = old->q.qlen;
- unsigned int backlog = old->qstats.backlog;
-
- qdisc_reset(old);
- qdisc_tree_reduce_backlog(old, qlen, backlog);
- }
+ if (old != NULL)
+ qdisc_tree_flush_backlog(old);
sch_tree_unlock(sch);
return old;
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 32ee65a30aff..1c6e6c0766ca 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
- struct sctphdr *sh = sctp_hdr(skb);
+ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
const struct skb_checksum_ops ops = {
.update = sctp_csum_update,
.combine = sctp_csum_combine,
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 6640f84fe536..6d5beac29bc1 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -105,7 +105,6 @@ enum sctp_verb {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole use message */
- SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
SCTP_CMD_SET_ASOC, /* Restore association context */
SCTP_CMD_LAST
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 58e4b23cecf4..140fd836a396 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -48,6 +48,7 @@
#define __sctp_structs_h__
#include <linux/ktime.h>
+#include <linux/generic-radix-tree.h>
#include <linux/rhashtable-types.h>
#include <linux/socket.h> /* linux/in.h needs this!! */
#include <linux/in.h> /* We get struct sockaddr_in. */
@@ -57,7 +58,6 @@
#include <linux/atomic.h> /* This gets us atomic counters. */
#include <linux/skbuff.h> /* We need sk_buff_head. */
#include <linux/workqueue.h> /* We need tq_struct. */
-#include <linux/flex_array.h> /* We need flex_array. */
#include <linux/sctp.h> /* We need sctp* header structs. */
#include <net/sctp/auth.h> /* We need auth specific structs */
#include <net/ip.h> /* For inet_skb_parm */
@@ -1449,8 +1449,9 @@ struct sctp_stream_in {
};
struct sctp_stream {
- struct flex_array *out;
- struct flex_array *in;
+ GENRADIX(struct sctp_stream_out) out;
+ GENRADIX(struct sctp_stream_in) in;
+
__u16 outcnt;
__u16 incnt;
/* Current stream being sent, if any */
@@ -1473,17 +1474,17 @@ struct sctp_stream {
};
static inline struct sctp_stream_out *sctp_stream_out(
- const struct sctp_stream *stream,
+ struct sctp_stream *stream,
__u16 sid)
{
- return flex_array_get(stream->out, sid);
+ return genradix_ptr(&stream->out, sid);
}
static inline struct sctp_stream_in *sctp_stream_in(
- const struct sctp_stream *stream,
+ struct sctp_stream *stream,
__u16 sid)
{
- return flex_array_get(stream->in, sid);
+ return genradix_ptr(&stream->in, sid);
}
#define SCTP_SO(s, i) sctp_stream_out((s), (i))
diff --git a/include/net/sock.h b/include/net/sock.h
index 328cb7cb7b0b..341f8bafa0cf 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
hlist_add_head_rcu(&sk->sk_node, list);
}
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
@@ -2078,12 +2084,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
* @p: poll_table
*
* See the comments in the wq_has_sleeper function.
- *
- * Do not derive sock from filp->private_data here. An SMC socket establishes
- * an internal TCP socket that is used in the fallback case. All socket
- * operations on the SMC socket are then forwarded to the TCP socket. In case of
- * poll, the filp->private_data pointer references the SMC socket because the
- * TCP socket has no file assigned.
*/
static inline void sock_poll_wait(struct file *filp, struct socket *sock,
poll_table *p)
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index ee8d005f56fc..eb8f01c819e6 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
{
- return a->goto_chain->index;
+ return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
}
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tls.h b/include/net/tls.h
index a5a938583295..5934246b2c6f 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
int tls_tx_records(struct sock *sk, int flags);
@@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
@@ -379,7 +381,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
- return sk_fullsock(sk) &
+ return sk_fullsock(sk) &&
(smp_load_acquire(&sk->sk_validate_xmit_skb) ==
&tls_validate_xmit_skb);
#else
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 61cf7dbb6782..d074b6d60f8a 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -36,7 +36,6 @@ struct xdp_umem {
u32 headroom;
u32 chunk_size_nohr;
struct user_struct *user;
- struct pid *pid;
unsigned long address;
refcount_t users;
struct work_struct work;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 85386becbaea..99f722c4d804 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -219,7 +219,7 @@ struct xfrm_state {
struct xfrm_stats stats;
struct xfrm_lifetime_cur curlft;
- struct tasklet_hrtimer mtimer;
+ struct hrtimer mtimer;
struct xfrm_state_offload xso;
@@ -295,7 +295,8 @@ struct xfrm_replay {
};
struct xfrm_if_cb {
- struct xfrm_if *(*decode_session)(struct sk_buff *skb);
+ struct xfrm_if *(*decode_session)(struct sk_buff *skb,
+ unsigned short family);
};
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
@@ -1404,6 +1405,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
return atomic_read(&x->tunnel_users);
}
+static inline bool xfrm_id_proto_valid(u8 proto)
+{
+ switch (proto) {
+ case IPPROTO_AH:
+ case IPPROTO_ESP:
+ case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS:
+#endif
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
{
return (!userproto || proto == userproto ||
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index 6e35416170a3..9a90bd031e8c 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -100,6 +100,8 @@ struct ib_atomic_eth {
__be64 compare_data; /* potentially unaligned */
} __packed;
+#include <rdma/tid_rdma_defs.h>
+
union ib_ehdrs {
struct {
__be32 deth[2];
@@ -117,6 +119,16 @@ union ib_ehdrs {
__be32 aeth;
__be32 ieth;
struct ib_atomic_eth atomic_eth;
+ /* TID RDMA headers */
+ union {
+ struct tid_rdma_read_req r_req;
+ struct tid_rdma_read_resp r_rsp;
+ struct tid_rdma_write_req w_req;
+ struct tid_rdma_write_resp w_rsp;
+ struct tid_rdma_write_data w_data;
+ struct tid_rdma_resync resync;
+ struct tid_rdma_ack ack;
+ } tid_rdma;
} __packed;
struct ib_other_headers {
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index fdef558e3a2d..79ba8219e7dc 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -616,12 +616,11 @@ struct ib_mad_agent {
void *context;
u32 hi_tid;
u32 flags;
+ void *security;
+ struct list_head mad_agent_sec_list;
u8 port_num;
u8 rmpp_version;
- void *security;
bool smp_allowed;
- bool lsm_nb_reg;
- struct notifier_block lsm_nb;
};
/**
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 5d3755ec5afa..73af05db04c7 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -36,6 +36,7 @@
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
+#include <rdma/ib_verbs.h>
struct ib_ucontext;
struct ib_umem_odp;
@@ -80,7 +81,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
#ifdef CONFIG_INFINIBAND_USER_MEM
-struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
size_t size, int access, int dmasync);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
@@ -91,9 +92,10 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
#include <linux/err.h>
-static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
+static inline struct ib_umem *ib_umem_get(struct ib_udata *udata,
unsigned long addr, size_t size,
- int access, int dmasync) {
+ int access, int dmasync)
+{
return ERR_PTR(-EINVAL);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 0b1446fe2fab..dadc96dea39c 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -83,6 +83,19 @@ static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
return container_of(umem, struct ib_umem_odp, umem);
}
+/*
+ * The lower 2 bits of the DMA address signal the R/W permissions for
+ * the entry. To upgrade the permissions, provide the appropriate
+ * bitmask to the map_dma_pages function.
+ *
+ * Be aware that upgrading a mapped address might result in change of
+ * the DMA address for the page.
+ */
+#define ODP_READ_ALLOWED_BIT (1<<0ULL)
+#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)
+
+#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_ucontext_per_mm {
@@ -103,23 +116,10 @@ struct ib_ucontext_per_mm {
};
int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
-struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root_umem,
unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
-/*
- * The lower 2 bits of the DMA address signal the R/W permissions for
- * the entry. To upgrade the permissions, provide the appropriate
- * bitmask to the map_dma_pages function.
- *
- * Be aware that upgrading a mapped address might result in change of
- * the DMA address for the page.
- */
-#define ODP_READ_ALLOWED_BIT (1<<0ULL)
-#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)
-
-#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
-
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
u64 bcnt, u64 access_mask,
unsigned long current_seq);
@@ -169,12 +169,6 @@ static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
return -EINVAL;
}
-static inline struct ib_umem_odp *
-ib_alloc_odp_umem(struct ib_ucontext *context, unsigned long addr, size_t size)
-{
- return ERR_PTR(-EINVAL);
-}
-
static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 80debf5982ac..9b9e17bcc201 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -238,6 +238,7 @@ enum ib_device_cap_flags {
IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
/* The device supports padding incoming writes to cacheline. */
IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
+ IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
};
enum ib_signature_prot_cap {
@@ -268,6 +269,7 @@ enum ib_odp_transport_cap_bits {
IB_ODP_SUPPORT_WRITE = 1 << 2,
IB_ODP_SUPPORT_READ = 1 << 3,
IB_ODP_SUPPORT_ATOMIC = 1 << 4,
+ IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
};
struct ib_odp_caps {
@@ -276,6 +278,7 @@ struct ib_odp_caps {
uint32_t rc_odp_caps;
uint32_t uc_odp_caps;
uint32_t ud_odp_caps;
+ uint32_t xrc_odp_caps;
} per_transport_caps;
};
@@ -1504,12 +1507,10 @@ struct ib_ucontext {
bool cleanup_retryable;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void (*invalidate_range)(struct ib_umem_odp *umem_odp,
unsigned long start, unsigned long end);
struct mutex per_mm_list_lock;
struct list_head per_mm_list;
-#endif
struct ib_rdmacg_object cg_obj;
/*
@@ -2186,7 +2187,6 @@ struct ib_port_cache {
struct ib_cache {
rwlock_t lock;
struct ib_event_handler event_handler;
- struct ib_port_cache *ports;
};
struct iw_cm_verbs;
@@ -2198,6 +2198,21 @@ struct ib_port_immutable {
u32 max_mad_size;
};
+struct ib_port_data {
+ struct ib_device *ib_dev;
+
+ struct ib_port_immutable immutable;
+
+ spinlock_t pkey_list_lock;
+ struct list_head pkey_list;
+
+ struct ib_port_cache cache;
+
+ spinlock_t netdev_lock;
+ struct net_device __rcu *netdev;
+ struct hlist_node ndev_hash_link;
+};
+
/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
RDMA_NETDEV_OPA_VNIC,
@@ -2243,12 +2258,6 @@ struct rdma_netdev_alloc_params {
struct net_device *netdev, void *param);
};
-struct ib_port_pkey_list {
- /* Lock to hold while modifying the list. */
- spinlock_t list_lock;
- struct list_head pkey_list;
-};
-
struct ib_counters {
struct ib_device *device;
struct ib_uobject *uobject;
@@ -2264,6 +2273,19 @@ struct ib_counters_read_attr {
struct uverbs_attr_bundle;
+#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
+ .size_##ib_struct = \
+ (sizeof(struct drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO( \
+ !__same_type(((struct drv_struct *)NULL)->member, \
+ struct ib_struct)))
+
+#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
+ ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, GFP_KERNEL))
+
+#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
+
/**
* struct ib_device_ops - InfiniBand device operations
* This structure defines all the InfiniBand device operations, providers will
@@ -2367,15 +2389,14 @@ struct ib_device_ops {
int (*del_gid)(const struct ib_gid_attr *attr, void **context);
int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
u16 *pkey);
- struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
- struct ib_udata *udata);
- int (*dealloc_ucontext)(struct ib_ucontext *context);
+ int (*alloc_ucontext)(struct ib_ucontext *context,
+ struct ib_udata *udata);
+ void (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
- struct ib_pd *(*alloc_pd)(struct ib_device *device,
- struct ib_ucontext *context,
- struct ib_udata *udata);
- int (*dealloc_pd)(struct ib_pd *pd);
+ int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata);
+ void (*dealloc_pd)(struct ib_pd *pd);
struct ib_ah *(*create_ah)(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr, u32 flags,
struct ib_udata *udata);
@@ -2506,34 +2527,57 @@ struct ib_device_ops {
*/
int (*get_hw_stats)(struct ib_device *device,
struct rdma_hw_stats *stats, u8 port, int index);
+ /*
+ * This function is called once for each port when a ib device is
+ * registered.
+ */
+ int (*init_port)(struct ib_device *device, u8 port_num,
+ struct kobject *port_sysfs);
+ /**
+ * Allows rdma drivers to add their own restrack attributes.
+ */
+ int (*fill_res_entry)(struct sk_buff *msg,
+ struct rdma_restrack_entry *entry);
+
+ /* Device lifecycle callbacks */
+ /*
+ * Called after the device becomes registered, before clients are
+	 * attached.
+ */
+ int (*enable_driver)(struct ib_device *dev);
+ /*
+ * This is called as part of ib_dealloc_device().
+ */
+ void (*dealloc_driver)(struct ib_device *dev);
+
+ DECLARE_RDMA_OBJ_SIZE(ib_pd);
+ DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
};
+struct rdma_restrack_root;
+
struct ib_device {
/* Do not access @dma_device directly from ULP nor from HW drivers. */
struct device *dma_device;
struct ib_device_ops ops;
char name[IB_DEVICE_NAME_MAX];
+ struct rcu_head rcu_head;
struct list_head event_handler_list;
spinlock_t event_handler_lock;
- rwlock_t client_data_lock;
- struct list_head core_list;
- /* Access to the client_data_list is protected by the client_data_lock
- * rwlock and the lists_rwsem read-write semaphore
- */
- struct list_head client_data_list;
+ struct rw_semaphore client_data_rwsem;
+ struct xarray client_data;
+ struct mutex unregistration_lock;
struct ib_cache cache;
/**
- * port_immutable is indexed by port number
+ * port_data is indexed by port number
*/
- struct ib_port_immutable *port_immutable;
+ struct ib_port_data *port_data;
int num_comp_vectors;
- struct ib_port_pkey_list *port_pkey_list;
-
struct iw_cm_verbs *iwcm;
struct module *owner;
@@ -2547,12 +2591,6 @@ struct ib_device {
struct kobject *ports_kobj;
struct list_head port_list;
- enum {
- IB_DEV_UNINITIALIZED,
- IB_DEV_REGISTERED,
- IB_DEV_UNREGISTERED
- } reg_state;
-
int uverbs_abi_ver;
u64 uverbs_cmd_mask;
u64 uverbs_ex_cmd_mask;
@@ -2561,6 +2599,8 @@ struct ib_device {
__be64 node_guid;
u32 local_dma_lkey;
u16 is_switch:1;
+ /* Indicates kernel verbs support, should not be used in drivers */
+ u16 kverbs_provider:1;
u8 node_type;
u8 phys_port_cnt;
struct ib_device_attr attrs;
@@ -2572,10 +2612,7 @@ struct ib_device {
#endif
u32 index;
- /*
- * Implementation details of the RDMA core, don't use in drivers
- */
- struct rdma_restrack_root res;
+ struct rdma_restrack_root *res;
const struct uapi_definition *driver_def;
enum rdma_driver_id driver_id;
@@ -2586,10 +2623,13 @@ struct ib_device {
*/
refcount_t refcount;
struct completion unreg_completion;
+ struct work_struct unregistration_work;
+
+ const struct rdma_link_ops *link_ops;
};
struct ib_client {
- char *name;
+ const char *name;
void (*add) (struct ib_device *);
void (*remove)(struct ib_device *, void *client_data);
@@ -2616,22 +2656,47 @@ struct ib_client {
const struct sockaddr *addr,
void *client_data);
struct list_head list;
+ u32 client_id;
+
+ /* kverbs are not required by the client */
+ u8 no_kverbs_req:1;
};
-struct ib_device *ib_alloc_device(size_t size);
+struct ib_device *_ib_alloc_device(size_t size);
+#define ib_alloc_device(drv_struct, member) \
+ container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof( \
+ struct drv_struct, member))), \
+ struct drv_struct, member)
+
void ib_dealloc_device(struct ib_device *device);
void ib_get_device_fw_str(struct ib_device *device, char *str);
-int ib_register_device(struct ib_device *device, const char *name,
- int (*port_callback)(struct ib_device *, u8,
- struct kobject *));
+int ib_register_device(struct ib_device *device, const char *name);
void ib_unregister_device(struct ib_device *device);
+void ib_unregister_driver(enum rdma_driver_id driver_id);
+void ib_unregister_device_and_put(struct ib_device *device);
+void ib_unregister_device_queued(struct ib_device *ib_dev);
int ib_register_client (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);
-void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
+/**
+ * ib_get_client_data - Get IB client context
+ * @device:Device to get context for
+ * @client:Client to get context for
+ *
+ * ib_get_client_data() returns the client context data set with
+ * ib_set_client_data(). This can only be called while the client is
+ * registered to the device, once the ib_client remove() callback returns this
+ * cannot be called.
+ */
+static inline void *ib_get_client_data(struct ib_device *device,
+ struct ib_client *client)
+{
+ return xa_load(&device->client_data, client->client_id);
+}
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void *data);
void ib_set_device_ops(struct ib_device *device,
@@ -2790,6 +2855,16 @@ static inline u8 rdma_start_port(const struct ib_device *device)
}
/**
+ * rdma_for_each_port - Iterate over all valid port numbers of the IB device
+ * @device - The struct ib_device * to iterate over
+ * @iter - The unsigned int to store the port number
+ */
+#define rdma_for_each_port(device, iter) \
+ for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
+ unsigned int, iter))); \
+ iter <= rdma_end_port(device); (iter)++)
+
+/**
* rdma_end_port - Return the last valid port number for the device
* specified
*
@@ -2812,34 +2887,38 @@ static inline int rdma_is_port_valid(const struct ib_device *device,
static inline bool rdma_is_grh_required(const struct ib_device *device,
u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags &
- RDMA_CORE_PORT_IB_GRH_REQUIRED;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_PORT_IB_GRH_REQUIRED;
}
static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_PROT_IB;
}
static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags &
- (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
+ return device->port_data[port_num].immutable.core_cap_flags &
+ (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}
static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}
static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_PROT_ROCE;
}
static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_PROT_IWARP;
}
static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
@@ -2850,12 +2929,14 @@ static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_PROT_RAW_PACKET;
}
static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_PROT_USNIC;
}
/**
@@ -2872,7 +2953,8 @@ static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_n
*/
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_IB_MAD;
}
/**
@@ -2896,8 +2978,8 @@ static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
*/
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
- return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
- == RDMA_CORE_CAP_OPA_MAD;
+ return (device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_OPA_MAD) == RDMA_CORE_CAP_OPA_MAD;
}
/**
@@ -2922,7 +3004,8 @@ static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
*/
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_IB_SMI;
}
/**
@@ -2942,7 +3025,8 @@ static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
*/
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_IB_CM;
}
/**
@@ -2959,7 +3043,8 @@ static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
*/
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_IW_CM;
}
/**
@@ -2979,7 +3064,8 @@ static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
*/
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_IB_SA;
}
/**
@@ -3019,7 +3105,8 @@ static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num
*/
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_AF_IB;
}
/**
@@ -3040,7 +3127,8 @@ static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
*/
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_ETH_AH;
}
/**
@@ -3054,7 +3142,7 @@ static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
*/
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
- return (device->port_immutable[port_num].core_cap_flags &
+ return (device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}
@@ -3072,7 +3160,7 @@ static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
*/
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
- return device->port_immutable[port_num].max_mad_size;
+ return device->port_data[port_num].immutable.max_mad_size;
}
/**
@@ -3686,32 +3774,18 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
{
dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}
-/**
- * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
- * @dev: The device for which the DMA addresses were created
- * @sg: The scatter/gather entry
- *
- * Note: this function is obsolete. To do: change all occurrences of
- * ib_sg_dma_address() into sg_dma_address().
- */
-static inline u64 ib_sg_dma_address(struct ib_device *dev,
- struct scatterlist *sg)
-{
- return sg_dma_address(sg);
-}
/**
- * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
- * @dev: The device for which the DMA addresses were created
- * @sg: The scatter/gather entry
+ * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
+ * @dev: The device to query
*
- * Note: this function is obsolete. To do: change all occurrences of
- * ib_sg_dma_len() into sg_dma_len().
+ * The returned value represents a size in bytes.
*/
-static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
- struct scatterlist *sg)
+static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
- return sg_dma_len(sg);
+ struct device_dma_parameters *p = dev->dma_device->dma_parms;
+
+ return p ? p->max_segment_size : UINT_MAX;
}
/**
@@ -3946,9 +4020,17 @@ static inline bool ib_device_try_get(struct ib_device *dev)
}
void ib_device_put(struct ib_device *device);
+struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
+ enum rdma_driver_id driver_id);
+struct ib_device *ib_device_get_by_name(const char *name,
+ enum rdma_driver_id driver_id);
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
+int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
+ unsigned int port);
+struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
+
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
@@ -4222,7 +4304,6 @@ void rdma_roce_rescan_device(struct ib_device *ibdev);
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
-
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
@@ -4258,4 +4339,27 @@ rdma_set_device_sysfs_group(struct ib_device *dev,
dev->groups[1] = group;
}
+/**
+ * rdma_device_to_ibdev - Get ib_device pointer from device pointer
+ *
+ * @device: device pointer from which to retrieve the ib_device pointer
+ *
+ * rdma_device_to_ibdev() retrieves the ib_device pointer embedded in @device.
+ *
+ */
+static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
+{
+ return container_of(device, struct ib_device, dev);
+}
+
+/**
+ * rdma_device_to_drv_device - Helper macro to reach back to driver's
+ * ib_device holder structure from device pointer.
+ *
+ * NOTE: New drivers should not make use of this API; this API is only for
+ * existing drivers that have exposed sysfs entries using
+ * rdma_set_device_sysfs_group().
+ */
+#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
+ container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
#endif /* IB_VERBS_H */
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 5cd7701db148..0e1f02815643 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -94,7 +94,8 @@ struct iw_cm_id {
void (*add_ref)(struct iw_cm_id *);
void (*rem_ref)(struct iw_cm_id *);
u8 tos;
- bool mapped;
+ bool tos_set:1;
+ bool mapped:1;
};
struct iw_cm_conn_param {
@@ -105,6 +106,18 @@ struct iw_cm_conn_param {
u32 qpn;
};
+enum iw_flags {
+
+ /*
+ * This flag allows the iwcm and iwpmd to still advertise
+ * mappings but the real and mapped port numbers are the
+ * same. Further, iwpmd will not bind any user socket to
+ * reserve the port. This is required for soft iwarp
+ * to play in the port mapped iwarp space.
+ */
+ IW_F_NO_PORT_MAP = (1 << 0),
+};
+
struct iw_cm_verbs {
void (*add_ref)(struct ib_qp *qp);
@@ -127,6 +140,7 @@ struct iw_cm_verbs {
int (*destroy_listen)(struct iw_cm_id *cm_id);
char ifname[IFNAMSIZ];
+ enum iw_flags driver_flags;
};
/**
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index fda31673a562..b9fee7feeeb5 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -58,167 +58,31 @@ struct iwpm_sa_data {
struct sockaddr_storage mapped_loc_addr;
struct sockaddr_storage rem_addr;
struct sockaddr_storage mapped_rem_addr;
+ u32 flags;
};
-/**
- * iwpm_init - Allocate resources for the iwarp port mapper
- *
- * Should be called when network interface goes up.
- */
int iwpm_init(u8);
-
-/**
- * iwpm_exit - Deallocate resources for the iwarp port mapper
- *
- * Should be called when network interface goes down.
- */
int iwpm_exit(u8);
-
-/**
- * iwpm_valid_pid - Check if the userspace iwarp port mapper pid is valid
- *
- * Returns true if the pid is greater than zero, otherwise returns false
- */
int iwpm_valid_pid(void);
-
-/**
- * iwpm_register_pid - Send a netlink query to userspace
- * to get the iwarp port mapper pid
- * @pm_msg: Contains driver info to send to the userspace port mapper
- * @nl_client: The index of the netlink client
- */
int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client);
-
-/**
- * iwpm_add_mapping - Send a netlink add mapping request to
- * the userspace port mapper
- * @pm_msg: Contains the local ip/tcp address info to send
- * @nl_client: The index of the netlink client
- *
- * If the request is successful, the pm_msg stores
- * the port mapper response (mapped address info)
- */
int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client);
-
-/**
- * iwpm_add_and_query_mapping - Send a netlink add and query mapping request
- * to the userspace port mapper
- * @pm_msg: Contains the local and remote ip/tcp address info to send
- * @nl_client: The index of the netlink client
- *
- * If the request is successful, the pm_msg stores the
- * port mapper response (mapped local and remote address info)
- */
int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client);
-
-/**
- * iwpm_remove_mapping - Send a netlink remove mapping request
- * to the userspace port mapper
- *
- * @local_addr: Local ip/tcp address to remove
- * @nl_client: The index of the netlink client
- */
int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client);
-
-/**
- * iwpm_register_pid_cb - Process the port mapper response to
- * iwpm_register_pid query
- * @skb:
- * @cb: Contains the received message (payload and netlink header)
- *
- * If successful, the function receives the userspace port mapper pid
- * which is used in future communication with the port mapper
- */
int iwpm_register_pid_cb(struct sk_buff *, struct netlink_callback *);
-
-/**
- * iwpm_add_mapping_cb - Process the port mapper response to
- * iwpm_add_mapping request
- * @skb:
- * @cb: Contains the received message (payload and netlink header)
- */
int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *);
-
-/**
- * iwpm_add_and_query_mapping_cb - Process the port mapper response to
- * iwpm_add_and_query_mapping request
- * @skb:
- * @cb: Contains the received message (payload and netlink header)
- */
int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *);
-
-/**
- * iwpm_remote_info_cb - Process remote connecting peer address info, which
- * the port mapper has received from the connecting peer
- *
- * @cb: Contains the received message (payload and netlink header)
- *
- * Stores the IPv4/IPv6 address info in a hash table
- */
int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *);
-
-/**
- * iwpm_mapping_error_cb - Process port mapper notification for error
- *
- * @skb:
- * @cb: Contains the received message (payload and netlink header)
- */
int iwpm_mapping_error_cb(struct sk_buff *, struct netlink_callback *);
-
-/**
- * iwpm_mapping_info_cb - Process a notification that the userspace
- * port mapper daemon is started
- * @skb:
- * @cb: Contains the received message (payload and netlink header)
- *
- * Using the received port mapper pid, send all the local mapping
- * info records to the userspace port mapper
- */
int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
-
-/**
- * iwpm_ack_mapping_info_cb - Process the port mapper ack for
- * the provided local mapping info records
- * @skb:
- * @cb: Contains the received message (payload and netlink header)
- */
int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
-
-/**
- * iwpm_get_remote_info - Get the remote connecting peer address info
- *
- * @mapped_loc_addr: Mapped local address of the listening peer
- * @mapped_rem_addr: Mapped remote address of the connecting peer
- * @remote_addr: To store the remote address of the connecting peer
- * @nl_client: The index of the netlink client
- *
- * The remote address info is retrieved and provided to the client in
- * the remote_addr. After that it is removed from the hash table
- */
int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
struct sockaddr_storage *mapped_rem_addr,
struct sockaddr_storage *remote_addr, u8 nl_client);
-
-/**
- * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
- * info in a hash table
- * @local_addr: Local ip/tcp address
- * @mapped_addr: Mapped local ip/tcp address
- * @nl_client: The index of the netlink client
- */
int iwpm_create_mapinfo(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *mapped_addr, u8 nl_client);
-
-/**
- * iwpm_remove_mapinfo - Remove local and mapped IPv4/IPv6 address
- * info from the hash table
- * @local_addr: Local ip/tcp address
- * @mapped_addr: Mapped local ip/tcp address
- *
- * Returns err code if mapping info is not found in the hash table,
- * otherwise returns 0
- */
+ struct sockaddr_storage *mapped_addr, u8 nl_client,
+ u32 map_flags);
int iwpm_remove_mapinfo(struct sockaddr_storage *local_addr,
struct sockaddr_storage *mapped_addr);
+int iwpm_hello_cb(struct sk_buff *skb, struct netlink_callback *cb);
#endif /* _IW_PORTMAP_H */
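
The new map_flags argument lets the iw_cm layer forward a driver's iw_flags
when a mapping is recorded. A sketch under the assumption that the caller has
the device's iw_cm_verbs at hand (variable names are illustrative):

    /* Record local -> mapped address, propagating the driver's flags. */
    err = iwpm_create_mapinfo(&local_addr, &mapped_addr, RDMA_NL_IWCM,
                              iwcm->driver_flags);
    if (err)
            pr_warn("iwpm mapinfo failed: %d\n", err);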
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 60987a5903b7..71f48cfdc24c 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -374,6 +374,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse);
*/
int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
+int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout);
/**
* rdma_get_service_id - Return the IB service ID for a specified address.
* @id: Communication identifier associated with the address.
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 70218e6b5187..10732ab31ba2 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -99,4 +99,15 @@ int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags);
* Returns true on success or false if no listeners.
*/
bool rdma_nl_chk_listeners(unsigned int group);
+
+struct rdma_link_ops {
+ struct list_head list;
+ const char *type;
+ int (*newlink)(const char *ibdev_name, struct net_device *ndev);
+};
+
+void rdma_link_register(struct rdma_link_ops *ops);
+void rdma_link_unregister(struct rdma_link_ops *ops);
+
+#define MODULE_ALIAS_RDMA_LINK(type) MODULE_ALIAS("rdma-link-" type)
#endif /* _RDMA_NETLINK_H */
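
rdma_link_ops lets a software RDMA driver be instantiated by type name from
userspace ("rdma link add <name> type <type> netdev <dev>"). A registration
sketch; every "my_*" identifier is illustrative:

    static int my_newlink(const char *ibdev_name, struct net_device *ndev)
    {
            /* allocate an ib_device named ibdev_name, bind it to ndev ... */
            return 0;
    }

    static struct rdma_link_ops my_link_ops = {
            .type = "my_rdma",
            .newlink = my_newlink,
    };

    static int __init my_init(void)
    {
            rdma_link_register(&my_link_ops);
            return 0;
    }
    module_init(my_init);

    static void __exit my_exit(void)
    {
            rdma_link_unregister(&my_link_ops);
    }
    module_exit(my_exit);

    /* Lets "rdma link add" trigger request_module("rdma-link-my_rdma"). */
    MODULE_ALIAS_RDMA_LINK("my_rdma");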
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index dd0ed8048bb4..4c257aff7d32 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -182,9 +182,15 @@ struct rvt_driver_params {
u32 max_mad_size;
u8 qos_shift;
u8 max_rdma_atomic;
+ u8 extra_rdma_atomic;
u8 reserved_operations;
};
+/* User context */
+struct rvt_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
/* Protection domain */
struct rvt_pd {
struct ib_pd ibpd;
@@ -250,9 +256,6 @@ struct rvt_driver_provided {
*/
void (*do_send)(struct rvt_qp *qp);
- /* Passed to ib core registration. Callback to create syfs files */
- int (*port_callback)(struct ib_device *, u8, struct kobject *);
-
/*
* Returns a pointer to the underlying hardware's PCI device. This is
* used to display information as to what hardware is being referenced
@@ -522,7 +525,14 @@ static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
*/
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
- return rdi->dparms.max_rdma_atomic + 1;
+ return rdi->dparms.max_rdma_atomic +
+ rdi->dparms.extra_rdma_atomic + 1;
+}
+
+static inline unsigned int rvt_size_atomic(struct rvt_dev_info *rdi)
+{
+ return rdi->dparms.max_rdma_atomic +
+ rdi->dparms.extra_rdma_atomic;
}
/*
@@ -569,9 +579,10 @@ static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
/**
- * rvt_mod_retry_timer - mod a retry timer
+ * rvt_mod_retry_timer_ext - mod a retry timer
* @qp - the QP
+ * @shift - timeout shift to wait for multiple packets
* Modify a potentially already running retry timer
*/
-static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
+static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
struct ib_qp *ibqp = &qp->ibqp;
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
@@ -579,8 +590,13 @@ static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_TIMER;
/* 4.096 usec. * (1 << qp->timeout) */
- mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
- rdi->busy_jiffies);
+ mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
+ (qp->timeout_jiffies << shift));
+}
+
+static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
+{
+ rvt_mod_retry_timer_ext(qp, 0);
}
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
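
The shift argument scales the retry deadline geometrically: the timer now
expires at jiffies + rdi->busy_jiffies + (qp->timeout_jiffies << shift), so a
shift of 2 waits four base periods of 4.096 usec * (1 << qp->timeout). For
instance:

    /* Wait 4x the base retry period for a multi-packet request. */
    rvt_mod_retry_timer_ext(qp, 2);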
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index cbafb1878669..f0fbd4063fef 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -174,6 +174,7 @@ struct rvt_swqe {
u32 lpsn; /* last packet sequence number */
u32 ssn; /* send sequence number */
u32 length; /* total length of data in sg_list */
+ void *priv; /* driver dependent field */
struct rvt_sge sg_list[0];
};
@@ -235,6 +236,7 @@ struct rvt_ack_entry {
u32 lpsn;
u8 opcode;
u8 sent;
+ void *priv;
};
#define RC_QP_SCALING_INTERVAL 5
@@ -244,6 +246,7 @@ struct rvt_ack_entry {
#define RVT_OPERATION_ATOMIC_SGE 0x00000004
#define RVT_OPERATION_LOCAL 0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
+#define RVT_OPERATION_IGN_RNR_CNT 0x00000020
#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
@@ -373,6 +376,7 @@ struct rvt_qp {
u8 s_rnr_retry; /* requester RNR retry counter */
u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
u8 s_tail_ack_queue; /* index into s_ack_queue[] */
+ u8 s_acked_ack_queue; /* index into s_ack_queue[] */
struct rvt_sge_state s_ack_rdma_sge;
struct timer_list s_timer;
@@ -629,6 +633,16 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp);
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
/**
+ * rvt_restart_sge - rewind the sge state for a wqe
+ * @ss: the sge state pointer
+ * @wqe: the wqe to rewind
+ * @len: the data length from the start of the wqe in bytes
+ *
+ * Returns the remaining data length.
+ */
+u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
+
+/**
* @qp - the qp pair
* @len - the length
*
@@ -676,7 +690,11 @@ enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
-void rvt_add_retry_timer(struct rvt_qp *qp);
+void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
+static inline void rvt_add_retry_timer(struct rvt_qp *qp)
+{
+ rvt_add_retry_timer_ext(qp, 0);
+}
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
void *data, u32 length,
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 8f179be9d9a9..ecf3c7702a4f 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -7,12 +7,12 @@
#define _RDMA_RESTRACK_H_
#include <linux/typecheck.h>
-#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/sched/task.h>
#include <uapi/rdma/rdma_netlink.h>
+#include <linux/xarray.h>
/**
* enum rdma_restrack_type - HW objects to track
@@ -48,30 +48,7 @@ enum rdma_restrack_type {
RDMA_RESTRACK_MAX
};
-#define RDMA_RESTRACK_HASH_BITS 8
-struct rdma_restrack_entry;
-
-/**
- * struct rdma_restrack_root - main resource tracking management
- * entity, per-device
- */
-struct rdma_restrack_root {
- /*
- * @rwsem: Read/write lock to protect lists
- */
- struct rw_semaphore rwsem;
- /**
- * @hash: global database for all resources per-device
- */
- DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
- /**
- * @fill_res_entry: driver-specific fill function
- *
- * Allows rdma drivers to add their own restrack attributes.
- */
- int (*fill_res_entry)(struct sk_buff *msg,
- struct rdma_restrack_entry *entry);
-};
+struct ib_device;
/**
* struct rdma_restrack_entry - metadata per-entry
@@ -109,10 +86,6 @@ struct rdma_restrack_entry {
*/
const char *kern_name;
/**
- * @node: hash table entry
- */
- struct hlist_node node;
- /**
* @type: various objects in restrack database
*/
enum rdma_restrack_type type;
@@ -120,27 +93,13 @@ struct rdma_restrack_entry {
* @user: user resource
*/
bool user;
+ /**
+ * @id: ID to expose to users
+ */
+ u32 id;
};
-/**
- * rdma_restrack_init() - initialize resource tracking
- * @res: resource tracking root
- */
-void rdma_restrack_init(struct rdma_restrack_root *res);
-
-/**
- * rdma_restrack_clean() - clean resource tracking
- * @res: resource tracking root
- */
-void rdma_restrack_clean(struct rdma_restrack_root *res);
-
-/**
- * rdma_restrack_count() - the current usage of specific object
- * @res: resource entry
- * @type: actual type of object to operate
- * @ns: PID namespace
- */
-int rdma_restrack_count(struct rdma_restrack_root *res,
+int rdma_restrack_count(struct ib_device *dev,
enum rdma_restrack_type type,
struct pid_namespace *ns);
@@ -193,4 +152,7 @@ int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value);
int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name,
u64 value);
+struct rdma_restrack_entry *rdma_restrack_get_byid(struct ib_device *dev,
+ enum rdma_restrack_type type,
+ u32 id);
#endif /* _RDMA_RESTRACK_H_ */
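
With the per-device hash table gone, restrack entries carry an id and can be
resolved directly from the IDs exposed over the netlink interface. A usage
sketch for the new lookup:

    /* Resolve a QP restrack entry from a user-supplied ID. */
    struct rdma_restrack_entry *res;

    res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_QP, id);
    if (IS_ERR(res))
            return PTR_ERR(res);
    /* ... inspect the entry, then drop the reference the lookup took,
     * presumably via rdma_restrack_put(res).
     */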
diff --git a/include/rdma/tid_rdma_defs.h b/include/rdma/tid_rdma_defs.h
new file mode 100644
index 000000000000..08fe47c7ad2c
--- /dev/null
+++ b/include/rdma/tid_rdma_defs.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+
+#ifndef TID_RDMA_DEFS_H
+#define TID_RDMA_DEFS_H
+
+#include <rdma/ib_pack.h>
+
+struct tid_rdma_read_req {
+ __le32 kdeth0;
+ __le32 kdeth1;
+ struct ib_reth reth;
+ __be32 tid_flow_psn;
+ __be32 tid_flow_qp;
+ __be32 verbs_qp;
+};
+
+struct tid_rdma_read_resp {
+ __le32 kdeth0;
+ __le32 kdeth1;
+ __be32 aeth;
+ __be32 reserved[4];
+ __be32 verbs_psn;
+ __be32 verbs_qp;
+};
+
+struct tid_rdma_write_req {
+ __le32 kdeth0;
+ __le32 kdeth1;
+ struct ib_reth reth;
+ __be32 reserved[2];
+ __be32 verbs_qp;
+};
+
+struct tid_rdma_write_resp {
+ __le32 kdeth0;
+ __le32 kdeth1;
+ __be32 aeth;
+ __be32 reserved[3];
+ __be32 tid_flow_psn;
+ __be32 tid_flow_qp;
+ __be32 verbs_qp;
+};
+
+struct tid_rdma_write_data {
+ __le32 kdeth0;
+ __le32 kdeth1;
+ __be32 reserved[6];
+ __be32 verbs_qp;
+};
+
+struct tid_rdma_resync {
+ __le32 kdeth0;
+ __le32 kdeth1;
+ __be32 reserved[6];
+ __be32 verbs_qp;
+};
+
+struct tid_rdma_ack {
+ __le32 kdeth0;
+ __le32 kdeth1;
+ __be32 aeth;
+ __be32 reserved[2];
+ __be32 tid_flow_psn;
+ __be32 verbs_psn;
+ __be32 tid_flow_qp;
+ __be32 verbs_qp;
+};
+
+/*
+ * TID RDMA Opcodes
+ */
+#define IB_OPCODE_TID_RDMA 0xe0
+enum {
+ IB_OPCODE_WRITE_REQ = 0x0,
+ IB_OPCODE_WRITE_RESP = 0x1,
+ IB_OPCODE_WRITE_DATA = 0x2,
+ IB_OPCODE_WRITE_DATA_LAST = 0x3,
+ IB_OPCODE_READ_REQ = 0x4,
+ IB_OPCODE_READ_RESP = 0x5,
+ IB_OPCODE_RESYNC = 0x6,
+ IB_OPCODE_ACK = 0x7,
+
+ IB_OPCODE(TID_RDMA, WRITE_REQ),
+ IB_OPCODE(TID_RDMA, WRITE_RESP),
+ IB_OPCODE(TID_RDMA, WRITE_DATA),
+ IB_OPCODE(TID_RDMA, WRITE_DATA_LAST),
+ IB_OPCODE(TID_RDMA, READ_REQ),
+ IB_OPCODE(TID_RDMA, READ_RESP),
+ IB_OPCODE(TID_RDMA, RESYNC),
+ IB_OPCODE(TID_RDMA, ACK),
+};
+
+#define TID_OP(x) IB_OPCODE_TID_RDMA_##x
+
+/*
+ * Define TID RDMA specific WR opcodes. The ib_wr_opcode
+ * enum already provides some reserved values for use by
+ * low level drivers. Two of those are used but renamed
+ * to be more descriptive.
+ */
+#define IB_WR_TID_RDMA_WRITE IB_WR_RESERVED1
+#define IB_WR_TID_RDMA_READ IB_WR_RESERVED2
+
+#endif /* TID_RDMA_DEFS_H */
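
The IB_OPCODE() helper in ib_pack.h composes a transport base with a
per-operation code, so each TID RDMA opcode is IB_OPCODE_TID_RDMA (0xe0) plus
the operation value above. A quick sanity sketch:

    static inline void tid_opcode_sanity(void)
    {
            BUILD_BUG_ON(TID_OP(WRITE_REQ) != 0xe0); /* 0xe0 + 0x0 */
            BUILD_BUG_ON(TID_OP(READ_REQ)  != 0xe4); /* 0xe0 + 0x4 */
            BUILD_BUG_ON(TID_OP(ACK)       != 0xe7); /* 0xe0 + 0x7 */
    }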
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 27da906beea7..28570ac2b6a0 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -652,6 +652,7 @@ struct uverbs_attr_bundle {
struct ib_udata driver_udata;
struct ib_udata ucore;
struct ib_uverbs_file *ufile;
+ struct ib_ucontext *context;
DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN);
struct uverbs_attr attrs[];
};
@@ -663,6 +664,23 @@ static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_b
attrs_bundle->attr_present);
}
+/**
+ * rdma_udata_to_drv_context - Helper macro to get the driver's context out of
+ * ib_udata which is embedded in uverbs_attr_bundle.
+ *
+ * If udata is not NULL this cannot fail. A NULL udata results in a NULL
+ * ucontext pointer, as a safety precaution. Callers should use 'udata', not
+ * 'ucontext', to determine whether the driver call is in user or kernel
+ * mode.
+ *
+ */
+#define rdma_udata_to_drv_context(udata, drv_dev_struct, member) \
+ (udata ? container_of(container_of(udata, struct uverbs_attr_bundle, \
+ driver_udata) \
+ ->context, \
+ drv_dev_struct, member) : \
+ (drv_dev_struct *)NULL)
+
#define IS_UVERBS_COPY_ERR(_ret) ((_ret) && (_ret) != -ENOENT)
static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle,
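
rdma_udata_to_drv_context() is how a driver verb recovers its own ucontext
wrapper now that the bundle carries the context. A sketch; struct my_ucontext
and the verb body are illustrative:

    struct my_ucontext {
            struct ib_ucontext ibucontext;
            /* driver-private state ... */
    };

    static int my_verb(struct ib_pd *pd, struct ib_udata *udata)
    {
            struct my_ucontext *uctx = rdma_udata_to_drv_context(
                    udata, struct my_ucontext, ibucontext);

            if (!uctx)
                    return 0; /* NULL udata: kernel-mode caller */
            /* ... use uctx ... */
            return 0;
    }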
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 883abcf6d36e..794c47565971 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -48,9 +48,12 @@
#define uobj_get_type(_attrs, _object) \
uapi_get_object((_attrs)->ufile->device->uapi, _object)
+struct ib_uobject *_uobj_get_read(enum uverbs_default_objects type,
+ u32 object_id,
+ struct uverbs_attr_bundle *attrs);
+
#define uobj_get_read(_type, _id, _attrs) \
- rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \
- _uobj_check_id(_id), UVERBS_LOOKUP_READ)
+ _uobj_get_read(_type, _uobj_check_id(_id), _attrs)
#define ufd_get_read(_type, _fdnum, _attrs) \
rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \
@@ -67,9 +70,12 @@ static inline void *_uobj_get_obj_read(struct ib_uobject *uobj)
((struct ib_##_object *)_uobj_get_obj_read( \
uobj_get_read(_type, _id, _attrs)))
+struct ib_uobject *_uobj_get_write(enum uverbs_default_objects type,
+ u32 object_id,
+ struct uverbs_attr_bundle *attrs);
+
#define uobj_get_write(_type, _id, _attrs) \
- rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \
- _uobj_check_id(_id), UVERBS_LOOKUP_WRITE)
+ _uobj_get_write(_type, _uobj_check_id(_id), _attrs)
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
const struct uverbs_attr_bundle *attrs);
@@ -123,8 +129,10 @@ __uobj_alloc(const struct uverbs_api_object *obj,
{
struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, attrs->ufile);
- if (!IS_ERR(uobj))
+ if (!IS_ERR(uobj)) {
*ib_dev = uobj->context->device;
+ attrs->context = uobj->context;
+ }
return uobj;
}
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index acb1bfa3cc99..175d761695e1 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -157,6 +157,7 @@ struct uverbs_obj_fd_type {
extern const struct uverbs_obj_type_class uverbs_idr_class;
extern const struct uverbs_obj_type_class uverbs_fd_class;
+void uverbs_close_fd(struct file *f);
#define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \
sizeof(char))
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index cb8a273732cf..bb8092fa1e36 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -79,7 +79,7 @@ enum fip_state {
* It must not change after fcoe_ctlr_init() sets it.
*/
enum fip_mode {
- FIP_MODE_AUTO = FIP_ST_AUTO,
+ FIP_MODE_AUTO,
FIP_MODE_NON_FIP,
FIP_MODE_FABRIC,
FIP_MODE_VN2VN,
@@ -250,7 +250,7 @@ struct fcoe_rport {
};
/* FIP API functions */
-void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_state);
+void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_mode);
void fcoe_ctlr_destroy(struct fcoe_ctlr *);
void fcoe_ctlr_link_up(struct fcoe_ctlr *);
int fcoe_ctlr_link_down(struct fcoe_ctlr *);
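
With fip_mode decoupled from fip_state, the usual initialization call is
unchanged in spelling but now type-correct (the ctlr pointer is illustrative):

    fcoe_ctlr_init(&fcoe->ctlr, FIP_MODE_AUTO); /* a mode, not a fip_state */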
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 3de3b10da19a..56b2dba7d911 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -52,8 +52,8 @@ enum sas_phy_role {
};
enum sas_phy_type {
- PHY_TYPE_PHYSICAL,
- PHY_TYPE_VIRTUAL
+ PHY_TYPE_PHYSICAL,
+ PHY_TYPE_VIRTUAL
};
/* The events are mnemonically described in sas_dump.c
@@ -91,7 +91,7 @@ enum discover_event {
#define to_dom_device(_obj) container_of(_obj, struct domain_device, dev_obj)
#define to_dev_attr(_attr) container_of(_attr, struct domain_dev_attribute,\
- attr)
+ attr)
enum routing_attribute {
DIRECT_ROUTING,
@@ -184,37 +184,37 @@ struct domain_device {
spinlock_t done_lock;
enum sas_device_type dev_type;
- enum sas_linkrate linkrate;
- enum sas_linkrate min_linkrate;
- enum sas_linkrate max_linkrate;
+ enum sas_linkrate linkrate;
+ enum sas_linkrate min_linkrate;
+ enum sas_linkrate max_linkrate;
- int pathways;
+ int pathways;
- struct domain_device *parent;
- struct list_head siblings; /* devices on the same level */
- struct asd_sas_port *port; /* shortcut to root of the tree */
+ struct domain_device *parent;
+ struct list_head siblings; /* devices on the same level */
+ struct asd_sas_port *port; /* shortcut to root of the tree */
struct sas_phy *phy;
- struct list_head dev_list_node;
+ struct list_head dev_list_node;
struct list_head disco_list_node; /* awaiting probe or destruct */
- enum sas_protocol iproto;
- enum sas_protocol tproto;
+ enum sas_protocol iproto;
+ enum sas_protocol tproto;
- struct sas_rphy *rphy;
+ struct sas_rphy *rphy;
- u8 sas_addr[SAS_ADDR_SIZE];
- u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE];
+ u8 sas_addr[SAS_ADDR_SIZE];
+ u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE];
- u8 frame_rcvd[32];
+ u8 frame_rcvd[32];
- union {
- struct expander_device ex_dev;
- struct sata_device sata_dev; /* STP & directly attached */
+ union {
+ struct expander_device ex_dev;
+ struct sata_device sata_dev; /* STP & directly attached */
struct ssp_device ssp_dev;
- };
+ };
- void *lldd_dev;
+ void *lldd_dev;
unsigned long state;
struct kref kref;
};
@@ -512,10 +512,10 @@ enum exec_status {
/* When a task finishes with a response, the LLDD examines the
* response:
- * - For an ATA task task_status_struct::stat is set to
+ * - For an ATA task, task_status_struct::stat is set to
* SAS_PROTO_RESPONSE, and the task_status_struct::buf is set to the
* contents of struct ata_task_resp.
- * - For SSP tasks, if no data is present or status/TMF response
+ * - For SSP tasks, if no data is present or status/TMF response
* is valid, task_status_struct::stat is set. If data is present
* (SENSE data), the LLDD copies up to SAS_STATUS_BUF_SIZE, sets
* task_status_struct::buf_valid_size, and task_status_struct::stat is
@@ -671,15 +671,13 @@ extern void sas_prep_resume_ha(struct sas_ha_struct *sas_ha);
extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
-int sas_set_phy_speed(struct sas_phy *phy,
- struct sas_phy_linkrates *rates);
+int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
-extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *);
+extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
extern int sas_slave_configure(struct scsi_device *);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
-extern int sas_bios_param(struct scsi_device *,
- struct block_device *,
+extern int sas_bios_param(struct scsi_device *, struct block_device *,
sector_t capacity, int *hsc);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
@@ -709,7 +707,8 @@ int sas_eh_target_reset_handler(struct scsi_cmnd *cmd);
extern void sas_target_destroy(struct scsi_target *);
extern int sas_slave_alloc(struct scsi_device *);
-extern int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+extern int sas_ioctl(struct scsi_device *sdev, unsigned int cmd,
+ void __user *arg);
extern int sas_drain_work(struct sas_ha_struct *ha);
extern void sas_ssp_task_response(struct device *dev, struct sas_task *task,
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
deleted file mode 100644
index 86a569d008b2..000000000000
--- a/include/scsi/osd_initiator.h
+++ /dev/null
@@ -1,511 +0,0 @@
-/*
- * osd_initiator.h - OSD initiator API definition
- *
- * Copyright (C) 2008 Panasas Inc. All rights reserved.
- *
- * Authors:
- * Boaz Harrosh <ooo@electrozaur.com>
- * Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- */
-#ifndef __OSD_INITIATOR_H__
-#define __OSD_INITIATOR_H__
-
-#include <scsi/osd_protocol.h>
-#include <scsi/osd_types.h>
-
-#include <linux/blkdev.h>
-#include <scsi/scsi_device.h>
-
-/* Note: "NI" in comments below means "Not Implemented yet" */
-
-/* Configure of code:
- * #undef if you *don't* want OSD v1 support in runtime.
- * If #defined the initiator will dynamically configure to encode OSD v1
- * CDB's if the target is detected to be OSD v1 only.
- * OSD v2 only commands, options, and attributes will be ignored if target
- * is v1 only.
- * If #defined will result in bigger/slower code (OK Slower maybe not)
- * Q: Should this be CONFIG_SCSI_OSD_VER1_SUPPORT and set from Kconfig?
- */
-#define OSD_VER1_SUPPORT y
-
-enum osd_std_version {
- OSD_VER_NONE = 0,
- OSD_VER1 = 1,
- OSD_VER2 = 2,
-};
-
-/*
- * Object-based Storage Device.
- * This object represents an OSD device.
- * It is not a full linux device in any way. It is only
- * a place to hang resources associated with a Linux
- * request Q and some default properties.
- */
-struct osd_dev {
- struct scsi_device *scsi_device;
- unsigned def_timeout;
-
-#ifdef OSD_VER1_SUPPORT
- enum osd_std_version version;
-#endif
-};
-
-/* Unique Identification of an OSD device */
-struct osd_dev_info {
- unsigned systemid_len;
- u8 systemid[OSD_SYSTEMID_LEN];
- unsigned osdname_len;
- u8 *osdname;
-};
-
-/* Retrieve/return osd_dev(s) for use by Kernel clients
- * Use IS_ERR/ERR_PTR on returned "osd_dev *".
- */
-struct osd_dev *osduld_path_lookup(const char *dev_name);
-struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi);
-void osduld_put_device(struct osd_dev *od);
-
-const struct osd_dev_info *osduld_device_info(struct osd_dev *od);
-bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi);
-
-/* Add/remove test ioctls from external modules */
-typedef int (do_test_fn)(struct osd_dev *od, unsigned cmd, unsigned long arg);
-int osduld_register_test(unsigned ioctl, do_test_fn *do_test);
-void osduld_unregister_test(unsigned ioctl);
-
-/* These are called by uld at probe time */
-void osd_dev_init(struct osd_dev *od, struct scsi_device *scsi_device);
-void osd_dev_fini(struct osd_dev *od);
-
-/**
- * osd_auto_detect_ver - Detect the OSD version, return Unique Identification
- *
- * @od: OSD target lun handle
- * @caps: Capabilities authorizing OSD root read attributes access
- * @odi: Retrieved information uniquely identifying the osd target lun
- * Note: odi->osdname must be kfreed by caller.
- *
- * Auto detects the OSD version of the OSD target and sets the @od
- * accordingly. Meanwhile also returns the "system id" and "osd name" root
- * attributes which uniquely identify the OSD target. This member is usually
- * called by the ULD. ULD users should call osduld_device_info().
- * This rutine allocates osd requests and memory at GFP_KERNEL level and might
- * sleep.
- */
-int osd_auto_detect_ver(struct osd_dev *od,
- void *caps, struct osd_dev_info *odi);
-
-static inline struct request_queue *osd_request_queue(struct osd_dev *od)
-{
- return od->scsi_device->request_queue;
-}
-
-/* we might want to use function vector in the future */
-static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v)
-{
-#ifdef OSD_VER1_SUPPORT
- od->version = v;
-#endif
-}
-
-static inline bool osd_dev_is_ver1(struct osd_dev *od)
-{
-#ifdef OSD_VER1_SUPPORT
- return od->version == OSD_VER1;
-#else
- return false;
-#endif
-}
-
-struct osd_request;
-typedef void (osd_req_done_fn)(struct osd_request *or, void *private);
-
-struct osd_request {
- struct osd_cdb cdb;
- struct osd_data_out_integrity_info out_data_integ;
- struct osd_data_in_integrity_info in_data_integ;
-
- struct osd_dev *osd_dev;
- struct request *request;
-
- struct _osd_req_data_segment {
- void *buff;
- unsigned alloc_size; /* 0 here means: don't call kfree */
- unsigned total_bytes;
- } cdb_cont, set_attr, enc_get_attr, get_attr;
-
- struct _osd_io_info {
- struct bio *bio;
- u64 total_bytes;
- u64 residual;
- struct request *req;
- struct _osd_req_data_segment *last_seg;
- u8 *pad_buff;
- } out, in;
-
- unsigned timeout;
- unsigned retries;
- unsigned sense_len;
- u8 sense[OSD_MAX_SENSE_LEN];
- enum osd_attributes_mode attributes_mode;
-
- osd_req_done_fn *async_done;
- void *async_private;
- blk_status_t async_error;
- int req_errors;
-};
-
-static inline bool osd_req_is_ver1(struct osd_request *or)
-{
- return osd_dev_is_ver1(or->osd_dev);
-}
-
-/*
- * How to use the osd library:
- *
- * osd_start_request
- * Allocates a request.
- *
- * osd_req_*
- * Call one of, to encode the desired operation.
- *
- * osd_add_{get,set}_attr
- * Optionally add attributes to the CDB, list or page mode.
- *
- * osd_finalize_request
- * Computes final data out/in offsets and signs the request,
- * making it ready for execution.
- *
- * osd_execute_request
- * May be called to execute it through the block layer. Other wise submit
- * the associated block request in some other way.
- *
- * After execution:
- * osd_req_decode_sense
- * Decodes sense information to verify execution results.
- *
- * osd_req_decode_get_attr
- * Retrieve osd_add_get_attr_list() values if used.
- *
- * osd_end_request
- * Must be called to deallocate the request.
- */
-
-/**
- * osd_start_request - Allocate and initialize an osd_request
- *
- * @osd_dev: OSD device that holds the scsi-device and default values
- * that the request is associated with.
- *
- * Allocate osd_request and initialize all members to the
- * default/initial state.
- */
-struct osd_request *osd_start_request(struct osd_dev *od);
-
-enum osd_req_options {
- OSD_REQ_FUA = 0x08, /* Force Unit Access */
- OSD_REQ_DPO = 0x10, /* Disable Page Out */
-
- OSD_REQ_BYPASS_TIMESTAMPS = 0x80,
-};
-
-/**
- * osd_finalize_request - Sign request and prepare request for execution
- *
- * @or: osd_request to prepare
- * @options: combination of osd_req_options bit flags or 0.
- * @cap: A Pointer to an OSD_CAP_LEN bytes buffer that is received from
- * The security manager as capabilities for this cdb.
- * @cap_key: The cryptographic key used to sign the cdb/data. Can be null
- * if NOSEC is used.
- *
- * The actual request and bios are only allocated here, so are the get_attr
- * buffers that will receive the returned attributes. Copy's @cap to cdb.
- * Sign the cdb/data with @cap_key.
- */
-int osd_finalize_request(struct osd_request *or,
- u8 options, const void *cap, const u8 *cap_key);
-
-/**
- * osd_execute_request - Execute the request synchronously through block-layer
- *
- * @or: osd_request to Executed
- *
- * Calls blk_execute_rq to q the command and waits for completion.
- */
-int osd_execute_request(struct osd_request *or);
-
-/**
- * osd_execute_request_async - Execute the request without waitting.
- *
- * @or: - osd_request to Executed
- * @done: (Optional) - Called at end of execution
- * @private: - Will be passed to @done function
- *
- * Calls blk_execute_rq_nowait to queue the command. When execution is done
- * optionally calls @done with @private as parameter. @or->async_error will
- * have the return code
- */
-int osd_execute_request_async(struct osd_request *or,
- osd_req_done_fn *done, void *private);
-
-/**
- * osd_req_decode_sense_full - Decode sense information after execution.
- *
- * @or: - osd_request to examine
- * @osi - Receives a more detailed error report information (optional).
- * @silent - Do not print to dmsg (Even if enabled)
- * @bad_obj_list - Some commands act on multiple objects. Failed objects will
- * be received here (optional)
- * @max_obj - Size of @bad_obj_list.
- * @bad_attr_list - List of failing attributes (optional)
- * @max_attr - Size of @bad_attr_list.
- *
- * After execution, osd_request results are analyzed using this function. The
- * return code is the final disposition on the error. So it is possible that a
- * CHECK_CONDITION was returned from target but this will return NO_ERROR, for
- * example on recovered errors. All parameters are optional if caller does
- * not need any returned information.
- * Note: This function will also dump the error to dmsg according to settings
- * of the SCSI_OSD_DPRINT_SENSE Kconfig value. Set @silent if you know the
- * command would routinely fail, to not spam the dmsg file.
- */
-
-/**
- * osd_err_priority - osd categorized return codes in ascending severity.
- *
- * The categories are borrowed from the pnfs_osd_errno enum.
- * See comments for translated Linux codes returned by osd_req_decode_sense.
- */
-enum osd_err_priority {
- OSD_ERR_PRI_NO_ERROR = 0,
- /* Recoverable, caller should clear_highpage() all pages */
- OSD_ERR_PRI_CLEAR_PAGES = 1, /* -EFAULT */
- OSD_ERR_PRI_RESOURCE = 2, /* -ENOMEM */
- OSD_ERR_PRI_BAD_CRED = 3, /* -EINVAL */
- OSD_ERR_PRI_NO_ACCESS = 4, /* -EACCES */
- OSD_ERR_PRI_UNREACHABLE = 5, /* any other */
- OSD_ERR_PRI_NOT_FOUND = 6, /* -ENOENT */
- OSD_ERR_PRI_NO_SPACE = 7, /* -ENOSPC */
- OSD_ERR_PRI_EIO = 8, /* -EIO */
-};
-
-struct osd_sense_info {
- enum osd_err_priority osd_err_pri;
-
- int key; /* one of enum scsi_sense_keys */
- int additional_code ; /* enum osd_additional_sense_codes */
- union { /* Sense specific information */
- u16 sense_info;
- u16 cdb_field_offset; /* scsi_invalid_field_in_cdb */
- };
- union { /* Command specific information */
- u64 command_info;
- };
-
- u32 not_initiated_command_functions; /* osd_command_functions_bits */
- u32 completed_command_functions; /* osd_command_functions_bits */
- struct osd_obj_id obj;
- struct osd_attr attr;
-};
-
-int osd_req_decode_sense_full(struct osd_request *or,
- struct osd_sense_info *osi, bool silent,
- struct osd_obj_id *bad_obj_list, int max_obj,
- struct osd_attr *bad_attr_list, int max_attr);
-
-static inline int osd_req_decode_sense(struct osd_request *or,
- struct osd_sense_info *osi)
-{
- return osd_req_decode_sense_full(or, osi, false, NULL, 0, NULL, 0);
-}
-
-/**
- * osd_end_request - return osd_request to free store
- *
- * @or: osd_request to free
- *
- * Deallocate all osd_request resources (struct req's, BIOs, buffers, etc.)
- */
-void osd_end_request(struct osd_request *or);
-
-/*
- * CDB Encoding
- *
- * Note: call only one of the following methods.
- */
-
-/*
- * Device commands
- */
-void osd_req_set_master_seed_xchg(struct osd_request *or, ...);/* NI */
-void osd_req_set_master_key(struct osd_request *or, ...);/* NI */
-
-void osd_req_format(struct osd_request *or, u64 tot_capacity);
-
-/* list all partitions
- * @list header must be initialized to zero on first run.
- *
- * Call osd_is_obj_list_done() to find if we got the complete list.
- */
-int osd_req_list_dev_partitions(struct osd_request *or,
- osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem);
-
-void osd_req_flush_obsd(struct osd_request *or,
- enum osd_options_flush_scope_values);
-
-void osd_req_perform_scsi_command(struct osd_request *or,
- const u8 *cdb, ...);/* NI */
-void osd_req_task_management(struct osd_request *or, ...);/* NI */
-
-/*
- * Partition commands
- */
-void osd_req_create_partition(struct osd_request *or, osd_id partition);
-void osd_req_remove_partition(struct osd_request *or, osd_id partition);
-
-void osd_req_set_partition_key(struct osd_request *or,
- osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
- u8 seed[OSD_CRYPTO_SEED_SIZE]);/* NI */
-
-/* list all collections in the partition
- * @list header must be init to zero on first run.
- *
- * Call osd_is_obj_list_done() to find if we got the complete list.
- */
-int osd_req_list_partition_collections(struct osd_request *or,
- osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
- unsigned nelem);
-
-/* list all objects in the partition
- * @list header must be init to zero on first run.
- *
- * Call osd_is_obj_list_done() to find if we got the complete list.
- */
-int osd_req_list_partition_objects(struct osd_request *or,
- osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
- unsigned nelem);
-
-void osd_req_flush_partition(struct osd_request *or,
- osd_id partition, enum osd_options_flush_scope_values);
-
-/*
- * Collection commands
- */
-void osd_req_create_collection(struct osd_request *or,
- const struct osd_obj_id *);/* NI */
-void osd_req_remove_collection(struct osd_request *or,
- const struct osd_obj_id *);/* NI */
-
-/* list all objects in the collection */
-int osd_req_list_collection_objects(struct osd_request *or,
- const struct osd_obj_id *, osd_id initial_id,
- struct osd_obj_id_list *list, unsigned nelem);
-
-/* V2 only filtered list of objects in the collection */
-void osd_req_query(struct osd_request *or, ...);/* NI */
-
-void osd_req_flush_collection(struct osd_request *or,
- const struct osd_obj_id *, enum osd_options_flush_scope_values);
-
-void osd_req_get_member_attrs(struct osd_request *or, ...);/* V2-only NI */
-void osd_req_set_member_attrs(struct osd_request *or, ...);/* V2-only NI */
-
-/*
- * Object commands
- */
-void osd_req_create_object(struct osd_request *or, struct osd_obj_id *);
-void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *);
-
-void osd_req_write(struct osd_request *or,
- const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
-int osd_req_write_kern(struct osd_request *or,
- const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
-void osd_req_append(struct osd_request *or,
- const struct osd_obj_id *, struct bio *data_out);/* NI */
-void osd_req_create_write(struct osd_request *or,
- const struct osd_obj_id *, struct bio *data_out, u64 offset);/* NI */
-void osd_req_clear(struct osd_request *or,
- const struct osd_obj_id *, u64 offset, u64 len);/* NI */
-void osd_req_punch(struct osd_request *or,
- const struct osd_obj_id *, u64 offset, u64 len);/* V2-only NI */
-
-void osd_req_flush_object(struct osd_request *or,
- const struct osd_obj_id *, enum osd_options_flush_scope_values,
- /*V2*/ u64 offset, /*V2*/ u64 len);
-
-void osd_req_read(struct osd_request *or,
- const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
-int osd_req_read_kern(struct osd_request *or,
- const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
-
-/* Scatter/Gather write/read commands */
-int osd_req_write_sg(struct osd_request *or,
- const struct osd_obj_id *obj, struct bio *bio,
- const struct osd_sg_entry *sglist, unsigned numentries);
-int osd_req_read_sg(struct osd_request *or,
- const struct osd_obj_id *obj, struct bio *bio,
- const struct osd_sg_entry *sglist, unsigned numentries);
-int osd_req_write_sg_kern(struct osd_request *or,
- const struct osd_obj_id *obj, void **buff,
- const struct osd_sg_entry *sglist, unsigned numentries);
-int osd_req_read_sg_kern(struct osd_request *or,
- const struct osd_obj_id *obj, void **buff,
- const struct osd_sg_entry *sglist, unsigned numentries);
-
-/*
- * Root/Partition/Collection/Object Attributes commands
- */
-
-/* get before set */
-void osd_req_get_attributes(struct osd_request *or, const struct osd_obj_id *);
-
-/* set before get */
-void osd_req_set_attributes(struct osd_request *or, const struct osd_obj_id *);
-
-/*
- * Attributes appended to most commands
- */
-
-/* Attributes List mode (or V2 CDB) */
- /*
- * TODO: In ver2 if at finalize time only one attr was set and no gets,
- * then the Attributes CDB mode is used automatically to save IO.
- */
-
-/* set a list of attributes. */
-int osd_req_add_set_attr_list(struct osd_request *or,
- const struct osd_attr *, unsigned nelem);
-
-/* get a list of attributes */
-int osd_req_add_get_attr_list(struct osd_request *or,
- const struct osd_attr *, unsigned nelem);
-
-/*
- * Attributes list decoding
- * Must be called after osd_request.request was executed
- * It is called in a loop to decode the returned get_attr
- * (see osd_add_get_attr)
- */
-int osd_req_decode_get_attr_list(struct osd_request *or,
- struct osd_attr *, int *nelem, void **iterator);
-
-/* Attributes Page mode */
-
-/*
- * Read an attribute page and optionally set one attribute
- *
- * Retrieves the attribute page directly to a user buffer.
- * @attr_page_data shall stay valid until end of execution.
- * See osd_attributes.h for common page structures
- */
-int osd_req_add_get_attr_page(struct osd_request *or,
- u32 page_id, void *attr_page_data, unsigned max_page_len,
- const struct osd_attr *set_one);
-
-#endif /* __OSD_LIB_H__ */
diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
deleted file mode 100644
index 7a8d2cd30328..000000000000
--- a/include/scsi/osd_ore.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2011
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * Public Declarations of the ORE API
- *
- * This file is part of the ORE (Object Raid Engine) library.
- *
- * ORE is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation. (GPL v2)
- *
- * ORE is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the ORE; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __ORE_H__
-#define __ORE_H__
-
-#include <scsi/osd_initiator.h>
-#include <scsi/osd_attributes.h>
-#include <scsi/osd_sec.h>
-#include <linux/pnfs_osd_xdr.h>
-#include <linux/bug.h>
-
-struct ore_comp {
- struct osd_obj_id obj;
- u8 cred[OSD_CAP_LEN];
-};
-
-struct ore_layout {
- /* Our way of looking at the data_map */
- enum pnfs_osd_raid_algorithm4
- raid_algorithm;
- unsigned stripe_unit;
- unsigned mirrors_p1;
-
- unsigned group_width;
- unsigned parity;
- u64 group_depth;
- unsigned group_count;
-
- /* Cached often needed calculations filled in by
- * ore_verify_layout
- */
- unsigned long max_io_length; /* Max length that should be passed to
- * ore_get_rw_state
- */
-};
-
-struct ore_dev {
- struct osd_dev *od;
-};
-
-struct ore_components {
- unsigned first_dev; /* First logical device no */
- unsigned numdevs; /* Num of devices in array */
- /* If @single_comp == EC_SINGLE_COMP, @comps points to a single
- * component. else there are @numdevs components
- */
- enum EC_COMP_USAGE {
- EC_SINGLE_COMP = 0, EC_MULTPLE_COMPS = 0xffffffff
- } single_comp;
- struct ore_comp *comps;
-
- /* Array of pointers to ore_dev-* . User will usually have these pointed
- * too a bigger struct which contain an "ore_dev ored" member and use
- * container_of(oc->ods[i], struct foo_dev, ored) to access the bigger
- * structure.
- */
- struct ore_dev **ods;
-};
-
-/* ore_comp_dev Recievies a logical device index */
-static inline struct osd_dev *ore_comp_dev(
- const struct ore_components *oc, unsigned i)
-{
- BUG_ON((i < oc->first_dev) || (oc->first_dev + oc->numdevs <= i));
- return oc->ods[i - oc->first_dev]->od;
-}
-
-static inline void ore_comp_set_dev(
- struct ore_components *oc, unsigned i, struct osd_dev *od)
-{
- oc->ods[i - oc->first_dev]->od = od;
-}
-
-struct ore_striping_info {
- u64 offset;
- u64 obj_offset;
- u64 length;
- u64 first_stripe_start; /* only used in raid writes */
- u64 M; /* for truncate */
- unsigned bytes_in_stripe;
- unsigned dev;
- unsigned par_dev;
- unsigned unit_off;
- unsigned cur_pg;
- unsigned cur_comp;
- unsigned maxdevUnits;
-};
-
-struct ore_io_state;
-typedef void (*ore_io_done_fn)(struct ore_io_state *ios, void *private);
-struct _ore_r4w_op {
- /* @Priv given here is passed ios->private */
- struct page * (*get_page)(void *priv, u64 page_index, bool *uptodate);
- void (*put_page)(void *priv, struct page *page);
-};
-
-struct ore_io_state {
- struct kref kref;
- struct ore_striping_info si;
-
- void *private;
- ore_io_done_fn done;
-
- struct ore_layout *layout;
- struct ore_components *oc;
-
- /* Global read/write IO*/
- loff_t offset;
- unsigned long length;
- void *kern_buff;
-
- struct page **pages;
- unsigned nr_pages;
- unsigned pgbase;
- unsigned pages_consumed;
-
- /* Attributes */
- unsigned in_attr_len;
- struct osd_attr *in_attr;
- unsigned out_attr_len;
- struct osd_attr *out_attr;
-
- bool reading;
-
- /* House keeping of Parity pages */
- bool extra_part_alloc;
- struct page **parity_pages;
- unsigned max_par_pages;
- unsigned cur_par_page;
- unsigned sgs_per_dev;
- struct __stripe_pages_2d *sp2d;
- struct ore_io_state *ios_read_4_write;
- const struct _ore_r4w_op *r4w;
-
- /* Variable array of size numdevs */
- unsigned numdevs;
- struct ore_per_dev_state {
- struct osd_request *or;
- struct bio *bio;
- loff_t offset;
- unsigned length;
- unsigned last_sgs_total;
- unsigned dev;
- struct osd_sg_entry *sglist;
- unsigned cur_sg;
- } per_dev[];
-};
-
-static inline unsigned ore_io_state_size(unsigned numdevs)
-{
- return sizeof(struct ore_io_state) +
- sizeof(struct ore_per_dev_state) * numdevs;
-}
-
-/* ore.c */
-int ore_verify_layout(unsigned total_comps, struct ore_layout *layout);
-void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
- u64 length, struct ore_striping_info *si);
-int ore_get_rw_state(struct ore_layout *layout, struct ore_components *comps,
- bool is_reading, u64 offset, u64 length,
- struct ore_io_state **ios);
-int ore_get_io_state(struct ore_layout *layout, struct ore_components *comps,
- struct ore_io_state **ios);
-void ore_put_io_state(struct ore_io_state *ios);
-
-typedef void (*ore_on_dev_error)(struct ore_io_state *ios, struct ore_dev *od,
- unsigned dev_index, enum osd_err_priority oep,
- u64 dev_offset, u64 dev_len);
-int ore_check_io(struct ore_io_state *ios, ore_on_dev_error rep);
-
-int ore_create(struct ore_io_state *ios);
-int ore_remove(struct ore_io_state *ios);
-int ore_write(struct ore_io_state *ios);
-int ore_read(struct ore_io_state *ios);
-int ore_truncate(struct ore_layout *layout, struct ore_components *comps,
- u64 size);
-
-int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr);
-
-extern const struct osd_attr g_attr_logical_length;
-
-#endif
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index eb7853c1a23b..5339baadc082 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -274,10 +274,4 @@ static inline int scsi_is_wlun(u64 lun)
/* Used to obtain the PCI location of a device */
#define SCSI_IOCTL_GET_PCI 0x5387
-/* Pull a u32 out of a SCSI message (using BE SCSI conventions) */
-static inline __u32 scsi_to_u32(__u8 *ptr)
-{
- return (ptr[0]<<24) + (ptr[1]<<16) + (ptr[2]<<8) + ptr[3];
-}
-
#endif /* _SCSI_SCSI_H */
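
Open-coded big-endian extraction like the removed scsi_to_u32() is covered by
the generic unaligned helpers, which is presumably what remaining callers
migrate to:

    #include <asm/unaligned.h>

    __u32 v = get_unaligned_be32(ptr); /* same BE 32-bit load as the helper */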
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index d85e6befa26b..76ed5e4acd38 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -35,7 +35,6 @@ struct scsi_driver;
struct scsi_data_buffer {
struct sg_table table;
unsigned length;
- int resid;
};
/* embedded in scsi_cmnd */
@@ -76,16 +75,6 @@ struct scsi_cmnd {
int eh_eflags; /* Used by error handlr */
/*
- * A SCSI Command is assigned a nonzero serial_number before passed
- * to the driver's queue command function. The serial_number is
- * cleared when scsi_done is entered indicating that the command
- * has been completed. It is a bug for LLDDs to use this number
- * for purposes other than printk (and even that is only useful
- * for debugging).
- */
- unsigned long serial_number;
-
- /*
* This is set to jiffies as it was when the command was first
* allocated. It is used to time how long the command has
* been outstanding
@@ -202,34 +191,17 @@ static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
{
- cmd->sdb.resid = resid;
+ cmd->req.resid_len = resid;
}
static inline int scsi_get_resid(struct scsi_cmnd *cmd)
{
- return cmd->sdb.resid;
+ return cmd->req.resid_len;
}
#define scsi_for_each_sg(cmd, sg, nseg, __i) \
for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
-static inline int scsi_bidi_cmnd(struct scsi_cmnd *cmd)
-{
- return blk_bidi_rq(cmd->request) &&
- (cmd->request->next_rq->special != NULL);
-}
-
-static inline struct scsi_data_buffer *scsi_in(struct scsi_cmnd *cmd)
-{
- return scsi_bidi_cmnd(cmd) ?
- cmd->request->next_rq->special : &cmd->sdb;
-}
-
-static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd)
-{
- return &cmd->sdb;
-}
-
static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd,
void *buf, int buflen)
{
@@ -351,7 +323,7 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
{
- unsigned int xfer_len = scsi_out(scmd)->length;
+ unsigned int xfer_len = scmd->sdb.length;
unsigned int prot_interval = scsi_prot_interval(scmd);
if (scmd->prot_flags & SCSI_PROT_TRANSFER_PI)
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 2b7e227960e1..3810b340551c 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -39,7 +39,6 @@ struct scsi_eh_save {
unsigned char prot_op;
unsigned char *cmnd;
struct scsi_data_buffer sdb;
- struct request *next_rq;
/* new command support */
unsigned char eh_cmnd[BLK_MAX_CDB];
struct scatterlist sense_sgl;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 6ca954e9f752..2b539a1b3f62 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -60,7 +60,8 @@ struct scsi_host_template {
*
* Status: OPTIONAL
*/
- int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
+ int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
#ifdef CONFIG_COMPAT
@@ -70,7 +71,8 @@ struct scsi_host_template {
*
* Status: OPTIONAL
*/
- int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
+ int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
#endif
/*
@@ -298,11 +300,7 @@ struct scsi_host_template {
/*
* This is an optional routine that allows the transport to become
* involved when a scsi io timer fires. The return value tells the
- * timer routine how to finish the io timeout handling:
- * EH_HANDLED: I fixed the error, please complete the command
- * EH_RESET_TIMER: I need more time, reset the timer and
- * begin counting again
- * EH_DONE: Begin normal error recovery
+ * timer routine how to finish the io timeout handling.
*
* Status: OPTIONAL
*/
@@ -488,7 +486,6 @@ struct scsi_host_template {
unsigned long irq_flags; \
int rc; \
spin_lock_irqsave(shost->host_lock, irq_flags); \
- scsi_cmd_get_serial(shost, cmd); \
rc = func_name##_lck (cmd, cmd->scsi_done); \
spin_unlock_irqrestore(shost->host_lock, irq_flags); \
return rc; \
@@ -598,12 +595,6 @@ struct Scsi_Host {
* is nr_hw_queues * can_queue.
*/
unsigned nr_hw_queues;
- /*
- * Used to assign serial numbers to the cmds.
- * Protected by the host lock.
- */
- unsigned long cmd_serial_number;
-
unsigned active_mode:2;
unsigned unchecked_isa_dma:1;
@@ -740,7 +731,6 @@ extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
-extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);
static inline int __must_check scsi_add_host(struct Scsi_Host *host,
struct device *dev)
diff --git a/include/soc/rockchip/rk3399_grf.h b/include/soc/rockchip/rk3399_grf.h
new file mode 100644
index 000000000000..3eebabcb2812
--- /dev/null
+++ b/include/soc/rockchip/rk3399_grf.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Rockchip General Register Files definitions
+ *
+ * Copyright (c) 2018, Collabora Ltd.
+ * Author: Enric Balletbo i Serra <enric.balletbo@collabora.com>
+ */
+
+#ifndef __SOC_RK3399_GRF_H
+#define __SOC_RK3399_GRF_H
+
+/* PMU GRF Registers */
+#define RK3399_PMUGRF_OS_REG2 0x308
+#define RK3399_PMUGRF_DDRTYPE_SHIFT 13
+#define RK3399_PMUGRF_DDRTYPE_MASK 7
+#define RK3399_PMUGRF_DDRTYPE_DDR3 3
+#define RK3399_PMUGRF_DDRTYPE_LPDDR2 5
+#define RK3399_PMUGRF_DDRTYPE_LPDDR3 6
+#define RK3399_PMUGRF_DDRTYPE_LPDDR4 7
+
+#endif
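
The DDR type is a 3-bit field in OS_REG2, decoded with the usual
shift-and-mask pattern. A sketch, assuming a regmap handle for the PMU GRF:

    u32 os_reg2, ddr_type;

    regmap_read(pmugrf, RK3399_PMUGRF_OS_REG2, &os_reg2);
    ddr_type = (os_reg2 >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
               RK3399_PMUGRF_DDRTYPE_MASK;

    if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4)
            /* take the LPDDR4 timing path ... */;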
diff --git a/include/soc/rockchip/rockchip_sip.h b/include/soc/rockchip/rockchip_sip.h
index 7e28092c4d3d..ad9482c56797 100644
--- a/include/soc/rockchip/rockchip_sip.h
+++ b/include/soc/rockchip/rockchip_sip.h
@@ -23,5 +23,6 @@
#define ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE 0x05
#define ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ 0x06
#define ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM 0x07
+#define ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD 0x08
#endif
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index b43f37fea096..e489a028ec9f 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -9,6 +9,7 @@
#ifndef __SOC_TEGRA_MC_H__
#define __SOC_TEGRA_MC_H__
+#include <linux/err.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
@@ -77,6 +78,7 @@ struct tegra_smmu_soc {
struct tegra_mc;
struct tegra_smmu;
+struct gart_device;
#ifdef CONFIG_TEGRA_IOMMU_SMMU
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
@@ -96,6 +98,28 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
}
#endif
+#ifdef CONFIG_TEGRA_IOMMU_GART
+struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc);
+int tegra_gart_suspend(struct gart_device *gart);
+int tegra_gart_resume(struct gart_device *gart);
+#else
+static inline struct gart_device *
+tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int tegra_gart_suspend(struct gart_device *gart)
+{
+ return -ENODEV;
+}
+
+static inline int tegra_gart_resume(struct gart_device *gart)
+{
+ return -ENODEV;
+}
+#endif
+
struct tegra_mc_reset {
const char *name;
unsigned long id;
@@ -144,7 +168,8 @@ struct tegra_mc_soc {
struct tegra_mc {
struct device *dev;
struct tegra_smmu *smmu;
- void __iomem *regs, *regs2;
+ struct gart_device *gart;
+ void __iomem *regs;
struct clk *clk;
int irq;
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 465d7d033c4c..18bd8c3ea605 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -750,7 +750,7 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r
}
/**
- * snd_pcm_playback_avail - Get the available (readable) space for capture
+ * snd_pcm_capture_avail - Get the available (readable) space for capture
* @runtime: PCM runtime instance
*
* Result is between 0 ... (boundary - 1)
diff --git a/include/sound/soc.h b/include/sound/soc.h
index eb7db605955b..482b4ea87c3c 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -802,8 +802,13 @@ struct snd_soc_component_driver {
int probe_order;
int remove_order;
- /* signal if the module handling the component cannot be removed */
- unsigned int ignore_module_refcount:1;
+ /*
+ * Signal that the module handling the component should not be removed
+ * while a PCM is open. Setting this prevents the module refcount from
+ * being incremented in probe() but allows it to be incremented when a
+ * PCM is opened and decremented when it is closed.
+ */
+ unsigned int module_get_upon_open:1;
/* bits */
unsigned int idle_bias_on:1;
@@ -1083,6 +1088,8 @@ struct snd_soc_card {
struct mutex mutex;
struct mutex dapm_mutex;
+ spinlock_t dpcm_lock;
+
bool instantiated;
bool topology_shortname_created;
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 91948bc5b185..75bee29fd7dd 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -26,7 +26,7 @@ struct iscsit_transport {
void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *,
struct iscsi_datain_req *, const void *, u32);
- void (*iscsit_release_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
+ void (*iscsit_unmap_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
void (*iscsit_get_rx_pdu)(struct iscsi_conn *);
int (*iscsit_validate_params)(struct iscsi_conn *);
void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *,
@@ -53,7 +53,7 @@ extern void iscsit_put_transport(struct iscsit_transport *);
*/
extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *);
-extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
+extern void iscsit_set_unsolicited_dataout(struct iscsi_cmd *);
extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
struct iscsi_scsi_req *);
extern int
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 69b7b955902c..19a5bf4214fc 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -502,7 +502,6 @@ struct se_cmd {
#define CMD_T_STOP (1 << 5)
#define CMD_T_TAS (1 << 10)
#define CMD_T_FABRIC_STOP (1 << 11)
-#define CMD_T_PRE_EXECUTE (1 << 12)
spinlock_t t_state_lock;
struct kref cmd_kref;
struct completion t_transport_stop_comp;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index ee5ddd81cd8d..8ed90407f062 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -74,7 +74,6 @@ struct target_core_fabric_ops {
u32 (*sess_get_initiator_sid)(struct se_session *,
unsigned char *, u32);
int (*write_pending)(struct se_cmd *);
- int (*write_pending_status)(struct se_cmd *);
void (*set_default_node_attributes)(struct se_node_acl *);
int (*get_cmd_state)(struct se_cmd *);
int (*queue_data_in)(struct se_cmd *);
@@ -174,6 +173,7 @@ int transport_generic_free_cmd(struct se_cmd *, int);
bool transport_wait_for_tasks(struct se_cmd *);
int transport_send_check_condition_and_sense(struct se_cmd *,
sense_reason_t, int);
+int target_send_busy(struct se_cmd *cmd);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
diff --git a/include/trace/events/devfreq.h b/include/trace/events/devfreq.h
new file mode 100644
index 000000000000..cf5b8772175d
--- /dev/null
+++ b/include/trace/events/devfreq.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM devfreq
+
+#if !defined(_TRACE_DEVFREQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DEVFREQ_H
+
+#include <linux/devfreq.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(devfreq_monitor,
+ TP_PROTO(struct devfreq *devfreq),
+
+ TP_ARGS(devfreq),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, freq)
+ __field(unsigned long, busy_time)
+ __field(unsigned long, total_time)
+ __field(unsigned int, polling_ms)
+ __string(dev_name, dev_name(&devfreq->dev))
+ ),
+
+ TP_fast_assign(
+ __entry->freq = devfreq->previous_freq;
+ __entry->busy_time = devfreq->last_status.busy_time;
+ __entry->total_time = devfreq->last_status.total_time;
+ __entry->polling_ms = devfreq->profile->polling_ms;
+ __assign_str(dev_name, dev_name(&devfreq->dev));
+ ),
+
+ TP_printk("dev_name=%s freq=%lu polling_ms=%u load=%lu",
+ __get_str(dev_name), __entry->freq, __entry->polling_ms,
+ __entry->total_time == 0 ? 0 :
+ (100 * __entry->busy_time) / __entry->total_time)
+);
+#endif /* _TRACE_DEVFREQ_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
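
The TP_printk above derives the load on the fly; written out as a plain helper, the guarded arithmetic looks like this (helper name is illustrative):

    /* Busy time as a percentage of total time; 0 when no statistics
     * exist yet, avoiding a divide-by-zero in the trace format path. */
    static inline unsigned long devfreq_load_pct(unsigned long busy_time,
                                                 unsigned long total_time)
    {
            return total_time ? (100 * busy_time) / total_time : 0;
    }
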
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 3ec73f17ee2a..a3916b4dd57e 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -149,6 +149,17 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ CP_SPEC_LOG_NUM, "log type is 2" }, \
{ CP_RECOVER_DIR, "dir needs recovery" })
+#define show_shutdown_mode(type) \
+ __print_symbolic(type, \
+ { F2FS_GOING_DOWN_FULLSYNC, "full sync" }, \
+ { F2FS_GOING_DOWN_METASYNC, "meta sync" }, \
+ { F2FS_GOING_DOWN_NOSYNC, "no sync" }, \
+ { F2FS_GOING_DOWN_METAFLUSH, "meta flush" }, \
+ { F2FS_GOING_DOWN_NEED_FSCK, "need fsck" })
+
+struct f2fs_sb_info;
+struct f2fs_io_info;
+struct extent_info;
struct victim_sel_policy;
struct f2fs_map_blocks;
@@ -533,6 +544,9 @@ TRACE_EVENT(f2fs_map_blocks,
__field(block_t, m_lblk)
__field(block_t, m_pblk)
__field(unsigned int, m_len)
+ __field(unsigned int, m_flags)
+ __field(int, m_seg_type)
+ __field(bool, m_may_create)
__field(int, ret)
),
@@ -542,15 +556,22 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->m_lblk = map->m_lblk;
__entry->m_pblk = map->m_pblk;
__entry->m_len = map->m_len;
+ __entry->m_flags = map->m_flags;
+ __entry->m_seg_type = map->m_seg_type;
+ __entry->m_may_create = map->m_may_create;
__entry->ret = ret;
),
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
- "start blkaddr = 0x%llx, len = 0x%llx, err = %d",
+ "start blkaddr = 0x%llx, len = 0x%llx, flags = %u,"
+ "seg_type = %d, may_create = %d, err = %d",
show_dev_ino(__entry),
(unsigned long long)__entry->m_lblk,
(unsigned long long)__entry->m_pblk,
(unsigned long long)__entry->m_len,
+ __entry->m_flags,
+ __entry->m_seg_type,
+ __entry->m_may_create,
__entry->ret)
);
@@ -1616,6 +1637,30 @@ DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit,
TP_ARGS(sb, type, count)
);
+TRACE_EVENT(f2fs_shutdown,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, unsigned int mode, int ret),
+
+ TP_ARGS(sbi, mode, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, mode)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->mode = mode;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), mode: %s, ret:%d",
+ show_dev(__entry->dev),
+ show_shutdown_mode(__entry->mode),
+ __entry->ret)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/pwc.h b/include/trace/events/pwc.h
new file mode 100644
index 000000000000..a2da764a3b41
--- /dev/null
+++ b/include/trace/events/pwc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_TRACE_PWC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PWC_H
+
+#include <linux/usb.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pwc
+
+TRACE_EVENT(pwc_handler_enter,
+ TP_PROTO(struct urb *urb, struct pwc_device *pdev),
+ TP_ARGS(urb, pdev),
+ TP_STRUCT__entry(
+ __field(struct urb*, urb)
+ __field(struct pwc_frame_buf*, fbuf)
+ __field(int, urb__status)
+ __field(u32, urb__actual_length)
+ __field(int, fbuf__filled)
+ __string(name, pdev->v4l2_dev.name)
+ ),
+ TP_fast_assign(
+ __entry->urb = urb;
+ __entry->fbuf = pdev->fill_buf;
+ __entry->urb__status = urb->status;
+ __entry->urb__actual_length = urb->actual_length;
+ __entry->fbuf__filled = (pdev->fill_buf
+ ? pdev->fill_buf->filled : 0);
+ __assign_str(name, pdev->v4l2_dev.name);
+ ),
+ TP_printk("dev=%s (fbuf=%p filled=%d) urb=%p (status=%d actual_length=%u)",
+ __get_str(name),
+ __entry->fbuf,
+ __entry->fbuf__filled,
+ __entry->urb,
+ __entry->urb__status,
+ __entry->urb__actual_length)
+);
+
+TRACE_EVENT(pwc_handler_exit,
+ TP_PROTO(struct urb *urb, struct pwc_device *pdev),
+ TP_ARGS(urb, pdev),
+ TP_STRUCT__entry(
+ __field(struct urb*, urb)
+ __field(struct pwc_frame_buf*, fbuf)
+ __field(int, fbuf__filled)
+ __string(name, pdev->v4l2_dev.name)
+ ),
+ TP_fast_assign(
+ __entry->urb = urb;
+ __entry->fbuf = pdev->fill_buf;
+ __entry->fbuf__filled = pdev->fill_buf->filled;
+ __assign_str(name, pdev->v4l2_dev.name);
+ ),
+ TP_printk(" dev=%s (fbuf=%p filled=%d) urb=%p",
+ __get_str(name),
+ __entry->fbuf,
+ __entry->fbuf__filled,
+ __entry->urb)
+);
+
+#endif /* _TRACE_PWC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
new file mode 100644
index 000000000000..d1f7fe1b6fe4
--- /dev/null
+++ b/include/trace/events/rpcgss.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 Oracle. All rights reserved.
+ *
+ * Trace point definitions for the "rpcgss" subsystem.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpcgss
+
+#if !defined(_TRACE_RPCGSS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPCGSS_H
+
+#include <linux/tracepoint.h>
+
+/**
+ ** GSS-API related trace events
+ **/
+
+TRACE_DEFINE_ENUM(GSS_S_BAD_MECH);
+TRACE_DEFINE_ENUM(GSS_S_BAD_NAME);
+TRACE_DEFINE_ENUM(GSS_S_BAD_NAMETYPE);
+TRACE_DEFINE_ENUM(GSS_S_BAD_BINDINGS);
+TRACE_DEFINE_ENUM(GSS_S_BAD_STATUS);
+TRACE_DEFINE_ENUM(GSS_S_BAD_SIG);
+TRACE_DEFINE_ENUM(GSS_S_NO_CRED);
+TRACE_DEFINE_ENUM(GSS_S_NO_CONTEXT);
+TRACE_DEFINE_ENUM(GSS_S_DEFECTIVE_TOKEN);
+TRACE_DEFINE_ENUM(GSS_S_DEFECTIVE_CREDENTIAL);
+TRACE_DEFINE_ENUM(GSS_S_CREDENTIALS_EXPIRED);
+TRACE_DEFINE_ENUM(GSS_S_CONTEXT_EXPIRED);
+TRACE_DEFINE_ENUM(GSS_S_FAILURE);
+TRACE_DEFINE_ENUM(GSS_S_BAD_QOP);
+TRACE_DEFINE_ENUM(GSS_S_UNAUTHORIZED);
+TRACE_DEFINE_ENUM(GSS_S_UNAVAILABLE);
+TRACE_DEFINE_ENUM(GSS_S_DUPLICATE_ELEMENT);
+TRACE_DEFINE_ENUM(GSS_S_NAME_NOT_MN);
+TRACE_DEFINE_ENUM(GSS_S_CONTINUE_NEEDED);
+TRACE_DEFINE_ENUM(GSS_S_DUPLICATE_TOKEN);
+TRACE_DEFINE_ENUM(GSS_S_OLD_TOKEN);
+TRACE_DEFINE_ENUM(GSS_S_UNSEQ_TOKEN);
+TRACE_DEFINE_ENUM(GSS_S_GAP_TOKEN);
+
+#define show_gss_status(x) \
+ __print_flags(x, "|", \
+ { GSS_S_BAD_MECH, "GSS_S_BAD_MECH" }, \
+ { GSS_S_BAD_NAME, "GSS_S_BAD_NAME" }, \
+ { GSS_S_BAD_NAMETYPE, "GSS_S_BAD_NAMETYPE" }, \
+ { GSS_S_BAD_BINDINGS, "GSS_S_BAD_BINDINGS" }, \
+ { GSS_S_BAD_STATUS, "GSS_S_BAD_STATUS" }, \
+ { GSS_S_BAD_SIG, "GSS_S_BAD_SIG" }, \
+ { GSS_S_NO_CRED, "GSS_S_NO_CRED" }, \
+ { GSS_S_NO_CONTEXT, "GSS_S_NO_CONTEXT" }, \
+ { GSS_S_DEFECTIVE_TOKEN, "GSS_S_DEFECTIVE_TOKEN" }, \
+ { GSS_S_DEFECTIVE_CREDENTIAL, "GSS_S_DEFECTIVE_CREDENTIAL" }, \
+ { GSS_S_CREDENTIALS_EXPIRED, "GSS_S_CREDENTIALS_EXPIRED" }, \
+ { GSS_S_CONTEXT_EXPIRED, "GSS_S_CONTEXT_EXPIRED" }, \
+ { GSS_S_FAILURE, "GSS_S_FAILURE" }, \
+ { GSS_S_BAD_QOP, "GSS_S_BAD_QOP" }, \
+ { GSS_S_UNAUTHORIZED, "GSS_S_UNAUTHORIZED" }, \
+ { GSS_S_UNAVAILABLE, "GSS_S_UNAVAILABLE" }, \
+ { GSS_S_DUPLICATE_ELEMENT, "GSS_S_DUPLICATE_ELEMENT" }, \
+ { GSS_S_NAME_NOT_MN, "GSS_S_NAME_NOT_MN" }, \
+ { GSS_S_CONTINUE_NEEDED, "GSS_S_CONTINUE_NEEDED" }, \
+ { GSS_S_DUPLICATE_TOKEN, "GSS_S_DUPLICATE_TOKEN" }, \
+ { GSS_S_OLD_TOKEN, "GSS_S_OLD_TOKEN" }, \
+ { GSS_S_UNSEQ_TOKEN, "GSS_S_UNSEQ_TOKEN" }, \
+ { GSS_S_GAP_TOKEN, "GSS_S_GAP_TOKEN" })
+
+
+DECLARE_EVENT_CLASS(rpcgss_gssapi_event,
+ TP_PROTO(
+ const struct rpc_task *task,
+ u32 maj_stat
+ ),
+
+ TP_ARGS(task, maj_stat),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, maj_stat)
+
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->maj_stat = maj_stat;
+ ),
+
+ TP_printk("task:%u@%u maj_stat=%s",
+ __entry->task_id, __entry->client_id,
+ __entry->maj_stat == 0 ?
+ "GSS_S_COMPLETE" : show_gss_status(__entry->maj_stat))
+);
+
+#define DEFINE_GSSAPI_EVENT(name) \
+ DEFINE_EVENT(rpcgss_gssapi_event, rpcgss_##name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ u32 maj_stat \
+ ), \
+ TP_ARGS(task, maj_stat))
+
+TRACE_EVENT(rpcgss_import_ctx,
+ TP_PROTO(
+ int status
+ ),
+
+ TP_ARGS(status),
+
+ TP_STRUCT__entry(
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ ),
+
+ TP_printk("status=%d", __entry->status)
+);
+
+DEFINE_GSSAPI_EVENT(get_mic);
+DEFINE_GSSAPI_EVENT(verify_mic);
+DEFINE_GSSAPI_EVENT(wrap);
+DEFINE_GSSAPI_EVENT(unwrap);
+
+
+/**
+ ** GSS auth unwrap failures
+ **/
+
+TRACE_EVENT(rpcgss_unwrap_failed,
+ TP_PROTO(
+ const struct rpc_task *task
+ ),
+
+ TP_ARGS(task),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ ),
+
+ TP_printk("task:%u@%u", __entry->task_id, __entry->client_id)
+);
+
+TRACE_EVENT(rpcgss_bad_seqno,
+ TP_PROTO(
+ const struct rpc_task *task,
+ u32 expected,
+ u32 received
+ ),
+
+ TP_ARGS(task, expected, received),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, expected)
+ __field(u32, received)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->expected = expected;
+ __entry->received = received;
+ ),
+
+ TP_printk("task:%u@%u expected seqno %u, received seqno %u",
+ __entry->task_id, __entry->client_id,
+ __entry->expected, __entry->received)
+);
+
+TRACE_EVENT(rpcgss_seqno,
+ TP_PROTO(
+ const struct rpc_task *task
+ ),
+
+ TP_ARGS(task),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ const struct rpc_rqst *rqst = task->tk_rqstp;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->seqno = rqst->rq_seqno;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x seqno=%u",
+ __entry->task_id, __entry->client_id,
+ __entry->xid, __entry->seqno)
+);
+
+TRACE_EVENT(rpcgss_need_reencode,
+ TP_PROTO(
+ const struct rpc_task *task,
+ u32 seq_xmit,
+ bool ret
+ ),
+
+ TP_ARGS(task, seq_xmit, ret),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(u32, seq_xmit)
+ __field(u32, seqno)
+ __field(bool, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
+ __entry->seq_xmit = seq_xmit;
+ __entry->seqno = task->tk_rqstp->rq_seqno;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x rq_seqno=%u seq_xmit=%u reencode %sneeded",
+ __entry->task_id, __entry->client_id,
+ __entry->xid, __entry->seqno, __entry->seq_xmit,
+ __entry->ret ? "" : "un")
+);
+
+/**
+ ** gssd upcall related trace events
+ **/
+
+TRACE_EVENT(rpcgss_upcall_msg,
+ TP_PROTO(
+ const char *buf
+ ),
+
+ TP_ARGS(buf),
+
+ TP_STRUCT__entry(
+ __string(msg, buf)
+ ),
+
+ TP_fast_assign(
+ __assign_str(msg, buf);
+ ),
+
+ TP_printk("msg='%s'", __get_str(msg))
+);
+
+TRACE_EVENT(rpcgss_upcall_result,
+ TP_PROTO(
+ u32 uid,
+ int result
+ ),
+
+ TP_ARGS(uid, result),
+
+ TP_STRUCT__entry(
+ __field(u32, uid)
+ __field(int, result)
+
+ ),
+
+ TP_fast_assign(
+ __entry->uid = uid;
+ __entry->result = result;
+ ),
+
+ TP_printk("for uid %u, result=%d", __entry->uid, __entry->result)
+);
+
+TRACE_EVENT(rpcgss_context,
+ TP_PROTO(
+ unsigned long expiry,
+ unsigned long now,
+ unsigned int timeout,
+ unsigned int len,
+ const u8 *data
+ ),
+
+ TP_ARGS(expiry, now, timeout, len, data),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, expiry)
+ __field(unsigned long, now)
+ __field(unsigned int, timeout)
+ __field(int, len)
+ __string(acceptor, data)
+ ),
+
+ TP_fast_assign(
+ __entry->expiry = expiry;
+ __entry->now = now;
+ __entry->timeout = timeout;
+ __entry->len = len;
+ strncpy(__get_str(acceptor), data, len);
+ ),
+
+ TP_printk("gc_expiry=%lu now=%lu timeout=%u acceptor=%.*s",
+ __entry->expiry, __entry->now, __entry->timeout,
+ __entry->len, __get_str(acceptor))
+);
+
+
+/**
+ ** Miscellaneous events
+ **/
+
+TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5);
+TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5I);
+TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5P);
+
+#define show_pseudoflavor(x) \
+ __print_symbolic(x, \
+ { RPC_AUTH_GSS_KRB5, "RPC_AUTH_GSS_KRB5" }, \
+ { RPC_AUTH_GSS_KRB5I, "RPC_AUTH_GSS_KRB5I" }, \
+ { RPC_AUTH_GSS_KRB5P, "RPC_AUTH_GSS_KRB5P" })
+
+
+TRACE_EVENT(rpcgss_createauth,
+ TP_PROTO(
+ unsigned int flavor,
+ int error
+ ),
+
+ TP_ARGS(flavor, error),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, flavor)
+ __field(int, error)
+
+ ),
+
+ TP_fast_assign(
+ __entry->flavor = flavor;
+ __entry->error = error;
+ ),
+
+ TP_printk("flavor=%s error=%d",
+ show_pseudoflavor(__entry->flavor), __entry->error)
+);
+
+
+#endif /* _TRACE_RPCGSS_H */
+
+#include <trace/define_trace.h>
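
Why the TRACE_DEFINE_ENUM() block at the top of this file: the __print_flags()/__print_symbolic() tables end up in the event's format string, which user-space tooling parses without access to the C enums, so each constant must be exported explicitly. A minimal sketch of the pattern (the GSS_S_COMPLETE mapping is assumed here for illustration, not part of this patch):

    TRACE_DEFINE_ENUM(GSS_S_COMPLETE);

    #define show_gss_complete(x)                                    \
            __print_symbolic(x,                                     \
                    { GSS_S_COMPLETE, "GSS_S_COMPLETE" })
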
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 399b1aedc927..962975b4313f 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -521,12 +521,18 @@ TRACE_EVENT(xprtrdma_post_send,
TP_STRUCT__entry(
__field(const void *, req)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
__field(int, num_sge)
__field(int, signaled)
__field(int, status)
),
TP_fast_assign(
+ const struct rpc_rqst *rqst = &req->rl_slot;
+
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->req = req;
__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
@@ -534,9 +540,11 @@ TRACE_EVENT(xprtrdma_post_send,
__entry->status = status;
),
- TP_printk("req=%p, %d SGEs%s, status=%d",
+ TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
+ __entry->task_id, __entry->client_id,
__entry->req, __entry->num_sge,
- (__entry->signaled ? ", signaled" : ""),
+ (__entry->num_sge == 1 ? "" : "s"),
+ (__entry->signaled ? "signaled " : ""),
__entry->status
)
);
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 5b50fe4906d2..7b60fd186cfe 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -76,6 +76,7 @@ enum rxrpc_client_trace {
rxrpc_client_chan_disconnect,
rxrpc_client_chan_pass,
rxrpc_client_chan_unstarted,
+ rxrpc_client_chan_wait_failed,
rxrpc_client_cleanup,
rxrpc_client_count,
rxrpc_client_discard,
@@ -276,6 +277,7 @@ enum rxrpc_tx_point {
EM(rxrpc_client_chan_disconnect, "ChDisc") \
EM(rxrpc_client_chan_pass, "ChPass") \
EM(rxrpc_client_chan_unstarted, "ChUnst") \
+ EM(rxrpc_client_chan_wait_failed, "ChWtFl") \
EM(rxrpc_client_cleanup, "Clean ") \
EM(rxrpc_client_count, "Count ") \
EM(rxrpc_client_discard, "Discar") \
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index aef6869f563d..0dd9171d2ad8 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -131,9 +131,11 @@ DECLARE_EVENT_CLASS(spi_transfer,
__field( struct spi_transfer *, xfer )
__field( int, len )
__dynamic_array(u8, rx_buf,
- spi_valid_rxbuf(msg, xfer) ? xfer->len : 0)
+ spi_valid_rxbuf(msg, xfer) ?
+ (xfer->len < 64 ? xfer->len : 64) : 0)
__dynamic_array(u8, tx_buf,
- spi_valid_txbuf(msg, xfer) ? xfer->len : 0)
+ spi_valid_txbuf(msg, xfer) ?
+ (xfer->len < 64 ? xfer->len : 64) : 0)
),
TP_fast_assign(
@@ -144,11 +146,11 @@ DECLARE_EVENT_CLASS(spi_transfer,
if (spi_valid_txbuf(msg, xfer))
memcpy(__get_dynamic_array(tx_buf),
- xfer->tx_buf, xfer->len);
+ xfer->tx_buf, __get_dynamic_array_len(tx_buf));
if (spi_valid_rxbuf(msg, xfer))
memcpy(__get_dynamic_array(rx_buf),
- xfer->rx_buf, xfer->len);
+ xfer->rx_buf, __get_dynamic_array_len(rx_buf));
),
TP_printk("spi%d.%d %p len=%d tx=[%*phD] rx=[%*phD]",
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 7b8b987d332e..f0a6f0c5549c 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -77,6 +77,50 @@ TRACE_EVENT(rpc_request,
)
);
+TRACE_DEFINE_ENUM(RPC_TASK_ASYNC);
+TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
+TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
+TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
+TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
+TRACE_DEFINE_ENUM(RPC_TASK_KILLED);
+TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
+TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
+TRACE_DEFINE_ENUM(RPC_TASK_SENT);
+TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT);
+TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT);
+TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
+
+#define rpc_show_task_flags(flags) \
+ __print_flags(flags, "|", \
+ { RPC_TASK_ASYNC, "ASYNC" }, \
+ { RPC_TASK_SWAPPER, "SWAPPER" }, \
+ { RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
+ { RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \
+ { RPC_TASK_DYNAMIC, "DYNAMIC" }, \
+ { RPC_TASK_KILLED, "KILLED" }, \
+ { RPC_TASK_SOFT, "SOFT" }, \
+ { RPC_TASK_SOFTCONN, "SOFTCONN" }, \
+ { RPC_TASK_SENT, "SENT" }, \
+ { RPC_TASK_TIMEOUT, "TIMEOUT" }, \
+ { RPC_TASK_NOCONNECT, "NOCONNECT" }, \
+ { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" })
+
+TRACE_DEFINE_ENUM(RPC_TASK_RUNNING);
+TRACE_DEFINE_ENUM(RPC_TASK_QUEUED);
+TRACE_DEFINE_ENUM(RPC_TASK_ACTIVE);
+TRACE_DEFINE_ENUM(RPC_TASK_NEED_XMIT);
+TRACE_DEFINE_ENUM(RPC_TASK_NEED_RECV);
+TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT);
+
+#define rpc_show_runstate(flags) \
+ __print_flags(flags, "|", \
+ { (1UL << RPC_TASK_RUNNING), "RUNNING" }, \
+ { (1UL << RPC_TASK_QUEUED), "QUEUED" }, \
+ { (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \
+ { (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \
+ { (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \
+ { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
+
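The two tables above differ deliberately: tk_flags already holds mask values, while tk_runstate stores bit numbers manipulated with set_bit()/test_bit(), so the runstate table must shift each bit number into a mask before __print_flags() can match it. A small sketch of why (illustrative only):

    #include <linux/bitops.h>
    #include <linux/sunrpc/sched.h>

    static void sketch_runstate(void)
    {
            unsigned long runstate = 0;

            set_bit(RPC_TASK_RUNNING, &runstate);
            /* runstate now equals (1UL << RPC_TASK_RUNNING), which is
             * exactly what rpc_show_runstate() compares against to
             * print "RUNNING". */
    }
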
DECLARE_EVENT_CLASS(rpc_task_running,
TP_PROTO(const struct rpc_task *task, const void *action),
@@ -102,10 +146,10 @@ DECLARE_EVENT_CLASS(rpc_task_running,
__entry->flags = task->tk_flags;
),
- TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%ps",
+ TP_printk("task:%u@%d flags=%s runstate=%s status=%d action=%ps",
__entry->task_id, __entry->client_id,
- __entry->flags,
- __entry->runstate,
+ rpc_show_task_flags(__entry->flags),
+ rpc_show_runstate(__entry->runstate),
__entry->status,
__entry->action
)
@@ -149,10 +193,10 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__assign_str(q_name, rpc_qname(q));
),
- TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
+ TP_printk("task:%u@%d flags=%s runstate=%s status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
- __entry->flags,
- __entry->runstate,
+ rpc_show_task_flags(__entry->flags),
+ rpc_show_runstate(__entry->runstate),
__entry->status,
__entry->timeout,
__get_str(q_name)
@@ -169,6 +213,87 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
DEFINE_RPC_QUEUED_EVENT(sleep);
DEFINE_RPC_QUEUED_EVENT(wakeup);
+DECLARE_EVENT_CLASS(rpc_failure,
+
+ TP_PROTO(const struct rpc_task *task),
+
+ TP_ARGS(task),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ ),
+
+ TP_printk("task:%u@%u",
+ __entry->task_id, __entry->client_id)
+);
+
+#define DEFINE_RPC_FAILURE(name) \
+ DEFINE_EVENT(rpc_failure, rpc_bad_##name, \
+ TP_PROTO( \
+ const struct rpc_task *task \
+ ), \
+ TP_ARGS(task))
+
+DEFINE_RPC_FAILURE(callhdr);
+DEFINE_RPC_FAILURE(verifier);
+
+DECLARE_EVENT_CLASS(rpc_reply_event,
+
+ TP_PROTO(
+ const struct rpc_task *task
+ ),
+
+ TP_ARGS(task),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __string(progname, task->tk_client->cl_program->name)
+ __field(u32, version)
+ __string(procname, rpc_proc_name(task))
+ __string(servername, task->tk_xprt->servername)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
+ __assign_str(progname, task->tk_client->cl_program->name);
+ __entry->version = task->tk_client->cl_vers;
+ __assign_str(procname, rpc_proc_name(task));
+ __assign_str(servername, task->tk_xprt->servername);
+ ),
+
+ TP_printk("task:%u@%d server=%s xid=0x%08x %sv%d %s",
+ __entry->task_id, __entry->client_id, __get_str(servername),
+ __entry->xid, __get_str(progname), __entry->version,
+ __get_str(procname))
+);
+
+#define DEFINE_RPC_REPLY_EVENT(name) \
+ DEFINE_EVENT(rpc_reply_event, rpc__##name, \
+ TP_PROTO( \
+ const struct rpc_task *task \
+ ), \
+ TP_ARGS(task))
+
+DEFINE_RPC_REPLY_EVENT(prog_unavail);
+DEFINE_RPC_REPLY_EVENT(prog_mismatch);
+DEFINE_RPC_REPLY_EVENT(proc_unavail);
+DEFINE_RPC_REPLY_EVENT(garbage_args);
+DEFINE_RPC_REPLY_EVENT(unparsable);
+DEFINE_RPC_REPLY_EVENT(mismatch);
+DEFINE_RPC_REPLY_EVENT(stale_creds);
+DEFINE_RPC_REPLY_EVENT(bad_creds);
+DEFINE_RPC_REPLY_EVENT(auth_tooweak);
+
TRACE_EVENT(rpc_stats_latency,
TP_PROTO(
@@ -210,6 +335,169 @@ TRACE_EVENT(rpc_stats_latency,
__entry->backlog, __entry->rtt, __entry->execute)
);
+TRACE_EVENT(rpc_xdr_overflow,
+ TP_PROTO(
+ const struct xdr_stream *xdr,
+ size_t requested
+ ),
+
+ TP_ARGS(xdr, requested),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, version)
+ __field(size_t, requested)
+ __field(const void *, end)
+ __field(const void *, p)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ __field(unsigned int, len)
+ __string(progname,
+ xdr->rqst->rq_task->tk_client->cl_program->name)
+ __string(procedure,
+ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
+ ),
+
+ TP_fast_assign(
+ if (xdr->rqst) {
+ const struct rpc_task *task = xdr->rqst->rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __assign_str(progname,
+ task->tk_client->cl_program->name);
+ __entry->version = task->tk_client->cl_vers;
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
+ } else {
+ __entry->task_id = 0;
+ __entry->client_id = 0;
+ __assign_str(progname, "unknown");
+ __entry->version = 0;
+ __assign_str(procedure, "unknown");
+ }
+ __entry->requested = requested;
+ __entry->end = xdr->end;
+ __entry->p = xdr->p;
+ __entry->head_base = xdr->buf->head[0].iov_base;
+ __entry->head_len = xdr->buf->head[0].iov_len;
+ __entry->page_len = xdr->buf->page_len;
+ __entry->tail_base = xdr->buf->tail[0].iov_base;
+ __entry->tail_len = xdr->buf->tail[0].iov_len;
+ __entry->len = xdr->buf->len;
+ ),
+
+ TP_printk(
+ "task:%u@%u %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ __entry->task_id, __entry->client_id,
+ __get_str(progname), __entry->version, __get_str(procedure),
+ __entry->requested, __entry->p, __entry->end,
+ __entry->head_base, __entry->head_len,
+ __entry->page_len,
+ __entry->tail_base, __entry->tail_len,
+ __entry->len
+ )
+);
+
+TRACE_EVENT(rpc_xdr_alignment,
+ TP_PROTO(
+ const struct xdr_stream *xdr,
+ size_t offset,
+ unsigned int copied
+ ),
+
+ TP_ARGS(xdr, offset, copied),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, version)
+ __field(size_t, offset)
+ __field(unsigned int, copied)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ __field(unsigned int, len)
+ __string(progname,
+ xdr->rqst->rq_task->tk_client->cl_program->name)
+ __string(procedure,
+ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
+ ),
+
+ TP_fast_assign(
+ const struct rpc_task *task = xdr->rqst->rq_task;
+
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __assign_str(progname,
+ task->tk_client->cl_program->name);
+ __entry->version = task->tk_client->cl_vers;
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name);
+
+ __entry->offset = offset;
+ __entry->copied = copied;
+ __entry->head_base = xdr->buf->head[0].iov_base;
+ __entry->head_len = xdr->buf->head[0].iov_len;
+ __entry->page_len = xdr->buf->page_len;
+ __entry->tail_base = xdr->buf->tail[0].iov_base;
+ __entry->tail_len = xdr->buf->tail[0].iov_len;
+ __entry->len = xdr->buf->len;
+ ),
+
+ TP_printk(
+ "task:%u@%u %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ __entry->task_id, __entry->client_id,
+ __get_str(progname), __entry->version, __get_str(procedure),
+ __entry->offset, __entry->copied,
+ __entry->head_base, __entry->head_len,
+ __entry->page_len,
+ __entry->tail_base, __entry->tail_len,
+ __entry->len
+ )
+);
+
+TRACE_EVENT(rpc_reply_pages,
+ TP_PROTO(
+ const struct rpc_rqst *req
+ ),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = req->rq_task->tk_pid;
+ __entry->client_id = req->rq_task->tk_client->cl_clid;
+
+ __entry->head_base = req->rq_rcv_buf.head[0].iov_base;
+ __entry->head_len = req->rq_rcv_buf.head[0].iov_len;
+ __entry->page_len = req->rq_rcv_buf.page_len;
+ __entry->tail_base = req->rq_rcv_buf.tail[0].iov_base;
+ __entry->tail_len = req->rq_rcv_buf.tail[0].iov_len;
+ ),
+
+ TP_printk(
+ "task:%u@%u xdr=[%p,%zu]/%u/[%p,%zu]\n",
+ __entry->task_id, __entry->client_id,
+ __entry->head_base, __entry->head_len,
+ __entry->page_len,
+ __entry->tail_base, __entry->tail_len
+ )
+);
+
/*
* First define the enums in the below macros to be exported to userspace
* via TRACE_DEFINE_ENUM().
@@ -404,9 +692,70 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
DEFINE_RPC_XPRT_EVENT(timer);
DEFINE_RPC_XPRT_EVENT(lookup_rqst);
-DEFINE_RPC_XPRT_EVENT(transmit);
DEFINE_RPC_XPRT_EVENT(complete_rqst);
+TRACE_EVENT(xprt_transmit,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ int status
+ ),
+
+ TP_ARGS(rqst, status),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(u32, seqno)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client ?
+ rqst->rq_task->tk_client->cl_clid : -1;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->seqno = rqst->rq_seqno;
+ __entry->status = status;
+ ),
+
+ TP_printk(
+ "task:%u@%u xid=0x%08x seqno=%u status=%d",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->seqno, __entry->status)
+);
+
+TRACE_EVENT(xprt_enq_xmit,
+ TP_PROTO(
+ const struct rpc_task *task,
+ int stage
+ ),
+
+ TP_ARGS(task, stage),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(u32, seqno)
+ __field(int, stage)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client ?
+ task->tk_client->cl_clid : -1;
+ __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
+ __entry->seqno = task->tk_rqstp->rq_seqno;
+ __entry->stage = stage;
+ ),
+
+ TP_printk(
+ "task:%u@%u xid=0x%08x seqno=%u stage=%d",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->seqno, __entry->stage)
+);
+
TRACE_EVENT(xprt_ping,
TP_PROTO(const struct rpc_xprt *xprt, int status),
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 44a3259ed4a5..b6e0cbc2c71f 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -28,7 +28,7 @@ TRACE_EVENT_FN(sys_enter,
TP_fast_assign(
__entry->id = id;
- syscall_get_arguments(current, regs, 0, 6, __entry->args);
+ syscall_get_arguments(current, regs, __entry->args);
),
TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h
new file mode 100644
index 000000000000..0818f6286110
--- /dev/null
+++ b/include/trace/events/tegra_apb_dma.h
@@ -0,0 +1,61 @@
+#if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TEGRA_APB_DMA_H
+
+#include <linux/tracepoint.h>
+#include <linux/dmaengine.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tegra_apb_dma
+
+TRACE_EVENT(tegra_dma_tx_status,
+ TP_PROTO(struct dma_chan *dc, dma_cookie_t cookie, struct dma_tx_state *state),
+ TP_ARGS(dc, cookie, state),
+ TP_STRUCT__entry(
+ __string(chan, dev_name(&dc->dev->device))
+ __field(dma_cookie_t, cookie)
+ __field(__u32, residue)
+ ),
+ TP_fast_assign(
+ __assign_str(chan, dev_name(&dc->dev->device));
+ __entry->cookie = cookie;
+ __entry->residue = state ? state->residue : (u32)-1;
+ ),
+ TP_printk("channel %s: dma cookie %d, residue %u",
+ __get_str(chan), __entry->cookie, __entry->residue)
+);
+
+TRACE_EVENT(tegra_dma_complete_cb,
+ TP_PROTO(struct dma_chan *dc, int count, void *ptr),
+ TP_ARGS(dc, count, ptr),
+ TP_STRUCT__entry(
+ __string(chan, dev_name(&dc->dev->device))
+ __field(int, count)
+ __field(void *, ptr)
+ ),
+ TP_fast_assign(
+ __assign_str(chan, dev_name(&dc->dev->device));
+ __entry->count = count;
+ __entry->ptr = ptr;
+ ),
+ TP_printk("channel %s: done %d, ptr %p",
+ __get_str(chan), __entry->count, __entry->ptr)
+);
+
+TRACE_EVENT(tegra_dma_isr,
+ TP_PROTO(struct dma_chan *dc, int irq),
+ TP_ARGS(dc, irq),
+ TP_STRUCT__entry(
+ __string(chan, dev_name(&dc->dev->device))
+ __field(int, irq)
+ ),
+ TP_fast_assign(
+ __assign_str(chan, dev_name(&dc->dev->device));
+ __entry->irq = irq;
+ ),
+ TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq)
+);
+
+#endif /* _TRACE_TEGRA_APB_DMA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index a57e4ee989d6..b7a904825e7d 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -73,7 +73,7 @@ TRACE_EVENT(timer_start,
__entry->flags = flags;
),
- TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
+ TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now,
__entry->flags & TIMER_CPUMASK,
@@ -89,23 +89,27 @@ TRACE_EVENT(timer_start,
*/
TRACE_EVENT(timer_expire_entry,
- TP_PROTO(struct timer_list *timer),
+ TP_PROTO(struct timer_list *timer, unsigned long baseclk),
- TP_ARGS(timer),
+ TP_ARGS(timer, baseclk),
TP_STRUCT__entry(
__field( void *, timer )
__field( unsigned long, now )
__field( void *, function)
+ __field( unsigned long, baseclk )
),
TP_fast_assign(
__entry->timer = timer;
__entry->now = jiffies;
__entry->function = timer->function;
+ __entry->baseclk = baseclk;
),
- TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
+ TP_printk("timer=%p function=%ps now=%lu baseclk=%lu",
+ __entry->timer, __entry->function, __entry->now,
+ __entry->baseclk)
);
/**
@@ -210,7 +214,7 @@ TRACE_EVENT(hrtimer_start,
__entry->mode = mode;
),
- TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
+ TP_printk("hrtimer=%p function=%ps expires=%llu softexpires=%llu "
"mode=%s", __entry->hrtimer, __entry->function,
(unsigned long long) __entry->expires,
(unsigned long long) __entry->softexpires,
@@ -243,7 +247,8 @@ TRACE_EVENT(hrtimer_expire_entry,
__entry->function = hrtimer->function;
),
- TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
+ TP_printk("hrtimer=%p function=%ps now=%llu",
+ __entry->hrtimer, __entry->function,
(unsigned long long) __entry->now)
);
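
The %pf → %ps conversions throughout this file follow the printk convention that %ps/%pS take a direct function-pointer address, while %pf/%pF were reserved for function descriptors (ia64, ppc64 ABIv1). Timer and hrtimer callbacks are plain pointers, so %ps is the accurate specifier. A minimal sketch:

    #include <linux/printk.h>
    #include <linux/timer.h>

    static void report_timer_cb(struct timer_list *t)
    {
            /* %ps resolves the callback address to its symbol name */
            pr_info("expired callback=%ps\n", t->function);
    }
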
diff --git a/include/uapi/asm-generic/Kbuild.asm b/include/uapi/asm-generic/Kbuild
index 355c4ac2c0b0..ebb180aac74e 100644
--- a/include/uapi/asm-generic/Kbuild.asm
+++ b/include/uapi/asm-generic/Kbuild
@@ -1,6 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Headers that are mandatory in usr/include/asm/
-#
+# (This file is not included when SRCARCH=um since UML does not support UAPI.)
+
mandatory-y += auxvec.h
mandatory-y += bitsperlong.h
mandatory-y += bpf_perf_event.h
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index c8b430cb6dc4..8c1391c89171 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -2,8 +2,8 @@
#ifndef __ASM_GENERIC_SOCKET_H
#define __ASM_GENERIC_SOCKET_H
+#include <linux/posix_types.h>
#include <asm/sockios.h>
-#include <asm/bitsperlong.h>
/* For setsockopt(2) */
#define SOL_SOCKET 1
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index bf4624efe5e6..dee7292e1df6 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -824,6 +824,8 @@ __SYSCALL(__NR_futex_time64, sys_futex)
__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
#endif
+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
#define __NR_io_uring_setup 425
__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
#define __NR_io_uring_enter 426
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 5f24b50c9e88..059dc2bedaf6 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -7,5 +7,7 @@ no-export-headers += kvm.h
endif
ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),)
+ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),)
no-export-headers += kvm_para.h
endif
+endif
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3c38ac9a92a7..929c8e537a14 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -502,16 +502,6 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- * Description
- * Push an element *value* in *map*. *flags* is one of:
- *
- * **BPF_EXIST**
- * If the queue/stack is full, the oldest element is removed to
- * make room for this.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ * *skb*, but gets socket from **struct bpf_sock_addr** context.
* Return
* An 8-byte long non-decreasing number.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ * *skb*, but gets socket from **struct bpf_sock_ops** context.
* Return
* An 8-byte long non-decreasing number.
*
@@ -2098,52 +2088,52 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded key press with *scancode*,
- * *toggle* value in the given *protocol*. The scancode will be
- * translated to a keycode using the rc keymap, and reported as
- * an input key down event. After a period a key up event is
- * generated. This period can be extended by calling either
- * **bpf_rc_keydown**\ () again with the same values, or calling
- * **bpf_rc_repeat**\ ().
+ * report a successfully decoded repeat key message. This delays
+ * the generation of a key up event for previously generated
+ * key down event.
*
- * Some protocols include a toggle bit, in case the button was
- * released and pressed again between consecutive scancodes.
+ * Some IR protocols like NEC have a special IR message for
+ * repeating last button, for when a button is held down.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
- * The *protocol* is the decoded protocol number (see
- * **enum rc_proto** for some predefined values).
- *
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded repeat key message. This delays
- * the generation of a key up event for previously generated
- * key down event.
+ * report a successfully decoded key press with *scancode*,
+ * *toggle* value in the given *protocol*. The scancode will be
+ * translated to a keycode using the rc keymap, and reported as
+ * an input key down event. After a period a key up event is
+ * generated. This period can be extended by calling either
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
*
- * Some IR protocols like NEC have a special IR message for
- * repeating last button, for when a button is held down.
+ * Some protocols include a toggle bit, in case the button was
+ * released and pressed again between consecutive scancodes.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
+ * The *protocol* is the decoded protocol number (see
+ * **enum rc_proto** for some predefined values).
+ *
+ * This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
* Description
* Return the cgroup v2 id of the socket associated with the *skb*.
* This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- * Description
- * Return id of cgroup v2 that is ancestor of cgroup associated
- * with the *skb* at the *ancestor_level*. The root cgroup is at
- * *ancestor_level* zero and each step down the hierarchy
- * increments the level. If *ancestor_level* == level of cgroup
- * associated with *skb*, then return value will be same as that
- * of **bpf_skb_cgroup_id**\ ().
- *
- * The helper is useful to implement policies based on cgroups
- * that are upper in hierarchy than immediate cgroup associated
- * with *skb*.
- *
- * The format of returned id and helper limitations are same as in
- * **bpf_skb_cgroup_id**\ ().
- * Return
- * The id is returned or 0 in case the id could not be retrieved.
- *
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
*
- * void* get_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
* Description
* Get the pointer to the local storage area.
* The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *skb* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *skb*, then return value will be same as that
+ * of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *skb*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_skb_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * Description
+ * Push an element *value* in *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * If the queue/stack is full, the oldest element is
+ * removed to make room for this.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_map_pop_elem(struct bpf_map *map, void *value)
* Description
* Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
* Return
* 0
*
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * Description
+ * Acquire a spinlock represented by the pointer *lock*, which is
+ * stored as part of a value of a map. Taking the lock allows to
+ * safely update the rest of the fields in that value. The
+ * spinlock can (and must) later be released with a call to
+ * **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ * Spinlocks in BPF programs come with a number of restrictions
+ * and constraints:
+ *
+ * * **bpf_spin_lock** objects are only allowed inside maps of
+ * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ * list could be extended in the future).
+ * * BTF description of the map is mandatory.
+ * * The BPF program can take ONE lock at a time, since taking two
+ * or more could cause deadlocks.
+ * * Only one **struct bpf_spin_lock** is allowed per map element.
+ * * When the lock is taken, calls (either BPF to BPF or helpers)
+ * are not allowed.
+ * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ * allowed inside a spinlock-ed region.
+ * * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ * the lock, on all execution paths, before it returns.
+ * * The BPF program can access **struct bpf_spin_lock** only via
+ * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ * helpers. Loading or storing data into the **struct
+ * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ * * To use the **bpf_spin_lock**\ () helper, the BTF description
+ * of the map value must be a struct and have **struct
+ * bpf_spin_lock** *anyname*\ **;** field at the top level.
+ * Nested lock inside another struct is not allowed.
+ * * The **struct bpf_spin_lock** *lock* field in a map value must
+ * be aligned on a multiple of 4 bytes in that value.
+ * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ * the **bpf_spin_lock** field to user space.
+ * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ * a BPF program, do not update the **bpf_spin_lock** field.
+ * * **bpf_spin_lock** cannot be on the stack or inside a
+ * networking packet (it can only be inside of a map value).
+ * * **bpf_spin_lock** is available to root only.
+ * * Tracing programs and socket filter programs cannot use
+ * **bpf_spin_lock**\ () due to insufficient preemption checks
+ * (but this may change in the future).
+ * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ * Return
+ * 0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * Description
+ * Release the *lock* previously locked by a call to
+ * **bpf_spin_lock**\ (\ *lock*\ ).
+ * Return
+ * 0
+ *
* struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_sock** pointer such
- * that all the fields in bpf_sock can be accessed.
+ * that all the fields in this **bpf_sock** can be accessed.
* Return
- * A **struct bpf_sock** pointer on success, or NULL in
+ * A **struct bpf_sock** pointer on success, or **NULL** in
* case of failure.
*
* struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_tcp_sock** pointer from a
* **struct bpf_sock** pointer.
- *
* Return
- * A **struct bpf_tcp_sock** pointer on success, or NULL in
+ * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
* int bpf_skb_ecn_set_ce(struct sk_buf *skb)
- * Description
- * Sets ECN of IP header to ce (congestion encountered) if
- * current value is ect (ECN capable). Works with IPv6 and IPv4.
- * Return
- * 1 if set, 0 if not set.
+ * Description
+ * Set ECN (Explicit Congestion Notification) field of IP header
+ * to **CE** (Congestion Encountered) if current value is **ECT**
+ * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ * and IPv4.
+ * Return
+ * 1 if the **CE** flag is set (either by the current helper call
+ * or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ * Description
+ * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ * **bpf_sk_release**\ () is unnecessary and not allowed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2465,7 +2530,8 @@ union bpf_attr {
FN(spin_unlock), \
FN(sk_fullsock), \
FN(tcp_sock), \
- FN(skb_ecn_set_ce),
+ FN(skb_ecn_set_ce), \
+ FN(get_listener_sock),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
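
A hedged BPF-C sketch of the contract the bpf_spin_lock() documentation above spells out: the lock sits at the top level of a hash/array map value next to the data it guards, and every path releases it before returning. Struct and function names are illustrative; the helper declarations come from the usual libbpf bpf_helpers header, assumed here.

    #include <linux/types.h>

    struct counter_val {
            struct bpf_spin_lock lock;  /* top-level, 4-byte aligned */
            __u64 count;
    };

    /* val must come from a BPF_MAP_TYPE_HASH/ARRAY lookup, with BTF
     * describing the map value. */
    static __always_inline void counter_bump(struct counter_val *val)
    {
            bpf_spin_lock(&val->lock);
            val->count++;               /* no helper calls inside the lock */
            bpf_spin_unlock(&val->lock);
    }
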
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index d1e49514977b..f396a82dfd3e 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -270,9 +270,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 39
+#define DM_VERSION_MINOR 40
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2018-04-03)"
+#define DM_VERSION_EXTRA "-ioctl (2019-01-18)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index e4d6ddd93567..34c02e4290fe 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -421,6 +421,8 @@ typedef struct elf64_shdr {
#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
#define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */
+#define NT_ARM_PACA_KEYS 0x407 /* ARM pointer authentication address keys */
+#define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 3652b239dad1..d473e5ed044c 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1591,7 +1591,7 @@ enum ethtool_link_mode_bit_indices {
static inline int ethtool_validate_speed(__u32 speed)
{
- return speed <= INT_MAX || speed == SPEED_UNKNOWN;
+ return speed <= INT_MAX || speed == (__u32)SPEED_UNKNOWN;
}
/* Duplex, half or full. */
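
The cast is a no-op at runtime: SPEED_UNKNOWN is -1, and the usual arithmetic conversions already turn it into 0xffffffff when compared against a __u32. Making the conversion explicit documents that intent and silences sign-compare warnings. A quick stand-alone check:

    #include <stdio.h>

    #define SPEED_UNKNOWN -1

    int main(void)
    {
            unsigned int speed = 0xffffffffu;

            /* Both comparisons print 1: -1 converts to UINT_MAX either way. */
            printf("%d %d\n", speed == SPEED_UNKNOWN,
                   speed == (unsigned int)SPEED_UNKNOWN);
            return 0;
    }
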
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index b4967d48bfda..2ac598614a8f 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -122,6 +122,9 @@
* - add FOPEN_CACHE_DIR
* - add FUSE_MAX_PAGES, add max_pages to init_out
* - add FUSE_CACHE_SYMLINKS
+ *
+ * 7.29
+ * - add FUSE_NO_OPENDIR_SUPPORT flag
*/
#ifndef _LINUX_FUSE_H
@@ -157,7 +160,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 28
+#define FUSE_KERNEL_MINOR_VERSION 29
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -259,6 +262,7 @@ struct fuse_file_lock {
* FUSE_ABORT_ERROR: reading the device after abort returns ECONNABORTED
* FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages
* FUSE_CACHE_SYMLINKS: cache READLINK responses
+ * FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -284,6 +288,7 @@ struct fuse_file_lock {
#define FUSE_ABORT_ERROR (1 << 21)
#define FUSE_MAX_PAGES (1 << 22)
#define FUSE_CACHE_SYMLINKS (1 << 23)
+#define FUSE_NO_OPENDIR_SUPPORT (1 << 24)
/**
* CUSE INIT request/reply flags
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 7f14d4a66c28..64cee116928e 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -439,10 +439,12 @@
#define KEY_TITLE 0x171
#define KEY_SUBTITLE 0x172
#define KEY_ANGLE 0x173
-#define KEY_ZOOM 0x174
+#define KEY_FULL_SCREEN 0x174 /* AC View Toggle */
+#define KEY_ZOOM KEY_FULL_SCREEN
#define KEY_MODE 0x175
#define KEY_KEYBOARD 0x176
-#define KEY_SCREEN 0x177
+#define KEY_ASPECT_RATIO 0x177 /* HUTRR37: Aspect */
+#define KEY_SCREEN KEY_ASPECT_RATIO
#define KEY_PC 0x178 /* Media Select Computer */
#define KEY_TV 0x179 /* Media Select TV */
#define KEY_TV2 0x17a /* Media Select Cable */
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index 6b319581882f..45fcbf99d72e 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -192,6 +192,9 @@ struct lirc_scancode {
* @RC_PROTO_XMP: XMP protocol
* @RC_PROTO_CEC: CEC protocol
* @RC_PROTO_IMON: iMon Pad protocol
+ * @RC_PROTO_RCMM12: RC-MM protocol 12 bits
+ * @RC_PROTO_RCMM24: RC-MM protocol 24 bits
+ * @RC_PROTO_RCMM32: RC-MM protocol 32 bits
*/
enum rc_proto {
RC_PROTO_UNKNOWN = 0,
@@ -218,6 +221,9 @@ enum rc_proto {
RC_PROTO_XMP = 21,
RC_PROTO_CEC = 22,
RC_PROTO_IMON = 23,
+ RC_PROTO_RCMM12 = 24,
+ RC_PROTO_RCMM24 = 25,
+ RC_PROTO_RCMM32 = 26,
};
#endif
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index f57c9e434d2d..de5d90212409 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -243,6 +243,7 @@ struct nd_cmd_pkg {
#define NVDIMM_FAMILY_HPE1 1
#define NVDIMM_FAMILY_HPE2 2
#define NVDIMM_FAMILY_MSFT 3
+#define NVDIMM_FAMILY_HYPERV 4
#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\
struct nd_cmd_pkg)
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index e1e9888c85e6..5c98133f2c94 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -866,6 +866,7 @@
#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */
#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */
+#define PCI_ATS_CAP_PAGE_ALIGNED 0x0020 /* Page Aligned Request */
#define PCI_ATS_CTRL 0x06 /* ATS Control Register */
#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
@@ -880,6 +881,7 @@
#define PCI_PRI_STATUS_RF 0x001 /* Response Failure */
#define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */
#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
+#define PCI_PRI_STATUS_PASID 0x8000 /* PRG Response PASID Required */
#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
#define PCI_EXT_CAP_PRI_SIZEOF 16
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index b3bcabe380da..2fcad1dd0b0e 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -49,8 +49,11 @@
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
+#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15)
+#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
+#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
@@ -97,6 +100,10 @@
#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK \
(0x1 << PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT)
+#define PSCI_1_0_OS_INITIATED BIT(0)
+#define PSCI_1_0_SUSPEND_MODE_PC 0
+#define PSCI_1_0_SUSPEND_MODE_OSI 1
+
/* PSCI return values (inclusive of all PSCI versions) */
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_NOT_SUPPORTED -1
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index ac8c60bcc83b..43521d500c2b 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -6,8 +6,7 @@
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
- * SEV spec 0.14 is available at:
- * http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+ * SEV API specification is available at: https://developer.amd.com/sev/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -30,7 +29,8 @@ enum {
SEV_PDH_GEN,
SEV_PDH_CERT_EXPORT,
SEV_PEK_CERT_IMPORT,
- SEV_GET_ID,
+ SEV_GET_ID, /* This command is deprecated, use SEV_GET_ID2 */
+ SEV_GET_ID2,
SEV_MAX,
};
@@ -125,7 +125,7 @@ struct sev_user_data_pdh_cert_export {
} __packed;
/**
- * struct sev_user_data_get_id - GET_ID command parameters
+ * struct sev_user_data_get_id - GET_ID command parameters (deprecated)
*
* @socket1: Buffer to pass unique ID of first socket
* @socket2: Buffer to pass unique ID of second socket
@@ -136,6 +136,16 @@ struct sev_user_data_get_id {
} __packed;
/**
+ * struct sev_user_data_get_id2 - GET_ID command parameters
+ * @address: Buffer to store unique ID
+ * @length: length of the unique ID
+ */
+struct sev_user_data_get_id2 {
+ __u64 address; /* In */
+ __u32 length; /* In/Out */
+} __packed;
+
+/**
* struct sev_issue_cmd - SEV ioctl parameters
*
* @cmd: SEV commands to execute
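
Unlike the fixed two-socket layout it deprecates, the new GET_ID2 structure lets firmware report the buffer size it needs. A hedged user-space sketch of the usual two-call pattern over the existing SEV_ISSUE_CMD ioctl (error handling trimmed; helper name assumed):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/psp-sev.h>

    static void *sev_get_id2(int sev_fd, uint32_t *len)
    {
            struct sev_user_data_get_id2 id = { .address = 0, .length = 0 };
            struct sev_issue_cmd cmd = {
                    .cmd = SEV_GET_ID2,
                    .data = (uint64_t)(uintptr_t)&id,
            };
            void *buf;

            ioctl(sev_fd, SEV_ISSUE_CMD, &cmd); /* firmware fills id.length */
            buf = malloc(id.length);
            id.address = (uint64_t)(uintptr_t)buf;
            if (ioctl(sev_fd, SEV_ISSUE_CMD, &cmd)) {
                    free(buf);
                    return NULL;
            }
            *len = id.length;
            return buf;
    }
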
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index c4253f0090d8..ee0f2460bff6 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -66,6 +66,9 @@
* @delay_usecs: If nonzero, how long to delay after the last bit transfer
* before optionally deselecting the device before the next transfer.
* @cs_change: True to deselect device before starting the next transfer.
+ * @word_delay_usecs: If nonzero, how long to wait between words within one
+ * transfer. This property needs explicit support in the SPI controller,
+ * otherwise it is silently ignored.
*
* This structure is mapped directly to the kernel spi_transfer structure;
* the fields have the same meanings, except of course that the pointers
@@ -100,7 +103,8 @@ struct spi_ioc_transfer {
__u8 cs_change;
__u8 tx_nbits;
__u8 rx_nbits;
- __u16 pad;
+ __u8 word_delay_usecs;
+ __u8 pad;
/* If the contents of 'struct spi_ioc_transfer' ever change
* incompatibly, then the ioctl number (currently 0) must change;
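
A hedged user-space sketch of requesting the new inter-word delay via spidev; per the comment above, controllers without support silently ignore the field:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    static int xfer_with_word_delay(int fd, const void *tx, void *rx,
                                    unsigned int len)
    {
            struct spi_ioc_transfer xfer;

            memset(&xfer, 0, sizeof(xfer));
            xfer.tx_buf = (unsigned long)tx;
            xfer.rx_buf = (unsigned long)rx;
            xfer.len = len;
            xfer.word_delay_usecs = 10;     /* 10us pause between words */
            return ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
    }
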
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 3dcfc6148f99..06479f2fb3ae 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -533,6 +533,8 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type {
};
#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (V4L2_CID_MPEG_BASE+381)
#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (V4L2_CID_MPEG_BASE+382)
+#define V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (V4L2_CID_MPEG_BASE+383)
+#define V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (V4L2_CID_MPEG_BASE+384)
#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_MPEG_BASE+400)
#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_MPEG_BASE+401)
#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_MPEG_BASE+402)
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index 0e68024f36c7..26f39816af14 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
#endif
+/* vmmdev_request_header.requestor defines */
+
+/* Requestor user not given. */
+#define VMMDEV_REQUESTOR_USR_NOT_GIVEN 0x00000000
+/* The kernel driver (vboxguest) is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV 0x00000001
+/* Some other kernel driver is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV_OTHER 0x00000002
+/* The root or an admin user is the requestor. */
+#define VMMDEV_REQUESTOR_USR_ROOT 0x00000003
+/* Regular joe user is making the request. */
+#define VMMDEV_REQUESTOR_USR_USER 0x00000006
+/* User classification mask. */
+#define VMMDEV_REQUESTOR_USR_MASK 0x00000007
+
+/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
+#define VMMDEV_REQUESTOR_KERNEL 0x00000000
+/* User mode request. */
+#define VMMDEV_REQUESTOR_USERMODE 0x00000008
+/* User or kernel mode classification mask. */
+#define VMMDEV_REQUESTOR_MODE_MASK 0x00000008
+
+/* Don't know the physical console association of the requestor. */
+#define VMMDEV_REQUESTOR_CON_DONT_KNOW 0x00000000
+/*
+ * The request originates with a process that is NOT associated with the
+ * physical console.
+ */
+#define VMMDEV_REQUESTOR_CON_NO 0x00000010
+/* Requestor process is associated with the physical console. */
+#define VMMDEV_REQUESTOR_CON_YES 0x00000020
+/* Console classification mask. */
+#define VMMDEV_REQUESTOR_CON_MASK 0x00000030
+
+/* Requestor is member of special VirtualBox user group. */
+#define VMMDEV_REQUESTOR_GRP_VBOX 0x00000080
+
+/* Note: trust level is for Windows guests only, Linux always uses not-given */
+/* Requestor trust level: Unspecified */
+#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN 0x00000000
+/* Requestor trust level: Untrusted (SID S-1-16-0) */
+#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED 0x00001000
+/* Requestor trust level: Untrusted (SID S-1-16-4096) */
+#define VMMDEV_REQUESTOR_TRUST_LOW 0x00002000
+/* Requestor trust level: Medium (SID S-1-16-8192) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM 0x00003000
+/* Requestor trust level: Medium plus (SID S-1-16-8448) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS 0x00004000
+/* Requestor trust level: High (SID S-1-16-12288) */
+#define VMMDEV_REQUESTOR_TRUST_HIGH 0x00005000
+/* Requestor trust level: System (SID S-1-16-16384) */
+#define VMMDEV_REQUESTOR_TRUST_SYSTEM 0x00006000
+/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
+#define VMMDEV_REQUESTOR_TRUST_PROTECTED 0x00007000
+/* Requestor trust level mask */
+#define VMMDEV_REQUESTOR_TRUST_MASK 0x00007000
+
+/* Requestor is using the less trusted user device node (/dev/vboxuser) */
+#define VMMDEV_REQUESTOR_USER_DEVICE 0x00008000
+
/** HGCM service location types. */
enum vmmdev_hgcm_service_location_type {
VMMDEV_HGCM_LOC_INVALID = 0,
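Because the requestor word packs several independent classifications, consumers compare against each mask rather than testing single bits. A small sketch with a made-up value:

#include <stdio.h>
#include <linux/vbox_vmmdev_types.h>

int main(void)
{
	/* Illustrative value: an ordinary user-mode process on the console */
	unsigned int requestor = VMMDEV_REQUESTOR_USR_USER |
				 VMMDEV_REQUESTOR_USERMODE |
				 VMMDEV_REQUESTOR_CON_YES;

	if ((requestor & VMMDEV_REQUESTOR_MODE_MASK) == VMMDEV_REQUESTOR_USERMODE)
		printf("user-mode request\n");
	if ((requestor & VMMDEV_REQUESTOR_USR_MASK) == VMMDEV_REQUESTOR_USR_USER)
		printf("regular user\n");
	if ((requestor & VMMDEV_REQUESTOR_CON_MASK) == VMMDEV_REQUESTOR_CON_YES)
		printf("associated with the physical console\n");
	return 0;
}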
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 02bb7ad6e986..8f10748dac79 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -353,6 +353,10 @@ struct vfio_region_gfx_edid {
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2
};
+#define VFIO_REGION_TYPE_CCW (2)
+/* ccw sub-types */
+#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1)
+
/*
* 10de vendor sub-type
*
diff --git a/include/uapi/linux/vfio_ccw.h b/include/uapi/linux/vfio_ccw.h
index 2ec5f367ff78..cbecbf0cd54f 100644
--- a/include/uapi/linux/vfio_ccw.h
+++ b/include/uapi/linux/vfio_ccw.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
+/* used for START SUBCHANNEL, always present */
struct ccw_io_region {
#define ORB_AREA_SIZE 12
__u8 orb_area[ORB_AREA_SIZE];
@@ -22,4 +23,15 @@ struct ccw_io_region {
__u32 ret_code;
} __packed;
+/*
+ * used for processing commands that trigger asynchronous actions
+ * Note: this is controlled by a capability
+ */
+#define VFIO_CCW_ASYNC_CMD_HSCH (1 << 0)
+#define VFIO_CCW_ASYNC_CMD_CSCH (1 << 1)
+struct ccw_cmd_region {
+ __u32 command;
+ __u32 ret_code;
+} __packed;
+
#endif
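A hedged sketch of driving the async region from userspace: write a ccw_cmd_region at the region's offset, then read it back for ret_code. The fd and offset are assumed to come from the usual VFIO_DEVICE_GET_REGION_INFO lookup for the new async sub-type, gated by the capability noted above.

#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/vfio_ccw.h>

static int issue_hsch(int device_fd, off_t region_offset)
{
	struct ccw_cmd_region cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.command = VFIO_CCW_ASYNC_CMD_HSCH;	/* halt subchannel */
	if (pwrite(device_fd, &cmd, sizeof(cmd), region_offset) !=
	    (ssize_t)sizeof(cmd))
		return -1;
	if (pread(device_fd, &cmd, sizeof(cmd), region_offset) !=
	    (ssize_t)sizeof(cmd))
		return -1;
	return (int)cmd.ret_code;
}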
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index b5671ce2724f..1db220da3bcc 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -130,6 +130,13 @@ enum v4l2_field {
((field) == V4L2_FIELD_BOTTOM ||\
(field) == V4L2_FIELD_TOP ||\
(field) == V4L2_FIELD_ALTERNATE)
+#define V4L2_FIELD_IS_INTERLACED(field) \
+ ((field) == V4L2_FIELD_INTERLACED ||\
+ (field) == V4L2_FIELD_INTERLACED_TB ||\
+ (field) == V4L2_FIELD_INTERLACED_BT)
+#define V4L2_FIELD_IS_SEQUENTIAL(field) \
+ ((field) == V4L2_FIELD_SEQ_TB ||\
+ (field) == V4L2_FIELD_SEQ_BT)
enum v4l2_buf_type {
V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
@@ -161,7 +168,8 @@ enum v4l2_buf_type {
|| (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \
|| (type) == V4L2_BUF_TYPE_VBI_OUTPUT \
|| (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT \
- || (type) == V4L2_BUF_TYPE_SDR_OUTPUT)
+ || (type) == V4L2_BUF_TYPE_SDR_OUTPUT \
+ || (type) == V4L2_BUF_TYPE_META_OUTPUT)
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
@@ -554,6 +562,10 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
+#define V4L2_PIX_FMT_AYUV32 v4l2_fourcc('A', 'Y', 'U', 'V') /* 32 AYUV-8-8-8-8 */
+#define V4L2_PIX_FMT_XYUV32 v4l2_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */
+#define V4L2_PIX_FMT_VUYA32 v4l2_fourcc('V', 'U', 'Y', 'A') /* 32 VUYA-8-8-8-8 */
+#define V4L2_PIX_FMT_VUYX32 v4l2_fourcc('V', 'U', 'Y', 'X') /* 32 VUYX-8-8-8-8 */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
@@ -973,6 +985,18 @@ struct v4l2_buffer {
};
};
+/**
+ * v4l2_timeval_to_ns - Convert timeval to nanoseconds
+ * @ts: pointer to the timeval variable to be converted
+ *
+ * Returns the scalar nanosecond representation of the timeval
+ * parameter.
+ */
+static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv)
+{
+ return (__u64)tv->tv_sec * 1000000000ULL + tv->tv_usec * 1000;
+}
+
/* Flags for 'flags' field */
/* Buffer is mapped (flag) */
#define V4L2_BUF_FLAG_MAPPED 0x00000001
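The new helper is a plain scale-and-add; once this header change is in place it can be used directly, e.g. on a buffer timestamp:

#include <stdio.h>
#include <sys/time.h>
#include <linux/videodev2.h>

int main(void)
{
	struct timeval tv = { .tv_sec = 1, .tv_usec = 500000 };

	/* 1.5 s expressed as 1500000000 ns */
	printf("%llu\n", (unsigned long long)v4l2_timeval_to_ns(&tv));
	return 0;
}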
diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h
index aad3b6201fc0..b69e9ba6742b 100644
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -171,6 +171,11 @@
/* Re-name volumes */
#define UBI_IOCRNVOL _IOW(UBI_IOC_MAGIC, 3, struct ubi_rnvol_req)
+/* Read the specified PEB and scrub it if there are bitflips */
+#define UBI_IOCRPEB _IOW(UBI_IOC_MAGIC, 4, __s32)
+/* Force scrubbing on the specified PEB */
+#define UBI_IOCSPEB _IOW(UBI_IOC_MAGIC, 5, __s32)
+
/* ioctl commands of the UBI control character device */
#define UBI_CTRL_IOC_MAGIC 'o'
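A short sketch of the two new ioctls against the UBI character device; the device path and PEB number are placeholders, and the kernel reads the PEB number through the user pointer:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
	__s32 peb = 42;				/* placeholder PEB number */
	int fd = open("/dev/ubi0", O_RDWR);	/* UBI device, not a volume */

	if (fd < 0)
		return 1;
	ioctl(fd, UBI_IOCRPEB, &peb);	/* read, scrub only if bitflips seen */
	ioctl(fd, UBI_IOCSPEB, &peb);	/* unconditionally force scrubbing */
	close(fd);
	return 0;
}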
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index a7a6111e50c7..dc52e3cf574c 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -44,6 +44,14 @@
#define BNXT_RE_ABI_VERSION 1
+#define BNXT_RE_CHIP_ID0_CHIP_NUM_SFT 0x00
+#define BNXT_RE_CHIP_ID0_CHIP_REV_SFT 0x10
+#define BNXT_RE_CHIP_ID0_CHIP_MET_SFT 0x18
+
+enum {
+ BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL
+};
+
struct bnxt_re_uctx_resp {
__u32 dev_id;
__u32 max_qp;
@@ -51,6 +59,9 @@ struct bnxt_re_uctx_resp {
__u32 cqe_sz;
__u32 max_cqd;
__u32 rsvd;
+ __aligned_u64 comp_mask;
+ __u32 chip_id0;
+ __u32 chip_id1;
};
/*
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 480d9a60b68e..0474c7400268 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -270,6 +270,8 @@ struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_tm_caps tm_caps;
struct ib_uverbs_cq_moderation_caps cq_moderation_caps;
__aligned_u64 max_dm_size;
+ __u32 xrc_odp_caps;
+ __u32 reserved;
};
struct ib_uverbs_query_port {
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 87b3198f4b5d..f4d4010b7e3e 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -238,6 +238,7 @@ enum mlx5_ib_query_dev_resp_flags {
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
+ MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};
enum mlx5_ib_tunnel_offloads {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index b8d121d457f1..8149d224030b 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -84,6 +84,14 @@ enum mlx5_ib_devx_obj_query_attrs {
MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
};
+enum mlx5_ib_devx_obj_query_async_attrs {
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
+};
+
enum mlx5_ib_devx_query_eqn_attrs {
MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
@@ -94,6 +102,7 @@ enum mlx5_ib_devx_obj_methods {
MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
MLX5_IB_METHOD_DEVX_OBJ_QUERY,
+ MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
};
enum mlx5_ib_devx_umem_reg_attrs {
@@ -113,11 +122,20 @@ enum mlx5_ib_devx_umem_methods {
MLX5_IB_METHOD_DEVX_UMEM_DEREG,
};
+enum mlx5_ib_devx_async_cmd_fd_alloc_attrs {
+ MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_devx_async_cmd_fd_methods {
+ MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+};
+
enum mlx5_ib_objects {
MLX5_IB_OBJECT_DEVX = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_OBJECT_DEVX_OBJ,
MLX5_IB_OBJECT_DEVX_UMEM,
MLX5_IB_OBJECT_FLOW_MATCHER,
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
};
enum mlx5_ib_flow_matcher_create_attrs {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 4ef62c0e8452..4a701033b93f 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -51,5 +51,10 @@ enum mlx5_ib_uapi_flow_action_packet_reformat_type {
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3,
};
+struct mlx5_ib_uapi_devx_async_cmd_hdr {
+ __aligned_u64 wr_id;
+ __u8 out_data[];
+};
+
#endif
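Each completion read from the async command fd is expected to carry this header followed by the command output; a hedged sketch of draining one completion (the fd from the new ASYNC_CMD_FD_ALLOC method and out_len matching the queued query's OUT_LEN are assumptions):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

static uint64_t drain_one_completion(int fd, void *out, size_t out_len)
{
	size_t raw_len = sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr) + out_len;
	struct mlx5_ib_uapi_devx_async_cmd_hdr *hdr = malloc(raw_len);
	uint64_t wr_id = UINT64_MAX;

	if (hdr && read(fd, hdr, raw_len) == (ssize_t)raw_len) {
		memcpy(out, hdr->out_data, out_len);
		wr_id = hdr->wr_id;	/* pairs with the WR_ID queued earlier */
	}
	free(hdr);
	return wr_id;
}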
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 2e18b77a817f..5cc592728071 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -5,8 +5,7 @@
#include <linux/types.h>
enum {
- RDMA_NL_RDMA_CM = 1,
- RDMA_NL_IWCM,
+ RDMA_NL_IWCM = 2,
RDMA_NL_RSVD,
RDMA_NL_LS, /* RDMA Local Services */
RDMA_NL_NLDEV, /* RDMA device interface */
@@ -14,8 +13,7 @@ enum {
};
enum {
- RDMA_NL_GROUP_CM = 1,
- RDMA_NL_GROUP_IWPM,
+ RDMA_NL_GROUP_IWPM = 2,
RDMA_NL_GROUP_LS,
RDMA_NL_NUM_GROUPS
};
@@ -24,15 +22,17 @@ enum {
#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1))
#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)
-enum {
- RDMA_NL_RDMA_CM_ID_STATS = 0,
- RDMA_NL_RDMA_CM_NUM_OPS
-};
+/* The minimum version that the iwpm kernel supports */
+#define IWPM_UABI_VERSION_MIN 3
+
+/* The latest version that the iwpm kernel supports */
+#define IWPM_UABI_VERSION 4
+/* iwarp port mapper message flags */
enum {
- RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1,
- RDMA_NL_RDMA_CM_ATTR_DST_ADDR,
- RDMA_NL_RDMA_CM_NUM_ATTR,
+
+ /* Do not map the port for this IWPM request */
+ IWPM_FLAGS_NO_PORT_MAP = (1 << 0),
};
/* iwarp port mapper op-codes */
@@ -45,6 +45,7 @@ enum {
RDMA_NL_IWPM_HANDLE_ERR,
RDMA_NL_IWPM_MAPINFO,
RDMA_NL_IWPM_MAPINFO_NUM,
+ RDMA_NL_IWPM_HELLO,
RDMA_NL_IWPM_NUM_OPS
};
@@ -83,20 +84,38 @@ enum {
IWPM_NLA_MANAGE_MAPPING_UNSPEC = 0,
IWPM_NLA_MANAGE_MAPPING_SEQ,
IWPM_NLA_MANAGE_ADDR,
- IWPM_NLA_MANAGE_MAPPED_LOC_ADDR,
+ IWPM_NLA_MANAGE_FLAGS,
+ IWPM_NLA_MANAGE_MAPPING_MAX
+};
+
+enum {
+ IWPM_NLA_RMANAGE_MAPPING_UNSPEC = 0,
+ IWPM_NLA_RMANAGE_MAPPING_SEQ,
+ IWPM_NLA_RMANAGE_ADDR,
+ IWPM_NLA_RMANAGE_MAPPED_LOC_ADDR,
+ /* The following maintains bisectability of rdma-core */
+ IWPM_NLA_MANAGE_MAPPED_LOC_ADDR = IWPM_NLA_RMANAGE_MAPPED_LOC_ADDR,
IWPM_NLA_RMANAGE_MAPPING_ERR,
IWPM_NLA_RMANAGE_MAPPING_MAX
};
-#define IWPM_NLA_MANAGE_MAPPING_MAX 3
-#define IWPM_NLA_QUERY_MAPPING_MAX 4
#define IWPM_NLA_MAPINFO_SEND_MAX 3
+#define IWPM_NLA_REMOVE_MAPPING_MAX 3
enum {
IWPM_NLA_QUERY_MAPPING_UNSPEC = 0,
IWPM_NLA_QUERY_MAPPING_SEQ,
IWPM_NLA_QUERY_LOCAL_ADDR,
IWPM_NLA_QUERY_REMOTE_ADDR,
+ IWPM_NLA_QUERY_FLAGS,
+ IWPM_NLA_QUERY_MAPPING_MAX,
+};
+
+enum {
+ IWPM_NLA_RQUERY_MAPPING_UNSPEC = 0,
+ IWPM_NLA_RQUERY_MAPPING_SEQ,
+ IWPM_NLA_RQUERY_LOCAL_ADDR,
+ IWPM_NLA_RQUERY_REMOTE_ADDR,
IWPM_NLA_RQUERY_MAPPED_LOC_ADDR,
IWPM_NLA_RQUERY_MAPPED_REM_ADDR,
IWPM_NLA_RQUERY_MAPPING_ERR,
@@ -114,6 +133,7 @@ enum {
IWPM_NLA_MAPINFO_UNSPEC = 0,
IWPM_NLA_MAPINFO_LOCAL_ADDR,
IWPM_NLA_MAPINFO_MAPPED_ADDR,
+ IWPM_NLA_MAPINFO_FLAGS,
IWPM_NLA_MAPINFO_MAX
};
@@ -132,6 +152,12 @@ enum {
IWPM_NLA_ERR_MAX
};
+enum {
+ IWPM_NLA_HELLO_UNSPEC = 0,
+ IWPM_NLA_HELLO_ABI_VERSION,
+ IWPM_NLA_HELLO_MAX
+};
+
/*
* Local service operations:
* RESOLVE - The client requests the local service to resolve a path.
@@ -229,9 +255,11 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_GET, /* can dump */
RDMA_NLDEV_CMD_SET,
- /* 3 - 4 are free to use */
+ RDMA_NLDEV_CMD_NEWLINK,
- RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */
+ RDMA_NLDEV_CMD_DELLINK,
+
+ RDMA_NLDEV_CMD_PORT_GET, /* can dump */
/* 6 - 8 are free to use */
@@ -431,6 +459,20 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_DRIVER_U64, /* u64 */
/*
+	 * Indexes to get/set a specific entry,
+ * for QP use RDMA_NLDEV_ATTR_RES_LQPN
+ */
+ RDMA_NLDEV_ATTR_RES_PDN, /* u32 */
+ RDMA_NLDEV_ATTR_RES_CQN, /* u32 */
+ RDMA_NLDEV_ATTR_RES_MRN, /* u32 */
+ RDMA_NLDEV_ATTR_RES_CM_IDN, /* u32 */
+ RDMA_NLDEV_ATTR_RES_CTXN, /* u32 */
+ /*
+	 * Identifies the rdma driver, e.g. "rxe" or "siw"
+ */
+ RDMA_NLDEV_ATTR_LINK_TYPE, /* string */
+
+ /*
* Always the end
*/
RDMA_NLDEV_ATTR_MAX
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 0d1e78ebad05..e42940a215a3 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -300,6 +300,10 @@ enum {
RDMA_OPTION_ID_TOS = 0,
RDMA_OPTION_ID_REUSEADDR = 1,
RDMA_OPTION_ID_AFONLY = 2,
+ RDMA_OPTION_ID_ACK_TIMEOUT = 3
+};
+
+enum {
RDMA_OPTION_IB_PATH = 1
};
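With the ID-level options now split from the IB level, the new knob is set the same way as TOS; a sketch via librdmacm's rdma_set_option, assuming a library new enough to expose the constant:

#include <stdint.h>
#include <rdma/rdma_cma.h>

/* Set the RC ack timeout on a CM id before rdma_connect(); the value
 * uses the usual IB 4.096us * 2^timeout encoding (assumption). */
static int set_ack_timeout(struct rdma_cm_id *id, uint8_t timeout)
{
	return rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_ACK_TIMEOUT,
			       &timeout, sizeof(timeout));
}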
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index 44ef6a3b7afc..aae2e696bb38 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -58,8 +58,7 @@ struct rxe_global_route {
struct rxe_av {
__u8 port_num;
__u8 network_type;
- __u16 reserved1;
- __u32 reserved2;
+ __u8 dmac[6];
struct rxe_global_route grh;
union {
struct sockaddr_in _sockaddr_in;
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 404d4b9ffe76..df1153cea0b7 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -32,6 +32,7 @@
#ifndef __KERNEL__
#include <stdlib.h>
+#include <time.h>
#endif
/*
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index b80b85f0d9d8..b03fafa1ff58 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -258,7 +258,8 @@ void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride);
void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch);
void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf);
void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off);
-void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride);
+void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride,
+ u32 pixelformat);
void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id);
int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch);
void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize);
@@ -355,8 +356,9 @@ bool ipu_prg_channel_configure_pending(struct ipuv3_channel *ipu_chan);
*/
struct ipu_csi;
int ipu_csi_init_interface(struct ipu_csi *csi,
- struct v4l2_mbus_config *mbus_cfg,
- struct v4l2_mbus_framefmt *mbus_fmt);
+ const struct v4l2_mbus_config *mbus_cfg,
+ const struct v4l2_mbus_framefmt *infmt,
+ const struct v4l2_mbus_framefmt *outfmt);
bool ipu_csi_is_interlaced(struct ipu_csi *csi);
void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w);
void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w);
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 0e2156786ad2..19d032373de5 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -46,4 +46,8 @@ struct bio_vec;
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
const struct bio_vec *vec2);
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
+extern u64 xen_saved_max_mem_size;
+#endif
+
#endif /* _XEN_XEN_H */
diff --git a/init/Kconfig b/init/Kconfig
index 53b54214a36e..4592bf7997c0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -13,7 +13,7 @@ config CC_IS_GCC
config GCC_VERSION
int
- default $(shell,$(srctree)/scripts/gcc-version.sh -p $(CC) | sed 's/^0*//') if CC_IS_GCC
+ default $(shell,$(srctree)/scripts/gcc-version.sh $(CC)) if CC_IS_GCC
default 0
config CC_IS_CLANG
@@ -26,6 +26,22 @@ config CLANG_VERSION
config CC_HAS_ASM_GOTO
def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
+config CC_HAS_WARN_MAYBE_UNINITIALIZED
+ def_bool $(cc-option,-Wmaybe-uninitialized)
+ help
+ GCC >= 4.7 supports this option.
+
+config CC_DISABLE_WARN_MAYBE_UNINITIALIZED
+ bool
+ depends on CC_HAS_WARN_MAYBE_UNINITIALIZED
+ default CC_IS_GCC && GCC_VERSION < 40900 # unreliable for GCC < 4.9
+ help
+ GCC's -Wmaybe-uninitialized is not reliable by definition.
+ Lots of false positive warnings are produced in some cases.
+
+	  If this option is enabled, -Wno-maybe-uninitialized is passed
+ to the compiler to suppress maybe-uninitialized warnings.
+
config CONSTRUCTORS
bool
depends on !UML
@@ -382,6 +398,7 @@ config VIRT_CPU_ACCOUNTING_GEN
bool "Full dynticks CPU time accounting"
depends on HAVE_CONTEXT_TRACKING
depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
+ depends on GENERIC_CLOCKEVENTS
select VIRT_CPU_ACCOUNTING
select CONTEXT_TRACKING
help
@@ -1113,6 +1130,7 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
config CC_OPTIMIZE_FOR_SIZE
bool "Optimize for size"
+ imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
help
Enabling this option will pass "-Os" instead of "-O2" to
your compiler resulting in a smaller kernel.
diff --git a/init/main.c b/init/main.c
index 3192c6cc5382..efe6d62e3846 100644
--- a/init/main.c
+++ b/init/main.c
@@ -373,12 +373,20 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
*/
static void __init setup_command_line(char *command_line)
{
- saved_command_line =
- memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES);
- initcall_command_line =
- memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES);
- static_command_line = memblock_alloc(strlen(command_line) + 1,
- SMP_CACHE_BYTES);
+ size_t len = strlen(boot_command_line) + 1;
+
+ saved_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!saved_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+ initcall_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!initcall_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+ static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!static_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
strcpy(saved_command_line, boot_command_line);
strcpy(static_command_line, command_line);
}
@@ -496,6 +504,10 @@ void __init __weak thread_stack_cache_init(void)
void __init __weak mem_encrypt_init(void) { }
+void __init __weak poking_init(void) { }
+
+void __init __weak pgd_cache_init(void) { }
+
bool initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
@@ -527,6 +539,7 @@ static void __init mm_init(void)
init_espfix_bsp();
/* Should be run after espfix64 is set up. */
pti_init();
+ pgd_cache_init();
}
void __init __weak arch_call_rest_init(void)
@@ -574,6 +587,8 @@ asmlinkage __visible void __init start_kernel(void)
page_alloc_init();
pr_notice("Kernel command line: %s\n", boot_command_line);
+ /* parameters may set static keys */
+ jump_label_init();
parse_early_param();
after_dashes = parse_args("Booting kernel",
static_command_line, __start___param,
@@ -583,8 +598,6 @@ asmlinkage __visible void __init start_kernel(void)
parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
NULL, set_init_arg);
- jump_label_init();
-
/*
* These use large bootmem allocations and must precede
* kmem_cache_init()
@@ -729,6 +742,7 @@ asmlinkage __visible void __init start_kernel(void)
taskstats_init_early();
delayacct_init();
+ poking_init();
check_bugs();
acpi_subsystem_init();
@@ -770,8 +784,14 @@ static int __init initcall_blacklist(char *str)
pr_debug("blacklisting initcall %s\n", str_entry);
entry = memblock_alloc(sizeof(*entry),
SMP_CACHE_BYTES);
+ if (!entry)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, sizeof(*entry));
entry->buf = memblock_alloc(strlen(str_entry) + 1,
SMP_CACHE_BYTES);
+ if (!entry->buf)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, strlen(str_entry) + 1);
strcpy(entry->buf, str_entry);
list_add(&entry->next, &blacklisted_initcalls);
}
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c839bf83231d..aea30530c472 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -18,6 +18,7 @@
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
@@ -42,6 +43,10 @@
#include <net/sock.h>
#include "util.h"
+struct mqueue_fs_context {
+ struct ipc_namespace *ipc_ns;
+};
+
#define MQUEUE_MAGIC 0x19800202
#define DIRENT_SIZE 20
#define FILENT_SIZE 80
@@ -87,9 +92,11 @@ struct mqueue_inode_info {
unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
+static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
+static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);
static struct kmem_cache *mqueue_inode_cachep;
@@ -322,7 +329,7 @@ err:
return ERR_PTR(ret);
}
-static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
+static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
struct ipc_namespace *ns = sb->s_fs_info;
@@ -343,18 +350,56 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct dentry *mqueue_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int mqueue_get_tree(struct fs_context *fc)
{
- struct ipc_namespace *ns;
- if (flags & SB_KERNMOUNT) {
- ns = data;
- data = NULL;
- } else {
- ns = current->nsproxy->ipc_ns;
- }
- return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
+ struct mqueue_fs_context *ctx = fc->fs_private;
+
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
+ fc->s_fs_info = ctx->ipc_ns;
+ return vfs_get_super(fc, vfs_get_keyed_super, mqueue_fill_super);
+}
+
+static void mqueue_fs_context_free(struct fs_context *fc)
+{
+ struct mqueue_fs_context *ctx = fc->fs_private;
+
+ if (ctx->ipc_ns)
+ put_ipc_ns(ctx->ipc_ns);
+ kfree(ctx);
+}
+
+static int mqueue_init_fs_context(struct fs_context *fc)
+{
+ struct mqueue_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+ fc->fs_private = ctx;
+ fc->ops = &mqueue_fs_context_ops;
+ return 0;
+}
+
+static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
+{
+ struct mqueue_fs_context *ctx;
+ struct fs_context *fc;
+ struct vfsmount *mnt;
+
+ fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
+ if (IS_ERR(fc))
+ return ERR_CAST(fc);
+
+ ctx = fc->fs_private;
+ put_ipc_ns(ctx->ipc_ns);
+ ctx->ipc_ns = get_ipc_ns(ns);
+
+ mnt = fc_mount(fc);
+ put_fs_context(fc);
+ return mnt;
}
static void init_once(void *foo)
@@ -1522,15 +1567,22 @@ static const struct super_operations mqueue_super_ops = {
.statfs = simple_statfs,
};
+static const struct fs_context_operations mqueue_fs_context_ops = {
+ .free = mqueue_fs_context_free,
+ .get_tree = mqueue_get_tree,
+};
+
static struct file_system_type mqueue_fs_type = {
- .name = "mqueue",
- .mount = mqueue_mount,
- .kill_sb = kill_litter_super,
- .fs_flags = FS_USERNS_MOUNT,
+ .name = "mqueue",
+ .init_fs_context = mqueue_init_fs_context,
+ .kill_sb = kill_litter_super,
+ .fs_flags = FS_USERNS_MOUNT,
};
int mq_init_ns(struct ipc_namespace *ns)
{
+ struct vfsmount *m;
+
ns->mq_queues_count = 0;
ns->mq_queues_max = DFLT_QUEUESMAX;
ns->mq_msg_max = DFLT_MSGMAX;
@@ -1538,12 +1590,10 @@ int mq_init_ns(struct ipc_namespace *ns)
ns->mq_msg_default = DFLT_MSG;
ns->mq_msgsize_default = DFLT_MSGSIZE;
- ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
- if (IS_ERR(ns->mq_mnt)) {
- int err = PTR_ERR(ns->mq_mnt);
- ns->mq_mnt = NULL;
- return err;
- }
+ m = mq_create_mount(ns);
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+ ns->mq_mnt = m;
return 0;
}
diff --git a/ipc/namespace.c b/ipc/namespace.c
index 21607791d62c..b3ca1476ca51 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -42,7 +42,7 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
goto fail;
err = -ENOMEM;
- ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
+ ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
if (ns == NULL)
goto fail_dec;
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index fbba478ae522..bf770d7556f7 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -229,7 +229,7 @@ config MUTEX_SPIN_ON_OWNER
config RWSEM_SPIN_ON_OWNER
def_bool y
- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
+ depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW
config LOCK_SPIN_ON_OWNER
def_bool y
@@ -251,3 +251,10 @@ config ARCH_USE_QUEUED_RWLOCKS
config QUEUED_RWLOCKS
def_bool y if ARCH_USE_QUEUED_RWLOCKS
depends on SMP
+
+config ARCH_HAS_MMIOWB
+ bool
+
+config MMIOWB
+ def_bool y if ARCH_HAS_MMIOWB
+ depends on SMP
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c57e78817da..62471e75a2b0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n
# Don't self-instrument.
KCOV_INSTRUMENT_kcov.o := n
KASAN_SANITIZE_kcov.o := n
+CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
# cond_syscall is currently not LTO compatible
CFLAGS_sys_ni.o = $(DISABLE_LTO)
diff --git a/kernel/backtracetest.c b/kernel/backtracetest.c
index 1323360d90e3..a563c8fdad0d 100644
--- a/kernel/backtracetest.c
+++ b/kernel/backtracetest.c
@@ -48,19 +48,14 @@ static void backtrace_test_irq(void)
#ifdef CONFIG_STACKTRACE
static void backtrace_test_saved(void)
{
- struct stack_trace trace;
unsigned long entries[8];
+ unsigned int nr_entries;
pr_info("Testing a saved backtrace.\n");
pr_info("The following trace is a kernel self test and not a bug!\n");
- trace.nr_entries = 0;
- trace.max_entries = ARRAY_SIZE(entries);
- trace.entries = entries;
- trace.skip = 0;
-
- save_stack_trace(&trace);
- print_stack_trace(&trace, 0);
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+ stack_trace_print(entries, nr_entries, 0);
}
#else
static void backtrace_test_saved(void)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ff09d32a8a1b..c605397c79f0 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -848,7 +848,6 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
if (fp->jited) {
struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
- bpf_jit_binary_unlock_ro(hdr);
bpf_jit_binary_free(hdr);
WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 8974b3755670..3c18260403dd 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
struct xdp_frame *xdpf)
{
+ unsigned int hard_start_headroom;
unsigned int frame_size;
void *pkt_data_start;
struct sk_buff *skb;
+	/* Part of headroom was reserved for xdpf */
+ hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
+
/* build_skb need to place skb_shared_info after SKB end, and
* also want to know the memory "truesize". Thus, need to
* know the memory frame size backing xdp_buff.
@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
* is not at a fixed memory location, with mixed length
* packets, which is bad for cache-line hotness.
*/
- frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
+ frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- pkt_data_start = xdpf->data - xdpf->headroom;
+ pkt_data_start = xdpf->data - hard_start_headroom;
skb = build_skb(pkt_data_start, frame_size);
if (!skb)
return NULL;
- skb_reserve(skb, xdpf->headroom);
+ skb_reserve(skb, hard_start_headroom);
__skb_put(skb, xdpf->len);
if (xdpf->metasize)
skb_metadata_set(skb, xdpf->metasize);
@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
* - RX ring dev queue index (skb_record_rx_queue)
*/
+ /* Allow SKB to reuse area used by xdp_frame */
+ xdp_scrub_frame(xdpf);
+
return skb;
}
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 2ada5e21dfa6..4a8f390a2b82 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
}
EXPORT_SYMBOL(bpf_prog_get_type_path);
-static void bpf_evict_inode(struct inode *inode)
-{
- enum bpf_type type;
-
- truncate_inode_pages_final(&inode->i_data);
- clear_inode(inode);
-
- if (S_ISLNK(inode->i_mode))
- kfree(inode->i_link);
- if (!bpf_inode_type(inode, &type))
- bpf_any_put(inode->i_private, type);
-}
-
/*
* Display the mount options in /proc/mounts.
*/
@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
+static void bpf_destroy_inode_deferred(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+ enum bpf_type type;
+
+ if (S_ISLNK(inode->i_mode))
+ kfree(inode->i_link);
+ if (!bpf_inode_type(inode, &type))
+ bpf_any_put(inode->i_private, type);
+ free_inode_nonrcu(inode);
+}
+
+static void bpf_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
+}
+
static const struct super_operations bpf_super_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.show_options = bpf_show_options,
- .evict_inode = bpf_evict_inode,
+ .destroy_inode = bpf_destroy_inode,
};
enum {
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 62f6bced3a3c..afca36f53c49 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
void *bpf_map_area_alloc(size_t size, int numa_node)
{
- /* We definitely need __GFP_NORETRY, so OOM killer doesn't
- * trigger under memory pressure as we really just want to
- * fail instead.
+ /* We really just want to fail instead of triggering OOM killer
+	 * under memory pressure, therefore we pass __GFP_NORETRY to kmalloc,
+ * which is used for lower order allocation requests.
+ *
+ * It has been observed that higher order allocation requests done by
+ * vmalloc with __GFP_NORETRY being set might fail due to not trying
+ * to reclaim memory from the page cache, thus we set
+ * __GFP_RETRY_MAYFAIL to avoid such situations.
*/
- const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+
+ const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
void *area;
if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
- area = kmalloc_node(size, GFP_USER | flags, numa_node);
+ area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
+ numa_node);
if (area != NULL)
return area;
}
- return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
- __builtin_return_address(0));
+ return __vmalloc_node_flags_caller(size, numa_node,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+ flags, __builtin_return_address(0));
}
void bpf_map_area_free(void *area)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a7b96bf0e654..09d5d972c9ff 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -212,7 +212,7 @@ struct bpf_call_arg_meta {
int access_size;
s64 msize_smax_value;
u64 msize_umax_value;
- int ptr_id;
+ int ref_obj_id;
int func_id;
};
@@ -346,35 +346,23 @@ static bool reg_type_may_be_null(enum bpf_reg_type type)
type == PTR_TO_TCP_SOCK_OR_NULL;
}
-static bool type_is_refcounted(enum bpf_reg_type type)
-{
- return type == PTR_TO_SOCKET;
-}
-
-static bool type_is_refcounted_or_null(enum bpf_reg_type type)
-{
- return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
-}
-
-static bool reg_is_refcounted(const struct bpf_reg_state *reg)
-{
- return type_is_refcounted(reg->type);
-}
-
static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
return reg->type == PTR_TO_MAP_VALUE &&
map_value_has_spin_lock(reg->map_ptr);
}
-static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
+static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
- return type_is_refcounted_or_null(reg->type);
+ return type == PTR_TO_SOCKET ||
+ type == PTR_TO_SOCKET_OR_NULL ||
+ type == PTR_TO_TCP_SOCK ||
+ type == PTR_TO_TCP_SOCK_OR_NULL;
}
-static bool arg_type_is_refcounted(enum bpf_arg_type type)
+static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
- return type == ARG_PTR_TO_SOCKET;
+ return type == ARG_PTR_TO_SOCK_COMMON;
}
/* Determine whether the function releases some resources allocated by another
@@ -392,6 +380,12 @@ static bool is_acquire_function(enum bpf_func_id func_id)
func_id == BPF_FUNC_sk_lookup_udp;
}
+static bool is_ptr_cast_function(enum bpf_func_id func_id)
+{
+ return func_id == BPF_FUNC_tcp_sock ||
+ func_id == BPF_FUNC_sk_fullsock;
+}
+
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
@@ -466,6 +460,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
verbose(env, ",call_%d", func(env, reg)->callsite);
} else {
verbose(env, "(id=%d", reg->id);
+ if (reg_type_may_be_refcounted_or_null(t))
+ verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
if (t != SCALAR_VALUE)
verbose(env, ",off=%d", reg->off);
if (type_is_pkt_pointer(t))
@@ -1901,8 +1897,9 @@ continue_func:
}
frame++;
if (frame >= MAX_CALL_FRAMES) {
- WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
- return -EFAULT;
+			verbose(env, "the call stack of %d frames is too deep!\n",
+ frame);
+ return -E2BIG;
}
goto process_func;
}
@@ -2414,16 +2411,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
/* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
if (!type_is_sk_pointer(type))
goto err_type;
- } else if (arg_type == ARG_PTR_TO_SOCKET) {
- expected_type = PTR_TO_SOCKET;
- if (type != expected_type)
- goto err_type;
- if (meta->ptr_id || !reg->id) {
- verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
- meta->ptr_id, reg->id);
- return -EFAULT;
+ if (reg->ref_obj_id) {
+ if (meta->ref_obj_id) {
+ verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
+ regno, reg->ref_obj_id,
+ meta->ref_obj_id);
+ return -EFAULT;
+ }
+ meta->ref_obj_id = reg->ref_obj_id;
}
- meta->ptr_id = reg->id;
} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
if (meta->func_id == BPF_FUNC_spin_lock) {
if (process_spin_lock(env, regno, true))
@@ -2740,32 +2736,38 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
return true;
}
-static bool check_refcount_ok(const struct bpf_func_proto *fn)
+static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
{
int count = 0;
- if (arg_type_is_refcounted(fn->arg1_type))
+ if (arg_type_may_be_refcounted(fn->arg1_type))
count++;
- if (arg_type_is_refcounted(fn->arg2_type))
+ if (arg_type_may_be_refcounted(fn->arg2_type))
count++;
- if (arg_type_is_refcounted(fn->arg3_type))
+ if (arg_type_may_be_refcounted(fn->arg3_type))
count++;
- if (arg_type_is_refcounted(fn->arg4_type))
+ if (arg_type_may_be_refcounted(fn->arg4_type))
count++;
- if (arg_type_is_refcounted(fn->arg5_type))
+ if (arg_type_may_be_refcounted(fn->arg5_type))
count++;
+ /* A reference acquiring function cannot acquire
+ * another refcounted ptr.
+ */
+ if (is_acquire_function(func_id) && count)
+ return false;
+
/* We only support one arg being unreferenced at the moment,
* which is sufficient for the helper functions we have right now.
*/
return count <= 1;
}
-static int check_func_proto(const struct bpf_func_proto *fn)
+static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) &&
- check_refcount_ok(fn) ? 0 : -EINVAL;
+ check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
}
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -2799,19 +2801,20 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
}
static void release_reg_references(struct bpf_verifier_env *env,
- struct bpf_func_state *state, int id)
+ struct bpf_func_state *state,
+ int ref_obj_id)
{
struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
- if (regs[i].id == id)
+ if (regs[i].ref_obj_id == ref_obj_id)
mark_reg_unknown(env, regs, i);
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
- if (reg_is_refcounted(reg) && reg->id == id)
+ if (reg->ref_obj_id == ref_obj_id)
__mark_reg_unknown(reg);
}
}
@@ -2820,15 +2823,20 @@ static void release_reg_references(struct bpf_verifier_env *env,
* resources. Identify all copies of the same pointer and clear the reference.
*/
static int release_reference(struct bpf_verifier_env *env,
- struct bpf_call_arg_meta *meta)
+ int ref_obj_id)
{
struct bpf_verifier_state *vstate = env->cur_state;
+ int err;
int i;
+ err = release_reference_state(cur_func(env), ref_obj_id);
+ if (err)
+ return err;
+
for (i = 0; i <= vstate->curframe; i++)
- release_reg_references(env, vstate->frame[i], meta->ptr_id);
+ release_reg_references(env, vstate->frame[i], ref_obj_id);
- return release_reference_state(cur_func(env), meta->ptr_id);
+ return 0;
}
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@@ -3047,7 +3055,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
- err = check_func_proto(fn);
+ err = check_func_proto(fn, func_id);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
@@ -3093,7 +3101,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return err;
}
} else if (is_release_function(func_id)) {
- err = release_reference(env, &meta);
+ err = release_reference(env, meta.ref_obj_id);
if (err) {
verbose(env, "func %s#%d reference has not been acquired before\n",
func_id_name(func_id), func_id);
@@ -3154,8 +3162,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
if (id < 0)
return id;
- /* For release_reference() */
+ /* For mark_ptr_or_null_reg() */
regs[BPF_REG_0].id = id;
+ /* For release_reference() */
+ regs[BPF_REG_0].ref_obj_id = id;
} else {
/* For mark_ptr_or_null_reg() */
regs[BPF_REG_0].id = ++env->id_gen;
@@ -3170,6 +3180,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return -EINVAL;
}
+ if (is_ptr_cast_function(func_id))
+ /* For release_reference() */
+ regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+
do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
@@ -3368,7 +3382,7 @@ do_sim:
*dst_reg = *ptr_reg;
}
ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
- if (!ptr_is_dst_reg)
+ if (!ptr_is_dst_reg && ret)
*dst_reg = tmp;
return !ret ? -EFAULT : 0;
}
@@ -4124,15 +4138,35 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
+static void __find_good_pkt_pointers(struct bpf_func_state *state,
+ struct bpf_reg_state *dst_reg,
+ enum bpf_reg_type type, u16 new_range)
+{
+ struct bpf_reg_state *reg;
+ int i;
+
+ for (i = 0; i < MAX_BPF_REG; i++) {
+ reg = &state->regs[i];
+ if (reg->type == type && reg->id == dst_reg->id)
+ /* keep the maximum range already checked */
+ reg->range = max(reg->range, new_range);
+ }
+
+ bpf_for_each_spilled_reg(i, state, reg) {
+ if (!reg)
+ continue;
+ if (reg->type == type && reg->id == dst_reg->id)
+ reg->range = max(reg->range, new_range);
+ }
+}
+
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type,
bool range_right_open)
{
- struct bpf_func_state *state = vstate->frame[vstate->curframe];
- struct bpf_reg_state *regs = state->regs, *reg;
u16 new_range;
- int i, j;
+ int i;
if (dst_reg->off < 0 ||
(dst_reg->off == 0 && range_right_open))
@@ -4197,20 +4231,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
* the range won't allow anything.
* dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
*/
- for (i = 0; i < MAX_BPF_REG; i++)
- if (regs[i].type == type && regs[i].id == dst_reg->id)
- /* keep the maximum range already checked */
- regs[i].range = max(regs[i].range, new_range);
-
- for (j = 0; j <= vstate->curframe; j++) {
- state = vstate->frame[j];
- bpf_for_each_spilled_reg(i, state, reg) {
- if (!reg)
- continue;
- if (reg->type == type && reg->id == dst_reg->id)
- reg->range = max(reg->range, new_range);
- }
- }
+ for (i = 0; i <= vstate->curframe; i++)
+ __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
+ new_range);
}
/* compute branch direction of the expression "if (reg opcode val) goto target;"
@@ -4665,17 +4688,41 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
} else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
reg->type = PTR_TO_TCP_SOCK;
}
- if (is_null || !(reg_is_refcounted(reg) ||
- reg_may_point_to_spin_lock(reg))) {
- /* We don't need id from this point onwards anymore,
- * thus we should better reset it, so that state
- * pruning has chances to take effect.
+ if (is_null) {
+ /* We don't need id and ref_obj_id from this point
+ * onwards anymore, thus we should better reset it,
+ * so that state pruning has chances to take effect.
+ */
+ reg->id = 0;
+ reg->ref_obj_id = 0;
+ } else if (!reg_may_point_to_spin_lock(reg)) {
+ /* For not-NULL ptr, reg->ref_obj_id will be reset
+ * in release_reg_references().
+ *
+ * reg->id is still used by spin_lock ptr. Other
+ * than spin_lock ptr type, reg->id can be reset.
*/
reg->id = 0;
}
}
}
+static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
+ bool is_null)
+{
+ struct bpf_reg_state *reg;
+ int i;
+
+ for (i = 0; i < MAX_BPF_REG; i++)
+ mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
+
+ bpf_for_each_spilled_reg(i, state, reg) {
+ if (!reg)
+ continue;
+ mark_ptr_or_null_reg(state, reg, id, is_null);
+ }
+}
+
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
@@ -4683,24 +4730,20 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
bool is_null)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
- struct bpf_reg_state *reg, *regs = state->regs;
+ struct bpf_reg_state *regs = state->regs;
+ u32 ref_obj_id = regs[regno].ref_obj_id;
u32 id = regs[regno].id;
- int i, j;
-
- if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
- release_reference_state(state, id);
+ int i;
- for (i = 0; i < MAX_BPF_REG; i++)
- mark_ptr_or_null_reg(state, &regs[i], id, is_null);
+ if (ref_obj_id && ref_obj_id == id && is_null)
+ /* regs[regno] is in the " == NULL" branch.
+ * No one could have freed the reference state before
+ * doing the NULL check.
+ */
+ WARN_ON_ONCE(release_reference_state(state, id));
- for (j = 0; j <= vstate->curframe; j++) {
- state = vstate->frame[j];
- bpf_for_each_spilled_reg(i, state, reg) {
- if (!reg)
- continue;
- mark_ptr_or_null_reg(state, reg, id, is_null);
- }
- }
+ for (i = 0; i <= vstate->curframe; i++)
+ __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
}
static bool try_match_pkt_pointers(const struct bpf_insn *insn,
@@ -6052,15 +6095,17 @@ static int propagate_liveness(struct bpf_verifier_env *env,
}
/* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
- /* We don't need to worry about FP liveness because it's read-only */
- for (i = 0; i < BPF_REG_FP; i++) {
- if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
- continue;
- if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
- err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
- &vparent->frame[vstate->curframe]->regs[i]);
- if (err)
- return err;
+ for (frame = 0; frame <= vstate->curframe; frame++) {
+ /* We don't need to worry about FP liveness, it's read-only */
+ for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
+ if (vparent->frame[frame]->regs[i].live & REG_LIVE_READ)
+ continue;
+ if (vstate->frame[frame]->regs[i].live & REG_LIVE_READ) {
+ err = mark_reg_read(env, &vstate->frame[frame]->regs[i],
+ &vparent->frame[frame]->regs[i]);
+ if (err)
+ return err;
+ }
}
}
@@ -6678,17 +6723,17 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
/* valid generic load 64-bit imm */
goto next_insn;
- if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
- verbose(env,
- "unrecognized bpf_ld_imm64 insn\n");
+ if (insn[0].src_reg != BPF_PSEUDO_MAP_FD ||
+ insn[1].imm != 0) {
+ verbose(env, "unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
- f = fdget(insn->imm);
+ f = fdget(insn[0].imm);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose(env, "fd %d is not pointing to valid bpf_map\n",
- insn->imm);
+ insn[0].imm);
return PTR_ERR(map);
}
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index c9a35f09e4b9..30e39f3932ad 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -7,6 +7,7 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/refcount.h>
+#include <linux/fs_context.h>
#define TRACE_CGROUP_PATH_LEN 1024
extern spinlock_t trace_cgroup_path_lock;
@@ -37,6 +38,31 @@ extern void __init enable_debug_cgroup(void);
} while (0)
/*
+ * The cgroup filesystem superblock creation/mount context.
+ */
+struct cgroup_fs_context {
+ struct kernfs_fs_context kfc;
+ struct cgroup_root *root;
+ struct cgroup_namespace *ns;
+ unsigned int flags; /* CGRP_ROOT_* flags */
+
+ /* cgroup1 bits */
+ bool cpuset_clone_children;
+ bool none; /* User explicitly requested empty subsystem */
+ bool all_ss; /* Seen 'all' option */
+ u16 subsys_mask; /* Selected subsystems */
+ char *name; /* Hierarchy name */
+ char *release_agent; /* Path for release notifications */
+};
+
+static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
+{
+ struct kernfs_fs_context *kfc = fc->fs_private;
+
+ return container_of(kfc, struct cgroup_fs_context, kfc);
+}
+
+/*
* A cgroup can be associated with multiple css_sets as different tasks may
* belong to different cgroups on different hierarchies. In the other
* direction, a css_set is naturally associated with multiple cgroups.
@@ -117,16 +143,6 @@ struct cgroup_mgctx {
#define DEFINE_CGROUP_MGCTX(name) \
struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
-struct cgroup_sb_opts {
- u16 subsys_mask;
- unsigned int flags;
- char *release_agent;
- bool cpuset_clone_children;
- char *name;
- /* User explicitly requested empty subsystem */
- bool none;
-};
-
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
@@ -197,12 +213,10 @@ int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
struct cgroup_namespace *ns);
void cgroup_free_root(struct cgroup_root *root);
-void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts);
+void init_cgroup_root(struct cgroup_fs_context *ctx);
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
-struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
- struct cgroup_root *root, unsigned long magic,
- struct cgroup_namespace *ns);
+int cgroup_do_get_tree(struct fs_context *fc);
int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp);
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
@@ -246,14 +260,15 @@ extern const struct proc_ns_operations cgroupns_operations;
*/
extern struct cftype cgroup1_base_files[];
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;
+extern const struct fs_parameter_description cgroup1_fs_parameters;
int proc_cgroupstats_show(struct seq_file *m, void *v);
bool cgroup1_ssid_disabled(int ssid);
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
void cgroup1_release_agent(struct work_struct *work);
void cgroup1_check_for_release(struct cgroup *cgrp);
-struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
- void *data, unsigned long magic,
- struct cgroup_namespace *ns);
+int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param);
+int cgroup1_get_tree(struct fs_context *fc);
+int cgroup1_reconfigure(struct fs_context *ctx);
#endif /* __CGROUP_INTERNAL_H */
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index f94a7229974e..c126b34fd4ff 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -13,9 +13,12 @@
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
+#include <linux/fs_parser.h>
#include <trace/events/cgroup.h>
+#define cg_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
+
/*
* pidlists linger the following amount before being destroyed. The goal
* is avoiding frequent destruction in the middle of consecutive read calls
@@ -906,172 +909,195 @@ static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_roo
return 0;
}
-static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
-{
- char *token, *o = data;
- bool all_ss = false, one_ss = false;
- u16 mask = U16_MAX;
- struct cgroup_subsys *ss;
- int nr_opts = 0;
- int i;
-
-#ifdef CONFIG_CPUSETS
- mask = ~((u16)1 << cpuset_cgrp_id);
-#endif
-
- memset(opts, 0, sizeof(*opts));
+enum cgroup1_param {
+ Opt_all,
+ Opt_clone_children,
+ Opt_cpuset_v2_mode,
+ Opt_name,
+ Opt_none,
+ Opt_noprefix,
+ Opt_release_agent,
+ Opt_xattr,
+};
- while ((token = strsep(&o, ",")) != NULL) {
- nr_opts++;
+static const struct fs_parameter_spec cgroup1_param_specs[] = {
+ fsparam_flag ("all", Opt_all),
+ fsparam_flag ("clone_children", Opt_clone_children),
+ fsparam_flag ("cpuset_v2_mode", Opt_cpuset_v2_mode),
+ fsparam_string("name", Opt_name),
+ fsparam_flag ("none", Opt_none),
+ fsparam_flag ("noprefix", Opt_noprefix),
+ fsparam_string("release_agent", Opt_release_agent),
+ fsparam_flag ("xattr", Opt_xattr),
+ {}
+};
- if (!*token)
- return -EINVAL;
- if (!strcmp(token, "none")) {
- /* Explicitly have no subsystems */
- opts->none = true;
- continue;
- }
- if (!strcmp(token, "all")) {
- /* Mutually exclusive option 'all' + subsystem name */
- if (one_ss)
- return -EINVAL;
- all_ss = true;
- continue;
- }
- if (!strcmp(token, "noprefix")) {
- opts->flags |= CGRP_ROOT_NOPREFIX;
- continue;
- }
- if (!strcmp(token, "clone_children")) {
- opts->cpuset_clone_children = true;
- continue;
- }
- if (!strcmp(token, "cpuset_v2_mode")) {
- opts->flags |= CGRP_ROOT_CPUSET_V2_MODE;
- continue;
- }
- if (!strcmp(token, "xattr")) {
- opts->flags |= CGRP_ROOT_XATTR;
- continue;
- }
- if (!strncmp(token, "release_agent=", 14)) {
- /* Specifying two release agents is forbidden */
- if (opts->release_agent)
- return -EINVAL;
- opts->release_agent =
- kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
- if (!opts->release_agent)
- return -ENOMEM;
- continue;
- }
- if (!strncmp(token, "name=", 5)) {
- const char *name = token + 5;
-
- /* blocked by boot param? */
- if (cgroup_no_v1_named)
- return -ENOENT;
- /* Can't specify an empty name */
- if (!strlen(name))
- return -EINVAL;
- /* Must match [\w.-]+ */
- for (i = 0; i < strlen(name); i++) {
- char c = name[i];
- if (isalnum(c))
- continue;
- if ((c == '.') || (c == '-') || (c == '_'))
- continue;
- return -EINVAL;
- }
- /* Specifying two names is forbidden */
- if (opts->name)
- return -EINVAL;
- opts->name = kstrndup(name,
- MAX_CGROUP_ROOT_NAMELEN - 1,
- GFP_KERNEL);
- if (!opts->name)
- return -ENOMEM;
+const struct fs_parameter_description cgroup1_fs_parameters = {
+ .name = "cgroup1",
+ .specs = cgroup1_param_specs,
+};
- continue;
+int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
+ struct cgroup_subsys *ss;
+ struct fs_parse_result result;
+ int opt, i;
+
+ opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result);
+ if (opt == -ENOPARAM) {
+ if (strcmp(param->key, "source") == 0) {
+ fc->source = param->string;
+ param->string = NULL;
+ return 0;
}
-
for_each_subsys(ss, i) {
- if (strcmp(token, ss->legacy_name))
+ if (strcmp(param->key, ss->legacy_name))
continue;
- if (!cgroup_ssid_enabled(i))
+ ctx->subsys_mask |= (1 << i);
+ return 0;
+ }
+ return cg_invalf(fc, "cgroup1: Unknown subsys name '%s'", param->key);
+ }
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_none:
+ /* Explicitly have no subsystems */
+ ctx->none = true;
+ break;
+ case Opt_all:
+ ctx->all_ss = true;
+ break;
+ case Opt_noprefix:
+ ctx->flags |= CGRP_ROOT_NOPREFIX;
+ break;
+ case Opt_clone_children:
+ ctx->cpuset_clone_children = true;
+ break;
+ case Opt_cpuset_v2_mode:
+ ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
+ break;
+ case Opt_xattr:
+ ctx->flags |= CGRP_ROOT_XATTR;
+ break;
+ case Opt_release_agent:
+ /* Specifying two release agents is forbidden */
+ if (ctx->release_agent)
+ return cg_invalf(fc, "cgroup1: release_agent respecified");
+ ctx->release_agent = param->string;
+ param->string = NULL;
+ break;
+ case Opt_name:
+ /* blocked by boot param? */
+ if (cgroup_no_v1_named)
+ return -ENOENT;
+ /* Can't specify an empty name */
+ if (!param->size)
+ return cg_invalf(fc, "cgroup1: Empty name");
+ if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
+ return cg_invalf(fc, "cgroup1: Name too long");
+ /* Must match [\w.-]+ */
+ for (i = 0; i < param->size; i++) {
+ char c = param->string[i];
+ if (isalnum(c))
continue;
- if (cgroup1_ssid_disabled(i))
+ if ((c == '.') || (c == '-') || (c == '_'))
continue;
-
- /* Mutually exclusive option 'all' + subsystem name */
- if (all_ss)
- return -EINVAL;
- opts->subsys_mask |= (1 << i);
- one_ss = true;
-
- break;
+ return cg_invalf(fc, "cgroup1: Invalid name");
}
- if (i == CGROUP_SUBSYS_COUNT)
- return -ENOENT;
+ /* Specifying two names is forbidden */
+ if (ctx->name)
+ return cg_invalf(fc, "cgroup1: name respecified");
+ ctx->name = param->string;
+ param->string = NULL;
+ break;
}
+ return 0;
+}
+
+static int check_cgroupfs_options(struct fs_context *fc)
+{
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
+ u16 mask = U16_MAX;
+ u16 enabled = 0;
+ struct cgroup_subsys *ss;
+ int i;
+
+#ifdef CONFIG_CPUSETS
+ mask = ~((u16)1 << cpuset_cgrp_id);
+#endif
+ for_each_subsys(ss, i)
+ if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
+ enabled |= 1 << i;
+
+ ctx->subsys_mask &= enabled;
/*
- * If the 'all' option was specified select all the subsystems,
- * otherwise if 'none', 'name=' and a subsystem name options were
- * not specified, let's default to 'all'
+	 * In the absence of 'none', 'name=' or subsystem name options,
+ * let's default to 'all'.
*/
- if (all_ss || (!one_ss && !opts->none && !opts->name))
- for_each_subsys(ss, i)
- if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
- opts->subsys_mask |= (1 << i);
+ if (!ctx->subsys_mask && !ctx->none && !ctx->name)
+ ctx->all_ss = true;
+
+ if (ctx->all_ss) {
+ /* Mutually exclusive option 'all' + subsystem name */
+ if (ctx->subsys_mask)
+ return cg_invalf(fc, "cgroup1: subsys name conflicts with all");
+ /* 'all' => select all the subsystems */
+ ctx->subsys_mask = enabled;
+ }
/*
* We either have to specify by name or by subsystems. (So all
* empty hierarchies must have a name).
*/
- if (!opts->subsys_mask && !opts->name)
- return -EINVAL;
+ if (!ctx->subsys_mask && !ctx->name)
+ return cg_invalf(fc, "cgroup1: Need name or subsystem set");
/*
* Option noprefix was introduced just for backward compatibility
* with the old cpuset, so we allow noprefix only if mounting just
* the cpuset subsystem.
*/
- if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
- return -EINVAL;
+ if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
+ return cg_invalf(fc, "cgroup1: noprefix used incorrectly");
/* Can't specify "none" and some subsystems */
- if (opts->subsys_mask && opts->none)
- return -EINVAL;
+ if (ctx->subsys_mask && ctx->none)
+ return cg_invalf(fc, "cgroup1: none used incorrectly");
return 0;
}
-static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
+int cgroup1_reconfigure(struct fs_context *fc)
{
- int ret = 0;
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
+ struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
struct cgroup_root *root = cgroup_root_from_kf(kf_root);
- struct cgroup_sb_opts opts;
+ int ret = 0;
u16 added_mask, removed_mask;
cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
/* See what subsystems are wanted */
- ret = parse_cgroupfs_options(data, &opts);
+ ret = check_cgroupfs_options(fc);
if (ret)
goto out_unlock;
- if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
+ if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
task_tgid_nr(current), current->comm);
- added_mask = opts.subsys_mask & ~root->subsys_mask;
- removed_mask = root->subsys_mask & ~opts.subsys_mask;
+ added_mask = ctx->subsys_mask & ~root->subsys_mask;
+ removed_mask = root->subsys_mask & ~ctx->subsys_mask;
/* Don't allow flags or name to change at remount */
- if ((opts.flags ^ root->flags) ||
- (opts.name && strcmp(opts.name, root->name))) {
- pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
- opts.flags, opts.name ?: "", root->flags, root->name);
+ if ((ctx->flags ^ root->flags) ||
+ (ctx->name && strcmp(ctx->name, root->name))) {
+ cg_invalf(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
+ ctx->flags, ctx->name ?: "", root->flags, root->name);
ret = -EINVAL;
goto out_unlock;
}
@@ -1088,17 +1114,15 @@ static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));
- if (opts.release_agent) {
+ if (ctx->release_agent) {
spin_lock(&release_agent_path_lock);
- strcpy(root->release_agent_path, opts.release_agent);
+ strcpy(root->release_agent_path, ctx->release_agent);
spin_unlock(&release_agent_path_lock);
}
trace_cgroup_remount(root);
out_unlock:
- kfree(opts.release_agent);
- kfree(opts.name);
mutex_unlock(&cgroup_mutex);
return ret;
}
@@ -1106,28 +1130,30 @@ static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
.rename = cgroup1_rename,
.show_options = cgroup1_show_options,
- .remount_fs = cgroup1_remount,
.mkdir = cgroup_mkdir,
.rmdir = cgroup_rmdir,
.show_path = cgroup_show_path,
};
-struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
- void *data, unsigned long magic,
- struct cgroup_namespace *ns)
+/*
+ * The guts of cgroup1 mount - find or create cgroup_root to use.
+ * Called with cgroup_mutex held; returns 0 on success, a negative
+ * errno on error, and a positive value when the candidate root is
+ * busy dying.
+ * On success it stashes a reference to cgroup_root into given
+ * cgroup_fs_context; that reference is *NOT* counting towards the
+ * cgroup_root refcount.
+ */
+static int cgroup1_root_to_use(struct fs_context *fc)
{
- struct cgroup_sb_opts opts;
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
struct cgroup_root *root;
struct cgroup_subsys *ss;
- struct dentry *dentry;
int i, ret;
- cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
-
/* First find the desired set of subsystems */
- ret = parse_cgroupfs_options(data, &opts);
+ ret = check_cgroupfs_options(fc);
if (ret)
- goto out_unlock;
+ return ret;
/*
* Destruction of cgroup root is asynchronous, so subsystems may
@@ -1137,16 +1163,12 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
* starting. Testing ref liveliness is good enough.
*/
for_each_subsys(ss, i) {
- if (!(opts.subsys_mask & (1 << i)) ||
+ if (!(ctx->subsys_mask & (1 << i)) ||
ss->root == &cgrp_dfl_root)
continue;
- if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
- mutex_unlock(&cgroup_mutex);
- msleep(10);
- ret = restart_syscall();
- goto out_free;
- }
+ if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
+ return 1; /* restart */
cgroup_put(&ss->root->cgrp);
}
@@ -1161,8 +1183,8 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
* name matches but subsys_mask doesn't, we should fail.
* Remember whether name matched.
*/
- if (opts.name) {
- if (strcmp(opts.name, root->name))
+ if (ctx->name) {
+ if (strcmp(ctx->name, root->name))
continue;
name_match = true;
}
@@ -1171,19 +1193,18 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
* If we asked for subsystems (or explicitly for no
* subsystems) then they must match.
*/
- if ((opts.subsys_mask || opts.none) &&
- (opts.subsys_mask != root->subsys_mask)) {
+ if ((ctx->subsys_mask || ctx->none) &&
+ (ctx->subsys_mask != root->subsys_mask)) {
if (!name_match)
continue;
- ret = -EBUSY;
- goto out_unlock;
+ return -EBUSY;
}
- if (root->flags ^ opts.flags)
+ if (root->flags ^ ctx->flags)
pr_warn("new mount options do not match the existing superblock, will be ignored\n");
- ret = 0;
- goto out_unlock;
+ ctx->root = root;
+ return 0;
}
/*
@@ -1191,55 +1212,58 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
* specification is allowed for already existing hierarchies but we
* can't create a new one without subsys specification.
*/
- if (!opts.subsys_mask && !opts.none) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!ctx->subsys_mask && !ctx->none)
+ return cg_invalf(fc, "cgroup1: No subsys list or none specified");
/* Hierarchies may only be created in the initial cgroup namespace. */
- if (ns != &init_cgroup_ns) {
- ret = -EPERM;
- goto out_unlock;
- }
+ if (ctx->ns != &init_cgroup_ns)
+ return -EPERM;
root = kzalloc(sizeof(*root), GFP_KERNEL);
- if (!root) {
- ret = -ENOMEM;
- goto out_unlock;
- }
+ if (!root)
+ return -ENOMEM;
- init_cgroup_root(root, &opts);
+ ctx->root = root;
+ init_cgroup_root(ctx);
- ret = cgroup_setup_root(root, opts.subsys_mask);
+ ret = cgroup_setup_root(root, ctx->subsys_mask);
if (ret)
cgroup_free_root(root);
+ return ret;
+}
-out_unlock:
- if (!ret && !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
- mutex_unlock(&cgroup_mutex);
- msleep(10);
- ret = restart_syscall();
- goto out_free;
- }
- mutex_unlock(&cgroup_mutex);
-out_free:
- kfree(opts.release_agent);
- kfree(opts.name);
+int cgroup1_get_tree(struct fs_context *fc)
+{
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
+ int ret;
- if (ret)
- return ERR_PTR(ret);
+ /* Check if the caller has permission to mount. */
+ if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
+
+ ret = cgroup1_root_to_use(fc);
+ if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
+ ret = 1; /* restart */
- dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
- CGROUP_SUPER_MAGIC, ns);
+ mutex_unlock(&cgroup_mutex);
- if (!IS_ERR(dentry) && percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
- struct super_block *sb = dentry->d_sb;
- dput(dentry);
+ if (!ret)
+ ret = cgroup_do_get_tree(fc);
+
+ if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
+ struct super_block *sb = fc->root->d_sb;
+ dput(fc->root);
deactivate_locked_super(sb);
+ ret = 1;
+ }
+
+ if (unlikely(ret > 0)) {
msleep(10);
- dentry = ERR_PTR(restart_syscall());
+ return restart_syscall();
}
- return dentry;
+ return ret;
}
static int __init cgroup1_wq_init(void)
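
The cgroup-v1.c hunks above swap a hand-rolled strsep() loop for the new mount API's typed parameter tables. As a hedged sketch of the same pattern for a hypothetical "examplefs" — every example* name below is invented; only fs_parse(), the fsparam_* spec macros and the description struct are the API this series uses:

struct examplefs_fs_context {
        char    *label;
        bool    ro_fallback;
};

enum examplefs_param { Opt_ro_fallback, Opt_label };

static const struct fs_parameter_spec examplefs_param_specs[] = {
        fsparam_flag  ("ro_fallback", Opt_ro_fallback),
        fsparam_string("label", Opt_label),
        {}
};

static const struct fs_parameter_description examplefs_fs_parameters = {
        .name   = "examplefs",
        .specs  = examplefs_param_specs,
};

static int examplefs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct examplefs_fs_context *ctx = fc->fs_private;
        struct fs_parse_result result;
        int opt;

        opt = fs_parse(fc, &examplefs_fs_parameters, param, &result);
        if (opt < 0)
                return opt;     /* includes -ENOPARAM for unknown keys */

        switch (opt) {
        case Opt_ro_fallback:
                ctx->ro_fallback = true;
                break;
        case Opt_label:
                kfree(ctx->label);
                ctx->label = param->string;     /* take ownership, as cgroup1 does for name= */
                param->string = NULL;
                break;
        }
        return 0;
}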
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index eef24a25bda7..3f2b4bde0f9c 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -54,6 +54,7 @@
#include <linux/proc_ns.h>
#include <linux/nsproxy.h>
#include <linux/file.h>
+#include <linux/fs_parser.h>
#include <linux/sched/cputime.h>
#include <linux/psi.h>
#include <net/sock.h>
@@ -1772,26 +1773,37 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
return len;
}
-static int parse_cgroup_root_flags(char *data, unsigned int *root_flags)
-{
- char *token;
+enum cgroup2_param {
+ Opt_nsdelegate,
+ nr__cgroup2_params
+};
- *root_flags = 0;
+static const struct fs_parameter_spec cgroup2_param_specs[] = {
+ fsparam_flag ("nsdelegate", Opt_nsdelegate),
+ {}
+};
- if (!data || *data == '\0')
- return 0;
+static const struct fs_parameter_description cgroup2_fs_parameters = {
+ .name = "cgroup2",
+ .specs = cgroup2_param_specs,
+};
- while ((token = strsep(&data, ",")) != NULL) {
- if (!strcmp(token, "nsdelegate")) {
- *root_flags |= CGRP_ROOT_NS_DELEGATE;
- continue;
- }
+static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
+ struct fs_parse_result result;
+ int opt;
- pr_err("cgroup2: unknown option \"%s\"\n", token);
- return -EINVAL;
- }
+ opt = fs_parse(fc, &cgroup2_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
- return 0;
+ switch (opt) {
+ case Opt_nsdelegate:
+ ctx->flags |= CGRP_ROOT_NS_DELEGATE;
+ return 0;
+ }
+ return -EINVAL;
}
static void apply_cgroup_root_flags(unsigned int root_flags)
@@ -1811,16 +1823,11 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
return 0;
}
-static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
+static int cgroup_reconfigure(struct fs_context *fc)
{
- unsigned int root_flags;
- int ret;
-
- ret = parse_cgroup_root_flags(data, &root_flags);
- if (ret)
- return ret;
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
- apply_cgroup_root_flags(root_flags);
+ apply_cgroup_root_flags(ctx->flags);
return 0;
}
@@ -1908,8 +1915,9 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent);
}
-void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
+void init_cgroup_root(struct cgroup_fs_context *ctx)
{
+ struct cgroup_root *root = ctx->root;
struct cgroup *cgrp = &root->cgrp;
INIT_LIST_HEAD(&root->root_list);
@@ -1918,12 +1926,12 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
init_cgroup_housekeeping(cgrp);
idr_init(&root->cgroup_idr);
- root->flags = opts->flags;
- if (opts->release_agent)
- strscpy(root->release_agent_path, opts->release_agent, PATH_MAX);
- if (opts->name)
- strscpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN);
- if (opts->cpuset_clone_children)
+ root->flags = ctx->flags;
+ if (ctx->release_agent)
+ strscpy(root->release_agent_path, ctx->release_agent, PATH_MAX);
+ if (ctx->name)
+ strscpy(root->name, ctx->name, MAX_CGROUP_ROOT_NAMELEN);
+ if (ctx->cpuset_clone_children)
set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}
@@ -2028,60 +2036,104 @@ out:
return ret;
}
-struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
- struct cgroup_root *root, unsigned long magic,
- struct cgroup_namespace *ns)
+int cgroup_do_get_tree(struct fs_context *fc)
{
- struct dentry *dentry;
- bool new_sb = false;
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
+ int ret;
- dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb);
+ ctx->kfc.root = ctx->root->kf_root;
+ if (fc->fs_type == &cgroup2_fs_type)
+ ctx->kfc.magic = CGROUP2_SUPER_MAGIC;
+ else
+ ctx->kfc.magic = CGROUP_SUPER_MAGIC;
+ ret = kernfs_get_tree(fc);
/*
* In non-init cgroup namespace, instead of root cgroup's dentry,
* we return the dentry corresponding to the cgroupns->root_cgrp.
*/
- if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
+ if (!ret && ctx->ns != &init_cgroup_ns) {
struct dentry *nsdentry;
- struct super_block *sb = dentry->d_sb;
+ struct super_block *sb = fc->root->d_sb;
struct cgroup *cgrp;
mutex_lock(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
- cgrp = cset_cgroup_from_root(ns->root_cset, root);
+ cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);
spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
nsdentry = kernfs_node_dentry(cgrp->kn, sb);
- dput(dentry);
- if (IS_ERR(nsdentry))
+ dput(fc->root);
+ fc->root = nsdentry;
+ if (IS_ERR(nsdentry)) {
+ ret = PTR_ERR(nsdentry);
deactivate_locked_super(sb);
- dentry = nsdentry;
+ }
}
- if (!new_sb)
- cgroup_put(&root->cgrp);
+ if (!ctx->kfc.new_sb_created)
+ cgroup_put(&ctx->root->cgrp);
- return dentry;
+ return ret;
}
-static struct dentry *cgroup_mount(struct file_system_type *fs_type,
- int flags, const char *unused_dev_name,
- void *data)
+/*
+ * Destroy a cgroup filesystem context.
+ */
+static void cgroup_fs_context_free(struct fs_context *fc)
{
- struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
- struct dentry *dentry;
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
+
+ kfree(ctx->name);
+ kfree(ctx->release_agent);
+ put_cgroup_ns(ctx->ns);
+ kernfs_free_fs_context(fc);
+ kfree(ctx);
+}
+
+static int cgroup_get_tree(struct fs_context *fc)
+{
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
int ret;
- get_cgroup_ns(ns);
+ cgrp_dfl_visible = true;
+ cgroup_get_live(&cgrp_dfl_root.cgrp);
+ ctx->root = &cgrp_dfl_root;
- /* Check if the caller has permission to mount. */
- if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) {
- put_cgroup_ns(ns);
- return ERR_PTR(-EPERM);
- }
+ ret = cgroup_do_get_tree(fc);
+ if (!ret)
+ apply_cgroup_root_flags(ctx->flags);
+ return ret;
+}
+
+static const struct fs_context_operations cgroup_fs_context_ops = {
+ .free = cgroup_fs_context_free,
+ .parse_param = cgroup2_parse_param,
+ .get_tree = cgroup_get_tree,
+ .reconfigure = cgroup_reconfigure,
+};
+
+static const struct fs_context_operations cgroup1_fs_context_ops = {
+ .free = cgroup_fs_context_free,
+ .parse_param = cgroup1_parse_param,
+ .get_tree = cgroup1_get_tree,
+ .reconfigure = cgroup1_reconfigure,
+};
+
+/*
+ * Initialise the cgroup filesystem creation/reconfiguration context. Notably,
+ * we select the namespace we're going to use.
+ */
+static int cgroup_init_fs_context(struct fs_context *fc)
+{
+ struct cgroup_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct cgroup_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
/*
* The first time anyone tries to mount a cgroup, enable the list
@@ -2090,29 +2142,18 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
if (!use_task_css_set_links)
cgroup_enable_task_cg_lists();
- if (fs_type == &cgroup2_fs_type) {
- unsigned int root_flags;
-
- ret = parse_cgroup_root_flags(data, &root_flags);
- if (ret) {
- put_cgroup_ns(ns);
- return ERR_PTR(ret);
- }
-
- cgrp_dfl_visible = true;
- cgroup_get_live(&cgrp_dfl_root.cgrp);
-
- dentry = cgroup_do_mount(&cgroup2_fs_type, flags, &cgrp_dfl_root,
- CGROUP2_SUPER_MAGIC, ns);
- if (!IS_ERR(dentry))
- apply_cgroup_root_flags(root_flags);
- } else {
- dentry = cgroup1_mount(&cgroup_fs_type, flags, data,
- CGROUP_SUPER_MAGIC, ns);
- }
-
- put_cgroup_ns(ns);
- return dentry;
+ ctx->ns = current->nsproxy->cgroup_ns;
+ get_cgroup_ns(ctx->ns);
+ fc->fs_private = &ctx->kfc;
+ if (fc->fs_type == &cgroup2_fs_type)
+ fc->ops = &cgroup_fs_context_ops;
+ else
+ fc->ops = &cgroup1_fs_context_ops;
+ if (fc->user_ns)
+ put_user_ns(fc->user_ns);
+ fc->user_ns = get_user_ns(ctx->ns->user_ns);
+ fc->global = true;
+ return 0;
}
static void cgroup_kill_sb(struct super_block *sb)
@@ -2135,17 +2176,19 @@ static void cgroup_kill_sb(struct super_block *sb)
}
struct file_system_type cgroup_fs_type = {
- .name = "cgroup",
- .mount = cgroup_mount,
- .kill_sb = cgroup_kill_sb,
- .fs_flags = FS_USERNS_MOUNT,
+ .name = "cgroup",
+ .init_fs_context = cgroup_init_fs_context,
+ .parameters = &cgroup1_fs_parameters,
+ .kill_sb = cgroup_kill_sb,
+ .fs_flags = FS_USERNS_MOUNT,
};
static struct file_system_type cgroup2_fs_type = {
- .name = "cgroup2",
- .mount = cgroup_mount,
- .kill_sb = cgroup_kill_sb,
- .fs_flags = FS_USERNS_MOUNT,
+ .name = "cgroup2",
+ .init_fs_context = cgroup_init_fs_context,
+ .parameters = &cgroup2_fs_parameters,
+ .kill_sb = cgroup_kill_sb,
+ .fs_flags = FS_USERNS_MOUNT,
};
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
@@ -5280,7 +5323,6 @@ int cgroup_rmdir(struct kernfs_node *kn)
static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
.show_options = cgroup_show_options,
- .remount_fs = cgroup_remount,
.mkdir = cgroup_mkdir,
.rmdir = cgroup_rmdir,
.show_path = cgroup_show_path,
@@ -5347,11 +5389,12 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
*/
int __init cgroup_init_early(void)
{
- static struct cgroup_sb_opts __initdata opts;
+ static struct cgroup_fs_context __initdata ctx;
struct cgroup_subsys *ss;
int i;
- init_cgroup_root(&cgrp_dfl_root, &opts);
+ ctx.root = &cgrp_dfl_root;
+ init_cgroup_root(&ctx);
cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
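
Taken together, the cgroup.c hunks show the canonical fs_context conversion: .mount disappears, .init_fs_context allocates the per-mount context and installs an fs_context_operations table, and file_system_type.parameters points the VFS at the option descriptions. A hedged sketch of that wiring, continuing the invented examplefs names from the earlier sketch:

static void examplefs_free_fc(struct fs_context *fc)
{
        struct examplefs_fs_context *ctx = fc->fs_private;

        if (ctx) {
                kfree(ctx->label);
                kfree(ctx);
        }
}

static int examplefs_get_tree(struct fs_context *fc)
{
        /* a real filesystem would call get_tree_nodev(), kernfs_get_tree()
         * or similar here; this stub only marks the slot */
        return -EOPNOTSUPP;
}

static const struct fs_context_operations examplefs_context_ops = {
        .free           = examplefs_free_fc,
        .parse_param    = examplefs_parse_param,
        .get_tree       = examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
        struct examplefs_fs_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        fc->fs_private = ctx;
        fc->ops = &examplefs_context_ops;
        return 0;
}

static struct file_system_type examplefs_type = {
        .name            = "examplefs",
        .init_fs_context = examplefs_init_fs_context,
        .parameters      = &examplefs_fs_parameters,
        .kill_sb         = kill_anon_super,
};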
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 72afd55f70c6..6a1942ed781c 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -39,6 +39,7 @@
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
@@ -359,25 +360,52 @@ static inline bool is_in_v2_mode(void)
* users. If someone tries to mount the "cpuset" filesystem, we
* silently switch it to mount "cgroup" instead
*/
-static struct dentry *cpuset_mount(struct file_system_type *fs_type,
- int flags, const char *unused_dev_name, void *data)
-{
- struct file_system_type *cgroup_fs = get_fs_type("cgroup");
- struct dentry *ret = ERR_PTR(-ENODEV);
- if (cgroup_fs) {
- char mountopts[] =
- "cpuset,noprefix,"
- "release_agent=/sbin/cpuset_release_agent";
- ret = cgroup_fs->mount(cgroup_fs, flags,
- unused_dev_name, mountopts);
- put_filesystem(cgroup_fs);
+static int cpuset_get_tree(struct fs_context *fc)
+{
+ struct file_system_type *cgroup_fs;
+ struct fs_context *new_fc;
+ int ret;
+
+ cgroup_fs = get_fs_type("cgroup");
+ if (!cgroup_fs)
+ return -ENODEV;
+
+ new_fc = fs_context_for_mount(cgroup_fs, fc->sb_flags);
+ if (IS_ERR(new_fc)) {
+ ret = PTR_ERR(new_fc);
+ } else {
+ static const char agent_path[] = "/sbin/cpuset_release_agent";
+ ret = vfs_parse_fs_string(new_fc, "cpuset", NULL, 0);
+ if (!ret)
+ ret = vfs_parse_fs_string(new_fc, "noprefix", NULL, 0);
+ if (!ret)
+ ret = vfs_parse_fs_string(new_fc, "release_agent",
+ agent_path, sizeof(agent_path) - 1);
+ if (!ret)
+ ret = vfs_get_tree(new_fc);
+ if (!ret) { /* steal the result */
+ fc->root = new_fc->root;
+ new_fc->root = NULL;
+ }
+ put_fs_context(new_fc);
}
+ put_filesystem(cgroup_fs);
return ret;
}
+static const struct fs_context_operations cpuset_fs_context_ops = {
+ .get_tree = cpuset_get_tree,
+};
+
+static int cpuset_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &cpuset_fs_context_ops;
+ return 0;
+}
+
static struct file_system_type cpuset_fs_type = {
- .name = "cpuset",
- .mount = cpuset_mount,
+ .name = "cpuset",
+ .init_fs_context = cpuset_init_fs_context,
};
/*
@@ -712,11 +740,10 @@ static inline int nr_cpusets(void)
* Must be called with cpuset_mutex held.
*
* The three key local variables below are:
- * q - a linked-list queue of cpuset pointers, used to implement a
- * top-down scan of all cpusets. This scan loads a pointer
- * to each cpuset marked is_sched_load_balance into the
- * array 'csa'. For our purposes, rebuilding the schedulers
- * sched domains, we can ignore !is_sched_load_balance cpusets.
+ * cp - cpuset pointer, used (together with pos_css) to perform a
+ * top-down scan of all cpusets. For our purposes, rebuilding
+ * the schedulers sched domains, we can ignore !is_sched_load_
+ * balance cpusets.
* csa - (for CpuSet Array) Array of pointers to all the cpusets
* that need to be load balanced, for convenient iterative
* access by the subsequent code that finds the best partition,
@@ -747,7 +774,7 @@ static inline int nr_cpusets(void)
static int generate_sched_domains(cpumask_var_t **domains,
struct sched_domain_attr **attributes)
{
- struct cpuset *cp; /* scans q */
+ struct cpuset *cp; /* top-down scan of cpusets */
struct cpuset **csa; /* array of all cpuset ptrs */
int csn; /* how many cpuset ptrs in csa so far */
int i, j, k; /* indices for partition finding loops */
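
cpuset_get_tree() above also doubles as a template for mounting one filesystem from inside another: build a context with fs_context_for_mount(), feed it options as strings, take the tree, then steal fc->root. A hedged generic sketch (error handling condensed; "someopt" is a placeholder option):

static int example_mount_other(struct fs_context *fc, const char *type_name)
{
        struct file_system_type *type = get_fs_type(type_name);
        struct fs_context *new_fc;
        int ret;

        if (!type)
                return -ENODEV;

        new_fc = fs_context_for_mount(type, fc->sb_flags);
        if (IS_ERR(new_fc)) {
                put_filesystem(type);
                return PTR_ERR(new_fc);
        }

        ret = vfs_parse_fs_string(new_fc, "someopt", NULL, 0);
        if (!ret)
                ret = vfs_get_tree(new_fc);
        if (!ret) {
                /* steal the result, as cpuset does */
                fc->root = new_fc->root;
                new_fc->root = NULL;
        }
        put_fs_context(new_fc);
        put_filesystem(type);
        return ret;
}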
diff --git a/kernel/cgroup/rdma.c b/kernel/cgroup/rdma.c
index d3bbb757ee49..1d75ae7f1cb7 100644
--- a/kernel/cgroup/rdma.c
+++ b/kernel/cgroup/rdma.c
@@ -313,10 +313,8 @@ EXPORT_SYMBOL(rdmacg_try_charge);
* If the IB stack wishes a device to participate in rdma cgroup resource
* tracking, it must invoke this API to register with rdma cgroup before
* any user space application can start using the RDMA resources.
- * Returns 0 on success or EINVAL when table length given is beyond
- * supported size.
*/
-int rdmacg_register_device(struct rdmacg_device *device)
+void rdmacg_register_device(struct rdmacg_device *device)
{
INIT_LIST_HEAD(&device->dev_node);
INIT_LIST_HEAD(&device->rpools);
@@ -324,7 +322,6 @@ int rdmacg_register_device(struct rdmacg_device *device)
mutex_lock(&rdmacg_mutex);
list_add_tail(&device->dev_node, &rdmacg_devices);
mutex_unlock(&rdmacg_mutex);
- return 0;
}
EXPORT_SYMBOL(rdmacg_register_device);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 025f419d16f6..f2ef10460698 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -9,6 +9,7 @@
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
+#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
@@ -564,6 +565,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}
+static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+{
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+ return true;
+ /*
+ * When CPU hotplug is disabled, then taking the CPU down is not
+	 * When CPU hotplug is disabled, taking the CPU down is not
+ * subsystem specific mechanisms are not available. So the CPU
+ * which would be completely unplugged again needs to stay around
+ * in the current state.
+ */
+ return st->state <= CPUHP_BRINGUP_CPU;
+}
+
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
@@ -574,8 +589,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
st->state++;
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
if (ret) {
- st->target = prev_state;
- undo_cpu_up(cpu, st);
+ if (can_rollback_cpu(st)) {
+ st->target = prev_state;
+ undo_cpu_up(cpu, st);
+ }
break;
}
}
@@ -844,6 +861,8 @@ static int take_cpu_down(void *_param)
/* Give up timekeeping duties */
tick_handover_do_timer();
+ /* Remove CPU from timer broadcasting */
+ tick_offline_cpu(cpu);
/* Park the stopper thread */
stop_machine_park(cpu);
return 0;
@@ -1183,8 +1202,15 @@ int freeze_secondary_cpus(int primary)
int cpu, error = 0;
cpu_maps_update_begin();
- if (!cpu_online(primary))
+ if (primary == -1) {
primary = cpumask_first(cpu_online_mask);
+ if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
+ primary = housekeeping_any_cpu(HK_FLAG_TIMER);
+ } else {
+ if (!cpu_online(primary))
+ primary = cpumask_first(cpu_online_mask);
+ }
+
/*
* We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
@@ -2017,19 +2043,6 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
#ifdef CONFIG_HOTPLUG_SMT
-static const char *smt_states[] = {
- [CPU_SMT_ENABLED] = "on",
- [CPU_SMT_DISABLED] = "off",
- [CPU_SMT_FORCE_DISABLED] = "forceoff",
- [CPU_SMT_NOT_SUPPORTED] = "notsupported",
-};
-
-static ssize_t
-show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
-}
-
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
@@ -2100,9 +2113,10 @@ static int cpuhp_smt_enable(void)
return ret;
}
+
static ssize_t
-store_smt_control(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+__store_smt_control(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int ctrlval, ret;
@@ -2140,14 +2154,44 @@ store_smt_control(struct device *dev, struct device_attribute *attr,
unlock_device_hotplug();
return ret ? ret : count;
}
+
+#else /* !CONFIG_HOTPLUG_SMT */
+static ssize_t
+__store_smt_control(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_HOTPLUG_SMT */
+
+static const char *smt_states[] = {
+ [CPU_SMT_ENABLED] = "on",
+ [CPU_SMT_DISABLED] = "off",
+ [CPU_SMT_FORCE_DISABLED] = "forceoff",
+ [CPU_SMT_NOT_SUPPORTED] = "notsupported",
+ [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
+};
+
+static ssize_t
+show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ const char *state = smt_states[cpu_smt_control];
+
+ return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
+}
+
+static ssize_t
+store_smt_control(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return __store_smt_control(dev, attr, buf, count);
+}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
- bool active = topology_max_smt_threads() > 1;
-
- return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
+ return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
@@ -2163,21 +2207,17 @@ static const struct attribute_group cpuhp_smt_attr_group = {
NULL
};
-static int __init cpu_smt_state_init(void)
+static int __init cpu_smt_sysfs_init(void)
{
return sysfs_create_group(&cpu_subsys.dev_root->kobj,
&cpuhp_smt_attr_group);
}
-#else
-static inline int cpu_smt_state_init(void) { return 0; }
-#endif
-
static int __init cpuhp_sysfs_init(void)
{
int cpu, ret;
- ret = cpu_smt_state_init();
+ ret = cpu_smt_sysfs_init();
if (ret)
return ret;
@@ -2198,7 +2238,7 @@ static int __init cpuhp_sysfs_init(void)
return 0;
}
device_initcall(cpuhp_sysfs_init);
-#endif
+#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
/*
* cpu_bit_bitmap[] is a special, "compressed" data structure that
@@ -2288,3 +2328,18 @@ void __init boot_cpu_hotplug_init(void)
#endif
this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
+
+enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+
+static int __init mitigations_parse_cmdline(char *arg)
+{
+ if (!strcmp(arg, "off"))
+ cpu_mitigations = CPU_MITIGATIONS_OFF;
+ else if (!strcmp(arg, "auto"))
+ cpu_mitigations = CPU_MITIGATIONS_AUTO;
+ else if (!strcmp(arg, "auto,nosmt"))
+ cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+
+ return 0;
+}
+early_param("mitigations", mitigations_parse_cmdline);
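
The mitigations= handler above follows the standard early_param pattern: the callback runs while start_kernel() parses the command line, well before initcalls, so it may only touch data that is already set up. A hedged minimal example with invented names:

static bool example_feature_off __ro_after_init;

static int __init example_parse_cmdline(char *arg)
{
        if (arg && !strcmp(arg, "off"))
                example_feature_off = true;
        return 0;
}
early_param("example_feature", example_parse_cmdline);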
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 0711d18645de..a06ba3013b3b 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -19,7 +19,13 @@ config ARCH_HAS_DMA_COHERENCE_H
config ARCH_HAS_DMA_SET_MASK
bool
-config HAVE_GENERIC_DMA_COHERENT
+config DMA_DECLARE_COHERENT
+ bool
+
+config ARCH_HAS_SETUP_DMA_OPS
+ bool
+
+config ARCH_HAS_TEARDOWN_DMA_OPS
bool
config ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -56,3 +62,116 @@ config DMA_REMAP
config DMA_DIRECT_REMAP
bool
select DMA_REMAP
+
+config DMA_CMA
+ bool "DMA Contiguous Memory Allocator"
+ depends on HAVE_DMA_CONTIGUOUS && CMA
+ help
+ This enables the Contiguous Memory Allocator which allows drivers
+ to allocate big physically-contiguous blocks of memory for use with
+	  hardware components that do not support I/O mapping or scatter-gather.
+
+ You can disable CMA by specifying "cma=0" on the kernel's command
+ line.
+
+ For more information see <include/linux/dma-contiguous.h>.
+ If unsure, say "n".
+
+if DMA_CMA
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+ int "Size in Mega Bytes"
+ depends on !CMA_SIZE_SEL_PERCENTAGE
+ default 0 if X86
+ default 16
+ help
+	  Defines the size (in MiB) of the default memory area for the
+	  Contiguous Memory Allocator. If a size of 0 is selected, CMA is
+	  disabled by default, but it can be enabled by passing cma=size[MG]
+	  to the kernel.
+
+
+config CMA_SIZE_PERCENTAGE
+ int "Percentage of total memory"
+ depends on !CMA_SIZE_SEL_MBYTES
+ default 0 if X86
+ default 10
+ help
+ Defines the size of the default memory area for Contiguous Memory
+ Allocator as a percentage of the total memory in the system.
+ If 0 percent is selected, CMA is disabled by default, but it can be
+ enabled by passing cma=size[MG] to the kernel.
+
+choice
+ prompt "Selected region size"
+ default CMA_SIZE_SEL_MBYTES
+
+config CMA_SIZE_SEL_MBYTES
+ bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+ bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+ bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+ bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+ range 4 12
+ default 8
+ help
+	  The DMA mapping framework by default aligns all buffers to the
+	  smallest PAGE_SIZE order which is greater than or equal to the
+	  requested buffer size. This works well for buffers up to a few
+	  hundred kilobytes, but for larger buffers it is just a waste of
+	  memory. With this parameter you can specify the maximum PAGE_SIZE
+	  order for contiguous buffers. Larger buffers will be aligned only
+	  to this specified order. The order is expressed as a power of two
+	  multiplied by the PAGE_SIZE.
+
+ For example, if your system defaults to 4KiB pages, the order value
+ of 8 means that the buffers will be aligned up to 1MiB only.
+
+ If unsure, leave the default value "8".
+
+endif
+
+config DMA_API_DEBUG
+ bool "Enable debugging of DMA-API usage"
+ select NEED_DMA_MAP_STATE
+ help
+ Enable this option to debug the use of the DMA API by device drivers.
+ With this option you will be able to detect common bugs in device
+ drivers like double-freeing of DMA mappings or freeing mappings that
+ were never allocated.
+
+ This also attempts to catch cases where a page owned by DMA is
+ accessed by the cpu in a way that could cause data corruption. For
+ example, this enables cow_user_page() to check that the source page is
+ not undergoing DMA.
+
+ This option causes a performance degradation. Use only if you want to
+ debug device drivers and dma interactions.
+
+ If unsure, say N.
+
+config DMA_API_DEBUG_SG
+ bool "Debug DMA scatter-gather usage"
+ default y
+ depends on DMA_API_DEBUG
+ help
+ Perform extra checking that callers of dma_map_sg() have respected the
+ appropriate segment length/boundary limits for the given device when
+ preparing DMA scatterlists.
+
+ This is particularly likely to have been overlooked in cases where the
+ dma_map_sg() API is used for general bulk mapping of pages rather than
+ preparing literal scatter-gather descriptors, where there is a risk of
+ unexpected behaviour from DMA API implementations if the scatterlist
+ is technically out-of-spec.
+
+ If unsure, say N.
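
For the DMA_CMA option just added, note that drivers never call CMA directly: once the contiguous region exists (through these Kconfig defaults or cma=size[MG] on the command line), ordinary coherent allocations can be satisfied from it. A hedged sketch, all names invented:

static int example_alloc_frame_buffer(struct device *dev)
{
        dma_addr_t dma_handle;
        void *cpu_addr;

        /* a large allocation; with CMA this can come from the reserved area */
        cpu_addr = dma_alloc_coherent(dev, SZ_4M, &dma_handle, GFP_KERNEL);
        if (!cpu_addr)
                return -ENOMEM;

        /* ... hand dma_handle to the hardware ... */

        dma_free_coherent(dev, SZ_4M, cpu_addr, dma_handle);
        return 0;
}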
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index 72ff6e46aa86..d237cf3dc181 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_HAS_DMA) += mapping.o direct.o dummy.o
obj-$(CONFIG_DMA_CMA) += contiguous.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
+obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
obj-$(CONFIG_DMA_VIRT_OPS) += virt.o
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 66f0fb7e9a3a..29fd6590dc1e 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -14,7 +14,6 @@ struct dma_coherent_mem {
dma_addr_t device_base;
unsigned long pfn_base;
int size;
- int flags;
unsigned long *bitmap;
spinlock_t spinlock;
bool use_dev_dma_pfn_offset;
@@ -38,12 +37,12 @@ static inline dma_addr_t dma_get_device_base(struct device *dev,
return mem->device_base;
}
-static int dma_init_coherent_memory(
- phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
- struct dma_coherent_mem **mem)
+static int dma_init_coherent_memory(phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size,
+ struct dma_coherent_mem **mem)
{
struct dma_coherent_mem *dma_mem = NULL;
- void __iomem *mem_base = NULL;
+ void *mem_base = NULL;
int pages = size >> PAGE_SHIFT;
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
int ret;
@@ -73,7 +72,6 @@ static int dma_init_coherent_memory(
dma_mem->device_base = device_addr;
dma_mem->pfn_base = PFN_DOWN(phys_addr);
dma_mem->size = pages;
- dma_mem->flags = flags;
spin_lock_init(&dma_mem->spinlock);
*mem = dma_mem;
@@ -110,12 +108,12 @@ static int dma_assign_coherent_memory(struct device *dev,
}
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags)
+ dma_addr_t device_addr, size_t size)
{
struct dma_coherent_mem *mem;
int ret;
- ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
+ ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
if (ret)
return ret;
@@ -137,29 +135,6 @@ void dma_release_declared_memory(struct device *dev)
}
EXPORT_SYMBOL(dma_release_declared_memory);
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
- unsigned long flags;
- int pos, err;
-
- size += device_addr & ~PAGE_MASK;
-
- if (!mem)
- return ERR_PTR(-EINVAL);
-
- spin_lock_irqsave(&mem->spinlock, flags);
- pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
- err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
- spin_unlock_irqrestore(&mem->spinlock, flags);
-
- if (err != 0)
- return ERR_PTR(err);
- return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
ssize_t size, dma_addr_t *dma_handle)
{
@@ -213,15 +188,7 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
return 0;
*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
- if (*ret)
- return 1;
-
- /*
- * In the case where the allocation can not be satisfied from the
- * per-device area, try to fall back to generic memory if the
- * constraints allow it.
- */
- return mem->flags & DMA_MEMORY_EXCLUSIVE;
+ return 1;
}
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
@@ -350,8 +317,7 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
if (!mem) {
ret = dma_init_coherent_memory(rmem->base, rmem->base,
- rmem->size,
- DMA_MEMORY_EXCLUSIVE, &mem);
+ rmem->size, &mem);
if (ret) {
pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
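
dma_declare_coherent_memory() above loses its flags argument along with dma_mark_declared_memory_occupied(); declared regions are now always exclusive. A hedged caller sketch against the new four-argument signature — the addresses and size are made up:

static int example_probe(struct platform_device *pdev)
{
        int ret;

        ret = dma_declare_coherent_memory(&pdev->dev,
                                          0x90000000,   /* phys_addr */
                                          0x90000000,   /* device_addr */
                                          SZ_1M);
        if (ret)
                return ret;

        /* dma_alloc_coherent() on this device now draws from that region */
        return 0;
}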
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 23cf5361bcf1..badd77670d00 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -89,8 +89,8 @@ struct dma_debug_entry {
int sg_mapped_ents;
enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
- struct stack_trace stacktrace;
- unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
+ unsigned int stack_len;
+ unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};
@@ -134,17 +134,6 @@ static u32 nr_total_entries;
/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
-/* debugfs dentry's for the stuff above */
-static struct dentry *dma_debug_dent __read_mostly;
-static struct dentry *global_disable_dent __read_mostly;
-static struct dentry *error_count_dent __read_mostly;
-static struct dentry *show_all_errors_dent __read_mostly;
-static struct dentry *show_num_errors_dent __read_mostly;
-static struct dentry *num_free_entries_dent __read_mostly;
-static struct dentry *min_free_entries_dent __read_mostly;
-static struct dentry *nr_total_entries_dent __read_mostly;
-static struct dentry *filter_dent __read_mostly;
-
/* per-driver filter related state */
#define NAME_MAX_LEN 64
@@ -185,7 +174,7 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
#ifdef CONFIG_STACKTRACE
if (entry) {
pr_warning("Mapped at:\n");
- print_stack_trace(&entry->stacktrace, 0);
+ stack_trace_print(entry->stack_entries, entry->stack_len, 0);
}
#endif
}
@@ -715,12 +704,10 @@ static struct dma_debug_entry *dma_entry_alloc(void)
spin_unlock_irqrestore(&free_entries_lock, flags);
#ifdef CONFIG_STACKTRACE
- entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
- entry->stacktrace.entries = entry->st_entries;
- entry->stacktrace.skip = 2;
- save_stack_trace(&entry->stacktrace);
+ entry->stack_len = stack_trace_save(entry->stack_entries,
+ ARRAY_SIZE(entry->stack_entries),
+ 1);
#endif
-
return entry;
}
@@ -840,66 +827,46 @@ static const struct file_operations filter_fops = {
.llseek = default_llseek,
};
-static int dma_debug_fs_init(void)
+static int dump_show(struct seq_file *seq, void *v)
{
- dma_debug_dent = debugfs_create_dir("dma-api", NULL);
- if (!dma_debug_dent) {
- pr_err("can not create debugfs directory\n");
- return -ENOMEM;
- }
+ int idx;
- global_disable_dent = debugfs_create_bool("disabled", 0444,
- dma_debug_dent,
- &global_disable);
- if (!global_disable_dent)
- goto out_err;
-
- error_count_dent = debugfs_create_u32("error_count", 0444,
- dma_debug_dent, &error_count);
- if (!error_count_dent)
- goto out_err;
-
- show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
- dma_debug_dent,
- &show_all_errors);
- if (!show_all_errors_dent)
- goto out_err;
-
- show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
- dma_debug_dent,
- &show_num_errors);
- if (!show_num_errors_dent)
- goto out_err;
-
- num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
- dma_debug_dent,
- &num_free_entries);
- if (!num_free_entries_dent)
- goto out_err;
-
- min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
- dma_debug_dent,
- &min_free_entries);
- if (!min_free_entries_dent)
- goto out_err;
-
- nr_total_entries_dent = debugfs_create_u32("nr_total_entries", 0444,
- dma_debug_dent,
- &nr_total_entries);
- if (!nr_total_entries_dent)
- goto out_err;
-
- filter_dent = debugfs_create_file("driver_filter", 0644,
- dma_debug_dent, NULL, &filter_fops);
- if (!filter_dent)
- goto out_err;
+ for (idx = 0; idx < HASH_SIZE; idx++) {
+ struct hash_bucket *bucket = &dma_entry_hash[idx];
+ struct dma_debug_entry *entry;
+ unsigned long flags;
+ spin_lock_irqsave(&bucket->lock, flags);
+ list_for_each_entry(entry, &bucket->list, list) {
+ seq_printf(seq,
+ "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
+ dev_name(entry->dev),
+ dev_driver_string(entry->dev),
+ type2name[entry->type], idx,
+ phys_addr(entry), entry->pfn,
+ entry->dev_addr, entry->size,
+ dir2name[entry->direction],
+ maperr2str[entry->map_err_type]);
+ }
+ spin_unlock_irqrestore(&bucket->lock, flags);
+ }
return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dump);
-out_err:
- debugfs_remove_recursive(dma_debug_dent);
-
- return -ENOMEM;
+static void dma_debug_fs_init(void)
+{
+ struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
+
+ debugfs_create_bool("disabled", 0444, dentry, &global_disable);
+ debugfs_create_u32("error_count", 0444, dentry, &error_count);
+ debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
+ debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
+ debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
+ debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
+ debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
+ debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
+ debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
}
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
@@ -985,12 +952,7 @@ static int dma_debug_init(void)
spin_lock_init(&dma_entry_hash[i].lock);
}
- if (dma_debug_fs_init() != 0) {
- pr_err("error creating debugfs entries - disabling\n");
- global_disable = true;
-
- return 0;
- }
+ dma_debug_fs_init();
nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
for (i = 0; i < nr_pages; ++i)
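
The debug.c conversion above replaces struct stack_trace with the simpler save/print pair: a plain array plus an entry count. A hedged sketch of that pair in isolation (names invented):

#define EXAMPLE_TRACE_DEPTH 16

static void example_capture_and_print(void)
{
        unsigned long entries[EXAMPLE_TRACE_DEPTH];
        unsigned int len;

        len = stack_trace_save(entries, ARRAY_SIZE(entries), 2 /* skipnr */);
        stack_trace_print(entries, len, 0 /* indent spaces */);
}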
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index d5bb51cf27c6..fcdb23e8d2fc 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -355,6 +355,20 @@ out_unmap:
}
EXPORT_SYMBOL(dma_direct_map_sg);
+dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ dma_addr_t dma_addr = paddr;
+
+ if (unlikely(!dma_direct_possible(dev, dma_addr, size))) {
+ report_addr(dev, dma_addr, size);
+ return DMA_MAPPING_ERROR;
+ }
+
+ return dma_addr;
+}
+EXPORT_SYMBOL(dma_direct_map_resource);
+
/*
* Because 32-bit DMA masks are so common we expect every architecture to be
* able to satisfy them - either by not supporting more physical memory, or by
@@ -379,3 +393,14 @@ int dma_direct_supported(struct device *dev, u64 mask)
*/
return mask >= __phys_to_dma(dev, min_mask);
}
+
+size_t dma_direct_max_mapping_size(struct device *dev)
+{
+ size_t size = SIZE_MAX;
+
+ /* If SWIOTLB is active, use its maximum mapping size */
+ if (is_swiotlb_active())
+ size = swiotlb_max_mapping_size(dev);
+
+ return size;
+}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index ef2aba503467..c000906348c9 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -360,3 +360,17 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);
+
+size_t dma_max_mapping_size(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+ size_t size = SIZE_MAX;
+
+ if (dma_is_direct(ops))
+ size = dma_direct_max_mapping_size(dev);
+ else if (ops && ops->max_mapping_size)
+ size = ops->max_mapping_size(dev);
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(dma_max_mapping_size);
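
dma_max_mapping_size() gives drivers a way to discover the largest single mapping the DMA layer — here, swiotlb bouncing — can satisfy. A hedged sketch of how a block driver might use it to cap its segment size; the capping policy is illustrative, not from this patch:

static void example_set_limits(struct device *dev, struct request_queue *q)
{
        size_t max = dma_max_mapping_size(dev);

        /* only clamp when the DMA layer actually reports a bound */
        if (max < SIZE_MAX)
                blk_queue_max_segment_size(q, min_t(size_t, max, UINT_MAX));
}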
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index a9ad02d4d84e..53012db1e53c 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -199,6 +199,7 @@ void __init swiotlb_update_mem_attributes(void)
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
unsigned long i, bytes;
+ size_t alloc_size;
bytes = nslabs << IO_TLB_SHIFT;
@@ -211,12 +212,18 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
- io_tlb_list = memblock_alloc(
- PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
- PAGE_SIZE);
- io_tlb_orig_addr = memblock_alloc(
- PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
- PAGE_SIZE);
+ alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(int));
+ io_tlb_list = memblock_alloc(alloc_size, PAGE_SIZE);
+ if (!io_tlb_list)
+ panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
+ __func__, alloc_size, PAGE_SIZE);
+
+ alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t));
+ io_tlb_orig_addr = memblock_alloc(alloc_size, PAGE_SIZE);
+ if (!io_tlb_orig_addr)
+ panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
+ __func__, alloc_size, PAGE_SIZE);
+
for (i = 0; i < io_tlb_nslabs; i++) {
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
@@ -249,7 +256,7 @@ swiotlb_init(int verbose)
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/* Get IO TLB memory from the low pages */
- vstart = memblock_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
+ vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
return;
@@ -666,6 +673,20 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
return true;
}
+size_t swiotlb_max_mapping_size(struct device *dev)
+{
+ return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+}
+
+bool is_swiotlb_active(void)
+{
+ /*
+ * When SWIOTLB is initialized, even if io_tlb_start points to physical
+ * address zero, io_tlb_end surely doesn't.
+ */
+ return io_tlb_end != 0;
+}
+
#ifdef CONFIG_DEBUG_FS
static int __init swiotlb_create_debugfs(void)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5f59d848171e..abbd4b3b96c2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2009,8 +2009,8 @@ event_sched_out(struct perf_event *event,
event->pmu->del(event, 0);
event->oncpu = -1;
- if (event->pending_disable) {
- event->pending_disable = 0;
+ if (READ_ONCE(event->pending_disable) >= 0) {
+ WRITE_ONCE(event->pending_disable, -1);
state = PERF_EVENT_STATE_OFF;
}
perf_event_set_state(event, state);
@@ -2198,7 +2198,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
void perf_event_disable_inatomic(struct perf_event *event)
{
- event->pending_disable = 1;
+ WRITE_ONCE(event->pending_disable, smp_processor_id());
+ /* can fail, see perf_pending_event_disable() */
irq_work_queue(&event->pending);
}
@@ -2477,6 +2478,16 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
perf_pmu_enable(cpuctx->ctx.pmu);
}
+void perf_pmu_resched(struct pmu *pmu)
+{
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+ struct perf_event_context *task_ctx = cpuctx->task_ctx;
+
+ perf_ctx_lock(cpuctx, task_ctx);
+ ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
+ perf_ctx_unlock(cpuctx, task_ctx);
+}
+
/*
* Cross CPU call to install and enable a performance event
*
@@ -4238,7 +4249,8 @@ static bool is_sb_event(struct perf_event *event)
if (attr->mmap || attr->mmap_data || attr->mmap2 ||
attr->comm || attr->comm_exec ||
attr->task || attr->ksymbol ||
- attr->context_switch)
+ attr->context_switch ||
+ attr->bpf_event)
return true;
return false;
}
@@ -5473,7 +5485,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
/* now it's safe to free the pages */
atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
- vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
+ atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
/* this has to be the last one */
rb_free_aux(rb);
@@ -5546,7 +5558,7 @@ again:
*/
atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
- vma->vm_mm->pinned_vm -= mmap_locked;
+ atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
free_uid(mmap_user);
out_put:
@@ -5694,7 +5706,7 @@ accounting:
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
- locked = vma->vm_mm->pinned_vm + extra;
+ locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
!capable(CAP_IPC_LOCK)) {
@@ -5735,7 +5747,7 @@ accounting:
unlock:
if (!ret) {
atomic_long_add(user_extra, &user->locked_vm);
- vma->vm_mm->pinned_vm += extra;
+ atomic64_add(extra, &vma->vm_mm->pinned_vm);
atomic_inc(&event->mmap_count);
} else if (rb) {
@@ -5809,10 +5821,45 @@ void perf_event_wakeup(struct perf_event *event)
}
}
+static void perf_pending_event_disable(struct perf_event *event)
+{
+ int cpu = READ_ONCE(event->pending_disable);
+
+ if (cpu < 0)
+ return;
+
+ if (cpu == smp_processor_id()) {
+ WRITE_ONCE(event->pending_disable, -1);
+ perf_event_disable_local(event);
+ return;
+ }
+
+ /*
+ * CPU-A CPU-B
+ *
+ * perf_event_disable_inatomic()
+ * @pending_disable = CPU-A;
+ * irq_work_queue();
+ *
+ * sched-out
+ * @pending_disable = -1;
+ *
+ * sched-in
+ * perf_event_disable_inatomic()
+ * @pending_disable = CPU-B;
+ * irq_work_queue(); // FAILS
+ *
+ * irq_work_run()
+ * perf_pending_event()
+ *
+ * But the event runs on CPU-B and wants disabling there.
+ */
+ irq_work_queue_on(&event->pending, cpu);
+}
+
static void perf_pending_event(struct irq_work *entry)
{
- struct perf_event *event = container_of(entry,
- struct perf_event, pending);
+ struct perf_event *event = container_of(entry, struct perf_event, pending);
int rctx;
rctx = perf_swevent_get_recursion_context();
@@ -5821,10 +5868,7 @@ static void perf_pending_event(struct irq_work *entry)
* and we won't recurse 'further'.
*/
- if (event->pending_disable) {
- event->pending_disable = 0;
- perf_event_disable_local(event);
- }
+ perf_pending_event_disable(event);
if (event->pending_wakeup) {
event->pending_wakeup = 0;
@@ -7188,6 +7232,7 @@ static void perf_event_mmap_output(struct perf_event *event,
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = mmap_event->event_id.header.size;
+ u32 type = mmap_event->event_id.header.type;
int ret;
if (!perf_event_mmap_match(event, data))
@@ -7231,6 +7276,7 @@ static void perf_event_mmap_output(struct perf_event *event,
perf_output_end(&handle);
out:
mmap_event->event_id.header.size = size;
+ mmap_event->event_id.header.type = type;
}
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
@@ -9041,26 +9087,29 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
if (task == TASK_TOMBSTONE)
return;
- if (!ifh->nr_file_filters)
- return;
-
- mm = get_task_mm(event->ctx->task);
- if (!mm)
- goto restart;
+ if (ifh->nr_file_filters) {
+ mm = get_task_mm(event->ctx->task);
+ if (!mm)
+ goto restart;
- down_read(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
+ }
raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
- event->addr_filter_ranges[count].start = 0;
- event->addr_filter_ranges[count].size = 0;
+ if (filter->path.dentry) {
+ /*
+ * Adjust base offset if the filter is associated to a
+			 * Adjust base offset if the filter is associated with a
+ */
+ event->addr_filter_ranges[count].start = 0;
+ event->addr_filter_ranges[count].size = 0;
- /*
- * Adjust base offset if the filter is associated to a binary
- * that needs to be mapped:
- */
- if (filter->path.dentry)
perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
+ } else {
+ event->addr_filter_ranges[count].start = filter->offset;
+ event->addr_filter_ranges[count].size = filter->size;
+ }
count++;
}
@@ -9068,9 +9117,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
event->addr_filters_gen++;
raw_spin_unlock_irqrestore(&ifh->lock, flags);
- up_read(&mm->mmap_sem);
+ if (ifh->nr_file_filters) {
+ up_read(&mm->mmap_sem);
- mmput(mm);
+ mmput(mm);
+ }
restart:
perf_event_stop(event, 1);
@@ -9174,6 +9225,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
case IF_SRC_KERNELADDR:
case IF_SRC_KERNEL:
kernel = 1;
+ /* fall through */
case IF_SRC_FILEADDR:
case IF_SRC_FILE:
@@ -10232,6 +10284,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
init_waitqueue_head(&event->waitq);
+ event->pending_disable = -1;
init_irq_work(&event->pending, perf_pending_event);
mutex_init(&event->mmap_mutex);
@@ -11874,7 +11927,7 @@ static void __init perf_event_init_all_cpus(void)
}
}
-void perf_swevent_init_cpu(unsigned int cpu)
+static void perf_swevent_init_cpu(unsigned int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
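
The pending_disable rework above trades a boolean for the id of the CPU that requested the disable, so a failed irq_work_queue() can be repaired by re-queueing on the recorded CPU. A hedged distillation of the pattern with invented names:

struct example_event {
        int                     pending_disable;        /* CPU id, or -1 */
        struct irq_work         pending;
};

static void example_request_disable(struct example_event *e)
{
        WRITE_ONCE(e->pending_disable, smp_processor_id());
        /* may fail if already queued; the handler re-queues, see below */
        irq_work_queue(&e->pending);
}

static void example_pending(struct irq_work *work)
{
        struct example_event *e = container_of(work, struct example_event, pending);
        int cpu = READ_ONCE(e->pending_disable);

        if (cpu < 0)
                return;
        if (cpu == smp_processor_id()) {
                WRITE_ONCE(e->pending_disable, -1);
                /* ... disable the event locally ... */
                return;
        }
        /* raced with a migration: chase the event to its new CPU */
        irq_work_queue_on(&e->pending, cpu);
}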
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 678ccec60d8f..674b35383491 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -392,7 +392,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
* store that will be enabled on successful return
*/
if (!handle->size) { /* A, matches D */
- event->pending_disable = 1;
+ event->pending_disable = smp_processor_id();
perf_output_wakeup(handle);
local_set(&rb->aux_nest, 0);
goto err_put;
@@ -455,24 +455,21 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
rb->aux_head += size;
}
- if (size || handle->aux_flags) {
- /*
- * Only send RECORD_AUX if we have something useful to communicate
- *
- * Note: the OVERWRITE records by themselves are not considered
- * useful, as they don't communicate any *new* information,
- * aside from the short-lived offset, that becomes history at
- * the next event sched-in and therefore isn't useful.
- * The userspace that needs to copy out AUX data in overwrite
- * mode should know to use user_page::aux_head for the actual
- * offset. So, from now on we don't output AUX records that
- * have *only* OVERWRITE flag set.
- */
-
- if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE)
- perf_event_aux_event(handle->event, aux_head, size,
- handle->aux_flags);
- }
+ /*
+ * Only send RECORD_AUX if we have something useful to communicate
+ *
+ * Note: the OVERWRITE records by themselves are not considered
+ * useful, as they don't communicate any *new* information,
+ * aside from the short-lived offset, that becomes history at
+ * the next event sched-in and therefore isn't useful.
+ * The userspace that needs to copy out AUX data in overwrite
+ * mode should know to use user_page::aux_head for the actual
+ * offset. So, from now on we don't output AUX records that
+ * have *only* OVERWRITE flag set.
+ */
+ if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
+ perf_event_aux_event(handle->event, aux_head, size,
+ handle->aux_flags);
rb->user_page->aux_head = rb->aux_head;
if (rb_need_aux_wakeup(rb))
@@ -480,7 +477,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
if (wakeup) {
if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
- handle->event->pending_disable = 1;
+ handle->event->pending_disable = smp_processor_id();
perf_output_wakeup(handle);
}
@@ -598,29 +595,26 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
{
bool overwrite = !(flags & RING_BUFFER_WRITABLE);
int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
- int ret = -ENOMEM, max_order = 0;
+ int ret = -ENOMEM, max_order;
if (!has_aux(event))
return -EOPNOTSUPP;
- if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
- /*
- * We need to start with the max_order that fits in nr_pages,
- * not the other way around, hence ilog2() and not get_order.
- */
- max_order = ilog2(nr_pages);
+ /*
+ * We need to start with the max_order that fits in nr_pages,
+ * not the other way around, hence ilog2() and not get_order.
+ */
+ max_order = ilog2(nr_pages);
- /*
- * PMU requests more than one contiguous chunks of memory
- * for SW double buffering
- */
- if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
- !overwrite) {
- if (!max_order)
- return -EINVAL;
+ /*
+	 * The PMU requests more than one contiguous chunk of memory
+ * for SW double buffering
+ */
+ if (!overwrite) {
+ if (!max_order)
+ return -EINVAL;
- max_order--;
- }
+ max_order--;
}
rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 48abc0f18eae..4ca7364c956d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -53,7 +53,7 @@ static struct percpu_rw_semaphore dup_mmap_sem;
struct uprobe {
struct rb_node rb_node; /* node in the rb tree */
- atomic_t ref;
+ refcount_t ref;
struct rw_semaphore register_rwsem;
struct rw_semaphore consumer_rwsem;
struct list_head pending_list;
@@ -547,13 +547,13 @@ set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long v
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
- atomic_inc(&uprobe->ref);
+ refcount_inc(&uprobe->ref);
return uprobe;
}
static void put_uprobe(struct uprobe *uprobe)
{
- if (atomic_dec_and_test(&uprobe->ref)) {
+ if (refcount_dec_and_test(&uprobe->ref)) {
/*
* If application munmap(exec_vma) before uprobe_unregister()
* gets called, we don't get a chance to remove uprobe from
@@ -644,7 +644,7 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
rb_link_node(&uprobe->rb_node, parent, p);
rb_insert_color(&uprobe->rb_node, &uprobes_tree);
/* get access + creation ref */
- atomic_set(&uprobe->ref, 2);
+ refcount_set(&uprobe->ref, 2);
return u;
}
@@ -2294,16 +2294,14 @@ static struct notifier_block uprobe_exception_nb = {
.priority = INT_MAX-1, /* notified after kprobes, kgdb */
};
-static int __init init_uprobes(void)
+void __init uprobes_init(void)
{
int i;
for (i = 0; i < UPROBES_HASH_SZ; i++)
mutex_init(&uprobes_mmap_mutex[i]);
- if (percpu_init_rwsem(&dup_mmap_sem))
- return -ENOMEM;
+ BUG_ON(percpu_init_rwsem(&dup_mmap_sem));
- return register_die_notifier(&uprobe_exception_nb);
+ BUG_ON(register_die_notifier(&uprobe_exception_nb));
}
-__initcall(init_uprobes);
diff --git a/kernel/fork.c b/kernel/fork.c
index 874e48c410f8..fbe9dfcd8680 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -815,6 +815,7 @@ void __init fork_init(void)
#endif
lockdep_init_task(&init_task);
+ uprobes_init();
}
int __weak arch_dup_task_struct(struct task_struct *dst,
@@ -980,7 +981,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_pgtables_bytes_init(mm);
mm->map_count = 0;
mm->locked_vm = 0;
- mm->pinned_vm = 0;
+ atomic64_set(&mm->pinned_vm, 0);
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
spin_lock_init(&mm->arg_lock);
@@ -1298,13 +1299,20 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
complete_vfork_done(tsk);
}
-/*
- * Allocate a new mm structure and copy contents from the
- * mm structure of the passed in task structure.
+/**
+ * dup_mm() - duplicates an existing mm structure
+ * @tsk: the task_struct with which the new mm will be associated.
+ * @oldmm: the mm to duplicate.
+ *
+ * Allocates a new mm structure and duplicates the provided @oldmm structure
+ * content into it.
+ *
+ * Return: the duplicated mm or NULL on failure.
*/
-static struct mm_struct *dup_mm(struct task_struct *tsk)
+static struct mm_struct *dup_mm(struct task_struct *tsk,
+ struct mm_struct *oldmm)
{
- struct mm_struct *mm, *oldmm = current->mm;
+ struct mm_struct *mm;
int err;
mm = allocate_mm();
@@ -1371,7 +1379,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
}
retval = -ENOMEM;
- mm = dup_mm(tsk);
+ mm = dup_mm(tsk, current->mm);
if (!mm)
goto fail_nomem;
@@ -2186,6 +2194,11 @@ struct task_struct *fork_idle(int cpu)
return task;
}
+struct mm_struct *copy_init_mm(void)
+{
+ return dup_mm(NULL, &init_mm);
+}
+
/*
* Ok, this is the main fork-routine.
*
diff --git a/kernel/futex.c b/kernel/futex.c
index c3b73b0311bc..6262f1534ac9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1311,13 +1311,15 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
+ int err;
u32 uninitialized_var(curval);
if (unlikely(should_fail_futex(true)))
return -EFAULT;
- if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
- return -EFAULT;
+ err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+ if (unlikely(err))
+ return err;
/* If user space value changed, let the caller retry */
return curval != uval ? -EAGAIN : 0;
@@ -1502,10 +1504,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
if (unlikely(should_fail_futex(true)))
ret = -EFAULT;
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
- ret = -EFAULT;
-
- } else if (curval != uval) {
+ ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+ if (!ret && (curval != uval)) {
/*
* If a unconditional UNLOCK_PI operation (user space did not
* try the TID->0 transition) raced with a waiter setting the
@@ -1700,32 +1700,32 @@ retry_private:
double_lock_hb(hb1, hb2);
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
-
double_unlock_hb(hb1, hb2);
-#ifndef CONFIG_MMU
- /*
- * we don't get EFAULT from MMU faults if we don't have an MMU,
- * but we might get them from range checking
- */
- ret = op_ret;
- goto out_put_keys;
-#endif
-
- if (unlikely(op_ret != -EFAULT)) {
+ if (!IS_ENABLED(CONFIG_MMU) ||
+ unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
+ /*
+ * we don't get EFAULT from MMU faults if we don't have
+ * an MMU, but we might get them from range checking
+ */
ret = op_ret;
goto out_put_keys;
}
- ret = fault_in_user_writeable(uaddr2);
- if (ret)
- goto out_put_keys;
+ if (op_ret == -EFAULT) {
+ ret = fault_in_user_writeable(uaddr2);
+ if (ret)
+ goto out_put_keys;
+ }
- if (!(flags & FLAGS_SHARED))
+ if (!(flags & FLAGS_SHARED)) {
+ cond_resched();
goto retry_private;
+ }
put_futex_key(&key2);
put_futex_key(&key1);
+ cond_resched();
goto retry;
}
@@ -2350,7 +2350,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
u32 uval, uninitialized_var(curval), newval;
struct task_struct *oldowner, *newowner;
u32 newtid;
- int ret;
+ int ret, err = 0;
lockdep_assert_held(q->lock_ptr);
@@ -2421,14 +2421,17 @@ retry:
if (!pi_state->owner)
newtid |= FUTEX_OWNER_DIED;
- if (get_futex_value_locked(&uval, uaddr))
- goto handle_fault;
+ err = get_futex_value_locked(&uval, uaddr);
+ if (err)
+ goto handle_err;
for (;;) {
newval = (uval & FUTEX_OWNER_DIED) | newtid;
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
- goto handle_fault;
+ err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+ if (err)
+ goto handle_err;
+
if (curval == uval)
break;
uval = curval;
@@ -2456,23 +2459,37 @@ retry:
return 0;
/*
- * To handle the page fault we need to drop the locks here. That gives
- * the other task (either the highest priority waiter itself or the
- * task which stole the rtmutex) the chance to try the fixup of the
- * pi_state. So once we are back from handling the fault we need to
- * check the pi_state after reacquiring the locks and before trying to
- * do another fixup. When the fixup has been done already we simply
- * return.
+ * In order to reschedule or handle a page fault, we need to drop the
+ * locks here. In the case of a fault, this gives the other task
+ * (either the highest priority waiter itself or the task which stole
+ * the rtmutex) the chance to try the fixup of the pi_state. So once we
+ * are back from handling the fault we need to check the pi_state after
+ * reacquiring the locks and before trying to do another fixup. When
+ * the fixup has been done already we simply return.
*
* Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
* drop hb->lock since the caller owns the hb -> futex_q relation.
* Dropping the pi_mutex->wait_lock requires the state revalidate.
*/
-handle_fault:
+handle_err:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(q->lock_ptr);
- ret = fault_in_user_writeable(uaddr);
+ switch (err) {
+ case -EFAULT:
+ ret = fault_in_user_writeable(uaddr);
+ break;
+
+ case -EAGAIN:
+ cond_resched();
+ ret = 0;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ ret = err;
+ break;
+ }
spin_lock(q->lock_ptr);
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
@@ -3041,10 +3058,8 @@ retry:
* A unconditional UNLOCK_PI op raced against a waiter
* setting the FUTEX_WAITERS bit. Try again.
*/
- if (ret == -EAGAIN) {
- put_futex_key(&key);
- goto retry;
- }
+ if (ret == -EAGAIN)
+ goto pi_retry;
/*
* wake_futex_pi has detected invalid state. Tell user
* space.
@@ -3059,9 +3074,19 @@ retry:
* preserve the WAITERS bit not the OWNER_DIED one. We are the
* owner.
*/
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
+ if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
spin_unlock(&hb->lock);
- goto pi_faulted;
+ switch (ret) {
+ case -EFAULT:
+ goto pi_faulted;
+
+ case -EAGAIN:
+ goto pi_retry;
+
+ default:
+ WARN_ON_ONCE(1);
+ goto out_putkey;
+ }
}
/*
@@ -3075,6 +3100,11 @@ out_putkey:
put_futex_key(&key);
return ret;
+pi_retry:
+ put_futex_key(&key);
+ cond_resched();
+ goto retry;
+
pi_faulted:
put_futex_key(&key);
@@ -3435,47 +3465,67 @@ err_unlock:
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
u32 uval, uninitialized_var(nval), mval;
+ int err;
+
+ /* Futex address must be 32bit aligned */
+ if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
+ return -1;
retry:
if (get_user(uval, uaddr))
return -1;
- if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
- /*
- * Ok, this dying thread is truly holding a futex
- * of interest. Set the OWNER_DIED bit atomically
- * via cmpxchg, and if the value had FUTEX_WAITERS
- * set, wake up a waiter (if any). (We have to do a
- * futex_wake() even if OWNER_DIED is already set -
- * to handle the rare but possible case of recursive
- * thread-death.) The rest of the cleanup is done in
- * userspace.
- */
- mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
- /*
- * We are not holding a lock here, but we want to have
- * the pagefault_disable/enable() protection because
- * we want to handle the fault gracefully. If the
- * access fails we try to fault in the futex with R/W
- * verification via get_user_pages. get_user() above
- * does not guarantee R/W access. If that fails we
- * give up and leave the futex locked.
- */
- if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
+ if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
+ return 0;
+
+ /*
+ * Ok, this dying thread is truly holding a futex
+ * of interest. Set the OWNER_DIED bit atomically
+ * via cmpxchg, and if the value had FUTEX_WAITERS
+ * set, wake up a waiter (if any). (We have to do a
+ * futex_wake() even if OWNER_DIED is already set -
+ * to handle the rare but possible case of recursive
+ * thread-death.) The rest of the cleanup is done in
+ * userspace.
+ */
+ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
+
+ /*
+ * We are not holding a lock here, but we want to have
+ * the pagefault_disable/enable() protection because
+ * we want to handle the fault gracefully. If the
+ * access fails we try to fault in the futex with R/W
+ * verification via get_user_pages. get_user() above
+ * does not guarantee R/W access. If that fails we
+ * give up and leave the futex locked.
+ */
+ if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
+ switch (err) {
+ case -EFAULT:
if (fault_in_user_writeable(uaddr))
return -1;
goto retry;
- }
- if (nval != uval)
+
+ case -EAGAIN:
+ cond_resched();
goto retry;
- /*
- * Wake robust non-PI futexes here. The wakeup of
- * PI futexes happens in exit_pi_state():
- */
- if (!pi && (uval & FUTEX_WAITERS))
- futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
+ default:
+ WARN_ON_ONCE(1);
+ return err;
+ }
}
+
+ if (nval != uval)
+ goto retry;
+
+ /*
+ * Wake robust non-PI futexes here. The wakeup of
+ * PI futexes happens in exit_pi_state():
+ */
+ if (!pi && (uval & FUTEX_WAITERS))
+ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
+
return 0;
}
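
As a side note on the mval computation this hunk keeps: a stand-alone sketch (user-space C; the constants are copied from include/uapi/linux/futex.h, the tid value is made up) showing how the owner TID is cleared while OWNER_DIED is set and WAITERS is preserved for the wakeup:

    #include <stdio.h>
    #include <stdint.h>

    #define FUTEX_WAITERS    0x80000000u
    #define FUTEX_OWNER_DIED 0x40000000u
    #define FUTEX_TID_MASK   0x3fffffffu

    int main(void)
    {
            uint32_t uval = FUTEX_WAITERS | 1234; /* dying owner tid 1234, waiters queued */
            uint32_t mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

            /* TID is cleared, OWNER_DIED set, WAITERS bit carried over */
            printf("uval=0x%08x mval=0x%08x tid=%u\n",
                   uval, mval, mval & FUTEX_TID_MASK);
            return 0;
    }
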
diff --git a/kernel/iomem.c b/kernel/iomem.c
index f7525e14ebc6..93c264444510 100644
--- a/kernel/iomem.c
+++ b/kernel/iomem.c
@@ -55,7 +55,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size,
*
* MEMREMAP_WB - matches the default mapping for System RAM on
* the architecture. This is usually a read-allocate write-back cache.
- * Morever, if MEMREMAP_WB is specified and the requested remap region is RAM
+ * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
* memremap() will bypass establishing a new mapping and instead return
* a pointer into the direct map.
*
@@ -86,7 +86,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
/* Try all mapping types requested until one returns non-NULL */
if (flags & MEMREMAP_WB) {
/*
- * MEMREMAP_WB is special in that it can be satisifed
+ * MEMREMAP_WB is special in that it can be satisfied
* from the direct map. Some archs depend on the
* capability of memremap() to autodetect cases where
* the requested range is potentially in System RAM.
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3faef4a77f71..51128bea3846 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1449,6 +1449,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
data = data->parent_data;
+
+ if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+ return 0;
+
if (data->chip->irq_set_wake)
return data->chip->irq_set_wake(data, on);
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 5d5378ea0afe..f6e5515ee077 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -84,8 +84,6 @@ EXPORT_SYMBOL(devm_request_threaded_irq);
* @dev: device to request interrupt for
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
- * @thread_fn: function to be called in a threaded interrupt context. NULL
- * for devices which handle everything in @handler
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device, dev_name(dev) if NULL
* @dev_id: A cookie passed back to the handler function
@@ -222,9 +220,8 @@ devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
irq_flow_handler_t handler)
{
struct irq_chip_generic *gc;
- unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
- gc = devm_kzalloc(dev, sz, GFP_KERNEL);
+ gc = devm_kzalloc(dev, struct_size(gc, chip_types, num_ct), GFP_KERNEL);
if (gc)
irq_init_generic_chip(gc, name, num_ct,
irq_base, reg_base, handler);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 13539e12cd80..9f8a709337cf 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -558,6 +558,7 @@ int __init early_irq_init(void)
alloc_masks(&desc[i], node);
raw_spin_lock_init(&desc[i].lock);
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+ mutex_init(&desc[i].request_mutex);
desc_set_defaults(i, &desc[i], node, NULL, NULL);
}
return arch_early_irq_init();
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ec43ab2fdfda..78f3ddeb7fe4 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -196,6 +196,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
cpumask_copy(desc->irq_common_data.affinity, mask);
+ /* fall through */
case IRQ_SET_MASK_OK_NOCOPY:
irq_validate_effective_affinity(data);
irq_set_thread_affinity(desc);
@@ -356,8 +357,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
desc->affinity_notify = notify;
raw_spin_unlock_irqrestore(&desc->lock, flags);
- if (old_notify)
+ if (old_notify) {
+ cancel_work_sync(&old_notify->work);
kref_put(&old_notify->kref, old_notify->release);
+ }
return 0;
}
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
index 1e4cb63a5c82..90c735da15d0 100644
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -9,6 +9,7 @@
#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/math64.h>
+#include <linux/log2.h>
#include <trace/events/irq.h>
@@ -18,16 +19,6 @@ DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
DEFINE_PER_CPU(struct irq_timings, irq_timings);
-struct irqt_stat {
- u64 next_evt;
- u64 last_ts;
- u64 variance;
- u32 avg;
- u32 nr_samples;
- int anomalies;
- int valid;
-};
-
static DEFINE_IDR(irqt_stats);
void irq_timings_enable(void)
@@ -40,75 +31,360 @@ void irq_timings_disable(void)
static_branch_disable(&irq_timing_enabled);
}
-/**
- * irqs_update - update the irq timing statistics with a new timestamp
+/*
+ * The main goal of this algorithm is to predict the next interrupt
+ * occurrence on the current CPU.
+ *
+ * Currently, the interrupt timings are stored in a circular array
+ * buffer every time there is an interrupt, as a tuple: the interrupt
+ * number and the associated timestamp when the event occurred <irq,
+ * timestamp>.
+ *
+ * For every interrupt occurring in a short period of time, we can
+ * measure the elapsed time between the occurrences for the same
+ * interrupt and we end up with a suite of intervals. Experience
+ * has shown that interrupts often arrive following a periodic
+ * pattern.
+ *
+ * The objective of the algorithm is to find this periodic pattern
+ * as quickly as possible and use its period to predict the next
+ * irq event.
+ *
+ * When the next interrupt event is requested, we are in the situation
+ * where the interrupts are disabled and the circular buffer
+ * containing the timings is filled with the events which happened
+ * after the previous next-interrupt-event request.
+ *
+ * At this point, we read the circular buffer and we fill the irq
+ * related statistics structure. After this step, the circular array
+ * containing the timings is empty because all the values are
+ * dispatched in their corresponding buffers.
+ *
+ * Now for each interrupt, we can predict the next event by using the
+ * suffix array, log interval and exponential moving average
+ *
+ * 1. Suffix array
+ *
+ * A suffix array is an array of all the suffixes of a string. It is
+ * widely used as a data structure for compression, text search, etc.
+ * For instance, for the word 'banana', the suffixes will be: 'banana'
+ * 'anana' 'nana' 'ana' 'na' 'a'
+ *
+ * Usually the suffix array is sorted, but for our purpose that is
+ * not necessary and would not provide any improvement, since the
+ * problem we solve clearly defines the boundaries of the search by
+ * a max period and a min period.
+ *
+ * The suffix array will build a suite of intervals of different
+ * lengths and will look for the repetition of each suite. If the
+ * suite repeats, then we have the period, because the period is the
+ * length of the suite, whatever its position in the buffer.
+ *
+ * 2. Log interval
+ *
+ * We saw that the irq timings allow us to compute the interval of
+ * the occurrences for a specific interrupt. We can reasonably assume
+ * that the longer the interval, the higher the error for the next
+ * event, and we can consider storing those interval values into an
+ * array where each slot corresponds to an interval on a power-of-2
+ * basis of the index. For example, index 10 will contain values
+ * between 2^10 and 2^11.
+ *
+ * At the end we have an array of values where each index defines a
+ * [2^index, 2^(index + 1)) interval of values, allowing us to store
+ * a large number of values inside a small array.
+ *
+ * For example, if we have the value 1123, then we store it at
+ * index ilog2(1123) = 10.
+ *
+ * Storing those values at the specific index is done by computing an
+ * exponential moving average for this specific slot. For instance,
+ * the values 1800, 1123, 1453, ... all fall under the same slot (10)
+ * and the exponential moving average is computed every time a new
+ * value is stored at this slot.
+ *
+ * 3. Exponential Moving Average
+ *
+ * The EMA is widely used to track a signal for stocks or as a low
+ * pass filter. The beauty of the formula is that it is very simple,
+ * and the reactivity of the average can be tuned with a factor
+ * called alpha.
+ *
+ * The higher the alpha, the faster the average responds to signal
+ * changes. In our case, if a slot in the array covers a big
+ * interval, we can have numbers with a big difference between
+ * them. The impact of those differences on the average computation
+ * can be tuned by changing the alpha value.
+ *
+ *
+ * -- The algorithm --
+ *
+ * We saw the different processing above, now let's see how they are
+ * used together.
+ *
+ * For each interrupt:
+ * For each interval:
+ * Compute the index = ilog2(interval)
+ * Compute a new_ema(buffer[index], interval)
+ * Store the index in a circular buffer
+ *
+ * Compute the suffix array of the indexes
+ *
+ * For each suffix:
+ * If the suffix is reverse-found 3 times
+ * Return suffix
+ *
+ * Return Not found
+ *
+ * However, we cannot build endless suffix arrays; it would not make
+ * sense and would add extra overhead, so we restrict this to a
+ * maximum suffix length of 5 and a minimum suffix length of 2.
+ * Experience has shown that 5 covers the majority of the maximum
+ * pattern periods found for different devices.
+ *
+ * The result is pattern detection in less than 1us for an interrupt.
*
- * @irqs: an irqt_stat struct pointer
- * @ts: the new timestamp
+ * Example based on real values:
*
- * The statistics are computed online, in other words, the code is
- * designed to compute the statistics on a stream of values rather
- * than doing multiple passes on the values to compute the average,
- * then the variance. The integer division introduces a loss of
- * precision but with an acceptable error margin regarding the results
- * we would have with the double floating precision: we are dealing
- * with nanosec, so big numbers, consequently the mantisse is
- * negligeable, especially when converting the time in usec
- * afterwards.
+ * Example 1 : MMC write/read interrupt interval:
*
- * The computation happens at idle time. When the CPU is not idle, the
- * interrupts' timestamps are stored in the circular buffer, when the
- * CPU goes idle and this routine is called, all the buffer's values
- * are injected in the statistical model continuying to extend the
- * statistics from the previous busy-idle cycle.
+ * 223947, 1240, 1384, 1386, 1386,
+ * 217416, 1236, 1384, 1386, 1387,
+ * 214719, 1241, 1386, 1387, 1384,
+ * 213696, 1234, 1384, 1386, 1388,
+ * 219904, 1240, 1385, 1389, 1385,
+ * 212240, 1240, 1386, 1386, 1386,
+ * 214415, 1236, 1384, 1386, 1387,
+ * 214276, 1234, 1384, 1388, ?
*
- * The observations showed a device will trigger a burst of periodic
- * interrupts followed by one or two peaks of longer time, for
- * instance when a SD card device flushes its cache, then the periodic
- * intervals occur again. A one second inactivity period resets the
- * stats, that gives us the certitude the statistical values won't
- * exceed 1x10^9, thus the computation won't overflow.
+ * For each element, apply ilog2(value)
*
- * Basically, the purpose of the algorithm is to watch the periodic
- * interrupts and eliminate the peaks.
+ * 15, 8, 8, 8, 8,
+ * 15, 8, 8, 8, 8,
+ * 15, 8, 8, 8, 8,
+ * 15, 8, 8, 8, 8,
+ * 15, 8, 8, 8, 8,
+ * 15, 8, 8, 8, 8,
+ * 15, 8, 8, 8, 8,
+ * 15, 8, 8, 8, ?
*
- * An interrupt is considered periodically stable if the interval of
- * its occurences follow the normal distribution, thus the values
- * comply with:
+ * With a max period of 5, we take the last (max_period * 3) = 15
+ * elements, as we can be confident that if the pattern repeats
+ * itself three times, it is a repeating pattern.
*
- * avg - 3 x stddev < value < avg + 3 x stddev
+ * 8,
+ * 15, 8, 8, 8, 8,
+ * 15, 8, 8, 8, 8,
+ * 15, 8, 8, 8, ?
*
- * Which can be simplified to:
+ * Suffixes are:
*
- * -3 x stddev < value - avg < 3 x stddev
+ * 1) 8, 15, 8, 8, 8 <- max period
+ * 2) 8, 15, 8, 8
+ * 3) 8, 15, 8
+ * 4) 8, 15 <- min period
*
- * abs(value - avg) < 3 x stddev
+ * From there we search the repeating pattern for each suffix.
*
- * In order to save a costly square root computation, we use the
- * variance. For the record, stddev = sqrt(variance). The equation
- * above becomes:
+ * buffer: 8, 15, 8, 8, 8, 8, 15, 8, 8, 8, 8, 15, 8, 8, 8
+ * | | | | | | | | | | | | | | |
+ * 8, 15, 8, 8, 8 | | | | | | | | | |
+ * 8, 15, 8, 8, 8 | | | | |
+ * 8, 15, 8, 8, 8
*
- * abs(value - avg) < 3 x sqrt(variance)
+ * When moving the suffix, we found exactly 3 matches.
*
- * And finally we square it:
+ * The first suffix with period 5 is repeating.
*
- * (value - avg) ^ 2 < (3 x sqrt(variance)) ^ 2
+ * The next event is (3 * max_period) % suffix_period
*
- * (value - avg) x (value - avg) < 9 x variance
+ * In this example, the result is 0, so the next event is suffix[0] => 8
*
- * Statistically speaking, any values out of this interval is
- * considered as an anomaly and is discarded. However, a normal
- * distribution appears when the number of samples is 30 (it is the
- * rule of thumb in statistics, cf. "30 samples" on Internet). When
- * there are three consecutive anomalies, the statistics are resetted.
+ * However, 8 is the index in the array of exponential moving
+ * averages, which was calculated on the fly when storing the
+ * values, so the interval is ema[8] = 1366
*
+ *
+ * Example 2:
+ *
+ * 4, 3, 5, 100,
+ * 3, 3, 5, 117,
+ * 4, 4, 5, 112,
+ * 4, 3, 4, 110,
+ * 3, 5, 3, 117,
+ * 4, 4, 5, 112,
+ * 4, 3, 4, 110,
+ * 3, 4, 5, 112,
+ * 4, 3, 4, 110
+ *
+ * ilog2
+ *
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4
+ *
+ * Max period 5:
+ * 0, 0, 4,
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4,
+ * 0, 0, 0, 4
+ *
+ * Suffixes:
+ *
+ * 1) 0, 0, 4, 0, 0
+ * 2) 0, 0, 4, 0
+ * 3) 0, 0, 4
+ * 4) 0, 0
+ *
+ * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
+ * | | | | | | X
+ * 0, 0, 4, 0, 0, | X
+ * 0, 0
+ *
+ * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
+ * | | | | | | | | | | | | | | |
+ * 0, 0, 4, 0, | | | | | | | | | | |
+ * 0, 0, 4, 0, | | | | | | |
+ * 0, 0, 4, 0, | | |
+ * 0 0 4
+ *
+ * The pattern is found 3 times; the remainder is 1, which results
+ * from (max_period * 3) % suffix_period. This value is the index in
+ * the suffix array. The suffix array for a period 4 has the value 4
+ * at index 1.
+ */
+#define EMA_ALPHA_VAL 64
+#define EMA_ALPHA_SHIFT 7
+
+#define PREDICTION_PERIOD_MIN 2
+#define PREDICTION_PERIOD_MAX 5
+#define PREDICTION_FACTOR 4
+#define PREDICTION_MAX 10 /* 2 ^ PREDICTION_MAX useconds */
+#define PREDICTION_BUFFER_SIZE 16 /* slots for EMAs, hardly more than 16 */
+
+struct irqt_stat {
+ u64 last_ts;
+ u64 ema_time[PREDICTION_BUFFER_SIZE];
+ int timings[IRQ_TIMINGS_SIZE];
+ int circ_timings[IRQ_TIMINGS_SIZE];
+ int count;
+};
+
+/*
+ * Exponential moving average computation
*/
-static void irqs_update(struct irqt_stat *irqs, u64 ts)
+static u64 irq_timings_ema_new(u64 value, u64 ema_old)
+{
+ s64 diff;
+
+ if (unlikely(!ema_old))
+ return value;
+
+ diff = (value - ema_old) * EMA_ALPHA_VAL;
+ /*
+	 * We can add an s64 variable to the u64 ema_old variable
+	 * because the latter will never have its topmost bit set:
+	 * it will always be smaller than a 2^63 nanosec interrupt
+	 * interval (292 years).
+ */
+ return ema_old + (diff >> EMA_ALPHA_SHIFT);
+}
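
As a sanity check of the arithmetic in irq_timings_ema_new(), a stand-alone sketch (plain C, outside the kernel) applying alpha = EMA_ALPHA_VAL / 2^EMA_ALPHA_SHIFT = 64/128 = 0.5 to the slot-10 values 1800, 1123, 1453 used as an example in the comment above:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t ema_new(uint64_t value, uint64_t ema_old)
    {
            int64_t diff;

            if (!ema_old)                   /* first sample seeds the average */
                    return value;

            diff = (int64_t)(value - ema_old) * 64;  /* EMA_ALPHA_VAL */
            return ema_old + (diff >> 7);            /* arithmetic shift: /128 */
    }

    int main(void)
    {
            uint64_t samples[] = { 1800, 1123, 1453 };
            uint64_t ema = 0;
            int i;

            /* prints 1800, then 1461, then 1457 */
            for (i = 0; i < 3; i++) {
                    ema = ema_new(samples[i], ema);
                    printf("after %llu: ema = %llu\n",
                           (unsigned long long)samples[i],
                           (unsigned long long)ema);
            }
            return 0;
    }

Each new sample moves the average halfway toward it, which is exactly the reactivity the alpha factor controls.
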
+
+static int irq_timings_next_event_index(int *buffer, size_t len, int period_max)
+{
+ int i;
+
+ /*
+	 * The buffer contains the suite of intervals on an ilog2
+	 * basis, and we are looking for a repetition. We point the
+	 * beginning of the search at three times the length of the
+	 * period, starting from the end of the buffer. We do that
+	 * for each suffix.
+ */
+ for (i = period_max; i >= PREDICTION_PERIOD_MIN ; i--) {
+
+ int *begin = &buffer[len - (i * 3)];
+ int *ptr = begin;
+
+ /*
+		 * We check whether the suite with period 'i' repeats
+		 * itself. If it is truncated at the end, since it
+		 * repeats we can use the period to find out the next
+		 * element.
+ */
+ while (!memcmp(ptr, begin, i * sizeof(*ptr))) {
+ ptr += i;
+ if (ptr >= &buffer[len])
+ return begin[((i * 3) % i)];
+ }
+ }
+
+ return -1;
+}
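
To see the search in action, a stand-alone sketch (user-space C; PERIOD_MIN mirrors PREDICTION_PERIOD_MIN, the data is the 15-element buffer from Example 1 above) that reproduces the loop of irq_timings_next_event_index():

    #include <stdio.h>
    #include <string.h>

    #define PERIOD_MIN 2

    static int next_event_index(int *buffer, size_t len, int period_max)
    {
            int i;

            for (i = period_max; i >= PERIOD_MIN; i--) {
                    int *begin = &buffer[len - (i * 3)];
                    int *ptr = begin;

                    /* slide forward period by period while it keeps matching */
                    while (!memcmp(ptr, begin, i * sizeof(*ptr))) {
                            ptr += i;
                            if (ptr >= &buffer[len])
                                    return begin[(i * 3) % i];
                    }
            }
            return -1;
    }

    int main(void)
    {
            /* the last 15 ilog2 slots from Example 1 */
            int buf[] = { 8, 15, 8, 8, 8, 8, 15, 8, 8, 8, 8, 15, 8, 8, 8 };

            printf("next slot index = %d\n",
                   next_event_index(buf, sizeof(buf) / sizeof(buf[0]), 5)); /* 8 */
            return 0;
    }

The period-5 suffix matches three times, so the search returns begin[0], i.e. the slot index 8, whose EMA then yields the predicted interval.
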
+
+static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now)
+{
+ int index, i, period_max, count, start, min = INT_MAX;
+
+ if ((now - irqs->last_ts) >= NSEC_PER_SEC) {
+ irqs->count = irqs->last_ts = 0;
+ return U64_MAX;
+ }
+
+ /*
+	 * As we want to find the repetition three times, we need a
+	 * number of intervals greater than or equal to three times the
+	 * maximum period; otherwise we truncate the max period.
+ */
+ period_max = irqs->count > (3 * PREDICTION_PERIOD_MAX) ?
+ PREDICTION_PERIOD_MAX : irqs->count / 3;
+
+ /*
+ * If we don't have enough irq timings for this prediction,
+ * just bail out.
+ */
+ if (period_max <= PREDICTION_PERIOD_MIN)
+ return U64_MAX;
+
+ /*
+	 * 'count' depends on whether the circular buffer wrapped or not
+ */
+ count = irqs->count < IRQ_TIMINGS_SIZE ?
+ irqs->count : IRQ_TIMINGS_SIZE;
+
+ start = irqs->count < IRQ_TIMINGS_SIZE ?
+ 0 : (irqs->count & IRQ_TIMINGS_MASK);
+
+ /*
+	 * Copy the content of the circular buffer into another buffer
+	 * in order to linearize it, instead of dealing with wrapping
+	 * indexes and a shifted array, which would be error prone and
+	 * extremely difficult to debug.
+ */
+ for (i = 0; i < count; i++) {
+ int index = (start + i) & IRQ_TIMINGS_MASK;
+
+ irqs->timings[i] = irqs->circ_timings[index];
+ min = min_t(int, irqs->timings[i], min);
+ }
+
+ index = irq_timings_next_event_index(irqs->timings, count, period_max);
+ if (index < 0)
+ return irqs->last_ts + irqs->ema_time[min];
+
+ return irqs->last_ts + irqs->ema_time[index];
+}
+
+static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)
{
u64 old_ts = irqs->last_ts;
- u64 variance = 0;
u64 interval;
- s64 diff;
+ int index;
/*
* The timestamps are absolute time values, we need to compute
@@ -135,87 +411,28 @@ static void irqs_update(struct irqt_stat *irqs, u64 ts)
* want as we need another timestamp to compute an interval.
*/
if (interval >= NSEC_PER_SEC) {
- memset(irqs, 0, sizeof(*irqs));
- irqs->last_ts = ts;
+ irqs->count = 0;
return;
}
/*
- * Pre-compute the delta with the average as the result is
- * used several times in this function.
- */
- diff = interval - irqs->avg;
-
- /*
- * Increment the number of samples.
- */
- irqs->nr_samples++;
-
- /*
- * Online variance divided by the number of elements if there
- * is more than one sample. Normally the formula is division
- * by nr_samples - 1 but we assume the number of element will be
- * more than 32 and dividing by 32 instead of 31 is enough
- * precise.
- */
- if (likely(irqs->nr_samples > 1))
- variance = irqs->variance >> IRQ_TIMINGS_SHIFT;
-
- /*
- * The rule of thumb in statistics for the normal distribution
- * is having at least 30 samples in order to have the model to
- * apply. Values outside the interval are considered as an
- * anomaly.
- */
- if ((irqs->nr_samples >= 30) && ((diff * diff) > (9 * variance))) {
- /*
- * After three consecutive anomalies, we reset the
- * stats as it is no longer stable enough.
- */
- if (irqs->anomalies++ >= 3) {
- memset(irqs, 0, sizeof(*irqs));
- irqs->last_ts = ts;
- return;
- }
- } else {
- /*
- * The anomalies must be consecutives, so at this
- * point, we reset the anomalies counter.
- */
- irqs->anomalies = 0;
- }
-
- /*
- * The interrupt is considered stable enough to try to predict
- * the next event on it.
+ * Get the index in the ema table for this interrupt. The
+	 * PREDICTION_FACTOR increases the interval size for the array
+	 * of exponential averages.
*/
- irqs->valid = 1;
+ index = likely(interval) ?
+ ilog2((interval >> 10) / PREDICTION_FACTOR) : 0;
/*
- * Online average algorithm:
- *
- * new_average = average + ((value - average) / count)
- *
- * The variance computation depends on the new average
- * to be computed here first.
- *
+ * Store the index as an element of the pattern in another
+ * circular array.
*/
- irqs->avg = irqs->avg + (diff >> IRQ_TIMINGS_SHIFT);
+ irqs->circ_timings[irqs->count & IRQ_TIMINGS_MASK] = index;
- /*
- * Online variance algorithm:
- *
- * new_variance = variance + (value - average) x (value - new_average)
- *
- * Warning: irqs->avg is updated with the line above, hence
- * 'interval - irqs->avg' is no longer equal to 'diff'
- */
- irqs->variance = irqs->variance + (diff * (interval - irqs->avg));
+ irqs->ema_time[index] = irq_timings_ema_new(interval,
+ irqs->ema_time[index]);
- /*
- * Update the next event
- */
- irqs->next_evt = ts + irqs->avg;
+ irqs->count++;
}
/**
@@ -259,6 +476,9 @@ u64 irq_timings_next_event(u64 now)
*/
lockdep_assert_irqs_disabled();
+ if (!irqts->count)
+ return next_evt;
+
/*
* Number of elements in the circular buffer: If it happens it
* was flushed before, then the number of elements could be
@@ -269,21 +489,19 @@ u64 irq_timings_next_event(u64 now)
* type but with the cost of extra computation in the
* interrupt handler hot path. We choose efficiency.
*
- * Inject measured irq/timestamp to the statistical model
- * while decrementing the counter because we consume the data
- * from our circular buffer.
+ * Inject measured irq/timestamp to the pattern prediction
+ * model while decrementing the counter because we consume the
+ * data from our circular buffer.
*/
- for (i = irqts->count & IRQ_TIMINGS_MASK,
- irqts->count = min(IRQ_TIMINGS_SIZE, irqts->count);
- irqts->count > 0; irqts->count--, i = (i + 1) & IRQ_TIMINGS_MASK) {
- irq = irq_timing_decode(irqts->values[i], &ts);
+ i = (irqts->count & IRQ_TIMINGS_MASK) - 1;
+ irqts->count = min(IRQ_TIMINGS_SIZE, irqts->count);
+ for (; irqts->count > 0; irqts->count--, i = (i + 1) & IRQ_TIMINGS_MASK) {
+ irq = irq_timing_decode(irqts->values[i], &ts);
s = idr_find(&irqt_stats, irq);
- if (s) {
- irqs = this_cpu_ptr(s);
- irqs_update(irqs, ts);
- }
+ if (s)
+ irq_timings_store(irq, this_cpu_ptr(s), ts);
}
/*
@@ -294,26 +512,12 @@ u64 irq_timings_next_event(u64 now)
irqs = this_cpu_ptr(s);
- if (!irqs->valid)
- continue;
+ ts = __irq_timings_next_event(irqs, i, now);
+ if (ts <= now)
+ return now;
- if (irqs->next_evt <= now) {
- irq = i;
- next_evt = now;
-
- /*
- * This interrupt mustn't use in the future
- * until new events occur and update the
- * statistics.
- */
- irqs->valid = 0;
- break;
- }
-
- if (irqs->next_evt < next_evt) {
- irq = i;
- next_evt = irqs->next_evt;
- }
+ if (ts < next_evt)
+ next_evt = ts;
}
return next_evt;
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 6b7cdf17ccf8..73288914ed5e 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
*/
}
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
{
- /* All work should have been flushed before going offline */
- WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
- /* Arch remote IPI send/receive backend aren't NMI safe */
- WARN_ON_ONCE(in_nmi());
+ /* If the work is "lazy", handle it from next tick if any */
+ if (work->flags & IRQ_WORK_LAZY) {
+ if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+ tick_nohz_tick_stopped())
+ arch_irq_work_raise();
+ } else {
+ if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+ arch_irq_work_raise();
+ }
+}
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
/* Only queue if not already pending */
if (!irq_work_claim(work))
return false;
- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
- arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
- irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
+ /* Queue the entry and raise the IPI if needed. */
+ preempt_disable();
+ __irq_work_queue_local(work);
+ preempt_enable();
return true;
}
+EXPORT_SYMBOL_GPL(irq_work_queue);
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
{
+#ifndef CONFIG_SMP
+ return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+ /* All work should have been flushed before going offline */
+ WARN_ON_ONCE(cpu_is_offline(cpu));
+
/* Only queue if not already pending */
if (!irq_work_claim(work))
return false;
- /* Queue the entry and raise the IPI if needed. */
preempt_disable();
-
- /* If the work is "lazy", handle it from next tick if any */
- if (work->flags & IRQ_WORK_LAZY) {
- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
- tick_nohz_tick_stopped())
- arch_irq_work_raise();
+ if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backends aren't NMI safe */
+ WARN_ON_ONCE(in_nmi());
+ if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+ arch_send_call_function_single_ipi(cpu);
} else {
- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
- arch_irq_work_raise();
+ __irq_work_queue_local(work);
}
-
preempt_enable();
return true;
+#endif /* CONFIG_SMP */
}
-EXPORT_SYMBOL_GPL(irq_work_queue);
+
bool irq_work_needs_cpu(void)
{
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bad96b476eb6..de6efdecc70d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -202,11 +202,13 @@ void static_key_disable(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_disable);
-static void __static_key_slow_dec_cpuslocked(struct static_key *key,
- unsigned long rate_limit,
- struct delayed_work *work)
+static bool static_key_slow_try_dec(struct static_key *key)
{
- lockdep_assert_cpus_held();
+ int val;
+
+ val = atomic_fetch_add_unless(&key->enabled, -1, 1);
+ if (val == 1)
+ return false;
/*
* The negative count check is valid even when a negative
@@ -215,63 +217,70 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
* returns is unbalanced, because all other static_key_slow_inc()
* instances block while the update is in progress.
*/
- if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
- WARN(atomic_read(&key->enabled) < 0,
- "jump label: negative count!\n");
+ WARN(val < 0, "jump label: negative count!\n");
+ return true;
+}
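
The fast path above hinges on atomic_fetch_add_unless(&key->enabled, -1, 1): decrement the count unless it is exactly 1, and return the old value. A hedged user-space C11 rendition of that primitive (illustrative only, not the kernel implementation):

    #include <stdatomic.h>
    #include <stdio.h>

    /* decrement-unless-equal: add 'a' to *v unless *v == u, return old value */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
                    ;       /* CAS failure reloads c, then retry */
            return c;
    }

    int main(void)
    {
            atomic_int enabled = 2;

            printf("old=%d new=%d\n", fetch_add_unless(&enabled, -1, 1),
                   atomic_load(&enabled));          /* old=2 new=1 */
            printf("old=%d new=%d\n", fetch_add_unless(&enabled, -1, 1),
                   atomic_load(&enabled));          /* old=1 new=1 */
            return 0;
    }

A returned value of 1 means the key would have dropped to zero, so static_key_slow_try_dec() reports failure and the caller falls back to taking jump_label_mutex for the final decrement.
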
+
+static void __static_key_slow_dec_cpuslocked(struct static_key *key)
+{
+ lockdep_assert_cpus_held();
+
+ if (static_key_slow_try_dec(key))
return;
- }
- if (rate_limit) {
- atomic_inc(&key->enabled);
- schedule_delayed_work(work, rate_limit);
- } else {
+ jump_label_lock();
+ if (atomic_dec_and_test(&key->enabled))
jump_label_update(key);
- }
jump_label_unlock();
}
-static void __static_key_slow_dec(struct static_key *key,
- unsigned long rate_limit,
- struct delayed_work *work)
+static void __static_key_slow_dec(struct static_key *key)
{
cpus_read_lock();
- __static_key_slow_dec_cpuslocked(key, rate_limit, work);
+ __static_key_slow_dec_cpuslocked(key);
cpus_read_unlock();
}
-static void jump_label_update_timeout(struct work_struct *work)
+void jump_label_update_timeout(struct work_struct *work)
{
struct static_key_deferred *key =
container_of(work, struct static_key_deferred, work.work);
- __static_key_slow_dec(&key->key, 0, NULL);
+ __static_key_slow_dec(&key->key);
}
+EXPORT_SYMBOL_GPL(jump_label_update_timeout);
void static_key_slow_dec(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
- __static_key_slow_dec(key, 0, NULL);
+ __static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
void static_key_slow_dec_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
- __static_key_slow_dec_cpuslocked(key, 0, NULL);
+ __static_key_slow_dec_cpuslocked(key);
}
-void static_key_slow_dec_deferred(struct static_key_deferred *key)
+void __static_key_slow_dec_deferred(struct static_key *key,
+ struct delayed_work *work,
+ unsigned long timeout)
{
STATIC_KEY_CHECK_USE(key);
- __static_key_slow_dec(&key->key, key->timeout, &key->work);
+
+ if (static_key_slow_try_dec(key))
+ return;
+
+ schedule_delayed_work(work, timeout);
}
-EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
-void static_key_deferred_flush(struct static_key_deferred *key)
+void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
STATIC_KEY_CHECK_USE(key);
- flush_delayed_work(&key->work);
+ flush_delayed_work(work);
}
-EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index d7140447be75..fd5c95ff9251 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1150,7 +1150,7 @@ int kernel_kexec(void)
error = dpm_suspend_end(PMSG_FREEZE);
if (error)
goto Resume_devices;
- error = disable_nonboot_cpus();
+ error = suspend_disable_secondary_cpus();
if (error)
goto Enable_cpus;
local_irq_disable();
@@ -1183,7 +1183,7 @@ int kernel_kexec(void)
Enable_irqs:
local_irq_enable();
Enable_cpus:
- enable_nonboot_cpus();
+ suspend_enable_secondary_cpus();
dpm_resume_start(PMSG_RESTORE);
Resume_devices:
dpm_resume_end(PMSG_RESTORE);
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index f1d0e00a3971..f7fb8f6a688f 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -688,7 +688,6 @@ static int kexec_calculate_store_digests(struct kimage *image)
goto out_free_desc;
desc->tfm = tfm;
- desc->flags = 0;
ret = crypto_shash_init(desc);
if (ret < 0)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c83e54727131..b1ea30a5540e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -709,7 +709,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
static int reuse_unused_kprobe(struct kprobe *ap)
{
struct optimized_kprobe *op;
- int ret;
/*
* Unused kprobe MUST be on the way of delayed unoptimizing (means
@@ -720,9 +719,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
- ret = kprobe_optready(ap);
- if (ret)
- return ret;
+ if (!kprobe_optready(ap))
+ return -EINVAL;
optimize_kprobe(ap);
return 0;
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 96b4179cee6a..99a5b5f46dc5 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -120,8 +120,8 @@ account_global_scheduler_latency(struct task_struct *tsk,
break;
}
- /* 0 and ULONG_MAX entries mean end of backtrace: */
- if (record == 0 || record == ULONG_MAX)
+ /* 0 entry marks end of backtrace: */
+ if (!record)
break;
}
if (same) {
@@ -141,20 +141,6 @@ account_global_scheduler_latency(struct task_struct *tsk,
memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}
-/*
- * Iterator to store a backtrace into a latency record entry
- */
-static inline void store_stacktrace(struct task_struct *tsk,
- struct latency_record *lat)
-{
- struct stack_trace trace;
-
- memset(&trace, 0, sizeof(trace));
- trace.max_entries = LT_BACKTRACEDEPTH;
- trace.entries = &lat->backtrace[0];
- save_stack_trace_tsk(tsk, &trace);
-}
-
/**
* __account_scheduler_latency - record an occurred latency
* @tsk - the task struct of the task hitting the latency
@@ -191,7 +177,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
lat.count = 1;
lat.time = usecs;
lat.max = usecs;
- store_stacktrace(tsk, &lat);
+
+ stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);
raw_spin_lock_irqsave(&latency_lock, flags);
@@ -210,8 +197,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
break;
}
- /* 0 and ULONG_MAX entries mean end of backtrace: */
- if (record == 0 || record == ULONG_MAX)
+ /* 0 entry is end of backtrace */
+ if (!record)
break;
}
if (same) {
@@ -252,10 +239,10 @@ static int lstats_show(struct seq_file *m, void *v)
lr->count, lr->time, lr->max);
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
unsigned long bt = lr->backtrace[q];
+
if (!bt)
break;
- if (bt == ULONG_MAX)
- break;
+
seq_printf(m, " %ps", (void *)bt);
}
seq_puts(m, "\n");
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index eb0ee10a1981..f12c0eabd843 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -426,7 +426,13 @@ static void klp_free_object_dynamic(struct klp_object *obj)
kfree(obj);
}
-static struct klp_object *klp_alloc_object_dynamic(const char *name)
+static void klp_init_func_early(struct klp_object *obj,
+ struct klp_func *func);
+static void klp_init_object_early(struct klp_patch *patch,
+ struct klp_object *obj);
+
+static struct klp_object *klp_alloc_object_dynamic(const char *name,
+ struct klp_patch *patch)
{
struct klp_object *obj;
@@ -442,7 +448,7 @@ static struct klp_object *klp_alloc_object_dynamic(const char *name)
}
}
- INIT_LIST_HEAD(&obj->func_list);
+ klp_init_object_early(patch, obj);
obj->dynamic = true;
return obj;
@@ -471,6 +477,7 @@ static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
}
}
+ klp_init_func_early(obj, func);
/*
* func->new_func is same as func->old_func. These addresses are
* set when the object is loaded, see klp_init_object_loaded().
@@ -490,11 +497,9 @@ static int klp_add_object_nops(struct klp_patch *patch,
obj = klp_find_object(patch, old_obj);
if (!obj) {
- obj = klp_alloc_object_dynamic(old_obj->name);
+ obj = klp_alloc_object_dynamic(old_obj->name, patch);
if (!obj)
return -ENOMEM;
-
- list_add_tail(&obj->node, &patch->obj_list);
}
klp_for_each_func(old_obj, old_func) {
@@ -505,8 +510,6 @@ static int klp_add_object_nops(struct klp_patch *patch,
func = klp_alloc_func_nop(old_func, obj);
if (!func)
return -ENOMEM;
-
- list_add_tail(&func->node, &obj->func_list);
}
return 0;
@@ -588,13 +591,7 @@ static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
continue;
list_del(&func->node);
-
- /* Might be called from klp_init_patch() error path. */
- if (func->kobj_added) {
- kobject_put(&func->kobj);
- } else if (func->nop) {
- klp_free_func_nop(func);
- }
+ kobject_put(&func->kobj);
}
}
@@ -624,13 +621,7 @@ static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
continue;
list_del(&obj->node);
-
- /* Might be called from klp_init_patch() error path. */
- if (obj->kobj_added) {
- kobject_put(&obj->kobj);
- } else if (obj->dynamic) {
- klp_free_object_dynamic(obj);
- }
+ kobject_put(&obj->kobj);
}
}
@@ -675,10 +666,8 @@ static void klp_free_patch_finish(struct klp_patch *patch)
* this is called when the patch gets disabled and it
* cannot get enabled again.
*/
- if (patch->kobj_added) {
- kobject_put(&patch->kobj);
- wait_for_completion(&patch->finish);
- }
+ kobject_put(&patch->kobj);
+ wait_for_completion(&patch->finish);
/* Put the module after the last access to struct klp_patch. */
if (!patch->forced)
@@ -700,8 +689,6 @@ static void klp_free_patch_work_fn(struct work_struct *work)
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
- int ret;
-
if (!func->old_name)
return -EINVAL;
@@ -724,13 +711,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
* object. If the user selects 0 for old_sympos, then 1 will be used
* since a unique symbol will be the first occurrence.
*/
- ret = kobject_init_and_add(&func->kobj, &klp_ktype_func,
- &obj->kobj, "%s,%lu", func->old_name,
- func->old_sympos ? func->old_sympos : 1);
- if (!ret)
- func->kobj_added = true;
-
- return ret;
+ return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
+ func->old_name,
+ func->old_sympos ? func->old_sympos : 1);
}
/* Arches may override this to finish any remaining arch-specific tasks */
@@ -801,11 +784,9 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
klp_find_object_module(obj);
name = klp_is_module(obj) ? obj->name : "vmlinux";
- ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
- &patch->kobj, "%s", name);
+ ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
if (ret)
return ret;
- obj->kobj_added = true;
klp_for_each_func(obj, func) {
ret = klp_init_func(obj, func);
@@ -819,6 +800,21 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
return ret;
}
+static void klp_init_func_early(struct klp_object *obj,
+ struct klp_func *func)
+{
+ kobject_init(&func->kobj, &klp_ktype_func);
+ list_add_tail(&func->node, &obj->func_list);
+}
+
+static void klp_init_object_early(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ INIT_LIST_HEAD(&obj->func_list);
+ kobject_init(&obj->kobj, &klp_ktype_object);
+ list_add_tail(&obj->node, &patch->obj_list);
+}
+
static int klp_init_patch_early(struct klp_patch *patch)
{
struct klp_object *obj;
@@ -829,7 +825,7 @@ static int klp_init_patch_early(struct klp_patch *patch)
INIT_LIST_HEAD(&patch->list);
INIT_LIST_HEAD(&patch->obj_list);
- patch->kobj_added = false;
+ kobject_init(&patch->kobj, &klp_ktype_patch);
patch->enabled = false;
patch->forced = false;
INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
@@ -839,13 +835,10 @@ static int klp_init_patch_early(struct klp_patch *patch)
if (!obj->funcs)
return -EINVAL;
- INIT_LIST_HEAD(&obj->func_list);
- obj->kobj_added = false;
- list_add_tail(&obj->node, &patch->obj_list);
+ klp_init_object_early(patch, obj);
klp_for_each_func_static(obj, func) {
- func->kobj_added = false;
- list_add_tail(&func->node, &obj->func_list);
+ klp_init_func_early(obj, func);
}
}
@@ -860,11 +853,9 @@ static int klp_init_patch(struct klp_patch *patch)
struct klp_object *obj;
int ret;
- ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
- klp_root_kobj, "%s", patch->mod->name);
+ ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
if (ret)
return ret;
- patch->kobj_added = true;
if (patch->replace) {
ret = klp_add_nops(patch);
@@ -926,9 +917,6 @@ static int __klp_enable_patch(struct klp_patch *patch)
if (WARN_ON(patch->enabled))
return -EINVAL;
- if (!patch->kobj_added)
- return -EINVAL;
-
pr_notice("enabling patch '%s'\n", patch->mod->name);
klp_init_transition(patch, KLP_PATCHED);
@@ -1003,11 +991,10 @@ int klp_enable_patch(struct klp_patch *patch)
return -ENODEV;
if (!klp_have_reliable_stack()) {
- pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
- return -EOPNOTSUPP;
+ pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
+ pr_warn("The livepatch transition may never complete.\n");
}
-
mutex_lock(&klp_mutex);
ret = klp_init_patch_early(patch);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 9c89ae8b337a..c53370d596be 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -202,15 +202,15 @@ void klp_update_patch_state(struct task_struct *task)
* Determine whether the given stack trace includes any references to a
* to-be-patched or to-be-unpatched function.
*/
-static int klp_check_stack_func(struct klp_func *func,
- struct stack_trace *trace)
+static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
+ unsigned int nr_entries)
{
unsigned long func_addr, func_size, address;
struct klp_ops *ops;
int i;
- for (i = 0; i < trace->nr_entries; i++) {
- address = trace->entries[i];
+ for (i = 0; i < nr_entries; i++) {
+ address = entries[i];
if (klp_target_state == KLP_UNPATCHED) {
/*
@@ -254,29 +254,25 @@ static int klp_check_stack_func(struct klp_func *func,
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
static unsigned long entries[MAX_STACK_ENTRIES];
- struct stack_trace trace;
struct klp_object *obj;
struct klp_func *func;
- int ret;
+ int ret, nr_entries;
- trace.skip = 0;
- trace.nr_entries = 0;
- trace.max_entries = MAX_STACK_ENTRIES;
- trace.entries = entries;
- ret = save_stack_trace_tsk_reliable(task, &trace);
+ ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
WARN_ON_ONCE(ret == -ENOSYS);
- if (ret) {
+ if (ret < 0) {
snprintf(err_buf, STACK_ERR_BUF_SIZE,
"%s: %s:%d has an unreliable stack\n",
__func__, task->comm, task->pid);
return ret;
}
+ nr_entries = ret;
klp_for_each_object(klp_transition_patch, obj) {
if (!obj->patched)
continue;
klp_for_each_func(obj, func) {
- ret = klp_check_stack_func(func, &trace);
+ ret = klp_check_stack_func(func, entries, nr_entries);
if (ret) {
snprintf(err_buf, STACK_ERR_BUF_SIZE,
"%s: %s:%d is sleeping on function %s\n",
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 392c7f23af76..6fe2f333aecb 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -3,7 +3,7 @@
# and is generally not a function of system call inputs.
KCOV_INSTRUMENT := n
-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
+obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o rwsem-xadd.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
@@ -25,8 +25,7 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
+obj-$(CONFIG_LOCK_EVENT_COUNTS) += lock_events.o
diff --git a/kernel/locking/lock_events.c b/kernel/locking/lock_events.c
new file mode 100644
index 000000000000..fa2c2f951c6b
--- /dev/null
+++ b/kernel/locking/lock_events.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors: Waiman Long <waiman.long@hpe.com>
+ */
+
+/*
+ * Collect locking event counts
+ */
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+#include <linux/fs.h>
+
+#include "lock_events.h"
+
+#undef LOCK_EVENT
+#define LOCK_EVENT(name) [LOCKEVENT_ ## name] = #name,
+
+#define LOCK_EVENTS_DIR "lock_event_counts"
+
+/*
+ * When CONFIG_LOCK_EVENT_COUNTS is enabled, event counts of different
+ * types of locks will be reported under the <debugfs>/lock_event_counts/
+ * directory. See lock_events_list.h for the list of available locking
+ * events.
+ *
+ * Writing to the special ".reset_counts" file will reset all the above
+ * locking event counts. This is a very slow operation and so should not
+ * be done frequently.
+ *
+ * These event counts are implemented as per-cpu variables which are
+ * summed whenever the corresponding debugfs files are read. This
+ * minimizes the added overhead, making the counts usable even in a
+ * production environment.
+ */
+static const char * const lockevent_names[lockevent_num + 1] = {
+
+#include "lock_events_list.h"
+
+ [LOCKEVENT_reset_cnts] = ".reset_counts",
+};
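
A hedged user-space sketch of consuming one of these files (assumptions: debugfs mounted at /sys/kernel/debug, the pv_kick_unlock event built in, and root privileges, per the 0400 file mode set below):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long sum;
            FILE *f = fopen("/sys/kernel/debug/lock_event_counts/pv_kick_unlock",
                            "r");

            if (!f)
                    return 1;
            /* each file holds one number: the sum across all possible CPUs */
            if (fscanf(f, "%llu", &sum) == 1)
                    printf("pv_kick_unlock = %llu\n", sum);
            fclose(f);
            return 0;
    }
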
+
+/*
+ * Per-cpu counts
+ */
+DEFINE_PER_CPU(unsigned long, lockevents[lockevent_num]);
+
+/*
+ * The lockevent_read() function can be overridden.
+ */
+ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[64];
+ int cpu, id, len;
+ u64 sum = 0;
+
+ /*
+ * Get the counter ID stored in file->f_inode->i_private
+ */
+ id = (long)file_inode(file)->i_private;
+
+ if (id >= lockevent_num)
+ return -EBADF;
+
+ for_each_possible_cpu(cpu)
+ sum += per_cpu(lockevents[id], cpu);
+ len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/*
+ * Function to handle write request
+ *
+ * When idx = reset_cnts, reset all the counts.
+ */
+static ssize_t lockevent_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int cpu;
+
+ /*
+ * Get the counter ID stored in file->f_inode->i_private
+ */
+ if ((long)file_inode(file)->i_private != LOCKEVENT_reset_cnts)
+ return count;
+
+ for_each_possible_cpu(cpu) {
+ int i;
+ unsigned long *ptr = per_cpu_ptr(lockevents, cpu);
+
+ for (i = 0 ; i < lockevent_num; i++)
+ WRITE_ONCE(ptr[i], 0);
+ }
+ return count;
+}
+
+/*
+ * Debugfs data structures
+ */
+static const struct file_operations fops_lockevent = {
+ .read = lockevent_read,
+ .write = lockevent_write,
+ .llseek = default_llseek,
+};
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/paravirt.h>
+
+static bool __init skip_lockevent(const char *name)
+{
+ static int pv_on __initdata = -1;
+
+ if (pv_on < 0)
+ pv_on = !pv_is_native_spin_unlock();
+ /*
+ * Skip PV qspinlock events on bare metal.
+ */
+ if (!pv_on && !memcmp(name, "pv_", 3))
+ return true;
+ return false;
+}
+#else
+static inline bool skip_lockevent(const char *name)
+{
+ return false;
+}
+#endif
+
+/*
+ * Initialize debugfs for the locking event counts.
+ */
+static int __init init_lockevent_counts(void)
+{
+ struct dentry *d_counts = debugfs_create_dir(LOCK_EVENTS_DIR, NULL);
+ int i;
+
+ if (!d_counts)
+ goto out;
+
+ /*
+ * Create the debugfs files
+ *
+	 * As reading from and writing to the stat files can be slow, only
+	 * root is allowed to do the read/write to limit the impact on
+	 * system performance.
+ */
+ for (i = 0; i < lockevent_num; i++) {
+ if (skip_lockevent(lockevent_names[i]))
+ continue;
+ if (!debugfs_create_file(lockevent_names[i], 0400, d_counts,
+ (void *)(long)i, &fops_lockevent))
+ goto fail_undo;
+ }
+
+ if (!debugfs_create_file(lockevent_names[LOCKEVENT_reset_cnts], 0200,
+ d_counts, (void *)(long)LOCKEVENT_reset_cnts,
+ &fops_lockevent))
+ goto fail_undo;
+
+ return 0;
+fail_undo:
+ debugfs_remove_recursive(d_counts);
+out:
+ pr_warn("Could not create '%s' debugfs entries\n", LOCK_EVENTS_DIR);
+ return -ENOMEM;
+}
+fs_initcall(init_lockevent_counts);
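
A minimal userspace sketch of the interface created above -- assuming debugfs
is mounted at /sys/kernel/debug and the kernel was built with
CONFIG_LOCK_EVENT_COUNTS=y; illustrative only, not part of the patch. The
rwsem_rlock event is one of the real names listed in lock_events_list.h
below; root is required since the files are mode 0400/0200:

    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/sys/kernel/debug/lock_event_counts/rwsem_rlock", "r");
    	unsigned long long sum;

    	if (!f) {
    		perror("fopen");	/* needs root: files are 0400/0200 */
    		return 1;
    	}
    	if (fscanf(f, "%llu", &sum) == 1)
    		printf("rwsem_rlock = %llu\n", sum);
    	fclose(f);

    	/* Writing anything to .reset_counts zeroes every counter. */
    	f = fopen("/sys/kernel/debug/lock_event_counts/.reset_counts", "w");
    	if (f) {
    		fputs("1\n", f);
    		fclose(f);
    	}
    	return 0;
    }
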
diff --git a/kernel/locking/lock_events.h b/kernel/locking/lock_events.h
new file mode 100644
index 000000000000..feb1acc54611
--- /dev/null
+++ b/kernel/locking/lock_events.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors: Waiman Long <longman@redhat.com>
+ */
+
+#ifndef __LOCKING_LOCK_EVENTS_H
+#define __LOCKING_LOCK_EVENTS_H
+
+enum lock_events {
+
+#include "lock_events_list.h"
+
+ lockevent_num, /* Total number of lock event counts */
+ LOCKEVENT_reset_cnts = lockevent_num,
+};
+
+#ifdef CONFIG_LOCK_EVENT_COUNTS
+/*
+ * Per-cpu counters
+ */
+DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);
+
+/*
+ * Increment the lock event counters
+ */
+static inline void __lockevent_inc(enum lock_events event, bool cond)
+{
+ if (cond)
+ __this_cpu_inc(lockevents[event]);
+}
+
+#define lockevent_inc(ev) __lockevent_inc(LOCKEVENT_ ##ev, true)
+#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)
+
+static inline void __lockevent_add(enum lock_events event, int inc)
+{
+ __this_cpu_add(lockevents[event], inc);
+}
+
+#define lockevent_add(ev, c) __lockevent_add(LOCKEVENT_ ##ev, c)
+
+#else /* CONFIG_LOCK_EVENT_COUNTS */
+
+#define lockevent_inc(ev)
+#define lockevent_add(ev, c)
+#define lockevent_cond_inc(ev, c)
+
+#endif /* CONFIG_LOCK_EVENT_COUNTS */
+#endif /* __LOCKING_LOCK_EVENTS_H */
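
A sketch of how lock code consumes the macros above; the function below is
hypothetical, while the event names are real entries from lock_events_list.h:

    #include "lock_events.h"

    /* Hypothetical slowpath instrumentation. */
    static inline void example_instrumentation(int woken)
    {
    	/* Unconditional per-cpu increment (a single __this_cpu_inc()). */
    	lockevent_inc(rwsem_sleep_reader);

    	/*
    	 * Conditional increment: only counts when 'woken' is non-zero.
    	 * With CONFIG_LOCK_EVENT_COUNTS=n both macros expand to nothing,
    	 * so instrumented code carries no overhead at all.
    	 */
    	lockevent_cond_inc(rwsem_wake_reader, woken);
    }
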
diff --git a/kernel/locking/lock_events_list.h b/kernel/locking/lock_events_list.h
new file mode 100644
index 000000000000..ad7668cfc9da
--- /dev/null
+++ b/kernel/locking/lock_events_list.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors: Waiman Long <longman@redhat.com>
+ */
+
+#ifndef LOCK_EVENT
+#define LOCK_EVENT(name) LOCKEVENT_ ## name,
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+/*
+ * Locking events for PV qspinlock.
+ */
+LOCK_EVENT(pv_hash_hops) /* Average # of hops per hashing operation */
+LOCK_EVENT(pv_kick_unlock) /* # of vCPU kicks issued at unlock time */
+LOCK_EVENT(pv_kick_wake) /* # of vCPU kicks for pv_latency_wake */
+LOCK_EVENT(pv_latency_kick) /* Average latency (ns) of vCPU kick */
+LOCK_EVENT(pv_latency_wake) /* Average latency (ns) of kick-to-wakeup */
+LOCK_EVENT(pv_lock_stealing) /* # of lock stealing operations */
+LOCK_EVENT(pv_spurious_wakeup) /* # of spurious wakeups in non-head vCPUs */
+LOCK_EVENT(pv_wait_again)	/* # of waits after queue head vCPU kick */
+LOCK_EVENT(pv_wait_early)	/* # of early vCPU waits */
+LOCK_EVENT(pv_wait_head)	/* # of vCPU waits at the queue head */
+LOCK_EVENT(pv_wait_node)	/* # of vCPU waits at non-head queue node */
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+/*
+ * Locking events for qspinlock
+ *
+ * Subtracting lock_use_node[234] from lock_slowpath will give you
+ * lock_use_node1.
+ */
+LOCK_EVENT(lock_pending) /* # of locking ops via pending code */
+LOCK_EVENT(lock_slowpath) /* # of locking ops via MCS lock queue */
+LOCK_EVENT(lock_use_node2) /* # of locking ops that use 2nd percpu node */
+LOCK_EVENT(lock_use_node3) /* # of locking ops that use 3rd percpu node */
+LOCK_EVENT(lock_use_node4) /* # of locking ops that use 4th percpu node */
+LOCK_EVENT(lock_no_node) /* # of locking ops w/o using percpu node */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+/*
+ * Locking events for rwsem
+ */
+LOCK_EVENT(rwsem_sleep_reader) /* # of reader sleeps */
+LOCK_EVENT(rwsem_sleep_writer) /* # of writer sleeps */
+LOCK_EVENT(rwsem_wake_reader) /* # of reader wakeups */
+LOCK_EVENT(rwsem_wake_writer) /* # of writer wakeups */
+LOCK_EVENT(rwsem_opt_wlock) /* # of write locks opt-spin acquired */
+LOCK_EVENT(rwsem_opt_fail) /* # of failed opt-spinnings */
+LOCK_EVENT(rwsem_rlock) /* # of read locks acquired */
+LOCK_EVENT(rwsem_rlock_fast) /* # of fast read locks acquired */
+LOCK_EVENT(rwsem_rlock_fail) /* # of failed read lock acquisitions */
+LOCK_EVENT(rwsem_rtrylock) /* # of read trylock calls */
+LOCK_EVENT(rwsem_wlock) /* # of write locks acquired */
+LOCK_EVENT(rwsem_wlock_fail) /* # of failed write lock acquisitions */
+LOCK_EVENT(rwsem_wtrylock) /* # of write trylock calls */
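
To see how the double expansion of this list works: each includer defines
LOCK_EVENT before including the file, so the same list generates both the
enum and the name array. With a hypothetical two-entry list, the two
expansions above reduce to the following (illustrative only):

    /* Default LOCK_EVENT from this file, as used by lock_events.h: */
    enum lock_events_example {
    	LOCKEVENT_lock_pending,		/* LOCK_EVENT(name) LOCKEVENT_ ## name, */
    	LOCKEVENT_lock_slowpath,
    	lockevent_num_example,
    };

    /* LOCK_EVENT as redefined by lock_events.c: */
    static const char * const lockevent_names_example[] = {
    	[LOCKEVENT_lock_pending]  = "lock_pending",	/* [LOCKEVENT_ ## name] = #name, */
    	[LOCKEVENT_lock_slowpath] = "lock_slowpath",
    };
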
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 21cb81fe6359..d06190fa5082 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -434,29 +434,14 @@ static void print_lockdep_off(const char *bug_msg)
#endif
}
-static int save_trace(struct stack_trace *trace)
+static int save_trace(struct lock_trace *trace)
{
- trace->nr_entries = 0;
- trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
- trace->entries = stack_trace + nr_stack_trace_entries;
-
- trace->skip = 3;
-
- save_stack_trace(trace);
-
- /*
- * Some daft arches put -1 at the end to indicate its a full trace.
- *
- * <rant> this is buggy anyway, since it takes a whole extra entry so a
- * complete trace that maxes out the entries provided will be reported
- * as incomplete, friggin useless </rant>
- */
- if (trace->nr_entries != 0 &&
- trace->entries[trace->nr_entries-1] == ULONG_MAX)
- trace->nr_entries--;
-
- trace->max_entries = trace->nr_entries;
+ unsigned long *entries = stack_trace + nr_stack_trace_entries;
+ unsigned int max_entries;
+ trace->offset = nr_stack_trace_entries;
+ max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+ trace->nr_entries = stack_trace_save(entries, max_entries, 3);
nr_stack_trace_entries += trace->nr_entries;
if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
@@ -516,11 +501,11 @@ static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
char c = '.';
- if (class->usage_mask & lock_flag(bit + 2))
+ if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
c = '+';
if (class->usage_mask & lock_flag(bit)) {
c = '-';
- if (class->usage_mask & lock_flag(bit + 2))
+ if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
c = '?';
}
@@ -649,6 +634,9 @@ static int static_obj(const void *obj)
end = (unsigned long) &_end,
addr = (unsigned long) obj;
+ if (arch_is_kernel_initmem_freed(addr))
+ return 0;
+
/*
* static variable?
*/
@@ -842,7 +830,9 @@ static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
return true;
}
-static u16 chain_hlocks[];
+#ifdef CONFIG_PROVE_LOCKING
+static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+#endif
static bool check_lock_chain_key(struct lock_chain *chain)
{
@@ -980,15 +970,22 @@ static inline void check_data_structures(void) { }
*/
static void init_data_structures_once(void)
{
- static bool initialization_happened;
+ static bool ds_initialized, rcu_head_initialized;
int i;
- if (likely(initialization_happened))
+ if (likely(rcu_head_initialized))
+ return;
+
+ if (system_state >= SYSTEM_SCHEDULING) {
+ init_rcu_head(&delayed_free.rcu_head);
+ rcu_head_initialized = true;
+ }
+
+ if (ds_initialized)
return;
- initialization_happened = true;
+ ds_initialized = true;
- init_rcu_head(&delayed_free.rcu_head);
INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
@@ -1198,7 +1195,7 @@ static struct lock_list *alloc_list_entry(void)
static int add_lock_to_list(struct lock_class *this,
struct lock_class *links_to, struct list_head *head,
unsigned long ip, int distance,
- struct stack_trace *trace)
+ struct lock_trace *trace)
{
struct lock_list *entry;
/*
@@ -1417,6 +1414,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
* checking.
*/
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+ unsigned long *entries = stack_trace + trace->offset;
+
+ stack_trace_print(entries, trace->nr_entries, spaces);
+}
+
/*
* Print a dependency chain entry (this is only done when a deadlock
* has been detected):
@@ -1429,8 +1433,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
printk("\n-> #%u", depth);
print_lock_name(target->class);
printk(KERN_CONT ":\n");
- print_stack_trace(&target->trace, 6);
-
+ print_lock_trace(&target->trace, 6);
return 0;
}
@@ -1524,10 +1527,9 @@ static inline int class_equal(struct lock_list *entry, void *data)
}
static noinline int print_circular_bug(struct lock_list *this,
- struct lock_list *target,
- struct held_lock *check_src,
- struct held_lock *check_tgt,
- struct stack_trace *trace)
+ struct lock_list *target,
+ struct held_lock *check_src,
+ struct held_lock *check_tgt)
{
struct task_struct *curr = current;
struct lock_list *parent;
@@ -1667,19 +1669,25 @@ check_redundant(struct lock_list *root, struct lock_class *target,
}
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+
+static inline int usage_accumulate(struct lock_list *entry, void *mask)
+{
+ *(unsigned long *)mask |= entry->class->usage_mask;
+
+ return 0;
+}
+
/*
* Forwards and backwards subgraph searching, for the purposes of
* proving that two subgraphs can be connected by a new dependency
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
-static inline int usage_match(struct lock_list *entry, void *bit)
+static inline int usage_match(struct lock_list *entry, void *mask)
{
- return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+ return entry->class->usage_mask & *(unsigned long *)mask;
}
-
-
/*
* Find a node in the forwards-direction dependency sub-graph starting
* at @root->class that matches @bit.
@@ -1691,14 +1699,14 @@ static inline int usage_match(struct lock_list *entry, void *bit)
* Return <0 on error.
*/
static int
-find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
struct lock_list **target_entry)
{
int result;
debug_atomic_inc(nr_find_usage_forwards_checks);
- result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+ result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
return result;
}
@@ -1714,14 +1722,14 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
* Return <0 on error.
*/
static int
-find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
struct lock_list **target_entry)
{
int result;
debug_atomic_inc(nr_find_usage_backwards_checks);
- result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+ result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
return result;
}
@@ -1743,7 +1751,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
len += printk("%*s %s", depth, "", usage_str[bit]);
len += printk(KERN_CONT " at:\n");
- print_stack_trace(class->usage_traces + bit, len);
+ print_lock_trace(class->usage_traces + bit, len);
}
}
printk("%*s }\n", depth, "");
@@ -1768,7 +1776,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
do {
print_lock_class_header(entry->class, depth);
printk("%*s ... acquired at:\n", depth, "");
- print_stack_trace(&entry->trace, 2);
+ print_lock_trace(&entry->trace, 2);
printk("\n");
if (depth == 0 && (entry != root)) {
@@ -1881,14 +1889,14 @@ print_bad_irq_dependency(struct task_struct *curr,
print_lock_name(backwards_entry->class);
pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
- print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+ print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
print_lock_name(forwards_entry->class);
pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
pr_warn("...");
- print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+ print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
pr_warn("\nother info that might help us debug this:\n\n");
print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -1913,39 +1921,6 @@ print_bad_irq_dependency(struct task_struct *curr,
return 0;
}
-static int
-check_usage(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next, enum lock_usage_bit bit_backwards,
- enum lock_usage_bit bit_forwards, const char *irqclass)
-{
- int ret;
- struct lock_list this, that;
- struct lock_list *uninitialized_var(target_entry);
- struct lock_list *uninitialized_var(target_entry1);
-
- this.parent = NULL;
-
- this.class = hlock_class(prev);
- ret = find_usage_backwards(&this, bit_backwards, &target_entry);
- if (ret < 0)
- return print_bfs_bug(ret);
- if (ret == 1)
- return ret;
-
- that.parent = NULL;
- that.class = hlock_class(next);
- ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
- if (ret < 0)
- return print_bfs_bug(ret);
- if (ret == 1)
- return ret;
-
- return print_bad_irq_dependency(curr, &this, &that,
- target_entry, target_entry1,
- prev, next,
- bit_backwards, bit_forwards, irqclass);
-}
-
static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
__stringify(__STATE),
@@ -1962,9 +1937,19 @@ static const char *state_rnames[] = {
static inline const char *state_name(enum lock_usage_bit bit)
{
- return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+ if (bit & LOCK_USAGE_READ_MASK)
+ return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
+ else
+ return state_names[bit >> LOCK_USAGE_DIR_MASK];
}
+/*
+ * The bit number is encoded like:
+ *
+ * bit0: 0 exclusive, 1 read lock
+ * bit1: 0 used in irq, 1 irq enabled
+ * bit2-n: state
+ */
static int exclusive_bit(int new_bit)
{
int state = new_bit & LOCK_USAGE_STATE_MASK;
@@ -1976,45 +1961,160 @@ static int exclusive_bit(int new_bit)
return state | (dir ^ LOCK_USAGE_DIR_MASK);
}
+/*
+ * Observe that, given a bitmask where each bitnr is encoded as above,
+ * shifting the mask right transforms each individual bitnr into bitnr - 1,
+ * and conversely, shifting it left transforms each bitnr into bitnr + 1.
+ *
+ * So for all bits whose number has LOCK_ENABLED_* set (bitnr1 == 1), we can
+ * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
+ * instead, by subtracting 2 from the bit number, i.e. by shifting the mask
+ * right by 2.
+ *
+ * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, i.e. by shifting
+ * the mask left by 2.
+ *
+ * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
+ * all bits set) and recompose with bitnr1 flipped.
+ */
+static unsigned long invert_dir_mask(unsigned long mask)
+{
+ unsigned long excl = 0;
+
+ /* Invert dir */
+ excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
+ excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
+
+ return excl;
+}
+
+/*
+ * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops: first, for
+ * all bits with bitnr0 set (LOCK_*_READ), OR in the corresponding bits with
+ * bitnr0 cleared (LOCK_*); then mask out all bitnr0 bits.
+ */
+static unsigned long exclusive_mask(unsigned long mask)
+{
+ unsigned long excl = invert_dir_mask(mask);
+
+ /* Strip read */
+ excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
+ excl &= ~LOCKF_IRQ_READ;
+
+ return excl;
+}
+
+/*
+ * Retrieve the _possible_ original mask to which @mask is
+ * exclusive. I.e., this is the opposite of exclusive_mask().
+ * Note that 2 possible original bits can match an exclusive
+ * bit: one has LOCK_USAGE_READ_MASK set, the other has it
+ * cleared. So both are returned for each exclusive bit.
+ */
+static unsigned long original_mask(unsigned long mask)
+{
+ unsigned long excl = invert_dir_mask(mask);
+
+ /* Include read in existing usages */
+ excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
+
+ return excl;
+}
+
+/*
+ * Find the first pair of bit match between an original
+ * usage mask and an exclusive usage mask.
+ */
+static int find_exclusive_match(unsigned long mask,
+ unsigned long excl_mask,
+ enum lock_usage_bit *bitp,
+ enum lock_usage_bit *excl_bitp)
+{
+ int bit, excl;
+
+ for_each_set_bit(bit, &mask, LOCK_USED) {
+ excl = exclusive_bit(bit);
+ if (excl_mask & lock_flag(excl)) {
+ *bitp = bit;
+ *excl_bitp = excl;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/*
+ * Prove that the new dependency does not connect a hardirq-safe(-read)
+ * lock with a hardirq-unsafe lock - to achieve this we search
+ * the backwards-subgraph starting at <prev>, and the
+ * forwards-subgraph starting at <next>:
+ */
static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next, enum lock_usage_bit bit)
+ struct held_lock *next)
{
+ unsigned long usage_mask = 0, forward_mask, backward_mask;
+ enum lock_usage_bit forward_bit = 0, backward_bit = 0;
+ struct lock_list *uninitialized_var(target_entry1);
+ struct lock_list *uninitialized_var(target_entry);
+ struct lock_list this, that;
+ int ret;
+
/*
- * Prove that the new dependency does not connect a hardirq-safe
- * lock with a hardirq-unsafe lock - to achieve this we search
- * the backwards-subgraph starting at <prev>, and the
- * forwards-subgraph starting at <next>:
+	 * Step 1: gather all hard/soft IRQ usages backwards into an
+	 * accumulated usage mask.
*/
- if (!check_usage(curr, prev, next, bit,
- exclusive_bit(bit), state_name(bit)))
- return 0;
+ this.parent = NULL;
+ this.class = hlock_class(prev);
- bit++; /* _READ */
+ ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+
+ usage_mask &= LOCKF_USED_IN_IRQ_ALL;
+ if (!usage_mask)
+ return 1;
/*
- * Prove that the new dependency does not connect a hardirq-safe-read
- * lock with a hardirq-unsafe lock - to achieve this we search
- * the backwards-subgraph starting at <prev>, and the
- * forwards-subgraph starting at <next>:
+	 * Step 2: search forwards for usages that are exclusive to the
+	 * mask accumulated backwards in step 1.
*/
- if (!check_usage(curr, prev, next, bit,
- exclusive_bit(bit), state_name(bit)))
- return 0;
+ forward_mask = exclusive_mask(usage_mask);
- return 1;
-}
+ that.parent = NULL;
+ that.class = hlock_class(next);
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next)
-{
-#define LOCKDEP_STATE(__STATE) \
- if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
- return 0;
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
+ ret = find_usage_forwards(&that, forward_mask, &target_entry1);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
+ return ret;
- return 1;
+ /*
+ * Step 3: we found a bad match! Now retrieve a lock from the backward
+ * list whose usage mask matches the exclusive usage mask from the
+ * lock found on the forward list.
+ */
+ backward_mask = original_mask(target_entry1->class->usage_mask);
+
+ ret = find_usage_backwards(&this, backward_mask, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (DEBUG_LOCKS_WARN_ON(ret == 1))
+ return 1;
+
+ /*
+ * Step 4: narrow down to a pair of incompatible usage bits
+ * and report it.
+ */
+ ret = find_exclusive_match(target_entry->class->usage_mask,
+ target_entry1->class->usage_mask,
+ &backward_bit, &forward_bit);
+ if (DEBUG_LOCKS_WARN_ON(ret == -1))
+ return 1;
+
+ return print_bad_irq_dependency(curr, &this, &that,
+ target_entry, target_entry1,
+ prev, next,
+ backward_bit, forward_bit,
+ state_name(backward_bit));
}
static void inc_chains(void)
@@ -2031,9 +2131,8 @@ static void inc_chains(void)
#else
-static inline int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next)
+static inline int check_irq_usage(struct task_struct *curr,
+ struct held_lock *prev, struct held_lock *next)
{
return 1;
}
@@ -2161,8 +2260,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
*/
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next, int distance, struct stack_trace *trace,
- int (*save)(struct stack_trace *trace))
+ struct held_lock *next, int distance, struct lock_trace *trace)
{
struct lock_list *uninitialized_var(target_entry);
struct lock_list *entry;
@@ -2200,20 +2298,20 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
this.parent = NULL;
ret = check_noncircular(&this, hlock_class(prev), &target_entry);
if (unlikely(!ret)) {
- if (!trace->entries) {
+ if (!trace->nr_entries) {
/*
- * If @save fails here, the printing might trigger
- * a WARN but because of the !nr_entries it should
- * not do bad things.
+ * If save_trace fails here, the printing might
+ * trigger a WARN but because of the !nr_entries it
+ * should not do bad things.
*/
- save(trace);
+ save_trace(trace);
}
- return print_circular_bug(&this, target_entry, next, prev, trace);
+ return print_circular_bug(&this, target_entry, next, prev);
}
else if (unlikely(ret < 0))
return print_bfs_bug(ret);
- if (!check_prev_add_irq(curr, prev, next))
+ if (!check_irq_usage(curr, prev, next))
return 0;
/*
@@ -2256,7 +2354,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
return print_bfs_bug(ret);
- if (!trace->entries && !save(trace))
+ if (!trace->nr_entries && !save_trace(trace))
return 0;
/*
@@ -2288,14 +2386,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
+ struct lock_trace trace = { .nr_entries = 0 };
int depth = curr->lockdep_depth;
struct held_lock *hlock;
- struct stack_trace trace = {
- .nr_entries = 0,
- .max_entries = 0,
- .entries = NULL,
- .skip = 0,
- };
/*
* Debugging checks.
@@ -2321,7 +2414,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
* added:
*/
if (hlock->read != 2 && hlock->check) {
- int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+ int ret = check_prev_add(curr, hlock, next, distance,
+ &trace);
if (!ret)
return 0;
@@ -2722,6 +2816,10 @@ static inline int validate_chain(struct task_struct *curr,
{
return 1;
}
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
#endif
/*
@@ -2775,6 +2873,12 @@ static void check_chain_key(struct task_struct *curr)
#endif
}
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
+ enum lock_usage_bit new_bit);
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+
+
static void
print_usage_bug_scenario(struct held_lock *lock)
{
@@ -2818,7 +2922,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
print_lock(this);
pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
- print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+ print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
print_irqtrace_events(curr);
pr_warn("\nother info that might help us debug this:\n");
@@ -2844,10 +2948,6 @@ valid_state(struct task_struct *curr, struct held_lock *this,
return 1;
}
-static int mark_lock(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit new_bit);
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
* print irq inversion bug:
@@ -2927,7 +3027,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
- ret = find_usage_forwards(&root, bit, &target_entry);
+ ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -2951,7 +3051,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
- ret = find_usage_backwards(&root, bit, &target_entry);
+ ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -3006,7 +3106,7 @@ static int (*state_verbose_f[])(struct lock_class *class) = {
static inline int state_verbose(enum lock_usage_bit bit,
struct lock_class *class)
{
- return state_verbose_f[bit >> 2](class);
+ return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
}
typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
@@ -3148,7 +3248,7 @@ void lockdep_hardirqs_on(unsigned long ip)
/*
* See the fine text that goes along with this variable definition.
*/
- if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+ if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
return;
/*
@@ -4680,8 +4780,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
return;
raw_local_irq_save(flags);
- if (!graph_lock())
- goto out_irq;
+ arch_spin_lock(&lockdep_lock);
+ current->lockdep_recursion = 1;
/* closed head */
pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4693,8 +4793,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
*/
call_rcu_zapped(delayed_free.pf + delayed_free.index);
- graph_unlock();
-out_irq:
+ current->lockdep_recursion = 0;
+ arch_spin_unlock(&lockdep_lock);
raw_local_irq_restore(flags);
}
@@ -4735,21 +4835,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
{
struct pending_free *pf;
unsigned long flags;
- int locked;
init_data_structures_once();
raw_local_irq_save(flags);
- locked = graph_lock();
- if (!locked)
- goto out_irq;
-
+ arch_spin_lock(&lockdep_lock);
+ current->lockdep_recursion = 1;
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
call_rcu_zapped(pf);
-
- graph_unlock();
-out_irq:
+ current->lockdep_recursion = 0;
+ arch_spin_unlock(&lockdep_lock);
raw_local_irq_restore(flags);
/*
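
A worked toy example of the mask transforms used by the new check_irq_usage()
earlier in this lockdep.c diff (invert_dir_mask()/exclusive_mask()). The
constants below are standalone stand-ins mirroring the documented bit
encoding (bit0 = read, bit1 = dir, bit2.. = state), not the real LOCKF_*
values:

    #include <assert.h>

    #define USED_IN       0x1UL	/* LOCK_USED_IN_HARDIRQ       (bitnr 0) */
    #define USED_IN_READ  0x2UL	/* LOCK_USED_IN_HARDIRQ_READ  (bitnr 1) */
    #define ENABLED       0x4UL	/* LOCK_ENABLED_HARDIRQ       (bitnr 2) */
    #define ENABLED_READ  0x8UL	/* LOCK_ENABLED_HARDIRQ_READ  (bitnr 3) */

    int main(void)
    {
    	/* A backward search accumulated a hardirq-read-safe usage. */
    	unsigned long mask = USED_IN_READ, excl;

    	/* invert_dir_mask(): flip bitnr1 by shifting across the dir bit. */
    	excl  = (mask & (ENABLED | ENABLED_READ)) >> 2;	/* dir 1 -> 0 */
    	excl |= (mask & (USED_IN | USED_IN_READ)) << 2;	/* dir 0 -> 1 */
    	assert(excl == ENABLED_READ);

    	/* exclusive_mask(): additionally fold READ bits onto non-READ. */
    	excl |= (excl & (USED_IN_READ | ENABLED_READ)) >> 1;
    	excl &= ~(USED_IN_READ | ENABLED_READ);

    	/* A hardirq-read-safe lock conflicts with a hardirq-unsafe one. */
    	assert(excl == ENABLED);
    	return 0;
    }
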
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index d4c197425f68..150ec3f0c5b5 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -42,13 +42,35 @@ enum {
__LOCKF(USED)
};
-#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE |
+static const unsigned long LOCKF_ENABLED_IRQ =
+#include "lockdep_states.h"
+ 0;
+#undef LOCKDEP_STATE
+
+#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE |
+static const unsigned long LOCKF_USED_IN_IRQ =
+#include "lockdep_states.h"
+ 0;
+#undef LOCKDEP_STATE
+
+#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE##_READ |
+static const unsigned long LOCKF_ENABLED_IRQ_READ =
+#include "lockdep_states.h"
+ 0;
+#undef LOCKDEP_STATE
+
+#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE##_READ |
+static const unsigned long LOCKF_USED_IN_IRQ_READ =
+#include "lockdep_states.h"
+ 0;
+#undef LOCKDEP_STATE
+
+#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
+#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)
-#define LOCKF_ENABLED_IRQ_READ \
- (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
-#define LOCKF_USED_IN_IRQ_READ \
- (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
+#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
/*
* CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index ad40a2617063..80a463d31a8d 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -829,7 +829,9 @@ static void lock_torture_cleanup(void)
"End of test: SUCCESS");
kfree(cxt.lwsa);
+ cxt.lwsa = NULL;
kfree(cxt.lrsa);
+ cxt.lrsa = NULL;
end:
torture_cleanup_end();
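
The NULL assignments added above make lock_torture_cleanup() idempotent:
kfree(NULL) is a no-op, so a second pass over the cleanup path frees nothing.
A minimal sketch of the idiom (the helper is hypothetical):

    #include <linux/slab.h>

    static void example_free_once(void **p)
    {
    	kfree(*p);
    	*p = NULL;	/* a repeated call now passes NULL to kfree() */
    }
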
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 883cf1b92d90..f17dad99eec8 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -7,6 +7,8 @@
#include <linux/sched.h>
#include <linux/errno.h>
+#include "rwsem.h"
+
int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
const char *name, struct lock_class_key *rwsem_key)
{
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 5e9247dc2515..e14b32c69639 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -395,7 +395,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
* 0,1,0 -> 0,0,1
*/
clear_pending_set_locked(lock);
- qstat_inc(qstat_lock_pending, true);
+ lockevent_inc(lock_pending);
return;
/*
@@ -403,7 +403,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
* queuing.
*/
queue:
- qstat_inc(qstat_lock_slowpath, true);
+ lockevent_inc(lock_slowpath);
pv_queue:
node = this_cpu_ptr(&qnodes[0].mcs);
idx = node->count++;
@@ -419,7 +419,7 @@ pv_queue:
* simple enough.
*/
if (unlikely(idx >= MAX_NODES)) {
- qstat_inc(qstat_lock_no_node, true);
+ lockevent_inc(lock_no_node);
while (!queued_spin_trylock(lock))
cpu_relax();
goto release;
@@ -430,7 +430,7 @@ pv_queue:
/*
* Keep counts of non-zero index values:
*/
- qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
+ lockevent_cond_inc(lock_use_node2 + idx - 1, idx);
/*
* Ensure that we increment the head node->count before initialising
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 8f36c27c1794..89bab079e7a4 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -89,7 +89,7 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
if (!(val & _Q_LOCKED_PENDING_MASK) &&
(cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
- qstat_inc(qstat_pv_lock_stealing, true);
+ lockevent_inc(pv_lock_stealing);
return true;
}
if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
@@ -219,7 +219,7 @@ static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
hopcnt++;
if (!cmpxchg(&he->lock, NULL, lock)) {
WRITE_ONCE(he->node, node);
- qstat_hop(hopcnt);
+ lockevent_pv_hop(hopcnt);
return &he->lock;
}
}
@@ -320,8 +320,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
smp_store_mb(pn->state, vcpu_halted);
if (!READ_ONCE(node->locked)) {
- qstat_inc(qstat_pv_wait_node, true);
- qstat_inc(qstat_pv_wait_early, wait_early);
+ lockevent_inc(pv_wait_node);
+ lockevent_cond_inc(pv_wait_early, wait_early);
pv_wait(&pn->state, vcpu_halted);
}
@@ -339,7 +339,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
* So it is better to spin for a while in the hope that the
* MCS lock will be released soon.
*/
- qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
+ lockevent_cond_inc(pv_spurious_wakeup,
+ !READ_ONCE(node->locked));
}
/*
@@ -416,7 +417,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
/*
* Tracking # of slowpath locking operations
*/
- qstat_inc(qstat_lock_slowpath, true);
+ lockevent_inc(lock_slowpath);
for (;; waitcnt++) {
/*
@@ -464,8 +465,8 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
}
}
WRITE_ONCE(pn->state, vcpu_hashed);
- qstat_inc(qstat_pv_wait_head, true);
- qstat_inc(qstat_pv_wait_again, waitcnt);
+ lockevent_inc(pv_wait_head);
+ lockevent_cond_inc(pv_wait_again, waitcnt);
pv_wait(&lock->locked, _Q_SLOW_VAL);
/*
@@ -528,7 +529,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
* vCPU is harmless other than the additional latency in completing
* the unlock.
*/
- qstat_inc(qstat_pv_kick_unlock, true);
+ lockevent_inc(pv_kick_unlock);
pv_kick(node->cpu);
}
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index d73f85388d5c..54152670ff24 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -9,262 +9,105 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * Authors: Waiman Long <waiman.long@hpe.com>
+ * Authors: Waiman Long <longman@redhat.com>
*/
-/*
- * When queued spinlock statistical counters are enabled, the following
- * debugfs files will be created for reporting the counter values:
- *
- * <debugfs>/qlockstat/
- * pv_hash_hops - average # of hops per hashing operation
- * pv_kick_unlock - # of vCPU kicks issued at unlock time
- * pv_kick_wake - # of vCPU kicks used for computing pv_latency_wake
- * pv_latency_kick - average latency (ns) of vCPU kick operation
- * pv_latency_wake - average latency (ns) from vCPU kick to wakeup
- * pv_lock_stealing - # of lock stealing operations
- * pv_spurious_wakeup - # of spurious wakeups in non-head vCPUs
- * pv_wait_again - # of wait's after a queue head vCPU kick
- * pv_wait_early - # of early vCPU wait's
- * pv_wait_head - # of vCPU wait's at the queue head
- * pv_wait_node - # of vCPU wait's at a non-head queue node
- * lock_pending - # of locking operations via pending code
- * lock_slowpath - # of locking operations via MCS lock queue
- * lock_use_node2 - # of locking operations that use 2nd per-CPU node
- * lock_use_node3 - # of locking operations that use 3rd per-CPU node
- * lock_use_node4 - # of locking operations that use 4th per-CPU node
- * lock_no_node - # of locking operations without using per-CPU node
- *
- * Subtracting lock_use_node[234] from lock_slowpath will give you
- * lock_use_node1.
- *
- * Writing to the "reset_counters" file will reset all the above counter
- * values.
- *
- * These statistical counters are implemented as per-cpu variables which are
- * summed and computed whenever the corresponding debugfs files are read. This
- * minimizes added overhead making the counters usable even in a production
- * environment.
- *
- * There may be slight difference between pv_kick_wake and pv_kick_unlock.
- */
-enum qlock_stats {
- qstat_pv_hash_hops,
- qstat_pv_kick_unlock,
- qstat_pv_kick_wake,
- qstat_pv_latency_kick,
- qstat_pv_latency_wake,
- qstat_pv_lock_stealing,
- qstat_pv_spurious_wakeup,
- qstat_pv_wait_again,
- qstat_pv_wait_early,
- qstat_pv_wait_head,
- qstat_pv_wait_node,
- qstat_lock_pending,
- qstat_lock_slowpath,
- qstat_lock_use_node2,
- qstat_lock_use_node3,
- qstat_lock_use_node4,
- qstat_lock_no_node,
- qstat_num, /* Total number of statistical counters */
- qstat_reset_cnts = qstat_num,
-};
+#include "lock_events.h"
-#ifdef CONFIG_QUEUED_LOCK_STAT
+#ifdef CONFIG_LOCK_EVENT_COUNTS
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
- * Collect pvqspinlock statistics
+ * Collect pvqspinlock locking event counts
*/
-#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>
-static const char * const qstat_names[qstat_num + 1] = {
- [qstat_pv_hash_hops] = "pv_hash_hops",
- [qstat_pv_kick_unlock] = "pv_kick_unlock",
- [qstat_pv_kick_wake] = "pv_kick_wake",
- [qstat_pv_spurious_wakeup] = "pv_spurious_wakeup",
- [qstat_pv_latency_kick] = "pv_latency_kick",
- [qstat_pv_latency_wake] = "pv_latency_wake",
- [qstat_pv_lock_stealing] = "pv_lock_stealing",
- [qstat_pv_wait_again] = "pv_wait_again",
- [qstat_pv_wait_early] = "pv_wait_early",
- [qstat_pv_wait_head] = "pv_wait_head",
- [qstat_pv_wait_node] = "pv_wait_node",
- [qstat_lock_pending] = "lock_pending",
- [qstat_lock_slowpath] = "lock_slowpath",
- [qstat_lock_use_node2] = "lock_use_node2",
- [qstat_lock_use_node3] = "lock_use_node3",
- [qstat_lock_use_node4] = "lock_use_node4",
- [qstat_lock_no_node] = "lock_no_node",
- [qstat_reset_cnts] = "reset_counters",
-};
+#define EVENT_COUNT(ev) lockevents[LOCKEVENT_ ## ev]
/*
- * Per-cpu counters
+ * PV-specific per-cpu counter
*/
-static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]);
static DEFINE_PER_CPU(u64, pv_kick_time);
/*
- * Function to read and return the qlock statistical counter values
+ * Function to read and return the PV qspinlock counts.
*
* The following counters are handled specially:
- * 1. qstat_pv_latency_kick
+ * 1. pv_latency_kick
* Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
- * 2. qstat_pv_latency_wake
+ * 2. pv_latency_wake
* Average wake latency (ns) = pv_latency_wake/pv_kick_wake
- * 3. qstat_pv_hash_hops
+ * 3. pv_hash_hops
* Average hops/hash = pv_hash_hops/pv_kick_unlock
*/
-static ssize_t qstat_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+ssize_t lockevent_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
{
char buf[64];
- int cpu, counter, len;
- u64 stat = 0, kicks = 0;
+ int cpu, id, len;
+ u64 sum = 0, kicks = 0;
/*
* Get the counter ID stored in file->f_inode->i_private
*/
- counter = (long)file_inode(file)->i_private;
+ id = (long)file_inode(file)->i_private;
- if (counter >= qstat_num)
+ if (id >= lockevent_num)
return -EBADF;
for_each_possible_cpu(cpu) {
- stat += per_cpu(qstats[counter], cpu);
+ sum += per_cpu(lockevents[id], cpu);
/*
- * Need to sum additional counter for some of them
+ * Need to sum additional counters for some of them
*/
- switch (counter) {
+ switch (id) {
- case qstat_pv_latency_kick:
- case qstat_pv_hash_hops:
- kicks += per_cpu(qstats[qstat_pv_kick_unlock], cpu);
+ case LOCKEVENT_pv_latency_kick:
+ case LOCKEVENT_pv_hash_hops:
+ kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
break;
- case qstat_pv_latency_wake:
- kicks += per_cpu(qstats[qstat_pv_kick_wake], cpu);
+ case LOCKEVENT_pv_latency_wake:
+ kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
break;
}
}
- if (counter == qstat_pv_hash_hops) {
+ if (id == LOCKEVENT_pv_hash_hops) {
u64 frac = 0;
if (kicks) {
- frac = 100ULL * do_div(stat, kicks);
+ frac = 100ULL * do_div(sum, kicks);
frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
}
/*
* Return a X.XX decimal number
*/
- len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n", stat, frac);
+ len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
+ sum, frac);
} else {
/*
* Round to the nearest ns
*/
- if ((counter == qstat_pv_latency_kick) ||
- (counter == qstat_pv_latency_wake)) {
+ if ((id == LOCKEVENT_pv_latency_kick) ||
+ (id == LOCKEVENT_pv_latency_wake)) {
if (kicks)
- stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
+ sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
}
- len = snprintf(buf, sizeof(buf) - 1, "%llu\n", stat);
+ len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
}
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
- * Function to handle write request
- *
- * When counter = reset_cnts, reset all the counter values.
- * Since the counter updates aren't atomic, the resetting is done twice
- * to make sure that the counters are very likely to be all cleared.
- */
-static ssize_t qstat_write(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- int cpu;
-
- /*
- * Get the counter ID stored in file->f_inode->i_private
- */
- if ((long)file_inode(file)->i_private != qstat_reset_cnts)
- return count;
-
- for_each_possible_cpu(cpu) {
- int i;
- unsigned long *ptr = per_cpu_ptr(qstats, cpu);
-
- for (i = 0 ; i < qstat_num; i++)
- WRITE_ONCE(ptr[i], 0);
- }
- return count;
-}
-
-/*
- * Debugfs data structures
- */
-static const struct file_operations fops_qstat = {
- .read = qstat_read,
- .write = qstat_write,
- .llseek = default_llseek,
-};
-
-/*
- * Initialize debugfs for the qspinlock statistical counters
- */
-static int __init init_qspinlock_stat(void)
-{
- struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL);
- int i;
-
- if (!d_qstat)
- goto out;
-
- /*
- * Create the debugfs files
- *
- * As reading from and writing to the stat files can be slow, only
- * root is allowed to do the read/write to limit impact to system
- * performance.
- */
- for (i = 0; i < qstat_num; i++)
- if (!debugfs_create_file(qstat_names[i], 0400, d_qstat,
- (void *)(long)i, &fops_qstat))
- goto fail_undo;
-
- if (!debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
- (void *)(long)qstat_reset_cnts, &fops_qstat))
- goto fail_undo;
-
- return 0;
-fail_undo:
- debugfs_remove_recursive(d_qstat);
-out:
- pr_warn("Could not create 'qlockstat' debugfs entries\n");
- return -ENOMEM;
-}
-fs_initcall(init_qspinlock_stat);
-
-/*
- * Increment the PV qspinlock statistical counters
- */
-static inline void qstat_inc(enum qlock_stats stat, bool cond)
-{
- if (cond)
- this_cpu_inc(qstats[stat]);
-}
-
-/*
* PV hash hop count
*/
-static inline void qstat_hop(int hopcnt)
+static inline void lockevent_pv_hop(int hopcnt)
{
- this_cpu_add(qstats[qstat_pv_hash_hops], hopcnt);
+ this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
}
/*
@@ -276,7 +119,7 @@ static inline void __pv_kick(int cpu)
per_cpu(pv_kick_time, cpu) = start;
pv_kick(cpu);
- this_cpu_add(qstats[qstat_pv_latency_kick], sched_clock() - start);
+ this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
}
/*
@@ -289,18 +132,19 @@ static inline void __pv_wait(u8 *ptr, u8 val)
*pkick_time = 0;
pv_wait(ptr, val);
if (*pkick_time) {
- this_cpu_add(qstats[qstat_pv_latency_wake],
+ this_cpu_add(EVENT_COUNT(pv_latency_wake),
sched_clock() - *pkick_time);
- qstat_inc(qstat_pv_kick_wake, true);
+ lockevent_inc(pv_kick_wake);
}
}
#define pv_kick(c) __pv_kick(c)
#define pv_wait(p, v) __pv_wait(p, v)
-#else /* CONFIG_QUEUED_LOCK_STAT */
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#else /* CONFIG_LOCK_EVENT_COUNTS */
-static inline void qstat_inc(enum qlock_stats stat, bool cond) { }
-static inline void qstat_hop(int hopcnt) { }
+static inline void lockevent_pv_hop(int hopcnt) { }
-#endif /* CONFIG_QUEUED_LOCK_STAT */
+#endif /* CONFIG_LOCK_EVENT_COUNTS */
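
A sketch of the fixed-point average that the PV-aware lockevent_read() above
computes for pv_hash_hops; the values are made up for illustration. do_div()
(asm/div64.h) divides its first argument in place and returns the remainder,
from which two decimal digits are derived:

    #include <asm/div64.h>
    #include <linux/kernel.h>

    static void example_avg(void)
    {
    	u64 sum = 523, kicks = 100, frac = 0;

    	if (kicks) {
    		frac = 100ULL * do_div(sum, kicks);	   /* sum = 5, frac = 2300 */
    		frac = DIV_ROUND_CLOSEST_ULL(frac, kicks); /* frac = 23 */
    	}
    	/* "%llu.%02llu" with (sum, frac) then yields "5.23" */
    }
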
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
deleted file mode 100644
index a7ffb2a96ede..000000000000
--- a/kernel/locking/rwsem-spinlock.c
+++ /dev/null
@@ -1,339 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
- * generic spinlock implementation
- *
- * Copyright (c) 2001 David Howells (dhowells@redhat.com).
- * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- */
-#include <linux/rwsem.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/debug.h>
-#include <linux/export.h>
-
-enum rwsem_waiter_type {
- RWSEM_WAITING_FOR_WRITE,
- RWSEM_WAITING_FOR_READ
-};
-
-struct rwsem_waiter {
- struct list_head list;
- struct task_struct *task;
- enum rwsem_waiter_type type;
-};
-
-int rwsem_is_locked(struct rw_semaphore *sem)
-{
- int ret = 1;
- unsigned long flags;
-
- if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
- ret = (sem->count != 0);
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- }
- return ret;
-}
-EXPORT_SYMBOL(rwsem_is_locked);
-
-/*
- * initialise the semaphore
- */
-void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
- * Make sure we are not reinitializing a held semaphore:
- */
- debug_check_no_locks_freed((void *)sem, sizeof(*sem));
- lockdep_init_map(&sem->dep_map, name, key, 0);
-#endif
- sem->count = 0;
- raw_spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
-}
-EXPORT_SYMBOL(__init_rwsem);
-
-/*
- * handle the lock release when processes blocked on it that can now run
- * - if we come here, then:
- * - the 'active count' _reached_ zero
- * - the 'waiting count' is non-zero
- * - the spinlock must be held by the caller
- * - woken process blocks are discarded from the list after having task zeroed
- * - writers are only woken if wakewrite is non-zero
- */
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
-{
- struct rwsem_waiter *waiter;
- struct task_struct *tsk;
- int woken;
-
- waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-
- if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
- if (wakewrite)
- /* Wake up a writer. Note that we do not grant it the
- * lock - it will have to acquire it when it runs. */
- wake_up_process(waiter->task);
- goto out;
- }
-
- /* grant an infinite number of read locks to the front of the queue */
- woken = 0;
- do {
- struct list_head *next = waiter->list.next;
-
- list_del(&waiter->list);
- tsk = waiter->task;
- /*
- * Make sure we do not wakeup the next reader before
- * setting the nil condition to grant the next reader;
- * otherwise we could miss the wakeup on the other
- * side and end up sleeping again. See the pairing
- * in rwsem_down_read_failed().
- */
- smp_mb();
- waiter->task = NULL;
- wake_up_process(tsk);
- put_task_struct(tsk);
- woken++;
- if (next == &sem->wait_list)
- break;
- waiter = list_entry(next, struct rwsem_waiter, list);
- } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
-
- sem->count += woken;
-
- out:
- return sem;
-}
-
-/*
- * wake a single writer
- */
-static inline struct rw_semaphore *
-__rwsem_wake_one_writer(struct rw_semaphore *sem)
-{
- struct rwsem_waiter *waiter;
-
- waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
- wake_up_process(waiter->task);
-
- return sem;
-}
-
-/*
- * get a read lock on the semaphore
- */
-int __sched __down_read_common(struct rw_semaphore *sem, int state)
-{
- struct rwsem_waiter waiter;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- if (sem->count >= 0 && list_empty(&sem->wait_list)) {
- /* granted */
- sem->count++;
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- goto out;
- }
-
- /* set up my own style of waitqueue */
- waiter.task = current;
- waiter.type = RWSEM_WAITING_FOR_READ;
- get_task_struct(current);
-
- list_add_tail(&waiter.list, &sem->wait_list);
-
- /* wait to be given the lock */
- for (;;) {
- if (!waiter.task)
- break;
- if (signal_pending_state(state, current))
- goto out_nolock;
- set_current_state(state);
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- schedule();
- raw_spin_lock_irqsave(&sem->wait_lock, flags);
- }
-
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- out:
- return 0;
-
-out_nolock:
- /*
- * We didn't take the lock, so that there is a writer, which
- * is owner or the first waiter of the sem. If it's a waiter,
- * it will be woken by current owner. Not need to wake anybody.
- */
- list_del(&waiter.list);
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- return -EINTR;
-}
-
-void __sched __down_read(struct rw_semaphore *sem)
-{
- __down_read_common(sem, TASK_UNINTERRUPTIBLE);
-}
-
-int __sched __down_read_killable(struct rw_semaphore *sem)
-{
- return __down_read_common(sem, TASK_KILLABLE);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-int __down_read_trylock(struct rw_semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
-
- raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- if (sem->count >= 0 && list_empty(&sem->wait_list)) {
- /* granted */
- sem->count++;
- ret = 1;
- }
-
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- return ret;
-}
-
-/*
- * get a write lock on the semaphore
- */
-int __sched __down_write_common(struct rw_semaphore *sem, int state)
-{
- struct rwsem_waiter waiter;
- unsigned long flags;
- int ret = 0;
-
- raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- /* set up my own style of waitqueue */
- waiter.task = current;
- waiter.type = RWSEM_WAITING_FOR_WRITE;
- list_add_tail(&waiter.list, &sem->wait_list);
-
- /* wait for someone to release the lock */
- for (;;) {
- /*
- * That is the key to support write lock stealing: allows the
- * task already on CPU to get the lock soon rather than put
- * itself into sleep and waiting for system woke it or someone
- * else in the head of the wait list up.
- */
- if (sem->count == 0)
- break;
- if (signal_pending_state(state, current))
- goto out_nolock;
-
- set_current_state(state);
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- schedule();
- raw_spin_lock_irqsave(&sem->wait_lock, flags);
- }
- /* got the lock */
- sem->count = -1;
- list_del(&waiter.list);
-
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- return ret;
-
-out_nolock:
- list_del(&waiter.list);
- if (!list_empty(&sem->wait_list) && sem->count >= 0)
- __rwsem_do_wake(sem, 0);
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- return -EINTR;
-}
-
-void __sched __down_write(struct rw_semaphore *sem)
-{
- __down_write_common(sem, TASK_UNINTERRUPTIBLE);
-}
-
-int __sched __down_write_killable(struct rw_semaphore *sem)
-{
- return __down_write_common(sem, TASK_KILLABLE);
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-int __down_write_trylock(struct rw_semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- if (sem->count == 0) {
- /* got the lock */
- sem->count = -1;
- ret = 1;
- }
-
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
- return ret;
-}
-
-/*
- * release a read lock on the semaphore
- */
-void __up_read(struct rw_semaphore *sem)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- if (--sem->count == 0 && !list_empty(&sem->wait_list))
- sem = __rwsem_wake_one_writer(sem);
-
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-/*
- * release a write lock on the semaphore
- */
-void __up_write(struct rw_semaphore *sem)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- sem->count = 0;
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, 1);
-
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-/*
- * downgrade a write lock into a read lock
- * - just wake up any readers at the front of the queue
- */
-void __downgrade_write(struct rw_semaphore *sem)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- sem->count = 1;
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, 0);
-
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index fbe96341beee..6b3ee9948bf1 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -147,6 +147,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
* will notice the queued writer.
*/
wake_q_add(wake_q, waiter->task);
+ lockevent_inc(rwsem_wake_writer);
}
return;
@@ -176,9 +177,8 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
goto try_reader_grant;
}
/*
- * It is not really necessary to set it to reader-owned here,
- * but it gives the spinners an early indication that the
- * readers now have the lock.
+ * Set it to reader-owned to give spinners an early
+ * indication that readers now have the lock.
*/
__rwsem_set_reader_owned(sem, waiter->task);
}
@@ -215,6 +215,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
}
adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+ lockevent_cond_inc(rwsem_wake_reader, woken);
if (list_empty(&sem->wait_list)) {
/* hit end of list above */
adjustment -= RWSEM_WAITING_BIAS;
@@ -225,92 +226,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
}
/*
- * Wait for the read lock to be granted
- */
-static inline struct rw_semaphore __sched *
-__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
-{
- long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
- struct rwsem_waiter waiter;
- DEFINE_WAKE_Q(wake_q);
-
- waiter.task = current;
- waiter.type = RWSEM_WAITING_FOR_READ;
-
- raw_spin_lock_irq(&sem->wait_lock);
- if (list_empty(&sem->wait_list)) {
- /*
- * In case the wait queue is empty and the lock isn't owned
- * by a writer, this reader can exit the slowpath and return
- * immediately as its RWSEM_ACTIVE_READ_BIAS has already
- * been set in the count.
- */
- if (atomic_long_read(&sem->count) >= 0) {
- raw_spin_unlock_irq(&sem->wait_lock);
- return sem;
- }
- adjustment += RWSEM_WAITING_BIAS;
- }
- list_add_tail(&waiter.list, &sem->wait_list);
-
- /* we're now waiting on the lock, but no longer actively locking */
- count = atomic_long_add_return(adjustment, &sem->count);
-
- /*
- * If there are no active locks, wake the front queued process(es).
- *
- * If there are no writers and we are first in the queue,
- * wake our own waiter to join the existing active readers !
- */
- if (count == RWSEM_WAITING_BIAS ||
- (count > RWSEM_WAITING_BIAS &&
- adjustment != -RWSEM_ACTIVE_READ_BIAS))
- __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
-
- raw_spin_unlock_irq(&sem->wait_lock);
- wake_up_q(&wake_q);
-
- /* wait to be given the lock */
- while (true) {
- set_current_state(state);
- if (!waiter.task)
- break;
- if (signal_pending_state(state, current)) {
- raw_spin_lock_irq(&sem->wait_lock);
- if (waiter.task)
- goto out_nolock;
- raw_spin_unlock_irq(&sem->wait_lock);
- break;
- }
- schedule();
- }
-
- __set_current_state(TASK_RUNNING);
- return sem;
-out_nolock:
- list_del(&waiter.list);
- if (list_empty(&sem->wait_list))
- atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
- raw_spin_unlock_irq(&sem->wait_lock);
- __set_current_state(TASK_RUNNING);
- return ERR_PTR(-EINTR);
-}
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed(struct rw_semaphore *sem)
-{
- return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed);
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed_killable(struct rw_semaphore *sem)
-{
- return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed_killable);
-
-/*
* This function must be called with the sem->wait_lock held to prevent
* race conditions between checking the rwsem wait list and setting the
* sem->count accordingly.
@@ -346,21 +261,17 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
*/
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
- long old, count = atomic_long_read(&sem->count);
-
- while (true) {
- if (!(count == 0 || count == RWSEM_WAITING_BIAS))
- return false;
+ long count = atomic_long_read(&sem->count);
- old = atomic_long_cmpxchg_acquire(&sem->count, count,
- count + RWSEM_ACTIVE_WRITE_BIAS);
- if (old == count) {
+ while (!count || count == RWSEM_WAITING_BIAS) {
+ if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
+ count + RWSEM_ACTIVE_WRITE_BIAS)) {
rwsem_set_owner(sem);
+ lockevent_inc(rwsem_opt_wlock);
return true;
}
-
- count = old;
}
+ return false;
}
static inline bool owner_on_cpu(struct task_struct *owner)
@@ -481,6 +392,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
osq_unlock(&sem->osq);
done:
preempt_enable();
+ lockevent_cond_inc(rwsem_opt_fail, !taken);
return taken;
}
@@ -505,6 +417,97 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
#endif
/*
+ * Wait for the read lock to be granted
+ */
+static inline struct rw_semaphore __sched *
+__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
+{
+ long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
+ struct rwsem_waiter waiter;
+ DEFINE_WAKE_Q(wake_q);
+
+ waiter.task = current;
+ waiter.type = RWSEM_WAITING_FOR_READ;
+
+ raw_spin_lock_irq(&sem->wait_lock);
+ if (list_empty(&sem->wait_list)) {
+ /*
+ * In case the wait queue is empty and the lock isn't owned
+ * by a writer, this reader can exit the slowpath and return
+ * immediately as its RWSEM_ACTIVE_READ_BIAS has already
+ * been set in the count.
+ */
+ if (atomic_long_read(&sem->count) >= 0) {
+ raw_spin_unlock_irq(&sem->wait_lock);
+ rwsem_set_reader_owned(sem);
+ lockevent_inc(rwsem_rlock_fast);
+ return sem;
+ }
+ adjustment += RWSEM_WAITING_BIAS;
+ }
+ list_add_tail(&waiter.list, &sem->wait_list);
+
+ /* we're now waiting on the lock, but no longer actively locking */
+ count = atomic_long_add_return(adjustment, &sem->count);
+
+ /*
+ * If there are no active locks, wake the front queued process(es).
+ *
+ * If there are no writers and we are first in the queue,
+	 * wake our own waiter to join the existing active readers!
+ */
+ if (count == RWSEM_WAITING_BIAS ||
+ (count > RWSEM_WAITING_BIAS &&
+ adjustment != -RWSEM_ACTIVE_READ_BIAS))
+ __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+
+ raw_spin_unlock_irq(&sem->wait_lock);
+ wake_up_q(&wake_q);
+
+ /* wait to be given the lock */
+ while (true) {
+ set_current_state(state);
+ if (!waiter.task)
+ break;
+ if (signal_pending_state(state, current)) {
+ raw_spin_lock_irq(&sem->wait_lock);
+ if (waiter.task)
+ goto out_nolock;
+ raw_spin_unlock_irq(&sem->wait_lock);
+ break;
+ }
+ schedule();
+ lockevent_inc(rwsem_sleep_reader);
+ }
+
+ __set_current_state(TASK_RUNNING);
+ lockevent_inc(rwsem_rlock);
+ return sem;
+out_nolock:
+ list_del(&waiter.list);
+ if (list_empty(&sem->wait_list))
+ atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+ raw_spin_unlock_irq(&sem->wait_lock);
+ __set_current_state(TASK_RUNNING);
+ lockevent_inc(rwsem_rlock_fail);
+ return ERR_PTR(-EINTR);
+}
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+ return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed);
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed_killable(struct rw_semaphore *sem)
+{
+ return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed_killable);
+
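The slowpath above leans on the wake_q mechanism: wakeups are collected
while wait_lock is held and only issued after it is dropped, so freshly
woken tasks never pile up on wait_lock itself. A condensed sketch of the
pattern, assuming a task pointer waiter_task (illustrative only):

    DEFINE_WAKE_Q(wake_q);

    raw_spin_lock_irq(&sem->wait_lock);
    wake_q_add(&wake_q, waiter_task);   /* queue only; no wakeup yet */
    raw_spin_unlock_irq(&sem->wait_lock);
    wake_up_q(&wake_q);                 /* the actual wake_up_process() calls */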
+/*
* Wait until we successfully acquire the write lock
*/
static inline struct rw_semaphore *
@@ -580,6 +583,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
goto out_nolock;
schedule();
+ lockevent_inc(rwsem_sleep_writer);
set_current_state(state);
} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);
@@ -588,6 +592,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
__set_current_state(TASK_RUNNING);
list_del(&waiter.list);
raw_spin_unlock_irq(&sem->wait_lock);
+ lockevent_inc(rwsem_wlock);
return ret;
@@ -601,6 +606,7 @@ out_nolock:
__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
raw_spin_unlock_irq(&sem->wait_lock);
wake_up_q(&wake_q);
+ lockevent_inc(rwsem_wlock_fail);
return ERR_PTR(-EINTR);
}
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index e586f0d03ad3..ccbf18f560ff 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -24,7 +24,6 @@ void __sched down_read(struct rw_semaphore *sem)
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
- rwsem_set_reader_owned(sem);
}
EXPORT_SYMBOL(down_read);
@@ -39,7 +38,6 @@ int __sched down_read_killable(struct rw_semaphore *sem)
return -EINTR;
}
- rwsem_set_reader_owned(sem);
return 0;
}
@@ -52,10 +50,8 @@ int down_read_trylock(struct rw_semaphore *sem)
{
int ret = __down_read_trylock(sem);
- if (ret == 1) {
+ if (ret == 1)
rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
- rwsem_set_reader_owned(sem);
- }
return ret;
}
@@ -70,7 +66,6 @@ void __sched down_write(struct rw_semaphore *sem)
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
- rwsem_set_owner(sem);
}
EXPORT_SYMBOL(down_write);
@@ -88,7 +83,6 @@ int __sched down_write_killable(struct rw_semaphore *sem)
return -EINTR;
}
- rwsem_set_owner(sem);
return 0;
}
@@ -101,10 +95,8 @@ int down_write_trylock(struct rw_semaphore *sem)
{
int ret = __down_write_trylock(sem);
- if (ret == 1) {
+ if (ret == 1)
rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
- rwsem_set_owner(sem);
- }
return ret;
}
@@ -117,9 +109,7 @@ EXPORT_SYMBOL(down_write_trylock);
void up_read(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
- DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
- rwsem_clear_reader_owned(sem);
__up_read(sem);
}
@@ -131,9 +121,7 @@ EXPORT_SYMBOL(up_read);
void up_write(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
- DEBUG_RWSEMS_WARN_ON(sem->owner != current);
- rwsem_clear_owner(sem);
__up_write(sem);
}
@@ -145,9 +133,7 @@ EXPORT_SYMBOL(up_write);
void downgrade_write(struct rw_semaphore *sem)
{
lock_downgrade(&sem->dep_map, _RET_IP_);
- DEBUG_RWSEMS_WARN_ON(sem->owner != current);
- rwsem_set_reader_owned(sem);
__downgrade_write(sem);
}
@@ -161,7 +147,6 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
- rwsem_set_reader_owned(sem);
}
EXPORT_SYMBOL(down_read_nested);
@@ -172,7 +157,6 @@ void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
- rwsem_set_owner(sem);
}
EXPORT_SYMBOL(_down_write_nest_lock);
@@ -193,7 +177,6 @@ void down_write_nested(struct rw_semaphore *sem, int subclass)
rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
- rwsem_set_owner(sem);
}
EXPORT_SYMBOL(down_write_nested);
@@ -208,7 +191,6 @@ int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
return -EINTR;
}
- rwsem_set_owner(sem);
return 0;
}
@@ -216,7 +198,8 @@ EXPORT_SYMBOL(down_write_killable_nested);
void up_read_non_owner(struct rw_semaphore *sem)
{
- DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
+ sem);
__up_read(sem);
}
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index bad2bca0268b..64877f5294e3 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -23,15 +23,44 @@
* is involved. Ideally we would like to track all the readers that own
* a rwsem, but the overhead is simply too big.
*/
+#include "lock_events.h"
+
#define RWSEM_READER_OWNED (1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED (1UL << 1)
#ifdef CONFIG_DEBUG_RWSEMS
-# define DEBUG_RWSEMS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
+# define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
+ if (!debug_locks_silent && \
+ WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
+ #c, atomic_long_read(&(sem)->count), \
+ (long)((sem)->owner), (long)current, \
+ list_empty(&(sem)->wait_list) ? "" : "not ")) \
+ debug_locks_off(); \
+ } while (0)
+#else
+# define DEBUG_RWSEMS_WARN_ON(c, sem)
+#endif
+
+/*
+ * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
+ * Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ */
+
+/*
+ * the semaphore definition
+ */
+#ifdef CONFIG_64BIT
+# define RWSEM_ACTIVE_MASK 0xffffffffL
#else
-# define DEBUG_RWSEMS_WARN_ON(c)
+# define RWSEM_ACTIVE_MASK 0x0000ffffL
#endif
+#define RWSEM_ACTIVE_BIAS 0x00000001L
+#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
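These are the classic rwsem-xadd bias values; on 64-bit,
RWSEM_WAITING_BIAS is -0x100000000 and the low 32 bits count active
lockers. A few example count values, as a sketch of the encoding rather
than an exhaustive state table:

    0x0000000000000000   unlocked
    0x0000000000000003   three active readers, no waiters
    0xffffffff00000000   RWSEM_WAITING_BIAS: waiters queued, lock unowned
    0xffffffff00000001   RWSEM_ACTIVE_WRITE_BIAS: a writer holds the lock
                         (or one reader plus queued waiters; the ambiguity
                         is why the write trylock paths insist on seeing
                         exactly 0 or RWSEM_WAITING_BIAS before the cmpxchg)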
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
* All writes to owner are protected by WRITE_ONCE() to make sure that
@@ -132,3 +161,144 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
+ rwsem_down_read_failed(sem);
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+ RWSEM_READER_OWNED), sem);
+ } else {
+ rwsem_set_reader_owned(sem);
+ }
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+ RWSEM_READER_OWNED), sem);
+ } else {
+ rwsem_set_reader_owned(sem);
+ }
+ return 0;
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ /*
+ * Optimize for the case when the rwsem is not locked at all.
+ */
+ long tmp = RWSEM_UNLOCKED_VALUE;
+
+ lockevent_inc(rwsem_rtrylock);
+ do {
+ if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+ tmp + RWSEM_ACTIVE_READ_BIAS)) {
+ rwsem_set_reader_owned(sem);
+ return 1;
+ }
+ } while (tmp >= 0);
+ return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+ &sem->count);
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ rwsem_down_write_failed(sem);
+ rwsem_set_owner(sem);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+ &sem->count);
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+ return -EINTR;
+ rwsem_set_owner(sem);
+ return 0;
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ lockevent_inc(rwsem_wtrylock);
+ tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ if (tmp == RWSEM_UNLOCKED_VALUE) {
+ rwsem_set_owner(sem);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
+ sem);
+ rwsem_clear_reader_owned(sem);
+ tmp = atomic_long_dec_return_release(&sem->count);
+ if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
+ rwsem_clear_owner(sem);
+ if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
+ &sem->count) < 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ /*
+ * When downgrading from exclusive to shared ownership,
+ * anything inside the write-locked region cannot leak
+ * into the read side. In contrast, anything in the
+ * read-locked region is ok to be re-ordered into the
+ * write side. As such, rely on RELEASE semantics.
+ */
+ DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
+ tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
+ rwsem_set_reader_owned(sem);
+ if (tmp < 0)
+ rwsem_downgrade_wake(sem);
+}
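As a quick worked check of the downgrade arithmetic: adding
-RWSEM_WAITING_BIAS converts the writer's bias into a single reader's,

    ACTIVE_WRITE_BIAS - WAITING_BIAS
        = (WAITING_BIAS + ACTIVE_BIAS) - WAITING_BIAS
        = ACTIVE_BIAS                     /* exactly one active reader */

and a still-negative result means waiters remain queued, in which case
rwsem_downgrade_wake() admits the readers behind us.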
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 936f3d14dd6b..0ff08380f531 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -22,6 +22,13 @@
#include <linux/debug_locks.h>
#include <linux/export.h>
+#ifdef CONFIG_MMIOWB
+#ifndef arch_mmiowb_state
+DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
+#endif
+#endif
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 9aa0fccd5d43..399669f7eba8 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -111,6 +111,7 @@ void do_raw_spin_lock(raw_spinlock_t *lock)
{
debug_spin_lock_before(lock);
arch_spin_lock(&lock->raw_lock);
+ mmiowb_spin_lock();
debug_spin_lock_after(lock);
}
@@ -118,8 +119,10 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = arch_spin_trylock(&lock->raw_lock);
- if (ret)
+ if (ret) {
+ mmiowb_spin_lock();
debug_spin_lock_after(lock);
+ }
#ifndef CONFIG_SMP
/*
* Must not happen on UP:
@@ -131,6 +134,7 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
+ mmiowb_spin_unlock();
debug_spin_unlock(lock);
arch_spin_unlock(&lock->raw_lock);
}
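The driver-visible effect of folding the mmiowb bookkeeping into the
spinlock paths is that the explicit barrier before unlock becomes
unnecessary; a hedged sketch with hypothetical dev_lock/dev_regs/REG names:

    spin_lock(&dev_lock);
    writel(val, dev_regs + REG);
    /* previously: mmiowb(); */
    spin_unlock(&dev_lock);   /* unlock now orders the MMIO write on
                               * architectures that track mmiowb state */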
diff --git a/kernel/module.c b/kernel/module.c
index 0b9aa8ab89f0..a9020bdd4cf6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -98,6 +98,10 @@ DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
+/* Work queue for freeing init sections in success case */
+static struct work_struct init_free_wq;
+static struct llist_head init_free_list;
+
#ifdef CONFIG_MODULES_TREE_LOOKUP
/*
@@ -1949,9 +1953,16 @@ void module_enable_ro(const struct module *mod, bool after_init)
if (!rodata_enabled)
return;
+ set_vm_flush_reset_perms(mod->core_layout.base);
+ set_vm_flush_reset_perms(mod->init_layout.base);
frob_text(&mod->core_layout, set_memory_ro);
+ frob_text(&mod->core_layout, set_memory_x);
+
frob_rodata(&mod->core_layout, set_memory_ro);
+
frob_text(&mod->init_layout, set_memory_ro);
+ frob_text(&mod->init_layout, set_memory_x);
+
frob_rodata(&mod->init_layout, set_memory_ro);
if (after_init)
@@ -1967,15 +1978,6 @@ static void module_enable_nx(const struct module *mod)
frob_writable_data(&mod->init_layout, set_memory_nx);
}
-static void module_disable_nx(const struct module *mod)
-{
- frob_rodata(&mod->core_layout, set_memory_x);
- frob_ro_after_init(&mod->core_layout, set_memory_x);
- frob_writable_data(&mod->core_layout, set_memory_x);
- frob_rodata(&mod->init_layout, set_memory_x);
- frob_writable_data(&mod->init_layout, set_memory_x);
-}
-
/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
@@ -2019,23 +2021,8 @@ void set_all_modules_text_ro(void)
}
mutex_unlock(&module_mutex);
}
-
-static void disable_ro_nx(const struct module_layout *layout)
-{
- if (rodata_enabled) {
- frob_text(layout, set_memory_rw);
- frob_rodata(layout, set_memory_rw);
- frob_ro_after_init(layout, set_memory_rw);
- }
- frob_rodata(layout, set_memory_x);
- frob_ro_after_init(layout, set_memory_x);
- frob_writable_data(layout, set_memory_x);
-}
-
#else
-static void disable_ro_nx(const struct module_layout *layout) { }
static void module_enable_nx(const struct module *mod) { }
-static void module_disable_nx(const struct module *mod) { }
#endif
#ifdef CONFIG_LIVEPATCH
@@ -2115,6 +2102,11 @@ static void free_module_elf(struct module *mod)
void __weak module_memfree(void *module_region)
{
+ /*
+ * This memory may be RO, and freeing RO memory in an interrupt is not
+ * supported by vmalloc.
+ */
+ WARN_ON(in_interrupt());
vfree(module_region);
}
@@ -2166,7 +2158,6 @@ static void free_module(struct module *mod)
mutex_unlock(&module_mutex);
/* This may be empty, but that's OK */
- disable_ro_nx(&mod->init_layout);
module_arch_freeing_init(mod);
module_memfree(mod->init_layout.base);
kfree(mod->args);
@@ -2176,7 +2167,6 @@ static void free_module(struct module *mod)
lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
/* Finally, free the core (containing the module structure) */
- disable_ro_nx(&mod->core_layout);
module_memfree(mod->core_layout.base);
}
@@ -3415,17 +3405,34 @@ static void do_mod_ctors(struct module *mod)
/* For freeing module_init on success, in case kallsyms is still traversing it */
struct mod_initfree {
- struct rcu_head rcu;
+ struct llist_node node;
void *module_init;
};
-static void do_free_init(struct rcu_head *head)
+static void do_free_init(struct work_struct *w)
{
- struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
- module_memfree(m->module_init);
- kfree(m);
+ struct llist_node *pos, *n, *list;
+ struct mod_initfree *initfree;
+
+ list = llist_del_all(&init_free_list);
+
+ synchronize_rcu();
+
+ llist_for_each_safe(pos, n, list) {
+ initfree = container_of(pos, struct mod_initfree, node);
+ module_memfree(initfree->module_init);
+ kfree(initfree);
+ }
}
+static int __init modules_wq_init(void)
+{
+ INIT_WORK(&init_free_wq, do_free_init);
+ init_llist_head(&init_free_list);
+ return 0;
+}
+module_init(modules_wq_init);
+
/*
* This is where the real work happens.
*
@@ -3502,7 +3509,6 @@ static noinline int do_init_module(struct module *mod)
#endif
module_enable_ro(mod, true);
mod_tree_remove_init(mod);
- disable_ro_nx(&mod->init_layout);
module_arch_freeing_init(mod);
mod->init_layout.base = NULL;
mod->init_layout.size = 0;
@@ -3513,14 +3519,18 @@ static noinline int do_init_module(struct module *mod)
* We want to free module_init, but be aware that kallsyms may be
* walking this with preempt disabled. In all the failure paths, we
* call synchronize_rcu(), but we don't want to slow down the success
- * path, so use actual RCU here.
+ * path. module_memfree() cannot be called in an interrupt, so do the
+ * work and call synchronize_rcu() in a work queue.
+ *
* Note that module_alloc() on most architectures creates W+X page
* mappings which won't be cleaned up until do_free_init() runs. Any
* code such as mark_rodata_ro() which depends on those mappings to
* be cleaned up needs to sync with the queued work - ie
* rcu_barrier()
*/
- call_rcu(&freeinit->rcu, do_free_init);
+ if (llist_add(&freeinit->node, &init_free_list))
+ schedule_work(&init_free_wq);
+
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
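The hand-off above rests on a lock-free idiom worth spelling out:
llist_add() returns true only when it installed the node on an empty list,
so exactly one enqueuer schedules the work, and the worker later claims
the whole batch with llist_del_all(). A minimal sketch under those
assumptions (INIT_WORK() setup and the real module_memfree() call omitted):

    static LLIST_HEAD(pending);
    static struct work_struct reaper;

    static void queue_free(struct mod_initfree *f)
    {
            /* first node onto an empty list -> kick the worker once */
            if (llist_add(&f->node, &pending))
                    schedule_work(&reaper);
    }

    static void reaper_fn(struct work_struct *w)
    {
            struct llist_node *list = llist_del_all(&pending);
            struct mod_initfree *f, *n;

            synchronize_rcu();      /* wait out preempt-disabled walkers */
            llist_for_each_entry_safe(f, n, list, node)
                    kfree(f);       /* real code frees f->module_init too */
    }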
@@ -3817,10 +3827,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
module_bug_cleanup(mod);
mutex_unlock(&module_mutex);
- /* we can't deallocate the module until we clear memory protection */
- module_disable_ro(mod);
- module_disable_nx(mod);
-
ddebug_cleanup:
ftrace_release_mod(mod);
dynamic_debug_remove(mod, info->debug);
diff --git a/kernel/panic.c b/kernel/panic.c
index 0ae0d7332f12..c1fcaad337b7 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -318,12 +318,7 @@ void panic(const char *fmt, ...)
}
#endif
#if defined(CONFIG_S390)
- {
- unsigned long caller;
-
- caller = (unsigned long)__builtin_return_address(0);
- disabled_wait(caller);
- }
+ disabled_wait();
#endif
pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);
local_irq_enable();
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index f8fe57d1022e..9bbaaab14b36 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -114,6 +114,15 @@ config PM_SLEEP_SMP
depends on PM_SLEEP
select HOTPLUG_CPU
+config PM_SLEEP_SMP_NONZERO_CPU
+ def_bool y
+ depends on PM_SLEEP_SMP
+ depends on ARCH_SUSPEND_NONZERO_CPU
+ ---help---
+	  If an arch can suspend (for suspend, hibernate, kexec, etc.) on a
+	  non-zero numbered CPU, it may define ARCH_SUSPEND_NONZERO_CPU. This
+	  will allow the nohz_full mask to include CPU0.
+
config PM_AUTOSLEEP
bool "Opportunistic sleep"
depends on PM_SLEEP
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index abef759de7c8..c8c272df7154 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -14,7 +14,6 @@
#include <linux/export.h>
#include <linux/suspend.h>
-#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
@@ -281,7 +280,7 @@ static int create_image(int platform_mode)
if (error || hibernation_test(TEST_PLATFORM))
goto Platform_finish;
- error = disable_nonboot_cpus();
+ error = suspend_disable_secondary_cpus();
if (error || hibernation_test(TEST_CPUS))
goto Enable_cpus;
@@ -323,7 +322,7 @@ static int create_image(int platform_mode)
local_irq_enable();
Enable_cpus:
- enable_nonboot_cpus();
+ suspend_enable_secondary_cpus();
Platform_finish:
platform_finish(platform_mode);
@@ -417,7 +416,7 @@ int hibernation_snapshot(int platform_mode)
int __weak hibernate_resume_nonboot_cpu_disable(void)
{
- return disable_nonboot_cpus();
+ return suspend_disable_secondary_cpus();
}
/**
@@ -486,7 +485,7 @@ static int resume_target_kernel(bool platform_mode)
local_irq_enable();
Enable_cpus:
- enable_nonboot_cpus();
+ suspend_enable_secondary_cpus();
Cleanup:
platform_restore_cleanup(platform_mode);
@@ -564,7 +563,7 @@ int hibernation_platform_enter(void)
if (error)
goto Platform_finish;
- error = disable_nonboot_cpus();
+ error = suspend_disable_secondary_cpus();
if (error)
goto Enable_cpus;
@@ -586,7 +585,7 @@ int hibernation_platform_enter(void)
local_irq_enable();
Enable_cpus:
- enable_nonboot_cpus();
+ suspend_enable_secondary_cpus();
Platform_finish:
hibernation_ops->finish();
@@ -709,9 +708,7 @@ int hibernate(void)
goto Exit;
}
- pr_info("Syncing filesystems ... \n");
- ksys_sync();
- pr_info("done.\n");
+ ksys_sync_helper();
error = freeze_processes();
if (error)
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 98e76cad128b..4f43e724f6eb 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -16,6 +16,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
+#include <linux/syscalls.h>
#include "power.h"
@@ -51,6 +52,19 @@ void unlock_system_sleep(void)
}
EXPORT_SYMBOL_GPL(unlock_system_sleep);
+void ksys_sync_helper(void)
+{
+ ktime_t start;
+ long elapsed_msecs;
+
+ start = ktime_get();
+ ksys_sync();
+ elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
+ pr_info("Filesystems sync: %ld.%03ld seconds\n",
+ elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
+}
+EXPORT_SYMBOL_GPL(ksys_sync_helper);
+
/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
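For example, an elapsed_msecs of 1234 prints "Filesystems sync: 1.234
seconds": the division by MSEC_PER_SEC (1000) yields the whole seconds and
the remainder supplies the millisecond fraction.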
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 4802b039b89f..bc9558ab1e5b 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -965,6 +965,9 @@ void __init __register_nosave_region(unsigned long start_pfn,
/* This allocation cannot fail */
region = memblock_alloc(sizeof(struct nosave_region),
SMP_CACHE_BYTES);
+ if (!region)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct nosave_region));
}
region->start_pfn = start_pfn;
region->end_pfn = end_pfn;
@@ -1339,8 +1342,9 @@ static inline void do_copy_page(long *dst, long *src)
* safe_copy_page - Copy a page in a safe way.
*
* Check if the page we are going to copy is marked as present in the kernel
- * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
- * and in that case kernel_page_present() always returns 'true').
+ * page tables. This is always the case if CONFIG_DEBUG_PAGEALLOC or
+ * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
+ * always returns 'true'.
*/
static void safe_copy_page(void *dst, struct page *s_page)
{
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 0bd595a0b610..ef908c134b34 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -17,7 +17,6 @@
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
-#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -428,7 +427,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
- error = disable_nonboot_cpus();
+ error = suspend_disable_secondary_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
@@ -458,7 +457,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
BUG_ON(irqs_disabled());
Enable_cpus:
- enable_nonboot_cpus();
+ suspend_enable_secondary_cpus();
Platform_wake:
platform_resume_noirq(state);
@@ -568,13 +567,11 @@ static int enter_state(suspend_state_t state)
if (state == PM_SUSPEND_TO_IDLE)
s2idle_begin();
-#ifndef CONFIG_SUSPEND_SKIP_SYNC
- trace_suspend_resume(TPS("sync_filesystems"), 0, true);
- pr_info("Syncing filesystems ... ");
- ksys_sync();
- pr_cont("done.\n");
- trace_suspend_resume(TPS("sync_filesystems"), 0, false);
-#endif
+ if (!IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC)) {
+ trace_suspend_resume(TPS("sync_filesystems"), 0, true);
+ ksys_sync_helper();
+ trace_suspend_resume(TPS("sync_filesystems"), 0, false);
+ }
pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
pm_suspend_clear_flags();
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 2d8b60a3c86b..cb24e840a3e6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -10,7 +10,6 @@
*/
#include <linux/suspend.h>
-#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
@@ -228,9 +227,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
if (data->frozen)
break;
- printk("Syncing filesystems ... ");
- ksys_sync();
- printk("done.\n");
+ ksys_sync_helper();
error = freeze_processes();
if (error)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 8eee85bb2687..02ca827b8fac 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -65,6 +65,7 @@ int console_printk[4] = {
CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */
CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
};
+EXPORT_SYMBOL_GPL(console_printk);
atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);
@@ -1143,14 +1144,7 @@ void __init setup_log_buf(int early)
if (!new_log_buf_len)
return;
- if (early) {
- new_log_buf =
- memblock_alloc(new_log_buf_len, LOG_ALIGN);
- } else {
- new_log_buf = memblock_alloc_nopanic(new_log_buf_len,
- LOG_ALIGN);
- }
-
+ new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
if (unlikely(!new_log_buf)) {
pr_err("log_buf_len: %lu bytes not available\n",
new_log_buf_len);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 771e93f9c43f..6f357f4fc859 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -29,6 +29,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
+#include <linux/sched/signal.h>
/*
* Access another process' address space via ptrace.
@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request,
ret = ptrace_setsiginfo(child, &siginfo);
break;
- case PTRACE_GETSIGMASK:
+ case PTRACE_GETSIGMASK: {
+ sigset_t *mask;
+
if (addr != sizeof(sigset_t)) {
ret = -EINVAL;
break;
}
- if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
+ if (test_tsk_restore_sigmask(child))
+ mask = &child->saved_sigmask;
+ else
+ mask = &child->blocked;
+
+ if (copy_to_user(datavp, mask, sizeof(sigset_t)))
ret = -EFAULT;
else
ret = 0;
break;
+ }
case PTRACE_SETSIGMASK: {
sigset_t new_set;
@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request,
child->blocked = new_set;
spin_unlock_irq(&child->sighand->siglock);
+ clear_tsk_restore_sigmask(child);
+
ret = 0;
break;
}
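From the tracer's side the calling convention is unchanged;
PTRACE_GETSIGMASK simply now reports the mask the tracee will resume with
(saved_sigmask) when it is stopped inside a sigsuspend()-style call. A
hypothetical tracer-side probe, assuming pid names a stopped tracee, with
error handling elided (the request constant may need <linux/ptrace.h> on
older libcs):

    #include <stdio.h>
    #include <signal.h>
    #include <sys/ptrace.h>

    sigset_t mask;

    /* addr carries the expected sigset size, data the destination */
    if (ptrace(PTRACE_GETSIGMASK, pid, (void *)sizeof(mask), &mask) == 0 &&
        sigismember(&mask, SIGUSR1))
            printf("tracee currently blocks SIGUSR1\n");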
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index acee72c0b24b..4b58c907b4b7 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -233,6 +233,7 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
#ifdef CONFIG_RCU_STALL_COMMON
extern int rcu_cpu_stall_suppress;
+extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);
#define rcu_ftrace_dump_stall_suppress() \
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index c29761152874..7a6890b23c5f 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -494,6 +494,10 @@ rcu_perf_cleanup(void)
if (torture_cleanup_begin())
return;
+ if (!cur_ops) {
+ torture_cleanup_end();
+ return;
+ }
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
@@ -614,6 +618,7 @@ rcu_perf_init(void)
pr_cont("\n");
WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
firsterr = -EINVAL;
+ cur_ops = NULL;
goto unwind;
}
if (cur_ops->init)
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index f14d1b18a74f..efaa5b3f4d3f 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -299,7 +299,6 @@ struct rcu_torture_ops {
int irq_capable;
int can_boost;
int extendables;
- int ext_irq_conflict;
const char *name;
};
@@ -592,12 +591,7 @@ static void srcu_torture_init(void)
static void srcu_torture_cleanup(void)
{
- static DEFINE_TORTURE_RANDOM(rand);
-
- if (torture_random(&rand) & 0x800)
- cleanup_srcu_struct(&srcu_ctld);
- else
- cleanup_srcu_struct_quiesced(&srcu_ctld);
+ cleanup_srcu_struct(&srcu_ctld);
srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
@@ -1160,7 +1154,7 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
unsigned long randmask2 = randmask1 >> 3;
WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
- /* Most of the time lots of bits, half the time only one bit. */
+ /* Mostly only one bit (need preemption!), sometimes lots of bits. */
if (!(randmask1 & 0x7))
mask = mask & randmask2;
else
@@ -1170,10 +1164,6 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
(!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
- if ((mask & RCUTORTURE_RDR_IRQ) &&
- !(mask & cur_ops->ext_irq_conflict) &&
- (oldmask & cur_ops->ext_irq_conflict))
- mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
return mask ?: RCUTORTURE_RDR_RCU;
}
@@ -1848,7 +1838,7 @@ static int rcutorture_oom_notify(struct notifier_block *self,
WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
__func__);
rcu_torture_fwd_cb_hist();
- rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat) / 2));
+ rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
WRITE_ONCE(rcu_fwd_emergency_stop, true);
smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
pr_info("%s: Freed %lu RCU callbacks.\n",
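The one-character move above fixes an operator-precedence bug; with
jiffies = 1000 and rcu_fwd_startat = 800, the old expression evaluated to
1 + (1000 - 800/2) = 601 jiffies, whereas the intended half-age is
1 + (1000 - 800)/2 = 101.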
@@ -2094,6 +2084,10 @@ rcu_torture_cleanup(void)
cur_ops->cb_barrier();
return;
}
+ if (!cur_ops) {
+ torture_cleanup_end();
+ return;
+ }
rcu_torture_barrier_cleanup();
torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
@@ -2267,6 +2261,7 @@ rcu_torture_init(void)
pr_cont("\n");
WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
firsterr = -EINVAL;
+ cur_ops = NULL;
goto unwind;
}
if (cur_ops->fqs == NULL && fqs_duration != 0) {
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 5d4a39a6505a..44d6606b8325 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -76,19 +76,16 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory.
*/
-void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
+void cleanup_srcu_struct(struct srcu_struct *ssp)
{
WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
- if (quiesced)
- WARN_ON(work_pending(&ssp->srcu_work));
- else
- flush_work(&ssp->srcu_work);
+ flush_work(&ssp->srcu_work);
WARN_ON(ssp->srcu_gp_running);
WARN_ON(ssp->srcu_gp_waiting);
WARN_ON(ssp->srcu_cb_head);
WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
}
-EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
/*
* Removes the count for the old reader from the appropriate element of
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index a60b8ba9e1ac..9b761e546de8 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -360,8 +360,14 @@ static unsigned long srcu_get_delay(struct srcu_struct *ssp)
return SRCU_INTERVAL;
}
-/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
-void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
+/**
+ * cleanup_srcu_struct - deconstruct a sleep-RCU structure
+ * @ssp: structure to clean up.
+ *
+ * Must invoke this after you are finished using a given srcu_struct that
+ * was initialized via init_srcu_struct(), else you leak memory.
+ */
+void cleanup_srcu_struct(struct srcu_struct *ssp)
{
int cpu;
@@ -369,24 +375,14 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
return; /* Just leak it! */
if (WARN_ON(srcu_readers_active(ssp)))
return; /* Just leak it! */
- if (quiesced) {
- if (WARN_ON(delayed_work_pending(&ssp->work)))
- return; /* Just leak it! */
- } else {
- flush_delayed_work(&ssp->work);
- }
+ flush_delayed_work(&ssp->work);
for_each_possible_cpu(cpu) {
struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
- if (quiesced) {
- if (WARN_ON(timer_pending(&sdp->delay_work)))
- return; /* Just leak it! */
- if (WARN_ON(work_pending(&sdp->work)))
- return; /* Just leak it! */
- } else {
- del_timer_sync(&sdp->delay_work);
- flush_work(&sdp->work);
- }
+ del_timer_sync(&sdp->delay_work);
+ flush_work(&sdp->work);
+ if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
+ return; /* Forgot srcu_barrier(), so just leak it! */
}
if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
WARN_ON(srcu_readers_active(ssp))) {
@@ -397,7 +393,7 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
free_percpu(ssp->sda);
ssp->sda = NULL;
}
-EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
/*
* Counts the new reader in the appropriate per-CPU element of the
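With the quiesced variant gone there is a single srcu_struct lifecycle; a
minimal sketch of the expected call sequence, assuming a statically
allocated srcu_struct (illustrative only):

    static struct srcu_struct my_srcu;
    int idx;

    init_srcu_struct(&my_srcu);

    idx = srcu_read_lock(&my_srcu);
    /* ... read-side critical section ... */
    srcu_read_unlock(&my_srcu, idx);

    srcu_barrier(&my_srcu);         /* drain outstanding call_srcu() CBs */
    cleanup_srcu_struct(&my_srcu);  /* now unconditionally flushes work;
                                     * the new WARN_ON catches a missing
                                     * srcu_barrier() */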
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 911bd9076d43..477b4eb44af5 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -52,7 +52,7 @@ void rcu_qs(void)
local_irq_save(flags);
if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
- raise_softirq(RCU_SOFTIRQ);
+ raise_softirq_irqoff(RCU_SOFTIRQ);
}
local_irq_restore(flags);
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8eee921b384d..b4d88a594785 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -102,11 +102,6 @@ int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
-/* panic() on RCU Stall sysctl. */
-int sysctl_panic_on_rcu_stall __read_mostly;
-/* Commandeer a sysrq key to dump RCU's tree. */
-static bool sysrq_rcu;
-module_param(sysrq_rcu, bool, 0444);
/*
* The rcu_scheduler_active variable is initialized to the value
@@ -149,7 +144,7 @@ static void sync_sched_exp_online_cleanup(int cpu);
/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
-module_param(kthread_prio, int, 0644);
+module_param(kthread_prio, int, 0444);
/* Delay in jiffies for grace-period initialization delays, debug only. */
@@ -406,7 +401,7 @@ static bool rcu_kick_kthreads;
*/
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
-static ulong jiffies_to_sched_qs; /* Adjusted version of above if not default */
+static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
/*
@@ -424,6 +419,7 @@ static void adjust_jiffies_till_sched_qs(void)
WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
return;
}
+ /* Otherwise, set to third fqs scan, but bound below on large system. */
j = READ_ONCE(jiffies_till_first_fqs) +
2 * READ_ONCE(jiffies_till_next_fqs);
if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
@@ -513,74 +509,6 @@ static const char *gp_state_getname(short gs)
}
/*
- * Show the state of the grace-period kthreads.
- */
-void show_rcu_gp_kthreads(void)
-{
- int cpu;
- unsigned long j;
- unsigned long ja;
- unsigned long jr;
- unsigned long jw;
- struct rcu_data *rdp;
- struct rcu_node *rnp;
-
- j = jiffies;
- ja = j - READ_ONCE(rcu_state.gp_activity);
- jr = j - READ_ONCE(rcu_state.gp_req_activity);
- jw = j - READ_ONCE(rcu_state.gp_wake_time);
- pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
- rcu_state.name, gp_state_getname(rcu_state.gp_state),
- rcu_state.gp_state,
- rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
- ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
- (long)READ_ONCE(rcu_state.gp_seq),
- (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
- READ_ONCE(rcu_state.gp_flags));
- rcu_for_each_node_breadth_first(rnp) {
- if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
- continue;
- pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
- rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
- (long)rnp->gp_seq_needed);
- if (!rcu_is_leaf_node(rnp))
- continue;
- for_each_leaf_node_possible_cpu(rnp, cpu) {
- rdp = per_cpu_ptr(&rcu_data, cpu);
- if (rdp->gpwrap ||
- ULONG_CMP_GE(rcu_state.gp_seq,
- rdp->gp_seq_needed))
- continue;
- pr_info("\tcpu %d ->gp_seq_needed %ld\n",
- cpu, (long)rdp->gp_seq_needed);
- }
- }
- /* sched_show_task(rcu_state.gp_kthread); */
-}
-EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
-
-/* Dump grace-period-request information due to commandeered sysrq. */
-static void sysrq_show_rcu(int key)
-{
- show_rcu_gp_kthreads();
-}
-
-static struct sysrq_key_op sysrq_rcudump_op = {
- .handler = sysrq_show_rcu,
- .help_msg = "show-rcu(y)",
- .action_msg = "Show RCU tree",
- .enable_mask = SYSRQ_ENABLE_DUMP,
-};
-
-static int __init rcu_sysrq_init(void)
-{
- if (sysrq_rcu)
- return register_sysrq_key('y', &sysrq_rcudump_op);
- return 0;
-}
-early_initcall(rcu_sysrq_init);
-
-/*
* Send along grace-period-related data for rcutorture diagnostics.
*/
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
@@ -1034,27 +962,6 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
}
/*
- * Handler for the irq_work request posted when a grace period has
- * gone on for too long, but not yet long enough for an RCU CPU
- * stall warning. Set state appropriately, but just complain if
- * there is unexpected state on entry.
- */
-static void rcu_iw_handler(struct irq_work *iwp)
-{
- struct rcu_data *rdp;
- struct rcu_node *rnp;
-
- rdp = container_of(iwp, struct rcu_data, rcu_iw);
- rnp = rdp->mynode;
- raw_spin_lock_rcu_node(rnp);
- if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
- rdp->rcu_iw_gp_seq = rnp->gp_seq;
- rdp->rcu_iw_pending = false;
- }
- raw_spin_unlock_rcu_node(rnp);
-}
-
-/*
* Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
* idle state since the last call to dyntick_save_progress_counter()
@@ -1167,295 +1074,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
return 0;
}
-static void record_gp_stall_check_time(void)
-{
- unsigned long j = jiffies;
- unsigned long j1;
-
- rcu_state.gp_start = j;
- j1 = rcu_jiffies_till_stall_check();
- /* Record ->gp_start before ->jiffies_stall. */
- smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
- rcu_state.jiffies_resched = j + j1 / 2;
- rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
-}
-
-/*
- * Complain about starvation of grace-period kthread.
- */
-static void rcu_check_gp_kthread_starvation(void)
-{
- struct task_struct *gpk = rcu_state.gp_kthread;
- unsigned long j;
-
- j = jiffies - READ_ONCE(rcu_state.gp_activity);
- if (j > 2 * HZ) {
- pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
- rcu_state.name, j,
- (long)rcu_seq_current(&rcu_state.gp_seq),
- READ_ONCE(rcu_state.gp_flags),
- gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
- gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
- if (gpk) {
- pr_err("RCU grace-period kthread stack dump:\n");
- sched_show_task(gpk);
- wake_up_process(gpk);
- }
- }
-}
-
-/*
- * Dump stacks of all tasks running on stalled CPUs. First try using
- * NMIs, but fall back to manual remote stack tracing on architectures
- * that don't support NMI-based stack dumps. The NMI-triggered stack
- * traces are more accurate because they are printed by the target CPU.
- */
-static void rcu_dump_cpu_stacks(void)
-{
- int cpu;
- unsigned long flags;
- struct rcu_node *rnp;
-
- rcu_for_each_leaf_node(rnp) {
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- for_each_leaf_node_possible_cpu(rnp, cpu)
- if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
- if (!trigger_single_cpu_backtrace(cpu))
- dump_cpu_task(cpu);
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- }
-}
-
-/*
- * If too much time has passed in the current grace period, and if
- * so configured, go kick the relevant kthreads.
- */
-static void rcu_stall_kick_kthreads(void)
-{
- unsigned long j;
-
- if (!rcu_kick_kthreads)
- return;
- j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
- if (time_after(jiffies, j) && rcu_state.gp_kthread &&
- (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
- WARN_ONCE(1, "Kicking %s grace-period kthread\n",
- rcu_state.name);
- rcu_ftrace_dump(DUMP_ALL);
- wake_up_process(rcu_state.gp_kthread);
- WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
- }
-}
-
-static void panic_on_rcu_stall(void)
-{
- if (sysctl_panic_on_rcu_stall)
- panic("RCU Stall\n");
-}
-
-static void print_other_cpu_stall(unsigned long gp_seq)
-{
- int cpu;
- unsigned long flags;
- unsigned long gpa;
- unsigned long j;
- int ndetected = 0;
- struct rcu_node *rnp = rcu_get_root();
- long totqlen = 0;
-
- /* Kick and suppress, if so configured. */
- rcu_stall_kick_kthreads();
- if (rcu_cpu_stall_suppress)
- return;
-
- /*
- * OK, time to rat on our buddy...
- * See Documentation/RCU/stallwarn.txt for info on how to debug
- * RCU CPU stall warnings.
- */
- pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name);
- print_cpu_stall_info_begin();
- rcu_for_each_leaf_node(rnp) {
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- ndetected += rcu_print_task_stall(rnp);
- if (rnp->qsmask != 0) {
- for_each_leaf_node_possible_cpu(rnp, cpu)
- if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
- print_cpu_stall_info(cpu);
- ndetected++;
- }
- }
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- }
-
- print_cpu_stall_info_end();
- for_each_possible_cpu(cpu)
- totqlen += rcu_get_n_cbs_cpu(cpu);
- pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
- smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
- (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
- if (ndetected) {
- rcu_dump_cpu_stacks();
-
- /* Complain about tasks blocking the grace period. */
- rcu_print_detail_task_stall();
- } else {
- if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
- pr_err("INFO: Stall ended before state dump start\n");
- } else {
- j = jiffies;
- gpa = READ_ONCE(rcu_state.gp_activity);
- pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
- rcu_state.name, j - gpa, j, gpa,
- READ_ONCE(jiffies_till_next_fqs),
- rcu_get_root()->qsmask);
- /* In this case, the current CPU might be at fault. */
- sched_show_task(current);
- }
- }
- /* Rewrite if needed in case of slow consoles. */
- if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
- WRITE_ONCE(rcu_state.jiffies_stall,
- jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-
- rcu_check_gp_kthread_starvation();
-
- panic_on_rcu_stall();
-
- rcu_force_quiescent_state(); /* Kick them all. */
-}
-
-static void print_cpu_stall(void)
-{
- int cpu;
- unsigned long flags;
- struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
- struct rcu_node *rnp = rcu_get_root();
- long totqlen = 0;
-
- /* Kick and suppress, if so configured. */
- rcu_stall_kick_kthreads();
- if (rcu_cpu_stall_suppress)
- return;
-
- /*
- * OK, time to rat on ourselves...
- * See Documentation/RCU/stallwarn.txt for info on how to debug
- * RCU CPU stall warnings.
- */
- pr_err("INFO: %s self-detected stall on CPU", rcu_state.name);
- print_cpu_stall_info_begin();
- raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
- print_cpu_stall_info(smp_processor_id());
- raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
- print_cpu_stall_info_end();
- for_each_possible_cpu(cpu)
- totqlen += rcu_get_n_cbs_cpu(cpu);
- pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
- jiffies - rcu_state.gp_start,
- (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
-
- rcu_check_gp_kthread_starvation();
-
- rcu_dump_cpu_stacks();
-
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- /* Rewrite if needed in case of slow consoles. */
- if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
- WRITE_ONCE(rcu_state.jiffies_stall,
- jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
- panic_on_rcu_stall();
-
- /*
- * Attempt to revive the RCU machinery by forcing a context switch.
- *
- * A context switch would normally allow the RCU state machine to make
- * progress and it could be we're stuck in kernel space without context
- * switches for an entirely unreasonable amount of time.
- */
- set_tsk_need_resched(current);
- set_preempt_need_resched();
-}
-
-static void check_cpu_stall(struct rcu_data *rdp)
-{
- unsigned long gs1;
- unsigned long gs2;
- unsigned long gps;
- unsigned long j;
- unsigned long jn;
- unsigned long js;
- struct rcu_node *rnp;
-
- if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
- !rcu_gp_in_progress())
- return;
- rcu_stall_kick_kthreads();
- j = jiffies;
-
- /*
- * Lots of memory barriers to reject false positives.
- *
- * The idea is to pick up rcu_state.gp_seq, then
- * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
- * another copy of rcu_state.gp_seq. These values are updated in
- * the opposite order with memory barriers (or equivalent) during
- * grace-period initialization and cleanup. Now, a false positive
- * can occur if we get a new value of rcu_state.gp_start and an old
- * value of rcu_state.jiffies_stall. But given the memory barriers,
- * the only way that this can happen is if one grace period ends
- * and another starts between these two fetches. This is detected
- * by comparing the second fetch of rcu_state.gp_seq with the
- * previous fetch from rcu_state.gp_seq.
- *
- * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
- * and rcu_state.gp_start suffice to forestall false positives.
- */
- gs1 = READ_ONCE(rcu_state.gp_seq);
- smp_rmb(); /* Pick up ->gp_seq first... */
- js = READ_ONCE(rcu_state.jiffies_stall);
- smp_rmb(); /* ...then ->jiffies_stall before the rest... */
- gps = READ_ONCE(rcu_state.gp_start);
- smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
- gs2 = READ_ONCE(rcu_state.gp_seq);
- if (gs1 != gs2 ||
- ULONG_CMP_LT(j, js) ||
- ULONG_CMP_GE(gps, js))
- return; /* No stall or GP completed since entering function. */
- rnp = rdp->mynode;
- jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
- if (rcu_gp_in_progress() &&
- (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
- cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
- /* We haven't checked in, so go dump stack. */
- print_cpu_stall();
-
- } else if (rcu_gp_in_progress() &&
- ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
- cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
- /* They had a few time units to dump stack, so complain. */
- print_other_cpu_stall(gs2);
- }
-}
-
-/**
- * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
- *
- * Set the stall-warning timeout way off into the future, thus preventing
- * any RCU CPU stall-warning messages from appearing in the current set of
- * RCU grace periods.
- *
- * The caller must disable hard irqs.
- */
-void rcu_cpu_stall_reset(void)
-{
- WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
-}
-
/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
unsigned long gp_seq_req, const char *s)
@@ -1585,7 +1203,7 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
static void rcu_gp_kthread_wake(void)
{
if ((current == rcu_state.gp_kthread &&
- !in_interrupt() && !in_serving_softirq()) ||
+ !in_irq() && !in_serving_softirq()) ||
!READ_ONCE(rcu_state.gp_flags) ||
!rcu_state.gp_kthread)
return;
@@ -2295,11 +1913,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
return;
}
mask = rdp->grpmask;
+ rdp->core_needs_qs = false;
if ((rnp->qsmask & mask) == 0) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
} else {
- rdp->core_needs_qs = false;
-
/*
* This GP can't end until cpu checks in, so all of our
* callbacks can be processed during the next GP.
@@ -2548,11 +2165,11 @@ void rcu_sched_clock_irq(int user)
}
/*
- * Scan the leaf rcu_node structures, processing dyntick state for any that
- * have not yet encountered a quiescent state, using the function specified.
- * Also initiate boosting for any threads blocked on the root rcu_node.
- *
- * The caller must have suppressed start of new grace periods.
+ * Scan the leaf rcu_node structures. For each structure on which all
+ * CPUs have reported a quiescent state and on which there are tasks
+ * blocking the current grace period, initiate RCU priority boosting.
+ * Otherwise, invoke the specified function to check dyntick state for
+ * each CPU that has not yet reported a quiescent state.
*/
static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
{
@@ -2635,101 +2252,6 @@ void rcu_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
-/*
- * This function checks for grace-period requests that fail to motivate
- * RCU to come out of its idle mode.
- */
-void
-rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
- const unsigned long gpssdelay)
-{
- unsigned long flags;
- unsigned long j;
- struct rcu_node *rnp_root = rcu_get_root();
- static atomic_t warned = ATOMIC_INIT(0);
-
- if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
- ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
- return;
- j = jiffies; /* Expensive access, and in common case don't get here. */
- if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
- time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
- atomic_read(&warned))
- return;
-
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- j = jiffies;
- if (rcu_gp_in_progress() ||
- ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
- time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
- time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
- atomic_read(&warned)) {
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- return;
- }
- /* Hold onto the leaf lock to make others see warned==1. */
-
- if (rnp_root != rnp)
- raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
- j = jiffies;
- if (rcu_gp_in_progress() ||
- ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
- time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
- time_before(j, rcu_state.gp_activity + gpssdelay) ||
- atomic_xchg(&warned, 1)) {
- raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- return;
- }
- WARN_ON(1);
- if (rnp_root != rnp)
- raw_spin_unlock_rcu_node(rnp_root);
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- show_rcu_gp_kthreads();
-}
-
-/*
- * Do a forward-progress check for rcutorture. This is normally invoked
- * due to an OOM event. The argument "j" gives the time period during
- * which rcutorture would like progress to have been made.
- */
-void rcu_fwd_progress_check(unsigned long j)
-{
- unsigned long cbs;
- int cpu;
- unsigned long max_cbs = 0;
- int max_cpu = -1;
- struct rcu_data *rdp;
-
- if (rcu_gp_in_progress()) {
- pr_info("%s: GP age %lu jiffies\n",
- __func__, jiffies - rcu_state.gp_start);
- show_rcu_gp_kthreads();
- } else {
- pr_info("%s: Last GP end %lu jiffies ago\n",
- __func__, jiffies - rcu_state.gp_end);
- preempt_disable();
- rdp = this_cpu_ptr(&rcu_data);
- rcu_check_gp_start_stall(rdp->mynode, rdp, j);
- preempt_enable();
- }
- for_each_possible_cpu(cpu) {
- cbs = rcu_get_n_cbs_cpu(cpu);
- if (!cbs)
- continue;
- if (max_cpu < 0)
- pr_info("%s: callbacks", __func__);
- pr_cont(" %d: %lu", cpu, cbs);
- if (cbs <= max_cbs)
- continue;
- max_cbs = cbs;
- max_cpu = cpu;
- }
- if (max_cpu >= 0)
- pr_cont("\n");
-}
-EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
-
/* Perform RCU core processing work for the current CPU. */
static __latent_entropy void rcu_core(struct softirq_action *unused)
{
@@ -3559,13 +3081,11 @@ static int rcu_pm_notify(struct notifier_block *self,
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
- if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
- rcu_expedite_gp();
+ rcu_expedite_gp();
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
- if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
- rcu_unexpedite_gp();
+ rcu_unexpedite_gp();
break;
default:
break;
@@ -3742,8 +3262,7 @@ static void __init rcu_init_geometry(void)
jiffies_till_first_fqs = d;
if (jiffies_till_next_fqs == ULONG_MAX)
jiffies_till_next_fqs = d;
- if (jiffies_till_sched_qs == ULONG_MAX)
- adjust_jiffies_till_sched_qs();
+ adjust_jiffies_till_sched_qs();
/* If the compile-time values are accurate, just leave. */
if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
@@ -3858,5 +3377,6 @@ void __init rcu_init(void)
srcu_init();
}
+#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_plugin.h"
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bb4f995f2d3f..e253d11af3c4 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -393,15 +393,13 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
int rcu_dynticks_snap(struct rcu_data *rdp);
-/* Forward declarations for rcutree_plugin.h */
+/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_print_detail_task_stall(void);
-static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
@@ -418,9 +416,6 @@ static void rcu_prepare_for_idle(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void rcu_preempt_deferred_qs(struct task_struct *t);
-static void print_cpu_stall_info_begin(void);
-static void print_cpu_stall_info(int cpu);
-static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static bool rcu_nocb_cpu_needs_barrier(int cpu);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
@@ -445,3 +440,10 @@ static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);
+
+/* Forward declarations for tree_stall.h */
+static void record_gp_stall_check_time(void);
+static void rcu_iw_handler(struct irq_work *iwp);
+static void check_cpu_stall(struct rcu_data *rdp);
+static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
+ const unsigned long gpssdelay);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 4c2a0189e748..9c990df880d1 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -10,6 +10,7 @@
#include <linux/lockdep.h>
static void rcu_exp_handler(void *unused);
+static int rcu_print_task_exp_stall(struct rcu_node *rnp);
/*
* Record the start of an expedited grace period.
@@ -633,7 +634,7 @@ static void rcu_exp_handler(void *unused)
raw_spin_lock_irqsave_rcu_node(rnp, flags);
if (rnp->expmask & rdp->grpmask) {
rdp->deferred_qs = true;
- WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
+ t->rcu_read_unlock_special.b.exp_hint = true;
}
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
@@ -648,7 +649,7 @@ static void rcu_exp_handler(void *unused)
*
* If the CPU is fully enabled (or if some buggy RCU-preempt
* read-side critical section is being used from idle), just
- * invoke rcu_preempt_defer_qs() to immediately report the
+ * invoke rcu_preempt_deferred_qs() to immediately report the
* quiescent state. We cannot use rcu_read_unlock_special()
* because we are in an interrupt handler, which will cause that
* function to take an early exit without doing anything.
@@ -670,6 +671,27 @@ static void sync_sched_exp_online_cleanup(int cpu)
{
}
+/*
+ * Scan the current list of tasks blocked within RCU read-side critical
+ * sections, printing out the tid of each that is blocking the current
+ * expedited grace period.
+ */
+static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+{
+ struct task_struct *t;
+ int ndetected = 0;
+
+ if (!rnp->exp_tasks)
+ return 0;
+ t = list_entry(rnp->exp_tasks->prev,
+ struct task_struct, rcu_node_entry);
+ list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+ pr_cont(" P%d", t->pid);
+ ndetected++;
+ }
+ return ndetected;
+}
+
#else /* #ifdef CONFIG_PREEMPT_RCU */
/* Invoked on each online non-idle CPU for expedited quiescent state. */
@@ -709,6 +731,16 @@ static void sync_sched_exp_online_cleanup(int cpu)
WARN_ON_ONCE(ret);
}
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections that are
+ * blocking the current expedited grace period.
+ */
+static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+{
+ return 0;
+}
+
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/**
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 97dba50f6fb2..1102765f91fd 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -285,7 +285,7 @@ static void rcu_qs(void)
TPS("cpuqs"));
__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
- current->rcu_read_unlock_special.b.need_qs = false;
+ WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
}
}
@@ -643,100 +643,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
}
/*
- * Dump detailed information for all tasks blocking the current RCU
- * grace period on the specified rcu_node structure.
- */
-static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
-{
- unsigned long flags;
- struct task_struct *t;
-
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- if (!rcu_preempt_blocked_readers_cgp(rnp)) {
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- return;
- }
- t = list_entry(rnp->gp_tasks->prev,
- struct task_struct, rcu_node_entry);
- list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
- /*
- * We could be printing a lot while holding a spinlock.
- * Avoid triggering hard lockup.
- */
- touch_nmi_watchdog();
- sched_show_task(t);
- }
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-}
-
-/*
- * Dump detailed information for all tasks blocking the current RCU
- * grace period.
- */
-static void rcu_print_detail_task_stall(void)
-{
- struct rcu_node *rnp = rcu_get_root();
-
- rcu_print_detail_task_stall_rnp(rnp);
- rcu_for_each_leaf_node(rnp)
- rcu_print_detail_task_stall_rnp(rnp);
-}
-
-static void rcu_print_task_stall_begin(struct rcu_node *rnp)
-{
- pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
- rnp->level, rnp->grplo, rnp->grphi);
-}
-
-static void rcu_print_task_stall_end(void)
-{
- pr_cont("\n");
-}
-
-/*
- * Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each.
- */
-static int rcu_print_task_stall(struct rcu_node *rnp)
-{
- struct task_struct *t;
- int ndetected = 0;
-
- if (!rcu_preempt_blocked_readers_cgp(rnp))
- return 0;
- rcu_print_task_stall_begin(rnp);
- t = list_entry(rnp->gp_tasks->prev,
- struct task_struct, rcu_node_entry);
- list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
- pr_cont(" P%d", t->pid);
- ndetected++;
- }
- rcu_print_task_stall_end();
- return ndetected;
-}
-
-/*
- * Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each that is blocking the current
- * expedited grace period.
- */
-static int rcu_print_task_exp_stall(struct rcu_node *rnp)
-{
- struct task_struct *t;
- int ndetected = 0;
-
- if (!rnp->exp_tasks)
- return 0;
- t = list_entry(rnp->exp_tasks->prev,
- struct task_struct, rcu_node_entry);
- list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
- pr_cont(" P%d", t->pid);
- ndetected++;
- }
- return ndetected;
-}
-
-/*
* Check that the list of blocked tasks for the newly completed grace
* period is in fact empty. It is a serious bug to complete a grace
* period that still has RCU readers blocked! This function must be
@@ -804,19 +710,25 @@ static void rcu_flavor_sched_clock_irq(int user)
/*
* Check for a task exiting while in a preemptible-RCU read-side
- * critical section, clean up if so. No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
+ * critical section, clean up if so. No need to issue warnings, as
+ * debug_check_no_locks_held() already does this if lockdep is enabled.
+ * Besides, if this function does anything other than just immediately
+ * return, there was a bug of some sort. Spewing warnings from this
+ * function is like as not to simply obscure important prior warnings.
*/
void exit_rcu(void)
{
struct task_struct *t = current;
- if (likely(list_empty(&current->rcu_node_entry)))
+ if (unlikely(!list_empty(&current->rcu_node_entry))) {
+ t->rcu_read_lock_nesting = 1;
+ barrier();
+ WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
+ } else if (unlikely(t->rcu_read_lock_nesting)) {
+ t->rcu_read_lock_nesting = 1;
+ } else {
return;
- t->rcu_read_lock_nesting = 1;
- barrier();
- t->rcu_read_unlock_special.b.blocked = true;
+ }
__rcu_read_unlock();
rcu_preempt_deferred_qs(current);
}
@@ -980,33 +892,6 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
static void rcu_preempt_deferred_qs(struct task_struct *t) { }
/*
- * Because preemptible RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections.
- */
-static void rcu_print_detail_task_stall(void)
-{
-}
-
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections.
- */
-static int rcu_print_task_stall(struct rcu_node *rnp)
-{
- return 0;
-}
-
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections that are
- * blocking the current expedited grace period.
- */
-static int rcu_print_task_exp_stall(struct rcu_node *rnp)
-{
- return 0;
-}
-
-/*
* Because there is no preemptible RCU, there can be no readers blocked,
* so there is no need to check for blocked tasks. So check only for
* bogus qsmask values.
@@ -1185,8 +1070,6 @@ static int rcu_boost_kthread(void *arg)
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
__releases(rnp->lock)
{
- struct task_struct *t;
-
raw_lockdep_assert_held_rcu_node(rnp);
if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -1200,9 +1083,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
if (rnp->exp_tasks == NULL)
rnp->boost_tasks = rnp->gp_tasks;
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- t = rnp->boost_kthread_task;
- if (t)
- rcu_wake_cond(t, rnp->boost_kthread_status);
+ rcu_wake_cond(rnp->boost_kthread_task,
+ rnp->boost_kthread_status);
} else {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
@@ -1649,98 +1531,6 @@ static void rcu_cleanup_after_idle(void)
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
-#ifdef CONFIG_RCU_FAST_NO_HZ
-
-static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-
- sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
- rdp->last_accelerate & 0xffff, jiffies & 0xffff,
- ".l"[rdp->all_lazy],
- ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
- ".D"[!rdp->tick_nohz_enabled_snap]);
-}
-
-#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-
-static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
-{
- *cp = '\0';
-}
-
-#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
-
-/* Initiate the stall-info list. */
-static void print_cpu_stall_info_begin(void)
-{
- pr_cont("\n");
-}
-
-/*
- * Print out diagnostic information for the specified stalled CPU.
- *
- * If the specified CPU is aware of the current RCU grace period, then
- * print the number of scheduling clock interrupts the CPU has taken
- * during the time that it has been aware. Otherwise, print the number
- * of RCU grace periods that this CPU is ignorant of, for example, "1"
- * if the CPU was aware of the previous grace period.
- *
- * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
- */
-static void print_cpu_stall_info(int cpu)
-{
- unsigned long delta;
- char fast_no_hz[72];
- struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
- char *ticks_title;
- unsigned long ticks_value;
-
- /*
- * We could be printing a lot while holding a spinlock. Avoid
- * triggering hard lockup.
- */
- touch_nmi_watchdog();
-
- ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
- if (ticks_value) {
- ticks_title = "GPs behind";
- } else {
- ticks_title = "ticks this GP";
- ticks_value = rdp->ticks_this_gp;
- }
- print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
- delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
- pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
- cpu,
- "O."[!!cpu_online(cpu)],
- "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
- "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
- !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
- rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
- "!."[!delta],
- ticks_value, ticks_title,
- rcu_dynticks_snap(rdp) & 0xfff,
- rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
- rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
- READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
- fast_no_hz);
-}
-
-/* Terminate the stall-info list. */
-static void print_cpu_stall_info_end(void)
-{
- pr_err("\t");
-}
-
-/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
-static void zero_cpu_stall_ticks(struct rcu_data *rdp)
-{
- rdp->ticks_this_gp = 0;
- rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
- WRITE_ONCE(rdp->last_fqs_resched, jiffies);
-}
-
#ifdef CONFIG_RCU_NOCB_CPU
/*
@@ -1766,11 +1556,22 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp)
*/
-/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
+/*
+ * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
+ * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a
+ * comma-separated list of CPUs and/or CPU ranges. If an invalid list is
+ * given, a warning is emitted and all CPUs are offloaded.
+ */
static int __init rcu_nocb_setup(char *str)
{
alloc_bootmem_cpumask_var(&rcu_nocb_mask);
- cpulist_parse(str, rcu_nocb_mask);
+ if (!strcasecmp(str, "all"))
+ cpumask_setall(rcu_nocb_mask);
+ else
+ if (cpulist_parse(str, rcu_nocb_mask)) {
+ pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
+ cpumask_setall(rcu_nocb_mask);
+ }
return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
new file mode 100644
index 000000000000..f65a73a97323
--- /dev/null
+++ b/kernel/rcu/tree_stall.h
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RCU CPU stall warnings for normal RCU grace periods
+ *
+ * Copyright IBM Corporation, 2019
+ *
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
+ */
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Controlling CPU stall warnings, including delay calculation.
+
+/* panic() on RCU Stall sysctl. */
+int sysctl_panic_on_rcu_stall __read_mostly;
+
+#ifdef CONFIG_PROVE_RCU
+#define RCU_STALL_DELAY_DELTA (5 * HZ)
+#else
+#define RCU_STALL_DELAY_DELTA 0
+#endif
+
+/* Limit-check stall timeouts specified at boottime and runtime. */
+int rcu_jiffies_till_stall_check(void)
+{
+ int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
+
+ /*
+ * Limit check must be consistent with the Kconfig limits
+ * for CONFIG_RCU_CPU_STALL_TIMEOUT.
+ */
+ if (till_stall_check < 3) {
+ WRITE_ONCE(rcu_cpu_stall_timeout, 3);
+ till_stall_check = 3;
+ } else if (till_stall_check > 300) {
+ WRITE_ONCE(rcu_cpu_stall_timeout, 300);
+ till_stall_check = 300;
+ }
+ return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
+}
+EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
+
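The clamp above mirrors the Kconfig range for CONFIG_RCU_CPU_STALL_TIMEOUT
(3..300 seconds). A minimal stand-alone sketch of the arithmetic, assuming
HZ=1000 and no CONFIG_PROVE_RCU (so RCU_STALL_DELAY_DELTA is 0):

    #include <stdio.h>

    #define HZ 1000                     /* assumed tick rate */
    #define RCU_STALL_DELAY_DELTA 0     /* !CONFIG_PROVE_RCU */

    static int jiffies_till_stall_check(int timeout_s)
    {
            if (timeout_s < 3)          /* Kconfig lower bound */
                    timeout_s = 3;
            else if (timeout_s > 300)   /* Kconfig upper bound */
                    timeout_s = 300;
            return timeout_s * HZ + RCU_STALL_DELAY_DELTA;
    }

    int main(void)
    {
            printf("%d\n", jiffies_till_stall_check(21));  /* 21000 jiffies */
            printf("%d\n", jiffies_till_stall_check(1));   /* clamped: 3000 */
            printf("%d\n", jiffies_till_stall_check(999)); /* clamped: 300000 */
            return 0;
    }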
+/* Don't do RCU CPU stall warnings during long sysrq printouts. */
+void rcu_sysrq_start(void)
+{
+ if (!rcu_cpu_stall_suppress)
+ rcu_cpu_stall_suppress = 2;
+}
+
+void rcu_sysrq_end(void)
+{
+ if (rcu_cpu_stall_suppress == 2)
+ rcu_cpu_stall_suppress = 0;
+}
+
+/* Don't print RCU CPU stall warnings during a kernel panic. */
+static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
+{
+ rcu_cpu_stall_suppress = 1;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rcu_panic_block = {
+ .notifier_call = rcu_panic,
+};
+
+static int __init check_cpu_stall_init(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
+ return 0;
+}
+early_initcall(check_cpu_stall_init);
+
+/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
+static void panic_on_rcu_stall(void)
+{
+ if (sysctl_panic_on_rcu_stall)
+ panic("RCU Stall\n");
+}
+
+/**
+ * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
+ *
+ * Set the stall-warning timeout way off into the future, thus preventing
+ * any RCU CPU stall-warning messages from appearing in the current set of
+ * RCU grace periods.
+ *
+ * The caller must disable hard irqs.
+ */
+void rcu_cpu_stall_reset(void)
+{
+ WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
+}
+
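The ULONG_MAX / 2 offset works because the stall checks compare jiffies with
wrap-safe unsigned arithmetic; the kernel's ULONG_CMP_GE() macro lives in
include/linux/rcupdate.h, and a stand-alone sketch of the idea is:

    #include <limits.h>
    #include <stdbool.h>

    /* Wrap-safe "a is at or after b" for free-running counters. */
    static bool ulong_cmp_ge(unsigned long a, unsigned long b)
    {
            return ULONG_MAX / 2 >= a - b;  /* unsigned subtraction wraps */
    }

With ->jiffies_stall pushed ULONG_MAX / 2 ahead of jiffies,
ulong_cmp_ge(jiffies, jiffies_stall) stays false for roughly half the counter
space, so check_cpu_stall() below keeps returning early.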
+//////////////////////////////////////////////////////////////////////////////
+//
+// Interaction with RCU grace periods
+
+/* Start of new grace period, so record stall time (and forcing times). */
+static void record_gp_stall_check_time(void)
+{
+ unsigned long j = jiffies;
+ unsigned long j1;
+
+ rcu_state.gp_start = j;
+ j1 = rcu_jiffies_till_stall_check();
+ /* Record ->gp_start before ->jiffies_stall. */
+ smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
+ rcu_state.jiffies_resched = j + j1 / 2;
+ rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
+}
+
+/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
+static void zero_cpu_stall_ticks(struct rcu_data *rdp)
+{
+ rdp->ticks_this_gp = 0;
+ rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
+ WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+}
+
+/*
+ * If too much time has passed in the current grace period, and if
+ * so configured, go kick the relevant kthreads.
+ */
+static void rcu_stall_kick_kthreads(void)
+{
+ unsigned long j;
+
+ if (!rcu_kick_kthreads)
+ return;
+ j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
+ if (time_after(jiffies, j) && rcu_state.gp_kthread &&
+ (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
+ WARN_ONCE(1, "Kicking %s grace-period kthread\n",
+ rcu_state.name);
+ rcu_ftrace_dump(DUMP_ALL);
+ wake_up_process(rcu_state.gp_kthread);
+ WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
+ }
+}
+
+/*
+ * Handler for the irq_work request posted about halfway into the RCU CPU
+ * stall timeout, and used to detect excessive irq disabling. Set state
+ * appropriately, but just complain if there is unexpected state on entry.
+ */
+static void rcu_iw_handler(struct irq_work *iwp)
+{
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+
+ rdp = container_of(iwp, struct rcu_data, rcu_iw);
+ rnp = rdp->mynode;
+ raw_spin_lock_rcu_node(rnp);
+ if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
+ rdp->rcu_iw_gp_seq = rnp->gp_seq;
+ rdp->rcu_iw_pending = false;
+ }
+ raw_spin_unlock_rcu_node(rnp);
+}
+
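For context, the posting side of this handler sits in
rcu_implicit_dynticks_qs() in kernel/rcu/tree.c; an abbreviated sketch of that
pattern, using the rcu_data fields named above (the real call site has
additional conditions):

            if (!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq) {
                    init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
                    rdp->rcu_iw_pending = true;
                    rdp->rcu_iw_gp_seq = rnp->gp_seq;
                    irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); /* run on stalled CPU */
            }

If the irq_work never runs, ->rcu_iw_pending stays true and ->rcu_iw_gp_seq
stays behind ->gp_seq, which print_cpu_stall_info() below encodes as a digit in
its per-CPU flag characters.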
+//////////////////////////////////////////////////////////////////////////////
+//
+// Printing RCU CPU stall warnings
+
+#ifdef CONFIG_PREEMPT
+
+/*
+ * Dump detailed information for all tasks blocking the current RCU
+ * grace period on the specified rcu_node structure.
+ */
+static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
+{
+ unsigned long flags;
+ struct task_struct *t;
+
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ if (!rcu_preempt_blocked_readers_cgp(rnp)) {
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ return;
+ }
+ t = list_entry(rnp->gp_tasks->prev,
+ struct task_struct, rcu_node_entry);
+ list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+ /*
+ * We could be printing a lot while holding a spinlock.
+ * Avoid triggering hard lockup.
+ */
+ touch_nmi_watchdog();
+ sched_show_task(t);
+ }
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
+
+/*
+ * Scan the current list of tasks blocked within RCU read-side critical
+ * sections, printing out the tid of each.
+ */
+static int rcu_print_task_stall(struct rcu_node *rnp)
+{
+ struct task_struct *t;
+ int ndetected = 0;
+
+ if (!rcu_preempt_blocked_readers_cgp(rnp))
+ return 0;
+ pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
+ rnp->level, rnp->grplo, rnp->grphi);
+ t = list_entry(rnp->gp_tasks->prev,
+ struct task_struct, rcu_node_entry);
+ list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+ pr_cont(" P%d", t->pid);
+ ndetected++;
+ }
+ pr_cont("\n");
+ return ndetected;
+}
+
+#else /* #ifdef CONFIG_PREEMPT */
+
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections.
+ */
+static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections.
+ */
+static int rcu_print_task_stall(struct rcu_node *rnp)
+{
+ return 0;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */
+
+/*
+ * Dump stacks of all tasks running on stalled CPUs. First try using
+ * NMIs, but fall back to manual remote stack tracing on architectures
+ * that don't support NMI-based stack dumps. The NMI-triggered stack
+ * traces are more accurate because they are printed by the target CPU.
+ */
+static void rcu_dump_cpu_stacks(void)
+{
+ int cpu;
+ unsigned long flags;
+ struct rcu_node *rnp;
+
+ rcu_for_each_leaf_node(rnp) {
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ for_each_leaf_node_possible_cpu(rnp, cpu)
+ if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
+ if (!trigger_single_cpu_backtrace(cpu))
+ dump_cpu_task(cpu);
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
+}
+
+#ifdef CONFIG_RCU_FAST_NO_HZ
+
+static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+
+ sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
+ rdp->last_accelerate & 0xffff, jiffies & 0xffff,
+ ".l"[rdp->all_lazy],
+ ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
+		".D"[!rdp->tick_nohz_enabled_snap]);
+}
+
+#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+
+static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
+{
+ *cp = '\0';
+}
+
+#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
+
+/*
+ * Print out diagnostic information for the specified stalled CPU.
+ *
+ * If the specified CPU is aware of the current RCU grace period, then
+ * print the number of scheduling clock interrupts the CPU has taken
+ * during the time that it has been aware. Otherwise, print the number
+ * of RCU grace periods that this CPU is ignorant of, for example, "1"
+ * if the CPU was aware of the previous grace period.
+ *
+ * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
+ */
+static void print_cpu_stall_info(int cpu)
+{
+ unsigned long delta;
+ char fast_no_hz[72];
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ char *ticks_title;
+ unsigned long ticks_value;
+
+ /*
+ * We could be printing a lot while holding a spinlock. Avoid
+ * triggering hard lockup.
+ */
+ touch_nmi_watchdog();
+
+ ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
+ if (ticks_value) {
+ ticks_title = "GPs behind";
+ } else {
+ ticks_title = "ticks this GP";
+ ticks_value = rdp->ticks_this_gp;
+ }
+ print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
+ delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
+ pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
+ cpu,
+ "O."[!!cpu_online(cpu)],
+ "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
+ "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
+ !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
+ rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
+ "!."[!delta],
+ ticks_value, ticks_title,
+ rcu_dynticks_snap(rdp) & 0xfff,
+ rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
+ rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
+ READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
+ fast_no_hz);
+}
+
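The "O."-style expressions above index two-character strings with a 0/1
predicate, so each flag position prints either a letter or a '.'. A tiny model
of the idiom:

    #include <stdio.h>

    int main(void)
    {
            int online = 1;

            /* !!online is 1 -> '.', 0 -> 'O': the letter marks the odd state */
            printf("%c\n", "O."[!!online]); /* '.' for an online CPU */
            printf("%c\n", "O."[!!0]);      /* 'O' for an offline CPU */
            return 0;
    }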
+/* Complain about starvation of grace-period kthread. */
+static void rcu_check_gp_kthread_starvation(void)
+{
+ struct task_struct *gpk = rcu_state.gp_kthread;
+ unsigned long j;
+
+ j = jiffies - READ_ONCE(rcu_state.gp_activity);
+ if (j > 2 * HZ) {
+ pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
+ rcu_state.name, j,
+ (long)rcu_seq_current(&rcu_state.gp_seq),
+ READ_ONCE(rcu_state.gp_flags),
+ gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
+ gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
+ if (gpk) {
+ pr_err("RCU grace-period kthread stack dump:\n");
+ sched_show_task(gpk);
+ wake_up_process(gpk);
+ }
+ }
+}
+
+static void print_other_cpu_stall(unsigned long gp_seq)
+{
+ int cpu;
+ unsigned long flags;
+ unsigned long gpa;
+ unsigned long j;
+ int ndetected = 0;
+ struct rcu_node *rnp;
+ long totqlen = 0;
+
+ /* Kick and suppress, if so configured. */
+ rcu_stall_kick_kthreads();
+ if (rcu_cpu_stall_suppress)
+ return;
+
+ /*
+ * OK, time to rat on our buddy...
+ * See Documentation/RCU/stallwarn.txt for info on how to debug
+ * RCU CPU stall warnings.
+ */
+ pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
+ rcu_for_each_leaf_node(rnp) {
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ ndetected += rcu_print_task_stall(rnp);
+ if (rnp->qsmask != 0) {
+ for_each_leaf_node_possible_cpu(rnp, cpu)
+ if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
+ print_cpu_stall_info(cpu);
+ ndetected++;
+ }
+ }
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
+
+ for_each_possible_cpu(cpu)
+ totqlen += rcu_get_n_cbs_cpu(cpu);
+ pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
+ smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
+ (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
+ if (ndetected) {
+ rcu_dump_cpu_stacks();
+
+ /* Complain about tasks blocking the grace period. */
+ rcu_for_each_leaf_node(rnp)
+ rcu_print_detail_task_stall_rnp(rnp);
+ } else {
+ if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
+ pr_err("INFO: Stall ended before state dump start\n");
+ } else {
+ j = jiffies;
+ gpa = READ_ONCE(rcu_state.gp_activity);
+ pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
+ rcu_state.name, j - gpa, j, gpa,
+ READ_ONCE(jiffies_till_next_fqs),
+ rcu_get_root()->qsmask);
+ /* In this case, the current CPU might be at fault. */
+ sched_show_task(current);
+ }
+ }
+ /* Rewrite if needed in case of slow consoles. */
+ if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+ WRITE_ONCE(rcu_state.jiffies_stall,
+ jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
+
+ rcu_check_gp_kthread_starvation();
+
+ panic_on_rcu_stall();
+
+ rcu_force_quiescent_state(); /* Kick them all. */
+}
+
+static void print_cpu_stall(void)
+{
+ int cpu;
+ unsigned long flags;
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+ struct rcu_node *rnp = rcu_get_root();
+ long totqlen = 0;
+
+ /* Kick and suppress, if so configured. */
+ rcu_stall_kick_kthreads();
+ if (rcu_cpu_stall_suppress)
+ return;
+
+ /*
+ * OK, time to rat on ourselves...
+ * See Documentation/RCU/stallwarn.txt for info on how to debug
+ * RCU CPU stall warnings.
+ */
+ pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
+ raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
+ print_cpu_stall_info(smp_processor_id());
+ raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
+ for_each_possible_cpu(cpu)
+ totqlen += rcu_get_n_cbs_cpu(cpu);
+ pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
+ jiffies - rcu_state.gp_start,
+ (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
+
+ rcu_check_gp_kthread_starvation();
+
+ rcu_dump_cpu_stacks();
+
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ /* Rewrite if needed in case of slow consoles. */
+ if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+ WRITE_ONCE(rcu_state.jiffies_stall,
+ jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+
+ panic_on_rcu_stall();
+
+ /*
+ * Attempt to revive the RCU machinery by forcing a context switch.
+ *
+ * A context switch would normally allow the RCU state machine to make
+ * progress and it could be we're stuck in kernel space without context
+ * switches for an entirely unreasonable amount of time.
+ */
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+}
+
+static void check_cpu_stall(struct rcu_data *rdp)
+{
+ unsigned long gs1;
+ unsigned long gs2;
+ unsigned long gps;
+ unsigned long j;
+ unsigned long jn;
+ unsigned long js;
+ struct rcu_node *rnp;
+
+ if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
+ !rcu_gp_in_progress())
+ return;
+ rcu_stall_kick_kthreads();
+ j = jiffies;
+
+ /*
+ * Lots of memory barriers to reject false positives.
+ *
+ * The idea is to pick up rcu_state.gp_seq, then
+ * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
+ * another copy of rcu_state.gp_seq. These values are updated in
+ * the opposite order with memory barriers (or equivalent) during
+ * grace-period initialization and cleanup. Now, a false positive
+	 * can occur if we get a new value of rcu_state.gp_start and an old
+ * value of rcu_state.jiffies_stall. But given the memory barriers,
+ * the only way that this can happen is if one grace period ends
+ * and another starts between these two fetches. This is detected
+ * by comparing the second fetch of rcu_state.gp_seq with the
+ * previous fetch from rcu_state.gp_seq.
+ *
+ * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
+ * and rcu_state.gp_start suffice to forestall false positives.
+ */
+ gs1 = READ_ONCE(rcu_state.gp_seq);
+ smp_rmb(); /* Pick up ->gp_seq first... */
+ js = READ_ONCE(rcu_state.jiffies_stall);
+ smp_rmb(); /* ...then ->jiffies_stall before the rest... */
+ gps = READ_ONCE(rcu_state.gp_start);
+ smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
+ gs2 = READ_ONCE(rcu_state.gp_seq);
+ if (gs1 != gs2 ||
+ ULONG_CMP_LT(j, js) ||
+ ULONG_CMP_GE(gps, js))
+ return; /* No stall or GP completed since entering function. */
+ rnp = rdp->mynode;
+ jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+ if (rcu_gp_in_progress() &&
+ (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+ cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
+
+ /* We haven't checked in, so go dump stack. */
+ print_cpu_stall();
+
+ } else if (rcu_gp_in_progress() &&
+ ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
+ cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
+
+ /* They had a few time units to dump stack, so complain. */
+ print_other_cpu_stall(gs2);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// RCU forward-progress mechanisms, including callback invocation.
+
+
+/*
+ * Show the state of the grace-period kthreads.
+ */
+void show_rcu_gp_kthreads(void)
+{
+ int cpu;
+ unsigned long j;
+ unsigned long ja;
+ unsigned long jr;
+ unsigned long jw;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+
+ j = jiffies;
+ ja = j - READ_ONCE(rcu_state.gp_activity);
+ jr = j - READ_ONCE(rcu_state.gp_req_activity);
+ jw = j - READ_ONCE(rcu_state.gp_wake_time);
+ pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
+ rcu_state.name, gp_state_getname(rcu_state.gp_state),
+ rcu_state.gp_state,
+ rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
+ ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
+ (long)READ_ONCE(rcu_state.gp_seq),
+ (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
+ READ_ONCE(rcu_state.gp_flags));
+ rcu_for_each_node_breadth_first(rnp) {
+ if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
+ continue;
+ pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
+ rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
+ (long)rnp->gp_seq_needed);
+ if (!rcu_is_leaf_node(rnp))
+ continue;
+ for_each_leaf_node_possible_cpu(rnp, cpu) {
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ if (rdp->gpwrap ||
+ ULONG_CMP_GE(rcu_state.gp_seq,
+ rdp->gp_seq_needed))
+ continue;
+ pr_info("\tcpu %d ->gp_seq_needed %ld\n",
+ cpu, (long)rdp->gp_seq_needed);
+ }
+ }
+ /* sched_show_task(rcu_state.gp_kthread); */
+}
+EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
+
+/*
+ * This function checks for grace-period requests that fail to motivate
+ * RCU to come out of its idle mode.
+ */
+static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
+ const unsigned long gpssdelay)
+{
+ unsigned long flags;
+ unsigned long j;
+ struct rcu_node *rnp_root = rcu_get_root();
+ static atomic_t warned = ATOMIC_INIT(0);
+
+ if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
+ ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
+ return;
+ j = jiffies; /* Expensive access, and in common case don't get here. */
+ if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
+ time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
+ atomic_read(&warned))
+ return;
+
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ j = jiffies;
+ if (rcu_gp_in_progress() ||
+ ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+ time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
+ time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
+ atomic_read(&warned)) {
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ return;
+ }
+ /* Hold onto the leaf lock to make others see warned==1. */
+
+ if (rnp_root != rnp)
+ raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+ j = jiffies;
+ if (rcu_gp_in_progress() ||
+ ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+ time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
+ time_before(j, rcu_state.gp_activity + gpssdelay) ||
+ atomic_xchg(&warned, 1)) {
+ raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ return;
+ }
+ WARN_ON(1);
+ if (rnp_root != rnp)
+ raw_spin_unlock_rcu_node(rnp_root);
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ show_rcu_gp_kthreads();
+}
+
+/*
+ * Do a forward-progress check for rcutorture. This is normally invoked
+ * due to an OOM event. The argument "j" gives the time period during
+ * which rcutorture would like progress to have been made.
+ */
+void rcu_fwd_progress_check(unsigned long j)
+{
+ unsigned long cbs;
+ int cpu;
+ unsigned long max_cbs = 0;
+ int max_cpu = -1;
+ struct rcu_data *rdp;
+
+ if (rcu_gp_in_progress()) {
+ pr_info("%s: GP age %lu jiffies\n",
+ __func__, jiffies - rcu_state.gp_start);
+ show_rcu_gp_kthreads();
+ } else {
+ pr_info("%s: Last GP end %lu jiffies ago\n",
+ __func__, jiffies - rcu_state.gp_end);
+ preempt_disable();
+ rdp = this_cpu_ptr(&rcu_data);
+ rcu_check_gp_start_stall(rdp->mynode, rdp, j);
+ preempt_enable();
+ }
+ for_each_possible_cpu(cpu) {
+ cbs = rcu_get_n_cbs_cpu(cpu);
+ if (!cbs)
+ continue;
+ if (max_cpu < 0)
+ pr_info("%s: callbacks", __func__);
+ pr_cont(" %d: %lu", cpu, cbs);
+ if (cbs <= max_cbs)
+ continue;
+ max_cbs = cbs;
+ max_cpu = cpu;
+ }
+ if (max_cpu >= 0)
+ pr_cont("\n");
+}
+EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
+
+/* Commandeer a sysrq key to dump RCU's tree. */
+static bool sysrq_rcu;
+module_param(sysrq_rcu, bool, 0444);
+
+/* Dump grace-period-request information due to commandeered sysrq. */
+static void sysrq_show_rcu(int key)
+{
+ show_rcu_gp_kthreads();
+}
+
+static struct sysrq_key_op sysrq_rcudump_op = {
+ .handler = sysrq_show_rcu,
+ .help_msg = "show-rcu(y)",
+ .action_msg = "Show RCU tree",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init rcu_sysrq_init(void)
+{
+ if (sysrq_rcu)
+ return register_sysrq_key('y', &sysrq_rcudump_op);
+ return 0;
+}
+early_initcall(rcu_sysrq_init);
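Usage sketch, assuming tree RCU is built in (so the module_param above surfaces
as rcutree.sysrq_rcu, and the 0444 mode makes it boot-time only):

    rcutree.sysrq_rcu=1              on the kernel command line
    echo y > /proc/sysrq-trigger     then dumps the tree via show_rcu_gp_kthreads()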
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index cbaa976c5945..c3bf44ba42e5 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -424,68 +424,11 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#endif
#ifdef CONFIG_RCU_STALL_COMMON
-
-#ifdef CONFIG_PROVE_RCU
-#define RCU_STALL_DELAY_DELTA (5 * HZ)
-#else
-#define RCU_STALL_DELAY_DELTA 0
-#endif
-
int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
-static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
-
module_param(rcu_cpu_stall_suppress, int, 0644);
+int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
-
-int rcu_jiffies_till_stall_check(void)
-{
- int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
-
- /*
- * Limit check must be consistent with the Kconfig limits
- * for CONFIG_RCU_CPU_STALL_TIMEOUT.
- */
- if (till_stall_check < 3) {
- WRITE_ONCE(rcu_cpu_stall_timeout, 3);
- till_stall_check = 3;
- } else if (till_stall_check > 300) {
- WRITE_ONCE(rcu_cpu_stall_timeout, 300);
- till_stall_check = 300;
- }
- return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
-}
-EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
-
-void rcu_sysrq_start(void)
-{
- if (!rcu_cpu_stall_suppress)
- rcu_cpu_stall_suppress = 2;
-}
-
-void rcu_sysrq_end(void)
-{
- if (rcu_cpu_stall_suppress == 2)
- rcu_cpu_stall_suppress = 0;
-}
-
-static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
-{
- rcu_cpu_stall_suppress = 1;
- return NOTIFY_DONE;
-}
-
-static struct notifier_block rcu_panic_block = {
- .notifier_call = rcu_panic,
-};
-
-static int __init check_cpu_stall_init(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
- return 0;
-}
-early_initcall(check_cpu_stall_init);
-
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
#ifdef CONFIG_TASKS_RCU
diff --git a/kernel/relay.c b/kernel/relay.c
index 9e0f52375487..ade14fb7ce2e 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1177,7 +1177,6 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
}
static const struct pipe_buf_operations relay_pipe_buf_ops = {
- .can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = relay_pipe_buf_release,
.steal = generic_pipe_buf_steal,
diff --git a/kernel/resource.c b/kernel/resource.c
index e81b17b53fa5..8c15f846e8ef 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -382,7 +382,7 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
int (*func)(struct resource *, void *))
{
struct resource res;
- int ret = -1;
+ int ret = -EINVAL;
while (start < end &&
!find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) {
@@ -452,6 +452,9 @@ int walk_mem_res(u64 start, u64 end, void *arg,
* This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
* It is to be used only for System RAM.
+ *
+ * This will find System RAM ranges that are children of top-level resources
+ * in addition to top-level System RAM resources.
*/
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg, int (*func)(unsigned long, unsigned long, void *))
@@ -460,14 +463,14 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
unsigned long flags;
struct resource res;
unsigned long pfn, end_pfn;
- int ret = -1;
+ int ret = -EINVAL;
start = (u64) start_pfn << PAGE_SHIFT;
end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
while (start < end &&
!find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
- true, &res)) {
+ false, &res)) {
pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
end_pfn = (res.end + 1) >> PAGE_SHIFT;
if (end_pfn > pfn)
@@ -517,21 +520,20 @@ EXPORT_SYMBOL_GPL(page_is_ram);
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
unsigned long desc)
{
- resource_size_t end = start + size - 1;
+ struct resource res;
int type = 0; int other = 0;
struct resource *p;
+ res.start = start;
+ res.end = start + size - 1;
+
read_lock(&resource_lock);
for (p = iomem_resource.child; p ; p = p->sibling) {
bool is_type = (((p->flags & flags) == flags) &&
((desc == IORES_DESC_NONE) ||
(desc == p->desc)));
- if (start >= p->start && start <= p->end)
- is_type ? type++ : other++;
- if (end >= p->start && end <= p->end)
- is_type ? type++ : other++;
- if (p->start >= start && p->end <= end)
+ if (resource_overlaps(p, &res))
is_type ? type++ : other++;
}
read_unlock(&resource_lock);
@@ -1128,6 +1130,15 @@ struct resource * __request_region(struct resource *parent,
conflict = __request_resource(parent, res);
if (!conflict)
break;
+ /*
+ * mm/hmm.c reserves physical addresses which then
+ * become unavailable to other users. Conflicts are
+ * not expected. Warn to aid debugging if encountered.
+ */
+ if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
+ pr_warn("Unaddressable device %s %pR conflicts with %pR",
+ conflict->name, conflict, res);
+ }
if (conflict != parent) {
if (!(conflict->flags & IORESOURCE_BUSY)) {
parent = conflict;
diff --git a/kernel/rseq.c b/kernel/rseq.c
index 25e9a7b60eba..9424ee90589e 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -254,8 +254,7 @@ static int rseq_ip_fixup(struct pt_regs *regs)
* - signal delivery,
* and return to user-space.
*
- * This is how we can ensure that the entire rseq critical section,
- * consisting of both the C part and the assembly instruction sequence,
+ * This is how we can ensure that the entire rseq critical section
* will issue the commit instruction only if executed atomically with
* respect to other threads scheduled on the same CPU, and with respect
* to signal handlers.
@@ -314,7 +313,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
/* Unregister rseq for current thread. */
if (current->rseq != rseq || !current->rseq)
return -EINVAL;
- if (current->rseq_len != rseq_len)
+ if (rseq_len != sizeof(*rseq))
return -EINVAL;
if (current->rseq_sig != sig)
return -EPERM;
@@ -322,7 +321,6 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
if (ret)
return ret;
current->rseq = NULL;
- current->rseq_len = 0;
current->rseq_sig = 0;
return 0;
}
@@ -336,7 +334,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
* the provided address differs from the prior
* one.
*/
- if (current->rseq != rseq || current->rseq_len != rseq_len)
+ if (current->rseq != rseq || rseq_len != sizeof(*rseq))
return -EINVAL;
if (current->rseq_sig != sig)
return -EPERM;
@@ -354,7 +352,6 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
if (!access_ok(rseq, rseq_len))
return -EFAULT;
current->rseq = rseq;
- current->rseq_len = rseq_len;
current->rseq_sig = sig;
/*
* If rseq was previously inactive, and has just been
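With rseq_len now pinned to sizeof(*rseq), user-space registration must pass the
exact ABI size; a hedged sketch using the raw syscall (there is no glibc
wrapper, and RSEQ_SIG here is an arbitrary example signature the caller would
also embed near its abort handlers):

    #include <linux/rseq.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define RSEQ_SIG 0x53053053 /* example value, caller-chosen */

    static __thread struct rseq rseq_abi __attribute__((aligned(32)));

    static long rseq_register(void)
    {
            /* The kernel now rejects any length other than sizeof(struct rseq). */
            return syscall(__NR_rseq, &rseq_abi, sizeof(rseq_abi), 0, RSEQ_SIG);
    }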
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ead464a0f2e5..102dfcf0a29a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -792,10 +792,14 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
rq->nr_uninterruptible--;
enqueue_task(rq, p, flags);
+
+ p->on_rq = TASK_ON_RQ_QUEUED;
}
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
+ p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
+
if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
@@ -920,7 +924,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
}
/*
- * Per-CPU kthreads are allowed to run on !actie && online CPUs, see
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
* __set_cpus_allowed_ptr() and select_fallback_rq().
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
@@ -1151,7 +1155,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
/* Need help from migration thread: drop lock and wait. */
task_rq_unlock(rq, p, &rf);
stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
- tlb_migrate_finish(p->mm);
return 0;
} else if (task_on_rq_queued(p)) {
/*
@@ -1237,11 +1240,9 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
rq_pin_lock(src_rq, &srf);
rq_pin_lock(dst_rq, &drf);
- p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, cpu);
activate_task(dst_rq, p, 0);
- p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(dst_rq, p, 0);
rq_unpin_lock(dst_rq, &drf);
@@ -1681,16 +1682,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
__schedstat_inc(p->se.statistics.nr_wakeups_sync);
}
-static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
-{
- activate_task(rq, p, en_flags);
- p->on_rq = TASK_ON_RQ_QUEUED;
-
- /* If a worker is waking up, notify the workqueue: */
- if (p->flags & PF_WQ_WORKER)
- wq_worker_waking_up(p, cpu_of(rq));
-}
-
/*
* Mark the task runnable and perform wakeup-preemption.
*/
@@ -1742,7 +1733,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
en_flags |= ENQUEUE_MIGRATED;
#endif
- ttwu_activate(rq, p, en_flags);
+ activate_task(rq, p, en_flags);
ttwu_do_wakeup(rq, p, wake_flags, rf);
}
@@ -2107,56 +2098,6 @@ out:
}
/**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- * @rf: request-queue flags for pinning
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
-static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
-{
- struct rq *rq = task_rq(p);
-
- if (WARN_ON_ONCE(rq != this_rq()) ||
- WARN_ON_ONCE(p == current))
- return;
-
- lockdep_assert_held(&rq->lock);
-
- if (!raw_spin_trylock(&p->pi_lock)) {
- /*
- * This is OK, because current is on_cpu, which avoids it being
- * picked for load-balance and preemption/IRQs are still
- * disabled avoiding further scheduler activity on it and we've
- * not yet picked a replacement task.
- */
- rq_unlock(rq, rf);
- raw_spin_lock(&p->pi_lock);
- rq_relock(rq, rf);
- }
-
- if (!(p->state & TASK_NORMAL))
- goto out;
-
- trace_sched_waking(p);
-
- if (!task_on_rq_queued(p)) {
- if (p->in_iowait) {
- delayacct_blkio_end(p);
- atomic_dec(&rq->nr_iowait);
- }
- ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
- }
-
- ttwu_do_wakeup(rq, p, 0, rf);
- ttwu_stat(p, smp_processor_id(), 0);
-out:
- raw_spin_unlock(&p->pi_lock);
-}
-
-/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
@@ -2467,7 +2408,6 @@ void wake_up_new_task(struct task_struct *p)
post_init_entity_util_avg(p);
activate_task(rq, p, ENQUEUE_NOCLOCK);
- p->on_rq = TASK_ON_RQ_QUEUED;
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
@@ -3466,25 +3406,11 @@ static void __sched notrace __schedule(bool preempt)
prev->state = TASK_RUNNING;
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
- prev->on_rq = 0;
if (prev->in_iowait) {
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
-
- /*
- * If a worker went to sleep, notify and ask workqueue
- * whether it wants to wake up a task to maintain
- * concurrency.
- */
- if (prev->flags & PF_WQ_WORKER) {
- struct task_struct *to_wakeup;
-
- to_wakeup = wq_worker_sleeping(prev);
- if (to_wakeup)
- try_to_wake_up_local(to_wakeup, &rf);
- }
}
switch_count = &prev->nvcsw;
}
@@ -3544,6 +3470,20 @@ static inline void sched_submit_work(struct task_struct *tsk)
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
+
+ /*
+ * If a worker went to sleep, notify and ask workqueue whether
+ * it wants to wake up a task to maintain concurrency.
+ * As this function is called inside the schedule() context,
+ * we disable preemption to avoid it calling schedule() again
+ * in the possible wakeup of a kworker.
+ */
+ if (tsk->flags & PF_WQ_WORKER) {
+ preempt_disable();
+ wq_worker_sleeping(tsk);
+ preempt_enable_no_resched();
+ }
+
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
@@ -3552,6 +3492,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}
+static void sched_update_worker(struct task_struct *tsk)
+{
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_running(tsk);
+}
+
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
@@ -3562,6 +3508,7 @@ asmlinkage __visible void __sched schedule(void)
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
+ sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);
@@ -5918,7 +5865,7 @@ void __init sched_init_smp(void)
static int __init migration_init(void)
{
- sched_rq_cpu_starting(smp_processor_id());
+ sched_cpu_starting(smp_processor_id());
return 0;
}
early_initcall(migration_init);
@@ -6559,6 +6506,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 shareval)
{
+ if (shareval > scale_load_down(ULONG_MAX))
+ shareval = MAX_SHARES;
return sched_group_set_shares(css_tg(css), scale_load(shareval));
}
@@ -6574,7 +6523,7 @@ static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
static DEFINE_MUTEX(cfs_constraints_mutex);
const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
-const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
+static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
@@ -6654,20 +6603,22 @@ out_unlock:
return ret;
}
-int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
+static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
u64 quota, period;
period = ktime_to_ns(tg->cfs_bandwidth.period);
if (cfs_quota_us < 0)
quota = RUNTIME_INF;
- else
+ else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
quota = (u64)cfs_quota_us * NSEC_PER_USEC;
+ else
+ return -EINVAL;
return tg_set_cfs_bandwidth(tg, period, quota);
}
-long tg_get_cfs_quota(struct task_group *tg)
+static long tg_get_cfs_quota(struct task_group *tg)
{
u64 quota_us;
@@ -6680,17 +6631,20 @@ long tg_get_cfs_quota(struct task_group *tg)
return quota_us;
}
-int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
+static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
u64 quota, period;
+ if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
+ return -EINVAL;
+
period = (u64)cfs_period_us * NSEC_PER_USEC;
quota = tg->cfs_bandwidth.quota;
return tg_set_cfs_bandwidth(tg, period, quota);
}
-long tg_get_cfs_period(struct task_group *tg)
+static long tg_get_cfs_period(struct task_group *tg)
{
u64 cfs_period_us;
@@ -6998,7 +6952,7 @@ static int __maybe_unused cpu_period_quota_parse(char *buf,
{
char tok[21]; /* U64_MAX */
- if (!sscanf(buf, "%s %llu", tok, periodp))
+ if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
return -EINVAL;
*periodp *= NSEC_PER_USEC;
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 835671f0f917..b5dcd1d83c7f 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -7,7 +7,7 @@
*/
#include "sched.h"
-DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
/**
* cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 2efe629425be..5403479073b0 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -13,6 +13,8 @@
#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
+#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
+
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int rate_limit_us;
@@ -48,7 +50,6 @@ struct sugov_cpu {
bool iowait_boost_pending;
unsigned int iowait_boost;
- unsigned int iowait_boost_max;
u64 last_update;
unsigned long bw_dl;
@@ -291,8 +292,8 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
*
* The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
- * we enable the boost starting from the minimum frequency, which improves
- * energy efficiency by ignoring sporadic wakeups from IO.
+ * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
+ * efficiency by ignoring sporadic wakeups from IO.
*/
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
bool set_iowait_boost)
@@ -303,8 +304,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
if (delta_ns <= TICK_NSEC)
return false;
- sg_cpu->iowait_boost = set_iowait_boost
- ? sg_cpu->sg_policy->policy->min : 0;
+ sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
sg_cpu->iowait_boost_pending = set_iowait_boost;
return true;
@@ -318,8 +318,9 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
*
* Each time a task wakes up after an IO operation, the CPU utilization can be
* boosted to a certain utilization which doubles at each "frequent and
- * successive" wakeup from IO, ranging from the utilization of the minimum
- * OPP to the utilization of the maximum OPP.
+ * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
+ * of the maximum OPP.
+ *
* To keep doubling, an IO boost has to be requested at least once per tick,
* otherwise we restart from the utilization of the minimum OPP.
*/
@@ -344,14 +345,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
/* Double the boost at each request */
if (sg_cpu->iowait_boost) {
- sg_cpu->iowait_boost <<= 1;
- if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
- sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ sg_cpu->iowait_boost =
+ min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
return;
}
/* First wakeup after IO: start with minimum boost */
- sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+ sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
/**
@@ -373,47 +373,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 * This mechanism is designed to boost frequently IO-waiting tasks, while
 * being more conservative on tasks which do only sporadic IO operations.
*/
-static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
- unsigned long *util, unsigned long *max)
+static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+ unsigned long util, unsigned long max)
{
- unsigned int boost_util, boost_max;
+ unsigned long boost;
/* No boost currently required */
if (!sg_cpu->iowait_boost)
- return;
+ return util;
/* Reset boost if the CPU appears to have been idle enough */
if (sugov_iowait_reset(sg_cpu, time, false))
- return;
+ return util;
- /*
- * An IO waiting task has just woken up:
- * allow to further double the boost value
- */
- if (sg_cpu->iowait_boost_pending) {
- sg_cpu->iowait_boost_pending = false;
- } else {
+ if (!sg_cpu->iowait_boost_pending) {
/*
- * Otherwise: reduce the boost value and disable it when we
- * reach the minimum.
+ * No boost pending; reduce the boost value.
*/
sg_cpu->iowait_boost >>= 1;
- if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+ if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
sg_cpu->iowait_boost = 0;
- return;
+ return util;
}
}
+ sg_cpu->iowait_boost_pending = false;
+
/*
- * Apply the current boost value: a CPU is boosted only if its current
- * utilization is smaller then the current IO boost level.
+ * @util is already in capacity scale; convert iowait_boost
+ * into the same scale so we can compare.
*/
- boost_util = sg_cpu->iowait_boost;
- boost_max = sg_cpu->iowait_boost_max;
- if (*util * boost_max < *max * boost_util) {
- *util = boost_util;
- *max = boost_max;
- }
+ boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
+ return max(boost, util);
}
#ifdef CONFIG_NO_HZ_COMMON
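Worked numbers for the rescaled boost, assuming SCHED_CAPACITY_SCALE = 1024 (so
IOWAIT_BOOST_MIN = 128) on a CPU with max = 1024:

    wakeup 1: iowait_boost = 128  -> boost = (128 * 1024) >> 10 = 128
    wakeup 2: iowait_boost = 256  -> boost = 256
    wakeup 3: iowait_boost = 512  -> boost = 512
    wakeup 4: iowait_boost = 1024 -> boost = 1024 (clamped at the scale)
    update with no pending boost  -> iowait_boost halves; below 128 it is zeroed

Because boost and util now share the capacity scale, sugov_iowait_apply() can
simply return max(boost, util) instead of rewriting both *util and *max.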
@@ -460,7 +451,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
util = sugov_get_util(sg_cpu);
max = sg_cpu->max;
- sugov_iowait_apply(sg_cpu, time, &util, &max);
+ util = sugov_iowait_apply(sg_cpu, time, util, max);
next_f = get_next_freq(sg_policy, util, max);
/*
* Do not reduce the frequency if the CPU has not been idle
@@ -500,7 +491,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
j_util = sugov_get_util(j_sg_cpu);
j_max = j_sg_cpu->max;
- sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
+ j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
if (j_util * max > j_max * util) {
util = j_util;
@@ -782,6 +773,7 @@ out:
return 0;
fail:
+ kobject_put(&tunables->attr_set.kobj);
policy->governor_data = NULL;
sugov_tunables_free(tunables);
@@ -837,7 +829,6 @@ static int sugov_start(struct cpufreq_policy *policy)
memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
}
for_each_cpu(cpu, policy->cpus) {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 6a73e41a2016..43901fa3f269 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p)
if (dl_entity_is_special(dl_se))
return;
- WARN_ON(hrtimer_active(&dl_se->inactive_timer));
WARN_ON(dl_se->dl_non_contending);
zerolag_time = dl_se->deadline -
@@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p)
* If the "0-lag time" already passed, decrease the active
* utilization now, instead of starting a timer
*/
- if (zerolag_time < 0) {
+ if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
if (dl_task(p))
sub_running_bw(dl_se, dl_rq);
if (!dl_task(p) || p->state == TASK_DEAD) {
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 8039d62ae36e..678bfb9bd87f 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -702,7 +702,7 @@ do { \
static const char *sched_tunable_scaling_names[] = {
"none",
- "logaritmic",
+ "logarithmic",
"linear"
};
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ea74d43924b2..f35930f5e528 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2007,6 +2007,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
if (p->last_task_numa_placement) {
delta = runtime - p->last_sum_exec_runtime;
*period = now - p->last_task_numa_placement;
+
+ /* Avoid time going backwards, prevent potential divide error: */
+ if (unlikely((s64)*period < 0))
+ *period = 0;
} else {
delta = p->se.avg.load_sum;
*period = LOAD_AVG_MAX;
@@ -2593,7 +2597,7 @@ out:
/*
* Drive the periodic memory faults..
*/
-void task_tick_numa(struct rq *rq, struct task_struct *curr)
+static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
struct callback_head *work = &curr->numa_work;
u64 period, now;
@@ -3567,7 +3571,7 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
* Synchronize entity load avg of dequeued entity without locking
* the previous rq.
*/
-void sync_entity_load_avg(struct sched_entity *se)
+static void sync_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 last_update_time;
@@ -3580,7 +3584,7 @@ void sync_entity_load_avg(struct sched_entity *se)
* Task first catches up with cfs_rq, and then subtract
* itself from the cfs_rq (task must be off the queue now).
*/
-void remove_entity_load_avg(struct sched_entity *se)
+static void remove_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
unsigned long flags;
@@ -4885,6 +4889,8 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+extern const u64 max_cfs_quota_period;
+
static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
@@ -4892,6 +4898,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
unsigned long flags;
int overrun;
int idle = 0;
+ int count = 0;
raw_spin_lock_irqsave(&cfs_b->lock, flags);
for (;;) {
@@ -4899,6 +4906,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
if (!overrun)
break;
+ if (++count > 3) {
+ u64 new, old = ktime_to_ns(cfs_b->period);
+
+ new = (old * 147) / 128; /* ~115% */
+ new = min(new, max_cfs_quota_period);
+
+ cfs_b->period = ns_to_ktime(new);
+
+ /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
+ cfs_b->quota *= new;
+ cfs_b->quota = div64_u64(cfs_b->quota, old);
+
+ pr_warn_ratelimited(
+ "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
+ smp_processor_id(),
+ div_u64(new, NSEC_PER_USEC),
+ div_u64(cfs_b->quota, NSEC_PER_USEC));
+
+ /* reset count so we don't come right back in here */
+ count = 0;
+ }
+
idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
}
if (idle)
@@ -5116,7 +5145,6 @@ static inline void hrtick_update(struct rq *rq)
#ifdef CONFIG_SMP
static inline unsigned long cpu_util(int cpu);
-static unsigned long capacity_of(int cpu);
static inline bool cpu_overutilized(int cpu)
{
@@ -7492,7 +7520,6 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
{
lockdep_assert_held(&env->src_rq->lock);
- p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
set_task_cpu(p, env->dst_cpu);
}
@@ -7628,7 +7655,6 @@ static void attach_task(struct rq *rq, struct task_struct *p)
BUG_ON(task_rq(p) != rq);
activate_task(rq, p, ENQUEUE_NOCLOCK);
- p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(rq, p, 0);
}
@@ -7784,10 +7810,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
if (cfs_rq->last_h_load_update == now)
return;
- cfs_rq->h_load_next = NULL;
+ WRITE_ONCE(cfs_rq->h_load_next, NULL);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- cfs_rq->h_load_next = se;
+ WRITE_ONCE(cfs_rq->h_load_next, se);
if (cfs_rq->last_h_load_update == now)
break;
}
@@ -7797,7 +7823,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
cfs_rq->last_h_load_update = now;
}
- while ((se = cfs_rq->h_load_next) != NULL) {
+ while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
load = cfs_rq->h_load;
load = div64_ul(load * se->avg.load_avg,
cfs_rq_load_avg(cfs_rq) + 1);
@@ -8060,6 +8086,18 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
}
/*
+ * Check whether a rq has a misfit task and if it looks like we can actually
+ * help that task: we can migrate the task to a CPU of higher capacity, or
+ * the task's current CPU is heavily pressured.
+ */
+static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
+{
+ return rq->misfit_task_load &&
+ (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
+ check_cpu_capacity(rq, sd));
+}
+
+/*
* Group imbalance indicates (and tries to solve) the problem where balancing
* groups is inadequate due to ->cpus_allowed constraints.
*
@@ -9510,22 +9548,26 @@ static inline int on_null_domain(struct rq *rq)
* - When one of the busy CPUs notice that there may be an idle rebalancing
* needed, they will kick the idle load balancer, which then does idle
* load balancing for all the idle CPUs.
+ * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not
+ * set anywhere yet.
*/
static inline int find_new_ilb(void)
{
- int ilb = cpumask_first(nohz.idle_cpus_mask);
+ int ilb;
- if (ilb < nr_cpu_ids && idle_cpu(ilb))
- return ilb;
+ for_each_cpu_and(ilb, nohz.idle_cpus_mask,
+ housekeeping_cpumask(HK_FLAG_MISC)) {
+ if (idle_cpu(ilb))
+ return ilb;
+ }
return nr_cpu_ids;
}
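
The rewrite restricts the idle load balancer to housekeeping CPUs: instead of blindly taking the first CPU in nohz.idle_cpus_mask, it walks the intersection of that mask and the HK_FLAG_MISC housekeeping set and re-checks idle_cpu(), since the nohz mask can be stale. A plain-bitmask sketch of the same selection, with a hypothetical cpu_is_idle() callback standing in for idle_cpu():

    #include <stdint.h>

    #define NR_CPUS 64

    static int find_new_ilb_demo(uint64_t nohz_idle_mask, uint64_t hk_misc_mask,
                                 int (*cpu_is_idle)(int))
    {
        uint64_t candidates = nohz_idle_mask & hk_misc_mask;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            /* the nohz mask may be stale, so re-check idleness */
            if ((candidates & (1ULL << cpu)) && cpu_is_idle(cpu))
                return cpu;
        }
        return NR_CPUS;		/* no candidate; mirrors nr_cpu_ids */
    }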
/*
- * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
- * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
- * CPU (if there is one).
+ * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
+ * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
*/
static void kick_ilb(unsigned int flags)
{
@@ -9586,35 +9628,21 @@ static void nohz_balancer_kick(struct rq *rq)
if (time_before(now, nohz.next_balance))
goto out;
- if (rq->nr_running >= 2 || rq->misfit_task_load) {
+ if (rq->nr_running >= 2) {
flags = NOHZ_KICK_MASK;
goto out;
}
rcu_read_lock();
- sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
- if (sds) {
- /*
- * If there is an imbalance between LLC domains (IOW we could
- * increase the overall cache use), we need some less-loaded LLC
- * domain to pull some load. Likewise, we may need to spread
- * load within the current LLC domain (e.g. packed SMT cores but
- * other CPUs are idle). We can't really know from here how busy
- * the others are - so just get a nohz balance going if it looks
- * like this LLC domain has tasks we could move.
- */
- nr_busy = atomic_read(&sds->nr_busy_cpus);
- if (nr_busy > 1) {
- flags = NOHZ_KICK_MASK;
- goto unlock;
- }
-
- }
sd = rcu_dereference(rq->sd);
if (sd) {
- if ((rq->cfs.h_nr_running >= 1) &&
- check_cpu_capacity(rq, sd)) {
+ /*
+ * If there's a CFS task and the current CPU has reduced
+ * capacity, kick the ILB to see if there's a better CPU to run
+ * on.
+ */
+ if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
flags = NOHZ_KICK_MASK;
goto unlock;
}
@@ -9622,6 +9650,11 @@ static void nohz_balancer_kick(struct rq *rq)
sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
if (sd) {
+ /*
+ * When ASYM_PACKING, see if there's a more preferred CPU
+ * currently idle; in which case, kick the ILB to move tasks
+ * around.
+ */
for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
if (sched_asym_prefer(i, cpu)) {
flags = NOHZ_KICK_MASK;
@@ -9629,6 +9662,45 @@ static void nohz_balancer_kick(struct rq *rq)
}
}
}
+
+ sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
+ if (sd) {
+ /*
+ * When ASYM_CPUCAPACITY, see if there's a higher capacity CPU
+ * to run the misfit task on.
+ */
+ if (check_misfit_status(rq, sd)) {
+ flags = NOHZ_KICK_MASK;
+ goto unlock;
+ }
+
+ /*
+ * For asymmetric systems, we do not want to nicely balance
+ * cache use, instead we want to embrace asymmetry and only
+ * ensure tasks have enough CPU capacity.
+ *
+ * Skip the LLC logic because it's not relevant in that case.
+ */
+ goto unlock;
+ }
+
+ sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+ if (sds) {
+ /*
+ * If there is an imbalance between LLC domains (IOW we could
+ * increase the overall cache use), we need some less-loaded LLC
+ * domain to pull some load. Likewise, we may need to spread
+ * load within the current LLC domain (e.g. packed SMT cores but
+ * other CPUs are idle). We can't really know from here how busy
+ * the others are - so just get a nohz balance going if it looks
+ * like this LLC domain has tasks we could move.
+ */
+ nr_busy = atomic_read(&sds->nr_busy_cpus);
+ if (nr_busy > 1) {
+ flags = NOHZ_KICK_MASK;
+ goto unlock;
+ }
+ }
unlock:
rcu_read_unlock();
out:
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index b02d148e7672..687302051a27 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -65,6 +65,7 @@ void __init housekeeping_init(void)
static int __init housekeeping_setup(char *str, enum hk_flags flags)
{
cpumask_var_t non_housekeeping_mask;
+ cpumask_var_t tmp;
int err;
alloc_bootmem_cpumask_var(&non_housekeeping_mask);
@@ -75,16 +76,23 @@ static int __init housekeeping_setup(char *str, enum hk_flags flags)
return 0;
}
+ alloc_bootmem_cpumask_var(&tmp);
if (!housekeeping_flags) {
alloc_bootmem_cpumask_var(&housekeeping_mask);
cpumask_andnot(housekeeping_mask,
cpu_possible_mask, non_housekeeping_mask);
- if (cpumask_empty(housekeeping_mask))
+
+ cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask);
+ if (cpumask_empty(tmp)) {
+ pr_warn("Housekeeping: must include one present CPU, "
+ "using boot CPU:%d\n", smp_processor_id());
__cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
+ __cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask);
+ }
} else {
- cpumask_var_t tmp;
-
- alloc_bootmem_cpumask_var(&tmp);
+ cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask);
+ if (cpumask_empty(tmp))
+ __cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask);
cpumask_andnot(tmp, cpu_possible_mask, non_housekeeping_mask);
if (!cpumask_equal(tmp, housekeeping_mask)) {
pr_warn("Housekeeping: nohz_full= must match isolcpus=\n");
@@ -92,8 +100,8 @@ static int __init housekeeping_setup(char *str, enum hk_flags flags)
free_bootmem_cpumask_var(non_housekeeping_mask);
return 0;
}
- free_bootmem_cpumask_var(tmp);
}
+ free_bootmem_cpumask_var(tmp);
if ((flags & HK_FLAG_TICK) && !(housekeeping_flags & HK_FLAG_TICK)) {
if (IS_ENABLED(CONFIG_NO_HZ_FULL)) {
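
The isolation.c change closes a boot hazard: the housekeeping mask is computed against cpu_possible_mask, but if isolcpus=/nohz_full= would leave no *present* CPU for housekeeping, the boot CPU is pulled back in (and removed from the non-housekeeping set). A bitmask sketch of that fallback, with hypothetical mask parameters:

    #include <stdint.h>

    static uint64_t housekeeping_demo(uint64_t possible, uint64_t present,
                                      uint64_t *non_housekeeping, int boot_cpu)
    {
        uint64_t hk = possible & ~*non_housekeeping;

        if ((present & ~*non_housekeeping) == 0) {
            /* no present housekeeping CPU: fall back to the boot CPU */
            hk |= 1ULL << boot_cpu;
            *non_housekeeping &= ~(1ULL << boot_cpu);
        }
        return hk;
    }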
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 90fa23d36565..1e6b909dca36 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2555,6 +2555,8 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
if (rt_runtime_us < 0)
rt_runtime = RUNTIME_INF;
+ else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
+ return -EINVAL;
return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
@@ -2575,6 +2577,9 @@ int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
u64 rt_runtime, rt_period;
+ if (rt_period_us > U64_MAX / NSEC_PER_USEC)
+ return -EINVAL;
+
rt_period = rt_period_us * NSEC_PER_USEC;
rt_runtime = tg->rt_bandwidth.rt_runtime;
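
Both rt.c additions are the standard pre-multiplication overflow guard: x * NSEC_PER_USEC overflows u64 exactly when x > U64_MAX / NSEC_PER_USEC, so out-of-range microsecond values are rejected with -EINVAL instead of silently wrapping. A stand-alone illustration of the check:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_USEC 1000ULL

    static int usecs_to_nsecs(uint64_t us, uint64_t *ns)
    {
        if (us > UINT64_MAX / NSEC_PER_USEC)
            return -1;		/* would wrap; mirrors -EINVAL */
        *ns = us * NSEC_PER_USEC;
        return 0;
    }

    int main(void)
    {
        uint64_t ns;

        if (usecs_to_nsecs(UINT64_MAX / 2, &ns) < 0)
            printf("rejected: value would overflow u64 nanoseconds\n");
        return 0;
    }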
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efa686eeff26..b52ed1ada0be 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -780,7 +780,7 @@ struct root_domain {
* NULL-terminated list of performance domains intersecting with the
* CPUs of the rd. Protected by RCU.
*/
- struct perf_domain *pd;
+ struct perf_domain __rcu *pd;
};
extern struct root_domain def_root_domain;
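
The __rcu annotations here and below are pure sparse metadata (no generated-code change): with them, `make C=1` warns whenever an annotated pointer is read or written without the RCU accessors. The usual pattern these annotations police, as a schematic kernel-style fragment (not from this patch):

    #include <linux/rcupdate.h>

    struct foo { int val; };
    static struct foo __rcu *gp;

    static int read_val(void)
    {
        struct foo *p;
        int ret = 0;

        rcu_read_lock();
        p = rcu_dereference(gp);	/* sparse-clean read of an __rcu pointer */
        if (p)
            ret = p->val;
        rcu_read_unlock();
        return ret;
    }

    static void publish(struct foo *newp)
    {
        rcu_assign_pointer(gp, newp);	/* store with release semantics */
    }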
@@ -869,8 +869,8 @@ struct rq {
atomic_t nr_iowait;
#ifdef CONFIG_SMP
- struct root_domain *rd;
- struct sched_domain *sd;
+ struct root_domain *rd;
+ struct sched_domain __rcu *sd;
unsigned long cpu_capacity;
unsigned long cpu_capacity_orig;
@@ -1324,13 +1324,13 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
return sd;
}
-DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
-DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing);
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
+DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
extern struct static_key_false sched_asym_cpucapacity;
struct sched_group_capacity {
@@ -2185,7 +2185,7 @@ static inline u64 irq_time_read(int cpu)
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
#ifdef CONFIG_CPU_FREQ
-DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
/**
* cpufreq_update_util - Take a note about CPU utilization changes.
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index ab7f371a3a17..f53f89df837d 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -615,13 +615,13 @@ static void destroy_sched_domains(struct sched_domain *sd)
* the cpumask of the domain), this allows us to quickly tell if
* two CPUs are in the same cache domain, see cpus_share_cache().
*/
-DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
-DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-DEFINE_PER_CPU(struct sched_domain *, sd_numa);
-DEFINE_PER_CPU(struct sched_domain *, sd_asym_packing);
-DEFINE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
+DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
static void update_top_cache_domain(int cpu)
@@ -1059,6 +1059,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
struct sched_domain *child = sd->child;
struct sched_group *sg;
+ bool already_visited;
if (child)
cpu = cpumask_first(sched_domain_span(child));
@@ -1066,9 +1067,14 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
sg = *per_cpu_ptr(sdd->sg, cpu);
sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
- /* For claim_allocations: */
- atomic_inc(&sg->ref);
- atomic_inc(&sg->sgc->ref);
+ /* Increase refcounts for claim_allocations: */
+ already_visited = atomic_inc_return(&sg->ref) > 1;
+ /* sgc visits should follow a similar trend as sg */
+ WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
+
+ /* If we have already visited that group, it's already initialized. */
+ if (already_visited)
+ return sg;
if (child) {
cpumask_copy(sched_group_span(sg), sched_domain_span(child));
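
The already_visited test piggybacks on the refcount: atomic_inc_return() yields the post-increment value, so anything greater than one means an earlier CPU already initialized this group and the rest of the setup can be skipped. The same first-visit idiom with C11 atomics, for illustration:

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool mark_visited(atomic_int *ref)
    {
        /*
         * fetch_add returns the pre-increment value: non-zero means
         * someone got here first. (Kernel atomic_inc_return() returns
         * the post-increment value, hence its "> 1" test above.)
         */
        return atomic_fetch_add(ref, 1) > 0;
    }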
@@ -1087,8 +1093,8 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
/*
* build_sched_groups will build a circular linked list of the groups
- * covered by the given span, and will set each group's ->cpumask correctly,
- * and ->cpu_capacity to 0.
+ * covered by the given span, will set each group's ->cpumask correctly,
+ * and will initialize their ->sgc.
*
* Assumes the sched_domain tree is fully constructed
*/
@@ -2075,9 +2081,8 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
}
/*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
- * For now this just excludes isolated CPUs, but could be used to
- * exclude other special cases in the future.
+ * Set up scheduler domains and groups. For now this just excludes isolated
+ * CPUs, but could be used to exclude other special cases in the future.
*/
int sched_init_domains(const struct cpumask *cpu_map)
{
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 54a0347ca812..a635ecba6fe2 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -149,7 +149,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
sd->nr = syscall_get_nr(task, regs);
sd->arch = syscall_get_arch();
- syscall_get_arguments(task, regs, 0, 6, args);
+ syscall_get_arguments(task, regs, args);
sd->args[0] = args[0];
sd->args[1] = args[1];
sd->args[2] = args[2];
@@ -331,7 +331,7 @@ static int is_ancestor(struct seccomp_filter *parent,
* Expects sighand and cred_guard_mutex locks to be held.
*
* Returns 0 on success, -ve on error, or the pid of a thread which was
- * either not in the correct seccomp mode or it did not have an ancestral
+ * either not in the correct seccomp mode or did not have an ancestral
* seccomp filter.
*/
static inline pid_t seccomp_can_sync_threads(void)
@@ -502,7 +502,10 @@ out:
*
* Caller must be holding current->sighand->siglock lock.
*
- * Returns 0 on success, -ve on error.
+ * Returns 0 on success, -ve on error, or
+ * - in TSYNC mode: the pid of a thread which was either not in the correct
+ * seccomp mode or did not have an ancestral seccomp filter
+ * - in NEW_LISTENER mode: the fd of the new listener
*/
static long seccomp_attach_filter(unsigned int flags,
struct seccomp_filter *filter)
@@ -1258,6 +1261,16 @@ static long seccomp_set_mode_filter(unsigned int flags,
if (flags & ~SECCOMP_FILTER_FLAG_MASK)
return -EINVAL;
+ /*
+ * In the successful case, NEW_LISTENER returns the new listener fd.
+ * But in the failure case, TSYNC returns the thread that died. If you
+ * combine these two flags, there's no way to tell whether something
+ * succeeded or failed. So, let's disallow this combination.
+ */
+ if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
+ (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER))
+ return -EINVAL;
+
/* Prepare the new filter before holding any locks. */
prepared = seccomp_prepare_user_filter(filter);
if (IS_ERR(prepared))
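
From userspace the new rule looks like this: each flag is fine on its own, but the combination now fails with EINVAL up front because a single return value cannot carry both a listener fd (NEW_LISTENER success) and a failing thread ID (TSYNC failure). A hedged sketch, assuming a BPF program `prog` has been prepared:

    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long seccomp_filter(unsigned int flags, struct sock_fprog *prog)
    {
        return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, flags, prog);
    }

    /*
     * seccomp_filter(SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog) -> listener fd
     * seccomp_filter(SECCOMP_FILTER_FLAG_TSYNC, &prog)        -> 0, or failing TID
     * both flags together                                     -> -1, errno == EINVAL
     */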
@@ -1304,7 +1317,7 @@ out:
mutex_unlock(&current->signal->cred_guard_mutex);
out_put_fd:
if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
- if (ret < 0) {
+ if (ret) {
listener_f->private_data = NULL;
fput(listener_f);
put_unused_fd(listener);
diff --git a/kernel/signal.c b/kernel/signal.c
index 5d53183e2705..227ba170298e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -19,7 +19,9 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
+#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
@@ -3487,6 +3489,16 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
#endif
#endif
+static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
+{
+ clear_siginfo(info);
+ info->si_signo = sig;
+ info->si_errno = 0;
+ info->si_code = SI_USER;
+ info->si_pid = task_tgid_vnr(current);
+ info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
+}
+
/**
* sys_kill - send a signal to a process
* @pid: the PID of the process
@@ -3496,16 +3508,120 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
struct kernel_siginfo info;
- clear_siginfo(&info);
- info.si_signo = sig;
- info.si_errno = 0;
- info.si_code = SI_USER;
- info.si_pid = task_tgid_vnr(current);
- info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
+ prepare_kill_siginfo(sig, &info);
return kill_something_info(sig, &info, pid);
}
+#ifdef CONFIG_PROC_FS
+/*
+ * Verify that the signaler and signalee either are in the same pid namespace
+ * or that the signaler's pid namespace is an ancestor of the signalee's pid
+ * namespace.
+ */
+static bool access_pidfd_pidns(struct pid *pid)
+{
+ struct pid_namespace *active = task_active_pid_ns(current);
+ struct pid_namespace *p = ns_of_pid(pid);
+
+ for (;;) {
+ if (!p)
+ return false;
+ if (p == active)
+ break;
+ p = p->parent;
+ }
+
+ return true;
+}
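
access_pidfd_pidns() is a plain ancestor walk over the ->parent chain: signaling is allowed only when the caller's pid namespace is the target's namespace or one of its ancestors. The generic shape of the loop, with a hypothetical node type:

    #include <stdbool.h>
    #include <stddef.h>

    struct ns { struct ns *parent; };

    static bool caller_is_ancestor_or_self(const struct ns *caller,
                                           const struct ns *target)
    {
        const struct ns *p;

        for (p = target; p; p = p->parent)
            if (p == caller)
                return true;
        return false;	/* hit the root without meeting the caller */
    }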
+
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
+{
+#ifdef CONFIG_COMPAT
+ /*
+ * Avoid hooking up compat syscalls and instead handle necessary
+ * conversions here. Note, this is a stop-gap measure and should not be
+ * considered a generic solution.
+ */
+ if (in_compat_syscall())
+ return copy_siginfo_from_user32(
+ kinfo, (struct compat_siginfo __user *)info);
+#endif
+ return copy_siginfo_from_user(kinfo, info);
+}
+
+/**
+ * sys_pidfd_send_signal - send a signal to a process through a task file
+ * descriptor
+ * @pidfd: the file descriptor of the process
+ * @sig: signal to be sent
+ * @info: the signal info
+ * @flags: future flags to be passed
+ *
+ * The syscall currently only signals via PIDTYPE_PID which covers
+ * kill(<positive-pid>, <signal>). It does not signal threads or process
+ * groups.
+ * In order to extend the syscall to threads and process groups the @flags
+ * argument should be used. In essence, the @flags argument will determine
+ * what is signaled and not the file descriptor itself. Put in other words,
+ * grouping is a property of the flags argument not a property of the file
+ * descriptor.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
+ siginfo_t __user *, info, unsigned int, flags)
+{
+ int ret;
+ struct fd f;
+ struct pid *pid;
+ kernel_siginfo_t kinfo;
+
+ /* Enforce flags be set to 0 until we add an extension. */
+ if (flags)
+ return -EINVAL;
+
+ f = fdget(pidfd);
+ if (!f.file)
+ return -EBADF;
+
+ /* Is this a pidfd? */
+ pid = tgid_pidfd_to_pid(f.file);
+ if (IS_ERR(pid)) {
+ ret = PTR_ERR(pid);
+ goto err;
+ }
+
+ ret = -EINVAL;
+ if (!access_pidfd_pidns(pid))
+ goto err;
+
+ if (info) {
+ ret = copy_siginfo_from_user_any(&kinfo, info);
+ if (unlikely(ret))
+ goto err;
+
+ ret = -EINVAL;
+ if (unlikely(sig != kinfo.si_signo))
+ goto err;
+
+ /* Only allow sending arbitrary signals to yourself. */
+ ret = -EPERM;
+ if ((task_pid(current) != pid) &&
+ (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
+ goto err;
+ } else {
+ prepare_kill_siginfo(sig, &kinfo);
+ }
+
+ ret = kill_pid_info(sig, &kinfo, pid);
+
+err:
+ fdput(f);
+ return ret;
+}
+#endif /* CONFIG_PROC_FS */
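
A userspace usage sketch: in this version of the patch a pidfd is simply an open /proc/<pid> directory fd, and passing info == NULL makes the call behave like kill(pid, sig). The syscall number below is an assumption (424, the slot the syscall was wired up to); adjust to your headers:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_pidfd_send_signal
    #define __NR_pidfd_send_signal 424	/* assumed; check your unistd.h */
    #endif

    int main(int argc, char *argv[])
    {
        char path[64];
        int pidfd;

        snprintf(path, sizeof(path), "/proc/%s", argc > 1 ? argv[1] : "1");
        pidfd = open(path, O_DIRECTORY | O_CLOEXEC);
        if (pidfd < 0)
            return 1;

        /* NULL info behaves like kill(pid, SIGTERM); flags must be 0 */
        if (syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
            perror("pidfd_send_signal");

        close(pidfd);
        return 0;
    }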
+
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 10277429ed84..2c3382378d94 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -573,57 +573,6 @@ void tasklet_kill(struct tasklet_struct *t)
}
EXPORT_SYMBOL(tasklet_kill);
-/*
- * tasklet_hrtimer
- */
-
-/*
- * The trampoline is called when the hrtimer expires. It schedules a tasklet
- * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
- * hrtimer callback, but from softirq context.
- */
-static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
-{
- struct tasklet_hrtimer *ttimer =
- container_of(timer, struct tasklet_hrtimer, timer);
-
- tasklet_hi_schedule(&ttimer->tasklet);
- return HRTIMER_NORESTART;
-}
-
-/*
- * Helper function which calls the hrtimer callback from
- * tasklet/softirq context
- */
-static void __tasklet_hrtimer_trampoline(unsigned long data)
-{
- struct tasklet_hrtimer *ttimer = (void *)data;
- enum hrtimer_restart restart;
-
- restart = ttimer->function(&ttimer->timer);
- if (restart != HRTIMER_NORESTART)
- hrtimer_restart(&ttimer->timer);
-}
-
-/**
- * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
- * @ttimer: tasklet_hrtimer which is initialized
- * @function: hrtimer callback function which gets called from softirq context
- * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
- * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
- */
-void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
- enum hrtimer_restart (*function)(struct hrtimer *),
- clockid_t which_clock, enum hrtimer_mode mode)
-{
- hrtimer_init(&ttimer->timer, which_clock, mode);
- ttimer->timer.function = __hrtimer_tasklet_trampoline;
- tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
- (unsigned long)ttimer);
- ttimer->function = function;
-}
-EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
-
void __init softirq_init(void)
{
int cpu;
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index f8edee9c792d..27bafc1e271e 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -5,41 +5,56 @@
*
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
+#include <linux/sched/task_stack.h>
+#include <linux/sched/debug.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
-void print_stack_trace(struct stack_trace *trace, int spaces)
+/**
+ * stack_trace_print - Print the entries in the stack trace
+ * @entries: Pointer to storage array
+ * @nr_entries: Number of entries in the storage array
+ * @spaces: Number of leading spaces to print
+ */
+void stack_trace_print(unsigned long *entries, unsigned int nr_entries,
+ int spaces)
{
- int i;
+ unsigned int i;
- if (WARN_ON(!trace->entries))
+ if (WARN_ON(!entries))
return;
- for (i = 0; i < trace->nr_entries; i++)
- printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
+ for (i = 0; i < nr_entries; i++)
+ printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);
}
-EXPORT_SYMBOL_GPL(print_stack_trace);
+EXPORT_SYMBOL_GPL(stack_trace_print);
-int snprint_stack_trace(char *buf, size_t size,
- struct stack_trace *trace, int spaces)
+/**
+ * stack_trace_snprint - Print the entries in the stack trace into a buffer
+ * @buf: Pointer to the print buffer
+ * @size: Size of the print buffer
+ * @entries: Pointer to storage array
+ * @nr_entries: Number of entries in the storage array
+ * @spaces: Number of leading spaces to print
+ *
+ * Return: Number of bytes printed.
+ */
+int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+ unsigned int nr_entries, int spaces)
{
- int i;
- int generated;
- int total = 0;
+ unsigned int generated, i, total = 0;
- if (WARN_ON(!trace->entries))
+ if (WARN_ON(!entries))
return 0;
- for (i = 0; i < trace->nr_entries; i++) {
+ for (i = 0; i < nr_entries && size; i++) {
generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
- (void *)trace->entries[i]);
+ (void *)entries[i]);
total += generated;
-
- /* Assume that generated isn't a negative number */
if (generated >= size) {
buf += size;
size = 0;
@@ -51,7 +66,176 @@ int snprint_stack_trace(char *buf, size_t size,
return total;
}
-EXPORT_SYMBOL_GPL(snprint_stack_trace);
+EXPORT_SYMBOL_GPL(stack_trace_snprint);
+
+#ifdef CONFIG_ARCH_STACKWALK
+
+struct stacktrace_cookie {
+ unsigned long *store;
+ unsigned int size;
+ unsigned int skip;
+ unsigned int len;
+};
+
+static bool stack_trace_consume_entry(void *cookie, unsigned long addr,
+ bool reliable)
+{
+ struct stacktrace_cookie *c = cookie;
+
+ if (c->len >= c->size)
+ return false;
+
+ if (c->skip > 0) {
+ c->skip--;
+ return true;
+ }
+ c->store[c->len++] = addr;
+ return c->len < c->size;
+}
+
+static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr,
+ bool reliable)
+{
+ if (in_sched_functions(addr))
+ return true;
+ return stack_trace_consume_entry(cookie, addr, reliable);
+}
+
+/**
+ * stack_trace_save - Save a stack trace into a storage array
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ * @skipnr: Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+ unsigned int skipnr)
+{
+ stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+ struct stacktrace_cookie c = {
+ .store = store,
+ .size = size,
+ .skip = skipnr + 1,
+ };
+
+ arch_stack_walk(consume_entry, &c, current, NULL);
+ return c.len;
+}
+EXPORT_SYMBOL_GPL(stack_trace_save);
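
A typical caller of the new interface no longer fills in a struct stack_trace; it just hands over an array and gets the entry count back. An illustrative kernel-style fragment using only functions defined in this file:

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>

    static void dump_here(void)
    {
        unsigned long entries[16];
        unsigned int nr;

        nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
        stack_trace_print(entries, nr, 0 /* spaces */);
    }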
+
+/**
+ * stack_trace_save_tsk - Save a task stack trace into a storage array
+ * @task: The task to examine
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ * @skipnr: Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
+ unsigned int size, unsigned int skipnr)
+{
+ stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched;
+ struct stacktrace_cookie c = {
+ .store = store,
+ .size = size,
+ .skip = skipnr + 1,
+ };
+
+ if (!try_get_task_stack(tsk))
+ return 0;
+
+ arch_stack_walk(consume_entry, &c, tsk, NULL);
+ put_task_stack(tsk);
+ return c.len;
+}
+
+/**
+ * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
+ * @regs: Pointer to pt_regs to examine
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ * @skipnr: Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+ unsigned int size, unsigned int skipnr)
+{
+ stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+ struct stacktrace_cookie c = {
+ .store = store,
+ .size = size,
+ .skip = skipnr,
+ };
+
+ arch_stack_walk(consume_entry, &c, current, regs);
+ return c.len;
+}
+
+#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
+/**
+ * stack_trace_save_tsk_reliable - Save task stack with verification
+ * @tsk: Pointer to the task to examine
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ *
+ * Return: An error if it detects any unreliable features of the
+ * stack. Otherwise it guarantees that the stack trace is
+ * reliable and returns the number of entries stored.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+ unsigned int size)
+{
+ stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+ struct stacktrace_cookie c = {
+ .store = store,
+ .size = size,
+ };
+ int ret;
+
+ /*
+ * If the task doesn't have a stack (e.g., a zombie), the stack is
+ * "reliably" empty.
+ */
+ if (!try_get_task_stack(tsk))
+ return 0;
+
+ ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
+ put_task_stack(tsk);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+/**
+ * stack_trace_save_user - Save a user space stack trace into a storage array
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
+{
+ stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+ struct stacktrace_cookie c = {
+ .store = store,
+ .size = size,
+ };
+
+ /* Trace user stack if not a kernel thread */
+ if (!current->mm)
+ return 0;
+
+ arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
+ return c.len;
+}
+#endif
+
+#else /* CONFIG_ARCH_STACKWALK */
/*
* Architectures that do not implement save_stack_trace_*()
@@ -77,3 +261,118 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n");
return -ENOSYS;
}
+
+/**
+ * stack_trace_save - Save a stack trace into a storage array
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ * @skipnr: Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+ unsigned int skipnr)
+{
+ struct stack_trace trace = {
+ .entries = store,
+ .max_entries = size,
+ .skip = skipnr + 1,
+ };
+
+ save_stack_trace(&trace);
+ return trace.nr_entries;
+}
+EXPORT_SYMBOL_GPL(stack_trace_save);
+
+/**
+ * stack_trace_save_tsk - Save a task stack trace into a storage array
+ * @task: The task to examine
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ * @skipnr: Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+ unsigned long *store, unsigned int size,
+ unsigned int skipnr)
+{
+ struct stack_trace trace = {
+ .entries = store,
+ .max_entries = size,
+ .skip = skipnr + 1,
+ };
+
+ save_stack_trace_tsk(task, &trace);
+ return trace.nr_entries;
+}
+
+/**
+ * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
+ * @regs: Pointer to pt_regs to examine
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ * @skipnr: Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+ unsigned int size, unsigned int skipnr)
+{
+ struct stack_trace trace = {
+ .entries = store,
+ .max_entries = size,
+ .skip = skipnr,
+ };
+
+ save_stack_trace_regs(regs, &trace);
+ return trace.nr_entries;
+}
+
+#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
+/**
+ * stack_trace_save_tsk_reliable - Save task stack with verification
+ * @tsk: Pointer to the task to examine
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ *
+ * Return: An error if it detects any unreliable features of the
+ * stack. Otherwise it guarantees that the stack trace is
+ * reliable and returns the number of entries stored.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+ unsigned int size)
+{
+ struct stack_trace trace = {
+ .entries = store,
+ .max_entries = size,
+ };
+ int ret = save_stack_trace_tsk_reliable(tsk, &trace);
+
+ return ret ? ret : trace.nr_entries;
+}
+#endif
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+/**
+ * stack_trace_save_user - Save a user space stack trace into a storage array
+ * @store: Pointer to storage array
+ * @size: Size of the storage array
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
+{
+ struct stack_trace trace = {
+ .entries = store,
+ .max_entries = size,
+ };
+
+ save_stack_trace_user(&trace);
+ return trace.nr_entries;
+}
+#endif /* CONFIG_USER_STACKTRACE_SUPPORT */
+
+#endif /* !CONFIG_ARCH_STACKWALK */
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 51d7c6794bf1..d21f4befaea4 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -168,6 +168,7 @@ COND_SYSCALL(syslog);
/* kernel/sched/core.c */
/* kernel/signal.c */
+COND_SYSCALL(pidfd_send_signal);
/* kernel/sys.c */
COND_SYSCALL(setregid);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3fb1405f3f8c..c9ec050bcf46 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -128,6 +128,7 @@ static int zero;
static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static int __maybe_unused four = 4;
+static unsigned long zero_ul;
static unsigned long one_ul = 1;
static unsigned long long_max = LONG_MAX;
static int one_hundred = 100;
@@ -1750,7 +1751,7 @@ static struct ctl_table fs_table[] = {
.maxlen = sizeof(files_stat.max_files),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
- .extra1 = &zero,
+ .extra1 = &zero_ul,
.extra2 = &long_max,
},
{
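
The dedicated zero_ul matters because proc_doulongvec_minmax() dereferences extra1/extra2 as unsigned long: pointing extra1 at the int-sized `zero` made the handler read 8 bytes from a 4-byte object, so fs.file-max needs a correctly sized unsigned long lower bound.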
@@ -2643,23 +2644,25 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
int *valp,
int write, void *data)
{
+ int tmp, ret;
struct do_proc_dointvec_minmax_conv_param *param = data;
+ /*
+ * If writing, first do so via a temporary local int so we can
+ * bounds-check it before touching *valp.
+ */
+ int *ip = write ? &tmp : valp;
+
+ ret = do_proc_dointvec_conv(negp, lvalp, ip, write, data);
+ if (ret)
+ return ret;
+
if (write) {
- int val = *negp ? -*lvalp : *lvalp;
- if ((param->min && *param->min > val) ||
- (param->max && *param->max < val))
+ if ((param->min && *param->min > tmp) ||
+ (param->max && *param->max < tmp))
return -EINVAL;
- *valp = val;
- } else {
- int val = *valp;
- if (val < 0) {
- *negp = true;
- *lvalp = -(unsigned long)val;
- } else {
- *negp = false;
- *lvalp = (unsigned long)val;
- }
+ *valp = tmp;
}
+
return 0;
}
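
The rewrite deduplicates the conversion by routing both directions through do_proc_dointvec_conv(); on writes the value lands in a local temporary first, so the min/max check is applied to the converted value before *valp is updated. The validate-then-publish shape in isolation:

    static int checked_store(int *dst, int candidate, int min, int max)
    {
        if (candidate < min || candidate > max)
            return -1;	/* reject without touching *dst */
        *dst = candidate;	/* publish only after validation */
        return 0;
    }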
@@ -2708,22 +2711,22 @@ static int do_proc_douintvec_minmax_conv(unsigned long *lvalp,
unsigned int *valp,
int write, void *data)
{
+ int ret;
+ unsigned int tmp;
struct do_proc_douintvec_minmax_conv_param *param = data;
+ /* write via temporary local uint for bounds-checking */
+ unsigned int *up = write ? &tmp : valp;
- if (write) {
- unsigned int val = *lvalp;
-
- if (*lvalp > UINT_MAX)
- return -EINVAL;
+ ret = do_proc_douintvec_conv(lvalp, up, write, data);
+ if (ret)
+ return ret;
- if ((param->min && *param->min > val) ||
- (param->max && *param->max < val))
+ if (write) {
+ if ((param->min && *param->min > tmp) ||
+ (param->max && *param->max < tmp))
return -ERANGE;
- *valp = val;
- } else {
- unsigned int val = *valp;
- *lvalp = (unsigned long) val;
+ *valp = tmp;
}
return 0;
@@ -3326,7 +3329,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
#endif /* CONFIG_PROC_SYSCTL */
-#ifdef CONFIG_BPF_SYSCALL
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
static int proc_dointvec_minmax_bpf_stats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 2c97e8c2d29f..0519a8805aab 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -594,7 +594,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
{
struct alarm *alarm = &timr->it.alarm.alarmtimer;
- return ktime_sub(now, alarm->node.expires);
+ return ktime_sub(alarm->node.expires, now);
}
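
The one-line alarmtimer fix inverts a subtraction: the time remaining until expiry is expires - now, so with now = 50ms and expires = 80ms the old code reported -30ms instead of 30ms (and a positive value for already-expired alarms).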
/**
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 5e77662dd2d9..f5490222e134 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -611,6 +611,22 @@ void clockevents_resume(void)
}
#ifdef CONFIG_HOTPLUG_CPU
+
+# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+/**
+ * tick_offline_cpu - Take CPU out of the broadcast mechanism
+ * @cpu: The outgoing CPU
+ *
+ * Called on the outgoing CPU after it took itself offline.
+ */
+void tick_offline_cpu(unsigned int cpu)
+{
+ raw_spin_lock(&clockevents_lock);
+ tick_broadcast_offline(cpu);
+ raw_spin_unlock(&clockevents_lock);
+}
+# endif
+
/**
* tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
*/
@@ -621,8 +637,6 @@ void tick_cleanup_dead_cpu(int cpu)
raw_spin_lock_irqsave(&clockevents_lock, flags);
- tick_shutdown_broadcast_oneshot(cpu);
- tick_shutdown_broadcast(cpu);
tick_shutdown(cpu);
/*
* Unregister the clock event devices which were
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index dc1b6f1929f9..d23b434c2ca7 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -63,7 +63,7 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
- unsigned long seq;
+ unsigned int seq;
u64 ret;
do {
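
This and the many sibling changes below (sched_clock.c, tick-common.c, tick-sched.c, timekeeping.c) shrink the sequence variable from unsigned long to unsigned int because the seqlock/seqcount read primitives return unsigned int; the wider type bought nothing. The canonical reader loop the type feeds, as a schematic kernel-style fragment:

    #include <linux/seqlock.h>

    static seqcount_t sc;
    static u64 shared;

    static u64 read_shared(void)
    {
        unsigned int seq;	/* matches read_seqcount_begin()'s return type */
        u64 val;

        do {
            seq = read_seqcount_begin(&sc);
            val = shared;
        } while (read_seqcount_retry(&sc, seq));

        return val;
    }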
@@ -89,7 +89,7 @@ struct clocksource * __init __weak clocksource_default_clock(void)
return &clocksource_jiffies;
}
-struct clocksource refined_jiffies;
+static struct clocksource refined_jiffies;
int register_refined_jiffies(long cycles_per_second)
{
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 1002cf61700a..142b07619918 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -94,7 +94,7 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
unsigned long long notrace sched_clock(void)
{
u64 cyc, res;
- unsigned long seq;
+ unsigned int seq;
struct clock_read_data *rd;
do {
@@ -267,12 +267,12 @@ void __init generic_sched_clock_init(void)
*/
static u64 notrace suspended_sched_clock_read(void)
{
- unsigned long seq = raw_read_seqcount(&cd.seq);
+ unsigned int seq = raw_read_seqcount(&cd.seq);
return cd.read_data[seq & 1].epoch_cyc;
}
-static int sched_clock_suspend(void)
+int sched_clock_suspend(void)
{
struct clock_read_data *rd = &cd.read_data[0];
@@ -283,7 +283,7 @@ static int sched_clock_suspend(void)
return 0;
}
-static void sched_clock_resume(void)
+void sched_clock_resume(void)
{
struct clock_read_data *rd = &cd.read_data[0];
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index ee834d4fb814..e51778c312f1 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -36,10 +36,16 @@ static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+# ifdef CONFIG_HOTPLUG_CPU
+static void tick_broadcast_oneshot_offline(unsigned int cpu);
+# endif
#else
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
+# ifdef CONFIG_HOTPLUG_CPU
+static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
+# endif
#endif
/*
@@ -433,27 +439,29 @@ void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
}
#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Remove a CPU from broadcasting
- */
-void tick_shutdown_broadcast(unsigned int cpu)
+static void tick_shutdown_broadcast(void)
{
- struct clock_event_device *bc;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-
- bc = tick_broadcast_device.evtdev;
- cpumask_clear_cpu(cpu, tick_broadcast_mask);
- cpumask_clear_cpu(cpu, tick_broadcast_on);
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
if (bc && cpumask_empty(tick_broadcast_mask))
clockevents_shutdown(bc);
}
+}
- raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+/*
+ * Remove a CPU from broadcasting
+ */
+void tick_broadcast_offline(unsigned int cpu)
+{
+ raw_spin_lock(&tick_broadcast_lock);
+ cpumask_clear_cpu(cpu, tick_broadcast_mask);
+ cpumask_clear_cpu(cpu, tick_broadcast_on);
+ tick_broadcast_oneshot_offline(cpu);
+ tick_shutdown_broadcast();
+ raw_spin_unlock(&tick_broadcast_lock);
}
+
#endif
void tick_suspend_broadcast(void)
@@ -801,13 +809,13 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
* either the CPU handling the broadcast
* interrupt or we got woken by something else.
*
- * We are not longer in the broadcast mask, so
+ * We are no longer in the broadcast mask, so
* if the cpu local expiry time is already
* reached, we would reprogram the cpu local
* timer with an already expired event.
*
* This can lead to a ping-pong when we return
- * to idle and therefor rearm the broadcast
+ * to idle and therefore rearm the broadcast
* timer before the cpu local timer was able
* to fire. This happens because the forced
* reprogramming makes sure that the event
@@ -950,14 +958,10 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
}
/*
- * Remove a dead CPU from broadcasting
+ * Remove a dying CPU from broadcasting
*/
-void tick_shutdown_broadcast_oneshot(unsigned int cpu)
+static void tick_broadcast_oneshot_offline(unsigned int cpu)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-
/*
* Clear the broadcast masks for the dead cpu, but do not stop
* the broadcast device!
@@ -965,8 +969,6 @@ void tick_shutdown_broadcast_oneshot(unsigned int cpu)
cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
-
- raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 529143b4c8d2..59225b484e4e 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -46,6 +46,14 @@ ktime_t tick_period;
* procedure also covers cpu hotplug.
*/
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
+#ifdef CONFIG_NO_HZ_FULL
+/*
+ * tick_do_timer_boot_cpu indicates the boot CPU temporarily owns
+ * tick_do_timer_cpu and it should be taken over by an eligible secondary
+ * when one comes online.
+ */
+static int tick_do_timer_boot_cpu __read_mostly = -1;
+#endif
/*
* Debugging: see timer_list.c
@@ -149,7 +157,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
!tick_broadcast_oneshot_active()) {
clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
} else {
- unsigned long seq;
+ unsigned int seq;
ktime_t next;
do {
@@ -167,6 +175,26 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
}
}
+#ifdef CONFIG_NO_HZ_FULL
+static void giveup_do_timer(void *info)
+{
+ int cpu = *(unsigned int *)info;
+
+ WARN_ON(tick_do_timer_cpu != smp_processor_id());
+
+ tick_do_timer_cpu = cpu;
+}
+
+static void tick_take_do_timer_from_boot(void)
+{
+ int cpu = smp_processor_id();
+ int from = tick_do_timer_boot_cpu;
+
+ if (from >= 0 && from != cpu)
+ smp_call_function_single(from, giveup_do_timer, &cpu, 1);
+}
+#endif
+
/*
* Setup the tick device
*/
@@ -186,12 +214,26 @@ static void tick_setup_device(struct tick_device *td,
* this cpu:
*/
if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
- if (!tick_nohz_full_cpu(cpu))
- tick_do_timer_cpu = cpu;
- else
- tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+ tick_do_timer_cpu = cpu;
+
tick_next_period = ktime_get();
tick_period = NSEC_PER_SEC / HZ;
+#ifdef CONFIG_NO_HZ_FULL
+ /*
+ * The boot CPU may be nohz_full, in which case set
+ * tick_do_timer_boot_cpu so the first housekeeping
+ * secondary that comes up will take do_timer from
+ * us.
+ */
+ if (tick_nohz_full_cpu(cpu))
+ tick_do_timer_boot_cpu = cpu;
+
+ } else if (tick_do_timer_boot_cpu != -1 &&
+ !tick_nohz_full_cpu(cpu)) {
+ tick_take_do_timer_from_boot();
+ tick_do_timer_boot_cpu = -1;
+ WARN_ON(tick_do_timer_cpu != cpu);
+#endif
}
/*
@@ -487,6 +529,7 @@ void tick_freeze(void)
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), true);
system_state = SYSTEM_SUSPEND;
+ sched_clock_suspend();
timekeeping_suspend();
} else {
tick_suspend_local();
@@ -510,6 +553,7 @@ void tick_unfreeze(void)
if (tick_freeze_depth == num_online_cpus()) {
timekeeping_resume();
+ sched_clock_resume();
system_state = SYSTEM_RUNNING;
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), false);
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index e277284c2831..7b2496136729 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -64,7 +64,6 @@ extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
extern void tick_install_broadcast_device(struct clock_event_device *dev);
extern int tick_is_broadcast_device(struct clock_event_device *dev);
-extern void tick_shutdown_broadcast(unsigned int cpu);
extern void tick_suspend_broadcast(void);
extern void tick_resume_broadcast(void);
extern bool tick_resume_check_broadcast(void);
@@ -78,7 +77,6 @@ static inline void tick_install_broadcast_device(struct clock_event_device *dev)
static inline int tick_is_broadcast_device(struct clock_event_device *dev) { return 0; }
static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; }
static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
-static inline void tick_shutdown_broadcast(unsigned int cpu) { }
static inline void tick_suspend_broadcast(void) { }
static inline void tick_resume_broadcast(void) { }
static inline bool tick_resume_check_broadcast(void) { return false; }
@@ -128,19 +126,23 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
/* Functions related to oneshot broadcasting */
#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void tick_broadcast_switch_to_oneshot(void);
-extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
extern int tick_broadcast_oneshot_active(void);
extern void tick_check_oneshot_broadcast_this_cpu(void);
bool tick_broadcast_oneshot_available(void);
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
#else /* !(BROADCAST && ONESHOT): */
static inline void tick_broadcast_switch_to_oneshot(void) { }
-static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); }
#endif /* !(BROADCAST && ONESHOT) */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU)
+extern void tick_broadcast_offline(unsigned int cpu);
+#else
+static inline void tick_broadcast_offline(unsigned int cpu) { }
+#endif
+
/* NO_HZ_FULL internal */
#ifdef CONFIG_NO_HZ_FULL
extern void tick_nohz_init(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6fa52cd6df0b..f4ee1a3428ae 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -121,10 +121,16 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
* into a long sleep. If two CPUs happen to assign themselves to
* this duty, then the jiffies update is still serialized by
* jiffies_lock.
+ *
+ * If nohz_full is enabled, this should not happen because the
+ * tick_do_timer_cpu never relinquishes the duty.
*/
- if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
- && !tick_nohz_full_cpu(cpu))
+ if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
+#ifdef CONFIG_NO_HZ_FULL
+ WARN_ON(tick_nohz_full_running);
+#endif
tick_do_timer_cpu = cpu;
+ }
#endif
/* Check, if the jiffies need an update */
@@ -395,8 +401,8 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
static int tick_nohz_cpu_down(unsigned int cpu)
{
/*
- * The boot CPU handles housekeeping duty (unbound timers,
- * workqueues, timekeeping, ...) on behalf of full dynticks
+ * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
+ * timers, workqueues, timekeeping, ...) on behalf of full dynticks
* CPUs. It must remain online when nohz full is enabled.
*/
if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
@@ -423,12 +429,15 @@ void __init tick_nohz_init(void)
return;
}
- cpu = smp_processor_id();
+ if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
+ !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
+ cpu = smp_processor_id();
- if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
- pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
- cpu);
- cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+ if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
+ pr_warn("NO_HZ: Clearing %d from nohz_full range "
+ "for timekeeping\n", cpu);
+ cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+ }
}
for_each_cpu(cpu, tick_nohz_full_mask)
@@ -645,7 +654,8 @@ static inline bool local_timer_softirq_pending(void)
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
- unsigned long seq, basejiff;
+ unsigned long basejiff;
+ unsigned int seq;
/* Read jiffies and the time when jiffies were updated last */
do {
@@ -904,8 +914,13 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
/*
* Boot safety: make sure the timekeeping duty has been
* assigned before entering dyntick-idle mode,
+ * i.e. bail out while tick_do_timer_cpu is still TICK_DO_TIMER_BOOT.
*/
- if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
+ if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
+ return false;
+
+ /* Should not happen for nohz-full */
+ if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
return false;
}
@@ -1023,6 +1038,18 @@ bool tick_nohz_idle_got_tick(void)
}
/**
+ * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
+ * or the tick, whatever that expires first. Note that, if the tick has been
+ * stopped, it returns the next hrtimer.
+ *
+ * Called from power state control code with interrupts disabled
+ */
+ktime_t tick_nohz_get_next_hrtimer(void)
+{
+ return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
+}
+
+/**
* tick_nohz_get_sleep_length - return the expected length of the current sleep
* @delta_next: duration until the next event if the tick cannot be stopped
*
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 6de959a854b2..4fb06527cf64 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -24,12 +24,19 @@ enum tick_nohz_mode {
* struct tick_sched - sched tick emulation and no idle tick control/stats
* @sched_timer: hrtimer to schedule the periodic tick in high
* resolution mode
+ * @check_clocks: Notification mechanism about clocksource changes
+ * @nohz_mode: Mode - one state of tick_nohz_mode
+ * @inidle: Indicator that the CPU is in the tick idle mode
+ * @tick_stopped: Indicator that the idle tick has been stopped
+ * @idle_active: Indicator that the CPU is actively in the tick idle mode;
+ * it is reset during irq handling phases.
+ * @do_timer_lst: CPU was the last one doing do_timer before going idle
+ * @got_idle_tick: Tick timer function has run with @inidle set
* @last_tick: Store the last tick expiry time when the tick
* timer is modified for nohz sleeps. This is necessary
* to resume the tick timer operation in the timeline
* when the CPU returns from nohz sleep.
* @next_tick: Next tick to be fired when in dynticks mode.
- * @tick_stopped: Indicator that the idle tick has been stopped
* @idle_jiffies: jiffies at the entry to idle for idle time accounting
* @idle_calls: Total number of idle calls
* @idle_sleeps: Number of idle calls, where the sched tick was stopped
@@ -40,8 +47,8 @@ enum tick_nohz_mode {
* @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
* @timer_expires: Anticipated timer expiration time (in case sched tick is stopped)
* @timer_expires_base: Base time clock monotonic for @timer_expires
- * @do_timer_lst: CPU was the last one doing do_timer before going idle
- * @got_idle_tick: Tick timer function has run with @inidle set
+ * @next_timer: Expiry time of next expiring timer, for debugging purposes only
+ * @tick_dep_mask: Tick dependency mask - set if someone needs the tick
*/
struct tick_sched {
struct hrtimer sched_timer;
diff --git a/kernel/time/time.c b/kernel/time/time.c
index c3f756f8534b..86656bbac232 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -171,7 +171,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
static int firsttime = 1;
int error = 0;
- if (tv && !timespec64_valid(tv))
+ if (tv && !timespec64_valid_settod(tv))
return -EINVAL;
error = security_settime64(tv, tz);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f986e1918d12..5716e28bfa3c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -720,7 +720,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
void ktime_get_real_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
- unsigned long seq;
+ unsigned int seq;
u64 nsecs;
WARN_ON(timekeeping_suspended);
@@ -829,7 +829,7 @@ EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
ktime_t *offset = offsets[offs];
- unsigned long seq;
+ unsigned int seq;
ktime_t tconv;
do {
@@ -960,7 +960,7 @@ time64_t __ktime_get_real_seconds(void)
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
struct timekeeper *tk = &tk_core.timekeeper;
- unsigned long seq;
+ unsigned int seq;
ktime_t base_raw;
ktime_t base_real;
u64 nsec_raw;
@@ -1122,7 +1122,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
ktime_t base_real, base_raw;
u64 nsec_real, nsec_raw;
u8 cs_was_changed_seq;
- unsigned long seq;
+ unsigned int seq;
bool do_interp;
int ret;
@@ -1221,7 +1221,7 @@ int do_settimeofday64(const struct timespec64 *ts)
unsigned long flags;
int ret = 0;
- if (!timespec64_valid_strict(ts))
+ if (!timespec64_valid_settod(ts))
return -EINVAL;
raw_spin_lock_irqsave(&timekeeper_lock, flags);
@@ -1278,7 +1278,7 @@ static int timekeeping_inject_offset(const struct timespec64 *ts)
/* Make sure the proposed value is valid */
tmp = timespec64_add(tk_xtime(tk), *ts);
if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
- !timespec64_valid_strict(&tmp)) {
+ !timespec64_valid_settod(&tmp)) {
ret = -EINVAL;
goto error;
}
@@ -1409,7 +1409,7 @@ int timekeeping_notify(struct clocksource *clock)
void ktime_get_raw_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
- unsigned long seq;
+ unsigned int seq;
u64 nsecs;
do {
@@ -1431,7 +1431,7 @@ EXPORT_SYMBOL(ktime_get_raw_ts64);
int timekeeping_valid_for_hres(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
- unsigned long seq;
+ unsigned int seq;
int ret;
do {
@@ -1450,7 +1450,7 @@ int timekeeping_valid_for_hres(void)
u64 timekeeping_max_deferment(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
- unsigned long seq;
+ unsigned int seq;
u64 ret;
do {
@@ -1527,7 +1527,7 @@ void __init timekeeping_init(void)
unsigned long flags;
read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
- if (timespec64_valid_strict(&wall_time) &&
+ if (timespec64_valid_settod(&wall_time) &&
timespec64_to_ns(&wall_time) > 0) {
persistent_clock_exists = true;
} else if (timespec64_to_ns(&wall_time) != 0) {
@@ -2150,7 +2150,7 @@ EXPORT_SYMBOL_GPL(getboottime64);
void ktime_get_coarse_real_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
- unsigned long seq;
+ unsigned int seq;
do {
seq = read_seqcount_begin(&tk_core.seq);
@@ -2164,7 +2164,7 @@ void ktime_get_coarse_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct timespec64 now, mono;
- unsigned long seq;
+ unsigned int seq;
do {
seq = read_seqcount_begin(&tk_core.seq);
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 7a9b4eb7a1d5..141ab3ab0354 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void);
extern void timekeeping_warp_clock(void);
extern int timekeeping_suspend(void);
extern void timekeeping_resume(void);
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+extern int sched_clock_suspend(void);
+extern void sched_clock_resume(void);
+#else
+static inline int sched_clock_suspend(void) { return 0; }
+static inline void sched_clock_resume(void) { }
+#endif
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 6502c3ed317e..343c7ba33b1c 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -536,6 +536,8 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
hlist_add_head(&timer->entry, base->vectors + idx);
__set_bit(idx, base->pending_map);
timer_set_idx(timer, idx);
+
+ trace_timer_start(timer, timer->expires, timer->flags);
}
static void
@@ -757,13 +759,6 @@ static inline void debug_init(struct timer_list *timer)
trace_timer_init(timer);
}
-static inline void
-debug_activate(struct timer_list *timer, unsigned long expires)
-{
- debug_timer_activate(timer);
- trace_timer_start(timer, expires, timer->flags);
-}
-
static inline void debug_deactivate(struct timer_list *timer)
{
debug_timer_deactivate(timer);
@@ -1037,7 +1032,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
}
}
- debug_activate(timer, expires);
+ debug_timer_activate(timer);
timer->expires = expires;
/*
@@ -1171,7 +1166,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
}
forward_timer_base(base);
- debug_activate(timer, timer->expires);
+ debug_timer_activate(timer);
internal_add_timer(base, timer);
raw_spin_unlock_irqrestore(&base->lock, flags);
}
@@ -1298,7 +1293,9 @@ int del_timer_sync(struct timer_list *timer)
EXPORT_SYMBOL(del_timer_sync);
#endif
-static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list *))
+static void call_timer_fn(struct timer_list *timer,
+ void (*fn)(struct timer_list *),
+ unsigned long baseclk)
{
int count = preempt_count();
@@ -1321,7 +1318,7 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list
*/
lock_map_acquire(&lockdep_map);
- trace_timer_expire_entry(timer);
+ trace_timer_expire_entry(timer, baseclk);
fn(timer);
trace_timer_expire_exit(timer);
@@ -1342,6 +1339,13 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list
static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
+ /*
+ * This value is required only for tracing. base->clk was
+ * incremented directly before expire_timers was called. But expiry
+ * is related to the old base->clk value.
+ */
+ unsigned long baseclk = base->clk - 1;
+
while (!hlist_empty(head)) {
struct timer_list *timer;
void (*fn)(struct timer_list *);
@@ -1355,11 +1359,11 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
if (timer->flags & TIMER_IRQSAFE) {
raw_spin_unlock(&base->lock);
- call_timer_fn(timer, fn);
+ call_timer_fn(timer, fn, baseclk);
raw_spin_lock(&base->lock);
} else {
raw_spin_unlock_irq(&base->lock);
- call_timer_fn(timer, fn);
+ call_timer_fn(timer, fn, baseclk);
raw_spin_lock_irq(&base->lock);
}
}
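The baseclk comment above records a subtle off-by-one: the caller advances base->clk before expiring the bucket, so the expiring timers still belong to the previous clock value. A toy model of that relationship (illustrative only, not the kernel's timer wheel):

#include <stdio.h>

int main(void)
{
	unsigned long clk = 100;          /* bucket 100 is due now */

	clk++;                            /* caller advances the clock first */

	unsigned long baseclk = clk - 1;  /* expiry belongs to the old value */
	printf("expiring bucket %lu (clock is now %lu)\n", baseclk, clk);
	return 0;
}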
diff --git a/kernel/torture.c b/kernel/torture.c
index 8faa1a9aaeb9..17b2be9bde12 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -88,6 +88,8 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
return false;
+ if (num_online_cpus() <= 1)
+ return false; /* Can't offline the last CPU. */
if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index fa8b1fe824f3..8bd1d6d001d7 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -370,6 +370,7 @@ config PROFILE_ANNOTATED_BRANCHES
config PROFILE_ALL_BRANCHES
bool "Profile all if conditionals" if !FORTIFY_SOURCE
select TRACE_BRANCH_PROFILING
+ imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
help
This tracer profiles all branch conditions. Every if ()
taken in the kernel is recorded whether it hit or miss.
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index fac0ddf8a8e2..e1c6d79fb4cc 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -723,6 +723,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
#endif
case BLKTRACESTART:
start = 1;
+ /* fall through */
case BLKTRACESTOP:
ret = __blk_trace_startstop(q, start);
break;
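The /* fall through */ comment added above is the marker GCC's -Wimplicit-fallthrough recognizes, distinguishing a deliberate fall-through from a forgotten break. A minimal self-contained illustration of the idiom:

#include <stdio.h>

static int startstop(int cmd)
{
	int start = 0;

	switch (cmd) {
	case 1:                 /* "start" */
		start = 1;
		/* fall through */
	case 2:                 /* "stop": shares the common tail */
		printf("startstop(start=%d)\n", start);
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	startstop(1);           /* prints start=1 */
	startstop(2);           /* prints start=0 */
	return 0;
}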
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d64c00afceb5..94b0e37d90ef 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -14,6 +14,8 @@
#include <linux/syscalls.h>
#include <linux/error-injection.h>
+#include <asm/tlb.h>
+
#include "trace_probe.h"
#include "trace.h"
@@ -163,6 +165,10 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
* access_ok() should prevent writing to non-user memory, but in
* some situations (nommu, temporary switch, etc) access_ok() does
* not provide enough validation, hence the check on KERNEL_DS.
+ *
+ * nmi_uaccess_okay() ensures the probe is not run in an interim
+ * state where the task or the mm is being switched. This is
+ * specifically required to prevent the use of a temporary mm.
*/
if (unlikely(in_interrupt() ||
@@ -170,6 +176,8 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
return -EPERM;
if (unlikely(uaccess_kernel()))
return -EPERM;
+ if (unlikely(!nmi_uaccess_okay()))
+ return -EPERM;
if (!access_ok(unsafe_ptr, size))
return -EPERM;
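A toy mock-up of the resulting guard ladder in bpf_probe_write_user(): every predicate below is a stand-in stub, and only the ordering and the early -EPERM returns mirror the hunk above; the new nmi_uaccess_okay() check sits before the final access_ok() test:

#include <errno.h>
#include <stdio.h>

static int in_interrupt_stub(void)     { return 0; }
static int uaccess_kernel_stub(void)   { return 0; }
static int nmi_uaccess_okay_stub(void) { return 1; } /* no interim mm here */
static int access_ok_stub(void)        { return 1; }

static int probe_write_user(void)
{
	if (in_interrupt_stub())
		return -EPERM;
	if (uaccess_kernel_stub())
		return -EPERM;
	if (!nmi_uaccess_okay_stub())   /* the newly added check */
		return -EPERM;
	if (!access_ok_stub())
		return -EPERM;
	return 0;                       /* only now is the copy attempted */
}

int main(void)
{
	printf("probe_write_user() -> %d\n", probe_write_user());
	return 0;
}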
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index aac7847c0214..b920358dd8f7 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
+#include <linux/kprobes.h>
#include <trace/events/sched.h>
@@ -1992,7 +1993,7 @@ static void print_bug_type(void)
* modifying the code. @failed should be one of either:
* EFAULT - if the problem happens on reading the @ip address
* EINVAL - if what is read at @ip is not what was expected
- * EPERM - if the problem happens on writting to the @ip address
+ * EPERM - if the problem happens on writing to the @ip address
*/
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
@@ -2391,7 +2392,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
}
- return -1; /* unknow ftrace bug */
+ return -1; /* unknown ftrace bug */
}
void __weak ftrace_replace_code(int mod_flags)
@@ -3004,7 +3005,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
int cnt;
if (!num_to_init)
- return 0;
+ return NULL;
start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg)
@@ -3702,6 +3703,31 @@ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
}
static int
+add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
+ int clear_filter)
+{
+ long index = simple_strtoul(func_g->search, NULL, 0);
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+
+ /* The index starts at 1 */
+ if (--index < 0)
+ return 0;
+
+ do_for_each_ftrace_rec(pg, rec) {
+ if (pg->index <= index) {
+ index -= pg->index;
+ /* this is a double loop, break goes to the next page */
+ break;
+ }
+ rec = &pg->records[index];
+ enter_record(hash, rec, clear_filter);
+ return 1;
+ } while_for_each_ftrace_rec();
+ return 0;
+}
+
+static int
ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
struct ftrace_glob *mod_g, int exclude_mod)
{
@@ -3769,6 +3795,11 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
if (unlikely(ftrace_disabled))
goto out_unlock;
+ if (func_g.type == MATCH_INDEX) {
+ found = add_rec_by_index(hash, &func_g, clear_filter);
+ goto out_unlock;
+ }
+
do_for_each_ftrace_rec(pg, rec) {
if (rec->flags & FTRACE_FL_DISABLED)
@@ -4725,7 +4756,7 @@ static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
int reset, int enable)
{
- return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
+ return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
}
/**
@@ -5433,7 +5464,7 @@ void ftrace_create_filter_files(struct ftrace_ops *ops,
/*
* The name "destroy_filter_files" is really a misnomer. Although
- * in the future, it may actualy delete the files, but this is
+ * in the future, it may actually delete the files, but this is
* really intended to make sure the ops passed in are disabled
* and that when this function returns, the caller is free to
* free the ops.
@@ -5756,7 +5787,7 @@ void ftrace_module_enable(struct module *mod)
/*
* If the tracing is enabled, go ahead and enable the record.
*
- * The reason not to enable the record immediatelly is the
+ * The reason not to enable the record immediately is the
* inherent check of ftrace_make_nop/ftrace_make_call for
* correct previous instructions. Making first the NOP
* conversion puts the module to the correct state, thus
@@ -6216,7 +6247,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
tr->ops->func = ftrace_stub;
}
-static inline void
+static nokprobe_inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ignored, struct pt_regs *regs)
{
@@ -6276,11 +6307,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
{
__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
#endif
/*
@@ -6307,6 +6340,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
preempt_enable_notrace();
trace_clear_recursion(bit);
}
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
/**
* ftrace_ops_get_func - get the function a trampoline should call
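add_rec_by_index() above resolves a 1-based index across pages of records by subtracting each page's count until the target page is found. A self-contained analogue with invented types:

#include <stdio.h>

struct page { int count; int first; struct page *next; };

static int find_by_index(struct page *pg, long index)
{
	if (--index < 0)                 /* user-visible index starts at 1 */
		return -1;
	for (; pg; pg = pg->next) {
		if (index >= pg->count) {
			index -= pg->count;  /* not on this page, skip it */
			continue;
		}
		return pg->first + (int)index;  /* record id on this page */
	}
	return -1;                       /* index past the last record */
}

int main(void)
{
	struct page p2 = { 3, 100, NULL };  /* records 100..102 */
	struct page p1 = { 2, 0,   &p2 };   /* records 0..1 */

	printf("index 1 -> record %d\n", find_by_index(&p1, 1));  /* 0 */
	printf("index 4 -> record %d\n", find_by_index(&p1, 4));  /* 101 */
	return 0;
}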
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 06e864a334bb..4ee8d8aa3d0f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -353,20 +353,6 @@ static void rb_init_page(struct buffer_data_page *bpage)
local_set(&bpage->commit, 0);
}
-/**
- * ring_buffer_page_len - the size of data on the page.
- * @page: The page to read
- *
- * Returns the amount of data on the page, including buffer page header.
- */
-size_t ring_buffer_page_len(void *page)
-{
- struct buffer_data_page *bpage = page;
-
- return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
- + BUF_PAGE_HDR_SIZE;
-}
-
/*
* Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
* this issue out.
@@ -776,7 +762,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
preempt_disable_notrace();
time = rb_time_stamp(buffer);
- preempt_enable_no_resched_notrace();
+ preempt_enable_notrace();
return time;
}
@@ -4205,6 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
* ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
* @buffer: The ring buffer to read from
* @cpu: The cpu buffer to iterate over
+ * @flags: gfp flags to use for memory allocation
*
* This performs the initial preparations necessary to iterate
* through the buffer. Memory is allocated, buffer recording
@@ -4222,7 +4209,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
* This overall must be paired with ring_buffer_read_finish.
*/
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_iter *iter;
@@ -4230,7 +4217,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ iter = kmalloc(sizeof(*iter), flags);
if (!iter)
return NULL;
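Threading gfp flags into ring_buffer_read_prepare() lets callers in atomic context pick a non-sleeping allocation rather than inheriting the hardcoded GFP_KERNEL. A sketch of the same pattern with invented flag names:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int alloc_flags_t;
#define DEMO_MAY_SLEEP 0x1u      /* like GFP_KERNEL */
#define DEMO_ATOMIC    0x2u      /* like GFP_ATOMIC: must not sleep */

static void *iter_alloc(size_t size, alloc_flags_t flags)
{
	if (flags & DEMO_ATOMIC)
		fprintf(stderr, "atomic caller: no sleeping allocation\n");
	return malloc(size);         /* stand-in for kmalloc(size, flags) */
}

int main(void)
{
	void *it = iter_alloc(64, DEMO_ATOMIC);  /* caller picks the policy */

	free(it);
	return 0;
}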
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c4238b441624..ec439999f387 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -159,6 +159,8 @@ static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int tracing_set_tracer(struct trace_array *tr, const char *buf);
+static void ftrace_trace_userstack(struct ring_buffer *buffer,
+ unsigned long flags, int pc);
#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -496,8 +498,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
* not modified.
*/
pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
- if (!pid_list)
+ if (!pid_list) {
+ trace_parser_put(&parser);
return -ENOMEM;
+ }
pid_list->pid_max = READ_ONCE(pid_max);
@@ -507,6 +511,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
if (!pid_list->pids) {
+ trace_parser_put(&parser);
kfree(pid_list);
return -ENOMEM;
}
@@ -894,7 +899,7 @@ int __trace_bputs(unsigned long ip, const char *str)
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
-void tracing_snapshot_instance(struct trace_array *tr)
+void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
{
struct tracer *tracer = tr->current_trace;
unsigned long flags;
@@ -920,10 +925,15 @@ void tracing_snapshot_instance(struct trace_array *tr)
}
local_irq_save(flags);
- update_max_tr(tr, current, smp_processor_id());
+ update_max_tr(tr, current, smp_processor_id(), cond_data);
local_irq_restore(flags);
}
+void tracing_snapshot_instance(struct trace_array *tr)
+{
+ tracing_snapshot_instance_cond(tr, NULL);
+}
+
/**
* tracing_snapshot - take a snapshot of the current buffer.
*
@@ -946,6 +956,54 @@ void tracing_snapshot(void)
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
+/**
+ * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
+ * @tr: The tracing instance to snapshot
+ * @cond_data: The data to be tested conditionally, and possibly saved
+ *
+ * This is the same as tracing_snapshot() except that the snapshot is
+ * conditional - the snapshot will only happen if the
+ * cond_snapshot.update() implementation receiving the cond_data
+ * returns true, which means that the trace array's cond_snapshot
+ * update() operation used the cond_data to determine whether the
+ * snapshot should be taken, and if it was, presumably saved it along
+ * with the snapshot.
+ */
+void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
+{
+ tracing_snapshot_instance_cond(tr, cond_data);
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
+
+/**
+ * tracing_snapshot_cond_data - get the user data associated with a snapshot
+ * @tr: The tracing instance
+ *
+ * When the user enables a conditional snapshot using
+ * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
+ * with the snapshot. This accessor is used to retrieve it.
+ *
+ * Should not be called from cond_snapshot.update(), since it takes
+ * the tr->max_lock, which the code calling
+ * cond_snapshot.update() already holds.
+ *
+ * Returns the cond_data associated with the trace array's snapshot.
+ */
+void *tracing_cond_snapshot_data(struct trace_array *tr)
+{
+ void *cond_data = NULL;
+
+ arch_spin_lock(&tr->max_lock);
+
+ if (tr->cond_snapshot)
+ cond_data = tr->cond_snapshot->cond_data;
+
+ arch_spin_unlock(&tr->max_lock);
+
+ return cond_data;
+}
+EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
+
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
@@ -1025,12 +1083,111 @@ void tracing_snapshot_alloc(void)
tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+
+/**
+ * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
+ * @tr: The tracing instance
+ * @cond_data: User data to associate with the snapshot
+ * @update: Implementation of the cond_snapshot update function
+ *
+ * Check whether the conditional snapshot for the given instance has
+ * already been enabled, or if the current tracer is already using a
+ * snapshot; if so, return -EBUSY, else create a cond_snapshot and
+ * save the cond_data and update function inside.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
+ cond_update_fn_t update)
+{
+ struct cond_snapshot *cond_snapshot;
+ int ret = 0;
+
+ cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
+ if (!cond_snapshot)
+ return -ENOMEM;
+
+ cond_snapshot->cond_data = cond_data;
+ cond_snapshot->update = update;
+
+ mutex_lock(&trace_types_lock);
+
+ ret = tracing_alloc_snapshot_instance(tr);
+ if (ret)
+ goto fail_unlock;
+
+ if (tr->current_trace->use_max_tr) {
+ ret = -EBUSY;
+ goto fail_unlock;
+ }
+
+ /*
+ * The cond_snapshot can only change to NULL without the
+ * trace_types_lock. We don't care if we race with it going
+ * to NULL, but we want to make sure that it's not set to
+ * something other than NULL when we get here, which we can
+ * do safely with only holding the trace_types_lock and not
+ * having to take the max_lock.
+ */
+ if (tr->cond_snapshot) {
+ ret = -EBUSY;
+ goto fail_unlock;
+ }
+
+ arch_spin_lock(&tr->max_lock);
+ tr->cond_snapshot = cond_snapshot;
+ arch_spin_unlock(&tr->max_lock);
+
+ mutex_unlock(&trace_types_lock);
+
+ return ret;
+
+ fail_unlock:
+ mutex_unlock(&trace_types_lock);
+ kfree(cond_snapshot);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
+
+/**
+ * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
+ * @tr: The tracing instance
+ *
+ * Check whether the conditional snapshot for the given instance is
+ * enabled; if so, free the cond_snapshot associated with it,
+ * otherwise return -EINVAL.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int tracing_snapshot_cond_disable(struct trace_array *tr)
+{
+ int ret = 0;
+
+ arch_spin_lock(&tr->max_lock);
+
+ if (!tr->cond_snapshot)
+ ret = -EINVAL;
+ else {
+ kfree(tr->cond_snapshot);
+ tr->cond_snapshot = NULL;
+ }
+
+ arch_spin_unlock(&tr->max_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
+void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
+{
+ WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
@@ -1043,6 +1200,21 @@ void tracing_snapshot_alloc(void)
tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+void *tracing_cond_snapshot_data(struct trace_array *tr)
+{
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
+int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
+{
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
+int tracing_snapshot_cond_disable(struct trace_array *tr)
+{
+ return false;
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
@@ -1330,7 +1502,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
max_data->critical_start = data->critical_start;
max_data->critical_end = data->critical_end;
- memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
+ strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
max_data->pid = tsk->pid;
/*
* If tsk == current, then use current_uid(), as that does not use
@@ -1354,12 +1526,14 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
* @tr: tracer
* @tsk: the task with the latency
* @cpu: The cpu that initiated the trace.
+ * @cond_data: User data associated with a conditional snapshot
*
* Flip the buffers between the @tr and the max_tr and record information
* about which task was the cause of this latency.
*/
void
-update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
+ void *cond_data)
{
if (tr->stop_count)
return;
@@ -1380,9 +1554,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
else
ring_buffer_record_off(tr->max_buffer.buffer);
+#ifdef CONFIG_TRACER_SNAPSHOT
+ if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
+ goto out_unlock;
+#endif
swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
__update_max_tr(tr, tsk, cpu);
+
+ out_unlock:
arch_spin_unlock(&tr->max_lock);
}
@@ -1748,7 +1928,7 @@ static inline char *get_saved_cmdlines(int idx)
static inline void set_cmdline(int idx, const char *cmdline)
{
- memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
+ strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
static int allocate_cmdlines_buffer(unsigned int val,
@@ -2574,12 +2754,21 @@ trace_function(struct trace_array *tr,
#ifdef CONFIG_STACKTRACE
-#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
+#define FTRACE_KSTACK_NESTING 4
+
+#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
+
struct ftrace_stack {
- unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
+ unsigned long calls[FTRACE_KSTACK_ENTRIES];
+};
+
+struct ftrace_stacks {
+ struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
};
-static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct ring_buffer *buffer,
@@ -2588,13 +2777,10 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
{
struct trace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
+ unsigned int size, nr_entries;
+ struct ftrace_stack *fstack;
struct stack_entry *entry;
- struct stack_trace trace;
- int use_stack;
- int size = FTRACE_STACK_ENTRIES;
-
- trace.nr_entries = 0;
- trace.skip = skip;
+ int stackidx;
/*
* Add one, for this function and the call to save_stack_trace()
@@ -2602,7 +2788,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
*/
#ifndef CONFIG_UNWINDER_ORC
if (!regs)
- trace.skip++;
+ skip++;
#endif
/*
@@ -2613,53 +2799,40 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
*/
preempt_disable_notrace();
- use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
+ stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
+
+ /* This should never happen. If it does, yell once and skip */
+ if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
+ goto out;
+
/*
- * We don't need any atomic variables, just a barrier.
- * If an interrupt comes in, we don't care, because it would
- * have exited and put the counter back to what we want.
- * We just need a barrier to keep gcc from moving things
- * around.
+ * The above __this_cpu_inc_return() is 'atomic' cpu local. An
+ * interrupt will either see the value before or after the
+ * increment. If the interrupt happens before the increment it will have
+ * restored the counter when it returns. We just need a barrier to
+ * keep gcc from moving things around.
*/
barrier();
- if (use_stack == 1) {
- trace.entries = this_cpu_ptr(ftrace_stack.calls);
- trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
- if (regs)
- save_stack_trace_regs(regs, &trace);
- else
- save_stack_trace(&trace);
+ fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
+ size = ARRAY_SIZE(fstack->calls);
- if (trace.nr_entries > size)
- size = trace.nr_entries;
- } else
- /* From now on, use_stack is a boolean */
- use_stack = 0;
-
- size *= sizeof(unsigned long);
+ if (regs) {
+ nr_entries = stack_trace_save_regs(regs, fstack->calls,
+ size, skip);
+ } else {
+ nr_entries = stack_trace_save(fstack->calls, size, skip);
+ }
+ size = nr_entries * sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
sizeof(*entry) + size, flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
- memset(&entry->caller, 0, size);
-
- if (use_stack)
- memcpy(&entry->caller, trace.entries,
- trace.nr_entries * sizeof(unsigned long));
- else {
- trace.max_entries = FTRACE_STACK_ENTRIES;
- trace.entries = entry->caller;
- if (regs)
- save_stack_trace_regs(regs, &trace);
- else
- save_stack_trace(&trace);
- }
-
- entry->size = trace.nr_entries;
+ memcpy(&entry->caller, fstack->calls, size);
+ entry->size = nr_entries;
if (!call_filter_check_discard(call, entry, buffer, event))
__buffer_unlock_commit(buffer, event);
@@ -2729,15 +2902,15 @@ void trace_dump_stack(int skip)
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
static DEFINE_PER_CPU(int, user_stack_count);
-void
+static void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
- struct stack_trace trace;
if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
@@ -2768,12 +2941,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
entry->tgid = current->tgid;
memset(&entry->caller, 0, sizeof(entry->caller));
- trace.nr_entries = 0;
- trace.max_entries = FTRACE_STACK_ENTRIES;
- trace.skip = 0;
- trace.entries = entry->caller;
-
- save_stack_trace_user(&trace);
+ stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
if (!call_filter_check_discard(call, entry, buffer, event))
__buffer_unlock_commit(buffer, event);
@@ -2782,13 +2950,12 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
out:
preempt_enable();
}
-
-#ifdef UNUSED
-static void __trace_userstack(struct trace_array *tr, unsigned long flags)
+#else /* CONFIG_USER_STACKTRACE_SUPPORT */
+static void ftrace_trace_userstack(struct ring_buffer *buffer,
+ unsigned long flags, int pc)
{
- ftrace_trace_userstack(tr, flags, preempt_count());
}
-#endif /* UNUSED */
+#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
#endif /* CONFIG_STACKTRACE */
@@ -3904,7 +4071,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+ ring_buffer_read_prepare(iter->trace_buffer->buffer,
+ cpu, GFP_KERNEL);
}
ring_buffer_read_prepare_sync();
for_each_tracing_cpu(cpu) {
@@ -3914,7 +4082,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+ ring_buffer_read_prepare(iter->trace_buffer->buffer,
+ cpu, GFP_KERNEL);
ring_buffer_read_prepare_sync();
ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
@@ -4702,6 +4871,7 @@ static const char readme_msg[] =
"\t [:size=#entries]\n"
"\t [:pause][:continue][:clear]\n"
"\t [:name=histname1]\n"
+ "\t [:<handler>.<action>]\n"
"\t [if <filter>]\n\n"
"\t When a matching event is hit, an entry is added to a hash\n"
"\t table using the key(s) and value(s) named, and the value of a\n"
@@ -4742,8 +4912,21 @@ static const char readme_msg[] =
"\t unchanged.\n\n"
"\t The enable_hist and disable_hist triggers can be used to\n"
"\t have one event conditionally start and stop another event's\n"
- "\t already-attached hist trigger. The syntax is analagous to\n"
- "\t the enable_event and disable_event triggers.\n"
+ "\t already-attached hist trigger. The syntax is analogous to\n"
+ "\t the enable_event and disable_event triggers.\n\n"
+ "\t Hist trigger handlers and actions are executed whenever a\n"
+ "\t a histogram entry is added or updated. They take the form:\n\n"
+ "\t <handler>.<action>\n\n"
+ "\t The available handlers are:\n\n"
+ "\t onmatch(matching.event) - invoke on addition or update\n"
+ "\t onmax(var) - invoke if var exceeds current max\n"
+ "\t onchange(var) - invoke action if var changes\n\n"
+ "\t The available actions are:\n\n"
+ "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
+ "\t save(field,...) - save current event fields\n"
+#ifdef CONFIG_TRACER_SNAPSHOT
+ "\t snapshot() - snapshot the trace buffer\n"
+#endif
#endif
;
@@ -5388,6 +5571,16 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (t == tr->current_trace)
goto out;
+#ifdef CONFIG_TRACER_SNAPSHOT
+ if (t->use_max_tr) {
+ arch_spin_lock(&tr->max_lock);
+ if (tr->cond_snapshot)
+ ret = -EBUSY;
+ arch_spin_unlock(&tr->max_lock);
+ if (ret)
+ goto out;
+ }
+#endif
/* Some tracers won't work on kernel command line */
if (system_state < SYSTEM_RUNNING && t->noboot) {
pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
@@ -5626,7 +5819,6 @@ out:
return ret;
fail:
- kfree(iter->trace);
kfree(iter);
__trace_array_put(tr);
mutex_unlock(&trace_types_lock);
@@ -5825,7 +6017,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
}
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
- .can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = generic_pipe_buf_release,
.steal = generic_pipe_buf_steal,
@@ -6470,6 +6661,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
goto out;
}
+ arch_spin_lock(&tr->max_lock);
+ if (tr->cond_snapshot)
+ ret = -EBUSY;
+ arch_spin_unlock(&tr->max_lock);
+ if (ret)
+ goto out;
+
switch (val) {
case 0:
if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
@@ -6495,7 +6693,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
local_irq_disable();
/* Now, we're going to swap */
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
- update_max_tr(tr, current, smp_processor_id());
+ update_max_tr(tr, current, smp_processor_id(), NULL);
else
update_max_tr_single(tr, current, iter->cpu_file);
local_irq_enable();
@@ -6819,36 +7017,43 @@ struct buffer_ref {
struct ring_buffer *buffer;
void *page;
int cpu;
- int ref;
+ refcount_t refcount;
};
+static void buffer_ref_release(struct buffer_ref *ref)
+{
+ if (!refcount_dec_and_test(&ref->refcount))
+ return;
+ ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+ kfree(ref);
+}
+
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
- if (--ref->ref)
- return;
-
- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
- kfree(ref);
+ buffer_ref_release(ref);
buf->private = 0;
}
-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
- ref->ref++;
+ if (refcount_read(&ref->refcount) > INT_MAX/2)
+ return false;
+
+ refcount_inc(&ref->refcount);
+ return true;
}
/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
- .can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = buffer_pipe_buf_release,
- .steal = generic_pipe_buf_steal,
+ .steal = generic_pipe_buf_nosteal,
.get = buffer_pipe_buf_get,
};
@@ -6861,11 +7066,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
struct buffer_ref *ref =
(struct buffer_ref *)spd->partial[i].private;
- if (--ref->ref)
- return;
-
- ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
- kfree(ref);
+ buffer_ref_release(ref);
spd->partial[i].private = 0;
}
@@ -6920,7 +7121,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
break;
}
- ref->ref = 1;
+ refcount_set(&ref->refcount, 1);
ref->buffer = iter->trace_buffer->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
if (IS_ERR(ref->page)) {
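A userspace sketch of the buffer_ref lifetime after the refcount_t conversion above: get() refuses to pin another reference once the count nears overflow, and release() frees only on the final put. This is a single-threaded stand-in for refcount_t, illustrative only:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer_ref { unsigned int refcount; /* page, buffer, cpu ... */ };

static bool buffer_ref_get(struct buffer_ref *ref)
{
	if (ref->refcount > INT_MAX / 2)  /* saturation guard */
		return false;
	ref->refcount++;
	return true;
}

static void buffer_ref_release(struct buffer_ref **refp)
{
	struct buffer_ref *ref = *refp;

	if (--ref->refcount)
		return;
	free(ref);                       /* final put frees the page */
	*refp = NULL;
}

int main(void)
{
	struct buffer_ref *ref = calloc(1, sizeof(*ref));

	if (!ref)
		return 1;
	ref->refcount = 1;

	if (buffer_ref_get(ref))         /* splice pins a second reference */
		buffer_ref_release(&ref);    /* ... and later drops it */
	buffer_ref_release(&ref);        /* owner's put frees it */

	printf("ref is %s\n", ref ? "live" : "freed");
	return 0;
}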
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 08900828d282..639047b259d7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -194,6 +194,51 @@ struct trace_pid_list {
unsigned long *pids;
};
+typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
+
+/**
+ * struct cond_snapshot - conditional snapshot data and callback
+ *
+ * The cond_snapshot structure encapsulates a callback function and
+ * data associated with the snapshot for a given tracing instance.
+ *
+ * When a snapshot is taken conditionally, by invoking
+ * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
+ * passed in turn to the cond_snapshot.update() function. That data
+ * can be compared by the update() implementation with the cond_data
+ * contained within the struct cond_snapshot instance associated with
+ * the trace_array. Because the tr->max_lock is held throughout the
+ * update() call, the update() function can directly retrieve the
+ * cond_snapshot and cond_data associated with the per-instance
+ * snapshot of the trace_array.
+ *
+ * The cond_snapshot.update() implementation can save data to be
+ * associated with the snapshot if it decides to, and returns 'true'
+ * in that case, or it returns 'false' if the conditional snapshot
+ * shouldn't be taken.
+ *
+ * The cond_snapshot instance is created and associated with the
+ * user-defined cond_data by tracing_cond_snapshot_enable().
+ * Likewise, the cond_snapshot instance is destroyed and is no longer
+ * associated with the trace instance by
+ * tracing_cond_snapshot_disable().
+ *
+ * The method below is required.
+ *
+ * @update: When a conditional snapshot is invoked, the update()
+ * callback function is invoked with the tr->max_lock held. The
+ * update() implementation signals whether or not to actually
+ * take the snapshot, by returning 'true' if so, 'false' if no
+ * snapshot should be taken. Because the max_lock is held for
+ * the duration of update(), the implementation is safe to
+ * directly retrieve and save any implementation data it needs
+ * to in association with the snapshot.
+ */
+struct cond_snapshot {
+ void *cond_data;
+ cond_update_fn_t update;
+};
+
/*
* The trace array - an array of per-CPU trace arrays. This is the
* highest level data structure that individual tracers deal with.
@@ -277,6 +322,9 @@ struct trace_array {
#endif
int time_stamp_abs_ref;
struct list_head hist_vars;
+#ifdef CONFIG_TRACER_SNAPSHOT
+ struct cond_snapshot *cond_snapshot;
+#endif
};
enum {
@@ -727,23 +775,16 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
const char __user *ubuf, size_t cnt);
#ifdef CONFIG_TRACER_MAX_TRACE
-void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
+void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
+ void *cond_data);
void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
-void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
- int pc);
-
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc);
#else
-static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
- unsigned long flags, int pc)
-{
-}
-
static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
int skip, int pc)
{
@@ -855,10 +896,11 @@ static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
#define TRACE_GRAPH_PRINT_PROC 0x8
#define TRACE_GRAPH_PRINT_DURATION 0x10
#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
-#define TRACE_GRAPH_PRINT_IRQS 0x40
-#define TRACE_GRAPH_PRINT_TAIL 0x80
-#define TRACE_GRAPH_SLEEP_TIME 0x100
-#define TRACE_GRAPH_GRAPH_TIME 0x200
+#define TRACE_GRAPH_PRINT_REL_TIME 0x40
+#define TRACE_GRAPH_PRINT_IRQS 0x80
+#define TRACE_GRAPH_PRINT_TAIL 0x100
+#define TRACE_GRAPH_SLEEP_TIME 0x200
+#define TRACE_GRAPH_GRAPH_TIME 0x400
#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
@@ -1458,6 +1500,7 @@ enum regex_type {
MATCH_MIDDLE_ONLY,
MATCH_END_ONLY,
MATCH_GLOB,
+ MATCH_INDEX,
};
struct regex {
@@ -1808,6 +1851,11 @@ static inline bool event_command_needs_rec(struct event_command *cmd_ops)
extern int trace_event_enable_disable(struct trace_event_file *file,
int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
+extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
+extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
+
+extern int tracing_snapshot_cond_disable(struct trace_array *tr);
+extern void *tracing_cond_snapshot_data(struct trace_array *tr);
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
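A userspace analogue of the cond_snapshot contract declared above: update() is called with the max lock held and returns true only when the snapshot should actually be taken. All names here are illustrative, not kernel API:

#include <stdbool.h>
#include <stdio.h>

typedef bool (*cond_update_fn_t)(void *cond_data, unsigned long long val);

struct cond_snapshot { void *cond_data; cond_update_fn_t update; };

static bool update_if_over_threshold(void *cond_data, unsigned long long val)
{
	unsigned long long *threshold = cond_data;

	return val > *threshold;         /* true: take the snapshot */
}

static void maybe_snapshot(struct cond_snapshot *cs, unsigned long long val)
{
	/* in the kernel, tr->max_lock would be held around update() */
	if (cs->update(cs->cond_data, val))
		printf("snapshot taken at %llu\n", val);
	else
		printf("snapshot skipped at %llu\n", val);
}

int main(void)
{
	unsigned long long threshold = 100;
	struct cond_snapshot cs = { &threshold, update_if_over_threshold };

	maybe_snapshot(&cs, 42);         /* skipped */
	maybe_snapshot(&cs, 150);        /* taken */
	return 0;
}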
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 4ad967453b6f..3ea65cdff30d 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
int expect, int is_constant)
{
+ unsigned long flags = user_access_save();
+
/* A constant is always correct */
if (is_constant) {
f->constant++;
@@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
f->data.correct++;
else
f->data.incorrect++;
+
+ user_access_restore(flags);
}
EXPORT_SYMBOL(ftrace_likely_update);
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index dd1f43588d70..fa100ed3b4de 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -74,7 +74,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
static int create_dyn_event(int argc, char **argv)
{
struct dyn_event_operations *ops;
- int ret;
+ int ret = -ENODEV;
if (argv[0][0] == '-' || argv[0][0] == '!')
return dyn_event_release(argc, argv, NULL);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 06bb2fd9a56c..fc8e97328e54 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -65,7 +65,8 @@ FTRACE_ENTRY_REG(function, ftrace_entry,
__field( unsigned long, parent_ip )
),
- F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip),
+ F_printk(" %ps <-- %ps",
+ (void *)__entry->ip, (void *)__entry->parent_ip),
FILTER_TRACE_FN,
@@ -83,7 +84,7 @@ FTRACE_ENTRY_PACKED(funcgraph_entry, ftrace_graph_ent_entry,
__field_desc( int, graph_ent, depth )
),
- F_printk("--> %lx (%d)", __entry->func, __entry->depth),
+ F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth),
FILTER_OTHER
);
@@ -102,8 +103,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
__field_desc( int, ret, depth )
),
- F_printk("<-- %lx (%d) (start: %llx end: %llx) over: %d",
- __entry->func, __entry->depth,
+ F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
+ (void *)__entry->func, __entry->depth,
__entry->calltime, __entry->rettime,
__entry->depth),
@@ -167,12 +168,6 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
#define FTRACE_STACK_ENTRIES 8
-#ifndef CONFIG_64BIT
-# define IP_FMT "%08lx"
-#else
-# define IP_FMT "%016lx"
-#endif
-
FTRACE_ENTRY(kernel_stack, stack_entry,
TRACE_STACK,
@@ -182,12 +177,13 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
__dynamic_array(unsigned long, caller )
),
- F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
- "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
- "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
- __entry->caller[0], __entry->caller[1], __entry->caller[2],
- __entry->caller[3], __entry->caller[4], __entry->caller[5],
- __entry->caller[6], __entry->caller[7]),
+ F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"
+ "\t=> %ps\n\t=> %ps\n\t=> %ps\n"
+ "\t=> %ps\n\t=> %ps\n",
+ (void *)__entry->caller[0], (void *)__entry->caller[1],
+ (void *)__entry->caller[2], (void *)__entry->caller[3],
+ (void *)__entry->caller[4], (void *)__entry->caller[5],
+ (void *)__entry->caller[6], (void *)__entry->caller[7]),
FILTER_OTHER
);
@@ -201,12 +197,13 @@ FTRACE_ENTRY(user_stack, userstack_entry,
__array( unsigned long, caller, FTRACE_STACK_ENTRIES )
),
- F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
- "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
- "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
- __entry->caller[0], __entry->caller[1], __entry->caller[2],
- __entry->caller[3], __entry->caller[4], __entry->caller[5],
- __entry->caller[6], __entry->caller[7]),
+ F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"
+ "\t=> %ps\n\t=> %ps\n\t=> %ps\n"
+ "\t=> %ps\n\t=> %ps\n",
+ (void *)__entry->caller[0], (void *)__entry->caller[1],
+ (void *)__entry->caller[2], (void *)__entry->caller[3],
+ (void *)__entry->caller[4], (void *)__entry->caller[5],
+ (void *)__entry->caller[6], (void *)__entry->caller[7]),
FILTER_OTHER
);
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 76217bbef815..4629a6104474 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -299,15 +299,13 @@ int perf_uprobe_init(struct perf_event *p_event,
if (!p_event->attr.uprobe_path)
return -EINVAL;
- path = kzalloc(PATH_MAX, GFP_KERNEL);
- if (!path)
- return -ENOMEM;
- ret = strncpy_from_user(
- path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
- if (ret == PATH_MAX)
- return -E2BIG;
- if (ret < 0)
- goto out;
+
+ path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
+ PATH_MAX);
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
+ return (ret == -EINVAL) ? -E2BIG : ret;
+ }
if (path[0] == '\0') {
ret = -EINVAL;
goto out;
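The perf_uprobe_init() hunk leans on the ERR_PTR convention: a single pointer return carries either a valid buffer or a small negative errno, and the -EINVAL that strndup_user() reports for an overlong string is remapped to -E2BIG. A self-contained userspace analogue of the convention (constants illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static char *dup_path(const char *user_path, size_t max)
{
	char *p;

	if (strlen(user_path) >= max)
		return ERR_PTR(-22);     /* -EINVAL: string too long */
	p = strdup(user_path);
	return p ? p : ERR_PTR(-12); /* -ENOMEM */
}

int main(void)
{
	char *path = dup_path("/usr/bin/true", 4096);

	if (IS_ERR(path)) {
		printf("error %ld (caller would remap -EINVAL to -E2BIG)\n",
		       PTR_ERR(path));
		return 1;
	}
	printf("path = %s\n", path);
	free(path);
	return 0;
}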
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 217ef481fbbb..05a66493a164 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -491,10 +491,12 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
break;
case '&':
case '|':
+ /* accepting only "&&" or "||" */
if (next[1] == next[0]) {
ptr++;
break;
}
+ /* fall through */
default:
parse_error(pe, FILT_ERR_TOO_MANY_PREDS,
next - str);
@@ -823,6 +825,9 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
*search = buff;
+ if (isdigit(buff[0]))
+ return MATCH_INDEX;
+
for (i = 0; i < len; i++) {
if (buff[i] == '*') {
if (!i) {
@@ -860,6 +865,8 @@ static void filter_build_regex(struct filter_pred *pred)
}
switch (type) {
+ /* MATCH_INDEX should not happen, but if it does, match full */
+ case MATCH_INDEX:
case MATCH_FULL:
r->match = regex_match_full;
break;
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 449d90cfa151..a1d20421f4b0 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -313,9 +313,9 @@ struct hist_trigger_data {
struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX];
unsigned int n_field_var_hists;
- struct field_var *max_vars[SYNTH_FIELDS_MAX];
- unsigned int n_max_vars;
- unsigned int n_max_var_str;
+ struct field_var *save_vars[SYNTH_FIELDS_MAX];
+ unsigned int n_save_vars;
+ unsigned int n_save_var_str;
};
static int synth_event_create(int argc, const char **argv);
@@ -383,41 +383,157 @@ struct action_data;
typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
struct tracing_map_elt *elt, void *rec,
- struct ring_buffer_event *rbe,
+ struct ring_buffer_event *rbe, void *key,
struct action_data *data, u64 *var_ref_vals);
+typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);
+
+enum handler_id {
+ HANDLER_ONMATCH = 1,
+ HANDLER_ONMAX,
+ HANDLER_ONCHANGE,
+};
+
+enum action_id {
+ ACTION_SAVE = 1,
+ ACTION_TRACE,
+ ACTION_SNAPSHOT,
+};
+
struct action_data {
+ enum handler_id handler;
+ enum action_id action;
+ char *action_name;
action_fn_t fn;
+
unsigned int n_params;
char *params[SYNTH_FIELDS_MAX];
+ /*
+ * When a histogram trigger is hit, the values of any
+ * references to variables, including variables being passed
+ * as parameters to synthetic events, are collected into a
+ * var_ref_vals array. This var_ref_idx is the index of the
+ * first param in the array to be passed to the synthetic
+ * event invocation.
+ */
+ unsigned int var_ref_idx;
+ struct synth_event *synth_event;
+ bool use_trace_keyword;
+ char *synth_event_name;
+
union {
struct {
- /*
- * When a histogram trigger is hit, the values of any
- * references to variables, including variables being passed
- * as parameters to synthetic events, are collected into a
- * var_ref_vals array. This var_ref_idx is the index of the
- * first param in the array to be passed to the synthetic
- * event invocation.
- */
- unsigned int var_ref_idx;
- char *match_event;
- char *match_event_system;
- char *synth_event_name;
- struct synth_event *synth_event;
- } onmatch;
+ char *event;
+ char *event_system;
+ } match_data;
struct {
+ /*
+ * var_str contains the $-unstripped variable
+ * name referenced by var_ref, and used when
+ * printing the action. Because var_ref
+ * creation is deferred to create_actions(),
+ * we need a per-action way to save it until
+ * then, thus var_str.
+ */
char *var_str;
- char *fn_name;
- unsigned int max_var_ref_idx;
- struct hist_field *max_var;
- struct hist_field *var;
- } onmax;
+
+ /*
+ * var_ref refers to the variable being
+ * tracked e.g onmax($var).
+ */
+ struct hist_field *var_ref;
+
+ /*
+ * track_var contains the 'invisible' tracking
+ * variable created to hold the currently
+ * tracked value, e.g. the running max.
+ */
+ struct hist_field *track_var;
+
+ check_track_val_fn_t check_val;
+ action_fn_t save_data;
+ } track_data;
};
};
+struct track_data {
+ u64 track_val;
+ bool updated;
+
+ unsigned int key_len;
+ void *key;
+ struct tracing_map_elt elt;
+
+ struct action_data *action_data;
+ struct hist_trigger_data *hist_data;
+};
+
+struct hist_elt_data {
+ char *comm;
+ u64 *var_ref_vals;
+ char *field_var_str[SYNTH_FIELDS_MAX];
+};
+
+struct snapshot_context {
+ struct tracing_map_elt *elt;
+ void *key;
+};
+
+static void track_data_free(struct track_data *track_data)
+{
+ struct hist_elt_data *elt_data;
+
+ if (!track_data)
+ return;
+
+ kfree(track_data->key);
+
+ elt_data = track_data->elt.private_data;
+ if (elt_data) {
+ kfree(elt_data->comm);
+ kfree(elt_data);
+ }
+
+ kfree(track_data);
+}
+
+static struct track_data *track_data_alloc(unsigned int key_len,
+ struct action_data *action_data,
+ struct hist_trigger_data *hist_data)
+{
+ struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+ struct hist_elt_data *elt_data;
+
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ data->key = kzalloc(key_len, GFP_KERNEL);
+ if (!data->key) {
+ track_data_free(data);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ data->key_len = key_len;
+ data->action_data = action_data;
+ data->hist_data = hist_data;
+
+ elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
+ if (!elt_data) {
+ track_data_free(data);
+ return ERR_PTR(-ENOMEM);
+ }
+ data->elt.private_data = elt_data;
+
+ elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
+ if (!elt_data->comm) {
+ track_data_free(data);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return data;
+}
static char last_hist_cmd[MAX_FILTER_STR_VAL];
static char hist_err_str[MAX_FILTER_STR_VAL];
@@ -1078,12 +1194,12 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields,
static void action_trace(struct hist_trigger_data *hist_data,
struct tracing_map_elt *elt, void *rec,
- struct ring_buffer_event *rbe,
+ struct ring_buffer_event *rbe, void *key,
struct action_data *data, u64 *var_ref_vals)
{
- struct synth_event *event = data->onmatch.synth_event;
+ struct synth_event *event = data->synth_event;
- trace_synth(event, var_ref_vals, data->onmatch.var_ref_idx);
+ trace_synth(event, var_ref_vals, data->var_ref_idx);
}
struct hist_var_data {
@@ -1200,8 +1316,8 @@ static int synth_event_create(int argc, const char **argv)
/* This interface accepts group name prefix */
if (strchr(name, '/')) {
- len = sizeof(SYNTH_SYSTEM "/") - 1;
- if (strncmp(name, SYNTH_SYSTEM "/", len))
+ len = str_has_prefix(name, SYNTH_SYSTEM "/");
+ if (len == 0)
return -EINVAL;
name += len;
}
@@ -1644,9 +1760,9 @@ find_match_var(struct hist_trigger_data *hist_data, char *var_name)
for (i = 0; i < hist_data->n_actions; i++) {
struct action_data *data = hist_data->actions[i];
- if (data->fn == action_trace) {
- char *system = data->onmatch.match_event_system;
- char *event_name = data->onmatch.match_event;
+ if (data->handler == HANDLER_ONMATCH) {
+ char *system = data->match_data.event_system;
+ char *event_name = data->match_data.event;
file = find_var_file(tr, system, event_name, var_name);
if (!file)
@@ -1691,12 +1807,6 @@ static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
return hist_field;
}
-struct hist_elt_data {
- char *comm;
- u64 *var_ref_vals;
- char *field_var_str[SYNTH_FIELDS_MAX];
-};
-
static u64 hist_field_var_ref(struct hist_field *hist_field,
struct tracing_map_elt *elt,
struct ring_buffer_event *rbe,
@@ -1882,7 +1992,8 @@ static int parse_action(char *str, struct hist_trigger_attrs *attrs)
return ret;
if ((str_has_prefix(str, "onmatch(")) ||
- (str_has_prefix(str, "onmax("))) {
+ (str_has_prefix(str, "onmax(")) ||
+ (str_has_prefix(str, "onchange("))) {
attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
if (!attrs->action_str[attrs->n_actions]) {
ret = -ENOMEM;
@@ -2030,7 +2141,7 @@ static inline void save_comm(char *comm, struct task_struct *task)
return;
}
- memcpy(comm, task->comm, TASK_COMM_LEN);
+ strncpy(comm, task->comm, TASK_COMM_LEN);
}
static void hist_elt_data_free(struct hist_elt_data *elt_data)
@@ -2076,7 +2187,7 @@ static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
}
}
- n_str = hist_data->n_field_var_str + hist_data->n_max_var_str;
+ n_str = hist_data->n_field_var_str + hist_data->n_save_var_str;
size = STR_VAR_LEN_MAX;
@@ -3050,7 +3161,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
int ret;
if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
- hist_err_event("onmatch: Too many field variables defined: ",
+ hist_err_event("trace action: Too many field variables defined: ",
subsys_name, event_name, field_name);
return ERR_PTR(-EINVAL);
}
@@ -3058,7 +3169,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
file = event_file(tr, subsys_name, event_name);
if (IS_ERR(file)) {
- hist_err_event("onmatch: Event file not found: ",
+ hist_err_event("trace action: Event file not found: ",
subsys_name, event_name, field_name);
ret = PTR_ERR(file);
return ERR_PTR(ret);
@@ -3072,7 +3183,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
*/
hist_data = find_compatible_hist(target_hist_data, file);
if (!hist_data) {
- hist_err_event("onmatch: Matching event histogram not found: ",
+ hist_err_event("trace action: Matching event histogram not found: ",
subsys_name, event_name, field_name);
return ERR_PTR(-EINVAL);
}
@@ -3134,7 +3245,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
kfree(cmd);
kfree(var_hist->cmd);
kfree(var_hist);
- hist_err_event("onmatch: Couldn't create histogram for field: ",
+ hist_err_event("trace action: Couldn't create histogram for field: ",
subsys_name, event_name, field_name);
return ERR_PTR(ret);
}
@@ -3147,7 +3258,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
if (IS_ERR_OR_NULL(event_var)) {
kfree(var_hist->cmd);
kfree(var_hist);
- hist_err_event("onmatch: Couldn't find synthetic variable: ",
+ hist_err_event("trace action: Couldn't find synthetic variable: ",
subsys_name, event_name, field_name);
return ERR_PTR(-EINVAL);
}
@@ -3225,13 +3336,13 @@ static void update_field_vars(struct hist_trigger_data *hist_data,
hist_data->n_field_vars, 0);
}
-static void update_max_vars(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt,
- struct ring_buffer_event *rbe,
- void *rec)
+static void save_track_data_vars(struct hist_trigger_data *hist_data,
+ struct tracing_map_elt *elt, void *rec,
+ struct ring_buffer_event *rbe, void *key,
+ struct action_data *data, u64 *var_ref_vals)
{
- __update_field_vars(elt, rbe, rec, hist_data->max_vars,
- hist_data->n_max_vars, hist_data->n_field_var_str);
+ __update_field_vars(elt, rbe, rec, hist_data->save_vars,
+ hist_data->n_save_vars, hist_data->n_field_var_str);
}
static struct hist_field *create_var(struct hist_trigger_data *hist_data,
@@ -3366,18 +3477,190 @@ create_target_field_var(struct hist_trigger_data *target_hist_data,
return create_field_var(target_hist_data, file, var_name);
}
-static void onmax_print(struct seq_file *m,
- struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt,
- struct action_data *data)
+static bool check_track_val_max(u64 track_val, u64 var_val)
+{
+ if (var_val <= track_val)
+ return false;
+
+ return true;
+}
+
+static bool check_track_val_changed(u64 track_val, u64 var_val)
+{
+ if (var_val == track_val)
+ return false;
+
+ return true;
+}
+
+static u64 get_track_val(struct hist_trigger_data *hist_data,
+ struct tracing_map_elt *elt,
+ struct action_data *data)
+{
+ unsigned int track_var_idx = data->track_data.track_var->var.idx;
+ u64 track_val;
+
+ track_val = tracing_map_read_var(elt, track_var_idx);
+
+ return track_val;
+}
+
+static void save_track_val(struct hist_trigger_data *hist_data,
+ struct tracing_map_elt *elt,
+ struct action_data *data, u64 var_val)
+{
+ unsigned int track_var_idx = data->track_data.track_var->var.idx;
+
+ tracing_map_set_var(elt, track_var_idx, var_val);
+}
+
+static void save_track_data(struct hist_trigger_data *hist_data,
+ struct tracing_map_elt *elt, void *rec,
+ struct ring_buffer_event *rbe, void *key,
+ struct action_data *data, u64 *var_ref_vals)
+{
+ if (data->track_data.save_data)
+ data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
+}
+
+static bool check_track_val(struct tracing_map_elt *elt,
+ struct action_data *data,
+ u64 var_val)
+{
+ struct hist_trigger_data *hist_data;
+ u64 track_val;
+
+ hist_data = data->track_data.track_var->hist_data;
+ track_val = get_track_val(hist_data, elt, data);
+
+ return data->track_data.check_val(track_val, var_val);
+}
+
+#ifdef CONFIG_TRACER_SNAPSHOT
+static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
+{
+ /* called with tr->max_lock held */
+ struct track_data *track_data = tr->cond_snapshot->cond_data;
+ struct hist_elt_data *elt_data, *track_elt_data;
+ struct snapshot_context *context = cond_data;
+ u64 track_val;
+
+ if (!track_data)
+ return false;
+
+ track_val = get_track_val(track_data->hist_data, context->elt,
+ track_data->action_data);
+
+ track_data->track_val = track_val;
+ memcpy(track_data->key, context->key, track_data->key_len);
+
+ elt_data = context->elt->private_data;
+ track_elt_data = track_data->elt.private_data;
+ if (elt_data->comm)
+ strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);
+
+ track_data->updated = true;
+
+ return true;
+}
+
+static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
+ struct tracing_map_elt *elt, void *rec,
+ struct ring_buffer_event *rbe, void *key,
+ struct action_data *data,
+ u64 *var_ref_vals)
+{
+ struct trace_event_file *file = hist_data->event_file;
+ struct snapshot_context context;
+
+ context.elt = elt;
+ context.key = key;
+
+ tracing_snapshot_cond(file->tr, &context);
+}
+
+static void hist_trigger_print_key(struct seq_file *m,
+ struct hist_trigger_data *hist_data,
+ void *key,
+ struct tracing_map_elt *elt);
+
+static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
+{
+ unsigned int i;
+
+ if (!hist_data->n_actions)
+ return NULL;
+
+ for (i = 0; i < hist_data->n_actions; i++) {
+ struct action_data *data = hist_data->actions[i];
+
+ if (data->action == ACTION_SNAPSHOT)
+ return data;
+ }
+
+ return NULL;
+}
+
+static void track_data_snapshot_print(struct seq_file *m,
+ struct hist_trigger_data *hist_data)
{
- unsigned int i, save_var_idx, max_idx = data->onmax.max_var->var.idx;
+ struct trace_event_file *file = hist_data->event_file;
+ struct track_data *track_data;
+ struct action_data *action;
- seq_printf(m, "\n\tmax: %10llu", tracing_map_read_var(elt, max_idx));
+ track_data = tracing_cond_snapshot_data(file->tr);
+ if (!track_data)
+ return;
- for (i = 0; i < hist_data->n_max_vars; i++) {
- struct hist_field *save_val = hist_data->max_vars[i]->val;
- struct hist_field *save_var = hist_data->max_vars[i]->var;
+ if (!track_data->updated)
+ return;
+
+ action = snapshot_action(hist_data);
+ if (!action)
+ return;
+
+ seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n");
+ seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
+ action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
+ action->track_data.var_str, track_data->track_val);
+
+ seq_puts(m, "\ttriggered by event with key: ");
+ hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
+ seq_putc(m, '\n');
+}
+#else
+static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
+{
+ return false;
+}
+static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
+ struct tracing_map_elt *elt, void *rec,
+ struct ring_buffer_event *rbe, void *key,
+ struct action_data *data,
+ u64 *var_ref_vals) {}
+static void track_data_snapshot_print(struct seq_file *m,
+ struct hist_trigger_data *hist_data) {}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
+static void track_data_print(struct seq_file *m,
+ struct hist_trigger_data *hist_data,
+ struct tracing_map_elt *elt,
+ struct action_data *data)
+{
+ u64 track_val = get_track_val(hist_data, elt, data);
+ unsigned int i, save_var_idx;
+
+ if (data->handler == HANDLER_ONMAX)
+ seq_printf(m, "\n\tmax: %10llu", track_val);
+ else if (data->handler == HANDLER_ONCHANGE)
+ seq_printf(m, "\n\tchanged: %10llu", track_val);
+
+ if (data->action == ACTION_SNAPSHOT)
+ return;
+
+ for (i = 0; i < hist_data->n_save_vars; i++) {
+ struct hist_field *save_val = hist_data->save_vars[i]->val;
+ struct hist_field *save_var = hist_data->save_vars[i]->var;
u64 val;
save_var_idx = save_var->var.idx;
@@ -3392,64 +3675,81 @@ static void onmax_print(struct seq_file *m,
}
}
-static void onmax_save(struct hist_trigger_data *hist_data,
- struct tracing_map_elt *elt, void *rec,
- struct ring_buffer_event *rbe,
- struct action_data *data, u64 *var_ref_vals)
+static void ontrack_action(struct hist_trigger_data *hist_data,
+ struct tracing_map_elt *elt, void *rec,
+ struct ring_buffer_event *rbe, void *key,
+ struct action_data *data, u64 *var_ref_vals)
{
- unsigned int max_idx = data->onmax.max_var->var.idx;
- unsigned int max_var_ref_idx = data->onmax.max_var_ref_idx;
-
- u64 var_val, max_val;
+ u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
- var_val = var_ref_vals[max_var_ref_idx];
- max_val = tracing_map_read_var(elt, max_idx);
-
- if (var_val <= max_val)
- return;
-
- tracing_map_set_var(elt, max_idx, var_val);
-
- update_max_vars(hist_data, elt, rbe, rec);
+ if (check_track_val(elt, data, var_val)) {
+ save_track_val(hist_data, elt, data, var_val);
+ save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
+ }
}
-static void onmax_destroy(struct action_data *data)
+static void action_data_destroy(struct action_data *data)
{
unsigned int i;
- destroy_hist_field(data->onmax.max_var, 0);
- destroy_hist_field(data->onmax.var, 0);
+ lockdep_assert_held(&event_mutex);
- kfree(data->onmax.var_str);
- kfree(data->onmax.fn_name);
+ kfree(data->action_name);
for (i = 0; i < data->n_params; i++)
kfree(data->params[i]);
+ if (data->synth_event)
+ data->synth_event->ref--;
+
+ kfree(data->synth_event_name);
+
kfree(data);
}
-static int onmax_create(struct hist_trigger_data *hist_data,
- struct action_data *data)
+static void track_data_destroy(struct hist_trigger_data *hist_data,
+ struct action_data *data)
{
struct trace_event_file *file = hist_data->event_file;
- struct hist_field *var_field, *ref_field, *max_var;
- unsigned int var_ref_idx = hist_data->n_var_refs;
- struct field_var *field_var;
- char *onmax_var_str, *param;
- unsigned int i;
+
+ destroy_hist_field(data->track_data.track_var, 0);
+
+ if (data->action == ACTION_SNAPSHOT) {
+ struct track_data *track_data;
+
+ track_data = tracing_cond_snapshot_data(file->tr);
+ if (track_data && track_data->hist_data == hist_data) {
+ tracing_snapshot_cond_disable(file->tr);
+ track_data_free(track_data);
+ }
+ }
+
+ kfree(data->track_data.var_str);
+
+ action_data_destroy(data);
+}
+
+static int action_create(struct hist_trigger_data *hist_data,
+ struct action_data *data);
+
+static int track_data_create(struct hist_trigger_data *hist_data,
+ struct action_data *data)
+{
+ struct hist_field *var_field, *ref_field, *track_var = NULL;
+ struct trace_event_file *file = hist_data->event_file;
+ char *track_data_var_str;
int ret = 0;
- onmax_var_str = data->onmax.var_str;
- if (onmax_var_str[0] != '$') {
- hist_err("onmax: For onmax(x), x must be a variable: ", onmax_var_str);
+ track_data_var_str = data->track_data.var_str;
+ if (track_data_var_str[0] != '$') {
+ hist_err("For onmax(x) or onchange(x), x must be a variable: ", track_data_var_str);
return -EINVAL;
}
- onmax_var_str++;
+ track_data_var_str++;
- var_field = find_target_event_var(hist_data, NULL, NULL, onmax_var_str);
+ var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
if (!var_field) {
- hist_err("onmax: Couldn't find onmax variable: ", onmax_var_str);
+ hist_err("Couldn't find onmax or onchange variable: ", track_data_var_str);
return -EINVAL;
}
@@ -3457,39 +3757,26 @@ static int onmax_create(struct hist_trigger_data *hist_data,
if (!ref_field)
return -ENOMEM;
- data->onmax.var = ref_field;
+ data->track_data.var_ref = ref_field;
- data->fn = onmax_save;
- data->onmax.max_var_ref_idx = var_ref_idx;
- max_var = create_var(hist_data, file, "max", sizeof(u64), "u64");
- if (IS_ERR(max_var)) {
- hist_err("onmax: Couldn't create onmax variable: ", "max");
- ret = PTR_ERR(max_var);
+ if (data->handler == HANDLER_ONMAX)
+ track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
+ if (IS_ERR(track_var)) {
+ hist_err("Couldn't create onmax variable: ", "__max");
+ ret = PTR_ERR(track_var);
goto out;
}
- data->onmax.max_var = max_var;
-
- for (i = 0; i < data->n_params; i++) {
- param = kstrdup(data->params[i], GFP_KERNEL);
- if (!param) {
- ret = -ENOMEM;
- goto out;
- }
- field_var = create_target_field_var(hist_data, NULL, NULL, param);
- if (IS_ERR(field_var)) {
- hist_err("onmax: Couldn't create field variable: ", param);
- ret = PTR_ERR(field_var);
- kfree(param);
- goto out;
- }
-
- hist_data->max_vars[hist_data->n_max_vars++] = field_var;
- if (field_var->val->flags & HIST_FIELD_FL_STRING)
- hist_data->n_max_var_str++;
-
- kfree(param);
+ if (data->handler == HANDLER_ONCHANGE)
+ track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
+ if (IS_ERR(track_var)) {
+ hist_err("Couldn't create onchange variable: ", "__change");
+ ret = PTR_ERR(track_var);
+ goto out;
}
+ data->track_data.track_var = track_var;
+
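+ /* the tracking variable exists; now create the action it drives */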
+ ret = action_create(hist_data, data);
out:
return ret;
}
@@ -3497,14 +3784,18 @@ static int onmax_create(struct hist_trigger_data *hist_data,
static int parse_action_params(char *params, struct action_data *data)
{
char *param, *saved_param;
+ bool first_param = true;
int ret = 0;
while (params) {
- if (data->n_params >= SYNTH_FIELDS_MAX)
+ if (data->n_params >= SYNTH_FIELDS_MAX) {
+ hist_err("Too many action params", "");
+ ret = -EINVAL;
goto out;
+ }
param = strsep(&params, ",");
if (!param) {
+ hist_err("No action param found", "");
ret = -EINVAL;
goto out;
}
@@ -3522,86 +3813,164 @@ static int parse_action_params(char *params, struct action_data *data)
goto out;
}
+ if (first_param && data->use_trace_keyword) {
+ data->synth_event_name = saved_param;
+ first_param = false;
+ continue;
+ }
+ first_param = false;
+
data->params[data->n_params++] = saved_param;
}
out:
return ret;
}
-static struct action_data *onmax_parse(char *str)
+static int action_parse(char *str, struct action_data *data,
+ enum handler_id handler)
{
- char *onmax_fn_name, *onmax_var_str;
- struct action_data *data;
- int ret = -EINVAL;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
+ char *action_name;
+ int ret = 0;
- onmax_var_str = strsep(&str, ")");
- if (!onmax_var_str || !str) {
+ strsep(&str, ".");
+ if (!str) {
+ hist_err("action parsing: No action found", "");
ret = -EINVAL;
- goto free;
+ goto out;
}
- data->onmax.var_str = kstrdup(onmax_var_str, GFP_KERNEL);
- if (!data->onmax.var_str) {
- ret = -ENOMEM;
- goto free;
+ action_name = strsep(&str, "(");
+ if (!action_name || !str) {
+ hist_err("action parsing: No action found", "");
+ ret = -EINVAL;
+ goto out;
}
- strsep(&str, ".");
- if (!str)
- goto free;
-
- onmax_fn_name = strsep(&str, "(");
- if (!onmax_fn_name || !str)
- goto free;
-
- if (str_has_prefix(onmax_fn_name, "save")) {
+ if (str_has_prefix(action_name, "save")) {
char *params = strsep(&str, ")");
if (!params) {
+ hist_err("action parsing: No params found for %s", "save");
ret = -EINVAL;
- goto free;
+ goto out;
}
ret = parse_action_params(params, data);
if (ret)
- goto free;
- } else
+ goto out;
+
+ if (handler == HANDLER_ONMAX)
+ data->track_data.check_val = check_track_val_max;
+ else if (handler == HANDLER_ONCHANGE)
+ data->track_data.check_val = check_track_val_changed;
+ else {
+ hist_err("action parsing: Handler doesn't support action: ", action_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ data->track_data.save_data = save_track_data_vars;
+ data->fn = ontrack_action;
+ data->action = ACTION_SAVE;
+ } else if (str_has_prefix(action_name, "snapshot")) {
+ char *params = strsep(&str, ")");
+
+ if (!str) {
+ hist_err("action parsing: No closing paren found: %s", params);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (handler == HANDLER_ONMAX)
+ data->track_data.check_val = check_track_val_max;
+ else if (handler == HANDLER_ONCHANGE)
+ data->track_data.check_val = check_track_val_changed;
+ else {
+ hist_err("action parsing: Handler doesn't support action: ", action_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
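+ /* snapshot reuses the generic tracking handler with a snapshot save function */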
+ data->track_data.save_data = save_track_data_snapshot;
+ data->fn = ontrack_action;
+ data->action = ACTION_SNAPSHOT;
+ } else {
+ char *params = strsep(&str, ")");
+
+ if (str_has_prefix(action_name, "trace"))
+ data->use_trace_keyword = true;
+
+ if (params) {
+ ret = parse_action_params(params, data);
+ if (ret)
+ goto out;
+ }
+
+ if (handler == HANDLER_ONMAX)
+ data->track_data.check_val = check_track_val_max;
+ else if (handler == HANDLER_ONCHANGE)
+ data->track_data.check_val = check_track_val_changed;
+
+ if (handler != HANDLER_ONMATCH) {
+ data->track_data.save_data = action_trace;
+ data->fn = ontrack_action;
+ } else
+ data->fn = action_trace;
+
+ data->action = ACTION_TRACE;
+ }
+
+ data->action_name = kstrdup(action_name, GFP_KERNEL);
+ if (!data->action_name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ data->handler = handler;
+ out:
+ return ret;
+}
+
+static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
+ char *str, enum handler_id handler)
+{
+ struct action_data *data;
+ int ret = -EINVAL;
+ char *var_str;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ var_str = strsep(&str, ")");
+ if (!var_str || !str) {
+ ret = -EINVAL;
goto free;
+ }
- data->onmax.fn_name = kstrdup(onmax_fn_name, GFP_KERNEL);
- if (!data->onmax.fn_name) {
+ data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
+ if (!data->track_data.var_str) {
ret = -ENOMEM;
goto free;
}
+
+ ret = action_parse(str, data, handler);
+ if (ret)
+ goto free;
out:
return data;
free:
- onmax_destroy(data);
+ track_data_destroy(hist_data, data);
data = ERR_PTR(ret);
goto out;
}
static void onmatch_destroy(struct action_data *data)
{
- unsigned int i;
-
- lockdep_assert_held(&event_mutex);
+ kfree(data->match_data.event);
+ kfree(data->match_data.event_system);
- kfree(data->onmatch.match_event);
- kfree(data->onmatch.match_event_system);
- kfree(data->onmatch.synth_event_name);
-
- for (i = 0; i < data->n_params; i++)
- kfree(data->params[i]);
-
- if (data->onmatch.synth_event)
- data->onmatch.synth_event->ref--;
-
- kfree(data);
+ action_data_destroy(data);
}
static void destroy_field_var(struct field_var *field_var)
@@ -3651,8 +4020,9 @@ static int check_synth_field(struct synth_event *event,
}
static struct hist_field *
-onmatch_find_var(struct hist_trigger_data *hist_data, struct action_data *data,
- char *system, char *event, char *var)
+trace_action_find_var(struct hist_trigger_data *hist_data,
+ struct action_data *data,
+ char *system, char *event, char *var)
{
struct hist_field *hist_field;
@@ -3660,24 +4030,24 @@ onmatch_find_var(struct hist_trigger_data *hist_data, struct action_data *data,
hist_field = find_target_event_var(hist_data, system, event, var);
if (!hist_field) {
- if (!system) {
- system = data->onmatch.match_event_system;
- event = data->onmatch.match_event;
+ if (!system && data->handler == HANDLER_ONMATCH) {
+ system = data->match_data.event_system;
+ event = data->match_data.event;
}
hist_field = find_event_var(hist_data, system, event, var);
}
if (!hist_field)
- hist_err_event("onmatch: Couldn't find onmatch param: $", system, event, var);
+ hist_err_event("trace action: Couldn't find param: $", system, event, var);
return hist_field;
}
static struct hist_field *
-onmatch_create_field_var(struct hist_trigger_data *hist_data,
- struct action_data *data, char *system,
- char *event, char *var)
+trace_action_create_field_var(struct hist_trigger_data *hist_data,
+ struct action_data *data, char *system,
+ char *event, char *var)
{
struct hist_field *hist_field = NULL;
struct field_var *field_var;
@@ -3700,9 +4070,9 @@ onmatch_create_field_var(struct hist_trigger_data *hist_data,
* looking for fields on the onmatch(system.event.xxx)
* event.
*/
- if (!system) {
- system = data->onmatch.match_event_system;
- event = data->onmatch.match_event;
+ if (!system && data->handler == HANDLER_ONMATCH) {
+ system = data->match_data.event_system;
+ event = data->match_data.event;
}
/*
@@ -3724,24 +4094,30 @@ onmatch_create_field_var(struct hist_trigger_data *hist_data,
goto out;
}
-static int onmatch_create(struct hist_trigger_data *hist_data,
- struct trace_event_file *file,
- struct action_data *data)
+static int trace_action_create(struct hist_trigger_data *hist_data,
+ struct action_data *data)
{
char *event_name, *param, *system = NULL;
struct hist_field *hist_field, *var_ref;
unsigned int i, var_ref_idx;
unsigned int field_pos = 0;
struct synth_event *event;
+ char *synth_event_name;
int ret = 0;
lockdep_assert_held(&event_mutex);
- event = find_synth_event(data->onmatch.synth_event_name);
+ if (data->use_trace_keyword)
+ synth_event_name = data->synth_event_name;
+ else
+ synth_event_name = data->action_name;
+
+ event = find_synth_event(synth_event_name);
if (!event) {
- hist_err("onmatch: Couldn't find synthetic event: ", data->onmatch.synth_event_name);
+ hist_err("trace action: Couldn't find synthetic event: ", synth_event_name);
return -EINVAL;
}
+
event->ref++;
var_ref_idx = hist_data->n_var_refs;
@@ -3769,13 +4145,15 @@ static int onmatch_create(struct hist_trigger_data *hist_data,
}
if (param[0] == '$')
- hist_field = onmatch_find_var(hist_data, data, system,
- event_name, param);
+ hist_field = trace_action_find_var(hist_data, data,
+ system, event_name,
+ param);
else
- hist_field = onmatch_create_field_var(hist_data, data,
- system,
- event_name,
- param);
+ hist_field = trace_action_create_field_var(hist_data,
+ data,
+ system,
+ event_name,
+ param);
if (!hist_field) {
kfree(p);
@@ -3797,7 +4175,7 @@ static int onmatch_create(struct hist_trigger_data *hist_data,
continue;
}
- hist_err_event("onmatch: Param type doesn't match synthetic event field type: ",
+ hist_err_event("trace action: Param type doesn't match synthetic event field type: ",
system, event_name, param);
kfree(p);
ret = -EINVAL;
@@ -3805,14 +4183,13 @@ static int onmatch_create(struct hist_trigger_data *hist_data,
}
if (field_pos != event->n_fields) {
- hist_err("onmatch: Param count doesn't match synthetic event field count: ", event->name);
+ hist_err("trace action: Param count doesn't match synthetic event field count: ", event->name);
ret = -EINVAL;
goto err;
}
- data->fn = action_trace;
- data->onmatch.synth_event = event;
- data->onmatch.var_ref_idx = var_ref_idx;
+ data->synth_event = event;
+ data->var_ref_idx = var_ref_idx;
out:
return ret;
err:
@@ -3821,10 +4198,75 @@ static int onmatch_create(struct hist_trigger_data *hist_data,
goto out;
}
+static int action_create(struct hist_trigger_data *hist_data,
+ struct action_data *data)
+{
+ struct trace_event_file *file = hist_data->event_file;
+ struct track_data *track_data;
+ struct field_var *field_var;
+ unsigned int i;
+ char *param;
+ int ret = 0;
+
+ if (data->action == ACTION_TRACE)
+ return trace_action_create(hist_data, data);
+
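+ /* snapshot actions register a conditional snapshot on the trace instance */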
+ if (data->action == ACTION_SNAPSHOT) {
+ track_data = track_data_alloc(hist_data->key_size, data, hist_data);
+ if (IS_ERR(track_data)) {
+ ret = PTR_ERR(track_data);
+ goto out;
+ }
+
+ ret = tracing_snapshot_cond_enable(file->tr, track_data,
+ cond_snapshot_update);
+ if (ret)
+ track_data_free(track_data);
+
+ goto out;
+ }
+
+ if (data->action == ACTION_SAVE) {
+ if (hist_data->n_save_vars) {
+ ret = -EEXIST;
+ hist_err("save action: Can't have more than one save() action per hist", "");
+ goto out;
+ }
+
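+ /* save() actions: create one field variable per parameter to be saved */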
+ for (i = 0; i < data->n_params; i++) {
+ param = kstrdup(data->params[i], GFP_KERNEL);
+ if (!param) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ field_var = create_target_field_var(hist_data, NULL, NULL, param);
+ if (IS_ERR(field_var)) {
+ hist_err("save action: Couldn't create field variable: ", param);
+ ret = PTR_ERR(field_var);
+ kfree(param);
+ goto out;
+ }
+
+ hist_data->save_vars[hist_data->n_save_vars++] = field_var;
+ if (field_var->val->flags & HIST_FIELD_FL_STRING)
+ hist_data->n_save_var_str++;
+ kfree(param);
+ }
+ }
+ out:
+ return ret;
+}
+
+static int onmatch_create(struct hist_trigger_data *hist_data,
+ struct action_data *data)
+{
+ return action_create(hist_data, data);
+}
+
static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
{
char *match_event, *match_event_system;
- char *synth_event_name, *params;
struct action_data *data;
int ret = -EINVAL;
@@ -3850,43 +4292,19 @@ static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
goto free;
}
- data->onmatch.match_event = kstrdup(match_event, GFP_KERNEL);
- if (!data->onmatch.match_event) {
- ret = -ENOMEM;
- goto free;
- }
-
- data->onmatch.match_event_system = kstrdup(match_event_system, GFP_KERNEL);
- if (!data->onmatch.match_event_system) {
+ data->match_data.event = kstrdup(match_event, GFP_KERNEL);
+ if (!data->match_data.event) {
ret = -ENOMEM;
goto free;
}
- strsep(&str, ".");
- if (!str) {
- hist_err("onmatch: Missing . after onmatch(): ", str);
- goto free;
- }
-
- synth_event_name = strsep(&str, "(");
- if (!synth_event_name || !str) {
- hist_err("onmatch: Missing opening paramlist paren: ", synth_event_name);
- goto free;
- }
-
- data->onmatch.synth_event_name = kstrdup(synth_event_name, GFP_KERNEL);
- if (!data->onmatch.synth_event_name) {
+ data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
+ if (!data->match_data.event_system) {
ret = -ENOMEM;
goto free;
}
- params = strsep(&str, ")");
- if (!params || !str || (str && strlen(str))) {
- hist_err("onmatch: Missing closing paramlist paren: ", params);
- goto free;
- }
-
- ret = parse_action_params(params, data);
+ ret = action_parse(str, data, HANDLER_ONMATCH);
if (ret)
goto free;
out:
@@ -4326,10 +4744,11 @@ static void destroy_actions(struct hist_trigger_data *hist_data)
for (i = 0; i < hist_data->n_actions; i++) {
struct action_data *data = hist_data->actions[i];
- if (data->fn == action_trace)
+ if (data->handler == HANDLER_ONMATCH)
onmatch_destroy(data);
- else if (data->fn == onmax_save)
- onmax_destroy(data);
+ else if (data->handler == HANDLER_ONMAX ||
+ data->handler == HANDLER_ONCHANGE)
+ track_data_destroy(hist_data, data);
else
kfree(data);
}
@@ -4355,16 +4774,24 @@ static int parse_actions(struct hist_trigger_data *hist_data)
ret = PTR_ERR(data);
break;
}
- data->fn = action_trace;
} else if ((len = str_has_prefix(str, "onmax("))) {
char *action_str = str + len;
- data = onmax_parse(action_str);
+ data = track_data_parse(hist_data, action_str,
+ HANDLER_ONMAX);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ break;
+ }
+ } else if ((len = str_has_prefix(str, "onchange("))) {
+ char *action_str = str + len;
+
+ data = track_data_parse(hist_data, action_str,
+ HANDLER_ONCHANGE);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
break;
}
- data->fn = onmax_save;
} else {
ret = -EINVAL;
break;
@@ -4376,8 +4803,7 @@ static int parse_actions(struct hist_trigger_data *hist_data)
return ret;
}
-static int create_actions(struct hist_trigger_data *hist_data,
- struct trace_event_file *file)
+static int create_actions(struct hist_trigger_data *hist_data)
{
struct action_data *data;
unsigned int i;
@@ -4386,14 +4812,18 @@ static int create_actions(struct hist_trigger_data *hist_data,
for (i = 0; i < hist_data->attrs->n_actions; i++) {
data = hist_data->actions[i];
- if (data->fn == action_trace) {
- ret = onmatch_create(hist_data, file, data);
+ if (data->handler == HANDLER_ONMATCH) {
+ ret = onmatch_create(hist_data, data);
if (ret)
- return ret;
- } else if (data->fn == onmax_save) {
- ret = onmax_create(hist_data, data);
+ break;
+ } else if (data->handler == HANDLER_ONMAX ||
+ data->handler == HANDLER_ONCHANGE) {
+ ret = track_data_create(hist_data, data);
if (ret)
- return ret;
+ break;
+ } else {
+ ret = -EINVAL;
+ break;
}
}
@@ -4409,26 +4839,51 @@ static void print_actions(struct seq_file *m,
for (i = 0; i < hist_data->n_actions; i++) {
struct action_data *data = hist_data->actions[i];
- if (data->fn == onmax_save)
- onmax_print(m, hist_data, elt, data);
+ if (data->action == ACTION_SNAPSHOT)
+ continue;
+
+ if (data->handler == HANDLER_ONMAX ||
+ data->handler == HANDLER_ONCHANGE)
+ track_data_print(m, hist_data, elt, data);
}
}
-static void print_onmax_spec(struct seq_file *m,
- struct hist_trigger_data *hist_data,
- struct action_data *data)
+static void print_action_spec(struct seq_file *m,
+ struct hist_trigger_data *hist_data,
+ struct action_data *data)
{
unsigned int i;
- seq_puts(m, ":onmax(");
- seq_printf(m, "%s", data->onmax.var_str);
- seq_printf(m, ").%s(", data->onmax.fn_name);
-
- for (i = 0; i < hist_data->n_max_vars; i++) {
- seq_printf(m, "%s", hist_data->max_vars[i]->var->var.name);
- if (i < hist_data->n_max_vars - 1)
- seq_puts(m, ",");
+ if (data->action == ACTION_SAVE) {
+ for (i = 0; i < hist_data->n_save_vars; i++) {
+ seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
+ if (i < hist_data->n_save_vars - 1)
+ seq_puts(m, ",");
+ }
+ } else if (data->action == ACTION_TRACE) {
+ if (data->use_trace_keyword)
+ seq_printf(m, "%s", data->synth_event_name);
+ for (i = 0; i < data->n_params; i++) {
+ if (i || data->use_trace_keyword)
+ seq_puts(m, ",");
+ seq_printf(m, "%s", data->params[i]);
+ }
}
+}
+
+static void print_track_data_spec(struct seq_file *m,
+ struct hist_trigger_data *hist_data,
+ struct action_data *data)
+{
+ if (data->handler == HANDLER_ONMAX)
+ seq_puts(m, ":onmax(");
+ else if (data->handler == HANDLER_ONCHANGE)
+ seq_puts(m, ":onchange(");
+ seq_printf(m, "%s", data->track_data.var_str);
+ seq_printf(m, ").%s(", data->action_name);
+
+ print_action_spec(m, hist_data, data);
+
seq_puts(m, ")");
}
@@ -4436,18 +4891,12 @@ static void print_onmatch_spec(struct seq_file *m,
struct hist_trigger_data *hist_data,
struct action_data *data)
{
- unsigned int i;
+ seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
+ data->match_data.event);
- seq_printf(m, ":onmatch(%s.%s).", data->onmatch.match_event_system,
- data->onmatch.match_event);
+ seq_printf(m, "%s(", data->action_name);
- seq_printf(m, "%s(", data->onmatch.synth_event->name);
-
- for (i = 0; i < data->n_params; i++) {
- if (i)
- seq_puts(m, ",");
- seq_printf(m, "%s", data->params[i]);
- }
+ print_action_spec(m, hist_data, data);
seq_puts(m, ")");
}
@@ -4463,8 +4912,11 @@ static bool actions_match(struct hist_trigger_data *hist_data,
for (i = 0; i < hist_data->n_actions; i++) {
struct action_data *data = hist_data->actions[i];
struct action_data *data_test = hist_data_test->actions[i];
+ char *action_name, *action_name_test;
- if (data->fn != data_test->fn)
+ if (data->handler != data_test->handler)
+ return false;
+ if (data->action != data_test->action)
return false;
if (data->n_params != data_test->n_params)
@@ -4475,22 +4927,30 @@ static bool actions_match(struct hist_trigger_data *hist_data,
return false;
}
- if (data->fn == action_trace) {
- if (strcmp(data->onmatch.synth_event_name,
- data_test->onmatch.synth_event_name) != 0)
- return false;
- if (strcmp(data->onmatch.match_event_system,
- data_test->onmatch.match_event_system) != 0)
- return false;
- if (strcmp(data->onmatch.match_event,
- data_test->onmatch.match_event) != 0)
+ if (data->use_trace_keyword)
+ action_name = data->synth_event_name;
+ else
+ action_name = data->action_name;
+
+ if (data_test->use_trace_keyword)
+ action_name_test = data_test->synth_event_name;
+ else
+ action_name_test = data_test->action_name;
+
+ if (strcmp(action_name, action_name_test) != 0)
+ return false;
+
+ if (data->handler == HANDLER_ONMATCH) {
+ if (strcmp(data->match_data.event_system,
+ data_test->match_data.event_system) != 0)
return false;
- } else if (data->fn == onmax_save) {
- if (strcmp(data->onmax.var_str,
- data_test->onmax.var_str) != 0)
+ if (strcmp(data->match_data.event,
+ data_test->match_data.event) != 0)
return false;
- if (strcmp(data->onmax.fn_name,
- data_test->onmax.fn_name) != 0)
+ } else if (data->handler == HANDLER_ONMAX ||
+ data->handler == HANDLER_ONCHANGE) {
+ if (strcmp(data->track_data.var_str,
+ data_test->track_data.var_str) != 0)
return false;
}
}
@@ -4507,10 +4967,11 @@ static void print_actions_spec(struct seq_file *m,
for (i = 0; i < hist_data->n_actions; i++) {
struct action_data *data = hist_data->actions[i];
- if (data->fn == action_trace)
+ if (data->handler == HANDLER_ONMATCH)
print_onmatch_spec(m, hist_data, data);
- else if (data->fn == onmax_save)
- print_onmax_spec(m, hist_data, data);
+ else if (data->handler == HANDLER_ONMAX ||
+ data->handler == HANDLER_ONCHANGE)
+ print_track_data_spec(m, hist_data, data);
}
}
@@ -4695,22 +5156,24 @@ static inline void add_to_key(char *compound_key, void *key,
/* ensure NULL-termination */
if (size > key_field->size - 1)
size = key_field->size - 1;
- }
- memcpy(compound_key + key_field->offset, key, size);
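+ /* strncpy() stops at the string's NUL instead of copying trailing garbage */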
+ strncpy(compound_key + key_field->offset, (char *)key, size);
+ } else
+ memcpy(compound_key + key_field->offset, key, size);
}
static void
hist_trigger_actions(struct hist_trigger_data *hist_data,
struct tracing_map_elt *elt, void *rec,
- struct ring_buffer_event *rbe, u64 *var_ref_vals)
+ struct ring_buffer_event *rbe, void *key,
+ u64 *var_ref_vals)
{
struct action_data *data;
unsigned int i;
for (i = 0; i < hist_data->n_actions; i++) {
data = hist_data->actions[i];
- data->fn(hist_data, elt, rec, rbe, data, var_ref_vals);
+ data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
}
}
@@ -4723,7 +5186,6 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
u64 var_ref_vals[TRACING_MAP_VARS_MAX];
char compound_key[HIST_KEY_SIZE_MAX];
struct tracing_map_elt *elt = NULL;
- struct stack_trace stacktrace;
struct hist_field *key_field;
u64 field_contents;
void *key = NULL;
@@ -4735,14 +5197,9 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
key_field = hist_data->fields[i];
if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
- stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
- stacktrace.entries = entries;
- stacktrace.nr_entries = 0;
- stacktrace.skip = HIST_STACKTRACE_SKIP;
-
- memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
- save_stack_trace(&stacktrace);
-
+ memset(entries, 0, HIST_STACKTRACE_SIZE);
+ stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
+ HIST_STACKTRACE_SKIP);
key = entries;
} else {
field_contents = key_field->fn(key_field, elt, rbe, rec);
@@ -4771,7 +5228,7 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
if (resolve_var_refs(hist_data, key, var_ref_vals, true))
- hist_trigger_actions(hist_data, elt, rec, rbe, var_ref_vals);
+ hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
}
static void hist_trigger_stacktrace_print(struct seq_file *m,
@@ -4783,7 +5240,7 @@ static void hist_trigger_stacktrace_print(struct seq_file *m,
unsigned int i;
for (i = 0; i < max_entries; i++) {
- if (stacktrace_entries[i] == ULONG_MAX)
+ if (!stacktrace_entries[i])
return;
seq_printf(m, "%*c", 1 + spaces, ' ');
@@ -4792,10 +5249,10 @@ static void hist_trigger_stacktrace_print(struct seq_file *m,
}
}
-static void
-hist_trigger_entry_print(struct seq_file *m,
- struct hist_trigger_data *hist_data, void *key,
- struct tracing_map_elt *elt)
+static void hist_trigger_print_key(struct seq_file *m,
+ struct hist_trigger_data *hist_data,
+ void *key,
+ struct tracing_map_elt *elt)
{
struct hist_field *key_field;
char str[KSYM_SYMBOL_LEN];
@@ -4871,6 +5328,17 @@ hist_trigger_entry_print(struct seq_file *m,
seq_puts(m, " ");
seq_puts(m, "}");
+}
+
+static void hist_trigger_entry_print(struct seq_file *m,
+ struct hist_trigger_data *hist_data,
+ void *key,
+ struct tracing_map_elt *elt)
+{
+ const char *field_name;
+ unsigned int i;
+
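+ /* key printing is split out so track_data_snapshot_print() can reuse it */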
+ hist_trigger_print_key(m, hist_data, key, elt);
seq_printf(m, " hitcount: %10llu",
tracing_map_read_sum(elt, HITCOUNT_IDX));
@@ -4937,6 +5405,8 @@ static void hist_trigger_show(struct seq_file *m,
if (n_entries < 0)
n_entries = 0;
+ track_data_snapshot_print(m, hist_data);
+
seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
(u64)atomic64_read(&hist_data->map->hits),
n_entries, (u64)atomic64_read(&hist_data->map->drops));
@@ -5683,7 +6153,7 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
if (has_hist_vars(hist_data))
save_hist_vars(hist_data);
- ret = create_actions(hist_data, file);
+ ret = create_actions(hist_data);
if (ret)
goto out_unreg;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index c2af1560e856..69ebf3c2f1b5 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -380,6 +380,7 @@ static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
trace_seq_putc(s, ' ');
trace_print_lat_fmt(s, entry);
+ trace_seq_puts(s, " | ");
}
/* If the pid changed since the last trace, output this event */
@@ -501,6 +502,17 @@ static void print_graph_abs_time(u64 t, struct trace_seq *s)
}
static void
+print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
+{
+ unsigned long long usecs;
+
+ usecs = iter->ts - iter->trace_buffer->time_start;
+ do_div(usecs, NSEC_PER_USEC);
+
+ trace_seq_printf(s, "%9llu us | ", usecs);
+}
+
+static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
enum trace_type type, int cpu, pid_t pid, u32 flags)
{
@@ -517,6 +529,10 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
print_graph_abs_time(iter->ts, s);
+ /* Relative time */
+ if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+ print_graph_rel_time(iter, s);
+
/* Cpu */
if (flags & TRACE_GRAPH_PRINT_CPU)
print_graph_cpu(s, cpu);
@@ -725,6 +741,10 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
print_graph_abs_time(iter->ts, s);
+ /* Relative time */
+ if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+ print_graph_rel_time(iter, s);
+
/* Cpu */
if (flags & TRACE_GRAPH_PRINT_CPU)
print_graph_cpu(s, cpu);
@@ -1101,6 +1121,8 @@ static void print_lat_header(struct seq_file *s, u32 flags)
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
size += 16;
+ if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+ size += 16;
if (flags & TRACE_GRAPH_PRINT_CPU)
size += 4;
if (flags & TRACE_GRAPH_PRINT_PROC)
@@ -1125,12 +1147,14 @@ static void __print_graph_headers_flags(struct trace_array *tr,
seq_putc(s, '#');
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
seq_puts(s, " TIME ");
+ if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+ seq_puts(s, " REL TIME ");
if (flags & TRACE_GRAPH_PRINT_CPU)
seq_puts(s, " CPU");
if (flags & TRACE_GRAPH_PRINT_PROC)
seq_puts(s, " TASK/PID ");
if (lat)
- seq_puts(s, "||||");
+ seq_puts(s, "|||| ");
if (flags & TRACE_GRAPH_PRINT_DURATION)
seq_puts(s, " DURATION ");
seq_puts(s, " FUNCTION CALLS\n");
@@ -1139,12 +1163,14 @@ static void __print_graph_headers_flags(struct trace_array *tr,
seq_putc(s, '#');
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
seq_puts(s, " | ");
+ if (flags & TRACE_GRAPH_PRINT_REL_TIME)
+ seq_puts(s, " | ");
if (flags & TRACE_GRAPH_PRINT_CPU)
seq_puts(s, " | ");
if (flags & TRACE_GRAPH_PRINT_PROC)
seq_puts(s, " | | ");
if (lat)
- seq_puts(s, "||||");
+ seq_puts(s, "|||| ");
if (flags & TRACE_GRAPH_PRINT_DURATION)
seq_puts(s, " | | ");
seq_puts(s, " | | | |\n");
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d42a473b8240..a745b0cee5d3 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -239,7 +239,7 @@ static void irqsoff_trace_close(struct trace_iterator *iter)
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
TRACE_GRAPH_PRINT_PROC | \
- TRACE_GRAPH_PRINT_ABS_TIME | \
+ TRACE_GRAPH_PRINT_REL_TIME | \
TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index d953c163a079..810d78a8d14c 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -51,14 +51,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter.buffer_iter[cpu] =
- ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
+ ring_buffer_read_prepare(iter.trace_buffer->buffer,
+ cpu, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu]);
tracing_iter_reset(&iter, cpu);
}
} else {
iter.cpu_file = cpu_file;
iter.buffer_iter[cpu_file] =
- ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
+ ring_buffer_read_prepare(iter.trace_buffer->buffer,
+ cpu_file, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu_file]);
tracing_iter_reset(&iter, cpu_file);
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 99592c27465e..5d5129b05df7 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,7 +35,7 @@ static struct dyn_event_operations trace_kprobe_ops = {
.match = trace_kprobe_match,
};
-/**
+/*
* Kprobe event core functions
*/
struct trace_kprobe {
@@ -221,7 +221,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
tk->rp.maxactive = maxactive;
- if (!event || !is_good_name(event)) {
+ if (!event || !group) {
ret = -EINVAL;
goto error;
}
@@ -231,11 +231,6 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
if (!tk->tp.call.name)
goto error;
- if (!group || !is_good_name(group)) {
- ret = -EINVAL;
- goto error;
- }
-
tk->tp.class.system = kstrdup(group, GFP_KERNEL);
if (!tk->tp.class.system)
goto error;
@@ -624,7 +619,11 @@ static int trace_kprobe_create(int argc, const char *argv[])
if (event)
event++;
- if (is_return && isdigit(argv[0][1])) {
+ if (isdigit(argv[0][1])) {
+ if (!is_return) {
+ pr_info("Maxactive is not for kprobe");
+ return -EINVAL;
+ }
if (event)
len = event - &argv[0][1] - 1;
else
@@ -634,8 +633,8 @@ static int trace_kprobe_create(int argc, const char *argv[])
memcpy(buf, &argv[0][1], len);
buf[len] = '\0';
ret = kstrtouint(buf, 0, &maxactive);
- if (ret) {
- pr_info("Failed to parse maxactive.\n");
+ if (ret || !maxactive) {
+ pr_info("Invalid maxactive number\n");
return ret;
}
/* kretprobes instances are iterated over via a list. The
@@ -694,9 +693,9 @@ static int trace_kprobe_create(int argc, const char *argv[])
tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
argc, is_return);
if (IS_ERR(tk)) {
- pr_info("Failed to allocate trace_probe.(%d)\n",
- (int)PTR_ERR(tk));
ret = PTR_ERR(tk);
+ /* This must return -ENOMEM, otherwise there is a bug */
+ WARN_ON_ONCE(ret != -ENOMEM);
goto out;
}
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 9962cb5da8ac..8f8411e7835f 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -13,7 +13,7 @@
#include "trace_probe.h"
-const char *reserved_field_names[] = {
+static const char *reserved_field_names[] = {
"common_type",
"common_flags",
"common_preempt_count",
@@ -159,6 +159,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
char *buf)
{
const char *slash, *event = *pevent;
+ int len;
slash = strchr(event, '/');
if (slash) {
@@ -171,12 +172,25 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
return -E2BIG;
}
strlcpy(buf, event, slash - event + 1);
+ if (!is_good_name(buf)) {
+ pr_info("Group name must follow the same rules as C identifiers\n");
+ return -EINVAL;
+ }
*pgroup = buf;
*pevent = slash + 1;
+ event = *pevent;
}
- if (strlen(event) == 0) {
+ len = strlen(event);
+ if (len == 0) {
pr_info("Event name is not specified\n");
return -EINVAL;
+ } else if (len > MAX_EVENT_NAME_LEN) {
+ pr_info("Event name is too long\n");
+ return -E2BIG;
+ }
+ if (!is_good_name(event)) {
+ pr_info("Event name must follow the same rules as C identifiers\n");
+ return -EINVAL;
}
return 0;
}
@@ -300,6 +314,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
case '+': /* deref memory */
arg++; /* Skip '+', because kstrtol() rejects it. */
+ /* fall through */
case '-':
tmp = strchr(arg, '(');
if (!tmp)
@@ -547,6 +562,8 @@ int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, char *arg,
body = strchr(arg, '=');
if (body) {
+ if (body - arg > MAX_ARG_NAME_LEN || body == arg)
+ return -EINVAL;
parg->name = kmemdup_nul(arg, body - arg, GFP_KERNEL);
body++;
} else {
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 8a63f8bc01bc..2177c206de15 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -32,6 +32,7 @@
#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
#define MAX_ARRAY_LEN 64
+#define MAX_ARG_NAME_LEN 32
#define MAX_STRING_SIZE PATH_MAX
/* Reserved field names */
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 4ea7e6845efb..743b2b520d34 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -180,8 +180,11 @@ static void wakeup_trace_close(struct trace_iterator *iter)
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
- TRACE_GRAPH_PRINT_ABS_TIME | \
- TRACE_GRAPH_PRINT_DURATION)
+ TRACE_GRAPH_PRINT_CPU | \
+ TRACE_GRAPH_PRINT_REL_TIME | \
+ TRACE_GRAPH_PRINT_DURATION | \
+ TRACE_GRAPH_PRINT_OVERHEAD | \
+ TRACE_GRAPH_PRINT_IRQS)
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
@@ -472,6 +475,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
+ __trace_stack(wakeup_trace, flags, 0, pc);
T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu);
@@ -482,7 +486,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
if (likely(!is_tracing_stopped())) {
wakeup_trace->max_latency = delta;
- update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
+ update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
}
out_unlock:
@@ -583,6 +587,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+ __trace_stack(wakeup_trace, flags, 0, pc);
/*
* We must be careful in using CALLER_ADDR2. But since wake_up
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index eec648a0d673..5d16f73898db 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -18,44 +18,32 @@
#include "trace.h"
-static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
- { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
-unsigned stack_trace_index[STACK_TRACE_ENTRIES];
+#define STACK_TRACE_ENTRIES 500
-/*
- * Reserve one entry for the passed in ip. This will allow
- * us to remove most or all of the stack size overhead
- * added by the stack tracer itself.
- */
-struct stack_trace stack_trace_max = {
- .max_entries = STACK_TRACE_ENTRIES - 1,
- .entries = &stack_dump_trace[0],
-};
+static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
+static unsigned stack_trace_index[STACK_TRACE_ENTRIES];
-unsigned long stack_trace_max_size;
-arch_spinlock_t stack_trace_max_lock =
+static unsigned int stack_trace_nr_entries;
+static unsigned long stack_trace_max_size;
+static arch_spinlock_t stack_trace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled;
-static int last_stack_tracer_enabled;
-void stack_trace_print(void)
+static void print_max_stack(void)
{
long i;
int size;
pr_emerg(" Depth Size Location (%d entries)\n"
" ----- ---- --------\n",
- stack_trace_max.nr_entries);
+ stack_trace_nr_entries);
- for (i = 0; i < stack_trace_max.nr_entries; i++) {
- if (stack_dump_trace[i] == ULONG_MAX)
- break;
- if (i+1 == stack_trace_max.nr_entries ||
- stack_dump_trace[i+1] == ULONG_MAX)
+ for (i = 0; i < stack_trace_nr_entries; i++) {
+ if (i + 1 == stack_trace_nr_entries)
size = stack_trace_index[i];
else
size = stack_trace_index[i] - stack_trace_index[i+1];
@@ -65,16 +53,7 @@ void stack_trace_print(void)
}
}
-/*
- * When arch-specific code overrides this function, the following
- * data should be filled up, assuming stack_trace_max_lock is held to
- * prevent concurrent updates.
- * stack_trace_index[]
- * stack_trace_max
- * stack_trace_max_size
- */
-void __weak
-check_stack(unsigned long ip, unsigned long *stack)
+static void check_stack(unsigned long ip, unsigned long *stack)
{
unsigned long this_size, flags; unsigned long *p, *top, *start;
static int tracer_frame;
@@ -110,13 +89,12 @@ check_stack(unsigned long ip, unsigned long *stack)
stack_trace_max_size = this_size;
- stack_trace_max.nr_entries = 0;
- stack_trace_max.skip = 0;
-
- save_stack_trace(&stack_trace_max);
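+ /* stack_trace_save() returns the number of entries it recorded */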
+ stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
+ ARRAY_SIZE(stack_dump_trace) - 1,
+ 0);
/* Skip over the overhead of the stack tracer itself */
- for (i = 0; i < stack_trace_max.nr_entries; i++) {
+ for (i = 0; i < stack_trace_nr_entries; i++) {
if (stack_dump_trace[i] == ip)
break;
}
@@ -125,7 +103,7 @@ check_stack(unsigned long ip, unsigned long *stack)
* Some archs may not have the passed in ip in the dump.
* If that happens, we need to show everything.
*/
- if (i == stack_trace_max.nr_entries)
+ if (i == stack_trace_nr_entries)
i = 0;
/*
@@ -143,15 +121,13 @@ check_stack(unsigned long ip, unsigned long *stack)
* loop will only happen once. This code only takes place
* on a new max, so it is far from a fast path.
*/
- while (i < stack_trace_max.nr_entries) {
+ while (i < stack_trace_nr_entries) {
int found = 0;
stack_trace_index[x] = this_size;
p = start;
- for (; p < top && i < stack_trace_max.nr_entries; p++) {
- if (stack_dump_trace[i] == ULONG_MAX)
- break;
+ for (; p < top && i < stack_trace_nr_entries; p++) {
/*
* The READ_ONCE_NOCHECK is used to let KASAN know that
* this is not a stack-out-of-bounds error.
@@ -182,12 +158,10 @@ check_stack(unsigned long ip, unsigned long *stack)
i++;
}
- stack_trace_max.nr_entries = x;
- for (; x < i; x++)
- stack_dump_trace[x] = ULONG_MAX;
+ stack_trace_nr_entries = x;
if (task_stack_end_corrupted(current)) {
- stack_trace_print();
+ print_max_stack();
BUG();
}
@@ -286,7 +260,7 @@ __next(struct seq_file *m, loff_t *pos)
{
long n = *pos - 1;
- if (n >= stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+ if (n >= stack_trace_nr_entries)
return NULL;
m->private = (void *)n;
@@ -350,7 +324,7 @@ static int t_show(struct seq_file *m, void *v)
seq_printf(m, " Depth Size Location"
" (%d entries)\n"
" ----- ---- --------\n",
- stack_trace_max.nr_entries);
+ stack_trace_nr_entries);
if (!stack_tracer_enabled && !stack_trace_max_size)
print_disabled(m);
@@ -360,12 +334,10 @@ static int t_show(struct seq_file *m, void *v)
i = *(long *)v;
- if (i >= stack_trace_max.nr_entries ||
- stack_dump_trace[i] == ULONG_MAX)
+ if (i >= stack_trace_nr_entries)
return 0;
- if (i+1 == stack_trace_max.nr_entries ||
- stack_dump_trace[i+1] == ULONG_MAX)
+ if (i + 1 == stack_trace_nr_entries)
size = stack_trace_index[i];
else
size = stack_trace_index[i] - stack_trace_index[i+1];
@@ -422,23 +394,21 @@ stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
+ int was_enabled;
int ret;
mutex_lock(&stack_sysctl_mutex);
+ was_enabled = !!stack_tracer_enabled;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
- if (ret || !write ||
- (last_stack_tracer_enabled == !!stack_tracer_enabled))
+ if (ret || !write || (was_enabled == !!stack_tracer_enabled))
goto out;
- last_stack_tracer_enabled = !!stack_tracer_enabled;
-
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
else
unregister_ftrace_function(&trace_ops);
-
out:
mutex_unlock(&stack_sysctl_mutex);
return ret;
@@ -454,7 +424,6 @@ static __init int enable_stacktrace(char *str)
strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);
stack_tracer_enabled = 1;
- last_stack_tracer_enabled = 1;
return 1;
}
__setup("stacktrace", enable_stacktrace);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index f93a56d2db27..fa8fbff736d6 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -314,6 +314,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
struct ring_buffer_event *event;
struct ring_buffer *buffer;
unsigned long irq_flags;
+ unsigned long args[6];
int pc;
int syscall_nr;
int size;
@@ -347,7 +348,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
entry = ring_buffer_event_data(event);
entry->nr = syscall_nr;
- syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
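+ /* syscall_get_arguments() now always fills all six args; copy only nb_args */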
+ syscall_get_arguments(current, regs, args);
+ memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
event_trigger_unlock_commit(trace_file, buffer, event, entry,
irq_flags, pc);
@@ -583,6 +585,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
struct syscall_metadata *sys_data;
struct syscall_trace_enter *rec;
struct hlist_head *head;
+ unsigned long args[6];
bool valid_prog_array;
int syscall_nr;
int rctx;
@@ -613,8 +616,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
return;
rec->nr = syscall_nr;
- syscall_get_arguments(current, regs, 0, sys_data->nb_args,
- (unsigned long *)&rec->args);
+ syscall_get_arguments(current, regs, args);
+ memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
if ((valid_prog_array &&
!perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 9bde07c06362..be78d99ee6bc 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -273,10 +273,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
struct trace_uprobe *tu;
- if (!event || !is_good_name(event))
- return ERR_PTR(-EINVAL);
-
- if (!group || !is_good_name(group))
+ if (!event || !group)
return ERR_PTR(-EINVAL);
tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
@@ -524,8 +521,9 @@ static int trace_uprobe_create(int argc, const char **argv)
tu = alloc_trace_uprobe(group, event, argc, is_return);
if (IS_ERR(tu)) {
- pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
ret = PTR_ERR(tu);
+ /* This must return -ENOMEM, otherwise there is a bug */
+ WARN_ON_ONCE(ret != -ENOMEM);
goto fail_address_parse;
}
tu->offset = offset;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 977918d5d350..7f9e7b9306fe 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -42,9 +42,9 @@ int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
-int __read_mostly nmi_watchdog_available;
+static int __read_mostly nmi_watchdog_available;
-struct cpumask watchdog_allowed_mask __read_mostly;
+static struct cpumask watchdog_allowed_mask __read_mostly;
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -199,6 +199,13 @@ static int __init nosoftlockup_setup(char *str)
}
__setup("nosoftlockup", nosoftlockup_setup);
+static int __init watchdog_thresh_setup(char *str)
+{
+ get_option(&str, &watchdog_thresh);
+ return 1;
+}
+__setup("watchdog_thresh=", watchdog_thresh_setup);
+
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
@@ -547,13 +554,15 @@ static void softlockup_start_all(void)
int lockup_detector_online_cpu(unsigned int cpu)
{
- watchdog_enable(cpu);
+ if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+ watchdog_enable(cpu);
return 0;
}
int lockup_detector_offline_cpu(unsigned int cpu)
{
- watchdog_disable(cpu);
+ if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+ watchdog_disable(cpu);
return 0;
}
@@ -581,7 +590,7 @@ static void lockup_detector_reconfigure(void)
* Create the watchdog thread infrastructure and configure the detector(s).
*
* The threads are not unparked as watchdog_allowed_mask is empty. When
- * the threads are sucessfully initialized, take the proper locks and
+ * the threads are successfully initialized, take the proper locks and
* unpark the threads in the watchdog_cpumask if the watchdog is enabled.
*/
static __init void lockup_detector_setup(void)
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 71381168dede..247bf0b1582c 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -135,7 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
- pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
+ this_cpu);
print_modules();
print_irqtrace_events(current);
if (regs)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ea9e4fb8cc6..faf7622246da 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -841,43 +841,32 @@ static void wake_up_worker(struct worker_pool *pool)
}
/**
- * wq_worker_waking_up - a worker is waking up
+ * wq_worker_running - a worker is running again
* @task: task waking up
- * @cpu: CPU @task is waking up to
*
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule().
*/
-void wq_worker_waking_up(struct task_struct *task, int cpu)
+void wq_worker_running(struct task_struct *task)
{
struct worker *worker = kthread_data(task);
- if (!(worker->flags & WORKER_NOT_RUNNING)) {
- WARN_ON_ONCE(worker->pool->cpu != cpu);
+ if (!worker->sleeping)
+ return;
+ if (!(worker->flags & WORKER_NOT_RUNNING))
atomic_inc(&worker->pool->nr_running);
- }
+ worker->sleeping = 0;
}
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
*
- * This function is called during schedule() when a busy worker is
- * going to sleep. Worker on the same cpu can be woken up by
- * returning pointer to its task.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
- *
- * Return:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
*/
-struct task_struct *wq_worker_sleeping(struct task_struct *task)
+void wq_worker_sleeping(struct task_struct *task)
{
- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
+ struct worker *next, *worker = kthread_data(task);
struct worker_pool *pool;
/*
@@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
- return NULL;
+ return;
pool = worker->pool;
- /* this can only happen on the local cpu */
- if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
- return NULL;
+ if (WARN_ON_ONCE(worker->sleeping))
+ return;
+
+ worker->sleeping = 1;
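+
+ /*
+ * pool->lock protects the idle-worker wakeup below; the scheduler
+ * used to do that wakeup itself with the task returned from here.
+ */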
+ spin_lock_irq(&pool->lock);
/*
* The counterpart of the following dec_and_test, implied mb,
@@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
- !list_empty(&pool->worklist))
- to_wakeup = first_idle_worker(pool);
- return to_wakeup ? to_wakeup->task : NULL;
+ !list_empty(&pool->worklist)) {
+ next = first_idle_worker(pool);
+ if (next)
+ wake_up_process(next->task);
+ }
+ spin_unlock_irq(&pool->lock);
}
/**
@@ -3445,6 +3439,8 @@ static void wq_init_lockdep(struct workqueue_struct *wq)
lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
if (!lock_name)
lock_name = wq->name;
+
+ wq->lock_name = lock_name;
lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
}
@@ -4264,7 +4260,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
INIT_LIST_HEAD(&wq->list);
if (alloc_and_link_pwqs(wq) < 0)
- goto err_free_wq;
+ goto err_unreg_lockdep;
if (wq_online && init_rescuer(wq) < 0)
goto err_destroy;
@@ -4290,6 +4286,9 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
return wq;
+err_unreg_lockdep:
+ wq_unregister_lockdep(wq);
+ wq_free_lockdep(wq);
err_free_wq:
free_workqueue_attrs(wq->unbound_attrs);
kfree(wq);
@@ -4924,7 +4923,7 @@ static void rebind_workers(struct worker_pool *pool)
*
* WRITE_ONCE() is necessary because @worker->flags may be
* tested without holding any lock in
- * wq_worker_waking_up(). Without it, NOT_RUNNING test may
+ * wq_worker_running(). Without it, NOT_RUNNING test may
* fail incorrectly leading to premature concurrency
* management operations.
*/
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index cb68b03ca89a..498de0e909a4 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -44,6 +44,7 @@ struct worker {
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
+ int sleeping; /* None */
/*
* Opaque string set with work_set_desc(). Printed out with task
@@ -72,8 +73,8 @@ static inline struct worker *current_wq_worker(void)
* Scheduler hooks for concurrency managed workqueue. Only to be used from
* sched/ and workqueue.c.
*/
-void wq_worker_waking_up(struct task_struct *task, int cpu);
-struct task_struct *wq_worker_sleeping(struct task_struct *task);
+void wq_worker_running(struct task_struct *task);
+void wq_worker_sleeping(struct task_struct *task);
work_func_t wq_worker_last_func(struct task_struct *task);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index a9e56539bd11..e86975bfca6a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -597,6 +597,10 @@ config ARCH_HAS_UACCESS_FLUSHCACHE
config ARCH_HAS_UACCESS_MCSAFE
bool
+# Temporary. Goes away when all archs are cleaned up
+config ARCH_STACKWALK
+ bool
+
config STACKDEPOT
bool
select STACKTRACE
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 91ed81250fb3..4c54a89f06ee 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -196,6 +196,7 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
depends on DEBUG_INFO
+ depends on $(cc-option,-gsplit-dwarf)
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
@@ -211,6 +212,7 @@ config DEBUG_INFO_SPLIT
config DEBUG_INFO_DWARF4
bool "Generate dwarf4 debuginfo"
depends on DEBUG_INFO
+ depends on $(cc-option,-gdwarf-4)
help
Generate dwarf4 debug info. This requires recent versions
of gcc and gdb. It makes the debug information larger.
@@ -751,9 +753,9 @@ endmenu # "Memory Debugging"
config ARCH_HAS_KCOV
bool
help
- KCOV does not have any arch-specific code, but currently it is enabled
- only for x86_64. KCOV requires testing on other archs, and most likely
- disabling of instrumentation for some early boot code.
+ An architecture should select this when it can successfully
+ build and run with CONFIG_KCOV. This typically requires
+ disabling instrumentation for some early boot code.
config CC_HAS_SANCOV_TRACE_PC
def_bool $(cc-option,-fsanitize-coverage=trace-pc)
@@ -1654,42 +1656,6 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.
-config DMA_API_DEBUG
- bool "Enable debugging of DMA-API usage"
- select NEED_DMA_MAP_STATE
- help
- Enable this option to debug the use of the DMA API by device drivers.
- With this option you will be able to detect common bugs in device
- drivers like double-freeing of DMA mappings or freeing mappings that
- were never allocated.
-
- This also attempts to catch cases where a page owned by DMA is
- accessed by the cpu in a way that could cause data corruption. For
- example, this enables cow_user_page() to check that the source page is
- not undergoing DMA.
-
- This option causes a performance degradation. Use only if you want to
- debug device drivers and dma interactions.
-
- If unsure, say N.
-
-config DMA_API_DEBUG_SG
- bool "Debug DMA scatter-gather usage"
- default y
- depends on DMA_API_DEBUG
- help
- Perform extra checking that callers of dma_map_sg() have respected the
- appropriate segment length/boundary limits for the given device when
- preparing DMA scatterlists.
-
- This is particularly likely to have been overlooked in cases where the
- dma_map_sg() API is used for general bulk mapping of pages rather than
- preparing literal scatter-gather descriptors, where there is a risk of
- unexpected behaviour from DMA API implementations if the scatterlist
- is technically out-of-spec.
-
- If unsure, say N.
-
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
def_bool y
@@ -1803,6 +1769,9 @@ config TEST_HEXDUMP
config TEST_STRING_HELPERS
tristate "Test functions located in the string_helpers module at runtime"
+config TEST_STRSCPY
+ tristate "Test strscpy*() family of functions at runtime"
+
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
@@ -1963,6 +1932,7 @@ config TEST_KMOD
depends on m
depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
depends on NETDEVICES && NET_CORE && INET # for TUN
+ depends on BLOCK
select TEST_LKM
select XFS_FS
select TUN
diff --git a/lib/Makefile b/lib/Makefile
index 647517940b29..07506e3891a0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -17,6 +17,17 @@ KCOV_INSTRUMENT_list_debug.o := n
KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
+# Early boot use of cmdline, don't instrument it
+ifdef CONFIG_AMD_MEM_ENCRYPT
+KASAN_SANITIZE_string.o := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_string.o = -pg
+endif
+
+CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
+endif
+
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o int_sqrt.o extable.o \
@@ -35,10 +46,11 @@ obj-y += lockref.o
obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
- gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
+ gcd.o lcm.o list_sort.o uuid.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o rhashtable.o reciprocal_div.o \
- once.o refcount.o usercopy.o errseq.o bucket_locks.o
+ once.o refcount.o usercopy.o errseq.o bucket_locks.o \
+ generic-radix-tree.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -69,6 +81,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
@@ -212,7 +225,7 @@ KCOV_INSTRUMENT_stackdepot.o := n
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
fdt_empty_tree.o
$(foreach file, $(libfdt_files), \
- $(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
+ $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
@@ -267,6 +280,7 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 087a3e9a0202..0cb672eb107c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -165,6 +165,9 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
+ if (!*mask)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ cpumask_size());
}
/**
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 4d0d47c1ffbd..e89ebfdbb0fc 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -69,7 +69,6 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
rcu_read_lock();
desc.shash.tfm = rcu_dereference(crct10dif_tfm);
- desc.shash.flags = 0;
*(__u16 *)desc.ctx = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
diff --git a/lib/digsig.c b/lib/digsig.c
index 6ba6fcd92dd1..3b0a579bdcdf 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -240,7 +240,6 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
goto err;
desc->tfm = shash;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
crypto_shash_init(desc);
crypto_shash_update(desc, data, datalen);
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index cf7b129b0b2b..e26aa4f65eb9 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -65,22 +65,16 @@ static bool fail_task(struct fault_attr *attr, struct task_struct *task)
static bool fail_stacktrace(struct fault_attr *attr)
{
- struct stack_trace trace;
int depth = attr->stacktrace_depth;
unsigned long entries[MAX_STACK_TRACE_DEPTH];
- int n;
+ int n, nr_entries;
bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
if (depth == 0)
return found;
- trace.nr_entries = 0;
- trace.entries = entries;
- trace.max_entries = depth;
- trace.skip = 1;
-
- save_stack_trace(&trace);
- for (n = 0; n < trace.nr_entries; n++) {
+ nr_entries = stack_trace_save(entries, depth, 1);
+ for (n = 0; n < nr_entries; n++) {
if (attr->reject_start <= entries[n] &&
entries[n] < attr->reject_end)
return false;
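
The conversion above targets the simplified stacktrace interface, which takes a plain array and returns the number of saved entries. A minimal caller sketch, hedged against the stack_trace_save() signature used in this hunk:

#include <linux/printk.h>
#include <linux/stacktrace.h>

#define DEPTH 16

static void show_current_stack(void)
{
	unsigned long entries[DEPTH];
	unsigned int i, nr;

	/* Save up to DEPTH return addresses, skipping this frame. */
	nr = stack_trace_save(entries, DEPTH, 1);
	for (i = 0; i < nr; i++)
		pr_info("%pS\n", (void *)entries[i]);
}
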
diff --git a/lib/flex_array.c b/lib/flex_array.c
deleted file mode 100644
index 2eed22fa507c..000000000000
--- a/lib/flex_array.c
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Flexible array managed in PAGE_SIZE parts
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2009
- *
- * Author: Dave Hansen <dave@linux.vnet.ibm.com>
- */
-
-#include <linux/flex_array.h>
-#include <linux/slab.h>
-#include <linux/stddef.h>
-#include <linux/export.h>
-#include <linux/reciprocal_div.h>
-
-struct flex_array_part {
- char elements[FLEX_ARRAY_PART_SIZE];
-};
-
-/*
- * If a user requests an allocation which is small
- * enough, we may simply use the space in the
- * flex_array->parts[] array to store the user
- * data.
- */
-static inline int elements_fit_in_base(struct flex_array *fa)
-{
- int data_size = fa->element_size * fa->total_nr_elements;
- if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
- return 1;
- return 0;
-}
-
-/**
- * flex_array_alloc - allocate a new flexible array
- * @element_size: the size of individual elements in the array
- * @total: total number of elements that this should hold
- * @flags: page allocation flags to use for base array
- *
- * Note: all locking must be provided by the caller.
- *
- * @total is used to size internal structures. If the user ever
- * accesses any array indexes >=@total, it will produce errors.
- *
- * The maximum number of elements is defined as: the number of
- * elements that can be stored in a page times the number of
- * page pointers that we can fit in the base structure or (using
- * integer math):
- *
- * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
- *
- * Here's a table showing example capacities. Note that the maximum
- * index that the get/put() functions is just nr_objects-1. This
- * basically means that you get 4MB of storage on 32-bit and 2MB on
- * 64-bit.
- *
- *
- * Element size | Objects | Objects |
- * PAGE_SIZE=4k | 32-bit | 64-bit |
- * ---------------------------------|
- * 1 bytes | 4177920 | 2088960 |
- * 2 bytes | 2088960 | 1044480 |
- * 3 bytes | 1392300 | 696150 |
- * 4 bytes | 1044480 | 522240 |
- * 32 bytes | 130560 | 65408 |
- * 33 bytes | 126480 | 63240 |
- * 2048 bytes | 2040 | 1020 |
- * 2049 bytes | 1020 | 510 |
- * void * | 1044480 | 261120 |
- *
- * Since 64-bit pointers are twice the size, we lose half the
- * capacity in the base structure. Also note that no effort is made
- * to efficiently pack objects across page boundaries.
- */
-struct flex_array *flex_array_alloc(int element_size, unsigned int total,
- gfp_t flags)
-{
- struct flex_array *ret;
- int elems_per_part = 0;
- int max_size = 0;
- struct reciprocal_value reciprocal_elems = { 0 };
-
- if (element_size) {
- elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
- reciprocal_elems = reciprocal_value(elems_per_part);
- max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
- }
-
- /* max_size will end up 0 if element_size > PAGE_SIZE */
- if (total > max_size)
- return NULL;
- ret = kzalloc(sizeof(struct flex_array), flags);
- if (!ret)
- return NULL;
- ret->element_size = element_size;
- ret->total_nr_elements = total;
- ret->elems_per_part = elems_per_part;
- ret->reciprocal_elems = reciprocal_elems;
- if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
- memset(&ret->parts[0], FLEX_ARRAY_FREE,
- FLEX_ARRAY_BASE_BYTES_LEFT);
- return ret;
-}
-EXPORT_SYMBOL(flex_array_alloc);
-
-static int fa_element_to_part_nr(struct flex_array *fa,
- unsigned int element_nr)
-{
- /*
- * if element_size == 0 we don't get here, so we never touch
- * the zeroed fa->reciprocal_elems, which would yield invalid
- * results
- */
- return reciprocal_divide(element_nr, fa->reciprocal_elems);
-}
-
-/**
- * flex_array_free_parts - just free the second-level pages
- * @fa: the flex array from which to free parts
- *
- * This is to be used in cases where the base 'struct flex_array'
- * has been statically allocated and should not be free.
- */
-void flex_array_free_parts(struct flex_array *fa)
-{
- int part_nr;
-
- if (elements_fit_in_base(fa))
- return;
- for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
- kfree(fa->parts[part_nr]);
-}
-EXPORT_SYMBOL(flex_array_free_parts);
-
-void flex_array_free(struct flex_array *fa)
-{
- flex_array_free_parts(fa);
- kfree(fa);
-}
-EXPORT_SYMBOL(flex_array_free);
-
-static unsigned int index_inside_part(struct flex_array *fa,
- unsigned int element_nr,
- unsigned int part_nr)
-{
- unsigned int part_offset;
-
- part_offset = element_nr - part_nr * fa->elems_per_part;
- return part_offset * fa->element_size;
-}
-
-static struct flex_array_part *
-__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
-{
- struct flex_array_part *part = fa->parts[part_nr];
- if (!part) {
- part = kmalloc(sizeof(struct flex_array_part), flags);
- if (!part)
- return NULL;
- if (!(flags & __GFP_ZERO))
- memset(part, FLEX_ARRAY_FREE,
- sizeof(struct flex_array_part));
- fa->parts[part_nr] = part;
- }
- return part;
-}
-
-/**
- * flex_array_put - copy data into the array at @element_nr
- * @fa: the flex array to copy data into
- * @element_nr: index of the position in which to insert
- * the new element.
- * @src: address of data to copy into the array
- * @flags: page allocation flags to use for array expansion
- *
- *
- * Note that this *copies* the contents of @src into
- * the array. If you are trying to store an array of
- * pointers, make sure to pass in &ptr instead of ptr.
- * You may instead wish to use the flex_array_put_ptr()
- * helper function.
- *
- * Locking must be provided by the caller.
- */
-int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
- gfp_t flags)
-{
- int part_nr = 0;
- struct flex_array_part *part;
- void *dst;
-
- if (element_nr >= fa->total_nr_elements)
- return -ENOSPC;
- if (!fa->element_size)
- return 0;
- if (elements_fit_in_base(fa))
- part = (struct flex_array_part *)&fa->parts[0];
- else {
- part_nr = fa_element_to_part_nr(fa, element_nr);
- part = __fa_get_part(fa, part_nr, flags);
- if (!part)
- return -ENOMEM;
- }
- dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
- memcpy(dst, src, fa->element_size);
- return 0;
-}
-EXPORT_SYMBOL(flex_array_put);
-
-/**
- * flex_array_clear - clear element in array at @element_nr
- * @fa: the flex array of the element.
- * @element_nr: index of the position to clear.
- *
- * Locking must be provided by the caller.
- */
-int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
-{
- int part_nr = 0;
- struct flex_array_part *part;
- void *dst;
-
- if (element_nr >= fa->total_nr_elements)
- return -ENOSPC;
- if (!fa->element_size)
- return 0;
- if (elements_fit_in_base(fa))
- part = (struct flex_array_part *)&fa->parts[0];
- else {
- part_nr = fa_element_to_part_nr(fa, element_nr);
- part = fa->parts[part_nr];
- if (!part)
- return -EINVAL;
- }
- dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
- memset(dst, FLEX_ARRAY_FREE, fa->element_size);
- return 0;
-}
-EXPORT_SYMBOL(flex_array_clear);
-
-/**
- * flex_array_prealloc - guarantee that array space exists
- * @fa: the flex array for which to preallocate parts
- * @start: index of first array element for which space is allocated
- * @nr_elements: number of elements for which space is allocated
- * @flags: page allocation flags
- *
- * This will guarantee that no future calls to flex_array_put()
- * will allocate memory. It can be used if you are expecting to
- * be holding a lock or in some atomic context while writing
- * data into the array.
- *
- * Locking must be provided by the caller.
- */
-int flex_array_prealloc(struct flex_array *fa, unsigned int start,
- unsigned int nr_elements, gfp_t flags)
-{
- int start_part;
- int end_part;
- int part_nr;
- unsigned int end;
- struct flex_array_part *part;
-
- if (!start && !nr_elements)
- return 0;
- if (start >= fa->total_nr_elements)
- return -ENOSPC;
- if (!nr_elements)
- return 0;
-
- end = start + nr_elements - 1;
-
- if (end >= fa->total_nr_elements)
- return -ENOSPC;
- if (!fa->element_size)
- return 0;
- if (elements_fit_in_base(fa))
- return 0;
- start_part = fa_element_to_part_nr(fa, start);
- end_part = fa_element_to_part_nr(fa, end);
- for (part_nr = start_part; part_nr <= end_part; part_nr++) {
- part = __fa_get_part(fa, part_nr, flags);
- if (!part)
- return -ENOMEM;
- }
- return 0;
-}
-EXPORT_SYMBOL(flex_array_prealloc);
-
-/**
- * flex_array_get - pull data back out of the array
- * @fa: the flex array from which to extract data
- * @element_nr: index of the element to fetch from the array
- *
- * Returns a pointer to the data at index @element_nr. Note
- * that this is a copy of the data that was passed in. If you
- * are using this to store pointers, you'll get back &ptr. You
- * may instead wish to use the flex_array_get_ptr helper.
- *
- * Locking must be provided by the caller.
- */
-void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
-{
- int part_nr = 0;
- struct flex_array_part *part;
-
- if (!fa->element_size)
- return NULL;
- if (element_nr >= fa->total_nr_elements)
- return NULL;
- if (elements_fit_in_base(fa))
- part = (struct flex_array_part *)&fa->parts[0];
- else {
- part_nr = fa_element_to_part_nr(fa, element_nr);
- part = fa->parts[part_nr];
- if (!part)
- return NULL;
- }
- return &part->elements[index_inside_part(fa, element_nr, part_nr)];
-}
-EXPORT_SYMBOL(flex_array_get);
-
-/**
- * flex_array_get_ptr - pull a ptr back out of the array
- * @fa: the flex array from which to extract data
- * @element_nr: index of the element to fetch from the array
- *
- * Returns the pointer placed in the flex array at element_nr using
- * flex_array_put_ptr(). This function should not be called if the
- * element in question was not set using the _put_ptr() helper.
- */
-void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
-{
- void **tmp;
-
- tmp = flex_array_get(fa, element_nr);
- if (!tmp)
- return NULL;
-
- return *tmp;
-}
-EXPORT_SYMBOL(flex_array_get_ptr);
-
-static int part_is_free(struct flex_array_part *part)
-{
- int i;
-
- for (i = 0; i < sizeof(struct flex_array_part); i++)
- if (part->elements[i] != FLEX_ARRAY_FREE)
- return 0;
- return 1;
-}
-
-/**
- * flex_array_shrink - free unused second-level pages
- * @fa: the flex array to shrink
- *
- * Frees all second-level pages that consist solely of unused
- * elements. Returns the number of pages freed.
- *
- * Locking must be provided by the caller.
- */
-int flex_array_shrink(struct flex_array *fa)
-{
- struct flex_array_part *part;
- int part_nr;
- int ret = 0;
-
- if (!fa->total_nr_elements || !fa->element_size)
- return 0;
- if (elements_fit_in_base(fa))
- return ret;
- for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
- part = fa->parts[part_nr];
- if (!part)
- continue;
- if (part_is_free(part)) {
- fa->parts[part_nr] = NULL;
- kfree(part);
- ret++;
- }
- }
- return ret;
-}
-EXPORT_SYMBOL(flex_array_shrink);
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
new file mode 100644
index 000000000000..a7bafc413730
--- /dev/null
+++ b/lib/generic-radix-tree.c
@@ -0,0 +1,217 @@
+
+#include <linux/export.h>
+#include <linux/generic-radix-tree.h>
+#include <linux/gfp.h>
+
+#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
+
+struct genradix_node {
+ union {
+ /* Interior node: */
+ struct genradix_node *children[GENRADIX_ARY];
+
+ /* Leaf: */
+ u8 data[PAGE_SIZE];
+ };
+};
+
+static inline int genradix_depth_shift(unsigned depth)
+{
+ return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+}
+
+/*
+ * Returns size (of data, in bytes) that a tree of a given depth holds:
+ */
+static inline size_t genradix_depth_size(unsigned depth)
+{
+ return 1UL << genradix_depth_shift(depth);
+}
+
+/* depth that's needed for a genradix that can address up to ULONG_MAX: */
+#define GENRADIX_MAX_DEPTH \
+ DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)
+
+#define GENRADIX_DEPTH_MASK \
+ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
+
+unsigned genradix_root_to_depth(struct genradix_root *r)
+{
+ return (unsigned long) r & GENRADIX_DEPTH_MASK;
+}
+
+struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+{
+ return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
+}
+
+/*
+ * Returns pointer to the specified byte @offset within @radix, or NULL if not
+ * allocated
+ */
+void *__genradix_ptr(struct __genradix *radix, size_t offset)
+{
+ struct genradix_root *r = READ_ONCE(radix->root);
+ struct genradix_node *n = genradix_root_to_node(r);
+ unsigned level = genradix_root_to_depth(r);
+
+ if (ilog2(offset) >= genradix_depth_shift(level))
+ return NULL;
+
+ while (1) {
+ if (!n)
+ return NULL;
+ if (!level)
+ break;
+
+ level--;
+
+ n = n->children[offset >> genradix_depth_shift(level)];
+ offset &= genradix_depth_size(level) - 1;
+ }
+
+ return &n->data[offset];
+}
+EXPORT_SYMBOL(__genradix_ptr);
+
+/*
+ * Returns pointer to the specified byte @offset within @radix, allocating it if
+ * necessary - newly allocated slots are always zeroed out:
+ */
+void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ gfp_t gfp_mask)
+{
+ struct genradix_root *v = READ_ONCE(radix->root);
+ struct genradix_node *n, *new_node = NULL;
+ unsigned level;
+
+ /* Increase tree depth if necessary: */
+ while (1) {
+ struct genradix_root *r = v, *new_root;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (n && ilog2(offset) < genradix_depth_shift(level))
+ break;
+
+ if (!new_node) {
+ new_node = (void *)
+ __get_free_page(gfp_mask|__GFP_ZERO);
+ if (!new_node)
+ return NULL;
+ }
+
+ new_node->children[0] = n;
+ new_root = ((struct genradix_root *)
+ ((unsigned long) new_node | (n ? level + 1 : 0)));
+
+ if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
+ v = new_root;
+ new_node = NULL;
+ }
+ }
+
+ while (level--) {
+ struct genradix_node **p =
+ &n->children[offset >> genradix_depth_shift(level)];
+ offset &= genradix_depth_size(level) - 1;
+
+ n = READ_ONCE(*p);
+ if (!n) {
+ if (!new_node) {
+ new_node = (void *)
+ __get_free_page(gfp_mask|__GFP_ZERO);
+ if (!new_node)
+ return NULL;
+ }
+
+ if (!(n = cmpxchg_release(p, NULL, new_node)))
+ swap(n, new_node);
+ }
+ }
+
+ if (new_node)
+ free_page((unsigned long) new_node);
+
+ return &n->data[offset];
+}
+EXPORT_SYMBOL(__genradix_ptr_alloc);
+
+void *__genradix_iter_peek(struct genradix_iter *iter,
+ struct __genradix *radix,
+ size_t objs_per_page)
+{
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
+restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+ return NULL;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (ilog2(iter->offset) >= genradix_depth_shift(level))
+ return NULL;
+
+ while (level) {
+ level--;
+
+ i = (iter->offset >> genradix_depth_shift(level)) &
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
+ i++;
+ iter->offset = round_down(iter->offset +
+ genradix_depth_size(level),
+ genradix_depth_size(level));
+ iter->pos = (iter->offset >> PAGE_SHIFT) *
+ objs_per_page;
+ if (i == GENRADIX_ARY)
+ goto restart;
+ }
+
+ n = n->children[i];
+ }
+
+ return &n->data[iter->offset & (PAGE_SIZE - 1)];
+}
+EXPORT_SYMBOL(__genradix_iter_peek);
+
+static void genradix_free_recurse(struct genradix_node *n, unsigned level)
+{
+ if (level) {
+ unsigned i;
+
+ for (i = 0; i < GENRADIX_ARY; i++)
+ if (n->children[i])
+ genradix_free_recurse(n->children[i], level - 1);
+ }
+
+ free_page((unsigned long) n);
+}
+
+int __genradix_prealloc(struct __genradix *radix, size_t size,
+ gfp_t gfp_mask)
+{
+ size_t offset;
+
+ for (offset = 0; offset < size; offset += PAGE_SIZE)
+ if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(__genradix_prealloc);
+
+void __genradix_free(struct __genradix *radix)
+{
+ struct genradix_root *r = xchg(&radix->root, NULL);
+
+ genradix_free_recurse(genradix_root_to_node(r),
+ genradix_root_to_depth(r));
+}
+EXPORT_SYMBOL(__genradix_free);
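
As a usage sketch of the new library (assuming the DEFINE_GENRADIX(), genradix_ptr_alloc() and genradix_free() wrappers from the matching include/linux/generic-radix-tree.h; struct foo is purely illustrative):

#include <linux/generic-radix-tree.h>

struct foo {
	int x;
};

static DEFINE_GENRADIX(foos, struct foo);

static int set_foo(size_t idx, int x)
{
	/* Interior nodes are allocated on demand; new slots are zeroed. */
	struct foo *f = genradix_ptr_alloc(&foos, idx, GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->x = x;
	return 0;
}

Teardown is a single genradix_free(&foos), which ends up in __genradix_free() above.
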
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index be4bd627caf0..b396d328a764 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -861,8 +861,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
- struct page *head = compound_head(page);
- size_t v = n + offset + page_address(page) - page_address(head);
+ struct page *head;
+ size_t v = n + offset;
+
+ /*
+ * The general case needs to access the page order in order
+ * to compute the page size.
+ * However, we mostly deal with order-0 pages and thus can
+ * avoid a possible cache line miss for requests that fit all
+ * page orders.
+ */
+ if (n <= v && v <= PAGE_SIZE)
+ return true;
+
+ head = compound_head(page);
+ v += (page - head) << PAGE_SHIFT;
if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
return true;
@@ -1515,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i)
{
+#ifdef CONFIG_CRYPTO
struct ahash_request *hash = hashp;
struct scatterlist sg;
size_t copied;
@@ -1524,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
ahash_request_set_crypt(hash, &sg, NULL, copied);
crypto_ahash_update(hash);
return copied;
+#else
+ return 0;
+#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
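
The page_copy_sane() fast path above boils down to one overflow-safe range check; a stand-alone illustration of the same test (a sketch, not kernel code):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* True iff [offset, offset + n) fits within a single order-0 page. */
static bool copy_fits_order0_page(size_t offset, size_t n)
{
	size_t v = n + offset;

	/* "n <= v" rejects wrap-around of the sum; "v <= PAGE_SIZE"
	 * rejects copies that would spill past the page boundary. */
	return n <= v && v <= PAGE_SIZE;
}

Only when this cheap check fails does the kernel version touch compound_head() and the page order.
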
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 86a709954f5a..2f17b488d58e 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -35,7 +35,7 @@ void irq_poll_sched(struct irq_poll *iop)
local_irq_save(flags);
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index f0a2934605bf..4e9829c4d64c 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -47,7 +47,6 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
int err;
shash->tfm = tfm;
- shash->flags = 0;
*ctx = crc;
err = crypto_shash_update(shash, address, length);
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 4525fb094844..a8ede77afe0d 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
{
const unsigned char *ip = in;
unsigned char *op = out;
+ unsigned char *data_start;
size_t l = in_len;
size_t t = 0;
signed char state_offset = -2;
unsigned int m4_max_offset;
- // LZO v0 will never write 17 as first byte,
- // so this is used to version the bitstream
+ // LZO v0 will never write 17 as first byte (except for zero-length
+ // input), so this is used to version the bitstream
if (bitstream_version > 0) {
*op++ = 17;
*op++ = bitstream_version;
@@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
m4_max_offset = M4_MAX_OFFSET_V0;
}
+ data_start = op;
+
while (l > 20) {
size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
uintptr_t ll_end = (uintptr_t) ip + ll;
@@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
if (t > 0) {
const unsigned char *ii = in + in_len - t;
- if (op == out && t <= 238) {
+ if (op == data_start && t <= 238) {
*op++ = (17 + t);
} else if (t <= 3) {
op[state_offset] |= t;
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 6d2600ea3b55..9e07e9ef1aad 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
if (unlikely(in_len < 3))
goto input_overrun;
- if (likely(*ip == 17)) {
+ if (likely(in_len >= 5) && likely(*ip == 17)) {
bitstream_version = ip[1];
ip += 2;
- if (unlikely(in_len < 5))
- goto input_overrun;
} else {
bitstream_version = 0;
}
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 4e90d443d1b0..e723eacf7868 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -39,7 +39,7 @@ endif
ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
NEON_FLAGS := -ffreestanding
ifeq ($(ARCH),arm)
-NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
+NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
endif
CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
ifeq ($(ARCH),arm64)
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
index d5242f544551..b7c68030da4f 100644
--- a/lib/raid6/neon.uc
+++ b/lib/raid6/neon.uc
@@ -28,7 +28,6 @@
typedef uint8x16_t unative_t;
-#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
#define NSIZE sizeof(unative_t)
/*
@@ -61,7 +60,7 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
int d, z, z0;
register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
- const unative_t x1d = NBYTES(0x1d);
+ const unative_t x1d = vdupq_n_u8(0x1d);
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
@@ -92,7 +91,7 @@ void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop,
int d, z, z0;
register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
- const unative_t x1d = NBYTES(0x1d);
+ const unative_t x1d = vdupq_n_u8(0x1d);
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c
index 8cd20c9f834a..f13c07f82297 100644
--- a/lib/raid6/recov_neon_inner.c
+++ b/lib/raid6/recov_neon_inner.c
@@ -10,11 +10,6 @@
#include <arm_neon.h>
-static const uint8x16_t x0f = {
- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
-};
-
#ifdef CONFIG_ARM
/*
* AArch32 does not provide this intrinsic natively because it does not
@@ -41,6 +36,7 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
uint8x16_t pm1 = vld1q_u8(pbmul + 16);
uint8x16_t qm0 = vld1q_u8(qmul);
uint8x16_t qm1 = vld1q_u8(qmul + 16);
+ uint8x16_t x0f = vdupq_n_u8(0x0f);
/*
* while ( bytes-- ) {
@@ -60,14 +56,14 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
- vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4);
+ vy = vshrq_n_u8(vx, 4);
vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
- vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f));
+ vy = vqtbl1q_u8(qm1, vy);
qx = veorq_u8(vx, vy);
- vy = (uint8x16_t)vshrq_n_s16((int16x8_t)px, 4);
+ vy = vshrq_n_u8(px, 4);
vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f));
- vy = vqtbl1q_u8(pm1, vandq_u8(vy, x0f));
+ vy = vqtbl1q_u8(pm1, vy);
vx = veorq_u8(vx, vy);
db = veorq_u8(vx, qx);
@@ -87,6 +83,7 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
{
uint8x16_t qm0 = vld1q_u8(qmul);
uint8x16_t qm1 = vld1q_u8(qmul + 16);
+ uint8x16_t x0f = vdupq_n_u8(0x0f);
/*
* while (bytes--) {
@@ -100,9 +97,9 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
- vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4);
+ vy = vshrq_n_u8(vx, 4);
vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
- vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f));
+ vy = vqtbl1q_u8(qm1, vy);
vx = veorq_u8(vx, vy);
vy = veorq_u8(vx, vld1q_u8(p));
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 0a105d4af166..97f59abc3e92 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
else if (tbl->nest)
err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
- if (!err)
- err = rhashtable_rehash_table(ht);
+ if (!err || err == -EEXIST) {
+ int nerr;
+
+ nerr = rhashtable_rehash_table(ht);
+ err = err ?: nerr;
+ }
mutex_unlock(&ht->mutex);
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5b382c1244ed..155fe38756ec 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
+ /*
+ * Once the clear bit is set, the bit may be allocated out.
+ *
+ * Orders READ/WRITE on the asssociated instance(such as request
+ * of blk_mq) by this bit for avoiding race with re-allocation,
+ * and its pair is the memory barrier implied in __sbitmap_get_word.
+ *
+ * One invariant is that the clear bit has to be zero when the bit
+ * is in use.
+ */
+ smp_mb__before_atomic();
sbitmap_deferred_clear_bit(&sbq->sb, nr);
/*
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9ba349e775ef..739dc9fe2c55 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -625,6 +625,32 @@ bool __sg_page_iter_next(struct sg_page_iter *piter)
}
EXPORT_SYMBOL(__sg_page_iter_next);
+static int sg_dma_page_count(struct scatterlist *sg)
+{
+ return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
+{
+ struct sg_page_iter *piter = &dma_iter->base;
+
+ if (!piter->__nents || !piter->sg)
+ return false;
+
+ piter->sg_pgoffset += piter->__pg_advance;
+ piter->__pg_advance = 1;
+
+ while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
+ piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
+ piter->sg = sg_next(piter->sg);
+ if (!--piter->__nents || !piter->sg)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_dma_next);
+
/**
* sg_miter_start - start mapping iteration over a sg list
* @miter: sg mapping iter to be started
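
A hedged sketch of a caller walking DMA-mapped pages with the new iterator (assuming the for_each_sg_dma_page() and sg_page_iter_dma_address() helpers that accompany __sg_page_iter_dma_next()):

#include <linux/scatterlist.h>

static void walk_dma_pages(struct scatterlist *sgl, int dma_nents)
{
	struct sg_dma_page_iter dma_iter;

	/* Visit each PAGE_SIZE chunk of the DMA-mapped range. */
	for_each_sg_dma_page(sgl, &dma_iter, dma_nents, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);

		pr_debug("dma page at %pad\n", &addr);
	}
}
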
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index e513459a5601..605c61f65d94 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -194,40 +194,52 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
return NULL;
}
-void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
+/**
+ * stack_depot_fetch - Fetch stack entries from a depot
+ *
+ * @handle: Stack depot handle which was returned from
+ * stack_depot_save().
+ * @entries: Pointer to store the entries address
+ *
+ * Return: The number of trace entries for this depot.
+ */
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
void *slab = stack_slabs[parts.slabindex];
size_t offset = parts.offset << STACK_ALLOC_ALIGN;
struct stack_record *stack = slab + offset;
- trace->nr_entries = trace->max_entries = stack->size;
- trace->entries = stack->entries;
- trace->skip = 0;
+ *entries = stack->entries;
+ return stack->size;
}
-EXPORT_SYMBOL_GPL(depot_fetch_stack);
+EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
- * depot_save_stack - save stack in a stack depot.
- * @trace - the stacktrace to save.
- * @alloc_flags - flags for allocating additional memory if required.
+ * stack_depot_save - Save a stack trace from an array
+ *
+ * @entries: Pointer to storage array
+ * @nr_entries: Size of the storage array
+ * @alloc_flags: Allocation gfp flags
*
- * Returns the handle of the stack struct stored in depot.
+ * Return: The handle of the stack struct stored in depot
*/
-depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
- gfp_t alloc_flags)
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags)
{
- u32 hash;
- depot_stack_handle_t retval = 0;
struct stack_record *found = NULL, **bucket;
- unsigned long flags;
+ depot_stack_handle_t retval = 0;
struct page *page = NULL;
void *prealloc = NULL;
+ unsigned long flags;
+ u32 hash;
- if (unlikely(trace->nr_entries == 0))
+ if (unlikely(nr_entries == 0))
goto fast_exit;
- hash = hash_stack(trace->entries, trace->nr_entries);
+ hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & STACK_HASH_MASK];
/*
@@ -235,8 +247,8 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
* The smp_load_acquire() here pairs with smp_store_release() to
* |bucket| below.
*/
- found = find_stack(smp_load_acquire(bucket), trace->entries,
- trace->nr_entries, hash);
+ found = find_stack(smp_load_acquire(bucket), entries,
+ nr_entries, hash);
if (found)
goto exit;
@@ -264,10 +276,10 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
spin_lock_irqsave(&depot_lock, flags);
- found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
+ found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
struct stack_record *new =
- depot_alloc_stack(trace->entries, trace->nr_entries,
+ depot_alloc_stack(entries, nr_entries,
hash, &prealloc, alloc_flags);
if (new) {
new->next = *bucket;
@@ -297,4 +309,4 @@ exit:
fast_exit:
return retval;
}
-EXPORT_SYMBOL_GPL(depot_save_stack);
+EXPORT_SYMBOL_GPL(stack_depot_save);
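
Under the renamed API a save/fetch round trip looks roughly like this (a sketch against the signatures documented above; stack_trace_print() is assumed from the reworked stacktrace interface):

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_stack(gfp_t flags)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr, flags);
}

static void report_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, 0);
}
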
diff --git a/lib/string.c b/lib/string.c
index 38e4ca08e757..6016eb3ac73d 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -159,11 +159,9 @@ EXPORT_SYMBOL(strlcpy);
* @src: Where to copy the string from
* @count: Size of destination buffer
*
- * Copy the string, or as much of it as fits, into the dest buffer.
- * The routine returns the number of characters copied (not including
- * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
- * The behavior is undefined if the string buffers overlap.
- * The destination buffer is always NUL terminated, unless it's zero-sized.
+ * Copy the string, or as much of it as fits, into the dest buffer. The
+ * behavior is undefined if the string buffers overlap. The destination
+ * buffer is always NUL terminated, unless it's zero-sized.
*
* Preferred to strlcpy() since the API doesn't require reading memory
* from the src string beyond the specified "count" bytes, and since
@@ -173,8 +171,10 @@ EXPORT_SYMBOL(strlcpy);
*
* Preferred to strncpy() since it always returns a valid string, and
* doesn't unnecessarily force the tail of the destination buffer to be
- * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy()
- * with an overflow test, then just memset() the tail of the dest buffer.
+ * zeroed. If zeroing is desired please use strscpy_pad().
+ *
+ * Return: The number of characters copied (not including the trailing
+ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
*/
ssize_t strscpy(char *dest, const char *src, size_t count)
{
@@ -237,6 +237,39 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strscpy);
#endif
+/**
+ * strscpy_pad() - Copy a C-string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer. The
+ * behavior is undefined if the string buffers overlap. The destination
+ * buffer is always %NUL terminated, unless it's zero-sized.
+ *
+ * If the source string is shorter than the destination buffer, zeros
+ * the tail of the destination buffer.
+ *
+ * For a full explanation of why you may want to consider using the
+ * 'strscpy' functions, please see the kernel-doc for strscpy().
+ *
+ * Return: The number of characters copied (not including the trailing
+ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
+ */
+ssize_t strscpy_pad(char *dest, const char *src, size_t count)
+{
+ ssize_t written;
+
+ written = strscpy(dest, src, count);
+ if (written < 0 || written == count - 1)
+ return written;
+
+ memset(dest + written + 1, 0, count - written - 1);
+
+ return written;
+}
+EXPORT_SYMBOL(strscpy_pad);
+
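
To illustrate the difference between the two flavours on a short source string (a sketch using the functions exactly as documented above):

	char buf[8];

	/* strscpy(): copies "hi\0" and leaves buf[3..7] untouched. */
	strscpy(buf, "hi", sizeof(buf));	/* returns 2 */

	/* strscpy_pad(): copies "hi\0" and zeroes buf[3..7] too. */
	strscpy_pad(buf, "hi", sizeof(buf));	/* also returns 2 */
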
#ifndef __HAVE_ARCH_STRCAT
/**
* strcat - Append one %NUL-terminated string to another
@@ -866,6 +899,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
EXPORT_SYMBOL(memcmp);
#endif
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+ return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
#ifndef __HAVE_ARCH_MEMSCAN
/**
* memscan - Find a character in an area of memory.
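
Given the bcmp() contract above, only zero versus non-zero is meaningful; a short sketch of correct use (handle_match() is a hypothetical caller-side helper):

	/* Correct: test only for equality. */
	if (bcmp(a, b, len) == 0)
		handle_match();

	/*
	 * Incorrect: bcmp() makes no ordering promise, so a result
	 * like "bcmp(a, b, len) < 0" must never be used for sorting.
	 */
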
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 58eacd41526c..023ba9f3b99f 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -23,10 +23,11 @@
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+static inline long do_strncpy_from_user(char *dst, const char __user *src,
+ unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- long res = 0;
+ unsigned long res = 0;
/*
* Truncate 'max' to the user-specified limit, so that
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 1c1a1b0e38a5..7f2db3fe311f 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -28,7 +28,7 @@
static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- long align, res = 0;
+ unsigned long align, res = 0;
unsigned long c;
/*
@@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
* Do everything aligned. But that means that we
* need to also expand the maximum..
*/
- align = (sizeof(long) - 1) & (unsigned long)src;
+ align = (sizeof(unsigned long) - 1) & (unsigned long)src;
src -= align;
max += align;
diff --git a/lib/syscall.c b/lib/syscall.c
index 1a7077f20eae..fb328e7ccb08 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -5,16 +5,14 @@
#include <linux/export.h>
#include <asm/syscall.h>
-static int collect_syscall(struct task_struct *target, long *callno,
- unsigned long args[6], unsigned int maxargs,
- unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
{
struct pt_regs *regs;
if (!try_get_task_stack(target)) {
/* Task has no stack, so the task isn't in a syscall. */
- *sp = *pc = 0;
- *callno = -1;
+ memset(info, 0, sizeof(*info));
+ info->data.nr = -1;
return 0;
}
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
return -EAGAIN;
}
- *sp = user_stack_pointer(regs);
- *pc = instruction_pointer(regs);
+ info->sp = user_stack_pointer(regs);
+ info->data.instruction_pointer = instruction_pointer(regs);
- *callno = syscall_get_nr(target, regs);
- if (*callno != -1L && maxargs > 0)
- syscall_get_arguments(target, regs, 0, maxargs, args);
+ info->data.nr = syscall_get_nr(target, regs);
+ if (info->data.nr != -1L)
+ syscall_get_arguments(target, regs,
+ (unsigned long *)&info->data.args[0]);
put_task_stack(target);
return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
/**
* task_current_syscall - Discover what a blocked task is doing.
* @target: thread to examine
- * @callno: filled with system call number or -1
- * @args: filled with @maxargs system call arguments
- * @maxargs: number of elements in @args to fill
- * @sp: filled with user stack pointer
- * @pc: filled with user PC
+ * @info: structure with the following fields:
+ * .sp - filled with user stack pointer
+ * .data.nr - filled with system call number or -1
+ * .data.args - filled with the system call arguments
+ * .data.instruction_pointer - filled with user PC
*
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
* call is still in progress. Note we may get this result if @target
* has finished its system call but not yet returned to user mode, such
* as when it's stopped for signal handling or syscall exit tracing.
*
* If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * returns zero with @info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
*
* Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
*/
-int task_current_syscall(struct task_struct *target, long *callno,
- unsigned long args[6], unsigned int maxargs,
- unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
{
long state;
unsigned long ncsw;
- if (unlikely(maxargs > 6))
- return -EINVAL;
-
if (target == current)
- return collect_syscall(target, callno, args, maxargs, sp, pc);
+ return collect_syscall(target, info);
state = target->state;
if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
ncsw = wait_task_inactive(target, state);
if (unlikely(!ncsw) ||
- unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+ unlikely(collect_syscall(target, info)) ||
unlikely(wait_task_inactive(target, state) != ncsw))
return -EAGAIN;
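
A hedged sketch of a caller under the new signature (assuming struct syscall_info carries a .sp field plus a seccomp-style .data with .nr, .args[] and .instruction_pointer, as used above):

#include <linux/sched.h>
#include <asm/syscall.h>

static void show_blocked_syscall(struct task_struct *task)
{
	struct syscall_info info;

	if (task_current_syscall(task, &info))
		return;		/* -EAGAIN: target did not stay blocked */

	if (info.data.nr != -1)
		pr_info("pid %d in syscall %d, sp=0x%llx\n",
			task_pid_nr(task), info.data.nr, info.sp);
}
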
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 6cd7d0740005..792d90608052 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -12,6 +12,8 @@
#include <linux/slab.h>
#include <linux/string.h>
+#include "../tools/testing/selftests/kselftest_module.h"
+
static unsigned total_tests __initdata;
static unsigned failed_tests __initdata;
@@ -361,7 +363,7 @@ static void noinline __init test_mem_optimisations(void)
}
}
-static int __init test_bitmap_init(void)
+static void __init selftest(void)
{
test_zero_clear();
test_fill_set();
@@ -369,22 +371,8 @@ static int __init test_bitmap_init(void)
test_bitmap_arr32();
test_bitmap_parselist();
test_mem_optimisations();
-
- if (failed_tests == 0)
- pr_info("all %u tests passed\n", total_tests);
- else
- pr_warn("failed %u out of %u tests\n",
- failed_tests, total_tests);
-
- return failed_tests ? -EINVAL : 0;
}
-static void __exit test_bitmap_cleanup(void)
-{
-}
-
-module_init(test_bitmap_init);
-module_exit(test_bitmap_cleanup);
-
+KSTM_MODULE_LOADERS(test_bitmap);
MODULE_AUTHOR("david decotigny <david.decotigny@googlers.com>");
MODULE_LICENSE("GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index d5ab4ca4da99..93da0a5000ec 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -21,6 +21,8 @@
#include <linux/gfp.h>
#include <linux/mm.h>
+#include "../tools/testing/selftests/kselftest_module.h"
+
#define BUF_SIZE 256
#define PAD_SIZE 16
#define FILL_CHAR '$'
@@ -609,12 +611,11 @@ test_pointer(void)
flags();
}
-static int __init
-test_printf_init(void)
+static void __init selftest(void)
{
alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL);
if (!alloced_buffer)
- return -ENOMEM;
+ return;
test_buffer = alloced_buffer + PAD_SIZE;
test_basic();
@@ -623,16 +624,8 @@ test_printf_init(void)
test_pointer();
kfree(alloced_buffer);
-
- if (failed_tests == 0)
- pr_info("all %u tests passed\n", total_tests);
- else
- pr_warn("failed %u out of %u tests\n", failed_tests, total_tests);
-
- return failed_tests ? -EINVAL : 0;
}
-module_init(test_printf_init);
-
+KSTM_MODULE_LOADERS(test_printf);
MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
MODULE_LICENSE("GPL");
diff --git a/lib/test_strscpy.c b/lib/test_strscpy.c
new file mode 100644
index 000000000000..a827f94601f5
--- /dev/null
+++ b/lib/test_strscpy.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/string.h>
+
+#include "../tools/testing/selftests/kselftest_module.h"
+
+/*
+ * Kernel module for testing 'strscpy' family of functions.
+ */
+
+KSTM_MODULE_GLOBALS();
+
+/*
+ * tc() - Run a specific test case.
+ * @src: Source string, argument to strscpy_pad()
+ * @count: Size of destination buffer, argument to strscpy_pad()
+ * @expected: Expected return value from call to strscpy_pad()
+ * @chars: Number of characters from the src string expected to be
+ * written to the dst buffer.
+ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
+ * @pad: Number of pad characters expected (in the tail of dst buffer).
+ * (@pad does not include the null terminator byte.)
+ *
+ * Calls strscpy_pad() and verifies the return value and state of the
+ * destination buffer after the call returns.
+ */
+static int __init tc(char *src, int count, int expected,
+ int chars, int terminator, int pad)
+{
+ int nr_bytes_poison;
+ int max_expected;
+ int max_count;
+ int written;
+ char buf[6];
+ int index, i;
+ const char POISON = 'z';
+
+ total_tests++;
+
+ if (!src) {
+ pr_err("null source string not supported\n");
+ return -1;
+ }
+
+ memset(buf, POISON, sizeof(buf));
+ /* Future proofing test suite, validate args */
+ max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
+ max_expected = count - 1; /* Space for the null */
+ if (count > max_count) {
+ pr_err("count (%d) is too big (%d) ... aborting", count, max_count);
+ return -1;
+ }
+ if (expected > max_expected) {
+ pr_warn("expected (%d) is bigger than can possibly be returned (%d)",
+ expected, max_expected);
+ }
+
+ written = strscpy_pad(buf, src, count);
+ if (written != expected) {
+ pr_err("%d != %d (written, expected)\n", written, expected);
+ goto fail;
+ }
+
+ if (count && written == -E2BIG) {
+ if (strncmp(buf, src, count - 1) != 0) {
+ pr_err("buffer state invalid for -E2BIG\n");
+ goto fail;
+ }
+ if (buf[count - 1] != '\0') {
+ pr_err("too big string is not null terminated correctly\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < chars; i++) {
+ if (buf[i] != src[i]) {
+ pr_err("buf[i]==%c != src[i]==%c\n", buf[i], src[i]);
+ goto fail;
+ }
+ }
+
+ if (terminator) {
+ if (buf[count - 1] != '\0') {
+ pr_err("string is not null terminated correctly\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < pad; i++) {
+ index = chars + terminator + i;
+ if (buf[index] != '\0') {
+ pr_err("padding missing at index: %d\n", i);
+ goto fail;
+ }
+ }
+
+ nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
+ for (i = 0; i < nr_bytes_poison; i++) {
+ index = sizeof(buf) - 1 - i; /* Check from the end back */
+ if (buf[index] != POISON) {
+ pr_err("poison value missing at index: %d\n", i);
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ failed_tests++;
+ return -1;
+}
+
+static void __init selftest(void)
+{
+ /*
+ * tc() uses a destination buffer of size 6 and needs at
+ * least 2 characters spare (one for null and one to check for
+ * overflow). This means we should only call tc() with
+ * strings up to a maximum of 4 characters long and 'count'
+ * should not exceed 4. To test with longer strings increase
+ * the buffer size in tc().
+ */
+
+ /* tc(src, count, expected, chars, terminator, pad) */
+ KSTM_CHECK_ZERO(tc("a", 0, -E2BIG, 0, 0, 0));
+ KSTM_CHECK_ZERO(tc("", 0, -E2BIG, 0, 0, 0));
+
+ KSTM_CHECK_ZERO(tc("a", 1, -E2BIG, 0, 1, 0));
+ KSTM_CHECK_ZERO(tc("", 1, 0, 0, 1, 0));
+
+ KSTM_CHECK_ZERO(tc("ab", 2, -E2BIG, 1, 1, 0));
+ KSTM_CHECK_ZERO(tc("a", 2, 1, 1, 1, 0));
+ KSTM_CHECK_ZERO(tc("", 2, 0, 0, 1, 1));
+
+ KSTM_CHECK_ZERO(tc("abc", 3, -E2BIG, 2, 1, 0));
+ KSTM_CHECK_ZERO(tc("ab", 3, 2, 2, 1, 0));
+ KSTM_CHECK_ZERO(tc("a", 3, 1, 1, 1, 1));
+ KSTM_CHECK_ZERO(tc("", 3, 0, 0, 1, 2));
+
+ KSTM_CHECK_ZERO(tc("abcd", 4, -E2BIG, 3, 1, 0));
+ KSTM_CHECK_ZERO(tc("abc", 4, 3, 3, 1, 0));
+ KSTM_CHECK_ZERO(tc("ab", 4, 2, 2, 1, 1));
+ KSTM_CHECK_ZERO(tc("a", 4, 1, 1, 1, 2));
+ KSTM_CHECK_ZERO(tc("", 4, 0, 0, 1, 3));
+}
+
+KSTM_MODULE_LOADERS(test_strscpy);
+MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 83cdcaa82bf6..f832b095afba 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
static int test_func(void *private)
{
struct test_driver *t = private;
- cpumask_t newmask = CPU_MASK_NONE;
int random_array[ARRAY_SIZE(test_case_array)];
int index, i, j, ret;
ktime_t kt;
u64 delta;
- cpumask_set_cpu(t->cpu, &newmask);
- set_cpus_allowed_ptr(current, &newmask);
+ ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
+ if (ret < 0)
+ pr_err("Failed to set affinity to %d CPU\n", t->cpu);
for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
random_array[i] = i;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index c596a957f764..5d4bad8bd96a 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -40,9 +40,9 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
- u32 id = 0;
+ u32 id;
- XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
gfp) != 0);
XA_BUG_ON(xa, id != index);
}
@@ -107,8 +107,11 @@ static noinline void check_xas_retry(struct xarray *xa)
XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
XA_BUG_ON(xa, xas.xa_node != NULL);
+ rcu_read_unlock();
XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+
+ rcu_read_lock();
XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
xas.xa_node = XAS_RESTART;
XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
@@ -343,7 +346,7 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
- XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
@@ -358,46 +361,65 @@ static noinline void check_reserve(struct xarray *xa)
{
void *entry;
unsigned long index;
+ int count;
/* An array with a reserved entry is not empty */
XA_BUG_ON(xa, !xa_empty(xa));
- xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
XA_BUG_ON(xa, xa_empty(xa));
XA_BUG_ON(xa, xa_load(xa, 12345678));
xa_release(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
/* Releasing a used entry does nothing */
- xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
xa_release(xa, 12345678);
xa_erase_index(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
- /* cmpxchg sees a reserved entry as NULL */
- xa_reserve(xa, 12345678, GFP_KERNEL);
- XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
- GFP_NOWAIT) != NULL);
+ /* cmpxchg sees a reserved entry as ZERO */
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
+ xa_mk_value(12345678), GFP_NOWAIT) != NULL);
xa_release(xa, 12345678);
xa_erase_index(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
- /* But xa_insert does not */
- xa_reserve(xa, 12345678, GFP_KERNEL);
+ /* xa_insert treats it as busy */
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
- -EEXIST);
+ -EBUSY);
XA_BUG_ON(xa, xa_empty(xa));
XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
XA_BUG_ON(xa, !xa_empty(xa));
/* Can iterate through a reserved entry */
xa_store_index(xa, 5, GFP_KERNEL);
- xa_reserve(xa, 6, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
xa_store_index(xa, 7, GFP_KERNEL);
+ count = 0;
xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, index != 5 && index != 7);
+ count++;
+ }
+ XA_BUG_ON(xa, count != 2);
+
+ /* If we free a reserved entry, we should be able to allocate it */
+ if (xa->xa_flags & XA_FLAGS_ALLOC) {
+ u32 id;
+
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
+ XA_LIMIT(5, 10), GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 8);
+
+ xa_release(xa, 6);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
+ XA_LIMIT(5, 10), GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 6);
}
+
xa_destroy(xa);
}
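
These tests exercise the reworked allocating-XArray API; a minimal usage sketch (assuming xa_alloc()'s entry-then-limit argument order and the xa_limit_32b/XA_LIMIT() helpers exercised above; struct thing is purely illustrative):

static DEFINE_XARRAY_ALLOC(things);

struct thing {
	u32 id;
};

static int add_thing(struct thing *t)
{
	u32 id;
	int err;

	/* Assign the lowest free ID in [0, UINT_MAX]. */
	err = xa_alloc(&things, &id, t, xa_limit_32b, GFP_KERNEL);
	if (err)
		return err;	/* -EBUSY when the range is full */
	t->id = id;
	return 0;
}
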
@@ -586,64 +608,194 @@ static noinline void check_multi_store(struct xarray *xa)
#endif
}
-static DEFINE_XARRAY_ALLOC(xa0);
-
-static noinline void check_xa_alloc(void)
+static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
int i;
u32 id;
- /* An empty array should assign 0 to the first alloc */
- xa_alloc_index(&xa0, 0, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ /* An empty array should assign %base to the first alloc */
+ xa_alloc_index(xa, base, GFP_KERNEL);
/* Erasing it should make the array empty again */
- xa_erase_index(&xa0, 0);
- XA_BUG_ON(&xa0, !xa_empty(&xa0));
+ xa_erase_index(xa, base);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* And it should assign %base again */
+ xa_alloc_index(xa, base, GFP_KERNEL);
+
+ /* Allocating and then erasing a lot should not lose base */
+ for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
+ xa_alloc_index(xa, i, GFP_KERNEL);
+ for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
+ xa_erase_index(xa, i);
+ xa_alloc_index(xa, base, GFP_KERNEL);
- /* And it should assign 0 again */
- xa_alloc_index(&xa0, 0, GFP_KERNEL);
+ /* Destroying the array should do the same as erasing */
+ xa_destroy(xa);
+
+ /* And it should assign %base again */
+ xa_alloc_index(xa, base, GFP_KERNEL);
- /* The next assigned ID should be 1 */
- xa_alloc_index(&xa0, 1, GFP_KERNEL);
- xa_erase_index(&xa0, 1);
+ /* The next assigned ID should be base+1 */
+ xa_alloc_index(xa, base + 1, GFP_KERNEL);
+ xa_erase_index(xa, base + 1);
/* Storing a value should mark it used */
- xa_store_index(&xa0, 1, GFP_KERNEL);
- xa_alloc_index(&xa0, 2, GFP_KERNEL);
+ xa_store_index(xa, base + 1, GFP_KERNEL);
+ xa_alloc_index(xa, base + 2, GFP_KERNEL);
- /* If we then erase 0, it should be free */
- xa_erase_index(&xa0, 0);
- xa_alloc_index(&xa0, 0, GFP_KERNEL);
+ /* If we then erase base, it should be free */
+ xa_erase_index(xa, base);
+ xa_alloc_index(xa, base, GFP_KERNEL);
- xa_erase_index(&xa0, 1);
- xa_erase_index(&xa0, 2);
+ xa_erase_index(xa, base + 1);
+ xa_erase_index(xa, base + 2);
for (i = 1; i < 5000; i++) {
- xa_alloc_index(&xa0, i, GFP_KERNEL);
+ xa_alloc_index(xa, base + i, GFP_KERNEL);
}
- xa_destroy(&xa0);
+ xa_destroy(xa);
- id = 0xfffffffeU;
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+ /* Check that we fail properly at the limit of allocation */
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
+ XA_LIMIT(UINT_MAX - 1, UINT_MAX),
GFP_KERNEL) != 0);
- XA_BUG_ON(&xa0, id != 0xfffffffeU);
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+ XA_BUG_ON(xa, id != 0xfffffffeU);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
+ XA_LIMIT(UINT_MAX - 1, UINT_MAX),
GFP_KERNEL) != 0);
- XA_BUG_ON(&xa0, id != 0xffffffffU);
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
- GFP_KERNEL) != -ENOSPC);
- XA_BUG_ON(&xa0, id != 0xffffffffU);
- xa_destroy(&xa0);
-
- id = 10;
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
- GFP_KERNEL) != -ENOSPC);
- XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
- GFP_KERNEL) != -ENOSPC);
- xa_erase_index(&xa0, 3);
- XA_BUG_ON(&xa0, !xa_empty(&xa0));
+ XA_BUG_ON(xa, id != 0xffffffffU);
+ id = 3;
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
+ XA_LIMIT(UINT_MAX - 1, UINT_MAX),
+ GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, id != 3);
+ xa_destroy(xa);
+
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
+ GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
+ GFP_KERNEL) != -EBUSY);
+ xa_erase_index(xa, 3);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
+{
+ unsigned int i, id;
+ unsigned long index;
+ void *entry;
+
+ /* Allocate and free a NULL and check xa_empty() behaves */
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != base);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Ditto, but check destroy instead of erase */
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != base);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = base; i < base + 10; i++) {
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != i);
+ }
+
+ XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
+ XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 5);
+
+ xa_for_each(xa, index, entry) {
+ xa_erase_index(xa, index);
+ }
+
+ for (i = base; i < base + 9; i++) {
+ XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ }
+ XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ xa_destroy(xa);
+}
+
+static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
+{
+ struct xa_limit limit = XA_LIMIT(1, 0x3fff);
+ u32 next = 0;
+ unsigned int i, id;
+ unsigned long index;
+ void *entry;
+
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
+ &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 1);
+
+ next = 0x3ffd;
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
+ &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 0x3ffd);
+ xa_erase_index(xa, 0x3ffd);
+ xa_erase_index(xa, 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = 0x3ffe; i < 0x4003; i++) {
+ if (i < 0x4000)
+ entry = xa_mk_index(i);
+ else
+ entry = xa_mk_index(i - 0x3fff);
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
+ &next, GFP_KERNEL) != (id == 1));
+ XA_BUG_ON(xa, xa_mk_index(id) != entry);
+ }
+
+ /* Check wrap-around is handled correctly */
+ if (base != 0)
+ xa_erase_index(xa, base);
+ xa_erase_index(xa, base + 1);
+ next = UINT_MAX;
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != UINT_MAX);
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
+ xa_limit_32b, &next, GFP_KERNEL) != 1);
+ XA_BUG_ON(xa, id != base);
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != base + 1);
+
+ xa_for_each(xa, index, entry)
+ xa_erase_index(xa, index);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static DEFINE_XARRAY_ALLOC(xa0);
+static DEFINE_XARRAY_ALLOC1(xa1);
+
+static noinline void check_xa_alloc(void)
+{
+ check_xa_alloc_1(&xa0, 0);
+ check_xa_alloc_1(&xa1, 1);
+ check_xa_alloc_2(&xa0, 0);
+ check_xa_alloc_2(&xa1, 1);
+ check_xa_alloc_3(&xa0, 0);
+ check_xa_alloc_3(&xa1, 1);
}
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
@@ -1194,9 +1346,8 @@ static void check_align_1(struct xarray *xa, char *name)
void *entry;
for (i = 0; i < 8; i++) {
- id = 0;
- XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL)
- != 0);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
+ GFP_KERNEL) != 0);
XA_BUG_ON(xa, id != i);
}
xa_for_each(xa, index, entry)
@@ -1204,6 +1355,30 @@ static void check_align_1(struct xarray *xa, char *name)
xa_destroy(xa);
}
+/*
+ * We should always be able to store without allocating memory after
+ * reserving a slot.
+ */
+static void check_align_2(struct xarray *xa, char *name)
+{
+ int i;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = 0; i < 8; i++) {
+ XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
+ xa_erase(xa, 0);
+ }
+
+ for (i = 0; i < 8; i++) {
+ XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
+ xa_erase(xa, 0);
+ }
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
static noinline void check_align(struct xarray *xa)
{
char name[] = "Motorola 68000";
@@ -1212,7 +1387,7 @@ static noinline void check_align(struct xarray *xa)
check_align_1(xa, name + 1);
check_align_1(xa, name + 2);
check_align_1(xa, name + 3);
-// check_align_2(xa, name);
+ check_align_2(xa, name);
}
static LIST_HEAD(shadow_nodes);
@@ -1354,6 +1529,7 @@ static int xarray_checks(void)
check_xas_erase(&array);
check_cmpxchg(&array);
check_reserve(&array);
+ check_reserve(&xa0);
check_multi_store(&array);
check_xa_alloc();
check_find(&array);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index e4162f59a81c..ecc179338094 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/uaccess.h>
#include "ubsan.h"
@@ -86,11 +87,13 @@ static bool is_inline_int(struct type_descriptor *type)
return bits <= inline_bits;
}
-static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
+static s_max get_signed_val(struct type_descriptor *type, void *val)
{
if (is_inline_int(type)) {
unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
- return ((s_max)val) << extra_bits >> extra_bits;
+ unsigned long ulong_val = (unsigned long)val;
+
+ return ((s_max)ulong_val) << extra_bits >> extra_bits;
}
if (type_bit_width(type) == 64)
@@ -99,15 +102,15 @@ static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
return *(s_max *)val;
}
-static bool val_is_negative(struct type_descriptor *type, unsigned long val)
+static bool val_is_negative(struct type_descriptor *type, void *val)
{
return type_is_signed(type) && get_signed_val(type, val) < 0;
}
-static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
+static u_max get_unsigned_val(struct type_descriptor *type, void *val)
{
if (is_inline_int(type))
- return val;
+ return (unsigned long)val;
if (type_bit_width(type) == 64)
return *(u64 *)val;
@@ -116,7 +119,7 @@ static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
}
static void val_to_string(char *str, size_t size, struct type_descriptor *type,
- unsigned long value)
+ void *value)
{
if (type_is_int(type)) {
if (type_bit_width(type) == 128) {
@@ -163,8 +166,8 @@ static void ubsan_epilogue(unsigned long *flags)
current->in_ubsan--;
}
-static void handle_overflow(struct overflow_data *data, unsigned long lhs,
- unsigned long rhs, char op)
+static void handle_overflow(struct overflow_data *data, void *lhs,
+ void *rhs, char op)
{
struct type_descriptor *type = data->type;
@@ -191,8 +194,7 @@ static void handle_overflow(struct overflow_data *data, unsigned long lhs,
}
void __ubsan_handle_add_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '+');
@@ -200,23 +202,21 @@ void __ubsan_handle_add_overflow(struct overflow_data *data,
EXPORT_SYMBOL(__ubsan_handle_add_overflow);
void __ubsan_handle_sub_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '-');
}
EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
void __ubsan_handle_mul_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '*');
}
EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
void __ubsan_handle_negate_overflow(struct overflow_data *data,
- unsigned long old_val)
+ void *old_val)
{
unsigned long flags;
char old_val_str[VALUE_LENGTH];
@@ -237,8 +237,7 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
void __ubsan_handle_divrem_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
unsigned long flags;
char rhs_val_str[VALUE_LENGTH];
@@ -313,6 +312,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
unsigned long ptr)
{
+ unsigned long flags = user_access_save();
if (!ptr)
handle_null_ptr_deref(data);
@@ -320,10 +320,12 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
handle_misaligned_access(data, ptr);
else
handle_object_size_mismatch(data, ptr);
+
+ user_access_restore(flags);
}
void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
- unsigned long ptr)
+ void *ptr)
{
struct type_mismatch_data_common common_data = {
.location = &data->location,
@@ -332,12 +334,12 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
.type_check_kind = data->type_check_kind
};
- ubsan_type_mismatch_common(&common_data, ptr);
+ ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
- unsigned long ptr)
+ void *ptr)
{
struct type_mismatch_data_common common_data = {
@@ -347,30 +349,11 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
.type_check_kind = data->type_check_kind
};
- ubsan_type_mismatch_common(&common_data, ptr);
+ ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
-void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
- unsigned long bound)
-{
- unsigned long flags;
- char bound_str[VALUE_LENGTH];
-
- if (suppress_report(&data->location))
- return;
-
- ubsan_prologue(&data->location, &flags);
-
- val_to_string(bound_str, sizeof(bound_str), data->type, bound);
- pr_err("variable length array bound value %s <= 0\n", bound_str);
-
- ubsan_epilogue(&flags);
-}
-EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
-
-void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
- unsigned long index)
+void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
{
unsigned long flags;
char index_str[VALUE_LENGTH];
@@ -388,7 +371,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
- unsigned long lhs, unsigned long rhs)
+ void *lhs, void *rhs)
{
unsigned long flags;
struct type_descriptor *rhs_type = data->rhs_type;
@@ -439,7 +422,7 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
- unsigned long val)
+ void *val)
{
unsigned long flags;
char val_str[VALUE_LENGTH];
diff --git a/lib/ubsan.h b/lib/ubsan.h
index f4d8d0bd4016..b8fa83864467 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -57,11 +57,6 @@ struct nonnull_arg_data {
int arg_index;
};
-struct vla_bound_data {
- struct source_location location;
- struct type_descriptor *type;
-};
-
struct out_of_bounds_data {
struct source_location location;
struct type_descriptor *array_type;
diff --git a/lib/xarray.c b/lib/xarray.c
index 81c3171ddde9..6be3acbb861f 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -57,6 +57,11 @@ static inline bool xa_track_free(const struct xarray *xa)
return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}
+static inline bool xa_zero_busy(const struct xarray *xa)
+{
+ return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
+}
+
static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
@@ -432,6 +437,8 @@ static void xas_shrink(struct xa_state *xas)
break;
if (!xa_is_node(entry) && node->shift)
break;
+ if (xa_is_zero(entry) && xa_zero_busy(xa))
+ entry = NULL;
xas->xa_node = XAS_BOUNDS;
RCU_INIT_POINTER(xa->xa_head, entry);
@@ -628,6 +635,8 @@ static void *xas_create(struct xa_state *xas, bool allow_root)
if (xas_top(node)) {
entry = xa_head_locked(xa);
xas->xa_node = NULL;
+ if (!entry && xa_zero_busy(xa))
+ entry = XA_ZERO_ENTRY;
shift = xas_expand(xas, entry);
if (shift < 0)
return NULL;
@@ -758,10 +767,12 @@ void *xas_store(struct xa_state *xas, void *entry)
void *first, *next;
bool value = xa_is_value(entry);
- if (entry)
- first = xas_create(xas, !xa_is_node(entry));
- else
+ if (entry) {
+ bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
+ first = xas_create(xas, allow_root);
+ } else {
first = xas_load(xas);
+ }
if (xas_invalid(xas))
return first;
@@ -791,7 +802,7 @@ void *xas_store(struct xa_state *xas, void *entry)
* entry is set to NULL.
*/
rcu_assign_pointer(*slot, entry);
- if (xa_is_node(next))
+ if (xa_is_node(next) && (!node || node->shift))
xas_free_nodes(xas, xa_to_node(next));
if (!node)
break;
@@ -1294,13 +1305,12 @@ static void *xas_result(struct xa_state *xas, void *curr)
* @xa: XArray.
* @index: Index into array.
*
- * If the entry at this index is a multi-index entry then all indices will
- * be erased, and the entry will no longer be a multi-index entry.
- * This function expects the xa_lock to be held on entry.
+ * After this function returns, loading from @index will return %NULL.
+ * If the index is part of a multi-index entry, all indices will be erased
+ * and none of the entries will be part of a multi-index entry.
*
- * Context: Any context. Expects xa_lock to be held on entry. May
- * release and reacquire xa_lock if @gfp flags permit.
- * Return: The old entry at this index.
+ * Context: Any context. Expects xa_lock to be held on entry.
+ * Return: The entry which used to be at this index.
*/
void *__xa_erase(struct xarray *xa, unsigned long index)
{
@@ -1314,9 +1324,9 @@ EXPORT_SYMBOL(__xa_erase);
* @xa: XArray.
* @index: Index of entry.
*
- * This function is the equivalent of calling xa_store() with %NULL as
- * the third argument. The XArray does not need to allocate memory, so
- * the user does not need to provide GFP flags.
+ * After this function returns, loading from @index will return %NULL.
+ * If the index is part of a multi-index entry, all indices will be erased
+ * and none of the entries will be part of a multi-index entry.
*
* Context: Any context. Takes and releases the xa_lock.
* Return: The entry which used to be at this index.
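For illustration, a minimal sketch of the semantics documented above (hypothetical caller and array name, not part of this patch): whatever was stored at the index is returned, and subsequent loads see %NULL.

	#include <linux/xarray.h>

	static DEFINE_XARRAY(my_xa);		/* hypothetical array */

	static void my_erase_demo(void)
	{
		void *old;

		xa_store(&my_xa, 5, xa_mk_value(42), GFP_KERNEL);
		old = xa_erase(&my_xa, 5);	/* returns xa_mk_value(42) */
		WARN_ON(old != xa_mk_value(42));
		WARN_ON(xa_load(&my_xa, 5));	/* loads now return NULL */
	}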
@@ -1421,16 +1431,12 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
- if (xa_track_free(xa) && !entry)
- entry = XA_ZERO_ENTRY;
do {
curr = xas_load(&xas);
- if (curr == XA_ZERO_ENTRY)
- curr = NULL;
if (curr == old) {
xas_store(&xas, entry);
- if (xa_track_free(xa))
+ if (xa_track_free(xa) && entry && !curr)
xas_clear_mark(&xas, XA_FREE_MARK);
}
} while (__xas_nomem(&xas, gfp));
@@ -1452,7 +1458,7 @@ EXPORT_SYMBOL(__xa_cmpxchg);
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * Return: 0 if the store succeeded. -EBUSY if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
@@ -1472,7 +1478,7 @@ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
} else {
- xas_set_err(&xas, -EEXIST);
+ xas_set_err(&xas, -EBUSY);
}
} while (__xas_nomem(&xas, gfp));
@@ -1480,42 +1486,6 @@ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
}
EXPORT_SYMBOL(__xa_insert);
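A brief hedged sketch of the new error convention (hypothetical caller, not part of this patch): an occupied slot is now reported as -EBUSY rather than -EEXIST.

	#include <linux/xarray.h>

	static DEFINE_XARRAY(my_xa);		/* hypothetical array */

	static int my_insert_demo(void)
	{
		int err;

		err = xa_insert(&my_xa, 1, xa_mk_value(1), GFP_KERNEL);
		if (err)
			return err;		/* -ENOMEM on allocation failure */
		err = xa_insert(&my_xa, 1, xa_mk_value(2), GFP_KERNEL);
		return err;			/* now -EBUSY, was -EEXIST */
	}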
-/**
- * __xa_reserve() - Reserve this index in the XArray.
- * @xa: XArray.
- * @index: Index into array.
- * @gfp: Memory allocation flags.
- *
- * Ensures there is somewhere to store an entry at @index in the array.
- * If there is already something stored at @index, this function does
- * nothing. If there was nothing there, the entry is marked as reserved.
- * Loading from a reserved entry returns a %NULL pointer.
- *
- * If you do not use the entry that you have reserved, call xa_release()
- * or xa_erase() to free any unnecessary memory.
- *
- * Context: Any context. Expects the xa_lock to be held on entry. May
- * release the lock, sleep and reacquire the lock if the @gfp flags permit.
- * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
- */
-int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
-{
- XA_STATE(xas, xa, index);
- void *curr;
-
- do {
- curr = xas_load(&xas);
- if (!curr) {
- xas_store(&xas, XA_ZERO_ENTRY);
- if (xa_track_free(xa))
- xas_clear_mark(&xas, XA_FREE_MARK);
- }
- } while (__xas_nomem(&xas, gfp));
-
- return xas_error(&xas);
-}
-EXPORT_SYMBOL(__xa_reserve);
-
#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
unsigned long last)
@@ -1607,23 +1577,23 @@ EXPORT_SYMBOL(xa_store_range);
* __xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
+ * @limit: Range for allocated ID.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index. A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
*/
-int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
+int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, gfp_t gfp)
{
XA_STATE(xas, xa, 0);
- int err;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
@@ -1634,22 +1604,71 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
entry = XA_ZERO_ENTRY;
do {
- xas.xa_index = *id;
- xas_find_marked(&xas, max, XA_FREE_MARK);
+ xas.xa_index = limit.min;
+ xas_find_marked(&xas, limit.max, XA_FREE_MARK);
if (xas.xa_node == XAS_RESTART)
- xas_set_err(&xas, -ENOSPC);
+ xas_set_err(&xas, -EBUSY);
+ else
+ *id = xas.xa_index;
xas_store(&xas, entry);
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
- err = xas_error(&xas);
- if (!err)
- *id = xas.xa_index;
- return err;
+ return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
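A minimal sketch of the reworked interface (hypothetical caller and array names, not part of this patch): the range is carried in a struct xa_limit and exhaustion now reads as -EBUSY.

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(my_ids);	/* hypothetical allocating array */

	static int my_alloc_demo(void *payload)
	{
		u32 id;
		int err;

		/* Allocate any free index between 1 and 63 inclusive. */
		err = xa_alloc(&my_ids, &id, payload, XA_LIMIT(1, 63), GFP_KERNEL);
		if (err)
			return err;		/* -EBUSY if the range is full */
		return id;
	}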
/**
+ * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+ u32 min = limit.min;
+ int ret;
+
+ limit.min = max(min, *next);
+ ret = __xa_alloc(xa, id, entry, limit, gfp);
+ if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
+ xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
+ ret = 1;
+ }
+
+ if (ret < 0 && limit.min > min) {
+ limit.min = min;
+ ret = __xa_alloc(xa, id, entry, limit, gfp);
+ if (ret == 0)
+ ret = 1;
+ }
+
+ if (ret >= 0) {
+ *next = *id + 1;
+ if (*next == 0)
+ xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__xa_alloc_cyclic);
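A hedged sketch of cyclic allocation (hypothetical caller and cursor, not part of this patch): @next persists across calls, so IDs keep increasing until @limit.max and then wrap back to @limit.min; a return value of 1 signals that a wrap occurred.

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(my_xa);	/* hypothetical allocating array */
	static u32 my_next;			/* hypothetical allocation cursor */

	static int my_cyclic_demo(void *entry)
	{
		u32 id;
		int ret;

		ret = xa_alloc_cyclic(&my_xa, &id, entry, XA_LIMIT(0, 1023),
				      &my_next, GFP_KERNEL);
		if (ret < 0)
			return ret;		/* -ENOMEM or -EBUSY */
		if (ret == 1)
			pr_debug("ID space wrapped around\n");
		return id;
	}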
+
+/**
* __xa_set_mark() - Set this mark on this entry while locked.
* @xa: XArray.
* @index: Index of entry.
@@ -1943,6 +1962,8 @@ void xa_destroy(struct xarray *xa)
entry = xa_head_locked(xa);
RCU_INIT_POINTER(xa->xa_head, NULL);
xas_init_marks(&xas);
+ if (xa_zero_busy(xa))
+ xa_mark_clear(xa, XA_FREE_MARK);
/* lockdep checks we're still holding the lock in xas_free_nodes() */
if (xa_is_node(entry))
xas_free_nodes(&xas, xa_to_node(entry));
diff --git a/mm/cma.c b/mm/cma.c
index f4f3a8a57d86..bb2d333ffcb3 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -327,16 +327,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
* memory in case of failure.
*/
if (base < highmem_start && limit > highmem_start) {
- addr = memblock_alloc_range(size, alignment,
- highmem_start, limit,
- MEMBLOCK_NONE);
+ addr = memblock_phys_alloc_range(size, alignment,
+ highmem_start, limit);
limit = highmem_start;
}
if (!addr) {
- addr = memblock_alloc_range(size, alignment, base,
- limit,
- MEMBLOCK_NONE);
+ addr = memblock_phys_alloc_range(size, alignment, base,
+ limit);
if (!addr) {
ret = -ENOMEM;
goto err;
diff --git a/mm/compaction.c b/mm/compaction.c
index f171a83707ce..3319e0872d01 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
bool check_target)
{
struct page *page = pfn_to_online_page(pfn);
+ struct page *block_page;
struct page *end_page;
unsigned long block_pfn;
@@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
return false;
+ /* Ensure the start of the pageblock or zone is online and valid */
+ block_pfn = pageblock_start_pfn(pfn);
+ block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+ if (block_page) {
+ page = block_page;
+ pfn = block_pfn;
+ }
+
+ /* Ensure the end of the pageblock or zone is online and valid */
+ block_pfn += pageblock_nr_pages;
+ block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
+ end_page = pfn_to_online_page(block_pfn);
+ if (!end_page)
+ return false;
+
/*
* Only clear the hint if a sample indicates there is either a
* free page or an LRU page in the block. One or other condition
* is necessary for the block to be a migration source/target.
*/
- block_pfn = pageblock_start_pfn(pfn);
- pfn = max(block_pfn, zone->zone_start_pfn);
- page = pfn_to_page(pfn);
- if (zone != page_zone(page))
- return false;
- pfn = block_pfn + pageblock_nr_pages;
- pfn = min(pfn, zone_end_pfn(zone));
- end_page = pfn_to_page(pfn);
-
do {
if (pfn_valid_within(pfn)) {
if (check_source && PageLRU(page)) {
@@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
static void __reset_isolation_suitable(struct zone *zone)
{
unsigned long migrate_pfn = zone->zone_start_pfn;
- unsigned long free_pfn = zone_end_pfn(zone);
+ unsigned long free_pfn = zone_end_pfn(zone) - 1;
unsigned long reset_migrate = free_pfn;
unsigned long reset_free = migrate_pfn;
bool source_set = false;
@@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc)
count_compact_events(COMPACTISOLATED, nr_isolated);
} else {
/* If isolation fails, abort the search */
- order = -1;
+ order = cc->search_order + 1;
page = NULL;
}
}
diff --git a/mm/debug.c b/mm/debug.c
index 1611cf00a137..eee9c221280c 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
pr_warn("ksm ");
else if (mapping) {
pr_warn("%ps ", mapping->a_ops);
- if (mapping->host->i_dentry.first) {
+ if (mapping->host && mapping->host->i_dentry.first) {
struct dentry *dentry;
dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
pr_warn("name:\"%pd\" ", dentry);
@@ -137,7 +137,7 @@ void dump_mm(const struct mm_struct *mm)
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
- "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
+ "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n"
"start_brk %lx brk %lx start_stack %lx\n"
"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
@@ -168,7 +168,8 @@ void dump_mm(const struct mm_struct *mm)
mm_pgtables_bytes(mm),
mm->map_count,
mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
- mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
+ (u64)atomic64_read(&mm->pinned_vm),
+ mm->data_vm, mm->exec_vm, mm->stack_vm,
mm->start_code, mm->end_code, mm->start_data, mm->end_data,
mm->start_brk, mm->brk, mm->start_stack,
mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
diff --git a/mm/filemap.c b/mm/filemap.c
index a3b4021c448f..d78f577baef2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1587,6 +1587,9 @@ EXPORT_SYMBOL(find_lock_entry);
* @gfp_mask and added to the page cache and the VM's LRU
* list. The page is returned locked and with an increased
* refcount.
+ * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
+ * its own locking dance if the page is already in cache, or unlock the page
+ * before returning if we had to add the page to pagecache.
*
* If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
* if the GFP flags specified for FGP_CREAT are atomic.
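Under the semantics described above, a hypothetical caller that wants an unlocked page for mmap would combine the flags the same way the filemap_fault() rework below does:

	/* Hypothetical helper: look up or create an *unlocked* page for mmap. */
	static struct page *my_get_mmap_page(struct address_space *mapping,
					     pgoff_t index, gfp_t gfp)
	{
		return pagecache_get_page(mapping, index,
					  FGP_CREAT | FGP_FOR_MMAP, gfp);
	}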
@@ -1641,7 +1644,7 @@ no_page:
if (!page)
return NULL;
- if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
+ if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;
/* Init accessed so avoid atomic mark_page_accessed later */
@@ -1655,6 +1658,13 @@ no_page:
if (err == -EEXIST)
goto repeat;
}
+
+ /*
+ * add_to_page_cache_lru locks the page, and for mmap we expect
+ * an unlocked page.
+ */
+ if (page && (fgp_flags & FGP_FOR_MMAP))
+ unlock_page(page);
}
return page;
@@ -2379,64 +2389,98 @@ out:
EXPORT_SYMBOL(generic_file_read_iter);
#ifdef CONFIG_MMU
-/**
- * page_cache_read - adds requested page to the page cache if not already there
- * @file: file to read
- * @offset: page index
- * @gfp_mask: memory allocation flags
- *
- * This adds the requested page to the page cache if it isn't already there,
- * and schedules an I/O to read in its contents from disk.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
+#define MMAP_LOTSAMISS (100)
+static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
+ struct file *fpin)
{
- struct address_space *mapping = file->f_mapping;
- struct page *page;
- int ret;
+ int flags = vmf->flags;
- do {
- page = __page_cache_alloc(gfp_mask);
- if (!page)
- return -ENOMEM;
+ if (fpin)
+ return fpin;
- ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
- if (ret == 0)
- ret = mapping->a_ops->readpage(file, page);
- else if (ret == -EEXIST)
- ret = 0; /* losing race to add is OK */
+ /*
+ * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
+ * anything, so we only pin the file and drop the mmap_sem if only
+ * FAULT_FLAG_ALLOW_RETRY is set.
+ */
+ if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
+ FAULT_FLAG_ALLOW_RETRY) {
+ fpin = get_file(vmf->vma->vm_file);
+ up_read(&vmf->vma->vm_mm->mmap_sem);
+ }
+ return fpin;
+}
- put_page(page);
+/*
+ * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
+ * @vmf - the vm_fault for this fault.
+ * @page - the page to lock.
+ * @fpin - the pointer to the file we may pin (or is already pinned).
+ *
+ * This works similarly to lock_page_or_retry in that it can drop the mmap_sem.
+ * It differs in that it actually returns the page locked if it returns 1 and 0
+ * if it couldn't lock the page. If we did have to drop the mmap_sem then fpin
+ * will point to the pinned file and needs to be fput()'ed at a later point.
+ */
+static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
+ struct file **fpin)
+{
+ if (trylock_page(page))
+ return 1;
- } while (ret == AOP_TRUNCATED_PAGE);
+ /*
+ * NOTE! This will make us return with VM_FAULT_RETRY, but with
+ * the mmap_sem still held. That's how FAULT_FLAG_RETRY_NOWAIT
+ * is supposed to work. We have way too many special cases..
+ */
+ if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+ return 0;
- return ret;
+ *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
+ if (vmf->flags & FAULT_FLAG_KILLABLE) {
+ if (__lock_page_killable(page)) {
+ /*
+ * We didn't have the right flags to drop the mmap_sem,
+ * but all fault_handlers only check for fatal signals
+ * if we return VM_FAULT_RETRY, so we need to drop the
+ * mmap_sem here and return 0 if we don't have a fpin.
+ */
+ if (*fpin == NULL)
+ up_read(&vmf->vma->vm_mm->mmap_sem);
+ return 0;
+ }
+ } else
+ __lock_page(page);
+ return 1;
}
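A condensed, hypothetical sketch of how a fault handler consumes this contract (the real consumer is the filemap_fault() rework below): on a 0 return the page is not locked and, if fpin was set, the mmap_sem has been dropped and the pinned file must be fput() later.

	static vm_fault_t my_fault_lock_demo(struct vm_fault *vmf, struct page *page)
	{
		struct file *fpin = NULL;

		if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) {
			put_page(page);
			if (fpin)
				fput(fpin);	/* drop the file pin taken for us */
			return VM_FAULT_RETRY;	/* caller re-finds the VMA */
		}
		/* ... page is locked here; handle the fault ... */
		return VM_FAULT_LOCKED;
	}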
-#define MMAP_LOTSAMISS (100)
/*
- * Synchronous readahead happens when we don't even find
- * a page in the page cache at all.
+ * Synchronous readahead happens when we don't even find a page in the page
+ * cache at all. We don't want to perform IO under the mmap sem, so if we have
+ * to drop the mmap sem we return the file that was pinned in order for us to do
+ * that. If we didn't pin a file then we return NULL. The file that is
+ * returned needs to be fput()'ed when we're done with it.
*/
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
- struct file_ra_state *ra,
- struct file *file,
- pgoff_t offset)
+static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
{
+ struct file *file = vmf->vma->vm_file;
+ struct file_ra_state *ra = &file->f_ra;
struct address_space *mapping = file->f_mapping;
+ struct file *fpin = NULL;
+ pgoff_t offset = vmf->pgoff;
/* If we don't want any read-ahead, don't bother */
- if (vma->vm_flags & VM_RAND_READ)
- return;
+ if (vmf->vma->vm_flags & VM_RAND_READ)
+ return fpin;
if (!ra->ra_pages)
- return;
+ return fpin;
- if (vma->vm_flags & VM_SEQ_READ) {
+ if (vmf->vma->vm_flags & VM_SEQ_READ) {
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
page_cache_sync_readahead(mapping, ra, file, offset,
ra->ra_pages);
- return;
+ return fpin;
}
/* Avoid banging the cache line if not needed */
@@ -2448,37 +2492,44 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
* stop bothering with read-ahead. It will only hurt.
*/
if (ra->mmap_miss > MMAP_LOTSAMISS)
- return;
+ return fpin;
/*
* mmap read-around
*/
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
ra->size = ra->ra_pages;
ra->async_size = ra->ra_pages / 4;
ra_submit(ra, mapping, file);
+ return fpin;
}
/*
* Asynchronous readahead happens when we find the page and PG_readahead,
- * so we want to possibly extend the readahead further..
+ * so we want to possibly extend the readahead further. We return the file that
+ * was pinned if we have to drop the mmap_sem in order to do IO.
*/
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
- struct file_ra_state *ra,
- struct file *file,
- struct page *page,
- pgoff_t offset)
+static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
+ struct page *page)
{
+ struct file *file = vmf->vma->vm_file;
+ struct file_ra_state *ra = &file->f_ra;
struct address_space *mapping = file->f_mapping;
+ struct file *fpin = NULL;
+ pgoff_t offset = vmf->pgoff;
/* If we don't want any read-ahead, don't bother */
- if (vma->vm_flags & VM_RAND_READ)
- return;
+ if (vmf->vma->vm_flags & VM_RAND_READ)
+ return fpin;
if (ra->mmap_miss > 0)
ra->mmap_miss--;
- if (PageReadahead(page))
+ if (PageReadahead(page)) {
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
page_cache_async_readahead(mapping, ra, file,
page, offset, ra->ra_pages);
+ }
+ return fpin;
}
/**
@@ -2510,6 +2561,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
{
int error;
struct file *file = vmf->vma->vm_file;
+ struct file *fpin = NULL;
struct address_space *mapping = file->f_mapping;
struct file_ra_state *ra = &file->f_ra;
struct inode *inode = mapping->host;
@@ -2531,23 +2583,26 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
* We found the page, so try async readahead before
* waiting for the lock.
*/
- do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
+ fpin = do_async_mmap_readahead(vmf, page);
} else if (!page) {
/* No page in the page cache at all */
- do_sync_mmap_readahead(vmf->vma, ra, file, offset);
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
ret = VM_FAULT_MAJOR;
+ fpin = do_sync_mmap_readahead(vmf);
retry_find:
- page = find_get_page(mapping, offset);
- if (!page)
- goto no_cached_page;
+ page = pagecache_get_page(mapping, offset,
+ FGP_CREAT|FGP_FOR_MMAP,
+ vmf->gfp_mask);
+ if (!page) {
+ if (fpin)
+ goto out_retry;
+ return vmf_error(-ENOMEM);
+ }
}
- if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
- put_page(page);
- return ret | VM_FAULT_RETRY;
- }
+ if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
+ goto out_retry;
/* Did it get truncated? */
if (unlikely(page->mapping != mapping)) {
@@ -2565,6 +2620,16 @@ retry_find:
goto page_not_uptodate;
/*
+ * We've made it this far and we had to drop our mmap_sem, now is the
+ * time to return to the upper layer and have it re-find the vma and
+ * redo the fault.
+ */
+ if (fpin) {
+ unlock_page(page);
+ goto out_retry;
+ }
+
+ /*
* Found the page and have a reference on it.
* We must recheck i_size under page lock.
*/
@@ -2578,28 +2643,6 @@ retry_find:
vmf->page = page;
return ret | VM_FAULT_LOCKED;
-no_cached_page:
- /*
- * We're only likely to ever get here if MADV_RANDOM is in
- * effect.
- */
- error = page_cache_read(file, offset, vmf->gfp_mask);
-
- /*
- * The page we want has now been added to the page cache.
- * In the unlikely event that someone removed it in the
- * meantime, we'll just come back here and read it again.
- */
- if (error >= 0)
- goto retry_find;
-
- /*
- * An error return from page_cache_read can result if the
- * system is low on memory, or a problem occurs while trying
- * to schedule I/O.
- */
- return vmf_error(error);
-
page_not_uptodate:
/*
* Umm, take care of errors if the page isn't up-to-date.
@@ -2608,12 +2651,15 @@ page_not_uptodate:
* and we need to check for errors.
*/
ClearPageError(page);
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
error = mapping->a_ops->readpage(file, page);
if (!error) {
wait_on_page_locked(page);
if (!PageUptodate(page))
error = -EIO;
}
+ if (fpin)
+ goto out_retry;
put_page(page);
if (!error || error == AOP_TRUNCATED_PAGE)
@@ -2622,6 +2668,18 @@ page_not_uptodate:
/* Things didn't work out. Return zero to tell the mm layer so. */
shrink_readahead_size_eio(file, ra);
return VM_FAULT_SIGBUS;
+
+out_retry:
+ /*
+ * We dropped the mmap_sem, we need to return to the fault handler to
+ * re-find the vma and come back and find our hopefully still populated
+ * page.
+ */
+ if (page)
+ put_page(page);
+ if (fpin)
+ fput(fpin);
+ return ret | VM_FAULT_RETRY;
}
EXPORT_SYMBOL(filemap_fault);
diff --git a/mm/gup.c b/mm/gup.c
index f84e22685aaa..91819b8ad9cc 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -160,8 +160,12 @@ retry:
goto retry;
}
- if (flags & FOLL_GET)
- get_page(page);
+ if (flags & FOLL_GET) {
+ if (unlikely(!try_get_page(page))) {
+ page = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ }
if (flags & FOLL_TOUCH) {
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
@@ -298,7 +302,10 @@ retry_locked:
if (pmd_trans_unstable(pmd))
ret = -EBUSY;
} else {
- get_page(page);
+ if (unlikely(!try_get_page(page))) {
+ spin_unlock(ptl);
+ return ERR_PTR(-ENOMEM);
+ }
spin_unlock(ptl);
lock_page(page);
ret = split_huge_page(page);
@@ -500,7 +507,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
if (is_device_public_page(*page))
goto unmap;
}
- get_page(*page);
+ if (unlikely(!try_get_page(*page))) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
out:
ret = 0;
unmap:
@@ -1545,6 +1555,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
}
}
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+ struct page *head = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(head) < 0))
+ return NULL;
+ if (unlikely(!page_cache_add_speculative(head, refs)))
+ return NULL;
+ return head;
+}
+
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
@@ -1579,9 +1603,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ head = try_get_compound_head(page, 1);
+ if (!head)
goto pte_unmap;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1720,8 +1744,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);
- head = compound_head(pmd_page(orig));
- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(pmd_page(orig), refs);
+ if (!head) {
*nr -= refs;
return 0;
}
@@ -1758,8 +1782,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);
- head = compound_head(pud_page(orig));
- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(pud_page(orig), refs);
+ if (!head) {
*nr -= refs;
return 0;
}
@@ -1795,8 +1819,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);
- head = compound_head(pgd_page(orig));
- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(pgd_page(orig), refs);
+ if (!head) {
*nr -= refs;
return 0;
}
diff --git a/mm/hmm.c b/mm/hmm.c
index a04e4b810610..fe1cd87e49ac 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -990,7 +990,7 @@ static void hmm_devmem_ref_kill(struct percpu_ref *ref)
percpu_ref_kill(ref);
}
-static int hmm_devmem_fault(struct vm_area_struct *vma,
+static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
unsigned long addr,
const struct page *page,
unsigned int flags,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 404acdcd0455..b6a34b32d8ac 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -755,6 +755,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pmd_lock(mm, pmd);
+ if (!pmd_none(*pmd)) {
+ if (write) {
+ if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+ goto out_unlock;
+ }
+ entry = pmd_mkyoung(*pmd);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+ update_mmu_cache_pmd(vma, addr, pmd);
+ }
+
+ goto out_unlock;
+ }
+
entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pmd_mkdevmap(entry);
@@ -766,11 +781,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
if (pgtable) {
pgtable_trans_huge_deposit(mm, pmd, pgtable);
mm_inc_nr_ptes(mm);
+ pgtable = NULL;
}
set_pmd_at(mm, addr, pmd, entry);
update_mmu_cache_pmd(vma, addr, pmd);
+
+out_unlock:
spin_unlock(ptl);
+ if (pgtable)
+ pte_free(mm, pgtable);
}
vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -821,6 +841,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pud_lock(mm, pud);
+ if (!pud_none(*pud)) {
+ if (write) {
+ if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+ goto out_unlock;
+ }
+ entry = pud_mkyoung(*pud);
+ entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+ if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+ update_mmu_cache_pud(vma, addr, pud);
+ }
+ goto out_unlock;
+ }
+
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pud_mkdevmap(entry);
@@ -830,6 +864,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
}
set_pud_at(mm, addr, pud, entry);
update_mmu_cache_pud(vma, addr, pud);
+
+out_unlock:
spin_unlock(ptl);
}
@@ -1641,7 +1677,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct mm_struct *mm = tlb->mm;
bool ret = false;
- tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+ tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
ptl = pmd_trans_huge_lock(pmd, vma);
if (!ptl)
@@ -1717,7 +1753,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t orig_pmd;
spinlock_t *ptl;
- tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+ tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
ptl = __pmd_trans_huge_lock(pmd, vma);
if (!ptl)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 97b1e0290c66..641cedfc8c0f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3353,7 +3353,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* This is a hugetlb vma, all the pte entries should point
* to huge page.
*/
- tlb_remove_check_page_size_change(tlb, sz);
+ tlb_change_page_size(tlb, sz);
tlb_start_vma(tlb, vma);
/*
@@ -4299,6 +4299,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
page = pte_page(huge_ptep_get(pte));
+
+ /*
+ * Instead of doing 'try_get_page()' below in the same_page
+ * loop, just check the count once here.
+ */
+ if (unlikely(page_count(page) <= 0)) {
+ if (pages) {
+ spin_unlock(ptl);
+ remainder = 0;
+ err = -ENOMEM;
+ break;
+ }
+ }
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 5d1065efbd47..08b43de2383b 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -2,18 +2,21 @@
KASAN_SANITIZE := n
UBSAN_SANITIZE_common.o := n
UBSAN_SANITIZE_generic.o := n
+UBSAN_SANITIZE_generic_report.o := n
UBSAN_SANITIZE_tags.o := n
KCOV_INSTRUMENT := n
-CFLAGS_REMOVE_common.o = -pg
-CFLAGS_REMOVE_generic.o = -pg
-CFLAGS_REMOVE_tags.o = -pg
+CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_generic_report.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE)
# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
obj-$(CONFIG_KASAN) := common.o init.o report.o
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 80bbe62b16cd..36afcf64e016 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
+#include <linux/uaccess.h>
#include "kasan.h"
#include "../slab.h"
@@ -48,37 +49,28 @@ static inline int in_irqentry_text(unsigned long ptr)
ptr < (unsigned long)&__softirqentry_text_end);
}
-static inline void filter_irq_stacks(struct stack_trace *trace)
+static inline unsigned int filter_irq_stacks(unsigned long *entries,
+ unsigned int nr_entries)
{
- int i;
+ unsigned int i;
- if (!trace->nr_entries)
- return;
- for (i = 0; i < trace->nr_entries; i++)
- if (in_irqentry_text(trace->entries[i])) {
+ for (i = 0; i < nr_entries; i++) {
+ if (in_irqentry_text(entries[i])) {
/* Include the irqentry function into the stack. */
- trace->nr_entries = i + 1;
- break;
+ return i + 1;
}
+ }
+ return nr_entries;
}
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[KASAN_STACK_DEPTH];
- struct stack_trace trace = {
- .nr_entries = 0,
- .entries = entries,
- .max_entries = KASAN_STACK_DEPTH,
- .skip = 0
- };
+ unsigned int nr_entries;
- save_stack_trace(&trace);
- filter_irq_stacks(&trace);
- if (trace.nr_entries != 0 &&
- trace.entries[trace.nr_entries-1] == ULONG_MAX)
- trace.nr_entries--;
-
- return depot_save_stack(&trace, flags);
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+ nr_entries = filter_irq_stacks(entries, nr_entries);
+ return stack_depot_save(entries, nr_entries, flags);
}
static inline void set_track(struct kasan_track *track, gfp_t flags)
@@ -614,6 +606,15 @@ void kasan_free_shadow(const struct vm_struct *vm)
vfree(kasan_mem_to_shadow(vm->addr));
}
+extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
+
+void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
+{
+ unsigned long flags = user_access_save();
+ __kasan_report(addr, size, is_write, ip);
+ user_access_restore(flags);
+}
+
#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index fcaa1ca03175..ce45c491ebcd 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -83,8 +83,14 @@ static inline bool kasan_early_shadow_page_entry(pte_t pte)
static __init void *early_alloc(size_t size, int node)
{
- return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
- MEMBLOCK_ALLOC_ACCESSIBLE, node);
+ void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
+ __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
+
+ return ptr;
}
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 3e0c11f7d7a1..3ce956efa0cb 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -163,7 +163,10 @@ static inline u8 random_tag(void)
#endif
#ifndef arch_kasan_set_tag
-#define arch_kasan_set_tag(addr, tag) ((void *)(addr))
+static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+{
+ return addr;
+}
#endif
#ifndef arch_kasan_reset_tag
#define arch_kasan_reset_tag(addr) ((void *)(addr))
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index ca9418fe9232..03a443579386 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -100,10 +100,11 @@ static void print_track(struct kasan_track *track, const char *prefix)
{
pr_err("%s by task %u:\n", prefix, track->pid);
if (track->stack) {
- struct stack_trace trace;
+ unsigned long *entries;
+ unsigned int nr_entries;
- depot_fetch_stack(track->stack, &trace);
- print_stack_trace(&trace, 0);
+ nr_entries = stack_depot_fetch(track->stack, &entries);
+ stack_trace_print(entries, nr_entries, 0);
} else {
pr_err("(stack is not available)\n");
}
@@ -281,8 +282,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
end_report(&flags);
}
-void kasan_report(unsigned long addr, size_t size,
- bool is_write, unsigned long ip)
+void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
struct kasan_access_info info;
void *tagged_addr;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 707fa5579f66..e57bf810f798 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -410,11 +410,6 @@ static void print_unreferenced(struct seq_file *seq,
*/
static void dump_object_info(struct kmemleak_object *object)
{
- struct stack_trace trace;
-
- trace.nr_entries = object->trace_len;
- trace.entries = object->trace;
-
pr_notice("Object 0x%08lx (size %zu):\n",
object->pointer, object->size);
pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
@@ -424,7 +419,7 @@ static void dump_object_info(struct kmemleak_object *object)
pr_notice(" flags = 0x%x\n", object->flags);
pr_notice(" checksum = %u\n", object->checksum);
pr_notice(" backtrace:\n");
- print_stack_trace(&trace, 4);
+ stack_trace_print(object->trace, object->trace_len, 4);
}
/*
@@ -553,15 +548,7 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
*/
static int __save_stack_trace(unsigned long *trace)
{
- struct stack_trace stack_trace;
-
- stack_trace.max_entries = MAX_TRACE;
- stack_trace.nr_entries = 0;
- stack_trace.entries = trace;
- stack_trace.skip = 2;
- save_stack_trace(&stack_trace);
-
- return stack_trace.nr_entries;
+ return stack_trace_save(trace, MAX_TRACE, 2);
}
/*
@@ -1401,6 +1388,7 @@ static void scan_block(void *_start, void *_end,
/*
* Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
*/
+#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
void *next;
@@ -1412,6 +1400,7 @@ static void scan_large_block(void *start, void *end)
cond_resched();
}
}
+#endif
/*
* Scan a memory block corresponding to a kmemleak_object. A condition is
@@ -1529,11 +1518,6 @@ static void kmemleak_scan(void)
}
rcu_read_unlock();
- /* data/bss scanning */
- scan_large_block(_sdata, _edata);
- scan_large_block(__bss_start, __bss_stop);
- scan_large_block(__start_ro_after_init, __end_ro_after_init);
-
#ifdef CONFIG_SMP
/* per-cpu sections scanning */
for_each_possible_cpu(i)
@@ -2024,13 +2008,8 @@ early_param("kmemleak", kmemleak_boot_config);
static void __init print_log_trace(struct early_log *log)
{
- struct stack_trace trace;
-
- trace.nr_entries = log->trace_len;
- trace.entries = log->trace;
-
pr_notice("Early log backtrace:\n");
- print_stack_trace(&trace, 2);
+ stack_trace_print(log->trace, log->trace_len, 2);
}
/*
@@ -2071,6 +2050,17 @@ void __init kmemleak_init(void)
}
local_irq_restore(flags);
+ /* register the data/bss sections */
+ create_object((unsigned long)_sdata, _edata - _sdata,
+ KMEMLEAK_GREY, GFP_ATOMIC);
+ create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+ KMEMLEAK_GREY, GFP_ATOMIC);
+ /* only register .data..ro_after_init if not within .data */
+ if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+ create_object((unsigned long)__start_ro_after_init,
+ __end_ro_after_init - __start_ro_after_init,
+ KMEMLEAK_GREY, GFP_ATOMIC);
+
/*
* This is the point where tracking allocations is safe. Automatic
* scanning is started during the late initcall. Add the early logged
diff --git a/mm/madvise.c b/mm/madvise.c
index 21a7881a2db4..bb3a4554d5d5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -328,7 +328,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
if (pmd_trans_unstable(pmd))
return 0;
- tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+ tlb_change_page_size(tlb, PAGE_SIZE);
orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
diff --git a/mm/memblock.c b/mm/memblock.c
index 9db5e51babd2..a48f520c2d01 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -76,8 +76,19 @@
* :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
* performs such an assignment directly.
*
- * Once memblock is setup the memory can be allocated using either
- * memblock or bootmem APIs.
+ * Once memblock is set up, the memory can be allocated using one of the
+ * API variants:
+ *
+ * * :c:func:`memblock_phys_alloc*` - these functions return the
+ * **physical** address of the allocated memory
+ * * :c:func:`memblock_alloc*` - these functions return the **virtual**
+ * address of the allocated memory.
+ *
+ * Note that both API variants use implicit assumptions about allowed
+ * memory ranges and the fallback methods. Consult the documentation
+ * of the :c:func:`memblock_alloc_internal` and
+ * :c:func:`memblock_alloc_range_nid` functions for a more elaborate
+ * description.
*
* As the system boot progresses, the architecture specific
* :c:func:`mem_init` function frees all the memory to the buddy page
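A minimal sketch of the two variants described above (hypothetical early-boot code, not part of this patch):

	#include <linux/memblock.h>
	#include <linux/sizes.h>

	static void __init my_memblock_demo(void)
	{
		phys_addr_t pa;
		void *va;

		/* Returns a physical address; the caller maps it if needed. */
		pa = memblock_phys_alloc(SZ_1M, SMP_CACHE_BYTES);

		/* Returns a zeroed virtual address in the direct map. */
		va = memblock_alloc(SZ_1M, SMP_CACHE_BYTES);

		if (!pa || !va)
			panic("my_memblock_demo: allocation failed");
	}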
@@ -132,7 +143,7 @@ static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
-enum memblock_flags __init_memblock choose_memblock_flags(void)
+static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}
@@ -261,7 +272,7 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
* Return:
* Found address on success, 0 on failure.
*/
-phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
+static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
phys_addr_t align, phys_addr_t start,
phys_addr_t end, int nid,
enum memblock_flags flags)
@@ -435,17 +446,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
else
in_slab = &memblock_reserved_in_slab;
- /* Try to find some space for it.
- *
- * WARNING: We assume that either slab_is_available() and we use it or
- * we use MEMBLOCK for allocations. That means that this is unsafe to
- * use when bootmem is currently active (unless bootmem itself is
- * implemented on top of MEMBLOCK which isn't the case yet)
- *
- * This should however not be an issue for now, as we currently only
- * call into MEMBLOCK while it's still active, or much later when slab
- * is active for memory hotplug operations
- */
+ /* Try to find some space for it */
if (use_slab) {
new_array = kmalloc(new_size, GFP_KERNEL);
addr = new_array ? __pa(new_array) : 0;
@@ -858,11 +859,14 @@ static int __init_memblock memblock_setclr_flag(phys_addr_t base,
if (ret)
return ret;
- for (i = start_rgn; i < end_rgn; i++)
+ for (i = start_rgn; i < end_rgn; i++) {
+ struct memblock_region *r = &type->regions[i];
+
if (set)
- memblock_set_region_flags(&type->regions[i], flag);
+ r->flags |= flag;
else
- memblock_clear_region_flags(&type->regions[i], flag);
+ r->flags &= ~flag;
+ }
memblock_merge_regions(type);
return 0;
@@ -962,8 +966,31 @@ void __init_memblock __next_reserved_mem_region(u64 *idx,
*idx = ULLONG_MAX;
}
+static bool should_skip_region(struct memblock_region *m, int nid, int flags)
+{
+ int m_nid = memblock_get_region_node(m);
+
+ /* only memory regions are associated with nodes, check it */
+ if (nid != NUMA_NO_NODE && nid != m_nid)
+ return true;
+
+ /* skip hotpluggable memory regions if needed */
+ if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+ return true;
+
+ /* if we want mirror memory skip non-mirror memory regions */
+ if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
+ return true;
+
+ /* skip nomap memory unless we were asked for it explicitly */
+ if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+ return true;
+
+ return false;
+}
+
/**
- * __next__mem_range - next function for for_each_free_mem_range() etc.
+ * __next_mem_range - next function for for_each_free_mem_range() etc.
* @idx: pointer to u64 loop variable
* @nid: node selector, %NUMA_NO_NODE for all nodes
* @flags: pick from blocks based on memory attributes
@@ -1009,20 +1036,7 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
phys_addr_t m_end = m->base + m->size;
int m_nid = memblock_get_region_node(m);
- /* only memory regions are associated with nodes, check it */
- if (nid != NUMA_NO_NODE && nid != m_nid)
- continue;
-
- /* skip hotpluggable memory regions if needed */
- if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
- continue;
-
- /* if we want mirror memory skip non-mirror memory regions */
- if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
- continue;
-
- /* skip nomap memory unless we were asked for it explicitly */
- if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+ if (should_skip_region(m, nid, flags))
continue;
if (!type_b) {
@@ -1126,20 +1140,7 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
phys_addr_t m_end = m->base + m->size;
int m_nid = memblock_get_region_node(m);
- /* only memory regions are associated with nodes, check it */
- if (nid != NUMA_NO_NODE && nid != m_nid)
- continue;
-
- /* skip hotpluggable memory regions if needed */
- if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
- continue;
-
- /* if we want mirror memory skip non-mirror memory regions */
- if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
- continue;
-
- /* skip nomap memory unless we were asked for it explicitly */
- if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+ if (should_skip_region(m, nid, flags))
continue;
if (!type_b) {
@@ -1255,94 +1256,123 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+/**
+ * memblock_alloc_range_nid - allocate boot memory block
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @start: the lower bound of the memory region to allocate (phys address)
+ * @end: the upper bound of the memory region to allocate (phys address)
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * The allocation is performed from the memory region limited by
+ * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
+ *
+ * If the specified node cannot hold the requested memory, the
+ * allocation falls back to any node in the system.
+ *
+ * For systems with memory mirroring, the allocation is attempted first
+ * from the regions with mirroring enabled and then retried from any
+ * memory region.
+ *
+ * In addition, the function sets min_count to 0 using kmemleak_alloc_phys for
+ * the allocated boot memory block, so that it is never reported as a leak.
+ *
+ * Return:
+ * Physical address of allocated memory block on success, %0 on failure.
+ */
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t start,
- phys_addr_t end, int nid,
- enum memblock_flags flags)
+ phys_addr_t end, int nid)
{
+ enum memblock_flags flags = choose_memblock_flags();
phys_addr_t found;
+ if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+ nid = NUMA_NO_NODE;
+
if (!align) {
/* Can't use WARNs this early in boot on powerpc */
dump_stack();
align = SMP_CACHE_BYTES;
}
+ if (end > memblock.current_limit)
+ end = memblock.current_limit;
+
+again:
found = memblock_find_in_range_node(size, align, start, end, nid,
flags);
- if (found && !memblock_reserve(found, size)) {
- /*
- * The min_count is set to 0 so that memblock allocations are
- * never reported as leaks.
- */
- kmemleak_alloc_phys(found, size, 0, 0);
- return found;
- }
- return 0;
-}
-
-phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end,
- enum memblock_flags flags)
-{
- return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
- flags);
-}
-
-phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
- phys_addr_t align, phys_addr_t max_addr,
- int nid, enum memblock_flags flags)
-{
- return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
-}
-
-phys_addr_t __init memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
-{
- enum memblock_flags flags = choose_memblock_flags();
- phys_addr_t ret;
+ if (found && !memblock_reserve(found, size))
+ goto done;
-again:
- ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
- nid, flags);
+ if (nid != NUMA_NO_NODE) {
+ found = memblock_find_in_range_node(size, align, start,
+ end, NUMA_NO_NODE,
+ flags);
+ if (found && !memblock_reserve(found, size))
+ goto done;
+ }
- if (!ret && (flags & MEMBLOCK_MIRROR)) {
+ if (flags & MEMBLOCK_MIRROR) {
flags &= ~MEMBLOCK_MIRROR;
+ pr_warn("Could not allocate %pap bytes of mirrored memory\n",
+ &size);
goto again;
}
- return ret;
-}
-
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
- return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
- MEMBLOCK_NONE);
-}
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
- phys_addr_t alloc;
-
- alloc = __memblock_alloc_base(size, align, max_addr);
+ return 0;
- if (alloc == 0)
- panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
- &size, &max_addr);
+done:
+ /* Skip kmemleak for kasan_init() due to high volume. */
+ if (end != MEMBLOCK_ALLOC_KASAN)
+ /*
+ * The min_count is set to 0 so that memblock allocated
+ * blocks are never reported as leaks. This is because many
+ * of these blocks are only referred via the physical
+ * address which is not looked up by kmemleak.
+ */
+ kmemleak_alloc_phys(found, size, 0, 0);
- return alloc;
+ return found;
}
-phys_addr_t __init memblock_phys_alloc(phys_addr_t size, phys_addr_t align)
+/**
+ * memblock_phys_alloc_range - allocate a memory block inside specified range
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @start: the lower bound of the memory region to allocate (physical address)
+ * @end: the upper bound of the memory region to allocate (physical address)
+ *
+ * Allocate @size bytes in the range between @start and @end.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
+phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t start,
+ phys_addr_t end)
{
- return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+ return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}
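Usage sketch for the new range-limited helper (the 4 GiB bound and the table
it feeds are illustrative assumptions, not from this patch):

	phys_addr_t pa;

	/* carve 1 MiB, 1 MiB aligned, below 4 GiB for a DMA-limited table */
	pa = memblock_phys_alloc_range(SZ_1M, SZ_1M, 0, SZ_4G);
	if (!pa)
		pr_warn("no memory below 4G for the example table\n");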
+/**
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Allocates a memory block from the specified NUMA node. If the node
+ * has no available memory, the allocation falls back to any node in the
+ * system.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
- phys_addr_t res = memblock_phys_alloc_nid(size, align, nid);
-
- if (res)
- return res;
- return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+ return memblock_alloc_range_nid(size, align, 0,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
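Caller-side sketch (hypothetical per-node scratch page; the fallback to other
nodes happens inside memblock_alloc_range_nid(), so the caller only tests 0):

	phys_addr_t pa = memblock_phys_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, nid);

	if (!pa)
		panic("cannot allocate scratch page for node %d\n", nid);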
/**
@@ -1353,19 +1383,13 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali
* @max_addr: the upper bound of the memory region to allocate (phys address)
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
- * The @min_addr limit is dropped if it can not be satisfied and the allocation
- * will fall back to memory below @min_addr. Also, allocation may fall back
- * to any node in the system if the specified node can not
- * hold the requested memory.
+ * Allocates memory block using memblock_alloc_range_nid() and
+ * converts the returned physical address to virtual.
*
- * The allocation is performed from memory region limited by
- * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
- *
- * The phys address of allocated boot memory block is converted to virtual and
- * allocated memory is reset to 0.
- *
- * In addition, function sets the min_count to 0 using kmemleak_alloc for
- * allocated boot memory block, so that it is never reported as leaks.
+ * The @min_addr limit is dropped if it cannot be satisfied and the allocation
+ * will fall back to memory below @min_addr. Other constraints, such
+ * as node and mirrored memory, are handled again in
+ * memblock_alloc_range_nid().
*
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
@@ -1376,11 +1400,6 @@ static void * __init memblock_alloc_internal(
int nid)
{
phys_addr_t alloc;
- void *ptr;
- enum memblock_flags flags = choose_memblock_flags();
-
- if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
- nid = NUMA_NO_NODE;
/*
* Detect any accidental use of these APIs after slab is ready, as at
@@ -1390,54 +1409,16 @@ static void * __init memblock_alloc_internal(
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, nid);
- if (!align) {
- dump_stack();
- align = SMP_CACHE_BYTES;
- }
+ alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
- if (max_addr > memblock.current_limit)
- max_addr = memblock.current_limit;
-again:
- alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
- nid, flags);
- if (alloc && !memblock_reserve(alloc, size))
- goto done;
+ /* retry allocation without lower limit */
+ if (!alloc && min_addr)
+ alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);
- if (nid != NUMA_NO_NODE) {
- alloc = memblock_find_in_range_node(size, align, min_addr,
- max_addr, NUMA_NO_NODE,
- flags);
- if (alloc && !memblock_reserve(alloc, size))
- goto done;
- }
-
- if (min_addr) {
- min_addr = 0;
- goto again;
- }
+ if (!alloc)
+ return NULL;
- if (flags & MEMBLOCK_MIRROR) {
- flags &= ~MEMBLOCK_MIRROR;
- pr_warn("Could not allocate %pap bytes of mirrored memory\n",
- &size);
- goto again;
- }
-
- return NULL;
-done:
- ptr = phys_to_virt(alloc);
-
- /* Skip kmemleak for kasan_init() due to high volume. */
- if (max_addr != MEMBLOCK_ALLOC_KASAN)
- /*
- * The min_count is set to 0 so that bootmem allocated
- * blocks are never reported as leaks. This is because many
- * of these blocks are only referred via the physical
- * address which is not looked up by kmemleak.
- */
- kmemleak_alloc(ptr, size, 0, 0);
-
- return ptr;
+ return phys_to_virt(alloc);
}
/**
@@ -1479,7 +1460,7 @@ void * __init memblock_alloc_try_nid_raw(
}
/**
- * memblock_alloc_try_nid_nopanic - allocate boot memory block
+ * memblock_alloc_try_nid - allocate boot memory block
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @min_addr: the lower bound of the memory region from where the allocation
@@ -1495,42 +1476,6 @@ void * __init memblock_alloc_try_nid_raw(
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
-void * __init memblock_alloc_try_nid_nopanic(
- phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr,
- int nid)
-{
- void *ptr;
-
- memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
- __func__, (u64)size, (u64)align, nid, &min_addr,
- &max_addr, (void *)_RET_IP_);
-
- ptr = memblock_alloc_internal(size, align,
- min_addr, max_addr, nid);
- if (ptr)
- memset(ptr, 0, size);
- return ptr;
-}
-
-/**
- * memblock_alloc_try_nid - allocate boot memory block with panicking
- * @size: size of memory block to be allocated in bytes
- * @align: alignment of the region and block's size
- * @min_addr: the lower bound of the memory region from where the allocation
- * is preferred (phys address)
- * @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
- * allocate only from memory limited by memblock.current_limit value
- * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
- *
- * Public panicking version of memblock_alloc_try_nid_nopanic()
- * which provides debug information (including caller info), if enabled,
- * and panics if the request can not be satisfied.
- *
- * Return:
- * Virtual address of allocated memory block on success, NULL on failure.
- */
void * __init memblock_alloc_try_nid(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
@@ -1543,24 +1488,20 @@ void * __init memblock_alloc_try_nid(
&max_addr, (void *)_RET_IP_);
ptr = memblock_alloc_internal(size, align,
min_addr, max_addr, nid);
- if (ptr) {
+ if (ptr)
memset(ptr, 0, size);
- return ptr;
- }
- panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa\n",
- __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr);
- return NULL;
+ return ptr;
}
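With the _nopanic variant gone, every caller now owns the failure check; a
sketch of the resulting calling convention (the structure name is
illustrative, the panic message mirrors the pattern this patch adds in
mm/percpu.c):

	struct init_table *tbl;

	tbl = memblock_alloc_try_nid(sizeof(*tbl), SMP_CACHE_BYTES,
				     0, MEMBLOCK_ALLOC_ACCESSIBLE,
				     NUMA_NO_NODE);
	if (!tbl)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*tbl));
	/* memory is already zeroed by memblock_alloc_try_nid() */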
/**
- * __memblock_free_late - free bootmem block pages directly to buddy allocator
+ * __memblock_free_late - free pages directly to buddy allocator
* @base: phys starting address of the boot memory block
* @size: size of the boot memory block in bytes
*
- * This is only useful when the bootmem allocator has already been torn
+ * This is only useful when the memblock allocator has already been torn
* down, but we are still initializing the system. Pages are released directly
- * to the buddy allocator, no bootmem metadata is updated because it is gone.
+ * to the buddy allocator.
*/
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 532e0e2a4817..81a0d3914ec9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3882,6 +3882,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
return &memcg->cgwb_domain;
}
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page().
+ */
+static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
+{
+ long x = atomic_long_read(&memcg->stat[idx]);
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
+ if (x < 0)
+ x = 0;
+ return x;
+}
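The clamp at the end matters because per-CPU deltas are folded into the
atomic base only periodically, so a racy snapshot can transiently undershoot
zero; the same fold-and-clamp shape in isolation (hypothetical counters,
for illustration):

	/* hypothetical: atomic_long_t base_count; DEFINE_PER_CPU(long, cpu_deltas); */
	long total = atomic_long_read(&base_count);
	int cpu;

	for_each_online_cpu(cpu)
		total += *per_cpu_ptr(&cpu_deltas, cpu);
	if (total < 0)	/* racy snapshot may briefly go negative */
		total = 0;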
+
/**
* mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
* @wb: bdi_writeback in question
@@ -3907,10 +3923,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
struct mem_cgroup *parent;
- *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
+ *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
/* this should eventually include NR_UNSTABLE_NFS */
- *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
+ *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
(1 << LRU_ACTIVE_FILE));
*pheadroom = PAGE_COUNTER_MAX;
diff --git a/mm/memory.c b/mm/memory.c
index 3541a15067f2..f7d962d7de19 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -356,7 +356,7 @@ void free_pgd_range(struct mmu_gather *tlb,
* We add page table cache pages with PAGE_SIZE,
* (see pte_free_tlb()), flush the tlb if we need
*/
- tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+ tlb_change_page_size(tlb, PAGE_SIZE);
pgd = pgd_offset(tlb->mm, addr);
do {
next = pgd_addr_end(addr, end);
@@ -1046,7 +1046,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
pte_t *pte;
swp_entry_t entry;
- tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+ tlb_change_page_size(tlb, PAGE_SIZE);
again:
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -1155,7 +1155,7 @@ again:
*/
if (force_flush) {
force_flush = 0;
- tlb_flush_mmu_free(tlb);
+ tlb_flush_mmu(tlb);
if (addr != end)
goto again;
}
@@ -1549,10 +1549,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
goto out_unlock;
}
- entry = *pte;
- goto out_mkwrite;
- } else
- goto out_unlock;
+ entry = pte_mkyoung(*pte);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ if (ptep_set_access_flags(vma, addr, pte, entry, 1))
+ update_mmu_cache(vma, addr, pte);
+ }
+ goto out_unlock;
}
/* Ok, finally just insert the thing.. */
@@ -1561,7 +1563,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
else
entry = pte_mkspecial(pfn_t_pte(pfn, prot));
-out_mkwrite:
if (mkwrite) {
entry = pte_mkyoung(entry);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6b05576fb4ec..b236069ff0d8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -96,27 +96,29 @@ void mem_hotplug_done(void)
cpus_read_unlock();
}
+u64 max_mem_size = U64_MAX;
+
/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
- struct resource *res, *conflict;
- res = kzalloc(sizeof(struct resource), GFP_KERNEL);
- if (!res)
- return ERR_PTR(-ENOMEM);
-
- res->name = "System RAM";
- res->start = start;
- res->end = start + size - 1;
- res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- conflict = request_resource_conflict(&iomem_resource, res);
- if (conflict) {
- if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
- pr_debug("Device unaddressable memory block "
- "memory hotplug at %#010llx !\n",
- (unsigned long long)start);
- }
- pr_debug("System RAM resource %pR cannot be added\n", res);
- kfree(res);
+ struct resource *res;
+ unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ char *resource_name = "System RAM";
+
+ if (start + size > max_mem_size)
+ return ERR_PTR(-E2BIG);
+
+ /*
+ * Request ownership of the new memory range. This might be
+ * a child of an existing resource that was present but
+ * not marked as busy.
+ */
+ res = __request_region(&iomem_resource, start, size,
+ resource_name, flags);
+
+ if (!res) {
+ pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
+ start, start + size);
return ERR_PTR(-EEXIST);
}
return res;
@@ -872,6 +874,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
*/
mem = find_memory_block(__pfn_to_section(pfn));
nid = mem->nid;
+ put_device(&mem->dev);
/* associate pfn range with the zone */
zone = move_pfn_range(online_type, nid, pfn, nr_pages);
@@ -1574,7 +1577,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
{
unsigned long pfn, nr_pages;
long offlined_pages;
- int ret, node;
+ int ret, node, nr_isolate_pageblock;
unsigned long flags;
unsigned long valid_start, valid_end;
struct zone *zone;
@@ -1600,10 +1603,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE,
SKIP_HWPOISON | REPORT_FAILURE);
- if (ret) {
+ if (ret < 0) {
reason = "failure to isolate range";
goto failed_removal;
}
+ nr_isolate_pageblock = ret;
arg.start_pfn = start_pfn;
arg.nr_pages = nr_pages;
@@ -1655,8 +1659,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
/* Ok, all of our target is isolated.
We cannot do rollback at this point. */
offline_isolated_pages(start_pfn, end_pfn);
- /* reset pagetype flags and makes migrate type to be MOVABLE */
- undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+
+ /*
+ * Onlining will reset pagetype flags and make the migrate type
+ * MOVABLE, so we just need to decrease the zone's count of
+ * isolated pageblocks here.
+ */
+ spin_lock_irqsave(&zone->lock, flags);
+ zone->nr_isolate_pageblock -= nr_isolate_pageblock;
+ spin_unlock_irqrestore(&zone->lock, flags);
+
/* removal success */
adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
zone->present_pages -= offlined_pages;
@@ -1688,12 +1700,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
failed_removal_isolated:
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+ memory_notify(MEM_CANCEL_OFFLINE, &arg);
failed_removal:
pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
(unsigned long long) start_pfn << PAGE_SHIFT,
((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
reason);
- memory_notify(MEM_CANCEL_OFFLINE, &arg);
/* pushback to free area */
mem_hotplug_done();
return ret;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index af171ccb56a2..2219e747df49 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
+/*
+ * queue_pages_pmd() has three possible return values:
+ * 1 - pages are placed on the right node or queued successfully.
+ * 0 - THP was split.
+ * -EIO - the pmd is a migration entry, or MPOL_MF_STRICT was specified and an
+ * existing page was already on a node that does not follow the policy.
+ */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
unsigned long flags;
if (unlikely(is_pmd_migration_entry(*pmd))) {
- ret = 1;
+ ret = -EIO;
goto unlock;
}
page = pmd_page(*pmd);
@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
ret = 1;
flags = qp->flags;
/* go to thp migration */
- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+ if (!vma_migratable(walk->vma)) {
+ ret = -EIO;
+ goto unlock;
+ }
+
migrate_page_add(page, qp->pagelist, flags);
+ } else
+ ret = -EIO;
unlock:
spin_unlock(ptl);
out:
@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
- if (ret)
+ if (ret > 0)
return 0;
+ else if (ret < 0)
+ return ret;
}
if (pmd_trans_unstable(pmd))
@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
continue;
if (!queue_pages_required(page, qp))
continue;
- migrate_page_add(page, qp->pagelist, flags);
+ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+ if (!vma_migratable(vma))
+ break;
+ migrate_page_add(page, qp->pagelist, flags);
+ } else
+ break;
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
- return 0;
+ return addr != end ? -EIO : 0;
}
static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
unsigned long endvma = vma->vm_end;
unsigned long flags = qp->flags;
- if (!vma_migratable(vma))
+ /*
+ * Need to check MPOL_MF_STRICT to return -EIO if possible,
+ * regardless of vma_migratable
+ */
+ if (!vma_migratable(vma) &&
+ !(flags & MPOL_MF_STRICT))
return 1;
if (endvma > end)
@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
}
/* queue pages from current vma */
- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+ if (flags & MPOL_MF_VALID)
return 0;
return 1;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index ac6f4939bb59..663a5449367a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
pte = swp_entry_to_pte(entry);
} else if (is_device_public_page(new)) {
pte = pte_mkdevmap(pte);
- flush_dcache_page(new);
}
- } else
- flush_dcache_page(new);
+ }
#ifdef CONFIG_HUGETLB_PAGE
if (PageHuge(new)) {
@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
*/
if (!PageMappingFlags(page))
page->mapping = NULL;
+
+ if (unlikely(is_zone_device_page(newpage))) {
+ if (is_device_public_page(newpage))
+ flush_dcache_page(newpage);
+ } else
+ flush_dcache_page(newpage);
+
}
out:
return rc;
diff --git a/mm/mmap.c b/mm/mmap.c
index 41eb48d9b527..bd7b9f293b39 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -45,6 +45,7 @@
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
+#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -2525,7 +2526,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
vma = find_vma_prev(mm, addr, &prev);
if (vma && (vma->vm_start <= addr))
return vma;
- if (!prev || expand_stack(prev, addr))
+ /* don't alter vm_end if the coredump is running */
+ if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
return NULL;
if (prev->vm_flags & VM_LOCKED)
populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@@ -2551,6 +2553,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
return vma;
if (!(vma->vm_flags & VM_GROWSDOWN))
return NULL;
+ /* don't alter vm_start if the coredump is running */
+ if (!mmget_still_valid(mm))
+ return NULL;
start = vma->vm_start;
if (expand_stack(vma, addr))
return NULL;
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index f2f03c655807..99740e1dd273 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -11,7 +11,7 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>
-#ifdef HAVE_GENERIC_MMU_GATHER
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
static bool tlb_next_batch(struct mmu_gather *tlb)
{
@@ -41,35 +41,10 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
return true;
}
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
-
- /* Is it from 0 to ~0? */
- tlb->fullmm = !(start | (end+1));
- tlb->need_flush_all = 0;
- tlb->local.next = NULL;
- tlb->local.nr = 0;
- tlb->local.max = ARRAY_SIZE(tlb->__pages);
- tlb->active = &tlb->local;
- tlb->batch_count = 0;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb->batch = NULL;
-#endif
- tlb->page_size = 0;
-
- __tlb_reset_range(tlb);
-}
-
-void tlb_flush_mmu_free(struct mmu_gather *tlb)
+static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb_table_flush(tlb);
-#endif
for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
@@ -77,31 +52,10 @@ void tlb_flush_mmu_free(struct mmu_gather *tlb)
tlb->active = &tlb->local;
}
-void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-/* tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
+static void tlb_batch_list_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch, *next;
- if (force) {
- __tlb_reset_range(tlb);
- __tlb_adjust_range(tlb, start, end - start);
- }
-
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
for (batch = tlb->local.next; batch; batch = next) {
next = batch->next;
free_pages((unsigned long)batch, 0);
@@ -109,19 +63,15 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
tlb->local.next = NULL;
}
-/* __tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- * handling the additional races in SMP caused by other CPUs caching valid
- * mappings in their TLBs. Returns the number of free page slots left.
- * When out of page slots we must call tlb_flush_mmu().
- *returns true if the caller should flush.
- */
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
struct mmu_gather_batch *batch;
VM_BUG_ON(!tlb->end);
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
VM_WARN_ON(tlb->page_size != page_size);
+#endif
batch = tlb->active;
/*
@@ -139,7 +89,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
return false;
}
-#endif /* HAVE_GENERIC_MMU_GATHER */
+#endif /* HAVE_MMU_GATHER_NO_GATHER */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -152,7 +102,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
*/
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
-#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
+#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
/*
* Invalidate page-table caches used by hardware walkers. Then we still
* need to RCU-sched wait while freeing the pages because software
@@ -193,7 +143,7 @@ static void tlb_remove_table_rcu(struct rcu_head *head)
free_page((unsigned long)batch);
}
-void tlb_table_flush(struct mmu_gather *tlb)
+static void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
@@ -225,6 +175,22 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb_table_flush(tlb);
+#endif
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+ tlb_batch_pages_flush(tlb);
+#endif
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+ tlb_flush_mmu_tlbonly(tlb);
+ tlb_flush_mmu_free(tlb);
+}
+
/**
* tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
* @tlb: the mmu_gather structure to initialize
@@ -240,10 +206,40 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end)
{
- arch_tlb_gather_mmu(tlb, mm, start, end);
+ tlb->mm = mm;
+
+ /* Is it from 0 to ~0? */
+ tlb->fullmm = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+ tlb->need_flush_all = 0;
+ tlb->local.next = NULL;
+ tlb->local.nr = 0;
+ tlb->local.max = ARRAY_SIZE(tlb->__pages);
+ tlb->active = &tlb->local;
+ tlb->batch_count = 0;
+#endif
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb->batch = NULL;
+#endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+ tlb->page_size = 0;
+#endif
+
+ __tlb_reset_range(tlb);
inc_tlb_flush_pending(tlb->mm);
}
+/**
+ * tlb_finish_mmu - finish an mmu_gather structure
+ * @tlb: the mmu_gather structure to finish
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called at the end of the shootdown operation to free up any resources that
+ * were required.
+ */
void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end)
{
@@ -254,8 +250,17 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
* the TLB by observing pte_none|!pte_dirty, for example so flush TLB
* forcefully if we detect parallel PTE batching threads.
*/
- bool force = mm_tlb_flush_nested(tlb->mm);
+ if (mm_tlb_flush_nested(tlb->mm)) {
+ __tlb_reset_range(tlb);
+ __tlb_adjust_range(tlb, start, end - start);
+ }
- arch_tlb_finish_mmu(tlb, start, end, force);
+ tlb_flush_mmu(tlb);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+ tlb_batch_list_free(tlb);
+#endif
dec_tlb_flush_pending(tlb->mm);
}
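For orientation, the canonical caller pattern around the two surviving entry
points (a generic sketch of the unmap path as of this kernel, not code added
by this patch):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);
	unmap_vmas(&tlb, vma, start, end);	/* batches pages, may flush early */
	free_pgtables(&tlb, vma, floor, ceiling);
	tlb_finish_mmu(&tlb, start, end);	/* final TLB flush + batch free */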
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3eb01dedfb50..59661106da16 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
+ * are not on separate NUMA nodes. Functionally this works but with
+ * watermark_boost_factor, it can reclaim prematurely as the ranges can be
+ * quite small. By default, do not boost watermarks on discontigmem as in
+ * many cases very high-order allocations like THP are likely to be
+ * unsupported and the premature reclaim offsets the advantage of long-term
+ * fragmentation avoidance.
+ */
+int watermark_boost_factor __read_mostly;
+#else
int watermark_boost_factor __read_mostly = 15000;
+#endif
int watermark_scale_factor = 10;
static unsigned long nr_kernel_pages __initdata;
@@ -1131,7 +1144,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
}
arch_free_page(page, order);
kernel_poison_pages(page, 1 << order, 0);
- kernel_map_pages(page, 1 << order, 0);
+ if (debug_pagealloc_enabled())
+ kernel_map_pages(page, 1 << order, 0);
+
kasan_free_nondeferred_pages(page, order);
return true;
@@ -2001,7 +2016,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
set_page_refcounted(page);
arch_alloc_page(page, order);
- kernel_map_pages(page, 1 << order, 1);
+ if (debug_pagealloc_enabled())
+ kernel_map_pages(page, 1 << order, 1);
kasan_alloc_pages(page, order);
kernel_poison_pages(page, 1 << order, 1);
set_page_owner(page, order, gfp_flags);
@@ -3419,8 +3435,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
alloc_flags |= ALLOC_KSWAPD;
#ifdef CONFIG_ZONE_DMA32
+ if (!zone)
+ return alloc_flags;
+
if (zone_idx(zone) != ZONE_NORMAL)
- goto out;
+ return alloc_flags;
/*
* If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3429,9 +3448,9 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
*/
BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
if (nr_online_nodes > 1 && !populated_zone(--zone))
- goto out;
+ return alloc_flags;
-out:
+ alloc_flags |= ALLOC_NOFRAGMENT;
#endif /* CONFIG_ZONE_DMA32 */
return alloc_flags;
}
@@ -3773,11 +3792,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
memalloc_noreclaim_restore(noreclaim_flag);
psi_memstall_leave(&pflags);
- if (*compact_result <= COMPACT_INACTIVE) {
- WARN_ON_ONCE(page);
- return NULL;
- }
-
/*
* At least in one zone compaction wasn't deferred or skipped, so let's
* count a compaction stall
@@ -6445,8 +6459,8 @@ static void __ref setup_usemap(struct pglist_data *pgdat,
zone->pageblock_flags = NULL;
if (usemapsize) {
zone->pageblock_flags =
- memblock_alloc_node_nopanic(usemapsize,
- pgdat->node_id);
+ memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
+ pgdat->node_id);
if (!zone->pageblock_flags)
panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
usemapsize, zone->name, pgdat->node_id);
@@ -6679,7 +6693,8 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
end = pgdat_end_pfn(pgdat);
end = ALIGN(end, MAX_ORDER_NR_PAGES);
size = (end - start) * sizeof(struct page);
- map = memblock_alloc_node_nopanic(size, pgdat->node_id);
+ map = memblock_alloc_node(size, SMP_CACHE_BYTES,
+ pgdat->node_id);
if (!map)
panic("Failed to allocate %ld bytes for node %d memory map\n",
size, pgdat->node_id);
@@ -7959,8 +7974,7 @@ void *__init alloc_large_system_hash(const char *tablename,
size = bucketsize << log2qty;
if (flags & HASH_EARLY) {
if (flags & HASH_ZERO)
- table = memblock_alloc_nopanic(size,
- SMP_CACHE_BYTES);
+ table = memblock_alloc(size, SMP_CACHE_BYTES);
else
table = memblock_alloc_raw(size,
SMP_CACHE_BYTES);
@@ -8005,7 +8019,10 @@ void *__init alloc_large_system_hash(const char *tablename,
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
int migratetype, int flags)
{
- unsigned long pfn, iter, found;
+ unsigned long found;
+ unsigned long iter = 0;
+ unsigned long pfn = page_to_pfn(page);
+ const char *reason = "unmovable page";
/*
* TODO we could make this much more efficient by not checking every
@@ -8015,17 +8032,20 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
* can still lead to having bootmem allocations in zone_movable.
*/
- /*
- * CMA allocations (alloc_contig_range) really need to mark isolate
- * CMA pageblocks even when they are not movable in fact so consider
- * them movable here.
- */
- if (is_migrate_cma(migratetype) &&
- is_migrate_cma(get_pageblock_migratetype(page)))
- return false;
+ if (is_migrate_cma_page(page)) {
+ /*
+ * CMA allocations (alloc_contig_range) really need to mark
+ * isolate CMA pageblocks even when they are not movable in fact
+ * so consider them movable here.
+ */
+ if (is_migrate_cma(migratetype))
+ return false;
- pfn = page_to_pfn(page);
- for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
+ reason = "CMA page";
+ goto unmovable;
+ }
+
+ for (found = 0; iter < pageblock_nr_pages; iter++) {
unsigned long check = pfn + iter;
if (!pfn_valid_within(check))
@@ -8105,7 +8125,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
unmovable:
WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
if (flags & REPORT_FAILURE)
- dump_page(pfn_to_page(pfn+iter), "unmovable page");
+ dump_page(pfn_to_page(pfn + iter), reason);
return true;
}
@@ -8233,7 +8253,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
ret = start_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype, 0);
- if (ret)
+ if (ret < 0)
return ret;
/*
diff --git a/mm/page_ext.c b/mm/page_ext.c
index ab4244920e0f..d8f1aca4ad43 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -161,7 +161,7 @@ static int __init alloc_node_page_ext(int nid)
table_size = get_entry_size() * nr_pages;
- base = memblock_alloc_try_nid_nopanic(
+ base = memblock_alloc_try_nid(
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
if (!base)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index ce323e56b34d..019280712e1b 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
* We just check MOVABLE pages.
*/
- if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
+ if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
+ isol_flags))
ret = 0;
/*
@@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
return NULL;
}
-/*
- * start_isolate_page_range() -- make page-allocation-type of range of pages
- * to be MIGRATE_ISOLATE.
- * @start_pfn: The lower PFN of the range to be isolated.
- * @end_pfn: The upper PFN of the range to be isolated.
- * @migratetype: migrate type to set in error recovery.
+/**
+ * start_isolate_page_range() - make page-allocation-type of range of pages to
+ * be MIGRATE_ISOLATE.
+ * @start_pfn: The lower PFN of the range to be isolated.
+ * @end_pfn: The upper PFN of the range to be isolated.
+ * start_pfn/end_pfn must be aligned to pageblock_order.
+ * @migratetype: Migrate type to set in error recovery.
+ * @flags: The following flags are allowed (they can be combined in
+ * a bit mask)
+ * SKIP_HWPOISON - ignore hwpoison pages
+ * REPORT_FAILURE - report details about the failure to
+ * isolate the range
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the
- * future will not be allocated again.
- *
- * start_pfn/end_pfn must be aligned to pageblock_order.
- * Return 0 on success and -EBUSY if any part of range cannot be isolated.
+ * future will not be allocated again. If the specified range includes migrate
+ * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
+ * isolate all pages in the range, the caller has to free all pages in the
+ * range; test_pages_isolated() can be used to test it.
*
* There is no high level synchronization mechanism that prevents two threads
- * from trying to isolate overlapping ranges. If this happens, one thread
+ * from trying to isolate overlapping ranges. If this happens, one thread
* will notice pageblocks in the overlapping range already set to isolate.
* This happens in set_migratetype_isolate, and set_migratetype_isolate
- * returns an error. We then clean up by restoring the migration type on
- * pageblocks we may have modified and return -EBUSY to caller. This
+ * returns an error. We then clean up by restoring the migration type on
+ * pageblocks we may have modified and return -EBUSY to caller. This
* prevents two threads from simultaneously working on overlapping ranges.
+ *
+ * Return: the number of isolated pageblocks on success and -EBUSY if any part
+ * of range cannot be isolated.
*/
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype, int flags)
@@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned long pfn;
unsigned long undo_pfn;
struct page *page;
+ int nr_isolate_pageblock = 0;
BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
pfn < end_pfn;
pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages);
- if (page &&
- set_migratetype_isolate(page, migratetype, flags)) {
- undo_pfn = pfn;
- goto undo;
+ if (page) {
+ if (set_migratetype_isolate(page, migratetype, flags)) {
+ undo_pfn = pfn;
+ goto undo;
+ }
+ nr_isolate_pageblock++;
}
}
- return 0;
+ return nr_isolate_pageblock;
undo:
for (pfn = start_pfn;
pfn < undo_pfn;
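Note the changed contract: a non-negative return is now a count of isolated
pageblocks, so callers must test for ret < 0 rather than ret != 0 (as the
memory_hotplug.c and page_alloc.c hunks above already do); in sketch form:

	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				       SKIP_HWPOISON | REPORT_FAILURE);
	if (ret < 0)
		return ret;		/* -EBUSY, nothing was isolated */
	nr_isolate_pageblock = ret;	/* for zone->nr_isolate_pageblock */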
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 925b6f44a444..addcbb2ae4e4 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -58,15 +58,10 @@ static bool need_page_owner(void)
static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
unsigned long entries[4];
- struct stack_trace dummy;
+ unsigned int nr_entries;
- dummy.nr_entries = 0;
- dummy.max_entries = ARRAY_SIZE(entries);
- dummy.entries = &entries[0];
- dummy.skip = 0;
-
- save_stack_trace(&dummy);
- return depot_save_stack(&dummy, GFP_KERNEL);
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+ return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}
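The conversion target here is the new arch-agnostic stacktrace API, which
works on a bare array instead of struct stack_trace; its general shape
(a sketch based on <linux/stacktrace.h>, sizes illustrative):

	unsigned long entries[8];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 2 /* skipnr */);
	stack_trace_print(entries, nr, 0 /* indent spaces */);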
static noinline void register_dummy_stack(void)
@@ -120,49 +115,39 @@ void __reset_page_owner(struct page *page, unsigned int order)
}
}
-static inline bool check_recursive_alloc(struct stack_trace *trace,
- unsigned long ip)
+static inline bool check_recursive_alloc(unsigned long *entries,
+ unsigned int nr_entries,
+ unsigned long ip)
{
- int i;
-
- if (!trace->nr_entries)
- return false;
+ unsigned int i;
- for (i = 0; i < trace->nr_entries; i++) {
- if (trace->entries[i] == ip)
+ for (i = 0; i < nr_entries; i++) {
+ if (entries[i] == ip)
return true;
}
-
return false;
}
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[PAGE_OWNER_STACK_DEPTH];
- struct stack_trace trace = {
- .nr_entries = 0,
- .entries = entries,
- .max_entries = PAGE_OWNER_STACK_DEPTH,
- .skip = 2
- };
depot_stack_handle_t handle;
+ unsigned int nr_entries;
- save_stack_trace(&trace);
- if (trace.nr_entries != 0 &&
- trace.entries[trace.nr_entries-1] == ULONG_MAX)
- trace.nr_entries--;
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
/*
- * We need to check recursion here because our request to stackdepot
- * could trigger memory allocation to save new entry. New memory
- * allocation would reach here and call depot_save_stack() again
- * if we don't catch it. There is still not enough memory in stackdepot
- * so it would try to allocate memory again and loop forever.
+ * We need to check recursion here because our request to
+ * stackdepot could trigger memory allocation to save a new
+ * entry. New memory allocation would reach here and call
+ * stack_depot_save() again if we don't catch it. There is
+ * still not enough memory in stackdepot so it would try to
+ * allocate memory again and loop forever.
*/
- if (check_recursive_alloc(&trace, _RET_IP_))
+ if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
return dummy_handle;
- handle = depot_save_stack(&trace, flags);
+ handle = stack_depot_save(entries, nr_entries, flags);
if (!handle)
handle = failure_handle;
@@ -340,16 +325,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
struct page *page, struct page_owner *page_owner,
depot_stack_handle_t handle)
{
- int ret;
- int pageblock_mt, page_mt;
+ int ret, pageblock_mt, page_mt;
+ unsigned long *entries;
+ unsigned int nr_entries;
char *kbuf;
- unsigned long entries[PAGE_OWNER_STACK_DEPTH];
- struct stack_trace trace = {
- .nr_entries = 0,
- .entries = entries,
- .max_entries = PAGE_OWNER_STACK_DEPTH,
- .skip = 0
- };
count = min_t(size_t, count, PAGE_SIZE);
kbuf = kmalloc(count, GFP_KERNEL);
@@ -378,8 +357,8 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
if (ret >= count)
goto err;
- depot_fetch_stack(handle, &trace);
- ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
+ nr_entries = stack_depot_fetch(handle, &entries);
+ ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
if (ret >= count)
goto err;
@@ -410,14 +389,9 @@ void __dump_page_owner(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_owner *page_owner;
- unsigned long entries[PAGE_OWNER_STACK_DEPTH];
- struct stack_trace trace = {
- .nr_entries = 0,
- .entries = entries,
- .max_entries = PAGE_OWNER_STACK_DEPTH,
- .skip = 0
- };
depot_stack_handle_t handle;
+ unsigned long *entries;
+ unsigned int nr_entries;
gfp_t gfp_mask;
int mt;
@@ -441,10 +415,10 @@ void __dump_page_owner(struct page *page)
return;
}
- depot_fetch_stack(handle, &trace);
+ nr_entries = stack_depot_fetch(handle, &entries);
pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
- print_stack_trace(&trace, 0);
+ stack_trace_print(entries, nr_entries, 0);
if (page_owner->last_migrate_reason != -1)
pr_alert("page has been migrated, last migrate reason: %s\n",
diff --git a/mm/percpu.c b/mm/percpu.c
index c5c750781628..68dd2e7e73b5 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1086,6 +1086,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
struct pcpu_chunk *chunk;
unsigned long aligned_addr, lcm_align;
int start_offset, offset_bits, region_size, region_bits;
+ size_t alloc_size;
/* region calculations */
aligned_addr = tmp_addr & PAGE_MASK;
@@ -1101,9 +1102,12 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
region_size = ALIGN(start_offset + map_size, lcm_align);
/* allocate chunk */
- chunk = memblock_alloc(sizeof(struct pcpu_chunk) +
- BITS_TO_LONGS(region_size >> PAGE_SHIFT),
- SMP_CACHE_BYTES);
+ alloc_size = sizeof(struct pcpu_chunk) +
+ BITS_TO_LONGS(region_size >> PAGE_SHIFT);
+ chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!chunk)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ alloc_size);
INIT_LIST_HEAD(&chunk->list);
@@ -1114,12 +1118,25 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
chunk->nr_pages = region_size >> PAGE_SHIFT;
region_bits = pcpu_chunk_map_bits(chunk);
- chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]),
- SMP_CACHE_BYTES);
- chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]),
- SMP_CACHE_BYTES);
- chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]),
- SMP_CACHE_BYTES);
+ alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
+ chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!chunk->alloc_map)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ alloc_size);
+
+ alloc_size =
+ BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
+ chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!chunk->bound_map)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ alloc_size);
+
+ alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
+ chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!chunk->md_blocks)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ alloc_size);
+
pcpu_init_md_blocks(chunk);
/* manage populated page bitmap */
@@ -1888,7 +1905,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
__alignof__(ai->groups[0].cpu_map[0]));
ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
- ptr = memblock_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
+ ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
if (!ptr)
return NULL;
ai = ptr;
@@ -2044,6 +2061,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
int group, unit, i;
int map_size;
unsigned long tmp_addr;
+ size_t alloc_size;
#define PCPU_SETUP_BUG_ON(cond) do { \
if (unlikely(cond)) { \
@@ -2075,14 +2093,29 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
/* process group information and build config tables accordingly */
- group_offsets = memblock_alloc(ai->nr_groups * sizeof(group_offsets[0]),
- SMP_CACHE_BYTES);
- group_sizes = memblock_alloc(ai->nr_groups * sizeof(group_sizes[0]),
- SMP_CACHE_BYTES);
- unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]),
- SMP_CACHE_BYTES);
- unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]),
- SMP_CACHE_BYTES);
+ alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
+ group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!group_offsets)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ alloc_size);
+
+ alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
+ group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!group_sizes)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ alloc_size);
+
+ alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
+ unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!unit_map)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ alloc_size);
+
+ alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
+ unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!unit_off)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ alloc_size);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
@@ -2148,6 +2181,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]),
SMP_CACHE_BYTES);
+ if (!pcpu_slot)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ pcpu_nr_slots * sizeof(pcpu_slot[0]));
for (i = 0; i < pcpu_nr_slots; i++)
INIT_LIST_HEAD(&pcpu_slot[i]);
@@ -2460,7 +2496,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
- areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES);
+ areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
if (!areas) {
rc = -ENOMEM;
goto out_free;
@@ -2531,8 +2567,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
ai->groups[group].base_offset = areas[group] - base;
}
- pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
- PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+ pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
+ PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
ai->dyn_size, ai->unit_size);
rc = pcpu_setup_first_chunk(ai, base);
@@ -2602,6 +2638,9 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
sizeof(pages[0]));
pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
+ if (!pages)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ pages_size);
/* allocate pages */
j = 0;
@@ -2653,8 +2692,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
}
/* we're ready, commit */
- pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
- unit_pages, psize_str, vm.addr, ai->static_size,
+ pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
+ unit_pages, psize_str, ai->static_size,
ai->reserved_size, ai->dyn_size);
rc = pcpu_setup_first_chunk(ai, vm.addr);
@@ -2690,8 +2729,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
size_t align)
{
- return memblock_alloc_from_nopanic(
- size, align, __pa(MAX_DMA_ADDRESS));
+ return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}
static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
@@ -2739,9 +2777,7 @@ void __init setup_per_cpu_areas(void)
void *fc;
ai = pcpu_alloc_alloc_info(1, 1);
- fc = memblock_alloc_from_nopanic(unit_size,
- PAGE_SIZE,
- __pa(MAX_DMA_ADDRESS));
+ fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
if (!ai || !fc)
panic("Failed to allocate memory for percpu areas.");
/* kmemleak tracks the percpu allocations separately */
diff --git a/mm/shmem.c b/mm/shmem.c
index b3db3779a30a..2275a0ff7c30 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1081,9 +1081,14 @@ static void shmem_evict_inode(struct inode *inode)
}
spin_unlock(&sbinfo->shrinklist_lock);
}
- if (!list_empty(&info->swaplist)) {
+ while (!list_empty(&info->swaplist)) {
+ /* Wait while shmem_unuse() is scanning this inode... */
+ wait_var_event(&info->stop_eviction,
+ !atomic_read(&info->stop_eviction));
mutex_lock(&shmem_swaplist_mutex);
- list_del_init(&info->swaplist);
+ /* ...but beware of the race if we peeked too early */
+ if (!atomic_read(&info->stop_eviction))
+ list_del_init(&info->swaplist);
mutex_unlock(&shmem_swaplist_mutex);
}
}
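The two sides of the new handshake, reduced to the bare wait_var_event() /
wake_up_var() pattern (a sketch; both halves appear in full in this hunk and
in the shmem_unuse() hunk below):

	/* evicting side: wait until no swapoff scan pins this inode */
	wait_var_event(&info->stop_eviction,
		       !atomic_read(&info->stop_eviction));

	/* scanning side: pin, scan, unpin, wake any waiting eviction */
	atomic_inc(&info->stop_eviction);
	/* ... shmem_unuse_inode() ... */
	if (atomic_dec_and_test(&info->stop_eviction))
		wake_up_var(&info->stop_eviction);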
@@ -1099,10 +1104,11 @@ extern struct swap_info_struct *swap_info[];
static int shmem_find_swap_entries(struct address_space *mapping,
pgoff_t start, unsigned int nr_entries,
struct page **entries, pgoff_t *indices,
- bool frontswap)
+ unsigned int type, bool frontswap)
{
XA_STATE(xas, &mapping->i_pages, start);
struct page *page;
+ swp_entry_t entry;
unsigned int ret = 0;
if (!nr_entries)
@@ -1116,13 +1122,12 @@ static int shmem_find_swap_entries(struct address_space *mapping,
if (!xa_is_value(page))
continue;
- if (frontswap) {
- swp_entry_t entry = radix_to_swp_entry(page);
-
- if (!frontswap_test(swap_info[swp_type(entry)],
- swp_offset(entry)))
- continue;
- }
+ entry = radix_to_swp_entry(page);
+ if (swp_type(entry) != type)
+ continue;
+ if (frontswap &&
+ !frontswap_test(swap_info[type], swp_offset(entry)))
+ continue;
indices[ret] = xas.xa_index;
entries[ret] = page;
@@ -1194,7 +1199,7 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type,
pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
pvec.pages, indices,
- frontswap);
+ type, frontswap);
if (pvec.nr == 0) {
ret = 0;
break;
@@ -1227,36 +1232,27 @@ int shmem_unuse(unsigned int type, bool frontswap,
unsigned long *fs_pages_to_unuse)
{
struct shmem_inode_info *info, *next;
- struct inode *inode;
- struct inode *prev_inode = NULL;
int error = 0;
if (list_empty(&shmem_swaplist))
return 0;
mutex_lock(&shmem_swaplist_mutex);
-
- /*
- * The extra refcount on the inode is necessary to safely dereference
- * p->next after re-acquiring the lock. New shmem inodes with swap
- * get added to the end of the list and we will scan them all.
- */
list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
if (!info->swapped) {
list_del_init(&info->swaplist);
continue;
}
-
- inode = igrab(&info->vfs_inode);
- if (!inode)
- continue;
-
+ /*
+ * Drop the swaplist mutex while searching the inode for swap;
+ * but before doing so, make sure shmem_evict_inode() will not
+ * remove placeholder inode from swaplist, nor let it be freed
+ * (igrab() would protect from unlink, but not from unmount).
+ */
+ atomic_inc(&info->stop_eviction);
mutex_unlock(&shmem_swaplist_mutex);
- if (prev_inode)
- iput(prev_inode);
- prev_inode = inode;
- error = shmem_unuse_inode(inode, type, frontswap,
+ error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
fs_pages_to_unuse);
cond_resched();
@@ -1264,14 +1260,13 @@ int shmem_unuse(unsigned int type, bool frontswap,
next = list_next_entry(info, swaplist);
if (!info->swapped)
list_del_init(&info->swaplist);
+ if (atomic_dec_and_test(&info->stop_eviction))
+ wake_up_var(&info->stop_eviction);
if (error)
break;
}
mutex_unlock(&shmem_swaplist_mutex);
- if (prev_inode)
- iput(prev_inode);
-
return error;
}
@@ -2238,6 +2233,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
+ atomic_set(&info->stop_eviction, 0);
info->seals = F_SEAL_SEAL;
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->shrinklist);
diff --git a/mm/slab.c b/mm/slab.c
index 28652e4218e0..284ab737faee 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1467,53 +1467,17 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
}
#ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
- unsigned long caller)
-{
- int size = cachep->object_size;
-
- addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
-
- if (size < 5 * sizeof(unsigned long))
- return;
-
- *addr++ = 0x12345678;
- *addr++ = caller;
- *addr++ = smp_processor_id();
- size -= 3 * sizeof(unsigned long);
- {
- unsigned long *sptr = &caller;
- unsigned long svalue;
-
- while (!kstack_end(sptr)) {
- svalue = *sptr++;
- if (kernel_text_address(svalue)) {
- *addr++ = svalue;
- size -= sizeof(unsigned long);
- if (size <= sizeof(unsigned long))
- break;
- }
- }
-
- }
- *addr++ = 0x87654321;
-}
-
-static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
- int map, unsigned long caller)
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
{
if (!is_debug_pagealloc_cache(cachep))
return;
- if (caller)
- store_stackinfo(cachep, objp, caller);
-
kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}
#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
- int map, unsigned long caller) {}
+ int map) {}
#endif
@@ -1661,7 +1625,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
if (cachep->flags & SLAB_POISON) {
check_poison_obj(cachep, objp);
- slab_kernel_map(cachep, objp, 1, 0);
+ slab_kernel_map(cachep, objp, 1);
}
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
@@ -2115,6 +2079,8 @@ done:
cachep->allocflags = __GFP_COMP;
if (flags & SLAB_CACHE_DMA)
cachep->allocflags |= GFP_DMA;
+ if (flags & SLAB_CACHE_DMA32)
+ cachep->allocflags |= GFP_DMA32;
if (flags & SLAB_RECLAIM_ACCOUNT)
cachep->allocflags |= __GFP_RECLAIMABLE;
cachep->size = size;
@@ -2372,7 +2338,6 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
/* Slab management obj is off-slab. */
freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
- freelist = kasan_reset_tag(freelist);
if (!freelist)
return NULL;
} else {
@@ -2432,7 +2397,7 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON) {
poison_obj(cachep, objp, POISON_FREE);
- slab_kernel_map(cachep, objp, 0, 0);
+ slab_kernel_map(cachep, objp, 0);
}
}
#endif
@@ -2811,7 +2776,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
if (cachep->flags & SLAB_POISON) {
poison_obj(cachep, objp, POISON_FREE);
- slab_kernel_map(cachep, objp, 0, caller);
+ slab_kernel_map(cachep, objp, 0);
}
return objp;
}
@@ -3075,7 +3040,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
return objp;
if (cachep->flags & SLAB_POISON) {
check_poison_obj(cachep, objp);
- slab_kernel_map(cachep, objp, 1, 0);
+ slab_kernel_map(cachep, objp, 1);
poison_obj(cachep, objp, POISON_INUSE);
}
if (cachep->flags & SLAB_STORE_USER)
@@ -4306,7 +4271,8 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
- struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+ root_caches_node);
struct page *page;
struct kmem_cache_node *n;
const char *name;
diff --git a/mm/slab.h b/mm/slab.h
index e5e6658eeacc..43ac818b8592 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
/* Legal flag mask for kmem_cache_create(), for various configurations */
-#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
+ SLAB_CACHE_DMA32 | SLAB_PANIC | \
SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
#if defined(CONFIG_DEBUG_SLAB)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 03eeb8b7b4b1..58251ba63e4a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
SLAB_FAILSLAB | SLAB_KASAN)
#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
- SLAB_ACCOUNT)
+ SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
/*
* Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index 1b08fbcb7e61..6b28cd2b5a58 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -552,31 +552,22 @@ static void set_track(struct kmem_cache *s, void *object,
if (addr) {
#ifdef CONFIG_STACKTRACE
- struct stack_trace trace;
- int i;
+ unsigned int nr_entries;
- trace.nr_entries = 0;
- trace.max_entries = TRACK_ADDRS_COUNT;
- trace.entries = p->addrs;
- trace.skip = 3;
metadata_access_enable();
- save_stack_trace(&trace);
+ nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
metadata_access_disable();
- /* See rant in lockdep.c */
- if (trace.nr_entries != 0 &&
- trace.entries[trace.nr_entries - 1] == ULONG_MAX)
- trace.nr_entries--;
-
- for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
- p->addrs[i] = 0;
+ if (nr_entries < TRACK_ADDRS_COUNT)
+ p->addrs[nr_entries] = 0;
#endif
p->addr = addr;
p->cpu = smp_processor_id();
p->pid = current->pid;
p->when = jiffies;
- } else
+ } else {
memset(p, 0, sizeof(struct track));
+ }
}
static void init_tracking(struct kmem_cache *s, void *object)
@@ -3589,6 +3580,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
if (s->flags & SLAB_CACHE_DMA)
s->allocflags |= GFP_DMA;
+ if (s->flags & SLAB_CACHE_DMA32)
+ s->allocflags |= GFP_DMA32;
+
if (s->flags & SLAB_RECLAIM_ACCOUNT)
s->allocflags |= __GFP_RECLAIMABLE;
@@ -5679,6 +5673,8 @@ static char *create_unique_id(struct kmem_cache *s)
*/
if (s->flags & SLAB_CACHE_DMA)
*p++ = 'd';
+ if (s->flags & SLAB_CACHE_DMA32)
+ *p++ = 'D';
if (s->flags & SLAB_RECLAIM_ACCOUNT)
*p++ = 'a';
if (s->flags & SLAB_CONSISTENCY_CHECKS)
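
The set_track() hunk above is one instance of the tree-wide move from struct stack_trace to the leaner stack_trace_save() interface, which takes the buffer directly and returns the entry count. A sketch of the resulting shape, assuming a TRACK_ADDRS_COUNT-sized buffer as in slub; the wrapper function is illustrative:

#include <linux/stacktrace.h>

static void record_stack(unsigned long *addrs)
{
        unsigned int nr_entries;

        /*
         * Store up to TRACK_ADDRS_COUNT return addresses, skipping 3
         * frames of tracking machinery.  Unlike save_stack_trace(),
         * no ULONG_MAX end marker is left behind, so only the
         * terminating zero needs to be written.
         */
        nr_entries = stack_trace_save(addrs, TRACK_ADDRS_COUNT, 3);
        if (nr_entries < TRACK_ADDRS_COUNT)
                addrs[nr_entries] = 0;
}
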
diff --git a/mm/sparse.c b/mm/sparse.c
index 77a0554fa5bd..56e057c432f9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -65,11 +65,15 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
unsigned long array_size = SECTIONS_PER_ROOT *
sizeof(struct mem_section);
- if (slab_is_available())
+ if (slab_is_available()) {
section = kzalloc_node(array_size, GFP_KERNEL, nid);
- else
+ } else {
section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
nid);
+ if (!section)
+ panic("%s: Failed to allocate %lu bytes nid=%d\n",
+ __func__, array_size, nid);
+ }
return section;
}
@@ -218,6 +222,9 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
align = 1 << (INTERNODE_CACHE_SHIFT);
mem_section = memblock_alloc(size, align);
+ if (!mem_section)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, size, align);
}
#endif
@@ -323,9 +330,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
limit = goal + (1UL << PA_SECTION_SHIFT);
nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
- p = memblock_alloc_try_nid_nopanic(size,
- SMP_CACHE_BYTES, goal, limit,
- nid);
+ p = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
if (!p && limit) {
limit = 0;
goto again;
@@ -379,7 +384,7 @@ static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
unsigned long size)
{
- return memblock_alloc_node_nopanic(size, pgdat->node_id);
+ return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -404,13 +409,18 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
{
unsigned long size = section_map_size();
struct page *map = sparse_buffer_alloc(size);
+ phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
if (map)
return map;
map = memblock_alloc_try_nid(size,
- PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+ PAGE_SIZE, addr,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+ if (!map)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
+ __func__, size, PAGE_SIZE, nid, &addr);
+
return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -420,10 +430,11 @@ static void *sparsemap_buf_end __meminitdata;
static void __init sparse_buffer_init(unsigned long size, int nid)
{
+ phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
sparsemap_buf =
memblock_alloc_try_nid_raw(size, PAGE_SIZE,
- __pa(MAX_DMA_ADDRESS),
+ addr,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
sparsemap_buf_end = sparsemap_buf + size;
}
@@ -556,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-/* Mark all memory sections within the pfn range as online */
+/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
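
The sparse.c hunks track the memblock API consolidation: the *_nopanic variants go away, the remaining allocators return NULL on failure, and early-boot callers that cannot recover now check the result and panic with a uniform message. The idiom, sketched as a standalone helper:

#include <linux/memblock.h>

static void * __init early_alloc_or_die(phys_addr_t size, phys_addr_t align)
{
        void *buf = memblock_alloc(size, align);

        if (!buf)       /* boot-time allocation: there is no fallback */
                panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
                      __func__, (u64)size, (u64)align);
        return buf;
}
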
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2b8d9c3fbb47..cf63b5f01adf 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2023,7 +2023,6 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
* If the boolean frontswap is true, only unuse pages_to_unuse pages;
* pages_to_unuse==0 means all pages; ignored if frontswap is false
*/
-#define SWAP_UNUSE_MAX_TRIES 3
int try_to_unuse(unsigned int type, bool frontswap,
unsigned long pages_to_unuse)
{
@@ -2035,7 +2034,6 @@ int try_to_unuse(unsigned int type, bool frontswap,
struct page *page;
swp_entry_t entry;
unsigned int i;
- int retries = 0;
if (!si->inuse_pages)
return 0;
@@ -2053,11 +2051,9 @@ retry:
spin_lock(&mmlist_lock);
p = &init_mm.mmlist;
- while ((p = p->next) != &init_mm.mmlist) {
- if (signal_pending(current)) {
- retval = -EINTR;
- break;
- }
+ while (si->inuse_pages &&
+ !signal_pending(current) &&
+ (p = p->next) != &init_mm.mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
if (!mmget_not_zero(mm))
@@ -2084,7 +2080,9 @@ retry:
mmput(prev_mm);
i = 0;
- while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
+ while (si->inuse_pages &&
+ !signal_pending(current) &&
+ (i = find_next_to_unuse(si, i, frontswap)) != 0) {
entry = swp_entry(type, i);
page = find_get_page(swap_address_space(entry), i);
@@ -2117,14 +2115,18 @@ retry:
 * If yes, we would need to retry the unuse logic again.
* Under global memory pressure, swap entries can be reinserted back
* into process space after the mmlist loop above passes over them.
- * Its not worth continuosuly retrying to unuse the swap in this case.
- * So we try SWAP_UNUSE_MAX_TRIES times.
+ *
+ * Limit the number of retries? No: when mmget_not_zero() above fails,
+ * that mm is likely to be freeing swap from exit_mmap(), which proceeds
+ * at its own independent pace; and even shmem_writepage() could have
+ * been preempted after get_swap_page(), temporarily hiding that swap.
+ * It's easy and robust (though cpu-intensive) just to keep retrying.
*/
- if (++retries >= SWAP_UNUSE_MAX_TRIES)
- retval = -EBUSY;
- else if (si->inuse_pages)
- goto retry;
-
+ if (si->inuse_pages) {
+ if (!signal_pending(current))
+ goto retry;
+ retval = -EINTR;
+ }
out:
return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
}
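
The reworked try_to_unuse() replaces the bounded-retry heuristic with loops that re-check progress and pending signals on every iteration, so retrying is unbounded yet interruptible. Stripped to its shape (work_remains() and do_one_unit() are stand-ins for the mmlist and find_next_to_unuse() walks above):

static int unuse_all(struct swap_info_struct *si)
{
retry:
        while (si->inuse_pages && !signal_pending(current) &&
               work_remains(si))
                do_one_unit(si);

        if (si->inuse_pages) {
                if (signal_pending(current))
                        return -EINTR;  /* user gave up on swapoff */
                goto retry;             /* cpu-intensive but robust */
        }
        return 0;
}
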
diff --git a/mm/util.c b/mm/util.c
index d559bde497a9..43a2984bccaa 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -204,7 +204,7 @@ EXPORT_SYMBOL(vmemdup_user);
* @s: The string to duplicate
* @n: Maximum number of bytes to copy, including the trailing NUL.
*
- * Return: newly allocated copy of @s or %NULL in case of error
+ * Return: newly allocated copy of @s or an ERR_PTR() in case of error
*/
char *strndup_user(const char __user *s, long n)
{
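
The corrected kerneldoc matters to callers: strndup_user() reports failure via ERR_PTR() (-EFAULT, -EINVAL or -ENOMEM), never NULL, so the result must be tested with the IS_ERR() idiom. An illustrative caller:

#include <linux/err.h>
#include <linux/string.h>

static long copy_name(const char __user *uptr, char **out)
{
        char *name = strndup_user(uptr, PATH_MAX);

        if (IS_ERR(name))
                return PTR_ERR(name);   /* not a NULL check! */
        *out = name;
        return 0;
}
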
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e86ba6e74b50..e5e9e1fcac01 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
@@ -1059,24 +1060,9 @@ static void vb_free(const void *addr, unsigned long size)
spin_unlock(&vb->lock);
}
-/**
- * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
- *
- * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
- * to amortize TLB flushing overheads. What this means is that any page you
- * have now, may, in a former life, have been mapped into kernel virtual
- * address by the vmap layer and so there might be some CPUs with TLB entries
- * still referencing that page (additional to the regular 1:1 kernel mapping).
- *
- * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
- * be sure that none of the pages we have control over will have any aliases
- * from the vmap layer.
- */
-void vm_unmap_aliases(void)
+static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
- unsigned long start = ULONG_MAX, end = 0;
int cpu;
- int flush = 0;
if (unlikely(!vmap_initialized))
return;
@@ -1113,6 +1099,27 @@ void vm_unmap_aliases(void)
flush_tlb_kernel_range(start, end);
mutex_unlock(&vmap_purge_lock);
}
+
+/**
+ * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
+ *
+ * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
+ * to amortize TLB flushing overheads. What this means is that any page you
+ * have now, may, in a former life, have been mapped into kernel virtual
+ * address by the vmap layer and so there might be some CPUs with TLB entries
+ * still referencing that page (additional to the regular 1:1 kernel mapping).
+ *
+ * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
+ * be sure that none of the pages we have control over will have any aliases
+ * from the vmap layer.
+ */
+void vm_unmap_aliases(void)
+{
+ unsigned long start = ULONG_MAX, end = 0;
+ int flush = 0;
+
+ _vm_unmap_aliases(start, end, flush);
+}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
/**
@@ -1505,6 +1512,72 @@ struct vm_struct *remove_vm_area(const void *addr)
return NULL;
}
+static inline void set_area_direct_map(const struct vm_struct *area,
+ int (*set_direct_map)(struct page *page))
+{
+ int i;
+
+ for (i = 0; i < area->nr_pages; i++)
+ if (page_address(area->pages[i]))
+ set_direct_map(area->pages[i]);
+}
+
+/* Handle removing and resetting vm mappings related to the vm_struct. */
+static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
+{
+ unsigned long addr = (unsigned long)area->addr;
+ unsigned long start = ULONG_MAX, end = 0;
+ int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
+ int i;
+
+ /*
+ * The below block can be removed when all architectures that have
+ * direct map permissions also have set_direct_map_() implementations.
+ * This is concerned with resetting the direct map for any vm alias with
+ * execute permissions, without leaving a RW+X window.
+ */
+ if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
+ set_memory_nx(addr, area->nr_pages);
+ set_memory_rw(addr, area->nr_pages);
+ }
+
+ remove_vm_area(area->addr);
+
+ /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
+ if (!flush_reset)
+ return;
+
+ /*
+ * If not deallocating pages, just do the flush of the VM area and
+ * return.
+ */
+ if (!deallocate_pages) {
+ vm_unmap_aliases();
+ return;
+ }
+
+ /*
+ * If execution gets here, flush the vm mapping and reset the direct
+ * map. Find the start and end range of the direct mappings to make sure
+ * the vm_unmap_aliases() flush includes the direct map.
+ */
+ for (i = 0; i < area->nr_pages; i++) {
+ if (page_address(area->pages[i])) {
+ start = min(addr, start);
+ end = max(addr, end);
+ }
+ }
+
+ /*
+ * Set direct map to something invalid so that it won't be cached if
+ * there are any accesses after the TLB flush, then flush the TLB and
+ * reset the direct map permissions to the default.
+ */
+ set_area_direct_map(area, set_direct_map_invalid_noflush);
+ _vm_unmap_aliases(start, end, 1);
+ set_area_direct_map(area, set_direct_map_default_noflush);
+}
+
static void __vunmap(const void *addr, int deallocate_pages)
{
struct vm_struct *area;
@@ -1526,7 +1599,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
- remove_vm_area(addr);
+ vm_remove_mappings(area, deallocate_pages);
+
if (deallocate_pages) {
int i;
@@ -1961,8 +2035,9 @@ EXPORT_SYMBOL(vzalloc_node);
*/
void *vmalloc_exec(unsigned long size)
{
- return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
- NUMA_NO_NODE, __builtin_return_address(0));
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE, __builtin_return_address(0));
}
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
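
Taken together, the vmalloc.c changes give VM_FLUSH_RESET_PERMS areas a fixed teardown order in vfree(): unmap the vmalloc alias, invalidate the direct-map alias, flush the TLB over both ranges, then restore default direct-map permissions, so no window with a stale W+X alias remains. Callers are unaffected; a hedged usage sketch:

static void run_generated_code(unsigned long len)
{
        void *code = vmalloc_exec(len); /* PAGE_KERNEL_EXEC + reset flag */

        if (!code)
                return;
        /* ... emit and execute code ... */
        vfree(code);    /* direct map reset + TLB flush happen in here */
}
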
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 90648187f622..fd9de504e516 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2176,7 +2176,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
* 10TB 320 32GB
*/
static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
- struct mem_cgroup *memcg,
struct scan_control *sc, bool actual_reclaim)
{
enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
@@ -2197,16 +2196,12 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
- if (memcg)
- refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
- else
- refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
/*
* When refaults are being observed, it means a new workingset
* is being established. Disable active list protection to get
* rid of the stale workingset quickly.
*/
+ refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
if (file && actual_reclaim && lruvec->refaults != refaults) {
inactive_ratio = 0;
} else {
@@ -2227,12 +2222,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
}
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
- struct lruvec *lruvec, struct mem_cgroup *memcg,
- struct scan_control *sc)
+ struct lruvec *lruvec, struct scan_control *sc)
{
if (is_active_lru(lru)) {
- if (inactive_list_is_low(lruvec, is_file_lru(lru),
- memcg, sc, true))
+ if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
shrink_active_list(nr_to_scan, lruvec, sc, lru);
return 0;
}
@@ -2332,7 +2325,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
* anonymous pages on the LRU in eligible zones.
* Otherwise, the small LRU gets thrashed.
*/
- if (!inactive_list_is_low(lruvec, false, memcg, sc, false) &&
+ if (!inactive_list_is_low(lruvec, false, sc, false) &&
lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
>> sc->priority) {
scan_balance = SCAN_ANON;
@@ -2350,7 +2343,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
* lruvec even if it has plenty of old anonymous pages unless the
* system is under heavy pressure.
*/
- if (!inactive_list_is_low(lruvec, true, memcg, sc, false) &&
+ if (!inactive_list_is_low(lruvec, true, sc, false) &&
lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
scan_balance = SCAN_FILE;
goto out;
@@ -2503,7 +2496,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
nr[lru] -= nr_to_scan;
nr_reclaimed += shrink_list(lru, nr_to_scan,
- lruvec, memcg, sc);
+ lruvec, sc);
}
}
@@ -2570,7 +2563,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
* Even if we did not try to evict anon pages at all, we want to
* rebalance the anon lru active/inactive ratio.
*/
- if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+ if (inactive_list_is_low(lruvec, false, sc, true))
shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
sc, LRU_ACTIVE_ANON);
}
@@ -2969,12 +2962,8 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
unsigned long refaults;
struct lruvec *lruvec;
- if (memcg)
- refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
- else
- refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
lruvec = mem_cgroup_lruvec(pgdat, memcg);
+ refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
lruvec->refaults = refaults;
} while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
}
@@ -3339,7 +3328,7 @@ static void age_active_anon(struct pglist_data *pgdat,
do {
struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
- if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+ if (inactive_list_is_low(lruvec, false, sc, true))
shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
sc, LRU_ACTIVE_ANON);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 36b56f858f0f..a7d493366a65 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1274,13 +1274,8 @@ const char * const vmstat_text[] = {
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
-#else
- "", /* nr_tlb_remote_flush */
- "", /* nr_tlb_remote_flush_received */
-#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 15293c2a5dd8..8d77b6ee4477 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -443,27 +443,29 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
return rc;
}
-static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
- int rc = -EINVAL;
+ int rc = 0;
+
+ if (ops->ndo_fcoe_ddp_target)
+ rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
- if (ops->ndo_fcoe_get_wwn)
- rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
return rc;
}
+#endif
-static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc)
+#ifdef NETDEV_FCOE_WWNN
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
- int rc = 0;
-
- if (ops->ndo_fcoe_ddp_target)
- rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+ int rc = -EINVAL;
+ if (ops->ndo_fcoe_get_wwn)
+ rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
return rc;
}
#endif
@@ -794,9 +796,11 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
.ndo_fcoe_enable = vlan_dev_fcoe_enable,
.ndo_fcoe_disable = vlan_dev_fcoe_disable,
- .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
.ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
#endif
+#ifdef NETDEV_FCOE_WWNN
+ .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vlan_dev_poll_controller,
.ndo_netpoll_setup = vlan_dev_netpoll_setup,
diff --git a/net/9p/client.c b/net/9p/client.c
index 357214a51f13..b85d51f4b8eb 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1061,7 +1061,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
p9_debug(P9_DEBUG_ERROR,
"Please specify a msize of at least 4k\n");
err = -EINVAL;
- goto free_client;
+ goto close_trans;
}
err = p9_client_version(clnt);
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index e2fbf3677b9b..29420ebb8f07 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -513,7 +513,7 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* fall through - Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 49a16cee2aae..420a98bf79b5 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -879,15 +879,24 @@ static struct notifier_block aarp_notifier = {
static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
-void __init aarp_proto_init(void)
+int __init aarp_proto_init(void)
{
+ int rc;
+
aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
- if (!aarp_dl)
+ if (!aarp_dl) {
printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
+ return -ENOMEM;
+ }
timer_setup(&aarp_timer, aarp_expire_timeout, 0);
aarp_timer.expires = jiffies + sysctl_aarp_expiry_time;
add_timer(&aarp_timer);
- register_netdevice_notifier(&aarp_notifier);
+ rc = register_netdevice_notifier(&aarp_notifier);
+ if (rc) {
+ del_timer_sync(&aarp_timer);
+ unregister_snap_client(aarp_dl);
+ }
+ return rc;
}
/* Remove the AARP entries associated with a device. */
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 795fbc6c06aa..dbe8b1993be9 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1904,9 +1904,6 @@ static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
EXPORT_SYMBOL(atrtr_get_dev);
EXPORT_SYMBOL(atalk_find_dev_addr);
-static const char atalk_err_snap[] __initconst =
- KERN_CRIT "Unable to register DDP with SNAP.\n";
-
/* Called by proto.c on kernel start up */
static int __init atalk_init(void)
{
@@ -1921,17 +1918,23 @@ static int __init atalk_init(void)
goto out_proto;
ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
- if (!ddp_dl)
- printk(atalk_err_snap);
+ if (!ddp_dl) {
+ pr_crit("Unable to register DDP with SNAP.\n");
+ rc = -ENOMEM;
+ goto out_sock;
+ }
dev_add_pack(&ltalk_packet_type);
dev_add_pack(&ppptalk_packet_type);
rc = register_netdevice_notifier(&ddp_notifier);
if (rc)
- goto out_sock;
+ goto out_snap;
+
+ rc = aarp_proto_init();
+ if (rc)
+ goto out_dev;
- aarp_proto_init();
rc = atalk_proc_init();
if (rc)
goto out_aarp;
@@ -1945,11 +1948,13 @@ out_proc:
atalk_proc_exit();
out_aarp:
aarp_cleanup_module();
+out_dev:
unregister_netdevice_notifier(&ddp_notifier);
-out_sock:
+out_snap:
dev_remove_pack(&ppptalk_packet_type);
dev_remove_pack(&ltalk_packet_type);
unregister_snap_client(ddp_dl);
+out_sock:
sock_unregister(PF_APPLETALK);
out_proto:
proto_unregister(&ddp_proto);
diff --git a/net/atm/lec.c b/net/atm/lec.c
index d7f5cf5b7594..ad4f829193f0 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
{
- if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+ if (arg < 0 || arg >= MAX_LEC_ITF)
+ return -EINVAL;
+ arg = array_index_nospec(arg, MAX_LEC_ITF);
+ if (!dev_lec[arg])
return -EINVAL;
vcc->proto_data = dev_lec[arg];
return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
i = arg;
if (arg >= MAX_LEC_ITF)
return -EINVAL;
+ i = array_index_nospec(arg, MAX_LEC_ITF);
if (!dev_lec[i]) {
int size;
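
Both lec.c changes apply the standard Spectre-v1 hardening recipe: after the bounds check succeeds, clamp the user-controlled index with array_index_nospec() before it reaches an array dereference, so mispredicted speculation cannot index out of bounds. The generic shape (TABLE_SIZE, table and use() are placeholders):

/* array_index_nospec() comes from <linux/nospec.h> */
if (idx < 0 || idx >= TABLE_SIZE)
        return -EINVAL;
idx = array_index_nospec(idx, TABLE_SIZE);
use(table[idx]);        /* speculation can no longer reach out of bounds */
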
diff --git a/net/atm/resources.c b/net/atm/resources.c
index bada395ecdb1..889349c6d90d 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -203,13 +203,9 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
int __user *sioc_len;
int __user *iobuf_len;
-#ifndef CONFIG_COMPAT
- compat = 0; /* Just so the compiler _knows_ */
-#endif
-
switch (cmd) {
case ATM_GETNAMES:
- if (compat) {
+ if (IS_ENABLED(CONFIG_COMPAT) && compat) {
#ifdef CONFIG_COMPAT
struct compat_atm_iobuf __user *ciobuf = arg;
compat_uptr_t cbuf;
@@ -253,7 +249,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
break;
}
- if (compat) {
+ if (IS_ENABLED(CONFIG_COMPAT) && compat) {
#ifdef CONFIG_COMPAT
struct compat_atmif_sioc __user *csioc = arg;
compat_uptr_t carg;
@@ -417,7 +413,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
}
/* fall through */
default:
- if (compat) {
+ if (IS_ENABLED(CONFIG_COMPAT) && compat) {
#ifdef CONFIG_COMPAT
if (!dev->ops->compat_ioctl) {
error = -EINVAL;
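
The atm ioctl rework leans on IS_ENABLED(): it is an ordinary constant expression, so the compiler type-checks both arms but dead-codes the compat branch on CONFIG_COMPAT=n builds, which is what the deleted "compat = 0" assignment used to simulate. Sketch:

if (IS_ENABLED(CONFIG_COMPAT) && compat) {
#ifdef CONFIG_COMPAT
        /* compat_ptr() fixups; the #ifdef is still needed where
         * compat-only types or fields are referenced
         */
#endif
} else {
        /* native path, the only one emitted when CONFIG_COMPAT=n */
}
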
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index a9b7919c9de5..d5df0114f08a 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
- /* free the TID stats immediately */
- cfg80211_sinfo_release_content(&sinfo);
+ if (!ret) {
+ /* free the TID stats immediately */
+ cfg80211_sinfo_release_content(&sinfo);
+ }
dev_put(real_netdev);
if (ret == -ENOENT) {
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ef39aabdb694..4fb01108e5f5 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
const u8 *mac, const unsigned short vid)
{
struct batadv_bla_claim search_claim, *claim;
+ struct batadv_bla_claim *claim_removed_entry;
+ struct hlist_node *claim_removed_node;
ether_addr_copy(search_claim.addr, mac);
search_claim.vid = vid;
@@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
mac, batadv_print_vid(vid));
- batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
- batadv_choose_claim, claim);
- batadv_claim_put(claim); /* reference from the hash is gone */
+ claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+ batadv_compare_claim,
+ batadv_choose_claim, claim);
+ if (!claim_removed_node)
+ goto free_claim;
+ /* reference from the hash is gone */
+ claim_removed_entry = hlist_entry(claim_removed_node,
+ struct batadv_bla_claim, hash_entry);
+ batadv_claim_put(claim_removed_entry);
+
+free_claim:
/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 0b4b3fb778a6..208655cf6717 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1116,9 +1116,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
struct attribute *attr,
char *buff, size_t count)
{
- struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
struct batadv_hard_iface *hard_iface;
+ struct batadv_priv *bat_priv;
u32 tp_override;
u32 old_tp_override;
bool ret;
@@ -1147,7 +1147,10 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
- batadv_netlink_notify_hardif(bat_priv, hard_iface);
+ if (hard_iface->soft_iface) {
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+ batadv_netlink_notify_hardif(bat_priv, hard_iface);
+ }
out:
batadv_hardif_put(hard_iface);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index f73d79139ae7..26c4e2493ddf 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
struct batadv_tt_global_entry *tt_global,
const char *message)
{
+ struct batadv_tt_global_entry *tt_removed_entry;
+ struct hlist_node *tt_removed_node;
+
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting global tt entry %pM (vid: %d): %s\n",
tt_global->common.addr,
batadv_print_vid(tt_global->common.vid), message);
- batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
- batadv_choose_tt, &tt_global->common);
- batadv_tt_global_entry_put(tt_global);
+ tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+ batadv_compare_tt,
+ batadv_choose_tt,
+ &tt_global->common);
+ if (!tt_removed_node)
+ return;
+
+	/* drop reference of the removed hash entry */
+ tt_removed_entry = hlist_entry(tt_removed_node,
+ struct batadv_tt_global_entry,
+ common.hash_entry);
+ batadv_tt_global_entry_put(tt_removed_entry);
}
/**
@@ -1337,9 +1349,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid, const char *message,
bool roaming)
{
+ struct batadv_tt_local_entry *tt_removed_entry;
struct batadv_tt_local_entry *tt_local_entry;
u16 flags, curr_flags = BATADV_NO_FLAGS;
- void *tt_entry_exists;
+ struct hlist_node *tt_removed_node;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
@@ -1368,15 +1381,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
*/
batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
- tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+ tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
batadv_compare_tt,
batadv_choose_tt,
&tt_local_entry->common);
- if (!tt_entry_exists)
+ if (!tt_removed_node)
goto out;
- /* extra call to free the local tt entry */
- batadv_tt_local_entry_put(tt_local_entry);
+	/* drop reference of the removed hash entry */
+ tt_removed_entry = hlist_entry(tt_removed_node,
+ struct batadv_tt_local_entry,
+ common.hash_entry);
+ batadv_tt_local_entry_put(tt_removed_entry);
out:
if (tt_local_entry)
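
All three batman-adv hunks fix one race pattern: batadv_hash_remove() returns the hlist_node it actually unlinked, which may belong to a different object than the one just looked up if a concurrent remover won. Dropping the hash reference on the returned node rather than on the looked-up entry avoids a double put. Generalized (struct my_entry and my_entry_put() are placeholders):

struct hlist_node *node;
struct my_entry *removed;

node = batadv_hash_remove(hash, compare_fn, choose_fn, key);
if (node) {
        /* put the reference of whatever was really unlinked */
        removed = hlist_entry(node, struct my_entry, hash_entry);
        my_entry_put(removed);
}
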
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index 78bec8df8525..aaa39409eeb7 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -161,7 +161,6 @@ static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
}
shash->tfm = tfm;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(shash, plaintext, psize, output);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 9a580999ca57..d892b7c3cc42 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -523,12 +523,12 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
struct sock *sk = sock->sk;
int err = 0;
- BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
-
if (!addr || addr_len < sizeof(struct sockaddr_sco) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
+ BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 621146d04c03..e68c715f8d37 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -183,7 +183,6 @@ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m,
}
desc->tfm = tfm;
- desc->flags = 0;
/* Swap key and message from LSB to MSB */
swap_buf(k, tmp, 16);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index da7051d62727..fab142b796ef 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -16,7 +16,7 @@
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
u32 *retval, u32 *time)
{
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
enum bpf_cgroup_storage_type stype;
u64 time_start, time_spent = 0;
int ret = 0;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5ea7e56119c1..ba303ee99b9b 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
__br_handle_local_finish(skb);
- BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
- br_pass_frame_up(skb);
- return 0;
+ /* return 1 to signal the okfn() was called so it's ok to use the skb */
+ return 1;
}
/*
@@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
goto forward;
}
- /* Deliver packet to local host only */
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
- NULL, skb, skb->dev, NULL, br_handle_local_finish);
- return RX_HANDLER_CONSUMED;
+ /* The else clause should be hit when nf_hook():
+ * - returns < 0 (drop/error)
+ * - returns == 0 (stolen/nf_queue)
+ * Thus return 1 from the okfn() to signal the skb is ok to pass
+ */
+ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+ dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+ br_handle_local_finish) == 1) {
+ return RX_HANDLER_PASS;
+ } else {
+ return RX_HANDLER_CONSUMED;
+ }
}
forward:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a0e369179f6d..45e7f4173bba 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -601,6 +601,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
if (ipv4_is_local_multicast(group))
return 0;
+ memset(&br_group, 0, sizeof(br_group));
br_group.u.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
@@ -1497,6 +1498,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
+ memset(&br_group, 0, sizeof(br_group));
br_group.u.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
@@ -1520,6 +1522,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
+ memset(&br_group, 0, sizeof(br_group));
br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
@@ -2028,7 +2031,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
__br_multicast_open(br, query);
- list_for_each_entry(port, &br->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &br->port_list, list) {
if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING)
continue;
@@ -2040,6 +2044,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
br_multicast_enable(&port->ip6_own_query);
#endif
}
+ rcu_read_unlock();
}
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
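
The three memset() additions share one rationale: struct br_ip embeds an address union (plus padding), and the multicast database hashes and compares the whole key, so leftover stack bytes would make otherwise-identical groups mismatch. The key is therefore zeroed before the used fields are filled:

struct br_ip br_group;

memset(&br_group, 0, sizeof(br_group)); /* clear union + padding */
br_group.u.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
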
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 9d34de68571b..22afa566cbce 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -502,6 +502,7 @@ static unsigned int br_nf_pre_routing(void *priv,
nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
skb->protocol = htons(ETH_P_IP);
+ skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb->dev, NULL,
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 564710f88f93..e88d6641647b 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
skb->protocol = htons(ETH_P_IPV6);
+ skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+
NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 9c07591b0232..7104cf13da84 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
- br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT)))
+ br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index eb15891f8b9f..3cad01ac64e4 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;
- if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+ /* rule should have no remaining data after target */
+ if (type == EBT_COMPAT_TARGET && size_left)
return -EINVAL;
match32 = (struct compat_ebt_entry_mwt *) buf;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 9cab80207ced..79eac465ec65 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -738,7 +738,6 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started)
}
EXPORT_SYMBOL(__ceph_open_session);
-
int ceph_open_session(struct ceph_client *client)
{
int ret;
@@ -754,6 +753,23 @@ int ceph_open_session(struct ceph_client *client)
}
EXPORT_SYMBOL(ceph_open_session);
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+ unsigned long timeout)
+{
+ u64 newest_epoch;
+ int ret;
+
+ ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
+ if (ret)
+ return ret;
+
+ if (client->osdc.osdmap->epoch >= newest_epoch)
+ return 0;
+
+ ceph_osdc_maybe_request_map(&client->osdc);
+ return ceph_monc_wait_osdmap(&client->monc, newest_epoch, timeout);
+}
+EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);
static int __init init_ceph_lib(void)
{
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 7e71b0df1fbc..3083988ce729 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -840,6 +840,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
size_t bytes)
{
struct ceph_bio_iter *it = &cursor->bio_iter;
+ struct page *page = bio_iter_page(it->bio, it->iter);
BUG_ON(bytes > cursor->resid);
BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
@@ -851,7 +852,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
return false; /* no more data */
}
- if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done))
+ if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
+ page == bio_iter_page(it->bio, it->iter)))
return false; /* more bytes to process in this segment */
if (!it->iter.bi_size) {
@@ -899,6 +901,7 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
size_t bytes)
{
struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
+ struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
BUG_ON(bytes > cursor->resid);
BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
@@ -910,7 +913,8 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
return false; /* no more data */
}
- if (!bytes || cursor->bvec_iter.bi_bvec_done)
+ if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
+ page == bvec_iter_page(bvecs, cursor->bvec_iter)))
return false; /* more bytes to process in this segment */
BUG_ON(cursor->last_piece);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 18deb3d889c4..a53e4fbb6319 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -922,6 +922,15 @@ int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
mutex_unlock(&monc->mutex);
ret = wait_generic_request(req);
+ if (!ret)
+ /*
+ * Make sure we have the osdmap that includes the blacklist
+ * entry. This is needed to ensure that the OSDs pick up the
+ * new blacklist before processing any future requests from
+ * this client.
+ */
+ ret = ceph_wait_for_latest_osdmap(monc->client, 0);
+
out:
put_generic_request(req);
return ret;
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 98c0ff3d6441..48a31dc9161c 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -495,9 +495,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
/ sizeof(struct crush_rule_step))
goto bad;
#endif
- r = c->rules[i] = kmalloc(sizeof(*r) +
- yes*sizeof(struct crush_rule_step),
- GFP_NOFS);
+ r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
+ c->rules[i] = r;
if (r == NULL)
goto badmem;
dout(" rule %d is at %p\n", i, r);
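
struct_size() (from <linux/overflow.h>) computes sizeof(*r) + yes * sizeof(r->steps[0]) with overflow checking, saturating to SIZE_MAX so that kmalloc() fails cleanly instead of succeeding with a short buffer. The flexible-array allocation idiom used above:

#include <linux/overflow.h>
#include <linux/slab.h>

r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
c->rules[i] = r;
if (!r)
        goto badmem;
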
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b2651bb6d2a3..e657289db4ac 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
break;
sk_busy_loop(sk, flags & MSG_DONTWAIT);
- } while (!skb_queue_empty(&sk->sk_receive_queue));
+ } while (sk->sk_receive_queue.prev != *last);
error = -EAGAIN;
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b67f2aa59dd..f409406254dd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
BUG_ON(!dev_net(dev));
net = dev_net(dev);
- if (dev->flags & IFF_UP)
+
+ /* Some auto-enslaved devices e.g. failover slaves are
+ * special, as userspace might rename the device after
+ * the interface had been brought up and running since
+	 * the point the kernel initiated auto-enslavement. Allow
+ * live name change even when these slave devices are
+ * up and running.
+ *
+ * Typically, users of these auto-enslaving devices
+ * don't actually care about slave name change, as
+ * they are supposed to operate on master interface
+ * directly.
+ */
+ if (dev->flags & IFF_UP &&
+ likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
return -EBUSY;
write_seqcount_begin(&devnet_rename_seq);
@@ -5014,8 +5028,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
if (pt_prev->list_func != NULL)
pt_prev->list_func(head, pt_prev, orig_dev);
else
- list_for_each_entry_safe(skb, next, head, list)
+ list_for_each_entry_safe(skb, next, head, list) {
+ skb_list_del_init(skb);
pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ }
}
static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 78e22cea4cc7..da0a29f30885 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -3897,6 +3897,11 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
continue;
}
+ if (!devlink->ops->info_get) {
+ idx++;
+ continue;
+ }
+
mutex_lock(&devlink->lock);
err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
NETLINK_CB(cb->skb).portid,
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d4918ffddda8..36ed619faf36 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1797,11 +1797,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
WARN_ON_ONCE(!ret);
gstrings.len = ret;
- data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
- if (gstrings.len && !data)
- return -ENOMEM;
- __ethtool_get_strings(dev, gstrings.string_set, data);
+ if (gstrings.len) {
+ data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
+ if (!data)
+ return -ENOMEM;
+
+ __ethtool_get_strings(dev, gstrings.string_set, data);
+ } else {
+ data = NULL;
+ }
ret = -EFAULT;
if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1897,11 +1902,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
return -EFAULT;
stats.n_stats = n_stats;
- data = vzalloc(array_size(n_stats, sizeof(u64)));
- if (n_stats && !data)
- return -ENOMEM;
- ops->get_ethtool_stats(dev, &stats, data);
+ if (n_stats) {
+ data = vzalloc(array_size(n_stats, sizeof(u64)));
+ if (!data)
+ return -ENOMEM;
+ ops->get_ethtool_stats(dev, &stats, data);
+ } else {
+ data = NULL;
+ }
ret = -EFAULT;
if (copy_to_user(useraddr, &stats, sizeof(stats)))
@@ -1941,16 +1950,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
return -EFAULT;
stats.n_stats = n_stats;
- data = vzalloc(array_size(n_stats, sizeof(u64)));
- if (n_stats && !data)
- return -ENOMEM;
- if (dev->phydev && !ops->get_ethtool_phy_stats) {
- ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
- if (ret < 0)
- return ret;
+ if (n_stats) {
+ data = vzalloc(array_size(n_stats, sizeof(u64)));
+ if (!data)
+ return -ENOMEM;
+
+ if (dev->phydev && !ops->get_ethtool_phy_stats) {
+ ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
+ if (ret < 0)
+ goto out;
+ } else {
+ ops->get_ethtool_phy_stats(dev, &stats, data);
+ }
} else {
- ops->get_ethtool_phy_stats(dev, &stats, data);
+ data = NULL;
}
ret = -EFAULT;
@@ -2319,9 +2333,10 @@ static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr)
return ret;
}
-static int ethtool_get_per_queue_coalesce(struct net_device *dev,
- void __user *useraddr,
- struct ethtool_per_queue_op *per_queue_opt)
+static noinline_for_stack int
+ethtool_get_per_queue_coalesce(struct net_device *dev,
+ void __user *useraddr,
+ struct ethtool_per_queue_op *per_queue_opt)
{
u32 bit;
int ret;
@@ -2349,9 +2364,10 @@ static int ethtool_get_per_queue_coalesce(struct net_device *dev,
return 0;
}
-static int ethtool_set_per_queue_coalesce(struct net_device *dev,
- void __user *useraddr,
- struct ethtool_per_queue_op *per_queue_opt)
+static noinline_for_stack int
+ethtool_set_per_queue_coalesce(struct net_device *dev,
+ void __user *useraddr,
+ struct ethtool_per_queue_op *per_queue_opt)
{
u32 bit;
int i, ret = 0;
@@ -2405,7 +2421,7 @@ roll_back:
return ret;
}
-static int ethtool_set_per_queue(struct net_device *dev,
+static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev,
void __user *useraddr, u32 sub_cmd)
{
struct ethtool_per_queue_op per_queue_opt;
diff --git a/net/core/failover.c b/net/core/failover.c
index 4a92a98ccce9..b5cd3c727285 100644
--- a/net/core/failover.c
+++ b/net/core/failover.c
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
goto err_upper_link;
}
- slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
+ slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
if (fops && fops->slave_register &&
!fops->slave_register(slave_dev, failover_dev))
return NOTIFY_OK;
netdev_upper_dev_unlink(slave_dev, failover_dev);
- slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+ slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
err_upper_link:
netdev_rx_handler_unregister(slave_dev);
done:
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
netdev_rx_handler_unregister(slave_dev);
netdev_upper_dev_unlink(slave_dev, failover_dev);
- slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+ slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
if (fops && fops->slave_unregister &&
!fops->slave_unregister(slave_dev, failover_dev))
diff --git a/net/core/filter.c b/net/core/filter.c
index 5ceba98069d4..27e61ffd9039 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1796,8 +1796,6 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
- sk = sk_to_full_sk(sk);
-
return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
}
@@ -2804,7 +2802,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- if (!skb_is_gso_tcp(skb))
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2845,7 +2843,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- if (!skb_is_gso_tcp(skb))
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2970,7 +2968,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- if (!skb_is_gso_tcp(skb))
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2999,7 +2997,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- if (!skb_is_gso_tcp(skb))
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
@@ -4385,6 +4383,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
* Only binding to IP is supported.
*/
err = -EINVAL;
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return err;
if (addr->sa_family == AF_INET) {
if (addr_len < sizeof(struct sockaddr_in))
return err;
@@ -5266,7 +5266,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
.func = bpf_sk_release,
.gpl_only = false,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_SOCKET,
+ .arg1_type = ARG_PTR_TO_SOCK_COMMON,
};
BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
@@ -5407,8 +5407,6 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
{
- sk = sk_to_full_sk(sk);
-
if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
return (unsigned long)sk;
@@ -5422,6 +5420,23 @@ static const struct bpf_func_proto bpf_tcp_sock_proto = {
.arg1_type = ARG_PTR_TO_SOCK_COMMON,
};
+BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
+{
+ sk = sk_to_full_sk(sk);
+
+ if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
+ return (unsigned long)sk;
+
+ return (unsigned long)NULL;
+}
+
+static const struct bpf_func_proto bpf_get_listener_sock_proto = {
+ .func = bpf_get_listener_sock,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_SOCK_COMMON,
+};
+
BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
{
unsigned int iphdr_len;
@@ -5607,6 +5622,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#ifdef CONFIG_INET
case BPF_FUNC_tcp_sock:
return &bpf_tcp_sock_proto;
+ case BPF_FUNC_get_listener_sock:
+ return &bpf_get_listener_sock_proto;
case BPF_FUNC_skb_ecn_set_ce:
return &bpf_skb_ecn_set_ce_proto;
#endif
@@ -5702,6 +5719,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_release_proto;
case BPF_FUNC_tcp_sock:
return &bpf_tcp_sock_proto;
+ case BPF_FUNC_get_listener_sock:
+ return &bpf_get_listener_sock_proto;
#endif
default:
return bpf_base_func_proto(func_id);
@@ -6596,14 +6615,8 @@ static bool flow_dissector_is_valid_access(int off, int size,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
- if (type == BPF_WRITE) {
- switch (off) {
- case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
- break;
- default:
- return false;
- }
- }
+ if (type == BPF_WRITE)
+ return false;
switch (off) {
case bpf_ctx_range(struct __sk_buff, data):
@@ -6615,11 +6628,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
info->reg_type = PTR_TO_FLOW_KEYS;
break;
- case bpf_ctx_range(struct __sk_buff, tc_classid):
- case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range_till(struct __sk_buff, family, local_port):
- case bpf_ctx_range(struct __sk_buff, tstamp):
- case bpf_ctx_range(struct __sk_buff, wire_len):
+ default:
return false;
}
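
The added bpf_bind() length check shows the sockaddr validation order: first ensure enough bytes are present to read sa_family at all, then enforce the full per-family size. offsetofend() (from <linux/stddef.h>) yields the offset one past the end of a member; sketch:

if (addr_len < offsetofend(struct sockaddr, sa_family))
        return -EINVAL;         /* cannot even read sa_family */
if (addr->sa_family == AF_INET &&
    addr_len < sizeof(struct sockaddr_in))
        return -EINVAL;         /* truncated IPv4 sockaddr */
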
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index bb1a54747d64..94a450b2191a 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -707,6 +707,7 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
/* Pass parameters to the BPF program */
memset(flow_keys, 0, sizeof(*flow_keys));
cb->qdisc_cb.flow_keys = flow_keys;
+ flow_keys->n_proto = skb->protocol;
flow_keys->nhoff = skb_network_offset(skb);
flow_keys->thoff = flow_keys->nhoff;
@@ -716,7 +717,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
/* Restore state */
memcpy(cb, &cb_saved, sizeof(cb_saved));
- flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
+ flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
+ skb_network_offset(skb), skb->len);
flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
flow_keys->nhoff, skb->len);
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index acf45ddbe924..e095fb871d91 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -13,22 +13,36 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct gro_cell *cell;
+ int res;
- if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
- return netif_rx(skb);
+ rcu_read_lock();
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto drop;
+
+ if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
+ res = netif_rx(skb);
+ goto unlock;
+ }
cell = this_cpu_ptr(gcells->cells);
if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+drop:
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
- return NET_RX_DROP;
+ res = NET_RX_DROP;
+ goto unlock;
}
__skb_queue_tail(&cell->napi_skbs, skb);
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
- return NET_RX_SUCCESS;
+
+ res = NET_RX_SUCCESS;
+
+unlock:
+ rcu_read_unlock();
+ return res;
}
EXPORT_SYMBOL(gro_cells_receive);
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index cf2f8897ca19..126d31ff5ee3 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -625,6 +625,8 @@ int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
/* push the encap headers and fix pointers */
skb_reset_inner_headers(skb);
+ skb_reset_inner_mac_header(skb); /* mac header is not yet set */
+ skb_set_inner_protocol(skb, skb->protocol);
skb->encapsulation = 1;
skb_push(skb, len);
if (ingress)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4ff661f6f989..8f8b7b6c2945 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -928,6 +928,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
if (error)
return error;
+ dev_hold(queue->dev);
+
if (dev->sysfs_rx_queue_group) {
error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
if (error) {
@@ -937,7 +939,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
}
kobject_uevent(kobj, KOBJ_ADD);
- dev_hold(queue->dev);
return error;
}
@@ -1464,6 +1465,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
if (error)
return error;
+ dev_hold(queue->dev);
+
#ifdef CONFIG_BQL
error = sysfs_create_group(kobj, &dql_group);
if (error) {
@@ -1473,7 +1476,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
#endif
kobject_uevent(kobj, KOBJ_ADD);
- dev_hold(queue->dev);
return 0;
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 17f36317363d..7e6dcc625701 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
refcount_set(&net->count, 1);
refcount_set(&net->passive, 1);
+ get_random_bytes(&net->hash_mix, sizeof(u32));
net->dev_base_seq = 1;
net->user_ns = user_ns;
idr_init(&net->netns_ids);
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index 703cf76aa7c2..7109c168b5e0 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -185,9 +185,10 @@ void __init ptp_classifier_init(void)
{ 0x16, 0, 0, 0x00000000 },
{ 0x06, 0, 0, 0x00000000 },
};
- struct sock_fprog_kern ptp_prog = {
- .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
- };
+ struct sock_fprog_kern ptp_prog;
+
+ ptp_prog.len = ARRAY_SIZE(ptp_filter);
+ ptp_prog.filter = ptp_filter;
BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a51cab95ba64..220c56e93659 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -4948,7 +4948,7 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
{
struct if_stats_msg *ifsm;
- if (nlh->nlmsg_len < sizeof(*ifsm)) {
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
return -EINVAL;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2415d9cb9b89..40796b8bf820 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3801,7 +3801,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
unsigned int delta_truesize;
struct sk_buff *lp;
- if (unlikely(p->len + len >= 65536))
+ if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
return -E2BIG;
lp = NAPI_GRO_CB(p)->last;
@@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
- int mac_len;
+ int mac_len, meta_len;
+ void *meta;
if (skb_cow(skb, skb_headroom(skb)) < 0) {
kfree_skb(skb);
@@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
mac_len - VLAN_HLEN - ETH_TLEN);
}
+
+ meta_len = skb_metadata_len(skb);
+ if (meta_len) {
+ meta = skb_metadata_end(skb) - meta_len;
+ memmove(meta + VLAN_HLEN, meta, meta_len);
+ }
+
skb->mac_header += VLAN_HLEN;
return skb;
}
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index ae6f06e45737..cc94d921476c 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -554,6 +554,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
/* No sk_callback_lock since already detached. */
+ strp_stop(&psock->parser.strp);
strp_done(&psock->parser.strp);
cancel_work_sync(&psock->work);
diff --git a/net/core/sock.c b/net/core/sock.c
index 782343bb925b..067878a1e4c5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -348,7 +348,7 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
}
- if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+ if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
*(struct old_timeval32 *)optval = tv32;
return sizeof(tv32);
@@ -372,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool
{
struct __kernel_sock_timeval tv;
- if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+ if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
struct old_timeval32 tv32;
if (optlen < sizeof(tv32))
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index f227f002c73d..db87d9f58019 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local,
if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
return -ENOMEM;
- return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval);
+ if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
+ kfree(fval.sp.vec);
+ return -ENOMEM;
+ }
+
+ return 0;
}
/**
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index d5740bad5b18..57d84e9b7b6f 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -436,8 +436,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
- newnp->mcast_oif = inet6_iif(skb);
- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
+ newnp->mcast_oif = inet_iif(skb);
+ newnp->mcast_hops = ip_hdr(skb)->ttl;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index ed4f6dc26365..85c22ada4744 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -98,8 +98,18 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
return skb;
}
+static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+ int *offset)
+{
+ *offset = QCA_HDR_LEN;
+ *proto = ((__be16 *)skb->data)[0];
+
+ return 0;
+}
+
const struct dsa_device_ops qca_netdev_ops = {
.xmit = qca_tag_xmit,
.rcv = qca_tag_rcv,
+ .flow_dissect = qca_tag_flow_dissect,
.overhead = QCA_HDR_LEN,
};
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index b8cd43c9ed5b..a97bf326b231 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -94,9 +94,8 @@ static void hsr_check_announce(struct net_device *hsr_dev,
&& (old_operstate != IF_OPER_UP)) {
/* Went up */
hsr->announce_count = 0;
- hsr->announce_timer.expires = jiffies +
- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
- add_timer(&hsr->announce_timer);
+ mod_timer(&hsr->announce_timer,
+ jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}
if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
@@ -332,6 +331,7 @@ static void hsr_announce(struct timer_list *t)
{
struct hsr_priv *hsr;
struct hsr_port *master;
+ unsigned long interval;
hsr = from_timer(hsr, t, announce_timer);
@@ -343,18 +343,16 @@ static void hsr_announce(struct timer_list *t)
hsr->protVersion);
hsr->announce_count++;
- hsr->announce_timer.expires = jiffies +
- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+ interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
} else {
send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
hsr->protVersion);
- hsr->announce_timer.expires = jiffies +
- msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
}
if (is_admin_up(master->dev))
- add_timer(&hsr->announce_timer);
+ mod_timer(&hsr->announce_timer, jiffies + interval);
rcu_read_unlock();
}
@@ -486,7 +484,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
if (res)
- return res;
+ goto err_add_port;
res = register_netdevice(hsr_dev);
if (res)
@@ -506,6 +504,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
fail:
hsr_for_each_port(hsr, port)
hsr_del_port(port);
+err_add_port:
+ hsr_del_node(&hsr->self_node_db);
return res;
}
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 286ceb41ac0c..9af16cb68f76 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db,
return 0;
}
+void hsr_del_node(struct list_head *self_node_db)
+{
+ struct hsr_node *node;
+
+ rcu_read_lock();
+ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+ rcu_read_unlock();
+ if (node) {
+ list_del_rcu(&node->mac_list);
+ kfree(node);
+ }
+}
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
* seq_out is used to initialize filtering of outgoing duplicate frames
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 370b45998121..531fd3dfcac1 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -16,6 +16,7 @@
struct hsr_node;
+void hsr_del_node(struct list_head *self_node_db);
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
u16 seq_out);
struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 10e809b296ec..fb065a8937ea 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -226,7 +226,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
tail[plen - 1] = proto;
}
-static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
int encap_type;
struct udphdr *uh;
@@ -234,6 +234,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
__be16 sport, dport;
struct xfrm_encap_tmpl *encap = x->encap;
struct ip_esp_hdr *esph = esp->esph;
+ unsigned int len;
spin_lock_bh(&x->lock);
sport = encap->encap_sport;
@@ -241,11 +242,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
encap_type = encap->encap_type;
spin_unlock_bh(&x->lock);
+ len = skb->len + esp->tailen - skb_transport_offset(skb);
+ if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
+ return -EMSGSIZE;
+
uh = (struct udphdr *)esph;
uh->source = sport;
uh->dest = dport;
- uh->len = htons(skb->len + esp->tailen
- - skb_transport_offset(skb));
+ uh->len = htons(len);
uh->check = 0;
switch (encap_type) {
@@ -262,6 +266,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
*skb_mac_header(skb) = IPPROTO_UDP;
esp->esph = esph;
+
+ return 0;
}
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
@@ -275,8 +281,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
int tailen = esp->tailen;
/* this is non-NULL only with UDP Encapsulation */
- if (x->encap)
- esp_output_udp_encap(x, skb, esp);
+ if (x->encap) {
+ int err = esp_output_udp_encap(x, skb, esp);
+
+ if (err < 0)
+ return err;
+ }
if (!skb_cloned(skb)) {
if (tailen <= skb_tailroom(skb)) {
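
uh->len is a 16-bit wire field, so a UDP-encapsulated ESP packet whose computed length reaches IP_MAX_MTU would be silently truncated by htons(); the hunk above rejects it with -EMSGSIZE before the header is built. The overflow guard in isolation (the constant mirrors the kernel's IP_MAX_MTU, the function name is invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #define IP_MAX_MTU_ 0xFFFFu /* 65535, the largest iph->tot_len */

    static int set_udp_len(uint16_t *len_be, unsigned int payload_len,
                           unsigned int ip_hdr_len)
    {
        if (payload_len + ip_hdr_len >= IP_MAX_MTU_)
            return -1;          /* kernel returns -EMSGSIZE */
        *len_be = htons((uint16_t)payload_len);
        return 0;
    }

    int main(void)
    {
        uint16_t len;

        printf("%d\n", set_udp_len(&len, 70000, 20)); /* -1: would truncate */
        return 0;
    }
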
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 8756e0e790d2..d3170a8001b2 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -52,13 +52,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
goto out;
if (sp->len == XFRM_MAX_DEPTH)
- goto out;
+ goto out_reset;
x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
(xfrm_address_t *)&ip_hdr(skb)->daddr,
spi, IPPROTO_ESP, AF_INET);
if (!x)
- goto out;
+ goto out_reset;
sp->xvec[sp->len++] = x;
sp->olen++;
@@ -66,7 +66,7 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
xo = xfrm_offload(skb);
if (!xo) {
xfrm_state_put(x);
- goto out;
+ goto out_reset;
}
}
@@ -82,6 +82,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
xfrm_input(skb, IPPROTO_ESP, spi, -2);
return ERR_PTR(-EINPROGRESS);
+out_reset:
+ secpath_reset(skb);
out:
skb_push(skb, offset);
NAPI_GRO_CB(skb)->same_flow = 0;
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 437070d1ffb1..12ce6c526d72 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
struct guehdr *guehdr;
void *data;
u16 doffset = 0;
+ u8 proto_ctype;
if (!fou)
return 1;
@@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely(guehdr->control))
return gue_control_message(skb, guehdr);
+ proto_ctype = guehdr->proto_ctype;
__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
skb_reset_transport_header(skb);
if (iptunnel_pull_offloads(skb))
goto drop;
- return -guehdr->proto_ctype;
+ return -proto_ctype;
drop:
kfree_skb(skb);
@@ -1024,7 +1026,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
int ret;
len = sizeof(struct udphdr) + sizeof(struct guehdr);
- if (!pskb_may_pull(skb, len))
+ if (!pskb_may_pull(skb, transport_offset + len))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -1059,7 +1061,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
optlen = guehdr->hlen << 2;
- if (!pskb_may_pull(skb, len + optlen))
+ if (!pskb_may_pull(skb, transport_offset + len + optlen))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index fd219f7bd3ea..4b0526441476 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
struct net *net = dev_net(skb->dev);
struct metadata_dst *tun_dst = NULL;
struct erspan_base_hdr *ershdr;
- struct erspan_metadata *pkt_md;
struct ip_tunnel_net *itn;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
@@ -282,9 +281,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
if (unlikely(!pskb_may_pull(skb, len)))
return PACKET_REJECT;
- ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
- pkt_md = (struct erspan_metadata *)(ershdr + 1);
-
if (__iptunnel_pull_header(skb,
len,
htons(ETH_P_TEB),
@@ -292,8 +288,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
goto drop;
if (tunnel->collect_md) {
+ struct erspan_metadata *pkt_md, *md;
struct ip_tunnel_info *info;
- struct erspan_metadata *md;
+ unsigned char *gh;
__be64 tun_id;
__be16 flags;
@@ -306,6 +303,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
if (!tun_dst)
return PACKET_REJECT;
+ /* skb can be uncloned in __iptunnel_pull_header, so
+ * old pkt_md is no longer valid and we need to reset
+ * it
+ */
+ gh = skb_network_header(skb) +
+ skb_network_header_len(skb);
+ pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
+ sizeof(*ershdr));
md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
md->version = ver;
md2 = &md->u.md2;
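
The erspan fix is about pointer lifetime: __iptunnel_pull_header() can unclone the skb, reallocating its data, so a pkt_md computed beforehand may dangle and must be re-derived afterwards. The same hazard in plain C, with realloc() playing the role of the unclone (error handling elided, names invented):

    #include <stdlib.h>
    #include <string.h>

    /* Strips hdrlen bytes and may move the buffer, like an skb unclone. */
    static char *pull_header(char *buf, size_t *len, size_t hdrlen)
    {
        memmove(buf, buf + hdrlen, *len - hdrlen);
        *len -= hdrlen;
        return realloc(buf, *len);
    }

    static void parse(char *buf, size_t len)
    {
        char *meta = buf + 8;   /* stale the moment the buffer moves */

        buf = pull_header(buf, &len, 4);
        meta = buf + 4;         /* correct: re-derive from the new base */
        (void)meta;
        free(buf);
    }
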
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index ecce2dc78f17..1132d6d1796a 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
ip_local_deliver_finish);
}
-static inline bool ip_rcv_options(struct sk_buff *skb)
+static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
struct ip_options *opt;
const struct iphdr *iph;
- struct net_device *dev = skb->dev;
/* It looks as overkill, because not all
IP options require packet mangling.
@@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
}
}
- if (ip_options_rcv_srr(skb))
+ if (ip_options_rcv_srr(skb, dev))
goto drop;
}
@@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
}
#endif
- if (iph->ihl > 5 && ip_rcv_options(skb))
+ if (iph->ihl > 5 && ip_rcv_options(skb, dev))
goto drop;
rt = skb_rtable(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 32a35043c9f5..3db31bb9df50 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
}
}
-int ip_options_rcv_srr(struct sk_buff *skb)
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
{
struct ip_options *opt = &(IPCB(skb)->opt);
int srrspace, srrptr;
@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
orefdst = skb->_skb_refdst;
skb_dst_set(skb, NULL);
- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
+ err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
rt2 = skb_rtable(skb);
if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
skb_dst_drop(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c80188875f39..e8bb2e85c5a4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -519,6 +519,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
+ to->skb_iif = from->skb_iif;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 2756fb725bf0..a5d8cad18ead 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -515,9 +515,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
mtu = dst_mtu(&rt->dst) - dev->hard_header_len
- sizeof(struct iphdr) - tunnel_hlen;
else
- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+ mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
- skb_dst_update_pmtu(skb, mtu);
+ if (skb_valid_dst(skb))
+ skb_dst_update_pmtu(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
if (!skb_is_gso(skb) &&
@@ -530,9 +531,11 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
}
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
- struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
+ struct rt6_info *rt6;
__be32 daddr;
+ rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
+ NULL;
daddr = md ? dst : tunnel->parms.iph.daddr;
if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 68a21bf75dd0..35d8346742e2 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -646,10 +646,8 @@ static int __init vti_init(void)
msg = "ipip tunnel";
err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
- if (err < 0) {
- pr_info("%s: cant't register tunnel\n",__func__);
+ if (err < 0)
goto xfrm_tunnel_failed;
- }
msg = "netlink interface";
err = rtnl_link_register(&vti_link_ops);
@@ -659,9 +657,9 @@ static int __init vti_init(void)
return err;
rtnl_link_failed:
- xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
-xfrm_tunnel_failed:
xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+xfrm_tunnel_failed:
+ xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
@@ -676,6 +674,7 @@ pernet_dev_failed:
static void __exit vti_fini(void)
{
rtnl_link_unregister(&vti_link_ops);
+ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 835d50b279f5..a2a88ab07f7b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -56,7 +56,7 @@ struct clusterip_config {
#endif
enum clusterip_hashmode hash_mode; /* which hashing mode */
u_int32_t hash_initval; /* hash initialization */
- struct rcu_head rcu; /* for call_rcu_bh */
+ struct rcu_head rcu; /* for call_rcu */
struct net *net; /* netns for pernet list */
char ifname[IFNAMSIZ]; /* device ifname */
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 738ff0a1a048..6fdf1c195d8e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1183,11 +1183,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
+{
+ struct ip_options opt;
+ int res;
+
+ /* Recompile ip options since IPCB may not be valid anymore.
+ * Also check we have a reasonable ipv4 header.
+ */
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+ ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
+ return;
+
+ memset(&opt, 0, sizeof(opt));
+ if (ip_hdr(skb)->ihl > 5) {
+ if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+ return;
+ opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+ rcu_read_lock();
+ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+ rcu_read_unlock();
+
+ if (res)
+ return;
+ }
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ ipv4_send_dest_unreach(skb);
rt = skb_rtable(skb);
if (rt)
@@ -1303,6 +1331,10 @@ static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
if (fnhe->fnhe_daddr == daddr) {
rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+ /* set fnhe_daddr to 0 to ensure it won't bind with
+ * new dsts in rt_bind_exception().
+ */
+ fnhe->fnhe_daddr = 0;
fnhe_flush_routes(fnhe);
kfree_rcu(fnhe, rcu);
break;
@@ -2149,12 +2181,13 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
int our = 0;
int err = -EINVAL;
- if (in_dev)
- our = ip_check_mc_rcu(in_dev, daddr, saddr,
- ip_hdr(skb)->protocol);
+ if (!in_dev)
+ return err;
+ our = ip_check_mc_rcu(in_dev, daddr, saddr,
+ ip_hdr(skb)->protocol);
/* check l3 master if no match yet */
- if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
+ if (!our && netif_is_l3_slave(dev)) {
struct in_device *l3_in_dev;
l3_in_dev = __in_dev_get_rcu(skb->dev);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 606f868d9f3f..e531344611a0 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -216,7 +216,12 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
refcount_set(&req->rsk_refcnt, 1);
tcp_sk(child)->tsoffset = tsoff;
sock_rps_save_rxhash(child, skb);
- inet_csk_reqsk_queue_add(sk, req, child);
+ if (!inet_csk_reqsk_queue_add(sk, req, child)) {
+ bh_unlock_sock(child);
+ sock_put(child);
+ child = NULL;
+ reqsk_put(req);
+ }
} else {
reqsk_free(req);
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ba0fc4b18465..eeb4041fa5f9 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
static int comp_sack_nr_max = 255;
static u32 u32_max_div_HZ = UINT_MAX / HZ;
+static int one_day_secs = 24 * 3600;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -1151,7 +1152,9 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one_day_secs
},
{
.procname = "tcp_autocorking",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ad07dd71063d..6baa6dc1b13b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -943,6 +943,10 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
ssize_t copied;
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+ if (IS_ENABLED(CONFIG_DEBUG_VM) &&
+ WARN_ONCE(PageSlab(page), "page must not be a Slab one"))
+ return -EINVAL;
+
/* Wait for a connection to finish. One exception is TCP Fast Open
* (passive side) where data is allowed to be sent before a connection
* is fully established.
@@ -1933,6 +1937,11 @@ static int tcp_inq_hint(struct sock *sk)
inq = tp->rcv_nxt - tp->copied_seq;
release_sock(sk);
}
+ /* After receiving a FIN, tell user space to continue reading
+ * by returning a non-zero inq.
+ */
+ if (inq == 0 && sock_flag(sk, SOCK_DONE))
+ inq = 1;
return inq;
}
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index cd4814f7e962..477cb4aa456c 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -49,9 +49,8 @@
#define DCTCP_MAX_ALPHA 1024U
struct dctcp {
- u32 acked_bytes_ecn;
- u32 acked_bytes_total;
- u32 prior_snd_una;
+ u32 old_delivered;
+ u32 old_delivered_ce;
u32 prior_rcv_nxt;
u32 dctcp_alpha;
u32 next_seq;
@@ -67,19 +66,14 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
module_param(dctcp_alpha_on_init, uint, 0644);
MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
- "parameter for clamping alpha on loss");
-
static struct tcp_congestion_ops dctcp_reno;
static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
{
ca->next_seq = tp->snd_nxt;
- ca->acked_bytes_ecn = 0;
- ca->acked_bytes_total = 0;
+ ca->old_delivered = tp->delivered;
+ ca->old_delivered_ce = tp->delivered_ce;
}
static void dctcp_init(struct sock *sk)
@@ -91,7 +85,6 @@ static void dctcp_init(struct sock *sk)
sk->sk_state == TCP_CLOSE)) {
struct dctcp *ca = inet_csk_ca(sk);
- ca->prior_snd_una = tp->snd_una;
ca->prior_rcv_nxt = tp->rcv_nxt;
ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
@@ -123,37 +116,25 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct dctcp *ca = inet_csk_ca(sk);
- u32 acked_bytes = tp->snd_una - ca->prior_snd_una;
-
- /* If ack did not advance snd_una, count dupack as MSS size.
- * If ack did update window, do not count it at all.
- */
- if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE))
- acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;
- if (acked_bytes) {
- ca->acked_bytes_total += acked_bytes;
- ca->prior_snd_una = tp->snd_una;
-
- if (flags & CA_ACK_ECE)
- ca->acked_bytes_ecn += acked_bytes;
- }
/* Expired RTT */
if (!before(tp->snd_una, ca->next_seq)) {
- u64 bytes_ecn = ca->acked_bytes_ecn;
+ u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
u32 alpha = ca->dctcp_alpha;
/* alpha = (1 - g) * alpha + g * F */
alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
- if (bytes_ecn) {
+ if (delivered_ce) {
+ u32 delivered = tp->delivered - ca->old_delivered;
+
/* If dctcp_shift_g == 1, a 32bit value would overflow
- * after 8 Mbytes.
+ * after 8 M packets.
*/
- bytes_ecn <<= (10 - dctcp_shift_g);
- do_div(bytes_ecn, max(1U, ca->acked_bytes_total));
+ delivered_ce <<= (10 - dctcp_shift_g);
+ delivered_ce /= max(1U, delivered);
- alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
+ alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
}
/* dctcp_alpha can be read from dctcp_get_info() without
* synchro, so we ask compiler to not use dctcp_alpha
@@ -164,21 +145,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
}
}
-static void dctcp_state(struct sock *sk, u8 new_state)
+static void dctcp_react_to_loss(struct sock *sk)
{
- if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
- struct dctcp *ca = inet_csk_ca(sk);
+ struct dctcp *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
- /* If this extension is enabled, we clamp dctcp_alpha to
- * max on packet loss; the motivation is that dctcp_alpha
- * is an indicator to the extend of congestion and packet
- * loss is an indicator of extreme congestion; setting
- * this in practice turned out to be beneficial, and
- * effectively assumes total congestion which reduces the
- * window by half.
- */
- ca->dctcp_alpha = DCTCP_MAX_ALPHA;
- }
+ ca->loss_cwnd = tp->snd_cwnd;
+ tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+}
+
+static void dctcp_state(struct sock *sk, u8 new_state)
+{
+ if (new_state == TCP_CA_Recovery &&
+ new_state != inet_csk(sk)->icsk_ca_state)
+ dctcp_react_to_loss(sk);
+ /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+ * one loss-adjustment per RTT.
+ */
}
static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@@ -190,6 +173,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
case CA_EVENT_ECN_NO_CE:
dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
break;
+ case CA_EVENT_LOSS:
+ dctcp_react_to_loss(sk);
+ break;
default:
/* Don't care for the rest. */
break;
@@ -200,6 +186,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info)
{
const struct dctcp *ca = inet_csk_ca(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
/* Fill it also in case of VEGASINFO due to req struct limits.
* We can still correctly retrieve it later.
@@ -211,8 +198,10 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
info->dctcp.dctcp_enabled = 1;
info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
info->dctcp.dctcp_alpha = ca->dctcp_alpha;
- info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
- info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
+ info->dctcp.dctcp_ab_ecn = tp->mss_cache *
+ (tp->delivered_ce - ca->old_delivered_ce);
+ info->dctcp.dctcp_ab_tot = tp->mss_cache *
+ (tp->delivered - ca->old_delivered);
}
*attr = INET_DIAG_DCTCPINFO;
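
The rewritten DCTCP keeps its alpha EWMA but feeds it with the per-RTT delivery counters (delivered, delivered_ce) instead of acked-byte accounting, which removes the dupack and window-update special cases the deleted code needed. One update step, transcribed into standalone C (SHIFT_G mirrors the default dctcp_shift_g of 4, i.e. g = 1/16):

    #include <stdint.h>
    #include <stdio.h>

    #define DCTCP_MAX_ALPHA 1024u
    #define SHIFT_G 4

    static uint32_t min_not_zero_u32(uint32_t a, uint32_t b)
    {
        if (a == 0) return b;
        if (b == 0) return a;
        return a < b ? a : b;
    }

    /* alpha = (1 - g) * alpha + g * F, fixed point, where
     * F = delivered_ce / delivered scaled to [0, 1024]. */
    static uint32_t dctcp_alpha_step(uint32_t alpha, uint32_t delivered,
                                     uint32_t delivered_ce)
    {
        alpha -= min_not_zero_u32(alpha, alpha >> SHIFT_G);
        if (delivered_ce) {
            delivered_ce <<= (10 - SHIFT_G);
            delivered_ce /= delivered ? delivered : 1;
            alpha += delivered_ce;
            if (alpha > DCTCP_MAX_ALPHA)
                alpha = DCTCP_MAX_ALPHA;
        }
        return alpha;
    }

    int main(void)
    {
        /* One RTT where half the delivered packets carried CE marks:
         * prints 32, a single EWMA step; repeated RTTs at this CE
         * rate converge toward 512. */
        printf("%u\n", dctcp_alpha_step(0, 100, 50));
        return 0;
    }
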
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4eb0c8ca3c60..731d3045b50a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ int room;
+
+ room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
/* Check #1 */
- if (tp->rcv_ssthresh < tp->window_clamp &&
- (int)tp->rcv_ssthresh < tcp_space(sk) &&
- !tcp_under_memory_pressure(sk)) {
+ if (room > 0 && !tcp_under_memory_pressure(sk)) {
int incr;
/* Check #2. Increase window, if skb with such overhead
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
if (incr) {
incr = max_t(int, incr, 2 * skb->len);
- tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
- tp->window_clamp);
+ tp->rcv_ssthresh += min(room, incr);
inet_csk(sk)->icsk_ack.quick |= 1;
}
}
@@ -6498,7 +6498,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
af_ops->send_synack(fastopen_sk, dst, &fl, req,
&foc, TCP_SYNACK_FASTOPEN);
/* Add the child socket directly into the accept queue */
- inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
+ if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
+ reqsk_fastopen_remove(fastopen_sk, req, false);
+ bh_unlock_sock(fastopen_sk);
+ sock_put(fastopen_sk);
+ reqsk_put(req);
+ goto drop;
+ }
sk->sk_data_ready(sk);
bh_unlock_sock(fastopen_sk);
sock_put(fastopen_sk);
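
The tcp_grow_window() change folds two separate guards into one signed "room" value: rcv_ssthresh may only grow into the space left below both the window clamp and the current receive space, and the increment is capped by that room instead of being re-clamped afterwards. The arithmetic on its own, with illustrative values:

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    static int grow_window(int rcv_ssthresh, int window_clamp,
                           int space, int incr)
    {
        int room = min_int(window_clamp, space) - rcv_ssthresh;

        if (room > 0)
            rcv_ssthresh += min_int(room, incr);
        return rcv_ssthresh;
    }

    int main(void)
    {
        /* growth stops exactly at min(clamp, space) */
        printf("%d\n", grow_window(60000, 65535, 61000, 4000)); /* 61000 */
        return 0;
    }
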
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 831d844a27ca..a2896944aa37 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1673,7 +1673,9 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
((TCP_SKB_CB(tail)->tcp_flags |
- TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
+ TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
+ !((TCP_SKB_CB(tail)->tcp_flags &
+ TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
((TCP_SKB_CB(tail)->tcp_flags ^
TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
@@ -1692,6 +1694,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
+ /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
+ * thtail->fin, so that the fast path in tcp_rcv_established()
+ * is not entered if we append a packet with a FIN.
+ * SYN, RST, URG are not present.
+ * ACK is set on both packets.
+ * PSH : we do not really care in TCP stack,
+ * at least for 'GRO' packets.
+ */
+ thtail->fin |= th->fin;
TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
if (TCP_SKB_CB(skb)->has_rxtstamp) {
@@ -1734,15 +1745,8 @@ EXPORT_SYMBOL(tcp_add_backlog);
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = (struct tcphdr *)skb->data;
- unsigned int eaten = skb->len;
- int err;
- err = sk_filter_trim_cap(sk, skb, th->doff * 4);
- if (!err) {
- eaten -= skb->len;
- TCP_SKB_CB(skb)->end_seq -= eaten;
- }
- return err;
+ return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);
@@ -2585,7 +2589,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
{
int cpu;
- module_put(net->ipv4.tcp_congestion_control->owner);
+ if (net->ipv4.tcp_congestion_control)
+ module_put(net->ipv4.tcp_congestion_control->owner);
for_each_possible_cpu(cpu)
inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 64f9715173ac..065334b41d57 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -352,6 +352,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
struct sk_buff *pp = NULL;
struct udphdr *uh2;
struct sk_buff *p;
+ unsigned int ulen;
/* requires non zero csum, for symmetry with GSO */
if (!uh->check) {
@@ -359,6 +360,12 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
return NULL;
}
+ /* Do not deal with padded or malicious packets, sorry! */
+ ulen = ntohs(uh->len);
+ if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+ }
/* pull encapsulating udp header */
skb_gro_pull(skb, sizeof(struct udphdr));
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
@@ -377,13 +384,14 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
/* Terminate the flow on len mismatch or if it grow "too much".
* Under small packet flood GRO count could elsewhere grow a lot
- * leading to execessive truesize values
+ * leading to excessive truesize values.
+ * On len mismatch merge the first packet shorter than gso_size,
+ * otherwise complete the GRO packet.
*/
- if (!skb_gro_receive(p, skb) &&
+ if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
+ ulen != ntohs(uh2->len) ||
NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
pp = p;
- else if (uh->len != uh2->len)
- pp = p;
return pp;
}
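
The new ulen checks make UDP GRO trust only packets whose own length field exactly matches what GRO measured, dropping padded or forged lengths before any merging happens. The sanity test in isolation (8 stands in for sizeof(struct udphdr)):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    static int udp_len_sane(uint16_t uh_len_be, size_t gro_len)
    {
        size_t ulen = ntohs(uh_len_be);

        return ulen > 8 && ulen == gro_len;
    }

    int main(void)
    {
        printf("%d\n", udp_len_sane(htons(100), 100)); /* 1 */
        printf("%d\n", udp_len_sane(htons(100), 128)); /* 0: padded */
        return 0;
    }
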
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index d73a6d6652f6..2b144b92ae46 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -111,7 +111,8 @@ static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
const struct iphdr *iph = ip_hdr(skb);
- u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
+ int ihl = iph->ihl;
+ u8 *xprth = skb_network_header(skb) + ihl * 4;
struct flowi4 *fl4 = &fl->u.ip4;
int oif = 0;
@@ -122,6 +123,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
fl4->flowi4_mark = skb->mark;
fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
+ fl4->flowi4_proto = iph->protocol;
+ fl4->daddr = reverse ? iph->saddr : iph->daddr;
+ fl4->saddr = reverse ? iph->daddr : iph->saddr;
+ fl4->flowi4_tos = iph->tos;
+
if (!ip_is_fragment(iph)) {
switch (iph->protocol) {
case IPPROTO_UDP:
@@ -133,7 +139,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ports;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
ports = (__be16 *)xprth;
fl4->fl4_sport = ports[!!reverse];
@@ -146,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 2 - skb->data)) {
u8 *icmp;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
icmp = xprth;
fl4->fl4_icmp_type = icmp[0];
@@ -159,7 +165,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be32 *ehdr;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
ehdr = (__be32 *)xprth;
fl4->fl4_ipsec_spi = ehdr[0];
@@ -171,7 +177,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 8 - skb->data)) {
__be32 *ah_hdr;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
ah_hdr = (__be32 *)xprth;
fl4->fl4_ipsec_spi = ah_hdr[1];
@@ -183,7 +189,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ipcomp_hdr;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
ipcomp_hdr = (__be16 *)xprth;
fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
@@ -196,7 +202,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
__be16 *greflags;
__be32 *gre_hdr;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
greflags = (__be16 *)xprth;
gre_hdr = (__be32 *)xprth;
@@ -213,10 +219,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
break;
}
}
- fl4->flowi4_proto = iph->protocol;
- fl4->daddr = reverse ? iph->saddr : iph->daddr;
- fl4->saddr = reverse ? iph->daddr : iph->saddr;
- fl4->flowi4_tos = iph->tos;
}
static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index d43d076c98f5..1766325423b5 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -476,7 +476,7 @@ static int ip6addrlbl_valid_dump_req(const struct nlmsghdr *nlh,
}
if (nlmsg_attrlen(nlh, sizeof(*ifal))) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump requewst");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump request");
return -EINVAL;
}
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index d46b4eb645c2..cb99f6fb79b7 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -74,13 +74,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
goto out;
if (sp->len == XFRM_MAX_DEPTH)
- goto out;
+ goto out_reset;
x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
(xfrm_address_t *)&ipv6_hdr(skb)->daddr,
spi, IPPROTO_ESP, AF_INET6);
if (!x)
- goto out;
+ goto out_reset;
sp->xvec[sp->len++] = x;
sp->olen++;
@@ -88,7 +88,7 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
xo = xfrm_offload(skb);
if (!xo) {
xfrm_state_put(x);
- goto out;
+ goto out_reset;
}
}
@@ -109,6 +109,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
xfrm_input(skb, IPPROTO_ESP, spi, -2);
return ERR_PTR(-EINPROGRESS);
+out_reset:
+ secpath_reset(skb);
out:
skb_push(skb, offset);
NAPI_GRO_CB(skb)->same_flow = 0;
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index 867474abe269..ec4e2ed95f36 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -94,7 +94,7 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
int ret;
len = sizeof(struct udphdr) + sizeof(struct guehdr);
- if (!pskb_may_pull(skb, len))
+ if (!pskb_may_pull(skb, transport_offset + len))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -129,7 +129,7 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
optlen = guehdr->hlen << 2;
- if (!pskb_may_pull(skb, len + optlen))
+ if (!pskb_may_pull(skb, transport_offset + len + optlen))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 79d2e43c05c5..5fc1f4e0c0cf 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -417,6 +417,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
done:
rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
return ret;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 6613d8dbb0e5..91247a6fc67f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -921,9 +921,7 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
if (pcpu_rt) {
struct fib6_info *from;
- from = rcu_dereference_protected(pcpu_rt->from,
- lockdep_is_held(&table->tb6_lock));
- rcu_assign_pointer(pcpu_rt->from, NULL);
+ from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
fib6_info_release(from);
}
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index cb54a8a3c273..be5f3d7ceb96 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
return fl;
}
+static void fl_free_rcu(struct rcu_head *head)
+{
+ struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
+
+ if (fl->share == IPV6_FL_S_PROCESS)
+ put_pid(fl->owner.pid);
+ kfree(fl->opt);
+ kfree(fl);
+}
+
static void fl_free(struct ip6_flowlabel *fl)
{
- if (fl) {
- if (fl->share == IPV6_FL_S_PROCESS)
- put_pid(fl->owner.pid);
- kfree(fl->opt);
- kfree_rcu(fl, rcu);
- }
+ if (fl)
+ call_rcu(&fl->rcu, fl_free_rcu);
}
static void fl_release(struct ip6_flowlabel *fl)
@@ -633,9 +639,9 @@ recheck:
if (fl1->share == IPV6_FL_S_EXCL ||
fl1->share != fl->share ||
((fl1->share == IPV6_FL_S_PROCESS) &&
- (fl1->owner.pid == fl->owner.pid)) ||
+ (fl1->owner.pid != fl->owner.pid)) ||
((fl1->share == IPV6_FL_S_USER) &&
- uid_eq(fl1->owner.uid, fl->owner.uid)))
+ !uid_eq(fl1->owner.uid, fl->owner.uid)))
goto release;
err = -ENOMEM;
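
fl_free() switches from kfree_rcu() to call_rcu() because the flow label owns other resources (a pid reference, the options blob) that must be released in the callback; kfree_rcu() can only free the object itself. The same file also flips the share-ownership comparisons, which previously rejected the owner and admitted everyone else. The shape of the callback fix, with userspace stand-ins for rcu_head and call_rcu (the deferral past a grace period is elided):

    #include <stddef.h>
    #include <stdlib.h>

    struct rcu_head { void (*func)(struct rcu_head *); };

    /* Stand-in: the real call_rcu() runs func after a grace period. */
    static void call_rcu(struct rcu_head *head,
                         void (*func)(struct rcu_head *))
    {
        func(head);
    }

    struct flowlabel {
        void *opt;              /* owned allocation, like fl->opt */
        struct rcu_head rcu;
    };

    static void fl_free_rcu(struct rcu_head *head)
    {
        struct flowlabel *fl = (struct flowlabel *)
            ((char *)head - offsetof(struct flowlabel, rcu));

        free(fl->opt);          /* member cleanup kfree_rcu() cannot do */
        free(fl);
    }

    static void fl_free(struct flowlabel *fl)
    {
        if (fl)
            call_rcu(&fl->rcu, fl_free_rcu);
    }
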
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index b32c95f02128..655e46b227f9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -525,10 +525,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
}
static int ip6erspan_rcv(struct sk_buff *skb,
- struct tnl_ptk_info *tpi)
+ struct tnl_ptk_info *tpi,
+ int gre_hdr_len)
{
struct erspan_base_hdr *ershdr;
- struct erspan_metadata *pkt_md;
const struct ipv6hdr *ipv6h;
struct erspan_md2 *md2;
struct ip6_tnl *tunnel;
@@ -547,18 +547,16 @@ static int ip6erspan_rcv(struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, len)))
return PACKET_REJECT;
- ershdr = (struct erspan_base_hdr *)skb->data;
- pkt_md = (struct erspan_metadata *)(ershdr + 1);
-
if (__iptunnel_pull_header(skb, len,
htons(ETH_P_TEB),
false, false) < 0)
return PACKET_REJECT;
if (tunnel->parms.collect_md) {
+ struct erspan_metadata *pkt_md, *md;
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
- struct erspan_metadata *md;
+ unsigned char *gh;
__be64 tun_id;
__be16 flags;
@@ -571,6 +569,14 @@ static int ip6erspan_rcv(struct sk_buff *skb,
if (!tun_dst)
return PACKET_REJECT;
+ /* skb can be uncloned in __iptunnel_pull_header, so
+ * old pkt_md is no longer valid and we need to reset
+ * it
+ */
+ gh = skb_network_header(skb) +
+ skb_network_header_len(skb);
+ pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
+ sizeof(*ershdr));
info = &tun_dst->u.tun_info;
md = ip_tunnel_info_opts(info);
md->version = ver;
@@ -607,7 +613,7 @@ static int gre_rcv(struct sk_buff *skb)
if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
tpi.proto == htons(ETH_P_ERSPAN2))) {
- if (ip6erspan_rcv(skb, &tpi) == PACKET_RCVD)
+ if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
return 0;
goto out;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index edbd12067170..e51f3c648b09 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -601,7 +601,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
inet6_sk(skb->sk) : NULL;
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
- unsigned int mtu, hlen, left, len;
+ unsigned int mtu, hlen, left, len, nexthdr_offset;
int hroom, troom;
__be32 frag_id;
int ptr, offset = 0, err = 0;
@@ -612,6 +612,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
goto fail;
hlen = err;
nexthdr = *prevhdr;
+ nexthdr_offset = prevhdr - skb_network_header(skb);
mtu = ip6_skb_dst_mtu(skb);
@@ -646,6 +647,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
(err = skb_checksum_help(skb)))
goto fail;
+ prevhdr = skb_network_header(skb) + nexthdr_offset;
hroom = LL_RESERVED_SPACE(rt->dst.dev);
if (skb_has_frag_list(skb)) {
unsigned int first_len = skb_pagelen(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0c6403cf8b52..ade1390c6348 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
eiph->daddr, eiph->saddr, 0, 0,
IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
- if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
+ if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
if (!IS_ERR(rt))
ip_rt_put(rt);
goto out;
@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
} else {
if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
skb2->dev) ||
- skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
+ skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
goto out;
}
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
index 1059894a6f4c..4cb83fb69844 100644
--- a/net/ipv6/netfilter/ip6t_srh.c
+++ b/net/ipv6/netfilter/ip6t_srh.c
@@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
psidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
((srh->segments_left + 1) * sizeof(struct in6_addr));
psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
+ if (!psid)
+ return false;
if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID,
ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk,
&srhinfo->psid_addr)))
@@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
((srh->segments_left - 1) * sizeof(struct in6_addr));
nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
+ if (!nsid)
+ return false;
if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID,
ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk,
&srhinfo->nsid_addr)))
@@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
if (srhinfo->mt_flags & IP6T_SRH_LSID) {
lsidoff = srhoff + sizeof(struct ipv6_sr_hdr);
lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
+ if (!lsid)
+ return false;
if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID,
ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk,
&srhinfo->lsid_addr)))
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4ef4bbdb49d4..0520aca3354b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -379,11 +379,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
in6_dev_put(idev);
}
- rcu_read_lock();
- from = rcu_dereference(rt->from);
- rcu_assign_pointer(rt->from, NULL);
+ from = xchg((__force struct fib6_info **)&rt->from, NULL);
fib6_info_release(from);
- rcu_read_unlock();
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -1040,14 +1037,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
struct rt6_info *nrt;
if (!fib6_info_hold_safe(rt))
- return NULL;
+ goto fallback;
nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
- if (nrt)
- ip6_rt_copy_init(nrt, rt);
- else
+ if (!nrt) {
fib6_info_release(rt);
+ goto fallback;
+ }
+
+ ip6_rt_copy_init(nrt, rt);
+ return nrt;
+fallback:
+ nrt = dev_net(dev)->ipv6.ip6_null_entry;
+ dst_hold(&nrt->dst);
return nrt;
}
@@ -1096,10 +1099,6 @@ restart:
dst_hold(&rt->dst);
} else {
rt = ip6_create_rt_rcu(f6i);
- if (!rt) {
- rt = net->ipv6.ip6_null_entry;
- dst_hold(&rt->dst);
- }
}
rcu_read_unlock();
@@ -1286,9 +1285,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
/* purge completely the exception to allow releasing the held resources:
* some [sk] cache may keep the dst around for unlimited time
*/
- from = rcu_dereference_protected(rt6_ex->rt6i->from,
- lockdep_is_held(&rt6_exception_lock));
- rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+ from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
fib6_info_release(from);
dst_dev_put(&rt6_ex->rt6i->dst);
@@ -2328,6 +2325,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
rcu_read_lock();
from = rcu_dereference(rt6->from);
+ if (!from) {
+ rcu_read_unlock();
+ return;
+ }
nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
if (nrt6) {
rt6_do_update_pmtu(nrt6, mtu);
@@ -3391,11 +3392,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
rcu_read_lock();
from = rcu_dereference(rt->from);
- /* This fib6_info_hold() is safe here because we hold reference to rt
- * and rt already holds reference to fib6_info.
- */
- fib6_info_hold(from);
- rcu_read_unlock();
+ if (!from)
+ goto out;
nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
if (!nrt)
@@ -3407,10 +3405,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
- /* No need to remove rt from the exception table if rt is
- * a cached route because rt6_insert_exception() will
- * takes care of it
- */
+ /* rt6_insert_exception() will take care of duplicated exceptions */
if (rt6_insert_exception(nrt, from)) {
dst_release_immediate(&nrt->dst);
goto out;
@@ -3423,7 +3418,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
out:
- fib6_info_release(from);
+ rcu_read_unlock();
neigh_release(neigh);
}
@@ -3662,23 +3657,34 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
- int type;
struct dst_entry *dst = skb_dst(skb);
+ struct net *net = dev_net(dst->dev);
+ struct inet6_dev *idev;
+ int type;
+
+ if (netif_is_l3_master(skb->dev) &&
+ dst->dev == net->loopback_dev)
+ idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
+ else
+ idev = ip6_dst_idev(dst);
+
switch (ipstats_mib_noroutes) {
case IPSTATS_MIB_INNOROUTES:
type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
if (type == IPV6_ADDR_ANY) {
- IP6_INC_STATS(dev_net(dst->dev),
- __in6_dev_get_safely(skb->dev),
- IPSTATS_MIB_INADDRERRORS);
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
break;
}
/* FALLTHROUGH */
case IPSTATS_MIB_OUTNOROUTES:
- IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
- ipstats_mib_noroutes);
+ IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
break;
}
+
+ /* Start over by dropping the dst for l3mdev case */
+ if (netif_is_l3_master(skb->dev))
+ skb_dst_drop(skb);
+
icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
kfree_skb(skb);
return 0;
@@ -5011,16 +5017,20 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
rcu_read_lock();
from = rcu_dereference(rt->from);
-
- if (fibmatch)
- err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
- RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0);
- else
- err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
- &fl6.saddr, iif, RTM_NEWROUTE,
- NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
- 0);
+ if (from) {
+ if (fibmatch)
+ err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
+ iif, RTM_NEWROUTE,
+ NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
+ else
+ err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
+ &fl6.saddr, iif, RTM_NEWROUTE,
+ NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
+ } else {
+ err = -ENETUNREACH;
+ }
rcu_read_unlock();
if (err < 0) {
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 09e440e8dfae..b2109b74857d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
!net_eq(tunnel->net, dev_net(tunnel->dev))))
goto out;
+ /* skb can be uncloned in iptunnel_pull_header, so
+ * old iph is no longer valid
+ */
+ iph = (const struct iphdr *)skb_mac_header(skb);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
@@ -778,8 +782,9 @@ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
pbw0 = tunnel->ip6rd.prefixlen >> 5;
pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
- d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
- tunnel->ip6rd.relay_prefixlen;
+ d = tunnel->ip6rd.relay_prefixlen < 32 ?
+ (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+ tunnel->ip6rd.relay_prefixlen : 0;
pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
if (pbi1 > 0)
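
check_6rd() could previously shift a 32-bit value by relay_prefixlen == 32, which C leaves undefined; the hunk above guards the shift and defines the result as 0. The guard as a standalone helper:

    #include <stdint.h>

    /* v >> n is undefined for n >= 32 on a 32-bit operand. */
    static uint32_t shr32_guarded(uint32_t v, unsigned int n)
    {
        return n < 32 ? v >> n : 0;
    }
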
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 57ef69a10889..44d431849d39 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1110,11 +1110,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
- newnp->mcast_oif = tcp_v6_iif(skb);
- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
- newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
+ newnp->mcast_oif = inet_iif(skb);
+ newnp->mcast_hops = ip_hdr(skb)->ttl;
+ newnp->rcv_flowinfo = 0;
if (np->repflow)
- newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
+ newnp->flow_label = 0;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index b444483cdb2b..622eeaf5732b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1047,6 +1047,8 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return -EINVAL;
/* The following checks are replicated from __ip6_datagram_connect()
* and intended to prevent BPF program called below from accessing
* bytes that are out of the bound specified by user in addr_len.
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index bc65db782bfb..d9e5f6808811 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
unsigned int i;
xfrm_flush_gc();
- xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+ xfrm_state_flush(net, 0, false, true);
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
@@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void)
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
+ /* A reader may still hold a reference to an xfrm6_tunnel_spi,
+ * so wait for all RCU readers to finish before freeing.
+ */
+ rcu_barrier();
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index c5c5ab6c5a1c..44fdc641710d 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
if (err)
goto fail;
- err = sock_register(&kcm_family_ops);
- if (err)
- goto sock_register_fail;
-
err = register_pernet_device(&kcm_net_ops);
if (err)
goto net_ops_fail;
+ err = sock_register(&kcm_family_ops);
+ if (err)
+ goto sock_register_fail;
+
err = kcm_proc_init();
if (err)
goto proc_init_fail;
@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
return 0;
proc_init_fail:
- unregister_pernet_device(&kcm_net_ops);
-
-net_ops_fail:
sock_unregister(PF_KCM);
sock_register_fail:
+ unregister_pernet_device(&kcm_net_ops);
+
+net_ops_fail:
proto_unregister(&kcm_proto);
fail:
@@ -2090,8 +2090,8 @@ fail:
static void __exit kcm_exit(void)
{
kcm_proc_exit();
- unregister_pernet_device(&kcm_net_ops);
sock_unregister(PF_KCM);
+ unregister_pernet_device(&kcm_net_ops);
proto_unregister(&kcm_proto);
destroy_workqueue(kcm_wq);
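
The kcm hunks re-order both setup and the error unwind so that teardown is always the exact reverse of bring-up; otherwise a failing init or the module exit can unregister a facility while a later-registered one still depends on it. The ladder idiom in miniature (all step names invented):

    static int  step_a(void) { return 0; }
    static void undo_a(void) { }
    static int  step_b(void) { return 0; }
    static void undo_b(void) { }
    static int  step_c(void) { return 0; }

    static int init_all(void)
    {
        if (step_a() < 0) goto fail;
        if (step_b() < 0) goto fail_b;
        if (step_c() < 0) goto fail_c;
        return 0;

    fail_c:                     /* unwind in reverse order of setup */
        undo_b();
    fail_b:
        undo_a();
    fail:
        return -1;
    }
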
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5651c29cb5bd..4af1e1d60b9f 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
if (rq->sadb_x_ipsecrequest_mode == 0)
return -EINVAL;
+ if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
+ return -EINVAL;
- t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
+ t->id.proto = rq->sadb_x_ipsecrequest_proto;
if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
return -EINVAL;
t->mode = mode;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index fed6becc5daf..52b5a2797c0c 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -169,8 +169,8 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
- if (tunnel->tunnel_id == tunnel_id) {
- l2tp_tunnel_inc_refcount(tunnel);
+ if (tunnel->tunnel_id == tunnel_id &&
+ refcount_inc_not_zero(&tunnel->ref_count)) {
rcu_read_unlock_bh();
return tunnel;
@@ -190,8 +190,8 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
- if (++count > nth) {
- l2tp_tunnel_inc_refcount(tunnel);
+ if (++count > nth &&
+ refcount_inc_not_zero(&tunnel->ref_count)) {
rcu_read_unlock_bh();
return tunnel;
}
@@ -909,7 +909,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct l2tp_tunnel *tunnel;
- tunnel = l2tp_tunnel(sk);
+ tunnel = rcu_dereference_sk_user_data(sk);
if (tunnel == NULL)
goto pass_up;
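
Replacing the unconditional refcount increment with refcount_inc_not_zero() closes a use-after-free: under RCU a lookup can race with the final put, so it may only take a reference while the count is still non-zero. The primitive written with C11 atomics, as a standalone sketch rather than the kernel's refcount_t:

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool refcount_inc_not_zero(atomic_uint *r)
    {
        unsigned int old = atomic_load(r);

        while (old != 0) {
            /* on failure, old is reloaded with the current value */
            if (atomic_compare_exchange_weak(r, &old, old + 1))
                return true;    /* reference taken */
        }
        return false;           /* object already dying: skip it */
    }
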
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0ae6899edac0..37a69df17cab 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -674,9 +674,6 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
goto out;
- if (addr_len)
- *addr_len = sizeof(*lsa);
-
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
@@ -706,6 +703,7 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
lsa->l2tp_conn_id = 0;
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = inet6_iif(skb);
+ *addr_len = sizeof(*lsa);
}
if (np->rxopt.all)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index b99e73a7e7e0..2017b7d780f5 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -320,14 +320,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
struct llc_sap *sap;
int rc = -EINVAL;
- dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
-
lock_sock(sk);
if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
goto out;
rc = -EAFNOSUPPORT;
if (unlikely(addr->sllc_family != AF_LLC))
goto out;
+ dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
rc = -ENODEV;
rcu_read_lock();
if (sk->sk_bound_dev_if) {
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index cff0fb3578c9..deb3faf08337 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -841,7 +841,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
dir = sdata->vif.debugfs_dir;
- if (!dir)
+ if (IS_ERR_OR_NULL(dir))
return;
sprintf(buf, "netdev:%s", sdata->name);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 28d022a3eee3..ae4f0be3b393 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
+ if (local->in_reconfig)
+ return;
+
if (!check_sdata_in_driver(sdata))
return;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index e03c46ac8e4d..c62101857b9b 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -112,8 +112,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
IEEE80211_HT_CAP_TX_STBC);
/* Allow user to configure RX STBC bits */
- if (ht_capa_mask->cap_info & IEEE80211_HT_CAP_RX_STBC)
- ht_cap->cap |= ht_capa->cap_info & IEEE80211_HT_CAP_RX_STBC;
+ if (ht_capa_mask->cap_info & cpu_to_le16(IEEE80211_HT_CAP_RX_STBC))
+ ht_cap->cap |= le16_to_cpu(ht_capa->cap_info) &
+ IEEE80211_HT_CAP_RX_STBC;
/* Allow user to decrease AMPDU factor */
if (ht_capa_mask->ampdu_params_info &
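The ht.c hunk is an endianness fix: ht_capa->cap_info is a little-endian field, and the old code masked it with a host-order constant, which only happened to work on little-endian CPUs. A small sketch of the difference, assuming glibc's endian.h (the mask value mirrors the kernel constant):

#include <stdint.h>
#include <endian.h>

#define HT_CAP_RX_STBC 0x0300	/* host-order mask */

static uint16_t rx_stbc_bits(uint16_t cap_info_le)
{
	/* Buggy on big-endian: cap_info_le & HT_CAP_RX_STBC.
	 * Correct everywhere: convert the wire value first. */
	return le16toh(cap_info_le) & HT_CAP_RX_STBC;
}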
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 4a6ff1482a9f..02d2e6f11e93 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1908,6 +1908,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
+ if (sdata->vif.txq)
+ ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
+
synchronize_rcu();
if (sdata->dev) {
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 4700718e010f..37e372896230 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
* The driver doesn't know anything about VLAN interfaces.
* Hence, don't send GTKs for VLAN interfaces to the driver.
*/
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ ret = 1;
goto out_unsupported;
+ }
}
ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
/* all of these we can do in software - if driver can */
if (ret == 1)
return 0;
- if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- return 0;
+ if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
return -EINVAL;
- }
return 0;
default:
return -EINVAL;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 95eb5064fa91..b76a2aefa9ec 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
/* Use last four bytes of hw addr as hash index */
- return jhash_1word(*(u32 *)(addr+2), seed);
+ return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}
static const struct rhashtable_params mesh_rht_params = {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7f8d93401ce0..bf0b187f994e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta)
return;
for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
- if (txq_has_queue(sta->sta.txq[tid]))
+ struct ieee80211_txq *txq = sta->sta.txq[tid];
+ struct txq_info *txqi = to_txq_info(txq);
+
+ spin_lock(&local->active_txq_lock[txq->ac]);
+ if (!list_empty(&txqi->schedule_order))
+ list_del_init(&txqi->schedule_order);
+ spin_unlock(&local->active_txq_lock[txq->ac]);
+
+ if (txq_has_queue(txq))
set_bit(tid, &sta->txq_buffered_tids);
else
clear_bit(tid, &sta->txq_buffered_tids);
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
index 366b9e6f043e..40141df09f25 100644
--- a/net/mac80211/trace_msg.h
+++ b/net/mac80211/trace_msg.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
+
#ifdef CONFIG_MAC80211_MESSAGE_TRACING
#if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +16,7 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211_msg
-#define MAX_MSG_LEN 100
+#define MAX_MSG_LEN 120
DECLARE_EVENT_CLASS(mac80211_msg_event,
TP_PROTO(struct va_format *vaf),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8a49a74c0a37..2e816dd67be7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
u8 max_subframes = sta->sta.max_amsdu_subframes;
int max_frags = local->hw.max_tx_fragments;
int max_amsdu_len = sta->sta.max_amsdu_len;
+ int orig_truesize;
__be16 len;
void *data;
bool ret = false;
@@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (!head || skb_is_gso(head))
goto out;
+ orig_truesize = head->truesize;
orig_len = head->len;
if (skb->len + head->len > max_amsdu_len)
@@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
*frag_tail = skb;
out_recalc:
+ fq->memory_usage += head->truesize - orig_truesize;
if (head->len != orig_len) {
flow->backlog += head->len - orig_len;
tin->backlog_bytes += head->len - orig_len;
@@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_txq *ret = NULL;
struct txq_info *txqi = NULL;
- lockdep_assert_held(&local->active_txq_lock[ac]);
+ spin_lock_bh(&local->active_txq_lock[ac]);
begin:
txqi = list_first_entry_or_null(&local->active_txqs[ac],
struct txq_info,
schedule_order);
if (!txqi)
- return NULL;
+ goto out;
if (txqi->txq.sta) {
struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
if (txqi->schedule_round == local->schedule_round[ac])
- return NULL;
+ goto out;
list_del_init(&txqi->schedule_order);
txqi->schedule_round = local->schedule_round[ac];
- return &txqi->txq;
+ ret = &txqi->txq;
+
+out:
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+ return ret;
}
EXPORT_SYMBOL(ieee80211_next_txq);
-void ieee80211_return_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq)
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq,
+ bool force)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = to_txq_info(txq);
- lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+ spin_lock_bh(&local->active_txq_lock[txq->ac]);
if (list_empty(&txqi->schedule_order) &&
- (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+ (force || !skb_queue_empty(&txqi->frags) ||
+ txqi->tin.backlog_packets)) {
/* If airtime accounting is active, always enqueue STAs at the
* head of the list to ensure that they only get moved to the
* back by the airtime DRR scheduler once they have a negative
@@ -3706,20 +3716,10 @@ void ieee80211_return_txq(struct ieee80211_hw *hw,
list_add_tail(&txqi->schedule_order,
&local->active_txqs[txq->ac]);
}
-}
-EXPORT_SYMBOL(ieee80211_return_txq);
-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq)
- __acquires(txq_lock) __releases(txq_lock)
-{
- struct ieee80211_local *local = hw_to_local(hw);
-
- spin_lock_bh(&local->active_txq_lock[txq->ac]);
- ieee80211_return_txq(hw, txq);
spin_unlock_bh(&local->active_txq_lock[txq->ac]);
}
-EXPORT_SYMBOL(ieee80211_schedule_txq);
+EXPORT_SYMBOL(__ieee80211_schedule_txq);
bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
@@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
struct sta_info *sta;
u8 ac = txq->ac;
- lockdep_assert_held(&local->active_txq_lock[ac]);
+ spin_lock_bh(&local->active_txq_lock[ac]);
if (!txqi->txq.sta)
goto out;
@@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
sta->airtime[ac].deficit += sta->airtime_weight;
list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+ spin_unlock_bh(&local->active_txq_lock[ac]);
return false;
out:
if (!list_empty(&txqi->schedule_order))
list_del_init(&txqi->schedule_order);
+ spin_unlock_bh(&local->active_txq_lock[ac]);
return true;
}
EXPORT_SYMBOL(ieee80211_txq_may_transmit);
void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
- __acquires(txq_lock)
{
struct ieee80211_local *local = hw_to_local(hw);
spin_lock_bh(&local->active_txq_lock[ac]);
local->schedule_round[ac]++;
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
- __releases(txq_lock)
-{
- struct ieee80211_local *local = hw_to_local(hw);
-
spin_unlock_bh(&local->active_txq_lock[ac]);
}
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
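The tx.c changes move the per-AC active_txq_lock from the callers (the old schedule_start/schedule_end bracketing) into the scheduling functions themselves, each returning through a single unlock path. A sketch of that shape with a pthread mutex standing in for spin_lock_bh (structure only, not the mac80211 API):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t active_txq_lock = PTHREAD_MUTEX_INITIALIZER;
static void *active_head;	/* stand-in for the schedule list */

static void *next_txq(void)
{
	void *ret = NULL;

	pthread_mutex_lock(&active_txq_lock);
	if (!active_head)
		goto out;	/* nothing scheduled */
	ret = active_head;
out:
	pthread_mutex_unlock(&active_txq_lock);	/* lock dropped on every path */
	return ret;
}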
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index dda8930f20e7..f3a8557494d6 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -140,9 +140,15 @@ static int mpls_xmit(struct sk_buff *skb)
if (rt)
err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
skb);
- else if (rt6)
- err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
- skb);
+ else if (rt6) {
+ if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
+ /* 6PE (RFC 4798) */
+ err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt6->rt6i_gateway.s6_addr32[3],
+ skb);
+ } else
+ err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+ skb);
+ }
if (err)
net_dbg_ratelimited("%s: packet transmission failed: %d\n",
__func__, err);
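The mpls_xmit() change handles 6PE (RFC 4798): when the IPv6 route's gateway is an IPv4-mapped address (::ffff:a.b.c.d), the next hop must be resolved via ARP with the embedded IPv4 address, not via IPv6 neighbour discovery. A userspace sketch of the detection, using only standard socket headers:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr gw;

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &gw);
	if (IN6_IS_ADDR_V4MAPPED(&gw)) {
		struct in_addr v4;

		/* the embedded IPv4 address is the last four bytes */
		memcpy(&v4, &gw.s6_addr[12], sizeof(v4));
		printf("resolve via ARP: %s\n", inet_ntoa(v4));
	} else {
		printf("resolve via IPv6 ND\n");
	}
	return 0;
}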
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 5d782445d2fc..bad17bba8ba7 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -251,6 +251,10 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
}
attr = nla_nest_start(skb, NCSI_ATTR_PACKAGE_LIST);
+ if (!attr) {
+ rc = -EMSGSIZE;
+ goto err;
+ }
rc = ncsi_write_package_info(skb, ndp, package->id);
if (rc) {
nla_nest_cancel(skb, attr);
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index dc07fcc7938e..802db01e3075 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/ncsi.h>
@@ -667,7 +668,10 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
/* Increase mac address by 1 for BMC's address */
- saddr.sa_data[ETH_ALEN - 1]++;
+ eth_addr_inc((u8 *)saddr.sa_data);
+ if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+ return -ENXIO;
+
ret = ops->ndo_set_mac_address(ndev, &saddr);
if (ret < 0)
netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index d43ffb09939b..6548271209a0 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1007,6 +1007,7 @@ config NETFILTER_XT_TARGET_TEE
depends on NETFILTER_ADVANCED
depends on IPV6 || IPV6=n
depends on !NF_CONNTRACK || NF_CONNTRACK
+ depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES
select NF_DUP_IPV4
select NF_DUP_IPV6 if IP6_NF_IPTABLES
---help---
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 43bbaa32b1d6..14457551bcb4 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1678,7 +1678,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
if (!cp) {
int v;
- if (!sysctl_schedule_icmp(ipvs))
+ if (ipip || !sysctl_schedule_icmp(ipvs))
return NF_ACCEPT;
if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 82bfbeef46af..2a714527cde1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
+#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
@@ -449,6 +450,40 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
+/* Generate an almost-unique pseudo-id for a given conntrack.
+ *
+ * This intentionally doesn't re-use any of the seeds used for hash
+ * table location; we assume the id gets exposed to userspace.
+ *
+ * The following nf_conn items do not change throughout the lifetime
+ * of the nf_conn after it has been committed to the main hash table:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->ext address
+ * 3. nf_conn->master address (normally NULL)
+ * 4. tuple
+ * 5. the associated net namespace
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+ static __read_mostly siphash_key_t ct_id_seed;
+ unsigned long a, b, c, d;
+
+ net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+ a = (unsigned long)ct;
+ b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
+ c = (unsigned long)ct->ext;
+ d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+ &ct_id_seed);
+#ifdef CONFIG_64BIT
+ return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+ return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
static void
clean_from_lists(struct nf_conn *ct)
{
@@ -982,12 +1017,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
/* set conntrack timestamp, if enabled. */
tstamp = nf_conn_tstamp_find(ct);
- if (tstamp) {
- if (skb->tstamp == 0)
- __net_timestamp(skb);
+ if (tstamp)
+ tstamp->start = ktime_get_real_ns();
- tstamp->start = ktime_to_ns(skb->tstamp);
- }
/* Since the lookup is lockless, hash insertion must be done after
* starting the timer and setting the CONFIRMED bit. The RCU barriers
* guarantee that no other CPU can find the conntrack before the above
@@ -1350,6 +1382,7 @@ __nf_conntrack_alloc(struct net *net,
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
ct->status = 0;
+ ct->timeout = 0;
write_pnet(&ct->ct_net, net);
memset(&ct->__nfct_init_offset[0], 0,
offsetof(struct nf_conn, proto) -
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 66c596d287a5..d7f61b0547c6 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/siphash.h>
#include <linux/netfilter.h>
#include <net/netlink.h>
@@ -485,7 +486,9 @@ nla_put_failure:
static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
- if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+ __be32 id = (__force __be32)nf_ct_get_id(ct);
+
+ if (nla_put_be32(skb, CTA_ID, id))
goto nla_put_failure;
return 0;
@@ -1286,8 +1289,9 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
}
if (cda[CTA_ID]) {
- u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
- if (id != (u32)(unsigned long)ct) {
+ __be32 id = nla_get_be32(cda[CTA_ID]);
+
+ if (id != (__force __be32)nf_ct_get_id(ct)) {
nf_ct_put(ct);
return -ENOENT;
}
@@ -2692,6 +2696,25 @@ nla_put_failure:
static const union nf_inet_addr any_addr;
+static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
+{
+ static __read_mostly siphash_key_t exp_id_seed;
+ unsigned long a, b, c, d;
+
+ net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
+
+ a = (unsigned long)exp;
+ b = (unsigned long)exp->helper;
+ c = (unsigned long)exp->master;
+ d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
+
+#ifdef CONFIG_64BIT
+ return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
+#else
+ return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
+#endif
+}
+
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
const struct nf_conntrack_expect *exp)
@@ -2739,7 +2762,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
}
#endif
if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
- nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+ nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
goto nla_put_failure;
@@ -3044,7 +3067,8 @@ static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
if (cda[CTA_EXPECT_ID]) {
__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
- if (ntohl(id) != (u32)(unsigned long)exp) {
+
+ if (id != nf_expect_get_id(exp)) {
nf_ct_expect_put(exp);
return -ENOENT;
}
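Both ID hunks replace a raw kernel pointer exposed to userspace (htonl((unsigned long)ct)) with a pseudo-id derived from fields that stay constant for the object's lifetime, run through a keyed hash. A structural sketch; keyed_hash() below is a deliberately toy mixer standing in for SipHash, and the struct layout is illustrative:

#include <stdint.h>
#include <stddef.h>

struct expect { void *helper, *master; uint8_t tuple[16]; };

/* toy keyed mixer, NOT SipHash -- shown for structure only */
static uint64_t keyed_hash(const void *data, size_t len, uint64_t key)
{
	const uint8_t *p = data;
	uint64_t h = key ^ 0x9e3779b97f4a7c15ULL;

	while (len--) {
		h ^= *p++;
		h *= 0x100000001b3ULL;
	}
	return h;
}

static uint32_t expect_get_id(const struct expect *e, uint64_t key)
{
	uint64_t in[4] = {
		(uintptr_t)e,		/* object address		*/
		(uintptr_t)e->helper,	/* stable over the lifetime	*/
		(uintptr_t)e->master,
		keyed_hash(e->tuple, sizeof(e->tuple), key),
	};

	/* the key is generated once and never leaves the kernel, so the
	 * id reveals nothing about the underlying addresses */
	return (uint32_t)keyed_hash(in, sizeof(in), key);
}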
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index b9403a266a2e..37bb530d848f 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -55,7 +55,7 @@ void nf_l4proto_log_invalid(const struct sk_buff *skb,
struct va_format vaf;
va_list args;
- if (net->ct.sysctl_log_invalid != protonum ||
+ if (net->ct.sysctl_log_invalid != protonum &&
net->ct.sysctl_log_invalid != IPPROTO_RAW)
return;
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 7df477996b16..9becac953587 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -103,49 +103,94 @@ int nf_conntrack_icmp_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
-static int
-icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb,
- const struct nf_hook_state *state)
+/* Check whether the inner header is related to any existing connection */
+int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state,
+ u8 l4proto, union nf_inet_addr *outer_daddr)
{
struct nf_conntrack_tuple innertuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_zone *zone;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
+ union nf_inet_addr *ct_daddr;
+ enum ip_conntrack_dir dir;
+ struct nf_conn *ct;
WARN_ON(skb_nfct(skb));
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
/* Are they talking about one of our connections? */
- if (!nf_ct_get_tuplepr(skb,
- skb_network_offset(skb) + ip_hdrlen(skb)
- + sizeof(struct icmphdr),
- PF_INET, state->net, &origtuple)) {
- pr_debug("icmp_error_message: failed to get tuple\n");
+ if (!nf_ct_get_tuplepr(skb, dataoff,
+ state->pf, state->net, &origtuple))
return -NF_ACCEPT;
- }
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
- if (!nf_ct_invert_tuple(&innertuple, &origtuple)) {
- pr_debug("icmp_error_message: no match\n");
+ if (!nf_ct_invert_tuple(&innertuple, &origtuple))
return -NF_ACCEPT;
- }
-
- ctinfo = IP_CT_RELATED;
h = nf_conntrack_find_get(state->net, zone, &innertuple);
- if (!h) {
- pr_debug("icmp_error_message: no match\n");
+ if (!h)
+ return -NF_ACCEPT;
+
+ /* Consider: A -> T (=This machine) -> B
+ * Conntrack entry will look like this:
+ * Original: A->B
+ * Reply: B->T (SNAT case) OR A
+ *
+ * When this function runs, we got packet that looks like this:
+ * iphdr|icmphdr|inner_iphdr|l4header (tcp, udp, ..).
+ *
+ * The nf_conntrack_find_get() above does its lookup based on the
+ * inner header, so we expect the destination of the found
+ * connection to match the outer header's destination address.
+ *
+ * In above example, we can consider these two cases:
+ * 1. Error coming in reply direction from B or M (middle box) to
+ * T (SNAT case) or A.
+ * Inner saddr will be B, dst will be T or A.
+ * The found conntrack will be reply tuple (B->T/A).
+ * 2. Error coming in original direction from A or M to B.
+ * Inner saddr will be A, inner daddr will be B.
+ * The found conntrack will be original tuple (A->B).
+ *
+ * In both cases, conntrack[dir].dst == inner.dst.
+ *
+ * A bogus packet could look like this:
+ * Inner: B->T
+ * Outer: B->X (other machine reachable by T).
+ *
+ * In this case, lookup yields connection A->B and will
+ * set packet from B->X as *RELATED*, even though no connection
+ * from X was ever seen.
+ */
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ dir = NF_CT_DIRECTION(h);
+ ct_daddr = &ct->tuplehash[dir].tuple.dst.u3;
+ if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) {
+ if (state->pf == AF_INET) {
+ nf_l4proto_log_invalid(skb, state->net, state->pf,
+ l4proto,
+ "outer daddr %pI4 != inner %pI4",
+ &outer_daddr->ip, &ct_daddr->ip);
+ } else if (state->pf == AF_INET6) {
+ nf_l4proto_log_invalid(skb, state->net, state->pf,
+ l4proto,
+ "outer daddr %pI6 != inner %pI6",
+ &outer_daddr->ip6, &ct_daddr->ip6);
+ }
+ nf_ct_put(ct);
return -NF_ACCEPT;
}
- if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+ ctinfo = IP_CT_RELATED;
+ if (dir == IP_CT_DIR_REPLY)
ctinfo += IP_CT_IS_REPLY;
/* Update skb to refer to this connection */
- nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
+ nf_ct_set(skb, ct, ctinfo);
return NF_ACCEPT;
}
@@ -162,11 +207,12 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
const struct nf_hook_state *state)
{
+ union nf_inet_addr outer_daddr;
const struct icmphdr *icmph;
struct icmphdr _ih;
/* Not enough header? */
- icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih);
+ icmph = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
if (icmph == NULL) {
icmp_error_log(skb, state, "short packet");
return -NF_ACCEPT;
@@ -199,7 +245,12 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
icmph->type != ICMP_REDIRECT)
return NF_ACCEPT;
- return icmp_error_message(tmpl, skb, state);
+ memset(&outer_daddr, 0, sizeof(outer_daddr));
+ outer_daddr.ip = ip_hdr(skb)->daddr;
+
+ dataoff += sizeof(*icmph);
+ return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
+ IPPROTO_ICMP, &outer_daddr);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
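The comment block in the new nf_conntrack_inet_error() is the heart of the fix: an ICMP error may only mark a packet RELATED if the outer header's destination matches the destination recorded in the conntrack tuple for the direction the inner header hit. A reduced sketch of that comparison, with zero-padded address unions standing in for nf_inet_addr (the kernel likewise zero-fills outer_daddr before comparing):

#include <stdbool.h>
#include <string.h>
#include <netinet/in.h>

union inet_addr {
	struct in_addr	ip;	/* remaining bytes stay zeroed */
	struct in6_addr	ip6;
};

struct tuple { union inet_addr src, dst; };
struct conn  { struct tuple tuple[2]; };	/* [0] original, [1] reply */

static bool icmp_error_related(const struct conn *ct, int dir,
			       const union inet_addr *outer_daddr)
{
	/* a forged inner header can make the lookup succeed, but it cannot
	 * control which destination the existing conntrack recorded */
	return memcmp(&ct->tuple[dir].dst, outer_daddr,
		      sizeof(*outer_daddr)) == 0;
}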
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index bec4a3211658..c63ee3612855 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -123,51 +123,6 @@ int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-static int
-icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
- struct sk_buff *skb,
- unsigned int icmp6off)
-{
- struct nf_conntrack_tuple intuple, origtuple;
- const struct nf_conntrack_tuple_hash *h;
- enum ip_conntrack_info ctinfo;
- struct nf_conntrack_zone tmp;
-
- WARN_ON(skb_nfct(skb));
-
- /* Are they talking about one of our connections? */
- if (!nf_ct_get_tuplepr(skb,
- skb_network_offset(skb)
- + sizeof(struct ipv6hdr)
- + sizeof(struct icmp6hdr),
- PF_INET6, net, &origtuple)) {
- pr_debug("icmpv6_error: Can't get tuple\n");
- return -NF_ACCEPT;
- }
-
- /* Ordinarily, we'd expect the inverted tupleproto, but it's
- been preserved inside the ICMP. */
- if (!nf_ct_invert_tuple(&intuple, &origtuple)) {
- pr_debug("icmpv6_error: Can't invert tuple\n");
- return -NF_ACCEPT;
- }
-
- ctinfo = IP_CT_RELATED;
-
- h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
- &intuple);
- if (!h) {
- pr_debug("icmpv6_error: no match\n");
- return -NF_ACCEPT;
- } else {
- if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
- ctinfo += IP_CT_IS_REPLY;
- }
-
- /* Update skb to refer to this connection */
- nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
- return NF_ACCEPT;
-}
static void icmpv6_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
@@ -182,6 +137,7 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
unsigned int dataoff,
const struct nf_hook_state *state)
{
+ union nf_inet_addr outer_daddr;
const struct icmp6hdr *icmp6h;
struct icmp6hdr _ih;
int type;
@@ -210,7 +166,11 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
if (icmp6h->icmp6_type >= 128)
return NF_ACCEPT;
- return icmpv6_error_message(state->net, tmpl, skb, dataoff);
+ memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
+ sizeof(outer_daddr.ip6));
+ dataoff += sizeof(*icmp6h);
+ return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
+ IPPROTO_ICMPV6, &outer_daddr);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index f067c6b50857..39fcc1ed18f3 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -20,9 +20,9 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
-#include <net/route.h>
-#include <net/ip6_route.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
@@ -871,38 +871,33 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
} else if (sip_external_media) {
struct net_device *dev = skb_dst(skb)->dev;
struct net *net = dev_net(dev);
- struct rtable *rt;
- struct flowi4 fl4 = {};
-#if IS_ENABLED(CONFIG_IPV6)
- struct flowi6 fl6 = {};
-#endif
+ struct flowi fl;
struct dst_entry *dst = NULL;
+ memset(&fl, 0, sizeof(fl));
+
switch (nf_ct_l3num(ct)) {
case NFPROTO_IPV4:
- fl4.daddr = daddr->ip;
- rt = ip_route_output_key(net, &fl4);
- if (!IS_ERR(rt))
- dst = &rt->dst;
+ fl.u.ip4.daddr = daddr->ip;
+ nf_ip_route(net, &dst, &fl, false);
break;
-#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6:
- fl6.daddr = daddr->in6;
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst->error) {
- dst_release(dst);
- dst = NULL;
- }
+ fl.u.ip6.daddr = daddr->in6;
+ nf_ip6_route(net, &dst, &fl, false);
break;
-#endif
}
/* Don't predict any conntracks when media endpoint is reachable
* through the same interface as the signalling peer.
*/
- if (dst && dst->dev == dev)
- return NF_ACCEPT;
+ if (dst) {
+ bool external_media = (dst->dev == dev);
+
+ dst_release(dst);
+ if (external_media)
+ return NF_ACCEPT;
+ }
}
/* We need to check whether the registration exists before attempting
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index af7dc6537758..000952719adf 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -415,9 +415,14 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
case IPPROTO_ICMPV6:
/* id is same for either direction... */
keyptr = &tuple->src.u.icmp.id;
- min = range->min_proto.icmp.id;
- range_size = ntohs(range->max_proto.icmp.id) -
- ntohs(range->min_proto.icmp.id) + 1;
+ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
+ min = 0;
+ range_size = 65536;
+ } else {
+ min = ntohs(range->min_proto.icmp.id);
+ range_size = ntohs(range->max_proto.icmp.id) -
+ ntohs(range->min_proto.icmp.id) + 1;
+ }
goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
case IPPROTO_GRE:
diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
index 86fa4dcc63c5..d85c4d902e7b 100644
--- a/net/netfilter/nf_nat_masquerade.c
+++ b/net/netfilter/nf_nat_masquerade.c
@@ -11,7 +11,8 @@
#include <net/netfilter/ipv6/nf_nat_masquerade.h>
static DEFINE_MUTEX(masq_mutex);
-static unsigned int masq_refcnt __read_mostly;
+static unsigned int masq_refcnt4 __read_mostly;
+static unsigned int masq_refcnt6 __read_mostly;
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
@@ -141,8 +142,13 @@ int nf_nat_masquerade_ipv4_register_notifier(void)
int ret = 0;
mutex_lock(&masq_mutex);
+ if (WARN_ON_ONCE(masq_refcnt4 == UINT_MAX)) {
+ ret = -EOVERFLOW;
+ goto out_unlock;
+ }
+
/* check if the notifier was already set */
- if (++masq_refcnt > 1)
+ if (++masq_refcnt4 > 1)
goto out_unlock;
/* Register for device down reports */
@@ -160,7 +166,7 @@ int nf_nat_masquerade_ipv4_register_notifier(void)
err_unregister:
unregister_netdevice_notifier(&masq_dev_notifier);
err_dec:
- masq_refcnt--;
+ masq_refcnt4--;
out_unlock:
mutex_unlock(&masq_mutex);
return ret;
@@ -171,7 +177,7 @@ void nf_nat_masquerade_ipv4_unregister_notifier(void)
{
mutex_lock(&masq_mutex);
/* check if the notifier still has clients */
- if (--masq_refcnt > 0)
+ if (--masq_refcnt4 > 0)
goto out_unlock;
unregister_netdevice_notifier(&masq_dev_notifier);
@@ -321,25 +327,23 @@ int nf_nat_masquerade_ipv6_register_notifier(void)
int ret = 0;
mutex_lock(&masq_mutex);
- /* check if the notifier is already set */
- if (++masq_refcnt > 1)
+ if (WARN_ON_ONCE(masq_refcnt6 == UINT_MAX)) {
+ ret = -EOVERFLOW;
goto out_unlock;
+ }
- ret = register_netdevice_notifier(&masq_dev_notifier);
- if (ret)
- goto err_dec;
+ /* check if the notifier is already set */
+ if (++masq_refcnt6 > 1)
+ goto out_unlock;
ret = register_inet6addr_notifier(&masq_inet6_notifier);
if (ret)
- goto err_unregister;
+ goto err_dec;
mutex_unlock(&masq_mutex);
return ret;
-
-err_unregister:
- unregister_netdevice_notifier(&masq_dev_notifier);
err_dec:
- masq_refcnt--;
+ masq_refcnt6--;
out_unlock:
mutex_unlock(&masq_mutex);
return ret;
@@ -350,11 +354,10 @@ void nf_nat_masquerade_ipv6_unregister_notifier(void)
{
mutex_lock(&masq_mutex);
/* check if the notifier still has clients */
- if (--masq_refcnt > 0)
+ if (--masq_refcnt6 > 0)
goto out_unlock;
unregister_inet6addr_notifier(&masq_inet6_notifier);
- unregister_netdevice_notifier(&masq_dev_notifier);
out_unlock:
mutex_unlock(&masq_mutex);
}
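The masquerade changes split one shared notifier refcount into per-family counters, so tearing down the last IPv6 user can no longer unregister the netdev notifier that IPv4 masquerading still depends on, and each register path now guards against counter overflow. A compressed sketch of the pattern (mutex and notifier hooks are stand-ins):

#include <limits.h>
#include <pthread.h>

static pthread_mutex_t masq_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int masq_refcnt4, masq_refcnt6;	/* one counter per family */

static int register_v4(void)
{
	int ret = 0;

	pthread_mutex_lock(&masq_mutex);
	if (masq_refcnt4 == UINT_MAX) {
		ret = -1;	/* refuse: the counter would overflow */
		goto out;
	}
	if (++masq_refcnt4 > 1)
		goto out;	/* notifier already registered */
	/* first user: register the netdev notifier here */
out:
	pthread_mutex_unlock(&masq_mutex);
	return ret;
}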
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index faf6bd10a19f..1606eaa5ae0d 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -142,7 +142,7 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
if (trans->msg_type == NFT_MSG_NEWSET &&
nft_trans_set(trans) == set) {
- nft_trans_set_bound(trans) = true;
+ set->bound = true;
break;
}
}
@@ -1545,7 +1545,7 @@ static int nft_chain_parse_hook(struct net *net,
if (IS_ERR(type))
return PTR_ERR(type);
}
- if (!(type->hook_mask & (1 << hook->num)))
+ if (hook->num > NF_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
return -EOPNOTSUPP;
if (type->type == NFT_CHAIN_T_NAT &&
@@ -2162,9 +2162,11 @@ err1:
static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
struct nft_expr *expr)
{
+ const struct nft_expr_type *type = expr->ops->type;
+
if (expr->ops->destroy)
expr->ops->destroy(ctx, expr);
- module_put(expr->ops->type->owner);
+ module_put(type->owner);
}
struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
@@ -2804,8 +2806,11 @@ err2:
nf_tables_rule_release(&ctx, rule);
err1:
for (i = 0; i < n; i++) {
- if (info[i].ops != NULL)
+ if (info[i].ops) {
module_put(info[i].ops->type->owner);
+ if (info[i].ops->type->release_ops)
+ info[i].ops->type->release_ops(info[i].ops);
+ }
}
kvfree(info);
return err;
@@ -3672,6 +3677,9 @@ err1:
static void nft_set_destroy(struct nft_set *set)
{
+ if (WARN_ON(set->use > 0))
+ return;
+
set->ops->destroy(set);
module_put(to_set_type(set->ops)->owner);
kfree(set->name);
@@ -3712,7 +3720,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(set);
}
- if (!list_empty(&set->bindings) ||
+ if (set->use ||
(nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) {
NL_SET_BAD_ATTR(extack, attr);
return -EBUSY;
@@ -3742,6 +3750,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *i;
struct nft_set_iter iter;
+ if (set->use == UINT_MAX)
+ return -EOVERFLOW;
+
if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
return -EBUSY;
@@ -3769,6 +3780,7 @@ bind:
binding->chain = ctx->chain;
list_add_tail_rcu(&binding->list, &set->bindings);
nft_set_trans_bind(ctx, set);
+ set->use++;
return 0;
}
@@ -3788,6 +3800,25 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
}
EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding,
+ enum nft_trans_phase phase)
+{
+ switch (phase) {
+ case NFT_TRANS_PREPARE:
+ set->use--;
+ return;
+ case NFT_TRANS_ABORT:
+ case NFT_TRANS_RELEASE:
+ set->use--;
+ /* fall through */
+ default:
+ nf_tables_unbind_set(ctx, set, binding,
+ phase == NFT_TRANS_COMMIT);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
+
void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
{
if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
@@ -6536,6 +6567,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
struct nft_chain *chain;
struct nft_table *table;
+ if (list_empty(&net->nft.commit_list)) {
+ mutex_unlock(&net->nft.commit_mutex);
+ return 0;
+ }
+
/* 0. Validate ruleset, otherwise roll back for error reporting. */
if (nf_tables_validate(net) < 0)
return -EAGAIN;
@@ -6709,8 +6745,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_NEWSET:
- if (!nft_trans_set_bound(trans))
- nft_set_destroy(nft_trans_set(trans));
+ nft_set_destroy(nft_trans_set(trans));
break;
case NFT_MSG_NEWSETELEM:
nft_set_elem_destroy(nft_trans_elem_set(trans),
@@ -6783,8 +6818,11 @@ static int __nf_tables_abort(struct net *net)
break;
case NFT_MSG_NEWSET:
trans->ctx.table->use--;
- if (!nft_trans_set_bound(trans))
- list_del_rcu(&nft_trans_set(trans)->list);
+ if (nft_trans_set(trans)->bound) {
+ nft_trans_destroy(trans);
+ break;
+ }
+ list_del_rcu(&nft_trans_set(trans)->list);
break;
case NFT_MSG_DELSET:
trans->ctx.table->use++;
@@ -6792,8 +6830,11 @@ static int __nf_tables_abort(struct net *net)
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSETELEM:
+ if (nft_trans_elem_set(trans)->bound) {
+ nft_trans_destroy(trans);
+ break;
+ }
te = (struct nft_trans_elem *)trans->data;
-
te->set->ops->remove(net, te->set, &te->elem);
atomic_dec(&te->set->nelems);
break;
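The nf_tables_api.c changes account set bindings through set->use and route expression deactivation through nf_tables_deactivate_set(), whose behaviour depends on the transaction phase: PREPARE only drops the use count (the transaction can still abort and re-activate), while ABORT and RELEASE also fall through to unbinding. A condensed sketch of that switch with simplified types:

enum trans_phase { TRANS_PREPARE, TRANS_ABORT, TRANS_RELEASE, TRANS_COMMIT };

struct set { unsigned int use; int bound; };

static void unbind(struct set *s)
{
	s->bound = 0;
}

static void deactivate_set(struct set *s, enum trans_phase phase)
{
	switch (phase) {
	case TRANS_PREPARE:
		s->use--;	/* may be undone if the transaction aborts */
		return;
	case TRANS_ABORT:
	case TRANS_RELEASE:
		s->use--;
		/* fall through: the binding really goes away */
	default:
		unbind(s);
	}
}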
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index b1f9c5303f02..0b3347570265 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -540,7 +540,7 @@ __build_packet_message(struct nfnl_log_net *log,
goto nla_put_failure;
}
- if (skb->tstamp) {
+ if (hooknum <= NF_INET_FORWARD && skb->tstamp) {
struct nfulnl_msg_packet_timestamp ts;
struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
ts.sec = cpu_to_be64(kts.tv_sec);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 0dcc3592d053..e057b2961d31 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -582,7 +582,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
if (nfqnl_put_bridge(entry, skb) < 0)
goto nla_put_failure;
- if (entskb->tstamp) {
+ if (entry->state.hook <= NF_INET_FORWARD && entskb->tstamp) {
struct nfqnl_msg_packet_timestamp ts;
struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index a8a74a16f9c4..e461007558e8 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -240,11 +240,15 @@ static void nft_dynset_deactivate(const struct nft_ctx *ctx,
{
struct nft_dynset *priv = nft_expr_priv(expr);
- if (phase == NFT_TRANS_PREPARE)
- return;
+ nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_dynset_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_dynset *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding,
- phase == NFT_TRANS_COMMIT);
+ priv->set->use++;
}
static void nft_dynset_destroy(const struct nft_ctx *ctx,
@@ -292,6 +296,7 @@ static const struct nft_expr_ops nft_dynset_ops = {
.eval = nft_dynset_eval,
.init = nft_dynset_init,
.destroy = nft_dynset_destroy,
+ .activate = nft_dynset_activate,
.deactivate = nft_dynset_deactivate,
.dump = nft_dynset_dump,
};
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 14496da5141d..161c3451a747 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -127,11 +127,15 @@ static void nft_lookup_deactivate(const struct nft_ctx *ctx,
{
struct nft_lookup *priv = nft_expr_priv(expr);
- if (phase == NFT_TRANS_PREPARE)
- return;
+ nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_lookup_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_lookup *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding,
- phase == NFT_TRANS_COMMIT);
+ priv->set->use++;
}
static void nft_lookup_destroy(const struct nft_ctx *ctx,
@@ -222,6 +226,7 @@ static const struct nft_expr_ops nft_lookup_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
.eval = nft_lookup_eval,
.init = nft_lookup_init,
+ .activate = nft_lookup_activate,
.deactivate = nft_lookup_deactivate,
.destroy = nft_lookup_destroy,
.dump = nft_lookup_dump,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 79ef074c18ca..8dfa798ea683 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -65,21 +65,34 @@ nla_put_failure:
return -1;
}
-static void nft_objref_destroy(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
+static void nft_objref_deactivate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ enum nft_trans_phase phase)
{
struct nft_object *obj = nft_objref_priv(expr);
+ if (phase == NFT_TRANS_COMMIT)
+ return;
+
obj->use--;
}
+static void nft_objref_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_object *obj = nft_objref_priv(expr);
+
+ obj->use++;
+}
+
static struct nft_expr_type nft_objref_type;
static const struct nft_expr_ops nft_objref_ops = {
.type = &nft_objref_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
.eval = nft_objref_eval,
.init = nft_objref_init,
- .destroy = nft_objref_destroy,
+ .activate = nft_objref_activate,
+ .deactivate = nft_objref_deactivate,
.dump = nft_objref_dump,
};
@@ -162,11 +175,15 @@ static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
{
struct nft_objref_map *priv = nft_expr_priv(expr);
- if (phase == NFT_TRANS_PREPARE)
- return;
+ nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_objref_map_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_objref_map *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding,
- phase == NFT_TRANS_COMMIT);
+ priv->set->use++;
}
static void nft_objref_map_destroy(const struct nft_ctx *ctx,
@@ -183,6 +200,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
.eval = nft_objref_map_eval,
.init = nft_objref_map_init,
+ .activate = nft_objref_map_activate,
.deactivate = nft_objref_map_deactivate,
.destroy = nft_objref_map_destroy,
.dump = nft_objref_map_dump,
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index f8092926f704..a340cd8a751b 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -233,5 +233,5 @@ module_exit(nft_redir_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
-MODULE_ALIAS_NFT_AF_EXPR(AF_INET4, "redir");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir");
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index fa61208371f8..321a0036fdf5 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -308,10 +308,6 @@ static void *nft_rbtree_deactivate(const struct net *net,
else if (d > 0)
parent = parent->rb_right;
else {
- if (!nft_set_elem_active(&rbe->ext, genmask)) {
- parent = parent->rb_left;
- continue;
- }
if (nft_rbtree_interval_end(rbe) &&
!nft_rbtree_interval_end(this)) {
parent = parent->rb_left;
@@ -320,6 +316,9 @@ static void *nft_rbtree_deactivate(const struct net *net,
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
+ } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+ parent = parent->rb_left;
+ continue;
}
nft_rbtree_flush(net, set, rbe);
return rbe;
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index c13bcd0ab491..8dbb4d48f2ed 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -163,19 +163,24 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
s64 stamp;
/*
- * We cannot use get_seconds() instead of __net_timestamp() here.
+ * We need real time here, but we can neither use skb->tstamp
+ * nor __net_timestamp().
+ *
+ * skb->tstamp and skb->skb_mstamp_ns overlap, however, they
+ * use different clock types (real vs monotonic).
+ *
* Suppose you have two rules:
- * 1. match before 13:00
- * 2. match after 13:00
+ * 1. match before 13:00
+ * 2. match after 13:00
+ *
* If you match against processing time (get_seconds) it
* may happen that the same packet matches both rules if
- * it arrived at the right moment before 13:00.
+ * it arrived at the right moment before 13:00, so it would be
+ * better to check skb->tstamp and set it via __net_timestamp()
+ * if needed. This, however, breaks the tx timestamp of outgoing
+ * packets and causes them to be delayed forever by the fq packet
+ * scheduler.
*/
- if (skb->tstamp == 0)
- __net_timestamp((struct sk_buff *)skb);
-
- stamp = ktime_to_ns(skb->tstamp);
- stamp = div_s64(stamp, NSEC_PER_SEC);
+ stamp = get_seconds();
if (info->flags & XT_TIME_LOCAL_TZ)
/* Adjust for local timezone */
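The rewritten xt_time comment explains why the match reverts to wall-clock sampling: stamping skb->tstamp with real time corrupts the tx timestamps that fq relies on. What the match now effectively does, sketched with time() standing in for get_seconds() (tm_gmtoff is a glibc/BSD extension):

#define _DEFAULT_SOURCE
#include <stdint.h>
#include <time.h>

static int64_t match_stamp(int use_local_tz)
{
	time_t now = time(NULL);	/* real time, second resolution */
	int64_t stamp = now;

	if (use_local_tz) {
		struct tm tm;

		localtime_r(&now, &tm);
		stamp += tm.tm_gmtoff;	/* adjust for local timezone */
	}
	return stamp;
}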
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f28e937320a3..216ab915dd54 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -988,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
int err = 0;
- unsigned long groups = nladdr->nl_groups;
+ unsigned long groups;
bool bound;
if (addr_len < sizeof(struct sockaddr_nl))
@@ -996,6 +996,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
if (nladdr->nl_family != AF_NETLINK)
return -EINVAL;
+ groups = nladdr->nl_groups;
/* Only superuser is allowed to listen multicasts */
if (groups) {
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 25eeb6d2a75a..cb69d35c8e6a 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -362,11 +362,11 @@ int genl_register_family(struct genl_family *family)
} else
family->attrbuf = NULL;
- family->id = idr_alloc(&genl_fam_idr, family,
- start, end + 1, GFP_KERNEL);
+ family->id = idr_alloc_cyclic(&genl_fam_idr, family,
+ start, end + 1, GFP_KERNEL);
if (family->id < 0) {
err = family->id;
- goto errout_locked;
+ goto errout_free;
}
err = genl_validate_assign_mc_groups(family);
@@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
errout_remove:
idr_remove(&genl_fam_idr, family->id);
+errout_free:
kfree(family->attrbuf);
errout_locked:
genl_unlock_all();
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 1d3144d19903..71ffd1a6dc7c 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
int i;
int rc = proto_register(&nr_proto, 0);
- if (rc != 0)
- goto out;
+ if (rc)
+ return rc;
if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
- return -1;
+ pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+ __func__);
+ rc = -EINVAL;
+ goto unregister_proto;
}
dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
- if (dev_nr == NULL) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
- return -1;
+ if (!dev_nr) {
+ pr_err("NET/ROM: %s - unable to allocate device array\n",
+ __func__);
+ rc = -ENOMEM;
+ goto unregister_proto;
}
for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
sprintf(name, "nr%d", i);
dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
if (!dev) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
+ rc = -ENOMEM;
goto fail;
}
dev->base_addr = i;
- if (register_netdev(dev)) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
+ rc = register_netdev(dev);
+ if (rc) {
free_netdev(dev);
goto fail;
}
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
dev_nr[i] = dev;
}
- if (sock_register(&nr_family_ops)) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
+ rc = sock_register(&nr_family_ops);
+ if (rc)
goto fail;
- }
- register_netdevice_notifier(&nr_dev_notifier);
+ rc = register_netdevice_notifier(&nr_dev_notifier);
+ if (rc)
+ goto out_sock;
ax25_register_pid(&nr_pid);
ax25_linkfail_register(&nr_linkfail_notifier);
#ifdef CONFIG_SYSCTL
- nr_register_sysctl();
+ rc = nr_register_sysctl();
+ if (rc)
+ goto out_sysctl;
#endif
nr_loopback_init();
- proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
- proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
- proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
- return rc;
+ rc = -ENOMEM;
+ if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+ goto proc_remove1;
+ if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+ &nr_neigh_seqops))
+ goto proc_remove2;
+ if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+ &nr_node_seqops))
+ goto proc_remove3;
+
+ return 0;
+
+proc_remove3:
+ remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+ remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+ nr_loopback_clear();
+ nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+ nr_unregister_sysctl();
+out_sysctl:
+#endif
+ ax25_linkfail_release(&nr_linkfail_notifier);
+ ax25_protocol_release(AX25_P_NETROM);
+ unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+ sock_unregister(PF_NETROM);
fail:
while (--i >= 0) {
unregister_netdev(dev_nr[i]);
free_netdev(dev_nr[i]);
}
kfree(dev_nr);
+unregister_proto:
proto_unregister(&nr_proto);
- rc = -1;
- goto out;
+ return rc;
}
module_init(nr_proto_init);
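The nr_proto_init() rewrite replaces return -1 (and several leaked registrations) with the usual kernel unwind idiom: every acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. The skeleton, with printf stubs standing in for the real register/unregister calls:

#include <stdio.h>

static int  acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int proto_init(void)
{
	int rc;

	rc = acquire("proto");
	if (rc)
		return rc;
	rc = acquire("socket family");
	if (rc)
		goto unregister_proto;
	rc = acquire("notifier");
	if (rc)
		goto out_sock;
	return 0;

out_sock:
	release("socket family");
unregister_proto:
	release("proto");
	return rc;
}

int main(void)
{
	return proto_init();
}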
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
index 215ad22a9647..93d13f019981 100644
--- a/net/netrom/nr_loopback.c
+++ b/net/netrom/nr_loopback.c
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
}
}
-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
{
del_timer_sync(&loopback_timer);
skb_queue_purge(&loopback_queue);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 6485f593e2f0..b76aa668a94b 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
/*
* Free all memory associated with the nodes and routes lists.
*/
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
{
struct nr_neigh *s = NULL;
struct nr_node *t = NULL;
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index ba1c368b3f18..771011b84270 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
{ }
};
-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
{
nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+ if (!nr_table_header)
+ return -ENOMEM;
+ return 0;
}
void nr_unregister_sysctl(void)
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ae296273ce3d..17dcd0b5eb32 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -726,6 +726,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
llcp_sock->service_name = kmemdup(addr->service_name,
llcp_sock->service_name_len,
GFP_KERNEL);
+ if (!llcp_sock->service_name) {
+ ret = -ENOMEM;
+ goto sock_llcp_release;
+ }
nfc_llcp_sock_link(&local->connecting_sockets, sk);
@@ -745,10 +749,11 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
return ret;
sock_unlink:
- nfc_llcp_put_ssap(local, llcp_sock->ssap);
-
nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+sock_llcp_release:
+ nfc_llcp_put_ssap(local, llcp_sock->ssap);
+
put_dev:
nfc_put_device(dev);
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index ddfc52ac1f9b..c0d323b58e73 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
create_info = (struct nci_hci_create_pipe_resp *)skb->data;
dest_gate = create_info->dest_gate;
new_pipe = create_info->pipe;
+ if (new_pipe >= NCI_HCI_MAX_PIPES) {
+ status = NCI_HCI_ANY_E_NOK;
+ goto exit;
+ }
/* Save the new created pipe and bind with local gate,
* the description for skb->data[3] is destination gate id
@@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
goto exit;
}
delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
+ if (delete_info->pipe >= NCI_HCI_MAX_PIPES) {
+ status = NCI_HCI_ANY_E_NOK;
+ goto exit;
+ }
ndev->hci_dev->pipes[delete_info->pipe].gate =
NCI_HCI_INVALID_GATE;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6679e96ab1dc..9dd158ab51b3 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -448,6 +448,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
0, upcall_info->cmd);
+ if (!upcall) {
+ err = -EINVAL;
+ goto out;
+ }
upcall->dp_ifindex = dp_ifindex;
err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
@@ -460,6 +464,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
if (upcall_info->egress_tun_info) {
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
+ if (!nla) {
+ err = -EMSGSIZE;
+ goto out;
+ }
err = ovs_nla_put_tunnel_info(user_skb,
upcall_info->egress_tun_info);
BUG_ON(err);
@@ -468,6 +476,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
if (upcall_info->actions_len) {
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
+ if (!nla) {
+ err = -EMSGSIZE;
+ goto out;
+ }
err = ovs_nla_put_actions(upcall_info->actions,
upcall_info->actions_len,
user_skb);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index ba01fc4270bd..5b8e5bd7457b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -30,7 +30,6 @@
#include <linux/in6.h>
#include <linux/jiffies.h>
#include <linux/time.h>
-#include <linux/flex_array.h>
#include <linux/cpumask.h>
#include <net/inet_ecn.h>
#include <net/ip_tunnels.h>
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 691da853bef5..4bdf5e3ac208 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
struct sw_flow_actions *acts;
int new_acts_size;
- int req_size = NLA_ALIGN(attr_len);
+ size_t req_size = NLA_ALIGN(attr_len);
int next_offset = offsetof(struct sw_flow_actions, actions) +
(*sfa)->actions_len;
if (req_size <= (ksize(*sfa) - next_offset))
goto out;
- new_acts_size = ksize(*sfa) * 2;
+ new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 6657606b2b47..66f9553758a5 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -30,7 +30,6 @@
#include <linux/in6.h>
#include <linux/jiffies.h>
#include <linux/time.h>
-#include <linux/flex_array.h>
#include <net/inet_ecn.h>
#include <net/ip_tunnels.h>
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 80ea2a71852e..cfb0098c9a01 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -111,29 +111,6 @@ int ovs_flow_tbl_count(const struct flow_table *table)
return table->count;
}
-static struct flex_array *alloc_buckets(unsigned int n_buckets)
-{
- struct flex_array *buckets;
- int i, err;
-
- buckets = flex_array_alloc(sizeof(struct hlist_head),
- n_buckets, GFP_KERNEL);
- if (!buckets)
- return NULL;
-
- err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
- if (err) {
- flex_array_free(buckets);
- return NULL;
- }
-
- for (i = 0; i < n_buckets; i++)
- INIT_HLIST_HEAD((struct hlist_head *)
- flex_array_get(buckets, i));
-
- return buckets;
-}
-
static void flow_free(struct sw_flow *flow)
{
int cpu;
@@ -168,31 +145,30 @@ void ovs_flow_free(struct sw_flow *flow, bool deferred)
flow_free(flow);
}
-static void free_buckets(struct flex_array *buckets)
-{
- flex_array_free(buckets);
-}
-
-
static void __table_instance_destroy(struct table_instance *ti)
{
- free_buckets(ti->buckets);
+ kvfree(ti->buckets);
kfree(ti);
}
static struct table_instance *table_instance_alloc(int new_size)
{
struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
+ int i;
if (!ti)
return NULL;
- ti->buckets = alloc_buckets(new_size);
-
+ ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
+ GFP_KERNEL);
if (!ti->buckets) {
kfree(ti);
return NULL;
}
+
+ for (i = 0; i < new_size; i++)
+ INIT_HLIST_HEAD(&ti->buckets[i]);
+
ti->n_buckets = new_size;
ti->node_ver = 0;
ti->keep_flows = false;
@@ -249,7 +225,7 @@ static void table_instance_destroy(struct table_instance *ti,
for (i = 0; i < ti->n_buckets; i++) {
struct sw_flow *flow;
- struct hlist_head *head = flex_array_get(ti->buckets, i);
+ struct hlist_head *head = &ti->buckets[i];
struct hlist_node *n;
int ver = ti->node_ver;
int ufid_ver = ufid_ti->node_ver;
@@ -294,7 +270,7 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
ver = ti->node_ver;
while (*bucket < ti->n_buckets) {
i = 0;
- head = flex_array_get(ti->buckets, *bucket);
+ head = &ti->buckets[*bucket];
hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
if (i < *last) {
i++;
@@ -313,8 +289,7 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
hash = jhash_1word(hash, ti->hash_seed);
- return flex_array_get(ti->buckets,
- (hash & (ti->n_buckets - 1)));
+ return &ti->buckets[hash & (ti->n_buckets - 1)];
}
static void table_instance_insert(struct table_instance *ti,
@@ -347,9 +322,7 @@ static void flow_table_copy_flows(struct table_instance *old,
/* Insert in new table. */
for (i = 0; i < old->n_buckets; i++) {
struct sw_flow *flow;
- struct hlist_head *head;
-
- head = flex_array_get(old->buckets, i);
+ struct hlist_head *head = &old->buckets[i];
if (ufid)
hlist_for_each_entry(flow, head,
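With flex_array gone, the openvswitch bucket table becomes a plain (possibly vmalloc-backed) array from kvmalloc_array(), and find_bucket() collapses to an index into it. A userspace sketch with calloc standing in for kvmalloc_array plus the INIT_HLIST_HEAD loop; n_buckets is assumed to be a power of two, as in the kernel table:

#include <stdlib.h>

struct hlist_head { void *first; };

struct table_instance {
	struct hlist_head *buckets;
	unsigned int n_buckets;		/* power of two */
};

static int table_instance_init(struct table_instance *ti, unsigned int n)
{
	ti->buckets = calloc(n, sizeof(*ti->buckets));	/* alloc + zero-init */
	if (!ti->buckets)
		return -1;
	ti->n_buckets = n;
	return 0;
}

static struct hlist_head *find_bucket(struct table_instance *ti,
				      unsigned int hash)
{
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}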
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 2dd9900f533d..de5ec6cf5174 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -29,7 +29,6 @@
#include <linux/in6.h>
#include <linux/jiffies.h>
#include <linux/time.h>
-#include <linux/flex_array.h>
#include <net/inet_ecn.h>
#include <net/ip_tunnels.h>
@@ -37,7 +36,7 @@
#include "flow.h"
struct table_instance {
- struct flex_array *buckets;
+ struct hlist_head *buckets;
unsigned int n_buckets;
struct rcu_head rcu;
int node_ver;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8376bc1c1508..9b81813dd16a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1852,7 +1852,8 @@ oom:
static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
- if (!skb->protocol && sock->type == SOCK_RAW) {
+ if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
+ sock->type == SOCK_RAW) {
skb_reset_mac_header(skb);
skb->protocol = dev_parse_header_protocol(skb);
}
@@ -2601,8 +2602,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
void *ph;
DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
+ unsigned char *addr = NULL;
int tp_len, size_max;
- unsigned char *addr;
void *data;
int len_sum = 0;
int status = TP_STATUS_AVAILABLE;
@@ -2613,7 +2614,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
- addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2623,10 +2623,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
- if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out_put;
+ if (po->sk.sk_socket->type == SOCK_DGRAM) {
+ if (dev && msg->msg_namelen < dev->addr_len +
+ offsetof(struct sockaddr_ll, sll_addr))
+ goto out_put;
+ addr = saddr->sll_addr;
+ }
}
err = -ENXIO;
@@ -2798,7 +2801,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
struct sk_buff *skb;
struct net_device *dev;
__be16 proto;
- unsigned char *addr;
+ unsigned char *addr = NULL;
int err, reserve = 0;
struct sockcm_cookie sockc;
struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2815,7 +2818,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
- addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2823,10 +2825,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
- if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out_unlock;
+ if (sock->type == SOCK_DGRAM) {
+ if (dev && msg->msg_namelen < dev->addr_len +
+ offsetof(struct sockaddr_ll, sll_addr))
+ goto out_unlock;
+ addr = saddr->sll_addr;
+ }
}
err = -ENXIO;
@@ -3243,7 +3248,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
}
mutex_lock(&net->packet.sklist_lock);
- sk_add_node_rcu(sk, &net->packet.sklist);
+ sk_add_node_tail_rcu(sk, &net->packet.sklist);
mutex_unlock(&net->packet.sklist_lock);
preempt_disable();
@@ -3343,20 +3348,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) {
+ int copy_len;
+
/* If the address length field is there to be filled
* in, we fill it in now.
*/
if (sock->type == SOCK_PACKET) {
__sockaddr_check_size(sizeof(struct sockaddr_pkt));
msg->msg_namelen = sizeof(struct sockaddr_pkt);
+ copy_len = msg->msg_namelen;
} else {
struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
msg->msg_namelen = sll->sll_halen +
offsetof(struct sockaddr_ll, sll_addr);
+ copy_len = msg->msg_namelen;
+ if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
+ memset(msg->msg_name +
+ offsetof(struct sockaddr_ll, sll_addr),
+ 0, sizeof(sll->sll_addr));
+ msg->msg_namelen = sizeof(struct sockaddr_ll);
+ }
}
- memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
- msg->msg_namelen);
+ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}
if (pkt_sk(sk)->auxdata) {
@@ -4209,7 +4223,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
struct pgv *pg_vec;
int i;
- pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
+ pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!pg_vec))
goto out;
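
The SOCK_DGRAM hunks above replace the sll_halen-based check with one derived from the device: msg_namelen must cover the fixed part of sockaddr_ll plus the device's full hardware address. A small userspace illustration of that offsetof() arithmetic, assuming Linux headers and a 6-byte Ethernet address:

    /* Sketch of the length check the patch adds for SOCK_DGRAM sends. */
    #include <stddef.h>
    #include <stdio.h>
    #include <linux/if_packet.h>

    static int addr_len_ok(size_t msg_namelen, size_t dev_addr_len)
    {
            return msg_namelen >= dev_addr_len +
                                  offsetof(struct sockaddr_ll, sll_addr);
    }

    int main(void)
    {
            /* A full sockaddr_ll easily covers offsetof(...) + 6 bytes. */
            printf("%d\n", addr_len_ok(sizeof(struct sockaddr_ll), 6));
            return 0;
    }
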
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index d6cc97fbbbb0..2b969f99ef13 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -543,6 +543,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
struct rds_sock *rs = rds_sk_to_rs(sk);
int ret = 0;
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return -EINVAL;
+
lock_sock(sk);
switch (uaddr->sa_family) {
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 17c9d9f0c848..0f4398e7f2a7 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* We allow an RDS socket to be bound to either IPv4 or IPv6
* address.
*/
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return -EINVAL;
if (uaddr->sa_family == AF_INET) {
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 752f92235a38..67a715b076ca 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -333,10 +333,8 @@ static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
unsigned int i;
for_each_sg(sglist, sg, sg_dma_len, i) {
- ib_dma_sync_single_for_cpu(dev,
- ib_sg_dma_address(dev, sg),
- ib_sg_dma_len(dev, sg),
- direction);
+ ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+ sg_dma_len(sg), direction);
}
}
#define ib_dma_sync_sg_for_cpu rds_ib_dma_sync_sg_for_cpu
@@ -350,10 +348,8 @@ static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
unsigned int i;
for_each_sg(sglist, sg, sg_dma_len, i) {
- ib_dma_sync_single_for_device(dev,
- ib_sg_dma_address(dev, sg),
- ib_sg_dma_len(dev, sg),
- direction);
+ ib_dma_sync_single_for_device(dev, sg_dma_address(sg),
+ sg_dma_len(sg), direction);
}
}
#define ib_dma_sync_sg_for_device rds_ib_dma_sync_sg_for_device
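
These ib.h hunks (and the ib_fmr/ib_frmr/ib_recv/ib_send ones below) swap the ib_sg_dma_*() wrappers for the generic scatterlist accessors; by this point the wrappers were plain pass-throughs, so behaviour is unchanged. A mock illustration of what the accessors amount to — the real kernel macros live in <linux/scatterlist.h> and the length accessor may read a separate field depending on CONFIG_NEED_SG_DMA_LENGTH:

    #include <stdint.h>
    #include <stdio.h>

    /* Mock of the kernel structure, reduced to the two fields used here. */
    struct scatterlist { uint64_t dma_address; unsigned int dma_length; };

    #define sg_dma_address(sg) ((sg)->dma_address)
    #define sg_dma_len(sg)     ((sg)->dma_length)

    int main(void)
    {
            struct scatterlist sg = { .dma_address = 0x1000, .dma_length = 4096 };

            printf("%llx %u\n", (unsigned long long)sg_dma_address(&sg),
                   sg_dma_len(&sg));
            return 0;
    }
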
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index e0f70c4051b6..93c0437e6a5f 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
else
pool = rds_ibdev->mr_1m_pool;
+ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+ /* Switch pools if one of the pools is reaching its upper limit */
+ if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ pool = rds_ibdev->mr_1m_pool;
+ else
+ pool = rds_ibdev->mr_8k_pool;
+ }
+
ibmr = rds_ib_try_reuse_ibmr(pool);
if (ibmr)
return ibmr;
@@ -108,8 +119,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
page_cnt = 0;
for (i = 0; i < sg_dma_len; ++i) {
- unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
- u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
+ unsigned int dma_len = sg_dma_len(&scat[i]);
+ u64 dma_addr = sg_dma_address(&scat[i]);
if (dma_addr & ~PAGE_MASK) {
if (i > 0) {
@@ -148,8 +159,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
page_cnt = 0;
for (i = 0; i < sg_dma_len; ++i) {
- unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
- u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
+ unsigned int dma_len = sg_dma_len(&scat[i]);
+ u64 dma_addr = sg_dma_address(&scat[i]);
for (j = 0; j < dma_len; j += PAGE_SIZE)
dma_pages[page_cnt++] =
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 6431a023ac89..688dcd68d4ea 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -181,8 +181,8 @@ static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
ret = -EINVAL;
for (i = 0; i < ibmr->sg_dma_len; ++i) {
- unsigned int dma_len = ib_sg_dma_len(dev, &ibmr->sg[i]);
- u64 dma_addr = ib_sg_dma_address(dev, &ibmr->sg[i]);
+ unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
+ u64 dma_addr = sg_dma_address(&ibmr->sg[i]);
frmr->sg_byte_len += dma_len;
if (dma_addr & ~PAGE_MASK) {
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 63c8d107adcf..d664e9ade74d 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
struct rds_ib_mr *ibmr = NULL;
int iter = 0;
- if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
while (1) {
ibmr = rds_ib_reuse_mr(pool);
if (ibmr)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index d395eec98959..8946c89d7392 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -346,8 +346,8 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
sge->length = sizeof(struct rds_header);
sge = &recv->r_sge[1];
- sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
- sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
+ sge->addr = sg_dma_address(&recv->r_frag->f_sg);
+ sge->length = sg_dma_len(&recv->r_frag->f_sg);
ret = 0;
out:
@@ -409,9 +409,7 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
- (long) ib_sg_dma_address(
- ic->i_cm_id->device,
- &recv->r_frag->f_sg));
+ (long)sg_dma_address(&recv->r_frag->f_sg));
/* XXX when can this fail? */
ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
@@ -774,7 +772,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
unsigned long frag_off;
unsigned long to_copy;
unsigned long copied;
- uint64_t uncongested = 0;
+ __le64 uncongested = 0;
void *addr;
/* catch completely corrupt packets */
@@ -791,7 +789,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
copied = 0;
while (copied < RDS_CONG_MAP_BYTES) {
- uint64_t *src, *dst;
+ __le64 *src, *dst;
unsigned int k;
to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
@@ -826,9 +824,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
}
/* the congestion map is in little endian order */
- uncongested = le64_to_cpu(uncongested);
-
- rds_cong_map_updated(map, uncongested);
+ rds_cong_map_updated(map, le64_to_cpu(uncongested));
}
static void rds_ib_process_recv(struct rds_connection *conn,
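
The __le64 change above is a correctness fix in disguise: the congestion-map words are OR-ed together while still in wire (little-endian) order and converted to CPU order exactly once at the end. That is safe because byte swapping distributes over OR, as this userspace analogue shows:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t wire_words[2] = { htole64(0x1), htole64(0x100) };
            uint64_t uncongested = 0; /* accumulated in little-endian order */

            for (int i = 0; i < 2; i++)
                    uncongested |= wire_words[i];

            /* One conversion at the end; prints 101 on any host. */
            printf("%llx\n", (unsigned long long)le64toh(uncongested));
            return 0;
    }
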
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 09c46f2e97fa..18f2341202f8 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -646,16 +646,16 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
if (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]) {
len = min(RDS_FRAG_SIZE,
- ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
+ sg_dma_len(scat) - rm->data.op_dmaoff);
send->s_wr.num_sge = 2;
- send->s_sge[1].addr = ib_sg_dma_address(dev, scat);
+ send->s_sge[1].addr = sg_dma_address(scat);
send->s_sge[1].addr += rm->data.op_dmaoff;
send->s_sge[1].length = len;
bytes_sent += len;
rm->data.op_dmaoff += len;
- if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
+ if (rm->data.op_dmaoff == sg_dma_len(scat)) {
scat++;
rm->data.op_dmasg++;
rm->data.op_dmaoff = 0;
@@ -809,8 +809,8 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
}
/* Convert our struct scatterlist to struct ib_sge */
- send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
- send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
+ send->s_sge[0].addr = sg_dma_address(op->op_sg);
+ send->s_sge[0].length = sg_dma_len(op->op_sg);
send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
@@ -922,9 +922,8 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
scat != &op->op_sg[op->op_count]; j++) {
- len = ib_sg_dma_len(ic->i_cm_id->device, scat);
- send->s_sge[j].addr =
- ib_sg_dma_address(ic->i_cm_id->device, scat);
+ len = sg_dma_len(scat);
+ send->s_sge[j].addr = sg_dma_address(scat);
send->s_sge[j].length = len;
send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index fd2694174607..faf726e00e27 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -608,7 +608,7 @@ static void rds_tcp_kill_sock(struct net *net)
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
- if (net != c_net || !tc->t_sock)
+ if (net != c_net)
continue;
if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
list_move_tail(&tc->t_tcp_node, &tmp_list);
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 7af4f99c4a93..094a6621f8e8 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
static struct sk_buff_head loopback_queue;
+#define ROSE_LOOPBACK_LIMIT 1000
static struct timer_list loopback_timer;
static void rose_set_loopback_timer(void);
@@ -35,29 +36,27 @@ static int rose_loopback_running(void)
int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
{
- struct sk_buff *skbn;
+ struct sk_buff *skbn = NULL;
- skbn = skb_clone(skb, GFP_ATOMIC);
+ if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
+ skbn = skb_clone(skb, GFP_ATOMIC);
- kfree_skb(skb);
-
- if (skbn != NULL) {
+ if (skbn) {
+ consume_skb(skb);
skb_queue_tail(&loopback_queue, skbn);
if (!rose_loopback_running())
rose_set_loopback_timer();
+ } else {
+ kfree_skb(skb);
}
return 1;
}
-
static void rose_set_loopback_timer(void)
{
- del_timer(&loopback_timer);
-
- loopback_timer.expires = jiffies + 10;
- add_timer(&loopback_timer);
+ mod_timer(&loopback_timer, jiffies + 10);
}
static void rose_loopback_timer(struct timer_list *unused)
@@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused)
struct sock *sk;
unsigned short frametype;
unsigned int lci_i, lci_o;
+ int count;
- while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
+ for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
+ skb = skb_dequeue(&loopback_queue);
+ if (!skb)
+ return;
if (skb->len < ROSE_MIN_LEN) {
kfree_skb(skb);
continue;
@@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused)
kfree_skb(skb);
}
}
+ if (!skb_queue_empty(&loopback_queue))
+ mod_timer(&loopback_timer, jiffies + 1);
}
void __exit rose_loopback_clear(void)
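
The loopback rework bounds both sides of the queue: rose_loopback_queue() refuses to clone once ROSE_LOOPBACK_LIMIT entries are pending, and the timer handler drains at most that many frames per run, re-arming itself while a backlog remains. A compact sketch of that drain-and-re-arm shape, with plain C stand-ins for the skb queue and the timer:

    #include <stdio.h>

    #define LIMIT 1000

    static int queue_len = 2500; /* pretend backlog */

    static int dequeue(void)
    {
            return queue_len > 0 ? (queue_len--, 1) : 0;
    }

    static void timer_fn(void)
    {
            /* Process at most LIMIT entries per timer run. */
            for (int count = 0; count < LIMIT; count++)
                    if (!dequeue())
                            return;
            if (queue_len > 0)
                    printf("re-arm timer, %d left\n", queue_len);
    }

    int main(void)
    {
            timer_fn(); /* re-arm timer, 1500 left */
            timer_fn(); /* re-arm timer, 500 left */
            timer_fn(); /* drains the rest */
            return 0;
    }
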
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7ca57741b2fb..7849f286bb93 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
struct sk_buff *skb;
unsigned char *dptr;
unsigned char lci1, lci2;
- char buffer[100];
- int len, faclen = 0;
+ int maxfaclen = 0;
+ int len, faclen;
+ int reserve;
- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
+ reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
+ len = ROSE_MIN_LEN;
switch (frametype) {
case ROSE_CALL_REQUEST:
len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
- faclen = rose_create_facilities(buffer, rose);
- len += faclen;
+ maxfaclen = 256;
break;
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
break;
}
- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+ skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
+ if (!skb)
return;
/*
* Space for AX.25 header and PID.
*/
- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
+ skb_reserve(skb, reserve);
- dptr = skb_put(skb, skb_tailroom(skb));
+ dptr = skb_put(skb, len);
lci1 = (rose->lci >> 8) & 0x0F;
lci2 = (rose->lci >> 0) & 0xFF;
@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
dptr += ROSE_ADDR_LEN;
memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
- memcpy(dptr, buffer, faclen);
+ faclen = rose_create_facilities(dptr, rose);
+ skb_put(skb, faclen);
dptr += faclen;
break;
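
The rose_write_internal() change drops the 100-byte stack buffer, which the facilities encoding could overrun; instead the skb is sized for headroom plus the fixed frame plus a worst-case facilities block, and skb_put() is called a second time once the real facilities length is known. The buffer arithmetic, with illustrative stand-in sizes:

    #include <stdio.h>
    #include <string.h>

    #define RESERVE    76  /* stand-in for AX.25 headers + PID */
    #define FIXED_LEN  3   /* stand-in for ROSE_MIN_LEN */
    #define MAXFACLEN  256 /* worst case, as the patch assumes */

    int main(void)
    {
            char buf[RESERVE + FIXED_LEN + MAXFACLEN];
            char *dptr = buf + RESERVE;    /* after skb_reserve(skb, reserve) */
            int len = FIXED_LEN;           /* skb_put(skb, len): fixed header */
            int faclen = 40;               /* pretend facilities length */

            memset(dptr, 0, len + faclen); /* frame bytes land here */
            len += faclen;                 /* skb_put(skb, faclen) once known */
            printf("used %d of %d tail bytes\n", len, FIXED_LEN + MAXFACLEN);
            return 0;
    }
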
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 96f2952bbdfd..ae8c5d7f3bf1 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -135,7 +135,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
struct rxrpc_local *local;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
- u16 service_id = srx->srx_service;
+ u16 service_id;
int ret;
_enter("%p,%p,%d", rx, saddr, len);
@@ -143,6 +143,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
ret = rxrpc_validate_address(rx, srx, len);
if (ret < 0)
goto error;
+ service_id = srx->srx_service;
lock_sock(&rx->sk);
@@ -370,18 +371,22 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
* rxrpc_kernel_check_life - Check to see whether a call is still alive
* @sock: The socket the call is on
* @call: The call to check
+ * @_life: Where to store the life value
*
* Allow a kernel service to find out whether a call is still alive - ie. we're
- * getting ACKs from the server. Returns a number representing the life state
- * which can be compared to that returned by a previous call.
+ * getting ACKs from the server. Passes back in *_life a number representing
+ * the life state which can be compared to that returned by a previous call and
+ * returns true if the call is still alive.
*
* If the life state stalls, rxrpc_kernel_probe_life() should be called and
 * then 2*RTT waited.
*/
-u32 rxrpc_kernel_check_life(const struct socket *sock,
- const struct rxrpc_call *call)
+bool rxrpc_kernel_check_life(const struct socket *sock,
+ const struct rxrpc_call *call,
+ u32 *_life)
{
- return call->acks_latest;
+ *_life = call->acks_latest;
+ return call->state != RXRPC_CALL_COMPLETE;
}
EXPORT_SYMBOL(rxrpc_kernel_check_life);
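
With the new signature, callers get the liveness number through *_life and a boolean saying whether the call has already completed. A hypothetical caller in kernel-style C — real users such as fs/afs track the previous value per call, and last_life here is a made-up stand-in for that state:

    static u32 last_life; /* hypothetical per-call state */

    static void poll_call(struct socket *sock, struct rxrpc_call *call)
    {
            u32 life;

            if (!rxrpc_kernel_check_life(sock, call, &life)) {
                    /* RXRPC_CALL_COMPLETE: stop waiting on this call. */
                    return;
            }
            if (life == last_life)
                    /* Life value stalled: probe, then wait 2*RTT. */
                    rxrpc_kernel_probe_life(sock, call);
            last_life = life;
    }
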
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 4b1a534d290a..062ca9dc29b8 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -654,6 +654,7 @@ struct rxrpc_call {
u8 ackr_reason; /* reason to ACK */
u16 ackr_skew; /* skew on packet being ACK'd */
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
+ rxrpc_serial_t ackr_first_seq; /* first sequence number received */
rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8aa2937b069f..fe96881a334d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -604,30 +604,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
_enter("");
- if (list_empty(&rxnet->calls))
- return;
+ if (!list_empty(&rxnet->calls)) {
+ write_lock(&rxnet->call_lock);
- write_lock(&rxnet->call_lock);
+ while (!list_empty(&rxnet->calls)) {
+ call = list_entry(rxnet->calls.next,
+ struct rxrpc_call, link);
+ _debug("Zapping call %p", call);
- while (!list_empty(&rxnet->calls)) {
- call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
- _debug("Zapping call %p", call);
+ rxrpc_see_call(call);
+ list_del_init(&call->link);
- rxrpc_see_call(call);
- list_del_init(&call->link);
+ pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+ call, atomic_read(&call->usage),
+ rxrpc_call_states[call->state],
+ call->flags, call->events);
- pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
- call, atomic_read(&call->usage),
- rxrpc_call_states[call->state],
- call->flags, call->events);
+ write_unlock(&rxnet->call_lock);
+ cond_resched();
+ write_lock(&rxnet->call_lock);
+ }
write_unlock(&rxnet->call_lock);
- cond_resched();
- write_lock(&rxnet->call_lock);
}
- write_unlock(&rxnet->call_lock);
-
atomic_dec(&rxnet->nr_calls);
wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index b2adfa825363..83797b3949e2 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -353,7 +353,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
* normally have to take channel_lock but we do this before anyone else
* can see the connection.
*/
- list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
+ list_add(&call->chan_wait_link, &candidate->waiting_calls);
if (cp->exclusive) {
call->conn = candidate;
@@ -432,7 +432,7 @@ found_extant_conn:
call->conn = conn;
call->security_ix = conn->security_ix;
call->service_id = conn->service_id;
- list_add(&call->chan_wait_link, &conn->waiting_calls);
+ list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
spin_unlock(&conn->channel_lock);
_leave(" = 0 [extant %d]", conn->debug_id);
return 0;
@@ -704,6 +704,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
ret = rxrpc_wait_for_channel(call, gfp);
if (ret < 0) {
+ trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
rxrpc_disconnect_client_call(call);
goto out;
}
@@ -774,16 +775,22 @@ static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
*/
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
- unsigned int channel = call->cid & RXRPC_CHANNELMASK;
struct rxrpc_connection *conn = call->conn;
- struct rxrpc_channel *chan = &conn->channels[channel];
+ struct rxrpc_channel *chan = NULL;
struct rxrpc_net *rxnet = conn->params.local->rxnet;
+ unsigned int channel = -1;
+ u32 cid;
+ spin_lock(&conn->channel_lock);
+
+ cid = call->cid;
+ if (cid) {
+ channel = cid & RXRPC_CHANNELMASK;
+ chan = &conn->channels[channel];
+ }
trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
call->conn = NULL;
- spin_lock(&conn->channel_lock);
-
/* Calls that have never actually been assigned a channel can simply be
* discarded. If the conn didn't get used either, it will follow
* immediately unless someone else grabs it in the meantime.
@@ -807,7 +814,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
goto out;
}
- ASSERTCMP(rcu_access_pointer(chan->call), ==, call);
+ if (rcu_access_pointer(chan->call) != call) {
+ spin_unlock(&conn->channel_lock);
+ BUG();
+ }
/* If a client call was exposed to the world, we save the result for
* retransmission.
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index b6fca8ebb117..8d31fb4c51e1 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -153,7 +153,8 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
* pass a connection-level abort onto all calls on that connection
*/
static void rxrpc_abort_calls(struct rxrpc_connection *conn,
- enum rxrpc_call_completion compl)
+ enum rxrpc_call_completion compl,
+ rxrpc_serial_t serial)
{
struct rxrpc_call *call;
int i;
@@ -173,6 +174,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
call->call_id, 0,
conn->abort_code,
conn->error);
+ else
+ trace_rxrpc_rx_abort(call, serial,
+ conn->abort_code);
if (rxrpc_set_call_completion(call, compl,
conn->abort_code,
conn->error))
@@ -213,8 +217,6 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
conn->state = RXRPC_CONN_LOCALLY_ABORTED;
spin_unlock_bh(&conn->state_lock);
- rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
-
msg.msg_name = &conn->params.peer->srx.transport;
msg.msg_namelen = conn->params.peer->srx.transport_len;
msg.msg_control = NULL;
@@ -242,6 +244,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
len = iov[0].iov_len + iov[1].iov_len;
serial = atomic_inc_return(&conn->serial);
+ rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
whdr.serial = htonl(serial);
_proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
@@ -321,7 +324,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
conn->error = -ECONNABORTED;
conn->abort_code = abort_code;
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
- rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
+ rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
return -ECONNABORTED;
case RXRPC_PACKET_TYPE_CHALLENGE:
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9128aa0e40aa..c2c35cf4e308 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -837,7 +837,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
u8 acks[RXRPC_MAXACKS];
} buf;
rxrpc_serial_t acked_serial;
- rxrpc_seq_t first_soft_ack, hard_ack;
+ rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
int nr_acks, offset, ioffset;
_enter("");
@@ -851,13 +851,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
acked_serial = ntohl(buf.ack.serial);
first_soft_ack = ntohl(buf.ack.firstPacket);
+ prev_pkt = ntohl(buf.ack.previousPacket);
hard_ack = first_soft_ack - 1;
nr_acks = buf.ack.nAcks;
summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
buf.ack.reason : RXRPC_ACK__INVALID);
trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
- first_soft_ack, ntohl(buf.ack.previousPacket),
+ first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
@@ -878,8 +879,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_propose_ack_respond_to_ack);
}
- /* Discard any out-of-order or duplicate ACKs. */
- if (before_eq(sp->hdr.serial, call->acks_latest))
+ /* Discard any out-of-order or duplicate ACKs (outside lock). */
+ if (before(first_soft_ack, call->ackr_first_seq) ||
+ before(prev_pkt, call->ackr_prev_seq))
return;
buf.info.rxMTU = 0;
@@ -890,12 +892,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
spin_lock(&call->input_lock);
- /* Discard any out-of-order or duplicate ACKs. */
- if (before_eq(sp->hdr.serial, call->acks_latest))
+ /* Discard any out-of-order or duplicate ACKs (inside lock). */
+ if (before(first_soft_ack, call->ackr_first_seq) ||
+ before(prev_pkt, call->ackr_prev_seq))
goto out;
call->acks_latest_ts = skb->tstamp;
call->acks_latest = sp->hdr.serial;
+ call->ackr_first_seq = first_soft_ack;
+ call->ackr_prev_seq = prev_pkt;
+
/* Parse rwind and mtu sizes if provided. */
if (buf.info.rxMTU)
rxrpc_input_ackinfo(call, skb, &buf.info);
@@ -1155,19 +1161,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
* handle data received on the local endpoint
* - may be called in interrupt context
*
- * The socket is locked by the caller and this prevents the socket from being
- * shut down and the local endpoint from going away, thus sk_user_data will not
- * be cleared until this function returns.
+ * [!] Note that as this is called from the encap_rcv hook, the socket is not
+ * held locked by the caller and nothing prevents sk_user_data on the UDP
+ * socket from being cleared in the middle of processing this function.
*
* Called with the RCU read lock held from the IP layer via UDP.
*/
int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
{
+ struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
struct rxrpc_connection *conn;
struct rxrpc_channel *chan;
struct rxrpc_call *call = NULL;
struct rxrpc_skb_priv *sp;
- struct rxrpc_local *local = udp_sk->sk_user_data;
struct rxrpc_peer *peer = NULL;
struct rxrpc_sock *rx = NULL;
unsigned int channel;
@@ -1175,6 +1181,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
_enter("%p", udp_sk);
+ if (unlikely(!local)) {
+ kfree_skb(skb);
+ return 0;
+ }
if (skb->tstamp == 0)
skb->tstamp = ktime_get_real();
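
The duplicate-ACK test in the input.c hunks above no longer compares ACK serial numbers; it discards an ACK whose firstPacket or previousPacket value goes backwards. before() is the usual wrap-safe sequence comparison, reimplemented here for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static int before(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) < 0; /* wrap-safe "a precedes b" */
    }

    int main(void)
    {
            uint32_t first_soft_ack = 5, prev_pkt = 9;
            uint32_t ackr_first_seq = 6, ackr_prev_seq = 8;

            /* firstPacket regressed (5 < 6), so the ACK is stale. */
            if (before(first_soft_ack, ackr_first_seq) ||
                before(prev_pkt, ackr_prev_seq))
                    printf("discard out-of-order ACK\n");
            return 0;
    }
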
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 15cf42d5b53a..01959db51445 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -304,7 +304,8 @@ nomem:
ret = -ENOMEM;
sock_error:
mutex_unlock(&rxnet->local_mutex);
- kfree(local);
+ if (local)
+ call_rcu(&local->rcu, rxrpc_local_rcu);
_leave(" = %d", ret);
return ERR_PTR(ret);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 736aa9281100..004c762c2e8d 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -335,7 +335,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
struct kvec iov[2];
rxrpc_serial_t serial;
size_t len;
- bool lost = false;
int ret, opt;
_enter(",{%d}", skb->len);
@@ -393,14 +392,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
static int lose;
if ((lose++ & 7) == 7) {
ret = 0;
- lost = true;
+ trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
+ whdr.flags, retrans, true);
+ goto done;
}
}
- trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
- retrans, lost);
- if (lost)
- goto done;
+ trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
+ false);
/* send the packet with the don't fragment bit set if we currently
* think it's small enough */
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index bc05af89fc38..6e84d878053c 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -157,6 +157,11 @@ void rxrpc_error_report(struct sock *sk)
_enter("%p{%d}", sk, local->debug_id);
+ /* Clear the outstanding error value on the socket so that it doesn't
+ * cause kernel_sendmsg() to return it later.
+ */
+ sock_error(sk);
+
skb = sock_dequeue_err_skb(sk);
if (!skb) {
_leave("UDP socket errqueue empty");
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 46c9312085b1..bec64deb7b0a 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -152,12 +152,13 @@ static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
}
/*
- * Queue a DATA packet for transmission, set the resend timeout and send the
- * packet immediately
+ * Queue a DATA packet for transmission, set the resend timeout and send
+ * the packet immediately. Returns the error from rxrpc_send_data_packet()
+ * in case the caller wants to do something with it.
*/
-static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
- struct sk_buff *skb, bool last,
- rxrpc_notify_end_tx_t notify_end_tx)
+static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+ struct sk_buff *skb, bool last,
+ rxrpc_notify_end_tx_t notify_end_tx)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned long now;
@@ -250,7 +251,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
out:
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
- _leave("");
+ _leave(" = %d", ret);
+ return ret;
}
/*
@@ -423,9 +425,10 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
if (ret < 0)
goto out;
- rxrpc_queue_packet(rx, call, skb,
- !msg_data_left(msg) && !more,
- notify_end_tx);
+ ret = rxrpc_queue_packet(rx, call, skb,
+ !msg_data_left(msg) && !more,
+ notify_end_tx);
+ /* Should check for failure here */
skb = NULL;
}
} while (msg_data_left(msg) > 0);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 1b9afdee5ba9..5c02ad97ef23 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -358,8 +358,7 @@ config NET_SCH_PIE
help
Say Y here if you want to use the Proportional Integral controller
Enhanced scheduler packet scheduling algorithm.
- For more information, please see
- http://tools.ietf.org/html/draft-pan-tsvwg-pie-00
+ For more information, please see https://tools.ietf.org/html/rfc8033
To compile this driver as a module, choose M here: the module
will be called sch_pie.
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index aecf1bf233c8..5a87e271d35a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -28,27 +28,10 @@
#include <net/act_api.h>
#include <net/netlink.h>
-static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
-{
- u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
-
- if (!tp)
- return -EINVAL;
- a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index);
- if (!a->goto_chain)
- return -ENOMEM;
- return 0;
-}
-
-static void tcf_action_goto_chain_fini(struct tc_action *a)
-{
- tcf_chain_put_by_act(a->goto_chain);
-}
-
static void tcf_action_goto_chain_exec(const struct tc_action *a,
struct tcf_result *res)
{
- const struct tcf_chain *chain = a->goto_chain;
+ const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}
@@ -71,6 +54,51 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
call_rcu(&old->rcu, tcf_free_cookie_rcu);
}
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+ struct tcf_chain **newchain,
+ struct netlink_ext_ack *extack)
+{
+ int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
+ u32 chain_index;
+
+ if (!opcode)
+ ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
+ else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
+ ret = 0;
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "invalid control action");
+ goto end;
+ }
+
+ if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
+ chain_index = action & TC_ACT_EXT_VAL_MASK;
+ if (!tp || !newchain) {
+ ret = -EINVAL;
+ NL_SET_ERR_MSG(extack,
+ "can't goto NULL proto/chain");
+ goto end;
+ }
+ *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
+ if (!*newchain) {
+ ret = -ENOMEM;
+ NL_SET_ERR_MSG(extack,
+ "can't allocate goto_chain");
+ }
+ }
+end:
+ return ret;
+}
+EXPORT_SYMBOL(tcf_action_check_ctrlact);
+
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+ struct tcf_chain *goto_chain)
+{
+ a->tcfa_action = action;
+ rcu_swap_protected(a->goto_chain, goto_chain, 1);
+ return goto_chain;
+}
+EXPORT_SYMBOL(tcf_action_set_ctrlact);
+
/* XXX: For standalone actions, we don't need a RCU grace period either, because
* actions are always connected to filters and filters are already destroyed in
* RCU callbacks, so after a RCU grace period actions are already disconnected
@@ -78,13 +106,15 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
*/
static void free_tcf(struct tc_action *p)
{
+ struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
+
free_percpu(p->cpu_bstats);
free_percpu(p->cpu_bstats_hw);
free_percpu(p->cpu_qstats);
tcf_set_action_cookie(&p->act_cookie, NULL);
- if (p->goto_chain)
- tcf_action_goto_chain_fini(p);
+ if (chain)
+ tcf_chain_put_by_act(chain);
kfree(p);
}
@@ -654,6 +684,10 @@ repeat:
return TC_ACT_OK;
}
} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
+ if (unlikely(!rcu_access_pointer(a->goto_chain))) {
+ net_warn_ratelimited("can't go to NULL chain!\n");
+ return TC_ACT_SHOT;
+ }
tcf_action_goto_chain_exec(a, res);
}
@@ -800,15 +834,6 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
return c;
}
-static bool tcf_action_valid(int action)
-{
- int opcode = TC_ACT_EXT_OPCODE(action);
-
- if (!opcode)
- return action <= TC_ACT_VALUE_MAX;
- return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC;
-}
-
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind,
@@ -890,10 +915,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
/* backward compatibility for policer */
if (name == NULL)
err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
- rtnl_held, extack);
+ rtnl_held, tp, extack);
else
err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
- extack);
+ tp, extack);
if (err < 0)
goto err_mod;
@@ -907,18 +932,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
if (err != ACT_P_CREATED)
module_put(a_o->owner);
- if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
- err = tcf_action_goto_chain_init(a, tp);
- if (err) {
- tcf_action_destroy_1(a, bind);
- NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
- return ERR_PTR(err);
- }
- }
-
- if (!tcf_action_valid(a->tcfa_action)) {
+ if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
+ !rcu_access_pointer(a->goto_chain)) {
tcf_action_destroy_1(a, bind);
- NL_SET_ERR_MSG(extack, "Invalid control action value");
+ NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
return ERR_PTR(-EINVAL);
}
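
Every act_* conversion below follows the same shape, so it is worth seeing once in condensed form: validate the control action (taking a reference on any goto chain) before touching the action, publish under the per-action lock, then release the old chain outside the lock. Condensed from the diffs in this series; p stands for the action's private struct and the error labels are abbreviated:

    struct tcf_chain *goto_ch = NULL;

    err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
    if (err < 0)
            goto release_idr;       /* drop the idr reference taken earlier */

    spin_lock_bh(&p->tcf_lock);
    /* Publish the action and swap in the (possibly NULL) new chain;
     * the old chain comes back so it can be released outside the lock.
     */
    goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
    spin_unlock_bh(&p->tcf_lock);

    if (goto_ch)
            tcf_chain_put_by_act(goto_ch);
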
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index aa5c38d11a30..3841156aa09f 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -17,6 +17,7 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>
@@ -278,10 +279,11 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act,
int replace, int bind, bool rtnl_held,
- struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, bpf_net_id);
struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tcf_bpf_cfg cfg, old;
struct tc_act_bpf *parm;
struct tcf_bpf *prog;
@@ -323,12 +325,16 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
return ret;
}
+ ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (ret < 0)
+ goto release_idr;
+
is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
is_ebpf = tb[TCA_ACT_BPF_FD];
if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
ret = -EINVAL;
- goto out;
+ goto put_chain;
}
memset(&cfg, 0, sizeof(cfg));
@@ -336,7 +342,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
tcf_bpf_init_from_efd(tb, &cfg);
if (ret < 0)
- goto out;
+ goto put_chain;
prog = to_bpf(*act);
@@ -350,10 +356,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
if (cfg.bpf_num_ops)
prog->bpf_num_ops = cfg.bpf_num_ops;
- prog->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
rcu_assign_pointer(prog->filter, cfg.filter);
spin_unlock_bh(&prog->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
if (res == ACT_P_CREATED) {
tcf_idr_insert(tn, *act);
} else {
@@ -363,9 +372,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
}
return res;
-out:
- tcf_idr_release(*act, bind);
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+release_idr:
+ tcf_idr_release(*act, bind);
return ret;
}
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 5d24993cccfe..32ae0cd6e31c 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -21,6 +21,7 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/act_api.h>
+#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_connmark.h>
#include <net/tc_act/tc_connmark.h>
@@ -97,13 +98,15 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
static int tcf_connmark_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
+ struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, connmark_net_id);
struct nlattr *tb[TCA_CONNMARK_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tcf_connmark_info *ci;
struct tc_connmark *parm;
- int ret = 0;
+ int ret = 0, err;
if (!nla)
return -EINVAL;
@@ -128,7 +131,11 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
}
ci = to_connmark(*a);
- ci->tcf_action = parm->action;
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+ extack);
+ if (err < 0)
+ goto release_idr;
+ tcf_action_set_ctrlact(*a, parm->action, goto_ch);
ci->net = net;
ci->zone = parm->zone;
@@ -142,15 +149,24 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
tcf_idr_release(*a, bind);
return -EEXIST;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+ extack);
+ if (err < 0)
+ goto release_idr;
/* replacing action and zone */
spin_lock_bh(&ci->tcf_lock);
- ci->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
ci->zone = parm->zone;
spin_unlock_bh(&ci->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
ret = 0;
}
return ret;
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index c79aca29505e..0c77e7bdf6d5 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -33,6 +33,7 @@
#include <net/sctp/checksum.h>
#include <net/act_api.h>
+#include <net/pkt_cls.h>
#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>
@@ -46,12 +47,13 @@ static struct tc_action_ops act_csum_ops;
static int tcf_csum_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
- int bind, bool rtnl_held,
+ int bind, bool rtnl_held, struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, csum_net_id);
struct tcf_csum_params *params_new;
struct nlattr *tb[TCA_CSUM_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tc_csum *parm;
struct tcf_csum *p;
int ret = 0, err;
@@ -87,21 +89,27 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
return err;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
+
p = to_tcf_csum(*a);
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
- tcf_idr_release(*a, bind);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto put_chain;
}
params_new->update_flags = parm->update_flags;
spin_lock_bh(&p->tcf_lock);
- p->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
rcu_swap_protected(p->params, params_new,
lockdep_is_held(&p->tcf_lock));
spin_unlock_bh(&p->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (params_new)
kfree_rcu(params_new, rcu);
@@ -109,6 +117,12 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
tcf_idr_insert(tn, *a);
return ret;
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
/**
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 93da0004e9f4..e540e31069d7 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <linux/tc_act/tc_gact.h>
#include <net/tc_act/tc_gact.h>
@@ -57,10 +58,11 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
static int tcf_gact_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, gact_net_id);
struct nlattr *tb[TCA_GACT_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tc_gact *parm;
struct tcf_gact *gact;
int ret = 0;
@@ -116,10 +118,13 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
return err;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
gact = to_gact(*a);
spin_lock_bh(&gact->tcf_lock);
- gact->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
#ifdef CONFIG_GACT_PROB
if (p_parm) {
gact->tcfg_paction = p_parm->paction;
@@ -133,9 +138,15 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
#endif
spin_unlock_bh(&gact->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 9b1f2b3990ee..31c6ffb6abe7 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -29,6 +29,7 @@
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>
@@ -469,11 +470,12 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
static int tcf_ife_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, ife_net_id);
struct nlattr *tb[TCA_IFE_MAX + 1];
struct nlattr *tb2[IFE_META_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tcf_ife_params *p;
struct tcf_ife_info *ife;
u16 ife_type = ETH_P_IFE;
@@ -531,6 +533,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
}
ife = to_ife(*a);
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
+
p->flags = parm->flags;
if (parm->flags & IFE_ENCODE) {
@@ -563,13 +569,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (tb[TCA_IFE_METALST]) {
err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
NULL, NULL);
- if (err) {
-metadata_parse_err:
- tcf_idr_release(*a, bind);
- kfree(p);
- return err;
- }
-
+ if (err)
+ goto metadata_parse_err;
err = populate_metalist(ife, tb2, exists, rtnl_held);
if (err)
goto metadata_parse_err;
@@ -581,21 +582,20 @@ metadata_parse_err:
* going to bail out
*/
err = use_all_metadata(ife, exists);
- if (err) {
- tcf_idr_release(*a, bind);
- kfree(p);
- return err;
- }
+ if (err)
+ goto metadata_parse_err;
}
if (exists)
spin_lock_bh(&ife->tcf_lock);
- ife->tcf_action = parm->action;
/* protected by tcf_lock when modifying existing action */
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
rcu_swap_protected(ife->params, p, 1);
if (exists)
spin_unlock_bh(&ife->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (p)
kfree_rcu(p, rcu);
@@ -603,6 +603,13 @@ metadata_parse_err:
tcf_idr_insert(tn, *a);
return ret;
+metadata_parse_err:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+release_idr:
+ kfree(p);
+ tcf_idr_release(*a, bind);
+ return err;
}
static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 98f5b6ea77b4..04a0b5c61194 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -97,7 +97,8 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- const struct tc_action_ops *ops, int ovr, int bind)
+ const struct tc_action_ops *ops, int ovr, int bind,
+ struct tcf_proto *tp)
{
struct tc_action_net *tn = net_generic(net, id);
struct nlattr *tb[TCA_IPT_MAX + 1];
@@ -205,20 +206,20 @@ err1:
static int tcf_ipt_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
- int bind, bool rtnl_held,
+ int bind, bool rtnl_held, struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
- bind);
+ bind, tp);
}
static int tcf_xt_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
- int bind, bool unlocked,
+ int bind, bool unlocked, struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
- bind);
+ bind, tp);
}
static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 6692fd054617..17cc6bd4c57c 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -94,10 +94,12 @@ static struct tc_action_ops act_mirred_ops;
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
+ struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, mirred_net_id);
struct nlattr *tb[TCA_MIRRED_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
bool mac_header_xmit = false;
struct tc_mirred *parm;
struct tcf_mirred *m;
@@ -157,18 +159,23 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
tcf_idr_release(*a, bind);
return -EEXIST;
}
+
m = to_mirred(*a);
+ if (ret == ACT_P_CREATED)
+ INIT_LIST_HEAD(&m->tcfm_list);
+
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
spin_lock_bh(&m->tcf_lock);
- m->tcf_action = parm->action;
- m->tcfm_eaction = parm->eaction;
if (parm->ifindex) {
dev = dev_get_by_index(net, parm->ifindex);
if (!dev) {
spin_unlock_bh(&m->tcf_lock);
- tcf_idr_release(*a, bind);
- return -ENODEV;
+ err = -ENODEV;
+ goto put_chain;
}
mac_header_xmit = dev_is_mac_header_xmit(dev);
rcu_swap_protected(m->tcfm_dev, dev,
@@ -177,7 +184,11 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
dev_put(dev);
m->tcfm_mac_header_xmit = mac_header_xmit;
}
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+ m->tcfm_eaction = parm->eaction;
spin_unlock_bh(&m->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (ret == ACT_P_CREATED) {
spin_lock(&mirred_list_lock);
@@ -188,6 +199,12 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
}
return ret;
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 543eab9193f1..e91bb8eb81ec 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include <linux/tc_act/tc_nat.h>
#include <net/act_api.h>
+#include <net/pkt_cls.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/netlink.h>
@@ -38,10 +39,12 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_action **a, int ovr, int bind,
- bool rtnl_held, struct netlink_ext_ack *extack)
+ bool rtnl_held, struct tcf_proto *tp,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, nat_net_id);
struct nlattr *tb[TCA_NAT_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tc_nat *parm;
int ret = 0, err;
struct tcf_nat *p;
@@ -76,6 +79,9 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
} else {
return err;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
p = to_tcf_nat(*a);
spin_lock_bh(&p->tcf_lock);
@@ -84,13 +90,18 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
p->mask = parm->mask;
p->flags = parm->flags;
- p->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
spin_unlock_bh(&p->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a80373878df7..287793abfaf9 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -23,6 +23,7 @@
#include <linux/tc_act/tc_pedit.h>
#include <net/tc_act/tc_pedit.h>
#include <uapi/linux/tc_act/tc_pedit.h>
+#include <net/pkt_cls.h>
static unsigned int pedit_net_id;
static struct tc_action_ops act_pedit_ops;
@@ -138,10 +139,11 @@ nla_failure:
static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, pedit_net_id);
struct nlattr *tb[TCA_PEDIT_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tc_pedit_key *keys = NULL;
struct tcf_pedit_key_ex *keys_ex;
struct tc_pedit *parm;
@@ -205,6 +207,11 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
goto out_free;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0) {
+ ret = err;
+ goto out_release;
+ }
p = to_pedit(*a);
spin_lock_bh(&p->tcf_lock);
@@ -214,7 +221,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (!keys) {
spin_unlock_bh(&p->tcf_lock);
ret = -ENOMEM;
- goto out_release;
+ goto put_chain;
}
kfree(p->tcfp_keys);
p->tcfp_keys = keys;
@@ -223,16 +230,21 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
memcpy(p->tcfp_keys, parm->keys, ksize);
p->tcfp_flags = parm->flags;
- p->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
kfree(p->tcfp_keys_ex);
p->tcfp_keys_ex = keys_ex;
spin_unlock_bh(&p->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
out_release:
tcf_idr_release(*a, bind);
out_free:
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 8271a6263824..2b8581f6ab51 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
+#include <net/pkt_cls.h>
struct tcf_police_params {
int tcfp_result;
@@ -83,10 +84,12 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
static int tcf_police_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
+ struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
int ret = 0, tcfp_result = TC_ACT_OK, err, size;
struct nlattr *tb[TCA_POLICE_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tc_police *parm;
struct tcf_police *police;
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
@@ -128,6 +131,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
tcf_idr_release(*a, bind);
return -EEXIST;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
police = to_police(*a);
if (parm->rate.rate) {
@@ -213,12 +219,14 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
if (new->peak_present)
police->tcfp_ptoks = new->tcfp_mtu_ptoks;
spin_unlock_bh(&police->tcfp_lock);
- police->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
rcu_swap_protected(police->params,
new,
lockdep_is_held(&police->tcf_lock));
spin_unlock_bh(&police->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (new)
kfree_rcu(new, rcu);
@@ -229,6 +237,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
failure:
qdisc_put_rtab(P_tab);
qdisc_put_rtab(R_tab);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+release_idr:
tcf_idr_release(*a, bind);
return err;
}
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 203e399e5c85..0f82d50ea232 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -22,6 +22,7 @@
#include <linux/tc_act/tc_sample.h>
#include <net/tc_act/tc_sample.h>
#include <net/psample.h>
+#include <net/pkt_cls.h>
#include <linux/if_arp.h>
@@ -37,14 +38,15 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
static int tcf_sample_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
- int bind, bool rtnl_held,
+ int bind, bool rtnl_held, struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, sample_net_id);
struct nlattr *tb[TCA_SAMPLE_MAX + 1];
struct psample_group *psample_group;
+ struct tcf_chain *goto_ch = NULL;
+ u32 psample_group_num, rate;
struct tc_sample *parm;
- u32 psample_group_num;
struct tcf_sample *s;
bool exists = false;
int ret, err;
@@ -79,19 +81,28 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
tcf_idr_release(*a, bind);
return -EEXIST;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
+ rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+ if (!rate) {
+ NL_SET_ERR_MSG(extack, "invalid sample rate");
+ err = -EINVAL;
+ goto put_chain;
+ }
psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
psample_group = psample_group_get(net, psample_group_num);
if (!psample_group) {
- tcf_idr_release(*a, bind);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto put_chain;
}
s = to_sample(*a);
spin_lock_bh(&s->tcf_lock);
- s->tcf_action = parm->action;
- s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+ s->rate = rate;
s->psample_group_num = psample_group_num;
RCU_INIT_POINTER(s->psample_group, psample_group);
@@ -100,10 +111,18 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
}
spin_unlock_bh(&s->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
static void tcf_sample_cleanup(struct tc_action *a)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index d54cb608dbaf..23c8ca5615e5 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -18,6 +18,7 @@
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <linux/tc_act/tc_defact.h>
#include <net/tc_act/tc_defact.h>
@@ -60,14 +61,26 @@ static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
return 0;
}
-static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
- struct tc_defact *p)
+static int reset_policy(struct tc_action *a, const struct nlattr *defdata,
+ struct tc_defact *p, struct tcf_proto *tp,
+ struct netlink_ext_ack *extack)
{
+ struct tcf_chain *goto_ch = NULL;
+ struct tcf_defact *d;
+ int err;
+
+ err = tcf_action_check_ctrlact(p->action, tp, &goto_ch, extack);
+ if (err < 0)
+ return err;
+ d = to_defact(a);
spin_lock_bh(&d->tcf_lock);
- d->tcf_action = p->action;
+ goto_ch = tcf_action_set_ctrlact(a, p->action, goto_ch);
memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
spin_unlock_bh(&d->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ return 0;
}
static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
@@ -78,10 +91,11 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
static int tcf_simp_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, simp_net_id);
struct nlattr *tb[TCA_DEF_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tc_defact *parm;
struct tcf_defact *d;
bool exists = false;
@@ -122,27 +136,37 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
}
d = to_defact(*a);
- ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
- if (ret < 0) {
- tcf_idr_release(*a, bind);
- return ret;
- }
- d->tcf_action = parm->action;
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+ extack);
+ if (err < 0)
+ goto release_idr;
+
+ err = alloc_defdata(d, tb[TCA_DEF_DATA]);
+ if (err < 0)
+ goto put_chain;
+
+ tcf_action_set_ctrlact(*a, parm->action, goto_ch);
ret = ACT_P_CREATED;
} else {
- d = to_defact(*a);
-
if (!ovr) {
- tcf_idr_release(*a, bind);
- return -EEXIST;
+ err = -EEXIST;
+ goto release_idr;
}
- reset_policy(d, tb[TCA_DEF_DATA], parm);
+ err = reset_policy(*a, tb[TCA_DEF_DATA], parm, tp, extack);
+ if (err)
+ goto release_idr;
}
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 65879500b688..7e1d261a31d2 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -26,6 +26,7 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
+#include <net/pkt_cls.h>
#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>
@@ -96,11 +97,13 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
+ struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
struct tcf_skbedit_params *params_new;
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tc_skbedit *parm;
struct tcf_skbedit *d;
u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
@@ -186,11 +189,14 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
return -EEXIST;
}
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
- tcf_idr_release(*a, bind);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto put_chain;
}
params_new->flags = flags;
@@ -208,16 +214,24 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
params_new->mask = *mask;
spin_lock_bh(&d->tcf_lock);
- d->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
rcu_swap_protected(d->params, params_new,
lockdep_is_held(&d->tcf_lock));
spin_unlock_bh(&d->tcf_lock);
if (params_new)
kfree_rcu(params_new, rcu);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 7bac1d78e7a3..1d4c324d0a42 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -16,6 +16,7 @@
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <linux/tc_act/tc_skbmod.h>
#include <net/tc_act/tc_skbmod.h>
@@ -82,11 +83,13 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
+ struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, skbmod_net_id);
struct nlattr *tb[TCA_SKBMOD_MAX + 1];
struct tcf_skbmod_params *p, *p_old;
+ struct tcf_chain *goto_ch = NULL;
struct tc_skbmod *parm;
struct tcf_skbmod *d;
bool exists = false;
@@ -153,21 +156,24 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
tcf_idr_release(*a, bind);
return -EEXIST;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
d = to_skbmod(*a);
p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
if (unlikely(!p)) {
- tcf_idr_release(*a, bind);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto put_chain;
}
p->flags = lflags;
- d->tcf_action = parm->action;
if (ovr)
spin_lock_bh(&d->tcf_lock);
/* Protected by tcf_lock if overwriting existing action. */
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
p_old = rcu_dereference_protected(d->skbmod_p, 1);
if (lflags & SKBMOD_F_DMAC)
@@ -183,10 +189,18 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
if (p_old)
kfree_rcu(p_old, rcu);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
static void tcf_skbmod_cleanup(struct tc_action *a)
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 3beb4717d3b7..d5aaf90a3971 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -17,6 +17,7 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
+#include <net/pkt_cls.h>
#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -201,26 +202,23 @@ static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
{
if (!p)
return;
- if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
-#ifdef CONFIG_DST_CACHE
- struct ip_tunnel_info *info = &p->tcft_enc_metadata->u.tun_info;
-
- dst_cache_destroy(&info->dst_cache);
-#endif
+ if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
dst_release(&p->tcft_enc_metadata->dst);
- }
+
kfree_rcu(p, rcu);
}
static int tunnel_key_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
+ struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
struct tcf_tunnel_key_params *params_new;
struct metadata_dst *metadata = NULL;
+ struct tcf_chain *goto_ch = NULL;
struct tc_tunnel_key *parm;
struct tcf_tunnel_key *t;
bool exists = false;
@@ -338,7 +336,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
&metadata->u.tun_info,
opts_len, extack);
if (ret < 0)
- goto release_dst_cache;
+ goto release_tun_meta;
}
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
@@ -354,16 +352,22 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
&act_tunnel_key_ops, bind, true);
if (ret) {
NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
- goto release_dst_cache;
+ goto release_tun_meta;
}
ret = ACT_P_CREATED;
} else if (!ovr) {
NL_SET_ERR_MSG(extack, "TC IDR already exists");
ret = -EEXIST;
- goto release_dst_cache;
+ goto release_tun_meta;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0) {
+ ret = err;
+ exists = true;
+ goto release_tun_meta;
+ }
t = to_tunnel_key(*a);
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
@@ -371,29 +375,30 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
ret = -ENOMEM;
exists = true;
- goto release_dst_cache;
+ goto put_chain;
}
params_new->tcft_action = parm->t_action;
params_new->tcft_enc_metadata = metadata;
spin_lock_bh(&t->tcf_lock);
- t->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
rcu_swap_protected(t->params, params_new,
lockdep_is_held(&t->tcf_lock));
spin_unlock_bh(&t->tcf_lock);
tunnel_key_release_params(params_new);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
-release_dst_cache:
-#ifdef CONFIG_DST_CACHE
- if (metadata)
- dst_cache_destroy(&metadata->u.tun_info.dst_cache);
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
release_tun_meta:
-#endif
if (metadata)
dst_release(&metadata->dst);
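
The release_dst_cache label can collapse into release_tun_meta because destroying
the dst_cache by hand was a double free in the making: when the last reference to
a metadata dst goes away, the release path frees the embedded cache itself. A
hedged reconstruction of that helper (close to, but not guaranteed to be, the
mainline metadata_dst_free()):

	void metadata_dst_free(struct metadata_dst *md_dst)
	{
	#ifdef CONFIG_DST_CACHE
		if (md_dst->type == METADATA_IP_TUNNEL)
			dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
	#endif
		kfree(md_dst);
	}

With that in place, dst_release(&metadata->dst) is the only cleanup the error
paths need, whether or not CONFIG_DST_CACHE is set.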
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index ac0061599225..0f40d0a74423 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -15,6 +15,7 @@
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>
@@ -105,10 +106,11 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
- struct netlink_ext_ack *extack)
+ struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, vlan_net_id);
struct nlattr *tb[TCA_VLAN_MAX + 1];
+ struct tcf_chain *goto_ch = NULL;
struct tcf_vlan_params *p;
struct tc_vlan *parm;
struct tcf_vlan *v;
@@ -200,12 +202,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
return -EEXIST;
}
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
+
v = to_vlan(*a);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
- tcf_idr_release(*a, bind);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto put_chain;
}
p->tcfv_action = action;
@@ -214,16 +220,24 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
p->tcfv_push_proto = push_proto;
spin_lock_bh(&v->tcf_lock);
- v->tcf_action = parm->action;
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
spin_unlock_bh(&v->tcf_lock);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
if (p)
kfree_rcu(p, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+put_chain:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+release_idr:
+ tcf_idr_release(*a, bind);
+ return err;
}
static void tcf_vlan_cleanup(struct tc_action *a)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 478095d50f95..99ae30c177c7 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -367,7 +367,7 @@ static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
struct tcf_block *block = chain->block;
mutex_destroy(&chain->filter_chain_lock);
- kfree(chain);
+ kfree_rcu(chain, rcu);
if (free_block)
tcf_block_destroy(block);
}
@@ -470,10 +470,9 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
{
struct tcf_block *block = chain->block;
const struct tcf_proto_ops *tmplt_ops;
- bool is_last, free_block = false;
+ bool free_block = false;
unsigned int refcnt;
void *tmplt_priv;
- u32 chain_index;
mutex_lock(&block->lock);
if (explicitly_created) {
@@ -492,23 +491,21 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
* save these to temporary variables.
*/
refcnt = --chain->refcnt;
- is_last = refcnt - chain->action_refcnt == 0;
tmplt_ops = chain->tmplt_ops;
tmplt_priv = chain->tmplt_priv;
- chain_index = chain->index;
-
- if (refcnt == 0)
- free_block = tcf_chain_detach(chain);
- mutex_unlock(&block->lock);
/* The last dropped non-action reference will trigger notification. */
- if (is_last && !by_act) {
- tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain_index,
+ if (refcnt - chain->action_refcnt == 0 && !by_act) {
+ tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
block, NULL, 0, 0, false);
/* Last reference to chain, no need to lock. */
chain->flushing = false;
}
+ if (refcnt == 0)
+ free_block = tcf_chain_detach(chain);
+ mutex_unlock(&block->lock);
+
if (refcnt == 0) {
tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
tcf_chain_destroy(chain, free_block);
@@ -1896,6 +1893,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+ int err = 0;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
@@ -1909,10 +1907,14 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
}
if (unicast)
- return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+ err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+ else
+ err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
- return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
- n->nlmsg_flags & NLM_F_ECHO);
+ if (err > 0)
+ err = 0;
+ return err;
}
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
@@ -1944,12 +1946,15 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
}
if (unicast)
- return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
-
- err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
- n->nlmsg_flags & NLM_F_ECHO);
+ err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+ else
+ err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
if (err < 0)
NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
+
+ if (err > 0)
+ err = 0;
return err;
}
@@ -2691,6 +2696,7 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
struct tcf_block *block = chain->block;
struct net *net = block->net;
struct sk_buff *skb;
+ int err = 0;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
@@ -2704,9 +2710,14 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
}
if (unicast)
- return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+ err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+ else
+ err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+ flags & NLM_F_ECHO);
- return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
+ if (err > 0)
+ err = 0;
+ return err;
}
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
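
The err-squashing added to tfilter_notify(), tfilter_del_notify() and
tc_chain_notify() (and to tclass_notify()/tclass_del_notify() in sch_api.c below)
is all one fix: netlink_unicast() returns the number of bytes sent on success, and
that positive value was leaking out of these functions as if it were a meaningful
non-zero status. Every call site now reduces to the same normalization, shown here
as a hypothetical helper the patch simply open-codes:

	/* Illustrative only; the patch repeats this inline at each site. */
	static inline int tc_notify_errno(int err)
	{
		return err > 0 ? 0 : err;
	}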
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 27300a3e76c7..c04247b403ed 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1348,46 +1348,46 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
goto errout;
- if (!handle) {
- handle = 1;
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
- INT_MAX, GFP_KERNEL);
- } else if (!fold) {
- /* user specifies a handle and it doesn't exist */
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
- handle, GFP_KERNEL);
- }
- if (err)
- goto errout;
- fnew->handle = handle;
-
if (tb[TCA_FLOWER_FLAGS]) {
fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
if (!tc_flags_valid(fnew->flags)) {
err = -EINVAL;
- goto errout_idr;
+ goto errout;
}
}
err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
tp->chain->tmplt_priv, extack);
if (err)
- goto errout_idr;
+ goto errout;
err = fl_check_assign_mask(head, fnew, fold, mask);
if (err)
- goto errout_idr;
+ goto errout;
+
+ if (!handle) {
+ handle = 1;
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ INT_MAX, GFP_KERNEL);
+ } else if (!fold) {
+ /* user specifies a handle and it doesn't exist */
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ handle, GFP_KERNEL);
+ }
+ if (err)
+ goto errout_mask;
+ fnew->handle = handle;
if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
err = -EEXIST;
- goto errout_mask;
+ goto errout_idr;
}
err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
fnew->mask->filter_ht_params);
if (err)
- goto errout_mask;
+ goto errout_idr;
if (!tc_skip_hw(fnew->flags)) {
err = fl_hw_replace_filter(tp, fnew, extack);
@@ -1426,12 +1426,13 @@ errout_mask_ht:
rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
fnew->mask->filter_ht_params);
-errout_mask:
- fl_mask_put(head, fnew->mask, false);
-
errout_idr:
if (!fold)
idr_remove(&head->handle_idr, fnew->handle);
+
+errout_mask:
+ fl_mask_put(head, fnew->mask, false);
+
errout:
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
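
Moving the idr_alloc_u32() calls to the end of fl_change() is a publish-after-init
reordering: once the handle is in head->handle_idr the new filter is discoverable,
so allocating it before fl_set_parms() and fl_check_assign_mask() left a window in
which a half-initialized fnew (with a NULL mask, among other things) could be
found, and forced the error paths to unpublish a filter that was never fully set
up. The unwind labels swap places to mirror the new order: undo the idr entry
first, drop the mask reference second. In miniature, with purely illustrative
names:

	err = setup_everything(obj);		/* parms, mask, flags */
	if (err)
		goto free_obj;
	err = idr_alloc_u32(&head->handle_idr, obj, &handle, INT_MAX,
			    GFP_KERNEL);	/* publish last */
	if (err)
		goto teardown;			/* unwind in reverse order */
	obj->handle = handle;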
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 459921bd3d87..a13bc351a414 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -130,6 +130,11 @@ static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
static void *mall_get(struct tcf_proto *tp, u32 handle)
{
+ struct cls_mall_head *head = rtnl_dereference(tp->root);
+
+ if (head && head->handle == handle)
+ return head;
+
return NULL;
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 352b46f98440..fb8f138b9776 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1824,6 +1824,7 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+ int err = 0;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
@@ -1834,8 +1835,11 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
return -EINVAL;
}
- return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
- n->nlmsg_flags & NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
+ if (err > 0)
+ err = 0;
+ return err;
}
static int tclass_del_notify(struct net *net,
@@ -1866,8 +1870,11 @@ static int tclass_del_notify(struct net *net,
return err;
}
- return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
- n->nlmsg_flags & NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
+ if (err > 0)
+ err = 0;
+ return err;
}
#ifdef CONFIG_NET_CLS
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 1d2a12132abc..259d97bc2abd 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -211,6 +211,9 @@ struct cake_sched_data {
u8 ack_filter;
u8 atm_mode;
+ u32 fwmark_mask;
+ u16 fwmark_shft;
+
/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
u16 rate_shft;
ktime_t time_next_packet;
@@ -258,8 +261,7 @@ enum {
CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
CAKE_FLAG_INGRESS = BIT(2),
CAKE_FLAG_WASH = BIT(3),
- CAKE_FLAG_SPLIT_GSO = BIT(4),
- CAKE_FLAG_FWMARK = BIT(5)
+ CAKE_FLAG_SPLIT_GSO = BIT(4)
};
/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
@@ -1515,16 +1517,27 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
{
+ int wlen = skb_network_offset(skb);
u8 dscp;
- switch (skb->protocol) {
+ switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
+ wlen += sizeof(struct iphdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
if (wash && dscp)
ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
return dscp;
case htons(ETH_P_IPV6):
+ wlen += sizeof(struct ipv6hdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
if (wash && dscp)
ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
@@ -1543,7 +1556,7 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
struct sk_buff *skb)
{
struct cake_sched_data *q = qdisc_priv(sch);
- u32 tin;
+ u32 tin, mark;
u8 dscp;
/* Tin selection: Default to diffserv-based selection, allow overriding
@@ -1551,14 +1564,13 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
*/
dscp = cake_handle_diffserv(skb,
q->rate_flags & CAKE_FLAG_WASH);
+ mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
tin = 0;
- else if (q->rate_flags & CAKE_FLAG_FWMARK && /* use fw mark */
- skb->mark &&
- skb->mark <= q->tin_cnt)
- tin = q->tin_order[skb->mark - 1];
+ else if (mark && mark <= q->tin_cnt)
+ tin = q->tin_order[mark - 1];
else if (TC_H_MAJ(skb->priority) == sch->handle &&
TC_H_MIN(skb->priority) > 0 &&
@@ -2172,6 +2184,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
[TCA_CAKE_MPU] = { .type = NLA_U32 },
[TCA_CAKE_INGRESS] = { .type = NLA_U32 },
[TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
+ [TCA_CAKE_FWMARK] = { .type = NLA_U32 },
};
static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
@@ -2619,10 +2632,8 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
}
if (tb[TCA_CAKE_FWMARK]) {
- if (!!nla_get_u32(tb[TCA_CAKE_FWMARK]))
- q->rate_flags |= CAKE_FLAG_FWMARK;
- else
- q->rate_flags &= ~CAKE_FLAG_FWMARK;
+ q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
+ q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
}
if (q->tins) {
@@ -2784,8 +2795,7 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_FWMARK,
- !!(q->rate_flags & CAKE_FLAG_FWMARK)))
+ if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
goto nla_put_failure;
return nla_nest_end(skb, opts);
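
The fwmark attribute changes meaning here from an on/off flag to a mask: tin
selection takes skb->mark, masks it, and shifts the result down by the mask's
lowest set bit, so a non-overlapping mask range can be shared with other users of
the mark. A user-space sanity check of that arithmetic, using __builtin_ctz()
where the kernel uses __ffs():

	#include <assert.h>
	#include <stdint.h>

	static uint32_t cake_tin_from_mark(uint32_t mark, uint32_t mask)
	{
		uint32_t shft = mask ? (uint32_t)__builtin_ctz(mask) : 0;

		return (mark & mask) >> shft;
	}

	int main(void)
	{
		assert(cake_tin_from_mark(0x0300, 0xFF00) == 3); /* tin_order[2] */
		assert(cake_tin_from_mark(0x0042, 0xFF00) == 0); /* falls through */
		return 0;
	}

A zero result falls through to the usual diffserv/priority selection, and values
above tin_cnt are ignored, so a stray mark cannot index out of range.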
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 4dc05409e3fb..114b9048ea7e 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
+ __u32 qlen;
cl->xstats.avgidle = cl->avgidle;
cl->xstats.undertime = 0;
+ qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
if (cl->undertime != PSCHED_PASTPERFECT)
cl->xstats.undertime = cl->undertime - q->now;
@@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
- unsigned int qlen, backlog;
if (cl->filters || cl->children || cl == &q->link)
return -EBUSY;
sch_tree_lock(sch);
- qlen = cl->q->q.qlen;
- backlog = cl->q->qstats.backlog;
- qdisc_reset(cl->q);
- qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+ qdisc_purge_queue(cl->q);
if (cl->next_alive)
cbq_deactivate_class(cl);
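
sch_cbq is the first of roughly a dozen qdiscs in this series to drop the
open-coded qlen/backlog reads in favor of shared helpers. The point is not just
deduplication: cl->q->q.qlen is always zero for NOLOCK children, whose length and
backlog live in per-CPU stats. Presumed shape of the helpers, reconstructed from
the call sites (treat as a sketch, not the verbatim headers):

	static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,
						     __u32 *qlen, __u32 *backlog)
	{
		struct gnet_stats_queue qstats = { 0 };
		__u32 len = qdisc_qlen_sum(sch);

		__gnet_stats_copy_queue(&qstats, sch->cpu_qstats,
					&sch->qstats, len);
		*qlen = qstats.qlen;
		*backlog = qstats.backlog;
	}

	/* reset the child and propagate the counters up the tree */
	static inline void qdisc_purge_queue(struct Qdisc *sch)
	{
		__u32 qlen, backlog;

		qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
		qdisc_reset(sch);
		qdisc_tree_reduce_backlog(sch, qlen, backlog);
	}

	/* propagate only; the caller is about to qdisc_put() the child */
	static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
	{
		__u32 qlen, backlog;

		qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
		qdisc_tree_reduce_backlog(sch, qlen, backlog);
	}

qdisc_qstats_copy(), used by the dump paths in sch_mq.c and friends below, is the
same idea applied to gnet_stats_copy_queue(): pass the per-CPU pointer instead of
NULL.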
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 09b800991065..430df9a55ec4 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
return container_of(clc, struct drr_class, common);
}
-static void drr_purge_queue(struct drr_class *cl)
-{
- unsigned int len = cl->qdisc->q.qlen;
- unsigned int backlog = cl->qdisc->qstats.backlog;
-
- qdisc_reset(cl->qdisc);
- qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
[TCA_DRR_QUANTUM] = { .type = NLA_U32 },
};
@@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
- drr_purge_queue(cl);
+ qdisc_purge_queue(cl->qdisc);
qdisc_class_hash_remove(&q->clhash, &cl->common);
sch_tree_unlock(sch);
@@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct gnet_dump *d)
{
struct drr_class *cl = (struct drr_class *)arg;
- __u32 qlen = cl->qdisc->q.qlen;
+ __u32 qlen = qdisc_qlen_sum(cl->qdisc);
+ struct Qdisc *cl_q = cl->qdisc;
struct tc_drr_stats xstats;
memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+ gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 24cc220a3218..d2ab463f22ae 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -845,16 +845,6 @@ qdisc_peek_len(struct Qdisc *sch)
}
static void
-hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
-{
- unsigned int len = cl->qdisc->q.qlen;
- unsigned int backlog = cl->qdisc->qstats.backlog;
-
- qdisc_reset(cl->qdisc);
- qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
-static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
struct hfsc_class *p;
@@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
list_add_tail(&cl->siblings, &parent->children);
if (parent->level == 0)
- hfsc_purge_queue(sch, parent);
+ qdisc_purge_queue(parent->qdisc);
hfsc_adjust_levels(parent);
sch_tree_unlock(sch);
@@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
list_del(&cl->siblings);
hfsc_adjust_levels(cl->cl_parent);
- hfsc_purge_queue(sch, cl);
+ qdisc_purge_queue(cl->qdisc);
qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
sch_tree_unlock(sch);
@@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
{
struct hfsc_class *cl = (struct hfsc_class *)arg;
struct tc_hfsc_stats xstats;
+ __u32 qlen;
- cl->qstats.backlog = cl->qdisc->qstats.backlog;
+ qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
xstats.level = cl->level;
xstats.period = cl->cl_vtperiod;
xstats.work = cl->cl_total;
@@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 30f9da7e1076..2f9883b196e8 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
};
__u32 qlen = 0;
- if (!cl->level && cl->leaf.q) {
- qlen = cl->leaf.q->q.qlen;
- qs.backlog = cl->leaf.q->qstats.backlog;
- }
+ if (!cl->level && cl->leaf.q)
+ qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
INT_MIN, INT_MAX);
cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
- if (!cl->level) {
- unsigned int qlen = cl->leaf.q->q.qlen;
- unsigned int backlog = cl->leaf.q->qstats.backlog;
-
- qdisc_reset(cl->leaf.q);
- qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
- }
+ if (!cl->level)
+ qdisc_purge_queue(cl->leaf.q);
/* delete from hash and active; remainder in destroy_class */
qdisc_class_hash_remove(&q->clhash, &cl->common);
@@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
classid, NULL);
sch_tree_lock(sch);
if (parent && !parent->level) {
- unsigned int qlen = parent->leaf.q->q.qlen;
- unsigned int backlog = parent->leaf.q->qstats.backlog;
-
/* turn parent into inner node */
- qdisc_reset(parent->leaf.q);
- qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+ qdisc_purge_queue(parent->leaf.q);
qdisc_put(parent->leaf.q);
if (parent->prio_activity)
htb_deactivate(q, parent);
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 203659bc3906..3a3312467692 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
- gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+ qdisc_qstats_copy(d, sch) < 0)
return -1;
return 0;
}
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index d364e63c396d..ea0dc112b38d 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &sch->bstats) < 0 ||
- gnet_stats_copy_queue(d, NULL,
- &sch->qstats, sch->q.qlen) < 0)
+ qdisc_qstats_copy(d, sch) < 0)
return -1;
}
return 0;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 7410ce4d0321..35b03ae08e0f 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
for (i = q->bands; i < q->max_bands; i++) {
if (q->queues[i] != &noop_qdisc) {
struct Qdisc *child = q->queues[i];
+
q->queues[i] = &noop_qdisc;
- qdisc_tree_reduce_backlog(child, child->q.qlen,
- child->qstats.backlog);
+ qdisc_tree_flush_backlog(child);
qdisc_put(child);
}
}
@@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
qdisc_hash_add(child, true);
if (old != &noop_qdisc) {
- qdisc_tree_reduce_backlog(old,
- old->q.qlen,
- old->qstats.backlog);
+ qdisc_tree_flush_backlog(old);
qdisc_put(old);
}
sch_tree_unlock(sch);
@@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl_q->bstats) < 0 ||
- gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+ qdisc_qstats_copy(d, cl_q) < 0)
return -1;
return 0;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 847141cd900f..d519b21535b3 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
- for (i = q->bands; i < oldbands; i++) {
- struct Qdisc *child = q->queues[i];
-
- qdisc_tree_reduce_backlog(child, child->q.qlen,
- child->qstats.backlog);
- }
+ for (i = q->bands; i < oldbands; i++)
+ qdisc_tree_flush_backlog(q->queues[i]);
for (i = oldbands; i < q->bands; i++) {
q->queues[i] = queues[i];
@@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl_q->bstats) < 0 ||
- gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+ qdisc_qstats_copy(d, cl_q) < 0)
return -1;
return 0;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 29f5c4a24688..1589364b54da 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
return container_of(clc, struct qfq_class, common);
}
-static void qfq_purge_queue(struct qfq_class *cl)
-{
- unsigned int len = cl->qdisc->q.qlen;
- unsigned int backlog = cl->qdisc->qstats.backlog;
-
- qdisc_reset(cl->qdisc);
- qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
[TCA_QFQ_LMAX] = { .type = NLA_U32 },
@@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
- qfq_purge_queue(cl);
+ qdisc_purge_queue(cl->qdisc);
qdisc_class_hash_remove(&q->clhash, &cl->common);
sch_tree_unlock(sch);
@@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, NULL,
- &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+ qdisc_qstats_copy(d, cl->qdisc) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 9df9942340ea..4e8c0abf6194 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
q->flags = ctl->flags;
q->limit = ctl->limit;
if (child) {
- qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
- q->qdisc->qstats.backlog);
+ qdisc_tree_flush_backlog(q->qdisc);
old_child = q->qdisc;
q->qdisc = child;
}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index bab506b01a32..2419fdb75966 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
qdisc_hash_add(child, true);
sch_tree_lock(sch);
- qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
- q->qdisc->qstats.backlog);
+ qdisc_tree_flush_backlog(q->qdisc);
qdisc_put(q->qdisc);
q->qdisc = child;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 206e4dbed12f..c7041999eb5d 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch = dev_queue->qdisc_sleeping;
if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
- gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+ qdisc_qstats_copy(d, sch) < 0)
return -1;
return 0;
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7f272a9070c5..f71578dbb9e3 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
if (child) {
- qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
- q->qdisc->qstats.backlog);
+ qdisc_tree_flush_backlog(q->qdisc);
qdisc_put(q->qdisc);
q->qdisc = child;
}
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 5b537613946f..31569f4809f6 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -471,12 +471,6 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
struct crypto_shash *tfm = NULL;
__u16 id;
- /* If AUTH extension is disabled, we are done */
- if (!ep->auth_enable) {
- ep->auth_hmacs = NULL;
- return 0;
- }
-
/* If the transforms are already allocated, we are done */
if (ep->auth_hmacs)
return 0;
@@ -766,7 +760,6 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
SHASH_DESC_ON_STACK(desc, tfm);
desc->tfm = tfm;
- desc->flags = 0;
crypto_shash_digest(desc, (u8 *)auth,
end - (unsigned char *)auth, digest);
shash_desc_zero(desc);
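
Two unrelated cleanups meet in sctp/auth.c. The auth_enable check moves out to the
callers: sctp_endpoint_init() below only calls sctp_auth_init_hmacs() when AUTH is
enabled, and sctp_sock_migrate() can now call it for a migrated socket even if the
sysctl was flipped off in the meantime. The dropped desc->flags assignments track
a crypto-API cleanup in which shash_desc lost its flags field (the MAY_SLEEP hint
it carried was never acted on), leaving the tfm as the only thing to set. Usage
after the change, as the remaining call sites suggest:

	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;	/* no desc->flags to initialize anymore */
	err = crypto_shash_digest(desc, data, len, digest);
	shash_desc_zero(desc);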
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 40c7eb941bc9..0448b68fce74 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -107,6 +107,13 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
auth_chunks->param_hdr.length =
htons(sizeof(struct sctp_paramhdr) + 2);
}
+
+ /* Allocate and initialize transforms arrays for supported

+ * HMACs.
+ */
+ err = sctp_auth_init_hmacs(ep, gfp);
+ if (err)
+ goto nomem;
}
/* Initialize the base structure. */
@@ -150,15 +157,10 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
INIT_LIST_HEAD(&ep->endpoint_shared_keys);
null_key = sctp_auth_shkey_create(0, gfp);
if (!null_key)
- goto nomem;
+ goto nomem_shkey;
list_add(&null_key->key_list, &ep->endpoint_shared_keys);
- /* Allocate and initialize transorms arrays for supported HMACs. */
- err = sctp_auth_init_hmacs(ep, gfp);
- if (err)
- goto nomem_hmacs;
-
/* Add the null key to the endpoint shared keys list and
* set the hmcas and chunks pointers.
*/
@@ -169,8 +171,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
return ep;
-nomem_hmacs:
- sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
+nomem_shkey:
+ sctp_auth_destroy_hmacs(ep->auth_hmacs);
nomem:
/* Free all allocations */
kfree(auth_hmacs);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 6abc8b274270..951afdeea5e9 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -600,6 +600,7 @@ out:
static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
{
/* No address mapping for V4 sockets */
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
return sizeof(struct sockaddr_in);
}
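
The memset in sctp_v4_addr_to_user() closes an infoleak: the address lives in a
union sctp_addr that is larger than struct sockaddr_in, the function reports
sizeof(struct sockaddr_in) to its caller, and nothing guarantees sin_zero is
zero, so stale bytes in that region went to user space. Clearing the padding is
the standard pattern whenever a sockaddr_in is filled field by field:

	#include <string.h>
	#include <netinet/in.h>

	static void fill_sin(struct sockaddr_in *sin, in_port_t port)
	{
		sin->sin_family = AF_INET;
		sin->sin_port = htons(port);
		sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		/* padding the assignments above never touch */
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}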
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index d05c57664e36..72e74503f9fc 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1684,7 +1684,6 @@ static struct sctp_cookie_param *sctp_pack_cookie(
/* Sign the message. */
desc->tfm = sctp_sk(ep->base.sk)->hmac;
- desc->flags = 0;
err = crypto_shash_setkey(desc->tfm, ep->secret_key,
sizeof(ep->secret_key)) ?:
@@ -1755,7 +1754,6 @@ struct sctp_association *sctp_unpack_cookie(
int err;
desc->tfm = sctp_sk(ep->base.sk)->hmac;
- desc->flags = 0;
err = crypto_shash_setkey(desc->tfm, ep->secret_key,
sizeof(ep->secret_key)) ?:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1d143bc3f73d..4aa03588f87b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1112,32 +1112,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
}
-/* Sent the next ASCONF packet currently stored in the association.
- * This happens after the ASCONF_ACK was succeffully processed.
- */
-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
-{
- struct net *net = sock_net(asoc->base.sk);
-
- /* Send the next asconf chunk from the addip chunk
- * queue.
- */
- if (!list_empty(&asoc->addip_chunk_list)) {
- struct list_head *entry = asoc->addip_chunk_list.next;
- struct sctp_chunk *asconf = list_entry(entry,
- struct sctp_chunk, list);
- list_del_init(entry);
-
- /* Hold the chunk until an ASCONF_ACK is received. */
- sctp_chunk_hold(asconf);
- if (sctp_primitive_ASCONF(net, asoc, asconf))
- sctp_chunk_free(asconf);
- else
- asoc->addip_last_asconf = asconf;
- }
-}
-
-
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
@@ -1783,9 +1757,6 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
}
sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
break;
- case SCTP_CMD_SEND_NEXT_ASCONF:
- sctp_cmd_send_asconf(asoc);
- break;
case SCTP_CMD_PURGE_ASCONF_QUEUE:
sctp_asconf_queue_teardown(asoc);
break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c9ae3404b1bb..713a669d2058 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3824,6 +3824,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}
+static enum sctp_disposition sctp_send_next_asconf(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ const union sctp_subtype type,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *asconf;
+ struct list_head *entry;
+
+ if (list_empty(&asoc->addip_chunk_list))
+ return SCTP_DISPOSITION_CONSUME;
+
+ entry = asoc->addip_chunk_list.next;
+ asconf = list_entry(entry, struct sctp_chunk, list);
+
+ list_del_init(entry);
+ sctp_chunk_hold(asconf);
+ asoc->addip_last_asconf = asconf;
+
+ return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
+}
+
/*
* ADDIP Section 4.3 General rules for address manipulation
* When building TLV parameters for the ASCONF Chunk that will add or
@@ -3915,14 +3938,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
- asconf_ack)) {
- /* Successfully processed ASCONF_ACK. We can
- * release the next asconf if we have one.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
- SCTP_NULL());
- return SCTP_DISPOSITION_CONSUME;
- }
+ asconf_ack))
+ return sctp_send_next_asconf(net, ep,
+ (struct sctp_association *)asoc,
+ type, commands);
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(struct sctp_errhdr));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 533207dbeae9..4583fa914e62 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -102,9 +102,9 @@ static int sctp_send_asconf(struct sctp_association *asoc,
struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
-static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
- struct sctp_association *assoc,
- enum sctp_socket_type type);
+static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ struct sctp_association *assoc,
+ enum sctp_socket_type type);
static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
@@ -999,7 +999,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
if (unlikely(addrs_size <= 0))
return -EINVAL;
- kaddrs = vmemdup_user(addrs, addrs_size);
+ kaddrs = memdup_user(addrs, addrs_size);
if (unlikely(IS_ERR(kaddrs)))
return PTR_ERR(kaddrs);
@@ -1007,7 +1007,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
addr_buf = kaddrs;
while (walk_size < addrs_size) {
if (walk_size + sizeof(sa_family_t) > addrs_size) {
- kvfree(kaddrs);
+ kfree(kaddrs);
return -EINVAL;
}
@@ -1018,7 +1018,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
- kvfree(kaddrs);
+ kfree(kaddrs);
return -EINVAL;
}
addrcnt++;
@@ -1054,7 +1054,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
}
out:
- kvfree(kaddrs);
+ kfree(kaddrs);
return err;
}
@@ -1329,7 +1329,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
if (unlikely(addrs_size <= 0))
return -EINVAL;
- kaddrs = vmemdup_user(addrs, addrs_size);
+ kaddrs = memdup_user(addrs, addrs_size);
if (unlikely(IS_ERR(kaddrs)))
return PTR_ERR(kaddrs);
@@ -1349,7 +1349,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
out_free:
- kvfree(kaddrs);
+ kfree(kaddrs);
return err;
}
@@ -2920,6 +2920,9 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
return 0;
}
+ if (sctp_style(sk, TCP))
+ params.sack_assoc_id = SCTP_FUTURE_ASSOC;
+
if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
params.sack_assoc_id == SCTP_ALL_ASSOC) {
if (params.sack_delay) {
@@ -3024,6 +3027,9 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
return 0;
}
+ if (sctp_style(sk, TCP))
+ info.sinfo_assoc_id = SCTP_FUTURE_ASSOC;
+
if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
sp->default_stream = info.sinfo_stream;
@@ -3081,6 +3087,9 @@ static int sctp_setsockopt_default_sndinfo(struct sock *sk,
return 0;
}
+ if (sctp_style(sk, TCP))
+ info.snd_assoc_id = SCTP_FUTURE_ASSOC;
+
if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
info.snd_assoc_id == SCTP_ALL_ASSOC) {
sp->default_stream = info.snd_sid;
@@ -3531,6 +3540,9 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
return 0;
}
+ if (sctp_style(sk, TCP))
+ params.assoc_id = SCTP_FUTURE_ASSOC;
+
if (params.assoc_id == SCTP_FUTURE_ASSOC ||
params.assoc_id == SCTP_ALL_ASSOC)
sp->default_rcv_context = params.assoc_value;
@@ -3670,6 +3682,9 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
return 0;
}
+ if (sctp_style(sk, TCP))
+ params.assoc_id = SCTP_FUTURE_ASSOC;
+
if (params.assoc_id == SCTP_FUTURE_ASSOC ||
params.assoc_id == SCTP_ALL_ASSOC)
sp->max_burst = params.assoc_value;
@@ -3798,6 +3813,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
goto out;
}
+ if (sctp_style(sk, TCP))
+ authkey->sca_assoc_id = SCTP_FUTURE_ASSOC;
+
if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
ret = sctp_auth_set_key(ep, asoc, authkey);
@@ -3853,6 +3871,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
if (asoc)
return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
+ if (sctp_style(sk, TCP))
+ val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
val.scact_assoc_id == SCTP_ALL_ASSOC) {
ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
@@ -3904,6 +3925,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
if (asoc)
return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
+ if (sctp_style(sk, TCP))
+ val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
val.scact_assoc_id == SCTP_ALL_ASSOC) {
ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
@@ -3954,6 +3978,9 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
if (asoc)
return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
+ if (sctp_style(sk, TCP))
+ val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
val.scact_assoc_id == SCTP_ALL_ASSOC) {
ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
@@ -4169,6 +4196,9 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk,
goto out;
}
+ if (sctp_style(sk, TCP))
+ info.pr_assoc_id = SCTP_FUTURE_ASSOC;
+
if (info.pr_assoc_id == SCTP_FUTURE_ASSOC ||
info.pr_assoc_id == SCTP_ALL_ASSOC) {
SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
@@ -4251,6 +4281,9 @@ static int sctp_setsockopt_enable_strreset(struct sock *sk,
goto out;
}
+ if (sctp_style(sk, TCP))
+ params.assoc_id = SCTP_FUTURE_ASSOC;
+
if (params.assoc_id == SCTP_FUTURE_ASSOC ||
params.assoc_id == SCTP_ALL_ASSOC)
ep->strreset_enable = params.assoc_value;
@@ -4376,6 +4409,9 @@ static int sctp_setsockopt_scheduler(struct sock *sk,
if (asoc)
return sctp_sched_set_sched(asoc, params.assoc_value);
+ if (sctp_style(sk, TCP))
+ params.assoc_id = SCTP_FUTURE_ASSOC;
+
if (params.assoc_id == SCTP_FUTURE_ASSOC ||
params.assoc_id == SCTP_ALL_ASSOC)
sp->default_ss = params.assoc_value;
@@ -4541,6 +4577,9 @@ static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
if (asoc)
return sctp_assoc_ulpevent_type_set(&param, asoc);
+ if (sctp_style(sk, TCP))
+ param.se_assoc_id = SCTP_FUTURE_ASSOC;
+
if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
param.se_assoc_id == SCTP_ALL_ASSOC)
sctp_ulpevent_type_set(&sp->subscribe,
@@ -4808,7 +4847,8 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
}
/* Validate addr_len before calling common connect/connectx routine. */
- af = sctp_get_af_specific(addr->sa_family);
+ af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL :
+ sctp_get_af_specific(addr->sa_family);
if (!af || addr_len < af->sockaddr_len) {
err = -EINVAL;
} else {
@@ -4891,7 +4931,11 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
/* Populate the fields of the newsk from the oldsk and migrate the
* asoc to the newsk.
*/
- sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
+ error = sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
+ if (error) {
+ sk_common_release(newsk);
+ newsk = NULL;
+ }
out:
release_sock(sk);
@@ -5639,7 +5683,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
/* Populate the fields of the newsk from the oldsk and migrate the
* asoc to the newsk.
*/
- sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
+ err = sctp_sock_migrate(sk, sock->sk, asoc,
+ SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
+ if (err) {
+ sock_release(sock);
+ sock = NULL;
+ }
*sockp = sock;
@@ -9160,7 +9209,7 @@ static inline void sctp_copy_descendant(struct sock *sk_to,
{
int ancestor_size = sizeof(struct inet_sock) +
sizeof(struct sctp_sock) -
- offsetof(struct sctp_sock, auto_asconf_list);
+ offsetof(struct sctp_sock, pd_lobby);
if (sk_from->sk_family == PF_INET6)
ancestor_size += sizeof(struct ipv6_pinfo);
@@ -9171,9 +9220,9 @@ static inline void sctp_copy_descendant(struct sock *sk_to,
/* Populate the fields of the newsk from the oldsk and migrate the assoc
* and its messages to the newsk.
*/
-static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
- struct sctp_association *assoc,
- enum sctp_socket_type type)
+static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ struct sctp_association *assoc,
+ enum sctp_socket_type type)
{
struct sctp_sock *oldsp = sctp_sk(oldsk);
struct sctp_sock *newsp = sctp_sk(newsk);
@@ -9182,6 +9231,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
struct sctp_bind_hashbucket *head;
+ int err;
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
@@ -9210,8 +9260,20 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
/* Copy the bind_addr list from the original endpoint to the new
* endpoint so that we can handle restarts properly
*/
- sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
- &oldsp->ep->base.bind_addr, GFP_KERNEL);
+ err = sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
+ &oldsp->ep->base.bind_addr, GFP_KERNEL);
+ if (err)
+ return err;
+
+ /* New ep's auth_hmacs should be set if old ep's is set, in case
+ * that net->sctp.auth_enable has been changed to 0 by users and
+ * new ep's auth_hmacs couldn't be set in sctp_endpoint_init().
+ */
+ if (oldsp->ep->auth_hmacs) {
+ err = sctp_auth_init_hmacs(newsp->ep, GFP_KERNEL);
+ if (err)
+ return err;
+ }
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
@@ -9231,7 +9293,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
* 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
*/
- skb_queue_head_init(&newsp->pd_lobby);
atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
@@ -9296,6 +9357,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
}
release_sock(newsk);
+
+ return 0;
}
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 2936ed17bf9e..b6bb68adac6e 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -37,66 +37,6 @@
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
-static struct flex_array *fa_alloc(size_t elem_size, size_t elem_count,
- gfp_t gfp)
-{
- struct flex_array *result;
- int err;
-
- result = flex_array_alloc(elem_size, elem_count, gfp);
- if (result) {
- err = flex_array_prealloc(result, 0, elem_count, gfp);
- if (err) {
- flex_array_free(result);
- result = NULL;
- }
- }
-
- return result;
-}
-
-static void fa_free(struct flex_array *fa)
-{
- if (fa)
- flex_array_free(fa);
-}
-
-static void fa_copy(struct flex_array *fa, struct flex_array *from,
- size_t index, size_t count)
-{
- void *elem;
-
- while (count--) {
- elem = flex_array_get(from, index);
- flex_array_put(fa, index, elem, 0);
- index++;
- }
-}
-
-static void fa_zero(struct flex_array *fa, size_t index, size_t count)
-{
- void *elem;
-
- while (count--) {
- elem = flex_array_get(fa, index);
- memset(elem, 0, fa->element_size);
- index++;
- }
-}
-
-static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
-{
- size_t index = 0;
-
- while (count--) {
- if (elem == flex_array_get(fa, index))
- break;
- index++;
- }
-
- return index;
-}
-
/* Migrates chunks from stream queues to new stream queues if needed,
* but not across associations. Also, removes those chunks to streams
* higher than the new max.
@@ -153,53 +93,32 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
gfp_t gfp)
{
- struct flex_array *out;
- size_t elem_size = sizeof(struct sctp_stream_out);
-
- out = fa_alloc(elem_size, outcnt, gfp);
- if (!out)
- return -ENOMEM;
-
- if (stream->out) {
- fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
- if (stream->out_curr) {
- size_t index = fa_index(stream->out, stream->out_curr,
- stream->outcnt);
-
- BUG_ON(index == stream->outcnt);
- stream->out_curr = flex_array_get(out, index);
- }
- fa_free(stream->out);
- }
+ int ret;
- if (outcnt > stream->outcnt)
- fa_zero(out, stream->outcnt, (outcnt - stream->outcnt));
+ if (outcnt <= stream->outcnt)
+ return 0;
- stream->out = out;
+ ret = genradix_prealloc(&stream->out, outcnt, gfp);
+ if (ret)
+ return ret;
+ stream->outcnt = outcnt;
return 0;
}
static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
gfp_t gfp)
{
- struct flex_array *in;
- size_t elem_size = sizeof(struct sctp_stream_in);
-
- in = fa_alloc(elem_size, incnt, gfp);
- if (!in)
- return -ENOMEM;
-
- if (stream->in) {
- fa_copy(in, stream->in, 0, min(incnt, stream->incnt));
- fa_free(stream->in);
- }
+ int ret;
- if (incnt > stream->incnt)
- fa_zero(in, stream->incnt, (incnt - stream->incnt));
+ if (incnt <= stream->incnt)
+ return 0;
- stream->in = in;
+ ret = genradix_prealloc(&stream->in, incnt, gfp);
+ if (ret)
+ return ret;
+ stream->incnt = incnt;
return 0;
}
@@ -226,12 +145,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
if (ret)
goto out;
- stream->outcnt = outcnt;
for (i = 0; i < stream->outcnt; i++)
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
- sched->init(stream);
-
in:
sctp_stream_interleave_init(stream);
if (!incnt)
@@ -240,14 +156,11 @@ in:
ret = sctp_stream_alloc_in(stream, incnt, gfp);
if (ret) {
sched->free(stream);
- fa_free(stream->out);
- stream->out = NULL;
+ genradix_free(&stream->out);
stream->outcnt = 0;
goto out;
}
- stream->incnt = incnt;
-
out:
return ret;
}
@@ -272,8 +185,8 @@ void sctp_stream_free(struct sctp_stream *stream)
sched->free(stream);
for (i = 0; i < stream->outcnt; i++)
kfree(SCTP_SO(stream, i)->ext);
- fa_free(stream->out);
- fa_free(stream->in);
+ genradix_free(&stream->out);
+ genradix_free(&stream->in);
}
void sctp_stream_clear(struct sctp_stream *stream)
@@ -304,8 +217,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
sched->sched_all(stream);
- new->out = NULL;
- new->in = NULL;
+ new->out.tree.root = NULL;
+ new->in.tree.root = NULL;
new->outcnt = 0;
new->incnt = 0;
}
@@ -557,8 +470,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
goto out;
}
- stream->outcnt = outcnt;
-
asoc->strreset_outstanding = !!out + !!in;
out:
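
The entire fa_* shim falls away because the generic radix tree natively has the
semantics the shim was faking on top of flex_array: genradix_prealloc() only ever
grows the structure, newly reachable elements come back zeroed, and existing
elements never move, so there is nothing to copy on resize and pointers such as
stream->out_curr stay valid across a grow. That is also why the separate
stream->outcnt assignments at the call sites disappear: the alloc helpers update
the count themselves once preallocation succeeds. The accessors used throughout
(SCTP_SO()/SCTP_SI()) are presumably thin wrappers along these lines:

	/* assumed shape, matching the genradix conversion */
	#define SCTP_SO(s, i)	(genradix_ptr(&(s)->out, i))
	#define SCTP_SI(s, i)	(genradix_ptr(&(s)->in, i))

	/* growing the outbound streams, then touching the new last slot */
	ret = genradix_prealloc(&stream->out, outcnt, gfp);
	if (ret)
		return ret;
	stream->outcnt = outcnt;
	SCTP_SO(stream, outcnt - 1)->state = SCTP_STREAM_OPEN;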
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index a6bf21579466..102c6fefe38c 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -101,7 +101,7 @@ static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
- const struct sctp_stream *stream;
+ struct sctp_stream *stream;
__u16 sid, ssn;
if (chunk->chunk_hdr->type != SCTP_CID_DATA)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 77ef53596d18..6f869ef49b32 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -167,10 +167,9 @@ static int smc_release(struct socket *sock)
if (sk->sk_state == SMC_CLOSED) {
if (smc->clcsock) {
- mutex_lock(&smc->clcsock_release_lock);
- sock_release(smc->clcsock);
- smc->clcsock = NULL;
- mutex_unlock(&smc->clcsock_release_lock);
+ release_sock(sk);
+ smc_clcsock_release(smc);
+ lock_sock(sk);
}
if (!smc->use_fallback)
smc_conn_free(&smc->conn);
@@ -446,10 +445,19 @@ static void smc_link_save_peer_info(struct smc_link *link,
link->peer_mtu = clc->qp_mtu;
}
+static void smc_switch_to_fallback(struct smc_sock *smc)
+{
+ smc->use_fallback = true;
+ if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
+ smc->clcsock->file = smc->sk.sk_socket->file;
+ smc->clcsock->file->private_data = smc->clcsock;
+ }
+}
+
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
- smc->use_fallback = true;
+ smc_switch_to_fallback(smc);
smc->fallback_rsn = reason_code;
smc_copy_sock_settings_to_clc(smc);
if (smc->sk.sk_state == SMC_INIT)
@@ -775,10 +783,14 @@ static void smc_connect_work(struct work_struct *work)
smc->sk.sk_err = -rc;
out:
- if (smc->sk.sk_err)
- smc->sk.sk_state_change(&smc->sk);
- else
- smc->sk.sk_write_space(&smc->sk);
+ if (!sock_flag(&smc->sk, SOCK_DEAD)) {
+ if (smc->sk.sk_err) {
+ smc->sk.sk_state_change(&smc->sk);
+ } else { /* allow polling before and after fallback decision */
+ smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
+ smc->sk.sk_write_space(&smc->sk);
+ }
+ }
kfree(smc->connect_info);
smc->connect_info = NULL;
release_sock(&smc->sk);
@@ -872,11 +884,11 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
if (rc < 0)
lsk->sk_err = -rc;
if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
+ new_sk->sk_prot->unhash(new_sk);
if (new_clcsock)
sock_release(new_clcsock);
new_sk->sk_state = SMC_CLOSED;
sock_set_flag(new_sk, SOCK_DEAD);
- new_sk->sk_prot->unhash(new_sk);
sock_put(new_sk); /* final */
*new_smc = NULL;
goto out;
@@ -927,16 +939,21 @@ struct sock *smc_accept_dequeue(struct sock *parent,
smc_accept_unlink(new_sk);
if (new_sk->sk_state == SMC_CLOSED) {
+ new_sk->sk_prot->unhash(new_sk);
if (isk->clcsock) {
sock_release(isk->clcsock);
isk->clcsock = NULL;
}
- new_sk->sk_prot->unhash(new_sk);
sock_put(new_sk); /* final */
continue;
}
- if (new_sock)
+ if (new_sock) {
sock_graft(new_sk, new_sock);
+ if (isk->use_fallback) {
+ smc_sk(new_sk)->clcsock->file = new_sock->file;
+ isk->clcsock->file->private_data = isk->clcsock;
+ }
+ }
return new_sk;
}
return NULL;
@@ -956,6 +973,7 @@ void smc_close_non_accepted(struct sock *sk)
sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
}
+ sk->sk_prot->unhash(sk);
if (smc->clcsock) {
struct socket *tcp;
@@ -971,7 +989,6 @@ void smc_close_non_accepted(struct sock *sk)
smc_conn_free(&smc->conn);
}
release_sock(sk);
- sk->sk_prot->unhash(sk);
sock_put(sk); /* final sock_put */
}
@@ -1037,13 +1054,13 @@ static void smc_listen_out(struct smc_sock *new_smc)
struct smc_sock *lsmc = new_smc->listen_smc;
struct sock *newsmcsk = &new_smc->sk;
- lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
if (lsmc->sk.sk_state == SMC_LISTEN) {
+ lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
smc_accept_enqueue(&lsmc->sk, newsmcsk);
+ release_sock(&lsmc->sk);
} else { /* no longer listening */
smc_close_non_accepted(newsmcsk);
}
- release_sock(&lsmc->sk);
/* Wake up accept */
lsmc->sk.sk_data_ready(&lsmc->sk);
@@ -1087,7 +1104,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
return;
}
smc_conn_free(&new_smc->conn);
- new_smc->use_fallback = true;
+ smc_switch_to_fallback(new_smc);
new_smc->fallback_rsn = reason_code;
if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
if (smc_clc_send_decline(new_smc, reason_code) < 0) {
@@ -1237,6 +1254,9 @@ static void smc_listen_work(struct work_struct *work)
int rc = 0;
u8 ibport;
+ if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
+ return smc_listen_out_err(new_smc);
+
if (new_smc->use_fallback) {
smc_listen_out_connected(new_smc);
return;
@@ -1244,7 +1264,7 @@ static void smc_listen_work(struct work_struct *work)
/* check if peer is smc capable */
if (!tcp_sk(newclcsock->sk)->syn_smc) {
- new_smc->use_fallback = true;
+ smc_switch_to_fallback(new_smc);
new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
smc_listen_out_connected(new_smc);
return;
@@ -1501,7 +1521,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_FASTOPEN) {
if (sk->sk_state == SMC_INIT) {
- smc->use_fallback = true;
+ smc_switch_to_fallback(smc);
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
} else {
rc = -EINVAL;
@@ -1703,7 +1723,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
case TCP_FASTOPEN_NO_COOKIE:
/* option not supported by SMC */
if (sk->sk_state == SMC_INIT) {
- smc->use_fallback = true;
+ smc_switch_to_fallback(smc);
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
} else {
if (!smc->use_fallback)
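The repeated conversions to smc_switch_to_fallback() above centralize one subtle step of the TCP fallback: the user-visible file descriptor must start dispatching to the internal clcsock. A minimal annotated restatement of the helper, assuming the net/smc internal headers; the comments are explanatory and not part of the patch:

static void fallback_redirect_fd(struct smc_sock *smc)
{
	smc->use_fallback = true;
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		/* sockfs routes file operations through
		 * file->private_data, so attach the file to the
		 * internal TCP socket ...
		 */
		smc->clcsock->file = smc->sk.sk_socket->file;
		/* ... and let the TCP socket find its file again */
		smc->clcsock->file->private_data = smc->clcsock;
	}
}
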
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 2ad37e998509..fc06720b53c1 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -21,6 +21,22 @@
#define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ)
+/* release the clcsock that is assigned to the smc_sock */
+void smc_clcsock_release(struct smc_sock *smc)
+{
+ struct socket *tcp;
+
+ if (smc->listen_smc && current_work() != &smc->smc_listen_work)
+ cancel_work_sync(&smc->smc_listen_work);
+ mutex_lock(&smc->clcsock_release_lock);
+ if (smc->clcsock) {
+ tcp = smc->clcsock;
+ smc->clcsock = NULL;
+ sock_release(tcp);
+ }
+ mutex_unlock(&smc->clcsock_release_lock);
+}
+
static void smc_close_cleanup_listen(struct sock *parent)
{
struct sock *sk;
@@ -321,6 +337,7 @@ static void smc_close_passive_work(struct work_struct *work)
close_work);
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
struct smc_cdc_conn_state_flags *rxflags;
+ bool release_clcsock = false;
struct sock *sk = &smc->sk;
int old_state;
@@ -400,13 +417,13 @@ wakeup:
if ((sk->sk_state == SMC_CLOSED) &&
(sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
smc_conn_free(conn);
- if (smc->clcsock) {
- sock_release(smc->clcsock);
- smc->clcsock = NULL;
- }
+ if (smc->clcsock)
+ release_clcsock = true;
}
}
release_sock(sk);
+ if (release_clcsock)
+ smc_clcsock_release(smc);
sock_put(sk); /* sock_hold done by schedulers of close_work */
}
diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h
index 19eb6a211c23..e0e3b5df25d2 100644
--- a/net/smc/smc_close.h
+++ b/net/smc/smc_close.h
@@ -23,5 +23,6 @@ void smc_close_wake_tx_prepared(struct smc_sock *smc);
int smc_close_active(struct smc_sock *smc);
int smc_close_shutdown_write(struct smc_sock *smc);
void smc_close_init(struct smc_sock *smc);
+void smc_clcsock_release(struct smc_sock *smc);
#endif /* SMC_CLOSE_H */
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 2fff79db1a59..e89e918b88e0 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -289,6 +289,11 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
INIT_LIST_HEAD(&smcd->vlan);
smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
WQ_MEM_RECLAIM, name);
+ if (!smcd->event_wq) {
+ kfree(smcd->conn);
+ kfree(smcd);
+ return NULL;
+ }
return smcd;
}
EXPORT_SYMBOL_GPL(smcd_alloc_dev);
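The smcd_alloc_dev() fix follows the standard unwind idiom: each allocation after the first needs a failure path that frees its predecessors, in reverse order. A self-contained sketch of the idiom; the structure, names, and sizes are illustrative:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct two_step {
	void *buf;
	struct workqueue_struct *wq;
};

static struct two_step *two_step_alloc(const char *name)
{
	struct two_step *ts;

	ts = kzalloc(sizeof(*ts), GFP_KERNEL);
	if (!ts)
		return NULL;
	ts->buf = kzalloc(64, GFP_KERNEL);	/* illustrative size */
	if (!ts->buf)
		goto free_ts;
	ts->wq = alloc_ordered_workqueue("two_step-%s", WQ_MEM_RECLAIM, name);
	if (!ts->wq)			/* the case the hunk above adds */
		goto free_buf;
	return ts;

free_buf:
	kfree(ts->buf);
free_ts:
	kfree(ts);
	return NULL;
}
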
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 8d2f6296279c..0285c7f9e79b 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -603,7 +603,8 @@ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
- return smc_pnet_remove_by_pnetid(net, NULL);
+ smc_pnet_remove_by_pnetid(net, NULL);
+ return 0;
}
/* SMC_PNETID generic netlink operation definition */
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index bbcf0fe4ae10..413a6abf227e 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -136,7 +136,6 @@ static int smc_rx_pipe_buf_nosteal(struct pipe_inode_info *pipe,
}
static const struct pipe_buf_operations smc_pipe_ops = {
- .can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = smc_rx_pipe_buf_release,
.steal = smc_rx_pipe_buf_nosteal,
diff --git a/net/socket.c b/net/socket.c
index 3c176a12fe48..8255f5bda0aa 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -384,6 +384,18 @@ static struct file_system_type sock_fs_type = {
* but we take care of internal coherence yet.
*/
+/**
+ * sock_alloc_file - Bind a &socket to a &file
+ * @sock: socket
+ * @flags: file status flags
+ * @dname: protocol name
+ *
+ * Returns the &file bound to @sock, implicitly storing it in
+ * sock->file. If @dname is %NULL, the protocol name is used.
+ * On failure the return is an ERR pointer (see linux/err.h).
+ * This function uses GFP_KERNEL internally.
+ */
+
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
struct file *file;
@@ -424,6 +436,14 @@ static int sock_map_fd(struct socket *sock, int flags)
return PTR_ERR(newfile);
}
+/**
+ * sock_from_file - Return the &socket bound to @file.
+ * @file: file
+ * @err: pointer to an error code return
+ *
+ * On failure returns %NULL and assigns -ENOTSOCK to @err.
+ */
+
struct socket *sock_from_file(struct file *file, int *err)
{
if (file->f_op == &socket_file_ops)
@@ -532,11 +552,11 @@ static const struct inode_operations sockfs_inode_ops = {
};
/**
- * sock_alloc - allocate a socket
+ * sock_alloc - allocate a socket
*
* Allocate a new inode and socket object. The two are bound together
* and initialised. The socket is then returned. If we are out of inodes
- * NULL is returned.
+ * NULL is returned. This function uses GFP_KERNEL internally.
*/
struct socket *sock_alloc(void)
@@ -561,7 +581,7 @@ struct socket *sock_alloc(void)
EXPORT_SYMBOL(sock_alloc);
/**
- * sock_release - close a socket
+ * sock_release - close a socket
* @sock: socket to close
*
* The socket is released from the protocol stack if it has a release
@@ -617,6 +637,15 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
}
EXPORT_SYMBOL(__sock_tx_timestamp);
+/**
+ * sock_sendmsg - send a message through @sock
+ * @sock: socket
+ * @msg: message to send
+ *
+ * Sends @msg through @sock, passing through LSM.
+ * Returns the number of bytes sent, or an error code.
+ */
+
static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
{
int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));
@@ -633,6 +662,18 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
}
EXPORT_SYMBOL(sock_sendmsg);
+/**
+ * kernel_sendmsg - send a message through @sock (kernel-space)
+ * @sock: socket
+ * @msg: message header
+ * @vec: kernel vec
+ * @num: vec array length
+ * @size: total message data size
+ *
+ * Builds the message data with @vec and sends it through @sock.
+ * Returns the number of bytes sent, or an error code.
+ */
+
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
@@ -641,6 +682,19 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
}
EXPORT_SYMBOL(kernel_sendmsg);
+/**
+ * kernel_sendmsg_locked - send a message through @sock (kernel-space)
+ * @sk: sock
+ * @msg: message header
+ * @vec: output s/g array
+ * @num: output s/g array length
+ * @size: total message data size
+ *
+ * Builds the message data with @vec and sends it through @sock.
+ * Returns the number of bytes sent, or an error code.
+ * Caller must hold @sk.
+ */
+
int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
@@ -811,6 +865,16 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
}
EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
+/**
+ * sock_recvmsg - receive a message from @sock
+ * @sock: socket
+ * @msg: message to receive
+ * @flags: message flags
+ *
+ * Receives @msg from @sock, passing through LSM. Returns the total number
+ * of bytes received, or an error.
+ */
+
static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
int flags)
{
@@ -826,20 +890,21 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags)
EXPORT_SYMBOL(sock_recvmsg);
/**
- * kernel_recvmsg - Receive a message from a socket (kernel space)
- * @sock: The socket to receive the message from
- * @msg: Received message
- * @vec: Input s/g array for message data
- * @num: Size of input s/g array
- * @size: Number of bytes to read
- * @flags: Message flags (MSG_DONTWAIT, etc...)
+ * kernel_recvmsg - Receive a message from a socket (kernel space)
+ * @sock: The socket to receive the message from
+ * @msg: Received message
+ * @vec: Input s/g array for message data
+ * @num: Size of input s/g array
+ * @size: Number of bytes to read
+ * @flags: Message flags (MSG_DONTWAIT, etc...)
*
- * On return the msg structure contains the scatter/gather array passed in the
- * vec argument. The array is modified so that it consists of the unfilled
- * portion of the original array.
+ * On return the msg structure contains the scatter/gather array passed in the
+ * vec argument. The array is modified so that it consists of the unfilled
+ * portion of the original array.
*
- * The returned value is the total number of bytes received, or an error.
+ * The returned value is the total number of bytes received, or an error.
*/
+
int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size, int flags)
{
@@ -1005,6 +1070,13 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
* what to do with it - that's up to the protocol still.
*/
+/**
+ * get_net_ns - increment the refcount of the network namespace
+ * @ns: common namespace (net)
+ *
+ * Returns the net's common namespace.
+ */
+
struct ns_common *get_net_ns(struct ns_common *ns)
{
return &get_net(container_of(ns, struct net, ns))->ns;
@@ -1099,6 +1171,19 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return err;
}
+/**
+ * sock_create_lite - creates a socket
+ * @family: protocol family (AF_INET, ...)
+ * @type: communication type (SOCK_STREAM, ...)
+ * @protocol: protocol (0, ...)
+ * @res: new socket
+ *
+ * Creates a new socket and assigns it to @res, passing through LSM.
+ * The new socket initialization is not complete, see kernel_accept().
+ * Returns 0 or an error. On failure @res is set to %NULL.
+ * This function internally uses GFP_KERNEL.
+ */
+
int sock_create_lite(int family, int type, int protocol, struct socket **res)
{
int err;
@@ -1224,6 +1309,21 @@ call_kill:
}
EXPORT_SYMBOL(sock_wake_async);
+/**
+ * __sock_create - creates a socket
+ * @net: net namespace
+ * @family: protocol family (AF_INET, ...)
+ * @type: communication type (SOCK_STREAM, ...)
+ * @protocol: protocol (0, ...)
+ * @res: new socket
+ * @kern: boolean for kernel space sockets
+ *
+ * Creates a new socket and assigns it to @res, passing through LSM.
+ * Returns 0 or an error. On failure @res is set to %NULL. @kern must
+ * be set to true if the socket resides in kernel space.
+ * This function internally uses GFP_KERNEL.
+ */
+
int __sock_create(struct net *net, int family, int type, int protocol,
struct socket **res, int kern)
{
@@ -1333,12 +1433,35 @@ out_release:
}
EXPORT_SYMBOL(__sock_create);
+/**
+ * sock_create - creates a socket
+ * @family: protocol family (AF_INET, ...)
+ * @type: communication type (SOCK_STREAM, ...)
+ * @protocol: protocol (0, ...)
+ * @res: new socket
+ *
+ * A wrapper around __sock_create().
+ * Returns 0 or an error. This function internally uses GFP_KERNEL.
+ */
+
int sock_create(int family, int type, int protocol, struct socket **res)
{
return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
EXPORT_SYMBOL(sock_create);
+/**
+ * sock_create_kern - creates a socket (kernel space)
+ * @net: net namespace
+ * @family: protocol family (AF_INET, ...)
+ * @type: communication type (SOCK_STREAM, ...)
+ * @protocol: protocol (0, ...)
+ * @res: new socket
+ *
+ * A wrapper around __sock_create().
+ * Returns 0 or an error. This function internally uses GFP_KERNEL.
+ */
+
int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
{
return __sock_create(net, family, type, protocol, res, 1);
@@ -3322,18 +3445,46 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd,
}
#endif
+/**
+ * kernel_bind - bind an address to a socket (kernel space)
+ * @sock: socket
+ * @addr: address
+ * @addrlen: length of address
+ *
+ * Returns 0 or an error.
+ */
+
int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
return sock->ops->bind(sock, addr, addrlen);
}
EXPORT_SYMBOL(kernel_bind);
+/**
+ * kernel_listen - move socket to listening state (kernel space)
+ * @sock: socket
+ * @backlog: pending connections queue size
+ *
+ * Returns 0 or an error.
+ */
+
int kernel_listen(struct socket *sock, int backlog)
{
return sock->ops->listen(sock, backlog);
}
EXPORT_SYMBOL(kernel_listen);
+/**
+ * kernel_accept - accept a connection (kernel space)
+ * @sock: listening socket
+ * @newsock: new connected socket
+ * @flags: flags
+ *
+ * @flags must be SOCK_CLOEXEC, SOCK_NONBLOCK or 0.
+ * If it fails, @newsock is guaranteed to be %NULL.
+ * Returns 0 or an error.
+ */
+
int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
{
struct sock *sk = sock->sk;
@@ -3359,6 +3510,19 @@ done:
}
EXPORT_SYMBOL(kernel_accept);
+/**
+ * kernel_connect - connect a socket (kernel space)
+ * @sock: socket
+ * @addr: address
+ * @addrlen: address length
+ * @flags: flags (O_NONBLOCK, ...)
+ *
+ * For datagram sockets, @addr is the address to which datagrams are sent
+ * by default, and the only address from which datagrams are received.
+ * For stream sockets, attempts to connect to @addr.
+ * Returns 0 or an error code.
+ */
+
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags)
{
@@ -3366,18 +3530,48 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
}
EXPORT_SYMBOL(kernel_connect);
+/**
+ * kernel_getsockname - get the address to which the socket is bound (kernel space)
+ * @sock: socket
+ * @addr: address holder
+ *
+ * Fills the @addr pointer with the address to which the socket is bound.
+ * Returns 0 or an error code.
+ */
+
int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
{
return sock->ops->getname(sock, addr, 0);
}
EXPORT_SYMBOL(kernel_getsockname);
+/**
+ * kernel_getpeername - get the address to which the socket is connected (kernel space)
+ * @sock: socket
+ * @addr: address holder
+ *
+ * Fills the @addr pointer with the address to which the socket is connected.
+ * Returns 0 or an error code.
+ */
+
int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
{
return sock->ops->getname(sock, addr, 1);
}
EXPORT_SYMBOL(kernel_getpeername);
+/**
+ * kernel_getsockopt - get a socket option (kernel space)
+ * @sock: socket
+ * @level: API level (SOL_SOCKET, ...)
+ * @optname: option tag
+ * @optval: option value
+ * @optlen: option length
+ *
+ * Assigns the option length to @optlen.
+ * Returns 0 or an error.
+ */
+
int kernel_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen)
{
@@ -3400,6 +3594,17 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
}
EXPORT_SYMBOL(kernel_getsockopt);
+/**
+ * kernel_setsockopt - set a socket option (kernel space)
+ * @sock: socket
+ * @level: API level (SOL_SOCKET, ...)
+ * @optname: option tag
+ * @optval: option value
+ * @optlen: option length
+ *
+ * Returns 0 or an error.
+ */
+
int kernel_setsockopt(struct socket *sock, int level, int optname,
char *optval, unsigned int optlen)
{
@@ -3420,6 +3625,17 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
}
EXPORT_SYMBOL(kernel_setsockopt);
+/**
+ * kernel_sendpage - send a &page through a socket (kernel space)
+ * @sock: socket
+ * @page: page
+ * @offset: page offset
+ * @size: total size in bytes
+ * @flags: flags (MSG_DONTWAIT, ...)
+ *
+ * Returns the total amount sent in bytes or an error.
+ */
+
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags)
{
@@ -3430,6 +3646,18 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
}
EXPORT_SYMBOL(kernel_sendpage);
+/**
+ * kernel_sendpage_locked - send a &page through the locked sock (kernel space)
+ * @sk: sock
+ * @page: page
+ * @offset: page offset
+ * @size: total size in bytes
+ * @flags: flags (MSG_DONTWAIT, ...)
+ *
+ * Returns the total amount sent in bytes or an error.
+ * Caller must hold @sk.
+ */
+
int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
@@ -3443,17 +3671,30 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
}
EXPORT_SYMBOL(kernel_sendpage_locked);
+/**
+ * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
+ * @sock: socket
+ * @how: which halves to shut down (SHUT_RD, SHUT_WR or SHUT_RDWR)
+ *
+ * Returns 0 or an error.
+ */
+
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
return sock->ops->shutdown(sock, how);
}
EXPORT_SYMBOL(kernel_sock_shutdown);
-/* This routine returns the IP overhead imposed by a socket i.e.
- * the length of the underlying IP header, depending on whether
- * this is an IPv4 or IPv6 socket and the length from IP options turned
- * on at the socket. Assumes that the caller has a lock on the socket.
+/**
+ * kernel_sock_ip_overhead - returns the IP overhead imposed by a socket
+ * @sk: socket
+ *
+ * This routine returns the IP overhead imposed by a socket, i.e.
+ * the length of the underlying IP header, depending on whether
+ * this is an IPv4 or IPv6 socket, plus the length of any IP options
+ * turned on at the socket. Assumes that the caller holds the socket lock.
*/
+
u32 kernel_sock_ip_overhead(struct sock *sk)
{
struct inet_sock *inet;
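Taken together, the kernel-doc added above covers the full lifecycle of the in-kernel socket API. A hedged end-to-end sketch, assuming a kernel-module context; the loopback address, port, and buffer size are illustrative:

#include <linux/in.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <net/net_namespace.h>

static int echo_once(void)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		.sin_port = htons(12345),	/* illustrative port */
	};
	struct socket *lsock = NULL, *csock = NULL;
	struct msghdr msg = { };
	struct kvec vec;
	char buf[64];
	int rc;

	rc = sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
			      IPPROTO_TCP, &lsock);
	if (rc)
		return rc;
	rc = kernel_bind(lsock, (struct sockaddr *)&sin, sizeof(sin));
	if (rc)
		goto out;
	rc = kernel_listen(lsock, 5);
	if (rc)
		goto out;
	rc = kernel_accept(lsock, &csock, 0);	/* blocks for a peer */
	if (rc)
		goto out;

	vec.iov_base = buf;
	vec.iov_len = sizeof(buf);
	rc = kernel_recvmsg(csock, &msg, &vec, 1, sizeof(buf), 0);
	if (rc > 0) {
		/* kernel_recvmsg consumes the kvec, so rebuild it
		 * before echoing the received bytes back
		 */
		memset(&msg, 0, sizeof(msg));
		vec.iov_base = buf;
		vec.iov_len = rc;
		rc = kernel_sendmsg(csock, &msg, &vec, 1, rc);
	}

	kernel_sock_shutdown(csock, SHUT_RDWR);
	sock_release(csock);
out:
	sock_release(lsock);
	return rc < 0 ? rc : 0;
}
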
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index da1a676860ca..fa6c977b4c41 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
/* We are going to append to the frags_list of head.
* Need to unshare the frag_list.
*/
- if (skb_has_frag_list(head)) {
- err = skb_unclone(head, GFP_ATOMIC);
- if (err) {
- STRP_STATS_INCR(strp->stats.mem_fail);
- desc->error = err;
- return 0;
- }
+ err = skb_unclone(head, GFP_ATOMIC);
+ if (err) {
+ STRP_STATS_INCR(strp->stats.mem_fail);
+ desc->error = err;
+ return 0;
}
if (unlikely(skb_shinfo(head)->frag_list)) {
@@ -550,6 +548,8 @@ EXPORT_SYMBOL_GPL(strp_check_rcv);
static int __init strp_mod_init(void)
{
strp_wq = create_singlethread_workqueue("kstrp");
+ if (unlikely(!strp_wq))
+ return -ENOMEM;
return 0;
}
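The strp_mod_init() fix is the minimal form of a recurring rule: a workqueue created at module init must be NULL-checked so that module load fails cleanly instead of oopsing on first use. A self-contained sketch, assuming a standalone module; all names are illustrative:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (unlikely(!example_wq))
		return -ENOMEM;	/* abort the module load */
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
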
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index ac09ca803296..83f5617bae07 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -34,6 +34,22 @@ config RPCSEC_GSS_KRB5
If unsure, say Y.
+config SUNRPC_DISABLE_INSECURE_ENCTYPES
+ bool "Secure RPC: Disable insecure Kerberos encryption types"
+ depends on RPCSEC_GSS_KRB5
+ default n
+ help
+ Choose Y here to disable the use of deprecated encryption types
+ with the Kerberos version 5 GSS-API mechanism (RFC 1964). The
+ deprecated encryption types include DES-CBC-MD5, DES-CBC-CRC,
+ and DES-CBC-MD4. These types were deprecated by RFC 6649 because
+ they were found to be insecure.
+
+ N is the default because many sites have deployed KDCs and
+ keytabs that contain only these deprecated encryption types.
+ Choosing Y prevents the use of known-insecure encryption types
+ but might result in compatibility problems.
+
config SUNRPC_DEBUG
bool "RPC: Enable dprintk debugging"
depends on SUNRPC && SYSCTL
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index f3023bbc0b7f..e7861026b9e5 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -17,9 +17,7 @@
#include <linux/sunrpc/gss_api.h>
#include <linux/spinlock.h>
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
+#include <trace/events/sunrpc.h>
#define RPC_CREDCACHE_DEFAULT_HASHBITS (4)
struct rpc_cred_cache {
@@ -267,8 +265,6 @@ rpcauth_list_flavors(rpc_authflavor_t *array, int size)
}
}
rcu_read_unlock();
-
- dprintk("RPC: %s returns %d\n", __func__, result);
return result;
}
EXPORT_SYMBOL_GPL(rpcauth_list_flavors);
@@ -636,9 +632,6 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
struct rpc_cred *ret;
const struct cred *cred = current_cred();
- dprintk("RPC: looking up %s cred\n",
- auth->au_ops->au_name);
-
memset(&acred, 0, sizeof(acred));
acred.cred = cred;
ret = auth->au_ops->lookup_cred(auth, &acred, flags);
@@ -670,8 +663,6 @@ rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
};
struct rpc_cred *ret;
- dprintk("RPC: %5u looking up %s cred\n",
- task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags);
put_cred(acred.cred);
return ret;
@@ -688,8 +679,6 @@ rpcauth_bind_machine_cred(struct rpc_task *task, int lookupflags)
if (!acred.principal)
return NULL;
- dprintk("RPC: %5u looking up %s machine cred\n",
- task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
return auth->au_ops->lookup_cred(auth, &acred, lookupflags);
}
@@ -698,8 +687,6 @@ rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
- dprintk("RPC: %5u looking up %s cred\n",
- task->tk_pid, auth->au_ops->au_name);
return rpcauth_lookupcred(auth, lookupflags);
}
@@ -771,75 +758,102 @@ destroy:
}
EXPORT_SYMBOL_GPL(put_rpccred);
-__be32 *
-rpcauth_marshcred(struct rpc_task *task, __be32 *p)
+/**
+ * rpcauth_marshcred - Append RPC credential to end of @xdr
+ * @task: controlling RPC task
+ * @xdr: xdr_stream containing initial portion of RPC Call header
+ *
+ * On success, an appropriate verifier is added to @xdr, @xdr is
+ * updated to point past the verifier, and zero is returned.
+ * Otherwise, @xdr is in an undefined state and a negative errno
+ * is returned.
+ */
+int rpcauth_marshcred(struct rpc_task *task, struct xdr_stream *xdr)
{
- struct rpc_cred *cred = task->tk_rqstp->rq_cred;
+ const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
- dprintk("RPC: %5u marshaling %s cred %p\n",
- task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
-
- return cred->cr_ops->crmarshal(task, p);
+ return ops->crmarshal(task, xdr);
}
-__be32 *
-rpcauth_checkverf(struct rpc_task *task, __be32 *p)
+/**
+ * rpcauth_wrap_req_encode - XDR encode the RPC procedure
+ * @task: controlling RPC task
+ * @xdr: stream where on-the-wire bytes are to be marshalled
+ *
+ * On success, @xdr contains the encoded and wrapped message.
+ * Otherwise, @xdr is in an undefined state.
+ */
+int rpcauth_wrap_req_encode(struct rpc_task *task, struct xdr_stream *xdr)
{
- struct rpc_cred *cred = task->tk_rqstp->rq_cred;
+ kxdreproc_t encode = task->tk_msg.rpc_proc->p_encode;
- dprintk("RPC: %5u validating %s cred %p\n",
- task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
-
- return cred->cr_ops->crvalidate(task, p);
+ encode(task->tk_rqstp, xdr, task->tk_msg.rpc_argp);
+ return 0;
}
+EXPORT_SYMBOL_GPL(rpcauth_wrap_req_encode);
-static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
- __be32 *data, void *obj)
+/**
+ * rpcauth_wrap_req - XDR encode and wrap the RPC procedure
+ * @task: controlling RPC task
+ * @xdr: stream where on-the-wire bytes are to be marshalled
+ *
+ * On success, @xdr contains the encoded and wrapped message,
+ * and zero is returned. Otherwise, @xdr is in an undefined
+ * state and a negative errno is returned.
+ */
+int rpcauth_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
{
- struct xdr_stream xdr;
+ const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
- xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
- encode(rqstp, &xdr, obj);
+ return ops->crwrap_req(task, xdr);
}
+/**
+ * rpcauth_checkverf - Validate verifier in RPC Reply header
+ * @task: controlling RPC task
+ * @xdr: xdr_stream containing RPC Reply header
+ *
+ * On success, @xdr is updated to point past the verifier and
+ * zero is returned. Otherwise, @xdr is in an undefined state
+ * and a negative errno is returned.
+ */
int
-rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
- __be32 *data, void *obj)
+rpcauth_checkverf(struct rpc_task *task, struct xdr_stream *xdr)
{
- struct rpc_cred *cred = task->tk_rqstp->rq_cred;
+ const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
- dprintk("RPC: %5u using %s cred %p to wrap rpc data\n",
- task->tk_pid, cred->cr_ops->cr_name, cred);
- if (cred->cr_ops->crwrap_req)
- return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
- /* By default, we encode the arguments normally. */
- rpcauth_wrap_req_encode(encode, rqstp, data, obj);
- return 0;
+ return ops->crvalidate(task, xdr);
}
-static int
-rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
- __be32 *data, void *obj)
+/**
+ * rpcauth_unwrap_resp_decode - Invoke XDR decode function
+ * @task: controlling RPC task
+ * @xdr: stream where the Reply message resides
+ *
+ * Returns zero on success; otherwise a negative errno is returned.
+ */
+int
+rpcauth_unwrap_resp_decode(struct rpc_task *task, struct xdr_stream *xdr)
{
- struct xdr_stream xdr;
+ kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
- return decode(rqstp, &xdr, obj);
+ return decode(task->tk_rqstp, xdr, task->tk_msg.rpc_resp);
}
+EXPORT_SYMBOL_GPL(rpcauth_unwrap_resp_decode);
+/**
+ * rpcauth_unwrap_resp - Invoke unwrap and decode function for the cred
+ * @task: controlling RPC task
+ * @xdr: stream where the Reply message resides
+ *
+ * Returns zero on success; otherwise a negative errno is returned.
+ */
int
-rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
- __be32 *data, void *obj)
+rpcauth_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
{
- struct rpc_cred *cred = task->tk_rqstp->rq_cred;
+ const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
- dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n",
- task->tk_pid, cred->cr_ops->cr_name, cred);
- if (cred->cr_ops->crunwrap_resp)
- return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
- data, obj);
- /* By default, we decode the arguments normally. */
- return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
+ return ops->crunwrap_resp(task, xdr);
}
bool
@@ -865,8 +879,6 @@ rpcauth_refreshcred(struct rpc_task *task)
goto out;
cred = task->tk_rqstp->rq_cred;
}
- dprintk("RPC: %5u refreshing %s cred %p\n",
- task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
err = cred->cr_ops->crrefresh(task);
out:
@@ -880,8 +892,6 @@ rpcauth_invalcred(struct rpc_task *task)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
- dprintk("RPC: %5u invalidating %s cred %p\n",
- task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
if (cred)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
}
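The auth.c conversion above replaces the raw __be32 pointer convention of the credops with struct xdr_stream. For orientation, a minimal crmarshal under the new convention, modeled on the AUTH_NULL flavor (four fixed words, no cryptography); treat it as a sketch rather than the flavor's actual source:

static int null_marshal_sketch(struct rpc_task *task, struct xdr_stream *xdr)
{
	__be32 *p;

	/* flavor, zero-length credential, flavor, zero-length verifier */
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		return -EMSGSIZE;	/* stream full: marshalling failed */
	*p++ = rpc_auth_null;
	*p++ = xdr_zero;
	*p++ = rpc_auth_null;
	*p   = xdr_zero;
	return 0;
}
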
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index c374268b008f..4a29f4c5dac4 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o
auth_rpcgss-y := auth_gss.o gss_generic_token.o \
gss_mech_switch.o svcauth_gss.o \
- gss_rpc_upcall.o gss_rpc_xdr.o
+ gss_rpc_upcall.o gss_rpc_xdr.o trace.o
obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 1531b0219344..3fd56c0c90ae 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: BSD-3-Clause
/*
* linux/net/sunrpc/auth_gss/auth_gss.c
*
@@ -8,34 +9,8 @@
*
* Dug Song <dugsong@monkey.org>
* Andy Adamson <andros@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -55,6 +30,8 @@
#include "../netns.h"
+#include <trace/events/rpcgss.h>
+
static const struct rpc_authops authgss_ops;
static const struct rpc_credops gss_credops;
@@ -260,6 +237,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
}
ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
if (ret < 0) {
+ trace_rpcgss_import_ctx(ret);
p = ERR_PTR(ret);
goto err;
}
@@ -275,12 +253,9 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
if (IS_ERR(p))
goto err;
done:
- dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u acceptor %.*s\n",
- __func__, ctx->gc_expiry, now, timeout, ctx->gc_acceptor.len,
- ctx->gc_acceptor.data);
- return p;
+ trace_rpcgss_context(ctx->gc_expiry, now, timeout,
+ ctx->gc_acceptor.len, ctx->gc_acceptor.data);
err:
- dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
return p;
}
@@ -354,10 +329,8 @@ __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth
if (auth && pos->auth->service != auth->service)
continue;
refcount_inc(&pos->count);
- dprintk("RPC: %s found msg %p\n", __func__, pos);
return pos;
}
- dprintk("RPC: %s found nothing\n", __func__);
return NULL;
}
@@ -456,7 +429,7 @@ static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
size_t buflen = sizeof(gss_msg->databuf);
int len;
- len = scnprintf(p, buflen, "mech=%s uid=%d ", mech->gm_name,
+ len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
from_kuid(&init_user_ns, gss_msg->uid));
buflen -= len;
p += len;
@@ -467,7 +440,7 @@ static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
* identity that we are authenticating to.
*/
if (target_name) {
- len = scnprintf(p, buflen, "target=%s ", target_name);
+ len = scnprintf(p, buflen, " target=%s", target_name);
buflen -= len;
p += len;
gss_msg->msg.len += len;
@@ -487,11 +460,11 @@ static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
char *c = strchr(service_name, '@');
if (!c)
- len = scnprintf(p, buflen, "service=%s ",
+ len = scnprintf(p, buflen, " service=%s",
service_name);
else
len = scnprintf(p, buflen,
- "service=%.*s srchost=%s ",
+ " service=%.*s srchost=%s",
(int)(c - service_name),
service_name, c + 1);
buflen -= len;
@@ -500,17 +473,17 @@ static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
}
if (mech->gm_upcall_enctypes) {
- len = scnprintf(p, buflen, "enctypes=%s ",
+ len = scnprintf(p, buflen, " enctypes=%s",
mech->gm_upcall_enctypes);
buflen -= len;
p += len;
gss_msg->msg.len += len;
}
+ trace_rpcgss_upcall_msg(gss_msg->databuf);
len = scnprintf(p, buflen, "\n");
if (len == 0)
goto out_overflow;
gss_msg->msg.len += len;
-
gss_msg->msg.data = gss_msg->databuf;
return 0;
out_overflow:
@@ -603,8 +576,6 @@ gss_refresh_upcall(struct rpc_task *task)
struct rpc_pipe *pipe;
int err = 0;
- dprintk("RPC: %5u %s for uid %u\n",
- task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_cred->fsuid));
gss_msg = gss_setup_upcall(gss_auth, cred);
if (PTR_ERR(gss_msg) == -EAGAIN) {
/* XXX: warning on the first, under the assumption we
@@ -612,7 +583,8 @@ gss_refresh_upcall(struct rpc_task *task)
warn_gssd();
task->tk_timeout = 15*HZ;
rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
- return -EAGAIN;
+ err = -EAGAIN;
+ goto out;
}
if (IS_ERR(gss_msg)) {
err = PTR_ERR(gss_msg);
@@ -635,9 +607,8 @@ gss_refresh_upcall(struct rpc_task *task)
spin_unlock(&pipe->lock);
gss_release_msg(gss_msg);
out:
- dprintk("RPC: %5u %s for uid %u result %d\n",
- task->tk_pid, __func__,
- from_kuid(&init_user_ns, cred->cr_cred->fsuid), err);
+ trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
+ cred->cr_cred->fsuid), err);
return err;
}
@@ -652,14 +623,13 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
DEFINE_WAIT(wait);
int err;
- dprintk("RPC: %s for uid %u\n",
- __func__, from_kuid(&init_user_ns, cred->cr_cred->fsuid));
retry:
err = 0;
/* if gssd is down, just skip upcalling altogether */
if (!gssd_running(net)) {
warn_gssd();
- return -EACCES;
+ err = -EACCES;
+ goto out;
}
gss_msg = gss_setup_upcall(gss_auth, cred);
if (PTR_ERR(gss_msg) == -EAGAIN) {
@@ -700,8 +670,8 @@ out_intr:
finish_wait(&gss_msg->waitqueue, &wait);
gss_release_msg(gss_msg);
out:
- dprintk("RPC: %s for uid %u result %d\n",
- __func__, from_kuid(&init_user_ns, cred->cr_cred->fsuid), err);
+ trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
+ cred->cr_cred->fsuid), err);
return err;
}
@@ -794,7 +764,6 @@ err_put_ctx:
err:
kfree(buf);
out:
- dprintk("RPC: %s returning %zd\n", __func__, err);
return err;
}
@@ -863,8 +832,6 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
if (msg->errno < 0) {
- dprintk("RPC: %s releasing msg %p\n",
- __func__, gss_msg);
refcount_inc(&gss_msg->count);
gss_unhash_msg(gss_msg);
if (msg->errno == -ETIMEDOUT)
@@ -1024,8 +991,6 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
struct rpc_auth * auth;
int err = -ENOMEM; /* XXX? */
- dprintk("RPC: creating GSS authenticator for client %p\n", clnt);
-
if (!try_module_get(THIS_MODULE))
return ERR_PTR(err);
if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
@@ -1041,10 +1006,8 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
gss_auth->net = get_net(rpc_net_ns(clnt));
err = -EINVAL;
gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
- if (!gss_auth->mech) {
- dprintk("RPC: Pseudoflavor %d not found!\n", flavor);
+ if (!gss_auth->mech)
goto err_put_net;
- }
gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
if (gss_auth->service == 0)
goto err_put_mech;
@@ -1053,6 +1016,8 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
auth = &gss_auth->rpc_auth;
auth->au_cslack = GSS_CRED_SLACK >> 2;
auth->au_rslack = GSS_VERF_SLACK >> 2;
+ auth->au_verfsize = GSS_VERF_SLACK >> 2;
+ auth->au_ralign = GSS_VERF_SLACK >> 2;
auth->au_flags = 0;
auth->au_ops = &authgss_ops;
auth->au_flavor = flavor;
@@ -1099,6 +1064,7 @@ err_free:
kfree(gss_auth);
out_dec:
module_put(THIS_MODULE);
+ trace_rpcgss_createauth(flavor, err);
return ERR_PTR(err);
}
@@ -1135,9 +1101,6 @@ gss_destroy(struct rpc_auth *auth)
struct gss_auth *gss_auth = container_of(auth,
struct gss_auth, rpc_auth);
- dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
- auth, auth->au_flavor);
-
if (hash_hashed(&gss_auth->hash)) {
spin_lock(&gss_auth_hash_lock);
hash_del(&gss_auth->hash);
@@ -1245,7 +1208,7 @@ gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
struct gss_cred *new;
/* Make a copy of the cred so that we can reference count it */
- new = kzalloc(sizeof(*gss_cred), GFP_NOIO);
+ new = kzalloc(sizeof(*gss_cred), GFP_NOFS);
if (new) {
struct auth_cred acred = {
.cred = gss_cred->gc_base.cr_cred,
@@ -1300,8 +1263,6 @@ gss_send_destroy_context(struct rpc_cred *cred)
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
- dprintk("RPC: %s\n", __func__);
-
gss_delete_sec_context(&ctx->gc_gss_ctx);
kfree(ctx->gc_wire_ctx.data);
kfree(ctx->gc_acceptor.data);
@@ -1324,7 +1285,6 @@ gss_free_ctx(struct gss_cl_ctx *ctx)
static void
gss_free_cred(struct gss_cred *gss_cred)
{
- dprintk("RPC: %s cred=%p\n", __func__, gss_cred);
kfree(gss_cred);
}
@@ -1381,10 +1341,6 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t
struct gss_cred *cred = NULL;
int err = -ENOMEM;
- dprintk("RPC: %s for uid %d, flavor %d\n",
- __func__, from_kuid(&init_user_ns, acred->cred->fsuid),
- auth->au_flavor);
-
if (!(cred = kzalloc(sizeof(*cred), gfp)))
goto out_err;
@@ -1400,7 +1356,6 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t
return &cred->gc_base;
out_err:
- dprintk("RPC: %s failed with error %d\n", __func__, err);
return ERR_PTR(err);
}
@@ -1526,69 +1481,84 @@ out:
}
/*
-* Marshal credentials.
-* Maybe we should keep a cached credential for performance reasons.
-*/
-static __be32 *
-gss_marshal(struct rpc_task *task, __be32 *p)
+ * Marshal credentials.
+ *
+ * The expensive part is computing the verifier. We can't cache a
+ * pre-computed version of the verifier because the seqno, which
+ * is different every time, is included in the MIC.
+ */
+static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_cred *cred = req->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
- __be32 *cred_len;
+ __be32 *p, *cred_len;
u32 maj_stat = 0;
struct xdr_netobj mic;
struct kvec iov;
struct xdr_buf verf_buf;
+ int status;
- dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
+ /* Credential */
- *p++ = htonl(RPC_AUTH_GSS);
+ p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
+ ctx->gc_wire_ctx.len);
+ if (!p)
+ goto marshal_failed;
+ *p++ = rpc_auth_gss;
cred_len = p++;
spin_lock(&ctx->gc_seq_lock);
req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
spin_unlock(&ctx->gc_seq_lock);
if (req->rq_seqno == MAXSEQ)
- goto out_expired;
+ goto expired;
+ trace_rpcgss_seqno(task);
- *p++ = htonl((u32) RPC_GSS_VERSION);
- *p++ = htonl((u32) ctx->gc_proc);
- *p++ = htonl((u32) req->rq_seqno);
- *p++ = htonl((u32) gss_cred->gc_service);
+ *p++ = cpu_to_be32(RPC_GSS_VERSION);
+ *p++ = cpu_to_be32(ctx->gc_proc);
+ *p++ = cpu_to_be32(req->rq_seqno);
+ *p++ = cpu_to_be32(gss_cred->gc_service);
p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
- *cred_len = htonl((p - (cred_len + 1)) << 2);
+ *cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
+
+ /* Verifier */
/* We compute the checksum for the verifier over the xdr-encoded bytes
* starting with the xid and ending at the end of the credential: */
- iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
- req->rq_snd_buf.head[0].iov_base);
+ iov.iov_base = req->rq_snd_buf.head[0].iov_base;
iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
xdr_buf_from_iov(&iov, &verf_buf);
- /* set verifier flavor*/
- *p++ = htonl(RPC_AUTH_GSS);
-
+ p = xdr_reserve_space(xdr, sizeof(*p));
+ if (!p)
+ goto marshal_failed;
+ *p++ = rpc_auth_gss;
mic.data = (u8 *)(p + 1);
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
- if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
- goto out_expired;
- } else if (maj_stat != 0) {
- pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
- task->tk_status = -EIO;
- goto out_put_ctx;
- }
- p = xdr_encode_opaque(p, NULL, mic.len);
+ if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+ goto expired;
+ else if (maj_stat != 0)
+ goto bad_mic;
+ if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
+ goto marshal_failed;
+ status = 0;
+out:
gss_put_ctx(ctx);
- return p;
-out_expired:
+ return status;
+expired:
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
- task->tk_status = -EKEYEXPIRED;
-out_put_ctx:
- gss_put_ctx(ctx);
- return NULL;
+ status = -EKEYEXPIRED;
+ goto out;
+marshal_failed:
+ status = -EMSGSIZE;
+ goto out;
+bad_mic:
+ trace_rpcgss_get_mic(task, maj_stat);
+ status = -EIO;
+ goto out;
}
static int gss_renew_cred(struct rpc_task *task)
@@ -1662,116 +1632,105 @@ gss_refresh_null(struct rpc_task *task)
return 0;
}
-static __be32 *
-gss_validate(struct rpc_task *task, __be32 *p)
+static int
+gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
- __be32 *seq = NULL;
+ __be32 *p, *seq = NULL;
struct kvec iov;
struct xdr_buf verf_buf;
struct xdr_netobj mic;
- u32 flav,len;
- u32 maj_stat;
- __be32 *ret = ERR_PTR(-EIO);
+ u32 len, maj_stat;
+ int status;
- dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
+ p = xdr_inline_decode(xdr, 2 * sizeof(*p));
+ if (!p)
+ goto validate_failed;
+ if (*p++ != rpc_auth_gss)
+ goto validate_failed;
+ len = be32_to_cpup(p);
+ if (len > RPC_MAX_AUTH_SIZE)
+ goto validate_failed;
+ p = xdr_inline_decode(xdr, len);
+ if (!p)
+ goto validate_failed;
- flav = ntohl(*p++);
- if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
- goto out_bad;
- if (flav != RPC_AUTH_GSS)
- goto out_bad;
seq = kmalloc(4, GFP_NOFS);
if (!seq)
- goto out_bad;
- *seq = htonl(task->tk_rqstp->rq_seqno);
+ goto validate_failed;
+ *seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
iov.iov_base = seq;
iov.iov_len = 4;
xdr_buf_from_iov(&iov, &verf_buf);
mic.data = (u8 *)p;
mic.len = len;
-
- ret = ERR_PTR(-EACCES);
maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
- if (maj_stat) {
- dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
- task->tk_pid, __func__, maj_stat);
- goto out_bad;
- }
+ if (maj_stat)
+ goto bad_mic;
+
/* We leave it to unwrap to calculate au_rslack. For now we just
* calculate the length of the verifier: */
cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
+ status = 0;
+out:
gss_put_ctx(ctx);
- dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
- task->tk_pid, __func__);
- kfree(seq);
- return p + XDR_QUADLEN(len);
-out_bad:
- gss_put_ctx(ctx);
- dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
- PTR_ERR(ret));
kfree(seq);
- return ret;
-}
-
-static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
- __be32 *p, void *obj)
-{
- struct xdr_stream xdr;
+ return status;
- xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
- encode(rqstp, &xdr, obj);
+validate_failed:
+ status = -EIO;
+ goto out;
+bad_mic:
+ trace_rpcgss_verify_mic(task, maj_stat);
+ status = -EACCES;
+ goto out;
}
-static inline int
-gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- kxdreproc_t encode, struct rpc_rqst *rqstp,
- __be32 *p, void *obj)
+static int gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+ struct rpc_task *task, struct xdr_stream *xdr)
{
- struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
- struct xdr_buf integ_buf;
- __be32 *integ_len = NULL;
+ struct rpc_rqst *rqstp = task->tk_rqstp;
+ struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
struct xdr_netobj mic;
- u32 offset;
- __be32 *q;
- struct kvec *iov;
- u32 maj_stat = 0;
- int status = -EIO;
+ __be32 *p, *integ_len;
+ u32 offset, maj_stat;
+ p = xdr_reserve_space(xdr, 2 * sizeof(*p));
+ if (!p)
+ goto wrap_failed;
integ_len = p++;
- offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
- *p++ = htonl(rqstp->rq_seqno);
+ *p = cpu_to_be32(rqstp->rq_seqno);
- gss_wrap_req_encode(encode, rqstp, p, obj);
+ if (rpcauth_wrap_req_encode(task, xdr))
+ goto wrap_failed;
+ offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
if (xdr_buf_subsegment(snd_buf, &integ_buf,
offset, snd_buf->len - offset))
- return status;
- *integ_len = htonl(integ_buf.len);
+ goto wrap_failed;
+ *integ_len = cpu_to_be32(integ_buf.len);
- /* guess whether we're in the head or the tail: */
- if (snd_buf->page_len || snd_buf->tail[0].iov_len)
- iov = snd_buf->tail;
- else
- iov = snd_buf->head;
- p = iov->iov_base + iov->iov_len;
+ p = xdr_reserve_space(xdr, 0);
+ if (!p)
+ goto wrap_failed;
mic.data = (u8 *)(p + 1);
-
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
- status = -EIO; /* XXX? */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
else if (maj_stat)
- return status;
- q = xdr_encode_opaque(p, NULL, mic.len);
-
- offset = (u8 *)q - (u8 *)p;
- iov->iov_len += offset;
- snd_buf->len += offset;
+ goto bad_mic;
+ /* Check that the trailing MIC fit in the buffer, after the fact */
+ if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
+ goto wrap_failed;
return 0;
+wrap_failed:
+ return -EMSGSIZE;
+bad_mic:
+ trace_rpcgss_get_mic(task, maj_stat);
+ return -EIO;
}
static void
@@ -1822,61 +1781,62 @@ out:
return -EAGAIN;
}
-static inline int
-gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- kxdreproc_t encode, struct rpc_rqst *rqstp,
- __be32 *p, void *obj)
+static int gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+ struct rpc_task *task, struct xdr_stream *xdr)
{
+ struct rpc_rqst *rqstp = task->tk_rqstp;
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
- u32 offset;
- u32 maj_stat;
+ u32 pad, offset, maj_stat;
int status;
- __be32 *opaque_len;
+ __be32 *p, *opaque_len;
struct page **inpages;
int first;
- int pad;
struct kvec *iov;
- char *tmp;
+ status = -EIO;
+ p = xdr_reserve_space(xdr, 2 * sizeof(*p));
+ if (!p)
+ goto wrap_failed;
opaque_len = p++;
- offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
- *p++ = htonl(rqstp->rq_seqno);
+ *p = cpu_to_be32(rqstp->rq_seqno);
- gss_wrap_req_encode(encode, rqstp, p, obj);
+ if (rpcauth_wrap_req_encode(task, xdr))
+ goto wrap_failed;
status = alloc_enc_pages(rqstp);
- if (status)
- return status;
+ if (unlikely(status))
+ goto wrap_failed;
first = snd_buf->page_base >> PAGE_SHIFT;
inpages = snd_buf->pages + first;
snd_buf->pages = rqstp->rq_enc_pages;
snd_buf->page_base -= first << PAGE_SHIFT;
/*
- * Give the tail its own page, in case we need extra space in the
- * head when wrapping:
+ * Move the tail into its own page, in case gss_wrap needs
+ * more space in the head when wrapping.
*
- * call_allocate() allocates twice the slack space required
- * by the authentication flavor to rq_callsize.
- * For GSS, slack is GSS_CRED_SLACK.
+ * Still... Why can't gss_wrap just slide the tail down?
*/
if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
+ char *tmp;
+
tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
snd_buf->tail[0].iov_base = tmp;
}
+ offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
/* slack space should prevent this ever happening: */
- BUG_ON(snd_buf->len > snd_buf->buflen);
- status = -EIO;
+ if (unlikely(snd_buf->len > snd_buf->buflen))
+ goto wrap_failed;
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
* done anyway, so it's safe to put the request on the wire: */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
else if (maj_stat)
- return status;
+ goto bad_wrap;
- *opaque_len = htonl(snd_buf->len - offset);
- /* guess whether we're in the head or the tail: */
+ *opaque_len = cpu_to_be32(snd_buf->len - offset);
+ /* guess whether the pad goes into the head or the tail: */
if (snd_buf->page_len || snd_buf->tail[0].iov_len)
iov = snd_buf->tail;
else
@@ -1888,118 +1848,154 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
snd_buf->len += pad;
return 0;
+wrap_failed:
+ return status;
+bad_wrap:
+ trace_rpcgss_wrap(task, maj_stat);
+ return -EIO;
}
-static int
-gss_wrap_req(struct rpc_task *task,
- kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
+static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
- int status = -EIO;
+ int status;
- dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
+ status = -EIO;
if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
/* The spec seems a little ambiguous here, but I think that not
* wrapping context destruction requests makes the most sense.
*/
- gss_wrap_req_encode(encode, rqstp, p, obj);
- status = 0;
+ status = rpcauth_wrap_req_encode(task, xdr);
goto out;
}
switch (gss_cred->gc_service) {
case RPC_GSS_SVC_NONE:
- gss_wrap_req_encode(encode, rqstp, p, obj);
- status = 0;
+ status = rpcauth_wrap_req_encode(task, xdr);
break;
case RPC_GSS_SVC_INTEGRITY:
- status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
+ status = gss_wrap_req_integ(cred, ctx, task, xdr);
break;
case RPC_GSS_SVC_PRIVACY:
- status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
+ status = gss_wrap_req_priv(cred, ctx, task, xdr);
break;
+ default:
+ status = -EIO;
}
out:
gss_put_ctx(ctx);
- dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
return status;
}
-static inline int
-gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- struct rpc_rqst *rqstp, __be32 **p)
+static int
+gss_unwrap_resp_auth(struct rpc_cred *cred)
{
- struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
- struct xdr_buf integ_buf;
+ struct rpc_auth *auth = cred->cr_auth;
+
+ auth->au_rslack = auth->au_verfsize;
+ auth->au_ralign = auth->au_verfsize;
+ return 0;
+}
+
+static int
+gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
+ struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr)
+{
+ struct xdr_buf integ_buf, *rcv_buf = &rqstp->rq_rcv_buf;
+ u32 data_offset, mic_offset, integ_len, maj_stat;
+ struct rpc_auth *auth = cred->cr_auth;
struct xdr_netobj mic;
- u32 data_offset, mic_offset;
- u32 integ_len;
- u32 maj_stat;
- int status = -EIO;
+ __be32 *p;
- integ_len = ntohl(*(*p)++);
+ p = xdr_inline_decode(xdr, 2 * sizeof(*p));
+ if (unlikely(!p))
+ goto unwrap_failed;
+ integ_len = be32_to_cpup(p++);
if (integ_len & 3)
- return status;
- data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
+ goto unwrap_failed;
+ data_offset = (u8 *)(p) - (u8 *)rcv_buf->head[0].iov_base;
mic_offset = integ_len + data_offset;
if (mic_offset > rcv_buf->len)
- return status;
- if (ntohl(*(*p)++) != rqstp->rq_seqno)
- return status;
-
- if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
- mic_offset - data_offset))
- return status;
+ goto unwrap_failed;
+ if (be32_to_cpup(p) != rqstp->rq_seqno)
+ goto bad_seqno;
+ if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len))
+ goto unwrap_failed;
if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
- return status;
-
+ goto unwrap_failed;
maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
if (maj_stat != GSS_S_COMPLETE)
- return status;
+ goto bad_mic;
+
+ auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
+ auth->au_ralign = auth->au_verfsize + 2;
return 0;
+unwrap_failed:
+ trace_rpcgss_unwrap_failed(task);
+ return -EIO;
+bad_seqno:
+ trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(p));
+ return -EIO;
+bad_mic:
+ trace_rpcgss_verify_mic(task, maj_stat);
+ return -EIO;
}
-static inline int
-gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- struct rpc_rqst *rqstp, __be32 **p)
-{
- struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
- u32 offset;
- u32 opaque_len;
- u32 maj_stat;
- int status = -EIO;
-
- opaque_len = ntohl(*(*p)++);
- offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
+static int
+gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
+ struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr)
+{
+ struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
+ struct kvec *head = rqstp->rq_rcv_buf.head;
+ struct rpc_auth *auth = cred->cr_auth;
+ unsigned int savedlen = rcv_buf->len;
+ u32 offset, opaque_len, maj_stat;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 2 * sizeof(*p));
+ if (unlikely(!p))
+ goto unwrap_failed;
+ opaque_len = be32_to_cpup(p++);
+ offset = (u8 *)(p) - (u8 *)head->iov_base;
if (offset + opaque_len > rcv_buf->len)
- return status;
- /* remove padding: */
+ goto unwrap_failed;
rcv_buf->len = offset + opaque_len;
maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
if (maj_stat != GSS_S_COMPLETE)
- return status;
- if (ntohl(*(*p)++) != rqstp->rq_seqno)
- return status;
+ goto bad_unwrap;
+ /* gss_unwrap decrypted the sequence number */
+ if (be32_to_cpup(p++) != rqstp->rq_seqno)
+ goto bad_seqno;
- return 0;
-}
-
-static int
-gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
- __be32 *p, void *obj)
-{
- struct xdr_stream xdr;
+ /* gss_unwrap redacts the opaque blob from the head iovec.
+ * rcv_buf has changed, thus the stream needs to be reset.
+ */
+ xdr_init_decode(xdr, rcv_buf, p, rqstp);
- xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- return decode(rqstp, &xdr, obj);
+ auth->au_rslack = auth->au_verfsize + 2 +
+ XDR_QUADLEN(savedlen - rcv_buf->len);
+ auth->au_ralign = auth->au_verfsize + 2 +
+ XDR_QUADLEN(savedlen - rcv_buf->len);
+ return 0;
+unwrap_failed:
+ trace_rpcgss_unwrap_failed(task);
+ return -EIO;
+bad_seqno:
+ trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
+ return -EIO;
+bad_unwrap:
+ trace_rpcgss_unwrap(task, maj_stat);
+ return -EIO;
}
static bool
@@ -2014,14 +2010,14 @@ gss_xmit_need_reencode(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_cred *cred = req->rq_cred;
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
- u32 win, seq_xmit;
+ u32 win, seq_xmit = 0;
bool ret = true;
if (!ctx)
- return true;
+ goto out;
if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
- goto out;
+ goto out_ctx;
seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
@@ -2030,56 +2026,51 @@ gss_xmit_need_reencode(struct rpc_task *task)
seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
if (seq_xmit == tmp) {
ret = false;
- goto out;
+ goto out_ctx;
}
}
win = ctx->gc_win;
if (win > 0)
ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
-out:
+
+out_ctx:
gss_put_ctx(ctx);
+out:
+ trace_rpcgss_need_reencode(task, seq_xmit, ret);
return ret;
}
static int
-gss_unwrap_resp(struct rpc_task *task,
- kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
+gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
{
- struct rpc_cred *cred = task->tk_rqstp->rq_cred;
+ struct rpc_rqst *rqstp = task->tk_rqstp;
+ struct rpc_cred *cred = rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
- __be32 *savedp = p;
- struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
- int savedlen = head->iov_len;
- int status = -EIO;
+ int status = -EIO;
if (ctx->gc_proc != RPC_GSS_PROC_DATA)
goto out_decode;
switch (gss_cred->gc_service) {
case RPC_GSS_SVC_NONE:
+ status = gss_unwrap_resp_auth(cred);
break;
case RPC_GSS_SVC_INTEGRITY:
- status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
- if (status)
- goto out;
+ status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
break;
case RPC_GSS_SVC_PRIVACY:
- status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
- if (status)
- goto out;
+ status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
break;
}
- /* take into account extra slack for integrity and privacy cases: */
- cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
- + (savedlen - head->iov_len);
+ if (status)
+ goto out;
+
out_decode:
- status = gss_unwrap_req_decode(decode, rqstp, p, obj);
+ status = rpcauth_unwrap_resp_decode(task, xdr);
out:
gss_put_ctx(ctx);
- dprintk("RPC: %5u %s returning %d\n",
- task->tk_pid, __func__, status);
return status;
}
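Every decode path converted in auth_gss.c follows the same xdr_inline_decode discipline: pull a fixed-size chunk, treat a NULL return as a truncated stream, and validate each field before advancing. A condensed sketch of the verifier-header check from gss_validate(); the helper name is illustrative and error values are simplified to -EIO:

static int decode_verf_header_sketch(struct xdr_stream *xdr, u32 *len)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
	if (!p)
		return -EIO;		/* short reply: stream exhausted */
	if (*p++ != rpc_auth_gss)
		return -EIO;		/* unexpected verifier flavor */
	*len = be32_to_cpup(p);
	if (*len > RPC_MAX_AUTH_SIZE)
		return -EIO;		/* implausible verifier length */
	return 0;
}
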
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 4f43383971ba..6f2d30d7b766 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -977,7 +977,6 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
}
desc->tfm = hmac;
- desc->flags = 0;
/* Compute intermediate Kseq from session key */
err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
@@ -1045,7 +1044,6 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
}
desc->tfm = hmac;
- desc->flags = 0;
/* Compute intermediate Kcrypt from session key */
for (i = 0; i < kctx->gk5e->keylength; i++)
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index eab71fc7af3e..6e5d6d240215 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: BSD-3-Clause
/*
* linux/net/sunrpc/gss_krb5_mech.c
*
@@ -6,32 +7,6 @@
*
* Andy Adamson <andros@umich.edu>
* J. Bruce Fields <bfields@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <crypto/hash.h>
@@ -53,6 +28,7 @@
static struct gss_api_mech gss_kerberos_mech; /* forward declaration */
static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
+#ifndef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
/*
* DES (All DES enctypes are mapped to the same gss functionality)
*/
@@ -74,6 +50,7 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.cksumlength = 8,
.keyed_cksum = 0,
},
+#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
/*
* RC4-HMAC
*/
@@ -461,7 +438,6 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
}
desc->tfm = hmac;
- desc->flags = 0;
err = crypto_shash_digest(desc, sigkeyconstant, slen, ctx->cksum);
kzfree(desc);
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 5cdde6cb703a..14a0aff0cd84 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -570,14 +570,16 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
*/
movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
- BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
- buf->head[0].iov_len);
+ if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
+ buf->head[0].iov_len)
+ return GSS_S_FAILURE;
memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
/* Trim off the trailing "extra count" and checksum blob */
- xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
+ buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
+
return GSS_S_COMPLETE;
}
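Aside: the hunk above demotes a fatal BUG_ON() into a recoverable
GSS_S_FAILURE return. The same guard as a self-contained function, with
simplified parameters (not part of this patch):

	#include <stddef.h>
	#include <string.h>

	#define GSS_S_COMPLETE	0
	#define GSS_S_FAILURE	1

	static unsigned int shift_head_data(char *head, size_t head_len,
					    size_t offset, size_t hdr_len,
					    size_t skip, size_t movelen)
	{
		/* A short or corrupt token must fail, not crash */
		if (offset + hdr_len + skip + movelen > head_len)
			return GSS_S_FAILURE;
		memmove(head + offset, head + offset + hdr_len + skip,
			movelen);
		return GSS_S_COMPLETE;
	}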
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 379318dff534..82060099a429 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: BSD-3-Clause
/*
* linux/net/sunrpc/gss_mech_switch.c
*
@@ -5,32 +6,6 @@
* All rights reserved.
*
* J. Bruce Fields <bfields@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/types.h>
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index 73dcda060335..0349f455a862 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* linux/net/sunrpc/gss_rpc_upcall.c
*
* Copyright (C) 2012 Simo Sorce <simo@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.h b/net/sunrpc/auth_gss/gss_rpc_upcall.h
index 1e542aded90a..31e96344167e 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.h
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* linux/net/sunrpc/gss_rpc_upcall.h
*
* Copyright (C) 2012 Simo Sorce <simo@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _GSS_RPC_UPCALL_H
@@ -45,4 +32,5 @@ void gssp_free_upcall_data(struct gssp_upcall_data *data);
void init_gssp_clnt(struct sunrpc_net *);
int set_gssp_clnt(struct net *);
void clear_gssp_clnt(struct sunrpc_net *);
+
#endif /* _GSS_RPC_UPCALL_H */
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 006062ad5f58..2ff7b7083eba 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* GSS Proxy upcall module
*
* Copyright (C) 2012 Simo Sorce <simo@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/sunrpc/svcauth.h>
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.h b/net/sunrpc/auth_gss/gss_rpc_xdr.h
index 146c31032917..3f17411b7e65 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.h
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* GSS Proxy upcall module
*
* Copyright (C) 2012 Simo Sorce <simo@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_GSS_RPC_XDR_H
@@ -262,6 +249,4 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
#define GSSX_ARG_wrap_size_limit_sz 0
#define GSSX_RES_wrap_size_limit_sz 0
-
-
#endif /* _LINUX_GSS_RPC_XDR_H */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 152790ed309c..0c5d7896d6dd 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Neil Brown <neilb@cse.unsw.edu.au>
* J. Bruce Fields <bfields@umich.edu>
@@ -896,7 +897,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
if (svc_getnl(&buf->head[0]) != seq)
goto out;
/* trim off the mic and padding at the end before returning */
- xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
+ buf->len -= 4 + round_up_to_quad(mic.len);
stat = 0;
out:
kfree(mic.data);
diff --git a/net/sunrpc/auth_gss/trace.c b/net/sunrpc/auth_gss/trace.c
new file mode 100644
index 000000000000..5576f1e66de9
--- /dev/null
+++ b/net/sunrpc/auth_gss/trace.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, 2019 Oracle. All rights reserved.
+ */
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/gss_err.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/rpcgss.h>
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index d0ceac57c06e..41a633a4049e 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -59,15 +59,21 @@ nul_match(struct auth_cred *acred, struct rpc_cred *cred, int taskflags)
/*
* Marshal credential.
*/
-static __be32 *
-nul_marshal(struct rpc_task *task, __be32 *p)
+static int
+nul_marshal(struct rpc_task *task, struct xdr_stream *xdr)
{
- *p++ = htonl(RPC_AUTH_NULL);
- *p++ = 0;
- *p++ = htonl(RPC_AUTH_NULL);
- *p++ = 0;
-
- return p;
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4 * sizeof(*p));
+ if (!p)
+ return -EMSGSIZE;
+ /* Credential */
+ *p++ = rpc_auth_null;
+ *p++ = xdr_zero;
+ /* Verifier */
+ *p++ = rpc_auth_null;
+ *p = xdr_zero;
+ return 0;
}
/*
@@ -80,25 +86,19 @@ nul_refresh(struct rpc_task *task)
return 0;
}
-static __be32 *
-nul_validate(struct rpc_task *task, __be32 *p)
+static int
+nul_validate(struct rpc_task *task, struct xdr_stream *xdr)
{
- rpc_authflavor_t flavor;
- u32 size;
-
- flavor = ntohl(*p++);
- if (flavor != RPC_AUTH_NULL) {
- printk("RPC: bad verf flavor: %u\n", flavor);
- return ERR_PTR(-EIO);
- }
-
- size = ntohl(*p++);
- if (size != 0) {
- printk("RPC: bad verf size: %u\n", size);
- return ERR_PTR(-EIO);
- }
-
- return p;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 2 * sizeof(*p));
+ if (!p)
+ return -EIO;
+ if (*p++ != rpc_auth_null)
+ return -EIO;
+ if (*p != xdr_zero)
+ return -EIO;
+ return 0;
}
const struct rpc_authops authnull_ops = {
@@ -114,6 +114,8 @@ static
struct rpc_auth null_auth = {
.au_cslack = NUL_CALLSLACK,
.au_rslack = NUL_REPLYSLACK,
+ .au_verfsize = NUL_REPLYSLACK,
+ .au_ralign = NUL_REPLYSLACK,
.au_ops = &authnull_ops,
.au_flavor = RPC_AUTH_NULL,
.au_count = REFCOUNT_INIT(1),
@@ -125,8 +127,10 @@ const struct rpc_credops null_credops = {
.crdestroy = nul_destroy_cred,
.crmatch = nul_match,
.crmarshal = nul_marshal,
+ .crwrap_req = rpcauth_wrap_req_encode,
.crrefresh = nul_refresh,
.crvalidate = nul_validate,
+ .crunwrap_resp = rpcauth_unwrap_resp_decode,
};
static
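Aside: what nul_marshal() above puts on the wire, sketched in plain C.
An AUTH_NULL credential and verifier are each a flavor word followed by
a zero length, four XDR words in all (stand-in encoder, not part of
this patch):

	#include <stdint.h>
	#include <arpa/inet.h>

	#define RPC_AUTH_NULL	0

	static void encode_auth_null(uint32_t p[4])
	{
		p[0] = htonl(RPC_AUTH_NULL);	/* credential flavor */
		p[1] = htonl(0);		/* credential length */
		p[2] = htonl(RPC_AUTH_NULL);	/* verifier flavor */
		p[3] = htonl(0);		/* verifier length */
	}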
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 387f6b3ffbea..d4018e5a24c5 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -28,8 +28,6 @@ static mempool_t *unix_pool;
static struct rpc_auth *
unx_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
- dprintk("RPC: creating UNIX authenticator for client %p\n",
- clnt);
refcount_inc(&unix_auth.au_count);
return &unix_auth;
}
@@ -37,7 +35,6 @@ unx_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
static void
unx_destroy(struct rpc_auth *auth)
{
- dprintk("RPC: destroying UNIX authenticator %p\n", auth);
}
/*
@@ -48,10 +45,6 @@ unx_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
struct rpc_cred *ret = mempool_alloc(unix_pool, GFP_NOFS);
- dprintk("RPC: allocating UNIX cred for uid %d gid %d\n",
- from_kuid(&init_user_ns, acred->cred->fsuid),
- from_kgid(&init_user_ns, acred->cred->fsgid));
-
rpcauth_init_cred(ret, acred, auth, &unix_credops);
ret->cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
return ret;
@@ -61,7 +54,7 @@ static void
unx_free_cred_callback(struct rcu_head *head)
{
struct rpc_cred *rpc_cred = container_of(head, struct rpc_cred, cr_rcu);
- dprintk("RPC: unx_free_cred %p\n", rpc_cred);
+
put_cred(rpc_cred->cr_cred);
mempool_free(rpc_cred, unix_pool);
}
@@ -87,7 +80,7 @@ unx_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
if (!uid_eq(cred->cr_cred->fsuid, acred->cred->fsuid) || !gid_eq(cred->cr_cred->fsgid, acred->cred->fsgid))
return 0;
- if (acred->cred && acred->cred->group_info != NULL)
+ if (acred->cred->group_info != NULL)
groups = acred->cred->group_info->ngroups;
if (groups > UNX_NGROUPS)
groups = UNX_NGROUPS;
@@ -106,37 +99,55 @@ unx_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
* Marshal credentials.
* Maybe we should keep a cached credential for performance reasons.
*/
-static __be32 *
-unx_marshal(struct rpc_task *task, __be32 *p)
+static int
+unx_marshal(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
- __be32 *base, *hold;
+ __be32 *p, *cred_len, *gidarr_len;
int i;
struct group_info *gi = cred->cr_cred->group_info;
- *p++ = htonl(RPC_AUTH_UNIX);
- base = p++;
- *p++ = htonl(jiffies/HZ);
-
- /*
- * Copy the UTS nodename captured when the client was created.
- */
- p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
-
- *p++ = htonl((u32) from_kuid(&init_user_ns, cred->cr_cred->fsuid));
- *p++ = htonl((u32) from_kgid(&init_user_ns, cred->cr_cred->fsgid));
- hold = p++;
+ /* Credential */
+
+ p = xdr_reserve_space(xdr, 3 * sizeof(*p));
+ if (!p)
+ goto marshal_failed;
+ *p++ = rpc_auth_unix;
+ cred_len = p++;
+ *p++ = xdr_zero; /* stamp */
+ if (xdr_stream_encode_opaque(xdr, clnt->cl_nodename,
+ clnt->cl_nodelen) < 0)
+ goto marshal_failed;
+ p = xdr_reserve_space(xdr, 3 * sizeof(*p));
+ if (!p)
+ goto marshal_failed;
+ *p++ = cpu_to_be32(from_kuid(&init_user_ns, cred->cr_cred->fsuid));
+ *p++ = cpu_to_be32(from_kgid(&init_user_ns, cred->cr_cred->fsgid));
+
+ gidarr_len = p++;
if (gi)
for (i = 0; i < UNX_NGROUPS && i < gi->ngroups; i++)
- *p++ = htonl((u32) from_kgid(&init_user_ns, gi->gid[i]));
- *hold = htonl(p - hold - 1); /* gid array length */
- *base = htonl((p - base - 1) << 2); /* cred length */
+ *p++ = cpu_to_be32(from_kgid(&init_user_ns,
+ gi->gid[i]));
+ *gidarr_len = cpu_to_be32(p - gidarr_len - 1);
+ *cred_len = cpu_to_be32((p - cred_len - 1) << 2);
+ p = xdr_reserve_space(xdr, (p - gidarr_len - 1) << 2);
+ if (!p)
+ goto marshal_failed;
+
+ /* Verifier */
+
+ p = xdr_reserve_space(xdr, 2 * sizeof(*p));
+ if (!p)
+ goto marshal_failed;
+ *p++ = rpc_auth_null;
+ *p = xdr_zero;
- *p++ = htonl(RPC_AUTH_NULL);
- *p++ = htonl(0);
+ return 0;
- return p;
+marshal_failed:
+ return -EMSGSIZE;
}
/*
@@ -149,29 +160,35 @@ unx_refresh(struct rpc_task *task)
return 0;
}
-static __be32 *
-unx_validate(struct rpc_task *task, __be32 *p)
+static int
+unx_validate(struct rpc_task *task, struct xdr_stream *xdr)
{
- rpc_authflavor_t flavor;
- u32 size;
-
- flavor = ntohl(*p++);
- if (flavor != RPC_AUTH_NULL &&
- flavor != RPC_AUTH_UNIX &&
- flavor != RPC_AUTH_SHORT) {
- printk("RPC: bad verf flavor: %u\n", flavor);
- return ERR_PTR(-EIO);
- }
-
- size = ntohl(*p++);
- if (size > RPC_MAX_AUTH_SIZE) {
- printk("RPC: giant verf size: %u\n", size);
- return ERR_PTR(-EIO);
+ struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
+ __be32 *p;
+ u32 size;
+
+ p = xdr_inline_decode(xdr, 2 * sizeof(*p));
+ if (!p)
+ return -EIO;
+ switch (*p++) {
+ case rpc_auth_null:
+ case rpc_auth_unix:
+ case rpc_auth_short:
+ break;
+ default:
+ return -EIO;
}
- task->tk_rqstp->rq_cred->cr_auth->au_rslack = (size >> 2) + 2;
- p += (size >> 2);
-
- return p;
+ size = be32_to_cpup(p);
+ if (size > RPC_MAX_AUTH_SIZE)
+ return -EIO;
+ p = xdr_inline_decode(xdr, size);
+ if (!p)
+ return -EIO;
+
+ auth->au_verfsize = XDR_QUADLEN(size) + 2;
+ auth->au_rslack = XDR_QUADLEN(size) + 2;
+ auth->au_ralign = XDR_QUADLEN(size) + 2;
+ return 0;
}
int __init rpc_init_authunix(void)
@@ -198,6 +215,7 @@ static
struct rpc_auth unix_auth = {
.au_cslack = UNX_CALLSLACK,
.au_rslack = NUL_REPLYSLACK,
+ .au_verfsize = NUL_REPLYSLACK,
.au_ops = &authunix_ops,
.au_flavor = RPC_AUTH_UNIX,
.au_count = REFCOUNT_INIT(1),
@@ -209,6 +227,8 @@ const struct rpc_credops unix_credops = {
.crdestroy = unx_destroy_cred,
.crmatch = unx_match,
.crmarshal = unx_marshal,
+ .crwrap_req = rpcauth_wrap_req_encode,
.crrefresh = unx_refresh,
.crvalidate = unx_validate,
+ .crunwrap_resp = rpcauth_unwrap_resp_decode,
};
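Aside: a compact sketch of the back-filled length words in
unx_marshal() above: reserve a word, emit the variable-length body,
then patch the word once the end is known. The machine name is elided
to a zero-length opaque for brevity; this is illustration, not the
patch's code:

	#include <stdint.h>
	#include <arpa/inet.h>

	#define RPC_AUTH_UNIX	1
	#define UNX_NGROUPS	16

	static uint32_t *encode_unix_cred(uint32_t *p, uint32_t uid,
					  uint32_t gid, const uint32_t *gids,
					  int ngroups)
	{
		uint32_t *cred_len, *gidarr_len;
		int i;

		*p++ = htonl(RPC_AUTH_UNIX);
		cred_len = p++;			/* patched below */
		*p++ = 0;			/* stamp */
		*p++ = htonl(0);		/* empty machine name */
		*p++ = htonl(uid);
		*p++ = htonl(gid);
		gidarr_len = p++;		/* patched below */
		for (i = 0; i < ngroups && i < UNX_NGROUPS; i++)
			*p++ = htonl(gids[i]);
		*gidarr_len = htonl(p - gidarr_len - 1);    /* words */
		*cred_len = htonl((p - cred_len - 1) << 2); /* bytes */
		return p;
	}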
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index ec451b8114b0..c47d82622fd1 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -235,7 +235,8 @@ out:
list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
-static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
+static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
+ struct rpc_rqst *new)
{
struct rpc_rqst *req = NULL;
@@ -243,22 +244,20 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
if (atomic_read(&xprt->bc_free_slots) <= 0)
goto not_found;
if (list_empty(&xprt->bc_pa_list)) {
- req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
- if (!req)
+ if (!new)
goto not_found;
- list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+ list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
xprt->bc_alloc_count++;
}
req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
rq_bc_pa_list);
req->rq_reply_bytes_recvd = 0;
- req->rq_bytes_sent = 0;
memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
sizeof(req->rq_private_buf));
req->rq_xid = xid;
req->rq_connect_cookie = xprt->connect_cookie;
-not_found:
dprintk("RPC: backchannel req=%p\n", req);
+not_found:
return req;
}
@@ -321,18 +320,27 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
*/
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
- struct rpc_rqst *req;
-
- spin_lock(&xprt->bc_pa_lock);
- list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
- if (req->rq_connect_cookie != xprt->connect_cookie)
- continue;
- if (req->rq_xid == xid)
- goto found;
- }
- req = xprt_alloc_bc_request(xprt, xid);
+ struct rpc_rqst *req, *new = NULL;
+
+ do {
+ spin_lock(&xprt->bc_pa_lock);
+ list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
+ if (req->rq_connect_cookie != xprt->connect_cookie)
+ continue;
+ if (req->rq_xid == xid)
+ goto found;
+ }
+ req = xprt_get_bc_request(xprt, xid, new);
found:
- spin_unlock(&xprt->bc_pa_lock);
+ spin_unlock(&xprt->bc_pa_lock);
+ if (new) {
+ if (req != new)
+ xprt_free_bc_rqst(new);
+ break;
+ } else if (req)
+ break;
+ new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
+ } while (new);
return req;
}
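Aside: the loop above is the classic allocate-outside-the-lock retry
pattern. A generic pthreads sketch; the kernel version uses a spinlock
and xprt_alloc_bc_req(), so everything below is a stand-in:

	#include <stdlib.h>
	#include <pthread.h>

	struct item { int key; struct item *next; };

	static struct item *items;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static struct item *find_locked(int key)	/* lock held */
	{
		struct item *it;

		for (it = items; it; it = it->next)
			if (it->key == key)
				return it;
		return NULL;
	}

	struct item *lookup_or_create(int key)
	{
		struct item *it, *new = NULL;

		do {
			pthread_mutex_lock(&lock);
			it = find_locked(key);
			if (!it && new) {
				new->key = key;		/* donate prealloc */
				new->next = items;
				items = new;
				it = new;
				new = NULL;
			}
			pthread_mutex_unlock(&lock);
			if (it || new)
				break;
			/* Sleepable allocation only outside the lock */
			new = calloc(1, sizeof(*new));
		} while (new);
		free(new);	/* unused preallocation, if any */
		return it;
	}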
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 12bb23b8e0c5..261131dfa1f1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
h->last_refresh = now;
}
+static inline int cache_is_valid(struct cache_head *h);
static void cache_fresh_locked(struct cache_head *head, time_t expiry,
struct cache_detail *detail);
static void cache_fresh_unlocked(struct cache_head *head,
@@ -105,6 +106,8 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
if (cache_is_expired(detail, tmp)) {
hlist_del_init_rcu(&tmp->cache_list);
detail->entries --;
+ if (cache_is_valid(tmp) == -EAGAIN)
+ set_bit(CACHE_NEGATIVE, &tmp->flags);
cache_fresh_locked(tmp, 0, detail);
freeme = tmp;
break;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d7ec6132c046..8ff11dc98d7f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -66,20 +66,19 @@ static void call_decode(struct rpc_task *task);
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
-#if defined(CONFIG_SUNRPC_BACKCHANNEL)
-static void call_bc_transmit(struct rpc_task *task);
-#endif /* CONFIG_SUNRPC_BACKCHANNEL */
static void call_status(struct rpc_task *task);
static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
static void call_refreshresult(struct rpc_task *task);
-static void call_timeout(struct rpc_task *task);
static void call_connect(struct rpc_task *task);
static void call_connect_status(struct rpc_task *task);
-static __be32 *rpc_encode_header(struct rpc_task *task);
-static __be32 *rpc_verify_header(struct rpc_task *task);
+static int rpc_encode_header(struct rpc_task *task,
+ struct xdr_stream *xdr);
+static int rpc_decode_header(struct rpc_task *task,
+ struct xdr_stream *xdr);
static int rpc_ping(struct rpc_clnt *clnt);
+static void rpc_check_timeout(struct rpc_task *task);
static void rpc_register_client(struct rpc_clnt *clnt)
{
@@ -834,9 +833,6 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
rovr->tk_flags |= RPC_TASK_KILLED;
rpc_exit(rovr, -EIO);
- if (RPC_IS_QUEUED(rovr))
- rpc_wake_up_queued_task(rovr->tk_waitqueue,
- rovr);
}
}
spin_unlock(&clnt->cl_lock);
@@ -1131,6 +1127,8 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
EXPORT_SYMBOL_GPL(rpc_call_async);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+static void call_bc_encode(struct rpc_task *task);
+
/**
* rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
* rpc_execute against it
@@ -1152,7 +1150,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
task = rpc_new_task(&task_setup_data);
xprt_init_bc_request(req, task);
- task->tk_action = call_bc_transmit;
+ task->tk_action = call_bc_encode;
atomic_inc(&task->tk_count);
WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
rpc_execute(task);
@@ -1162,6 +1160,29 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+/**
+ * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
+ * @req: RPC request to prepare
+ * @pages: vector of struct page pointers
+ * @base: offset in first page where receive should start, in bytes
+ * @len: expected size of the upper layer data payload, in bytes
+ * @hdrsize: expected size of upper layer reply header, in XDR words
+ *
+ */
+void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
+ unsigned int base, unsigned int len,
+ unsigned int hdrsize)
+{
+ /* Subtract one to force an extra word of buffer space for the
+ * payload's XDR pad to fall into the rcv_buf's tail iovec.
+ */
+ hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1;
+
+ xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
+ trace_rpc_reply_pages(req);
+}
+EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);
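Aside: the hdrsize arithmetic above as a worked example. All inputs are
XDR word counts; the sample values (a 4-word fixed reply header, 2-word
verifier alignment, 5-word upper-layer header) are assumptions for
illustration only:

	#include <stdio.h>

	static unsigned int reply_pages_offset(unsigned int rephdrsize,
					       unsigned int au_ralign,
					       unsigned int upper_hdr)
	{
		/* Subtract one word so the payload's XDR pad falls into
		 * the tail iovec, then convert words to bytes. */
		return (upper_hdr + rephdrsize + au_ralign - 1) << 2;
	}

	int main(void)
	{
		printf("page data starts at byte %u\n",
		       reply_pages_offset(4, 2, 5));
		return 0;
	}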
+
void
rpc_call_start(struct rpc_task *task)
{
@@ -1665,7 +1686,7 @@ call_refreshresult(struct rpc_task *task)
static void
call_allocate(struct rpc_task *task)
{
- unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
+ const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@@ -1690,9 +1711,15 @@ call_allocate(struct rpc_task *task)
* and reply headers, and convert both values
* to byte sizes.
*/
- req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
+ req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
+ proc->p_arglen;
req->rq_callsize <<= 2;
- req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
+ /*
+ * Note: the reply buffer must at minimum allocate enough space
+ * for the 'struct accepted_reply' from RFC5531.
+ */
+ req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
+ max_t(size_t, proc->p_replen, 2);
req->rq_rcvsize <<= 2;
status = xprt->ops->buf_alloc(task);
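Aside: the receive-size floor above as one function; the max with 2
leaves room for RFC 5531's accepted_reply words even when a procedure
declares a zero-length reply (illustrative stand-in):

	#include <stddef.h>

	static size_t rcvsize_bytes(size_t rephdrsize, size_t rslack,
				    size_t replen)
	{
		size_t words = rephdrsize + rslack +
			       (replen > 2 ? replen : 2);
		return words << 2;	/* XDR words to bytes */
	}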
@@ -1728,10 +1755,7 @@ static void
rpc_xdr_encode(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- kxdreproc_t encode;
- __be32 *p;
-
- dprint_status(task);
+ struct xdr_stream xdr;
xdr_buf_init(&req->rq_snd_buf,
req->rq_buffer,
@@ -1740,18 +1764,13 @@ rpc_xdr_encode(struct rpc_task *task)
req->rq_rbuffer,
req->rq_rcvsize);
- p = rpc_encode_header(task);
- if (p == NULL)
- return;
-
- encode = task->tk_msg.rpc_proc->p_encode;
- if (encode == NULL)
+ req->rq_snd_buf.head[0].iov_len = 0;
+ xdr_init_encode(&xdr, &req->rq_snd_buf,
+ req->rq_snd_buf.head[0].iov_base, req);
+ if (rpc_encode_header(task, &xdr))
return;
- task->tk_status = rpcauth_wrap_req(task, encode, req, p,
- task->tk_msg.rpc_argp);
- if (task->tk_status == 0)
- xprt_request_prepare(req);
+ task->tk_status = rpcauth_wrap_req(task, &xdr);
}
/*
@@ -1762,6 +1781,7 @@ call_encode(struct rpc_task *task)
{
if (!rpc_task_need_encode(task))
goto out;
+ dprint_status(task);
/* Encode here so that rpcsec_gss can use correct sequence number. */
rpc_xdr_encode(task);
/* Did the encode result in an error condition? */
@@ -1779,6 +1799,8 @@ call_encode(struct rpc_task *task)
rpc_exit(task, task->tk_status);
}
return;
+ } else {
+ xprt_request_prepare(task->tk_rqstp);
}
/* Add task to reply queue before transmission to avoid races */
@@ -1786,7 +1808,29 @@ call_encode(struct rpc_task *task)
xprt_request_enqueue_receive(task);
xprt_request_enqueue_transmit(task);
out:
- task->tk_action = call_bind;
+ task->tk_action = call_transmit;
+ /* Check that the connection is OK */
+ if (!xprt_bound(task->tk_xprt))
+ task->tk_action = call_bind;
+ else if (!xprt_connected(task->tk_xprt))
+ task->tk_action = call_connect;
+}
+
+/*
+ * Helpers to check if the task was already transmitted, and
+ * to take action when that is the case.
+ */
+static bool
+rpc_task_transmitted(struct rpc_task *task)
+{
+ return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
+}
+
+static void
+rpc_task_handle_transmitted(struct rpc_task *task)
+{
+ xprt_end_transmit(task);
+ task->tk_action = call_transmit_status;
}
/*
@@ -1797,14 +1841,24 @@ call_bind(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
- dprint_status(task);
+ if (rpc_task_transmitted(task)) {
+ rpc_task_handle_transmitted(task);
+ return;
+ }
- task->tk_action = call_connect;
- if (!xprt_bound(xprt)) {
- task->tk_action = call_bind_status;
- task->tk_timeout = xprt->bind_timeout;
- xprt->ops->rpcbind(task);
+ if (xprt_bound(xprt)) {
+ task->tk_action = call_connect;
+ return;
}
+
+ dprint_status(task);
+
+ task->tk_action = call_bind_status;
+ if (!xprt_prepare_transmit(task))
+ return;
+
+ task->tk_timeout = xprt->bind_timeout;
+ xprt->ops->rpcbind(task);
}
/*
@@ -1815,6 +1869,11 @@ call_bind_status(struct rpc_task *task)
{
int status = -EIO;
+ if (rpc_task_transmitted(task)) {
+ rpc_task_handle_transmitted(task);
+ return;
+ }
+
if (task->tk_status >= 0) {
dprint_status(task);
task->tk_status = 0;
@@ -1841,6 +1900,8 @@ call_bind_status(struct rpc_task *task)
task->tk_rebind_retry--;
rpc_delay(task, 3*HZ);
goto retry_timeout;
+ case -EAGAIN:
+ goto retry_timeout;
case -ETIMEDOUT:
dprintk("RPC: %5u rpcbind request timed out\n",
task->tk_pid);
@@ -1882,7 +1943,8 @@ call_bind_status(struct rpc_task *task)
retry_timeout:
task->tk_status = 0;
- task->tk_action = call_timeout;
+ task->tk_action = call_bind;
+ rpc_check_timeout(task);
}
/*
@@ -1893,21 +1955,30 @@ call_connect(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
+ if (rpc_task_transmitted(task)) {
+ rpc_task_handle_transmitted(task);
+ return;
+ }
+
+ if (xprt_connected(xprt)) {
+ task->tk_action = call_transmit;
+ return;
+ }
+
dprintk("RPC: %5u call_connect xprt %p %s connected\n",
task->tk_pid, xprt,
(xprt_connected(xprt) ? "is" : "is not"));
- task->tk_action = call_transmit;
- if (!xprt_connected(xprt)) {
- task->tk_action = call_connect_status;
- if (task->tk_status < 0)
- return;
- if (task->tk_flags & RPC_TASK_NOCONNECT) {
- rpc_exit(task, -ENOTCONN);
- return;
- }
- xprt_connect(task);
+ task->tk_action = call_connect_status;
+ if (task->tk_status < 0)
+ return;
+ if (task->tk_flags & RPC_TASK_NOCONNECT) {
+ rpc_exit(task, -ENOTCONN);
+ return;
}
+ if (!xprt_prepare_transmit(task))
+ return;
+ xprt_connect(task);
}
/*
@@ -1919,10 +1990,8 @@ call_connect_status(struct rpc_task *task)
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
- /* Check if the task was already transmitted */
- if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
- xprt_end_transmit(task);
- task->tk_action = call_transmit_status;
+ if (rpc_task_transmitted(task)) {
+ rpc_task_handle_transmitted(task);
return;
}
@@ -1937,8 +2006,7 @@ call_connect_status(struct rpc_task *task)
break;
if (clnt->cl_autobind) {
rpc_force_rebind(clnt);
- task->tk_action = call_bind;
- return;
+ goto out_retry;
}
/* fall through */
case -ECONNRESET:
@@ -1958,16 +2026,19 @@ call_connect_status(struct rpc_task *task)
/* fall through */
case -ENOTCONN:
case -EAGAIN:
- /* Check for timeouts before looping back to call_bind */
case -ETIMEDOUT:
- task->tk_action = call_timeout;
- return;
+ goto out_retry;
case 0:
clnt->cl_stats->netreconn++;
task->tk_action = call_transmit;
return;
}
rpc_exit(task, status);
+ return;
+out_retry:
+ /* Check for timeouts before looping back to call_bind */
+ task->tk_action = call_bind;
+ rpc_check_timeout(task);
}
/*
@@ -1976,15 +2047,24 @@ call_connect_status(struct rpc_task *task)
static void
call_transmit(struct rpc_task *task)
{
+ if (rpc_task_transmitted(task)) {
+ rpc_task_handle_transmitted(task);
+ return;
+ }
+
dprint_status(task);
+ task->tk_action = call_transmit_status;
+ if (!xprt_prepare_transmit(task))
+ return;
task->tk_status = 0;
if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
- if (!xprt_prepare_transmit(task))
+ if (!xprt_connected(task->tk_xprt)) {
+ task->tk_status = -ENOTCONN;
return;
+ }
xprt_transmit(task);
}
- task->tk_action = call_transmit_status;
xprt_end_transmit(task);
}
@@ -2000,7 +2080,8 @@ call_transmit_status(struct rpc_task *task)
* Common case: success. Force the compiler to put this
* test first.
*/
- if (task->tk_status == 0) {
+ if (rpc_task_transmitted(task)) {
+ task->tk_status = 0;
xprt_request_wait_receive(task);
return;
}
@@ -2038,7 +2119,7 @@ call_transmit_status(struct rpc_task *task)
trace_xprt_ping(task->tk_xprt,
task->tk_status);
rpc_exit(task, task->tk_status);
- break;
+ return;
}
/* fall through */
case -ECONNRESET:
@@ -2046,11 +2127,24 @@ call_transmit_status(struct rpc_task *task)
case -EADDRINUSE:
case -ENOTCONN:
case -EPIPE:
+ task->tk_action = call_bind;
+ task->tk_status = 0;
break;
}
+ rpc_check_timeout(task);
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+static void call_bc_transmit(struct rpc_task *task);
+static void call_bc_transmit_status(struct rpc_task *task);
+
+static void
+call_bc_encode(struct rpc_task *task)
+{
+ xprt_request_enqueue_transmit(task);
+ task->tk_action = call_bc_transmit;
+}
+
/*
* 5b. Send the backchannel RPC reply. On error, drop the reply. In
* addition, disconnect on connectivity errors.
@@ -2058,26 +2152,26 @@ call_transmit_status(struct rpc_task *task)
static void
call_bc_transmit(struct rpc_task *task)
{
- struct rpc_rqst *req = task->tk_rqstp;
-
- if (rpc_task_need_encode(task))
- xprt_request_enqueue_transmit(task);
- if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
- goto out_wakeup;
-
- if (!xprt_prepare_transmit(task))
- goto out_retry;
-
- if (task->tk_status < 0) {
- printk(KERN_NOTICE "RPC: Could not send backchannel reply "
- "error: %d\n", task->tk_status);
- goto out_done;
+ task->tk_action = call_bc_transmit_status;
+ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
+ if (!xprt_prepare_transmit(task))
+ return;
+ task->tk_status = 0;
+ xprt_transmit(task);
}
+ xprt_end_transmit(task);
+}
- xprt_transmit(task);
+static void
+call_bc_transmit_status(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ if (rpc_task_transmitted(task))
+ task->tk_status = 0;
- xprt_end_transmit(task);
dprint_status(task);
+
switch (task->tk_status) {
case 0:
/* Success */
@@ -2091,8 +2185,14 @@ call_bc_transmit(struct rpc_task *task)
case -ENOTCONN:
case -EPIPE:
break;
+ case -ENOBUFS:
+ rpc_delay(task, HZ>>2);
+ /* fall through */
+ case -EBADSLT:
case -EAGAIN:
- goto out_retry;
+ task->tk_status = 0;
+ task->tk_action = call_bc_transmit;
+ return;
case -ETIMEDOUT:
/*
* Problem reaching the server. Disconnect and let the
@@ -2111,18 +2211,11 @@ call_bc_transmit(struct rpc_task *task)
* We were unable to reply and will have to drop the
* request. The server should reconnect and retransmit.
*/
- WARN_ON_ONCE(task->tk_status == -EAGAIN);
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
break;
}
-out_wakeup:
- rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
-out_done:
task->tk_action = rpc_exit_task;
- return;
-out_retry:
- task->tk_status = 0;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
@@ -2154,10 +2247,8 @@ call_status(struct rpc_task *task)
case -EHOSTUNREACH:
case -ENETUNREACH:
case -EPERM:
- if (RPC_IS_SOFTCONN(task)) {
- rpc_exit(task, status);
- break;
- }
+ if (RPC_IS_SOFTCONN(task))
+ goto out_exit;
/*
* Delay any retries for 3 seconds, then handle as if it
* were a timeout.
@@ -2165,7 +2256,6 @@ call_status(struct rpc_task *task)
rpc_delay(task, 3*HZ);
/* fall through */
case -ETIMEDOUT:
- task->tk_action = call_timeout;
break;
case -ECONNREFUSED:
case -ECONNRESET:
@@ -2178,42 +2268,48 @@ call_status(struct rpc_task *task)
case -EPIPE:
case -ENOTCONN:
case -EAGAIN:
- task->tk_action = call_encode;
break;
case -EIO:
/* shutdown or soft timeout */
- rpc_exit(task, status);
- break;
+ goto out_exit;
default:
if (clnt->cl_chatty)
printk("%s: RPC call returned error %d\n",
clnt->cl_program->name, -status);
- rpc_exit(task, status);
+ goto out_exit;
}
+ task->tk_action = call_encode;
+ rpc_check_timeout(task);
+ return;
+out_exit:
+ rpc_exit(task, status);
+}
+
+static bool
+rpc_check_connected(const struct rpc_rqst *req)
+{
+ /* No allocated request or transport? return true */
+ if (!req || !req->rq_xprt)
+ return true;
+ return xprt_connected(req->rq_xprt);
}
-/*
- * 6a. Handle RPC timeout
- * We do not release the request slot, so we keep using the
- * same XID for all retransmits.
- */
static void
-call_timeout(struct rpc_task *task)
+rpc_check_timeout(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
- if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
- dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
- goto retry;
- }
+ if (xprt_adjust_timeout(task->tk_rqstp) == 0)
+ return;
dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
task->tk_timeouts++;
- if (RPC_IS_SOFTCONN(task)) {
+ if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
rpc_exit(task, -ETIMEDOUT);
return;
}
+
if (RPC_IS_SOFT(task)) {
if (clnt->cl_chatty) {
printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
@@ -2241,10 +2337,6 @@ call_timeout(struct rpc_task *task)
* event? RFC2203 requires the server to drop all such requests.
*/
rpcauth_invalcred(task);
-
-retry:
- task->tk_action = call_encode;
- task->tk_status = 0;
}
/*
@@ -2255,12 +2347,11 @@ call_decode(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
- kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
- __be32 *p;
+ struct xdr_stream xdr;
dprint_status(task);
- if (!decode) {
+ if (!task->tk_msg.rpc_proc->p_decode) {
task->tk_action = rpc_exit_task;
return;
}
@@ -2285,223 +2376,186 @@ call_decode(struct rpc_task *task)
WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
sizeof(req->rq_rcv_buf)) != 0);
- if (req->rq_rcv_buf.len < 12) {
- if (!RPC_IS_SOFT(task)) {
- task->tk_action = call_encode;
- goto out_retry;
- }
- dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
- clnt->cl_program->name, task->tk_status);
- task->tk_action = call_timeout;
- goto out_retry;
- }
-
- p = rpc_verify_header(task);
- if (IS_ERR(p)) {
- if (p == ERR_PTR(-EAGAIN))
- goto out_retry;
+ xdr_init_decode(&xdr, &req->rq_rcv_buf,
+ req->rq_rcv_buf.head[0].iov_base, req);
+ switch (rpc_decode_header(task, &xdr)) {
+ case 0:
+ task->tk_action = rpc_exit_task;
+ task->tk_status = rpcauth_unwrap_resp(task, &xdr);
+ dprintk("RPC: %5u %s result %d\n",
+ task->tk_pid, __func__, task->tk_status);
return;
- }
- task->tk_action = rpc_exit_task;
-
- task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
- task->tk_msg.rpc_resp);
-
- dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
- task->tk_status);
- return;
-out_retry:
- task->tk_status = 0;
- /* Note: rpc_verify_header() may have freed the RPC slot */
- if (task->tk_rqstp == req) {
- xdr_free_bvec(&req->rq_rcv_buf);
- req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
- if (task->tk_client->cl_discrtry)
- xprt_conditional_disconnect(req->rq_xprt,
- req->rq_connect_cookie);
+ case -EAGAIN:
+ task->tk_status = 0;
+ /* Note: rpc_decode_header() may have freed the RPC slot */
+ if (task->tk_rqstp == req) {
+ xdr_free_bvec(&req->rq_rcv_buf);
+ req->rq_reply_bytes_recvd = 0;
+ req->rq_rcv_buf.len = 0;
+ if (task->tk_client->cl_discrtry)
+ xprt_conditional_disconnect(req->rq_xprt,
+ req->rq_connect_cookie);
+ }
+ task->tk_action = call_encode;
+ rpc_check_timeout(task);
}
}
-static __be32 *
-rpc_encode_header(struct rpc_task *task)
+static int
+rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
- __be32 *p = req->rq_svec[0].iov_base;
-
- /* FIXME: check buffer size? */
-
- p = xprt_skip_transport_header(req->rq_xprt, p);
- *p++ = req->rq_xid; /* XID */
- *p++ = htonl(RPC_CALL); /* CALL */
- *p++ = htonl(RPC_VERSION); /* RPC version */
- *p++ = htonl(clnt->cl_prog); /* program number */
- *p++ = htonl(clnt->cl_vers); /* program version */
- *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
- p = rpcauth_marshcred(task, p);
- if (p)
- req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
- return p;
+ __be32 *p;
+ int error;
+
+ error = -EMSGSIZE;
+ p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2);
+ if (!p)
+ goto out_fail;
+ *p++ = req->rq_xid;
+ *p++ = rpc_call;
+ *p++ = cpu_to_be32(RPC_VERSION);
+ *p++ = cpu_to_be32(clnt->cl_prog);
+ *p++ = cpu_to_be32(clnt->cl_vers);
+ *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);
+
+ error = rpcauth_marshcred(task, xdr);
+ if (error < 0)
+ goto out_fail;
+ return 0;
+out_fail:
+ trace_rpc_bad_callhdr(task);
+ rpc_exit(task, error);
+ return error;
}
-static __be32 *
-rpc_verify_header(struct rpc_task *task)
+static noinline int
+rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_clnt *clnt = task->tk_client;
- struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
- int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
- __be32 *p = iov->iov_base;
- u32 n;
- int error = -EACCES;
-
- if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
- /* RFC-1014 says that the representation of XDR data must be a
- * multiple of four bytes
- * - if it isn't pointer subtraction in the NFS client may give
- * undefined results
- */
- dprintk("RPC: %5u %s: XDR representation not a multiple of"
- " 4 bytes: 0x%x\n", task->tk_pid, __func__,
- task->tk_rqstp->rq_rcv_buf.len);
- error = -EIO;
- goto out_err;
- }
- if ((len -= 3) < 0)
- goto out_overflow;
-
- p += 1; /* skip XID */
- if ((n = ntohl(*p++)) != RPC_REPLY) {
- dprintk("RPC: %5u %s: not an RPC reply: %x\n",
- task->tk_pid, __func__, n);
- error = -EIO;
- goto out_garbage;
- }
+ int error;
+ __be32 *p;
- if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
- if (--len < 0)
- goto out_overflow;
- switch ((n = ntohl(*p++))) {
- case RPC_AUTH_ERROR:
- break;
- case RPC_MISMATCH:
- dprintk("RPC: %5u %s: RPC call version mismatch!\n",
- task->tk_pid, __func__);
- error = -EPROTONOSUPPORT;
- goto out_err;
- default:
- dprintk("RPC: %5u %s: RPC call rejected, "
- "unknown error: %x\n",
- task->tk_pid, __func__, n);
- error = -EIO;
- goto out_err;
- }
- if (--len < 0)
- goto out_overflow;
- switch ((n = ntohl(*p++))) {
- case RPC_AUTH_REJECTEDCRED:
- case RPC_AUTH_REJECTEDVERF:
- case RPCSEC_GSS_CREDPROBLEM:
- case RPCSEC_GSS_CTXPROBLEM:
- if (!task->tk_cred_retry)
- break;
- task->tk_cred_retry--;
- dprintk("RPC: %5u %s: retry stale creds\n",
- task->tk_pid, __func__);
- rpcauth_invalcred(task);
- /* Ensure we obtain a new XID! */
- xprt_release(task);
- task->tk_action = call_reserve;
- goto out_retry;
- case RPC_AUTH_BADCRED:
- case RPC_AUTH_BADVERF:
- /* possibly garbled cred/verf? */
- if (!task->tk_garb_retry)
- break;
- task->tk_garb_retry--;
- dprintk("RPC: %5u %s: retry garbled creds\n",
- task->tk_pid, __func__);
- task->tk_action = call_encode;
- goto out_retry;
- case RPC_AUTH_TOOWEAK:
- printk(KERN_NOTICE "RPC: server %s requires stronger "
- "authentication.\n",
- task->tk_xprt->servername);
- break;
- default:
- dprintk("RPC: %5u %s: unknown auth error: %x\n",
- task->tk_pid, __func__, n);
- error = -EIO;
- }
- dprintk("RPC: %5u %s: call rejected %d\n",
- task->tk_pid, __func__, n);
- goto out_err;
- }
- p = rpcauth_checkverf(task, p);
- if (IS_ERR(p)) {
- error = PTR_ERR(p);
- dprintk("RPC: %5u %s: auth check failed with %d\n",
- task->tk_pid, __func__, error);
- goto out_garbage; /* bad verifier, retry */
- }
- len = p - (__be32 *)iov->iov_base - 1;
- if (len < 0)
- goto out_overflow;
- switch ((n = ntohl(*p++))) {
- case RPC_SUCCESS:
- return p;
- case RPC_PROG_UNAVAIL:
- dprintk("RPC: %5u %s: program %u is unsupported "
- "by server %s\n", task->tk_pid, __func__,
- (unsigned int)clnt->cl_prog,
- task->tk_xprt->servername);
+ /* RFC-1014 says that the representation of XDR data must be a
+ * multiple of four bytes
+ * - if it isn't, pointer subtraction in the NFS client may give
+ * undefined results
+ */
+ if (task->tk_rqstp->rq_rcv_buf.len & 3)
+ goto out_unparsable;
+
+ p = xdr_inline_decode(xdr, 3 * sizeof(*p));
+ if (!p)
+ goto out_unparsable;
+ p++; /* skip XID */
+ if (*p++ != rpc_reply)
+ goto out_unparsable;
+ if (*p++ != rpc_msg_accepted)
+ goto out_msg_denied;
+
+ error = rpcauth_checkverf(task, xdr);
+ if (error)
+ goto out_verifier;
+
+ p = xdr_inline_decode(xdr, sizeof(*p));
+ if (!p)
+ goto out_unparsable;
+ switch (*p) {
+ case rpc_success:
+ return 0;
+ case rpc_prog_unavail:
+ trace_rpc__prog_unavail(task);
error = -EPFNOSUPPORT;
goto out_err;
- case RPC_PROG_MISMATCH:
- dprintk("RPC: %5u %s: program %u, version %u unsupported "
- "by server %s\n", task->tk_pid, __func__,
- (unsigned int)clnt->cl_prog,
- (unsigned int)clnt->cl_vers,
- task->tk_xprt->servername);
+ case rpc_prog_mismatch:
+ trace_rpc__prog_mismatch(task);
error = -EPROTONOSUPPORT;
goto out_err;
- case RPC_PROC_UNAVAIL:
- dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
- "version %u on server %s\n",
- task->tk_pid, __func__,
- rpc_proc_name(task),
- clnt->cl_prog, clnt->cl_vers,
- task->tk_xprt->servername);
+ case rpc_proc_unavail:
+ trace_rpc__proc_unavail(task);
error = -EOPNOTSUPP;
goto out_err;
- case RPC_GARBAGE_ARGS:
- dprintk("RPC: %5u %s: server saw garbage\n",
- task->tk_pid, __func__);
- break; /* retry */
+ case rpc_garbage_args:
+ case rpc_system_err:
+ trace_rpc__garbage_args(task);
+ error = -EIO;
+ break;
default:
- dprintk("RPC: %5u %s: server accept status: %x\n",
- task->tk_pid, __func__, n);
- /* Also retry */
+ goto out_unparsable;
}
out_garbage:
clnt->cl_stats->rpcgarbage++;
if (task->tk_garb_retry) {
task->tk_garb_retry--;
- dprintk("RPC: %5u %s: retrying\n",
- task->tk_pid, __func__);
task->tk_action = call_encode;
-out_retry:
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
}
out_err:
rpc_exit(task, error);
- dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
- __func__, error);
- return ERR_PTR(error);
-out_overflow:
- dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
- __func__);
+ return error;
+
+out_unparsable:
+ trace_rpc__unparsable(task);
+ error = -EIO;
+ goto out_garbage;
+
+out_verifier:
+ trace_rpc_bad_verifier(task);
goto out_garbage;
+
+out_msg_denied:
+ error = -EACCES;
+ p = xdr_inline_decode(xdr, sizeof(*p));
+ if (!p)
+ goto out_unparsable;
+ switch (*p++) {
+ case rpc_auth_error:
+ break;
+ case rpc_mismatch:
+ trace_rpc__mismatch(task);
+ error = -EPROTONOSUPPORT;
+ goto out_err;
+ default:
+ goto out_unparsable;
+ }
+
+ p = xdr_inline_decode(xdr, sizeof(*p));
+ if (!p)
+ goto out_unparsable;
+ switch (*p++) {
+ case rpc_autherr_rejectedcred:
+ case rpc_autherr_rejectedverf:
+ case rpcsec_gsserr_credproblem:
+ case rpcsec_gsserr_ctxproblem:
+ if (!task->tk_cred_retry)
+ break;
+ task->tk_cred_retry--;
+ trace_rpc__stale_creds(task);
+ rpcauth_invalcred(task);
+ /* Ensure we obtain a new XID! */
+ xprt_release(task);
+ task->tk_action = call_reserve;
+ return -EAGAIN;
+ case rpc_autherr_badcred:
+ case rpc_autherr_badverf:
+ /* possibly garbled cred/verf? */
+ if (!task->tk_garb_retry)
+ break;
+ task->tk_garb_retry--;
+ trace_rpc__bad_creds(task);
+ task->tk_action = call_encode;
+ return -EAGAIN;
+ case rpc_autherr_tooweak:
+ trace_rpc__auth_tooweak(task);
+ pr_warn("RPC: server %s requires stronger authentication.\n",
+ task->tk_xprt->servername);
+ break;
+ default:
+ goto out_unparsable;
+ }
+ goto out_err;
}
static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
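Aside: the first checks rpc_decode_header() now makes, reduced to a
userspace predicate. Direction 1 (REPLY) and reply_stat 0
(MSG_ACCEPTED) are RFC 5531 values; the function itself is a stand-in,
not part of this patch:

	#include <stdbool.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	static bool reply_header_ok(const uint32_t *p, unsigned int len)
	{
		/* XDR data is always a multiple of four bytes */
		if ((len & 3) || len < 3 * sizeof(*p))
			return false;
		/* p[0] is the xid, matched against the request elsewhere */
		if (ntohl(p[1]) != 1)	/* msg_type: 1 == REPLY */
			return false;
		if (ntohl(p[2]) != 0)	/* reply_stat: 0 == MSG_ACCEPTED */
			return false;
		return true;
	}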
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index adc3c40cc733..28956c70100a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -19,6 +19,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
+#include <linux/sched/mm.h>
#include <linux/sunrpc/clnt.h>
@@ -784,8 +785,7 @@ void rpc_exit(struct rpc_task *task, int status)
{
task->tk_status = status;
task->tk_action = rpc_exit_task;
- if (RPC_IS_QUEUED(task))
- rpc_wake_up_queued_task(task->tk_waitqueue, task);
+ rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);
@@ -902,7 +902,10 @@ void rpc_execute(struct rpc_task *task)
static void rpc_async_schedule(struct work_struct *work)
{
+ unsigned int pflags = memalloc_nofs_save();
+
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
+ memalloc_nofs_restore(pflags);
}
/**
@@ -921,16 +924,13 @@ static void rpc_async_schedule(struct work_struct *work)
* Most requests are 'small' (under 2KiB) and can be serviced from a
* mempool, ensuring that NFS reads and writes can always proceed,
* and that there is good locality of reference for these buffers.
- *
- * In order to avoid memory starvation triggering more writebacks of
- * NFS requests, we avoid using GFP_KERNEL.
*/
int rpc_malloc(struct rpc_task *task)
{
struct rpc_rqst *rqst = task->tk_rqstp;
size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
struct rpc_buffer *buf;
- gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
+ gfp_t gfp = GFP_NOFS;
if (RPC_IS_SWAPPER(task))
gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
@@ -1011,7 +1011,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
static struct rpc_task *
rpc_alloc_task(void)
{
- return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
+ return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}
/*
@@ -1067,7 +1067,10 @@ static void rpc_free_task(struct rpc_task *task)
static void rpc_async_release(struct work_struct *work)
{
+ unsigned int pflags = memalloc_nofs_save();
+
rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
+ memalloc_nofs_restore(pflags);
}
static void rpc_release_resources_task(struct rpc_task *task)
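Aside: the memalloc_nofs_save()/restore() bracketing above is a general
idiom for deferred work whose allocations must not recurse into
filesystem reclaim; a minimal kernel-style sketch (my_async_work is a
hypothetical example, not part of this patch):

	#include <linux/sched/mm.h>
	#include <linux/workqueue.h>

	static void my_async_work(struct work_struct *work)
	{
		unsigned int pflags = memalloc_nofs_save();

		/* allocations here implicitly behave as GFP_NOFS */
		memalloc_nofs_restore(pflags);
	}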
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index e87ddb9f7feb..dbd19697ee38 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1145,17 +1145,6 @@ static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ..
#endif
/*
- * Setup response header for TCP, it has a 4B record length field.
- */
-static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
-{
- struct kvec *resv = &rqstp->rq_res.head[0];
-
- /* tcp needs a space for the record length... */
- svc_putnl(resv, 0);
-}
-
-/*
* Common routine for processing the RPC request.
*/
static int
@@ -1182,10 +1171,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
clear_bit(RQ_DROPME, &rqstp->rq_flags);
- /* Setup reply header */
- if (rqstp->rq_prot == IPPROTO_TCP)
- svc_tcp_prep_reply_hdr(rqstp);
-
svc_putu32(resv, rqstp->rq_xid);
vers = svc_getnl(argv);
@@ -1443,6 +1428,10 @@ svc_process(struct svc_rqst *rqstp)
goto out_drop;
}
+ /* Reserve space for the record marker */
+ if (rqstp->rq_prot == IPPROTO_TCP)
+ svc_putnl(resv, 0);
+
/* Returns 1 for send, 0 for drop */
if (likely(svc_process_common(rqstp, argv, resv)))
return svc_send(rqstp);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 4eb8fbf2508d..61530b1b7754 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -357,15 +357,29 @@ static void svc_xprt_release_slot(struct svc_rqst *rqstp)
struct svc_xprt *xprt = rqstp->rq_xprt;
if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
atomic_dec(&xprt->xpt_nr_rqsts);
+ smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
svc_xprt_enqueue(xprt);
}
}
-static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+static bool svc_xprt_ready(struct svc_xprt *xprt)
{
- if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+ unsigned long xpt_flags;
+
+ /*
+ * If another cpu has recently updated xpt_flags,
+ * sk_sock->flags, xpt_reserved, or xpt_nr_rqsts, we need to
+ * know about it; otherwise it's possible that both that cpu and
+ * this one could call svc_xprt_enqueue() without either
+ * svc_xprt_enqueue() recognizing that the conditions below
+ * are satisfied, and we could stall indefinitely:
+ */
+ smp_rmb();
+ xpt_flags = READ_ONCE(xprt->xpt_flags);
+
+ if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE)))
return true;
- if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
+ if (xpt_flags & (BIT(XPT_DATA) | BIT(XPT_DEFERRED))) {
if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
svc_xprt_slots_in_range(xprt))
return true;
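Aside: the smp_wmb()/smp_rmb() pairing above, approximated with C11
fences. The flag word and the '< 4' slot test stand in for xpt_flags
and svc_xprt_slots_in_range(); none of this is the kernel's actual
code:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_uint flags;
	static atomic_int nr_rqsts;

	static void release_slot(void)
	{
		atomic_fetch_sub_explicit(&nr_rqsts, 1,
					  memory_order_relaxed);
		atomic_thread_fence(memory_order_release); /* smp_wmb() */
		/* ...now enqueue the transport... */
	}

	static bool xprt_ready(void)
	{
		atomic_thread_fence(memory_order_acquire); /* smp_rmb() */
		if (atomic_load_explicit(&flags, memory_order_relaxed))
			return true;
		return atomic_load_explicit(&nr_rqsts,
					    memory_order_relaxed) < 4;
	}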
@@ -381,7 +395,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
struct svc_rqst *rqstp = NULL;
int cpu;
- if (!svc_xprt_has_something_to_do(xprt))
+ if (!svc_xprt_ready(xprt))
return;
/* Mark transport as busy. It will remain in this state until
@@ -475,7 +489,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
if (xprt && space < rqstp->rq_reserved) {
atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
rqstp->rq_reserved = space;
-
+ smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
svc_xprt_enqueue(xprt);
}
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a6a060925e5d..43590a968b73 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -349,12 +349,16 @@ static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov,
/*
* Set socket snd and rcv buffer lengths
*/
-static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
- unsigned int rcv)
+static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
{
+ unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
+ struct socket *sock = svsk->sk_sock;
+
+ nreqs = min(nreqs, INT_MAX / 2 / max_mesg);
+
lock_sock(sock->sk);
- sock->sk->sk_sndbuf = snd * 2;
- sock->sk->sk_rcvbuf = rcv * 2;
+ sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
+ sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
sock->sk->sk_write_space(sock->sk);
release_sock(sock->sk);
}
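Aside: the overflow guard above in isolation: sk_sndbuf and sk_rcvbuf
are ints, so the request count is clamped so that the product cannot
exceed INT_MAX (max_mesg assumed nonzero; stand-in function):

	#include <limits.h>

	static unsigned int clamped_bufsize(unsigned int nreqs,
					    unsigned int max_mesg)
	{
		if (nreqs > INT_MAX / 2 / max_mesg)
			nreqs = INT_MAX / 2 / max_mesg;
		return nreqs * max_mesg * 2;	/* <= INT_MAX */
	}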
@@ -516,9 +520,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
* provides an upper bound on the number of threads
* which will access the socket.
*/
- svc_sock_setbufsize(svsk->sk_sock,
- (serv->sv_nrthreads+3) * serv->sv_max_mesg,
- (serv->sv_nrthreads+3) * serv->sv_max_mesg);
+ svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
skb = NULL;
@@ -681,9 +683,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
* receive and respond to one request.
* svc_udp_recvfrom will re-adjust if necessary
*/
- svc_sock_setbufsize(svsk->sk_sock,
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
+ svc_sock_setbufsize(svsk, 3);
/* data might have come in before data_ready set up */
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index f302c6eb8779..aa8177ddcbda 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -16,6 +16,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>
+#include <trace/events/sunrpc.h>
/*
* XDR functions for basic NFS types
@@ -162,6 +163,15 @@ xdr_free_bvec(struct xdr_buf *buf)
buf->bvec = NULL;
}
+/**
+ * xdr_inline_pages - Prepare receive buffer for a large reply
+ * @xdr: xdr_buf into which reply will be placed
+ * @offset: expected offset where data payload will start, in bytes
+ * @pages: vector of struct page pointers
+ * @base: offset in first page where receive should start, in bytes
+ * @len: expected size of the upper layer data payload, in bytes
+ *
+ */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
@@ -179,6 +189,8 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
tail->iov_base = buf + offset;
tail->iov_len = buflen - offset;
+ if ((xdr->page_len & 3) == 0)
+ tail->iov_len -= sizeof(__be32);
xdr->buflen += len;
}
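Aside: a sketch of the tail adjustment above, on the reading that the
caller always reserves one spare word for the payload's XDR pad and
xdr_inline_pages() gives it back when page_len is already aligned (an
interpretation, not stated in the patch):

	#include <stdint.h>

	static unsigned int tail_len(unsigned int buflen, unsigned int offset,
				     unsigned int page_len)
	{
		unsigned int len = buflen - offset;

		if ((page_len & 3) == 0)
			len -= sizeof(uint32_t);  /* unused pad word */
		return len;
	}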
@@ -346,13 +358,15 @@ EXPORT_SYMBOL_GPL(_copy_from_pages);
* 'len' bytes. The extra data is not lost, but is instead
* moved into the inlined pages and/or the tail.
*/
-static void
+static unsigned int
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
struct kvec *head, *tail;
size_t copy, offs;
unsigned int pglen = buf->page_len;
+ unsigned int result;
+ result = 0;
tail = buf->tail;
head = buf->head;
@@ -366,6 +380,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
copy = tail->iov_len - len;
memmove((char *)tail->iov_base + len,
tail->iov_base, copy);
+ result += copy;
}
/* Copy from the inlined pages into the tail */
copy = len;
@@ -376,11 +391,13 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
copy = 0;
else if (copy > tail->iov_len - offs)
copy = tail->iov_len - offs;
- if (copy != 0)
+ if (copy != 0) {
_copy_from_pages((char *)tail->iov_base + offs,
buf->pages,
buf->page_base + pglen + offs - len,
copy);
+ result += copy;
+ }
/* Do we also need to copy data from the head into the tail ? */
if (len > pglen) {
offs = copy = len - pglen;
@@ -390,6 +407,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
(char *)head->iov_base +
head->iov_len - offs,
copy);
+ result += copy;
}
}
/* Now handle pages */
@@ -405,12 +423,15 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
_copy_to_pages(buf->pages, buf->page_base,
(char *)head->iov_base + head->iov_len - len,
copy);
+ result += copy;
}
head->iov_len -= len;
buf->buflen -= len;
/* Have we truncated the message? */
if (buf->len > buf->buflen)
buf->len = buf->buflen;
+
+ return result;
}
/**
@@ -422,14 +443,16 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
* 'len' bytes. The extra data is not lost, but is instead
* moved into the tail.
*/
-static void
+static unsigned int
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
struct kvec *tail;
size_t copy;
unsigned int pglen = buf->page_len;
unsigned int tailbuf_len;
+ unsigned int result;
+ result = 0;
tail = buf->tail;
BUG_ON (len > pglen);
@@ -447,18 +470,22 @@ xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
if (tail->iov_len > len) {
char *p = (char *)tail->iov_base + len;
memmove(p, tail->iov_base, tail->iov_len - len);
+ result += tail->iov_len - len;
} else
copy = tail->iov_len;
/* Copy from the inlined pages into the tail */
_copy_from_pages((char *)tail->iov_base,
buf->pages, buf->page_base + pglen - len,
copy);
+ result += copy;
}
buf->page_len -= len;
buf->buflen -= len;
/* Have we truncated the message? */
if (buf->len > buf->buflen)
buf->len = buf->buflen;
+
+ return result;
}
void
@@ -483,6 +510,7 @@ EXPORT_SYMBOL_GPL(xdr_stream_pos);
* @xdr: pointer to xdr_stream struct
* @buf: pointer to XDR buffer in which to encode data
* @p: current pointer inside XDR buffer
+ * @rqst: pointer to controlling rpc_rqst, for debugging
*
* Note: at the moment the RPC client only passes the length of our
* scratch buffer in the xdr_buf's header kvec. Previously this
@@ -491,7 +519,8 @@ EXPORT_SYMBOL_GPL(xdr_stream_pos);
* of the buffer length, and takes care of adjusting the kvec
* length for us.
*/
-void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
+void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
+ struct rpc_rqst *rqst)
{
struct kvec *iov = buf->head;
int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
@@ -513,6 +542,7 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
buf->len += len;
iov->iov_len += len;
}
+ xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
@@ -551,9 +581,9 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
int frag1bytes, frag2bytes;
if (nbytes > PAGE_SIZE)
- return NULL; /* Bigger buffers require special handling */
+ goto out_overflow; /* Bigger buffers require special handling */
if (xdr->buf->len + nbytes > xdr->buf->buflen)
- return NULL; /* Sorry, we're totally out of space */
+ goto out_overflow; /* Sorry, we're totally out of space */
frag1bytes = (xdr->end - xdr->p) << 2;
frag2bytes = nbytes - frag1bytes;
if (xdr->iov)
@@ -582,6 +612,9 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
xdr->buf->page_len += frag2bytes;
xdr->buf->len += nbytes;
return p;
+out_overflow:
+ trace_rpc_xdr_overflow(xdr, nbytes);
+ return NULL;
}
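Funnelling both failure returns through out_overflow means an exhausted encode buffer now fires the rpc_xdr_overflow tracepoint instead of failing silently. The sketch below models the single-exit pattern in userspace; trace_overflow() is a stand-in, since the real symbol is a kernel tracepoint and not callable from user code:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for trace_rpc_xdr_overflow(). */
static void trace_overflow(size_t nbytes)
{
	fprintf(stderr, "xdr overflow: %zu bytes requested\n", nbytes);
}

/* Single-exit error pattern: every failure path funnels through
 * one label so the tracepoint fires exactly once. */
static char *reserve(char *p, char *end, size_t nbytes)
{
	if (nbytes > 4096)
		goto out_overflow;
	if (p + nbytes > end)
		goto out_overflow;
	return p;
out_overflow:
	trace_overflow(nbytes);
	return NULL;
}

int main(void)
{
	char buf[16];

	if (!reserve(buf, buf + sizeof(buf), 32))
		puts("reservation failed, trace emitted");
	return 0;
}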
/**
@@ -819,8 +852,10 @@ static bool xdr_set_next_buffer(struct xdr_stream *xdr)
* @xdr: pointer to xdr_stream struct
* @buf: pointer to XDR buffer from which to decode data
* @p: current pointer inside XDR buffer
+ * @rqst: pointer to controlling rpc_rqst, for debugging
*/
-void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
+void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
+ struct rpc_rqst *rqst)
{
xdr->buf = buf;
xdr->scratch.iov_base = NULL;
@@ -836,6 +871,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
xdr->nwords -= p - xdr->p;
xdr->p = p;
}
+ xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
@@ -854,7 +890,7 @@ void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
buf->page_len = len;
buf->buflen = len;
buf->len = len;
- xdr_init_decode(xdr, buf, NULL);
+ xdr_init_decode(xdr, buf, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
@@ -896,20 +932,23 @@ static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
size_t cplen = (char *)xdr->end - (char *)xdr->p;
if (nbytes > xdr->scratch.iov_len)
- return NULL;
+ goto out_overflow;
p = __xdr_inline_decode(xdr, cplen);
if (p == NULL)
return NULL;
memcpy(cpdest, p, cplen);
+ if (!xdr_set_next_buffer(xdr))
+ goto out_overflow;
cpdest += cplen;
nbytes -= cplen;
- if (!xdr_set_next_buffer(xdr))
- return NULL;
p = __xdr_inline_decode(xdr, nbytes);
if (p == NULL)
return NULL;
memcpy(cpdest, p, nbytes);
return xdr->scratch.iov_base;
+out_overflow:
+ trace_rpc_xdr_overflow(xdr, nbytes);
+ return NULL;
}
/**
@@ -926,14 +965,17 @@ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p;
- if (nbytes == 0)
+ if (unlikely(nbytes == 0))
return xdr->p;
if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
- return NULL;
+ goto out_overflow;
p = __xdr_inline_decode(xdr, nbytes);
if (p != NULL)
return p;
return xdr_copy_to_scratch(xdr, nbytes);
+out_overflow:
+ trace_rpc_xdr_overflow(xdr, nbytes);
+ return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
@@ -943,13 +985,17 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
struct kvec *iov;
unsigned int nwords = XDR_QUADLEN(len);
unsigned int cur = xdr_stream_pos(xdr);
+ unsigned int copied, offset;
if (xdr->nwords == 0)
return 0;
+
/* Realign pages to current pointer position */
- iov = buf->head;
+ iov = buf->head;
if (iov->iov_len > cur) {
- xdr_shrink_bufhead(buf, iov->iov_len - cur);
+ offset = iov->iov_len - cur;
+ copied = xdr_shrink_bufhead(buf, offset);
+ trace_rpc_xdr_alignment(xdr, offset, copied);
xdr->nwords = XDR_QUADLEN(buf->len - cur);
}
@@ -961,7 +1007,9 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
len = buf->page_len;
else if (nwords < xdr->nwords) {
/* Truncate page data and move it into the tail */
- xdr_shrink_pagelen(buf, buf->page_len - len);
+ offset = buf->page_len - len;
+ copied = xdr_shrink_pagelen(buf, offset);
+ trace_rpc_xdr_alignment(xdr, offset, copied);
xdr->nwords = XDR_QUADLEN(buf->len - cur);
}
return len;
@@ -1102,47 +1150,6 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
-/**
- * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
- * @buf: buf to be trimmed
- * @len: number of bytes to reduce "buf" by
- *
- * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
- * that it's possible that we'll trim less than that amount if the xdr_buf is
- * too small, or if (for instance) it's all in the head and the parser has
- * already read too far into it.
- */
-void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
-{
- size_t cur;
- unsigned int trim = len;
-
- if (buf->tail[0].iov_len) {
- cur = min_t(size_t, buf->tail[0].iov_len, trim);
- buf->tail[0].iov_len -= cur;
- trim -= cur;
- if (!trim)
- goto fix_len;
- }
-
- if (buf->page_len) {
- cur = min_t(unsigned int, buf->page_len, trim);
- buf->page_len -= cur;
- trim -= cur;
- if (!trim)
- goto fix_len;
- }
-
- if (buf->head[0].iov_len) {
- cur = min_t(size_t, buf->head[0].iov_len, trim);
- buf->head[0].iov_len -= cur;
- trim -= cur;
- }
-fix_len:
- buf->len -= (len - trim);
-}
-EXPORT_SYMBOL_GPL(xdr_buf_trim);
-
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
unsigned int this_len;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f1ec2110efeb..d7117d241460 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -49,6 +49,7 @@
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
+#include <linux/sched/mm.h>
#include <trace/events/sunrpc.h>
@@ -643,11 +644,13 @@ static void xprt_autoclose(struct work_struct *work)
{
struct rpc_xprt *xprt =
container_of(work, struct rpc_xprt, task_cleanup);
+ unsigned int pflags = memalloc_nofs_save();
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
xprt->ops->close(xprt);
xprt_release_write(xprt, NULL);
wake_up_bit(&xprt->state, XPRT_LOCKED);
+ memalloc_nofs_restore(pflags);
}
/**
@@ -661,7 +664,7 @@ void xprt_disconnect_done(struct rpc_xprt *xprt)
spin_lock_bh(&xprt->transport_lock);
xprt_clear_connected(xprt);
xprt_clear_write_space_locked(xprt);
- xprt_wake_pending_tasks(xprt, -EAGAIN);
+ xprt_wake_pending_tasks(xprt, -ENOTCONN);
spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
@@ -1165,6 +1168,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
/* Note: req is added _before_ pos */
list_add_tail(&req->rq_xmit, &pos->rq_xmit);
INIT_LIST_HEAD(&req->rq_xmit2);
+ trace_xprt_enq_xmit(task, 1);
goto out;
}
} else if (RPC_IS_SWAPPER(task)) {
@@ -1176,6 +1180,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
/* Note: req is added _before_ pos */
list_add_tail(&req->rq_xmit, &pos->rq_xmit);
INIT_LIST_HEAD(&req->rq_xmit2);
+ trace_xprt_enq_xmit(task, 2);
goto out;
}
} else if (!req->rq_seqno) {
@@ -1184,11 +1189,13 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
continue;
list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
INIT_LIST_HEAD(&req->rq_xmit);
+ trace_xprt_enq_xmit(task, 3);
goto out;
}
}
list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
INIT_LIST_HEAD(&req->rq_xmit2);
+ trace_xprt_enq_xmit(task, 4);
out:
set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
spin_unlock(&xprt->queue_lock);
@@ -1313,8 +1320,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
int is_retrans = RPC_WAS_SENT(task);
int status;
- dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
-
if (!req->rq_bytes_sent) {
if (xprt_request_data_received(task)) {
status = 0;
@@ -1325,6 +1330,13 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
status = -EBADMSG;
goto out_dequeue;
}
+ if (task->tk_ops->rpc_call_prepare_transmit) {
+ task->tk_ops->rpc_call_prepare_transmit(task,
+ task->tk_calldata);
+ status = task->tk_status;
+ if (status < 0)
+ goto out_dequeue;
+ }
}
/*
@@ -1336,9 +1348,9 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
connect_cookie = xprt->connect_cookie;
status = xprt->ops->send_request(req);
- trace_xprt_transmit(xprt, req->rq_xid, status);
if (status != 0) {
req->rq_ntrans--;
+ trace_xprt_transmit(req, status);
return status;
}
@@ -1347,7 +1359,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
xprt_inject_disconnect(xprt);
- dprintk("RPC: %5u xmit complete\n", task->tk_pid);
task->tk_flags |= RPC_TASK_SENT;
spin_lock_bh(&xprt->transport_lock);
@@ -1360,6 +1371,7 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
req->rq_connect_cookie = connect_cookie;
out_dequeue:
+ trace_xprt_transmit(req, status);
xprt_request_dequeue_transmit(task);
rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
return status;
@@ -1599,7 +1611,6 @@ xprt_request_init(struct rpc_task *task)
req->rq_buffer = NULL;
req->rq_xid = xprt_alloc_xid(xprt);
xprt_init_connect_cookie(req, xprt);
- req->rq_bytes_sent = 0;
req->rq_snd_buf.len = 0;
req->rq_snd_buf.buflen = 0;
req->rq_rcv_buf.len = 0;
@@ -1721,6 +1732,7 @@ void xprt_release(struct rpc_task *task)
xprt->ops->buf_free(task);
xprt_inject_disconnect(xprt);
xdr_free_bvec(&req->rq_rcv_buf);
+ xdr_free_bvec(&req->rq_snd_buf);
if (req->rq_cred != NULL)
put_rpccred(req->rq_cred);
task->tk_rqstp = NULL;
@@ -1749,7 +1761,6 @@ xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
*/
xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
xbufp->tail[0].iov_len;
- req->rq_bytes_sent = 0;
}
#endif
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 0de9b3e63770..d79b18c1f4cd 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -123,7 +123,7 @@ static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
- req->rl_rdmabuf->rg_base);
+ req->rl_rdmabuf->rg_base, rqst);
p = xdr_reserve_space(&req->rl_stream, 28);
if (unlikely(!p))
@@ -267,7 +267,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
/* Prepare rqst */
rqst->rq_reply_bytes_recvd = 0;
- rqst->rq_bytes_sent = 0;
rqst->rq_xid = *p;
rqst->rq_private_buf.len = size;
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 6a561056b538..52cb6c1b0c2b 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -391,7 +391,7 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
*/
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing, u32 xid,
+ int nsegs, bool writing, __be32 xid,
struct rpcrdma_mr **out)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
@@ -446,7 +446,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
goto out_mapmr_err;
ibmr->iova &= 0x00000000ffffffff;
- ibmr->iova |= ((u64)cpu_to_be32(xid)) << 32;
+ ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
key = (u8)(ibmr->rkey & 0x000000FF);
ib_update_fast_reg_key(ibmr, ++key);
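On little-endian hardware cpu_to_be32() and be32_to_cpu() perform the identical byte swap, so this hunk changes types rather than behavior: rq_xid is stored in wire (big-endian) order, and be32_to_cpu() is the conversion sparse expects when a wire-order value is folded into a host-order quantity such as the iova. A runnable illustration, with htonl()/ntohl() standing in for the kernel helpers:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t host_xid = 0x12345678;
	uint32_t wire_xid = htonl(host_xid);	/* how rq_xid is stored */

	/* On little-endian both helpers byte-swap, so old and new
	 * expressions compute the same bits... */
	printf("cpu_to_be32(wire) = %08x\n", (unsigned)htonl(wire_xid));
	printf("be32_to_cpu(wire) = %08x\n", (unsigned)ntohl(wire_xid));

	/* ...but only be32_to_cpu() says what is meant: wire -> host. */
	uint64_t iova = ((uint64_t)ntohl(wire_xid)) << 32;
	printf("iova tag = %016llx\n", (unsigned long long)iova);
	return 0;
}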
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index d18614e02b4e..6c1fb270f127 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -164,6 +164,21 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}
+/* The client is required to provide a Reply chunk if the maximum
+ * size of the non-payload part of the RPC Reply is larger than
+ * the inline threshold.
+ */
+static bool
+rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
+ const struct rpc_rqst *rqst)
+{
+ const struct xdr_buf *buf = &rqst->rq_rcv_buf;
+ const struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+
+ return buf->head[0].iov_len + buf->tail[0].iov_len <
+ ia->ri_max_inline_read;
+}
+
/* Split @vec on page boundaries into SGEs. FMR registers pages, not
* a byte range. Other modes coalesce these SGEs into a single MR
* when they can.
@@ -733,7 +748,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
xdr_init_encode(xdr, &req->rl_hdrbuf,
- req->rl_rdmabuf->rg_base);
+ req->rl_rdmabuf->rg_base, rqst);
/* Fixed header fields */
ret = -EMSGSIZE;
@@ -762,7 +777,8 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
*/
if (rpcrdma_results_inline(r_xprt, rqst))
wtype = rpcrdma_noch;
- else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
+ else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
+ rpcrdma_nonpayload_inline(r_xprt, rqst))
wtype = rpcrdma_writech;
else
wtype = rpcrdma_replych;
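The extra condition narrows when a Write chunk may be used: even if the payload itself is eligible for direct placement (XDRBUF_READ), the non-payload head and tail must still fit inline, or the client has nowhere to receive them. A simplified model of the resulting decision (struct fields replaced by plain parameters, names illustrative):

#include <stdbool.h>
#include <stdio.h>

enum chunktype { NOCH, WRITECH, REPLYCH };

/* Mirrors the wtype selection above, simplified. */
static enum chunktype choose_wtype(unsigned int reply_len,
				   unsigned int head_tail_len,
				   bool ddp_allowed,
				   unsigned int max_inline_read)
{
	if (reply_len <= max_inline_read)
		return NOCH;		/* whole reply fits inline */
	if (ddp_allowed && head_tail_len < max_inline_read)
		return WRITECH;		/* payload via Write chunk */
	return REPLYCH;			/* everything via Reply chunk */
}

int main(void)
{
	printf("%d\n", choose_wtype(8192, 240, true, 4096));	/* WRITECH */
	printf("%d\n", choose_wtype(8192, 8000, true, 4096));	/* REPLYCH */
	return 0;
}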
@@ -1313,7 +1329,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
/* Fixed transport header fields */
xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
- rep->rr_hdrbuf.head[0].iov_base);
+ rep->rr_hdrbuf.head[0].iov_base, NULL);
p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
if (unlikely(!p))
goto out_shortreply;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index b908f2ca08fd..907464c2a9f0 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -304,7 +304,6 @@ xprt_setup_rdma_bc(struct xprt_create *args)
xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
xprt->prot = XPRT_TRANSPORT_BC_RDMA;
- xprt->tsh_size = 0;
xprt->ops = &xprt_rdma_bc_procs;
memcpy(&xprt->addr, args->dstaddr, args->addrlen);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 828b149eaaef..65e2fb9aac65 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -272,11 +272,8 @@ bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
return false;
ctxt->rc_temp = true;
ret = __svc_rdma_post_recv(rdma, ctxt);
- if (ret) {
- pr_err("svcrdma: failure posting recv buffers: %d\n",
- ret);
+ if (ret)
return false;
- }
}
return true;
}
@@ -314,17 +311,14 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
spin_lock(&rdma->sc_rq_dto_lock);
list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
- spin_unlock(&rdma->sc_rq_dto_lock);
+ /* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
+ spin_unlock(&rdma->sc_rq_dto_lock);
if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
svc_xprt_enqueue(&rdma->sc_xprt);
goto out;
flushed:
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- pr_err("svcrdma: Recv: %s (%u/0x%x)\n",
- ib_wc_status_msg(wc->status),
- wc->status, wc->vendor_err);
post_err:
svc_rdma_recv_ctxt_put(rdma, ctxt);
set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index dc1951759a8e..2121c9b4d275 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -64,8 +64,7 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
spin_unlock(&rdma->sc_rw_ctxt_lock);
} else {
spin_unlock(&rdma->sc_rw_ctxt_lock);
- ctxt = kmalloc(sizeof(*ctxt) +
- SG_CHUNK_SIZE * sizeof(struct scatterlist),
+ ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
GFP_KERNEL);
if (!ctxt)
goto out;
@@ -213,13 +212,8 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
wake_up(&rdma->sc_send_wait);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (unlikely(wc->status != IB_WC_SUCCESS))
set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
- ib_wc_status_msg(wc->status),
- wc->status, wc->vendor_err);
- }
svc_rdma_write_info_free(info);
}
@@ -278,18 +272,15 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
if (unlikely(wc->status != IB_WC_SUCCESS)) {
set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
- ib_wc_status_msg(wc->status),
- wc->status, wc->vendor_err);
svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
} else {
spin_lock(&rdma->sc_rq_dto_lock);
list_add_tail(&info->ri_readctxt->rc_list,
&rdma->sc_read_complete_q);
+ /* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
+ set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
spin_unlock(&rdma->sc_rq_dto_lock);
- set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
svc_xprt_enqueue(&rdma->sc_xprt);
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 1f200119268c..6fdba72f89f4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -272,10 +272,6 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
if (unlikely(wc->status != IB_WC_SUCCESS)) {
set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
svc_xprt_enqueue(&rdma->sc_xprt);
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- pr_err("svcrdma: Send: %s (%u/0x%x)\n",
- ib_wc_status_msg(wc->status),
- wc->status, wc->vendor_err);
}
svc_xprt_put(&rdma->sc_xprt);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 57f86c63a463..027a3b07d329 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -390,8 +390,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
struct ib_qp_init_attr qp_attr;
unsigned int ctxts, rq_depth;
struct ib_device *dev;
- struct sockaddr *sap;
int ret = 0;
+ RPC_IFDEBUG(struct sockaddr *sap);
listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
clear_bit(XPT_CONN, &xprt->xpt_flags);
@@ -525,6 +525,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
if (ret)
goto errout;
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
dprintk("svcrdma: new connection %p accepted:\n", newxprt);
sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
dprintk(" local address : %pIS:%u\n", sap, rpc_get_port(sap));
@@ -535,6 +536,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
dprintk(" rdma_rw_ctxs : %d\n", ctxts);
dprintk(" max_requests : %d\n", newxprt->sc_max_requests);
dprintk(" ord : %d\n", conn_param.initiator_depth);
+#endif
trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
return &newxprt->sc_xprt;
@@ -588,11 +590,6 @@ static void __svc_rdma_free(struct work_struct *work)
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_drain_qp(rdma->sc_qp);
- /* We should only be called from kref_put */
- if (kref_read(&xprt->xpt_ref) != 0)
- pr_err("svcrdma: sc_xprt still in use? (%d)\n",
- kref_read(&xprt->xpt_ref));
-
svc_rdma_flush_recv_queues(rdma);
/* Final put of backchannel client transport */
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index fbc171ebfe91..5d261353bd90 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -332,7 +332,6 @@ xprt_setup_rdma(struct xprt_create *args)
xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
xprt->resvport = 0; /* privileged port not needed */
- xprt->tsh_size = 0; /* RPC-RDMA handles framing */
xprt->ops = &xprt_rdma_procs;
/*
@@ -738,7 +737,6 @@ xprt_rdma_send_request(struct rpc_rqst *rqst)
goto drop_connection;
rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
- rqst->rq_bytes_sent = 0;
/* An RPC with no reply will throw off credit accounting,
* so drop the connection to reset the credit grant.
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 21113bfd4eca..30cfc0efe699 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -90,7 +90,7 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
/* Flush Receives, then wait for deferred Reply work
* to complete.
*/
- ib_drain_qp(ia->ri_id->qp);
+ ib_drain_rq(ia->ri_id->qp);
drain_workqueue(buf->rb_completion_wq);
/* Deferred Reply processing might have scheduled
@@ -1481,6 +1481,8 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
if (ep->rep_receive_count > needed)
goto out;
needed -= ep->rep_receive_count;
+ if (!temp)
+ needed += RPCRDMA_MAX_RECV_BATCH;
count = 0;
wr = NULL;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 5a18472f2c9c..10f6593e1a6a 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -205,6 +205,16 @@ struct rpcrdma_rep {
struct ib_recv_wr rr_recv_wr;
};
+/* To reduce the rate at which a transport invokes ib_post_recv
+ * (and thus the hardware doorbell rate), xprtrdma posts Receive
+ * WRs in batches.
+ *
+ * Setting this to zero disables Receive post batching.
+ */
+enum {
+ RPCRDMA_MAX_RECV_BATCH = 7,
+};
+
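Batching trades a little Receive queue headroom for fewer doorbell rings: rather than replenishing one WR per completion, rpcrdma_post_recvs() overshoots by RPCRDMA_MAX_RECV_BATCH so posts arrive in bursts. A toy model of that accounting (simplified from the kernel logic, which additionally handles the 'temp' case):

#include <stdio.h>

#define MAX_RECV_BATCH 7

/* How many receive WRs to post, given how many RPCs need buffers
 * and how many WRs are already posted. Zero means the queue is
 * still primed and no doorbell is needed. */
static unsigned int recvs_to_post(unsigned int needed, unsigned int posted)
{
	if (posted > needed)
		return 0;
	needed -= posted;
	needed += MAX_RECV_BATCH;	/* overshoot: post in bursts */
	return needed;
}

int main(void)
{
	printf("%u\n", recvs_to_post(1, 0));	/* first post: 8 WRs */
	printf("%u\n", recvs_to_post(1, 8));	/* queue primed: 0 */
	return 0;
}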
/* struct rpcrdma_sendctx - DMA mapped SGEs to unmap after Send completes
*/
struct rpcrdma_req;
@@ -577,7 +587,7 @@ void frwr_release_mr(struct rpcrdma_mr *mr);
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt);
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing, u32 xid,
+ int nsegs, bool writing, __be32 xid,
struct rpcrdma_mr **mr);
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7754aa3e434f..732d4b57411a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -50,6 +50,7 @@
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uio.h>
+#include <linux/sched/mm.h>
#include <trace/events/sunrpc.h>
@@ -404,8 +405,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
size_t want, seek_init = seek, offset = 0;
ssize_t ret;
- if (seek < buf->head[0].iov_len) {
- want = min_t(size_t, count, buf->head[0].iov_len);
+ want = min_t(size_t, count, buf->head[0].iov_len);
+ if (seek < want) {
ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
if (ret <= 0)
goto sock_err;
@@ -416,13 +417,13 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
goto out;
seek = 0;
} else {
- seek -= buf->head[0].iov_len;
- offset += buf->head[0].iov_len;
+ seek -= want;
+ offset += want;
}
want = xs_alloc_sparse_pages(buf,
min_t(size_t, count - offset, buf->page_len),
- GFP_NOWAIT);
+ GFP_KERNEL);
if (seek < want) {
ret = xs_read_bvec(sock, msg, flags, buf->bvec,
xdr_buf_pagecount(buf),
@@ -442,8 +443,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
offset += want;
}
- if (seek < buf->tail[0].iov_len) {
- want = min_t(size_t, count - offset, buf->tail[0].iov_len);
+ want = min_t(size_t, count - offset, buf->tail[0].iov_len);
+ if (seek < want) {
ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
if (ret <= 0)
goto sock_err;
@@ -452,8 +453,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
goto out;
if (ret != want)
goto out;
- } else
- offset += buf->tail[0].iov_len;
+ } else if (offset < seek_init)
+ offset = seek_init;
ret = -EMSGSIZE;
out:
*read = offset - seek_init;
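The rewritten head and tail cases fix the resume bookkeeping: when a previous partial read already consumed part of a kvec, seek must be compared against want, the clamped per-leg length, not the full iov_len, or offset can advance past what this call actually requested. A toy model of the per-leg arithmetic:

#include <stddef.h>
#include <stdio.h>

/* Toy model of one kvec leg of xs_read_xdr_buf(): 'want' is the
 * clamped number of bytes this leg may contribute. */
static void read_leg(size_t iov_len, size_t count, size_t *seek, size_t *offset)
{
	size_t want = count < iov_len ? count : iov_len;

	if (*seek < want) {
		*offset += want - *seek;	/* pretend the read succeeded */
		*seek = 0;
	} else {
		*seek -= want;			/* skip the whole clamped leg */
		*offset += want;
	}
}

int main(void)
{
	size_t seek = 10, offset = 0;

	read_leg(100, 4, &seek, &offset);	/* want=4, seek 10 -> 6 */
	printf("seek=%zu offset=%zu\n", seek, offset);
	return 0;
}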
@@ -481,28 +482,40 @@ xs_read_stream_request_done(struct sock_xprt *transport)
return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
}
+static void
+xs_read_stream_check_eor(struct sock_xprt *transport,
+ struct msghdr *msg)
+{
+ if (xs_read_stream_request_done(transport))
+ msg->msg_flags |= MSG_EOR;
+}
+
static ssize_t
xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
int flags, struct rpc_rqst *req)
{
struct xdr_buf *buf = &req->rq_private_buf;
- size_t want, read;
- ssize_t ret;
+ size_t want, uninitialized_var(read);
+ ssize_t uninitialized_var(ret);
xs_read_header(transport, buf);
want = transport->recv.len - transport->recv.offset;
- ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
- transport->recv.copied + want, transport->recv.copied,
- &read);
- transport->recv.offset += read;
- transport->recv.copied += read;
- if (transport->recv.offset == transport->recv.len) {
- if (xs_read_stream_request_done(transport))
- msg->msg_flags |= MSG_EOR;
- return read;
+ if (want != 0) {
+ ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
+ transport->recv.copied + want,
+ transport->recv.copied,
+ &read);
+ transport->recv.offset += read;
+ transport->recv.copied += read;
}
+ if (transport->recv.offset == transport->recv.len)
+ xs_read_stream_check_eor(transport, msg);
+
+ if (want == 0)
+ return 0;
+
switch (ret) {
default:
break;
@@ -655,13 +668,35 @@ out_err:
return ret != 0 ? ret : -ESHUTDOWN;
}
+static __poll_t xs_poll_socket(struct sock_xprt *transport)
+{
+ return transport->sock->ops->poll(transport->file, transport->sock,
+ NULL);
+}
+
+static bool xs_poll_socket_readable(struct sock_xprt *transport)
+{
+ __poll_t events = xs_poll_socket(transport);
+
+ return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
+}
+
+static void xs_poll_check_readable(struct sock_xprt *transport)
+{
+ clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
+ if (!xs_poll_socket_readable(transport))
+ return;
+ if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
+ queue_work(xprtiod_workqueue, &transport->recv_worker);
+}
+
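The clear-then-poll sequence closes a race: bytes that arrive after the worker drains the socket but before XPRT_SOCK_DATA_READY is re-armed would otherwise sit unnoticed until the next data_ready callback. test_and_set_bit() keeps the requeue idempotent. Below is a single-threaded userspace model of the control flow; the kernel relies on the atomic bit ops for the concurrent case:

#include <stdbool.h>
#include <stdio.h>

static bool data_ready_flag;	/* models XPRT_SOCK_DATA_READY */
static bool socket_has_bytes;	/* models EPOLLIN from ->poll() */

static void queue_worker(void) { puts("worker requeued"); }

/* Pattern of xs_poll_check_readable(): clear the flag, then poll;
 * if bytes raced in, re-arm and requeue exactly once. */
static void poll_check_readable(void)
{
	data_ready_flag = false;
	if (!socket_has_bytes)
		return;
	if (!data_ready_flag) {		/* test_and_set_bit() in-kernel */
		data_ready_flag = true;
		queue_worker();
	}
}

int main(void)
{
	socket_has_bytes = true;	/* data raced in during the drain */
	poll_check_readable();
	return 0;
}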
static void xs_stream_data_receive(struct sock_xprt *transport)
{
size_t read = 0;
ssize_t ret = 0;
mutex_lock(&transport->recv_mutex);
- clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
if (transport->sock == NULL)
goto out;
for (;;) {
@@ -671,6 +706,10 @@ static void xs_stream_data_receive(struct sock_xprt *transport)
read += ret;
cond_resched();
}
+ if (ret == -ESHUTDOWN)
+ kernel_sock_shutdown(transport->sock, SHUT_RDWR);
+ else
+ xs_poll_check_readable(transport);
out:
mutex_unlock(&transport->recv_mutex);
trace_xs_stream_read_data(&transport->xprt, ret, read);
@@ -680,7 +719,10 @@ static void xs_stream_data_receive_workfn(struct work_struct *work)
{
struct sock_xprt *transport =
container_of(work, struct sock_xprt, recv_worker);
+ unsigned int pflags = memalloc_nofs_save();
+
xs_stream_data_receive(transport);
+ memalloc_nofs_restore(pflags);
}
static void
@@ -690,65 +732,65 @@ xs_stream_reset_connect(struct sock_xprt *transport)
transport->recv.len = 0;
transport->recv.copied = 0;
transport->xmit.offset = 0;
+}
+
+static void
+xs_stream_start_connect(struct sock_xprt *transport)
+{
transport->xprt.stat.connect_count++;
transport->xprt.stat.connect_start = jiffies;
}
#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
-static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
+static int xs_sendmsg(struct socket *sock, struct msghdr *msg, size_t seek)
{
- struct msghdr msg = {
- .msg_name = addr,
- .msg_namelen = addrlen,
- .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
- };
- struct kvec iov = {
- .iov_base = vec->iov_base + base,
- .iov_len = vec->iov_len - base,
- };
+ if (seek)
+ iov_iter_advance(&msg->msg_iter, seek);
+ return sock_sendmsg(sock, msg);
+}
- if (iov.iov_len != 0)
- return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
- return kernel_sendmsg(sock, &msg, NULL, 0, 0);
+static int xs_send_kvec(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t seek)
+{
+ iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
+ return xs_sendmsg(sock, msg, seek);
}
-static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
+static int xs_send_pagedata(struct socket *sock, struct msghdr *msg, struct xdr_buf *xdr, size_t base)
{
- ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
- struct page **ppage;
- unsigned int remainder;
int err;
- remainder = xdr->page_len - base;
- base += xdr->page_base;
- ppage = xdr->pages + (base >> PAGE_SHIFT);
- base &= ~PAGE_MASK;
- do_sendpage = sock->ops->sendpage;
- if (!zerocopy)
- do_sendpage = sock_no_sendpage;
- for(;;) {
- unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
- int flags = XS_SENDMSG_FLAGS;
+ err = xdr_alloc_bvec(xdr, GFP_KERNEL);
+ if (err < 0)
+ return err;
- remainder -= len;
- if (more)
- flags |= MSG_MORE;
- if (remainder != 0)
- flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE;
- err = do_sendpage(sock, *ppage, base, len, flags);
- if (remainder == 0 || err != len)
- break;
- *sent_p += err;
- ppage++;
- base = 0;
- }
- if (err > 0) {
- *sent_p += err;
- err = 0;
- }
- return err;
+ iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec,
+ xdr_buf_pagecount(xdr),
+ xdr->page_len + xdr->page_base);
+ return xs_sendmsg(sock, msg, base + xdr->page_base);
+}
+
+#define xs_record_marker_len() sizeof(rpc_fraghdr)
+
+/* Common case:
+ * - stream transport
+ * - sending from byte 0 of the message
+ * - the message is wholly contained in @xdr's head iovec
+ */
+static int xs_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
+ rpc_fraghdr marker, struct kvec *vec, size_t base)
+{
+ struct kvec iov[2] = {
+ [0] = {
+ .iov_base = &marker,
+ .iov_len = sizeof(marker)
+ },
+ [1] = *vec,
+ };
+ size_t len = iov[0].iov_len + iov[1].iov_len;
+
+ iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
+ return xs_sendmsg(sock, msg, base);
}
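Moving from sendpage()/kernel_sendmsg() to iov_iter-backed sock_sendmsg() lets the stream record marker travel in the same call as the head kvec, with no copying and no need for the caller to reserve header space (which is why the tsh_size assignments disappear later in this patch). The userspace analogue is a two-element iovec passed to sendmsg(); a compilable sketch:

#include <arpa/inet.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

#define LAST_FRAGMENT 0x80000000u

/* Send a record marker followed by the message head in one call,
 * mirroring xs_send_rm_and_kvec() above. */
static ssize_t send_rm_and_head(int fd, const void *head, size_t len)
{
	uint32_t marker = htonl(LAST_FRAGMENT | (uint32_t)len);
	struct iovec iov[2] = {
		{ .iov_base = &marker, .iov_len = sizeof(marker) },
		{ .iov_base = (void *)head, .iov_len = len },
	};
	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };

	return sendmsg(fd, &msg, MSG_NOSIGNAL);
}

int main(void)
{
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;
	return send_rm_and_head(sv[0], "hello", 5) == 9 ? 0 : 1;
}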
/**
@@ -758,49 +800,60 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
* @addrlen: UDP only -- length of destination address
* @xdr: buffer containing this request
* @base: starting position in the buffer
- * @zerocopy: true if it is safe to use sendpage()
+ * @rm: stream record marker field
* @sent_p: return the total number of bytes successfully queued for sending
*
*/
-static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
+static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, rpc_fraghdr rm, int *sent_p)
{
- unsigned int remainder = xdr->len - base;
+ struct msghdr msg = {
+ .msg_name = addr,
+ .msg_namelen = addrlen,
+ .msg_flags = XS_SENDMSG_FLAGS | MSG_MORE,
+ };
+ unsigned int rmsize = rm ? sizeof(rm) : 0;
+ unsigned int remainder = rmsize + xdr->len - base;
+ unsigned int want;
int err = 0;
- int sent = 0;
if (unlikely(!sock))
return -ENOTSOCK;
- if (base != 0) {
- addr = NULL;
- addrlen = 0;
- }
-
- if (base < xdr->head[0].iov_len || addr != NULL) {
- unsigned int len = xdr->head[0].iov_len - base;
+ want = xdr->head[0].iov_len + rmsize;
+ if (base < want) {
+ unsigned int len = want - base;
remainder -= len;
- err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
+ if (remainder == 0)
+ msg.msg_flags &= ~MSG_MORE;
+ if (rmsize)
+ err = xs_send_rm_and_kvec(sock, &msg, rm,
+ &xdr->head[0], base);
+ else
+ err = xs_send_kvec(sock, &msg, &xdr->head[0], base);
if (remainder == 0 || err != len)
goto out;
*sent_p += err;
base = 0;
} else
- base -= xdr->head[0].iov_len;
+ base -= want;
if (base < xdr->page_len) {
unsigned int len = xdr->page_len - base;
remainder -= len;
- err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
- *sent_p += sent;
- if (remainder == 0 || sent != len)
+ if (remainder == 0)
+ msg.msg_flags &= ~MSG_MORE;
+ err = xs_send_pagedata(sock, &msg, xdr, base);
+ if (remainder == 0 || err != len)
goto out;
+ *sent_p += err;
base = 0;
} else
base -= xdr->page_len;
if (base >= xdr->tail[0].iov_len)
return 0;
- err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
+ msg.msg_flags &= ~MSG_MORE;
+ err = xs_send_kvec(sock, &msg, &xdr->tail[0], base);
out:
if (err > 0) {
*sent_p += err;
@@ -856,7 +909,7 @@ static int xs_nospace(struct rpc_rqst *req)
static void
xs_stream_prepare_request(struct rpc_rqst *req)
{
- req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_NOIO);
+ req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
}
/*
@@ -870,13 +923,14 @@ xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
}
/*
- * Construct a stream transport record marker in @buf.
+ * Return the stream record marker field for a record of length < 2^31-1
*/
-static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
+static rpc_fraghdr
+xs_stream_record_marker(struct xdr_buf *xdr)
{
- u32 reclen = buf->len - sizeof(rpc_fraghdr);
- rpc_fraghdr *base = buf->head[0].iov_base;
- *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
+ if (!xdr->len)
+ return 0;
+ return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
}
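The marker layout comes from RPC record marking (RFC 1831): the top bit flags the last fragment and the low 31 bits carry the fragment length, hence the < 2^31-1 limit in the comment. A two-assertion demonstration of the encoding:

#include <assert.h>
#include <stdint.h>

#define LAST_FRAGMENT 0x80000000u

int main(void)
{
	uint32_t len = 0x1234;
	uint32_t marker = LAST_FRAGMENT | len;	/* host order, pre-cpu_to_be32 */

	assert(marker & LAST_FRAGMENT);		/* "no more fragments" */
	assert((marker & 0x7fffffffu) == len);	/* 31-bit length field */
	return 0;
}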
/**
@@ -905,15 +959,14 @@ static int xs_local_send_request(struct rpc_rqst *req)
return -ENOTCONN;
}
- xs_encode_stream_record_marker(&req->rq_snd_buf);
-
xs_pktdump("packet data:",
req->rq_svec->iov_base, req->rq_svec->iov_len);
req->rq_xtime = ktime_get();
status = xs_sendpages(transport->sock, NULL, 0, xdr,
transport->xmit.offset,
- true, &sent);
+ xs_stream_record_marker(xdr),
+ &sent);
dprintk("RPC: %s(%u) = %d\n",
__func__, xdr->len - transport->xmit.offset, status);
@@ -925,7 +978,6 @@ static int xs_local_send_request(struct rpc_rqst *req)
req->rq_bytes_sent = transport->xmit.offset;
if (likely(req->rq_bytes_sent >= req->rq_slen)) {
req->rq_xmit_bytes_sent += transport->xmit.offset;
- req->rq_bytes_sent = 0;
transport->xmit.offset = 0;
return 0;
}
@@ -981,7 +1033,7 @@ static int xs_udp_send_request(struct rpc_rqst *req)
req->rq_xtime = ktime_get();
status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
- xdr, 0, true, &sent);
+ xdr, 0, 0, &sent);
dprintk("RPC: xs_udp_send_request(%u) = %d\n",
xdr->len, status);
@@ -1045,7 +1097,6 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
struct xdr_buf *xdr = &req->rq_snd_buf;
- bool zerocopy = true;
bool vm_wait = false;
int status;
int sent;
@@ -1057,17 +1108,9 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
return -ENOTCONN;
}
- xs_encode_stream_record_marker(&req->rq_snd_buf);
-
xs_pktdump("packet data:",
req->rq_svec->iov_base,
req->rq_svec->iov_len);
- /* Don't use zero copy if this is a resend. If the RPC call
- * completes while the socket holds a reference to the pages,
- * then we may end up resending corrupted data.
- */
- if (req->rq_task->tk_flags & RPC_TASK_SENT)
- zerocopy = false;
if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
xs_tcp_set_socket_timeouts(xprt, transport->sock);
@@ -1080,7 +1123,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
sent = 0;
status = xs_sendpages(transport->sock, NULL, 0, xdr,
transport->xmit.offset,
- zerocopy, &sent);
+ xs_stream_record_marker(xdr),
+ &sent);
dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
xdr->len - transport->xmit.offset, status);
@@ -1091,7 +1135,6 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
req->rq_bytes_sent = transport->xmit.offset;
if (likely(req->rq_bytes_sent >= req->rq_slen)) {
req->rq_xmit_bytes_sent += transport->xmit.offset;
- req->rq_bytes_sent = 0;
transport->xmit.offset = 0;
return 0;
}
@@ -1211,6 +1254,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
struct socket *sock = transport->sock;
struct sock *sk = transport->inet;
struct rpc_xprt *xprt = &transport->xprt;
+ struct file *filp = transport->file;
if (sk == NULL)
return;
@@ -1224,6 +1268,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
write_lock_bh(&sk->sk_callback_lock);
transport->inet = NULL;
transport->sock = NULL;
+ transport->file = NULL;
sk->sk_user_data = NULL;
@@ -1231,10 +1276,12 @@ static void xs_reset_transport(struct sock_xprt *transport)
xprt_clear_connected(xprt);
write_unlock_bh(&sk->sk_callback_lock);
xs_sock_reset_connection_flags(xprt);
+ /* Reset stream record info */
+ xs_stream_reset_connect(transport);
mutex_unlock(&transport->recv_mutex);
trace_rpc_socket_close(xprt, sock);
- sock_release(sock);
+ fput(filp);
xprt_disconnect_done(xprt);
}
@@ -1358,7 +1405,6 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
int err;
mutex_lock(&transport->recv_mutex);
- clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
sk = transport->inet;
if (sk == NULL)
goto out;
@@ -1370,6 +1416,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
consume_skb(skb);
cond_resched();
}
+ xs_poll_check_readable(transport);
out:
mutex_unlock(&transport->recv_mutex);
}
@@ -1378,7 +1425,10 @@ static void xs_udp_data_receive_workfn(struct work_struct *work)
{
struct sock_xprt *transport =
container_of(work, struct sock_xprt, recv_worker);
+ unsigned int pflags = memalloc_nofs_save();
+
xs_udp_data_receive(transport);
+ memalloc_nofs_restore(pflags);
}
/**
@@ -1826,6 +1876,7 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
struct sock_xprt *transport, int family, int type,
int protocol, bool reuseport)
{
+ struct file *filp;
struct socket *sock;
int err;
@@ -1846,6 +1897,11 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
goto out;
}
+ filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+ if (IS_ERR(filp))
+ return ERR_CAST(filp);
+ transport->file = filp;
+
return sock;
out:
return ERR_PTR(err);
@@ -1869,7 +1925,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
sk->sk_write_space = xs_udp_write_space;
sock_set_flag(sk, SOCK_FASYNC);
sk->sk_error_report = xs_error_report;
- sk->sk_allocation = GFP_NOIO;
xprt_clear_connected(xprt);
@@ -1880,7 +1935,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
write_unlock_bh(&sk->sk_callback_lock);
}
- xs_stream_reset_connect(transport);
+ xs_stream_start_connect(transport);
return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
}
@@ -1892,6 +1947,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
static int xs_local_setup_socket(struct sock_xprt *transport)
{
struct rpc_xprt *xprt = &transport->xprt;
+ struct file *filp;
struct socket *sock;
int status = -EIO;
@@ -1904,6 +1960,13 @@ static int xs_local_setup_socket(struct sock_xprt *transport)
}
xs_reclassify_socket(AF_LOCAL, sock);
+ filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+ if (IS_ERR(filp)) {
+ status = PTR_ERR(filp);
+ goto out;
+ }
+ transport->file = filp;
+
dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
@@ -2057,7 +2120,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_data_ready = xs_data_ready;
sk->sk_write_space = xs_udp_write_space;
sock_set_flag(sk, SOCK_FASYNC);
- sk->sk_allocation = GFP_NOIO;
xprt_set_connected(xprt);
@@ -2220,7 +2282,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_write_space = xs_tcp_write_space;
sock_set_flag(sk, SOCK_FASYNC);
sk->sk_error_report = xs_error_report;
- sk->sk_allocation = GFP_NOIO;
/* socket options */
sock_reset_flag(sk, SOCK_LINGER);
@@ -2240,8 +2301,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
xs_set_memalloc(xprt);
- /* Reset TCP record info */
- xs_stream_reset_connect(transport);
+ xs_stream_start_connect(transport);
/* Tell the socket layer to start connecting... */
set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
@@ -2534,26 +2594,35 @@ static int bc_sendto(struct rpc_rqst *req)
{
int len;
struct xdr_buf *xbufp = &req->rq_snd_buf;
- struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport =
- container_of(xprt, struct sock_xprt, xprt);
- struct socket *sock = transport->sock;
+ container_of(req->rq_xprt, struct sock_xprt, xprt);
unsigned long headoff;
unsigned long tailoff;
+ struct page *tailpage;
+ struct msghdr msg = {
+ .msg_flags = MSG_MORE
+ };
+ rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
+ (u32)xbufp->len);
+ struct kvec iov = {
+ .iov_base = &marker,
+ .iov_len = sizeof(marker),
+ };
- xs_encode_stream_record_marker(xbufp);
+ len = kernel_sendmsg(transport->sock, &msg, &iov, 1, iov.iov_len);
+ if (len != iov.iov_len)
+ return -EAGAIN;
+ tailpage = NULL;
+ if (xbufp->tail[0].iov_len)
+ tailpage = virt_to_page(xbufp->tail[0].iov_base);
tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
- len = svc_send_common(sock, xbufp,
+ len = svc_send_common(transport->sock, xbufp,
virt_to_page(xbufp->head[0].iov_base), headoff,
- xbufp->tail[0].iov_base, tailoff);
-
- if (len != xbufp->len) {
- printk(KERN_NOTICE "Error sending entire callback!\n");
- len = -EAGAIN;
- }
-
+ tailpage, tailoff);
+ if (len != xbufp->len)
+ return -EAGAIN;
return len;
}
@@ -2793,7 +2862,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = 0;
- xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->bind_timeout = XS_BIND_TO;
@@ -2862,7 +2930,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = IPPROTO_UDP;
- xprt->tsh_size = 0;
/* XXX: header size can vary due to auth type, IPv6, etc. */
xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
@@ -2942,7 +3009,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = IPPROTO_TCP;
- xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->bind_timeout = XS_BIND_TO;
@@ -3015,7 +3081,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = IPPROTO_TCP;
- xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->timeout = &xs_tcp_default_timeout;
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 06fee142f09f..63f39201e41e 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -919,6 +919,9 @@ int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
{
struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);
+ if (!group)
+ return -EMSGSIZE;
+
if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
grp->type) ||
nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 341ecd796aa4..131aa2f0fd27 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -869,6 +869,8 @@ void tipc_link_reset(struct tipc_link *l)
__skb_queue_head_init(&list);
l->in_session = false;
+ /* Force re-synch of peer session number before establishing */
+ l->peer_session--;
l->session++;
l->mtu = l->advertised_mtu;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index bff241f03525..89993afe0fbd 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
for (; i < TIPC_NAMETBL_SIZE; i++) {
head = &tn->nametbl->services[i];
- if (*last_type) {
+ if (*last_type ||
+ (!i && *last_key && (*last_lower == *last_key))) {
service = tipc_service_find(net, *last_type);
if (!service)
return -EPIPE;
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f076edb74338..7ce1e86b024f 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr)
void tipc_net_stop(struct net *net)
{
- u32 self = tipc_own_addr(net);
-
- if (!self)
+ if (!tipc_own_id(net))
return;
- tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self);
rtnl_lock();
tipc_bearer_stop(net);
tipc_node_stop(net);
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 4ad3586da8f0..340a6e7c43a7 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
if (msg->rep_type)
tipc_tlv_init(msg->rep, msg->rep_type);
- if (cmd->header)
- (*cmd->header)(msg);
+ if (cmd->header) {
+ err = (*cmd->header)(msg);
+ if (err) {
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return err;
+ }
+ }
arg = nlmsg_new(0, GFP_KERNEL);
if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_bearer_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
if (!string_is_valid(b->name, len))
return -EINVAL;
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_link_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
if (!string_is_valid(lc->name, len))
return -EINVAL;
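Both netlink-compat hunks apply the same hardening recipe: before trusting a length derived from a user-supplied TLV, subtract the fixed struct header that precedes the name and reject non-positive remainders, so the string scan can never run past the TLV payload. A userspace sketch of the check (struct layout illustrative, not the real TIPC ABI):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_NAME 68	/* stands in for TIPC_MAX_LINK_NAME */

struct link_config {	/* illustrative layout only */
	unsigned int tolerance, priority, window;
	char name[MAX_NAME];
};

/* Validate that a name can fit in what the TLV actually carries. */
static bool name_len_ok(int tlv_data_len)
{
	int len = tlv_data_len - (int)offsetof(struct link_config, name);

	if (len <= 0)
		return false;	/* TLV shorter than the fixed header */
	if (len > MAX_NAME)
		len = MAX_NAME;
	return true;		/* then scan name for a NUL within len */
}

int main(void)
{
	printf("%d\n", name_len_ok(8));		/* 0: too short */
	printf("%d\n", name_len_ok(40));	/* 1 */
	return 0;
}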
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2dc4919ab23c..dd3b6dc17662 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -817,10 +817,10 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
struct tipc_link_entry *le = &n->links[bearer_id];
+ struct tipc_media_addr *maddr = NULL;
struct tipc_link *l = le->link;
- struct tipc_media_addr *maddr;
- struct sk_buff_head xmitq;
int old_bearer_id = bearer_id;
+ struct sk_buff_head xmitq;
if (!l)
return;
@@ -844,7 +844,8 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
tipc_node_write_unlock(n);
if (delete)
tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
- tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+ if (!skb_queue_empty(&xmitq))
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
tipc_sk_rcv(n->net, &le->inputq);
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e482b342bfa8..b542f14ed444 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1333,7 +1333,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
if (unlikely(!dest)) {
dest = &tsk->peer;
- if (!syn || dest->family != AF_TIPC)
+ if (!syn && dest->family != AF_TIPC)
return -EDESTADDRREQ;
}
@@ -2349,6 +2349,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
return 0;
}
+static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
+{
+ if (addr->family != AF_TIPC)
+ return false;
+ if (addr->addrtype == TIPC_SERVICE_RANGE)
+ return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
+ return (addr->addrtype == TIPC_SERVICE_ADDR ||
+ addr->addrtype == TIPC_SOCKET_ADDR);
+}
+
/**
* tipc_connect - establish a connection to another TIPC port
* @sock: socket structure
@@ -2384,18 +2394,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
if (!tipc_sk_type_connectionless(sk))
res = -EINVAL;
goto exit;
- } else if (dst->family != AF_TIPC) {
- res = -EINVAL;
}
- if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
+ if (!tipc_sockaddr_is_sane(dst)) {
res = -EINVAL;
- if (res)
goto exit;
-
+ }
/* DGRAM/RDM connect(), just save the destaddr */
if (tipc_sk_type_connectionless(sk)) {
memcpy(&tsk->peer, dest, destlen);
goto exit;
+ } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
+ res = -EINVAL;
+ goto exit;
}
previous = sk->sk_state;
@@ -3255,6 +3265,8 @@ static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
peer_port = tsk_peer_port(tsk);
nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
+ if (!nest)
+ return -EMSGSIZE;
if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
goto msg_full;
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 3481e4906bd6..9df82a573aa7 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -38,6 +38,8 @@
#include <linux/sysctl.h>
+static int zero;
+static int one = 1;
static struct ctl_table_header *tipc_ctl_hdr;
static struct ctl_table tipc_table[] = {
@@ -46,14 +48,16 @@ static struct ctl_table tipc_table[] = {
.data = &sysctl_tipc_rmem,
.maxlen = sizeof(sysctl_tipc_rmem),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
},
{
.procname = "named_timeout",
.data = &sysctl_tipc_named_timeout,
.maxlen = sizeof(sysctl_tipc_named_timeout),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
},
{
.procname = "sk_filter",
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 4a708a4e8583..b45932d78004 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -363,6 +363,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
struct tipc_subscription *sub;
if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
+ s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
tipc_conn_delete_sub(con, s);
return 0;
}
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 4a1da837a733..14dedb24fa7b 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
static void tls_device_free_ctx(struct tls_context *ctx)
{
- if (ctx->tx_conf == TLS_HW)
+ if (ctx->tx_conf == TLS_HW) {
kfree(tls_offload_ctx_tx(ctx));
+ kfree(ctx->tx.rec_seq);
+ kfree(ctx->tx.iv);
+ }
if (ctx->rx_conf == TLS_HW)
kfree(tls_offload_ctx_rx(ctx));
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk)
}
EXPORT_SYMBOL(tls_device_sk_destruct);
+void tls_device_free_resources_tx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_free_partial_record(sk, tls_ctx);
+}
+
static void tls_append_frag(struct tls_record_info *record,
struct page_frag *pfrag,
int size)
@@ -558,9 +568,6 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
MSG_DONTWAIT | MSG_NOSIGNAL);
sk->sk_allocation = sk_allocation;
}
-
- if (!rc)
- ctx->sk_write_space(sk);
}
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
@@ -590,7 +597,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
struct strp_msg *rxm = strp_msg(skb);
- int err = 0, offset = rxm->offset, copy, nsg;
+ int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
struct sk_buff *skb_iter, *unused;
struct scatterlist sg[1];
char *orig_buf, *buf;
@@ -621,25 +628,42 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
else
err = 0;
- copy = min_t(int, skb_pagelen(skb) - offset,
- rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+ data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
- if (skb->decrypted)
- skb_store_bits(skb, offset, buf, copy);
+ if (skb_pagelen(skb) > offset) {
+ copy = min_t(int, skb_pagelen(skb) - offset, data_len);
- offset += copy;
- buf += copy;
+ if (skb->decrypted)
+ skb_store_bits(skb, offset, buf, copy);
+ offset += copy;
+ buf += copy;
+ }
+
+ pos = skb_pagelen(skb);
skb_walk_frags(skb, skb_iter) {
- copy = min_t(int, skb_iter->len,
- rxm->full_len - offset + rxm->offset -
- TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+ int frag_pos;
+
+ /* Practically all frags must belong to msg if reencrypt
+ * is needed with current strparser and coalescing logic,
+ * but strparser may "get optimized", so let's be safe.
+ */
+ if (pos + skb_iter->len <= offset)
+ goto done_with_frag;
+ if (pos >= data_len + rxm->offset)
+ break;
+
+ frag_pos = offset - pos;
+ copy = min_t(int, skb_iter->len - frag_pos,
+ data_len + rxm->offset - offset);
if (skb_iter->decrypted)
- skb_store_bits(skb_iter, offset, buf, copy);
+ skb_store_bits(skb_iter, frag_pos, buf, copy);
offset += copy;
buf += copy;
+done_with_frag:
+ pos += skb_iter->len;
}
free_buf:
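The rewritten frag walk keeps two cursors: offset marks how much plaintext has been copied back, pos marks where the current fragment starts within the skb. A fragment is touched only where the two ranges intersect, which correctly skips frags wholly before the record and stops before the trailing auth tag. A toy model of the intersection arithmetic:

#include <stdio.h>

/* Given a fragment [pos, pos+len) and a copy window
 * [offset, data_end), how many bytes of this fragment to copy? */
static int frag_copy(int pos, int len, int offset, int data_end,
		     int *frag_pos)
{
	if (pos + len <= offset)
		return 0;	/* fragment wholly before the window */
	if (pos >= data_end)
		return -1;	/* past the payload: stop walking */
	*frag_pos = offset - pos;
	int copy = len - *frag_pos;
	if (copy > data_end - offset)
		copy = data_end - offset;
	return copy;
}

int main(void)
{
	int frag_pos;

	/* frag at 100..160, window 120..150: copy 30 from frag offset 20 */
	printf("%d\n", frag_copy(100, 60, 120, 150, &frag_pos));
	return 0;
}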
@@ -897,7 +921,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
goto release_netdev;
free_sw_resources:
+ up_read(&device_offload_lock);
tls_sw_free_resources_rx(sk);
+ down_read(&device_offload_lock);
release_ctx:
ctx->priv_ctx_rx = NULL;
release_netdev:
@@ -932,8 +958,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
}
out:
up_read(&device_offload_lock);
- kfree(tls_ctx->rx.rec_seq);
- kfree(tls_ctx->rx.iv);
tls_sw_release_resources_rx(sk);
}
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 54c3a758f2a7..c3a5fe624b4e 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -194,18 +194,26 @@ static void update_chksum(struct sk_buff *skb, int headln)
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
+ struct sock *sk = skb->sk;
+ int delta;
+
skb_copy_header(nskb, skb);
skb_put(nskb, skb->len);
memcpy(nskb->data, skb->data, headln);
- update_chksum(nskb, headln);
nskb->destructor = skb->destructor;
- nskb->sk = skb->sk;
+ nskb->sk = sk;
skb->destructor = NULL;
skb->sk = NULL;
- refcount_add(nskb->truesize - skb->truesize,
- &nskb->sk->sk_wmem_alloc);
+
+ update_chksum(nskb, headln);
+
+ delta = nskb->truesize - skb->truesize;
+ if (likely(delta < 0))
+ WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
+ else if (delta)
+ refcount_add(delta, &sk->sk_wmem_alloc);
}
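The delta handling matters because the fallback clone is typically smaller than the original TLS skb, so truesize shrinks; refcount_t forbids adding a negative value, and refcount_sub_and_test() is the sanctioned way down (the WARN flags the pathological case of wmem dropping to zero here). A sketch of the branch structure with plain integers:

#include <stdio.h>

static unsigned int wmem = 1000;	/* models sk->sk_wmem_alloc */

static void account_truesize_change(int old, int new)
{
	int delta = new - old;

	if (delta < 0)
		wmem -= -delta;		/* refcount_sub_and_test() in-kernel */
	else if (delta)
		wmem += delta;		/* refcount_add() */
}

int main(void)
{
	account_truesize_change(700, 640);	/* clone shrank by 60 */
	printf("wmem=%u\n", wmem);		/* 940 */
	return 0;
}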
/* This function may be called after the user socket is already
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 17e8667917aa..478603f43964 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -146,7 +146,6 @@ retry:
}
ctx->in_tcp_sendpages = false;
- ctx->sk_write_space(sk);
return 0;
}
@@ -209,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
return tls_push_sg(sk, ctx, sg, offset, flags);
}
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+ struct scatterlist *sg;
+
+ sg = ctx->partially_sent_record;
+ if (!sg)
+ return false;
+
+ while (1) {
+ put_page(sg_page(sg));
+ sk_mem_uncharge(sk, sg->length);
+
+ if (sg_is_last(sg))
+ break;
+ sg++;
+ }
+ ctx->partially_sent_record = NULL;
+ return true;
+}
+
static void tls_write_space(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
@@ -228,6 +247,8 @@ static void tls_write_space(struct sock *sk)
else
#endif
tls_sw_write_space(sk, ctx);
+
+ ctx->sk_write_space(sk);
}
static void tls_ctx_free(struct tls_context *ctx)
@@ -266,13 +287,14 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
kfree(ctx->tx.rec_seq);
kfree(ctx->tx.iv);
tls_sw_free_resources_tx(sk);
+#ifdef CONFIG_TLS_DEVICE
+ } else if (ctx->tx_conf == TLS_HW) {
+ tls_device_free_resources_tx(sk);
+#endif
}
- if (ctx->rx_conf == TLS_SW) {
- kfree(ctx->rx.rec_seq);
- kfree(ctx->rx.iv);
+ if (ctx->rx_conf == TLS_SW)
tls_sw_free_resources_rx(sk);
- }
#ifdef CONFIG_TLS_DEVICE
if (ctx->rx_conf == TLS_HW)
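
The new tls_free_partial_record() helper factors out the scatterlist walk that both the close path and tls_sw_free_resources_tx() need. A minimal sketch of the pattern, with struct sg_entry and the two *_stub helpers as hypothetical stand-ins for the kernel's scatterlist, put_page() and sk_mem_uncharge():

#include <stdbool.h>
#include <stddef.h>

struct sg_entry {
	void *page;
	unsigned int length;
	bool is_last;
};

static void put_page_stub(void *page) { (void)page; }
static void uncharge_stub(unsigned int len) { (void)len; }

/* Release every entry of the partially sent record, then clear the
 * pointer so callers can test whether anything was freed. */
static bool free_partial(struct sg_entry **partial)
{
	struct sg_entry *sg = *partial;

	if (!sg)
		return false;

	for (;;) {
		put_page_stub(sg->page);
		uncharge_stub(sg->length);
		if (sg->is_last)
			break;
		sg++;
	}
	*partial = NULL;
	return true;
}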
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 425351ac2a9b..29d6af43dd24 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1484,6 +1484,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
return err;
}
+ } else {
+ *zc = false;
}
rxm->full_len -= padding_length(ctx, tls_ctx, skb);
@@ -2050,20 +2052,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
/* Free up un-sent records in tx_list. First, free
* the partially sent record if any at head of tx_list.
*/
- if (tls_ctx->partially_sent_record) {
- struct scatterlist *sg = tls_ctx->partially_sent_record;
-
- while (1) {
- put_page(sg_page(sg));
- sk_mem_uncharge(sk, sg->length);
-
- if (sg_is_last(sg))
- break;
- sg++;
- }
-
- tls_ctx->partially_sent_record = NULL;
-
+ if (tls_free_partial_record(sk, tls_ctx)) {
rec = list_first_entry(&ctx->tx_list,
struct tls_rec, list);
list_del(&rec->list);
@@ -2089,6 +2078,9 @@ void tls_sw_release_resources_rx(struct sock *sk)
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ kfree(tls_ctx->rx.rec_seq);
+ kfree(tls_ctx->rx.iv);
+
if (ctx->aead_recv) {
kfree_skb(ctx->recv_pkt);
ctx->recv_pkt = NULL;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 3ae3a33da70b..602715fc9a75 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
*/
static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
{
+ const struct virtio_transport *t;
+ struct virtio_vsock_pkt *reply;
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
.type = le16_to_cpu(pkt->hdr.type),
@@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
return 0;
- pkt = virtio_transport_alloc_pkt(&info, 0,
- le64_to_cpu(pkt->hdr.dst_cid),
- le32_to_cpu(pkt->hdr.dst_port),
- le64_to_cpu(pkt->hdr.src_cid),
- le32_to_cpu(pkt->hdr.src_port));
- if (!pkt)
+ reply = virtio_transport_alloc_pkt(&info, 0,
+ le64_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port),
+ le64_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+ if (!reply)
return -ENOMEM;
- return virtio_transport_get_ops()->send_pkt(pkt);
+ t = virtio_transport_get_ops();
+ if (!t) {
+ virtio_transport_free_pkt(reply);
+ return -ENOTCONN;
+ }
+
+ return t->send_pkt(reply);
}
static void virtio_transport_wait_close(struct sock *sk, long timeout)
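
The fix above keeps the incoming packet and the generated reply in separate variables and tolerates a transport that is no longer registered. A compact sketch of that shape, where struct pkt, struct transport and get_transport() are all hypothetical:

#include <errno.h>
#include <stdlib.h>

struct pkt { int opcode; };
struct transport { int (*send_pkt)(struct pkt *p); };

/* May legitimately return NULL, e.g. after the backend was unloaded. */
static struct transport *get_transport(void) { return NULL; }

static int send_reset_reply(const struct pkt *request)
{
	struct transport *t;
	struct pkt *reply;

	(void)request;			/* headers would be copied from here */
	reply = calloc(1, sizeof(*reply));
	if (!reply)
		return -ENOMEM;

	t = get_transport();
	if (!t) {			/* no transport bound: drop the reply */
		free(reply);
		return -ENOTCONN;
	}
	return t->send_pkt(reply);	/* ownership passes to send_pkt */
}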
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 35f06563207d..11eaa5956f00 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -501,7 +501,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
}
desc->tfm = tfm_michael;
- desc->flags = 0;
if (crypto_shash_setkey(tfm_michael, key, 8))
return -1;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 25a9e3b5c154..47e30a58566c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13650,7 +13650,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_DEAUTHENTICATE,
@@ -13701,7 +13702,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
@@ -13709,7 +13711,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_DISCONNECT,
@@ -13738,7 +13741,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_DEL_PMKSA,
@@ -14090,7 +14094,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_WIPHY |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_SET_QOS_MAP,
@@ -14145,7 +14150,8 @@ static const struct genl_ops nl80211_ops[] = {
.doit = nl80211_set_pmk,
.policy = nl80211_policy,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_DEL_PMK,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2f1bf91eb226..a6fd5ce199da 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1309,6 +1309,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
return dfs_region1;
}
+static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
+ const struct ieee80211_wmm_ac *wmm_ac2,
+ struct ieee80211_wmm_ac *intersect)
+{
+ intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
+ intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
+ intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
+ intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
+}
+
/*
* Helper for regdom_intersect(), this does the real
* mathematical intersection fun
@@ -1323,6 +1333,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
struct ieee80211_freq_range *freq_range;
const struct ieee80211_power_rule *power_rule1, *power_rule2;
struct ieee80211_power_rule *power_rule;
+ const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
+ struct ieee80211_wmm_rule *wmm_rule;
u32 freq_diff, max_bandwidth1, max_bandwidth2;
freq_range1 = &rule1->freq_range;
@@ -1333,6 +1345,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
power_rule2 = &rule2->power_rule;
power_rule = &intersected_rule->power_rule;
+ wmm_rule1 = &rule1->wmm_rule;
+ wmm_rule2 = &rule2->wmm_rule;
+ wmm_rule = &intersected_rule->wmm_rule;
+
freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
freq_range2->start_freq_khz);
freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
@@ -1376,6 +1392,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
+ if (rule1->has_wmm && rule2->has_wmm) {
+ u8 ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ reg_wmm_rules_intersect(&wmm_rule1->client[ac],
+ &wmm_rule2->client[ac],
+ &wmm_rule->client[ac]);
+ reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
+ &wmm_rule2->ap[ac],
+ &wmm_rule->ap[ac]);
+ }
+
+ intersected_rule->has_wmm = true;
+ } else if (rule1->has_wmm) {
+ *wmm_rule = *wmm_rule1;
+ intersected_rule->has_wmm = true;
+ } else if (rule2->has_wmm) {
+ *wmm_rule = *wmm_rule2;
+ intersected_rule->has_wmm = true;
+ } else {
+ intersected_rule->has_wmm = false;
+ }
+
if (!is_valid_reg_rule(intersected_rule))
return -EINVAL;
@@ -3739,10 +3778,9 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
/*
* The last request may have been received before this
* registration call. Call the driver notifier if
- * initiator is USER and user type is CELL_BASE.
+ * initiator is USER.
*/
- if (lr->initiator == NL80211_REGDOM_SET_BY_USER &&
- lr->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE)
+ if (lr->initiator == NL80211_REGDOM_SET_BY_USER)
reg_call_notifier(wiphy, lr);
}
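
reg_wmm_rules_intersect() picks the stricter of each WMM parameter: the larger contention-window bounds and AIFSN, the smaller TXOP (cot). The same computation in self-contained form; struct wmm_ac here simply mirrors the four fields used above:

#include <stdint.h>

struct wmm_ac {
	uint16_t cw_min, cw_max, cot;
	uint8_t aifsn;
};

/* Take the conservative value of each limit from both rules. */
static void wmm_intersect(const struct wmm_ac *a, const struct wmm_ac *b,
			  struct wmm_ac *out)
{
	out->cw_min = a->cw_min > b->cw_min ? a->cw_min : b->cw_min;
	out->cw_max = a->cw_max > b->cw_max ? a->cw_max : b->cw_max;
	out->cot    = a->cot    < b->cot    ? a->cot    : b->cot;
	out->aifsn  = a->aifsn  > b->aifsn  ? a->aifsn  : b->aifsn;
}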
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 287518c6caa4..04d888628f29 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -190,10 +190,9 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
/* copy subelement as we need to change its content to
* mark an ie after it is processed.
*/
- sub_copy = kmalloc(subie_len, gfp);
+ sub_copy = kmemdup(subelement, subie_len, gfp);
if (!sub_copy)
return 0;
- memcpy(sub_copy, subelement, subie_len);
pos = &new_ie[0];
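
The scan.c change is the standard kmemdup() simplification: allocate and copy in one call, so the copy can never be reached with a failed allocation. A userspace equivalent for illustration (memdup here is a local sketch, not a library function):

#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)			/* copy only after the allocation is checked */
		memcpy(p, src, len);
	return p;
}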
diff --git a/net/wireless/util.c b/net/wireless/util.c
index e4b8db5e81ec..75899b62bdc9 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1220,9 +1220,11 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
else if (rate->bw == RATE_INFO_BW_HE_RU &&
rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26)
result = rates_26[rate->he_gi];
- else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
- rate->bw, rate->he_ru_alloc))
+ else {
+ WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
+ rate->bw, rate->he_ru_alloc);
return 0;
+ }
/* now scale to the appropriate MCS */
tmp = result;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index eff31348e20b..20a511398389 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -820,8 +820,13 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTED;
rc = 0;
out_put_neigh:
- if (rc)
+ if (rc) {
+ read_lock_bh(&x25_list_lock);
x25_neigh_put(x25->neighbour);
+ x25->neighbour = NULL;
+ read_unlock_bh(&x25_list_lock);
+ x25->state = X25_STATE_0;
+ }
out_put_route:
x25_route_put(rt);
out:
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 77520eacee8f..989e52386c35 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -193,9 +193,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
static void xdp_umem_release(struct xdp_umem *umem)
{
- struct task_struct *task;
- struct mm_struct *mm;
-
xdp_umem_clear_dev(umem);
ida_simple_remove(&umem_ida, umem->id);
@@ -214,21 +211,10 @@ static void xdp_umem_release(struct xdp_umem *umem)
xdp_umem_unpin_pages(umem);
- task = get_pid_task(umem->pid, PIDTYPE_PID);
- put_pid(umem->pid);
- if (!task)
- goto out;
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- goto out;
-
- mmput(mm);
kfree(umem->pages);
umem->pages = NULL;
xdp_umem_unaccount_pages(umem);
-out:
kfree(umem);
}
@@ -357,7 +343,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if (size_chk < 0)
return -EINVAL;
- umem->pid = get_task_pid(current, PIDTYPE_PID);
umem->address = (unsigned long)addr;
umem->chunk_mask = ~((u64)chunk_size - 1);
umem->size = size;
@@ -373,7 +358,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
err = xdp_umem_account_pages(umem);
if (err)
- goto out;
+ return err;
err = xdp_umem_pin_pages(umem);
if (err)
@@ -392,8 +377,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
out_account:
xdp_umem_unaccount_pages(umem);
-out:
- put_pid(umem->pid);
return err;
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 6697084e3fdf..a14e8864e4fa 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -407,6 +407,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (sxdp->sxdp_family != AF_XDP)
return -EINVAL;
+ flags = sxdp->sxdp_flags;
+ if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
+ return -EINVAL;
+
mutex_lock(&xs->mutex);
if (xs->dev) {
err = -EBUSY;
@@ -425,7 +429,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
}
qid = sxdp->sxdp_queue_id;
- flags = sxdp->sxdp_flags;
if (flags & XDP_SHARED_UMEM) {
struct xdp_sock *umem_xs;
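
Validating sxdp_flags before any state is touched, as the xsk_bind() hunk above now does, means unknown bits fail fast with -EINVAL instead of being silently ignored, which keeps the bits free for future extensions. Sketch with illustrative flag values (not the real AF_XDP ABI constants):

#include <errno.h>

#define F_SHARED   (1 << 0)	/* illustrative values only */
#define F_COPY     (1 << 1)
#define F_ZEROCOPY (1 << 2)

/* Reject any bit outside the supported mask up front. */
static int check_bind_flags(unsigned int flags)
{
	if (flags & ~(F_SHARED | F_COPY | F_ZEROCOPY))
		return -EINVAL;
	return 0;
}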
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 661d007c3b28..d5e06c8e0cbf 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -68,9 +68,9 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
if (!err && umem->fq)
- err = xsk_diag_put_ring(xs->tx, XDP_DIAG_UMEM_FILL_RING, nlskb);
+ err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
if (!err && umem->cq) {
- err = xsk_diag_put_ring(xs->tx, XDP_DIAG_UMEM_COMPLETION_RING,
+ err = xsk_diag_put_ring(umem->cq, XDP_DIAG_UMEM_COMPLETION_RING,
nlskb);
}
return err;
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index bcb5cbb40419..610c0bdc0c2b 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -174,8 +174,8 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
if (!xskq_is_valid_addr(q, d->addr))
return false;
- if (((d->addr + d->len) & q->chunk_mask) !=
- (d->addr & q->chunk_mask)) {
+ if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
+ d->options) {
q->invalid_descs++;
return false;
}
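
The xsk_queue.h check enforces that a descriptor stays inside one umem chunk, which holds exactly when addr and addr + len mask to the same chunk base, and, with this fix, that the reserved options field is zero. A stand-alone form of the same predicate (chunk_mask is ~(chunk_size - 1), as in xdp_umem_reg() above):

#include <stdbool.h>
#include <stdint.h>

static bool desc_ok(uint64_t addr, uint32_t len, uint32_t options,
		    uint64_t chunk_mask)
{
	/* buffer must not cross a chunk boundary */
	if (((addr + len) & chunk_mask) != (addr & chunk_mask))
		return false;
	/* reserved field must be zero so it can gain meaning later */
	return options == 0;
}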
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index dbb3c1945b5c..85fec98676d3 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
return NULL;
}
-static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
+static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
+ unsigned short family)
{
struct xfrmi_net *xfrmn;
- int ifindex;
struct xfrm_if *xi;
+ int ifindex = 0;
if (!secpath_exists(skb) || !skb->dev)
return NULL;
+ switch (family) {
+ case AF_INET6:
+ ifindex = inet6_sdif(skb);
+ break;
+ case AF_INET:
+ ifindex = inet_sdif(skb);
+ break;
+ }
+ if (!ifindex)
+ ifindex = skb->dev->ifindex;
+
xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
- ifindex = skb->dev->ifindex;
for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
if (ifindex == xi->dev->ifindex &&
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8d1a898d0ba5..a6b58df7a70f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3313,7 +3313,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
ifcb = xfrm_if_get_cb();
if (ifcb) {
- xi = ifcb->decode_session(skb);
+ xi = ifcb->decode_session(skb, family);
if (xi) {
if_id = xi->p.if_id;
net = xi->net;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 1bb971f46fc6..c62f712fdaf7 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -434,7 +434,7 @@ EXPORT_SYMBOL(xfrm_state_free);
static void ___xfrm_state_destroy(struct xfrm_state *x)
{
- tasklet_hrtimer_cancel(&x->mtimer);
+ hrtimer_cancel(&x->mtimer);
del_timer_sync(&x->rtimer);
kfree(x->aead);
kfree(x->aalg);
@@ -479,8 +479,8 @@ static void xfrm_state_gc_task(struct work_struct *work)
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
- struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
- struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
+ struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
+ enum hrtimer_restart ret = HRTIMER_NORESTART;
time64_t now = ktime_get_real_seconds();
time64_t next = TIME64_MAX;
int warn = 0;
@@ -544,7 +544,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
km_state_expired(x, 0, 0);
resched:
if (next != TIME64_MAX) {
- tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
+ hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
+ ret = HRTIMER_RESTART;
}
goto out;
@@ -561,7 +562,7 @@ expired:
out:
spin_unlock(&x->lock);
- return HRTIMER_NORESTART;
+ return ret;
}
static void xfrm_replay_timer_handler(struct timer_list *t);
@@ -580,8 +581,8 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
- tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
- CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
+ hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
+ x->mtimer.function = xfrm_timer_handler;
timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
x->curlft.add_time = ktime_get_real_seconds();
x->lft.soft_byte_limit = XFRM_INF;
@@ -1047,7 +1048,9 @@ found:
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
- tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
+ hrtimer_start(&x->mtimer,
+ ktime_set(net->xfrm.sysctl_acq_expires, 0),
+ HRTIMER_MODE_REL_SOFT);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
@@ -1159,7 +1162,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
- tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
if (x->replay_maxage)
mod_timer(&x->rtimer, jiffies + x->replay_maxage);
@@ -1266,7 +1269,9 @@ static struct xfrm_state *__find_acq_core(struct net *net,
x->mark.m = m->m;
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
xfrm_state_hold(x);
- tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
+ hrtimer_start(&x->mtimer,
+ ktime_set(net->xfrm.sysctl_acq_expires, 0),
+ HRTIMER_MODE_REL_SOFT);
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
h = xfrm_src_hash(net, daddr, saddr, family);
@@ -1571,7 +1576,8 @@ out:
memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
x1->km.dying = 0;
- tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ hrtimer_start(&x1->mtimer, ktime_set(1, 0),
+ HRTIMER_MODE_REL_SOFT);
if (x1->curlft.use_time)
xfrm_state_check_expire(x1);
@@ -1610,7 +1616,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
x->km.state = XFRM_STATE_EXPIRED;
- tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
return -EINVAL;
}
@@ -2384,7 +2390,7 @@ void xfrm_state_fini(struct net *net)
flush_work(&net->xfrm.state_hash_work);
flush_work(&xfrm_state_gc_work);
- xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+ xfrm_state_flush(net, 0, false, true);
WARN_ON(!list_empty(&net->xfrm.state_all));
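
With tasklet_hrtimer gone, the xfrm state timer follows the plain hrtimer pattern: initialize in a _SOFT mode, and rearm from inside the handler with hrtimer_forward_now() plus HRTIMER_RESTART rather than calling a start helper. A minimal kernel-style fragment of that pattern; the demo_* names are illustrative and the rearm flag stands in for the expiry bookkeeping:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_handler(struct hrtimer *me)
{
	int rearm = 1;		/* stand-in for "next expiry exists" */

	if (!rearm)
		return HRTIMER_NORESTART;

	hrtimer_forward_now(me, ktime_set(1, 0));	/* fire again in 1s */
	return HRTIMER_RESTART;
}

static void demo_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	demo_timer.function = demo_handler;
	hrtimer_start(&demo_timer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
}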
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index a131f9ff979e..6916931b1de1 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
ret = verify_policy_dir(p->dir);
if (ret)
return ret;
- if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
+ if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
return -EINVAL;
return 0;
@@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
return -EINVAL;
}
- switch (ut[i].id.proto) {
- case IPPROTO_AH:
- case IPPROTO_ESP:
- case IPPROTO_COMP:
-#if IS_ENABLED(CONFIG_IPV6)
- case IPPROTO_ROUTING:
- case IPPROTO_DSTOPTS:
-#endif
- case IPSEC_PROTO_ANY:
- break;
- default:
+ if (!xfrm_id_proto_valid(ut[i].id.proto))
return -EINVAL;
- }
-
}
return 0;
diff --git a/samples/Kconfig b/samples/Kconfig
index ad1ec7016d4c..d19754ccad08 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -147,6 +147,13 @@ config SAMPLE_VFIO_MDEV_MBOCHS
Specifically it does *not* include any legacy vga stuff.
Device looks a lot like "qemu -device secondary-vga".
+config SAMPLE_ANDROID_BINDERFS
+ bool "Build Android binderfs example"
+	depends on ANDROID_BINDERFS
+ help
+ Builds a sample program to illustrate the use of the Android binderfs
+ filesystem.
+
config SAMPLE_STATX
bool "Build example extended-stat using code"
depends on BROKEN
diff --git a/samples/Makefile b/samples/Makefile
index bd601c038b86..b1142a958811 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -3,4 +3,4 @@
obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \
hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \
configfs/ connector/ v4l/ trace_printk/ \
- vfio-mdev/ statx/ qmi/
+ vfio-mdev/ statx/ qmi/ binderfs/
diff --git a/samples/binderfs/Makefile b/samples/binderfs/Makefile
new file mode 100644
index 000000000000..01ca9f2529a7
--- /dev/null
+++ b/samples/binderfs/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SAMPLE_ANDROID_BINDERFS) += binderfs_example.o
diff --git a/samples/binderfs/binderfs_example.c b/samples/binderfs/binderfs_example.c
new file mode 100644
index 000000000000..5bbd2ebc0aea
--- /dev/null
+++ b/samples/binderfs/binderfs_example.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <linux/android/binder.h>
+#include <linux/android/binderfs.h>
+
+int main(int argc, char *argv[])
+{
+ int fd, ret, saved_errno;
+ size_t len;
+ struct binderfs_device device = { 0 };
+
+ ret = unshare(CLONE_NEWNS);
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to unshare mount namespace\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to mount / as private\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ ret = mkdir("/dev/binderfs", 0755);
+ if (ret < 0 && errno != EEXIST) {
+ fprintf(stderr, "%s - Failed to create binderfs mountpoint\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ ret = mount(NULL, "/dev/binderfs", "binder", 0, 0);
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to mount binderfs\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ memcpy(device.name, "my-binder", strlen("my-binder"));
+
+ fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ fprintf(stderr, "%s - Failed to open binder-control device\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ ret = ioctl(fd, BINDER_CTL_ADD, &device);
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to allocate new binder device\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Allocated new binder device with major %d, minor %d, and name %s\n",
+ device.major, device.minor, device.name);
+
+ ret = unlink("/dev/binderfs/my-binder");
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to delete binder device\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ /* Cleanup happens when the mount namespace dies. */
+ exit(EXIT_SUCCESS);
+}
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index 8408ccb7409f..a79828ab273f 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -18,7 +18,7 @@
* Default is /hbm1
* -r <rate> Rate limit in Mbps
* -s Get HBM stats (marked, dropped, etc.)
- * -t <time> Exit after specified seconds (deault is 0)
+ * -t <time> Exit after specified seconds (default is 0)
* -w Work conserving flag. cgroup can increase its bandwidth
* beyond the rate limit specified while there is available
* bandwidth. Current implementation assumes there is only
@@ -376,7 +376,7 @@ static void Usage(void)
" Default is /hbm1\n"
" -r <rate> Rate in Mbps\n"
" -s Update HBM stats\n"
- " -t <time> Exit after specified seconds (deault is 0)\n"
+ " -t <time> Exit after specified seconds (default is 0)\n"
" -w Work conserving flag. cgroup can increase\n"
" bandwidth beyond the rate limit specified\n"
" while there is available bandwidth. Current\n"
diff --git a/samples/v4l/v4l2-pci-skeleton.c b/samples/v4l/v4l2-pci-skeleton.c
index 27ec30952cfa..758ced8c3d06 100644
--- a/samples/v4l/v4l2-pci-skeleton.c
+++ b/samples/v4l/v4l2-pci-skeleton.c
@@ -139,16 +139,16 @@ static irqreturn_t skeleton_irq(int irq, void *dev_id)
spin_lock(&skel->qlock);
list_del(&new_buf->list);
spin_unlock(&skel->qlock);
- v4l2_get_timestamp(&new_buf->vb.v4l2_buf.timestamp);
- new_buf->vb.v4l2_buf.sequence = skel->sequence++;
- new_buf->vb.v4l2_buf.field = skel->field;
+ new_buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ new_buf->vb.sequence = skel->sequence++;
+ new_buf->vb.field = skel->field;
if (skel->format.field == V4L2_FIELD_ALTERNATE) {
if (skel->field == V4L2_FIELD_BOTTOM)
skel->field = V4L2_FIELD_TOP;
else if (skel->field == V4L2_FIELD_TOP)
skel->field = V4L2_FIELD_BOTTOM;
}
- vb2_buffer_done(&new_buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
#endif
return IRQ_HANDLED;
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 30816037036e..7484b9d8272f 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -24,6 +24,10 @@ depfile = $(subst $(comma),_,$(dot-target).d)
basetarget = $(basename $(notdir $@))
###
+# real prerequisites without phony targets
+real-prereqs = $(filter-out $(PHONY), $^)
+
+###
# Escape single quote for use in echo statements
escsq = $(subst $(squote),'\$(squote)',$1)
@@ -67,14 +71,10 @@ endef
# cc-cross-prefix
# Usage: CROSS_COMPILE := $(call cc-cross-prefix, m68k-linux-gnu- m68k-linux-)
-# Return first prefix where a prefix$(CC) is found in PATH.
-# If no $(CC) found in PATH with listed prefixes return nothing
-cc-cross-prefix = \
- $(word 1, $(foreach c,$(1), \
- $(shell set -e; \
- if (which $(strip $(c))$(CC)) > /dev/null 2>&1 ; then \
- echo $(c); \
- fi)))
+# Return first <prefix> where a <prefix>gcc is found in PATH.
+# If no gcc is found in PATH with the listed prefixes, return nothing
+cc-cross-prefix = $(firstword $(foreach c, $(filter-out -%, $(1)), \
+ $(if $(shell which $(c)gcc), $(c))))
# output directory for tests below
TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
@@ -134,12 +134,9 @@ cc-option-yn = $(call try-run,\
cc-disable-warning = $(call try-run,\
$(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
-# cc-version
-cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
-
# cc-ifversion
# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
-cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
+cc-ifversion = $(shell [ $(CONFIG_GCC_VERSION)0 $(1) $(2)000 ] && echo $(3) || echo $(4))
# cc-ldoption
# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index dad5583451af..87ff1dcc6bd5 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -27,4 +27,4 @@ cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
ld-option = $(success,$(LD) -v $(1))
# gcc version including patch level
-gcc-version := $(shell,$(srctree)/scripts/gcc-version.sh -p $(CC) | sed 's/^0*//')
+gcc-version := $(shell,$(srctree)/scripts/gcc-version.sh $(CC))
diff --git a/scripts/Makefile b/scripts/Makefile
index feb1f71381d7..9d442ee050bd 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -39,7 +39,6 @@ build_unifdef: $(obj)/unifdef
subdir-$(CONFIG_GCC_PLUGINS) += gcc-plugins
subdir-$(CONFIG_MODVERSIONS) += genksyms
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
-subdir-$(CONFIG_GDB_SCRIPTS) += gdb
# Let clean descend into subdirs
-subdir- += basic dtc kconfig mod package
+subdir- += basic dtc gdb kconfig mod package
diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic
index a62d2823f6cf..82ad63dcd62b 100644
--- a/scripts/Makefile.asm-generic
+++ b/scripts/Makefile.asm-generic
@@ -12,8 +12,19 @@ all:
src := $(subst /generated,,$(obj))
-include $(src)/Kbuild
+# $(generic)/Kbuild lists mandatory-y. Exclude um since it is a special case.
+ifneq ($(SRCARCH),um)
+include $(generic)/Kbuild
+endif
+
include scripts/Kbuild.include
+redundant := $(filter $(mandatory-y) $(generated-y), $(generic-y))
+redundant += $(foreach f, $(generic-y), $(if $(wildcard $(srctree)/$(src)/$(f)),$(f)))
+redundant := $(sort $(redundant))
+$(if $(redundant),\
+ $(warning redundant generic-y found in $(src)/Kbuild: $(redundant)))
+
# If arch does not implement mandatory headers, fallback to asm-generic ones.
mandatory-y := $(filter-out $(generated-y), $(mandatory-y))
generic-y += $(foreach f, $(mandatory-y), $(if $(wildcard $(srctree)/$(src)/$(f)),,$(f)))
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index fd03d60f6c5a..0c5969fa795f 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -63,7 +63,9 @@ ifneq ($(strip $(real-obj-y) $(need-builtin)),)
builtin-target := $(obj)/built-in.a
endif
+ifdef CONFIG_MODULES
modorder-target := $(obj)/modules.order
+endif
# We keep a list of all modules in $(MODVERDIR)
@@ -104,7 +106,7 @@ modkern_cflags = \
quiet_modtag = $(if $(part-of-module),[M], )
quiet_cmd_cc_s_c = CC $(quiet_modtag) $@
-cmd_cc_s_c = $(CC) $(c_flags) $(DISABLE_LTO) -fverbose-asm -S -o $@ $<
+ cmd_cc_s_c = $(CC) $(filter-out $(DEBUG_CFLAGS), $(c_flags)) $(DISABLE_LTO) -fverbose-asm -S -o $@ $<
$(obj)/%.s: $(src)/%.c FORCE
$(call if_changed_dep,cc_s_c)
@@ -118,7 +120,7 @@ $(obj)/%.i: $(src)/%.c FORCE
# These mirror gensymtypes_S and co below, keep them in synch.
cmd_gensymtypes_c = \
$(CPP) -D__GENKSYMS__ $(c_flags) $< | \
- $(GENKSYMS) $(if $(1), -T $(2)) \
+ scripts/genksyms/genksyms $(if $(1), -T $(2)) \
$(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
$(if $(KBUILD_PRESERVE),-p) \
-r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
@@ -197,11 +199,8 @@ sub_cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
"$(if $(part-of-module),1,0)" "$(@)";
recordmcount_source := $(srctree)/scripts/recordmcount.pl
endif # BUILD_C_RECORDMCOUNT
-cmd_record_mcount = \
- if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" = \
- "$(CC_FLAGS_FTRACE)" ]; then \
- $(sub_cmd_record_mcount) \
- fi
+cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)), \
+ $(sub_cmd_record_mcount))
endif # CC_USING_RECORD_MCOUNT
endif # CONFIG_FTRACE_MCOUNT_RECORD
@@ -223,6 +222,9 @@ endif
ifdef CONFIG_RETPOLINE
objtool_args += --retpoline
endif
+ifdef CONFIG_X86_SMAP
+ objtool_args += --uaccess
+endif
# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
# 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
@@ -312,13 +314,13 @@ $(real-obj-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
#
# These mirror gensymtypes_c and co above, keep them in synch.
cmd_gensymtypes_S = \
- (echo "\#include <linux/kernel.h>" ; \
+ { echo "\#include <linux/kernel.h>" ; \
echo "\#include <asm/asm-prototypes.h>" ; \
$(CPP) $(a_flags) $< | \
grep "\<___EXPORT_SYMBOL\>" | \
- sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \
+ sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ; } | \
$(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \
- $(GENKSYMS) $(if $(1), -T $(2)) \
+ scripts/genksyms/genksyms $(if $(1), -T $(2)) \
$(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
$(if $(KBUILD_PRESERVE),-p) \
-r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
@@ -394,17 +396,12 @@ $(obj)/%.asn1.c $(obj)/%.asn1.h: $(src)/%.asn1 $(objtree)/scripts/asn1_compiler
$(sort $(subdir-obj-y)): $(subdir-ym) ;
#
-# Rule to compile a set of .o files into one .o file
+# Rule to compile a set of .o files into one .a file (without symbol table)
#
ifdef builtin-target
-# built-in.a archives are made with no symbol table or index which
-# makes them small and fast, but unable to be used by the linker.
-# scripts/link-vmlinux.sh builds an aggregate built-in.a with a symbol
-# table and index.
quiet_cmd_ar_builtin = AR $@
- cmd_ar_builtin = rm -f $@; \
- $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(filter $(real-obj-y), $^)
+ cmd_ar_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(real-prereqs)
$(builtin-target): $(real-obj-y) FORCE
$(call if_changed,ar_builtin)
@@ -426,16 +423,12 @@ $(modorder-target): $(subdir-ym) FORCE
$(Q)(cat /dev/null; $(modorder-cmds)) > $@
#
-# Rule to compile a set of .o files into one .a file
+# Rule to compile a set of .o files into one .a file (with symbol table)
#
ifdef lib-target
-quiet_cmd_link_l_target = AR $@
-
-# lib target archives do get a symbol table and index
-cmd_link_l_target = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(lib-y)
$(lib-target): $(lib-y) FORCE
- $(call if_changed,link_l_target)
+ $(call if_changed,ar)
targets += $(lib-target)
@@ -457,6 +450,10 @@ targets += $(obj)/lib-ksyms.o
endif
+# NOTE:
+# Do not replace $(filter %.o,$^) with $(real-prereqs). When a single object
+# module is turned into a multi object module, $^ will contain header file
+# dependencies recorded in the .*.cmd file.
quiet_cmd_link_multi-m = LD [M] $@
cmd_link_multi-m = $(LD) $(ld_flags) -r -o $@ $(filter %.o,$^) $(cmd_secanalysis)
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 0393f75db4d4..a115259b57e7 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -67,13 +67,15 @@ _hostc_flags = $(KBUILD_HOSTCFLAGS) $(HOST_EXTRACFLAGS) \
_hostcxx_flags = $(KBUILD_HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \
$(HOSTCXXFLAGS_$(basetarget).o)
-ifeq ($(KBUILD_SRC),)
__hostc_flags = $(_hostc_flags)
__hostcxx_flags = $(_hostcxx_flags)
-else
+
+ifeq ($(KBUILD_EXTMOD),)
+ifneq ($(KBUILD_SRC),)
__hostc_flags = -I$(obj) $(call flags,_hostc_flags)
__hostcxx_flags = -I$(obj) $(call flags,_hostcxx_flags)
endif
+endif
hostc_flags = -Wp,-MD,$(depfile) $(__hostc_flags)
hostcxx_flags = -Wp,-MD,$(depfile) $(__hostcxx_flags)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 12b88d09c3a4..8a1f64f17740 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -137,14 +137,14 @@ _c_flags += $(if $(patsubst n%,, \
$(CFLAGS_KCOV))
endif
-# If building the kernel in a separate objtree expand all occurrences
-# of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/').
-
-ifeq ($(KBUILD_SRC),)
__c_flags = $(_c_flags)
__a_flags = $(_a_flags)
__cpp_flags = $(_cpp_flags)
-else
+
+# If building the kernel in a separate objtree expand all occurrences
+# of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/').
+ifeq ($(KBUILD_EXTMOD),)
+ifneq ($(KBUILD_SRC),)
# -I$(obj) locates generated .h files
# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
@@ -155,6 +155,7 @@ __c_flags = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
__a_flags = $(call flags,_a_flags)
__cpp_flags = $(call flags,_cpp_flags)
endif
+endif
c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
-include $(srctree)/include/linux/compiler_types.h \
@@ -230,7 +231,13 @@ $(obj)/%: $(src)/%_shipped
# ---------------------------------------------------------------------------
quiet_cmd_ld = LD $@
-cmd_ld = $(LD) $(ld_flags) $(filter-out FORCE,$^) -o $@
+ cmd_ld = $(LD) $(ld_flags) $(real-prereqs) -o $@
+
+# Archive
+# ---------------------------------------------------------------------------
+
+quiet_cmd_ar = AR $@
+ cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(real-prereqs)
# Objcopy
# ---------------------------------------------------------------------------
@@ -242,7 +249,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
# ---------------------------------------------------------------------------
quiet_cmd_gzip = GZIP $@
- cmd_gzip = cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@
+ cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@
# DTC
# ---------------------------------------------------------------------------
@@ -270,7 +277,7 @@ DTC_FLAGS += $(DTC_FLAGS_$(basetarget))
# Generate an assembly file to wrap the output of the device tree compiler
quiet_cmd_dt_S_dtb= DTB $@
cmd_dt_S_dtb= \
-( \
+{ \
echo '\#include <asm-generic/vmlinux.lds.h>'; \
echo '.section .dtb.init.rodata,"a"'; \
echo '.balign STRUCT_ALIGNMENT'; \
@@ -280,7 +287,7 @@ cmd_dt_S_dtb= \
echo '__dtb_$(subst -,_,$(*F))_end:'; \
echo '.global __dtb_$(subst -,_,$(*F))_end'; \
echo '.balign STRUCT_ALIGNMENT'; \
-) > $@
+} > $@
$(obj)/%.dtb.S: $(obj)/%.dtb FORCE
$(call if_changed,dt_S_dtb)
@@ -320,7 +327,7 @@ dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
# append the size as a 32-bit littleendian number as gzip does.
size_append = printf $(shell \
dec_size=0; \
-for F in $1; do \
+for F in $(real-prereqs); do \
fsize=$$($(CONFIG_SHELL) $(srctree)/scripts/file-size.sh $$F); \
dec_size=$$(expr $$dec_size + $$fsize); \
done; \
@@ -334,23 +341,20 @@ printf "%08x\n" $$dec_size | \
)
quiet_cmd_bzip2 = BZIP2 $@
-cmd_bzip2 = (cat $(filter-out FORCE,$^) | \
- bzip2 -9 && $(call size_append, $(filter-out FORCE,$^))) > $@
+ cmd_bzip2 = { cat $(real-prereqs) | bzip2 -9 && $(size_append); } > $@
# Lzma
# ---------------------------------------------------------------------------
quiet_cmd_lzma = LZMA $@
-cmd_lzma = (cat $(filter-out FORCE,$^) | \
- lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@
+ cmd_lzma = { cat $(real-prereqs) | lzma -9 && $(size_append); } > $@
quiet_cmd_lzo = LZO $@
-cmd_lzo = (cat $(filter-out FORCE,$^) | \
- lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@
+ cmd_lzo = { cat $(real-prereqs) | lzop -9 && $(size_append); } > $@
quiet_cmd_lz4 = LZ4 $@
-cmd_lz4 = (cat $(filter-out FORCE,$^) | \
- lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@
+ cmd_lz4 = { cat $(real-prereqs) | lz4c -l -c1 stdin stdout && \
+ $(size_append); } > $@
# U-Boot mkimage
# ---------------------------------------------------------------------------
@@ -392,13 +396,11 @@ quiet_cmd_uimage = UIMAGE $@
# big dictionary would increase the memory usage too much in the multi-call
# decompression mode. A BCJ filter isn't used either.
quiet_cmd_xzkern = XZKERN $@
-cmd_xzkern = (cat $(filter-out FORCE,$^) | \
- sh $(srctree)/scripts/xz_wrap.sh && \
- $(call size_append, $(filter-out FORCE,$^))) > $@
+ cmd_xzkern = { cat $(real-prereqs) | sh $(srctree)/scripts/xz_wrap.sh && \
+ $(size_append); } > $@
quiet_cmd_xzmisc = XZMISC $@
-cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
- xz --check=crc32 --lzma2=dict=1MiB) > $@
+ cmd_xzmisc = cat $(real-prereqs) | xz --check=crc32 --lzma2=dict=1MiB > $@
# ASM offsets
# ---------------------------------------------------------------------------
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index ff5ca9817a85..0dae402661f3 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -23,7 +23,7 @@ quiet_cmd_modules_install = INSTALL $@
mkdir -p $(2) ; \
cp $@ $(2) ; \
$(mod_strip_cmd) $(2)/$(notdir $@) ; \
- $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD)) && \
+ $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD)) ; \
$(mod_compress_cmd) $(2)/$(notdir $@)
# Modules built outside the kernel source tree go into extra by default
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 7d4af0d0accb..6b7f354f189a 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -77,7 +77,6 @@ modpost = scripts/mod/modpost \
$(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
$(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
$(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
- $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
$(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
$(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
@@ -122,7 +121,7 @@ quiet_cmd_ld_ko_o = LD [M] $@
cmd_ld_ko_o = \
$(LD) -r $(KBUILD_LDFLAGS) \
$(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \
- -o $@ $(filter-out FORCE,$^) ; \
+ -o $@ $(real-prereqs) ; \
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
$(modules): %.ko :%.o %.mod.o FORCE
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index 38b2b4818e8e..019771b845c5 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -3,7 +3,6 @@ ifdef CONFIG_UBSAN
CFLAGS_UBSAN += $(call cc-option, -fsanitize=shift)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable)
- CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size)
diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
index 6e6d63957da3..84bf6b500815 100755
--- a/scripts/adjust_autoksyms.sh
+++ b/scripts/adjust_autoksyms.sh
@@ -39,14 +39,7 @@ case "$KBUILD_VERBOSE" in
esac
# We need access to CONFIG_ symbols
-case "${KCONFIG_CONFIG}" in
-*/*)
- . "${KCONFIG_CONFIG}"
- ;;
-*)
- # Force using a file from the current directory
- . "./${KCONFIG_CONFIG}"
-esac
+. include/config/auto.conf
# Generate a new ksym list file with symbols needed by the current
# set of modules.
diff --git a/scripts/atomic/gen-atomics.sh b/scripts/atomic/gen-atomics.sh
index 27400b0cd732..000dc6437893 100644
--- a/scripts/atomic/gen-atomics.sh
+++ b/scripts/atomic/gen-atomics.sh
@@ -13,7 +13,7 @@ gen-atomic-long.sh asm-generic/atomic-long.h
gen-atomic-fallback.sh linux/atomic-fallback.h
EOF
while read script header; do
- ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
+ /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
HASH="$(sha1sum ${LINUXDIR}/include/${header})"
HASH="${HASH%% *}"
printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 8d8d26b5cbbd..a09333fd7cef 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5977,7 +5977,7 @@ sub process {
while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) {
$specifier = $1;
$extension = $2;
- if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOx]/) {
+ if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOxt]/) {
$bad_specifier = $specifier;
last;
}
@@ -6396,19 +6396,6 @@ sub process {
}
}
-# check for bool bitfields
- if ($sline =~ /^.\s+bool\s*$Ident\s*:\s*\d+\s*;/) {
- WARN("BOOL_BITFIELD",
- "Avoid using bool as bitfield. Prefer bool bitfields as unsigned int or u<8|16|32>\n" . $herecurr);
- }
-
-# check for bool use in .h files
- if ($realfile =~ /\.h$/ &&
- $sline =~ /^.\s+bool\s*$Ident\s*(?::\s*d+\s*)?;/) {
- CHK("BOOL_MEMBER",
- "Avoid using bool structure members because of possible alignment issues - see: https://lkml.org/lkml/2017/11/21/384\n" . $herecurr);
- }
-
# check for semaphores initialized locked
if ($line =~ /^.\s*sema_init.+,\W?0\W?\)/) {
WARN("CONSIDER_COMPLETION",
diff --git a/scripts/clang-version.sh b/scripts/clang-version.sh
index e65fbc3079d4..6fabf0695761 100755
--- a/scripts/clang-version.sh
+++ b/scripts/clang-version.sh
@@ -1,14 +1,10 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
-# clang-version [-p] clang-command
-#
-# Prints the compiler version of `clang-command' in a canonical 4-digit form
-# such as `0500' for clang-5.0 etc.
-#
-# With the -p option, prints the patchlevel as well, for example `050001' for
-# clang-5.0.1 etc.
+# clang-version clang-command
#
+# Print the compiler version of `clang-command' in a 5 or 6-digit form
+# such as `50001' for clang-5.0.1 etc.
compiler="$*"
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci
new file mode 100644
index 000000000000..350145da7669
--- /dev/null
+++ b/scripts/coccinelle/api/stream_open.cocci
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+// Author: Kirill Smelkov (kirr@nexedi.com)
+//
+// Search for stream-like files that are using nonseekable_open and convert
+// them to stream_open. A stream-like file is a file that does not use ppos in
+// its read and write. Rationale for the conversion is to avoid a deadlock
+// between read and write.
+
+virtual report
+virtual patch
+virtual explain // explain decisions in the patch (SPFLAGS="-D explain")
+
+// stream-like reader & writer - ones that do not depend on f_pos.
+@ stream_reader @
+identifier readstream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+ ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos)
+ {
+ ... when != ppos
+ }
+
+@ stream_writer @
+identifier writestream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+ ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos)
+ {
+ ... when != ppos
+ }
+
+
+// a function that blocks
+@ blocks @
+identifier block_f;
+identifier wait_event =~ "^wait_event_.*";
+@@
+ block_f(...) {
+ ... when exists
+ wait_event(...)
+ ... when exists
+ }
+
+// stream_reader that can block inside.
+//
+// XXX wait_* can be called indirectly, not just from the current function (e.g. func -> f -> g -> wait())
+// XXX currently reader_blocks supports only direct and 1-level indirect cases.
+@ reader_blocks_direct @
+identifier stream_reader.readstream;
+identifier wait_event =~ "^wait_event_.*";
+@@
+ readstream(...)
+ {
+ ... when exists
+ wait_event(...)
+ ... when exists
+ }
+
+@ reader_blocks_1 @
+identifier stream_reader.readstream;
+identifier blocks.block_f;
+@@
+ readstream(...)
+ {
+ ... when exists
+ block_f(...)
+ ... when exists
+ }
+
+@ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @
+identifier stream_reader.readstream;
+@@
+ readstream(...) {
+ ...
+ }
+
+
+// file_operations + whether they have _any_ .read, .write, .llseek ... at all.
+//
+// XXX add support for file_operations xxx[N] = ... (sound/core/pcm_native.c)
+@ fops0 @
+identifier fops;
+@@
+ struct file_operations fops = {
+ ...
+ };
+
+@ has_read @
+identifier fops0.fops;
+identifier read_f;
+@@
+ struct file_operations fops = {
+ .read = read_f,
+ };
+
+@ has_read_iter @
+identifier fops0.fops;
+identifier read_iter_f;
+@@
+ struct file_operations fops = {
+ .read_iter = read_iter_f,
+ };
+
+@ has_write @
+identifier fops0.fops;
+identifier write_f;
+@@
+ struct file_operations fops = {
+ .write = write_f,
+ };
+
+@ has_write_iter @
+identifier fops0.fops;
+identifier write_iter_f;
+@@
+ struct file_operations fops = {
+ .write_iter = write_iter_f,
+ };
+
+@ has_llseek @
+identifier fops0.fops;
+identifier llseek_f;
+@@
+ struct file_operations fops = {
+ .llseek = llseek_f,
+ };
+
+@ has_no_llseek @
+identifier fops0.fops;
+@@
+ struct file_operations fops = {
+ .llseek = no_llseek,
+ };
+
+@ has_mmap @
+identifier fops0.fops;
+identifier mmap_f;
+@@
+ struct file_operations fops = {
+ .mmap = mmap_f,
+ };
+
+@ has_copy_file_range @
+identifier fops0.fops;
+identifier copy_file_range_f;
+@@
+ struct file_operations fops = {
+ .copy_file_range = copy_file_range_f,
+ };
+
+@ has_remap_file_range @
+identifier fops0.fops;
+identifier remap_file_range_f;
+@@
+ struct file_operations fops = {
+ .remap_file_range = remap_file_range_f,
+ };
+
+@ has_splice_read @
+identifier fops0.fops;
+identifier splice_read_f;
+@@
+ struct file_operations fops = {
+ .splice_read = splice_read_f,
+ };
+
+@ has_splice_write @
+identifier fops0.fops;
+identifier splice_write_f;
+@@
+ struct file_operations fops = {
+ .splice_write = splice_write_f,
+ };
+
+
+// file_operations that is a candidate for stream_open conversion - it does not
+// use mmap and other methods that assume @offset access to file.
+//
+// XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now.
+// XXX maybe_stream.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops".
+@ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @
+identifier fops0.fops;
+@@
+ struct file_operations fops = {
+ };
+
+
+// ---- conversions ----
+
+// XXX .open = nonseekable_open -> .open = stream_open
+// XXX .open = func -> openfunc -> nonseekable_open
+
+// read & write
+//
+// if both are used in the same file_operations together with an opener -
+// under that conditions we can use stream_open instead of nonseekable_open.
+@ fops_rw depends on maybe_stream @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+identifier stream_writer.writestream;
+@@
+ struct file_operations fops = {
+ .open = openfunc,
+ .read = readstream,
+ .write = writestream,
+ };
+
+@ report_rw depends on report @
+identifier fops_rw.openfunc;
+position p1;
+@@
+ openfunc(...) {
+ <...
+ nonseekable_open@p1
+ ...>
+ }
+
+@ script:python depends on report && reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+ "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,))
+
+@ script:python depends on report && !reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+ "WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+
+@ explain_rw_deadlocked depends on explain && reader_blocks @
+identifier fops_rw.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ nonseekable_open /* read & write (was deadlock) */
+ ...>
+ }
+
+
+@ explain_rw_nodeadlock depends on explain && !reader_blocks @
+identifier fops_rw.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ nonseekable_open /* read & write (no direct deadlock) */
+ ...>
+ }
+
+@ patch_rw depends on patch @
+identifier fops_rw.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ stream_open
+ ...>
+ }
+
+
+// read, but not write
+@ fops_r depends on maybe_stream && !has_write @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+@@
+ struct file_operations fops = {
+ .open = openfunc,
+ .read = readstream,
+ };
+
+@ report_r depends on report @
+identifier fops_r.openfunc;
+position p1;
+@@
+ openfunc(...) {
+ <...
+ nonseekable_open@p1
+ ...>
+ }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_r.p1;
+@@
+coccilib.report.print_report(p[0],
+ "WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_r depends on explain @
+identifier fops_r.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ nonseekable_open /* read only */
+ ...>
+ }
+
+@ patch_r depends on patch @
+identifier fops_r.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ stream_open
+ ...>
+ }
+
+
+// write, but not read
+@ fops_w depends on maybe_stream && !has_read @
+identifier fops0.fops, openfunc;
+identifier stream_writer.writestream;
+@@
+ struct file_operations fops = {
+ .open = openfunc,
+ .write = writestream,
+ };
+
+@ report_w depends on report @
+identifier fops_w.openfunc;
+position p1;
+@@
+ openfunc(...) {
+ <...
+ nonseekable_open@p1
+ ...>
+ }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_w.p1;
+@@
+coccilib.report.print_report(p[0],
+ "WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_w depends on explain @
+identifier fops_w.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ nonseekable_open /* write only */
+ ...>
+ }
+
+@ patch_w depends on patch @
+identifier fops_w.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ stream_open
+ ...>
+ }
+
+
+// no read, no write - don't change anything
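
For context, the conversion target looks like this at a call site: a driver whose read and write never touch *ppos switches its open to stream_open(), which marks the file as stream-like so reads and writes no longer serialize on the file position lock and cannot deadlock each other. The mydev_open name is hypothetical:

#include <linux/fs.h>

/* Hypothetical driver open; this driver's read/write ignore *ppos. */
static int mydev_open(struct inode *inode, struct file *file)
{
	return stream_open(inode, file);
}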
diff --git a/scripts/coccinelle/free/put_device.cocci b/scripts/coccinelle/free/put_device.cocci
new file mode 100644
index 000000000000..c9f071b0a0ab
--- /dev/null
+++ b/scripts/coccinelle/free/put_device.cocci
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/// Find missing put_device for every of_find_device_by_node.
+///
+// Confidence: Moderate
+// Copyright: (C) 2018-2019 Wen Yang, ZTE.
+// Comments:
+// Options: --no-includes --include-headers
+
+virtual report
+virtual org
+
+@search exists@
+local idexpression id;
+expression x,e,e1;
+position p1,p2;
+type T,T1,T2,T3;
+@@
+
+id = of_find_device_by_node@p1(x)
+... when != e = id
+if (id == NULL || ...) { ... return ...; }
+... when != put_device(&id->dev)
+ when != platform_device_put(id)
+ when != of_dev_put(id)
+ when != if (id) { ... put_device(&id->dev) ... }
+ when != e1 = (T)id
+ when != e1 = &id->dev
+ when != e1 = get_device(&id->dev)
+ when != e1 = (T1)platform_get_drvdata(id)
+(
+ return
+( id
+| (T2)dev_get_drvdata(&id->dev)
+| (T3)platform_get_drvdata(id)
+| &id->dev
+);
+| return@p2 ...;
+)
+
+@script:python depends on report@
+p1 << search.p1;
+p2 << search.p2;
+@@
+
+coccilib.report.print_report(p2[0], "ERROR: missing put_device; "
+ + "call of_find_device_by_node on line "
+ + p1[0].line
+ + ", but without a corresponding object release "
+ + "within this function.")
+
+@script:python depends on org@
+p1 << search.p1;
+p2 << search.p2;
+@@
+
+cocci.print_main("of_find_device_by_node", p1)
+cocci.print_secs("needed put_device", p2)
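
The reference-counting contract the rule checks, written out as a minimal hypothetical user of the API: of_find_device_by_node() returns a device with an elevated reference count, and every return path after a successful lookup must drop it with put_device():

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int demo_use_node(struct device_node *np)
{
	struct platform_device *pdev;

	pdev = of_find_device_by_node(np);	/* takes a reference */
	if (!pdev)
		return -ENODEV;

	/* ... use pdev ... */

	put_device(&pdev->dev);			/* drop it on every exit path */
	return 0;
}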
diff --git a/scripts/coccinelle/misc/badty.cocci b/scripts/coccinelle/misc/badty.cocci
index 481cf301ccfc..08470362199c 100644
--- a/scripts/coccinelle/misc/badty.cocci
+++ b/scripts/coccinelle/misc/badty.cocci
@@ -1,4 +1,4 @@
-/// Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element
+/// Correct the size argument to alloc functions
///
//# This makes an effort to find cases where the argument to sizeof is wrong
//# in memory allocation functions by checking the type of the allocated memory
diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff
index 8c4fbad2055e..0d8572008729 100755
--- a/scripts/dtc/dtx_diff
+++ b/scripts/dtc/dtx_diff
@@ -21,6 +21,7 @@ Usage:
diff DTx_1 and DTx_2
+ --annotate synonym for -T
-f print full dts in diff (--unified=99999)
-h synonym for --help
-help synonym for --help
@@ -28,6 +29,7 @@ Usage:
-s SRCTREE linux kernel source tree is at path SRCTREE
(default is current directory)
-S linux kernel source tree is at root of current git repo
+ -T Annotate output .dts with input source file and line (-T -T for more details)
-u unsorted, do not sort DTx
@@ -174,6 +176,7 @@ compile_to_dts() {
# ----- start of script
+annotate=""
cmd_diff=0
diff_flags="-u"
dtx_file_1=""
@@ -208,6 +211,14 @@ while [ $# -gt 0 ] ; do
shift
;;
+ -T | --annotate )
+ if [ "${annotate}" = "" ] ; then
+ annotate="-T"
+ elif [ "${annotate}" = "-T" ] ; then
+ annotate="-T -T"
+ fi
+ shift
+ ;;
-u )
dtc_sort=""
shift
@@ -327,7 +338,7 @@ cpp_flags="\
DTC="\
${DTC} \
-i ${srctree}/scripts/dtc/include-prefixes \
- -O dts -qq -f ${dtc_sort} -o -"
+ -O dts -qq -f ${dtc_sort} ${annotate} -o -"
# ----- do the diff or decompile
diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh
index 11bb909845e7..ae353432539b 100755
--- a/scripts/gcc-version.sh
+++ b/scripts/gcc-version.sh
@@ -1,33 +1,20 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
-# gcc-version [-p] gcc-command
-#
-# Prints the gcc version of `gcc-command' in a canonical 4-digit form
-# such as `0295' for gcc-2.95, `0303' for gcc-3.3, etc.
-#
-# With the -p option, prints the patchlevel as well, for example `029503' for
-# gcc-2.95.3, `030301' for gcc-3.3.1, etc.
+# gcc-version gcc-command
#
-
-if [ "$1" = "-p" ] ; then
- with_patchlevel=1;
- shift;
-fi
+# Print the gcc version of `gcc-command' in a 5 or 6-digit form
+# such as `29503' for gcc-2.95.3, `30301' for gcc-3.3.1, etc.
compiler="$*"
if [ ${#compiler} -eq 0 ]; then
- echo "Error: No compiler specified."
- printf "Usage:\n\t$0 <gcc-command>\n"
+ echo "Error: No compiler specified." >&2
+ printf "Usage:\n\t$0 <gcc-command>\n" >&2
exit 1
fi
MAJOR=$(echo __GNUC__ | $compiler -E -x c - | tail -n 1)
MINOR=$(echo __GNUC_MINOR__ | $compiler -E -x c - | tail -n 1)
-if [ "x$with_patchlevel" != "x" ] ; then
- PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1)
- printf "%02d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
-else
- printf "%02d%02d\\n" $MAJOR $MINOR
-fi
+PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1)
+printf "%d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
diff --git a/scripts/gdb/linux/Makefile b/scripts/gdb/linux/Makefile
index aba23be985e4..3df395a9c2ce 100644
--- a/scripts/gdb/linux/Makefile
+++ b/scripts/gdb/linux/Makefile
@@ -1,24 +1,25 @@
# SPDX-License-Identifier: GPL-2.0
-always := gdb-scripts
-SRCTREE := $(abspath $(srctree))
-
-$(obj)/gdb-scripts:
ifneq ($(KBUILD_SRC),)
- $(Q)ln -fsn $(SRCTREE)/$(obj)/*.py $(objtree)/$(obj)
+
+symlinks := $(patsubst $(srctree)/$(src)/%,%,$(wildcard $(srctree)/$(src)/*.py))
+
+quiet_cmd_symlink = SYMLINK $@
+ cmd_symlink = ln -fsn $(patsubst $(obj)/%,$(abspath $(srctree))/$(src)/%,$@) $@
+
+extra-y += $(symlinks)
+$(addprefix $(obj)/, $(symlinks)): FORCE
+ $(call if_changed,symlink)
+
endif
- @:
quiet_cmd_gen_constants_py = GEN $@
cmd_gen_constants_py = \
$(CPP) -E -x c -P $(c_flags) $< > $@ ;\
sed -i '1,/<!-- end-c-headers -->/d;' $@
-targets += constants.py
-$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in FORCE
+extra-y += constants.py
+$(obj)/constants.py: $(src)/constants.py.in FORCE
$(call if_changed_dep,gen_constants_py)
-build_constants_py: $(obj)/constants.py
- @:
-
-clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py) $(obj)/constants.py
+clean-files := *.pyc *.pyo
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index f75e7bda4889..e17837f1d3f2 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -62,11 +62,11 @@ static int all_symbols = 0;
static int absolute_percpu = 0;
static int base_relative = 0;
-int token_profit[0x10000];
+static int token_profit[0x10000];
/* the table that holds the result of the compression */
-unsigned char best_table[256][2];
-unsigned char best_table_len[256];
+static unsigned char best_table[256][2];
+static unsigned char best_table_len[256];
static void usage(void)
@@ -80,7 +80,7 @@ static void usage(void)
* This ignores the intensely annoying "mapping symbols" found
* in ARM ELF files: $a, $t and $d.
*/
-static inline int is_arm_mapping_symbol(const char *str)
+static int is_arm_mapping_symbol(const char *str)
{
return str[0] == '$' && strchr("axtd", str[1])
&& (str[2] == '\0' || str[2] == '.');
@@ -331,7 +331,7 @@ static void write_src(void)
unsigned int *markers;
char buf[KSYM_NAME_LEN];
- printf("#include <asm/types.h>\n");
+ printf("#include <asm/bitsperlong.h>\n");
printf("#if BITS_PER_LONG == 64\n");
printf("#define PTR .quad\n");
printf("#define ALGN .balign 8\n");
@@ -596,9 +596,6 @@ static void insert_real_symbols_in_table(void)
{
unsigned int i, j, c;
- memset(best_table, 0, sizeof(best_table));
- memset(best_table_len, 0, sizeof(best_table_len));
-
for (i = 0; i < table_cnt; i++) {
for (j = 0; j < table[i].len; j++) {
c = table[i].sym[j];
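
Dropping the two memset() calls is safe because best_table and best_table_len are now static: objects with static storage duration are zero-initialized before execution starts (C99 6.7.8p10), and kallsyms fills them only once. A small standalone demonstration:

    #include <assert.h>

    static unsigned char best_table[256][2];    /* zero-filled at startup */
    static unsigned char best_table_len[256];

    int main(void)
    {
            /* no memset() needed: static objects begin life all-zero */
            assert(best_table[0][0] == 0 && best_table[255][1] == 0);
            assert(best_table_len[42] == 0);
            return 0;
    }
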
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 181973509a05..7c5dc31c1d95 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -143,11 +143,12 @@ help:
# ===========================================================================
# object files used by all kconfig flavours
-common-objs := confdata.o expr.o symbol.o preprocess.o zconf.lex.o zconf.tab.o
+common-objs := confdata.o expr.o lexer.lex.o parser.tab.o preprocess.o \
+ symbol.o
-$(obj)/zconf.lex.o: $(obj)/zconf.tab.h
-HOSTCFLAGS_zconf.lex.o := -I$(src)
-HOSTCFLAGS_zconf.tab.o := -I$(src)
+$(obj)/lexer.lex.o: $(obj)/parser.tab.h
+HOSTCFLAGS_lexer.lex.o := -I$(src)
+HOSTCFLAGS_parser.tab.o := -I$(src)
# conf: Used for defconfig, oldconfig and related targets
hostprogs-y += conf
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index da89ef788a8d..ef3678c24bab 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -488,7 +488,6 @@ int main(int ac, char **av)
const char *progname = av[0];
int opt;
const char *name, *defconfig_file = NULL /* gcc uninit */;
- struct stat tmpstat;
int no_conf_write = 0;
tty_stdio = isatty(0) && isatty(1);
@@ -560,18 +559,6 @@ int main(int ac, char **av)
name = av[optind];
conf_parse(name);
//zconfdump(stdout);
- if (sync_kconfig) {
- name = conf_get_configname();
- if (stat(name, &tmpstat)) {
- fprintf(stderr, "***\n"
- "*** Configuration file \"%s\" not found!\n"
- "***\n"
- "*** Please run some configurator (e.g. \"make oldconfig\" or\n"
- "*** \"make menuconfig\" or \"make xconfig\").\n"
- "***\n", name);
- exit(1);
- }
- }
switch (input_mode) {
case defconfig:
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 999edb60cd53..8dde65bc3165 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -172,7 +172,7 @@ struct symbol {
* int "BAZ Value"
* range 1..255
*
- * Please, also check zconf.y:print_symbol() when modifying the
+ * Please, also check parser.y:print_symbol() when modifying the
* list of property types!
*/
enum prop_type {
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/lexer.l
index b2d0a3b0bce9..c9df1c8b9824 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/lexer.l
@@ -15,7 +15,7 @@
#include <unistd.h>
#include "lkc.h"
-#include "zconf.tab.h"
+#include "parser.tab.h"
#define YY_DECL static int yylex1(void)
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 531ff7c57d92..d871539e4b45 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -90,7 +90,7 @@ void *xrealloc(void *p, size_t size);
char *xstrdup(const char *s);
char *xstrndup(const char *s, size_t n);
-/* zconf.l */
+/* lexer.l */
int yylex(void);
struct gstr {
diff --git a/scripts/kconfig/lxdialog/.gitignore b/scripts/kconfig/lxdialog/.gitignore
deleted file mode 100644
index 90b08ff025a6..000000000000
--- a/scripts/kconfig/lxdialog/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-#
-# Generated files
-#
-lxdialog
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index 611945611bf8..1dcfb288ee63 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -113,7 +113,8 @@ do_resize:
case KEY_DOWN:
break;
case KEY_BACKSPACE:
- case 127:
+ case 8: /* ^H */
+ case 127: /* ^? */
if (pos) {
wattrset(dialog, dlg.inputbox.atr);
if (input_x == 0) {
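
Terminals send either 0x08 (^H) or 0x7f (^?) for the Backspace key depending on their configuration, which is why the dialog now accepts both in addition to curses' KEY_BACKSPACE. A hedged sketch of the combined test (KEY_BACKSPACE is defined locally so the sketch builds without <curses.h>):

    #include <stdbool.h>
    #include <stdio.h>

    #define KEY_BACKSPACE 0407      /* ncurses' value, copied here for the demo */

    /* Treat the curses keycode, ^H (8) and ^? (127) all as backspace. */
    static bool is_backspace(int key)
    {
            return key == KEY_BACKSPACE || key == 8 || key == 127;
    }

    int main(void)
    {
            printf("%d %d %d %d\n", is_backspace(8), is_backspace(127),
                   is_backspace(KEY_BACKSPACE), is_backspace('a'));  /* 1 1 1 0 */
            return 0;
    }
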
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index a4670f4e825a..ac92c0ded6c5 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans)
state->match_direction = FIND_NEXT_MATCH_UP;
*ans = get_mext_match(state->pattern,
state->match_direction);
- } else if (key == KEY_BACKSPACE || key == 127) {
+ } else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
state->pattern[strlen(state->pattern)-1] = '\0';
adj_match_dir(&state->match_direction);
} else
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 7be620a1fcdb..77f525a8617c 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
case KEY_F(F_EXIT):
case KEY_F(F_BACK):
break;
- case 127:
+ case 8: /* ^H */
+ case 127: /* ^? */
case KEY_BACKSPACE:
if (cursor_position > 0) {
memmove(&result[cursor_position-1],
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/parser.y
index 60936c76865b..60936c76865b 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/parser.y
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 8be8a70c5542..ce7fc87a49a7 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -1392,6 +1392,8 @@ ConfigMainWindow::ConfigMainWindow(void)
conf_set_changed_callback(conf_changed);
// Set saveAction's initial state
conf_changed();
+ configname = xstrdup(conf_get_configname());
+
QAction *saveAsAction = new QAction("Save &As...", this);
connect(saveAsAction, SIGNAL(triggered(bool)), SLOT(saveConfigAs()));
QAction *searchAction = new QAction("&Find", this);
@@ -1520,17 +1522,29 @@ ConfigMainWindow::ConfigMainWindow(void)
void ConfigMainWindow::loadConfig(void)
{
- QString s = QFileDialog::getOpenFileName(this, "", conf_get_configname());
- if (s.isNull())
+ QString str;
+ QByteArray ba;
+ const char *name;
+
+ str = QFileDialog::getOpenFileName(this, "", configname);
+ if (str.isNull())
return;
- if (conf_read(QFile::encodeName(s)))
+
+ ba = str.toLocal8Bit();
+ name = ba.data();
+
+ if (conf_read(name))
QMessageBox::information(this, "qconf", "Unable to load configuration!");
+
+ free(configname);
+ configname = xstrdup(name);
+
ConfigView::updateListAll();
}
bool ConfigMainWindow::saveConfig(void)
{
- if (conf_write(NULL)) {
+ if (conf_write(configname)) {
QMessageBox::information(this, "qconf", "Unable to save configuration!");
return false;
}
@@ -1541,10 +1555,24 @@ bool ConfigMainWindow::saveConfig(void)
void ConfigMainWindow::saveConfigAs(void)
{
- QString s = QFileDialog::getSaveFileName(this, "", conf_get_configname());
- if (s.isNull())
+ QString str;
+ QByteArray ba;
+ const char *name;
+
+ str = QFileDialog::getSaveFileName(this, "", configname);
+ if (str.isNull())
return;
- saveConfig();
+
+ ba = str.toLocal8Bit();
+ name = ba.data();
+
+ if (conf_write(name)) {
+ QMessageBox::information(this, "qconf", "Unable to save configuration!");
+ }
+ conf_write_autoconf(0);
+
+ free(configname);
+ configname = xstrdup(name);
}
void ConfigMainWindow::searchConfig(void)
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index 41df466e67d9..45bfe9b2b966 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -291,6 +291,7 @@ protected:
class ConfigMainWindow : public QMainWindow {
Q_OBJECT
+ char *configname;
static QAction *saveAction;
static void conf_changed(void);
public:
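
qconf now caches the last-used path in a heap-allocated configname, replacing it after each successful load or save with the free()/xstrdup() pair seen above. The underlying idiom, in a minimal C form (replace_string is a hypothetical helper, and plain strdup stands in for kconfig's xstrdup):

    #include <stdlib.h>
    #include <string.h>

    /* Replace *slot with a copy of name, releasing the previous value.
     * Keeps the old string if allocation fails. */
    static int replace_string(char **slot, const char *name)
    {
            char *copy = strdup(name);

            if (!copy)
                    return -1;
            free(*slot);
            *slot = copy;
            return 0;
    }

    int main(void)
    {
            char *configname = NULL;

            replace_string(&configname, ".config");
            replace_string(&configname, "/tmp/myconfig");
            free(configname);
            return 0;
    }
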
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index c5333d251985..3350e498b4ce 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1474,7 +1474,7 @@ sub push_parameter($$$$) {
if (!defined $parameterdescs{$param} && $param !~ /^#/) {
$parameterdescs{$param} = $undescribed;
- if (show_warnings($type, $declaration_name)) {
+ if (show_warnings($type, $declaration_name) && $param !~ /\./) {
print STDERR
"${file}:$.: warning: Function parameter or member '$param' not described in '$declaration_name'\n";
++$warnings;
diff --git a/scripts/leaking_addresses.pl b/scripts/leaking_addresses.pl
index 6a897788f5a7..ef9e5b2a1614 100755
--- a/scripts/leaking_addresses.pl
+++ b/scripts/leaking_addresses.pl
@@ -97,7 +97,7 @@ Options:
--32-bit Scan 32-bit kernel.
--page-offset-32-bit=o Page offset (for 32-bit kernel 0xABCD1234).
-d, --debug Display debugging output.
- -h, --help, --version Display this help and exit.
+ -h, --help Display this help and exit.
Scans the running kernel for potential leaking addresses.
@@ -108,7 +108,6 @@ EOM
GetOptions(
'd|debug' => \$debug,
'h|help' => \$help,
- 'version' => \$help,
'o|output-raw=s' => \$output_raw,
'i|input-raw=s' => \$input_raw,
'suppress-dmesg' => \$suppress_dmesg,
@@ -231,7 +230,7 @@ sub get_kernel_config_option
my $tmp_file = "/tmp/tmpkconf";
if (system("gunzip < /proc/config.gz > $tmp_file")) {
- dprint "$0: system(gunzip < /proc/config.gz) failed\n";
+ dprint("system(gunzip < /proc/config.gz) failed\n");
return "";
} else {
@config_files = ($tmp_file);
@@ -243,7 +242,7 @@ sub get_kernel_config_option
}
foreach my $file (@config_files) {
- dprint("parsing config file: %s\n", $file);
+ dprint("parsing config file: $file\n");
$value = option_from_file($option, $file);
if ($value ne "") {
last;
@@ -502,7 +501,7 @@ sub walk
next;
}
- dprint "parsing: $path\n";
+ dprint("parsing: $path\n");
timed_parse_file($path);
}
}
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index c8cf45362bd6..dc0e8c5a1402 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -3,22 +3,17 @@
#
# link vmlinux
#
-# vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_INIT) and
-# $(KBUILD_VMLINUX_MAIN) and $(KBUILD_VMLINUX_LIBS). Most are built-in.a files
-# from top-level directories in the kernel tree, others are specified in
-# arch/$(ARCH)/Makefile. Ordering when linking is important, and
-# $(KBUILD_VMLINUX_INIT) must be first. $(KBUILD_VMLINUX_LIBS) are archives
-# which are linked conditionally (not within --whole-archive), and do not
-# require symbol indexes added.
+# vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_OBJS) and
+# $(KBUILD_VMLINUX_LIBS). Most are built-in.a files from top-level directories
+# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
+# $(KBUILD_VMLINUX_LIBS) are archives which are linked conditionally
+# (not within --whole-archive), and do not require symbol indexes added.
#
# vmlinux
# ^
# |
-# +-< $(KBUILD_VMLINUX_INIT)
-# | +--< init/version.o + more
-# |
-# +--< $(KBUILD_VMLINUX_MAIN)
-# | +--< drivers/built-in.a mm/built-in.a + more
+# +--< $(KBUILD_VMLINUX_OBJS)
+# | +--< init/built-in.a drivers/built-in.a mm/built-in.a + more
# |
# +--< $(KBUILD_VMLINUX_LIBS)
# | +--< lib/lib.a + more
@@ -44,24 +39,6 @@ info()
fi
}
-# Thin archive build here makes a final archive with symbol table and indexes
-# from vmlinux objects INIT and MAIN, which can be used as input to linker.
-# KBUILD_VMLINUX_LIBS archives should already have symbol table and indexes
-# added.
-#
-# Traditional incremental style of link does not require this step
-#
-# built-in.a output file
-#
-archive_builtin()
-{
- info AR built-in.a
- rm -f built-in.a;
- ${AR} rcsTP${KBUILD_ARFLAGS} built-in.a \
- ${KBUILD_VMLINUX_INIT} \
- ${KBUILD_VMLINUX_MAIN}
-}
-
# Link of vmlinux.o used for section mismatch analysis
# ${1} output file
modpost_link()
@@ -69,7 +46,7 @@ modpost_link()
local objects
objects="--whole-archive \
- built-in.a \
+ ${KBUILD_VMLINUX_OBJS} \
--no-whole-archive \
--start-group \
${KBUILD_VMLINUX_LIBS} \
@@ -88,7 +65,7 @@ vmlinux_link()
if [ "${SRCARCH}" != "um" ]; then
objects="--whole-archive \
- built-in.a \
+ ${KBUILD_VMLINUX_OBJS} \
--no-whole-archive \
--start-group \
${KBUILD_VMLINUX_LIBS} \
@@ -99,7 +76,7 @@ vmlinux_link()
-T ${lds} ${objects}
else
objects="-Wl,--whole-archive \
- built-in.a \
+ ${KBUILD_VMLINUX_OBJS} \
-Wl,--no-whole-archive \
-Wl,--start-group \
${KBUILD_VMLINUX_LIBS} \
@@ -160,7 +137,6 @@ cleanup()
rm -f .tmp_System.map
rm -f .tmp_kallsyms*
rm -f .tmp_vmlinux*
- rm -f built-in.a
rm -f System.map
rm -f vmlinux
rm -f vmlinux.o
@@ -195,14 +171,7 @@ if [ "$1" = "clean" ]; then
fi
# We need access to CONFIG_ symbols
-case "${KCONFIG_CONFIG}" in
-*/*)
- . "${KCONFIG_CONFIG}"
- ;;
-*)
- # Force using a file from the current directory
- . "./${KCONFIG_CONFIG}"
-esac
+. include/config/auto.conf
# Update version
info GEN .version
@@ -217,8 +186,6 @@ fi;
# final build of init/
${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
-archive_builtin
-
#link vmlinux.o
info LD vmlinux.o
modpost_link vmlinux.o
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 87f1fc9801d7..2339f86126cb 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -62,7 +62,7 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
# Generate a temporary compile.h
-( echo /\* This file is auto generated, version $VERSION \*/
+{ echo /\* This file is auto generated, version $VERSION \*/
if [ -n "$CONFIG_FLAGS" ] ; then echo "/* $CONFIG_FLAGS */"; fi
echo \#define UTS_MACHINE \"$ARCH\"
@@ -73,7 +73,7 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
echo \#define LINUX_COMPILE_HOST \"`echo $LINUX_COMPILE_HOST | $UTS_TRUNCATE`\"
echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | grep ' version ' | sed 's/[[:space:]]*$//'`\"
-) > .tmpcompile
+} > .tmpcompile
# Only replace the real compile.h if the new one is different,
# in order to preserve the timestamp and avoid unnecessary
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index 160718383a71..054405b90ba4 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -228,5 +228,8 @@ int main(void)
DEVID(tee_client_device_id);
DEVID_FIELD(tee_client_device_id, uuid);
+ DEVID(wmi_device_id);
+ DEVID_FIELD(wmi_device_id, guid_string);
+
return 0;
}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index d0e41723627f..e17a29ae2e97 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -40,6 +40,7 @@ typedef struct {
typedef struct {
__u8 b[16];
} uuid_t;
+#define UUID_STRING_LEN 36
/* Big exception to the "don't include kernel headers into userspace, which
* even potentially has different endianness and word sizes, since
@@ -53,6 +54,9 @@ struct devtable {
int (*do_entry)(const char *filename, void *symval, char *alias);
};
+/* Size of alias provided to do_entry functions */
+#define ALIAS_SIZE 500
+
/* Define a variable f that holds the value of field f of struct devid
* based at address m.
*/
@@ -1305,6 +1309,27 @@ static int do_tee_entry(const char *filename, void *symval, char *alias)
return 1;
}
+/* Looks like: wmi:guid */
+static int do_wmi_entry(const char *filename, void *symval, char *alias)
+{
+ int len;
+ DEF_FIELD_ADDR(symval, wmi_device_id, guid_string);
+
+ if (strlen(*guid_string) != UUID_STRING_LEN) {
+ warn("Invalid WMI device id 'wmi:%s' in '%s'\n",
+ *guid_string, filename);
+ return 0;
+ }
+
+ len = snprintf(alias, ALIAS_SIZE, WMI_MODULE_PREFIX "%s", *guid_string);
+ if (len < 0 || len >= ALIAS_SIZE) {
+ warn("Could not generate all MODULE_ALIAS's in '%s'\n",
+ filename);
+ return 0;
+ }
+ return 1;
+}
+
/* Does namelen bytes of name exactly match the symbol? */
static bool sym_is(const char *name, unsigned namelen, const char *symbol)
{
@@ -1321,7 +1346,7 @@ static void do_table(void *symval, unsigned long size,
struct module *mod)
{
unsigned int i;
- char alias[500];
+ char alias[ALIAS_SIZE];
device_id_check(mod->name, device_id, size, id_size, symval);
/* Leave last one: it's the terminator. */
@@ -1376,6 +1401,7 @@ static const struct devtable devtable[] = {
{"tbsvc", SIZE_tb_service_id, do_tbsvc_entry},
{"typec", SIZE_typec_device_id, do_typec_entry},
{"tee", SIZE_tee_client_device_id, do_tee_entry},
+ {"wmi", SIZE_wmi_device_id, do_wmi_entry},
};
/* Create MODULE_ALIAS() statements.
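
do_wmi_entry() detects truncation through snprintf()'s return value: a negative result signals an encoding error, and a result >= the buffer size means the output did not fit. A standalone sketch of that convention (build_alias and the sample GUID are illustrative):

    #include <stdio.h>

    #define ALIAS_SIZE 500

    /* Return 1 when the alias was written in full, 0 otherwise,
     * mirroring the do_*_entry() return convention. */
    static int build_alias(char *alias, const char *prefix, const char *guid)
    {
            int len = snprintf(alias, ALIAS_SIZE, "%s%s", prefix, guid);

            if (len < 0 || len >= ALIAS_SIZE)
                    return 0;       /* encoding error or truncated */
            return 1;
    }

    int main(void)
    {
            char alias[ALIAS_SIZE];

            printf("%d\n", build_alias(alias, "wmi:",
                   "05901221-D566-11D1-B2F0-00A0C9062910"));    /* 1 */
            return 0;
    }
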
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 26bf886bd168..f277e116e0eb 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -35,7 +35,6 @@ static int vmlinux_section_warnings = 1;
static int warn_unresolved = 0;
/* How a symbol is exported */
static int sec_mismatch_count = 0;
-static int sec_mismatch_verbose = 1;
static int sec_mismatch_fatal = 0;
/* ignore missing files */
static int ignore_missing_files;
@@ -640,7 +639,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
info->sechdrs[sym->st_shndx].sh_offset -
(info->hdr->e_type != ET_REL ?
info->sechdrs[sym->st_shndx].sh_addr : 0);
- crc = *crcp;
+ crc = TO_NATIVE(*crcp);
}
sym_update_crc(symname + strlen("__crc_"), mod, crc,
export);
@@ -1406,8 +1405,6 @@ static void report_sec_mismatch(const char *modname,
char *prl_to;
sec_mismatch_count++;
- if (!sec_mismatch_verbose)
- return;
get_pretty_name(from_is_func, &from, &from_p);
get_pretty_name(to_is_func, &to, &to_p);
@@ -1655,9 +1652,7 @@ static void extable_mismatch_handler(const char* modname, struct elf_info *elf,
sec_mismatch_count++;
- if (sec_mismatch_verbose)
- report_extable_warnings(modname, elf, mismatch, r, sym,
- fromsec, tosec);
+ report_extable_warnings(modname, elf, mismatch, r, sym, fromsec, tosec);
if (match(tosec, mismatch->bad_tosec))
fatal("The relocation at %s+0x%lx references\n"
@@ -2433,7 +2428,7 @@ int main(int argc, char **argv)
struct ext_sym_list *extsym_iter;
struct ext_sym_list *extsym_start = NULL;
- while ((opt = getopt(argc, argv, "i:I:e:mnsST:o:awE")) != -1) {
+ while ((opt = getopt(argc, argv, "i:I:e:mnsT:o:awE")) != -1) {
switch (opt) {
case 'i':
kernel_read = optarg;
@@ -2465,9 +2460,6 @@ int main(int argc, char **argv)
case 's':
vmlinux_section_warnings = 0;
break;
- case 'S':
- sec_mismatch_verbose = 0;
- break;
case 'T':
files_source = optarg;
break;
@@ -2525,18 +2517,9 @@ int main(int argc, char **argv)
}
if (dump_write)
write_dump(dump_write);
- if (sec_mismatch_count) {
- if (!sec_mismatch_verbose) {
- warn("modpost: Found %d section mismatch(es).\n"
- "To see full details build your kernel with:\n"
- "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
- sec_mismatch_count);
- }
- if (sec_mismatch_fatal) {
- fatal("modpost: Section mismatches detected.\n"
- "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n");
- }
- }
+ if (sec_mismatch_count && sec_mismatch_fatal)
+ fatal("modpost: Section mismatches detected.\n"
+ "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n");
free(buf.p);
return err;
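
The TO_NATIVE() fix converts the CRC read from the ELF image from the target's byte order to the host's; without it, cross-compiling for an opposite-endian target produced mismatched symbol CRCs. A hedged sketch of the 32-bit swap underneath (modpost applies it only when the ELF data encoding differs from the host's, a check omitted here):

    #include <stdint.h>
    #include <stdio.h>

    /* Unconditional 32-bit byte swap. */
    static uint32_t swab32(uint32_t x)
    {
            return ((x & 0x000000ffu) << 24) |
                   ((x & 0x0000ff00u) <<  8) |
                   ((x & 0x00ff0000u) >>  8) |
                   ((x & 0xff000000u) >> 24);
    }

    int main(void)
    {
            printf("%#x\n", swab32(0x12345678));    /* 0x78563412 */
            return 0;
    }
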
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 453fecee62f0..2c6de21e5152 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -59,7 +59,7 @@ rpm-pkg: FORCE
# binrpm-pkg
# ---------------------------------------------------------------------------
binrpm-pkg: FORCE
- $(MAKE) KBUILD_SRC=
+ $(MAKE) -f $(srctree)/Makefile
$(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec
+rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
$(UTS_MACHINE) -bb $(objtree)/binkernel.spec
@@ -72,11 +72,11 @@ deb-pkg: FORCE
$(call cmd,src_tar,$(KDEB_SOURCENAME))
origversion=$$(dpkg-parsechangelog -SVersion |sed 's/-[^-]*$$//');\
mv $(KDEB_SOURCENAME).tar.gz ../$(KDEB_SOURCENAME)_$${origversion}.orig.tar.gz
- +dpkg-buildpackage -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) -i.git -us -uc
+ +dpkg-buildpackage -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) $(DPKG_FLAGS) -i.git -us -uc
bindeb-pkg: FORCE
$(CONFIG_SHELL) $(srctree)/scripts/package/mkdebian
- +dpkg-buildpackage -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) -b -nc -uc
+ +dpkg-buildpackage -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) $(DPKG_FLAGS) -b -nc -uc
intdeb-pkg: FORCE
+$(CONFIG_SHELL) $(srctree)/scripts/package/builddeb
@@ -102,7 +102,7 @@ clean-dirs += $(objtree)/snap/
# tarball targets
# ---------------------------------------------------------------------------
tar%pkg: FORCE
- $(MAKE) KBUILD_SRC=
+ $(MAKE) -f $(srctree)/Makefile
$(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@
clean-dirs += $(objtree)/tar-install/
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index f43a274f4f1d..b03dd56a4782 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -12,6 +12,18 @@
set -e
+is_enabled() {
+ grep -q "^$1=y" include/config/auto.conf
+}
+
+if_enabled_echo() {
+ if is_enabled "$1"; then
+ echo -n "$2"
+ elif [ $# -ge 3 ]; then
+ echo -n "$3"
+ fi
+}
+
create_package() {
local pname="$1" pdir="$2"
@@ -62,7 +74,7 @@ parisc|mips|powerpc)
installed_image_path="boot/vmlinuz-$version"
esac
-BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)"
+BUILD_DEBUG=$(if_enabled_echo CONFIG_DEBUG_INFO Yes)
# Setup the directory structure
rm -rf "$tmpdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files
@@ -83,15 +95,15 @@ else
fi
cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
-if grep -q "^CONFIG_OF_EARLY_FLATTREE=y" $KCONFIG_CONFIG ; then
+if is_enabled CONFIG_OF_EARLY_FLATTREE; then
# Only some architectures with OF support have this target
if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
- $MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
+ $MAKE -f $srctree/Makefile INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
fi
fi
-if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
- INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_install
+if is_enabled CONFIG_MODULES; then
+ INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_install
rm -f "$tmpdir/lib/modules/$version/build"
rm -f "$tmpdir/lib/modules/$version/source"
if [ "$ARCH" = "um" ] ; then
@@ -111,16 +123,15 @@ if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
done
# resign stripped modules
- MODULE_SIG_ALL="$(grep -s '^CONFIG_MODULE_SIG_ALL=y' $KCONFIG_CONFIG || true)"
- if [ -n "$MODULE_SIG_ALL" ]; then
- INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_sign
+ if is_enabled CONFIG_MODULE_SIG_ALL; then
+ INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_sign
fi
fi
fi
if [ "$ARCH" != "um" ]; then
- $MAKE headers_check KBUILD_SRC=
- $MAKE headers_install KBUILD_SRC= INSTALL_HDR_PATH="$libc_headers_dir/usr"
+ $MAKE -f $srctree/Makefile headers_check
+ $MAKE -f $srctree/Makefile headers_install INSTALL_HDR_PATH="$libc_headers_dir/usr"
fi
# Install the maintainer scripts
@@ -129,11 +140,6 @@ fi
# make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and
# so do we; recent versions of dracut and initramfs-tools will obey this.
debhookdir=${KDEB_HOOKDIR:-/etc/kernel}
-if grep -q '^CONFIG_BLK_DEV_INITRD=y' $KCONFIG_CONFIG; then
- want_initrd=Yes
-else
- want_initrd=No
-fi
for script in postinst postrm preinst prerm ; do
mkdir -p "$tmpdir$debhookdir/$script.d"
cat <<EOF > "$tmpdir/DEBIAN/$script"
@@ -145,7 +151,7 @@ set -e
export DEB_MAINT_PARAMS="\$*"
# Tell initramfs builder whether it's wanted
-export INITRD=$want_initrd
+export INITRD=$(if_enabled_echo CONFIG_BLK_DEV_INITRD Yes No)
test -d $debhookdir/$script.d && run-parts --arg="$version" --arg="/$installed_image_path" $debhookdir/$script.d
exit 0
@@ -158,11 +164,11 @@ done
(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles"
(cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
(cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
-if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
+if is_enabled CONFIG_STACK_VALIDATION; then
(cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles"
fi
(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
-if grep -q '^CONFIG_GCC_PLUGINS=y' $KCONFIG_CONFIG ; then
+if is_enabled CONFIG_GCC_PLUGINS; then
(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
fi
destdir=$kernel_headers_dir/usr/src/linux-headers-$version
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index d624a07a4e77..2f66c81e4021 100755
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -56,8 +56,8 @@ dirs=boot
#
# Try to install modules
#
-if grep -q '^CONFIG_MODULES=y' "${KCONFIG_CONFIG}"; then
- make ARCH="${ARCH}" O="${objtree}" KBUILD_SRC= INSTALL_MOD_PATH="${tmpdir}" modules_install
+if grep -q '^CONFIG_MODULES=y' include/config/auto.conf; then
+ make ARCH="${ARCH}" -f ${srctree}/Makefile INSTALL_MOD_PATH="${tmpdir}" modules_install
dirs="$dirs lib"
fi
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index edcad61fe3cd..8351584cb24e 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -7,7 +7,7 @@
set -e
is_enabled() {
- grep -q "^CONFIG_$1=y" $KCONFIG_CONFIG
+ grep -q "^$1=y" include/config/auto.conf
}
if_enabled_echo() {
@@ -31,23 +31,23 @@ set_debarch() {
x86_64)
debarch=amd64 ;;
sparc*)
- debarch=sparc$(if_enabled_echo 64BIT 64) ;;
+ debarch=sparc$(if_enabled_echo CONFIG_64BIT 64) ;;
s390*)
debarch=s390x ;;
ppc*)
- if is_enabled 64BIT; then
- debarch=ppc64$(if_enabled_echo CPU_LITTLE_ENDIAN el)
+ if is_enabled CONFIG_64BIT; then
+ debarch=ppc64$(if_enabled_echo CONFIG_CPU_LITTLE_ENDIAN el)
else
- debarch=powerpc$(if_enabled_echo SPE spe)
+ debarch=powerpc$(if_enabled_echo CONFIG_SPE spe)
fi
;;
parisc*)
debarch=hppa ;;
mips*)
- if is_enabled CPU_LITTLE_ENDIAN; then
- debarch=mips$(if_enabled_echo 64BIT 64)$(if_enabled_echo CPU_MIPSR6 r6)el
- elif is_enabled CPU_MIPSR6; then
- debarch=mips$(if_enabled_echo 64BIT 64)r6
+ if is_enabled CONFIG_CPU_LITTLE_ENDIAN; then
+ debarch=mips$(if_enabled_echo CONFIG_64BIT 64)$(if_enabled_echo CONFIG_CPU_MIPSR6 r6)el
+ elif is_enabled CONFIG_CPU_MIPSR6; then
+ debarch=mips$(if_enabled_echo CONFIG_64BIT 64)r6
else
debarch=mips
fi
@@ -55,8 +55,8 @@ set_debarch() {
aarch64|arm64)
debarch=arm64 ;;
arm*)
- if is_enabled AEABI; then
- debarch=arm$(if_enabled_echo VFP hf el)
+ if is_enabled CONFIG_AEABI; then
+ debarch=arm$(if_enabled_echo CONFIG_VFP hf el)
else
debarch=arm
fi
@@ -64,10 +64,10 @@ set_debarch() {
openrisc)
debarch=or1k ;;
sh)
- if is_enabled CPU_SH3; then
- debarch=sh3$(if_enabled_echo CPU_BIG_ENDIAN eb)
- elif is_enabled CPU_SH4; then
- debarch=sh4$(if_enabled_echo CPU_BIG_ENDIAN eb)
+ if is_enabled CONFIG_CPU_SH3; then
+ debarch=sh3$(if_enabled_echo CONFIG_CPU_BIG_ENDIAN eb)
+ elif is_enabled CONFIG_CPU_SH4; then
+ debarch=sh4$(if_enabled_echo CONFIG_CPU_BIG_ENDIAN eb)
fi
;;
esac
@@ -132,8 +132,12 @@ else
echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly"
fi
-mkdir -p debian/
+mkdir -p debian/source/
+echo "1.0" > debian/source/format
+
echo $debarch > debian/arch
+extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)"
+extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
# Generate a simple changelog template
cat <<EOF > debian/changelog
@@ -170,7 +174,7 @@ Source: $sourcename
Section: kernel
Priority: optional
Maintainer: $maintainer
-Build-Depends: bc, kmod, cpio
+Build-Depends: bc, kmod, cpio, bison, flex | flex:native $extra_build_depends
Homepage: http://www.kernel.org/
Package: $packagename
@@ -205,13 +209,15 @@ EOF
cat <<EOF > debian/rules
#!$(command -v $MAKE) -f
+srctree ?= .
+
build:
\$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
- KBUILD_BUILD_VERSION=${revision} KBUILD_SRC=
+ KBUILD_BUILD_VERSION=${revision} -f \$(srctree)/Makefile
binary-arch:
\$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
- KBUILD_BUILD_VERSION=${revision} KBUILD_SRC= intdeb-pkg
+ KBUILD_BUILD_VERSION=${revision} -f \$(srctree)/Makefile intdeb-pkg
clean:
rm -rf debian/*tmp debian/files
@@ -219,5 +225,6 @@ clean:
binary: binary-arch
EOF
+chmod +x debian/rules
exit 0
diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c
index 1ceedea847dd..544ca126a8a8 100644
--- a/scripts/selinux/genheaders/genheaders.c
+++ b/scripts/selinux/genheaders/genheaders.c
@@ -9,7 +9,6 @@
#include <string.h>
#include <errno.h>
#include <ctype.h>
-#include <sys/socket.h>
struct security_class_mapping {
const char *name;
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
index 073fe7537f6c..6d51b74bc679 100644
--- a/scripts/selinux/mdp/mdp.c
+++ b/scripts/selinux/mdp/mdp.c
@@ -32,7 +32,6 @@
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
-#include <sys/socket.h>
static void usage(char *name)
{
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index e559c6294c39..4fe392e507fb 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -175,7 +175,13 @@ class id_parser(object):
self.lines_checked += 1
if line.find("SPDX-License-Identifier:") < 0:
continue
- expr = line.split(':')[1].replace('*/', '').strip()
+ expr = line.split(':')[1].strip()
+ # Remove trailing comment closure
+ if line.strip().endswith('*/'):
+ expr = expr.rstrip('*/').strip()
+ # Special case for SH magic boot code files
+ if line.startswith('LIST \"'):
+ expr = expr.rstrip('\"').strip()
self.parse(expr)
self.spdx_valid += 1
#
diff --git a/security/Kconfig b/security/Kconfig
index 1d6463fb1450..353cfef71d4e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -239,8 +239,46 @@ source "security/safesetid/Kconfig"
source "security/integrity/Kconfig"
+choice
+ prompt "First legacy 'major LSM' to be initialized"
+ default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
+ default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
+ default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
+ default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+ default DEFAULT_SECURITY_DAC
+
+ help
+ This choice is there only for converting CONFIG_DEFAULT_SECURITY
+ in old kernel configs to CONFIG_LSM in new kernel configs. Don't
+ change this choice unless you are creating a fresh kernel config,
+ for this choice will be ignored after CONFIG_LSM has been set.
+
+ Selects the legacy "major security module" that will be
+ initialized first. Overridden by non-default CONFIG_LSM.
+
+ config DEFAULT_SECURITY_SELINUX
+ bool "SELinux" if SECURITY_SELINUX=y
+
+ config DEFAULT_SECURITY_SMACK
+ bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
+
+ config DEFAULT_SECURITY_TOMOYO
+ bool "TOMOYO" if SECURITY_TOMOYO=y
+
+ config DEFAULT_SECURITY_APPARMOR
+ bool "AppArmor" if SECURITY_APPARMOR=y
+
+ config DEFAULT_SECURITY_DAC
+ bool "Unix Discretionary Access Controls"
+
+endchoice
+
config LSM
string "Ordered list of enabled LSMs"
+ default "yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
+ default "yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
+ default "yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
+ default "yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
help
A comma-separated list of LSMs, in initialization order.
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 3f80a684c232..b9298d2e8165 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -123,17 +123,22 @@ static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
return 0;
}
-static void aafs_evict_inode(struct inode *inode)
+static void aafs_i_callback(struct rcu_head *head)
{
- truncate_inode_pages_final(&inode->i_data);
- clear_inode(inode);
+ struct inode *inode = container_of(head, struct inode, i_rcu);
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
+ free_inode_nonrcu(inode);
+}
+
+static void aafs_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, aafs_i_callback);
}
static const struct super_operations aafs_super_ops = {
.statfs = simple_statfs,
- .evict_inode = aafs_evict_inode,
+ .destroy_inode = aafs_destroy_inode,
.show_path = aafs_show_path,
};
@@ -356,6 +361,7 @@ static void aafs_remove(struct dentry *dentry)
simple_rmdir(dir, dentry);
else
simple_unlink(dir, dentry);
+ d_delete(dentry);
dput(dentry);
}
inode_unlock(dir);
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index af03d98c7552..baba63bc66b1 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -43,7 +43,6 @@ char *aa_calc_hash(void *data, size_t len)
goto fail;
desc->tfm = apparmor_tfm;
- desc->flags = 0;
error = crypto_shash_init(desc);
if (error)
@@ -81,7 +80,6 @@ int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
goto fail;
desc->tfm = apparmor_tfm;
- desc->flags = 0;
error = crypto_shash_init(desc);
if (error)
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 49d664ddff44..87500bde5a92 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -1336,9 +1336,16 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
bool aa_g_paranoid_load = true;
module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp);
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp);
+#define param_check_aaintbool param_check_int
+static const struct kernel_param_ops param_ops_aaintbool = {
+ .set = param_set_aaintbool,
+ .get = param_get_aaintbool
+};
/* Boot time disable flag */
static int apparmor_enabled __lsm_ro_after_init = 1;
-module_param_named(enabled, apparmor_enabled, int, 0444);
+module_param_named(enabled, apparmor_enabled, aaintbool, 0444);
static int __init apparmor_enabled_setup(char *str)
{
@@ -1413,6 +1420,46 @@ static int param_get_aauint(char *buffer, const struct kernel_param *kp)
return param_get_uint(buffer, kp);
}
+/* Can only be set before AppArmor is initialized (i.e. on boot cmdline). */
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp)
+{
+ struct kernel_param kp_local;
+ bool value;
+ int error;
+
+ if (apparmor_initialized)
+ return -EPERM;
+
+ /* Create local copy, with arg pointing to bool type. */
+ value = !!*((int *)kp->arg);
+ memcpy(&kp_local, kp, sizeof(kp_local));
+ kp_local.arg = &value;
+
+ error = param_set_bool(val, &kp_local);
+ if (!error)
+ *((int *)kp->arg) = *((bool *)kp_local.arg);
+ return error;
+}
+
+/*
+ * To avoid changing /sys/module/apparmor/parameters/enabled from Y/N to
+ * 1/0, this converts the "int that is actually bool" back to bool for
+ * display in the /sys filesystem, while keeping it "int" for the LSM
+ * infrastructure.
+ */
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
+{
+ struct kernel_param kp_local;
+ bool value;
+
+ /* Create local copy, with arg pointing to bool type. */
+ value = !!*((int *)kp->arg);
+ memcpy(&kp_local, kp, sizeof(kp_local));
+ kp_local.arg = &value;
+
+ return param_get_bool(buffer, &kp_local);
+}
+
static int param_get_audit(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 379682e2a8d5..f6c2bcb2ab14 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -579,6 +579,7 @@ fail:
kfree(profile->secmark[i].label);
kfree(profile->secmark);
profile->secmark_count = 0;
+ profile->secmark = NULL;
}
e->pos = pos;
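
Setting profile->secmark to NULL after the kfree() matters because the failure path can run again over the same profile: with the count zeroed and the pointer cleared, a second pass frees nothing instead of freeing stale memory twice. The same pattern in a small userspace sketch (struct and names are illustrative):

    #include <stdlib.h>

    struct profile {
            char **secmark;
            int secmark_count;
    };

    /* Free the secmark array and leave the struct safe to clean up again:
     * count zeroed, pointer NULLed. */
    static void free_secmark(struct profile *p)
    {
            for (int i = 0; i < p->secmark_count; i++)
                    free(p->secmark[i]);
            free(p->secmark);
            p->secmark_count = 0;
            p->secmark = NULL;      /* prevents a later double free */
    }

    int main(void)
    {
            struct profile p = { 0 };

            free_secmark(&p);       /* safe with nothing allocated */
            free_secmark(&p);       /* and safe to call twice after the fix */
            return 0;
    }
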
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index cd97929fac66..dc28914fa72e 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -560,7 +560,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
devcg->behavior == DEVCG_DEFAULT_ALLOW) {
rc = dev_exception_add(devcg, ex);
if (rc)
- break;
+ return rc;
} else {
/*
* in the other possible cases:
diff --git a/security/inode.c b/security/inode.c
index b7772a9b315e..421dd72b5876 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -27,17 +27,22 @@
static struct vfsmount *mount;
static int mount_count;
-static void securityfs_evict_inode(struct inode *inode)
+static void securityfs_i_callback(struct rcu_head *head)
{
- truncate_inode_pages_final(&inode->i_data);
- clear_inode(inode);
+ struct inode *inode = container_of(head, struct inode, i_rcu);
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
+ free_inode_nonrcu(inode);
+}
+
+static void securityfs_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, securityfs_i_callback);
}
static const struct super_operations securityfs_super_operations = {
.statfs = simple_statfs,
- .evict_inode = securityfs_evict_inode,
+ .destroy_inode = securityfs_destroy_inode,
};
static int fill_super(struct super_block *sb, void *data, int silent)
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
index 2ea4ec9991d5..3ba1168b1756 100644
--- a/security/integrity/Kconfig
+++ b/security/integrity/Kconfig
@@ -55,13 +55,22 @@ config INTEGRITY_PLATFORM_KEYRING
bool "Provide keyring for platform/firmware trusted keys"
depends on INTEGRITY_ASYMMETRIC_KEYS
depends on SYSTEM_BLACKLIST_KEYRING
- depends on EFI
help
Provide a separate, distinct keyring for platform trusted keys, which
the kernel automatically populates during initialization from values
provided by the platform for verifying the kexec'ed kernel image
and, possibly, the initramfs signature.
+config LOAD_UEFI_KEYS
+ depends on INTEGRITY_PLATFORM_KEYRING
+ depends on EFI
+ def_bool y
+
+config LOAD_IPL_KEYS
+ depends on INTEGRITY_PLATFORM_KEYRING
+ depends on S390
+ def_bool y
+
config INTEGRITY_AUDIT
bool "Enables integrity auditing support "
depends on AUDIT
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
index 86df9aba8c0f..19faace69644 100644
--- a/security/integrity/Makefile
+++ b/security/integrity/Makefile
@@ -9,10 +9,10 @@ integrity-y := iint.o
integrity-$(CONFIG_INTEGRITY_AUDIT) += integrity_audit.o
integrity-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
integrity-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
-integrity-$(CONFIG_INTEGRITY_PLATFORM_KEYRING) += platform_certs/platform_keyring.o \
- platform_certs/efi_parser.o \
- platform_certs/load_uefi.o
-obj-$(CONFIG_LOAD_UEFI_KEYS) += platform_certs/load_uefi.o
+integrity-$(CONFIG_INTEGRITY_PLATFORM_KEYRING) += platform_certs/platform_keyring.o
+integrity-$(CONFIG_LOAD_UEFI_KEYS) += platform_certs/efi_parser.o \
+ platform_certs/load_uefi.o
+integrity-$(CONFIG_LOAD_IPL_KEYS) += platform_certs/load_ipl_s390.o
$(obj)/load_uefi.o: KBUILD_CFLAGS += -fshort-wchar
subdir-$(CONFIG_IMA) += ima
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index f45d6edecf99..e19c2eb72c51 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -87,6 +87,9 @@ static int __integrity_init_keyring(const unsigned int id, key_perm_t perm,
pr_info("Can't allocate %s keyring (%d)\n",
keyring_name[id], err);
keyring[id] = NULL;
+ } else {
+ if (id == INTEGRITY_KEYRING_PLATFORM)
+ set_platform_trusted_keys(keyring[id]);
}
return err;
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index d775e03fbbcc..99080871eb9f 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -104,9 +104,16 @@ int asymmetric_verify(struct key *keyring, const char *sig,
memset(&pks, 0, sizeof(pks));
- pks.pkey_algo = "rsa";
pks.hash_algo = hash_algo_name[hdr->hash_algo];
- pks.encoding = "pkcs1";
+ if (hdr->hash_algo == HASH_ALGO_STREEBOG_256 ||
+ hdr->hash_algo == HASH_ALGO_STREEBOG_512) {
+ /* EC-RDSA and Streebog should go together. */
+ pks.pkey_algo = "ecrdsa";
+ pks.encoding = "raw";
+ } else {
+ pks.pkey_algo = "rsa";
+ pks.encoding = "pkcs1";
+ }
pks.digest = (u8 *)data;
pks.digest_size = datalen;
pks.s = hdr->sig;
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 43e2dc3a60d0..e11564eb645b 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -124,7 +124,6 @@ out:
return ERR_PTR(-ENOMEM);
desc->tfm = *tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
rc = crypto_shash_init(desc);
if (rc) {
@@ -173,8 +172,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
if ((evm_hmac_attrs & EVM_ATTR_FSUUID) &&
type != EVM_XATTR_PORTABLE_DIGSIG)
- crypto_shash_update(desc, &inode->i_sb->s_uuid.b[0],
- sizeof(inode->i_sb->s_uuid));
+ crypto_shash_update(desc, (u8 *)&inode->i_sb->s_uuid, UUID_SIZE);
crypto_shash_final(desc, digest);
}
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 5ecaa3d6fe0b..b6d9f14bc234 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -563,7 +563,6 @@ static int __init init_evm(void)
{
int error;
struct list_head *pos, *q;
- struct xattr_list *xattr;
evm_init_config();
@@ -580,11 +579,8 @@ static int __init init_evm(void)
error:
if (error != 0) {
if (!list_empty(&evm_config_xattrnames)) {
- list_for_each_safe(pos, q, &evm_config_xattrnames) {
- xattr = list_entry(pos, struct xattr_list,
- list);
+ list_for_each_safe(pos, q, &evm_config_xattrnames)
list_del(pos);
- }
}
}
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 026163f37ba1..d213e835c498 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -153,6 +153,7 @@ int ima_measurements_show(struct seq_file *m, void *v);
unsigned long ima_get_binary_runtime_size(void);
int ima_init_template(void);
void ima_init_template_list(void);
+int __init ima_init_digests(void);
/*
* used to protect h_table and sha_table
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index acf2c7df7145..a32878e10ebc 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -333,7 +333,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = 0;
hash->length = crypto_shash_digestsize(tfm);
@@ -469,7 +468,6 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
int rc, i;
shash->tfm = tfm;
- shash->flags = 0;
hash->length = crypto_shash_digestsize(tfm);
@@ -591,7 +589,6 @@ static int calc_buffer_shash_tfm(const void *buf, loff_t size,
int rc;
shash->tfm = tfm;
- shash->flags = 0;
hash->length = crypto_shash_digestsize(tfm);
@@ -643,12 +640,12 @@ int ima_calc_buffer_hash(const void *buf, loff_t len,
return calc_buffer_shash(buf, len, hash);
}
-static void __init ima_pcrread(u32 idx, u8 *pcr)
+static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
{
if (!ima_tpm_chip)
return;
- if (tpm_pcr_read(ima_tpm_chip, idx, pcr) != 0)
+ if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
pr_err("Error Communicating to TPM chip\n");
}
@@ -658,13 +655,12 @@ static void __init ima_pcrread(u32 idx, u8 *pcr)
static int __init ima_calc_boot_aggregate_tfm(char *digest,
struct crypto_shash *tfm)
{
- u8 pcr_i[TPM_DIGEST_SIZE];
+ struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} };
int rc;
u32 i;
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = 0;
rc = crypto_shash_init(shash);
if (rc != 0)
@@ -672,9 +668,9 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest,
/* cumulative sha1 over tpm registers 0-7 */
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
- ima_pcrread(i, pcr_i);
+ ima_pcrread(i, &d);
/* now accumulate with current aggregate */
- rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
+ rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE);
}
if (!rc)
crypto_shash_final(shash, digest);
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 6bb42a9c5e47..6c9295449751 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -123,8 +123,12 @@ int __init ima_init(void)
if (rc != 0)
return rc;
+ /* It can be called before ima_init_digests(), it does not use TPM. */
ima_load_kexec_buffer();
+ rc = ima_init_digests();
+ if (rc != 0)
+ return rc;
rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */
if (rc != 0)
return rc;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 4ffac4f5c647..357edd140c09 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -397,6 +397,33 @@ int ima_file_check(struct file *file, int mask)
EXPORT_SYMBOL_GPL(ima_file_check);
/**
+ * ima_post_create_tmpfile - mark newly created tmpfile as new
+ * @inode: inode of the newly created tmpfile
+ *
+ * No measuring, appraising or auditing of newly created tmpfiles is needed.
+ * Skip calling process_measurement(), but indicate which newly created
+ * tmpfiles are in policy.
+ */
+void ima_post_create_tmpfile(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+ int must_appraise;
+
+ must_appraise = ima_must_appraise(inode, MAY_ACCESS, FILE_CHECK);
+ if (!must_appraise)
+ return;
+
+ /* Nothing to do if we can't allocate memory */
+ iint = integrity_inode_get(inode);
+ if (!iint)
+ return;
+
+ /* needed for writing the security xattrs */
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ iint->ima_file_status = INTEGRITY_PASS;
+}
+
+/**
* ima_post_path_mknod - mark as a new inode
* @dentry: newly created dentry
*
@@ -413,9 +440,13 @@ void ima_post_path_mknod(struct dentry *dentry)
if (!must_appraise)
return;
+ /* Nothing to do if we can't allocate memory */
iint = integrity_inode_get(inode);
- if (iint)
- iint->flags |= IMA_NEW_FILE;
+ if (!iint)
+ return;
+
+ /* needed for re-opening empty files */
+ iint->flags |= IMA_NEW_FILE;
}
/**
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 0e41dc1df1d4..6b6d044e0440 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -27,6 +27,9 @@
#define AUDIT_CAUSE_LEN_MAX 32
+/* pre-allocated array of tpm_digest structures to extend a PCR */
+static struct tpm_digest *digests;
+
LIST_HEAD(ima_measurements); /* list of all measurements */
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size;
@@ -140,11 +143,15 @@ unsigned long ima_get_binary_runtime_size(void)
static int ima_pcr_extend(const u8 *hash, int pcr)
{
int result = 0;
+ int i;
if (!ima_tpm_chip)
return result;
- result = tpm_pcr_extend(ima_tpm_chip, pcr, hash);
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++)
+ memcpy(digests[i].digest, hash, TPM_DIGEST_SIZE);
+
+ result = tpm_pcr_extend(ima_tpm_chip, pcr, digests);
if (result != 0)
pr_err("Error Communicating to TPM chip, result: %d\n", result);
return result;
@@ -211,3 +218,21 @@ int ima_restore_measurement_entry(struct ima_template_entry *entry)
mutex_unlock(&ima_extend_list_mutex);
return result;
}
+
+int __init ima_init_digests(void)
+{
+ int i;
+
+ if (!ima_tpm_chip)
+ return 0;
+
+ digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
+ GFP_NOFS);
+ if (!digests)
+ return -ENOMEM;
+
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++)
+ digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
+
+ return 0;
+}
diff --git a/security/integrity/platform_certs/load_ipl_s390.c b/security/integrity/platform_certs/load_ipl_s390.c
new file mode 100644
index 000000000000..e769dcb7ea94
--- /dev/null
+++ b/security/integrity/platform_certs/load_ipl_s390.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include <asm/boot_data.h>
+#include "../integrity.h"
+
+/*
+ * Load the certs contained in the IPL report created by the machine loader
+ * into the platform trusted keyring.
+ */
+static int __init load_ipl_certs(void)
+{
+ void *ptr, *end;
+ unsigned int len;
+
+ if (!ipl_cert_list_addr)
+ return 0;
+ /* Copy the certificates to the system keyring */
+ ptr = (void *) ipl_cert_list_addr;
+ end = ptr + ipl_cert_list_size;
+ while ((void *) ptr < end) {
+ len = *(unsigned int *) ptr;
+ ptr += sizeof(unsigned int);
+ add_to_platform_keyring("IPL:db", ptr, len);
+ ptr += len;
+ }
+ return 0;
+}
+late_initcall(load_ipl_certs);
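
load_ipl_certs() walks a list of length-prefixed certificate blobs handed over by the machine loader. A hedged userspace version of the same traversal, with an explicit bounds check added for illustration (the kernel loop trusts the loader-provided sizes):

    #include <stdio.h>
    #include <string.h>

    /* Walk a buffer of (u32 length, payload) records. */
    static void walk_blobs(const unsigned char *buf, size_t size)
    {
            const unsigned char *ptr = buf, *end = buf + size;

            while ((size_t)(end - ptr) >= sizeof(unsigned int)) {
                    unsigned int len;

                    memcpy(&len, ptr, sizeof(len)); /* host-endian lengths assumed */
                    ptr += sizeof(len);
                    if (len > (size_t)(end - ptr))
                            break;  /* malformed: length overruns the buffer */
                    printf("blob of %u bytes\n", len);
                    ptr += len;
            }
    }

    int main(void)
    {
            /* two records with 3- and 1-byte payloads (little-endian host assumed) */
            const unsigned char buf[] = { 3, 0, 0, 0, 'a', 'b', 'c',
                                          1, 0, 0, 0, 'x' };

            walk_blobs(buf, sizeof(buf));
            return 0;
    }
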
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 711e89d8c415..23f95dec771b 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -112,7 +112,6 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
if (!sdesc)
goto out_free_tfm;
sdesc->shash.tfm = tfm;
- sdesc->shash.flags = 0x0;
*sdesc_ret = sdesc;
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 389a298274d3..1b1456b21a93 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -60,11 +60,11 @@ static int blksize;
static struct crypto_shash *hash_tfm;
enum {
- Opt_err = -1, Opt_new, Opt_load, Opt_update
+ Opt_new, Opt_load, Opt_update, Opt_err
};
enum {
- Opt_error = -1, Opt_default, Opt_ecryptfs, Opt_enc32
+ Opt_default, Opt_ecryptfs, Opt_enc32, Opt_error
};
static const match_table_t key_format_tokens = {
@@ -333,7 +333,6 @@ static int calc_hash(struct crypto_shash *tfm, u8 *digest,
int err;
desc->tfm = tfm;
- desc->flags = 0;
err = crypto_shash_digest(desc, buf, buflen, digest);
shash_desc_zero(desc);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 9320424c4a46..f05f7125a7d5 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -58,7 +58,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring && user->session_keyring) {
+ if (READ_ONCE(user->uid_keyring) && READ_ONCE(user->session_keyring)) {
kleave(" = 0 [exist]");
return 0;
}
@@ -111,8 +111,10 @@ int install_user_keyrings(void)
}
/* install the keyrings */
- user->uid_keyring = uid_keyring;
- user->session_keyring = session_keyring;
+ /* paired with READ_ONCE() */
+ smp_store_release(&user->uid_keyring, uid_keyring);
+ /* paired with READ_ONCE() */
+ smp_store_release(&user->session_keyring, session_keyring);
}
mutex_unlock(&key_user_keyring_mutex);
@@ -227,6 +229,7 @@ static int install_process_keyring(void)
* Install the given keyring as the session keyring of the given credentials
* struct, replacing the existing one if any. If the given keyring is NULL,
* then install a new anonymous session keyring.
+ * @cred can not be in use by any task yet.
*
* Return: 0 on success; -errno on failure.
*/
@@ -254,7 +257,7 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
/* install the keyring */
old = cred->session_keyring;
- rcu_assign_pointer(cred->session_keyring, keyring);
+ cred->session_keyring = keyring;
if (old)
key_put(old);
@@ -339,6 +342,7 @@ void key_fsgid_changed(struct task_struct *tsk)
key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
{
key_ref_t key_ref, ret, err;
+ const struct cred *cred = ctx->cred;
/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
* searchable, but we failed to find a key or we found a negative key;
@@ -352,9 +356,9 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
- if (ctx->cred->thread_keyring) {
+ if (cred->thread_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(ctx->cred->thread_keyring, 1), ctx);
+ make_key_ref(cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -370,9 +374,9 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
}
/* search the process keyring second */
- if (ctx->cred->process_keyring) {
+ if (cred->process_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(ctx->cred->process_keyring, 1), ctx);
+ make_key_ref(cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -391,12 +395,9 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
}
/* search the session keyring */
- if (ctx->cred->session_keyring) {
- rcu_read_lock();
+ if (cred->session_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
- ctx);
- rcu_read_unlock();
+ make_key_ref(cred->session_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -415,9 +416,9 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
}
}
/* or search the user-session keyring */
- else if (ctx->cred->user->session_keyring) {
+ else if (READ_ONCE(cred->user->session_keyring)) {
key_ref = keyring_search_aux(
- make_key_ref(ctx->cred->user->session_keyring, 1),
+ make_key_ref(READ_ONCE(cred->user->session_keyring), 1),
ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -604,7 +605,7 @@ try_again:
goto error;
goto reget_creds;
} else if (ctx.cred->session_keyring ==
- ctx.cred->user->session_keyring &&
+ READ_ONCE(ctx.cred->user->session_keyring) &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
@@ -612,15 +613,13 @@ try_again:
goto reget_creds;
}
- rcu_read_lock();
- key = rcu_dereference(ctx.cred->session_keyring);
+ key = ctx.cred->session_keyring;
__key_get(key);
- rcu_read_unlock();
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
- if (!ctx.cred->user->uid_keyring) {
+ if (!READ_ONCE(ctx.cred->user->uid_keyring)) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
@@ -632,7 +631,7 @@ try_again:
break;
case KEY_SPEC_USER_SESSION_KEYRING:
- if (!ctx.cred->user->session_keyring) {
+ if (!READ_ONCE(ctx.cred->user->session_keyring)) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
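
The switch to smp_store_release() paired with READ_ONCE() guarantees that a reader observing a non-NULL keyring pointer also observes the keyring's initialized contents; READ_ONCE() suffices on the reader side because the subsequent dereference carries an address dependency. A hedged C11 analogue of the publish pattern using release/acquire (the kernel's primitives differ in detail):

    #include <stdatomic.h>
    #include <stdio.h>

    struct keyring {
            int serial;
    };

    static struct keyring ring;
    static _Atomic(struct keyring *) uid_keyring;   /* published pointer */

    static void publisher(void)
    {
            ring.serial = 42;       /* initialize before publishing */
            /* release orders the init above before the pointer store,
             * like smp_store_release() in the patch */
            atomic_store_explicit(&uid_keyring, &ring, memory_order_release);
    }

    static void reader(void)
    {
            struct keyring *k =
                    atomic_load_explicit(&uid_keyring, memory_order_acquire);

            if (k)  /* a non-NULL pointer implies serial is visible */
                    printf("serial %d\n", k->serial);
    }

    int main(void)
    {
            publisher();
            reader();
            return 0;
    }
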
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 2f17d84d46f1..75d87f9e0f49 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -142,12 +142,10 @@ static int call_sbin_request_key(struct key *authkey, void *aux)
prkey = cred->process_keyring->serial;
sprintf(keyring_str[1], "%d", prkey);
- rcu_read_lock();
- session = rcu_dereference(cred->session_keyring);
+ session = cred->session_keyring;
if (!session)
session = cred->user->session_keyring;
sskey = session->serial;
- rcu_read_unlock();
sprintf(keyring_str[2], "%d", sskey);
@@ -287,10 +285,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
/* fall through */
case KEY_REQKEY_DEFL_SESSION_KEYRING:
- rcu_read_lock();
- dest_keyring = key_get(
- rcu_dereference(cred->session_keyring));
- rcu_read_unlock();
+ dest_keyring = key_get(cred->session_keyring);
if (dest_keyring)
break;
@@ -298,11 +293,12 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
/* fall through */
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
dest_keyring =
- key_get(cred->user->session_keyring);
+ key_get(READ_ONCE(cred->user->session_keyring));
break;
case KEY_REQKEY_DEFL_USER_KEYRING:
- dest_keyring = key_get(cred->user->uid_keyring);
+ dest_keyring =
+ key_get(READ_ONCE(cred->user->uid_keyring));
break;
case KEY_REQKEY_DEFL_GROUP_KEYRING:
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 4d98f4f87236..a75b2f0f1230 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -34,6 +34,8 @@
static const char hmac_alg[] = "hmac(sha1)";
static const char hash_alg[] = "sha1";
+static struct tpm_chip *chip;
+static struct tpm_digest *digests;
struct sdesc {
struct shash_desc shash;
@@ -53,7 +55,6 @@ static struct sdesc *init_sdesc(struct crypto_shash *alg)
if (!sdesc)
return ERR_PTR(-ENOMEM);
sdesc->shash.tfm = alg;
- sdesc->shash.flags = 0x0;
return sdesc;
}
@@ -123,7 +124,7 @@ out:
*/
int TSS_authhmac(unsigned char *digest, const unsigned char *key,
unsigned int keylen, unsigned char *h1,
- unsigned char *h2, unsigned char h3, ...)
+ unsigned char *h2, unsigned int h3, ...)
{
unsigned char paramdigest[SHA1_DIGEST_SIZE];
struct sdesc *sdesc;
@@ -133,13 +134,16 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key,
int ret;
va_list argp;
+ if (!chip)
+ return -ENODEV;
+
sdesc = init_sdesc(hashalg);
if (IS_ERR(sdesc)) {
pr_info("trusted_key: can't alloc %s\n", hash_alg);
return PTR_ERR(sdesc);
}
- c = h3;
+ c = !!h3;
ret = crypto_shash_init(&sdesc->shash);
if (ret < 0)
goto out;
@@ -194,6 +198,9 @@ int TSS_checkhmac1(unsigned char *buffer,
va_list argp;
int ret;
+ if (!chip)
+ return -ENODEV;
+
bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
tag = LOAD16(buffer, 0);
ordinal = command;
@@ -361,8 +368,11 @@ int trusted_tpm_send(unsigned char *cmd, size_t buflen)
{
int rc;
+ if (!chip)
+ return -ENODEV;
+
dump_tpm_buf(cmd);
- rc = tpm_send(NULL, cmd, buflen);
+ rc = tpm_send(chip, cmd, buflen);
dump_tpm_buf(cmd);
if (rc > 0)
/* Can't return positive return code values to keyctl */
@@ -379,15 +389,10 @@ EXPORT_SYMBOL_GPL(trusted_tpm_send);
*/
static int pcrlock(const int pcrnum)
{
- unsigned char hash[SHA1_DIGEST_SIZE];
- int ret;
-
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- ret = tpm_get_random(NULL, hash, SHA1_DIGEST_SIZE);
- if (ret != SHA1_DIGEST_SIZE)
- return ret;
- return tpm_pcr_extend(NULL, pcrnum, hash) ? -EINVAL : 0;
+
+ return tpm_pcr_extend(chip, pcrnum, digests) ? -EINVAL : 0;
}
/*
@@ -400,7 +405,7 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
unsigned char ononce[TPM_NONCE_SIZE];
int ret;
- ret = tpm_get_random(NULL, ononce, TPM_NONCE_SIZE);
+ ret = tpm_get_random(chip, ononce, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE)
return ret;
@@ -432,6 +437,9 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
{
int ret;
+ if (!chip)
+ return -ENODEV;
+
INIT_BUF(tb);
store16(tb, TPM_TAG_RQU_COMMAND);
store32(tb, TPM_OIAP_SIZE);
@@ -496,7 +504,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
if (ret < 0)
goto out;
- ret = tpm_get_random(NULL, td->nonceodd, TPM_NONCE_SIZE);
+ ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE)
goto out;
ordinal = htonl(TPM_ORD_SEAL);
@@ -606,7 +614,7 @@ static int tpm_unseal(struct tpm_buf *tb,
ordinal = htonl(TPM_ORD_UNSEAL);
keyhndl = htonl(SRKHANDLE);
- ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE);
+ ret = tpm_get_random(chip, nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE) {
pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
return ret;
@@ -751,7 +759,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
int i;
int tpm2;
- tpm2 = tpm_is_tpm2(NULL);
+ tpm2 = tpm_is_tpm2(chip);
if (tpm2 < 0)
return tpm2;
@@ -920,7 +928,7 @@ static struct trusted_key_options *trusted_options_alloc(void)
struct trusted_key_options *options;
int tpm2;
- tpm2 = tpm_is_tpm2(NULL);
+ tpm2 = tpm_is_tpm2(chip);
if (tpm2 < 0)
return NULL;
@@ -970,7 +978,7 @@ static int trusted_instantiate(struct key *key,
size_t key_len;
int tpm2;
- tpm2 = tpm_is_tpm2(NULL);
+ tpm2 = tpm_is_tpm2(chip);
if (tpm2 < 0)
return tpm2;
@@ -1011,7 +1019,7 @@ static int trusted_instantiate(struct key *key,
switch (key_cmd) {
case Opt_load:
if (tpm2)
- ret = tpm_unseal_trusted(NULL, payload, options);
+ ret = tpm_unseal_trusted(chip, payload, options);
else
ret = key_unseal(payload, options);
dump_payload(payload);
@@ -1021,13 +1029,13 @@ static int trusted_instantiate(struct key *key,
break;
case Opt_new:
key_len = payload->key_len;
- ret = tpm_get_random(NULL, payload->key, key_len);
+ ret = tpm_get_random(chip, payload->key, key_len);
if (ret != key_len) {
pr_info("trusted_key: key_create failed (%d)\n", ret);
goto out;
}
if (tpm2)
- ret = tpm_seal_trusted(NULL, payload, options);
+ ret = tpm_seal_trusted(chip, payload, options);
else
ret = key_seal(payload, options);
if (ret < 0)
@@ -1221,23 +1229,67 @@ hashalg_fail:
return ret;
}
+static int __init init_digests(void)
+{
+ u8 digest[TPM_MAX_DIGEST_SIZE];
+ int ret;
+ int i;
+
+ ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
+ if (ret < 0)
+ return ret;
+ if (ret < TPM_MAX_DIGEST_SIZE)
+ return -EFAULT;
+
+ digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
+ GFP_KERNEL);
+ if (!digests)
+ return -ENOMEM;
+
+ for (i = 0; i < chip->nr_allocated_banks; i++)
+ memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
+
+ return 0;
+}
+
static int __init init_trusted(void)
{
int ret;
+ /* encrypted_keys.ko depends on successful load of this module even if
+ * TPM is not used.
+ */
+ chip = tpm_default_chip();
+ if (!chip)
+ return 0;
+
+ ret = init_digests();
+ if (ret < 0)
+ goto err_put;
ret = trusted_shash_alloc();
if (ret < 0)
- return ret;
+ goto err_free;
ret = register_key_type(&key_type_trusted);
if (ret < 0)
- trusted_shash_release();
+ goto err_release;
+ return 0;
+err_release:
+ trusted_shash_release();
+err_free:
+ kfree(digests);
+err_put:
+ put_device(&chip->dev);
return ret;
}
static void __exit cleanup_trusted(void)
{
- trusted_shash_release();
- unregister_key_type(&key_type_trusted);
+ if (chip) {
+ put_device(&chip->dev);
+ kfree(digests);
+ trusted_shash_release();
+ unregister_key_type(&key_type_trusted);
+ }
}
late_initcall(init_trusted);
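
init_trusted() now acquires its resources in order — default TPM chip, PCR digests, hash transforms, key type — and the new labels unwind them in reverse when a later step fails. That is the standard kernel goto-ladder idiom for error handling; a compilable stand-alone reduction with stubbed resources (stub names are illustrative only):

    static int get_chip(void)     { return 0; } /* cf. tpm_default_chip() */
    static int get_digests(void)  { return 0; } /* cf. init_digests() */
    static int get_keytype(void)  { return 0; } /* cf. register_key_type() */
    static void put_digests(void) { }
    static void put_chip(void)    { }

    static int example_init(void)
    {
        int ret;

        ret = get_chip();
        if (ret)
            return ret;         /* nothing to undo yet */

        ret = get_digests();
        if (ret)
            goto err_put_chip;

        ret = get_keytype();
        if (ret)
            goto err_free_digests;

        return 0;

    err_free_digests:           /* release in reverse acquisition order */
        put_digests();
    err_put_chip:
        put_chip();
        return ret;
    }

    int main(void)
    {
        return example_init();
    }
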
diff --git a/security/security.c b/security/security.c
index 301b141b9a32..23cbb1a295a3 100644
--- a/security/security.c
+++ b/security/security.c
@@ -764,6 +764,16 @@ void security_bprm_committed_creds(struct linux_binprm *bprm)
call_void_hook(bprm_committed_creds, bprm);
}
+int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
+{
+ return call_int_hook(fs_context_dup, 0, fc, src_fc);
+}
+
+int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ return call_int_hook(fs_context_parse_param, -ENOPARAM, fc, param);
+}
+
int security_sb_alloc(struct super_block *sb)
{
return call_int_hook(sb_alloc_security, 0, sb);
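
The two wrappers added here lean on call_int_hook()'s default-return convention: the second macro argument is what the caller sees when no registered LSM provides the hook, and once hooks do run, the first non-zero return ends the walk. That is why security_fs_context_parse_param() defaults to -ENOPARAM ("not an LSM parameter") while most hooks default to 0. A stand-alone reduction of that control flow (simplified; the real macro iterates an hlist of struct security_hook_list):

    #include <stdio.h>

    #define ENOPARAM 519    /* kernel-internal errno value */

    static int call_int_hook_demo(int (*hooks[])(void), int nhooks,
                                  int default_rc)
    {
        int i, rc = default_rc;

        for (i = 0; i < nhooks; i++) {
            rc = hooks[i]();
            if (rc != 0)        /* first non-zero return wins */
                break;
        }
        return rc;
    }

    static int hook_claims(void) { return 1; }

    int main(void)
    {
        int (*hooks[])(void) = { hook_claims };

        /* no hooks registered: the default falls through untouched */
        printf("%d\n", call_int_hook_demo(NULL, 0, -ENOPARAM));  /* -519 */
        /* a hook claims the call: its value is returned instead */
        printf("%d\n", call_int_hook_demo(hooks, 1, -ENOPARAM)); /*  1  */
        return 0;
    }
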
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2f82a54f8703..1d0b37af2444 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -48,6 +48,8 @@
#include <linux/fdtable.h>
#include <linux/namei.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/tty.h>
@@ -410,11 +412,11 @@ static inline int inode_doinit(struct inode *inode)
enum {
Opt_error = -1,
- Opt_context = 1,
+ Opt_context = 0,
+ Opt_defcontext = 1,
Opt_fscontext = 2,
- Opt_defcontext = 3,
- Opt_rootcontext = 4,
- Opt_seclabel = 5,
+ Opt_rootcontext = 3,
+ Opt_seclabel = 4,
};
#define A(s, has_arg) {#s, sizeof(#s) - 1, Opt_##s, has_arg}
@@ -937,8 +939,11 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
/* if fs is reusing a sb, make sure that the contexts match */
- if (newsbsec->flags & SE_SBINITIALIZED)
+ if (newsbsec->flags & SE_SBINITIALIZED) {
+ if ((kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context)
+ *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
return selinux_cmp_sb_context(oldsb, newsb);
+ }
mutex_lock(&newsbsec->lock);
@@ -1067,6 +1072,7 @@ static int show_sid(struct seq_file *m, u32 sid)
if (!rc) {
bool has_comma = context && strchr(context, ',');
+ seq_putc(m, '=');
if (has_comma)
seq_putc(m, '\"');
seq_escape(m, context, "\"\n\\");
@@ -1120,7 +1126,7 @@ static int selinux_sb_show_options(struct seq_file *m, struct super_block *sb)
}
if (sbsec->flags & SBLABEL_MNT) {
seq_putc(m, ',');
- seq_puts(m, LABELSUPP_STR);
+ seq_puts(m, SECLABEL_STR);
}
return 0;
}
@@ -2739,6 +2745,76 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
FILESYSTEM__UNMOUNT, NULL);
}
+static int selinux_fs_context_dup(struct fs_context *fc,
+ struct fs_context *src_fc)
+{
+ const struct selinux_mnt_opts *src = src_fc->security;
+ struct selinux_mnt_opts *opts;
+
+ if (!src)
+ return 0;
+
+ fc->security = kzalloc(sizeof(struct selinux_mnt_opts), GFP_KERNEL);
+ if (!fc->security)
+ return -ENOMEM;
+
+ opts = fc->security;
+
+ if (src->fscontext) {
+ opts->fscontext = kstrdup(src->fscontext, GFP_KERNEL);
+ if (!opts->fscontext)
+ return -ENOMEM;
+ }
+ if (src->context) {
+ opts->context = kstrdup(src->context, GFP_KERNEL);
+ if (!opts->context)
+ return -ENOMEM;
+ }
+ if (src->rootcontext) {
+ opts->rootcontext = kstrdup(src->rootcontext, GFP_KERNEL);
+ if (!opts->rootcontext)
+ return -ENOMEM;
+ }
+ if (src->defcontext) {
+ opts->defcontext = kstrdup(src->defcontext, GFP_KERNEL);
+ if (!opts->defcontext)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static const struct fs_parameter_spec selinux_param_specs[] = {
+ fsparam_string(CONTEXT_STR, Opt_context),
+ fsparam_string(DEFCONTEXT_STR, Opt_defcontext),
+ fsparam_string(FSCONTEXT_STR, Opt_fscontext),
+ fsparam_string(ROOTCONTEXT_STR, Opt_rootcontext),
+ fsparam_flag (SECLABEL_STR, Opt_seclabel),
+ {}
+};
+
+static const struct fs_parameter_description selinux_fs_parameters = {
+ .name = "SELinux",
+ .specs = selinux_param_specs,
+};
+
+static int selinux_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct fs_parse_result result;
+ int opt, rc;
+
+ opt = fs_parse(fc, &selinux_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ rc = selinux_add_opt(opt, param->string, &fc->security);
+ if (!rc) {
+ param->string = NULL;
+ rc = 1;
+ }
+ return rc;
+}
+
/* inode security operations */
static int selinux_inode_alloc_security(struct inode *inode)
@@ -4472,7 +4548,7 @@ err_af:
}
/* This supports connect(2) and SCTP connect services such as sctp_connectx(3)
- * and sctp_sendmsg(3) as described in Documentation/security/LSM-sctp.rst
+ * and sctp_sendmsg(3) as described in Documentation/security/SCTP.rst
*/
static int selinux_socket_connect_helper(struct socket *sock,
struct sockaddr *address, int addrlen)
@@ -5061,6 +5137,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
return -EINVAL;
}
+ if (walk_size + len > addrlen)
+ return -EINVAL;
+
err = -EINVAL;
switch (optname) {
/* Bind checks */
@@ -6592,6 +6671,9 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(bprm_committing_creds, selinux_bprm_committing_creds),
LSM_HOOK_INIT(bprm_committed_creds, selinux_bprm_committed_creds),
+ LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
+ LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
+
LSM_HOOK_INIT(sb_alloc_security, selinux_sb_alloc_security),
LSM_HOOK_INIT(sb_free_security, selinux_sb_free_security),
LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
@@ -6837,6 +6919,8 @@ static __init int selinux_init(void)
else
pr_debug("SELinux: Starting in permissive mode\n");
+ fs_validate_description(&selinux_fs_parameters);
+
return 0;
}
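
The SELinux parse_param hook above shows the fs_parse() contract that comes with the new mount API: fs_parse() returns the matched Opt_* value, or -ENOPARAM when the key is not in the table, letting the parameter fall through to the filesystem's own parser. On a match, the LSM stores the value and clears param->string to take ownership of the already-parsed string so the VFS will not free it. The generic shape of such a handler (kernel context assumed; everything named example_* is illustrative):

    /* sketch of an LSM fs_context_parse_param handler, error paths trimmed */
    static int example_fs_context_parse_param(struct fs_context *fc,
                                              struct fs_parameter *param)
    {
        struct fs_parse_result result;
        int opt;

        opt = fs_parse(fc, &example_fs_parameters, param, &result);
        if (opt < 0)
            return opt;             /* -ENOPARAM: not ours, pass it on */

        example_stash_option(fc->security, opt, param->string);
        param->string = NULL;       /* string ownership moves to the LSM */
        return 1;                   /* claimed; skip the fs parser */
    }
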
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index bd5fe0d3204a..201f7e588a29 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/capability.h>
+#include <linux/socket.h>
#define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
"getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map"
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index f68fb25b5702..b5b7c5aade8c 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -59,11 +59,11 @@
#define SE_SBPROC 0x0200
#define SE_SBGENFS 0x0400
-#define CONTEXT_STR "context="
-#define FSCONTEXT_STR "fscontext="
-#define ROOTCONTEXT_STR "rootcontext="
-#define DEFCONTEXT_STR "defcontext="
-#define LABELSUPP_STR "seclabel"
+#define CONTEXT_STR "context"
+#define FSCONTEXT_STR "fscontext"
+#define ROOTCONTEXT_STR "rootcontext"
+#define DEFCONTEXT_STR "defcontext"
+#define SECLABEL_STR "seclabel"
struct netlbl_lsm_secattr;
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index c0417cf17fee..8c5800750fa8 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -93,12 +93,10 @@ avtab_insert_node(struct avtab *h, int hvalue,
newnode->next = prev->next;
prev->next = newnode;
} else {
- newnode->next = flex_array_get_ptr(h->htable, hvalue);
- if (flex_array_put_ptr(h->htable, hvalue, newnode,
- GFP_KERNEL|__GFP_ZERO)) {
- kmem_cache_free(avtab_node_cachep, newnode);
- return NULL;
- }
+ struct avtab_node **n = &h->htable[hvalue];
+
+ newnode->next = *n;
+ *n = newnode;
}
h->nel++;
@@ -111,11 +109,11 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
struct avtab_node *prev, *cur, *newnode;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h || !h->htable)
+ if (!h)
return -EINVAL;
hvalue = avtab_hash(key, h->mask);
- for (prev = NULL, cur = flex_array_get_ptr(h->htable, hvalue);
+ for (prev = NULL, cur = h->htable[hvalue];
cur;
prev = cur, cur = cur->next) {
if (key->source_type == cur->key.source_type &&
@@ -156,10 +154,10 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
struct avtab_node *prev, *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h || !h->htable)
+ if (!h)
return NULL;
hvalue = avtab_hash(key, h->mask);
- for (prev = NULL, cur = flex_array_get_ptr(h->htable, hvalue);
+ for (prev = NULL, cur = h->htable[hvalue];
cur;
prev = cur, cur = cur->next) {
if (key->source_type == cur->key.source_type &&
@@ -186,11 +184,11 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
struct avtab_node *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h || !h->htable)
+ if (!h)
return NULL;
hvalue = avtab_hash(key, h->mask);
- for (cur = flex_array_get_ptr(h->htable, hvalue); cur;
+ for (cur = h->htable[hvalue]; cur;
cur = cur->next) {
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
@@ -222,11 +220,11 @@ avtab_search_node(struct avtab *h, struct avtab_key *key)
struct avtab_node *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h || !h->htable)
+ if (!h)
return NULL;
hvalue = avtab_hash(key, h->mask);
- for (cur = flex_array_get_ptr(h->htable, hvalue); cur;
+ for (cur = h->htable[hvalue]; cur;
cur = cur->next) {
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
@@ -281,11 +279,11 @@ void avtab_destroy(struct avtab *h)
int i;
struct avtab_node *cur, *temp;
- if (!h || !h->htable)
+ if (!h)
return;
for (i = 0; i < h->nslot; i++) {
- cur = flex_array_get_ptr(h->htable, i);
+ cur = h->htable[i];
while (cur) {
temp = cur;
cur = cur->next;
@@ -295,7 +293,7 @@ void avtab_destroy(struct avtab *h)
kmem_cache_free(avtab_node_cachep, temp);
}
}
- flex_array_free(h->htable);
+ kvfree(h->htable);
h->htable = NULL;
h->nslot = 0;
h->mask = 0;
@@ -303,6 +301,7 @@ void avtab_destroy(struct avtab *h)
int avtab_init(struct avtab *h)
{
+ kvfree(h->htable);
h->htable = NULL;
h->nel = 0;
return 0;
@@ -329,8 +328,7 @@ int avtab_alloc(struct avtab *h, u32 nrules)
nslot = MAX_AVTAB_HASH_BUCKETS;
mask = nslot - 1;
- h->htable = flex_array_alloc(sizeof(struct avtab_node *), nslot,
- GFP_KERNEL | __GFP_ZERO);
+ h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL);
if (!h->htable)
return -ENOMEM;
@@ -353,7 +351,7 @@ void avtab_hash_eval(struct avtab *h, char *tag)
max_chain_len = 0;
chain2_len_sum = 0;
for (i = 0; i < h->nslot; i++) {
- cur = flex_array_get_ptr(h->htable, i);
+ cur = h->htable[i];
if (cur) {
slots_used++;
chain_len = 0;
@@ -646,7 +644,7 @@ int avtab_write(struct policydb *p, struct avtab *a, void *fp)
return rc;
for (i = 0; i < a->nslot; i++) {
- for (cur = flex_array_get_ptr(a->htable, i); cur;
+ for (cur = a->htable[i]; cur;
cur = cur->next) {
rc = avtab_write_item(p, cur, fp);
if (rc)
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 0d652fad5319..de16673b2314 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -24,7 +24,6 @@
#define _SS_AVTAB_H_
#include "security.h"
-#include <linux/flex_array.h>
struct avtab_key {
u16 source_type; /* source type */
@@ -84,11 +83,10 @@ struct avtab_node {
};
struct avtab {
- struct flex_array *htable;
+ struct avtab_node **htable;
u32 nel; /* number of elements */
u32 nslot; /* number of hash slots */
u32 mask; /* mask to compute hash func */
-
};
int avtab_init(struct avtab *);
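
The avtab rework is one piece of the tree-wide flex_array removal: kvcalloc() gives a zeroed allocation that transparently falls back from kmalloc to vmalloc for large sizes, so the hash table becomes a plain C array indexed directly instead of going through flex_array_get_ptr()/flex_array_put_ptr() (the put variant could also fail, which is why the extra error handling deleted above existed). The conversion pattern in miniature (kernel context assumed; names illustrative):

    #include <linux/errno.h>
    #include <linux/mm.h>       /* kvcalloc(), kvfree() */

    struct node { struct node *next; };

    static struct node **table;

    static int table_alloc(size_t nslot)
    {
        /* zeroed; small tables come from kmalloc, big ones from vmalloc */
        table = kvcalloc(nslot, sizeof(*table), GFP_KERNEL);
        return table ? 0 : -ENOMEM;
    }

    static void table_insert(size_t slot, struct node *n)
    {
        /* direct indexing replaces flex_array_put_ptr(), and unlike the
         * flex_array call it cannot fail */
        n->next = table[slot];
        table[slot] = n;
    }

    static void table_free(void)
    {
        kvfree(table);          /* correct for either backing store */
        table = NULL;
    }
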
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index f49e522e932d..3bbb60345209 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -195,7 +195,6 @@ int cond_index_bool(void *key, void *datum, void *datap)
{
struct policydb *p;
struct cond_bool_datum *booldatum;
- struct flex_array *fa;
booldatum = datum;
p = datap;
@@ -203,10 +202,7 @@ int cond_index_bool(void *key, void *datum, void *datap)
if (!booldatum->value || booldatum->value > p->p_bools.nprim)
return -EINVAL;
- fa = p->sym_val_to_name[SYM_BOOLS];
- if (flex_array_put_ptr(fa, booldatum->value - 1, key,
- GFP_KERNEL | __GFP_ZERO))
- BUG();
+ p->sym_val_to_name[SYM_BOOLS][booldatum->value - 1] = key;
p->bool_val_to_struct[booldatum->value - 1] = booldatum;
return 0;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index c1c31e33657a..daecdfb15a9c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -36,7 +36,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/audit.h>
-#include <linux/flex_array.h>
#include "security.h"
#include "policydb.h"
@@ -341,17 +340,14 @@ static int common_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct common_datum *comdatum;
- struct flex_array *fa;
comdatum = datum;
p = datap;
if (!comdatum->value || comdatum->value > p->p_commons.nprim)
return -EINVAL;
- fa = p->sym_val_to_name[SYM_COMMONS];
- if (flex_array_put_ptr(fa, comdatum->value - 1, key,
- GFP_KERNEL | __GFP_ZERO))
- BUG();
+ p->sym_val_to_name[SYM_COMMONS][comdatum->value - 1] = key;
+
return 0;
}
@@ -359,16 +355,13 @@ static int class_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct class_datum *cladatum;
- struct flex_array *fa;
cladatum = datum;
p = datap;
if (!cladatum->value || cladatum->value > p->p_classes.nprim)
return -EINVAL;
- fa = p->sym_val_to_name[SYM_CLASSES];
- if (flex_array_put_ptr(fa, cladatum->value - 1, key,
- GFP_KERNEL | __GFP_ZERO))
- BUG();
+
+ p->sym_val_to_name[SYM_CLASSES][cladatum->value - 1] = key;
p->class_val_to_struct[cladatum->value - 1] = cladatum;
return 0;
}
@@ -377,7 +370,6 @@ static int role_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct role_datum *role;
- struct flex_array *fa;
role = datum;
p = datap;
@@ -386,10 +378,7 @@ static int role_index(void *key, void *datum, void *datap)
|| role->bounds > p->p_roles.nprim)
return -EINVAL;
- fa = p->sym_val_to_name[SYM_ROLES];
- if (flex_array_put_ptr(fa, role->value - 1, key,
- GFP_KERNEL | __GFP_ZERO))
- BUG();
+ p->sym_val_to_name[SYM_ROLES][role->value - 1] = key;
p->role_val_to_struct[role->value - 1] = role;
return 0;
}
@@ -398,7 +387,6 @@ static int type_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct type_datum *typdatum;
- struct flex_array *fa;
typdatum = datum;
p = datap;
@@ -408,15 +396,8 @@ static int type_index(void *key, void *datum, void *datap)
|| typdatum->value > p->p_types.nprim
|| typdatum->bounds > p->p_types.nprim)
return -EINVAL;
- fa = p->sym_val_to_name[SYM_TYPES];
- if (flex_array_put_ptr(fa, typdatum->value - 1, key,
- GFP_KERNEL | __GFP_ZERO))
- BUG();
-
- fa = p->type_val_to_struct_array;
- if (flex_array_put_ptr(fa, typdatum->value - 1, typdatum,
- GFP_KERNEL | __GFP_ZERO))
- BUG();
+ p->sym_val_to_name[SYM_TYPES][typdatum->value - 1] = key;
+ p->type_val_to_struct_array[typdatum->value - 1] = typdatum;
}
return 0;
@@ -426,7 +407,6 @@ static int user_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct user_datum *usrdatum;
- struct flex_array *fa;
usrdatum = datum;
p = datap;
@@ -435,10 +415,7 @@ static int user_index(void *key, void *datum, void *datap)
|| usrdatum->bounds > p->p_users.nprim)
return -EINVAL;
- fa = p->sym_val_to_name[SYM_USERS];
- if (flex_array_put_ptr(fa, usrdatum->value - 1, key,
- GFP_KERNEL | __GFP_ZERO))
- BUG();
+ p->sym_val_to_name[SYM_USERS][usrdatum->value - 1] = key;
p->user_val_to_struct[usrdatum->value - 1] = usrdatum;
return 0;
}
@@ -447,7 +424,6 @@ static int sens_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct level_datum *levdatum;
- struct flex_array *fa;
levdatum = datum;
p = datap;
@@ -456,10 +432,8 @@ static int sens_index(void *key, void *datum, void *datap)
if (!levdatum->level->sens ||
levdatum->level->sens > p->p_levels.nprim)
return -EINVAL;
- fa = p->sym_val_to_name[SYM_LEVELS];
- if (flex_array_put_ptr(fa, levdatum->level->sens - 1, key,
- GFP_KERNEL | __GFP_ZERO))
- BUG();
+
+ p->sym_val_to_name[SYM_LEVELS][levdatum->level->sens - 1] = key;
}
return 0;
@@ -469,7 +443,6 @@ static int cat_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct cat_datum *catdatum;
- struct flex_array *fa;
catdatum = datum;
p = datap;
@@ -477,10 +450,8 @@ static int cat_index(void *key, void *datum, void *datap)
if (!catdatum->isalias) {
if (!catdatum->value || catdatum->value > p->p_cats.nprim)
return -EINVAL;
- fa = p->sym_val_to_name[SYM_CATS];
- if (flex_array_put_ptr(fa, catdatum->value - 1, key,
- GFP_KERNEL | __GFP_ZERO))
- BUG();
+
+ p->sym_val_to_name[SYM_CATS][catdatum->value - 1] = key;
}
return 0;
@@ -568,35 +539,23 @@ static int policydb_index(struct policydb *p)
if (!p->user_val_to_struct)
return -ENOMEM;
- /* Yes, I want the sizeof the pointer, not the structure */
- p->type_val_to_struct_array = flex_array_alloc(sizeof(struct type_datum *),
- p->p_types.nprim,
- GFP_KERNEL | __GFP_ZERO);
+ p->type_val_to_struct_array = kvcalloc(p->p_types.nprim,
+ sizeof(*p->type_val_to_struct_array),
+ GFP_KERNEL);
if (!p->type_val_to_struct_array)
return -ENOMEM;
- rc = flex_array_prealloc(p->type_val_to_struct_array, 0,
- p->p_types.nprim, GFP_KERNEL | __GFP_ZERO);
- if (rc)
- goto out;
-
rc = cond_init_bool_indexes(p);
if (rc)
goto out;
for (i = 0; i < SYM_NUM; i++) {
- p->sym_val_to_name[i] = flex_array_alloc(sizeof(char *),
- p->symtab[i].nprim,
- GFP_KERNEL | __GFP_ZERO);
+ p->sym_val_to_name[i] = kvcalloc(p->symtab[i].nprim,
+ sizeof(char *),
+ GFP_KERNEL);
if (!p->sym_val_to_name[i])
return -ENOMEM;
- rc = flex_array_prealloc(p->sym_val_to_name[i],
- 0, p->symtab[i].nprim,
- GFP_KERNEL | __GFP_ZERO);
- if (rc)
- goto out;
-
rc = hashtab_map(p->symtab[i].table, index_f[i], p);
if (rc)
goto out;
@@ -810,16 +769,13 @@ void policydb_destroy(struct policydb *p)
hashtab_destroy(p->symtab[i].table);
}
- for (i = 0; i < SYM_NUM; i++) {
- if (p->sym_val_to_name[i])
- flex_array_free(p->sym_val_to_name[i]);
- }
+ for (i = 0; i < SYM_NUM; i++)
+ kvfree(p->sym_val_to_name[i]);
kfree(p->class_val_to_struct);
kfree(p->role_val_to_struct);
kfree(p->user_val_to_struct);
- if (p->type_val_to_struct_array)
- flex_array_free(p->type_val_to_struct_array);
+ kvfree(p->type_val_to_struct_array);
avtab_destroy(&p->te_avtab);
@@ -873,15 +829,9 @@ void policydb_destroy(struct policydb *p)
hashtab_destroy(p->range_tr);
if (p->type_attr_map_array) {
- for (i = 0; i < p->p_types.nprim; i++) {
- struct ebitmap *e;
-
- e = flex_array_get(p->type_attr_map_array, i);
- if (!e)
- continue;
- ebitmap_destroy(e);
- }
- flex_array_free(p->type_attr_map_array);
+ for (i = 0; i < p->p_types.nprim; i++)
+ ebitmap_destroy(&p->type_attr_map_array[i]);
+ kvfree(p->type_attr_map_array);
}
ebitmap_destroy(&p->filename_trans_ttypes);
@@ -1770,8 +1720,7 @@ static int type_bounds_sanity_check(void *key, void *datum, void *datap)
return -EINVAL;
}
- upper = flex_array_get_ptr(p->type_val_to_struct_array,
- upper->bounds - 1);
+ upper = p->type_val_to_struct_array[upper->bounds - 1];
BUG_ON(!upper);
if (upper->attribute) {
@@ -2543,24 +2492,19 @@ int policydb_read(struct policydb *p, void *fp)
if (rc)
goto bad;
- rc = -ENOMEM;
- p->type_attr_map_array = flex_array_alloc(sizeof(struct ebitmap),
- p->p_types.nprim,
- GFP_KERNEL | __GFP_ZERO);
+ p->type_attr_map_array = kvcalloc(p->p_types.nprim,
+ sizeof(*p->type_attr_map_array),
+ GFP_KERNEL);
if (!p->type_attr_map_array)
goto bad;
- /* preallocate so we don't have to worry about the put ever failing */
- rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim,
- GFP_KERNEL | __GFP_ZERO);
- if (rc)
- goto bad;
+ /* just in case ebitmap_init() becomes more than just a memset(0): */
+ for (i = 0; i < p->p_types.nprim; i++)
+ ebitmap_init(&p->type_attr_map_array[i]);
for (i = 0; i < p->p_types.nprim; i++) {
- struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
+ struct ebitmap *e = &p->type_attr_map_array[i];
- BUG_ON(!e);
- ebitmap_init(e);
if (p->policyvers >= POLICYDB_VERSION_AVTAB) {
rc = ebitmap_read(e, fp);
if (rc)
@@ -3554,9 +3498,8 @@ int policydb_write(struct policydb *p, void *fp)
return rc;
for (i = 0; i < p->p_types.nprim; i++) {
- struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
+ struct ebitmap *e = &p->type_attr_map_array[i];
- BUG_ON(!e);
rc = ebitmap_write(e, fp);
if (rc)
return rc;
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 215f8f30ac5a..27039149ff0a 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -24,8 +24,6 @@
#ifndef _SS_POLICYDB_H_
#define _SS_POLICYDB_H_
-#include <linux/flex_array.h>
-
#include "symtab.h"
#include "avtab.h"
#include "sidtab.h"
@@ -251,13 +249,13 @@ struct policydb {
#define p_cats symtab[SYM_CATS]
/* symbol names indexed by (value - 1) */
- struct flex_array *sym_val_to_name[SYM_NUM];
+ char **sym_val_to_name[SYM_NUM];
/* class, role, and user attributes indexed by (value - 1) */
struct class_datum **class_val_to_struct;
struct role_datum **role_val_to_struct;
struct user_datum **user_val_to_struct;
- struct flex_array *type_val_to_struct_array;
+ struct type_datum **type_val_to_struct_array;
/* type enforcement access vectors and transitions */
struct avtab te_avtab;
@@ -294,7 +292,7 @@ struct policydb {
struct hashtab *range_tr;
/* type -> attribute reverse mapping */
- struct flex_array *type_attr_map_array;
+ struct ebitmap *type_attr_map_array;
struct ebitmap policycaps;
@@ -369,9 +367,7 @@ static inline int put_entry(const void *buf, size_t bytes, int num, struct polic
static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr)
{
- struct flex_array *fa = p->sym_val_to_name[sym_num];
-
- return flex_array_get_ptr(fa, element_nr);
+ return p->sym_val_to_name[sym_num][element_nr];
}
extern u16 string_to_security_class(struct policydb *p, const char *name);
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 1269e2be3c2d..ec62918521b1 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -49,7 +49,6 @@
#include <linux/sched.h>
#include <linux/audit.h>
#include <linux/mutex.h>
-#include <linux/flex_array.h>
#include <linux/vmalloc.h>
#include <net/netlabel.h>
@@ -545,15 +544,13 @@ static void type_attribute_bounds_av(struct policydb *policydb,
struct type_datum *target;
u32 masked = 0;
- source = flex_array_get_ptr(policydb->type_val_to_struct_array,
- scontext->type - 1);
+ source = policydb->type_val_to_struct_array[scontext->type - 1];
BUG_ON(!source);
if (!source->bounds)
return;
- target = flex_array_get_ptr(policydb->type_val_to_struct_array,
- tcontext->type - 1);
+ target = policydb->type_val_to_struct_array[tcontext->type - 1];
BUG_ON(!target);
memset(&lo_avd, 0, sizeof(lo_avd));
@@ -653,11 +650,9 @@ static void context_struct_compute_av(struct policydb *policydb,
*/
avkey.target_class = tclass;
avkey.specified = AVTAB_AV | AVTAB_XPERMS;
- sattr = flex_array_get(policydb->type_attr_map_array,
- scontext->type - 1);
+ sattr = &policydb->type_attr_map_array[scontext->type - 1];
BUG_ON(!sattr);
- tattr = flex_array_get(policydb->type_attr_map_array,
- tcontext->type - 1);
+ tattr = &policydb->type_attr_map_array[tcontext->type - 1];
BUG_ON(!tattr);
ebitmap_for_each_positive_bit(sattr, snode, i) {
ebitmap_for_each_positive_bit(tattr, tnode, j) {
@@ -900,8 +895,7 @@ int security_bounded_transition(struct selinux_state *state,
index = new_context->type;
while (true) {
- type = flex_array_get_ptr(policydb->type_val_to_struct_array,
- index - 1);
+ type = policydb->type_val_to_struct_array[index - 1];
BUG_ON(!type);
/* not bounded anymore */
@@ -1064,11 +1058,9 @@ void security_compute_xperms_decision(struct selinux_state *state,
avkey.target_class = tclass;
avkey.specified = AVTAB_XPERMS;
- sattr = flex_array_get(policydb->type_attr_map_array,
- scontext->type - 1);
+ sattr = &policydb->type_attr_map_array[scontext->type - 1];
BUG_ON(!sattr);
- tattr = flex_array_get(policydb->type_attr_map_array,
- tcontext->type - 1);
+ tattr = &policydb->type_attr_map_array[tcontext->type - 1];
BUG_ON(!tattr);
ebitmap_for_each_positive_bit(sattr, snode, i) {
ebitmap_for_each_positive_bit(tattr, tnode, j) {
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 9c7c95a5c497..cf52af77d15e 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -196,22 +196,13 @@ struct smack_known_list_elem {
enum {
Opt_error = -1,
- Opt_fsdefault = 1,
- Opt_fsfloor = 2,
- Opt_fshat = 3,
- Opt_fsroot = 4,
- Opt_fstransmute = 5,
+ Opt_fsdefault = 0,
+ Opt_fsfloor = 1,
+ Opt_fshat = 2,
+ Opt_fsroot = 3,
+ Opt_fstransmute = 4,
};
-/*
- * Mount options
- */
-#define SMK_FSDEFAULT "smackfsdef="
-#define SMK_FSFLOOR "smackfsfloor="
-#define SMK_FSHAT "smackfshat="
-#define SMK_FSROOT "smackfsroot="
-#define SMK_FSTRANS "smackfstransmute="
-
#define SMACK_DELETE_OPTION "-DELETE"
#define SMACK_CIPSO_OPTION "-CIPSO"
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 424bce4ef21d..5c1613519d5a 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -43,6 +43,8 @@
#include <linux/shm.h>
#include <linux/binfmts.h>
#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include "smack.h"
#define TRANS_TRUE "TRUE"
@@ -526,7 +528,6 @@ static int smack_syslog(int typefrom_file)
return rc;
}
-
/*
* Superblock Hooks.
*/
@@ -631,6 +632,92 @@ out_opt_err:
return -EINVAL;
}
+/**
+ * smack_fs_context_dup - Duplicate the security data on fs_context duplication
+ * @fc: The new filesystem context.
+ * @src_fc: The source filesystem context being duplicated.
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ */
+static int smack_fs_context_dup(struct fs_context *fc,
+ struct fs_context *src_fc)
+{
+ struct smack_mnt_opts *dst, *src = src_fc->security;
+
+ if (!src)
+ return 0;
+
+ fc->security = kzalloc(sizeof(struct smack_mnt_opts), GFP_KERNEL);
+ if (!fc->security)
+ return -ENOMEM;
+ dst = fc->security;
+
+ if (src->fsdefault) {
+ dst->fsdefault = kstrdup(src->fsdefault, GFP_KERNEL);
+ if (!dst->fsdefault)
+ return -ENOMEM;
+ }
+ if (src->fsfloor) {
+ dst->fsfloor = kstrdup(src->fsfloor, GFP_KERNEL);
+ if (!dst->fsfloor)
+ return -ENOMEM;
+ }
+ if (src->fshat) {
+ dst->fshat = kstrdup(src->fshat, GFP_KERNEL);
+ if (!dst->fshat)
+ return -ENOMEM;
+ }
+ if (src->fsroot) {
+ dst->fsroot = kstrdup(src->fsroot, GFP_KERNEL);
+ if (!dst->fsroot)
+ return -ENOMEM;
+ }
+ if (src->fstransmute) {
+ dst->fstransmute = kstrdup(src->fstransmute, GFP_KERNEL);
+ if (!dst->fstransmute)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static const struct fs_parameter_spec smack_param_specs[] = {
+ fsparam_string("fsdefault", Opt_fsdefault),
+ fsparam_string("fsfloor", Opt_fsfloor),
+ fsparam_string("fshat", Opt_fshat),
+ fsparam_string("fsroot", Opt_fsroot),
+ fsparam_string("fstransmute", Opt_fstransmute),
+ {}
+};
+
+static const struct fs_parameter_description smack_fs_parameters = {
+ .name = "smack",
+ .specs = smack_param_specs,
+};
+
+/**
+ * smack_fs_context_parse_param - Parse a single mount parameter
+ * @fc: The new filesystem context being constructed.
+ * @param: The parameter.
+ *
+ * Returns 0 on success, -ENOPARAM to pass the parameter on or anything else on
+ * error.
+ */
+static int smack_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct fs_parse_result result;
+ int opt, rc;
+
+ opt = fs_parse(fc, &smack_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ rc = smack_add_opt(opt, param->string, &fc->security);
+ if (!rc)
+ param->string = NULL;
+ return rc;
+}
+
static int smack_sb_eat_lsm_opts(char *options, void **mnt_opts)
{
char *from = options, *to = options;
@@ -4495,6 +4582,9 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
LSM_HOOK_INIT(syslog, smack_syslog),
+ LSM_HOOK_INIT(fs_context_dup, smack_fs_context_dup),
+ LSM_HOOK_INIT(fs_context_parse_param, smack_fs_context_parse_param),
+
LSM_HOOK_INIT(sb_alloc_security, smack_sb_alloc_security),
LSM_HOOK_INIT(sb_free_security, smack_sb_free_security),
LSM_HOOK_INIT(sb_free_mnt_opts, smack_free_mnt_opts),
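
Both selinux_fs_context_dup() and smack_fs_context_dup() follow the same deep-copy discipline: allocate a fresh options structure for the new context and kstrdup() every owned string, so the original and the duplicate can be torn down independently (a partial copy left behind by -ENOMEM is presumably cleaned up later along with the fs_context's security blob). A compilable userspace reduction of the pattern, with calloc/strdup standing in for kzalloc/kstrdup (names illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct mnt_opts {
        char *fsdefault;
        char *fsfloor;
    };

    static struct mnt_opts *opts_dup(const struct mnt_opts *src)
    {
        struct mnt_opts *dst = calloc(1, sizeof(*dst));

        if (!dst)
            return NULL;
        /* duplicate each owned string; sharing pointers would cause a
         * double free when both contexts are destroyed */
        if (src->fsdefault) {
            dst->fsdefault = strdup(src->fsdefault);
            if (!dst->fsdefault)
                goto fail;
        }
        if (src->fsfloor) {
            dst->fsfloor = strdup(src->fsfloor);
            if (!dst->fsfloor)
                goto fail;
        }
        return dst;

    fail:   /* unlike the kernel code, this sketch frees eagerly */
        free(dst->fsdefault);
        free(dst->fsfloor);
        free(dst);
        return NULL;
    }
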
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 57cc60722dd3..efac68556b45 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -206,7 +206,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
* yama_task_free - check for task_pid to remove from exception list
* @task: task being removed
*/
-void yama_task_free(struct task_struct *task)
+static void yama_task_free(struct task_struct *task)
{
yama_ptracer_del(task, task);
}
@@ -222,7 +222,7 @@ void yama_task_free(struct task_struct *task)
* Return 0 on success, -ve on error. -ENOSYS is returned when Yama
* does not handle the given option.
*/
-int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
int rc = -ENOSYS;
@@ -401,7 +401,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
*
* Returns 0 if following the ptrace is allowed, -ve on error.
*/
-int yama_ptrace_traceme(struct task_struct *parent)
+static int yama_ptrace_traceme(struct task_struct *parent)
{
int rc = 0;
@@ -452,7 +452,7 @@ static int yama_dointvec_minmax(struct ctl_table *table, int write,
static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;
-struct ctl_path yama_sysctl_path[] = {
+static struct ctl_path yama_sysctl_path[] = {
{ .procname = "kernel", },
{ .procname = "yama", },
{ }
diff --git a/sound/core/info.c b/sound/core/info.c
index 96a074019c33..0eb169acc850 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -713,8 +713,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent,
INIT_LIST_HEAD(&entry->list);
entry->parent = parent;
entry->module = module;
- if (parent)
+ if (parent) {
+ mutex_lock(&parent->access);
list_add_tail(&entry->list, &parent->children);
+ mutex_unlock(&parent->access);
+ }
return entry;
}
@@ -792,7 +795,12 @@ void snd_info_free_entry(struct snd_info_entry * entry)
list_for_each_entry_safe(p, n, &entry->children, list)
snd_info_free_entry(p);
- list_del(&entry->list);
+ p = entry->parent;
+ if (p) {
+ mutex_lock(&p->access);
+ list_del(&entry->list);
+ mutex_unlock(&p->access);
+ }
kfree(entry->name);
if (entry->private_free)
entry->private_free(entry);
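
The proc-info fix serialises both ends of the parent/child relationship on the parent's mutex: snd_info_create_entry() now takes parent->access around list_add_tail() and snd_info_free_entry() takes the same lock around list_del(), closing the window in which concurrent creation and deletion could corrupt the children list. The locking shape, reduced (kernel context assumed):

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct entry {
        struct list_head list;      /* link in parent->children */
        struct list_head children;
        struct mutex access;        /* guards this entry's children */
        struct entry *parent;
    };

    static void entry_attach(struct entry *e, struct entry *parent)
    {
        e->parent = parent;
        if (parent) {
            mutex_lock(&parent->access);
            list_add_tail(&e->list, &parent->children);
            mutex_unlock(&parent->access);
        }
    }

    static void entry_detach(struct entry *e)
    {
        struct entry *p = e->parent;

        if (p) {
            /* same lock as the attach path, so add and del never race */
            mutex_lock(&p->access);
            list_del(&e->list);
            mutex_unlock(&p->access);
        }
    }
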
diff --git a/sound/core/init.c b/sound/core/init.c
index 0c4dc40376a7..079c12d64b0e 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -382,14 +382,7 @@ int snd_card_disconnect(struct snd_card *card)
card->shutdown = 1;
spin_unlock(&card->files_lock);
- /* phase 1: disable fops (user space) operations for ALSA API */
- mutex_lock(&snd_card_mutex);
- snd_cards[card->number] = NULL;
- clear_bit(card->number, snd_cards_lock);
- mutex_unlock(&snd_card_mutex);
-
- /* phase 2: replace file->f_op with special dummy operations */
-
+ /* replace file->f_op with special dummy operations */
spin_lock(&card->files_lock);
list_for_each_entry(mfile, &card->files_list, list) {
/* it's a critical part, use an endless loop */
@@ -405,7 +398,7 @@ int snd_card_disconnect(struct snd_card *card)
}
spin_unlock(&card->files_lock);
- /* phase 3: notify all connected devices about disconnection */
+ /* notify all connected devices about disconnection */
/* at this point, they cannot respond to any calls except release() */
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -421,6 +414,13 @@ int snd_card_disconnect(struct snd_card *card)
device_del(&card->card_dev);
card->registered = false;
}
+
+ /* disable fops (user space) operations for ALSA API */
+ mutex_lock(&snd_card_mutex);
+ snd_cards[card->number] = NULL;
+ clear_bit(card->number, snd_cards_lock);
+ mutex_unlock(&snd_card_mutex);
+
#ifdef CONFIG_PM
wake_up(&card->power_sleep);
#endif
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index d5b0d7ba83c4..f6ae68017608 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
params_channels(params) / 8;
+ err = snd_pcm_oss_period_size(substream, params, sparams);
+ if (err < 0)
+ goto failure;
+
+ n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+ if (err < 0)
+ goto failure;
+
+ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+ runtime->oss.periods, NULL);
+ if (err < 0)
+ goto failure;
+
+ snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+ err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+ if (err < 0) {
+ pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+ goto failure;
+ }
+
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
snd_pcm_oss_plugin_clear(substream);
if (!direct) {
@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
}
#endif
- err = snd_pcm_oss_period_size(substream, params, sparams);
- if (err < 0)
- goto failure;
-
- n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
- if (err < 0)
- goto failure;
-
- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
- runtime->oss.periods, NULL);
- if (err < 0)
- goto failure;
-
- snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
- if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
- pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
- goto failure;
- }
-
if (runtime->oss.trigger) {
sw_params->start_threshold = 1;
} else {
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f731f904e8cc..1d8452912b14 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1445,8 +1445,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+ switch (runtime->status->state) {
+ case SNDRV_PCM_STATE_SUSPENDED:
return -EBUSY;
+ /* unresumable PCM state; return -EBUSY for skipping suspend */
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_DISCONNECTED:
+ return -EBUSY;
+ }
runtime->trigger_master = substream;
return 0;
}
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index ee601d7f0926..c0690d1ecd55 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/nospec.h>
#include <sound/rawmidi.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
return -ENXIO;
if (info->stream < 0 || info->stream > 1)
return -EINVAL;
+ info->stream = array_index_nospec(info->stream, 2);
pstr = &rmidi->streams[info->stream];
if (pstr->substream_count == 0)
return -ENOENT;
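
The added array_index_nospec() is the standard Spectre-v1 hardening: info->stream comes from userspace and, although it is bounds-checked, a mispredicted branch could still speculatively index rmidi->streams[] out of bounds; the helper clamps the index with a data dependency the CPU cannot speculate around. The shape of the fix (kernel context assumed; the struct name is illustrative):

    #include <linux/nospec.h>

    struct stream { int substream_count; };

    static struct stream *stream_lookup(struct stream streams[2], int idx)
    {
        /* architectural bounds check */
        if (idx < 0 || idx > 1)
            return NULL;
        /* speculative clamp: idx stays in [0, 2) even on a wrongly
         * predicted path, so streams[idx] cannot leak other memory */
        idx = array_index_nospec(idx, 2);
        return &streams[idx];
    }
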
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 278ebb993122..c93945917235 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -617,13 +617,14 @@ int
snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
- if (dev < 0 || dev >= dp->max_synthdev)
+ if (!info)
return -ENXIO;
- if (dp->synths[dev].is_midi) {
+ if (info->is_midi) {
struct midi_info minf;
- snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+ snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
inf->synth_type = SYNTH_TYPE_MIDI;
inf->synth_subtype = 0;
inf->nr_voices = 16;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 7d4640d1fe9f..38e7deab6384 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
/* fill the info fields */
if (client_info->name[0])
- strlcpy(client->name, client_info->name, sizeof(client->name));
+ strscpy(client->name, client_info->name, sizeof(client->name));
client->filter = client_info->filter;
client->event_lost = client_info->event_lost;
@@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
/* set queue name */
if (!info->name[0])
snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
- strlcpy(q->name, info->name, sizeof(q->name));
+ strscpy(q->name, info->name, sizeof(q->name));
snd_use_lock_free(&q->use_lock);
return 0;
@@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
queuefree(q);
return -EPERM;
}
- strlcpy(q->name, info->name, sizeof(q->name));
+ strscpy(q->name, info->name, sizeof(q->name));
queuefree(q);
return 0;
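
The strlcpy() -> strscpy() conversions are not just style: strlcpy() returns strlen(src) and therefore always walks the entire source string, even when only a prefix fits, which is unsafe if the source is a fixed-size buffer copied in from userspace that may lack a NUL terminator. strscpy() stops after at most the destination size and signals truncation with -E2BIG. A minimal sketch of the safer call (kernel context assumed):

    #include <linux/string.h>
    #include <linux/types.h>

    /* copy an untrusted, possibly unterminated name; true if it fit */
    static bool copy_client_name(char *dst, size_t dstsize, const char *src)
    {
        ssize_t n = strscpy(dst, src, dstsize);

        /* n >= 0: characters copied (NUL excluded); -E2BIG: truncated.
         * Either way, strscpy() reads at most dstsize bytes of src. */
        return n >= 0;
    }
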
diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
index 5b02bd49fde4..4e4ecc21760b 100644
--- a/sound/drivers/opl3/opl3_voice.h
+++ b/sound/drivers/opl3/opl3_voice.h
@@ -41,7 +41,7 @@ void snd_opl3_timer_func(struct timer_list *t);
/* Prototypes for opl3_drums.c */
void snd_opl3_load_drums(struct snd_opl3 *opl3);
-void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
+void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
/* Prototypes for opl3_oss.c */
#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index 220e61926ea4..513291ba0ab0 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -36,7 +36,7 @@ static void name_card(struct snd_motu *motu)
fw_csr_iterator_init(&it, motu->unit->directory);
while (fw_csr_iterator_next(&it, &key, &val)) {
switch (key) {
- case CSR_VERSION:
+ case CSR_MODEL:
version = val;
break;
}
@@ -46,7 +46,7 @@ static void name_card(struct snd_motu *motu)
strcpy(motu->card->shortname, motu->spec->name);
strcpy(motu->card->mixername, motu->spec->name);
snprintf(motu->card->longname, sizeof(motu->card->longname),
- "MOTU %s (version:%d), GUID %08x%08x at %s, S%d",
+ "MOTU %s (version:%06x), GUID %08x%08x at %s, S%d",
motu->spec->name, version,
fw_dev->config_rom[3], fw_dev->config_rom[4],
dev_name(&motu->unit->device), 100 << fw_dev->max_speed);
@@ -237,20 +237,20 @@ static const struct snd_motu_spec motu_audio_express = {
#define SND_MOTU_DEV_ENTRY(model, data) \
{ \
.match_flags = IEEE1394_MATCH_VENDOR_ID | \
- IEEE1394_MATCH_MODEL_ID | \
- IEEE1394_MATCH_SPECIFIER_ID, \
+ IEEE1394_MATCH_SPECIFIER_ID | \
+ IEEE1394_MATCH_VERSION, \
.vendor_id = OUI_MOTU, \
- .model_id = model, \
.specifier_id = OUI_MOTU, \
+ .version = model, \
.driver_data = (kernel_ulong_t)data, \
}
static const struct ieee1394_device_id motu_id_table[] = {
- SND_MOTU_DEV_ENTRY(0x101800, &motu_828mk2),
- SND_MOTU_DEV_ENTRY(0x107800, &snd_motu_spec_traveler),
- SND_MOTU_DEV_ENTRY(0x106800, &motu_828mk3), /* FireWire only. */
- SND_MOTU_DEV_ENTRY(0x100800, &motu_828mk3), /* Hybrid. */
- SND_MOTU_DEV_ENTRY(0x104800, &motu_audio_express),
+ SND_MOTU_DEV_ENTRY(0x000003, &motu_828mk2),
+ SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
+ SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
+ SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
+ SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
{ }
};
MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 9c37d9af3023..ec7715c6b0c0 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -107,7 +107,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
INIT_LIST_HEAD(&bus->hlink_list);
bus->idx = idx++;
- mutex_init(&bus->lock);
bus->cmd_dma_state = true;
return 0;
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
index 012305177f68..ad8eee08013f 100644
--- a/sound/hda/hdac_bus.c
+++ b/sound/hda/hdac_bus.c
@@ -38,6 +38,7 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events);
spin_lock_init(&bus->reg_lock);
mutex_init(&bus->cmd_mutex);
+ mutex_init(&bus->lock);
bus->irq = -1;
return 0;
}
diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c
index 5c95933e739a..1ea51e3b942a 100644
--- a/sound/hda/hdac_component.c
+++ b/sound/hda/hdac_component.c
@@ -69,13 +69,15 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
dev_dbg(bus->dev, "display power %s\n",
enable ? "enable" : "disable");
+
+ mutex_lock(&bus->lock);
if (enable)
set_bit(idx, &bus->display_power_status);
else
clear_bit(idx, &bus->display_power_status);
if (!acomp || !acomp->ops)
- return;
+ goto unlock;
if (bus->display_power_status) {
if (!bus->display_power_active) {
@@ -92,6 +94,8 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
bus->display_power_active = false;
}
}
+ unlock:
+ mutex_unlock(&bus->lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_display_power);
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index f5dd288d1a7a..76e9b41fcea2 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -95,7 +95,10 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start)
1 << azx_dev->index,
1 << azx_dev->index);
/* set stripe control */
- stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
+ if (azx_dev->substream)
+ stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
+ else
+ stripe_ctl = 0;
snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
stripe_ctl);
/* set DMA start and interrupt mask */
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index aa2a83eb81a9..dc27a480c2d9 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
/* block the 0x388 port to avoid PnP conflicts */
acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
+ if (!acard->fm_res) {
+ err = -EBUSY;
+ goto _err;
+ }
if (port[dev] != SNDRV_AUTO_PORT) {
if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index ea876b0b02b9..dc0084dc8550 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1952,6 +1952,11 @@ static int snd_echo_create(struct snd_card *card,
}
chip->dsp_registers = (volatile u32 __iomem *)
ioremap_nocache(chip->dsp_registers_phys, sz);
+ if (!chip->dsp_registers) {
+ dev_err(chip->card->dev, "ioremap failed\n");
+ snd_echo_free(chip);
+ return -ENOMEM;
+ }
if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
KBUILD_MODNAME, chip)) {
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5f2005098a60..701a69d856f5 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -969,6 +969,7 @@ int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
/* power-up all before initialization */
hda_set_power_state(codec, AC_PWRST_D0);
+ codec->core.dev.power.power_state = PMSG_ON;
snd_hda_codec_proc_new(codec);
@@ -2939,6 +2940,20 @@ static int hda_codec_runtime_resume(struct device *dev)
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
+static int hda_codec_force_resume(struct device *dev)
+{
+ int ret;
+
+ /* The get/put pair below enforces the runtime resume even if the
+ * device hasn't been used at suspend time. This trick is needed to
+ * update the jack state change during the sleep.
+ */
+ pm_runtime_get_noresume(dev);
+ ret = pm_runtime_force_resume(dev);
+ pm_runtime_put(dev);
+ return ret;
+}
+
static int hda_codec_pm_suspend(struct device *dev)
{
dev->power.power_state = PMSG_SUSPEND;
@@ -2948,7 +2963,7 @@ static int hda_codec_pm_suspend(struct device *dev)
static int hda_codec_pm_resume(struct device *dev)
{
dev->power.power_state = PMSG_RESUME;
- return pm_runtime_force_resume(dev);
+ return hda_codec_force_resume(dev);
}
static int hda_codec_pm_freeze(struct device *dev)
@@ -2960,13 +2975,13 @@ static int hda_codec_pm_freeze(struct device *dev)
static int hda_codec_pm_thaw(struct device *dev)
{
dev->power.power_state = PMSG_THAW;
- return pm_runtime_force_resume(dev);
+ return hda_codec_force_resume(dev);
}
static int hda_codec_pm_restore(struct device *dev)
{
dev->power.power_state = PMSG_RESTORE;
- return pm_runtime_force_resume(dev);
+ return hda_codec_force_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e5c49003e75f..2ec91085fa3e 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -947,7 +947,7 @@ static void __azx_runtime_suspend(struct azx *chip)
display_power(chip, false);
}
-static void __azx_runtime_resume(struct azx *chip)
+static void __azx_runtime_resume(struct azx *chip, bool from_rt)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct hdac_bus *bus = azx_bus(chip);
@@ -964,7 +964,7 @@ static void __azx_runtime_resume(struct azx *chip)
azx_init_pci(chip);
hda_intel_init_chip(chip, true);
- if (status) {
+ if (status && from_rt) {
list_for_each_codec(codec, &chip->bus)
if (status & (1 << codec->addr))
schedule_delayed_work(&codec->jackpoll_work,
@@ -1016,7 +1016,7 @@ static int azx_resume(struct device *dev)
chip->msi = 0;
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;
- __azx_runtime_resume(chip);
+ __azx_runtime_resume(chip, false);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
trace_azx_resume(chip);
@@ -1081,7 +1081,7 @@ static int azx_runtime_resume(struct device *dev)
chip = card->private_data;
if (!azx_has_pm_runtime(chip))
return 0;
- __azx_runtime_resume(chip);
+ __azx_runtime_resume(chip, true);
/* disable controller Wake Up event*/
azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
@@ -2142,12 +2142,18 @@ static struct snd_pci_quirk power_save_blacklist[] = {
SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
/* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
+ /* https://bugs.launchpad.net/bugs/1821663 */
+ SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
- SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
+ SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+ SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+ /* https://bugs.launchpad.net/bugs/1821663 */
+ SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
{}
};
#endif /* CONFIG_PM */
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index dbd8da5685cb..3d68f9ef7694 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -219,7 +219,6 @@ disable_hda:
return rc;
}
-#ifdef CONFIG_PM_SLEEP
static void hda_tegra_disable_clocks(struct hda_tegra *data)
{
clk_disable_unprepare(data->hda2hdmi_clk);
@@ -230,7 +229,7 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
/*
* power management
*/
-static int hda_tegra_suspend(struct device *dev)
+static int __maybe_unused hda_tegra_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
int rc;
@@ -243,7 +242,7 @@ static int hda_tegra_suspend(struct device *dev)
return 0;
}
-static int hda_tegra_resume(struct device *dev)
+static int __maybe_unused hda_tegra_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
int rc;
@@ -255,10 +254,8 @@ static int hda_tegra_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
-static int hda_tegra_runtime_suspend(struct device *dev)
+static int __maybe_unused hda_tegra_runtime_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
@@ -275,7 +272,7 @@ static int hda_tegra_runtime_suspend(struct device *dev)
return 0;
}
-static int hda_tegra_runtime_resume(struct device *dev)
+static int __maybe_unused hda_tegra_runtime_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
@@ -292,7 +289,6 @@ static int hda_tegra_runtime_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
static const struct dev_pm_ops hda_tegra_pm = {
SET_SYSTEM_SLEEP_PM_OPS(hda_tegra_suspend, hda_tegra_resume)
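
The Tegra driver drops its #ifdef CONFIG_PM_SLEEP / CONFIG_PM blocks in favour of __maybe_unused: SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() expand to nothing when the corresponding option is off, leaving the callbacks unreferenced, and the attribute silences the resulting unused-function warnings while the compiler still discards the dead code. The idiom in isolation (kernel context assumed; demo_* names are illustrative):

    #include <linux/pm.h>

    static int __maybe_unused demo_suspend(struct device *dev)
    {
        return 0;       /* quiesce the hardware */
    }

    static int __maybe_unused demo_resume(struct device *dev)
    {
        return 0;       /* bring the hardware back up */
    }

    static const struct dev_pm_ops demo_pm_ops = {
        /* expands to nothing when CONFIG_PM_SLEEP is off, and demo_*
         * are then compiled out instead of warning */
        SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
    };
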
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 29882bda7632..e1ebc6d5f382 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1005,7 +1005,6 @@ struct ca0132_spec {
unsigned int scp_resp_header;
unsigned int scp_resp_data[4];
unsigned int scp_resp_count;
- bool alt_firmware_present;
bool startup_check_entered;
bool dsp_reload;
@@ -7518,7 +7517,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
bool dsp_loaded = false;
struct ca0132_spec *spec = codec->spec;
const struct dsp_image_seg *dsp_os_image;
- const struct firmware *fw_entry;
+ const struct firmware *fw_entry = NULL;
/*
* Alternate firmwares for different variants. The Recon3Di apparently
* can use the default firmware, but I'll leave the option in case
@@ -7529,33 +7528,26 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
case QUIRK_R3D:
case QUIRK_AE5:
if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
- codec->card->dev) != 0) {
+ codec->card->dev) != 0)
codec_dbg(codec, "Desktop firmware not found.");
- spec->alt_firmware_present = false;
- } else {
+ else
codec_dbg(codec, "Desktop firmware selected.");
- spec->alt_firmware_present = true;
- }
break;
case QUIRK_R3DI:
if (request_firmware(&fw_entry, R3DI_EFX_FILE,
- codec->card->dev) != 0) {
+ codec->card->dev) != 0)
codec_dbg(codec, "Recon3Di alt firmware not detected.");
- spec->alt_firmware_present = false;
- } else {
+ else
codec_dbg(codec, "Recon3Di firmware selected.");
- spec->alt_firmware_present = true;
- }
break;
default:
- spec->alt_firmware_present = false;
break;
}
/*
* Use default ctefx.bin if no alt firmware is detected, or if none
* exists for your particular codec.
*/
- if (!spec->alt_firmware_present) {
+ if (!fw_entry) {
codec_dbg(codec, "Default firmware selected.");
if (request_firmware(&fw_entry, EFX_FILE,
codec->card->dev) != 0)
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index a4ee7656d9ee..fb65ad31e86c 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -936,6 +936,9 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 73d7042ff884..8b3ac690efa3 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -57,10 +57,11 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
#define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
((codec)->core.vendor_id == 0x80862800))
#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
+#define is_icelake(codec) ((codec)->core.vendor_id == 0x8086280f)
#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
|| is_skylake(codec) || is_broxton(codec) \
- || is_kabylake(codec)) || is_geminilake(codec) \
- || is_cannonlake(codec)
+ || is_kabylake(codec) || is_geminilake(codec) \
+ || is_cannonlake(codec) || is_icelake(codec))
#define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
#define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
#define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
@@ -181,6 +182,8 @@ struct hdmi_spec {
struct hdac_chmap chmap;
hda_nid_t vendor_nid;
+ const int *port_map;
+ int port_num;
};
#ifdef CONFIG_SND_HDA_COMPONENT
@@ -2418,12 +2421,11 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
snd_hda_override_conn_list(codec, nid, spec->num_cvts, spec->cvt_nids);
}
-#define INTEL_VENDOR_NID 0x08
-#define INTEL_GLK_VENDOR_NID 0x0B
-#define INTEL_GET_VENDOR_VERB 0xf81
-#define INTEL_SET_VENDOR_VERB 0x781
-#define INTEL_EN_DP12 0x02 /* enable DP 1.2 features */
-#define INTEL_EN_ALL_PIN_CVTS 0x01 /* enable 2nd & 3rd pins and convertors */
+#define INTEL_GET_VENDOR_VERB 0xf81
+#define INTEL_SET_VENDOR_VERB 0x781
+#define INTEL_EN_DP12 0x02 /* enable DP 1.2 features */
+#define INTEL_EN_ALL_PIN_CVTS 0x01 /* enable 2nd & 3rd pins and convertors */
static void intel_haswell_enable_all_pins(struct hda_codec *codec,
bool update_tree)
@@ -2503,11 +2505,29 @@ static int intel_base_nid(struct hda_codec *codec)
static int intel_pin2port(void *audio_ptr, int pin_nid)
{
- int base_nid = intel_base_nid(audio_ptr);
+ struct hda_codec *codec = audio_ptr;
+ struct hdmi_spec *spec = codec->spec;
+ int base_nid, i;
- if (WARN_ON(pin_nid < base_nid || pin_nid >= base_nid + 3))
- return -1;
- return pin_nid - base_nid + 1; /* intel port is 1-based */
+ if (!spec->port_num) {
+ base_nid = intel_base_nid(codec);
+ if (WARN_ON(pin_nid < base_nid || pin_nid >= base_nid + 3))
+ return -1;
+ return pin_nid - base_nid + 1; /* intel port is 1-based */
+ }
+
+ /*
+ * look up the pin number in the mapping table and return the
+ * index, which indicates the port number
+ */
+ for (i = 0; i < spec->port_num; i++) {
+ if (pin_nid == spec->port_map[i])
+ return i + 1;
+ }
+
+ /* return -1 if the pin is not in the mapping table */
+ codec_info(codec, "Can't find the HDMI/DP port for pin %d\n", pin_nid);
+ return -1;
}
static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
@@ -2608,7 +2628,8 @@ static int parse_intel_hdmi(struct hda_codec *codec)
}
/* Intel Haswell and onwards; audio component with eld notifier */
-static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid)
+static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
+ const int *port_map, int port_num)
{
struct hdmi_spec *spec;
int err;
@@ -2620,6 +2641,8 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid)
codec->dp_mst = true;
spec->dyn_pcm_assign = true;
spec->vendor_nid = vendor_nid;
+ spec->port_map = port_map;
+ spec->port_num = port_num;
intel_haswell_enable_all_pins(codec, true);
intel_haswell_fixup_enable_dp12(codec);
@@ -2638,12 +2661,23 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid)
static int patch_i915_hsw_hdmi(struct hda_codec *codec)
{
- return intel_hsw_common_init(codec, INTEL_VENDOR_NID);
+ return intel_hsw_common_init(codec, 0x08, NULL, 0);
}
static int patch_i915_glk_hdmi(struct hda_codec *codec)
{
- return intel_hsw_common_init(codec, INTEL_GLK_VENDOR_NID);
+ return intel_hsw_common_init(codec, 0x0b, NULL, 0);
+}
+
+static int patch_i915_icl_hdmi(struct hda_codec *codec)
+{
+ /*
+ * pin-to-port mapping table: each value is a pin number and its
+ * index gives the 1-based port number.
+ */
+ static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb};
+
+ return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
}
/* Intel Baytrail and Braswell; with eld notifier */
@@ -3886,6 +3920,7 @@ HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi),
HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI", patch_i915_cpt_hdmi),
+HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862802, "Cantiga HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862803, "Eaglelake HDMI", patch_generic_hdmi),
@@ -3899,7 +3934,7 @@ HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
-HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi),
+HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
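The Icelake support above drops the fixed base-NID arithmetic in intel_pin2port() in favour of a per-platform lookup table. A minimal, self-contained model of that lookup (plain C; the table mirrors the one in patch_i915_icl_hdmi, everything else is illustrative):

#include <stdio.h>

/* value = pin NID, index + 1 = port number (Intel ports are 1-based) */
static const int port_map[] = {0x4, 0x6, 0x8, 0xa, 0xb};

static int pin2port(int pin_nid)
{
	unsigned int i;

	for (i = 0; i < sizeof(port_map) / sizeof(port_map[0]); i++)
		if (pin_nid == port_map[i])
			return i + 1;	/* 1-based port number */
	return -1;			/* pin not wired to any port */
}

int main(void)
{
	printf("pin 0x8 -> port %d\n", pin2port(0x8));	/* prints 3 */
	printf("pin 0x5 -> port %d\n", pin2port(0x5));	/* prints -1 */
	return 0;
}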
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c8413d44973c..42cd3945e0de 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1864,8 +1864,8 @@ enum {
ALC887_FIXUP_BASS_CHMAP,
ALC1220_FIXUP_GB_DUAL_CODECS,
ALC1220_FIXUP_CLEVO_P950,
- ALC1220_FIXUP_SYSTEM76_ORYP5,
- ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
+ ALC1220_FIXUP_CLEVO_PB51ED,
+ ALC1220_FIXUP_CLEVO_PB51ED_PINS,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
const struct hda_fixup *fix, int action);
-static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
+static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
{
@@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc1220_fixup_clevo_p950,
},
- [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
+ [ALC1220_FIXUP_CLEVO_PB51ED] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc1220_fixup_system76_oryp5,
+ .v.func = alc1220_fixup_clevo_pb51ed,
},
- [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
+ [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
{}
},
.chained = true,
- .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
+ .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
},
};
@@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
- SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
- SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -5449,6 +5450,8 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
return;
spec->gen.preferred_dacs = preferred_pairs;
+ spec->gen.auto_mute_via_amp = 1;
+ codec->power_save_node = 0;
}
/* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
@@ -5521,6 +5524,25 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
}
}
+static void alc295_fixup_chromebook(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ switch (action) {
+ case HDA_FIXUP_ACT_INIT:
+ switch (codec->core.vendor_id) {
+ case 0x10ec0295:
+ alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
+ alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
+ break;
+ case 0x10ec0236:
+ alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+ alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+ break;
+ }
+ break;
+ }
+}
+
static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -5653,6 +5675,7 @@ enum {
ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
ALC233_FIXUP_LENOVO_MULTI_CODECS,
+ ALC233_FIXUP_ACER_HEADSET_MIC,
ALC294_FIXUP_LENOVO_MIC_LOCATION,
ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
ALC700_FIXUP_INTEL_REFERENCE,
@@ -5670,9 +5693,17 @@ enum {
ALC294_FIXUP_ASUS_MIC,
ALC294_FIXUP_ASUS_HEADSET_MIC,
ALC294_FIXUP_ASUS_SPK,
- ALC225_FIXUP_HEADSET_JACK,
ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
+ ALC255_FIXUP_ACER_HEADSET_MIC,
+ ALC295_FIXUP_CHROME_BOOK,
+ ALC225_FIXUP_HEADSET_JACK,
+ ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
+ ALC225_FIXUP_WYSE_AUTO_MUTE,
+ ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
+ ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC299_FIXUP_PREDATOR_SPK,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6473,6 +6504,16 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc233_alc662_fixup_lenovo_dual_codecs,
},
+ [ALC233_FIXUP_ACER_HEADSET_MIC] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE
+ },
[ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -6615,6 +6656,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
},
+ [ALC295_FIXUP_CHROME_BOOK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc295_fixup_chromebook,
+ .chained = true,
+ .chain_id = ALC225_FIXUP_HEADSET_JACK
+ },
[ALC225_FIXUP_HEADSET_JACK] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_jack,
@@ -6639,6 +6686,64 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
},
+ [ALC255_FIXUP_ACER_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11130 },
+ { 0x1a, 0x90a60140 }, /* use as internal mic */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
+ [ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x16, 0x01011020 }, /* Rear Line out */
+ { 0x19, 0x01a1913c }, /* use as Front headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC225_FIXUP_WYSE_AUTO_MUTE
+ },
+ [ALC225_FIXUP_WYSE_AUTO_MUTE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_auto_mute_via_amp,
+ .chained = true,
+ .chain_id = ALC225_FIXUP_WYSE_DISABLE_MIC_VREF
+ },
+ [ALC225_FIXUP_WYSE_DISABLE_MIC_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_mic_vref,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
+ [ALC286_FIXUP_ACER_AIO_HEADSET_MIC] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x4f },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x5029 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
+ },
+ [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ },
+ [ALC299_FIXUP_PREDATOR_SPK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x21, 0x90170150 }, /* use as headset mic, without its own jack detect */
+ { }
+ }
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6655,9 +6760,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
- SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+ SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -6702,6 +6813,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -6764,11 +6877,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
- SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -7048,7 +7163,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
{.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
{.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
- {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-sense-combo"},
+ {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"},
+ {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
+ {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
{}
};
#define ALC225_STANDARD_PINS \
@@ -7151,6 +7268,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60140},
{0x14, 0x90170150},
{0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x40000000},
+ {0x14, 0x90170110},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -7261,6 +7384,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC256_STANDARD_PINS),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170110},
+ {0x1b, 0x01011020},
+ {0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
{0x14, 0x90170110},
{0x1b, 0x90a70130},
@@ -7269,6 +7396,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x14, 0x90170110},
{0x1b, 0x90a70130},
{0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ {0x12, 0x90a60130},
+ {0x14, 0x90170110},
+ {0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ {0x12, 0x90a60130},
+ {0x14, 0x90170110},
+ {0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ {0x1a, 0x90a70130},
+ {0x1b, 0x90170110},
+ {0x21, 0x03211020}),
SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
{0x12, 0xb7a60130},
{0x13, 0xb8a61140},
@@ -7408,6 +7547,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60130},
{0x17, 0x90170110},
{0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
+ {0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170110},
+ {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_STANDARD_PINS,
{0x17, 0x21014020},
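The ALC295/ALC236 chromebook fixup above resets headphone jack detection by writing COEF bit 15 high and then low. A hedged sketch of that pulse as a driver-local helper (alc_update_coef_idx() is the existing read-modify-write COEF accessor in patch_realtek.c; the helper name is hypothetical):

/* hypothetical helper: pulse a single COEF bit high then low, as the
 * chromebook fixup does to reset headphone jack detect (HP JD) */
static void alc_pulse_coef_bit(struct hda_codec *codec,
			       unsigned int coef_idx, unsigned int bit)
{
	alc_update_coef_idx(codec, coef_idx, 1 << bit, 1 << bit); /* assert */
	alc_update_coef_idx(codec, coef_idx, 1 << bit, 0);	   /* release */
}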
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 419114edfd57..667fc1d59e18 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1151,6 +1151,7 @@ config SND_SOC_WCD9335
tristate "WCD9335 Codec"
depends on SLIMBUS
select REGMAP_SLIMBUS
+ select REGMAP_IRQ
help
The WCD9335 is a standalone Hi-Fi audio CODEC IC, supports
Qualcomm Technologies, Inc. (QTI) multimedia solutions,
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 03bbbcd3b6c1..19e7f0333c2a 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -1062,10 +1062,10 @@ static void anc_iir(struct snd_soc_component *component, unsigned int bnk,
snd_soc_component_update_bits(component, AB8500_ANCCONF1,
BIT(AB8500_ANCCONF1_ANCIIRINIT),
BIT(AB8500_ANCCONF1_ANCIIRINIT));
- usleep_range(AB8500_ANC_SM_DELAY, AB8500_ANC_SM_DELAY);
+ usleep_range(AB8500_ANC_SM_DELAY, AB8500_ANC_SM_DELAY*2);
snd_soc_component_update_bits(component, AB8500_ANCCONF1,
BIT(AB8500_ANCCONF1_ANCIIRINIT), 0);
- usleep_range(AB8500_ANC_SM_DELAY, AB8500_ANC_SM_DELAY);
+ usleep_range(AB8500_ANC_SM_DELAY, AB8500_ANC_SM_DELAY*2);
} else {
snd_soc_component_update_bits(component, AB8500_ANCCONF1,
BIT(AB8500_ANCCONF1_ANCIIRUPDATE),
@@ -2129,6 +2129,7 @@ static int ab8500_codec_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
dev_err(dai->component->dev,
"%s: ERROR: The device is either a master or a slave.\n",
__func__);
+ /* fall through */
default:
dev_err(dai->component->dev,
"%s: ERROR: Unsupporter master mask 0x%x\n",
diff --git a/sound/soc/codecs/adau1977-spi.c b/sound/soc/codecs/adau1977-spi.c
index 84ffbde9583f..2baf61567b59 100644
--- a/sound/soc/codecs/adau1977-spi.c
+++ b/sound/soc/codecs/adau1977-spi.c
@@ -10,6 +10,8 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <sound/soc.h>
@@ -54,9 +56,18 @@ static const struct spi_device_id adau1977_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, adau1977_spi_ids);
+static const struct of_device_id adau1977_spi_of_match[] = {
+ { .compatible = "adi,adau1977" },
+ { .compatible = "adi,adau1978" },
+ { .compatible = "adi,adau1979" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adau1977_spi_of_match);
+
static struct spi_driver adau1977_spi_driver = {
.driver = {
.name = "adau1977",
+ .of_match_table = of_match_ptr(adau1977_spi_of_match),
},
.probe = adau1977_spi_probe,
.id_table = adau1977_spi_ids,
diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
index 9f4a59871cee..c71696146c5e 100644
--- a/sound/soc/codecs/cs35l35.c
+++ b/sound/soc/codecs/cs35l35.c
@@ -1635,6 +1635,16 @@ err:
return ret;
}
+static int cs35l35_i2c_remove(struct i2c_client *i2c_client)
+{
+ struct cs35l35_private *cs35l35 = i2c_get_clientdata(i2c_client);
+
+ regulator_bulk_disable(cs35l35->num_supplies, cs35l35->supplies);
+ gpiod_set_value_cansleep(cs35l35->reset_gpio, 0);
+
+ return 0;
+}
+
static const struct of_device_id cs35l35_of_match[] = {
{.compatible = "cirrus,cs35l35"},
{},
@@ -1655,6 +1665,7 @@ static struct i2c_driver cs35l35_i2c_driver = {
},
.id_table = cs35l35_id,
.probe = cs35l35_i2c_probe,
+ .remove = cs35l35_i2c_remove,
};
module_i2c_driver(cs35l35_i2c_driver);
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 33d74f163bd7..793a14d58667 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -642,6 +642,7 @@ static const struct regmap_config cs4270_regmap = {
.reg_defaults = cs4270_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(cs4270_reg_defaults),
.cache_type = REGCACHE_RBTREE,
+ .write_flag_mask = CS4270_I2C_INCR,
.readable_reg = cs4270_reg_is_readable,
.volatile_reg = cs4270_reg_is_volatile,
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
index ffecdaaa8cf2..f889d94c8e3c 100644
--- a/sound/soc/codecs/hdac_hda.c
+++ b/sound/soc/codecs/hdac_hda.c
@@ -38,6 +38,9 @@ static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai);
static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai);
+static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai);
static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
@@ -50,6 +53,7 @@ static const struct snd_soc_dai_ops hdac_hda_dai_ops = {
.startup = hdac_hda_dai_open,
.shutdown = hdac_hda_dai_close,
.prepare = hdac_hda_dai_prepare,
+ .hw_params = hdac_hda_dai_hw_params,
.hw_free = hdac_hda_dai_hw_free,
.set_tdm_slot = hdac_hda_dai_set_tdm_slot,
};
@@ -139,6 +143,39 @@ static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
return 0;
}
+static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ unsigned int format_val;
+ unsigned int maxbps;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ maxbps = dai->driver->playback.sig_bits;
+ else
+ maxbps = dai->driver->capture.sig_bits;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ format_val = snd_hdac_calc_stream_format(params_rate(params),
+ params_channels(params),
+ params_format(params),
+ maxbps,
+ 0);
+ if (!format_val) {
+ dev_err(dai->dev,
+ "invalid format_val, rate=%d, ch=%d, format=%d, maxbps=%d\n",
+ params_rate(params), params_channels(params),
+ params_format(params), maxbps);
+
+ return -EINVAL;
+ }
+
+ hda_pvt->pcm[dai->id].format_val[substream->stream] = format_val;
+ return 0;
+}
+
static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -162,10 +199,9 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
+ struct hda_pcm_stream *hda_stream;
struct hdac_hda_priv *hda_pvt;
- struct snd_pcm_runtime *runtime = substream->runtime;
struct hdac_device *hdev;
- struct hda_pcm_stream *hda_stream;
unsigned int format_val;
struct hda_pcm *pcm;
unsigned int stream;
@@ -179,19 +215,8 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
hda_stream = &pcm->stream[substream->stream];
- format_val = snd_hdac_calc_stream_format(runtime->rate,
- runtime->channels,
- runtime->format,
- hda_stream->maxbps,
- 0);
- if (!format_val) {
- dev_err(&hdev->dev,
- "invalid format_val, rate=%d, ch=%d, format=%d\n",
- runtime->rate, runtime->channels, runtime->format);
- return -EINVAL;
- }
-
stream = hda_pvt->pcm[dai->id].stream_tag[substream->stream];
+ format_val = hda_pvt->pcm[dai->id].format_val[substream->stream];
ret = snd_hda_codec_prepare(&hda_pvt->codec, hda_stream,
stream, format_val, substream);
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h
index e444ef593360..6b1bd4f428e7 100644
--- a/sound/soc/codecs/hdac_hda.h
+++ b/sound/soc/codecs/hdac_hda.h
@@ -8,6 +8,7 @@
struct hdac_hda_pcm {
int stream_tag[2];
+ unsigned int format_val[2];
};
struct hdac_hda_priv {
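The hdac_hda change computes the HDA stream format once in hw_params(), where the negotiated rate/channels/format are available, caches it per DAI and direction, and lets prepare() consume the cached value. A condensed sketch of that split (snd_hdac_calc_stream_format() is the existing helper from <sound/hdaudio.h>; the struct mirrors hdac_hda_pcm above):

#include <linux/errno.h>
#include <sound/hdaudio.h>
#include <sound/pcm.h>

struct sketch_pcm {
	int stream_tag[2];		/* one per direction */
	unsigned int format_val[2];	/* cached by hw_params() */
};

static int sketch_hw_params(struct sketch_pcm *pcm, int dir,
			    unsigned int rate, unsigned int channels,
			    snd_pcm_format_t format, unsigned int maxbps)
{
	unsigned int val = snd_hdac_calc_stream_format(rate, channels,
						       format, maxbps, 0);
	if (!val)
		return -EINVAL;		/* unsupported combination */
	pcm->format_val[dir] = val;	/* prepare() reads this later */
	return 0;
}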
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index e5b6769b9797..35df73e42cbc 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -484,9 +484,6 @@ static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
params_width(params), params_rate(params),
params_channels(params));
- if (params_width(params) > 24)
- params->msbits = 24;
-
ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status,
sizeof(hp.iec.status));
if (ret < 0) {
@@ -529,73 +526,71 @@ static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
{
struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
struct hdmi_codec_daifmt cf = { 0 };
- int ret = 0;
dev_dbg(dai->dev, "%s()\n", __func__);
- if (dai->id == DAI_ID_SPDIF) {
- cf.fmt = HDMI_SPDIF;
- } else {
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- cf.bit_clk_master = 1;
- cf.frame_clk_master = 1;
- break;
- case SND_SOC_DAIFMT_CBS_CFM:
- cf.frame_clk_master = 1;
- break;
- case SND_SOC_DAIFMT_CBM_CFS:
- cf.bit_clk_master = 1;
- break;
- case SND_SOC_DAIFMT_CBS_CFS:
- break;
- default:
- return -EINVAL;
- }
+ if (dai->id == DAI_ID_SPDIF)
+ return 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ cf.bit_clk_master = 1;
+ cf.frame_clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ cf.frame_clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ cf.bit_clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- break;
- case SND_SOC_DAIFMT_NB_IF:
- cf.frame_clk_inv = 1;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- cf.bit_clk_inv = 1;
- break;
- case SND_SOC_DAIFMT_IB_IF:
- cf.frame_clk_inv = 1;
- cf.bit_clk_inv = 1;
- break;
- }
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ cf.frame_clk_inv = 1;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ cf.bit_clk_inv = 1;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ cf.frame_clk_inv = 1;
+ cf.bit_clk_inv = 1;
+ break;
+ }
- switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
- case SND_SOC_DAIFMT_I2S:
- cf.fmt = HDMI_I2S;
- break;
- case SND_SOC_DAIFMT_DSP_A:
- cf.fmt = HDMI_DSP_A;
- break;
- case SND_SOC_DAIFMT_DSP_B:
- cf.fmt = HDMI_DSP_B;
- break;
- case SND_SOC_DAIFMT_RIGHT_J:
- cf.fmt = HDMI_RIGHT_J;
- break;
- case SND_SOC_DAIFMT_LEFT_J:
- cf.fmt = HDMI_LEFT_J;
- break;
- case SND_SOC_DAIFMT_AC97:
- cf.fmt = HDMI_AC97;
- break;
- default:
- dev_err(dai->dev, "Invalid DAI interface format\n");
- return -EINVAL;
- }
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ cf.fmt = HDMI_I2S;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ cf.fmt = HDMI_DSP_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ cf.fmt = HDMI_DSP_B;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ cf.fmt = HDMI_RIGHT_J;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ cf.fmt = HDMI_LEFT_J;
+ break;
+ case SND_SOC_DAIFMT_AC97:
+ cf.fmt = HDMI_AC97;
+ break;
+ default:
+ dev_err(dai->dev, "Invalid DAI interface format\n");
+ return -EINVAL;
}
hcp->daifmt[dai->id] = cf;
- return ret;
+ return 0;
}
static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
@@ -792,8 +787,10 @@ static int hdmi_codec_probe(struct platform_device *pdev)
i++;
}
- if (hcd->spdif)
+ if (hcd->spdif) {
hcp->daidrv[i] = hdmi_spdif_dai;
+ hcp->daifmt[DAI_ID_SPDIF].fmt = HDMI_SPDIF;
+ }
dev_set_drvdata(dev, hcp);
diff --git a/sound/soc/codecs/nau8810.c b/sound/soc/codecs/nau8810.c
index bfd74b86c9d2..645aa0794123 100644
--- a/sound/soc/codecs/nau8810.c
+++ b/sound/soc/codecs/nau8810.c
@@ -411,9 +411,9 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
ARRAY_SIZE(nau8810_mono_mixer_controls)),
- SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+ SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3,
NAU8810_DAC_EN_SFT, 0),
- SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+ SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2,
NAU8810_ADC_EN_SFT, 0),
SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
NAU8810_NSPK_EN_SFT, 0, NULL, 0),
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 87ed3dc496dc..5ab05e75edea 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -681,8 +681,8 @@ static const struct snd_soc_dapm_widget nau8824_dapm_widgets[] = {
SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2,
NAU8824_ADCR_EN_SFT, 0),
- SND_SOC_DAPM_AIF_OUT("AIFTX", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
- SND_SOC_DAPM_AIF_IN("AIFRX", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIFRX", "Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC,
NAU8824_DACL_EN_SFT, 0),
@@ -831,6 +831,36 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
}
}
+static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
+{
+ struct snd_soc_dapm_context *dapm = nau8824->dapm;
+ const char *prefix = dapm->component->name_prefix;
+ char prefixed_pin[80];
+
+ if (prefix) {
+ snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+ prefix, pin);
+ snd_soc_dapm_disable_pin(dapm, prefixed_pin);
+ } else {
+ snd_soc_dapm_disable_pin(dapm, pin);
+ }
+}
+
+static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
+{
+ struct snd_soc_dapm_context *dapm = nau8824->dapm;
+ const char *prefix = dapm->component->name_prefix;
+ char prefixed_pin[80];
+
+ if (prefix) {
+ snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+ prefix, pin);
+ snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
+ } else {
+ snd_soc_dapm_force_enable_pin(dapm, pin);
+ }
+}
+
static void nau8824_eject_jack(struct nau8824 *nau8824)
{
struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -839,8 +869,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
/* Clear all interruption status */
nau8824_int_status_clear_all(regmap);
- snd_soc_dapm_disable_pin(dapm, "SAR");
- snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+ nau8824_dapm_disable_pin(nau8824, "SAR");
+ nau8824_dapm_disable_pin(nau8824, "MICBIAS");
snd_soc_dapm_sync(dapm);
/* Enable the insertion interruption, disable the ejection
@@ -870,8 +900,8 @@ static void nau8824_jdet_work(struct work_struct *work)
struct regmap *regmap = nau8824->regmap;
int adc_value, event = 0, event_mask = 0;
- snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
- snd_soc_dapm_force_enable_pin(dapm, "SAR");
+ nau8824_dapm_enable_pin(nau8824, "MICBIAS");
+ nau8824_dapm_enable_pin(nau8824, "SAR");
snd_soc_dapm_sync(dapm);
msleep(100);
@@ -882,8 +912,8 @@ static void nau8824_jdet_work(struct work_struct *work)
if (adc_value < HEADSET_SARADC_THD) {
event |= SND_JACK_HEADPHONE;
- snd_soc_dapm_disable_pin(dapm, "SAR");
- snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+ nau8824_dapm_disable_pin(nau8824, "SAR");
+ nau8824_dapm_disable_pin(nau8824, "MICBIAS");
snd_soc_dapm_sync(dapm);
} else {
event |= SND_JACK_HEADSET;
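nau8824 now resolves DAPM pin names against an optional component name_prefix before enabling or disabling them, since a prefixed component registers its pins as "<prefix> <pin>". A tiny runnable model of the name composition (plain C; the real code passes the result to snd_soc_dapm_disable_pin()/snd_soc_dapm_force_enable_pin()):

#include <stdio.h>

/* compose "<prefix> <pin>" when a name_prefix is set, else use pin as-is */
static const char *pin_name(const char *prefix, const char *pin,
			    char *buf, size_t len)
{
	if (!prefix)
		return pin;
	snprintf(buf, len, "%s %s", prefix, pin);
	return buf;
}

int main(void)
{
	char buf[80];	/* same size the driver uses */

	printf("%s\n", pin_name(NULL, "MICBIAS", buf, sizeof(buf)));
	printf("%s\n", pin_name("Left", "MICBIAS", buf, sizeof(buf)));
	return 0;
}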
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 9d5acd2d04ab..86a7fa31c294 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -910,13 +910,21 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
int jack_insert)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
- struct snd_soc_dapm_context *dapm =
- snd_soc_component_get_dapm(component);
unsigned int val, count;
if (jack_insert) {
- snd_soc_dapm_force_enable_pin(dapm, "CBJ Power");
- snd_soc_dapm_sync(dapm);
+
+ snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+ RT5682_PWR_VREF2 | RT5682_PWR_MB,
+ RT5682_PWR_VREF2 | RT5682_PWR_MB);
+ snd_soc_component_update_bits(component,
+ RT5682_PWR_ANLG_1, RT5682_PWR_FV2, 0);
+ usleep_range(15000, 20000);
+ snd_soc_component_update_bits(component,
+ RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
+ snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
+ RT5682_PWR_CBJ, RT5682_PWR_CBJ);
+
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
@@ -944,8 +952,10 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
- snd_soc_dapm_disable_pin(dapm, "CBJ Power");
- snd_soc_dapm_sync(dapm);
+ snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+ RT5682_PWR_VREF2 | RT5682_PWR_MB, 0);
+ snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
+ RT5682_PWR_CBJ, 0);
rt5682->jack_type = 0;
}
@@ -1198,7 +1208,7 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
- int ref, val, reg, sft, mask, idx = -EINVAL;
+ int ref, val, reg, idx = -EINVAL;
static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48};
static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48};
@@ -1212,15 +1222,10 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
idx = rt5682_div_sel(rt5682, ref, div_f, ARRAY_SIZE(div_f));
- if (w->shift == RT5682_PWR_ADC_S1F_BIT) {
+ if (w->shift == RT5682_PWR_ADC_S1F_BIT)
reg = RT5682_PLL_TRACK_3;
- sft = RT5682_ADC_OSR_SFT;
- mask = RT5682_ADC_OSR_MASK;
- } else {
+ else
reg = RT5682_PLL_TRACK_2;
- sft = RT5682_DAC_OSR_SFT;
- mask = RT5682_DAC_OSR_MASK;
- }
snd_soc_component_update_bits(component, reg,
RT5682_FILTER_CLK_DIV_MASK, idx << RT5682_FILTER_CLK_DIV_SFT);
@@ -1232,7 +1237,8 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
}
snd_soc_component_update_bits(component, RT5682_ADDA_CLK_1,
- mask, idx << sft);
+ RT5682_ADC_OSR_MASK | RT5682_DAC_OSR_MASK,
+ (idx << RT5682_ADC_OSR_SFT) | (idx << RT5682_DAC_OSR_SFT));
return 0;
}
@@ -1591,8 +1597,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
0, NULL, 0),
SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0,
rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0,
- rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
/* ASRC */
SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5682_PLL_TRACK_1,
@@ -1627,9 +1631,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM,
0, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("CBJ Power", RT5682_PWR_ANLG_3,
- RT5682_PWR_CBJ_BIT, 0, NULL, 0),
-
/* REC Mixer */
SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5682_rec1_l_mix,
ARRAY_SIZE(rt5682_rec1_l_mix)),
@@ -1792,17 +1793,13 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
/*Vref*/
{"MICBIAS1", NULL, "Vref1"},
- {"MICBIAS1", NULL, "Vref2"},
{"MICBIAS2", NULL, "Vref1"},
- {"MICBIAS2", NULL, "Vref2"},
{"CLKDET SYS", NULL, "CLKDET"},
{"IN1P", NULL, "LDO2"},
{"BST1 CBJ", NULL, "IN1P"},
- {"BST1 CBJ", NULL, "CBJ Power"},
- {"CBJ Power", NULL, "Vref2"},
{"RECMIX1L", "CBJ Switch", "BST1 CBJ"},
{"RECMIX1L", NULL, "RECMIX1L Power"},
@@ -1912,9 +1909,7 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
{"HP Amp", NULL, "Capless"},
{"HP Amp", NULL, "Charge Pump"},
{"HP Amp", NULL, "CLKDET SYS"},
- {"HP Amp", NULL, "CBJ Power"},
{"HP Amp", NULL, "Vref1"},
- {"HP Amp", NULL, "Vref2"},
{"HPOL Playback", "Switch", "HP Amp"},
{"HPOR Playback", "Switch", "HP Amp"},
{"HPOL", NULL, "HPOL Playback"},
@@ -2303,16 +2298,13 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
switch (level) {
case SND_SOC_BIAS_PREPARE:
regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
- RT5682_PWR_MB | RT5682_PWR_BG,
- RT5682_PWR_MB | RT5682_PWR_BG);
+ RT5682_PWR_BG, RT5682_PWR_BG);
regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO,
RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO);
break;
case SND_SOC_BIAS_STANDBY:
- regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
- RT5682_PWR_MB, RT5682_PWR_MB);
regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
RT5682_DIG_GATE_CTRL, RT5682_DIG_GATE_CTRL);
break;
@@ -2320,7 +2312,7 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO, 0);
regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
- RT5682_PWR_MB | RT5682_PWR_BG, 0);
+ RT5682_PWR_BG, 0);
break;
default:
@@ -2363,6 +2355,8 @@ static int rt5682_resume(struct snd_soc_component *component)
regcache_cache_only(rt5682->regmap, false);
regcache_sync(rt5682->regmap);
+ rt5682_irq(0, rt5682);
+
return 0;
}
#else
diff --git a/sound/soc/codecs/tlv320aic32x4-i2c.c b/sound/soc/codecs/tlv320aic32x4-i2c.c
index 385fa2e9525a..22c3a6bc0b6c 100644
--- a/sound/soc/codecs/tlv320aic32x4-i2c.c
+++ b/sound/soc/codecs/tlv320aic32x4-i2c.c
@@ -3,7 +3,7 @@
*
* Copyright 2011 NW Digital Radio
*
- * Author: Jeremy McDermond <nh6z@nh6z.net>
+ * Author: Annaliese McDermond <nh6z@nh6z.net>
*
* Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
*
@@ -72,5 +72,5 @@ static struct i2c_driver aic32x4_i2c_driver = {
module_i2c_driver(aic32x4_i2c_driver);
MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver I2C");
-MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic32x4-spi.c b/sound/soc/codecs/tlv320aic32x4-spi.c
index 07d78ae51e05..aa5b7ba0254b 100644
--- a/sound/soc/codecs/tlv320aic32x4-spi.c
+++ b/sound/soc/codecs/tlv320aic32x4-spi.c
@@ -3,7 +3,7 @@
*
* Copyright 2011 NW Digital Radio
*
- * Author: Jeremy McDermond <nh6z@nh6z.net>
+ * Author: Annaliese McDermond <nh6z@nh6z.net>
*
* Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
*
@@ -74,5 +74,5 @@ static struct spi_driver aic32x4_spi_driver = {
module_spi_driver(aic32x4_spi_driver);
MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver SPI");
-MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 96f1526cb258..5520044929f4 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -490,6 +490,8 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("IN2_R"),
SND_SOC_DAPM_INPUT("IN3_L"),
SND_SOC_DAPM_INPUT("IN3_R"),
+ SND_SOC_DAPM_INPUT("CM_L"),
+ SND_SOC_DAPM_INPUT("CM_R"),
};
static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 283583d1db60..516d17cb2182 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1609,7 +1609,6 @@ static int aic3x_probe(struct snd_soc_component *component)
struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component);
int ret, i;
- INIT_LIST_HEAD(&aic3x->list);
aic3x->component = component;
for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) {
@@ -1873,6 +1872,7 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
if (ret != 0)
goto err_gpio;
+ INIT_LIST_HEAD(&aic3x->list);
list_add(&aic3x->list, &reset_list);
return 0;
@@ -1889,6 +1889,8 @@ static int aic3x_i2c_remove(struct i2c_client *client)
{
struct aic3x_priv *aic3x = i2c_get_clientdata(client);
+ list_del(&aic3x->list);
+
if (gpio_is_valid(aic3x->gpio_reset) &&
!aic3x_is_shared_reset(aic3x)) {
gpio_set_value(aic3x->gpio_reset, 0);
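The aic3x fix pairs list_add() in the i2c probe path with a matching list_del() in remove, so unbinding the device no longer leaves a dangling node on reset_list. A minimal sketch of the pairing (kernel list API; the names are illustrative):

#include <linux/list.h>

static LIST_HEAD(sketch_reset_list);

struct sketch_priv {
	struct list_head list;
};

static void sketch_probe(struct sketch_priv *p)
{
	INIT_LIST_HEAD(&p->list);
	list_add(&p->list, &sketch_reset_list);
}

static void sketch_remove(struct sketch_priv *p)
{
	list_del(&p->list);	/* balances list_add() from probe */
}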
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index b93fdc8d2d6f..b0b48eb9c7c9 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -2905,6 +2905,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
if (wm_adsp_fw[dsp->fw].num_caps != 0)
wm_adsp_buffer_free(dsp);
+ dsp->fatal_error = false;
+
mutex_unlock(&dsp->pwr_lock);
adsp_dbg(dsp, "Execution stopped\n");
@@ -3000,6 +3002,9 @@ static int wm_adsp_compr_attach(struct wm_adsp_compr *compr)
{
struct wm_adsp_compr_buf *buf = NULL, *tmp;
+ if (compr->dsp->fatal_error)
+ return -EINVAL;
+
list_for_each_entry(tmp, &compr->dsp->buffer_list, list) {
if (!tmp->name || !strcmp(compr->name, tmp->name)) {
buf = tmp;
@@ -3535,11 +3540,11 @@ static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf)
ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error);
if (ret < 0) {
- adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret);
+ compr_err(buf, "Failed to check buffer error: %d\n", ret);
return ret;
}
if (buf->error != 0) {
- adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error);
+ compr_err(buf, "Buffer error occurred: %d\n", buf->error);
return -EIO;
}
@@ -3571,8 +3576,6 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
if (ret < 0)
break;
- wm_adsp_buffer_clear(compr->buf);
-
/* Trigger the IRQ at one fragment of data */
ret = wm_adsp_buffer_write(compr->buf,
HOST_BUFFER_FIELD(high_water_mark),
@@ -3584,6 +3587,8 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
}
break;
case SNDRV_PCM_TRIGGER_STOP:
+ if (wm_adsp_compr_attached(compr))
+ wm_adsp_buffer_clear(compr->buf);
break;
default:
ret = -EINVAL;
@@ -3917,22 +3922,40 @@ int wm_adsp2_lock(struct wm_adsp *dsp, unsigned int lock_regions)
}
EXPORT_SYMBOL_GPL(wm_adsp2_lock);
+static void wm_adsp_fatal_error(struct wm_adsp *dsp)
+{
+ struct wm_adsp_compr *compr;
+
+ dsp->fatal_error = true;
+
+ list_for_each_entry(compr, &dsp->compr_list, list) {
+ if (compr->stream) {
+ snd_compr_stop_error(compr->stream,
+ SNDRV_PCM_STATE_XRUN);
+ snd_compr_fragment_elapsed(compr->stream);
+ }
+ }
+}
+
irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
{
unsigned int val;
struct regmap *regmap = dsp->regmap;
int ret = 0;
+ mutex_lock(&dsp->pwr_lock);
+
ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
if (ret) {
adsp_err(dsp,
"Failed to read Region Lock Ctrl register: %d\n", ret);
- return IRQ_HANDLED;
+ goto error;
}
if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
adsp_err(dsp, "watchdog timeout error\n");
wm_adsp_stop_watchdog(dsp);
+ wm_adsp_fatal_error(dsp);
}
if (val & (ADSP2_SLAVE_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) {
@@ -3946,7 +3969,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
adsp_err(dsp,
"Failed to read Bus Err Addr register: %d\n",
ret);
- return IRQ_HANDLED;
+ goto error;
}
adsp_err(dsp, "bus error address = 0x%x\n",
@@ -3959,7 +3982,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
adsp_err(dsp,
"Failed to read Pmem Xmem Err Addr register: %d\n",
ret);
- return IRQ_HANDLED;
+ goto error;
}
adsp_err(dsp, "xmem error address = 0x%x\n",
@@ -3972,6 +3995,9 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
+error:
+ mutex_unlock(&dsp->pwr_lock);
+
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(wm_adsp2_bus_error);
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index 59e07ad16329..8f09b4419a91 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -85,6 +85,7 @@ struct wm_adsp {
bool preloaded;
bool booted;
bool running;
+ bool fatal_error;
struct list_head ctl_list;
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 528e8b108422..0b937924d2e4 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -445,6 +445,19 @@ struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir)
}
EXPORT_SYMBOL_GPL(fsl_asrc_get_dma_channel);
+static int fsl_asrc_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai);
+
+ /* Odd channel number is not valid for older ASRC (channel_bits==3) */
+ if (asrc_priv->channel_bits == 3)
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+
+ return 0;
+}
+
static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -539,6 +552,7 @@ static int fsl_asrc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
}
static const struct snd_soc_dai_ops fsl_asrc_dai_ops = {
+ .startup = fsl_asrc_dai_startup,
.hw_params = fsl_asrc_dai_hw_params,
.hw_free = fsl_asrc_dai_hw_free,
.trigger = fsl_asrc_dai_trigger,
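The new startup() hook above uses snd_pcm_hw_constraint_step() to restrict the channel count to multiples of two on older ASRC hardware. A runnable model of what a step-2 constraint admits (illustrative only; the real filtering happens inside ALSA's constraint engine):

#include <stdio.h>

/* a step constraint admits only multiples of `step` */
static int step_ok(unsigned int channels, unsigned int step)
{
	return channels % step == 0;
}

int main(void)
{
	unsigned int ch;

	for (ch = 1; ch <= 6; ch++)
		printf("%u channels: %s\n", ch,
		       step_ok(ch, 2) ? "accepted" : "rejected");
	return 0;
}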
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index afe67c865330..3623aa9a6f2e 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -54,6 +54,8 @@ struct fsl_esai {
u32 fifo_depth;
u32 slot_width;
u32 slots;
+ u32 tx_mask;
+ u32 rx_mask;
u32 hck_rate[2];
u32 sck_rate[2];
bool hck_dir[2];
@@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
-
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
-
esai_priv->slot_width = slot_width;
esai_priv->slots = slots;
+ esai_priv->tx_mask = tx_mask;
+ esai_priv->rx_mask = rx_mask;
return 0;
}
@@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
u8 i, channels = substream->runtime->channels;
u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
+ u32 mask;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
for (i = 0; tx && i < channels; i++)
regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
+ /*
+ * Setting TE/RE at the end of the enablement flow causes a
+ * channel swap issue in the multi-data-line case. To work
+ * around it, switch the bit enablement to the sequence below:
+ * 1) clear xSMB & xSMA: done in probe and in the stop
+ *    state.
+ * 2) set TE/RE
+ * 3) set xSMB
+ * 4) set xSMA: xSMA is the last one in this flow; setting
+ *    it triggers the ESAI to start.
+ */
regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
+ mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
+
+ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+ ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
+ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+ ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
+
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
+ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+ ESAI_xSMA_xS_MASK, 0);
+ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+ ESAI_xSMB_xS_MASK, 0);
/* Disable and reset FIFO */
regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
@@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
return ret;
}
+ esai_priv->tx_mask = 0xFFFFFFFF;
+ esai_priv->rx_mask = 0xFFFFFFFF;
+
+ /* Clear the TSMA, TSMB, RSMA, RSMB */
+ regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
+ regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
+ regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
+ regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
+
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
&fsl_esai_dai, 1);
if (ret) {
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index bb12351330e8..69bc4848d787 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -20,6 +20,8 @@
#include <linux/string.h>
#include <sound/simple_card_utils.h>
+#define DPCM_SELECTABLE 1
+
struct graph_priv {
struct snd_soc_card snd_card;
struct graph_dai_props {
@@ -440,6 +442,7 @@ static int graph_for_each_link(struct graph_priv *priv,
struct device_node *codec_port;
struct device_node *codec_port_old = NULL;
struct asoc_simple_card_data adata;
+ uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
int rc, ret;
/* loop for all listed CPU port */
@@ -470,8 +473,9 @@ static int graph_for_each_link(struct graph_priv *priv,
* if Codec port has many endpoints,
* or has convert-xxx property
*/
- if ((of_get_child_count(codec_port) > 1) ||
- adata.convert_rate || adata.convert_channels)
+ if (dpcm_selectable &&
+ ((of_get_child_count(codec_port) > 1) ||
+ adata.convert_rate || adata.convert_channels))
ret = func_dpcm(priv, cpu_ep, codec_ep, li,
(codec_port_old == codec_port));
/* else normal sound */
@@ -732,7 +736,8 @@ static int graph_remove(struct platform_device *pdev)
static const struct of_device_id graph_of_match[] = {
{ .compatible = "audio-graph-card", },
- { .compatible = "audio-graph-scu-card", },
+ { .compatible = "audio-graph-scu-card",
+ .data = (void *)DPCM_SELECTABLE },
{},
};
MODULE_DEVICE_TABLE(of, graph_of_match);
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 7147bba45a2a..34de32efc4c4 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -9,12 +9,15 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <sound/simple_card.h>
#include <sound/soc-dai.h>
#include <sound/soc.h>
+#define DPCM_SELECTABLE 1
+
struct simple_priv {
struct snd_soc_card snd_card;
struct simple_dai_props {
@@ -441,6 +444,7 @@ static int simple_for_each_link(struct simple_priv *priv,
struct device *dev = simple_priv_to_dev(priv);
struct device_node *top = dev->of_node;
struct device_node *node;
+ uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
bool is_top = 0;
int ret = 0;
@@ -480,8 +484,9 @@ static int simple_for_each_link(struct simple_priv *priv,
* if it has many CPUs,
* or has convert-xxx property
*/
- if (num > 2 ||
- adata.convert_rate || adata.convert_channels)
+ if (dpcm_selectable &&
+ (num > 2 ||
+ adata.convert_rate || adata.convert_channels))
ret = func_dpcm(priv, np, codec, li, is_top);
/* else normal sound */
else
@@ -822,7 +827,8 @@ static int simple_remove(struct platform_device *pdev)
static const struct of_device_id simple_of_match[] = {
{ .compatible = "simple-audio-card", },
- { .compatible = "simple-scu-audio-card", },
+ { .compatible = "simple-scu-audio-card",
+ .data = (void *)DPCM_SELECTABLE },
{},
};
MODULE_DEVICE_TABLE(of, simple_of_match);
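Both generic cards now gate the DPCM path on driver match data instead of taking it unconditionally, so only the -scu- compatibles opt in. A hedged kernel-style sketch of the flag plumbing (DPCM_SELECTABLE as defined in the patch; the function name is hypothetical):

#include <linux/of_device.h>
#include <linux/types.h>

#define DPCM_SELECTABLE 1

static bool sketch_wants_dpcm(struct device *dev)
{
	/* non-NULL .data in the matched of_device_id enables DPCM */
	uintptr_t flags = (uintptr_t)of_device_get_match_data(dev);

	return flags & DPCM_SELECTABLE;
}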
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 08cea5b5cda9..0e8b1c5eec88 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -706,9 +706,17 @@ static int sst_soc_probe(struct snd_soc_component *component)
return sst_dsp_init_v2_dpcm(component);
}
+static void sst_soc_remove(struct snd_soc_component *component)
+{
+ struct sst_data *drv = dev_get_drvdata(component->dev);
+
+ drv->soc_card = NULL;
+}
+
static const struct snd_soc_component_driver sst_soc_platform_drv = {
.name = DRV_NAME,
.probe = sst_soc_probe,
+ .remove = sst_soc_remove,
.ops = &sst_platform_ops,
.compr_ops = &sst_platform_compr_ops,
.pcm_new = sst_pcm_new,
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index b8e884803777..4decba338156 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -226,7 +226,7 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
struct snd_soc_card *card;
struct snd_soc_acpi_mach *mach;
const char *platform_name;
- const char *i2c_name = NULL;
+ struct acpi_device *adev;
int dai_index = 0;
int ret_val = 0;
int i;
@@ -244,10 +244,11 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
- if (i2c_name) {
+ adev = acpi_dev_get_first_match_dev(mach->id, NULL, -1);
+ if (adev) {
snprintf(codec_name, sizeof(codec_name),
- "%s%s", "i2c-", i2c_name);
+ "i2c-%s", acpi_dev_name(adev));
+ put_device(&adev->dev);
dailink[dai_index].codec_name = codec_name;
}
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index d2a7e6ba11ae..6937c00cf63d 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -442,7 +442,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct snd_soc_acpi_mach *mach;
const char *platform_name;
- const char *i2c_name = NULL;
+ struct acpi_device *adev;
struct device *codec_dev;
int dai_index = 0;
int i;
@@ -463,10 +463,11 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
- if (i2c_name) {
+ adev = acpi_dev_get_first_match_dev(mach->id, NULL, -1);
+ if (adev) {
snprintf(codec_name, sizeof(codec_name),
- "%s%s", "i2c-", i2c_name);
+ "i2c-%s", acpi_dev_name(adev));
+ put_device(&adev->dev);
byt_cht_es8316_dais[dai_index].codec_name = codec_name;
}
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 940eb27158da..f9175cf6747e 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1154,7 +1154,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
struct byt_rt5640_private *priv;
struct snd_soc_acpi_mach *mach;
const char *platform_name;
- const char *i2c_name = NULL;
+ struct acpi_device *adev;
int ret_val = 0;
int dai_index = 0;
int i;
@@ -1178,11 +1178,11 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
- if (i2c_name) {
+ adev = acpi_dev_get_first_match_dev(mach->id, NULL, -1);
+ if (adev) {
snprintf(byt_rt5640_codec_name, sizeof(byt_rt5640_codec_name),
- "%s%s", "i2c-", i2c_name);
-
+ "i2c-%s", acpi_dev_name(adev));
+ put_device(&adev->dev);
byt_rt5640_dais[dai_index].codec_name = byt_rt5640_codec_name;
}
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index b0a4d297176e..b744add01d12 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -867,8 +867,8 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
struct byt_rt5651_private *priv;
struct snd_soc_acpi_mach *mach;
const char *platform_name;
+ struct acpi_device *adev;
struct device *codec_dev;
- const char *i2c_name = NULL;
const char *hp_swapped;
bool is_bytcr = false;
int ret_val = 0;
@@ -894,14 +894,16 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
- if (!i2c_name) {
+ adev = acpi_dev_get_first_match_dev(mach->id, NULL, -1);
+ if (adev) {
+ snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
+ "i2c-%s", acpi_dev_name(adev));
+ put_device(&adev->dev);
+ byt_rt5651_dais[dai_index].codec_name = byt_rt5651_codec_name;
+ } else {
dev_err(&pdev->dev, "Error cannot find '%s' dev\n", mach->id);
return -ENODEV;
}
- snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
- "%s%s", "i2c-", i2c_name);
- byt_rt5651_dais[dai_index].codec_name = byt_rt5651_codec_name;
codec_dev = bus_find_device_by_name(&i2c_bus_type, NULL,
byt_rt5651_codec_name);
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 3263b0495853..c0e0844f75b9 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -43,6 +43,7 @@ struct cht_mc_private {
struct clk *mclk;
struct snd_soc_jack jack;
bool ts3a227e_present;
+ int quirks;
};
static int platform_clock_control(struct snd_soc_dapm_widget *w,
@@ -54,6 +55,10 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
int ret;
+ /* See the comment in snd_cht_mc_probe() */
+ if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+ return 0;
+
codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI);
if (!codec_dai) {
dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
@@ -223,6 +228,10 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
"jack detection gpios not added, error %d\n", ret);
}
+ /* See the comment in snd_cht_mc_probe() */
+ if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+ return 0;
+
/*
* The firmware might enable the clock at
* boot (this information may or may not
@@ -423,16 +432,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
const char *mclk_name;
struct snd_soc_acpi_mach *mach;
const char *platform_name;
- int quirks = 0;
-
- dmi_id = dmi_first_match(cht_max98090_quirk_table);
- if (dmi_id)
- quirks = (unsigned long)dmi_id->driver_data;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
+ dmi_id = dmi_first_match(cht_max98090_quirk_table);
+ if (dmi_id)
+ drv->quirks = (unsigned long)dmi_id->driver_data;
+
drv->ts3a227e_present = acpi_dev_found("104C227E");
if (!drv->ts3a227e_present) {
/* no need probe TI jack detection chip */
@@ -458,7 +466,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
snd_soc_card_cht.dev = &pdev->dev;
snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
- if (quirks & QUIRK_PMC_PLT_CLK_0)
+ if (drv->quirks & QUIRK_PMC_PLT_CLK_0)
mclk_name = "pmc_plt_clk_0";
else
mclk_name = "pmc_plt_clk_3";
@@ -471,6 +479,21 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
return PTR_ERR(drv->mclk);
}
+ /*
+ * Boards which have the MAX98090's clk connected to clk_0 do not seem
+ * to like it if we muck with the clock. If we disable the clock when
+ * it is unused we get "max98090 i2c-193C9890:00: PLL unlocked" errors
+ * and the PLL never seems to lock again.
+ * So for these boards we enable it here once and leave it at that.
+ */
+ if (drv->quirks & QUIRK_PMC_PLT_CLK_0) {
+ ret_val = clk_prepare_enable(drv->mclk);
+ if (ret_val < 0) {
+ dev_err(&pdev->dev, "MCLK enable error: %d\n", ret_val);
+ return ret_val;
+ }
+ }
+
ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
if (ret_val) {
dev_err(&pdev->dev,
@@ -481,11 +504,23 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
return ret_val;
}
+static int snd_cht_mc_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+ if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+ clk_disable_unprepare(ctx->mclk);
+
+ return 0;
+}
+
static struct platform_driver snd_cht_mc_driver = {
.driver = {
.name = "cht-bsw-max98090",
},
.probe = snd_cht_mc_probe,
+ .remove = snd_cht_mc_remove,
};
module_platform_driver(snd_cht_mc_driver)
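
Moving the DMI quirks from a probe-local variable into struct cht_mc_private is what makes the new remove() callback work: the clock enabled once in probe for QUIRK_PMC_PLT_CLK_0 boards can now be found and balanced at unbind. The shape of that pairing, condensed; the private struct and drvdata layout here are illustrative (the real driver reaches the private data through the card):

    /* Sketch of the probe/remove clk pairing; not the full driver. */
    #include <linux/clk.h>
    #include <linux/platform_device.h>

    struct example_priv {
        struct clk *mclk;
        int quirks;
    };

    static int example_probe(struct platform_device *pdev)
    {
        struct example_priv *drv;
        int ret;

        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
            return -ENOMEM;

        drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_0");
        if (IS_ERR(drv->mclk))
            return PTR_ERR(drv->mclk);

        ret = clk_prepare_enable(drv->mclk);   /* enabled once, never toggled */
        if (ret < 0)
            return ret;

        platform_set_drvdata(pdev, drv);
        return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
        struct example_priv *drv = platform_get_drvdata(pdev);

        clk_disable_unprepare(drv->mclk);      /* balance probe */
        return 0;
    }
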
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index cbc2d458483f..32dbeaf1ab94 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -532,7 +532,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
struct snd_soc_acpi_mach *mach;
const char *platform_name;
struct cht_mc_private *drv;
- const char *i2c_name = NULL;
+ struct acpi_device *adev;
bool found = false;
bool is_bytcr = false;
int dai_index = 0;
@@ -573,10 +573,11 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
- if (i2c_name) {
+ adev = acpi_dev_get_first_match_dev(mach->id, NULL, -1);
+ if (adev) {
snprintf(cht_rt5645_codec_name, sizeof(cht_rt5645_codec_name),
- "%s%s", "i2c-", i2c_name);
+ "i2c-%s", acpi_dev_name(adev));
+ put_device(&adev->dev);
cht_dailink[dai_index].codec_name = cht_rt5645_codec_name;
}
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 3d5a2b3a06f0..0f7770822388 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -401,7 +401,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
struct cht_mc_private *drv;
struct snd_soc_acpi_mach *mach = pdev->dev.platform_data;
const char *platform_name;
- const char *i2c_name;
+ struct acpi_device *adev;
int i;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
@@ -411,10 +411,11 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
strcpy(drv->codec_name, RT5672_I2C_DEFAULT);
/* fixup codec name based on HID */
- i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
- if (i2c_name) {
+ adev = acpi_dev_get_first_match_dev(mach->id, NULL, -1);
+ if (adev) {
snprintf(drv->codec_name, sizeof(drv->codec_name),
- "i2c-%s", i2c_name);
+ "i2c-%s", acpi_dev_name(adev));
+ put_device(&adev->dev);
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
if (!strcmp(cht_dailink[i].codec_name,
RT5672_I2C_DEFAULT)) {
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 7044d8c2b187..879f14257a3e 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -405,7 +405,7 @@ static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
};
static const unsigned int dmic_2ch[] = {
- 4,
+ 2,
};
static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
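
For context on the one-line fix above: dmic_2ch feeds a channels constraint, and snd_pcm_hw_constraint_list() restricts the runtime to exactly the listed values, so a stereo DMIC must list 2, not 4. Typical wiring, condensed:

    /* Sketch: constrain a capture stream to the listed channel counts. */
    #include <sound/pcm.h>

    static const unsigned int dmic_2ch[] = { 2 };

    static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
        .count = ARRAY_SIZE(dmic_2ch),
        .list = dmic_2ch,
    };

    /* typically called from the DAI startup callback */
    static int example_startup(struct snd_pcm_substream *substream)
    {
        return snd_pcm_hw_constraint_list(substream->runtime, 0,
                                          SNDRV_PCM_HW_PARAM_CHANNELS,
                                          &constraints_dmic_2ch);
    }
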
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 28c4806b196a..4bf70b4429f0 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -483,6 +483,7 @@ static void skl_set_base_module_format(struct skl_sst *ctx,
base_cfg->audio_fmt.bit_depth = format->bit_depth;
base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
+ base_cfg->audio_fmt.sample_type = format->sample_type;
dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
format->bit_depth, format->valid_bit_depth,
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 1ae83f4ccc36..9735e2412251 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -181,6 +181,7 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
struct hdac_stream *hstream;
struct hdac_ext_stream *stream;
struct hdac_ext_link *link;
+ unsigned char stream_tag;
hstream = snd_hdac_get_stream(bus, params->stream,
params->link_dma_id + 1);
@@ -199,10 +200,13 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
snd_hdac_ext_link_stream_setup(stream, format_val);
- list_for_each_entry(link, &bus->hlink_list, list) {
- if (link->index == params->link_index)
- snd_hdac_ext_link_set_stream_id(link,
- hstream->stream_tag);
+ stream_tag = hstream->stream_tag;
+ if (stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ list_for_each_entry(link, &bus->hlink_list, list) {
+ if (link->index == params->link_index)
+ snd_hdac_ext_link_set_stream_id(link,
+ stream_tag);
+ }
}
stream->link_prepared = 1;
@@ -645,6 +649,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
struct hdac_ext_stream *link_dev =
snd_soc_dai_get_dma_data(dai, substream);
struct hdac_ext_link *link;
+ unsigned char stream_tag;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
@@ -654,7 +659,11 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
if (!link)
return -EINVAL;
- snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ stream_tag = hdac_stream(link_dev)->stream_tag;
+ snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+ }
+
snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
return 0;
}
@@ -1453,13 +1462,20 @@ static int skl_platform_soc_probe(struct snd_soc_component *component)
return 0;
}
+static void skl_pcm_remove(struct snd_soc_component *component)
+{
+ /* remove topology */
+ snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL);
+}
+
static const struct snd_soc_component_driver skl_component = {
.name = "pcm",
.probe = skl_platform_soc_probe,
+ .remove = skl_pcm_remove,
.ops = &skl_platform_ops,
.pcm_new = skl_pcm_new,
.pcm_free = skl_pcm_free,
- .ignore_module_refcount = 1, /* do not increase the refcount in core */
+ .module_get_upon_open = 1, /* increment refcount when a pcm is opened */
};
int skl_platform_register(struct device *dev)
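
The skl_component change above is one half of a behavior switch: with module_get_upon_open set, the ASoC core stops pinning the component's module at bind time and instead takes the reference on each PCM open and drops it on close (the soc-core.c and soc-pcm.c hunks further down carry the other half, using the component driver's owner module). Reduced to the bare pairing, with THIS_MODULE standing in for that owner:

    /* Sketch of the per-open refcount pairing the flag requests. */
    #include <linux/module.h>
    #include <sound/pcm.h>

    static int example_open(struct snd_pcm_substream *substream)
    {
        if (!try_module_get(THIS_MODULE))   /* pin while a stream is open */
            return -ENODEV;
        return 0;
    }

    static int example_close(struct snd_pcm_substream *substream)
    {
        module_put(THIS_MODULE);            /* dropped on close */
        return 0;
    }
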
diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
index 1b8bcdaf02d1..9a163d7064d1 100644
--- a/sound/soc/mediatek/common/mtk-btcvsd.c
+++ b/sound/soc/mediatek/common/mtk-btcvsd.c
@@ -49,6 +49,7 @@ enum bt_sco_state {
BT_SCO_STATE_IDLE,
BT_SCO_STATE_RUNNING,
BT_SCO_STATE_ENDING,
+ BT_SCO_STATE_LOOPBACK,
};
enum bt_sco_direct {
@@ -486,7 +487,8 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
if (bt->rx->state != BT_SCO_STATE_RUNNING &&
bt->rx->state != BT_SCO_STATE_ENDING &&
bt->tx->state != BT_SCO_STATE_RUNNING &&
- bt->tx->state != BT_SCO_STATE_ENDING) {
+ bt->tx->state != BT_SCO_STATE_ENDING &&
+ bt->tx->state != BT_SCO_STATE_LOOPBACK) {
dev_warn(bt->dev, "%s(), in idle state: rx->state: %d, tx->state: %d\n",
__func__, bt->rx->state, bt->tx->state);
goto irq_handler_exit;
@@ -512,6 +514,42 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
buf_cnt_tx = btsco_packet_info[packet_type][2];
buf_cnt_rx = btsco_packet_info[packet_type][3];
+ if (bt->tx->state == BT_SCO_STATE_LOOPBACK) {
+ u8 *src, *dst;
+ unsigned long connsys_addr_rx, ap_addr_rx;
+ unsigned long connsys_addr_tx, ap_addr_tx;
+
+ connsys_addr_rx = *bt->bt_reg_pkt_r;
+ ap_addr_rx = (unsigned long)bt->bt_sram_bank2_base +
+ (connsys_addr_rx & 0xFFFF);
+
+ connsys_addr_tx = *bt->bt_reg_pkt_w;
+ ap_addr_tx = (unsigned long)bt->bt_sram_bank2_base +
+ (connsys_addr_tx & 0xFFFF);
+
+ if (connsys_addr_tx == 0xdeadfeed ||
+ connsys_addr_rx == 0xdeadfeed) {
+ /* bt return 0xdeadfeed if read reg during bt sleep */
+ dev_warn(bt->dev, "%s(), connsys_addr_tx == 0xdeadfeed\n",
+ __func__);
+ goto irq_handler_exit;
+ }
+
+ src = (u8 *)ap_addr_rx;
+ dst = (u8 *)ap_addr_tx;
+
+ mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_BT2ARM, src,
+ bt->tx->temp_packet_buf,
+ packet_length,
+ packet_num);
+ mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
+ bt->tx->temp_packet_buf, dst,
+ packet_length,
+ packet_num);
+ bt->rx->rw_cnt++;
+ bt->tx->rw_cnt++;
+ }
+
if (bt->rx->state == BT_SCO_STATE_RUNNING ||
bt->rx->state == BT_SCO_STATE_ENDING) {
if (bt->rx->xrun) {
@@ -1067,6 +1105,33 @@ static int btcvsd_band_set(struct snd_kcontrol *kcontrol,
return 0;
}
+static int btcvsd_loopback_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+ bool lpbk_en = bt->tx->state == BT_SCO_STATE_LOOPBACK;
+
+ ucontrol->value.integer.value[0] = lpbk_en;
+ return 0;
+}
+
+static int btcvsd_loopback_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+ if (ucontrol->value.integer.value[0]) {
+ mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_LOOPBACK);
+ mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_LOOPBACK);
+ } else {
+ mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_RUNNING);
+ mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_RUNNING);
+ }
+ return 0;
+}
+
static int btcvsd_tx_mute_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1202,6 +1267,8 @@ static int btcvsd_tx_timestamp_get(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new mtk_btcvsd_snd_controls[] = {
SOC_ENUM_EXT("BTCVSD Band", btcvsd_enum[0],
btcvsd_band_get, btcvsd_band_set),
+ SOC_SINGLE_BOOL_EXT("BTCVSD Loopback Switch", 0,
+ btcvsd_loopback_get, btcvsd_loopback_set),
SOC_SINGLE_BOOL_EXT("BTCVSD Tx Mute Switch", 0,
btcvsd_tx_mute_get, btcvsd_tx_mute_set),
SOC_SINGLE_BOOL_EXT("BTCVSD Tx Irq Received Switch", 0,
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-clk.c b/sound/soc/mediatek/mt8183/mt8183-afe-clk.c
index f523ad103acc..48e81c5d52fc 100644
--- a/sound/soc/mediatek/mt8183/mt8183-afe-clk.c
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-clk.c
@@ -605,6 +605,10 @@ void mt8183_mck_disable(struct mtk_base_afe *afe, int mck_id)
int m_sel_id = mck_div[mck_id].m_sel_id;
int div_clk_id = mck_div[mck_id].div_clk_id;
+ /* i2s5 mck not support */
+ if (mck_id == MT8183_I2S5_MCK)
+ return;
+
clk_disable_unprepare(afe_priv->clk[div_clk_id]);
if (m_sel_id >= 0)
clk_disable_unprepare(afe_priv->clk[m_sel_id]);
diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
index 400e29edb1c9..d0b403a0e27b 100644
--- a/sound/soc/rockchip/rockchip_pdm.c
+++ b/sound/soc/rockchip/rockchip_pdm.c
@@ -24,7 +24,7 @@
#include "rockchip_pdm.h"
-#define PDM_DMA_BURST_SIZE (16) /* size * width: 16*4 = 64 bytes */
+#define PDM_DMA_BURST_SIZE (8) /* size * width: 8*4 = 32 bytes */
struct rk_pdm_dev {
struct device *dev;
@@ -208,7 +208,9 @@ static int rockchip_pdm_set_fmt(struct snd_soc_dai *cpu_dai,
return -EINVAL;
}
+ pm_runtime_get_sync(cpu_dai->dev);
regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, mask, val);
+ pm_runtime_put(cpu_dai->dev);
return 0;
}
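
The rockchip_pdm fix brackets the regmap write with a runtime-PM reference because set_fmt can run while the controller is runtime-suspended, and MMIO access needs the block powered and clocked. The generic shape:

    /* Sketch: bracket an MMIO access with runtime-PM so clocks are on. */
    #include <linux/pm_runtime.h>
    #include <linux/regmap.h>

    static int example_update(struct device *dev, struct regmap *map,
                              unsigned int reg, unsigned int mask,
                              unsigned int val)
    {
        int ret;

        pm_runtime_get_sync(dev);    /* power and clock the block */
        ret = regmap_update_bits(map, reg, mask, val);
        pm_runtime_put(dev);         /* drop the reference when done */
        return ret;
    }
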
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 4231001226f4..ab471d550d17 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1130,11 +1130,11 @@ static const struct snd_soc_dapm_widget samsung_i2s_widgets[] = {
};
static const struct snd_soc_dapm_route samsung_i2s_dapm_routes[] = {
- { "Playback Mixer", NULL, "Primary" },
- { "Playback Mixer", NULL, "Secondary" },
+ { "Playback Mixer", NULL, "Primary Playback" },
+ { "Playback Mixer", NULL, "Secondary Playback" },
{ "Mixer DAI TX", NULL, "Playback Mixer" },
- { "Playback Mixer", NULL, "Mixer DAI RX" },
+ { "Primary Capture", NULL, "Mixer DAI RX" },
};
static const struct snd_soc_component_driver samsung_i2s_component = {
@@ -1155,7 +1155,8 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv,
int num_dais)
{
static const char *dai_names[] = { "samsung-i2s", "samsung-i2s-sec" };
- static const char *stream_names[] = { "Primary", "Secondary" };
+ static const char *stream_names[] = { "Primary Playback",
+ "Secondary Playback" };
struct snd_soc_dai_driver *dai_drv;
struct i2s_dai *dai;
int i;
@@ -1201,6 +1202,7 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv,
dai_drv->capture.channels_max = 2;
dai_drv->capture.rates = i2s_dai_data->pcm_rates;
dai_drv->capture.formats = SAMSUNG_I2S_FMTS;
+ dai_drv->capture.stream_name = "Primary Capture";
return 0;
}
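
The route fix above works because ASoC creates a DAPM widget for each DAI stream named after its stream_name, and route endpoints are matched by string, so the renamed playback streams and the new dedicated capture name keep the mixer graph and the widgets in sync. A minimal sketch of that coupling (rates and formats illustrative):

    /* Sketch: stream_name strings double as DAPM route endpoints. */
    #include <sound/soc.h>

    static struct snd_soc_dai_driver example_dai = {
        .playback = {
            .stream_name  = "Primary Playback",
            .channels_min = 2,
            .channels_max = 2,
            .rates        = SNDRV_PCM_RATE_8000_96000,
            .formats      = SNDRV_PCM_FMTBIT_S16_LE,
        },
    };

    static const struct snd_soc_dapm_route example_routes[] = {
        /* { sink, control, source } */
        { "Playback Mixer", NULL, "Primary Playback" },
    };
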
diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
index 694512f980fd..1dc54c4206f0 100644
--- a/sound/soc/samsung/odroid.c
+++ b/sound/soc/samsung/odroid.c
@@ -91,11 +91,11 @@ static int odroid_card_be_hw_params(struct snd_pcm_substream *substream,
return ret;
/*
- * We add 1 to the rclk_freq value in order to avoid too low clock
+ * We add 2 to the rclk_freq value in order to avoid too low clock
* frequency values due to the EPLL output frequency not being exact
* multiple of the audio sampling rate.
*/
- rclk_freq = params_rate(params) * rfs + 1;
+ rclk_freq = params_rate(params) * rfs + 2;
ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
if (ret < 0)
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 022996d2db13..4fe83e611c01 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -110,6 +110,8 @@ static const struct of_device_id rsnd_of_match[] = {
{ .compatible = "renesas,rcar_sound-gen1", .data = (void *)RSND_GEN1 },
{ .compatible = "renesas,rcar_sound-gen2", .data = (void *)RSND_GEN2 },
{ .compatible = "renesas,rcar_sound-gen3", .data = (void *)RSND_GEN3 },
+ /* Special Handling */
+ { .compatible = "renesas,rcar_sound-r8a77990", .data = (void *)(RSND_GEN3 | RSND_SOC_E) },
{},
};
MODULE_DEVICE_TABLE(of, rsnd_of_match);
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 90625c57847b..0e6ef4e18400 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -607,6 +607,8 @@ struct rsnd_priv {
#define RSND_GEN1 (1 << 0)
#define RSND_GEN2 (2 << 0)
#define RSND_GEN3 (3 << 0)
+#define RSND_SOC_MASK (0xFF << 4)
+#define RSND_SOC_E (1 << 4) /* E1/E2/E3 */
/*
* below value will be filled on rsnd_gen_probe()
@@ -679,6 +681,9 @@ struct rsnd_priv {
#define rsnd_is_gen1(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN1)
#define rsnd_is_gen2(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN2)
#define rsnd_is_gen3(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN3)
+#define rsnd_is_e3(priv) (((priv)->flags & \
+ (RSND_GEN_MASK | RSND_SOC_MASK)) == \
+ (RSND_GEN3 | RSND_SOC_E))
#define rsnd_flags_has(p, f) ((p)->flags & (f))
#define rsnd_flags_set(p, f) ((p)->flags |= (f))
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index db81e066b92e..585ffba0244b 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -14,7 +14,6 @@
*/
#include "rsnd.h"
-#include <linux/sys_soc.h>
#define SRC_NAME "src"
@@ -135,7 +134,7 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
return rate;
}
-const static u32 bsdsr_table_pattern1[] = {
+static const u32 bsdsr_table_pattern1[] = {
0x01800000, /* 6 - 1/6 */
0x01000000, /* 6 - 1/4 */
0x00c00000, /* 6 - 1/3 */
@@ -144,7 +143,7 @@ const static u32 bsdsr_table_pattern1[] = {
0x00400000, /* 6 - 1 */
};
-const static u32 bsdsr_table_pattern2[] = {
+static const u32 bsdsr_table_pattern2[] = {
0x02400000, /* 6 - 1/6 */
0x01800000, /* 6 - 1/4 */
0x01200000, /* 6 - 1/3 */
@@ -153,7 +152,7 @@ const static u32 bsdsr_table_pattern2[] = {
0x00600000, /* 6 - 1 */
};
-const static u32 bsisr_table[] = {
+static const u32 bsisr_table[] = {
0x00100060, /* 6 - 1/6 */
0x00100040, /* 6 - 1/4 */
0x00100030, /* 6 - 1/3 */
@@ -162,7 +161,7 @@ const static u32 bsisr_table[] = {
0x00100020, /* 6 - 1 */
};
-const static u32 chan288888[] = {
+static const u32 chan288888[] = {
0x00000006, /* 1 to 2 */
0x000001fe, /* 1 to 8 */
0x000001fe, /* 1 to 8 */
@@ -171,7 +170,7 @@ const static u32 chan288888[] = {
0x000001fe, /* 1 to 8 */
};
-const static u32 chan244888[] = {
+static const u32 chan244888[] = {
0x00000006, /* 1 to 2 */
0x0000001e, /* 1 to 4 */
0x0000001e, /* 1 to 4 */
@@ -180,7 +179,7 @@ const static u32 chan244888[] = {
0x000001fe, /* 1 to 8 */
};
-const static u32 chan222222[] = {
+static const u32 chan222222[] = {
0x00000006, /* 1 to 2 */
0x00000006, /* 1 to 2 */
0x00000006, /* 1 to 2 */
@@ -189,18 +188,12 @@ const static u32 chan222222[] = {
0x00000006, /* 1 to 2 */
};
-static const struct soc_device_attribute ov_soc[] = {
- { .soc_id = "r8a77990" }, /* E3 */
- { /* sentinel */ }
-};
-
static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
struct rsnd_mod *mod)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
- const struct soc_device_attribute *soc = soc_device_match(ov_soc);
int is_play = rsnd_io_is_play(io);
int use_src = 0;
u32 fin, fout;
@@ -307,7 +300,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
/*
* E3 need to overwrite
*/
- if (soc)
+ if (rsnd_is_e3(priv))
switch (rsnd_mod_id(mod)) {
case 0:
case 4:
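
The rsnd.h and src.c hunks above replace a runtime soc_device_match() against "r8a77990" (E3) with a flag carried in the OF match data, so the E3 special case costs a mask test instead of a table walk. The check is a plain flags comparison; RSND_GEN_MASK is not shown in this hunk, so its width below is an assumption:

    /* Sketch of the test behind rsnd_is_e3(); GEN_MASK width assumed. */
    #define EX_GEN_MASK   (0xF << 0)    /* assumed to mirror RSND_GEN_MASK */
    #define EX_GEN3       (3 << 0)
    #define EX_SOC_MASK   (0xFF << 4)
    #define EX_SOC_E      (1 << 4)

    static inline bool example_is_e3(unsigned long flags)
    {
        return (flags & (EX_GEN_MASK | EX_SOC_MASK)) ==
               (EX_GEN3 | EX_SOC_E);
    }
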
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 93d316d5bf8e..46e3ab0fced4 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -947,7 +947,7 @@ static void soc_cleanup_component(struct snd_soc_component *component)
snd_soc_dapm_free(snd_soc_component_get_dapm(component));
soc_cleanup_component_debugfs(component);
component->card = NULL;
- if (!component->driver->ignore_module_refcount)
+ if (!component->driver->module_get_upon_open)
module_put(component->dev->driver->owner);
}
@@ -1381,7 +1381,7 @@ static int soc_probe_component(struct snd_soc_card *card,
return 0;
}
- if (!component->driver->ignore_module_refcount &&
+ if (!component->driver->module_get_upon_open &&
!try_module_get(component->dev->driver->owner))
return -ENODEV;
@@ -2797,6 +2797,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
ret = soc_init_dai_link(card, link);
if (ret) {
+ soc_cleanup_platform(card);
dev_err(card->dev, "ASoC: failed to init link %s\n",
link->name);
mutex_unlock(&client_mutex);
@@ -2819,6 +2820,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
card->instantiated = 0;
mutex_init(&card->mutex);
mutex_init(&card->dapm_mutex);
+ spin_lock_init(&card->dpcm_lock);
return snd_soc_bind_card(card);
}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 1ec06ef6d161..0382a47b30bd 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3650,6 +3650,13 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
case snd_soc_dapm_dac:
case snd_soc_dapm_aif_in:
case snd_soc_dapm_pga:
+ case snd_soc_dapm_buffer:
+ case snd_soc_dapm_scheduler:
+ case snd_soc_dapm_effect:
+ case snd_soc_dapm_src:
+ case snd_soc_dapm_asrc:
+ case snd_soc_dapm_encoder:
+ case snd_soc_dapm_decoder:
case snd_soc_dapm_out_drv:
case snd_soc_dapm_micbias:
case snd_soc_dapm_line:
@@ -3957,6 +3964,10 @@ snd_soc_dapm_free_kcontrol(struct snd_soc_card *card,
int count;
devm_kfree(card->dev, (void *)*private_value);
+
+ if (!w_param_text)
+ return;
+
for (count = 0 ; count < num_params; count++)
devm_kfree(card->dev, (void *)w_param_text[count]);
devm_kfree(card->dev, w_param_text);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 0d5ec68a1e50..be80a12fba27 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/export.h>
@@ -463,6 +464,9 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream,
continue;
component->driver->ops->close(substream);
+
+ if (component->driver->module_get_upon_open)
+ module_put(component->dev->driver->owner);
}
return 0;
@@ -513,6 +517,12 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
!component->driver->ops->open)
continue;
+ if (component->driver->module_get_upon_open &&
+ !try_module_get(component->dev->driver->owner)) {
+ ret = -ENODEV;
+ goto module_err;
+ }
+
ret = component->driver->ops->open(substream);
if (ret < 0) {
dev_err(component->dev,
@@ -628,7 +638,7 @@ codec_dai_err:
component_err:
soc_pcm_components_close(substream, component);
-
+module_err:
if (cpu_dai->driver->ops->shutdown)
cpu_dai->driver->ops->shutdown(substream, cpu_dai);
out:
@@ -954,10 +964,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
codec_params = *params;
/* fixup params based on TDM slot masks */
- if (codec_dai->tx_mask)
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ codec_dai->tx_mask)
soc_pcm_codec_params_fixup(&codec_params,
codec_dai->tx_mask);
- if (codec_dai->rx_mask)
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+ codec_dai->rx_mask)
soc_pcm_codec_params_fixup(&codec_params,
codec_dai->rx_mask);
@@ -1213,6 +1226,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
struct snd_soc_pcm_runtime *be, int stream)
{
struct snd_soc_dpcm *dpcm;
+ unsigned long flags;
/* only add new dpcms */
for_each_dpcm_be(fe, stream, dpcm) {
@@ -1228,8 +1242,10 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
dpcm->fe = fe;
be->dpcm[stream].runtime = fe->dpcm[stream].runtime;
dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW;
+ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients);
list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients);
+ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n",
stream ? "capture" : "playback", fe->dai_link->name,
@@ -1275,6 +1291,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm, *d;
+ unsigned long flags;
for_each_dpcm_be_safe(fe, stream, dpcm, d) {
dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
@@ -1294,8 +1311,10 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
#ifdef CONFIG_DEBUG_FS
debugfs_remove(dpcm->debugfs_state);
#endif
+ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
list_del(&dpcm->list_be);
list_del(&dpcm->list_fe);
+ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
kfree(dpcm);
}
}
@@ -1547,10 +1566,13 @@ int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
+ unsigned long flags;
+ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_be(fe, stream, dpcm)
dpcm->be->dpcm[stream].runtime_update =
SND_SOC_DPCM_UPDATE_NO;
+ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
}
static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe,
@@ -1899,10 +1921,15 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
snd_soc_dpcm_get_substream(be, stream);
- struct snd_soc_pcm_runtime *rtd = be_substream->private_data;
+ struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai *codec_dai;
int i;
+ /* A backend may not have the requested substream */
+ if (!be_substream)
+ continue;
+
+ rtd = be_substream->private_data;
if (rtd->dai_link->be_hw_params_fixup)
continue;
@@ -2571,6 +2598,7 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_soc_dpcm *dpcm;
enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
int ret;
+ unsigned long flags;
dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n",
stream ? "capture" : "playback", fe->dai_link->name);
@@ -2640,11 +2668,13 @@ close:
dpcm_be_dai_shutdown(fe, stream);
disconnect:
/* disconnect any non started BEs */
+ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
}
+ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
return ret;
}
@@ -3221,7 +3251,10 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
{
struct snd_soc_dpcm *dpcm;
int state;
+ int ret = 1;
+ unsigned long flags;
+ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
@@ -3230,12 +3263,15 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
state = dpcm->fe->dpcm[stream].state;
if (state == SND_SOC_DPCM_STATE_START ||
state == SND_SOC_DPCM_STATE_PAUSED ||
- state == SND_SOC_DPCM_STATE_SUSPEND)
- return 0;
+ state == SND_SOC_DPCM_STATE_SUSPEND) {
+ ret = 0;
+ break;
+ }
}
+ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
/* it's safe to free/stop this BE DAI */
- return 1;
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop);
@@ -3248,7 +3284,10 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
{
struct snd_soc_dpcm *dpcm;
int state;
+ int ret = 1;
+ unsigned long flags;
+ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
@@ -3258,12 +3297,15 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
if (state == SND_SOC_DPCM_STATE_START ||
state == SND_SOC_DPCM_STATE_PAUSED ||
state == SND_SOC_DPCM_STATE_SUSPEND ||
- state == SND_SOC_DPCM_STATE_PREPARE)
- return 0;
+ state == SND_SOC_DPCM_STATE_PREPARE) {
+ ret = 0;
+ break;
+ }
}
+ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
/* it's safe to change hw_params */
- return 1;
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
@@ -3302,6 +3344,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params;
struct snd_soc_dpcm *dpcm;
ssize_t offset = 0;
+ unsigned long flags;
/* FE state */
offset += snprintf(buf + offset, size - offset,
@@ -3329,6 +3372,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
goto out;
}
+ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
params = &dpcm->hw_params;
@@ -3349,7 +3393,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
params_channels(params),
params_rate(params));
}
-
+ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
out:
return offset;
}
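
All of the soc-pcm.c hunks above apply one discipline: every walk or mutation of a card's FE/BE dpcm lists now happens under the new card->dpcm_lock, taken with the irqsave variant since some callers run in atomic context, and early returns inside the loops become break-then-unlock. The recurring shape, modeled on dpcm_clear_pending_state() above:

    /* Sketch of the locking pattern used throughout the hunks above. */
    static void example_clear_pending(struct snd_soc_pcm_runtime *fe, int stream)
    {
        struct snd_soc_dpcm *dpcm;
        unsigned long flags;

        spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm)
            dpcm->be->dpcm[stream].runtime_update =
                SND_SOC_DPCM_UPDATE_NO;    /* no sleeping calls in here */
        spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
    }
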
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 25fca7055464..96852d250619 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -482,10 +482,11 @@ static void remove_widget(struct snd_soc_component *comp,
snd_ctl_remove(card, kcontrol);
- kfree(dobj->control.dvalues);
+ /* free enum kcontrol's dvalues and dtexts */
+ kfree(se->dobj.control.dvalues);
for (j = 0; j < se->items; j++)
- kfree(dobj->control.dtexts[j]);
- kfree(dobj->control.dtexts);
+ kfree(se->dobj.control.dtexts[j]);
+ kfree(se->dobj.control.dtexts);
kfree(se);
kfree(w->kcontrol_news[i].name);
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index 47901983a6ff..78bed9734713 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -37,6 +38,8 @@ struct stm32_adfsdm_priv {
/* PCM buffer */
unsigned char *pcm_buff;
unsigned int pos;
+
+ struct mutex lock; /* protect against race condition on iio state */
};
static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = {
@@ -62,10 +65,12 @@ static void stm32_adfsdm_shutdown(struct snd_pcm_substream *substream,
{
struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+ mutex_lock(&priv->lock);
if (priv->iio_active) {
iio_channel_stop_all_cb(priv->iio_cb);
priv->iio_active = false;
}
+ mutex_unlock(&priv->lock);
}
static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
@@ -74,13 +79,19 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
int ret;
+ mutex_lock(&priv->lock);
+ if (priv->iio_active) {
+ iio_channel_stop_all_cb(priv->iio_cb);
+ priv->iio_active = false;
+ }
+
ret = iio_write_channel_attribute(priv->iio_ch,
substream->runtime->rate, 0,
IIO_CHAN_INFO_SAMP_FREQ);
if (ret < 0) {
dev_err(dai->dev, "%s: Failed to set %d sampling rate\n",
__func__, substream->runtime->rate);
- return ret;
+ goto out;
}
if (!priv->iio_active) {
@@ -92,6 +103,9 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
__func__, ret);
}
+out:
+ mutex_unlock(&priv->lock);
+
return ret;
}
@@ -291,6 +305,7 @@ MODULE_DEVICE_TABLE(of, stm32_adfsdm_of_match);
static int stm32_adfsdm_probe(struct platform_device *pdev)
{
struct stm32_adfsdm_priv *priv;
+ struct snd_soc_component *component;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -299,6 +314,7 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->dai_drv = stm32_adfsdm_dai;
+ mutex_init(&priv->lock);
dev_set_drvdata(&pdev->dev, priv);
@@ -317,9 +333,15 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
if (IS_ERR(priv->iio_cb))
return PTR_ERR(priv->iio_cb);
- ret = devm_snd_soc_register_component(&pdev->dev,
- &stm32_adfsdm_soc_platform,
- NULL, 0);
+ component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+#ifdef CONFIG_DEBUG_FS
+ component->debugfs_prefix = "pcm";
+#endif
+
+ ret = snd_soc_add_component(&pdev->dev, component,
+ &stm32_adfsdm_soc_platform, NULL, 0);
if (ret < 0)
dev_err(&pdev->dev, "%s: Failed to register PCM platform\n",
__func__);
@@ -327,12 +349,20 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
return ret;
}
+static int stm32_adfsdm_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_component(&pdev->dev);
+
+ return 0;
+}
+
static struct platform_driver stm32_adfsdm_driver = {
.driver = {
.name = STM32_ADFSDM_DRV_NAME,
.of_match_table = stm32_adfsdm_of_match,
},
.probe = stm32_adfsdm_probe,
+ .remove = stm32_adfsdm_remove,
};
module_platform_driver(stm32_adfsdm_driver);
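
Alongside the explicit component registration (which gains a matching unregister in the new remove()), the new priv->lock serializes the IIO start/stop sequence shared by prepare and shutdown, closing the race where one path could observe iio_active mid-transition. Both sides reduce to the same check-then-act section under the mutex:

    /* Sketch of the guarded stop used by both prepare and shutdown. */
    static void example_stop_iio(struct stm32_adfsdm_priv *priv)
    {
        mutex_lock(&priv->lock);
        if (priv->iio_active) {    /* checked and cleared atomically */
            iio_channel_stop_all_cb(priv->iio_cb);
            priv->iio_active = false;
        }
        mutex_unlock(&priv->lock);
    }
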
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
index 47c334de6b09..8968458eec62 100644
--- a/sound/soc/stm/stm32_i2s.c
+++ b/sound/soc/stm/stm32_i2s.c
@@ -281,7 +281,6 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
case STM32_I2S_CFG2_REG:
case STM32_I2S_IER_REG:
case STM32_I2S_SR_REG:
- case STM32_I2S_TXDR_REG:
case STM32_I2S_RXDR_REG:
case STM32_I2S_CGFR_REG:
return true;
@@ -293,7 +292,7 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
static bool stm32_i2s_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case STM32_I2S_TXDR_REG:
+ case STM32_I2S_SR_REG:
case STM32_I2S_RXDR_REG:
return true;
default:
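
Regmap uses these callbacks to decide what it may cache and read back: TXDR is a write-only FIFO, so it is dropped from readable_reg, and SR is updated by hardware behind the driver's back, so it moves to volatile_reg to force real bus reads. Condensed:

    /* Sketch: classify hardware-updated registers for regmap caching. */
    static bool example_volatile_reg(struct device *dev, unsigned int reg)
    {
        switch (reg) {
        case STM32_I2S_SR_REG:      /* status: hardware-updated, never cache */
        case STM32_I2S_RXDR_REG:    /* data FIFO: each read pops an entry */
            return true;
        default:
            return false;
        }
    }
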
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index 14c9591aae42..d68d62f12df5 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -105,6 +105,7 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
if (!pdev) {
dev_err(&sai_client->pdev->dev,
"Device not found for node %pOFn\n", np_provider);
+ of_node_put(np_provider);
return -ENODEV;
}
@@ -113,19 +114,20 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
dev_err(&sai_client->pdev->dev,
"SAI sync provider data not found\n");
ret = -EINVAL;
- goto out_put_dev;
+ goto error;
}
/* Configure sync client */
ret = stm32_sai_sync_conf_client(sai_client, synci);
if (ret < 0)
- goto out_put_dev;
+ goto error;
/* Configure sync provider */
ret = stm32_sai_sync_conf_provider(sai_provider, synco);
-out_put_dev:
+error:
put_device(&pdev->dev);
+ of_node_put(np_provider);
return ret;
}
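
The stm32_sai.c fix above is the usual OF refcount rule: a device_node held with an elevated reference count must be released with of_node_put() on every exit path, success or error, alongside the put_device() for the provider's platform device. The general discipline, condensed (the property name and helper are hypothetical):

    /* Sketch: single-exit cleanup for a looked-up OF node. */
    #include <linux/of.h>

    static int example_configure(struct device *dev)
    {
        struct device_node *np;
        int ret;

        np = of_parse_phandle(dev->of_node, "st,sync-provider", 0);
        if (!np)
            return -ENODEV;

        ret = example_do_configure(np);    /* hypothetical helper */

        of_node_put(np);                   /* dropped on every path */
        return ret;
    }
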
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index f9297228c41c..d7045aa520de 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -70,6 +70,7 @@
#define SAI_IEC60958_STATUS_BYTES 24
#define SAI_MCLK_NAME_LEN 32
+#define SAI_RATE_11K 11025
/**
* struct stm32_sai_sub_data - private data of SAI sub block (block A or B)
@@ -100,8 +101,9 @@
* @slot_mask: rx or tx active slots mask. set at init or at runtime
* @data_size: PCM data width. corresponds to PCM substream width.
* @spdif_frm_cnt: S/PDIF playback frame counter
- * @snd_aes_iec958: iec958 data
+ * @iec958: iec958 data
* @ctrl_lock: control lock
+ * @irq_lock: prevent race condition with IRQ
*/
struct stm32_sai_sub_data {
struct platform_device *pdev;
@@ -133,6 +135,7 @@ struct stm32_sai_sub_data {
unsigned int spdif_frm_cnt;
struct snd_aes_iec958 iec958;
struct mutex ctrl_lock; /* protect resources accessed by controls */
+ spinlock_t irq_lock; /* used to prevent race condition with IRQ */
};
enum stm32_sai_fifo_th {
@@ -307,6 +310,25 @@ static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai,
return ret;
}
+static int stm32_sai_set_parent_clock(struct stm32_sai_sub_data *sai,
+ unsigned int rate)
+{
+ struct platform_device *pdev = sai->pdev;
+ struct clk *parent_clk = sai->pdata->clk_x8k;
+ int ret;
+
+ if (!(rate % SAI_RATE_11K))
+ parent_clk = sai->pdata->clk_x11k;
+
+ ret = clk_set_parent(sai->sai_ck, parent_clk);
+ if (ret)
+ dev_err(&pdev->dev, " Error %d setting sai_ck parent clock. %s",
+ ret, ret == -EBUSY ?
+ "Active stream rates conflict\n" : "\n");
+
+ return ret;
+}
+
static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
@@ -474,8 +496,10 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
status = SNDRV_PCM_STATE_XRUN;
}
- if (status != SNDRV_PCM_STATE_RUNNING)
+ spin_lock(&sai->irq_lock);
+ if (status != SNDRV_PCM_STATE_RUNNING && sai->substream)
snd_pcm_stop_xrun(sai->substream);
+ spin_unlock(&sai->irq_lock);
return IRQ_HANDLED;
}
@@ -486,25 +510,29 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int ret;
- if (dir == SND_SOC_CLOCK_OUT) {
+ if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
SAI_XCR1_NODIV,
(unsigned int)~SAI_XCR1_NODIV);
if (ret < 0)
return ret;
- dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
- sai->mclk_rate = freq;
+ /* If master clock is used, set parent clock now */
+ ret = stm32_sai_set_parent_clock(sai, freq);
+ if (ret)
+ return ret;
- if (sai->sai_mclk) {
- ret = clk_set_rate_exclusive(sai->sai_mclk,
- sai->mclk_rate);
- if (ret) {
- dev_err(cpu_dai->dev,
- "Could not set mclk rate\n");
- return ret;
- }
+ ret = clk_set_rate_exclusive(sai->sai_mclk, freq);
+ if (ret) {
+ dev_err(cpu_dai->dev,
+ ret == -EBUSY ?
+ "Active streams have incompatible rates" :
+ "Could not set mclk rate\n");
+ return ret;
}
+
+ dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
+ sai->mclk_rate = freq;
}
return 0;
@@ -679,8 +707,19 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int imr, cr2, ret;
+ unsigned long flags;
+ spin_lock_irqsave(&sai->irq_lock, flags);
sai->substream = substream;
+ spin_unlock_irqrestore(&sai->irq_lock, flags);
+
+ if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
+ snd_pcm_hw_constraint_mask64(substream->runtime,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ SNDRV_PCM_FMTBIT_S32_LE);
+ snd_pcm_hw_constraint_single(substream->runtime,
+ SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+ }
ret = clk_prepare_enable(sai->sai_ck);
if (ret < 0) {
@@ -898,14 +937,16 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
struct snd_pcm_hw_params *params)
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
- int div = 0;
+ int div = 0, cr1 = 0;
int sai_clk_rate, mclk_ratio, den;
unsigned int rate = params_rate(params);
+ int ret;
- if (!(rate % 11025))
- clk_set_parent(sai->sai_ck, sai->pdata->clk_x11k);
- else
- clk_set_parent(sai->sai_ck, sai->pdata->clk_x8k);
+ if (!sai->sai_mclk) {
+ ret = stm32_sai_set_parent_clock(sai, rate);
+ if (ret)
+ return ret;
+ }
sai_clk_rate = clk_get_rate(sai->sai_ck);
if (STM_SAI_IS_F4(sai->pdata)) {
@@ -943,13 +984,19 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
} else {
if (sai->mclk_rate) {
mclk_ratio = sai->mclk_rate / rate;
- if ((mclk_ratio != 512) &&
- (mclk_ratio != 256)) {
+ if (mclk_ratio == 512) {
+ cr1 = SAI_XCR1_OSR;
+ } else if (mclk_ratio != 256) {
dev_err(cpu_dai->dev,
"Wrong mclk ratio %d\n",
mclk_ratio);
return -EINVAL;
}
+
+ regmap_update_bits(sai->regmap,
+ STM_SAI_CR1_REGX,
+ SAI_XCR1_OSR, cr1);
+
div = stm32_sai_get_clk_div(sai, sai_clk_rate,
sai->mclk_rate);
if (div < 0)
@@ -1051,28 +1098,36 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
+ unsigned long flags;
regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
SAI_XCR1_NODIV);
- clk_disable_unprepare(sai->sai_ck);
+ /* Release mclk rate only if rate was actually set */
+ if (sai->mclk_rate) {
+ clk_rate_exclusive_put(sai->sai_mclk);
+ sai->mclk_rate = 0;
+ }
- clk_rate_exclusive_put(sai->sai_mclk);
+ clk_disable_unprepare(sai->sai_ck);
+ spin_lock_irqsave(&sai->irq_lock, flags);
sai->substream = NULL;
+ spin_unlock_irqrestore(&sai->irq_lock, flags);
}
static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *cpu_dai)
{
struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
+ struct snd_kcontrol_new knew = iec958_ctls;
if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
dev_dbg(&sai->pdev->dev, "%s: register iec controls", __func__);
- return snd_ctl_add(rtd->pcm->card,
- snd_ctl_new1(&iec958_ctls, sai));
+ knew.device = rtd->pcm->device;
+ return snd_ctl_add(rtd->pcm->card, snd_ctl_new1(&knew, sai));
}
return 0;
@@ -1081,7 +1136,7 @@ static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
{
struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
- int cr1 = 0, cr1_mask;
+ int cr1 = 0, cr1_mask, ret;
sai->cpu_dai = cpu_dai;
@@ -1111,8 +1166,10 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
/* Configure synchronization */
if (sai->sync == SAI_SYNC_EXTERNAL) {
/* Configure synchro client and provider */
- sai->pdata->set_sync(sai->pdata, sai->np_sync_provider,
- sai->synco, sai->synci);
+ ret = sai->pdata->set_sync(sai->pdata, sai->np_sync_provider,
+ sai->synco, sai->synci);
+ if (ret)
+ return ret;
}
cr1_mask |= SAI_XCR1_SYNCEN_MASK;
@@ -1392,7 +1449,6 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
if (!sai->cpu_dai_drv)
return -ENOMEM;
- sai->cpu_dai_drv->name = dev_name(&pdev->dev);
if (STM_SAI_IS_PLAYBACK(sai)) {
memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai,
sizeof(stm32_sai_playback_dai));
@@ -1402,6 +1458,7 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
sizeof(stm32_sai_capture_dai));
sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name;
}
+ sai->cpu_dai_drv->name = dev_name(&pdev->dev);
return 0;
}
@@ -1424,6 +1481,7 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
sai->pdev = pdev;
mutex_init(&sai->ctrl_lock);
+ spin_lock_init(&sai->irq_lock);
platform_set_drvdata(pdev, sai);
sai->pdata = dev_get_drvdata(pdev->dev.parent);
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index 1cfca698ae4b..b0fa285c7ba2 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -102,7 +102,6 @@ static void txx9aclc_ac97_cold_reset(struct snd_ac97 *ac97)
u32 ready = ACINT_CODECRDY(ac97->num) | ACINT_REGACCRDY;
__raw_writel(ACCTL_ENLINK, base + ACCTLDIS);
- mmiowb();
udelay(1);
__raw_writel(ACCTL_ENLINK, base + ACCTLEN);
/* wait for primary codec ready status */
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 7afe8fae4939..b61f65bed4e4 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -351,12 +351,16 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
{
struct usb_device *usbdev = line6->usbdev;
int ret;
- unsigned char len;
+ unsigned char *len;
unsigned count;
if (address > 0xffff || datalen > 0xff)
return -EINVAL;
+ len = kmalloc(sizeof(*len), GFP_KERNEL);
+ if (!len)
+ return -ENOMEM;
+
/* query the serial number: */
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -365,7 +369,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
if (ret < 0) {
dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
/* Wait for data length. We'll get 0xff until length arrives. */
@@ -375,28 +379,29 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
- 0x0012, 0x0000, &len, 1,
+ 0x0012, 0x0000, len, 1,
LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev,
"receive length failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
- if (len != 0xff)
+ if (*len != 0xff)
break;
}
- if (len == 0xff) {
+ ret = -EIO;
+ if (*len == 0xff) {
dev_err(line6->ifcdev, "read failed after %d retries\n",
count);
- return -EIO;
- } else if (len != datalen) {
+ goto exit;
+ } else if (*len != datalen) {
/* should be equal or something went wrong */
dev_err(line6->ifcdev,
"length mismatch (expected %d, got %d)\n",
- (int)datalen, (int)len);
- return -EIO;
+ (int)datalen, (int)*len);
+ goto exit;
}
/* receive the result: */
@@ -405,12 +410,12 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
0x0013, 0x0000, data, datalen,
LINE6_TIMEOUT * HZ);
- if (ret < 0) {
+ if (ret < 0)
dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
- return ret;
- }
- return 0;
+exit:
+ kfree(len);
+ return ret;
}
EXPORT_SYMBOL_GPL(line6_read_data);
@@ -422,12 +427,16 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
{
struct usb_device *usbdev = line6->usbdev;
int ret;
- unsigned char status;
+ unsigned char *status;
int count;
if (address > 0xffff || datalen > 0xffff)
return -EINVAL;
+ status = kmalloc(sizeof(*status), GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x0022, address, data, datalen,
@@ -436,7 +445,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
if (ret < 0) {
dev_err(line6->ifcdev,
"write request failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
@@ -447,28 +456,29 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
0x0012, 0x0000,
- &status, 1, LINE6_TIMEOUT * HZ);
+ status, 1, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev,
"receiving status failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
- if (status != 0xff)
+ if (*status != 0xff)
break;
}
- if (status == 0xff) {
+ if (*status == 0xff) {
dev_err(line6->ifcdev, "write failed after %d retries\n",
count);
- return -EIO;
- } else if (status != 0) {
+ ret = -EIO;
+ } else if (*status != 0) {
dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
- return -EIO;
+ ret = -EIO;
}
-
- return 0;
+exit:
+ kfree(status);
+ return ret;
}
EXPORT_SYMBOL_GPL(line6_write_data);
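
The driver.c changes above, and the podhd.c and toneport.c changes below, all fix one class of bug: usb_control_msg() hands its data pointer to the host controller for DMA, so the buffer must be heap memory, never a variable on the caller's stack. Reduced to one transfer (request and value mirror the code above; timeout and error handling condensed):

    /* Sketch: kmalloc a one-byte buffer for a control-IN transfer. */
    #include <linux/slab.h>
    #include <linux/usb.h>

    static int example_read_status(struct usb_device *usbdev, u8 *out)
    {
        u8 *buf;
        int ret;

        buf = kmalloc(1, GFP_KERNEL);    /* DMA-able, unlike stack memory */
        if (!buf)
            return -ENOMEM;

        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE |
                              USB_DIR_IN,
                              0x0012, 0x0000, buf, 1, 1000 /* ms */);
        if (ret >= 0)
            *out = *buf;

        kfree(buf);
        return ret < 0 ? ret : 0;
    }
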
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 36ed9c85c0eb..5f3c87264e66 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -225,28 +225,32 @@ static void podhd_startup_start_workqueue(struct timer_list *t)
static int podhd_dev_start(struct usb_line6_podhd *pod)
{
int ret;
- u8 init_bytes[8];
+ u8 *init_bytes;
int i;
struct usb_device *usbdev = pod->line6.usbdev;
+ init_bytes = kmalloc(8, GFP_KERNEL);
+ if (!init_bytes)
+ return -ENOMEM;
+
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x11, 0,
NULL, 0, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
/* NOTE: looks like some kind of ping message */
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0x11, 0x0,
- &init_bytes, 3, LINE6_TIMEOUT * HZ);
+ init_bytes, 3, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(pod->line6.ifcdev,
"receive length failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
pod->firmware_version =
@@ -255,7 +259,7 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
for (i = 0; i <= 16; i++) {
ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8);
if (ret < 0)
- return ret;
+ goto exit;
}
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
@@ -263,10 +267,9 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
1, 0,
NULL, 0, LINE6_TIMEOUT * HZ);
- if (ret < 0)
- return ret;
-
- return 0;
+exit:
+ kfree(init_bytes);
+ return ret;
}
static void podhd_startup_workqueue(struct work_struct *work)
diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
index f47ba94e6f4a..19bee725de00 100644
--- a/sound/usb/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -365,16 +365,21 @@ static bool toneport_has_source_select(struct usb_line6_toneport *toneport)
/*
Setup Toneport device.
*/
-static void toneport_setup(struct usb_line6_toneport *toneport)
+static int toneport_setup(struct usb_line6_toneport *toneport)
{
- u32 ticks;
+ u32 *ticks;
struct usb_line6 *line6 = &toneport->line6;
struct usb_device *usbdev = line6->usbdev;
+ ticks = kmalloc(sizeof(*ticks), GFP_KERNEL);
+ if (!ticks)
+ return -ENOMEM;
+
/* sync time on device with host: */
/* note: 32-bit timestamps overflow in year 2106 */
- ticks = (u32)ktime_get_real_seconds();
- line6_write_data(line6, 0x80c6, &ticks, 4);
+ *ticks = (u32)ktime_get_real_seconds();
+ line6_write_data(line6, 0x80c6, ticks, 4);
+ kfree(ticks);
/* enable device: */
toneport_send_cmd(usbdev, 0x0301, 0x0000);
@@ -389,6 +394,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
toneport_update_led(toneport);
mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
+ return 0;
}
/*
@@ -451,7 +457,9 @@ static int toneport_init(struct usb_line6 *line6,
return err;
}
- toneport_setup(toneport);
+ err = toneport_setup(toneport);
+ if (err)
+ return err;
/* register audio system: */
return snd_card_register(line6->card);
@@ -463,7 +471,11 @@ static int toneport_init(struct usb_line6 *line6,
*/
static int toneport_reset_resume(struct usb_interface *interface)
{
- toneport_setup(usb_get_intfdata(interface));
+ int err;
+
+ err = toneport_setup(usb_get_intfdata(interface));
+ if (err)
+ return err;
return line6_resume(interface);
}
#endif
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
index b0f8979ff2d2..221adf68bd0c 100644
--- a/sound/usb/usx2y/usb_stream.c
+++ b/sound/usb/usx2y/usb_stream.c
@@ -104,7 +104,12 @@ static int init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
for (u = 0; u < USB_STREAM_NURBS; ++u) {
sk->inurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL);
+ if (!sk->inurb[u])
+ return -ENOMEM;
+
sk->outurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL);
+ if (!sk->outurb[u])
+ return -ENOMEM;
}
if (init_pipe_urbs(sk, use_packsize, sk->inurb, indata, dev, in_pipe) ||
diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c
index a7f413cb704d..b14ab512c2ce 100644
--- a/sound/xen/xen_snd_front_alsa.c
+++ b/sound/xen/xen_snd_front_alsa.c
@@ -441,7 +441,7 @@ static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
{
int i;
- stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL);
+ stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL);
if (!stream->buffer)
return -ENOMEM;
diff --git a/tools/Makefile b/tools/Makefile
index 77f1aee8ea01..3dfd72ae6c1a 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -12,6 +12,7 @@ help:
@echo ' acpi - ACPI tools'
@echo ' cgroup - cgroup tools'
@echo ' cpupower - a tool for all things x86 CPU power'
+ @echo ' debugging - tools for debugging'
@echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
@echo ' firmware - Firmware tools'
@echo ' freefall - laptop accelerometer program for disk protection'
@@ -61,7 +62,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware: FORCE
+cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
$(call descend,$@)
liblockdep: FORCE
@@ -96,7 +97,8 @@ kvm_stat: FORCE
all: acpi cgroup cpupower gpio hv firewire liblockdep \
perf selftests spi turbostat usb \
virtio vm bpf x86_energy_perf_policy \
- tmon freefall iio objtool kvm_stat wmi pci
+ tmon freefall iio objtool kvm_stat wmi \
+ pci debugging
acpi_install:
$(call descend,power/$(@:_install=),install)
@@ -104,7 +106,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
$(call descend,$(@:_install=),install)
liblockdep_install:
@@ -130,7 +132,7 @@ install: acpi_install cgroup_install cpupower_install gpio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install vm_install bpf_install x86_energy_perf_policy_install \
tmon_install freefall_install objtool_install kvm_stat_install \
- wmi_install pci_install
+ wmi_install pci_install debugging_install
acpi_clean:
$(call descend,power/acpi,clean)
@@ -138,7 +140,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean:
+cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
$(call descend,$(@:_clean=),clean)
liblockdep_clean:
@@ -176,6 +178,6 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
- gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean
+ gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean
.PHONY: FORCE
diff --git a/tools/arch/alpha/include/uapi/asm/mman.h b/tools/arch/alpha/include/uapi/asm/mman.h
index c317d3e6867a..ea6a255ae61f 100644
--- a/tools/arch/alpha/include/uapi/asm/mman.h
+++ b/tools/arch/alpha/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
#define MAP_NONBLOCK 0x40000
#define MAP_NORESERVE 0x10000
#define MAP_POPULATE 0x20000
-#define MAP_PRIVATE 0x02
-#define MAP_SHARED 0x01
#define MAP_STACK 0x80000
#define PROT_EXEC 0x4
#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/arc/include/uapi/asm/unistd.h b/tools/arch/arc/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..5eafa1115162
--- /dev/null
+++ b/tools/arch/arc/include/uapi/asm/unistd.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/******** no-legacy-syscalls-ABI *******/
+
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_ASM_ARC_UNISTD_H
+
+#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define NR_syscalls __NR_syscalls
+
+/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+
+/* ARC specific syscall */
+#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
+#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
+#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
+
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
+__SYSCALL(__NR_arc_settls, sys_arc_settls)
+__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
+__SYSCALL(__NR_sysfs, sys_sysfs)
+
+#undef __SYSCALL
+
+#endif
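(Illustrative aside, not part of this patch: the "inclusion twice" trick described in the guard comment above is typically consumed by an arch sys.c along the following lines; the exact shape of that file is an assumption based on the comment.)

	/* first inclusion, with __SYSCALL undefined, only provides __NR_* */
	#define __SYSCALL(nr, call) [nr] = (call),
	void *sys_call_table[NR_syscalls] = {
		[0 ... NR_syscalls - 1] = sys_ni_syscall,	/* default handler */
	#include <asm/unistd.h>	/* second inclusion emits the [nr] = call entries */
	};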
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index dae1584cf017..4703d218663a 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -17,5 +17,7 @@
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h>
diff --git a/tools/arch/hexagon/include/uapi/asm/unistd.h b/tools/arch/hexagon/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..432c4db1b623
--- /dev/null
+++ b/tools/arch/hexagon/include/uapi/asm/unistd.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Syscall support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The kernel pulls this unistd.h in three different ways:
+ * 1. the "normal" way which gets all the __NR defines
+ * 2. with __SYSCALL defined to produce function declarations
+ * 3. with __SYSCALL defined to produce syscall table initialization
+ * See also: syscalltab.c
+ */
+
+#define sys_mmap2 sys_mmap_pgoff
+#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
+
+#include <asm-generic/unistd.h>
diff --git a/tools/arch/mips/include/uapi/asm/mman.h b/tools/arch/mips/include/uapi/asm/mman.h
index de2206883abc..c8acaa138d46 100644
--- a/tools/arch/mips/include/uapi/asm/mman.h
+++ b/tools/arch/mips/include/uapi/asm/mman.h
@@ -28,8 +28,6 @@
#define MAP_NONBLOCK 0x20000
#define MAP_NORESERVE 0x0400
#define MAP_POPULATE 0x10000
-#define MAP_PRIVATE 0x002
-#define MAP_SHARED 0x001
#define MAP_STACK 0x40000
#define PROT_EXEC 0x04
#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h
index 1bd78758bde9..f9fd1325f5bd 100644
--- a/tools/arch/parisc/include/uapi/asm/mman.h
+++ b/tools/arch/parisc/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
#define MAP_NONBLOCK 0x20000
#define MAP_NORESERVE 0x4000
#define MAP_POPULATE 0x10000
-#define MAP_PRIVATE 0x02
-#define MAP_SHARED 0x01
#define MAP_STACK 0x40000
#define PROT_EXEC 0x4
#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 8c876c166ef2..26ca425f4c2c 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char {
#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
+#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54)
#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
+#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58)
/* Per-vcpu XICS interrupt controller state */
#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
diff --git a/tools/arch/riscv/include/uapi/asm/unistd.h b/tools/arch/riscv/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..0e2eeeb1fd27
--- /dev/null
+++ b/tools/arch/riscv/include/uapi/asm/unistd.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#endif /* __LP64__ */
+
+#include <asm-generic/unistd.h>
+
+/*
+ * Allows the instruction cache to be flushed from userspace. Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart. There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller. We don't currently do anything with the address range; that's just
+ * in there for forwards compatibility.
+ */
+#ifndef __NR_riscv_flush_icache
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+#endif
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
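(Illustrative aside, not part of this patch: a minimal userspace wrapper for this syscall might look as follows; the flags value 0, assumed here to mean "flush for all threads", follows the comment above.)

	#include <unistd.h>
	#include <sys/syscall.h>

	static void flush_icache(void *start, void *end)
	{
		/* range is accepted for forwards compatibility; flags = 0 assumed */
		syscall(__NR_riscv_flush_icache, start, end, 0UL);
	}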
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 6d6122524711..981ff9479648 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -344,6 +344,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index f0b0c90dd398..d213ec5c3766 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -146,6 +146,7 @@
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
+#define VMX_ABORT_VMCS_CORRUPTED 3
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
#endif /* _UAPIVMX_H */
diff --git a/tools/arch/xtensa/include/uapi/asm/mman.h b/tools/arch/xtensa/include/uapi/asm/mman.h
index 34dde6f44dae..f2b08c990afc 100644
--- a/tools/arch/xtensa/include/uapi/asm/mman.h
+++ b/tools/arch/xtensa/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
#define MAP_NONBLOCK 0x20000
#define MAP_NORESERVE 0x0400
#define MAP_POPULATE 0x10000
-#define MAP_PRIVATE 0x002
-#define MAP_SHARED 0x001
#define MAP_STACK 0x40000
#define PROT_EXEC 0x4
#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e0c650d91784..994a7e0d16fb 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1151,6 +1151,9 @@ static int do_create(int argc, char **argv)
return -1;
}
NEXT_ARG();
+ } else {
+ p_err("unknown arg %s", *argv);
+ return -1;
}
}
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 8ef80d65a474..d2be5a06c339 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -401,41 +401,31 @@ static int do_show(int argc, char **argv)
static int do_dump(int argc, char **argv)
{
- unsigned int finfo_rec_size, linfo_rec_size, jited_linfo_rec_size;
- void *func_info = NULL, *linfo = NULL, *jited_linfo = NULL;
- unsigned int nr_finfo, nr_linfo = 0, nr_jited_linfo = 0;
+ struct bpf_prog_info_linear *info_linear;
struct bpf_prog_linfo *prog_linfo = NULL;
- unsigned long *func_ksyms = NULL;
- struct bpf_prog_info info = {};
- unsigned int *func_lens = NULL;
+ enum {DUMP_JITED, DUMP_XLATED} mode;
const char *disasm_opt = NULL;
- unsigned int nr_func_ksyms;
- unsigned int nr_func_lens;
+ struct bpf_prog_info *info;
struct dump_data dd = {};
- __u32 len = sizeof(info);
+ void *func_info = NULL;
struct btf *btf = NULL;
- unsigned int buf_size;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
char func_sig[1024];
unsigned char *buf;
bool linum = false;
- __u32 *member_len;
- __u64 *member_ptr;
+ __u32 member_len;
+ __u64 arrays;
ssize_t n;
- int err;
int fd;
if (is_prefix(*argv, "jited")) {
if (disasm_init())
return -1;
-
- member_len = &info.jited_prog_len;
- member_ptr = &info.jited_prog_insns;
+ mode = DUMP_JITED;
} else if (is_prefix(*argv, "xlated")) {
- member_len = &info.xlated_prog_len;
- member_ptr = &info.xlated_prog_insns;
+ mode = DUMP_XLATED;
} else {
p_err("expected 'xlated' or 'jited', got: %s", *argv);
return -1;
@@ -474,175 +464,50 @@ static int do_dump(int argc, char **argv)
return -1;
}
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
- if (err) {
- p_err("can't get prog info: %s", strerror(errno));
- return -1;
- }
-
- if (!*member_len) {
- p_info("no instructions returned");
- close(fd);
- return 0;
- }
+ if (mode == DUMP_JITED)
+ arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
+ else
+ arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
- buf_size = *member_len;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
- buf = malloc(buf_size);
- if (!buf) {
- p_err("mem alloc failed");
- close(fd);
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ close(fd);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ p_err("can't get prog info: %s", strerror(errno));
return -1;
}
- nr_func_ksyms = info.nr_jited_ksyms;
- if (nr_func_ksyms) {
- func_ksyms = malloc(nr_func_ksyms * sizeof(__u64));
- if (!func_ksyms) {
- p_err("mem alloc failed");
- close(fd);
- goto err_free;
- }
- }
-
- nr_func_lens = info.nr_jited_func_lens;
- if (nr_func_lens) {
- func_lens = malloc(nr_func_lens * sizeof(__u32));
- if (!func_lens) {
- p_err("mem alloc failed");
- close(fd);
+ info = &info_linear->info;
+ if (mode == DUMP_JITED) {
+ if (info->jited_prog_len == 0) {
+ p_info("no instructions returned");
goto err_free;
}
- }
-
- nr_finfo = info.nr_func_info;
- finfo_rec_size = info.func_info_rec_size;
- if (nr_finfo && finfo_rec_size) {
- func_info = malloc(nr_finfo * finfo_rec_size);
- if (!func_info) {
- p_err("mem alloc failed");
- close(fd);
+ buf = (unsigned char *)(info->jited_prog_insns);
+ member_len = info->jited_prog_len;
+ } else { /* DUMP_XLATED */
+ if (info->xlated_prog_len == 0) {
+ p_err("error retrieving insn dump: kernel.kptr_restrict set?");
goto err_free;
}
+ buf = (unsigned char *)info->xlated_prog_insns;
+ member_len = info->xlated_prog_len;
}
- linfo_rec_size = info.line_info_rec_size;
- if (info.nr_line_info && linfo_rec_size && info.btf_id) {
- nr_linfo = info.nr_line_info;
- linfo = malloc(nr_linfo * linfo_rec_size);
- if (!linfo) {
- p_err("mem alloc failed");
- close(fd);
- goto err_free;
- }
- }
-
- jited_linfo_rec_size = info.jited_line_info_rec_size;
- if (info.nr_jited_line_info &&
- jited_linfo_rec_size &&
- info.nr_jited_ksyms &&
- info.nr_jited_func_lens &&
- info.btf_id) {
- nr_jited_linfo = info.nr_jited_line_info;
- jited_linfo = malloc(nr_jited_linfo * jited_linfo_rec_size);
- if (!jited_linfo) {
- p_err("mem alloc failed");
- close(fd);
- goto err_free;
- }
- }
-
- memset(&info, 0, sizeof(info));
-
- *member_ptr = ptr_to_u64(buf);
- *member_len = buf_size;
- info.jited_ksyms = ptr_to_u64(func_ksyms);
- info.nr_jited_ksyms = nr_func_ksyms;
- info.jited_func_lens = ptr_to_u64(func_lens);
- info.nr_jited_func_lens = nr_func_lens;
- info.nr_func_info = nr_finfo;
- info.func_info_rec_size = finfo_rec_size;
- info.func_info = ptr_to_u64(func_info);
- info.nr_line_info = nr_linfo;
- info.line_info_rec_size = linfo_rec_size;
- info.line_info = ptr_to_u64(linfo);
- info.nr_jited_line_info = nr_jited_linfo;
- info.jited_line_info_rec_size = jited_linfo_rec_size;
- info.jited_line_info = ptr_to_u64(jited_linfo);
-
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
- close(fd);
- if (err) {
- p_err("can't get prog info: %s", strerror(errno));
- goto err_free;
- }
-
- if (*member_len > buf_size) {
- p_err("too many instructions returned");
- goto err_free;
- }
-
- if (info.nr_jited_ksyms > nr_func_ksyms) {
- p_err("too many addresses returned");
- goto err_free;
- }
-
- if (info.nr_jited_func_lens > nr_func_lens) {
- p_err("too many values returned");
- goto err_free;
- }
-
- if (info.nr_func_info != nr_finfo) {
- p_err("incorrect nr_func_info %d vs. expected %d",
- info.nr_func_info, nr_finfo);
- goto err_free;
- }
-
- if (info.func_info_rec_size != finfo_rec_size) {
- p_err("incorrect func_info_rec_size %d vs. expected %d",
- info.func_info_rec_size, finfo_rec_size);
- goto err_free;
- }
-
- if (linfo && info.nr_line_info != nr_linfo) {
- p_err("incorrect nr_line_info %u vs. expected %u",
- info.nr_line_info, nr_linfo);
- goto err_free;
- }
-
- if (info.line_info_rec_size != linfo_rec_size) {
- p_err("incorrect line_info_rec_size %u vs. expected %u",
- info.line_info_rec_size, linfo_rec_size);
- goto err_free;
- }
-
- if (jited_linfo && info.nr_jited_line_info != nr_jited_linfo) {
- p_err("incorrect nr_jited_line_info %u vs. expected %u",
- info.nr_jited_line_info, nr_jited_linfo);
- goto err_free;
- }
-
- if (info.jited_line_info_rec_size != jited_linfo_rec_size) {
- p_err("incorrect jited_line_info_rec_size %u vs. expected %u",
- info.jited_line_info_rec_size, jited_linfo_rec_size);
- goto err_free;
- }
-
- if ((member_len == &info.jited_prog_len &&
- info.jited_prog_insns == 0) ||
- (member_len == &info.xlated_prog_len &&
- info.xlated_prog_insns == 0)) {
- p_err("error retrieving insn dump: kernel.kptr_restrict set?");
- goto err_free;
- }
-
- if (info.btf_id && btf__get_from_id(info.btf_id, &btf)) {
+ if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
p_err("failed to get btf");
goto err_free;
}
- if (nr_linfo) {
- prog_linfo = bpf_prog_linfo__new(&info);
+ func_info = (void *)info->func_info;
+
+ if (info->nr_line_info) {
+ prog_linfo = bpf_prog_linfo__new(info);
if (!prog_linfo)
p_info("error in processing bpf_line_info. continue without it.");
}
@@ -655,9 +520,9 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
- n = write(fd, buf, *member_len);
+ n = write(fd, buf, member_len);
close(fd);
- if (n != *member_len) {
+ if (n != member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
goto err_free;
@@ -665,19 +530,19 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_null(json_wtr);
- } else if (member_len == &info.jited_prog_len) {
+ } else if (mode == DUMP_JITED) {
const char *name = NULL;
- if (info.ifindex) {
- name = ifindex_to_bfd_params(info.ifindex,
- info.netns_dev,
- info.netns_ino,
+ if (info->ifindex) {
+ name = ifindex_to_bfd_params(info->ifindex,
+ info->netns_dev,
+ info->netns_ino,
&disasm_opt);
if (!name)
goto err_free;
}
- if (info.nr_jited_func_lens && info.jited_func_lens) {
+ if (info->nr_jited_func_lens && info->jited_func_lens) {
struct kernel_sym *sym = NULL;
struct bpf_func_info *record;
char sym_name[SYM_MAX_NAME];
@@ -685,17 +550,16 @@ static int do_dump(int argc, char **argv)
__u64 *ksyms = NULL;
__u32 *lens;
__u32 i;
-
- if (info.nr_jited_ksyms) {
+ if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
- ksyms = (__u64 *) info.jited_ksyms;
+ ksyms = (__u64 *) info->jited_ksyms;
}
if (json_output)
jsonw_start_array(json_wtr);
- lens = (__u32 *) info.jited_func_lens;
- for (i = 0; i < info.nr_jited_func_lens; i++) {
+ lens = (__u32 *) info->jited_func_lens;
+ for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
if (sym)
@@ -707,7 +571,7 @@ static int do_dump(int argc, char **argv)
}
if (func_info) {
- record = func_info + i * finfo_rec_size;
+ record = func_info + i * info->func_info_rec_size;
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
@@ -744,49 +608,37 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);
} else {
- disasm_print_insn(buf, *member_len, opcodes, name,
+ disasm_print_insn(buf, member_len, opcodes, name,
disasm_opt, btf, NULL, 0, 0, false);
}
} else if (visual) {
if (json_output)
jsonw_null(json_wtr);
else
- dump_xlated_cfg(buf, *member_len);
+ dump_xlated_cfg(buf, member_len);
} else {
kernel_syms_load(&dd);
- dd.nr_jited_ksyms = info.nr_jited_ksyms;
- dd.jited_ksyms = (__u64 *) info.jited_ksyms;
+ dd.nr_jited_ksyms = info->nr_jited_ksyms;
+ dd.jited_ksyms = (__u64 *) info->jited_ksyms;
dd.btf = btf;
dd.func_info = func_info;
- dd.finfo_rec_size = finfo_rec_size;
+ dd.finfo_rec_size = info->func_info_rec_size;
dd.prog_linfo = prog_linfo;
if (json_output)
- dump_xlated_json(&dd, buf, *member_len, opcodes,
+ dump_xlated_json(&dd, buf, member_len, opcodes,
linum);
else
- dump_xlated_plain(&dd, buf, *member_len, opcodes,
+ dump_xlated_plain(&dd, buf, member_len, opcodes,
linum);
kernel_syms_destroy(&dd);
}
- free(buf);
- free(func_ksyms);
- free(func_lens);
- free(func_info);
- free(linfo);
- free(jited_linfo);
- bpf_prog_linfo__free(prog_linfo);
+ free(info_linear);
return 0;
err_free:
- free(buf);
- free(func_ksyms);
- free(func_lens);
- free(func_info);
- free(linfo);
- free(jited_linfo);
- bpf_prog_linfo__free(prog_linfo);
+ free(info_linear);
return -1;
}
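(Illustrative aside, not part of this patch: the pattern introduced above reduces to the sketch below; use_insns() is a hypothetical consumer.)

	__u64 arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
	struct bpf_prog_info_linear *info_linear;

	info_linear = bpf_program__get_prog_info_linear(fd, arrays);
	if (!IS_ERR_OR_NULL(info_linear)) {
		struct bpf_prog_info *info = &info_linear->info;

		/* all requested arrays live inside the single info_linear
		 * allocation, so one free() releases everything */
		use_insns((void *)info->xlated_prog_insns, info->xlated_prog_len);
		free(info_linear);
	}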
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 61e46d54a67c..361207387b1b 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -66,7 +66,9 @@ FEATURE_TESTS_BASIC := \
sched_getcpu \
sdt \
setns \
- libaio
+ libaio \
+ libzstd \
+ disassembler-four-args
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
@@ -118,7 +120,9 @@ FEATURE_DISPLAY ?= \
lzma \
get_cpuid \
bpf \
- libaio
+ libaio \
+ libzstd \
+ disassembler-four-args
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 7ceb4441b627..4b8244ee65ce 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -62,7 +62,8 @@ FILES= \
test-clang.bin \
test-llvm.bin \
test-llvm-version.bin \
- test-libaio.bin
+ test-libaio.bin \
+ test-libzstd.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@@ -301,6 +302,9 @@ $(OUTPUT)test-clang.bin:
$(OUTPUT)test-libaio.bin:
$(BUILD) -lrt
+$(OUTPUT)test-libzstd.bin:
+ $(BUILD) -lzstd
+
###############################
clean:
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index e903b86b742f..a59c53705093 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -178,6 +178,14 @@
# include "test-reallocarray.c"
#undef main
+#define main main_test_disassembler_four_args
+# include "test-disassembler-four-args.c"
+#undef main
+
+#define main main_test_zstd
+# include "test-libzstd.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -219,6 +227,8 @@ int main(int argc, char *argv[])
main_test_setns();
main_test_libaio();
main_test_reallocarray();
+ main_test_disassembler_four_args();
+ main_test_libzstd();
return 0;
}
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
index d68eb4fb40cc..2b0e02c38870 100644
--- a/tools/build/feature/test-libopencsd.c
+++ b/tools/build/feature/test-libopencsd.c
@@ -4,9 +4,9 @@
/*
* Check OpenCSD library version is sufficient to provide required features
*/
-#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0))
+#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
-#error "OpenCSD >= 0.10.0 is required"
+#error "OpenCSD >= 0.11.0 is required"
#endif
int main(void)
diff --git a/tools/build/feature/test-libzstd.c b/tools/build/feature/test-libzstd.c
new file mode 100644
index 000000000000..55268c01b84d
--- /dev/null
+++ b/tools/build/feature/test-libzstd.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <zstd.h>
+
+int main(void)
+{
+ ZSTD_CStream *cstream;
+
+ cstream = ZSTD_createCStream();
+ ZSTD_freeCStream(cstream);
+
+ return 0;
+}
diff --git a/tools/debugging/Makefile b/tools/debugging/Makefile
new file mode 100644
index 000000000000..e2b7c1a6fb8f
--- /dev/null
+++ b/tools/debugging/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for debugging tools
+
+PREFIX ?= /usr
+BINDIR ?= bin
+INSTALL ?= install
+
+TARGET = kernel-chktaint
+
+all: $(TARGET)
+
+clean:
+
+install: kernel-chktaint
+ $(INSTALL) -D -m 755 $(TARGET) $(DESTDIR)$(PREFIX)/$(BINDIR)/$(TARGET)
+
diff --git a/tools/debugging/kernel-chktaint b/tools/debugging/kernel-chktaint
new file mode 100755
index 000000000000..2240cb56e6e5
--- /dev/null
+++ b/tools/debugging/kernel-chktaint
@@ -0,0 +1,202 @@
+#! /bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Randy Dunlap <rdunlap@infradead.org>, 2018
+# Thorsten Leemhuis <linux@leemhuis.info>, 2018
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/}
+ ${0##*/} <int>
+
+Call without parameters to decode /proc/sys/kernel/tainted.
+
+Call with a positive integer as parameter to decode a value you
+retrieved from /proc/sys/kernel/tainted on another system.
+
+EOF
+}
+
+if [ "$1"x != "x" ]; then
+	if [ "$1"x = "--helpx" ] || [ "$1"x = "-hx" ] ; then
+ usage
+ exit 1
+ elif [ $1 -ge 0 ] 2>/dev/null ; then
+ taint=$1
+ else
+		echo "Error: Parameter '$1' is not a positive integer. Aborting." >&2
+ exit 1
+ fi
+else
+ TAINTFILE="/proc/sys/kernel/tainted"
+ if [ ! -r $TAINTFILE ]; then
+ echo "No file: $TAINTFILE"
+ exit
+ fi
+
+ taint=`cat $TAINTFILE`
+fi
+
+if [ $taint -eq 0 ]; then
+	echo "Kernel not tainted"
+ exit
+else
+ echo "Kernel is \"tainted\" for the following reasons:"
+fi
+
+T=$taint
+out=
+
+addout() {
+ out=$out$1
+}
+
+if [ `expr $T % 2` -eq 0 ]; then
+ addout "G"
+else
+ addout "P"
+ echo " * proprietary module was loaded (#0)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "F"
+ echo " * module was force loaded (#1)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "S"
+ echo " * SMP kernel oops on an officially SMP incapable processor (#2)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "R"
+ echo " * module was force unloaded (#3)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "M"
+ echo " * processor reported a Machine Check Exception (MCE) (#4)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "B"
+ echo " * bad page referenced or some unexpected page flags (#5)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "U"
+ echo " * taint requested by userspace application (#6)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "D"
+ echo " * kernel died recently, i.e. there was an OOPS or BUG (#7)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "A"
+ echo " * an ACPI table was overridden by user (#8)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "W"
+ echo " * kernel issued warning (#9)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "C"
+ echo " * staging driver was loaded (#10)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "I"
+ echo " * workaround for bug in platform firmware applied (#11)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "O"
+ echo " * externally-built ('out-of-tree') module was loaded (#12)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "E"
+ echo " * unsigned module was loaded (#13)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "L"
+ echo " * soft lockup occurred (#14)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "K"
+ echo " * kernel has been live patched (#15)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "X"
+ echo " * auxiliary taint, defined for and used by distros (#16)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "T"
+ echo " * kernel was built with the struct randomization plugin (#17)"
+fi
+
+echo "For a more detailed explanation of the various taint flags see"
+echo " Documentation/admin-guide/tainted-kernels.rst in the the Linux kernel sources"
+echo " or https://kernel.org/doc/html/latest/admin-guide/tainted-kernels.html"
+echo "Raw taint value as int/string: $taint/'$out'"
+#EOF#
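(Illustrative aside, not part of this patch: an example run, output abridged; 513 sets taint bits 0 and 9.)

	$ kernel-chktaint 513
	Kernel is "tainted" for the following reasons:
	 * proprietary module was loaded (#0)
	 * kernel issued warning (#9)
	...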
diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
index 9fdcd3eaac3b..d29725769107 100644
--- a/tools/include/linux/poison.h
+++ b/tools/include/linux/poison.h
@@ -87,9 +87,6 @@
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
-/********** lib/flex_array.c **********/
-#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
-
/********** security/ **********/
#define KEY_DESTROY 0xbd
diff --git a/tools/include/uapi/asm-generic/mman-common-tools.h b/tools/include/uapi/asm-generic/mman-common-tools.h
new file mode 100644
index 000000000000..af7d0d3a3182
--- /dev/null
+++ b/tools/include/uapi/asm-generic/mman-common-tools.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
+#define __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
+
+#include <asm-generic/mman-common.h>
+
+/* We need this because tools/include/uapi/ has to be in the tools header
+ * search path, to get access to stuff that is not yet in the system's copy
+ * of the files in that directory. But since this cset:
+ *
+ * 746c9398f5ac ("arch: move common mmap flags to linux/mman.h")
+ *
+ * sys/mman.h from the system headers no longer finds the MAP_SHARED and
+ * MAP_PRIVATE defines, as they are not anymore in our copy of
+ * asm-generic/mman-common.h. So we define them here and include this header
+ * from each of the per-arch mman.h headers.
+ */
+#ifndef MAP_SHARED
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+#endif
+#endif // __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index e7ee32861d51..abd238d0f7a4 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -15,9 +15,7 @@
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 653687d9771b..36c197fc44a0 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -2,7 +2,7 @@
#ifndef __ASM_GENERIC_MMAN_H
#define __ASM_GENERIC_MMAN_H
-#include <asm-generic/mman-common.h>
+#include <asm-generic/mman-common-tools.h>
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index d90127298f12..dee7292e1df6 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -38,8 +38,10 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
-__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
+__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
+#endif
/* fs/xattr.c */
#define __NR_setxattr 5
@@ -179,7 +181,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
#define __NR_fchown 55
__SYSCALL(__NR_fchown, sys_fchown)
#define __NR_openat 56
-__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
+__SYSCALL(__NR_openat, sys_openat)
#define __NR_close 57
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
@@ -222,10 +224,12 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
/* fs/select.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
-__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
+__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
#define __NR_ppoll 73
-__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
+__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
+#endif
/* fs/signalfd.c */
#define __NR_signalfd4 74
@@ -269,16 +273,20 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
-__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
- compat_sys_timerfd_settime)
+__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
+ sys_timerfd_settime)
#define __NR_timerfd_gettime 87
-__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
- compat_sys_timerfd_gettime)
+__SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
+ sys_timerfd_gettime)
+#endif
/* fs/utimes.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
-__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
+__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
+#endif
/* kernel/acct.c */
#define __NR_acct 89
@@ -309,8 +317,10 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address)
__SYSCALL(__NR_unshare, sys_unshare)
/* kernel/futex.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
-__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
+__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
+#endif
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
@@ -319,8 +329,10 @@ __SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
/* kernel/hrtimer.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
-__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
+__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
+#endif
/* kernel/itimer.c */
#define __NR_getitimer 102
@@ -341,23 +353,29 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
-__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
+__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
+#endif
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
-__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
+__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
+#endif
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
-__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
+__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
#define __NR_clock_gettime 113
-__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
+__SC_3264(__NR_clock_gettime, sys_clock_gettime32, sys_clock_gettime)
#define __NR_clock_getres 114
-__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
+__SC_3264(__NR_clock_getres, sys_clock_getres_time32, sys_clock_getres)
#define __NR_clock_nanosleep 115
-__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
- compat_sys_clock_nanosleep)
+__SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
+ sys_clock_nanosleep)
+#endif
/* kernel/printk.c */
#define __NR_syslog 116
@@ -388,9 +406,11 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
-__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
- compat_sys_sched_rr_get_interval)
+__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
+ sys_sched_rr_get_interval)
+#endif
/* kernel/signal.c */
#define __NR_restart_syscall 128
@@ -411,9 +431,11 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
-__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
- compat_sys_rt_sigtimedwait)
+__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
+ sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
+#endif
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
@@ -467,10 +489,15 @@ __SYSCALL(__NR_uname, sys_newuname)
__SYSCALL(__NR_sethostname, sys_sethostname)
#define __NR_setdomainname 162
__SYSCALL(__NR_setdomainname, sys_setdomainname)
+
+#ifdef __ARCH_WANT_SET_GET_RLIMIT
+/* getrlimit and setrlimit are superseded with prlimit64 */
#define __NR_getrlimit 163
__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
#define __NR_setrlimit 164
__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
+#endif
+
#define __NR_getrusage 165
__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
#define __NR_umask 166
@@ -481,12 +508,14 @@ __SYSCALL(__NR_prctl, sys_prctl)
__SYSCALL(__NR_getcpu, sys_getcpu)
/* kernel/time.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
#define __NR_settimeofday 170
__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
#define __NR_adjtimex 171
-__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
+__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
+#endif
/* kernel/timer.c */
#define __NR_getpid 172
@@ -511,11 +540,13 @@ __SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
-__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
+__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
#define __NR_mq_timedreceive 183
-__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
- compat_sys_mq_timedreceive)
+__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
+ sys_mq_timedreceive)
+#endif
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
@@ -536,8 +567,10 @@ __SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
+__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+#endif
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
@@ -658,8 +691,10 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
-__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
+__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
+#endif
/*
* Architectures may provide up to 16 syscalls of their own
@@ -667,8 +702,10 @@ __SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
*/
#define __NR_arch_specific_syscall 244
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
+#endif
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
@@ -678,10 +715,11 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
#define __NR_name_to_handle_at 264
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
-__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
- compat_sys_open_by_handle_at)
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
-__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
+__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
+#endif
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
@@ -734,15 +772,69 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
-__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
+#endif
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+#if __BITS_PER_LONG == 32
+#define __NR_clock_gettime64 403
+__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
+#define __NR_clock_settime64 404
+__SYSCALL(__NR_clock_settime64, sys_clock_settime)
+#define __NR_clock_adjtime64 405
+__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
+#define __NR_clock_getres_time64 406
+__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
+#define __NR_clock_nanosleep_time64 407
+__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
+#define __NR_timer_gettime64 408
+__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
+#define __NR_timer_settime64 409
+__SYSCALL(__NR_timer_settime64, sys_timer_settime)
+#define __NR_timerfd_gettime64 410
+__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
+#define __NR_timerfd_settime64 411
+__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
+#define __NR_utimensat_time64 412
+__SYSCALL(__NR_utimensat_time64, sys_utimensat)
+#define __NR_pselect6_time64 413
+__SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
+#define __NR_ppoll_time64 414
+__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
+#define __NR_io_pgetevents_time64 416
+__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+#define __NR_recvmmsg_time64 417
+__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
+#define __NR_mq_timedsend_time64 418
+__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
+#define __NR_mq_timedreceive_time64 419
+__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
+#define __NR_semtimedop_time64 420
+__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
+#define __NR_rt_sigtimedwait_time64 421
+__SC_COMP(__NR_rt_sigtimedwait_time64, sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time64)
+#define __NR_futex_time64 422
+__SYSCALL(__NR_futex_time64, sys_futex)
+#define __NR_sched_rr_get_interval_time64 423
+__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
+#endif
+
+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
+#define __NR_io_uring_setup 425
+__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
+#define __NR_io_uring_enter 426
+__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
+#define __NR_io_uring_register 427
+__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
#undef __NR_syscalls
-#define __NR_syscalls 295
+#define __NR_syscalls 428
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 298b2e197744..397810fa2d33 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -1486,9 +1486,73 @@ struct drm_i915_gem_context_param {
#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
#define I915_CONTEXT_DEFAULT_PRIORITY 0
#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
+ /*
+ * When using the following param, value should be a pointer to
+ * drm_i915_gem_context_param_sseu.
+ */
+#define I915_CONTEXT_PARAM_SSEU 0x7
__u64 value;
};
+/**
+ * Context SSEU programming
+ *
+ * It may be necessary, for either functional or performance reasons, to
+ * configure a context to run with a reduced number of SSEU (where SSEU
+ * stands for Slice/Sub-slice/EU).
+ *
+ * This is done by configuring SSEU configuration using the below
+ * @struct drm_i915_gem_context_param_sseu for every supported engine which
+ * userspace intends to use.
+ *
+ * Not all GPUs or engines support this functionality, in which case an
+ * error code of -ENODEV will be returned.
+ *
+ * Also, the flexibility of possible SSEU configuration permutations varies
+ * between GPU generations and is subject to software-imposed limitations.
+ * Requesting an unsupported combination will return an error code of -EINVAL.
+ *
+ * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
+ * favour of a single global setting.
+ */
+struct drm_i915_gem_context_param_sseu {
+ /*
+ * Engine class & instance to be configured or queried.
+ */
+ __u16 engine_class;
+ __u16 engine_instance;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * Mask of slices to enable for the context. Valid values are a subset
+ * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+ */
+ __u64 slice_mask;
+
+ /*
+ * Mask of subslices to enable for the context. Valid values are a
+ * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
+ */
+ __u64 subslice_mask;
+
+ /*
+ * Minimum/Maximum number of EUs to enable per subslice for the
+ * context. min_eus_per_subslice must be less than or equal to
+ * max_eus_per_subslice.
+ */
+ __u16 min_eus_per_subslice;
+ __u16 max_eus_per_subslice;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 rsvd;
+};
+
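(Illustrative aside, not part of this patch: userspace would program this roughly as follows; the mask and EU values are placeholders, and using DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM here is an assumption.)

	struct drm_i915_gem_context_param_sseu sseu = {
		.engine_class    = 0,	/* e.g. render */
		.engine_instance = 0,
		.slice_mask      = 0x1,	/* subset of I915_PARAM_SLICE_MASK */
		.subslice_mask   = 0x3,	/* subset of I915_PARAM_SUBSLICE_MASK */
		.min_eus_per_subslice = 2,
		.max_eus_per_subslice = 8,
	};
	struct drm_i915_gem_context_param arg = {
		.ctx_id = ctx_id,	/* a previously created context */
		.param  = I915_CONTEXT_PARAM_SSEU,
		.value  = (__u64)(uintptr_t)&sseu,
	};

	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);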
enum drm_i915_oa_format {
I915_OA_FORMAT_A13 = 1, /* HSW only */
I915_OA_FORMAT_A29, /* HSW only */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 3c38ac9a92a7..929c8e537a14 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -502,16 +502,6 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- * Description
- * Push an element *value* in *map*. *flags* is one of:
- *
- * **BPF_EXIST**
- * If the queue/stack is full, the oldest element is removed to
- * make room for this.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ * *skb*, but gets socket from **struct bpf_sock_addr** context.
* Return
* An 8-byte long non-decreasing number.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ * *skb*, but gets socket from **struct bpf_sock_ops** context.
* Return
* An 8-byte long non-decreasing number.
*
@@ -2098,52 +2088,52 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded key press with *scancode*,
- * *toggle* value in the given *protocol*. The scancode will be
- * translated to a keycode using the rc keymap, and reported as
- * an input key down event. After a period a key up event is
- * generated. This period can be extended by calling either
- * **bpf_rc_keydown**\ () again with the same values, or calling
- * **bpf_rc_repeat**\ ().
+ * report a successfully decoded repeat key message. This delays
+ * the generation of a key up event for previously generated
+ * key down event.
*
- * Some protocols include a toggle bit, in case the button was
- * released and pressed again between consecutive scancodes.
+ * Some IR protocols like NEC have a special IR message for
+ * repeating last button, for when a button is held down.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
- * The *protocol* is the decoded protocol number (see
- * **enum rc_proto** for some predefined values).
- *
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded repeat key message. This delays
- * the generation of a key up event for previously generated
- * key down event.
+ * report a successfully decoded key press with *scancode*,
+ * *toggle* value in the given *protocol*. The scancode will be
+ * translated to a keycode using the rc keymap, and reported as
+ * an input key down event. After a period a key up event is
+ * generated. This period can be extended by calling either
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
*
- * Some IR protocols like NEC have a special IR message for
- * repeating last button, for when a button is held down.
+ * Some protocols include a toggle bit, in case the button was
+ * released and pressed again between consecutive scancodes.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
+ * The *protocol* is the decoded protocol number (see
+ * **enum rc_proto** for some predefined values).
+ *
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
* Description
* Return the cgroup v2 id of the socket associated with the *skb*.
* This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- * Description
- * Return id of cgroup v2 that is ancestor of cgroup associated
- * with the *skb* at the *ancestor_level*. The root cgroup is at
- * *ancestor_level* zero and each step down the hierarchy
- * increments the level. If *ancestor_level* == level of cgroup
- * associated with *skb*, then return value will be same as that
- * of **bpf_skb_cgroup_id**\ ().
- *
- * The helper is useful to implement policies based on cgroups
- * that are upper in hierarchy than immediate cgroup associated
- * with *skb*.
- *
- * The format of returned id and helper limitations are same as in
- * **bpf_skb_cgroup_id**\ ().
- * Return
- * The id is returned or 0 in case the id could not be retrieved.
- *
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
*
- * void* get_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
* Description
* Get the pointer to the local storage area.
* The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *skb* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *skb*, then return value will be same as that
+ * of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *skb*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_skb_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * Description
+ * Push an element *value* in *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * If the queue/stack is full, the oldest element is
+ * removed to make room for this.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_map_pop_elem(struct bpf_map *map, void *value)
* Description
* Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
* Return
* 0
*
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * Description
+ * Acquire a spinlock represented by the pointer *lock*, which is
+ * stored as part of a value of a map. Taking the lock allows to
+ * safely update the rest of the fields in that value. The
+ * spinlock can (and must) later be released with a call to
+ * **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ * Spinlocks in BPF programs come with a number of restrictions
+ * and constraints:
+ *
+ * * **bpf_spin_lock** objects are only allowed inside maps of
+ * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ * list could be extended in the future).
+ * * BTF description of the map is mandatory.
+ * * The BPF program can take ONE lock at a time, since taking two
+ * or more could cause deadlocks.
+ * * Only one **struct bpf_spin_lock** is allowed per map element.
+ * * When the lock is taken, calls (either BPF to BPF or helpers)
+ * are not allowed.
+ * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ * allowed inside a spinlock-ed region.
+ * * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ * the lock, on all execution paths, before it returns.
+ * * The BPF program can access **struct bpf_spin_lock** only via
+ * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ * helpers. Loading or storing data into the **struct
+ * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ * * To use the **bpf_spin_lock**\ () helper, the BTF description
+ * of the map value must be a struct and have **struct
+ * bpf_spin_lock** *anyname*\ **;** field at the top level.
+ * Nested lock inside another struct is not allowed.
+ * * The **struct bpf_spin_lock** *lock* field in a map value must
+ * be aligned on a multiple of 4 bytes in that value.
+ * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ * the **bpf_spin_lock** field to user space.
+ * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ * a BPF program, do not update the **bpf_spin_lock** field.
+ * * **bpf_spin_lock** cannot be on the stack or inside a
+ * networking packet (it can only be inside of a map value).
+ * * **bpf_spin_lock** is available to root only.
+ * * Tracing programs and socket filter programs cannot use
+ * **bpf_spin_lock**\ () due to insufficient preemption checks
+ * (but this may change in the future).
+ * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ * Return
+ * 0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * Description
+ * Release the *lock* previously locked by a call to
+ * **bpf_spin_lock**\ (\ *lock*\ ).
+ * Return
+ * 0
+ *
* struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_sock** pointer such
- * that all the fields in bpf_sock can be accessed.
+ * that all the fields in this **bpf_sock** can be accessed.
* Return
- * A **struct bpf_sock** pointer on success, or NULL in
+ * A **struct bpf_sock** pointer on success, or **NULL** in
* case of failure.
*
* struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_tcp_sock** pointer from a
* **struct bpf_sock** pointer.
- *
* Return
- * A **struct bpf_tcp_sock** pointer on success, or NULL in
+ * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
* int bpf_skb_ecn_set_ce(struct sk_buf *skb)
- * Description
- * Sets ECN of IP header to ce (congestion encountered) if
- * current value is ect (ECN capable). Works with IPv6 and IPv4.
- * Return
- * 1 if set, 0 if not set.
+ * Description
+ * Set ECN (Explicit Congestion Notification) field of IP header
+ * to **CE** (Congestion Encountered) if current value is **ECT**
+ * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ * and IPv4.
+ * Return
+ * 1 if the **CE** flag is set (either by the current helper call
+ * or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ * Description
+ * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ * **bpf_sk_release**\ () is unnecessary and not allowed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
*/
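
For illustration, a minimal sketch of a program using the two spin lock
helpers documented above. It assumes the selftests-era bpf_helpers.h
wrappers and elides the BTF annotation that ties the value type to the
map (mandatory per the rules above); all names are illustrative:

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC(), helper wrappers; assumed available */

struct counter_val {
	struct bpf_spin_lock lock;	/* top-level field, 4-byte aligned */
	__u64 count;
};

struct bpf_map_def SEC("maps") counters = {
	.type        = BPF_MAP_TYPE_ARRAY,	/* HASH is the other allowed type */
	.key_size    = sizeof(__u32),
	.value_size  = sizeof(struct counter_val),
	.max_entries = 1,
};

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	struct counter_val *val;
	__u32 key = 0;

	val = bpf_map_lookup_elem(&counters, &key);
	if (!val)
		return 1;

	bpf_spin_lock(&val->lock);
	val->count++;			/* no helper or BPF-to-BPF calls here */
	bpf_spin_unlock(&val->lock);	/* reached on every path before return */
	return 1;
}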
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2465,7 +2530,8 @@ union bpf_attr {
FN(spin_unlock), \
FN(sk_fullsock), \
FN(tcp_sock), \
- FN(skb_ecn_set_ce),
+ FN(skb_ecn_set_ce), \
+ FN(get_listener_sock),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
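
A hedged sketch combining the socket helpers documented above in a
cgroup_skb egress program (assumes the skb->sk field and selftests-era
helper wrappers; the threshold and names are illustrative):

SEC("cgroup_skb/egress")
int inspect_egress(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct bpf_tcp_sock *tp;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);	/* NULL unless a full socket */
	if (!sk)
		return 1;

	tp = bpf_tcp_sock(sk);		/* NULL on failure */
	if (tp && tp->snd_cwnd < 10)
		bpf_skb_ecn_set_ce(skb);	/* CE-mark only if ECT */
	return 1;
}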
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
index 6448cdd9a350..a2f8658f1c55 100644
--- a/tools/include/uapi/linux/fcntl.h
+++ b/tools/include/uapi/linux/fcntl.h
@@ -41,6 +41,7 @@
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
#define F_SEAL_WRITE 0x0008 /* prevent writes */
+#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
/* (1U << 31) is reserved for signed error codes */
/*
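
A minimal userspace sketch of the new seal, assuming a memfd created
with sealing allowed; existing writable mappings keep working, while
new write() calls and new writable mappings are refused:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int sealed_memfd(size_t size)
{
	int fd = memfd_create("demo", MFD_ALLOW_SEALING);

	if (fd < 0)
		return -1;
	if (ftruncate(fd, size) < 0 ||
	    fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* still readable; future writes are sealed off */
}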
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index a55cb8b10165..e7ad9d350a28 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -292,10 +292,11 @@ struct sockaddr_in {
#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
/* Defines for Multicast INADDR */
-#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
-#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
-#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
-#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
+#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
+#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
+#define INADDR_ALLSNOOPERS_GROUP 0xe000006aU /* 224.0.0.106 */
+#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
#endif
/* <asm/byteorder.h> contains the htonl type stuff.. */
diff --git a/tools/include/uapi/linux/lirc.h b/tools/include/uapi/linux/lirc.h
index f189931042a7..45fcbf99d72e 100644
--- a/tools/include/uapi/linux/lirc.h
+++ b/tools/include/uapi/linux/lirc.h
@@ -134,6 +134,12 @@
#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32)
/*
+ * Return the recording timeout, which is either set by
+ * the ioctl LIRC_SET_REC_TIMEOUT or by the kernel after setting the protocols.
+ */
+#define LIRC_GET_REC_TIMEOUT _IOR('i', 0x00000024, __u32)
+
+/*
* struct lirc_scancode - decoded scancode with protocol for use with
* LIRC_MODE_SCANCODE
*
@@ -186,6 +192,9 @@ struct lirc_scancode {
* @RC_PROTO_XMP: XMP protocol
* @RC_PROTO_CEC: CEC protocol
* @RC_PROTO_IMON: iMon Pad protocol
+ * @RC_PROTO_RCMM12: RC-MM protocol 12 bits
+ * @RC_PROTO_RCMM24: RC-MM protocol 24 bits
+ * @RC_PROTO_RCMM32: RC-MM protocol 32 bits
*/
enum rc_proto {
RC_PROTO_UNKNOWN = 0,
@@ -212,6 +221,9 @@ enum rc_proto {
RC_PROTO_XMP = 21,
RC_PROTO_CEC = 22,
RC_PROTO_IMON = 23,
+ RC_PROTO_RCMM12 = 24,
+ RC_PROTO_RCMM24 = 25,
+ RC_PROTO_RCMM32 = 26,
};
#endif
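
A small sketch of querying the new ioctl, assuming a /dev/lirc0 device
node and that the timeout is reported in microseconds:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/lirc.h>

int main(void)
{
	__u32 timeout;
	int fd = open("/dev/lirc0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, LIRC_GET_REC_TIMEOUT, &timeout) == 0)
		printf("rec timeout: %u us\n", timeout);
	close(fd);
	return 0;
}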
diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h
index d0f515d53299..fc1a64c3447b 100644
--- a/tools/include/uapi/linux/mman.h
+++ b/tools/include/uapi/linux/mman.h
@@ -12,6 +12,10 @@
#define OVERCOMMIT_ALWAYS 1
#define OVERCOMMIT_NEVER 2
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+
/*
* Huge page size encoding when MAP_HUGETLB is specified, and a huge page
* size other than the default is desired. See hugetlb_encode.h.
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 404d4b9ffe76..df1153cea0b7 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -32,6 +32,7 @@
#ifndef __KERNEL__
#include <stdlib.h>
+#include <time.h>
#endif
/*
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
index 512306a37531..0f257139b003 100644
--- a/tools/io_uring/io_uring-bench.c
+++ b/tools/io_uring/io_uring-bench.c
@@ -32,10 +32,6 @@
#include "liburing.h"
#include "barrier.h"
-#ifndef IOCQE_FLAG_CACHEHIT
-#define IOCQE_FLAG_CACHEHIT (1U << 0)
-#endif
-
#define min(a, b) ((a < b) ? (a) : (b))
struct io_sq_ring {
@@ -85,7 +81,6 @@ struct submitter {
unsigned long reaps;
unsigned long done;
unsigned long calls;
- unsigned long cachehit, cachemiss;
volatile int finish;
__s32 *fds;
@@ -270,10 +265,6 @@ static int reap_events(struct submitter *s)
return -1;
}
}
- if (cqe->flags & IOCQE_FLAG_CACHEHIT)
- s->cachehit++;
- else
- s->cachemiss++;
reaped++;
head++;
} while (1);
@@ -489,7 +480,7 @@ static void file_depths(char *buf)
int main(int argc, char *argv[])
{
struct submitter *s = &submitters[0];
- unsigned long done, calls, reap, cache_hit, cache_miss;
+ unsigned long done, calls, reap;
int err, i, flags, fd;
char *fdepths;
void *ret;
@@ -569,44 +560,29 @@ int main(int argc, char *argv[])
pthread_create(&s->thread, NULL, submitter_fn, s);
fdepths = malloc(8 * s->nr_files);
- cache_hit = cache_miss = reap = calls = done = 0;
+ reap = calls = done = 0;
do {
unsigned long this_done = 0;
unsigned long this_reap = 0;
unsigned long this_call = 0;
- unsigned long this_cache_hit = 0;
- unsigned long this_cache_miss = 0;
unsigned long rpc = 0, ipc = 0;
- double hit = 0.0;
sleep(1);
this_done += s->done;
this_call += s->calls;
this_reap += s->reaps;
- this_cache_hit += s->cachehit;
- this_cache_miss += s->cachemiss;
- if (this_cache_hit && this_cache_miss) {
- unsigned long hits, total;
-
- hits = this_cache_hit - cache_hit;
- total = hits + this_cache_miss - cache_miss;
- hit = (double) hits / (double) total;
- hit *= 100.0;
- }
if (this_call - calls) {
rpc = (this_done - done) / (this_call - calls);
ipc = (this_reap - reap) / (this_call - calls);
} else
rpc = ipc = -1;
file_depths(fdepths);
- printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s), Cachehit=%0.2f%%\n",
+ printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s)\n",
this_done - done, rpc, ipc, s->inflight,
- fdepths, hit);
+ fdepths);
done = this_done;
calls = this_call;
reap = this_reap;
- cache_hit = s->cachehit;
- cache_miss = s->cachemiss;
} while (!finish);
pthread_join(s->thread, &ret);
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index 4db74758c674..fecb78afea3f 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -1,3 +1,4 @@
libbpf_version.h
FEATURE-DUMP.libbpf
test_libbpf
+libbpf.so.*
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index a05c43468bd0..8e7c56e9590f 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -3,7 +3,7 @@
BPF_VERSION = 0
BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 1
+BPF_EXTRAVERSION = 2
MAKEFLAGS += --no-print-directory
@@ -79,8 +79,6 @@ export prefix libdir src obj
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-LIB_FILE = libbpf.a libbpf.so
-
VERSION = $(BPF_VERSION)
PATCHLEVEL = $(BPF_PATCHLEVEL)
EXTRAVERSION = $(BPF_EXTRAVERSION)
@@ -88,7 +86,10 @@ EXTRAVERSION = $(BPF_EXTRAVERSION)
OBJ = $@
N =
-LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+
+LIB_TARGET = libbpf.a libbpf.so.$(LIBBPF_VERSION)
+LIB_FILE = libbpf.a libbpf.so*
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
@@ -128,16 +129,18 @@ all:
export srctree OUTPUT CC LD CFLAGS V
include $(srctree)/tools/build/Makefile.include
-BPF_IN := $(OUTPUT)libbpf-in.o
-LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
-VERSION_SCRIPT := libbpf.map
+BPF_IN := $(OUTPUT)libbpf-in.o
+VERSION_SCRIPT := libbpf.map
+
+LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
+LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
-CMD_TARGETS = $(LIB_FILE)
+CMD_TARGETS = $(LIB_TARGET)
CXX_TEST_TARGET = $(OUTPUT)test_libbpf
@@ -147,7 +150,8 @@ endif
TARGETS = $(CMD_TARGETS)
-all: fixdep all_cmd
+all: fixdep
+ $(Q)$(MAKE) all_cmd
all_cmd: $(CMD_TARGETS) check
@@ -169,9 +173,13 @@ $(BPF_IN): force elfdep bpfdep
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf
-$(OUTPUT)libbpf.so: $(BPF_IN)
- $(QUIET_LINK)$(CC) --shared -Wl,--version-script=$(VERSION_SCRIPT) \
- $^ -o $@
+$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
+
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
+ $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
+ -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
+ @ln -sf $(@F) $(OUTPUT)libbpf.so
+ @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
$(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
@@ -191,6 +199,12 @@ check_abi: $(OUTPUT)libbpf.so
exit 1; \
fi
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
define do_install
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
@@ -199,14 +213,16 @@ define do_install
endef
install_lib: all_cmd
- $(call QUIET_INSTALL, $(LIB_FILE)) \
- $(call do_install,$(LIB_FILE),$(libdir_SQ))
+ $(call QUIET_INSTALL, $(LIB_TARGET)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
install_headers:
$(call QUIET_INSTALL, headers) \
$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
- $(call do_install,libbpf.h,$(prefix)/include/bpf,644);
- $(call do_install,btf.h,$(prefix)/include/bpf,644);
+ $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
+ $(call do_install,btf.h,$(prefix)/include/bpf,644); \
+ $(call do_install,xsk.h,$(prefix)/include/bpf,644);
install: install_lib
@@ -218,7 +234,7 @@ config-clean:
clean:
$(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
- *.o *~ *.a *.so .*.d .*.cmd LIBBPF-CFLAGS
+ *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
index 5788479384ca..cef7b77eab69 100644
--- a/tools/lib/bpf/README.rst
+++ b/tools/lib/bpf/README.rst
@@ -111,6 +111,7 @@ starting from ``0.0.1``.
Every time ABI is being changed, e.g. because a new symbol is added or
semantic of existing symbol is changed, ABI version should be bumped.
+The ABI version should be bumped at most once per kernel development cycle.
For example, if current state of ``libbpf.map`` is:
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 1b8d8cdd3575..cf119c9b6f27 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1602,16 +1602,12 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
/* Calculate type signature hash of ENUM. */
static __u32 btf_hash_enum(struct btf_type *t)
{
- struct btf_enum *member = (struct btf_enum *)(t + 1);
- __u32 vlen = BTF_INFO_VLEN(t->info);
- __u32 h = btf_hash_common(t);
- int i;
+ __u32 h;
- for (i = 0; i < vlen; i++) {
- h = hash_combine(h, member->name_off);
- h = hash_combine(h, member->val);
- member++;
- }
+ /* don't hash vlen and enum members to support enum fwd resolving */
+ h = hash_combine(0, t->name_off);
+ h = hash_combine(h, t->info & ~0xffff);
+ h = hash_combine(h, t->size);
return h;
}
@@ -1637,6 +1633,22 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
return true;
}
+static inline bool btf_is_enum_fwd(struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM &&
+ BTF_INFO_VLEN(t->info) == 0;
+}
+
+static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
+{
+ if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
+ return btf_equal_enum(t1, t2);
+ /* ignore vlen when comparing */
+ return t1->name_off == t2->name_off &&
+ (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
+ t1->size == t2->size;
+}
+
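
For context, the forward-declaration pattern this dedup change targets
looks like this at the C level (names illustrative; the fwd declaration
is a GNU extension):

/* Unit A sees only a forward declaration: its BTF ENUM record
 * carries vlen == 0 and no enumerators.
 */
enum net_state;
void track(enum net_state *s);

/* Unit B has the full definition: same name and size, vlen == 2.
 * btf_compat_enum() treats the two as compatible, so dedup can
 * resolve the fwd to the full enum.
 */
enum net_state { NET_DOWN, NET_UP };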
/*
* Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
* as referenced type IDs equivalence is established separately during type
@@ -1860,6 +1872,17 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
new_id = cand_node->type_id;
break;
}
+ if (d->opts.dont_resolve_fwds)
+ continue;
+ if (btf_compat_enum(t, cand)) {
+ if (btf_is_enum_fwd(t)) {
+ /* resolve fwd to full enum */
+ new_id = cand_node->type_id;
+ break;
+ }
+ /* resolve canonical enum fwd to full enum */
+ d->map[cand_node->type_id] = type_id;
+ }
}
break;
@@ -2084,7 +2107,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return fwd_kind == real_kind;
}
- if (cand_type->info != canon_type->info)
+ if (cand_kind != canon_kind)
return 0;
switch (cand_kind) {
@@ -2092,7 +2115,10 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return btf_equal_int(cand_type, canon_type);
case BTF_KIND_ENUM:
- return btf_equal_enum(cand_type, canon_type);
+ if (d->opts.dont_resolve_fwds)
+ return btf_equal_enum(cand_type, canon_type);
+ else
+ return btf_compat_enum(cand_type, canon_type);
case BTF_KIND_FWD:
return btf_equal_common(cand_type, canon_type);
@@ -2103,6 +2129,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ if (cand_type->info != canon_type->info)
+ return 0;
return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
case BTF_KIND_ARRAY: {
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index f5eb60379c8d..11c25d9ea431 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -112,6 +112,11 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
struct bpf_capabilities {
/* v4.14: kernel support for program & map names. */
__u32 name:1;
@@ -622,7 +627,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
bool strict = !(flags & MAPS_RELAX_COMPAT);
int i, map_idx, map_def_sz, nr_maps = 0;
Elf_Scn *scn;
- Elf_Data *data;
+ Elf_Data *data = NULL;
Elf_Data *symbols = obj->efile.symbols;
if (obj->efile.maps_shndx < 0)
@@ -835,10 +840,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
obj->efile.maps_shndx = idx;
else if (strcmp(name, BTF_ELF_SEC) == 0) {
obj->btf = btf__new(data->d_buf, data->d_size);
- if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
+ if (IS_ERR(obj->btf)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_ELF_SEC, PTR_ERR(obj->btf));
obj->btf = NULL;
+ continue;
+ }
+ err = btf__load(obj->btf);
+ if (err) {
+ pr_warning("Error loading %s into kernel: %d. Ignored and continue.\n",
+ BTF_ELF_SEC, err);
+ btf__free(obj->btf);
+ obj->btf = NULL;
+ err = 0;
}
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
btf_ext_data = data;
@@ -2997,3 +3011,249 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
ring_buffer_write_tail(header, data_tail);
return ret;
}
+
+struct bpf_prog_info_array_desc {
+ int array_offset; /* e.g. offset of jited_prog_insns */
+ int count_offset; /* e.g. offset of jited_prog_len */
+ int size_offset; /* > 0: offset of rec size,
+ * < 0: fix size of -size_offset
+ */
+};
+
+static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
+ [BPF_PROG_INFO_JITED_INSNS] = {
+ offsetof(struct bpf_prog_info, jited_prog_insns),
+ offsetof(struct bpf_prog_info, jited_prog_len),
+ -1,
+ },
+ [BPF_PROG_INFO_XLATED_INSNS] = {
+ offsetof(struct bpf_prog_info, xlated_prog_insns),
+ offsetof(struct bpf_prog_info, xlated_prog_len),
+ -1,
+ },
+ [BPF_PROG_INFO_MAP_IDS] = {
+ offsetof(struct bpf_prog_info, map_ids),
+ offsetof(struct bpf_prog_info, nr_map_ids),
+ -(int)sizeof(__u32),
+ },
+ [BPF_PROG_INFO_JITED_KSYMS] = {
+ offsetof(struct bpf_prog_info, jited_ksyms),
+ offsetof(struct bpf_prog_info, nr_jited_ksyms),
+ -(int)sizeof(__u64),
+ },
+ [BPF_PROG_INFO_JITED_FUNC_LENS] = {
+ offsetof(struct bpf_prog_info, jited_func_lens),
+ offsetof(struct bpf_prog_info, nr_jited_func_lens),
+ -(int)sizeof(__u32),
+ },
+ [BPF_PROG_INFO_FUNC_INFO] = {
+ offsetof(struct bpf_prog_info, func_info),
+ offsetof(struct bpf_prog_info, nr_func_info),
+ offsetof(struct bpf_prog_info, func_info_rec_size),
+ },
+ [BPF_PROG_INFO_LINE_INFO] = {
+ offsetof(struct bpf_prog_info, line_info),
+ offsetof(struct bpf_prog_info, nr_line_info),
+ offsetof(struct bpf_prog_info, line_info_rec_size),
+ },
+ [BPF_PROG_INFO_JITED_LINE_INFO] = {
+ offsetof(struct bpf_prog_info, jited_line_info),
+ offsetof(struct bpf_prog_info, nr_jited_line_info),
+ offsetof(struct bpf_prog_info, jited_line_info_rec_size),
+ },
+ [BPF_PROG_INFO_PROG_TAGS] = {
+ offsetof(struct bpf_prog_info, prog_tags),
+ offsetof(struct bpf_prog_info, nr_prog_tags),
+ -(int)sizeof(__u8) * BPF_TAG_SIZE,
+ },
+
+};
+
+static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
+{
+ __u32 *array = (__u32 *)info;
+
+ if (offset >= 0)
+ return array[offset / sizeof(__u32)];
+ return -(int)offset;
+}
+
+static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
+{
+ __u64 *array = (__u64 *)info;
+
+ if (offset >= 0)
+ return array[offset / sizeof(__u64)];
+ return -(int)offset;
+}
+
+static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
+ __u32 val)
+{
+ __u32 *array = (__u32 *)info;
+
+ if (offset >= 0)
+ array[offset / sizeof(__u32)] = val;
+}
+
+static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
+ __u64 val)
+{
+ __u64 *array = (__u64 *)info;
+
+ if (offset >= 0)
+ array[offset / sizeof(__u64)] = val;
+}
+
+struct bpf_prog_info_linear *
+bpf_program__get_prog_info_linear(int fd, __u64 arrays)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 data_len = 0;
+ int i, err;
+ void *ptr;
+
+ if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
+ return ERR_PTR(-EINVAL);
+
+ /* step 1: get array dimensions */
+ err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (err) {
+ pr_debug("can't get prog info: %s", strerror(errno));
+ return ERR_PTR(-EFAULT);
+ }
+
+ /* step 2: calculate total size of all arrays */
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ bool include_array = (arrays & (1UL << i)) > 0;
+ struct bpf_prog_info_array_desc *desc;
+ __u32 count, size;
+
+ desc = bpf_prog_info_array_desc + i;
+
+ /* kernel is too old to support this field */
+ if (info_len < desc->array_offset + sizeof(__u32) ||
+ info_len < desc->count_offset + sizeof(__u32) ||
+ (desc->size_offset > 0 && info_len < desc->size_offset))
+ include_array = false;
+
+ if (!include_array) {
+ arrays &= ~(1UL << i); /* clear the bit */
+ continue;
+ }
+
+ count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+ size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+
+ data_len += count * size;
+ }
+
+ /* step 3: allocate contiguous memory */
+ data_len = roundup(data_len, sizeof(__u64));
+ info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
+ if (!info_linear)
+ return ERR_PTR(-ENOMEM);
+
+ /* step 4: fill data to info_linear->info */
+ info_linear->arrays = arrays;
+ memset(&info_linear->info, 0, sizeof(info));
+ ptr = info_linear->data;
+
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ struct bpf_prog_info_array_desc *desc;
+ __u32 count, size;
+
+ if ((arrays & (1UL << i)) == 0)
+ continue;
+
+ desc = bpf_prog_info_array_desc + i;
+ count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+ size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+ bpf_prog_info_set_offset_u32(&info_linear->info,
+ desc->count_offset, count);
+ bpf_prog_info_set_offset_u32(&info_linear->info,
+ desc->size_offset, size);
+ bpf_prog_info_set_offset_u64(&info_linear->info,
+ desc->array_offset,
+ ptr_to_u64(ptr));
+ ptr += count * size;
+ }
+
+ /* step 5: call syscall again to get required arrays */
+ err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
+ if (err) {
+ pr_debug("can't get prog info: %s", strerror(errno));
+ free(info_linear);
+ return ERR_PTR(-EFAULT);
+ }
+
+ /* step 6: verify the data */
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ struct bpf_prog_info_array_desc *desc;
+ __u32 v1, v2;
+
+ if ((arrays & (1UL << i)) == 0)
+ continue;
+
+ desc = bpf_prog_info_array_desc + i;
+ v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+ v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ desc->count_offset);
+ if (v1 != v2)
+ pr_warning("%s: mismatch in element count\n", __func__);
+
+ v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+ v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ desc->size_offset);
+ if (v1 != v2)
+ pr_warning("%s: mismatch in rec size\n", __func__);
+ }
+
+ /* step 7: update info_len and data_len */
+ info_linear->info_len = sizeof(struct bpf_prog_info);
+ info_linear->data_len = data_len;
+
+ return info_linear;
+}
+
+void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
+{
+ int i;
+
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ struct bpf_prog_info_array_desc *desc;
+ __u64 addr, offs;
+
+ if ((info_linear->arrays & (1UL << i)) == 0)
+ continue;
+
+ desc = bpf_prog_info_array_desc + i;
+ addr = bpf_prog_info_read_offset_u64(&info_linear->info,
+ desc->array_offset);
+ offs = addr - ptr_to_u64(info_linear->data);
+ bpf_prog_info_set_offset_u64(&info_linear->info,
+ desc->array_offset, offs);
+ }
+}
+
+void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
+{
+ int i;
+
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ struct bpf_prog_info_array_desc *desc;
+ __u64 addr, offs;
+
+ if ((info_linear->arrays & (1UL << i)) == 0)
+ continue;
+
+ desc = bpf_prog_info_array_desc + i;
+ offs = bpf_prog_info_read_offset_u64(&info_linear->info,
+ desc->array_offset);
+ addr = offs + ptr_to_u64(info_linear->data);
+ bpf_prog_info_set_offset_u64(&info_linear->info,
+ desc->array_offset, addr);
+ }
+}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index b4652aa1a58a..c70785cc8ef5 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -10,6 +10,7 @@
#ifndef __LIBBPF_LIBBPF_H
#define __LIBBPF_LIBBPF_H
+#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
@@ -377,6 +378,69 @@ LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
enum bpf_prog_type prog_type, __u32 ifindex);
+/*
+ * Get bpf_prog_info in continuous memory
+ *
+ * struct bpf_prog_info has multiple arrays. The user has the option to
+ * choose which arrays to fetch from the kernel. The following APIs
+ * provide a uniform way to fetch this data. All arrays in bpf_prog_info
+ * are stored in a single contiguous memory region. This makes it easy
+ * to store the info in a file.
+ *
+ * Before writing bpf_prog_info_linear to files, it is necessary to
+ * translate pointers in bpf_prog_info to offsets. Helper functions
+ * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
+ * are introduced to switch between pointers and offsets.
+ *
+ * Examples:
+ * # To fetch map_ids and prog_tags:
+ * __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
+ * (1UL << BPF_PROG_INFO_PROG_TAGS);
+ * struct bpf_prog_info_linear *info_linear =
+ * bpf_program__get_prog_info_linear(fd, arrays);
+ *
+ * # To save data in file
+ * bpf_program__bpil_addr_to_offs(info_linear);
+ * write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
+ *
+ * # To read data from file
+ * read(f, info_linear, <proper_size>);
+ * bpf_program__bpil_offs_to_addr(info_linear);
+ */
+enum bpf_prog_info_array {
+ BPF_PROG_INFO_FIRST_ARRAY = 0,
+ BPF_PROG_INFO_JITED_INSNS = 0,
+ BPF_PROG_INFO_XLATED_INSNS,
+ BPF_PROG_INFO_MAP_IDS,
+ BPF_PROG_INFO_JITED_KSYMS,
+ BPF_PROG_INFO_JITED_FUNC_LENS,
+ BPF_PROG_INFO_FUNC_INFO,
+ BPF_PROG_INFO_LINE_INFO,
+ BPF_PROG_INFO_JITED_LINE_INFO,
+ BPF_PROG_INFO_PROG_TAGS,
+ BPF_PROG_INFO_LAST_ARRAY,
+};
+
+struct bpf_prog_info_linear {
+ /* size of struct bpf_prog_info, when the tool is compiled */
+ __u32 info_len;
+ /* total bytes allocated for data, rounded up to 8 bytes */
+ __u32 data_len;
+ /* which arrays are included in data */
+ __u64 arrays;
+ struct bpf_prog_info info;
+ __u8 data[];
+};
+
+LIBBPF_API struct bpf_prog_info_linear *
+bpf_program__get_prog_info_linear(int fd, __u64 arrays);
+
+LIBBPF_API void
+bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
+
+LIBBPF_API void
+bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
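
A hedged end-to-end sketch of the API described in the comment above,
writing the map IDs array out through a file descriptor (error handling
abbreviated; assumes the installed bpf/libbpf.h header):

#include <stdlib.h>
#include <unistd.h>
#include <bpf/libbpf.h>

int dump_prog_info(int prog_fd, int out_fd)
{
	__u64 arrays = 1UL << BPF_PROG_INFO_MAP_IDS;
	struct bpf_prog_info_linear *il;

	il = bpf_program__get_prog_info_linear(prog_fd, arrays);
	if (libbpf_get_error(il))
		return -1;

	bpf_program__bpil_addr_to_offs(il);	/* pointers -> offsets */
	if (write(out_fd, il, sizeof(*il) + il->data_len) < 0) {
		free(il);
		return -1;
	}
	free(il);
	return 0;
}

A reader of the file would do the reverse: read the blob back in and
call bpf_program__bpil_offs_to_addr() before dereferencing the arrays.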
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 778a26702a70..f3ce50500cf2 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -153,4 +153,7 @@ LIBBPF_0.0.2 {
xsk_socket__delete;
xsk_umem__fd;
xsk_socket__fd;
+ bpf_program__get_prog_info_linear;
+ bpf_program__bpil_addr_to_offs;
+ bpf_program__bpil_offs_to_addr;
} LIBBPF_0.0.1;
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index f98ac82c9aea..8d0078b65486 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -126,8 +126,8 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg,
cfg->frame_headroom = usr_cfg->frame_headroom;
}
-static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
- const struct xsk_socket_config *usr_cfg)
+static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
+ const struct xsk_socket_config *usr_cfg)
{
if (!usr_cfg) {
cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
@@ -135,14 +135,19 @@ static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
cfg->libbpf_flags = 0;
cfg->xdp_flags = 0;
cfg->bind_flags = 0;
- return;
+ return 0;
}
+ if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
+ return -EINVAL;
+
cfg->rx_size = usr_cfg->rx_size;
cfg->tx_size = usr_cfg->tx_size;
cfg->libbpf_flags = usr_cfg->libbpf_flags;
cfg->xdp_flags = usr_cfg->xdp_flags;
cfg->bind_flags = usr_cfg->bind_flags;
+
+ return 0;
}
int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
@@ -557,7 +562,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
}
strncpy(xsk->ifname, ifname, IFNAMSIZ);
- xsk_set_xdp_socket_config(&xsk->config, usr_config);
+ err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
+ if (err)
+ goto out_socket;
if (rx) {
err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
index d463761a58f4..988587840c80 100644
--- a/tools/lib/traceevent/event-parse-api.c
+++ b/tools/lib/traceevent/event-parse-api.c
@@ -9,6 +9,22 @@
#include "event-utils.h"
/**
+ * tep_get_event - returns the event with the given index
+ * @tep: a handle to the tep_handle
+ * @index: index of the requested event, in the range 0 .. nr_events - 1
+ *
+ * This returns a pointer to the element of the events array with the
+ * given index. If @tep is NULL, or @index is not in the range
+ * 0 .. nr_events - 1, NULL is returned.
+ */
+struct tep_event *tep_get_event(struct tep_handle *tep, int index)
+{
+ if (tep && tep->events && index >= 0 && index < tep->nr_events)
+ return tep->events[index];
+
+ return NULL;
+}
+
+/**
* tep_get_first_event - returns the first event in the events array
* @tep: a handle to the tep_handle
*
@@ -17,10 +33,7 @@
*/
struct tep_event *tep_get_first_event(struct tep_handle *tep)
{
- if (tep && tep->events)
- return tep->events[0];
-
- return NULL;
+ return tep_get_event(tep, 0);
}
/**
@@ -32,7 +45,7 @@ struct tep_event *tep_get_first_event(struct tep_handle *tep)
*/
int tep_get_events_count(struct tep_handle *tep)
{
- if(tep)
+ if (tep)
return tep->nr_events;
return 0;
}
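
Taken together, the accessors above allow iteration without reaching
into tep_handle internals; a hypothetical caller might do:

#include <stdio.h>
#include "event-parse.h"

static void list_events(struct tep_handle *tep)
{
	int i, count = tep_get_events_count(tep);

	for (i = 0; i < count; i++) {
		struct tep_event *event = tep_get_event(tep, i);

		if (event)
			printf("%s:%s\n", event->system, event->name);
	}
}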
@@ -43,19 +56,47 @@ int tep_get_events_count(struct tep_handle *tep)
* @flag: flag, or combination of flags to be set
* can be any combination from enum tep_flag
*
- * This sets a flag or mbination of flags from enum tep_flag
- */
+ * This sets a flag or combination of flags from enum tep_flag
+ */
void tep_set_flag(struct tep_handle *tep, int flag)
{
- if(tep)
+ if (tep)
tep->flags |= flag;
}
-unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data)
+/**
+ * tep_clear_flag - clear event parser flag
+ * @tep: a handle to the tep_handle
+ * @flag: flag to be cleared
+ *
+ * This clears a tep flag
+ */
+void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag)
+{
+ if (tep)
+ tep->flags &= ~flag;
+}
+
+/**
+ * tep_test_flag - check the state of event parser flag
+ * @tep: a handle to the tep_handle
+ * @flag: flag to be checked
+ *
+ * This returns the state of the requested tep flag.
+ * Returns: true if the flag is set, false otherwise.
+ */
+bool tep_test_flag(struct tep_handle *tep, enum tep_flag flag)
+{
+ if (tep)
+ return tep->flags & flag;
+ return false;
+}
+
+unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data)
{
unsigned short swap;
- if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+ if (!tep || tep->host_bigendian == tep->file_bigendian)
return data;
swap = ((data & 0xffULL) << 8) |
@@ -64,11 +105,11 @@ unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data)
return swap;
}
-unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data)
+unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data)
{
unsigned int swap;
- if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+ if (!tep || tep->host_bigendian == tep->file_bigendian)
return data;
swap = ((data & 0xffULL) << 24) |
@@ -80,11 +121,11 @@ unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data)
}
unsigned long long
-tep_data2host8(struct tep_handle *pevent, unsigned long long data)
+tep_data2host8(struct tep_handle *tep, unsigned long long data)
{
unsigned long long swap;
- if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+ if (!tep || tep->host_bigendian == tep->file_bigendian)
return data;
swap = ((data & 0xffULL) << 56) |
@@ -101,175 +142,232 @@ tep_data2host8(struct tep_handle *pevent, unsigned long long data)
/**
* tep_get_header_page_size - get size of the header page
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
*
* This returns size of the header page
- * If @pevent is NULL, 0 is returned.
+ * If @tep is NULL, 0 is returned.
+ */
+int tep_get_header_page_size(struct tep_handle *tep)
+{
+ if (tep)
+ return tep->header_page_size_size;
+ return 0;
+}
+
+/**
+ * tep_get_header_timestamp_size - get size of the timestamp in the header page
+ * @tep: a handle to the tep_handle
+ *
+ * This returns the size of the timestamp in the header page
+ * If @tep is NULL, 0 is returned.
*/
-int tep_get_header_page_size(struct tep_handle *pevent)
+int tep_get_header_timestamp_size(struct tep_handle *tep)
{
- if(pevent)
- return pevent->header_page_size_size;
+ if (tep)
+ return tep->header_page_ts_size;
return 0;
}
/**
* tep_get_cpus - get the number of CPUs
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
*
* This returns the number of CPUs
- * If @pevent is NULL, 0 is returned.
+ * If @tep is NULL, 0 is returned.
*/
-int tep_get_cpus(struct tep_handle *pevent)
+int tep_get_cpus(struct tep_handle *tep)
{
- if(pevent)
- return pevent->cpus;
+ if (tep)
+ return tep->cpus;
return 0;
}
/**
* tep_set_cpus - set the number of CPUs
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
*
* This sets the number of CPUs
*/
-void tep_set_cpus(struct tep_handle *pevent, int cpus)
+void tep_set_cpus(struct tep_handle *tep, int cpus)
{
- if(pevent)
- pevent->cpus = cpus;
+ if (tep)
+ tep->cpus = cpus;
}
/**
- * tep_get_long_size - get the size of a long integer on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_get_long_size - get the size of a long integer on the traced machine
+ * @tep: a handle to the tep_handle
*
- * This returns the size of a long integer on the current machine
- * If @pevent is NULL, 0 is returned.
+ * This returns the size of a long integer on the traced machine
+ * If @tep is NULL, 0 is returned.
*/
-int tep_get_long_size(struct tep_handle *pevent)
+int tep_get_long_size(struct tep_handle *tep)
{
- if(pevent)
- return pevent->long_size;
+ if (tep)
+ return tep->long_size;
return 0;
}
/**
- * tep_set_long_size - set the size of a long integer on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_set_long_size - set the size of a long integer on the traced machine
+ * @tep: a handle to the tep_handle
* @size: size, in bytes, of a long integer
*
- * This sets the size of a long integer on the current machine
+ * This sets the size of a long integer on the traced machine
*/
-void tep_set_long_size(struct tep_handle *pevent, int long_size)
+void tep_set_long_size(struct tep_handle *tep, int long_size)
{
- if(pevent)
- pevent->long_size = long_size;
+ if (tep)
+ tep->long_size = long_size;
}
/**
- * tep_get_page_size - get the size of a memory page on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_get_page_size - get the size of a memory page on the traced machine
+ * @tep: a handle to the tep_handle
*
- * This returns the size of a memory page on the current machine
- * If @pevent is NULL, 0 is returned.
+ * This returns the size of a memory page on the traced machine
+ * If @tep is NULL, 0 is returned.
*/
-int tep_get_page_size(struct tep_handle *pevent)
+int tep_get_page_size(struct tep_handle *tep)
{
- if(pevent)
- return pevent->page_size;
+ if (tep)
+ return tep->page_size;
return 0;
}
/**
- * tep_set_page_size - set the size of a memory page on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_set_page_size - set the size of a memory page on the traced machine
+ * @tep: a handle to the tep_handle
* @_page_size: size of a memory page, in bytes
*
- * This sets the size of a memory page on the current machine
+ * This sets the size of a memory page on the traced machine
*/
-void tep_set_page_size(struct tep_handle *pevent, int _page_size)
+void tep_set_page_size(struct tep_handle *tep, int _page_size)
{
- if(pevent)
- pevent->page_size = _page_size;
+ if (tep)
+ tep->page_size = _page_size;
}
/**
- * tep_file_bigendian - get if the file is in big endian order
- * @pevent: a handle to the tep_handle
+ * tep_is_file_bigendian - return the endian of the file
+ * @tep: a handle to the tep_handle
*
- * This returns if the file is in big endian order
- * If @pevent is NULL, 0 is returned.
+ * This returns true if the file is in big endian order
+ * If @tep is NULL, false is returned.
*/
-int tep_file_bigendian(struct tep_handle *pevent)
+bool tep_is_file_bigendian(struct tep_handle *tep)
{
- if(pevent)
- return pevent->file_bigendian;
- return 0;
+ if (tep)
+ return (tep->file_bigendian == TEP_BIG_ENDIAN);
+ return false;
}
/**
* tep_set_file_bigendian - set if the file is in big endian order
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
* @endian: non zero, if the file is in big endian order
*
* This sets if the file is in big endian order
*/
-void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian)
+void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian)
{
- if(pevent)
- pevent->file_bigendian = endian;
+ if (tep)
+ tep->file_bigendian = endian;
}
/**
- * tep_is_host_bigendian - get if the order of the current host is big endian
- * @pevent: a handle to the tep_handle
+ * tep_is_local_bigendian - return the endian of the saved local machine
+ * @tep: a handle to the tep_handle
*
- * This gets if the order of the current host is big endian
- * If @pevent is NULL, 0 is returned.
+ * This returns true if the saved local machine in @tep is big endian.
+ * If @tep is NULL, false is returned.
*/
-int tep_is_host_bigendian(struct tep_handle *pevent)
+bool tep_is_local_bigendian(struct tep_handle *tep)
{
- if(pevent)
- return pevent->host_bigendian;
+ if (tep)
+ return (tep->host_bigendian == TEP_BIG_ENDIAN);
return 0;
}
/**
- * tep_set_host_bigendian - set the order of the local host
- * @pevent: a handle to the tep_handle
+ * tep_set_local_bigendian - set the stored local machine endian order
+ * @tep: a handle to the tep_handle
* @endian: non zero, if the local host has big endian order
*
- * This sets the order of the local host
+ * This sets the endian order for the local machine.
*/
-void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian)
+void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian)
{
- if(pevent)
- pevent->host_bigendian = endian;
+ if (tep)
+ tep->host_bigendian = endian;
}
/**
* tep_is_latency_format - get if the latency output format is configured
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
*
- * This gets if the latency output format is configured
- * If @pevent is NULL, 0 is returned.
+ * This returns true if the latency output format is configured
+ * If @tep is NULL, false is returned.
*/
-int tep_is_latency_format(struct tep_handle *pevent)
+bool tep_is_latency_format(struct tep_handle *tep)
{
- if(pevent)
- return pevent->latency_format;
- return 0;
+ if (tep)
+ return (tep->latency_format);
+ return false;
}
/**
* tep_set_latency_format - set the latency output format
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
* @lat: non zero for latency output format
*
* This sets the latency output format
*/
-void tep_set_latency_format(struct tep_handle *pevent, int lat)
+void tep_set_latency_format(struct tep_handle *tep, int lat)
+{
+ if (tep)
+ tep->latency_format = lat;
+}
+
+/**
+ * tep_is_old_format - get if an old kernel is used
+ * @tep: a handle to the tep_handle
+ *
+ * This returns true if an old kernel was used to generate the tracing
+ * events, or false if a new kernel was used. Old kernels did not have
+ * header page info.
+ * If @tep is NULL, false is returned.
+ */
+bool tep_is_old_format(struct tep_handle *tep)
+{
+ if (tep)
+ return tep->old_format;
+ return false;
+}
+
+/**
+ * tep_set_print_raw - set a flag to force print in raw format
+ * @tep: a handle to the tep_handle
+ * @print_raw: the new value of the print_raw flag
+ *
+ * This sets a flag to force print in raw format
+ */
+void tep_set_print_raw(struct tep_handle *tep, int print_raw)
+{
+ if (tep)
+ tep->print_raw = print_raw;
+}
+
+/**
+ * tep_set_test_filters - set a flag to test a filter string
+ * @tep: a handle to the tep_handle
+ * @test_filters: the new value of the test_filters flag
+ *
+ * This sets a flag to test a filter string. If this flag is set, when
+ * the tep_filter_add_filter_str() API is called, it will print the
+ * filter string instead of adding it.
+ */
+void tep_set_test_filters(struct tep_handle *tep, int test_filters)
{
- if(pevent)
- pevent->latency_format = lat;
+ if (tep)
+ tep->test_filters = test_filters;
}
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
index 35833ee32d6c..09aa142f7fdd 100644
--- a/tools/lib/traceevent/event-parse-local.h
+++ b/tools/lib/traceevent/event-parse-local.h
@@ -92,8 +92,8 @@ struct tep_handle {
void tep_free_event(struct tep_event *event);
void tep_free_format_field(struct tep_format_field *field);
-unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data);
-unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data);
-unsigned long long tep_data2host8(struct tep_handle *pevent, unsigned long long data);
+unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data);
+unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data);
+unsigned long long tep_data2host8(struct tep_handle *tep, unsigned long long data);
#endif /* _PARSE_EVENTS_INT_H */
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index abd4fa5d3088..b36b536a9fcb 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -148,14 +148,14 @@ struct cmdline_list {
int pid;
};
-static int cmdline_init(struct tep_handle *pevent)
+static int cmdline_init(struct tep_handle *tep)
{
- struct cmdline_list *cmdlist = pevent->cmdlist;
+ struct cmdline_list *cmdlist = tep->cmdlist;
struct cmdline_list *item;
struct tep_cmdline *cmdlines;
int i;
- cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count);
+ cmdlines = malloc(sizeof(*cmdlines) * tep->cmdline_count);
if (!cmdlines)
return -1;
@@ -169,15 +169,15 @@ static int cmdline_init(struct tep_handle *pevent)
free(item);
}
- qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+ qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
- pevent->cmdlines = cmdlines;
- pevent->cmdlist = NULL;
+ tep->cmdlines = cmdlines;
+ tep->cmdlist = NULL;
return 0;
}
-static const char *find_cmdline(struct tep_handle *pevent, int pid)
+static const char *find_cmdline(struct tep_handle *tep, int pid)
{
const struct tep_cmdline *comm;
struct tep_cmdline key;
@@ -185,13 +185,13 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid)
if (!pid)
return "<idle>";
- if (!pevent->cmdlines && cmdline_init(pevent))
+ if (!tep->cmdlines && cmdline_init(tep))
return "<not enough memory for cmdlines!>";
key.pid = pid;
- comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
- sizeof(*pevent->cmdlines), cmdline_cmp);
+ comm = bsearch(&key, tep->cmdlines, tep->cmdline_count,
+ sizeof(*tep->cmdlines), cmdline_cmp);
if (comm)
return comm->comm;
@@ -199,32 +199,32 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid)
}
/**
- * tep_pid_is_registered - return if a pid has a cmdline registered
- * @pevent: handle for the pevent
+ * tep_is_pid_registered - return if a pid has a cmdline registered
+ * @tep: a handle to the trace event parser context
* @pid: The pid to check if it has a cmdline registered with.
*
- * Returns 1 if the pid has a cmdline mapped to it
- * 0 otherwise.
+ * Returns true if the pid has a cmdline mapped to it
+ * false otherwise.
*/
-int tep_pid_is_registered(struct tep_handle *pevent, int pid)
+bool tep_is_pid_registered(struct tep_handle *tep, int pid)
{
const struct tep_cmdline *comm;
struct tep_cmdline key;
if (!pid)
- return 1;
+ return true;
- if (!pevent->cmdlines && cmdline_init(pevent))
- return 0;
+ if (!tep->cmdlines && cmdline_init(tep))
+ return false;
key.pid = pid;
- comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
- sizeof(*pevent->cmdlines), cmdline_cmp);
+ comm = bsearch(&key, tep->cmdlines, tep->cmdline_count,
+ sizeof(*tep->cmdlines), cmdline_cmp);
if (comm)
- return 1;
- return 0;
+ return true;
+ return false;
}
/*
@@ -232,10 +232,10 @@ int tep_pid_is_registered(struct tep_handle *pevent, int pid)
* we must add this pid. This is much slower than when cmdlines
* are added before the array is initialized.
*/
-static int add_new_comm(struct tep_handle *pevent,
+static int add_new_comm(struct tep_handle *tep,
const char *comm, int pid, bool override)
{
- struct tep_cmdline *cmdlines = pevent->cmdlines;
+ struct tep_cmdline *cmdlines = tep->cmdlines;
struct tep_cmdline *cmdline;
struct tep_cmdline key;
char *new_comm;
@@ -246,8 +246,8 @@ static int add_new_comm(struct tep_handle *pevent,
/* avoid duplicates */
key.pid = pid;
- cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
- sizeof(*pevent->cmdlines), cmdline_cmp);
+ cmdline = bsearch(&key, tep->cmdlines, tep->cmdline_count,
+ sizeof(*tep->cmdlines), cmdline_cmp);
if (cmdline) {
if (!override) {
errno = EEXIST;
@@ -264,37 +264,37 @@ static int add_new_comm(struct tep_handle *pevent,
return 0;
}
- cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1));
+ cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (tep->cmdline_count + 1));
if (!cmdlines) {
errno = ENOMEM;
return -1;
}
- cmdlines[pevent->cmdline_count].comm = strdup(comm);
- if (!cmdlines[pevent->cmdline_count].comm) {
+ cmdlines[tep->cmdline_count].comm = strdup(comm);
+ if (!cmdlines[tep->cmdline_count].comm) {
free(cmdlines);
errno = ENOMEM;
return -1;
}
- cmdlines[pevent->cmdline_count].pid = pid;
+ cmdlines[tep->cmdline_count].pid = pid;
- if (cmdlines[pevent->cmdline_count].comm)
- pevent->cmdline_count++;
+ if (cmdlines[tep->cmdline_count].comm)
+ tep->cmdline_count++;
- qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
- pevent->cmdlines = cmdlines;
+ qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+ tep->cmdlines = cmdlines;
return 0;
}
-static int _tep_register_comm(struct tep_handle *pevent,
+static int _tep_register_comm(struct tep_handle *tep,
const char *comm, int pid, bool override)
{
struct cmdline_list *item;
- if (pevent->cmdlines)
- return add_new_comm(pevent, comm, pid, override);
+ if (tep->cmdlines)
+ return add_new_comm(tep, comm, pid, override);
item = malloc(sizeof(*item));
if (!item)
@@ -309,17 +309,17 @@ static int _tep_register_comm(struct tep_handle *pevent,
return -1;
}
item->pid = pid;
- item->next = pevent->cmdlist;
+ item->next = tep->cmdlist;
- pevent->cmdlist = item;
- pevent->cmdline_count++;
+ tep->cmdlist = item;
+ tep->cmdline_count++;
return 0;
}
/**
* tep_register_comm - register a pid / comm mapping
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @comm: the command line to register
* @pid: the pid to map the command line to
*
@@ -327,14 +327,14 @@ static int _tep_register_comm(struct tep_handle *pevent,
* a given pid. The comm is duplicated. If a command with the same pid
 * already exists, -1 is returned and errno is set to EEXIST
*/
-int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
+int tep_register_comm(struct tep_handle *tep, const char *comm, int pid)
{
- return _tep_register_comm(pevent, comm, pid, false);
+ return _tep_register_comm(tep, comm, pid, false);
}
/**
* tep_override_comm - register a pid / comm mapping
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @comm: the command line to register
* @pid: the pid to map the command line to
*
@@ -342,19 +342,19 @@ int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
* a given pid. The comm is duplicated. If a command with the same pid
 * already exists, the command string is updated with the new one
*/
-int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid)
+int tep_override_comm(struct tep_handle *tep, const char *comm, int pid)
{
- if (!pevent->cmdlines && cmdline_init(pevent)) {
+ if (!tep->cmdlines && cmdline_init(tep)) {
errno = ENOMEM;
return -1;
}
- return _tep_register_comm(pevent, comm, pid, true);
+ return _tep_register_comm(tep, comm, pid, true);
}
-int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock)
+int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock)
{
- pevent->trace_clock = strdup(trace_clock);
- if (!pevent->trace_clock) {
+ tep->trace_clock = strdup(trace_clock);
+ if (!tep->trace_clock) {
errno = ENOMEM;
return -1;
}
@@ -408,18 +408,18 @@ static int func_bcmp(const void *a, const void *b)
return 1;
}
-static int func_map_init(struct tep_handle *pevent)
+static int func_map_init(struct tep_handle *tep)
{
struct func_list *funclist;
struct func_list *item;
struct func_map *func_map;
int i;
- func_map = malloc(sizeof(*func_map) * (pevent->func_count + 1));
+ func_map = malloc(sizeof(*func_map) * (tep->func_count + 1));
if (!func_map)
return -1;
- funclist = pevent->funclist;
+ funclist = tep->funclist;
i = 0;
while (funclist) {
@@ -432,34 +432,34 @@ static int func_map_init(struct tep_handle *pevent)
free(item);
}
- qsort(func_map, pevent->func_count, sizeof(*func_map), func_cmp);
+ qsort(func_map, tep->func_count, sizeof(*func_map), func_cmp);
/*
* Add a special record at the end.
*/
- func_map[pevent->func_count].func = NULL;
- func_map[pevent->func_count].addr = 0;
- func_map[pevent->func_count].mod = NULL;
+ func_map[tep->func_count].func = NULL;
+ func_map[tep->func_count].addr = 0;
+ func_map[tep->func_count].mod = NULL;
- pevent->func_map = func_map;
- pevent->funclist = NULL;
+ tep->func_map = func_map;
+ tep->funclist = NULL;
return 0;
}
static struct func_map *
-__find_func(struct tep_handle *pevent, unsigned long long addr)
+__find_func(struct tep_handle *tep, unsigned long long addr)
{
struct func_map *func;
struct func_map key;
- if (!pevent->func_map)
- func_map_init(pevent);
+ if (!tep->func_map)
+ func_map_init(tep);
key.addr = addr;
- func = bsearch(&key, pevent->func_map, pevent->func_count,
- sizeof(*pevent->func_map), func_bcmp);
+ func = bsearch(&key, tep->func_map, tep->func_count,
+ sizeof(*tep->func_map), func_bcmp);
return func;
}
@@ -472,15 +472,14 @@ struct func_resolver {
/**
* tep_set_function_resolver - set an alternative function resolver
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @resolver: function to be used
* @priv: resolver function private state.
*
* Some tools may have already a way to resolve kernel functions, allow them to
- * keep using it instead of duplicating all the entries inside
- * pevent->funclist.
+ * keep using it instead of duplicating all the entries inside tep->funclist.
*/
-int tep_set_function_resolver(struct tep_handle *pevent,
+int tep_set_function_resolver(struct tep_handle *tep,
tep_func_resolver_t *func, void *priv)
{
struct func_resolver *resolver = malloc(sizeof(*resolver));
@@ -491,38 +490,38 @@ int tep_set_function_resolver(struct tep_handle *pevent,
resolver->func = func;
resolver->priv = priv;
- free(pevent->func_resolver);
- pevent->func_resolver = resolver;
+ free(tep->func_resolver);
+ tep->func_resolver = resolver;
return 0;
}
/**
* tep_reset_function_resolver - reset alternative function resolver
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
*
* Stop using whatever alternative resolver was set, use the default
* one instead.
*/
-void tep_reset_function_resolver(struct tep_handle *pevent)
+void tep_reset_function_resolver(struct tep_handle *tep)
{
- free(pevent->func_resolver);
- pevent->func_resolver = NULL;
+ free(tep->func_resolver);
+ tep->func_resolver = NULL;
}
static struct func_map *
-find_func(struct tep_handle *pevent, unsigned long long addr)
+find_func(struct tep_handle *tep, unsigned long long addr)
{
struct func_map *map;
- if (!pevent->func_resolver)
- return __find_func(pevent, addr);
+ if (!tep->func_resolver)
+ return __find_func(tep, addr);
- map = &pevent->func_resolver->map;
+ map = &tep->func_resolver->map;
map->mod = NULL;
map->addr = addr;
- map->func = pevent->func_resolver->func(pevent->func_resolver->priv,
- &map->addr, &map->mod);
+ map->func = tep->func_resolver->func(tep->func_resolver->priv,
+ &map->addr, &map->mod);
if (map->func == NULL)
return NULL;
@@ -531,18 +530,18 @@ find_func(struct tep_handle *pevent, unsigned long long addr)
/**
* tep_find_function - find a function by a given address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @addr: the address to find the function with
*
* Returns a pointer to the function stored that has the given
* address. Note, the address does not have to be exact, it
* will select the function that would contain the address.
*/
-const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr)
+const char *tep_find_function(struct tep_handle *tep, unsigned long long addr)
{
struct func_map *map;
- map = find_func(pevent, addr);
+ map = find_func(tep, addr);
if (!map)
return NULL;
@@ -551,7 +550,7 @@ const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr
/**
* tep_find_function_address - find a function address by a given address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @addr: the address to find the function with
*
* Returns the address the function starts at. This can be used in
@@ -559,11 +558,11 @@ const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr
* name and the function offset.
*/
unsigned long long
-tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
+tep_find_function_address(struct tep_handle *tep, unsigned long long addr)
{
struct func_map *map;
- map = find_func(pevent, addr);
+ map = find_func(tep, addr);
if (!map)
return 0;
@@ -572,7 +571,7 @@ tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
/**
* tep_register_function - register a function with a given address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @function: the function name to register
* @addr: the address the function starts at
* @mod: the kernel module the function may be in (NULL for none)
@@ -580,7 +579,7 @@ tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
* This registers a function name with an address and module.
* The @func passed in is duplicated.
*/
-int tep_register_function(struct tep_handle *pevent, char *func,
+int tep_register_function(struct tep_handle *tep, char *func,
unsigned long long addr, char *mod)
{
struct func_list *item = malloc(sizeof(*item));
@@ -588,7 +587,7 @@ int tep_register_function(struct tep_handle *pevent, char *func,
if (!item)
return -1;
- item->next = pevent->funclist;
+ item->next = tep->funclist;
item->func = strdup(func);
if (!item->func)
goto out_free;
@@ -601,8 +600,8 @@ int tep_register_function(struct tep_handle *pevent, char *func,
item->mod = NULL;
item->addr = addr;
- pevent->funclist = item;
- pevent->func_count++;
+ tep->funclist = item;
+ tep->func_count++;
return 0;
@@ -617,23 +616,23 @@ out_free:
/**
* tep_print_funcs - print out the stored functions
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
*
* This prints out the stored functions.
*/
-void tep_print_funcs(struct tep_handle *pevent)
+void tep_print_funcs(struct tep_handle *tep)
{
int i;
- if (!pevent->func_map)
- func_map_init(pevent);
+ if (!tep->func_map)
+ func_map_init(tep);
- for (i = 0; i < (int)pevent->func_count; i++) {
+ for (i = 0; i < (int)tep->func_count; i++) {
printf("%016llx %s",
- pevent->func_map[i].addr,
- pevent->func_map[i].func);
- if (pevent->func_map[i].mod)
- printf(" [%s]\n", pevent->func_map[i].mod);
+ tep->func_map[i].addr,
+ tep->func_map[i].func);
+ if (tep->func_map[i].mod)
+ printf(" [%s]\n", tep->func_map[i].mod);
else
printf("\n");
}
@@ -663,18 +662,18 @@ static int printk_cmp(const void *a, const void *b)
return 0;
}
-static int printk_map_init(struct tep_handle *pevent)
+static int printk_map_init(struct tep_handle *tep)
{
struct printk_list *printklist;
struct printk_list *item;
struct printk_map *printk_map;
int i;
- printk_map = malloc(sizeof(*printk_map) * (pevent->printk_count + 1));
+ printk_map = malloc(sizeof(*printk_map) * (tep->printk_count + 1));
if (!printk_map)
return -1;
- printklist = pevent->printklist;
+ printklist = tep->printklist;
i = 0;
while (printklist) {
@@ -686,41 +685,41 @@ static int printk_map_init(struct tep_handle *pevent)
free(item);
}
- qsort(printk_map, pevent->printk_count, sizeof(*printk_map), printk_cmp);
+ qsort(printk_map, tep->printk_count, sizeof(*printk_map), printk_cmp);
- pevent->printk_map = printk_map;
- pevent->printklist = NULL;
+ tep->printk_map = printk_map;
+ tep->printklist = NULL;
return 0;
}
static struct printk_map *
-find_printk(struct tep_handle *pevent, unsigned long long addr)
+find_printk(struct tep_handle *tep, unsigned long long addr)
{
struct printk_map *printk;
struct printk_map key;
- if (!pevent->printk_map && printk_map_init(pevent))
+ if (!tep->printk_map && printk_map_init(tep))
return NULL;
key.addr = addr;
- printk = bsearch(&key, pevent->printk_map, pevent->printk_count,
- sizeof(*pevent->printk_map), printk_cmp);
+ printk = bsearch(&key, tep->printk_map, tep->printk_count,
+ sizeof(*tep->printk_map), printk_cmp);
return printk;
}
/**
* tep_register_print_string - register a string by its address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @fmt: the string format to register
* @addr: the address the string was located at
*
* This registers a string by the address it was stored in the kernel.
* The @fmt passed in is duplicated.
*/
-int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
+int tep_register_print_string(struct tep_handle *tep, const char *fmt,
unsigned long long addr)
{
struct printk_list *item = malloc(sizeof(*item));
@@ -729,7 +728,7 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
if (!item)
return -1;
- item->next = pevent->printklist;
+ item->next = tep->printklist;
item->addr = addr;
/* Strip off quotes and '\n' from the end */
@@ -747,8 +746,8 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
if (strcmp(p, "\\n") == 0)
*p = 0;
- pevent->printklist = item;
- pevent->printk_count++;
+ tep->printklist = item;
+ tep->printk_count++;
return 0;
@@ -760,21 +759,21 @@ out_free:
/**
* tep_print_printk - print out the stored strings
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
*
* This prints the string formats that were stored.
*/
-void tep_print_printk(struct tep_handle *pevent)
+void tep_print_printk(struct tep_handle *tep)
{
int i;
- if (!pevent->printk_map)
- printk_map_init(pevent);
+ if (!tep->printk_map)
+ printk_map_init(tep);
- for (i = 0; i < (int)pevent->printk_count; i++) {
+ for (i = 0; i < (int)tep->printk_count; i++) {
printf("%016llx %s\n",
- pevent->printk_map[i].addr,
- pevent->printk_map[i].printk);
+ tep->printk_map[i].addr,
+ tep->printk_map[i].printk);
}
}
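/*
 * Usage sketch (editor's addition, not part of this patch): register a
 * kernel format string by its address, as read from printk_formats, and
 * dump the map. The address is made up for illustration.
 */
static void example_register_printk(void)
{
	struct tep_handle *tep = tep_alloc();

	if (!tep)
		return;
	/* the surrounding quotes and a trailing "\n" are stripped */
	tep_register_print_string(tep, "\"balance %d\\n\"",
				  0xffffffff81200000ULL);
	tep_print_printk(tep);	/* prints "<addr> <format>" lines */
	tep_free(tep);
}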
@@ -783,29 +782,29 @@ static struct tep_event *alloc_event(void)
return calloc(1, sizeof(struct tep_event));
}
-static int add_event(struct tep_handle *pevent, struct tep_event *event)
+static int add_event(struct tep_handle *tep, struct tep_event *event)
{
int i;
- struct tep_event **events = realloc(pevent->events, sizeof(event) *
- (pevent->nr_events + 1));
+ struct tep_event **events = realloc(tep->events, sizeof(event) *
+ (tep->nr_events + 1));
if (!events)
return -1;
- pevent->events = events;
+ tep->events = events;
- for (i = 0; i < pevent->nr_events; i++) {
- if (pevent->events[i]->id > event->id)
+ for (i = 0; i < tep->nr_events; i++) {
+ if (tep->events[i]->id > event->id)
break;
}
- if (i < pevent->nr_events)
- memmove(&pevent->events[i + 1],
- &pevent->events[i],
- sizeof(event) * (pevent->nr_events - i));
+ if (i < tep->nr_events)
+ memmove(&tep->events[i + 1],
+ &tep->events[i],
+ sizeof(event) * (tep->nr_events - i));
- pevent->events[i] = event;
- pevent->nr_events++;
+ tep->events[i] = event;
+ tep->nr_events++;
- event->pevent = pevent;
+ event->tep = tep;
return 0;
}
@@ -1184,7 +1183,7 @@ static enum tep_event_type read_token(char **tok)
}
/**
- * tep_read_token - access to utilities to use the pevent parser
+ * tep_read_token - access to utilities to use the tep parser
* @tok: The token to return
*
* This will parse tokens from the string given by
@@ -1657,8 +1656,8 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
else if (field->flags & TEP_FIELD_IS_STRING)
field->elementsize = 1;
else if (field->flags & TEP_FIELD_IS_LONG)
- field->elementsize = event->pevent ?
- event->pevent->long_size :
+ field->elementsize = event->tep ?
+ event->tep->long_size :
sizeof(long);
} else
field->elementsize = field->size;
@@ -2233,7 +2232,7 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
return val & 0xffffffff;
if (strcmp(type, "u64") == 0 ||
- strcmp(type, "s64"))
+ strcmp(type, "s64") == 0)
return val;
if (strcmp(type, "s8") == 0)
@@ -2457,7 +2456,7 @@ static int arg_num_eval(struct tep_print_arg *arg, long long *val)
static char *arg_eval (struct tep_print_arg *arg)
{
long long val;
- static char buf[20];
+ static char buf[24];
switch (arg->type) {
case TEP_PRINT_ATOM:
@@ -2942,14 +2941,14 @@ process_bitmask(struct tep_event *event __maybe_unused, struct tep_print_arg *ar
}
static struct tep_function_handler *
-find_func_handler(struct tep_handle *pevent, char *func_name)
+find_func_handler(struct tep_handle *tep, char *func_name)
{
struct tep_function_handler *func;
- if (!pevent)
+ if (!tep)
return NULL;
- for (func = pevent->func_handlers; func; func = func->next) {
+ for (func = tep->func_handlers; func; func = func->next) {
if (strcmp(func->name, func_name) == 0)
break;
}
@@ -2957,12 +2956,12 @@ find_func_handler(struct tep_handle *pevent, char *func_name)
return func;
}
-static void remove_func_handler(struct tep_handle *pevent, char *func_name)
+static void remove_func_handler(struct tep_handle *tep, char *func_name)
{
struct tep_function_handler *func;
struct tep_function_handler **next;
- next = &pevent->func_handlers;
+ next = &tep->func_handlers;
while ((func = *next)) {
if (strcmp(func->name, func_name) == 0) {
*next = func->next;
@@ -3076,7 +3075,7 @@ process_function(struct tep_event *event, struct tep_print_arg *arg,
return process_dynamic_array_len(event, arg, tok);
}
- func = find_func_handler(event->pevent, token);
+ func = find_func_handler(event->tep, token);
if (func) {
free_token(token);
return process_func_handler(event, func, arg, tok);
@@ -3357,14 +3356,14 @@ tep_find_any_field(struct tep_event *event, const char *name)
/**
* tep_read_number - read a number from data
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @ptr: the raw data
* @size: the size of the data that holds the number
*
* Returns the number (converted to host) from the
* raw data.
*/
-unsigned long long tep_read_number(struct tep_handle *pevent,
+unsigned long long tep_read_number(struct tep_handle *tep,
const void *ptr, int size)
{
unsigned long long val;
@@ -3373,12 +3372,12 @@ unsigned long long tep_read_number(struct tep_handle *pevent,
case 1:
return *(unsigned char *)ptr;
case 2:
- return tep_data2host2(pevent, *(unsigned short *)ptr);
+ return tep_data2host2(tep, *(unsigned short *)ptr);
case 4:
- return tep_data2host4(pevent, *(unsigned int *)ptr);
+ return tep_data2host4(tep, *(unsigned int *)ptr);
case 8:
memcpy(&val, (ptr), sizeof(unsigned long long));
- return tep_data2host8(pevent, val);
+ return tep_data2host8(tep, val);
default:
/* BUG! */
return 0;
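/*
 * Usage sketch (editor's addition, not part of this patch): read a raw
 * 4-byte field and let the handle's endianness setup convert it to host
 * byte order. Sizes other than 1, 2, 4 or 8 return 0.
 */
static unsigned long long example_read_u32(struct tep_handle *tep,
					   const void *data)
{
	return tep_read_number(tep, data, 4);
}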
@@ -3406,7 +3405,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data,
case 2:
case 4:
case 8:
- *value = tep_read_number(field->event->pevent,
+ *value = tep_read_number(field->event->tep,
data + field->offset, field->size);
return 0;
default:
@@ -3414,7 +3413,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data,
}
}
-static int get_common_info(struct tep_handle *pevent,
+static int get_common_info(struct tep_handle *tep,
const char *type, int *offset, int *size)
{
struct tep_event *event;
@@ -3424,12 +3423,12 @@ static int get_common_info(struct tep_handle *pevent,
* All events should have the same common elements.
* Pick any event to find where the type is.
*/
- if (!pevent->events) {
+ if (!tep->events) {
do_warning("no event_list!");
return -1;
}
- event = pevent->events[0];
+ event = tep->events[0];
field = tep_find_common_field(event, type);
if (!field)
return -1;
@@ -3440,58 +3439,58 @@ static int get_common_info(struct tep_handle *pevent,
return 0;
}
-static int __parse_common(struct tep_handle *pevent, void *data,
+static int __parse_common(struct tep_handle *tep, void *data,
int *size, int *offset, const char *name)
{
int ret;
if (!*size) {
- ret = get_common_info(pevent, name, offset, size);
+ ret = get_common_info(tep, name, offset, size);
if (ret < 0)
return ret;
}
- return tep_read_number(pevent, data + *offset, *size);
+ return tep_read_number(tep, data + *offset, *size);
}
-static int trace_parse_common_type(struct tep_handle *pevent, void *data)
+static int trace_parse_common_type(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->type_size, &pevent->type_offset,
+ return __parse_common(tep, data,
+ &tep->type_size, &tep->type_offset,
"common_type");
}
-static int parse_common_pid(struct tep_handle *pevent, void *data)
+static int parse_common_pid(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->pid_size, &pevent->pid_offset,
+ return __parse_common(tep, data,
+ &tep->pid_size, &tep->pid_offset,
"common_pid");
}
-static int parse_common_pc(struct tep_handle *pevent, void *data)
+static int parse_common_pc(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->pc_size, &pevent->pc_offset,
+ return __parse_common(tep, data,
+ &tep->pc_size, &tep->pc_offset,
"common_preempt_count");
}
-static int parse_common_flags(struct tep_handle *pevent, void *data)
+static int parse_common_flags(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->flags_size, &pevent->flags_offset,
+ return __parse_common(tep, data,
+ &tep->flags_size, &tep->flags_offset,
"common_flags");
}
-static int parse_common_lock_depth(struct tep_handle *pevent, void *data)
+static int parse_common_lock_depth(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->ld_size, &pevent->ld_offset,
+ return __parse_common(tep, data,
+ &tep->ld_size, &tep->ld_offset,
"common_lock_depth");
}
-static int parse_common_migrate_disable(struct tep_handle *pevent, void *data)
+static int parse_common_migrate_disable(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->ld_size, &pevent->ld_offset,
+ return __parse_common(tep, data,
+ &tep->ld_size, &tep->ld_offset,
"common_migrate_disable");
}
@@ -3499,28 +3498,28 @@ static int events_id_cmp(const void *a, const void *b);
/**
* tep_find_event - find an event by given id
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @id: the id of the event
*
* Returns an event that has a given @id.
*/
-struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
+struct tep_event *tep_find_event(struct tep_handle *tep, int id)
{
struct tep_event **eventptr;
struct tep_event key;
struct tep_event *pkey = &key;
/* Check cache first */
- if (pevent->last_event && pevent->last_event->id == id)
- return pevent->last_event;
+ if (tep->last_event && tep->last_event->id == id)
+ return tep->last_event;
key.id = id;
- eventptr = bsearch(&pkey, pevent->events, pevent->nr_events,
- sizeof(*pevent->events), events_id_cmp);
+ eventptr = bsearch(&pkey, tep->events, tep->nr_events,
+ sizeof(*tep->events), events_id_cmp);
if (eventptr) {
- pevent->last_event = *eventptr;
+ tep->last_event = *eventptr;
return *eventptr;
}
@@ -3529,7 +3528,7 @@ struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
/**
* tep_find_event_by_name - find an event by given name
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @sys: the system name to search for
* @name: the name of the event to search for
*
@@ -3537,19 +3536,19 @@ struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
* @sys. If @sys is NULL the first event with @name is returned.
*/
struct tep_event *
-tep_find_event_by_name(struct tep_handle *pevent,
+tep_find_event_by_name(struct tep_handle *tep,
const char *sys, const char *name)
{
struct tep_event *event = NULL;
int i;
- if (pevent->last_event &&
- strcmp(pevent->last_event->name, name) == 0 &&
- (!sys || strcmp(pevent->last_event->system, sys) == 0))
- return pevent->last_event;
+ if (tep->last_event &&
+ strcmp(tep->last_event->name, name) == 0 &&
+ (!sys || strcmp(tep->last_event->system, sys) == 0))
+ return tep->last_event;
- for (i = 0; i < pevent->nr_events; i++) {
- event = pevent->events[i];
+ for (i = 0; i < tep->nr_events; i++) {
+ event = tep->events[i];
if (strcmp(event->name, name) == 0) {
if (!sys)
break;
@@ -3557,17 +3556,17 @@ tep_find_event_by_name(struct tep_handle *pevent,
break;
}
}
- if (i == pevent->nr_events)
+ if (i == tep->nr_events)
event = NULL;
- pevent->last_event = event;
+ tep->last_event = event;
return event;
}
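/*
 * Usage sketch (editor's addition, not part of this patch): the two lookup
 * paths. Lookup by id is a bsearch over the sorted event array; lookup by
 * name is a linear scan, optionally constrained by system. Both cache the
 * last hit in tep->last_event.
 */
static void example_lookup(struct tep_handle *tep)
{
	struct tep_event *by_id = tep_find_event(tep, 1);
	struct tep_event *by_name = tep_find_event_by_name(tep, "sched",
							   "sched_switch");

	if (by_id && by_name)
		printf("found %d and %s\n", by_id->id, by_name->name);
}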
static unsigned long long
eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg *arg)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
unsigned long long val = 0;
unsigned long long left, right;
struct tep_print_arg *typearg = NULL;
@@ -3589,7 +3588,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
}
/* must be a number */
- val = tep_read_number(pevent, data + arg->field.field->offset,
+ val = tep_read_number(tep, data + arg->field.field->offset,
arg->field.field->size);
break;
case TEP_PRINT_FLAGS:
@@ -3629,11 +3628,11 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
}
/* Default to long size */
- field_size = pevent->long_size;
+ field_size = tep->long_size;
switch (larg->type) {
case TEP_PRINT_DYNAMIC_ARRAY:
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + larg->dynarray.field->offset,
larg->dynarray.field->size);
if (larg->dynarray.field->elementsize)
@@ -3662,7 +3661,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
default:
goto default_op; /* oops, all bets off */
}
- val = tep_read_number(pevent,
+ val = tep_read_number(tep,
data + offset, field_size);
if (typearg)
val = eval_type(val, typearg, 1);
@@ -3763,7 +3762,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
}
break;
case TEP_PRINT_DYNAMIC_ARRAY_LEN:
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + arg->dynarray.field->offset,
arg->dynarray.field->size);
/*
@@ -3775,7 +3774,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
break;
case TEP_PRINT_DYNAMIC_ARRAY:
/* Without [], we pass the address to the dynamic data */
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + arg->dynarray.field->offset,
arg->dynarray.field->size);
/*
@@ -3850,7 +3849,7 @@ static void print_str_to_seq(struct trace_seq *s, const char *format,
trace_seq_printf(s, format, str);
}
-static void print_bitmask_to_seq(struct tep_handle *pevent,
+static void print_bitmask_to_seq(struct tep_handle *tep,
struct trace_seq *s, const char *format,
int len_arg, const void *data, int size)
{
@@ -3882,7 +3881,7 @@ static void print_bitmask_to_seq(struct tep_handle *pevent,
* In the kernel, this is an array of long words, thus
* endianness is very important.
*/
- if (pevent->file_bigendian)
+ if (tep->file_bigendian)
index = size - (len + 1);
else
index = len;
@@ -3908,7 +3907,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
struct tep_event *event, const char *format,
int len_arg, struct tep_print_arg *arg)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
struct tep_print_flag_sym *flag;
struct tep_format_field *field;
struct printk_map *printk;
@@ -3945,7 +3944,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
* is a pointer.
*/
if (!(field->flags & TEP_FIELD_IS_ARRAY) &&
- field->size == pevent->long_size) {
+ field->size == tep->long_size) {
/* Handle heterogeneous recording and processing
* architectures
@@ -3960,12 +3959,12 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
* on 32-bit devices:
* In this case, 64 bits must be read.
*/
- addr = (pevent->long_size == 8) ?
+ addr = (tep->long_size == 8) ?
*(unsigned long long *)(data + field->offset) :
(unsigned long long)*(unsigned int *)(data + field->offset);
/* Check if it matches a print format */
- printk = find_printk(pevent, addr);
+ printk = find_printk(tep, addr);
if (printk)
trace_seq_puts(s, printk->printk);
else
@@ -4022,7 +4021,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
case TEP_PRINT_HEX_STR:
if (arg->hex.field->type == TEP_PRINT_DYNAMIC_ARRAY) {
unsigned long offset;
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + arg->hex.field->dynarray.field->offset,
arg->hex.field->dynarray.field->size);
hex = data + (offset & 0xffff);
@@ -4053,7 +4052,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
unsigned long offset;
struct tep_format_field *field =
arg->int_array.field->dynarray.field;
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + field->offset,
field->size);
num = data + (offset & 0xffff);
@@ -4104,7 +4103,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
f = tep_find_any_field(event, arg->string.string);
arg->string.offset = f->offset;
}
- str_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->string.offset));
+ str_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->string.offset));
str_offset &= 0xffff;
print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset);
break;
@@ -4122,10 +4121,10 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
f = tep_find_any_field(event, arg->bitmask.bitmask);
arg->bitmask.offset = f->offset;
}
- bitmask_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->bitmask.offset));
+ bitmask_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->bitmask.offset));
bitmask_size = bitmask_offset >> 16;
bitmask_offset &= 0xffff;
- print_bitmask_to_seq(pevent, s, format, len_arg,
+ print_bitmask_to_seq(tep, s, format, len_arg,
data + bitmask_offset, bitmask_size);
break;
}
@@ -4257,7 +4256,7 @@ static void free_args(struct tep_print_arg *args)
static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event *event)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
struct tep_format_field *field, *ip_field;
struct tep_print_arg *args, *arg, **next;
unsigned long long ip, val;
@@ -4265,8 +4264,8 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
void *bptr;
int vsize = 0;
- field = pevent->bprint_buf_field;
- ip_field = pevent->bprint_ip_field;
+ field = tep->bprint_buf_field;
+ ip_field = tep->bprint_ip_field;
if (!field) {
field = tep_find_field(event, "buf");
@@ -4279,11 +4278,11 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
do_warning_event(event, "can't find ip field for binary printk");
return NULL;
}
- pevent->bprint_buf_field = field;
- pevent->bprint_ip_field = ip_field;
+ tep->bprint_buf_field = field;
+ tep->bprint_ip_field = ip_field;
}
- ip = tep_read_number(pevent, data + ip_field->offset, ip_field->size);
+ ip = tep_read_number(tep, data + ip_field->offset, ip_field->size);
/*
* The first arg is the IP pointer.
@@ -4338,6 +4337,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
case 'S':
case 'f':
case 'F':
+ case 'x':
break;
default:
/*
@@ -4360,7 +4360,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
vsize = 4;
break;
case 1:
- vsize = pevent->long_size;
+ vsize = tep->long_size;
break;
case 2:
vsize = 8;
@@ -4377,7 +4377,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
/* the pointers are always 4 bytes aligned */
bptr = (void *)(((unsigned long)bptr + 3) &
~3);
- val = tep_read_number(pevent, bptr, vsize);
+ val = tep_read_number(tep, bptr, vsize);
bptr += vsize;
arg = alloc_arg();
if (!arg) {
@@ -4434,13 +4434,13 @@ static char *
get_bprint_format(void *data, int size __maybe_unused,
struct tep_event *event)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
unsigned long long addr;
struct tep_format_field *field;
struct printk_map *printk;
char *format;
- field = pevent->bprint_fmt_field;
+ field = tep->bprint_fmt_field;
if (!field) {
field = tep_find_field(event, "fmt");
@@ -4448,12 +4448,12 @@ get_bprint_format(void *data, int size __maybe_unused,
do_warning_event(event, "can't find format field for binary printk");
return NULL;
}
- pevent->bprint_fmt_field = field;
+ tep->bprint_fmt_field = field;
}
- addr = tep_read_number(pevent, data + field->offset, field->size);
+ addr = tep_read_number(tep, data + field->offset, field->size);
- printk = find_printk(pevent, addr);
+ printk = find_printk(tep, addr);
if (!printk) {
if (asprintf(&format, "%%pf: (NO FORMAT FOUND at %llx)\n", addr) < 0)
return NULL;
@@ -4835,13 +4835,13 @@ void tep_print_field(struct trace_seq *s, void *data,
{
unsigned long long val;
unsigned int offset, len, i;
- struct tep_handle *pevent = field->event->pevent;
+ struct tep_handle *tep = field->event->tep;
if (field->flags & TEP_FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
- val = tep_read_number(pevent, data + offset, len);
+ val = tep_read_number(tep, data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
@@ -4861,7 +4861,7 @@ void tep_print_field(struct trace_seq *s, void *data,
field->flags &= ~TEP_FIELD_IS_STRING;
}
} else {
- val = tep_read_number(pevent, data + field->offset,
+ val = tep_read_number(tep, data + field->offset,
field->size);
if (field->flags & TEP_FIELD_IS_POINTER) {
trace_seq_printf(s, "0x%llx", val);
@@ -4910,7 +4910,7 @@ void tep_print_fields(struct trace_seq *s, void *data,
static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
struct tep_print_fmt *print_fmt = &event->print_fmt;
struct tep_print_arg *arg = print_fmt->args;
struct tep_print_arg *args = NULL;
@@ -5002,7 +5002,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
case '-':
goto cont_process;
case 'p':
- if (pevent->long_size == 4)
+ if (tep->long_size == 4)
ls = 1;
else
ls = 2;
@@ -5063,7 +5063,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
arg = arg->next;
if (show_func) {
- func = find_func(pevent, val);
+ func = find_func(tep, val);
if (func) {
trace_seq_puts(s, func->func);
if (show_func == 'F')
@@ -5073,7 +5073,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
break;
}
}
- if (pevent->long_size == 8 && ls == 1 &&
+ if (tep->long_size == 8 && ls == 1 &&
sizeof(long) != 8) {
char *p;
@@ -5171,8 +5171,8 @@ out_failed:
}
/**
- * tep_data_lat_fmt - parse the data for the latency format
- * @pevent: a handle to the pevent
+ * tep_data_latency_format - parse the data for the latency format
+ * @tep: a handle to the trace event parser context
* @s: the trace_seq to write to
* @record: the record to read from
*
@@ -5180,8 +5180,8 @@ out_failed:
* need rescheduling, in hard/soft interrupt, preempt count
* and lock depth) and places it into the trace_seq.
*/
-void tep_data_lat_fmt(struct tep_handle *pevent,
- struct trace_seq *s, struct tep_record *record)
+void tep_data_latency_format(struct tep_handle *tep,
+ struct trace_seq *s, struct tep_record *record)
{
static int check_lock_depth = 1;
static int check_migrate_disable = 1;
@@ -5195,13 +5195,13 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
int softirq;
void *data = record->data;
- lat_flags = parse_common_flags(pevent, data);
- pc = parse_common_pc(pevent, data);
+ lat_flags = parse_common_flags(tep, data);
+ pc = parse_common_pc(tep, data);
/* lock_depth may not always exist */
if (lock_depth_exists)
- lock_depth = parse_common_lock_depth(pevent, data);
+ lock_depth = parse_common_lock_depth(tep, data);
else if (check_lock_depth) {
- lock_depth = parse_common_lock_depth(pevent, data);
+ lock_depth = parse_common_lock_depth(tep, data);
if (lock_depth < 0)
check_lock_depth = 0;
else
@@ -5210,9 +5210,9 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
/* migrate_disable may not always exist */
if (migrate_disable_exists)
- migrate_disable = parse_common_migrate_disable(pevent, data);
+ migrate_disable = parse_common_migrate_disable(tep, data);
else if (check_migrate_disable) {
- migrate_disable = parse_common_migrate_disable(pevent, data);
+ migrate_disable = parse_common_migrate_disable(tep, data);
if (migrate_disable < 0)
check_migrate_disable = 0;
else
@@ -5255,79 +5255,79 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
/**
* tep_data_type - parse out the given event type
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @rec: the record to read from
*
* This returns the event id from the @rec.
*/
-int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_type(struct tep_handle *tep, struct tep_record *rec)
{
- return trace_parse_common_type(pevent, rec->data);
+ return trace_parse_common_type(tep, rec->data);
}
/**
* tep_data_pid - parse the PID from record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @rec: the record to parse
*
* This returns the PID from a record.
*/
-int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_pid(struct tep_handle *tep, struct tep_record *rec)
{
- return parse_common_pid(pevent, rec->data);
+ return parse_common_pid(tep, rec->data);
}
/**
* tep_data_preempt_count - parse the preempt count from the record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @rec: the record to parse
*
* This returns the preempt count from a record.
*/
-int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec)
{
- return parse_common_pc(pevent, rec->data);
+ return parse_common_pc(tep, rec->data);
}
/**
* tep_data_flags - parse the latency flags from the record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @rec: the record to parse
*
* This returns the latency flags from a record.
*
* Use trace_flag_type enum for the flags (see event-parse.h).
*/
-int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_flags(struct tep_handle *tep, struct tep_record *rec)
{
- return parse_common_flags(pevent, rec->data);
+ return parse_common_flags(tep, rec->data);
}
/**
* tep_data_comm_from_pid - return the command line from PID
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @pid: the PID of the task to search for
*
* This returns a pointer to the command line that has the given
* @pid.
*/
-const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid)
+const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid)
{
const char *comm;
- comm = find_cmdline(pevent, pid);
+ comm = find_cmdline(tep, pid);
return comm;
}
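/*
 * Usage sketch (editor's addition, not part of this patch): resolve the
 * task name for a record by extracting the common pid and looking up its
 * registered cmdline.
 */
static void example_comm(struct tep_handle *tep, struct tep_record *record)
{
	int pid = tep_data_pid(tep, record);
	const char *comm = tep_data_comm_from_pid(tep, pid);

	printf("%s-%d\n", comm ? comm : "<...>", pid);
}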
static struct tep_cmdline *
-pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next)
+pid_from_cmdlist(struct tep_handle *tep, const char *comm, struct tep_cmdline *next)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)next;
if (cmdlist)
cmdlist = cmdlist->next;
else
- cmdlist = pevent->cmdlist;
+ cmdlist = tep->cmdlist;
while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
cmdlist = cmdlist->next;
@@ -5337,7 +5337,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline
/**
* tep_data_pid_from_comm - return the pid from a given comm
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @comm: the cmdline to find the pid from
* @next: the cmdline structure to find the next comm
*
@@ -5348,7 +5348,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline
* next pid.
* Also, it does a linear search, so it may be slow.
*/
-struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm,
struct tep_cmdline *next)
{
struct tep_cmdline *cmdline;
@@ -5357,25 +5357,25 @@ struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char
* If the cmdlines have not been converted yet, then use
* the list.
*/
- if (!pevent->cmdlines)
- return pid_from_cmdlist(pevent, comm, next);
+ if (!tep->cmdlines)
+ return pid_from_cmdlist(tep, comm, next);
if (next) {
/*
* The next pointer could still be from a previous
* call, made before the cmdlines array was created
*/
- if (next < pevent->cmdlines ||
- next >= pevent->cmdlines + pevent->cmdline_count)
+ if (next < tep->cmdlines ||
+ next >= tep->cmdlines + tep->cmdline_count)
next = NULL;
else
cmdline = next++;
}
if (!next)
- cmdline = pevent->cmdlines;
+ cmdline = tep->cmdlines;
- while (cmdline < pevent->cmdlines + pevent->cmdline_count) {
+ while (cmdline < tep->cmdlines + tep->cmdline_count) {
if (strcmp(cmdline->comm, comm) == 0)
return cmdline;
cmdline++;
@@ -5385,12 +5385,13 @@ struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char
/**
* tep_cmdline_pid - return the pid associated to a given cmdline
+ * @tep: a handle to the trace event parser context
* @cmdline: The cmdline structure to get the pid from
*
* Returns the pid for a given cmdline. If @cmdline is NULL, then
* -1 is returned.
*/
-int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
+int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
@@ -5401,9 +5402,9 @@ int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
* If cmdlines have not been created yet, or cmdline is
* not part of the array, then treat it as a cmdlist instead.
*/
- if (!pevent->cmdlines ||
- cmdline < pevent->cmdlines ||
- cmdline >= pevent->cmdlines + pevent->cmdline_count)
+ if (!tep->cmdlines ||
+ cmdline < tep->cmdlines ||
+ cmdline >= tep->cmdlines + tep->cmdline_count)
return cmdlist->pid;
return cmdline->pid;
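/*
 * Usage sketch (editor's addition, not part of this patch): walk every pid
 * recorded for a given comm. Passing the previous return value as @next
 * resumes the linear search, so each matching cmdline is visited once.
 */
static void example_pids_of(struct tep_handle *tep, const char *comm)
{
	struct tep_cmdline *cmdline = NULL;

	while ((cmdline = tep_data_pid_from_comm(tep, comm, cmdline)))
		printf("%s: %d\n", comm, tep_cmdline_pid(tep, cmdline));
}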
@@ -5423,7 +5424,7 @@ void tep_event_info(struct trace_seq *s, struct tep_event *event,
{
int print_pretty = 1;
- if (event->pevent->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
+ if (event->tep->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
tep_print_fields(s, record->data, record->size, event);
else {
@@ -5444,7 +5445,8 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
return true;
if (!strcmp(trace_clock, "local") || !strcmp(trace_clock, "global")
- || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf"))
+ || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf")
+ || !strncmp(trace_clock, "mono", 4))
return true;
/* trace_clock is set to tsc or counter mode */
@@ -5453,14 +5455,14 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
/**
* tep_find_event_by_record - return the event from a given record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @record: The record to get the event from
*
* Returns the associated event for a given record, or NULL if none
* is found.
*/
struct tep_event *
-tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
+tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record)
{
int type;
@@ -5469,21 +5471,21 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
return NULL;
}
- type = trace_parse_common_type(pevent, record->data);
+ type = trace_parse_common_type(tep, record->data);
- return tep_find_event(pevent, type);
+ return tep_find_event(tep, type);
}
/**
* tep_print_event_task - Write the event task comm, pid and CPU
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @s: the trace_seq to write to
* @event: the handle to the record's event
* @record: The record to get the event from
*
* Writes the task's comm, pid and CPU to @s.
*/
-void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record)
{
@@ -5491,27 +5493,26 @@ void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
const char *comm;
int pid;
- pid = parse_common_pid(pevent, data);
- comm = find_cmdline(pevent, pid);
+ pid = parse_common_pid(tep, data);
+ comm = find_cmdline(tep, pid);
- if (pevent->latency_format) {
- trace_seq_printf(s, "%8.8s-%-5d %3d",
- comm, pid, record->cpu);
- } else
+ if (tep->latency_format)
+ trace_seq_printf(s, "%8.8s-%-5d %3d", comm, pid, record->cpu);
+ else
trace_seq_printf(s, "%16s-%-5d [%03d]", comm, pid, record->cpu);
}
/**
* tep_print_event_time - Write the event timestamp
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @s: the trace_seq to write to
* @event: the handle to the record's event
* @record: The record to get the event from
- * @use_trace_clock: Set to parse according to the @pevent->trace_clock
+ * @use_trace_clock: Set to parse according to the @tep->trace_clock
*
* Writes the timestamp of the record into @s.
*/
-void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record,
bool use_trace_clock)
@@ -5522,19 +5523,18 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
int p;
bool use_usec_format;
- use_usec_format = is_timestamp_in_us(pevent->trace_clock,
- use_trace_clock);
+ use_usec_format = is_timestamp_in_us(tep->trace_clock, use_trace_clock);
if (use_usec_format) {
secs = record->ts / NSEC_PER_SEC;
nsecs = record->ts - secs * NSEC_PER_SEC;
}
- if (pevent->latency_format) {
- tep_data_lat_fmt(pevent, s, record);
+ if (tep->latency_format) {
+ tep_data_latency_format(tep, s, record);
}
if (use_usec_format) {
- if (pevent->flags & TEP_NSEC_OUTPUT) {
+ if (tep->flags & TEP_NSEC_OUTPUT) {
usecs = nsecs;
p = 9;
} else {
@@ -5554,14 +5554,14 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
/**
* tep_print_event_data - Write the event data section
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @s: the trace_seq to write to
* @event: the handle to the record's event
* @record: The record to get the event from
*
* Writes the parsing of the record's data to @s.
*/
-void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record)
{
@@ -5578,15 +5578,15 @@ void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
tep_event_info(s, event, record);
}
-void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
struct tep_record *record, bool use_trace_clock)
{
struct tep_event *event;
- event = tep_find_event_by_record(pevent, record);
+ event = tep_find_event_by_record(tep, record);
if (!event) {
int i;
- int type = trace_parse_common_type(pevent, record->data);
+ int type = trace_parse_common_type(tep, record->data);
do_warning("ug! no event found for type %d", type);
trace_seq_printf(s, "[UNKNOWN TYPE %d]", type);
@@ -5596,9 +5596,9 @@ void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
return;
}
- tep_print_event_task(pevent, s, event, record);
- tep_print_event_time(pevent, s, event, record, use_trace_clock);
- tep_print_event_data(pevent, s, event, record);
+ tep_print_event_task(tep, s, event, record);
+ tep_print_event_time(tep, s, event, record, use_trace_clock);
+ tep_print_event_data(tep, s, event, record);
}
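/*
 * Usage sketch (editor's addition, not part of this patch): format one
 * record into a trace_seq, the way "trace-cmd report" does, then flush it
 * to stdout.
 */
static void example_print(struct tep_handle *tep, struct tep_record *record)
{
	struct trace_seq s;

	trace_seq_init(&s);
	tep_print_event(tep, &s, record, true /* use_trace_clock */);
	trace_seq_do_printf(&s);
	trace_seq_destroy(&s);
}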
static int events_id_cmp(const void *a, const void *b)
@@ -5649,32 +5649,26 @@ static int events_system_cmp(const void *a, const void *b)
return events_id_cmp(a, b);
}
-struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type sort_type)
+static struct tep_event **list_events_copy(struct tep_handle *tep)
{
struct tep_event **events;
- int (*sort)(const void *a, const void *b);
-
- events = pevent->sort_events;
-
- if (events && pevent->last_type == sort_type)
- return events;
- if (!events) {
- events = malloc(sizeof(*events) * (pevent->nr_events + 1));
- if (!events)
- return NULL;
+ if (!tep)
+ return NULL;
- memcpy(events, pevent->events, sizeof(*events) * pevent->nr_events);
- events[pevent->nr_events] = NULL;
+ events = malloc(sizeof(*events) * (tep->nr_events + 1));
+ if (!events)
+ return NULL;
- pevent->sort_events = events;
+ memcpy(events, tep->events, sizeof(*events) * tep->nr_events);
+ events[tep->nr_events] = NULL;
+ return events;
+}
- /* the internal events are sorted by id */
- if (sort_type == TEP_EVENT_SORT_ID) {
- pevent->last_type = sort_type;
- return events;
- }
- }
+static void list_events_sort(struct tep_event **events, int nr_events,
+ enum tep_event_sort_type sort_type)
+{
+ int (*sort)(const void *a, const void *b);
switch (sort_type) {
case TEP_EVENT_SORT_ID:
@@ -5687,11 +5681,82 @@ struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sor
sort = events_system_cmp;
break;
default:
+ sort = NULL;
+ }
+
+ if (sort)
+ qsort(events, nr_events, sizeof(*events), sort);
+}
+
+/**
+ * tep_list_events - Get events, sorted by given criteria.
+ * @tep: a handle to the tep context
+ * @sort_type: desired sort order of the events in the array
+ *
+ * Returns an array of pointers to all events, sorted by the given
+ * @sort_type criteria. The last element of the array is NULL. The returned
+ * memory must not be freed, it is managed by the library.
+ * The function is not thread safe.
+ */
+struct tep_event **tep_list_events(struct tep_handle *tep,
+ enum tep_event_sort_type sort_type)
+{
+ struct tep_event **events;
+
+ if (!tep)
+ return NULL;
+
+ events = tep->sort_events;
+ if (events && tep->last_type == sort_type)
return events;
+
+ if (!events) {
+ events = list_events_copy(tep);
+ if (!events)
+ return NULL;
+
+ tep->sort_events = events;
+
+ /* the internal events are sorted by id */
+ if (sort_type == TEP_EVENT_SORT_ID) {
+ tep->last_type = sort_type;
+ return events;
+ }
}
- qsort(events, pevent->nr_events, sizeof(*events), sort);
- pevent->last_type = sort_type;
+ list_events_sort(events, tep->nr_events, sort_type);
+ tep->last_type = sort_type;
+
+ return events;
+}
+
+
+/**
+ * tep_list_events_copy - Thread safe version of tep_list_events()
+ * @tep: a handle to the tep context
+ * @sort_type: desired sort order of the events in the array
+ *
+ * Returns an array of pointers to all events, sorted by the given
+ * @sort_type criteria. The last element of the array is NULL. The returned
+ * array is newly allocated inside the function and must be freed by the caller
+ */
+struct tep_event **tep_list_events_copy(struct tep_handle *tep,
+ enum tep_event_sort_type sort_type)
+{
+ struct tep_event **events;
+
+ if (!tep)
+ return NULL;
+
+ events = list_events_copy(tep);
+ if (!events)
+ return NULL;
+
+ /* the internal events are sorted by id */
+ if (sort_type == TEP_EVENT_SORT_ID)
+ return events;
+
+ list_events_sort(events, tep->nr_events, sort_type);
return events;
}
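/*
 * Usage sketch (editor's addition, not part of this patch): the _copy
 * variant hands ownership of the array to the caller, which makes it safe
 * to use from several threads at the cost of an allocation per call.
 */
static void example_list(struct tep_handle *tep)
{
	struct tep_event **events;
	int i;

	events = tep_list_events_copy(tep, TEP_EVENT_SORT_NAME);
	if (!events)
		return;
	for (i = 0; events[i]; i++)
		printf("%s:%s\n", events[i]->system, events[i]->name);
	free(events);	/* caller owns the array, but not the events */
}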
@@ -5950,7 +6015,7 @@ static void parse_header_field(const char *field,
/**
* tep_parse_header_page - parse the data stored in the header page
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @buf: the buffer storing the header page format string
* @size: the size of @buf
* @long_size: the long size to use if there is no header
@@ -5960,7 +6025,7 @@ static void parse_header_field(const char *field,
*
* /sys/kernel/debug/tracing/events/header_page
*/
-int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size,
+int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size,
int long_size)
{
int ignore;
@@ -5970,22 +6035,22 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si
* Old kernels did not have header page info.
* Sorry but we just use what we find here in user space.
*/
- pevent->header_page_ts_size = sizeof(long long);
- pevent->header_page_size_size = long_size;
- pevent->header_page_data_offset = sizeof(long long) + long_size;
- pevent->old_format = 1;
+ tep->header_page_ts_size = sizeof(long long);
+ tep->header_page_size_size = long_size;
+ tep->header_page_data_offset = sizeof(long long) + long_size;
+ tep->old_format = 1;
return -1;
}
init_input_buf(buf, size);
- parse_header_field("timestamp", &pevent->header_page_ts_offset,
- &pevent->header_page_ts_size, 1);
- parse_header_field("commit", &pevent->header_page_size_offset,
- &pevent->header_page_size_size, 1);
- parse_header_field("overwrite", &pevent->header_page_overwrite,
+ parse_header_field("timestamp", &tep->header_page_ts_offset,
+ &tep->header_page_ts_size, 1);
+ parse_header_field("commit", &tep->header_page_size_offset,
+ &tep->header_page_size_size, 1);
+ parse_header_field("overwrite", &tep->header_page_overwrite,
&ignore, 0);
- parse_header_field("data", &pevent->header_page_data_offset,
- &pevent->header_page_data_size, 1);
+ parse_header_field("data", &tep->header_page_data_offset,
+ &tep->header_page_data_size, 1);
return 0;
}
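/*
 * Usage sketch (editor's addition, not part of this patch): @buf and @size
 * would come from reading events/header_page; long_size is sizeof(long)
 * on the traced kernel, 8 here for a 64-bit trace.
 */
static int example_header(struct tep_handle *tep, char *buf,
			  unsigned long size)
{
	return tep_parse_header_page(tep, buf, size, 8);
}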
@@ -6013,11 +6078,11 @@ static void free_handler(struct event_handler *handle)
free(handle);
}
-static int find_event_handle(struct tep_handle *pevent, struct tep_event *event)
+static int find_event_handle(struct tep_handle *tep, struct tep_event *event)
{
struct event_handler *handle, **next;
- for (next = &pevent->handlers; *next;
+ for (next = &tep->handlers; *next;
next = &(*next)->next) {
handle = *next;
if (event_matches(event, handle->id,
@@ -6055,7 +6120,7 @@ static int find_event_handle(struct tep_handle *pevent, struct tep_event *event)
* /sys/kernel/debug/tracing/events/.../.../format
*/
enum tep_errno __tep_parse_format(struct tep_event **eventp,
- struct tep_handle *pevent, const char *buf,
+ struct tep_handle *tep, const char *buf,
unsigned long size, const char *sys)
{
struct tep_event *event;
@@ -6097,8 +6162,8 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
goto event_alloc_failed;
}
- /* Add pevent to event so that it can be referenced */
- event->pevent = pevent;
+ /* Add tep to event so that it can be referenced */
+ event->tep = tep;
ret = event_read_format(event);
if (ret < 0) {
@@ -6110,7 +6175,7 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
* If the event has an override, don't print warnings if the event
* print format fails to parse.
*/
- if (pevent && find_event_handle(pevent, event))
+ if (tep && find_event_handle(tep, event))
show_warning = 0;
ret = event_read_print(event);
@@ -6162,18 +6227,18 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
}
static enum tep_errno
-__parse_event(struct tep_handle *pevent,
+__parse_event(struct tep_handle *tep,
struct tep_event **eventp,
const char *buf, unsigned long size,
const char *sys)
{
- int ret = __tep_parse_format(eventp, pevent, buf, size, sys);
+ int ret = __tep_parse_format(eventp, tep, buf, size, sys);
struct tep_event *event = *eventp;
if (event == NULL)
return ret;
- if (pevent && add_event(pevent, event)) {
+ if (tep && add_event(tep, event)) {
ret = TEP_ERRNO__MEM_ALLOC_FAILED;
goto event_add_failed;
}
@@ -6191,7 +6256,7 @@ event_add_failed:
/**
* tep_parse_format - parse the event format
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @eventp: returned format
* @buf: the buffer storing the event format string
* @size: the size of @buf
@@ -6204,17 +6269,17 @@ event_add_failed:
*
* /sys/kernel/debug/tracing/events/.../.../format
*/
-enum tep_errno tep_parse_format(struct tep_handle *pevent,
+enum tep_errno tep_parse_format(struct tep_handle *tep,
struct tep_event **eventp,
const char *buf,
unsigned long size, const char *sys)
{
- return __parse_event(pevent, eventp, buf, size, sys);
+ return __parse_event(tep, eventp, buf, size, sys);
}
/**
* tep_parse_event - parse the event format
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @buf: the buffer storing the event format string
* @size: the size of @buf
* @sys: the system the event belongs to
@@ -6226,11 +6291,11 @@ enum tep_errno tep_parse_format(struct tep_handle *pevent,
*
* /sys/kernel/debug/tracing/events/.../.../format
*/
-enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
+enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf,
unsigned long size, const char *sys)
{
struct tep_event *event = NULL;
- return __parse_event(pevent, &event, buf, size, sys);
+ return __parse_event(tep, &event, buf, size, sys);
}
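/*
 * Usage sketch (editor's addition, not part of this patch): parse one
 * event "format" file that was read into a buffer. On success the event
 * is added to the handle and can later be found by id or by name.
 */
static void example_parse(struct tep_handle *tep, char *buf,
			  unsigned long size)
{
	enum tep_errno err = tep_parse_event(tep, buf, size, "sched");

	if (err)
		fprintf(stderr, "failed to parse event format\n");
}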
int get_field_val(struct trace_seq *s, struct tep_format_field *field,
@@ -6292,8 +6357,8 @@ void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event,
offset = field->offset;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
- offset = tep_read_number(event->pevent,
- data + offset, field->size);
+ offset = tep_read_number(event->tep,
+ data + offset, field->size);
*len = offset >> 16;
offset &= 0xffff;
} else
@@ -6386,7 +6451,8 @@ int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event,
* @record: The record with the field name.
* @err: print default error if failed.
*
- * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
+ * Returns positive value on success, negative in case of an error,
+ * or 0 if buffer is full.
*/
int tep_print_num_field(struct trace_seq *s, const char *fmt,
struct tep_event *event, const char *name,
@@ -6418,14 +6484,15 @@ int tep_print_num_field(struct trace_seq *s, const char *fmt,
* @record: The record with the field name.
* @err: print default error if failed.
*
- * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
+ * Returns positive value on success, negative in case of an error,
+ * or 0 if buffer is full.
*/
int tep_print_func_field(struct trace_seq *s, const char *fmt,
struct tep_event *event, const char *name,
struct tep_record *record, int err)
{
struct tep_format_field *field = tep_find_field(event, name);
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
unsigned long long val;
struct func_map *func;
char tmp[128];
@@ -6436,7 +6503,7 @@ int tep_print_func_field(struct trace_seq *s, const char *fmt,
if (tep_read_number_field(field, record->data, &val))
goto failed;
- func = find_func(pevent, val);
+ func = find_func(tep, val);
if (func)
snprintf(tmp, 128, "%s/0x%llx", func->func, func->addr - val);
@@ -6468,7 +6535,7 @@ static void free_func_handle(struct tep_function_handler *func)
/**
* tep_register_print_function - register a helper function
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @func: the function to process the helper function
* @ret_type: the return type of the helper function
* @name: the name of the helper function
@@ -6481,7 +6548,7 @@ static void free_func_handle(struct tep_function_handler *func)
* The @parameters is a variable list of tep_func_arg_type enums that
* must end with TEP_FUNC_ARG_VOID.
*/
-int tep_register_print_function(struct tep_handle *pevent,
+int tep_register_print_function(struct tep_handle *tep,
tep_func_handler func,
enum tep_func_arg_type ret_type,
char *name, ...)
@@ -6493,7 +6560,7 @@ int tep_register_print_function(struct tep_handle *pevent,
va_list ap;
int ret;
- func_handle = find_func_handler(pevent, name);
+ func_handle = find_func_handler(tep, name);
if (func_handle) {
/*
* This is most likely caused by the user's own
@@ -6501,7 +6568,7 @@ int tep_register_print_function(struct tep_handle *pevent,
* system defaults.
*/
pr_stat("override of function helper '%s'", name);
- remove_func_handler(pevent, name);
+ remove_func_handler(tep, name);
}
func_handle = calloc(1, sizeof(*func_handle));
@@ -6548,8 +6615,8 @@ int tep_register_print_function(struct tep_handle *pevent,
}
va_end(ap);
- func_handle->next = pevent->func_handlers;
- pevent->func_handlers = func_handle;
+ func_handle->next = tep->func_handlers;
+ tep->func_handlers = func_handle;
return 0;
out_free:
@@ -6560,7 +6627,7 @@ int tep_register_print_function(struct tep_handle *pevent,
/**
* tep_unregister_print_function - unregister a helper function
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @func: the function to process the helper function
* @name: the name of the helper function
*
@@ -6568,20 +6635,20 @@ int tep_register_print_function(struct tep_handle *pevent,
*
* Returns 0 if the handler was removed successfully, -1 otherwise.
*/
-int tep_unregister_print_function(struct tep_handle *pevent,
+int tep_unregister_print_function(struct tep_handle *tep,
tep_func_handler func, char *name)
{
struct tep_function_handler *func_handle;
- func_handle = find_func_handler(pevent, name);
+ func_handle = find_func_handler(tep, name);
if (func_handle && func_handle->func == func) {
- remove_func_handler(pevent, name);
+ remove_func_handler(tep, name);
return 0;
}
return -1;
}
-static struct tep_event *search_event(struct tep_handle *pevent, int id,
+static struct tep_event *search_event(struct tep_handle *tep, int id,
const char *sys_name,
const char *event_name)
{
@@ -6589,7 +6656,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
if (id >= 0) {
/* search by id */
- event = tep_find_event(pevent, id);
+ event = tep_find_event(tep, id);
if (!event)
return NULL;
if (event_name && (strcmp(event_name, event->name) != 0))
@@ -6597,7 +6664,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
if (sys_name && (strcmp(sys_name, event->system) != 0))
return NULL;
} else {
- event = tep_find_event_by_name(pevent, sys_name, event_name);
+ event = tep_find_event_by_name(tep, sys_name, event_name);
if (!event)
return NULL;
}
@@ -6606,7 +6673,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
/**
* tep_register_event_handler - register a way to parse an event
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @id: the id of the event to register
* @sys_name: the system name the event belongs to
* @event_name: the name of the event
@@ -6627,14 +6694,14 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
* negative TEP_ERRNO_... in case of an error
*
*/
-int tep_register_event_handler(struct tep_handle *pevent, int id,
+int tep_register_event_handler(struct tep_handle *tep, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context)
{
struct tep_event *event;
struct event_handler *handle;
- event = search_event(pevent, id, sys_name, event_name);
+ event = search_event(tep, id, sys_name, event_name);
if (event == NULL)
goto not_found;
@@ -6669,8 +6736,8 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
}
handle->func = func;
- handle->next = pevent->handlers;
- pevent->handlers = handle;
+ handle->next = tep->handlers;
+ tep->handlers = handle;
handle->context = context;
return TEP_REGISTER_SUCCESS;
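/*
 * Usage sketch (editor's addition, not part of this patch): override the
 * printing of one event with a custom callback, the same mechanism the
 * plugins use. The handler and the "next_pid" field are illustrative.
 */
static int example_handler(struct trace_seq *s, struct tep_record *record,
			   struct tep_event *event, void *context)
{
	unsigned long long pid;

	if (tep_get_field_val(s, event, "next_pid", record, &pid, 1))
		return -1;
	trace_seq_printf(s, "next_pid=%llu", pid);
	return 0;
}

static void example_override(struct tep_handle *tep)
{
	tep_register_event_handler(tep, -1, "sched", "sched_switch",
				   example_handler, NULL);
}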
@@ -6697,7 +6764,7 @@ static int handle_matches(struct event_handler *handler, int id,
/**
* tep_unregister_event_handler - unregister an existing event handler
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @id: the id of the event to unregister
* @sys_name: the system name the handler belongs to
* @event_name: the name of the event handler
@@ -6711,7 +6778,7 @@ static int handle_matches(struct event_handler *handler, int id,
*
* Returns 0 if handler was removed successfully, -1 if event was not found.
*/
-int tep_unregister_event_handler(struct tep_handle *pevent, int id,
+int tep_unregister_event_handler(struct tep_handle *tep, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context)
{
@@ -6719,7 +6786,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id,
struct event_handler *handle;
struct event_handler **next;
- event = search_event(pevent, id, sys_name, event_name);
+ event = search_event(tep, id, sys_name, event_name);
if (event == NULL)
goto not_found;
@@ -6733,7 +6800,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id,
}
not_found:
- for (next = &pevent->handlers; *next; next = &(*next)->next) {
+ for (next = &tep->handlers; *next; next = &(*next)->next) {
handle = *next;
if (handle_matches(handle, id, sys_name, event_name,
func, context))
@@ -6750,23 +6817,23 @@ not_found:
}
/**
- * tep_alloc - create a pevent handle
+ * tep_alloc - create a tep handle
*/
struct tep_handle *tep_alloc(void)
{
- struct tep_handle *pevent = calloc(1, sizeof(*pevent));
+ struct tep_handle *tep = calloc(1, sizeof(*tep));
- if (pevent) {
- pevent->ref_count = 1;
- pevent->host_bigendian = tep_host_bigendian();
+ if (tep) {
+ tep->ref_count = 1;
+ tep->host_bigendian = tep_is_bigendian();
}
- return pevent;
+ return tep;
}
-void tep_ref(struct tep_handle *pevent)
+void tep_ref(struct tep_handle *tep)
{
- pevent->ref_count++;
+ tep->ref_count++;
}
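/*
 * Usage sketch (editor's addition, not part of this patch): the handle is
 * reference counted; tep_free() only destroys it once the count drops to
 * zero, and tep_unref() below is an alias for tep_free().
 */
static void example_lifetime(void)
{
	struct tep_handle *tep = tep_alloc();	/* ref_count = 1 */

	if (!tep)
		return;
	tep_ref(tep);	/* ref_count = 2 */
	tep_free(tep);	/* ref_count = 1, handle still valid */
	tep_free(tep);	/* ref_count = 0, handle destroyed */
}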
int tep_get_ref(struct tep_handle *tep)
@@ -6816,10 +6883,10 @@ void tep_free_event(struct tep_event *event)
}
/**
- * tep_free - free a pevent handle
- * @pevent: the pevent handle to free
+ * tep_free - free a tep handle
+ * @tep: the tep handle to free
*/
-void tep_free(struct tep_handle *pevent)
+void tep_free(struct tep_handle *tep)
{
struct cmdline_list *cmdlist, *cmdnext;
struct func_list *funclist, *funcnext;
@@ -6828,21 +6895,21 @@ void tep_free(struct tep_handle *pevent)
struct event_handler *handle;
int i;
- if (!pevent)
+ if (!tep)
return;
- cmdlist = pevent->cmdlist;
- funclist = pevent->funclist;
- printklist = pevent->printklist;
+ cmdlist = tep->cmdlist;
+ funclist = tep->funclist;
+ printklist = tep->printklist;
- pevent->ref_count--;
- if (pevent->ref_count)
+ tep->ref_count--;
+ if (tep->ref_count)
return;
- if (pevent->cmdlines) {
- for (i = 0; i < pevent->cmdline_count; i++)
- free(pevent->cmdlines[i].comm);
- free(pevent->cmdlines);
+ if (tep->cmdlines) {
+ for (i = 0; i < tep->cmdline_count; i++)
+ free(tep->cmdlines[i].comm);
+ free(tep->cmdlines);
}
while (cmdlist) {
@@ -6852,12 +6919,12 @@ void tep_free(struct tep_handle *pevent)
cmdlist = cmdnext;
}
- if (pevent->func_map) {
- for (i = 0; i < (int)pevent->func_count; i++) {
- free(pevent->func_map[i].func);
- free(pevent->func_map[i].mod);
+ if (tep->func_map) {
+ for (i = 0; i < (int)tep->func_count; i++) {
+ free(tep->func_map[i].func);
+ free(tep->func_map[i].mod);
}
- free(pevent->func_map);
+ free(tep->func_map);
}
while (funclist) {
@@ -6868,16 +6935,16 @@ void tep_free(struct tep_handle *pevent)
funclist = funcnext;
}
- while (pevent->func_handlers) {
- func_handler = pevent->func_handlers;
- pevent->func_handlers = func_handler->next;
+ while (tep->func_handlers) {
+ func_handler = tep->func_handlers;
+ tep->func_handlers = func_handler->next;
free_func_handle(func_handler);
}
- if (pevent->printk_map) {
- for (i = 0; i < (int)pevent->printk_count; i++)
- free(pevent->printk_map[i].printk);
- free(pevent->printk_map);
+ if (tep->printk_map) {
+ for (i = 0; i < (int)tep->printk_count; i++)
+ free(tep->printk_map[i].printk);
+ free(tep->printk_map);
}
while (printklist) {
@@ -6887,24 +6954,24 @@ void tep_free(struct tep_handle *pevent)
printklist = printknext;
}
- for (i = 0; i < pevent->nr_events; i++)
- tep_free_event(pevent->events[i]);
+ for (i = 0; i < tep->nr_events; i++)
+ tep_free_event(tep->events[i]);
- while (pevent->handlers) {
- handle = pevent->handlers;
- pevent->handlers = handle->next;
+ while (tep->handlers) {
+ handle = tep->handlers;
+ tep->handlers = handle->next;
free_handler(handle);
}
- free(pevent->trace_clock);
- free(pevent->events);
- free(pevent->sort_events);
- free(pevent->func_resolver);
+ free(tep->trace_clock);
+ free(tep->events);
+ free(tep->sort_events);
+ free(tep->func_resolver);
- free(pevent);
+ free(tep);
}
-void tep_unref(struct tep_handle *pevent)
+void tep_unref(struct tep_handle *tep)
{
- tep_free(pevent);
+ tep_free(tep);
}
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index aec48f2aea8a..642f68ab5fb2 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -64,8 +64,8 @@ typedef int (*tep_event_handler_func)(struct trace_seq *s,
struct tep_event *event,
void *context);
-typedef int (*tep_plugin_load_func)(struct tep_handle *pevent);
-typedef int (*tep_plugin_unload_func)(struct tep_handle *pevent);
+typedef int (*tep_plugin_load_func)(struct tep_handle *tep);
+typedef int (*tep_plugin_unload_func)(struct tep_handle *tep);
struct tep_plugin_option {
struct tep_plugin_option *next;
@@ -85,12 +85,12 @@ struct tep_plugin_option {
* TEP_PLUGIN_LOADER: (required)
* The function name to initialize the plugin.
*
- * int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+ * int TEP_PLUGIN_LOADER(struct tep_handle *tep)
*
* TEP_PLUGIN_UNLOADER: (optional)
* The function called just before unloading
*
- * int TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+ * int TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
*
* TEP_PLUGIN_OPTIONS: (optional)
* Plugin options that can be set before loading
@@ -278,7 +278,7 @@ struct tep_print_fmt {
};
struct tep_event {
- struct tep_handle *pevent;
+ struct tep_handle *tep;
char *name;
int id;
int flags;
@@ -393,9 +393,9 @@ struct tep_plugin_list;
#define INVALID_PLUGIN_LIST_OPTION ((char **)((unsigned long)-1))
-struct tep_plugin_list *tep_load_plugins(struct tep_handle *pevent);
+struct tep_plugin_list *tep_load_plugins(struct tep_handle *tep);
void tep_unload_plugins(struct tep_plugin_list *plugin_list,
- struct tep_handle *pevent);
+ struct tep_handle *tep);
char **tep_plugin_list_options(void);
void tep_plugin_free_options_list(char **list);
int tep_plugin_add_options(const char *name,
@@ -409,8 +409,10 @@ void tep_print_plugins(struct trace_seq *s,
typedef char *(tep_func_resolver_t)(void *priv,
unsigned long long *addrp, char **modp);
void tep_set_flag(struct tep_handle *tep, int flag);
+void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag);
+bool tep_test_flag(struct tep_handle *tep, enum tep_flag flags);
-static inline int tep_host_bigendian(void)
+static inline int tep_is_bigendian(void)
{
unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
unsigned int val;
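
The renamed helper relies on the usual byte-order probe; a standalone
sketch, assuming a 4-byte unsigned int as the original does:

	#include <string.h>

	static int is_bigendian_sketch(void)
	{
		unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
		unsigned int val;

		memcpy(&val, str, 4);
		return val == 0x01020304;	/* true only on big-endian hosts */
	}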
@@ -428,37 +430,37 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
};
-int tep_set_function_resolver(struct tep_handle *pevent,
+int tep_set_function_resolver(struct tep_handle *tep,
tep_func_resolver_t *func, void *priv);
-void tep_reset_function_resolver(struct tep_handle *pevent);
-int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid);
-int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid);
-int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock);
-int tep_register_function(struct tep_handle *pevent, char *name,
+void tep_reset_function_resolver(struct tep_handle *tep);
+int tep_register_comm(struct tep_handle *tep, const char *comm, int pid);
+int tep_override_comm(struct tep_handle *tep, const char *comm, int pid);
+int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock);
+int tep_register_function(struct tep_handle *tep, char *name,
unsigned long long addr, char *mod);
-int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
+int tep_register_print_string(struct tep_handle *tep, const char *fmt,
unsigned long long addr);
-int tep_pid_is_registered(struct tep_handle *pevent, int pid);
+bool tep_is_pid_registered(struct tep_handle *tep, int pid);
-void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record);
-void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record,
bool use_trace_clock);
-void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record);
-void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
struct tep_record *record, bool use_trace_clock);
-int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size,
+int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size,
int long_size);
-enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
+enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf,
unsigned long size, const char *sys);
-enum tep_errno tep_parse_format(struct tep_handle *pevent,
+enum tep_errno tep_parse_format(struct tep_handle *tep,
struct tep_event **eventp,
const char *buf,
unsigned long size, const char *sys);
@@ -490,50 +492,50 @@ enum tep_reg_handler {
TEP_REGISTER_SUCCESS_OVERWRITE,
};
-int tep_register_event_handler(struct tep_handle *pevent, int id,
+int tep_register_event_handler(struct tep_handle *tep, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context);
-int tep_unregister_event_handler(struct tep_handle *pevent, int id,
+int tep_unregister_event_handler(struct tep_handle *tep, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context);
-int tep_register_print_function(struct tep_handle *pevent,
+int tep_register_print_function(struct tep_handle *tep,
tep_func_handler func,
enum tep_func_arg_type ret_type,
char *name, ...);
-int tep_unregister_print_function(struct tep_handle *pevent,
+int tep_unregister_print_function(struct tep_handle *tep,
tep_func_handler func, char *name);
struct tep_format_field *tep_find_common_field(struct tep_event *event, const char *name);
struct tep_format_field *tep_find_field(struct tep_event *event, const char *name);
struct tep_format_field *tep_find_any_field(struct tep_event *event, const char *name);
-const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr);
+const char *tep_find_function(struct tep_handle *tep, unsigned long long addr);
unsigned long long
-tep_find_function_address(struct tep_handle *pevent, unsigned long long addr);
-unsigned long long tep_read_number(struct tep_handle *pevent, const void *ptr, int size);
+tep_find_function_address(struct tep_handle *tep, unsigned long long addr);
+unsigned long long tep_read_number(struct tep_handle *tep, const void *ptr, int size);
int tep_read_number_field(struct tep_format_field *field, const void *data,
unsigned long long *value);
struct tep_event *tep_get_first_event(struct tep_handle *tep);
int tep_get_events_count(struct tep_handle *tep);
-struct tep_event *tep_find_event(struct tep_handle *pevent, int id);
+struct tep_event *tep_find_event(struct tep_handle *tep, int id);
struct tep_event *
-tep_find_event_by_name(struct tep_handle *pevent, const char *sys, const char *name);
+tep_find_event_by_name(struct tep_handle *tep, const char *sys, const char *name);
struct tep_event *
-tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
-
-void tep_data_lat_fmt(struct tep_handle *pevent,
- struct trace_seq *s, struct tep_record *record);
-int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
-int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
-int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
-int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
-const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid);
+tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record);
+
+void tep_data_latency_format(struct tep_handle *tep,
+ struct trace_seq *s, struct tep_record *record);
+int tep_data_type(struct tep_handle *tep, struct tep_record *rec);
+int tep_data_pid(struct tep_handle *tep, struct tep_record *rec);
+int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec);
+int tep_data_flags(struct tep_handle *tep, struct tep_record *rec);
+const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid);
struct tep_cmdline;
-struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm,
struct tep_cmdline *next);
-int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline);
+int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline);
void tep_print_field(struct trace_seq *s, void *data,
struct tep_format_field *field);
@@ -541,10 +543,12 @@ void tep_print_fields(struct trace_seq *s, void *data,
int size __maybe_unused, struct tep_event *event);
void tep_event_info(struct trace_seq *s, struct tep_event *event,
struct tep_record *record);
-int tep_strerror(struct tep_handle *pevent, enum tep_errno errnum,
+int tep_strerror(struct tep_handle *tep, enum tep_errno errnum,
char *buf, size_t buflen);
-struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type);
+struct tep_event **tep_list_events(struct tep_handle *tep, enum tep_event_sort_type);
+struct tep_event **tep_list_events_copy(struct tep_handle *tep,
+ enum tep_event_sort_type);
struct tep_format_field **tep_event_common_fields(struct tep_event *event);
struct tep_format_field **tep_event_fields(struct tep_event *event);
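
tep_list_events_copy() is new in this patch; a usage sketch, assuming
the returned array is NULL-terminated and caller-owned like the
tep_list_events() array (the function name below is arbitrary):

	#include <stdio.h>
	#include <stdlib.h>
	#include "event-parse.h"

	static void list_events_sketch(struct tep_handle *tep)
	{
		struct tep_event **events;
		int i;

		events = tep_list_events_copy(tep, TEP_EVENT_SORT_NAME);
		if (!events)
			return;
		for (i = 0; events[i]; i++)
			printf("%s/%s\n", events[i]->system, events[i]->name);
		free(events);	/* the copy belongs to the caller */
	}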
@@ -552,24 +556,28 @@ enum tep_endian {
TEP_LITTLE_ENDIAN = 0,
TEP_BIG_ENDIAN
};
-int tep_get_cpus(struct tep_handle *pevent);
-void tep_set_cpus(struct tep_handle *pevent, int cpus);
-int tep_get_long_size(struct tep_handle *pevent);
-void tep_set_long_size(struct tep_handle *pevent, int long_size);
-int tep_get_page_size(struct tep_handle *pevent);
-void tep_set_page_size(struct tep_handle *pevent, int _page_size);
-int tep_file_bigendian(struct tep_handle *pevent);
-void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian);
-int tep_is_host_bigendian(struct tep_handle *pevent);
-void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian);
-int tep_is_latency_format(struct tep_handle *pevent);
-void tep_set_latency_format(struct tep_handle *pevent, int lat);
-int tep_get_header_page_size(struct tep_handle *pevent);
+int tep_get_cpus(struct tep_handle *tep);
+void tep_set_cpus(struct tep_handle *tep, int cpus);
+int tep_get_long_size(struct tep_handle *tep);
+void tep_set_long_size(struct tep_handle *tep, int long_size);
+int tep_get_page_size(struct tep_handle *tep);
+void tep_set_page_size(struct tep_handle *tep, int _page_size);
+bool tep_is_file_bigendian(struct tep_handle *tep);
+void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian);
+bool tep_is_local_bigendian(struct tep_handle *tep);
+void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian);
+bool tep_is_latency_format(struct tep_handle *tep);
+void tep_set_latency_format(struct tep_handle *tep, int lat);
+int tep_get_header_page_size(struct tep_handle *tep);
+int tep_get_header_timestamp_size(struct tep_handle *tep);
+bool tep_is_old_format(struct tep_handle *tep);
+void tep_set_print_raw(struct tep_handle *tep, int print_raw);
+void tep_set_test_filters(struct tep_handle *tep, int test_filters);
struct tep_handle *tep_alloc(void);
-void tep_free(struct tep_handle *pevent);
-void tep_ref(struct tep_handle *pevent);
-void tep_unref(struct tep_handle *pevent);
+void tep_free(struct tep_handle *tep);
+void tep_ref(struct tep_handle *tep);
+void tep_unref(struct tep_handle *tep);
int tep_get_ref(struct tep_handle *tep);
/* access to the internal parser */
@@ -581,8 +589,8 @@ const char *tep_get_input_buf(void);
unsigned long long tep_get_input_buf_ptr(void);
/* for debugging */
-void tep_print_funcs(struct tep_handle *pevent);
-void tep_print_printk(struct tep_handle *pevent);
+void tep_print_funcs(struct tep_handle *tep);
+void tep_print_printk(struct tep_handle *tep);
/* ----------------------- filtering ----------------------- */
@@ -709,13 +717,13 @@ struct tep_filter_type {
#define TEP_FILTER_ERROR_BUFSZ 1024
struct tep_event_filter {
- struct tep_handle *pevent;
+ struct tep_handle *tep;
int filters;
struct tep_filter_type *event_filters;
char error_buffer[TEP_FILTER_ERROR_BUFSZ];
};
-struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent);
+struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep);
/* for backward compatibility */
#define FILTER_NONE TEP_ERRNO__NO_FILTER
@@ -723,12 +731,6 @@ struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent);
#define FILTER_MISS TEP_ERRNO__FILTER_MISS
#define FILTER_MATCH TEP_ERRNO__FILTER_MATCH
-enum tep_filter_trivial_type {
- TEP_FILTER_TRIVIAL_FALSE,
- TEP_FILTER_TRIVIAL_TRUE,
- TEP_FILTER_TRIVIAL_BOTH,
-};
-
enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
const char *filter_str);
@@ -743,9 +745,6 @@ int tep_event_filtered(struct tep_event_filter *filter,
void tep_filter_reset(struct tep_event_filter *filter);
-int tep_filter_clear_trivial(struct tep_event_filter *filter,
- enum tep_filter_trivial_type type);
-
void tep_filter_free(struct tep_event_filter *filter);
char *tep_filter_make_string(struct tep_event_filter *filter, int event_id);
@@ -753,15 +752,8 @@ char *tep_filter_make_string(struct tep_event_filter *filter, int event_id);
int tep_filter_remove_event(struct tep_event_filter *filter,
int event_id);
-int tep_filter_event_has_trivial(struct tep_event_filter *filter,
- int event_id,
- enum tep_filter_trivial_type type);
-
int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *source);
-int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source,
- enum tep_filter_trivial_type type);
-
int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter *filter2);
#endif /* _PARSE_EVENTS_H */
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index e74f16c88398..8ca28de9337a 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -269,7 +269,7 @@ void tep_print_plugins(struct trace_seq *s,
}
static void
-load_plugin(struct tep_handle *pevent, const char *path,
+load_plugin(struct tep_handle *tep, const char *path,
const char *file, void *data)
{
struct tep_plugin_list **plugin_list = data;
@@ -316,7 +316,7 @@ load_plugin(struct tep_handle *pevent, const char *path,
*plugin_list = list;
pr_stat("registering plugin: %s", plugin);
- func(pevent);
+ func(tep);
return;
out_free:
@@ -324,9 +324,9 @@ load_plugin(struct tep_handle *pevent, const char *path,
}
static void
-load_plugins_dir(struct tep_handle *pevent, const char *suffix,
+load_plugins_dir(struct tep_handle *tep, const char *suffix,
const char *path,
- void (*load_plugin)(struct tep_handle *pevent,
+ void (*load_plugin)(struct tep_handle *tep,
const char *path,
const char *name,
void *data),
@@ -359,15 +359,15 @@ load_plugins_dir(struct tep_handle *pevent, const char *suffix,
if (strcmp(name + (strlen(name) - strlen(suffix)), suffix) != 0)
continue;
- load_plugin(pevent, path, name, data);
+ load_plugin(tep, path, name, data);
}
closedir(dir);
}
static void
-load_plugins(struct tep_handle *pevent, const char *suffix,
- void (*load_plugin)(struct tep_handle *pevent,
+load_plugins(struct tep_handle *tep, const char *suffix,
+ void (*load_plugin)(struct tep_handle *tep,
const char *path,
const char *name,
void *data),
@@ -378,7 +378,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
char *envdir;
int ret;
- if (pevent->flags & TEP_DISABLE_PLUGINS)
+ if (tep->flags & TEP_DISABLE_PLUGINS)
return;
/*
@@ -386,8 +386,8 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
* check that first.
*/
#ifdef PLUGIN_DIR
- if (!(pevent->flags & TEP_DISABLE_SYS_PLUGINS))
- load_plugins_dir(pevent, suffix, PLUGIN_DIR,
+ if (!(tep->flags & TEP_DISABLE_SYS_PLUGINS))
+ load_plugins_dir(tep, suffix, PLUGIN_DIR,
load_plugin, data);
#endif
@@ -397,7 +397,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
*/
envdir = getenv("TRACEEVENT_PLUGIN_DIR");
if (envdir)
- load_plugins_dir(pevent, suffix, envdir, load_plugin, data);
+ load_plugins_dir(tep, suffix, envdir, load_plugin, data);
/*
* Now let the home directory override the environment
@@ -413,22 +413,22 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
return;
}
- load_plugins_dir(pevent, suffix, path, load_plugin, data);
+ load_plugins_dir(tep, suffix, path, load_plugin, data);
free(path);
}
struct tep_plugin_list*
-tep_load_plugins(struct tep_handle *pevent)
+tep_load_plugins(struct tep_handle *tep)
{
struct tep_plugin_list *list = NULL;
- load_plugins(pevent, ".so", load_plugin, &list);
+ load_plugins(tep, ".so", load_plugin, &list);
return list;
}
void
-tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *pevent)
+tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *tep)
{
tep_plugin_unload_func func;
struct tep_plugin_list *list;
@@ -438,7 +438,7 @@ tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *peven
plugin_list = list->next;
func = dlsym(list->handle, TEP_PLUGIN_UNLOADER_NAME);
if (func)
- func(pevent);
+ func(tep);
dlclose(list->handle);
free(list->name);
free(list);
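
A sketch of the load/unload cycle, including the TRACEEVENT_PLUGIN_DIR
override checked above; the directory path is an arbitrary example.

	#include <stdlib.h>
	#include "event-parse.h"

	static void plugins_sketch(struct tep_handle *tep)
	{
		struct tep_plugin_list *plugins;

		/* Checked after the compiled-in PLUGIN_DIR, before $HOME */
		setenv("TRACEEVENT_PLUGIN_DIR", "/opt/my-plugins", 1);
		plugins = tep_load_plugins(tep);
		/* ... parse and print events ... */
		tep_unload_plugins(plugins, tep);
	}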
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
index af2a1f3b7424..b887e7437d67 100644
--- a/tools/lib/traceevent/kbuffer-parse.c
+++ b/tools/lib/traceevent/kbuffer-parse.c
@@ -727,3 +727,52 @@ int kbuffer_start_of_data(struct kbuffer *kbuf)
{
return kbuf->start;
}
+
+/**
+ * kbuffer_raw_get - get raw buffer info
+ * @kbuf: The kbuffer
+ * @subbuf: Start of mapped subbuffer
+ * @info: Info descriptor to fill in
+ *
+ * For debugging. This can return internals of the ring buffer.
+ * Expects to have info->next set to what it will read.
+ * The type, length and timestamp delta will be filled in, and
+ * @info->next will be updated to the next element.
+ * The @subbuf is used to know if the info is past the end of
+ * data and NULL will be returned if it is.
+ */
+struct kbuffer_raw_info *
+kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf, struct kbuffer_raw_info *info)
+{
+ unsigned long long flags;
+ unsigned long long delta;
+ unsigned int type_len;
+ unsigned int size;
+ int start;
+ int length;
+ void *ptr = info->next;
+
+ if (!kbuf || !subbuf)
+ return NULL;
+
+ if (kbuf->flags & KBUFFER_FL_LONG_8)
+ start = 16;
+ else
+ start = 12;
+
+ flags = read_long(kbuf, subbuf + 8);
+ size = (unsigned int)flags & COMMIT_MASK;
+
+ if (ptr < subbuf || ptr >= subbuf + start + size)
+ return NULL;
+
+ type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
+
+ info->next = ptr + length;
+
+ info->type = type_len;
+ info->delta = delta;
+ info->length = length;
+
+ return info;
+}
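
A hedged usage sketch for the new helper: seed info->next with the data
offset of the subbuffer and iterate until the walk runs past its end
(the function name is hypothetical):

	#include <stdio.h>

	static void dump_subbuf_sketch(struct kbuffer *kbuf, void *subbuf)
	{
		struct kbuffer_raw_info info;

		info.next = (char *)subbuf + kbuffer_start_of_data(kbuf);
		while (kbuffer_raw_get(kbuf, subbuf, &info))
			printf("type %d len %d delta %llu\n",
			       info.type, info.length, info.delta);
	}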
diff --git a/tools/lib/traceevent/kbuffer.h b/tools/lib/traceevent/kbuffer.h
index 03dce757553f..ed4d697fc137 100644
--- a/tools/lib/traceevent/kbuffer.h
+++ b/tools/lib/traceevent/kbuffer.h
@@ -65,4 +65,17 @@ int kbuffer_subbuffer_size(struct kbuffer *kbuf);
void kbuffer_set_old_format(struct kbuffer *kbuf);
int kbuffer_start_of_data(struct kbuffer *kbuf);
+/* Debugging */
+
+struct kbuffer_raw_info {
+ int type;
+ int length;
+ unsigned long long delta;
+ void *next;
+};
+
+/* Read raw data */
+struct kbuffer_raw_info *kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf,
+ struct kbuffer_raw_info *info);
+
#endif /* _K_BUFFER_H */
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index cb5ce66dab6e..552592d153fb 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -154,7 +154,7 @@ add_filter_type(struct tep_event_filter *filter, int id)
filter_type = &filter->event_filters[i];
filter_type->event_id = id;
- filter_type->event = tep_find_event(filter->pevent, id);
+ filter_type->event = tep_find_event(filter->tep, id);
filter_type->filter = NULL;
filter->filters++;
@@ -164,9 +164,9 @@ add_filter_type(struct tep_event_filter *filter, int id)
/**
* tep_filter_alloc - create a new event filter
- * @pevent: The pevent that this filter is associated with
+ * @tep: The tep that this filter is associated with
*/
-struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent)
+struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep)
{
struct tep_event_filter *filter;
@@ -175,8 +175,8 @@ struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent)
return NULL;
memset(filter, 0, sizeof(*filter));
- filter->pevent = pevent;
- tep_ref(pevent);
+ filter->tep = tep;
+ tep_ref(tep);
return filter;
}
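
A sketch of the filter API after the rename; the event and the filter
string are arbitrary examples, and error handling is elided:

	#include <stdio.h>
	#include "event-parse.h"

	static void filter_sketch(struct tep_handle *tep, struct tep_record *record)
	{
		struct tep_event_filter *filter = tep_filter_alloc(tep);

		if (!filter)
			return;

		if (tep_filter_add_filter_str(filter,
					      "sched/sched_switch:next_pid==0") == 0 &&
		    tep_filter_match(filter, record) == TEP_ERRNO__FILTER_MATCH)
			printf("record matches\n");

		tep_filter_free(filter);	/* drops the reference on tep */
	}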
@@ -256,7 +256,7 @@ static int event_match(struct tep_event *event,
}
static enum tep_errno
-find_event(struct tep_handle *pevent, struct event_list **events,
+find_event(struct tep_handle *tep, struct event_list **events,
char *sys_name, char *event_name)
{
struct tep_event *event;
@@ -299,8 +299,8 @@ find_event(struct tep_handle *pevent, struct event_list **events,
}
}
- for (i = 0; i < pevent->nr_events; i++) {
- event = pevent->events[i];
+ for (i = 0; i < tep->nr_events; i++) {
+ event = tep->events[i];
if (event_match(event, sys_name ? &sreg : NULL, &ereg)) {
match = 1;
if (add_event(events, event) < 0) {
@@ -1257,7 +1257,7 @@ static void filter_init_error_buf(struct tep_event_filter *filter)
enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
const char *filter_str)
{
- struct tep_handle *pevent = filter->pevent;
+ struct tep_handle *tep = filter->tep;
struct event_list *event;
struct event_list *events = NULL;
const char *filter_start;
@@ -1313,7 +1313,7 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
}
/* Find this event */
- ret = find_event(pevent, &events, strim(sys_name), strim(event_name));
+ ret = find_event(tep, &events, strim(sys_name), strim(event_name));
if (ret < 0) {
free_events(events);
free(this_event);
@@ -1334,7 +1334,7 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
if (ret < 0)
rtn = ret;
- if (ret >= 0 && pevent->test_filters) {
+ if (ret >= 0 && tep->test_filters) {
char *test;
test = tep_filter_make_string(filter, event->event->id);
if (test) {
@@ -1346,9 +1346,6 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
free_events(events);
- if (rtn >= 0 && pevent->test_filters)
- exit(0);
-
return rtn;
}
@@ -1380,7 +1377,7 @@ int tep_filter_strerror(struct tep_event_filter *filter, enum tep_errno err,
return 0;
}
- return tep_strerror(filter->pevent, err, buf, buflen);
+ return tep_strerror(filter->tep, err, buf, buflen);
}
/**
@@ -1443,7 +1440,7 @@ void tep_filter_reset(struct tep_event_filter *filter)
void tep_filter_free(struct tep_event_filter *filter)
{
- tep_unref(filter->pevent);
+ tep_unref(filter->tep);
tep_filter_reset(filter);
@@ -1462,10 +1459,10 @@ static int copy_filter_type(struct tep_event_filter *filter,
const char *name;
char *str;
- /* Can't assume that the pevent's are the same */
+ /* Can't assume that the tep's are the same */
sys = filter_type->event->system;
name = filter_type->event->name;
- event = tep_find_event_by_name(filter->pevent, sys, name);
+ event = tep_find_event_by_name(filter->tep, sys, name);
if (!event)
return -1;
@@ -1522,167 +1519,6 @@ int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *sour
return ret;
}
-
-/**
- * tep_update_trivial - update the trivial filters with the given filter
- * @dest - the filter to update
- * @source - the filter as the source of the update
- * @type - the type of trivial filter to update.
- *
- * Scan dest for trivial events matching @type to replace with the source.
- *
- * Returns 0 on success and -1 if there was a problem updating, but
- * events may have still been updated on error.
- */
-int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source,
- enum tep_filter_trivial_type type)
-{
- struct tep_handle *src_pevent;
- struct tep_handle *dest_pevent;
- struct tep_event *event;
- struct tep_filter_type *filter_type;
- struct tep_filter_arg *arg;
- char *str;
- int i;
-
- src_pevent = source->pevent;
- dest_pevent = dest->pevent;
-
- /* Do nothing if either of the filters has nothing to filter */
- if (!dest->filters || !source->filters)
- return 0;
-
- for (i = 0; i < dest->filters; i++) {
- filter_type = &dest->event_filters[i];
- arg = filter_type->filter;
- if (arg->type != TEP_FILTER_ARG_BOOLEAN)
- continue;
- if ((arg->boolean.value && type == TEP_FILTER_TRIVIAL_FALSE) ||
- (!arg->boolean.value && type == TEP_FILTER_TRIVIAL_TRUE))
- continue;
-
- event = filter_type->event;
-
- if (src_pevent != dest_pevent) {
- /* do a look up */
- event = tep_find_event_by_name(src_pevent,
- event->system,
- event->name);
- if (!event)
- return -1;
- }
-
- str = tep_filter_make_string(source, event->id);
- if (!str)
- continue;
-
- /* Don't bother if the filter is trivial too */
- if (strcmp(str, "TRUE") != 0 && strcmp(str, "FALSE") != 0)
- filter_event(dest, event, str, NULL);
- free(str);
- }
- return 0;
-}
-
-/**
- * tep_filter_clear_trivial - clear TRUE and FALSE filters
- * @filter: the filter to remove trivial filters from
- * @type: remove only true, false, or both
- *
- * Removes filters that only contain a TRUE or FALES boolean arg.
- *
- * Returns 0 on success and -1 if there was a problem.
- */
-int tep_filter_clear_trivial(struct tep_event_filter *filter,
- enum tep_filter_trivial_type type)
-{
- struct tep_filter_type *filter_type;
- int count = 0;
- int *ids = NULL;
- int i;
-
- if (!filter->filters)
- return 0;
-
- /*
- * Two steps, first get all ids with trivial filters.
- * then remove those ids.
- */
- for (i = 0; i < filter->filters; i++) {
- int *new_ids;
-
- filter_type = &filter->event_filters[i];
- if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN)
- continue;
- switch (type) {
- case TEP_FILTER_TRIVIAL_FALSE:
- if (filter_type->filter->boolean.value)
- continue;
- break;
- case TEP_FILTER_TRIVIAL_TRUE:
- if (!filter_type->filter->boolean.value)
- continue;
- default:
- break;
- }
-
- new_ids = realloc(ids, sizeof(*ids) * (count + 1));
- if (!new_ids) {
- free(ids);
- return -1;
- }
-
- ids = new_ids;
- ids[count++] = filter_type->event_id;
- }
-
- if (!count)
- return 0;
-
- for (i = 0; i < count; i++)
- tep_filter_remove_event(filter, ids[i]);
-
- free(ids);
- return 0;
-}
-
-/**
- * tep_filter_event_has_trivial - return true event contains trivial filter
- * @filter: the filter with the information
- * @event_id: the id of the event to test
- * @type: trivial type to test for (TRUE, FALSE, EITHER)
- *
- * Returns 1 if the event contains a matching trivial type
- * otherwise 0.
- */
-int tep_filter_event_has_trivial(struct tep_event_filter *filter,
- int event_id,
- enum tep_filter_trivial_type type)
-{
- struct tep_filter_type *filter_type;
-
- if (!filter->filters)
- return 0;
-
- filter_type = find_filter_type(filter, event_id);
-
- if (!filter_type)
- return 0;
-
- if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN)
- return 0;
-
- switch (type) {
- case TEP_FILTER_TRIVIAL_FALSE:
- return !filter_type->filter->boolean.value;
-
- case TEP_FILTER_TRIVIAL_TRUE:
- return filter_type->filter->boolean.value;
- default:
- return 1;
- }
-}
-
static int test_filter(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err);
@@ -1692,8 +1528,8 @@ get_comm(struct tep_event *event, struct tep_record *record)
const char *comm;
int pid;
- pid = tep_data_pid(event->pevent, record);
- comm = tep_data_comm_from_pid(event->pevent, pid);
+ pid = tep_data_pid(event->tep, record);
+ comm = tep_data_comm_from_pid(event->tep, pid);
return comm;
}
@@ -1861,7 +1697,7 @@ static int test_num(struct tep_event *event, struct tep_filter_arg *arg,
static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *record)
{
struct tep_event *event;
- struct tep_handle *pevent;
+ struct tep_handle *tep;
unsigned long long addr;
const char *val = NULL;
unsigned int size;
@@ -1891,12 +1727,12 @@ static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *
} else {
event = arg->str.field->event;
- pevent = event->pevent;
+ tep = event->tep;
addr = get_value(event, arg->str.field, record);
if (arg->str.field->flags & (TEP_FIELD_IS_POINTER | TEP_FIELD_IS_LONG))
/* convert to a kernel symbol */
- val = tep_find_function(pevent, addr);
+ val = tep_find_function(tep, addr);
if (val == NULL) {
/* just use the hex of the string name */
@@ -2036,7 +1872,7 @@ int tep_event_filtered(struct tep_event_filter *filter, int event_id)
enum tep_errno tep_filter_match(struct tep_event_filter *filter,
struct tep_record *record)
{
- struct tep_handle *pevent = filter->pevent;
+ struct tep_handle *tep = filter->tep;
struct tep_filter_type *filter_type;
int event_id;
int ret;
@@ -2047,7 +1883,7 @@ enum tep_errno tep_filter_match(struct tep_event_filter *filter,
if (!filter->filters)
return TEP_ERRNO__NO_FILTER;
- event_id = tep_data_type(pevent, record);
+ event_id = tep_data_type(tep, record);
filter_type = find_filter_type(filter, event_id);
if (!filter_type)
@@ -2409,14 +2245,6 @@ int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter
break;
if (filter_type1->filter->type != filter_type2->filter->type)
break;
- switch (filter_type1->filter->type) {
- case TEP_FILTER_TRIVIAL_FALSE:
- case TEP_FILTER_TRIVIAL_TRUE:
- /* trivial types just need the type compared */
- continue;
- default:
- break;
- }
/* The best way to compare complex filters is with strings */
str1 = arg_to_str(filter1, filter_type1->filter);
str2 = arg_to_str(filter2, filter_type2->filter);
diff --git a/tools/lib/traceevent/parse-utils.c b/tools/lib/traceevent/parse-utils.c
index 77e4ec6402dd..e99867111387 100644
--- a/tools/lib/traceevent/parse-utils.c
+++ b/tools/lib/traceevent/parse-utils.c
@@ -14,7 +14,7 @@
void __vwarning(const char *fmt, va_list ap)
{
if (errno)
- perror("trace-cmd");
+ perror("libtraceevent");
errno = 0;
fprintf(stderr, " ");
diff --git a/tools/lib/traceevent/plugin_cfg80211.c b/tools/lib/traceevent/plugin_cfg80211.c
index a51b366f47da..3d43b56a6c98 100644
--- a/tools/lib/traceevent/plugin_cfg80211.c
+++ b/tools/lib/traceevent/plugin_cfg80211.c
@@ -25,9 +25,9 @@ process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
return val ? (long long) le16toh(*val) : 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process___le16_to_cpup,
TEP_FUNC_ARG_INT,
"__le16_to_cpup",
@@ -36,8 +36,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_print_function(pevent, process___le16_to_cpup,
+ tep_unregister_print_function(tep, process___le16_to_cpup,
"__le16_to_cpup");
}
diff --git a/tools/lib/traceevent/plugin_function.c b/tools/lib/traceevent/plugin_function.c
index a73eca34a8f9..7770fcb78e0f 100644
--- a/tools/lib/traceevent/plugin_function.c
+++ b/tools/lib/traceevent/plugin_function.c
@@ -126,7 +126,7 @@ static int add_and_get_index(const char *parent, const char *child, int cpu)
static int function_handler(struct trace_seq *s, struct tep_record *record,
struct tep_event *event, void *context)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
unsigned long long function;
unsigned long long pfunction;
const char *func;
@@ -136,12 +136,12 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
if (tep_get_field_val(s, event, "ip", record, &function, 1))
return trace_seq_putc(s, '!');
- func = tep_find_function(pevent, function);
+ func = tep_find_function(tep, function);
if (tep_get_field_val(s, event, "parent_ip", record, &pfunction, 1))
return trace_seq_putc(s, '!');
- parent = tep_find_function(pevent, pfunction);
+ parent = tep_find_function(tep, pfunction);
if (parent && ftrace_indent->set)
index = add_and_get_index(parent, func, record->cpu);
@@ -164,9 +164,9 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1, "ftrace", "function",
+ tep_register_event_handler(tep, -1, "ftrace", "function",
function_handler, NULL);
tep_plugin_add_options("ftrace", plugin_options);
@@ -174,11 +174,11 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
int i, x;
- tep_unregister_event_handler(pevent, -1, "ftrace", "function",
+ tep_unregister_event_handler(tep, -1, "ftrace", "function",
function_handler, NULL);
for (i = 0; i <= cpus; i++) {
diff --git a/tools/lib/traceevent/plugin_hrtimer.c b/tools/lib/traceevent/plugin_hrtimer.c
index 5db5e401275f..bb434e0ed03a 100644
--- a/tools/lib/traceevent/plugin_hrtimer.c
+++ b/tools/lib/traceevent/plugin_hrtimer.c
@@ -67,23 +67,23 @@ static int timer_start_handler(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1,
+ tep_register_event_handler(tep, -1,
"timer", "hrtimer_expire_entry",
timer_expire_handler, NULL);
- tep_register_event_handler(pevent, -1, "timer", "hrtimer_start",
+ tep_register_event_handler(tep, -1, "timer", "hrtimer_start",
timer_start_handler, NULL);
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1,
+ tep_unregister_event_handler(tep, -1,
"timer", "hrtimer_expire_entry",
timer_expire_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "timer", "hrtimer_start",
+ tep_unregister_event_handler(tep, -1, "timer", "hrtimer_start",
timer_start_handler, NULL);
}
diff --git a/tools/lib/traceevent/plugin_jbd2.c b/tools/lib/traceevent/plugin_jbd2.c
index a5e34135dd6a..04fc125f38cb 100644
--- a/tools/lib/traceevent/plugin_jbd2.c
+++ b/tools/lib/traceevent/plugin_jbd2.c
@@ -48,16 +48,16 @@ process_jiffies_to_msecs(struct trace_seq *s, unsigned long long *args)
return jiffies;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_jbd2_dev_to_name,
TEP_FUNC_ARG_STRING,
"jbd2_dev_to_name",
TEP_FUNC_ARG_INT,
TEP_FUNC_ARG_VOID);
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_jiffies_to_msecs,
TEP_FUNC_ARG_LONG,
"jiffies_to_msecs",
@@ -66,11 +66,11 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_print_function(pevent, process_jbd2_dev_to_name,
+ tep_unregister_print_function(tep, process_jbd2_dev_to_name,
"jbd2_dev_to_name");
- tep_unregister_print_function(pevent, process_jiffies_to_msecs,
+ tep_unregister_print_function(tep, process_jiffies_to_msecs,
"jiffies_to_msecs");
}
diff --git a/tools/lib/traceevent/plugin_kmem.c b/tools/lib/traceevent/plugin_kmem.c
index 0e3c601f9ed1..edaec5d962c3 100644
--- a/tools/lib/traceevent/plugin_kmem.c
+++ b/tools/lib/traceevent/plugin_kmem.c
@@ -39,57 +39,57 @@ static int call_site_handler(struct trace_seq *s, struct tep_record *record,
if (tep_read_number_field(field, data, &val))
return 1;
- func = tep_find_function(event->pevent, val);
+ func = tep_find_function(event->tep, val);
if (!func)
return 1;
- addr = tep_find_function_address(event->pevent, val);
+ addr = tep_find_function_address(event->tep, val);
trace_seq_printf(s, "(%s+0x%x) ", func, (int)(val - addr));
return 1;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1, "kmem", "kfree",
+ tep_register_event_handler(tep, -1, "kmem", "kfree",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem", "kmalloc",
+ tep_register_event_handler(tep, -1, "kmem", "kmalloc",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem", "kmalloc_node",
+ tep_register_event_handler(tep, -1, "kmem", "kmalloc_node",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+ tep_register_event_handler(tep, -1, "kmem", "kmem_cache_alloc",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem",
+ tep_register_event_handler(tep, -1, "kmem",
"kmem_cache_alloc_node",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+ tep_register_event_handler(tep, -1, "kmem", "kmem_cache_free",
call_site_handler, NULL);
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1, "kmem", "kfree",
+ tep_unregister_event_handler(tep, -1, "kmem", "kfree",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc",
+ tep_unregister_event_handler(tep, -1, "kmem", "kmalloc",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc_node",
+ tep_unregister_event_handler(tep, -1, "kmem", "kmalloc_node",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+ tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_alloc",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem",
+ tep_unregister_event_handler(tep, -1, "kmem",
"kmem_cache_alloc_node",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+ tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_free",
call_site_handler, NULL);
}
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index 64b9c25a1fd3..c8e623065a7e 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -389,8 +389,8 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
* We can only use the structure if file is of the same
* endianness.
*/
- if (tep_file_bigendian(event->pevent) ==
- tep_is_host_bigendian(event->pevent)) {
+ if (tep_is_file_bigendian(event->tep) ==
+ tep_is_local_bigendian(event->tep)) {
trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
role.level,
@@ -445,40 +445,40 @@ process_is_writable_pte(struct trace_seq *s, unsigned long long *args)
return pte & PT_WRITABLE_MASK;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
init_disassembler();
- tep_register_event_handler(pevent, -1, "kvm", "kvm_exit",
+ tep_register_event_handler(tep, -1, "kvm", "kvm_exit",
kvm_exit_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+ tep_register_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
kvm_emulate_insn_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+ tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
kvm_nested_vmexit_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+ tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
kvm_nested_vmexit_inject_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+ tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
kvm_mmu_get_page_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+ tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
kvm_mmu_print_role, NULL);
- tep_register_event_handler(pevent, -1,
+ tep_register_event_handler(tep, -1,
"kvmmmu", "kvm_mmu_unsync_page",
kvm_mmu_print_role, NULL);
- tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+ tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
kvm_mmu_print_role, NULL);
- tep_register_event_handler(pevent, -1, "kvmmmu",
+ tep_register_event_handler(tep, -1, "kvmmmu",
"kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
NULL);
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_is_writable_pte,
TEP_FUNC_ARG_INT,
"is_writable_pte",
@@ -487,37 +487,37 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1, "kvm", "kvm_exit",
+ tep_unregister_event_handler(tep, -1, "kvm", "kvm_exit",
kvm_exit_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+ tep_unregister_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
kvm_emulate_insn_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+ tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
kvm_nested_vmexit_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+ tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
kvm_nested_vmexit_inject_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+ tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
kvm_mmu_get_page_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+ tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
kvm_mmu_print_role, NULL);
- tep_unregister_event_handler(pevent, -1,
+ tep_unregister_event_handler(tep, -1,
"kvmmmu", "kvm_mmu_unsync_page",
kvm_mmu_print_role, NULL);
- tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+ tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
kvm_mmu_print_role, NULL);
- tep_unregister_event_handler(pevent, -1, "kvmmmu",
+ tep_unregister_event_handler(tep, -1, "kvmmmu",
"kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
NULL);
- tep_unregister_print_function(pevent, process_is_writable_pte,
+ tep_unregister_print_function(tep, process_is_writable_pte,
"is_writable_pte");
}
diff --git a/tools/lib/traceevent/plugin_mac80211.c b/tools/lib/traceevent/plugin_mac80211.c
index e38b9477aad2..884303c26b5c 100644
--- a/tools/lib/traceevent/plugin_mac80211.c
+++ b/tools/lib/traceevent/plugin_mac80211.c
@@ -87,17 +87,17 @@ static int drv_bss_info_changed(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1, "mac80211",
+ tep_register_event_handler(tep, -1, "mac80211",
"drv_bss_info_changed",
drv_bss_info_changed, NULL);
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1, "mac80211",
+ tep_unregister_event_handler(tep, -1, "mac80211",
"drv_bss_info_changed",
drv_bss_info_changed, NULL);
}
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index 834c9e378ff8..957389a0ff7a 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -62,7 +62,7 @@ static void write_and_save_comm(struct tep_format_field *field,
comm = &s->buffer[len];
/* Help out the comm to ids. This will handle dups */
- tep_register_comm(field->event->pevent, comm, pid);
+ tep_register_comm(field->event->tep, comm, pid);
}
static int sched_wakeup_handler(struct trace_seq *s,
@@ -135,27 +135,27 @@ static int sched_switch_handler(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1, "sched", "sched_switch",
+ tep_register_event_handler(tep, -1, "sched", "sched_switch",
sched_switch_handler, NULL);
- tep_register_event_handler(pevent, -1, "sched", "sched_wakeup",
+ tep_register_event_handler(tep, -1, "sched", "sched_wakeup",
sched_wakeup_handler, NULL);
- tep_register_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+ tep_register_event_handler(tep, -1, "sched", "sched_wakeup_new",
sched_wakeup_handler, NULL);
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1, "sched", "sched_switch",
+ tep_unregister_event_handler(tep, -1, "sched", "sched_switch",
sched_switch_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup",
+ tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup",
sched_wakeup_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+ tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup_new",
sched_wakeup_handler, NULL);
}
diff --git a/tools/lib/traceevent/plugin_scsi.c b/tools/lib/traceevent/plugin_scsi.c
index 4eba25cc1431..5d0387a4b65a 100644
--- a/tools/lib/traceevent/plugin_scsi.c
+++ b/tools/lib/traceevent/plugin_scsi.c
@@ -414,9 +414,9 @@ unsigned long long process_scsi_trace_parse_cdb(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_scsi_trace_parse_cdb,
TEP_FUNC_ARG_STRING,
"scsi_trace_parse_cdb",
@@ -427,8 +427,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_print_function(pevent, process_scsi_trace_parse_cdb,
+ tep_unregister_print_function(tep, process_scsi_trace_parse_cdb,
"scsi_trace_parse_cdb");
}
diff --git a/tools/lib/traceevent/plugin_xen.c b/tools/lib/traceevent/plugin_xen.c
index bc0496e4c296..993b208d0323 100644
--- a/tools/lib/traceevent/plugin_xen.c
+++ b/tools/lib/traceevent/plugin_xen.c
@@ -120,9 +120,9 @@ unsigned long long process_xen_hypercall_name(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_xen_hypercall_name,
TEP_FUNC_ARG_STRING,
"xen_hypercall_name",
@@ -131,8 +131,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_print_function(pevent, process_xen_hypercall_name,
+ tep_unregister_print_function(tep, process_xen_hypercall_name,
"xen_hypercall_name");
}
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index 35bff92cc773..68caa9a976d0 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -27,7 +27,7 @@ Explanation of the Linux-Kernel Memory Consistency Model
19. AND THEN THERE WAS ALPHA
20. THE HAPPENS-BEFORE RELATION: hb
21. THE PROPAGATES-BEFORE RELATION: pb
- 22. RCU RELATIONS: rcu-link, gp, rscs, rcu-fence, and rb
+ 22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
23. LOCKING
24. ODDS AND ENDS
@@ -1430,8 +1430,8 @@ they execute means that it cannot have cycles. This requirement is
the content of the LKMM's "propagation" axiom.
-RCU RELATIONS: rcu-link, gp, rscs, rcu-fence, and rb
-----------------------------------------------------
+RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
+-------------------------------------------------------------
RCU (Read-Copy-Update) is a powerful synchronization mechanism. It
rests on two concepts: grace periods and read-side critical sections.
@@ -1446,17 +1446,19 @@ As far as memory models are concerned, RCU's main feature is its
Grace-Period Guarantee, which states that a critical section can never
span a full grace period. In more detail, the Guarantee says:
- If a critical section starts before a grace period then it
- must end before the grace period does. In addition, every
- store that propagates to the critical section's CPU before the
- end of the critical section must propagate to every CPU before
- the end of the grace period.
+ For any critical section C and any grace period G, at least
+ one of the following statements must hold:
- If a critical section ends after a grace period ends then it
- must start after the grace period does. In addition, every
- store that propagates to the grace period's CPU before the
- start of the grace period must propagate to every CPU before
- the start of the critical section.
+(1) C ends before G does, and in addition, every store that
+ propagates to C's CPU before the end of C must propagate to
+ every CPU before G ends.
+
+(2) G starts before C does, and in addition, every store that
+ propagates to G's CPU before the start of G must propagate
+ to every CPU before C starts.
+
+In particular, it is not possible for a critical section to both start
+before and end after a grace period.
Here is a simple example of RCU in action:
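
(The example itself falls between the hunks; reconstructed from the
r1/r2 discussion that follows, it is essentially the classic two-CPU
test:)

	int x, y;

	P0()
	{
		rcu_read_lock();
		WRITE_ONCE(x, 1);
		WRITE_ONCE(y, 1);
		rcu_read_unlock();
	}

	P1()
	{
		int r1, r2;

		r1 = READ_ONCE(x);
		synchronize_rcu();
		r2 = READ_ONCE(y);
	}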
@@ -1483,10 +1485,11 @@ The Grace Period Guarantee tells us that when this code runs, it will
never end with r1 = 1 and r2 = 0. The reasoning is as follows. r1 = 1
means that P0's store to x propagated to P1 before P1 called
synchronize_rcu(), so P0's critical section must have started before
-P1's grace period. On the other hand, r2 = 0 means that P0's store to
-y, which occurs before the end of the critical section, did not
-propagate to P1 before the end of the grace period, violating the
-Guarantee.
+P1's grace period, contrary to part (2) of the Guarantee. On the
+other hand, r2 = 0 means that P0's store to y, which occurs before the
+end of the critical section, did not propagate to P1 before the end of
+the grace period, contrary to part (1). Together the results violate
+the Guarantee.
In the kernel's implementations of RCU, the requirements for stores
to propagate to every CPU are fulfilled by placing strong fences at
@@ -1504,11 +1507,11 @@ before" or "ends after" a grace period? Some aspects of the meaning
are pretty obvious, as in the example above, but the details aren't
entirely clear. The LKMM formalizes this notion by means of the
rcu-link relation. rcu-link encompasses a very general notion of
-"before": Among other things, X ->rcu-link Z includes cases where X
-happens-before or is equal to some event Y which is equal to or comes
-before Z in the coherence order. When Y = Z this says that X ->rfe Z
-implies X ->rcu-link Z. In addition, when Y = X it says that X ->fr Z
-and X ->co Z each imply X ->rcu-link Z.
+"before": If E and F are RCU fence events (i.e., rcu_read_lock(),
+rcu_read_unlock(), or synchronize_rcu()) then among other things,
+E ->rcu-link F includes cases where E is po-before some memory-access
+event X, F is po-after some memory-access event Y, and we have any of
+X ->rfe Y, X ->co Y, or X ->fr Y.
The formal definition of the rcu-link relation is more than a little
obscure, and we won't give it here. It is closely related to the pb
@@ -1516,171 +1519,173 @@ relation, and the details don't matter unless you want to comb through
a somewhat lengthy formal proof. Pretty much all you need to know
about rcu-link is the information in the preceding paragraph.
-The LKMM also defines the gp and rscs relations. They bring grace
-periods and read-side critical sections into the picture, in the
+The LKMM also defines the rcu-gp and rcu-rscsi relations. They bring
+grace periods and read-side critical sections into the picture, in the
following way:
- E ->gp F means there is a synchronize_rcu() fence event S such
- that E ->po S and either S ->po F or S = F. In simple terms,
- there is a grace period po-between E and F.
+ E ->rcu-gp F means that E and F are in fact the same event,
+ and that event is a synchronize_rcu() fence (i.e., a grace
+ period).
- E ->rscs F means there is a critical section delimited by an
- rcu_read_lock() fence L and an rcu_read_unlock() fence U, such
- that E ->po U and either L ->po F or L = F. You can think of
- this as saying that E and F are in the same critical section
- (in fact, it also allows E to be po-before the start of the
- critical section and F to be po-after the end).
+ E ->rcu-rscsi F means that E and F are the rcu_read_unlock()
+ and rcu_read_lock() fence events delimiting some read-side
+ critical section. (The 'i' at the end of the name emphasizes
+ that this relation is "inverted": It links the end of the
+ critical section to the start.)
If we think of the rcu-link relation as standing for an extended
-"before", then X ->gp Y ->rcu-link Z says that X executes before a
-grace period which ends before Z executes. (In fact it covers more
-than this, because it also includes cases where X executes before a
-grace period and some store propagates to Z's CPU before Z executes
-but doesn't propagate to some other CPU until after the grace period
-ends.) Similarly, X ->rscs Y ->rcu-link Z says that X is part of (or
-before the start of) a critical section which starts before Z
-executes.
-
-The LKMM goes on to define the rcu-fence relation as a sequence of gp
-and rscs links separated by rcu-link links, in which the number of gp
-links is >= the number of rscs links. For example:
+"before", then X ->rcu-gp Y ->rcu-link Z roughly says that X is a
+grace period which ends before Z begins. (In fact it covers more than
+this, because it also includes cases where some store propagates to
+Z's CPU before Z begins but doesn't propagate to some other CPU until
+after X ends.) Similarly, X ->rcu-rscsi Y ->rcu-link Z says that X is
+the end of a critical section which starts before Z begins.
+
+The LKMM goes on to define the rcu-fence relation as a sequence of
+rcu-gp and rcu-rscsi links separated by rcu-link links, in which the
+number of rcu-gp links is >= the number of rcu-rscsi links. For
+example:
- X ->gp Y ->rcu-link Z ->rscs T ->rcu-link U ->gp V
+ X ->rcu-gp Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
would imply that X ->rcu-fence V, because this sequence contains two
-gp links and only one rscs link. (It also implies that X ->rcu-fence T
-and Z ->rcu-fence V.) On the other hand:
+rcu-gp links and one rcu-rscsi link. (It also implies that
+X ->rcu-fence T and Z ->rcu-fence V.) On the other hand:
- X ->rscs Y ->rcu-link Z ->rscs T ->rcu-link U ->gp V
+ X ->rcu-rscsi Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
does not imply X ->rcu-fence V, because the sequence contains only
-one gp link but two rscs links.
+one rcu-gp link but two rcu-rscsi links.
The rcu-fence relation is important because the Grace Period Guarantee
means that rcu-fence acts kind of like a strong fence. In particular,
-if W is a write and we have W ->rcu-fence Z, the Guarantee says that W
-will propagate to every CPU before Z executes.
+E ->rcu-fence F implies not only that E begins before F ends, but also
+that any write po-before E will propagate to every CPU before any
+instruction po-after F can execute. (However, it does not imply that
+E must execute before F; in fact, each synchronize_rcu() fence event
+is linked to itself by rcu-fence as a degenerate case.)
To prove this in full generality requires some intellectual effort.
We'll consider just a very simple case:
- W ->gp X ->rcu-link Y ->rscs Z.
+ G ->rcu-gp W ->rcu-link Z ->rcu-rscsi F.
-This formula means that there is a grace period G and a critical
-section C such that:
+This formula means that G and W are the same event (a grace period),
+and there are events X, Y and a read-side critical section C such that:
- 1. W is po-before G;
+ 1. G = W is po-before or equal to X;
- 2. X is equal to or po-after G;
+ 2. X comes "before" Y in some sense (including rfe, co and fr);
- 3. X comes "before" Y in some sense;
+ 3. Y is po-before Z;
- 4. Y is po-before the end of C;
+ 4. Z is the rcu_read_unlock() event marking the end of C;
- 5. Z is equal to or po-after the start of C.
+ 5. F is the rcu_read_lock() event marking the start of C.
-From 2 - 4 we deduce that the grace period G ends before the critical
-section C. Then the second part of the Grace Period Guarantee says
-not only that G starts before C does, but also that W (which executes
-on G's CPU before G starts) must propagate to every CPU before C
-starts. In particular, W propagates to every CPU before Z executes
-(or finishes executing, in the case where Z is equal to the
-rcu_read_lock() fence event which starts C.) This sort of reasoning
-can be expanded to handle all the situations covered by rcu-fence.
+From 1 - 4 we deduce that the grace period G ends before the critical
+section C. Then part (2) of the Grace Period Guarantee says not only
+that G starts before C does, but also that any write which executes on
+G's CPU before G starts must propagate to every CPU before C starts.
+In particular, the write propagates to every CPU before F finishes
+executing and hence before any instruction po-after F can execute.
+This sort of reasoning can be extended to handle all the situations
+covered by rcu-fence.
Finally, the LKMM defines the RCU-before (rb) relation in terms of
rcu-fence. This is done in essentially the same way as the pb
relation was defined in terms of strong-fence. We will omit the
-details; the end result is that E ->rb F implies E must execute before
-F, just as E ->pb F does (and for much the same reasons).
+details; the end result is that E ->rb F implies E must execute
+before F, just as E ->pb F does (and for much the same reasons).
Putting this all together, the LKMM expresses the Grace Period
Guarantee by requiring that the rb relation does not contain a cycle.
-Equivalently, this "rcu" axiom requires that there are no events E and
-F with E ->rcu-link F ->rcu-fence E. Or to put it a third way, the
-axiom requires that there are no cycles consisting of gp and rscs
-alternating with rcu-link, where the number of gp links is >= the
-number of rscs links.
+Equivalently, this "rcu" axiom requires that there are no events E
+and F with E ->rcu-link F ->rcu-fence E. Or to put it a third way,
+the axiom requires that there are no cycles consisting of rcu-gp and
+rcu-rscsi alternating with rcu-link, where the number of rcu-gp links
+is >= the number of rcu-rscsi links.
Justifying the axiom isn't easy, but it is in fact a valid
formalization of the Grace Period Guarantee. We won't attempt to go
through the detailed argument, but the following analysis gives a
-taste of what is involved. Suppose we have a violation of the first
-part of the Guarantee: A critical section starts before a grace
-period, and some store propagates to the critical section's CPU before
-the end of the critical section but doesn't propagate to some other
-CPU until after the end of the grace period.
+taste of what is involved. Suppose both parts of the Guarantee are
+violated: A critical section starts before a grace period, and some
+store propagates to the critical section's CPU before the end of the
+critical section but doesn't propagate to some other CPU until after
+the end of the grace period.
Putting symbols to these ideas, let L and U be the rcu_read_lock() and
rcu_read_unlock() fence events delimiting the critical section in
question, and let S be the synchronize_rcu() fence event for the grace
period. Saying that the critical section starts before S means there
-are events E and F where E is po-after L (which marks the start of the
-critical section), E is "before" F in the sense of the rcu-link
-relation, and F is po-before the grace period S:
+are events Q and R where Q is po-after L (which marks the start of the
+critical section), Q is "before" R in the sense used by the rcu-link
+relation, and R is po-before the grace period S. Thus we have:
- L ->po E ->rcu-link F ->po S.
+ L ->rcu-link S.
-Let W be the store mentioned above, let Z come before the end of the
+Let W be the store mentioned above, let Y come before the end of the
critical section and witness that W propagates to the critical
-section's CPU by reading from W, and let Y on some arbitrary CPU be a
-witness that W has not propagated to that CPU, where Y happens after
+section's CPU by reading from W, and let Z on some arbitrary CPU be a
+witness that W has not propagated to that CPU, where Z happens after
some event X which is po-after S. Symbolically, this amounts to:
- S ->po X ->hb* Y ->fr W ->rf Z ->po U.
+ S ->po X ->hb* Z ->fr W ->rf Y ->po U.
-The fr link from Y to W indicates that W has not propagated to Y's CPU
-at the time that Y executes. From this, it can be shown (see the
-discussion of the rcu-link relation earlier) that X and Z are related
-by rcu-link, yielding:
+The fr link from Z to W indicates that W has not propagated to Z's CPU
+at the time that Z executes. From this, it can be shown (see the
+discussion of the rcu-link relation earlier) that S and U are related
+by rcu-link:
- S ->po X ->rcu-link Z ->po U.
+ S ->rcu-link U.
-The formulas say that S is po-between F and X, hence F ->gp X. They
-also say that Z comes before the end of the critical section and E
-comes after its start, hence Z ->rscs E. From all this we obtain:
+Since S is a grace period we have S ->rcu-gp S, and since L and U are
+the start and end of the critical section C we have U ->rcu-rscsi L.
+From this we obtain:
- F ->gp X ->rcu-link Z ->rscs E ->rcu-link F,
+ S ->rcu-gp S ->rcu-link U ->rcu-rscsi L ->rcu-link S,
a forbidden cycle. Thus the "rcu" axiom rules out this violation of
the Grace Period Guarantee.
For something a little more down-to-earth, let's see how the axiom
works out in practice. Consider the RCU code example from above, this
-time with statement labels added to the memory access instructions:
+time with statement labels added:
int x, y;
P0()
{
- rcu_read_lock();
- W: WRITE_ONCE(x, 1);
- X: WRITE_ONCE(y, 1);
- rcu_read_unlock();
+ L: rcu_read_lock();
+ X: WRITE_ONCE(x, 1);
+ Y: WRITE_ONCE(y, 1);
+ U: rcu_read_unlock();
}
P1()
{
int r1, r2;
- Y: r1 = READ_ONCE(x);
- synchronize_rcu();
- Z: r2 = READ_ONCE(y);
+ Z: r1 = READ_ONCE(x);
+ S: synchronize_rcu();
+ W: r2 = READ_ONCE(y);
}
-If r2 = 0 at the end then P0's store at X overwrites the value that
-P1's load at Z reads from, so we have Z ->fre X and thus Z ->rcu-link X.
-In addition, there is a synchronize_rcu() between Y and Z, so therefore
-we have Y ->gp Z.
+If r2 = 0 at the end then P0's store at Y overwrites the value that
+P1's load at W reads from, so we have W ->fre Y. Since S ->po W and
+also Y ->po U, we get S ->rcu-link U. In addition, S ->rcu-gp S
+because S is a grace period.
-If r1 = 1 at the end then P1's load at Y reads from P0's store at W,
-so we have W ->rcu-link Y. In addition, W and X are in the same critical
-section, so therefore we have X ->rscs W.
+If r1 = 1 at the end then P1's load at Z reads from P0's store at X,
+so we have X ->rfe Z. Together with L ->po X and Z ->po S, this
+yields L ->rcu-link S. And since L and U are the start and end of a
+critical section, we have U ->rcu-rscsi L.
-Then X ->rscs W ->rcu-link Y ->gp Z ->rcu-link X is a forbidden cycle,
-violating the "rcu" axiom. Hence the outcome is not allowed by the
-LKMM, as we would expect.
+Then U ->rcu-rscsi L ->rcu-link S ->rcu-gp S ->rcu-link U is a
+forbidden cycle, violating the "rcu" axiom. Hence the outcome is not
+allowed by the LKMM, as we would expect.
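
As an aside, this reasoning is easy to check mechanically.  A minimal
litmus-test sketch of the same example (the test name is illustrative)
can be fed to herd7 as described in tools/memory-model/README:

	C C-RCU-sketch

	{}

	P0(int *x, int *y)
	{
		rcu_read_lock();
		WRITE_ONCE(*x, 1);
		WRITE_ONCE(*y, 1);
		rcu_read_unlock();
	}

	P1(int *x, int *y)
	{
		int r1;
		int r2;

		r1 = READ_ONCE(*x);
		synchronize_rcu();
		r2 = READ_ONCE(*y);
	}

	exists (1:r1=1 /\ 1:r2=0)

herd7 should report that the "exists" clause is never satisfied,
matching the analysis above.
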
For contrast, let's see what can happen in a more complicated example:
@@ -1690,51 +1695,52 @@ For contrast, let's see what can happen in a more complicated example:
{
int r0;
- rcu_read_lock();
- W: r0 = READ_ONCE(x);
- X: WRITE_ONCE(y, 1);
- rcu_read_unlock();
+ L0: rcu_read_lock();
+ r0 = READ_ONCE(x);
+ WRITE_ONCE(y, 1);
+ U0: rcu_read_unlock();
}
P1()
{
int r1;
- Y: r1 = READ_ONCE(y);
- synchronize_rcu();
- Z: WRITE_ONCE(z, 1);
+ r1 = READ_ONCE(y);
+ S1: synchronize_rcu();
+ WRITE_ONCE(z, 1);
}
P2()
{
int r2;
- rcu_read_lock();
- U: r2 = READ_ONCE(z);
- V: WRITE_ONCE(x, 1);
- rcu_read_unlock();
+ L2: rcu_read_lock();
+ r2 = READ_ONCE(z);
+ WRITE_ONCE(x, 1);
+ U2: rcu_read_unlock();
}
If r0 = r1 = r2 = 1 at the end, then similar reasoning to before shows
-that W ->rscs X ->rcu-link Y ->gp Z ->rcu-link U ->rscs V ->rcu-link W.
-However this cycle is not forbidden, because the sequence of relations
-contains fewer instances of gp (one) than of rscs (two). Consequently
-the outcome is allowed by the LKMM. The following instruction timing
-diagram shows how it might actually occur:
+that U0 ->rcu-rscsi L0 ->rcu-link S1 ->rcu-gp S1 ->rcu-link U2 ->rcu-rscsi
+L2 ->rcu-link U0. However this cycle is not forbidden, because the
+sequence of relations contains fewer instances of rcu-gp (one) than of
+rcu-rscsi (two). Consequently the outcome is allowed by the LKMM.
+The following instruction timing diagram shows how it might actually
+occur:
P0 P1 P2
-------------------- -------------------- --------------------
rcu_read_lock()
-X: WRITE_ONCE(y, 1)
- Y: r1 = READ_ONCE(y)
+WRITE_ONCE(y, 1)
+ r1 = READ_ONCE(y)
synchronize_rcu() starts
. rcu_read_lock()
- . V: WRITE_ONCE(x, 1)
-W: r0 = READ_ONCE(x) .
+ . WRITE_ONCE(x, 1)
+r0 = READ_ONCE(x) .
rcu_read_unlock() .
synchronize_rcu() ends
- Z: WRITE_ONCE(z, 1)
- U: r2 = READ_ONCE(z)
+ WRITE_ONCE(z, 1)
+ r2 = READ_ONCE(z)
rcu_read_unlock()
This requires P0 and P2 to execute their loads and stores out of
@@ -1744,6 +1750,15 @@ section in P0 both starts before P1's grace period does and ends
before it does, and the critical section in P2 both starts after P1's
grace period does and ends after it does.
+Addendum: The LKMM now supports SRCU (Sleepable Read-Copy-Update) in
+addition to normal RCU. The ideas involved are much the same as
+above, with new relations srcu-gp and srcu-rscsi added to represent
+SRCU grace periods and read-side critical sections. There is a
+restriction on the srcu-gp and srcu-rscsi links that can appear in an
+rcu-fence sequence (the srcu-rscsi links must be paired with srcu-gp
+links having the same SRCU domain with proper nesting); the details
+are relatively unimportant.
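+
+For instance, here is a hedged sketch of what an SRCU litmus test
+might look like (the test name and the srcu_struct name are
+illustrative); the essential point is that the srcu_read_lock(),
+srcu_read_unlock() and synchronize_srcu() calls all refer to the
+same domain s:
+
+	C C-SRCU-sketch
+
+	{}
+
+	P0(int *x, int *y, struct srcu_struct *s)
+	{
+		int r0;
+		int idx;
+
+		idx = srcu_read_lock(s);
+		r0 = READ_ONCE(*x);
+		WRITE_ONCE(*y, 1);
+		srcu_read_unlock(s, idx);
+	}
+
+	P1(int *x, int *y, struct srcu_struct *s)
+	{
+		int r1;
+
+		r1 = READ_ONCE(*y);
+		synchronize_srcu(s);
+		WRITE_ONCE(*x, 1);
+	}
+
+	exists (0:r0=1 /\ 1:r1=1)
+
+One srcu-gp link and one srcu-rscsi link (with matching domains) would
+form a forbidden cycle here, so the "exists" clause should never be
+satisfied.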
+
LOCKING
-------
diff --git a/tools/memory-model/README b/tools/memory-model/README
index 0f2c366518c6..2b87f3971548 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -20,13 +20,17 @@ that litmus test to be exercised within the Linux kernel.
REQUIREMENTS
============
-Version 7.49 of the "herd7" and "klitmus7" tools must be downloaded
-separately:
+Version 7.52 or higher of the "herd7" and "klitmus7" tools must be
+downloaded separately:
https://github.com/herd/herdtools7
See "herdtools7/INSTALL.md" for installation instructions.
+Note that although these tools usually provide backwards compatibility,
+this is not absolutely guaranteed. Therefore, if a later version does
+not work, please try using the exact version called out above.
+
==================
BASIC USAGE: HERD7
@@ -221,8 +225,29 @@ The Linux-kernel memory model has the following limitations:
additional call_rcu() process to the site of the
emulated rcu-barrier().
- e. Sleepable RCU (SRCU) is not modeled. It can be
- emulated, but perhaps not simply.
+ e. Although sleepable RCU (SRCU) is now modeled, there
+ are some subtle differences between its semantics and
+ those in the Linux kernel. For example, the kernel
+ might interpret the following sequence as two partially
+ overlapping SRCU read-side critical sections:
+
+ 1 r1 = srcu_read_lock(&my_srcu);
+ 2 do_something_1();
+ 3 r2 = srcu_read_lock(&my_srcu);
+ 4 do_something_2();
+ 5 srcu_read_unlock(&my_srcu, r1);
+ 6 do_something_3();
+ 7 srcu_read_unlock(&my_srcu, r2);
+
+ In contrast, LKMM will interpret this as a nested pair of
+ SRCU read-side critical sections, with the outer critical
+ section spanning lines 1-7 and the inner critical section
+ spanning lines 3-5.
+
+ This difference would be more of a concern had anyone
+ identified a reasonable use case for partially overlapping
+ SRCU read-side critical sections. For more information,
+ please see: https://paulmck.livejournal.com/40593.html
f. Reader-writer locking is not modeled. It can be
emulated in litmus tests using atomic read-modify-write
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index 796513362c05..def9131d3d8e 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -33,8 +33,14 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'after-unlock-lock (*smp_mb__after_unlock_lock*)
instructions F[Barriers]
+(* SRCU *)
+enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
+instructions SRCU[SRCU]
+(* All srcu events *)
+let Srcu = Srcu-lock | Srcu-unlock | Sync-srcu
+
(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *)
-let matched = let rec
+let rcu-rscs = let rec
unmatched-locks = Rcu-lock \ domain(matched)
and unmatched-unlocks = Rcu-unlock \ range(matched)
and unmatched = unmatched-locks | unmatched-unlocks
@@ -46,8 +52,27 @@ let matched = let rec
in matched
(* Validate nesting *)
-flag ~empty Rcu-lock \ domain(matched) as unbalanced-rcu-locking
-flag ~empty Rcu-unlock \ range(matched) as unbalanced-rcu-locking
+flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking
+flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking
+
+(* Compute matching pairs of nested Srcu-lock and Srcu-unlock *)
+let srcu-rscs = let rec
+ unmatched-locks = Srcu-lock \ domain(matched)
+ and unmatched-unlocks = Srcu-unlock \ range(matched)
+ and unmatched = unmatched-locks | unmatched-unlocks
+ and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc
+ and unmatched-locks-to-unlocks =
+ ([unmatched-locks] ; po ; [unmatched-unlocks]) & loc
+ and matched = matched | (unmatched-locks-to-unlocks \
+ (unmatched-po ; unmatched-po))
+ in matched
+
+(* Validate nesting *)
+flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking
+flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking
+
+(* Check for use of synchronize_srcu() inside an RCU critical section *)
+flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
-(* Outermost level of nesting only *)
-let crit = matched \ (po^-1 ; matched ; po^-1)
+(* Validate SRCU dynamic match *)
+flag ~empty different-values(srcu-rscs) as srcu-bad-nesting
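
The let-rec definitions above pair each lock with its matching unlock
much as a parenthesis matcher would.  A hypothetical standalone C
sketch of the same idea follows; note that the real computation in the
.bell file is relational, not sequential:

	#include <stdio.h>

	int main(void)
	{
		/* 'L' = read_lock event, 'U' = read_unlock event, in po order */
		const char *events = "LLUU";	/* two properly nested sections */
		int stack[16], top = 0;

		for (int i = 0; events[i]; i++) {
			if (events[i] == 'L')
				stack[top++] = i;	/* innermost unmatched lock */
			else
				printf("lock %d matches unlock %d\n",
				       stack[--top], i);
		}
		return 0;
	}

This prints "lock 1 matches unlock 2" and then "lock 0 matches
unlock 3", mirroring how nested critical sections are matched.
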
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index 8f23c74a96fd..8dcb37835b61 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -33,7 +33,7 @@ let mb = ([M] ; fencerel(Mb) ; [M]) |
([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
([M] ; po ; [UL] ; (co | po) ; [LKW] ;
fencerel(After-unlock-lock) ; [M])
-let gp = po ; [Sync-rcu] ; po?
+let gp = po ; [Sync-rcu | Sync-srcu] ; po?
let strong-fence = mb | gp
@@ -91,32 +91,47 @@ acyclic pb as propagation
(*******)
(*
- * Effect of read-side critical section proceeds from the rcu_read_lock()
- * onward on the one hand and from the rcu_read_unlock() backwards on the
- * other hand.
+ * Effects of read-side critical sections proceed from the rcu_read_unlock()
+ * or srcu_read_unlock() backwards on the one hand, and from the
+ * rcu_read_lock() or srcu_read_lock() forwards on the other hand.
+ *
+ * In the definition of rcu-fence below, the po term at the left-hand side
+ * of each disjunct and the po? term at the right-hand end have been factored
+ * out. They have been moved into the definitions of rcu-link and rb.
+ * This was necessary in order to apply the "& loc" tests correctly.
*)
-let rscs = po ; crit^-1 ; po?
+let rcu-gp = [Sync-rcu] (* Compare with gp *)
+let srcu-gp = [Sync-srcu]
+let rcu-rscsi = rcu-rscs^-1
+let srcu-rscsi = srcu-rscs^-1
(*
* The synchronize_rcu() strong fence is special in that it can order not
* one but two non-rf relations, but only in conjunction with an RCU
* read-side critical section.
*)
-let rcu-link = hb* ; pb* ; prop
+let rcu-link = po? ; hb* ; pb* ; prop ; po
(*
* Any sequence containing at least as many grace periods as RCU read-side
* critical sections (joined by rcu-link) acts as a generalized strong fence.
+ * Likewise for SRCU grace periods and read-side critical sections, provided
+ * the synchronize_srcu() and srcu_read_[un]lock() calls refer to the same
+ * struct srcu_struct location.
*)
-let rec rcu-fence = gp |
- (gp ; rcu-link ; rscs) |
- (rscs ; rcu-link ; gp) |
- (gp ; rcu-link ; rcu-fence ; rcu-link ; rscs) |
- (rscs ; rcu-link ; rcu-fence ; rcu-link ; gp) |
+let rec rcu-fence = rcu-gp | srcu-gp |
+ (rcu-gp ; rcu-link ; rcu-rscsi) |
+ ((srcu-gp ; rcu-link ; srcu-rscsi) & loc) |
+ (rcu-rscsi ; rcu-link ; rcu-gp) |
+ ((srcu-rscsi ; rcu-link ; srcu-gp) & loc) |
+ (rcu-gp ; rcu-link ; rcu-fence ; rcu-link ; rcu-rscsi) |
+ ((srcu-gp ; rcu-link ; rcu-fence ; rcu-link ; srcu-rscsi) & loc) |
+ (rcu-rscsi ; rcu-link ; rcu-fence ; rcu-link ; rcu-gp) |
+ ((srcu-rscsi ; rcu-link ; rcu-fence ; rcu-link ; srcu-gp) & loc) |
(rcu-fence ; rcu-link ; rcu-fence)
(* rb orders instructions just as pb does *)
-let rb = prop ; rcu-fence ; hb* ; pb*
+let rb = prop ; po ; rcu-fence ; po? ; hb* ; pb*
irreflexive rb as rcu
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index b27911cc087d..551eeaa389d4 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -47,6 +47,12 @@ rcu_read_unlock() { __fence{rcu-unlock}; }
synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }
+// SRCU
+srcu_read_lock(X) __srcu{srcu-lock}(X)
+srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); }
+synchronize_srcu(X) { __srcu{sync-srcu}(X); }
+synchronize_srcu_expedited(X) { __srcu{sync-srcu}(X); }
+
// Atomic
atomic_read(X) READ_ONCE(*X)
atomic_set(X,V) { WRITE_ONCE(*X,V); }
diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat
index 305ded17e741..a059d1a6d8a2 100644
--- a/tools/memory-model/lock.cat
+++ b/tools/memory-model/lock.cat
@@ -6,9 +6,6 @@
(*
* Generate coherence orders and handle lock operations
- *
- * Warning: spin_is_locked() crashes herd7 versions strictly before 7.48.
- * spin_is_locked() is functional from herd7 version 7.49.
*)
include "cross.cat"
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index c9d038f91af6..53f8be0f4a1f 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -25,14 +25,17 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
OBJTOOL := $(OUTPUT)objtool
OBJTOOL_IN := $(OBJTOOL)-in.o
+LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
+LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
all: $(OBJTOOL)
INCLUDES := -I$(srctree)/tools/include \
-I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
-I$(srctree)/tools/objtool/arch/$(ARCH)/include
WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
-LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
# Allow old libelf to be used:
elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
index b0d7dc3d71b5..7a111a77b7aa 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -33,7 +33,11 @@
#define INSN_STACK 8
#define INSN_BUG 9
#define INSN_NOP 10
-#define INSN_OTHER 11
+#define INSN_STAC 11
+#define INSN_CLAC 12
+#define INSN_STD 13
+#define INSN_CLD 14
+#define INSN_OTHER 15
#define INSN_LAST INSN_OTHER
enum op_dest_type {
@@ -41,6 +45,7 @@ enum op_dest_type {
OP_DEST_REG_INDIRECT,
OP_DEST_MEM,
OP_DEST_PUSH,
+ OP_DEST_PUSHF,
OP_DEST_LEAVE,
};
@@ -55,6 +60,7 @@ enum op_src_type {
OP_SRC_REG_INDIRECT,
OP_SRC_CONST,
OP_SRC_POP,
+ OP_SRC_POPF,
OP_SRC_ADD,
OP_SRC_AND,
};
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 540a209b78ab..472e991f6512 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -357,19 +357,26 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
/* pushf */
*type = INSN_STACK;
op->src.type = OP_SRC_CONST;
- op->dest.type = OP_DEST_PUSH;
+ op->dest.type = OP_DEST_PUSHF;
break;
case 0x9d:
/* popf */
*type = INSN_STACK;
- op->src.type = OP_SRC_POP;
+ op->src.type = OP_SRC_POPF;
op->dest.type = OP_DEST_MEM;
break;
case 0x0f:
- if (op2 >= 0x80 && op2 <= 0x8f) {
+ if (op2 == 0x01) {
+
+ if (modrm == 0xca)
+ *type = INSN_CLAC;
+ else if (modrm == 0xcb)
+ *type = INSN_STAC;
+
+ } else if (op2 >= 0x80 && op2 <= 0x8f) {
*type = INSN_JUMP_CONDITIONAL;
@@ -444,6 +451,14 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
*type = INSN_CALL;
break;
+ case 0xfc:
+ *type = INSN_CLD;
+ break;
+
+ case 0xfd:
+ *type = INSN_STD;
+ break;
+
case 0xff:
if (modrm_reg == 2 || modrm_reg == 3)
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 694abc628e9b..f3b378126011 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -29,7 +29,7 @@
#include "builtin.h"
#include "check.h"
-bool no_fp, no_unreachable, retpoline, module;
+bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
static const char * const check_usage[] = {
"objtool check [<options>] file.o",
@@ -41,6 +41,8 @@ const struct option check_options[] = {
OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
+ OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"),
+ OPT_BOOLEAN('a', "uaccess", &uaccess, "enable uaccess checking"),
OPT_END(),
};
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
index 28ff40e19a14..69762f9c5602 100644
--- a/tools/objtool/builtin.h
+++ b/tools/objtool/builtin.h
@@ -20,7 +20,7 @@
#include <subcmd/parse-options.h>
extern const struct option check_options[];
-extern bool no_fp, no_unreachable, retpoline, module;
+extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
extern int cmd_check(int argc, const char **argv);
extern int cmd_orc(int argc, const char **argv);
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 0414a0d52262..ac743a1d53ab 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -31,6 +31,7 @@
struct alternative {
struct list_head list;
struct instruction *insn;
+ bool skip_orig;
};
const char *objname;
@@ -105,29 +106,6 @@ static struct instruction *next_insn_same_func(struct objtool_file *file,
insn = next_insn_same_sec(file, insn))
/*
- * Check if the function has been manually whitelisted with the
- * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
- * due to its use of a context switching instruction.
- */
-static bool ignore_func(struct objtool_file *file, struct symbol *func)
-{
- struct rela *rela;
-
- /* check for STACK_FRAME_NON_STANDARD */
- if (file->whitelist && file->whitelist->rela)
- list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
- if (rela->sym->type == STT_SECTION &&
- rela->sym->sec == func->sec &&
- rela->addend == func->offset)
- return true;
- if (rela->sym->type == STT_FUNC && rela->sym == func)
- return true;
- }
-
- return false;
-}
-
-/*
* This checks to see if the given function is a "noreturn" function.
*
* For global functions which are outside the scope of this object file, we
@@ -165,6 +143,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
"fortify_panic",
"usercopy_abort",
"machine_real_restart",
+ "rewind_stack_do_exit",
};
if (func->bind == STB_WEAK)
@@ -436,18 +415,107 @@ static void add_ignores(struct objtool_file *file)
struct instruction *insn;
struct section *sec;
struct symbol *func;
+ struct rela *rela;
- for_each_sec(file, sec) {
- list_for_each_entry(func, &sec->symbol_list, list) {
- if (func->type != STT_FUNC)
- continue;
+ sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
+ if (!sec)
+ return;
+
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ switch (rela->sym->type) {
+ case STT_FUNC:
+ func = rela->sym;
+ break;
- if (!ignore_func(file, func))
+ case STT_SECTION:
+ func = find_symbol_by_offset(rela->sym->sec, rela->addend);
+ if (!func || func->type != STT_FUNC)
continue;
+ break;
- func_for_each_insn_all(file, func, insn)
- insn->ignore = true;
+ default:
+ WARN("unexpected relocation symbol type in %s: %d", sec->name, rela->sym->type);
+ continue;
}
+
+ func_for_each_insn_all(file, func, insn)
+ insn->ignore = true;
+ }
+}
+
+/*
+ * This is a whitelist of functions that are allowed to be called with AC set.
+ * The list is meant to be minimal and only contains compiler instrumentation
+ * ABI and a few functions used to implement *_{to,from}_user() functions.
+ *
+ * These functions must not directly change AC, but may PUSHF/POPF.
+ */
+static const char *uaccess_safe_builtin[] = {
+ /* KASAN */
+ "kasan_report",
+ "check_memory_region",
+ /* KASAN out-of-line */
+ "__asan_loadN_noabort",
+ "__asan_load1_noabort",
+ "__asan_load2_noabort",
+ "__asan_load4_noabort",
+ "__asan_load8_noabort",
+ "__asan_load16_noabort",
+ "__asan_storeN_noabort",
+ "__asan_store1_noabort",
+ "__asan_store2_noabort",
+ "__asan_store4_noabort",
+ "__asan_store8_noabort",
+ "__asan_store16_noabort",
+ /* KASAN in-line */
+ "__asan_report_load_n_noabort",
+ "__asan_report_load1_noabort",
+ "__asan_report_load2_noabort",
+ "__asan_report_load4_noabort",
+ "__asan_report_load8_noabort",
+ "__asan_report_load16_noabort",
+ "__asan_report_store_n_noabort",
+ "__asan_report_store1_noabort",
+ "__asan_report_store2_noabort",
+ "__asan_report_store4_noabort",
+ "__asan_report_store8_noabort",
+ "__asan_report_store16_noabort",
+ /* KCOV */
+ "write_comp_data",
+ "__sanitizer_cov_trace_pc",
+ "__sanitizer_cov_trace_const_cmp1",
+ "__sanitizer_cov_trace_const_cmp2",
+ "__sanitizer_cov_trace_const_cmp4",
+ "__sanitizer_cov_trace_const_cmp8",
+ "__sanitizer_cov_trace_cmp1",
+ "__sanitizer_cov_trace_cmp2",
+ "__sanitizer_cov_trace_cmp4",
+ "__sanitizer_cov_trace_cmp8",
+ /* UBSAN */
+ "ubsan_type_mismatch_common",
+ "__ubsan_handle_type_mismatch",
+ "__ubsan_handle_type_mismatch_v1",
+ /* misc */
+ "csum_partial_copy_generic",
+ "__memcpy_mcsafe",
+ "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
+ NULL
+};
+
+static void add_uaccess_safe(struct objtool_file *file)
+{
+ struct symbol *func;
+ const char **name;
+
+ if (!uaccess)
+ return;
+
+ for (name = uaccess_safe_builtin; *name; name++) {
+ func = find_symbol_by_name(file->elf, *name);
+ if (!func)
+ continue;
+
+ func->alias->uaccess_safe = true;
}
}
@@ -457,13 +525,13 @@ static void add_ignores(struct objtool_file *file)
* But it at least allows objtool to understand the control flow *around* the
* retpoline.
*/
-static int add_nospec_ignores(struct objtool_file *file)
+static int add_ignore_alternatives(struct objtool_file *file)
{
struct section *sec;
struct rela *rela;
struct instruction *insn;
- sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+ sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
if (!sec)
return 0;
@@ -475,7 +543,7 @@ static int add_nospec_ignores(struct objtool_file *file)
insn = find_insn(file, rela->sym->sec, rela->addend);
if (!insn) {
- WARN("bad .discard.nospec entry");
+ WARN("bad .discard.ignore_alts entry");
return -1;
}
@@ -524,7 +592,8 @@ static int add_jump_destinations(struct objtool_file *file)
continue;
} else {
/* sibling call */
- insn->jump_dest = 0;
+ insn->call_dest = rela->sym;
+ insn->jump_dest = NULL;
continue;
}
@@ -546,25 +615,38 @@ static int add_jump_destinations(struct objtool_file *file)
}
/*
- * For GCC 8+, create parent/child links for any cold
- * subfunctions. This is _mostly_ redundant with a similar
- * initialization in read_symbols().
- *
- * If a function has aliases, we want the *first* such function
- * in the symbol table to be the subfunction's parent. In that
- * case we overwrite the initialization done in read_symbols().
- *
- * However this code can't completely replace the
- * read_symbols() code because this doesn't detect the case
- * where the parent function's only reference to a subfunction
- * is through a switch table.
+ * Cross-function jump.
*/
if (insn->func && insn->jump_dest->func &&
- insn->func != insn->jump_dest->func &&
- !strstr(insn->func->name, ".cold.") &&
- strstr(insn->jump_dest->func->name, ".cold.")) {
- insn->func->cfunc = insn->jump_dest->func;
- insn->jump_dest->func->pfunc = insn->func;
+ insn->func != insn->jump_dest->func) {
+
+ /*
+ * For GCC 8+, create parent/child links for any cold
+ * subfunctions. This is _mostly_ redundant with a
+ * similar initialization in read_symbols().
+ *
+ * If a function has aliases, we want the *first* such
+ * function in the symbol table to be the subfunction's
+ * parent. In that case we overwrite the
+ * initialization done in read_symbols().
+ *
+ * However this code can't completely replace the
+ * read_symbols() code because this doesn't detect the
+ * case where the parent function's only reference to a
+ * subfunction is through a switch table.
+ */
+ if (!strstr(insn->func->name, ".cold.") &&
+ strstr(insn->jump_dest->func->name, ".cold.")) {
+ insn->func->cfunc = insn->jump_dest->func;
+ insn->jump_dest->func->pfunc = insn->func;
+
+ } else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
+ insn->jump_dest->offset == insn->jump_dest->func->offset) {
+
+ /* sibling call */
+ insn->call_dest = insn->jump_dest->func;
+ insn->jump_dest = NULL;
+ }
}
}
@@ -633,9 +715,6 @@ static int add_call_destinations(struct objtool_file *file)
* conditionally jumps to the _end_ of the entry. We have to modify these
* jumps' destinations to point back to .text rather than the end of the
* entry in .altinstr_replacement.
- *
- * 4. It has been requested that we don't validate the !POPCNT feature path
- * which is a "very very small percentage of machines".
*/
static int handle_group_alt(struct objtool_file *file,
struct special_alt *special_alt,
@@ -651,9 +730,6 @@ static int handle_group_alt(struct objtool_file *file,
if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
break;
- if (special_alt->skip_orig)
- insn->type = INSN_NOP;
-
insn->alt_group = true;
last_orig_insn = insn;
}
@@ -695,6 +771,7 @@ static int handle_group_alt(struct objtool_file *file,
last_new_insn = insn;
insn->ignore = orig_insn->ignore_alts;
+ insn->func = orig_insn->func;
if (insn->type != INSN_JUMP_CONDITIONAL &&
insn->type != INSN_JUMP_UNCONDITIONAL)
@@ -817,6 +894,8 @@ static int add_special_section_alts(struct objtool_file *file)
}
alt->insn = new_insn;
+ alt->skip_orig = special_alt->skip_orig;
+ orig_insn->ignore_alts |= special_alt->skip_alt;
list_add_tail(&alt->list, &orig_insn->alts);
list_del(&special_alt->list);
@@ -1238,8 +1317,9 @@ static int decode_sections(struct objtool_file *file)
return ret;
add_ignores(file);
+ add_uaccess_safe(file);
- ret = add_nospec_ignores(file);
+ ret = add_ignore_alternatives(file);
if (ret)
return ret;
@@ -1319,11 +1399,11 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
return 0;
/* push */
- if (op->dest.type == OP_DEST_PUSH)
+ if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
cfa->offset += 8;
/* pop */
- if (op->src.type == OP_SRC_POP)
+ if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
cfa->offset -= 8;
/* add immediate to sp */
@@ -1580,6 +1660,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
break;
case OP_SRC_POP:
+ case OP_SRC_POPF:
if (!state->drap && op->dest.type == OP_DEST_REG &&
op->dest.reg == cfa->base) {
@@ -1644,6 +1725,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
break;
case OP_DEST_PUSH:
+ case OP_DEST_PUSHF:
state->stack_size += 8;
if (cfa->base == CFI_SP)
cfa->offset += 8;
@@ -1734,7 +1816,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
break;
case OP_DEST_MEM:
- if (op->src.type != OP_SRC_POP) {
+ if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
WARN_FUNC("unknown stack-related memory operation",
insn->sec, insn->offset);
return -1;
@@ -1798,6 +1880,50 @@ static bool insn_state_match(struct instruction *insn, struct insn_state *state)
return false;
}
+static inline bool func_uaccess_safe(struct symbol *func)
+{
+ if (func)
+ return func->alias->uaccess_safe;
+
+ return false;
+}
+
+static inline const char *insn_dest_name(struct instruction *insn)
+{
+ if (insn->call_dest)
+ return insn->call_dest->name;
+
+ return "{dynamic}";
+}
+
+static int validate_call(struct instruction *insn, struct insn_state *state)
+{
+ if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
+ WARN_FUNC("call to %s() with UACCESS enabled",
+ insn->sec, insn->offset, insn_dest_name(insn));
+ return 1;
+ }
+
+ if (state->df) {
+ WARN_FUNC("call to %s() with DF set",
+ insn->sec, insn->offset, insn_dest_name(insn));
+ return 1;
+ }
+
+ return 0;
+}
+
+static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
+{
+ if (has_modified_stack_frame(state)) {
+ WARN_FUNC("sibling call from callable instruction with modified stack frame",
+ insn->sec, insn->offset);
+ return 1;
+ }
+
+ return validate_call(insn, state);
+}
+
/*
* Follow the branch starting at the given instruction, and recursively follow
* any other branches (jumps). Meanwhile, track the frame pointer state at
@@ -1843,7 +1969,9 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
if (!insn->hint && !insn_state_match(insn, &state))
return 1;
- return 0;
+ /* If we were here with AC=0, but now have AC=1, go again */
+ if (insn->state.uaccess || !state.uaccess)
+ return 0;
}
if (insn->hint) {
@@ -1892,16 +2020,42 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
insn->visited = true;
if (!insn->ignore_alts) {
+ bool skip_orig = false;
+
list_for_each_entry(alt, &insn->alts, list) {
+ if (alt->skip_orig)
+ skip_orig = true;
+
ret = validate_branch(file, alt->insn, state);
- if (ret)
- return 1;
+ if (ret) {
+ if (backtrace)
+ BT_FUNC("(alt)", insn);
+ return ret;
+ }
}
+
+ if (skip_orig)
+ return 0;
}
switch (insn->type) {
case INSN_RETURN:
+ if (state.uaccess && !func_uaccess_safe(func)) {
+ WARN_FUNC("return with UACCESS enabled", sec, insn->offset);
+ return 1;
+ }
+
+ if (!state.uaccess && func_uaccess_safe(func)) {
+ WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", sec, insn->offset);
+ return 1;
+ }
+
+ if (state.df) {
+ WARN_FUNC("return with DF set", sec, insn->offset);
+ return 1;
+ }
+
if (func && has_modified_stack_frame(&state)) {
WARN_FUNC("return with modified stack frame",
sec, insn->offset);
@@ -1917,17 +2071,22 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
return 0;
case INSN_CALL:
- if (is_fentry_call(insn))
- break;
+ case INSN_CALL_DYNAMIC:
+ ret = validate_call(insn, &state);
+ if (ret)
+ return ret;
- ret = dead_end_function(file, insn->call_dest);
- if (ret == 1)
- return 0;
- if (ret == -1)
- return 1;
+ if (insn->type == INSN_CALL) {
+ if (is_fentry_call(insn))
+ break;
+
+ ret = dead_end_function(file, insn->call_dest);
+ if (ret == 1)
+ return 0;
+ if (ret == -1)
+ return 1;
+ }
- /* fallthrough */
- case INSN_CALL_DYNAMIC:
if (!no_fp && func && !has_valid_stack_frame(&state)) {
WARN_FUNC("call without frame pointer save/setup",
sec, insn->offset);
@@ -1937,18 +2096,21 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
case INSN_JUMP_CONDITIONAL:
case INSN_JUMP_UNCONDITIONAL:
- if (insn->jump_dest &&
- (!func || !insn->jump_dest->func ||
- insn->jump_dest->func->pfunc == func)) {
- ret = validate_branch(file, insn->jump_dest,
- state);
+ if (func && !insn->jump_dest) {
+ ret = validate_sibling_call(insn, &state);
if (ret)
- return 1;
+ return ret;
- } else if (func && has_modified_stack_frame(&state)) {
- WARN_FUNC("sibling call from callable instruction with modified stack frame",
- sec, insn->offset);
- return 1;
+ } else if (insn->jump_dest &&
+ (!func || !insn->jump_dest->func ||
+ insn->jump_dest->func->pfunc == func)) {
+ ret = validate_branch(file, insn->jump_dest,
+ state);
+ if (ret) {
+ if (backtrace)
+ BT_FUNC("(branch)", insn);
+ return ret;
+ }
}
if (insn->type == INSN_JUMP_UNCONDITIONAL)
@@ -1957,11 +2119,10 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
break;
case INSN_JUMP_DYNAMIC:
- if (func && list_empty(&insn->alts) &&
- has_modified_stack_frame(&state)) {
- WARN_FUNC("sibling call from callable instruction with modified stack frame",
- sec, insn->offset);
- return 1;
+ if (func && list_empty(&insn->alts)) {
+ ret = validate_sibling_call(insn, &state);
+ if (ret)
+ return ret;
}
return 0;
@@ -1978,6 +2139,63 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
if (update_insn_state(insn, &state))
return 1;
+ if (insn->stack_op.dest.type == OP_DEST_PUSHF) {
+ if (!state.uaccess_stack) {
+ state.uaccess_stack = 1;
+ } else if (state.uaccess_stack >> 31) {
+ WARN_FUNC("PUSHF stack exhausted", sec, insn->offset);
+ return 1;
+ }
+ state.uaccess_stack <<= 1;
+ state.uaccess_stack |= state.uaccess;
+ }
+
+ if (insn->stack_op.src.type == OP_SRC_POPF) {
+ if (state.uaccess_stack) {
+ state.uaccess = state.uaccess_stack & 1;
+ state.uaccess_stack >>= 1;
+ if (state.uaccess_stack == 1)
+ state.uaccess_stack = 0;
+ }
+ }
+
+ break;
+
+ case INSN_STAC:
+ if (state.uaccess) {
+ WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
+ return 1;
+ }
+
+ state.uaccess = true;
+ break;
+
+ case INSN_CLAC:
+ if (!state.uaccess && insn->func) {
+ WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
+ return 1;
+ }
+
+ if (func_uaccess_safe(func) && !state.uaccess_stack) {
+ WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
+ return 1;
+ }
+
+ state.uaccess = false;
+ break;
+
+ case INSN_STD:
+ if (state.df)
+ WARN_FUNC("recursive STD", sec, insn->offset);
+
+ state.df = true;
+ break;
+
+ case INSN_CLD:
+ if (!state.df && insn->func)
+ WARN_FUNC("redundant CLD", sec, insn->offset);
+
+ state.df = false;
break;
default:
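
Note: the PUSHF/POPF handling above keeps a one-bit-per-level stack of
saved AC states in an unsigned int, with a sentinel bit marking the
bottom.  A hypothetical standalone sketch of that encoding, with
invented names, runnable outside objtool:

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned int uaccess_stack;	/* bit stack; sentinel 1 at bottom */
	static bool uaccess;			/* current AC state */

	static void pushf(void)
	{
		if (!uaccess_stack)
			uaccess_stack = 1;	/* plant the sentinel */
		uaccess_stack = (uaccess_stack << 1) | uaccess;
	}

	static void popf(void)
	{
		if (uaccess_stack) {
			uaccess = uaccess_stack & 1;
			uaccess_stack >>= 1;
			if (uaccess_stack == 1)	/* only the sentinel remains */
				uaccess_stack = 0;
		}
	}

	int main(void)
	{
		uaccess = true;			/* STAC */
		pushf();			/* save AC=1 */
		uaccess = false;		/* CLAC */
		popf();				/* restore: AC=1 again */
		printf("AC=%d\n", uaccess);
		return 0;
	}

Running this prints "AC=1": the CLAC between pushf() and popf() is
undone by the restore, which mirrors what validate_branch() tracks
across PUSHF/POPF.
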
@@ -2014,6 +2232,8 @@ static int validate_unwind_hints(struct objtool_file *file)
for_each_insn(file, insn) {
if (insn->hint && !insn->visited) {
ret = validate_branch(file, insn, state);
+ if (ret && backtrace)
+ BT_FUNC("<=== (hint)", insn);
warnings += ret;
}
}
@@ -2141,7 +2361,11 @@ static int validate_functions(struct objtool_file *file)
if (!insn || insn->ignore)
continue;
+ state.uaccess = func->alias->uaccess_safe;
+
ret = validate_branch(file, insn, state);
+ if (ret && backtrace)
+ BT_FUNC("<=== (func)", insn);
warnings += ret;
}
}
@@ -2184,9 +2408,10 @@ static void cleanup(struct objtool_file *file)
elf_close(file->elf);
}
+static struct objtool_file file;
+
int check(const char *_objname, bool orc)
{
- struct objtool_file file;
int ret, warnings = 0;
objname = _objname;
@@ -2197,7 +2422,6 @@ int check(const char *_objname, bool orc)
INIT_LIST_HEAD(&file.insn_list);
hash_init(file.insn_hash);
- file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
file.c_file = find_section_by_name(file.elf, ".comment");
file.ignore_unreachables = no_unreachable;
file.hints = false;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index e6e8a655b556..71e54f97dbcd 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -31,7 +31,8 @@ struct insn_state {
int stack_size;
unsigned char type;
bool bp_scratch;
- bool drap, end;
+ bool drap, end, uaccess, df;
+ unsigned int uaccess_stack;
int drap_reg, drap_offset;
struct cfi_reg vals[CFI_NUM_REGS];
};
@@ -60,7 +61,6 @@ struct objtool_file {
struct elf *elf;
struct list_head insn_list;
DECLARE_HASHTABLE(insn_hash, 16);
- struct section *whitelist;
bool ignore_unreachables, c_file, hints, rodata;
};
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index b8f3cca8e58b..dd198d53387d 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -219,7 +219,7 @@ static int read_sections(struct elf *elf)
static int read_symbols(struct elf *elf)
{
struct section *symtab, *sec;
- struct symbol *sym, *pfunc;
+ struct symbol *sym, *pfunc, *alias;
struct list_head *entry, *tmp;
int symbols_nr, i;
char *coldstr;
@@ -239,6 +239,7 @@ static int read_symbols(struct elf *elf)
return -1;
}
memset(sym, 0, sizeof(*sym));
+ alias = sym;
sym->idx = i;
@@ -288,11 +289,17 @@ static int read_symbols(struct elf *elf)
break;
}
- if (sym->offset == s->offset && sym->len >= s->len) {
- entry = tmp;
- break;
+ if (sym->offset == s->offset) {
+ if (sym->len == s->len && alias == sym)
+ alias = s;
+
+ if (sym->len >= s->len) {
+ entry = tmp;
+ break;
+ }
}
}
+ sym->alias = alias;
list_add(&sym->list, entry);
hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
}
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index bc97ed86b9cd..2cc2ed49322d 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -61,7 +61,8 @@ struct symbol {
unsigned char bind, type;
unsigned long offset;
unsigned int len;
- struct symbol *pfunc, *cfunc;
+ struct symbol *pfunc, *cfunc, *alias;
+ bool uaccess_safe;
};
struct rela {
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 50af4e1274b3..4e50563d87c6 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -23,6 +23,7 @@
#include <stdlib.h>
#include <string.h>
+#include "builtin.h"
#include "special.h"
#include "warn.h"
@@ -42,6 +43,7 @@
#define ALT_NEW_LEN_OFFSET 11
#define X86_FEATURE_POPCNT (4*32+23)
+#define X86_FEATURE_SMAP (9*32+20)
struct special_entry {
const char *sec;
@@ -110,6 +112,22 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
*/
if (feature == X86_FEATURE_POPCNT)
alt->skip_orig = true;
+
+ /*
+ * If UACCESS validation is enabled, force that alternative;
+ * otherwise force it the other way.
+ *
+ * What we want to avoid is having both the original and the
+ * alternative code flow at the same time, in that case we can
+ * find paths that see the STAC but take the NOP instead of
+ * CLAC and the other way around.
+ */
+ if (feature == X86_FEATURE_SMAP) {
+ if (uaccess)
+ alt->skip_orig = true;
+ else
+ alt->skip_alt = true;
+ }
}
orig_rela = find_rela_by_dest(sec, offset + entry->orig);
diff --git a/tools/objtool/special.h b/tools/objtool/special.h
index fad1d092f679..d5c062e718ef 100644
--- a/tools/objtool/special.h
+++ b/tools/objtool/special.h
@@ -26,6 +26,7 @@ struct special_alt {
bool group;
bool skip_orig;
+ bool skip_alt;
bool jump_or_nop;
struct section *orig_sec;
diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h
index afd9f7a05f6d..f4fbb972b611 100644
--- a/tools/objtool/warn.h
+++ b/tools/objtool/warn.h
@@ -64,6 +64,14 @@ static inline char *offstr(struct section *sec, unsigned long offset)
free(_str); \
})
+#define BT_FUNC(format, insn, ...) \
+({ \
+ struct instruction *_insn = (insn); \
+ char *_str = offstr(_insn->sec, _insn->offset); \
+ WARN(" %s: " format, _str, ##__VA_ARGS__); \
+ free(_str); \
+})
+
#define WARN_ELF(format, ...) \
WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
diff --git a/tools/perf/Documentation/Build.txt b/tools/perf/Documentation/Build.txt
index f6fc6507ba55..3766886c4bca 100644
--- a/tools/perf/Documentation/Build.txt
+++ b/tools/perf/Documentation/Build.txt
@@ -47,3 +47,27 @@ Those objects are then used in final linking:
NOTE this description is omitting other libraries involved, only
focusing on build framework outcomes
+
+3) Build with ASan or UBSan
+===========================
+ $ cd tools/perf
+ $ make DESTDIR=/usr
+ $ make DESTDIR=/usr install
+
+AddressSanitizer (or ASan) is a GCC feature that detects memory corruption bugs
+such as buffer overflows and memory leaks.
+
+ $ cd tools/perf
+ $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=address'
+ $ ASAN_OPTIONS=log_path=asan.log ./perf record -a
+
+ASan outputs all detected issues into a log file named 'asan.log.<pid>'.
+
+UndefinedBehaviorSanitizer (or UBSan) is a fast undefined behavior detector
+supported by GCC. UBSan detects undefined behaviors of programs at runtime.
+
+ $ cd tools/perf
+ $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=undefined'
+ $ UBSAN_OPTIONS=print_stacktrace=1 ./perf record -a
+
+If UBSan detects any problem at runtime, it outputs a "runtime error:" message.
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 86f3dcc15f83..462b3cde0675 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:
[report]
# Defaults
- sort-order = comm,dso,symbol
+ sort_order = comm,dso,symbol
percent-limit = 0
queue-size = 0
children = true
@@ -584,6 +584,20 @@ llvm.*::
llvm.opts::
Options passed to llc.
+samples.*::
+
+ samples.context::
+ Define how many ns worth of time to show
+ around samples in the perf report sample context browser.
+
+scripts.*::
+
+ Any option defines a script that is added to the scripts menu
+ in the interactive perf browser and whose output is displayed.
+ The name of the option is the menu entry name; the value is the script
+ command line. The script is passed the same options as a full perf
+ script run, in particular -i (the perf.data file), --cpu and --tid.
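+
+ For example, a hypothetical entry in $HOME/.perfconfig:
+
+	[scripts]
+		show-times = perf script -F time,comm,sym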
+
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index a79c84ae61aa..da7809b15cc9 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -118,6 +118,62 @@ OPTIONS
sum of shown entries will be always 100%. "absolute" means it retains
the original value before and after the filter is applied.
+--time::
+ Analyze samples within given time window. It supports time
+ percent with multiple time ranges. Time string is 'a%/n,b%/m,...'
+ or 'a%-b%,c%-d%,...'.
+
+ For example:
+
+ Select the second 10% time slice to diff:
+
+ perf diff --time 10%/2
+
+ Select from 0% to 10% time slice to diff:
+
+ perf diff --time 0%-10%
+
+ Select the first and the second 10% time slices to diff:
+
+ perf diff --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices to diff:
+
+ perf diff --time 0%-10%,30%-40%
+
+ It also supports analyzing samples within a given time window
+ <start>,<stop>. Times have the format seconds.microseconds. If 'start'
+ is not given (i.e., time string is ',x.y') then analysis starts at
+ the beginning of the file. If stop time is not given (i.e, time
+ string is 'x.y,') then analysis goes to the end of the file. Time string is
+ 'a1.b1,c1.d1:a2.b2,c2.d2'. Use ':' to separate timestamps for different
+ perf.data files.
+
+ For example, we get the timestamp information from 'perf script'.
+
+ perf script -i perf.data.old
+ mgen 13940 [000] 3946.361400: ...
+
+ perf script -i perf.data
+ mgen 13940 [000] 3971.150589 ...
+
+ perf diff --time 3946.361400,:3971.150589,
+
+ It analyzes the perf.data.old from the timestamp 3946.361400 to
+ the end of perf.data.old and analyzes the perf.data from the
+ timestamp 3971.150589 to the end of perf.data.
+
+--cpu:: Only diff samples for the list of CPUs provided. Multiple CPUs can
+ be provided as a comma-separated list with no space: 0,1. Ranges of
+ CPUs are specified with -: 0-2. Default is to report samples on all
+ CPUs.
+
+--pid=::
+ Only diff samples for given process ID (comma separated list).
+
+--tid=::
+ Only diff samples for given thread ID (comma separated list).
+
COMPARISON
----------
The comparison is governed by the baseline file. The baseline perf.data
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 8f0c2be34848..58986f4cc190 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -459,6 +459,25 @@ Set affinity mask of trace reading thread according to the policy defined by 'mo
node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
cpu - thread affinity mask is set to cpu of the processed mmap buffer
+--mmap-flush=number::
+
+Specify the minimal number of bytes that is extracted from mmap data pages and
+processed for output. One can specify the number using B/K/M/G suffixes.
+
+The maximal allowed value is a quarter of the size of mmaped data pages.
+
+The default option value is 1 byte, which means that every time the output
+writing thread finds some new data in the mmaped buffer, the data is
+extracted, possibly compressed (-z) and written to the output, perf.data or
+pipe.
+
+Larger data chunks are compressed more effectively than smaller chunks, so
+extracting larger chunks from the mmap data pages is preferable from the
+perspective of output size reduction.
+
+In some cases, executing fewer output write syscalls with larger data sizes
+can also take less time than executing more output write syscalls with
+smaller data sizes, thus lowering the runtime profiling overhead.
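+
+For example (illustrative; the value must not exceed a quarter of the
+mmaped buffer size):
+
+  perf record --mmap-flush 64K -a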
+
--all-kernel::
Configure all used events to run in kernel space.
@@ -495,6 +514,10 @@ overhead. You can still switch them on with:
--switch-output --no-no-buildid --no-no-buildid-cache
+--switch-max-files=N::
+
+When rotating perf.data with --switch-output, only keep N files.
+
--dry-run::
Parse options then exit. --dry-run can be used to detect errors in cmdline
options.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 1a27bfe05039..f441baa794ce 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -105,6 +105,8 @@ OPTIONS
guest machine
- sample: Number of sample
- period: Raw number of event count of sample
+ - time: Separate the samples by time stamp with the resolution specified by
+ --time-quantum (default 100ms). Specify it together with overhead and
+ before it in the sort order, e.g. --sort time,overhead,sym.
By default, comm, dso and symbol keys are used.
(i.e. --sort comm,dso,symbol)
@@ -459,6 +461,10 @@ include::itrace.txt[]
--socket-filter::
Only report the samples on the processor socket that match with this filter
+--samples=N::
+ Save N individual samples for each histogram entry to show context in the
+ perf report tui browser.
+
--raw-trace::
When displaying traceevent output, do not use print fmt or plugins.
@@ -477,6 +483,9 @@ include::itrace.txt[]
Please note that not all mmaps are stored; options affecting which ones
are stored include 'perf record --data', for instance.
+--ns::
+ Show time stamps in nanoseconds.
+
--stats::
Display overall events statistics without any further processing.
(like the one at the end of the perf report -D command)
@@ -494,6 +503,10 @@ include::itrace.txt[]
The period/hits keywords set the base the percentage is computed
on - the samples period or the number of samples (hits).
+--time-quantum::
+ Configure time quantum for time sort key. Default 100ms.
+ Accepts s, us, ms, ns units.
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 2e19fd7ffe35..9b0d04dd2a61 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -380,6 +380,9 @@ include::itrace.txt[]
Set the maximum number of program blocks to print with brstackasm for
each sample.
+--reltime::
+ Print time stamps relative to trace start.
+
--per-event-dump::
Create per event files with a "perf.data.EVENT.dump" name instead of
printing to stdout, useful, for instance, for generating flamegraphs.
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 4bc2085e5197..39c05f89104e 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -72,9 +72,8 @@ report::
--all-cpus::
system-wide collection from all CPUs (default if no target is specified)
--c::
---scale::
- scale/normalize counter values
+--no-scale::
+ Don't scale/normalize counter values
-d::
--detailed::
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index 849599f39c5e..869965d629ce 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -15,6 +15,7 @@ To see callchains in a more compact form: perf report -g folded
Show individual samples with: perf script
Limit to show entries above 5% only: perf report --percent-limit 5
Profiling branch (mis)predictions with: perf record -b / perf report
+To show assembler sample contexts use perf record -b / perf script -F +brstackinsn --xed
Treat branches as callchains: perf report --branch-history
To count events in every 1000 msec: perf stat -I 1000
Print event counts in CSV format with: perf stat -x,
@@ -34,3 +35,9 @@ Show current config key-value pairs: perf config --list
Show user configuration overrides: perf config --user --list
To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node`
To report cacheline events from previous recording: perf c2c report
+To browse sample contexts use perf report --sample 10 and select in context menu
+To separate samples by time use perf report --sort time,overhead,sym
+To set sample time separation other than 100ms with --sort time use --time-quantum
+Add -I to perf record to sample register values visible in perf report context.
+To show IPC for sampling periods use perf record -e '{cycles,instructions}:S' and then browse context
+To show context switches in perf report sample context add --switch-events to perf record.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 0f11d5891301..0c52a01dc759 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -152,6 +152,13 @@ endif
FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
+ifdef LIBZSTD_DIR
+ LIBZSTD_CFLAGS := -I$(LIBZSTD_DIR)/lib
+ LIBZSTD_LDFLAGS := -L$(LIBZSTD_DIR)/lib
+endif
+FEATURE_CHECK_CFLAGS-libzstd := $(LIBZSTD_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libzstd := $(LIBZSTD_LDFLAGS)
+
FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
# include ARCH specific config
-include $(src-perf)/arch/$(SRCARCH)/Makefile
@@ -227,6 +234,8 @@ FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_LDFLAGS-libaio = -lrt
+FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
+
CFLAGS += -fno-omit-frame-pointer
CFLAGS += -ggdb3
CFLAGS += -funwind-tables
@@ -713,7 +722,7 @@ else
endif
ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd
+ EXTLIBS += -lbfd -lopcodes
else
# we are on a system that requires -liberty and (maybe) -lz
# to link against -lbfd; test each case individually here
@@ -724,12 +733,15 @@ else
$(call feature_check,libbfd-liberty-z)
ifeq ($(feature-libbfd-liberty), 1)
- EXTLIBS += -lbfd -liberty
+ EXTLIBS += -lbfd -lopcodes -liberty
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
else
ifeq ($(feature-libbfd-liberty-z), 1)
- EXTLIBS += -lbfd -liberty -lz
+ EXTLIBS += -lbfd -lopcodes -liberty -lz
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
endif
endif
+ $(call feature_check,disassembler-four-args)
endif
ifdef NO_DEMANGLE
@@ -782,6 +794,19 @@ ifndef NO_LZMA
endif
endif
+ifndef NO_LIBZSTD
+ ifeq ($(feature-libzstd), 1)
+ CFLAGS += -DHAVE_ZSTD_SUPPORT
+ CFLAGS += $(LIBZSTD_CFLAGS)
+ LDFLAGS += $(LIBZSTD_LDFLAGS)
+ EXTLIBS += -lzstd
+ $(call detected,CONFIG_ZSTD)
+ else
+ msg := $(warning No libzstd found, disabling trace compression, please install libzstd-dev[el] and/or set LIBZSTD_DIR);
+ NO_LIBZSTD := 1
+ endif
+endif
+
ifndef NO_BACKTRACE
ifeq ($(feature-backtrace), 1)
CFLAGS += -DHAVE_BACKTRACE_SUPPORT
@@ -808,6 +833,10 @@ ifdef HAVE_KVM_STAT_SUPPORT
CFLAGS += -DHAVE_KVM_STAT_SUPPORT
endif
+ifeq ($(feature-disassembler-four-args), 1)
+ CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
ifeq (${IS_64_BIT}, 1)
ifndef NO_PERF_READ_VDSO32
$(call feature_check,compile-32)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 01f7555fd933..c706548d5b10 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -108,6 +108,9 @@ include ../scripts/utilities.mak
# streaming for record mode. Currently Posix AIO trace streaming is
# supported only when linking with glibc.
#
+# Define NO_LIBZSTD if you do not want support of Zstandard based runtime
+# trace compression in record mode.
+#
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -481,8 +484,8 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t
mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
-$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
- $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
+$(mmap_flags_array): $(linux_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
+ $(Q)$(SHELL) '$(mmap_flags_tbl)' $(linux_uapi_dir) $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
mount_flags_array := $(beauty_outdir)/mount_flags_array.c
mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 76c6345a57d5..8f70a1b282df 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -58,7 +58,7 @@ out_free_source:
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
static struct ins_ops arm64_mov_ops = {
.parse = arm64_mov__parse,
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index de0dd66dbb48..89bb8f2c54ce 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -46,7 +46,7 @@ static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
}
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
static struct ins_ops s390_call_ops = {
.parse = s390_call__parse,
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index f0b1709a5ffb..92ee0b4378d4 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -343,6 +343,12 @@
332 common statx __x64_sys_statx
333 common io_pgetevents __x64_sys_io_pgetevents
334 common rseq __x64_sys_rseq
+# don't use numbers 387 through 423, add new calls after the last
+# 'common' entry
+424 common pidfd_send_signal __x64_sys_pidfd_send_signal
+425 common io_uring_setup __x64_sys_io_uring_setup
+426 common io_uring_enter __x64_sys_io_uring_enter
+427 common io_uring_register __x64_sys_io_uring_register
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -361,7 +367,7 @@
520 x32 execve __x32_compat_sys_execve/ptregs
521 x32 ptrace __x32_compat_sys_ptrace
522 x32 rt_sigpending __x32_compat_sys_rt_sigpending
-523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait
+523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait_time64
524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo
525 x32 sigaltstack __x32_compat_sys_sigaltstack
526 x32 timer_create __x32_compat_sys_timer_create
@@ -375,7 +381,7 @@
534 x32 preadv __x32_compat_sys_preadv64
535 x32 pwritev __x32_compat_sys_pwritev64
536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo
-537 x32 recvmmsg __x32_compat_sys_recvmmsg
+537 x32 recvmmsg __x32_compat_sys_recvmmsg_time64
538 x32 sendmmsg __x32_compat_sys_sendmmsg
539 x32 process_vm_readv __x32_compat_sys_process_vm_readv
540 x32 process_vm_writev __x32_compat_sys_process_vm_writev
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 7aab0be5fc5f..47f9c56e744f 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -14,5 +14,6 @@ perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += archinsn.o
perf-$(CONFIG_AUXTRACE) += intel-pt.o
perf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c
new file mode 100644
index 000000000000..4237bb2e7fa2
--- /dev/null
+++ b/tools/perf/arch/x86/util/archinsn.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "perf.h"
+#include "archinsn.h"
+#include "util/intel-pt-decoder/insn.h"
+#include "machine.h"
+#include "thread.h"
+#include "symbol.h"
+
+void arch_fetch_insn(struct perf_sample *sample,
+ struct thread *thread,
+ struct machine *machine)
+{
+ struct insn insn;
+ int len;
+ bool is64bit = false;
+
+ if (!sample->ip)
+ return;
+ len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit);
+ if (len <= 0)
+ return;
+ insn_init(&insn, sample->insn, len, is64bit);
+ insn_get_length(&insn);
+ if (insn_complete(&insn) && insn.length <= len)
+ sample->insn_len = insn.length;
+}
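Note the defensive shape of the new hook: thread__memcpy() copies at most sizeof(sample->insn) bytes from the sampled task at sample->ip, and sample->insn_len is only set when the x86 decoder reports a complete instruction that fits inside the copied window, so a failed read or truncated decode simply leaves the sample unaugmented.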
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index 0c0a6e824934..2af067859966 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
unsigned int i, j;
- int ret;
+ int ret = 0;
if (!noaffinity)
pthread_attr_init(&thread_attr);
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index 5a11534e96a0..fe85448abd45 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
unsigned int i, j;
- int ret, events = EPOLLIN;
+ int ret = 0, events = EPOLLIN;
if (oneshot)
events |= EPOLLONESHOT;
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 98ad783efc69..a7784554a80d 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -39,6 +39,10 @@
#include <numa.h>
#include <numaif.h>
+#ifndef RUSAGE_THREAD
+# define RUSAGE_THREAD 1
+#endif
+
/*
 * Regular printout to the terminal, suppressed if -q is specified:
*/
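The fallback define works because RUSAGE_THREAD is stable kernel ABI (value 1) that older libc headers simply do not expose. With it in place, per-thread accounting such as the following compiles everywhere (a standalone sketch, not code from this patch):

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

#ifndef RUSAGE_THREAD
# define RUSAGE_THREAD 1	/* stable kernel ABI value */
#endif

static void print_thread_cpu_usage(void)
{
	struct rusage ru;

	/* accounts the calling thread only, not the whole process */
	if (getrusage(RUSAGE_THREAD, &ru) == 0)
		printf("utime %ld.%06lds\n",
		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
}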
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 4272763a5e96..9e6cc868bdb4 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2056,6 +2056,12 @@ static int setup_nodes(struct perf_session *session)
if (!set)
return -ENOMEM;
+ nodes[node] = set;
+
+ /* empty node, skip */
+ if (cpu_map__empty(map))
+ continue;
+
for (cpu = 0; cpu < map->nr; cpu++) {
set_bit(map->map[cpu], set);
@@ -2064,8 +2070,6 @@ static int setup_nodes(struct perf_session *session)
cpu2node[map->map[cpu]] = node;
}
-
- nodes[node] = set;
}
setup_nodes_header();
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 58fe0e88215c..6e7920793729 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -19,12 +19,21 @@
#include "util/util.h"
#include "util/data.h"
#include "util/config.h"
+#include "util/time-utils.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <math.h>
+struct perf_diff {
+ struct perf_tool tool;
+ const char *time_str;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
+};
+
/* Diff command specific HPP columns. */
enum {
PERF_HPP_DIFF__BASELINE,
@@ -74,6 +83,9 @@ static unsigned int sort_compute = 1;
static s64 compute_wdiff_w1;
static s64 compute_wdiff_w2;
+static const char *cpu_list;
+static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+
enum {
COMPUTE_DELTA,
COMPUTE_RATIO,
@@ -323,22 +335,33 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
return -1;
}
-static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
+static int diff__process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
+ struct perf_diff *pdiff = container_of(tool, struct perf_diff, tool);
struct addr_location al;
struct hists *hists = evsel__hists(evsel);
int ret = -1;
+ if (perf_time__ranges_skip_sample(pdiff->ptime_range, pdiff->range_num,
+ sample->time)) {
+ return 0;
+ }
+
if (machine__resolve(machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
+ if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) {
+ ret = 0;
+ goto out_put;
+ }
+
if (!hists__add_entry(hists, &al, NULL, NULL, NULL, sample, true)) {
pr_warning("problem incrementing symbol period, skipping event\n");
goto out_put;
@@ -359,17 +382,19 @@ out_put:
return ret;
}
-static struct perf_tool tool = {
- .sample = diff__process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .lost = perf_event__process_lost,
- .namespaces = perf_event__process_namespaces,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
+static struct perf_diff pdiff = {
+ .tool = {
+ .sample = diff__process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_exit,
+ .fork = perf_event__process_fork,
+ .lost = perf_event__process_lost,
+ .namespaces = perf_event__process_namespaces,
+ .ordered_events = true,
+ .ordering_requires_timestamps = true,
+ },
};
static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
@@ -771,19 +796,117 @@ static void data__free(struct data__file *d)
}
}
+static int abstime_str_dup(char **pstr)
+{
+ char *str = NULL;
+
+ if (pdiff.time_str && strchr(pdiff.time_str, ':')) {
+ str = strdup(pdiff.time_str);
+ if (!str)
+ return -ENOMEM;
+ }
+
+ *pstr = str;
+ return 0;
+}
+
+static int parse_absolute_time(struct data__file *d, char **pstr)
+{
+ char *p = *pstr;
+ int ret;
+
+ /*
+ * Absolute timestamp for one file has the format: a.b,c.d
+ * For multiple files, the format is: a.b,c.d:a.b,c.d
+ */
+ p = strchr(*pstr, ':');
+ if (p) {
+ if (p == *pstr) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+
+ *p = 0;
+ p++;
+ if (*p == 0) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = perf_time__parse_for_ranges(*pstr, d->session,
+ &pdiff.ptime_range,
+ &pdiff.range_size,
+ &pdiff.range_num);
+ if (ret < 0)
+ return ret;
+
+ if (!p || *p == 0)
+ *pstr = NULL;
+ else
+ *pstr = p;
+
+ return ret;
+}
+
+static int parse_percent_time(struct data__file *d)
+{
+ int ret;
+
+ ret = perf_time__parse_for_ranges(pdiff.time_str, d->session,
+ &pdiff.ptime_range,
+ &pdiff.range_size,
+ &pdiff.range_num);
+ return ret;
+}
+
+static int parse_time_str(struct data__file *d, char *abstime_ostr,
+ char **pabstime_tmp)
+{
+ int ret = 0;
+
+ if (abstime_ostr)
+ ret = parse_absolute_time(d, pabstime_tmp);
+ else if (pdiff.time_str)
+ ret = parse_percent_time(d);
+
+ return ret;
+}
+
static int __cmd_diff(void)
{
struct data__file *d;
- int ret = -EINVAL, i;
+ int ret, i;
+ char *abstime_ostr, *abstime_tmp;
+
+ ret = abstime_str_dup(&abstime_ostr);
+ if (ret)
+ return ret;
+
+ abstime_tmp = abstime_ostr;
+ ret = -EINVAL;
data__for_each_file(i, d) {
- d->session = perf_session__new(&d->data, false, &tool);
+ d->session = perf_session__new(&d->data, false, &pdiff.tool);
if (!d->session) {
pr_err("Failed to open %s\n", d->data.path);
ret = -1;
goto out_delete;
}
+ if (pdiff.time_str) {
+ ret = parse_time_str(d, abstime_ostr, &abstime_tmp);
+ if (ret < 0)
+ goto out_delete;
+ }
+
+ if (cpu_list) {
+ ret = perf_session__cpu_bitmap(d->session, cpu_list,
+ cpu_bitmap);
+ if (ret < 0)
+ goto out_delete;
+ }
+
ret = perf_session__process_events(d->session);
if (ret) {
pr_err("Failed to process %s\n", d->data.path);
@@ -791,6 +914,9 @@ static int __cmd_diff(void)
}
perf_evlist__collapse_resort(d->session->evlist);
+
+ if (pdiff.ptime_range)
+ zfree(&pdiff.ptime_range);
}
data_process();
@@ -802,6 +928,13 @@ static int __cmd_diff(void)
}
free(data__files);
+
+ if (pdiff.ptime_range)
+ zfree(&pdiff.ptime_range);
+
+ if (abstime_ostr)
+ free(abstime_ostr);
+
return ret;
}
@@ -849,6 +982,13 @@ static const struct option options[] = {
OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"How to display percentage of filtered entries", parse_filter_percentage),
+ OPT_STRING(0, "time", &pdiff.time_str, "str",
+ "Time span (time percent or absolute timestamp)"),
+ OPT_STRING(0, "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
+ "only consider symbols in these pids"),
+ OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
+ "only consider symbols in these tids"),
OPT_END()
};
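Taken together: --time 1234.567,1234.789 limits both data files to one absolute range, while --time 1234.567,1234.789:5678.123,5678.456 applies the first range to the baseline file and the second to the compared one, and a percent form such as --time 10%/1 (the first 10% time slice, anything perf_time__parse_for_ranges() accepts) is reused for every file. The new --cpu, --pid and --tid filters then narrow the matched samples the same way they do in perf report.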
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index fa520f4b8095..b80eee455111 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -1975,7 +1975,7 @@ int cmd_kmem(int argc, const char **argv)
goto out_delete;
}
- kmem_page_size = tep_get_page_size(evsel->tp_format->pevent);
+ kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
symbol_conf.use_callchain = true;
}
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index c9f98d00c0e9..e0312a1c4792 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -70,10 +70,11 @@ int cmd_list(int argc, const char **argv)
print_symbol_events(NULL, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
else if (strcmp(argv[i], "sw") == 0 ||
- strcmp(argv[i], "software") == 0)
+ strcmp(argv[i], "software") == 0) {
print_symbol_events(NULL, PERF_TYPE_SOFTWARE,
event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
- else if (strcmp(argv[i], "cache") == 0 ||
+ print_tool_events(NULL, raw_dump);
+ } else if (strcmp(argv[i], "cache") == 0 ||
strcmp(argv[i], "hwcache") == 0)
print_hwcache_events(NULL, raw_dump);
else if (strcmp(argv[i], "pmu") == 0)
@@ -113,13 +114,14 @@ int cmd_list(int argc, const char **argv)
event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
print_symbol_events(s, PERF_TYPE_SOFTWARE,
event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
+ print_tool_events(s, raw_dump);
print_hwcache_events(s, raw_dump);
print_pmu_events(s, raw_dump, !desc_flag,
long_desc_flag,
details_flag);
print_tracepoint_events(NULL, s, raw_dump);
print_sdt_events(NULL, s, raw_dump);
- metricgroup__print(true, true, NULL, raw_dump, details_flag);
+ metricgroup__print(true, true, s, raw_dump, details_flag);
free(s);
}
}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f3f7f3100336..c5e10552776a 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -62,6 +62,9 @@ struct switch_output {
unsigned long time;
const char *str;
bool set;
+ char **filenames;
+ int num_files;
+ int cur_file;
};
struct record {
@@ -334,6 +337,41 @@ static int record__aio_enabled(struct record *rec)
return rec->opts.nr_cblocks > 0;
}
+#define MMAP_FLUSH_DEFAULT 1
+static int record__mmap_flush_parse(const struct option *opt,
+ const char *str,
+ int unset)
+{
+ int flush_max;
+ struct record_opts *opts = (struct record_opts *)opt->value;
+ static struct parse_tag tags[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+
+ if (unset)
+ return 0;
+
+ if (str) {
+ opts->mmap_flush = parse_tag_value(str, tags);
+ if (opts->mmap_flush == (int)-1)
+ opts->mmap_flush = strtol(str, NULL, 0);
+ }
+
+ if (!opts->mmap_flush)
+ opts->mmap_flush = MMAP_FLUSH_DEFAULT;
+
+ flush_max = perf_evlist__mmap_size(opts->mmap_pages);
+ flush_max /= 4;
+ if (opts->mmap_flush > flush_max)
+ opts->mmap_flush = flush_max;
+
+ return 0;
+}
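The threshold is clamped to a quarter of the ring: with, say, a 512KiB mmap ring (the actual size follows from -m/--mmap-pages), --mmap-flush 1M is silently reduced to 128KiB, while a zero or unparseable value falls back to MMAP_FLUSH_DEFAULT, i.e. flush as soon as a single byte is available.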
+
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -392,7 +430,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
size_t padding;
u8 pad[8] = {0};
- if (!perf_data__is_pipe(data)) {
+ if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
off_t file_offset;
int fd = perf_data__fd(data);
int err;
@@ -543,7 +581,8 @@ static int record__mmap_evlist(struct record *rec,
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode,
- opts->nr_cblocks, opts->affinity) < 0) {
+ opts->nr_cblocks, opts->affinity,
+ opts->mmap_flush) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -733,7 +772,7 @@ static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
}
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
- bool overwrite)
+ bool overwrite, bool synch)
{
u64 bytes_written = rec->bytes_written;
int i;
@@ -756,12 +795,19 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
off = record__aio_get_pos(trace_fd);
for (i = 0; i < evlist->nr_mmaps; i++) {
+ u64 flush = 0;
struct perf_mmap *map = &maps[i];
if (map->base) {
record__adjust_affinity(rec, map);
+ if (synch) {
+ flush = map->flush;
+ map->flush = 1;
+ }
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) != 0) {
+ if (synch)
+ map->flush = flush;
rc = -1;
goto out;
}
@@ -774,10 +820,14 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
idx = record__aio_sync(map, false);
if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
record__aio_set_pos(trace_fd, off);
+ if (synch)
+ map->flush = flush;
rc = -1;
goto out;
}
}
+ if (synch)
+ map->flush = flush;
}
if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
@@ -803,15 +853,15 @@ out:
return rc;
}
-static int record__mmap_read_all(struct record *rec)
+static int record__mmap_read_all(struct record *rec, bool synch)
{
int err;
- err = record__mmap_read_evlist(rec, rec->evlist, false);
+ err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
if (err)
return err;
- return record__mmap_read_evlist(rec, rec->evlist, true);
+ return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}
static void record__init_features(struct record *rec)
@@ -837,6 +887,8 @@ static void record__init_features(struct record *rec)
if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
perf_header__clear_feat(&session->header, HEADER_CLOCKID);
+ perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
+
perf_header__clear_feat(&session->header, HEADER_STAT);
}
@@ -890,6 +942,7 @@ record__switch_output(struct record *rec, bool at_exit)
{
struct perf_data *data = &rec->data;
int fd, err;
+ char *new_filename;
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
@@ -910,7 +963,7 @@ record__switch_output(struct record *rec, bool at_exit)
fd = perf_data__switch(data, timestamp,
rec->session->header.data_offset,
- at_exit);
+ at_exit, &new_filename);
if (fd >= 0 && !at_exit) {
rec->bytes_written = 0;
rec->session->header.data_size = 0;
@@ -920,6 +973,21 @@ record__switch_output(struct record *rec, bool at_exit)
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
data->path, timestamp);
+ if (rec->switch_output.num_files) {
+ int n = rec->switch_output.cur_file + 1;
+
+ if (n >= rec->switch_output.num_files)
+ n = 0;
+ rec->switch_output.cur_file = n;
+ if (rec->switch_output.filenames[n]) {
+ remove(rec->switch_output.filenames[n]);
+ free(rec->switch_output.filenames[n]);
+ }
+ rec->switch_output.filenames[n] = new_filename;
+ } else {
+ free(new_filename);
+ }
+
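The filenames array behaves as a ring: with --switch-max-files 3, the fourth output switch wraps cur_file back to slot 0, remove()s the file recorded there and stores the new name, so at most the three newest perf.data.<timestamp> dumps survive on disk.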
/* Output tracking events */
if (!at_exit) {
record__synthesize(rec, false);
@@ -1093,7 +1161,7 @@ static int record__synthesize(struct record *rec, bool tail)
return err;
}
- err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
+ err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
machine, opts);
if (err < 0)
pr_warning("Couldn't synthesize bpf events.\n");
@@ -1116,6 +1184,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct perf_data *data = &rec->data;
struct perf_session *session;
bool disabled = false, draining = false;
+ struct perf_evlist *sb_evlist = NULL;
int fd;
atexit(record__sig_exit);
@@ -1216,6 +1285,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
+ if (!opts->no_bpf_event)
+ bpf_event__add_sb_event(&sb_evlist, &session->header.env);
+
+ if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+
err = record__synthesize(rec, false);
if (err < 0)
goto out_child;
@@ -1310,7 +1387,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (trigger_is_hit(&switch_output_trigger) || done || draining)
perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
- if (record__mmap_read_all(rec) < 0) {
+ if (record__mmap_read_all(rec, false) < 0) {
trigger_error(&auxtrace_snapshot_trigger);
trigger_error(&switch_output_trigger);
err = -1;
@@ -1411,6 +1488,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
record__synthesize_workload(rec, true);
out_child:
+ record__mmap_read_all(rec, true);
record__aio_mmap_read_sync(rec);
if (forks) {
@@ -1466,6 +1544,9 @@ out_child:
out_delete_session:
perf_session__delete(session);
+
+ if (!opts->no_bpf_event)
+ perf_evlist__stop_sb_thread(sb_evlist);
return status;
}
@@ -1813,6 +1894,7 @@ static struct record record = {
.uses_mmap = true,
.default_per_cpu = true,
},
+ .mmap_flush = MMAP_FLUSH_DEFAULT,
},
.tool = {
.sample = process_sample_event,
@@ -1870,7 +1952,7 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
- OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
+ OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -1879,6 +1961,9 @@ static struct option __record_options[] = {
OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
"number of mmap data pages and AUX area tracing mmap pages",
record__parse_mmap_pages),
+ OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
+ "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
+ record__mmap_flush_parse),
OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
@@ -1968,9 +2053,11 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
"Record timestamp boundary (time of first/last samples)"),
OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
- &record.switch_output.set, "signal,size,time",
- "Switch output when receive SIGUSR2 or cross size,time threshold",
+ &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
+ "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
"signal"),
+ OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
+ "Limit number of switch output generated files"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
"Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
@@ -2057,6 +2144,13 @@ int cmd_record(int argc, const char **argv)
alarm(rec->switch_output.time);
}
+ if (rec->switch_output.num_files) {
+ rec->switch_output.filenames = calloc(rec->switch_output.num_files,
+ sizeof(char *));
+ if (!rec->switch_output.filenames)
+ return -EINVAL;
+ }
+
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().
@@ -2182,6 +2276,7 @@ int cmd_record(int argc, const char **argv)
pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
+ pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
err = __cmd_record(&record, argc, argv);
out:
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 1532ebde6c4b..4054eb1f98ac 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -47,9 +47,11 @@
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
+#include "sane_ctype.h"
#include <signal.h>
#include <linux/bitmap.h>
#include <linux/stringify.h>
+#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -926,6 +928,43 @@ report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
return parse_callchain_report_opt(arg);
}
+static int
+parse_time_quantum(const struct option *opt, const char *arg,
+ int unset __maybe_unused)
+{
+ unsigned long *time_q = opt->value;
+ char *end;
+
+ *time_q = strtoul(arg, &end, 0);
+ if (end == arg)
+ goto parse_err;
+ if (*time_q == 0) {
+ pr_err("time quantum cannot be 0");
+ return -1;
+ }
+ while (isspace(*end))
+ end++;
+ if (*end == 0)
+ return 0;
+ if (!strcmp(end, "s")) {
+ *time_q *= NSEC_PER_SEC;
+ return 0;
+ }
+ if (!strcmp(end, "ms")) {
+ *time_q *= NSEC_PER_MSEC;
+ return 0;
+ }
+ if (!strcmp(end, "us")) {
+ *time_q *= NSEC_PER_USEC;
+ return 0;
+ }
+ if (!strcmp(end, "ns"))
+ return 0;
+parse_err:
+ pr_err("Cannot parse time quantum `%s'\n", arg);
+ return -1;
+}
+
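Worked through the suffix handling: --time-quantum 100ms stores 100 * NSEC_PER_MSEC = 100,000,000ns, --time-quantum 1s stores NSEC_PER_SEC, and a bare --time-quantum 100 is taken as nanoseconds already; zero and trailing junk are both rejected with an error.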
int
report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
@@ -1044,10 +1083,9 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN(0, "header-only", &report.header_only,
"Show only data header."),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
- " Please refer the man page for the complete list."),
+ sort_help("sort by key(s):")),
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
- "output field(s): overhead, period, sample plus all of sort keys"),
+ sort_help("output field(s): overhead period sample ")),
OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes"),
OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1120,6 +1158,8 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
+ OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
+ "Number of samples to save per histogram entry for individual browsing"),
OPT_CALLBACK(0, "percent-limit", &report, "percent",
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
@@ -1147,6 +1187,10 @@ int cmd_report(int argc, const char **argv)
OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
+ OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
+ OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
+ "Set time quantum for time sort key (default 100ms)",
+ parse_time_quantum),
OPT_END()
};
struct perf_data data = {
@@ -1375,36 +1419,13 @@ repeat:
if (symbol__init(&session->header.env) < 0)
goto error;
- report.ptime_range = perf_time__range_alloc(report.time_str,
- &report.range_size);
- if (!report.ptime_range) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (perf_time__parse_str(report.ptime_range, report.time_str) != 0) {
- if (session->evlist->first_sample_time == 0 &&
- session->evlist->last_sample_time == 0) {
- pr_err("HINT: no first/last sample time found in perf data.\n"
- "Please use latest perf binary to execute 'perf record'\n"
- "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
- ret = -EINVAL;
- goto error;
- }
-
- report.range_num = perf_time__percent_parse_str(
- report.ptime_range, report.range_size,
- report.time_str,
- session->evlist->first_sample_time,
- session->evlist->last_sample_time);
-
- if (report.range_num < 0) {
- pr_err("Invalid time string\n");
- ret = -EINVAL;
+ if (report.time_str) {
+ ret = perf_time__parse_for_ranges(report.time_str, session,
+ &report.ptime_range,
+ &report.range_size,
+ &report.range_num);
+ if (ret < 0)
goto error;
- }
- } else {
- report.range_num = 1;
}
if (session->tevent.pevent &&
@@ -1426,7 +1447,8 @@ repeat:
ret = 0;
error:
- zfree(&report.ptime_range);
+ if (report.ptime_range)
+ zfree(&report.ptime_range);
perf_session__delete(session);
return ret;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 2d8cb1d1682c..61cfd8f70989 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -29,10 +29,12 @@
#include "util/time-utils.h"
#include "util/path.h"
#include "print_binary.h"
+#include "archinsn.h"
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/time64.h>
+#include <sys/utsname.h>
#include "asm/bug.h"
#include "util/mem-events.h"
#include "util/dump-insn.h"
@@ -51,6 +53,8 @@
static char const *script_name;
static char const *generate_script_lang;
+static bool reltime;
+static u64 initial_time;
static bool debug_mode;
static u64 last_timestamp;
static u64 nr_unordered;
@@ -58,11 +62,11 @@ static bool no_callchain;
static bool latency_format;
static bool system_wide;
static bool print_flags;
-static bool nanosecs;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct perf_stat_config stat_config;
static int max_blocks;
+static bool native_arch;
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
@@ -684,15 +688,21 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
}
if (PRINT_FIELD(TIME)) {
- nsecs = sample->time;
+ u64 t = sample->time;
+ if (reltime) {
+ if (!initial_time)
+ initial_time = sample->time;
+ t = sample->time - initial_time;
+ }
+ nsecs = t;
secs = nsecs / NSEC_PER_SEC;
nsecs -= secs * NSEC_PER_SEC;
- if (nanosecs)
+ if (symbol_conf.nanosecs)
printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
else {
char sample_time[32];
- timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
+ timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
printed += fprintf(fp, "%12s: ", sample_time);
}
}
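With --reltime the first printed sample establishes initial_time, so output starts at 0.000000 and later stamps are offsets from it; since --time ranges are expressed in absolute stamps, the two options are rejected as a pair further down in this patch.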
@@ -1227,6 +1237,12 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
return len + dlen;
}
+__weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
+ struct thread *thread __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+}
+
static int perf_sample__fprintf_insn(struct perf_sample *sample,
struct perf_event_attr *attr,
struct thread *thread,
@@ -1234,9 +1250,12 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
{
int printed = 0;
+ if (sample->insn_len == 0 && native_arch)
+ arch_fetch_insn(sample, thread, machine);
+
if (PRINT_FIELD(INSNLEN))
printed += fprintf(fp, " ilen: %d", sample->insn_len);
- if (PRINT_FIELD(INSN)) {
+ if (PRINT_FIELD(INSN) && sample->insn_len) {
int i;
printed += fprintf(fp, " insn:");
@@ -1922,6 +1941,13 @@ static int cleanup_scripting(void)
return scripting_ops ? scripting_ops->stop_script() : 0;
}
+static bool filter_cpu(struct perf_sample *sample)
+{
+ if (cpu_list)
+ return !test_bit(sample->cpu, cpu_bitmap);
+ return false;
+}
+
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -1956,7 +1982,7 @@ static int process_sample_event(struct perf_tool *tool,
if (al.filtered)
goto out_put;
- if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+ if (filter_cpu(sample))
goto out_put;
if (scripting_ops)
@@ -2041,9 +2067,11 @@ static int process_comm_event(struct perf_tool *tool,
sample->tid = event->comm.tid;
sample->pid = event->comm.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_COMM, stdout);
- perf_event__fprintf(event, stdout);
+ perf_event__fprintf(event, stdout);
+ }
ret = 0;
out:
thread__put(thread);
@@ -2077,9 +2105,11 @@ static int process_namespaces_event(struct perf_tool *tool,
sample->tid = event->namespaces.tid;
sample->pid = event->namespaces.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_NAMESPACES, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_NAMESPACES, stdout);
+ perf_event__fprintf(event, stdout);
+ }
ret = 0;
out:
thread__put(thread);
@@ -2111,9 +2141,11 @@ static int process_fork_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_FORK, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_FORK, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
@@ -2141,9 +2173,11 @@ static int process_exit_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_EXIT, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_EXIT, stdout);
+ perf_event__fprintf(event, stdout);
+ }
if (perf_event__process_exit(tool, event, sample, machine) < 0)
err = -1;
@@ -2177,9 +2211,11 @@ static int process_mmap_event(struct perf_tool *tool,
sample->tid = event->mmap.tid;
sample->pid = event->mmap.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_MMAP, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2209,9 +2245,11 @@ static int process_mmap2_event(struct perf_tool *tool,
sample->tid = event->mmap2.tid;
sample->pid = event->mmap2.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_MMAP2, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP2, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2236,9 +2274,11 @@ static int process_switch_event(struct perf_tool *tool,
return -1;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_SWITCH, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_SWITCH, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2259,9 +2299,11 @@ process_lost_event(struct perf_tool *tool,
if (thread == NULL)
return -1;
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_LOST, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_LOST, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2948,7 +2990,8 @@ static int check_ev_match(char *dir_name, char *scriptname,
* will list all statically runnable scripts, select one, execute it and
* show the output in a perf browser.
*/
-int find_scripts(char **scripts_array, char **scripts_path_array)
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+ int pathlen)
{
struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
@@ -2993,7 +3036,10 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
/* Skip those real time scripts: xxxtop.p[yl] */
if (strstr(script_dirent->d_name, "top."))
continue;
- sprintf(scripts_path_array[i], "%s/%s", lang_path,
+ if (i >= num)
+ break;
+ snprintf(scripts_path_array[i], pathlen, "%s/%s",
+ lang_path,
script_dirent->d_name);
temp = strchr(script_dirent->d_name, '.');
snprintf(scripts_array[i],
@@ -3232,7 +3278,7 @@ static int parse_insn_trace(const struct option *opt __maybe_unused,
{
parse_output_fields(NULL, "+insn,-event,-period", 0);
itrace_parse_synth_opts(opt, "i0ns", 0);
- nanosecs = true;
+ symbol_conf.nanosecs = true;
return 0;
}
@@ -3250,7 +3296,7 @@ static int parse_call_trace(const struct option *opt __maybe_unused,
{
parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
itrace_parse_synth_opts(opt, "cewp", 0);
- nanosecs = true;
+ symbol_conf.nanosecs = true;
return 0;
}
@@ -3260,7 +3306,7 @@ static int parse_callret_trace(const struct option *opt __maybe_unused,
{
parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
itrace_parse_synth_opts(opt, "crewp", 0);
- nanosecs = true;
+ symbol_conf.nanosecs = true;
return 0;
}
@@ -3277,6 +3323,7 @@ int cmd_script(int argc, const char **argv)
.set = false,
.default_no_sample = true,
};
+ struct utsname uts;
char *script_path = NULL;
const char **__argv;
int i, j, err = 0;
@@ -3374,6 +3421,7 @@ int cmd_script(int argc, const char **argv)
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
+ OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -3395,7 +3443,7 @@ int cmd_script(int argc, const char **argv)
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
OPT_INTEGER(0, "max-blocks", &max_blocks,
"Maximum number of code blocks to dump with brstackinsn"),
- OPT_BOOLEAN(0, "ns", &nanosecs,
+ OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs,
"Use 9 decimal places when displaying time"),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options\n" ITRACE_HELP,
@@ -3448,6 +3496,11 @@ int cmd_script(int argc, const char **argv)
}
}
+ if (script.time_str && reltime) {
+ fprintf(stderr, "Don't combine --reltime with --time\n");
+ return -1;
+ }
+
if (itrace_synth_opts.callchain &&
itrace_synth_opts.callchain_sz > scripting_max_stack)
scripting_max_stack = itrace_synth_opts.callchain_sz;
@@ -3615,6 +3668,12 @@ int cmd_script(int argc, const char **argv)
if (symbol__init(&session->header.env) < 0)
goto out_delete;
+ uname(&uts);
+ if (!strcmp(uts.machine, session->header.env.arch) ||
+ (!strcmp(uts.machine, "x86_64") &&
+ !strcmp(session->header.env.arch, "i386")))
+ native_arch = true;
+
script.session = session;
script__setup_sample_type(&script);
@@ -3699,37 +3758,13 @@ int cmd_script(int argc, const char **argv)
if (err < 0)
goto out_delete;
- script.ptime_range = perf_time__range_alloc(script.time_str,
- &script.range_size);
- if (!script.ptime_range) {
- err = -ENOMEM;
- goto out_delete;
- }
-
- /* needs to be parsed after looking up reference time */
- if (perf_time__parse_str(script.ptime_range, script.time_str) != 0) {
- if (session->evlist->first_sample_time == 0 &&
- session->evlist->last_sample_time == 0) {
- pr_err("HINT: no first/last sample time found in perf data.\n"
- "Please use latest perf binary to execute 'perf record'\n"
- "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
- err = -EINVAL;
- goto out_delete;
- }
-
- script.range_num = perf_time__percent_parse_str(
- script.ptime_range, script.range_size,
- script.time_str,
- session->evlist->first_sample_time,
- session->evlist->last_sample_time);
-
- if (script.range_num < 0) {
- pr_err("Invalid time string\n");
- err = -EINVAL;
+ if (script.time_str) {
+ err = perf_time__parse_for_ranges(script.time_str, session,
+ &script.ptime_range,
+ &script.range_size,
+ &script.range_num);
+ if (err < 0)
goto out_delete;
- }
- } else {
- script.range_num = 1;
}
err = __cmd_script(&script);
@@ -3737,7 +3772,8 @@ int cmd_script(int argc, const char **argv)
flush_scripting();
out_delete:
- zfree(&script.ptime_range);
+ if (script.ptime_range)
+ zfree(&script.ptime_range);
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7b8f09b0b8bf..a3c060878faa 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -244,11 +244,25 @@ perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
process_synthesized_event, NULL);
}
+static int read_single_counter(struct perf_evsel *counter, int cpu,
+ int thread, struct timespec *rs)
+{
+ if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
+ u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
+ struct perf_counts_values *count =
+ perf_counts(counter->counts, cpu, thread);
+ count->ena = count->run = val;
+ count->val = val;
+ return 0;
+ }
+ return perf_evsel__read_counter(counter, cpu, thread);
+}
+
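For the new tool event the wall-clock delta itself is the counter value: rs = { .tv_sec = 1, .tv_nsec = 500000000 } yields val = 1,500,000,000, and writing it to ena and run as well keeps the scaling ratio at 1.0 so duration_time is never reported as multiplexed. This is what e.g. perf stat -e duration_time reads instead of touching a PMU.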
/*
* Read out the results of a single counter:
* do not aggregate counts across CPUs in system-wide mode
*/
-static int read_counter(struct perf_evsel *counter)
+static int read_counter(struct perf_evsel *counter, struct timespec *rs)
{
int nthreads = thread_map__nr(evsel_list->threads);
int ncpus, cpu, thread;
@@ -275,7 +289,7 @@ static int read_counter(struct perf_evsel *counter)
 * (via perf_evsel__read_counter) and sets their count->loaded.
*/
if (!count->loaded &&
- perf_evsel__read_counter(counter, cpu, thread)) {
+ read_single_counter(counter, cpu, thread, rs)) {
counter->counts->scaled = -1;
perf_counts(counter->counts, cpu, thread)->ena = 0;
perf_counts(counter->counts, cpu, thread)->run = 0;
@@ -304,13 +318,13 @@ static int read_counter(struct perf_evsel *counter)
return 0;
}
-static void read_counters(void)
+static void read_counters(struct timespec *rs)
{
struct perf_evsel *counter;
int ret;
evlist__for_each_entry(evsel_list, counter) {
- ret = read_counter(counter);
+ ret = read_counter(counter, rs);
if (ret)
pr_debug("failed to read counter %s\n", counter->name);
@@ -323,11 +337,11 @@ static void process_interval(void)
{
struct timespec ts, rs;
- read_counters();
-
clock_gettime(CLOCK_MONOTONIC, &ts);
diff_timespec(&rs, &ts, &ref_time);
+ read_counters(&rs);
+
if (STAT_RECORD) {
if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
pr_err("failed to write stat round event\n");
@@ -593,7 +607,7 @@ try_again:
* avoid arbitrary skew, we must read all counters before closing any
* group leaders.
*/
- read_counters();
+ read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
perf_evlist__close(evsel_list);
return WEXITSTATUS(status);
@@ -718,7 +732,8 @@ static struct option stat_options[] = {
"system-wide collection from all CPUs"),
OPT_BOOLEAN('g', "group", &group,
"put the counters into a counter group"),
- OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
+ OPT_BOOLEAN(0, "scale", &stat_config.scale,
+ "Use --no-scale to disable counter scaling for multiplexing"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &stat_config.run_count,
@@ -1307,6 +1322,7 @@ static void init_features(struct perf_session *session)
for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
perf_header__set_feat(&session->header, feat);
+ perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 231a90daa958..fbbb0da43abb 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1189,30 +1189,26 @@ static int __cmd_top(struct perf_top *top)
pthread_t thread, thread_process;
int ret;
- top->session = perf_session__new(NULL, false, NULL);
- if (top->session == NULL)
- return -1;
-
if (!top->annotation_opts.objdump_path) {
ret = perf_env__lookup_objdump(&top->session->header.env,
&top->annotation_opts.objdump_path);
if (ret)
- goto out_delete;
+ return ret;
}
ret = callchain_param__setup_sample_type(&callchain_param);
if (ret)
- goto out_delete;
+ return ret;
if (perf_session__register_idle_thread(top->session) < 0)
- goto out_delete;
+ return ret;
if (top->nr_threads_synthesize > 1)
perf_set_multithreaded();
init_process_thread(top);
- ret = perf_event__synthesize_bpf_events(&top->tool, perf_event__process,
+ ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
&top->session->machines.host,
&top->record_opts);
if (ret < 0)
@@ -1227,13 +1223,18 @@ static int __cmd_top(struct perf_top *top)
if (perf_hpp_list.socket) {
ret = perf_env__read_cpu_topology_map(&perf_env);
- if (ret < 0)
- goto out_err_cpu_topo;
+ if (ret < 0) {
+ char errbuf[BUFSIZ];
+ const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
+
+ ui__error("Could not read the CPU topology map: %s\n", err);
+ return ret;
+ }
}
ret = perf_top__start_counters(top);
if (ret)
- goto out_delete;
+ return ret;
top->session->evlist = top->evlist;
perf_session__set_id_hdr_size(top->session);
@@ -1252,7 +1253,7 @@ static int __cmd_top(struct perf_top *top)
ret = -1;
if (pthread_create(&thread_process, NULL, process_thread, top)) {
ui__error("Could not create process thread.\n");
- goto out_delete;
+ return ret;
}
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
@@ -1296,19 +1297,7 @@ out_join:
out_join_thread:
pthread_cond_signal(&top->qe.cond);
pthread_join(thread_process, NULL);
-out_delete:
- perf_session__delete(top->session);
- top->session = NULL;
-
return ret;
-
-out_err_cpu_topo: {
- char errbuf[BUFSIZ];
- const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
-
- ui__error("Could not read the CPU topology map: %s\n", err);
- goto out_delete;
-}
}
static int
@@ -1388,6 +1377,7 @@ int cmd_top(int argc, const char **argv)
* */
.overwrite = 0,
.sample_time = true,
+ .sample_time_set = true,
},
.max_stack = sysctl__max_stack(),
.annotation_opts = annotation__default_options,
@@ -1480,6 +1470,7 @@ int cmd_top(int argc, const char **argv)
"Display raw encoding of assembly instructions (default)"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
+ OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
@@ -1511,6 +1502,7 @@ int cmd_top(int argc, const char **argv)
"number of thread to run event synthesize"),
OPT_END()
};
+ struct perf_evlist *sb_evlist = NULL;
const char * const top_usage[] = {
"perf top [<options>]",
NULL
@@ -1628,8 +1620,9 @@ int cmd_top(int argc, const char **argv)
annotation_config__init();
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
- if (symbol__init(NULL) < 0)
- return -1;
+ status = symbol__init(NULL);
+ if (status < 0)
+ goto out_delete_evlist;
sort__setup_elide(stdout);
@@ -1639,10 +1632,28 @@ int cmd_top(int argc, const char **argv)
signal(SIGWINCH, winch_sig);
}
+ top.session = perf_session__new(NULL, false, NULL);
+ if (top.session == NULL) {
+ status = -1;
+ goto out_delete_evlist;
+ }
+
+ if (!top.record_opts.no_bpf_event)
+ bpf_event__add_sb_event(&sb_evlist, &perf_env);
+
+ if (perf_evlist__start_sb_thread(sb_evlist, target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+
status = __cmd_top(&top);
+ if (!opts->no_bpf_event)
+ perf_evlist__stop_sb_thread(sb_evlist);
+
out_delete_evlist:
perf_evlist__delete(top.evlist);
+ perf_session__delete(top.session);
return status;
}
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index 50df168be326..f470144d1a70 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -78,6 +78,8 @@ static void library_status(void)
STATUS(HAVE_LZMA_SUPPORT, lzma);
STATUS(HAVE_AUXTRACE_SUPPORT, get_cpuid);
STATUS(HAVE_LIBBPF_SUPPORT, bpf);
+ STATUS(HAVE_AIO_SUPPORT, aio);
+ STATUS(HAVE_ZSTD_SUPPORT, zstd);
}
int cmd_version(int argc, const char **argv)
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 05745f3ce912..999fe9170122 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -40,5 +40,6 @@ int cmd_mem(int argc, const char **argv);
int cmd_data(int argc, const char **argv);
int cmd_ftrace(int argc, const char **argv);
-int find_scripts(char **scripts_array, char **scripts_path_array);
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+ int pathlen);
#endif
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 7b55613924de..c68ee06cae63 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -103,7 +103,7 @@ done
# diff with extra ignore lines
check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
-check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"'
+check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
# diff non-symmetric files
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
index f9b2161e1ca4..2422894a8194 100644
--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -15,6 +15,7 @@
*/
#include <unistd.h>
+#include <linux/limits.h>
#include <pid_filter.h>
/* bpf-output associated map */
@@ -41,32 +42,110 @@ struct syscall_exit_args {
struct augmented_filename {
unsigned int size;
int reserved;
- char value[256];
+ char value[PATH_MAX];
};
-#define SYS_OPEN 2
-#define SYS_ACCESS 21
-#define SYS_OPENAT 257
+/* syscalls where the first arg is a string */
+#define SYS_OPEN 2
+#define SYS_STAT 4
+#define SYS_LSTAT 6
+#define SYS_ACCESS 21
+#define SYS_EXECVE 59
+#define SYS_TRUNCATE 76
+#define SYS_CHDIR 80
+#define SYS_RENAME 82
+#define SYS_MKDIR 83
+#define SYS_RMDIR 84
+#define SYS_CREAT 85
+#define SYS_LINK 86
+#define SYS_UNLINK 87
+#define SYS_SYMLINK 88
+#define SYS_READLINK 89
+#define SYS_CHMOD 90
+#define SYS_CHOWN 92
+#define SYS_LCHOWN 94
+#define SYS_MKNOD 133
+#define SYS_STATFS 137
+#define SYS_PIVOT_ROOT 155
+#define SYS_CHROOT 161
+#define SYS_ACCT 163
+#define SYS_SWAPON 167
+#define SYS_SWAPOFF 168
+#define SYS_DELETE_MODULE 176
+#define SYS_SETXATTR 188
+#define SYS_LSETXATTR 189
+#define SYS_GETXATTR 191
+#define SYS_LGETXATTR 192
+#define SYS_LISTXATTR 194
+#define SYS_LLISTXATTR 195
+#define SYS_REMOVEXATTR 197
+#define SYS_LREMOVEXATTR 198
+#define SYS_MQ_OPEN 240
+#define SYS_MQ_UNLINK 241
+#define SYS_ADD_KEY 248
+#define SYS_REQUEST_KEY 249
+#define SYS_SYMLINKAT 266
+#define SYS_MEMFD_CREATE 319
+
+/* syscalls where the second arg is a string */
+
+#define SYS_PWRITE64 18
+#define SYS_EXECVE 59
+#define SYS_RENAME 82
+#define SYS_QUOTACTL 179
+#define SYS_FSETXATTR 190
+#define SYS_FGETXATTR 193
+#define SYS_FREMOVEXATTR 199
+#define SYS_MQ_TIMEDSEND 242
+#define SYS_REQUEST_KEY 249
+#define SYS_INOTIFY_ADD_WATCH 254
+#define SYS_OPENAT 257
+#define SYS_MKDIRAT 258
+#define SYS_MKNODAT 259
+#define SYS_FCHOWNAT 260
+#define SYS_FUTIMESAT 261
+#define SYS_NEWFSTATAT 262
+#define SYS_UNLINKAT 263
+#define SYS_RENAMEAT 264
+#define SYS_LINKAT 265
+#define SYS_READLINKAT 267
+#define SYS_FCHMODAT 268
+#define SYS_FACCESSAT 269
+#define SYS_UTIMENSAT 280
+#define SYS_NAME_TO_HANDLE_AT 303
+#define SYS_FINIT_MODULE 313
+#define SYS_RENAMEAT2 316
+#define SYS_EXECVEAT 322
+#define SYS_STATX 332
pid_filter(pids_filtered);
+struct augmented_args_filename {
+ struct syscall_enter_args args;
+ struct augmented_filename filename;
+};
+
+bpf_map(augmented_filename_map, PERCPU_ARRAY, int, struct augmented_args_filename, 1);
+
SEC("raw_syscalls:sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
- struct {
- struct syscall_enter_args args;
- struct augmented_filename filename;
- } augmented_args;
- struct syscall *syscall;
- unsigned int len = sizeof(augmented_args);
+ struct augmented_args_filename *augmented_args;
+ unsigned int len = sizeof(*augmented_args);
const void *filename_arg = NULL;
+ struct syscall *syscall;
+ int key = 0;
+
+ augmented_args = bpf_map_lookup_elem(&augmented_filename_map, &key);
+ if (augmented_args == NULL)
+ return 1;
if (pid_filter__has(&pids_filtered, getpid()))
return 0;
- probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+ probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
- syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
+ syscall = bpf_map_lookup_elem(&syscalls, &augmented_args->args.syscall_nr);
if (syscall == NULL || !syscall->enabled)
return 0;
/*
@@ -109,30 +188,105 @@ int sys_enter(struct syscall_enter_args *args)
*
 * after the ctx memory access to prevent their downstream merging.
*/
- switch (augmented_args.args.syscall_nr) {
+ /*
+ * This table of what args are strings will be provided by userspace,
+ * in the syscalls map, i.e. we will already have to do the lookup to
+ * see if this specific syscall is filtered, so we may as well get more
+ * info about what syscall args are strings or pointers, and how many
+ * bytes to copy, per arg, etc.
+ *
+ * For now hard code it, till we have all the basic mechanisms in place
+ * to automate everything and make the kernel part be completely driven
+ * by information obtained in userspace for each kernel version and
+ * processor architecture, making the kernel part the same no matter what
+ * kernel version or processor architecture it runs on.
+ */
+ switch (augmented_args->args.syscall_nr) {
+ case SYS_ACCT:
+ case SYS_ADD_KEY:
+ case SYS_CHDIR:
+ case SYS_CHMOD:
+ case SYS_CHOWN:
+ case SYS_CHROOT:
+ case SYS_CREAT:
+ case SYS_DELETE_MODULE:
+ case SYS_EXECVE:
+ case SYS_GETXATTR:
+ case SYS_LCHOWN:
+ case SYS_LGETXATTR:
+ case SYS_LINK:
+ case SYS_LISTXATTR:
+ case SYS_LLISTXATTR:
+ case SYS_LREMOVEXATTR:
+ case SYS_LSETXATTR:
+ case SYS_LSTAT:
+ case SYS_MEMFD_CREATE:
+ case SYS_MKDIR:
+ case SYS_MKNOD:
+ case SYS_MQ_OPEN:
+ case SYS_MQ_UNLINK:
+ case SYS_PIVOT_ROOT:
+ case SYS_READLINK:
+ case SYS_REMOVEXATTR:
+ case SYS_RENAME:
+ case SYS_REQUEST_KEY:
+ case SYS_RMDIR:
+ case SYS_SETXATTR:
+ case SYS_STAT:
+ case SYS_STATFS:
+ case SYS_SWAPOFF:
+ case SYS_SWAPON:
+ case SYS_SYMLINK:
+ case SYS_SYMLINKAT:
+ case SYS_TRUNCATE:
+ case SYS_UNLINK:
case SYS_ACCESS:
case SYS_OPEN: filename_arg = (const void *)args->args[0];
__asm__ __volatile__("": : :"memory");
break;
+ case SYS_EXECVEAT:
+ case SYS_FACCESSAT:
+ case SYS_FCHMODAT:
+ case SYS_FCHOWNAT:
+ case SYS_FGETXATTR:
+ case SYS_FINIT_MODULE:
+ case SYS_FREMOVEXATTR:
+ case SYS_FSETXATTR:
+ case SYS_FUTIMESAT:
+ case SYS_INOTIFY_ADD_WATCH:
+ case SYS_LINKAT:
+ case SYS_MKDIRAT:
+ case SYS_MKNODAT:
+ case SYS_MQ_TIMEDSEND:
+ case SYS_NAME_TO_HANDLE_AT:
+ case SYS_NEWFSTATAT:
+ case SYS_PWRITE64:
+ case SYS_QUOTACTL:
+ case SYS_READLINKAT:
+ case SYS_RENAMEAT:
+ case SYS_RENAMEAT2:
+ case SYS_STATX:
+ case SYS_UNLINKAT:
+ case SYS_UTIMENSAT:
case SYS_OPENAT: filename_arg = (const void *)args->args[1];
break;
}
if (filename_arg != NULL) {
- augmented_args.filename.reserved = 0;
- augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
- sizeof(augmented_args.filename.value),
+ augmented_args->filename.reserved = 0;
+ augmented_args->filename.size = probe_read_str(&augmented_args->filename.value,
+ sizeof(augmented_args->filename.value),
filename_arg);
- if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
- len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
- len &= sizeof(augmented_args.filename.value) - 1;
+ if (augmented_args->filename.size < sizeof(augmented_args->filename.value)) {
+ len -= sizeof(augmented_args->filename.value) - augmented_args->filename.size;
+ len &= sizeof(augmented_args->filename.value) - 1;
}
} else {
- len = sizeof(augmented_args.args);
+ len = sizeof(augmented_args->args);
}
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
- return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
}
SEC("raw_syscalls:sys_exit")
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index 5df7ed9d9020..2eac6d804b2d 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -24,7 +24,13 @@ struct bpf_map SEC("maps") name = { \
.key_size = sizeof(type_key), \
.value_size = sizeof(type_val), \
.max_entries = _max_entries, \
-}
+}; \
+struct ____btf_map_##name { \
+ type_key key; \
+ type_val value; \
+}; \
+struct ____btf_map_##name __attribute__((section(".maps." #name), used)) \
+ ____btf_map_##name = { }
/*
* FIXME: this should receive .max_entries as a parameter, as careful
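The extra ____btf_map_##name struct is never touched at runtime; keeping one instance alive in a .maps.<name> section merely forces the compiler to emit BTF describing the key/value types, which BTF-aware loaders can then attach to the map. For a hypothetical bpf_map(syscalls, ARRAY, int, struct syscall, 512) the macro now additionally expands to roughly:

struct ____btf_map_syscalls {
	int key;
	struct syscall value;
};
struct ____btf_map_syscalls
__attribute__((section(".maps.syscalls"), used)) ____btf_map_syscalls = { };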
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index a11cb006f968..72df4b6fa36f 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -298,6 +298,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
use_pager = 1;
commit_pager_choice();
+ perf_env__init(&perf_env);
perf_env__set_cmdline(&perf_env, argc, argv);
status = p->fn(argc, argv);
perf_config__exit();
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index b120e547ddc7..369eae61068d 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -66,7 +66,7 @@ struct record_opts {
bool ignore_missing_thread;
bool strict_freq;
bool sample_id;
- bool bpf_event;
+ bool no_bpf_event;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;
@@ -85,6 +85,7 @@ struct record_opts {
u64 clockid_res_ns;
int nr_cblocks;
int affinity;
+ int mmap_flush;
};
enum perf_affinity {
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
index 704302c3e67d..9dc2f6b70354 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -348,18 +348,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x517082",
- "EventName": "PM_CO_DISP_FAIL",
- "BriefDescription": "CO dispatch failed due to all CO machines being busy",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x527084",
- "EventName": "PM_CO_TM_SC_FOOTPRINT",
- "BriefDescription": "L2 did a cleanifdirty CO to the L3 (ie created an SC line in the L3)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x3608a",
"EventName": "PM_CO_USAGE",
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
@@ -1578,36 +1566,12 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x617082",
- "EventName": "PM_ISIDE_DISP",
- "BriefDescription": "All i-side dispatch attempts",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x627084",
- "EventName": "PM_ISIDE_DISP_FAIL",
- "BriefDescription": "All i-side dispatch attempts that failed due to a addr collision with another machine",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x627086",
- "EventName": "PM_ISIDE_DISP_FAIL_OTHER",
- "BriefDescription": "All i-side dispatch attempts that failed due to a reason other than addrs collision",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x4608e",
"EventName": "PM_ISIDE_L2MEMACC",
"BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)",
"PublicDescription": ""
},
{,
- "EventCode": "0x44608e",
- "EventName": "PM_ISIDE_MRU_TOUCH",
- "BriefDescription": "Iside L2 MRU touch",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x30ac",
"EventName": "PM_ISU_REF_FX0",
"BriefDescription": "FX0 ISU reject",
@@ -1734,222 +1698,36 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x417080",
- "EventName": "PM_L2_CASTOUT_MOD",
- "BriefDescription": "L2 Castouts - Modified (M, Mu, Me)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x417082",
- "EventName": "PM_L2_CASTOUT_SHR",
- "BriefDescription": "L2 Castouts - Shared (T, Te, Si, S)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x27084",
"EventName": "PM_L2_CHIP_PUMP",
"BriefDescription": "RC requests that were local on chip pump attempts",
"PublicDescription": ""
},
{,
- "EventCode": "0x427086",
- "EventName": "PM_L2_DC_INV",
- "BriefDescription": "Dcache invalidates from L2",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x44608c",
- "EventName": "PM_L2_DISP_ALL_L2MISS",
- "BriefDescription": "All successful Ld/St dispatches for this thread that were an L2miss",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x27086",
"EventName": "PM_L2_GROUP_PUMP",
"BriefDescription": "RC requests that were on Node Pump attempts",
"PublicDescription": ""
},
{,
- "EventCode": "0x626084",
- "EventName": "PM_L2_GRP_GUESS_CORRECT",
- "BriefDescription": "L2 guess grp and guess was correct (data intra-6chip AND ^on-chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x626086",
- "EventName": "PM_L2_GRP_GUESS_WRONG",
- "BriefDescription": "L2 guess grp and guess was not correct (ie data on-chip OR beyond-6chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x427084",
- "EventName": "PM_L2_IC_INV",
- "BriefDescription": "Icache Invalidates from L2",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x436088",
- "EventName": "PM_L2_INST",
- "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x43608a",
- "EventName": "PM_L2_INST_MISS",
- "BriefDescription": "All successful i-side dispatches that were an L2miss for this thread (excludes i_l2mru_tch reqs)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x416080",
- "EventName": "PM_L2_LD",
- "BriefDescription": "All successful D-side Load dispatches for this thread",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x437088",
- "EventName": "PM_L2_LD_DISP",
- "BriefDescription": "All successful load dispatches",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x43708a",
- "EventName": "PM_L2_LD_HIT",
- "BriefDescription": "All successful load dispatches that were L2 hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x426084",
- "EventName": "PM_L2_LD_MISS",
- "BriefDescription": "All successful D-Side Load dispatches that were an L2miss for this thread",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x616080",
- "EventName": "PM_L2_LOC_GUESS_CORRECT",
- "BriefDescription": "L2 guess loc and guess was correct (ie data local)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x616082",
- "EventName": "PM_L2_LOC_GUESS_WRONG",
- "BriefDescription": "L2 guess loc and guess was not correct (ie data not on chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x516080",
- "EventName": "PM_L2_RCLD_DISP",
- "BriefDescription": "L2 RC load dispatch attempt",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x516082",
- "EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
- "BriefDescription": "L2 RC load dispatch attempt failed due to address collision with RC/CO/SN/SQ",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x526084",
- "EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
- "BriefDescription": "L2 RC load dispatch attempt failed due to other reasons",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x536088",
- "EventName": "PM_L2_RCST_DISP",
- "BriefDescription": "L2 RC store dispatch attempt",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x53608a",
- "EventName": "PM_L2_RCST_DISP_FAIL_ADDR",
- "BriefDescription": "L2 RC store dispatch attempt failed due to address collision with RC/CO/SN/SQ",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54608c",
- "EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
- "BriefDescription": "L2 RC store dispatch attempt failed due to other reasons",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x537088",
- "EventName": "PM_L2_RC_ST_DONE",
- "BriefDescription": "RC did st to line that was Tx or Sx",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x63708a",
- "EventName": "PM_L2_RTY_LD",
- "BriefDescription": "RC retries on PB for any load from core",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x3708a",
"EventName": "PM_L2_RTY_ST",
"BriefDescription": "RC retries on PB for any store from core",
"PublicDescription": ""
},
{,
- "EventCode": "0x54708c",
- "EventName": "PM_L2_SN_M_RD_DONE",
- "BriefDescription": "SNP dispatched for a read and was M",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54708e",
- "EventName": "PM_L2_SN_M_WR_DONE",
- "BriefDescription": "SNP dispatched for a write and was M",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x53708a",
- "EventName": "PM_L2_SN_SX_I_DONE",
- "BriefDescription": "SNP dispatched and went from Sx or Tx to Ix",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x17080",
"EventName": "PM_L2_ST",
"BriefDescription": "All successful D-side store dispatches for this thread",
"PublicDescription": ""
},
{,
- "EventCode": "0x44708c",
- "EventName": "PM_L2_ST_DISP",
- "BriefDescription": "All successful store dispatches",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x44708e",
- "EventName": "PM_L2_ST_HIT",
- "BriefDescription": "All successful store dispatches that were L2Hits",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x17082",
"EventName": "PM_L2_ST_MISS",
"BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss",
"PublicDescription": ""
},
{,
- "EventCode": "0x636088",
- "EventName": "PM_L2_SYS_GUESS_CORRECT",
- "BriefDescription": "L2 guess sys and guess was correct (ie data beyond-6chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x63608a",
- "EventName": "PM_L2_SYS_GUESS_WRONG",
- "BriefDescription": "L2 guess sys and guess was not correct (ie data ^beyond-6chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x617080",
- "EventName": "PM_L2_SYS_PUMP",
- "BriefDescription": "RC requests that were system pump attempts",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x1e05e",
"EventName": "PM_L2_TM_REQ_ABORT",
"BriefDescription": "TM abort",
@@ -1962,36 +1740,12 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x23808a",
- "EventName": "PM_L3_CINJ",
- "BriefDescription": "l3 ci of cache inject",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x128084",
- "EventName": "PM_L3_CI_HIT",
- "BriefDescription": "L3 Castins Hit (total count",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x128086",
- "EventName": "PM_L3_CI_MISS",
- "BriefDescription": "L3 castins miss (total count",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x819082",
"EventName": "PM_L3_CI_USAGE",
"BriefDescription": "rotating sample of 16 CI or CO actives",
"PublicDescription": ""
},
{,
- "EventCode": "0x438088",
- "EventName": "PM_L3_CO",
- "BriefDescription": "l3 castout occurring ( does not include casthrough or log writes (cinj/dmaw)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x83908b",
"EventName": "PM_L3_CO0_ALLOC",
"BriefDescription": "lifetime, sample of CO machine 0 valid",
@@ -2010,120 +1764,18 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x238088",
- "EventName": "PM_L3_CO_LCO",
- "BriefDescription": "Total L3 castouts occurred on LCO",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x28084",
"EventName": "PM_L3_CO_MEM",
"BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)",
"PublicDescription": ""
},
{,
- "EventCode": "0xb19082",
- "EventName": "PM_L3_GRP_GUESS_CORRECT",
- "BriefDescription": "Initial scope=group and data from same group (near) (pred successful)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb3908a",
- "EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
- "BriefDescription": "Initial scope=group but data from local node. Predition too high",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb39088",
- "EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
- "BriefDescription": "Initial scope=group but data from outside group (far or rem). Prediction too Low",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x218080",
- "EventName": "PM_L3_HIT",
- "BriefDescription": "L3 Hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x138088",
- "EventName": "PM_L3_L2_CO_HIT",
- "BriefDescription": "L2 castout hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x13808a",
- "EventName": "PM_L3_L2_CO_MISS",
- "BriefDescription": "L2 castout miss",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x14808c",
- "EventName": "PM_L3_LAT_CI_HIT",
- "BriefDescription": "L3 Lateral Castins Hit",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x14808e",
- "EventName": "PM_L3_LAT_CI_MISS",
- "BriefDescription": "L3 Lateral Castins Miss",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x228084",
- "EventName": "PM_L3_LD_HIT",
- "BriefDescription": "L3 demand LD Hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x228086",
- "EventName": "PM_L3_LD_MISS",
- "BriefDescription": "L3 demand LD Miss",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x1e052",
"EventName": "PM_L3_LD_PREF",
"BriefDescription": "L3 Load Prefetches",
"PublicDescription": ""
},
{,
- "EventCode": "0xb19080",
- "EventName": "PM_L3_LOC_GUESS_CORRECT",
- "BriefDescription": "initial scope=node/chip and data from local node (local) (pred successful)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb29086",
- "EventName": "PM_L3_LOC_GUESS_WRONG",
- "BriefDescription": "Initial scope=node but data from out side local node (near or far or rem). Prediction too Low",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x218082",
- "EventName": "PM_L3_MISS",
- "BriefDescription": "L3 Misses",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54808c",
- "EventName": "PM_L3_P0_CO_L31",
- "BriefDescription": "l3 CO to L3.1 (lco) port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x538088",
- "EventName": "PM_L3_P0_CO_MEM",
- "BriefDescription": "l3 CO to memory port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x929084",
- "EventName": "PM_L3_P0_CO_RTY",
- "BriefDescription": "L3 CO received retry port 0",
- "PublicDescription": ""
- },
- {,
"EventCode": "0xa29084",
"EventName": "PM_L3_P0_GRP_PUMP",
"BriefDescription": "L3 pf sent with grp scope port 0",
@@ -2148,120 +1800,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0xa19080",
- "EventName": "PM_L3_P0_NODE_PUMP",
- "BriefDescription": "L3 pf sent with nodal scope port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x919080",
- "EventName": "PM_L3_P0_PF_RTY",
- "BriefDescription": "L3 PF received retry port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x939088",
- "EventName": "PM_L3_P0_SN_HIT",
- "BriefDescription": "L3 snoop hit port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x118080",
- "EventName": "PM_L3_P0_SN_INV",
- "BriefDescription": "Port0 snooper detects someone doing a store to a line thats Sx",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x94908c",
- "EventName": "PM_L3_P0_SN_MISS",
- "BriefDescription": "L3 snoop miss port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa39088",
- "EventName": "PM_L3_P0_SYS_PUMP",
- "BriefDescription": "L3 pf sent with sys scope port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54808e",
- "EventName": "PM_L3_P1_CO_L31",
- "BriefDescription": "l3 CO to L3.1 (lco) port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x53808a",
- "EventName": "PM_L3_P1_CO_MEM",
- "BriefDescription": "l3 CO to memory port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x929086",
- "EventName": "PM_L3_P1_CO_RTY",
- "BriefDescription": "L3 CO received retry port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa29086",
- "EventName": "PM_L3_P1_GRP_PUMP",
- "BriefDescription": "L3 pf sent with grp scope port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x528086",
- "EventName": "PM_L3_P1_LCO_DATA",
- "BriefDescription": "lco sent with data port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x518082",
- "EventName": "PM_L3_P1_LCO_NO_DATA",
- "BriefDescription": "dataless l3 lco sent port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa4908e",
- "EventName": "PM_L3_P1_LCO_RTY",
- "BriefDescription": "L3 LCO received retry port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa19082",
- "EventName": "PM_L3_P1_NODE_PUMP",
- "BriefDescription": "L3 pf sent with nodal scope port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x919082",
- "EventName": "PM_L3_P1_PF_RTY",
- "BriefDescription": "L3 PF received retry port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x93908a",
- "EventName": "PM_L3_P1_SN_HIT",
- "BriefDescription": "L3 snoop hit port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x118082",
- "EventName": "PM_L3_P1_SN_INV",
- "BriefDescription": "Port1 snooper detects someone doing a store to a line thats Sx",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x94908e",
- "EventName": "PM_L3_P1_SN_MISS",
- "BriefDescription": "L3 snoop miss port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa3908a",
- "EventName": "PM_L3_P1_SYS_PUMP",
- "BriefDescription": "L3 pf sent with sys scope port 1",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x84908d",
"EventName": "PM_L3_PF0_ALLOC",
"BriefDescription": "lifetime, sample of PF machine 0 valid",
@@ -2274,12 +1812,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x428084",
- "EventName": "PM_L3_PF_HIT_L3",
- "BriefDescription": "l3 pf hit in l3",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x18080",
"EventName": "PM_L3_PF_MISS_L3",
"BriefDescription": "L3 Prefetch missed in L3",
@@ -2370,42 +1902,12 @@
"PublicDescription": ""
},
{,
- "EventCode": "0xb29084",
- "EventName": "PM_L3_SYS_GUESS_CORRECT",
- "BriefDescription": "Initial scope=system and data from outside group (far or rem)(pred successful)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb4908c",
- "EventName": "PM_L3_SYS_GUESS_WRONG",
- "BriefDescription": "Initial scope=system but data from local or near. Predction too high",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x24808e",
- "EventName": "PM_L3_TRANS_PF",
- "BriefDescription": "L3 Transient prefetch",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x18081",
"EventName": "PM_L3_WI0_ALLOC",
"BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
"PublicDescription": "0.0"
},
{,
- "EventCode": "0x418080",
- "EventName": "PM_L3_WI0_BUSY",
- "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x418082",
- "EventName": "PM_L3_WI_USAGE",
- "BriefDescription": "rotating sample of 8 WI actives",
- "PublicDescription": ""
- },
- {,
"EventCode": "0xc080",
"EventName": "PM_LD_REF_L1_LSU0",
"BriefDescription": "LS0 L1 D cache load references counted at finish, gated by reject",
@@ -3312,12 +2814,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x328084",
- "EventName": "PM_NON_TM_RST_SC",
- "BriefDescription": "non tm snp rst tm sc",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x2001a",
"EventName": "PM_NTCG_ALL_FIN",
"BriefDescription": "Cycles after all instructions have finished to group completed",
@@ -3420,24 +2916,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x34808e",
- "EventName": "PM_RD_CLEARING_SC",
- "BriefDescription": "rd clearing sc",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x34808c",
- "EventName": "PM_RD_FORMING_SC",
- "BriefDescription": "rd forming sc",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x428086",
- "EventName": "PM_RD_HIT_PF",
- "BriefDescription": "rd machine hit l3 pf machine",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x20004",
"EventName": "PM_REAL_SRQ_FULL",
"BriefDescription": "Out of real srq entries",
@@ -3504,18 +2982,6 @@
"PublicDescription": "TLBIE snoopSnoop TLBIE"
},
{,
- "EventCode": "0x338088",
- "EventName": "PM_SNP_TM_HIT_M",
- "BriefDescription": "snp tm st hit m mu",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x33808a",
- "EventName": "PM_SNP_TM_HIT_T",
- "BriefDescription": "snp tm_st_hit t tn te",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x4608c",
"EventName": "PM_SN_USAGE",
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
@@ -3534,12 +3000,6 @@
"PublicDescription": "STCX executed reported at sent to nest42"
},
{,
- "EventCode": "0x717080",
- "EventName": "PM_ST_CAUSED_FAIL",
- "BriefDescription": "Non TM St caused any thread to fail",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x3090",
"EventName": "PM_SWAP_CANCEL",
"BriefDescription": "SWAP cancel , rtag not available",
@@ -3624,18 +3084,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x318082",
- "EventName": "PM_TM_CAM_OVERFLOW",
- "BriefDescription": "l3 tm cam overflow during L2 co of SC",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x74708c",
- "EventName": "PM_TM_CAP_OVERFLOW",
- "BriefDescription": "TM Footprint Capactiy Overflow",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x20ba",
"EventName": "PM_TM_END_ALL",
"BriefDescription": "Tm any tend",
@@ -3690,48 +3138,6 @@
"PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42"
},
{,
- "EventCode": "0x727086",
- "EventName": "PM_TM_FAV_CAUSED_FAIL",
- "BriefDescription": "TM Load (fav) caused another thread to fail",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x717082",
- "EventName": "PM_TM_LD_CAUSED_FAIL",
- "BriefDescription": "Non TM Ld caused any thread to fail",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x727084",
- "EventName": "PM_TM_LD_CONF",
- "BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x328086",
- "EventName": "PM_TM_RST_SC",
- "BriefDescription": "tm snp rst tm sc",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x318080",
- "EventName": "PM_TM_SC_CO",
- "BriefDescription": "l3 castout tm Sc line",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x73708a",
- "EventName": "PM_TM_ST_CAUSED_FAIL",
- "BriefDescription": "TM Store (fav or non-fav) caused another thread to fail",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x737088",
- "EventName": "PM_TM_ST_CONF",
- "BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x20bc",
"EventName": "PM_TM_TBEGIN",
"BriefDescription": "Tm nested tbegin",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
index e7a3524b748f..68618152ea2c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
@@ -4,7 +4,7 @@
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "Counter:128 Name:L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
"Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
new file mode 100644
index 000000000000..93ddfd8053ca
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
@@ -0,0 +1,12 @@
+[
+ {
+ "EventName": "bp_l1_btb_correct",
+ "EventCode": "0x8a",
+ "BriefDescription": "L1 BTB Correction."
+ },
+ {
+ "EventName": "bp_l2_btb_correct",
+ "EventCode": "0x8b",
+ "BriefDescription": "L2 BTB Correction."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
new file mode 100644
index 000000000000..fad4af9142cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
@@ -0,0 +1,287 @@
+[
+ {
+ "EventName": "ic_fw32",
+ "EventCode": "0x80",
+ "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+ },
+ {
+ "EventName": "ic_fw32_miss",
+ "EventCode": "0x81",
+ "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+ },
+ {
+ "EventName": "ic_cache_fill_l2",
+ "EventCode": "0x82",
+ "BriefDescription": "The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+ },
+ {
+ "EventName": "ic_cache_fill_sys",
+ "EventCode": "0x83",
+ "BriefDescription": "The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_hit",
+ "EventCode": "0x84",
+ "BriefDescription": "The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_miss",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs."
+ },
+ {
+ "EventName": "bp_snp_re_sync",
+ "EventCode": "0x86",
+ "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_any",
+ "EventCode": "0x87",
+ "BriefDescription": "IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+ "EventCode": "0x87",
+ "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+ "EventCode": "0x87",
+ "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ic_cache_inval.l2_invalidating_probe",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS).",
+ "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to L2 invalidating probe (external or LS).",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_cache_inval.fill_invalidated",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to overwriting fill response.",
+ "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to overwriting fill response.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "bp_tlb_rel",
+ "EventCode": "0x99",
+ "BriefDescription": "The number of ITLB reload requests."
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_l",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_x",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g1.ls_rd_blk_c_s",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g1.cacheable_ic_read",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g1.change_to_x",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g1.prefetch_l2",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g1.l2_hw_pf",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g1.other_requests",
+ "EventCode": "0x60",
+ "BriefDescription": "Events covered by l2_request_g2.",
+ "PublicDescription": "Requests to L2 Group1. Events covered by l2_request_g2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_request_g2.group1",
+ "EventCode": "0x61",
+ "BriefDescription": "All Group 1 commands not in unit0.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. All Group 1 commands not in unit0.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "RdSized, RdSized32, RdSized64.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSized, RdSized32, RdSized64.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "RdSizedNC, RdSized32NC, RdSized64NC.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSizedNC, RdSized32NC, RdSized64NC.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g2.smc_inval",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_originator",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_responses",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+ "EventCode": "0x62",
+ "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "PublicDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_write",
+ "EventCode": "0x63",
+ "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+ "BriefDescription": "LS to L2 WCB write requests.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_close",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB close requests.",
+ "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_wcb_req.zero_byte_store",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB zero byte store requests.",
+ "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_wcb_req.cl_zero",
+ "EventCode": "0x63",
+ "PublicDescription": "LS to L2 WCB cache line zeroing requests.",
+ "BriefDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+ "EventCode": "0x64",
+ "BriefDescription": "LS ReadBlock C/S Hit.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS ReadBlock C/S Hit.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "LS Read Block L Hit X.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block L Hit X.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "LsRdBlkL Hit Shared.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkL Hit Shared.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+ "EventCode": "0x64",
+ "BriefDescription": "LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+ "EventCode": "0x64",
+ "BriefDescription": "LS Read Block C S L X Change to X Miss.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block C S L X Change to X Miss.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "IC Fill Hit Exclusive Stale.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Exclusive Stale.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "IC Fill Hit Shared.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Shared.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "IC Fill Miss.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Miss.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_fill_pending.l2_fill_busy",
+ "EventCode": "0x6d",
+ "BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+ "PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+ "UMask": "0x1"
+ }
+]
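
As the l2_latency.l2_cycles_waiting_on_fills entry above spells out, the hardware pre-divides the cycle count by four and counts fills for both threads, so an average-latency estimate has to undo the division and use a fill count covering both threads; schematically, with fills_both_threads standing in for a measurement this file does not itself name:

	avg_l2_fill_latency_cycles = (4 * l2_latency.l2_cycles_waiting_on_fills) / fills_both_threads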
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
new file mode 100644
index 000000000000..7b285b0a7f35
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
@@ -0,0 +1,134 @@
+[
+ {
+ "EventName": "ex_ret_instr",
+ "EventCode": "0xc0",
+ "BriefDescription": "Retired Instructions."
+ },
+ {
+ "EventName": "ex_ret_cops",
+ "EventCode": "0xc1",
+ "BriefDescription": "Retired Uops.",
+ "PublicDescription": "The number of uOps retired. This includes all processor activity (instructions, exceptions, interrupts, microcode assists, etc.). The number of events logged per cycle can vary from 0 to 4."
+ },
+ {
+ "EventName": "ex_ret_brn",
+ "EventCode": "0xc2",
+ "BriefDescription": "[Retired Branch Instructions.",
+ "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_misp",
+ "EventCode": "0xc3",
+ "BriefDescription": "Retired Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of branch instructions retired, of any type, that were not correctly predicted. This includes those for which prediction is not attempted (far control transfers, exceptions and interrupts)."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn",
+ "EventCode": "0xc4",
+ "BriefDescription": "Retired Taken Branch Instructions.",
+ "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn_misp",
+ "EventCode": "0xc5",
+ "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_far",
+ "EventCode": "0xc6",
+ "BriefDescription": "Retired Far Control Transfers.",
+ "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
+ },
+ {
+ "EventName": "ex_ret_brn_resync",
+ "EventCode": "0xc7",
+ "BriefDescription": "Retired Branch Resyncs.",
+ "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
+ },
+ {
+ "EventName": "ex_ret_near_ret",
+ "EventCode": "0xc8",
+ "BriefDescription": "Retired Near Returns.",
+ "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
+ },
+ {
+ "EventName": "ex_ret_near_ret_mispred",
+ "EventCode": "0xc9",
+ "BriefDescription": "Retired Near Returns Mispredicted.",
+ "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
+ },
+ {
+ "EventName": "ex_ret_brn_ind_misp",
+ "EventCode": "0xca",
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+ "PublicDescription": "Retired Indirect Branch Instructions Mispredicted."
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.sse_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "MMX instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.x87_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "x87 instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ex_ret_cond",
+ "EventCode": "0xd1",
+ "BriefDescription": "Retired Conditional Branch Instructions."
+ },
+ {
+ "EventName": "ex_ret_cond_misp",
+ "EventCode": "0xd2",
+ "BriefDescription": "Retired Conditional Branch Instructions Mispredicted."
+ },
+ {
+ "EventName": "ex_div_busy",
+ "EventCode": "0xd3",
+ "BriefDescription": "Div Cycles Busy count."
+ },
+ {
+ "EventName": "ex_div_count",
+ "EventCode": "0xd4",
+ "BriefDescription": "Div Op Count."
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "PublicDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Number of Ops tagged by IBS that retired.",
+ "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Number of Ops tagged by IBS.",
+ "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ex_ret_fus_brnch_inst",
+ "EventCode": "0x1d0",
+ "BriefDescription": "The number of fused retired branch instructions retired per cycle. The number of events logged per cycle can vary from 0 to 3."
+ }
+]
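
Once these tables ship, the aliases are usable by name. As an illustrative (not from this patch) sanity check of the branch events defined above:

	$ perf stat -e ex_ret_brn,ex_ret_brn_misp -- ./workload
	# misprediction rate = ex_ret_brn_misp / ex_ret_brn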
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
new file mode 100644
index 000000000000..ea4711983d1d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
@@ -0,0 +1,168 @@
+[
+ {
+ "EventName": "fpu_pipe_assignment.dual",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number multi-pipe uOps.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to Pipe 3.",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to Pipe 3.",
+ "UMask": "0xf"
+ },
+ {
+ "EventName": "fp_sched_empty",
+ "EventCode": "0x01",
+ "BriefDescription": "This is a speculative event. The number of cycles in which the FPU scheduler is empty. Note that some Ops like FP loads bypass the scheduler."
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.all",
+ "EventCode": "0x02",
+ "BriefDescription": "All Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8.",
+ "UMask": "0x7"
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.div_sqr_r_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Divide and square root Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Divide and square root Ops.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.mul_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Multiply Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Multiply Ops.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.add_sub_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Add/subtract Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Add/subtract Ops.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.all",
+ "EventCode": "0x03",
+ "BriefDescription": "All FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_mult_add_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision divide/square root FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision divide/square root FLOPS.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision multiply FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply FLOPS.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision add/subtract FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision add/subtract FLOPS.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_mult_add_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single-precision divide/square root FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision divide/square root FLOPS.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single-precision multiply FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision multiply FLOPS.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single-precision add/subtract FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision add/subtract FLOPS.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.optimized",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Scalar Ops optimized.",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Scalar Ops optimized.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.opt_potential",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops eliminated.",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops eliminated.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops.",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+ "PublicDescription": "The number of serializing Ops retired. x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 bottom-executing uOps retired.",
+ "PublicDescription": "The number of serializing Ops retired. x87 bottom-executing uOps retired.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+ "PublicDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE bottom-executing uOps retired.",
+ "PublicDescription": "The number of serializing Ops retired. SSE bottom-executing uOps retired.",
+ "UMask": "0x1"
+ }
+]
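
Since fp_ret_sse_avx_ops.all counts retired SSE/AVX FLOPs, with a multiply-add contributing 2 per the descriptions above, a rough FLOP rate follows directly; an illustrative use:

	$ perf stat -e fp_ret_sse_avx_ops.all -- ./workload
	# GFLOPS ~= event count / (elapsed seconds * 1e9)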
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
new file mode 100644
index 000000000000..fa2d60d4def0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
@@ -0,0 +1,162 @@
+[
+ {
+ "EventName": "ls_locks.bus_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+ "PublicDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_dispatch.ld_st_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Load-op-Stores.",
+ "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_dispatch.store_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_dispatch.ld_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_stlf",
+ "EventCode": "0x35",
+ "BriefDescription": "Number of STLF hits."
+ },
+ {
+ "EventName": "ls_dc_accesses",
+ "EventCode": "0x40",
+ "BriefDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.all",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss or Reload off all sizes.",
+ "PublicDescription": "L1 DTLB Miss or Reload off all sizes.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 1G size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 1G size.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 2M size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 2M size.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 32K size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 32K size.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 4K size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 4K size.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 1G size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 1G size.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 2M size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 2M size.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 32K size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 32K size.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 4K size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 4K size.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_iside",
+ "EventCode": "0x46",
+ "BriefDescription": "Tablewalker allocation.",
+ "PublicDescription": "Tablewalker allocation.",
+ "UMask": "0xc"
+ },
+ {
+ "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_dside",
+ "EventCode": "0x46",
+ "BriefDescription": "Tablewalker allocation.",
+ "PublicDescription": "Tablewalker allocation.",
+ "UMask": "0x3"
+ },
+ {
+ "EventName": "ls_misal_accesses",
+ "EventCode": "0x47",
+ "BriefDescription": "Misaligned loads."
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_nta",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+ "PublicDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.store_prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+ "PublicDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.load_prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Prefetch, Prefetch_T0_T1_T2.",
+ "PublicDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_not_halted_cyc",
+ "EventCode": "0x76",
+ "BriefDescription": "Cycles not in Halt."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/other.json b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
new file mode 100644
index 000000000000..b26a00d05a2e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
@@ -0,0 +1,65 @@
+[
+ {
+ "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC to IC mode switch.",
+ "PublicDescription": "OC Mode Switch. OC to IC mode switch.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "IC to OC mode switch.",
+ "PublicDescription": "OC Mode Switch. IC to OC mode switch.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "RETIRE Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. RETIRE Tokens unavailable.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.agsq_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "AGSQ Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALU tokens total unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALSQ 3 Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALSQ 2 Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALSQ 1 Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
+ "UMask": "0x1"
+ }
+]
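Each de_dis_dispatch_token_stalls0 sub-event above uses a one-hot UMask bit, so, assuming the unit-mask bits count independently, they can be combined into a single counter covering any token stall. A short sketch under that assumption:

    # One-hot unit-mask bits from the 0xaf entries above; OR-ing them
    # (assuming the bits count independently) gives one "any dispatch
    # token stall" event.
    TOKEN_STALL_BITS = (0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1)

    any_stall_umask = 0
    for bit in TOKEN_STALL_BITS:
        any_stall_umask |= bit

    print("r%x" % ((any_stall_umask << 8) | 0xaf))  # -> r7faf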
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
index 935b7dcf067d..ef69540ab61d 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
@@ -77,7 +77,7 @@
"UMask": "0x1",
"EventName": "UOPS.MS_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ. ",
+ "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ.",
"CounterMask": "1"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
index b2e681c78466..09c6de13de20 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
@@ -189,7 +189,7 @@
"UMask": "0x8",
"EventName": "BR_MISSP_TYPE_RETIRED.IND_CALL",
"SampleAfterValue": "200000",
- "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect. "
+ "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect."
},
{
"EventCode": "0x89",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
index 00bfdb5c5acb..212b117a8ffb 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -1,164 +1,352 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "Frontend",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
},
{
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
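Since the four TopdownL1 metrics above are defined to partition the issue slots, the MetricExprs can be cross-checked numerically. A small sketch using the non-SMT forms (slots = 4 * cycles), with hypothetical counter values for illustration only:

    def topdown_l1(uops_issued, uops_retired_slots, recovery_cycles,
                   idq_uops_not_delivered, cycles):
        # Mirrors the non-SMT MetricExprs above; Backend_Bound is the
        # remainder, so the four fractions sum to 1 by construction.
        slots = 4 * cycles
        frontend_bound = idq_uops_not_delivered / slots
        bad_speculation = (uops_issued - uops_retired_slots
                           + 4 * recovery_cycles) / slots
        retiring = uops_retired_slots / slots
        backend_bound = 1 - (frontend_bound + bad_speculation + retiring)
        return frontend_bound, bad_speculation, retiring, backend_bound

    # Hypothetical counts: 0.2 frontend-bound, 0.075 bad speculation,
    # 0.6 retiring, 0.125 backend-bound.
    fb, bs, ret, bb = topdown_l1(2_600_000, 2_400_000, 25_000,
                                 800_000, 1_000_000)
    assert abs(fb + bs + ret + bb - 1.0) < 1e-9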
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index 0b080b0352d8..7938bf5689ab 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -56,10 +56,10 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "UMask": "0xc1",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
@@ -68,7 +68,7 @@
{
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
+ "UMask": "0xc2",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "RFO requests that hit L2 cache.",
@@ -77,7 +77,7 @@
{
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x44",
+ "UMask": "0xc4",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
@@ -87,7 +87,7 @@
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x50",
+ "UMask": "0xd0",
"EventName": "L2_RQSTS.L2_PF_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
@@ -433,7 +433,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
@@ -445,7 +445,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
@@ -771,2628 +771,2628 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010001 ",
+ "MSRValue": "0x0000010001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that have any response type.",
+ "BriefDescription": "Counts demand data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020001 ",
+ "MSRValue": "0x0080020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020001 ",
+ "MSRValue": "0x0100020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020001 ",
+ "MSRValue": "0x0200020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020001 ",
+ "MSRValue": "0x0400020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020001 ",
+ "MSRValue": "0x1000020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020001 ",
+ "MSRValue": "0x3F80020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0001 ",
+ "MSRValue": "0x00803C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0001 ",
+ "MSRValue": "0x01003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0001 ",
+ "MSRValue": "0x02003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0001 ",
+ "MSRValue": "0x04003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0001 ",
+ "MSRValue": "0x10003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0001 ",
+ "MSRValue": "0x3F803C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010002 ",
+ "MSRValue": "0x0000010002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0002 ",
+ "MSRValue": "0x00803C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0002 ",
+ "MSRValue": "0x01003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0002 ",
+ "MSRValue": "0x02003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0002 ",
+ "MSRValue": "0x04003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0002 ",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0002 ",
+ "MSRValue": "0x3F803C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010004 ",
+ "MSRValue": "0x0000010004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that have any response type.",
+ "BriefDescription": "Counts all demand code reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020004 ",
+ "MSRValue": "0x0080020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020004 ",
+ "MSRValue": "0x0100020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020004 ",
+ "MSRValue": "0x0200020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020004 ",
+ "MSRValue": "0x0400020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020004 ",
+ "MSRValue": "0x1000020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020004 ",
+ "MSRValue": "0x3F80020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0004 ",
+ "MSRValue": "0x00803C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0004 ",
+ "MSRValue": "0x01003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0004 ",
+ "MSRValue": "0x02003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0004 ",
+ "MSRValue": "0x04003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0004 ",
+ "MSRValue": "0x10003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0004 ",
+ "MSRValue": "0x3F803C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive) have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010008 ",
+ "MSRValue": "0x0000010008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that have any response type.",
+ "BriefDescription": "Counts writebacks (modified to exclusive) have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020008 ",
+ "MSRValue": "0x0080020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020008 ",
+ "MSRValue": "0x0100020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020008 ",
+ "MSRValue": "0x0200020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020008 ",
+ "MSRValue": "0x0400020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020008 ",
+ "MSRValue": "0x1000020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020008 ",
+ "MSRValue": "0x3F80020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0008 ",
+ "MSRValue": "0x00803C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0008 ",
+ "MSRValue": "0x01003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0008 ",
+ "MSRValue": "0x02003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0008 ",
+ "MSRValue": "0x04003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0008 ",
+ "MSRValue": "0x10003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0008 ",
+ "MSRValue": "0x3F803C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010010 ",
+ "MSRValue": "0x0000010010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020010 ",
+ "MSRValue": "0x0080020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020010 ",
+ "MSRValue": "0x0100020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020010 ",
+ "MSRValue": "0x0200020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020010 ",
+ "MSRValue": "0x0400020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020010 ",
+ "MSRValue": "0x1000020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020010 ",
+ "MSRValue": "0x3F80020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0010 ",
+ "MSRValue": "0x00803C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0010 ",
+ "MSRValue": "0x01003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0010 ",
+ "MSRValue": "0x02003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0010 ",
+ "MSRValue": "0x04003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0010 ",
+ "MSRValue": "0x10003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0010 ",
+ "MSRValue": "0x3F803C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010020 ",
+ "MSRValue": "0x0000010020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020020 ",
+ "MSRValue": "0x0080020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020020 ",
+ "MSRValue": "0x0100020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020020 ",
+ "MSRValue": "0x0200020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020020 ",
+ "MSRValue": "0x0400020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020020 ",
+ "MSRValue": "0x1000020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020020 ",
+ "MSRValue": "0x3F80020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0020 ",
+ "MSRValue": "0x00803C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0020 ",
+ "MSRValue": "0x01003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0020 ",
+ "MSRValue": "0x02003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0020 ",
+ "MSRValue": "0x04003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0020 ",
+ "MSRValue": "0x10003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0020 ",
+ "MSRValue": "0x3F803C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010040 ",
+ "MSRValue": "0x0000010040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020040 ",
+ "MSRValue": "0x0080020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020040 ",
+ "MSRValue": "0x0100020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020040 ",
+ "MSRValue": "0x0200020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020040 ",
+ "MSRValue": "0x0400020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020040 ",
+ "MSRValue": "0x1000020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020040 ",
+ "MSRValue": "0x3F80020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0040 ",
+ "MSRValue": "0x00803C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0040 ",
+ "MSRValue": "0x01003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0040 ",
+ "MSRValue": "0x02003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0040 ",
+ "MSRValue": "0x04003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0040 ",
+ "MSRValue": "0x10003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0040 ",
+ "MSRValue": "0x3F803C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010080 ",
+ "MSRValue": "0x0000010080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020080 ",
+ "MSRValue": "0x0080020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020080 ",
+ "MSRValue": "0x0100020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020080 ",
+ "MSRValue": "0x0200020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020080 ",
+ "MSRValue": "0x0400020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020080 ",
+ "MSRValue": "0x1000020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020080 ",
+ "MSRValue": "0x3F80020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0080 ",
+ "MSRValue": "0x00803C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0080 ",
+ "MSRValue": "0x01003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0080 ",
+ "MSRValue": "0x02003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0080 ",
+ "MSRValue": "0x04003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0080 ",
+ "MSRValue": "0x10003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0080 ",
+ "MSRValue": "0x3F803C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010100 ",
+ "MSRValue": "0x0000010100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020100 ",
+ "MSRValue": "0x0080020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020100 ",
+ "MSRValue": "0x0100020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020100 ",
+ "MSRValue": "0x0200020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020100 ",
+ "MSRValue": "0x0400020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020100 ",
+ "MSRValue": "0x1000020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020100 ",
+ "MSRValue": "0x3F80020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0100 ",
+ "MSRValue": "0x00803C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0100 ",
+ "MSRValue": "0x01003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0100 ",
+ "MSRValue": "0x02003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0100 ",
+ "MSRValue": "0x04003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0100 ",
+ "MSRValue": "0x10003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0100 ",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010200 ",
+ "MSRValue": "0x0000010200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020200 ",
+ "MSRValue": "0x0080020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020200 ",
+ "MSRValue": "0x0100020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020200 ",
+ "MSRValue": "0x0200020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020200 ",
+ "MSRValue": "0x0400020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020200 ",
+ "MSRValue": "0x1000020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020200 ",
+ "MSRValue": "0x3F80020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0200 ",
+ "MSRValue": "0x00803C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0200 ",
+ "MSRValue": "0x01003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0200 ",
+ "MSRValue": "0x02003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0200 ",
+ "MSRValue": "0x04003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0200 ",
+ "MSRValue": "0x10003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0200 ",
+ "MSRValue": "0x3F803C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000018000 ",
+ "MSRValue": "0x0000018000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that have any response type.",
+ "BriefDescription": "Counts any other requests have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080028000 ",
+ "MSRValue": "0x0080028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100028000 ",
+ "MSRValue": "0x0100028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200028000 ",
+ "MSRValue": "0x0200028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400028000 ",
+ "MSRValue": "0x0400028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000028000 ",
+ "MSRValue": "0x1000028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80028000 ",
+ "MSRValue": "0x3F80028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c8000 ",
+ "MSRValue": "0x00803C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c8000 ",
+ "MSRValue": "0x01003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c8000 ",
+ "MSRValue": "0x02003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c8000 ",
+ "MSRValue": "0x04003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c8000 ",
+ "MSRValue": "0x10003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c8000 ",
+ "MSRValue": "0x3F803C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010090 ",
+ "MSRValue": "0x0000010090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that have any response type.",
+ "BriefDescription": "Counts all prefetch data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020090 ",
+ "MSRValue": "0x0080020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020090 ",
+ "MSRValue": "0x0100020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020090 ",
+ "MSRValue": "0x0200020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020090 ",
+ "MSRValue": "0x0400020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020090 ",
+ "MSRValue": "0x1000020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020090 ",
+ "MSRValue": "0x3F80020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0090 ",
+ "MSRValue": "0x00803C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0090 ",
+ "MSRValue": "0x01003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0090 ",
+ "MSRValue": "0x02003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0090 ",
+ "MSRValue": "0x04003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0090 ",
+ "MSRValue": "0x10003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0090 ",
+ "MSRValue": "0x3F803C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010120 ",
+ "MSRValue": "0x0000010120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that have any response type.",
+ "BriefDescription": "Counts prefetch RFOs have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020120 ",
+ "MSRValue": "0x0080020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020120 ",
+ "MSRValue": "0x0100020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020120 ",
+ "MSRValue": "0x0200020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020120 ",
+ "MSRValue": "0x0400020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020120 ",
+ "MSRValue": "0x1000020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020120 ",
+ "MSRValue": "0x3F80020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0120 ",
+ "MSRValue": "0x00803C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0120 ",
+ "MSRValue": "0x01003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0120 ",
+ "MSRValue": "0x02003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0120 ",
+ "MSRValue": "0x04003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0120 ",
+ "MSRValue": "0x10003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0120 ",
+ "MSRValue": "0x3F803C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010240 ",
+ "MSRValue": "0x0000010240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that have any response type.",
+ "BriefDescription": "Counts all prefetch code reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020240 ",
+ "MSRValue": "0x0080020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020240 ",
+ "MSRValue": "0x0100020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020240 ",
+ "MSRValue": "0x0200020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020240 ",
+ "MSRValue": "0x0400020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020240 ",
+ "MSRValue": "0x1000020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020240 ",
+ "MSRValue": "0x3F80020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0240 ",
+ "MSRValue": "0x00803C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0240 ",
+ "MSRValue": "0x01003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0240 ",
+ "MSRValue": "0x02003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0240 ",
+ "MSRValue": "0x04003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0240 ",
+ "MSRValue": "0x10003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0240 ",
+ "MSRValue": "0x3F803C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010091 ",
+ "MSRValue": "0x0000010091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that have any response type.",
+ "BriefDescription": "Counts all demand & prefetch data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020091 ",
+ "MSRValue": "0x0080020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020091 ",
+ "MSRValue": "0x0100020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020091 ",
+ "MSRValue": "0x0200020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020091 ",
+ "MSRValue": "0x0400020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020091 ",
+ "MSRValue": "0x1000020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020091 ",
+ "MSRValue": "0x3F80020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0091 ",
+ "MSRValue": "0x00803C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0091 ",
+ "MSRValue": "0x01003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0091 ",
+ "MSRValue": "0x02003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0091 ",
+ "MSRValue": "0x04003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0091 ",
+ "MSRValue": "0x10003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0091 ",
+ "MSRValue": "0x3F803C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010122 ",
+ "MSRValue": "0x0000010122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.",
+ "BriefDescription": "Counts all demand & prefetch RFOs have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020122 ",
+ "MSRValue": "0x0080020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020122 ",
+ "MSRValue": "0x0100020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020122 ",
+ "MSRValue": "0x0200020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020122 ",
+ "MSRValue": "0x0400020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020122 ",
+ "MSRValue": "0x1000020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020122 ",
+ "MSRValue": "0x3F80020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0122 ",
+ "MSRValue": "0x00803C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0122 ",
+ "MSRValue": "0x01003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0122 ",
+ "MSRValue": "0x02003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0122 ",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0122 ",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0122 ",
+ "MSRValue": "0x3F803C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
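
[Context for the OFFCORE_RESPONSE entries above: each one pairs the event select ("EventCode" 0xB7/0xBB with "UMask" 0x1) with a request/supplier/snoop mask ("MSRValue") that must be written to one of the dedicated offcore-response MSRs ("MSRIndex" 0x1a6/0x1a7). A minimal sketch, not part of this patch, of reaching such an event through perf_event_open(2), assuming the usual raw encoding (umask << 8 | event in attr.config) with attr.config1 carrying the MSR value; constants come from the OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP entry and error handling is trimmed:]

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = 0x01B7;		/* UMask 0x1 << 8 | EventCode 0xB7 */
	attr.config1 = 0x3F803C0091ULL;	/* MSRValue destined for MSR 0x1A6/0x1A7 */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... run the workload of interest ... */
	read(fd, &count, sizeof(count));
	printf("offcore responses: %lld\n", count);
	close(fd);
	return 0;
}

[Once these JSON files are installed, the same counter should be addressable by its symbolic name instead, e.g. "perf stat -e offcore_response.all_data_rd.l3_hit.any_snoop -a sleep 1".]
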
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
index 689d478dae93..15291239c128 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
@@ -1,24 +1,26 @@
[
{
- "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "BDM30",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"Errata": "BDM30",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -45,7 +47,7 @@
"UMask": "0x3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -54,7 +56,7 @@
"UMask": "0x4",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -63,7 +65,7 @@
"UMask": "0x8",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -72,7 +74,7 @@
"UMask": "0x10",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -81,7 +83,7 @@
"UMask": "0x15",
"EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
"SampleAfterValue": "2000006",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -90,7 +92,7 @@
"UMask": "0x20",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -99,7 +101,7 @@
"UMask": "0x2a",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -108,57 +110,62 @@
"UMask": "0x3c",
"EventName": "FP_ARITH_INST_RETIRED.PACKED",
"SampleAfterValue": "2000004",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "FP_ASSIST.X87_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
+ "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "FP_ASSIST.X87_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
+ "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "BriefDescription": "SSE* FP micro-code assist when output value is invalid. (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts any input SSE* floating-point (FP) assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "FP_ASSIST.SIMD_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values",
+ "BriefDescription": "Any input SSE* FP Assist - (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "PEBS": "1",
+ "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1. Uses PEBS.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1e",
"EventName": "FP_ASSIST.ANY",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "BriefDescription": "Counts any FP_ASSIST umask was incrementing (Precise Event)",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
}
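
[The FP_ARITH_INST_RETIRED descriptions above state fixed per-count weights: scalar counts represent one computation each, 128-bit packed double two, 128-bit packed single four, 256-bit packed double four, and 256-bit packed single eight, with FMA-class instructions already counted twice at the event level. A sketch of the FLOP aggregation those weights imply; the helper name is illustrative, not something this patch defines:]

#include <stdint.h>

/* Weighted sum of FP_ARITH_INST_RETIRED.* counts; the weights are the
 * "Each count represents N computations" figures quoted above. */
static uint64_t fp_arith_flops(uint64_t scalar,
                               uint64_t pd128, uint64_t ps128,
                               uint64_t pd256, uint64_t ps256)
{
	return scalar * 1 +
	       pd128 * 2 + ps128 * 4 +
	       pd256 * 4 + ps256 * 8;
}

[Dividing that sum by elapsed time gives a FLOPS figure; derived GFLOPs metrics are commonly assembled from exactly these umasks.]
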
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
index 7142c76d7f11..aa4a5d762f21 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
@@ -211,7 +211,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -274,7 +274,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
index c9154cebbdf0..b6b5247d3d5a 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
@@ -311,7 +311,7 @@
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above four.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above four.",
"EventCode": "0xCD",
"MSRValue": "0x4",
"Counter": "3",
@@ -320,13 +320,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 4",
+ "BriefDescription": "Randomly selected loads with latency value being above 4",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above eight.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
"EventCode": "0xCD",
"MSRValue": "0x8",
"Counter": "3",
@@ -335,13 +335,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"SampleAfterValue": "50021",
- "BriefDescription": "Loads with latency value being above 8",
+ "BriefDescription": "Randomly selected loads with latency value being above 8",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 16.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
"EventCode": "0xCD",
"MSRValue": "0x10",
"Counter": "3",
@@ -350,13 +350,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"SampleAfterValue": "20011",
- "BriefDescription": "Loads with latency value being above 16",
+ "BriefDescription": "Randomly selected loads with latency value being above 16",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 32.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
"EventCode": "0xCD",
"MSRValue": "0x20",
"Counter": "3",
@@ -365,13 +365,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100007",
- "BriefDescription": "Loads with latency value being above 32",
+ "BriefDescription": "Randomly selected loads with latency value being above 32",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 64.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 64.",
"EventCode": "0xCD",
"MSRValue": "0x40",
"Counter": "3",
@@ -380,13 +380,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"SampleAfterValue": "2003",
- "BriefDescription": "Loads with latency value being above 64",
+ "BriefDescription": "Randomly selected loads with latency value being above 64",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 128.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
"EventCode": "0xCD",
"MSRValue": "0x80",
"Counter": "3",
@@ -395,13 +395,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"SampleAfterValue": "1009",
- "BriefDescription": "Loads with latency value being above 128",
+ "BriefDescription": "Randomly selected loads with latency value being above 128",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 256.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
"EventCode": "0xCD",
"MSRValue": "0x100",
"Counter": "3",
@@ -410,13 +410,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"SampleAfterValue": "503",
- "BriefDescription": "Loads with latency value being above 256",
+ "BriefDescription": "Randomly selected loads with latency value being above 256",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 512.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
"EventCode": "0xCD",
"MSRValue": "0x200",
"Counter": "3",
@@ -425,2620 +425,2620 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"SampleAfterValue": "101",
- "BriefDescription": "Loads with latency value being above 512",
+ "BriefDescription": "Randomly selected loads with latency value being above 512",
"TakenAlone": "1",
"CounterHTOff": "3"
},
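The eight LOAD_LATENCY_GT_* entries above are one facility: EventCode 0xCD sampled via PEBS, with the latency cutoff (the MSRValue, 4 through 512) programmed into MSR 0x3F6, and progressively smaller SampleAfterValue defaults as the cutoff rises, since slower loads are rarer. A minimal sketch of driving one tier through perf_event_open(2) follows; it assumes the event's usual umask of 0x1 (not visible in this hunk) and that the kernel maps the MSR 0x3F6 cutoff to the ldlat field in config1, as the x86 perf driver does for this event family.

/* Hypothetical sketch: sample loads slower than 32 cycles (the GT_32 tier). */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1cd;              /* EventCode 0xCD, assumed UMask 0x1 */
	attr.config1 = 32;                /* ldlat: latency cutoff loaded into MSR 0x3F6 */
	attr.sample_period = 100007;      /* SampleAfterValue of the GT_32 entry above */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR | PERF_SAMPLE_WEIGHT;
	attr.precise_ip = 2;              /* "PEBS": "2": precise sampling required */
	attr.exclude_kernel = 1;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* mmap(2) the fd and consume PERF_RECORD_SAMPLE records here;
	 * PERF_SAMPLE_WEIGHT carries the measured load latency. */
	close(fd);
	return 0;
}

Note "TakenAlone": "1" in these entries: the event cannot share a counter group, which is why it is pinned to counter 3.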
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020001 ",
+ "MSRValue": "0x2000020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
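The OFFCORE_RESPONSE.* entries in this hunk all follow one pattern: event select 0xB7 or 0xBB with umask 0x1, paired with a dedicated response MSR (0x1A6 or 0x1A7, the MSRIndex) holding the MSRValue bit mask; the mask combines a request type in the low bits (e.g. 0x1 = DEMAND_DATA_RD, 0x2 = DEMAND_RFO), a supplier field, and a snoop field. A sketch of counting one such combination, assuming the offcore_rsp-to-config1 mapping used by the x86 perf driver:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;            /* EventCode 0xB7, UMask 0x1 */
	attr.config1 = 0x3F84000001ULL;  /* MSRValue: DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP */
	attr.disabled = 1;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}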
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0001 ",
+ "MSRValue": "0x20003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000001 ",
+ "MSRValue": "0x0084000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000001 ",
+ "MSRValue": "0x0104000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000001 ",
+ "MSRValue": "0x0204000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000001 ",
+ "MSRValue": "0x0404000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000001 ",
+ "MSRValue": "0x1004000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000001 ",
+ "MSRValue": "0x2004000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000001 ",
+ "MSRValue": "0x3F84000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000001 ",
+ "MSRValue": "0x00BC000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000001 ",
+ "MSRValue": "0x013C000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000001 ",
+ "MSRValue": "0x023C000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000001 ",
+ "MSRValue": "0x043C000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0002 ",
+ "MSRValue": "0x20003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000002 ",
+ "MSRValue": "0x3F84000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000002 ",
+ "MSRValue": "0x00BC000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000002 ",
+ "MSRValue": "0x013C000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000002 ",
+ "MSRValue": "0x023C000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000002 ",
+ "MSRValue": "0x043C000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020004 ",
+ "MSRValue": "0x2000020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0004 ",
+ "MSRValue": "0x20003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000004 ",
+ "MSRValue": "0x0084000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000004 ",
+ "MSRValue": "0x0104000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000004 ",
+ "MSRValue": "0x0204000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000004 ",
+ "MSRValue": "0x0404000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000004 ",
+ "MSRValue": "0x1004000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000004 ",
+ "MSRValue": "0x2004000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000004 ",
+ "MSRValue": "0x3F84000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000004 ",
+ "MSRValue": "0x00BC000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000004 ",
+ "MSRValue": "0x013C000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000004 ",
+ "MSRValue": "0x023C000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000004 ",
+ "MSRValue": "0x043C000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020008 ",
+ "MSRValue": "0x2000020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0008 ",
+ "MSRValue": "0x20003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000008 ",
+ "MSRValue": "0x0084000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000008 ",
+ "MSRValue": "0x0104000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000008 ",
+ "MSRValue": "0x0204000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000008 ",
+ "MSRValue": "0x0404000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000008 ",
+ "MSRValue": "0x1004000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000008 ",
+ "MSRValue": "0x2004000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000008 ",
+ "MSRValue": "0x3F84000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000008 ",
+ "MSRValue": "0x00BC000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000008 ",
+ "MSRValue": "0x013C000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000008 ",
+ "MSRValue": "0x023C000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000008 ",
+ "MSRValue": "0x043C000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020010 ",
+ "MSRValue": "0x2000020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0010 ",
+ "MSRValue": "0x20003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000010 ",
+ "MSRValue": "0x0084000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000010 ",
+ "MSRValue": "0x0104000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000010 ",
+ "MSRValue": "0x0204000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000010 ",
+ "MSRValue": "0x0404000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000010 ",
+ "MSRValue": "0x1004000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000010 ",
+ "MSRValue": "0x2004000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000010 ",
+ "MSRValue": "0x3F84000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000010 ",
+ "MSRValue": "0x00BC000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000010 ",
+ "MSRValue": "0x013C000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000010 ",
+ "MSRValue": "0x023C000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000010 ",
+ "MSRValue": "0x043C000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020020 ",
+ "MSRValue": "0x2000020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0020 ",
+ "MSRValue": "0x20003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000020 ",
+ "MSRValue": "0x0084000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000020 ",
+ "MSRValue": "0x0104000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000020 ",
+ "MSRValue": "0x0204000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000020 ",
+ "MSRValue": "0x0404000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000020 ",
+ "MSRValue": "0x1004000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000020 ",
+ "MSRValue": "0x2004000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000020 ",
+ "MSRValue": "0x3F84000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000020 ",
+ "MSRValue": "0x00BC000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000020 ",
+ "MSRValue": "0x013C000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000020 ",
+ "MSRValue": "0x023C000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000020 ",
+ "MSRValue": "0x043C000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020040 ",
+ "MSRValue": "0x2000020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0040 ",
+ "MSRValue": "0x20003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000040 ",
+ "MSRValue": "0x0084000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000040 ",
+ "MSRValue": "0x0104000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000040 ",
+ "MSRValue": "0x0204000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000040 ",
+ "MSRValue": "0x0404000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000040 ",
+ "MSRValue": "0x1004000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000040 ",
+ "MSRValue": "0x2004000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000040 ",
+ "MSRValue": "0x3F84000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000040 ",
+ "MSRValue": "0x00BC000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000040 ",
+ "MSRValue": "0x013C000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000040 ",
+ "MSRValue": "0x023C000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000040 ",
+ "MSRValue": "0x043C000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020080 ",
+ "MSRValue": "0x2000020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0080 ",
+ "MSRValue": "0x20003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000080 ",
+ "MSRValue": "0x0084000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000080 ",
+ "MSRValue": "0x0104000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000080 ",
+ "MSRValue": "0x0204000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000080 ",
+ "MSRValue": "0x0404000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000080 ",
+ "MSRValue": "0x1004000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000080 ",
+ "MSRValue": "0x2004000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000080 ",
+ "MSRValue": "0x3F84000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000080 ",
+ "MSRValue": "0x00BC000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000080 ",
+ "MSRValue": "0x013C000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000080 ",
+ "MSRValue": "0x023C000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000080 ",
+ "MSRValue": "0x043C000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020100 ",
+ "MSRValue": "0x2000020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0100 ",
+ "MSRValue": "0x20003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000100 ",
+ "MSRValue": "0x0084000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000100 ",
+ "MSRValue": "0x0104000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000100 ",
+ "MSRValue": "0x0204000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000100 ",
+ "MSRValue": "0x0404000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000100 ",
+ "MSRValue": "0x1004000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000100 ",
+ "MSRValue": "0x2004000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000100 ",
+ "MSRValue": "0x3F84000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000100 ",
+ "MSRValue": "0x00BC000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000100 ",
+ "MSRValue": "0x013C000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000100 ",
+ "MSRValue": "0x023C000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000100 ",
+ "MSRValue": "0x043C000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020200 ",
+ "MSRValue": "0x2000020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0200 ",
+ "MSRValue": "0x20003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000200 ",
+ "MSRValue": "0x0084000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000200 ",
+ "MSRValue": "0x0104000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000200 ",
+ "MSRValue": "0x0204000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000200 ",
+ "MSRValue": "0x0404000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000200 ",
+ "MSRValue": "0x1004000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000200 ",
+ "MSRValue": "0x2004000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000200 ",
+ "MSRValue": "0x3F84000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000200 ",
+ "MSRValue": "0x00BC000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000200 ",
+ "MSRValue": "0x013C000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000200 ",
+ "MSRValue": "0x023C000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000200 ",
+ "MSRValue": "0x043C000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000028000 ",
+ "MSRValue": "0x2000028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c8000 ",
+ "MSRValue": "0x20003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084008000 ",
+ "MSRValue": "0x0084008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104008000 ",
+ "MSRValue": "0x0104008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204008000 ",
+ "MSRValue": "0x0204008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404008000 ",
+ "MSRValue": "0x0404008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004008000 ",
+ "MSRValue": "0x1004008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004008000 ",
+ "MSRValue": "0x2004008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84008000 ",
+ "MSRValue": "0x3F84008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc008000 ",
+ "MSRValue": "0x00BC008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c008000 ",
+ "MSRValue": "0x013C008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c008000 ",
+ "MSRValue": "0x023C008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c008000 ",
+ "MSRValue": "0x043C008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020090 ",
+ "MSRValue": "0x2000020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0090 ",
+ "MSRValue": "0x20003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000090 ",
+ "MSRValue": "0x0084000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000090 ",
+ "MSRValue": "0x0104000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000090 ",
+ "MSRValue": "0x0204000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000090 ",
+ "MSRValue": "0x0404000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000090 ",
+ "MSRValue": "0x1004000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000090 ",
+ "MSRValue": "0x2004000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000090 ",
+ "MSRValue": "0x3F84000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000090 ",
+ "MSRValue": "0x00BC000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000090 ",
+ "MSRValue": "0x013C000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000090 ",
+ "MSRValue": "0x023C000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000090 ",
+ "MSRValue": "0x043C000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020120 ",
+ "MSRValue": "0x2000020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0120 ",
+ "MSRValue": "0x20003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000120 ",
+ "MSRValue": "0x0084000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000120 ",
+ "MSRValue": "0x0104000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000120 ",
+ "MSRValue": "0x0204000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000120 ",
+ "MSRValue": "0x0404000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000120 ",
+ "MSRValue": "0x1004000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000120 ",
+ "MSRValue": "0x2004000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000120 ",
+ "MSRValue": "0x3F84000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000120 ",
+ "MSRValue": "0x00BC000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000120 ",
+ "MSRValue": "0x013C000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000120 ",
+ "MSRValue": "0x023C000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000120 ",
+ "MSRValue": "0x043C000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020240 ",
+ "MSRValue": "0x2000020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0240 ",
+ "MSRValue": "0x20003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000240 ",
+ "MSRValue": "0x0084000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000240 ",
+ "MSRValue": "0x0104000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000240 ",
+ "MSRValue": "0x0204000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000240 ",
+ "MSRValue": "0x0404000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000240 ",
+ "MSRValue": "0x1004000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000240 ",
+ "MSRValue": "0x2004000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000240 ",
+ "MSRValue": "0x3F84000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000240 ",
+ "MSRValue": "0x00BC000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000240 ",
+ "MSRValue": "0x013C000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000240 ",
+ "MSRValue": "0x023C000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000240 ",
+ "MSRValue": "0x043C000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020091 ",
+ "MSRValue": "0x2000020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0091 ",
+ "MSRValue": "0x20003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000091 ",
+ "MSRValue": "0x0084000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000091 ",
+ "MSRValue": "0x0104000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000091 ",
+ "MSRValue": "0x0204000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000091 ",
+ "MSRValue": "0x0404000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000091 ",
+ "MSRValue": "0x1004000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000091 ",
+ "MSRValue": "0x2004000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000091 ",
+ "MSRValue": "0x3F84000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000091 ",
+ "MSRValue": "0x00BC000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000091 ",
+ "MSRValue": "0x013C000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000091 ",
+ "MSRValue": "0x023C000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000091 ",
+ "MSRValue": "0x043C000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020122 ",
+ "MSRValue": "0x2000020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0122 ",
+ "MSRValue": "0x20003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000122 ",
+ "MSRValue": "0x0084000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000122 ",
+ "MSRValue": "0x0104000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000122 ",
+ "MSRValue": "0x0204000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000122 ",
+ "MSRValue": "0x0404000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000122 ",
+ "MSRValue": "0x1004000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000122 ",
+ "MSRValue": "0x2004000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000122 ",
+ "MSRValue": "0x3F84000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000122 ",
+ "MSRValue": "0x00BC000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000122 ",
+ "MSRValue": "0x013C000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000122 ",
+ "MSRValue": "0x023C000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000122 ",
+ "MSRValue": "0x043C000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
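
The hunks above only reword and reformat the generated OFFCORE_RESPONSE tables, but they also show everything a consumer of these files needs: "EventCode"/"UMask" select the offcore-response event itself, while "MSRValue" is the request/supplier/snoop bitmask that the kernel loads into the auxiliary MSR named by "MSRIndex" (0x1a6 for event 0xB7, 0x1a7 for 0xBB). As a minimal sketch, not part of this patch, the C fragment below opens one of the events listed above, OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP, directly via perf_event_open(2); in perf's raw x86 encoding, attr.config carries (umask << 8) | event and attr.config1 carries the offcore-response MSR value.

/* Sketch only: open OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP
 * (EventCode 0xB7, UMask 0x1, MSRValue 0x3F84000091 from the table above)
 * on the calling thread. Error handling is minimal; values assume Broadwell.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;            /* (UMask 0x1 << 8) | EventCode 0xB7 */
	attr.config1 = 0x3F84000091ULL;  /* offcore response MSR bitmask      */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("offcore responses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

On kernels that expose the cpu PMU's offcore_rsp format, the same event should also be reachable from the command line, e.g. perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x3F84000091/.
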
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index 999cf3066363..bb25574b8d21 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -1,7 +1,6 @@
[
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -11,7 +10,6 @@
},
{
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -31,7 +28,6 @@
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -317,7 +313,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -786,8 +782,8 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
+ "PublicDescription": "This event counts resource-related stall cycles.",
+ "EventCode": "0xa2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "RESOURCE_STALLS.ANY",
@@ -973,6 +969,7 @@
"CounterHTOff": "2"
},
{
+ "PublicDescription": "Number of Uops delivered by the LSD.",
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -1147,7 +1144,8 @@
"CounterHTOff": "1"
},
{
- "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
"EventCode": "0xC0",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -1157,12 +1155,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PEBS": "1",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1178,26 +1176,28 @@
"Data_LA": "1"
},
{
- "PublicDescription": "This event counts cycles without actually retired uops.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts cycles without actually retired uops.",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
+ "BriefDescription": "Cycles no executable uops retired (Precise Event)",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "PEBS": "1",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to PEBS uops retired event.",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS uops retired event.",
"CounterMask": "10",
"CounterHTOff": "0,1,2,3"
},
@@ -1320,13 +1320,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts not taken branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "BriefDescription": "Counts all not taken macro branch instructions retired. (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1341,14 +1342,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts far branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x40",
"Errata": "BDW98",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
+ "BriefDescription": "Counts the number of far branch instructions retired.(Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index 4ad425312bdc..bf243fe2a0ec 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -439,7 +439,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -451,7 +451,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index 0d04bf9db000..e2f0540625a2 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
@@ -10,7 +9,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
"Counter": "Fixed counter 1",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "Fixed counter 1",
@@ -30,7 +27,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
"Counter": "Fixed counter 2",
@@ -322,7 +318,7 @@
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"Counter": "0,1,2,3",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
index 5a7f1ec24200..c6f9762f32c0 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -1,164 +1,370 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
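
The four TopdownL1 expressions above partition all issue slots into Frontend Bound, Bad Speculation, Retiring and Backend Bound. A minimal Python sketch of the non-SMT forms, with invented counter readings (4 issue slots per cycle, as the descriptions state):

    # Invented raw counts for one measurement interval.
    cycles = 1_000_000                      # CPU_CLK_UNHALTED.THREAD
    idq_uops_not_delivered_core = 400_000   # IDQ_UOPS_NOT_DELIVERED.CORE
    uops_issued_any = 3_200_000             # UOPS_ISSUED.ANY
    uops_retired_retire_slots = 2_800_000   # UOPS_RETIRED.RETIRE_SLOTS
    int_misc_recovery_cycles = 20_000       # INT_MISC.RECOVERY_CYCLES

    slots = 4 * cycles  # total issue-pipeline slots (the SLOTS metric)
    frontend_bound = idq_uops_not_delivered_core / slots
    bad_speculation = (uops_issued_any - uops_retired_retire_slots
                       + 4 * int_misc_recovery_cycles) / slots
    retiring = uops_retired_retire_slots / slots
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

    # The four fractions sum to 1.0 by construction.
    print(frontend_bound, bad_speculation, retiring, backend_bound)
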
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
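
IPC, UPI, IpTB and BpTB above are plain ratios of retired-event counts. Evaluated with invented numbers:

    # Invented raw counts for one interval.
    inst_retired = 2_400_000      # INST_RETIRED.ANY
    cycles = 1_000_000            # CPU_CLK_UNHALTED.THREAD
    retire_slots = 2_800_000      # UOPS_RETIRED.RETIRE_SLOTS
    all_branches = 480_000        # BR_INST_RETIRED.ALL_BRANCHES
    taken_branches = 300_000      # BR_INST_RETIRED.NEAR_TAKEN

    ipc = inst_retired / cycles            # 2.40 instructions per cycle
    upi = retire_slots / inst_retired      # ~1.17 uops per instruction
    iptb = inst_retired / taken_branches   # 8.0 instructions per taken branch
    bptb = all_branches / taken_branches   # 1.6 branches per taken branch
    print(ipc, upi, iptb, bptb)
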
+ {
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "Frontend",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
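
The _SMT variants replace 'cycles' with an estimate of per-core clocks built from per-thread counts: halve CPU_CLK_UNHALTED.THREAD, then scale by the fraction of time only one thread was active. A sketch of that sub-expression with invented values:

    # Invented per-thread counts with SMT enabled.
    clk_thread = 1_000_000        # CPU_CLK_UNHALTED.THREAD
    one_thread_active = 150_000   # CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
    ref_xclk = 250_000            # CPU_CLK_UNHALTED.REF_XCLK

    core_clks = (clk_thread / 2) * (1 + one_thread_active / ref_xclk)
    slots_smt = 4 * core_clks     # per-core issue slots, as in SLOTS_SMT
    print(core_clks, slots_smt)   # 800000.0 core clocks, 3200000.0 slots
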
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
+ },
+ {
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
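
IpMispredict above is the inverse misprediction rate: retired instructions per non-speculative branch misprediction, so higher is better. With invented counts:

    inst_retired = 2_400_000      # INST_RETIRED.ANY
    br_misp_all_branches = 4_000  # BR_MISP_RETIRED.ALL_BRANCHES

    ip_mispredict = inst_retired / br_misp_all_branches
    print(ip_mispredict)  # 600.0 instructions per misprediction
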
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
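
Load_Miss_Real_Latency divides total outstanding-miss occupancy by the number of completed L1-miss loads (including fill-buffer hits); MLP divides the same occupancy by the cycles with at least one miss outstanding. With invented counts:

    l1d_pend_miss_pending = 900_000   # occupancy: sum over cycles of misses in flight
    pending_cycles = 300_000          # cycles with >= 1 miss outstanding
    l1_miss_loads = 50_000            # MEM_LOAD_UOPS_RETIRED.L1_MISS
    hit_lfb_loads = 10_000            # MEM_LOAD_UOPS_RETIRED.HIT_LFB

    latency = l1d_pend_miss_pending / (l1_miss_loads + hit_lfb_loads)
    mlp = l1d_pend_miss_pending / pending_cycles
    print(latency, mlp)  # 15.0 core cycles per miss, 3.0 misses in flight
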
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles))",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
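
The three fill-bandwidth metrics above all follow the same pattern: fills times 64 bytes per cache line, scaled to GB/s over the measurement wall time. With invented counts:

    # Invented counts over a 0.5 s window.
    l1d_replacement = 40_000_000       # L1D.REPLACEMENT
    l2_lines_in = 12_000_000           # L2_LINES_IN.ALL
    llc_miss = 3_000_000               # LONGEST_LAT_CACHE.MISS
    duration_time = 0.5                # seconds

    def fill_bw(lines, seconds):
        # Each fill moves one 64-byte cache line.
        return 64 * lines / 1_000_000_000 / seconds

    print(fill_bw(l1d_replacement, duration_time))  # 5.12 GB/s into L1D
    print(fill_bw(l2_lines_in, duration_time))      # 1.536 GB/s into L2
    print(fill_bw(llc_miss, duration_time))         # 0.384 GB/s into L3
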
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
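
The MPKI family above normalizes miss counts to misses per thousand retired instructions, which makes workloads of different lengths comparable. With invented counts:

    inst_retired = 2_400_000
    l1_miss = 48_000   # MEM_LOAD_UOPS_RETIRED.L1_MISS
    l2_miss = 7_200    # MEM_LOAD_UOPS_RETIRED.L2_MISS
    l3_miss = 1_200    # MEM_LOAD_UOPS_RETIRED.L3_MISS

    def mpki(misses, instructions):
        return 1000 * misses / instructions

    print(mpki(l1_miss, inst_retired))  # L1MPKI = 20.0
    print(mpki(l2_miss, inst_retired))  # L2MPKI = 3.0
    print(mpki(l3_miss, inst_retired))  # L3MPKI = 0.5
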
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
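
The GFLOPs expression above weights each retired FP instruction by the number of elements it operates on: 1 for scalar, 2 for 128-bit packed double, 4 for 128-bit packed single and 256-bit packed double, 8 for 256-bit packed single. With invented counts over a 1 s window:

    scalar = 100_000_000           # SCALAR_SINGLE + SCALAR_DOUBLE
    pkd128_dp = 20_000_000         # 128B_PACKED_DOUBLE
    pkd128_sp_256_dp = 10_000_000  # 128B_PACKED_SINGLE + 256B_PACKED_DOUBLE
    pkd256_sp = 5_000_000          # 256B_PACKED_SINGLE
    duration_time = 1.0

    flops = 1*scalar + 2*pkd128_dp + 4*pkd128_sp_256_dp + 8*pkd256_sp
    print(flops / 1_000_000_000 / duration_time)  # 0.22 GFLOP/s
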
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "DRAM_Read_Latency"
+ },
+ {
+ "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
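
DRAM_BW_Use and DRAM_Read_Latency above come from uncore counters: CAS reads and writes move 64 bytes each, and average read latency is occupancy over inserts, converted to nanoseconds using the uncore clock rate (per the cbox expressions above; the event-name comments below are an interpretation of those raw encodings). With invented counts:

    # Invented uncore counts over a 2 s window.
    cas_reads = 50_000_000         # uncore_imc cas_count_read
    cas_writes = 25_000_000        # uncore_imc cas_count_write
    occupancy = 9_000_000_000      # cbox event 0x36 (read-request occupancy)
    inserts = 60_000_000           # cbox event 0x35 (read-request inserts)
    uncore_clocks = 4_000_000_000  # cbox_0 clockticks
    duration_time = 2.0

    dram_bw = 64 * (cas_reads + cas_writes) / 1e9 / duration_time  # GB/s
    uncore_hz = uncore_clocks / duration_time
    latency_ns = 1e9 * (occupancy / inserts) / uncore_hz
    print(dram_bw, latency_ns)  # 2.4 GB/s, 75.0 ns
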
+ {
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
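
All of the residency metrics at the end of this file share one shape: a cstate residency counter divided by the TSC over the same interval, times 100. With invented counts:

    c6_residency = 300_000_000   # cstate_core c6-residency ticks
    tsc = 2_000_000_000          # msr tsc ticks over the same interval

    print(c6_residency / tsc * 100)  # C6_Core_Residency = 15.0 percent
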
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index 141b1080429d..75a3098d5775 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -57,17 +57,17 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
+ "UMask": "0xc1",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x24",
- "UMask": "0x42",
+ "UMask": "0xc2",
"BriefDescription": "RFO requests that hit L2 cache.",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.RFO_HIT",
@@ -76,7 +76,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x44",
+ "UMask": "0xc4",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.CODE_RD_HIT",
@@ -85,7 +85,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x50",
+ "UMask": "0xd0",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.L2_PF_HIT",
@@ -396,24 +396,24 @@
{
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops that miss the STLB.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This event counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
+ "BriefDescription": "Retired store uops that miss the STLB.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This event counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -421,37 +421,37 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops with locked access.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"Errata": "BDM35",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This event counts load uops with locked access retired to the architected path.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -459,24 +459,24 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
+ "BriefDescription": "All retired load uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
+ "BriefDescription": "All retired store uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -484,69 +484,69 @@
{
"EventCode": "0xD1",
"UMask": "0x1",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"Errata": "BDM35",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This event counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This event counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This event counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -558,83 +558,84 @@
{
"EventCode": "0xD1",
"UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This event counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This event counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This event counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This event counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x1",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "BDE70, BDM100",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -646,7 +647,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -658,7 +659,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -810,12 +811,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all requests that hit in the L3",
- "MSRValue": "0x3f803c8fff",
+ "BriefDescription": "Counts all requests hit in the L3",
+ "MSRValue": "0x3F803C8FFF",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -823,12 +824,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c07f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C07F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -836,12 +837,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c07f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C07F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -849,12 +850,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0244",
+ "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -862,12 +863,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0122",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -875,12 +876,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0122",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -888,12 +889,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0091",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -901,12 +902,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0091",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -914,12 +915,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
- "MSRValue": "0x3f803c0200",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "MSRValue": "0x3F803C0200",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -927,12 +928,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
- "MSRValue": "0x3f803c0100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -940,12 +941,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0002",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -953,12 +954,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3",
- "MSRValue": "0x3f803c0002",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3",
+ "MSRValue": "0x3F803C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
index d7b9d9c9c518..ba0e0c4e74eb 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
@@ -42,7 +42,7 @@
{
"EventCode": "0xC7",
"UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"SampleAfterValue": "2000003",
@@ -51,7 +51,7 @@
{
"EventCode": "0xC7",
"UMask": "0x4",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -60,7 +60,7 @@
{
"EventCode": "0xC7",
"UMask": "0x8",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -69,7 +69,7 @@
{
"EventCode": "0xC7",
"UMask": "0x10",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -78,7 +78,7 @@
{
"EventCode": "0xC7",
"UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
"SampleAfterValue": "2000006",
@@ -87,7 +87,7 @@
{
"EventCode": "0xc7",
"UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -96,7 +96,7 @@
{
"EventCode": "0xC7",
"UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
@@ -105,7 +105,7 @@
{
"EventCode": "0xC7",
"UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.PACKED",
"SampleAfterValue": "2000004",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
index d79a5cfea44b..ecb413bb67ca 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
@@ -170,11 +170,11 @@
{
"EventCode": "0xc8",
"UMask": "0x4",
- "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
+ "BriefDescription": "Number of times HLE abort was triggered",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
+ "PublicDescription": "Number of times HLE abort was triggered.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -251,11 +251,11 @@
{
"EventCode": "0xc9",
"UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
+ "BriefDescription": "Number of times RTM abort was triggered",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
+ "PublicDescription": "Number of times RTM abort was triggered .",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -312,14 +312,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 4",
+ "BriefDescription": "Randomly selected loads with latency value being above 4",
"PEBS": "2",
"MSRValue": "0x4",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above four.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above four.",
"TakenAlone": "1",
"SampleAfterValue": "100003",
"CounterHTOff": "3"
@@ -327,14 +327,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 8",
+ "BriefDescription": "Randomly selected loads with latency value being above 8",
"PEBS": "2",
"MSRValue": "0x8",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above eight.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
"TakenAlone": "1",
"SampleAfterValue": "50021",
"CounterHTOff": "3"
@@ -342,14 +342,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 16",
+ "BriefDescription": "Randomly selected loads with latency value being above 16",
"PEBS": "2",
"MSRValue": "0x10",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 16.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
"TakenAlone": "1",
"SampleAfterValue": "20011",
"CounterHTOff": "3"
@@ -357,14 +357,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 32",
+ "BriefDescription": "Randomly selected loads with latency value being above 32",
"PEBS": "2",
"MSRValue": "0x20",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 32.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "3"
@@ -372,14 +372,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 64",
+ "BriefDescription": "Randomly selected loads with latency value being above 64",
"PEBS": "2",
"MSRValue": "0x40",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 64.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 64.",
"TakenAlone": "1",
"SampleAfterValue": "2003",
"CounterHTOff": "3"
@@ -387,14 +387,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 128",
+ "BriefDescription": "Randomly selected loads with latency value being above 128",
"PEBS": "2",
"MSRValue": "0x80",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 128.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
"TakenAlone": "1",
"SampleAfterValue": "1009",
"CounterHTOff": "3"
@@ -402,14 +402,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 256",
+ "BriefDescription": "Randomly selected loads with latency value being above 256",
"PEBS": "2",
"MSRValue": "0x100",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 256.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
"TakenAlone": "1",
"SampleAfterValue": "503",
"CounterHTOff": "3"
@@ -417,14 +417,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 512",
+ "BriefDescription": "Randomly selected loads with latency value being above 512",
"PEBS": "2",
"MSRValue": "0x200",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 512.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
"TakenAlone": "1",
"SampleAfterValue": "101",
"CounterHTOff": "3"
@@ -433,12 +433,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all requests that miss in the L3",
- "MSRValue": "0x3fbfc08fff",
+ "BriefDescription": "Counts all requests miss in the L3",
+ "MSRValue": "0x3FBFC08FFF",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -446,12 +446,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x087fc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x087FC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -459,12 +459,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -472,12 +472,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063bc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063BC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -485,12 +485,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
- "MSRValue": "0x06040007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x06040007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -498,12 +498,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
- "MSRValue": "0x3fbfc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+ "MSRValue": "0x3FBFC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -511,12 +511,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0604000244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -524,12 +524,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
- "MSRValue": "0x3fbfc00244",
+ "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "MSRValue": "0x3FBFC00244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -537,12 +537,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"MSRValue": "0x0604000122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -550,12 +550,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00122",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -563,12 +563,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x087fc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x087FC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -576,12 +576,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -589,12 +589,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063bc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063BC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -602,12 +602,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0604000091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -615,12 +615,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
- "MSRValue": "0x3fbfc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "MSRValue": "0x3FBFC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -628,12 +628,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
- "MSRValue": "0x3fbfc00200",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "MSRValue": "0x3FBFC00200",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -641,12 +641,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -654,12 +654,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc00002",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -667,12 +667,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
- "MSRValue": "0x3fbfc00002",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "MSRValue": "0x3FBFC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
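Across these memory.json entries the low bits of MSRValue reuse a few request-type masks (0x091 ALL_DATA_RD, 0x122 ALL_RFO, 0x244 ALL_CODE_RD, 0x002 DEMAND_RFO, 0x7F7 ALL_READS). Treating the low 16 bits as the request-type field is again an assumption based on the usual offcore-response encoding; under it, the masks compose by OR, as this sketch checks:

```python
# Sketch: request-type masks (low 16 bits of MSRValue, assumed layout)
# compose by OR. Values are taken from the entries above.
REQ = {
    "ALL_DATA_RD": 0x091,   # from OFFCORE_RESPONSE.ALL_DATA_RD.*
    "ALL_RFO":     0x122,   # includes the PF_LLC_RFO bit 0x100
    "ALL_CODE_RD": 0x244,   # includes the PF_LLC_CODE_RD bit 0x200
}
ALL_READS = 0x7F7           # from OFFCORE_RESPONSE.ALL_READS.*

union = 0
for mask in REQ.values():
    union |= mask
print(hex(union))                 # 0x3f7
print(hex(ALL_READS & ~union))    # 0x400: one extra request bit in ALL_READS
                                  # that no entry in this file names
```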
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index 0d04bf9db000..c2f6932a5817 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
@@ -10,7 +9,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
"Counter": "Fixed counter 1",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "Fixed counter 1",
@@ -30,7 +27,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
"Counter": "Fixed counter 2",
@@ -322,7 +318,7 @@
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"Counter": "0,1,2,3",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -786,12 +782,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xa2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
"Counter": "0,1,2,3",
"EventName": "RESOURCE_STALLS.ANY",
- "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "PublicDescription": "This event counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1168,12 +1164,12 @@
{
"EventCode": "0xC2",
"UMask": "0x1",
- "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
+ "BriefDescription": "Actually retired uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1204,11 +1200,11 @@
{
"EventCode": "0xC2",
"UMask": "0x2",
- "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "BriefDescription": "Retirement slots used.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "PublicDescription": "This event counts the number of retirement slots used.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1266,33 +1262,33 @@
{
"EventCode": "0xC4",
"UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Conditional branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "PublicDescription": "This event counts both direct and indirect macro near call instructions retired (captured in ring 3).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1311,11 +1307,11 @@
{
"EventCode": "0xC4",
"UMask": "0x8",
- "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Return instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This event counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1332,11 +1328,11 @@
{
"EventCode": "0xC4",
"UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Taken branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This event counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1364,11 +1360,11 @@
{
"EventCode": "0xC5",
"UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1386,22 +1382,22 @@
{
"EventCode": "0xC5",
"UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This event counts mispredicted return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
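With both BR_INST_RETIRED.NEAR_TAKEN and BR_MISP_RETIRED.NEAR_TAKEN defined above, a taken-branch misprediction ratio falls out directly. A minimal sketch with illustrative (not measured) counts:

```python
# Sketch: taken-branch misprediction ratio from the two NEAR_TAKEN events
# above. Counter values are illustrative only.
def taken_mispredict_ratio(br_inst_near_taken: int,
                           br_misp_near_taken: int) -> float:
    # BR_MISP_RETIRED.NEAR_TAKEN is a subset of BR_INST_RETIRED.NEAR_TAKEN,
    # so the ratio stays within [0, 1].
    if br_inst_near_taken == 0:
        return 0.0
    return br_misp_near_taken / br_inst_near_taken

print(taken_mispredict_ratio(400_009, 4_123))   # ~0.0103 (illustrative)
```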
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 71e9737f4614..1a1a3501180a 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -1,164 +1,394 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
+ },
+ {
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "DRAM_Read_Latency"
+ },
+ {
+ "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
+ {
+ "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 1 == 1 else 0 else 0",
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "MEM_PMM_Read_Latency"
+ },
+ {
+ "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "PMM_Read_BW"
+ },
+ {
+ "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "PMM_Write_BW"
+ },
+ {
+ "MetricExpr": "cha_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
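The four TopdownL1 metrics added above partition the machine's issue slots: Backend_Bound's MetricExpr is literally one minus the Frontend_Bound, Bad_Speculation, and Retiring fractions, so the four level-1 buckets sum to 1 by construction. A minimal Python sketch of the non-SMT formulas, using hypothetical counter values in place of real perf stat readings:

    # Hypothetical counter readings for one measurement interval; the names
    # mirror the events used in the TopdownL1 MetricExprs above.
    cycles = 1_000_000
    UOPS_ISSUED_ANY = 2_800_000
    UOPS_RETIRED_RETIRE_SLOTS = 2_500_000
    INT_MISC_RECOVERY_CYCLES = 20_000
    IDQ_UOPS_NOT_DELIVERED_CORE = 600_000

    slots = 4 * cycles  # SLOTS: total issue-pipeline slots on a 4-wide machine

    # Level-1 buckets, transcribed from the non-SMT MetricExprs:
    frontend_bound = IDQ_UOPS_NOT_DELIVERED_CORE / slots
    bad_speculation = (UOPS_ISSUED_ANY - UOPS_RETIRED_RETIRE_SLOTS
                       + 4 * INT_MISC_RECOVERY_CYCLES) / slots
    retiring = UOPS_RETIRED_RETIRE_SLOTS / slots
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

    # The buckets partition the slots, so they must sum to 1.
    total = frontend_bound + bad_speculation + retiring + backend_bound
    assert abs(total - 1.0) < 1e-9
    print(frontend_bound, bad_speculation, retiring, backend_bound)
    # -> approx 0.15 0.095 0.625 0.13

With a perf built from this tree, the grouped metrics should be selectable by group name, e.g. perf stat -M TopdownL1 -a -- sleep 1; the _SMT variants are intended for measurement with SMT enabled.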
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
index f8bbe087b0f8..52a105666afc 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
@@ -77,7 +77,8 @@
"UMask": "0x21",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Locked load uops retired (Precise event capable)"
+ "BriefDescription": "Locked load uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -88,7 +89,8 @@
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -99,7 +101,8 @@
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -110,7 +113,8 @@
"UMask": "0x43",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -121,7 +125,8 @@
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired (Precise event capable)"
+ "BriefDescription": "Load uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -132,7 +137,8 @@
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired (Precise event capable)"
+ "BriefDescription": "Store uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -143,7 +149,8 @@
"UMask": "0x83",
"EventName": "MEM_UOPS_RETIRED.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired (Precise event capable)"
+ "BriefDescription": "Memory uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -154,7 +161,8 @@
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -165,7 +173,8 @@
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -176,7 +185,8 @@
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -187,7 +197,8 @@
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -198,7 +209,8 @@
"UMask": "0x20",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
+ "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -209,7 +221,8 @@
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
+ "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -220,26 +233,14 @@
"UMask": "0x80",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x40000032b7 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+ "Data_LA": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x36000032b7 ",
+ "MSRValue": "0x36000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.ANY",
@@ -252,7 +253,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x10000032b7 ",
+ "MSRValue": "0x10000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
@@ -265,7 +266,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x04000032b7 ",
+ "MSRValue": "0x04000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -278,20 +279,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x02000032b7 ",
+ "MSRValue": "0x02000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x00000432b7 ",
+ "MSRValue": "0x00000432b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
@@ -302,35 +303,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x00000132b7 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000022 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000022 ",
+ "MSRValue": "0x3600000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
@@ -343,7 +318,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000022 ",
+ "MSRValue": "0x1000000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
@@ -356,7 +331,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000022 ",
+ "MSRValue": "0x0400000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -369,20 +344,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000022 ",
+ "MSRValue": "0x0200000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040022 ",
+ "MSRValue": "0x0000040022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
@@ -393,32 +368,6 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010022 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600003091",
@@ -466,7 +415,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -484,35 +433,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000013091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000003010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600003010 ",
+ "MSRValue": "0x3600003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.ANY",
@@ -525,7 +448,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000003010 ",
+ "MSRValue": "0x1000003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
@@ -538,7 +461,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400003010 ",
+ "MSRValue": "0x0400003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -551,20 +474,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200003010 ",
+ "MSRValue": "0x0200003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000043010 ",
+ "MSRValue": "0x0000043010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
@@ -575,48 +498,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000013010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x3600008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.ANY",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000008000 ",
+ "MSRValue": "0x1000008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
@@ -629,7 +513,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400008000 ",
+ "MSRValue": "0x0400008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -642,20 +526,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200008000 ",
+ "MSRValue": "0x0200008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000048000 ",
+ "MSRValue": "0x0000048000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
@@ -668,7 +552,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000018000 ",
+ "MSRValue": "0x0000018000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
@@ -679,22 +563,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600004800 ",
+ "MSRValue": "0x3600004800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
@@ -705,48 +576,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000044800 ",
+ "MSRValue": "0x0000044800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
@@ -757,35 +589,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000014800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000004000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600004000 ",
+ "MSRValue": "0x3600004000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.ANY",
@@ -798,7 +604,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000004000 ",
+ "MSRValue": "0x1000004000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
@@ -811,7 +617,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400004000 ",
+ "MSRValue": "0x0400004000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -824,20 +630,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200004000 ",
+ "MSRValue": "0x0200004000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000044000 ",
+ "MSRValue": "0x0000044000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_HIT",
@@ -848,35 +654,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000014000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000002000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600002000 ",
+ "MSRValue": "0x3600002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
@@ -889,7 +669,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000002000 ",
+ "MSRValue": "0x1000002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
@@ -902,7 +682,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400002000 ",
+ "MSRValue": "0x0400002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -915,20 +695,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200002000 ",
+ "MSRValue": "0x0200002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000042000 ",
+ "MSRValue": "0x0000042000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
@@ -939,35 +719,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000012000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000001000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600001000 ",
+ "MSRValue": "0x3600001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.ANY",
@@ -980,7 +734,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000001000 ",
+ "MSRValue": "0x1000001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
@@ -993,7 +747,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400001000 ",
+ "MSRValue": "0x0400001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1006,20 +760,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200001000 ",
+ "MSRValue": "0x0200001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000041000 ",
+ "MSRValue": "0x0000041000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
@@ -1030,35 +784,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000011000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000800 ",
+ "MSRValue": "0x3600000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.ANY",
@@ -1071,7 +799,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000800 ",
+ "MSRValue": "0x1000000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
@@ -1084,7 +812,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000800 ",
+ "MSRValue": "0x0400000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1097,20 +825,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000800 ",
+ "MSRValue": "0x0200000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040800 ",
+ "MSRValue": "0x0000040800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
@@ -1121,100 +849,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x3600000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.ANY",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000040400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that hit the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000010400 ",
+ "MSRValue": "0x0000010400",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
@@ -1225,113 +862,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x3600000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.ANY",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000040200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000100 ",
+ "MSRValue": "0x3600000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
@@ -1342,87 +875,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000080 ",
+ "MSRValue": "0x3600000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
@@ -1433,87 +888,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000020 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000020 ",
+ "MSRValue": "0x3600000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
@@ -1526,7 +903,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000020 ",
+ "MSRValue": "0x1000000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
@@ -1539,7 +916,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000020 ",
+ "MSRValue": "0x0400000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1552,20 +929,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000020 ",
+ "MSRValue": "0x0200000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040020 ",
+ "MSRValue": "0x0000040020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
@@ -1576,35 +953,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010020 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000010 ",
+ "MSRValue": "0x3600000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
@@ -1617,7 +968,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000010 ",
+ "MSRValue": "0x1000000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
@@ -1630,7 +981,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000010 ",
+ "MSRValue": "0x0400000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1643,20 +994,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000010 ",
+ "MSRValue": "0x0200000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040010 ",
+ "MSRValue": "0x0000040010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
@@ -1667,35 +1018,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000008 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000008 ",
+ "MSRValue": "0x3600000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
@@ -1708,7 +1033,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000008 ",
+ "MSRValue": "0x1000000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
@@ -1721,7 +1046,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000008 ",
+ "MSRValue": "0x0400000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1734,20 +1059,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000008 ",
+ "MSRValue": "0x0200000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040008 ",
+ "MSRValue": "0x0000040008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
@@ -1758,22 +1083,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010008 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x4000000004 ",
+ "MSRValue": "0x4000000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
@@ -1786,7 +1098,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000004 ",
+ "MSRValue": "0x3600000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
@@ -1797,22 +1109,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000004 ",
+ "MSRValue": "0x0400000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1825,20 +1124,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000004 ",
+ "MSRValue": "0x0200000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040004 ",
+ "MSRValue": "0x0000040004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
@@ -1849,22 +1148,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x4000000002 ",
+ "MSRValue": "0x4000000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
@@ -1877,7 +1163,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000002 ",
+ "MSRValue": "0x3600000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
@@ -1890,7 +1176,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000002 ",
+ "MSRValue": "0x1000000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
@@ -1903,7 +1189,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000002 ",
+ "MSRValue": "0x0400000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1916,20 +1202,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000002 ",
+ "MSRValue": "0x0200000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040002 ",
+ "MSRValue": "0x0000040002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
@@ -1940,22 +1226,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x4000000001 ",
+ "MSRValue": "0x4000000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
@@ -1968,7 +1241,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000001 ",
+ "MSRValue": "0x3600000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
@@ -1981,7 +1254,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000001 ",
+ "MSRValue": "0x1000000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
@@ -1994,7 +1267,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000001 ",
+ "MSRValue": "0x0400000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -2007,20 +1280,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000001 ",
+ "MSRValue": "0x0200000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040001 ",
+ "MSRValue": "0x0000040001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
@@ -2028,18 +1301,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
"Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
}
]
\ No newline at end of file
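The hunks above only strip a stray trailing blank from each MSRValue string. As a minimal sketch (not perf's actual jevents parser, whose behavior is not asserted here), a strict hex reader shows why the padded form is fragile:

/* Hedged sketch: a strict hex reader that rejects the padded
 * "0x4000000001 " value the hunks above clean up, while accepting
 * the trimmed form. */
#include <stdio.h>
#include <stdlib.h>

static int parse_msr_value(const char *s, unsigned long long *out)
{
	char *end;

	*out = strtoull(s, &end, 16);
	/* Reject anything left over, including a trailing blank. */
	return (end != s && *end == '\0') ? 0 : -1;
}

int main(void)
{
	unsigned long long v;

	printf("trimmed: %d\n", parse_msr_value("0x4000000001", &v));  /* 0  */
	printf("padded:  %d\n", parse_msr_value("0x4000000001 ", &v)); /* -1 */
	return 0;
}

Any consumer that validates the whole token, as above, would reject the padded variant, so trimming it in the JSON is the safer encoding.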
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
index 690cebd12a94..197dc76d49dd 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
@@ -30,265 +30,5 @@
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "200003",
"BriefDescription": "Machine clears due to memory ordering issue"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x20000032b7 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000022 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000003010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000004000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000002000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000001000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000020 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000008 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
}
]
\ No newline at end of file
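All of the removed L2_MISS.NON_DRAM entries share the same response selection and differ only in the request-type bits of MSRValue. A small sketch, assuming the common MSR_OFFCORE_RESP split (request types in the low 16 bits, response/snoop attributes above them), makes that visible:

/* Hedged sketch: decompose a few of the removed MSRValue encodings,
 * assuming request-type bits live in the low 16 bits and response
 * attributes above them. */
#include <stdio.h>

int main(void)
{
	unsigned long long vals[] = {
		0x20000032b7ULL, /* ANY_READ.L2_MISS.NON_DRAM        */
		0x2000000002ULL, /* DEMAND_RFO.L2_MISS.NON_DRAM      */
		0x2000000001ULL, /* DEMAND_DATA_RD.L2_MISS.NON_DRAM  */
	};
	int i;

	for (i = 0; i < 3; i++)
		printf("req=0x%04llx resp=0x%llx\n",
		       vals[i] & 0xffffULL, /* request-type mask   */
		       vals[i] >> 16);      /* response attributes */
	return 0;
}

All three print the same resp value, confirming that the entries vary only in which request types they select.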
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
index 254788af8ab6..6342368accf8 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -1,7 +1,6 @@
[
{
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -10,7 +9,6 @@
},
{
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.CORE",
@@ -19,7 +17,6 @@
},
{
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -188,7 +185,7 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
index 9805198d3f5f..343d66bbd777 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
@@ -48,7 +48,8 @@
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -59,7 +60,8 @@
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -70,6 +72,7 @@
"UMask": "0x13",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
}
]
\ No newline at end of file
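The new "Data_LA": "1" flag records that these precise events can supply a data linear address in the PEBS record. A minimal perf_event_open() sketch of what that enables, assuming Goldmont's MEM_UOPS_RETIRED event code 0xD0 with umask 0x11 (the event code itself is not shown in this hunk):

/* Hedged sketch: because DTLB_MISS_LOADS is marked Data_LA, a
 * profiler may request PERF_SAMPLE_ADDR alongside precise sampling.
 * The raw config assumes event 0xD0 / umask 0x11. */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x11d0;        /* umask 0x11, event 0xD0 (assumed) */
	attr.sample_period = 200003; /* SampleAfterValue from the table  */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
	attr.precise_ip = 2;         /* "PEBS": "2" => precise event     */

	return perf_event_open(&attr, 0, -1, -1, 0) < 0;
}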
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
index b4791b443a66..5a6ac8285ad4 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
@@ -92,7 +92,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Locked load uops retired (Precise event capable)"
+ "BriefDescription": "Locked load uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -104,7 +105,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -116,7 +118,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -128,7 +131,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -140,7 +144,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired (Precise event capable)"
+ "BriefDescription": "Load uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -152,7 +157,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired (Precise event capable)"
+ "BriefDescription": "Store uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -164,7 +170,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired (Precise event capable)"
+ "BriefDescription": "Memory uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -176,7 +183,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -188,7 +196,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -200,7 +209,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -212,7 +222,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -224,7 +235,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
+ "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -236,7 +248,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
+ "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -248,7 +261,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
+ "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+ "Data_LA": "1"
},
{
"CollectPEBSRecord": "1",
@@ -292,7 +306,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -367,7 +381,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -442,7 +456,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -517,7 +531,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -592,7 +606,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -667,7 +681,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -742,7 +756,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -817,7 +831,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -892,7 +906,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -967,7 +981,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1042,7 +1056,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1117,7 +1131,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1192,7 +1206,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1267,7 +1281,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1342,7 +1356,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1417,7 +1431,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
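Each entry's MSRIndex "0x1a6, 0x1a7" (and the "duplicated for both MSRs" wording in the descriptions) reflects that the same offcore filter can be programmed into either MSR_OFFCORE_RESP0 or MSR_OFFCORE_RESP1, so two such events can count at once. A toy allocator sketch of that pairing; the conclusion that a third concurrent offcore event cannot schedule is an inference from the two-MSR limit, not something the JSON states:

/* Hedged sketch: hand out whichever of the two offcore-response
 * MSRs is free; a third claim fails because there are only two. */
#include <stdio.h>

#define MSR_OFFCORE_RESP0 0x1a6
#define MSR_OFFCORE_RESP1 0x1a7

static int msr_busy[2];

static int claim_offcore_msr(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (!msr_busy[i]) {
			msr_busy[i] = 1;
			return i ? MSR_OFFCORE_RESP1 : MSR_OFFCORE_RESP0;
		}
	}
	return -1; /* both in use */
}

int main(void)
{
	printf("first  -> 0x%x\n", claim_offcore_msr());
	printf("second -> 0x%x\n", claim_offcore_msr());
	printf("third  -> %d\n", claim_offcore_msr());
	return 0;
}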
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
index ccf1aed69197..e3fa1a0ba71b 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
@@ -3,7 +3,6 @@
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"PEBScounters": "32",
@@ -15,7 +14,6 @@
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"PEBScounters": "33",
@@ -27,7 +25,6 @@
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"PEBScounters": "34",
@@ -231,7 +228,7 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
index 0b53a3b0dfb8..0d32fd26ded1 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
@@ -189,7 +189,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -201,7 +202,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -213,6 +215,7 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
}
]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json
index da4d6ddd4f92..7fb0ad8d8ca1 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json
@@ -63,10 +63,10 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "UMask": "0xc1",
"Errata": "HSD78",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
@@ -77,7 +77,7 @@
"PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
+ "UMask": "0xc2",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "RFO requests that hit L2 cache",
@@ -87,7 +87,7 @@
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x44",
+ "UMask": "0xc4",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
@@ -97,7 +97,7 @@
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x50",
+ "UMask": "0xd0",
"EventName": "L2_RQSTS.L2_PF_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
@@ -610,7 +610,7 @@
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -623,7 +623,7 @@
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -792,7 +792,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "",
"EventCode": "0xf4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -802,262 +801,262 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c8fff",
+ "MSRValue": "0x3F803C8FFF",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all requests that hit in the L3",
+ "BriefDescription": "Counts all requests hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c07f7",
+ "MSRValue": "0x10003C07F7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c07f7",
+ "MSRValue": "0x04003C07F7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0244",
+ "MSRValue": "0x04003C0244",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0122",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0122",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0091",
+ "MSRValue": "0x10003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0091",
+ "MSRValue": "0x04003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0200",
+ "MSRValue": "0x3F803C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0100",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0080",
+ "MSRValue": "0x3F803C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0040",
+ "MSRValue": "0x3F803C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0020",
+ "MSRValue": "0x3F803C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0010",
+ "MSRValue": "0x3F803C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0004",
+ "MSRValue": "0x10003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0004",
+ "MSRValue": "0x04003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0002",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0002",
+ "MSRValue": "0x04003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0001",
+ "MSRValue": "0x10003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0001",
+ "MSRValue": "0x04003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
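A note on the MSRValue fields normalized above: each is the raw bitmask written to one of the offcore response MSRs (0x1a6 or 0x1a7), combining request-type bits, supplier bits, and snoop-info bits. A minimal decoding sketch, assuming the Haswell bit layout from Intel's public SDM tables (the bit names are illustrative and worth verifying against your model's documentation):

    # Sketch: decode a Haswell offcore response mask into named bits.
    # Bit positions are an assumption taken from Intel's public SDM
    # tables for this family; unnamed bits are silently ignored here.
    REQUEST_BITS  = {0: "DMND_DATA_RD", 1: "DMND_RFO", 2: "DMND_IFETCH"}
    SUPPLIER_BITS = {18: "L3_HIT_M", 19: "L3_HIT_E", 20: "L3_HIT_S", 21: "L3_HIT_F"}
    SNOOP_BITS    = {34: "SNOOP_HIT_NO_FWD", 36: "SNOOP_HITM"}

    def decode(msr_value: int) -> list[str]:
        names = {**REQUEST_BITS, **SUPPLIER_BITS, **SNOOP_BITS}
        return [name for bit, name in sorted(names.items())
                if msr_value & (1 << bit)]

    # 0x10003C0002 is DEMAND_RFO.L3_HIT.HITM_OTHER_CORE from the hunk above:
    print(decode(0x10003C0002))
    # -> ['DMND_RFO', 'L3_HIT_M', 'L3_HIT_E', 'L3_HIT_S', 'L3_HIT_F', 'SNOOP_HITM']

Decoding 0x04003C0002 the same way yields SNOOP_HIT_NO_FWD instead of SNOOP_HITM, matching the HIT_OTHER_CORE_NO_FWD event names.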
diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
index f9843e5a9b42..f5a3beaa19fc 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
@@ -1,22 +1,26 @@
[
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "HSD56, HSM57",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"Errata": "HSD56, HSM57",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -30,53 +34,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of X87 FP assists due to output values.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "FP_ASSIST.X87_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
+ "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of X87 FP assists due to input values.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "FP_ASSIST.X87_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
+ "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of SIMD FP assists due to output values.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "BriefDescription": "SSE* FP micro-code assist when output value is invalid.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of SIMD FP assists due to input values.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "FP_ASSIST.SIMD_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values",
+ "BriefDescription": "Any input SSE* FP Assist",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1e",
"EventName": "FP_ASSIST.ANY",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "BriefDescription": "Counts any FP_ASSIST umask was incrementing",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
}
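Worth noting for the FP_ASSIST.ANY entry above: its UMask 0x1e is simply the OR of the four assist sub-event umasks (0x2 | 0x4 | 0x8 | 0x10), and the CounterMask of 1 turns the count into cycles with at least one assist active. A small sketch of composing the equivalent raw perf event string (event, umask, and cmask are perf's documented terms; the helper itself is illustrative):

    def raw_event(event: int, umask: int, cmask: int = 0) -> str:
        """Compose a raw core-PMU event descriptor for perf."""
        parts = [f"event={event:#x}", f"umask={umask:#x}"]
        if cmask:
            parts.append(f"cmask={cmask}")
        return "cpu/" + ",".join(parts) + "/"

    # FP_ASSIST.ANY: any of the four assist umasks, counted per cycle.
    assert 0x2 | 0x4 | 0x8 | 0x10 == 0x1E
    print(raw_event(0xCA, 0x1E, cmask=1))  # cpu/event=0xca,umask=0x1e,cmask=1/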
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
index 5ab5c78fe580..21b27488b621 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -1,158 +1,322 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
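The four TopdownL1 metrics introduced above partition the machine's issue slots: Frontend_Bound, Bad_Speculation, and Retiring are each a fraction of SLOTS (4 uops per cycle), and Backend_Bound is defined as one minus the other three, so the level-1 categories sum to 1 by construction. A minimal sanity check with invented counter values (names mirror the MetricExprs; the numbers are illustrative only):

    counters = {
        "cycles": 1_000_000,
        "IDQ_UOPS_NOT_DELIVERED.CORE": 600_000,
        "UOPS_ISSUED.ANY": 2_500_000,
        "UOPS_RETIRED.RETIRE_SLOTS": 2_200_000,
        "INT_MISC.RECOVERY_CYCLES": 20_000,
    }

    slots = 4 * counters["cycles"]
    frontend_bound = counters["IDQ_UOPS_NOT_DELIVERED.CORE"] / slots
    bad_speculation = (counters["UOPS_ISSUED.ANY"]
                       - counters["UOPS_RETIRED.RETIRE_SLOTS"]
                       + 4 * counters["INT_MISC.RECOVERY_CYCLES"]) / slots
    retiring = counters["UOPS_RETIRED.RETIRE_SLOTS"] / slots
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

    # The four level-1 categories partition the issue slots by construction.
    assert abs(frontend_bound + bad_speculation + retiring + backend_bound - 1) < 1e-9
    print(f"FE={frontend_bound:.2f} BS={bad_speculation:.2f} "
          f"RET={retiring:.2f} BE={backend_bound:.2f}")
    # -> FE=0.15 BS=0.10 RET=0.55 BE=0.20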
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "Frontend",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
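The three Cache_Fill_BW metrics above share one shape: 64 bytes per line fill, divided by 1e9 and by perf's duration_time pseudo-event to land in GB/s. Restated with invented counts:

    # Illustrative numbers: 50M L1D line replacements over a 2-second run.
    CACHE_LINE_BYTES = 64
    l1d_replacement = 50_000_000
    duration_time_s = 2.0

    # Mirrors "64 * L1D.REPLACEMENT / 1000000000 / duration_time".
    l1d_fill_bw_gb_s = CACHE_LINE_BYTES * l1d_replacement / 1e9 / duration_time_s
    print(f"{l1d_fill_bw_gb_s:.2f} GB/s")  # -> 1.60 GB/s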
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
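Likewise, the *MPKI metrics normalize miss counts per thousand retired instructions, which makes runs of different lengths comparable; for example:

    # Illustrative: 3M retired L2 demand-load misses over 600M instructions.
    l2_miss, instructions = 3_000_000, 600_000_000
    l2_mpki = 1000 * l2_miss / instructions  # mirrors the L2MPKI MetricExpr
    print(f"L2MPKI = {l2_mpki:.1f}")         # -> L2MPKI = 5.0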
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
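Most of the _SMT metric variants added in this file replace plain cycles with an estimate of per-core clocks: half the thread clocks, scaled by the fraction of reference cycles during which only one sibling was active. A sketch of how that estimate behaves at the two extremes (counter values invented):

    def core_clks_smt(thread_clks: float, one_thread_active: float,
                      ref_xclk: float) -> float:
        """Mirrors ( CPU_CLK_UNHALTED.THREAD / 2 ) *
        ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK )."""
        return (thread_clks / 2) * (1 + one_thread_active / ref_xclk)

    # If both threads were always active, ONE_THREAD_ACTIVE is 0 and each
    # thread is charged half the core clocks; if the sibling was always
    # halted, the ratio approaches 1 and the thread is charged all of them.
    print(core_clks_smt(1_000_000, 0, 100_000))        # -> 500000.0
    print(core_clks_smt(1_000_000, 100_000, 100_000))  # -> 1000000.0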
diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json
index e5f9fa6655b3..ef13ed88e2ea 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json
@@ -298,7 +298,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 4.",
+ "BriefDescription": "Randomly selected loads with latency value being above 4.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -312,7 +312,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"SampleAfterValue": "50021",
- "BriefDescription": "Loads with latency value being above 8.",
+ "BriefDescription": "Randomly selected loads with latency value being above 8.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -326,7 +326,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"SampleAfterValue": "20011",
- "BriefDescription": "Loads with latency value being above 16.",
+ "BriefDescription": "Randomly selected loads with latency value being above 16.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -340,7 +340,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 32.",
+ "BriefDescription": "Randomly selected loads with latency value being above 32.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -354,7 +354,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"SampleAfterValue": "2003",
- "BriefDescription": "Loads with latency value being above 64.",
+ "BriefDescription": "Randomly selected loads with latency value being above 64.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -368,7 +368,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"SampleAfterValue": "1009",
- "BriefDescription": "Loads with latency value being above 128.",
+ "BriefDescription": "Randomly selected loads with latency value being above 128.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -382,7 +382,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"SampleAfterValue": "503",
- "BriefDescription": "Loads with latency value being above 256.",
+ "BriefDescription": "Randomly selected loads with latency value being above 256.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -396,280 +396,280 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"SampleAfterValue": "101",
- "BriefDescription": "Loads with latency value being above 512.",
+ "BriefDescription": "Randomly selected loads with latency value being above 512.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
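The LOAD_LATENCY_GT_* entries above differ only in the latency threshold programmed into MSR 0x3F6 and in the default sample period, which roughly tracks how often each threshold fires ("randomly selected" reflects PEBS sampling of qualifying loads). perf exposes the threshold through its documented ldlat event term; a sketch of generating the family (the exact command shape is an assumption):

    # Threshold -> default sample period, copied from the hunk above.
    LOAD_LATENCY = {4: 100003, 8: 50021, 16: 20011, 32: 100003,
                    64: 2003, 128: 1009, 256: 503, 512: 101}

    for threshold, period in sorted(LOAD_LATENCY.items()):
        # ldlat programs the MSR 0x3F6 latency threshold for PEBS sampling.
        print(f"perf record -e cpu/mem-loads,ldlat={threshold}/P "
              f"-c {period} -- ./workload")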
{
- "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc08fff",
+ "MSRValue": "0x3FFFC08FFF",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all requests that miss in the L3",
+ "BriefDescription": "Counts all requests miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01004007f7",
+ "MSRValue": "0x01004007F7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc007f7",
+ "MSRValue": "0x3FFFC007F7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
+ "BriefDescription": "miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400244",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00244",
+ "MSRValue": "0x3FFFC00244",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00122",
+ "MSRValue": "0x3FFFC00122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00091",
+ "MSRValue": "0x3FFFC00091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00200",
+ "MSRValue": "0x3FFFC00200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00100",
+ "MSRValue": "0x3FFFC00100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00080",
+ "MSRValue": "0x3FFFC00080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00040",
+ "MSRValue": "0x3FFFC00040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00020",
+ "MSRValue": "0x3FFFC00020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00010",
+ "MSRValue": "0x3FFFC00010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00004",
+ "MSRValue": "0x3FFFC00004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss in the L3",
+ "BriefDescription": "Counts all demand code reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00002",
+ "MSRValue": "0x3FFFC00002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00001",
+ "MSRValue": "0x3FFFC00001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss in the L3",
+ "BriefDescription": "Counts demand data reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
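To program one of these L3-miss offcore events without the JSON alias, perf accepts the mask via its documented offcore_rsp term alongside event and umask; a sketch using the DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE encoding from the hunk above:

    def offcore_event(msr_value: int, event: int = 0xB7, umask: int = 0x1) -> str:
        # Event 0xB7 pairs with MSR 0x1a6; its twin 0xBB pairs with 0x1a7.
        return f"cpu/event={event:#x},umask={umask:#x},offcore_rsp={msr_value:#x}/"

    print(offcore_event(0x3FFFC00001))
    # -> cpu/event=0xb7,umask=0x1,offcore_rsp=0x3fffc00001/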
diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
index a4dcfce4a512..734d3873729e 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
@@ -1,7 +1,6 @@
[
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"Errata": "HSD140, HSD143",
@@ -12,7 +11,6 @@
},
{
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -21,7 +19,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -32,7 +29,6 @@
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -1071,7 +1067,8 @@
"CounterHTOff": "1"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
"EventCode": "0xC0",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -1081,13 +1078,13 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1102,28 +1099,34 @@
"Data_LA": "1"
},
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
+ "BriefDescription": "Cycles no executable uops retired",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS uops retired event.",
"CounterMask": "10",
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -1131,7 +1134,7 @@
"AnyThread": "1",
"EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
+ "BriefDescription": "Cycles no executable uops retired on core",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -1245,13 +1248,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "BriefDescription": "Counts all not taken macro branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1265,13 +1269,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of far branches retired.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100003",
- "BriefDescription": "Far branch instructions retired.",
+ "BriefDescription": "Counts the number of far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
index b2fbd617306a..a9e62d4357af 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
@@ -64,18 +64,18 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
+ "UMask": "0xc1",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"Errata": "HSD78",
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x24",
- "UMask": "0x42",
+ "UMask": "0xc2",
"BriefDescription": "RFO requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.RFO_HIT",
@@ -85,7 +85,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x44",
+ "UMask": "0xc4",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.CODE_RD_HIT",
@@ -95,7 +95,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x50",
+ "UMask": "0xd0",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.L2_PF_HIT",
@@ -416,7 +416,7 @@
{
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
+ "BriefDescription": "Retired load uops that miss the STLB.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -428,7 +428,7 @@
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
+ "BriefDescription": "Retired store uops that miss the STLB.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -441,7 +441,7 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access. (precise Event)",
+ "BriefDescription": "Retired load uops with locked access.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -453,34 +453,32 @@
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"Errata": "HSD29, HSM30",
- "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"Errata": "HSD29, HSM30",
"L1_Hit_Indication": "1",
- "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops. (precise Event)",
+ "BriefDescription": "All retired load uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -492,14 +490,13 @@
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops. (precise Event)",
+ "BriefDescription": "All retired store uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"Errata": "HSD29, HSM30",
"L1_Hit_Indication": "1",
- "PublicDescription": "This event counts all store uops retired. This is a precise event.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -530,13 +527,13 @@
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
+ "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -549,19 +546,20 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"Errata": "HSM30",
- "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
+ "PublicDescription": "Retired load uops missed L1 cache as data sources.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"Errata": "HSD29, HSM30",
+ "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -574,6 +572,7 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -604,26 +603,24 @@
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
@@ -642,19 +639,20 @@
{
"EventCode": "0xD3",
"UMask": "0x1",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "HSD74, HSD29, HSD25, HSM30",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -666,7 +664,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -678,7 +676,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -833,7 +831,6 @@
"BriefDescription": "Split locks in SQ",
"Counter": "0,1,2,3",
"EventName": "SQ_MISC.SPLIT_LOCK",
- "PublicDescription": "",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,12 +838,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0001",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -854,12 +851,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0001",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -867,12 +864,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0002",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -880,12 +877,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0002",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -893,12 +890,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0004",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -906,12 +903,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0004",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -919,12 +916,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3",
- "MSRValue": "0x3f803c0010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
+ "MSRValue": "0x3F803C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -932,12 +929,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3",
- "MSRValue": "0x3f803c0020",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
+ "MSRValue": "0x3F803C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -945,12 +942,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3",
- "MSRValue": "0x3f803c0040",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
+ "MSRValue": "0x3F803C0040",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -958,12 +955,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3",
- "MSRValue": "0x3f803c0080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
+ "MSRValue": "0x3F803C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -971,12 +968,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
- "MSRValue": "0x3f803c0100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -984,12 +981,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
- "MSRValue": "0x3f803c0200",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "MSRValue": "0x3F803C0200",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -997,12 +994,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0091",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1010,12 +1007,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0091",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1023,12 +1020,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0122",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1036,12 +1033,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0122",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1049,12 +1046,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0244",
+ "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1062,12 +1059,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c07f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C07F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1075,12 +1072,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c07f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C07F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1088,12 +1085,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all requests that hit in the L3",
- "MSRValue": "0x3f803c8fff",
+ "BriefDescription": "Counts all requests hit in the L3",
+ "MSRValue": "0x3F803C8FFF",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
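Each OFFCORE_RESPONSE.* entry above pairs an EventName with an MSRValue that is programmed into MSR 0x1a6/0x1a7. A rough sketch of how such a value decomposes, assuming the usual Haswell layout of request-type bits [15:0], supplier/response bits [30:16], and snoop bits [37:31]; the bit names below follow common SDM usage and are illustrative:

    # Python: decode an OFFCORE_RESPONSE MSR value into its bit fields.
    REQUESTS = {0: "DMND_DATA_RD", 1: "DMND_RFO", 2: "DMND_CODE_RD",
                4: "PF_L2_DATA_RD", 5: "PF_L2_RFO", 6: "PF_L2_CODE_RD",
                7: "PF_LLC_DATA_RD", 8: "PF_LLC_RFO", 9: "PF_LLC_CODE_RD"}
    SNOOPS = {31: "SNP_NONE", 32: "SNP_NOT_NEEDED", 33: "SNP_MISS",
              34: "SNP_HIT_NO_FWD", 35: "SNP_HIT_FWD", 36: "SNP_HITM",
              37: "SNP_NON_DRAM"}

    def decode_offcore(msr_value):
        req = [name for bit, name in REQUESTS.items() if msr_value >> bit & 1]
        snp = [name for bit, name in SNOOPS.items() if msr_value >> bit & 1]
        supplier = (msr_value >> 16) & 0x7FFF  # response/supplier info bits
        return req, hex(supplier), snp

    # 0x04003C0001: demand data read, L3 hit states, snoop hit, no forward
    print(decode_offcore(0x04003C0001))
    # -> (['DMND_DATA_RD'], '0x3c', ['SNP_HIT_NO_FWD'])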
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
index 5ab5c78fe580..e5aac148c941 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -1,158 +1,340 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "Frontend",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "DRAM_Read_Latency"
+ },
+ {
+ "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
+ {
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
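The four TopdownL1 metrics added above (Frontend_Bound, Bad_Speculation, Retiring, Backend_Bound) partition the machine's issue slots, which is why Backend_Bound is computed as one minus the other three. A small worked sketch of the non-SMT arithmetic with made-up counter values, where SLOTS = 4 * cycles on this 4-wide core:

    # Python: TopdownL1 arithmetic with hypothetical counter readings.
    cycles = 1_000_000
    idq_uops_not_delivered = 400_000   # IDQ_UOPS_NOT_DELIVERED.CORE
    uops_issued = 3_000_000            # UOPS_ISSUED.ANY
    uops_retire_slots = 2_600_000      # UOPS_RETIRED.RETIRE_SLOTS
    recovery_cycles = 50_000           # INT_MISC.RECOVERY_CYCLES

    slots = 4 * cycles
    frontend_bound = idq_uops_not_delivered / slots            # 0.10
    bad_speculation = (uops_issued - uops_retire_slots
                       + 4 * recovery_cycles) / slots          # 0.15
    retiring = uops_retire_slots / slots                       # 0.65
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

    # The four categories cover all issue slots, so they sum to 1.
    assert abs(sum([frontend_bound, bad_speculation,
                    retiring, backend_bound]) - 1.0) < 1e-9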
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
index 56b0f24b8029..a42d5ce86b6f 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
@@ -291,7 +291,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 4.",
+ "BriefDescription": "Randomly selected loads with latency value being above 4.",
"PEBS": "2",
"MSRValue": "0x4",
"Counter": "3",
@@ -305,7 +305,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 8.",
+ "BriefDescription": "Randomly selected loads with latency value being above 8.",
"PEBS": "2",
"MSRValue": "0x8",
"Counter": "3",
@@ -319,7 +319,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 16.",
+ "BriefDescription": "Randomly selected loads with latency value being above 16.",
"PEBS": "2",
"MSRValue": "0x10",
"Counter": "3",
@@ -333,7 +333,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 32.",
+ "BriefDescription": "Randomly selected loads with latency value being above 32.",
"PEBS": "2",
"MSRValue": "0x20",
"Counter": "3",
@@ -347,7 +347,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 64.",
+ "BriefDescription": "Randomly selected loads with latency value being above 64.",
"PEBS": "2",
"MSRValue": "0x40",
"Counter": "3",
@@ -361,7 +361,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 128.",
+ "BriefDescription": "Randomly selected loads with latency value being above 128.",
"PEBS": "2",
"MSRValue": "0x80",
"Counter": "3",
@@ -375,7 +375,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 256.",
+ "BriefDescription": "Randomly selected loads with latency value being above 256.",
"PEBS": "2",
"MSRValue": "0x100",
"Counter": "3",
@@ -389,7 +389,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 512.",
+ "BriefDescription": "Randomly selected loads with latency value being above 512.",
"PEBS": "2",
"MSRValue": "0x200",
"Counter": "3",
@@ -404,12 +404,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss in the L3",
- "MSRValue": "0x3fbfc00001",
+ "BriefDescription": "Counts demand data reads miss in the L3",
+ "MSRValue": "0x3FBFC00001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -417,12 +417,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -430,12 +430,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
- "MSRValue": "0x3fbfc00002",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "MSRValue": "0x3FBFC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -443,12 +443,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -456,12 +456,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc00002",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -469,12 +469,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss in the L3",
- "MSRValue": "0x3fbfc00004",
+ "BriefDescription": "Counts all demand code reads miss in the L3",
+ "MSRValue": "0x3FBFC00004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -482,12 +482,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -495,12 +495,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3",
- "MSRValue": "0x3fbfc00010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
+ "MSRValue": "0x3FBFC00010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -508,12 +508,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00020",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -521,12 +521,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3",
- "MSRValue": "0x3fbfc00040",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
+ "MSRValue": "0x3FBFC00040",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -534,12 +534,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3",
- "MSRValue": "0x3fbfc00080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
+ "MSRValue": "0x3FBFC00080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -547,12 +547,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -560,12 +560,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
- "MSRValue": "0x3fbfc00200",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "MSRValue": "0x3FBFC00200",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -573,12 +573,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
- "MSRValue": "0x3fbfc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "MSRValue": "0x3FBFC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -586,12 +586,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -599,12 +599,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063f800091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063F800091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -612,12 +612,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -625,12 +625,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x083fc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x083FC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -638,12 +638,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00122",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -651,12 +651,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -664,12 +664,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
- "MSRValue": "0x3fbfc00244",
+ "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "MSRValue": "0x3FBFC00244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -677,12 +677,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -690,12 +690,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
- "MSRValue": "0x3fbfc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+ "MSRValue": "0x3FBFC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -703,12 +703,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
- "MSRValue": "0x06004007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x06004007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -716,12 +716,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063f8007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063F8007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -729,12 +729,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -742,12 +742,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x083fc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x083FC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -755,12 +755,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all requests that miss in the L3",
- "MSRValue": "0x3fbfc08fff",
+ "BriefDescription": "Counts all requests miss in the L3",
+ "MSRValue": "0x3FBFC08FFF",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
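
The memory.json hunks above make two kinds of change: the load-latency events (EventCode 0xCD, UMask 0x1) gain a "Randomly selected" prefix, reflecting that PEBS load-latency profiling records a random sample of loads whose latency exceeds the threshold encoded in MSRValue, and the offcore-response events get upper-cased MSRValue masks plus deduplicated PublicDescription text. A minimal sketch of how a tool might recover that latency threshold from such records, assuming the field layout shown in this diff (EventName is standard in these files but not visible in the hunks above):

    # Hedged sketch: decode the cycle threshold an EventCode 0xCD
    # (load-latency) entry encodes in MSRValue, per the records above.
    import json

    def latency_threshold(event):
        if event.get("EventCode") == "0xCD" and event.get("UMask") == "0x1":
            return int(event["MSRValue"], 16)   # e.g. "0x10" -> above 16 cycles
        return None

    with open("tools/perf/pmu-events/arch/x86/haswellx/memory.json") as f:
        for ev in json.load(f):
            cutoff = latency_threshold(ev)
            if cutoff is not None:
                # EventName assumed present, as elsewhere in these files.
                print(ev.get("EventName", "?"), "samples loads above", cutoff, "cycles")
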
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
index 8a18bfe9e3e4..26f2888341ee 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
@@ -11,7 +10,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state.",
"Counter": "Fixed counter 1",
@@ -21,7 +19,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "Fixed counter 1",
@@ -31,7 +28,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
"Counter": "Fixed counter 2",
@@ -1098,6 +1094,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
+ "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1142,6 +1139,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1201,6 +1199,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PublicDescription": "Counts the number of conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1241,6 +1240,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PublicDescription": "Counts the number of near return instructions retired.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1261,6 +1261,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near taken branches retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1312,6 +1313,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
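
The pipeline.json hunks above drop the redundant "EventCode": "0x00" field from the fixed-counter events (INST_RETIRED.ANY and the CPU_CLK_UNHALTED variants); fixed counters are not selected by event code, so the field carried no information. A sketch of a cleanup pass that would produce this kind of change, assuming the layout shown in the diff (note these files are normally regenerated by a converter, so round-tripping through json.dump would not reproduce their exact formatting):

    # Hedged sketch: strip "EventCode": "0x00" from fixed-counter events.
    import json

    PATH = "tools/perf/pmu-events/arch/x86/haswellx/pipeline.json"

    with open(PATH) as f:
        events = json.load(f)

    for ev in events:
        # Fixed counters ("Counter": "Fixed counter N") ignore the
        # event-select field, so a 0x00 EventCode can be removed safely.
        if ev.get("Counter", "").startswith("Fixed counter") \
                and ev.get("EventCode") == "0x00":
            del ev["EventCode"]

    with open(PATH, "w") as f:
        json.dump(events, f, indent=4)
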
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
index 999a01bc6467..5f6cb2abc384 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -1012,7 +1012,7 @@
"EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address ",
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1036,7 +1036,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads ",
+ "BriefDescription": "Counts all demand data reads",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1048,7 +1048,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand rfo's ",
+ "BriefDescription": "Counts all demand rfo's",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1084,7 +1084,7 @@
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch prefetch RFOs ",
+ "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1096,7 +1096,7 @@
"EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo references (demand & prefetch) ",
+ "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
"CounterHTOff": "0,1,2,3"
}
]
\ No newline at end of file
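
The ivybridge/cache.json hunks above are pure whitespace hygiene: trailing spaces are trimmed from BriefDescription strings while every other field (including the doubled "demand & prefetch prefetch" wording) is left untouched. A sketch of an equivalent normalization pass, under the same layout assumptions as above:

    # Hedged sketch: trim trailing whitespace from description strings
    # without touching any other field, matching the hunks above.
    import json

    PATH = "tools/perf/pmu-events/arch/x86/ivybridge/cache.json"

    with open(PATH) as f:
        events = json.load(f)

    for ev in events:
        for key in ("BriefDescription", "PublicDescription"):
            if key in ev:
                ev[key] = ev[key].rstrip()

    with open(PATH, "w") as f:
        json.dump(events, f, indent=4)
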
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
index 7c2679514efb..bc4d5fc284a0 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -1,164 +1,340 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
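
The ivb-metrics.json rewrite above introduces the Top-Down level-1 breakdown as explicit metric expressions. In the non-SMT forms the four categories partition the 4-wide issue-slot budget: Frontend_Bound = IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles), Bad_Speculation = (UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES) / (4 * cycles), Retiring = UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles), and Backend_Bound is the remainder. The _SMT variants simply replace "cycles" with the core-clock estimate ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ). A minimal sketch evaluating the non-SMT expressions from raw counter readings (the example numbers are illustrative, not from the diff):

    # Hedged sketch: evaluate the non-SMT TopdownL1 expressions above
    # for one logical thread, given raw counter values.
    def topdown_l1(idq_uops_not_delivered, uops_issued, uops_retired_slots,
                   recovery_cycles, cycles):
        slots = 4.0 * cycles                  # SLOTS = 4 * cycles per thread
        frontend = idq_uops_not_delivered / slots
        bad_spec = (uops_issued - uops_retired_slots
                    + 4 * recovery_cycles) / slots
        retiring = uops_retired_slots / slots
        backend = 1.0 - (frontend + bad_spec + retiring)  # remainder of budget
        return {"Frontend_Bound": frontend, "Bad_Speculation": bad_spec,
                "Retiring": retiring, "Backend_Bound": backend}

    # Illustrative values only; real inputs come from perf counters.
    print(topdown_l1(idq_uops_not_delivered=1.2e9, uops_issued=3.9e9,
                     uops_retired_slots=3.6e9, recovery_cycles=2.0e7,
                     cycles=2.0e9))
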
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
index 0afbfd95ea30..2a0aad91d83d 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -9,7 +8,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -19,7 +17,6 @@
},
{
"PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -29,7 +26,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
index 7c2679514efb..f3874b5f9995 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -1,164 +1,346 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
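
The four TopdownL1 metrics introduced above partition all issue slots by construction; Backend_Bound is literally one minus the other three, so the fractions always sum to 1. The _SMT variants only swap the per-thread "cycles" denominator for the even-distribution core-clocks estimate ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + ONE_THREAD_ACTIVE / REF_XCLK ). A minimal sketch of the non-SMT expressions, with hypothetical counter deltas (illustrative numbers, not measured data):

    # Hedged sketch of the non-SMT TopdownL1 MetricExprs from this file.
    counts = {
        "IDQ_UOPS_NOT_DELIVERED.CORE": 400_000_000,
        "UOPS_ISSUED.ANY": 900_000_000,
        "UOPS_RETIRED.RETIRE_SLOTS": 800_000_000,
        "INT_MISC.RECOVERY_CYCLES": 10_000_000,
        "cycles": 500_000_000,  # CPU_CLK_UNHALTED.THREAD
    }

    slots = 4 * counts["cycles"]  # SLOTS: 4 issue slots per cycle
    frontend_bound = counts["IDQ_UOPS_NOT_DELIVERED.CORE"] / slots
    bad_speculation = (counts["UOPS_ISSUED.ANY"]
                       - counts["UOPS_RETIRED.RETIRE_SLOTS"]
                       + 4 * counts["INT_MISC.RECOVERY_CYCLES"]) / slots
    retiring = counts["UOPS_RETIRED.RETIRE_SLOTS"] / slots
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

    # Sums to 1.0 by construction: 0.20, 0.07, 0.40, 0.33 here.
    print(frontend_bound, bad_speculation, retiring, backend_bound)
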
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
index 0afbfd95ea30..2a0aad91d83d 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -9,7 +8,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -19,7 +17,6 @@
},
{
"PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -29,7 +26,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
index ee22e4a5e30d..52dc6ef40e63 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
@@ -31,7 +31,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
@@ -42,7 +42,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
@@ -179,7 +179,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier. ",
+ "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
"UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index fd7d7c438226..98c73e430b05 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -1,140 +1,232 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
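
Two of the derived metrics above turn raw counts into rates via the wall-clock duration_time: GFLOPs weights each FP counter by its vector width (1x scalar, 2x packed double, 4x packed single and 256-bit double, 8x 256-bit single), and DRAM_BW_Use scales DRAM CAS counts by the 64-byte line size. A small sketch with hypothetical counter values (illustrative only):

    # Hedged sketch of the GFLOPs and DRAM_BW_Use MetricExprs above.
    duration_time = 1.0  # seconds; hypothetical measurement window

    flops = (1 * (2_000_000_000      # FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE
                  + 1_000_000_000)   # FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE
             + 2 * 500_000_000       # FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE
             + 4 * (250_000_000      # FP_COMP_OPS_EXE.SSE_PACKED_SINGLE
                    + 250_000_000)   # SIMD_FP_256.PACKED_DOUBLE
             + 8 * 125_000_000)      # SIMD_FP_256.PACKED_SINGLE
    gflops = flops / 1_000_000_000 / duration_time

    cas_read, cas_write = 40_000_000, 10_000_000  # uncore_imc cas_count_*
    dram_bw = 64 * (cas_read + cas_write) / 1_000_000_000 / duration_time

    print(f"GFLOPs = {gflops:.2f}, DRAM_BW_Use = {dram_bw:.2f} GB/s")
    # -> GFLOPs = 7.00, DRAM_BW_Use = 3.20 GB/s
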
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
index 34a519d9bfa0..783a5b4a67b1 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
@@ -1,7 +1,6 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ",
- "EventCode": "0x00",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
"Counter": "Fixed counter 1",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -10,8 +9,7 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
- "EventCode": "0x00",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"Counter": "Fixed counter 2",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -20,8 +18,7 @@
"CounterHTOff": "Fixed counter 2"
},
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
- "EventCode": "0x00",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"Counter": "Fixed counter 3",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -778,7 +775,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -1098,7 +1095,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x2",
"AnyThread": "1",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
index e434ec723001..e847b0fd696d 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
@@ -32,16 +32,16 @@
"BriefDescription": "Counts the number of L2 cache misses"
},
{
- "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses. ",
+ "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses.",
"EventCode": "0x86",
"Counter": "0,1",
"UMask": "0x4",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses. "
+ "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses."
},
{
- "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted. ",
+ "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
"EventCode": "0x04",
"Counter": "0,1",
"UMask": "0x1",
@@ -115,29 +115,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000070 ",
+ "MSRValue": "0x4000000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400070 ",
+ "MSRValue": "0x1000400070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400070 ",
+ "MSRValue": "0x0800400070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_E_F",
@@ -148,29 +148,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080070 ",
+ "MSRValue": "0x1000080070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080070 ",
+ "MSRValue": "0x0800080070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010070 ",
+ "MSRValue": "0x0000010070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.ANY_RESPONSE",
@@ -181,29 +181,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x40000032f7 ",
+ "MSRValue": "0x40000032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x10004032f7 ",
+ "MSRValue": "0x10004032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x08004032f7 ",
+ "MSRValue": "0x08004032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_E_F",
@@ -214,29 +214,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x10000832f7 ",
+ "MSRValue": "0x10000832f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x08000832f7 ",
+ "MSRValue": "0x08000832f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00000132f7 ",
+ "MSRValue": "0x00000132f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
@@ -247,29 +247,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000044 ",
+ "MSRValue": "0x4000000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400044 ",
+ "MSRValue": "0x1000400044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400044 ",
+ "MSRValue": "0x0800400044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_E_F",
@@ -280,29 +280,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080044 ",
+ "MSRValue": "0x1000080044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080044 ",
+ "MSRValue": "0x0800080044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010044 ",
+ "MSRValue": "0x0000010044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
@@ -313,29 +313,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000022 ",
+ "MSRValue": "0x4000000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400022 ",
+ "MSRValue": "0x1000400022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400022 ",
+ "MSRValue": "0x0800400022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_E_F",
@@ -346,29 +346,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080022 ",
+ "MSRValue": "0x1000080022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080022 ",
+ "MSRValue": "0x0800080022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010022 ",
+ "MSRValue": "0x0000010022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
@@ -379,29 +379,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000003091 ",
+ "MSRValue": "0x4000003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000403091 ",
+ "MSRValue": "0x1000403091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800403091 ",
+ "MSRValue": "0x0800403091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_E_F",
@@ -412,29 +412,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000083091 ",
+ "MSRValue": "0x1000083091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800083091 ",
+ "MSRValue": "0x0800083091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000013091 ",
+ "MSRValue": "0x0000013091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
@@ -445,29 +445,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000008000 ",
+ "MSRValue": "0x4000008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000408000 ",
+ "MSRValue": "0x1000408000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800408000 ",
+ "MSRValue": "0x0800408000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_E_F",
@@ -478,29 +478,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000088000 ",
+ "MSRValue": "0x1000088000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800088000 ",
+ "MSRValue": "0x0800088000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000018000 ",
+ "MSRValue": "0x0000018000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
@@ -511,7 +511,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000014800 ",
+ "MSRValue": "0x0000014800",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
@@ -522,7 +522,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000014000 ",
+ "MSRValue": "0x0000014000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
@@ -533,29 +533,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000002000 ",
+ "MSRValue": "0x4000002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000402000 ",
+ "MSRValue": "0x1000402000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800402000 ",
+ "MSRValue": "0x0800402000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_E_F",
@@ -566,29 +566,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000082000 ",
+ "MSRValue": "0x1000082000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800082000 ",
+ "MSRValue": "0x0800082000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000012000 ",
+ "MSRValue": "0x0000012000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
@@ -599,29 +599,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000001000 ",
+ "MSRValue": "0x4000001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000401000 ",
+ "MSRValue": "0x1000401000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800401000 ",
+ "MSRValue": "0x0800401000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_E_F",
@@ -632,29 +632,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000081000 ",
+ "MSRValue": "0x1000081000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800081000 ",
+ "MSRValue": "0x0800081000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000011000 ",
+ "MSRValue": "0x0000011000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.ANY_RESPONSE",
@@ -665,7 +665,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010800 ",
+ "MSRValue": "0x0000010800",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
@@ -676,29 +676,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000400 ",
+ "MSRValue": "0x4000000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400400 ",
+ "MSRValue": "0x1000400400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400400 ",
+ "MSRValue": "0x0800400400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_E_F",
@@ -709,29 +709,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080400 ",
+ "MSRValue": "0x1000080400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080400 ",
+ "MSRValue": "0x0800080400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010400 ",
+ "MSRValue": "0x0000010400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
@@ -742,29 +742,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000200 ",
+ "MSRValue": "0x4000000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400200 ",
+ "MSRValue": "0x1000400200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400200 ",
+ "MSRValue": "0x0800400200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_E_F",
@@ -775,29 +775,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080200 ",
+ "MSRValue": "0x1000080200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080200 ",
+ "MSRValue": "0x0800080200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010200 ",
+ "MSRValue": "0x0000010200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.ANY_RESPONSE",
@@ -808,18 +808,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400100 ",
+ "MSRValue": "0x1000400100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400100 ",
+ "MSRValue": "0x0800400100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_E_F",
@@ -830,29 +830,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080100 ",
+ "MSRValue": "0x1000080100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080100 ",
+ "MSRValue": "0x0800080100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010100 ",
+ "MSRValue": "0x0000010100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
@@ -863,29 +863,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000080 ",
+ "MSRValue": "0x4000000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400080 ",
+ "MSRValue": "0x1000400080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400080 ",
+ "MSRValue": "0x0800400080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_E_F",
@@ -896,29 +896,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080080 ",
+ "MSRValue": "0x1000080080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080080 ",
+ "MSRValue": "0x0800080080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010080 ",
+ "MSRValue": "0x0000010080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
@@ -929,29 +929,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000040 ",
+ "MSRValue": "0x4000000040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400040 ",
+ "MSRValue": "0x1000400040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400040 ",
+ "MSRValue": "0x0800400040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_E_F",
@@ -962,29 +962,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080040 ",
+ "MSRValue": "0x1000080040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080040 ",
+ "MSRValue": "0x0800080040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010040 ",
+ "MSRValue": "0x0000010040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
@@ -995,18 +995,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400020 ",
+ "MSRValue": "0x1000400020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400020 ",
+ "MSRValue": "0x0800400020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_E_F",
@@ -1017,29 +1017,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080020 ",
+ "MSRValue": "0x1000080020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080020 ",
+ "MSRValue": "0x0800080020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000020020 ",
+ "MSRValue": "0x0000020020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE",
@@ -1050,7 +1050,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010020 ",
+ "MSRValue": "0x0000010020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
@@ -1061,29 +1061,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000004 ",
+ "MSRValue": "0x4000000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400004 ",
+ "MSRValue": "0x1000400004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400004 ",
+ "MSRValue": "0x0800400004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_E_F",
@@ -1094,29 +1094,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080004 ",
+ "MSRValue": "0x1000080004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080004 ",
+ "MSRValue": "0x0800080004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010004 ",
+ "MSRValue": "0x0000010004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
@@ -1127,29 +1127,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000002 ",
+ "MSRValue": "0x4000000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400002 ",
+ "MSRValue": "0x1000400002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400002 ",
+ "MSRValue": "0x0800400002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_E_F",
@@ -1160,29 +1160,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080002 ",
+ "MSRValue": "0x1000080002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080002 ",
+ "MSRValue": "0x0800080002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010002 ",
+ "MSRValue": "0x0000010002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
@@ -1193,29 +1193,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000001 ",
+ "MSRValue": "0x4000000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400001 ",
+ "MSRValue": "0x1000400001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400001 ",
+ "MSRValue": "0x0800400001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_E_F",
@@ -1226,29 +1226,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080001 ",
+ "MSRValue": "0x1000080001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080001 ",
+ "MSRValue": "0x0800080001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010001 ",
+ "MSRValue": "0x0000010001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
@@ -1259,722 +1259,722 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000001 ",
+ "MSRValue": "0x0002000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000002 ",
+ "MSRValue": "0x0002000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000004 ",
+ "MSRValue": "0x0002000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000020 ",
+ "MSRValue": "0x0002000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000080 ",
+ "MSRValue": "0x0002000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000100 ",
+ "MSRValue": "0x0002000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000200 ",
+ "MSRValue": "0x0002000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000400 ",
+ "MSRValue": "0x0002000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002001000 ",
+ "MSRValue": "0x0002001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002002000 ",
+ "MSRValue": "0x0002002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002008000 ",
+ "MSRValue": "0x0002008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002003091 ",
+ "MSRValue": "0x0002003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000022 ",
+ "MSRValue": "0x0002000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000044 ",
+ "MSRValue": "0x0002000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00020032f7 ",
+ "MSRValue": "0x00020032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000070 ",
+ "MSRValue": "0x0002000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000001 ",
+ "MSRValue": "0x0004000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000002 ",
+ "MSRValue": "0x0004000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000004 ",
+ "MSRValue": "0x0004000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000020 ",
+ "MSRValue": "0x0004000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000040 ",
+ "MSRValue": "0x0004000040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000080 ",
+ "MSRValue": "0x0004000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000100 ",
+ "MSRValue": "0x0004000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000200 ",
+ "MSRValue": "0x0004000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000400 ",
+ "MSRValue": "0x0004000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004001000 ",
+ "MSRValue": "0x0004001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004002000 ",
+ "MSRValue": "0x0004002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004008000 ",
+ "MSRValue": "0x0004008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004003091 ",
+ "MSRValue": "0x0004003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000022 ",
+ "MSRValue": "0x0004000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000044 ",
+ "MSRValue": "0x0004000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00040032f7 ",
+ "MSRValue": "0x00040032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000070 ",
+ "MSRValue": "0x0004000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000001 ",
+ "MSRValue": "0x0008000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000002 ",
+ "MSRValue": "0x0008000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000004 ",
+ "MSRValue": "0x0008000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000020 ",
+ "MSRValue": "0x0008000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000080 ",
+ "MSRValue": "0x0008000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000100 ",
+ "MSRValue": "0x0008000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000200 ",
+ "MSRValue": "0x0008000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000400 ",
+ "MSRValue": "0x0008000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008001000 ",
+ "MSRValue": "0x0008001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008002000 ",
+ "MSRValue": "0x0008002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008008000 ",
+ "MSRValue": "0x0008008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008003091 ",
+ "MSRValue": "0x0008003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000022 ",
+ "MSRValue": "0x0008000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000044 ",
+ "MSRValue": "0x0008000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00080032f7 ",
+ "MSRValue": "0x00080032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000001 ",
+ "MSRValue": "0x0010000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000002 ",
+ "MSRValue": "0x0010000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000004 ",
+ "MSRValue": "0x0010000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000020 ",
+ "MSRValue": "0x0010000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000040 ",
+ "MSRValue": "0x0010000040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000080 ",
+ "MSRValue": "0x0010000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000100 ",
+ "MSRValue": "0x0010000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000200 ",
+ "MSRValue": "0x0010000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000400 ",
+ "MSRValue": "0x0010000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010001000 ",
+ "MSRValue": "0x0010001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010002000 ",
+ "MSRValue": "0x0010002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010008000 ",
+ "MSRValue": "0x0010008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010003091 ",
+ "MSRValue": "0x0010003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000022 ",
+ "MSRValue": "0x0010000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000044 ",
+ "MSRValue": "0x0010000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00100032f7 ",
+ "MSRValue": "0x00100032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000070 ",
+ "MSRValue": "0x0010000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180002 ",
+ "MSRValue": "0x1800180002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE",
@@ -1985,7 +1985,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180004 ",
+ "MSRValue": "0x1800180004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE",
@@ -1996,7 +1996,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180020 ",
+ "MSRValue": "0x1800180020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE",
@@ -2007,7 +2007,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180040 ",
+ "MSRValue": "0x1800180040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE",
@@ -2018,7 +2018,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180080 ",
+ "MSRValue": "0x1800180080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE",
@@ -2029,7 +2029,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180100 ",
+ "MSRValue": "0x1800180100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE",
@@ -2040,7 +2040,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180200 ",
+ "MSRValue": "0x1800180200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE",
@@ -2051,7 +2051,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180400 ",
+ "MSRValue": "0x1800180400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE",
@@ -2062,7 +2062,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800181000 ",
+ "MSRValue": "0x1800181000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE",
@@ -2073,7 +2073,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800182000 ",
+ "MSRValue": "0x1800182000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE",
@@ -2084,7 +2084,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800188000 ",
+ "MSRValue": "0x1800188000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE",
@@ -2095,7 +2095,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800183091 ",
+ "MSRValue": "0x1800183091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE",
@@ -2106,7 +2106,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180022 ",
+ "MSRValue": "0x1800180022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE",
@@ -2117,7 +2117,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180044 ",
+ "MSRValue": "0x1800180044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE",
@@ -2128,7 +2128,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x18001832f7 ",
+ "MSRValue": "0x18001832f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE",
@@ -2139,7 +2139,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180070 ",
+ "MSRValue": "0x1800180070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE",
@@ -2150,7 +2150,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400002 ",
+ "MSRValue": "0x1800400002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE",
@@ -2161,7 +2161,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400004 ",
+ "MSRValue": "0x1800400004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE",
@@ -2172,7 +2172,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400040 ",
+ "MSRValue": "0x1800400040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE",
@@ -2183,7 +2183,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400080 ",
+ "MSRValue": "0x1800400080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE",
@@ -2194,7 +2194,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400100 ",
+ "MSRValue": "0x1800400100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE",
@@ -2205,7 +2205,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400400 ",
+ "MSRValue": "0x1800400400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE",
@@ -2216,7 +2216,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800401000 ",
+ "MSRValue": "0x1800401000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE",
@@ -2227,7 +2227,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800402000 ",
+ "MSRValue": "0x1800402000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE",
@@ -2238,7 +2238,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800408000 ",
+ "MSRValue": "0x1800408000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE",
@@ -2249,7 +2249,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800403091 ",
+ "MSRValue": "0x1800403091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE",
@@ -2260,7 +2260,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400022 ",
+ "MSRValue": "0x1800400022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE",
@@ -2271,7 +2271,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400044 ",
+ "MSRValue": "0x1800400044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE",
@@ -2282,7 +2282,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x18004032f7 ",
+ "MSRValue": "0x18004032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE",
@@ -2293,7 +2293,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400070 ",
+ "MSRValue": "0x1800400070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
index 700652566200..c6bb16ba0f86 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
@@ -9,18 +9,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400070 ",
+ "MSRValue": "0x0100400070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200070 ",
+ "MSRValue": "0x0080200070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_NEAR",
@@ -31,18 +31,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000070 ",
+ "MSRValue": "0x0101000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800070 ",
+ "MSRValue": "0x0080800070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_NEAR",
@@ -53,18 +53,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x01004032f7 ",
+ "MSRValue": "0x01004032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00802032f7 ",
+ "MSRValue": "0x00802032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_NEAR",
@@ -75,18 +75,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x01010032f7 ",
+ "MSRValue": "0x01010032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00808032f7 ",
+ "MSRValue": "0x00808032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_NEAR",
@@ -97,18 +97,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400044 ",
+ "MSRValue": "0x0100400044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200044 ",
+ "MSRValue": "0x0080200044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_NEAR",
@@ -119,18 +119,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000044 ",
+ "MSRValue": "0x0101000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800044 ",
+ "MSRValue": "0x0080800044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_NEAR",
@@ -141,18 +141,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400022 ",
+ "MSRValue": "0x0100400022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200022 ",
+ "MSRValue": "0x0080200022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_NEAR",
@@ -163,18 +163,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000022 ",
+ "MSRValue": "0x0101000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800022 ",
+ "MSRValue": "0x0080800022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_NEAR",
@@ -185,18 +185,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100403091 ",
+ "MSRValue": "0x0100403091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080203091 ",
+ "MSRValue": "0x0080203091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_NEAR",
@@ -207,18 +207,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101003091 ",
+ "MSRValue": "0x0101003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080803091 ",
+ "MSRValue": "0x0080803091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_NEAR",
@@ -229,18 +229,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100408000 ",
+ "MSRValue": "0x0100408000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080208000 ",
+ "MSRValue": "0x0080208000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_NEAR",
@@ -251,18 +251,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101008000 ",
+ "MSRValue": "0x0101008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts any request that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080808000 ",
+ "MSRValue": "0x0080808000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_NEAR",
@@ -273,18 +273,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100402000 ",
+ "MSRValue": "0x0100402000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080202000 ",
+ "MSRValue": "0x0080202000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_NEAR",
@@ -295,18 +295,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101002000 ",
+ "MSRValue": "0x0101002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080802000 ",
+ "MSRValue": "0x0080802000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_NEAR",
@@ -317,18 +317,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100401000 ",
+ "MSRValue": "0x0100401000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080201000 ",
+ "MSRValue": "0x0080201000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_NEAR",
@@ -339,18 +339,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101001000 ",
+ "MSRValue": "0x0101001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080801000 ",
+ "MSRValue": "0x0080801000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_NEAR",
@@ -361,18 +361,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400400 ",
+ "MSRValue": "0x0100400400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200400 ",
+ "MSRValue": "0x0080200400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_NEAR",
@@ -383,18 +383,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000400 ",
+ "MSRValue": "0x0101000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800400 ",
+ "MSRValue": "0x0080800400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_NEAR",
@@ -405,18 +405,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400200 ",
+ "MSRValue": "0x0100400200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200200 ",
+ "MSRValue": "0x0080200200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_NEAR",
@@ -427,18 +427,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000200 ",
+ "MSRValue": "0x0101000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800200 ",
+ "MSRValue": "0x0080800200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_NEAR",
@@ -449,18 +449,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400100 ",
+ "MSRValue": "0x0100400100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_FAR",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200100 ",
+ "MSRValue": "0x0080200100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_NEAR",
@@ -471,18 +471,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000100 ",
+ "MSRValue": "0x0101000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_FAR",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800100 ",
+ "MSRValue": "0x0080800100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_NEAR",
@@ -493,7 +493,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x2000020080 ",
+ "MSRValue": "0x2000020080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.NON_DRAM",
@@ -504,18 +504,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400080 ",
+ "MSRValue": "0x0100400080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200080 ",
+ "MSRValue": "0x0080200080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_NEAR",
@@ -526,18 +526,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000080 ",
+ "MSRValue": "0x0101000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800080 ",
+ "MSRValue": "0x0080800080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_NEAR",
@@ -548,18 +548,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400040 ",
+ "MSRValue": "0x0100400040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200040 ",
+ "MSRValue": "0x0080200040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_NEAR",
@@ -570,18 +570,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000040 ",
+ "MSRValue": "0x0101000040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800040 ",
+ "MSRValue": "0x0080800040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_NEAR",
@@ -592,7 +592,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x2000020020 ",
+ "MSRValue": "0x2000020020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.NON_DRAM",
@@ -603,18 +603,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400020 ",
+ "MSRValue": "0x0100400020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200020 ",
+ "MSRValue": "0x0080200020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_NEAR",
@@ -625,18 +625,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000020 ",
+ "MSRValue": "0x0101000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800020 ",
+ "MSRValue": "0x0080800020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_NEAR",
@@ -647,18 +647,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400004 ",
+ "MSRValue": "0x0100400004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200004 ",
+ "MSRValue": "0x0080200004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_NEAR",
@@ -669,18 +669,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000004 ",
+ "MSRValue": "0x0101000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800004 ",
+ "MSRValue": "0x0080800004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_NEAR",
@@ -691,18 +691,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400002 ",
+ "MSRValue": "0x0100400002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200002 ",
+ "MSRValue": "0x0080200002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_NEAR",
@@ -713,18 +713,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000002 ",
+ "MSRValue": "0x0101000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800002 ",
+ "MSRValue": "0x0080800002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_NEAR",
@@ -735,18 +735,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400001 ",
+ "MSRValue": "0x0100400001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200001 ",
+ "MSRValue": "0x0080200001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_NEAR",
@@ -757,18 +757,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000001 ",
+ "MSRValue": "0x0101000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800001 ",
+ "MSRValue": "0x0080800001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_NEAR",
@@ -779,7 +779,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600001 ",
+ "MSRValue": "0x0180600001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM",
@@ -790,7 +790,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600002 ",
+ "MSRValue": "0x0180600002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM",
@@ -801,7 +801,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600004 ",
+ "MSRValue": "0x0180600004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM",
@@ -812,7 +812,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600020 ",
+ "MSRValue": "0x0180600020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM",
@@ -823,7 +823,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600080 ",
+ "MSRValue": "0x0180600080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM",
@@ -834,7 +834,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600100 ",
+ "MSRValue": "0x0180600100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM",
@@ -845,7 +845,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600200 ",
+ "MSRValue": "0x0180600200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM",
@@ -856,7 +856,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600400 ",
+ "MSRValue": "0x0180600400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM",
@@ -867,7 +867,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180601000 ",
+ "MSRValue": "0x0180601000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM",
@@ -878,7 +878,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180608000 ",
+ "MSRValue": "0x0180608000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM",
@@ -889,7 +889,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180603091 ",
+ "MSRValue": "0x0180603091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM",
@@ -900,7 +900,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600022 ",
+ "MSRValue": "0x0180600022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM",
@@ -911,7 +911,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600044 ",
+ "MSRValue": "0x0180600044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM",
@@ -922,7 +922,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x01806032f7 ",
+ "MSRValue": "0x01806032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM",
@@ -933,7 +933,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600070 ",
+ "MSRValue": "0x0180600070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM",
@@ -944,7 +944,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800001 ",
+ "MSRValue": "0x0181800001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR",
@@ -955,7 +955,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800002 ",
+ "MSRValue": "0x0181800002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR",
@@ -966,7 +966,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800004 ",
+ "MSRValue": "0x0181800004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR",
@@ -977,7 +977,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800020 ",
+ "MSRValue": "0x0181800020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR",
@@ -988,7 +988,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800040 ",
+ "MSRValue": "0x0181800040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR",
@@ -999,7 +999,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800080 ",
+ "MSRValue": "0x0181800080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR",
@@ -1010,7 +1010,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800200 ",
+ "MSRValue": "0x0181800200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR",
@@ -1021,7 +1021,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800400 ",
+ "MSRValue": "0x0181800400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR",
@@ -1032,7 +1032,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181801000 ",
+ "MSRValue": "0x0181801000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR",
@@ -1043,7 +1043,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181802000 ",
+ "MSRValue": "0x0181802000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR",
@@ -1054,7 +1054,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181808000 ",
+ "MSRValue": "0x0181808000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR",
@@ -1065,7 +1065,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181803091 ",
+ "MSRValue": "0x0181803091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR",
@@ -1076,7 +1076,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800022 ",
+ "MSRValue": "0x0181800022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR",
@@ -1087,7 +1087,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800044 ",
+ "MSRValue": "0x0181800044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR",
@@ -1098,7 +1098,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x01818032f7 ",
+ "MSRValue": "0x01818032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.DDR",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
index bb5494cfb5ae..92e4ef2e22c6 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
@@ -144,7 +144,7 @@
"BriefDescription": "Counts the number of micro-ops retired that are from the complex flows issued by the micro-sequencer (MS)."
},
{
- "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. ",
+ "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists.",
"EventCode": "0xC2",
"Counter": "0,1",
"UMask": "0x10",
@@ -218,7 +218,7 @@
"UMask": "0x20",
"EventName": "NO_ALLOC_CYCLES.RAT_STALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted. "
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted."
},
{
"PublicDescription": "This event counts the number of core cycles when no uops are allocated, the instruction queue is empty and the alloc pipe is stalled waiting for instructions to be fetched.",
@@ -251,7 +251,7 @@
"UMask": "0x1f",
"EventName": "RS_FULL_STALL.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full. "
+ "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full."
},
{
"EventCode": "0xC0",
@@ -268,11 +268,10 @@
"UMask": "0x1",
"EventName": "CYCLES_DIV_BUSY.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider. "
+ "BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider."
},
{
"PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -296,8 +295,7 @@
"BriefDescription": "Counts the number of unhalted reference clock cycles"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter\r\n",
- "EventCode": "0x00",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter",
"Counter": "Fixed counter 2",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -305,7 +303,6 @@
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 3",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -343,7 +340,7 @@
"UMask": "0x1",
"EventName": "RECYCLEQ.LD_BLOCK_ST_FORWARD",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store ",
+ "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store",
"Data_LA": "1"
},
{
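
Besides the whitespace cleanups, the pipeline.json hunks drop "EventCode": "0x00" from the three fixed-counter events. Fixed counters are selected by counter slot rather than by a programmable event code, so the field carried no information. A quick post-patch consistency check, sketched in Python with an illustrative path:

import json

with open("tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json") as f:
    events = json.load(f)
for ev in events:
    if ev.get("Counter", "").startswith("Fixed"):
        # After this patch, fixed-counter events carry no EventCode field.
        assert "EventCode" not in ev, ev["EventName"]
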
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
index f31594507f8c..9e493977771f 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
@@ -36,7 +36,7 @@
"EdgeDetect": "1"
},
{
- "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress. ",
+ "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress.",
"EventCode": "0x05",
"Counter": "0,1",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index e05c2c8458fc..d6984a3017e0 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -33,3 +33,4 @@ GenuineIntel-6-25,v2,westmereep-sp,core
GenuineIntel-6-2F,v2,westmereex,core
GenuineIntel-6-55-[01234],v1,skylakex,core
GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
+AuthenticAMD-23-[[:xdigit:]]+,v1,amdfam17h,core
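
The new mapfile row routes every AMD family 17h part to the amdfam17h event directory: perf builds a vendor-family-model string from CPUID and matches it against the first column, where [[:xdigit:]]+ is a POSIX class matching any hex model number. A rough Python equivalent of that lookup (Python's re has no POSIX classes, so [0-9a-fA-F] stands in; the CPUID strings are made up):

import re

MAPFILE_ROW = ("AuthenticAMD-23-[0-9a-fA-F]+", "v1", "amdfam17h", "core")

for cpuid in ("AuthenticAMD-23-1", "AuthenticAMD-23-8", "GenuineIntel-6-55-4"):
    table = MAPFILE_ROW[2] if re.fullmatch(MAPFILE_ROW[0], cpuid) else None
    print(cpuid, "->", table)   # both family-17h strings map to amdfam17h
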
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
index 16b04a20bc12..bb79e89c2049 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
@@ -1,207 +1,200 @@
[
{
- "PEBS": "1",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of load uops retired",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of store uops retired.",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
- "EventCode": "0xD1",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from L2 hardware prefetchers.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD2",
+ "EventCode": "0x27",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that miss cache lines.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
- "EventCode": "0xD2",
+ "EventCode": "0x27",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in E state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD2",
+ "EventCode": "0x27",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired demand loads that missed the last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops.",
- "EventCode": "0xD4",
+ "EventCode": "0x27",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xf",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that access cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier. ",
- "EventCode": "0x51",
+ "EventCode": "0x28",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements.",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x51",
+ "EventCode": "0x28",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "L1D.ALLOCATED_IN_M",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Allocated L1D data cache lines in M state.",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_S",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x51",
+ "EventCode": "0x28",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "L1D.EVICTION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x51",
+ "EventCode": "0x28",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "EventName": "L1D.ALL_M_REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -224,12 +217,61 @@
"CounterHTOff": "2"
},
{
- "EventCode": "0x63",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1D is locked.",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D.ALLOCATED_IN_M",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Allocated L1D data cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1D.EVICTION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L1D.ALL_M_REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -254,6 +296,16 @@
{
"EventCode": "0x60",
"Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
@@ -263,6 +315,16 @@
{
"EventCode": "0x60",
"Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
@@ -280,6 +342,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1D is locked.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xB0",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -325,148 +396,182 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
+ "EventCode": "0xBF",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache.",
+ "UMask": "0x5",
+ "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x11",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x12",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_RQSTS.PF_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load uops retired (Precise Event)",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_RQSTS.PF_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x27",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of store uops retired. (Precise Event - PEBS)",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store uops. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "L2_STORE_LOCK_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFOs that miss cache lines.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x27",
+ "PEBS": "1",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x27",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required. (Precise Event - PEBS)",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x27",
+ "PEBS": "1",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_STORE_LOCK_RQSTS.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFOs that access cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "L2_L1D_WB_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state. (Precise Event - PEBS)",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "L2_L1D_WB_RQSTS.HIT_S",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2. (Precise Event - PEBS)",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "L2_L1D_WB_RQSTS.HIT_E",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "EventName": "L2_L1D_WB_RQSTS.HIT_M",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired demand loads that missed the last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops. (Precise Event - PEBS)",
+ "EventCode": "0xD4",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_L1D_WB_RQSTS.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xF0",
@@ -623,24 +728,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xF4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -650,93 +737,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from L2 hardware prefetchers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xBF",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0244",
"Counter": "0,1,2,3",
@@ -1825,7 +1825,7 @@
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
+ "BriefDescription": "REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1837,7 +1837,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_M.HITM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1849,7 +1849,7 @@
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1861,7 +1861,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
+ "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1873,7 +1873,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
+ "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
"CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
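
The sandybridge cache.json change is mostly a regeneration: entries are re-sorted by event code, PEBS-capable descriptions gain an explicit "(Precise Event - PEBS)" suffix, and stray blanks are dropped. One way to sanity-check such a rewrite is to compare old and new files as order-insensitive multisets; a sketch with illustrative paths:

import json

def canon(path):
    with open(path) as f:
        return sorted(json.dumps(ev, sort_keys=True) for ev in json.load(f))

before, after = canon("cache.json.orig"), canon("cache.json")
# Pure moves compare equal; the description rewrites above will not.
print("pure reorder" if before == after else "reorder plus content changes")
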
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
index 982eda48785e..ce26537c7d47 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
@@ -1,68 +1,5 @@
[
{
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OTHER_ASSISTS.AVX_STORE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ASSIST.X87_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "FP_ASSIST.X87_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_ASSIST.SIMD_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x10",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -126,6 +63,69 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OTHER_ASSISTS.AVX_STORE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to Output values.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to input values.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1e",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
index 1b7b1dd36c68..e58ed14a204c 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
@@ -1,24 +1,5 @@
[
{
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -39,159 +20,201 @@
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
- "EventCode": "0x9C",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "UMask": "0x10",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x9C",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
"CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x9C",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xAB",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
- "EventCode": "0xAB",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xAC",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB_FILL.OTHER_CANCEL",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
+ "BriefDescription": "Cycles MITE is delivering any Uop.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xAC",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more information.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "UMask": "0x3c",
+ "EventName": "IDQ.MITE_ALL_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x79",
+ "EventCode": "0x80",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -223,83 +246,60 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
+ "EventCode": "0x9C",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x79",
+ "EventCode": "0xAB",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "UMask": "0x1",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops.",
- "CounterMask": "4",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x79",
+ "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
+ "EventCode": "0xAB",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "UMask": "0x2",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop.",
- "CounterMask": "1",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xAC",
"Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "DSB_FILL.ALL_CANCEL",
+ "UMask": "0x2",
+ "EventName": "DSB_FILL.OTHER_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
+ "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x79",
+ "EventCode": "0xAC",
"Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "IDQ.MITE_ALL_UOPS",
+ "UMask": "0x8",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x79",
+ "EventCode": "0xAC",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
+ "UMask": "0xa",
+ "EventName": "DSB_FILL.ALL_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
+ "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
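
For context on the IDQ_UOPS_NOT_DELIVERED entries added above: their PublicDescription explains that up to 4 uops can be delivered per cycle and that the event counts the slots left unfilled while the back-end could still accept uops. A minimal sketch of the resulting "frontend bound" arithmetic follows; it is illustrative and not part of the patch. The 4-slots-per-cycle width comes from that description, and the counter totals are assumed to come from a perf measurement (the variable names are placeholders, not a perf API):

    # Illustrative sketch: the top-down "frontend bound" fraction implied
    # by the IDQ_UOPS_NOT_DELIVERED.CORE description above.
    def frontend_bound(idq_uops_not_delivered_core, cpu_clk_unhalted_thread,
                       slots_per_cycle=4):
        # Total issue slots = pipeline width * unhalted core cycles;
        # the event already holds the number of wasted slots.
        total_slots = slots_per_cycle * cpu_clk_unhalted_thread
        return idq_uops_not_delivered_core / total_slots

    # One uop undelivered per cycle on average -> 25% of slots wasted.
    print(frontend_bound(1_000_000, 1_000_000))  # 0.25
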
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
index e6dfa89d00f3..78c1a987f9a2 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
@@ -1,5 +1,32 @@
[
{
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.LLC_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -126,33 +153,6 @@
"CounterHTOff": "3"
},
{
- "EventCode": "0xBE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.LLC_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x300400244",
"Counter": "0,1,2,3",
@@ -367,7 +367,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -379,7 +379,7 @@
"EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS_LOCAL.ANY_LLC_HIT",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
+ "BriefDescription": "REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
"CounterHTOff": "0,1,2,3"
},
{
@@ -391,7 +391,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -403,7 +403,7 @@
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -415,7 +415,7 @@
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -427,7 +427,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -439,7 +439,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
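
The MACHINE_CLEARS.MEMORY_ORDERING description above notes that machine clears are costly when frequent, but the raw count alone says little about frequency. A small sketch, again illustrative rather than part of the patch, normalizing the count against INST_RETIRED.ANY into an MPKI-style rate; both totals are assumed to come from the same measurement window:

    # Illustrative sketch: express memory-ordering machine clears as a
    # rate per 1000 retired instructions so runs of different lengths
    # can be compared.
    def clears_per_kilo_instr(machine_clears_memory_ordering, inst_retired_any):
        return 1000.0 * machine_clears_memory_ordering / inst_retired_any

    # e.g. 1200 clears over 3 billion instructions -> 0.0004 per 1000 instructions.
    print(clears_per_kilo_instr(1200, 3_000_000_000))
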
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/other.json b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
index 64b195b82c50..874eb40a2e0f 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
@@ -9,6 +9,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x4E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HW_PRE_REQ.DL1_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x5C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -38,15 +47,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x4E",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "HW_PRE_REQ.DL1_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"Counter": "0,1,2,3",
"UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
index 34a519d9bfa0..b7150f65f16d 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
@@ -1,289 +1,307 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ",
- "EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
- "EventCode": "0x00",
- "Counter": "Fixed counter 3",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
- },
- {
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not taken macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "UMask": "0x10",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
+ "EventCode": "0x07",
"Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired direct near calls.",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x07",
"Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect calls.",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-conditional branches.",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "UMask": "0x3",
+ "EdgeDetect": "1",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0xc8",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect return branches.",
+ "UMask": "0x40",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
+ "EventCode": "0x0E",
"Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired direct near calls.",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x0E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x0E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "UMask": "0x1",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when divider is busy executing divide operations.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "PublicDescription": "This event counts the number of the divide operations executed.",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "ARITH.FPU_DIV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Divide operations executed.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted direct near calls.",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state.",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xA8",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x87",
+ "EventCode": "0x4C",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x87",
+ "EventCode": "0x4C",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ILD_STALL.IQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stall cycles because IQ is full.",
+ "UMask": "0x2",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0D",
+ "EventCode": "0x59",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "UMask": "0x20",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
+ "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual.",
"EventCode": "0x59",
"Counter": "0,1,2,3",
"UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
+ "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
+ "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
"EventCode": "0x59",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -302,48 +320,21 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RESOURCE_STALLS.LB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RESOURCE_STALLS.RS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA2",
+ "EventCode": "0x5B",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "UMask": "0xc",
+ "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "BriefDescription": "Cycles with either free list is empty.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0x5B",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RESOURCE_STALLS.ROB",
+ "UMask": "0xf",
+ "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "BriefDescription": "Resource stalls2 control structures full for physical registers.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -356,702 +347,663 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
- "EventCode": "0x0E",
+ "EventCode": "0x5B",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
+ "UMask": "0x4f",
+ "EventName": "RESOURCE_STALLS2.OOO_RSRC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
+ "BriefDescription": "Resource stalls out of order resources full.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
+ "EventCode": "0x5E",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
+ "EventCode": "0x5E",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5E",
+ "EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xCC",
+ "EventCode": "0x87",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR.",
+ "BriefDescription": "Stall cycles because IQ is full.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
- "EventCode": "0xC3",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
+ "UMask": "0x41",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken macro-conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
- "EventCode": "0xC3",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "UMask": "0x81",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
+ "UMask": "0x82",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of micro-ops retired.",
- "EventCode": "0xC2",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops.",
+ "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.",
- "EventCode": "0xC2",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
+ "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x90",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xa0",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
+ "UMask": "0xc1",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "UMask": "0xc2",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0xc4",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
+ "UMask": "0xc8",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect return branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "UMask": "0xd0",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired direct near calls.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
+ "UMask": "0xff",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
+ "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "EventCode": "0xC4",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x81",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "UMask": "0x84",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
+ "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "UMask": "0x90",
+ "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted not taken branch instructions retired.",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted taken branch instructions retired.",
+ "UMask": "0xc1",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc4",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC1",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired instructions experiencing ITLB misses.",
+ "UMask": "0xd0",
+ "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted direct near calls.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x14",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.FPU_DIV_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divider is busy executing divide operations.",
+ "UMask": "0xff",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of the divide operations executed.",
- "EventCode": "0x14",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "ARITH.FPU_DIV",
- "SampleAfterValue": "100003",
- "BriefDescription": "Divide operations executed.",
- "CounterMask": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_DISPATCHED.THREAD",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched per thread.",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "UOPS_DISPATCHED.CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched from any thread.",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0xc",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
+ "UMask": "0x30",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "2",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
- "CounterMask": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "6",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x4C",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x4C",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LOAD_HIT_PRE.HW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
+ "EventName": "RESOURCE_STALLS.LB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
- "EventCode": "0x03",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x03",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventName": "RESOURCE_STALLS.SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x03",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
+ "UMask": "0xa",
+ "EventName": "RESOURCE_STALLS.LB_SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
- "EventCode": "0x07",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare.",
+ "UMask": "0xe",
+ "EventName": "RESOURCE_STALLS.MEM_RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x07",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB6",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "AGU_BYPASS_CANCEL.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+ "UMask": "0xf0",
+ "EventName": "RESOURCE_STALLS.OOO_RSRC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "Counter": "2",
"UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "2",
+ "CounterHTOff": "2"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "6",
+ "CounterHTOff": "2"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "UMask": "0x1",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
+ "BriefDescription": "Uops dispatched per thread.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "BriefDescription": "Uops dispatched from any thread.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.PREC_DIST",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
- "TakenAlone": "1",
- "CounterHTOff": "1"
- },
- {
- "EventCode": "0x5B",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls2 control structures full for physical registers.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5B",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with either free list is empty.",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "RESOURCE_STALLS.MEM_RS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "RESOURCE_STALLS.OOO_RSRC",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5B",
+ "EventCode": "0xB1",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "RESOURCE_STALLS2.OOO_RSRC",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls out of order resources full.",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xB6",
"Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "RESOURCE_STALLS.LB_SB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
+ "UMask": "0x1",
+ "EventName": "AGU_BYPASS_CANCEL.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0D",
+ "EventCode": "0xC0",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
- "CounterMask": "1",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.",
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.PREC_DIST",
"SampleAfterValue": "2000003",
- "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
+ "TakenAlone": "1",
+ "CounterHTOff": "1"
},
{
- "EventCode": "0x0D",
+ "EventCode": "0xC1",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "EdgeDetect": "1",
- "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired instructions experiencing ITLB misses.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xE6",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of micro-ops retired. (Precise Event)",
+ "EventCode": "0xC2",
"Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x89",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC2",
@@ -1065,13 +1017,14 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xA8",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization. (Precise Event - PEBS)",
+ "EventCode": "0xC2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1086,135 +1039,188 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
+ "EventCode": "0xC3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0D",
+ "PEBS": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "2",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
+ "UMask": "0x20",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "UMask": "0x40",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "EventCode": "0x3C",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect mispredicted near call instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted not taken branch instructions retired.(Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted taken branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "BriefDescription": "Count cases of saving new LBR.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
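
For readers cross-checking the tables above: the EventCode, UMask, CounterMask, Invert and AnyThread fields map onto the standard Intel core-PMU format attributes (event, umask, cmask, inv, any) that perf exposes, so each JSON entry can be translated into a raw perf event spec. A minimal sketch in Python, assuming that mapping; the helper and the sample entry are illustrative and not part of this patch:

def perf_event_string(event):
    """Build a perf raw-event spec from one JSON entry (illustrative)."""
    parts = [f"event={event['EventCode']}", f"umask={event['UMask']}"]
    if event.get("CounterMask", "0") != "0":
        parts.append(f"cmask={event['CounterMask']}")
    if event.get("Invert") == "1":
        parts.append("inv=1")
    if event.get("AnyThread") == "1":
        parts.append("any=1")
    return "cpu/" + ",".join(parts) + "/"

# UOPS_EXECUTED.CORE_CYCLES_GE_2 from the table above:
evt = {"EventCode": "0xB1", "UMask": "0x2", "CounterMask": "2"}
print(perf_event_string(evt))  # cpu/event=0xB1,umask=0x2,cmask=2/
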
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
index fd7d7c438226..cfeba5067bab 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -1,140 +1,226 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
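
Taken together, the four TopdownL1 expressions above partition the 4-wide issue bandwidth into fractions that sum to 1. A worked sketch of the non-SMT formulas in Python; all counts below are made up for illustration, not measured:

cycles = 1_000_000
slots = 4 * cycles                      # the SLOTS metric above
idq_uops_not_delivered_core = 800_000
uops_issued_any = 3_000_000
uops_retired_retire_slots = 2_800_000
int_misc_recovery_cycles = 25_000

frontend_bound = idq_uops_not_delivered_core / slots
bad_speculation = (uops_issued_any - uops_retired_retire_slots
                   + 4 * int_misc_recovery_cycles) / slots
retiring = uops_retired_retire_slots / slots
backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

# For these inputs: 20.0%, 7.5%, 2.5% and 70.0% respectively.
for name, value in [("Frontend_Bound", frontend_bound),
                    ("Bad_Speculation", bad_speculation),
                    ("Backend_Bound", backend_bound),
                    ("Retiring", retiring)]:
    print(f"{name:16s} {value:6.1%}")
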
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
index a654ab771fce..b8eccce5d75d 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
@@ -1,131 +1,131 @@
[
{
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "EPT.WALK_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
+ "EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks.",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x85",
+ "EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
- "EventCode": "0x85",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_DURATION",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when PMH is busy with page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x85",
+ "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
+ "EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
- "EventCode": "0x08",
+ "EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when PMH is busy with page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
- "EventCode": "0x08",
+ "EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when PMH is busy with page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
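
Since the *.WALK_DURATION events above count cycles in which the page miss handler is busy, a common use is to express them as a share of unhalted cycles to gauge TLB pressure. A small Python sketch; the counts are illustrative, not measured:

cpu_clk_unhalted_thread = 5_000_000
walk_cycles = {
    "DTLB_LOAD_MISSES.WALK_DURATION": 120_000,
    "DTLB_STORE_MISSES.WALK_DURATION": 40_000,
    "ITLB_MISSES.WALK_DURATION": 15_000,
}
total_walk = sum(walk_cycles.values())
# 175_000 / 5_000_000 = 3.5% of unhalted cycles spent in page walks.
print(f"page-walk share: {total_walk / cpu_clk_unhalted_thread:.1%}")
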
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
index 82be7d1b8b81..805ef1436539 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
@@ -36,7 +36,7 @@
"BriefDescription": "L2 cache request misses"
},
{
- "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events. \r\n",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
"EventCode": "0x86",
"Counter": "0,1",
"UMask": "0x4",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/other.json b/tools/perf/pmu-events/arch/x86/silvermont/other.json
new file mode 100644
index 000000000000..47814046fa9d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/silvermont/other.json
@@ -0,0 +1,20 @@
+[
+ {
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+ "EventCode": "0x86",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss."
+ },
+ {
+ "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "EventCode": "0x86",
+ "Counter": "0,1",
+ "UMask": "0x3f",
+ "EventName": "FETCH_STALL.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles code-fetch stalled due to any reason."
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
index 7468af99190a..1ed62ad4cf77 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
@@ -210,7 +210,7 @@
"UMask": "0x4",
"EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted "
+ "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted"
},
{
"EventCode": "0xCA",
@@ -275,7 +275,6 @@
},
{
"PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps. Background: Modern microprocessors employ extensive pipelining and speculative techniques. Since sometimes an instruction is started but never completed, the notion of \"retirement\" is introduced. A retired instruction is one that commits its states. Or stated differently, an instruction might be abandoned at some point. No instruction is truly finished until it retires. This counter measures the number of completed instructions. The fixed event is INST_RETIRED.ANY and the programmable event is INST_RETIRED.ANY_P.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -284,7 +283,6 @@
},
{
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. In systems with a constant core frequency, this event can give you a measurement of the elapsed time while the core was not in halt state by dividing the event count by the core frequency. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.CORE",
@@ -293,7 +291,6 @@
},
{
"PublicDescription": "Counts the number of reference cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
- "EventCode": "0x00",
"Counter": "Fixed counter 3",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json
index 54bfe9e4045c..720458139049 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json
@@ -60,10 +60,10 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "UMask": "0xc1",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
@@ -73,7 +73,7 @@
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
+ "UMask": "0xc2",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "RFO requests that hit L2 cache",
@@ -83,7 +83,7 @@
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x44",
+ "UMask": "0xc4",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
@@ -482,7 +482,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -554,7 +554,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -661,13 +661,13 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+ "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
"EventCode": "0xF2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "L2_LINES_OUT.USELESS_PREF",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -690,249 +690,2238 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC01C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400001 ",
+ "MSRValue": "0x0200028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests have any response type.",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000018000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC01C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC01C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400001 ",
+ "MSRValue": "0x1000400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400001 ",
+ "MSRValue": "0x0400400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400001 ",
+ "MSRValue": "0x0200400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400001 ",
+ "MSRValue": "0x0100400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400001 ",
+ "MSRValue": "0x0080400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0001 ",
+ "MSRValue": "0x3FC01C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0001 ",
+ "MSRValue": "0x10001C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0001 ",
+ "MSRValue": "0x04001C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0001 ",
+ "MSRValue": "0x02001C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0001 ",
+ "MSRValue": "0x01001C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0001 ",
+ "MSRValue": "0x00801C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401C0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020001 ",
+ "MSRValue": "0x0080100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020001 ",
+ "MSRValue": "0x1000020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020001 ",
+ "MSRValue": "0x0400020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020001 ",
+ "MSRValue": "0x0200020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020001 ",
+ "MSRValue": "0x0100020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020001 ",
+ "MSRValue": "0x0080020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010001 ",
+ "MSRValue": "0x0000010001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that have any response type.",
+ "BriefDescription": "Counts demand data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
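
The OFFCORE_RESPONSE.* entries above are matched-MSR events: event code 0xB7 pairs with MSR 0x1a6 and 0xBB with MSR 0x1a7, and "MSRValue" is the request-type/supplier/snoop bit mask written to that MSR. A minimal sketch of turning one such JSON entry into a raw perf event descriptor, assuming the standard "offcore_rsp" format attribute of the cpu PMU (the helper name is illustrative):

    import json

    def to_perf_event(entry):
        # "EventCode" lists the two alternatives ("0xB7, 0xBB"); either works,
        # each paired with its own match MSR (0x1a6 or 0x1a7), so take the first.
        event = entry["EventCode"].split(",")[0].strip()
        return "cpu/event={},umask={},offcore_rsp={}/".format(
            event, entry["UMask"], entry["MSRValue"])

    entry = json.loads("""{
        "EventCode": "0xB7, 0xBB",
        "MSRValue": "0x3FC0100002",
        "UMask": "0x1",
        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP"
    }""")
    print(to_perf_event(entry))
    # -> cpu/event=0xB7,umask=0x1,offcore_rsp=0x3FC0100002/
    # countable with, e.g.: perf stat -e 'cpu/event=0xB7,umask=0x1,offcore_rsp=0x3FC0100002/' -a sleep 1
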
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
index 578dff5bd823..7fa95a35e3ca 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -177,7 +177,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -242,7 +242,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -253,7 +253,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
"EventCode": "0xC6",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
@@ -360,7 +360,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"EventCode": "0xC6",
"MSRValue": "0x400806",
"Counter": "0,1,2,3",
@@ -374,7 +374,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"EventCode": "0xC6",
"MSRValue": "0x401006",
"Counter": "0,1,2,3",
@@ -388,7 +388,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"EventCode": "0xC6",
"MSRValue": "0x402006",
"Counter": "0,1,2,3",
@@ -454,7 +454,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
"EventCode": "0xC6",
"MSRValue": "0x100206",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json
index 3bd8b712c889..f197b4c7695b 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json
@@ -215,7 +215,7 @@
"UMask": "0x4",
"EventName": "HLE_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -237,6 +237,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -292,7 +293,7 @@
"UMask": "0x4",
"EventName": "RTM_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ",
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -346,7 +347,7 @@
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x4",
"Counter": "0,1,2,3",
@@ -354,13 +355,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x8",
"Counter": "0,1,2,3",
@@ -368,13 +369,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"SampleAfterValue": "50021",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x10",
"Counter": "0,1,2,3",
@@ -382,13 +383,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"SampleAfterValue": "20011",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x20",
"Counter": "0,1,2,3",
@@ -396,13 +397,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x40",
"Counter": "0,1,2,3",
@@ -410,13 +411,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"SampleAfterValue": "2003",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x80",
"Counter": "0,1,2,3",
@@ -424,13 +425,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"SampleAfterValue": "1009",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x100",
"Counter": "0,1,2,3",
@@ -438,13 +439,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"SampleAfterValue": "503",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x200",
"Counter": "0,1,2,3",
@@ -452,163 +453,1151 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"SampleAfterValue": "101",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000001 ",
+ "MSRValue": "0x3FFC408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00BC408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC4008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FFC400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00BC400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC4000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FFC400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00BC400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC4000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FFC400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203C400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000001 ",
+ "MSRValue": "0x103C400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000001 ",
+ "MSRValue": "0x043C400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000001 ",
+ "MSRValue": "0x023C400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000001 ",
+ "MSRValue": "0x013C400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000001 ",
+ "MSRValue": "0x00BC400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000001 ",
+ "MSRValue": "0x007C400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC4000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000001 ",
+ "MSRValue": "0x1004000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000001 ",
+ "MSRValue": "0x0404000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000001 ",
+ "MSRValue": "0x0204000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000001 ",
+ "MSRValue": "0x0104000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000001 ",
+ "MSRValue": "0x0084000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001C0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
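A note for readers decoding the OFFCORE_RESPONSE entries above: the MSRValue programmed into MSR 0x1a6/0x1a7 is a bit mask, and the entries themselves expose the encoding -- values ending in 0x...0001 pair with DEMAND_DATA_RD, 0x...0002 with DEMAND_RFO, 0x...0004 with DEMAND_CODE_RD, and 0x...8000 with OTHER, while the higher bits vary with the supplier/snoop suffix of the event name. A minimal Python sketch of that split, assuming the conventional request-type/attribute layout; the low/high boundary is illustrative, not a per-bit decoding for this specific model:

# Split an OFFCORE_RESPONSE MSRValue from the entries above into the
# request-type field and the supplier/snoop attribute bits.

REQUEST_TYPES = {  # low-bit encodings visible in this file
    0x0001: "DEMAND_DATA_RD",
    0x0002: "DEMAND_RFO",
    0x0004: "DEMAND_CODE_RD",
    0x8000: "OTHER",
}

def decompose(msr_value):
    request = msr_value & 0xFFFF    # request-type bits (assumed low 16)
    attributes = msr_value >> 16    # supplier/snoop selection bits
    return REQUEST_TYPES.get(request, hex(request)), hex(attributes)

# DEMAND_DATA_RD.L3_MISS.ANY_SNOOP vs. DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM:
# same request field, different high-order snoop bits.
print(decompose(0x3FFC400001))  # ('DEMAND_DATA_RD', '0x3ffc40')
print(decompose(0x203C400001))  # ('DEMAND_DATA_RD', '0x203c40')
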
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
index bc6d2afbcd8a..4a891fbbc4bb 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -1,7 +1,6 @@
[
{
"PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -11,7 +10,6 @@
},
{
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -31,7 +28,6 @@
},
{
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -121,7 +117,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -248,6 +244,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
"EventCode": "0x5E",
"Counter": "0,1,2,3",
@@ -361,8 +367,8 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
- "EventCode": "0xA2",
+ "PublicDescription": "Counts resource-related stall cycles.",
+ "EventCode": "0xa2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "RESOURCE_STALLS.ANY",
@@ -735,7 +741,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -759,6 +765,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Number of machine clears (nukes) of any type.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -839,14 +846,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x10",
"Errata": "SKL091",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "BriefDescription": "Counts all not taken macro branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -924,7 +932,7 @@
"UMask": "0x20",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -938,6 +946,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
"EventCode": "0xE6",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 71e9737f4614..2c95417a4dae 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -1,164 +1,364 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
},
{
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
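Load_Miss_Real_Latency and MLP are two ratios over the same occupancy counter: L1D_PEND_MISS.PENDING adds up, each cycle, the number of L1D misses in flight. Divided by completed misses (L1_MISS plus FB_HIT) it yields average miss latency; divided by the cycles with at least one miss pending it yields average parallelism. A sketch with hypothetical counts:

    c = {
        "L1D_PEND_MISS.PENDING": 600_000,         # cycle-by-cycle sum of in-flight misses
        "L1D_PEND_MISS.PENDING_CYCLES": 150_000,  # cycles with >= 1 miss in flight
        "MEM_LOAD_RETIRED.L1_MISS": 18_000,
        "MEM_LOAD_RETIRED.FB_HIT": 12_000,
    }

    latency = c["L1D_PEND_MISS.PENDING"] / (
        c["MEM_LOAD_RETIRED.L1_MISS"] + c["MEM_LOAD_RETIRED.FB_HIT"])
    mlp = c["L1D_PEND_MISS.PENDING"] / c["L1D_PEND_MISS.PENDING_CYCLES"]

    print(f"Load_Miss_Real_Latency ~= {latency:.1f} core cycles")
    print(f"MLP ~= {mlp:.1f} misses in flight")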
{
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
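Page_Walks_Utilization sums pending-walk occupancy over instruction, load, store, and EPT walks, then normalizes by 2 * cycles; the factor of 2 encodes the expression's assumption of two hardware page walkers per core, so 1.0 would mean both walkers busy every cycle. Sketch:

    c = {
        "ITLB_MISSES.WALK_PENDING": 40_000,
        "DTLB_LOAD_MISSES.WALK_PENDING": 90_000,
        "DTLB_STORE_MISSES.WALK_PENDING": 30_000,
        "EPT.WALK_PENDING": 5_000,
        "cycles": 4_000_000,
    }

    pending = (c["ITLB_MISSES.WALK_PENDING"]
               + c["DTLB_LOAD_MISSES.WALK_PENDING"]
               + c["DTLB_STORE_MISSES.WALK_PENDING"]
               + c["EPT.WALK_PENDING"])
    print(f"Page_Walks_Utilization ~= {pending / (2 * c['cycles']):.3f}")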
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Access_BW"
+ },
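The four new bandwidth metrics share one recipe: count 64-byte cache lines moved, multiply by the line size, and divide by wall-clock time (duration_time, in seconds) to get GB/s. Only the line-counting event differs: L1D.REPLACEMENT and L2_LINES_IN.ALL for fills, LONGEST_LAT_CACHE.MISS for L3 fills, OFFCORE_REQUESTS.ALL_REQUESTS for L3 accesses. Sketch:

    LINE_BYTES = 64
    GB = 1_000_000_000

    def cache_bw_gbs(line_count, seconds):
        # line_count is any of the per-level line/request counts above.
        return LINE_BYTES * line_count / GB / seconds

    print(f"L1D fill BW ~= {cache_bw_gbs(50_000_000, 1.0):.2f} GB/s")  # made-up count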
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
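The MPKI/HPKI additions scale miss and hit counts to events per thousand retired instructions, which makes runs of different lengths directly comparable. Sketch:

    def per_kilo_inst(event_count, inst_retired):
        return 1000 * event_count / inst_retired

    inst = 40_000_000  # hypothetical INST_RETIRED.ANY
    print(f"L1MPKI     = {per_kilo_inst(180_000, inst):.2f}")
    print(f"L2MPKI_All = {per_kilo_inst(60_000, inst):.2f}")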
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
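GFLOPs weights each retired FP-arith event by the floating-point operations it performs: 1 for scalar, 2 for 128-bit packed double, 4 for 128-bit packed single and 256-bit packed double, 8 for 256-bit packed single, then divides by 1e9 and by duration_time. Sketch:

    FLOP_WEIGHTS = {
        "FP_ARITH_INST_RETIRED.SCALAR_SINGLE": 1,
        "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1,
        "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 2,
        "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": 8,
    }

    def gflops(counts, seconds):
        flops = sum(FLOP_WEIGHTS[name] * n for name, n in counts.items())
        return flops / 1_000_000_000 / seconds

    sample = {name: 1_000_000 for name in FLOP_WEIGHTS}  # made-up counts
    print(f"GFLOPs ~= {gflops(sample, 0.5):.2f}")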
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "arb@event\\=0x80\\,umask\\=0x2@ / arb@event\\=0x80\\,umask\\=0x2\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
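The arb@event\=...@ terms are raw uncore ARB events addressed by event code and umask through perf's pmu@...@ syntax. DRAM_BW_Use treats each counted request as a 64-byte transfer (the JSON spells the 1e9 scaling as "/ 1000000 / duration_time / 1000"), and DRAM_Parallel_Reads divides read-queue occupancy by the cycles the queue was non-empty (thresh\=1), the same occupancy-over-active-cycles shape as MLP. Sketch of the bandwidth arithmetic:

    def dram_bw_gbs(arb_81_u1, arb_84_u1, seconds):
        # 64 bytes per counted request, scaled to GB/s.
        return 64 * (arb_81_u1 + arb_84_u1) / 1_000_000_000 / seconds

    print(f"DRAM_BW_Use ~= {dram_bw_gbs(30_000_000, 10_000_000, 1.0):.2f} GB/s")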
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
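The residency metrics closing the file all have one shape: a cstate_core@...@ or cstate_pkg@...@ residency counter divided by the TSC and multiplied by 100, i.e. the percentage of elapsed time the core or package spent in that C-state. Sketch:

    def cstate_residency_pct(residency, tsc):
        # Both counters advance with wall-clock time, so the ratio is
        # the fraction of the run spent in the C-state.
        return residency / tsc * 100

    print(f"C6_Core_Residency ~= {cstate_residency_pct(1_200_000_000, 2_000_000_000):.1f}%")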
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
index 5c9940866acd..24df183693fa 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
@@ -61,17 +61,17 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
+ "UMask": "0xc1",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x24",
- "UMask": "0x42",
+ "UMask": "0xc2",
"BriefDescription": "RFO requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.RFO_HIT",
@@ -81,7 +81,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x44",
+ "UMask": "0xc4",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.CODE_RD_HIT",
@@ -165,6 +165,7 @@
"BriefDescription": "Core-originated cacheable demand requests missed L3",
"Counter": "0,1,2,3",
"EventName": "LONGEST_LAT_CACHE.MISS",
+ "Errata": "SKL057",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
@@ -175,28 +176,29 @@
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
"Counter": "0,1,2,3",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "Errata": "SKL057",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x48",
"UMask": "0x1",
- "BriefDescription": "L1D miss outstandings duration in cycles",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
"Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING",
- "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x48",
"UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "BriefDescription": "L1D miss outstandings duration in cycles",
"Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -234,21 +236,21 @@
{
"EventCode": "0x60",
"UMask": "0x1",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
"UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -307,21 +309,21 @@
{
"EventCode": "0x60",
"UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
"UMask": "0x8",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -486,7 +488,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -558,7 +560,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -690,6 +692,7 @@
"BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
"Counter": "0,1,2,3",
"EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -699,17 +702,18 @@
"BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
"Counter": "0,1,2,3",
"EventName": "L2_LINES_OUT.NON_SILENT",
- "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xF2",
"UMask": "0x4",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "Deprecated": "1",
"Counter": "0,1,2,3",
"EventName": "L2_LINES_OUT.USELESS_PREF",
- "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+ "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -736,12 +740,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that have any response type.",
- "MSRValue": "0x0000010001 ",
+ "BriefDescription": "Counts demand data reads have any response type.",
+ "MSRValue": "0x0000010001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
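Each OFFCORE_RESPONSE entry's MSRValue is the raw bit pattern written to MSR 0x1a6/0x1a7: low bits select the request type, a middle field the supplier, and high bits the snoop result. The field boundaries in the sketch below are assumptions inferred from the values visible in this hunk (e.g. 0x0000010001 = demand data read, bit 0, plus ANY_RESPONSE, bit 16; 0x3F803C0001 adds the L3-hit supplier bits and all snoop bits), not a normative layout:

    # Assumed field masks, derived from the MSRValues in this hunk.
    # Bit 16 (0x10000) appears alone as ANY_RESPONSE.
    REQUEST_MASK  = 0x000000FFFF   # request type; bit 0 = demand data read
    SUPPLIER_MASK = 0x00003C0000   # L3-hit supplier bits seen here
    SNOOP_MASK    = 0x3F80000000   # bits 31-37, snoop-result selectors

    def decode(msr_value):
        return {
            "request": msr_value & REQUEST_MASK,
            "supplier": msr_value & SUPPLIER_MASK,
            "snoop": msr_value & SNOOP_MASK,
        }

    print({k: hex(v) for k, v in decode(0x3F803C0001).items()})
    # {'request': '0x1', 'supplier': '0x3c0000', 'snoop': '0x3f80000000'}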
@@ -749,12 +753,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x01003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -762,25 +766,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x04003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0001 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -788,12 +779,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x10003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -801,12 +792,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3.",
- "MSRValue": "0x3f803c0001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x3F803C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -814,12 +805,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
- "MSRValue": "0x0000010002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "MSRValue": "0x0000010002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -827,12 +818,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x01003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -840,12 +831,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x04003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -853,25 +844,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0002 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -879,12 +857,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.",
- "MSRValue": "0x3f803c0002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x3F803C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -892,12 +870,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that have any response type.",
- "MSRValue": "0x0000010004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+ "MSRValue": "0x0000010004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -905,12 +883,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x01003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -918,12 +896,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x04003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -931,25 +909,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0004 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x10003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -957,12 +922,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3.",
- "MSRValue": "0x3f803c0004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x3F803C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -970,12 +935,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
- "MSRValue": "0x0000010010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
+ "MSRValue": "0x0000010010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -983,12 +948,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x01003C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -996,12 +961,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x04003C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1009,25 +974,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "PF_L2_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0010 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x10003C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1035,12 +987,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.",
- "MSRValue": "0x3f803c0010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x3F803C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1048,12 +1000,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
- "MSRValue": "0x0000010020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
+ "MSRValue": "0x0000010020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1061,12 +1013,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x01003C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1074,12 +1026,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x04003C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1087,25 +1039,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "PF_L2_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0020 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x10003C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1113,12 +1052,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.",
- "MSRValue": "0x3f803c0020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x3F803C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1126,12 +1065,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
- "MSRValue": "0x0000010080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
+ "MSRValue": "0x0000010080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1139,12 +1078,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x01003C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1152,25 +1091,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x04003C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0080 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1178,12 +1104,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x10003C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1191,12 +1117,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.",
- "MSRValue": "0x3f803c0080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x3F803C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1204,12 +1130,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
- "MSRValue": "0x0000010100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
+ "MSRValue": "0x0000010100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1217,12 +1143,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x01003C0100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1230,12 +1156,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x04003C0100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1243,12 +1169,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x10003C0100",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1256,12 +1182,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1269,12 +1195,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.",
- "MSRValue": "0x3f803c0100 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
+ "MSRValue": "0x0000010400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1282,12 +1208,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.",
- "MSRValue": "0x0000010400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x01003C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1295,12 +1221,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x04003C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1308,12 +1234,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x10003C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1321,12 +1247,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "PF_L1D_AND_SW & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x3F803C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1334,12 +1260,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0400 ",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1347,12 +1273,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3.",
- "MSRValue": "0x3f803c0400 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1360,12 +1286,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that have any response type.",
- "MSRValue": "0x0000018000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1373,12 +1299,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c8000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1386,12 +1312,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c8000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1399,12 +1325,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c8000 ",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1412,12 +1338,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c8000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1425,12 +1351,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that hit in the L3.",
- "MSRValue": "0x3f803c8000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1438,12 +1364,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that have any response type.",
- "MSRValue": "0x0000010490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1451,12 +1377,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1464,12 +1390,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0490 ",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1477,12 +1403,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1490,12 +1416,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1503,12 +1429,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3.",
- "MSRValue": "0x3f803c0490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1516,12 +1442,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that have any response type.",
- "MSRValue": "0x0000010120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1529,12 +1455,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0120 ",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1542,12 +1468,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1555,12 +1481,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1568,12 +1494,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1581,12 +1507,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3.",
- "MSRValue": "0x3f803c0120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1594,12 +1520,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that have any response type.",
- "MSRValue": "0x0000010491 ",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x08007C0001",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts demand data reads",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1607,12 +1532,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0491 ",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x08007C0002",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1620,12 +1544,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0491 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "MSRValue": "0x08007C0004",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1633,12 +1556,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0491 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x08007C0010",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1646,12 +1568,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0491 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x08007C0020",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1659,12 +1580,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.",
- "MSRValue": "0x3f803c0491 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x08007C0080",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1672,12 +1592,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.",
- "MSRValue": "0x0000010122 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x08007C0100",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1685,12 +1604,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0122 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x08007C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1698,12 +1616,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0122 ",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1711,12 +1628,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0122 ",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1724,12 +1640,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0122 ",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1737,12 +1652,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.",
- "MSRValue": "0x3f803c0122 ",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
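
The MSRValue in each entry above is the bit mask written to the offcore response MSRs (0x1a6/0x1a7, per the MSRIndex fields) to select a request type, a supplier response, and a snoop response. As a rough reading aid for the renames in these hunks, here is a minimal Python sketch that decodes a value into the component names used in the EventNames; the bit positions are inferred solely from the MSRValues visible in this file and are assumptions, not an authoritative encoding reference.

# Hedged sketch: decode a skylakex offcore_response MSRValue into the
# request/supplier/snoop components implied by the EventNames above.
# Bit positions are inferred from the values in this diff only.
REQUEST_BITS = {                      # low 16 bits: request type
    0x0001: "DEMAND_DATA_RD",  0x0002: "DEMAND_RFO",
    0x0004: "DEMAND_CODE_RD",  0x0010: "PF_L2_DATA_RD",
    0x0020: "PF_L2_RFO",       0x0080: "PF_L3_DATA_RD",
    0x0100: "PF_L3_RFO",       0x0400: "PF_L1D_AND_SW",
    0x8000: "OTHER",
}
SUPPLIER_BITS = {
    0x0000010000: "ANY_RESPONSE",
    0x00003C0000: "L3_HIT",           # e.g. 0x3F803C0491 & 0x3C0000
}
SNOOP_BITS = {                        # ANY_SNOOP (0x3F80000000) covers all of these
    0x0100000000: "NO_SNOOP_NEEDED",
    0x0400000000: "HIT_OTHER_CORE_NO_FWD",
    0x0800000000: "SNOOP_HIT_WITH_FWD",
    0x1000000000: "HITM_OTHER_CORE",
}

def decode(msr_value):
    """List every named component fully contained in msr_value."""
    bits = {**REQUEST_BITS, **SUPPLIER_BITS, **SNOOP_BITS}
    parts = [name for bit, name in bits.items() if msr_value & bit == bit]
    return " | ".join(parts) or hex(msr_value)

print(decode(0x10003C0120))
# PF_L2_RFO | PF_L3_RFO | L3_HIT | HITM_OTHER_CORE,
# i.e. OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE above

Once an entry like these is in the perf tree, the event should be selectable by its lowercased EventName, along the lines of perf stat -e offcore_response.all_pf_rfo.l3_hit.hitm_other_core (the lowercase spelling is assumed from perf's usual handling of pmu-events names).
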
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
index 286ed1a37ec9..c5d0babe89fc 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
@@ -59,7 +59,6 @@
"BriefDescription": "Number of Packed Double-Precision FP arithmetic instructions (Use operation multiplier of 8)",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
- "PublicDescription": "Number of Packed Double-Precision FP arithmetic instructions (Use operation multiplier of 8).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -69,7 +68,6 @@
"BriefDescription": "Number of Packed Single-Precision FP arithmetic instructions (Use operation multiplier of 16)",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
- "PublicDescription": "Number of Packed Single-Precision FP arithmetic instructions (Use operation multiplier of 16).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
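
The two 512-bit entries above keep their "Use operation multiplier" notes, which is how these raw counts become FLOP totals. A minimal sketch under exactly that stated assumption (8 doubles or 16 singles per retired 512-bit packed instruction):

# Minimal sketch: combine the two 512-bit counters above into a FLOP
# total using the multipliers stated in their BriefDescriptions.
def flops_512b(packed_double_count, packed_single_count):
    return 8 * packed_double_count + 16 * packed_single_count
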
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
index 403a4f89e9b2..4dc583cfb545 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
@@ -2,16 +2,6 @@
{
"EventCode": "0x79",
"UMask": "0x4",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x4",
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_CYCLES",
@@ -22,11 +12,11 @@
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "UMask": "0x4",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -43,6 +33,16 @@
},
{
"EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
"UMask": "0x10",
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
@@ -55,22 +55,22 @@
{
"EventCode": "0x79",
"UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -87,22 +87,22 @@
{
"EventCode": "0x79",
"UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering any Uop",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -118,24 +118,24 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EdgeDetect": "1",
"EventCode": "0x79",
"UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
"EventCode": "0x79",
"UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -177,67 +177,67 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "CounterMask": "4",
- "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
- "CounterMask": "3",
- "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "CounterMask": "2",
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
- "CounterMask": "2",
- "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "CounterMask": "3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "CounterMask": "1",
- "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "CounterMask": "4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "CounterMask": "1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -247,20 +247,19 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x11",
+ "MSRValue": "0x400406",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -268,11 +267,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x12",
+ "MSRValue": "0x200206",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -281,11 +280,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x13",
+ "MSRValue": "0x400206",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -294,13 +293,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x14",
+ "MSRValue": "0x15",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -308,13 +307,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x15",
+ "MSRValue": "0x14",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -322,11 +321,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x400206",
+ "MSRValue": "0x13",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -335,11 +334,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x200206",
+ "MSRValue": "0x12",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -348,12 +347,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x400406",
+ "MSRValue": "0x11",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -361,13 +361,12 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x400806",
+ "MSRValue": "0x300206",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -375,13 +374,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x401006",
+ "MSRValue": "0x100206",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -389,13 +388,12 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x402006",
+ "MSRValue": "0x420006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -403,11 +401,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x404006",
+ "MSRValue": "0x410006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -429,11 +427,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x410006",
+ "MSRValue": "0x404006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -442,12 +440,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x420006",
+ "MSRValue": "0x402006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -455,13 +454,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x100206",
+ "MSRValue": "0x401006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -469,12 +468,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
- "MSRValue": "0x300206",
+ "MSRValue": "0x400806",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
index e7f1aa31226d..48a9cdf81307 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
@@ -129,20 +129,20 @@
{
"EventCode": "0x60",
"UMask": "0x10",
- "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
- "CounterMask": "1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
"UMask": "0x10",
- "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -210,7 +210,7 @@
{
"EventCode": "0xC8",
"UMask": "0x4",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
@@ -242,6 +242,7 @@
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -287,7 +288,7 @@
{
"EventCode": "0xC9",
"UMask": "0x4",
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ",
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
@@ -347,125 +348,125 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
"PEBS": "2",
- "MSRValue": "0x4",
+ "MSRValue": "0x200",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "100003",
+ "SampleAfterValue": "101",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
"PEBS": "2",
- "MSRValue": "0x8",
+ "MSRValue": "0x100",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "50021",
+ "SampleAfterValue": "503",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
"PEBS": "2",
- "MSRValue": "0x10",
+ "MSRValue": "0x80",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "20011",
+ "SampleAfterValue": "1009",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
"PEBS": "2",
- "MSRValue": "0x20",
+ "MSRValue": "0x40",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "100007",
+ "SampleAfterValue": "2003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
"PEBS": "2",
- "MSRValue": "0x40",
+ "MSRValue": "0x20",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "2003",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
"PEBS": "2",
- "MSRValue": "0x80",
+ "MSRValue": "0x10",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "1009",
+ "SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
"PEBS": "2",
- "MSRValue": "0x100",
+ "MSRValue": "0x8",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "503",
+ "SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
"PEBS": "2",
- "MSRValue": "0x200",
+ "MSRValue": "0x4",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "101",
+ "SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
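
The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* block above is likewise a single event (0xCD, umask 0x1): the latency threshold is the MSRValue programmed into MSR 0x3F6, carried by perf in config1 as the "ldlat" format term, and the SampleAfterValue shrinks as the threshold grows because fewer loads qualify. A hedged sketch for the GT_64 flavour, with the threshold and period taken from the entry above (PEBS is mandatory here, hence precise_ip):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    int main(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            attr.config = 0xCD | (0x1 << 8); /* MEM_TRANS_RETIRED.LOAD_LATENCY */
            attr.config1 = 64;               /* ldlat threshold -> MSR 0x3F6 */
            attr.sample_period = 2003;       /* SampleAfterValue for GT_64 */
            attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR |
                               PERF_SAMPLE_WEIGHT;
            attr.precise_ip = 2;             /* "PEBS": "2" above */
            attr.exclude_kernel = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;
            /* samples now carry the measured latency in the weight field */
            close(fd);
            return 0;
    }

perf mem record, or perf record -e cpu/mem-loads,ldlat=64/P, drives the same machinery from the tool side.
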
{
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss in the L3.",
- "MSRValue": "0x3fbc000001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x3FBC000001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
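
Every OFFCORE_RESPONSE.* entry from here on uses the same event select pair (0xB7 or 0xBB, umask 0x1); only the 64-bit request/response mask written to MSR 0x1A6/0x1A7 changes, which is why each entry carries an MSRValue. Perf takes that mask in config1 (the "offcore_rsp" format term) and schedules whichever of the two MSRs is free. A hedged counting sketch using the DEMAND_DATA_RD.L3_MISS.ANY_SNOOP mask from the entry above; the workload comment is a placeholder:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count = 0;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            attr.config = 0xB7 | (0x1 << 8);  /* OFFCORE_RESPONSE_0 */
            attr.config1 = 0x3FBC000001ULL;   /* L3_MISS.ANY_SNOOP mask */
            attr.disabled = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("demand data reads missing L3: %llu\n",
                           (unsigned long long)count);
            close(fd);
            return 0;
    }
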
@@ -473,12 +474,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x083FC00001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -486,12 +487,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x103FC00001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -499,12 +500,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x063FC00001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -512,12 +513,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x063B800001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -525,12 +526,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0604000001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -538,12 +539,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3.",
- "MSRValue": "0x3fbc000002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x3FBC000002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -551,12 +552,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x083FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -564,12 +565,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x103FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -577,12 +578,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x063FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -590,12 +591,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x063B800002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -603,12 +604,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0604000002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -616,12 +617,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss in the L3.",
- "MSRValue": "0x3fbc000004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x3FBC000004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -629,12 +630,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x083FC00004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -642,12 +643,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x103FC00004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -655,12 +656,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x063FC00004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -668,12 +669,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x063B800004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -681,12 +682,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x0604000004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -694,12 +695,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3.",
- "MSRValue": "0x3fbc000010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x3FBC000010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -707,12 +708,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x083FC00010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -720,12 +721,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x103FC00010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -733,12 +734,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x063FC00010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -746,12 +747,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x063B800010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -759,12 +760,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0604000010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -772,12 +773,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3.",
- "MSRValue": "0x3fbc000020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x3FBC000020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -785,12 +786,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x083FC00020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -798,12 +799,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x103FC00020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -811,12 +812,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x063FC00020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -824,12 +825,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x063B800020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -837,12 +838,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0604000020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -850,12 +851,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3.",
- "MSRValue": "0x3fbc000080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x3FBC000080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -863,12 +864,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x083FC00080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -876,12 +877,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x103FC00080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -889,12 +890,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x063FC00080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -902,12 +903,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x063B800080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -915,12 +916,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0604000080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -928,12 +929,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3.",
- "MSRValue": "0x3fbc000100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x3FBC000100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -941,12 +942,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x083FC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -954,12 +955,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x103FC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -967,12 +968,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x063FC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -980,12 +981,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x063B800100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -993,12 +994,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0604000100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1006,12 +1007,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3.",
- "MSRValue": "0x3fbc000400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x3FBC000400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1019,12 +1020,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x083FC00400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1032,12 +1033,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x103FC00400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1045,12 +1046,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x063FC00400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1058,12 +1059,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x063B800400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1071,90 +1072,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0604000400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss in the L3.",
- "MSRValue": "0x3fbc008000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc08000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc08000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc08000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b808000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604008000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
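The hunk above drops the whole OFFCORE_RESPONSE.OTHER.* family from cache.json while keeping the surrounding entries. A quick way to see which offcore event names a pmu-events file still defines after a change like this is to parse the JSON directly; the sketch below assumes it is run from a kernel tree checkout (the path is simply the file being patched here):

import json

with open("tools/perf/pmu-events/arch/x86/skylakex/cache.json") as f:
    events = json.load(f)

# Print every offcore-response event together with its MSRValue, so
# removed families (e.g. OFFCORE_RESPONSE.OTHER.*) are easy to spot.
for ev in events:
    name = ev.get("EventName", "")
    if name.startswith("OFFCORE_RESPONSE."):
        print(f'{name}: {ev["MSRValue"]}')
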
@@ -1162,12 +1085,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss in the L3.",
- "MSRValue": "0x3fbc000490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1175,12 +1098,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1188,12 +1111,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1201,12 +1124,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063FC00490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1214,12 +1137,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1227,12 +1150,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1240,12 +1163,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss in the L3.",
- "MSRValue": "0x3fbc000120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1253,12 +1176,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1266,12 +1189,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1279,12 +1202,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063FC00120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1292,12 +1215,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1305,12 +1228,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1318,12 +1241,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3.",
- "MSRValue": "0x3fbc000491 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1331,12 +1254,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1344,12 +1267,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1357,12 +1280,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063FC00491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1370,12 +1293,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1383,12 +1306,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1396,12 +1319,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3.",
- "MSRValue": "0x3fbc000122 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1409,12 +1332,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1422,12 +1345,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1435,12 +1358,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063FC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1448,12 +1371,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1461,12 +1384,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
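Every entry in this file follows the same pattern: MSRIndex names the offcore-response MSR pair (0x1a6/0x1a7) and MSRValue packs the transaction attributes into a single value. As a rough guide (the field boundaries below follow the documented OFFCORE_RESPONSE layout and are an assumption here, not something this patch spells out), bits 0-15 select the request type, bits 16-30 the supplier information, and bits 31-37 the snoop response:

# Field masks are assumptions based on the documented OFFCORE_RESPONSE
# layout; the JSON above only carries the packed value.
REQUEST_MASK  = 0x000000FFFF   # bits 0-15: request type (ALL_PF_DATA_RD = 0x490)
SUPPLIER_MASK = 0x007FFF0000   # bits 16-30: supplier info (L3 miss, local/remote DRAM, ...)
SNOOP_MASK    = 0x3F80000000   # bits 31-37: snoop response (ANY_SNOOP sets all seven)

def decode(msr_value: int) -> dict:
    return {
        "request":  msr_value & REQUEST_MASK,
        "supplier": (msr_value & SUPPLIER_MASK) >> 16,
        "snoop":    (msr_value & SNOOP_MASK) >> 31,
    }

# 0x3FBC000490 is OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP above.
print({k: hex(v) for k, v in decode(0x3FBC000490).items()})
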
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index f99f7ae27820..369f56c1d1b5 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
@@ -10,7 +9,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
"Counter": "Fixed counter 1",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "Fixed counter 1",
@@ -30,7 +27,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
"Counter": "Fixed counter 2",
@@ -99,24 +95,24 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0x0E",
"UMask": "0x1",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.ANY",
- "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0x0E",
"UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
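The swap above is a reminder that UOPS_ISSUED.ANY and UOPS_ISSUED.STALL_CYCLES are the same hardware event (EventCode 0x0E, UMask 0x1); the stall-cycles variant only adds CounterMask=1 plus Invert, turning "uops issued per cycle" into "cycles in which no uops were issued". For the equivalent raw encoding usable with perf's cpu/config=.../ syntax, here is a sketch of the usual IA32_PERFEVTSEL bit packing (bit positions taken from the architectural layout, not from this patch):

def raw_config(event: int, umask: int, cmask: int = 0, inv: bool = False,
               edge: bool = False) -> int:
    # IA32_PERFEVTSELx packing: event select in bits 0-7, unit mask in
    # bits 8-15, edge detect in bit 18, invert in bit 23, counter mask
    # in bits 24-31.
    return (event
            | (umask << 8)
            | (int(edge) << 18)
            | (int(inv) << 23)
            | (cmask << 24))

print(hex(raw_config(0x0E, 0x1)))                     # UOPS_ISSUED.ANY
print(hex(raw_config(0x0E, 0x1, cmask=1, inv=True)))  # UOPS_ISSUED.STALL_CYCLES
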
@@ -126,7 +122,7 @@
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"Counter": "0,1,2,3",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -203,19 +199,19 @@
{
"EventCode": "0x3C",
"UMask": "0x1",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
"SampleAfterValue": "2503",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x3C",
"UMask": "0x1",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"SampleAfterValue": "2503",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -248,12 +244,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5E",
+ "EventCode": "0x59",
"UMask": "0x1",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
"Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -271,6 +267,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -361,12 +367,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xa2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
"Counter": "0,1,2,3",
"EventName": "RESOURCE_STALLS.ANY",
- "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "PublicDescription": "Counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -522,6 +528,17 @@
{
"EventCode": "0xA8",
"UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -531,35 +548,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
"CounterMask": "4",
- "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.THREAD",
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -575,35 +592,24 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -618,11 +624,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0xB1",
"UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
@@ -630,10 +637,10 @@
{
"EventCode": "0xB1",
"UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -650,20 +657,19 @@
{
"EventCode": "0xB1",
"UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0xB1",
"UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
@@ -725,12 +731,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0xC2",
"UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "Counts the retirement slots used.",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "CounterMask": "10",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -742,19 +750,17 @@
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0xC2",
"UMask": "0x2",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "BriefDescription": "Retirement slots used.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "CounterMask": "10",
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "Counts the retirement slots used.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -766,6 +772,7 @@
"Counter": "0,1,2,3",
"EventName": "MACHINE_CLEARS.COUNT",
"CounterMask": "1",
+ "PublicDescription": "Number of machine clears (nukes) of any type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,11 +848,12 @@
{
"EventCode": "0xC4",
"UMask": "0x10",
- "BriefDescription": "Not taken branch instructions retired.",
+ "BriefDescription": "Counts all not taken macro branch instructions retired.",
+ "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -919,7 +927,7 @@
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
@@ -938,6 +946,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xCC",
+ "UMask": "0x40",
+ "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3",
+ "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xE6",
"UMask": "0x1",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 71e9737f4614..56e03ba771f4 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -1,164 +1,394 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
+ },
+ {
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "DRAM_Read_Latency"
+ },
+ {
+ "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
+ {
+ "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 0 == 1 else 0 else 0",
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "MEM_PMM_Read_Latency"
+ },
+ {
+ "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "PMM_Read_BW"
+ },
+ {
+ "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "PMM_Write_BW"
+ },
+ {
+ "MetricExpr": "cha_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
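The four TopdownL1 metrics added above decompose issue slots: SLOTS is 4 * cycles per core, Frontend_Bound, Bad_Speculation and Retiring are computed directly from their events, and Backend_Bound is the remainder. The sketch below applies the same arithmetic to raw counts gathered elsewhere (e.g. parsed out of perf stat output); the parameter names merely mirror the event names in the MetricExpr strings and are not a perf API:

    # Hedged sketch of the non-SMT TopdownL1 expressions above,
    # applied to raw event counts collected separately.
    def topdown_l1(idq_uops_not_delivered_core, uops_issued_any,
                   uops_retired_retire_slots, int_misc_recovery_cycles,
                   cycles):
        slots = 4.0 * cycles  # issue-pipeline slots, 4 per cycle
        frontend_bound = idq_uops_not_delivered_core / slots
        bad_speculation = (uops_issued_any - uops_retired_retire_slots
                           + 4 * int_misc_recovery_cycles) / slots
        retiring = uops_retired_retire_slots / slots
        # Backend_Bound is defined as whatever is left of the slot budget.
        backend_bound = 1.0 - (frontend_bound + bad_speculation + retiring)
        return frontend_bound, bad_speculation, retiring, backend_bound

The SMT variants differ only in the denominator: CORE_CLKS is derived from CPU_CLK_UNHALTED.THREAD scaled by the one-thread-active ratio instead of plain cycles.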
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index 334599c6032c..d2c22954800d 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -7,6 +7,8 @@
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
+from __future__ import print_function
+
import os
import sys
@@ -19,64 +21,64 @@ from perf_trace_context import *
unhandled = autodict()
def trace_begin():
- print "trace_begin"
+ print("trace_begin")
pass
def trace_end():
- print_unhandled()
+ print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, vec):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "vec=%s\n" % \
- (symbol_str("irq__softirq_entry", "vec", vec)),
+ print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec)))
def kmem__kmalloc(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, call_site, ptr, bytes_req, bytes_alloc,
- gfp_flags):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "call_site=%u, ptr=%u, bytes_req=%u, " \
- "bytes_alloc=%u, gfp_flags=%s\n" % \
+ print("call_site=%u, ptr=%u, bytes_req=%u, "
+ "bytes_alloc=%u, gfp_flags=%s" %
(call_site, ptr, bytes_req, bytes_alloc,
-
- flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)))
def trace_unhandled(event_name, context, event_fields_dict):
- try:
- unhandled[event_name] += 1
- except TypeError:
- unhandled[event_name] = 1
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
- print "%-20s %5u %05u.%09u %8u %-20s " % \
- (event_name, cpu, secs, nsecs, pid, comm),
+ print("%-20s %5u %05u.%09u %8u %-20s " %
+ (event_name, cpu, secs, nsecs, pid, comm),
+ end=' ')
# print trace fields not included in handler args
def print_uncommon(context):
- print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
- % (common_pc(context), trace_flag_str(common_flags(context)), \
- common_lock_depth(context))
+ print("common_preempt_count=%d, common_flags=%s, "
+ "common_lock_depth=%d, " %
+ (common_pc(context), trace_flag_str(common_flags(context)),
+ common_lock_depth(context)))
def print_unhandled():
- keys = unhandled.keys()
- if not keys:
- return
+ keys = unhandled.keys()
+ if not keys:
+ return
- print "\nunhandled events:\n\n",
+ print("\nunhandled events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
- for event_name in keys:
- print "%-40s %10d\n" % (event_name, unhandled[event_name])
+ for event_name in keys:
+ print("%-40s %10d\n" % (event_name, unhandled[event_name]))
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
index 239cb0568ec3..2560a042dc6f 100644
--- a/tools/perf/scripts/python/compaction-times.py
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -216,15 +216,15 @@ def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
pair(nr_migrated, nr_failed), None, None)
def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)
def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 4e843b9864ec..aa1e2cfa26a6 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -15,6 +15,8 @@
# for a x86 HW PMU event: PEBS with load latency data.
#
+from __future__ import print_function
+
import os
import sys
import math
@@ -37,7 +39,7 @@ con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
- print "In trace_begin:\n"
+ print("In trace_begin:\n")
#
# Will create several tables at the start, pebs_ll is for PEBS data with
@@ -76,12 +78,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if ("dso" in param_dict):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
- if (param_dict.has_key("symbol")):
+ if ("symbol" in param_dict):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
@@ -102,7 +104,7 @@ def insert_db(event):
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
- print "In trace_end:\n"
+ print("In trace_end:\n")
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
@@ -123,29 +125,29 @@ def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
- print "There is %d records in gen_events table" % t[0]
+ print("There is %d records in gen_events table" % t[0])
if t[0] == 0:
return
- print "Statistics about the general events grouped by thread/symbol/dso: \n"
+ print("Statistics about the general events grouped by thread/symbol/dso: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dso
- print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+ print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
- print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))
#
# This function just shows the basic info, and we could do more with the
@@ -156,35 +158,35 @@ def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
- print "There is %d records in pebs_ll table" % t[0]
+ print("There is %d records in pebs_ll table" % t[0])
if t[0] == 0:
return
- print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+ print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
- print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
for row in dseq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
- print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
for row in latq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
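Besides the print conversion, this script drops dict.has_key(), which was removed in Python 3; the membership test becomes the in operator, which works on both versions. A small sketch, with dict.get() shown as an equivalent, slightly shorter idiom (an alternative, not the script's exact form):

    param_dict = {"ev_name": "cycles"}  # illustrative sample dict

    # Membership test that works on both Python 2 and 3:
    dso = param_dict["dso"] if "dso" in param_dict else "Unknown_dso"
    # Equivalent one-liner using a default value:
    symbol = param_dict.get("symbol", "Unknown_symbol")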
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 30130213da7e..c3eae1d77d36 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -199,6 +201,18 @@ import datetime
from PySide.QtSql import *
+if sys.version_info < (3, 0):
+ def toserverstr(str):
+ return str
+ def toclientstr(str):
+ return str
+else:
+ # Assume UTF-8 server_encoding and client_encoding
+ def toserverstr(str):
+ return bytes(str, "UTF_8")
+ def toclientstr(str):
+ return bytes(str, "UTF_8")
+
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
@@ -234,12 +248,17 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
+def printerr(*args, **kw_args):
+ print(*args, file=sys.stderr, **kw_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
def usage():
- print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
+ printerr("where: columns 'all' or 'branches'")
+ printerr(" calls 'calls' => create calls and call_paths table")
+ printerr(" callchains 'callchains' => create call_paths table")
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
@@ -273,7 +292,7 @@ def do_query(q, s):
return
raise Exception("Query failed: " + q.lastError().text())
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database...")
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
@@ -394,7 +413,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
@@ -478,8 +498,9 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
- 'parent_call_path_id'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,'
+ 'parent_call_path_id,'
+ 'calls.parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
@@ -504,12 +525,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
' FROM samples')
-file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
-file_trailer = "\377\377"
+file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = b"\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
- file = open(path_name, "w+")
+ file = open(path_name, "wb+")
file.write(file_header)
return file
@@ -524,13 +545,13 @@ def copy_output_file_direct(file, table_name):
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
- conn = PQconnectdb("dbname = " + dbname)
+ conn = PQconnectdb(toclientstr("dbname = " + dbname))
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
- res = PQexec(conn, sql)
+ res = PQexec(conn, toclientstr(sql))
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
@@ -564,7 +585,7 @@ if perf_db_export_calls:
call_file = open_output_file("call_table.bin")
def trace_begin():
- print datetime.datetime.today(), "Writing to intermediate files..."
+ printdate("Writing to intermediate files...")
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
@@ -575,11 +596,12 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def trace_end():
- print datetime.datetime.today(), "Copying to database..."
+ printdate("Copying to database...")
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
@@ -594,7 +616,7 @@ def trace_end():
if perf_db_export_calls:
copy_output_file(call_file, "calls")
- print datetime.datetime.today(), "Removing intermediate files..."
+ printdate("Removing intermediate files...")
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
@@ -609,7 +631,7 @@ def trace_end():
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
- print datetime.datetime.today(), "Adding primary keys"
+ printdate("Adding primary keys")
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
@@ -624,7 +646,7 @@ def trace_end():
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
- print datetime.datetime.today(), "Adding foreign keys"
+ printdate("Adding foreign keys")
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
@@ -657,10 +679,11 @@ def trace_end():
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -670,12 +693,14 @@ def sched__sched_switch(*x):
pass
def evsel_table(evsel_id, evsel_name, *x):
+ evsel_name = toserverstr(evsel_name)
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
+ root_dir = toserverstr(root_dir)
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
@@ -686,6 +711,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
+ comm_str = toserverstr(comm_str)
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
@@ -697,6 +723,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+ short_name = toserverstr(short_name)
+ long_name = toserverstr(long_name)
+ build_id = toserverstr(build_id)
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
@@ -705,12 +734,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+ symbol_name = toserverstr(symbol_name)
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
+ name = toserverstr(name)
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
@@ -728,7 +759,7 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
-def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
- fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
- value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
+def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x):
+ fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq"
+ value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id)
call_file.write(value)
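Each row written by these helpers is one PGCOPY binary tuple: a 16-bit field count, then for every field a 32-bit byte length followed by the raw value in network byte order. Adding the parent_id column therefore bumps the count from 11 to 12 and appends "iq" (4-byte length, 8-byte integer) to the struct format. A standalone sketch of the same packing, reduced to two fields with made-up values:

    import struct

    # Hedged sketch: one PGCOPY tuple with two bigint fields, standing
    # in for the full 12-field calls row.
    fmt = "!hiqiq"            # int16 field count, then (int32 len, value) pairs
    row = struct.pack(fmt,
                      2,      # number of fields in this tuple
                      8, 17,  # field 1: 8-byte bigint (e.g. cr_id)
                      8, 0)   # field 2: 8-byte bigint (the new parent_id)
    assert len(row) == 2 + (4 + 8) * 2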
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index ed237f2ed03f..bf271fbc3a88 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -60,11 +62,17 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
def usage():
- print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
+ printerr("where: columns 'all' or 'branches'");
+ printerr(" calls 'calls' => create calls and call_paths table");
+ printerr(" callchains 'callchains' => create call_paths table");
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
@@ -100,7 +108,7 @@ def do_query_(q):
return
raise Exception("Query failed: " + q.lastError().text())
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database ...")
db_exists = False
try:
@@ -222,7 +230,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
@@ -321,7 +330,8 @@ if perf_db_export_calls:
'call_id,'
'return_id,'
'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
- 'parent_call_path_id'
+ 'parent_call_path_id,'
+ 'calls.parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
@@ -373,10 +383,10 @@ if perf_db_export_calls or perf_db_export_callchains:
call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
if perf_db_export_calls:
call_query = QSqlQuery(db)
- call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+ call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
def trace_begin():
- print datetime.datetime.today(), "Writing records..."
+ printdate("Writing records...")
do_query(query, 'BEGIN TRANSACTION')
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
@@ -388,19 +398,21 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def trace_end():
do_query(query, 'END TRANSACTION')
- print datetime.datetime.today(), "Adding indexes"
+ printdate("Adding indexes")
if perf_db_export_calls:
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -452,4 +464,4 @@ def call_path_table(*x):
bind_exec(call_path_query, 4, x)
def call_return_table(*x):
- bind_exec(call_query, 11, x)
+ bind_exec(call_query, 12, x)
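On the SQLite side the same schema change surfaces as a 12th "?" placeholder in the prepared INSERT and a matching bind count in call_return_table(). A minimal sketch with Python's built-in sqlite3 module; the two-column table and values are illustrative, not the script's schema:

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE calls (id integer, parent_id bigint)")
    # The placeholder count must match the number of bound values,
    # just as bind_exec(call_query, 12, x) must match the twelve '?'s:
    con.execute("INSERT INTO calls VALUES (?, ?)", (1, 0))
    assert con.execute("SELECT parent_id FROM calls").fetchone() == (0,)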
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 09ce73b07d35..74ef92f1d19a 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -88,20 +88,39 @@
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+from __future__ import print_function
+
import sys
import weakref
import threading
import string
-import cPickle
+try:
+ # Python2
+ import cPickle as pickle
+ # size of pickled integer big enough for record size
+ glb_nsz = 8
+except ImportError:
+ import pickle
+ glb_nsz = 16
import re
import os
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
+pyside_version_1 = True
from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event
+# xrange is range in Python3
+try:
+ xrange
+except NameError:
+ xrange = range
+
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
# Data formatting helpers
def tohex(ip):
@@ -167,9 +186,10 @@ class Thread(QThread):
class TreeModel(QAbstractItemModel):
- def __init__(self, root, parent=None):
+ def __init__(self, glb, parent=None):
super(TreeModel, self).__init__(parent)
- self.root = root
+ self.glb = glb
+ self.root = self.GetRoot()
self.last_row_read = 0
def Item(self, parent):
@@ -557,24 +577,12 @@ class CallGraphRootItem(CallGraphLevelItemBase):
self.child_items.append(child_item)
self.child_count += 1
-# Context-sensitive call graph data model
+# Context-sensitive call graph data model base
-class CallGraphModel(TreeModel):
+class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
- super(CallGraphModel, self).__init__(CallGraphRootItem(glb), parent)
- self.glb = glb
-
- def columnCount(self, parent=None):
- return 7
-
- def columnHeader(self, column):
- headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
- return headers[column]
-
- def columnAlignment(self, column):
- alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
- return alignment[column]
+ super(CallGraphModelBase, self).__init__(glb, parent)
def FindSelect(self, value, pattern, query):
if pattern:
@@ -594,34 +602,7 @@ class CallGraphModel(TreeModel):
match = " GLOB '" + str(value) + "'"
else:
match = " = '" + str(value) + "'"
- QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
- " FROM calls"
- " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
- " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
- " WHERE symbols.name" + match +
- " GROUP BY comm_id, thread_id, call_path_id"
- " ORDER BY comm_id, thread_id, call_path_id")
-
- def FindPath(self, query):
- # Turn the query result into a list of ids that the tree view can walk
- # to open the tree at the right place.
- ids = []
- parent_id = query.value(0)
- while parent_id:
- ids.insert(0, parent_id)
- q2 = QSqlQuery(self.glb.db)
- QueryExec(q2, "SELECT parent_id"
- " FROM call_paths"
- " WHERE id = " + str(parent_id))
- if not q2.next():
- break
- parent_id = q2.value(0)
- # The call path root is not used
- if ids[0] == 1:
- del ids[0]
- ids.insert(0, query.value(2))
- ids.insert(0, query.value(1))
- return ids
+ self.DoFindSelect(query, match)
def Found(self, query, found):
if found:
@@ -675,6 +656,201 @@ class CallGraphModel(TreeModel):
def FindDone(self, thread, callback, ids):
callback(ids)
+# Context-sensitive call graph data model
+
+class CallGraphModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallGraphRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " GROUP BY comm_id, thread_id, call_path_id"
+ " ORDER BY comm_id, thread_id, call_path_id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM call_paths"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ # The call path root is not used
+ if ids[0] == 1:
+ del ids[0]
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
+# Call tree data model level 2+ item base
+
+class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
+ super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
+ self.comm_id = comm_id
+ self.thread_id = thread_id
+ self.calls_id = calls_id
+ self.branch_count = branch_count
+ self.time = time
+
+ def Select(self):
+ self.query_done = True

+ if self.calls_id == 0:
+ comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
+ else:
+ comm_thread = ""
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
+ " ORDER BY call_time, calls.id")
+ while query.next():
+ child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model level three item
+
+class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
+ super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
+ dso = dsoname(dso)
+ self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
+ self.dbid = calls_id
+
+# Call tree data model level two item
+
+class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
+ super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
+ self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
+ self.dbid = thread_id
+
+ def Select(self):
+ super(CallTreeLevelTwoItem, self).Select()
+ for child_item in self.child_items:
+ self.time += child_item.time
+ self.branch_count += child_item.branch_count
+ for child_item in self.child_items:
+ child_item.data[4] = PercentToOneDP(child_item.time, self.time)
+ child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
+
+# Call tree data model level one item
+
+class CallTreeLevelOneItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, comm, parent_item):
+ super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
+ self.data = [comm, "", "", "", "", "", ""]
+ self.dbid = comm_id
+
+ def Select(self):
+ self.query_done = True
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT thread_id, pid, tid"
+ " FROM comm_threads"
+ " INNER JOIN threads ON thread_id = threads.id"
+ " WHERE comm_id = " + str(self.dbid))
+ while query.next():
+ child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model root item
+
+class CallTreeRootItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb):
+ super(CallTreeRootItem, self).__init__(glb, 0, None)
+ self.dbid = 0
+ self.query_done = True
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, comm FROM comms")
+ while query.next():
+ if not query.value(0):
+ continue
+ child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call Tree data model
+
+class CallTreeModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallTreeRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT calls.id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " ORDER BY comm_id, thread_id, call_time, calls.id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM calls"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
# Vertical widget layout
class VBox():
@@ -693,28 +869,16 @@ class VBox():
def Widget(self):
return self.vbox
-# Context-sensitive call graph window
-
-class CallGraphWindow(QMdiSubWindow):
+# Tree window base
- def __init__(self, glb, parent=None):
- super(CallGraphWindow, self).__init__(parent)
-
- self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
-
- self.view = QTreeView()
- self.view.setModel(self.model)
-
- for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
- self.view.setColumnWidth(c, w)
-
- self.find_bar = FindBar(self, self)
+class TreeWindowBase(QMdiSubWindow):
- self.vbox = VBox(self.view, self.find_bar.Widget())
-
- self.setWidget(self.vbox.Widget())
+ def __init__(self, parent=None):
+ super(TreeWindowBase, self).__init__(parent)
- AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+ self.model = None
+ self.view = None
+ self.find_bar = None
def DisplayFound(self, ids):
if not len(ids):
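
Both FindPath variants in the hunk above do the same walk: follow parent ids upward from the matched row, prepending as they go, so the view receives a top-down list of node ids to expand. With an in-memory parent map standing in for the call_paths/calls tables (illustrative values only), the idea is:

    # child id -> parent id; 1 is the unused call-path root, 0 ends the walk
    parents = {5: 3, 3: 1, 1: 0}

    def find_path(node_id, comm_id=7, thread_id=9):
        ids = []
        while node_id:
            ids.insert(0, node_id)
            node_id = parents.get(node_id, 0)
        if ids[0] == 1:           # the call path root is not displayed
            del ids[0]
        ids.insert(0, thread_id)  # view descends comm -> thread -> path
        ids.insert(0, comm_id)
        return ids

    print(find_path(5))           # [7, 9, 3, 5]
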
@@ -747,6 +911,53 @@ class CallGraphWindow(QMdiSubWindow):
if not found:
self.find_bar.NotFound()
+
+# Context-sensitive call graph window
+
+class CallGraphWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
+
+ self.view = QTreeView()
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+
+# Call tree window
+
+class CallTreeWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
+
+ self.view = QTreeView()
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
+
# Child data item finder
class ChildDataItemFinder():
@@ -812,10 +1023,6 @@ class ChildDataItemFinder():
glb_chunk_sz = 10000
-# size of pickled integer big enough for record size
-
-glb_nsz = 8
-
# Background process for SQL data fetcher
class SQLFetcherProcess():
@@ -874,7 +1081,7 @@ class SQLFetcherProcess():
return True
if space >= glb_nsz:
# Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
- nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
self.buffer[self.local_head : self.local_head + len(nd)] = nd
self.local_head = 0
if self.local_tail - self.local_head > sz:
@@ -892,9 +1099,9 @@ class SQLFetcherProcess():
self.wait_event.wait()
def AddToBuffer(self, obj):
- d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
+ d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
n = len(d)
- nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
sz = n + glb_nsz
self.WaitForSpace(sz)
pos = self.local_head
@@ -1006,12 +1213,12 @@ class SQLFetcher(QObject):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
- n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
+ n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
- n = cPickle.loads(self.buffer[0 : glb_nsz])
+ n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
- obj = cPickle.loads(self.buffer[pos : pos + n])
+ obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj
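
The hunks above switch the fetcher's shared-buffer framing from cPickle to pickle without changing the protocol: each record is written as a pickled length padded to a glb_nsz slot, followed by the pickled record, and a pickled 0 in the length slot tells the reader to wrap to the start of the buffer. A toy single-process version of the framing (an assumed shape, not the script's actual classes):

    import pickle

    glb_nsz = 16
    buf = bytearray(4096)

    def put(pos, obj):
        d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        nd = pickle.dumps(len(d), pickle.HIGHEST_PROTOCOL)
        buf[pos:pos + len(nd)] = nd                    # length slot
        buf[pos + glb_nsz:pos + glb_nsz + len(d)] = d  # payload
        return pos + glb_nsz + len(d)

    def get(pos):
        # loads() stops at the STOP opcode, so slack bytes in the slot
        # are harmless
        n = pickle.loads(bytes(buf[pos:pos + glb_nsz]))
        obj = pickle.loads(bytes(buf[pos + glb_nsz:pos + glb_nsz + n]))
        return obj, pos + glb_nsz + n

    pos = put(0, {"id": 1})
    put(pos, {"id": 2})
    obj, pos = get(0)
    print(obj, get(pos)[0])
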
@@ -1320,6 +1527,19 @@ def BranchDataPrep(query):
" (" + dsoname(query.value(15)) + ")")
return data
+def BranchDataPrepWA(query):
+ data = []
+ data.append(query.value(0))
+ # Work around pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+ data.append("{:>19}".format(query.value(1)))
+ for i in xrange(2, 8):
+ data.append(query.value(i))
+ data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
+ " (" + dsoname(query.value(11)) + ")" + " -> " +
+ tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
+ " (" + dsoname(query.value(15)) + ")")
+ return data
+
# Branch data model
class BranchModel(TreeModel):
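
BranchDataPrepWA exists because, as the comment says, PySide 1 under Python 3 cannot handle very large integers, and perf timestamps are 64-bit nanosecond values. The workaround renders the timestamp as a right-justified 19-character string before Qt ever sees it (2^63 - 1 has 19 decimal digits, so the width keeps columns aligned); the same trick reappears later for the samples tables:

    ts = 1565447951123456789   # an example 64-bit nanosecond timestamp
    print("{:>19}".format(ts))
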
@@ -1327,8 +1547,7 @@ class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
- super(BranchModel, self).__init__(BranchRootItem(), parent)
- self.glb = glb
+ super(BranchModel, self).__init__(glb, parent)
self.event_id = event_id
self.more = True
self.populated = 0
@@ -1348,10 +1567,17 @@ class BranchModel(TreeModel):
" AND evsel_id = " + str(self.event_id) +
" ORDER BY samples.id"
" LIMIT " + str(glb_chunk_sz))
- self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample)
+ if pyside_version_1 and sys.version_info[0] == 3:
+ prep = BranchDataPrepWA
+ else:
+ prep = BranchDataPrep
+ self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
+ def GetRoot(self):
+ return BranchRootItem()
+
def columnCount(self, parent=None):
return 8
@@ -1863,22 +2089,14 @@ def GetEventList(db):
# Is a table selectable
-def IsSelectable(db, table):
+def IsSelectable(db, table, sql = ""):
query = QSqlQuery(db)
try:
- QueryExec(query, "SELECT * FROM " + table + " LIMIT 1")
+ QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
except:
return False
return True
-# SQL data preparation
-
-def SQLTableDataPrep(query, count):
- data = []
- for i in xrange(count):
- data.append(query.value(i))
- return data
-
# SQL table data model item
class SQLTableItem():
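
The new sql argument lets callers probe more than bare table existence; later in this patch, IsSelectable(glb.db, "calls", "WHERE parent_id >= 0") checks that the calls table was exported with parent ids before offering the Call Tree report. The same probe, sketched with the stdlib sqlite3 module in place of QSqlQuery:

    import sqlite3

    def is_selectable(db, table, sql=""):
        try:
            db.execute("SELECT * FROM " + table + " " + sql + " LIMIT 1")
        except sqlite3.Error:
            return False
        return True

    db = sqlite3.connect(":memory:")
    db.execute("CREATE TABLE calls (id INTEGER, parent_id INTEGER)")
    print(is_selectable(db, "calls", "WHERE parent_id >= 0"))  # True
    print(is_selectable(db, "samples"))                        # False
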
@@ -1902,7 +2120,7 @@ class SQLTableModel(TableModel):
self.more = True
self.populated = 0
self.column_headers = column_headers
- self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample)
+ self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
@@ -1946,6 +2164,12 @@ class SQLTableModel(TableModel):
def columnHeader(self, column):
return self.column_headers[column]
+ def SQLTableDataPrep(self, query, count):
+ data = []
+ for i in xrange(count):
+ data.append(query.value(i))
+ return data
+
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
@@ -1974,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel):
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
column_headers.append(query.value(0))
+ if pyside_version_1 and sys.version_info[0] == 3:
+ if table_name == "samples_view":
+ self.SQLTableDataPrep = self.samples_view_DataPrep
+ if table_name == "samples":
+ self.SQLTableDataPrep = self.samples_DataPrep
super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
+ def samples_view_DataPrep(self, query, count):
+ data = []
+ data.append(query.value(0))
+ # Work around pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+ data.append("{:>19}".format(query.value(1)))
+ for i in xrange(2, count):
+ data.append(query.value(i))
+ return data
+
+ def samples_DataPrep(self, query, count):
+ data = []
+ for i in xrange(9):
+ data.append(query.value(i))
+ # Work around pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+ data.append("{:>19}".format(query.value(9)))
+ for i in xrange(10, count):
+ data.append(query.value(i))
+ return data
+
# Base class for custom ResizeColumnsToContents
class ResizeColumnsToContentsBase(QObject):
@@ -2275,9 +2523,10 @@ p.c2 {
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
-<p class=c2><a href=#allbranches>1.2 All branches</a></p>
-<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
-<p class=c2><a href=#topcallsbyelapsedtime>1.4 Top calls by elapsed time</a></p>
+<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
+<p class=c2><a href=#allbranches>1.3 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
+<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
@@ -2313,7 +2562,10 @@ v- ls
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
-<h2 id=allbranches>1.2 All branches</h2>
+<h2 id=calltree>1.2 Call Tree</h2>
+The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
+Also, the 'Count' column, which would always be 1, is replaced by 'Call Time'.
+<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
@@ -2339,10 +2591,10 @@ sudo ldconfig
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
-<h2 id=selectedbranches>1.3 Selected branches</h2>
+<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
-<h3>1.3.1 Time ranges</h3>
+<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of trace. Examples:
<pre>
@@ -2353,7 +2605,7 @@ ms, us or ns. Also, negative values are relative to the end of trace. Examples:
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
-<h2 id=topcallsbyelapsedtime>1.4 Top calls by elapsed time</h2>
+<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
@@ -2489,6 +2741,9 @@ class MainWindow(QMainWindow):
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+ if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
+ reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
+
self.EventMenu(GetEventList(glb.db), reports_menu)
if IsSelectable(glb.db, "calls"):
@@ -2549,6 +2804,9 @@ class MainWindow(QMainWindow):
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
+ def NewCallTree(self):
+ CallTreeWindow(self.glb, self)
+
def NewTopCalls(self):
dialog = TopCallsDialog(self.glb, self)
ret = dialog.exec_()
@@ -2650,9 +2908,13 @@ class LibXED():
ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
if not ok:
return 0, ""
+ if sys.version_info[0] == 2:
+ result = inst.buffer.value
+ else:
+ result = inst.buffer.value.decode()
# Return instruction length and the disassembled instruction text
# For now, assume the length is in byte 166
- return inst.xedd[166], inst.buffer.value
+ return inst.xedd[166], result
def TryOpen(file_name):
try:
@@ -2668,9 +2930,14 @@ def Is64Bit(f):
header = f.read(7)
f.seek(pos)
magic = header[0:4]
- eclass = ord(header[4])
- encoding = ord(header[5])
- version = ord(header[6])
+ if sys.version_info[0] == 2:
+ eclass = ord(header[4])
+ encoding = ord(header[5])
+ version = ord(header[6])
+ else:
+ eclass = header[4]
+ encoding = header[5]
+ version = header[6]
if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
result = True if eclass == 2 else False
return result
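
On Python 3, indexing a bytes object already yields ints, so the ord() calls would raise TypeError; the hunk branches on the interpreter instead. (Note that the magic comparison itself still compares bytes against a str on Python 3, which can never be true; a bytes literal sidesteps that, as in this Python-3-only sketch, where the file path is only an example:)

    def is_64bit_elf(path):
        with open(path, "rb") as f:
            ident = f.read(7)
        return (ident[0:4] == b"\x7fELF" and ident[4] == 2 and
                ident[5] in (1, 2) and ident[6] == 1)

    print(is_64bit_elf("/bin/ls"))
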
@@ -2769,7 +3036,7 @@ class DBRef():
def Main():
if (len(sys.argv) < 2):
- print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
+ printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}");
raise Exception("Too few arguments")
dbname = sys.argv[1]
@@ -2782,8 +3049,8 @@ def Main():
is_sqlite3 = False
try:
- f = open(dbname)
- if f.read(15) == "SQLite format 3":
+ f = open(dbname, "rb")
+ if f.read(15) == b'SQLite format 3':
is_sqlite3 = True
f.close()
except:
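
Opening the database in binary mode and comparing against a bytes literal is what makes this check work on both interpreters: in text mode under Python 3 the read returns str and the comparison with b'SQLite format 3' is always false. As a standalone sketch (the full on-disk magic is 16 bytes, "SQLite format 3" plus a NUL; the script compares the first 15):

    def is_sqlite3_file(path):
        try:
            with open(path, "rb") as f:
                return f.read(15) == b"SQLite format 3"
        except IOError:
            return False
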
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index 3648e8b986ec..310efe5e7e23 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -58,22 +58,22 @@ def syscalls__sys_exit(event_name, context, common_cpu,
raw_syscalls__sys_exit(**locals())
def print_error_totals():
- if for_comm is not None:
- print("\nsyscall errors for %s:\n" % (for_comm))
- else:
- print("\nsyscall errors:\n")
-
- print("%-30s %10s" % ("comm [pid]", "count"))
- print("%-30s %10s" % ("------------------------------", "----------"))
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print("\n%s [%d]" % (comm, pid))
- id_keys = syscalls[comm][pid].keys()
- for id in id_keys:
- print(" syscall: %-16s" % syscall_name(id))
- ret_keys = syscalls[comm][pid][id].keys()
- for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
- print(" err = %-20s %10d" % (strerror(ret), val))
+ if for_comm is not None:
+ print("\nsyscall errors for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall errors:\n")
+
+ print("%-30s %10s" % ("comm [pid]", "count"))
+ print("%-30s %10s" % ("------------------------------", "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print(" syscall: %-16s" % syscall_name(id))
+ ret_keys = syscalls[comm][pid][id].keys()
+ for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" err = %-20s %10d" % (strerror(ret), val))
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index 0f5cf437b602..0c4841acf75d 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -10,6 +10,8 @@
#
# Measures futex contention
+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
@@ -33,18 +35,18 @@ def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
- if thread_blocktime.has_key(tid):
+ if tid in thread_blocktime:
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
- print "%s[%d] lock %x contended %d times, %d avg ns" % \
- (process_names[tid], tid, lock, count, avg)
+ print("%s[%d] lock %x contended %d times, %d avg ns" %
+ (process_names[tid], tid, lock, count, avg))
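
dict.has_key() was removed in Python 3; the in operator used above works on both interpreters and is the idiomatic spelling even on Python 2:

    thread_blocktime = {1234: 567}
    print(1234 in thread_blocktime)   # True on Python 2 and 3
    # thread_blocktime.has_key(1234)  # AttributeError on Python 3
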
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index b19172d673af..a73847c8f548 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -22,34 +24,34 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
#from Core import *
def trace_begin():
- print "Intel PT Power Events and PTWRITE"
+ print("Intel PT Power Events and PTWRITE")
def trace_end():
- print "End"
+ print("End")
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
- print "IP: %u payload: %#x" % (exact_ip, payload),
+ print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
- print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
+ print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')
def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
- print "hints: %#x extensions: %#x" % (hints, extensions),
+ print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')
def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -57,13 +59,14 @@ def print_pwre(raw_buf):
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
- print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ end=' ')
def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
- print "IP: %u" % (exact_ip),
+ print("IP: %u" % (exact_ip), end=' ')
def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -71,36 +74,39 @@ def print_pwrx(raw_buf):
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
- print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+ print("deepest cstate: %u last cstate: %u wake reason: %#x" %
+ (deepest_cstate, last_cstate, wake_reason), end=' ')
def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
+ (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ end=' ')
def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
- print "%16x %s (%s)" % (ip, symbol, dso)
+ print("%16x %s (%s)" % (ip, symbol, dso))
def process_event(param_dict):
- event_attr = param_dict["attr"]
- sample = param_dict["sample"]
- raw_buf = param_dict["raw_buf"]
- comm = param_dict["comm"]
- name = param_dict["ev_name"]
-
- # Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
- dso = param_dict["dso"]
- else:
- dso = "[unknown]"
-
- if (param_dict.has_key("symbol")):
- symbol = param_dict["symbol"]
- else:
- symbol = "[unknown]"
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if "dso" in param_dict:
+ dso = param_dict["dso"]
+ else:
+ dso = "[unknown]"
+
+ if "symbol" in param_dict:
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "[unknown]"
if name == "ptwrite":
print_common_start(comm, sample, name)
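
Every converted print in this script follows the same pattern: Python 2's trailing comma suppressed the newline and emitted a space, and with print_function the equivalent is end=' ':

    from __future__ import print_function

    print("IP: %u" % 1, end=' ')   # was: print "IP: %u" % (1),
    print("payload: %#x" % 0x123)
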
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index fb0bbcbfa0f0..1f332e72b9b0 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -44,12 +44,13 @@ def print_memory_type():
print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
print("%-40s %10s %10s\n" % ("----------------------------------------",
"-----------", "-----------"),
- end='');
+ end='');
total = sum(load_mem_type_cnt.values())
for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
key = lambda kv: (kv[1], kv[0]), reverse = True):
- print("%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
- end='')
+ print("%-40s %10d %10.1f%%\n" %
+ (mem_type, count, 100 * count / total),
+ end='')
def trace_begin():
parse_iomem()
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index 212557a02c50..101059971738 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -7,7 +7,7 @@ import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 267bda49325d..ea0c8b90a783 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -124,14 +124,16 @@ def print_receive(hunk):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print(PF_NAPI_POLL %
- (diff_msec(base_t, event['event_t']), event['dev']))
+ (diff_msec(base_t, event['event_t']),
+ event['dev']))
if i == len(event_list) - 1:
print("")
else:
print(PF_JOINT)
else:
print(PF_NET_RECV %
- (diff_msec(base_t, event['event_t']), event['skbaddr'],
+ (diff_msec(base_t, event['event_t']),
+ event['skbaddr'],
event['len']))
if 'comm' in event.keys():
print(PF_WJOINT)
@@ -256,7 +258,7 @@ def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, i
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
- dev_name, work=None, budget=None):
+ dev_name, work=None, budget=None):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name, work, budget)
all_event_list.append(event_info)
@@ -353,7 +355,7 @@ def handle_irq_softirq_exit(event_info):
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
- 'irq_list':irq_list, 'event_list':event_list}
+ 'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
@@ -390,7 +392,7 @@ def handle_netif_receive_skb(event_info):
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
- 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+ 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index 3984bf51f3c5..8196e3087c9e 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -14,10 +14,10 @@ import sys
from collections import defaultdict
try:
- from UserList import UserList
+ from UserList import UserList
except ImportError:
- # Python 3: UserList moved to the collections package
- from collections import UserList
+ # Python 3: UserList moved to the collections package
+ from collections import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 987ffae7c8ca..6e0278dcb092 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -13,9 +13,9 @@ from __future__ import print_function
import os, sys, time
try:
- import thread
+ import thread
except ImportError:
- import _thread as thread
+ import _thread as thread
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -75,11 +75,12 @@ def print_syscall_totals(interval):
print("%-40s %10s" % ("event", "count"))
print("%-40s %10s" %
- ("----------------------------------------",
- "----------"))
+ ("----------------------------------------",
+ "----------"))
- for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
- reverse = True):
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]),
+ reverse = True):
try:
print("%-40s %10d" % (syscall_name(id), val))
except TypeError:
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
index 5e703efaddcc..b1c4def1410a 100755
--- a/tools/perf/scripts/python/stackcollapse.py
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -27,7 +27,7 @@ from collections import defaultdict
from optparse import OptionParser, make_option
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index 42782487b0e9..f254e40c6f0f 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -39,11 +39,10 @@ def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
-
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
- (for_pid and common_pid != for_pid ):
+ (for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
@@ -51,26 +50,26 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print("\nsyscall events for %s:\n" % (for_comm))
- else:
- print("\nsyscall events by comm/pid:\n")
-
- print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
- print("%-40s %10s" % ("----------------------------------------",
- "----------"))
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print("\n%s [%d]" % (comm, pid))
- id_keys = syscalls[comm][pid].keys()
- for id, val in sorted(syscalls[comm][pid].items(), \
- key = lambda kv: (kv[1], kv[0]), reverse = True):
- print(" %-38s %10d" % (syscall_name(id), val))
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events by comm/pid:\n")
+
+ print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" %-38s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index 0ebd89cfd42c..8adb95ff1664 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -36,8 +36,8 @@ def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
@@ -47,20 +47,19 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm, id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print("\nsyscall events for %s:\n" % (for_comm))
- else:
- print("\nsyscall events:\n")
-
- print("%-40s %10s" % ("event", "count"))
- print("%-40s %10s" % ("----------------------------------------",
- "-----------"))
-
- for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
- reverse = True):
- print("%-40s %10d" % (syscall_name(id), val))
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
+
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0
index cb0a3138fa54..93818054ae20 100644
--- a/tools/perf/tests/attr/test-record-C0
+++ b/tools/perf/tests/attr/test-record-C0
@@ -1,6 +1,6 @@
[config]
command = record
-args = -C 0 kill >/dev/null 2>&1
+args = --no-bpf-event -C 0 kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-basic b/tools/perf/tests/attr/test-record-basic
index 85a23cf35ba1..b0ca42a5ecc9 100644
--- a/tools/perf/tests/attr/test-record-basic
+++ b/tools/perf/tests/attr/test-record-basic
@@ -1,6 +1,6 @@
[config]
command = record
-args = kill >/dev/null 2>&1
+args = --no-bpf-event kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-any b/tools/perf/tests/attr/test-record-branch-any
index 81f839e2fad0..1a99b3ce6b89 100644
--- a/tools/perf/tests/attr/test-record-branch-any
+++ b/tools/perf/tests/attr/test-record-branch-any
@@ -1,6 +1,6 @@
[config]
command = record
-args = -b kill >/dev/null 2>&1
+args = --no-bpf-event -b kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any b/tools/perf/tests/attr/test-record-branch-filter-any
index 357421f4dfce..709768b508c6 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any
+++ b/tools/perf/tests/attr/test-record-branch-filter-any
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j any kill >/dev/null 2>&1
+args = --no-bpf-event -j any kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_call b/tools/perf/tests/attr/test-record-branch-filter-any_call
index dbc55f2ab845..f943221f7825 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any_call
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_call
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j any_call kill >/dev/null 2>&1
+args = --no-bpf-event -j any_call kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_ret b/tools/perf/tests/attr/test-record-branch-filter-any_ret
index a0824ff8e131..fd4f5b4154a9 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any_ret
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_ret
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j any_ret kill >/dev/null 2>&1
+args = --no-bpf-event -j any_ret kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-hv b/tools/perf/tests/attr/test-record-branch-filter-hv
index f34d6f120181..4e52d685ebe1 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-hv
+++ b/tools/perf/tests/attr/test-record-branch-filter-hv
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j hv kill >/dev/null 2>&1
+args = --no-bpf-event -j hv kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-ind_call b/tools/perf/tests/attr/test-record-branch-filter-ind_call
index b86a35232248..e08c6ab3796e 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-ind_call
+++ b/tools/perf/tests/attr/test-record-branch-filter-ind_call
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j ind_call kill >/dev/null 2>&1
+args = --no-bpf-event -j ind_call kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-k b/tools/perf/tests/attr/test-record-branch-filter-k
index d3fbc5e1858a..b4b98f84fc2f 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-k
+++ b/tools/perf/tests/attr/test-record-branch-filter-k
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j k kill >/dev/null 2>&1
+args = --no-bpf-event -j k kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-u b/tools/perf/tests/attr/test-record-branch-filter-u
index a318f0dda173..fb9610edbb0d 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-u
+++ b/tools/perf/tests/attr/test-record-branch-filter-u
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j u kill >/dev/null 2>&1
+args = --no-bpf-event -j u kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-count b/tools/perf/tests/attr/test-record-count
index 34f6cc577263..5e9b9019d786 100644
--- a/tools/perf/tests/attr/test-record-count
+++ b/tools/perf/tests/attr/test-record-count
@@ -1,6 +1,6 @@
[config]
command = record
-args = -c 123 kill >/dev/null 2>&1
+args = --no-bpf-event -c 123 kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-data b/tools/perf/tests/attr/test-record-data
index a9cf2233b0ce..a99bb13149c2 100644
--- a/tools/perf/tests/attr/test-record-data
+++ b/tools/perf/tests/attr/test-record-data
@@ -1,6 +1,6 @@
[config]
command = record
-args = -d kill >/dev/null 2>&1
+args = --no-bpf-event -d kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-freq b/tools/perf/tests/attr/test-record-freq
index bf4cb459f0d5..89e29f6b2ae0 100644
--- a/tools/perf/tests/attr/test-record-freq
+++ b/tools/perf/tests/attr/test-record-freq
@@ -1,6 +1,6 @@
[config]
command = record
-args = -F 100 kill >/dev/null 2>&1
+args = --no-bpf-event -F 100 kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-default b/tools/perf/tests/attr/test-record-graph-default
index 0b216e69760c..5d8234d50845 100644
--- a/tools/perf/tests/attr/test-record-graph-default
+++ b/tools/perf/tests/attr/test-record-graph-default
@@ -1,6 +1,6 @@
[config]
command = record
-args = -g kill >/dev/null 2>&1
+args = --no-bpf-event -g kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-dwarf b/tools/perf/tests/attr/test-record-graph-dwarf
index da2fa73bd0a2..ae92061d611d 100644
--- a/tools/perf/tests/attr/test-record-graph-dwarf
+++ b/tools/perf/tests/attr/test-record-graph-dwarf
@@ -1,6 +1,6 @@
[config]
command = record
-args = --call-graph dwarf -- kill >/dev/null 2>&1
+args = --no-bpf-event --call-graph dwarf -- kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-fp b/tools/perf/tests/attr/test-record-graph-fp
index 625d190bb798..5630521c0b0f 100644
--- a/tools/perf/tests/attr/test-record-graph-fp
+++ b/tools/perf/tests/attr/test-record-graph-fp
@@ -1,6 +1,6 @@
[config]
command = record
-args = --call-graph fp kill >/dev/null 2>&1
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
index 618ba1c17474..14ee60fd3f41 100644
--- a/tools/perf/tests/attr/test-record-group
+++ b/tools/perf/tests/attr/test-record-group
@@ -1,6 +1,6 @@
[config]
command = record
-args = --group -e cycles,instructions kill >/dev/null 2>&1
+args = --no-bpf-event --group -e cycles,instructions kill >/dev/null 2>&1
ret = 1
[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index f0729c454f16..300b9f7e6d69 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -1,6 +1,6 @@
[config]
command = record
-args = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+args = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
ret = 1
[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
index 48e8bd12fe46..3ffe246e0228 100644
--- a/tools/perf/tests/attr/test-record-group1
+++ b/tools/perf/tests/attr/test-record-group1
@@ -1,6 +1,6 @@
[config]
command = record
-args = -e '{cycles,instructions}' kill >/dev/null 2>&1
+args = --no-bpf-event -e '{cycles,instructions}' kill >/dev/null 2>&1
ret = 1
[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-buffering b/tools/perf/tests/attr/test-record-no-buffering
index aa3956d8fe20..583dcbb078ba 100644
--- a/tools/perf/tests/attr/test-record-no-buffering
+++ b/tools/perf/tests/attr/test-record-no-buffering
@@ -1,6 +1,6 @@
[config]
command = record
-args = --no-buffering kill >/dev/null 2>&1
+args = --no-bpf-event --no-buffering kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-inherit b/tools/perf/tests/attr/test-record-no-inherit
index 560943decb87..15d1dc162e1c 100644
--- a/tools/perf/tests/attr/test-record-no-inherit
+++ b/tools/perf/tests/attr/test-record-no-inherit
@@ -1,6 +1,6 @@
[config]
command = record
-args = -i kill >/dev/null 2>&1
+args = --no-bpf-event -i kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-samples b/tools/perf/tests/attr/test-record-no-samples
index 8eb73ab639e0..596fbd6d5a2c 100644
--- a/tools/perf/tests/attr/test-record-no-samples
+++ b/tools/perf/tests/attr/test-record-no-samples
@@ -1,6 +1,6 @@
[config]
command = record
-args = -n kill >/dev/null 2>&1
+args = --no-bpf-event -n kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-period b/tools/perf/tests/attr/test-record-period
index 69bc748f0f27..119101154c5e 100644
--- a/tools/perf/tests/attr/test-record-period
+++ b/tools/perf/tests/attr/test-record-period
@@ -1,6 +1,6 @@
[config]
command = record
-args = -c 100 -P kill >/dev/null 2>&1
+args = --no-bpf-event -c 100 -P kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-raw b/tools/perf/tests/attr/test-record-raw
index a188a614a44c..13a5f7860c78 100644
--- a/tools/perf/tests/attr/test-record-raw
+++ b/tools/perf/tests/attr/test-record-raw
@@ -1,6 +1,6 @@
[config]
command = record
-args = -R kill >/dev/null 2>&1
+args = --no-bpf-event -R kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 6d598cc071ae..1a9c3becf5ff 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -18,7 +18,7 @@ static void testcase(void)
int i;
for (i = 0; i < NR_ITERS; i++) {
- char proc_name[10];
+ char proc_name[15];
snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
prctl(PR_SET_NAME, proc_name);
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index ea7acf403727..71f60c0f9faa 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
ret = -1;
+ perf_evsel__delete(evsel);
return ret;
}
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 01f0706995a9..9acc1e80b936 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
const char *p;
const char **other;
double val;
- int ret;
+ int i, ret;
struct parse_ctx ctx;
int num_other;
@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
TEST_ASSERT_VAL("find other", other[3] == NULL);
+
+ for (i = 0; i < num_other; i++)
+ free((void *)other[i]);
free((void *)other);
return 0;
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index c531e6deb104..493ecb611540 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
- goto out_thread_map_delete;
+ goto out_cpu_map_delete;
}
if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -119,6 +119,8 @@ out_close_fd:
perf_evsel__close_fd(evsel);
out_evsel_delete:
perf_evsel__delete(evsel);
+out_cpu_map_delete:
+ cpu_map__put(cpus);
out_thread_map_delete:
thread_map__put(threads);
return err;
diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh
index 32bac9c0d694..5f5eefcb3c74 100755
--- a/tools/perf/trace/beauty/mmap_flags.sh
+++ b/tools/perf/trace/beauty/mmap_flags.sh
@@ -1,15 +1,18 @@
#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1
-if [ $# -ne 2 ] ; then
+if [ $# -ne 3 ] ; then
[ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/`
+ linux_header_dir=tools/include/uapi/linux
header_dir=tools/include/uapi/asm-generic
arch_header_dir=tools/arch/${hostarch}/include/uapi/asm
else
- header_dir=$1
- arch_header_dir=$2
+ linux_header_dir=$1
+ header_dir=$2
+ arch_header_dir=$3
fi
+linux_mman=${linux_header_dir}/mman.h
arch_mman=${arch_header_dir}/mman.h
# those in egrep -vw are flags, we want just the bits
@@ -20,6 +23,11 @@ egrep -q $regex ${arch_mman} && \
(egrep $regex ${arch_mman} | \
sed -r "s/$regex/\2 \1/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
+egrep -q $regex ${linux_mman} && \
+(egrep $regex ${linux_mman} | \
+ egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
+ sed -r "s/$regex/\2 \1/g" | \
+ xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) &&
(egrep $regex ${header_dir}/mman-common.h | \
egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
index d66c66315987..ea68db08b8e7 100644
--- a/tools/perf/trace/beauty/msg_flags.c
+++ b/tools/perf/trace/beauty/msg_flags.c
@@ -29,7 +29,7 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
return scnprintf(bf, size, "NONE");
#define P_MSG_FLAG(n) \
if (flags & MSG_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~MSG_##n; \
}
diff --git a/tools/perf/trace/beauty/renameat.c b/tools/perf/trace/beauty/renameat.c
index 6dab340cc506..852d2e271833 100644
--- a/tools/perf/trace/beauty/renameat.c
+++ b/tools/perf/trace/beauty/renameat.c
@@ -2,7 +2,6 @@
// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
#include "trace/beauty/beauty.h"
-#include <uapi/linux/fs.h>
static size_t renameat2__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
diff --git a/tools/perf/trace/strace/groups/string b/tools/perf/trace/strace/groups/string
new file mode 100644
index 000000000000..c87129a3e3c4
--- /dev/null
+++ b/tools/perf/trace/strace/groups/string
@@ -0,0 +1,65 @@
+access
+acct
+add_key
+chdir
+chmod
+chown
+chroot
+creat
+delete_module
+execve
+execveat
+faccessat
+fchmodat
+fchownat
+fgetxattr
+finit_module
+fremovexattr
+fsetxattr
+futimesat
+getxattr
+inotify_add_watch
+lchown
+lgetxattr
+link
+linkat
+listxattr
+llistxattr
+lremovexattr
+lsetxattr
+lstat
+memfd_create
+mkdir
+mkdirat
+mknod
+mknodat
+mq_open
+mq_timedsend
+mq_unlink
+name_to_handle_at
+newfstatat
+open
+openat
+pivot_root
+pwrite64
+quotactl
+readlink
+readlinkat
+removexattr
+rename
+renameat
+renameat2
+request_key
+rmdir
+setxattr
+stat
+statfs
+statx
+swapoff
+swapon
+symlink
+symlinkat
+truncate
+unlink
+unlinkat
+utimensat
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 4f75561424ed..4ad37d8c7d6a 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -611,14 +611,16 @@ void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
browser->top = browser->entries;
break;
case SEEK_CUR:
- browser->top = browser->top + browser->top_idx + offset;
+ browser->top = (char **)browser->top + offset;
break;
case SEEK_END:
- browser->top = browser->top + browser->nr_entries - 1 + offset;
+ browser->top = (char **)browser->entries + browser->nr_entries - 1 + offset;
break;
default:
return;
}
+ assert((char **)browser->top < (char **)browser->entries + browser->nr_entries);
+ assert((char **)browser->top >= (char **)browser->entries);
}
unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
@@ -630,7 +632,9 @@ unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
browser->top = browser->entries;
pos = (char **)browser->top;
- while (idx < browser->nr_entries) {
+ while (idx < browser->nr_entries &&
+ row < (unsigned)SLtt_Screen_Rows - 1) {
+ assert(pos < (char **)browser->entries + browser->nr_entries);
if (!browser->filter || !browser->filter(browser, *pos)) {
ui_browser__gotorc(browser, row, 0);
browser->write(browser, pos, row);
diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build
index 8fee56b46502..fdf86f7981ca 100644
--- a/tools/perf/ui/browsers/Build
+++ b/tools/perf/ui/browsers/Build
@@ -3,6 +3,7 @@ perf-y += hists.o
perf-y += map.o
perf-y += scripts.o
perf-y += header.o
+perf-y += res_sample.o
CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
CFLAGS_hists.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 35bdfd8b1e71..98d934a36d86 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -750,7 +750,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
continue;
case 'r':
{
- script_browse(NULL);
+ script_browse(NULL, NULL);
continue;
}
case 'k':
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index aef800d97ea1..3421ecbdd3f0 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -7,6 +7,7 @@
#include <string.h>
#include <linux/rbtree.h>
#include <sys/ttydefaults.h>
+#include <linux/time64.h>
#include "../../util/callchain.h"
#include "../../util/evsel.h"
@@ -30,6 +31,7 @@
#include "srcline.h"
#include "string2.h"
#include "units.h"
+#include "time-utils.h"
#include "sane_ctype.h"
@@ -1224,6 +1226,8 @@ void hist_browser__init_hpp(void)
hist_browser__hpp_color_overhead_guest_us;
perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color =
hist_browser__hpp_color_overhead_acc;
+
+ res_sample_init();
}
static int hist_browser__show_entry(struct hist_browser *browser,
@@ -2338,9 +2342,12 @@ close_file_and_continue:
}
struct popup_action {
+ unsigned long time;
struct thread *thread;
struct map_symbol ms;
int socket;
+ struct perf_evsel *evsel;
+ enum rstype rstype;
int (*fn)(struct hist_browser *browser, struct popup_action *act);
};
@@ -2527,46 +2534,137 @@ static int
do_run_script(struct hist_browser *browser __maybe_unused,
struct popup_action *act)
{
- char script_opt[64];
- memset(script_opt, 0, sizeof(script_opt));
+ char *script_opt;
+ int len;
+ int n = 0;
+ len = 100;
+ if (act->thread)
+ len += strlen(thread__comm_str(act->thread));
+ else if (act->ms.sym)
+ len += strlen(act->ms.sym->name);
+ script_opt = malloc(len);
+ if (!script_opt)
+ return -1;
+
+ script_opt[0] = 0;
if (act->thread) {
- scnprintf(script_opt, sizeof(script_opt), " -c %s ",
+ n = scnprintf(script_opt, len, " -c %s ",
thread__comm_str(act->thread));
} else if (act->ms.sym) {
- scnprintf(script_opt, sizeof(script_opt), " -S %s ",
+ n = scnprintf(script_opt, len, " -S %s ",
act->ms.sym->name);
}
- script_browse(script_opt);
+ if (act->time) {
+ char start[32], end[32];
+ unsigned long starttime = act->time;
+ unsigned long endtime = act->time + symbol_conf.time_quantum;
+
+ if (starttime == endtime) { /* Display 1ms as fallback */
+ starttime -= 1*NSEC_PER_MSEC;
+ endtime += 1*NSEC_PER_MSEC;
+ }
+ timestamp__scnprintf_usec(starttime, start, sizeof start);
+ timestamp__scnprintf_usec(endtime, end, sizeof end);
+ n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end);
+ }
+
+ script_browse(script_opt, act->evsel);
+ free(script_opt);
return 0;
}
static int
-add_script_opt(struct hist_browser *browser __maybe_unused,
+do_res_sample_script(struct hist_browser *browser,
+ struct popup_action *act)
+{
+ struct hist_entry *he;
+
+ he = hist_browser__selected_entry(browser);
+ res_sample_browse(he->res_samples, he->num_res, act->evsel, act->rstype);
+ return 0;
+}
+
+static int
+add_script_opt_2(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
- struct thread *thread, struct symbol *sym)
+ struct thread *thread, struct symbol *sym,
+ struct perf_evsel *evsel, const char *tstr)
{
if (thread) {
- if (asprintf(optstr, "Run scripts for samples of thread [%s]",
- thread__comm_str(thread)) < 0)
+ if (asprintf(optstr, "Run scripts for samples of thread [%s]%s",
+ thread__comm_str(thread), tstr) < 0)
return 0;
} else if (sym) {
- if (asprintf(optstr, "Run scripts for samples of symbol [%s]",
- sym->name) < 0)
+ if (asprintf(optstr, "Run scripts for samples of symbol [%s]%s",
+ sym->name, tstr) < 0)
return 0;
} else {
- if (asprintf(optstr, "Run scripts for all samples") < 0)
+ if (asprintf(optstr, "Run scripts for all samples%s", tstr) < 0)
return 0;
}
act->thread = thread;
act->ms.sym = sym;
+ act->evsel = evsel;
act->fn = do_run_script;
return 1;
}
static int
+add_script_opt(struct hist_browser *browser,
+ struct popup_action *act, char **optstr,
+ struct thread *thread, struct symbol *sym,
+ struct perf_evsel *evsel)
+{
+ int n, j;
+ struct hist_entry *he;
+
+ n = add_script_opt_2(browser, act, optstr, thread, sym, evsel, "");
+
+ he = hist_browser__selected_entry(browser);
+ if (sort_order && strstr(sort_order, "time")) {
+ char tstr[128];
+
+ optstr++;
+ act++;
+ j = sprintf(tstr, " in ");
+ j += timestamp__scnprintf_usec(he->time, tstr + j,
+ sizeof tstr - j);
+ j += sprintf(tstr + j, "-");
+ timestamp__scnprintf_usec(he->time + symbol_conf.time_quantum,
+ tstr + j, sizeof tstr - j);
+ n += add_script_opt_2(browser, act, optstr, thread, sym,
+ evsel, tstr);
+ act->time = he->time;
+ }
+ return n;
+}
+
+static int
+add_res_sample_opt(struct hist_browser *browser __maybe_unused,
+ struct popup_action *act, char **optstr,
+ struct res_sample *res_sample,
+ struct perf_evsel *evsel,
+ enum rstype type)
+{
+ if (!res_sample)
+ return 0;
+
+ if (asprintf(optstr, "Show context for individual samples %s",
+ type == A_ASM ? "with assembler" :
+ type == A_SOURCE ? "with source" : "") < 0)
+ return 0;
+
+ act->fn = do_res_sample_script;
+ act->evsel = evsel;
+ act->rstype = type;
+ return 1;
+}
+
+static int
do_switch_data(struct hist_browser *browser __maybe_unused,
struct popup_action *act __maybe_unused)
{
@@ -3031,7 +3129,7 @@ skip_annotation:
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
- thread, NULL);
+ thread, NULL, evsel);
}
/*
* Note that browser->selection != NULL
@@ -3046,11 +3144,24 @@ skip_annotation:
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
- NULL, browser->selection->sym);
+ NULL, browser->selection->sym,
+ evsel);
}
}
nr_options += add_script_opt(browser, &actions[nr_options],
- &options[nr_options], NULL, NULL);
+ &options[nr_options], NULL, NULL, evsel);
+ nr_options += add_res_sample_opt(browser, &actions[nr_options],
+ &options[nr_options],
+ hist_browser__selected_entry(browser)->res_samples,
+ evsel, A_NORMAL);
+ nr_options += add_res_sample_opt(browser, &actions[nr_options],
+ &options[nr_options],
+ hist_browser__selected_entry(browser)->res_samples,
+ evsel, A_ASM);
+ nr_options += add_res_sample_opt(browser, &actions[nr_options],
+ &options[nr_options],
+ hist_browser__selected_entry(browser)->res_samples,
+ evsel, A_SOURCE);
nr_options += add_switch_opt(browser, &actions[nr_options],
&options[nr_options]);
skip_scripting:
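When the sort order includes time, add_script_opt() above emits a second menu entry scoped to the entry's time quantum, and do_run_script() turns it into a --time filter. The assembled option string then looks roughly like this (comm and timestamps hypothetical):

	 -c firefox  --time 1234.567890,1234.577890

The window runs from he->time to he->time + symbol_conf.time_quantum, widened to +/-1ms when the quantum is zero.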
diff --git a/tools/perf/ui/browsers/res_sample.c b/tools/perf/ui/browsers/res_sample.c
new file mode 100644
index 000000000000..c0dd73176d42
--- /dev/null
+++ b/tools/perf/ui/browsers/res_sample.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Display a menu with individual samples to browse with perf script */
+#include "util.h"
+#include "hist.h"
+#include "evsel.h"
+#include "hists.h"
+#include "sort.h"
+#include "config.h"
+#include "time-utils.h"
+#include <linux/time64.h>
+
+static u64 context_len = 10 * NSEC_PER_MSEC;
+
+static int res_sample_config(const char *var, const char *value, void *data __maybe_unused)
+{
+ if (!strcmp(var, "samples.context"))
+ return perf_config_u64(&context_len, var, value);
+ return 0;
+}
+
+void res_sample_init(void)
+{
+ perf_config(res_sample_config, NULL);
+}
+
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+ struct perf_evsel *evsel, enum rstype rstype)
+{
+ char **names;
+ int i, n;
+ int choice;
+ char *cmd;
+ char pbuf[256], tidbuf[32], cpubuf[32];
+ const char *perf = perf_exe(pbuf, sizeof pbuf);
+ char trange[128], tsample[64];
+ struct res_sample *r;
+ char extra_format[256];
+
+ names = calloc(num_res, sizeof(char *));
+ if (!names)
+ return -1;
+ for (i = 0; i < num_res; i++) {
+ char tbuf[64];
+
+ timestamp__scnprintf_nsec(res_samples[i].time, tbuf, sizeof tbuf);
+ if (asprintf(&names[i], "%s: CPU %d tid %d", tbuf,
+ res_samples[i].cpu, res_samples[i].tid) < 0) {
+ while (--i >= 0)
+ free(names[i]);
+ free(names);
+ return -1;
+ }
+ }
+ choice = ui__popup_menu(num_res, names);
+ for (i = 0; i < num_res; i++)
+ free(names[i]);
+ free(names);
+
+ if (choice < 0 || choice >= num_res)
+ return -1;
+ r = &res_samples[choice];
+
+ n = timestamp__scnprintf_nsec(r->time - context_len, trange, sizeof trange);
+ trange[n++] = ',';
+ timestamp__scnprintf_nsec(r->time + context_len, trange + n, sizeof trange - n);
+
+ timestamp__scnprintf_nsec(r->time, tsample, sizeof tsample);
+
+ attr_to_script(extra_format, &evsel->attr);
+
+ if (asprintf(&cmd, "%s script %s%s --time %s %s%s %s%s --ns %s %s %s %s %s | less +/%s",
+ perf,
+ input_name ? "-i " : "",
+ input_name ? input_name : "",
+ trange,
+ r->cpu >= 0 ? "--cpu " : "",
+ r->cpu >= 0 ? (sprintf(cpubuf, "%d", r->cpu), cpubuf) : "",
+ r->tid ? "--tid " : "",
+ r->tid ? (sprintf(tidbuf, "%d", r->tid), tidbuf) : "",
+ extra_format,
+ rstype == A_ASM ? "-F +insn --xed" :
+ rstype == A_SOURCE ? "-F +srcline,+srccode" : "",
+ symbol_conf.inline_name ? "--inline" : "",
+ "--show-lost-events ",
+ r->tid ? "--show-switch-events --show-task-events " : "",
+ tsample) < 0)
+ return -1;
+ run_script(cmd);
+ free(cmd);
+ return 0;
+}
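Putting the pieces together: with the default 10ms samples.context, the A_ASM variant, no extra sample types recorded and no inline info, the asprintf() above yields roughly this pipeline (file, timestamps, CPU and tid hypothetical):

	perf script -i perf.data --time 1234.557890,1234.577890 --cpu 3 --tid 4567 --ns -F +insn --xed --show-lost-events --show-switch-events --show-task-events | less +/1234.567890

The trailing +/ search makes less jump straight to the selected sample's own timestamp inside the surrounding context window.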
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index 90a32ac69e76..27cf3ab88d13 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -1,34 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-#include <elf.h>
-#include <inttypes.h>
-#include <sys/ttydefaults.h>
-#include <string.h>
#include "../../util/sort.h"
#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/debug.h"
#include "../../util/symbol.h"
#include "../browser.h"
-#include "../helpline.h"
#include "../libslang.h"
-
-/* 2048 lines should be enough for a script output */
-#define MAX_LINES 2048
-
-/* 160 bytes for one output line */
-#define AVERAGE_LINE_LEN 160
-
-struct script_line {
- struct list_head node;
- char line[AVERAGE_LINE_LEN];
-};
-
-struct perf_script_browser {
- struct ui_browser b;
- struct list_head entries;
- const char *script_name;
- int nr_lines;
-};
+#include "config.h"
#define SCRIPT_NAMELEN 128
#define SCRIPT_MAX_NO 64
@@ -40,149 +18,169 @@ struct perf_script_browser {
*/
#define SCRIPT_FULLPATH_LEN 256
+struct script_config {
+ const char **names;
+ char **paths;
+ int index;
+ const char *perf;
+ char extra_format[256];
+};
+
+void attr_to_script(char *extra_format, struct perf_event_attr *attr)
+{
+ extra_format[0] = 0;
+ if (attr->read_format & PERF_FORMAT_GROUP)
+ strcat(extra_format, " -F +metric");
+ if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK)
+ strcat(extra_format, " -F +brstackinsn --xed");
+ if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
+ strcat(extra_format, " -F +iregs");
+ if (attr->sample_type & PERF_SAMPLE_REGS_USER)
+ strcat(extra_format, " -F +uregs");
+ if (attr->sample_type & PERF_SAMPLE_PHYS_ADDR)
+ strcat(extra_format, " -F +phys_addr");
+}
+
+static int add_script_option(const char *name, const char *opt,
+ struct script_config *c)
+{
+ c->names[c->index] = name;
+ if (asprintf(&c->paths[c->index],
+ "%s script %s -F +metric %s %s",
+ c->perf, opt, symbol_conf.inline_name ? " --inline" : "",
+ c->extra_format) < 0)
+ return -1;
+ c->index++;
+ return 0;
+}
+
+static int scripts_config(const char *var, const char *value, void *data)
+{
+ struct script_config *c = data;
+
+ if (!strstarts(var, "scripts."))
+ return -1;
+ if (c->index >= SCRIPT_MAX_NO)
+ return -1;
+ c->names[c->index] = strdup(var + 8); /* skip the "scripts." prefix */
+ if (!c->names[c->index])
+ return -1;
+ if (asprintf(&c->paths[c->index], "%s %s", value,
+ c->extra_format) < 0)
+ return -1;
+ c->index++;
+ return 0;
+}
+
/*
* On success, copies the full path of the selected script
* into the buffer pointed to by script_name and returns 0.
* Returns -1 on failure.
*/
-static int list_scripts(char *script_name)
+static int list_scripts(char *script_name, bool *custom,
+ struct perf_evsel *evsel)
{
- char *buf, *names[SCRIPT_MAX_NO], *paths[SCRIPT_MAX_NO];
- int i, num, choice, ret = -1;
+ char *buf, *paths[SCRIPT_MAX_NO], *names[SCRIPT_MAX_NO];
+ int i, num, choice;
+ int ret = 0;
+ int max_std, custom_perf;
+ char pbuf[256];
+ const char *perf = perf_exe(pbuf, sizeof pbuf);
+ struct script_config scriptc = {
+ .names = (const char **)names,
+ .paths = paths,
+ .perf = perf
+ };
+
+ script_name[0] = 0;
/* One buffer holds all SCRIPT_MAX_NO name + path slots */
buf = malloc(SCRIPT_MAX_NO * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN));
if (!buf)
- return ret;
+ return -1;
- for (i = 0; i < SCRIPT_MAX_NO; i++) {
- names[i] = buf + i * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
+ if (evsel)
+ attr_to_script(scriptc.extra_format, &evsel->attr);
+ add_script_option("Show individual samples", "", &scriptc);
+ add_script_option("Show individual samples with assembler", "-F +insn --xed",
+ &scriptc);
+ add_script_option("Show individual samples with source", "-F +srcline,+srccode",
+ &scriptc);
+ perf_config(scripts_config, &scriptc);
+ custom_perf = scriptc.index;
+ add_script_option("Show samples with custom perf script arguments", "", &scriptc);
+ i = scriptc.index;
+ max_std = i;
+
+ for (; i < SCRIPT_MAX_NO; i++) {
+ names[i] = buf + (i - max_std) * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
paths[i] = names[i] + SCRIPT_NAMELEN;
}
- num = find_scripts(names, paths);
- if (num > 0) {
- choice = ui__popup_menu(num, names);
- if (choice < num && choice >= 0) {
- strcpy(script_name, paths[choice]);
- ret = 0;
- }
+ num = find_scripts(names + max_std, paths + max_std, SCRIPT_MAX_NO - max_std,
+ SCRIPT_FULLPATH_LEN);
+ if (num < 0)
+ num = 0;
+ choice = ui__popup_menu(num + max_std, (char * const *)names);
+ if (choice < 0) {
+ ret = -1;
+ goto out;
}
+ if (choice == custom_perf) {
+ char script_args[50];
+ int key = ui_browser__input_window("perf script command",
+ "Enter perf script command line (without perf script prefix)",
+ script_args, "", 0);
+ if (key != K_ENTER) {
+ ret = -1;
+ goto out; /* drop buf and the standard entries' paths */
+ }
+ snprintf(script_name, SCRIPT_FULLPATH_LEN,
+ "%s script %s", perf, script_args);
+ } else if (choice < num + max_std) {
+ strcpy(script_name, paths[choice]);
+ }
+ *custom = choice >= max_std;
+out:
free(buf);
+ for (i = 0; i < max_std; i++)
+ free(paths[i]);
return ret;
}
-static void script_browser__write(struct ui_browser *browser,
- void *entry, int row)
+void run_script(char *cmd)
{
- struct script_line *sline = list_entry(entry, struct script_line, node);
- bool current_entry = ui_browser__is_current_entry(browser, row);
-
- ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
- HE_COLORSET_NORMAL);
-
- ui_browser__write_nstring(browser, sline->line, browser->width);
+ pr_debug("Running %s\n", cmd);
+ SLang_reset_tty();
+ if (system(cmd) < 0)
+ pr_warning("Cannot run %s\n", cmd);
+ /*
+ * SLang doesn't seem to reset the whole terminal, so be more
+ * forceful to get back to the original state.
+ */
+ printf("\033[c\033[H\033[J");
+ fflush(stdout);
+ SLang_init_tty(0, 0, 0);
+ SLsmg_refresh();
}
-static int script_browser__run(struct perf_script_browser *browser)
+int script_browse(const char *script_opt, struct perf_evsel *evsel)
{
- int key;
+ char *cmd, script_name[SCRIPT_FULLPATH_LEN];
+ bool custom = false;
- if (ui_browser__show(&browser->b, browser->script_name,
- "Press ESC to exit") < 0)
+ memset(script_name, 0, SCRIPT_FULLPATH_LEN);
+ if (list_scripts(script_name, &custom, evsel))
return -1;
- while (1) {
- key = ui_browser__run(&browser->b, 0);
-
- /* We can add some special key handling here if needed */
- break;
- }
-
- ui_browser__hide(&browser->b);
- return key;
-}
-
-
-int script_browse(const char *script_opt)
-{
- char cmd[SCRIPT_FULLPATH_LEN*2], script_name[SCRIPT_FULLPATH_LEN];
- char *line = NULL;
- size_t len = 0;
- ssize_t retlen;
- int ret = -1, nr_entries = 0;
- FILE *fp;
- void *buf;
- struct script_line *sline;
-
- struct perf_script_browser script = {
- .b = {
- .refresh = ui_browser__list_head_refresh,
- .seek = ui_browser__list_head_seek,
- .write = script_browser__write,
- },
- .script_name = script_name,
- };
-
- INIT_LIST_HEAD(&script.entries);
-
- /* Save each line of the output in one struct script_line object. */
- buf = zalloc((sizeof(*sline)) * MAX_LINES);
- if (!buf)
+ if (asprintf(&cmd, "%s%s %s %s%s 2>&1 | less",
+ custom ? "perf script -s " : "",
+ script_name,
+ script_opt ? script_opt : "",
+ input_name ? "-i " : "",
+ input_name ? input_name : "") < 0)
return -1;
- sline = buf;
-
- memset(script_name, 0, SCRIPT_FULLPATH_LEN);
- if (list_scripts(script_name))
- goto exit;
-
- sprintf(cmd, "perf script -s %s ", script_name);
- if (script_opt)
- strcat(cmd, script_opt);
+ run_script(cmd);
+ free(cmd);
- if (input_name) {
- strcat(cmd, " -i ");
- strcat(cmd, input_name);
- }
-
- strcat(cmd, " 2>&1");
-
- fp = popen(cmd, "r");
- if (!fp)
- goto exit;
-
- while ((retlen = getline(&line, &len, fp)) != -1) {
- strncpy(sline->line, line, AVERAGE_LINE_LEN);
-
- /* If one output line is very large, just cut it short */
- if (retlen >= AVERAGE_LINE_LEN) {
- sline->line[AVERAGE_LINE_LEN - 1] = '\0';
- sline->line[AVERAGE_LINE_LEN - 2] = '\n';
- }
- list_add_tail(&sline->node, &script.entries);
-
- if (script.b.width < retlen)
- script.b.width = retlen;
-
- if (nr_entries++ >= MAX_LINES - 1)
- break;
- sline++;
- }
-
- if (script.b.width > AVERAGE_LINE_LEN)
- script.b.width = AVERAGE_LINE_LEN;
-
- free(line);
- pclose(fp);
-
- script.nr_lines = nr_entries;
- script.b.nr_entries = nr_entries;
- script.b.entries = &script.entries;
-
- ret = script_browser__run(&script);
-exit:
- free(buf);
- return ret;
+ return 0;
}
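attr_to_script() above is what keeps the canned menu entries consistent with what was actually sampled; a minimal sketch of its effect (the attr contents are made up):

	struct perf_event_attr attr = { .sample_type = PERF_SAMPLE_PHYS_ADDR };
	char extra[256];

	attr_to_script(extra, &attr);	/* extra now holds " -F +phys_addr" */

Each recognized bit appends its own -F +field clause, and list_scripts() folds the result into every generated perf script command line.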
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 11a8a447a3af..09762985c713 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -10,6 +10,10 @@
#include <errno.h>
#include <inttypes.h>
#include <libgen.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
@@ -24,6 +28,7 @@
#include "annotate.h"
#include "evsel.h"
#include "evlist.h"
+#include "bpf-event.h"
#include "block-range.h"
#include "string2.h"
#include "arch/common.h"
@@ -31,6 +36,7 @@
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <bpf/libbpf.h>
/* FIXME: For the HE_COLORSET */
#include "ui/browser.h"
@@ -198,18 +204,18 @@ static void ins__delete(struct ins_operands *ops)
}
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
if (ins->ops->scnprintf)
- return ins->ops->scnprintf(ins, bf, size, ops);
+ return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
}
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
@@ -273,18 +279,18 @@ indirect_call:
}
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
if (ops->target.sym)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
if (ops->target.addr == 0)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.name)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);
- return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
+ return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
}
static struct ins_ops call_ops = {
@@ -388,15 +394,15 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
}
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
const char *c;
if (!ops->target.addr || ops->target.offset < 0)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.outside && ops->target.sym != NULL)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
c = strchr(ops->raw, ',');
c = validate_comma(c, ops);
@@ -415,7 +421,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
c++;
}
- return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
+ return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
ins->name, c ? c - ops->raw : 0, ops->raw,
ops->target.offset);
}
@@ -483,16 +489,16 @@ out_free_ops:
}
static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
int printed;
if (ops->locked.ins.ops == NULL)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
- printed = scnprintf(bf, size, "%-6s ", ins->name);
+ printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
- size - printed, ops->locked.ops);
+ size - printed, ops->locked.ops, max_ins_name);
}
static void lock__delete(struct ins_operands *ops)
@@ -564,9 +570,9 @@ out_free_source:
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s,%s", ins->name,
+ return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}
@@ -604,9 +610,9 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
}
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s", ins->name,
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
ops->target.name ?: ops->target.raw);
}
@@ -616,9 +622,9 @@ static struct ins_ops dec_ops = {
};
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
- struct ins_operands *ops __maybe_unused)
+ struct ins_operands *ops __maybe_unused, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s", "nop");
+ return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
}
static struct ins_ops nop_ops = {
@@ -1232,12 +1238,12 @@ void disasm_line__free(struct disasm_line *dl)
annotation_line__delete(&dl->al);
}
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
{
if (raw || !dl->ins.ops)
- return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
- return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
+ return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
}
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
@@ -1615,6 +1621,9 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
" --vmlinux vmlinux\n", build_id_msg ?: "");
}
break;
+ case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
+ scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
+ break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
@@ -1674,6 +1683,156 @@ fallback:
return 0;
}
+#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+#define PACKAGE "perf"
+#include <bfd.h>
+#include <dis-asm.h>
+
+static int symbol__disassemble_bpf(struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct annotation_options *opts = args->options;
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_linfo *prog_linfo = NULL;
+ struct bpf_prog_info_node *info_node;
+ int len = sym->end - sym->start;
+ disassembler_ftype disassemble;
+ struct map *map = args->ms.map;
+ struct disassemble_info info;
+ struct dso *dso = map->dso;
+ int pc = 0, count, sub_id;
+ struct btf *btf = NULL;
+ char tpath[PATH_MAX];
+ size_t buf_size;
+ int nr_skip = 0;
+ int ret = -1;
+ char *buf;
+ bfd *bfdf;
+ FILE *s;
+
+ if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return -1;
+
+ pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
+ sym->name, sym->start, sym->end - sym->start);
+
+ memset(tpath, 0, sizeof(tpath));
+ perf_exe(tpath, sizeof(tpath));
+
+ bfdf = bfd_openr(tpath, NULL);
+ assert(bfdf);
+ assert(bfd_check_format(bfdf, bfd_object));
+
+ s = open_memstream(&buf, &buf_size);
+ if (!s)
+ goto out;
+ init_disassemble_info(&info, s,
+ (fprintf_ftype) fprintf);
+
+ info.arch = bfd_get_arch(bfdf);
+ info.mach = bfd_get_mach(bfdf);
+
+ info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
+ dso->bpf_prog.id);
+ if (!info_node)
+ goto out;
+ info_linear = info_node->info_linear;
+ sub_id = dso->bpf_prog.sub_id;
+
+ info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
+ info.buffer_length = info_linear->info.jited_prog_len;
+
+ if (info_linear->info.nr_line_info)
+ prog_linfo = bpf_prog_linfo__new(&info_linear->info);
+
+ if (info_linear->info.btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(dso->bpf_prog.env,
+ info_linear->info.btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
+ disassemble = disassembler(bfdf);
+#endif
+ assert(disassemble);
+
+ fflush(s);
+ do {
+ const struct bpf_line_info *linfo = NULL;
+ struct disasm_line *dl;
+ size_t prev_buf_size;
+ const char *srcline;
+ u64 addr;
+
+ addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
+ count = disassemble(pc, &info);
+
+ if (prog_linfo)
+ linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+ addr, sub_id,
+ nr_skip);
+
+ if (linfo && btf) {
+ srcline = btf__name_by_offset(btf, linfo->line_off);
+ nr_skip++;
+ } else
+ srcline = NULL;
+
+ fprintf(s, "\n");
+ prev_buf_size = buf_size;
+ fflush(s);
+
+ if (!opts->hide_src_code && srcline) {
+ args->offset = -1;
+ args->line = strdup(srcline);
+ args->line_nr = 0;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl) {
+ annotation_line__add(&dl->al,
+ &notes->src->source);
+ }
+ }
+
+ args->offset = pc;
+ args->line = buf + prev_buf_size;
+ args->line_nr = 0;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ pc += count;
+ } while (count > 0 && pc < len);
+
+ ret = 0;
+out:
+ bpf_prog_linfo__free(prog_linfo);
+ btf__free(btf);
+ if (s)
+ fclose(s);
+ bfd_close(bfdf);
+ return ret;
+}
+#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
+ struct annotate_args *args __maybe_unused)
+{
+ return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+}
+#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
struct annotation_options *opts = args->options;
@@ -1701,7 +1860,9 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
pr_debug("annotating [%p] %30s : [%p] %30s\n",
dso, dso->long_name, sym, sym->name);
- if (dso__is_kcore(dso)) {
+ if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
+ return symbol__disassemble_bpf(sym, args);
+ } else if (dso__is_kcore(dso)) {
kce.kcore_filename = symfs_filename;
kce.addr = map__rip_2objdump(map, sym->start);
kce.offs = sym->start;
@@ -2414,12 +2575,30 @@ static inline int width_jumps(int n)
return 1;
}
+static int annotation__max_ins_name(struct annotation *notes)
+{
+ int max_name = 0, len;
+ struct annotation_line *al;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ if (al->offset == -1)
+ continue;
+
+ len = strlen(disasm_line(al)->ins.name);
+ if (max_name < len)
+ max_name = len;
+ }
+
+ return max_name;
+}
+
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
notes->widths.addr = notes->widths.target =
notes->widths.min_addr = hex_width(symbol__size(sym));
notes->widths.max_addr = hex_width(sym->end);
notes->widths.jumps = width_jumps(notes->max_jump_sources);
+ notes->widths.max_ins_name = annotation__max_ins_name(notes);
}
void annotation__update_column_widths(struct annotation *notes)
@@ -2583,7 +2762,7 @@ call_like:
obj__printf(obj, " ");
}
- disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset);
+ disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name);
}
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
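The scnprintf() conversions above replace the fixed "%-6s" mnemonic pad with "%-*s", where the width is the longest instruction name in the function as computed by annotation__max_ins_name(). A toy illustration with an assumed width of 9:

	scnprintf(bf, size, "%-*s %s", 9, "mov", "%rax,%rbx");
	/* pads "mov" to nine columns, so operands stay aligned with
	 * longer mnemonics such as "vpmovzxwd" */

Previously any mnemonic longer than six characters pushed its operands out of column.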
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 95053cab41fe..5bc0cf655d37 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -59,14 +59,14 @@ struct ins_ops {
void (*free)(struct ins_operands *ops);
int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
int (*scnprintf)(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
};
bool ins__is_jump(const struct ins *ins);
bool ins__is_call(const struct ins *ins);
bool ins__is_ret(const struct ins *ins);
bool ins__is_lock(const struct ins *ins);
-int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
+int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops, int max_ins_name);
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
#define ANNOTATION__IPC_WIDTH 6
@@ -219,7 +219,7 @@ int __annotation__scnprintf_samples_period(struct annotation *notes,
struct perf_evsel *evsel,
bool show_freq);
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel);
@@ -289,6 +289,7 @@ struct annotation {
u8 target;
u8 min_addr;
u8 max_addr;
+ u8 max_ins_name;
} widths;
bool have_cycles;
struct annotated_source *src;
@@ -368,6 +369,7 @@ enum symbol_disassemble_errno {
__SYMBOL_ANNOTATE_ERRNO__START = -10000,
SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START,
+ SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
__SYMBOL_ANNOTATE_ERRNO__END,
};
diff --git a/tools/perf/util/archinsn.h b/tools/perf/util/archinsn.h
new file mode 100644
index 000000000000..448cbb6b8d7e
--- /dev/null
+++ b/tools/perf/util/archinsn.h
@@ -0,0 +1,12 @@
+#ifndef PERF_ARCHINSN_H
+#define PERF_ARCHINSN_H 1
+
+struct perf_sample;
+struct machine;
+struct thread;
+
+void arch_fetch_insn(struct perf_sample *sample,
+ struct thread *thread,
+ struct machine *machine);
+
+#endif
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 267e54df511b..fb76b6b232d4 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1918,7 +1918,8 @@ static struct dso *load_dso(const char *name)
if (!map)
return NULL;
- map__load(map);
+ if (map__load(map) < 0)
+ pr_err("File '%s' not found or has no symbols.\n", name);
dso = dso__get(map->dso);
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 028c8ec1f62a..2a4a0da35632 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -3,11 +3,17 @@
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
+#include <bpf/libbpf.h>
#include <linux/btf.h>
+#include <linux/err.h>
#include "bpf-event.h"
#include "debug.h"
#include "symbol.h"
#include "machine.h"
+#include "env.h"
+#include "session.h"
+#include "map.h"
+#include "evlist.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
@@ -21,15 +27,122 @@ static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
return ret;
}
+static int machine__process_bpf_event_load(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct perf_env *env = machine->env;
+ int id = event->bpf_event.id;
+ unsigned int i;
+
+ /* no env under perf-record: nothing to update for bpf events */
+ if (env == NULL)
+ return 0;
+
+ info_node = perf_env__find_bpf_prog_info(env, id);
+ if (!info_node)
+ return 0;
+ info_linear = info_node->info_linear;
+
+ for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
+ u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
+ u64 addr = addrs[i];
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, addr);
+
+ if (map) {
+ map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
+ map->dso->bpf_prog.id = id;
+ map->dso->bpf_prog.sub_id = i;
+ map->dso->bpf_prog.env = env;
+ }
+ }
+ return 0;
+}
+
int machine__process_bpf_event(struct machine *machine __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_bpf_event(event, stdout);
+
+ switch (event->bpf_event.type) {
+ case PERF_BPF_EVENT_PROG_LOAD:
+ return machine__process_bpf_event_load(machine, event, sample);
+
+ case PERF_BPF_EVENT_PROG_UNLOAD:
+ /*
+ * Do not free bpf_prog_info and btf of the program here,
+ * as annotation still needs them. They will be freed at
+ * the end of the session.
+ */
+ break;
+ default:
+ pr_debug("unexpected bpf_event type of %d\n",
+ event->bpf_event.type);
+ break;
+ }
return 0;
}
+static int perf_env__fetch_btf(struct perf_env *env,
+ u32 btf_id,
+ struct btf *btf)
+{
+ struct btf_node *node;
+ u32 data_size;
+ const void *data;
+
+ data = btf__get_raw_data(btf, &data_size);
+
+ node = malloc(data_size + sizeof(struct btf_node));
+ if (!node)
+ return -1;
+
+ node->id = btf_id;
+ node->data_size = data_size;
+ memcpy(node->data, data, data_size);
+
+ perf_env__insert_btf(env, node);
+ return 0;
+}
+
+static int synthesize_bpf_prog_name(char *buf, int size,
+ struct bpf_prog_info *info,
+ struct btf *btf,
+ u32 sub_id)
+{
+ u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
+ void *func_infos = (void *)(uintptr_t)(info->func_info);
+ u32 sub_prog_cnt = info->nr_jited_ksyms;
+ const struct bpf_func_info *finfo;
+ const char *short_name = NULL;
+ const struct btf_type *t;
+ int name_len;
+
+ name_len = snprintf(buf, size, "bpf_prog_");
+ name_len += snprintf_hex(buf + name_len, size - name_len,
+ prog_tags[sub_id], BPF_TAG_SIZE);
+ if (btf) {
+ finfo = func_infos + sub_id * info->func_info_rec_size;
+ t = btf__type_by_id(btf, finfo->type_id);
+ short_name = btf__name_by_offset(btf, t->name_off);
+ } else if (sub_id == 0 && sub_prog_cnt == 1) {
+ /* no subprog */
+ if (info->name[0])
+ short_name = info->name;
+ } else
+ short_name = "F";
+ if (short_name)
+ name_len += snprintf(buf + name_len, size - name_len,
+ "_%s", short_name);
+ return name_len;
+}
+
/*
* Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
* program. One PERF_RECORD_BPF_EVENT is generated for the program. And
@@ -40,7 +153,7 @@ int machine__process_bpf_event(struct machine *machine __maybe_unused,
* -1 for failures;
* -2 for lack of kernel support.
*/
-static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
+static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
int fd,
@@ -49,102 +162,71 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
{
struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
struct bpf_event *bpf_event = &event->bpf_event;
- u32 sub_prog_cnt, i, func_info_rec_size = 0;
- u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
- struct bpf_prog_info info = { .type = 0, };
- u32 info_len = sizeof(info);
- void *func_infos = NULL;
- u64 *prog_addrs = NULL;
+ struct bpf_prog_info_linear *info_linear;
+ struct perf_tool *tool = session->tool;
+ struct bpf_prog_info_node *info_node;
+ struct bpf_prog_info *info;
struct btf *btf = NULL;
- u32 *prog_lens = NULL;
- bool has_btf = false;
- char errbuf[512];
+ struct perf_env *env;
+ u32 sub_prog_cnt, i;
int err = 0;
+ u64 arrays;
+
+ /*
+ * For perf-record and perf-report, use header.env;
+ * otherwise, use global perf_env.
+ */
+ env = session->data ? &session->header.env : &perf_env;
- /* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
- err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
- if (err) {
- pr_debug("%s: failed to get BPF program info: %s, aborting\n",
- __func__, str_error_r(errno, errbuf, sizeof(errbuf)));
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ info_linear = NULL;
+ pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
return -1;
}
- if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+
+ if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
pr_debug("%s: the kernel is too old, aborting\n", __func__);
return -2;
}
+ info = &info_linear->info;
+
/* number of ksyms, func_lengths, and tags should match */
- sub_prog_cnt = info.nr_jited_ksyms;
- if (sub_prog_cnt != info.nr_prog_tags ||
- sub_prog_cnt != info.nr_jited_func_lens)
+ sub_prog_cnt = info->nr_jited_ksyms;
+ if (sub_prog_cnt != info->nr_prog_tags ||
+ sub_prog_cnt != info->nr_jited_func_lens)
return -1;
/* check BTF func info support */
- if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
+ if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
/* btf func info number should be same as sub_prog_cnt */
- if (sub_prog_cnt != info.nr_func_info) {
+ if (sub_prog_cnt != info->nr_func_info) {
pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
- return -1;
- }
- if (btf__get_from_id(info.btf_id, &btf)) {
- pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
- return -1;
+ err = -1;
+ goto out;
}
- func_info_rec_size = info.func_info_rec_size;
- func_infos = calloc(sub_prog_cnt, func_info_rec_size);
- if (!func_infos) {
- pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
- return -1;
+ if (btf__get_from_id(info->btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
+ err = -1;
+ btf = NULL;
+ goto out;
}
- has_btf = true;
- }
-
- /*
- * We need address, length, and tag for each sub program.
- * Allocate memory and call bpf_obj_get_info_by_fd() again
- */
- prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
- if (!prog_addrs) {
- pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
- goto out;
- }
- prog_lens = calloc(sub_prog_cnt, sizeof(u32));
- if (!prog_lens) {
- pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
- goto out;
- }
- prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
- if (!prog_tags) {
- pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
- goto out;
- }
-
- memset(&info, 0, sizeof(info));
- info.nr_jited_ksyms = sub_prog_cnt;
- info.nr_jited_func_lens = sub_prog_cnt;
- info.nr_prog_tags = sub_prog_cnt;
- info.jited_ksyms = ptr_to_u64(prog_addrs);
- info.jited_func_lens = ptr_to_u64(prog_lens);
- info.prog_tags = ptr_to_u64(prog_tags);
- info_len = sizeof(info);
- if (has_btf) {
- info.nr_func_info = sub_prog_cnt;
- info.func_info_rec_size = func_info_rec_size;
- info.func_info = ptr_to_u64(func_infos);
- }
-
- err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
- if (err) {
- pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
- goto out;
+ perf_env__fetch_btf(env, info->btf_id, btf);
}
/* Synthesize PERF_RECORD_KSYMBOL */
for (i = 0; i < sub_prog_cnt; i++) {
- const struct bpf_func_info *finfo;
- const char *short_name = NULL;
- const struct btf_type *t;
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
int name_len;
*ksymbol_event = (struct ksymbol_event){
@@ -157,26 +239,9 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
.flags = 0,
};
- name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
- "bpf_prog_");
- name_len += snprintf_hex(ksymbol_event->name + name_len,
- KSYM_NAME_LEN - name_len,
- prog_tags[i], BPF_TAG_SIZE);
- if (has_btf) {
- finfo = func_infos + i * info.func_info_rec_size;
- t = btf__type_by_id(btf, finfo->type_id);
- short_name = btf__name_by_offset(btf, t->name_off);
- } else if (i == 0 && sub_prog_cnt == 1) {
- /* no subprog */
- if (info.name[0])
- short_name = info.name;
- } else
- short_name = "F";
- if (short_name)
- name_len += snprintf(ksymbol_event->name + name_len,
- KSYM_NAME_LEN - name_len,
- "_%s", short_name);
+ name_len = synthesize_bpf_prog_name(ksymbol_event->name,
+ KSYM_NAME_LEN, info, btf, i);
ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
sizeof(u64));
@@ -186,8 +251,8 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
machine, process);
}
- /* Synthesize PERF_RECORD_BPF_EVENT */
- if (opts->bpf_event) {
+ if (!opts->no_bpf_event) {
+ /* Synthesize PERF_RECORD_BPF_EVENT */
*bpf_event = (struct bpf_event){
.header = {
.type = PERF_RECORD_BPF_EVENT,
@@ -195,25 +260,38 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
},
.type = PERF_BPF_EVENT_PROG_LOAD,
.flags = 0,
- .id = info.id,
+ .id = info->id,
};
- memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
+ memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
memset((void *)event + event->header.size, 0, machine->id_hdr_size);
event->header.size += machine->id_hdr_size;
+
+ /* save bpf_prog_info to env */
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (!info_node) {
+ err = -1;
+ goto out;
+ }
+
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ info_linear = NULL;
+
+ /*
+ * Process the event after saving bpf_prog_info to env, so
+ * that the required information is ready for lookup.
+ */
err = perf_tool__process_synth_event(tool, event,
machine, process);
}
out:
- free(prog_tags);
- free(prog_lens);
- free(prog_addrs);
- free(func_infos);
+ free(info_linear);
free(btf);
return err ? -1 : 0;
}
-int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+int perf_event__synthesize_bpf_events(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
struct record_opts *opts)
@@ -247,7 +325,7 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
continue;
}
- err = perf_event__synthesize_one_bpf_prog(tool, process,
+ err = perf_event__synthesize_one_bpf_prog(session, process,
machine, fd,
event, opts);
close(fd);
@@ -261,3 +339,142 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
free(event);
return err;
}
+
+static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct btf *btf = NULL;
+ u64 arrays;
+ u32 btf_id;
+ int fd;
+
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0)
+ return;
+
+ arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
+
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+ goto out;
+ }
+
+ btf_id = info_linear->info.btf_id;
+
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (info_node) {
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ } else
+ free(info_linear);
+
+ if (btf_id == 0)
+ goto out;
+
+ if (btf__get_from_id(btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n",
+ __func__, btf_id);
+ goto out;
+ }
+ perf_env__fetch_btf(env, btf_id, btf);
+
+out:
+ free(btf);
+ close(fd);
+}
+
+static int bpf_event__sb_cb(union perf_event *event, void *data)
+{
+ struct perf_env *env = data;
+
+ if (event->header.type != PERF_RECORD_BPF_EVENT)
+ return -1;
+
+ switch (event->bpf_event.type) {
+ case PERF_BPF_EVENT_PROG_LOAD:
+ perf_env__add_bpf_info(env, event->bpf_event.id);
+
+ /* fall through */
+ case PERF_BPF_EVENT_PROG_UNLOAD:
+ /*
+ * Do not free bpf_prog_info and btf of the program here,
+ * as annotation still needs them. They will be freed at
+ * the end of the session.
+ */
+ break;
+ default:
+ pr_debug("unexpected bpf_event type of %d\n",
+ event->bpf_event.type);
+ break;
+ }
+
+ return 0;
+}
+
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+ struct perf_env *env)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ .sample_id_all = 1,
+ .watermark = 1,
+ .bpf_event = 1,
+ .size = sizeof(attr), /* to capture ABI version */
+ };
+
+ /*
+ * Older gcc versions don't support designated initializers, like above,
+ * for unnamed union members, such as the following:
+ */
+ attr.wakeup_watermark = 1;
+
+ return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+}
+
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp)
+{
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+ char name[KSYM_NAME_LEN];
+ struct btf *btf = NULL;
+ u32 sub_prog_cnt, i;
+
+ sub_prog_cnt = info->nr_jited_ksyms;
+ if (sub_prog_cnt != info->nr_prog_tags ||
+ sub_prog_cnt != info->nr_jited_func_lens)
+ return;
+
+ if (info->btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(env, info->btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ if (sub_prog_cnt == 1) {
+ synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
+ fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
+ info->id, name, prog_addrs[0], prog_lens[0]);
+ goto out;
+ }
+
+ fprintf(fp, "# bpf_prog_info %u:\n", info->id);
+ for (i = 0; i < sub_prog_cnt; i++) {
+ synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
+
+ fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
+ i, name, prog_addrs[i], prog_lens[i]);
+ }
+out:
+ btf__free(btf); /* btf__new() above allocates; don't leak it */
+}
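Both fetch sites above rely on the same libbpf idiom: a bitmask of BPF_PROG_INFO_* array ids tells bpf_program__get_prog_info_linear() which variable-length members of bpf_prog_info to retrieve into one linear allocation. A minimal standalone sketch (fd is an assumed program fd):

	u64 arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
	struct bpf_prog_info_linear *info_linear =
		bpf_program__get_prog_info_linear(fd, arrays);

	if (!IS_ERR_OR_NULL(info_linear)) {
		__u64 *addrs = (__u64 *)(uintptr_t)info_linear->info.jited_ksyms;
		/* addrs[0 .. info.nr_jited_ksyms) are the jited subprogram addresses */
		free(info_linear);
	}

Because the array pointers point into the single allocation, one free() releases everything, which is also why an info_linear can be stashed in the env rbtree as-is.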
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index 7890067e1a37..04c33b3bfe28 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -3,22 +3,45 @@
#define __PERF_BPF_EVENT_H
#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <pthread.h>
+#include <api/fd/array.h>
#include "event.h"
+#include <stdio.h>
struct machine;
union perf_event;
+struct perf_env;
struct perf_sample;
-struct perf_tool;
struct record_opts;
+struct evlist;
+struct target;
+
+struct bpf_prog_info_node {
+ struct bpf_prog_info_linear *info_linear;
+ struct rb_node rb_node;
+};
+
+struct btf_node {
+ struct rb_node rb_node;
+ u32 id;
+ u32 data_size;
+ char data[];
+};
#ifdef HAVE_LIBBPF_SUPPORT
int machine__process_bpf_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
-int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+int perf_event__synthesize_bpf_events(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
struct record_opts *opts);
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+ struct perf_env *env);
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp);
#else
static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
union perf_event *event __maybe_unused,
@@ -27,12 +50,25 @@ static inline int machine__process_bpf_event(struct machine *machine __maybe_unu
return 0;
}
-static inline int perf_event__synthesize_bpf_events(struct perf_tool *tool __maybe_unused,
+static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused,
struct record_opts *opts __maybe_unused)
{
return 0;
}
+
+static inline int bpf_event__add_sb_event(struct perf_evlist **evlist __maybe_unused,
+ struct perf_env *env __maybe_unused)
+{
+ return 0;
+}
+
+static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+ struct perf_env *env __maybe_unused,
+ FILE *fp __maybe_unused)
+{
+
+}
#endif // HAVE_LIBBPF_SUPPORT
#endif
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index bff0d17920ed..0c5517a8d0b7 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -185,6 +185,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
return bf;
}
+/* The caller is responsible for freeing the returned buffer. */
char *build_id_cache__origname(const char *sbuild_id)
{
char *linkname;
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
index 39c0004f2886..fc361c3f8570 100644
--- a/tools/perf/util/c++/clang.cpp
+++ b/tools/perf/util/c++/clang.cpp
@@ -156,7 +156,7 @@ getBPFObjectFromModule(llvm::Module *Module)
#endif
if (NotAdded) {
llvm::errs() << "TargetMachine can't emit a file of this type\n";
- return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);;
+ return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
}
PM.run(*Module);
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index ca0fff6272be..06f48312c5ed 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -7,7 +7,6 @@
#include "asm/bug.h"
#include "debug.h"
#include <unistd.h>
-#include <asm/unistd.h>
#include <sys/syscall.h>
static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index fa092511c52b..7e3c1b60120c 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -633,11 +633,10 @@ static int collect_config(const char *var, const char *value,
}
ret = set_value(item, value);
- return ret;
out_free:
free(key);
- return -1;
+ return ret;
}
int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index ba4c623cd8de..39fe21e1cf93 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -387,6 +387,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
break;
case OCSD_INSTR_ISB:
case OCSD_INSTR_DSB_DMB:
+ case OCSD_INSTR_WFI_WFE:
case OCSD_INSTR_OTHER:
default:
packet->last_instr_taken_branch = false;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 110804936fc3..de488b43f440 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -422,11 +422,9 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
if (!etmq->packet)
goto out_free;
- if (etm->synth_opts.last_branch || etm->sample_branches) {
- etmq->prev_packet = zalloc(szp);
- if (!etmq->prev_packet)
- goto out_free;
- }
+ etmq->prev_packet = zalloc(szp);
+ if (!etmq->prev_packet)
+ goto out_free;
if (etm->synth_opts.last_branch) {
size_t sz = sizeof(struct branch_stack);
@@ -981,7 +979,6 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
* PREV_PACKET is a branch.
*/
if (etm->synth_opts.last_branch &&
- etmq->prev_packet &&
etmq->prev_packet->sample_type == CS_ETM_RANGE &&
etmq->prev_packet->last_instr_taken_branch)
cs_etm__update_last_branch_rb(etmq);
@@ -1014,7 +1011,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
etmq->period_instructions = instrs_over;
}
- if (etm->sample_branches && etmq->prev_packet) {
+ if (etm->sample_branches) {
bool generate_sample = false;
/* Generate sample for tracing on packet */
@@ -1071,9 +1068,6 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
struct cs_etm_auxtrace *etm = etmq->etm;
struct cs_etm_packet *tmp;
- if (!etmq->prev_packet)
- return 0;
-
/* Handle start tracing packet */
if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
goto swap_packet;
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 26af43ad9ddd..e0311c9750ad 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -310,7 +310,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
if (flags & TEP_FIELD_IS_DYNAMIC) {
unsigned long long tmp_val;
- tmp_val = tep_read_number(fmtf->event->pevent,
+ tmp_val = tep_read_number(fmtf->event->tep,
data + offset, len);
offset = tmp_val;
len = offset >> 16;
@@ -354,7 +354,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
unsigned long long value_int;
value_int = tep_read_number(
- fmtf->event->pevent,
+ fmtf->event->tep,
data + offset + i * len, len);
if (!(flags & TEP_FIELD_IS_SIGNED))
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 7bd5ddeb7a41..6a64f713710d 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -14,6 +14,7 @@
#include "data.h"
#include "util.h"
#include "debug.h"
+#include "header.h"
static void close_dir(struct perf_data_file *files, int nr)
{
@@ -34,12 +35,16 @@ int perf_data__create_dir(struct perf_data *data, int nr)
struct perf_data_file *files = NULL;
int i, ret = -1;
+ if (WARN_ON(!data->is_dir))
+ return -EINVAL;
+
files = zalloc(nr * sizeof(*files));
if (!files)
return -ENOMEM;
- data->dir.files = files;
- data->dir.nr = nr;
+ data->dir.version = PERF_DIR_VERSION;
+ data->dir.files = files;
+ data->dir.nr = nr;
for (i = 0; i < nr; i++) {
struct perf_data_file *file = &files[i];
@@ -69,6 +74,13 @@ int perf_data__open_dir(struct perf_data *data)
DIR *dir;
int nr = 0;
+ if (WARN_ON(!data->is_dir))
+ return -EINVAL;
+
+ /* The version is provided by the DIR_FORMAT feature. */
+ if (WARN_ON(data->dir.version != PERF_DIR_VERSION))
+ return -1;
+
dir = opendir(data->path);
if (!dir)
return -EINVAL;
@@ -118,6 +130,26 @@ out_err:
return ret;
}
+int perf_data__update_dir(struct perf_data *data)
+{
+ int i;
+
+ if (WARN_ON(!data->is_dir))
+ return -EINVAL;
+
+ for (i = 0; i < data->dir.nr; i++) {
+ struct perf_data_file *file = &data->dir.files[i];
+ struct stat st;
+
+ if (fstat(file->fd, &st))
+ return -1;
+
+ file->size = st.st_size;
+ }
+
+ return 0;
+}
+
static bool check_pipe(struct perf_data *data)
{
struct stat st;
@@ -173,6 +205,16 @@ static int check_backup(struct perf_data *data)
return 0;
}
+static bool is_dir(struct perf_data *data)
+{
+ struct stat st;
+
+ if (stat(data->path, &st))
+ return false;
+
+ return S_ISDIR(st.st_mode);
+}
+
static int open_file_read(struct perf_data *data)
{
struct stat st;
@@ -237,7 +279,7 @@ static int open_file(struct perf_data *data)
open_file_read(data) : open_file_write(data);
if (fd < 0) {
- free(data->file.path);
+ zfree(&data->file.path);
return -1;
}
@@ -254,6 +296,30 @@ static int open_file_dup(struct perf_data *data)
return open_file(data);
}
+static int open_dir(struct perf_data *data)
+{
+ int ret;
+
+ /*
+ * So far we open only the header, so we can read the data version and
+ * layout.
+ */
+ if (asprintf(&data->file.path, "%s/header", data->path) < 0)
+ return -1;
+
+ if (perf_data__is_write(data) &&
+ mkdir(data->path, S_IRWXU) < 0)
+ return -1;
+
+ ret = open_file(data);
+
+ /* Clean up whatever we managed to create so far. */
+ if (ret && perf_data__is_write(data))
+ rm_rf_perf_data(data->path);
+
+ return ret;
+}
+
int perf_data__open(struct perf_data *data)
{
if (check_pipe(data))
@@ -265,12 +331,19 @@ int perf_data__open(struct perf_data *data)
if (check_backup(data))
return -1;
- return open_file_dup(data);
+ if (perf_data__is_read(data))
+ data->is_dir = is_dir(data);
+
+ return perf_data__is_dir(data) ?
+ open_dir(data) : open_file_dup(data);
}
void perf_data__close(struct perf_data *data)
{
- free(data->file.path);
+ if (perf_data__is_dir(data))
+ perf_data__close_dir(data);
+
+ zfree(&data->file.path);
close(data->file.fd);
}
@@ -288,9 +361,9 @@ ssize_t perf_data__write(struct perf_data *data,
int perf_data__switch(struct perf_data *data,
const char *postfix,
- size_t pos, bool at_exit)
+ size_t pos, bool at_exit,
+ char **new_filepath)
{
- char *new_filepath;
int ret;
if (check_pipe(data))
@@ -298,15 +371,15 @@ int perf_data__switch(struct perf_data *data,
if (perf_data__is_read(data))
return -EINVAL;
- if (asprintf(&new_filepath, "%s.%s", data->path, postfix) < 0)
+ if (asprintf(new_filepath, "%s.%s", data->path, postfix) < 0)
return -ENOMEM;
/*
* Only fire a warning, don't return error, continue fill
* original file.
*/
- if (rename(data->path, new_filepath))
- pr_warning("Failed to rename %s to %s\n", data->path, new_filepath);
+ if (rename(data->path, *new_filepath))
+ pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
if (!at_exit) {
close(data->file.fd);
@@ -323,6 +396,22 @@ int perf_data__switch(struct perf_data *data,
}
ret = data->file.fd;
out:
- free(new_filepath);
return ret;
}
+
+unsigned long perf_data__size(struct perf_data *data)
+{
+ u64 size = data->file.size;
+ int i;
+
+ if (!data->is_dir)
+ return size;
+
+ for (i = 0; i < data->dir.nr; i++) {
+ struct perf_data_file *file = &data->dir.files[i];
+
+ size += file->size;
+ }
+
+ return size;
+}
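A hedged usage sketch of the directory flow (path hypothetical): perf_data__open() now detects a directory on the read side and opens only its header file; the data files are scanned later, once the DIR_FORMAT header feature has filled in dir.version:

	struct perf_data data = {
		.path = "perf.data",
		.mode = PERF_DATA_MODE_READ,
	};

	if (!perf_data__open(&data) && perf_data__is_dir(&data)) {
		/* normally the session header read sets dir.version first */
		if (!perf_data__open_dir(&data))
			pr_debug("%d data files, %lu bytes\n",
				 data.dir.nr, perf_data__size(&data));
	}

perf_data__size() sums the header and per-file sizes; on the write side perf_data__update_dir() refreshes those sizes from fstat() before they are recorded.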
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 14b47be2bd69..259868a39019 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -19,10 +19,12 @@ struct perf_data {
const char *path;
struct perf_data_file file;
bool is_pipe;
+ bool is_dir;
bool force;
enum perf_data_mode mode;
struct {
+ u64 version;
struct perf_data_file *files;
int nr;
} dir;
@@ -43,14 +45,14 @@ static inline int perf_data__is_pipe(struct perf_data *data)
return data->is_pipe;
}
-static inline int perf_data__fd(struct perf_data *data)
+static inline bool perf_data__is_dir(struct perf_data *data)
{
- return data->file.fd;
+ return data->is_dir;
}
-static inline unsigned long perf_data__size(struct perf_data *data)
+static inline int perf_data__fd(struct perf_data *data)
{
- return data->file.size;
+ return data->file.fd;
}
int perf_data__open(struct perf_data *data);
@@ -68,9 +70,11 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
*/
int perf_data__switch(struct perf_data *data,
const char *postfix,
- size_t pos, bool at_exit);
+ size_t pos, bool at_exit, char **new_filepath);
int perf_data__create_dir(struct perf_data *data, int nr);
int perf_data__open_dir(struct perf_data *data);
void perf_data__close_dir(struct perf_data *data);
+int perf_data__update_dir(struct perf_data *data);
+unsigned long perf_data__size(struct perf_data *data);
#endif /* __PERF_DATA_H */
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index de9b4769d06c..d7315a00c731 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -510,18 +510,23 @@ int db_export__call_path(struct db_export *dbe, struct call_path *cp)
return 0;
}
-int db_export__call_return(struct db_export *dbe, struct call_return *cr)
+int db_export__call_return(struct db_export *dbe, struct call_return *cr,
+ u64 *parent_db_id)
{
int err;
- if (cr->db_id)
- return 0;
-
err = db_export__call_path(dbe, cr->cp);
if (err)
return err;
- cr->db_id = ++dbe->call_return_last_db_id;
+ if (!cr->db_id)
+ cr->db_id = ++dbe->call_return_last_db_id;
+
+ if (parent_db_id) {
+ if (!*parent_db_id)
+ *parent_db_id = ++dbe->call_return_last_db_id;
+ cr->parent_db_id = *parent_db_id;
+ }
if (dbe->export_call_return)
return dbe->export_call_return(dbe, cr);
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
index 67bc6b8ad2d6..4e2424c89df9 100644
--- a/tools/perf/util/db-export.h
+++ b/tools/perf/util/db-export.h
@@ -104,6 +104,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
int db_export__branch_types(struct db_export *dbe);
int db_export__call_path(struct db_export *dbe, struct call_path *cp);
-int db_export__call_return(struct db_export *dbe, struct call_return *cr);
+int db_export__call_return(struct db_export *dbe, struct call_return *cr,
+ u64 *parent_db_id);
#endif
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index ba58ba603b69..e059976d9d93 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -184,6 +184,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
ret = -1;
break;
@@ -1141,28 +1142,34 @@ void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
static void dso__set_basename(struct dso *dso)
{
- /*
- * basename() may modify path buffer, so we must pass
- * a copy.
- */
- char *base, *lname = strdup(dso->long_name);
+ char *base, *lname;
+ int tid;
- if (!lname)
- return;
-
- /*
- * basename() may return a pointer to internal
- * storage which is reused in subsequent calls
- * so copy the result.
- */
- base = strdup(basename(lname));
+ if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
+ if (asprintf(&base, "[JIT] tid %d", tid) < 0)
+ return;
+ } else {
+ /*
+ * basename() may modify path buffer, so we must pass
+ * a copy.
+ */
+ lname = strdup(dso->long_name);
+ if (!lname)
+ return;
- free(lname);
+ /*
+ * basename() may return a pointer to internal
+ * storage which is reused in subsequent calls
+ * so copy the result.
+ */
+ base = strdup(basename(lname));
- if (!base)
- return;
+ free(lname);
- dso__set_short_name(dso, base, true);
+ if (!base)
+ return;
+ }
+ dso__set_short_name(dso, base, true);
}
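The reworked dso__set_basename() gives JIT map files (/tmp/perf-<tid>.map) a readable "[JIT] tid <tid>" short name and keeps the defensive strdup() dance around basename() for everything else. A standalone sketch of that logic (short_name() is an illustrative name; the path in main() is just test input):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libgen.h>

static char *short_name(const char *long_name)
{
	char *base, *copy;
	int tid;

	/* JIT map files are named /tmp/perf-<tid>.map */
	if (sscanf(long_name, "/tmp/perf-%d.map", &tid) == 1) {
		if (asprintf(&base, "[JIT] tid %d", tid) < 0)
			return NULL;
		return base;
	}
	copy = strdup(long_name);	/* basename() may modify its argument */
	if (!copy)
		return NULL;
	base = strdup(basename(copy));	/* ... and may return static storage */
	free(copy);
	return base;
}

int main(void)
{
	char *n = short_name("/tmp/perf-1234.map");

	printf("%s\n", n ? n : "(null)");	/* [JIT] tid 1234 */
	free(n);
	return 0;
}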
int dso__name_len(const struct dso *dso)
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index bb417c54c25a..6e3f63781e51 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -14,6 +14,7 @@
struct machine;
struct map;
+struct perf_env;
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
@@ -35,6 +36,7 @@ enum dso_binary_type {
DSO_BINARY_TYPE__KCORE,
DSO_BINARY_TYPE__GUEST_KCORE,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__BPF_PROG_INFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -189,6 +191,12 @@ struct dso {
u64 debug_frame_offset;
u64 eh_frame_hdr_offset;
} data;
+ /* bpf prog information */
+ struct {
+ u32 id;
+ u32 sub_id;
+ struct perf_env *env;
+ } bpf_prog;
union { /* Tool specific area */
void *priv;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 4c23779e271a..6a3eaf7d9353 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -3,15 +3,167 @@
#include "env.h"
#include "sane_ctype.h"
#include "util.h"
+#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
+#include <bpf/libbpf.h>
struct perf_env perf_env;
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node)
+{
+ __u32 prog_id = info_node->info_linear->info.id;
+ struct bpf_prog_info_node *node;
+ struct rb_node *parent = NULL;
+ struct rb_node **p;
+
+ down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.infos.rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id) {
+ p = &(*p)->rb_left;
+ } else if (prog_id > node->info_linear->info.id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated bpf prog info %u\n", prog_id);
+ goto out;
+ }
+ }
+
+ rb_link_node(&info_node->rb_node, parent, p);
+ rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ env->bpf_progs.infos_cnt++;
+out:
+ up_write(&env->bpf_progs.lock);
+}
+
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id)
+{
+ struct bpf_prog_info_node *node = NULL;
+ struct rb_node *n;
+
+ down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.infos.rb_node;
+
+ while (n) {
+ node = rb_entry(n, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id)
+ n = n->rb_left;
+ else if (prog_id > node->info_linear->info.id)
+ n = n->rb_right;
+ else
+ goto out;
+ }
+ node = NULL;
+
+out:
+ up_read(&env->bpf_progs.lock);
+ return node;
+}
+
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+ struct rb_node *parent = NULL;
+ __u32 btf_id = btf_node->id;
+ struct btf_node *node;
+ struct rb_node **p;
+
+ down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.btfs.rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ node = rb_entry(parent, struct btf_node, rb_node);
+ if (btf_id < node->id) {
+ p = &(*p)->rb_left;
+ } else if (btf_id > node->id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated btf %u\n", btf_id);
+ goto out;
+ }
+ }
+
+ rb_link_node(&btf_node->rb_node, parent, p);
+ rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+ env->bpf_progs.btfs_cnt++;
+out:
+ up_write(&env->bpf_progs.lock);
+}
+
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+ struct btf_node *node = NULL;
+ struct rb_node *n;
+
+ down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.btfs.rb_node;
+
+ while (n) {
+ node = rb_entry(n, struct btf_node, rb_node);
+ if (btf_id < node->id)
+ n = n->rb_left;
+ else if (btf_id > node->id)
+ n = n->rb_right;
+ else
+ goto out;
+ }
+ node = NULL;
+
+out:
+ up_read(&env->bpf_progs.lock);
+ return node;
+}
+
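The insert/find pairs above follow the usual rbtree pattern: walk down comparing ids under the writer lock, link the new node at the empty slot, and drop duplicates. A simplified standalone sketch of the same id-keyed lookup, using a plain binary search tree and a pthread rwlock in place of the kernel-tools rbtree and rw_semaphore:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned id;
	struct node *left, *right;
};

static struct node *root;
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static void insert(unsigned id)
{
	struct node **p, *n;

	pthread_rwlock_wrlock(&lock);
	p = &root;
	while (*p) {
		if (id < (*p)->id)
			p = &(*p)->left;
		else if (id > (*p)->id)
			p = &(*p)->right;
		else
			goto out;	/* duplicated id: keep the first copy */
	}
	n = calloc(1, sizeof(*n));
	if (n) {
		n->id = id;
		*p = n;		/* link at the empty slot we walked to */
	}
out:
	pthread_rwlock_unlock(&lock);
}

static struct node *find(unsigned id)
{
	struct node *n;

	pthread_rwlock_rdlock(&lock);
	n = root;
	while (n && n->id != id)
		n = id < n->id ? n->left : n->right;
	pthread_rwlock_unlock(&lock);
	return n;
}

int main(void)
{
	insert(7); insert(3); insert(9);
	printf("%s\n", find(9) ? "found" : "missing");
	return 0;
}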
+/* purge data in bpf_progs.infos and bpf_progs.btfs trees */
+static void perf_env__purge_bpf(struct perf_env *env)
+{
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_write(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+
+ while (next) {
+ struct bpf_prog_info_node *node;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase(&node->rb_node, root);
+ free(node);
+ }
+
+ env->bpf_progs.infos_cnt = 0;
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase(&node->rb_node, root);
+ free(node);
+ }
+
+ env->bpf_progs.btfs_cnt = 0;
+
+ up_write(&env->bpf_progs.lock);
+}
+
void perf_env__exit(struct perf_env *env)
{
int i;
+ perf_env__purge_bpf(env);
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
@@ -38,6 +190,13 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->memory_nodes);
}
+void perf_env__init(struct perf_env *env)
+{
+ env->bpf_progs.infos = RB_ROOT;
+ env->bpf_progs.btfs = RB_ROOT;
+ init_rwsem(&env->bpf_progs.lock);
+}
+
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
int i;
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d01b8355f4ca..4f8e2b485c01 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -3,7 +3,9 @@
#define __PERF_ENV_H
#include <linux/types.h>
+#include <linux/rbtree.h>
#include "cpumap.h"
+#include "rwsem.h"
struct cpu_topology_map {
int socket_id;
@@ -64,8 +66,23 @@ struct perf_env {
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
u64 clockid_res_ns;
+
+ /*
+ * bpf_progs.lock protects the bpf rbtrees. This is needed because
+ * the trees are accessed by different threads in perf-top.
+ */
+ struct {
+ struct rw_semaphore lock;
+ struct rb_root infos;
+ u32 infos_cnt;
+ struct rb_root btfs;
+ u32 btfs_cnt;
+ } bpf_progs;
};
+struct bpf_prog_info_node;
+struct btf_node;
+
extern struct perf_env perf_env;
void perf_env__exit(struct perf_env *env);
@@ -80,4 +97,11 @@ const char *perf_env__arch(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
+void perf_env__init(struct perf_env *env);
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id);
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 36ae7e92dab1..4e908ec1ef64 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -6,6 +6,7 @@
#include <stdio.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
+#include <linux/perf_event.h>
#include "../perf.h"
#include "build-id.h"
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 08cedb643ea6..4b6783ff5813 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -19,6 +19,7 @@
#include "debug.h"
#include "units.h"
#include "asm/bug.h"
+#include "bpf-event.h"
#include <signal.h>
#include <unistd.h>
@@ -230,20 +231,6 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
}
}
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
-{
- attr->precise_ip = 3;
-
- while (attr->precise_ip != 0) {
- int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
- if (fd != -1) {
- close(fd);
- break;
- }
- --attr->precise_ip;
- }
-}
-
int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
{
struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
@@ -1022,7 +1009,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks, int affinity)
+ bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@@ -1032,7 +1019,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
 * So &mp should not be passed through a const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
+ struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1064,7 +1051,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1841,3 +1828,125 @@ struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
}
return leader;
}
+
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data)
+{
+ struct perf_evsel *evsel;
+ bool new_evlist = (*evlist) == NULL;
+
+ if (*evlist == NULL)
+ *evlist = perf_evlist__new();
+ if (*evlist == NULL)
+ return -1;
+
+ if (!attr->sample_id_all) {
+ pr_warning("enabling sample_id_all for all side band events\n");
+ attr->sample_id_all = 1;
+ }
+
+ evsel = perf_evsel__new_idx(attr, (*evlist)->nr_entries);
+ if (!evsel)
+ goto out_err;
+
+ evsel->side_band.cb = cb;
+ evsel->side_band.data = data;
+ perf_evlist__add(*evlist, evsel);
+ return 0;
+
+out_err:
+ if (new_evlist) {
+ perf_evlist__delete(*evlist);
+ *evlist = NULL;
+ }
+ return -1;
+}
+
+static void *perf_evlist__poll_thread(void *arg)
+{
+ struct perf_evlist *evlist = arg;
+ bool draining = false;
+ int i, done = 0;
+
+ while (!done) {
+ bool got_data = false;
+
+ if (evlist->thread.done)
+ draining = true;
+
+ if (!draining)
+ perf_evlist__poll(evlist, 1000);
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ struct perf_mmap *map = &evlist->mmap[i];
+ union perf_event *event;
+
+ if (perf_mmap__read_init(map))
+ continue;
+ while ((event = perf_mmap__read_event(map)) != NULL) {
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (evsel && evsel->side_band.cb)
+ evsel->side_band.cb(event, evsel->side_band.data);
+ else
+ pr_warning("cannot locate proper evsel for the side band event\n");
+
+ perf_mmap__consume(map);
+ got_data = true;
+ }
+ perf_mmap__read_done(map);
+ }
+
+ if (draining && !got_data)
+ break;
+ }
+ return NULL;
+}
+
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+ struct target *target)
+{
+ struct perf_evsel *counter;
+
+ if (!evlist)
+ return 0;
+
+ if (perf_evlist__create_maps(evlist, target))
+ goto out_delete_evlist;
+
+ evlist__for_each_entry(evlist, counter) {
+ if (perf_evsel__open(counter, evlist->cpus,
+ evlist->threads) < 0)
+ goto out_delete_evlist;
+ }
+
+ if (perf_evlist__mmap(evlist, UINT_MAX))
+ goto out_delete_evlist;
+
+ evlist__for_each_entry(evlist, counter) {
+ if (perf_evsel__enable(counter))
+ goto out_delete_evlist;
+ }
+
+ evlist->thread.done = 0;
+ if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
+ goto out_delete_evlist;
+
+ return 0;
+
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ return -1;
+}
+
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist)
+{
+ if (!evlist)
+ return;
+ evlist->thread.done = 1;
+ pthread_join(evlist->thread.th, NULL);
+ perf_evlist__delete(evlist);
+}
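The side-band machinery above starts a worker that polls for events until a done flag flips, then drains whatever is still buffered before exiting. A minimal pthread sketch of that start/drain/stop protocol (a plain counter stands in for the mmap ring buffers the real code reads):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static volatile int done;
static int pending = 5;		/* pretend five events are queued */

static void *poll_thread(void *arg)
{
	int draining = 0;

	(void)arg;
	while (1) {
		int got_data = 0;

		if (done)
			draining = 1;	/* stop waiting, just drain */
		if (!draining)
			usleep(1000);	/* stands in for perf_evlist__poll() */
		while (pending > 0) {	/* consume whatever is available */
			pending--;
			got_data = 1;
		}
		if (draining && !got_data)
			break;		/* fully drained: exit */
	}
	return NULL;
}

int main(void)
{
	pthread_t th;

	if (pthread_create(&th, NULL, poll_thread, NULL))
		return 1;
	usleep(10000);
	done = 1;		/* ask the worker to drain and stop */
	pthread_join(th, NULL);
	printf("pending=%d\n", pending);	/* 0 */
	return 0;
}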
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 744906dd4887..c9a0f72677fd 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -54,6 +54,10 @@ struct perf_evlist {
struct perf_sample *sample);
u64 first_sample_time;
u64 last_sample_time;
+ struct {
+ pthread_t th;
+ volatile int done;
+ } thread;
};
struct perf_evsel_str_handler {
@@ -87,6 +91,14 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
int perf_evlist__add_dummy(struct perf_evlist *evlist);
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data);
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+ struct target *target);
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist);
+
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);
@@ -165,7 +177,8 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks, int affinity);
+ bool auxtrace_overwrite, int nr_cblocks,
+ int affinity, int flush);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
@@ -303,8 +316,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
struct perf_evsel *tracking_evsel);
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
-
struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index dfe2958e6287..a10cf4cde920 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -294,25 +294,18 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
if (!precise)
goto new_event;
- /*
- * Unnamed union member, not supported as struct member named
- * initializer in older compilers such as gcc 4.4.7
- *
- * Just for probing the precise_ip:
- */
- attr.sample_period = 1;
- perf_event_attr__set_max_precise_ip(&attr);
/*
* Now let the usual logic to set up the perf_event_attr defaults
* to kick in when we return and before perf_evsel__open() is called.
*/
- attr.sample_period = 0;
new_event:
evsel = perf_evsel__new(&attr);
if (evsel == NULL)
goto out;
+ evsel->precise_max = true;
+
/* use asprintf() because free(evsel) assumes name is allocated */
if (asprintf(&evsel->name, "cycles%s%s%.*s",
(attr.precise_ip || attr.exclude_kernel) ? ":" : "",
@@ -587,6 +580,12 @@ static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
+static int perf_evsel__tool_name(char *bf, size_t size)
+{
+	return scnprintf(bf, size, "duration_time");
+}
+
const char *perf_evsel__name(struct perf_evsel *evsel)
{
char bf[128];
@@ -608,7 +607,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
break;
case PERF_TYPE_SOFTWARE:
- perf_evsel__sw_name(evsel, bf, sizeof(bf));
+ if (evsel->tool_event)
+ perf_evsel__tool_name(bf, sizeof(bf));
+ else
+ perf_evsel__sw_name(evsel, bf, sizeof(bf));
break;
case PERF_TYPE_TRACEPOINT:
@@ -1044,7 +1046,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
attr->ksymbol = track && !perf_missing_features.ksymbol;
- attr->bpf_event = track && opts->bpf_event &&
+ attr->bpf_event = track && !opts->no_bpf_event &&
!perf_missing_features.bpf_event;
if (opts->record_namespaces)
@@ -1091,7 +1093,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
}
if (evsel->precise_max)
- perf_event_attr__set_max_precise_ip(attr);
+ attr->precise_ip = 3;
if (opts->all_user) {
attr->exclude_kernel = 1;
@@ -1300,6 +1302,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
{
assert(list_empty(&evsel->node));
assert(evsel->evlist == NULL);
+ perf_evsel__free_counts(evsel);
perf_evsel__free_fd(evsel);
perf_evsel__free_id(evsel);
perf_evsel__free_config_terms(evsel);
@@ -1350,10 +1353,9 @@ void perf_counts_values__scale(struct perf_counts_values *count,
count->val = 0;
} else if (count->run < count->ena) {
scaled = 1;
- count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
+ count->val = (u64)((double) count->val * count->ena / count->run);
}
- } else
- count->ena = count->run = 0;
+ }
if (pscaled)
*pscaled = scaled;
@@ -1757,6 +1759,59 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
return true;
}
+static void display_attr(struct perf_event_attr *attr)
+{
+ if (verbose >= 2) {
+ fprintf(stderr, "%.60s\n", graph_dotted_line);
+ fprintf(stderr, "perf_event_attr:\n");
+ perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
+ fprintf(stderr, "%.60s\n", graph_dotted_line);
+ }
+}
+
+static int perf_event_open(struct perf_evsel *evsel,
+ pid_t pid, int cpu, int group_fd,
+ unsigned long flags)
+{
+ int precise_ip = evsel->attr.precise_ip;
+ int fd;
+
+ while (1) {
+ pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
+ pid, cpu, group_fd, flags);
+
+ fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);
+ if (fd >= 0)
+ break;
+
+ /*
+ * Do quick precise_ip fallback if:
+ * - there is precise_ip set in perf_event_attr
+ * - maximum precise is requested
+ * - sys_perf_event_open failed with ENOTSUP error,
+ * which is associated with wrong precise_ip
+ */
+ if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP))
+ break;
+
+ /*
+ * We tried all the precise_ip values, and it's
+ * still failing, so leave it to standard fallback.
+ */
+ if (!evsel->attr.precise_ip) {
+ evsel->attr.precise_ip = precise_ip;
+ break;
+ }
+
+ pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
+ evsel->attr.precise_ip--;
+ pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
+ display_attr(&evsel->attr);
+ }
+
+ return fd;
+}
+
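perf_event_open() above degrades precise_ip one step at a time while the kernel keeps answering ENOTSUP, restoring the originally requested value if it reaches zero so the ordinary fallback path can report the real error. A self-contained sketch with a mock "kernel" that only supports precision 1 (mock_event_open() and open_with_fallback() are illustrative):

#include <errno.h>
#include <stdio.h>

/* Mock syscall: pretend only precise_ip <= 1 is supported. */
static int mock_event_open(int precise_ip)
{
	if (precise_ip > 1) {
		errno = ENOTSUP;
		return -1;
	}
	return 42;	/* fake fd */
}

static int open_with_fallback(int *precise_ip)
{
	int requested = *precise_ip;
	int fd;

	while (1) {
		fd = mock_event_open(*precise_ip);
		if (fd >= 0)
			break;
		if (!requested || errno != ENOTSUP)
			break;	/* unrelated failure: leave it to the caller */
		if (!*precise_ip) {
			*precise_ip = requested;	/* restore, give up */
			break;
		}
		(*precise_ip)--;	/* retry with less precision */
	}
	return fd;
}

int main(void)
{
	int precise = 3;
	int fd = open_with_fallback(&precise);

	printf("fd=%d precise_ip=%d\n", fd, precise);	/* fd=42 precise_ip=1 */
	return 0;
}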
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads)
{
@@ -1832,12 +1887,7 @@ retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
- if (verbose >= 2) {
- fprintf(stderr, "%.60s\n", graph_dotted_line);
- fprintf(stderr, "perf_event_attr:\n");
- perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
- fprintf(stderr, "%.60s\n", graph_dotted_line);
- }
+ display_attr(&evsel->attr);
for (cpu = 0; cpu < cpus->nr; cpu++) {
@@ -1849,13 +1899,10 @@ retry_sample_id:
group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
- pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
- pid, cpus->map[cpu], group_fd, flags);
-
test_attr__ready();
- fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
- group_fd, flags);
+ fd = perf_event_open(evsel, pid, cpus->map[cpu],
+ group_fd, flags);
FD(evsel, cpu, thread) = fd;
@@ -2330,7 +2377,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
if (data->user_regs.abi) {
u64 mask = evsel->attr.sample_regs_user;
- sz = hweight_long(mask) * sizeof(u64);
+ sz = hweight64(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
data->user_regs.mask = mask;
data->user_regs.regs = (u64 *)array;
@@ -2386,7 +2433,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
u64 mask = evsel->attr.sample_regs_intr;
- sz = hweight_long(mask) * sizeof(u64);
+ sz = hweight64(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
data->intr_regs.mask = mask;
data->intr_regs.regs = (u64 *)array;
@@ -2514,7 +2561,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
result += sizeof(u64);
- sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->user_regs.mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
@@ -2542,7 +2589,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
if (type & PERF_SAMPLE_REGS_INTR) {
if (sample->intr_regs.abi) {
result += sizeof(u64);
- sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
@@ -2672,7 +2719,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
*array++ = sample->user_regs.abi;
- sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->user_regs.mask) * sizeof(u64);
memcpy(array, sample->user_regs.regs, sz);
array = (void *)array + sz;
} else {
@@ -2708,7 +2755,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_REGS_INTR) {
if (sample->intr_regs.abi) {
*array++ = sample->intr_regs.abi;
- sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
memcpy(array, sample->intr_regs.regs, sz);
array = (void *)array + sz;
} else {
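The hweight_long() → hweight64() conversions in this file matter on 32-bit builds, where hweight_long() counts only the low 32 bits of the register mask and would under-size the sampled register dump. A short sketch of the size computation with a 64-bit popcount (regs_dump_size() is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Each bit set in the register mask selects one u64 in the sample. */
static size_t regs_dump_size(uint64_t mask)
{
	return (size_t)__builtin_popcountll(mask) * sizeof(uint64_t);
}

int main(void)
{
	uint64_t mask = (1ULL << 0) | (1ULL << 15) | (1ULL << 40);

	/* bit 40 would be lost by a 32-bit popcount on 32-bit targets */
	printf("%zu bytes\n", regs_dump_size(mask));	/* 24 bytes */
	return 0;
}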
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index cc578e02e08f..6d190cbf1070 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -73,6 +73,13 @@ struct perf_evsel_config_term {
struct perf_stat_evsel;
+typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
+
+enum perf_tool_event {
+ PERF_TOOL_NONE = 0,
+ PERF_TOOL_DURATION_TIME = 1,
+};
+
/** struct perf_evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
@@ -119,6 +126,7 @@ struct perf_evsel {
unsigned int sample_size;
int id_pos;
int is_pos;
+ enum perf_tool_event tool_event;
bool uniquified_name;
bool snapshot;
bool supported;
@@ -151,6 +159,10 @@ struct perf_evsel {
bool collect_stat;
bool weak_group;
const char *pmu_name;
+ struct {
+ perf_evsel__sb_cb_t *cb;
+ void *data;
+ } side_band;
};
union u64_swap {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 01b324c275b9..2d2af2ac2b1e 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -18,6 +18,7 @@
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
+#include <bpf/libbpf.h>
#include "evlist.h"
#include "evsel.h"
@@ -40,6 +41,7 @@
#include "time-utils.h"
#include "units.h"
#include "cputopo.h"
+#include "bpf-event.h"
#include "sane_ctype.h"
@@ -861,6 +863,104 @@ static int write_clockid(struct feat_fd *ff,
sizeof(ff->ph->env.clockid_res_ns));
}
+static int write_dir_format(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_session *session;
+ struct perf_data *data;
+
+ session = container_of(ff->ph, struct perf_session, header);
+ data = session->data;
+
+ if (WARN_ON(!perf_data__is_dir(data)))
+ return -1;
+
+ return do_write(ff, &data->dir.version, sizeof(data->dir.version));
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+ int ret;
+
+ down_read(&env->bpf_progs.lock);
+
+ ret = do_write(ff, &env->bpf_progs.infos_cnt,
+ sizeof(env->bpf_progs.infos_cnt));
+ if (ret < 0)
+ goto out;
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+ while (next) {
+ struct bpf_prog_info_node *node;
+ size_t len;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ len = sizeof(struct bpf_prog_info_linear) +
+ node->info_linear->data_len;
+
+ /* before writing to file, translate address to offset */
+ bpf_program__bpil_addr_to_offs(node->info_linear);
+ ret = do_write(ff, node->info_linear, len);
+ /*
+ * translate back to address even when do_write() fails,
+ * so that this function never changes the data.
+ */
+ bpf_program__bpil_offs_to_addr(node->info_linear);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ up_read(&env->bpf_progs.lock);
+ return ret;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int write_bpf_btf(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+ int ret;
+
+ down_read(&env->bpf_progs.lock);
+
+ ret = do_write(ff, &env->bpf_progs.btfs_cnt,
+ sizeof(env->bpf_progs.btfs_cnt));
+
+ if (ret < 0)
+ goto out;
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ ret = do_write(ff, &node->id,
+ sizeof(u32) * 2 + node->data_size);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ up_read(&env->bpf_progs.lock);
+ return ret;
+}
+
static int cpu_cache_level__sort(const void *a, const void *b)
{
struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@@ -1341,6 +1441,63 @@ static void print_clockid(struct feat_fd *ff, FILE *fp)
ff->ph->env.clockid_res_ns * 1000);
}
+static void print_dir_format(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_session *session;
+ struct perf_data *data;
+
+ session = container_of(ff->ph, struct perf_session, header);
+ data = session->data;
+
+ fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
+}
+
+static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_read(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+
+ while (next) {
+ struct bpf_prog_info_node *node;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+
+ bpf_event__print_bpf_prog_info(&node->info_linear->info,
+ env, fp);
+ }
+
+ up_read(&env->bpf_progs.lock);
+}
+
+static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_read(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ fprintf(fp, "# btf info of id %u\n", node->id);
+ }
+
+ up_read(&env->bpf_progs.lock);
+}
+
static void free_event_desc(struct perf_evsel *events)
{
struct perf_evsel *evsel;
@@ -2373,6 +2530,143 @@ static int process_clockid(struct feat_fd *ff,
return 0;
}
+static int process_dir_format(struct feat_fd *ff,
+ void *_data __maybe_unused)
+{
+ struct perf_session *session;
+ struct perf_data *data;
+
+ session = container_of(ff->ph, struct perf_session, header);
+ data = session->data;
+
+ if (WARN_ON(!perf_data__is_dir(data)))
+ return -1;
+
+ return do_read_u64(ff, &data->dir.version);
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct perf_env *env = &ff->ph->env;
+ u32 count, i;
+ int err = -1;
+
+ if (ff->ph->needs_swap) {
+ pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
+ return 0;
+ }
+
+ if (do_read_u32(ff, &count))
+ return -1;
+
+ down_write(&env->bpf_progs.lock);
+
+ for (i = 0; i < count; ++i) {
+ u32 info_len, data_len;
+
+ info_linear = NULL;
+ info_node = NULL;
+ if (do_read_u32(ff, &info_len))
+ goto out;
+ if (do_read_u32(ff, &data_len))
+ goto out;
+
+ if (info_len > sizeof(struct bpf_prog_info)) {
+ pr_warning("detected invalid bpf_prog_info\n");
+ goto out;
+ }
+
+ info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
+ data_len);
+ if (!info_linear)
+ goto out;
+ info_linear->info_len = sizeof(struct bpf_prog_info);
+ info_linear->data_len = data_len;
+ if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
+ goto out;
+ if (__do_read(ff, &info_linear->info, info_len))
+ goto out;
+ if (info_len < sizeof(struct bpf_prog_info))
+ memset(((void *)(&info_linear->info)) + info_len, 0,
+ sizeof(struct bpf_prog_info) - info_len);
+
+ if (__do_read(ff, info_linear->data, data_len))
+ goto out;
+
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (!info_node)
+ goto out;
+
+ /* after reading from file, translate offset to address */
+ bpf_program__bpil_offs_to_addr(info_linear);
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ }
+
+ up_write(&env->bpf_progs.lock);
+ return 0;
+out:
+ free(info_linear);
+ free(info_node);
+ up_write(&env->bpf_progs.lock);
+ return err;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
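process_bpf_prog_info() tolerates perf.data files written by a different perf version: it refuses an info_len larger than the struct it knows, and zero-fills the tail when the on-disk struct is shorter. A hedged sketch of that version-tolerant read (struct info_v2 and read_info() are illustrative, not perf's types):

#include <stdio.h>
#include <string.h>

struct info_v2 {		/* the struct this reader knows */
	unsigned id;
	unsigned extra;		/* field added in "v2" */
};

static int read_info(const void *file, size_t info_len, struct info_v2 *out)
{
	if (info_len > sizeof(*out))
		return -1;	/* writer is newer than us: refuse */
	memcpy(out, file, info_len);
	if (info_len < sizeof(*out))	/* writer is older: zero the tail */
		memset((char *)out + info_len, 0, sizeof(*out) - info_len);
	return 0;
}

int main(void)
{
	unsigned v1_record = 7;		/* a "v1" file carries only the id */
	struct info_v2 info;

	if (read_info(&v1_record, sizeof(v1_record), &info))
		return 1;
	printf("id=%u extra=%u\n", info.id, info.extra);	/* id=7 extra=0 */
	return 0;
}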
+static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct btf_node *node = NULL;
+ u32 count, i;
+ int err = -1;
+
+ if (ff->ph->needs_swap) {
+ pr_warning("interpreting btf from systems with endianity is not yet supported\n");
+ return 0;
+ }
+
+ if (do_read_u32(ff, &count))
+ return -1;
+
+ down_write(&env->bpf_progs.lock);
+
+ for (i = 0; i < count; ++i) {
+ u32 id, data_size;
+
+ if (do_read_u32(ff, &id))
+ goto out;
+ if (do_read_u32(ff, &data_size))
+ goto out;
+
+ node = malloc(sizeof(struct btf_node) + data_size);
+ if (!node)
+ goto out;
+
+ node->id = id;
+ node->data_size = data_size;
+
+ if (__do_read(ff, node->data, data_size))
+ goto out;
+
+ perf_env__insert_btf(env, node);
+ node = NULL;
+ }
+
+ err = 0;
+out:
+ up_write(&env->bpf_progs.lock);
+ free(node);
+ return err;
+}
+
struct feature_ops {
int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2432,7 +2726,10 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPN(CACHE, cache, true),
FEAT_OPR(SAMPLE_TIME, sample_time, false),
FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
- FEAT_OPR(CLOCKID, clockid, false)
+ FEAT_OPR(CLOCKID, clockid, false),
+ FEAT_OPN(DIR_FORMAT, dir_format, false),
+ FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
+ FEAT_OPR(BPF_BTF, bpf_btf, false),
};
struct header_print_data {
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 0d553ddca0a3..386da49e1bfa 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -39,6 +39,9 @@ enum {
HEADER_SAMPLE_TIME,
HEADER_MEM_TOPOLOGY,
HEADER_CLOCKID,
+ HEADER_DIR_FORMAT,
+ HEADER_BPF_PROG_INFO,
+ HEADER_BPF_BTF,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -48,6 +51,10 @@ enum perf_header_version {
PERF_HEADER_VERSION_2,
};
+enum perf_dir_version {
+ PERF_DIR_VERSION = 1,
+};
+
struct perf_file_section {
u64 offset;
u64 size;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 669f961316f0..7ace7a10054d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -19,6 +19,7 @@
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
+#include <linux/time64.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he);
@@ -192,6 +193,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+ hists__new_col_len(hists, HISTC_TIME, 12);
if (h->srcline) {
len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
@@ -246,6 +248,14 @@ static void he_stat__add_cpumode_period(struct he_stat *he_stat,
}
}
+static long hist_time(unsigned long htime)
+{
+ unsigned long time_quantum = symbol_conf.time_quantum;
+ if (time_quantum)
+ return (htime / time_quantum) * time_quantum;
+ return htime;
+}
+
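hist_time() quantizes sample timestamps so histogram entries fall on time_quantum boundaries: integer division followed by multiplication rounds down to the start of the bucket. A tiny standalone version, with the quantum passed in rather than read from symbol_conf:

#include <stdio.h>

static unsigned long hist_time(unsigned long htime, unsigned long quantum)
{
	if (quantum)
		return (htime / quantum) * quantum;	/* round down */
	return htime;
}

int main(void)
{
	/* with a 100ms quantum, 1234ms and 1299ms share a bucket */
	printf("%lu %lu\n", hist_time(1234, 100), hist_time(1299, 100));
	return 0;	/* prints: 1200 1200 */
}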
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
u64 weight)
{
@@ -396,11 +406,8 @@ static int hist_entry__init(struct hist_entry *he,
* adding new entries. So we need to save a copy.
*/
he->branch_info = malloc(sizeof(*he->branch_info));
- if (he->branch_info == NULL) {
- map__zput(he->ms.map);
- free(he->stat_acc);
- return -ENOMEM;
- }
+ if (he->branch_info == NULL)
+ goto err;
memcpy(he->branch_info, template->branch_info,
sizeof(*he->branch_info));
@@ -419,22 +426,23 @@ static int hist_entry__init(struct hist_entry *he,
if (he->raw_data) {
he->raw_data = memdup(he->raw_data, he->raw_size);
+ if (he->raw_data == NULL)
+ goto err_infos;
+ }
- if (he->raw_data == NULL) {
- map__put(he->ms.map);
- if (he->branch_info) {
- map__put(he->branch_info->from.map);
- map__put(he->branch_info->to.map);
- free(he->branch_info);
- }
- if (he->mem_info) {
- map__put(he->mem_info->iaddr.map);
- map__put(he->mem_info->daddr.map);
- }
- free(he->stat_acc);
- return -ENOMEM;
- }
+ if (he->srcline) {
+ he->srcline = strdup(he->srcline);
+ if (he->srcline == NULL)
+ goto err_rawdata;
+ }
+
+ if (symbol_conf.res_sample) {
+ he->res_samples = calloc(sizeof(struct res_sample),
+ symbol_conf.res_sample);
+ if (!he->res_samples)
+ goto err_srcline;
}
+
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
he->hroot_in = RB_ROOT_CACHED;
@@ -444,6 +452,27 @@ static int hist_entry__init(struct hist_entry *he,
he->leaf = true;
return 0;
+
+err_srcline:
+ free(he->srcline);
+
+err_rawdata:
+ free(he->raw_data);
+
+err_infos:
+ if (he->branch_info) {
+ map__put(he->branch_info->from.map);
+ map__put(he->branch_info->to.map);
+ free(he->branch_info);
+ }
+ if (he->mem_info) {
+ map__put(he->mem_info->iaddr.map);
+ map__put(he->mem_info->daddr.map);
+ }
+err:
+ map__zput(he->ms.map);
+ free(he->stat_acc);
+ return -ENOMEM;
}
static void *hist_entry__zalloc(size_t size)
@@ -584,6 +613,32 @@ out:
return he;
}
+static unsigned random_max(unsigned high)
+{
+ unsigned thresh = -high % high;
+ for (;;) {
+ unsigned r = random();
+ if (r >= thresh)
+ return r % high;
+ }
+}
+
+static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
+{
+ struct res_sample *r;
+ int j;
+
+ if (he->num_res < symbol_conf.res_sample) {
+ j = he->num_res++;
+ } else {
+ j = random_max(symbol_conf.res_sample);
+ }
+ r = &he->res_samples[j];
+ r->time = sample->time;
+ r->cpu = sample->cpu;
+ r->tid = sample->tid;
+}
+
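hists__res_sample() keeps a bounded set of representative samples per entry: fill the array first, then overwrite a uniformly random slot, which deliberately favors recent samples. For comparison, the classic reservoir-sampling variant below keeps a uniform sample over the whole stream by replacing with probability K/(n+1); random_max() mirrors the rejection-sampling helper above, adjusted for random()'s [0, 2^31) output range:

#include <stdio.h>
#include <stdlib.h>

/* Uniform integer in [0, high) without modulo bias. */
static unsigned random_max(unsigned high)
{
	unsigned range = 0x80000000u;	/* random() yields [0, 2^31) */
	unsigned thresh = range % high;	/* reject the biased low slice */

	for (;;) {
		unsigned r = (unsigned)random();
		if (r >= thresh)
			return r % high;
	}
}

#define K 4

int main(void)
{
	unsigned reservoir[K];
	unsigned n, i;

	srandom(1);
	for (n = 0, i = 0; i < 1000; i++, n++) {
		if (n < K) {
			reservoir[n] = i;	/* fill phase */
		} else {
			unsigned j = random_max(n + 1);

			if (j < K)	/* keep with probability K/(n+1) */
				reservoir[j] = i;
		}
	}
	for (i = 0; i < K; i++)
		printf("%u ", reservoir[i]);
	printf("\n");
	return 0;
}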
static struct hist_entry*
__hists__add_entry(struct hists *hists,
struct addr_location *al,
@@ -606,7 +661,7 @@ __hists__add_entry(struct hists *hists,
.map = al->map,
.sym = al->sym,
},
- .srcline = al->srcline ? strdup(al->srcline) : NULL,
+ .srcline = (char *) al->srcline,
.socket = al->socket,
.cpu = al->cpu,
.cpumode = al->cpumode,
@@ -626,10 +681,13 @@ __hists__add_entry(struct hists *hists,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
.ops = ops,
+ .time = hist_time(sample->time),
}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
if (!hists->has_callchains && he && he->callchain_size != 0)
hists->has_callchains = true;
+ if (he && symbol_conf.res_sample)
+ hists__res_sample(he, sample);
return he;
}
@@ -963,7 +1021,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.map = al->map,
.sym = al->sym,
},
- .srcline = al->srcline ? strdup(al->srcline) : NULL,
+ .srcline = (char *) al->srcline,
.parent = iter->parent,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
@@ -1053,8 +1111,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter->evsel, al, max_stack_depth);
- if (err)
+ if (err) {
+ map__put(alm);
return err;
+ }
err = iter->ops->prepare_entry(iter, al);
if (err)
@@ -1153,6 +1213,7 @@ void hist_entry__delete(struct hist_entry *he)
mem_info__zput(he->mem_info);
}
+ zfree(&he->res_samples);
zfree(&he->stat_acc);
free_srcline(he->srcline);
if (he->srcfile && he->srcfile[0])
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 4af27fbab24f..76ff6c6d03b8 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -31,6 +31,7 @@ enum hist_filter {
enum hist_column {
HISTC_SYMBOL,
+ HISTC_TIME,
HISTC_DSO,
HISTC_THREAD,
HISTC_COMM,
@@ -432,9 +433,18 @@ struct hist_browser_timer {
};
struct annotation_options;
+struct res_sample;
+
+enum rstype {
+ A_NORMAL,
+ A_ASM,
+ A_SOURCE
+};
#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
+void attr_to_script(char *buf, struct perf_event_attr *attr);
+
int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
struct hist_browser_timer *hbt,
struct annotation_options *annotation_opts);
@@ -449,7 +459,13 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
struct perf_env *env,
bool warn_lost_event,
struct annotation_options *annotation_options);
-int script_browse(const char *script_opt);
+
+int script_browse(const char *script_opt, struct perf_evsel *evsel);
+
+void run_script(char *cmd);
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+ struct perf_evsel *evsel, enum rstype rstype);
+void res_sample_init(void);
#else
static inline
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
@@ -478,11 +494,22 @@ static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
return 0;
}
-static inline int script_browse(const char *script_opt __maybe_unused)
+static inline int script_browse(const char *script_opt __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused)
{
return 0;
}
+static inline int res_sample_browse(struct res_sample *res_samples __maybe_unused,
+ int num_res __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ enum rstype rstype __maybe_unused)
+{
+ return 0;
+}
+
+static inline void res_sample_init(void) {}
+
#define K_LEFT -1000
#define K_RIGHT -2000
#define K_SWITCH_INPUT_DATA -3000
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 0c0180c67574..47025bc727e1 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -328,35 +328,19 @@ static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
struct machine *machine = btsq->bts->machine;
struct thread *thread;
- struct addr_location al;
unsigned char buf[INTEL_PT_INSN_BUF_SZ];
ssize_t len;
- int x86_64;
- uint8_t cpumode;
+ bool x86_64;
int err = -1;
- if (machine__kernel_ip(machine, ip))
- cpumode = PERF_RECORD_MISC_KERNEL;
- else
- cpumode = PERF_RECORD_MISC_USER;
-
thread = machine__find_thread(machine, -1, btsq->tid);
if (!thread)
return -1;
- if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
- goto out_put;
-
- len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
- INTEL_PT_INSN_BUF_SZ);
+ len = thread__memcpy(thread, machine, buf, ip, INTEL_PT_INSN_BUF_SZ, &x86_64);
if (len <= 0)
goto out_put;
- /* Load maps to ensure dso->is_64_bit has been updated */
- map__load(al.map);
-
- x86_64 = al.map->dso->is_64_bit;
-
if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
goto out_put;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 6e03db142091..872fab163585 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
decoder->tsc_ctc_ratio_d;
-
- /*
- * Allow for timestamps appearing to backwards because a TSC
- * packet has slipped past a MTC packet, so allow 2 MTC ticks
- * or ...
- */
- decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
- decoder->tsc_ctc_ratio_n,
- decoder->tsc_ctc_ratio_d);
}
- /* ... or 0x100 paranoia */
- if (decoder->tsc_slip < 0x100)
- decoder->tsc_slip = 0x100;
+
+ /*
+ * A TSC packet can slip past MTC packets so that the timestamp appears
+ * to go backwards. One estimate is that it can be up to about 40 CPU
+ * cycles, which is certainly less than 0x1000 TSC ticks, but accept an
+ * order of magnitude more slippage to be on the safe side.
+ */
+ decoder->tsc_slip = 0x10000;
intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 3b497bab4324..6d288237887b 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -2531,6 +2531,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
}
pt->timeless_decoding = intel_pt_timeless_decoding(pt);
+ if (pt->timeless_decoding && !pt->tc.time_mult)
+ pt->tc.time_mult = 1;
pt->have_tsc = intel_pt_have_tsc(pt);
pt->sampling_mode = false;
pt->est_tsc = !pt->timeless_decoding;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 61959aba7e27..3c520baa198c 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1421,6 +1421,20 @@ static void machine__set_kernel_mmap(struct machine *machine,
machine->vmlinux_map->end = ~0ULL;
}
+static void machine__update_kernel_mmap(struct machine *machine,
+ u64 start, u64 end)
+{
+ struct map *map = machine__kernel_map(machine);
+
+ map__get(map);
+ map_groups__remove(&machine->kmaps, map);
+
+ machine__set_kernel_mmap(machine, start, end);
+
+ map_groups__insert(&machine->kmaps, map);
+ map__put(map);
+}
+
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
@@ -1453,17 +1467,11 @@ int machine__create_kernel_maps(struct machine *machine)
goto out_put;
}
- /* we have a real start address now, so re-order the kmaps */
- map = machine__kernel_map(machine);
-
- map__get(map);
- map_groups__remove(&machine->kmaps, map);
-
- /* assume it's the last in the kmaps */
- machine__set_kernel_mmap(machine, addr, ~0ULL);
-
- map_groups__insert(&machine->kmaps, map);
- map__put(map);
+ /*
+	 * We have a real start address now, so re-order the kmaps;
+	 * assume it's the last in the kmaps.
+ */
+ machine__update_kernel_mmap(machine, addr, ~0ULL);
}
if (machine__create_extra_kernel_maps(machine, kernel))
@@ -1599,7 +1607,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
if (strstr(kernel->long_name, "vmlinux"))
dso__set_short_name(kernel, "[kernel.vmlinux]", false);
- machine__set_kernel_mmap(machine, event->mmap.start,
+ machine__update_kernel_mmap(machine, event->mmap.start,
event->mmap.start + event->mmap.len);
/*
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index fbeb0c6efaa6..ee71efb9db62 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -261,6 +261,22 @@ bool __map__is_extra_kernel_map(const struct map *map)
return kmap && kmap->name[0];
}
+bool __map__is_bpf_prog(const struct map *map)
+{
+ const char *name;
+
+ if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return true;
+
+ /*
+ * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
+ * the DSO_BINARY_TYPE__BPF_PROG_INFO type. In such cases, we can
+ * guess the type based on the name.
+ */
+ name = map->dso->short_name;
+ return name && (strstr(name, "bpf_prog_") == name);
+}
+
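The strstr(name, "bpf_prog_") == name test above is an idiomatic prefix check: strstr() returns a pointer to the first match, and that pointer equals the string itself only when the match sits at offset zero. A trivial standalone version (is_bpf_prog_name() is an illustrative name):

#include <stdio.h>
#include <string.h>

static int is_bpf_prog_name(const char *name)
{
	/* match only when "bpf_prog_" appears at the very start */
	return name && strstr(name, "bpf_prog_") == name;
}

int main(void)
{
	printf("%d %d\n",
	       is_bpf_prog_name("bpf_prog_1234_abc"),	/* 1 */
	       is_bpf_prog_name("my_bpf_prog_thing"));	/* 0 */
	return 0;
}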
bool map__has_symbols(const struct map *map)
{
return dso__has_symbols(map->dso);
@@ -577,10 +593,25 @@ static void __maps__purge(struct maps *maps)
}
}
+static void __maps__purge_names(struct maps *maps)
+{
+ struct rb_root *root = &maps->names;
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node_name);
+
+ next = rb_next(&pos->rb_node_name);
+ rb_erase_init(&pos->rb_node_name, root);
+ map__put(pos);
+ }
+}
+
static void maps__exit(struct maps *maps)
{
down_write(&maps->lock);
__maps__purge(maps);
+ __maps__purge_names(maps);
up_write(&maps->lock);
}
@@ -895,10 +926,8 @@ static void __maps__insert_name(struct maps *maps, struct map *map)
rc = strcmp(m->dso->short_name, map->dso->short_name);
if (rc < 0)
p = &(*p)->rb_left;
- else if (rc > 0)
- p = &(*p)->rb_right;
else
- return;
+ p = &(*p)->rb_right;
}
rb_link_node(&map->rb_node_name, parent, p);
rb_insert_color(&map->rb_node_name, &maps->names);
@@ -917,6 +946,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
{
rb_erase_init(&map->rb_node, &maps->entries);
map__put(map);
+
+ rb_erase_init(&map->rb_node_name, &maps->names);
+ map__put(map);
}
void maps__remove(struct maps *maps, struct map *map)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 0e20749f2c55..dc93787c74f0 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -159,10 +159,12 @@ int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
bool __map__is_kernel(const struct map *map);
bool __map__is_extra_kernel_map(const struct map *map);
+bool __map__is_bpf_prog(const struct map *map);
static inline bool __map__is_kmodule(const struct map *map)
{
- return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map);
+ return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
+ !__map__is_bpf_prog(map);
}
bool map__has_symbols(const struct map *map);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index cdc7740fc181..ef3d79b2c90b 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -440,6 +440,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
perf_mmap__setup_affinity_mask(map, mp);
+ map->flush = mp->flush;
+
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
@@ -492,7 +494,7 @@ static int __perf_mmap__read_init(struct perf_mmap *md)
md->start = md->overwrite ? head : old;
md->end = md->overwrite ? old : head;
- if (md->start == md->end)
+ if ((md->end - md->start) < md->flush)
return -EAGAIN;
size = md->end - md->start;
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index e566c19b242b..b82f8c2d55c4 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -39,6 +39,7 @@ struct perf_mmap {
} aio;
#endif
cpu_set_t affinity_mask;
+ u64 flush;
};
/*
@@ -70,7 +71,7 @@ enum bkw_mmap_state {
};
struct mmap_params {
- int prot, mask, nr_cblocks, affinity;
+ int prot, mask, nr_cblocks, affinity, flush;
struct auxtrace_mmap_params auxtrace_mp;
};
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index ea523d3b248f..989fed6f43b5 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
"FINAL",
"ROUND",
"HALF ",
+ "TOP ",
+ "TIME ",
};
int err;
bool show_progress = false;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4dcc01b2532c..4432bfe039fd 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -317,10 +317,12 @@ static struct perf_evsel *
__add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
char *name, struct perf_pmu *pmu,
- struct list_head *config_terms, bool auto_merge_stats)
+ struct list_head *config_terms, bool auto_merge_stats,
+ const char *cpu_list)
{
struct perf_evsel *evsel;
- struct cpu_map *cpus = pmu ? pmu->cpus : NULL;
+ struct cpu_map *cpus = pmu ? pmu->cpus :
+ cpu_list ? cpu_map__new(cpu_list) : NULL;
event_attr_init(attr);
@@ -348,7 +350,25 @@ static int add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr, char *name,
struct list_head *config_terms)
{
- return __add_event(list, idx, attr, name, NULL, config_terms, false) ? 0 : -ENOMEM;
+ return __add_event(list, idx, attr, name, NULL, config_terms, false, NULL) ? 0 : -ENOMEM;
+}
+
+static int add_event_tool(struct list_head *list, int *idx,
+ enum perf_tool_event tool_event)
+{
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ };
+
+ evsel = __add_event(list, idx, &attr, NULL, NULL, NULL, false, "0");
+ if (!evsel)
+ return -ENOMEM;
+ evsel->tool_event = tool_event;
+ if (tool_event == PERF_TOOL_DURATION_TIME)
+ evsel->unit = strdup("ns");
+ return 0;
}
static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
@@ -1233,6 +1253,13 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
get_config_name(head_config), &config_terms);
}
+int parse_events_add_tool(struct parse_events_state *parse_state,
+ struct list_head *list,
+ enum perf_tool_event tool_event)
+{
+ return add_event_tool(list, &parse_state->idx, tool_event);
+}
+
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, char *name,
struct list_head *head_config,
@@ -1267,7 +1294,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (!head_config) {
attr.type = pmu->type;
- evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
+ evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL,
+ auto_merge_stats, NULL);
if (evsel) {
evsel->pmu_name = name;
evsel->use_uncore_alias = use_uncore_alias;
@@ -1295,7 +1323,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel = __add_event(list, &parse_state->idx, &attr,
get_config_name(head_config), pmu,
- &config_terms, auto_merge_stats);
+ &config_terms, auto_merge_stats, NULL);
if (evsel) {
evsel->unit = info.unit;
evsel->scale = info.scale;
@@ -2271,6 +2299,7 @@ static bool is_event_supported(u8 type, unsigned config)
perf_evsel__delete(evsel);
}
+ thread_map__put(tmap);
return ret;
}
@@ -2341,6 +2370,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
printf(" %-50s [%s]\n", buf, "SDT event");
free(buf);
}
+ free(path);
} else
printf(" %-50s [%s]\n", nd->s, "SDT event");
if (nd2) {
@@ -2427,6 +2457,25 @@ out_enomem:
return evt_num;
}
+static void print_tool_event(const char *name, const char *event_glob,
+ bool name_only)
+{
+ if (event_glob && !strglobmatch(name, event_glob))
+ return;
+ if (name_only)
+ printf("%s ", name);
+ else
+ printf(" %-50s [%s]\n", name, "Tool event");
+
+}
+
+void print_tool_events(const char *event_glob, bool name_only)
+{
+ print_tool_event("duration_time", event_glob, name_only);
+ if (pager_in_use())
+ printf("\n");
+}
+
void print_symbol_events(const char *event_glob, unsigned type,
struct event_symbol *syms, unsigned max,
bool name_only)
@@ -2510,6 +2559,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
+ print_tool_events(event_glob, name_only);
print_hwcache_events(event_glob, name_only);
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 5ed035cbcbb7..a052cd6ac63e 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -160,6 +160,10 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
struct list_head *head_config);
+enum perf_tool_event;
+int parse_events_add_tool(struct parse_events_state *parse_state,
+ struct list_head *list,
+ enum perf_tool_event tool_event);
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2,
struct parse_events_error *error,
@@ -200,6 +204,7 @@ extern struct event_symbol event_symbols_sw[];
void print_symbol_events(const char *event_glob, unsigned type,
struct event_symbol *syms, unsigned max,
bool name_only);
+void print_tool_events(const char *event_glob, bool name_only);
void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
bool name_only);
int print_hwcache_events(const char *event_glob, bool name_only);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 7805c71aaae2..c54bfe88626c 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -15,6 +15,7 @@
#include "../perf.h"
#include "parse-events.h"
#include "parse-events-bison.h"
+#include "evsel.h"
char *parse_events_get_text(yyscan_t yyscanner);
YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
@@ -154,6 +155,14 @@ static int sym(yyscan_t scanner, int type, int config)
return type == PERF_TYPE_HARDWARE ? PE_VALUE_SYM_HW : PE_VALUE_SYM_SW;
}
+static int tool(yyscan_t scanner, enum perf_tool_event event)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+ yylval->num = event;
+ return PE_VALUE_SYM_TOOL;
+}
+
static int term(yyscan_t scanner, int type)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
@@ -322,7 +331,7 @@ cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COU
alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
-duration_time { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
+duration_time { return tool(yyscanner, PERF_TOOL_DURATION_TIME); }
bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
/*
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 44819bdb037d..6ad8d4914969 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include "util.h"
#include "pmu.h"
+#include "evsel.h"
#include "debug.h"
#include "parse-events.h"
#include "parse-events-bison.h"
@@ -45,6 +46,7 @@ static void inc_group_count(struct list_head *list,
%token PE_START_EVENTS PE_START_TERMS
%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
+%token PE_VALUE_SYM_TOOL
%token PE_EVENT_NAME
%token PE_NAME
%token PE_BPF_OBJECT PE_BPF_SOURCE
@@ -58,6 +60,7 @@ static void inc_group_count(struct list_head *list,
%type <num> PE_VALUE
%type <num> PE_VALUE_SYM_HW
%type <num> PE_VALUE_SYM_SW
+%type <num> PE_VALUE_SYM_TOOL
%type <num> PE_RAW
%type <num> PE_TERM
%type <str> PE_NAME
@@ -321,6 +324,15 @@ value_sym sep_slash_slash_dc
ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config, NULL));
$$ = list;
}
+|
+PE_VALUE_SYM_TOOL sep_slash_slash_dc
+{
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_tool(_parse_state, list, $1));
+ $$ = list;
+}
event_legacy_cache:
PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT opt_event_config
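The hunks above follow the usual flex/bison recipe for a new event class: the lexer stashes the enum value in yylval->num and returns a dedicated PE_VALUE_SYM_TOOL token, and the grammar turns that token into a parse_events_add_tool() call. A minimal standalone sketch of the keyword-to-enum step; the table and lookup helper are illustrative, not the perf sources:

#include <stdio.h>
#include <string.h>

/* Illustrative keyword table standing in for the flex rules above;
 * PERF_TOOL_DURATION_TIME mirrors the enum the diff references, the
 * rest is hypothetical. */
enum perf_tool_event { PERF_TOOL_NONE, PERF_TOOL_DURATION_TIME };

static const struct { const char *kw; enum perf_tool_event ev; } tool_kw[] = {
	{ "duration_time", PERF_TOOL_DURATION_TIME },
};

static enum perf_tool_event lookup_tool_event(const char *tok)
{
	size_t i;

	for (i = 0; i < sizeof(tool_kw) / sizeof(tool_kw[0]); i++)
		if (!strcmp(tok, tool_kw[i].kw))
			return tool_kw[i].ev; /* lexer returns PE_VALUE_SYM_TOOL here */
	return PERF_TOOL_NONE;
}

int main(void)
{
	printf("%d\n", lookup_tool_event("duration_time")); /* prints 1 */
	return 0;
}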
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 51d437f55d18..e0429f4ef335 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -732,10 +732,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
if (!is_arm_pmu_core(name)) {
pname = pe->pmu ? pe->pmu : "cpu";
+
+ /*
+ * an uncore alias may come from a different
+ * PMU with a common prefix
+ */
+ if (pmu_is_uncore(name) &&
+ !strncmp(pname, name, strlen(pname)))
+ goto new_alias;
+
if (strcmp(pname, name))
continue;
}
+new_alias:
/* need type casts to override 'const' */
__perf_pmu__new_alias(head, NULL, (char *)pe->name,
(char *)pe->desc, (char *)pe->event,
@@ -752,6 +762,19 @@ perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
return NULL;
}
+static int pmu_max_precise(const char *name)
+{
+ char path[PATH_MAX];
+ int max_precise = -1;
+
+ scnprintf(path, PATH_MAX,
+ "bus/event_source/devices/%s/caps/max_precise",
+ name);
+
+ sysfs__read_int(path, &max_precise);
+ return max_precise;
+}
+
static struct perf_pmu *pmu_lookup(const char *name)
{
struct perf_pmu *pmu;
@@ -784,6 +807,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->name = strdup(name);
pmu->type = type;
pmu->is_uncore = pmu_is_uncore(name);
+ pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
INIT_LIST_HEAD(&pmu->format);
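pmu_max_precise() leans on sysfs__read_int() from tools/lib/api and leaves max_precise at -1 when the caps file is absent. A hedged plain-stdio sketch of the same read, assuming the conventional sysfs mount point:

#include <stdio.h>

/* Plain-stdio equivalent of the sysfs__read_int() call above; -1 means
 * "cap absent or unreadable", matching the default the diff starts from. */
static int read_max_precise(const char *pmu_name)
{
	char path[256];
	int val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/caps/max_precise", pmu_name);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* older kernel or PMU without the cap */
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}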
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 47253c3daf55..bd9ec2704a57 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -26,6 +26,7 @@ struct perf_pmu {
__u32 type;
bool selectable;
bool is_uncore;
+ int max_precise;
struct perf_event_attr *default_config;
struct cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 0030f9b9bf7e..198e09ff611e 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -160,8 +160,10 @@ static struct map *kernel_get_module_map(const char *module)
if (module && strchr(module, '/'))
return dso__new_map(module);
- if (!module)
- module = "kernel";
+ if (!module) {
+ pos = machine__kernel_map(host_machine);
+ return map__get(pos);
+ }
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
/* short_name is "[module]" */
@@ -472,9 +474,12 @@ static struct debuginfo *open_debuginfo(const char *module, struct nsinfo *nsi,
strcpy(reason, "(unknown)");
} else
dso__strerror_load(dso, reason, STRERR_BUFSIZE);
- if (!silent)
- pr_err("Failed to find the path for %s: %s\n",
- module ?: "kernel", reason);
+ if (!silent) {
+ if (module)
+ pr_err("Module %s is not loaded, please specify its full path name.\n", module);
+ else
+ pr_err("Failed to find the path for the kernel: %s\n", reason);
+ }
return NULL;
}
path = dso->long_name;
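Returning machine__kernel_map() through map__get() hands the caller a counted reference that must be balanced with map__put(). A minimal sketch of that get/put contract in isolation, with an illustrative type rather than the perf map implementation:

#include <stdlib.h>

/* Illustrative refcounted object: the getter bumps the count and
 * returns the same pointer, the final put frees the object, and every
 * get must be paired with exactly one put. */
struct obj { int refcnt; };

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->refcnt++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}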
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index dda0ac978b1e..6aa7e2352e16 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -342,7 +342,7 @@ static bool is_tracepoint(struct pyrf_event *pevent)
static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
- struct tep_handle *pevent = field->event->pevent;
+ struct tep_handle *pevent = field->event->tep;
void *data = pe->sample.raw_data;
PyObject *ret = NULL;
unsigned long long val;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 5f06378a482b..61aa7f3df915 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -372,7 +372,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
ns = nsecs - s * NSEC_PER_SEC;
scripting_context->event_data = data;
- scripting_context->pevent = evsel->tp_format->pevent;
+ scripting_context->pevent = evsel->tp_format->tep;
ENTER;
SAVETMPS;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 0e17db41b49b..22f52b669871 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -837,7 +837,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
ns = nsecs - s * NSEC_PER_SEC;
scripting_context->event_data = data;
- scripting_context->pevent = evsel->tp_format->pevent;
+ scripting_context->pevent = evsel->tp_format->tep;
context = _PyCapsule_New(scripting_context, NULL, NULL);
@@ -1173,7 +1173,7 @@ static int python_export_call_return(struct db_export *dbe,
u64 comm_db_id = cr->comm ? cr->comm->db_id : 0;
PyObject *t;
- t = tuple_new(11);
+ t = tuple_new(12);
tuple_set_u64(t, 0, cr->db_id);
tuple_set_u64(t, 1, cr->thread->db_id);
@@ -1186,6 +1186,7 @@ static int python_export_call_return(struct db_export *dbe,
tuple_set_u64(t, 8, cr->return_ref);
tuple_set_u64(t, 9, cr->cp->parent->db_id);
tuple_set_s32(t, 10, cr->flags);
+ tuple_set_u64(t, 11, cr->parent_db_id);
call_object(tables->call_return_handler, t, "call_return_table");
@@ -1194,11 +1195,12 @@ static int python_export_call_return(struct db_export *dbe,
return 0;
}
-static int python_process_call_return(struct call_return *cr, void *data)
+static int python_process_call_return(struct call_return *cr, u64 *parent_db_id,
+ void *data)
{
struct db_export *dbe = data;
- return db_export__call_return(dbe, cr);
+ return db_export__call_return(dbe, cr, parent_db_id);
}
static void python_process_general_event(struct perf_sample *sample,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index c764bbc91009..bad5f87ae001 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -132,6 +132,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
ordered_events__init(&session->ordered_events,
ordered_events__deliver_event, NULL);
+ perf_env__init(&session->header.env);
if (data) {
if (perf_data__open(data))
goto out_delete;
@@ -140,7 +141,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
if (perf_data__is_read(data)) {
if (perf_session__open(session) < 0)
- goto out_close;
+ goto out_delete;
/*
* set session attributes that are present in perf.data
@@ -152,6 +153,10 @@ struct perf_session *perf_session__new(struct perf_data *data,
}
perf_evlist__init_trace_event_sample_raw(session->evlist);
+
+ /* Open the directory data. */
+ if (data->is_dir && perf_data__open_dir(data))
+ goto out_delete;
}
} else {
session->machines.host.env = &perf_env;
@@ -181,8 +186,6 @@ struct perf_session *perf_session__new(struct perf_data *data,
return session;
- out_close:
- perf_data__close(data);
out_delete:
perf_session__delete(session);
out:
@@ -1845,10 +1848,17 @@ fetch_mmaped_event(struct perf_session *session,
#define NUM_MMAPS 128
#endif
+struct reader;
+
+typedef s64 (*reader_cb_t)(struct perf_session *session,
+ union perf_event *event,
+ u64 file_offset);
+
struct reader {
- int fd;
- u64 data_size;
- u64 data_offset;
+ int fd;
+ u64 data_size;
+ u64 data_offset;
+ reader_cb_t process;
};
static int
@@ -1918,12 +1928,14 @@ more:
size = event->header.size;
+ skip = -EINVAL;
+
if (size < sizeof(struct perf_event_header) ||
- (skip = perf_session__process_event(session, event, file_pos)) < 0) {
- pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
+ (skip = rd->process(session, event, file_pos)) < 0) {
+ pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
file_offset + head, event->header.size,
- event->header.type);
- err = -EINVAL;
+ event->header.type, strerror(-skip));
+ err = skip;
goto out;
}
@@ -1945,12 +1957,20 @@ out:
return err;
}
+static s64 process_simple(struct perf_session *session,
+ union perf_event *event,
+ u64 file_offset)
+{
+ return perf_session__process_event(session, event, file_offset);
+}
+
static int __perf_session__process_events(struct perf_session *session)
{
struct reader rd = {
.fd = perf_data__fd(session->data),
.data_size = session->header.data_size,
.data_offset = session->header.data_offset,
+ .process = process_simple,
};
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
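The reader now carries a process callback instead of hardcoding perf_session__process_event(), which is what later lets directory data supply a different handler per file. A simplified sketch of the same callback plumbing, with types reduced for illustration:

/* Simplified sketch of the reader/callback split; the structs do not
 * match the perf definitions exactly. */
struct session;
union event { struct { unsigned int size, type; } header; };

typedef long (*reader_cb_t)(struct session *s, union event *ev,
			    unsigned long offset);

struct reader {
	int fd;
	unsigned long data_size;
	unsigned long data_offset;
	reader_cb_t process;	/* per-reader event handler */
};

static long process_simple(struct session *s, union event *ev,
			   unsigned long offset)
{
	(void)s; (void)offset;
	return ev->header.size;	/* stand-in for real processing */
}

static int reader_run(struct reader *rd, struct session *s)
{
	union event ev = { .header = { .size = 8, .type = 0 } };
	long skip = rd->process(s, &ev, rd->data_offset);

	return skip < 0 ? (int)skip : 0; /* propagate the callback's error */
}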
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index d2299e912e59..5d2518e89fc4 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -3,6 +3,7 @@
#include <inttypes.h>
#include <regex.h>
#include <linux/mman.h>
+#include <linux/time64.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
@@ -12,9 +13,11 @@
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
+#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
+#include "time-utils.h"
#include <linux/kernel.h>
regex_t parent_regex;
@@ -654,6 +657,42 @@ struct sort_entry sort_socket = {
.se_width_idx = HISTC_SOCKET,
};
+/* --sort time */
+
+static int64_t
+sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->time - left->time;
+}
+
+static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ unsigned long secs;
+ unsigned long long nsecs;
+ char he_time[32];
+
+ nsecs = he->time;
+ secs = nsecs / NSEC_PER_SEC;
+ nsecs -= secs * NSEC_PER_SEC;
+
+ if (symbol_conf.nanosecs)
+ snprintf(he_time, sizeof he_time, "%5lu.%09llu: ",
+ secs, nsecs);
+ else
+ timestamp__scnprintf_usec(he->time, he_time,
+ sizeof(he_time));
+
+ return repsep_snprintf(bf, size, "%-.*s", width, he_time);
+}
+
+struct sort_entry sort_time = {
+ .se_header = "Time",
+ .se_cmp = sort__time_cmp,
+ .se_snprintf = hist_entry__time_snprintf,
+ .se_width_idx = HISTC_TIME,
+};
+
/* --sort trace */
static char *get_trace_output(struct hist_entry *he)
@@ -1634,6 +1673,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
+ DIM(SORT_TIME, "time", sort_time),
};
#undef DIM
@@ -3068,3 +3108,54 @@ void reset_output_field(void)
reset_dimensions();
perf_hpp__reset_output_field(&perf_hpp_list);
}
+
+#define INDENT (3*8 + 1)
+
+static void add_key(struct strbuf *sb, const char *str, int *llen)
+{
+ if (*llen >= 75) {
+ strbuf_addstr(sb, "\n\t\t\t ");
+ *llen = INDENT;
+ }
+ strbuf_addf(sb, " %s", str);
+ *llen += strlen(str) + 1;
+}
+
+static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
+ int *llen)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ add_key(sb, s[i].name, llen);
+}
+
+static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
+ int *llen)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ add_key(sb, s[i].name, llen);
+}
+
+const char *sort_help(const char *prefix)
+{
+ struct strbuf sb;
+ char *s;
+ int len = strlen(prefix) + INDENT;
+
+ strbuf_init(&sb, 300);
+ strbuf_addstr(&sb, prefix);
+ add_hpp_sort_string(&sb, hpp_sort_dimensions,
+ ARRAY_SIZE(hpp_sort_dimensions), &len);
+ add_sort_string(&sb, common_sort_dimensions,
+ ARRAY_SIZE(common_sort_dimensions), &len);
+ add_sort_string(&sb, bstack_sort_dimensions,
+ ARRAY_SIZE(bstack_sort_dimensions), &len);
+ add_sort_string(&sb, memory_sort_dimensions,
+ ARRAY_SIZE(memory_sort_dimensions), &len);
+ s = strbuf_detach(&sb, NULL);
+ strbuf_release(&sb);
+ return s;
+}
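sort_help() concatenates every registered dimension into one wrapped help string and hands ownership to the caller via strbuf_detach() (the following strbuf_release() is a no-op on a detached buffer). A hedged usage sketch; the printing caller below is an assumption, not the actual builtin wiring:

#include <stdio.h>
#include <stdlib.h>
#include "sort.h"	/* sort_help() */

/* Hypothetical caller; ownership of the string comes from
 * strbuf_detach(), so freeing it here is the caller's job. */
static void print_sort_keys(void)
{
	const char *help = sort_help("sort by key(s): ");

	printf("%s\n", help);
	free((void *)help);
}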
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 2fbee0b1011c..ce376a73f964 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -47,6 +47,12 @@ extern struct sort_entry sort_srcline;
extern enum sort_type sort__first_dimension;
extern const char default_mem_sort_order[];
+struct res_sample {
+ u64 time;
+ int cpu;
+ int tid;
+};
+
struct he_stat {
u64 period;
u64 period_sys;
@@ -135,10 +141,13 @@ struct hist_entry {
char *srcfile;
struct symbol *parent;
struct branch_info *branch_info;
+ long time;
struct hists *hists;
struct mem_info *mem_info;
void *raw_data;
u32 raw_size;
+ int num_res;
+ struct res_sample *res_samples;
void *trace_output;
struct perf_hpp_list *hpp_list;
struct hist_entry *parent_he;
@@ -231,6 +240,7 @@ enum sort_type {
SORT_DSO_SIZE,
SORT_CGROUP_ID,
SORT_SYM_IPC_NULL,
+ SORT_TIME,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -286,6 +296,8 @@ void reset_output_field(void);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);
+const char *sort_help(const char *prefix);
+
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
bool is_strict_order(const char *order);
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 6d043c78f3c2..3324f23c7efc 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -18,11 +18,6 @@
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"
-static bool is_duration_time(struct perf_evsel *evsel)
-{
- return !strcmp(evsel->name, "duration_time");
-}
-
static void print_running(struct perf_stat_config *config,
u64 run, u64 ena)
{
@@ -628,9 +623,6 @@ static void print_aggr(struct perf_stat_config *config,
ad.id = id = config->aggr_map->map[s];
first = true;
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
-
ad.val = ad.ena = ad.run = 0;
ad.nr = 0;
if (!collect_data(config, counter, aggr_cb, &ad))
@@ -848,8 +840,6 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
if (prefix)
fputs(prefix, config->output);
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
if (first) {
aggr_printout(config, counter, cpu, 0);
first = false;
@@ -906,8 +896,6 @@ static void print_metric_headers(struct perf_stat_config *config,
/* Print metrics headers only */
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
os.evsel = counter;
out.ctx = &os;
out.print_metric = print_metric_header;
@@ -1136,15 +1124,11 @@ perf_evlist__print_counters(struct perf_evlist *evlist,
break;
case AGGR_THREAD:
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
print_aggr_thread(config, _target, counter, prefix);
}
break;
case AGGR_GLOBAL:
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
print_counter_aggr(config, counter, prefix);
}
if (metric_only)
@@ -1155,8 +1139,6 @@ perf_evlist__print_counters(struct perf_evlist *evlist,
print_no_aggr_metric(config, evlist, prefix);
else {
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
print_counter(config, counter, prefix);
}
}
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 4d40515307b8..2856cc9d5a31 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
break;
case AGGR_GLOBAL:
aggr->val += count->val;
- if (config->scale) {
- aggr->ena += count->ena;
- aggr->run += count->run;
- }
+ aggr->ena += count->ena;
+ aggr->run += count->run;
case AGGR_UNSET:
default:
break;
@@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_event_attr *attr = &evsel->attr;
struct perf_evsel *leader = evsel->leader;
- if (config->scale) {
- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING;
- }
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
/*
* The event is part of non trivial group, let's enable
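With PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING now always requested, every count arrives with the data needed to compensate for multiplexing. A sketch of the standard scaling estimate; this mirrors perf's math in spirit, not its exact code:

#include <stdint.h>

/* Standard multiplexing estimate: the raw count covers only the
 * 'running' slice of the 'enabled' window, so scale it back up. */
static uint64_t scale_count(uint64_t val, uint64_t ena, uint64_t run)
{
	if (!run)
		return 0;	/* never scheduled in */
	if (run == ena)
		return val;	/* counted the whole time, no scaling */
	return (uint64_t)((double)val * ena / run);
}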
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 758bf5f74e6e..5cbad55cd99d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -6,6 +6,7 @@
#include <string.h>
#include <linux/kernel.h>
#include <linux/mman.h>
+#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
@@ -39,15 +40,18 @@ int vmlinux_path__nr_entries;
char **vmlinux_path;
struct symbol_conf symbol_conf = {
+ .nanosecs = false,
.use_modules = true,
.try_vmlinux_path = true,
.demangle = true,
.demangle_kernel = false,
.cumulate_callchain = true,
+ .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
.show_hist_headers = true,
.symfs = "",
.event_group = true,
.inline_name = true,
+ .res_sample = 0,
};
static enum dso_binary_type binary_type_symtab[] = {
@@ -1451,6 +1455,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
return true;
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
default:
return false;
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index fffea68c1203..6c55fa6fccec 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -8,6 +8,7 @@ struct strlist;
struct intlist;
struct symbol_conf {
+ bool nanosecs;
unsigned short priv_size;
bool try_vmlinux_path,
init_annotation,
@@ -55,6 +56,7 @@ struct symbol_conf {
*sym_list_str,
*col_width_list_str,
*bt_stop_list_str;
+ unsigned long time_quantum;
struct strlist *dso_list,
*comm_list,
*sym_list,
@@ -66,6 +68,7 @@ struct symbol_conf {
struct intlist *pid_list,
*tid_list;
const char *symfs;
+ int res_sample;
};
extern struct symbol_conf symbol_conf;
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index a8b45168513c..41942c2aaa18 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -49,6 +49,7 @@ enum retpoline_state_t {
* @timestamp: timestamp (if known)
* @ref: external reference (e.g. db_id of sample)
* @branch_count: the branch count when the entry was created
+ * @db_id: id used for db-export
* @cp: call path
* @no_call: a 'call' was not seen
* @trace_end: a 'call' but trace ended
@@ -59,6 +60,7 @@ struct thread_stack_entry {
u64 timestamp;
u64 ref;
u64 branch_count;
+ u64 db_id;
struct call_path *cp;
bool no_call;
bool trace_end;
@@ -280,12 +282,14 @@ static int thread_stack__call_return(struct thread *thread,
.comm = ts->comm,
.db_id = 0,
};
+ u64 *parent_db_id;
tse = &ts->stack[idx];
cr.cp = tse->cp;
cr.call_time = tse->timestamp;
cr.return_time = timestamp;
cr.branch_count = ts->branch_count - tse->branch_count;
+ cr.db_id = tse->db_id;
cr.call_ref = tse->ref;
cr.return_ref = ref;
if (tse->no_call)
@@ -295,7 +299,14 @@ static int thread_stack__call_return(struct thread *thread,
if (tse->non_call)
cr.flags |= CALL_RETURN_NON_CALL;
- return crp->process(&cr, crp->data);
+ /*
+ * The parent db_id must be assigned before exporting the child. Note
+ * that the parent cannot be exported first: its information is not
+ * complete until its 'return' has been processed.
+ */
+ parent_db_id = idx ? &(tse - 1)->db_id : NULL;
+
+ return crp->process(&cr, parent_db_id, crp->data);
}
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
@@ -484,7 +495,7 @@ void thread_stack__sample(struct thread *thread, int cpu,
}
struct call_return_processor *
-call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
void *data)
{
struct call_return_processor *crp;
@@ -537,6 +548,7 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
tse->no_call = no_call;
tse->trace_end = trace_end;
tse->non_call = false;
+ tse->db_id = 0;
return 0;
}
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index b7c04e19ad41..9c45f947f5a9 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -55,6 +55,7 @@ enum {
* @call_ref: external reference to 'call' sample (e.g. db_id)
* @return_ref: external reference to 'return' sample (e.g. db_id)
* @db_id: id used for db-export
+ * @parent_db_id: id of parent call used for db-export
* @flags: Call/Return flags
*/
struct call_return {
@@ -67,6 +68,7 @@ struct call_return {
u64 call_ref;
u64 return_ref;
u64 db_id;
+ u64 parent_db_id;
u32 flags;
};
@@ -79,7 +81,7 @@ struct call_return {
*/
struct call_return_processor {
struct call_path_root *cpr;
- int (*process)(struct call_return *cr, void *data);
+ int (*process)(struct call_return *cr, u64 *parent_db_id, void *data);
void *data;
};
@@ -93,7 +95,7 @@ void thread_stack__free(struct thread *thread);
size_t thread_stack__depth(struct thread *thread, int cpu);
struct call_return_processor *
-call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
void *data);
void call_return_processor__free(struct call_return_processor *crp);
int thread_stack__process(struct thread *thread, struct comm *comm,
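Consumers of call_return_processor__new() must adopt the widened callback. A hedged sketch of a conforming handler; how the exporter actually allocates and back-fills parent ids is more involved than shown here:

#include "thread-stack.h"	/* struct call_return, processor API */

/* Hypothetical handler showing the widened signature. */
static int my_call_return(struct call_return *cr, u64 *parent_db_id,
			  void *data)
{
	(void)data;
	if (parent_db_id)
		cr->parent_db_id = *parent_db_id; /* link child to parent */
	return 0;
}

/* crp = call_return_processor__new(my_call_return, my_data); */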
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 4c179fef442d..50678d318185 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -12,6 +12,7 @@
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
+#include "map.h"
#include "symbol.h"
#include "unwind.h"
@@ -393,3 +394,25 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
return machine__find_thread(machine, thread->pid_, thread->pid_);
}
+
+int thread__memcpy(struct thread *thread, struct machine *machine,
+ void *buf, u64 ip, int len, bool *is64bit)
+{
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ struct addr_location al;
+ long offset;
+
+ if (machine__kernel_ip(machine, ip))
+ cpumode = PERF_RECORD_MISC_KERNEL;
+
+ if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso ||
+ al.map->dso->data.status == DSO_DATA_STATUS_ERROR ||
+ map__load(al.map) < 0)
+ return -1;
+
+ offset = al.map->map_ip(al.map, ip);
+ if (is64bit)
+ *is64bit = al.map->dso->is_64_bit;
+
+ return dso__data_read_offset(al.map->dso, machine, offset, buf, len);
+}
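A hedged usage sketch for the new helper: read a few instruction bytes at an arbitrary ip, with the dso's bitness reported through is64bit. The caller below is hypothetical and the disassembler hand-off is left as a comment:

#include <stdbool.h>
#include "thread.h"	/* thread__memcpy() */

static int dump_insn_bytes(struct thread *thread, struct machine *machine,
			   u64 ip)
{
	unsigned char buf[16];
	bool is64bit;
	int len = thread__memcpy(thread, machine, buf, ip, sizeof(buf),
				 &is64bit);

	if (len < 0)
		return len;	/* unmapped ip or dso data error */
	/* ... hand buf[0..len) to a disassembler ... */
	return 0;
}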
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 8276ffeec556..cf8375c017a0 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -113,6 +113,9 @@ struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
struct addr_location *al);
+int thread__memcpy(struct thread *thread, struct machine *machine,
+ void *buf, u64 ip, int len, bool *is64bit);
+
static inline void *thread__priv(struct thread *thread)
{
return thread->priv;
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 6193b46050a5..20663a460df3 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -11,6 +11,8 @@
#include "perf.h"
#include "debug.h"
#include "time-utils.h"
+#include "session.h"
+#include "evlist.h"
int parse_nsec_time(const char *str, u64 *ptime)
{
@@ -374,7 +376,7 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
struct perf_time_interval *ptime;
int i;
- if ((timestamp == 0) || (num == 0))
+ if ((!ptime_buf) || (timestamp == 0) || (num == 0))
return false;
if (num == 1)
@@ -396,6 +398,53 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
return (i == num) ? true : false;
}
+int perf_time__parse_for_ranges(const char *time_str,
+ struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num)
+{
+ struct perf_time_interval *ptime_range;
+ int size, num, ret;
+
+ ptime_range = perf_time__range_alloc(time_str, &size);
+ if (!ptime_range)
+ return -ENOMEM;
+
+ if (perf_time__parse_str(ptime_range, time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use the latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ num = perf_time__percent_parse_str(
+ ptime_range, size,
+ time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (num < 0) {
+ pr_err("Invalid time string\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ num = 1;
+ }
+
+ *range_size = size;
+ *range_num = num;
+ *ranges = ptime_range;
+ return 0;
+
+error:
+ free(ptime_range);
+ return ret;
+}
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC;
@@ -404,6 +453,14 @@ int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
return scnprintf(buf, sz, "%"PRIu64".%06"PRIu64, sec, usec);
}
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz)
+{
+ u64 sec = timestamp / NSEC_PER_SEC,
+ nsec = timestamp % NSEC_PER_SEC;
+
+ return scnprintf(buf, sz, "%" PRIu64 ".%09" PRIu64, sec, nsec);
+}
+
int fetch_current_timestamp(char *buf, size_t sz)
{
struct timeval tv;
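perf_time__parse_for_ranges() folds both --time forms (absolute and percent) into one allocation that perf_time__ranges_skip_sample() can filter against. A hedged caller sketch; keeping the sample when parsing fails is an assumption made for illustration:

#include <stdbool.h>
#include <stdlib.h>
#include "time-utils.h"

static bool sample_in_time_window(struct perf_session *session,
				  const char *time_str, u64 timestamp)
{
	struct perf_time_interval *ranges;
	int range_size, range_num;
	bool skip;

	if (perf_time__parse_for_ranges(time_str, session,
					&ranges, &range_size, &range_num))
		return true;	/* parse failure: keep the sample (assumption) */

	skip = perf_time__ranges_skip_sample(ranges, range_num, timestamp);
	free(ranges);
	return !skip;
}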
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
index 70b177d2b98c..72a42ea1d513 100644
--- a/tools/perf/util/time-utils.h
+++ b/tools/perf/util/time-utils.h
@@ -23,7 +23,14 @@ bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp);
bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
int num, u64 timestamp);
+struct perf_session;
+
+int perf_time__parse_for_ranges(const char *str, struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num);
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz);
int fetch_current_timestamp(char *buf, size_t sz);
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index ad74be1f0e42..863955e4094e 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -111,7 +111,7 @@ raw_field_value(struct tep_event *event, const char *name, void *data)
unsigned long long read_size(struct tep_event *event, void *ptr, int size)
{
- return tep_read_number(event->pevent, ptr, size);
+ return tep_read_number(event->tep, ptr, size);
}
void event_format__fprintf(struct tep_event *event,
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index efe2f58cff4e..48d53d8e3e16 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -442,7 +442,7 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
tep_set_flag(pevent, TEP_NSEC_OUTPUT);
tep_set_file_bigendian(pevent, file_bigendian);
- tep_set_host_bigendian(pevent, host_bigendian);
+ tep_set_local_bigendian(pevent, host_bigendian);
if (do_read(buf, 1) < 0)
goto out;
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index cbe0dd758e3a..01b9d89bf5bf 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -40,7 +40,7 @@ int trace_event__init(struct trace_event *t)
static int trace_event__init2(void)
{
- int be = tep_host_bigendian();
+ int be = tep_is_bigendian();
struct tep_handle *pevent;
if (trace_event__init(&tevent))
@@ -49,7 +49,7 @@ static int trace_event__init2(void)
pevent = tevent.pevent;
tep_set_flag(pevent, TEP_NSEC_OUTPUT);
tep_set_file_bigendian(pevent, be);
- tep_set_host_bigendian(pevent, be);
+ tep_set_local_bigendian(pevent, be);
tevent_initialized = true;
return 0;
}
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 2a1fd9182f94..d1f3d44e315e 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -19,7 +19,7 @@ ACPI_MODULE_NAME("oslinuxtbl")
typedef struct osl_table_info {
struct osl_table_info *next;
u32 instance;
- char signature[ACPI_NAME_SIZE];
+ char signature[ACPI_NAMESEG_SIZE];
} osl_table_info;
@@ -286,14 +286,14 @@ static acpi_status osl_add_table_to_list(char *signature, u32 instance)
return (AE_NO_MEMORY);
}
- ACPI_MOVE_NAME(new_info->signature, signature);
+ ACPI_COPY_NAMESEG(new_info->signature, signature);
if (!gbl_table_list_head) {
gbl_table_list_head = new_info;
} else {
next = gbl_table_list_head;
while (1) {
- if (ACPI_COMPARE_NAME(next->signature, signature)) {
+ if (ACPI_COMPARE_NAMESEG(next->signature, signature)) {
if (next->instance == instance) {
found = TRUE;
}
@@ -782,11 +782,11 @@ osl_get_bios_table(char *signature,
/* Handle special tables whose addresses are not in RSDT/XSDT */
- if (ACPI_COMPARE_NAME(signature, ACPI_RSDP_NAME) ||
- ACPI_COMPARE_NAME(signature, ACPI_SIG_RSDT) ||
- ACPI_COMPARE_NAME(signature, ACPI_SIG_XSDT) ||
- ACPI_COMPARE_NAME(signature, ACPI_SIG_DSDT) ||
- ACPI_COMPARE_NAME(signature, ACPI_SIG_FACS)) {
+ if (ACPI_COMPARE_NAMESEG(signature, ACPI_RSDP_NAME) ||
+ ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_RSDT) ||
+ ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_XSDT) ||
+ ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_DSDT) ||
+ ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_FACS)) {
find_next_instance:
@@ -797,7 +797,7 @@ find_next_instance:
* careful about the FADT length and validate table addresses.
* Note: The 64-bit addresses have priority.
*/
- if (ACPI_COMPARE_NAME(signature, ACPI_SIG_DSDT)) {
+ if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_DSDT)) {
if (current_instance < 2) {
if ((gbl_fadt->header.length >=
MIN_FADT_FOR_XDSDT) && gbl_fadt->Xdsdt
@@ -815,7 +815,7 @@ find_next_instance:
dsdt;
}
}
- } else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_FACS)) {
+ } else if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_FACS)) {
if (current_instance < 2) {
if ((gbl_fadt->header.length >=
MIN_FADT_FOR_XFACS) && gbl_fadt->Xfacs
@@ -833,7 +833,7 @@ find_next_instance:
facs;
}
}
- } else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_XSDT)) {
+ } else if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_XSDT)) {
if (!gbl_revision) {
return (AE_BAD_SIGNATURE);
}
@@ -842,7 +842,7 @@ find_next_instance:
(acpi_physical_address)gbl_rsdp.
xsdt_physical_address;
}
- } else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_RSDT)) {
+ } else if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_RSDT)) {
if (current_instance == 0) {
table_address =
(acpi_physical_address)gbl_rsdp.
@@ -931,7 +931,7 @@ find_next_instance:
/* Does this table match the requested signature? */
- if (!ACPI_COMPARE_NAME
+ if (!ACPI_COMPARE_NAMESEG
(mapped_table->signature, signature)) {
osl_unmap_table(mapped_table);
mapped_table = NULL;
@@ -995,7 +995,7 @@ static acpi_status osl_list_customized_tables(char *directory)
{
void *table_dir;
u32 instance;
- char temp_name[ACPI_NAME_SIZE];
+ char temp_name[ACPI_NAMESEG_SIZE];
char *filename;
acpi_status status = AE_OK;
@@ -1086,8 +1086,8 @@ osl_map_table(acpi_size address,
return (AE_BAD_SIGNATURE);
}
} else
- if (!ACPI_COMPARE_NAME(signature, mapped_table->signature))
- {
+ if (!ACPI_COMPARE_NAMESEG
+ (signature, mapped_table->signature)) {
acpi_os_unmap_memory(mapped_table,
sizeof(struct acpi_table_header));
return (AE_BAD_SIGNATURE);
@@ -1158,15 +1158,15 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance)
/* Ignore meaningless files */
- if (strlen(filename) < ACPI_NAME_SIZE) {
+ if (strlen(filename) < ACPI_NAMESEG_SIZE) {
return (AE_BAD_SIGNATURE);
}
/* Extract instance number */
- if (isdigit((int)filename[ACPI_NAME_SIZE])) {
- sscanf(&filename[ACPI_NAME_SIZE], "%u", instance);
- } else if (strlen(filename) != ACPI_NAME_SIZE) {
+ if (isdigit((int)filename[ACPI_NAMESEG_SIZE])) {
+ sscanf(&filename[ACPI_NAMESEG_SIZE], "%u", instance);
+ } else if (strlen(filename) != ACPI_NAMESEG_SIZE) {
return (AE_BAD_SIGNATURE);
} else {
*instance = 0;
@@ -1174,7 +1174,7 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance)
/* Extract signature */
- ACPI_MOVE_NAME(signature, filename);
+ ACPI_COPY_NAMESEG(signature, filename);
return (AE_OK);
}
@@ -1236,7 +1236,7 @@ osl_read_table_from_file(char *filename,
status = AE_BAD_SIGNATURE;
goto exit;
}
- } else if (!ACPI_COMPARE_NAME(signature, header.signature)) {
+ } else if (!ACPI_COMPARE_NAMESEG(signature, header.signature)) {
fprintf(stderr,
"Incorrect signature: Expecting %4.4s, found %4.4s\n",
signature, header.signature);
@@ -1311,7 +1311,7 @@ osl_get_customized_table(char *pathname,
{
void *table_dir;
u32 current_instance = 0;
- char temp_name[ACPI_NAME_SIZE];
+ char temp_name[ACPI_NAMESEG_SIZE];
char table_filename[PATH_MAX];
char *filename;
acpi_status status;
@@ -1329,7 +1329,7 @@ osl_get_customized_table(char *pathname,
/* Ignore meaningless files */
- if (!ACPI_COMPARE_NAME(filename, signature)) {
+ if (!ACPI_COMPARE_NAMESEG(filename, signature)) {
continue;
}
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index e256c2ac5ddc..820baeb5092b 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -289,14 +289,14 @@ int ap_dump_table_by_address(char *ascii_address)
int ap_dump_table_by_name(char *signature)
{
- char local_signature[ACPI_NAME_SIZE + 1];
+ char local_signature[ACPI_NAMESEG_SIZE + 1];
u32 instance;
struct acpi_table_header *table;
acpi_physical_address address;
acpi_status status;
int table_status;
- if (strlen(signature) != ACPI_NAME_SIZE) {
+ if (strlen(signature) != ACPI_NAMESEG_SIZE) {
fprintf(stderr,
"Invalid table signature [%s]: must be exactly 4 characters\n",
signature);
@@ -310,9 +310,9 @@ int ap_dump_table_by_name(char *signature)
/* To be friendly, handle tables whose signatures do not match the name */
- if (ACPI_COMPARE_NAME(local_signature, "FADT")) {
+ if (ACPI_COMPARE_NAMESEG(local_signature, "FADT")) {
strcpy(local_signature, ACPI_SIG_FADT);
- } else if (ACPI_COMPARE_NAME(local_signature, "MADT")) {
+ } else if (ACPI_COMPARE_NAMESEG(local_signature, "MADT")) {
strcpy(local_signature, ACPI_SIG_MADT);
}
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index 49972bc78bc5..a42cfcaa3293 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -97,7 +97,7 @@ int ap_open_output_file(char *pathname)
int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
{
- char filename[ACPI_NAME_SIZE + 16];
+ char filename[ACPI_NAMESEG_SIZE + 16];
char instance_str[16];
ACPI_FILE file;
acpi_size actual;
@@ -110,16 +110,16 @@ int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
/* Construct lower-case filename from the table local signature */
if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {
- ACPI_MOVE_NAME(filename, ACPI_RSDP_NAME);
+ ACPI_COPY_NAMESEG(filename, ACPI_RSDP_NAME);
} else {
- ACPI_MOVE_NAME(filename, table->signature);
+ ACPI_COPY_NAMESEG(filename, table->signature);
}
filename[0] = (char)tolower((int)filename[0]);
filename[1] = (char)tolower((int)filename[1]);
filename[2] = (char)tolower((int)filename[2]);
filename[3] = (char)tolower((int)filename[3]);
- filename[ACPI_NAME_SIZE] = 0;
+ filename[ACPI_NAMESEG_SIZE] = 0;
/* Handle multiple SSDts - create different filenames for each */
diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
index 0c0f3e3f0d80..80650497fb80 100644
--- a/tools/power/cpupower/lib/cpufreq.c
+++ b/tools/power/cpupower/lib/cpufreq.c
@@ -333,17 +333,20 @@ void cpufreq_put_available_governors(struct cpufreq_available_governors *any)
}
-struct cpufreq_available_frequencies
-*cpufreq_get_available_frequencies(unsigned int cpu)
+struct cpufreq_frequencies
+*cpufreq_get_frequencies(const char *type, unsigned int cpu)
{
- struct cpufreq_available_frequencies *first = NULL;
- struct cpufreq_available_frequencies *current = NULL;
+ struct cpufreq_frequencies *first = NULL;
+ struct cpufreq_frequencies *current = NULL;
char one_value[SYSFS_PATH_MAX];
char linebuf[MAX_LINE_LEN];
+ char fname[MAX_LINE_LEN];
unsigned int pos, i;
unsigned int len;
- len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies",
+ snprintf(fname, MAX_LINE_LEN, "scaling_%s_frequencies", type);
+
+ len = sysfs_cpufreq_read_file(cpu, fname,
linebuf, sizeof(linebuf));
if (len == 0)
return NULL;
@@ -389,9 +392,9 @@ struct cpufreq_available_frequencies
return NULL;
}
-void cpufreq_put_available_frequencies(struct cpufreq_available_frequencies
- *any) {
- struct cpufreq_available_frequencies *tmp, *next;
+void cpufreq_put_frequencies(struct cpufreq_frequencies *any)
+{
+ struct cpufreq_frequencies *tmp, *next;
if (!any)
return;
diff --git a/tools/power/cpupower/lib/cpufreq.h b/tools/power/cpupower/lib/cpufreq.h
index 60beaf5ed2ea..775738269cbf 100644
--- a/tools/power/cpupower/lib/cpufreq.h
+++ b/tools/power/cpupower/lib/cpufreq.h
@@ -28,10 +28,10 @@ struct cpufreq_available_governors {
struct cpufreq_available_governors *first;
};
-struct cpufreq_available_frequencies {
+struct cpufreq_frequencies {
unsigned long frequency;
- struct cpufreq_available_frequencies *next;
- struct cpufreq_available_frequencies *first;
+ struct cpufreq_frequencies *next;
+ struct cpufreq_frequencies *first;
};
@@ -129,14 +129,14 @@ void cpufreq_put_available_governors(
*
* Only present on _some_ ->target() cpufreq drivers. For information purposes
* only. Please free allocated memory by calling
- * cpufreq_put_available_frequencies after use.
+ * cpufreq_put_frequencies after use.
*/
-struct cpufreq_available_frequencies
-*cpufreq_get_available_frequencies(unsigned int cpu);
+struct cpufreq_frequencies
+*cpufreq_get_frequencies(const char *type, unsigned int cpu);
-void cpufreq_put_available_frequencies(
- struct cpufreq_available_frequencies *first);
+void cpufreq_put_frequencies(
+ struct cpufreq_frequencies *first);
/* determine affected CPUs
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index c3f39d5128ee..10290b308797 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -161,19 +161,12 @@ static void print_duration(unsigned long duration)
return;
}
-/* --boost / -b */
-
-static int get_boost_mode(unsigned int cpu)
+static int get_boost_mode_x86(unsigned int cpu)
{
int support, active, b_states = 0, ret, pstate_no, i;
/* ToDo: Make this more global */
unsigned long pstates[MAX_HW_PSTATES] = {0,};
- if (cpupower_cpu_info.vendor != X86_VENDOR_AMD &&
- cpupower_cpu_info.vendor != X86_VENDOR_HYGON &&
- cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
- return 0;
-
ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states);
if (ret) {
printf(_("Error while evaluating Boost Capabilities"
@@ -248,6 +241,33 @@ static int get_boost_mode(unsigned int cpu)
return 0;
}
+/* --boost / -b */
+
+static int get_boost_mode(unsigned int cpu)
+{
+ struct cpufreq_frequencies *freqs;
+
+ if (cpupower_cpu_info.vendor == X86_VENDOR_AMD ||
+ cpupower_cpu_info.vendor == X86_VENDOR_HYGON ||
+ cpupower_cpu_info.vendor == X86_VENDOR_INTEL)
+ return get_boost_mode_x86(cpu);
+
+ freqs = cpufreq_get_frequencies("boost", cpu);
+ if (freqs) {
+ printf(_(" boost frequency steps: "));
+ while (freqs->next) {
+ print_speed(freqs->frequency);
+ printf(", ");
+ freqs = freqs->next;
+ }
+ print_speed(freqs->frequency);
+ printf("\n");
+ cpufreq_put_frequencies(freqs);
+ }
+
+ return 0;
+}
+
/* --freq / -f */
static int get_freq_kernel(unsigned int cpu, unsigned int human)
@@ -456,7 +476,7 @@ static int get_latency(unsigned int cpu, unsigned int human)
static void debug_output_one(unsigned int cpu)
{
- struct cpufreq_available_frequencies *freqs;
+ struct cpufreq_frequencies *freqs;
get_driver(cpu);
get_related_cpus(cpu);
@@ -464,7 +484,7 @@ static void debug_output_one(unsigned int cpu)
get_latency(cpu, 1);
get_hardware_limits(cpu, 1);
- freqs = cpufreq_get_available_frequencies(cpu);
+ freqs = cpufreq_get_frequencies("available", cpu);
if (freqs) {
printf(_(" available frequency steps: "));
while (freqs->next) {
@@ -474,7 +494,7 @@ static void debug_output_one(unsigned int cpu)
}
print_speed(freqs->frequency);
printf("\n");
- cpufreq_put_available_frequencies(freqs);
+ cpufreq_put_frequencies(freqs);
}
get_available_governors(cpu);
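The generalized accessor keys the sysfs file name off its type argument, so "available" and "boost" select scaling_available_frequencies and scaling_boost_frequencies respectively. A hedged usage sketch against the cpupower library API:

#include <stdio.h>
#include "cpufreq.h"	/* cpupower library API */

/* type is "available" or "boost"; the cpufreq sysfs files this walks
 * report frequencies in kHz. */
static void print_freq_list(const char *type, unsigned int cpu)
{
	struct cpufreq_frequencies *freqs = cpufreq_get_frequencies(type, cpu);
	struct cpufreq_frequencies *f;

	for (f = freqs; f; f = f->next)
		printf("%lu kHz\n", f->frequency);

	cpufreq_put_frequencies(freqs);	/* frees the whole chain */
}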
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9327c0ddc3a5..c7727be9719f 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -44,6 +44,7 @@
#include <cpuid.h>
#include <linux/capability.h>
#include <errno.h>
+#include <math.h>
char *proc_stat = "/proc/stat";
FILE *outf;
@@ -63,7 +64,6 @@ unsigned int dump_only;
unsigned int do_snb_cstates;
unsigned int do_knl_cstates;
unsigned int do_slm_cstates;
-unsigned int do_cnl_cstates;
unsigned int use_c1_residency_msr;
unsigned int has_aperf;
unsigned int has_epb;
@@ -141,9 +141,21 @@ unsigned int first_counter_read = 1;
#define RAPL_CORES_ENERGY_STATUS (1 << 9)
/* 0x639 MSR_PP0_ENERGY_STATUS */
+#define RAPL_PER_CORE_ENERGY (1 << 10)
+ /* Indicates that core energy collection is per-core,
+ * not per-package. */
+#define RAPL_AMD_F17H (1 << 11)
+ /* 0xc0010299 MSR_RAPL_PWR_UNIT */
+ /* 0xc001029a MSR_CORE_ENERGY_STAT */
+ /* 0xc001029b MSR_PKG_ENERGY_STAT */
#define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
#define TJMAX_DEFAULT 100
+/* MSRs that are not yet in the kernel-provided header. */
+#define MSR_RAPL_PWR_UNIT 0xc0010299
+#define MSR_CORE_ENERGY_STAT 0xc001029a
+#define MSR_PKG_ENERGY_STAT 0xc001029b
+
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/*
@@ -187,6 +199,7 @@ struct core_data {
unsigned long long c7;
unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */
unsigned int core_temp_c;
+ unsigned int core_energy; /* MSR_CORE_ENERGY_STAT */
unsigned int core_id;
unsigned long long counter[MAX_ADDED_COUNTERS];
} *core_even, *core_odd;
@@ -273,6 +286,7 @@ struct system_summary {
struct cpu_topology {
int physical_package_id;
+ int die_id;
int logical_cpu_id;
int physical_node_id;
int logical_node_id; /* 0-based count within the package */
@@ -283,6 +297,7 @@ struct cpu_topology {
struct topo_params {
int num_packages;
+ int num_die;
int num_cpus;
int num_cores;
int max_cpu_num;
@@ -314,9 +329,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
int retval, pkg_no, core_no, thread_no, node_no;
for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
- for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
- for (node_no = 0; node_no < topo.nodes_per_pkg;
- node_no++) {
+ for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
+ for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
for (thread_no = 0; thread_no <
topo.threads_per_core; ++thread_no) {
struct thread_data *t;
@@ -442,6 +456,7 @@ struct msr_counter bic[] = {
{ 0x0, "CPU" },
{ 0x0, "APIC" },
{ 0x0, "X2APIC" },
+ { 0x0, "Die" },
};
#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -495,6 +510,7 @@ struct msr_counter bic[] = {
#define BIC_CPU (1ULL << 47)
#define BIC_APIC (1ULL << 48)
#define BIC_X2APIC (1ULL << 49)
+#define BIC_Die (1ULL << 50)
#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
@@ -621,6 +637,8 @@ void print_header(char *delim)
outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : ""));
if (DO_BIC(BIC_Package))
outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_Die))
+ outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
if (DO_BIC(BIC_Node))
outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
if (DO_BIC(BIC_Core))
@@ -667,7 +685,7 @@ void print_header(char *delim)
if (DO_BIC(BIC_CPU_c1))
outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
- if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+ if (DO_BIC(BIC_CPU_c3))
outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
if (DO_BIC(BIC_CPU_c6))
outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
@@ -680,6 +698,14 @@ void print_header(char *delim)
if (DO_BIC(BIC_CoreTmp))
outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
+ if (do_rapl && !rapl_joules) {
+ if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+ outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
+ } else if (do_rapl && rapl_joules) {
+ if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+ outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
+ }
+
for (mp = sys.cp; mp; mp = mp->next) {
if (mp->format == FORMAT_RAW) {
if (mp->width == 64)
@@ -734,7 +760,7 @@ void print_header(char *delim)
if (do_rapl && !rapl_joules) {
if (DO_BIC(BIC_PkgWatt))
outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
- if (DO_BIC(BIC_CorWatt))
+ if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
if (DO_BIC(BIC_GFXWatt))
outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
@@ -747,7 +773,7 @@ void print_header(char *delim)
} else if (do_rapl && rapl_joules) {
if (DO_BIC(BIC_Pkg_J))
outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
- if (DO_BIC(BIC_Cor_J))
+ if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
if (DO_BIC(BIC_GFX_J))
outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
@@ -808,6 +834,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
outp += sprintf(outp, "c6: %016llX\n", c->c6);
outp += sprintf(outp, "c7: %016llX\n", c->c7);
outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
+ outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
@@ -904,6 +931,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (t == &average.threads) {
if (DO_BIC(BIC_Package))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_Die))
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_Node))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_Core))
@@ -921,6 +950,12 @@ int format_counters(struct thread_data *t, struct core_data *c,
else
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
}
+ if (DO_BIC(BIC_Die)) {
+ if (c)
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
+ else
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ }
if (DO_BIC(BIC_Node)) {
if (t)
outp += sprintf(outp, "%s%d",
@@ -1003,7 +1038,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
goto done;
- if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+ if (DO_BIC(BIC_CPU_c3))
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
if (DO_BIC(BIC_CPU_c6))
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
@@ -1033,6 +1068,20 @@ int format_counters(struct thread_data *t, struct core_data *c,
}
}
+ /*
+ * If measurement interval exceeds minimum RAPL Joule Counter range,
+ * indicate that results are suspect by printing "**" in fraction place.
+ */
+ if (interval_float < rapl_joule_counter_range)
+ fmt8 = "%s%.2f";
+ else
+ fmt8 = "%6.0f**";
+
+ if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
+ if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
+
/* print per-package data only for 1st core in package */
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
goto done;
@@ -1085,18 +1134,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (DO_BIC(BIC_SYS_LPI))
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
- /*
- * If measurement interval exceeds minimum RAPL Joule Counter range,
- * indicate that results are suspect by printing "**" in fraction place.
- */
- if (interval_float < rapl_joule_counter_range)
- fmt8 = "%s%.2f";
- else
- fmt8 = "%6.0f**";
-
if (DO_BIC(BIC_PkgWatt))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
- if (DO_BIC(BIC_CorWatt))
+ if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
if (DO_BIC(BIC_GFXWatt))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
@@ -1104,7 +1144,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
if (DO_BIC(BIC_Pkg_J))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
- if (DO_BIC(BIC_Cor_J))
+ if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
if (DO_BIC(BIC_GFX_J))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
@@ -1249,6 +1289,8 @@ delta_core(struct core_data *new, struct core_data *old)
old->core_temp_c = new->core_temp_c;
old->mc6_us = new->mc6_us - old->mc6_us;
+ DELTA_WRAP32(new->core_energy, old->core_energy);
+
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW)
old->counter[i] = new->counter[i];
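core_energy comes from a 32-bit MSR counter, so deltas go through DELTA_WRAP32 to survive wraparound. A sketch of the arithmetic that macro performs:

#include <stdint.h>

/* What DELTA_WRAP32 computes: a difference that stays correct across
 * one wrap of a 32-bit counter. */
static uint64_t delta_wrap32(uint32_t new, uint32_t old)
{
	if (new >= old)
		return (uint64_t)new - old;
	return 0x100000000ULL + new - old;	/* counter wrapped */
}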
@@ -1391,6 +1433,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
c->c7 = 0;
c->mc6_us = 0;
c->core_temp_c = 0;
+ c->core_energy = 0;
p->pkg_wtd_core_c0 = 0;
p->pkg_any_core_c0 = 0;
@@ -1473,6 +1516,8 @@ int sum_counters(struct thread_data *t, struct core_data *c,
average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
+ average.cores.core_energy += c->core_energy;
+
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW)
continue;
@@ -1818,7 +1863,7 @@ retry:
if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
goto done;
- if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) {
+ if (DO_BIC(BIC_CPU_c3)) {
if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
return -6;
}
@@ -1845,6 +1890,12 @@ retry:
c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
}
+ if (do_rapl & RAPL_AMD_F17H) {
+ if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
+ return -14;
+ c->core_energy = msr & 0xFFFFFFFF;
+ }
+
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (get_mp(cpu, mp, &c->counter[i]))
return -10;
@@ -1934,6 +1985,11 @@ retry:
return -16;
p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
}
+ if (do_rapl & RAPL_AMD_F17H) {
+ if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
+ return -13;
+ p->energy_pkg = msr & 0xFFFFFFFF;
+ }
if (DO_BIC(BIC_PkgTmp)) {
if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
return -17;
@@ -2456,6 +2512,8 @@ void free_all_buffers(void)
/*
* Parse a file containing a single int.
+ * Return 0 if the file cannot be opened.
+ * Exit if the file can be opened but cannot be parsed.
*/
int parse_int_file(const char *fmt, ...)
{
@@ -2467,7 +2525,9 @@ int parse_int_file(const char *fmt, ...)
va_start(args, fmt);
vsnprintf(path, sizeof(path), fmt, args);
va_end(args);
- filep = fopen_or_die(path, "r");
+ filep = fopen(path, "r");
+ if (!filep)
+ return 0;
if (fscanf(filep, "%d", &value) != 1)
err(1, "%s: failed to parse number from file", path);
fclose(filep);
@@ -2488,6 +2548,11 @@ int get_physical_package_id(int cpu)
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
}
+int get_die_id(int cpu)
+{
+ return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
+}
+
int get_core_id(int cpu)
{
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
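Switching parse_int_file() from fopen_or_die() to fopen() is what makes get_die_id() safe on kernels that predate die topology: a missing attribute now reads as 0 instead of aborting. A sketch of that fallback pattern:

#include <stdio.h>

/* Fallback variant: a missing sysfs attribute reads as 0 instead of
 * terminating the tool (the real parse_int_file() still exits when a
 * present file fails to parse). */
static int parse_int_file_soft(const char *path)
{
	FILE *f = fopen(path, "r");
	int value = 0;

	if (!f)
		return 0;	/* e.g. no die_id on older kernels */
	if (fscanf(f, "%d", &value) != 1)
		value = 0;	/* the real tool errs out here */
	fclose(f);
	return value;
}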
@@ -2578,7 +2643,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
filep = fopen_or_die(path, "r");
do {
offset -= BITMASK_SIZE;
- fscanf(filep, "%lx%c", &map, &character);
+ if (fscanf(filep, "%lx%c", &map, &character) != 2)
+ err(1, "%s: failed to parse file", path);
for (shift = 0; shift < BITMASK_SIZE; shift++) {
if ((map >> shift) & 0x1) {
so = shift + offset;
@@ -2855,8 +2921,11 @@ int snapshot_cpu_lpi_us(void)
fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");
retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us);
- if (retval != 1)
- err(1, "CPU LPI");
+ if (retval != 1) {
+ fprintf(stderr, "Disabling Low Power Idle CPU output\n");
+ BIC_NOT_PRESENT(BIC_CPU_LPI);
+ return -1;
+ }
fclose(fp);
@@ -2878,9 +2947,11 @@ int snapshot_sys_lpi_us(void)
fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
- if (retval != 1)
- err(1, "SYS LPI");
-
+ if (retval != 1) {
+ fprintf(stderr, "Disabling Low Power Idle System output\n");
+ BIC_NOT_PRESENT(BIC_SYS_LPI);
+ return -1;
+ }
fclose(fp);
return 0;
@@ -3410,14 +3481,14 @@ dump_sysfs_cstate_config(void)
input = fopen(path, "r");
if (input == NULL)
continue;
- fgets(name_buf, sizeof(name_buf), input);
+ if (!fgets(name_buf, sizeof(name_buf), input))
+ err(1, "%s: failed to read file", path);
/* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
sp = strchr(name_buf, '-');
if (!sp)
sp = strchrnul(name_buf, '\n');
*sp = '\0';
-
fclose(input);
sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
@@ -3425,7 +3496,8 @@ dump_sysfs_cstate_config(void)
input = fopen(path, "r");
if (input == NULL)
continue;
- fgets(desc, sizeof(desc), input);
+ if (!fgets(desc, sizeof(desc), input))
+ err(1, "%s: failed to read file", path);
fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
fclose(input);
@@ -3444,20 +3516,22 @@ dump_sysfs_pstate_config(void)
base_cpu);
input = fopen(path, "r");
if (input == NULL) {
- fprintf(stderr, "NSFOD %s\n", path);
+ fprintf(outf, "NSFOD %s\n", path);
return;
}
- fgets(driver_buf, sizeof(driver_buf), input);
+ if (!fgets(driver_buf, sizeof(driver_buf), input))
+ err(1, "%s: failed to read file", path);
fclose(input);
sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
base_cpu);
input = fopen(path, "r");
if (input == NULL) {
- fprintf(stderr, "NSFOD %s\n", path);
+ fprintf(outf, "NSFOD %s\n", path);
return;
}
- fgets(governor_buf, sizeof(governor_buf), input);
+ if (!fgets(governor_buf, sizeof(governor_buf), input))
+ err(1, "%s: failed to read file", path);
fclose(input);
fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
@@ -3466,7 +3540,8 @@ dump_sysfs_pstate_config(void)
sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
input = fopen(path, "r");
if (input != NULL) {
- fscanf(input, "%d", &turbo);
+ if (fscanf(input, "%d", &turbo) != 1)
+ err(1, "%s: failed to parse number from file", path);
fprintf(outf, "cpufreq boost: %d\n", turbo);
fclose(input);
}
@@ -3474,7 +3549,8 @@ dump_sysfs_pstate_config(void)
sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
input = fopen(path, "r");
if (input != NULL) {
- fscanf(input, "%d", &turbo);
+ if (fscanf(input, "%d", &turbo) != 1)
+ err(1, "%s: failed to parse number from file", path);
fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
fclose(input);
}
@@ -3718,7 +3794,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
-double get_tdp(unsigned int model)
+double get_tdp_intel(unsigned int model)
{
unsigned long long msr;
@@ -3735,6 +3811,16 @@ double get_tdp(unsigned int model)
}
}
+double get_tdp_amd(unsigned int family)
+{
+ switch (family) {
+ case 0x17:
+ default:
+ /* This is the max stock TDP of HEDT/Server Fam17h chips */
+ return 250.0;
+ }
+}
+
/*
* rapl_dram_energy_units_probe()
* Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
@@ -3754,21 +3840,12 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units)
}
}
-
-/*
- * rapl_probe()
- *
- * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
- */
-void rapl_probe(unsigned int family, unsigned int model)
+void rapl_probe_intel(unsigned int family, unsigned int model)
{
unsigned long long msr;
unsigned int time_unit;
double tdp;
- if (!genuine_intel)
- return;
-
if (family != 6)
return;
@@ -3892,13 +3969,69 @@ void rapl_probe(unsigned int family, unsigned int model)
rapl_time_units = 1.0 / (1 << (time_unit));
- tdp = get_tdp(model);
+ tdp = get_tdp_intel(model);
rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
if (!quiet)
fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
- return;
+void rapl_probe_amd(unsigned int family, unsigned int model)
+{
+ unsigned long long msr;
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int has_rapl = 0;
+ double tdp;
+
+ if (max_extended_level >= 0x80000007) {
+ __cpuid(0x80000007, eax, ebx, ecx, edx);
+ /* RAPL (Fam 17h) */
+ has_rapl = edx & (1 << 14);
+ }
+
+ if (!has_rapl)
+ return;
+
+ switch (family) {
+ case 0x17: /* Zen, Zen+ */
+ do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
+ if (rapl_joules) {
+ BIC_PRESENT(BIC_Pkg_J);
+ BIC_PRESENT(BIC_Cor_J);
+ } else {
+ BIC_PRESENT(BIC_PkgWatt);
+ BIC_PRESENT(BIC_CorWatt);
+ }
+ break;
+ default:
+ return;
+ }
+
+ if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
+ return;
+
+ rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf));
+ rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
+ rapl_power_units = ldexp(1.0, -(msr & 0xf));
+
+ tdp = get_tdp_amd(model);
+
+ rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+ if (!quiet)
+ fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
+
+/*
+ * rapl_probe()
+ *
+ * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
+ */
+void rapl_probe(unsigned int family, unsigned int model)
+{
+ if (genuine_intel)
+ rapl_probe_intel(family, model);
+ if (authentic_amd)
+ rapl_probe_amd(family, model);
}
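
Two things are worth pulling out of the AMD path. First, MSR_RAPL_PWR_UNIT packs three exponent fields (bits 3:0 power, 12:8 energy, 19:16 time), and ldexp(1.0, -e) turns each into a 2^-e unit size; for the typical Fam17h energy exponent of 16 that is 2^-16 J, roughly 15.3 microjoules per counter tick. Second, rapl_probe() itself is now a thin vendor dispatcher. A hedged sketch of the unit decoding in isolation:

#include <math.h>

/* Decode the RAPL unit MSR layout used above: each field is a
 * negative power-of-two exponent, so the unit size is 2^-field. */
static void decode_rapl_units(unsigned long long msr, double *power_u,
			      double *energy_u, double *time_u)
{
	*power_u  = ldexp(1.0, -(int)(msr & 0xf));
	*energy_u = ldexp(1.0, -(int)((msr >> 8) & 0x1f));
	*time_u   = ldexp(1.0, -(int)((msr >> 16) & 0xf));
}
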
void perf_limit_reasons_probe(unsigned int family, unsigned int model)
@@ -4003,6 +4136,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
unsigned long long msr;
+ const char *msr_name;
int cpu;
if (!do_rapl)
@@ -4018,10 +4152,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
return -1;
}
- if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
- return -1;
+ if (do_rapl & RAPL_AMD_F17H) {
+ msr_name = "MSR_RAPL_PWR_UNIT";
+ if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
+ return -1;
+ } else {
+ msr_name = "MSR_RAPL_POWER_UNIT";
+ if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
+ return -1;
+ }
- fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr,
+ fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
rapl_power_units, rapl_energy_units, rapl_time_units);
if (do_rapl & RAPL_PKG_POWER_INFO) {
@@ -4451,6 +4592,9 @@ unsigned int intel_model_duplicates(unsigned int model)
case INTEL_FAM6_KABYLAKE_MOBILE:
case INTEL_FAM6_KABYLAKE_DESKTOP:
return INTEL_FAM6_SKYLAKE_MOBILE;
+
+ case INTEL_FAM6_ICELAKE_MOBILE:
+ return INTEL_FAM6_CANNONLAKE_MOBILE;
}
return model;
}
@@ -4702,7 +4846,9 @@ void process_cpuid()
}
do_slm_cstates = is_slm(family, model);
do_knl_cstates = is_knl(family, model);
- do_cnl_cstates = is_cnl(family, model);
+
+ if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
+ BIC_NOT_PRESENT(BIC_CPU_c3);
if (!quiet)
decode_misc_pwr_mgmt_msr();
@@ -4769,6 +4915,7 @@ void topology_probe()
int i;
int max_core_id = 0;
int max_package_id = 0;
+ int max_die_id = 0;
int max_siblings = 0;
/* Initialize num_cpus, max_cpu_num */
@@ -4835,6 +4982,11 @@ void topology_probe()
if (cpus[i].physical_package_id > max_package_id)
max_package_id = cpus[i].physical_package_id;
+ /* get die information */
+ cpus[i].die_id = get_die_id(i);
+ if (cpus[i].die_id > max_die_id)
+ max_die_id = cpus[i].die_id;
+
/* get numa node information */
cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
if (cpus[i].physical_node_id > topo.max_node_num)
@@ -4860,6 +5012,13 @@ void topology_probe()
if (!summary_only && topo.cores_per_node > 1)
BIC_PRESENT(BIC_Core);
+ topo.num_die = max_die_id + 1;
+ if (debug > 1)
+ fprintf(outf, "max_die_id %d, sizing for %d die\n",
+ max_die_id, topo.num_die);
+ if (!summary_only && topo.num_die > 1)
+ BIC_PRESENT(BIC_Die);
+
topo.num_packages = max_package_id + 1;
if (debug > 1)
fprintf(outf, "max_package_id %d, sizing for %d packages\n",
@@ -4884,8 +5043,8 @@ void topology_probe()
if (cpu_is_not_present(i))
continue;
fprintf(outf,
- "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
- i, cpus[i].physical_package_id,
+ "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
+ i, cpus[i].physical_package_id, cpus[i].die_id,
cpus[i].physical_node_id,
cpus[i].logical_node_id,
cpus[i].physical_core_id,
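
get_die_id() itself is outside this hunk, but die ids come from the same sysfs topology directory as package and core ids, and reading them fits the checked-fscanf pattern introduced earlier. A sketch under the assumption of the standard topology layout (helper name illustrative):

#include <stdio.h>

/* Parse one integer topology attribute, e.g. "die_id", for a CPU.
 * Returns -1 when the kernel does not expose the file. */
static int parse_cpu_topology_int(int cpu, const char *attr)
{
	char path[128];
	FILE *fp;
	int val = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, attr);
	fp = fopen(path, "r");
	if (!fp)
		return -1;
	if (fscanf(fp, "%d", &val) != 1)
		val = -1;
	fclose(fp);
	return val;
}

With that shape, parse_cpu_topology_int(i, "die_id") degrades gracefully to -1 on kernels that predate die enumeration.
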
@@ -5077,6 +5236,9 @@ int fork_it(char **argv)
signal(SIGQUIT, SIG_IGN);
if (waitpid(child_pid, &status, 0) == -1)
err(status, "waitpid");
+
+ if (WIFEXITED(status))
+ status = WEXITSTATUS(status);
}
/*
* n.b. fork_it() does not check for errors from for_all_cpus()
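
The added WIFEXITED() check matters because waitpid() hands back an encoded status word, not the child's exit code; forwarding it raw would turn a clean exit(1) into a nonsense value. A sketch of the full decode, with the signal case included for illustration:

#include <err.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Reap a child and return its real exit code; report signal deaths. */
static int reap_child(pid_t child_pid)
{
	int status;

	if (waitpid(child_pid, &status, 0) == -1)
		err(1, "waitpid");
	if (WIFEXITED(status))
		return WEXITSTATUS(status);	/* value passed to exit() */
	if (WIFSIGNALED(status))
		fprintf(stderr, "child killed by signal %d\n",
			WTERMSIG(status));
	return -1;
}
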
@@ -5119,7 +5281,7 @@ int get_and_dump_counters(void)
}
void print_version() {
- fprintf(outf, "turbostat version 18.07.27"
+ fprintf(outf, "turbostat version 19.03.20"
" - Len Brown <lenb@kernel.org>\n");
}
@@ -5316,7 +5478,8 @@ void probe_sysfs(void)
input = fopen(path, "r");
if (input == NULL)
continue;
- fgets(name_buf, sizeof(name_buf), input);
+ if (!fgets(name_buf, sizeof(name_buf), input))
+ err(1, "%s: failed to read file", path);
/* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
sp = strchr(name_buf, '-');
@@ -5343,7 +5506,8 @@ void probe_sysfs(void)
input = fopen(path, "r");
if (input == NULL)
continue;
- fgets(name_buf, sizeof(name_buf), input);
+ if (!fgets(name_buf, sizeof(name_buf), input))
+ err(1, "%s: failed to read file", path);
/* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
sp = strchr(name_buf, '-');
if (!sp)
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 10ddf223055b..e1286d2cdfbf 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -35,6 +35,8 @@ obj-$(CONFIG_DAX) += dax.o
endif
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
+obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
+obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o
nfit-y := $(ACPI_SRC)/core.o
nfit-y += $(ACPI_SRC)/intel.o
@@ -57,6 +59,7 @@ nd_e820-y := $(NVDIMM_SRC)/e820.o
nd_e820-y += config_check.o
dax-y := $(DAX_SRC)/super.o
+dax-y += $(DAX_SRC)/bus.o
dax-y += config_check.o
device_dax-y := $(DAX_SRC)/device.o
@@ -64,7 +67,9 @@ device_dax-y += dax-dev.o
device_dax-y += device_dax_test.o
device_dax-y += config_check.o
-dax_pmem-y := $(DAX_SRC)/pmem.o
+dax_pmem-y := $(DAX_SRC)/pmem/pmem.o
+dax_pmem_core-y := $(DAX_SRC)/pmem/core.o
+dax_pmem_compat-y := $(DAX_SRC)/pmem/compat.o
dax_pmem-y += config_check.o
libnvdimm-y := $(NVDIMM_SRC)/core.o
diff --git a/tools/testing/nvdimm/dax-dev.c b/tools/testing/nvdimm/dax-dev.c
index 36ee3d8797c3..f36e708265b8 100644
--- a/tools/testing/nvdimm/dax-dev.c
+++ b/tools/testing/nvdimm/dax-dev.c
@@ -17,20 +17,11 @@
phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
{
- struct resource *res;
+ struct resource *res = &dev_dax->region->res;
phys_addr_t addr;
- int i;
- for (i = 0; i < dev_dax->num_resources; i++) {
- res = &dev_dax->res[i];
- addr = pgoff * PAGE_SIZE + res->start;
- if (addr >= res->start && addr <= res->end)
- break;
- pgoff -= PHYS_PFN(resource_size(res));
- }
-
- if (i < dev_dax->num_resources) {
- res = &dev_dax->res[i];
+ addr = pgoff * PAGE_SIZE + res->start;
+ if (addr >= res->start && addr <= res->end) {
if (addr + size - 1 <= res->end) {
if (get_nfit_res(addr)) {
struct page *page;
@@ -44,6 +35,5 @@ phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
return addr;
}
}
-
return -1;
}
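
With a device-dax instance now backed by exactly one resource, the old scan-and-accumulate loop over num_resources collapses to a single add-and-range-check. The arithmetic in isolation, as a hedged standalone sketch:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL	/* illustrative; the kernel's is per-arch */

/* Translate a page offset within a single-resource device into a
 * physical address; false if [addr, addr + size) leaves the range. */
static bool pgoff_to_phys(uint64_t res_start, uint64_t res_end,
			  uint64_t pgoff, uint64_t size, uint64_t *phys)
{
	uint64_t addr = res_start + pgoff * PAGE_SIZE;

	if (addr < res_start || addr + size - 1 > res_end)
		return false;
	*phys = addr;
	return true;
}
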
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index b579f962451d..85ffdcfa596b 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -146,6 +146,7 @@ static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
struct nfit_test_sec {
u8 state;
u8 ext_state;
+ u8 old_state;
u8 passphrase[32];
u8 master_passphrase[32];
u64 overwrite_end_time;
@@ -225,6 +226,8 @@ static struct workqueue_struct *nfit_wq;
static struct gen_pool *nfit_pool;
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
static struct nfit_test *to_nfit_test(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1059,8 +1062,7 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
struct device *dev = &t->pdev.dev;
struct nfit_test_sec *sec = &dimm_sec_info[dimm];
- if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) ||
- (sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
+ if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
dev_dbg(dev, "secure erase: wrong security state\n");
} else if (memcmp(nd_cmd->passphrase, sec->passphrase,
@@ -1068,6 +1070,12 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
dev_dbg(dev, "secure erase: wrong passphrase\n");
} else {
+ if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)
+ && (memcmp(nd_cmd->passphrase, zero_key,
+ ND_INTEL_PASSPHRASE_SIZE) != 0)) {
+ dev_dbg(dev, "invalid zero key\n");
+ return 0;
+ }
memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
sec->state = 0;
@@ -1093,7 +1101,7 @@ static int nd_intel_test_cmd_overwrite(struct nfit_test *t,
return 0;
}
- memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
+ sec->old_state = sec->state;
sec->state = ND_INTEL_SEC_STATE_OVERWRITE;
dev_dbg(dev, "overwrite progressing.\n");
sec->overwrite_end_time = get_jiffies_64() + 5 * HZ;
@@ -1115,7 +1123,8 @@ static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t,
if (time_is_before_jiffies64(sec->overwrite_end_time)) {
sec->overwrite_end_time = 0;
- sec->state = 0;
+ sec->state = sec->old_state;
+ sec->old_state = 0;
sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
dev_dbg(dev, "overwrite is complete\n");
} else
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 313dc1091e6f..f2ebf8cf4686 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -15,11 +15,11 @@ TARGETS += firmware
TARGETS += ftrace
TARGETS += futex
TARGETS += gpio
-TARGETS += ima
TARGETS += intel_pstate
TARGETS += ipc
TARGETS += ir
TARGETS += kcmp
+TARGETS += kexec
TARGETS += kvm
TARGETS += lib
TARGETS += livepatch
@@ -32,6 +32,7 @@ TARGETS += net
TARGETS += netfilter
TARGETS += networking/timestamping
TARGETS += nsfs
+TARGETS += pidfd
TARGETS += powerpc
TARGETS += proc
TARGETS += pstore
@@ -50,6 +51,7 @@ ifneq (1, $(quicktest))
TARGETS += timers
endif
TARGETS += tmpfs
+TARGETS += tpm2
TARGETS += user
TARGETS += vm
TARGETS += x86
@@ -73,12 +75,15 @@ ifneq ($(KBUILD_SRC),)
override LDFLAGS =
endif
-BUILD := $(O)
-ifndef BUILD
- BUILD := $(KBUILD_OUTPUT)
-endif
-ifndef BUILD
- BUILD := $(shell pwd)
+ifneq ($(O),)
+ BUILD := $(O)
+else
+ ifneq ($(KBUILD_OUTPUT),)
+ BUILD := $(KBUILD_OUTPUT)
+ else
+ BUILD := $(shell pwd)
+ DEFAULT_INSTALL_HDR_PATH := 1
+ endif
endif
# KSFT_TAP_LEVEL is used from KSFT framework to prevent nested TAP header
@@ -87,8 +92,50 @@ endif
# with system() call. Export it here to cover override RUN_TESTS defines.
export KSFT_TAP_LEVEL=`echo 1`
+# Prepare for headers install
+top_srcdir ?= ../../..
+include $(top_srcdir)/scripts/subarch.include
+ARCH ?= $(SUBARCH)
+export KSFT_KHDR_INSTALL_DONE := 1
export BUILD
-all:
+
+# Build and run gpio only when the output directory is the src dir.
+# gpio has a dependency on tools/gpio and builds tools/gpio
+# objects in the src directory in all cases, making the src
+# repo dirty even when objects are relocated.
+ifneq (1,$(DEFAULT_INSTALL_HDR_PATH))
+ TMP := $(filter-out gpio, $(TARGETS))
+ TARGETS := $(TMP)
+endif
+
+# set default goal to all, so make without a target runs all, even when
+# all isn't the first target in the file.
+.DEFAULT_GOAL := all
+
+# Install headers here once for all tests. KSFT_KHDR_INSTALL_DONE
+# is used to avoid running headers_install from lib.mk.
+# Invoke headers install with --no-builtin-rules to avoid a circular
+# dependency in the "make kselftest" case. There, the second-level
+# make inherits builtin-rules, which use a rule to generate
+# Makefile.o and run into
+# "Circular Makefile.o <- prepare dependency dropped."
+# so headers_install fails and the test compile fails.
+#
+# The O= and KBUILD_OUTPUT cases don't run into this error, since the
+# main Makefile invokes them as sub-makes; --no-builtin-rules is not
+# necessary there, but it doesn't cause any failures either. Keep it
+# simple and use the same flags in both cases.
+# Local build cases: "make kselftest", "make -C" - headers are installed
+# in the default INSTALL_HDR_PATH usr/include.
+khdr:
+ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
+ make --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
+else
+ make --no-builtin-rules INSTALL_HDR_PATH=$$BUILD/usr \
+ ARCH=$(ARCH) -C $(top_srcdir) headers_install
+endif
+
+all: khdr
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
mkdir $$BUILD_TARGET -p; \
@@ -171,4 +218,4 @@ clean:
make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
done;
-.PHONY: all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean
+.PHONY: khdr all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 518cd587cd63..2aed37ea61a4 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -153,6 +153,9 @@ endif
endif
endif
+TEST_PROGS_CFLAGS := -I. -I$(OUTPUT)
+TEST_VERIFIER_CFLAGS := -I. -I$(OUTPUT) -Iverifier
+
ifneq ($(SUBREG_CODEGEN),)
ALU32_BUILD_DIR = $(OUTPUT)/alu32
TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32
@@ -162,13 +165,15 @@ $(ALU32_BUILD_DIR):
$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read
cp $< $@
-$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(ALU32_BUILD_DIR) \
+$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\
+ $(ALU32_BUILD_DIR) \
$(ALU32_BUILD_DIR)/urandom_read
- $(CC) $(CFLAGS) -o $(ALU32_BUILD_DIR)/test_progs_32 $< \
- trace_helpers.c prog_tests/*.c $(OUTPUT)/libbpf.a $(LDLIBS)
+ $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \
+ -o $(ALU32_BUILD_DIR)/test_progs_32 \
+ test_progs.c trace_helpers.c prog_tests/*.c \
+ $(OUTPUT)/libbpf.a $(LDLIBS)
$(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H)
-$(ALU32_BUILD_DIR)/test_progs_32: CFLAGS += -I$(OUTPUT)
$(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c
$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \
@@ -202,12 +207,16 @@ endif
PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h
$(OUTPUT)/test_progs: $(PROG_TESTS_H)
-$(OUTPUT)/test_progs: CFLAGS += -I$(OUTPUT)
+$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
$(OUTPUT)/test_progs: prog_tests/*.c
+PROG_TESTS_DIR = $(OUTPUT)/prog_tests
+$(PROG_TESTS_DIR):
+ mkdir -p $@
+
PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
-$(PROG_TESTS_H): $(PROG_TESTS_FILES)
- $(shell ( cd prog_tests/
+$(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
+ $(shell ( cd prog_tests/; \
echo '/* Generated header, do not edit */'; \
echo '#ifdef DECLARE'; \
ls *.c 2> /dev/null | \
@@ -221,11 +230,15 @@ $(PROG_TESTS_H): $(PROG_TESTS_FILES)
VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h
$(OUTPUT)/test_verifier: $(VERIFIER_TESTS_H)
-$(OUTPUT)/test_verifier: CFLAGS += -I$(OUTPUT)
+$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
+
+VERIFIER_TESTS_DIR = $(OUTPUT)/verifier
+$(VERIFIER_TESTS_DIR):
+ mkdir -p $@
VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
-$(OUTPUT)/verifier/tests.h: $(VERIFIER_TEST_FILES)
- $(shell ( cd verifier/
+$(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
+ $(shell ( cd verifier/; \
echo '/* Generated header, do not edit */'; \
echo '#ifdef FILL_ARRAY'; \
ls *.c 2> /dev/null | \
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index c9433a496d54..c81fc350f7ad 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -180,6 +180,8 @@ static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
(void *) BPF_FUNC_sk_fullsock;
static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
(void *) BPF_FUNC_tcp_sock;
+static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_get_listener_sock;
static int (*bpf_skb_ecn_set_ce)(void *ctx) =
(void *) BPF_FUNC_skb_ecn_set_ce;
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index bcbd928c96ab..fc818bc1d729 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = {
.n_proto = __bpf_constant_htons(ETH_P_IPV6),
};
+#define VLAN_HLEN 4
+
+static struct {
+ struct ethhdr eth;
+ __u16 vlan_tci;
+ __u16 vlan_proto;
+ struct iphdr iph;
+ struct tcphdr tcp;
+} __packed pkt_vlan_v4 = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
+ .vlan_proto = __bpf_constant_htons(ETH_P_IP),
+ .iph.ihl = 5,
+ .iph.protocol = IPPROTO_TCP,
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+ .tcp.urg_ptr = 123,
+ .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v4_flow_keys = {
+ .nhoff = VLAN_HLEN,
+ .thoff = VLAN_HLEN + sizeof(struct iphdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IP),
+};
+
+static struct {
+ struct ethhdr eth;
+ __u16 vlan_tci;
+ __u16 vlan_proto;
+ __u16 vlan_tci2;
+ __u16 vlan_proto2;
+ struct ipv6hdr iph;
+ struct tcphdr tcp;
+} __packed pkt_vlan_v6 = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
+ .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
+ .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
+ .iph.nexthdr = IPPROTO_TCP,
+ .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+ .tcp.urg_ptr = 123,
+ .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v6_flow_keys = {
+ .nhoff = VLAN_HLEN * 2,
+ .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
+ .addr_proto = ETH_P_IPV6,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+};
+
void test_flow_dissector(void)
{
struct bpf_flow_keys flow_keys;
@@ -68,5 +120,21 @@ void test_flow_dissector(void)
err, errno, retval, duration, size, sizeof(flow_keys));
CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4",
+ "err %d errno %d retval %d duration %d size %u/%lu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+ CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys,
+ pkt_vlan_v4_flow_keys);
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6",
+ "err %d errno %d retval %d duration %d size %u/%lu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+ CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys,
+ pkt_vlan_v6_flow_keys);
+
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c
index 90f8a206340a..ee99368c595c 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c
@@ -37,7 +37,7 @@ void test_map_lock(void)
const char *file = "./test_map_lock.o";
int prog_fd, map_fd[2], vars[17] = {};
pthread_t thread_id[6];
- struct bpf_object *obj;
+ struct bpf_object *obj = NULL;
int err = 0, key = 0, i;
void *ret;
diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
index f2a37bbf91ab..996e808f43a2 100644
--- a/tools/testing/selftests/bpf/prog_tests/signal_pending.c
+++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
@@ -12,7 +12,7 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
struct itimerval timeo = {
.it_value.tv_usec = 100000, /* 100ms */
};
- __u32 duration, retval;
+ __u32 duration = 0, retval;
int prog_fd;
int err;
int i;
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c
index 9a573a9675d7..114ebe6a438e 100644
--- a/tools/testing/selftests/bpf/prog_tests/spinlock.c
+++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c
@@ -5,7 +5,7 @@ void test_spinlock(void)
{
const char *file = "./test_spin_lock.o";
pthread_t thread_id[4];
- struct bpf_object *obj;
+ struct bpf_object *obj = NULL;
int prog_fd;
int err = 0, i;
void *ret;
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 284660f5aa95..75b17cada539 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
{
struct bpf_flow_keys *keys = skb->flow_keys;
- keys->n_proto = proto;
switch (proto) {
case bpf_htons(ETH_P_IP):
bpf_tail_call(skb, &jmp_table, IP);
@@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
SEC("flow_dissector")
int _dissect(struct __sk_buff *skb)
{
- if (!skb->vlan_present)
- return parse_eth_proto(skb, skb->protocol);
- else
- return parse_eth_proto(skb, skb->vlan_proto);
+ struct bpf_flow_keys *keys = skb->flow_keys;
+
+ return parse_eth_proto(skb, keys->n_proto);
}
/* Parses on IPPROTO_* */
@@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
struct vlan_hdr *vlan, _vlan;
- __be16 proto;
-
- /* Peek back to see if single or double-tagging */
- if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
- sizeof(proto)))
- return BPF_DROP;
/* Account for double-tagging */
- if (proto == bpf_htons(ETH_P_8021AD)) {
+ if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
if (!vlan)
return BPF_DROP;
@@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb)
if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
return BPF_DROP;
+ keys->nhoff += sizeof(*vlan);
keys->thoff += sizeof(*vlan);
}
@@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
if (!vlan)
return BPF_DROP;
+ keys->nhoff += sizeof(*vlan);
keys->thoff += sizeof(*vlan);
	/* Only allow 8021AD + 8021Q double tagging and no triple tagging. */
if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
return BPF_DROP;
+ keys->n_proto = vlan->h_vlan_encapsulated_proto;
return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
}
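
The VLAN rewrite keys everything off keys->n_proto instead of peeking backwards into the packet: each consumed tag advances both nhoff and thoff by the 4-byte VLAN header, and the encapsulated protocol is recorded before tail-calling. A plain-C sketch of the same accounting (struct and field names simplified for illustration):

#include <stdint.h>

struct vlan_hdr {			/* mirrors linux/if_vlan.h */
	uint16_t h_vlan_TCI;
	uint16_t h_vlan_encapsulated_proto;
};

/* Consume one 802.1Q/802.1AD tag: advance both the network- and
 * transport-header offsets past the 4-byte tag and hand back the
 * inner protocol, which becomes the new n_proto. */
static uint16_t consume_vlan_tag(const struct vlan_hdr *vlan,
				 uint32_t *nhoff, uint32_t *thoff)
{
	*nhoff += sizeof(*vlan);
	*thoff += sizeof(*vlan);
	return vlan->h_vlan_encapsulated_proto;
}

This is why the single-tagged test packet above expects nhoff = 4 and the double-tagged one nhoff = 8.
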
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
index de1a43e8f610..37328f148538 100644
--- a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
@@ -8,38 +8,51 @@
#include "bpf_helpers.h"
#include "bpf_endian.h"
-enum bpf_array_idx {
- SRV_IDX,
- CLI_IDX,
- __NR_BPF_ARRAY_IDX,
+enum bpf_addr_array_idx {
+ ADDR_SRV_IDX,
+ ADDR_CLI_IDX,
+ __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+ EGRESS_SRV_IDX,
+ EGRESS_CLI_IDX,
+ INGRESS_LISTEN_IDX,
+ __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+ EGRESS_LINUM_IDX,
+ INGRESS_LINUM_IDX,
+ __NR_BPF_LINUM_ARRAY_IDX,
};
struct bpf_map_def SEC("maps") addr_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct sockaddr_in6),
- .max_entries = __NR_BPF_ARRAY_IDX,
+ .max_entries = __NR_BPF_ADDR_ARRAY_IDX,
};
struct bpf_map_def SEC("maps") sock_result_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_sock),
- .max_entries = __NR_BPF_ARRAY_IDX,
+ .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
};
struct bpf_map_def SEC("maps") tcp_sock_result_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_tcp_sock),
- .max_entries = __NR_BPF_ARRAY_IDX,
+ .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
};
struct bpf_map_def SEC("maps") linum_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
- .max_entries = 1,
+ .max_entries = __NR_BPF_LINUM_ARRAY_IDX,
};
static bool is_loopback6(__u32 *a6)
@@ -100,18 +113,20 @@ static void tpcpy(struct bpf_tcp_sock *dst,
#define RETURN { \
linum = __LINE__; \
- bpf_map_update_elem(&linum_map, &idx0, &linum, 0); \
+ bpf_map_update_elem(&linum_map, &linum_idx, &linum, 0); \
return 1; \
}
SEC("cgroup_skb/egress")
-int read_sock_fields(struct __sk_buff *skb)
+int egress_read_sock_fields(struct __sk_buff *skb)
{
- __u32 srv_idx = SRV_IDX, cli_idx = CLI_IDX, idx;
+ __u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx;
struct sockaddr_in6 *srv_sa6, *cli_sa6;
struct bpf_tcp_sock *tp, *tp_ret;
struct bpf_sock *sk, *sk_ret;
- __u32 linum, idx0 = 0;
+ __u32 linum, linum_idx;
+
+ linum_idx = EGRESS_LINUM_IDX;
sk = skb->sk;
if (!sk || sk->state == 10)
@@ -132,14 +147,55 @@ int read_sock_fields(struct __sk_buff *skb)
RETURN;
if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
- idx = srv_idx;
+ result_idx = EGRESS_SRV_IDX;
else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
- idx = cli_idx;
+ result_idx = EGRESS_CLI_IDX;
else
RETURN;
- sk_ret = bpf_map_lookup_elem(&sock_result_map, &idx);
- tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &idx);
+ sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+ tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
+ if (!sk_ret || !tp_ret)
+ RETURN;
+
+ skcpy(sk_ret, sk);
+ tpcpy(tp_ret, tp);
+
+ RETURN;
+}
+
+SEC("cgroup_skb/ingress")
+int ingress_read_sock_fields(struct __sk_buff *skb)
+{
+ __u32 srv_idx = ADDR_SRV_IDX, result_idx = INGRESS_LISTEN_IDX;
+ struct bpf_tcp_sock *tp, *tp_ret;
+ struct bpf_sock *sk, *sk_ret;
+ struct sockaddr_in6 *srv_sa6;
+ __u32 linum, linum_idx;
+
+ linum_idx = INGRESS_LINUM_IDX;
+
+ sk = skb->sk;
+ if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6))
+ RETURN;
+
+ srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
+ if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port))
+ RETURN;
+
+ if (sk->state != 10 && sk->state != 12)
+ RETURN;
+
+ sk = bpf_get_listener_sock(sk);
+ if (!sk)
+ RETURN;
+
+ tp = bpf_tcp_sock(sk);
+ if (!tp)
+ RETURN;
+
+ sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+ tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
if (!sk_ret || !tp_ret)
RETURN;
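
The magic numbers in the ingress program are TCP states: 10 is TCP_LISTEN and 12 is TCP_NEW_SYN_RECV, so the guard accepts either the listener itself or a not-yet-accepted request socket, then hops to the listener with bpf_get_listener_sock(). A hedged restatement of that guard with the constants named (assuming the helper declaration added to bpf_helpers.h above; the function is illustrative):

enum { TCP_LISTEN_ST = 10, TCP_NEW_SYN_RECV_ST = 12 };	/* kernel values */

/* Resolve skb->sk to its listener, or NULL if it is neither a
 * listener nor a request socket belonging to one. */
static struct bpf_sock *listener_of(struct bpf_sock *sk)
{
	if (sk->state != TCP_LISTEN_ST && sk->state != TCP_NEW_SYN_RECV_ST)
		return NULL;
	return bpf_get_listener_sock(sk);
}
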
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 38797aa627a7..ec5794e4205b 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -5777,6 +5777,53 @@ const struct btf_dedup_test dedup_tests[] = {
},
},
{
+ .descr = "dedup: void equiv check",
+ /*
+ * // CU 1:
+ * struct s {
+ * struct {} *x;
+ * };
+ * // CU 2:
+ * struct s {
+ * int *x;
+ * };
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
+ BTF_PTR_ENC(1), /* [2] ptr -> [1] */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ /* CU 2 */
+ BTF_PTR_ENC(0), /* [4] ptr -> void */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
+ BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0s\0x"),
+ },
+ .expect = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
+ BTF_PTR_ENC(1), /* [2] ptr -> [1] */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ /* CU 2 */
+ BTF_PTR_ENC(0), /* [4] ptr -> void */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
+ BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0s\0x"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ .dedup_table_size = 1, /* force hash collisions */
+ },
+},
+{
.descr = "dedup: all possible kinds (no duplicates)",
.input = {
.raw_types = {
@@ -5874,6 +5921,50 @@ const struct btf_dedup_test dedup_tests[] = {
.dont_resolve_fwds = false,
},
},
+{
+ .descr = "dedup: enum fwd resolution",
+ .input = {
+ .raw_types = {
+ /* [1] fwd enum 'e1' before full enum */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+ /* [2] full enum 'e1' after fwd */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 123),
+ /* [3] full enum 'e2' before fwd */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(4), 456),
+ /* [4] fwd enum 'e2' after full enum */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+ /* [5] incompatible fwd enum with different size */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+ /* [6] incompatible full enum with different value */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 321),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] full enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 123),
+ /* [2] full enum 'e2' */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(4), 456),
+ /* [3] incompatible fwd enum with different size */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+ /* [4] incompatible full enum with different value */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 321),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
};
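
In C-source terms, the new enum test encodes one unit that only sees a forward-declared enum and another with the full definition; dedup should map each fwd onto the matching full enum when name and size agree, while keeping the incompatible candidates (different size, different enumerator value) distinct. Roughly (forward enum declarations are a GNU C / DWARF-level construct, shown here only as a sketch):

/* CU 1: e1 is only forward-declared, e2 is fully defined. */
enum e1;			/* fwd, should dedup to CU 2's e1 */
enum e2 { e2_val = 456 };

/* CU 2: the mirror image. */
enum e1 { e1_val = 123 };
enum e2;			/* fwd, should dedup to CU 1's e2 */
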
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
index 612632c1425f..d4d3391cc13a 100755
--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -78,6 +78,8 @@ TEST_STATUS=0
TESTS_SUCCEEDED=0
TESTS_FAILED=0
+TMPFILE=""
+
process_test_results()
{
if [[ "${TEST_STATUS}" -eq 0 ]] ; then
@@ -147,7 +149,6 @@ setup()
ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8
-
ip -netns ${NS1} link set dev veth1 up
ip -netns ${NS2} link set dev veth2 up
ip -netns ${NS2} link set dev veth3 up
@@ -205,7 +206,7 @@ setup()
# configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
ip -netns ${NS3} link set gre_dev up
- ip -netns ${NS3} addr add ${IPv4_GRE} nodad dev gre_dev
+ ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6}
ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8}
@@ -222,12 +223,18 @@ setup()
ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
+
sleep 1 # reduce flakiness
set +e
}
cleanup()
{
+ if [ -f ${TMPFILE} ] ; then
+ rm ${TMPFILE}
+ fi
+
ip netns del ${NS1} 2> /dev/null
ip netns del ${NS2} 2> /dev/null
ip netns del ${NS3} 2> /dev/null
@@ -278,6 +285,46 @@ test_ping()
fi
}
+test_gso()
+{
+ local readonly PROTO=$1
+ local readonly PKT_SZ=5000
+ local IP_DST=""
+ : > ${TMPFILE} # trim the capture file
+
+ # check that nc is present
+ command -v nc >/dev/null 2>&1 || \
+		{ echo >&2 "nc is not available: skipping GSO tests"; return; }
+
+ # listen on IPv*_DST, capture TCP into $TMPFILE
+ if [ "${PROTO}" == "IPv4" ] ; then
+ IP_DST=${IPv4_DST}
+ ip netns exec ${NS3} bash -c \
+ "nc -4 -l -s ${IPv4_DST} -p 9000 > ${TMPFILE} &"
+ elif [ "${PROTO}" == "IPv6" ] ; then
+ IP_DST=${IPv6_DST}
+ ip netns exec ${NS3} bash -c \
+ "nc -6 -l -s ${IPv6_DST} -p 9000 > ${TMPFILE} &"
+ RET=$?
+ else
+ echo " test_gso: unknown PROTO: ${PROTO}"
+ TEST_STATUS=1
+ fi
+ sleep 1 # let nc start listening
+
+ # send a packet larger than MTU
+ ip netns exec ${NS1} bash -c \
+ "dd if=/dev/zero bs=$PKT_SZ count=1 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
+ sleep 2 # let the packet get delivered
+
+ # verify we received all expected bytes
+ SZ=$(stat -c %s ${TMPFILE})
+ if [ "$SZ" != "$PKT_SZ" ] ; then
+ echo " test_gso failed: ${PROTO}"
+ TEST_STATUS=1
+ fi
+}
+
test_egress()
{
local readonly ENCAP=$1
@@ -307,6 +354,8 @@ test_egress()
fi
test_ping IPv4 0
test_ping IPv6 0
+ test_gso IPv4
+ test_gso IPv6
# a negative test: remove routes to GRE devices: ping fails
remove_routes_to_gredev
@@ -350,6 +399,7 @@ test_ingress()
ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
else
echo "FAIL: unknown encap ${ENCAP}"
+ TEST_STATUS=1
fi
test_ping IPv4 0
test_ping IPv6 0
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c
index bc8943938bf5..dcae7f664dce 100644
--- a/tools/testing/selftests/bpf/test_sock_fields.c
+++ b/tools/testing/selftests/bpf/test_sock_fields.c
@@ -16,10 +16,23 @@
#include "cgroup_helpers.h"
#include "bpf_rlimit.h"
-enum bpf_array_idx {
- SRV_IDX,
- CLI_IDX,
- __NR_BPF_ARRAY_IDX,
+enum bpf_addr_array_idx {
+ ADDR_SRV_IDX,
+ ADDR_CLI_IDX,
+ __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+ EGRESS_SRV_IDX,
+ EGRESS_CLI_IDX,
+ INGRESS_LISTEN_IDX,
+ __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+ EGRESS_LINUM_IDX,
+ INGRESS_LINUM_IDX,
+ __NR_BPF_LINUM_ARRAY_IDX,
};
#define CHECK(condition, tag, format...) ({ \
@@ -41,8 +54,16 @@ static int linum_map_fd;
static int addr_map_fd;
static int tp_map_fd;
static int sk_map_fd;
-static __u32 srv_idx = SRV_IDX;
-static __u32 cli_idx = CLI_IDX;
+
+static __u32 addr_srv_idx = ADDR_SRV_IDX;
+static __u32 addr_cli_idx = ADDR_CLI_IDX;
+
+static __u32 egress_srv_idx = EGRESS_SRV_IDX;
+static __u32 egress_cli_idx = EGRESS_CLI_IDX;
+static __u32 ingress_listen_idx = INGRESS_LISTEN_IDX;
+
+static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
+static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
static void init_loopback6(struct sockaddr_in6 *sa6)
{
@@ -93,29 +114,46 @@ static void print_tp(const struct bpf_tcp_sock *tp)
static void check_result(void)
{
- struct bpf_tcp_sock srv_tp, cli_tp;
- struct bpf_sock srv_sk, cli_sk;
- __u32 linum, idx0 = 0;
+ struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
+ struct bpf_sock srv_sk, cli_sk, listen_sk;
+ __u32 ingress_linum, egress_linum;
int err;
- err = bpf_map_lookup_elem(linum_map_fd, &idx0, &linum);
+ err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
+ &egress_linum);
CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
"err:%d errno:%d", err, errno);
- err = bpf_map_lookup_elem(sk_map_fd, &srv_idx, &srv_sk);
- CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &srv_idx)",
+ err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
+ &ingress_linum);
+ CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx, &srv_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx, &srv_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx, &cli_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx)",
"err:%d errno:%d", err, errno);
- err = bpf_map_lookup_elem(tp_map_fd, &srv_idx, &srv_tp);
- CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &srv_idx)",
+ err = bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx, &cli_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx)",
"err:%d errno:%d", err, errno);
- err = bpf_map_lookup_elem(sk_map_fd, &cli_idx, &cli_sk);
- CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &cli_idx)",
+ err = bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx, &listen_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx)",
"err:%d errno:%d", err, errno);
- err = bpf_map_lookup_elem(tp_map_fd, &cli_idx, &cli_tp);
- CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &cli_idx)",
+ err = bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx, &listen_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx)",
"err:%d errno:%d", err, errno);
+ printf("listen_sk: ");
+ print_sk(&listen_sk);
+ printf("\n");
+
printf("srv_sk: ");
print_sk(&srv_sk);
printf("\n");
@@ -124,6 +162,10 @@ static void check_result(void)
print_sk(&cli_sk);
printf("\n");
+ printf("listen_tp: ");
+ print_tp(&listen_tp);
+ printf("\n");
+
printf("srv_tp: ");
print_tp(&srv_tp);
printf("\n");
@@ -132,6 +174,19 @@ static void check_result(void)
print_tp(&cli_tp);
printf("\n");
+ CHECK(listen_sk.state != 10 ||
+ listen_sk.family != AF_INET6 ||
+ listen_sk.protocol != IPPROTO_TCP ||
+ memcmp(listen_sk.src_ip6, &in6addr_loopback,
+ sizeof(listen_sk.src_ip6)) ||
+ listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] ||
+ listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] ||
+ listen_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+ listen_sk.dst_port,
+ "Unexpected listen_sk",
+ "Check listen_sk output. ingress_linum:%u",
+ ingress_linum);
+
CHECK(srv_sk.state == 10 ||
!srv_sk.state ||
srv_sk.family != AF_INET6 ||
@@ -142,7 +197,8 @@ static void check_result(void)
sizeof(srv_sk.dst_ip6)) ||
srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
srv_sk.dst_port != cli_sa6.sin6_port,
- "Unexpected srv_sk", "Check srv_sk output. linum:%u", linum);
+ "Unexpected srv_sk", "Check srv_sk output. egress_linum:%u",
+ egress_linum);
CHECK(cli_sk.state == 10 ||
!cli_sk.state ||
@@ -154,21 +210,31 @@ static void check_result(void)
sizeof(cli_sk.dst_ip6)) ||
cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
cli_sk.dst_port != srv_sa6.sin6_port,
- "Unexpected cli_sk", "Check cli_sk output. linum:%u", linum);
+ "Unexpected cli_sk", "Check cli_sk output. egress_linum:%u",
+ egress_linum);
+
+ CHECK(listen_tp.data_segs_out ||
+ listen_tp.data_segs_in ||
+ listen_tp.total_retrans ||
+ listen_tp.bytes_acked,
+ "Unexpected listen_tp", "Check listen_tp output. ingress_linum:%u",
+ ingress_linum);
CHECK(srv_tp.data_segs_out != 1 ||
srv_tp.data_segs_in ||
srv_tp.snd_cwnd != 10 ||
srv_tp.total_retrans ||
srv_tp.bytes_acked != DATA_LEN,
- "Unexpected srv_tp", "Check srv_tp output. linum:%u", linum);
+ "Unexpected srv_tp", "Check srv_tp output. egress_linum:%u",
+ egress_linum);
CHECK(cli_tp.data_segs_out ||
cli_tp.data_segs_in != 1 ||
cli_tp.snd_cwnd != 10 ||
cli_tp.total_retrans ||
cli_tp.bytes_received != DATA_LEN,
- "Unexpected cli_tp", "Check cli_tp output. linum:%u", linum);
+ "Unexpected cli_tp", "Check cli_tp output. egress_linum:%u",
+ egress_linum);
}
static void test(void)
@@ -211,10 +277,10 @@ static void test(void)
err, errno);
/* Update addr_map with srv_sa6 and cli_sa6 */
- err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0);
+ err = bpf_map_update_elem(addr_map_fd, &addr_srv_idx, &srv_sa6, 0);
CHECK(err, "map_update", "err:%d errno:%d", err, errno);
- err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0);
+ err = bpf_map_update_elem(addr_map_fd, &addr_cli_idx, &cli_sa6, 0);
CHECK(err, "map_update", "err:%d errno:%d", err, errno);
/* Connect from cli_sa6 to srv_sa6 */
@@ -273,9 +339,9 @@ int main(int argc, char **argv)
struct bpf_prog_load_attr attr = {
.file = "test_sock_fields_kern.o",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .expected_attach_type = BPF_CGROUP_INET_EGRESS,
};
- int cgroup_fd, prog_fd, err;
+ int cgroup_fd, egress_fd, ingress_fd, err;
+ struct bpf_program *ingress_prog;
struct bpf_object *obj;
struct bpf_map *map;
@@ -293,12 +359,24 @@ int main(int argc, char **argv)
err = join_cgroup(TEST_CGROUP);
CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
- err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+ err = bpf_prog_load_xattr(&attr, &obj, &egress_fd);
CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
- err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
+ ingress_prog = bpf_object__find_program_by_title(obj,
+ "cgroup_skb/ingress");
+ CHECK(!ingress_prog,
+ "bpf_object__find_program_by_title(cgroup_skb/ingress)",
+ "not found");
+ ingress_fd = bpf_program__fd(ingress_prog);
+
+ err = bpf_prog_attach(egress_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
	CHECK(err == -1, "bpf_prog_attach(BPF_CGROUP_INET_EGRESS)",
	      "err:%d errno:%d", err, errno);
+
+ err = bpf_prog_attach(ingress_fd, cgroup_fd,
+ BPF_CGROUP_INET_INGRESS, 0);
+	CHECK(err == -1, "bpf_prog_attach(BPF_CGROUP_INET_INGRESS)",
+	      "err:%d errno:%d", err, errno);
close(cgroup_fd);
map = bpf_object__find_map_by_name(obj, "addr_map");
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 4004891afa9c..9093a8f64dc6 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -375,6 +375,31 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "calls: ptr null check in subprog",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .fixup_map_hash_48b = { 3 },
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
"calls: two calls with args",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
@@ -908,6 +933,44 @@
.result = REJECT,
},
{
+ "calls: stack depth check in dead code",
+ .insns = {
+ /* main */
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+ BPF_EXIT_INSN(),
+ /* C */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+ BPF_EXIT_INSN(),
+ /* D */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+ BPF_EXIT_INSN(),
+ /* E */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+ BPF_EXIT_INSN(),
+ /* F */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+ BPF_EXIT_INSN(),
+ /* G */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+ BPF_EXIT_INSN(),
+ /* H */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "call stack",
+ .result = REJECT,
+},
+{
"calls: spill into caller stack frame",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
@@ -1940,3 +2003,28 @@
.errstr = "!read_ok",
.result = REJECT,
},
+{
+ "calls: cross frame pruning - liveness propagation",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
index e3fc22e672c2..d5c596fdc4b9 100644
--- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -631,3 +631,25 @@
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
+{
+ "direct packet access: test29 (reg > pkt_end in subprog)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
index 28b8c805a293..3856dba733e9 100644
--- a/tools/testing/selftests/bpf/verifier/ld_imm64.c
+++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
@@ -122,7 +122,7 @@
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "not pointing to valid bpf_map",
@@ -139,3 +139,16 @@
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
+{
+ "test14 ld_imm64: reject 2nd imm != 0",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_1,
+ BPF_PSEUDO_MAP_FD, 0, 0),
+ BPF_RAW_INSN(0, 0, 0, 0, 0xfefefe),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 1 },
+ .errstr = "unrecognized bpf_ld_imm64 insn",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index 3ed3593bd8b6..923f2110072d 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -605,3 +605,171 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
+{
+ "reference tracking: use ptr from bpf_tcp_sock() after release",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
+{
+ "reference tracking: use ptr from bpf_sk_fullsock() after release",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
+{
+ "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
+{
+ "reference tracking: use sk after bpf_sk_release(tp)",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
+{
+ "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: bpf_sk_release(listen_sk)",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "reference has not been acquired before",
+},
+{
+ /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
+ "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
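
All of the new ref_tracking cases probe one invariant: a pointer derived from an acquired socket (via bpf_tcp_sock(), bpf_sk_fullsock(), or bpf_get_listener_sock()) is only valid while the reference is held, and only the original lookup result may be released. In restricted-C form the first rejected pattern looks roughly like this (a sketch assuming the usual bpf_helpers.h declarations, not one of the actual test programs):

SEC("classifier")
int use_after_release(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;
	struct bpf_tcp_sock *tp;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       -1 /* BPF_F_CURRENT_NETNS */, 0);
	if (!sk)
		return 0;
	tp = bpf_tcp_sock(sk);
	bpf_sk_release(sk);
	/* tp (derived from sk) is dead here; the verifier flags the
	 * load below as "invalid mem access". */
	return tp ? tp->snd_cwnd : 0;
}
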
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 0ddfdf76aba5..416436231fab 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -342,7 +342,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "type=sock_common expected=sock",
+ .errstr = "reference has not been acquired before",
},
{
"bpf_sk_release(bpf_sk_fullsock(skb->sk))",
@@ -380,5 +380,5 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "type=tcp_sock expected=sock",
+ .errstr = "reference has not been acquired before",
},
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index 28d321ba311b..6f339882a6ca 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -26,7 +26,7 @@
*/
static int test_memcg_subtree_control(const char *root)
{
- char *parent, *child, *parent2, *child2;
+ char *parent, *child, *parent2 = NULL, *child2 = NULL;
int ret = KSFT_FAIL;
char buf[PAGE_SIZE];
@@ -34,50 +34,54 @@ static int test_memcg_subtree_control(const char *root)
parent = cg_name(root, "memcg_test_0");
child = cg_name(root, "memcg_test_0/memcg_test_1");
if (!parent || !child)
- goto cleanup;
+ goto cleanup_free;
if (cg_create(parent))
- goto cleanup;
+ goto cleanup_free;
if (cg_write(parent, "cgroup.subtree_control", "+memory"))
- goto cleanup;
+ goto cleanup_parent;
if (cg_create(child))
- goto cleanup;
+ goto cleanup_parent;
if (cg_read_strstr(child, "cgroup.controllers", "memory"))
- goto cleanup;
+ goto cleanup_child;
/* Create two nested cgroups without enabling memory controller */
parent2 = cg_name(root, "memcg_test_1");
child2 = cg_name(root, "memcg_test_1/memcg_test_1");
if (!parent2 || !child2)
- goto cleanup;
+ goto cleanup_free2;
if (cg_create(parent2))
- goto cleanup;
+ goto cleanup_free2;
if (cg_create(child2))
- goto cleanup;
+ goto cleanup_parent2;
if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf)))
- goto cleanup;
+ goto cleanup_all;
if (!cg_read_strstr(child2, "cgroup.controllers", "memory"))
- goto cleanup;
+ goto cleanup_all;
ret = KSFT_PASS;
-cleanup:
- cg_destroy(child);
- cg_destroy(parent);
- free(parent);
- free(child);
-
+cleanup_all:
cg_destroy(child2);
+cleanup_parent2:
cg_destroy(parent2);
+cleanup_free2:
free(parent2);
free(child2);
+cleanup_child:
+ cg_destroy(child);
+cleanup_parent:
+ cg_destroy(parent);
+cleanup_free:
+ free(parent);
+ free(child);
return ret;
}
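
The restructured error paths replace one catch-all label, which could destroy cgroups that were never created, with the kernel's usual cleanup ladder: one label per acquired resource, and each failure jumps to the label that unwinds exactly what exists so far, in reverse order of acquisition. The shape of the pattern, reduced to a sketch:

#include <stdlib.h>

static int setup_two(void)
{
	char *a, *b;
	int ret = -1;

	a = malloc(32);
	if (!a)
		goto out;
	b = malloc(32);
	if (!b)
		goto free_a;

	/* ... use a and b ... */
	ret = 0;

	free(b);
free_a:
	free(a);
out:
	return ret;
}
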
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index c4cf6e6d800e..a6c196c8534c 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -11,6 +11,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding
ALL_TESTS="
rif_set_addr_test
+ rif_vrf_set_addr_test
rif_inherit_bridge_addr_test
rif_non_inherit_bridge_addr_test
vlan_interface_deletion_test
@@ -98,6 +99,25 @@ rif_set_addr_test()
ip link set dev $swp1 addr $swp1_mac
}
+rif_vrf_set_addr_test()
+{
+ # Test that it is possible to set an IP address on a VRF upper despite
+ # its random MAC address.
+ RET=0
+
+ ip link add name vrf-test type vrf table 10
+ ip link set dev $swp1 master vrf-test
+
+ ip -4 address add 192.0.2.1/24 dev vrf-test
+ check_err $? "failed to set IPv4 address on VRF"
+ ip -6 address add 2001:db8:1::1/64 dev vrf-test
+ check_err $? "failed to set IPv6 address on VRF"
+
+ log_test "RIF - setting IP address on VRF"
+
+ ip link del dev vrf-test
+}
+
rif_inherit_bridge_addr_test()
{
RET=0
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index a47029a799d2..a90f394f9aa9 100755
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -7,6 +7,12 @@ test_guid=210be57c-9849-4fc7-a635-e6382d1aec27
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+file_cleanup()
+{
+ chattr -i $1
+ rm -f $1
+}
+
check_prereqs()
{
local msg="skip all tests:"
@@ -58,8 +64,10 @@ test_create()
if [ $(stat -c %s $file) -ne 5 ]; then
echo "$file has invalid size" >&2
+ file_cleanup $file
exit 1
fi
+ file_cleanup $file
}
test_create_empty()
@@ -72,12 +80,14 @@ test_create_empty()
echo "$file can not be created without writing" >&2
exit 1
fi
+ file_cleanup $file
}
test_create_read()
{
local file=$efivarfs_mount/$FUNCNAME-$test_guid
./create-read $file
+ file_cleanup $file
}
test_delete()
@@ -92,11 +102,7 @@ test_delete()
exit 1
fi
- rm $file 2>/dev/null
- if [ $? -ne 0 ]; then
- chattr -i $file
- rm $file
- fi
+ file_cleanup $file
if [ -e $file ]; then
echo "$file couldn't be deleted" >&2
@@ -150,11 +156,7 @@ test_valid_filenames()
echo "$file could not be created" >&2
ret=1
else
- rm $file 2>/dev/null
- if [ $? -ne 0 ]; then
- chattr -i $file
- rm $file
- fi
+ file_cleanup $file
fi
done
@@ -187,11 +189,7 @@ test_invalid_filenames()
if [ -e $file ]; then
echo "Creating $file should have failed" >&2
- rm $file 2>/dev/null
- if [ $? -ne 0 ]; then
- chattr -i $file
- rm $file
- fi
+ file_cleanup $file
ret=1
fi
done
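The file_cleanup() helper exists because efivarfs marks variables with the immutable attribute, so a plain rm fails until that bit is cleared. A sketch of the underlying sequence (variable path illustrative):

    var=/sys/firmware/efi/efivars/Example-210be57c-9849-4fc7-a635-e6382d1aec27
    lsattr "$var"      # typically lists 'i' (immutable) on efivarfs entries
    chattr -i "$var"   # clear the immutable attribute
    rm -f "$var"       # removal now succeeds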
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc
new file mode 100644
index 000000000000..1221240f8cf6
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc
@@ -0,0 +1,30 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test inter-event histogram trigger expected fail actions
+
+fail() { #msg
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f snapshot ]; then
+ echo "snapshot is not supported"
+ exit_unsupported
+fi
+
+grep -q "snapshot()" README || exit_unsupported # version issue
+
+echo "Test expected snapshot action failure"
+
+echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail
+
+echo "Test expected save action failure"
+
+echo 'hist:keys=comm:onmatch(sched.sched_wakeup).save(comm,prio)' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail
+
+exit_xfail
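Note the inverted logic in this expected-fail test: each write is supposed to be rejected by the kernel, so `&& exit_fail` trips only when a write unexpectedly succeeds, and the script ends with exit_xfail. A sketch of the convention, assuming the ftracetest harness functions:

    # A trigger the kernel should refuse; succeeding is the failure case.
    echo 'some-invalid-trigger' >> events/sched/sched_waking/trigger && exit_fail
    exit_xfail   # reached only when the writes failed as expected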
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
index 401104344593..9912616a8672 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test extended error support
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
index f59b2a9a1f22..77be6e1f6e7b 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test field variable support
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
index 524d9ce361e2..f3eb8aacec0e 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event combined histogram trigger
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
index 4ddc546771b5..d281f056f980 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test multiple actions on hist trigger
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc
new file mode 100644
index 000000000000..064a284e4e75
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc
@@ -0,0 +1,28 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test inter-event histogram trigger onchange action
+
+fail() { #msg
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+grep -q "onchange(var)" README || exit_unsupported # version issue
+
+echo "Test onchange action"
+
+echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+
+ping $LOCALHOST -c 3
+nice -n 1 ping $LOCALHOST -c 3
+
+if ! grep -q "changed:" events/sched/sched_waking/hist; then
+ fail "Failed to create onchange action inter-event histogram"
+fi
+
+exit 0
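For reference, the onchange() handler can be exercised by hand the same way; a hist trigger is removed by writing it back with a leading '!'. A short sketch (run from the tracing debugfs directory):

    cd /sys/kernel/debug/tracing
    echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio)' \
        >> events/sched/sched_waking/trigger
    grep changed: events/sched/sched_waking/hist
    echo '!hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio)' \
        >> events/sched/sched_waking/trigger   # '!' removes the trigger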
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
index 39fb65b0cd9f..a708f0e7858a 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger onmatch action
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
index 81ab3939c96a..dfce6932d8be 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger onmatch-onmax action
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
index 1180ab5f0845..0035995c2194 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger onmax action
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc
new file mode 100644
index 000000000000..18fff69fc433
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc
@@ -0,0 +1,43 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test inter-event histogram trigger snapshot action
+
+fail() { #msg
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f snapshot ]; then
+ echo "snapshot is not supported"
+ exit_unsupported
+fi
+
+grep -q "onchange(var)" README || exit_unsupported # version issue
+
+grep -q "snapshot()" README || exit_unsupported # version issue
+
+echo "Test snapshot action"
+
+echo 1 > /sys/kernel/debug/tracing/events/sched/enable
+
+echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio):onchange($newprio).snapshot() if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+
+ping $LOCALHOST -c 3
+nice -n 1 ping $LOCALHOST -c 3
+
+echo 0 > tracing_on
+
+if ! grep -q "changed:" events/sched/sched_waking/hist; then
+ fail "Failed to create onchange action inter-event histogram"
+fi
+
+if ! grep -q "comm=ping" snapshot; then
+ fail "Failed to create snapshot action inter-event histogram"
+fi
+
+exit 0
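After the snapshot() action fires, the captured buffer is read back from the snapshot file; writing 0 frees it. A sketch for inspecting and clearing it by hand:

    head /sys/kernel/debug/tracing/snapshot     # inspect the captured trace
    echo 0 > /sys/kernel/debug/tracing/snapshot # free the snapshot buffer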
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
index 41128219231a..df44b14724a4 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test synthetic event create remove
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc
new file mode 100644
index 000000000000..8021d60aafec
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc
@@ -0,0 +1,42 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test inter-event histogram trigger trace action
+
+fail() { #msg
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f synthetic_events ]; then
+ echo "synthetic event is not supported"
+ exit_unsupported
+fi
+
+grep -q "trace(<synthetic_event>" README || exit_unsupported # version issue
+
+echo "Test create synthetic event"
+
+echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events
+if [ ! -d events/synthetic/wakeup_latency ]; then
+ fail "Failed to create wakeup_latency synthetic event"
+fi
+
+echo "Test create histogram for synthetic event using trace action"
+echo "Test histogram variables,simple expression support and trace action"
+
+echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger
+echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_wakeup).trace(wakeup_latency,$wakeup_lat,next_pid,next_comm) if next_comm=="ping"' > events/sched/sched_switch/trigger
+echo 'hist:keys=comm,pid,lat:wakeup_lat=lat:sort=lat' > events/synthetic/wakeup_latency/trigger
+
+ping $LOCALHOST -c 5
+
+if ! grep -q "ping" events/synthetic/wakeup_latency/hist; then
+ fail "Failed to create trace action inter-event histogram"
+fi
+
+exit 0
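A synthetic event created this way persists until it is removed; like triggers, it is deleted by writing the definition back with a leading '!'. Sketch:

    echo '!wakeup_latency u64 lat pid_t pid char comm[16]' >> synthetic_events
    [ -d events/synthetic/wakeup_latency ] || echo "wakeup_latency removed"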
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
index aaa1e9f083c3..d587c814a9ca 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -12,7 +12,6 @@
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
-#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
diff --git a/tools/testing/selftests/ima/config b/tools/testing/selftests/ima/config
deleted file mode 100644
index 6bc86d4d9bb4..000000000000
--- a/tools/testing/selftests/ima/config
+++ /dev/null
@@ -1,4 +0,0 @@
-CONFIG_IMA_APPRAISE
-CONFIG_IMA_ARCH_POLICY
-CONFIG_SECURITYFS
-CONFIG_KEXEC_VERIFY_SIG
diff --git a/tools/testing/selftests/ima/test_kexec_load.sh b/tools/testing/selftests/ima/test_kexec_load.sh
deleted file mode 100755
index 1c10093fb526..000000000000
--- a/tools/testing/selftests/ima/test_kexec_load.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0+
-# Loading a kernel image via the kexec_load syscall should fail
-# when the kerne is CONFIG_KEXEC_VERIFY_SIG enabled and the system
-# is booted in secureboot mode.
-
-TEST="$0"
-EFIVARFS="/sys/firmware/efi/efivars"
-rc=0
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-# kexec requires root privileges
-if [ $UID != 0 ]; then
- echo "$TEST: must be run as root" >&2
- exit $ksft_skip
-fi
-
-# Make sure that efivars is mounted in the normal location
-if ! grep -q "^\S\+ $EFIVARFS efivarfs" /proc/mounts; then
- echo "$TEST: efivars is not mounted on $EFIVARFS" >&2
- exit $ksft_skip
-fi
-
-# Get secureboot mode
-file="$EFIVARFS/SecureBoot-*"
-if [ ! -e $file ]; then
- echo "$TEST: unknown secureboot mode" >&2
- exit $ksft_skip
-fi
-secureboot=`hexdump $file | awk '{print substr($4,length($4),1)}'`
-
-# kexec_load should fail in secure boot mode
-KERNEL_IMAGE="/boot/vmlinuz-`uname -r`"
-kexec -l $KERNEL_IMAGE &>> /dev/null
-if [ $? == 0 ]; then
- kexec -u
- if [ "$secureboot" == "1" ]; then
- echo "$TEST: kexec_load succeeded [FAIL]"
- rc=1
- else
- echo "$TEST: kexec_load succeeded [PASS]"
- fi
-else
- if [ "$secureboot" == "1" ]; then
- echo "$TEST: kexec_load failed [PASS]"
- else
- echo "$TEST: kexec_load failed [FAIL]"
- rc=1
- fi
-fi
-
-exit $rc
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index dac927e82336..4c156aeab6b8 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
-#include <linux/msg.h>
+#include <sys/msg.h>
#include <fcntl.h>
#include "../kselftest.h"
@@ -73,7 +74,7 @@ int restore_queue(struct msgque_data *msgque)
return 0;
destroy:
- if (msgctl(id, IPC_RMID, 0))
+ if (msgctl(id, IPC_RMID, NULL))
printf("Failed to destroy queue: %d\n", -errno);
return ret;
}
@@ -120,7 +121,7 @@ int check_and_destroy_queue(struct msgque_data *msgque)
ret = 0;
err:
- if (msgctl(msgque->msq_id, IPC_RMID, 0)) {
+ if (msgctl(msgque->msq_id, IPC_RMID, NULL)) {
printf("Failed to destroy queue: %d\n", -errno);
return -errno;
}
@@ -129,7 +130,7 @@ err:
int dump_queue(struct msgque_data *msgque)
{
- struct msqid64_ds ds;
+ struct msqid_ds ds;
int kern_id;
int i, ret;
@@ -245,7 +246,7 @@ int main(int argc, char **argv)
return ksft_exit_pass();
err_destroy:
- if (msgctl(msgque.msq_id, IPC_RMID, 0)) {
+ if (msgctl(msgque.msq_id, IPC_RMID, NULL)) {
printf("Failed to destroy queue: %d\n", -errno);
return ksft_exit_fail();
}
diff --git a/tools/testing/selftests/ir/ir_loopback.c b/tools/testing/selftests/ir/ir_loopback.c
index ff351bb7c163..e700e09e3682 100644
--- a/tools/testing/selftests/ir/ir_loopback.c
+++ b/tools/testing/selftests/ir/ir_loopback.c
@@ -53,6 +53,10 @@ static const struct {
{ RC_PROTO_RC6_6A_32, "rc-6-6a-32", 0xffffffff, "rc-6" },
{ RC_PROTO_RC6_MCE, "rc-6-mce", 0x00007fff, "rc-6" },
{ RC_PROTO_SHARP, "sharp", 0x1fff, "sharp" },
+ { RC_PROTO_IMON, "imon", 0x7fffffff, "imon" },
+ { RC_PROTO_RCMM12, "rcmm-12", 0x00000fff, "rcmm" },
+ { RC_PROTO_RCMM24, "rcmm-24", 0x00ffffff, "rcmm" },
+ { RC_PROTO_RCMM32, "rcmm-32", 0xffffffff, "rcmm" },
};
int lirc_open(const char *rc)
@@ -141,6 +145,11 @@ int main(int argc, char **argv)
(((scancode >> 8) ^ ~scancode) & 0xff) == 0)
continue;
+ if (rc_proto == RC_PROTO_RCMM32 &&
+ (scancode & 0x000c0000) != 0x000c0000 &&
+ scancode & 0x00008000)
+ continue;
+
struct lirc_scancode lsc = {
.rc_proto = rc_proto,
.scancode = scancode
diff --git a/tools/testing/selftests/ima/Makefile b/tools/testing/selftests/kexec/Makefile
index 0b3adf5444b6..8e9b27a7452f 100644
--- a/tools/testing/selftests/ima/Makefile
+++ b/tools/testing/selftests/kexec/Makefile
@@ -1,10 +1,11 @@
-# Makefile for kexec_load
+# Makefile for kexec tests
uname_M := $(shell uname -m 2>/dev/null || echo not)
ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
ifeq ($(ARCH),x86)
-TEST_PROGS := test_kexec_load.sh
+TEST_PROGS := test_kexec_load.sh test_kexec_file_load.sh
+TEST_FILES := kexec_common_lib.sh
include ../lib.mk
diff --git a/tools/testing/selftests/kexec/config b/tools/testing/selftests/kexec/config
new file mode 100644
index 000000000000..8962e862b2b8
--- /dev/null
+++ b/tools/testing/selftests/kexec/config
@@ -0,0 +1,3 @@
+CONFIG_IMA_APPRAISE=y
+CONFIG_IMA_ARCH_POLICY=y
+CONFIG_SECURITYFS=y
diff --git a/tools/testing/selftests/kexec/kexec_common_lib.sh b/tools/testing/selftests/kexec/kexec_common_lib.sh
new file mode 100755
index 000000000000..43017cfe88f7
--- /dev/null
+++ b/tools/testing/selftests/kexec/kexec_common_lib.sh
@@ -0,0 +1,220 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Kselftest framework defines: ksft_pass=0, ksft_fail=1, ksft_skip=4
+
+VERBOSE="${VERBOSE:-1}"
+IKCONFIG="/tmp/config-`uname -r`"
+KERNEL_IMAGE="/boot/vmlinuz-`uname -r`"
+SECURITYFS=$(grep "securityfs" /proc/mounts | awk '{print $2}')
+
+log_info()
+{
+ [ $VERBOSE -ne 0 ] && echo "[INFO] $1"
+}
+
+# The kselftest framework requirement returns 0 for PASS.
+log_pass()
+{
+ [ $VERBOSE -ne 0 ] && echo "$1 [PASS]"
+ exit 0
+}
+
+# The kselftest framework requirement returns 1 for FAIL.
+log_fail()
+{
+ [ $VERBOSE -ne 0 ] && echo "$1 [FAIL]"
+ exit 1
+}
+
+# The kselftest framework requirement returns 4 for SKIP.
+log_skip()
+{
+ [ $VERBOSE -ne 0 ] && echo "$1"
+ exit 4
+}
+
+# Check the efivars SecureBoot-<UUID> and SetupMode-<UUID>.
+# (Based on kdump-lib.sh)
+get_efivarfs_secureboot_mode()
+{
+ local efivarfs="/sys/firmware/efi/efivars"
+ local secure_boot_file=""
+ local setup_mode_file=""
+ local secureboot_mode=0
+ local setup_mode=0
+
+	# Make sure that efivarfs is mounted in the normal location
+ if ! grep -q "^\S\+ $efivarfs efivarfs" /proc/mounts; then
+ log_info "efivars is not mounted on $efivarfs"
+ return 0;
+ fi
+ secure_boot_file=$(find "$efivarfs" -name SecureBoot-* 2>/dev/null)
+ setup_mode_file=$(find "$efivarfs" -name SetupMode-* 2>/dev/null)
+ if [ -f "$secure_boot_file" ] && [ -f "$setup_mode_file" ]; then
+ secureboot_mode=$(hexdump -v -e '/1 "%d\ "' \
+ "$secure_boot_file"|cut -d' ' -f 5)
+ setup_mode=$(hexdump -v -e '/1 "%d\ "' \
+ "$setup_mode_file"|cut -d' ' -f 5)
+
+ if [ $secureboot_mode -eq 1 ] && [ $setup_mode -eq 0 ]; then
+ log_info "secure boot mode enabled (CONFIG_EFIVAR_FS)"
+ return 1;
+ fi
+ fi
+ return 0;
+}
+
+get_efi_var_secureboot_mode()
+{
+	local efi_vars="/sys/firmware/efi/vars"
+ local secure_boot_file
+ local setup_mode_file
+ local secureboot_mode
+ local setup_mode
+
+ if [ ! -d "$efi_vars" ]; then
+ log_skip "efi_vars is not enabled\n"
+ fi
+ secure_boot_file=$(find "$efi_vars" -name SecureBoot-* 2>/dev/null)
+ setup_mode_file=$(find "$efi_vars" -name SetupMode-* 2>/dev/null)
+ if [ -f "$secure_boot_file/data" ] && \
+ [ -f "$setup_mode_file/data" ]; then
+ secureboot_mode=`od -An -t u1 "$secure_boot_file/data"`
+ setup_mode=`od -An -t u1 "$setup_mode_file/data"`
+
+ if [ $secureboot_mode -eq 1 ] && [ $setup_mode -eq 0 ]; then
+ log_info "secure boot mode enabled (CONFIG_EFI_VARS)"
+ return 1;
+ fi
+ fi
+ return 0;
+}
+
+# Check the efivars SecureBoot-<UUID> and SetupMode-<UUID>.
+# The secure boot mode can be accessed either as the last integer
+# of "od -An -t u1 /sys/firmware/efi/efivars/SecureBoot-*" or from
+# "od -An -t u1 /sys/firmware/efi/vars/SecureBoot-*/data". The efi
+# SetupMode can be similarly accessed.
+# Return 1 for SecureBoot mode enabled and SetupMode mode disabled.
+get_secureboot_mode()
+{
+ local secureboot_mode=0
+
+ get_efivarfs_secureboot_mode
+ secureboot_mode=$?
+
+ # fallback to using the efi_var files
+ if [ $secureboot_mode -eq 0 ]; then
+ get_efi_var_secureboot_mode
+ secureboot_mode=$?
+ fi
+
+ if [ $secureboot_mode -eq 0 ]; then
+ log_info "secure boot mode not enabled"
+ fi
+ return $secureboot_mode;
+}
+
+require_root_privileges()
+{
+ if [ $(id -ru) -ne 0 ]; then
+ log_skip "requires root privileges"
+ fi
+}
+
+# Look for config option in Kconfig file.
+# Return 1 for found and 0 for not found.
+kconfig_enabled()
+{
+ local config="$1"
+ local msg="$2"
+
+ grep -E -q $config $IKCONFIG
+ if [ $? -eq 0 ]; then
+ log_info "$msg"
+ return 1
+ fi
+ return 0
+}
+
+# Attempt to get the kernel config first via proc, and then by
+# extracting it from the kernel image or the configs.ko using
+# scripts/extract-ikconfig.
+# Return 1 for found.
+get_kconfig()
+{
+ local proc_config="/proc/config.gz"
+ local module_dir="/lib/modules/`uname -r`"
+ local configs_module="$module_dir/kernel/kernel/configs.ko"
+
+ if [ ! -f $proc_config ]; then
+ modprobe configs > /dev/null 2>&1
+ fi
+ if [ -f $proc_config ]; then
+ cat $proc_config | gunzip > $IKCONFIG 2>/dev/null
+ if [ $? -eq 0 ]; then
+ return 1
+ fi
+ fi
+
+ local extract_ikconfig="$module_dir/source/scripts/extract-ikconfig"
+ if [ ! -f $extract_ikconfig ]; then
+ log_skip "extract-ikconfig not found"
+ fi
+
+ $extract_ikconfig $KERNEL_IMAGE > $IKCONFIG 2>/dev/null
+ if [ $? -eq 1 ]; then
+ if [ ! -f $configs_module ]; then
+ log_skip "CONFIG_IKCONFIG not enabled"
+ fi
+ $extract_ikconfig $configs_module > $IKCONFIG
+ if [ $? -eq 1 ]; then
+ log_skip "CONFIG_IKCONFIG not enabled"
+ fi
+ fi
+ return 1
+}
+
+# Make sure that securityfs is mounted
+mount_securityfs()
+{
+	if [ -z "$SECURITYFS" ]; then
+ SECURITYFS=/sys/kernel/security
+ mount -t securityfs security $SECURITYFS
+ fi
+
+ if [ ! -d "$SECURITYFS" ]; then
+		log_fail "$SECURITYFS: securityfs is not mounted"
+ fi
+}
+
+# The policy rule format is an "action" followed by key-value pairs. This
+# function supports up to two key-value pairs, in any order.
+# For example: action func=<keyword> [appraise_type=<type>]
+# Return 1 for found and 0 for not found.
+check_ima_policy()
+{
+ local action="$1"
+ local keypair1="$2"
+ local keypair2="$3"
+ local ret=0
+
+ mount_securityfs
+
+ local ima_policy=$SECURITYFS/ima/policy
+ if [ ! -e $ima_policy ]; then
+ log_fail "$ima_policy not found"
+ fi
+
+	if [ -n "$keypair2" ]; then
+ grep -e "^$action.*$keypair1" "$ima_policy" | \
+ grep -q -e "$keypair2"
+ else
+ grep -q -e "^$action.*$keypair1" "$ima_policy"
+ fi
+
+ # invert "grep -q" result, returning 1 for found.
+ [ $? -eq 0 ] && ret=1
+ return $ret
+}
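Callers of this library must remember its inverted convention: the probe functions return 1 for "found/enabled" rather than the usual shell 0. A minimal sketch of a consumer, assuming the library is in the current directory:

    . ./kexec_common_lib.sh
    require_root_privileges
    get_secureboot_mode
    [ $? -eq 1 ] && log_info "secure boot is enabled"   # here, 1 means enabled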
diff --git a/tools/testing/selftests/kexec/test_kexec_file_load.sh b/tools/testing/selftests/kexec/test_kexec_file_load.sh
new file mode 100755
index 000000000000..fa7c24e8eefb
--- /dev/null
+++ b/tools/testing/selftests/kexec/test_kexec_file_load.sh
@@ -0,0 +1,208 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Loading a kernel image via the kexec_file_load syscall can verify the
+# IMA signature stored in the security.ima xattr, the PE signature, both
+# signatures, or neither, depending on the IMA policy.
+#
+# To determine whether the kernel image is signed, this test depends
+# on pesign and getfattr. This test also requires the kernel to be
+# built with CONFIG_IKCONFIG enabled and either CONFIG_IKCONFIG_PROC
+# enabled or access to the extract-ikconfig script.
+
+TEST="KEXEC_FILE_LOAD"
+. ./kexec_common_lib.sh
+
+trap "{ rm -f $IKCONFIG ; }" EXIT
+
+# Some of the IMA builtin policies may require the kexec kernel image to
+# be signed, but these policy rules may be replaced with a custom
+# policy. Only CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS persists after
+# loading a custom policy. Check whether it is enabled before reading
+# the IMA runtime sysfs policy file.
+# Return 1 for IMA signature required and 0 for not required.
+is_ima_sig_required()
+{
+ local ret=0
+
+ kconfig_enabled "CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS=y" \
+ "IMA kernel image signature required"
+ if [ $? -eq 1 ]; then
+ log_info "IMA signature required"
+ return 1
+ fi
+
+	# The architecture specific or a custom policy may require the
+	# kexec kernel image to be signed. Policy rules are walked
+	# sequentially, so a policy rule may be defined but never
+	# matched. This test assumes that if a policy rule is
+	# specified, that is the intent.
+ if [ $ima_read_policy -eq 1 ]; then
+ check_ima_policy "appraise" "func=KEXEC_KERNEL_CHECK" \
+ "appraise_type=imasig"
+ ret=$?
+ [ $ret -eq 1 ] && log_info "IMA signature required";
+ fi
+ return $ret
+}
+
+# kexec_file_load_test() is complicated enough as it is; simply require pesign.
+# Return 1 for PE signature found and 0 for not found.
+check_for_pesig()
+{
+ which pesign > /dev/null 2>&1 || log_skip "pesign not found"
+
+ pesign -i $KERNEL_IMAGE --show-signature | grep -q "No signatures"
+ local ret=$?
+ if [ $ret -eq 1 ]; then
+ log_info "kexec kernel image PE signed"
+ else
+ log_info "kexec kernel image not PE signed"
+ fi
+ return $ret
+}
+
+# kexec_file_load_test() is complicated enough as it is; simply require getfattr.
+# Return 1 for IMA signature found and 0 for not found.
+check_for_imasig()
+{
+ local ret=0
+
+ which getfattr > /dev/null 2>&1
+ if [ $? -eq 1 ]; then
+ log_skip "getfattr not found"
+ fi
+
+ line=$(getfattr -n security.ima -e hex --absolute-names $KERNEL_IMAGE 2>&1)
+ echo $line | grep -q "security.ima=0x03"
+ if [ $? -eq 0 ]; then
+ ret=1
+ log_info "kexec kernel image IMA signed"
+ else
+ log_info "kexec kernel image not IMA signed"
+ fi
+ return $ret
+}
+
+kexec_file_load_test()
+{
+ local succeed_msg="kexec_file_load succeeded"
+ local failed_msg="kexec_file_load failed"
+ local key_msg="try enabling the CONFIG_INTEGRITY_PLATFORM_KEYRING"
+
+ line=$(kexec --load --kexec-file-syscall $KERNEL_IMAGE 2>&1)
+
+ if [ $? -eq 0 ]; then
+ kexec --unload --kexec-file-syscall
+
+ # In secureboot mode with an architecture specific
+ # policy, make sure either an IMA or PE signature exists.
+ if [ $secureboot -eq 1 ] && [ $arch_policy -eq 1 ] && \
+ [ $ima_signed -eq 0 ] && [ $pe_signed -eq 0 ]; then
+ log_fail "$succeed_msg (missing sig)"
+ fi
+
+ if [ $kexec_sig_required -eq 1 -o $pe_sig_required -eq 1 ] \
+ && [ $pe_signed -eq 0 ]; then
+ log_fail "$succeed_msg (missing PE sig)"
+ fi
+
+ if [ $ima_sig_required -eq 1 ] && [ $ima_signed -eq 0 ]; then
+ log_fail "$succeed_msg (missing IMA sig)"
+ fi
+
+ if [ $pe_sig_required -eq 0 ] && [ $ima_appraise -eq 1 ] \
+ && [ $ima_sig_required -eq 0 ] && [ $ima_signed -eq 0 ] \
+ && [ $ima_read_policy -eq 0 ]; then
+ log_fail "$succeed_msg (possibly missing IMA sig)"
+ fi
+
+ if [ $pe_sig_required -eq 0 ] && [ $ima_appraise -eq 0 ]; then
+ log_info "No signature verification required"
+ elif [ $pe_sig_required -eq 0 ] && [ $ima_appraise -eq 1 ] \
+ && [ $ima_sig_required -eq 0 ] && [ $ima_signed -eq 0 ] \
+ && [ $ima_read_policy -eq 1 ]; then
+ log_info "No signature verification required"
+ fi
+
+ log_pass "$succeed_msg"
+ fi
+
+ # Check the reason for the kexec_file_load failure
+ echo $line | grep -q "Required key not available"
+ if [ $? -eq 0 ]; then
+ if [ $platform_keyring -eq 0 ]; then
+ log_pass "$failed_msg (-ENOKEY), $key_msg"
+ else
+ log_pass "$failed_msg (-ENOKEY)"
+ fi
+ fi
+
+ if [ $kexec_sig_required -eq 1 -o $pe_sig_required -eq 1 ] \
+ && [ $pe_signed -eq 0 ]; then
+ log_pass "$failed_msg (missing PE sig)"
+ fi
+
+ if [ $ima_sig_required -eq 1 ] && [ $ima_signed -eq 0 ]; then
+ log_pass "$failed_msg (missing IMA sig)"
+ fi
+
+ if [ $pe_sig_required -eq 0 ] && [ $ima_appraise -eq 1 ] \
+ && [ $ima_sig_required -eq 0 ] && [ $ima_read_policy -eq 0 ] \
+ && [ $ima_signed -eq 0 ]; then
+ log_pass "$failed_msg (possibly missing IMA sig)"
+ fi
+
+ log_pass "$failed_msg"
+ return 0
+}
+
+# kexec requires root privileges
+require_root_privileges
+
+# get the kernel config
+get_kconfig
+
+kconfig_enabled "CONFIG_KEXEC_FILE=y" "kexec_file_load is enabled"
+if [ $? -eq 0 ]; then
+ log_skip "kexec_file_load is not enabled"
+fi
+
+# Determine which kernel config options are enabled
+kconfig_enabled "CONFIG_IMA_APPRAISE=y" "IMA enabled"
+ima_appraise=$?
+
+kconfig_enabled "CONFIG_IMA_ARCH_POLICY=y" \
+ "architecture specific policy enabled"
+arch_policy=$?
+
+kconfig_enabled "CONFIG_INTEGRITY_PLATFORM_KEYRING=y" \
+ "platform keyring enabled"
+platform_keyring=$?
+
+kconfig_enabled "CONFIG_IMA_READ_POLICY=y" "reading IMA policy permitted"
+ima_read_policy=$?
+
+kconfig_enabled "CONFIG_KEXEC_SIG_FORCE=y" \
+ "kexec signed kernel image required"
+kexec_sig_required=$?
+
+kconfig_enabled "CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y" \
+ "PE signed kernel image required"
+pe_sig_required=$?
+
+is_ima_sig_required
+ima_sig_required=$?
+
+get_secureboot_mode
+secureboot=$?
+
+# Are there pe and ima signatures
+check_for_pesig
+pe_signed=$?
+
+check_for_imasig
+ima_signed=$?
+
+# Test loading the kernel image via kexec_file_load syscall
+kexec_file_load_test
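A hypothetical run of the test; the exit status follows the kselftest codes documented at the top of kexec_common_lib.sh:

    cd tools/testing/selftests/kexec
    sudo VERBOSE=1 ./test_kexec_file_load.sh
    echo "exit status: $?"   # 0=pass, 1=fail, 4=skip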
diff --git a/tools/testing/selftests/kexec/test_kexec_load.sh b/tools/testing/selftests/kexec/test_kexec_load.sh
new file mode 100755
index 000000000000..49c6aa929137
--- /dev/null
+++ b/tools/testing/selftests/kexec/test_kexec_load.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Prevent loading a kernel image via the kexec_load syscall when
+# signatures are required. (Dependent on CONFIG_IMA_ARCH_POLICY.)
+
+TEST="$0"
+. ./kexec_common_lib.sh
+
+# kexec requires root privileges
+require_root_privileges
+
+# get the kernel config
+get_kconfig
+
+kconfig_enabled "CONFIG_KEXEC=y" "kexec_load is enabled"
+if [ $? -eq 0 ]; then
+ log_skip "kexec_load is not enabled"
+fi
+
+kconfig_enabled "CONFIG_IMA_APPRAISE=y" "IMA enabled"
+ima_appraise=$?
+
+kconfig_enabled "CONFIG_IMA_ARCH_POLICY=y" \
+ "IMA architecture specific policy enabled"
+arch_policy=$?
+
+get_secureboot_mode
+secureboot=$?
+
+# kexec_load should fail in secure boot mode and CONFIG_IMA_ARCH_POLICY enabled
+kexec --load $KERNEL_IMAGE > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+ kexec --unload
+ if [ $secureboot -eq 1 ] && [ $arch_policy -eq 1 ]; then
+ log_fail "kexec_load succeeded"
+ elif [ $ima_appraise -eq 0 -o $arch_policy -eq 0 ]; then
+ log_info "Either IMA or the IMA arch policy is not enabled"
+ fi
+ log_pass "kexec_load succeeded"
+else
+ if [ $secureboot -eq 1 ] && [ $arch_policy -eq 1 ] ; then
+ log_pass "kexec_load failed"
+ else
+ log_fail "kexec_load failed"
+ fi
+fi
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 2d90c98eeb67..941d9391377f 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -696,6 +696,7 @@ void __run_test(struct __test_metadata *t)
t->passed = 1;
t->trigger = 0;
printf("[ RUN ] %s\n", t->name);
+ alarm(30);
child_pid = fork();
if (child_pid < 0) {
printf("ERROR SPAWNING TEST CHILD\n");
@@ -744,6 +745,7 @@ void __run_test(struct __test_metadata *t)
}
}
printf("[ %4s ] %s\n", (t->passed ? "OK" : "FAIL"), t->name);
+ alarm(0);
}
static int test_harness_run(int __attribute__((unused)) argc,
diff --git a/tools/testing/selftests/kselftest_module.h b/tools/testing/selftests/kselftest_module.h
new file mode 100644
index 000000000000..e8eafaf0941a
--- /dev/null
+++ b/tools/testing/selftests/kselftest_module.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __KSELFTEST_MODULE_H
+#define __KSELFTEST_MODULE_H
+
+#include <linux/module.h>
+
+/*
+ * Test framework for writing test modules to be loaded by kselftest.
+ * See Documentation/dev-tools/kselftest.rst for an example test module.
+ */
+
+#define KSTM_MODULE_GLOBALS() \
+static unsigned int total_tests __initdata; \
+static unsigned int failed_tests __initdata
+
+#define KSTM_CHECK_ZERO(x) do { \
+ total_tests++; \
+ if (x) { \
+ pr_warn("TC failed at %s:%d\n", __func__, __LINE__); \
+ failed_tests++; \
+ } \
+} while (0)
+
+static inline int kstm_report(unsigned int total_tests, unsigned int failed_tests)
+{
+ if (failed_tests == 0)
+ pr_info("all %u tests passed\n", total_tests);
+ else
+ pr_warn("failed %u out of %u tests\n", failed_tests, total_tests);
+
+ return failed_tests ? -EINVAL : 0;
+}
+
+#define KSTM_MODULE_LOADERS(__module) \
+static int __init __module##_init(void) \
+{ \
+ pr_info("loaded.\n"); \
+ selftest(); \
+ return kstm_report(total_tests, failed_tests); \
+} \
+static void __exit __module##_exit(void) \
+{ \
+ pr_info("unloaded.\n"); \
+} \
+module_init(__module##_init); \
+module_exit(__module##_exit)
+
+#endif /* __KSELFTEST_MODULE_H */
diff --git a/tools/testing/selftests/kselftest_module.sh b/tools/testing/selftests/kselftest_module.sh
new file mode 100755
index 000000000000..18e1c7992d30
--- /dev/null
+++ b/tools/testing/selftests/kselftest_module.sh
@@ -0,0 +1,84 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+
+#
+# Runs an individual test module.
+#
+# kselftest expects a separate executable for each test; this can be
+# created by adding a script like this:
+#
+# #!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+# $(dirname $0)/../kselftest_module.sh "description" module_name
+#
+# Example: tools/testing/selftests/lib/printf.sh
+
+desc="" # Output prefix.
+module="" # Filename (without the .ko).
+args="" # modprobe arguments.
+
+modprobe="/sbin/modprobe"
+
+main() {
+ parse_args "$@"
+ assert_root
+ assert_have_module
+ run_module
+}
+
+parse_args() {
+ script=${0##*/}
+
+ if [ $# -lt 2 ]; then
+		echo "Usage: $script <description> <module_name> [module_arg]..."
+ exit 1
+ fi
+
+ desc="$1"
+ shift || true
+ module="$1"
+ shift || true
+ args="$@"
+}
+
+assert_root() {
+ if [ ! -w /dev ]; then
+ skip "please run as root"
+ fi
+}
+
+assert_have_module() {
+ if ! $modprobe -q -n $module; then
+ skip "module $module is not found"
+ fi
+}
+
+run_module() {
+ if $modprobe -q $module $args; then
+ $modprobe -q -r $module
+ say "ok"
+ else
+ fail ""
+ fi
+}
+
+say() {
+ echo "$desc: $1"
+}
+
+fail() {
+ say "$1 [FAIL]" >&2
+ exit 1
+}
+
+skip() {
+ say "$1 [SKIP]" >&2
+ # Kselftest framework requirement - SKIP code is 4.
+ exit 4
+}
+
+#
+# Main script
+#
+main "$@"
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 6210ba41c29e..2689d1ea6d7a 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -3,6 +3,7 @@
/x86_64/platform_info_test
/x86_64/set_sregs_test
/x86_64/sync_regs_test
+/x86_64/vmx_close_while_nested_test
/x86_64/vmx_tsc_adjust_test
/x86_64/state_test
/dirty_log_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index f9a0e9938480..f8588cca2bef 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -1,3 +1,5 @@
+include ../../../../scripts/Kbuild.include
+
all:
top_srcdir = ../../../..
@@ -16,6 +18,8 @@ TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
+TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
@@ -28,8 +32,12 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
-LDFLAGS += -pthread
+CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
+
+no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
+ $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
+
+LDFLAGS += -pthread $(no-pie-option)
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 4715cfba20dc..93f99c6b7d79 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -288,8 +288,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
#endif
max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
guest_page_size = (1ul << guest_page_shift);
- /* 1G of guest page sized pages */
- guest_num_pages = (1ul << (30 - guest_page_shift));
+ /*
+ * A little more than 1G of guest page sized pages. Cover the
+ * case where the size is not aligned to 64 pages.
+ */
+ guest_num_pages = (1ul << (30 - guest_page_shift)) + 3;
host_page_size = getpagesize();
host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
!!((guest_num_pages * guest_page_size) % host_page_size);
@@ -359,7 +362,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
#ifdef USE_CLEAR_DIRTY_LOG
kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
- DIV_ROUND_UP(host_num_pages, 64) * 64);
+ host_num_pages);
#endif
vm_dirty_log_verify(bmap);
iteration++;
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index a84785b02557..07b71ad9734a 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -102,6 +102,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_mp_state *mp_state);
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index e2884c2b81ff..6063d5b2f356 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -778,6 +778,33 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+#define APIC_BASE_MSR 0x800
+#define X2APIC_ENABLE (1UL << 10)
+#define APIC_ICR 0x300
+#define APIC_DEST_SELF 0x40000
+#define APIC_DEST_ALLINC 0x80000
+#define APIC_DEST_ALLBUT 0xC0000
+#define APIC_ICR_RR_MASK 0x30000
+#define APIC_ICR_RR_INVALID 0x00000
+#define APIC_ICR_RR_INPROG 0x10000
+#define APIC_ICR_RR_VALID 0x20000
+#define APIC_INT_LEVELTRIG 0x08000
+#define APIC_INT_ASSERT 0x04000
+#define APIC_ICR_BUSY 0x01000
+#define APIC_DEST_LOGICAL 0x00800
+#define APIC_DEST_PHYSICAL 0x00000
+#define APIC_DM_FIXED 0x00000
+#define APIC_DM_FIXED_MASK 0x00700
+#define APIC_DM_LOWEST 0x00100
+#define APIC_DM_SMI 0x00200
+#define APIC_DM_REMRD 0x00300
+#define APIC_DM_NMI 0x00400
+#define APIC_DM_INIT 0x00500
+#define APIC_DM_STARTUP 0x00600
+#define APIC_DM_EXTINT 0x00700
+#define APIC_VECTOR_MASK 0x000FF
+#define APIC_ICR2 0x310
+
#define MSR_IA32_TSCDEADLINE 0x000006e0
#define MSR_IA32_UCODE_WRITE 0x00000079
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index b52cfdefecbf..4ca96b228e46 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -91,6 +91,11 @@ static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
if (vm->kvm_fd < 0)
exit(KSFT_SKIP);
+ if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
+ fprintf(stderr, "immediate_exit not available, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
"rc: %i errno: %i", vm->fd, errno);
@@ -1121,6 +1126,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
return rc;
}
+void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ int ret;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ vcpu->state->immediate_exit = 1;
+ ret = ioctl(vcpu->fd, KVM_RUN, NULL);
+ vcpu->state->immediate_exit = 0;
+
+ TEST_ASSERT(ret == -1 && errno == EINTR,
+ "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
+ ret, errno);
+}
+
/*
* VM VCPU Set MP State
*
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index f28127f4a3af..dc7fae9fa424 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1030,6 +1030,14 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
nested_size, sizeof(state->nested_));
}
+ /*
+ * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
+ * guest state is consistent only after userspace re-enters the
+ * kernel with KVM_RUN. Complete IO prior to migrating state
+ * to a new VM.
+ */
+ vcpu_run_complete_io(vm, vcpuid);
+
nmsrs = kvm_get_num_msrs(vm);
list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
list->nmsrs = nmsrs;
@@ -1093,12 +1101,6 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int r;
- if (state->nested.size) {
- r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
- r);
- }
-
r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
r);
@@ -1130,4 +1132,10 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
r);
+
+ if (state->nested.size) {
+ r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
+ r);
+ }
}
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index d503a51fad30..7c2c4d4055a8 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -87,22 +87,25 @@ int main(int argc, char *argv[])
while (1) {
rc = _vcpu_run(vm, VCPU_ID);
- if (run->exit_reason == KVM_EXIT_IO) {
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_SYNC:
- /* emulate hypervisor clearing CR4.OSXSAVE */
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
- sregs.cr4 &= ~X86_CR4_OSXSAVE;
- vcpu_sregs_set(vm, VCPU_ID, &sregs);
- break;
- case UCALL_ABORT:
- TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
- }
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
+ /* emulate hypervisor clearing CR4.OSXSAVE */
+ vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ sregs.cr4 &= ~X86_CR4_OSXSAVE;
+ vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ break;
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index c49c2a28b0eb..36669684eca5 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -123,8 +123,6 @@ int main(int argc, char *argv[])
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -144,6 +142,9 @@ int main(int argc, char *argv[])
stage, (ulong)uc.args[1]);
state = vcpu_save_state(vm, VCPU_ID);
+ memset(&regs1, 0, sizeof(regs1));
+ vcpu_regs_get(vm, VCPU_ID, &regs1);
+
kvm_vm_release(vm);
/* Restore state in a new VM. */
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 264425f75806..9a21e912097c 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -141,7 +141,13 @@ int main(int argc, char *argv[])
free(hv_cpuid_entries);
- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+ rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+ if (rv) {
+ fprintf(stderr,
+ "Enlightened VMCS is unsupported, skip related test\n");
+ goto vm_free;
+ }
hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
if (!hv_cpuid_entries)
@@ -151,6 +157,7 @@ int main(int argc, char *argv[])
free(hv_cpuid_entries);
+vm_free:
kvm_vm_free(vm);
return 0;
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
new file mode 100644
index 000000000000..fb8086964d83
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ * Tests for SMM.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+
+#include "vmx.h"
+
+#define VCPU_ID 1
+
+#define PAGE_SIZE 4096
+
+#define SMRAM_SIZE 65536
+#define SMRAM_MEMSLOT ((1 << 16) | 1)
+#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
+#define SMRAM_GPA 0x1000000
+#define SMRAM_STAGE 0xfe
+
+#define STR(x) #x
+#define XSTR(s) STR(s)
+
+#define SYNC_PORT 0xe
+#define DONE 0xff
+
+/*
+ * This is compiled as normal 64-bit code, however, the SMI handler is
+ * executed in real-address mode. To stay simple we're limiting ourselves
+ * to a mode-independent subset of asm here.
+ * The SMI handler always reports back the fixed stage SMRAM_STAGE.
+ */
+uint8_t smi_handler[] = {
+ 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */
+ 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */
+ 0x0f, 0xaa, /* rsm */
+};
+
+void sync_with_host(uint64_t phase)
+{
+ asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
+ : : "a" (phase));
+}
+
+void self_smi(void)
+{
+ wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
+ APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+ uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+
+ sync_with_host(1);
+
+ wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
+
+ sync_with_host(2);
+
+ self_smi();
+
+ sync_with_host(4);
+
+ if (vmx_pages) {
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+
+ sync_with_host(5);
+
+ self_smi();
+
+ sync_with_host(7);
+ }
+
+ sync_with_host(DONE);
+}
+
+int main(int argc, char *argv[])
+{
+ struct vmx_pages *vmx_pages = NULL;
+ vm_vaddr_t vmx_pages_gva = 0;
+
+ struct kvm_regs regs;
+ struct kvm_vm *vm;
+ struct kvm_run *run;
+ struct kvm_x86_state *state;
+ int stage, stage_reported;
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ run = vcpu_state(vm, VCPU_ID);
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
+ SMRAM_MEMSLOT, SMRAM_PAGES, 0);
+ TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
+ == SMRAM_GPA, "could not allocate guest physical addresses?");
+
+ memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
+ memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
+ sizeof(smi_handler));
+
+ vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
+
+ if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+ vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ } else {
+ printf("will skip SMM test with VMX enabled\n");
+ vcpu_args_set(vm, VCPU_ID, 1, 0);
+ }
+
+ for (stage = 1;; stage++) {
+ _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Stage %d: unexpected exit reason: %u (%s),\n",
+ stage, run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ memset(&regs, 0, sizeof(regs));
+ vcpu_regs_get(vm, VCPU_ID, &regs);
+
+ stage_reported = regs.rax & 0xff;
+
+ if (stage_reported == DONE)
+ goto done;
+
+ TEST_ASSERT(stage_reported == stage ||
+ stage_reported == SMRAM_STAGE,
+ "Unexpected stage: #%x, got %x",
+ stage, stage_reported);
+
+ state = vcpu_save_state(vm, VCPU_ID);
+ kvm_vm_release(vm);
+ kvm_vm_restart(vm, O_RDWR);
+ vm_vcpu_add(vm, VCPU_ID, 0, 0);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vcpu_load_state(vm, VCPU_ID, state);
+ run = vcpu_state(vm, VCPU_ID);
+ free(state);
+ }
+
+done:
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 4b3f556265f1..e0a3c0204b7c 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -156,8 +156,6 @@ int main(int argc, char *argv[])
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -177,6 +175,9 @@ int main(int argc, char *argv[])
stage, (ulong)uc.args[1]);
state = vcpu_save_state(vm, VCPU_ID);
+ memset(&regs1, 0, sizeof(regs1));
+ vcpu_regs_get(vm, VCPU_ID, &regs1);
+
kvm_vm_release(vm);
/* Restore state in a new VM. */
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
new file mode 100644
index 000000000000..6edec6fd790b
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
@@ -0,0 +1,95 @@
+/*
+ * vmx_close_while_nested
+ *
+ * Copyright (C) 2019, Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Verify that nothing bad happens if a KVM user exits with open
+ * file descriptors while executing a nested guest.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "kselftest.h"
+
+#define VCPU_ID 5
+
+enum {
+ PORT_L0_EXIT = 0x2000,
+};
+
+/* The virtual machine object. */
+static struct kvm_vm *vm;
+
+static void l2_guest_code(void)
+{
+ /* Exit to L0 */
+ asm volatile("inb %%dx, %%al"
+ : : [port] "d" (PORT_L0_EXIT) : "rax");
+}
+
+static void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ uint32_t control;
+ uintptr_t save_cr3;
+
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+
+ /* Prepare the VMCS for L2 execution. */
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ GUEST_ASSERT(!vmlaunch());
+ GUEST_ASSERT(0);
+}
+
+int main(int argc, char *argv[])
+{
+ struct vmx_pages *vmx_pages;
+ vm_vaddr_t vmx_pages_gva;
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+ if (!(entry->ecx & CPUID_VMX)) {
+ fprintf(stderr, "nested VMX not enabled, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ /* Allocate VMX pages and shared descriptors (vmx_pages). */
+ vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+
+ for (;;) {
+ volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ if (run->io.port == PORT_L0_EXIT)
+ break;
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
+ /* NOT REACHED */
+ default:
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ }
+ }
+}
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 8b0f16409ed7..5979fdc4f36c 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -3,7 +3,16 @@
CC := $(CROSS_COMPILE)gcc
ifeq (0,$(MAKELEVEL))
-OUTPUT := $(shell pwd)
+ ifneq ($(O),)
+ OUTPUT := $(O)
+ else
+ ifneq ($(KBUILD_OUTPUT),)
+ OUTPUT := $(KBUILD_OUTPUT)
+ else
+ OUTPUT := $(shell pwd)
+ DEFAULT_INSTALL_HDR_PATH := 1
+ endif
+ endif
endif
# The following are built by lib.mk common compile rules.
@@ -21,9 +30,34 @@ top_srcdir ?= ../../../..
include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
+# set default goal to all, so make without a target runs all, even when
+# all isn't the first target in the file.
+.DEFAULT_GOAL := all
+
+# Invoke headers install with --no-builtin-rules to avoid a circular
+# dependency in the "make kselftest" case. There, the second-level
+# make inherits the builtin rules, tries to generate Makefile.o, and
+# runs into
+# "Circular Makefile.o <- prepare dependency dropped."
+# so headers_install fails and the test compile fails.
+# The O= and KBUILD_OUTPUT cases don't run into this error, since the
+# main Makefile invokes them as sub-makes; --no-builtin-rules is not
+# necessary there, but doesn't cause any failures. Keep it simple and
+# use the same flags in both cases.
+# Note that the support to install headers from lib.mk is necessary
+# when test Makefile is run directly with "make -C".
+# When local build is done, headers are installed in the default
+# INSTALL_HDR_PATH usr/include.
.PHONY: khdr
khdr:
- make ARCH=$(ARCH) -C $(top_srcdir) headers_install
+ifndef KSFT_KHDR_INSTALL_DONE
+ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
+ make --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
+else
+ make --no-builtin-rules INSTALL_HDR_PATH=$$OUTPUT/usr \
+ ARCH=$(ARCH) -C $(top_srcdir) headers_install
+endif
+endif
all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
else
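With the new OUTPUT resolution, all three invocation styles below behave consistently; the first also installs headers into the default usr/include (paths illustrative):

    make -C tools/testing/selftests/lib run_tests     # OUTPUT = $(pwd)
    make O=/tmp/kselftest -C tools/testing/selftests/lib run_tests
    KBUILD_OUTPUT=/tmp/kselftest make -C tools/testing/selftests/lib run_tests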
diff --git a/tools/testing/selftests/lib/Makefile b/tools/testing/selftests/lib/Makefile
index 70d5711e3ac8..9f26635f3e57 100644
--- a/tools/testing/selftests/lib/Makefile
+++ b/tools/testing/selftests/lib/Makefile
@@ -3,6 +3,6 @@
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
all:
-TEST_PROGS := printf.sh bitmap.sh prime_numbers.sh
+TEST_PROGS := printf.sh bitmap.sh prime_numbers.sh strscpy.sh
include ../lib.mk
diff --git a/tools/testing/selftests/lib/bitmap.sh b/tools/testing/selftests/lib/bitmap.sh
index 5a90006d1aea..5511dddc5c2d 100755
--- a/tools/testing/selftests/lib/bitmap.sh
+++ b/tools/testing/selftests/lib/bitmap.sh
@@ -1,19 +1,3 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-# Runs bitmap infrastructure tests using test_bitmap kernel module
-if ! /sbin/modprobe -q -n test_bitmap; then
- echo "bitmap: module test_bitmap is not found [SKIP]"
- exit $ksft_skip
-fi
-
-if /sbin/modprobe -q test_bitmap; then
- /sbin/modprobe -q -r test_bitmap
- echo "bitmap: ok"
-else
- echo "bitmap: [FAIL]"
- exit 1
-fi
+$(dirname $0)/../kselftest_module.sh "bitmap" test_bitmap
diff --git a/tools/testing/selftests/lib/config b/tools/testing/selftests/lib/config
index 126933bcc950..14a77ea4a8da 100644
--- a/tools/testing/selftests/lib/config
+++ b/tools/testing/selftests/lib/config
@@ -1,3 +1,4 @@
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_PRIME_NUMBERS=m
+CONFIG_TEST_STRSCPY=m
diff --git a/tools/testing/selftests/lib/prime_numbers.sh b/tools/testing/selftests/lib/prime_numbers.sh
index 78e7483c8d60..43b28f24e453 100755
--- a/tools/testing/selftests/lib/prime_numbers.sh
+++ b/tools/testing/selftests/lib/prime_numbers.sh
@@ -1,19 +1,4 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Checks fast/slow prime_number generation for inconsistencies
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-if ! /sbin/modprobe -q -n prime_numbers; then
- echo "prime_numbers: module prime_numbers is not found [SKIP]"
- exit $ksft_skip
-fi
-
-if /sbin/modprobe -q prime_numbers selftest=65536; then
- /sbin/modprobe -q -r prime_numbers
- echo "prime_numbers: ok"
-else
- echo "prime_numbers: [FAIL]"
- exit 1
-fi
+$(dirname $0)/../kselftest_module.sh "prime numbers" prime_numbers selftest=65536
diff --git a/tools/testing/selftests/lib/printf.sh b/tools/testing/selftests/lib/printf.sh
index 45a23e2d64ad..2ffa61da0296 100755
--- a/tools/testing/selftests/lib/printf.sh
+++ b/tools/testing/selftests/lib/printf.sh
@@ -1,19 +1,4 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-# Runs printf infrastructure using test_printf kernel module
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-if ! /sbin/modprobe -q -n test_printf; then
- echo "printf: module test_printf is not found [SKIP]"
- exit $ksft_skip
-fi
-
-if /sbin/modprobe -q test_printf; then
- /sbin/modprobe -q -r test_printf
- echo "printf: ok"
-else
- echo "printf: [FAIL]"
- exit 1
-fi
+# Tests the printf infrastructure using test_printf kernel module.
+$(dirname $0)/../kselftest_module.sh "printf" test_printf
diff --git a/tools/testing/selftests/lib/strscpy.sh b/tools/testing/selftests/lib/strscpy.sh
new file mode 100755
index 000000000000..71f2be6afba6
--- /dev/null
+++ b/tools/testing/selftests/lib/strscpy.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+$(dirname $0)/../kselftest_module.sh "strscpy*" test_strscpy
diff --git a/tools/testing/selftests/livepatch/Makefile b/tools/testing/selftests/livepatch/Makefile
index af4aee79bebb..fd405402c3ff 100644
--- a/tools/testing/selftests/livepatch/Makefile
+++ b/tools/testing/selftests/livepatch/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := \
+TEST_PROGS_EXTENDED := functions.sh
+TEST_PROGS := \
test-livepatch.sh \
test-callbacks.sh \
test-shadow-vars.sh
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index d4cfb6a7a086..4b7e107865bf 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -27,6 +27,7 @@ log_test()
nsuccess=$((nsuccess+1))
printf "\n TEST: %-50s [ OK ]\n" "${msg}"
else
+ ret=1
nfail=$((nfail+1))
printf "\n TEST: %-50s [FAIL]\n" "${msg}"
if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
@@ -147,8 +148,8 @@ fib_rule6_test()
fib_check_iproute_support "ipproto" "ipproto"
if [ $? -eq 0 ]; then
- match="ipproto icmp"
- fib_rule6_test_match_n_redirect "$match" "$match" "ipproto icmp match"
+ match="ipproto ipv6-icmp"
+ fib_rule6_test_match_n_redirect "$match" "$match" "ipproto ipv6-icmp match"
fi
}
@@ -245,4 +246,9 @@ setup
run_fibrule_tests
cleanup
+if [ "$TESTS" != "none" ]; then
+ printf "\nTests passed: %3d\n" ${nsuccess}
+ printf "Tests failed: %3d\n" ${nfail}
+fi
+
exit $ret
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 1080ff55a788..0d2a5f4f1e63 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -605,6 +605,39 @@ run_cmd()
return $rc
}
+check_expected()
+{
+ local out="$1"
+ local expected="$2"
+ local rc=0
+
+ [ "${out}" = "${expected}" ] && return 0
+
+ if [ -z "${out}" ]; then
+ if [ "$VERBOSE" = "1" ]; then
+ printf "\nNo route entry found\n"
+ printf "Expected:\n"
+ printf " ${expected}\n"
+ fi
+ return 1
+ fi
+
+ # tricky way to convert output to 1-line without ip's
+ # messy '\'; this drops all extra white space
+ out=$(echo ${out})
+ if [ "${out}" != "${expected}" ]; then
+ rc=1
+ if [ "${VERBOSE}" = "1" ]; then
+ printf " Unexpected route entry. Have:\n"
+ printf " ${out}\n"
+ printf " Expected:\n"
+ printf " ${expected}\n\n"
+ fi
+ fi
+
+ return $rc
+}
+
# add route for a prefix, flushing any existing routes first
# expected to be the first step of a test
add_route6()
@@ -652,31 +685,7 @@ check_route6()
pfx=$1
out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//')
- [ "${out}" = "${expected}" ] && return 0
-
- if [ -z "${out}" ]; then
- if [ "$VERBOSE" = "1" ]; then
- printf "\nNo route entry found\n"
- printf "Expected:\n"
- printf " ${expected}\n"
- fi
- return 1
- fi
-
- # tricky way to convert output to 1-line without ip's
- # messy '\'; this drops all extra white space
- out=$(echo ${out})
- if [ "${out}" != "${expected}" ]; then
- rc=1
- if [ "${VERBOSE}" = "1" ]; then
- printf " Unexpected route entry. Have:\n"
- printf " ${out}\n"
- printf " Expected:\n"
- printf " ${expected}\n\n"
- fi
- fi
-
- return $rc
+ check_expected "${out}" "${expected}"
}
route_cleanup()
@@ -725,7 +734,7 @@ route_setup()
ip -netns ns2 addr add 172.16.103.2/24 dev veth4
ip -netns ns2 addr add 172.16.104.1/24 dev dummy1
- set +ex
+ set +e
}
# assumption is that basic add of a single path route works
@@ -960,7 +969,8 @@ ipv6_addr_metric_test()
run_cmd "$IP li set dev dummy2 down"
rc=$?
if [ $rc -eq 0 ]; then
- check_route6 ""
+ out=$($IP -6 ro ls match 2001:db8:104::/64)
+ check_expected "${out}" ""
rc=$?
fi
log_test $rc 0 "Prefix route removed on link down"
@@ -1091,38 +1101,13 @@ check_route()
local pfx
local expected="$1"
local out
- local rc=0
set -- $expected
pfx=$1
[ "${pfx}" = "unreachable" ] && pfx=$2
out=$($IP ro ls match ${pfx})
- [ "${out}" = "${expected}" ] && return 0
-
- if [ -z "${out}" ]; then
- if [ "$VERBOSE" = "1" ]; then
- printf "\nNo route entry found\n"
- printf "Expected:\n"
- printf " ${expected}\n"
- fi
- return 1
- fi
-
- # tricky way to convert output to 1-line without ip's
- # messy '\'; this drops all extra white space
- out=$(echo ${out})
- if [ "${out}" != "${expected}" ]; then
- rc=1
- if [ "${VERBOSE}" = "1" ]; then
- printf " Unexpected route entry. Have:\n"
- printf " ${out}\n"
- printf " Expected:\n"
- printf " ${expected}\n\n"
- fi
- fi
-
- return $rc
+ check_expected "${out}" "${expected}"
}
# assumption is that basic add of a single path route works
@@ -1387,7 +1372,8 @@ ipv4_addr_metric_test()
run_cmd "$IP li set dev dummy2 down"
rc=$?
if [ $rc -eq 0 ]; then
- check_route ""
+ out=$($IP ro ls match 172.16.104.0/24)
+ check_expected "${out}" ""
rc=$?
fi
log_test $rc 0 "Prefix route removed on link down"
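The one-liner trick in the new check_expected() helper deserves a note: expanding ${out} unquoted makes the shell split it on all whitespace, including newlines, and echo re-joins the words with single spaces, so ip's multi-line output collapses to one line without resorting to ip's '\'-separated -o mode. A minimal illustration with hypothetical route text:

	out='unreachable 172.16.0.0/24
	          metric 256'
	out=$(echo ${out})	# word splitting + echo collapse all whitespace
	echo "$out"		# -> unreachable 172.16.0.0/24 metric 256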
diff --git a/tools/testing/selftests/net/run_afpackettests b/tools/testing/selftests/net/run_afpackettests
index 2dc95fda7ef7..ea5938ec009a 100755
--- a/tools/testing/selftests/net/run_afpackettests
+++ b/tools/testing/selftests/net/run_afpackettests
@@ -6,12 +6,14 @@ if [ $(id -u) != 0 ]; then
exit 0
fi
+ret=0
echo "--------------------"
echo "running psock_fanout test"
echo "--------------------"
./in_netns.sh ./psock_fanout
if [ $? -ne 0 ]; then
echo "[FAIL]"
+ ret=1
else
echo "[PASS]"
fi
@@ -22,6 +24,7 @@ echo "--------------------"
./in_netns.sh ./psock_tpacket
if [ $? -ne 0 ]; then
echo "[FAIL]"
+ ret=1
else
echo "[PASS]"
fi
@@ -32,6 +35,8 @@ echo "--------------------"
./in_netns.sh ./txring_overwrite
if [ $? -ne 0 ]; then
echo "[FAIL]"
+ ret=1
else
echo "[PASS]"
fi
+exit $ret
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index b093f39c298c..14e41faf2c57 100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -7,7 +7,7 @@ echo "--------------------"
./socket
if [ $? -ne 0 ]; then
echo "[FAIL]"
+ exit 1
else
echo "[PASS]"
fi
-
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index c9ff2b47bd1c..a37cb1192c6a 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for netfilter selftests
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh conntrack_icmp_related.sh
include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
new file mode 100755
index 000000000000..b48e1833bc89
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+#
+# check that ICMP df-needed/pkttoobig errors are set as RELATED
+# state
+#
+# Setup is:
+#
+# nsclient1 -> nsrouter1 -> nsrouter2 -> nsclient2
+# MTU 1500, except for nsrouter2 <-> nsclient2 link (1280).
+# ping nsclient2 from nsclient1, checking that conntrack sets RELATED
+# state for the 'fragmentation needed' icmp packet.
+#
+# In addition, nsrouter1 performs IP masquerading, i.e. we also
+# check that the icmp errors are propagated to the correct host as per
+# the nat state of the "established" icmp-echo "connection".
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+cleanup() {
+ for i in 1 2;do ip netns del nsclient$i;done
+ for i in 1 2;do ip netns del nsrouter$i;done
+}
+
+ipv4() {
+ echo -n 192.168.$1.2
+}
+
+ipv6 () {
+ echo -n dead:$1::2
+}
+
+check_counter()
+{
+ ns=$1
+ name=$2
+ expect=$3
+ local lret=0
+
+	ip netns exec $ns nft list counter inet filter "$name" | grep -q "$expect"
+ if [ $? -ne 0 ]; then
+ echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2
+ ip netns exec $ns nft list counter inet filter "$name" 1>&2
+ lret=1
+ fi
+
+ return $lret
+}
+
+check_unknown()
+{
+ expect="packets 0 bytes 0"
+ for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+ check_counter $n "unknown" "$expect"
+ if [ $? -ne 0 ] ;then
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+ ip netns add $n
+ ip -net $n link set lo up
+done
+
+DEV=veth0
+ip link add $DEV netns nsclient1 type veth peer name eth1 netns nsrouter1
+DEV=veth0
+ip link add $DEV netns nsclient2 type veth peer name eth1 netns nsrouter2
+
+DEV=veth0
+ip link add $DEV netns nsrouter1 type veth peer name eth2 netns nsrouter2
+
+DEV=veth0
+for i in 1 2; do
+ ip -net nsclient$i link set $DEV up
+ ip -net nsclient$i addr add $(ipv4 $i)/24 dev $DEV
+ ip -net nsclient$i addr add $(ipv6 $i)/64 dev $DEV
+done
+
+ip -net nsrouter1 link set eth1 up
+ip -net nsrouter1 link set veth0 up
+
+ip -net nsrouter2 link set eth1 up
+ip -net nsrouter2 link set eth2 up
+
+ip -net nsclient1 route add default via 192.168.1.1
+ip -net nsclient1 -6 route add default via dead:1::1
+
+ip -net nsclient2 route add default via 192.168.2.1
+ip -net nsclient2 -6 route add default via dead:2::1
+
+i=3
+ip -net nsrouter1 addr add 192.168.1.1/24 dev eth1
+ip -net nsrouter1 addr add 192.168.3.1/24 dev veth0
+ip -net nsrouter1 addr add dead:1::1/64 dev eth1
+ip -net nsrouter1 addr add dead:3::1/64 dev veth0
+ip -net nsrouter1 route add default via 192.168.3.10
+ip -net nsrouter1 -6 route add default via dead:3::10
+
+ip -net nsrouter2 addr add 192.168.2.1/24 dev eth1
+ip -net nsrouter2 addr add 192.168.3.10/24 dev eth2
+ip -net nsrouter2 addr add dead:2::1/64 dev eth1
+ip -net nsrouter2 addr add dead:3::10/64 dev eth2
+ip -net nsrouter2 route add default via 192.168.3.1
+ip -net nsrouter2 -6 route add default via dead:3::1
+
+sleep 2
+for i in 4 6; do
+ ip netns exec nsrouter1 sysctl -q net.ipv$i.conf.all.forwarding=1
+ ip netns exec nsrouter2 sysctl -q net.ipv$i.conf.all.forwarding=1
+done
+
+for netns in nsrouter1 nsrouter2; do
+ip netns exec $netns nft -f - <<EOF
+table inet filter {
+ counter unknown { }
+ counter related { }
+ chain forward {
+ type filter hook forward priority 0; policy accept;
+ meta l4proto icmpv6 icmpv6 type "packet-too-big" ct state "related" counter name "related" accept
+ meta l4proto icmp icmp type "destination-unreachable" ct state "related" counter name "related" accept
+ meta l4proto { icmp, icmpv6 } ct state new,established accept
+ counter name "unknown" drop
+ }
+}
+EOF
+done
+
+ip netns exec nsclient1 nft -f - <<EOF
+table inet filter {
+ counter unknown { }
+ counter related { }
+ chain input {
+ type filter hook input priority 0; policy accept;
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+ meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
+ counter name "unknown" drop
+ }
+}
+EOF
+
+ip netns exec nsclient2 nft -f - <<EOF
+table inet filter {
+ counter unknown { }
+ counter new { }
+ counter established { }
+
+ chain input {
+ type filter hook input priority 0; policy accept;
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+ meta l4proto { icmp, icmpv6 } ct state "new" counter name "new" accept
+ meta l4proto { icmp, icmpv6 } ct state "established" counter name "established" accept
+ counter name "unknown" drop
+ }
+ chain output {
+ type filter hook output priority 0; policy accept;
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+ meta l4proto { icmp, icmpv6 } ct state "new" counter name "new"
+ meta l4proto { icmp, icmpv6 } ct state "established" counter name "established"
+ counter name "unknown" drop
+ }
+}
+EOF
+
+
+# make sure NAT core rewrites address of icmp error if nat is used, according to
+# conntrack nat information (icmp error will be directed at nsrouter1 address,
+# but it needs to be routed to nsclient1 address).
+ip netns exec nsrouter1 nft -f - <<EOF
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ ip protocol icmp oifname "veth0" counter masquerade
+ }
+}
+table ip6 nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ ip6 nexthdr icmpv6 oifname "veth0" counter masquerade
+ }
+}
+EOF
+
+ip netns exec nsrouter2 ip link set eth1 mtu 1280
+ip netns exec nsclient2 ip link set veth0 mtu 1280
+sleep 1
+
+ip netns exec nsclient1 ping -c 1 -s 1000 -q -M do 192.168.2.2 >/dev/null
+if [ $? -ne 0 ]; then
+ echo "ERROR: netns ip routing/connectivity broken" 1>&2
+ cleanup
+ exit 1
+fi
+ip netns exec nsclient1 ping6 -q -c 1 -s 1000 dead:2::2 >/dev/null
+if [ $? -ne 0 ]; then
+ echo "ERROR: netns ipv6 routing/connectivity broken" 1>&2
+ cleanup
+ exit 1
+fi
+
+check_unknown
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+expect="packets 0 bytes 0"
+for netns in nsrouter1 nsrouter2 nsclient1;do
+ check_counter "$netns" "related" "$expect"
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+done
+
+expect="packets 2 bytes 2076"
+check_counter nsclient2 "new" "$expect"
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+ip netns exec nsclient1 ping -q -c 1 -s 1300 -M do 192.168.2.2 > /dev/null
+if [ $? -eq 0 ]; then
+ echo "ERROR: ping should have failed with PMTU too big error" 1>&2
+ ret=1
+fi
+
+# nsrouter2 should have generated the icmp error, so
+# related counter should be 0 (it's in the forward chain).
+expect="packets 0 bytes 0"
+check_counter "nsrouter2" "related" "$expect"
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+# but nsrouter1 should have seen it, same for nsclient1.
+expect="packets 1 bytes 576"
+for netns in nsrouter1 nsclient1;do
+ check_counter "$netns" "related" "$expect"
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+done
+
+ip netns exec nsclient1 ping6 -c 1 -s 1300 dead:2::2 > /dev/null
+if [ $? -eq 0 ]; then
+ echo "ERROR: ping6 should have failed with PMTU too big error" 1>&2
+ ret=1
+fi
+
+expect="packets 2 bytes 1856"
+for netns in nsrouter1 nsclient1;do
+ check_counter "$netns" "related" "$expect"
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+done
+
+if [ $ret -eq 0 ];then
+ echo "PASS: icmp mtu error had RELATED state"
+else
+ echo "ERROR: icmp error RELATED state test has failed"
+fi
+
+cleanup
+exit $ret
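Each check_counter() call above reduces to grepping nft's counter dump. For reference, the expected shape of that dump for a counter that saw one 576-byte icmp error (format per nft list counter; exact indentation may vary by version):

	$ ip netns exec nsrouter1 nft list counter inet filter related
	table inet filter {
		counter related {
			packets 1 bytes 576
		}
	}

so the script's grep -q "packets 1 bytes 576" matches the innermost line.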
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index 8ec76681605c..3194007cf8d1 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -321,6 +321,7 @@ EOF
test_masquerade6()
{
+ local natflags=$1
local lret=0
ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
@@ -354,13 +355,13 @@ ip netns exec ns0 nft -f - <<EOF
table ip6 nat {
chain postrouting {
type nat hook postrouting priority 0; policy accept;
- meta oif veth0 masquerade
+ meta oif veth0 masquerade $natflags
}
}
EOF
ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
if [ $? -ne 0 ] ; then
- echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+ echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags"
lret=1
fi
@@ -397,19 +398,26 @@ EOF
fi
done
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags (attempt 2)"
+ lret=1
+ fi
+
ip netns exec ns0 nft flush chain ip6 nat postrouting
if [ $? -ne 0 ]; then
echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
lret=1
fi
- test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+ test $lret -eq 0 && echo "PASS: IPv6 masquerade $natflags for ns2"
return $lret
}
test_masquerade()
{
+ local natflags=$1
local lret=0
ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
@@ -417,7 +425,7 @@ test_masquerade()
ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
if [ $? -ne 0 ] ; then
- echo "ERROR: canot ping ns1 from ns2"
+ echo "ERROR: cannot ping ns1 from ns2 $natflags"
lret=1
fi
@@ -443,13 +451,13 @@ ip netns exec ns0 nft -f - <<EOF
table ip nat {
chain postrouting {
type nat hook postrouting priority 0; policy accept;
- meta oif veth0 masquerade
+ meta oif veth0 masquerade $natflags
}
}
EOF
ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
if [ $? -ne 0 ] ; then
- echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+		echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags"
lret=1
fi
@@ -485,13 +493,19 @@ EOF
fi
done
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags (attempt 2)"
+ lret=1
+ fi
+
ip netns exec ns0 nft flush chain ip nat postrouting
if [ $? -ne 0 ]; then
echo "ERROR: Could not flush nat postrouting" 1>&2
lret=1
fi
- test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+ test $lret -eq 0 && echo "PASS: IP masquerade $natflags for ns2"
return $lret
}
@@ -750,8 +764,12 @@ test_local_dnat
test_local_dnat6
reset_counters
-test_masquerade
-test_masquerade6
+test_masquerade ""
+test_masquerade6 ""
+
+reset_counters
+test_masquerade "fully-random"
+test_masquerade6 "fully-random"
reset_counters
test_redirect
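With the new $natflags parameter the same test bodies now run twice, once with no flags and once with fully-random, an existing nft masquerade flag that fully randomizes source-port allocation. In the second pass the generated postrouting rule is simply the flagged variant of the one in the hunk above:

	table ip nat {
		chain postrouting {
			type nat hook postrouting priority 0; policy accept;
			meta oif veth0 masquerade fully-random
		}
	}

The tests only check that pings still succeed and the counters match with the flag enabled; they do not inspect the ports actually chosen.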
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
new file mode 100644
index 000000000000..deaf8073bc06
--- /dev/null
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -0,0 +1,6 @@
+CFLAGS += -g -I../../../../usr/include/
+
+TEST_GEN_PROGS := pidfd_test
+
+include ../lib.mk
+
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
new file mode 100644
index 000000000000..d59378a93782
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/types.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+
+static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
+ unsigned int flags)
+{
+ return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
+}
+
+static int signal_received;
+
+static void set_signal_received_on_sigusr1(int sig)
+{
+ if (sig == SIGUSR1)
+ signal_received = 1;
+}
+
+/*
+ * The most straightforward test of whether pidfd_send_signal() works is to
+ * send a signal to ourselves.
+ */
+static int test_pidfd_send_signal_simple_success(void)
+{
+ int pidfd, ret;
+ const char *test_name = "pidfd_send_signal send SIGUSR1";
+
+ pidfd = open("/proc/self", O_DIRECTORY | O_CLOEXEC);
+ if (pidfd < 0)
+ ksft_exit_fail_msg(
+ "%s test: Failed to open process file descriptor\n",
+ test_name);
+
+ signal(SIGUSR1, set_signal_received_on_sigusr1);
+
+ ret = sys_pidfd_send_signal(pidfd, SIGUSR1, NULL, 0);
+ close(pidfd);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s test: Failed to send signal\n",
+ test_name);
+
+ if (signal_received != 1)
+ ksft_exit_fail_msg("%s test: Failed to receive signal\n",
+ test_name);
+
+ signal_received = 0;
+ ksft_test_result_pass("%s test: Sent signal\n", test_name);
+ return 0;
+}
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (ret != pid)
+ goto again;
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static int test_pidfd_send_signal_exited_fail(void)
+{
+ int pidfd, ret, saved_errno;
+ char buf[256];
+ pid_t pid;
+ const char *test_name = "pidfd_send_signal signal exited process";
+
+ pid = fork();
+ if (pid < 0)
+ ksft_exit_fail_msg("%s test: Failed to create new process\n",
+ test_name);
+
+ if (pid == 0)
+ _exit(EXIT_SUCCESS);
+
+ snprintf(buf, sizeof(buf), "/proc/%d", pid);
+
+ pidfd = open(buf, O_DIRECTORY | O_CLOEXEC);
+
+ (void)wait_for_pid(pid);
+
+ if (pidfd < 0)
+ ksft_exit_fail_msg(
+ "%s test: Failed to open process file descriptor\n",
+ test_name);
+
+ ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
+ saved_errno = errno;
+ close(pidfd);
+ if (ret == 0)
+ ksft_exit_fail_msg(
+ "%s test: Managed to send signal to process even though it should have failed\n",
+ test_name);
+
+ if (saved_errno != ESRCH)
+ ksft_exit_fail_msg(
+ "%s test: Expected to receive ESRCH as errno value but received %d instead\n",
+ test_name, saved_errno);
+
+ ksft_test_result_pass("%s test: Failed to send signal as expected\n",
+ test_name);
+ return 0;
+}
+
+/*
+ * The kernel reserves 300 pids via RESERVED_PIDS in kernel/pid.c.
+ * That means that when the pid counter wraps around, any pid < 300
+ * will be skipped. So we need a pid > 300 in order to test recycling.
+ */
+#define PID_RECYCLE 1000
+
+/*
+ * Maximum number of cycles we allow. This is equivalent to PID_MAX_DEFAULT.
+ * If users set a higher limit or we have cycled PIDFD_MAX_DEFAULT number of
+ * times then we skip the test to not go into an infinite loop or block for a
+ * long time.
+ */
+#define PIDFD_MAX_DEFAULT 0x8000
+
+/*
+ * Define a few custom error codes for the child process to clearly indicate
+ * what is happening. This way we can tell the difference between a system
+ * error, a test error, etc.
+ */
+#define PIDFD_PASS 0
+#define PIDFD_FAIL 1
+#define PIDFD_ERROR 2
+#define PIDFD_SKIP 3
+#define PIDFD_XFAIL 4
+
+static int test_pidfd_send_signal_recycled_pid_fail(void)
+{
+ int i, ret;
+ pid_t pid1;
+ const char *test_name = "pidfd_send_signal signal recycled pid";
+
+ ret = unshare(CLONE_NEWPID);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s test: Failed to unshare pid namespace\n",
+ test_name);
+
+ ret = unshare(CLONE_NEWNS);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s test: Failed to unshare mount namespace\n",
+ test_name);
+
+ ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s test: Failed to remount / private\n",
+ test_name);
+
+ /* pid 1 in new pid namespace */
+ pid1 = fork();
+ if (pid1 < 0)
+ ksft_exit_fail_msg("%s test: Failed to create new process\n",
+ test_name);
+
+ if (pid1 == 0) {
+ char buf[256];
+ pid_t pid2;
+ int pidfd = -1;
+
+ (void)umount2("/proc", MNT_DETACH);
+ ret = mount("proc", "/proc", "proc", 0, NULL);
+ if (ret < 0)
+ _exit(PIDFD_ERROR);
+
+ /* grab pid PID_RECYCLE */
+ for (i = 0; i <= PIDFD_MAX_DEFAULT; i++) {
+ pid2 = fork();
+ if (pid2 < 0)
+ _exit(PIDFD_ERROR);
+
+ if (pid2 == 0)
+ _exit(PIDFD_PASS);
+
+ if (pid2 == PID_RECYCLE) {
+ snprintf(buf, sizeof(buf), "/proc/%d", pid2);
+ ksft_print_msg("pid to recycle is %d\n", pid2);
+ pidfd = open(buf, O_DIRECTORY | O_CLOEXEC);
+ }
+
+ if (wait_for_pid(pid2))
+ _exit(PIDFD_ERROR);
+
+ if (pid2 >= PID_RECYCLE)
+ break;
+ }
+
+ /*
+		 * We want to be as predictable as we can, so if we haven't
+		 * been able to grab pid PID_RECYCLE, skip the test.
+ */
+ if (pid2 != PID_RECYCLE) {
+ /* skip test */
+ close(pidfd);
+ _exit(PIDFD_SKIP);
+ }
+
+ if (pidfd < 0)
+ _exit(PIDFD_ERROR);
+
+ for (i = 0; i <= PIDFD_MAX_DEFAULT; i++) {
+ char c;
+ int pipe_fds[2];
+ pid_t recycled_pid;
+ int child_ret = PIDFD_PASS;
+
+ ret = pipe2(pipe_fds, O_CLOEXEC);
+ if (ret < 0)
+ _exit(PIDFD_ERROR);
+
+ recycled_pid = fork();
+ if (recycled_pid < 0)
+ _exit(PIDFD_ERROR);
+
+ if (recycled_pid == 0) {
+ close(pipe_fds[1]);
+ (void)read(pipe_fds[0], &c, 1);
+ close(pipe_fds[0]);
+
+ _exit(PIDFD_PASS);
+ }
+
+ /*
+ * Stop the child so we can inspect whether we have
+ * recycled pid PID_RECYCLE.
+ */
+ close(pipe_fds[0]);
+ ret = kill(recycled_pid, SIGSTOP);
+ close(pipe_fds[1]);
+ if (ret) {
+ (void)wait_for_pid(recycled_pid);
+ _exit(PIDFD_ERROR);
+ }
+
+ /*
+ * We have recycled the pid. Try to signal it. This
+ * needs to fail since this is a different process than
+ * the one the pidfd refers to.
+ */
+ if (recycled_pid == PID_RECYCLE) {
+ ret = sys_pidfd_send_signal(pidfd, SIGCONT,
+ NULL, 0);
+ if (ret && errno == ESRCH)
+ child_ret = PIDFD_XFAIL;
+ else
+ child_ret = PIDFD_FAIL;
+ }
+
+ /* let the process move on */
+ ret = kill(recycled_pid, SIGCONT);
+ if (ret)
+ (void)kill(recycled_pid, SIGKILL);
+
+ if (wait_for_pid(recycled_pid))
+ _exit(PIDFD_ERROR);
+
+ switch (child_ret) {
+ case PIDFD_FAIL:
+ /* fallthrough */
+ case PIDFD_XFAIL:
+ _exit(child_ret);
+ case PIDFD_PASS:
+ break;
+ default:
+ /* not reached */
+ _exit(PIDFD_ERROR);
+ }
+
+ /*
+ * If the user set a custom pid_max limit we could be
+ * in the millions.
+ * Skip the test in this case.
+ */
+ if (recycled_pid > PIDFD_MAX_DEFAULT)
+ _exit(PIDFD_SKIP);
+ }
+
+ /* failed to recycle pid */
+ _exit(PIDFD_SKIP);
+ }
+
+ ret = wait_for_pid(pid1);
+ switch (ret) {
+ case PIDFD_FAIL:
+ ksft_exit_fail_msg(
+ "%s test: Managed to signal recycled pid %d\n",
+ test_name, PID_RECYCLE);
+ case PIDFD_PASS:
+ ksft_exit_fail_msg("%s test: Failed to recycle pid %d\n",
+ test_name, PID_RECYCLE);
+ case PIDFD_SKIP:
+ ksft_print_msg("%s test: Skipping test\n", test_name);
+ ret = 0;
+ break;
+ case PIDFD_XFAIL:
+ ksft_test_result_pass(
+ "%s test: Failed to signal recycled pid as expected\n",
+ test_name);
+ ret = 0;
+ break;
+ default /* PIDFD_ERROR */:
+ ksft_exit_fail_msg("%s test: Error while running tests\n",
+ test_name);
+ }
+
+ return ret;
+}
+
+static int test_pidfd_send_signal_syscall_support(void)
+{
+ int pidfd, ret;
+ const char *test_name = "pidfd_send_signal check for support";
+
+ pidfd = open("/proc/self", O_DIRECTORY | O_CLOEXEC);
+ if (pidfd < 0)
+ ksft_exit_fail_msg(
+ "%s test: Failed to open process file descriptor\n",
+ test_name);
+
+ ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
+ if (ret < 0) {
+ /*
+ * pidfd_send_signal() will currently return ENOSYS when
+ * CONFIG_PROC_FS is not set.
+ */
+ if (errno == ENOSYS)
+ ksft_exit_skip(
+ "%s test: pidfd_send_signal() syscall not supported (Ensure that CONFIG_PROC_FS=y is set)\n",
+ test_name);
+
+ ksft_exit_fail_msg("%s test: Failed to send signal\n",
+ test_name);
+ }
+
+ close(pidfd);
+ ksft_test_result_pass(
+ "%s test: pidfd_send_signal() syscall is supported. Tests can be executed\n",
+ test_name);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ ksft_print_header();
+
+ test_pidfd_send_signal_syscall_support();
+ test_pidfd_send_signal_simple_success();
+ test_pidfd_send_signal_exited_fail();
+ test_pidfd_send_signal_recycled_pid_fail();
+
+ return ksft_exit_pass();
+}
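A usage sketch for running the new suite standalone (standard selftests workflow, not part of the patch):

	$ cd tools/testing/selftests/pidfd
	$ make
	$ sudo ./pidfd_test

Note that the recycled-pid test calls unshare(CLONE_NEWPID) and unshare(CLONE_NEWNS), which require CAP_SYS_ADMIN, so the binary should run as root; otherwise the unshare calls fail and the run is reported as a failure rather than a skip.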
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c
index bbe8150d18aa..853aa164a401 100644
--- a/tools/testing/selftests/proc/proc-pid-vm.c
+++ b/tools/testing/selftests/proc/proc-pid-vm.c
@@ -29,6 +29,7 @@
#include <errno.h>
#include <sched.h>
#include <signal.h>
+#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
@@ -36,11 +37,14 @@
#include <sys/mount.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/wait.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <linux/kdev_t.h>
+#include <sys/time.h>
+#include <sys/resource.h>
static inline long sys_execveat(int dirfd, const char *pathname, char **argv, char **envp, int flags)
{
@@ -183,8 +187,8 @@ static int make_exe(const uint8_t *payload, size_t len)
ph.p_offset = 0;
ph.p_vaddr = VADDR;
ph.p_paddr = 0;
- ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
- ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
+ ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
+ ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
ph.p_align = 4096;
fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_EXCL|O_TMPFILE, 0700);
@@ -205,12 +209,44 @@ static int make_exe(const uint8_t *payload, size_t len)
}
#endif
+static bool g_vsyscall = false;
+
+static const char str_vsyscall[] =
+"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n";
+
#ifdef __x86_64__
+/*
+ * The vsyscall page can't be unmapped, so probe it with a memory load.
+ */
+static void vsyscall(void)
+{
+ pid_t pid;
+ int wstatus;
+
+ pid = fork();
+ if (pid < 0) {
+ fprintf(stderr, "fork, errno %d\n", errno);
+ exit(1);
+ }
+ if (pid == 0) {
+ struct rlimit rlim = {0, 0};
+ (void)setrlimit(RLIMIT_CORE, &rlim);
+ *(volatile int *)0xffffffffff600000UL;
+ exit(0);
+ }
+ wait(&wstatus);
+ if (WIFEXITED(wstatus)) {
+ g_vsyscall = true;
+ }
+}
+
int main(void)
{
int pipefd[2];
int exec_fd;
+ vsyscall();
+
atexit(ate);
make_private_tmp();
@@ -261,9 +297,9 @@ int main(void)
snprintf(buf0 + MAPS_OFFSET, sizeof(buf0) - MAPS_OFFSET,
"/tmp/#%llu (deleted)\n", (unsigned long long)st.st_ino);
-
/* Test /proc/$PID/maps */
{
+ const size_t len = strlen(buf0) + (g_vsyscall ? strlen(str_vsyscall) : 0);
char buf[256];
ssize_t rv;
int fd;
@@ -274,13 +310,16 @@ int main(void)
return 1;
}
rv = read(fd, buf, sizeof(buf));
- assert(rv == strlen(buf0));
+ assert(rv == len);
assert(memcmp(buf, buf0, strlen(buf0)) == 0);
+ if (g_vsyscall) {
+ assert(memcmp(buf + strlen(buf0), str_vsyscall, strlen(str_vsyscall)) == 0);
+ }
}
/* Test /proc/$PID/smaps */
{
- char buf[1024];
+ char buf[4096];
ssize_t rv;
int fd;
@@ -319,6 +358,10 @@ int main(void)
for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
assert(memmem(buf, rv, S[i], strlen(S[i])));
}
+
+ if (g_vsyscall) {
+ assert(memmem(buf, rv, str_vsyscall, strlen(str_vsyscall)));
+ }
}
/* Test /proc/$PID/smaps_rollup */
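The vsyscall() probe mirrors what can be checked by hand; on kernels with the vsyscall page present, every maps file ends with the fixed mapping the test now accounts for:

	$ grep vsyscall /proc/self/maps
	ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]

(permission bits depend on the vsyscall= boot mode; the test's expected string assumes the r-xp emulate mode). If the child's load from 0xffffffffff600000 faults instead, the child dies on SIGSEGV, WIFEXITED() is false, and g_vsyscall stays false, so the extra line is not expected.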
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 762cb01f2ca7..47b7473dedef 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -46,12 +46,9 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
int main(void)
{
- const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
-#ifdef __arm__
- unsigned long va = 2 * PAGE_SIZE;
-#else
- unsigned long va = 0;
-#endif
+ const int PAGE_SIZE = sysconf(_SC_PAGESIZE);
+ const unsigned long va_max = 1UL << 32;
+ unsigned long va;
void *p;
int fd;
unsigned long a, b;
@@ -60,10 +57,13 @@ int main(void)
if (fd == -1)
return 1;
- p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
- if (p == MAP_FAILED) {
- if (errno == EPERM)
- return 4;
+ for (va = 0; va < va_max; va += PAGE_SIZE) {
+ p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
+ if (p == (void *)va)
+ break;
+ }
+ if (va == va_max) {
+ fprintf(stderr, "error: mmap doesn't like you\n");
return 1;
}
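The rewritten loop works around vm.mmap_min_addr: mapping at virtual address 0 is normally refused, so the test now probes upward one page at a time until a MAP_FIXED mapping succeeds, instead of hardcoding address 0 (or two pages on ARM). The lower bound being skipped over can be inspected with:

	$ sysctl vm.mmap_min_addr
	vm.mmap_min_addr = 65536

(65536 is just a common default; the exact value is distro-configurable).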
diff --git a/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh
index 43540f1828cc..2deea2169fc2 100755
--- a/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh
+++ b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Extract the number of CPUs expected from the specified Kconfig-file
# fragment by checking CONFIG_SMP and CONFIG_NR_CPUS. If the specified
@@ -7,23 +8,9 @@
#
# Usage: configNR_CPUS.sh config-frag
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
cf=$1
if test ! -r $cf
diff --git a/tools/testing/selftests/rcutorture/bin/config_override.sh b/tools/testing/selftests/rcutorture/bin/config_override.sh
index ef7fcbac3d42..90016c359e83 100755
--- a/tools/testing/selftests/rcutorture/bin/config_override.sh
+++ b/tools/testing/selftests/rcutorture/bin/config_override.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# config_override.sh base override
#
@@ -6,23 +7,9 @@
# that conflict with any in override, concatenating what remains and
# sending the result to standard output.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2017
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
base=$1
if test -r $base
diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh
index 197deece7c7c..31584cee84d7 100755
--- a/tools/testing/selftests/rcutorture/bin/configcheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh
@@ -1,23 +1,11 @@
#!/bin/bash
-# Usage: configcheck.sh .config .config-template
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0+
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
+# Usage: configcheck.sh .config .config-template
#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
T=${TMPDIR-/tmp}/abat-chk-config.sh.$$
trap 'rm -rf $T' 0
@@ -26,6 +14,7 @@ mkdir $T
cat $1 > $T/.config
cat $2 | sed -e 's/\(.*\)=n/# \1 is not set/' -e 's/^#CHECK#//' |
+grep -v '^CONFIG_INITRAMFS_SOURCE' |
awk '
{
print "if grep -q \"" $0 "\" < '"$T/.config"'";
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 65541c21a544..40359486b3a8 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Usage: configinit.sh config-spec-file build-output-dir results-dir
#
@@ -14,23 +15,9 @@
# for example, "O=/tmp/foo". If this argument is omitted, the .config
# file will be generated directly in the current directory.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
T=${TMPDIR-/tmp}/configinit.sh.$$
trap 'rm -rf $T' 0
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
index bb99cde3f5f9..ff7102212703 100755
--- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh
+++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Get an estimate of how CPU-hoggy to be.
#
# Usage: cpus2use.sh
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
idlecpus=`mpstat | tail -1 | \
diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh
index 65f6655026f0..6bcb8b5b2ff2 100644
--- a/tools/testing/selftests/rcutorture/bin/functions.sh
+++ b/tools/testing/selftests/rcutorture/bin/functions.sh
@@ -1,24 +1,11 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Shell functions for the rest of the scripts.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
# bootparam_hotplug_cpu bootparam-string
#
diff --git a/tools/testing/selftests/rcutorture/bin/jitter.sh b/tools/testing/selftests/rcutorture/bin/jitter.sh
index 3633828375e3..435b60933985 100755
--- a/tools/testing/selftests/rcutorture/bin/jitter.sh
+++ b/tools/testing/selftests/rcutorture/bin/jitter.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Alternate sleeping and spinning on randomly selected CPUs. The purpose
# of this script is to inflict random OS jitter on a concurrently running
@@ -11,23 +12,9 @@
# sleepmax: Maximum microseconds to sleep, defaults to one second.
# spinmax: Maximum microseconds to spin, defaults to one millisecond.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2016
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
me=$(($1 * 1000))
duration=$2
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 9115fcdb5617..c27a0bbb9c02 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Build a kvm-ready Linux kernel from the tree in the current directory.
#
# Usage: kvm-build.sh config-template build-dir resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
config_template=${1}
if test -z "$config_template" -o ! -f "$config_template" -o ! -r "$config_template"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
index 98f650c9bf54..8426fe1f15ee 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
#
# Invoke a text editor on all console.log files for all runs with diagnostics,
# that is, on all such files having a console.log.diags counterpart.
@@ -10,6 +11,10 @@
#
# The "directory" above should end with the date/time directory, for example,
# "tools/testing/selftests/rcutorture/res/2018.02.25-14:27:27".
+#
+# Copyright (C) IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
rundir="${1}"
if test -z "$rundir" -o ! -d "$rundir"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
index 2de92f43ee8c..f3a7a5e2b89d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Analyze a given results directory for locktorture progress.
#
# Usage: kvm-recheck-lock.sh resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2014
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
i="$1"
if test -d "$i" -a -r "$i"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 0fa8a61ccb7b..2a7f3f4756a7 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Analyze a given results directory for rcutorture progress.
#
# Usage: kvm-recheck-rcu.sh resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2014
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
i="$1"
if test -d "$i" -a -r "$i"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
index 8948f7926b21..7d3c2be66c64 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Analyze a given results directory for rcuperf performance measurements,
# looking for ftrace data. Exits with 0 if data was found, analyzed, and
@@ -7,23 +8,9 @@
#
# Usage: kvm-recheck-rcuperf-ftrace.sh resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2016
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
i="$1"
. functions.sh
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
index ccebf772fa1e..db0375a57f28 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Analyze a given results directory for rcuperf performance measurements.
#
# Usage: kvm-recheck-rcuperf.sh resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2016
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
i="$1"
if test -d "$i" -a -r "$i"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index c9bab57a77eb..2adde6aaafdb 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Given the results directories for previous KVM-based torture runs,
# check the build and console output for errors. Given a directory
@@ -6,23 +7,9 @@
#
# Usage: kvm-recheck.sh resdir ...
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
. functions.sh
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 58ca758a5786..0eb1ec16d78a 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Run a kvm-based test of the specified tree on the specified configs.
# Fully automated run and error checking, no graphics console.
@@ -20,23 +21,9 @@
#
# More sophisticated argument parsing is clearly needed.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
trap 'rm -rf $T' 0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 19864f1cb27a..8f1e337b9b54 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Run a series of tests under KVM. By default, this series is specified
# by the relevant CFLIST file, but can be overridden by the --configs
@@ -6,23 +7,9 @@
#
# Usage: kvm.sh [ options ]
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
scriptname=$0
args="$*"
diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
index 83552bb007b4..6fa9bd1ddc09 100755
--- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
@@ -1,21 +1,8 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Create an initrd directory if one does not already exist.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
# Author: Connor Shu <Connor.Shu@ibm.com>
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index 24fe5f822b28..0701b3bf6ade 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Check the build output from an rcutorture run for goodness.
# The "file" is a pathname on the local system, and "title" is
@@ -8,23 +9,9 @@
#
# Usage: parse-build.sh file title
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
F=$1
title=$2
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index 84933f6aed77..4508373a922f 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Check the console output from an rcutorture run for oopses.
# The "file" is a pathname on the local system, and "title" is
@@ -6,23 +7,9 @@
#
# Usage: parse-console.sh file title
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
T=${TMPDIR-/tmp}/parse-console.sh.$$
file="$1"
diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
index 80eb646e1319..d3e4b2971f92 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
@@ -1,24 +1,11 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Kernel-version-dependent shell functions for the rest of the scripts.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2014
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
# locktorture_param_onoff bootparam-string config-file
#
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index 7bab8246392b..effa415f9b92 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -1,24 +1,11 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Kernel-version-dependent shell functions for the rest of the scripts.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
# rcutorture_param_n_barrier_cbs bootparam-string
#
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
index d36b8fd6f0fc..777d5b0c190f 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
@@ -1,24 +1,11 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Torture-suite-dependent shell functions for the rest of the scripts.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2015
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
# per_version_boot_params bootparam-string config-file seconds
#
diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h
index 1069e85258ce..0afdf7957974 100644
--- a/tools/testing/selftests/rseq/rseq-s390.h
+++ b/tools/testing/selftests/rseq/rseq-s390.h
@@ -1,6 +1,13 @@
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
-#define RSEQ_SIG 0x53053053
+/*
+ * RSEQ_SIG uses the trap4 instruction. As Linux does not make use of the
+ * access-register mode nor the linkage stack this instruction will always
+ * cause a special-operation exception (the trap-enabled bit in the DUCT
+ * is and will stay 0). The instruction pattern is
+ * b2 ff 0f ff trap4 4095(%r0)
+ */
+#define RSEQ_SIG 0xB2FF0FFF
#define rseq_smp_mb() __asm__ __volatile__ ("bcr 15,0" ::: "memory")
#define rseq_smp_rmb() rseq_smp_mb()
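The new signature is chosen so that its big-endian byte layout is exactly the trap4 encoding quoted in the comment. A minimal Python sketch verifying that mapping:

    import struct

    RSEQ_SIG = 0xB2FF0FFF
    # s390 stores instructions big-endian, so the in-memory bytes are
    # b2 ff 0f ff -- the "trap4 4095(%r0)" pattern described above.
    assert struct.pack('>I', RSEQ_SIG) == b'\xb2\xff\x0f\xff'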
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index c72eb70f9b52..6c1126e7f685 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -16,7 +16,6 @@
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
-#include <sched.h>
#include <linux/rseq.h>
/*
diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh
index 3acd6d75ff9f..e426304fd4a0 100755
--- a/tools/testing/selftests/rseq/run_param_test.sh
+++ b/tools/testing/selftests/rseq/run_param_test.sh
@@ -1,6 +1,8 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0+ or MIT
+NR_CPUS=`grep '^processor' /proc/cpuinfo | wc -l`
+
EXTRA_ARGS=${@}
OLDIFS="$IFS"
@@ -28,15 +30,16 @@ IFS="$OLDIFS"
REPS=1000
SLOW_REPS=100
+NR_THREADS=$((6*${NR_CPUS}))
function do_tests()
{
local i=0
while [ "$i" -lt "${#TEST_LIST[@]}" ]; do
echo "Running test ${TEST_NAME[$i]}"
- ./param_test ${TEST_LIST[$i]} -r ${REPS} ${@} ${EXTRA_ARGS} || exit 1
+ ./param_test ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1
echo "Running compare-twice test ${TEST_NAME[$i]}"
- ./param_test_compare_twice ${TEST_LIST[$i]} -r ${REPS} ${@} ${EXTRA_ARGS} || exit 1
+ ./param_test_compare_twice ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1
let "i++"
done
}
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index f69d2ee29742..0fad0dc62338 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -2166,11 +2166,14 @@ TEST(detect_seccomp_filter_flags)
SECCOMP_FILTER_FLAG_LOG,
SECCOMP_FILTER_FLAG_SPEC_ALLOW,
SECCOMP_FILTER_FLAG_NEW_LISTENER };
- unsigned int flag, all_flags;
+ unsigned int exclusive[] = {
+ SECCOMP_FILTER_FLAG_TSYNC,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER };
+ unsigned int flag, all_flags, exclusive_mask;
int i;
long ret;
- /* Test detection of known-good filter flags */
+ /* Test detection of individual known-good filter flags */
for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
int bits = 0;
@@ -2197,16 +2200,29 @@ TEST(detect_seccomp_filter_flags)
all_flags |= flag;
}
- /* Test detection of all known-good filter flags */
- ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
- EXPECT_EQ(-1, ret);
- EXPECT_EQ(EFAULT, errno) {
- TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
- all_flags);
+ /*
+ * Test detection of all known-good filter flags combined. But
+ * for the exclusive flags we need to mask them out and try them
+ * individually for the "all flags" testing.
+ */
+ exclusive_mask = 0;
+ for (i = 0; i < ARRAY_SIZE(exclusive); i++)
+ exclusive_mask |= exclusive[i];
+ for (i = 0; i < ARRAY_SIZE(exclusive); i++) {
+ flag = all_flags & ~exclusive_mask;
+ flag |= exclusive[i];
+
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EFAULT, errno) {
+ TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
+ flag);
+ }
}
- /* Test detection of an unknown filter flag */
+	/* Test detection of unknown filter flags, without exclusives. */
flag = -1;
+ flag &= ~exclusive_mask;
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno) {
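A minimal Python sketch of the combination logic exercised above, using illustrative bit values rather than the real <linux/seccomp.h> constants: each pass keeps every non-exclusive flag and exactly one exclusive flag.

    # Illustrative values only; the real flags come from <linux/seccomp.h>.
    TSYNC, LOG, SPEC_ALLOW, NEW_LISTENER = 1, 2, 4, 8
    flags = [TSYNC, LOG, SPEC_ALLOW, NEW_LISTENER]
    exclusive = [TSYNC, NEW_LISTENER]

    all_flags = 0
    for f in flags:
        all_flags |= f
    exclusive_mask = 0
    for f in exclusive:
        exclusive_mask |= f

    # Mirrors the loop above: mask out both exclusive flags, then
    # add back one of them per iteration.
    combos = [(all_flags & ~exclusive_mask) | f for f in exclusive]
    assert combos == [LOG | SPEC_ALLOW | TSYNC,
                      LOG | SPEC_ALLOW | NEW_LISTENER]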
@@ -3079,9 +3095,9 @@ TEST(user_notification_basic)
/* Check that we get -ENOSYS with no listener attached */
if (pid == 0) {
- if (user_trap_syscall(__NR_getpid, 0) < 0)
+ if (user_trap_syscall(__NR_getppid, 0) < 0)
exit(1);
- ret = syscall(__NR_getpid);
+ ret = syscall(__NR_getppid);
exit(ret >= 0 || errno != ENOSYS);
}
@@ -3096,12 +3112,12 @@ TEST(user_notification_basic)
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
/* Check that the basic notification machinery works */
- listener = user_trap_syscall(__NR_getpid,
+ listener = user_trap_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
/* Installing a second listener in the chain should EBUSY */
- EXPECT_EQ(user_trap_syscall(__NR_getpid,
+ EXPECT_EQ(user_trap_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER),
-1);
EXPECT_EQ(errno, EBUSY);
@@ -3110,7 +3126,7 @@ TEST(user_notification_basic)
ASSERT_GE(pid, 0);
if (pid == 0) {
- ret = syscall(__NR_getpid);
+ ret = syscall(__NR_getppid);
exit(ret != USER_NOTIF_MAGIC);
}
@@ -3128,7 +3144,7 @@ TEST(user_notification_basic)
EXPECT_GT(poll(&pollfd, 1, -1), 0);
EXPECT_EQ(pollfd.revents, POLLOUT);
- EXPECT_EQ(req.data.nr, __NR_getpid);
+ EXPECT_EQ(req.data.nr, __NR_getppid);
resp.id = req.id;
resp.error = 0;
@@ -3160,7 +3176,7 @@ TEST(user_notification_kill_in_middle)
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
- listener = user_trap_syscall(__NR_getpid,
+ listener = user_trap_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
@@ -3172,7 +3188,7 @@ TEST(user_notification_kill_in_middle)
ASSERT_GE(pid, 0);
if (pid == 0) {
- ret = syscall(__NR_getpid);
+ ret = syscall(__NR_getppid);
exit(ret != USER_NOTIF_MAGIC);
}
@@ -3282,7 +3298,7 @@ TEST(user_notification_closed_listener)
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
- listener = user_trap_syscall(__NR_getpid,
+ listener = user_trap_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
@@ -3293,7 +3309,7 @@ TEST(user_notification_closed_listener)
ASSERT_GE(pid, 0);
if (pid == 0) {
close(listener);
- ret = syscall(__NR_getpid);
+ ret = syscall(__NR_getppid);
exit(ret != -1 && errno != ENOSYS);
}
@@ -3316,14 +3332,15 @@ TEST(user_notification_child_pid_ns)
ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0);
- listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ listener = user_trap_syscall(__NR_getppid,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0)
- exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC);
+ exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
EXPECT_EQ(req.pid, pid);
@@ -3355,7 +3372,8 @@ TEST(user_notification_sibling_pid_ns)
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
- listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ listener = user_trap_syscall(__NR_getppid,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
@@ -3368,7 +3386,7 @@ TEST(user_notification_sibling_pid_ns)
ASSERT_GE(pid2, 0);
if (pid2 == 0)
- exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC);
+ exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
EXPECT_EQ(true, WIFEXITED(status));
@@ -3377,11 +3395,11 @@ TEST(user_notification_sibling_pid_ns)
}
/* Create the sibling ns, and sibling in it. */
- EXPECT_EQ(unshare(CLONE_NEWPID), 0);
- EXPECT_EQ(errno, 0);
+ ASSERT_EQ(unshare(CLONE_NEWPID), 0);
+ ASSERT_EQ(errno, 0);
pid2 = fork();
- EXPECT_GE(pid2, 0);
+ ASSERT_GE(pid2, 0);
if (pid2 == 0) {
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
@@ -3389,7 +3407,7 @@ TEST(user_notification_sibling_pid_ns)
* The pid should be 0, i.e. the task is in some namespace that
* we can't "see".
*/
- ASSERT_EQ(req.pid, 0);
+ EXPECT_EQ(req.pid, 0);
resp.id = req.id;
resp.error = 0;
@@ -3419,14 +3437,15 @@ TEST(user_notification_fault_recv)
ASSERT_EQ(unshare(CLONE_NEWUSER), 0);
- listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ listener = user_trap_syscall(__NR_getppid,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0)
- exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC);
+ exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
/* Do a bad recv() */
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1);
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index 584eb8ea780a..780ce7123374 100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -290,6 +290,58 @@ run_numerictests()
test_rc
}
+check_failure()
+{
+ echo -n "Testing that $1 fails as expected..."
+ reset_vals
+ TEST_STR="$1"
+ orig="$(cat $TARGET)"
+ echo -n "$TEST_STR" > $TARGET 2> /dev/null
+
+ # write should fail and $TARGET should retain its original value
+ if [ $? = 0 ] || [ "$(cat $TARGET)" != "$orig" ]; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+}
+
+run_wideint_tests()
+{
+ # sysctl conversion functions receive a boolean sign and ulong
+ # magnitude; here we list the magnitudes we want to test (each of
+ # which will be tested in both positive and negative forms). Since
+ # none of these values fit in 32 bits, writing them to an int- or
+ # uint-typed sysctl should fail.
+ local magnitudes=(
+ # common boundary-condition values (zero, +1, -1, INT_MIN,
+ # and INT_MAX respectively) if truncated to lower 32 bits
+ # (potential for being falsely deemed in range)
+ 0x0000000100000000
+ 0x0000000100000001
+ 0x00000001ffffffff
+ 0x0000000180000000
+ 0x000000017fffffff
+
+ # these look like negatives, but without a leading '-' are
+ # actually large positives (should be rejected as above
+ # despite being zero/+1/-1/INT_MIN/INT_MAX in the lower 32)
+ 0xffffffff00000000
+ 0xffffffff00000001
+ 0xffffffffffffffff
+ 0xffffffff80000000
+ 0xffffffff7fffffff
+ )
+
+ for sign in '' '-'; do
+ for mag in "${magnitudes[@]}"; do
+ check_failure "${sign}${mag}"
+ done
+ done
+}
+
# Your test must accept digits 3 and 4 to use this
run_limit_digit()
{
@@ -556,6 +608,7 @@ sysctl_test_0001()
TEST_STR=$(( $ORIG + 1 ))
run_numerictests
+ run_wideint_tests
run_limit_digit
}
@@ -580,6 +633,7 @@ sysctl_test_0003()
TEST_STR=$(( $ORIG + 1 ))
run_numerictests
+ run_wideint_tests
run_limit_digit
run_limit_digit_int
}
@@ -592,6 +646,7 @@ sysctl_test_0004()
TEST_STR=$(( $ORIG + 1 ))
run_numerictests
+ run_wideint_tests
run_limit_digit
run_limit_digit_uint
}
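The magnitudes in run_wideint_tests() are chosen so that a buggy 32-bit truncation would map them onto perfectly ordinary values. A small Python sketch of that failure mode:

    import ctypes

    for mag in (0x0000000100000000, 0x0000000100000001,
                0x00000001ffffffff, 0x0000000180000000,
                0x000000017fffffff):
        low = ctypes.c_int32(mag & 0xffffffff).value
        print('%#018x -> %d' % (mag, low))
    # Prints 0, 1, -1, -2147483648 and 2147483647: exactly the
    # zero/+1/-1/INT_MIN/INT_MAX set the comments above describe, which
    # is why a correct int sysctl handler must reject the full value.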
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
index 5970cee6d05f..b074ea9b6fe8 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
@@ -286,5 +286,30 @@
"teardown": [
"$TC action flush action bpf"
]
+ },
+ {
+ "id": "b8a1",
+ "name": "Replace bpf action with invalid goto_chain control",
+ "category": [
+ "actions",
+ "bpf"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action bpf",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC action add action bpf bytecode '1,6 0 0 4294967295' pass index 90"
+ ],
+ "cmdUnderTest": "$TC action replace action bpf bytecode '1,6 0 0 4294967295' goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC action list action bpf",
+ "matchPattern": "action order [0-9]*: bpf.* default-action pass.*index 90",
+ "matchCount": "1",
+ "teardown": [
+ "$TC action flush action bpf"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
index 13147a1f5731..cadde8f41fcd 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
@@ -287,5 +287,30 @@
"teardown": [
"$TC actions flush action connmark"
]
+ },
+ {
+ "id": "c506",
+ "name": "Replace connmark with invalid goto chain control",
+ "category": [
+ "actions",
+ "connmark"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action connmark",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action connmark pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action connmark goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action connmark index 90",
+ "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action connmark"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
index a022792d392a..ddabb2fbb7c7 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
@@ -500,5 +500,30 @@
"matchPattern": "^[ \t]+index [0-9]+ ref",
"matchCount": "0",
"teardown": []
+ },
+ {
+ "id": "d128",
+ "name": "Replace csum action with invalid goto chain control",
+ "category": [
+ "actions",
+ "csum"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action csum",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action csum iph index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action csum iph goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action csum index 90",
+ "matchPattern": "action order [0-9]*: csum \\(iph\\) action pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action csum"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
index 89189a03ce3d..814b7a8a478b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -560,5 +560,30 @@
"teardown": [
"$TC actions flush action gact"
]
+ },
+ {
+ "id": "ca89",
+ "name": "Replace gact action with invalid goto chain control",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action pass random determ drop 2 index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action goto chain 42 random determ drop 5 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action pass.*random type determ drop val 2.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 0da3545cabdb..c13a68b98fc7 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -1060,5 +1060,30 @@
"matchPattern": "action order [0-9]*: ife encode action pipe.*allow prio.*index 4",
"matchCount": "0",
"teardown": []
+ },
+ {
+ "id": "a0e2",
+ "name": "Replace ife encode action with invalid goto chain control",
+ "category": [
+ "actions",
+ "ife"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action ife",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action ife encode allow mark pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action ife encode allow mark goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action ife index 90",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E .*allow mark.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action ife"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
index db49fd0f8445..6e5fb3d25681 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -434,5 +434,30 @@
"teardown": [
"$TC actions flush action mirred"
]
+ },
+ {
+ "id": "2a9a",
+ "name": "Replace mirred action with invalid goto chain control",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action mirred ingress mirror dev lo drop index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action mirred ingress mirror dev lo goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action mirred index 90",
+ "matchPattern": "action order [0-9]*: mirred \\(Ingress Mirror to device lo\\) drop.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
index 0080dc2fd41c..bc12c1ccad30 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
@@ -589,5 +589,30 @@
"teardown": [
"$TC actions flush action nat"
]
+ },
+ {
+ "id": "4b12",
+ "name": "Replace nat action with invalid goto chain control",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 drop index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action nat ingress 1.18.1.1 1.18.2.2 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action nat index 90",
+ "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 drop.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
new file mode 100644
index 000000000000..b73ceb9e28b1
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
@@ -0,0 +1,51 @@
+[
+ {
+ "id": "319a",
+ "name": "Add pedit action that mangles IP TTL",
+ "category": [
+ "actions",
+ "pedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip ttl set 10",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*index 1 ref.*key #0 at ipv4\\+8: val 0a000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "7e67",
+ "name": "Replace pedit action with invalid goto chain",
+ "category": [
+ "actions",
+ "pedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action pedit ex munge ip ttl set 10 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action pedit ex munge ip ttl set 10 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions ls action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*index 90 ref.*key #0 at ipv4\\+8: val 0a000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ }
+]
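These JSON cases follow the tdc schema used throughout this directory. A simplified Python sketch of how a runner could evaluate one case (the real tdc also honors the per-command accepted exit codes in the nested setup entries):

    import re
    import subprocess

    def run_case(case, tc='tc'):
        def sh(cmd):
            return subprocess.call(cmd.replace('$TC', tc), shell=True)

        for step in case['setup']:
            sh(step[0] if isinstance(step, list) else step)
        assert sh(case['cmdUnderTest']) == int(case['expExitCode'])
        out = subprocess.check_output(
            case['verifyCmd'].replace('$TC', tc), shell=True)
        found = re.findall(case['matchPattern'], out.decode())
        assert len(found) == int(case['matchCount'])
        for step in case['teardown']:
            sh(step)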
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index 4086a50a670e..b8268da5adaa 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -739,5 +739,30 @@
"teardown": [
"$TC actions flush action police"
]
+ },
+ {
+ "id": "689e",
+ "name": "Replace police action with invalid goto chain control",
+ "category": [
+ "actions",
+ "police"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action police",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action police rate 3mbit burst 250k drop index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action police rate 3mbit burst 250k goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action police index 90",
+ "matchPattern": "action order [0-9]*: police 0x5a rate 3Mbit burst 250Kb mtu 2Kb action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action police"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
index 3aca33c00039..ddabb160a11b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
@@ -144,6 +144,30 @@
]
},
{
+ "id": "7571",
+ "name": "Add sample action with invalid rate",
+ "category": [
+ "actions",
+ "sample"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action sample",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action sample index 2",
+ "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action sample"
+ ]
+ },
+ {
"id": "b6d4",
"name": "Add sample action with mandatory arguments and invalid control action",
"category": [
@@ -584,5 +608,30 @@
"teardown": [
"$TC actions flush action sample"
]
+ },
+ {
+ "id": "0a6e",
+ "name": "Replace sample action with invalid goto chain control",
+ "category": [
+ "actions",
+ "sample"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action sample",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action sample rate 1024 group 4 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action sample rate 1024 group 7 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action sample",
+ "matchPattern": "action order [0-9]+: sample rate 1/1024 group 4 pass.*index 90",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action sample"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
index e89a7aa4012d..8e8c1ae12260 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
@@ -126,5 +126,30 @@
"teardown": [
""
]
+ },
+ {
+ "id": "b776",
+ "name": "Replace simple action with invalid goto chain control",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action simple sdata \"hello\" pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action simple sdata \"world\" goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <hello>.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action simple"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
index 5aaf593b914a..ecd96eda7f6a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -484,5 +484,30 @@
"teardown": [
"$TC actions flush action skbedit"
]
+ },
+ {
+ "id": "1b2b",
+ "name": "Replace skbedit action with invalid goto_chain control",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action skbedit ptype host pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action skbedit ptype host goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit ptype host pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
index fe3326e939c1..6eb4c4f97060 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
@@ -392,5 +392,30 @@
"teardown": [
"$TC actions flush action skbmod"
]
+ },
+ {
+ "id": "b651",
+ "name": "Replace skbmod action with invalid goto_chain control",
+ "category": [
+ "actions",
+ "skbmod"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbmod",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action skbmod set etype 0x1111 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action skbmod set etype 0x1111 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions ls action skbmod",
+ "matchPattern": "action order [0-9]*: skbmod pass set etype 0x1111\\s+index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbmod"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index e7e15a7336b6..28453a445fdb 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -884,5 +884,30 @@
"teardown": [
"$TC actions flush action tunnel_key"
]
+ },
+ {
+ "id": "8242",
+ "name": "Replace tunnel_key set action with invalid goto chain",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 10.10.10.2 dst_ip 20.20.20.1 dst_port 3129 id 2 csum goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 90",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*csum pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
index 69ea09eefffc..cc7c7d758008 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
@@ -688,5 +688,30 @@
"teardown": [
"$TC actions flush action vlan"
]
+ },
+ {
+ "id": "e394",
+ "name": "Replace vlan push action with invalid goto chain control",
+ "category": [
+ "actions",
+ "vlan"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action vlan",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action vlan push id 500 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action vlan push id 500 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action vlan index 90",
+ "matchPattern": "action order [0-9]+: vlan.*push id 500 protocol 802.1Q priority 0 pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action vlan"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
index 99a5ffca1088..2d096b2abf2c 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@ -19,6 +19,26 @@
]
},
{
+ "id": "2638",
+ "name": "Add matchall and try to get it",
+ "category": [
+ "filter",
+ "matchall"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 clsact",
+ "$TC filter add dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall action ok"
+ ],
+ "cmdUnderTest": "$TC filter get dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 ingress",
+ "matchPattern": "filter protocol all pref 1 matchall chain 0 handle 0x1234",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 clsact"
+ ]
+ },
+ {
"id": "d052",
"name": "Add 1M filters with the same action",
"category": [
diff --git a/tools/testing/selftests/timers/skew_consistency.c b/tools/testing/selftests/timers/skew_consistency.c
index 022b711c78ee..8066be9aff11 100644
--- a/tools/testing/selftests/timers/skew_consistency.c
+++ b/tools/testing/selftests/timers/skew_consistency.c
@@ -32,7 +32,6 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include "../kselftest.h"
diff --git a/tools/testing/selftests/tpm2/Makefile b/tools/testing/selftests/tpm2/Makefile
new file mode 100644
index 000000000000..9dd848427a7b
--- /dev/null
+++ b/tools/testing/selftests/tpm2/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+include ../lib.mk
+
+TEST_PROGS := test_smoke.sh test_space.sh
diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh
new file mode 100755
index 000000000000..80521d46220c
--- /dev/null
+++ b/tools/testing/selftests/tpm2/test_smoke.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+python -m unittest -v tpm2_tests.SmokeTest
diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh
new file mode 100755
index 000000000000..a6f5e346635e
--- /dev/null
+++ b/tools/testing/selftests/tpm2/test_space.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+python -m unittest -v tpm2_tests.SpaceTest
diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py
new file mode 100644
index 000000000000..828c18584624
--- /dev/null
+++ b/tools/testing/selftests/tpm2/tpm2.py
@@ -0,0 +1,697 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+import hashlib
+import os
+import socket
+import struct
+import sys
+import unittest
+from fcntl import ioctl
+
+
+TPM2_ST_NO_SESSIONS = 0x8001
+TPM2_ST_SESSIONS = 0x8002
+
+TPM2_CC_FIRST = 0x01FF
+
+TPM2_CC_CREATE_PRIMARY = 0x0131
+TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET = 0x0139
+TPM2_CC_CREATE = 0x0153
+TPM2_CC_LOAD = 0x0157
+TPM2_CC_UNSEAL = 0x015E
+TPM2_CC_FLUSH_CONTEXT = 0x0165
+TPM2_CC_START_AUTH_SESSION = 0x0176
+TPM2_CC_GET_CAPABILITY = 0x017A
+TPM2_CC_GET_RANDOM = 0x017B
+TPM2_CC_PCR_READ = 0x017E
+TPM2_CC_POLICY_PCR = 0x017F
+TPM2_CC_PCR_EXTEND = 0x0182
+TPM2_CC_POLICY_PASSWORD = 0x018C
+TPM2_CC_POLICY_GET_DIGEST = 0x0189
+
+TPM2_SE_POLICY = 0x01
+TPM2_SE_TRIAL = 0x03
+
+TPM2_ALG_RSA = 0x0001
+TPM2_ALG_SHA1 = 0x0004
+TPM2_ALG_AES = 0x0006
+TPM2_ALG_KEYEDHASH = 0x0008
+TPM2_ALG_SHA256 = 0x000B
+TPM2_ALG_NULL = 0x0010
+TPM2_ALG_CBC = 0x0042
+TPM2_ALG_CFB = 0x0043
+
+TPM2_RH_OWNER = 0x40000001
+TPM2_RH_NULL = 0x40000007
+TPM2_RH_LOCKOUT = 0x4000000A
+TPM2_RS_PW = 0x40000009
+
+TPM2_RC_SIZE = 0x01D5
+TPM2_RC_AUTH_FAIL = 0x098E
+TPM2_RC_POLICY_FAIL = 0x099D
+TPM2_RC_COMMAND_CODE = 0x0143
+
+TSS2_RC_LAYER_SHIFT = 16
+TSS2_RESMGR_TPM_RC_LAYER = (11 << TSS2_RC_LAYER_SHIFT)
+
+TPM2_CAP_HANDLES = 0x00000001
+TPM2_CAP_COMMANDS = 0x00000002
+TPM2_CAP_TPM_PROPERTIES = 0x00000006
+
+TPM2_PT_FIXED = 0x100
+TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41
+
+HR_SHIFT = 24
+HR_LOADED_SESSION = 0x02000000
+HR_TRANSIENT = 0x80000000
+
+SHA1_DIGEST_SIZE = 20
+SHA256_DIGEST_SIZE = 32
+
+TPM2_VER0_ERRORS = {
+ 0x000: "TPM_RC_SUCCESS",
+ 0x030: "TPM_RC_BAD_TAG",
+}
+
+TPM2_VER1_ERRORS = {
+ 0x000: "TPM_RC_FAILURE",
+ 0x001: "TPM_RC_FAILURE",
+ 0x003: "TPM_RC_SEQUENCE",
+ 0x00B: "TPM_RC_PRIVATE",
+ 0x019: "TPM_RC_HMAC",
+ 0x020: "TPM_RC_DISABLED",
+ 0x021: "TPM_RC_EXCLUSIVE",
+ 0x024: "TPM_RC_AUTH_TYPE",
+ 0x025: "TPM_RC_AUTH_MISSING",
+ 0x026: "TPM_RC_POLICY",
+ 0x027: "TPM_RC_PCR",
+ 0x028: "TPM_RC_PCR_CHANGED",
+ 0x02D: "TPM_RC_UPGRADE",
+ 0x02E: "TPM_RC_TOO_MANY_CONTEXTS",
+ 0x02F: "TPM_RC_AUTH_UNAVAILABLE",
+ 0x030: "TPM_RC_REBOOT",
+ 0x031: "TPM_RC_UNBALANCED",
+ 0x042: "TPM_RC_COMMAND_SIZE",
+ 0x043: "TPM_RC_COMMAND_CODE",
+ 0x044: "TPM_RC_AUTHSIZE",
+ 0x045: "TPM_RC_AUTH_CONTEXT",
+ 0x046: "TPM_RC_NV_RANGE",
+ 0x047: "TPM_RC_NV_SIZE",
+ 0x048: "TPM_RC_NV_LOCKED",
+ 0x049: "TPM_RC_NV_AUTHORIZATION",
+ 0x04A: "TPM_RC_NV_UNINITIALIZED",
+ 0x04B: "TPM_RC_NV_SPACE",
+ 0x04C: "TPM_RC_NV_DEFINED",
+ 0x050: "TPM_RC_BAD_CONTEXT",
+ 0x051: "TPM_RC_CPHASH",
+ 0x052: "TPM_RC_PARENT",
+ 0x053: "TPM_RC_NEEDS_TEST",
+ 0x054: "TPM_RC_NO_RESULT",
+ 0x055: "TPM_RC_SENSITIVE",
+ 0x07F: "RC_MAX_FM0",
+}
+
+TPM2_FMT1_ERRORS = {
+ 0x001: "TPM_RC_ASYMMETRIC",
+ 0x002: "TPM_RC_ATTRIBUTES",
+ 0x003: "TPM_RC_HASH",
+ 0x004: "TPM_RC_VALUE",
+ 0x005: "TPM_RC_HIERARCHY",
+ 0x007: "TPM_RC_KEY_SIZE",
+ 0x008: "TPM_RC_MGF",
+ 0x009: "TPM_RC_MODE",
+ 0x00A: "TPM_RC_TYPE",
+ 0x00B: "TPM_RC_HANDLE",
+ 0x00C: "TPM_RC_KDF",
+ 0x00D: "TPM_RC_RANGE",
+ 0x00E: "TPM_RC_AUTH_FAIL",
+ 0x00F: "TPM_RC_NONCE",
+ 0x010: "TPM_RC_PP",
+ 0x012: "TPM_RC_SCHEME",
+ 0x015: "TPM_RC_SIZE",
+ 0x016: "TPM_RC_SYMMETRIC",
+ 0x017: "TPM_RC_TAG",
+ 0x018: "TPM_RC_SELECTOR",
+ 0x01A: "TPM_RC_INSUFFICIENT",
+ 0x01B: "TPM_RC_SIGNATURE",
+ 0x01C: "TPM_RC_KEY",
+ 0x01D: "TPM_RC_POLICY_FAIL",
+ 0x01F: "TPM_RC_INTEGRITY",
+ 0x020: "TPM_RC_TICKET",
+ 0x021: "TPM_RC_RESERVED_BITS",
+ 0x022: "TPM_RC_BAD_AUTH",
+ 0x023: "TPM_RC_EXPIRED",
+ 0x024: "TPM_RC_POLICY_CC",
+ 0x025: "TPM_RC_BINDING",
+ 0x026: "TPM_RC_CURVE",
+ 0x027: "TPM_RC_ECC_POINT",
+}
+
+TPM2_WARN_ERRORS = {
+ 0x001: "TPM_RC_CONTEXT_GAP",
+ 0x002: "TPM_RC_OBJECT_MEMORY",
+ 0x003: "TPM_RC_SESSION_MEMORY",
+ 0x004: "TPM_RC_MEMORY",
+ 0x005: "TPM_RC_SESSION_HANDLES",
+ 0x006: "TPM_RC_OBJECT_HANDLES",
+ 0x007: "TPM_RC_LOCALITY",
+ 0x008: "TPM_RC_YIELDED",
+ 0x009: "TPM_RC_CANCELED",
+ 0x00A: "TPM_RC_TESTING",
+ 0x010: "TPM_RC_REFERENCE_H0",
+ 0x011: "TPM_RC_REFERENCE_H1",
+ 0x012: "TPM_RC_REFERENCE_H2",
+ 0x013: "TPM_RC_REFERENCE_H3",
+ 0x014: "TPM_RC_REFERENCE_H4",
+ 0x015: "TPM_RC_REFERENCE_H5",
+ 0x016: "TPM_RC_REFERENCE_H6",
+ 0x018: "TPM_RC_REFERENCE_S0",
+ 0x019: "TPM_RC_REFERENCE_S1",
+ 0x01A: "TPM_RC_REFERENCE_S2",
+ 0x01B: "TPM_RC_REFERENCE_S3",
+ 0x01C: "TPM_RC_REFERENCE_S4",
+ 0x01D: "TPM_RC_REFERENCE_S5",
+ 0x01E: "TPM_RC_REFERENCE_S6",
+ 0x020: "TPM_RC_NV_RATE",
+ 0x021: "TPM_RC_LOCKOUT",
+ 0x022: "TPM_RC_RETRY",
+ 0x023: "TPM_RC_NV_UNAVAILABLE",
+ 0x7F: "TPM_RC_NOT_USED",
+}
+
+RC_VER1 = 0x100
+RC_FMT1 = 0x080
+RC_WARN = 0x900
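+# TPM 2.0 response-code layout: format-one codes set bit 7 (RC_FMT1);
+# format-zero codes set bit 8 (RC_VER1), and warnings additionally set
+# bit 11, hence RC_WARN = 0x900. ProtocolError below keys off these bits.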
+
+ALG_DIGEST_SIZE_MAP = {
+ TPM2_ALG_SHA1: SHA1_DIGEST_SIZE,
+ TPM2_ALG_SHA256: SHA256_DIGEST_SIZE,
+}
+
+ALG_HASH_FUNCTION_MAP = {
+ TPM2_ALG_SHA1: hashlib.sha1,
+ TPM2_ALG_SHA256: hashlib.sha256
+}
+
+NAME_ALG_MAP = {
+ "sha1": TPM2_ALG_SHA1,
+ "sha256": TPM2_ALG_SHA256,
+}
+
+
+class UnknownAlgorithmIdError(Exception):
+ def __init__(self, alg):
+ self.alg = alg
+
+ def __str__(self):
+        return '0x%0x' % (self.alg)
+
+
+class UnknownAlgorithmNameError(Exception):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+        return self.name
+
+
+class UnknownPCRBankError(Exception):
+ def __init__(self, alg):
+ self.alg = alg
+
+ def __str__(self):
+        return '0x%0x' % (self.alg)
+
+
+class ProtocolError(Exception):
+ def __init__(self, cc, rc):
+ self.cc = cc
+ self.rc = rc
+
+ if (rc & RC_FMT1) == RC_FMT1:
+ self.name = TPM2_FMT1_ERRORS.get(rc & 0x3f, "TPM_RC_UNKNOWN")
+ elif (rc & RC_WARN) == RC_WARN:
+ self.name = TPM2_WARN_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN")
+ elif (rc & RC_VER1) == RC_VER1:
+ self.name = TPM2_VER1_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN")
+ else:
+ self.name = TPM2_VER0_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN")
+
+ def __str__(self):
+ if self.cc:
+ return '%s: cc=0x%08x, rc=0x%08x' % (self.name, self.cc, self.rc)
+ else:
+ return '%s: rc=0x%08x' % (self.name, self.rc)
+
+
+class AuthCommand(object):
+ """TPMS_AUTH_COMMAND"""
+
+ def __init__(self, session_handle=TPM2_RS_PW, nonce='', session_attributes=0,
+ hmac=''):
+ self.session_handle = session_handle
+ self.nonce = nonce
+ self.session_attributes = session_attributes
+ self.hmac = hmac
+
+ def __str__(self):
+ fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
+ return struct.pack(fmt, self.session_handle, len(self.nonce),
+ self.nonce, self.session_attributes, len(self.hmac),
+ self.hmac)
+
+ def __len__(self):
+ fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
+ return struct.calcsize(fmt)
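+
+    # With the default empty nonce and hmac, fmt is '>I H0s B H0s':
+    # a 9-byte TPMS_AUTH_COMMAND (4 + 2 + 0 + 1 + 2 + 0).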
+
+
+class SensitiveCreate(object):
+ """TPMS_SENSITIVE_CREATE"""
+
+ def __init__(self, user_auth='', data=''):
+ self.user_auth = user_auth
+ self.data = data
+
+ def __str__(self):
+ fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
+ return struct.pack(fmt, len(self.user_auth), self.user_auth,
+ len(self.data), self.data)
+
+ def __len__(self):
+ fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
+ return struct.calcsize(fmt)
+
+
+class Public(object):
+ """TPMT_PUBLIC"""
+
+ FIXED_TPM = (1 << 1)
+ FIXED_PARENT = (1 << 4)
+ SENSITIVE_DATA_ORIGIN = (1 << 5)
+ USER_WITH_AUTH = (1 << 6)
+ RESTRICTED = (1 << 16)
+ DECRYPT = (1 << 17)
+
+ def __fmt(self):
+ return '>HHIH%us%usH%us' % \
+ (len(self.auth_policy), len(self.parameters), len(self.unique))
+
+ def __init__(self, object_type, name_alg, object_attributes, auth_policy='',
+ parameters='', unique=''):
+ self.object_type = object_type
+ self.name_alg = name_alg
+ self.object_attributes = object_attributes
+ self.auth_policy = auth_policy
+ self.parameters = parameters
+ self.unique = unique
+
+ def __str__(self):
+ return struct.pack(self.__fmt(),
+ self.object_type,
+ self.name_alg,
+ self.object_attributes,
+ len(self.auth_policy),
+ self.auth_policy,
+ self.parameters,
+ len(self.unique),
+ self.unique)
+
+ def __len__(self):
+ return struct.calcsize(self.__fmt())
+
+
+def get_digest_size(alg):
+ ds = ALG_DIGEST_SIZE_MAP.get(alg)
+ if not ds:
+ raise UnknownAlgorithmIdError(alg)
+ return ds
+
+
+def get_hash_function(alg):
+ f = ALG_HASH_FUNCTION_MAP.get(alg)
+ if not f:
+ raise UnknownAlgorithmIdError(alg)
+ return f
+
+
+def get_algorithm(name):
+ alg = NAME_ALG_MAP.get(name)
+ if not alg:
+ raise UnknownAlgorithmNameError(name)
+ return alg
+
+
+def hex_dump(d):
+ d = [format(ord(x), '02x') for x in d]
+ d = [d[i: i + 16] for i in xrange(0, len(d), 16)]
+ d = [' '.join(x) for x in d]
+ d = os.linesep.join(d)
+
+ return d
+
+class Client:
+ FLAG_DEBUG = 0x01
+ FLAG_SPACE = 0x02
+ TPM_IOC_NEW_SPACE = 0xa200
+
+ def __init__(self, flags = 0):
+ self.flags = flags
+
+ if (self.flags & Client.FLAG_SPACE) == 0:
+ self.tpm = open('/dev/tpm0', 'r+b', buffering=0)
+ else:
+ self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0)
+
+ def close(self):
+ self.tpm.close()
+
+ def send_cmd(self, cmd):
+ self.tpm.write(cmd)
+ rsp = self.tpm.read()
+
+ if (self.flags & Client.FLAG_DEBUG) != 0:
+ sys.stderr.write('cmd' + os.linesep)
+ sys.stderr.write(hex_dump(cmd) + os.linesep)
+ sys.stderr.write('rsp' + os.linesep)
+ sys.stderr.write(hex_dump(rsp) + os.linesep)
+
+ rc = struct.unpack('>I', rsp[6:10])[0]
+ if rc != 0:
+ cc = struct.unpack('>I', cmd[6:10])[0]
+ raise ProtocolError(cc, rc)
+
+ return rsp
+
+ def read_pcr(self, i, bank_alg = TPM2_ALG_SHA1):
+ pcrsel_len = max((i >> 3) + 1, 3)
+ pcrsel = [0] * pcrsel_len
+ pcrsel[i >> 3] = 1 << (i & 7)
+ pcrsel = ''.join(map(chr, pcrsel))
+
+ fmt = '>HII IHB%us' % (pcrsel_len)
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_PCR_READ,
+ 1,
+ bank_alg,
+ pcrsel_len, pcrsel)
+
+ rsp = self.send_cmd(cmd)
+
+ pcr_update_cnt, pcr_select_cnt = struct.unpack('>II', rsp[10:18])
+ assert pcr_select_cnt == 1
+ rsp = rsp[18:]
+
+ alg2, pcrsel_len2 = struct.unpack('>HB', rsp[:3])
+ assert bank_alg == alg2 and pcrsel_len == pcrsel_len2
+ rsp = rsp[3 + pcrsel_len:]
+
+ digest_cnt = struct.unpack('>I', rsp[:4])[0]
+ if digest_cnt == 0:
+ return None
+ rsp = rsp[6:]
+
+ return rsp
+
+ def extend_pcr(self, i, dig, bank_alg = TPM2_ALG_SHA1):
+ ds = get_digest_size(bank_alg)
+ assert(ds == len(dig))
+
+ auth_cmd = AuthCommand()
+
+ fmt = '>HII I I%us IH%us' % (len(auth_cmd), ds)
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_PCR_EXTEND,
+ i,
+ len(auth_cmd),
+ str(auth_cmd),
+ 1, bank_alg, dig)
+
+ self.send_cmd(cmd)
+
+ def start_auth_session(self, session_type, name_alg = TPM2_ALG_SHA1):
+ fmt = '>HII IIH16sHBHH'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_START_AUTH_SESSION,
+ TPM2_RH_NULL,
+ TPM2_RH_NULL,
+ 16,
+ '\0' * 16,
+ 0,
+ session_type,
+ TPM2_ALG_NULL,
+ name_alg)
+
+ return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
+
+ def __calc_pcr_digest(self, pcrs, bank_alg = TPM2_ALG_SHA1,
+ digest_alg = TPM2_ALG_SHA1):
+ x = []
+ f = get_hash_function(digest_alg)
+
+ for i in pcrs:
+ pcr = self.read_pcr(i, bank_alg)
+ if pcr == None:
+ return None
+ x += pcr
+
+ return f(bytearray(x)).digest()
+
+ def policy_pcr(self, handle, pcrs, bank_alg = TPM2_ALG_SHA1,
+ name_alg = TPM2_ALG_SHA1):
+ ds = get_digest_size(name_alg)
+ dig = self.__calc_pcr_digest(pcrs, bank_alg, name_alg)
+ if not dig:
+ raise UnknownPCRBankError(bank_alg)
+
+ pcrsel_len = max((max(pcrs) >> 3) + 1, 3)
+ pcrsel = [0] * pcrsel_len
+ for i in pcrs:
+ pcrsel[i >> 3] |= 1 << (i & 7)
+ pcrsel = ''.join(map(chr, pcrsel))
+
+ fmt = '>HII IH%usIHB3s' % ds
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_POLICY_PCR,
+ handle,
+ len(dig), str(dig),
+ 1,
+ bank_alg,
+ pcrsel_len, pcrsel)
+
+ self.send_cmd(cmd)
+
+ def policy_password(self, handle):
+ fmt = '>HII I'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_POLICY_PASSWORD,
+ handle)
+
+ self.send_cmd(cmd)
+
+ def get_policy_digest(self, handle):
+ fmt = '>HII I'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_POLICY_GET_DIGEST,
+ handle)
+
+ return self.send_cmd(cmd)[12:]
+
+ def flush_context(self, handle):
+ fmt = '>HIII'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_FLUSH_CONTEXT,
+ handle)
+
+ self.send_cmd(cmd)
+
+ def create_root_key(self, auth_value = ''):
+ attributes = \
+ Public.FIXED_TPM | \
+ Public.FIXED_PARENT | \
+ Public.SENSITIVE_DATA_ORIGIN | \
+ Public.USER_WITH_AUTH | \
+ Public.RESTRICTED | \
+ Public.DECRYPT
+
+ auth_cmd = AuthCommand()
+ sensitive = SensitiveCreate(user_auth=auth_value)
+
+ public_parms = struct.pack(
+ '>HHHHHI',
+ TPM2_ALG_AES,
+ 128,
+ TPM2_ALG_CFB,
+ TPM2_ALG_NULL,
+ 2048,
+ 0)
+
+ public = Public(
+ object_type=TPM2_ALG_RSA,
+ name_alg=TPM2_ALG_SHA1,
+ object_attributes=attributes,
+ parameters=public_parms)
+
+ fmt = '>HIII I%us H%us H%us HI' % \
+ (len(auth_cmd), len(sensitive), len(public))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_CREATE_PRIMARY,
+ TPM2_RH_OWNER,
+ len(auth_cmd),
+ str(auth_cmd),
+ len(sensitive),
+ str(sensitive),
+ len(public),
+ str(public),
+ 0, 0)
+
+ return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
+
+ def seal(self, parent_key, data, auth_value, policy_dig,
+ name_alg = TPM2_ALG_SHA1):
+ ds = get_digest_size(name_alg)
+ assert(not policy_dig or ds == len(policy_dig))
+
+ attributes = 0
+ if not policy_dig:
+ attributes |= Public.USER_WITH_AUTH
+ policy_dig = ''
+
+ auth_cmd = AuthCommand()
+ sensitive = SensitiveCreate(user_auth=auth_value, data=data)
+
+ public = Public(
+ object_type=TPM2_ALG_KEYEDHASH,
+ name_alg=name_alg,
+ object_attributes=attributes,
+ auth_policy=policy_dig,
+ parameters=struct.pack('>H', TPM2_ALG_NULL))
+
+ fmt = '>HIII I%us H%us H%us HI' % \
+ (len(auth_cmd), len(sensitive), len(public))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_CREATE,
+ parent_key,
+ len(auth_cmd),
+ str(auth_cmd),
+ len(sensitive),
+ str(sensitive),
+ len(public),
+ str(public),
+ 0, 0)
+
+ rsp = self.send_cmd(cmd)
+
+ return rsp[14:]
+
+ def unseal(self, parent_key, blob, auth_value, policy_handle):
+ private_len = struct.unpack('>H', blob[0:2])[0]
+ public_start = private_len + 2
+ public_len = struct.unpack('>H', blob[public_start:public_start + 2])[0]
+ blob = blob[:private_len + public_len + 4]
+
+ auth_cmd = AuthCommand()
+
+ fmt = '>HII I I%us %us' % (len(auth_cmd), len(blob))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_LOAD,
+ parent_key,
+ len(auth_cmd),
+ str(auth_cmd),
+ blob)
+
+ data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
+
+ if policy_handle:
+ auth_cmd = AuthCommand(session_handle=policy_handle, hmac=auth_value)
+ else:
+ auth_cmd = AuthCommand(hmac=auth_value)
+
+ fmt = '>HII I I%us' % (len(auth_cmd))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_UNSEAL,
+ data_handle,
+ len(auth_cmd),
+ str(auth_cmd))
+
+ try:
+ rsp = self.send_cmd(cmd)
+ finally:
+ self.flush_context(data_handle)
+
+ data_len = struct.unpack('>I', rsp[10:14])[0] - 2
+
+ return rsp[16:16 + data_len]
+
+ def reset_da_lock(self):
+ auth_cmd = AuthCommand()
+
+ fmt = '>HII I I%us' % (len(auth_cmd))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET,
+ TPM2_RH_LOCKOUT,
+ len(auth_cmd),
+ str(auth_cmd))
+
+ self.send_cmd(cmd)
+
+ def __get_cap_cnt(self, cap, pt, cnt):
+ handles = []
+ fmt = '>HII III'
+
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_GET_CAPABILITY,
+ cap, pt, cnt)
+
+ rsp = self.send_cmd(cmd)[10:]
+ more_data, cap, cnt = struct.unpack('>BII', rsp[:9])
+ rsp = rsp[9:]
+
+ for i in xrange(0, cnt):
+ handle = struct.unpack('>I', rsp[:4])[0]
+ handles.append(handle)
+ rsp = rsp[4:]
+
+ return handles, more_data
+
+ def get_cap(self, cap, pt):
+ handles = []
+
+ more_data = True
+ while more_data:
+ next_handles, more_data = self.__get_cap_cnt(cap, pt, 1)
+ handles += next_handles
+ pt += 1
+
+ return handles
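The module above is exercised by tpm2_tests.py below; as a minimal usage sketch of the Client API (Python 2, like the module itself):

    import tpm2

    client = tpm2.Client()        # or tpm2.Client(tpm2.Client.FLAG_SPACE)
    root = client.create_root_key()
    try:
        blob = client.seal(root, 'payload', 'password', None)
        assert client.unseal(root, blob, 'password', None) == 'payload'
    finally:
        client.flush_context(root)
        client.close()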
diff --git a/tools/testing/selftests/tpm2/tpm2_tests.py b/tools/testing/selftests/tpm2/tpm2_tests.py
new file mode 100644
index 000000000000..d4973be53493
--- /dev/null
+++ b/tools/testing/selftests/tpm2/tpm2_tests.py
@@ -0,0 +1,290 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+from argparse import ArgumentParser
+from argparse import FileType
+import os
+import sys
+import tpm2
+from tpm2 import ProtocolError
+import unittest
+import logging
+import struct
+
+class SmokeTest(unittest.TestCase):
+ def setUp(self):
+ self.client = tpm2.Client()
+ self.root_key = self.client.create_root_key()
+
+ def tearDown(self):
+ self.client.flush_context(self.root_key)
+ self.client.close()
+
+ def test_seal_with_auth(self):
+ data = 'X' * 64
+ auth = 'A' * 15
+
+ blob = self.client.seal(self.root_key, data, auth, None)
+ result = self.client.unseal(self.root_key, blob, auth, None)
+ self.assertEqual(data, result)
+
+ def test_seal_with_policy(self):
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
+
+ data = 'X' * 64
+ auth = 'A' * 15
+ pcrs = [16]
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ policy_dig = self.client.get_policy_digest(handle)
+ finally:
+ self.client.flush_context(handle)
+
+ blob = self.client.seal(self.root_key, data, auth, policy_dig)
+
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ result = self.client.unseal(self.root_key, blob, auth, handle)
+ except:
+ self.client.flush_context(handle)
+ raise
+
+ self.assertEqual(data, result)
+
+ def test_unseal_with_wrong_auth(self):
+ data = 'X' * 64
+ auth = 'A' * 20
+ rc = 0
+
+ blob = self.client.seal(self.root_key, data, auth, None)
+ try:
+ result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None)
+ except ProtocolError, e:
+ rc = e.rc
+
+ self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL)
+
+ def test_unseal_with_wrong_policy(self):
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
+
+ data = 'X' * 64
+ auth = 'A' * 17
+ pcrs = [16]
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ policy_dig = self.client.get_policy_digest(handle)
+ finally:
+ self.client.flush_context(handle)
+
+ blob = self.client.seal(self.root_key, data, auth, policy_dig)
+
+ # Extend first a PCR that is not part of the policy and try to unseal.
+ # This should succeed.
+
+ ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
+ self.client.extend_pcr(1, 'X' * ds)
+
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ result = self.client.unseal(self.root_key, blob, auth, handle)
+ except:
+ self.client.flush_context(handle)
+ raise
+
+ self.assertEqual(data, result)
+
+ # Then, extend a PCR that is part of the policy and try to unseal.
+ # This should fail.
+ self.client.extend_pcr(16, 'X' * ds)
+
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
+
+ rc = 0
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ result = self.client.unseal(self.root_key, blob, auth, handle)
+ except ProtocolError, e:
+ rc = e.rc
+ self.client.flush_context(handle)
+ except:
+ self.client.flush_context(handle)
+ raise
+
+ self.assertEqual(rc, tpm2.TPM2_RC_POLICY_FAIL)
+
+ def test_seal_with_too_long_auth(self):
+ ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
+ data = 'X' * 64
+ auth = 'A' * (ds + 1)
+
+ rc = 0
+ try:
+ blob = self.client.seal(self.root_key, data, auth, None)
+ except ProtocolError, e:
+ rc = e.rc
+
+ self.assertEqual(rc, tpm2.TPM2_RC_SIZE)
+
+ def test_too_short_cmd(self):
+ rejected = False
+ try:
+ fmt = '>HIII'
+ cmd = struct.pack(fmt,
+ tpm2.TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt) + 1,
+ tpm2.TPM2_CC_FLUSH_CONTEXT,
+ 0xDEADBEEF)
+
+ self.client.send_cmd(cmd)
+ except IOError, e:
+ rejected = True
+ except:
+ pass
+ self.assertEqual(rejected, True)
+
+ def test_read_partial_resp(self):
+ try:
+ fmt = '>HIIH'
+ cmd = struct.pack(fmt,
+ tpm2.TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ tpm2.TPM2_CC_GET_RANDOM,
+ 0x20)
+ self.client.tpm.write(cmd)
+ hdr = self.client.tpm.read(10)
+ sz = struct.unpack('>I', hdr[2:6])[0]
+ rsp = self.client.tpm.read()
+ except:
+ pass
+ self.assertEqual(sz, 10 + 2 + 32)
+ self.assertEqual(len(rsp), 2 + 32)
+
+ def test_read_partial_overwrite(self):
+ try:
+ fmt = '>HIIH'
+ cmd = struct.pack(fmt,
+ tpm2.TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ tpm2.TPM2_CC_GET_RANDOM,
+ 0x20)
+ self.client.tpm.write(cmd)
+            # Read part of the response
+ rsp1 = self.client.tpm.read(15)
+
+ # Send a new cmd
+ self.client.tpm.write(cmd)
+
+            # Read the whole response
+ rsp2 = self.client.tpm.read()
+ except:
+ pass
+ self.assertEqual(len(rsp1), 15)
+ self.assertEqual(len(rsp2), 10 + 2 + 32)
+
+ def test_send_two_cmds(self):
+ rejected = False
+ try:
+ fmt = '>HIIH'
+ cmd = struct.pack(fmt,
+ tpm2.TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ tpm2.TPM2_CC_GET_RANDOM,
+ 0x20)
+ self.client.tpm.write(cmd)
+
+ # expect the second one to raise -EBUSY error
+ self.client.tpm.write(cmd)
+ rsp = self.client.tpm.read()
+
+ except IOError, e:
+ # read the response
+ rsp = self.client.tpm.read()
+ rejected = True
+ pass
+ except:
+ pass
+ self.assertEqual(rejected, True)
+
+class SpaceTest(unittest.TestCase):
+ def setUp(self):
+ logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG)
+
+ def test_make_two_spaces(self):
+ log = logging.getLogger(__name__)
+ log.debug("test_make_two_spaces")
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root1 = space1.create_root_key()
+ space2 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root2 = space2.create_root_key()
+ root3 = space2.create_root_key()
+
+ log.debug("%08x" % (root1))
+ log.debug("%08x" % (root2))
+ log.debug("%08x" % (root3))
+
+ def test_flush_context(self):
+ log = logging.getLogger(__name__)
+ log.debug("test_flush_context")
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root1 = space1.create_root_key()
+ log.debug("%08x" % (root1))
+
+ space1.flush_context(root1)
+
+ def test_get_handles(self):
+ log = logging.getLogger(__name__)
+ log.debug("test_get_handles")
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ space1.create_root_key()
+ space2 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ space2.create_root_key()
+ space2.create_root_key()
+
+ handles = space2.get_cap(tpm2.TPM2_CAP_HANDLES, tpm2.HR_TRANSIENT)
+
+ self.assertEqual(len(handles), 2)
+
+ log.debug("%08x" % (handles[0]))
+ log.debug("%08x" % (handles[1]))
+
+ def test_invalid_cc(self):
+ log = logging.getLogger(__name__)
+ log.debug(sys._getframe().f_code.co_name)
+
+ TPM2_CC_INVALID = tpm2.TPM2_CC_FIRST - 1
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root1 = space1.create_root_key()
+ log.debug("%08x" % (root1))
+
+ fmt = '>HII'
+ cmd = struct.pack(fmt, tpm2.TPM2_ST_NO_SESSIONS, struct.calcsize(fmt),
+ TPM2_CC_INVALID)
+
+ rc = 0
+ try:
+ space1.send_cmd(cmd)
+ except ProtocolError, e:
+ rc = e.rc
+
+ self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
+ tpm2.TSS2_RESMGR_TPM_RC_LAYER)
diff --git a/tools/testing/selftests/vm/test_vmalloc.sh b/tools/testing/selftests/vm/test_vmalloc.sh
index 06d2bb109f06..06d2bb109f06 100644..100755
--- a/tools/testing/selftests/vm/test_vmalloc.sh
+++ b/tools/testing/selftests/vm/test_vmalloc.sh
diff --git a/tools/testing/selftests/x86/mpx-dig.c b/tools/testing/selftests/x86/mpx-dig.c
index c13607ef5c11..880fbf676968 100644
--- a/tools/testing/selftests/x86/mpx-dig.c
+++ b/tools/testing/selftests/x86/mpx-dig.c
@@ -8,9 +8,7 @@
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
-#include <sys/types.h>
#include <sys/stat.h>
-#include <unistd.h>
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index b07ac4614e1c..7fc272ecae16 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -25,6 +25,7 @@
#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <kvm/arm_vgic.h>
@@ -34,7 +35,9 @@
static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
+static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
+static u32 host_ptimer_irq_flags;
static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
@@ -52,12 +55,34 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
+static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
+ struct arch_timer_context *timer,
+ enum kvm_arch_timer_regs treg,
+ u64 val);
+static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
+ struct arch_timer_context *timer,
+ enum kvm_arch_timer_regs treg);
u64 kvm_phys_timer_read(void)
{
return timecounter->cc->read(timecounter->cc);
}
+static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
+{
+ if (has_vhe()) {
+ map->direct_vtimer = vcpu_vtimer(vcpu);
+ map->direct_ptimer = vcpu_ptimer(vcpu);
+ map->emul_ptimer = NULL;
+ } else {
+ map->direct_vtimer = vcpu_vtimer(vcpu);
+ map->direct_ptimer = NULL;
+ map->emul_ptimer = vcpu_ptimer(vcpu);
+ }
+
+ trace_kvm_get_timer_map(vcpu->vcpu_id, map);
+}
+
static inline bool userspace_irqchip(struct kvm *kvm)
{
return static_branch_unlikely(&userspace_irqchip_in_use) &&
@@ -78,20 +103,27 @@ static void soft_timer_cancel(struct hrtimer *hrt)
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
- struct arch_timer_context *vtimer;
+ struct arch_timer_context *ctx;
+ struct timer_map map;
/*
* We may see a timer interrupt after vcpu_put() has been called which
* sets the CPU's vcpu pointer to NULL, because even though the timer
- * has been disabled in vtimer_save_state(), the hardware interrupt
+ * has been disabled in timer_save_state(), the hardware interrupt
* signal may not have been retired from the interrupt controller yet.
*/
if (!vcpu)
return IRQ_HANDLED;
- vtimer = vcpu_vtimer(vcpu);
- if (kvm_timer_should_fire(vtimer))
- kvm_timer_update_irq(vcpu, true, vtimer);
+ get_timer_map(vcpu, &map);
+
+ if (irq == host_vtimer_irq)
+ ctx = map.direct_vtimer;
+ else
+ ctx = map.direct_ptimer;
+
+ if (kvm_timer_should_fire(ctx))
+ kvm_timer_update_irq(vcpu, true, ctx);
if (userspace_irqchip(vcpu->kvm) &&
!static_branch_unlikely(&has_gic_active_state))
@@ -122,7 +154,9 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
- return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+ WARN_ON(timer_ctx && timer_ctx->loaded);
+ return timer_ctx &&
+ !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}
@@ -132,21 +166,22 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
*/
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
- u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ u64 min_delta = ULLONG_MAX;
+ int i;
- if (kvm_timer_irq_can_fire(vtimer))
- min_virt = kvm_timer_compute_delta(vtimer);
+ for (i = 0; i < NR_KVM_TIMERS; i++) {
+ struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
- if (kvm_timer_irq_can_fire(ptimer))
- min_phys = kvm_timer_compute_delta(ptimer);
+ WARN(ctx->loaded, "timer %d loaded\n", i);
+ if (kvm_timer_irq_can_fire(ctx))
+ min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
+ }
/* If none of timers can fire, then return 0 */
- if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
+ if (min_delta == ULLONG_MAX)
return 0;
- return min(min_virt, min_phys);
+ return min_delta;
}
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
@@ -173,41 +208,58 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
return HRTIMER_NORESTART;
}
-static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
+static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
- struct arch_timer_context *ptimer;
- struct arch_timer_cpu *timer;
+ struct arch_timer_context *ctx;
struct kvm_vcpu *vcpu;
u64 ns;
- timer = container_of(hrt, struct arch_timer_cpu, phys_timer);
- vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
- ptimer = vcpu_ptimer(vcpu);
+ ctx = container_of(hrt, struct arch_timer_context, hrtimer);
+ vcpu = ctx->vcpu;
+
+ trace_kvm_timer_hrtimer_expire(ctx);
/*
* Check that the timer has really expired from the guest's
* PoV (NTP on the host may have forced it to expire
* early). If not ready, schedule for a later time.
*/
- ns = kvm_timer_compute_delta(ptimer);
+ ns = kvm_timer_compute_delta(ctx);
if (unlikely(ns)) {
hrtimer_forward_now(hrt, ns_to_ktime(ns));
return HRTIMER_RESTART;
}
- kvm_timer_update_irq(vcpu, true, ptimer);
+ kvm_timer_update_irq(vcpu, true, ctx);
return HRTIMER_NORESTART;
}
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
+ enum kvm_arch_timers index;
u64 cval, now;
+ if (!timer_ctx)
+ return false;
+
+ index = arch_timer_ctx_index(timer_ctx);
+
if (timer_ctx->loaded) {
- u32 cnt_ctl;
+ u32 cnt_ctl = 0;
+
+ switch (index) {
+ case TIMER_VTIMER:
+ cnt_ctl = read_sysreg_el0(cntv_ctl);
+ break;
+ case TIMER_PTIMER:
+ cnt_ctl = read_sysreg_el0(cntp_ctl);
+ break;
+ case NR_KVM_TIMERS:
+ /* GCC is braindead */
+ cnt_ctl = 0;
+ break;
+ }
- /* Only the virtual timer can be loaded so far */
- cnt_ctl = read_sysreg_el0(cntv_ctl);
return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
(cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
!(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
@@ -224,13 +276,13 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ struct timer_map map;
- if (kvm_timer_should_fire(vtimer))
- return true;
+ get_timer_map(vcpu, &map);
- return kvm_timer_should_fire(ptimer);
+ return kvm_timer_should_fire(map.direct_vtimer) ||
+ kvm_timer_should_fire(map.direct_ptimer) ||
+ kvm_timer_should_fire(map.emul_ptimer);
}
/*
@@ -269,77 +321,70 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
}
}
-/* Schedule the background timer for the emulated timer. */
-static void phys_timer_emulate(struct kvm_vcpu *vcpu)
+static void timer_emulate(struct arch_timer_context *ctx)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ bool should_fire = kvm_timer_should_fire(ctx);
+
+ trace_kvm_timer_emulate(ctx, should_fire);
+
+ if (should_fire) {
+ kvm_timer_update_irq(ctx->vcpu, true, ctx);
+ return;
+ }
/*
* If the timer can fire now, we don't need to have a soft timer
* scheduled for the future. If the timer cannot fire at all,
* then we also don't need a soft timer.
*/
- if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
- soft_timer_cancel(&timer->phys_timer);
+ if (!kvm_timer_irq_can_fire(ctx)) {
+ soft_timer_cancel(&ctx->hrtimer);
return;
}
- soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(ptimer));
+ soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}
-/*
- * Check if there was a change in the timer state, so that we should either
- * raise or lower the line level to the GIC or schedule a background timer to
- * emulate the physical timer.
- */
-static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
+static void timer_save_state(struct arch_timer_context *ctx)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
- bool level;
+ struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
+ enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
+ unsigned long flags;
- if (unlikely(!timer->enabled))
+ if (!timer->enabled)
return;
- /*
- * The vtimer virtual interrupt is a 'mapped' interrupt, meaning part
- * of its lifecycle is offloaded to the hardware, and we therefore may
- * not have lowered the irq.level value before having to signal a new
- * interrupt, but have to signal an interrupt every time the level is
- * asserted.
- */
- level = kvm_timer_should_fire(vtimer);
- kvm_timer_update_irq(vcpu, level, vtimer);
+ local_irq_save(flags);
- phys_timer_emulate(vcpu);
+ if (!ctx->loaded)
+ goto out;
- if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
- kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
-}
+ switch (index) {
+ case TIMER_VTIMER:
+ ctx->cnt_ctl = read_sysreg_el0(cntv_ctl);
+ ctx->cnt_cval = read_sysreg_el0(cntv_cval);
-static void vtimer_save_state(struct kvm_vcpu *vcpu)
-{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- unsigned long flags;
+ /* Disable the timer */
+ write_sysreg_el0(0, cntv_ctl);
+ isb();
- local_irq_save(flags);
+ break;
+ case TIMER_PTIMER:
+ ctx->cnt_ctl = read_sysreg_el0(cntp_ctl);
+ ctx->cnt_cval = read_sysreg_el0(cntp_cval);
- if (!vtimer->loaded)
- goto out;
+ /* Disable the timer */
+ write_sysreg_el0(0, cntp_ctl);
+ isb();
- if (timer->enabled) {
- vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
- vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+ break;
+ case NR_KVM_TIMERS:
+ BUG();
}
- /* Disable the virtual timer */
- write_sysreg_el0(0, cntv_ctl);
- isb();
+ trace_kvm_timer_save_state(ctx);
- vtimer->loaded = false;
+ ctx->loaded = false;
out:
local_irq_restore(flags);
}
@@ -349,67 +394,72 @@ out:
* thread is removed from its waitqueue and made runnable when there's a timer
* interrupt to handle.
*/
-void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
-
- vtimer_save_state(vcpu);
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+ struct timer_map map;
- /*
- * No need to schedule a background timer if any guest timer has
- * already expired, because kvm_vcpu_block will return before putting
- * the thread to sleep.
- */
- if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
- return;
+ get_timer_map(vcpu, &map);
/*
- * If both timers are not capable of raising interrupts (disabled or
+ * If no timers are capable of raising interrupts (disabled or
* masked), then there's no more work for us to do.
*/
- if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
+ if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
+ !kvm_timer_irq_can_fire(map.direct_ptimer) &&
+ !kvm_timer_irq_can_fire(map.emul_ptimer))
return;
/*
- * The guest timers have not yet expired, schedule a background timer.
+ * At least one guest timer will expire. Schedule a background timer.
* Set the earliest expiration time among the guest timers.
*/
soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}
-static void vtimer_restore_state(struct kvm_vcpu *vcpu)
+static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+
+ soft_timer_cancel(&timer->bg_timer);
+}
+
+static void timer_restore_state(struct arch_timer_context *ctx)
+{
+ struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
+ enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
unsigned long flags;
+ if (!timer->enabled)
+ return;
+
local_irq_save(flags);
- if (vtimer->loaded)
+ if (ctx->loaded)
goto out;
- if (timer->enabled) {
- write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
+ switch (index) {
+ case TIMER_VTIMER:
+ write_sysreg_el0(ctx->cnt_cval, cntv_cval);
+ isb();
+ write_sysreg_el0(ctx->cnt_ctl, cntv_ctl);
+ break;
+ case TIMER_PTIMER:
+ write_sysreg_el0(ctx->cnt_cval, cntp_cval);
isb();
- write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
+ write_sysreg_el0(ctx->cnt_ctl, cntp_ctl);
+ break;
+ case NR_KVM_TIMERS:
+ BUG();
}
- vtimer->loaded = true;
+ trace_kvm_timer_restore_state(ctx);
+
+ ctx->loaded = true;
out:
local_irq_restore(flags);
}
-void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
-{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
- vtimer_restore_state(vcpu);
-
- soft_timer_cancel(&timer->bg_timer);
-}
-
static void set_cntvoff(u64 cntvoff)
{
u32 low = lower_32_bits(cntvoff);
@@ -425,23 +475,32 @@ static void set_cntvoff(u64 cntvoff)
kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
}
-static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
int r;
- r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+ r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
WARN_ON(r);
}
-static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- bool phys_active;
+ struct kvm_vcpu *vcpu = ctx->vcpu;
+ bool phys_active = false;
+
+ /*
+ * Update the timer output so that it is likely to match the
+ * state we're about to restore. If the timer expires between
+ * this point and the register restoration, we'll take the
+ * interrupt anyway.
+ */
+ kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
if (irqchip_in_kernel(vcpu->kvm))
- phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
- else
- phys_active = vtimer->irq.level;
- set_vtimer_irq_phys_active(vcpu, phys_active);
+ phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);
+
+ phys_active |= ctx->irq.level;
+
+ set_timer_irq_phys_active(ctx, phys_active);
}
static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
@@ -449,6 +508,14 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
/*
+ * Update the timer output so that it is likely to match the
+ * state we're about to restore. If the timer expires between
+ * this point and the register restoration, we'll take the
+ * interrupt anyway.
+ */
+ kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
+
+ /*
* When using a userspace irqchip with the architected timers and a
* host interrupt controller that doesn't support an active state, we
* must still prevent continuously exiting from the guest, and
@@ -466,28 +533,32 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+ struct timer_map map;
if (unlikely(!timer->enabled))
return;
- if (static_branch_likely(&has_gic_active_state))
- kvm_timer_vcpu_load_gic(vcpu);
- else
+ get_timer_map(vcpu, &map);
+
+ if (static_branch_likely(&has_gic_active_state)) {
+ kvm_timer_vcpu_load_gic(map.direct_vtimer);
+ if (map.direct_ptimer)
+ kvm_timer_vcpu_load_gic(map.direct_ptimer);
+ } else {
kvm_timer_vcpu_load_nogic(vcpu);
+ }
- set_cntvoff(vtimer->cntvoff);
+ set_cntvoff(map.direct_vtimer->cntvoff);
- vtimer_restore_state(vcpu);
+ kvm_timer_unblocking(vcpu);
- /* Set the background timer for the physical timer emulation. */
- phys_timer_emulate(vcpu);
+ timer_restore_state(map.direct_vtimer);
+ if (map.direct_ptimer)
+ timer_restore_state(map.direct_ptimer);
- /* If the timer fired while we weren't running, inject it now */
- if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
- kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
+ if (map.emul_ptimer)
+ timer_emulate(map.emul_ptimer);
}
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
@@ -509,15 +580,20 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+ struct timer_map map;
if (unlikely(!timer->enabled))
return;
- vtimer_save_state(vcpu);
+ get_timer_map(vcpu, &map);
+
+ timer_save_state(map.direct_vtimer);
+ if (map.direct_ptimer)
+ timer_save_state(map.direct_ptimer);
/*
- * Cancel the physical timer emulation, because the only case where we
+ * Cancel soft timer emulation, because the only case where we
* need it after a vcpu_put is in the context of a sleeping VCPU, and
* in that case we already factor in the deadline for the physical
* timer when scheduling the bg_timer.
@@ -525,7 +601,11 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
* In any case, we re-schedule the hrtimer for the physical timer when
* coming back to the VCPU thread in kvm_timer_vcpu_load().
*/
- soft_timer_cancel(&timer->phys_timer);
+ if (map.emul_ptimer)
+ soft_timer_cancel(&map.emul_ptimer->hrtimer);
+
+ if (swait_active(kvm_arch_vcpu_wq(vcpu)))
+ kvm_timer_blocking(vcpu);
/*
* The kernel may decide to run userspace after calling vcpu_put, so
@@ -534,8 +614,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
* counter of non-VHE case. For VHE, the virtual counter uses a fixed
* virtual offset of zero, so no need to zero CNTVOFF_EL2 register.
*/
- if (!has_vhe())
- set_cntvoff(0);
+ set_cntvoff(0);
}
/*
@@ -550,7 +629,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
if (!kvm_timer_should_fire(vtimer)) {
kvm_timer_update_irq(vcpu, false, vtimer);
if (static_branch_likely(&has_gic_active_state))
- set_vtimer_irq_phys_active(vcpu, false);
+ set_timer_irq_phys_active(vtimer, false);
else
enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
@@ -558,7 +637,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
if (unlikely(!timer->enabled))
return;
@@ -569,9 +648,10 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+ struct timer_map map;
+
+ get_timer_map(vcpu, &map);
/*
* The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
@@ -579,12 +659,22 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
* resets the timer to be disabled and unmasked and is compliant with
* the ARMv7 architecture.
*/
- vtimer->cnt_ctl = 0;
- ptimer->cnt_ctl = 0;
- kvm_timer_update_state(vcpu);
+ vcpu_vtimer(vcpu)->cnt_ctl = 0;
+ vcpu_ptimer(vcpu)->cnt_ctl = 0;
+
+ if (timer->enabled) {
+ kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
+ kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));
+
+ if (irqchip_in_kernel(vcpu->kvm)) {
+ kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
+ if (map.direct_ptimer)
+ kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
+ }
+ }
- if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
- kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
+ if (map.emul_ptimer)
+ soft_timer_cancel(&map.emul_ptimer->hrtimer);
return 0;
}
@@ -610,56 +700,71 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
/* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
- vcpu_ptimer(vcpu)->cntvoff = 0;
+ ptimer->cntvoff = 0;
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
timer->bg_timer.function = kvm_bg_timer_expire;
- hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- timer->phys_timer.function = kvm_phys_timer_expire;
+ hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ vtimer->hrtimer.function = kvm_hrtimer_expire;
+ ptimer->hrtimer.function = kvm_hrtimer_expire;
vtimer->irq.irq = default_vtimer_irq.irq;
ptimer->irq.irq = default_ptimer_irq.irq;
+
+ vtimer->host_timer_irq = host_vtimer_irq;
+ ptimer->host_timer_irq = host_ptimer_irq;
+
+ vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
+ ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
+
+ vtimer->vcpu = vcpu;
+ ptimer->vcpu = vcpu;
}
static void kvm_timer_init_interrupt(void *info)
{
enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
+ enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ struct arch_timer_context *timer;
switch (regid) {
case KVM_REG_ARM_TIMER_CTL:
- vtimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT;
+ timer = vcpu_vtimer(vcpu);
+ kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
break;
case KVM_REG_ARM_TIMER_CNT:
+ timer = vcpu_vtimer(vcpu);
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
break;
case KVM_REG_ARM_TIMER_CVAL:
- vtimer->cnt_cval = value;
+ timer = vcpu_vtimer(vcpu);
+ kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
break;
case KVM_REG_ARM_PTIMER_CTL:
- ptimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT;
+ timer = vcpu_ptimer(vcpu);
+ kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
break;
case KVM_REG_ARM_PTIMER_CVAL:
- ptimer->cnt_cval = value;
+ timer = vcpu_ptimer(vcpu);
+ kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
break;
default:
return -1;
}
- kvm_timer_update_state(vcpu);
return 0;
}
@@ -679,26 +784,113 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
- struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
switch (regid) {
case KVM_REG_ARM_TIMER_CTL:
- return read_timer_ctl(vtimer);
+ return kvm_arm_timer_read(vcpu,
+ vcpu_vtimer(vcpu), TIMER_REG_CTL);
case KVM_REG_ARM_TIMER_CNT:
- return kvm_phys_timer_read() - vtimer->cntvoff;
+ return kvm_arm_timer_read(vcpu,
+ vcpu_vtimer(vcpu), TIMER_REG_CNT);
case KVM_REG_ARM_TIMER_CVAL:
- return vtimer->cnt_cval;
+ return kvm_arm_timer_read(vcpu,
+ vcpu_vtimer(vcpu), TIMER_REG_CVAL);
case KVM_REG_ARM_PTIMER_CTL:
- return read_timer_ctl(ptimer);
- case KVM_REG_ARM_PTIMER_CVAL:
- return ptimer->cnt_cval;
+ return kvm_arm_timer_read(vcpu,
+ vcpu_ptimer(vcpu), TIMER_REG_CTL);
case KVM_REG_ARM_PTIMER_CNT:
- return kvm_phys_timer_read();
+ return kvm_arm_timer_read(vcpu,
+ vcpu_vtimer(vcpu), TIMER_REG_CNT);
+ case KVM_REG_ARM_PTIMER_CVAL:
+ return kvm_arm_timer_read(vcpu,
+ vcpu_ptimer(vcpu), TIMER_REG_CVAL);
}
return (u64)-1;
}
+static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
+ struct arch_timer_context *timer,
+ enum kvm_arch_timer_regs treg)
+{
+ u64 val;
+
+ switch (treg) {
+ case TIMER_REG_TVAL:
+ val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
+ break;
+
+ case TIMER_REG_CTL:
+ val = read_timer_ctl(timer);
+ break;
+
+ case TIMER_REG_CVAL:
+ val = timer->cnt_cval;
+ break;
+
+ case TIMER_REG_CNT:
+ val = kvm_phys_timer_read() - timer->cntvoff;
+ break;
+
+ default:
+ BUG();
+ }
+
+ return val;
+}
+
+u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
+ enum kvm_arch_timers tmr,
+ enum kvm_arch_timer_regs treg)
+{
+ u64 val;
+
+ preempt_disable();
+ kvm_timer_vcpu_put(vcpu);
+
+ val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);
+
+ kvm_timer_vcpu_load(vcpu);
+ preempt_enable();
+
+ return val;
+}
+
+static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
+ struct arch_timer_context *timer,
+ enum kvm_arch_timer_regs treg,
+ u64 val)
+{
+ switch (treg) {
+ case TIMER_REG_TVAL:
+ timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
+ break;
+
+ case TIMER_REG_CTL:
+ timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
+ break;
+
+ case TIMER_REG_CVAL:
+ timer->cnt_cval = val;
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
+ enum kvm_arch_timers tmr,
+ enum kvm_arch_timer_regs treg,
+ u64 val)
+{
+ preempt_disable();
+ kvm_timer_vcpu_put(vcpu);
+
+ kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);
+
+ kvm_timer_vcpu_load(vcpu);
+ preempt_enable();
+}
+
static int kvm_timer_starting_cpu(unsigned int cpu)
{
kvm_timer_init_interrupt(NULL);
@@ -724,6 +916,8 @@ int kvm_timer_hyp_init(bool has_gic)
return -ENODEV;
}
+ /* First, do the virtual EL1 timer irq */
+
if (info->virtual_irq <= 0) {
kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
info->virtual_irq);
@@ -734,15 +928,15 @@ int kvm_timer_hyp_init(bool has_gic)
host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
- kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
+ kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
host_vtimer_irq);
host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
}
err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
- "kvm guest timer", kvm_get_running_vcpus());
+ "kvm guest vtimer", kvm_get_running_vcpus());
if (err) {
- kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
+ kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
host_vtimer_irq, err);
return err;
}
@@ -760,6 +954,43 @@ int kvm_timer_hyp_init(bool has_gic)
kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
+ /* Now let's do the physical EL1 timer irq */
+
+ if (info->physical_irq > 0) {
+ host_ptimer_irq = info->physical_irq;
+ host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
+ if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
+ host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
+ kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
+ host_ptimer_irq);
+ host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
+ }
+
+ err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
+ "kvm guest ptimer", kvm_get_running_vcpus());
+ if (err) {
+ kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
+ host_ptimer_irq, err);
+ return err;
+ }
+
+ if (has_gic) {
+ err = irq_set_vcpu_affinity(host_ptimer_irq,
+ kvm_get_running_vcpus());
+ if (err) {
+ kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
+ goto out_free_irq;
+ }
+ }
+
+ kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
+ } else if (has_vhe()) {
+ kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
+ info->physical_irq);
+ err = -ENODEV;
+ goto out_free_irq;
+ }
+
cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
"kvm/arm/timer:starting", kvm_timer_starting_cpu,
kvm_timer_dying_cpu);
@@ -771,7 +1002,7 @@ out_free_irq:
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
soft_timer_cancel(&timer->bg_timer);
}
@@ -807,16 +1038,18 @@ bool kvm_arch_timer_get_input_level(int vintid)
if (vintid == vcpu_vtimer(vcpu)->irq.irq)
timer = vcpu_vtimer(vcpu);
+ else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
+ timer = vcpu_ptimer(vcpu);
else
- BUG(); /* We only map the vtimer so far */
+ BUG();
return kvm_timer_should_fire(timer);
}
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+ struct timer_map map;
int ret;
if (timer->enabled)
@@ -834,19 +1067,33 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
- ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq,
+ get_timer_map(vcpu, &map);
+
+ ret = kvm_vgic_map_phys_irq(vcpu,
+ map.direct_vtimer->host_timer_irq,
+ map.direct_vtimer->irq.irq,
kvm_arch_timer_get_input_level);
if (ret)
return ret;
+ if (map.direct_ptimer) {
+ ret = kvm_vgic_map_phys_irq(vcpu,
+ map.direct_ptimer->host_timer_irq,
+ map.direct_ptimer->irq.irq,
+ kvm_arch_timer_get_input_level);
+ }
+
+ if (ret)
+ return ret;
+
no_vgic:
timer->enabled = 1;
return 0;
}
/*
- * On VHE system, we only need to configure trap on physical timer and counter
- * accesses in EL0 and EL1 once, not for every world switch.
+ * On a VHE system, we only need to configure the EL2 timer trap register once,
+ * not for every world switch.
* The host kernel runs at EL2 with HCR_EL2.TGE == 1,
* and this makes those bits have no effect for the host kernel execution.
*/
@@ -857,11 +1104,11 @@ void kvm_timer_init_vhe(void)
u64 val;
/*
- * Disallow physical timer access for the guest.
- * Physical counter access is allowed.
+ * VHE systems allow the guest direct access to the EL1 physical
+ * timer/counter.
*/
val = read_sysreg(cnthctl_el2);
- val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
+ val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
write_sysreg(val, cnthctl_el2);
}
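Stepping back from the hunks above: the new timer_map is the pivot of the whole arch_timer rework. On VHE hosts both EL1 timers are handed to the guest in hardware; on non-VHE hosts only the virtual timer is hardware-backed and the physical timer is emulated with a per-context hrtimer. A compilable sketch of the mapping get_timer_map() builds (the stringly-typed fields are purely illustrative):

    #include <stddef.h>

    struct timer_map_sketch {
            const char *direct_vtimer;      /* always hardware-backed */
            const char *direct_ptimer;      /* hardware-backed on VHE only */
            const char *emul_ptimer;        /* hrtimer-emulated on non-VHE */
    };

    static void sketch_get_timer_map(int has_vhe, struct timer_map_sketch *map)
    {
            map->direct_vtimer = "vtimer";
            map->direct_ptimer = has_vhe ? "ptimer" : NULL;
            map->emul_ptimer   = has_vhe ? NULL : "ptimer";
    }

Everything reachable through the direct_* slots goes through timer_save_state()/timer_restore_state() and a real interrupt line; emul_ptimer is driven entirely by kvm_hrtimer_expire() and timer_emulate().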
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9c486fad3f9f..f412ebc90610 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -65,7 +65,6 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
-static unsigned int kvm_vmid_bits __read_mostly;
static DEFINE_SPINLOCK(kvm_vmid_lock);
static bool vgic_present;
@@ -142,7 +141,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_vgic_early_init(kvm);
/* Mark the initial VMID generation invalid */
- kvm->arch.vmid_gen = 0;
+ kvm->arch.vmid.vmid_gen = 0;
/* The maximum number of VCPUs is limited by the host's GIC model */
kvm->arch.max_vcpus = vgic_present ?
@@ -336,13 +335,11 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
- kvm_timer_schedule(vcpu);
kvm_vgic_v4_enable_doorbell(vcpu);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
- kvm_timer_unschedule(vcpu);
kvm_vgic_v4_disable_doorbell(vcpu);
}
@@ -472,37 +469,31 @@ void force_vm_exit(const cpumask_t *mask)
/**
* need_new_vmid_gen - check that the VMID is still valid
- * @kvm: The VM's VMID to check
+ * @vmid: The VMID to check
*
* return true if there is a new generation of VMIDs being used
*
- * The hardware supports only 256 values with the value zero reserved for the
- * host, so we check if an assigned value belongs to a previous generation,
- * which which requires us to assign a new value. If we're the first to use a
- * VMID for the new generation, we must flush necessary caches and TLBs on all
- * CPUs.
+ * The hardware supports a limited set of values with the value zero reserved
+ * for the host, so we check if an assigned value belongs to a previous
+ * generation, which requires us to assign a new value. If we're the
+ * first to use a VMID for the new generation, we must flush necessary caches
+ * and TLBs on all CPUs.
*/
-static bool need_new_vmid_gen(struct kvm *kvm)
+static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
- return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
+ return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}
/**
- * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
- * @kvm The guest that we are about to run
- *
- * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
- * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
- * caches and TLBs.
+ * update_vmid - Update the vmid with a valid VMID for the current generation
+ * @kvm: The guest that struct vmid belongs to
+ * @vmid: The stage-2 VMID information struct
*/
-static void update_vttbr(struct kvm *kvm)
+static void update_vmid(struct kvm_vmid *vmid)
{
- phys_addr_t pgd_phys;
- u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
-
- if (!need_new_vmid_gen(kvm))
+ if (!need_new_vmid_gen(vmid))
return;
spin_lock(&kvm_vmid_lock);
@@ -512,7 +503,7 @@ static void update_vttbr(struct kvm *kvm)
* already allocated a valid vmid for this vm, then this vcpu should
* use the same vmid.
*/
- if (!need_new_vmid_gen(kvm)) {
+ if (!need_new_vmid_gen(vmid)) {
spin_unlock(&kvm_vmid_lock);
return;
}
@@ -536,18 +527,12 @@ static void update_vttbr(struct kvm *kvm)
kvm_call_hyp(__kvm_flush_vm_context);
}
- kvm->arch.vmid = kvm_next_vmid;
+ vmid->vmid = kvm_next_vmid;
kvm_next_vmid++;
- kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
-
- /* update vttbr to be used with the new vmid */
- pgd_phys = virt_to_phys(kvm->arch.pgd);
- BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm));
- vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
+ kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
smp_wmb();
- WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+ WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
spin_unlock(&kvm_vmid_lock);
}
@@ -700,7 +685,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
cond_resched();
- update_vttbr(vcpu->kvm);
+ update_vmid(&vcpu->kvm->arch.vmid);
check_vcpu_requests(vcpu);
@@ -749,7 +734,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
+ if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
kvm_request_pending(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
isb(); /* Ensure work in x_flush_hwstate is committed */
@@ -775,7 +760,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_vcpu_run_vhe(vcpu);
kvm_arm_vhe_guest_exit();
} else {
- ret = kvm_call_hyp(__kvm_vcpu_run_nvhe, vcpu);
+ ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
}
vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -949,7 +934,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init)
{
- unsigned int i;
+ unsigned int i, ret;
int phys_target = kvm_target_cpu();
if (init->target != phys_target)
@@ -984,9 +969,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
vcpu->arch.target = phys_target;
/* Now we know what it is, we can reset it. */
- return kvm_reset_vcpu(vcpu);
-}
+ ret = kvm_reset_vcpu(vcpu);
+ if (ret) {
+ vcpu->arch.target = -1;
+ bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+ }
+ return ret;
+}
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
struct kvm_vcpu_init *init)
@@ -1427,10 +1417,6 @@ static inline void hyp_cpu_pm_exit(void)
static int init_common_resources(void)
{
- /* set size of VMID supported by CPU */
- kvm_vmid_bits = kvm_get_vmid_bits();
- kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
kvm_set_ipa_limit();
return 0;
@@ -1571,6 +1557,7 @@ static int init_hyp_mode(void)
kvm_cpu_context_t *cpu_ctxt;
cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
+ kvm_init_host_cpu_context(cpu_ctxt, cpu);
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
if (err) {
@@ -1581,7 +1568,7 @@ static int init_hyp_mode(void)
err = hyp_map_aux_data();
if (err)
- kvm_err("Cannot map host auxilary data: %d\n", err);
+ kvm_err("Cannot map host auxiliary data: %d\n", err);
return 0;
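A concrete illustration of the VMID arithmetic that update_vmid() now encapsulates: the allocator hands out VMIDs from a counter masked to the hardware width, and a wrap back to zero (reserved for the host) is exactly what forces the generation bump and the __kvm_flush_vm_context call in the hunk above. A standalone example, using an assumed 8-bit width where kvm_get_vmid_bits() would report the real one:

    #include <stdio.h>

    int main(void)
    {
            unsigned int vmid_bits = 8;     /* assumed width for illustration */
            unsigned int next_vmid = 256;   /* counter one past the last VMID */

            /* Same masking as update_vmid() above. */
            next_vmid &= (1u << vmid_bits) - 1;

            /* 0 is reserved for the host, so a wrap means: bump the
             * generation and flush the VM context on all CPUs. */
            printf("wrapped VMID: %u\n", next_vmid);
            return 0;
    }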
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 9652c453480f..370bd6c5e6cb 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -222,11 +222,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
}
}
- if (used_lrs) {
+ if (used_lrs || cpu_if->its_vpe.its_vm) {
int i;
u32 elrsr;
- elrsr = read_gicreg(ICH_ELSR_EL2);
+ elrsr = read_gicreg(ICH_ELRSR_EL2);
write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);
@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
int i;
- if (used_lrs) {
+ if (used_lrs || cpu_if->its_vpe.its_vm) {
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
for (i = 0; i < used_lrs; i++)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index e9d28a7ca673..74b6582eaa3c 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -102,8 +102,7 @@ static bool kvm_is_device_pfn(unsigned long pfn)
* @addr: IPA
* @pmd: pmd pointer for IPA
*
- * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
- * pages in the range dirty.
+ * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
*/
static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
{
@@ -121,8 +120,7 @@ static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
* @addr: IPA
* @pud: pud pointer for IPA
*
- * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. Marks all
- * pages in the range dirty.
+ * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
*/
static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
{
@@ -191,7 +189,7 @@ static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr
VM_BUG_ON(pmd_thp_or_huge(*pmd));
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- pte_free_kernel(NULL, pte_table);
+ free_page((unsigned long)pte_table);
put_page(virt_to_page(pmd));
}
@@ -899,15 +897,15 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
* kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
* @kvm: The KVM struct pointer for the VM.
*
- * Allocates only the stage-2 HW PGD level table(s) (can support either full
- * 40-bit input addresses or limited to 32-bit input addresses). Clears the
- * allocated pages.
+ * Allocates only the stage-2 HW PGD level table(s) of size defined by
+ * stage2_pgd_size(kvm).
*
* Note we don't need locking here as this is only called when the VM is
* created, which can only be done once.
*/
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
+ phys_addr_t pgd_phys;
pgd_t *pgd;
if (kvm->arch.pgd != NULL) {
@@ -920,7 +918,12 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
if (!pgd)
return -ENOMEM;
+ pgd_phys = virt_to_phys(pgd);
+ if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
+ return -EINVAL;
+
kvm->arch.pgd = pgd;
+ kvm->arch.pgd_phys = pgd_phys;
return 0;
}
@@ -1008,6 +1011,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
+ kvm->arch.pgd_phys = 0;
}
spin_unlock(&kvm->mmu_lock);
@@ -1060,25 +1064,43 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
{
pmd_t *pmd, old_pmd;
+retry:
pmd = stage2_get_pmd(kvm, cache, addr);
VM_BUG_ON(!pmd);
old_pmd = *pmd;
+ /*
+ * Multiple vcpus faulting on the same PMD entry, can
+ * lead to them sequentially updating the PMD with the
+ * same value. Following the break-before-make
+ * (pmd_clear() followed by tlb_flush()) process can
+ * hinder forward progress due to refaults generated
+ * on missing translations.
+ *
+ * Skip updating the page table if the entry is
+ * unchanged.
+ */
+ if (pmd_val(old_pmd) == pmd_val(*new_pmd))
+ return 0;
+
if (pmd_present(old_pmd)) {
/*
- * Multiple vcpus faulting on the same PMD entry, can
- * lead to them sequentially updating the PMD with the
- * same value. Following the break-before-make
- * (pmd_clear() followed by tlb_flush()) process can
- * hinder forward progress due to refaults generated
- * on missing translations.
+ * If we already have PTE level mapping for this block,
+ * we must unmap it to avoid inconsistent TLB state and
+ * leaking the table page. We could end up in this situation
+ * if the memory slot was marked for dirty logging and was
+ * reverted, leaving PTE level mappings for the pages accessed
+ * during the period. So, unmap the PTE level mapping for this
+ * block and retry, as we could have released the upper level
+ * table in the process.
*
- * Skip updating the page table if the entry is
- * unchanged.
+ * Normal THP splits and merges follow the mmu_notifier callbacks
+ * and get handled accordingly.
*/
- if (pmd_val(old_pmd) == pmd_val(*new_pmd))
- return 0;
-
+ if (!pmd_thp_or_huge(old_pmd)) {
+ unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
+ goto retry;
+ }
/*
* Mapping in huge pages should only happen through a
* fault. If a page is merged into a transparent huge
@@ -1090,8 +1112,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
* should become splitting first, unmapped, merged,
* and mapped back in on-demand.
*/
- VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
-
+ WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
} else {
@@ -1107,6 +1128,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
{
pud_t *pudp, old_pud;
+retry:
pudp = stage2_get_pud(kvm, cache, addr);
VM_BUG_ON(!pudp);
@@ -1114,14 +1136,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
/*
* A large number of vcpus faulting on the same stage 2 entry,
- * can lead to a refault due to the
- * stage2_pud_clear()/tlb_flush(). Skip updating the page
- * tables if there is no change.
+ * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
+ * Skip updating the page tables if there is no change.
*/
if (pud_val(old_pud) == pud_val(*new_pudp))
return 0;
if (stage2_pud_present(kvm, old_pud)) {
+ /*
+ * If we already have table level mapping for this block, unmap
+ * the range for this block and retry.
+ */
+ if (!stage2_pud_huge(kvm, old_pud)) {
+ unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
+ goto retry;
+ }
+
+ WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
stage2_pud_clear(kvm, pudp);
kvm_tlb_flush_vmid_ipa(kvm, addr);
} else {
@@ -1396,14 +1427,6 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
return false;
}
-static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
- if (kvm_vcpu_trap_is_iabt(vcpu))
- return false;
-
- return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
/**
* stage2_wp_ptes - write protect PMD range
* @pmd: pointer to pmd entry
@@ -1452,13 +1475,11 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
}
/**
- * stage2_wp_puds - write protect PGD range
- * @pgd: pointer to pgd entry
- * @addr: range start address
- * @end: range end address
- *
- * Process PUD entries, for a huge PUD we cause a panic.
- */
+ * stage2_wp_puds - write protect PGD range
+ * @pgd: pointer to pgd entry
+ * @addr: range start address
+ * @end: range end address
+ */
static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
phys_addr_t addr, phys_addr_t end)
{
@@ -1595,51 +1616,51 @@ static void kvm_send_hwpoison_signal(unsigned long address,
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}
-static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
- unsigned long hva)
+static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
+ unsigned long hva,
+ unsigned long map_size)
{
- gpa_t gpa_start, gpa_end;
+ gpa_t gpa_start;
hva_t uaddr_start, uaddr_end;
size_t size;
size = memslot->npages * PAGE_SIZE;
gpa_start = memslot->base_gfn << PAGE_SHIFT;
- gpa_end = gpa_start + size;
uaddr_start = memslot->userspace_addr;
uaddr_end = uaddr_start + size;
/*
* Pages belonging to memslots that don't have the same alignment
- * within a PMD for userspace and IPA cannot be mapped with stage-2
- * PMD entries, because we'll end up mapping the wrong pages.
+ * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
+ * PMD/PUD entries, because we'll end up mapping the wrong pages.
*
* Consider a layout like the following:
*
* memslot->userspace_addr:
* +-----+--------------------+--------------------+---+
- * |abcde|fgh Stage-1 PMD | Stage-1 PMD tv|xyz|
+ * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
* +-----+--------------------+--------------------+---+
*
* memslot->base_gfn << PAGE_SIZE:
* +---+--------------------+--------------------+-----+
- * |abc|def Stage-2 PMD | Stage-2 PMD |tvxyz|
+ * |abc|def Stage-2 block | Stage-2 block |tvxyz|
* +---+--------------------+--------------------+-----+
*
- * If we create those stage-2 PMDs, we'll end up with this incorrect
+ * If we create those stage-2 blocks, we'll end up with this incorrect
* mapping:
* d -> f
* e -> g
* f -> h
*/
- if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK))
+ if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
return false;
/*
* Next, let's make sure we're not trying to map anything not covered
- * by the memslot. This means we have to prohibit PMD size mappings
- * for the beginning and end of a non-PMD aligned and non-PMD sized
+ * by the memslot. This means we have to prohibit block size mappings
+ * for the beginning and end of a non-block aligned and non-block sized
* memory slot (illustrated by the head and tail parts of the
* userspace view above containing pages 'abcde' and 'xyz',
* respectively).
@@ -1648,8 +1669,8 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
* userspace_addr or the base_gfn, as both are equally aligned (per
* the check above) and equally sized.
*/
- return (hva & S2_PMD_MASK) >= uaddr_start &&
- (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
+ return (hva & ~(map_size - 1)) >= uaddr_start &&
+ (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1678,12 +1699,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- if (!fault_supports_stage2_pmd_mappings(memslot, hva))
- force_pte = true;
-
- if (logging_active)
- force_pte = true;
-
/* Let's check if we will get back a huge page backed by hugetlbfs */
down_read(&current->mm->mmap_sem);
vma = find_vma_intersection(current->mm, hva, hva + 1);
@@ -1694,6 +1709,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
vma_pagesize = vma_kernel_pagesize(vma);
+ if (logging_active ||
+ !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+ force_pte = true;
+ vma_pagesize = PAGE_SIZE;
+ }
+
/*
* The stage2 has a minimum of 2 level table (For arm64 see
* kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
@@ -1701,11 +1722,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* As for PUD huge maps, we must make sure that we have at least
* 3 levels, i.e, PMD is not folded.
*/
- if ((vma_pagesize == PMD_SIZE ||
- (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
- !force_pte) {
+ if (vma_pagesize == PMD_SIZE ||
+ (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
- }
up_read(&current->mm->mmap_sem);
/* We need minimum second+third level pages */
@@ -1762,8 +1781,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* Only PMD_SIZE transparent hugepages(THP) are
* currently supported. This code will need to be
* updated to support other THP sizes.
+ *
+ * Make sure the host VA and the guest IPA are sufficiently
+ * aligned and that the block is contained within the memslot.
*/
- if (transparent_hugepage_adjust(&pfn, &fault_ipa))
+ if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+ transparent_hugepage_adjust(&pfn, &fault_ipa))
vma_pagesize = PMD_SIZE;
}
@@ -2353,7 +2376,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
return 0;
}
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}
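The rule fault_supports_stage2_huge_mapping() enforces is easiest to see with numbers: a stage-2 block mapping is legal only if the guest IPA and the userspace VA share the same offset within a map_size block. The snippet below (addresses invented for illustration) performs the same check as the hunk:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t map_size    = 2ULL << 20;        /* 2 MiB PMD block */
            uint64_t gpa_start   = 0x80200000ULL;     /* offset 0 in its block */
            uint64_t uaddr_start = 0x7f1234100000ULL; /* offset 1 MiB in its block */

            /* Same test as fault_supports_stage2_huge_mapping() above. */
            if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                    printf("misaligned: fall back to PTE-level mappings\n");
            return 0;
    }

Here the offsets differ (0 vs 1 MiB), so mapping the range with 2 MiB blocks would map the wrong pages, and the fault handler must fall back to PAGE_SIZE mappings.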
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
index 3828beab93f2..204d210d01c2 100644
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -2,6 +2,7 @@
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H
+#include <kvm/arm_arch_timer.h>
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
@@ -262,10 +263,114 @@ TRACE_EVENT(kvm_timer_update_irq,
__entry->vcpu_id, __entry->irq, __entry->level)
);
+TRACE_EVENT(kvm_get_timer_map,
+ TP_PROTO(unsigned long vcpu_id, struct timer_map *map),
+ TP_ARGS(vcpu_id, map),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_id )
+ __field( int, direct_vtimer )
+ __field( int, direct_ptimer )
+ __field( int, emul_ptimer )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->direct_vtimer = arch_timer_ctx_index(map->direct_vtimer);
+ __entry->direct_ptimer =
+ (map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
+ __entry->emul_ptimer =
+ (map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
+ ),
+
+ TP_printk("VCPU: %ld, dv: %d, dp: %d, ep: %d",
+ __entry->vcpu_id,
+ __entry->direct_vtimer,
+ __entry->direct_ptimer,
+ __entry->emul_ptimer)
+);
+
+TRACE_EVENT(kvm_timer_save_state,
+ TP_PROTO(struct arch_timer_context *ctx),
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ctl )
+ __field( unsigned long long, cval )
+ __field( int, timer_idx )
+ ),
+
+ TP_fast_assign(
+ __entry->ctl = ctx->cnt_ctl;
+ __entry->cval = ctx->cnt_cval;
+ __entry->timer_idx = arch_timer_ctx_index(ctx);
+ ),
+
+ TP_printk(" CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
+ __entry->ctl,
+ __entry->cval,
+ __entry->timer_idx)
+);
+
+TRACE_EVENT(kvm_timer_restore_state,
+ TP_PROTO(struct arch_timer_context *ctx),
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ctl )
+ __field( unsigned long long, cval )
+ __field( int, timer_idx )
+ ),
+
+ TP_fast_assign(
+ __entry->ctl = ctx->cnt_ctl;
+ __entry->cval = ctx->cnt_cval;
+ __entry->timer_idx = arch_timer_ctx_index(ctx);
+ ),
+
+ TP_printk("CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
+ __entry->ctl,
+ __entry->cval,
+ __entry->timer_idx)
+);
+
+TRACE_EVENT(kvm_timer_hrtimer_expire,
+ TP_PROTO(struct arch_timer_context *ctx),
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field( int, timer_idx )
+ ),
+
+ TP_fast_assign(
+ __entry->timer_idx = arch_timer_ctx_index(ctx);
+ ),
+
+ TP_printk("arch_timer_ctx_index: %d", __entry->timer_idx)
+);
+
+TRACE_EVENT(kvm_timer_emulate,
+ TP_PROTO(struct arch_timer_context *ctx, bool should_fire),
+ TP_ARGS(ctx, should_fire),
+
+ TP_STRUCT__entry(
+ __field( int, timer_idx )
+ __field( bool, should_fire )
+ ),
+
+ TP_fast_assign(
+ __entry->timer_idx = arch_timer_ctx_index(ctx);
+ __entry->should_fire = should_fire;
+ ),
+
+ TP_printk("arch_timer_ctx_index: %d (should_fire: %d)",
+ __entry->timer_idx, __entry->should_fire)
+);
+
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm
+#define TRACE_INCLUDE_PATH ../../virt/kvm/arm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index ab3f47745d9c..44ceaccb18cf 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -754,8 +754,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
int esz = GITS_BASER_ENTRY_SIZE(baser);
- int index;
+ int index, idx;
gfn_t gfn;
+ bool ret;
switch (type) {
case GITS_BASER_TYPE_DEVICE:
@@ -782,7 +783,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
if (eaddr)
*eaddr = addr;
- return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+ goto out;
}
/* calculate and check the index into the 1st level */
@@ -812,7 +814,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
if (eaddr)
*eaddr = indirect_ptr;
- return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+out:
+ idx = srcu_read_lock(&its->dev->kvm->srcu);
+ ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
+ srcu_read_unlock(&its->dev->kvm->srcu, idx);
+ return ret;
}
static int vgic_its_alloc_collection(struct vgic_its *its,
@@ -1729,8 +1736,8 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
kfree(its);
}
-int vgic_its_has_attr_regs(struct kvm_device *dev,
- struct kvm_device_attr *attr)
+static int vgic_its_has_attr_regs(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
{
const struct vgic_register_region *region;
gpa_t offset = attr->attr;
@@ -1750,9 +1757,9 @@ int vgic_its_has_attr_regs(struct kvm_device *dev,
return 0;
}
-int vgic_its_attr_regs_access(struct kvm_device *dev,
- struct kvm_device_attr *attr,
- u64 *reg, bool is_write)
+static int vgic_its_attr_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ u64 *reg, bool is_write)
{
const struct vgic_register_region *region;
struct vgic_its *its;
@@ -1919,7 +1926,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
ite->collection->collection_id;
val = cpu_to_le64(val);
- return kvm_write_guest(kvm, gpa, &val, ite_esz);
+ return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
}
/**
@@ -2066,7 +2073,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
(itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
(dev->num_eventid_bits - 1));
val = cpu_to_le64(val);
- return kvm_write_guest(kvm, ptr, &val, dte_esz);
+ return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
}
/**
@@ -2246,7 +2253,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
collection->collection_id);
val = cpu_to_le64(val);
- return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
+ return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
@@ -2317,7 +2324,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
*/
val = 0;
BUG_ON(cte_esz > sizeof(val));
- ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
+ ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
return ret;
}
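The repeated kvm_write_guest() -> kvm_write_guest_lock() substitutions above serve the same purpose as the new srcu_read_lock() around kvm_is_visible_gfn(): memslot lookups must run under the kvm->srcu read-side lock. The wrapper itself is defined outside this hunk; it plausibly amounts to a sketch like this:

    /* Hedged sketch, not the kernel's exact definition: take the memslot
     * SRCU read lock around a guest write. */
    #define kvm_write_guest_lock(kvm, gpa, data, len)                       \
    ({                                                                      \
            int __idx = srcu_read_lock(&(kvm)->srcu);                       \
            int __ret = kvm_write_guest((kvm), (gpa), (data), (len));       \
            srcu_read_unlock(&(kvm)->srcu, __idx);                          \
            __ret;                                                          \
    })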
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 4a12322bf7df..9f4843fe9cda 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -200,6 +200,9 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
+ if (was_enabled && !vgic_cpu->lpis_enabled)
+ vgic_flush_pending_lpis(vcpu);
+
if (!was_enabled && vgic_cpu->lpis_enabled)
vgic_enable_lpis(vcpu);
}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 4ee0aeb9a905..9f87e58dbd4a 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -358,7 +358,7 @@ retry:
if (status) {
/* clear consumed data */
val &= ~(1 << bit_nr);
- ret = kvm_write_guest(kvm, ptr, &val, 1);
+ ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
return ret;
}
@@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
else
val &= ~(1 << bit_nr);
- ret = kvm_write_guest(kvm, ptr, &val, 1);
+ ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
return ret;
}
@@ -589,7 +589,7 @@ early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
*/
int vgic_v3_probe(const struct gic_kvm_info *info)
{
- u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
+ u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
int ret;
/*
@@ -679,7 +679,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
if (likely(cpu_if->vgic_sre))
- cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
+ cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index abd9c7352677..191deccf60bf 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -151,6 +151,27 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
kfree(irq);
}
+void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_irq *irq, *tmp;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+
+ list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
+ if (irq->intid >= VGIC_MIN_LPI) {
+ raw_spin_lock(&irq->irq_lock);
+ list_del(&irq->ap_list);
+ irq->vcpu = NULL;
+ raw_spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+}
+
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
WARN_ON(irq_set_irqchip_state(irq->host_irq,
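
vgic_flush_pending_lpis() above takes the per-vCPU ap_list_lock, then each irq_lock, unlinks the LPI, and finally drops the reference the ap_list held on it via vgic_put_irq(). The reference counting follows the usual kref pattern; a minimal self-contained illustration with hypothetical obj/obj_put names, not vgic code:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref refcount;
	};

	static void obj_release(struct kref *kref)
	{
		kfree(container_of(kref, struct obj, refcount));
	}

	static void obj_put(struct obj *o)
	{
		/* frees the object once the last reference is dropped */
		kref_put(&o->refcount, obj_release);
	}
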
@@ -867,15 +888,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
* either observe the new interrupt before or after doing this check,
* and introducing an additional synchronization mechanism doesn't change
* this.
+ *
+ * Note that we still need to go through the whole thing if anything
+ * can be directly injected (GICv4).
*/
- if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+ if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+ !vgic_supports_direct_msis(vcpu->kvm))
return;
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
- raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
- vgic_flush_lr_state(vcpu);
- raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
+ raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ vgic_flush_lr_state(vcpu);
+ raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ }
if (can_access_vgic_from_kernel())
vgic_restore_state(vcpu);
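
The new early-out condition depends on vgic_supports_direct_msis(), i.e. whether GICv4 can inject MSIs directly without going through the ap_list. For reference, the predicate lives in vgic-mmio-v3.c and amounts to the following (sketch):

	bool vgic_supports_direct_msis(struct kvm *kvm)
	{
		return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
	}
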
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index a90024718ca4..abeeffabc456 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -238,6 +238,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu);
bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
+void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 6855cce3e528..5294abb3f178 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -144,7 +144,8 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
if (zone->pio != 1 && zone->pio != 0)
return -EINVAL;
- dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
+ GFP_KERNEL_ACCOUNT);
if (!dev)
return -ENOMEM;
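
This and the remaining allocation changes below switch VM-lifetime allocations from GFP_KERNEL to GFP_KERNEL_ACCOUNT, so the memory is charged to the kmem counter of the caller's memory cgroup. The flag is simply GFP_KERNEL plus __GFP_ACCOUNT, per include/linux/gfp.h:

	#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
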
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b20b751286fc..001aeda4c154 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -214,9 +214,9 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
if (flags & EPOLLHUP) {
/* The eventfd is closing, detach from KVM */
- unsigned long flags;
+ unsigned long iflags;
- spin_lock_irqsave(&kvm->irqfds.lock, flags);
+ spin_lock_irqsave(&kvm->irqfds.lock, iflags);
/*
* We must check if someone deactivated the irqfd before
@@ -230,7 +230,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
if (irqfd_is_active(irqfd))
irqfd_deactivate(irqfd);
- spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
+ spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
}
return 0;
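
The flags to iflags rename fixes variable shadowing: irqfd_wakeup() already derives a __poll_t flags value from its key argument, and the inner unsigned long flags hid it for the rest of the block. A minimal illustration of the hazard, with hypothetical names rather than the irqfd code:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	static int demo(unsigned long flags)
	{
		if (flags & 0x1) {
			unsigned long flags;	/* shadows the parameter */

			spin_lock_irqsave(&demo_lock, flags);
			/* the caller's 'flags' is unreachable in this block */
			spin_unlock_irqrestore(&demo_lock, flags);
		}
		return 0;
	}
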
@@ -297,7 +297,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
if (!kvm_arch_intc_initialized(kvm))
return -EAGAIN;
- irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
+ irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
if (!irqfd)
return -ENOMEM;
@@ -345,7 +345,8 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
}
if (!irqfd->resampler) {
- resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
+ resampler = kzalloc(sizeof(*resampler),
+ GFP_KERNEL_ACCOUNT);
if (!resampler) {
ret = -ENOMEM;
mutex_unlock(&kvm->irqfds.resampler_lock);
@@ -797,7 +798,7 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
if (!p) {
ret = -ENOMEM;
goto fail;
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index b1286c4e0712..79e59e4fa3dc 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -144,18 +144,19 @@ static int setup_routing_entry(struct kvm *kvm,
{
struct kvm_kernel_irq_routing_entry *ei;
int r;
+ u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);
/*
* Do not allow GSI to be mapped to the same irqchip more than once.
* Allow only one to one mapping between GSI and non-irqchip routing.
*/
- hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
+ hlist_for_each_entry(ei, &rt->map[gsi], link)
if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
ue->u.irqchip.irqchip == ei->irqchip.irqchip)
return -EINVAL;
- e->gsi = ue->gsi;
+ e->gsi = gsi;
e->type = ue->type;
r = kvm_set_routing_entry(kvm, e, ue);
if (r)
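
array_index_nospec() from include/linux/nospec.h clamps an already-bounds-checked index so that a mispredicted branch cannot be used to read out of bounds under speculation (the range check on ue->gsi happens before this point, in the caller). The same hardening is applied again to cd->type in kvm_ioctl_create_device() further below. The general pattern, as a hypothetical standalone helper:

	#include <linux/errno.h>
	#include <linux/nospec.h>
	#include <linux/types.h>

	static int demo_lookup(u32 idx, const int *table, size_t nr)
	{
		if (idx >= nr)
			return -EINVAL;
		/* clamp idx so speculation cannot read past the table */
		idx = array_index_nospec(idx, nr);
		return table[idx];
	}
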
@@ -196,7 +197,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
nr_rt_entries += 1;
new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head)),
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!new)
return -ENOMEM;
@@ -208,7 +209,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
for (i = 0; i < nr; ++i) {
r = -ENOMEM;
- e = kzalloc(sizeof(*e), GFP_KERNEL);
+ e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT);
if (!e)
goto out;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d237d3350a99..a704d1f9bd96 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -81,6 +81,11 @@ unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
+/* The start value to grow halt_poll_ns from */
+unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
+module_param(halt_poll_ns_grow_start, uint, 0644);
+EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
+
/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
@@ -525,7 +530,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
int i;
struct kvm_memslots *slots;
- slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
if (!slots)
return NULL;
@@ -601,12 +606,12 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
sizeof(*kvm->debugfs_stat_data),
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!kvm->debugfs_stat_data)
return -ENOMEM;
for (p = debugfs_entries; p->name; p++) {
- stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
+ stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
if (!stat_data)
return -ENOMEM;
@@ -656,12 +661,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
struct kvm_memslots *slots = kvm_alloc_memslots();
if (!slots)
goto out_err_no_srcu;
- /*
- * Generations must be different for each address space.
- * Init kvm generation close to the maximum to easily test the
- * code of handling generation number wrap-around.
- */
- slots->generation = i * 2 - 150;
+ /* Generations must be different for each address space. */
+ slots->generation = i;
rcu_assign_pointer(kvm->memslots[i], slots);
}
@@ -671,7 +672,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err_no_irq_srcu;
for (i = 0; i < KVM_NR_BUSES; i++) {
rcu_assign_pointer(kvm->buses[i],
- kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL));
+ kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
if (!kvm->buses[i])
goto out_err;
}
@@ -789,7 +790,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
- memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL);
+ memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
if (!memslot->dirty_bitmap)
return -ENOMEM;
@@ -874,31 +875,34 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
int as_id, struct kvm_memslots *slots)
{
struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
+ u64 gen = old_memslots->generation;
- /*
- * Set the low bit in the generation, which disables SPTE caching
- * until the end of synchronize_srcu_expedited.
- */
- WARN_ON(old_memslots->generation & 1);
- slots->generation = old_memslots->generation + 1;
+ WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
+ slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
rcu_assign_pointer(kvm->memslots[as_id], slots);
synchronize_srcu_expedited(&kvm->srcu);
/*
- * Increment the new memslot generation a second time. This prevents
- * vm exits that race with memslot updates from caching a memslot
- * generation that will (potentially) be valid forever.
- *
+ * Increment the new memslot generation a second time, dropping the
+ * update in-progress flag and incrementing the generation based on
+ * the number of address spaces. This provides a unique and easily
+ * identifiable generation number while the memslots are in flux.
+ */
+ gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
+
+ /*
* Generations must be unique even across address spaces. We do not need
* a global counter for that, instead the generation space is evenly split
* across address spaces. For example, with two address spaces, address
- * space 0 will use generations 0, 4, 8, ... while * address space 1 will
- * use generations 2, 6, 10, 14, ...
+ * space 0 will use generations 0, 2, 4, ... while address space 1 will
+ * use generations 1, 3, 5, ...
*/
- slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
+ gen += KVM_ADDRESS_SPACE_NUM;
- kvm_arch_memslots_updated(kvm, slots);
+ kvm_arch_memslots_updated(kvm, gen);
+
+ slots->generation = gen;
return old_memslots;
}
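
The update-in-progress flag replaces the old convention of odd generation numbers during an update. Assuming the definition this series adds to include/linux/kvm_host.h, bit 63 marks memslots as in flux while the low bits keep counting, and kvm_arch_memslots_updated() now receives the final generation directly:

	#include <linux/bits.h>

	/* bit 63 marks an update in flight; the rest counts generations */
	#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)
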
@@ -1018,7 +1022,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
- slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
if (!slots)
goto out_free;
memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
@@ -1201,11 +1205,9 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
mask = xchg(&dirty_bitmap[i], 0);
dirty_bitmap_buffer[i] = mask;
- if (mask) {
- offset = i * BITS_PER_LONG;
- kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
- offset, mask);
- }
+ offset = i * BITS_PER_LONG;
+ kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
+ offset, mask);
}
spin_unlock(&kvm->mmu_lock);
}
@@ -1238,7 +1240,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
return -EINVAL;
- if ((log->first_page & 63) || (log->num_pages & 63))
+ if (log->first_page & 63)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
@@ -1251,8 +1253,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
n = kvm_dirty_bitmap_bytes(memslot);
if (log->first_page > memslot->npages ||
- log->num_pages > memslot->npages - log->first_page)
- return -EINVAL;
+ log->num_pages > memslot->npages - log->first_page ||
+ (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
+ return -EINVAL;
*flush = false;
dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
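
The split check relaxes KVM_CLEAR_DIRTY_LOG: first_page must still sit on a 64-page (one bitmap word) boundary, but num_pages may be unaligned when, and only when, the range extends to the end of the memslot. For example, a 1000-page slot can now be cleared with first_page = 0 and num_pages = 1000. A hypothetical helper mirroring the combined logic:

	static bool clear_range_valid(u64 first_page, u64 num_pages, u64 npages)
	{
		if (first_page & 63)
			return false;	/* start must sit on a bitmap-word boundary */
		if (first_page > npages || num_pages > npages - first_page)
			return false;	/* range must fit within the memslot */
		/* an unaligned length is only valid if it reaches the end */
		if (num_pages < npages - first_page && (num_pages & 63))
			return false;
		return true;
	}
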
@@ -2185,20 +2188,23 @@ void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
- unsigned int old, val, grow;
+ unsigned int old, val, grow, grow_start;
old = val = vcpu->halt_poll_ns;
+ grow_start = READ_ONCE(halt_poll_ns_grow_start);
grow = READ_ONCE(halt_poll_ns_grow);
- /* 10us base */
- if (val == 0 && grow)
- val = 10000;
- else
- val *= grow;
+ if (!grow)
+ goto out;
+
+ val *= grow;
+ if (val < grow_start)
+ val = grow_start;
if (val > halt_poll_ns)
val = halt_poll_ns;
vcpu->halt_poll_ns = val;
+out:
trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
}
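
With the defaults above (grow = 2, grow_start = 10000 ns) a halted vCPU's polling window now steps 0 -> 10000 -> 20000 -> 40000 -> ... until it is capped by halt_poll_ns, and setting grow = 0 disables growth outright instead of zeroing the window. A hypothetical standalone model of the policy:

	static unsigned int model_grow(unsigned int val, unsigned int grow,
				       unsigned int grow_start, unsigned int cap)
	{
		if (!grow)
			return val;		/* growth disabled */
		val *= grow;
		if (val < grow_start)
			val = grow_start;	/* jump-start polling from zero */
		return val > cap ? cap : val;
	}
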
@@ -2683,7 +2689,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_regs *kvm_regs;
r = -ENOMEM;
- kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+ kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
if (!kvm_regs)
goto out;
r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
@@ -2711,7 +2717,8 @@ out_free1:
break;
}
case KVM_GET_SREGS: {
- kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+ kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
+ GFP_KERNEL_ACCOUNT);
r = -ENOMEM;
if (!kvm_sregs)
goto out;
@@ -2803,7 +2810,7 @@ out_free1:
break;
}
case KVM_GET_FPU: {
- fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+ fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
r = -ENOMEM;
if (!fpu)
goto out;
@@ -2899,6 +2906,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
{
struct kvm_device *dev = filp->private_data;
+ if (dev->kvm->mm != current->mm)
+ return -EIO;
+
switch (ioctl) {
case KVM_SET_DEVICE_ATTR:
return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
@@ -2968,19 +2978,21 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
struct kvm_device_ops *ops = NULL;
struct kvm_device *dev;
bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
+ int type;
int ret;
if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
return -ENODEV;
- ops = kvm_device_ops_table[cd->type];
+ type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
+ ops = kvm_device_ops_table[type];
if (ops == NULL)
return -ENODEV;
if (test)
return 0;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
if (!dev)
return -ENOMEM;
@@ -2988,7 +3000,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
dev->kvm = kvm;
mutex_lock(&kvm->lock);
- ret = ops->create(dev, cd->type);
+ ret = ops->create(dev, type);
if (ret < 0) {
mutex_unlock(&kvm->lock);
kfree(dev);
@@ -3625,6 +3637,7 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
+EXPORT_SYMBOL_GPL(kvm_io_bus_write);
/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
@@ -3675,7 +3688,6 @@ static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
return -EOPNOTSUPP;
}
-EXPORT_SYMBOL_GPL(kvm_io_bus_write);
/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
@@ -3697,7 +3709,6 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
return r < 0 ? r : 0;
}
-
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev)
@@ -3714,8 +3725,8 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
- new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
- sizeof(struct kvm_io_range)), GFP_KERNEL);
+ new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
+ GFP_KERNEL_ACCOUNT);
if (!new_bus)
return -ENOMEM;
@@ -3760,8 +3771,8 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
if (i == bus->dev_count)
return;
- new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
- sizeof(struct kvm_io_range)), GFP_KERNEL);
+ new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
+ GFP_KERNEL_ACCOUNT);
if (!new_bus) {
pr_err("kvm: failed to shrink bus, removing it completely\n");
goto broken;
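
struct_size() from include/linux/overflow.h replaces the open-coded sizeof arithmetic for the trailing flexible array: it evaluates to sizeof(*bus) + n * sizeof(bus->range[0]) and saturates to SIZE_MAX on overflow, so kmalloc() fails cleanly rather than under-allocating. It relies on the shape of struct kvm_io_bus, roughly as declared in include/linux/kvm_host.h:

	#include <linux/overflow.h>

	struct kvm_io_range {
		gpa_t addr;
		int len;
		struct kvm_io_device *dev;
	};

	struct kvm_io_bus {
		int dev_count;
		int ioeventfd_count;
		struct kvm_io_range range[];	/* flexible array member */
	};
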
@@ -4029,7 +4040,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
active = kvm_active_vms;
spin_unlock(&kvm_lock);
- env = kzalloc(sizeof(*env), GFP_KERNEL);
+ env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
if (!env)
return;
@@ -4045,7 +4056,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
- char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
+ char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
if (p) {
tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index d99850c462a1..524cbd20379f 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -219,7 +219,7 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
}
}
- kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
+ kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
if (!kvg) {
mutex_unlock(&kv->lock);
kvm_vfio_group_put_external_user(vfio_group);
@@ -405,7 +405,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
if (tmp->ops == &kvm_vfio_ops)
return -EBUSY;
- kv = kzalloc(sizeof(*kv), GFP_KERNEL);
+ kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
if (!kv)
return -ENOMEM;